From fd4fef38200acb0727936eda186b5cf23f7f946d Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Tue, 28 May 2024 17:07:05 +0300 Subject: [PATCH 01/10] Temporarily add the $(UPTEST_LOCAL) make target to consume uptest from the main channel. Signed-off-by: Alper Rifat Ulucinar --- Makefile | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index f21111ea6b..617aa80a70 100644 --- a/Makefile +++ b/Makefile @@ -81,6 +81,8 @@ KIND_VERSION = v0.23.0 UP_VERSION = v0.30.0 UP_CHANNEL = stable UPTEST_VERSION = v0.11.1 +UPTEST_LOCAL_VERSION = v0.12.0-9.gac371c9 +UPTEST_LOCAL_CHANNEL = main KUSTOMIZE_VERSION = v5.3.0 YQ_VERSION = v4.40.5 CROSSPLANE_VERSION = 1.14.6 @@ -90,6 +92,16 @@ export UP_CHANNEL := $(UP_CHANNEL) -include build/makelib/k8s_tools.mk +# uptest download and install +UPTEST_LOCAL := $(TOOLS_HOST_DIR)/uptest-$(UPTEST_LOCAL_VERSION) + +$(UPTEST_LOCAL): + @$(INFO) installing uptest $(UPTEST_LOCAL) + @mkdir -p $(TOOLS_HOST_DIR) + @curl -fsSLo $(UPTEST_LOCAL) https://s3.us-west-2.amazonaws.com/crossplane.uptest.releases/$(UPTEST_LOCAL_CHANNEL)/$(UPTEST_LOCAL_VERSION)/bin/$(SAFEHOST_PLATFORM)/uptest || $(FAIL) + @chmod +x $(UPTEST_LOCAL) + @$(OK) installing uptest $(UPTEST_LOCAL) + # ==================================================================================== # Setup Images @@ -218,9 +230,9 @@ CROSSPLANE_NAMESPACE = upbound-system # aws_secret_access_key = REDACTED' # The associated `ProviderConfig`s will be named as `default` and `peer`. 
# - UPTEST_DATASOURCE_PATH (optional), see https://github.com/upbound/uptest#injecting-dynamic-values-and-datasource -uptest: $(UPTEST) $(KUBECTL) $(KUTTL) +uptest: $(UPTEST_LOCAL) $(KUBECTL) $(KUTTL) @$(INFO) running automated tests - @KUBECTL=$(KUBECTL) KUTTL=$(KUTTL) CROSSPLANE_NAMESPACE=$(CROSSPLANE_NAMESPACE) $(UPTEST) e2e "${UPTEST_EXAMPLE_LIST}" --data-source="${UPTEST_DATASOURCE_PATH}" --setup-script=cluster/test/setup.sh --default-conditions="Test" || $(FAIL) + @KUBECTL=$(KUBECTL) KUTTL=$(KUTTL) CROSSPLANE_NAMESPACE=$(CROSSPLANE_NAMESPACE) $(UPTEST_LOCAL) e2e "${UPTEST_EXAMPLE_LIST}" --data-source="${UPTEST_DATASOURCE_PATH}" --setup-script=cluster/test/setup.sh --default-conditions="Test" || $(FAIL) @$(OK) running automated tests uptest-local: From 1f3c8ab68c6313de3674127dd88658655edc5fb3 Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Wed, 29 May 2024 14:27:28 +0300 Subject: [PATCH 02/10] Add the singleton list embedder traverser and configure the API & Terraform converters Signed-off-by: Alper Rifat Ulucinar --- config/registry.go | 99 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 98 insertions(+), 1 deletion(-) diff --git a/config/registry.go b/config/registry.go index 6c6b5bae86..8be9150ad7 100644 --- a/config/registry.go +++ b/config/registry.go @@ -6,10 +6,15 @@ package config import ( "context" + "fmt" + "regexp" + "strconv" + // Note(ezgidemirel): we are importing this to embed provider schema document _ "embed" "github.com/crossplane/upjet/pkg/config" + "github.com/crossplane/upjet/pkg/config/conversion" "github.com/crossplane/upjet/pkg/registry/reference" conversiontfjson "github.com/crossplane/upjet/pkg/types/conversion/tfjson" tfjson "github.com/hashicorp/terraform-json" @@ -52,6 +57,16 @@ var skipList = []string{ "aws_rds_reserved_instance", // Expense of testing } +var ( + reAPIVersion = regexp.MustCompile(`^v(\d+)((alpha|beta)(\d+))?$`) +) + +const ( + errFmtCannotBumpSingletonList = "cannot bump the API version 
for the resource %q containing a singleton list in its API" + errFmtCannotFindPrev = "cannot compute the previous API versions for the resource %q containing a singleton list in its API" + errFmtInvalidAPIVersion = "cannot parse %q as a Kubernetes API version string" +) + // workaround for the TF AWS v4.67.0-based no-fork release: We would like to // keep the types in the generated CRDs intact // (prevent number->int type replacements). @@ -106,6 +121,7 @@ func GetProvider(ctx context.Context, generationProvider bool) (*config.Provider config.WithMainTemplate(hack.MainTemplate), config.WithTerraformProvider(p), config.WithTerraformPluginFrameworkProvider(fwProvider), + config.WithSchemaTraversers(&config.SingletonListEmbedder{}), config.WithDefaultResourceOptions( GroupKindOverrides(), KindOverrides(), @@ -127,7 +143,88 @@ func GetProvider(ctx context.Context, generationProvider bool) (*config.Provider } pc.ConfigureResources() - return pc, nil + return pc, bumpVersionsWithEmbeddedLists(pc) +} + +func bumpVersionsWithEmbeddedLists(pc *config.Provider) error { + for name, r := range pc.Resources { + r := r + // nothing to do if no singleton list has been converted to + // an embedded object + if len(r.CRDListConversionPaths()) == 0 { + continue + } + + bumped, err := bumpAPIVersion(r.Version) + if err != nil { + return errors.Wrapf(err, errFmtCannotBumpSingletonList, r.Name) + } + + if r.PreviousVersions == nil { + prev, err := getPreviousVersions(bumped) + if err != nil { + return errors.Wrapf(err, errFmtCannotFindPrev, r.Name) + } + r.PreviousVersions = prev + } + + currentVer := r.Version + r.Version = bumped + // we would like to set the storage version to v1beta1 to facilitate + // downgrades. 
+ r.SetCRDStorageVersion(currentVer) + r.ControllerReconcileVersion = currentVer + r.Conversions = []conversion.Conversion{ + conversion.NewIdentityConversionExpandPaths(conversion.AllVersions, conversion.AllVersions, conversion.DefaultPathPrefixes(), r.CRDListConversionPaths()...), + conversion.NewSingletonListConversion(conversion.AllVersions, bumped, conversion.DefaultPathPrefixes(), r.CRDListConversionPaths(), conversion.ToEmbeddedObject), + conversion.NewSingletonListConversion(bumped, conversion.AllVersions, conversion.DefaultPathPrefixes(), r.CRDListConversionPaths(), conversion.ToSingletonList)} + pc.Resources[name] = r + } + return nil +} + +// returns a new API version by bumping the last number if the +// API version string is a Kubernetes API version string such +// as v1alpha1, v1beta1 or v1. Otherwise, returns an error. +// If the specified version is v1beta1, then the bumped version is v1beta2. +// If the specified version is v1, then the bumped version is v2. +func bumpAPIVersion(v string) (string, error) { + m := reAPIVersion.FindStringSubmatch(v) + switch { + // e.g., v1 + case len(m) == 2: + n, err := strconv.ParseUint(m[1], 10, 0) + if err != nil { + return "", errors.Wrapf(err, errFmtInvalidAPIVersion, v) + } + return fmt.Sprintf("v%d", n+1), nil + + // e.g., v1beta1 + case len(m) == 5: + n, err := strconv.ParseUint(m[4], 10, 0) + if err != nil { + return "", errors.Wrapf(err, errFmtInvalidAPIVersion, v) + } + return fmt.Sprintf("v%s%s%d", m[1], m[3], n+1), nil + + default: + // then cannot bump this version string + return "", errors.Errorf(errFmtInvalidAPIVersion, v) + } +} + +func getPreviousVersions(v string) ([]string, error) { + p := "v1beta1" + var result []string + var err error + for p != v { + result = append(result, p) + p, err = bumpAPIVersion(p) + if err != nil { + return nil, err + } + } + return result, nil } // CLIReconciledResourceList returns the list of resources that have external From 
b84cf4bdb3ec2f3af0f90e4689cf47d930c40f0b Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Wed, 29 May 2024 14:48:33 +0300 Subject: [PATCH 03/10] Remove the existing Resource.ServerSideApplyMergeStrategies configurations for singleton lists as they are being converted to embedded objects. Signed-off-by: Alper Rifat Ulucinar --- config/eks/config.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/config/eks/config.go b/config/eks/config.go index 21364fba32..56dbec648b 100644 --- a/config/eks/config.go +++ b/config/eks/config.go @@ -29,17 +29,6 @@ func Configure(p *config.Provider) { SelectorFieldName: "SecurityGroupIDSelector", }, } - r.ServerSideApplyMergeStrategies["vpc_config"] = config.MergeStrategy{ - ListMergeStrategy: config.ListMergeStrategy{ - MergeStrategy: config.ListTypeMap, - ListMapKeys: config.ListMapKeys{ - InjectedKey: config.InjectedKey{ - Key: "index", - DefaultValue: `"0"`, - }, - }, - }, - } r.UseAsync = true }) p.AddResourceConfigurator("aws_eks_node_group", func(r *config.Resource) { From b74134f344297010c364f5a4f16bbf73c8379410 Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Wed, 29 May 2024 15:09:23 +0300 Subject: [PATCH 04/10] Run "make generate" Signed-off-by: Alper Rifat Ulucinar --- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_analyzer_terraformed.go | 129 + .../v1beta2/zz_analyzer_types.go | 169 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 381 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_certificatevalidation_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + apis/acm/v1beta1/zz_generated.resolvers.go | 56 +- .../acm/v1beta2/zz_certificate_terraformed.go | 134 + apis/acm/v1beta2/zz_certificate_types.go | 348 + 
.../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/acm/v1beta2/zz_generated.deepcopy.go | 731 + apis/acm/v1beta2/zz_generated.managed.go | 68 + apis/acm/v1beta2/zz_generated.managedlist.go | 17 + apis/acm/v1beta2/zz_groupversion_info.go | 32 + ...z_certificateauthoritycertificate_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + apis/acmpca/v1beta1/zz_generated.resolvers.go | 15 +- apis/acmpca/v1beta1/zz_permission_types.go | 4 +- apis/acmpca/v1beta1/zz_policy_types.go | 4 +- .../v1beta2/zz_certificate_terraformed.go | 129 + apis/acmpca/v1beta2/zz_certificate_types.go | 209 + .../zz_certificateauthority_terraformed.go | 130 + .../v1beta2/zz_certificateauthority_types.go | 498 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/acmpca/v1beta2/zz_generated.deepcopy.go | 1303 + apis/acmpca/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + apis/acmpca/v1beta2/zz_generated.resolvers.go | 66 + apis/acmpca/v1beta2/zz_groupversion_info.go | 32 + .../zz_alertmanagerdefinition_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + apis/amp/v1beta1/zz_generated.resolvers.go | 6 +- .../v1beta1/zz_rulegroupnamespace_types.go | 2 +- .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/amp/v1beta2/zz_generated.deepcopy.go | 362 + apis/amp/v1beta2/zz_generated.managed.go | 68 + apis/amp/v1beta2/zz_generated.managedlist.go | 17 + apis/amp/v1beta2/zz_generated.resolvers.go | 67 + apis/amp/v1beta2/zz_groupversion_info.go | 32 + apis/amp/v1beta2/zz_workspace_terraformed.go | 129 + apis/amp/v1beta2/zz_workspace_types.go | 181 + .../v1beta1/zz_backendenvironment_types.go | 2 +- apis/amplify/v1beta1/zz_branch_types.go | 2 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../amplify/v1beta1/zz_generated.resolvers.go | 8 +- 
apis/amplify/v1beta1/zz_webhook_types.go | 4 +- apis/amplify/v1beta2/zz_app_terraformed.go | 129 + apis/amplify/v1beta2/zz_app_types.go | 494 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/amplify/v1beta2/zz_generated.deepcopy.go | 969 + apis/amplify/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../amplify/v1beta2/zz_generated.resolvers.go | 69 + apis/amplify/v1beta2/zz_groupversion_info.go | 32 + .../apigateway/v1beta1/zz_authorizer_types.go | 8 +- .../v1beta1/zz_basepathmapping_types.go | 12 +- .../apigateway/v1beta1/zz_deployment_types.go | 4 +- .../v1beta1/zz_documentationversion_types.go | 4 +- .../v1beta1/zz_gatewayresponse_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 21 - .../v1beta1/zz_generated.conversion_spokes.go | 154 + .../v1beta1/zz_generated.resolvers.go | 72 +- .../v1beta1/zz_integrationresponse_types.go | 4 +- apis/apigateway/v1beta1/zz_method_types.go | 4 +- .../v1beta1/zz_methodresponse_types.go | 4 +- apis/apigateway/v1beta1/zz_model_types.go | 4 +- .../v1beta1/zz_requestvalidator_types.go | 4 +- apis/apigateway/v1beta1/zz_resource_types.go | 8 +- .../v1beta1/zz_restapipolicy_types.go | 4 +- .../v1beta1/zz_usageplankey_types.go | 4 +- apis/apigateway/v1beta1/zz_vpclink_types.go | 4 +- .../zz_documentationpart_terraformed.go | 129 + .../v1beta2/zz_documentationpart_types.go | 205 + .../v1beta2/zz_domainname_terraformed.go | 129 + .../apigateway/v1beta2/zz_domainname_types.go | 332 + .../v1beta2/zz_generated.conversion_hubs.go | 28 + .../v1beta2/zz_generated.deepcopy.go | 4010 +++ .../v1beta2/zz_generated.managed.go | 428 + .../v1beta2/zz_generated.managedlist.go | 71 + .../v1beta2/zz_generated.resolvers.go | 631 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_integration_terraformed.go | 129 + .../v1beta2/zz_integration_types.go | 413 + .../v1beta2/zz_methodsettings_terraformed.go | 129 + .../v1beta2/zz_methodsettings_types.go | 280 + 
.../v1beta2/zz_restapi_terraformed.go | 129 + apis/apigateway/v1beta2/zz_restapi_types.go | 271 + .../v1beta2/zz_stage_terraformed.go | 129 + apis/apigateway/v1beta2/zz_stage_types.go | 361 + .../v1beta2/zz_usageplan_terraformed.go | 129 + apis/apigateway/v1beta2/zz_usageplan_types.go | 358 + .../v1beta1/zz_apimapping_types.go | 12 +- .../v1beta1/zz_deployment_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 15 - .../v1beta1/zz_generated.conversion_spokes.go | 114 + .../v1beta1/zz_generated.resolvers.go | 44 +- .../v1beta1/zz_integrationresponse_types.go | 8 +- apis/apigatewayv2/v1beta1/zz_model_types.go | 4 +- apis/apigatewayv2/v1beta1/zz_route_types.go | 12 +- .../v1beta1/zz_routeresponse_types.go | 4 +- .../v1beta2/zz_api_terraformed.go | 129 + apis/apigatewayv2/v1beta2/zz_api_types.go | 353 + .../v1beta2/zz_authorizer_terraformed.go | 129 + .../v1beta2/zz_authorizer_types.go | 302 + .../v1beta2/zz_domainname_terraformed.go | 129 + .../v1beta2/zz_domainname_types.go | 239 + .../v1beta2/zz_generated.conversion_hubs.go | 22 + .../v1beta2/zz_generated.deepcopy.go | 3073 +++ .../v1beta2/zz_generated.managed.go | 308 + .../v1beta2/zz_generated.managedlist.go | 53 + .../v1beta2/zz_generated.resolvers.go | 415 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_integration_terraformed.go | 129 + .../v1beta2/zz_integration_types.go | 434 + .../v1beta2/zz_stage_terraformed.go | 129 + apis/apigatewayv2/v1beta2/zz_stage_types.go | 424 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 1737 ++ .../v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 214 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_policy_terraformed.go | 129 + .../appautoscaling/v1beta2/zz_policy_types.go | 638 + .../v1beta2/zz_scheduledaction_terraformed.go 
| 129 + .../v1beta2/zz_scheduledaction_types.go | 275 + .../appconfig/v1beta1/zz_environment_types.go | 4 +- .../v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta1/zz_generated.conversion_spokes.go | 34 + apis/appflow/v1beta2/zz_flow_terraformed.go | 129 + apis/appflow/v1beta2/zz_flow_types.go | 2512 ++ .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/appflow/v1beta2/zz_generated.deepcopy.go | 5779 +++++ apis/appflow/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../appflow/v1beta2/zz_generated.resolvers.go | 132 + apis/appflow/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../zz_eventintegration_terraformed.go | 129 + .../v1beta2/zz_eventintegration_types.go | 162 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 336 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_spokes.go | 154 + .../v1beta2/zz_gatewayroute_terraformed.go | 129 + apis/appmesh/v1beta2/zz_gatewayroute_types.go | 1415 ++ .../v1beta2/zz_generated.conversion_hubs.go | 28 + apis/appmesh/v1beta2/zz_generated.deepcopy.go | 18659 ++++++++++++++ apis/appmesh/v1beta2/zz_generated.managed.go | 428 + .../v1beta2/zz_generated.managedlist.go | 71 + .../appmesh/v1beta2/zz_generated.resolvers.go | 695 + apis/appmesh/v1beta2/zz_groupversion_info.go | 32 + apis/appmesh/v1beta2/zz_mesh_terraformed.go | 129 + apis/appmesh/v1beta2/zz_mesh_types.go | 200 + apis/appmesh/v1beta2/zz_route_terraformed.go | 129 + apis/appmesh/v1beta2/zz_route_types.go | 2101 ++ .../v1beta2/zz_virtualgateway_terraformed.go | 129 + .../v1beta2/zz_virtualgateway_types.go | 1228 + .../v1beta2/zz_virtualnode_terraformed.go | 129 + apis/appmesh/v1beta2/zz_virtualnode_types.go | 2262 ++ .../v1beta2/zz_virtualrouter_terraformed.go | 129 + .../appmesh/v1beta2/zz_virtualrouter_types.go | 249 + 
.../v1beta2/zz_virtualservice_terraformed.go | 129 + .../v1beta2/zz_virtualservice_types.go | 308 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 2232 ++ .../apprunner/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 166 + .../apprunner/v1beta2/zz_groupversion_info.go | 32 + ..._observabilityconfiguration_terraformed.go | 129 + .../zz_observabilityconfiguration_types.go | 159 + .../v1beta2/zz_service_terraformed.go | 129 + apis/apprunner/v1beta2/zz_service_types.go | 861 + .../v1beta1/zz_fleetstackassociation_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 12 - .../v1beta1/zz_generated.conversion_spokes.go | 94 + .../v1beta1/zz_generated.resolvers.go | 6 +- .../v1beta1/zz_userstackassociation_types.go | 2 +- .../v1beta2/zz_directoryconfig_terraformed.go | 129 + .../v1beta2/zz_directoryconfig_types.go | 156 + .../appstream/v1beta2/zz_fleet_terraformed.go | 129 + apis/appstream/v1beta2/zz_fleet_types.go | 427 + .../v1beta2/zz_generated.conversion_hubs.go | 19 + .../v1beta2/zz_generated.deepcopy.go | 2607 ++ .../appstream/v1beta2/zz_generated.managed.go | 248 + .../v1beta2/zz_generated.managedlist.go | 44 + .../v1beta2/zz_generated.resolvers.go | 207 + .../appstream/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_imagebuilder_terraformed.go | 129 + .../v1beta2/zz_imagebuilder_types.go | 356 + .../appstream/v1beta2/zz_stack_terraformed.go | 129 + apis/appstream/v1beta2/zz_stack_types.go | 408 + apis/appsync/v1beta1/zz_apicache_types.go | 4 +- apis/appsync/v1beta1/zz_apikey_types.go | 2 +- .../v1beta1/zz_generated.conversion_hubs.go | 12 - .../v1beta1/zz_generated.conversion_spokes.go | 94 + .../appsync/v1beta1/zz_generated.resolvers.go | 6 +- .../v1beta2/zz_datasource_terraformed.go | 129 + 
apis/appsync/v1beta2/zz_datasource_types.go | 607 + .../v1beta2/zz_function_terraformed.go | 129 + apis/appsync/v1beta2/zz_function_types.go | 333 + .../v1beta2/zz_generated.conversion_hubs.go | 19 + apis/appsync/v1beta2/zz_generated.deepcopy.go | 3862 +++ apis/appsync/v1beta2/zz_generated.managed.go | 248 + .../v1beta2/zz_generated.managedlist.go | 44 + .../appsync/v1beta2/zz_generated.resolvers.go | 384 + .../v1beta2/zz_graphqlapi_terraformed.go | 129 + apis/appsync/v1beta2/zz_graphqlapi_types.go | 639 + apis/appsync/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_resolver_terraformed.go | 129 + apis/appsync/v1beta2/zz_resolver_types.go | 380 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + apis/athena/v1beta1/zz_generated.resolvers.go | 8 +- apis/athena/v1beta1/zz_namedquery_types.go | 8 +- .../athena/v1beta2/zz_database_terraformed.go | 129 + apis/athena/v1beta2/zz_database_types.go | 232 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/athena/v1beta2/zz_generated.deepcopy.go | 1209 + apis/athena/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + apis/athena/v1beta2/zz_generated.resolvers.go | 133 + apis/athena/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_workgroup_terraformed.go | 129 + apis/athena/v1beta2/zz_workgroup_types.go | 369 + .../v1beta1/zz_generated.conversion_hubs.go | 9 - .../v1beta1/zz_generated.conversion_spokes.go | 60 + .../v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta1/zz_lifecyclehook_types.go | 2 +- apis/autoscaling/v1beta1/zz_schedule_types.go | 2 +- .../v1beta2/zz_attachment_types.go | 12 +- .../v1beta2/zz_generated.conversion_hubs.go | 8 +- .../v1beta2/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.deepcopy.go | 6510 ++++- .../v1beta2/zz_generated.managed.go | 180 + .../v1beta2/zz_generated.managedlist.go | 27 + .../v1beta2/zz_generated.resolvers.go | 93 +- .../v1beta2/zz_grouptag_terraformed.go | 
129 + apis/autoscaling/v1beta2/zz_grouptag_types.go | 169 + .../zz_launchconfiguration_terraformed.go | 129 + .../v1beta2/zz_launchconfiguration_types.go | 514 + .../v1beta2/zz_policy_terraformed.go | 129 + apis/autoscaling/v1beta2/zz_policy_types.go | 1399 ++ .../zz_autoscalinggroup_terraformed.go | 130 + .../v1beta3/zz_autoscalinggroup_types.go | 1916 ++ .../v1beta3/zz_generated.conversion_hubs.go | 10 + .../v1beta3/zz_generated.deepcopy.go | 3806 +++ .../v1beta3/zz_generated.managed.go | 68 + .../v1beta3/zz_generated.managedlist.go | 17 + .../v1beta3/zz_generated.resolvers.go | 330 + .../v1beta3/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 1259 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_scalingplan_terraformed.go | 129 + .../v1beta2/zz_scalingplan_types.go | 625 + .../v1beta1/zz_generated.conversion_hubs.go | 9 - .../v1beta1/zz_generated.conversion_spokes.go | 74 + apis/backup/v1beta1/zz_generated.resolvers.go | 4 +- apis/backup/v1beta1/zz_selection_types.go | 4 +- .../v1beta2/zz_framework_terraformed.go | 129 + apis/backup/v1beta2/zz_framework_types.go | 268 + .../v1beta2/zz_generated.conversion_hubs.go | 16 + apis/backup/v1beta2/zz_generated.deepcopy.go | 2113 ++ apis/backup/v1beta2/zz_generated.managed.go | 188 + .../v1beta2/zz_generated.managedlist.go | 35 + apis/backup/v1beta2/zz_generated.resolvers.go | 73 + apis/backup/v1beta2/zz_groupversion_info.go | 32 + apis/backup/v1beta2/zz_plan_terraformed.go | 129 + apis/backup/v1beta2/zz_plan_types.go | 405 + .../v1beta2/zz_reportplan_terraformed.go | 129 + apis/backup/v1beta2/zz_reportplan_types.go | 283 + .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/batch/v1beta2/zz_generated.deepcopy.go | 2385 ++ 
apis/batch/v1beta2/zz_generated.managed.go | 128 + .../batch/v1beta2/zz_generated.managedlist.go | 26 + apis/batch/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_jobdefinition_terraformed.go | 129 + apis/batch/v1beta2/zz_jobdefinition_types.go | 801 + .../zz_schedulingpolicy_terraformed.go | 129 + .../v1beta2/zz_schedulingpolicy_types.go | 181 + .../v1beta1/zz_generated.conversion_spokes.go | 54 + apis/budgets/v1beta2/zz_budget_terraformed.go | 129 + apis/budgets/v1beta2/zz_budget_types.go | 546 + .../v1beta2/zz_budgetaction_terraformed.go | 129 + apis/budgets/v1beta2/zz_budgetaction_types.go | 498 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/budgets/v1beta2/zz_generated.deepcopy.go | 2163 ++ apis/budgets/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../budgets/v1beta2/zz_generated.resolvers.go | 153 + apis/budgets/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/chime/v1beta2/zz_generated.deepcopy.go | 351 + apis/chime/v1beta2/zz_generated.managed.go | 68 + .../chime/v1beta2/zz_generated.managedlist.go | 17 + apis/chime/v1beta2/zz_generated.resolvers.go | 68 + apis/chime/v1beta2/zz_groupversion_info.go | 32 + .../zz_voiceconnectorstreaming_terraformed.go | 129 + .../zz_voiceconnectorstreaming_types.go | 194 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 1391 ++ .../v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 119 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_stackset_terraformed.go | 129 + .../v1beta2/zz_stackset_types.go | 377 + .../zz_stacksetinstance_terraformed.go | 129 + .../v1beta2/zz_stacksetinstance_types.go | 
302 + .../v1beta1/zz_generated.conversion_hubs.go | 24 - .../v1beta1/zz_generated.conversion_spokes.go | 174 + .../v1beta2/zz_cachepolicy_terraformed.go | 129 + .../v1beta2/zz_cachepolicy_types.go | 368 + .../v1beta2/zz_distribution_terraformed.go | 129 + .../v1beta2/zz_distribution_types.go | 1735 ++ ..._fieldlevelencryptionconfig_terraformed.go | 129 + .../zz_fieldlevelencryptionconfig_types.go | 303 + ...fieldlevelencryptionprofile_terraformed.go | 129 + .../zz_fieldlevelencryptionprofile_types.go | 221 + .../v1beta2/zz_generated.conversion_hubs.go | 31 + .../v1beta2/zz_generated.deepcopy.go | 8930 +++++++ .../v1beta2/zz_generated.managed.go | 488 + .../v1beta2/zz_generated.managedlist.go | 80 + .../v1beta2/zz_generated.resolvers.go | 489 + .../v1beta2/zz_groupversion_info.go | 32 + .../zz_monitoringsubscription_terraformed.go | 129 + .../zz_monitoringsubscription_types.go | 170 + .../zz_originrequestpolicy_terraformed.go | 129 + .../v1beta2/zz_originrequestpolicy_types.go | 257 + .../zz_realtimelogconfig_terraformed.go | 129 + .../v1beta2/zz_realtimelogconfig_types.go | 242 + .../zz_responseheaderspolicy_terraformed.go | 129 + .../v1beta2/zz_responseheaderspolicy_types.go | 715 + .../zz_domainserviceaccesspolicy_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta1/zz_generated.resolvers.go | 7 +- .../v1beta2/zz_domain_terraformed.go | 129 + apis/cloudsearch/v1beta2/zz_domain_types.go | 319 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 608 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.resolvers.go | 4 +- apis/cloudtrail/v1beta1/zz_trail_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta1/zz_generated.resolvers.go | 4 +- 
.../v1beta1/zz_metricstream_types.go | 4 +- .../v1beta2/zz_compositealarm_terraformed.go | 129 + .../v1beta2/zz_compositealarm_types.go | 270 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 1458 ++ .../v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 106 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_metricalarm_terraformed.go | 129 + .../v1beta2/zz_metricalarm_types.go | 518 + .../v1beta1/zz_apidestination_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 9 - .../v1beta1/zz_generated.conversion_spokes.go | 74 + .../v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta2/zz_connection_terraformed.go | 129 + .../v1beta2/zz_connection_types.go | 598 + .../v1beta2/zz_generated.conversion_hubs.go | 16 + .../v1beta2/zz_generated.deepcopy.go | 3882 +++ .../v1beta2/zz_generated.managed.go | 188 + .../v1beta2/zz_generated.managedlist.go | 35 + .../v1beta2/zz_generated.resolvers.go | 279 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_permission_terraformed.go | 129 + .../v1beta2/zz_permission_types.go | 223 + .../v1beta2/zz_target_terraformed.go | 129 + .../v1beta2/zz_target_types.go | 1002 + .../v1beta1/zz_destination_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta1/zz_generated.resolvers.go | 8 +- .../v1beta1/zz_subscriptionfilter_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 396 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 67 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_metricfilter_terraformed.go | 129 + .../v1beta2/zz_metricfilter_types.go | 216 + .../v1beta1/zz_generated.conversion_spokes.go | 74 + .../v1beta2/zz_codepipeline_terraformed.go | 129 + 
.../v1beta2/zz_codepipeline_types.go | 785 + .../zz_customactiontype_terraformed.go | 129 + .../v1beta2/zz_customactiontype_types.go | 375 + .../v1beta2/zz_generated.conversion_hubs.go | 16 + .../v1beta2/zz_generated.deepcopy.go | 3174 +++ .../v1beta2/zz_generated.managed.go | 188 + .../v1beta2/zz_generated.managedlist.go | 35 + .../v1beta2/zz_generated.resolvers.go | 160 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_webhook_terraformed.go | 129 + apis/codepipeline/v1beta2/zz_webhook_types.go | 240 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 373 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_host_terraformed.go | 129 + .../v1beta2/zz_host_types.go | 195 + ...oidentitypoolproviderprincipaltag_types.go | 4 +- .../v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta1/zz_generated.resolvers.go | 38 +- .../v1beta1/zz_identityprovider_types.go | 4 +- .../v1beta1/zz_resourceserver_types.go | 4 +- apis/cognitoidp/v1beta1/zz_user_types.go | 2 +- apis/cognitoidp/v1beta1/zz_usergroup_types.go | 4 +- .../v1beta1/zz_useringroup_types.go | 4 +- .../v1beta1/zz_userpoolclient_types.go | 8 +- .../v1beta1/zz_userpooldomain_types.go | 8 +- .../zz_userpooluicustomization_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 4149 ++++ .../v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 678 + .../v1beta2/zz_groupversion_info.go | 32 + .../zz_riskconfiguration_terraformed.go | 129 + .../v1beta2/zz_riskconfiguration_types.go | 600 + .../v1beta2/zz_userpool_terraformed.go | 133 + 
apis/cognitoidp/v1beta2/zz_userpool_types.go | 1482 ++ .../v1beta1/zz_generated.conversion_hubs.go | 15 - .../v1beta1/zz_generated.conversion_spokes.go | 114 + .../v1beta2/zz_configrule_terraformed.go | 129 + .../v1beta2/zz_configrule_types.go | 392 + .../zz_configurationaggregator_terraformed.go | 129 + .../zz_configurationaggregator_types.go | 228 + .../zz_configurationrecorder_terraformed.go | 129 + .../v1beta2/zz_configurationrecorder_types.go | 310 + .../v1beta2/zz_deliverychannel_terraformed.go | 129 + .../v1beta2/zz_deliverychannel_types.go | 178 + .../v1beta2/zz_generated.conversion_hubs.go | 22 + .../v1beta2/zz_generated.deepcopy.go | 2871 +++ .../v1beta2/zz_generated.managed.go | 308 + .../v1beta2/zz_generated.managedlist.go | 53 + .../v1beta2/zz_generated.resolvers.go | 229 + .../v1beta2/zz_groupversion_info.go | 32 + ...zz_remediationconfiguration_terraformed.go | 129 + .../zz_remediationconfiguration_types.go | 282 + .../v1beta1/zz_generated.conversion_hubs.go | 15 - .../v1beta1/zz_generated.conversion_spokes.go | 100 + .../connect/v1beta1/zz_generated.resolvers.go | 6 +- apis/connect/v1beta1/zz_instance_types.go | 4 +- .../zz_lambdafunctionassociation_types.go | 2 +- .../v1beta2/zz_botassociation_terraformed.go | 129 + .../v1beta2/zz_botassociation_types.go | 166 + .../v1beta2/zz_generated.conversion_hubs.go | 13 +- .../v1beta2/zz_generated.conversion_spokes.go | 54 + apis/connect/v1beta2/zz_generated.deepcopy.go | 4231 +++- apis/connect/v1beta2/zz_generated.managed.go | 300 + .../v1beta2/zz_generated.managedlist.go | 45 + .../connect/v1beta2/zz_generated.resolvers.go | 557 +- .../zz_instancestorageconfig_terraformed.go | 129 + .../v1beta2/zz_instancestorageconfig_types.go | 479 + .../v1beta2/zz_quickconnect_terraformed.go | 129 + apis/connect/v1beta2/zz_quickconnect_types.go | 302 + .../v1beta2/zz_routingprofile_types.go | 4 +- apis/connect/v1beta2/zz_user_terraformed.go | 129 + apis/connect/v1beta2/zz_user_types.go | 336 + 
.../zz_userhierarchystructure_terraformed.go | 129 + .../zz_userhierarchystructure_types.go | 316 + .../v1beta3/zz_generated.conversion_hubs.go | 13 + apis/connect/v1beta3/zz_generated.deepcopy.go | 1069 + apis/connect/v1beta3/zz_generated.managed.go | 128 + .../v1beta3/zz_generated.managedlist.go | 26 + .../connect/v1beta3/zz_generated.resolvers.go | 156 + apis/connect/v1beta3/zz_groupversion_info.go | 32 + .../zz_hoursofoperation_terraformed.go | 129 + .../v1beta3/zz_hoursofoperation_types.go | 284 + apis/connect/v1beta3/zz_queue_terraformed.go | 129 + apis/connect/v1beta3/zz_queue_types.go | 277 + apis/cur/v1beta1/zz_generated.resolvers.go | 4 +- apis/cur/v1beta1/zz_reportdefinition_types.go | 4 +- .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../datasync/v1beta2/zz_generated.deepcopy.go | 1638 ++ apis/datasync/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 238 + apis/datasync/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_locations3_terraformed.go | 129 + apis/datasync/v1beta2/zz_locations3_types.go | 227 + apis/datasync/v1beta2/zz_task_terraformed.go | 129 + apis/datasync/v1beta2/zz_task_types.go | 640 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + apis/dax/v1beta2/zz_cluster_terraformed.go | 129 + apis/dax/v1beta2/zz_cluster_types.go | 367 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/dax/v1beta2/zz_generated.deepcopy.go | 639 + apis/dax/v1beta2/zz_generated.managed.go | 68 + apis/dax/v1beta2/zz_generated.managedlist.go | 17 + apis/dax/v1beta2/zz_generated.resolvers.go | 108 + apis/dax/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../zz_deploymentconfig_terraformed.go | 129 + .../v1beta2/zz_deploymentconfig_types.go | 262 + 
.../v1beta2/zz_deploymentgroup_terraformed.go | 129 + .../v1beta2/zz_deploymentgroup_types.go | 995 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/deploy/v1beta2/zz_generated.deepcopy.go | 2782 +++ apis/deploy/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + apis/deploy/v1beta2/zz_generated.resolvers.go | 311 + apis/deploy/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 471 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 158 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_testgridproject_terraformed.go | 129 + .../v1beta2/zz_testgridproject_types.go | 250 + .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/dlm/v1beta2/zz_generated.deepcopy.go | 2252 ++ apis/dlm/v1beta2/zz_generated.managed.go | 68 + apis/dlm/v1beta2/zz_generated.managedlist.go | 17 + apis/dlm/v1beta2/zz_generated.resolvers.go | 120 + apis/dlm/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_lifecyclepolicy_terraformed.go | 129 + apis/dlm/v1beta2/zz_lifecyclepolicy_types.go | 984 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + apis/dms/v1beta1/zz_generated.resolvers.go | 8 +- apis/dms/v1beta1/zz_replicationtask_types.go | 8 +- apis/dms/v1beta2/zz_endpoint_terraformed.go | 129 + apis/dms/v1beta2/zz_endpoint_types.go | 1502 ++ .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/dms/v1beta2/zz_generated.deepcopy.go | 2537 ++ apis/dms/v1beta2/zz_generated.managed.go | 68 + apis/dms/v1beta2/zz_generated.managedlist.go | 17 + apis/dms/v1beta2/zz_generated.resolvers.go | 145 + apis/dms/v1beta2/zz_groupversion_info.go | 32 + 
.../v1beta1/zz_conditionalforwarder_types.go | 2 +- .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + apis/ds/v1beta1/zz_generated.resolvers.go | 2 +- apis/ds/v1beta2/zz_directory_terraformed.go | 129 + apis/ds/v1beta2/zz_directory_types.go | 410 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/ds/v1beta2/zz_generated.deepcopy.go | 1074 + apis/ds/v1beta2/zz_generated.managed.go | 128 + apis/ds/v1beta2/zz_generated.managedlist.go | 26 + apis/ds/v1beta2/zz_generated.resolvers.go | 250 + apis/ds/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_shareddirectory_terraformed.go | 129 + apis/ds/v1beta2/zz_shareddirectory_types.go | 181 + .../v1beta1/zz_contributorinsights_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta1/zz_generated.resolvers.go | 20 +- .../zz_kinesisstreamingdestination_types.go | 8 +- apis/dynamodb/v1beta1/zz_tableitem_types.go | 4 +- .../dynamodb/v1beta1/zz_tablereplica_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../dynamodb/v1beta2/zz_generated.deepcopy.go | 1609 ++ apis/dynamodb/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + apis/dynamodb/v1beta2/zz_groupversion_info.go | 32 + apis/dynamodb/v1beta2/zz_table_terraformed.go | 129 + apis/dynamodb/v1beta2/zz_table_types.go | 772 + apis/ec2/v1beta1/zz_eip_types.go | 4 +- apis/ec2/v1beta1/zz_eipassociation_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 39 - .../v1beta1/zz_generated.conversion_spokes.go | 260 + apis/ec2/v1beta1/zz_generated.resolvers.go | 40 +- apis/ec2/v1beta1/zz_instancestate_types.go | 4 +- .../zz_networkinterfaceattachment_types.go | 4 +- .../zz_networkinterfacesgattachment_types.go | 4 +- apis/ec2/v1beta1/zz_volumeattachment_types.go | 4 +- ..._vpcendpointroutetableassociation_types.go | 4 +- ...cendpointsecuritygroupassociation_types.go | 4 +- 
.../zz_vpcendpointsubnetassociation_types.go | 4 +- .../v1beta1/zz_vpnconnectionroute_types.go | 4 +- .../zz_ebssnapshotimport_terraformed.go | 129 + .../ec2/v1beta2/zz_ebssnapshotimport_types.go | 365 + apis/ec2/v1beta2/zz_flowlog_terraformed.go | 132 + apis/ec2/v1beta2/zz_flowlog_types.go | 375 + .../v1beta2/zz_generated.conversion_hubs.go | 39 + apis/ec2/v1beta2/zz_generated.deepcopy.go | 19333 ++++++++++++++- apis/ec2/v1beta2/zz_generated.managed.go | 780 + apis/ec2/v1beta2/zz_generated.managedlist.go | 117 + apis/ec2/v1beta2/zz_generated.resolvers.go | 1802 +- apis/ec2/v1beta2/zz_instance_terraformed.go | 139 + apis/ec2/v1beta2/zz_instance_types.go | 1381 ++ .../v1beta2/zz_launchtemplate_terraformed.go | 129 + apis/ec2/v1beta2/zz_launchtemplate_types.go | 2166 ++ apis/ec2/v1beta2/zz_route_types.go | 8 +- .../zz_spotfleetrequest_terraformed.go | 129 + apis/ec2/v1beta2/zz_spotfleetrequest_types.go | 1531 ++ .../zz_spotinstancerequest_terraformed.go | 134 + .../v1beta2/zz_spotinstancerequest_types.go | 1088 + .../zz_trafficmirrorfilterrule_terraformed.go | 129 + .../zz_trafficmirrorfilterrule_types.go | 277 + .../ec2/v1beta2/zz_vpcendpoint_terraformed.go | 129 + apis/ec2/v1beta2/zz_vpcendpoint_types.go | 311 + .../v1beta2/zz_vpcipampoolcidr_terraformed.go | 129 + apis/ec2/v1beta2/zz_vpcipampoolcidr_types.go | 183 + .../zz_vpcpeeringconnection_terraformed.go | 129 + .../v1beta2/zz_vpcpeeringconnection_types.go | 237 + ...pcpeeringconnectionaccepter_terraformed.go | 129 + .../zz_vpcpeeringconnectionaccepter_types.go | 233 + ...vpcpeeringconnectionoptions_terraformed.go | 129 + .../zz_vpcpeeringconnectionoptions_types.go | 179 + .../v1beta2/zz_vpnconnection_terraformed.go | 129 + apis/ec2/v1beta2/zz_vpnconnection_types.go | 961 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + apis/ecr/v1beta1/zz_generated.resolvers.go | 8 +- apis/ecr/v1beta1/zz_lifecyclepolicy_types.go | 4 +- 
apis/ecr/v1beta1/zz_repositorypolicy_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/ecr/v1beta2/zz_generated.deepcopy.go | 930 + apis/ecr/v1beta2/zz_generated.managed.go | 128 + apis/ecr/v1beta2/zz_generated.managedlist.go | 26 + apis/ecr/v1beta2/zz_generated.resolvers.go | 73 + apis/ecr/v1beta2/zz_groupversion_info.go | 32 + ...zz_replicationconfiguration_terraformed.go | 129 + .../zz_replicationconfiguration_types.go | 206 + apis/ecr/v1beta2/zz_repository_terraformed.go | 129 + apis/ecr/v1beta2/zz_repository_types.go | 227 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta1/zz_generated.resolvers.go | 7 +- .../v1beta1/zz_repositorypolicy_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 442 + .../ecrpublic/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../ecrpublic/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_repository_terraformed.go | 129 + apis/ecrpublic/v1beta2/zz_repository_types.go | 209 + .../zz_clustercapacityproviders_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 12 - .../v1beta1/zz_generated.conversion_spokes.go | 94 + apis/ecs/v1beta1/zz_generated.resolvers.go | 4 +- .../zz_capacityprovider_terraformed.go | 129 + apis/ecs/v1beta2/zz_capacityprovider_types.go | 250 + apis/ecs/v1beta2/zz_cluster_terraformed.go | 129 + apis/ecs/v1beta2/zz_cluster_types.go | 306 + .../v1beta2/zz_generated.conversion_hubs.go | 19 + apis/ecs/v1beta2/zz_generated.deepcopy.go | 5151 ++++ apis/ecs/v1beta2/zz_generated.managed.go | 248 + apis/ecs/v1beta2/zz_generated.managedlist.go | 44 + apis/ecs/v1beta2/zz_generated.resolvers.go | 376 + apis/ecs/v1beta2/zz_groupversion_info.go | 32 + apis/ecs/v1beta2/zz_service_terraformed.go | 129 + apis/ecs/v1beta2/zz_service_types.go | 1133 + .../v1beta2/zz_taskdefinition_terraformed.go | 129 + 
apis/ecs/v1beta2/zz_taskdefinition_types.go | 752 + apis/efs/v1beta1/zz_filesystempolicy_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 12 - .../v1beta1/zz_generated.conversion_spokes.go | 94 + apis/efs/v1beta1/zz_generated.resolvers.go | 8 +- apis/efs/v1beta1/zz_mounttarget_types.go | 4 +- .../efs/v1beta2/zz_accesspoint_terraformed.go | 129 + apis/efs/v1beta2/zz_accesspoint_types.go | 275 + .../v1beta2/zz_backuppolicy_terraformed.go | 129 + apis/efs/v1beta2/zz_backuppolicy_types.go | 149 + apis/efs/v1beta2/zz_filesystem_terraformed.go | 129 + apis/efs/v1beta2/zz_filesystem_types.go | 324 + .../v1beta2/zz_generated.conversion_hubs.go | 19 + apis/efs/v1beta2/zz_generated.deepcopy.go | 1784 ++ apis/efs/v1beta2/zz_generated.managed.go | 248 + apis/efs/v1beta2/zz_generated.managedlist.go | 44 + apis/efs/v1beta2/zz_generated.resolvers.go | 219 + apis/efs/v1beta2/zz_groupversion_info.go | 32 + ...zz_replicationconfiguration_terraformed.go | 129 + .../zz_replicationconfiguration_types.go | 193 + apis/eks/v1beta1/zz_addon_types.go | 4 +- apis/eks/v1beta1/zz_fargateprofile_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 9 - .../v1beta1/zz_generated.conversion_spokes.go | 74 + apis/eks/v1beta1/zz_generated.resolvers.go | 12 +- .../zz_podidentityassociation_types.go | 4 +- apis/eks/v1beta2/zz_cluster_terraformed.go | 129 + apis/eks/v1beta2/zz_cluster_types.go | 566 + .../v1beta2/zz_generated.conversion_hubs.go | 16 + apis/eks/v1beta2/zz_generated.deepcopy.go | 2928 +++ apis/eks/v1beta2/zz_generated.managed.go | 188 + apis/eks/v1beta2/zz_generated.managedlist.go | 35 + apis/eks/v1beta2/zz_generated.resolvers.go | 394 + apis/eks/v1beta2/zz_groupversion_info.go | 32 + .../zz_identityproviderconfig_terraformed.go | 129 + .../zz_identityproviderconfig_types.go | 235 + apis/eks/v1beta2/zz_nodegroup_terraformed.go | 130 + apis/eks/v1beta2/zz_nodegroup_types.go | 580 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - 
.../v1beta1/zz_generated.conversion_spokes.go | 20 + .../v1beta1/zz_generated.resolvers.go | 4 +- .../elasticache/v1beta1/zz_usergroup_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 3 + .../v1beta2/zz_generated.deepcopy.go | 398 + .../v1beta2/zz_generated.managed.go | 60 + .../v1beta2/zz_generated.managedlist.go | 9 + .../v1beta2/zz_user_terraformed.go | 129 + apis/elasticache/v1beta2/zz_user_types.go | 193 + .../v1beta1/zz_applicationversion_types.go | 8 +- .../v1beta1/zz_configurationtemplate_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta1/zz_generated.resolvers.go | 12 +- .../v1beta2/zz_application_terraformed.go | 129 + .../v1beta2/zz_application_types.go | 194 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 387 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 73 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_domainpolicy_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta2/zz_domain_terraformed.go | 129 + apis/elasticsearch/v1beta2/zz_domain_types.go | 936 + .../zz_domainsamloptions_terraformed.go | 129 + .../v1beta2/zz_domainsamloptions_types.go | 206 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 2423 ++ .../v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 74 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 2049 ++ .../v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 192 
+ .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_pipeline_terraformed.go | 129 + .../v1beta2/zz_pipeline_types.go | 459 + .../v1beta2/zz_preset_terraformed.go | 129 + .../v1beta2/zz_preset_types.go | 640 + .../zz_appcookiestickinesspolicy_types.go | 2 +- apis/elb/v1beta1/zz_attachment_types.go | 8 +- .../v1beta1/zz_backendserverpolicy_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + apis/elb/v1beta1/zz_generated.resolvers.go | 34 +- .../zz_lbcookiestickinesspolicy_types.go | 4 +- .../zz_lbsslnegotiationpolicy_types.go | 4 +- apis/elb/v1beta1/zz_listenerpolicy_types.go | 4 +- apis/elb/v1beta1/zz_policy_types.go | 4 +- .../v1beta1/zz_proxyprotocolpolicy_types.go | 4 +- apis/elb/v1beta2/zz_elb_terraformed.go | 130 + apis/elb/v1beta2/zz_elb_types.go | 499 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/elb/v1beta2/zz_generated.deepcopy.go | 928 + apis/elb/v1beta2/zz_generated.managed.go | 68 + apis/elb/v1beta2/zz_generated.managedlist.go | 17 + apis/elb/v1beta2/zz_generated.resolvers.go | 106 + apis/elb/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 12 - .../v1beta1/zz_generated.conversion_spokes.go | 94 + apis/elbv2/v1beta1/zz_generated.resolvers.go | 12 +- .../v1beta1/zz_lblistenercertificate_types.go | 8 +- .../zz_lbtargetgroupattachment_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 19 + apis/elbv2/v1beta2/zz_generated.deepcopy.go | 5641 +++++ apis/elbv2/v1beta2/zz_generated.managed.go | 248 + .../elbv2/v1beta2/zz_generated.managedlist.go | 44 + apis/elbv2/v1beta2/zz_generated.resolvers.go | 664 + apis/elbv2/v1beta2/zz_groupversion_info.go | 32 + apis/elbv2/v1beta2/zz_lb_terraformed.go | 130 + apis/elbv2/v1beta2/zz_lb_types.go | 579 + .../v1beta2/zz_lblistener_terraformed.go | 129 + apis/elbv2/v1beta2/zz_lblistener_types.go | 807 + .../v1beta2/zz_lblistenerrule_terraformed.go | 129 + 
apis/elbv2/v1beta2/zz_lblistenerrule_types.go | 1010 + .../v1beta2/zz_lbtargetgroup_terraformed.go | 130 + apis/elbv2/v1beta2/zz_lbtargetgroup_types.go | 566 + .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_application_terraformed.go | 129 + .../v1beta2/zz_application_types.go | 452 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 1023 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta2/zz_feature_terraformed.go | 129 + apis/evidently/v1beta2/zz_feature_types.go | 290 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 1141 + .../evidently/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 49 + .../evidently/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_project_terraformed.go | 129 + apis/evidently/v1beta2/zz_project_types.go | 243 + .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_deliverystream_terraformed.go | 131 + .../v1beta2/zz_deliverystream_types.go | 5196 ++++ .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../firehose/v1beta2/zz_generated.deepcopy.go | 10287 ++++++++ apis/firehose/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 1422 ++ apis/firehose/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../zz_experimenttemplate_terraformed.go | 129 + .../v1beta2/zz_experimenttemplate_types.go | 582 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/fis/v1beta2/zz_generated.deepcopy.go | 1345 + apis/fis/v1beta2/zz_generated.managed.go | 68 + apis/fis/v1beta2/zz_generated.managedlist.go | 17 + apis/fis/v1beta2/zz_generated.resolvers.go 
| 68 + apis/fis/v1beta2/zz_groupversion_info.go | 32 + apis/fsx/v1beta1/zz_backup_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 15 - .../v1beta1/zz_generated.conversion_spokes.go | 114 + apis/fsx/v1beta1/zz_generated.resolvers.go | 4 +- ...z_datarepositoryassociation_terraformed.go | 129 + .../zz_datarepositoryassociation_types.go | 276 + .../v1beta2/zz_generated.conversion_hubs.go | 22 + apis/fsx/v1beta2/zz_generated.deepcopy.go | 4171 ++++ apis/fsx/v1beta2/zz_generated.managed.go | 308 + apis/fsx/v1beta2/zz_generated.managedlist.go | 53 + apis/fsx/v1beta2/zz_generated.resolvers.go | 576 + apis/fsx/v1beta2/zz_groupversion_info.go | 32 + .../zz_lustrefilesystem_terraformed.go | 129 + apis/fsx/v1beta2/zz_lustrefilesystem_types.go | 462 + .../v1beta2/zz_ontapfilesystem_terraformed.go | 129 + apis/fsx/v1beta2/zz_ontapfilesystem_types.go | 459 + ..._ontapstoragevirtualmachine_terraformed.go | 129 + .../zz_ontapstoragevirtualmachine_types.go | 369 + .../zz_windowsfilesystem_terraformed.go | 129 + .../fsx/v1beta2/zz_windowsfilesystem_types.go | 548 + .../v1beta1/zz_generated.conversion_hubs.go | 12 - .../v1beta1/zz_generated.conversion_spokes.go | 94 + apis/gamelift/v1beta2/zz_alias_terraformed.go | 129 + apis/gamelift/v1beta2/zz_alias_types.go | 182 + apis/gamelift/v1beta2/zz_build_terraformed.go | 129 + apis/gamelift/v1beta2/zz_build_types.go | 261 + apis/gamelift/v1beta2/zz_fleet_terraformed.go | 129 + apis/gamelift/v1beta2/zz_fleet_types.go | 467 + .../v1beta2/zz_generated.conversion_hubs.go | 19 + .../gamelift/v1beta2/zz_generated.deepcopy.go | 2182 ++ apis/gamelift/v1beta2/zz_generated.managed.go | 248 + .../v1beta2/zz_generated.managedlist.go | 44 + .../v1beta2/zz_generated.resolvers.go | 386 + apis/gamelift/v1beta2/zz_groupversion_info.go | 32 + .../gamelift/v1beta2/zz_script_terraformed.go | 129 + apis/gamelift/v1beta2/zz_script_types.go | 259 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + 
.../glacier/v1beta1/zz_generated.resolvers.go | 4 +- apis/glacier/v1beta1/zz_vaultlock_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/glacier/v1beta2/zz_generated.deepcopy.go | 380 + apis/glacier/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../glacier/v1beta2/zz_generated.resolvers.go | 74 + apis/glacier/v1beta2/zz_groupversion_info.go | 32 + apis/glacier/v1beta2/zz_vault_terraformed.go | 129 + apis/glacier/v1beta2/zz_vault_types.go | 188 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta1/zz_generated.resolvers.go | 7 +- .../v1beta1/zz_listener_types.go | 4 +- .../v1beta2/zz_accelerator_terraformed.go | 129 + .../v1beta2/zz_accelerator_types.go | 227 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 492 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 27 - .../v1beta1/zz_generated.conversion_spokes.go | 194 + apis/glue/v1beta1/zz_generated.resolvers.go | 2 +- .../v1beta1/zz_userdefinedfunction_types.go | 2 +- .../v1beta2/zz_catalogdatabase_terraformed.go | 129 + apis/glue/v1beta2/zz_catalogdatabase_types.go | 295 + .../v1beta2/zz_catalogtable_terraformed.go | 129 + apis/glue/v1beta2/zz_catalogtable_types.go | 784 + .../glue/v1beta2/zz_classifier_terraformed.go | 129 + apis/glue/v1beta2/zz_classifier_types.go | 314 + .../glue/v1beta2/zz_connection_terraformed.go | 129 + apis/glue/v1beta2/zz_connection_types.go | 245 + apis/glue/v1beta2/zz_crawler_terraformed.go | 129 + apis/glue/v1beta2/zz_crawler_types.go | 910 + ...tacatalogencryptionsettings_terraformed.go | 129 + .../zz_datacatalogencryptionsettings_types.go | 249 + .../v1beta2/zz_generated.conversion_hubs.go | 34 + apis/glue/v1beta2/zz_generated.deepcopy.go | 8314 +++++++ 
apis/glue/v1beta2/zz_generated.managed.go | 548 + apis/glue/v1beta2/zz_generated.managedlist.go | 89 + apis/glue/v1beta2/zz_generated.resolvers.go | 856 + apis/glue/v1beta2/zz_groupversion_info.go | 32 + apis/glue/v1beta2/zz_job_terraformed.go | 132 + apis/glue/v1beta2/zz_job_types.go | 385 + .../zz_securityconfiguration_terraformed.go | 129 + .../v1beta2/zz_securityconfiguration_types.go | 280 + apis/glue/v1beta2/zz_trigger_terraformed.go | 129 + apis/glue/v1beta2/zz_trigger_types.go | 486 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../grafana/v1beta1/zz_generated.resolvers.go | 16 +- .../v1beta1/zz_licenseassociation_types.go | 4 +- .../v1beta1/zz_roleassociation_types.go | 4 +- .../v1beta1/zz_workspaceapikey_types.go | 4 +- .../zz_workspacesamlconfiguration_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/grafana/v1beta2/zz_generated.deepcopy.go | 781 + apis/grafana/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../grafana/v1beta2/zz_generated.resolvers.go | 68 + apis/grafana/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_workspace_terraformed.go | 129 + apis/grafana/v1beta2/zz_workspace_types.go | 358 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta1/zz_generated.resolvers.go | 11 +- apis/guardduty/v1beta1/zz_member_types.go | 8 +- .../v1beta2/zz_detector_terraformed.go | 129 + apis/guardduty/v1beta2/zz_detector_types.go | 324 + .../v1beta2/zz_filter_terraformed.go | 129 + apis/guardduty/v1beta2/zz_filter_types.go | 264 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 1282 + .../guardduty/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 49 + .../guardduty/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - 
.../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta1/zz_generated.resolvers.go | 7 +- .../v1beta1/zz_groupmembership_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 909 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_user_terraformed.go | 129 + apis/identitystore/v1beta2/zz_user_types.go | 486 + .../v1beta1/zz_generated.conversion_hubs.go | 18 - .../v1beta1/zz_generated.conversion_spokes.go | 134 + .../v1beta2/zz_containerrecipe_terraformed.go | 129 + .../v1beta2/zz_containerrecipe_types.go | 568 + ...z_distributionconfiguration_terraformed.go | 129 + .../zz_distributionconfiguration_types.go | 581 + .../v1beta2/zz_generated.conversion_hubs.go | 25 + .../v1beta2/zz_generated.deepcopy.go | 5988 +++++ .../v1beta2/zz_generated.managed.go | 368 + .../v1beta2/zz_generated.managedlist.go | 62 + .../v1beta2/zz_generated.resolvers.go | 671 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_image_terraformed.go | 129 + apis/imagebuilder/v1beta2/zz_image_types.go | 502 + .../v1beta2/zz_imagepipeline_terraformed.go | 129 + .../v1beta2/zz_imagepipeline_types.go | 405 + .../v1beta2/zz_imagerecipe_terraformed.go | 129 + .../v1beta2/zz_imagerecipe_types.go | 448 + ...infrastructureconfiguration_terraformed.go | 129 + .../zz_infrastructureconfiguration_types.go | 438 + .../v1beta1/zz_generated.conversion_hubs.go | 18 - .../v1beta1/zz_generated.conversion_spokes.go | 134 + .../v1beta2/zz_generated.conversion_hubs.go | 25 + apis/iot/v1beta2/zz_generated.deepcopy.go | 8431 +++++++ apis/iot/v1beta2/zz_generated.managed.go | 368 + apis/iot/v1beta2/zz_generated.managedlist.go | 62 + apis/iot/v1beta2/zz_generated.resolvers.go | 490 + apis/iot/v1beta2/zz_groupversion_info.go | 32 + .../zz_indexingconfiguration_terraformed.go | 129 + .../v1beta2/zz_indexingconfiguration_types.go | 365 + 
.../zz_provisioningtemplate_terraformed.go | 129 + .../v1beta2/zz_provisioningtemplate_types.go | 223 + apis/iot/v1beta2/zz_thinggroup_terraformed.go | 129 + apis/iot/v1beta2/zz_thinggroup_types.go | 234 + apis/iot/v1beta2/zz_thingtype_terraformed.go | 129 + apis/iot/v1beta2/zz_thingtype_types.go | 173 + apis/iot/v1beta2/zz_topicrule_terraformed.go | 129 + apis/iot/v1beta2/zz_topicrule_types.go | 2614 ++ .../zz_topicruledestination_terraformed.go | 129 + .../v1beta2/zz_topicruledestination_types.go | 253 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/ivs/v1beta2/zz_generated.deepcopy.go | 491 + apis/ivs/v1beta2/zz_generated.managed.go | 68 + apis/ivs/v1beta2/zz_generated.managedlist.go | 17 + apis/ivs/v1beta2/zz_groupversion_info.go | 32 + .../zz_recordingconfiguration_terraformed.go | 129 + .../zz_recordingconfiguration_types.go | 221 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 20 + apis/kafka/v1beta1/zz_generated.resolvers.go | 4 +- .../zz_scramsecretassociation_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 2 +- .../v1beta2/zz_generated.conversion_spokes.go | 34 + apis/kafka/v1beta2/zz_generated.deepcopy.go | 613 + apis/kafka/v1beta2/zz_generated.managed.go | 60 + .../kafka/v1beta2/zz_generated.managedlist.go | 9 + apis/kafka/v1beta2/zz_generated.resolvers.go | 97 + .../zz_serverlesscluster_terraformed.go | 129 + .../v1beta2/zz_serverlesscluster_types.go | 282 + apis/kafka/v1beta3/zz_cluster_terraformed.go | 129 + apis/kafka/v1beta3/zz_cluster_types.go | 1082 + .../v1beta3/zz_generated.conversion_hubs.go | 10 + apis/kafka/v1beta3/zz_generated.deepcopy.go | 2454 ++ apis/kafka/v1beta3/zz_generated.managed.go | 68 + .../kafka/v1beta3/zz_generated.managedlist.go | 17 + apis/kafka/v1beta3/zz_generated.resolvers.go | 352 + apis/kafka/v1beta3/zz_groupversion_info.go | 32 + 
.../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta2/zz_connector_terraformed.go | 129 + .../v1beta2/zz_connector_types.go | 891 + .../v1beta2/zz_customplugin_terraformed.go | 129 + .../v1beta2/zz_customplugin_types.go | 236 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 2320 ++ .../v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 514 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_spokes.go | 114 + .../v1beta2/zz_datasource_terraformed.go | 129 + apis/kendra/v1beta2/zz_datasource_types.go | 1266 + .../v1beta2/zz_experience_terraformed.go | 129 + apis/kendra/v1beta2/zz_experience_types.go | 302 + .../v1beta2/zz_generated.conversion_hubs.go | 22 + apis/kendra/v1beta2/zz_generated.deepcopy.go | 5758 +++++ apis/kendra/v1beta2/zz_generated.managed.go | 308 + .../v1beta2/zz_generated.managedlist.go | 53 + apis/kendra/v1beta2/zz_generated.resolvers.go | 699 + apis/kendra/v1beta2/zz_groupversion_info.go | 32 + apis/kendra/v1beta2/zz_index_terraformed.go | 129 + apis/kendra/v1beta2/zz_index_types.go | 653 + ...z_querysuggestionsblocklist_terraformed.go | 129 + .../zz_querysuggestionsblocklist_types.go | 256 + .../v1beta2/zz_thesaurus_terraformed.go | 129 + apis/kendra/v1beta2/zz_thesaurus_types.go | 278 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 1206 + .../keyspaces/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 68 + .../keyspaces/v1beta2/zz_groupversion_info.go | 32 + .../keyspaces/v1beta2/zz_table_terraformed.go | 129 + apis/keyspaces/v1beta2/zz_table_types.go | 519 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - 
.../v1beta1/zz_generated.conversion_spokes.go | 34 + .../kinesis/v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta1/zz_streamconsumer_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/kinesis/v1beta2/zz_generated.deepcopy.go | 435 + apis/kinesis/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../kinesis/v1beta2/zz_generated.resolvers.go | 68 + apis/kinesis/v1beta2/zz_groupversion_info.go | 32 + apis/kinesis/v1beta2/zz_stream_terraformed.go | 130 + apis/kinesis/v1beta2/zz_stream_types.go | 224 + .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_application_terraformed.go | 129 + .../v1beta2/zz_application_types.go | 1236 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 2657 ++ .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 300 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_applicationsnapshot_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta2/zz_application_terraformed.go | 129 + .../v1beta2/zz_application_types.go | 1732 ++ .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 4032 +++ .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 435 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 1267 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 157 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_permissions_terraformed.go | 129 + 
.../v1beta2/zz_permissions_types.go | 612 + .../v1beta1/zz_generated.conversion_hubs.go | 18 - .../v1beta1/zz_generated.conversion_spokes.go | 134 + apis/lambda/v1beta1/zz_generated.resolvers.go | 12 +- apis/lambda/v1beta1/zz_invocation_types.go | 4 +- apis/lambda/v1beta1/zz_permission_types.go | 8 +- apis/lambda/v1beta2/zz_alias_terraformed.go | 129 + apis/lambda/v1beta2/zz_alias_types.go | 165 + .../zz_codesigningconfig_terraformed.go | 129 + .../v1beta2/zz_codesigningconfig_types.go | 191 + .../zz_eventsourcemapping_terraformed.go | 129 + .../v1beta2/zz_eventsourcemapping_types.go | 587 + .../lambda/v1beta2/zz_function_terraformed.go | 130 + apis/lambda/v1beta2/zz_function_types.go | 842 + ...z_functioneventinvokeconfig_terraformed.go | 129 + .../zz_functioneventinvokeconfig_types.go | 249 + .../v1beta2/zz_functionurl_terraformed.go | 129 + apis/lambda/v1beta2/zz_functionurl_types.go | 249 + .../v1beta2/zz_generated.conversion_hubs.go | 25 + apis/lambda/v1beta2/zz_generated.deepcopy.go | 4604 ++++ apis/lambda/v1beta2/zz_generated.managed.go | 368 + .../v1beta2/zz_generated.managedlist.go | 62 + apis/lambda/v1beta2/zz_generated.resolvers.go | 603 + apis/lambda/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 9 - .../v1beta1/zz_generated.conversion_spokes.go | 74 + apis/lexmodels/v1beta2/zz_bot_terraformed.go | 129 + apis/lexmodels/v1beta2/zz_bot_types.go | 452 + .../v1beta2/zz_botalias_terraformed.go | 129 + apis/lexmodels/v1beta2/zz_botalias_types.go | 225 + .../v1beta2/zz_generated.conversion_hubs.go | 16 + .../v1beta2/zz_generated.deepcopy.go | 3182 +++ .../lexmodels/v1beta2/zz_generated.managed.go | 188 + .../v1beta2/zz_generated.managedlist.go | 35 + .../lexmodels/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_intent_terraformed.go | 129 + apis/lexmodels/v1beta2/zz_intent_types.go | 1080 + .../v1beta1/zz_association_types.go | 4 +- .../v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta1/zz_diskattachment_types.go | 
4 +- .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta1/zz_generated.resolvers.go | 19 +- .../v1beta1/zz_instancepublicports_types.go | 4 +- .../v1beta1/zz_lbattachment_types.go | 4 +- .../v1beta1/zz_staticipattachment_types.go | 4 +- .../zz_containerservice_terraformed.go | 129 + .../v1beta2/zz_containerservice_types.go | 298 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 1089 + .../lightsail/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../lightsail/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_instance_terraformed.go | 129 + apis/lightsail/v1beta2/zz_instance_types.go | 268 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../location/v1beta2/zz_generated.deepcopy.go | 346 + apis/location/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + apis/location/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_placeindex_terraformed.go | 129 + apis/location/v1beta2/zz_placeindex_types.go | 166 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../zz_classificationjob_terraformed.go | 129 + .../v1beta2/zz_classificationjob_types.go | 1009 + .../v1beta2/zz_findingsfilter_terraformed.go | 129 + .../macie2/v1beta2/zz_findingsfilter_types.go | 279 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/macie2/v1beta2/zz_generated.deepcopy.go | 3148 +++ apis/macie2/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + apis/macie2/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 381 + .../v1beta2/zz_generated.managed.go | 68 + 
.../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_queue_terraformed.go | 129 + apis/mediaconvert/v1beta2/zz_queue_types.go | 190 + .../v1beta1/zz_generated.conversion_hubs.go | 9 - .../v1beta1/zz_generated.conversion_spokes.go | 74 + .../v1beta2/zz_channel_terraformed.go | 129 + apis/medialive/v1beta2/zz_channel_types.go | 8129 +++++++ .../v1beta2/zz_generated.conversion_hubs.go | 16 + .../v1beta2/zz_generated.deepcopy.go | 19594 +++++++++++++++ .../medialive/v1beta2/zz_generated.managed.go | 188 + .../v1beta2/zz_generated.managedlist.go | 35 + .../v1beta2/zz_generated.resolvers.go | 162 + .../medialive/v1beta2/zz_groupversion_info.go | 32 + .../medialive/v1beta2/zz_input_terraformed.go | 129 + apis/medialive/v1beta2/zz_input_types.go | 359 + .../v1beta2/zz_multiplex_terraformed.go | 129 + apis/medialive/v1beta2/zz_multiplex_types.go | 200 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../memorydb/v1beta2/zz_generated.deepcopy.go | 352 + apis/memorydb/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + apis/memorydb/v1beta2/zz_groupversion_info.go | 32 + apis/memorydb/v1beta2/zz_user_terraformed.go | 129 + apis/memorydb/v1beta2/zz_user_types.go | 163 + apis/mq/v1alpha1/zz_generated.resolvers.go | 4 +- apis/mq/v1alpha1/zz_user_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + apis/mq/v1beta2/zz_broker_terraformed.go | 134 + apis/mq/v1beta2/zz_broker_types.go | 731 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/mq/v1beta2/zz_generated.deepcopy.go | 1403 ++ apis/mq/v1beta2/zz_generated.managed.go | 68 + apis/mq/v1beta2/zz_generated.managedlist.go | 17 + apis/mq/v1beta2/zz_generated.resolvers.go | 188 + apis/mq/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_clusterendpoint_types.go | 4 +- 
.../v1beta1/zz_clusterinstance_types.go | 4 +- .../v1beta1/zz_clustersnapshot_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../neptune/v1beta1/zz_generated.resolvers.go | 16 +- .../neptune/v1beta1/zz_globalcluster_types.go | 4 +- .../neptune/v1beta2/zz_cluster_terraformed.go | 129 + apis/neptune/v1beta2/zz_cluster_types.go | 572 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/neptune/v1beta2/zz_generated.deepcopy.go | 963 + apis/neptune/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../neptune/v1beta2/zz_generated.resolvers.go | 297 + apis/neptune/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_spokes.go | 94 + .../v1beta2/zz_firewall_terraformed.go | 129 + .../v1beta2/zz_firewall_types.go | 367 + .../v1beta2/zz_firewallpolicy_terraformed.go | 129 + .../v1beta2/zz_firewallpolicy_types.go | 595 + .../v1beta2/zz_generated.conversion_hubs.go | 19 + .../v1beta2/zz_generated.deepcopy.go | 5391 ++++ .../v1beta2/zz_generated.managed.go | 248 + .../v1beta2/zz_generated.managedlist.go | 44 + .../v1beta2/zz_generated.resolvers.go | 372 + .../v1beta2/zz_groupversion_info.go | 32 + .../zz_loggingconfiguration_terraformed.go | 129 + .../v1beta2/zz_loggingconfiguration_types.go | 193 + .../v1beta2/zz_rulegroup_terraformed.go | 129 + .../v1beta2/zz_rulegroup_types.go | 1119 + .../v1beta1/zz_attachmentaccepter_types.go | 8 +- .../v1beta1/zz_connection_types.go | 8 +- .../zz_customergatewayassociation_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 15 - .../v1beta1/zz_generated.conversion_spokes.go | 114 + .../v1beta1/zz_generated.resolvers.go | 28 +- .../v1beta1/zz_linkassociation_types.go | 4 +- ...nsitgatewayconnectpeerassociation_types.go | 4 +- .../zz_connectattachment_terraformed.go | 129 + .../v1beta2/zz_connectattachment_types.go | 253 + .../v1beta2/zz_device_terraformed.go | 129 + 
.../networkmanager/v1beta2/zz_device_types.go | 308 + .../v1beta2/zz_generated.conversion_hubs.go | 22 + .../v1beta2/zz_generated.deepcopy.go | 2281 ++ .../v1beta2/zz_generated.managed.go | 308 + .../v1beta2/zz_generated.managedlist.go | 53 + .../v1beta2/zz_generated.resolvers.go | 498 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_link_terraformed.go | 129 + apis/networkmanager/v1beta2/zz_link_types.go | 238 + .../v1beta2/zz_site_terraformed.go | 129 + apis/networkmanager/v1beta2/zz_site_types.go | 199 + .../v1beta2/zz_vpcattachment_terraformed.go | 129 + .../v1beta2/zz_vpcattachment_types.go | 274 + .../v1beta1/zz_domainpolicy_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta2/zz_domain_terraformed.go | 129 + apis/opensearch/v1beta2/zz_domain_types.go | 1086 + .../zz_domainsamloptions_terraformed.go | 129 + .../v1beta2/zz_domainsamloptions_types.go | 236 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 2813 +++ .../v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 124 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 312 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_securityconfig_terraformed.go | 129 + .../v1beta2/zz_securityconfig_types.go | 170 + apis/opsworks/v1beta1/zz_application_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 36 - .../v1beta1/zz_generated.conversion_spokes.go | 254 + .../v1beta1/zz_generated.resolvers.go | 24 +- apis/opsworks/v1beta1/zz_instance_types.go | 8 +- 
apis/opsworks/v1beta1/zz_permission_types.go | 4 +- .../v1beta1/zz_rdsdbinstance_types.go | 8 +- .../v1beta2/zz_customlayer_terraformed.go | 129 + apis/opsworks/v1beta2/zz_customlayer_types.go | 790 + .../v1beta2/zz_ecsclusterlayer_terraformed.go | 129 + .../v1beta2/zz_ecsclusterlayer_types.go | 704 + .../v1beta2/zz_ganglialayer_terraformed.go | 129 + .../opsworks/v1beta2/zz_ganglialayer_types.go | 706 + .../v1beta2/zz_generated.conversion_hubs.go | 43 + .../opsworks/v1beta2/zz_generated.deepcopy.go | 18603 ++++++++++++++ apis/opsworks/v1beta2/zz_generated.managed.go | 728 + .../v1beta2/zz_generated.managedlist.go | 116 + .../v1beta2/zz_generated.resolvers.go | 1200 + apis/opsworks/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_haproxylayer_terraformed.go | 129 + .../opsworks/v1beta2/zz_haproxylayer_types.go | 736 + .../v1beta2/zz_javaapplayer_terraformed.go | 129 + .../opsworks/v1beta2/zz_javaapplayer_types.go | 725 + .../v1beta2/zz_memcachedlayer_terraformed.go | 129 + .../v1beta2/zz_memcachedlayer_types.go | 685 + .../v1beta2/zz_mysqllayer_terraformed.go | 129 + apis/opsworks/v1beta2/zz_mysqllayer_types.go | 695 + .../v1beta2/zz_nodejsapplayer_terraformed.go | 129 + .../v1beta2/zz_nodejsapplayer_types.go | 685 + .../v1beta2/zz_phpapplayer_terraformed.go | 129 + apis/opsworks/v1beta2/zz_phpapplayer_types.go | 675 + .../v1beta2/zz_railsapplayer_terraformed.go | 129 + .../v1beta2/zz_railsapplayer_types.go | 735 + apis/opsworks/v1beta2/zz_stack_terraformed.go | 129 + apis/opsworks/v1beta2/zz_stack_types.go | 463 + .../v1beta2/zz_staticweblayer_terraformed.go | 129 + .../v1beta2/zz_staticweblayer_types.go | 672 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta1/zz_generated.resolvers.go | 7 +- apis/pinpoint/v1beta1/zz_smschannel_types.go | 4 +- apis/pinpoint/v1beta2/zz_app_terraformed.go | 129 + apis/pinpoint/v1beta2/zz_app_types.go | 270 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + 
.../pinpoint/v1beta2/zz_generated.deepcopy.go | 566 + apis/pinpoint/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + apis/pinpoint/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/qldb/v1beta2/zz_generated.deepcopy.go | 457 + apis/qldb/v1beta2/zz_generated.managed.go | 68 + apis/qldb/v1beta2/zz_generated.managedlist.go | 17 + apis/qldb/v1beta2/zz_generated.resolvers.go | 149 + apis/qldb/v1beta2/zz_groupversion_info.go | 32 + apis/qldb/v1beta2/zz_stream_terraformed.go | 129 + apis/qldb/v1beta2/zz_stream_types.go | 263 + .../v1beta1/zz_clusteractivitystream_types.go | 4 +- apis/rds/v1beta1/zz_clusterendpoint_types.go | 4 +- apis/rds/v1beta1/zz_clusterinstance_types.go | 4 +- .../zz_clusterroleassociation_types.go | 4 +- apis/rds/v1beta1/zz_clustersnapshot_types.go | 4 +- ...stanceautomatedbackupsreplication_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 40 + apis/rds/v1beta1/zz_generated.resolvers.go | 40 +- apis/rds/v1beta1/zz_globalcluster_types.go | 4 +- .../zz_instanceroleassociation_types.go | 4 +- apis/rds/v1beta1/zz_proxytarget_types.go | 4 +- apis/rds/v1beta1/zz_snapshot_types.go | 4 +- apis/rds/v1beta2/zz_cluster_terraformed.go | 129 + apis/rds/v1beta2/zz_cluster_types.go | 1023 + .../v1beta2/zz_generated.conversion_hubs.go | 5 +- .../v1beta2/zz_generated.conversion_spokes.go | 34 + apis/rds/v1beta2/zz_generated.deepcopy.go | 2155 +- apis/rds/v1beta2/zz_generated.managed.go | 120 + apis/rds/v1beta2/zz_generated.managedlist.go | 18 + apis/rds/v1beta2/zz_generated.resolvers.go | 377 +- .../zz_proxydefaulttargetgroup_terraformed.go | 129 + .../zz_proxydefaulttargetgroup_types.go | 197 + .../v1beta3/zz_generated.conversion_hubs.go | 10 + apis/rds/v1beta3/zz_generated.deepcopy.go | 1885 ++ 
apis/rds/v1beta3/zz_generated.managed.go | 68 + apis/rds/v1beta3/zz_generated.managedlist.go | 17 + apis/rds/v1beta3/zz_generated.resolvers.go | 299 + apis/rds/v1beta3/zz_groupversion_info.go | 32 + apis/rds/v1beta3/zz_instance_terraformed.go | 130 + apis/rds/v1beta3/zz_instance_types.go | 1334 + .../v1beta1/zz_generated.conversion_hubs.go | 9 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta1/zz_generated.resolvers.go | 8 +- .../zz_snapshotscheduleassociation_types.go | 4 +- apis/redshift/v1beta1/zz_usagelimit_types.go | 4 +- .../v1beta2/zz_cluster_terraformed.go | 129 + apis/redshift/v1beta2/zz_cluster_types.go | 779 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../redshift/v1beta2/zz_generated.deepcopy.go | 1880 ++ apis/redshift/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 235 + apis/redshift/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_scheduledaction_terraformed.go | 129 + .../v1beta2/zz_scheduledaction_types.go | 319 + .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 531 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_group_terraformed.go | 129 + apis/resourcegroups/v1beta2/zz_group_types.go | 227 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../route53/v1beta1/zz_generated.resolvers.go | 4 +- apis/route53/v1beta1/zz_healthcheck_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/route53/v1beta2/zz_generated.deepcopy.go | 1082 + apis/route53/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../route53/v1beta2/zz_generated.resolvers.go | 106 + apis/route53/v1beta2/zz_groupversion_info.go | 32 + apis/route53/v1beta2/zz_record_terraformed.go 
| 129 + apis/route53/v1beta2/zz_record_types.go | 529 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 466 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 107 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_safetyrule_terraformed.go | 129 + .../v1beta2/zz_safetyrule_types.go | 248 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 731 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 73 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_resourceset_terraformed.go | 129 + .../v1beta2/zz_resourceset_types.go | 330 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + apis/rum/v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta1/zz_metricsdestination_types.go | 4 +- apis/rum/v1beta2/zz_appmonitor_terraformed.go | 129 + apis/rum/v1beta2/zz_appmonitor_types.go | 288 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/rum/v1beta2/zz_generated.deepcopy.go | 613 + apis/rum/v1beta2/zz_generated.managed.go | 68 + apis/rum/v1beta2/zz_generated.managedlist.go | 17 + apis/rum/v1beta2/zz_groupversion_info.go | 32 + .../zz_bucketaccelerateconfiguration_types.go | 4 +- .../zz_bucketcorsconfiguration_types.go | 4 +- .../s3/v1beta1/zz_bucketnotification_types.go | 4 +- apis/s3/v1beta1/zz_bucketobject_types.go | 4 +- apis/s3/v1beta1/zz_bucketpolicy_types.go | 4 +- .../zz_bucketpublicaccessblock_types.go | 4 +- ...bucketrequestpaymentconfiguration_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 45 - .../v1beta1/zz_generated.conversion_spokes.go | 
314 + apis/s3/v1beta1/zz_generated.resolvers.go | 28 +- apis/s3/v1beta2/zz_bucket_terraformed.go | 129 + apis/s3/v1beta2/zz_bucket_types.go | 649 + apis/s3/v1beta2/zz_bucketacl_terraformed.go | 131 + apis/s3/v1beta2/zz_bucketacl_types.go | 290 + ...ucketanalyticsconfiguration_terraformed.go | 129 + .../zz_bucketanalyticsconfiguration_types.go | 319 + ...lligenttieringconfiguration_terraformed.go | 129 + ...etintelligenttieringconfiguration_types.go | 223 + .../v1beta2/zz_bucketinventory_terraformed.go | 129 + apis/s3/v1beta2/zz_bucketinventory_types.go | 390 + ...ucketlifecycleconfiguration_terraformed.go | 129 + .../zz_bucketlifecycleconfiguration_types.go | 546 + .../v1beta2/zz_bucketlogging_terraformed.go | 129 + apis/s3/v1beta2/zz_bucketlogging_types.go | 328 + .../s3/v1beta2/zz_bucketmetric_terraformed.go | 129 + apis/s3/v1beta2/zz_bucketmetric_types.go | 203 + ...cketobjectlockconfiguration_terraformed.go | 129 + .../zz_bucketobjectlockconfiguration_types.go | 218 + .../zz_bucketownershipcontrols_terraformed.go | 129 + .../zz_bucketownershipcontrols_types.go | 151 + ...ketreplicationconfiguration_terraformed.go | 129 + ...zz_bucketreplicationconfiguration_types.go | 726 + ...sideencryptionconfiguration_terraformed.go | 129 + ...serversideencryptionconfiguration_types.go | 220 + .../zz_bucketversioning_terraformed.go | 129 + apis/s3/v1beta2/zz_bucketversioning_types.go | 181 + ..._bucketwebsiteconfiguration_terraformed.go | 131 + .../zz_bucketwebsiteconfiguration_types.go | 380 + .../v1beta2/zz_generated.conversion_hubs.go | 52 + apis/s3/v1beta2/zz_generated.deepcopy.go | 11069 +++++++++ apis/s3/v1beta2/zz_generated.managed.go | 908 + apis/s3/v1beta2/zz_generated.managedlist.go | 143 + apis/s3/v1beta2/zz_generated.resolvers.go | 1121 + apis/s3/v1beta2/zz_groupversion_info.go | 32 + apis/s3/v1beta2/zz_object_terraformed.go | 131 + apis/s3/v1beta2/zz_object_types.go | 458 + .../v1beta1/zz_accesspointpolicy_types.go | 4 +- 
.../v1beta1/zz_generated.conversion_hubs.go | 15 - .../v1beta1/zz_generated.conversion_spokes.go | 114 + .../v1beta1/zz_generated.resolvers.go | 8 +- .../zz_objectlambdaaccesspointpolicy_types.go | 4 +- .../v1beta2/zz_accesspoint_terraformed.go | 129 + .../s3control/v1beta2/zz_accesspoint_types.go | 290 + .../v1beta2/zz_generated.conversion_hubs.go | 22 + .../v1beta2/zz_generated.deepcopy.go | 3967 +++ .../s3control/v1beta2/zz_generated.managed.go | 308 + .../v1beta2/zz_generated.managedlist.go | 53 + .../v1beta2/zz_generated.resolvers.go | 319 + .../s3control/v1beta2/zz_groupversion_info.go | 32 + .../zz_multiregionaccesspoint_terraformed.go | 129 + .../zz_multiregionaccesspoint_types.go | 262 + ...ultiregionaccesspointpolicy_terraformed.go | 129 + .../zz_multiregionaccesspointpolicy_types.go | 147 + .../zz_objectlambdaaccesspoint_terraformed.go | 129 + .../zz_objectlambdaaccesspoint_types.go | 301 + ...zz_storagelensconfiguration_terraformed.go | 129 + .../zz_storagelensconfiguration_types.go | 851 + .../v1beta1/zz_generated.conversion_hubs.go | 45 - .../v1beta1/zz_generated.conversion_spokes.go | 314 + apis/sagemaker/v1beta2/zz_app_terraformed.go | 129 + apis/sagemaker/v1beta2/zz_app_types.go | 272 + .../v1beta2/zz_appimageconfig_terraformed.go | 129 + .../v1beta2/zz_appimageconfig_types.go | 458 + .../v1beta2/zz_coderepository_terraformed.go | 129 + .../v1beta2/zz_coderepository_types.go | 181 + .../v1beta2/zz_device_terraformed.go | 129 + apis/sagemaker/v1beta2/zz_device_types.go | 173 + .../v1beta2/zz_devicefleet_terraformed.go | 129 + .../sagemaker/v1beta2/zz_devicefleet_types.go | 203 + .../v1beta2/zz_domain_terraformed.go | 129 + apis/sagemaker/v1beta2/zz_domain_types.go | 2162 ++ .../v1beta2/zz_endpoint_terraformed.go | 129 + apis/sagemaker/v1beta2/zz_endpoint_types.go | 478 + .../zz_endpointconfiguration_terraformed.go | 129 + .../v1beta2/zz_endpointconfiguration_types.go | 911 + .../v1beta2/zz_featuregroup_terraformed.go | 129 + 
.../v1beta2/zz_featuregroup_types.go | 456 + .../v1beta2/zz_generated.conversion_hubs.go | 52 + .../v1beta2/zz_generated.deepcopy.go | 20246 ++++++++++++++++ .../sagemaker/v1beta2/zz_generated.managed.go | 908 + .../v1beta2/zz_generated.managedlist.go | 143 + .../v1beta2/zz_generated.resolvers.go | 1324 + .../sagemaker/v1beta2/zz_groupversion_info.go | 32 + .../sagemaker/v1beta2/zz_model_terraformed.go | 129 + apis/sagemaker/v1beta2/zz_model_types.go | 640 + .../zz_notebookinstance_terraformed.go | 129 + .../v1beta2/zz_notebookinstance_types.go | 363 + .../sagemaker/v1beta2/zz_space_terraformed.go | 129 + apis/sagemaker/v1beta2/zz_space_types.go | 823 + .../v1beta2/zz_userprofile_terraformed.go | 129 + .../sagemaker/v1beta2/zz_userprofile_types.go | 1508 ++ .../v1beta2/zz_workforce_terraformed.go | 130 + apis/sagemaker/v1beta2/zz_workforce_types.go | 362 + .../v1beta2/zz_workteam_terraformed.go | 129 + apis/sagemaker/v1beta2/zz_workteam_types.go | 345 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 1806 ++ .../scheduler/v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 154 + .../scheduler/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_schedule_terraformed.go | 129 + apis/scheduler/v1beta2/zz_schedule_types.go | 860 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 358 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 106 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_secretrotation_terraformed.go | 129 + .../v1beta2/zz_secretrotation_types.go | 214 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - 
.../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 9770 ++++++++ .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_insight_terraformed.go | 129 + apis/securityhub/v1beta2/zz_insight_types.go | 4047 +++ .../zz_budgetresourceassociation_types.go | 8 +- .../v1beta1/zz_constraint_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../v1beta1/zz_generated.resolvers.go | 24 +- .../zz_productportfolioassociation_types.go | 4 +- .../v1beta1/zz_provisioningartifact_types.go | 4 +- .../zz_tagoptionresourceassociation_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../v1beta2/zz_generated.deepcopy.go | 860 + .../v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_product_terraformed.go | 129 + .../v1beta2/zz_product_types.go | 293 + .../v1beta2/zz_serviceaction_terraformed.go | 129 + .../v1beta2/zz_serviceaction_types.go | 192 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 693 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 74 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_service_terraformed.go | 129 + .../v1beta2/zz_service_types.go | 338 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + .../zz_configurationset_terraformed.go | 129 + apis/ses/v1beta2/zz_configurationset_types.go | 175 + .../zz_eventdestination_terraformed.go | 129 + apis/ses/v1beta2/zz_eventdestination_types.go | 323 + 
.../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/ses/v1beta2/zz_generated.deepcopy.go | 926 + apis/ses/v1beta2/zz_generated.managed.go | 128 + apis/ses/v1beta2/zz_generated.managedlist.go | 26 + apis/ses/v1beta2/zz_generated.resolvers.go | 195 + apis/ses/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 9 - .../v1beta1/zz_generated.conversion_spokes.go | 74 + .../zz_configurationset_terraformed.go | 129 + .../v1beta2/zz_configurationset_types.go | 344 + ...gurationseteventdestination_terraformed.go | 129 + ..._configurationseteventdestination_types.go | 411 + .../v1beta2/zz_emailidentity_terraformed.go | 129 + apis/sesv2/v1beta2/zz_emailidentity_types.go | 205 + .../v1beta2/zz_generated.conversion_hubs.go | 16 + apis/sesv2/v1beta2/zz_generated.deepcopy.go | 2047 ++ apis/sesv2/v1beta2/zz_generated.managed.go | 188 + .../sesv2/v1beta2/zz_generated.managedlist.go | 35 + apis/sesv2/v1beta2/zz_generated.resolvers.go | 303 + apis/sesv2/v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/sfn/v1beta2/zz_generated.deepcopy.go | 522 + apis/sfn/v1beta2/zz_generated.managed.go | 68 + apis/sfn/v1beta2/zz_generated.managedlist.go | 17 + apis/sfn/v1beta2/zz_generated.resolvers.go | 68 + apis/sfn/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_statemachine_terraformed.go | 129 + apis/sfn/v1beta2/zz_statemachine_types.go | 266 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + apis/signer/v1beta1/zz_generated.resolvers.go | 9 +- .../zz_signingprofilepermission_types.go | 6 +- .../v1beta2/zz_generated.conversion_hubs.go | 13 + apis/signer/v1beta2/zz_generated.deepcopy.go | 1254 + apis/signer/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + apis/signer/v1beta2/zz_generated.resolvers.go | 66 + 
apis/signer/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_signingjob_terraformed.go | 129 + apis/signer/v1beta2/zz_signingjob_types.go | 339 + .../v1beta2/zz_signingprofile_terraformed.go | 129 + .../signer/v1beta2/zz_signingprofile_types.go | 222 + .../v1beta1/zz_generated.conversion_hubs.go | 9 - .../v1beta1/zz_generated.conversion_spokes.go | 74 + .../ssm/v1beta2/zz_association_terraformed.go | 129 + apis/ssm/v1beta2/zz_association_types.go | 335 + .../v1beta2/zz_generated.conversion_hubs.go | 16 + apis/ssm/v1beta2/zz_generated.deepcopy.go | 2433 ++ apis/ssm/v1beta2/zz_generated.managed.go | 188 + apis/ssm/v1beta2/zz_generated.managedlist.go | 35 + apis/ssm/v1beta2/zz_generated.resolvers.go | 413 + apis/ssm/v1beta2/zz_groupversion_info.go | 32 + .../zz_maintenancewindowtask_terraformed.go | 129 + .../v1beta2/zz_maintenancewindowtask_types.go | 747 + .../zz_resourcedatasync_terraformed.go | 129 + apis/ssm/v1beta2/zz_resourcedatasync_types.go | 185 + .../v1beta1/zz_generated.conversion_hubs.go | 6 - .../v1beta1/zz_generated.conversion_spokes.go | 54 + ...omermanagedpolicyattachment_terraformed.go | 129 + ...z_customermanagedpolicyattachment_types.go | 177 + .../v1beta2/zz_generated.conversion_hubs.go | 13 + .../ssoadmin/v1beta2/zz_generated.deepcopy.go | 667 + apis/ssoadmin/v1beta2/zz_generated.managed.go | 128 + .../v1beta2/zz_generated.managedlist.go | 26 + .../v1beta2/zz_generated.resolvers.go | 170 + apis/ssoadmin/v1beta2/zz_groupversion_info.go | 32 + ...rmissionsboundaryattachment_terraformed.go | 129 + .../zz_permissionsboundaryattachment_types.go | 202 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 772 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 49 + .../v1beta2/zz_groupversion_info.go | 32 + 
.../v1beta2/zz_table_terraformed.go | 129 + .../timestreamwrite/v1beta2/zz_table_types.go | 348 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 387 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 73 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_languagemodel_terraformed.go | 129 + .../v1beta2/zz_languagemodel_types.go | 202 + .../v1beta1/zz_generated.conversion_hubs.go | 12 - .../v1beta1/zz_generated.conversion_spokes.go | 74 + .../v1beta1/zz_generated.resolvers.go | 12 +- apis/transfer/v1beta1/zz_sshkey_types.go | 8 +- apis/transfer/v1beta1/zz_tag_types.go | 4 +- .../v1beta2/zz_generated.conversion_hubs.go | 16 + .../transfer/v1beta2/zz_generated.deepcopy.go | 4409 ++++ apis/transfer/v1beta2/zz_generated.managed.go | 188 + .../v1beta2/zz_generated.managedlist.go | 35 + .../v1beta2/zz_generated.resolvers.go | 335 + apis/transfer/v1beta2/zz_groupversion_info.go | 32 + .../transfer/v1beta2/zz_server_terraformed.go | 129 + apis/transfer/v1beta2/zz_server_types.go | 633 + apis/transfer/v1beta2/zz_user_terraformed.go | 129 + apis/transfer/v1beta2/zz_user_types.go | 289 + .../v1beta2/zz_workflow_terraformed.go | 129 + apis/transfer/v1beta2/zz_workflow_types.go | 1156 + .../v1beta1/zz_generated.conversion_hubs.go | 18 - .../v1beta1/zz_generated.conversion_spokes.go | 134 + .../v1beta2/zz_bytematchset_terraformed.go | 129 + apis/waf/v1beta2/zz_bytematchset_types.go | 238 + .../v1beta2/zz_generated.conversion_hubs.go | 25 + apis/waf/v1beta2/zz_generated.deepcopy.go | 2654 ++ apis/waf/v1beta2/zz_generated.managed.go | 368 + apis/waf/v1beta2/zz_generated.managedlist.go | 62 + apis/waf/v1beta2/zz_generated.resolvers.go | 170 + apis/waf/v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_regexmatchset_terraformed.go | 129 + 
apis/waf/v1beta2/zz_regexmatchset_types.go | 224 + .../zz_sizeconstraintset_terraformed.go | 129 + .../waf/v1beta2/zz_sizeconstraintset_types.go | 196 + .../zz_sqlinjectionmatchset_terraformed.go | 129 + .../v1beta2/zz_sqlinjectionmatchset_types.go | 194 + apis/waf/v1beta2/zz_webacl_terraformed.go | 129 + apis/waf/v1beta2/zz_webacl_types.go | 403 + .../waf/v1beta2/zz_xssmatchset_terraformed.go | 129 + apis/waf/v1beta2/zz_xssmatchset_types.go | 197 + .../v1beta1/zz_generated.conversion_hubs.go | 18 - .../v1beta1/zz_generated.conversion_spokes.go | 134 + .../v1beta2/zz_bytematchset_terraformed.go | 129 + .../v1beta2/zz_bytematchset_types.go | 190 + .../v1beta2/zz_generated.conversion_hubs.go | 25 + .../v1beta2/zz_generated.deepcopy.go | 2644 ++ .../v1beta2/zz_generated.managed.go | 368 + .../v1beta2/zz_generated.managedlist.go | 62 + .../v1beta2/zz_generated.resolvers.go | 170 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta2/zz_regexmatchset_terraformed.go | 129 + .../v1beta2/zz_regexmatchset_types.go | 221 + .../zz_sizeconstraintset_terraformed.go | 129 + .../v1beta2/zz_sizeconstraintset_types.go | 227 + .../zz_sqlinjectionmatchset_terraformed.go | 129 + .../v1beta2/zz_sqlinjectionmatchset_types.go | 194 + .../v1beta2/zz_webacl_terraformed.go | 129 + apis/wafregional/v1beta2/zz_webacl_types.go | 400 + .../v1beta2/zz_xssmatchset_terraformed.go | 129 + .../v1beta2/zz_xssmatchset_types.go | 170 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go | 34 + .../v1beta2/zz_directory_terraformed.go | 129 + apis/workspaces/v1beta2/zz_directory_types.go | 468 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + .../v1beta2/zz_generated.deepcopy.go | 868 + .../v1beta2/zz_generated.managed.go | 68 + .../v1beta2/zz_generated.managedlist.go | 17 + .../v1beta2/zz_generated.resolvers.go | 150 + .../v1beta2/zz_groupversion_info.go | 32 + .../v1beta1/zz_generated.conversion_hubs.go | 3 - .../v1beta1/zz_generated.conversion_spokes.go 
| 34 + .../v1beta2/zz_generated.conversion_hubs.go | 10 + apis/xray/v1beta2/zz_generated.deepcopy.go | 351 + apis/xray/v1beta2/zz_generated.managed.go | 68 + apis/xray/v1beta2/zz_generated.managedlist.go | 17 + apis/xray/v1beta2/zz_group_terraformed.go | 129 + apis/xray/v1beta2/zz_group_types.go | 172 + apis/xray/v1beta2/zz_groupversion_info.go | 32 + apis/zz_register.go | 240 +- .../accessanalyzer/v1beta2/analyzer.yaml | 11 + .../acm/v1beta1/certificatevalidation.yaml | 6 +- .../acm/v1beta2/certificate.yaml | 15 + .../certificateauthoritycertificate.yaml | 4 +- .../acmpca/v1beta1/permission.yaml | 2 +- .../acmpca/v1beta2/certificate.yaml | 42 + .../acmpca/v1beta2/certificateauthority.yaml | 17 + .../amp/v1beta1/alertmanagerdefinition.yaml | 2 +- .../amp/v1beta1/rulegroupnamespace.yaml | 2 +- examples-generated/amp/v1beta2/workspace.yaml | 14 + .../amplify/v1beta1/backendenvironment.yaml | 2 +- .../amplify/v1beta1/branch.yaml | 2 +- .../amplify/v1beta1/webhook.yaml | 2 +- examples-generated/amplify/v1beta2/app.yaml | 36 + .../apigateway/v1beta1/authorizer.yaml | 4 +- .../apigateway/v1beta1/basepathmapping.yaml | 4 +- .../apigateway/v1beta1/deployment.yaml | 4 +- .../v1beta1/documentationversion.yaml | 4 +- .../apigateway/v1beta1/gatewayresponse.yaml | 2 +- .../v1beta1/integrationresponse.yaml | 4 +- .../apigateway/v1beta1/method.yaml | 2 +- .../apigateway/v1beta1/methodresponse.yaml | 4 +- .../apigateway/v1beta1/model.yaml | 2 +- .../apigateway/v1beta1/resource.yaml | 2 +- .../apigateway/v1beta1/restapipolicy.yaml | 2 +- .../apigateway/v1beta1/usageplankey.yaml | 4 +- .../apigateway/v1beta1/vpclink.yaml | 2 +- .../apigateway/v1beta2/documentationpart.yaml | 34 + .../apigateway/v1beta2/domainname.yaml | 38 + .../apigateway/v1beta2/integration.yaml | 91 + .../apigateway/v1beta2/methodsettings.yaml | 96 + .../apigateway/v1beta2/restapi.yaml | 75 + .../apigateway/v1beta2/stage.yaml | 96 + .../apigateway/v1beta2/usageplan.yaml | 130 + .../apigatewayv2/v1beta1/route.yaml 
| 2 +- .../apigatewayv2/v1beta2/api.yaml | 14 + .../apigatewayv2/v1beta2/authorizer.yaml | 21 + .../apigatewayv2/v1beta2/domainname.yaml | 17 + .../apigatewayv2/v1beta2/integration.yaml | 15 + .../apigatewayv2/v1beta2/stage.yaml | 14 + .../appautoscaling/v1beta2/policy.yaml | 44 + .../v1beta2/scheduledaction.yaml | 44 + examples-generated/appflow/v1beta2/flow.yaml | 123 + .../v1beta2/eventintegration.yaml | 17 + .../v1beta1/application.yaml | 2 +- .../appmesh/v1beta2/gatewayroute.yaml | 28 + examples-generated/appmesh/v1beta2/mesh.yaml | 11 + examples-generated/appmesh/v1beta2/route.yaml | 32 + .../appmesh/v1beta2/virtualgateway.yaml | 20 + .../appmesh/v1beta2/virtualnode.yaml | 26 + .../appmesh/v1beta2/virtualrouter.yaml | 20 + .../appmesh/v1beta2/virtualservice.yaml | 21 + .../v1beta2/observabilityconfiguration.yaml | 16 + .../apprunner/v1beta2/service.yaml | 37 + .../v1beta1/fleetstackassociation.yaml | 4 +- .../v1beta1/userstackassociation.yaml | 2 +- .../appstream/v1beta2/directoryconfig.yaml | 20 + .../appstream/v1beta2/fleet.yaml | 27 + .../appstream/v1beta2/imagebuilder.yaml | 20 + .../appstream/v1beta2/stack.yaml | 38 + .../appsync/v1beta1/apicache.yaml | 2 +- .../appsync/v1beta1/apikey.yaml | 2 +- .../appsync/v1beta2/datasource.yaml | 89 + .../appsync/v1beta2/function.yaml | 87 + .../appsync/v1beta2/graphqlapi.yaml | 13 + .../appsync/v1beta2/resolver.yaml | 78 + .../athena/v1beta1/namedquery.yaml | 6 +- .../athena/v1beta2/database.yaml | 29 + .../athena/v1beta2/workgroup.yaml | 21 + .../autoscaling/v1beta1/lifecyclehook.yaml | 2 +- .../autoscaling/v1beta1/notification.yaml | 4 +- .../autoscaling/v1beta1/schedule.yaml | 2 +- .../autoscaling/v1beta2/grouptag.yaml | 41 + .../v1beta2/launchconfiguration.yaml | 13 + .../autoscaling/v1beta2/policy.yaml | 41 + .../autoscaling/v1beta3/autoscalinggroup.yaml | 64 + .../autoscalingplans/v1beta2/scalingplan.yaml | 52 + .../backup/v1beta2/framework.yaml | 52 + examples-generated/backup/v1beta2/plan.yaml | 24 + 
.../backup/v1beta2/reportplan.yaml | 22 + .../batch/v1beta2/jobdefinition.yaml | 61 + .../batch/v1beta2/schedulingpolicy.yaml | 21 + .../budgets/v1beta2/budget.yaml | 31 + .../budgets/v1beta2/budgetaction.yaml | 84 + .../v1beta2/voiceconnectorstreaming.yaml | 33 + .../cloudformation/v1beta2/stackset.yaml | 73 + .../v1beta2/stacksetinstance.yaml | 15 + .../cloudfront/v1beta2/cachepolicy.yaml | 32 + .../cloudfront/v1beta2/distribution.yaml | 137 + .../v1beta2/fieldlevelencryptionconfig.yaml | 26 + .../v1beta2/fieldlevelencryptionprofile.yaml | 42 + .../v1beta2/monitoringsubscription.yaml | 17 + .../v1beta2/originrequestpolicy.yaml | 27 + .../cloudfront/v1beta2/realtimelogconfig.yaml | 56 + .../v1beta2/responseheaderspolicy.yaml | 25 + .../v1beta1/domainserviceaccesspolicy.yaml | 2 +- .../cloudsearch/v1beta2/domain.yaml | 28 + .../cloudtrail/v1beta1/trail.yaml | 2 +- .../cloudwatch/v1beta1/metricstream.yaml | 6 +- .../cloudwatch/v1beta2/compositealarm.yaml | 25 + .../cloudwatch/v1beta2/metricalarm.yaml | 20 + .../cloudwatchevents/v1beta1/rule.yaml | 2 +- .../cloudwatchevents/v1beta2/connection.yaml | 20 + .../cloudwatchevents/v1beta2/permission.yaml | 13 + .../cloudwatchevents/v1beta2/target.yaml | 66 + .../cloudwatchlogs/v1beta2/metricfilter.yaml | 33 + .../codepipeline/v1beta2/codepipeline.yaml | 147 + .../v1beta2/customactiontype.yaml | 20 + .../codepipeline/v1beta2/webhook.yaml | 74 + .../v1beta1/connection.yaml | 2 +- .../codestarconnections/v1beta2/host.yaml | 14 + ...gnitoidentitypoolproviderprincipaltag.yaml | 2 +- .../cognitoidp/v1beta1/identityprovider.yaml | 2 +- .../cognitoidp/v1beta1/resourceserver.yaml | 2 +- .../cognitoidp/v1beta1/user.yaml | 2 +- .../cognitoidp/v1beta1/usergroup.yaml | 2 +- .../cognitoidp/v1beta1/useringroup.yaml | 2 +- .../cognitoidp/v1beta1/userpoolclient.yaml | 2 +- .../cognitoidp/v1beta1/userpooldomain.yaml | 2 +- .../v1beta1/userpooluicustomization.yaml | 2 +- .../cognitoidp/v1beta2/riskconfiguration.yaml | 17 + 
.../cognitoidp/v1beta2/userpool.yaml | 12 + .../awsconfigurationrecorderstatus.yaml | 6 +- .../configservice/v1beta2/configrule.yaml | 64 + .../v1beta2/configurationaggregator.yaml | 16 + .../v1beta2/configurationrecorder.yaml | 28 + .../v1beta2/deliverychannel.yaml | 78 + .../v1beta2/remediationconfiguration.yaml | 48 + .../connect/v1beta2/botassociation.yaml | 19 + .../v1beta2/instancestorageconfig.yaml | 21 + .../connect/v1beta2/quickconnect.yaml | 22 + examples-generated/connect/v1beta2/user.yaml | 30 + .../v1beta2/userhierarchystructure.yaml | 17 + .../connect/v1beta3/hoursofoperation.yaml | 34 + examples-generated/connect/v1beta3/queue.yaml | 21 + .../datasync/v1beta2/locations3.yaml | 19 + examples-generated/datasync/v1beta2/task.yaml | 20 + examples-generated/dax/v1beta2/cluster.yaml | 16 + .../deploy/v1beta2/deploymentconfig.yaml | 54 + .../deploy/v1beta2/deploymentgroup.yaml | 102 + .../devicefarm/v1beta2/testgridproject.yaml | 22 + .../dlm/v1beta2/lifecyclepolicy.yaml | 64 + examples-generated/dms/v1beta2/endpoint.yaml | 29 + examples-generated/ds/v1beta2/directory.yaml | 79 + .../ds/v1beta2/shareddirectory.yaml | 48 + .../dynamodb/v1beta1/globaltable.yaml | 4 +- .../v1beta1/kinesisstreamingdestination.yaml | 4 +- .../dynamodb/v1beta1/tableitem.yaml | 2 +- .../dynamodb/v1beta1/tablereplica.yaml | 2 +- examples-generated/dynamodb/v1beta1/tag.yaml | 2 +- .../dynamodb/v1beta2/table.yaml | 38 + .../ec2/v1beta1/eipassociation.yaml | 2 +- .../ec2/v1beta1/instancestate.yaml | 2 +- .../v1beta1/networkinterfacesgattachment.yaml | 2 +- .../ec2/v1beta1/spotdatafeedsubscription.yaml | 2 +- examples-generated/ec2/v1beta1/tag.yaml | 2 +- .../transitgatewaymulticastdomain.yaml | 6 +- .../ec2/v1beta1/volumeattachment.yaml | 2 +- .../v1beta1/vpcipampoolcidrallocation.yaml | 2 +- .../ec2/v1beta1/vpnconnectionroute.yaml | 2 +- .../ec2/v1beta2/ebssnapshotimport.yaml | 19 + examples-generated/ec2/v1beta2/flowlog.yaml | 66 + examples-generated/ec2/v1beta2/instance.yaml | 15 + 
.../ec2/v1beta2/launchtemplate.yaml | 62 + .../ec2/v1beta2/spotfleetrequest.yaml | 39 + .../ec2/v1beta2/spotinstancerequest.yaml | 16 + .../ec2/v1beta2/trafficmirrorfilterrule.yaml | 37 + .../ec2/v1beta2/vpcendpoint.yaml | 17 + .../ec2/v1beta2/vpcipampoolcidr.yaml | 50 + .../ec2/v1beta2/vpcpeeringconnection.yaml | 18 + .../v1beta2/vpcpeeringconnectionaccepter.yaml | 74 + .../v1beta2/vpcpeeringconnectionoptions.yaml | 67 + .../ec2/v1beta2/vpnconnection.yaml | 51 + .../ecr/v1beta1/lifecyclepolicy.yaml | 2 +- .../ecr/v1beta1/repositorypolicy.yaml | 2 +- .../ecr/v1beta2/replicationconfiguration.yaml | 16 + .../ecr/v1beta2/repository.yaml | 14 + .../ecrpublic/v1beta1/repositorypolicy.yaml | 2 +- .../ecrpublic/v1beta2/repository.yaml | 23 + .../ecs/v1beta1/clustercapacityproviders.yaml | 2 +- .../ecs/v1beta2/capacityprovider.yaml | 39 + examples-generated/ecs/v1beta2/cluster.yaml | 14 + examples-generated/ecs/v1beta2/service.yaml | 33 + .../ecs/v1beta2/taskdefinition.yaml | 47 + .../efs/v1beta1/filesystempolicy.yaml | 2 +- .../efs/v1beta2/accesspoint.yaml | 14 + .../efs/v1beta2/backuppolicy.yaml | 31 + .../efs/v1beta2/filesystem.yaml | 14 + .../efs/v1beta2/replicationconfiguration.yaml | 30 + examples-generated/eks/v1beta2/cluster.yaml | 18 + .../eks/v1beta2/identityproviderconfig.yaml | 17 + examples-generated/eks/v1beta2/nodegroup.yaml | 26 + .../elasticache/v1beta1/usergroup.yaml | 2 +- .../elasticache/v1beta2/user.yaml | 20 + .../v1beta1/applicationversion.yaml | 6 +- .../v1beta1/configurationtemplate.yaml | 2 +- .../elasticbeanstalk/v1beta2/application.yaml | 18 + .../elasticsearch/v1beta1/domainpolicy.yaml | 2 +- .../elasticsearch/v1beta2/domain.yaml | 16 + .../v1beta2/domainsamloptions.yaml | 37 + .../elastictranscoder/v1beta2/pipeline.yaml | 28 + .../elastictranscoder/v1beta2/preset.yaml | 58 + .../v1beta1/appcookiestickinesspolicy.yaml | 2 +- .../elb/v1beta1/backendserverpolicy.yaml | 2 +- .../elb/v1beta1/lbcookiestickinesspolicy.yaml | 2 +- 
.../elb/v1beta1/lbsslnegotiationpolicy.yaml | 2 +- .../elb/v1beta1/listenerpolicy.yaml | 2 +- examples-generated/elb/v1beta1/policy.yaml | 2 +- .../elb/v1beta1/proxyprotocolpolicy.yaml | 2 +- examples-generated/elb/v1beta2/elb.yaml | 43 + .../elbv2/v1beta1/lblistenercertificate.yaml | 6 +- .../v1beta1/lbtargetgroupattachment.yaml | 4 +- examples-generated/elbv2/v1beta2/lb.yaml | 28 + .../elbv2/v1beta2/lblistener.yaml | 51 + .../elbv2/v1beta2/lblistenerrule.yaml | 97 + .../elbv2/v1beta2/lbtargetgroup.yaml | 32 + .../emrserverless/v1beta2/application.yaml | 14 + .../evidently/v1beta2/feature.yaml | 21 + .../evidently/v1beta2/project.yaml | 15 + .../firehose/v1beta2/deliverystream.yaml | 108 + .../fis/v1beta2/experimenttemplate.yaml | 30 + examples-generated/fsx/v1beta1/backup.yaml | 2 +- .../v1beta2/datarepositoryassociation.yaml | 79 + .../fsx/v1beta2/lustrefilesystem.yaml | 15 + .../fsx/v1beta2/ontapfilesystem.yaml | 20 + .../v1beta2/ontapstoragevirtualmachine.yaml | 15 + .../fsx/v1beta2/windowsfilesystem.yaml | 21 + .../gamelift/v1beta2/alias.yaml | 16 + .../gamelift/v1beta2/build.yaml | 23 + .../gamelift/v1beta2/fleet.yaml | 21 + .../gamelift/v1beta2/script.yaml | 22 + .../glacier/v1beta1/vaultlock.yaml | 2 +- examples-generated/glacier/v1beta2/vault.yaml | 35 + .../globalaccelerator/v1beta1/listener.yaml | 2 +- .../v1beta2/accelerator.yaml | 20 + .../glue/v1beta1/userdefinedfunction.yaml | 2 +- examples-generated/glue/v1beta1/workflow.yaml | 4 +- .../glue/v1beta2/catalogdatabase.yaml | 11 + .../glue/v1beta2/catalogtable.yaml | 14 + .../glue/v1beta2/classifier.yaml | 20 + .../glue/v1beta2/connection.yaml | 15 + examples-generated/glue/v1beta2/crawler.yaml | 19 + .../datacatalogencryptionsettings.yaml | 23 + examples-generated/glue/v1beta2/job.yaml | 16 + .../glue/v1beta2/securityconfiguration.yaml | 21 + examples-generated/glue/v1beta2/trigger.yaml | 22 + .../grafana/v1beta1/licenseassociation.yaml | 2 +- .../grafana/v1beta1/roleassociation.yaml | 2 +- 
.../v1beta1/workspacesamlconfiguration.yaml | 2 +- .../grafana/v1beta2/workspace.yaml | 45 + .../guardduty/v1beta1/member.yaml | 4 +- .../guardduty/v1beta2/detector.yaml | 22 + .../guardduty/v1beta2/filter.yaml | 30 + .../v1beta1/groupmembership.yaml | 2 +- .../identitystore/v1beta2/user.yaml | 19 + .../imagebuilder/v1beta2/containerrecipe.yaml | 33 + .../v1beta2/distributionconfiguration.yaml | 23 + .../imagebuilder/v1beta2/image.yaml | 20 + .../imagebuilder/v1beta2/imagepipeline.yaml | 20 + .../imagebuilder/v1beta2/imagerecipe.yaml | 29 + .../v1beta2/infrastructureconfiguration.yaml | 39 + .../iot/v1beta2/indexingconfiguration.yaml | 28 + .../iot/v1beta2/provisioningtemplate.yaml | 88 + .../iot/v1beta2/thinggroup.yaml | 11 + examples-generated/iot/v1beta2/thingtype.yaml | 12 + examples-generated/iot/v1beta2/topicrule.yaml | 91 + .../iot/v1beta2/topicruledestination.yaml | 23 + .../ivs/v1beta2/recordingconfiguration.yaml | 15 + .../kafka/v1beta1/scramsecretassociation.yaml | 2 +- .../kafka/v1beta2/serverlesscluster.yaml | 22 + examples-generated/kafka/v1beta3/cluster.yaml | 245 + .../kafkaconnect/v1beta2/connector.yaml | 49 + .../kafkaconnect/v1beta2/customplugin.yaml | 55 + .../kendra/v1beta2/datasource.yaml | 20 + .../kendra/v1beta2/experience.yaml | 26 + examples-generated/kendra/v1beta2/index.yaml | 19 + .../v1beta2/querysuggestionsblocklist.yaml | 25 + .../kendra/v1beta2/thesaurus.yaml | 27 + .../keyspaces/v1beta2/table.yaml | 21 + .../kinesis/v1beta1/streamconsumer.yaml | 2 +- .../kinesis/v1beta2/stream.yaml | 20 + .../kinesisanalytics/v1beta2/application.yaml | 47 + .../v1beta2/application.yaml | 83 + .../lakeformation/v1beta2/permissions.yaml | 56 + .../lambda/v1beta1/permission.yaml | 4 +- examples-generated/lambda/v1beta2/alias.yaml | 19 + .../lambda/v1beta2/codesigningconfig.yaml | 18 + .../lambda/v1beta2/eventsourcemapping.yaml | 16 + .../lambda/v1beta2/function.yaml | 35 + .../v1beta2/functioneventinvokeconfig.yaml | 21 + 
.../lambda/v1beta2/functionurl.yaml | 15 + examples-generated/lexmodels/v1beta2/bot.yaml | 30 + .../lexmodels/v1beta2/botalias.yaml | 14 + .../lexmodels/v1beta2/intent.yaml | 68 + .../licensemanager/v1beta1/association.yaml | 2 +- .../lightsail/v1beta1/diskattachment.yaml | 2 +- .../v1beta1/instancepublicports.yaml | 2 +- .../lightsail/v1beta1/lbattachment.yaml | 2 +- .../lightsail/v1beta1/staticipattachment.yaml | 2 +- .../lightsail/v1beta2/containerservice.yaml | 17 + .../lightsail/v1beta2/instance.yaml | 17 + .../location/v1beta2/placeindex.yaml | 12 + .../macie2/v1beta2/classificationjob.yaml | 32 + .../macie2/v1beta2/findingsfilter.yaml | 34 + .../mediaconvert/v1beta2/queue.yaml | 11 + .../medialive/v1beta2/channel.yaml | 57 + .../medialive/v1beta2/input.yaml | 35 + .../medialive/v1beta2/multiplex.yaml | 23 + examples-generated/memorydb/v1beta2/user.yaml | 18 + examples-generated/mq/v1beta2/broker.yaml | 28 + .../neptune/v1beta1/clusterinstance.yaml | 2 +- .../neptune/v1beta1/eventsubscription.yaml | 2 +- .../neptune/v1beta1/globalcluster.yaml | 4 +- .../neptune/v1beta2/cluster.yaml | 17 + .../networkfirewall/v1beta2/firewall.yaml | 29 + .../v1beta2/firewallpolicy.yaml | 25 + .../v1beta2/loggingconfiguration.yaml | 21 + .../networkfirewall/v1beta2/rulegroup.yaml | 25 + .../v1beta1/customergatewayassociation.yaml | 6 +- .../v1beta2/connectattachment.yaml | 45 + .../networkmanager/v1beta2/device.yaml | 17 + .../networkmanager/v1beta2/link.yaml | 21 + .../networkmanager/v1beta2/site.yaml | 28 + .../networkmanager/v1beta2/vpcattachment.yaml | 19 + .../opensearch/v1beta1/domainpolicy.yaml | 2 +- .../opensearch/v1beta2/domain.yaml | 17 + .../opensearch/v1beta2/domainsamloptions.yaml | 41 + .../v1beta2/securityconfig.yaml | 12 + .../opsworks/v1beta2/customlayer.yaml | 15 + .../opsworks/v1beta2/ecsclusterlayer.yaml | 16 + .../opsworks/v1beta2/ganglialayer.yaml | 14 + .../opsworks/v1beta2/haproxylayer.yaml | 14 + .../opsworks/v1beta2/javaapplayer.yaml | 13 + 
.../opsworks/v1beta2/memcachedlayer.yaml | 13 + .../opsworks/v1beta2/mysqllayer.yaml | 13 + .../opsworks/v1beta2/nodejsapplayer.yaml | 13 + .../opsworks/v1beta2/phpapplayer.yaml | 13 + .../opsworks/v1beta2/railsapplayer.yaml | 13 + .../opsworks/v1beta2/stack.yaml | 26 + .../opsworks/v1beta2/staticweblayer.yaml | 13 + .../pinpoint/v1beta1/smschannel.yaml | 2 +- examples-generated/pinpoint/v1beta2/app.yaml | 17 + examples-generated/qldb/v1beta2/stream.yaml | 26 + .../rds/v1beta1/clusteractivitystream.yaml | 2 +- .../rds/v1beta1/clusterendpoint.yaml | 2 +- .../rds/v1beta1/clusterinstance.yaml | 2 +- .../rds/v1beta1/dbsnapshotcopy.yaml | 2 +- .../rds/v1beta1/eventsubscription.yaml | 2 +- .../rds/v1beta1/globalcluster.yaml | 4 +- .../rds/v1beta1/proxytarget.yaml | 2 +- examples-generated/rds/v1beta1/snapshot.yaml | 2 +- examples-generated/rds/v1beta2/cluster.yaml | 25 + .../rds/v1beta2/proxydefaulttargetgroup.yaml | 56 + examples-generated/rds/v1beta3/instance.yaml | 25 + .../redshift/v1beta1/eventsubscription.yaml | 2 +- .../redshift/v1beta1/snapshotcopygrant.yaml | 2 +- .../v1beta1/snapshotscheduleassociation.yaml | 2 +- .../redshift/v1beta2/cluster.yaml | 19 + .../redshift/v1beta2/scheduledaction.yaml | 65 + .../resourcegroups/v1beta2/group.yaml | 24 + .../route53/v1beta2/record.yaml | 19 + .../v1beta2/safetyrule.yaml | 22 + .../v1beta2/resourceset.yaml | 16 + .../rum/v1beta2/appmonitor.yaml | 12 + .../bucketaccelerateconfiguration.yaml | 2 +- .../s3/v1beta1/bucketcorsconfiguration.yaml | 2 +- .../s3/v1beta1/bucketnotification.yaml | 2 +- .../s3/v1beta1/bucketpolicy.yaml | 2 +- .../s3/v1beta1/bucketpublicaccessblock.yaml | 2 +- examples-generated/s3/v1beta2/bucket.yaml | 15 + examples-generated/s3/v1beta2/bucketacl.yaml | 49 + .../v1beta2/bucketanalyticsconfiguration.yaml | 52 + ...bucketintelligenttieringconfiguration.yaml | 35 + .../s3/v1beta2/bucketinventory.yaml | 54 + .../v1beta2/bucketlifecycleconfiguration.yaml | 17 + .../s3/v1beta2/bucketlogging.yaml | 84 + 
.../s3/v1beta2/bucketmetric.yaml | 30 + .../bucketobjectlockconfiguration.yaml | 52 + .../s3/v1beta2/bucketownershipcontrols.yaml | 31 + .../bucketreplicationconfiguration.yaml | 164 + ...cketserversideencryptionconfiguration.yaml | 51 + .../s3/v1beta2/bucketversioning.yaml | 49 + .../v1beta2/bucketwebsiteconfiguration.yaml | 23 + examples-generated/s3/v1beta2/object.yaml | 17 + .../s3control/v1beta1/accesspointpolicy.yaml | 4 +- .../objectlambdaaccesspointpolicy.yaml | 6 +- .../s3control/v1beta2/accesspoint.yaml | 30 + .../v1beta2/multiregionaccesspoint.yaml | 52 + .../v1beta2/multiregionaccesspointpolicy.yaml | 64 + .../v1beta2/objectlambdaaccesspoint.yaml | 57 + .../v1beta2/storagelensconfiguration.yaml | 39 + examples-generated/sagemaker/v1beta2/app.yaml | 19 + .../sagemaker/v1beta2/appimageconfig.yaml | 14 + .../sagemaker/v1beta2/coderepository.yaml | 13 + .../sagemaker/v1beta2/device.yaml | 16 + .../sagemaker/v1beta2/devicefleet.yaml | 16 + .../sagemaker/v1beta2/domain.yaml | 37 + .../sagemaker/v1beta2/endpoint.yaml | 16 + .../v1beta2/endpointconfiguration.yaml | 20 + .../sagemaker/v1beta2/featuregroup.yaml | 21 + .../sagemaker/v1beta2/model.yaml | 30 + .../sagemaker/v1beta2/notebookinstance.yaml | 17 + .../sagemaker/v1beta2/space.yaml | 15 + .../sagemaker/v1beta2/userprofile.yaml | 15 + .../sagemaker/v1beta2/workforce.yaml | 70 + .../sagemaker/v1beta2/workteam.yaml | 26 + .../scheduler/v1beta2/schedule.yaml | 23 + .../v1beta2/secretrotation.yaml | 19 + .../securityhub/v1beta2/insight.yaml | 33 + .../servicecatalog/v1beta2/product.yaml | 18 + .../servicecatalog/v1beta2/serviceaction.yaml | 15 + .../servicediscovery/v1beta2/service.yaml | 58 + .../ses/v1beta1/domaindkim.yaml | 2 +- .../ses/v1beta1/domainmailfrom.yaml | 4 +- .../ses/v1beta2/configurationset.yaml | 11 + .../ses/v1beta2/eventdestination.yaml | 22 + .../emailidentityfeedbackattributes.yaml | 2 +- .../emailidentitymailfromattributes.yaml | 2 +- .../sesv2/v1beta2/configurationset.yaml | 23 + 
.../configurationseteventdestination.yaml | 38 + .../sesv2/v1beta2/emailidentity.yaml | 11 + .../sfn/v1beta2/statemachine.yaml | 26 + .../v1beta1/signingprofilepermission.yaml | 2 +- .../signer/v1beta2/signingjob.yaml | 39 + .../signer/v1beta2/signingprofile.yaml | 12 + .../ssm/v1beta2/association.yaml | 18 + .../ssm/v1beta2/maintenancewindowtask.yaml | 32 + .../ssm/v1beta2/resourcedatasync.yaml | 51 + .../customermanagedpolicyattachment.yaml | 63 + .../permissionsboundaryattachment.yaml | 64 + .../timestreamwrite/v1beta2/table.yaml | 15 + .../transcribe/v1beta1/vocabulary.yaml | 4 +- .../transcribe/v1beta2/languagemodel.yaml | 99 + .../transfer/v1beta1/sshkey.yaml | 4 +- examples-generated/transfer/v1beta1/tag.yaml | 2 +- .../transfer/v1beta2/server.yaml | 13 + examples-generated/transfer/v1beta2/user.yaml | 69 + .../transfer/v1beta2/workflow.yaml | 16 + .../waf/v1beta2/bytematchset.yaml | 19 + .../waf/v1beta2/regexmatchset.yaml | 38 + .../waf/v1beta2/sizeconstraintset.yaml | 18 + .../waf/v1beta2/sqlinjectionmatchset.yaml | 16 + examples-generated/waf/v1beta2/webacl.yaml | 63 + .../waf/v1beta2/xssmatchset.yaml | 19 + .../wafregional/v1beta2/bytematchset.yaml | 19 + .../wafregional/v1beta2/regexmatchset.yaml | 38 + .../v1beta2/sizeconstraintset.yaml | 18 + .../v1beta2/sqlinjectionmatchset.yaml | 16 + .../wafregional/v1beta2/webacl.yaml | 63 + .../wafregional/v1beta2/xssmatchset.yaml | 19 + .../workspaces/v1beta2/directory.yaml | 212 + examples-generated/xray/v1beta2/group.yaml | 16 + ...cessanalyzer.aws.upbound.io_analyzers.yaml | 401 + .../crds/acm.aws.upbound.io_certificates.yaml | 637 + ...aws.upbound.io_certificateauthorities.yaml | 912 + .../acmpca.aws.upbound.io_certificates.yaml | 620 + .../crds/amp.aws.upbound.io_workspaces.yaml | 550 + package/crds/amplify.aws.upbound.io_apps.yaml | 1016 + ...way.aws.upbound.io_documentationparts.yaml | 577 + ...apigateway.aws.upbound.io_domainnames.yaml | 964 + ...pigateway.aws.upbound.io_integrations.yaml | 1387 ++ 
...gateway.aws.upbound.io_methodsettings.yaml | 801 + .../apigateway.aws.upbound.io_restapis.yaml | 721 + .../apigateway.aws.upbound.io_stages.yaml | 883 + .../apigateway.aws.upbound.io_usageplans.yaml | 866 + .../apigatewayv2.aws.upbound.io_apis.yaml | 640 + ...igatewayv2.aws.upbound.io_authorizers.yaml | 822 + ...igatewayv2.aws.upbound.io_domainnames.yaml | 631 + ...gatewayv2.aws.upbound.io_integrations.yaml | 1242 + .../apigatewayv2.aws.upbound.io_stages.yaml | 931 + ...ppautoscaling.aws.upbound.io_policies.yaml | 1159 + ...aling.aws.upbound.io_scheduledactions.yaml | 955 + .../crds/appflow.aws.upbound.io_flows.yaml | 3457 +++ ...ions.aws.upbound.io_eventintegrations.yaml | 393 + .../appmesh.aws.upbound.io_gatewayroutes.yaml | 1861 ++ .../crds/appmesh.aws.upbound.io_meshes.yaml | 419 + .../crds/appmesh.aws.upbound.io_routes.yaml | 2925 +++ ...ppmesh.aws.upbound.io_virtualgateways.yaml | 1582 ++ .../appmesh.aws.upbound.io_virtualnodes.yaml | 2771 +++ ...appmesh.aws.upbound.io_virtualrouters.yaml | 606 + ...ppmesh.aws.upbound.io_virtualservices.yaml | 934 + ...pbound.io_observabilityconfigurations.yaml | 403 + .../apprunner.aws.upbound.io_services.yaml | 1737 ++ ...tream.aws.upbound.io_directoryconfigs.yaml | 440 + .../crds/appstream.aws.upbound.io_fleet.yaml | 966 + ...ppstream.aws.upbound.io_imagebuilders.yaml | 874 + .../crds/appstream.aws.upbound.io_stacks.yaml | 672 + .../appsync.aws.upbound.io_datasources.yaml | 1116 + .../appsync.aws.upbound.io_functions.yaml | 847 + .../appsync.aws.upbound.io_graphqlapis.yaml | 1275 + .../appsync.aws.upbound.io_resolvers.yaml | 837 + .../crds/athena.aws.upbound.io_databases.yaml | 596 + .../athena.aws.upbound.io_workgroups.yaml | 795 + ...ling.aws.upbound.io_autoscalinggroups.yaml | 3598 +++ .../autoscaling.aws.upbound.io_grouptags.yaml | 521 + ...g.aws.upbound.io_launchconfigurations.yaml | 788 + .../autoscaling.aws.upbound.io_policies.yaml | 1869 ++ ...lingplans.aws.upbound.io_scalingplans.yaml | 927 + 
.../backup.aws.upbound.io_frameworks.yaml | 580 + package/crds/backup.aws.upbound.io_plans.yaml | 852 + .../backup.aws.upbound.io_reportplans.yaml | 586 + .../batch.aws.upbound.io_jobdefinitions.yaml | 1182 + ...tch.aws.upbound.io_schedulingpolicies.yaml | 422 + .../budgets.aws.upbound.io_budgetactions.yaml | 1171 + .../crds/budgets.aws.upbound.io_budgets.yaml | 883 + ...s.upbound.io_voiceconnectorstreamings.yaml | 564 + ...tion.aws.upbound.io_stacksetinstances.yaml | 719 + ...oudformation.aws.upbound.io_stacksets.yaml | 823 + ...oudfront.aws.upbound.io_cachepolicies.yaml | 670 + ...oudfront.aws.upbound.io_distributions.yaml | 3055 +++ ...pbound.io_fieldlevelencryptionconfigs.yaml | 698 + ...bound.io_fieldlevelencryptionprofiles.yaml | 602 + ...ws.upbound.io_monitoringsubscriptions.yaml | 535 + ....aws.upbound.io_originrequestpolicies.yaml | 495 + ...ont.aws.upbound.io_realtimelogconfigs.yaml | 745 + ...ws.upbound.io_responseheaderspolicies.yaml | 1060 + .../cloudsearch.aws.upbound.io_domains.yaml | 573 + ...dwatch.aws.upbound.io_compositealarms.yaml | 833 + ...loudwatch.aws.upbound.io_metricalarms.yaml | 915 + ...atchevents.aws.upbound.io_connections.yaml | 1197 + ...atchevents.aws.upbound.io_permissions.yaml | 729 + ...oudwatchevents.aws.upbound.io_targets.yaml | 1995 ++ ...atchlogs.aws.upbound.io_metricfilters.yaml | 603 + ...pipeline.aws.upbound.io_codepipelines.yaml | 1498 ++ ...line.aws.upbound.io_customactiontypes.yaml | 636 + .../codepipeline.aws.upbound.io_webhooks.yaml | 654 + ...estarconnections.aws.upbound.io_hosts.yaml | 462 + ...idp.aws.upbound.io_riskconfigurations.yaml | 967 + .../cognitoidp.aws.upbound.io_userpools.yaml | 3786 +++ ...figservice.aws.upbound.io_configrules.yaml | 855 + ...s.upbound.io_configurationaggregators.yaml | 605 + ...aws.upbound.io_configurationrecorders.yaml | 717 + ...rvice.aws.upbound.io_deliverychannels.yaml | 539 + ....upbound.io_remediationconfigurations.yaml | 518 + ...onnect.aws.upbound.io_botassociations.yaml | 585 + 
...nect.aws.upbound.io_hoursofoperations.yaml | 652 + ...aws.upbound.io_instancestorageconfigs.yaml | 1515 ++ .../crds/connect.aws.upbound.io_queues.yaml | 786 + .../connect.aws.upbound.io_quickconnects.yaml | 692 + ...ws.upbound.io_userhierarchystructures.yaml | 669 + .../crds/connect.aws.upbound.io_users.yaml | 962 + .../datasync.aws.upbound.io_locations3s.yaml | 730 + .../crds/datasync.aws.upbound.io_tasks.yaml | 1473 ++ package/crds/dax.aws.upbound.io_clusters.yaml | 888 + ...ploy.aws.upbound.io_deploymentconfigs.yaml | 481 + ...eploy.aws.upbound.io_deploymentgroups.yaml | 2235 ++ ...efarm.aws.upbound.io_testgridprojects.yaml | 889 + .../dlm.aws.upbound.io_lifecyclepolicies.yaml | 1923 ++ .../crds/dms.aws.upbound.io_endpoints.yaml | 2639 ++ .../crds/ds.aws.upbound.io_directories.yaml | 1256 + .../ds.aws.upbound.io_shareddirectories.yaml | 570 + .../crds/dynamodb.aws.upbound.io_tables.yaml | 1163 + ...ec2.aws.upbound.io_ebssnapshotimports.yaml | 751 + package/crds/ec2.aws.upbound.io_flowlogs.yaml | 1148 + .../crds/ec2.aws.upbound.io_instances.yaml | 2678 ++ .../ec2.aws.upbound.io_launchtemplates.yaml | 3719 +++ .../ec2.aws.upbound.io_spotfleetrequests.yaml | 2471 ++ ...2.aws.upbound.io_spotinstancerequests.yaml | 1552 ++ ...s.upbound.io_trafficmirrorfilterrules.yaml | 665 + .../crds/ec2.aws.upbound.io_vpcendpoints.yaml | 856 + .../ec2.aws.upbound.io_vpcipampoolcidrs.yaml | 549 + ...ound.io_vpcpeeringconnectionaccepters.yaml | 596 + ...pbound.io_vpcpeeringconnectionoptions.yaml | 542 + ....aws.upbound.io_vpcpeeringconnections.yaml | 724 + .../ec2.aws.upbound.io_vpnconnections.yaml | 2091 ++ ....upbound.io_replicationconfigurations.yaml | 447 + .../crds/ecr.aws.upbound.io_repositories.yaml | 603 + ...ecrpublic.aws.upbound.io_repositories.yaml | 494 + .../ecs.aws.upbound.io_capacityproviders.yaml | 636 + package/crds/ecs.aws.upbound.io_clusters.yaml | 559 + package/crds/ecs.aws.upbound.io_services.yaml | 2413 ++ .../ecs.aws.upbound.io_taskdefinitions.yaml | 1327 + 
.../crds/efs.aws.upbound.io_accesspoints.yaml | 651 + .../efs.aws.upbound.io_backuppolicies.yaml | 499 + .../crds/efs.aws.upbound.io_filesystems.yaml | 733 + ....upbound.io_replicationconfigurations.yaml | 561 + package/crds/eks.aws.upbound.io_clusters.yaml | 1363 ++ ...ws.upbound.io_identityproviderconfigs.yaml | 612 + .../crds/eks.aws.upbound.io_nodegroups.yaml | 1494 ++ .../elasticache.aws.upbound.io_users.yaml | 471 + ...beanstalk.aws.upbound.io_applications.yaml | 562 + .../elasticsearch.aws.upbound.io_domains.yaml | 1451 ++ ...arch.aws.upbound.io_domainsamloptions.yaml | 466 + ...ictranscoder.aws.upbound.io_pipelines.yaml | 1280 + ...stictranscoder.aws.upbound.io_presets.yaml | 1116 + package/crds/elb.aws.upbound.io_elbs.yaml | 1053 + .../elbv2.aws.upbound.io_lblistenerrules.yaml | 2345 ++ .../elbv2.aws.upbound.io_lblisteners.yaml | 1549 ++ package/crds/elbv2.aws.upbound.io_lbs.yaml | 1489 ++ .../elbv2.aws.upbound.io_lbtargetgroups.yaml | 1120 + ...erverless.aws.upbound.io_applications.yaml | 699 + .../evidently.aws.upbound.io_features.yaml | 652 + .../evidently.aws.upbound.io_projects.yaml | 486 + ...rehose.aws.upbound.io_deliverystreams.yaml | 11063 +++++++++ ...is.aws.upbound.io_experimenttemplates.yaml | 983 + ...upbound.io_datarepositoryassociations.yaml | 741 + .../fsx.aws.upbound.io_lustrefilesystems.yaml | 1318 + .../fsx.aws.upbound.io_ontapfilesystems.yaml | 1361 ++ ...pbound.io_ontapstoragevirtualmachines.yaml | 864 + ...fsx.aws.upbound.io_windowsfilesystems.yaml | 1545 ++ .../crds/gamelift.aws.upbound.io_aliases.yaml | 414 + .../crds/gamelift.aws.upbound.io_builds.yaml | 889 + .../crds/gamelift.aws.upbound.io_fleet.yaml | 1017 + .../crds/gamelift.aws.upbound.io_scripts.yaml | 881 + .../crds/glacier.aws.upbound.io_vaults.yaml | 554 + ...celerator.aws.upbound.io_accelerators.yaml | 484 + .../glue.aws.upbound.io_catalogdatabases.yaml | 527 + .../glue.aws.upbound.io_catalogtables.yaml | 1166 + .../crds/glue.aws.upbound.io_classifiers.yaml | 559 + 
.../crds/glue.aws.upbound.io_connections.yaml | 765 + .../crds/glue.aws.upbound.io_crawlers.yaml | 2000 ++ ...ound.io_datacatalogencryptionsettings.yaml | 771 + package/crds/glue.aws.upbound.io_jobs.yaml | 824 + ...aws.upbound.io_securityconfigurations.yaml | 886 + .../crds/glue.aws.upbound.io_triggers.yaml | 1320 + .../grafana.aws.upbound.io_workspaces.yaml | 827 + .../guardduty.aws.upbound.io_detectors.yaml | 555 + .../guardduty.aws.upbound.io_filters.yaml | 597 + .../identitystore.aws.upbound.io_users.yaml | 701 + ...ilder.aws.upbound.io_containerrecipes.yaml | 1254 + ...upbound.io_distributionconfigurations.yaml | 916 + ...builder.aws.upbound.io_imagepipelines.yaml | 929 + ...gebuilder.aws.upbound.io_imagerecipes.yaml | 861 + .../imagebuilder.aws.upbound.io_images.yaml | 1142 + ...bound.io_infrastructureconfigurations.yaml | 1471 ++ ...aws.upbound.io_indexingconfigurations.yaml | 601 + ....aws.upbound.io_provisioningtemplates.yaml | 594 + .../crds/iot.aws.upbound.io_thinggroups.yaml | 572 + .../crds/iot.aws.upbound.io_thingtypes.yaml | 408 + ....aws.upbound.io_topicruledestinations.yaml | 1015 + .../crds/iot.aws.upbound.io_topicrules.yaml | 3371 +++ ...ws.upbound.io_recordingconfigurations.yaml | 457 + .../crds/kafka.aws.upbound.io_clusters.yaml | 2340 ++ ...fka.aws.upbound.io_serverlessclusters.yaml | 796 + ...afkaconnect.aws.upbound.io_connectors.yaml | 2217 ++ ...aconnect.aws.upbound.io_customplugins.yaml | 714 + .../kendra.aws.upbound.io_datasources.yaml | 2806 +++ .../kendra.aws.upbound.io_experiences.yaml | 806 + .../crds/kendra.aws.upbound.io_indices.yaml | 1147 + ...upbound.io_querysuggestionsblocklists.yaml | 879 + .../crds/kendra.aws.upbound.io_thesaurus.yaml | 1028 + .../crds/keyspaces.aws.upbound.io_tables.yaml | 898 + .../crds/kinesis.aws.upbound.io_streams.yaml | 620 + ...analytics.aws.upbound.io_applications.yaml | 2158 ++ ...alyticsv2.aws.upbound.io_applications.yaml | 3146 +++ ...eformation.aws.upbound.io_permissions.yaml | 1291 + 
.../crds/lambda.aws.upbound.io_aliases.yaml | 462 + ...bda.aws.upbound.io_codesigningconfigs.yaml | 576 + ...da.aws.upbound.io_eventsourcemappings.yaml | 1159 + ...upbound.io_functioneventinvokeconfigs.yaml | 744 + .../crds/lambda.aws.upbound.io_functions.yaml | 2183 ++ .../lambda.aws.upbound.io_functionurls.yaml | 667 + .../lexmodels.aws.upbound.io_botaliases.yaml | 481 + .../crds/lexmodels.aws.upbound.io_bots.yaml | 791 + .../lexmodels.aws.upbound.io_intents.yaml | 1367 ++ ...sail.aws.upbound.io_containerservices.yaml | 557 + .../lightsail.aws.upbound.io_instances.yaml | 536 + .../location.aws.upbound.io_placeindices.yaml | 406 + ...ie2.aws.upbound.io_classificationjobs.yaml | 1448 ++ ...macie2.aws.upbound.io_findingsfilters.yaml | 583 + .../mediaconvert.aws.upbound.io_queues.yaml | 429 + .../medialive.aws.upbound.io_channels.yaml | 9417 +++++++ .../crds/medialive.aws.upbound.io_inputs.yaml | 752 + .../medialive.aws.upbound.io_multiplices.yaml | 433 + .../crds/memorydb.aws.upbound.io_users.yaml | 420 + package/crds/mq.aws.upbound.io_brokers.yaml | 1677 ++ .../crds/neptune.aws.upbound.io_clusters.yaml | 1856 ++ ...ewall.aws.upbound.io_firewallpolicies.yaml | 1229 + ...workfirewall.aws.upbound.io_firewalls.yaml | 1032 + ....aws.upbound.io_loggingconfigurations.yaml | 583 + ...orkfirewall.aws.upbound.io_rulegroups.yaml | 1895 ++ ...ger.aws.upbound.io_connectattachments.yaml | 880 + ...networkmanager.aws.upbound.io_devices.yaml | 786 + .../networkmanager.aws.upbound.io_links.yaml | 727 + .../networkmanager.aws.upbound.io_sites.yaml | 550 + ...manager.aws.upbound.io_vpcattachments.yaml | 902 + .../opensearch.aws.upbound.io_domains.yaml | 1604 ++ ...arch.aws.upbound.io_domainsamloptions.yaml | 614 + ...erless.aws.upbound.io_securityconfigs.yaml | 389 + .../opsworks.aws.upbound.io_customlayers.yaml | 1447 ++ ...works.aws.upbound.io_ecsclusterlayers.yaml | 1340 + ...opsworks.aws.upbound.io_ganglialayers.yaml | 1221 + ...opsworks.aws.upbound.io_haproxylayers.yaml | 1251 + 
...opsworks.aws.upbound.io_javaapplayers.yaml | 1237 + ...sworks.aws.upbound.io_memcachedlayers.yaml | 1198 + .../opsworks.aws.upbound.io_mysqllayers.yaml | 1207 + ...sworks.aws.upbound.io_nodejsapplayers.yaml | 1195 + .../opsworks.aws.upbound.io_phpapplayers.yaml | 1186 + ...psworks.aws.upbound.io_railsapplayers.yaml | 1243 + .../crds/opsworks.aws.upbound.io_stacks.yaml | 1302 + ...sworks.aws.upbound.io_staticweblayers.yaml | 1183 + .../crds/pinpoint.aws.upbound.io_apps.yaml | 523 + package/crds/qldb.aws.upbound.io_streams.yaml | 941 + package/crds/rds.aws.upbound.io_clusters.yaml | 2682 ++ .../crds/rds.aws.upbound.io_instances.yaml | 2783 +++ ...s.upbound.io_proxydefaulttargetgroups.yaml | 623 + .../redshift.aws.upbound.io_clusters.yaml | 1728 ++ ...shift.aws.upbound.io_scheduledactions.yaml | 668 + .../resourcegroups.aws.upbound.io_groups.yaml | 463 + .../crds/route53.aws.upbound.io_records.yaml | 1069 + ...trolconfig.aws.upbound.io_safetyrules.yaml | 787 + ...readiness.aws.upbound.io_resourcesets.yaml | 697 + .../crds/rum.aws.upbound.io_appmonitors.yaml | 622 + .../crds/s3.aws.upbound.io_bucketacls.yaml | 639 + ...ound.io_bucketanalyticsconfigurations.yaml | 796 + ...ucketintelligenttieringconfigurations.yaml | 600 + .../s3.aws.upbound.io_bucketinventories.yaml | 889 + ...ound.io_bucketlifecycleconfigurations.yaml | 1032 + .../s3.aws.upbound.io_bucketloggings.yaml | 797 + .../crds/s3.aws.upbound.io_bucketmetrics.yaml | 689 + ...und.io_bucketobjectlockconfigurations.yaml | 604 + ...ws.upbound.io_bucketownershipcontrols.yaml | 507 + ...nd.io_bucketreplicationconfigurations.yaml | 1618 ++ package/crds/s3.aws.upbound.io_buckets.yaml | 863 + ...ketserversideencryptionconfigurations.yaml | 717 + .../s3.aws.upbound.io_bucketversionings.yaml | 548 + ...pbound.io_bucketwebsiteconfigurations.yaml | 753 + package/crds/s3.aws.upbound.io_objects.yaml | 1053 + ...s3control.aws.upbound.io_accesspoints.yaml | 848 + ...und.io_multiregionaccesspointpolicies.yaml | 389 + 
...ws.upbound.io_multiregionaccesspoints.yaml | 604 + ...s.upbound.io_objectlambdaaccesspoints.yaml | 825 + ....upbound.io_storagelensconfigurations.yaml | 1219 + ...emaker.aws.upbound.io_appimageconfigs.yaml | 748 + .../crds/sagemaker.aws.upbound.io_apps.yaml | 777 + ...maker.aws.upbound.io_coderepositories.yaml | 554 + .../sagemaker.aws.upbound.io_devicefleet.yaml | 579 + .../sagemaker.aws.upbound.io_devices.yaml | 530 + .../sagemaker.aws.upbound.io_domains.yaml | 3496 +++ ...aws.upbound.io_endpointconfigurations.yaml | 1635 ++ .../sagemaker.aws.upbound.io_endpoints.yaml | 944 + ...agemaker.aws.upbound.io_featuregroups.yaml | 843 + .../crds/sagemaker.aws.upbound.io_models.yaml | 1135 + ...aker.aws.upbound.io_notebookinstances.yaml | 1211 + .../crds/sagemaker.aws.upbound.io_spaces.yaml | 1194 + ...sagemaker.aws.upbound.io_userprofiles.yaml | 2107 ++ .../sagemaker.aws.upbound.io_workforces.yaml | 912 + .../sagemaker.aws.upbound.io_workteams.yaml | 1149 + .../scheduler.aws.upbound.io_schedules.yaml | 1620 ++ ...anager.aws.upbound.io_secretrotations.yaml | 742 + .../securityhub.aws.upbound.io_insights.yaml | 5033 ++++ ...ervicecatalog.aws.upbound.io_products.yaml | 556 + ...catalog.aws.upbound.io_serviceactions.yaml | 430 + ...vicediscovery.aws.upbound.io_services.yaml | 745 + .../ses.aws.upbound.io_configurationsets.yaml | 420 + .../ses.aws.upbound.io_eventdestinations.yaml | 1062 + ....io_configurationseteventdestinations.yaml | 1332 + ...esv2.aws.upbound.io_configurationsets.yaml | 583 + .../sesv2.aws.upbound.io_emailidentities.yaml | 631 + .../sfn.aws.upbound.io_statemachines.yaml | 659 + .../signer.aws.upbound.io_signingjobs.yaml | 687 + ...signer.aws.upbound.io_signingprofiles.yaml | 456 + .../crds/ssm.aws.upbound.io_associations.yaml | 766 + ...aws.upbound.io_maintenancewindowtasks.yaml | 1906 ++ .../ssm.aws.upbound.io_resourcedatasyncs.yaml | 605 + ...d.io_customermanagedpolicyattachments.yaml | 604 + ...und.io_permissionsboundaryattachments.yaml | 630 + 
...timestreamwrite.aws.upbound.io_tables.yaml | 684 + ...nscribe.aws.upbound.io_languagemodels.yaml | 562 + .../crds/transfer.aws.upbound.io_servers.yaml | 1668 ++ .../crds/transfer.aws.upbound.io_users.yaml | 832 + .../transfer.aws.upbound.io_workflows.yaml | 1741 ++ .../waf.aws.upbound.io_bytematchsets.yaml | 471 + .../waf.aws.upbound.io_regexmatchsets.yaml | 587 + ...waf.aws.upbound.io_sizeconstraintsets.yaml | 450 + ....aws.upbound.io_sqlinjectionmatchsets.yaml | 430 + package/crds/waf.aws.upbound.io_webacls.yaml | 928 + .../crds/waf.aws.upbound.io_xssmatchsets.yaml | 426 + ...regional.aws.upbound.io_bytematchsets.yaml | 432 + ...egional.aws.upbound.io_regexmatchsets.yaml | 584 + ...nal.aws.upbound.io_sizeconstraintsets.yaml | 461 + ....aws.upbound.io_sqlinjectionmatchsets.yaml | 428 + .../wafregional.aws.upbound.io_webacls.yaml | 965 + ...fregional.aws.upbound.io_xssmatchsets.yaml | 411 + ...workspaces.aws.upbound.io_directories.yaml | 1151 + package/crds/xray.aws.upbound.io_groups.yaml | 402 + 2590 files changed, 1008389 insertions(+), 4034 deletions(-) create mode 100755 apis/accessanalyzer/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/accessanalyzer/v1beta2/zz_analyzer_terraformed.go create mode 100755 apis/accessanalyzer/v1beta2/zz_analyzer_types.go create mode 100755 apis/accessanalyzer/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/accessanalyzer/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/accessanalyzer/v1beta2/zz_generated.managed.go create mode 100644 apis/accessanalyzer/v1beta2/zz_generated.managedlist.go create mode 100755 apis/accessanalyzer/v1beta2/zz_groupversion_info.go create mode 100755 apis/acm/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/acm/v1beta2/zz_certificate_terraformed.go create mode 100755 apis/acm/v1beta2/zz_certificate_types.go create mode 100755 apis/acm/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/acm/v1beta2/zz_generated.deepcopy.go create 
mode 100644 apis/acm/v1beta2/zz_generated.managed.go create mode 100644 apis/acm/v1beta2/zz_generated.managedlist.go create mode 100755 apis/acm/v1beta2/zz_groupversion_info.go create mode 100755 apis/acmpca/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/acmpca/v1beta2/zz_certificate_terraformed.go create mode 100755 apis/acmpca/v1beta2/zz_certificate_types.go create mode 100755 apis/acmpca/v1beta2/zz_certificateauthority_terraformed.go create mode 100755 apis/acmpca/v1beta2/zz_certificateauthority_types.go create mode 100755 apis/acmpca/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/acmpca/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/acmpca/v1beta2/zz_generated.managed.go create mode 100644 apis/acmpca/v1beta2/zz_generated.managedlist.go create mode 100644 apis/acmpca/v1beta2/zz_generated.resolvers.go create mode 100755 apis/acmpca/v1beta2/zz_groupversion_info.go create mode 100755 apis/amp/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/amp/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/amp/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/amp/v1beta2/zz_generated.managed.go create mode 100644 apis/amp/v1beta2/zz_generated.managedlist.go create mode 100644 apis/amp/v1beta2/zz_generated.resolvers.go create mode 100755 apis/amp/v1beta2/zz_groupversion_info.go create mode 100755 apis/amp/v1beta2/zz_workspace_terraformed.go create mode 100755 apis/amp/v1beta2/zz_workspace_types.go create mode 100755 apis/amplify/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/amplify/v1beta2/zz_app_terraformed.go create mode 100755 apis/amplify/v1beta2/zz_app_types.go create mode 100755 apis/amplify/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/amplify/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/amplify/v1beta2/zz_generated.managed.go create mode 100644 apis/amplify/v1beta2/zz_generated.managedlist.go create mode 100644 
apis/amplify/v1beta2/zz_generated.resolvers.go create mode 100755 apis/amplify/v1beta2/zz_groupversion_info.go create mode 100755 apis/apigateway/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/apigateway/v1beta2/zz_documentationpart_terraformed.go create mode 100755 apis/apigateway/v1beta2/zz_documentationpart_types.go create mode 100755 apis/apigateway/v1beta2/zz_domainname_terraformed.go create mode 100755 apis/apigateway/v1beta2/zz_domainname_types.go create mode 100755 apis/apigateway/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/apigateway/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/apigateway/v1beta2/zz_generated.managed.go create mode 100644 apis/apigateway/v1beta2/zz_generated.managedlist.go create mode 100644 apis/apigateway/v1beta2/zz_generated.resolvers.go create mode 100755 apis/apigateway/v1beta2/zz_groupversion_info.go create mode 100755 apis/apigateway/v1beta2/zz_integration_terraformed.go create mode 100755 apis/apigateway/v1beta2/zz_integration_types.go create mode 100755 apis/apigateway/v1beta2/zz_methodsettings_terraformed.go create mode 100755 apis/apigateway/v1beta2/zz_methodsettings_types.go create mode 100755 apis/apigateway/v1beta2/zz_restapi_terraformed.go create mode 100755 apis/apigateway/v1beta2/zz_restapi_types.go create mode 100755 apis/apigateway/v1beta2/zz_stage_terraformed.go create mode 100755 apis/apigateway/v1beta2/zz_stage_types.go create mode 100755 apis/apigateway/v1beta2/zz_usageplan_terraformed.go create mode 100755 apis/apigateway/v1beta2/zz_usageplan_types.go create mode 100755 apis/apigatewayv2/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/apigatewayv2/v1beta2/zz_api_terraformed.go create mode 100755 apis/apigatewayv2/v1beta2/zz_api_types.go create mode 100755 apis/apigatewayv2/v1beta2/zz_authorizer_terraformed.go create mode 100755 apis/apigatewayv2/v1beta2/zz_authorizer_types.go create mode 100755 apis/apigatewayv2/v1beta2/zz_domainname_terraformed.go 
create mode 100755 apis/apigatewayv2/v1beta2/zz_domainname_types.go create mode 100755 apis/apigatewayv2/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/apigatewayv2/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/apigatewayv2/v1beta2/zz_generated.managed.go create mode 100644 apis/apigatewayv2/v1beta2/zz_generated.managedlist.go create mode 100644 apis/apigatewayv2/v1beta2/zz_generated.resolvers.go create mode 100755 apis/apigatewayv2/v1beta2/zz_groupversion_info.go create mode 100755 apis/apigatewayv2/v1beta2/zz_integration_terraformed.go create mode 100755 apis/apigatewayv2/v1beta2/zz_integration_types.go create mode 100755 apis/apigatewayv2/v1beta2/zz_stage_terraformed.go create mode 100755 apis/apigatewayv2/v1beta2/zz_stage_types.go create mode 100755 apis/appautoscaling/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/appautoscaling/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/appautoscaling/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/appautoscaling/v1beta2/zz_generated.managed.go create mode 100644 apis/appautoscaling/v1beta2/zz_generated.managedlist.go create mode 100644 apis/appautoscaling/v1beta2/zz_generated.resolvers.go create mode 100755 apis/appautoscaling/v1beta2/zz_groupversion_info.go create mode 100755 apis/appautoscaling/v1beta2/zz_policy_terraformed.go create mode 100755 apis/appautoscaling/v1beta2/zz_policy_types.go create mode 100755 apis/appautoscaling/v1beta2/zz_scheduledaction_terraformed.go create mode 100755 apis/appautoscaling/v1beta2/zz_scheduledaction_types.go create mode 100755 apis/appflow/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/appflow/v1beta2/zz_flow_terraformed.go create mode 100755 apis/appflow/v1beta2/zz_flow_types.go create mode 100755 apis/appflow/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/appflow/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/appflow/v1beta2/zz_generated.managed.go create mode 
100644 apis/appflow/v1beta2/zz_generated.managedlist.go create mode 100644 apis/appflow/v1beta2/zz_generated.resolvers.go create mode 100755 apis/appflow/v1beta2/zz_groupversion_info.go create mode 100755 apis/appintegrations/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/appintegrations/v1beta2/zz_eventintegration_terraformed.go create mode 100755 apis/appintegrations/v1beta2/zz_eventintegration_types.go create mode 100755 apis/appintegrations/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/appintegrations/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/appintegrations/v1beta2/zz_generated.managed.go create mode 100644 apis/appintegrations/v1beta2/zz_generated.managedlist.go create mode 100755 apis/appintegrations/v1beta2/zz_groupversion_info.go create mode 100755 apis/appmesh/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/appmesh/v1beta2/zz_gatewayroute_terraformed.go create mode 100755 apis/appmesh/v1beta2/zz_gatewayroute_types.go create mode 100755 apis/appmesh/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/appmesh/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/appmesh/v1beta2/zz_generated.managed.go create mode 100644 apis/appmesh/v1beta2/zz_generated.managedlist.go create mode 100644 apis/appmesh/v1beta2/zz_generated.resolvers.go create mode 100755 apis/appmesh/v1beta2/zz_groupversion_info.go create mode 100755 apis/appmesh/v1beta2/zz_mesh_terraformed.go create mode 100755 apis/appmesh/v1beta2/zz_mesh_types.go create mode 100755 apis/appmesh/v1beta2/zz_route_terraformed.go create mode 100755 apis/appmesh/v1beta2/zz_route_types.go create mode 100755 apis/appmesh/v1beta2/zz_virtualgateway_terraformed.go create mode 100755 apis/appmesh/v1beta2/zz_virtualgateway_types.go create mode 100755 apis/appmesh/v1beta2/zz_virtualnode_terraformed.go create mode 100755 apis/appmesh/v1beta2/zz_virtualnode_types.go create mode 100755 apis/appmesh/v1beta2/zz_virtualrouter_terraformed.go 
create mode 100755 apis/appmesh/v1beta2/zz_virtualrouter_types.go create mode 100755 apis/appmesh/v1beta2/zz_virtualservice_terraformed.go create mode 100755 apis/appmesh/v1beta2/zz_virtualservice_types.go create mode 100755 apis/apprunner/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/apprunner/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/apprunner/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/apprunner/v1beta2/zz_generated.managed.go create mode 100644 apis/apprunner/v1beta2/zz_generated.managedlist.go create mode 100644 apis/apprunner/v1beta2/zz_generated.resolvers.go create mode 100755 apis/apprunner/v1beta2/zz_groupversion_info.go create mode 100755 apis/apprunner/v1beta2/zz_observabilityconfiguration_terraformed.go create mode 100755 apis/apprunner/v1beta2/zz_observabilityconfiguration_types.go create mode 100755 apis/apprunner/v1beta2/zz_service_terraformed.go create mode 100755 apis/apprunner/v1beta2/zz_service_types.go create mode 100755 apis/appstream/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/appstream/v1beta2/zz_directoryconfig_terraformed.go create mode 100755 apis/appstream/v1beta2/zz_directoryconfig_types.go create mode 100755 apis/appstream/v1beta2/zz_fleet_terraformed.go create mode 100755 apis/appstream/v1beta2/zz_fleet_types.go create mode 100755 apis/appstream/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/appstream/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/appstream/v1beta2/zz_generated.managed.go create mode 100644 apis/appstream/v1beta2/zz_generated.managedlist.go create mode 100644 apis/appstream/v1beta2/zz_generated.resolvers.go create mode 100755 apis/appstream/v1beta2/zz_groupversion_info.go create mode 100755 apis/appstream/v1beta2/zz_imagebuilder_terraformed.go create mode 100755 apis/appstream/v1beta2/zz_imagebuilder_types.go create mode 100755 apis/appstream/v1beta2/zz_stack_terraformed.go create mode 100755 
apis/appstream/v1beta2/zz_stack_types.go create mode 100755 apis/appsync/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/appsync/v1beta2/zz_datasource_terraformed.go create mode 100755 apis/appsync/v1beta2/zz_datasource_types.go create mode 100755 apis/appsync/v1beta2/zz_function_terraformed.go create mode 100755 apis/appsync/v1beta2/zz_function_types.go create mode 100755 apis/appsync/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/appsync/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/appsync/v1beta2/zz_generated.managed.go create mode 100644 apis/appsync/v1beta2/zz_generated.managedlist.go create mode 100644 apis/appsync/v1beta2/zz_generated.resolvers.go create mode 100755 apis/appsync/v1beta2/zz_graphqlapi_terraformed.go create mode 100755 apis/appsync/v1beta2/zz_graphqlapi_types.go create mode 100755 apis/appsync/v1beta2/zz_groupversion_info.go create mode 100755 apis/appsync/v1beta2/zz_resolver_terraformed.go create mode 100755 apis/appsync/v1beta2/zz_resolver_types.go create mode 100755 apis/athena/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/athena/v1beta2/zz_database_terraformed.go create mode 100755 apis/athena/v1beta2/zz_database_types.go create mode 100755 apis/athena/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/athena/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/athena/v1beta2/zz_generated.managed.go create mode 100644 apis/athena/v1beta2/zz_generated.managedlist.go create mode 100644 apis/athena/v1beta2/zz_generated.resolvers.go create mode 100755 apis/athena/v1beta2/zz_groupversion_info.go create mode 100755 apis/athena/v1beta2/zz_workgroup_terraformed.go create mode 100755 apis/athena/v1beta2/zz_workgroup_types.go create mode 100755 apis/autoscaling/v1beta2/zz_generated.conversion_spokes.go create mode 100755 apis/autoscaling/v1beta2/zz_grouptag_terraformed.go create mode 100755 apis/autoscaling/v1beta2/zz_grouptag_types.go create mode 100755 
apis/autoscaling/v1beta2/zz_launchconfiguration_terraformed.go create mode 100755 apis/autoscaling/v1beta2/zz_launchconfiguration_types.go create mode 100755 apis/autoscaling/v1beta2/zz_policy_terraformed.go create mode 100755 apis/autoscaling/v1beta2/zz_policy_types.go create mode 100755 apis/autoscaling/v1beta3/zz_autoscalinggroup_terraformed.go create mode 100755 apis/autoscaling/v1beta3/zz_autoscalinggroup_types.go create mode 100755 apis/autoscaling/v1beta3/zz_generated.conversion_hubs.go create mode 100644 apis/autoscaling/v1beta3/zz_generated.deepcopy.go create mode 100644 apis/autoscaling/v1beta3/zz_generated.managed.go create mode 100644 apis/autoscaling/v1beta3/zz_generated.managedlist.go create mode 100644 apis/autoscaling/v1beta3/zz_generated.resolvers.go create mode 100755 apis/autoscaling/v1beta3/zz_groupversion_info.go create mode 100755 apis/autoscalingplans/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/autoscalingplans/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/autoscalingplans/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/autoscalingplans/v1beta2/zz_generated.managed.go create mode 100644 apis/autoscalingplans/v1beta2/zz_generated.managedlist.go create mode 100755 apis/autoscalingplans/v1beta2/zz_groupversion_info.go create mode 100755 apis/autoscalingplans/v1beta2/zz_scalingplan_terraformed.go create mode 100755 apis/autoscalingplans/v1beta2/zz_scalingplan_types.go create mode 100755 apis/backup/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/backup/v1beta2/zz_framework_terraformed.go create mode 100755 apis/backup/v1beta2/zz_framework_types.go create mode 100755 apis/backup/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/backup/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/backup/v1beta2/zz_generated.managed.go create mode 100644 apis/backup/v1beta2/zz_generated.managedlist.go create mode 100644 apis/backup/v1beta2/zz_generated.resolvers.go create 
mode 100755 apis/backup/v1beta2/zz_groupversion_info.go create mode 100755 apis/backup/v1beta2/zz_plan_terraformed.go create mode 100755 apis/backup/v1beta2/zz_plan_types.go create mode 100755 apis/backup/v1beta2/zz_reportplan_terraformed.go create mode 100755 apis/backup/v1beta2/zz_reportplan_types.go create mode 100755 apis/batch/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/batch/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/batch/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/batch/v1beta2/zz_generated.managed.go create mode 100644 apis/batch/v1beta2/zz_generated.managedlist.go create mode 100755 apis/batch/v1beta2/zz_groupversion_info.go create mode 100755 apis/batch/v1beta2/zz_jobdefinition_terraformed.go create mode 100755 apis/batch/v1beta2/zz_jobdefinition_types.go create mode 100755 apis/batch/v1beta2/zz_schedulingpolicy_terraformed.go create mode 100755 apis/batch/v1beta2/zz_schedulingpolicy_types.go create mode 100755 apis/budgets/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/budgets/v1beta2/zz_budget_terraformed.go create mode 100755 apis/budgets/v1beta2/zz_budget_types.go create mode 100755 apis/budgets/v1beta2/zz_budgetaction_terraformed.go create mode 100755 apis/budgets/v1beta2/zz_budgetaction_types.go create mode 100755 apis/budgets/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/budgets/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/budgets/v1beta2/zz_generated.managed.go create mode 100644 apis/budgets/v1beta2/zz_generated.managedlist.go create mode 100644 apis/budgets/v1beta2/zz_generated.resolvers.go create mode 100755 apis/budgets/v1beta2/zz_groupversion_info.go create mode 100755 apis/chime/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/chime/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/chime/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/chime/v1beta2/zz_generated.managed.go create mode 100644 
apis/chime/v1beta2/zz_generated.managedlist.go create mode 100644 apis/chime/v1beta2/zz_generated.resolvers.go create mode 100755 apis/chime/v1beta2/zz_groupversion_info.go create mode 100755 apis/chime/v1beta2/zz_voiceconnectorstreaming_terraformed.go create mode 100755 apis/chime/v1beta2/zz_voiceconnectorstreaming_types.go create mode 100755 apis/cloudformation/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/cloudformation/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/cloudformation/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/cloudformation/v1beta2/zz_generated.managed.go create mode 100644 apis/cloudformation/v1beta2/zz_generated.managedlist.go create mode 100644 apis/cloudformation/v1beta2/zz_generated.resolvers.go create mode 100755 apis/cloudformation/v1beta2/zz_groupversion_info.go create mode 100755 apis/cloudformation/v1beta2/zz_stackset_terraformed.go create mode 100755 apis/cloudformation/v1beta2/zz_stackset_types.go create mode 100755 apis/cloudformation/v1beta2/zz_stacksetinstance_terraformed.go create mode 100755 apis/cloudformation/v1beta2/zz_stacksetinstance_types.go create mode 100755 apis/cloudfront/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/cloudfront/v1beta2/zz_cachepolicy_terraformed.go create mode 100755 apis/cloudfront/v1beta2/zz_cachepolicy_types.go create mode 100755 apis/cloudfront/v1beta2/zz_distribution_terraformed.go create mode 100755 apis/cloudfront/v1beta2/zz_distribution_types.go create mode 100755 apis/cloudfront/v1beta2/zz_fieldlevelencryptionconfig_terraformed.go create mode 100755 apis/cloudfront/v1beta2/zz_fieldlevelencryptionconfig_types.go create mode 100755 apis/cloudfront/v1beta2/zz_fieldlevelencryptionprofile_terraformed.go create mode 100755 apis/cloudfront/v1beta2/zz_fieldlevelencryptionprofile_types.go create mode 100755 apis/cloudfront/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/cloudfront/v1beta2/zz_generated.deepcopy.go create 
mode 100644 apis/cloudfront/v1beta2/zz_generated.managed.go create mode 100644 apis/cloudfront/v1beta2/zz_generated.managedlist.go create mode 100644 apis/cloudfront/v1beta2/zz_generated.resolvers.go create mode 100755 apis/cloudfront/v1beta2/zz_groupversion_info.go create mode 100755 apis/cloudfront/v1beta2/zz_monitoringsubscription_terraformed.go create mode 100755 apis/cloudfront/v1beta2/zz_monitoringsubscription_types.go create mode 100755 apis/cloudfront/v1beta2/zz_originrequestpolicy_terraformed.go create mode 100755 apis/cloudfront/v1beta2/zz_originrequestpolicy_types.go create mode 100755 apis/cloudfront/v1beta2/zz_realtimelogconfig_terraformed.go create mode 100755 apis/cloudfront/v1beta2/zz_realtimelogconfig_types.go create mode 100755 apis/cloudfront/v1beta2/zz_responseheaderspolicy_terraformed.go create mode 100755 apis/cloudfront/v1beta2/zz_responseheaderspolicy_types.go create mode 100755 apis/cloudsearch/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/cloudsearch/v1beta2/zz_domain_terraformed.go create mode 100755 apis/cloudsearch/v1beta2/zz_domain_types.go create mode 100755 apis/cloudsearch/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/cloudsearch/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/cloudsearch/v1beta2/zz_generated.managed.go create mode 100644 apis/cloudsearch/v1beta2/zz_generated.managedlist.go create mode 100755 apis/cloudsearch/v1beta2/zz_groupversion_info.go create mode 100755 apis/cloudwatch/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/cloudwatch/v1beta2/zz_compositealarm_terraformed.go create mode 100755 apis/cloudwatch/v1beta2/zz_compositealarm_types.go create mode 100755 apis/cloudwatch/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/cloudwatch/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/cloudwatch/v1beta2/zz_generated.managed.go create mode 100644 apis/cloudwatch/v1beta2/zz_generated.managedlist.go create mode 100644 
apis/cloudwatch/v1beta2/zz_generated.resolvers.go create mode 100755 apis/cloudwatch/v1beta2/zz_groupversion_info.go create mode 100755 apis/cloudwatch/v1beta2/zz_metricalarm_terraformed.go create mode 100755 apis/cloudwatch/v1beta2/zz_metricalarm_types.go create mode 100755 apis/cloudwatchevents/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/cloudwatchevents/v1beta2/zz_connection_terraformed.go create mode 100755 apis/cloudwatchevents/v1beta2/zz_connection_types.go create mode 100755 apis/cloudwatchevents/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/cloudwatchevents/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/cloudwatchevents/v1beta2/zz_generated.managed.go create mode 100644 apis/cloudwatchevents/v1beta2/zz_generated.managedlist.go create mode 100644 apis/cloudwatchevents/v1beta2/zz_generated.resolvers.go create mode 100755 apis/cloudwatchevents/v1beta2/zz_groupversion_info.go create mode 100755 apis/cloudwatchevents/v1beta2/zz_permission_terraformed.go create mode 100755 apis/cloudwatchevents/v1beta2/zz_permission_types.go create mode 100755 apis/cloudwatchevents/v1beta2/zz_target_terraformed.go create mode 100755 apis/cloudwatchevents/v1beta2/zz_target_types.go create mode 100755 apis/cloudwatchlogs/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/cloudwatchlogs/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/cloudwatchlogs/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/cloudwatchlogs/v1beta2/zz_generated.managed.go create mode 100644 apis/cloudwatchlogs/v1beta2/zz_generated.managedlist.go create mode 100644 apis/cloudwatchlogs/v1beta2/zz_generated.resolvers.go create mode 100755 apis/cloudwatchlogs/v1beta2/zz_groupversion_info.go create mode 100755 apis/cloudwatchlogs/v1beta2/zz_metricfilter_terraformed.go create mode 100755 apis/cloudwatchlogs/v1beta2/zz_metricfilter_types.go create mode 100755 apis/codepipeline/v1beta1/zz_generated.conversion_spokes.go create mode 
100755 apis/codepipeline/v1beta2/zz_codepipeline_terraformed.go create mode 100755 apis/codepipeline/v1beta2/zz_codepipeline_types.go create mode 100755 apis/codepipeline/v1beta2/zz_customactiontype_terraformed.go create mode 100755 apis/codepipeline/v1beta2/zz_customactiontype_types.go create mode 100755 apis/codepipeline/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/codepipeline/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/codepipeline/v1beta2/zz_generated.managed.go create mode 100644 apis/codepipeline/v1beta2/zz_generated.managedlist.go create mode 100644 apis/codepipeline/v1beta2/zz_generated.resolvers.go create mode 100755 apis/codepipeline/v1beta2/zz_groupversion_info.go create mode 100755 apis/codepipeline/v1beta2/zz_webhook_terraformed.go create mode 100755 apis/codepipeline/v1beta2/zz_webhook_types.go create mode 100755 apis/codestarconnections/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/codestarconnections/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/codestarconnections/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/codestarconnections/v1beta2/zz_generated.managed.go create mode 100644 apis/codestarconnections/v1beta2/zz_generated.managedlist.go create mode 100755 apis/codestarconnections/v1beta2/zz_groupversion_info.go create mode 100755 apis/codestarconnections/v1beta2/zz_host_terraformed.go create mode 100755 apis/codestarconnections/v1beta2/zz_host_types.go create mode 100755 apis/cognitoidp/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/cognitoidp/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/cognitoidp/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/cognitoidp/v1beta2/zz_generated.managed.go create mode 100644 apis/cognitoidp/v1beta2/zz_generated.managedlist.go create mode 100644 apis/cognitoidp/v1beta2/zz_generated.resolvers.go create mode 100755 apis/cognitoidp/v1beta2/zz_groupversion_info.go create mode 100755 
apis/cognitoidp/v1beta2/zz_riskconfiguration_terraformed.go create mode 100755 apis/cognitoidp/v1beta2/zz_riskconfiguration_types.go create mode 100755 apis/cognitoidp/v1beta2/zz_userpool_terraformed.go create mode 100755 apis/cognitoidp/v1beta2/zz_userpool_types.go create mode 100755 apis/configservice/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/configservice/v1beta2/zz_configrule_terraformed.go create mode 100755 apis/configservice/v1beta2/zz_configrule_types.go create mode 100755 apis/configservice/v1beta2/zz_configurationaggregator_terraformed.go create mode 100755 apis/configservice/v1beta2/zz_configurationaggregator_types.go create mode 100755 apis/configservice/v1beta2/zz_configurationrecorder_terraformed.go create mode 100755 apis/configservice/v1beta2/zz_configurationrecorder_types.go create mode 100755 apis/configservice/v1beta2/zz_deliverychannel_terraformed.go create mode 100755 apis/configservice/v1beta2/zz_deliverychannel_types.go create mode 100755 apis/configservice/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/configservice/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/configservice/v1beta2/zz_generated.managed.go create mode 100644 apis/configservice/v1beta2/zz_generated.managedlist.go create mode 100644 apis/configservice/v1beta2/zz_generated.resolvers.go create mode 100755 apis/configservice/v1beta2/zz_groupversion_info.go create mode 100755 apis/configservice/v1beta2/zz_remediationconfiguration_terraformed.go create mode 100755 apis/configservice/v1beta2/zz_remediationconfiguration_types.go create mode 100755 apis/connect/v1beta2/zz_botassociation_terraformed.go create mode 100755 apis/connect/v1beta2/zz_botassociation_types.go create mode 100755 apis/connect/v1beta2/zz_generated.conversion_spokes.go create mode 100755 apis/connect/v1beta2/zz_instancestorageconfig_terraformed.go create mode 100755 apis/connect/v1beta2/zz_instancestorageconfig_types.go create mode 100755 
apis/connect/v1beta2/zz_quickconnect_terraformed.go create mode 100755 apis/connect/v1beta2/zz_quickconnect_types.go create mode 100755 apis/connect/v1beta2/zz_user_terraformed.go create mode 100755 apis/connect/v1beta2/zz_user_types.go create mode 100755 apis/connect/v1beta2/zz_userhierarchystructure_terraformed.go create mode 100755 apis/connect/v1beta2/zz_userhierarchystructure_types.go create mode 100755 apis/connect/v1beta3/zz_generated.conversion_hubs.go create mode 100644 apis/connect/v1beta3/zz_generated.deepcopy.go create mode 100644 apis/connect/v1beta3/zz_generated.managed.go create mode 100644 apis/connect/v1beta3/zz_generated.managedlist.go create mode 100644 apis/connect/v1beta3/zz_generated.resolvers.go create mode 100755 apis/connect/v1beta3/zz_groupversion_info.go create mode 100755 apis/connect/v1beta3/zz_hoursofoperation_terraformed.go create mode 100755 apis/connect/v1beta3/zz_hoursofoperation_types.go create mode 100755 apis/connect/v1beta3/zz_queue_terraformed.go create mode 100755 apis/connect/v1beta3/zz_queue_types.go create mode 100755 apis/datasync/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/datasync/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/datasync/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/datasync/v1beta2/zz_generated.managed.go create mode 100644 apis/datasync/v1beta2/zz_generated.managedlist.go create mode 100644 apis/datasync/v1beta2/zz_generated.resolvers.go create mode 100755 apis/datasync/v1beta2/zz_groupversion_info.go create mode 100755 apis/datasync/v1beta2/zz_locations3_terraformed.go create mode 100755 apis/datasync/v1beta2/zz_locations3_types.go create mode 100755 apis/datasync/v1beta2/zz_task_terraformed.go create mode 100755 apis/datasync/v1beta2/zz_task_types.go create mode 100755 apis/dax/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/dax/v1beta2/zz_cluster_terraformed.go create mode 100755 apis/dax/v1beta2/zz_cluster_types.go create mode 
100755 apis/dax/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/dax/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/dax/v1beta2/zz_generated.managed.go create mode 100644 apis/dax/v1beta2/zz_generated.managedlist.go create mode 100644 apis/dax/v1beta2/zz_generated.resolvers.go create mode 100755 apis/dax/v1beta2/zz_groupversion_info.go create mode 100755 apis/deploy/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/deploy/v1beta2/zz_deploymentconfig_terraformed.go create mode 100755 apis/deploy/v1beta2/zz_deploymentconfig_types.go create mode 100755 apis/deploy/v1beta2/zz_deploymentgroup_terraformed.go create mode 100755 apis/deploy/v1beta2/zz_deploymentgroup_types.go create mode 100755 apis/deploy/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/deploy/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/deploy/v1beta2/zz_generated.managed.go create mode 100644 apis/deploy/v1beta2/zz_generated.managedlist.go create mode 100644 apis/deploy/v1beta2/zz_generated.resolvers.go create mode 100755 apis/deploy/v1beta2/zz_groupversion_info.go create mode 100755 apis/devicefarm/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/devicefarm/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/devicefarm/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/devicefarm/v1beta2/zz_generated.managed.go create mode 100644 apis/devicefarm/v1beta2/zz_generated.managedlist.go create mode 100644 apis/devicefarm/v1beta2/zz_generated.resolvers.go create mode 100755 apis/devicefarm/v1beta2/zz_groupversion_info.go create mode 100755 apis/devicefarm/v1beta2/zz_testgridproject_terraformed.go create mode 100755 apis/devicefarm/v1beta2/zz_testgridproject_types.go create mode 100755 apis/dlm/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/dlm/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/dlm/v1beta2/zz_generated.deepcopy.go create mode 100644 
apis/dlm/v1beta2/zz_generated.managed.go create mode 100644 apis/dlm/v1beta2/zz_generated.managedlist.go create mode 100644 apis/dlm/v1beta2/zz_generated.resolvers.go create mode 100755 apis/dlm/v1beta2/zz_groupversion_info.go create mode 100755 apis/dlm/v1beta2/zz_lifecyclepolicy_terraformed.go create mode 100755 apis/dlm/v1beta2/zz_lifecyclepolicy_types.go create mode 100755 apis/dms/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/dms/v1beta2/zz_endpoint_terraformed.go create mode 100755 apis/dms/v1beta2/zz_endpoint_types.go create mode 100755 apis/dms/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/dms/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/dms/v1beta2/zz_generated.managed.go create mode 100644 apis/dms/v1beta2/zz_generated.managedlist.go create mode 100644 apis/dms/v1beta2/zz_generated.resolvers.go create mode 100755 apis/dms/v1beta2/zz_groupversion_info.go create mode 100755 apis/ds/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/ds/v1beta2/zz_directory_terraformed.go create mode 100755 apis/ds/v1beta2/zz_directory_types.go create mode 100755 apis/ds/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/ds/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/ds/v1beta2/zz_generated.managed.go create mode 100644 apis/ds/v1beta2/zz_generated.managedlist.go create mode 100644 apis/ds/v1beta2/zz_generated.resolvers.go create mode 100755 apis/ds/v1beta2/zz_groupversion_info.go create mode 100755 apis/ds/v1beta2/zz_shareddirectory_terraformed.go create mode 100755 apis/ds/v1beta2/zz_shareddirectory_types.go create mode 100755 apis/dynamodb/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/dynamodb/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/dynamodb/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/dynamodb/v1beta2/zz_generated.managed.go create mode 100644 apis/dynamodb/v1beta2/zz_generated.managedlist.go create mode 100755 
apis/dynamodb/v1beta2/zz_groupversion_info.go create mode 100755 apis/dynamodb/v1beta2/zz_table_terraformed.go create mode 100755 apis/dynamodb/v1beta2/zz_table_types.go create mode 100755 apis/ec2/v1beta2/zz_ebssnapshotimport_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_ebssnapshotimport_types.go create mode 100755 apis/ec2/v1beta2/zz_flowlog_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_flowlog_types.go create mode 100755 apis/ec2/v1beta2/zz_instance_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_instance_types.go create mode 100755 apis/ec2/v1beta2/zz_launchtemplate_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_launchtemplate_types.go create mode 100755 apis/ec2/v1beta2/zz_spotfleetrequest_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_spotfleetrequest_types.go create mode 100755 apis/ec2/v1beta2/zz_spotinstancerequest_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_spotinstancerequest_types.go create mode 100755 apis/ec2/v1beta2/zz_trafficmirrorfilterrule_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_trafficmirrorfilterrule_types.go create mode 100755 apis/ec2/v1beta2/zz_vpcendpoint_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_vpcendpoint_types.go create mode 100755 apis/ec2/v1beta2/zz_vpcipampoolcidr_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_vpcipampoolcidr_types.go create mode 100755 apis/ec2/v1beta2/zz_vpcpeeringconnection_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_vpcpeeringconnection_types.go create mode 100755 apis/ec2/v1beta2/zz_vpcpeeringconnectionaccepter_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_vpcpeeringconnectionaccepter_types.go create mode 100755 apis/ec2/v1beta2/zz_vpcpeeringconnectionoptions_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_vpcpeeringconnectionoptions_types.go create mode 100755 apis/ec2/v1beta2/zz_vpnconnection_terraformed.go create mode 100755 apis/ec2/v1beta2/zz_vpnconnection_types.go create mode 100755 
apis/ecr/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/ecr/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/ecr/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/ecr/v1beta2/zz_generated.managed.go create mode 100644 apis/ecr/v1beta2/zz_generated.managedlist.go create mode 100644 apis/ecr/v1beta2/zz_generated.resolvers.go create mode 100755 apis/ecr/v1beta2/zz_groupversion_info.go create mode 100755 apis/ecr/v1beta2/zz_replicationconfiguration_terraformed.go create mode 100755 apis/ecr/v1beta2/zz_replicationconfiguration_types.go create mode 100755 apis/ecr/v1beta2/zz_repository_terraformed.go create mode 100755 apis/ecr/v1beta2/zz_repository_types.go create mode 100755 apis/ecrpublic/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/ecrpublic/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/ecrpublic/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/ecrpublic/v1beta2/zz_generated.managed.go create mode 100644 apis/ecrpublic/v1beta2/zz_generated.managedlist.go create mode 100755 apis/ecrpublic/v1beta2/zz_groupversion_info.go create mode 100755 apis/ecrpublic/v1beta2/zz_repository_terraformed.go create mode 100755 apis/ecrpublic/v1beta2/zz_repository_types.go create mode 100755 apis/ecs/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/ecs/v1beta2/zz_capacityprovider_terraformed.go create mode 100755 apis/ecs/v1beta2/zz_capacityprovider_types.go create mode 100755 apis/ecs/v1beta2/zz_cluster_terraformed.go create mode 100755 apis/ecs/v1beta2/zz_cluster_types.go create mode 100755 apis/ecs/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/ecs/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/ecs/v1beta2/zz_generated.managed.go create mode 100644 apis/ecs/v1beta2/zz_generated.managedlist.go create mode 100644 apis/ecs/v1beta2/zz_generated.resolvers.go create mode 100755 apis/ecs/v1beta2/zz_groupversion_info.go create mode 100755 
apis/ecs/v1beta2/zz_service_terraformed.go create mode 100755 apis/ecs/v1beta2/zz_service_types.go create mode 100755 apis/ecs/v1beta2/zz_taskdefinition_terraformed.go create mode 100755 apis/ecs/v1beta2/zz_taskdefinition_types.go create mode 100755 apis/efs/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/efs/v1beta2/zz_accesspoint_terraformed.go create mode 100755 apis/efs/v1beta2/zz_accesspoint_types.go create mode 100755 apis/efs/v1beta2/zz_backuppolicy_terraformed.go create mode 100755 apis/efs/v1beta2/zz_backuppolicy_types.go create mode 100755 apis/efs/v1beta2/zz_filesystem_terraformed.go create mode 100755 apis/efs/v1beta2/zz_filesystem_types.go create mode 100755 apis/efs/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/efs/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/efs/v1beta2/zz_generated.managed.go create mode 100644 apis/efs/v1beta2/zz_generated.managedlist.go create mode 100644 apis/efs/v1beta2/zz_generated.resolvers.go create mode 100755 apis/efs/v1beta2/zz_groupversion_info.go create mode 100755 apis/efs/v1beta2/zz_replicationconfiguration_terraformed.go create mode 100755 apis/efs/v1beta2/zz_replicationconfiguration_types.go create mode 100755 apis/eks/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/eks/v1beta2/zz_cluster_terraformed.go create mode 100755 apis/eks/v1beta2/zz_cluster_types.go create mode 100755 apis/eks/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/eks/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/eks/v1beta2/zz_generated.managed.go create mode 100644 apis/eks/v1beta2/zz_generated.managedlist.go create mode 100644 apis/eks/v1beta2/zz_generated.resolvers.go create mode 100755 apis/eks/v1beta2/zz_groupversion_info.go create mode 100755 apis/eks/v1beta2/zz_identityproviderconfig_terraformed.go create mode 100755 apis/eks/v1beta2/zz_identityproviderconfig_types.go create mode 100755 apis/eks/v1beta2/zz_nodegroup_terraformed.go create mode 100755 
apis/eks/v1beta2/zz_nodegroup_types.go create mode 100755 apis/elasticache/v1beta2/zz_user_terraformed.go create mode 100755 apis/elasticache/v1beta2/zz_user_types.go create mode 100755 apis/elasticbeanstalk/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/elasticbeanstalk/v1beta2/zz_application_terraformed.go create mode 100755 apis/elasticbeanstalk/v1beta2/zz_application_types.go create mode 100755 apis/elasticbeanstalk/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/elasticbeanstalk/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/elasticbeanstalk/v1beta2/zz_generated.managed.go create mode 100644 apis/elasticbeanstalk/v1beta2/zz_generated.managedlist.go create mode 100644 apis/elasticbeanstalk/v1beta2/zz_generated.resolvers.go create mode 100755 apis/elasticbeanstalk/v1beta2/zz_groupversion_info.go create mode 100755 apis/elasticsearch/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/elasticsearch/v1beta2/zz_domain_terraformed.go create mode 100755 apis/elasticsearch/v1beta2/zz_domain_types.go create mode 100755 apis/elasticsearch/v1beta2/zz_domainsamloptions_terraformed.go create mode 100755 apis/elasticsearch/v1beta2/zz_domainsamloptions_types.go create mode 100755 apis/elasticsearch/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/elasticsearch/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/elasticsearch/v1beta2/zz_generated.managed.go create mode 100644 apis/elasticsearch/v1beta2/zz_generated.managedlist.go create mode 100644 apis/elasticsearch/v1beta2/zz_generated.resolvers.go create mode 100755 apis/elasticsearch/v1beta2/zz_groupversion_info.go create mode 100755 apis/elastictranscoder/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/elastictranscoder/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/elastictranscoder/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/elastictranscoder/v1beta2/zz_generated.managed.go create mode 100644 
apis/elastictranscoder/v1beta2/zz_generated.managedlist.go create mode 100644 apis/elastictranscoder/v1beta2/zz_generated.resolvers.go create mode 100755 apis/elastictranscoder/v1beta2/zz_groupversion_info.go create mode 100755 apis/elastictranscoder/v1beta2/zz_pipeline_terraformed.go create mode 100755 apis/elastictranscoder/v1beta2/zz_pipeline_types.go create mode 100755 apis/elastictranscoder/v1beta2/zz_preset_terraformed.go create mode 100755 apis/elastictranscoder/v1beta2/zz_preset_types.go create mode 100755 apis/elb/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/elb/v1beta2/zz_elb_terraformed.go create mode 100755 apis/elb/v1beta2/zz_elb_types.go create mode 100755 apis/elb/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/elb/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/elb/v1beta2/zz_generated.managed.go create mode 100644 apis/elb/v1beta2/zz_generated.managedlist.go create mode 100644 apis/elb/v1beta2/zz_generated.resolvers.go create mode 100755 apis/elb/v1beta2/zz_groupversion_info.go create mode 100755 apis/elbv2/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/elbv2/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/elbv2/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/elbv2/v1beta2/zz_generated.managed.go create mode 100644 apis/elbv2/v1beta2/zz_generated.managedlist.go create mode 100644 apis/elbv2/v1beta2/zz_generated.resolvers.go create mode 100755 apis/elbv2/v1beta2/zz_groupversion_info.go create mode 100755 apis/elbv2/v1beta2/zz_lb_terraformed.go create mode 100755 apis/elbv2/v1beta2/zz_lb_types.go create mode 100755 apis/elbv2/v1beta2/zz_lblistener_terraformed.go create mode 100755 apis/elbv2/v1beta2/zz_lblistener_types.go create mode 100755 apis/elbv2/v1beta2/zz_lblistenerrule_terraformed.go create mode 100755 apis/elbv2/v1beta2/zz_lblistenerrule_types.go create mode 100755 apis/elbv2/v1beta2/zz_lbtargetgroup_terraformed.go create mode 100755 
apis/elbv2/v1beta2/zz_lbtargetgroup_types.go create mode 100755 apis/emrserverless/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/emrserverless/v1beta2/zz_application_terraformed.go create mode 100755 apis/emrserverless/v1beta2/zz_application_types.go create mode 100755 apis/emrserverless/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/emrserverless/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/emrserverless/v1beta2/zz_generated.managed.go create mode 100644 apis/emrserverless/v1beta2/zz_generated.managedlist.go create mode 100755 apis/emrserverless/v1beta2/zz_groupversion_info.go create mode 100755 apis/evidently/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/evidently/v1beta2/zz_feature_terraformed.go create mode 100755 apis/evidently/v1beta2/zz_feature_types.go create mode 100755 apis/evidently/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/evidently/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/evidently/v1beta2/zz_generated.managed.go create mode 100644 apis/evidently/v1beta2/zz_generated.managedlist.go create mode 100644 apis/evidently/v1beta2/zz_generated.resolvers.go create mode 100755 apis/evidently/v1beta2/zz_groupversion_info.go create mode 100755 apis/evidently/v1beta2/zz_project_terraformed.go create mode 100755 apis/evidently/v1beta2/zz_project_types.go create mode 100755 apis/firehose/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/firehose/v1beta2/zz_deliverystream_terraformed.go create mode 100755 apis/firehose/v1beta2/zz_deliverystream_types.go create mode 100755 apis/firehose/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/firehose/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/firehose/v1beta2/zz_generated.managed.go create mode 100644 apis/firehose/v1beta2/zz_generated.managedlist.go create mode 100644 apis/firehose/v1beta2/zz_generated.resolvers.go create mode 100755 
apis/firehose/v1beta2/zz_groupversion_info.go create mode 100755 apis/fis/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/fis/v1beta2/zz_experimenttemplate_terraformed.go create mode 100755 apis/fis/v1beta2/zz_experimenttemplate_types.go create mode 100755 apis/fis/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/fis/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/fis/v1beta2/zz_generated.managed.go create mode 100644 apis/fis/v1beta2/zz_generated.managedlist.go create mode 100644 apis/fis/v1beta2/zz_generated.resolvers.go create mode 100755 apis/fis/v1beta2/zz_groupversion_info.go create mode 100755 apis/fsx/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/fsx/v1beta2/zz_datarepositoryassociation_terraformed.go create mode 100755 apis/fsx/v1beta2/zz_datarepositoryassociation_types.go create mode 100755 apis/fsx/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/fsx/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/fsx/v1beta2/zz_generated.managed.go create mode 100644 apis/fsx/v1beta2/zz_generated.managedlist.go create mode 100644 apis/fsx/v1beta2/zz_generated.resolvers.go create mode 100755 apis/fsx/v1beta2/zz_groupversion_info.go create mode 100755 apis/fsx/v1beta2/zz_lustrefilesystem_terraformed.go create mode 100755 apis/fsx/v1beta2/zz_lustrefilesystem_types.go create mode 100755 apis/fsx/v1beta2/zz_ontapfilesystem_terraformed.go create mode 100755 apis/fsx/v1beta2/zz_ontapfilesystem_types.go create mode 100755 apis/fsx/v1beta2/zz_ontapstoragevirtualmachine_terraformed.go create mode 100755 apis/fsx/v1beta2/zz_ontapstoragevirtualmachine_types.go create mode 100755 apis/fsx/v1beta2/zz_windowsfilesystem_terraformed.go create mode 100755 apis/fsx/v1beta2/zz_windowsfilesystem_types.go create mode 100755 apis/gamelift/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/gamelift/v1beta2/zz_alias_terraformed.go create mode 100755 apis/gamelift/v1beta2/zz_alias_types.go create 
mode 100755 apis/gamelift/v1beta2/zz_build_terraformed.go create mode 100755 apis/gamelift/v1beta2/zz_build_types.go create mode 100755 apis/gamelift/v1beta2/zz_fleet_terraformed.go create mode 100755 apis/gamelift/v1beta2/zz_fleet_types.go create mode 100755 apis/gamelift/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/gamelift/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/gamelift/v1beta2/zz_generated.managed.go create mode 100644 apis/gamelift/v1beta2/zz_generated.managedlist.go create mode 100644 apis/gamelift/v1beta2/zz_generated.resolvers.go create mode 100755 apis/gamelift/v1beta2/zz_groupversion_info.go create mode 100755 apis/gamelift/v1beta2/zz_script_terraformed.go create mode 100755 apis/gamelift/v1beta2/zz_script_types.go create mode 100755 apis/glacier/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/glacier/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/glacier/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/glacier/v1beta2/zz_generated.managed.go create mode 100644 apis/glacier/v1beta2/zz_generated.managedlist.go create mode 100644 apis/glacier/v1beta2/zz_generated.resolvers.go create mode 100755 apis/glacier/v1beta2/zz_groupversion_info.go create mode 100755 apis/glacier/v1beta2/zz_vault_terraformed.go create mode 100755 apis/glacier/v1beta2/zz_vault_types.go create mode 100755 apis/globalaccelerator/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/globalaccelerator/v1beta2/zz_accelerator_terraformed.go create mode 100755 apis/globalaccelerator/v1beta2/zz_accelerator_types.go create mode 100755 apis/globalaccelerator/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/globalaccelerator/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/globalaccelerator/v1beta2/zz_generated.managed.go create mode 100644 apis/globalaccelerator/v1beta2/zz_generated.managedlist.go create mode 100755 apis/globalaccelerator/v1beta2/zz_groupversion_info.go create mode 
100755 apis/glue/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/glue/v1beta2/zz_catalogdatabase_terraformed.go create mode 100755 apis/glue/v1beta2/zz_catalogdatabase_types.go create mode 100755 apis/glue/v1beta2/zz_catalogtable_terraformed.go create mode 100755 apis/glue/v1beta2/zz_catalogtable_types.go create mode 100755 apis/glue/v1beta2/zz_classifier_terraformed.go create mode 100755 apis/glue/v1beta2/zz_classifier_types.go create mode 100755 apis/glue/v1beta2/zz_connection_terraformed.go create mode 100755 apis/glue/v1beta2/zz_connection_types.go create mode 100755 apis/glue/v1beta2/zz_crawler_terraformed.go create mode 100755 apis/glue/v1beta2/zz_crawler_types.go create mode 100755 apis/glue/v1beta2/zz_datacatalogencryptionsettings_terraformed.go create mode 100755 apis/glue/v1beta2/zz_datacatalogencryptionsettings_types.go create mode 100755 apis/glue/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/glue/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/glue/v1beta2/zz_generated.managed.go create mode 100644 apis/glue/v1beta2/zz_generated.managedlist.go create mode 100644 apis/glue/v1beta2/zz_generated.resolvers.go create mode 100755 apis/glue/v1beta2/zz_groupversion_info.go create mode 100755 apis/glue/v1beta2/zz_job_terraformed.go create mode 100755 apis/glue/v1beta2/zz_job_types.go create mode 100755 apis/glue/v1beta2/zz_securityconfiguration_terraformed.go create mode 100755 apis/glue/v1beta2/zz_securityconfiguration_types.go create mode 100755 apis/glue/v1beta2/zz_trigger_terraformed.go create mode 100755 apis/glue/v1beta2/zz_trigger_types.go create mode 100755 apis/grafana/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/grafana/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/grafana/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/grafana/v1beta2/zz_generated.managed.go create mode 100644 apis/grafana/v1beta2/zz_generated.managedlist.go create mode 100644 
apis/grafana/v1beta2/zz_generated.resolvers.go create mode 100755 apis/grafana/v1beta2/zz_groupversion_info.go create mode 100755 apis/grafana/v1beta2/zz_workspace_terraformed.go create mode 100755 apis/grafana/v1beta2/zz_workspace_types.go create mode 100755 apis/guardduty/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/guardduty/v1beta2/zz_detector_terraformed.go create mode 100755 apis/guardduty/v1beta2/zz_detector_types.go create mode 100755 apis/guardduty/v1beta2/zz_filter_terraformed.go create mode 100755 apis/guardduty/v1beta2/zz_filter_types.go create mode 100755 apis/guardduty/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/guardduty/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/guardduty/v1beta2/zz_generated.managed.go create mode 100644 apis/guardduty/v1beta2/zz_generated.managedlist.go create mode 100644 apis/guardduty/v1beta2/zz_generated.resolvers.go create mode 100755 apis/guardduty/v1beta2/zz_groupversion_info.go create mode 100755 apis/identitystore/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/identitystore/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/identitystore/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/identitystore/v1beta2/zz_generated.managed.go create mode 100644 apis/identitystore/v1beta2/zz_generated.managedlist.go create mode 100755 apis/identitystore/v1beta2/zz_groupversion_info.go create mode 100755 apis/identitystore/v1beta2/zz_user_terraformed.go create mode 100755 apis/identitystore/v1beta2/zz_user_types.go create mode 100755 apis/imagebuilder/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/imagebuilder/v1beta2/zz_containerrecipe_terraformed.go create mode 100755 apis/imagebuilder/v1beta2/zz_containerrecipe_types.go create mode 100755 apis/imagebuilder/v1beta2/zz_distributionconfiguration_terraformed.go create mode 100755 apis/imagebuilder/v1beta2/zz_distributionconfiguration_types.go create mode 100755 
apis/imagebuilder/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/imagebuilder/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/imagebuilder/v1beta2/zz_generated.managed.go create mode 100644 apis/imagebuilder/v1beta2/zz_generated.managedlist.go create mode 100644 apis/imagebuilder/v1beta2/zz_generated.resolvers.go create mode 100755 apis/imagebuilder/v1beta2/zz_groupversion_info.go create mode 100755 apis/imagebuilder/v1beta2/zz_image_terraformed.go create mode 100755 apis/imagebuilder/v1beta2/zz_image_types.go create mode 100755 apis/imagebuilder/v1beta2/zz_imagepipeline_terraformed.go create mode 100755 apis/imagebuilder/v1beta2/zz_imagepipeline_types.go create mode 100755 apis/imagebuilder/v1beta2/zz_imagerecipe_terraformed.go create mode 100755 apis/imagebuilder/v1beta2/zz_imagerecipe_types.go create mode 100755 apis/imagebuilder/v1beta2/zz_infrastructureconfiguration_terraformed.go create mode 100755 apis/imagebuilder/v1beta2/zz_infrastructureconfiguration_types.go create mode 100755 apis/iot/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/iot/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/iot/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/iot/v1beta2/zz_generated.managed.go create mode 100644 apis/iot/v1beta2/zz_generated.managedlist.go create mode 100644 apis/iot/v1beta2/zz_generated.resolvers.go create mode 100755 apis/iot/v1beta2/zz_groupversion_info.go create mode 100755 apis/iot/v1beta2/zz_indexingconfiguration_terraformed.go create mode 100755 apis/iot/v1beta2/zz_indexingconfiguration_types.go create mode 100755 apis/iot/v1beta2/zz_provisioningtemplate_terraformed.go create mode 100755 apis/iot/v1beta2/zz_provisioningtemplate_types.go create mode 100755 apis/iot/v1beta2/zz_thinggroup_terraformed.go create mode 100755 apis/iot/v1beta2/zz_thinggroup_types.go create mode 100755 apis/iot/v1beta2/zz_thingtype_terraformed.go create mode 100755 apis/iot/v1beta2/zz_thingtype_types.go create 
mode 100755 apis/iot/v1beta2/zz_topicrule_terraformed.go create mode 100755 apis/iot/v1beta2/zz_topicrule_types.go create mode 100755 apis/iot/v1beta2/zz_topicruledestination_terraformed.go create mode 100755 apis/iot/v1beta2/zz_topicruledestination_types.go create mode 100755 apis/ivs/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/ivs/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/ivs/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/ivs/v1beta2/zz_generated.managed.go create mode 100644 apis/ivs/v1beta2/zz_generated.managedlist.go create mode 100755 apis/ivs/v1beta2/zz_groupversion_info.go create mode 100755 apis/ivs/v1beta2/zz_recordingconfiguration_terraformed.go create mode 100755 apis/ivs/v1beta2/zz_recordingconfiguration_types.go create mode 100755 apis/kafka/v1beta2/zz_generated.conversion_spokes.go create mode 100755 apis/kafka/v1beta2/zz_serverlesscluster_terraformed.go create mode 100755 apis/kafka/v1beta2/zz_serverlesscluster_types.go create mode 100755 apis/kafka/v1beta3/zz_cluster_terraformed.go create mode 100755 apis/kafka/v1beta3/zz_cluster_types.go create mode 100755 apis/kafka/v1beta3/zz_generated.conversion_hubs.go create mode 100644 apis/kafka/v1beta3/zz_generated.deepcopy.go create mode 100644 apis/kafka/v1beta3/zz_generated.managed.go create mode 100644 apis/kafka/v1beta3/zz_generated.managedlist.go create mode 100644 apis/kafka/v1beta3/zz_generated.resolvers.go create mode 100755 apis/kafka/v1beta3/zz_groupversion_info.go create mode 100755 apis/kafkaconnect/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/kafkaconnect/v1beta2/zz_connector_terraformed.go create mode 100755 apis/kafkaconnect/v1beta2/zz_connector_types.go create mode 100755 apis/kafkaconnect/v1beta2/zz_customplugin_terraformed.go create mode 100755 apis/kafkaconnect/v1beta2/zz_customplugin_types.go create mode 100755 apis/kafkaconnect/v1beta2/zz_generated.conversion_hubs.go create mode 100644 
apis/kafkaconnect/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/kafkaconnect/v1beta2/zz_generated.managed.go create mode 100644 apis/kafkaconnect/v1beta2/zz_generated.managedlist.go create mode 100644 apis/kafkaconnect/v1beta2/zz_generated.resolvers.go create mode 100755 apis/kafkaconnect/v1beta2/zz_groupversion_info.go create mode 100755 apis/kendra/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/kendra/v1beta2/zz_datasource_terraformed.go create mode 100755 apis/kendra/v1beta2/zz_datasource_types.go create mode 100755 apis/kendra/v1beta2/zz_experience_terraformed.go create mode 100755 apis/kendra/v1beta2/zz_experience_types.go create mode 100755 apis/kendra/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/kendra/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/kendra/v1beta2/zz_generated.managed.go create mode 100644 apis/kendra/v1beta2/zz_generated.managedlist.go create mode 100644 apis/kendra/v1beta2/zz_generated.resolvers.go create mode 100755 apis/kendra/v1beta2/zz_groupversion_info.go create mode 100755 apis/kendra/v1beta2/zz_index_terraformed.go create mode 100755 apis/kendra/v1beta2/zz_index_types.go create mode 100755 apis/kendra/v1beta2/zz_querysuggestionsblocklist_terraformed.go create mode 100755 apis/kendra/v1beta2/zz_querysuggestionsblocklist_types.go create mode 100755 apis/kendra/v1beta2/zz_thesaurus_terraformed.go create mode 100755 apis/kendra/v1beta2/zz_thesaurus_types.go create mode 100755 apis/keyspaces/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/keyspaces/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/keyspaces/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/keyspaces/v1beta2/zz_generated.managed.go create mode 100644 apis/keyspaces/v1beta2/zz_generated.managedlist.go create mode 100644 apis/keyspaces/v1beta2/zz_generated.resolvers.go create mode 100755 apis/keyspaces/v1beta2/zz_groupversion_info.go create mode 100755 
apis/keyspaces/v1beta2/zz_table_terraformed.go create mode 100755 apis/keyspaces/v1beta2/zz_table_types.go create mode 100755 apis/kinesis/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/kinesis/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/kinesis/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/kinesis/v1beta2/zz_generated.managed.go create mode 100644 apis/kinesis/v1beta2/zz_generated.managedlist.go create mode 100644 apis/kinesis/v1beta2/zz_generated.resolvers.go create mode 100755 apis/kinesis/v1beta2/zz_groupversion_info.go create mode 100755 apis/kinesis/v1beta2/zz_stream_terraformed.go create mode 100755 apis/kinesis/v1beta2/zz_stream_types.go create mode 100755 apis/kinesisanalytics/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/kinesisanalytics/v1beta2/zz_application_terraformed.go create mode 100755 apis/kinesisanalytics/v1beta2/zz_application_types.go create mode 100755 apis/kinesisanalytics/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/kinesisanalytics/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/kinesisanalytics/v1beta2/zz_generated.managed.go create mode 100644 apis/kinesisanalytics/v1beta2/zz_generated.managedlist.go create mode 100644 apis/kinesisanalytics/v1beta2/zz_generated.resolvers.go create mode 100755 apis/kinesisanalytics/v1beta2/zz_groupversion_info.go create mode 100755 apis/kinesisanalyticsv2/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/kinesisanalyticsv2/v1beta2/zz_application_terraformed.go create mode 100755 apis/kinesisanalyticsv2/v1beta2/zz_application_types.go create mode 100755 apis/kinesisanalyticsv2/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/kinesisanalyticsv2/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/kinesisanalyticsv2/v1beta2/zz_generated.managed.go create mode 100644 apis/kinesisanalyticsv2/v1beta2/zz_generated.managedlist.go create mode 100644 
apis/kinesisanalyticsv2/v1beta2/zz_generated.resolvers.go create mode 100755 apis/kinesisanalyticsv2/v1beta2/zz_groupversion_info.go create mode 100755 apis/lakeformation/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/lakeformation/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/lakeformation/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/lakeformation/v1beta2/zz_generated.managed.go create mode 100644 apis/lakeformation/v1beta2/zz_generated.managedlist.go create mode 100644 apis/lakeformation/v1beta2/zz_generated.resolvers.go create mode 100755 apis/lakeformation/v1beta2/zz_groupversion_info.go create mode 100755 apis/lakeformation/v1beta2/zz_permissions_terraformed.go create mode 100755 apis/lakeformation/v1beta2/zz_permissions_types.go create mode 100755 apis/lambda/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/lambda/v1beta2/zz_alias_terraformed.go create mode 100755 apis/lambda/v1beta2/zz_alias_types.go create mode 100755 apis/lambda/v1beta2/zz_codesigningconfig_terraformed.go create mode 100755 apis/lambda/v1beta2/zz_codesigningconfig_types.go create mode 100755 apis/lambda/v1beta2/zz_eventsourcemapping_terraformed.go create mode 100755 apis/lambda/v1beta2/zz_eventsourcemapping_types.go create mode 100755 apis/lambda/v1beta2/zz_function_terraformed.go create mode 100755 apis/lambda/v1beta2/zz_function_types.go create mode 100755 apis/lambda/v1beta2/zz_functioneventinvokeconfig_terraformed.go create mode 100755 apis/lambda/v1beta2/zz_functioneventinvokeconfig_types.go create mode 100755 apis/lambda/v1beta2/zz_functionurl_terraformed.go create mode 100755 apis/lambda/v1beta2/zz_functionurl_types.go create mode 100755 apis/lambda/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/lambda/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/lambda/v1beta2/zz_generated.managed.go create mode 100644 apis/lambda/v1beta2/zz_generated.managedlist.go create mode 100644 
apis/lambda/v1beta2/zz_generated.resolvers.go create mode 100755 apis/lambda/v1beta2/zz_groupversion_info.go create mode 100755 apis/lexmodels/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/lexmodels/v1beta2/zz_bot_terraformed.go create mode 100755 apis/lexmodels/v1beta2/zz_bot_types.go create mode 100755 apis/lexmodels/v1beta2/zz_botalias_terraformed.go create mode 100755 apis/lexmodels/v1beta2/zz_botalias_types.go create mode 100755 apis/lexmodels/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/lexmodels/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/lexmodels/v1beta2/zz_generated.managed.go create mode 100644 apis/lexmodels/v1beta2/zz_generated.managedlist.go create mode 100755 apis/lexmodels/v1beta2/zz_groupversion_info.go create mode 100755 apis/lexmodels/v1beta2/zz_intent_terraformed.go create mode 100755 apis/lexmodels/v1beta2/zz_intent_types.go create mode 100755 apis/lightsail/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/lightsail/v1beta2/zz_containerservice_terraformed.go create mode 100755 apis/lightsail/v1beta2/zz_containerservice_types.go create mode 100755 apis/lightsail/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/lightsail/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/lightsail/v1beta2/zz_generated.managed.go create mode 100644 apis/lightsail/v1beta2/zz_generated.managedlist.go create mode 100755 apis/lightsail/v1beta2/zz_groupversion_info.go create mode 100755 apis/lightsail/v1beta2/zz_instance_terraformed.go create mode 100755 apis/lightsail/v1beta2/zz_instance_types.go create mode 100755 apis/location/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/location/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/location/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/location/v1beta2/zz_generated.managed.go create mode 100644 apis/location/v1beta2/zz_generated.managedlist.go create mode 100755 
apis/location/v1beta2/zz_groupversion_info.go create mode 100755 apis/location/v1beta2/zz_placeindex_terraformed.go create mode 100755 apis/location/v1beta2/zz_placeindex_types.go create mode 100755 apis/macie2/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/macie2/v1beta2/zz_classificationjob_terraformed.go create mode 100755 apis/macie2/v1beta2/zz_classificationjob_types.go create mode 100755 apis/macie2/v1beta2/zz_findingsfilter_terraformed.go create mode 100755 apis/macie2/v1beta2/zz_findingsfilter_types.go create mode 100755 apis/macie2/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/macie2/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/macie2/v1beta2/zz_generated.managed.go create mode 100644 apis/macie2/v1beta2/zz_generated.managedlist.go create mode 100755 apis/macie2/v1beta2/zz_groupversion_info.go create mode 100755 apis/mediaconvert/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/mediaconvert/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/mediaconvert/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/mediaconvert/v1beta2/zz_generated.managed.go create mode 100644 apis/mediaconvert/v1beta2/zz_generated.managedlist.go create mode 100755 apis/mediaconvert/v1beta2/zz_groupversion_info.go create mode 100755 apis/mediaconvert/v1beta2/zz_queue_terraformed.go create mode 100755 apis/mediaconvert/v1beta2/zz_queue_types.go create mode 100755 apis/medialive/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/medialive/v1beta2/zz_channel_terraformed.go create mode 100755 apis/medialive/v1beta2/zz_channel_types.go create mode 100755 apis/medialive/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/medialive/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/medialive/v1beta2/zz_generated.managed.go create mode 100644 apis/medialive/v1beta2/zz_generated.managedlist.go create mode 100644 apis/medialive/v1beta2/zz_generated.resolvers.go create mode 
100755 apis/medialive/v1beta2/zz_groupversion_info.go create mode 100755 apis/medialive/v1beta2/zz_input_terraformed.go create mode 100755 apis/medialive/v1beta2/zz_input_types.go create mode 100755 apis/medialive/v1beta2/zz_multiplex_terraformed.go create mode 100755 apis/medialive/v1beta2/zz_multiplex_types.go create mode 100755 apis/memorydb/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/memorydb/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/memorydb/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/memorydb/v1beta2/zz_generated.managed.go create mode 100644 apis/memorydb/v1beta2/zz_generated.managedlist.go create mode 100755 apis/memorydb/v1beta2/zz_groupversion_info.go create mode 100755 apis/memorydb/v1beta2/zz_user_terraformed.go create mode 100755 apis/memorydb/v1beta2/zz_user_types.go create mode 100755 apis/mq/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/mq/v1beta2/zz_broker_terraformed.go create mode 100755 apis/mq/v1beta2/zz_broker_types.go create mode 100755 apis/mq/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/mq/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/mq/v1beta2/zz_generated.managed.go create mode 100644 apis/mq/v1beta2/zz_generated.managedlist.go create mode 100644 apis/mq/v1beta2/zz_generated.resolvers.go create mode 100755 apis/mq/v1beta2/zz_groupversion_info.go create mode 100755 apis/neptune/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/neptune/v1beta2/zz_cluster_terraformed.go create mode 100755 apis/neptune/v1beta2/zz_cluster_types.go create mode 100755 apis/neptune/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/neptune/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/neptune/v1beta2/zz_generated.managed.go create mode 100644 apis/neptune/v1beta2/zz_generated.managedlist.go create mode 100644 apis/neptune/v1beta2/zz_generated.resolvers.go create mode 100755 apis/neptune/v1beta2/zz_groupversion_info.go 
create mode 100755 apis/networkfirewall/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/networkfirewall/v1beta2/zz_firewall_terraformed.go create mode 100755 apis/networkfirewall/v1beta2/zz_firewall_types.go create mode 100755 apis/networkfirewall/v1beta2/zz_firewallpolicy_terraformed.go create mode 100755 apis/networkfirewall/v1beta2/zz_firewallpolicy_types.go create mode 100755 apis/networkfirewall/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/networkfirewall/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/networkfirewall/v1beta2/zz_generated.managed.go create mode 100644 apis/networkfirewall/v1beta2/zz_generated.managedlist.go create mode 100644 apis/networkfirewall/v1beta2/zz_generated.resolvers.go create mode 100755 apis/networkfirewall/v1beta2/zz_groupversion_info.go create mode 100755 apis/networkfirewall/v1beta2/zz_loggingconfiguration_terraformed.go create mode 100755 apis/networkfirewall/v1beta2/zz_loggingconfiguration_types.go create mode 100755 apis/networkfirewall/v1beta2/zz_rulegroup_terraformed.go create mode 100755 apis/networkfirewall/v1beta2/zz_rulegroup_types.go create mode 100755 apis/networkmanager/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/networkmanager/v1beta2/zz_connectattachment_terraformed.go create mode 100755 apis/networkmanager/v1beta2/zz_connectattachment_types.go create mode 100755 apis/networkmanager/v1beta2/zz_device_terraformed.go create mode 100755 apis/networkmanager/v1beta2/zz_device_types.go create mode 100755 apis/networkmanager/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/networkmanager/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/networkmanager/v1beta2/zz_generated.managed.go create mode 100644 apis/networkmanager/v1beta2/zz_generated.managedlist.go create mode 100644 apis/networkmanager/v1beta2/zz_generated.resolvers.go create mode 100755 apis/networkmanager/v1beta2/zz_groupversion_info.go create mode 100755 
apis/networkmanager/v1beta2/zz_link_terraformed.go create mode 100755 apis/networkmanager/v1beta2/zz_link_types.go create mode 100755 apis/networkmanager/v1beta2/zz_site_terraformed.go create mode 100755 apis/networkmanager/v1beta2/zz_site_types.go create mode 100755 apis/networkmanager/v1beta2/zz_vpcattachment_terraformed.go create mode 100755 apis/networkmanager/v1beta2/zz_vpcattachment_types.go create mode 100755 apis/opensearch/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/opensearch/v1beta2/zz_domain_terraformed.go create mode 100755 apis/opensearch/v1beta2/zz_domain_types.go create mode 100755 apis/opensearch/v1beta2/zz_domainsamloptions_terraformed.go create mode 100755 apis/opensearch/v1beta2/zz_domainsamloptions_types.go create mode 100755 apis/opensearch/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/opensearch/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/opensearch/v1beta2/zz_generated.managed.go create mode 100644 apis/opensearch/v1beta2/zz_generated.managedlist.go create mode 100644 apis/opensearch/v1beta2/zz_generated.resolvers.go create mode 100755 apis/opensearch/v1beta2/zz_groupversion_info.go create mode 100755 apis/opensearchserverless/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/opensearchserverless/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/opensearchserverless/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/opensearchserverless/v1beta2/zz_generated.managed.go create mode 100644 apis/opensearchserverless/v1beta2/zz_generated.managedlist.go create mode 100755 apis/opensearchserverless/v1beta2/zz_groupversion_info.go create mode 100755 apis/opensearchserverless/v1beta2/zz_securityconfig_terraformed.go create mode 100755 apis/opensearchserverless/v1beta2/zz_securityconfig_types.go create mode 100755 apis/opsworks/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/opsworks/v1beta2/zz_customlayer_terraformed.go create mode 100755 
apis/opsworks/v1beta2/zz_customlayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_ecsclusterlayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_ecsclusterlayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_ganglialayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_ganglialayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/opsworks/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/opsworks/v1beta2/zz_generated.managed.go create mode 100644 apis/opsworks/v1beta2/zz_generated.managedlist.go create mode 100644 apis/opsworks/v1beta2/zz_generated.resolvers.go create mode 100755 apis/opsworks/v1beta2/zz_groupversion_info.go create mode 100755 apis/opsworks/v1beta2/zz_haproxylayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_haproxylayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_javaapplayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_javaapplayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_memcachedlayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_memcachedlayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_mysqllayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_mysqllayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_nodejsapplayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_nodejsapplayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_phpapplayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_phpapplayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_railsapplayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_railsapplayer_types.go create mode 100755 apis/opsworks/v1beta2/zz_stack_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_stack_types.go create mode 100755 apis/opsworks/v1beta2/zz_staticweblayer_terraformed.go create mode 100755 apis/opsworks/v1beta2/zz_staticweblayer_types.go create mode 100755 
apis/pinpoint/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/pinpoint/v1beta2/zz_app_terraformed.go create mode 100755 apis/pinpoint/v1beta2/zz_app_types.go create mode 100755 apis/pinpoint/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/pinpoint/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/pinpoint/v1beta2/zz_generated.managed.go create mode 100644 apis/pinpoint/v1beta2/zz_generated.managedlist.go create mode 100755 apis/pinpoint/v1beta2/zz_groupversion_info.go create mode 100755 apis/qldb/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/qldb/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/qldb/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/qldb/v1beta2/zz_generated.managed.go create mode 100644 apis/qldb/v1beta2/zz_generated.managedlist.go create mode 100644 apis/qldb/v1beta2/zz_generated.resolvers.go create mode 100755 apis/qldb/v1beta2/zz_groupversion_info.go create mode 100755 apis/qldb/v1beta2/zz_stream_terraformed.go create mode 100755 apis/qldb/v1beta2/zz_stream_types.go create mode 100755 apis/rds/v1beta2/zz_cluster_terraformed.go create mode 100755 apis/rds/v1beta2/zz_cluster_types.go create mode 100755 apis/rds/v1beta2/zz_generated.conversion_spokes.go create mode 100755 apis/rds/v1beta2/zz_proxydefaulttargetgroup_terraformed.go create mode 100755 apis/rds/v1beta2/zz_proxydefaulttargetgroup_types.go create mode 100755 apis/rds/v1beta3/zz_generated.conversion_hubs.go create mode 100644 apis/rds/v1beta3/zz_generated.deepcopy.go create mode 100644 apis/rds/v1beta3/zz_generated.managed.go create mode 100644 apis/rds/v1beta3/zz_generated.managedlist.go create mode 100644 apis/rds/v1beta3/zz_generated.resolvers.go create mode 100755 apis/rds/v1beta3/zz_groupversion_info.go create mode 100755 apis/rds/v1beta3/zz_instance_terraformed.go create mode 100755 apis/rds/v1beta3/zz_instance_types.go create mode 100755 apis/redshift/v1beta1/zz_generated.conversion_spokes.go create 
mode 100755 apis/redshift/v1beta2/zz_cluster_terraformed.go create mode 100755 apis/redshift/v1beta2/zz_cluster_types.go create mode 100755 apis/redshift/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/redshift/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/redshift/v1beta2/zz_generated.managed.go create mode 100644 apis/redshift/v1beta2/zz_generated.managedlist.go create mode 100644 apis/redshift/v1beta2/zz_generated.resolvers.go create mode 100755 apis/redshift/v1beta2/zz_groupversion_info.go create mode 100755 apis/redshift/v1beta2/zz_scheduledaction_terraformed.go create mode 100755 apis/redshift/v1beta2/zz_scheduledaction_types.go create mode 100755 apis/resourcegroups/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/resourcegroups/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/resourcegroups/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/resourcegroups/v1beta2/zz_generated.managed.go create mode 100644 apis/resourcegroups/v1beta2/zz_generated.managedlist.go create mode 100755 apis/resourcegroups/v1beta2/zz_group_terraformed.go create mode 100755 apis/resourcegroups/v1beta2/zz_group_types.go create mode 100755 apis/resourcegroups/v1beta2/zz_groupversion_info.go create mode 100755 apis/route53/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/route53/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/route53/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/route53/v1beta2/zz_generated.managed.go create mode 100644 apis/route53/v1beta2/zz_generated.managedlist.go create mode 100644 apis/route53/v1beta2/zz_generated.resolvers.go create mode 100755 apis/route53/v1beta2/zz_groupversion_info.go create mode 100755 apis/route53/v1beta2/zz_record_terraformed.go create mode 100755 apis/route53/v1beta2/zz_record_types.go create mode 100755 apis/route53recoverycontrolconfig/v1beta1/zz_generated.conversion_spokes.go create mode 100755 
apis/route53recoverycontrolconfig/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/route53recoverycontrolconfig/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/route53recoverycontrolconfig/v1beta2/zz_generated.managed.go create mode 100644 apis/route53recoverycontrolconfig/v1beta2/zz_generated.managedlist.go create mode 100644 apis/route53recoverycontrolconfig/v1beta2/zz_generated.resolvers.go create mode 100755 apis/route53recoverycontrolconfig/v1beta2/zz_groupversion_info.go create mode 100755 apis/route53recoverycontrolconfig/v1beta2/zz_safetyrule_terraformed.go create mode 100755 apis/route53recoverycontrolconfig/v1beta2/zz_safetyrule_types.go create mode 100755 apis/route53recoveryreadiness/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/route53recoveryreadiness/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/route53recoveryreadiness/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/route53recoveryreadiness/v1beta2/zz_generated.managed.go create mode 100644 apis/route53recoveryreadiness/v1beta2/zz_generated.managedlist.go create mode 100644 apis/route53recoveryreadiness/v1beta2/zz_generated.resolvers.go create mode 100755 apis/route53recoveryreadiness/v1beta2/zz_groupversion_info.go create mode 100755 apis/route53recoveryreadiness/v1beta2/zz_resourceset_terraformed.go create mode 100755 apis/route53recoveryreadiness/v1beta2/zz_resourceset_types.go create mode 100755 apis/rum/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/rum/v1beta2/zz_appmonitor_terraformed.go create mode 100755 apis/rum/v1beta2/zz_appmonitor_types.go create mode 100755 apis/rum/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/rum/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/rum/v1beta2/zz_generated.managed.go create mode 100644 apis/rum/v1beta2/zz_generated.managedlist.go create mode 100755 apis/rum/v1beta2/zz_groupversion_info.go create mode 100755 
apis/s3/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/s3/v1beta2/zz_bucket_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucket_types.go create mode 100755 apis/s3/v1beta2/zz_bucketacl_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketacl_types.go create mode 100755 apis/s3/v1beta2/zz_bucketanalyticsconfiguration_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketanalyticsconfiguration_types.go create mode 100755 apis/s3/v1beta2/zz_bucketintelligenttieringconfiguration_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketintelligenttieringconfiguration_types.go create mode 100755 apis/s3/v1beta2/zz_bucketinventory_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketinventory_types.go create mode 100755 apis/s3/v1beta2/zz_bucketlifecycleconfiguration_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketlifecycleconfiguration_types.go create mode 100755 apis/s3/v1beta2/zz_bucketlogging_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketlogging_types.go create mode 100755 apis/s3/v1beta2/zz_bucketmetric_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketmetric_types.go create mode 100755 apis/s3/v1beta2/zz_bucketobjectlockconfiguration_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketobjectlockconfiguration_types.go create mode 100755 apis/s3/v1beta2/zz_bucketownershipcontrols_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketownershipcontrols_types.go create mode 100755 apis/s3/v1beta2/zz_bucketreplicationconfiguration_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketreplicationconfiguration_types.go create mode 100755 apis/s3/v1beta2/zz_bucketserversideencryptionconfiguration_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketserversideencryptionconfiguration_types.go create mode 100755 apis/s3/v1beta2/zz_bucketversioning_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketversioning_types.go create mode 100755 
apis/s3/v1beta2/zz_bucketwebsiteconfiguration_terraformed.go create mode 100755 apis/s3/v1beta2/zz_bucketwebsiteconfiguration_types.go create mode 100755 apis/s3/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/s3/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/s3/v1beta2/zz_generated.managed.go create mode 100644 apis/s3/v1beta2/zz_generated.managedlist.go create mode 100644 apis/s3/v1beta2/zz_generated.resolvers.go create mode 100755 apis/s3/v1beta2/zz_groupversion_info.go create mode 100755 apis/s3/v1beta2/zz_object_terraformed.go create mode 100755 apis/s3/v1beta2/zz_object_types.go create mode 100755 apis/s3control/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/s3control/v1beta2/zz_accesspoint_terraformed.go create mode 100755 apis/s3control/v1beta2/zz_accesspoint_types.go create mode 100755 apis/s3control/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/s3control/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/s3control/v1beta2/zz_generated.managed.go create mode 100644 apis/s3control/v1beta2/zz_generated.managedlist.go create mode 100644 apis/s3control/v1beta2/zz_generated.resolvers.go create mode 100755 apis/s3control/v1beta2/zz_groupversion_info.go create mode 100755 apis/s3control/v1beta2/zz_multiregionaccesspoint_terraformed.go create mode 100755 apis/s3control/v1beta2/zz_multiregionaccesspoint_types.go create mode 100755 apis/s3control/v1beta2/zz_multiregionaccesspointpolicy_terraformed.go create mode 100755 apis/s3control/v1beta2/zz_multiregionaccesspointpolicy_types.go create mode 100755 apis/s3control/v1beta2/zz_objectlambdaaccesspoint_terraformed.go create mode 100755 apis/s3control/v1beta2/zz_objectlambdaaccesspoint_types.go create mode 100755 apis/s3control/v1beta2/zz_storagelensconfiguration_terraformed.go create mode 100755 apis/s3control/v1beta2/zz_storagelensconfiguration_types.go create mode 100755 apis/sagemaker/v1beta1/zz_generated.conversion_spokes.go create mode 100755 
apis/sagemaker/v1beta2/zz_app_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_app_types.go create mode 100755 apis/sagemaker/v1beta2/zz_appimageconfig_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_appimageconfig_types.go create mode 100755 apis/sagemaker/v1beta2/zz_coderepository_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_coderepository_types.go create mode 100755 apis/sagemaker/v1beta2/zz_device_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_device_types.go create mode 100755 apis/sagemaker/v1beta2/zz_devicefleet_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_devicefleet_types.go create mode 100755 apis/sagemaker/v1beta2/zz_domain_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_domain_types.go create mode 100755 apis/sagemaker/v1beta2/zz_endpoint_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_endpoint_types.go create mode 100755 apis/sagemaker/v1beta2/zz_endpointconfiguration_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_endpointconfiguration_types.go create mode 100755 apis/sagemaker/v1beta2/zz_featuregroup_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_featuregroup_types.go create mode 100755 apis/sagemaker/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/sagemaker/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/sagemaker/v1beta2/zz_generated.managed.go create mode 100644 apis/sagemaker/v1beta2/zz_generated.managedlist.go create mode 100644 apis/sagemaker/v1beta2/zz_generated.resolvers.go create mode 100755 apis/sagemaker/v1beta2/zz_groupversion_info.go create mode 100755 apis/sagemaker/v1beta2/zz_model_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_model_types.go create mode 100755 apis/sagemaker/v1beta2/zz_notebookinstance_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_notebookinstance_types.go create mode 100755 apis/sagemaker/v1beta2/zz_space_terraformed.go create mode 100755 
apis/sagemaker/v1beta2/zz_space_types.go create mode 100755 apis/sagemaker/v1beta2/zz_userprofile_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_userprofile_types.go create mode 100755 apis/sagemaker/v1beta2/zz_workforce_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_workforce_types.go create mode 100755 apis/sagemaker/v1beta2/zz_workteam_terraformed.go create mode 100755 apis/sagemaker/v1beta2/zz_workteam_types.go create mode 100755 apis/scheduler/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/scheduler/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/scheduler/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/scheduler/v1beta2/zz_generated.managed.go create mode 100644 apis/scheduler/v1beta2/zz_generated.managedlist.go create mode 100644 apis/scheduler/v1beta2/zz_generated.resolvers.go create mode 100755 apis/scheduler/v1beta2/zz_groupversion_info.go create mode 100755 apis/scheduler/v1beta2/zz_schedule_terraformed.go create mode 100755 apis/scheduler/v1beta2/zz_schedule_types.go create mode 100755 apis/secretsmanager/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/secretsmanager/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/secretsmanager/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/secretsmanager/v1beta2/zz_generated.managed.go create mode 100644 apis/secretsmanager/v1beta2/zz_generated.managedlist.go create mode 100644 apis/secretsmanager/v1beta2/zz_generated.resolvers.go create mode 100755 apis/secretsmanager/v1beta2/zz_groupversion_info.go create mode 100755 apis/secretsmanager/v1beta2/zz_secretrotation_terraformed.go create mode 100755 apis/secretsmanager/v1beta2/zz_secretrotation_types.go create mode 100755 apis/securityhub/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/securityhub/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/securityhub/v1beta2/zz_generated.deepcopy.go create mode 100644 
apis/securityhub/v1beta2/zz_generated.managed.go create mode 100644 apis/securityhub/v1beta2/zz_generated.managedlist.go create mode 100755 apis/securityhub/v1beta2/zz_groupversion_info.go create mode 100755 apis/securityhub/v1beta2/zz_insight_terraformed.go create mode 100755 apis/securityhub/v1beta2/zz_insight_types.go create mode 100755 apis/servicecatalog/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/servicecatalog/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/servicecatalog/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/servicecatalog/v1beta2/zz_generated.managed.go create mode 100644 apis/servicecatalog/v1beta2/zz_generated.managedlist.go create mode 100755 apis/servicecatalog/v1beta2/zz_groupversion_info.go create mode 100755 apis/servicecatalog/v1beta2/zz_product_terraformed.go create mode 100755 apis/servicecatalog/v1beta2/zz_product_types.go create mode 100755 apis/servicecatalog/v1beta2/zz_serviceaction_terraformed.go create mode 100755 apis/servicecatalog/v1beta2/zz_serviceaction_types.go create mode 100755 apis/servicediscovery/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/servicediscovery/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/servicediscovery/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/servicediscovery/v1beta2/zz_generated.managed.go create mode 100644 apis/servicediscovery/v1beta2/zz_generated.managedlist.go create mode 100644 apis/servicediscovery/v1beta2/zz_generated.resolvers.go create mode 100755 apis/servicediscovery/v1beta2/zz_groupversion_info.go create mode 100755 apis/servicediscovery/v1beta2/zz_service_terraformed.go create mode 100755 apis/servicediscovery/v1beta2/zz_service_types.go create mode 100755 apis/ses/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/ses/v1beta2/zz_configurationset_terraformed.go create mode 100755 apis/ses/v1beta2/zz_configurationset_types.go create mode 100755 
apis/ses/v1beta2/zz_eventdestination_terraformed.go create mode 100755 apis/ses/v1beta2/zz_eventdestination_types.go create mode 100755 apis/ses/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/ses/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/ses/v1beta2/zz_generated.managed.go create mode 100644 apis/ses/v1beta2/zz_generated.managedlist.go create mode 100644 apis/ses/v1beta2/zz_generated.resolvers.go create mode 100755 apis/ses/v1beta2/zz_groupversion_info.go create mode 100755 apis/sesv2/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/sesv2/v1beta2/zz_configurationset_terraformed.go create mode 100755 apis/sesv2/v1beta2/zz_configurationset_types.go create mode 100755 apis/sesv2/v1beta2/zz_configurationseteventdestination_terraformed.go create mode 100755 apis/sesv2/v1beta2/zz_configurationseteventdestination_types.go create mode 100755 apis/sesv2/v1beta2/zz_emailidentity_terraformed.go create mode 100755 apis/sesv2/v1beta2/zz_emailidentity_types.go create mode 100755 apis/sesv2/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/sesv2/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/sesv2/v1beta2/zz_generated.managed.go create mode 100644 apis/sesv2/v1beta2/zz_generated.managedlist.go create mode 100644 apis/sesv2/v1beta2/zz_generated.resolvers.go create mode 100755 apis/sesv2/v1beta2/zz_groupversion_info.go create mode 100755 apis/sfn/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/sfn/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/sfn/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/sfn/v1beta2/zz_generated.managed.go create mode 100644 apis/sfn/v1beta2/zz_generated.managedlist.go create mode 100644 apis/sfn/v1beta2/zz_generated.resolvers.go create mode 100755 apis/sfn/v1beta2/zz_groupversion_info.go create mode 100755 apis/sfn/v1beta2/zz_statemachine_terraformed.go create mode 100755 apis/sfn/v1beta2/zz_statemachine_types.go create mode 100755 
apis/signer/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/signer/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/signer/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/signer/v1beta2/zz_generated.managed.go create mode 100644 apis/signer/v1beta2/zz_generated.managedlist.go create mode 100644 apis/signer/v1beta2/zz_generated.resolvers.go create mode 100755 apis/signer/v1beta2/zz_groupversion_info.go create mode 100755 apis/signer/v1beta2/zz_signingjob_terraformed.go create mode 100755 apis/signer/v1beta2/zz_signingjob_types.go create mode 100755 apis/signer/v1beta2/zz_signingprofile_terraformed.go create mode 100755 apis/signer/v1beta2/zz_signingprofile_types.go create mode 100755 apis/ssm/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/ssm/v1beta2/zz_association_terraformed.go create mode 100755 apis/ssm/v1beta2/zz_association_types.go create mode 100755 apis/ssm/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/ssm/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/ssm/v1beta2/zz_generated.managed.go create mode 100644 apis/ssm/v1beta2/zz_generated.managedlist.go create mode 100644 apis/ssm/v1beta2/zz_generated.resolvers.go create mode 100755 apis/ssm/v1beta2/zz_groupversion_info.go create mode 100755 apis/ssm/v1beta2/zz_maintenancewindowtask_terraformed.go create mode 100755 apis/ssm/v1beta2/zz_maintenancewindowtask_types.go create mode 100755 apis/ssm/v1beta2/zz_resourcedatasync_terraformed.go create mode 100755 apis/ssm/v1beta2/zz_resourcedatasync_types.go create mode 100755 apis/ssoadmin/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/ssoadmin/v1beta2/zz_customermanagedpolicyattachment_terraformed.go create mode 100755 apis/ssoadmin/v1beta2/zz_customermanagedpolicyattachment_types.go create mode 100755 apis/ssoadmin/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/ssoadmin/v1beta2/zz_generated.deepcopy.go create mode 100644 
apis/ssoadmin/v1beta2/zz_generated.managed.go create mode 100644 apis/ssoadmin/v1beta2/zz_generated.managedlist.go create mode 100644 apis/ssoadmin/v1beta2/zz_generated.resolvers.go create mode 100755 apis/ssoadmin/v1beta2/zz_groupversion_info.go create mode 100755 apis/ssoadmin/v1beta2/zz_permissionsboundaryattachment_terraformed.go create mode 100755 apis/ssoadmin/v1beta2/zz_permissionsboundaryattachment_types.go create mode 100755 apis/timestreamwrite/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/timestreamwrite/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/timestreamwrite/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/timestreamwrite/v1beta2/zz_generated.managed.go create mode 100644 apis/timestreamwrite/v1beta2/zz_generated.managedlist.go create mode 100644 apis/timestreamwrite/v1beta2/zz_generated.resolvers.go create mode 100755 apis/timestreamwrite/v1beta2/zz_groupversion_info.go create mode 100755 apis/timestreamwrite/v1beta2/zz_table_terraformed.go create mode 100755 apis/timestreamwrite/v1beta2/zz_table_types.go create mode 100755 apis/transcribe/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/transcribe/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/transcribe/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/transcribe/v1beta2/zz_generated.managed.go create mode 100644 apis/transcribe/v1beta2/zz_generated.managedlist.go create mode 100644 apis/transcribe/v1beta2/zz_generated.resolvers.go create mode 100755 apis/transcribe/v1beta2/zz_groupversion_info.go create mode 100755 apis/transcribe/v1beta2/zz_languagemodel_terraformed.go create mode 100755 apis/transcribe/v1beta2/zz_languagemodel_types.go create mode 100755 apis/transfer/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/transfer/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/transfer/v1beta2/zz_generated.deepcopy.go create mode 100644 
apis/transfer/v1beta2/zz_generated.managed.go create mode 100644 apis/transfer/v1beta2/zz_generated.managedlist.go create mode 100644 apis/transfer/v1beta2/zz_generated.resolvers.go create mode 100755 apis/transfer/v1beta2/zz_groupversion_info.go create mode 100755 apis/transfer/v1beta2/zz_server_terraformed.go create mode 100755 apis/transfer/v1beta2/zz_server_types.go create mode 100755 apis/transfer/v1beta2/zz_user_terraformed.go create mode 100755 apis/transfer/v1beta2/zz_user_types.go create mode 100755 apis/transfer/v1beta2/zz_workflow_terraformed.go create mode 100755 apis/transfer/v1beta2/zz_workflow_types.go create mode 100755 apis/waf/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/waf/v1beta2/zz_bytematchset_terraformed.go create mode 100755 apis/waf/v1beta2/zz_bytematchset_types.go create mode 100755 apis/waf/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/waf/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/waf/v1beta2/zz_generated.managed.go create mode 100644 apis/waf/v1beta2/zz_generated.managedlist.go create mode 100644 apis/waf/v1beta2/zz_generated.resolvers.go create mode 100755 apis/waf/v1beta2/zz_groupversion_info.go create mode 100755 apis/waf/v1beta2/zz_regexmatchset_terraformed.go create mode 100755 apis/waf/v1beta2/zz_regexmatchset_types.go create mode 100755 apis/waf/v1beta2/zz_sizeconstraintset_terraformed.go create mode 100755 apis/waf/v1beta2/zz_sizeconstraintset_types.go create mode 100755 apis/waf/v1beta2/zz_sqlinjectionmatchset_terraformed.go create mode 100755 apis/waf/v1beta2/zz_sqlinjectionmatchset_types.go create mode 100755 apis/waf/v1beta2/zz_webacl_terraformed.go create mode 100755 apis/waf/v1beta2/zz_webacl_types.go create mode 100755 apis/waf/v1beta2/zz_xssmatchset_terraformed.go create mode 100755 apis/waf/v1beta2/zz_xssmatchset_types.go create mode 100755 apis/wafregional/v1beta1/zz_generated.conversion_spokes.go create mode 100755 
apis/wafregional/v1beta2/zz_bytematchset_terraformed.go create mode 100755 apis/wafregional/v1beta2/zz_bytematchset_types.go create mode 100755 apis/wafregional/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/wafregional/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/wafregional/v1beta2/zz_generated.managed.go create mode 100644 apis/wafregional/v1beta2/zz_generated.managedlist.go create mode 100644 apis/wafregional/v1beta2/zz_generated.resolvers.go create mode 100755 apis/wafregional/v1beta2/zz_groupversion_info.go create mode 100755 apis/wafregional/v1beta2/zz_regexmatchset_terraformed.go create mode 100755 apis/wafregional/v1beta2/zz_regexmatchset_types.go create mode 100755 apis/wafregional/v1beta2/zz_sizeconstraintset_terraformed.go create mode 100755 apis/wafregional/v1beta2/zz_sizeconstraintset_types.go create mode 100755 apis/wafregional/v1beta2/zz_sqlinjectionmatchset_terraformed.go create mode 100755 apis/wafregional/v1beta2/zz_sqlinjectionmatchset_types.go create mode 100755 apis/wafregional/v1beta2/zz_webacl_terraformed.go create mode 100755 apis/wafregional/v1beta2/zz_webacl_types.go create mode 100755 apis/wafregional/v1beta2/zz_xssmatchset_terraformed.go create mode 100755 apis/wafregional/v1beta2/zz_xssmatchset_types.go create mode 100755 apis/workspaces/v1beta1/zz_generated.conversion_spokes.go create mode 100755 apis/workspaces/v1beta2/zz_directory_terraformed.go create mode 100755 apis/workspaces/v1beta2/zz_directory_types.go create mode 100755 apis/workspaces/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/workspaces/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/workspaces/v1beta2/zz_generated.managed.go create mode 100644 apis/workspaces/v1beta2/zz_generated.managedlist.go create mode 100644 apis/workspaces/v1beta2/zz_generated.resolvers.go create mode 100755 apis/workspaces/v1beta2/zz_groupversion_info.go create mode 100755 apis/xray/v1beta1/zz_generated.conversion_spokes.go create mode 
100755 apis/xray/v1beta2/zz_generated.conversion_hubs.go create mode 100644 apis/xray/v1beta2/zz_generated.deepcopy.go create mode 100644 apis/xray/v1beta2/zz_generated.managed.go create mode 100644 apis/xray/v1beta2/zz_generated.managedlist.go create mode 100755 apis/xray/v1beta2/zz_group_terraformed.go create mode 100755 apis/xray/v1beta2/zz_group_types.go create mode 100755 apis/xray/v1beta2/zz_groupversion_info.go create mode 100644 examples-generated/accessanalyzer/v1beta2/analyzer.yaml create mode 100644 examples-generated/acm/v1beta2/certificate.yaml create mode 100644 examples-generated/acmpca/v1beta2/certificate.yaml create mode 100644 examples-generated/acmpca/v1beta2/certificateauthority.yaml create mode 100644 examples-generated/amp/v1beta2/workspace.yaml create mode 100644 examples-generated/amplify/v1beta2/app.yaml create mode 100644 examples-generated/apigateway/v1beta2/documentationpart.yaml create mode 100644 examples-generated/apigateway/v1beta2/domainname.yaml create mode 100644 examples-generated/apigateway/v1beta2/integration.yaml create mode 100644 examples-generated/apigateway/v1beta2/methodsettings.yaml create mode 100644 examples-generated/apigateway/v1beta2/restapi.yaml create mode 100644 examples-generated/apigateway/v1beta2/stage.yaml create mode 100644 examples-generated/apigateway/v1beta2/usageplan.yaml create mode 100644 examples-generated/apigatewayv2/v1beta2/api.yaml create mode 100644 examples-generated/apigatewayv2/v1beta2/authorizer.yaml create mode 100644 examples-generated/apigatewayv2/v1beta2/domainname.yaml create mode 100644 examples-generated/apigatewayv2/v1beta2/integration.yaml create mode 100644 examples-generated/apigatewayv2/v1beta2/stage.yaml create mode 100644 examples-generated/appautoscaling/v1beta2/policy.yaml create mode 100644 examples-generated/appautoscaling/v1beta2/scheduledaction.yaml create mode 100644 examples-generated/appflow/v1beta2/flow.yaml create mode 100644 
examples-generated/appintegrations/v1beta2/eventintegration.yaml create mode 100644 examples-generated/appmesh/v1beta2/gatewayroute.yaml create mode 100644 examples-generated/appmesh/v1beta2/mesh.yaml create mode 100644 examples-generated/appmesh/v1beta2/route.yaml create mode 100644 examples-generated/appmesh/v1beta2/virtualgateway.yaml create mode 100644 examples-generated/appmesh/v1beta2/virtualnode.yaml create mode 100644 examples-generated/appmesh/v1beta2/virtualrouter.yaml create mode 100644 examples-generated/appmesh/v1beta2/virtualservice.yaml create mode 100644 examples-generated/apprunner/v1beta2/observabilityconfiguration.yaml create mode 100644 examples-generated/apprunner/v1beta2/service.yaml create mode 100644 examples-generated/appstream/v1beta2/directoryconfig.yaml create mode 100644 examples-generated/appstream/v1beta2/fleet.yaml create mode 100644 examples-generated/appstream/v1beta2/imagebuilder.yaml create mode 100644 examples-generated/appstream/v1beta2/stack.yaml create mode 100644 examples-generated/appsync/v1beta2/datasource.yaml create mode 100644 examples-generated/appsync/v1beta2/function.yaml create mode 100644 examples-generated/appsync/v1beta2/graphqlapi.yaml create mode 100644 examples-generated/appsync/v1beta2/resolver.yaml create mode 100644 examples-generated/athena/v1beta2/database.yaml create mode 100644 examples-generated/athena/v1beta2/workgroup.yaml create mode 100644 examples-generated/autoscaling/v1beta2/grouptag.yaml create mode 100644 examples-generated/autoscaling/v1beta2/launchconfiguration.yaml create mode 100644 examples-generated/autoscaling/v1beta2/policy.yaml create mode 100644 examples-generated/autoscaling/v1beta3/autoscalinggroup.yaml create mode 100644 examples-generated/autoscalingplans/v1beta2/scalingplan.yaml create mode 100644 examples-generated/backup/v1beta2/framework.yaml create mode 100644 examples-generated/backup/v1beta2/plan.yaml create mode 100644 examples-generated/backup/v1beta2/reportplan.yaml 
create mode 100644 examples-generated/batch/v1beta2/jobdefinition.yaml create mode 100644 examples-generated/batch/v1beta2/schedulingpolicy.yaml create mode 100644 examples-generated/budgets/v1beta2/budget.yaml create mode 100644 examples-generated/budgets/v1beta2/budgetaction.yaml create mode 100644 examples-generated/chime/v1beta2/voiceconnectorstreaming.yaml create mode 100644 examples-generated/cloudformation/v1beta2/stackset.yaml create mode 100644 examples-generated/cloudformation/v1beta2/stacksetinstance.yaml create mode 100644 examples-generated/cloudfront/v1beta2/cachepolicy.yaml create mode 100644 examples-generated/cloudfront/v1beta2/distribution.yaml create mode 100644 examples-generated/cloudfront/v1beta2/fieldlevelencryptionconfig.yaml create mode 100644 examples-generated/cloudfront/v1beta2/fieldlevelencryptionprofile.yaml create mode 100644 examples-generated/cloudfront/v1beta2/monitoringsubscription.yaml create mode 100644 examples-generated/cloudfront/v1beta2/originrequestpolicy.yaml create mode 100644 examples-generated/cloudfront/v1beta2/realtimelogconfig.yaml create mode 100644 examples-generated/cloudfront/v1beta2/responseheaderspolicy.yaml create mode 100644 examples-generated/cloudsearch/v1beta2/domain.yaml create mode 100644 examples-generated/cloudwatch/v1beta2/compositealarm.yaml create mode 100644 examples-generated/cloudwatch/v1beta2/metricalarm.yaml create mode 100644 examples-generated/cloudwatchevents/v1beta2/connection.yaml create mode 100644 examples-generated/cloudwatchevents/v1beta2/permission.yaml create mode 100644 examples-generated/cloudwatchevents/v1beta2/target.yaml create mode 100644 examples-generated/cloudwatchlogs/v1beta2/metricfilter.yaml create mode 100644 examples-generated/codepipeline/v1beta2/codepipeline.yaml create mode 100644 examples-generated/codepipeline/v1beta2/customactiontype.yaml create mode 100644 examples-generated/codepipeline/v1beta2/webhook.yaml create mode 100644 
examples-generated/codestarconnections/v1beta2/host.yaml create mode 100644 examples-generated/cognitoidp/v1beta2/riskconfiguration.yaml create mode 100644 examples-generated/cognitoidp/v1beta2/userpool.yaml create mode 100644 examples-generated/configservice/v1beta2/configrule.yaml create mode 100644 examples-generated/configservice/v1beta2/configurationaggregator.yaml create mode 100644 examples-generated/configservice/v1beta2/configurationrecorder.yaml create mode 100644 examples-generated/configservice/v1beta2/deliverychannel.yaml create mode 100644 examples-generated/configservice/v1beta2/remediationconfiguration.yaml create mode 100644 examples-generated/connect/v1beta2/botassociation.yaml create mode 100644 examples-generated/connect/v1beta2/instancestorageconfig.yaml create mode 100644 examples-generated/connect/v1beta2/quickconnect.yaml create mode 100644 examples-generated/connect/v1beta2/user.yaml create mode 100644 examples-generated/connect/v1beta2/userhierarchystructure.yaml create mode 100644 examples-generated/connect/v1beta3/hoursofoperation.yaml create mode 100644 examples-generated/connect/v1beta3/queue.yaml create mode 100644 examples-generated/datasync/v1beta2/locations3.yaml create mode 100644 examples-generated/datasync/v1beta2/task.yaml create mode 100644 examples-generated/dax/v1beta2/cluster.yaml create mode 100644 examples-generated/deploy/v1beta2/deploymentconfig.yaml create mode 100644 examples-generated/deploy/v1beta2/deploymentgroup.yaml create mode 100644 examples-generated/devicefarm/v1beta2/testgridproject.yaml create mode 100644 examples-generated/dlm/v1beta2/lifecyclepolicy.yaml create mode 100644 examples-generated/dms/v1beta2/endpoint.yaml create mode 100644 examples-generated/ds/v1beta2/directory.yaml create mode 100644 examples-generated/ds/v1beta2/shareddirectory.yaml create mode 100644 examples-generated/dynamodb/v1beta2/table.yaml create mode 100644 examples-generated/ec2/v1beta2/ebssnapshotimport.yaml create mode 100644 
examples-generated/ec2/v1beta2/flowlog.yaml create mode 100644 examples-generated/ec2/v1beta2/instance.yaml create mode 100644 examples-generated/ec2/v1beta2/launchtemplate.yaml create mode 100644 examples-generated/ec2/v1beta2/spotfleetrequest.yaml create mode 100644 examples-generated/ec2/v1beta2/spotinstancerequest.yaml create mode 100644 examples-generated/ec2/v1beta2/trafficmirrorfilterrule.yaml create mode 100644 examples-generated/ec2/v1beta2/vpcendpoint.yaml create mode 100644 examples-generated/ec2/v1beta2/vpcipampoolcidr.yaml create mode 100644 examples-generated/ec2/v1beta2/vpcpeeringconnection.yaml create mode 100644 examples-generated/ec2/v1beta2/vpcpeeringconnectionaccepter.yaml create mode 100644 examples-generated/ec2/v1beta2/vpcpeeringconnectionoptions.yaml create mode 100644 examples-generated/ec2/v1beta2/vpnconnection.yaml create mode 100644 examples-generated/ecr/v1beta2/replicationconfiguration.yaml create mode 100644 examples-generated/ecr/v1beta2/repository.yaml create mode 100644 examples-generated/ecrpublic/v1beta2/repository.yaml create mode 100644 examples-generated/ecs/v1beta2/capacityprovider.yaml create mode 100644 examples-generated/ecs/v1beta2/cluster.yaml create mode 100644 examples-generated/ecs/v1beta2/service.yaml create mode 100644 examples-generated/ecs/v1beta2/taskdefinition.yaml create mode 100644 examples-generated/efs/v1beta2/accesspoint.yaml create mode 100644 examples-generated/efs/v1beta2/backuppolicy.yaml create mode 100644 examples-generated/efs/v1beta2/filesystem.yaml create mode 100644 examples-generated/efs/v1beta2/replicationconfiguration.yaml create mode 100644 examples-generated/eks/v1beta2/cluster.yaml create mode 100644 examples-generated/eks/v1beta2/identityproviderconfig.yaml create mode 100644 examples-generated/eks/v1beta2/nodegroup.yaml create mode 100644 examples-generated/elasticache/v1beta2/user.yaml create mode 100644 examples-generated/elasticbeanstalk/v1beta2/application.yaml create mode 100644 
examples-generated/elasticsearch/v1beta2/domain.yaml create mode 100644 examples-generated/elasticsearch/v1beta2/domainsamloptions.yaml create mode 100644 examples-generated/elastictranscoder/v1beta2/pipeline.yaml create mode 100644 examples-generated/elastictranscoder/v1beta2/preset.yaml create mode 100644 examples-generated/elb/v1beta2/elb.yaml create mode 100644 examples-generated/elbv2/v1beta2/lb.yaml create mode 100644 examples-generated/elbv2/v1beta2/lblistener.yaml create mode 100644 examples-generated/elbv2/v1beta2/lblistenerrule.yaml create mode 100644 examples-generated/elbv2/v1beta2/lbtargetgroup.yaml create mode 100644 examples-generated/emrserverless/v1beta2/application.yaml create mode 100644 examples-generated/evidently/v1beta2/feature.yaml create mode 100644 examples-generated/evidently/v1beta2/project.yaml create mode 100644 examples-generated/firehose/v1beta2/deliverystream.yaml create mode 100644 examples-generated/fis/v1beta2/experimenttemplate.yaml create mode 100644 examples-generated/fsx/v1beta2/datarepositoryassociation.yaml create mode 100644 examples-generated/fsx/v1beta2/lustrefilesystem.yaml create mode 100644 examples-generated/fsx/v1beta2/ontapfilesystem.yaml create mode 100644 examples-generated/fsx/v1beta2/ontapstoragevirtualmachine.yaml create mode 100644 examples-generated/fsx/v1beta2/windowsfilesystem.yaml create mode 100644 examples-generated/gamelift/v1beta2/alias.yaml create mode 100644 examples-generated/gamelift/v1beta2/build.yaml create mode 100644 examples-generated/gamelift/v1beta2/fleet.yaml create mode 100644 examples-generated/gamelift/v1beta2/script.yaml create mode 100644 examples-generated/glacier/v1beta2/vault.yaml create mode 100644 examples-generated/globalaccelerator/v1beta2/accelerator.yaml create mode 100644 examples-generated/glue/v1beta2/catalogdatabase.yaml create mode 100644 examples-generated/glue/v1beta2/catalogtable.yaml create mode 100644 examples-generated/glue/v1beta2/classifier.yaml create mode 
100644 examples-generated/glue/v1beta2/connection.yaml create mode 100644 examples-generated/glue/v1beta2/crawler.yaml create mode 100644 examples-generated/glue/v1beta2/datacatalogencryptionsettings.yaml create mode 100644 examples-generated/glue/v1beta2/job.yaml create mode 100644 examples-generated/glue/v1beta2/securityconfiguration.yaml create mode 100644 examples-generated/glue/v1beta2/trigger.yaml create mode 100644 examples-generated/grafana/v1beta2/workspace.yaml create mode 100644 examples-generated/guardduty/v1beta2/detector.yaml create mode 100644 examples-generated/guardduty/v1beta2/filter.yaml create mode 100644 examples-generated/identitystore/v1beta2/user.yaml create mode 100644 examples-generated/imagebuilder/v1beta2/containerrecipe.yaml create mode 100644 examples-generated/imagebuilder/v1beta2/distributionconfiguration.yaml create mode 100644 examples-generated/imagebuilder/v1beta2/image.yaml create mode 100644 examples-generated/imagebuilder/v1beta2/imagepipeline.yaml create mode 100644 examples-generated/imagebuilder/v1beta2/imagerecipe.yaml create mode 100644 examples-generated/imagebuilder/v1beta2/infrastructureconfiguration.yaml create mode 100644 examples-generated/iot/v1beta2/indexingconfiguration.yaml create mode 100644 examples-generated/iot/v1beta2/provisioningtemplate.yaml create mode 100644 examples-generated/iot/v1beta2/thinggroup.yaml create mode 100644 examples-generated/iot/v1beta2/thingtype.yaml create mode 100644 examples-generated/iot/v1beta2/topicrule.yaml create mode 100644 examples-generated/iot/v1beta2/topicruledestination.yaml create mode 100644 examples-generated/ivs/v1beta2/recordingconfiguration.yaml create mode 100644 examples-generated/kafka/v1beta2/serverlesscluster.yaml create mode 100644 examples-generated/kafka/v1beta3/cluster.yaml create mode 100644 examples-generated/kafkaconnect/v1beta2/connector.yaml create mode 100644 examples-generated/kafkaconnect/v1beta2/customplugin.yaml create mode 100644 
examples-generated/kendra/v1beta2/datasource.yaml create mode 100644 examples-generated/kendra/v1beta2/experience.yaml create mode 100644 examples-generated/kendra/v1beta2/index.yaml create mode 100644 examples-generated/kendra/v1beta2/querysuggestionsblocklist.yaml create mode 100644 examples-generated/kendra/v1beta2/thesaurus.yaml create mode 100644 examples-generated/keyspaces/v1beta2/table.yaml create mode 100644 examples-generated/kinesis/v1beta2/stream.yaml create mode 100644 examples-generated/kinesisanalytics/v1beta2/application.yaml create mode 100644 examples-generated/kinesisanalyticsv2/v1beta2/application.yaml create mode 100644 examples-generated/lakeformation/v1beta2/permissions.yaml create mode 100644 examples-generated/lambda/v1beta2/alias.yaml create mode 100644 examples-generated/lambda/v1beta2/codesigningconfig.yaml create mode 100644 examples-generated/lambda/v1beta2/eventsourcemapping.yaml create mode 100644 examples-generated/lambda/v1beta2/function.yaml create mode 100644 examples-generated/lambda/v1beta2/functioneventinvokeconfig.yaml create mode 100644 examples-generated/lambda/v1beta2/functionurl.yaml create mode 100644 examples-generated/lexmodels/v1beta2/bot.yaml create mode 100644 examples-generated/lexmodels/v1beta2/botalias.yaml create mode 100644 examples-generated/lexmodels/v1beta2/intent.yaml create mode 100644 examples-generated/lightsail/v1beta2/containerservice.yaml create mode 100644 examples-generated/lightsail/v1beta2/instance.yaml create mode 100644 examples-generated/location/v1beta2/placeindex.yaml create mode 100644 examples-generated/macie2/v1beta2/classificationjob.yaml create mode 100644 examples-generated/macie2/v1beta2/findingsfilter.yaml create mode 100644 examples-generated/mediaconvert/v1beta2/queue.yaml create mode 100644 examples-generated/medialive/v1beta2/channel.yaml create mode 100644 examples-generated/medialive/v1beta2/input.yaml create mode 100644 examples-generated/medialive/v1beta2/multiplex.yaml create 
mode 100644 examples-generated/memorydb/v1beta2/user.yaml create mode 100644 examples-generated/mq/v1beta2/broker.yaml create mode 100644 examples-generated/neptune/v1beta2/cluster.yaml create mode 100644 examples-generated/networkfirewall/v1beta2/firewall.yaml create mode 100644 examples-generated/networkfirewall/v1beta2/firewallpolicy.yaml create mode 100644 examples-generated/networkfirewall/v1beta2/loggingconfiguration.yaml create mode 100644 examples-generated/networkfirewall/v1beta2/rulegroup.yaml create mode 100644 examples-generated/networkmanager/v1beta2/connectattachment.yaml create mode 100644 examples-generated/networkmanager/v1beta2/device.yaml create mode 100644 examples-generated/networkmanager/v1beta2/link.yaml create mode 100644 examples-generated/networkmanager/v1beta2/site.yaml create mode 100644 examples-generated/networkmanager/v1beta2/vpcattachment.yaml create mode 100644 examples-generated/opensearch/v1beta2/domain.yaml create mode 100644 examples-generated/opensearch/v1beta2/domainsamloptions.yaml create mode 100644 examples-generated/opensearchserverless/v1beta2/securityconfig.yaml create mode 100644 examples-generated/opsworks/v1beta2/customlayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/ecsclusterlayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/ganglialayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/haproxylayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/javaapplayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/memcachedlayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/mysqllayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/nodejsapplayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/phpapplayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/railsapplayer.yaml create mode 100644 examples-generated/opsworks/v1beta2/stack.yaml create mode 100644 examples-generated/opsworks/v1beta2/staticweblayer.yaml 
create mode 100644 examples-generated/pinpoint/v1beta2/app.yaml create mode 100644 examples-generated/qldb/v1beta2/stream.yaml create mode 100644 examples-generated/rds/v1beta2/cluster.yaml create mode 100644 examples-generated/rds/v1beta2/proxydefaulttargetgroup.yaml create mode 100644 examples-generated/rds/v1beta3/instance.yaml create mode 100644 examples-generated/redshift/v1beta2/cluster.yaml create mode 100644 examples-generated/redshift/v1beta2/scheduledaction.yaml create mode 100644 examples-generated/resourcegroups/v1beta2/group.yaml create mode 100644 examples-generated/route53/v1beta2/record.yaml create mode 100644 examples-generated/route53recoverycontrolconfig/v1beta2/safetyrule.yaml create mode 100644 examples-generated/route53recoveryreadiness/v1beta2/resourceset.yaml create mode 100644 examples-generated/rum/v1beta2/appmonitor.yaml create mode 100644 examples-generated/s3/v1beta2/bucket.yaml create mode 100644 examples-generated/s3/v1beta2/bucketacl.yaml create mode 100644 examples-generated/s3/v1beta2/bucketanalyticsconfiguration.yaml create mode 100644 examples-generated/s3/v1beta2/bucketintelligenttieringconfiguration.yaml create mode 100644 examples-generated/s3/v1beta2/bucketinventory.yaml create mode 100644 examples-generated/s3/v1beta2/bucketlifecycleconfiguration.yaml create mode 100644 examples-generated/s3/v1beta2/bucketlogging.yaml create mode 100644 examples-generated/s3/v1beta2/bucketmetric.yaml create mode 100644 examples-generated/s3/v1beta2/bucketobjectlockconfiguration.yaml create mode 100644 examples-generated/s3/v1beta2/bucketownershipcontrols.yaml create mode 100644 examples-generated/s3/v1beta2/bucketreplicationconfiguration.yaml create mode 100644 examples-generated/s3/v1beta2/bucketserversideencryptionconfiguration.yaml create mode 100644 examples-generated/s3/v1beta2/bucketversioning.yaml create mode 100644 examples-generated/s3/v1beta2/bucketwebsiteconfiguration.yaml create mode 100644 
examples-generated/s3/v1beta2/object.yaml create mode 100644 examples-generated/s3control/v1beta2/accesspoint.yaml create mode 100644 examples-generated/s3control/v1beta2/multiregionaccesspoint.yaml create mode 100644 examples-generated/s3control/v1beta2/multiregionaccesspointpolicy.yaml create mode 100644 examples-generated/s3control/v1beta2/objectlambdaaccesspoint.yaml create mode 100644 examples-generated/s3control/v1beta2/storagelensconfiguration.yaml create mode 100644 examples-generated/sagemaker/v1beta2/app.yaml create mode 100644 examples-generated/sagemaker/v1beta2/appimageconfig.yaml create mode 100644 examples-generated/sagemaker/v1beta2/coderepository.yaml create mode 100644 examples-generated/sagemaker/v1beta2/device.yaml create mode 100644 examples-generated/sagemaker/v1beta2/devicefleet.yaml create mode 100644 examples-generated/sagemaker/v1beta2/domain.yaml create mode 100644 examples-generated/sagemaker/v1beta2/endpoint.yaml create mode 100644 examples-generated/sagemaker/v1beta2/endpointconfiguration.yaml create mode 100644 examples-generated/sagemaker/v1beta2/featuregroup.yaml create mode 100644 examples-generated/sagemaker/v1beta2/model.yaml create mode 100644 examples-generated/sagemaker/v1beta2/notebookinstance.yaml create mode 100644 examples-generated/sagemaker/v1beta2/space.yaml create mode 100644 examples-generated/sagemaker/v1beta2/userprofile.yaml create mode 100644 examples-generated/sagemaker/v1beta2/workforce.yaml create mode 100644 examples-generated/sagemaker/v1beta2/workteam.yaml create mode 100644 examples-generated/scheduler/v1beta2/schedule.yaml create mode 100644 examples-generated/secretsmanager/v1beta2/secretrotation.yaml create mode 100644 examples-generated/securityhub/v1beta2/insight.yaml create mode 100644 examples-generated/servicecatalog/v1beta2/product.yaml create mode 100644 examples-generated/servicecatalog/v1beta2/serviceaction.yaml create mode 100644 examples-generated/servicediscovery/v1beta2/service.yaml create 
mode 100644 examples-generated/ses/v1beta2/configurationset.yaml create mode 100644 examples-generated/ses/v1beta2/eventdestination.yaml create mode 100644 examples-generated/sesv2/v1beta2/configurationset.yaml create mode 100644 examples-generated/sesv2/v1beta2/configurationseteventdestination.yaml create mode 100644 examples-generated/sesv2/v1beta2/emailidentity.yaml create mode 100644 examples-generated/sfn/v1beta2/statemachine.yaml create mode 100644 examples-generated/signer/v1beta2/signingjob.yaml create mode 100644 examples-generated/signer/v1beta2/signingprofile.yaml create mode 100644 examples-generated/ssm/v1beta2/association.yaml create mode 100644 examples-generated/ssm/v1beta2/maintenancewindowtask.yaml create mode 100644 examples-generated/ssm/v1beta2/resourcedatasync.yaml create mode 100644 examples-generated/ssoadmin/v1beta2/customermanagedpolicyattachment.yaml create mode 100644 examples-generated/ssoadmin/v1beta2/permissionsboundaryattachment.yaml create mode 100644 examples-generated/timestreamwrite/v1beta2/table.yaml create mode 100644 examples-generated/transcribe/v1beta2/languagemodel.yaml create mode 100644 examples-generated/transfer/v1beta2/server.yaml create mode 100644 examples-generated/transfer/v1beta2/user.yaml create mode 100644 examples-generated/transfer/v1beta2/workflow.yaml create mode 100644 examples-generated/waf/v1beta2/bytematchset.yaml create mode 100644 examples-generated/waf/v1beta2/regexmatchset.yaml create mode 100644 examples-generated/waf/v1beta2/sizeconstraintset.yaml create mode 100644 examples-generated/waf/v1beta2/sqlinjectionmatchset.yaml create mode 100644 examples-generated/waf/v1beta2/webacl.yaml create mode 100644 examples-generated/waf/v1beta2/xssmatchset.yaml create mode 100644 examples-generated/wafregional/v1beta2/bytematchset.yaml create mode 100644 examples-generated/wafregional/v1beta2/regexmatchset.yaml create mode 100644 examples-generated/wafregional/v1beta2/sizeconstraintset.yaml create mode 100644 
examples-generated/wafregional/v1beta2/sqlinjectionmatchset.yaml create mode 100644 examples-generated/wafregional/v1beta2/webacl.yaml create mode 100644 examples-generated/wafregional/v1beta2/xssmatchset.yaml create mode 100644 examples-generated/workspaces/v1beta2/directory.yaml create mode 100644 examples-generated/xray/v1beta2/group.yaml diff --git a/apis/accessanalyzer/v1beta1/zz_generated.conversion_hubs.go b/apis/accessanalyzer/v1beta1/zz_generated.conversion_hubs.go index 9a24c6b4cb..99ddba00da 100755 --- a/apis/accessanalyzer/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/accessanalyzer/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Analyzer) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ArchiveRule) Hub() {} diff --git a/apis/accessanalyzer/v1beta1/zz_generated.conversion_spokes.go b/apis/accessanalyzer/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..5981d676df --- /dev/null +++ b/apis/accessanalyzer/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Analyzer to the hub type. 
+func (tr *Analyzer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Analyzer type. +func (tr *Analyzer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/accessanalyzer/v1beta2/zz_analyzer_terraformed.go b/apis/accessanalyzer/v1beta2/zz_analyzer_terraformed.go new file mode 100755 index 0000000000..5842e57433 --- /dev/null +++ b/apis/accessanalyzer/v1beta2/zz_analyzer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Analyzer +func (mg *Analyzer) GetTerraformResourceType() string { + return "aws_accessanalyzer_analyzer" +} + +// GetConnectionDetailsMapping for this Analyzer +func (tr *Analyzer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Analyzer +func (tr *Analyzer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Analyzer +func (tr *Analyzer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Analyzer +func (tr *Analyzer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Analyzer +func (tr *Analyzer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Analyzer +func (tr *Analyzer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Analyzer +func (tr *Analyzer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Analyzer +func (tr *Analyzer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Analyzer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Analyzer) LateInitialize(attrs []byte) (bool, error) { + params := &AnalyzerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Analyzer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/accessanalyzer/v1beta2/zz_analyzer_types.go b/apis/accessanalyzer/v1beta2/zz_analyzer_types.go new file mode 100755 index 0000000000..c3880a1b56 --- /dev/null +++ b/apis/accessanalyzer/v1beta2/zz_analyzer_types.go @@ -0,0 +1,169 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AnalyzerInitParameters struct { + + // A block that specifies the configuration of the analyzer. Documented below + Configuration *ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Type of Analyzer. Valid values are ACCOUNT, ORGANIZATION, ACCOUNT_UNUSED_ACCESS , ORGANIZATION_UNUSED_ACCESS. Defaults to ACCOUNT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AnalyzerObservation struct { + + // ARN of the Analyzer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A block that specifies the configuration of the analyzer. Documented below + Configuration *ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Analyzer name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Type of Analyzer. Valid values are ACCOUNT, ORGANIZATION, ACCOUNT_UNUSED_ACCESS , ORGANIZATION_UNUSED_ACCESS. Defaults to ACCOUNT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AnalyzerParameters struct { + + // A block that specifies the configuration of the analyzer. Documented below + // +kubebuilder:validation:Optional + Configuration *ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Type of Analyzer. Valid values are ACCOUNT, ORGANIZATION, ACCOUNT_UNUSED_ACCESS , ORGANIZATION_UNUSED_ACCESS. Defaults to ACCOUNT. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ConfigurationInitParameters struct { + + // A block that specifies the configuration of an unused access analyzer for an AWS organization or account. Documented below + UnusedAccess *UnusedAccessInitParameters `json:"unusedAccess,omitempty" tf:"unused_access,omitempty"` +} + +type ConfigurationObservation struct { + + // A block that specifies the configuration of an unused access analyzer for an AWS organization or account. Documented below + UnusedAccess *UnusedAccessObservation `json:"unusedAccess,omitempty" tf:"unused_access,omitempty"` +} + +type ConfigurationParameters struct { + + // A block that specifies the configuration of an unused access analyzer for an AWS organization or account. 
Documented below + // +kubebuilder:validation:Optional + UnusedAccess *UnusedAccessParameters `json:"unusedAccess,omitempty" tf:"unused_access,omitempty"` +} + +type UnusedAccessInitParameters struct { + + // The specified access age in days for which to generate findings for unused access. + UnusedAccessAge *float64 `json:"unusedAccessAge,omitempty" tf:"unused_access_age,omitempty"` +} + +type UnusedAccessObservation struct { + + // The specified access age in days for which to generate findings for unused access. + UnusedAccessAge *float64 `json:"unusedAccessAge,omitempty" tf:"unused_access_age,omitempty"` +} + +type UnusedAccessParameters struct { + + // The specified access age in days for which to generate findings for unused access. + // +kubebuilder:validation:Optional + UnusedAccessAge *float64 `json:"unusedAccessAge,omitempty" tf:"unused_access_age,omitempty"` +} + +// AnalyzerSpec defines the desired state of Analyzer +type AnalyzerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AnalyzerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AnalyzerInitParameters `json:"initProvider,omitempty"` +} + +// AnalyzerStatus defines the observed state of Analyzer. 
+type AnalyzerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AnalyzerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Analyzer is the Schema for the Analyzers API. Manages an Access Analyzer Analyzer +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Analyzer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AnalyzerSpec `json:"spec"` + Status AnalyzerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AnalyzerList contains a list of Analyzers +type AnalyzerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Analyzer `json:"items"` +} + +// Repository type metadata. +var ( + Analyzer_Kind = "Analyzer" + Analyzer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Analyzer_Kind}.String() + Analyzer_KindAPIVersion = Analyzer_Kind + "." 
+ CRDGroupVersion.String() + Analyzer_GroupVersionKind = CRDGroupVersion.WithKind(Analyzer_Kind) +) + +func init() { + SchemeBuilder.Register(&Analyzer{}, &AnalyzerList{}) +} diff --git a/apis/accessanalyzer/v1beta2/zz_generated.conversion_hubs.go b/apis/accessanalyzer/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..4365cfb23f --- /dev/null +++ b/apis/accessanalyzer/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Analyzer) Hub() {} diff --git a/apis/accessanalyzer/v1beta2/zz_generated.deepcopy.go b/apis/accessanalyzer/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..222d804297 --- /dev/null +++ b/apis/accessanalyzer/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,381 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Analyzer) DeepCopyInto(out *Analyzer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Analyzer. +func (in *Analyzer) DeepCopy() *Analyzer { + if in == nil { + return nil + } + out := new(Analyzer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Analyzer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyzerInitParameters) DeepCopyInto(out *AnalyzerInitParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyzerInitParameters. +func (in *AnalyzerInitParameters) DeepCopy() *AnalyzerInitParameters { + if in == nil { + return nil + } + out := new(AnalyzerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyzerList) DeepCopyInto(out *AnalyzerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Analyzer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyzerList. +func (in *AnalyzerList) DeepCopy() *AnalyzerList { + if in == nil { + return nil + } + out := new(AnalyzerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AnalyzerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyzerObservation) DeepCopyInto(out *AnalyzerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyzerObservation. +func (in *AnalyzerObservation) DeepCopy() *AnalyzerObservation { + if in == nil { + return nil + } + out := new(AnalyzerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AnalyzerParameters) DeepCopyInto(out *AnalyzerParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyzerParameters. +func (in *AnalyzerParameters) DeepCopy() *AnalyzerParameters { + if in == nil { + return nil + } + out := new(AnalyzerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyzerSpec) DeepCopyInto(out *AnalyzerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyzerSpec. +func (in *AnalyzerSpec) DeepCopy() *AnalyzerSpec { + if in == nil { + return nil + } + out := new(AnalyzerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AnalyzerStatus) DeepCopyInto(out *AnalyzerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyzerStatus. +func (in *AnalyzerStatus) DeepCopy() *AnalyzerStatus { + if in == nil { + return nil + } + out := new(AnalyzerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.UnusedAccess != nil { + in, out := &in.UnusedAccess, &out.UnusedAccess + *out = new(UnusedAccessInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.UnusedAccess != nil { + in, out := &in.UnusedAccess, &out.UnusedAccess + *out = new(UnusedAccessObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.UnusedAccess != nil { + in, out := &in.UnusedAccess, &out.UnusedAccess + *out = new(UnusedAccessParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnusedAccessInitParameters) DeepCopyInto(out *UnusedAccessInitParameters) { + *out = *in + if in.UnusedAccessAge != nil { + in, out := &in.UnusedAccessAge, &out.UnusedAccessAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnusedAccessInitParameters. +func (in *UnusedAccessInitParameters) DeepCopy() *UnusedAccessInitParameters { + if in == nil { + return nil + } + out := new(UnusedAccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnusedAccessObservation) DeepCopyInto(out *UnusedAccessObservation) { + *out = *in + if in.UnusedAccessAge != nil { + in, out := &in.UnusedAccessAge, &out.UnusedAccessAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnusedAccessObservation. +func (in *UnusedAccessObservation) DeepCopy() *UnusedAccessObservation { + if in == nil { + return nil + } + out := new(UnusedAccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UnusedAccessParameters) DeepCopyInto(out *UnusedAccessParameters) { + *out = *in + if in.UnusedAccessAge != nil { + in, out := &in.UnusedAccessAge, &out.UnusedAccessAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnusedAccessParameters. +func (in *UnusedAccessParameters) DeepCopy() *UnusedAccessParameters { + if in == nil { + return nil + } + out := new(UnusedAccessParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/accessanalyzer/v1beta2/zz_generated.managed.go b/apis/accessanalyzer/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..388dc07647 --- /dev/null +++ b/apis/accessanalyzer/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Analyzer. +func (mg *Analyzer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Analyzer. +func (mg *Analyzer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Analyzer. +func (mg *Analyzer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Analyzer. +func (mg *Analyzer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Analyzer. +func (mg *Analyzer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Analyzer. 
+func (mg *Analyzer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Analyzer. +func (mg *Analyzer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Analyzer. +func (mg *Analyzer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Analyzer. +func (mg *Analyzer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Analyzer. +func (mg *Analyzer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Analyzer. +func (mg *Analyzer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Analyzer. +func (mg *Analyzer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/accessanalyzer/v1beta2/zz_generated.managedlist.go b/apis/accessanalyzer/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..e14f9fad68 --- /dev/null +++ b/apis/accessanalyzer/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AnalyzerList. 
+func (l *AnalyzerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/accessanalyzer/v1beta2/zz_groupversion_info.go b/apis/accessanalyzer/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..71a521208e --- /dev/null +++ b/apis/accessanalyzer/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=accessanalyzer.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "accessanalyzer.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/acm/v1beta1/zz_certificatevalidation_types.go b/apis/acm/v1beta1/zz_certificatevalidation_types.go index 59ca1af27e..762089e116 100755 --- a/apis/acm/v1beta1/zz_certificatevalidation_types.go +++ b/apis/acm/v1beta1/zz_certificatevalidation_types.go @@ -16,7 +16,7 @@ import ( type CertificateValidationInitParameters struct { // ARN of the certificate that is being validated. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` // Reference to a Certificate in acm to populate certificateArn. @@ -48,7 +48,7 @@ type CertificateValidationObservation struct { type CertificateValidationParameters struct { // ARN of the certificate that is being validated. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate // +kubebuilder:validation:Optional CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` diff --git a/apis/acm/v1beta1/zz_generated.conversion_hubs.go b/apis/acm/v1beta1/zz_generated.conversion_hubs.go index 269bb14528..587794368f 100755 --- a/apis/acm/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/acm/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Certificate) Hub() {} - // Hub marks this type as a conversion hub. func (tr *CertificateValidation) Hub() {} diff --git a/apis/acm/v1beta1/zz_generated.conversion_spokes.go b/apis/acm/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..2a883a567c --- /dev/null +++ b/apis/acm/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Certificate to the hub type. 
+func (tr *Certificate) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Certificate type. +func (tr *Certificate) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/acm/v1beta1/zz_generated.resolvers.go b/apis/acm/v1beta1/zz_generated.resolvers.go index f98b264b7c..37fe17bd6b 100644 --- a/apis/acm/v1beta1/zz_generated.resolvers.go +++ b/apis/acm/v1beta1/zz_generated.resolvers.go @@ -16,7 +16,7 @@ import ( client "sigs.k8s.io/controller-runtime/pkg/client" ) -func (mg *Certificate) ResolveReferences( // ResolveReferences of this Certificate. +func (mg *CertificateValidation) ResolveReferences( // ResolveReferences of this CertificateValidation. 
ctx context.Context, c client.Reader) error { var m xpresource.Managed var l xpresource.ManagedList @@ -25,57 +25,7 @@ func (mg *Certificate) ResolveReferences( // ResolveReferences of this Certifica var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") - if err != nil { - return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") - } - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CertificateAuthorityArn), - Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.CertificateAuthorityArnRef, - Selector: mg.Spec.ForProvider.CertificateAuthorityArnSelector, - To: reference.To{List: l, Managed: m}, - }) - } - if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.CertificateAuthorityArn") - } - mg.Spec.ForProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.CertificateAuthorityArnRef = rsp.ResolvedReference - { - m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") - if err != nil { - return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") - } - - rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CertificateAuthorityArn), - Extract: reference.ExternalName(), - Reference: mg.Spec.InitProvider.CertificateAuthorityArnRef, - Selector: mg.Spec.InitProvider.CertificateAuthorityArnSelector, - To: reference.To{List: l, Managed: m}, - }) - } - if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.CertificateAuthorityArn") - } - mg.Spec.InitProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) - 
mg.Spec.InitProvider.CertificateAuthorityArnRef = rsp.ResolvedReference - - return nil -} - -// ResolveReferences of this CertificateValidation. -func (mg *CertificateValidation) ResolveReferences(ctx context.Context, c client.Reader) error { - var m xpresource.Managed - var l xpresource.ManagedList - r := reference.NewAPIResolver(c, mg) - - var rsp reference.ResolutionResponse - var err error - { - m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -94,7 +44,7 @@ func (mg *CertificateValidation) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CertificateArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/acm/v1beta2/zz_certificate_terraformed.go b/apis/acm/v1beta2/zz_certificate_terraformed.go new file mode 100755 index 0000000000..9870addbdd --- /dev/null +++ b/apis/acm/v1beta2/zz_certificate_terraformed.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Certificate +func (mg *Certificate) GetTerraformResourceType() string { + return "aws_acm_certificate" +} + +// GetConnectionDetailsMapping for this Certificate +func (tr *Certificate) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"private_key": "privateKeySecretRef"} +} + +// GetObservation of this Certificate +func (tr *Certificate) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Certificate +func (tr *Certificate) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Certificate +func (tr *Certificate) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Certificate +func (tr *Certificate) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Certificate +func (tr *Certificate) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Certificate +func (tr *Certificate) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Certificate +func (tr *Certificate) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Certificate using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Certificate) LateInitialize(attrs []byte) (bool, error) { + params := &CertificateParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("CertificateBody")) + opts = append(opts, resource.WithNameFilter("KeyAlgorithm")) + opts = append(opts, resource.WithNameFilter("Options")) + opts = append(opts, resource.WithNameFilter("SubjectAlternativeNames")) + opts = append(opts, resource.WithNameFilter("ValidationMethod")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Certificate) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/acm/v1beta2/zz_certificate_types.go b/apis/acm/v1beta2/zz_certificate_types.go new file mode 100755 index 0000000000..6c2773aa05 --- /dev/null +++ b/apis/acm/v1beta2/zz_certificate_types.go @@ -0,0 +1,348 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CertificateInitParameters struct { + + // ARN of an ACM PCA + CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` + + // Certificate's PEM-formatted public key + CertificateBody *string `json:"certificateBody,omitempty" tf:"certificate_body,omitempty"` + + // Certificate's PEM-formatted chain + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Domain name for which the certificate should be issued + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Amount of time to start automatic renewal process before expiration. + // Has no effect if less than 60 days. + // Represented by either + // a subset of RFC 3339 duration supporting years, months, and days (e.g., P90D), + // or a string such as 2160h. + EarlyRenewalDuration *string `json:"earlyRenewalDuration,omitempty" tf:"early_renewal_duration,omitempty"` + + // Specifies the algorithm of the public and private key pair that your Amazon issued certificate uses to encrypt data. See ACM Certificate characteristics for more details. + KeyAlgorithm *string `json:"keyAlgorithm,omitempty" tf:"key_algorithm,omitempty"` + + // Configuration block used to set certificate options. Detailed below. + Options *OptionsInitParameters `json:"options,omitempty" tf:"options,omitempty"` + + // Certificate's PEM-formatted private key + PrivateKeySecretRef *v1.SecretKeySelector `json:"privateKeySecretRef,omitempty" tf:"-"` + + // Set of domains that should be SANs in the issued certificate. + // +listType=set + SubjectAlternativeNames []*string `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Which method to use for validation. DNS or EMAIL are valid. + ValidationMethod *string `json:"validationMethod,omitempty" tf:"validation_method,omitempty"` + + // Configuration block used to specify information about the initial validation of each domain name. Detailed below. + ValidationOption []ValidationOptionInitParameters `json:"validationOption,omitempty" tf:"validation_option,omitempty"` +} + +type CertificateObservation struct { + + // ARN of the certificate + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN of an ACM PCA + CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` + + // Certificate's PEM-formatted public key + CertificateBody *string `json:"certificateBody,omitempty" tf:"certificate_body,omitempty"` + + // Certificate's PEM-formatted chain + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Domain name for which the certificate should be issued + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Set of domain validation objects which can be used to complete certificate validation. + // Can have more than one element, e.g., if SANs are defined. + // Only set if DNS-validation was used. + DomainValidationOptions []DomainValidationOptionsObservation `json:"domainValidationOptions,omitempty" tf:"domain_validation_options,omitempty"` + + // Amount of time to start automatic renewal process before expiration. + // Has no effect if less than 60 days. + // Represented by either + // a subset of RFC 3339 duration supporting years, months, and days (e.g., P90D), + // or a string such as 2160h. 
+ EarlyRenewalDuration *string `json:"earlyRenewalDuration,omitempty" tf:"early_renewal_duration,omitempty"` + + // ARN of the certificate + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the algorithm of the public and private key pair that your Amazon issued certificate uses to encrypt data. See ACM Certificate characteristics for more details. + KeyAlgorithm *string `json:"keyAlgorithm,omitempty" tf:"key_algorithm,omitempty"` + + // Expiration date and time of the certificate. + NotAfter *string `json:"notAfter,omitempty" tf:"not_after,omitempty"` + + // Start of the validity period of the certificate. + NotBefore *string `json:"notBefore,omitempty" tf:"not_before,omitempty"` + + // Configuration block used to set certificate options. Detailed below. + Options *OptionsObservation `json:"options,omitempty" tf:"options,omitempty"` + + // true if a Private certificate eligible for managed renewal is within the early_renewal_duration period. + PendingRenewal *bool `json:"pendingRenewal,omitempty" tf:"pending_renewal,omitempty"` + + // Whether the certificate is eligible for managed renewal. + RenewalEligibility *string `json:"renewalEligibility,omitempty" tf:"renewal_eligibility,omitempty"` + + // Contains information about the status of ACM's managed renewal for the certificate. + RenewalSummary []RenewalSummaryObservation `json:"renewalSummary,omitempty" tf:"renewal_summary,omitempty"` + + // Status of the certificate. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Set of domains that should be SANs in the issued certificate. + // +listType=set + SubjectAlternativeNames []*string `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Source of the certificate. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // List of addresses that received a validation email. Only set if EMAIL validation was used. + ValidationEmails []*string `json:"validationEmails,omitempty" tf:"validation_emails,omitempty"` + + // Which method to use for validation. DNS or EMAIL are valid. + ValidationMethod *string `json:"validationMethod,omitempty" tf:"validation_method,omitempty"` + + // Configuration block used to specify information about the initial validation of each domain name. Detailed below. + ValidationOption []ValidationOptionObservation `json:"validationOption,omitempty" tf:"validation_option,omitempty"` +} + +type CertificateParameters struct { + + // ARN of an ACM PCA + // +kubebuilder:validation:Optional + CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` + + // Certificate's PEM-formatted public key + // +kubebuilder:validation:Optional + CertificateBody *string `json:"certificateBody,omitempty" tf:"certificate_body,omitempty"` + + // Certificate's PEM-formatted chain + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Domain name for which the certificate should be issued + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Amount of time to start automatic renewal process before expiration. + // Has no effect if less than 60 days. + // Represented by either + // a subset of RFC 3339 duration supporting years, months, and days (e.g., P90D), + // or a string such as 2160h. 
+ // +kubebuilder:validation:Optional + EarlyRenewalDuration *string `json:"earlyRenewalDuration,omitempty" tf:"early_renewal_duration,omitempty"` + + // Specifies the algorithm of the public and private key pair that your Amazon issued certificate uses to encrypt data. See ACM Certificate characteristics for more details. + // +kubebuilder:validation:Optional + KeyAlgorithm *string `json:"keyAlgorithm,omitempty" tf:"key_algorithm,omitempty"` + + // Configuration block used to set certificate options. Detailed below. + // +kubebuilder:validation:Optional + Options *OptionsParameters `json:"options,omitempty" tf:"options,omitempty"` + + // Certificate's PEM-formatted private key + // +kubebuilder:validation:Optional + PrivateKeySecretRef *v1.SecretKeySelector `json:"privateKeySecretRef,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Set of domains that should be SANs in the issued certificate. + // +kubebuilder:validation:Optional + // +listType=set + SubjectAlternativeNames []*string `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Which method to use for validation. DNS or EMAIL are valid. + // +kubebuilder:validation:Optional + ValidationMethod *string `json:"validationMethod,omitempty" tf:"validation_method,omitempty"` + + // Configuration block used to specify information about the initial validation of each domain name. Detailed below. 
+ // +kubebuilder:validation:Optional + ValidationOption []ValidationOptionParameters `json:"validationOption,omitempty" tf:"validation_option,omitempty"` +} + +type DomainValidationOptionsInitParameters struct { +} + +type DomainValidationOptionsObservation struct { + + // Fully qualified domain name (FQDN) in the certificate. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The name of the DNS record to create to validate the certificate + ResourceRecordName *string `json:"resourceRecordName,omitempty" tf:"resource_record_name,omitempty"` + + // The type of DNS record to create + ResourceRecordType *string `json:"resourceRecordType,omitempty" tf:"resource_record_type,omitempty"` + + // The value the DNS record needs to have + ResourceRecordValue *string `json:"resourceRecordValue,omitempty" tf:"resource_record_value,omitempty"` +} + +type DomainValidationOptionsParameters struct { +} + +type OptionsInitParameters struct { + + // Whether certificate details should be added to a certificate transparency log. Valid values are ENABLED or DISABLED. See https://docs.aws.amazon.com/acm/latest/userguide/acm-concepts.html#concept-transparency for more details. + CertificateTransparencyLoggingPreference *string `json:"certificateTransparencyLoggingPreference,omitempty" tf:"certificate_transparency_logging_preference,omitempty"` +} + +type OptionsObservation struct { + + // Whether certificate details should be added to a certificate transparency log. Valid values are ENABLED or DISABLED. See https://docs.aws.amazon.com/acm/latest/userguide/acm-concepts.html#concept-transparency for more details. + CertificateTransparencyLoggingPreference *string `json:"certificateTransparencyLoggingPreference,omitempty" tf:"certificate_transparency_logging_preference,omitempty"` +} + +type OptionsParameters struct { + + // Whether certificate details should be added to a certificate transparency log. Valid values are ENABLED or DISABLED. 
See https://docs.aws.amazon.com/acm/latest/userguide/acm-concepts.html#concept-transparency for more details. + // +kubebuilder:validation:Optional + CertificateTransparencyLoggingPreference *string `json:"certificateTransparencyLoggingPreference,omitempty" tf:"certificate_transparency_logging_preference,omitempty"` +} + +type RenewalSummaryInitParameters struct { +} + +type RenewalSummaryObservation struct { + + // The status of ACM's managed renewal of the certificate + RenewalStatus *string `json:"renewalStatus,omitempty" tf:"renewal_status,omitempty"` + + // The reason that a renewal request was unsuccessful or is pending + RenewalStatusReason *string `json:"renewalStatusReason,omitempty" tf:"renewal_status_reason,omitempty"` + + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` +} + +type RenewalSummaryParameters struct { +} + +type ValidationOptionInitParameters struct { + + // Fully qualified domain name (FQDN) in the certificate. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Domain name that you want ACM to use to send you validation emails. This domain name is the suffix of the email addresses that you want ACM to use. This must be the same as the domain_name value or a superdomain of the domain_name value. For example, if you request a certificate for "testing.example.com", you can specify "example.com" for this value. + ValidationDomain *string `json:"validationDomain,omitempty" tf:"validation_domain,omitempty"` +} + +type ValidationOptionObservation struct { + + // Fully qualified domain name (FQDN) in the certificate. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Domain name that you want ACM to use to send you validation emails. This domain name is the suffix of the email addresses that you want ACM to use. This must be the same as the domain_name value or a superdomain of the domain_name value. 
For example, if you request a certificate for "testing.example.com", you can specify "example.com" for this value. + ValidationDomain *string `json:"validationDomain,omitempty" tf:"validation_domain,omitempty"` +} + +type ValidationOptionParameters struct { + + // Fully qualified domain name (FQDN) in the certificate. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // Domain name that you want ACM to use to send you validation emails. This domain name is the suffix of the email addresses that you want ACM to use. This must be the same as the domain_name value or a superdomain of the domain_name value. For example, if you request a certificate for "testing.example.com", you can specify "example.com" for this value. + // +kubebuilder:validation:Optional + ValidationDomain *string `json:"validationDomain" tf:"validation_domain,omitempty"` +} + +// CertificateSpec defines the desired state of Certificate +type CertificateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CertificateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CertificateInitParameters `json:"initProvider,omitempty"` +} + +// CertificateStatus defines the observed state of Certificate. 
+type CertificateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CertificateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Certificate is the Schema for the Certificates API. Requests and manages a certificate from Amazon Certificate Manager (ACM). +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Certificate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CertificateSpec `json:"spec"` + Status CertificateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CertificateList contains a list of Certificates +type CertificateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Certificate `json:"items"` +} + +// Repository type metadata. +var ( + Certificate_Kind = "Certificate" + Certificate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Certificate_Kind}.String() + Certificate_KindAPIVersion = Certificate_Kind + "." 
+ CRDGroupVersion.String() + Certificate_GroupVersionKind = CRDGroupVersion.WithKind(Certificate_Kind) +) + +func init() { + SchemeBuilder.Register(&Certificate{}, &CertificateList{}) +} diff --git a/apis/acm/v1beta2/zz_generated.conversion_hubs.go b/apis/acm/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..26f6b60eeb --- /dev/null +++ b/apis/acm/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Certificate) Hub() {} diff --git a/apis/acm/v1beta2/zz_generated.deepcopy.go b/apis/acm/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..06ee9f8d60 --- /dev/null +++ b/apis/acm/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,731 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Certificate) DeepCopyInto(out *Certificate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Certificate. +func (in *Certificate) DeepCopy() *Certificate { + if in == nil { + return nil + } + out := new(Certificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Certificate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.CertificateAuthorityArn != nil { + in, out := &in.CertificateAuthorityArn, &out.CertificateAuthorityArn + *out = new(string) + **out = **in + } + if in.CertificateBody != nil { + in, out := &in.CertificateBody, &out.CertificateBody + *out = new(string) + **out = **in + } + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.EarlyRenewalDuration != nil { + in, out := &in.EarlyRenewalDuration, &out.EarlyRenewalDuration + *out = new(string) + **out = **in + } + if in.KeyAlgorithm != nil { + in, out := &in.KeyAlgorithm, &out.KeyAlgorithm + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateKeySecretRef != nil { + in, out := &in.PrivateKeySecretRef, &out.PrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } + if in.ValidationMethod != nil { + in, out := &in.ValidationMethod, &out.ValidationMethod + *out = new(string) + **out = **in + } + if in.ValidationOption != nil { + in, out := &in.ValidationOption, &out.ValidationOption + *out = make([]ValidationOptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. +func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateList) DeepCopyInto(out *CertificateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Certificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateList. +func (in *CertificateList) DeepCopy() *CertificateList { + if in == nil { + return nil + } + out := new(CertificateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CertificateAuthorityArn != nil { + in, out := &in.CertificateAuthorityArn, &out.CertificateAuthorityArn + *out = new(string) + **out = **in + } + if in.CertificateBody != nil { + in, out := &in.CertificateBody, &out.CertificateBody + *out = new(string) + **out = **in + } + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainValidationOptions != nil { + in, out := &in.DomainValidationOptions, &out.DomainValidationOptions + *out = make([]DomainValidationOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EarlyRenewalDuration != nil { + in, out := &in.EarlyRenewalDuration, &out.EarlyRenewalDuration + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyAlgorithm != nil { + in, out := &in.KeyAlgorithm, &out.KeyAlgorithm + *out = new(string) + **out = **in + } + if in.NotAfter != nil { + in, out := &in.NotAfter, &out.NotAfter + *out = new(string) + **out = **in + } + if in.NotBefore != nil { + in, out := &in.NotBefore, &out.NotBefore + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.PendingRenewal != nil { + in, out := &in.PendingRenewal, &out.PendingRenewal + *out = new(bool) + **out = **in + } + if in.RenewalEligibility != nil { + in, out := &in.RenewalEligibility, &out.RenewalEligibility + *out = new(string) + **out = **in + } + if in.RenewalSummary != nil { + in, out := &in.RenewalSummary, &out.RenewalSummary + *out = 
make([]RenewalSummaryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.ValidationEmails != nil { + in, out := &in.ValidationEmails, &out.ValidationEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ValidationMethod != nil { + in, out := &in.ValidationMethod, &out.ValidationMethod + *out = new(string) + **out = **in + } + if in.ValidationOption != nil { + in, out := &in.ValidationOption, &out.ValidationOption + *out = make([]ValidationOptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. 
+func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.CertificateAuthorityArn != nil { + in, out := &in.CertificateAuthorityArn, &out.CertificateAuthorityArn + *out = new(string) + **out = **in + } + if in.CertificateBody != nil { + in, out := &in.CertificateBody, &out.CertificateBody + *out = new(string) + **out = **in + } + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.EarlyRenewalDuration != nil { + in, out := &in.EarlyRenewalDuration, &out.EarlyRenewalDuration + *out = new(string) + **out = **in + } + if in.KeyAlgorithm != nil { + in, out := &in.KeyAlgorithm, &out.KeyAlgorithm + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateKeySecretRef != nil { + in, out := &in.PrivateKeySecretRef, &out.PrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil 
{ + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ValidationMethod != nil { + in, out := &in.ValidationMethod, &out.ValidationMethod + *out = new(string) + **out = **in + } + if in.ValidationOption != nil { + in, out := &in.ValidationOption, &out.ValidationOption + *out = make([]ValidationOptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. +func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSpec. +func (in *CertificateSpec) DeepCopy() *CertificateSpec { + if in == nil { + return nil + } + out := new(CertificateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateStatus) DeepCopyInto(out *CertificateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateStatus. 
+func (in *CertificateStatus) DeepCopy() *CertificateStatus { + if in == nil { + return nil + } + out := new(CertificateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainValidationOptionsInitParameters) DeepCopyInto(out *DomainValidationOptionsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainValidationOptionsInitParameters. +func (in *DomainValidationOptionsInitParameters) DeepCopy() *DomainValidationOptionsInitParameters { + if in == nil { + return nil + } + out := new(DomainValidationOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainValidationOptionsObservation) DeepCopyInto(out *DomainValidationOptionsObservation) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.ResourceRecordName != nil { + in, out := &in.ResourceRecordName, &out.ResourceRecordName + *out = new(string) + **out = **in + } + if in.ResourceRecordType != nil { + in, out := &in.ResourceRecordType, &out.ResourceRecordType + *out = new(string) + **out = **in + } + if in.ResourceRecordValue != nil { + in, out := &in.ResourceRecordValue, &out.ResourceRecordValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainValidationOptionsObservation. +func (in *DomainValidationOptionsObservation) DeepCopy() *DomainValidationOptionsObservation { + if in == nil { + return nil + } + out := new(DomainValidationOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainValidationOptionsParameters) DeepCopyInto(out *DomainValidationOptionsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainValidationOptionsParameters. +func (in *DomainValidationOptionsParameters) DeepCopy() *DomainValidationOptionsParameters { + if in == nil { + return nil + } + out := new(DomainValidationOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsInitParameters) DeepCopyInto(out *OptionsInitParameters) { + *out = *in + if in.CertificateTransparencyLoggingPreference != nil { + in, out := &in.CertificateTransparencyLoggingPreference, &out.CertificateTransparencyLoggingPreference + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsInitParameters. +func (in *OptionsInitParameters) DeepCopy() *OptionsInitParameters { + if in == nil { + return nil + } + out := new(OptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsObservation) DeepCopyInto(out *OptionsObservation) { + *out = *in + if in.CertificateTransparencyLoggingPreference != nil { + in, out := &in.CertificateTransparencyLoggingPreference, &out.CertificateTransparencyLoggingPreference + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsObservation. +func (in *OptionsObservation) DeepCopy() *OptionsObservation { + if in == nil { + return nil + } + out := new(OptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OptionsParameters) DeepCopyInto(out *OptionsParameters) { + *out = *in + if in.CertificateTransparencyLoggingPreference != nil { + in, out := &in.CertificateTransparencyLoggingPreference, &out.CertificateTransparencyLoggingPreference + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsParameters. +func (in *OptionsParameters) DeepCopy() *OptionsParameters { + if in == nil { + return nil + } + out := new(OptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RenewalSummaryInitParameters) DeepCopyInto(out *RenewalSummaryInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RenewalSummaryInitParameters. +func (in *RenewalSummaryInitParameters) DeepCopy() *RenewalSummaryInitParameters { + if in == nil { + return nil + } + out := new(RenewalSummaryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RenewalSummaryObservation) DeepCopyInto(out *RenewalSummaryObservation) { + *out = *in + if in.RenewalStatus != nil { + in, out := &in.RenewalStatus, &out.RenewalStatus + *out = new(string) + **out = **in + } + if in.RenewalStatusReason != nil { + in, out := &in.RenewalStatusReason, &out.RenewalStatusReason + *out = new(string) + **out = **in + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RenewalSummaryObservation. 
+func (in *RenewalSummaryObservation) DeepCopy() *RenewalSummaryObservation { + if in == nil { + return nil + } + out := new(RenewalSummaryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RenewalSummaryParameters) DeepCopyInto(out *RenewalSummaryParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RenewalSummaryParameters. +func (in *RenewalSummaryParameters) DeepCopy() *RenewalSummaryParameters { + if in == nil { + return nil + } + out := new(RenewalSummaryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationOptionInitParameters) DeepCopyInto(out *ValidationOptionInitParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.ValidationDomain != nil { + in, out := &in.ValidationDomain, &out.ValidationDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationOptionInitParameters. +func (in *ValidationOptionInitParameters) DeepCopy() *ValidationOptionInitParameters { + if in == nil { + return nil + } + out := new(ValidationOptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationOptionObservation) DeepCopyInto(out *ValidationOptionObservation) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.ValidationDomain != nil { + in, out := &in.ValidationDomain, &out.ValidationDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationOptionObservation. +func (in *ValidationOptionObservation) DeepCopy() *ValidationOptionObservation { + if in == nil { + return nil + } + out := new(ValidationOptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationOptionParameters) DeepCopyInto(out *ValidationOptionParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.ValidationDomain != nil { + in, out := &in.ValidationDomain, &out.ValidationDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationOptionParameters. +func (in *ValidationOptionParameters) DeepCopy() *ValidationOptionParameters { + if in == nil { + return nil + } + out := new(ValidationOptionParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/acm/v1beta2/zz_generated.managed.go b/apis/acm/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..e03146d433 --- /dev/null +++ b/apis/acm/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Certificate. 
+func (mg *Certificate) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Certificate. +func (mg *Certificate) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Certificate. +func (mg *Certificate) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Certificate. +func (mg *Certificate) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Certificate. +func (mg *Certificate) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Certificate. +func (mg *Certificate) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Certificate. +func (mg *Certificate) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Certificate. +func (mg *Certificate) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Certificate. +func (mg *Certificate) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Certificate. +func (mg *Certificate) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Certificate. +func (mg *Certificate) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Certificate. 
+func (mg *Certificate) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/acm/v1beta2/zz_generated.managedlist.go b/apis/acm/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..56cab06fcb --- /dev/null +++ b/apis/acm/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CertificateList. +func (l *CertificateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/acm/v1beta2/zz_groupversion_info.go b/apis/acm/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..b43f020741 --- /dev/null +++ b/apis/acm/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=acm.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "acm.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/acmpca/v1beta1/zz_certificateauthoritycertificate_types.go b/apis/acmpca/v1beta1/zz_certificateauthoritycertificate_types.go index e096d1c8df..d5ede19b3f 100755 --- a/apis/acmpca/v1beta1/zz_certificateauthoritycertificate_types.go +++ b/apis/acmpca/v1beta1/zz_certificateauthoritycertificate_types.go @@ -16,7 +16,7 @@ import ( type CertificateAuthorityCertificateInitParameters struct { // ARN of the Certificate Authority. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta1.CertificateAuthority + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` // Reference to a CertificateAuthority in acmpca to populate certificateAuthorityArn. @@ -45,7 +45,7 @@ type CertificateAuthorityCertificateObservation struct { type CertificateAuthorityCertificateParameters struct { // ARN of the Certificate Authority. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta1.CertificateAuthority + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority // +kubebuilder:validation:Optional CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` diff --git a/apis/acmpca/v1beta1/zz_generated.conversion_hubs.go b/apis/acmpca/v1beta1/zz_generated.conversion_hubs.go index 47c762ad74..216e6194bb 100755 --- a/apis/acmpca/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/acmpca/v1beta1/zz_generated.conversion_hubs.go @@ -6,12 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Certificate) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *CertificateAuthority) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *CertificateAuthorityCertificate) Hub() {} diff --git a/apis/acmpca/v1beta1/zz_generated.conversion_spokes.go b/apis/acmpca/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..3977ae50f1 --- /dev/null +++ b/apis/acmpca/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Certificate to the hub type. +func (tr *Certificate) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Certificate type. +func (tr *Certificate) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this CertificateAuthority to the hub type. 
+func (tr *CertificateAuthority) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CertificateAuthority type. +func (tr *CertificateAuthority) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/acmpca/v1beta1/zz_generated.resolvers.go b/apis/acmpca/v1beta1/zz_generated.resolvers.go index 60e3c4416e..bdaf49e1c9 100644 --- a/apis/acmpca/v1beta1/zz_generated.resolvers.go +++ b/apis/acmpca/v1beta1/zz_generated.resolvers.go @@ -9,9 +9,10 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" apisresolver "github.com/upbound/provider-aws/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -75,7 +76,7 @@ func (mg *CertificateAuthorityCertificate) ResolveReferences(ctx context.Context var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", 
"v1beta2", "CertificateAuthority", "CertificateAuthorityList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -94,7 +95,7 @@ func (mg *CertificateAuthorityCertificate) ResolveReferences(ctx context.Context mg.Spec.ForProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CertificateAuthorityArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta2", "CertificateAuthority", "CertificateAuthorityList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -125,7 +126,7 @@ func (mg *Permission) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta2", "CertificateAuthority", "CertificateAuthorityList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -144,7 +145,7 @@ func (mg *Permission) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CertificateAuthorityArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta2", "CertificateAuthority", "CertificateAuthorityList") if err != nil { return errors.Wrap(err, "failed to get 
the reference target managed resource and its list for reference resolution") } @@ -175,7 +176,7 @@ func (mg *Policy) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta2", "CertificateAuthority", "CertificateAuthorityList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -194,7 +195,7 @@ func (mg *Policy) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta2", "CertificateAuthority", "CertificateAuthorityList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/acmpca/v1beta1/zz_permission_types.go b/apis/acmpca/v1beta1/zz_permission_types.go index 7af1f304c6..aeaf7455fa 100755 --- a/apis/acmpca/v1beta1/zz_permission_types.go +++ b/apis/acmpca/v1beta1/zz_permission_types.go @@ -20,7 +20,7 @@ type PermissionInitParameters struct { Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` // ARN of the CA that grants the permissions. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta1.CertificateAuthority + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` @@ -68,7 +68,7 @@ type PermissionParameters struct { Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` // ARN of the CA that grants the permissions. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta1.CertificateAuthority + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` diff --git a/apis/acmpca/v1beta1/zz_policy_types.go b/apis/acmpca/v1beta1/zz_policy_types.go index 6acbed26b8..971deb9b34 100755 --- a/apis/acmpca/v1beta1/zz_policy_types.go +++ b/apis/acmpca/v1beta1/zz_policy_types.go @@ -19,7 +19,7 @@ type PolicyInitParameters struct { Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` // ARN of the private CA to associate with the policy. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta1.CertificateAuthority + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` @@ -54,7 +54,7 @@ type PolicyParameters struct { Region *string `json:"region" tf:"-"` // ARN of the private CA to associate with the policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta1.CertificateAuthority + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` diff --git a/apis/acmpca/v1beta2/zz_certificate_terraformed.go b/apis/acmpca/v1beta2/zz_certificate_terraformed.go new file mode 100755 index 0000000000..4b6d863061 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_certificate_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Certificate +func (mg *Certificate) GetTerraformResourceType() string { + return "aws_acmpca_certificate" +} + +// GetConnectionDetailsMapping for this Certificate +func (tr *Certificate) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"certificate_signing_request": "certificateSigningRequestSecretRef"} +} + +// GetObservation of this Certificate +func (tr *Certificate) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Certificate +func (tr *Certificate) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Certificate +func (tr *Certificate) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Certificate +func (tr *Certificate) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Certificate +func (tr *Certificate) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Certificate +func (tr *Certificate) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Certificate +func (tr *Certificate) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Certificate using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Certificate) LateInitialize(attrs []byte) (bool, error) { + params := &CertificateParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Certificate) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/acmpca/v1beta2/zz_certificate_types.go b/apis/acmpca/v1beta2/zz_certificate_types.go new file mode 100755 index 0000000000..697f9ef833 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_certificate_types.go @@ -0,0 +1,209 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CertificateInitParameters struct { + + // Specifies X.509 certificate information to be included in the issued certificate. To use with API Passthrough templates + APIPassthrough *string `json:"apiPassthrough,omitempty" tf:"api_passthrough,omitempty"` + + // ARN of the certificate authority. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority + CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` + + // Reference to a CertificateAuthority in acmpca to populate certificateAuthorityArn. + // +kubebuilder:validation:Optional + CertificateAuthorityArnRef *v1.Reference `json:"certificateAuthorityArnRef,omitempty" tf:"-"` + + // Selector for a CertificateAuthority in acmpca to populate certificateAuthorityArn. + // +kubebuilder:validation:Optional + CertificateAuthorityArnSelector *v1.Selector `json:"certificateAuthorityArnSelector,omitempty" tf:"-"` + + // Certificate Signing Request in PEM format. + CertificateSigningRequestSecretRef v1.SecretKeySelector `json:"certificateSigningRequestSecretRef" tf:"-"` + + // Algorithm to use to sign certificate requests. 
Valid values: SHA256WITHRSA, SHA256WITHECDSA, SHA384WITHRSA, SHA384WITHECDSA, SHA512WITHRSA, SHA512WITHECDSA. + SigningAlgorithm *string `json:"signingAlgorithm,omitempty" tf:"signing_algorithm,omitempty"` + + // Template to use when issuing a certificate. + // See ACM PCA Documentation for more information. + TemplateArn *string `json:"templateArn,omitempty" tf:"template_arn,omitempty"` + + // Configures end of the validity period for the certificate. See validity block below. + Validity *ValidityInitParameters `json:"validity,omitempty" tf:"validity,omitempty"` +} + +type CertificateObservation struct { + + // Specifies X.509 certificate information to be included in the issued certificate. To use with API Passthrough templates + APIPassthrough *string `json:"apiPassthrough,omitempty" tf:"api_passthrough,omitempty"` + + // ARN of the certificate. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // PEM-encoded certificate value. + Certificate *string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // ARN of the certificate authority. + CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` + + // PEM-encoded certificate chain that includes any intermediate certificates and chains up to root CA. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Algorithm to use to sign certificate requests. Valid values: SHA256WITHRSA, SHA256WITHECDSA, SHA384WITHRSA, SHA384WITHECDSA, SHA512WITHRSA, SHA512WITHECDSA. + SigningAlgorithm *string `json:"signingAlgorithm,omitempty" tf:"signing_algorithm,omitempty"` + + // Template to use when issuing a certificate. + // See ACM PCA Documentation for more information. + TemplateArn *string `json:"templateArn,omitempty" tf:"template_arn,omitempty"` + + // Configures end of the validity period for the certificate. See validity block below. 
+ Validity *ValidityObservation `json:"validity,omitempty" tf:"validity,omitempty"` +} + +type CertificateParameters struct { + + // Specifies X.509 certificate information to be included in the issued certificate. To use with API Passthrough templates + // +kubebuilder:validation:Optional + APIPassthrough *string `json:"apiPassthrough,omitempty" tf:"api_passthrough,omitempty"` + + // ARN of the certificate authority. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority + // +kubebuilder:validation:Optional + CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` + + // Reference to a CertificateAuthority in acmpca to populate certificateAuthorityArn. + // +kubebuilder:validation:Optional + CertificateAuthorityArnRef *v1.Reference `json:"certificateAuthorityArnRef,omitempty" tf:"-"` + + // Selector for a CertificateAuthority in acmpca to populate certificateAuthorityArn. + // +kubebuilder:validation:Optional + CertificateAuthorityArnSelector *v1.Selector `json:"certificateAuthorityArnSelector,omitempty" tf:"-"` + + // Certificate Signing Request in PEM format. + // +kubebuilder:validation:Optional + CertificateSigningRequestSecretRef v1.SecretKeySelector `json:"certificateSigningRequestSecretRef" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Algorithm to use to sign certificate requests. Valid values: SHA256WITHRSA, SHA256WITHECDSA, SHA384WITHRSA, SHA384WITHECDSA, SHA512WITHRSA, SHA512WITHECDSA. + // +kubebuilder:validation:Optional + SigningAlgorithm *string `json:"signingAlgorithm,omitempty" tf:"signing_algorithm,omitempty"` + + // Template to use when issuing a certificate. + // See ACM PCA Documentation for more information. 
+ // +kubebuilder:validation:Optional + TemplateArn *string `json:"templateArn,omitempty" tf:"template_arn,omitempty"` + + // Configures end of the validity period for the certificate. See validity block below. + // +kubebuilder:validation:Optional + Validity *ValidityParameters `json:"validity,omitempty" tf:"validity,omitempty"` +} + +type ValidityInitParameters struct { + + // Determines how value is interpreted. Valid values: DAYS, MONTHS, YEARS, ABSOLUTE, END_DATE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // If type is DAYS, MONTHS, or YEARS, the relative time until the certificate expires. If type is ABSOLUTE, the date in seconds since the Unix epoch. If type is END_DATE, the date in RFC 3339 format. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ValidityObservation struct { + + // Determines how value is interpreted. Valid values: DAYS, MONTHS, YEARS, ABSOLUTE, END_DATE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // If type is DAYS, MONTHS, or YEARS, the relative time until the certificate expires. If type is ABSOLUTE, the date in seconds since the Unix epoch. If type is END_DATE, the date in RFC 3339 format. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ValidityParameters struct { + + // Determines how value is interpreted. Valid values: DAYS, MONTHS, YEARS, ABSOLUTE, END_DATE. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // If type is DAYS, MONTHS, or YEARS, the relative time until the certificate expires. If type is ABSOLUTE, the date in seconds since the Unix epoch. If type is END_DATE, the date in RFC 3339 format. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +// CertificateSpec defines the desired state of Certificate +type CertificateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CertificateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CertificateInitParameters `json:"initProvider,omitempty"` +} + +// CertificateStatus defines the observed state of Certificate. +type CertificateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CertificateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Certificate is the Schema for the Certificates API. 
Provides a resource to issue a certificate using AWS Certificate Manager Private Certificate Authority (ACM PCA) +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Certificate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.certificateSigningRequestSecretRef)",message="spec.forProvider.certificateSigningRequestSecretRef is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.signingAlgorithm) || (has(self.initProvider) && has(self.initProvider.signingAlgorithm))",message="spec.forProvider.signingAlgorithm is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.validity) || (has(self.initProvider) && has(self.initProvider.validity))",message="spec.forProvider.validity is a required parameter" + Spec CertificateSpec `json:"spec"` + Status CertificateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CertificateList contains a list of Certificates +type CertificateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta 
`json:"metadata,omitempty"` + Items []Certificate `json:"items"` +} + +// Repository type metadata. +var ( + Certificate_Kind = "Certificate" + Certificate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Certificate_Kind}.String() + Certificate_KindAPIVersion = Certificate_Kind + "." + CRDGroupVersion.String() + Certificate_GroupVersionKind = CRDGroupVersion.WithKind(Certificate_Kind) +) + +func init() { + SchemeBuilder.Register(&Certificate{}, &CertificateList{}) +} diff --git a/apis/acmpca/v1beta2/zz_certificateauthority_terraformed.go b/apis/acmpca/v1beta2/zz_certificateauthority_terraformed.go new file mode 100755 index 0000000000..90333a0841 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_certificateauthority_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CertificateAuthority +func (mg *CertificateAuthority) GetTerraformResourceType() string { + return "aws_acmpca_certificate_authority" +} + +// GetConnectionDetailsMapping for this CertificateAuthority +func (tr *CertificateAuthority) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CertificateAuthority +func (tr *CertificateAuthority) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CertificateAuthority +func (tr *CertificateAuthority) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CertificateAuthority +func (tr *CertificateAuthority) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CertificateAuthority +func (tr *CertificateAuthority) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CertificateAuthority +func (tr *CertificateAuthority) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CertificateAuthority +func (tr *CertificateAuthority) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CertificateAuthority +func (tr *CertificateAuthority) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CertificateAuthority using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CertificateAuthority) LateInitialize(attrs []byte) (bool, error) { + params := &CertificateAuthorityParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("RevocationConfiguration")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CertificateAuthority) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/acmpca/v1beta2/zz_certificateauthority_types.go b/apis/acmpca/v1beta2/zz_certificateauthority_types.go new file mode 100755 index 0000000000..4d7de6b738 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_certificateauthority_types.go @@ -0,0 +1,498 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CertificateAuthorityConfigurationInitParameters struct { + + // Type of the public key algorithm and size, in bits, of the key pair that your key pair creates when it issues a certificate. 
Valid values can be found in the ACM PCA Documentation. + KeyAlgorithm *string `json:"keyAlgorithm,omitempty" tf:"key_algorithm,omitempty"` + + // Name of the algorithm your private CA uses to sign certificate requests. Valid values can be found in the ACM PCA Documentation. + SigningAlgorithm *string `json:"signingAlgorithm,omitempty" tf:"signing_algorithm,omitempty"` + + // Nested argument that contains X.500 distinguished name information. At least one nested attribute must be specified. + Subject *SubjectInitParameters `json:"subject,omitempty" tf:"subject,omitempty"` +} + +type CertificateAuthorityConfigurationObservation struct { + + // Type of the public key algorithm and size, in bits, of the key pair that your key pair creates when it issues a certificate. Valid values can be found in the ACM PCA Documentation. + KeyAlgorithm *string `json:"keyAlgorithm,omitempty" tf:"key_algorithm,omitempty"` + + // Name of the algorithm your private CA uses to sign certificate requests. Valid values can be found in the ACM PCA Documentation. + SigningAlgorithm *string `json:"signingAlgorithm,omitempty" tf:"signing_algorithm,omitempty"` + + // Nested argument that contains X.500 distinguished name information. At least one nested attribute must be specified. + Subject *SubjectObservation `json:"subject,omitempty" tf:"subject,omitempty"` +} + +type CertificateAuthorityConfigurationParameters struct { + + // Type of the public key algorithm and size, in bits, of the key pair that your key pair creates when it issues a certificate. Valid values can be found in the ACM PCA Documentation. + // +kubebuilder:validation:Optional + KeyAlgorithm *string `json:"keyAlgorithm" tf:"key_algorithm,omitempty"` + + // Name of the algorithm your private CA uses to sign certificate requests. Valid values can be found in the ACM PCA Documentation. 
+ // +kubebuilder:validation:Optional + SigningAlgorithm *string `json:"signingAlgorithm" tf:"signing_algorithm,omitempty"` + + // Nested argument that contains X.500 distinguished name information. At least one nested attribute must be specified. + // +kubebuilder:validation:Optional + Subject *SubjectParameters `json:"subject" tf:"subject,omitempty"` +} + +type CertificateAuthorityInitParameters struct { + + // Nested argument containing algorithms and certificate subject information. Defined below. + CertificateAuthorityConfiguration *CertificateAuthorityConfigurationInitParameters `json:"certificateAuthorityConfiguration,omitempty" tf:"certificate_authority_configuration,omitempty"` + + // Whether the certificate authority is enabled or disabled. Defaults to true. Can only be disabled if the CA is in an ACTIVE state. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Cryptographic key management compliance standard used for handling CA keys. Defaults to FIPS_140_2_LEVEL_3_OR_HIGHER. Valid values: FIPS_140_2_LEVEL_3_OR_HIGHER and FIPS_140_2_LEVEL_2_OR_HIGHER. Supported standard for each region can be found in the Storage and security compliance of AWS Private CA private keys Documentation. + KeyStorageSecurityStandard *string `json:"keyStorageSecurityStandard,omitempty" tf:"key_storage_security_standard,omitempty"` + + // Number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days. + PermanentDeletionTimeInDays *float64 `json:"permanentDeletionTimeInDays,omitempty" tf:"permanent_deletion_time_in_days,omitempty"` + + // Nested argument containing revocation configuration. Defined below. + RevocationConfiguration *RevocationConfigurationInitParameters `json:"revocationConfiguration,omitempty" tf:"revocation_configuration,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Type of the certificate authority. Defaults to SUBORDINATE. Valid values: ROOT and SUBORDINATE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies whether the CA issues general-purpose certificates that typically require a revocation mechanism, or short-lived certificates that may optionally omit revocation because they expire quickly. Short-lived certificate validity is limited to seven days. Defaults to GENERAL_PURPOSE. Valid values: GENERAL_PURPOSE and SHORT_LIVED_CERTIFICATE. + UsageMode *string `json:"usageMode,omitempty" tf:"usage_mode,omitempty"` +} + +type CertificateAuthorityObservation struct { + + // ARN of the certificate authority. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Base64-encoded certificate authority (CA) certificate. Only available after the certificate authority certificate has been imported. + Certificate *string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Nested argument containing algorithms and certificate subject information. Defined below. + CertificateAuthorityConfiguration *CertificateAuthorityConfigurationObservation `json:"certificateAuthorityConfiguration,omitempty" tf:"certificate_authority_configuration,omitempty"` + + // Base64-encoded certificate chain that includes any intermediate certificates and chains up to root on-premises certificate that you used to sign your private CA certificate. The chain does not include your private CA certificate. Only available after the certificate authority certificate has been imported. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // The base64 PEM-encoded certificate signing request (CSR) for your private CA certificate. 
+ CertificateSigningRequest *string `json:"certificateSigningRequest,omitempty" tf:"certificate_signing_request,omitempty"` + + // Whether the certificate authority is enabled or disabled. Defaults to true. Can only be disabled if the CA is in an ACTIVE state. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // ARN of the certificate authority. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Cryptographic key management compliance standard used for handling CA keys. Defaults to FIPS_140_2_LEVEL_3_OR_HIGHER. Valid values: FIPS_140_2_LEVEL_3_OR_HIGHER and FIPS_140_2_LEVEL_2_OR_HIGHER. Supported standard for each region can be found in the Storage and security compliance of AWS Private CA private keys Documentation. + KeyStorageSecurityStandard *string `json:"keyStorageSecurityStandard,omitempty" tf:"key_storage_security_standard,omitempty"` + + // Date and time after which the certificate authority is not valid. Only available after the certificate authority certificate has been imported. + NotAfter *string `json:"notAfter,omitempty" tf:"not_after,omitempty"` + + // Date and time before which the certificate authority is not valid. Only available after the certificate authority certificate has been imported. + NotBefore *string `json:"notBefore,omitempty" tf:"not_before,omitempty"` + + // Number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days. + PermanentDeletionTimeInDays *float64 `json:"permanentDeletionTimeInDays,omitempty" tf:"permanent_deletion_time_in_days,omitempty"` + + // Nested argument containing revocation configuration. Defined below. + RevocationConfiguration *RevocationConfigurationObservation `json:"revocationConfiguration,omitempty" tf:"revocation_configuration,omitempty"` + + // Serial number of the certificate authority. Only available after the certificate authority certificate has been imported. 
+ Serial *string `json:"serial,omitempty" tf:"serial,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Type of the certificate authority. Defaults to SUBORDINATE. Valid values: ROOT and SUBORDINATE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies whether the CA issues general-purpose certificates that typically require a revocation mechanism, or short-lived certificates that may optionally omit revocation because they expire quickly. Short-lived certificate validity is limited to seven days. Defaults to GENERAL_PURPOSE. Valid values: GENERAL_PURPOSE and SHORT_LIVED_CERTIFICATE. + UsageMode *string `json:"usageMode,omitempty" tf:"usage_mode,omitempty"` +} + +type CertificateAuthorityParameters struct { + + // Nested argument containing algorithms and certificate subject information. Defined below. + // +kubebuilder:validation:Optional + CertificateAuthorityConfiguration *CertificateAuthorityConfigurationParameters `json:"certificateAuthorityConfiguration,omitempty" tf:"certificate_authority_configuration,omitempty"` + + // Whether the certificate authority is enabled or disabled. Defaults to true. Can only be disabled if the CA is in an ACTIVE state. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Cryptographic key management compliance standard used for handling CA keys. Defaults to FIPS_140_2_LEVEL_3_OR_HIGHER. Valid values: FIPS_140_2_LEVEL_3_OR_HIGHER and FIPS_140_2_LEVEL_2_OR_HIGHER. Supported standard for each region can be found in the Storage and security compliance of AWS Private CA private keys Documentation. 
+ // +kubebuilder:validation:Optional + KeyStorageSecurityStandard *string `json:"keyStorageSecurityStandard,omitempty" tf:"key_storage_security_standard,omitempty"` + + // Number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days. + // +kubebuilder:validation:Optional + PermanentDeletionTimeInDays *float64 `json:"permanentDeletionTimeInDays,omitempty" tf:"permanent_deletion_time_in_days,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Nested argument containing revocation configuration. Defined below. + // +kubebuilder:validation:Optional + RevocationConfiguration *RevocationConfigurationParameters `json:"revocationConfiguration,omitempty" tf:"revocation_configuration,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Type of the certificate authority. Defaults to SUBORDINATE. Valid values: ROOT and SUBORDINATE. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies whether the CA issues general-purpose certificates that typically require a revocation mechanism, or short-lived certificates that may optionally omit revocation because they expire quickly. Short-lived certificate validity is limited to seven days. Defaults to GENERAL_PURPOSE. Valid values: GENERAL_PURPOSE and SHORT_LIVED_CERTIFICATE. + // +kubebuilder:validation:Optional + UsageMode *string `json:"usageMode,omitempty" tf:"usage_mode,omitempty"` +} + +type CrlConfigurationInitParameters struct { + + // Name inserted into the certificate CRL Distribution Points extension that enables the use of an alias for the CRL distribution point. 
Use this value if you don't want the name of your S3 bucket to be public. Must be less than or equal to 253 characters in length. + CustomCname *string `json:"customCname,omitempty" tf:"custom_cname,omitempty"` + + // Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Number of days until a certificate expires. Must be between 1 and 5000. + ExpirationInDays *float64 `json:"expirationInDays,omitempty" tf:"expiration_in_days,omitempty"` + + // Name of the S3 bucket that contains the CRL. If you do not provide a value for the custom_cname argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You must specify a bucket policy that allows ACM PCA to write the CRL to your bucket. Must be between 3 and 255 characters in length. + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // Determines whether the CRL will be publicly readable or privately held in the CRL Amazon S3 bucket. Defaults to PUBLIC_READ. + S3ObjectACL *string `json:"s3ObjectAcl,omitempty" tf:"s3_object_acl,omitempty"` +} + +type CrlConfigurationObservation struct { + + // Name inserted into the certificate CRL Distribution Points extension that enables the use of an alias for the CRL distribution point. Use this value if you don't want the name of your S3 bucket to be public. Must be less than or equal to 253 characters in length. + CustomCname *string `json:"customCname,omitempty" tf:"custom_cname,omitempty"` + + // Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Number of days until a certificate expires. Must be between 1 and 5000. 
+ ExpirationInDays *float64 `json:"expirationInDays,omitempty" tf:"expiration_in_days,omitempty"` + + // Name of the S3 bucket that contains the CRL. If you do not provide a value for the custom_cname argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You must specify a bucket policy that allows ACM PCA to write the CRL to your bucket. Must be between 3 and 255 characters in length. + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // Determines whether the CRL will be publicly readable or privately held in the CRL Amazon S3 bucket. Defaults to PUBLIC_READ. + S3ObjectACL *string `json:"s3ObjectAcl,omitempty" tf:"s3_object_acl,omitempty"` +} + +type CrlConfigurationParameters struct { + + // Name inserted into the certificate CRL Distribution Points extension that enables the use of an alias for the CRL distribution point. Use this value if you don't want the name of your S3 bucket to be public. Must be less than or equal to 253 characters in length. + // +kubebuilder:validation:Optional + CustomCname *string `json:"customCname,omitempty" tf:"custom_cname,omitempty"` + + // Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Number of days until a certificate expires. Must be between 1 and 5000. + // +kubebuilder:validation:Optional + ExpirationInDays *float64 `json:"expirationInDays,omitempty" tf:"expiration_in_days,omitempty"` + + // Name of the S3 bucket that contains the CRL. If you do not provide a value for the custom_cname argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You must specify a bucket policy that allows ACM PCA to write the CRL to your bucket. Must be between 3 and 255 characters in length. 
+ // +kubebuilder:validation:Optional + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // Determines whether the CRL will be publicly readable or privately held in the CRL Amazon S3 bucket. Defaults to PUBLIC_READ. + // +kubebuilder:validation:Optional + S3ObjectACL *string `json:"s3ObjectAcl,omitempty" tf:"s3_object_acl,omitempty"` +} + +type OcspConfigurationInitParameters struct { + + // Boolean value that specifies whether a custom OCSP responder is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // CNAME specifying a customized OCSP domain. Note: The value of the CNAME must not include a protocol prefix such as "http://" or "https://". + OcspCustomCname *string `json:"ocspCustomCname,omitempty" tf:"ocsp_custom_cname,omitempty"` +} + +type OcspConfigurationObservation struct { + + // Boolean value that specifies whether a custom OCSP responder is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // CNAME specifying a customized OCSP domain. Note: The value of the CNAME must not include a protocol prefix such as "http://" or "https://". + OcspCustomCname *string `json:"ocspCustomCname,omitempty" tf:"ocsp_custom_cname,omitempty"` +} + +type OcspConfigurationParameters struct { + + // Boolean value that specifies whether a custom OCSP responder is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // CNAME specifying a customized OCSP domain. Note: The value of the CNAME must not include a protocol prefix such as "http://" or "https://". + // +kubebuilder:validation:Optional + OcspCustomCname *string `json:"ocspCustomCname,omitempty" tf:"ocsp_custom_cname,omitempty"` +} + +type RevocationConfigurationInitParameters struct { + + // Nested argument containing configuration of the certificate revocation list (CRL), if any, maintained by the certificate authority. Defined below. 
+ CrlConfiguration *CrlConfigurationInitParameters `json:"crlConfiguration,omitempty" tf:"crl_configuration,omitempty"` + + // Nested argument containing configuration of + // the custom OCSP responder endpoint. Defined below. + OcspConfiguration *OcspConfigurationInitParameters `json:"ocspConfiguration,omitempty" tf:"ocsp_configuration,omitempty"` +} + +type RevocationConfigurationObservation struct { + + // Nested argument containing configuration of the certificate revocation list (CRL), if any, maintained by the certificate authority. Defined below. + CrlConfiguration *CrlConfigurationObservation `json:"crlConfiguration,omitempty" tf:"crl_configuration,omitempty"` + + // Nested argument containing configuration of + // the custom OCSP responder endpoint. Defined below. + OcspConfiguration *OcspConfigurationObservation `json:"ocspConfiguration,omitempty" tf:"ocsp_configuration,omitempty"` +} + +type RevocationConfigurationParameters struct { + + // Nested argument containing configuration of the certificate revocation list (CRL), if any, maintained by the certificate authority. Defined below. + // +kubebuilder:validation:Optional + CrlConfiguration *CrlConfigurationParameters `json:"crlConfiguration,omitempty" tf:"crl_configuration,omitempty"` + + // Nested argument containing configuration of + // the custom OCSP responder endpoint. Defined below. + // +kubebuilder:validation:Optional + OcspConfiguration *OcspConfigurationParameters `json:"ocspConfiguration,omitempty" tf:"ocsp_configuration,omitempty"` +} + +type SubjectInitParameters struct { + + // Fully qualified domain name (FQDN) associated with the certificate subject. Must be less than or equal to 64 characters in length. + CommonName *string `json:"commonName,omitempty" tf:"common_name,omitempty"` + + // Two digit code that specifies the country in which the certificate subject located. Must be less than or equal to 2 characters in length. 
+ Country *string `json:"country,omitempty" tf:"country,omitempty"` + + // Disambiguating information for the certificate subject. Must be less than or equal to 64 characters in length. + DistinguishedNameQualifier *string `json:"distinguishedNameQualifier,omitempty" tf:"distinguished_name_qualifier,omitempty"` + + // Typically a qualifier appended to the name of an individual. Examples include Jr. for junior, Sr. for senior, and III for third. Must be less than or equal to 3 characters in length. + GenerationQualifier *string `json:"generationQualifier,omitempty" tf:"generation_qualifier,omitempty"` + + // First name. Must be less than or equal to 16 characters in length. + GivenName *string `json:"givenName,omitempty" tf:"given_name,omitempty"` + + // Concatenation that typically contains the first letter of the given_name, the first letter of the middle name if one exists, and the first letter of the surname. Must be less than or equal to 5 characters in length. + Initials *string `json:"initials,omitempty" tf:"initials,omitempty"` + + // Locality (such as a city or town) in which the certificate subject is located. Must be less than or equal to 128 characters in length. + Locality *string `json:"locality,omitempty" tf:"locality,omitempty"` + + // Legal name of the organization with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. + Organization *string `json:"organization,omitempty" tf:"organization,omitempty"` + + // Subdivision or unit of the organization (such as sales or finance) with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. + OrganizationalUnit *string `json:"organizationalUnit,omitempty" tf:"organizational_unit,omitempty"` + + // Typically a shortened version of a longer given_name. For example, Jonathan is often shortened to John. Elizabeth is often shortened to Beth, Liz, or Eliza. Must be less than or equal to 128 characters in length. 
+ Pseudonym *string `json:"pseudonym,omitempty" tf:"pseudonym,omitempty"` + + // State in which the subject of the certificate is located. Must be less than or equal to 128 characters in length. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Family name. In the US and the UK for example, the surname of an individual is ordered last. In Asian cultures the surname is typically ordered first. Must be less than or equal to 40 characters in length. + Surname *string `json:"surname,omitempty" tf:"surname,omitempty"` + + // Title such as Mr. or Ms. which is pre-pended to the name to refer formally to the certificate subject. Must be less than or equal to 64 characters in length. + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type SubjectObservation struct { + + // Fully qualified domain name (FQDN) associated with the certificate subject. Must be less than or equal to 64 characters in length. + CommonName *string `json:"commonName,omitempty" tf:"common_name,omitempty"` + + // Two digit code that specifies the country in which the certificate subject located. Must be less than or equal to 2 characters in length. + Country *string `json:"country,omitempty" tf:"country,omitempty"` + + // Disambiguating information for the certificate subject. Must be less than or equal to 64 characters in length. + DistinguishedNameQualifier *string `json:"distinguishedNameQualifier,omitempty" tf:"distinguished_name_qualifier,omitempty"` + + // Typically a qualifier appended to the name of an individual. Examples include Jr. for junior, Sr. for senior, and III for third. Must be less than or equal to 3 characters in length. + GenerationQualifier *string `json:"generationQualifier,omitempty" tf:"generation_qualifier,omitempty"` + + // First name. Must be less than or equal to 16 characters in length. 
+ GivenName *string `json:"givenName,omitempty" tf:"given_name,omitempty"` + + // Concatenation that typically contains the first letter of the given_name, the first letter of the middle name if one exists, and the first letter of the surname. Must be less than or equal to 5 characters in length. + Initials *string `json:"initials,omitempty" tf:"initials,omitempty"` + + // Locality (such as a city or town) in which the certificate subject is located. Must be less than or equal to 128 characters in length. + Locality *string `json:"locality,omitempty" tf:"locality,omitempty"` + + // Legal name of the organization with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. + Organization *string `json:"organization,omitempty" tf:"organization,omitempty"` + + // Subdivision or unit of the organization (such as sales or finance) with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. + OrganizationalUnit *string `json:"organizationalUnit,omitempty" tf:"organizational_unit,omitempty"` + + // Typically a shortened version of a longer given_name. For example, Jonathan is often shortened to John. Elizabeth is often shortened to Beth, Liz, or Eliza. Must be less than or equal to 128 characters in length. + Pseudonym *string `json:"pseudonym,omitempty" tf:"pseudonym,omitempty"` + + // State in which the subject of the certificate is located. Must be less than or equal to 128 characters in length. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Family name. In the US and the UK for example, the surname of an individual is ordered last. In Asian cultures the surname is typically ordered first. Must be less than or equal to 40 characters in length. + Surname *string `json:"surname,omitempty" tf:"surname,omitempty"` + + // Title such as Mr. or Ms. which is pre-pended to the name to refer formally to the certificate subject. 
Must be less than or equal to 64 characters in length. + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type SubjectParameters struct { + + // Fully qualified domain name (FQDN) associated with the certificate subject. Must be less than or equal to 64 characters in length. + // +kubebuilder:validation:Optional + CommonName *string `json:"commonName,omitempty" tf:"common_name,omitempty"` + + // Two digit code that specifies the country in which the certificate subject located. Must be less than or equal to 2 characters in length. + // +kubebuilder:validation:Optional + Country *string `json:"country,omitempty" tf:"country,omitempty"` + + // Disambiguating information for the certificate subject. Must be less than or equal to 64 characters in length. + // +kubebuilder:validation:Optional + DistinguishedNameQualifier *string `json:"distinguishedNameQualifier,omitempty" tf:"distinguished_name_qualifier,omitempty"` + + // Typically a qualifier appended to the name of an individual. Examples include Jr. for junior, Sr. for senior, and III for third. Must be less than or equal to 3 characters in length. + // +kubebuilder:validation:Optional + GenerationQualifier *string `json:"generationQualifier,omitempty" tf:"generation_qualifier,omitempty"` + + // First name. Must be less than or equal to 16 characters in length. + // +kubebuilder:validation:Optional + GivenName *string `json:"givenName,omitempty" tf:"given_name,omitempty"` + + // Concatenation that typically contains the first letter of the given_name, the first letter of the middle name if one exists, and the first letter of the surname. Must be less than or equal to 5 characters in length. + // +kubebuilder:validation:Optional + Initials *string `json:"initials,omitempty" tf:"initials,omitempty"` + + // Locality (such as a city or town) in which the certificate subject is located. Must be less than or equal to 128 characters in length. 
+ // +kubebuilder:validation:Optional + Locality *string `json:"locality,omitempty" tf:"locality,omitempty"` + + // Legal name of the organization with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. + // +kubebuilder:validation:Optional + Organization *string `json:"organization,omitempty" tf:"organization,omitempty"` + + // Subdivision or unit of the organization (such as sales or finance) with which the certificate subject is affiliated. Must be less than or equal to 64 characters in length. + // +kubebuilder:validation:Optional + OrganizationalUnit *string `json:"organizationalUnit,omitempty" tf:"organizational_unit,omitempty"` + + // Typically a shortened version of a longer given_name. For example, Jonathan is often shortened to John. Elizabeth is often shortened to Beth, Liz, or Eliza. Must be less than or equal to 128 characters in length. + // +kubebuilder:validation:Optional + Pseudonym *string `json:"pseudonym,omitempty" tf:"pseudonym,omitempty"` + + // State in which the subject of the certificate is located. Must be less than or equal to 128 characters in length. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Family name. In the US and the UK for example, the surname of an individual is ordered last. In Asian cultures the surname is typically ordered first. Must be less than or equal to 40 characters in length. + // +kubebuilder:validation:Optional + Surname *string `json:"surname,omitempty" tf:"surname,omitempty"` + + // Title such as Mr. or Ms. which is pre-pended to the name to refer formally to the certificate subject. Must be less than or equal to 64 characters in length. 
+ // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +// CertificateAuthoritySpec defines the desired state of CertificateAuthority +type CertificateAuthoritySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CertificateAuthorityParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CertificateAuthorityInitParameters `json:"initProvider,omitempty"` +} + +// CertificateAuthorityStatus defines the observed state of CertificateAuthority. +type CertificateAuthorityStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CertificateAuthorityObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CertificateAuthority is the Schema for the CertificateAuthoritys API. 
Provides a resource to manage AWS Certificate Manager Private Certificate Authorities +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CertificateAuthority struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.certificateAuthorityConfiguration) || (has(self.initProvider) && has(self.initProvider.certificateAuthorityConfiguration))",message="spec.forProvider.certificateAuthorityConfiguration is a required parameter" + Spec CertificateAuthoritySpec `json:"spec"` + Status CertificateAuthorityStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CertificateAuthorityList contains a list of CertificateAuthoritys +type CertificateAuthorityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CertificateAuthority `json:"items"` +} + +// Repository type metadata. +var ( + CertificateAuthority_Kind = "CertificateAuthority" + CertificateAuthority_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CertificateAuthority_Kind}.String() + CertificateAuthority_KindAPIVersion = CertificateAuthority_Kind + "." 
+ CRDGroupVersion.String() + CertificateAuthority_GroupVersionKind = CRDGroupVersion.WithKind(CertificateAuthority_Kind) +) + +func init() { + SchemeBuilder.Register(&CertificateAuthority{}, &CertificateAuthorityList{}) +} diff --git a/apis/acmpca/v1beta2/zz_generated.conversion_hubs.go b/apis/acmpca/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9b587d0397 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Certificate) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *CertificateAuthority) Hub() {} diff --git a/apis/acmpca/v1beta2/zz_generated.deepcopy.go b/apis/acmpca/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..09ed07a025 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1303 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Certificate) DeepCopyInto(out *Certificate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Certificate. 
+func (in *Certificate) DeepCopy() *Certificate { + if in == nil { + return nil + } + out := new(Certificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Certificate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthority) DeepCopyInto(out *CertificateAuthority) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthority. +func (in *CertificateAuthority) DeepCopy() *CertificateAuthority { + if in == nil { + return nil + } + out := new(CertificateAuthority) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateAuthority) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAuthorityConfigurationInitParameters) DeepCopyInto(out *CertificateAuthorityConfigurationInitParameters) { + *out = *in + if in.KeyAlgorithm != nil { + in, out := &in.KeyAlgorithm, &out.KeyAlgorithm + *out = new(string) + **out = **in + } + if in.SigningAlgorithm != nil { + in, out := &in.SigningAlgorithm, &out.SigningAlgorithm + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(SubjectInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityConfigurationInitParameters. +func (in *CertificateAuthorityConfigurationInitParameters) DeepCopy() *CertificateAuthorityConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CertificateAuthorityConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthorityConfigurationObservation) DeepCopyInto(out *CertificateAuthorityConfigurationObservation) { + *out = *in + if in.KeyAlgorithm != nil { + in, out := &in.KeyAlgorithm, &out.KeyAlgorithm + *out = new(string) + **out = **in + } + if in.SigningAlgorithm != nil { + in, out := &in.SigningAlgorithm, &out.SigningAlgorithm + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(SubjectObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityConfigurationObservation. 
+func (in *CertificateAuthorityConfigurationObservation) DeepCopy() *CertificateAuthorityConfigurationObservation { + if in == nil { + return nil + } + out := new(CertificateAuthorityConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthorityConfigurationParameters) DeepCopyInto(out *CertificateAuthorityConfigurationParameters) { + *out = *in + if in.KeyAlgorithm != nil { + in, out := &in.KeyAlgorithm, &out.KeyAlgorithm + *out = new(string) + **out = **in + } + if in.SigningAlgorithm != nil { + in, out := &in.SigningAlgorithm, &out.SigningAlgorithm + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(SubjectParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityConfigurationParameters. +func (in *CertificateAuthorityConfigurationParameters) DeepCopy() *CertificateAuthorityConfigurationParameters { + if in == nil { + return nil + } + out := new(CertificateAuthorityConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAuthorityInitParameters) DeepCopyInto(out *CertificateAuthorityInitParameters) { + *out = *in + if in.CertificateAuthorityConfiguration != nil { + in, out := &in.CertificateAuthorityConfiguration, &out.CertificateAuthorityConfiguration + *out = new(CertificateAuthorityConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyStorageSecurityStandard != nil { + in, out := &in.KeyStorageSecurityStandard, &out.KeyStorageSecurityStandard + *out = new(string) + **out = **in + } + if in.PermanentDeletionTimeInDays != nil { + in, out := &in.PermanentDeletionTimeInDays, &out.PermanentDeletionTimeInDays + *out = new(float64) + **out = **in + } + if in.RevocationConfiguration != nil { + in, out := &in.RevocationConfiguration, &out.RevocationConfiguration + *out = new(RevocationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UsageMode != nil { + in, out := &in.UsageMode, &out.UsageMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityInitParameters. +func (in *CertificateAuthorityInitParameters) DeepCopy() *CertificateAuthorityInitParameters { + if in == nil { + return nil + } + out := new(CertificateAuthorityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAuthorityList) DeepCopyInto(out *CertificateAuthorityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateAuthority, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityList. +func (in *CertificateAuthorityList) DeepCopy() *CertificateAuthorityList { + if in == nil { + return nil + } + out := new(CertificateAuthorityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateAuthorityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAuthorityObservation) DeepCopyInto(out *CertificateAuthorityObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.CertificateAuthorityConfiguration != nil { + in, out := &in.CertificateAuthorityConfiguration, &out.CertificateAuthorityConfiguration + *out = new(CertificateAuthorityConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.CertificateSigningRequest != nil { + in, out := &in.CertificateSigningRequest, &out.CertificateSigningRequest + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyStorageSecurityStandard != nil { + in, out := &in.KeyStorageSecurityStandard, &out.KeyStorageSecurityStandard + *out = new(string) + **out = **in + } + if in.NotAfter != nil { + in, out := &in.NotAfter, &out.NotAfter + *out = new(string) + **out = **in + } + if in.NotBefore != nil { + in, out := &in.NotBefore, &out.NotBefore + *out = new(string) + **out = **in + } + if in.PermanentDeletionTimeInDays != nil { + in, out := &in.PermanentDeletionTimeInDays, &out.PermanentDeletionTimeInDays + *out = new(float64) + **out = **in + } + if in.RevocationConfiguration != nil { + in, out := &in.RevocationConfiguration, &out.RevocationConfiguration + *out = new(RevocationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Serial != nil { + in, out := &in.Serial, &out.Serial + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + 
var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UsageMode != nil { + in, out := &in.UsageMode, &out.UsageMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityObservation. +func (in *CertificateAuthorityObservation) DeepCopy() *CertificateAuthorityObservation { + if in == nil { + return nil + } + out := new(CertificateAuthorityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAuthorityParameters) DeepCopyInto(out *CertificateAuthorityParameters) { + *out = *in + if in.CertificateAuthorityConfiguration != nil { + in, out := &in.CertificateAuthorityConfiguration, &out.CertificateAuthorityConfiguration + *out = new(CertificateAuthorityConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyStorageSecurityStandard != nil { + in, out := &in.KeyStorageSecurityStandard, &out.KeyStorageSecurityStandard + *out = new(string) + **out = **in + } + if in.PermanentDeletionTimeInDays != nil { + in, out := &in.PermanentDeletionTimeInDays, &out.PermanentDeletionTimeInDays + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RevocationConfiguration != nil { + in, out := &in.RevocationConfiguration, &out.RevocationConfiguration + *out = new(RevocationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UsageMode != nil { + in, out := &in.UsageMode, &out.UsageMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityParameters. 
+func (in *CertificateAuthorityParameters) DeepCopy() *CertificateAuthorityParameters { + if in == nil { + return nil + } + out := new(CertificateAuthorityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthoritySpec) DeepCopyInto(out *CertificateAuthoritySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthoritySpec. +func (in *CertificateAuthoritySpec) DeepCopy() *CertificateAuthoritySpec { + if in == nil { + return nil + } + out := new(CertificateAuthoritySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthorityStatus) DeepCopyInto(out *CertificateAuthorityStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityStatus. +func (in *CertificateAuthorityStatus) DeepCopy() *CertificateAuthorityStatus { + if in == nil { + return nil + } + out := new(CertificateAuthorityStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.APIPassthrough != nil { + in, out := &in.APIPassthrough, &out.APIPassthrough + *out = new(string) + **out = **in + } + if in.CertificateAuthorityArn != nil { + in, out := &in.CertificateAuthorityArn, &out.CertificateAuthorityArn + *out = new(string) + **out = **in + } + if in.CertificateAuthorityArnRef != nil { + in, out := &in.CertificateAuthorityArnRef, &out.CertificateAuthorityArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateAuthorityArnSelector != nil { + in, out := &in.CertificateAuthorityArnSelector, &out.CertificateAuthorityArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + out.CertificateSigningRequestSecretRef = in.CertificateSigningRequestSecretRef + if in.SigningAlgorithm != nil { + in, out := &in.SigningAlgorithm, &out.SigningAlgorithm + *out = new(string) + **out = **in + } + if in.TemplateArn != nil { + in, out := &in.TemplateArn, &out.TemplateArn + *out = new(string) + **out = **in + } + if in.Validity != nil { + in, out := &in.Validity, &out.Validity + *out = new(ValidityInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. +func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateList) DeepCopyInto(out *CertificateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Certificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateList. +func (in *CertificateList) DeepCopy() *CertificateList { + if in == nil { + return nil + } + out := new(CertificateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.APIPassthrough != nil { + in, out := &in.APIPassthrough, &out.APIPassthrough + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.CertificateAuthorityArn != nil { + in, out := &in.CertificateAuthorityArn, &out.CertificateAuthorityArn + *out = new(string) + **out = **in + } + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.SigningAlgorithm != nil { + in, out := &in.SigningAlgorithm, &out.SigningAlgorithm + *out = new(string) + **out = **in + } + if in.TemplateArn != nil { + in, out := &in.TemplateArn, &out.TemplateArn + *out = new(string) + **out = **in + } + if 
in.Validity != nil { + in, out := &in.Validity, &out.Validity + *out = new(ValidityObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. +func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.APIPassthrough != nil { + in, out := &in.APIPassthrough, &out.APIPassthrough + *out = new(string) + **out = **in + } + if in.CertificateAuthorityArn != nil { + in, out := &in.CertificateAuthorityArn, &out.CertificateAuthorityArn + *out = new(string) + **out = **in + } + if in.CertificateAuthorityArnRef != nil { + in, out := &in.CertificateAuthorityArnRef, &out.CertificateAuthorityArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateAuthorityArnSelector != nil { + in, out := &in.CertificateAuthorityArnSelector, &out.CertificateAuthorityArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + out.CertificateSigningRequestSecretRef = in.CertificateSigningRequestSecretRef + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SigningAlgorithm != nil { + in, out := &in.SigningAlgorithm, &out.SigningAlgorithm + *out = new(string) + **out = **in + } + if in.TemplateArn != nil { + in, out := &in.TemplateArn, &out.TemplateArn + *out = new(string) + **out = **in + } + if in.Validity != nil { + in, out := &in.Validity, &out.Validity + *out = new(ValidityParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. 
+func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSpec. +func (in *CertificateSpec) DeepCopy() *CertificateSpec { + if in == nil { + return nil + } + out := new(CertificateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateStatus) DeepCopyInto(out *CertificateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateStatus. +func (in *CertificateStatus) DeepCopy() *CertificateStatus { + if in == nil { + return nil + } + out := new(CertificateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrlConfigurationInitParameters) DeepCopyInto(out *CrlConfigurationInitParameters) { + *out = *in + if in.CustomCname != nil { + in, out := &in.CustomCname, &out.CustomCname + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExpirationInDays != nil { + in, out := &in.ExpirationInDays, &out.ExpirationInDays + *out = new(float64) + **out = **in + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3ObjectACL != nil { + in, out := &in.S3ObjectACL, &out.S3ObjectACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrlConfigurationInitParameters. +func (in *CrlConfigurationInitParameters) DeepCopy() *CrlConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CrlConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrlConfigurationObservation) DeepCopyInto(out *CrlConfigurationObservation) { + *out = *in + if in.CustomCname != nil { + in, out := &in.CustomCname, &out.CustomCname + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExpirationInDays != nil { + in, out := &in.ExpirationInDays, &out.ExpirationInDays + *out = new(float64) + **out = **in + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3ObjectACL != nil { + in, out := &in.S3ObjectACL, &out.S3ObjectACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrlConfigurationObservation. 
+func (in *CrlConfigurationObservation) DeepCopy() *CrlConfigurationObservation { + if in == nil { + return nil + } + out := new(CrlConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrlConfigurationParameters) DeepCopyInto(out *CrlConfigurationParameters) { + *out = *in + if in.CustomCname != nil { + in, out := &in.CustomCname, &out.CustomCname + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExpirationInDays != nil { + in, out := &in.ExpirationInDays, &out.ExpirationInDays + *out = new(float64) + **out = **in + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3ObjectACL != nil { + in, out := &in.S3ObjectACL, &out.S3ObjectACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrlConfigurationParameters. +func (in *CrlConfigurationParameters) DeepCopy() *CrlConfigurationParameters { + if in == nil { + return nil + } + out := new(CrlConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OcspConfigurationInitParameters) DeepCopyInto(out *OcspConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OcspCustomCname != nil { + in, out := &in.OcspCustomCname, &out.OcspCustomCname + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OcspConfigurationInitParameters. 
+func (in *OcspConfigurationInitParameters) DeepCopy() *OcspConfigurationInitParameters { + if in == nil { + return nil + } + out := new(OcspConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OcspConfigurationObservation) DeepCopyInto(out *OcspConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OcspCustomCname != nil { + in, out := &in.OcspCustomCname, &out.OcspCustomCname + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OcspConfigurationObservation. +func (in *OcspConfigurationObservation) DeepCopy() *OcspConfigurationObservation { + if in == nil { + return nil + } + out := new(OcspConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OcspConfigurationParameters) DeepCopyInto(out *OcspConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OcspCustomCname != nil { + in, out := &in.OcspCustomCname, &out.OcspCustomCname + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OcspConfigurationParameters. +func (in *OcspConfigurationParameters) DeepCopy() *OcspConfigurationParameters { + if in == nil { + return nil + } + out := new(OcspConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RevocationConfigurationInitParameters) DeepCopyInto(out *RevocationConfigurationInitParameters) { + *out = *in + if in.CrlConfiguration != nil { + in, out := &in.CrlConfiguration, &out.CrlConfiguration + *out = new(CrlConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OcspConfiguration != nil { + in, out := &in.OcspConfiguration, &out.OcspConfiguration + *out = new(OcspConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevocationConfigurationInitParameters. +func (in *RevocationConfigurationInitParameters) DeepCopy() *RevocationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RevocationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RevocationConfigurationObservation) DeepCopyInto(out *RevocationConfigurationObservation) { + *out = *in + if in.CrlConfiguration != nil { + in, out := &in.CrlConfiguration, &out.CrlConfiguration + *out = new(CrlConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.OcspConfiguration != nil { + in, out := &in.OcspConfiguration, &out.OcspConfiguration + *out = new(OcspConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevocationConfigurationObservation. +func (in *RevocationConfigurationObservation) DeepCopy() *RevocationConfigurationObservation { + if in == nil { + return nil + } + out := new(RevocationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RevocationConfigurationParameters) DeepCopyInto(out *RevocationConfigurationParameters) { + *out = *in + if in.CrlConfiguration != nil { + in, out := &in.CrlConfiguration, &out.CrlConfiguration + *out = new(CrlConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.OcspConfiguration != nil { + in, out := &in.OcspConfiguration, &out.OcspConfiguration + *out = new(OcspConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevocationConfigurationParameters. +func (in *RevocationConfigurationParameters) DeepCopy() *RevocationConfigurationParameters { + if in == nil { + return nil + } + out := new(RevocationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectInitParameters) DeepCopyInto(out *SubjectInitParameters) { + *out = *in + if in.CommonName != nil { + in, out := &in.CommonName, &out.CommonName + *out = new(string) + **out = **in + } + if in.Country != nil { + in, out := &in.Country, &out.Country + *out = new(string) + **out = **in + } + if in.DistinguishedNameQualifier != nil { + in, out := &in.DistinguishedNameQualifier, &out.DistinguishedNameQualifier + *out = new(string) + **out = **in + } + if in.GenerationQualifier != nil { + in, out := &in.GenerationQualifier, &out.GenerationQualifier + *out = new(string) + **out = **in + } + if in.GivenName != nil { + in, out := &in.GivenName, &out.GivenName + *out = new(string) + **out = **in + } + if in.Initials != nil { + in, out := &in.Initials, &out.Initials + *out = new(string) + **out = **in + } + if in.Locality != nil { + in, out := &in.Locality, &out.Locality + *out = new(string) + **out = **in + } + if in.Organization != nil { + in, out := &in.Organization, &out.Organization + *out = new(string) + **out = **in + } + if in.OrganizationalUnit != nil { + in, 
out := &in.OrganizationalUnit, &out.OrganizationalUnit + *out = new(string) + **out = **in + } + if in.Pseudonym != nil { + in, out := &in.Pseudonym, &out.Pseudonym + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Surname != nil { + in, out := &in.Surname, &out.Surname + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectInitParameters. +func (in *SubjectInitParameters) DeepCopy() *SubjectInitParameters { + if in == nil { + return nil + } + out := new(SubjectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectObservation) DeepCopyInto(out *SubjectObservation) { + *out = *in + if in.CommonName != nil { + in, out := &in.CommonName, &out.CommonName + *out = new(string) + **out = **in + } + if in.Country != nil { + in, out := &in.Country, &out.Country + *out = new(string) + **out = **in + } + if in.DistinguishedNameQualifier != nil { + in, out := &in.DistinguishedNameQualifier, &out.DistinguishedNameQualifier + *out = new(string) + **out = **in + } + if in.GenerationQualifier != nil { + in, out := &in.GenerationQualifier, &out.GenerationQualifier + *out = new(string) + **out = **in + } + if in.GivenName != nil { + in, out := &in.GivenName, &out.GivenName + *out = new(string) + **out = **in + } + if in.Initials != nil { + in, out := &in.Initials, &out.Initials + *out = new(string) + **out = **in + } + if in.Locality != nil { + in, out := &in.Locality, &out.Locality + *out = new(string) + **out = **in + } + if in.Organization != nil { + in, out := &in.Organization, &out.Organization + *out = new(string) + **out = **in + } + if in.OrganizationalUnit != nil { 
+ in, out := &in.OrganizationalUnit, &out.OrganizationalUnit + *out = new(string) + **out = **in + } + if in.Pseudonym != nil { + in, out := &in.Pseudonym, &out.Pseudonym + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Surname != nil { + in, out := &in.Surname, &out.Surname + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectObservation. +func (in *SubjectObservation) DeepCopy() *SubjectObservation { + if in == nil { + return nil + } + out := new(SubjectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectParameters) DeepCopyInto(out *SubjectParameters) { + *out = *in + if in.CommonName != nil { + in, out := &in.CommonName, &out.CommonName + *out = new(string) + **out = **in + } + if in.Country != nil { + in, out := &in.Country, &out.Country + *out = new(string) + **out = **in + } + if in.DistinguishedNameQualifier != nil { + in, out := &in.DistinguishedNameQualifier, &out.DistinguishedNameQualifier + *out = new(string) + **out = **in + } + if in.GenerationQualifier != nil { + in, out := &in.GenerationQualifier, &out.GenerationQualifier + *out = new(string) + **out = **in + } + if in.GivenName != nil { + in, out := &in.GivenName, &out.GivenName + *out = new(string) + **out = **in + } + if in.Initials != nil { + in, out := &in.Initials, &out.Initials + *out = new(string) + **out = **in + } + if in.Locality != nil { + in, out := &in.Locality, &out.Locality + *out = new(string) + **out = **in + } + if in.Organization != nil { + in, out := &in.Organization, &out.Organization + *out = new(string) + **out = **in + } + if in.OrganizationalUnit != nil { + in, 
out := &in.OrganizationalUnit, &out.OrganizationalUnit + *out = new(string) + **out = **in + } + if in.Pseudonym != nil { + in, out := &in.Pseudonym, &out.Pseudonym + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Surname != nil { + in, out := &in.Surname, &out.Surname + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectParameters. +func (in *SubjectParameters) DeepCopy() *SubjectParameters { + if in == nil { + return nil + } + out := new(SubjectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidityInitParameters) DeepCopyInto(out *ValidityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidityInitParameters. +func (in *ValidityInitParameters) DeepCopy() *ValidityInitParameters { + if in == nil { + return nil + } + out := new(ValidityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidityObservation) DeepCopyInto(out *ValidityObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidityObservation. 
+func (in *ValidityObservation) DeepCopy() *ValidityObservation { + if in == nil { + return nil + } + out := new(ValidityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidityParameters) DeepCopyInto(out *ValidityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidityParameters. +func (in *ValidityParameters) DeepCopy() *ValidityParameters { + if in == nil { + return nil + } + out := new(ValidityParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/acmpca/v1beta2/zz_generated.managed.go b/apis/acmpca/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..01834e4c99 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Certificate. +func (mg *Certificate) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Certificate. +func (mg *Certificate) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Certificate. +func (mg *Certificate) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Certificate. 
+func (mg *Certificate) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Certificate. +func (mg *Certificate) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Certificate. +func (mg *Certificate) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Certificate. +func (mg *Certificate) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Certificate. +func (mg *Certificate) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Certificate. +func (mg *Certificate) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Certificate. +func (mg *Certificate) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Certificate. +func (mg *Certificate) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Certificate. +func (mg *Certificate) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CertificateAuthority. +func (mg *CertificateAuthority) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CertificateAuthority. +func (mg *CertificateAuthority) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CertificateAuthority. 
+func (mg *CertificateAuthority) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CertificateAuthority. +func (mg *CertificateAuthority) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CertificateAuthority. +func (mg *CertificateAuthority) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CertificateAuthority. +func (mg *CertificateAuthority) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CertificateAuthority. +func (mg *CertificateAuthority) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CertificateAuthority. +func (mg *CertificateAuthority) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CertificateAuthority. +func (mg *CertificateAuthority) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CertificateAuthority. +func (mg *CertificateAuthority) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CertificateAuthority. +func (mg *CertificateAuthority) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CertificateAuthority. 
+func (mg *CertificateAuthority) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/acmpca/v1beta2/zz_generated.managedlist.go b/apis/acmpca/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..e082bda9b4 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CertificateAuthorityList. +func (l *CertificateAuthorityList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CertificateList. +func (l *CertificateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/acmpca/v1beta2/zz_generated.resolvers.go b/apis/acmpca/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..040f874867 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,66 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + errors "github.com/pkg/errors" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Certificate) ResolveReferences( // ResolveReferences of this Certificate. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta2", "CertificateAuthority", "CertificateAuthorityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CertificateAuthorityArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CertificateAuthorityArnRef, + Selector: mg.Spec.ForProvider.CertificateAuthorityArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CertificateAuthorityArn") + } + mg.Spec.ForProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CertificateAuthorityArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta2", "CertificateAuthority", "CertificateAuthorityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CertificateAuthorityArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CertificateAuthorityArnRef, + Selector: mg.Spec.InitProvider.CertificateAuthorityArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CertificateAuthorityArn") + } + mg.Spec.InitProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CertificateAuthorityArnRef = rsp.ResolvedReference + + 
return nil +} diff --git a/apis/acmpca/v1beta2/zz_groupversion_info.go b/apis/acmpca/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..dda79eb2a0 --- /dev/null +++ b/apis/acmpca/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=acmpca.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "acmpca.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/amp/v1beta1/zz_alertmanagerdefinition_types.go b/apis/amp/v1beta1/zz_alertmanagerdefinition_types.go index cd86ad9948..a37dfecda5 100755 --- a/apis/amp/v1beta1/zz_alertmanagerdefinition_types.go +++ b/apis/amp/v1beta1/zz_alertmanagerdefinition_types.go @@ -19,7 +19,7 @@ type AlertManagerDefinitionInitParameters struct { Definition *string `json:"definition,omitempty" tf:"definition,omitempty"` // ID of the prometheus workspace the alert manager definition should be linked to - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amp/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amp/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` @@ -55,7 +55,7 @@ type AlertManagerDefinitionParameters struct { Region *string `json:"region" tf:"-"` // ID of the prometheus workspace the alert manager definition should be linked to - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amp/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amp/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/amp/v1beta1/zz_generated.conversion_hubs.go b/apis/amp/v1beta1/zz_generated.conversion_hubs.go index b6b0e66711..5f7caafaef 100755 --- a/apis/amp/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/amp/v1beta1/zz_generated.conversion_hubs.go @@ -11,6 +11,3 @@ func (tr *AlertManagerDefinition) Hub() {} // Hub marks this type as a conversion hub. func (tr *RuleGroupNamespace) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *Workspace) Hub() {} diff --git a/apis/amp/v1beta1/zz_generated.conversion_spokes.go b/apis/amp/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..508736b23d --- /dev/null +++ b/apis/amp/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Workspace to the hub type. +func (tr *Workspace) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workspace type. 
+func (tr *Workspace) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/amp/v1beta1/zz_generated.resolvers.go b/apis/amp/v1beta1/zz_generated.resolvers.go index b3393bda0a..14235351c1 100644 --- a/apis/amp/v1beta1/zz_generated.resolvers.go +++ b/apis/amp/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *AlertManagerDefinition) ResolveReferences( // ResolveReferences of thi var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("amp.aws.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("amp.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -45,7 +45,7 @@ func (mg *AlertManagerDefinition) ResolveReferences( // ResolveReferences of thi mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("amp.aws.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("amp.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -76,7 +76,7 @@ func (mg *RuleGroupNamespace) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("amp.aws.upbound.io", "v1beta1", "Workspace", 
"WorkspaceList") + m, l, err = apisresolver.GetManagedResource("amp.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/amp/v1beta1/zz_rulegroupnamespace_types.go b/apis/amp/v1beta1/zz_rulegroupnamespace_types.go index 2d9ed2a577..1591518048 100755 --- a/apis/amp/v1beta1/zz_rulegroupnamespace_types.go +++ b/apis/amp/v1beta1/zz_rulegroupnamespace_types.go @@ -42,7 +42,7 @@ type RuleGroupNamespaceParameters struct { Region *string `json:"region" tf:"-"` // ID of the prometheus workspace the rule group namespace should be linked to - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amp/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amp/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/amp/v1beta2/zz_generated.conversion_hubs.go b/apis/amp/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..60d422318a --- /dev/null +++ b/apis/amp/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Workspace) Hub() {} diff --git a/apis/amp/v1beta2/zz_generated.deepcopy.go b/apis/amp/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..bcde63d743 --- /dev/null +++ b/apis/amp/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,362 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationInitParameters) DeepCopyInto(out *LoggingConfigurationInitParameters) { + *out = *in + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationInitParameters. +func (in *LoggingConfigurationInitParameters) DeepCopy() *LoggingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationObservation) DeepCopyInto(out *LoggingConfigurationObservation) { + *out = *in + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationObservation. 
+func (in *LoggingConfigurationObservation) DeepCopy() *LoggingConfigurationObservation { + if in == nil { + return nil + } + out := new(LoggingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationParameters) DeepCopyInto(out *LoggingConfigurationParameters) { + *out = *in + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationParameters. +func (in *LoggingConfigurationParameters) DeepCopy() *LoggingConfigurationParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workspace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceInitParameters) DeepCopyInto(out *WorkspaceInitParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceInitParameters. +func (in *WorkspaceInitParameters) DeepCopy() *WorkspaceInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workspace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList. 
+func (in *WorkspaceList) DeepCopy() *WorkspaceList { + if in == nil { + return nil + } + out := new(WorkspaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkspaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceObservation) DeepCopyInto(out *WorkspaceObservation) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.PrometheusEndpoint != nil { + in, out := &in.PrometheusEndpoint, &out.PrometheusEndpoint + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new WorkspaceObservation. +func (in *WorkspaceObservation) DeepCopy() *WorkspaceObservation { + if in == nil { + return nil + } + out := new(WorkspaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceParameters) DeepCopyInto(out *WorkspaceParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceParameters. +func (in *WorkspaceParameters) DeepCopy() *WorkspaceParameters { + if in == nil { + return nil + } + out := new(WorkspaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec. +func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec { + if in == nil { + return nil + } + out := new(WorkspaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus. +func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus { + if in == nil { + return nil + } + out := new(WorkspaceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/amp/v1beta2/zz_generated.managed.go b/apis/amp/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..562d4553d0 --- /dev/null +++ b/apis/amp/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Workspace. +func (mg *Workspace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workspace. +func (mg *Workspace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workspace. 
+func (mg *Workspace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workspace. +func (mg *Workspace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workspace. +func (mg *Workspace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workspace. +func (mg *Workspace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workspace. +func (mg *Workspace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workspace. +func (mg *Workspace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workspace. +func (mg *Workspace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workspace. 
+func (mg *Workspace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/amp/v1beta2/zz_generated.managedlist.go b/apis/amp/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..d32cca0e44 --- /dev/null +++ b/apis/amp/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this WorkspaceList. +func (l *WorkspaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/amp/v1beta2/zz_generated.resolvers.go b/apis/amp/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..6939c025fd --- /dev/null +++ b/apis/amp/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Workspace) ResolveReferences( // ResolveReferences of this Workspace. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyArn") + } + mg.Spec.ForProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyArn") + } + mg.Spec.InitProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/amp/v1beta2/zz_groupversion_info.go b/apis/amp/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..eb7f41b34a --- /dev/null +++ b/apis/amp/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// 
SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=amp.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "amp.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/amp/v1beta2/zz_workspace_terraformed.go b/apis/amp/v1beta2/zz_workspace_terraformed.go new file mode 100755 index 0000000000..c97979de4a --- /dev/null +++ b/apis/amp/v1beta2/zz_workspace_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workspace +func (mg *Workspace) GetTerraformResourceType() string { + return "aws_prometheus_workspace" +} + +// GetConnectionDetailsMapping for this Workspace +func (tr *Workspace) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Workspace +func (tr *Workspace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workspace +func (tr *Workspace) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workspace +func (tr *Workspace) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workspace +func (tr *Workspace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Workspace +func (tr *Workspace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Workspace using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Workspace) LateInitialize(attrs []byte) (bool, error) { + params := &WorkspaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workspace) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/amp/v1beta2/zz_workspace_types.go b/apis/amp/v1beta2/zz_workspace_types.go new file mode 100755 index 0000000000..3be38dbd4c --- /dev/null +++ b/apis/amp/v1beta2/zz_workspace_types.go @@ -0,0 +1,181 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LoggingConfigurationInitParameters struct { + + // The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist. + LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` +} + +type LoggingConfigurationObservation struct { + + // The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist. + LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` +} + +type LoggingConfigurationParameters struct { + + // The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist. + // +kubebuilder:validation:Optional + LogGroupArn *string `json:"logGroupArn" tf:"log_group_arn,omitempty"` +} + +type WorkspaceInitParameters struct { + + // The alias of the prometheus workspace. See more in AWS Docs. + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // The ARN for the KMS encryption key. If this argument is not provided, then the AWS owned encryption key will be used to encrypt the data in the workspace. 
See more in AWS Docs + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // Logging configuration for the workspace. See Logging Configuration below for details. + LoggingConfiguration *LoggingConfigurationInitParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WorkspaceObservation struct { + + // The alias of the prometheus workspace. See more in AWS Docs. + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // Amazon Resource Name (ARN) of the workspace. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Identifier of the workspace + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ARN for the KMS encryption key. If this argument is not provided, then the AWS owned encryption key will be used to encrypt the data in the workspace. See more in AWS Docs + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Logging configuration for the workspace. See Logging Configuration below for details. + LoggingConfiguration *LoggingConfigurationObservation `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // Prometheus endpoint available for this workspace. + PrometheusEndpoint *string `json:"prometheusEndpoint,omitempty" tf:"prometheus_endpoint,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type WorkspaceParameters struct { + + // The alias of the prometheus workspace. See more in AWS Docs. + // +kubebuilder:validation:Optional + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // The ARN for the KMS encryption key. If this argument is not provided, then the AWS owned encryption key will be used to encrypt the data in the workspace. See more in AWS Docs + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // Logging configuration for the workspace. See Logging Configuration below for details. + // +kubebuilder:validation:Optional + LoggingConfiguration *LoggingConfigurationParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WorkspaceSpec defines the desired state of Workspace +type WorkspaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkspaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WorkspaceInitParameters `json:"initProvider,omitempty"` +} + +// WorkspaceStatus defines the observed state of Workspace. +type WorkspaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkspaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workspace is the Schema for the Workspaces API. 
Manages an Amazon Managed Service for Prometheus (AMP) Workspace +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Workspace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec WorkspaceSpec `json:"spec"` + Status WorkspaceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkspaceList contains a list of Workspaces +type WorkspaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workspace `json:"items"` +} + +// Repository type metadata. +var ( + Workspace_Kind = "Workspace" + Workspace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workspace_Kind}.String() + Workspace_KindAPIVersion = Workspace_Kind + "." + CRDGroupVersion.String() + Workspace_GroupVersionKind = CRDGroupVersion.WithKind(Workspace_Kind) +) + +func init() { + SchemeBuilder.Register(&Workspace{}, &WorkspaceList{}) +} diff --git a/apis/amplify/v1beta1/zz_backendenvironment_types.go b/apis/amplify/v1beta1/zz_backendenvironment_types.go index 644c99a36c..1719a06a0f 100755 --- a/apis/amplify/v1beta1/zz_backendenvironment_types.go +++ b/apis/amplify/v1beta1/zz_backendenvironment_types.go @@ -43,7 +43,7 @@ type BackendEnvironmentObservation struct { type BackendEnvironmentParameters struct { // Unique ID for an Amplify app. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amplify/v1beta1.App + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amplify/v1beta2.App // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` diff --git a/apis/amplify/v1beta1/zz_branch_types.go b/apis/amplify/v1beta1/zz_branch_types.go index 5e8abfdf91..e6b7e61bad 100755 --- a/apis/amplify/v1beta1/zz_branch_types.go +++ b/apis/amplify/v1beta1/zz_branch_types.go @@ -137,7 +137,7 @@ type BranchObservation struct { type BranchParameters struct { // Unique ID for an Amplify app. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amplify/v1beta1.App + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amplify/v1beta2.App // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` diff --git a/apis/amplify/v1beta1/zz_generated.conversion_hubs.go b/apis/amplify/v1beta1/zz_generated.conversion_hubs.go index 8c4547bef7..c60242b92d 100755 --- a/apis/amplify/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/amplify/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *App) Hub() {} - // Hub marks this type as a conversion hub. func (tr *BackendEnvironment) Hub() {} diff --git a/apis/amplify/v1beta1/zz_generated.conversion_spokes.go b/apis/amplify/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..4e65f01a2d --- /dev/null +++ b/apis/amplify/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this App to the hub type. +func (tr *App) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the App type. +func (tr *App) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/amplify/v1beta1/zz_generated.resolvers.go b/apis/amplify/v1beta1/zz_generated.resolvers.go index 4ff7201987..bd9bbd920a 100644 --- a/apis/amplify/v1beta1/zz_generated.resolvers.go +++ b/apis/amplify/v1beta1/zz_generated.resolvers.go @@ -78,7 +78,7 @@ func (mg *BackendEnvironment) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("amplify.aws.upbound.io", "v1beta1", "App", "AppList") + m, l, err = apisresolver.GetManagedResource("amplify.aws.upbound.io", "v1beta2", "App", "AppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -109,7 +109,7 @@ func (mg *Branch) ResolveReferences(ctx context.Context, c 
client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("amplify.aws.upbound.io", "v1beta1", "App", "AppList") + m, l, err = apisresolver.GetManagedResource("amplify.aws.upbound.io", "v1beta2", "App", "AppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -140,7 +140,7 @@ func (mg *Webhook) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("amplify.aws.upbound.io", "v1beta1", "App", "AppList") + m, l, err = apisresolver.GetManagedResource("amplify.aws.upbound.io", "v1beta2", "App", "AppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -178,7 +178,7 @@ func (mg *Webhook) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.BranchName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BranchNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("amplify.aws.upbound.io", "v1beta1", "App", "AppList") + m, l, err = apisresolver.GetManagedResource("amplify.aws.upbound.io", "v1beta2", "App", "AppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/amplify/v1beta1/zz_webhook_types.go b/apis/amplify/v1beta1/zz_webhook_types.go index 77b2b14f95..66782c38d1 100755 --- a/apis/amplify/v1beta1/zz_webhook_types.go +++ b/apis/amplify/v1beta1/zz_webhook_types.go @@ -16,7 +16,7 @@ import ( type WebhookInitParameters struct { // Unique ID for an Amplify app. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amplify/v1beta1.App + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amplify/v1beta2.App // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` @@ -67,7 +67,7 @@ type WebhookObservation struct { type WebhookParameters struct { // Unique ID for an Amplify app. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amplify/v1beta1.App + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/amplify/v1beta2.App // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` diff --git a/apis/amplify/v1beta2/zz_app_terraformed.go b/apis/amplify/v1beta2/zz_app_terraformed.go new file mode 100755 index 0000000000..ae2e786e10 --- /dev/null +++ b/apis/amplify/v1beta2/zz_app_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this App +func (mg *App) GetTerraformResourceType() string { + return "aws_amplify_app" +} + +// GetConnectionDetailsMapping for this App +func (tr *App) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"access_token": "accessTokenSecretRef", "auto_branch_creation_config[*].basic_auth_credentials": "autoBranchCreationConfig[*].basicAuthCredentialsSecretRef", "basic_auth_credentials": "basicAuthCredentialsSecretRef", "oauth_token": "oauthTokenSecretRef"} +} + +// GetObservation of this App +func (tr *App) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this App +func (tr *App) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this App +func (tr *App) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this App +func (tr *App) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this App +func (tr *App) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this App +func (tr *App) GetInitParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this App +func (tr *App) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this App using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *App) LateInitialize(attrs []byte) (bool, error) { + params := &AppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *App) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/amplify/v1beta2/zz_app_types.go b/apis/amplify/v1beta2/zz_app_types.go new file mode 100755 index 0000000000..c5ef37ba43 --- /dev/null +++ b/apis/amplify/v1beta2/zz_app_types.go @@ -0,0 +1,494 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AppInitParameters struct { + + // Personal access token for a third-party source control system for an Amplify app. This token must have write access to the relevant repo to create a webhook and a read-only deploy key for the Amplify project. The token is not stored, so after applying this attribute can be removed and the setup token deleted. + AccessTokenSecretRef *v1.SecretKeySelector `json:"accessTokenSecretRef,omitempty" tf:"-"` + + // Automated branch creation configuration for an Amplify app. An auto_branch_creation_config block is documented below. + AutoBranchCreationConfig *AutoBranchCreationConfigInitParameters `json:"autoBranchCreationConfig,omitempty" tf:"auto_branch_creation_config,omitempty"` + + // Automated branch creation glob patterns for an Amplify app. + // +listType=set + AutoBranchCreationPatterns []*string `json:"autoBranchCreationPatterns,omitempty" tf:"auto_branch_creation_patterns,omitempty"` + + // Credentials for basic authorization for an Amplify app. + BasicAuthCredentialsSecretRef *v1.SecretKeySelector `json:"basicAuthCredentialsSecretRef,omitempty" tf:"-"` + + // The build specification (build spec) for an Amplify app. 
+ BuildSpec *string `json:"buildSpec,omitempty" tf:"build_spec,omitempty"` + + // The custom HTTP headers for an Amplify app. + CustomHeaders *string `json:"customHeaders,omitempty" tf:"custom_headers,omitempty"` + + // Custom rewrite and redirect rules for an Amplify app. A custom_rule block is documented below. + CustomRule []CustomRuleInitParameters `json:"customRule,omitempty" tf:"custom_rule,omitempty"` + + // Description for an Amplify app. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Enables automated branch creation for an Amplify app. + EnableAutoBranchCreation *bool `json:"enableAutoBranchCreation,omitempty" tf:"enable_auto_branch_creation,omitempty"` + + // Enables basic authorization for an Amplify app. This will apply to all branches that are part of this app. + EnableBasicAuth *bool `json:"enableBasicAuth,omitempty" tf:"enable_basic_auth,omitempty"` + + // Enables auto-building of branches for the Amplify App. + EnableBranchAutoBuild *bool `json:"enableBranchAutoBuild,omitempty" tf:"enable_branch_auto_build,omitempty"` + + // Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository. + EnableBranchAutoDeletion *bool `json:"enableBranchAutoDeletion,omitempty" tf:"enable_branch_auto_deletion,omitempty"` + + // Environment variables map for an Amplify app. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // AWS Identity and Access Management (IAM) service role for an Amplify app. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + IAMServiceRoleArn *string `json:"iamServiceRoleArn,omitempty" tf:"iam_service_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamServiceRoleArn. 
+ // +kubebuilder:validation:Optional + IAMServiceRoleArnRef *v1.Reference `json:"iamServiceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamServiceRoleArn. + // +kubebuilder:validation:Optional + IAMServiceRoleArnSelector *v1.Selector `json:"iamServiceRoleArnSelector,omitempty" tf:"-"` + + // Name for an Amplify app. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key. The OAuth token is not stored. + OauthTokenSecretRef *v1.SecretKeySelector `json:"oauthTokenSecretRef,omitempty" tf:"-"` + + // Platform or framework for an Amplify app. Valid values: WEB, WEB_COMPUTE. Default value: WEB. + Platform *string `json:"platform,omitempty" tf:"platform,omitempty"` + + // Repository for an Amplify app. + Repository *string `json:"repository,omitempty" tf:"repository,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AppObservation struct { + + // ARN of the Amplify app. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Automated branch creation configuration for an Amplify app. An auto_branch_creation_config block is documented below. + AutoBranchCreationConfig *AutoBranchCreationConfigObservation `json:"autoBranchCreationConfig,omitempty" tf:"auto_branch_creation_config,omitempty"` + + // Automated branch creation glob patterns for an Amplify app. + // +listType=set + AutoBranchCreationPatterns []*string `json:"autoBranchCreationPatterns,omitempty" tf:"auto_branch_creation_patterns,omitempty"` + + // The build specification (build spec) for an Amplify app. + BuildSpec *string `json:"buildSpec,omitempty" tf:"build_spec,omitempty"` + + // The custom HTTP headers for an Amplify app. 
+ CustomHeaders *string `json:"customHeaders,omitempty" tf:"custom_headers,omitempty"` + + // Custom rewrite and redirect rules for an Amplify app. A custom_rule block is documented below. + CustomRule []CustomRuleObservation `json:"customRule,omitempty" tf:"custom_rule,omitempty"` + + // Default domain for the Amplify app. + DefaultDomain *string `json:"defaultDomain,omitempty" tf:"default_domain,omitempty"` + + // Description for an Amplify app. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Enables automated branch creation for an Amplify app. + EnableAutoBranchCreation *bool `json:"enableAutoBranchCreation,omitempty" tf:"enable_auto_branch_creation,omitempty"` + + // Enables basic authorization for an Amplify app. This will apply to all branches that are part of this app. + EnableBasicAuth *bool `json:"enableBasicAuth,omitempty" tf:"enable_basic_auth,omitempty"` + + // Enables auto-building of branches for the Amplify App. + EnableBranchAutoBuild *bool `json:"enableBranchAutoBuild,omitempty" tf:"enable_branch_auto_build,omitempty"` + + // Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository. + EnableBranchAutoDeletion *bool `json:"enableBranchAutoDeletion,omitempty" tf:"enable_branch_auto_deletion,omitempty"` + + // Environment variables map for an Amplify app. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // AWS Identity and Access Management (IAM) service role for an Amplify app. + IAMServiceRoleArn *string `json:"iamServiceRoleArn,omitempty" tf:"iam_service_role_arn,omitempty"` + + // Unique ID of the Amplify app. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name for an Amplify app. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Platform or framework for an Amplify app. Valid values: WEB, WEB_COMPUTE. Default value: WEB. 
+ Platform *string `json:"platform,omitempty" tf:"platform,omitempty"` + + // Describes the information about a production branch for an Amplify app. A production_branch block is documented below. + ProductionBranch []ProductionBranchObservation `json:"productionBranch,omitempty" tf:"production_branch,omitempty"` + + // Repository for an Amplify app. + Repository *string `json:"repository,omitempty" tf:"repository,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type AppParameters struct { + + // Personal access token for a third-party source control system for an Amplify app. This token must have write access to the relevant repo to create a webhook and a read-only deploy key for the Amplify project. The token is not stored, so after applying this attribute can be removed and the setup token deleted. + // +kubebuilder:validation:Optional + AccessTokenSecretRef *v1.SecretKeySelector `json:"accessTokenSecretRef,omitempty" tf:"-"` + + // Automated branch creation configuration for an Amplify app. An auto_branch_creation_config block is documented below. + // +kubebuilder:validation:Optional + AutoBranchCreationConfig *AutoBranchCreationConfigParameters `json:"autoBranchCreationConfig,omitempty" tf:"auto_branch_creation_config,omitempty"` + + // Automated branch creation glob patterns for an Amplify app. + // +kubebuilder:validation:Optional + // +listType=set + AutoBranchCreationPatterns []*string `json:"autoBranchCreationPatterns,omitempty" tf:"auto_branch_creation_patterns,omitempty"` + + // Credentials for basic authorization for an Amplify app. 
+ // +kubebuilder:validation:Optional + BasicAuthCredentialsSecretRef *v1.SecretKeySelector `json:"basicAuthCredentialsSecretRef,omitempty" tf:"-"` + + // The build specification (build spec) for an Amplify app. + // +kubebuilder:validation:Optional + BuildSpec *string `json:"buildSpec,omitempty" tf:"build_spec,omitempty"` + + // The custom HTTP headers for an Amplify app. + // +kubebuilder:validation:Optional + CustomHeaders *string `json:"customHeaders,omitempty" tf:"custom_headers,omitempty"` + + // Custom rewrite and redirect rules for an Amplify app. A custom_rule block is documented below. + // +kubebuilder:validation:Optional + CustomRule []CustomRuleParameters `json:"customRule,omitempty" tf:"custom_rule,omitempty"` + + // Description for an Amplify app. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Enables automated branch creation for an Amplify app. + // +kubebuilder:validation:Optional + EnableAutoBranchCreation *bool `json:"enableAutoBranchCreation,omitempty" tf:"enable_auto_branch_creation,omitempty"` + + // Enables basic authorization for an Amplify app. This will apply to all branches that are part of this app. + // +kubebuilder:validation:Optional + EnableBasicAuth *bool `json:"enableBasicAuth,omitempty" tf:"enable_basic_auth,omitempty"` + + // Enables auto-building of branches for the Amplify App. + // +kubebuilder:validation:Optional + EnableBranchAutoBuild *bool `json:"enableBranchAutoBuild,omitempty" tf:"enable_branch_auto_build,omitempty"` + + // Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository. + // +kubebuilder:validation:Optional + EnableBranchAutoDeletion *bool `json:"enableBranchAutoDeletion,omitempty" tf:"enable_branch_auto_deletion,omitempty"` + + // Environment variables map for an Amplify app. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // AWS Identity and Access Management (IAM) service role for an Amplify app. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + IAMServiceRoleArn *string `json:"iamServiceRoleArn,omitempty" tf:"iam_service_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamServiceRoleArn. + // +kubebuilder:validation:Optional + IAMServiceRoleArnRef *v1.Reference `json:"iamServiceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamServiceRoleArn. + // +kubebuilder:validation:Optional + IAMServiceRoleArnSelector *v1.Selector `json:"iamServiceRoleArnSelector,omitempty" tf:"-"` + + // Name for an Amplify app. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key. The OAuth token is not stored. + // +kubebuilder:validation:Optional + OauthTokenSecretRef *v1.SecretKeySelector `json:"oauthTokenSecretRef,omitempty" tf:"-"` + + // Platform or framework for an Amplify app. Valid values: WEB, WEB_COMPUTE. Default value: WEB. + // +kubebuilder:validation:Optional + Platform *string `json:"platform,omitempty" tf:"platform,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Repository for an Amplify app. 
+ // +kubebuilder:validation:Optional + Repository *string `json:"repository,omitempty" tf:"repository,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AutoBranchCreationConfigInitParameters struct { + + // Basic authorization credentials for the autocreated branch. + BasicAuthCredentialsSecretRef *v1.SecretKeySelector `json:"basicAuthCredentialsSecretRef,omitempty" tf:"-"` + + // Build specification (build spec) for the autocreated branch. + BuildSpec *string `json:"buildSpec,omitempty" tf:"build_spec,omitempty"` + + // Enables auto building for the autocreated branch. + EnableAutoBuild *bool `json:"enableAutoBuild,omitempty" tf:"enable_auto_build,omitempty"` + + // Enables basic authorization for the autocreated branch. + EnableBasicAuth *bool `json:"enableBasicAuth,omitempty" tf:"enable_basic_auth,omitempty"` + + // Enables performance mode for the branch. + EnablePerformanceMode *bool `json:"enablePerformanceMode,omitempty" tf:"enable_performance_mode,omitempty"` + + // Enables pull request previews for the autocreated branch. + EnablePullRequestPreview *bool `json:"enablePullRequestPreview,omitempty" tf:"enable_pull_request_preview,omitempty"` + + // Environment variables for the autocreated branch. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // Framework for the autocreated branch. + Framework *string `json:"framework,omitempty" tf:"framework,omitempty"` + + // Amplify environment name for the pull request. + PullRequestEnvironmentName *string `json:"pullRequestEnvironmentName,omitempty" tf:"pull_request_environment_name,omitempty"` + + // Describes the current stage for the autocreated branch. Valid values: PRODUCTION, BETA, DEVELOPMENT, EXPERIMENTAL, PULL_REQUEST. 
+ Stage *string `json:"stage,omitempty" tf:"stage,omitempty"` +} + +type AutoBranchCreationConfigObservation struct { + + // Build specification (build spec) for the autocreated branch. + BuildSpec *string `json:"buildSpec,omitempty" tf:"build_spec,omitempty"` + + // Enables auto building for the autocreated branch. + EnableAutoBuild *bool `json:"enableAutoBuild,omitempty" tf:"enable_auto_build,omitempty"` + + // Enables basic authorization for the autocreated branch. + EnableBasicAuth *bool `json:"enableBasicAuth,omitempty" tf:"enable_basic_auth,omitempty"` + + // Enables performance mode for the branch. + EnablePerformanceMode *bool `json:"enablePerformanceMode,omitempty" tf:"enable_performance_mode,omitempty"` + + // Enables pull request previews for the autocreated branch. + EnablePullRequestPreview *bool `json:"enablePullRequestPreview,omitempty" tf:"enable_pull_request_preview,omitempty"` + + // Environment variables for the autocreated branch. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // Framework for the autocreated branch. + Framework *string `json:"framework,omitempty" tf:"framework,omitempty"` + + // Amplify environment name for the pull request. + PullRequestEnvironmentName *string `json:"pullRequestEnvironmentName,omitempty" tf:"pull_request_environment_name,omitempty"` + + // Describes the current stage for the autocreated branch. Valid values: PRODUCTION, BETA, DEVELOPMENT, EXPERIMENTAL, PULL_REQUEST. + Stage *string `json:"stage,omitempty" tf:"stage,omitempty"` +} + +type AutoBranchCreationConfigParameters struct { + + // Basic authorization credentials for the autocreated branch. + // +kubebuilder:validation:Optional + BasicAuthCredentialsSecretRef *v1.SecretKeySelector `json:"basicAuthCredentialsSecretRef,omitempty" tf:"-"` + + // Build specification (build spec) for the autocreated branch. 
+ // +kubebuilder:validation:Optional + BuildSpec *string `json:"buildSpec,omitempty" tf:"build_spec,omitempty"` + + // Enables auto building for the autocreated branch. + // +kubebuilder:validation:Optional + EnableAutoBuild *bool `json:"enableAutoBuild,omitempty" tf:"enable_auto_build,omitempty"` + + // Enables basic authorization for the autocreated branch. + // +kubebuilder:validation:Optional + EnableBasicAuth *bool `json:"enableBasicAuth,omitempty" tf:"enable_basic_auth,omitempty"` + + // Enables performance mode for the branch. + // +kubebuilder:validation:Optional + EnablePerformanceMode *bool `json:"enablePerformanceMode,omitempty" tf:"enable_performance_mode,omitempty"` + + // Enables pull request previews for the autocreated branch. + // +kubebuilder:validation:Optional + EnablePullRequestPreview *bool `json:"enablePullRequestPreview,omitempty" tf:"enable_pull_request_preview,omitempty"` + + // Environment variables for the autocreated branch. + // +kubebuilder:validation:Optional + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // Framework for the autocreated branch. + // +kubebuilder:validation:Optional + Framework *string `json:"framework,omitempty" tf:"framework,omitempty"` + + // Amplify environment name for the pull request. + // +kubebuilder:validation:Optional + PullRequestEnvironmentName *string `json:"pullRequestEnvironmentName,omitempty" tf:"pull_request_environment_name,omitempty"` + + // Describes the current stage for the autocreated branch. Valid values: PRODUCTION, BETA, DEVELOPMENT, EXPERIMENTAL, PULL_REQUEST. + // +kubebuilder:validation:Optional + Stage *string `json:"stage,omitempty" tf:"stage,omitempty"` +} + +type CustomRuleInitParameters struct { + + // Condition for a URL rewrite or redirect rule, such as a country code. 
+ Condition *string `json:"condition,omitempty" tf:"condition,omitempty"` + + // Source pattern for a URL rewrite or redirect rule. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // Status code for a URL rewrite or redirect rule. Valid values: 200, 301, 302, 404, 404-200. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Target pattern for a URL rewrite or redirect rule. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type CustomRuleObservation struct { + + // Condition for a URL rewrite or redirect rule, such as a country code. + Condition *string `json:"condition,omitempty" tf:"condition,omitempty"` + + // Source pattern for a URL rewrite or redirect rule. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // Status code for a URL rewrite or redirect rule. Valid values: 200, 301, 302, 404, 404-200. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Target pattern for a URL rewrite or redirect rule. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type CustomRuleParameters struct { + + // Condition for a URL rewrite or redirect rule, such as a country code. + // +kubebuilder:validation:Optional + Condition *string `json:"condition,omitempty" tf:"condition,omitempty"` + + // Source pattern for a URL rewrite or redirect rule. + // +kubebuilder:validation:Optional + Source *string `json:"source" tf:"source,omitempty"` + + // Status code for a URL rewrite or redirect rule. Valid values: 200, 301, 302, 404, 404-200. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Target pattern for a URL rewrite or redirect rule. + // +kubebuilder:validation:Optional + Target *string `json:"target" tf:"target,omitempty"` +} + +type ProductionBranchInitParameters struct { +} + +type ProductionBranchObservation struct { + + // Branch name for the production branch. 
+ BranchName *string `json:"branchName,omitempty" tf:"branch_name,omitempty"` + + // Last deploy time of the production branch. + LastDeployTime *string `json:"lastDeployTime,omitempty" tf:"last_deploy_time,omitempty"` + + // Status of the production branch. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Thumbnail URL for the production branch. + ThumbnailURL *string `json:"thumbnailUrl,omitempty" tf:"thumbnail_url,omitempty"` +} + +type ProductionBranchParameters struct { +} + +// AppSpec defines the desired state of App +type AppSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AppParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AppInitParameters `json:"initProvider,omitempty"` +} + +// AppStatus defines the observed state of App. +type AppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// App is the Schema for the Apps API. Provides an Amplify App resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type App struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec AppSpec `json:"spec"` + Status AppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AppList contains a list of Apps +type AppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []App `json:"items"` +} + +// Repository type metadata. +var ( + App_Kind = "App" + App_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: App_Kind}.String() + App_KindAPIVersion = App_Kind + "." + CRDGroupVersion.String() + App_GroupVersionKind = CRDGroupVersion.WithKind(App_Kind) +) + +func init() { + SchemeBuilder.Register(&App{}, &AppList{}) +} diff --git a/apis/amplify/v1beta2/zz_generated.conversion_hubs.go b/apis/amplify/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..0e133939e5 --- /dev/null +++ b/apis/amplify/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *App) Hub() {} diff --git a/apis/amplify/v1beta2/zz_generated.deepcopy.go b/apis/amplify/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..4e13557d73 --- /dev/null +++ b/apis/amplify/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,969 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *App) DeepCopyInto(out *App) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new App. +func (in *App) DeepCopy() *App { + if in == nil { + return nil + } + out := new(App) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *App) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppInitParameters) DeepCopyInto(out *AppInitParameters) { + *out = *in + if in.AccessTokenSecretRef != nil { + in, out := &in.AccessTokenSecretRef, &out.AccessTokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AutoBranchCreationConfig != nil { + in, out := &in.AutoBranchCreationConfig, &out.AutoBranchCreationConfig + *out = new(AutoBranchCreationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoBranchCreationPatterns != nil { + in, out := &in.AutoBranchCreationPatterns, &out.AutoBranchCreationPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BasicAuthCredentialsSecretRef != nil { + in, out := &in.BasicAuthCredentialsSecretRef, &out.BasicAuthCredentialsSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BuildSpec != nil { + in, out := &in.BuildSpec, &out.BuildSpec + *out = new(string) + **out = **in + } + if in.CustomHeaders != nil { + in, out := &in.CustomHeaders, &out.CustomHeaders + *out = new(string) + **out = **in + } + if in.CustomRule != nil { + in, out := &in.CustomRule, &out.CustomRule + *out = make([]CustomRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EnableAutoBranchCreation != nil { + in, out := &in.EnableAutoBranchCreation, &out.EnableAutoBranchCreation + *out = new(bool) + **out = **in + } + if in.EnableBasicAuth != nil { + in, out := &in.EnableBasicAuth, &out.EnableBasicAuth + *out = new(bool) + **out = **in + } + if in.EnableBranchAutoBuild != nil { + in, out := &in.EnableBranchAutoBuild, &out.EnableBranchAutoBuild + *out = new(bool) + **out = **in + } + if in.EnableBranchAutoDeletion != nil { + in, out := &in.EnableBranchAutoDeletion, &out.EnableBranchAutoDeletion + *out = 
new(bool) + **out = **in + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.IAMServiceRoleArn != nil { + in, out := &in.IAMServiceRoleArn, &out.IAMServiceRoleArn + *out = new(string) + **out = **in + } + if in.IAMServiceRoleArnRef != nil { + in, out := &in.IAMServiceRoleArnRef, &out.IAMServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMServiceRoleArnSelector != nil { + in, out := &in.IAMServiceRoleArnSelector, &out.IAMServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OauthTokenSecretRef != nil { + in, out := &in.OauthTokenSecretRef, &out.OauthTokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(string) + **out = **in + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppInitParameters. 
+func (in *AppInitParameters) DeepCopy() *AppInitParameters { + if in == nil { + return nil + } + out := new(AppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppList) DeepCopyInto(out *AppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]App, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppList. +func (in *AppList) DeepCopy() *AppList { + if in == nil { + return nil + } + out := new(AppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppObservation) DeepCopyInto(out *AppObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoBranchCreationConfig != nil { + in, out := &in.AutoBranchCreationConfig, &out.AutoBranchCreationConfig + *out = new(AutoBranchCreationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoBranchCreationPatterns != nil { + in, out := &in.AutoBranchCreationPatterns, &out.AutoBranchCreationPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BuildSpec != nil { + in, out := &in.BuildSpec, &out.BuildSpec + *out = new(string) + **out = **in + } + if in.CustomHeaders != nil { + in, out := &in.CustomHeaders, &out.CustomHeaders + *out = new(string) + **out = **in + } + if in.CustomRule != nil { + in, out := &in.CustomRule, &out.CustomRule + *out = make([]CustomRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultDomain != nil { + in, out := &in.DefaultDomain, &out.DefaultDomain + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EnableAutoBranchCreation != nil { + in, out := &in.EnableAutoBranchCreation, &out.EnableAutoBranchCreation + *out = new(bool) + **out = **in + } + if in.EnableBasicAuth != nil { + in, out := &in.EnableBasicAuth, &out.EnableBasicAuth + *out = new(bool) + **out = **in + } + if in.EnableBranchAutoBuild != nil { + in, out := &in.EnableBranchAutoBuild, &out.EnableBranchAutoBuild + *out = new(bool) + **out = **in + } + if in.EnableBranchAutoDeletion != nil { + in, out := &in.EnableBranchAutoDeletion, &out.EnableBranchAutoDeletion + *out = new(bool) + **out = **in + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.IAMServiceRoleArn != nil { + in, out := &in.IAMServiceRoleArn, &out.IAMServiceRoleArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(string) + **out = **in + } + if in.ProductionBranch != nil { + in, out := &in.ProductionBranch, &out.ProductionBranch + *out = make([]ProductionBranchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppObservation. 
+func (in *AppObservation) DeepCopy() *AppObservation { + if in == nil { + return nil + } + out := new(AppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppParameters) DeepCopyInto(out *AppParameters) { + *out = *in + if in.AccessTokenSecretRef != nil { + in, out := &in.AccessTokenSecretRef, &out.AccessTokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AutoBranchCreationConfig != nil { + in, out := &in.AutoBranchCreationConfig, &out.AutoBranchCreationConfig + *out = new(AutoBranchCreationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoBranchCreationPatterns != nil { + in, out := &in.AutoBranchCreationPatterns, &out.AutoBranchCreationPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BasicAuthCredentialsSecretRef != nil { + in, out := &in.BasicAuthCredentialsSecretRef, &out.BasicAuthCredentialsSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BuildSpec != nil { + in, out := &in.BuildSpec, &out.BuildSpec + *out = new(string) + **out = **in + } + if in.CustomHeaders != nil { + in, out := &in.CustomHeaders, &out.CustomHeaders + *out = new(string) + **out = **in + } + if in.CustomRule != nil { + in, out := &in.CustomRule, &out.CustomRule + *out = make([]CustomRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EnableAutoBranchCreation != nil { + in, out := &in.EnableAutoBranchCreation, &out.EnableAutoBranchCreation + *out = new(bool) + **out = **in + } + if in.EnableBasicAuth != nil { + in, out := &in.EnableBasicAuth, &out.EnableBasicAuth + *out = new(bool) + **out = **in + } + if 
in.EnableBranchAutoBuild != nil { + in, out := &in.EnableBranchAutoBuild, &out.EnableBranchAutoBuild + *out = new(bool) + **out = **in + } + if in.EnableBranchAutoDeletion != nil { + in, out := &in.EnableBranchAutoDeletion, &out.EnableBranchAutoDeletion + *out = new(bool) + **out = **in + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.IAMServiceRoleArn != nil { + in, out := &in.IAMServiceRoleArn, &out.IAMServiceRoleArn + *out = new(string) + **out = **in + } + if in.IAMServiceRoleArnRef != nil { + in, out := &in.IAMServiceRoleArnRef, &out.IAMServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMServiceRoleArnSelector != nil { + in, out := &in.IAMServiceRoleArnSelector, &out.IAMServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OauthTokenSecretRef != nil { + in, out := &in.OauthTokenSecretRef, &out.OauthTokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppParameters. +func (in *AppParameters) DeepCopy() *AppParameters { + if in == nil { + return nil + } + out := new(AppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppSpec) DeepCopyInto(out *AppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppSpec. +func (in *AppSpec) DeepCopy() *AppSpec { + if in == nil { + return nil + } + out := new(AppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppStatus) DeepCopyInto(out *AppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatus. +func (in *AppStatus) DeepCopy() *AppStatus { + if in == nil { + return nil + } + out := new(AppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoBranchCreationConfigInitParameters) DeepCopyInto(out *AutoBranchCreationConfigInitParameters) { + *out = *in + if in.BasicAuthCredentialsSecretRef != nil { + in, out := &in.BasicAuthCredentialsSecretRef, &out.BasicAuthCredentialsSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BuildSpec != nil { + in, out := &in.BuildSpec, &out.BuildSpec + *out = new(string) + **out = **in + } + if in.EnableAutoBuild != nil { + in, out := &in.EnableAutoBuild, &out.EnableAutoBuild + *out = new(bool) + **out = **in + } + if in.EnableBasicAuth != nil { + in, out := &in.EnableBasicAuth, &out.EnableBasicAuth + *out = new(bool) + **out = **in + } + if in.EnablePerformanceMode != nil { + in, out := &in.EnablePerformanceMode, &out.EnablePerformanceMode + *out = new(bool) + **out = **in + } + if in.EnablePullRequestPreview != nil { + in, out := &in.EnablePullRequestPreview, &out.EnablePullRequestPreview + *out = new(bool) + **out = **in + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Framework != nil { + in, out := &in.Framework, &out.Framework + *out = new(string) + **out = **in + } + if in.PullRequestEnvironmentName != nil { + in, out := &in.PullRequestEnvironmentName, &out.PullRequestEnvironmentName + *out = new(string) + **out = **in + } + if in.Stage != nil { + in, out := &in.Stage, &out.Stage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoBranchCreationConfigInitParameters. 
+func (in *AutoBranchCreationConfigInitParameters) DeepCopy() *AutoBranchCreationConfigInitParameters { + if in == nil { + return nil + } + out := new(AutoBranchCreationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoBranchCreationConfigObservation) DeepCopyInto(out *AutoBranchCreationConfigObservation) { + *out = *in + if in.BuildSpec != nil { + in, out := &in.BuildSpec, &out.BuildSpec + *out = new(string) + **out = **in + } + if in.EnableAutoBuild != nil { + in, out := &in.EnableAutoBuild, &out.EnableAutoBuild + *out = new(bool) + **out = **in + } + if in.EnableBasicAuth != nil { + in, out := &in.EnableBasicAuth, &out.EnableBasicAuth + *out = new(bool) + **out = **in + } + if in.EnablePerformanceMode != nil { + in, out := &in.EnablePerformanceMode, &out.EnablePerformanceMode + *out = new(bool) + **out = **in + } + if in.EnablePullRequestPreview != nil { + in, out := &in.EnablePullRequestPreview, &out.EnablePullRequestPreview + *out = new(bool) + **out = **in + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Framework != nil { + in, out := &in.Framework, &out.Framework + *out = new(string) + **out = **in + } + if in.PullRequestEnvironmentName != nil { + in, out := &in.PullRequestEnvironmentName, &out.PullRequestEnvironmentName + *out = new(string) + **out = **in + } + if in.Stage != nil { + in, out := &in.Stage, &out.Stage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoBranchCreationConfigObservation. 
+func (in *AutoBranchCreationConfigObservation) DeepCopy() *AutoBranchCreationConfigObservation { + if in == nil { + return nil + } + out := new(AutoBranchCreationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoBranchCreationConfigParameters) DeepCopyInto(out *AutoBranchCreationConfigParameters) { + *out = *in + if in.BasicAuthCredentialsSecretRef != nil { + in, out := &in.BasicAuthCredentialsSecretRef, &out.BasicAuthCredentialsSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BuildSpec != nil { + in, out := &in.BuildSpec, &out.BuildSpec + *out = new(string) + **out = **in + } + if in.EnableAutoBuild != nil { + in, out := &in.EnableAutoBuild, &out.EnableAutoBuild + *out = new(bool) + **out = **in + } + if in.EnableBasicAuth != nil { + in, out := &in.EnableBasicAuth, &out.EnableBasicAuth + *out = new(bool) + **out = **in + } + if in.EnablePerformanceMode != nil { + in, out := &in.EnablePerformanceMode, &out.EnablePerformanceMode + *out = new(bool) + **out = **in + } + if in.EnablePullRequestPreview != nil { + in, out := &in.EnablePullRequestPreview, &out.EnablePullRequestPreview + *out = new(bool) + **out = **in + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Framework != nil { + in, out := &in.Framework, &out.Framework + *out = new(string) + **out = **in + } + if in.PullRequestEnvironmentName != nil { + in, out := &in.PullRequestEnvironmentName, &out.PullRequestEnvironmentName + *out = new(string) + **out = **in + } + if in.Stage != nil { + in, out := &in.Stage, &out.Stage + 
*out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoBranchCreationConfigParameters. +func (in *AutoBranchCreationConfigParameters) DeepCopy() *AutoBranchCreationConfigParameters { + if in == nil { + return nil + } + out := new(AutoBranchCreationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomRuleInitParameters) DeepCopyInto(out *CustomRuleInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRuleInitParameters. +func (in *CustomRuleInitParameters) DeepCopy() *CustomRuleInitParameters { + if in == nil { + return nil + } + out := new(CustomRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomRuleObservation) DeepCopyInto(out *CustomRuleObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRuleObservation. +func (in *CustomRuleObservation) DeepCopy() *CustomRuleObservation { + if in == nil { + return nil + } + out := new(CustomRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomRuleParameters) DeepCopyInto(out *CustomRuleParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRuleParameters. +func (in *CustomRuleParameters) DeepCopy() *CustomRuleParameters { + if in == nil { + return nil + } + out := new(CustomRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProductionBranchInitParameters) DeepCopyInto(out *ProductionBranchInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductionBranchInitParameters. +func (in *ProductionBranchInitParameters) DeepCopy() *ProductionBranchInitParameters { + if in == nil { + return nil + } + out := new(ProductionBranchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductionBranchObservation) DeepCopyInto(out *ProductionBranchObservation) { + *out = *in + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.LastDeployTime != nil { + in, out := &in.LastDeployTime, &out.LastDeployTime + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.ThumbnailURL != nil { + in, out := &in.ThumbnailURL, &out.ThumbnailURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductionBranchObservation. +func (in *ProductionBranchObservation) DeepCopy() *ProductionBranchObservation { + if in == nil { + return nil + } + out := new(ProductionBranchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductionBranchParameters) DeepCopyInto(out *ProductionBranchParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductionBranchParameters. 
+func (in *ProductionBranchParameters) DeepCopy() *ProductionBranchParameters { + if in == nil { + return nil + } + out := new(ProductionBranchParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/amplify/v1beta2/zz_generated.managed.go b/apis/amplify/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..9cb1884f54 --- /dev/null +++ b/apis/amplify/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this App. +func (mg *App) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this App. +func (mg *App) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this App. +func (mg *App) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this App. +func (mg *App) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this App. +func (mg *App) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this App. +func (mg *App) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this App. +func (mg *App) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this App. +func (mg *App) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this App. 
+func (mg *App) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this App. +func (mg *App) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this App. +func (mg *App) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this App. +func (mg *App) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/amplify/v1beta2/zz_generated.managedlist.go b/apis/amplify/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..5cfbc83e84 --- /dev/null +++ b/apis/amplify/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AppList. +func (l *AppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/amplify/v1beta2/zz_generated.resolvers.go b/apis/amplify/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..5219f24485 --- /dev/null +++ b/apis/amplify/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,69 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this App. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *App) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IAMServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.IAMServiceRoleArnRef, + Selector: mg.Spec.ForProvider.IAMServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMServiceRoleArn") + } + mg.Spec.ForProvider.IAMServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IAMServiceRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IAMServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.IAMServiceRoleArnRef, + Selector: 
mg.Spec.InitProvider.IAMServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMServiceRoleArn") + } + mg.Spec.InitProvider.IAMServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IAMServiceRoleArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/amplify/v1beta2/zz_groupversion_info.go b/apis/amplify/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..e1493dc325 --- /dev/null +++ b/apis/amplify/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=amplify.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "amplify.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/apigateway/v1beta1/zz_authorizer_types.go b/apis/apigateway/v1beta1/zz_authorizer_types.go index d6d5c8de5d..f3bc510925 100755 --- a/apis/apigateway/v1beta1/zz_authorizer_types.go +++ b/apis/apigateway/v1beta1/zz_authorizer_types.go @@ -33,7 +33,7 @@ type AuthorizerInitParameters struct { // Authorizer's Uniform Resource Identifier (URI). 
This must be a well-formed Lambda function URI in the form of arn:aws:apigateway:{region}:lambda:path/{service_api}, // e.g., arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta1.Function + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("invoke_arn",true) AuthorizerURI *string `json:"authorizerUri,omitempty" tf:"authorizer_uri,omitempty"` @@ -59,7 +59,7 @@ type AuthorizerInitParameters struct { ProviderArns []*string `json:"providerArns,omitempty" tf:"provider_arns,omitempty"` // ID of the associated REST API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -135,7 +135,7 @@ type AuthorizerParameters struct { // Authorizer's Uniform Resource Identifier (URI). 
This must be a well-formed Lambda function URI in the form of arn:aws:apigateway:{region}:lambda:path/{service_api}, // e.g., arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta1.Function + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("invoke_arn",true) // +kubebuilder:validation:Optional AuthorizerURI *string `json:"authorizerUri,omitempty" tf:"authorizer_uri,omitempty"` @@ -171,7 +171,7 @@ type AuthorizerParameters struct { Region *string `json:"region" tf:"-"` // ID of the associated REST API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_basepathmapping_types.go b/apis/apigateway/v1beta1/zz_basepathmapping_types.go index 1d70f50a44..25adade63b 100755 --- a/apis/apigateway/v1beta1/zz_basepathmapping_types.go +++ b/apis/apigateway/v1beta1/zz_basepathmapping_types.go @@ -16,7 +16,7 @@ import ( type BasePathMappingInitParameters struct { // ID of the API to connect. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` @@ -32,7 +32,7 @@ type BasePathMappingInitParameters struct { BasePath *string `json:"basePath,omitempty" tf:"base_path,omitempty"` // Already-registered domain name to connect the API to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.DomainName + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.DomainName // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("domain_name",false) DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` @@ -45,7 +45,7 @@ type BasePathMappingInitParameters struct { DomainNameSelector *v1.Selector `json:"domainNameSelector,omitempty" tf:"-"` // Name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.Stage + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.Stage // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("stage_name",false) StageName *string `json:"stageName,omitempty" tf:"stage_name,omitempty"` @@ -78,7 +78,7 @@ type BasePathMappingObservation struct { type BasePathMappingParameters struct { // ID of the API to connect. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` @@ -96,7 +96,7 @@ type BasePathMappingParameters struct { BasePath *string `json:"basePath,omitempty" tf:"base_path,omitempty"` // Already-registered domain name to connect the API to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.DomainName + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.DomainName // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("domain_name",false) // +kubebuilder:validation:Optional DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` @@ -115,7 +115,7 @@ type BasePathMappingParameters struct { Region *string `json:"region" tf:"-"` // Name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.Stage + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.Stage // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("stage_name",false) // +kubebuilder:validation:Optional StageName *string `json:"stageName,omitempty" tf:"stage_name,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_deployment_types.go b/apis/apigateway/v1beta1/zz_deployment_types.go index 7740243d14..145ceb8ca0 100755 --- a/apis/apigateway/v1beta1/zz_deployment_types.go +++ b/apis/apigateway/v1beta1/zz_deployment_types.go @@ -19,7 +19,7 @@ type DeploymentInitParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // REST API identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -96,7 +96,7 @@ type DeploymentParameters struct { Region *string `json:"region" tf:"-"` // REST API identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_documentationversion_types.go b/apis/apigateway/v1beta1/zz_documentationversion_types.go index da3be116fd..5c07ec7e3b 100755 --- a/apis/apigateway/v1beta1/zz_documentationversion_types.go +++ b/apis/apigateway/v1beta1/zz_documentationversion_types.go @@ -19,7 +19,7 @@ type DocumentationVersionInitParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // ID of the associated Rest API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -61,7 +61,7 @@ type DocumentationVersionParameters struct { Region *string `json:"region" tf:"-"` // ID of the associated Rest API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_gatewayresponse_types.go b/apis/apigateway/v1beta1/zz_gatewayresponse_types.go index ebe821175e..605af2935c 100755 
--- a/apis/apigateway/v1beta1/zz_gatewayresponse_types.go +++ b/apis/apigateway/v1beta1/zz_gatewayresponse_types.go @@ -27,7 +27,7 @@ type GatewayResponseInitParameters struct { ResponseType *string `json:"responseType,omitempty" tf:"response_type,omitempty"` // String identifier of the associated REST API. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -86,7 +86,7 @@ type GatewayResponseParameters struct { ResponseType *string `json:"responseType,omitempty" tf:"response_type,omitempty"` // String identifier of the associated REST API. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_generated.conversion_hubs.go b/apis/apigateway/v1beta1/zz_generated.conversion_hubs.go index 7585ced9cf..dce8a9f0fd 100755 --- a/apis/apigateway/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/apigateway/v1beta1/zz_generated.conversion_hubs.go @@ -24,21 +24,12 @@ func (tr *ClientCertificate) Hub() {} // Hub marks this type as a conversion hub. func (tr *Deployment) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *DocumentationPart) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DocumentationVersion) Hub() {} -// Hub marks this type as a conversion hub. 
-func (tr *DomainName) Hub() {} - // Hub marks this type as a conversion hub. func (tr *GatewayResponse) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Integration) Hub() {} - // Hub marks this type as a conversion hub. func (tr *IntegrationResponse) Hub() {} @@ -48,9 +39,6 @@ func (tr *Method) Hub() {} // Hub marks this type as a conversion hub. func (tr *MethodResponse) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MethodSettings) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Model) Hub() {} @@ -60,18 +48,9 @@ func (tr *RequestValidator) Hub() {} // Hub marks this type as a conversion hub. func (tr *Resource) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *RestAPI) Hub() {} - // Hub marks this type as a conversion hub. func (tr *RestAPIPolicy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Stage) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *UsagePlan) Hub() {} - // Hub marks this type as a conversion hub. func (tr *UsagePlanKey) Hub() {} diff --git a/apis/apigateway/v1beta1/zz_generated.conversion_spokes.go b/apis/apigateway/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..831f58c3e0 --- /dev/null +++ b/apis/apigateway/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,154 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this DocumentationPart to the hub type. 
+func (tr *DocumentationPart) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DocumentationPart type. +func (tr *DocumentationPart) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DomainName to the hub type. +func (tr *DomainName) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DomainName type. +func (tr *DomainName) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Integration to the hub type. 
+func (tr *Integration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Integration type. +func (tr *Integration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MethodSettings to the hub type. +func (tr *MethodSettings) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MethodSettings type. +func (tr *MethodSettings) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this RestAPI to the hub type. 
+func (tr *RestAPI) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RestAPI type. +func (tr *RestAPI) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Stage to the hub type. +func (tr *Stage) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Stage type. +func (tr *Stage) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this UsagePlan to the hub type. 
+func (tr *UsagePlan) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the UsagePlan type. +func (tr *UsagePlan) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/apigateway/v1beta1/zz_generated.resolvers.go b/apis/apigateway/v1beta1/zz_generated.resolvers.go index 232e394c4d..91fcb2d3d2 100644 --- a/apis/apigateway/v1beta1/zz_generated.resolvers.go +++ b/apis/apigateway/v1beta1/zz_generated.resolvers.go @@ -97,7 +97,7 @@ func (mg *Authorizer) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.AuthorizerCredentials = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AuthorizerCredentialsRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta1", "Function", "FunctionList") + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -116,7 +116,7 @@ func (mg *Authorizer) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.AuthorizerURI = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AuthorizerURIRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -154,7 +154,7 @@ func (mg *Authorizer) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.InitProvider.AuthorizerCredentials = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.AuthorizerCredentialsRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta1", "Function", "FunctionList") + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -173,7 +173,7 @@ func (mg *Authorizer) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.InitProvider.AuthorizerURI = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.AuthorizerURIRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -204,7 +204,7 @@ func (mg *BasePathMapping) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -223,7 +223,7 @@ func (mg *BasePathMapping) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "DomainName", "DomainNameList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "DomainName", "DomainNameList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -242,7 +242,7 @@ func (mg *BasePathMapping) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.DomainName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DomainNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "Stage", "StageList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "Stage", "StageList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -261,7 +261,7 @@ func (mg *BasePathMapping) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.StageName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StageNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -280,7 +280,7 @@ func (mg *BasePathMapping) ResolveReferences(ctx context.Context, c client.Reade 
mg.Spec.InitProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "DomainName", "DomainNameList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "DomainName", "DomainNameList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -299,7 +299,7 @@ func (mg *BasePathMapping) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.InitProvider.DomainName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DomainNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "Stage", "StageList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "Stage", "StageList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -330,7 +330,7 @@ func (mg *Deployment) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -349,7 +349,7 @@ func (mg *Deployment) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = 
apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -430,7 +430,7 @@ func (mg *DocumentationVersion) ResolveReferences(ctx context.Context, c client. var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -449,7 +449,7 @@ func (mg *DocumentationVersion) ResolveReferences(ctx context.Context, c client. mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -568,7 +568,7 @@ func (mg *GatewayResponse) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -587,7 +587,7 @@ func (mg *GatewayResponse) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.RestAPIID = 
reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -858,7 +858,7 @@ func (mg *IntegrationResponse) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -934,7 +934,7 @@ func (mg *IntegrationResponse) ResolveReferences(ctx context.Context, c client.R mg.Spec.InitProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1022,7 +1022,7 @@ func (mg *Method) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, 
l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1079,7 +1079,7 @@ func (mg *Method) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.InitProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1148,7 +1148,7 @@ func (mg *MethodResponse) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1205,7 +1205,7 @@ func (mg *MethodResponse) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.InitProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") } @@ -1324,7 +1324,7 @@ func (mg *Model) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1343,7 +1343,7 @@ func (mg *Model) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1374,7 +1374,7 @@ func (mg *RequestValidator) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1393,7 +1393,7 @@ func (mg *RequestValidator) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", 
"RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1424,7 +1424,7 @@ func (mg *Resource) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1443,7 +1443,7 @@ func (mg *Resource) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.ParentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ParentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1462,7 +1462,7 @@ func (mg *Resource) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1481,7 +1481,7 @@ func (mg 
*Resource) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.InitProvider.ParentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ParentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1512,7 +1512,7 @@ func (mg *RestAPIPolicy) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1531,7 +1531,7 @@ func (mg *RestAPIPolicy) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "RestAPI", "RestAPIList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1766,7 +1766,7 @@ func (mg *UsagePlanKey) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.KeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "UsagePlan", 
"UsagePlanList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "UsagePlan", "UsagePlanList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1804,7 +1804,7 @@ func (mg *UsagePlanKey) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.InitProvider.KeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.KeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "UsagePlan", "UsagePlanList") + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "UsagePlan", "UsagePlanList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1835,7 +1835,7 @@ func (mg *VPCLink) ResolveReferences(ctx context.Context, c client.Reader) error var mrsp reference.MultiResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta1", "LB", "LBList") + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LB", "LBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1854,7 +1854,7 @@ func (mg *VPCLink) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.TargetArns = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.TargetArnRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta1", "LB", "LBList") + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LB", "LBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/apigateway/v1beta1/zz_integrationresponse_types.go 
b/apis/apigateway/v1beta1/zz_integrationresponse_types.go index 8015935644..6adbf00266 100755 --- a/apis/apigateway/v1beta1/zz_integrationresponse_types.go +++ b/apis/apigateway/v1beta1/zz_integrationresponse_types.go @@ -53,7 +53,7 @@ type IntegrationResponseInitParameters struct { ResponseTemplates map[string]*string `json:"responseTemplates,omitempty" tf:"response_templates,omitempty"` // ID of the associated REST API. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -163,7 +163,7 @@ type IntegrationResponseParameters struct { ResponseTemplates map[string]*string `json:"responseTemplates,omitempty" tf:"response_templates,omitempty"` // ID of the associated REST API. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_method_types.go b/apis/apigateway/v1beta1/zz_method_types.go index 4ff510fe76..6c4322d84a 100755 --- a/apis/apigateway/v1beta1/zz_method_types.go +++ b/apis/apigateway/v1beta1/zz_method_types.go @@ -72,7 +72,7 @@ type MethodInitParameters struct { ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` // ID of the associated REST API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -203,7 +203,7 @@ type MethodParameters struct { ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` // ID of the associated REST API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_methodresponse_types.go b/apis/apigateway/v1beta1/zz_methodresponse_types.go index ef6851fa41..2c7cb2ebc1 100755 --- 
a/apis/apigateway/v1beta1/zz_methodresponse_types.go +++ b/apis/apigateway/v1beta1/zz_methodresponse_types.go @@ -50,7 +50,7 @@ type MethodResponseInitParameters struct { ResponseParameters map[string]*bool `json:"responseParameters,omitempty" tf:"response_parameters,omitempty"` // The string identifier of the associated REST API. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -137,7 +137,7 @@ type MethodResponseParameters struct { ResponseParameters map[string]*bool `json:"responseParameters,omitempty" tf:"response_parameters,omitempty"` // The string identifier of the associated REST API. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_model_types.go b/apis/apigateway/v1beta1/zz_model_types.go index 20d627b1cb..8815497bb9 100755 --- a/apis/apigateway/v1beta1/zz_model_types.go +++ b/apis/apigateway/v1beta1/zz_model_types.go @@ -25,7 +25,7 @@ type ModelInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // ID of the associated REST API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // 
+crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -82,7 +82,7 @@ type ModelParameters struct { Region *string `json:"region" tf:"-"` // ID of the associated REST API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_requestvalidator_types.go b/apis/apigateway/v1beta1/zz_requestvalidator_types.go index a04e45a0b2..1e1e3e5930 100755 --- a/apis/apigateway/v1beta1/zz_requestvalidator_types.go +++ b/apis/apigateway/v1beta1/zz_requestvalidator_types.go @@ -19,7 +19,7 @@ type RequestValidatorInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // ID of the associated Rest API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -68,7 +68,7 @@ type RequestValidatorParameters struct { Region *string `json:"region" tf:"-"` // ID of the associated Rest API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // 
+kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_resource_types.go b/apis/apigateway/v1beta1/zz_resource_types.go index b18f5d3c11..55b6452d81 100755 --- a/apis/apigateway/v1beta1/zz_resource_types.go +++ b/apis/apigateway/v1beta1/zz_resource_types.go @@ -16,7 +16,7 @@ import ( type ResourceInitParameters struct { // ID of the parent API resource - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("root_resource_id",true) ParentID *string `json:"parentId,omitempty" tf:"parent_id,omitempty"` @@ -32,7 +32,7 @@ type ResourceInitParameters struct { PathPart *string `json:"pathPart,omitempty" tf:"path_part,omitempty"` // ID of the associated REST API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -66,7 +66,7 @@ type ResourceObservation struct { type ResourceParameters struct { // ID of the parent API resource - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("root_resource_id",true) // +kubebuilder:validation:Optional ParentID *string `json:"parentId,omitempty" tf:"parent_id,omitempty"` @@ -89,7 +89,7 @@ type 
ResourceParameters struct { Region *string `json:"region" tf:"-"` // ID of the associated REST API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_restapipolicy_types.go b/apis/apigateway/v1beta1/zz_restapipolicy_types.go index c769610c39..8074d41955 100755 --- a/apis/apigateway/v1beta1/zz_restapipolicy_types.go +++ b/apis/apigateway/v1beta1/zz_restapipolicy_types.go @@ -19,7 +19,7 @@ type RestAPIPolicyInitParameters struct { Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` // ID of the REST API. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` @@ -56,7 +56,7 @@ type RestAPIPolicyParameters struct { Region *string `json:"region" tf:"-"` // ID of the REST API. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.RestAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_usageplankey_types.go b/apis/apigateway/v1beta1/zz_usageplankey_types.go index 397ba7054c..773aca59af 100755 --- a/apis/apigateway/v1beta1/zz_usageplankey_types.go +++ b/apis/apigateway/v1beta1/zz_usageplankey_types.go @@ -32,7 +32,7 @@ type UsagePlanKeyInitParameters struct { KeyType *string `json:"keyType,omitempty" tf:"key_type,omitempty"` // Id of the usage plan resource representing to associate the key to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.UsagePlan + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.UsagePlan // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() UsagePlanID *string `json:"usagePlanId,omitempty" tf:"usage_plan_id,omitempty"` @@ -92,7 +92,7 @@ type UsagePlanKeyParameters struct { Region *string `json:"region" tf:"-"` // Id of the usage plan resource representing to associate the key to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.UsagePlan + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.UsagePlan // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional UsagePlanID *string `json:"usagePlanId,omitempty" tf:"usage_plan_id,omitempty"` diff --git a/apis/apigateway/v1beta1/zz_vpclink_types.go b/apis/apigateway/v1beta1/zz_vpclink_types.go index 982a329c00..f566391a77 100755 --- a/apis/apigateway/v1beta1/zz_vpclink_types.go +++ b/apis/apigateway/v1beta1/zz_vpclink_types.go @@ -34,7 +34,7 @@ type VPCLinkInitParameters struct { TargetArnSelector *v1.Selector `json:"targetArnSelector,omitempty" tf:"-"` // List of network load balancer arns in the VPC targeted by the VPC link. Currently AWS only supports 1 target. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta1.LB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LB // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() // +crossplane:generate:reference:refFieldName=TargetArnRefs // +crossplane:generate:reference:selectorFieldName=TargetArnSelector @@ -94,7 +94,7 @@ type VPCLinkParameters struct { TargetArnSelector *v1.Selector `json:"targetArnSelector,omitempty" tf:"-"` // List of network load balancer arns in the VPC targeted by the VPC link. Currently AWS only supports 1 target. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta1.LB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LB // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() // +crossplane:generate:reference:refFieldName=TargetArnRefs // +crossplane:generate:reference:selectorFieldName=TargetArnSelector diff --git a/apis/apigateway/v1beta2/zz_documentationpart_terraformed.go b/apis/apigateway/v1beta2/zz_documentationpart_terraformed.go new file mode 100755 index 0000000000..e50386bd0d --- /dev/null +++ b/apis/apigateway/v1beta2/zz_documentationpart_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DocumentationPart +func (mg *DocumentationPart) GetTerraformResourceType() string { + return "aws_api_gateway_documentation_part" +} + +// GetConnectionDetailsMapping for this DocumentationPart +func (tr *DocumentationPart) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DocumentationPart +func (tr *DocumentationPart) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DocumentationPart +func (tr *DocumentationPart) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource 
of this DocumentationPart +func (tr *DocumentationPart) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DocumentationPart +func (tr *DocumentationPart) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DocumentationPart +func (tr *DocumentationPart) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DocumentationPart +func (tr *DocumentationPart) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DocumentationPart +func (tr *DocumentationPart) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DocumentationPart using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DocumentationPart) LateInitialize(attrs []byte) (bool, error) { + params := &DocumentationPartParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DocumentationPart) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigateway/v1beta2/zz_documentationpart_types.go b/apis/apigateway/v1beta2/zz_documentationpart_types.go new file mode 100755 index 0000000000..4669b162e6 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_documentationpart_types.go @@ -0,0 +1,205 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DocumentationPartInitParameters struct { + + // Location of the targeted API entity of the to-be-created documentation part. See below. 
+ Location *LocationInitParameters `json:"location,omitempty" tf:"location,omitempty"` + + // Content map of API-specific key-value pairs describing the targeted API entity. The map must be encoded as a JSON string, e.g., "{ "description": "The API does ..." }". Only Swagger-compliant key-value pairs can be exported and, hence, published. + Properties *string `json:"properties,omitempty" tf:"properties,omitempty"` + + // ID of the associated Rest API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDRef *v1.Reference `json:"restApiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDSelector *v1.Selector `json:"restApiIdSelector,omitempty" tf:"-"` +} + +type DocumentationPartObservation struct { + + // The DocumentationPart identifier, generated by API Gateway when the documentation part is created. + DocumentationPartID *string `json:"documentationPartId,omitempty" tf:"documentation_part_id,omitempty"` + + // Unique ID of the Documentation Part + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Location of the targeted API entity of the to-be-created documentation part. See below. + Location *LocationObservation `json:"location,omitempty" tf:"location,omitempty"` + + // Content map of API-specific key-value pairs describing the targeted API entity. The map must be encoded as a JSON string, e.g., "{ "description": "The API does ..." }". Only Swagger-compliant key-value pairs can be exported and, hence, published. 
+ Properties *string `json:"properties,omitempty" tf:"properties,omitempty"` + + // ID of the associated Rest API + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` +} + +type DocumentationPartParameters struct { + + // Location of the targeted API entity of the to-be-created documentation part. See below. + // +kubebuilder:validation:Optional + Location *LocationParameters `json:"location,omitempty" tf:"location,omitempty"` + + // Content map of API-specific key-value pairs describing the targeted API entity. The map must be encoded as a JSON string, e.g., "{ "description": "The API does ..." }". Only Swagger-compliant key-value pairs can be exported and, hence, published. + // +kubebuilder:validation:Optional + Properties *string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ID of the associated Rest API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDRef *v1.Reference `json:"restApiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDSelector *v1.Selector `json:"restApiIdSelector,omitempty" tf:"-"` +} + +type LocationInitParameters struct { + + // HTTP verb of a method. The default value is * for any method. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Name of the targeted API entity. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // URL path of the target. The default value is / for the root resource. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // HTTP status code of a response. The default value is * for any status code. + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` + + // Type of API entity to which the documentation content appliesE.g., API, METHOD or REQUEST_BODY + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LocationObservation struct { + + // HTTP verb of a method. The default value is * for any method. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Name of the targeted API entity. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // URL path of the target. The default value is / for the root resource. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // HTTP status code of a response. The default value is * for any status code. + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` + + // Type of API entity to which the documentation content appliesE.g., API, METHOD or REQUEST_BODY + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LocationParameters struct { + + // HTTP verb of a method. The default value is * for any method. + // +kubebuilder:validation:Optional + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Name of the targeted API entity. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // URL path of the target. The default value is / for the root resource. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // HTTP status code of a response. The default value is * for any status code. 
+ // +kubebuilder:validation:Optional + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` + + // Type of API entity to which the documentation content appliesE.g., API, METHOD or REQUEST_BODY + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// DocumentationPartSpec defines the desired state of DocumentationPart +type DocumentationPartSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DocumentationPartParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DocumentationPartInitParameters `json:"initProvider,omitempty"` +} + +// DocumentationPartStatus defines the observed state of DocumentationPart. +type DocumentationPartStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DocumentationPartObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DocumentationPart is the Schema for the DocumentationParts API. Provides a settings of an API Gateway Documentation Part. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DocumentationPart struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.properties) || (has(self.initProvider) && has(self.initProvider.properties))",message="spec.forProvider.properties is a required parameter" + Spec DocumentationPartSpec `json:"spec"` + Status DocumentationPartStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DocumentationPartList contains a list of DocumentationParts +type DocumentationPartList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DocumentationPart `json:"items"` +} + +// Repository type metadata. +var ( + DocumentationPart_Kind = "DocumentationPart" + DocumentationPart_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DocumentationPart_Kind}.String() + DocumentationPart_KindAPIVersion = DocumentationPart_Kind + "." 
+ CRDGroupVersion.String() + DocumentationPart_GroupVersionKind = CRDGroupVersion.WithKind(DocumentationPart_Kind) +) + +func init() { + SchemeBuilder.Register(&DocumentationPart{}, &DocumentationPartList{}) +} diff --git a/apis/apigateway/v1beta2/zz_domainname_terraformed.go b/apis/apigateway/v1beta2/zz_domainname_terraformed.go new file mode 100755 index 0000000000..41b00a341d --- /dev/null +++ b/apis/apigateway/v1beta2/zz_domainname_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DomainName +func (mg *DomainName) GetTerraformResourceType() string { + return "aws_api_gateway_domain_name" +} + +// GetConnectionDetailsMapping for this DomainName +func (tr *DomainName) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"certificate_private_key": "certificatePrivateKeySecretRef"} +} + +// GetObservation of this DomainName +func (tr *DomainName) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DomainName +func (tr *DomainName) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DomainName +func (tr *DomainName) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DomainName +func (tr *DomainName) 
GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this DomainName
+func (tr *DomainName) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this DomainName
+func (tr *DomainName) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this DomainName
+func (tr *DomainName) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this DomainName using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *DomainName) LateInitialize(attrs []byte) (bool, error) { + params := &DomainNameParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DomainName) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigateway/v1beta2/zz_domainname_types.go b/apis/apigateway/v1beta2/zz_domainname_types.go new file mode 100755 index 0000000000..60d0419176 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_domainname_types.go @@ -0,0 +1,332 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DomainNameInitParameters struct { + + // ARN for an AWS-managed certificate. AWS Certificate Manager is the only supported source. Used when an edge-optimized domain name is desired. Conflicts with certificate_name, certificate_body, certificate_chain, certificate_private_key, regional_certificate_arn, and regional_certificate_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.CertificateValidation + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("certificate_arn",false) + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Reference to a CertificateValidation in acm to populate certificateArn. 
+ // +kubebuilder:validation:Optional + CertificateArnRef *v1.Reference `json:"certificateArnRef,omitempty" tf:"-"` + + // Selector for a CertificateValidation in acm to populate certificateArn. + // +kubebuilder:validation:Optional + CertificateArnSelector *v1.Selector `json:"certificateArnSelector,omitempty" tf:"-"` + + // Certificate issued for the domain name being registered, in PEM format. Only valid for EDGE endpoint configuration type. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. + CertificateBody *string `json:"certificateBody,omitempty" tf:"certificate_body,omitempty"` + + // Certificate for the CA that issued the certificate, along with any intermediate CA certificates required to create an unbroken chain to a certificate trusted by the intended API clients. Only valid for EDGE endpoint configuration type. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Unique name to use when registering this certificate as an IAM server certificate. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. Required if certificate_arn is not set. + CertificateName *string `json:"certificateName,omitempty" tf:"certificate_name,omitempty"` + + // Private key associated with the domain certificate given in certificate_body. Only valid for EDGE endpoint configuration type. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. + CertificatePrivateKeySecretRef *v1.SecretKeySelector `json:"certificatePrivateKeySecretRef,omitempty" tf:"-"` + + // Fully-qualified domain name to register. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Configuration block defining API endpoint information including type. See below. 
+ EndpointConfiguration *EndpointConfigurationInitParameters `json:"endpointConfiguration,omitempty" tf:"endpoint_configuration,omitempty"` + + // Mutual TLS authentication configuration for the domain name. See below. + MutualTLSAuthentication *MutualTLSAuthenticationInitParameters `json:"mutualTlsAuthentication,omitempty" tf:"mutual_tls_authentication,omitempty"` + + // ARN of the AWS-issued certificate used to validate custom domain ownership (when certificate_arn is issued via an ACM Private CA or mutual_tls_authentication is configured with an ACM-imported certificate.) + OwnershipVerificationCertificateArn *string `json:"ownershipVerificationCertificateArn,omitempty" tf:"ownership_verification_certificate_arn,omitempty"` + + // ARN for an AWS-managed certificate. AWS Certificate Manager is the only supported source. Used when a regional domain name is desired. Conflicts with certificate_arn, certificate_name, certificate_body, certificate_chain, and certificate_private_key. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.CertificateValidation + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("certificate_arn",false) + RegionalCertificateArn *string `json:"regionalCertificateArn,omitempty" tf:"regional_certificate_arn,omitempty"` + + // Reference to a CertificateValidation in acm to populate regionalCertificateArn. + // +kubebuilder:validation:Optional + RegionalCertificateArnRef *v1.Reference `json:"regionalCertificateArnRef,omitempty" tf:"-"` + + // Selector for a CertificateValidation in acm to populate regionalCertificateArn. + // +kubebuilder:validation:Optional + RegionalCertificateArnSelector *v1.Selector `json:"regionalCertificateArnSelector,omitempty" tf:"-"` + + // User-friendly name of the certificate that will be used by regional endpoint for this domain name. 
Conflicts with certificate_arn, certificate_name, certificate_body, certificate_chain, and certificate_private_key. + RegionalCertificateName *string `json:"regionalCertificateName,omitempty" tf:"regional_certificate_name,omitempty"` + + // Transport Layer Security (TLS) version + cipher suite for this DomainName. Valid values are TLS_1_0 and TLS_1_2. Must be configured to perform drift detection. + SecurityPolicy *string `json:"securityPolicy,omitempty" tf:"security_policy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DomainNameObservation struct { + + // ARN of domain name. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN for an AWS-managed certificate. AWS Certificate Manager is the only supported source. Used when an edge-optimized domain name is desired. Conflicts with certificate_name, certificate_body, certificate_chain, certificate_private_key, regional_certificate_arn, and regional_certificate_name. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Certificate issued for the domain name being registered, in PEM format. Only valid for EDGE endpoint configuration type. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. + CertificateBody *string `json:"certificateBody,omitempty" tf:"certificate_body,omitempty"` + + // Certificate for the CA that issued the certificate, along with any intermediate CA certificates required to create an unbroken chain to a certificate trusted by the intended API clients. Only valid for EDGE endpoint configuration type. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Unique name to use when registering this certificate as an IAM server certificate. 
Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. Required if certificate_arn is not set. + CertificateName *string `json:"certificateName,omitempty" tf:"certificate_name,omitempty"` + + // Upload date associated with the domain certificate. + CertificateUploadDate *string `json:"certificateUploadDate,omitempty" tf:"certificate_upload_date,omitempty"` + + // Hostname created by Cloudfront to represent the distribution that implements this domain name mapping. + CloudfrontDomainName *string `json:"cloudfrontDomainName,omitempty" tf:"cloudfront_domain_name,omitempty"` + + // For convenience, the hosted zone ID (Z2FDTNDATAQYW2) that can be used to create a Route53 alias record for the distribution. + CloudfrontZoneID *string `json:"cloudfrontZoneId,omitempty" tf:"cloudfront_zone_id,omitempty"` + + // Fully-qualified domain name to register. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Configuration block defining API endpoint information including type. See below. + EndpointConfiguration *EndpointConfigurationObservation `json:"endpointConfiguration,omitempty" tf:"endpoint_configuration,omitempty"` + + // Internal identifier assigned to this domain name by API Gateway. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Mutual TLS authentication configuration for the domain name. See below. + MutualTLSAuthentication *MutualTLSAuthenticationObservation `json:"mutualTlsAuthentication,omitempty" tf:"mutual_tls_authentication,omitempty"` + + // ARN of the AWS-issued certificate used to validate custom domain ownership (when certificate_arn is issued via an ACM Private CA or mutual_tls_authentication is configured with an ACM-imported certificate.) + OwnershipVerificationCertificateArn *string `json:"ownershipVerificationCertificateArn,omitempty" tf:"ownership_verification_certificate_arn,omitempty"` + + // ARN for an AWS-managed certificate. 
AWS Certificate Manager is the only supported source. Used when a regional domain name is desired. Conflicts with certificate_arn, certificate_name, certificate_body, certificate_chain, and certificate_private_key. + RegionalCertificateArn *string `json:"regionalCertificateArn,omitempty" tf:"regional_certificate_arn,omitempty"` + + // User-friendly name of the certificate that will be used by regional endpoint for this domain name. Conflicts with certificate_arn, certificate_name, certificate_body, certificate_chain, and certificate_private_key. + RegionalCertificateName *string `json:"regionalCertificateName,omitempty" tf:"regional_certificate_name,omitempty"` + + // Hostname for the custom domain's regional endpoint. + RegionalDomainName *string `json:"regionalDomainName,omitempty" tf:"regional_domain_name,omitempty"` + + // Hosted zone ID that can be used to create a Route53 alias record for the regional endpoint. + RegionalZoneID *string `json:"regionalZoneId,omitempty" tf:"regional_zone_id,omitempty"` + + // Transport Layer Security (TLS) version + cipher suite for this DomainName. Valid values are TLS_1_0 and TLS_1_2. Must be configured to perform drift detection. + SecurityPolicy *string `json:"securityPolicy,omitempty" tf:"security_policy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type DomainNameParameters struct { + + // ARN for an AWS-managed certificate. AWS Certificate Manager is the only supported source. Used when an edge-optimized domain name is desired. Conflicts with certificate_name, certificate_body, certificate_chain, certificate_private_key, regional_certificate_arn, and regional_certificate_name. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.CertificateValidation + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("certificate_arn",false) + // +kubebuilder:validation:Optional + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Reference to a CertificateValidation in acm to populate certificateArn. + // +kubebuilder:validation:Optional + CertificateArnRef *v1.Reference `json:"certificateArnRef,omitempty" tf:"-"` + + // Selector for a CertificateValidation in acm to populate certificateArn. + // +kubebuilder:validation:Optional + CertificateArnSelector *v1.Selector `json:"certificateArnSelector,omitempty" tf:"-"` + + // Certificate issued for the domain name being registered, in PEM format. Only valid for EDGE endpoint configuration type. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. + // +kubebuilder:validation:Optional + CertificateBody *string `json:"certificateBody,omitempty" tf:"certificate_body,omitempty"` + + // Certificate for the CA that issued the certificate, along with any intermediate CA certificates required to create an unbroken chain to a certificate trusted by the intended API clients. Only valid for EDGE endpoint configuration type. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Unique name to use when registering this certificate as an IAM server certificate. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. Required if certificate_arn is not set. 
+ // +kubebuilder:validation:Optional + CertificateName *string `json:"certificateName,omitempty" tf:"certificate_name,omitempty"` + + // Private key associated with the domain certificate given in certificate_body. Only valid for EDGE endpoint configuration type. Conflicts with certificate_arn, regional_certificate_arn, and regional_certificate_name. + // +kubebuilder:validation:Optional + CertificatePrivateKeySecretRef *v1.SecretKeySelector `json:"certificatePrivateKeySecretRef,omitempty" tf:"-"` + + // Fully-qualified domain name to register. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Configuration block defining API endpoint information including type. See below. + // +kubebuilder:validation:Optional + EndpointConfiguration *EndpointConfigurationParameters `json:"endpointConfiguration,omitempty" tf:"endpoint_configuration,omitempty"` + + // Mutual TLS authentication configuration for the domain name. See below. + // +kubebuilder:validation:Optional + MutualTLSAuthentication *MutualTLSAuthenticationParameters `json:"mutualTlsAuthentication,omitempty" tf:"mutual_tls_authentication,omitempty"` + + // ARN of the AWS-issued certificate used to validate custom domain ownership (when certificate_arn is issued via an ACM Private CA or mutual_tls_authentication is configured with an ACM-imported certificate.) + // +kubebuilder:validation:Optional + OwnershipVerificationCertificateArn *string `json:"ownershipVerificationCertificateArn,omitempty" tf:"ownership_verification_certificate_arn,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ARN for an AWS-managed certificate. AWS Certificate Manager is the only supported source. Used when a regional domain name is desired. 
Conflicts with certificate_arn, certificate_name, certificate_body, certificate_chain, and certificate_private_key. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.CertificateValidation + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("certificate_arn",false) + // +kubebuilder:validation:Optional + RegionalCertificateArn *string `json:"regionalCertificateArn,omitempty" tf:"regional_certificate_arn,omitempty"` + + // Reference to a CertificateValidation in acm to populate regionalCertificateArn. + // +kubebuilder:validation:Optional + RegionalCertificateArnRef *v1.Reference `json:"regionalCertificateArnRef,omitempty" tf:"-"` + + // Selector for a CertificateValidation in acm to populate regionalCertificateArn. + // +kubebuilder:validation:Optional + RegionalCertificateArnSelector *v1.Selector `json:"regionalCertificateArnSelector,omitempty" tf:"-"` + + // User-friendly name of the certificate that will be used by regional endpoint for this domain name. Conflicts with certificate_arn, certificate_name, certificate_body, certificate_chain, and certificate_private_key. + // +kubebuilder:validation:Optional + RegionalCertificateName *string `json:"regionalCertificateName,omitempty" tf:"regional_certificate_name,omitempty"` + + // Transport Layer Security (TLS) version + cipher suite for this DomainName. Valid values are TLS_1_0 and TLS_1_2. Must be configured to perform drift detection. + // +kubebuilder:validation:Optional + SecurityPolicy *string `json:"securityPolicy,omitempty" tf:"security_policy,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EndpointConfigurationInitParameters struct { + + // List of endpoint types. This resource currently only supports managing a single value. Valid values: EDGE or REGIONAL. 
If unspecified, defaults to EDGE. Must be declared as REGIONAL in non-Commercial partitions. Refer to the documentation for more information on the difference between edge-optimized and regional APIs. + Types []*string `json:"types,omitempty" tf:"types,omitempty"` +} + +type EndpointConfigurationObservation struct { + + // List of endpoint types. This resource currently only supports managing a single value. Valid values: EDGE or REGIONAL. If unspecified, defaults to EDGE. Must be declared as REGIONAL in non-Commercial partitions. Refer to the documentation for more information on the difference between edge-optimized and regional APIs. + Types []*string `json:"types,omitempty" tf:"types,omitempty"` +} + +type EndpointConfigurationParameters struct { + + // List of endpoint types. This resource currently only supports managing a single value. Valid values: EDGE or REGIONAL. If unspecified, defaults to EDGE. Must be declared as REGIONAL in non-Commercial partitions. Refer to the documentation for more information on the difference between edge-optimized and regional APIs. + // +kubebuilder:validation:Optional + Types []*string `json:"types" tf:"types,omitempty"` +} + +type MutualTLSAuthenticationInitParameters struct { + + // Amazon S3 URL that specifies the truststore for mutual TLS authentication, for example, s3://bucket-name/key-name. The truststore can contain certificates from public or private certificate authorities. To update the truststore, upload a new version to S3, and then update your custom domain name to use the new version. + TruststoreURI *string `json:"truststoreUri,omitempty" tf:"truststore_uri,omitempty"` + + // Version of the S3 object that contains the truststore. To specify a version, you must have versioning enabled for the S3 bucket. 
+ TruststoreVersion *string `json:"truststoreVersion,omitempty" tf:"truststore_version,omitempty"` +} + +type MutualTLSAuthenticationObservation struct { + + // Amazon S3 URL that specifies the truststore for mutual TLS authentication, for example, s3://bucket-name/key-name. The truststore can contain certificates from public or private certificate authorities. To update the truststore, upload a new version to S3, and then update your custom domain name to use the new version. + TruststoreURI *string `json:"truststoreUri,omitempty" tf:"truststore_uri,omitempty"` + + // Version of the S3 object that contains the truststore. To specify a version, you must have versioning enabled for the S3 bucket. + TruststoreVersion *string `json:"truststoreVersion,omitempty" tf:"truststore_version,omitempty"` +} + +type MutualTLSAuthenticationParameters struct { + + // Amazon S3 URL that specifies the truststore for mutual TLS authentication, for example, s3://bucket-name/key-name. The truststore can contain certificates from public or private certificate authorities. To update the truststore, upload a new version to S3, and then update your custom domain name to use the new version. + // +kubebuilder:validation:Optional + TruststoreURI *string `json:"truststoreUri" tf:"truststore_uri,omitempty"` + + // Version of the S3 object that contains the truststore. To specify a version, you must have versioning enabled for the S3 bucket. + // +kubebuilder:validation:Optional + TruststoreVersion *string `json:"truststoreVersion,omitempty" tf:"truststore_version,omitempty"` +} + +// DomainNameSpec defines the desired state of DomainName +type DomainNameSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DomainNameParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DomainNameInitParameters `json:"initProvider,omitempty"` +} + +// DomainNameStatus defines the observed state of DomainName. +type DomainNameStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DomainNameObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DomainName is the Schema for the DomainNames API. Registers a custom domain name for use with AWS API Gateway. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DomainName struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.domainName) || (has(self.initProvider) && has(self.initProvider.domainName))",message="spec.forProvider.domainName is a required parameter" + Spec DomainNameSpec `json:"spec"` + Status DomainNameStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DomainNameList 
contains a list of DomainNames +type DomainNameList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DomainName `json:"items"` +} + +// Repository type metadata. +var ( + DomainName_Kind = "DomainName" + DomainName_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DomainName_Kind}.String() + DomainName_KindAPIVersion = DomainName_Kind + "." + CRDGroupVersion.String() + DomainName_GroupVersionKind = CRDGroupVersion.WithKind(DomainName_Kind) +) + +func init() { + SchemeBuilder.Register(&DomainName{}, &DomainNameList{}) +} diff --git a/apis/apigateway/v1beta2/zz_generated.conversion_hubs.go b/apis/apigateway/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..4265ee3136 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,28 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *DocumentationPart) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DomainName) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Integration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MethodSettings) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RestAPI) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Stage) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *UsagePlan) Hub() {} diff --git a/apis/apigateway/v1beta2/zz_generated.deepcopy.go b/apis/apigateway/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..08a440d234 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,4010 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. 
DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIStagesInitParameters) DeepCopyInto(out *APIStagesInitParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Stage != nil { + in, out := &in.Stage, &out.Stage + *out = new(string) + **out = **in + } + if in.StageRef != nil { + in, out := &in.StageRef, &out.StageRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StageSelector != nil { + in, out := &in.StageSelector, &out.StageSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Throttle != nil { + in, out := &in.Throttle, &out.Throttle + *out = make([]ThrottleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIStagesInitParameters. +func (in *APIStagesInitParameters) DeepCopy() *APIStagesInitParameters { + if in == nil { + return nil + } + out := new(APIStagesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIStagesObservation) DeepCopyInto(out *APIStagesObservation) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.Stage != nil { + in, out := &in.Stage, &out.Stage + *out = new(string) + **out = **in + } + if in.Throttle != nil { + in, out := &in.Throttle, &out.Throttle + *out = make([]ThrottleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIStagesObservation. +func (in *APIStagesObservation) DeepCopy() *APIStagesObservation { + if in == nil { + return nil + } + out := new(APIStagesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIStagesParameters) DeepCopyInto(out *APIStagesParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Stage != nil { + in, out := &in.Stage, &out.Stage + *out = new(string) + **out = **in + } + if in.StageRef != nil { + in, out := &in.StageRef, &out.StageRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StageSelector != nil { + in, out := &in.StageSelector, &out.StageSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Throttle != nil { + in, out := &in.Throttle, &out.Throttle + *out = make([]ThrottleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIStagesParameters. 
+func (in *APIStagesParameters) DeepCopy() *APIStagesParameters { + if in == nil { + return nil + } + out := new(APIStagesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogSettingsInitParameters) DeepCopyInto(out *AccessLogSettingsInitParameters) { + *out = *in + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogSettingsInitParameters. +func (in *AccessLogSettingsInitParameters) DeepCopy() *AccessLogSettingsInitParameters { + if in == nil { + return nil + } + out := new(AccessLogSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogSettingsObservation) DeepCopyInto(out *AccessLogSettingsObservation) { + *out = *in + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogSettingsObservation. +func (in *AccessLogSettingsObservation) DeepCopy() *AccessLogSettingsObservation { + if in == nil { + return nil + } + out := new(AccessLogSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessLogSettingsParameters) DeepCopyInto(out *AccessLogSettingsParameters) { + *out = *in + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogSettingsParameters. +func (in *AccessLogSettingsParameters) DeepCopy() *AccessLogSettingsParameters { + if in == nil { + return nil + } + out := new(AccessLogSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanarySettingsInitParameters) DeepCopyInto(out *CanarySettingsInitParameters) { + *out = *in + if in.PercentTraffic != nil { + in, out := &in.PercentTraffic, &out.PercentTraffic + *out = new(float64) + **out = **in + } + if in.StageVariableOverrides != nil { + in, out := &in.StageVariableOverrides, &out.StageVariableOverrides + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseStageCache != nil { + in, out := &in.UseStageCache, &out.UseStageCache + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanarySettingsInitParameters. +func (in *CanarySettingsInitParameters) DeepCopy() *CanarySettingsInitParameters { + if in == nil { + return nil + } + out := new(CanarySettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanarySettingsObservation) DeepCopyInto(out *CanarySettingsObservation) { + *out = *in + if in.PercentTraffic != nil { + in, out := &in.PercentTraffic, &out.PercentTraffic + *out = new(float64) + **out = **in + } + if in.StageVariableOverrides != nil { + in, out := &in.StageVariableOverrides, &out.StageVariableOverrides + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseStageCache != nil { + in, out := &in.UseStageCache, &out.UseStageCache + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanarySettingsObservation. +func (in *CanarySettingsObservation) DeepCopy() *CanarySettingsObservation { + if in == nil { + return nil + } + out := new(CanarySettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanarySettingsParameters) DeepCopyInto(out *CanarySettingsParameters) { + *out = *in + if in.PercentTraffic != nil { + in, out := &in.PercentTraffic, &out.PercentTraffic + *out = new(float64) + **out = **in + } + if in.StageVariableOverrides != nil { + in, out := &in.StageVariableOverrides, &out.StageVariableOverrides + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseStageCache != nil { + in, out := &in.UseStageCache, &out.UseStageCache + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanarySettingsParameters. 
+func (in *CanarySettingsParameters) DeepCopy() *CanarySettingsParameters { + if in == nil { + return nil + } + out := new(CanarySettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentationPart) DeepCopyInto(out *DocumentationPart) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationPart. +func (in *DocumentationPart) DeepCopy() *DocumentationPart { + if in == nil { + return nil + } + out := new(DocumentationPart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DocumentationPart) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DocumentationPartInitParameters) DeepCopyInto(out *DocumentationPartInitParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(LocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.RestAPIIDRef != nil { + in, out := &in.RestAPIIDRef, &out.RestAPIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RestAPIIDSelector != nil { + in, out := &in.RestAPIIDSelector, &out.RestAPIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationPartInitParameters. +func (in *DocumentationPartInitParameters) DeepCopy() *DocumentationPartInitParameters { + if in == nil { + return nil + } + out := new(DocumentationPartInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentationPartList) DeepCopyInto(out *DocumentationPartList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DocumentationPart, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationPartList. +func (in *DocumentationPartList) DeepCopy() *DocumentationPartList { + if in == nil { + return nil + } + out := new(DocumentationPartList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DocumentationPartList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentationPartObservation) DeepCopyInto(out *DocumentationPartObservation) { + *out = *in + if in.DocumentationPartID != nil { + in, out := &in.DocumentationPartID, &out.DocumentationPartID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(LocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationPartObservation. +func (in *DocumentationPartObservation) DeepCopy() *DocumentationPartObservation { + if in == nil { + return nil + } + out := new(DocumentationPartObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DocumentationPartParameters) DeepCopyInto(out *DocumentationPartParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(LocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.RestAPIIDRef != nil { + in, out := &in.RestAPIIDRef, &out.RestAPIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RestAPIIDSelector != nil { + in, out := &in.RestAPIIDSelector, &out.RestAPIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationPartParameters. +func (in *DocumentationPartParameters) DeepCopy() *DocumentationPartParameters { + if in == nil { + return nil + } + out := new(DocumentationPartParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentationPartSpec) DeepCopyInto(out *DocumentationPartSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationPartSpec. +func (in *DocumentationPartSpec) DeepCopy() *DocumentationPartSpec { + if in == nil { + return nil + } + out := new(DocumentationPartSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DocumentationPartStatus) DeepCopyInto(out *DocumentationPartStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentationPartStatus. +func (in *DocumentationPartStatus) DeepCopy() *DocumentationPartStatus { + if in == nil { + return nil + } + out := new(DocumentationPartStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainName) DeepCopyInto(out *DomainName) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainName. +func (in *DomainName) DeepCopy() *DomainName { + if in == nil { + return nil + } + out := new(DomainName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainName) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainNameInitParameters) DeepCopyInto(out *DomainNameInitParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.CertificateArnRef != nil { + in, out := &in.CertificateArnRef, &out.CertificateArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateArnSelector != nil { + in, out := &in.CertificateArnSelector, &out.CertificateArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CertificateBody != nil { + in, out := &in.CertificateBody, &out.CertificateBody + *out = new(string) + **out = **in + } + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.CertificateName != nil { + in, out := &in.CertificateName, &out.CertificateName + *out = new(string) + **out = **in + } + if in.CertificatePrivateKeySecretRef != nil { + in, out := &in.CertificatePrivateKeySecretRef, &out.CertificatePrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.EndpointConfiguration != nil { + in, out := &in.EndpointConfiguration, &out.EndpointConfiguration + *out = new(EndpointConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MutualTLSAuthentication != nil { + in, out := &in.MutualTLSAuthentication, &out.MutualTLSAuthentication + *out = new(MutualTLSAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OwnershipVerificationCertificateArn != nil { + in, out := &in.OwnershipVerificationCertificateArn, &out.OwnershipVerificationCertificateArn + *out = new(string) + **out = **in + } + if in.RegionalCertificateArn != nil { + in, out := &in.RegionalCertificateArn, &out.RegionalCertificateArn + *out = new(string) + **out = **in + } + if in.RegionalCertificateArnRef != nil { + in, out 
:= &in.RegionalCertificateArnRef, &out.RegionalCertificateArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegionalCertificateArnSelector != nil { + in, out := &in.RegionalCertificateArnSelector, &out.RegionalCertificateArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RegionalCertificateName != nil { + in, out := &in.RegionalCertificateName, &out.RegionalCertificateName + *out = new(string) + **out = **in + } + if in.SecurityPolicy != nil { + in, out := &in.SecurityPolicy, &out.SecurityPolicy + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameInitParameters. +func (in *DomainNameInitParameters) DeepCopy() *DomainNameInitParameters { + if in == nil { + return nil + } + out := new(DomainNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameList) DeepCopyInto(out *DomainNameList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DomainName, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameList. 
+func (in *DomainNameList) DeepCopy() *DomainNameList { + if in == nil { + return nil + } + out := new(DomainNameList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainNameList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameObservation) DeepCopyInto(out *DomainNameObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.CertificateBody != nil { + in, out := &in.CertificateBody, &out.CertificateBody + *out = new(string) + **out = **in + } + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.CertificateName != nil { + in, out := &in.CertificateName, &out.CertificateName + *out = new(string) + **out = **in + } + if in.CertificateUploadDate != nil { + in, out := &in.CertificateUploadDate, &out.CertificateUploadDate + *out = new(string) + **out = **in + } + if in.CloudfrontDomainName != nil { + in, out := &in.CloudfrontDomainName, &out.CloudfrontDomainName + *out = new(string) + **out = **in + } + if in.CloudfrontZoneID != nil { + in, out := &in.CloudfrontZoneID, &out.CloudfrontZoneID + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.EndpointConfiguration != nil { + in, out := &in.EndpointConfiguration, &out.EndpointConfiguration + *out = new(EndpointConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + 
**out = **in + } + if in.MutualTLSAuthentication != nil { + in, out := &in.MutualTLSAuthentication, &out.MutualTLSAuthentication + *out = new(MutualTLSAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.OwnershipVerificationCertificateArn != nil { + in, out := &in.OwnershipVerificationCertificateArn, &out.OwnershipVerificationCertificateArn + *out = new(string) + **out = **in + } + if in.RegionalCertificateArn != nil { + in, out := &in.RegionalCertificateArn, &out.RegionalCertificateArn + *out = new(string) + **out = **in + } + if in.RegionalCertificateName != nil { + in, out := &in.RegionalCertificateName, &out.RegionalCertificateName + *out = new(string) + **out = **in + } + if in.RegionalDomainName != nil { + in, out := &in.RegionalDomainName, &out.RegionalDomainName + *out = new(string) + **out = **in + } + if in.RegionalZoneID != nil { + in, out := &in.RegionalZoneID, &out.RegionalZoneID + *out = new(string) + **out = **in + } + if in.SecurityPolicy != nil { + in, out := &in.SecurityPolicy, &out.SecurityPolicy + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameObservation. 
+func (in *DomainNameObservation) DeepCopy() *DomainNameObservation { + if in == nil { + return nil + } + out := new(DomainNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameParameters) DeepCopyInto(out *DomainNameParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.CertificateArnRef != nil { + in, out := &in.CertificateArnRef, &out.CertificateArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateArnSelector != nil { + in, out := &in.CertificateArnSelector, &out.CertificateArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CertificateBody != nil { + in, out := &in.CertificateBody, &out.CertificateBody + *out = new(string) + **out = **in + } + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.CertificateName != nil { + in, out := &in.CertificateName, &out.CertificateName + *out = new(string) + **out = **in + } + if in.CertificatePrivateKeySecretRef != nil { + in, out := &in.CertificatePrivateKeySecretRef, &out.CertificatePrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.EndpointConfiguration != nil { + in, out := &in.EndpointConfiguration, &out.EndpointConfiguration + *out = new(EndpointConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.MutualTLSAuthentication != nil { + in, out := &in.MutualTLSAuthentication, &out.MutualTLSAuthentication + *out = new(MutualTLSAuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.OwnershipVerificationCertificateArn != nil { + in, out := &in.OwnershipVerificationCertificateArn, 
&out.OwnershipVerificationCertificateArn + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RegionalCertificateArn != nil { + in, out := &in.RegionalCertificateArn, &out.RegionalCertificateArn + *out = new(string) + **out = **in + } + if in.RegionalCertificateArnRef != nil { + in, out := &in.RegionalCertificateArnRef, &out.RegionalCertificateArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegionalCertificateArnSelector != nil { + in, out := &in.RegionalCertificateArnSelector, &out.RegionalCertificateArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RegionalCertificateName != nil { + in, out := &in.RegionalCertificateName, &out.RegionalCertificateName + *out = new(string) + **out = **in + } + if in.SecurityPolicy != nil { + in, out := &in.SecurityPolicy, &out.SecurityPolicy + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameParameters. +func (in *DomainNameParameters) DeepCopy() *DomainNameParameters { + if in == nil { + return nil + } + out := new(DomainNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainNameSpec) DeepCopyInto(out *DomainNameSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameSpec. +func (in *DomainNameSpec) DeepCopy() *DomainNameSpec { + if in == nil { + return nil + } + out := new(DomainNameSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameStatus) DeepCopyInto(out *DomainNameStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameStatus. +func (in *DomainNameStatus) DeepCopy() *DomainNameStatus { + if in == nil { + return nil + } + out := new(DomainNameStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConfigurationInitParameters) DeepCopyInto(out *EndpointConfigurationInitParameters) { + *out = *in + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigurationInitParameters. +func (in *EndpointConfigurationInitParameters) DeepCopy() *EndpointConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EndpointConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointConfigurationObservation) DeepCopyInto(out *EndpointConfigurationObservation) { + *out = *in + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigurationObservation. +func (in *EndpointConfigurationObservation) DeepCopy() *EndpointConfigurationObservation { + if in == nil { + return nil + } + out := new(EndpointConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConfigurationParameters) DeepCopyInto(out *EndpointConfigurationParameters) { + *out = *in + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigurationParameters. +func (in *EndpointConfigurationParameters) DeepCopy() *EndpointConfigurationParameters { + if in == nil { + return nil + } + out := new(EndpointConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Integration) DeepCopyInto(out *Integration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Integration. 
+func (in *Integration) DeepCopy() *Integration { + if in == nil { + return nil + } + out := new(Integration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Integration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationInitParameters) DeepCopyInto(out *IntegrationInitParameters) { + *out = *in + if in.CacheKeyParameters != nil { + in, out := &in.CacheKeyParameters, &out.CacheKeyParameters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CacheNamespace != nil { + in, out := &in.CacheNamespace, &out.CacheNamespace + *out = new(string) + **out = **in + } + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.ConnectionIDRef != nil { + in, out := &in.ConnectionIDRef, &out.ConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionIDSelector != nil { + in, out := &in.ConnectionIDSelector, &out.ConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.ContentHandling != nil { + in, out := &in.ContentHandling, &out.ContentHandling + *out = new(string) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(string) + **out = **in + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = new(string) + **out = **in + } + if in.HTTPMethodRef != nil { + in, out := &in.HTTPMethodRef, &out.HTTPMethodRef + *out = 
new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HTTPMethodSelector != nil { + in, out := &in.HTTPMethodSelector, &out.HTTPMethodSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IntegrationHTTPMethod != nil { + in, out := &in.IntegrationHTTPMethod, &out.IntegrationHTTPMethod + *out = new(string) + **out = **in + } + if in.PassthroughBehavior != nil { + in, out := &in.PassthroughBehavior, &out.PassthroughBehavior + *out = new(string) + **out = **in + } + if in.RequestParameters != nil { + in, out := &in.RequestParameters, &out.RequestParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RequestTemplates != nil { + in, out := &in.RequestTemplates, &out.RequestTemplates + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIDRef != nil { + in, out := &in.ResourceIDRef, &out.ResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceIDSelector != nil { + in, out := &in.ResourceIDSelector, &out.ResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.RestAPIIDRef != nil { + in, out := &in.RestAPIIDRef, &out.RestAPIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RestAPIIDSelector != nil { + in, out := &in.RestAPIIDSelector, &out.RestAPIIDSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeoutMilliseconds != nil { + in, out := &in.TimeoutMilliseconds, &out.TimeoutMilliseconds + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.URIRef != nil { + in, out := &in.URIRef, &out.URIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.URISelector != nil { + in, out := &in.URISelector, &out.URISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationInitParameters. +func (in *IntegrationInitParameters) DeepCopy() *IntegrationInitParameters { + if in == nil { + return nil + } + out := new(IntegrationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationList) DeepCopyInto(out *IntegrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Integration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationList. +func (in *IntegrationList) DeepCopy() *IntegrationList { + if in == nil { + return nil + } + out := new(IntegrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IntegrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationObservation) DeepCopyInto(out *IntegrationObservation) { + *out = *in + if in.CacheKeyParameters != nil { + in, out := &in.CacheKeyParameters, &out.CacheKeyParameters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CacheNamespace != nil { + in, out := &in.CacheNamespace, &out.CacheNamespace + *out = new(string) + **out = **in + } + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.ContentHandling != nil { + in, out := &in.ContentHandling, &out.ContentHandling + *out = new(string) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(string) + **out = **in + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationHTTPMethod != nil { + in, out := &in.IntegrationHTTPMethod, &out.IntegrationHTTPMethod + *out = new(string) + **out = **in + } + if in.PassthroughBehavior != nil { + in, out := &in.PassthroughBehavior, &out.PassthroughBehavior + *out = new(string) + **out = **in + } + if in.RequestParameters != nil { + in, out := &in.RequestParameters, &out.RequestParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = 
new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RequestTemplates != nil { + in, out := &in.RequestTemplates, &out.RequestTemplates + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.TimeoutMilliseconds != nil { + in, out := &in.TimeoutMilliseconds, &out.TimeoutMilliseconds + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationObservation. +func (in *IntegrationObservation) DeepCopy() *IntegrationObservation { + if in == nil { + return nil + } + out := new(IntegrationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationParameters) DeepCopyInto(out *IntegrationParameters) { + *out = *in + if in.CacheKeyParameters != nil { + in, out := &in.CacheKeyParameters, &out.CacheKeyParameters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CacheNamespace != nil { + in, out := &in.CacheNamespace, &out.CacheNamespace + *out = new(string) + **out = **in + } + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.ConnectionIDRef != nil { + in, out := &in.ConnectionIDRef, &out.ConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionIDSelector != nil { + in, out := &in.ConnectionIDSelector, &out.ConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.ContentHandling != nil { + in, out := &in.ContentHandling, &out.ContentHandling + *out = new(string) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(string) + **out = **in + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = new(string) + **out = **in + } + if in.HTTPMethodRef != nil { + in, out := &in.HTTPMethodRef, &out.HTTPMethodRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HTTPMethodSelector != nil { + in, out := &in.HTTPMethodSelector, &out.HTTPMethodSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IntegrationHTTPMethod != nil { + in, out := &in.IntegrationHTTPMethod, &out.IntegrationHTTPMethod + *out = new(string) + **out = **in + } + if in.PassthroughBehavior != nil { + in, out := &in.PassthroughBehavior, &out.PassthroughBehavior + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := 
&in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RequestParameters != nil { + in, out := &in.RequestParameters, &out.RequestParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RequestTemplates != nil { + in, out := &in.RequestTemplates, &out.RequestTemplates + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIDRef != nil { + in, out := &in.ResourceIDRef, &out.ResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceIDSelector != nil { + in, out := &in.ResourceIDSelector, &out.ResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.RestAPIIDRef != nil { + in, out := &in.RestAPIIDRef, &out.RestAPIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RestAPIIDSelector != nil { + in, out := &in.RestAPIIDSelector, &out.RestAPIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeoutMilliseconds != nil { + in, out := &in.TimeoutMilliseconds, &out.TimeoutMilliseconds + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, 
&out.URI + *out = new(string) + **out = **in + } + if in.URIRef != nil { + in, out := &in.URIRef, &out.URIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.URISelector != nil { + in, out := &in.URISelector, &out.URISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationParameters. +func (in *IntegrationParameters) DeepCopy() *IntegrationParameters { + if in == nil { + return nil + } + out := new(IntegrationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationSpec) DeepCopyInto(out *IntegrationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSpec. +func (in *IntegrationSpec) DeepCopy() *IntegrationSpec { + if in == nil { + return nil + } + out := new(IntegrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationStatus) DeepCopyInto(out *IntegrationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationStatus. +func (in *IntegrationStatus) DeepCopy() *IntegrationStatus { + if in == nil { + return nil + } + out := new(IntegrationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocationInitParameters) DeepCopyInto(out *LocationInitParameters) { + *out = *in + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationInitParameters. +func (in *LocationInitParameters) DeepCopy() *LocationInitParameters { + if in == nil { + return nil + } + out := new(LocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationObservation) DeepCopyInto(out *LocationObservation) { + *out = *in + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationObservation. +func (in *LocationObservation) DeepCopy() *LocationObservation { + if in == nil { + return nil + } + out := new(LocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocationParameters) DeepCopyInto(out *LocationParameters) { + *out = *in + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationParameters. +func (in *LocationParameters) DeepCopy() *LocationParameters { + if in == nil { + return nil + } + out := new(LocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MethodSettings) DeepCopyInto(out *MethodSettings) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MethodSettings. +func (in *MethodSettings) DeepCopy() *MethodSettings { + if in == nil { + return nil + } + out := new(MethodSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MethodSettings) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MethodSettingsInitParameters) DeepCopyInto(out *MethodSettingsInitParameters) { + *out = *in + if in.MethodPath != nil { + in, out := &in.MethodPath, &out.MethodPath + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.RestAPIIDRef != nil { + in, out := &in.RestAPIIDRef, &out.RestAPIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RestAPIIDSelector != nil { + in, out := &in.RestAPIIDSelector, &out.RestAPIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StageName != nil { + in, out := &in.StageName, &out.StageName + *out = new(string) + **out = **in + } + if in.StageNameRef != nil { + in, out := &in.StageNameRef, &out.StageNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StageNameSelector != nil { + in, out := &in.StageNameSelector, &out.StageNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MethodSettingsInitParameters. +func (in *MethodSettingsInitParameters) DeepCopy() *MethodSettingsInitParameters { + if in == nil { + return nil + } + out := new(MethodSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MethodSettingsList) DeepCopyInto(out *MethodSettingsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MethodSettings, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MethodSettingsList. +func (in *MethodSettingsList) DeepCopy() *MethodSettingsList { + if in == nil { + return nil + } + out := new(MethodSettingsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MethodSettingsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MethodSettingsObservation) DeepCopyInto(out *MethodSettingsObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MethodPath != nil { + in, out := &in.MethodPath, &out.MethodPath + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.StageName != nil { + in, out := &in.StageName, &out.StageName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MethodSettingsObservation. 
+func (in *MethodSettingsObservation) DeepCopy() *MethodSettingsObservation { + if in == nil { + return nil + } + out := new(MethodSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MethodSettingsParameters) DeepCopyInto(out *MethodSettingsParameters) { + *out = *in + if in.MethodPath != nil { + in, out := &in.MethodPath, &out.MethodPath + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.RestAPIIDRef != nil { + in, out := &in.RestAPIIDRef, &out.RestAPIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RestAPIIDSelector != nil { + in, out := &in.RestAPIIDSelector, &out.RestAPIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.StageName != nil { + in, out := &in.StageName, &out.StageName + *out = new(string) + **out = **in + } + if in.StageNameRef != nil { + in, out := &in.StageNameRef, &out.StageNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StageNameSelector != nil { + in, out := &in.StageNameSelector, &out.StageNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MethodSettingsParameters. +func (in *MethodSettingsParameters) DeepCopy() *MethodSettingsParameters { + if in == nil { + return nil + } + out := new(MethodSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MethodSettingsSpec) DeepCopyInto(out *MethodSettingsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MethodSettingsSpec. +func (in *MethodSettingsSpec) DeepCopy() *MethodSettingsSpec { + if in == nil { + return nil + } + out := new(MethodSettingsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MethodSettingsStatus) DeepCopyInto(out *MethodSettingsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MethodSettingsStatus. +func (in *MethodSettingsStatus) DeepCopy() *MethodSettingsStatus { + if in == nil { + return nil + } + out := new(MethodSettingsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutualTLSAuthenticationInitParameters) DeepCopyInto(out *MutualTLSAuthenticationInitParameters) { + *out = *in + if in.TruststoreURI != nil { + in, out := &in.TruststoreURI, &out.TruststoreURI + *out = new(string) + **out = **in + } + if in.TruststoreVersion != nil { + in, out := &in.TruststoreVersion, &out.TruststoreVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualTLSAuthenticationInitParameters. 
+func (in *MutualTLSAuthenticationInitParameters) DeepCopy() *MutualTLSAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(MutualTLSAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutualTLSAuthenticationObservation) DeepCopyInto(out *MutualTLSAuthenticationObservation) { + *out = *in + if in.TruststoreURI != nil { + in, out := &in.TruststoreURI, &out.TruststoreURI + *out = new(string) + **out = **in + } + if in.TruststoreVersion != nil { + in, out := &in.TruststoreVersion, &out.TruststoreVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualTLSAuthenticationObservation. +func (in *MutualTLSAuthenticationObservation) DeepCopy() *MutualTLSAuthenticationObservation { + if in == nil { + return nil + } + out := new(MutualTLSAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutualTLSAuthenticationParameters) DeepCopyInto(out *MutualTLSAuthenticationParameters) { + *out = *in + if in.TruststoreURI != nil { + in, out := &in.TruststoreURI, &out.TruststoreURI + *out = new(string) + **out = **in + } + if in.TruststoreVersion != nil { + in, out := &in.TruststoreVersion, &out.TruststoreVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualTLSAuthenticationParameters. +func (in *MutualTLSAuthenticationParameters) DeepCopy() *MutualTLSAuthenticationParameters { + if in == nil { + return nil + } + out := new(MutualTLSAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *QuotaSettingsInitParameters) DeepCopyInto(out *QuotaSettingsInitParameters) { + *out = *in + if in.Limit != nil { + in, out := &in.Limit, &out.Limit + *out = new(float64) + **out = **in + } + if in.Offset != nil { + in, out := &in.Offset, &out.Offset + *out = new(float64) + **out = **in + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSettingsInitParameters. +func (in *QuotaSettingsInitParameters) DeepCopy() *QuotaSettingsInitParameters { + if in == nil { + return nil + } + out := new(QuotaSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaSettingsObservation) DeepCopyInto(out *QuotaSettingsObservation) { + *out = *in + if in.Limit != nil { + in, out := &in.Limit, &out.Limit + *out = new(float64) + **out = **in + } + if in.Offset != nil { + in, out := &in.Offset, &out.Offset + *out = new(float64) + **out = **in + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSettingsObservation. +func (in *QuotaSettingsObservation) DeepCopy() *QuotaSettingsObservation { + if in == nil { + return nil + } + out := new(QuotaSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuotaSettingsParameters) DeepCopyInto(out *QuotaSettingsParameters) { + *out = *in + if in.Limit != nil { + in, out := &in.Limit, &out.Limit + *out = new(float64) + **out = **in + } + if in.Offset != nil { + in, out := &in.Offset, &out.Offset + *out = new(float64) + **out = **in + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSettingsParameters. +func (in *QuotaSettingsParameters) DeepCopy() *QuotaSettingsParameters { + if in == nil { + return nil + } + out := new(QuotaSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestAPI) DeepCopyInto(out *RestAPI) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPI. +func (in *RestAPI) DeepCopy() *RestAPI { + if in == nil { + return nil + } + out := new(RestAPI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RestAPI) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestAPIEndpointConfigurationInitParameters) DeepCopyInto(out *RestAPIEndpointConfigurationInitParameters) { + *out = *in + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCEndpointIds != nil { + in, out := &in.VPCEndpointIds, &out.VPCEndpointIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPIEndpointConfigurationInitParameters. +func (in *RestAPIEndpointConfigurationInitParameters) DeepCopy() *RestAPIEndpointConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RestAPIEndpointConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestAPIEndpointConfigurationObservation) DeepCopyInto(out *RestAPIEndpointConfigurationObservation) { + *out = *in + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCEndpointIds != nil { + in, out := &in.VPCEndpointIds, &out.VPCEndpointIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPIEndpointConfigurationObservation. 
+func (in *RestAPIEndpointConfigurationObservation) DeepCopy() *RestAPIEndpointConfigurationObservation { + if in == nil { + return nil + } + out := new(RestAPIEndpointConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestAPIEndpointConfigurationParameters) DeepCopyInto(out *RestAPIEndpointConfigurationParameters) { + *out = *in + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCEndpointIds != nil { + in, out := &in.VPCEndpointIds, &out.VPCEndpointIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPIEndpointConfigurationParameters. +func (in *RestAPIEndpointConfigurationParameters) DeepCopy() *RestAPIEndpointConfigurationParameters { + if in == nil { + return nil + } + out := new(RestAPIEndpointConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestAPIInitParameters) DeepCopyInto(out *RestAPIInitParameters) { + *out = *in + if in.APIKeySource != nil { + in, out := &in.APIKeySource, &out.APIKeySource + *out = new(string) + **out = **in + } + if in.BinaryMediaTypes != nil { + in, out := &in.BinaryMediaTypes, &out.BinaryMediaTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableExecuteAPIEndpoint != nil { + in, out := &in.DisableExecuteAPIEndpoint, &out.DisableExecuteAPIEndpoint + *out = new(bool) + **out = **in + } + if in.EndpointConfiguration != nil { + in, out := &in.EndpointConfiguration, &out.EndpointConfiguration + *out = new(RestAPIEndpointConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FailOnWarnings != nil { + in, out := &in.FailOnWarnings, &out.FailOnWarnings + *out = new(bool) + **out = **in + } + if in.MinimumCompressionSize != nil { + in, out := &in.MinimumCompressionSize, &out.MinimumCompressionSize + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PutRestAPIMode != nil { + in, out := &in.PutRestAPIMode, &out.PutRestAPIMode + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal 
*string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPIInitParameters. +func (in *RestAPIInitParameters) DeepCopy() *RestAPIInitParameters { + if in == nil { + return nil + } + out := new(RestAPIInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestAPIList) DeepCopyInto(out *RestAPIList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RestAPI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPIList. +func (in *RestAPIList) DeepCopy() *RestAPIList { + if in == nil { + return nil + } + out := new(RestAPIList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RestAPIList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestAPIObservation) DeepCopyInto(out *RestAPIObservation) { + *out = *in + if in.APIKeySource != nil { + in, out := &in.APIKeySource, &out.APIKeySource + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BinaryMediaTypes != nil { + in, out := &in.BinaryMediaTypes, &out.BinaryMediaTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableExecuteAPIEndpoint != nil { + in, out := &in.DisableExecuteAPIEndpoint, &out.DisableExecuteAPIEndpoint + *out = new(bool) + **out = **in + } + if in.EndpointConfiguration != nil { + in, out := &in.EndpointConfiguration, &out.EndpointConfiguration + *out = new(RestAPIEndpointConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ExecutionArn != nil { + in, out := &in.ExecutionArn, &out.ExecutionArn + *out = new(string) + **out = **in + } + if in.FailOnWarnings != nil { + in, out := &in.FailOnWarnings, &out.FailOnWarnings + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MinimumCompressionSize != nil { + in, out := &in.MinimumCompressionSize, &out.MinimumCompressionSize + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] 
= nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PutRestAPIMode != nil { + in, out := &in.PutRestAPIMode, &out.PutRestAPIMode + *out = new(string) + **out = **in + } + if in.RootResourceID != nil { + in, out := &in.RootResourceID, &out.RootResourceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPIObservation. +func (in *RestAPIObservation) DeepCopy() *RestAPIObservation { + if in == nil { + return nil + } + out := new(RestAPIObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestAPIParameters) DeepCopyInto(out *RestAPIParameters) { + *out = *in + if in.APIKeySource != nil { + in, out := &in.APIKeySource, &out.APIKeySource + *out = new(string) + **out = **in + } + if in.BinaryMediaTypes != nil { + in, out := &in.BinaryMediaTypes, &out.BinaryMediaTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableExecuteAPIEndpoint != nil { + in, out := &in.DisableExecuteAPIEndpoint, &out.DisableExecuteAPIEndpoint + *out = new(bool) + **out = **in + } + if in.EndpointConfiguration != nil { + in, out := &in.EndpointConfiguration, &out.EndpointConfiguration + *out = new(RestAPIEndpointConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.FailOnWarnings != nil { + in, out := &in.FailOnWarnings, &out.FailOnWarnings + *out = new(bool) + **out = **in + } + if in.MinimumCompressionSize != nil { + in, out := &in.MinimumCompressionSize, &out.MinimumCompressionSize + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PutRestAPIMode != nil { + in, out := &in.PutRestAPIMode, &out.PutRestAPIMode + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags 
+ *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPIParameters. +func (in *RestAPIParameters) DeepCopy() *RestAPIParameters { + if in == nil { + return nil + } + out := new(RestAPIParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestAPISpec) DeepCopyInto(out *RestAPISpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPISpec. +func (in *RestAPISpec) DeepCopy() *RestAPISpec { + if in == nil { + return nil + } + out := new(RestAPISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestAPIStatus) DeepCopyInto(out *RestAPIStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestAPIStatus. +func (in *RestAPIStatus) DeepCopy() *RestAPIStatus { + if in == nil { + return nil + } + out := new(RestAPIStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingsInitParameters) DeepCopyInto(out *SettingsInitParameters) { + *out = *in + if in.CacheDataEncrypted != nil { + in, out := &in.CacheDataEncrypted, &out.CacheDataEncrypted + *out = new(bool) + **out = **in + } + if in.CacheTTLInSeconds != nil { + in, out := &in.CacheTTLInSeconds, &out.CacheTTLInSeconds + *out = new(float64) + **out = **in + } + if in.CachingEnabled != nil { + in, out := &in.CachingEnabled, &out.CachingEnabled + *out = new(bool) + **out = **in + } + if in.DataTraceEnabled != nil { + in, out := &in.DataTraceEnabled, &out.DataTraceEnabled + *out = new(bool) + **out = **in + } + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = new(string) + **out = **in + } + if in.MetricsEnabled != nil { + in, out := &in.MetricsEnabled, &out.MetricsEnabled + *out = new(bool) + **out = **in + } + if in.RequireAuthorizationForCacheControl != nil { + in, out := &in.RequireAuthorizationForCacheControl, &out.RequireAuthorizationForCacheControl + *out = new(bool) + **out = **in + } + if in.ThrottlingBurstLimit != nil { + in, out := &in.ThrottlingBurstLimit, &out.ThrottlingBurstLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRateLimit != nil { + in, out := &in.ThrottlingRateLimit, &out.ThrottlingRateLimit + *out = new(float64) + **out = **in + } + if in.UnauthorizedCacheControlHeaderStrategy != nil { + in, out := &in.UnauthorizedCacheControlHeaderStrategy, &out.UnauthorizedCacheControlHeaderStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsInitParameters. +func (in *SettingsInitParameters) DeepCopy() *SettingsInitParameters { + if in == nil { + return nil + } + out := new(SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingsObservation) DeepCopyInto(out *SettingsObservation) { + *out = *in + if in.CacheDataEncrypted != nil { + in, out := &in.CacheDataEncrypted, &out.CacheDataEncrypted + *out = new(bool) + **out = **in + } + if in.CacheTTLInSeconds != nil { + in, out := &in.CacheTTLInSeconds, &out.CacheTTLInSeconds + *out = new(float64) + **out = **in + } + if in.CachingEnabled != nil { + in, out := &in.CachingEnabled, &out.CachingEnabled + *out = new(bool) + **out = **in + } + if in.DataTraceEnabled != nil { + in, out := &in.DataTraceEnabled, &out.DataTraceEnabled + *out = new(bool) + **out = **in + } + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = new(string) + **out = **in + } + if in.MetricsEnabled != nil { + in, out := &in.MetricsEnabled, &out.MetricsEnabled + *out = new(bool) + **out = **in + } + if in.RequireAuthorizationForCacheControl != nil { + in, out := &in.RequireAuthorizationForCacheControl, &out.RequireAuthorizationForCacheControl + *out = new(bool) + **out = **in + } + if in.ThrottlingBurstLimit != nil { + in, out := &in.ThrottlingBurstLimit, &out.ThrottlingBurstLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRateLimit != nil { + in, out := &in.ThrottlingRateLimit, &out.ThrottlingRateLimit + *out = new(float64) + **out = **in + } + if in.UnauthorizedCacheControlHeaderStrategy != nil { + in, out := &in.UnauthorizedCacheControlHeaderStrategy, &out.UnauthorizedCacheControlHeaderStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsObservation. +func (in *SettingsObservation) DeepCopy() *SettingsObservation { + if in == nil { + return nil + } + out := new(SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingsParameters) DeepCopyInto(out *SettingsParameters) { + *out = *in + if in.CacheDataEncrypted != nil { + in, out := &in.CacheDataEncrypted, &out.CacheDataEncrypted + *out = new(bool) + **out = **in + } + if in.CacheTTLInSeconds != nil { + in, out := &in.CacheTTLInSeconds, &out.CacheTTLInSeconds + *out = new(float64) + **out = **in + } + if in.CachingEnabled != nil { + in, out := &in.CachingEnabled, &out.CachingEnabled + *out = new(bool) + **out = **in + } + if in.DataTraceEnabled != nil { + in, out := &in.DataTraceEnabled, &out.DataTraceEnabled + *out = new(bool) + **out = **in + } + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = new(string) + **out = **in + } + if in.MetricsEnabled != nil { + in, out := &in.MetricsEnabled, &out.MetricsEnabled + *out = new(bool) + **out = **in + } + if in.RequireAuthorizationForCacheControl != nil { + in, out := &in.RequireAuthorizationForCacheControl, &out.RequireAuthorizationForCacheControl + *out = new(bool) + **out = **in + } + if in.ThrottlingBurstLimit != nil { + in, out := &in.ThrottlingBurstLimit, &out.ThrottlingBurstLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRateLimit != nil { + in, out := &in.ThrottlingRateLimit, &out.ThrottlingRateLimit + *out = new(float64) + **out = **in + } + if in.UnauthorizedCacheControlHeaderStrategy != nil { + in, out := &in.UnauthorizedCacheControlHeaderStrategy, &out.UnauthorizedCacheControlHeaderStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsParameters. +func (in *SettingsParameters) DeepCopy() *SettingsParameters { + if in == nil { + return nil + } + out := new(SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Stage) DeepCopyInto(out *Stage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stage. +func (in *Stage) DeepCopy() *Stage { + if in == nil { + return nil + } + out := new(Stage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Stage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageInitParameters) DeepCopyInto(out *StageInitParameters) { + *out = *in + if in.AccessLogSettings != nil { + in, out := &in.AccessLogSettings, &out.AccessLogSettings + *out = new(AccessLogSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CacheClusterEnabled != nil { + in, out := &in.CacheClusterEnabled, &out.CacheClusterEnabled + *out = new(bool) + **out = **in + } + if in.CacheClusterSize != nil { + in, out := &in.CacheClusterSize, &out.CacheClusterSize + *out = new(string) + **out = **in + } + if in.CanarySettings != nil { + in, out := &in.CanarySettings, &out.CanarySettings + *out = new(CanarySettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientCertificateID != nil { + in, out := &in.ClientCertificateID, &out.ClientCertificateID + *out = new(string) + **out = **in + } + if in.DeploymentID != nil { + in, out := &in.DeploymentID, &out.DeploymentID + *out = new(string) + **out = **in + } + if in.DeploymentIDRef != nil { + in, out := &in.DeploymentIDRef, &out.DeploymentIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeploymentIDSelector != nil { + in, out := &in.DeploymentIDSelector, &out.DeploymentIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DocumentationVersion != nil { + in, out := &in.DocumentationVersion, &out.DocumentationVersion + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.RestAPIIDRef != nil { + in, out := &in.RestAPIIDRef, &out.RestAPIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RestAPIIDSelector != nil { + in, out := &in.RestAPIIDSelector, &out.RestAPIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StageName != nil { + in, out := &in.StageName, &out.StageName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.XrayTracingEnabled != nil { + in, out := &in.XrayTracingEnabled, &out.XrayTracingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageInitParameters. +func (in *StageInitParameters) DeepCopy() *StageInitParameters { + if in == nil { + return nil + } + out := new(StageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StageList) DeepCopyInto(out *StageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Stage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageList. +func (in *StageList) DeepCopy() *StageList { + if in == nil { + return nil + } + out := new(StageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageObservation) DeepCopyInto(out *StageObservation) { + *out = *in + if in.AccessLogSettings != nil { + in, out := &in.AccessLogSettings, &out.AccessLogSettings + *out = new(AccessLogSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CacheClusterEnabled != nil { + in, out := &in.CacheClusterEnabled, &out.CacheClusterEnabled + *out = new(bool) + **out = **in + } + if in.CacheClusterSize != nil { + in, out := &in.CacheClusterSize, &out.CacheClusterSize + *out = new(string) + **out = **in + } + if in.CanarySettings != nil { + in, out := &in.CanarySettings, &out.CanarySettings + *out = new(CanarySettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientCertificateID != nil { + in, out := &in.ClientCertificateID, &out.ClientCertificateID + *out = new(string) + **out = **in + } + if in.DeploymentID != nil { + in, out := &in.DeploymentID, &out.DeploymentID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, 
&out.Description + *out = new(string) + **out = **in + } + if in.DocumentationVersion != nil { + in, out := &in.DocumentationVersion, &out.DocumentationVersion + *out = new(string) + **out = **in + } + if in.ExecutionArn != nil { + in, out := &in.ExecutionArn, &out.ExecutionArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InvokeURL != nil { + in, out := &in.InvokeURL, &out.InvokeURL + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.StageName != nil { + in, out := &in.StageName, &out.StageName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WebACLArn != nil { + in, out := &in.WebACLArn, &out.WebACLArn + *out = new(string) + **out = **in + } + if in.XrayTracingEnabled != nil { + in, out := &in.XrayTracingEnabled, &out.XrayTracingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new StageObservation. +func (in *StageObservation) DeepCopy() *StageObservation { + if in == nil { + return nil + } + out := new(StageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageParameters) DeepCopyInto(out *StageParameters) { + *out = *in + if in.AccessLogSettings != nil { + in, out := &in.AccessLogSettings, &out.AccessLogSettings + *out = new(AccessLogSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.CacheClusterEnabled != nil { + in, out := &in.CacheClusterEnabled, &out.CacheClusterEnabled + *out = new(bool) + **out = **in + } + if in.CacheClusterSize != nil { + in, out := &in.CacheClusterSize, &out.CacheClusterSize + *out = new(string) + **out = **in + } + if in.CanarySettings != nil { + in, out := &in.CanarySettings, &out.CanarySettings + *out = new(CanarySettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientCertificateID != nil { + in, out := &in.ClientCertificateID, &out.ClientCertificateID + *out = new(string) + **out = **in + } + if in.DeploymentID != nil { + in, out := &in.DeploymentID, &out.DeploymentID + *out = new(string) + **out = **in + } + if in.DeploymentIDRef != nil { + in, out := &in.DeploymentIDRef, &out.DeploymentIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeploymentIDSelector != nil { + in, out := &in.DeploymentIDSelector, &out.DeploymentIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DocumentationVersion != nil { + in, out := &in.DocumentationVersion, &out.DocumentationVersion + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RestAPIID != nil { + in, 
out := &in.RestAPIID, &out.RestAPIID + *out = new(string) + **out = **in + } + if in.RestAPIIDRef != nil { + in, out := &in.RestAPIIDRef, &out.RestAPIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RestAPIIDSelector != nil { + in, out := &in.RestAPIIDSelector, &out.RestAPIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StageName != nil { + in, out := &in.StageName, &out.StageName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.XrayTracingEnabled != nil { + in, out := &in.XrayTracingEnabled, &out.XrayTracingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageParameters. +func (in *StageParameters) DeepCopy() *StageParameters { + if in == nil { + return nil + } + out := new(StageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageSpec) DeepCopyInto(out *StageSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageSpec. 
+func (in *StageSpec) DeepCopy() *StageSpec { + if in == nil { + return nil + } + out := new(StageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageStatus) DeepCopyInto(out *StageStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageStatus. +func (in *StageStatus) DeepCopy() *StageStatus { + if in == nil { + return nil + } + out := new(StageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfigInitParameters) DeepCopyInto(out *TLSConfigInitParameters) { + *out = *in + if in.InsecureSkipVerification != nil { + in, out := &in.InsecureSkipVerification, &out.InsecureSkipVerification + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfigInitParameters. +func (in *TLSConfigInitParameters) DeepCopy() *TLSConfigInitParameters { + if in == nil { + return nil + } + out := new(TLSConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfigObservation) DeepCopyInto(out *TLSConfigObservation) { + *out = *in + if in.InsecureSkipVerification != nil { + in, out := &in.InsecureSkipVerification, &out.InsecureSkipVerification + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfigObservation. 
+func (in *TLSConfigObservation) DeepCopy() *TLSConfigObservation { + if in == nil { + return nil + } + out := new(TLSConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfigParameters) DeepCopyInto(out *TLSConfigParameters) { + *out = *in + if in.InsecureSkipVerification != nil { + in, out := &in.InsecureSkipVerification, &out.InsecureSkipVerification + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfigParameters. +func (in *TLSConfigParameters) DeepCopy() *TLSConfigParameters { + if in == nil { + return nil + } + out := new(TLSConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThrottleInitParameters) DeepCopyInto(out *ThrottleInitParameters) { + *out = *in + if in.BurstLimit != nil { + in, out := &in.BurstLimit, &out.BurstLimit + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThrottleInitParameters. +func (in *ThrottleInitParameters) DeepCopy() *ThrottleInitParameters { + if in == nil { + return nil + } + out := new(ThrottleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThrottleObservation) DeepCopyInto(out *ThrottleObservation) { + *out = *in + if in.BurstLimit != nil { + in, out := &in.BurstLimit, &out.BurstLimit + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThrottleObservation. +func (in *ThrottleObservation) DeepCopy() *ThrottleObservation { + if in == nil { + return nil + } + out := new(ThrottleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThrottleParameters) DeepCopyInto(out *ThrottleParameters) { + *out = *in + if in.BurstLimit != nil { + in, out := &in.BurstLimit, &out.BurstLimit + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThrottleParameters. +func (in *ThrottleParameters) DeepCopy() *ThrottleParameters { + if in == nil { + return nil + } + out := new(ThrottleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThrottleSettingsInitParameters) DeepCopyInto(out *ThrottleSettingsInitParameters) { + *out = *in + if in.BurstLimit != nil { + in, out := &in.BurstLimit, &out.BurstLimit + *out = new(float64) + **out = **in + } + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThrottleSettingsInitParameters. +func (in *ThrottleSettingsInitParameters) DeepCopy() *ThrottleSettingsInitParameters { + if in == nil { + return nil + } + out := new(ThrottleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThrottleSettingsObservation) DeepCopyInto(out *ThrottleSettingsObservation) { + *out = *in + if in.BurstLimit != nil { + in, out := &in.BurstLimit, &out.BurstLimit + *out = new(float64) + **out = **in + } + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThrottleSettingsObservation. +func (in *ThrottleSettingsObservation) DeepCopy() *ThrottleSettingsObservation { + if in == nil { + return nil + } + out := new(ThrottleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThrottleSettingsParameters) DeepCopyInto(out *ThrottleSettingsParameters) { + *out = *in + if in.BurstLimit != nil { + in, out := &in.BurstLimit, &out.BurstLimit + *out = new(float64) + **out = **in + } + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThrottleSettingsParameters. 
+func (in *ThrottleSettingsParameters) DeepCopy() *ThrottleSettingsParameters { + if in == nil { + return nil + } + out := new(ThrottleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsagePlan) DeepCopyInto(out *UsagePlan) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsagePlan. +func (in *UsagePlan) DeepCopy() *UsagePlan { + if in == nil { + return nil + } + out := new(UsagePlan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UsagePlan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UsagePlanInitParameters) DeepCopyInto(out *UsagePlanInitParameters) { + *out = *in + if in.APIStages != nil { + in, out := &in.APIStages, &out.APIStages + *out = make([]APIStagesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProductCode != nil { + in, out := &in.ProductCode, &out.ProductCode + *out = new(string) + **out = **in + } + if in.QuotaSettings != nil { + in, out := &in.QuotaSettings, &out.QuotaSettings + *out = new(QuotaSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThrottleSettings != nil { + in, out := &in.ThrottleSettings, &out.ThrottleSettings + *out = new(ThrottleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsagePlanInitParameters. +func (in *UsagePlanInitParameters) DeepCopy() *UsagePlanInitParameters { + if in == nil { + return nil + } + out := new(UsagePlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UsagePlanList) DeepCopyInto(out *UsagePlanList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]UsagePlan, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsagePlanList. +func (in *UsagePlanList) DeepCopy() *UsagePlanList { + if in == nil { + return nil + } + out := new(UsagePlanList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UsagePlanList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsagePlanObservation) DeepCopyInto(out *UsagePlanObservation) { + *out = *in + if in.APIStages != nil { + in, out := &in.APIStages, &out.APIStages + *out = make([]APIStagesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProductCode != nil { + in, out := &in.ProductCode, &out.ProductCode + *out = new(string) + **out = **in + } + if in.QuotaSettings != nil { + in, out := &in.QuotaSettings, &out.QuotaSettings + *out = new(QuotaSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var 
outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThrottleSettings != nil { + in, out := &in.ThrottleSettings, &out.ThrottleSettings + *out = new(ThrottleSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsagePlanObservation. +func (in *UsagePlanObservation) DeepCopy() *UsagePlanObservation { + if in == nil { + return nil + } + out := new(UsagePlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UsagePlanParameters) DeepCopyInto(out *UsagePlanParameters) { + *out = *in + if in.APIStages != nil { + in, out := &in.APIStages, &out.APIStages + *out = make([]APIStagesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProductCode != nil { + in, out := &in.ProductCode, &out.ProductCode + *out = new(string) + **out = **in + } + if in.QuotaSettings != nil { + in, out := &in.QuotaSettings, &out.QuotaSettings + *out = new(QuotaSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThrottleSettings != nil { + in, out := &in.ThrottleSettings, &out.ThrottleSettings + *out = new(ThrottleSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsagePlanParameters. +func (in *UsagePlanParameters) DeepCopy() *UsagePlanParameters { + if in == nil { + return nil + } + out := new(UsagePlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UsagePlanSpec) DeepCopyInto(out *UsagePlanSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsagePlanSpec. +func (in *UsagePlanSpec) DeepCopy() *UsagePlanSpec { + if in == nil { + return nil + } + out := new(UsagePlanSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsagePlanStatus) DeepCopyInto(out *UsagePlanStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsagePlanStatus. +func (in *UsagePlanStatus) DeepCopy() *UsagePlanStatus { + if in == nil { + return nil + } + out := new(UsagePlanStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/apigateway/v1beta2/zz_generated.managed.go b/apis/apigateway/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..d13be0acf0 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_generated.managed.go @@ -0,0 +1,428 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this DocumentationPart. +func (mg *DocumentationPart) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DocumentationPart. +func (mg *DocumentationPart) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DocumentationPart. 
+func (mg *DocumentationPart) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DocumentationPart. +func (mg *DocumentationPart) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DocumentationPart. +func (mg *DocumentationPart) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DocumentationPart. +func (mg *DocumentationPart) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DocumentationPart. +func (mg *DocumentationPart) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DocumentationPart. +func (mg *DocumentationPart) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DocumentationPart. +func (mg *DocumentationPart) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DocumentationPart. +func (mg *DocumentationPart) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DocumentationPart. +func (mg *DocumentationPart) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DocumentationPart. +func (mg *DocumentationPart) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DomainName. +func (mg *DomainName) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DomainName. 
+func (mg *DomainName) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DomainName. +func (mg *DomainName) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DomainName. +func (mg *DomainName) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DomainName. +func (mg *DomainName) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DomainName. +func (mg *DomainName) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DomainName. +func (mg *DomainName) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DomainName. +func (mg *DomainName) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DomainName. +func (mg *DomainName) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DomainName. +func (mg *DomainName) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DomainName. +func (mg *DomainName) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DomainName. +func (mg *DomainName) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Integration. 
+func (mg *Integration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Integration. +func (mg *Integration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Integration. +func (mg *Integration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Integration. +func (mg *Integration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Integration. +func (mg *Integration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Integration. +func (mg *Integration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Integration. +func (mg *Integration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Integration. +func (mg *Integration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Integration. +func (mg *Integration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Integration. +func (mg *Integration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Integration. +func (mg *Integration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Integration. 
+func (mg *Integration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MethodSettings. +func (mg *MethodSettings) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MethodSettings. +func (mg *MethodSettings) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MethodSettings. +func (mg *MethodSettings) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MethodSettings. +func (mg *MethodSettings) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MethodSettings. +func (mg *MethodSettings) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MethodSettings. +func (mg *MethodSettings) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MethodSettings. +func (mg *MethodSettings) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MethodSettings. +func (mg *MethodSettings) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MethodSettings. +func (mg *MethodSettings) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MethodSettings. +func (mg *MethodSettings) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MethodSettings. 
+func (mg *MethodSettings) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MethodSettings. +func (mg *MethodSettings) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this RestAPI. +func (mg *RestAPI) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RestAPI. +func (mg *RestAPI) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RestAPI. +func (mg *RestAPI) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RestAPI. +func (mg *RestAPI) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RestAPI. +func (mg *RestAPI) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RestAPI. +func (mg *RestAPI) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RestAPI. +func (mg *RestAPI) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RestAPI. +func (mg *RestAPI) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RestAPI. +func (mg *RestAPI) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RestAPI. +func (mg *RestAPI) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RestAPI. 
+func (mg *RestAPI) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RestAPI. +func (mg *RestAPI) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Stage. +func (mg *Stage) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Stage. +func (mg *Stage) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Stage. +func (mg *Stage) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Stage. +func (mg *Stage) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Stage. +func (mg *Stage) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Stage. +func (mg *Stage) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Stage. +func (mg *Stage) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Stage. +func (mg *Stage) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Stage. +func (mg *Stage) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Stage. +func (mg *Stage) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Stage. 
+func (mg *Stage) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Stage. +func (mg *Stage) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this UsagePlan. +func (mg *UsagePlan) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this UsagePlan. +func (mg *UsagePlan) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this UsagePlan. +func (mg *UsagePlan) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this UsagePlan. +func (mg *UsagePlan) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this UsagePlan. +func (mg *UsagePlan) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this UsagePlan. +func (mg *UsagePlan) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this UsagePlan. +func (mg *UsagePlan) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this UsagePlan. +func (mg *UsagePlan) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this UsagePlan. +func (mg *UsagePlan) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this UsagePlan. +func (mg *UsagePlan) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this UsagePlan. 
+func (mg *UsagePlan) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this UsagePlan. +func (mg *UsagePlan) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/apigateway/v1beta2/zz_generated.managedlist.go b/apis/apigateway/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..a59dcfa287 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DocumentationPartList. +func (l *DocumentationPartList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DomainNameList. +func (l *DomainNameList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IntegrationList. +func (l *IntegrationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MethodSettingsList. +func (l *MethodSettingsList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RestAPIList. +func (l *RestAPIList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StageList. 
+func (l *StageList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this UsagePlanList. +func (l *UsagePlanList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/apigateway/v1beta2/zz_generated.resolvers.go b/apis/apigateway/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..46f255fd59 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,631 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *DocumentationPart) ResolveReferences( // ResolveReferences of this DocumentationPart. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RestAPIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RestAPIIDRef, + Selector: mg.Spec.ForProvider.RestAPIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RestAPIID") + } + mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RestAPIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RestAPIIDRef, + Selector: mg.Spec.InitProvider.RestAPIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RestAPIID") + } + mg.Spec.InitProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RestAPIIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DomainName. 
+func (mg *DomainName) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "CertificateValidation", "CertificateValidationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CertificateArn), + Extract: resource.ExtractParamPath("certificate_arn", false), + Reference: mg.Spec.ForProvider.CertificateArnRef, + Selector: mg.Spec.ForProvider.CertificateArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CertificateArn") + } + mg.Spec.ForProvider.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CertificateArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "CertificateValidation", "CertificateValidationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RegionalCertificateArn), + Extract: resource.ExtractParamPath("certificate_arn", false), + Reference: mg.Spec.ForProvider.RegionalCertificateArnRef, + Selector: mg.Spec.ForProvider.RegionalCertificateArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RegionalCertificateArn") + } + mg.Spec.ForProvider.RegionalCertificateArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RegionalCertificateArnRef = 
rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "CertificateValidation", "CertificateValidationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CertificateArn), + Extract: resource.ExtractParamPath("certificate_arn", false), + Reference: mg.Spec.InitProvider.CertificateArnRef, + Selector: mg.Spec.InitProvider.CertificateArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CertificateArn") + } + mg.Spec.InitProvider.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CertificateArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "CertificateValidation", "CertificateValidationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RegionalCertificateArn), + Extract: resource.ExtractParamPath("certificate_arn", false), + Reference: mg.Spec.InitProvider.RegionalCertificateArnRef, + Selector: mg.Spec.InitProvider.RegionalCertificateArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RegionalCertificateArn") + } + mg.Spec.InitProvider.RegionalCertificateArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RegionalCertificateArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Integration. 
+func (mg *Integration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "VPCLink", "VPCLinkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ConnectionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ConnectionIDRef, + Selector: mg.Spec.ForProvider.ConnectionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ConnectionID") + } + mg.Spec.ForProvider.ConnectionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ConnectionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "Method", "MethodList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HTTPMethod), + Extract: resource.ExtractParamPath("http_method", false), + Reference: mg.Spec.ForProvider.HTTPMethodRef, + Selector: mg.Spec.ForProvider.HTTPMethodSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.HTTPMethod") + } + mg.Spec.ForProvider.HTTPMethod = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.HTTPMethodRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "Resource", "ResourceList") + if err != nil { 
+ return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ResourceIDRef, + Selector: mg.Spec.ForProvider.ResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceID") + } + mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RestAPIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RestAPIIDRef, + Selector: mg.Spec.ForProvider.RestAPIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RestAPIID") + } + mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.URI), + Extract: resource.ExtractParamPath("invoke_arn", true), + Reference: mg.Spec.ForProvider.URIRef, + Selector: mg.Spec.ForProvider.URISelector, + To: reference.To{List: 
l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.URI") + } + mg.Spec.ForProvider.URI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.URIRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "VPCLink", "VPCLinkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ConnectionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ConnectionIDRef, + Selector: mg.Spec.InitProvider.ConnectionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ConnectionID") + } + mg.Spec.InitProvider.ConnectionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ConnectionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "Method", "MethodList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.HTTPMethod), + Extract: resource.ExtractParamPath("http_method", false), + Reference: mg.Spec.InitProvider.HTTPMethodRef, + Selector: mg.Spec.InitProvider.HTTPMethodSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HTTPMethod") + } + mg.Spec.InitProvider.HTTPMethod = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HTTPMethodRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "Resource", "ResourceList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ResourceIDRef, + Selector: mg.Spec.InitProvider.ResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceID") + } + mg.Spec.InitProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RestAPIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RestAPIIDRef, + Selector: mg.Spec.InitProvider.RestAPIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RestAPIID") + } + mg.Spec.InitProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RestAPIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.URI), + Extract: resource.ExtractParamPath("invoke_arn", true), + Reference: mg.Spec.InitProvider.URIRef, + Selector: mg.Spec.InitProvider.URISelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.URI") + } + mg.Spec.InitProvider.URI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.URIRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MethodSettings. +func (mg *MethodSettings) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RestAPIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RestAPIIDRef, + Selector: mg.Spec.ForProvider.RestAPIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RestAPIID") + } + mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "Stage", "StageList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StageName), + Extract: resource.ExtractParamPath("stage_name", false), + Reference: mg.Spec.ForProvider.StageNameRef, + Selector: mg.Spec.ForProvider.StageNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.StageName") + } + mg.Spec.ForProvider.StageName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StageNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RestAPIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RestAPIIDRef, + Selector: mg.Spec.InitProvider.RestAPIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RestAPIID") + } + mg.Spec.InitProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RestAPIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "Stage", "StageList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StageName), + Extract: resource.ExtractParamPath("stage_name", false), + Reference: mg.Spec.InitProvider.StageNameRef, + Selector: mg.Spec.InitProvider.StageNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StageName") + } + mg.Spec.InitProvider.StageName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StageNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Stage. 
+func (mg *Stage) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "Deployment", "DeploymentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DeploymentID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DeploymentIDRef, + Selector: mg.Spec.ForProvider.DeploymentIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DeploymentID") + } + mg.Spec.ForProvider.DeploymentID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DeploymentIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RestAPIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RestAPIIDRef, + Selector: mg.Spec.ForProvider.RestAPIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RestAPIID") + } + mg.Spec.ForProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RestAPIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta1", "Deployment", "DeploymentList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DeploymentID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DeploymentIDRef, + Selector: mg.Spec.InitProvider.DeploymentIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DeploymentID") + } + mg.Spec.InitProvider.DeploymentID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DeploymentIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RestAPIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RestAPIIDRef, + Selector: mg.Spec.InitProvider.RestAPIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RestAPIID") + } + mg.Spec.InitProvider.RestAPIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RestAPIIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this UsagePlan. 
+func (mg *UsagePlan) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.APIStages); i3++ { + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIStages[i3].APIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.APIStages[i3].APIIDRef, + Selector: mg.Spec.ForProvider.APIStages[i3].APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIStages[i3].APIID") + } + mg.Spec.ForProvider.APIStages[i3].APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIStages[i3].APIIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.APIStages); i3++ { + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "Stage", "StageList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIStages[i3].Stage), + Extract: resource.ExtractParamPath("stage_name", false), + Reference: mg.Spec.ForProvider.APIStages[i3].StageRef, + Selector: mg.Spec.ForProvider.APIStages[i3].StageSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIStages[i3].Stage") + } + mg.Spec.ForProvider.APIStages[i3].Stage = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIStages[i3].StageRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.APIStages); i3++ { + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "RestAPI", "RestAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.APIStages[i3].APIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.APIStages[i3].APIIDRef, + Selector: mg.Spec.InitProvider.APIStages[i3].APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.APIStages[i3].APIID") + } + mg.Spec.InitProvider.APIStages[i3].APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIStages[i3].APIIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.APIStages); i3++ { + { + m, l, err = apisresolver.GetManagedResource("apigateway.aws.upbound.io", "v1beta2", "Stage", "StageList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.APIStages[i3].Stage), + Extract: resource.ExtractParamPath("stage_name", false), + Reference: mg.Spec.InitProvider.APIStages[i3].StageRef, + Selector: mg.Spec.InitProvider.APIStages[i3].StageSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.APIStages[i3].Stage") + } + mg.Spec.InitProvider.APIStages[i3].Stage = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIStages[i3].StageRef = rsp.ResolvedReference + + } + + return nil +} diff 
--git a/apis/apigateway/v1beta2/zz_groupversion_info.go b/apis/apigateway/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..fa0ddc2aeb --- /dev/null +++ b/apis/apigateway/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=apigateway.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "apigateway.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/apigateway/v1beta2/zz_integration_terraformed.go b/apis/apigateway/v1beta2/zz_integration_terraformed.go new file mode 100755 index 0000000000..03363171e2 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_integration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Integration +func (mg *Integration) GetTerraformResourceType() string { + return "aws_api_gateway_integration" +} + +// GetConnectionDetailsMapping for this Integration +func (tr *Integration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Integration +func (tr *Integration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Integration +func (tr *Integration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Integration +func (tr *Integration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Integration +func (tr *Integration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Integration +func (tr *Integration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Integration +func (tr *Integration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Integration +func (tr *Integration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Integration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Integration) LateInitialize(attrs []byte) (bool, error) { + params := &IntegrationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Integration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigateway/v1beta2/zz_integration_types.go b/apis/apigateway/v1beta2/zz_integration_types.go new file mode 100755 index 0000000000..c504b96f51 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_integration_types.go @@ -0,0 +1,413 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IntegrationInitParameters struct { + + // List of cache key parameters for the integration. + // +listType=set + CacheKeyParameters []*string `json:"cacheKeyParameters,omitempty" tf:"cache_key_parameters,omitempty"` + + // Integration's cache namespace. + CacheNamespace *string `json:"cacheNamespace,omitempty" tf:"cache_namespace,omitempty"` + + // ID of the VpcLink used for the integration. Required if connection_type is VPC_LINK + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.VPCLink + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // Reference to a VPCLink in apigateway to populate connectionId. + // +kubebuilder:validation:Optional + ConnectionIDRef *v1.Reference `json:"connectionIdRef,omitempty" tf:"-"` + + // Selector for a VPCLink in apigateway to populate connectionId. + // +kubebuilder:validation:Optional + ConnectionIDSelector *v1.Selector `json:"connectionIdSelector,omitempty" tf:"-"` + + // Integration input's connectionType. 
Valid values are INTERNET (default for connections through the public routable internet), and VPC_LINK (for private connections between API Gateway and a network load balancer in a VPC). + ConnectionType *string `json:"connectionType,omitempty" tf:"connection_type,omitempty"` + + // How to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT. If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. + ContentHandling *string `json:"contentHandling,omitempty" tf:"content_handling,omitempty"` + + // Credentials required for the integration. For AWS integrations, 2 options are available. To specify an IAM Role for Amazon API Gateway to assume, use the role's ARN. To require that the caller's identity be passed through from the request, specify the string arn:aws:iam::\*:user/\*. + Credentials *string `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // HTTP method (GET, POST, PUT, DELETE, HEAD, OPTION, ANY) + // when calling the associated resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.Method + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("http_method",false) + HTTPMethod *string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + + // Reference to a Method in apigateway to populate httpMethod. + // +kubebuilder:validation:Optional + HTTPMethodRef *v1.Reference `json:"httpMethodRef,omitempty" tf:"-"` + + // Selector for a Method in apigateway to populate httpMethod. 
+ // +kubebuilder:validation:Optional + HTTPMethodSelector *v1.Selector `json:"httpMethodSelector,omitempty" tf:"-"` + + // Integration HTTP method + // (GET, POST, PUT, DELETE, HEAD, OPTIONs, ANY, PATCH) specifying how API Gateway will interact with the back end. + // Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + // Not all methods are compatible with all AWS integrations. + // e.g., Lambda function can only be invoked via POST. + IntegrationHTTPMethod *string `json:"integrationHttpMethod,omitempty" tf:"integration_http_method,omitempty"` + + // Integration passthrough behavior (WHEN_NO_MATCH, WHEN_NO_TEMPLATES, NEVER). Required if request_templates is used. + PassthroughBehavior *string `json:"passthroughBehavior,omitempty" tf:"passthrough_behavior,omitempty"` + + // Map of request query string parameters and headers that should be passed to the backend responder. + // For example: request_parameters = { "integration.request.header.X-Some-Other-Header" = "method.request.header.X-Some-Header" } + // +mapType=granular + RequestParameters map[string]*string `json:"requestParameters,omitempty" tf:"request_parameters,omitempty"` + + // Map of the integration's request templates. + // +mapType=granular + RequestTemplates map[string]*string `json:"requestTemplates,omitempty" tf:"request_templates,omitempty"` + + // API resource ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.Resource + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Reference to a Resource in apigateway to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDRef *v1.Reference `json:"resourceIdRef,omitempty" tf:"-"` + + // Selector for a Resource in apigateway to populate resourceId. 
+ // +kubebuilder:validation:Optional + ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` + + // ID of the associated REST API. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDRef *v1.Reference `json:"restApiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDSelector *v1.Selector `json:"restApiIdSelector,omitempty" tf:"-"` + + // TLS configuration. See below. + TLSConfig *TLSConfigInitParameters `json:"tlsConfig,omitempty" tf:"tls_config,omitempty"` + + // Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds. + TimeoutMilliseconds *float64 `json:"timeoutMilliseconds,omitempty" tf:"timeout_milliseconds,omitempty"` + + // Integration input's type. Valid values are HTTP (for HTTP backends), MOCK (not calling any real backend), AWS (for AWS services), AWS_PROXY (for Lambda proxy integration) and HTTP_PROXY (for HTTP proxy integration). An HTTP or HTTP_PROXY integration with a connection_type of VPC_LINK is referred to as a private integration and uses a VpcLink to connect API Gateway to a network load balancer of a VPC. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Input's URI. Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + // For HTTP integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification . For AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{subdomain.service|service}:{path|action}/{service_api}. 
region, subdomain and service are used to determine the right endpoint. + // e.g., arn:aws:apigateway:eu-west-1:lambda:path/2015-03-31/functions/arn:aws:lambda:eu-west-1:012345678901:function:my-func/invocations. For private integrations, the URI parameter is not used for routing requests to your endpoint, but is used for setting the Host header and for certificate validation. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("invoke_arn",true) + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Reference to a Function in lambda to populate uri. + // +kubebuilder:validation:Optional + URIRef *v1.Reference `json:"uriRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate uri. + // +kubebuilder:validation:Optional + URISelector *v1.Selector `json:"uriSelector,omitempty" tf:"-"` +} + +type IntegrationObservation struct { + + // List of cache key parameters for the integration. + // +listType=set + CacheKeyParameters []*string `json:"cacheKeyParameters,omitempty" tf:"cache_key_parameters,omitempty"` + + // Integration's cache namespace. + CacheNamespace *string `json:"cacheNamespace,omitempty" tf:"cache_namespace,omitempty"` + + // ID of the VpcLink used for the integration. Required if connection_type is VPC_LINK + ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // Integration input's connectionType. Valid values are INTERNET (default for connections through the public routable internet), and VPC_LINK (for private connections between API Gateway and a network load balancer in a VPC). + ConnectionType *string `json:"connectionType,omitempty" tf:"connection_type,omitempty"` + + // How to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT. 
If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. + ContentHandling *string `json:"contentHandling,omitempty" tf:"content_handling,omitempty"` + + // Credentials required for the integration. For AWS integrations, 2 options are available. To specify an IAM Role for Amazon API Gateway to assume, use the role's ARN. To require that the caller's identity be passed through from the request, specify the string arn:aws:iam::\*:user/\*. + Credentials *string `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // HTTP method (GET, POST, PUT, DELETE, HEAD, OPTION, ANY) + // when calling the associated resource. + HTTPMethod *string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Integration HTTP method + // (GET, POST, PUT, DELETE, HEAD, OPTIONs, ANY, PATCH) specifying how API Gateway will interact with the back end. + // Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + // Not all methods are compatible with all AWS integrations. + // e.g., Lambda function can only be invoked via POST. + IntegrationHTTPMethod *string `json:"integrationHttpMethod,omitempty" tf:"integration_http_method,omitempty"` + + // Integration passthrough behavior (WHEN_NO_MATCH, WHEN_NO_TEMPLATES, NEVER). Required if request_templates is used. + PassthroughBehavior *string `json:"passthroughBehavior,omitempty" tf:"passthrough_behavior,omitempty"` + + // Map of request query string parameters and headers that should be passed to the backend responder. 
+ // For example: request_parameters = { "integration.request.header.X-Some-Other-Header" = "method.request.header.X-Some-Header" } + // +mapType=granular + RequestParameters map[string]*string `json:"requestParameters,omitempty" tf:"request_parameters,omitempty"` + + // Map of the integration's request templates. + // +mapType=granular + RequestTemplates map[string]*string `json:"requestTemplates,omitempty" tf:"request_templates,omitempty"` + + // API resource ID. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // ID of the associated REST API. + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // TLS configuration. See below. + TLSConfig *TLSConfigObservation `json:"tlsConfig,omitempty" tf:"tls_config,omitempty"` + + // Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds. + TimeoutMilliseconds *float64 `json:"timeoutMilliseconds,omitempty" tf:"timeout_milliseconds,omitempty"` + + // Integration input's type. Valid values are HTTP (for HTTP backends), MOCK (not calling any real backend), AWS (for AWS services), AWS_PROXY (for Lambda proxy integration) and HTTP_PROXY (for HTTP proxy integration). An HTTP or HTTP_PROXY integration with a connection_type of VPC_LINK is referred to as a private integration and uses a VpcLink to connect API Gateway to a network load balancer of a VPC. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Input's URI. Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + // For HTTP integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification . For AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{subdomain.service|service}:{path|action}/{service_api}. region, subdomain and service are used to determine the right endpoint. 
+ // e.g., arn:aws:apigateway:eu-west-1:lambda:path/2015-03-31/functions/arn:aws:lambda:eu-west-1:012345678901:function:my-func/invocations. For private integrations, the URI parameter is not used for routing requests to your endpoint, but is used for setting the Host header and for certificate validation. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type IntegrationParameters struct { + + // List of cache key parameters for the integration. + // +kubebuilder:validation:Optional + // +listType=set + CacheKeyParameters []*string `json:"cacheKeyParameters,omitempty" tf:"cache_key_parameters,omitempty"` + + // Integration's cache namespace. + // +kubebuilder:validation:Optional + CacheNamespace *string `json:"cacheNamespace,omitempty" tf:"cache_namespace,omitempty"` + + // ID of the VpcLink used for the integration. Required if connection_type is VPC_LINK + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.VPCLink + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // Reference to a VPCLink in apigateway to populate connectionId. + // +kubebuilder:validation:Optional + ConnectionIDRef *v1.Reference `json:"connectionIdRef,omitempty" tf:"-"` + + // Selector for a VPCLink in apigateway to populate connectionId. + // +kubebuilder:validation:Optional + ConnectionIDSelector *v1.Selector `json:"connectionIdSelector,omitempty" tf:"-"` + + // Integration input's connectionType. Valid values are INTERNET (default for connections through the public routable internet), and VPC_LINK (for private connections between API Gateway and a network load balancer in a VPC). 
+ // +kubebuilder:validation:Optional + ConnectionType *string `json:"connectionType,omitempty" tf:"connection_type,omitempty"` + + // How to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT. If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. + // +kubebuilder:validation:Optional + ContentHandling *string `json:"contentHandling,omitempty" tf:"content_handling,omitempty"` + + // Credentials required for the integration. For AWS integrations, 2 options are available. To specify an IAM Role for Amazon API Gateway to assume, use the role's ARN. To require that the caller's identity be passed through from the request, specify the string arn:aws:iam::\*:user/\*. + // +kubebuilder:validation:Optional + Credentials *string `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // HTTP method (GET, POST, PUT, DELETE, HEAD, OPTION, ANY) + // when calling the associated resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.Method + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("http_method",false) + // +kubebuilder:validation:Optional + HTTPMethod *string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + + // Reference to a Method in apigateway to populate httpMethod. + // +kubebuilder:validation:Optional + HTTPMethodRef *v1.Reference `json:"httpMethodRef,omitempty" tf:"-"` + + // Selector for a Method in apigateway to populate httpMethod. + // +kubebuilder:validation:Optional + HTTPMethodSelector *v1.Selector `json:"httpMethodSelector,omitempty" tf:"-"` + + // Integration HTTP method + // (GET, POST, PUT, DELETE, HEAD, OPTIONs, ANY, PATCH) specifying how API Gateway will interact with the back end. 
+ // Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + // Not all methods are compatible with all AWS integrations. + // e.g., Lambda function can only be invoked via POST. + // +kubebuilder:validation:Optional + IntegrationHTTPMethod *string `json:"integrationHttpMethod,omitempty" tf:"integration_http_method,omitempty"` + + // Integration passthrough behavior (WHEN_NO_MATCH, WHEN_NO_TEMPLATES, NEVER). Required if request_templates is used. + // +kubebuilder:validation:Optional + PassthroughBehavior *string `json:"passthroughBehavior,omitempty" tf:"passthrough_behavior,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Map of request query string parameters and headers that should be passed to the backend responder. + // For example: request_parameters = { "integration.request.header.X-Some-Other-Header" = "method.request.header.X-Some-Header" } + // +kubebuilder:validation:Optional + // +mapType=granular + RequestParameters map[string]*string `json:"requestParameters,omitempty" tf:"request_parameters,omitempty"` + + // Map of the integration's request templates. + // +kubebuilder:validation:Optional + // +mapType=granular + RequestTemplates map[string]*string `json:"requestTemplates,omitempty" tf:"request_templates,omitempty"` + + // API resource ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.Resource + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Reference to a Resource in apigateway to populate resourceId. 
+ // +kubebuilder:validation:Optional + ResourceIDRef *v1.Reference `json:"resourceIdRef,omitempty" tf:"-"` + + // Selector for a Resource in apigateway to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` + + // ID of the associated REST API. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDRef *v1.Reference `json:"restApiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDSelector *v1.Selector `json:"restApiIdSelector,omitempty" tf:"-"` + + // TLS configuration. See below. + // +kubebuilder:validation:Optional + TLSConfig *TLSConfigParameters `json:"tlsConfig,omitempty" tf:"tls_config,omitempty"` + + // Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds. + // +kubebuilder:validation:Optional + TimeoutMilliseconds *float64 `json:"timeoutMilliseconds,omitempty" tf:"timeout_milliseconds,omitempty"` + + // Integration input's type. Valid values are HTTP (for HTTP backends), MOCK (not calling any real backend), AWS (for AWS services), AWS_PROXY (for Lambda proxy integration) and HTTP_PROXY (for HTTP proxy integration). An HTTP or HTTP_PROXY integration with a connection_type of VPC_LINK is referred to as a private integration and uses a VpcLink to connect API Gateway to a network load balancer of a VPC. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Input's URI. 
Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + // For HTTP integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification . For AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{subdomain.service|service}:{path|action}/{service_api}. region, subdomain and service are used to determine the right endpoint. + // e.g., arn:aws:apigateway:eu-west-1:lambda:path/2015-03-31/functions/arn:aws:lambda:eu-west-1:012345678901:function:my-func/invocations. For private integrations, the URI parameter is not used for routing requests to your endpoint, but is used for setting the Host header and for certificate validation. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("invoke_arn",true) + // +kubebuilder:validation:Optional + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Reference to a Function in lambda to populate uri. + // +kubebuilder:validation:Optional + URIRef *v1.Reference `json:"uriRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate uri. + // +kubebuilder:validation:Optional + URISelector *v1.Selector `json:"uriSelector,omitempty" tf:"-"` +} + +type TLSConfigInitParameters struct { + + // Whether or not API Gateway skips verification that the certificate for an integration endpoint is issued by a supported certificate authority. This isn’t recommended, but it enables you to use certificates that are signed by private certificate authorities, or certificates that are self-signed. If enabled, API Gateway still performs basic certificate validation, which includes checking the certificate's expiration date, hostname, and presence of a root certificate authority. Supported only for HTTP and HTTP_PROXY integrations. 
+ InsecureSkipVerification *bool `json:"insecureSkipVerification,omitempty" tf:"insecure_skip_verification,omitempty"` +} + +type TLSConfigObservation struct { + + // Whether or not API Gateway skips verification that the certificate for an integration endpoint is issued by a supported certificate authority. This isn’t recommended, but it enables you to use certificates that are signed by private certificate authorities, or certificates that are self-signed. If enabled, API Gateway still performs basic certificate validation, which includes checking the certificate's expiration date, hostname, and presence of a root certificate authority. Supported only for HTTP and HTTP_PROXY integrations. + InsecureSkipVerification *bool `json:"insecureSkipVerification,omitempty" tf:"insecure_skip_verification,omitempty"` +} + +type TLSConfigParameters struct { + + // Whether or not API Gateway skips verification that the certificate for an integration endpoint is issued by a supported certificate authority. This isn’t recommended, but it enables you to use certificates that are signed by private certificate authorities, or certificates that are self-signed. If enabled, API Gateway still performs basic certificate validation, which includes checking the certificate's expiration date, hostname, and presence of a root certificate authority. Supported only for HTTP and HTTP_PROXY integrations. + // +kubebuilder:validation:Optional + InsecureSkipVerification *bool `json:"insecureSkipVerification,omitempty" tf:"insecure_skip_verification,omitempty"` +} + +// IntegrationSpec defines the desired state of Integration +type IntegrationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IntegrationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IntegrationInitParameters `json:"initProvider,omitempty"` +} + +// IntegrationStatus defines the observed state of Integration. +type IntegrationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IntegrationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Integration is the Schema for the Integrations API. Provides an HTTP Method Integration for an API Gateway Integration. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Integration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec IntegrationSpec `json:"spec"` + Status IntegrationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IntegrationList 
contains a list of Integrations +type IntegrationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Integration `json:"items"` +} + +// Repository type metadata. +var ( + Integration_Kind = "Integration" + Integration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Integration_Kind}.String() + Integration_KindAPIVersion = Integration_Kind + "." + CRDGroupVersion.String() + Integration_GroupVersionKind = CRDGroupVersion.WithKind(Integration_Kind) +) + +func init() { + SchemeBuilder.Register(&Integration{}, &IntegrationList{}) +} diff --git a/apis/apigateway/v1beta2/zz_methodsettings_terraformed.go b/apis/apigateway/v1beta2/zz_methodsettings_terraformed.go new file mode 100755 index 0000000000..2b9f287080 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_methodsettings_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MethodSettings +func (mg *MethodSettings) GetTerraformResourceType() string { + return "aws_api_gateway_method_settings" +} + +// GetConnectionDetailsMapping for this MethodSettings +func (tr *MethodSettings) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MethodSettings +func (tr *MethodSettings) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MethodSettings +func (tr *MethodSettings) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MethodSettings +func (tr *MethodSettings) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MethodSettings +func (tr *MethodSettings) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MethodSettings +func (tr *MethodSettings) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MethodSettings +func (tr *MethodSettings) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if 
err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MethodSettings +func (tr *MethodSettings) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MethodSettings using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MethodSettings) LateInitialize(attrs []byte) (bool, error) { + params := &MethodSettingsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MethodSettings) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigateway/v1beta2/zz_methodsettings_types.go b/apis/apigateway/v1beta2/zz_methodsettings_types.go new file mode 100755 index 0000000000..df64e56f55 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_methodsettings_types.go @@ -0,0 +1,280 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MethodSettingsInitParameters struct { + + // Method path defined as {resource_path}/{http_method} for an individual method override, or */* for overriding all methods in the stage. Ensure to trim any leading forward slashes in the path (e.g., trimprefix(aws_api_gateway_resource.example.path, "/")). + MethodPath *string `json:"methodPath,omitempty" tf:"method_path,omitempty"` + + // ID of the REST API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDRef *v1.Reference `json:"restApiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDSelector *v1.Selector `json:"restApiIdSelector,omitempty" tf:"-"` + + // Settings block, see below. 
+ Settings *SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"` + + // Name of the stage + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.Stage + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("stage_name",false) + StageName *string `json:"stageName,omitempty" tf:"stage_name,omitempty"` + + // Reference to a Stage in apigateway to populate stageName. + // +kubebuilder:validation:Optional + StageNameRef *v1.Reference `json:"stageNameRef,omitempty" tf:"-"` + + // Selector for a Stage in apigateway to populate stageName. + // +kubebuilder:validation:Optional + StageNameSelector *v1.Selector `json:"stageNameSelector,omitempty" tf:"-"` +} + +type MethodSettingsObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Method path defined as {resource_path}/{http_method} for an individual method override, or */* for overriding all methods in the stage. Ensure to trim any leading forward slashes in the path (e.g., trimprefix(aws_api_gateway_resource.example.path, "/")). + MethodPath *string `json:"methodPath,omitempty" tf:"method_path,omitempty"` + + // ID of the REST API + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Settings block, see below. + Settings *SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"` + + // Name of the stage + StageName *string `json:"stageName,omitempty" tf:"stage_name,omitempty"` +} + +type MethodSettingsParameters struct { + + // Method path defined as {resource_path}/{http_method} for an individual method override, or */* for overriding all methods in the stage. Ensure to trim any leading forward slashes in the path (e.g., trimprefix(aws_api_gateway_resource.example.path, "/")). 
+ // +kubebuilder:validation:Optional + MethodPath *string `json:"methodPath,omitempty" tf:"method_path,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ID of the REST API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDRef *v1.Reference `json:"restApiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDSelector *v1.Selector `json:"restApiIdSelector,omitempty" tf:"-"` + + // Settings block, see below. + // +kubebuilder:validation:Optional + Settings *SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"` + + // Name of the stage + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.Stage + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("stage_name",false) + // +kubebuilder:validation:Optional + StageName *string `json:"stageName,omitempty" tf:"stage_name,omitempty"` + + // Reference to a Stage in apigateway to populate stageName. + // +kubebuilder:validation:Optional + StageNameRef *v1.Reference `json:"stageNameRef,omitempty" tf:"-"` + + // Selector for a Stage in apigateway to populate stageName. + // +kubebuilder:validation:Optional + StageNameSelector *v1.Selector `json:"stageNameSelector,omitempty" tf:"-"` +} + +type SettingsInitParameters struct { + + // Whether the cached responses are encrypted. 
+ CacheDataEncrypted *bool `json:"cacheDataEncrypted,omitempty" tf:"cache_data_encrypted,omitempty"` + + // Time to live (TTL), in seconds, for cached responses. The higher the TTL, the longer the response will be cached. + CacheTTLInSeconds *float64 `json:"cacheTtlInSeconds,omitempty" tf:"cache_ttl_in_seconds,omitempty"` + + // Whether responses should be cached and returned for requests. A cache cluster must be enabled on the stage for responses to be cached. + CachingEnabled *bool `json:"cachingEnabled,omitempty" tf:"caching_enabled,omitempty"` + + // Whether data trace logging is enabled for this method, which effects the log entries pushed to Amazon CloudWatch Logs. + DataTraceEnabled *bool `json:"dataTraceEnabled,omitempty" tf:"data_trace_enabled,omitempty"` + + // Logging level for this method, which effects the log entries pushed to Amazon CloudWatch Logs. The available levels are OFF, ERROR, and INFO. + LoggingLevel *string `json:"loggingLevel,omitempty" tf:"logging_level,omitempty"` + + // Whether Amazon CloudWatch metrics are enabled for this method. + MetricsEnabled *bool `json:"metricsEnabled,omitempty" tf:"metrics_enabled,omitempty"` + + // Whether authorization is required for a cache invalidation request. + RequireAuthorizationForCacheControl *bool `json:"requireAuthorizationForCacheControl,omitempty" tf:"require_authorization_for_cache_control,omitempty"` + + // Throttling burst limit. Default: -1 (throttling disabled). + ThrottlingBurstLimit *float64 `json:"throttlingBurstLimit,omitempty" tf:"throttling_burst_limit,omitempty"` + + // Throttling rate limit. Default: -1 (throttling disabled). + ThrottlingRateLimit *float64 `json:"throttlingRateLimit,omitempty" tf:"throttling_rate_limit,omitempty"` + + // How to handle unauthorized requests for cache invalidation. The available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, SUCCEED_WITHOUT_RESPONSE_HEADER. 
+ UnauthorizedCacheControlHeaderStrategy *string `json:"unauthorizedCacheControlHeaderStrategy,omitempty" tf:"unauthorized_cache_control_header_strategy,omitempty"` +} + +type SettingsObservation struct { + + // Whether the cached responses are encrypted. + CacheDataEncrypted *bool `json:"cacheDataEncrypted,omitempty" tf:"cache_data_encrypted,omitempty"` + + // Time to live (TTL), in seconds, for cached responses. The higher the TTL, the longer the response will be cached. + CacheTTLInSeconds *float64 `json:"cacheTtlInSeconds,omitempty" tf:"cache_ttl_in_seconds,omitempty"` + + // Whether responses should be cached and returned for requests. A cache cluster must be enabled on the stage for responses to be cached. + CachingEnabled *bool `json:"cachingEnabled,omitempty" tf:"caching_enabled,omitempty"` + + // Whether data trace logging is enabled for this method, which effects the log entries pushed to Amazon CloudWatch Logs. + DataTraceEnabled *bool `json:"dataTraceEnabled,omitempty" tf:"data_trace_enabled,omitempty"` + + // Logging level for this method, which effects the log entries pushed to Amazon CloudWatch Logs. The available levels are OFF, ERROR, and INFO. + LoggingLevel *string `json:"loggingLevel,omitempty" tf:"logging_level,omitempty"` + + // Whether Amazon CloudWatch metrics are enabled for this method. + MetricsEnabled *bool `json:"metricsEnabled,omitempty" tf:"metrics_enabled,omitempty"` + + // Whether authorization is required for a cache invalidation request. + RequireAuthorizationForCacheControl *bool `json:"requireAuthorizationForCacheControl,omitempty" tf:"require_authorization_for_cache_control,omitempty"` + + // Throttling burst limit. Default: -1 (throttling disabled). + ThrottlingBurstLimit *float64 `json:"throttlingBurstLimit,omitempty" tf:"throttling_burst_limit,omitempty"` + + // Throttling rate limit. Default: -1 (throttling disabled). 
+ ThrottlingRateLimit *float64 `json:"throttlingRateLimit,omitempty" tf:"throttling_rate_limit,omitempty"` + + // How to handle unauthorized requests for cache invalidation. The available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, SUCCEED_WITHOUT_RESPONSE_HEADER. + UnauthorizedCacheControlHeaderStrategy *string `json:"unauthorizedCacheControlHeaderStrategy,omitempty" tf:"unauthorized_cache_control_header_strategy,omitempty"` +} + +type SettingsParameters struct { + + // Whether the cached responses are encrypted. + // +kubebuilder:validation:Optional + CacheDataEncrypted *bool `json:"cacheDataEncrypted,omitempty" tf:"cache_data_encrypted,omitempty"` + + // Time to live (TTL), in seconds, for cached responses. The higher the TTL, the longer the response will be cached. + // +kubebuilder:validation:Optional + CacheTTLInSeconds *float64 `json:"cacheTtlInSeconds,omitempty" tf:"cache_ttl_in_seconds,omitempty"` + + // Whether responses should be cached and returned for requests. A cache cluster must be enabled on the stage for responses to be cached. + // +kubebuilder:validation:Optional + CachingEnabled *bool `json:"cachingEnabled,omitempty" tf:"caching_enabled,omitempty"` + + // Whether data trace logging is enabled for this method, which effects the log entries pushed to Amazon CloudWatch Logs. + // +kubebuilder:validation:Optional + DataTraceEnabled *bool `json:"dataTraceEnabled,omitempty" tf:"data_trace_enabled,omitempty"` + + // Logging level for this method, which effects the log entries pushed to Amazon CloudWatch Logs. The available levels are OFF, ERROR, and INFO. + // +kubebuilder:validation:Optional + LoggingLevel *string `json:"loggingLevel,omitempty" tf:"logging_level,omitempty"` + + // Whether Amazon CloudWatch metrics are enabled for this method. 
+ // +kubebuilder:validation:Optional + MetricsEnabled *bool `json:"metricsEnabled,omitempty" tf:"metrics_enabled,omitempty"` + + // Whether authorization is required for a cache invalidation request. + // +kubebuilder:validation:Optional + RequireAuthorizationForCacheControl *bool `json:"requireAuthorizationForCacheControl,omitempty" tf:"require_authorization_for_cache_control,omitempty"` + + // Throttling burst limit. Default: -1 (throttling disabled). + // +kubebuilder:validation:Optional + ThrottlingBurstLimit *float64 `json:"throttlingBurstLimit,omitempty" tf:"throttling_burst_limit,omitempty"` + + // Throttling rate limit. Default: -1 (throttling disabled). + // +kubebuilder:validation:Optional + ThrottlingRateLimit *float64 `json:"throttlingRateLimit,omitempty" tf:"throttling_rate_limit,omitempty"` + + // How to handle unauthorized requests for cache invalidation. The available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, SUCCEED_WITHOUT_RESPONSE_HEADER. + // +kubebuilder:validation:Optional + UnauthorizedCacheControlHeaderStrategy *string `json:"unauthorizedCacheControlHeaderStrategy,omitempty" tf:"unauthorized_cache_control_header_strategy,omitempty"` +} + +// MethodSettingsSpec defines the desired state of MethodSettings +type MethodSettingsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MethodSettingsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MethodSettingsInitParameters `json:"initProvider,omitempty"` +} + +// MethodSettingsStatus defines the observed state of MethodSettings. +type MethodSettingsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MethodSettingsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MethodSettings is the Schema for the MethodSettingss API. Manages API Gateway Stage Method Settings +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type MethodSettings struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.methodPath) || (has(self.initProvider) && has(self.initProvider.methodPath))",message="spec.forProvider.methodPath is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.settings) || (has(self.initProvider) && has(self.initProvider.settings))",message="spec.forProvider.settings is a required parameter" + Spec MethodSettingsSpec `json:"spec"` + 
Status MethodSettingsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MethodSettingsList contains a list of MethodSettingss +type MethodSettingsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MethodSettings `json:"items"` +} + +// Repository type metadata. +var ( + MethodSettings_Kind = "MethodSettings" + MethodSettings_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MethodSettings_Kind}.String() + MethodSettings_KindAPIVersion = MethodSettings_Kind + "." + CRDGroupVersion.String() + MethodSettings_GroupVersionKind = CRDGroupVersion.WithKind(MethodSettings_Kind) +) + +func init() { + SchemeBuilder.Register(&MethodSettings{}, &MethodSettingsList{}) +} diff --git a/apis/apigateway/v1beta2/zz_restapi_terraformed.go b/apis/apigateway/v1beta2/zz_restapi_terraformed.go new file mode 100755 index 0000000000..02905b8acb --- /dev/null +++ b/apis/apigateway/v1beta2/zz_restapi_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RestAPI +func (mg *RestAPI) GetTerraformResourceType() string { + return "aws_api_gateway_rest_api" +} + +// GetConnectionDetailsMapping for this RestAPI +func (tr *RestAPI) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RestAPI +func (tr *RestAPI) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RestAPI +func (tr *RestAPI) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this RestAPI +func (tr *RestAPI) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RestAPI +func (tr *RestAPI) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RestAPI +func (tr *RestAPI) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RestAPI +func (tr *RestAPI) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetMergedParameters of this RestAPI +func (tr *RestAPI) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RestAPI using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *RestAPI) LateInitialize(attrs []byte) (bool, error) { + params := &RestAPIParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RestAPI) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigateway/v1beta2/zz_restapi_types.go b/apis/apigateway/v1beta2/zz_restapi_types.go new file mode 100755 index 0000000000..736b768441 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_restapi_types.go @@ -0,0 +1,271 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RestAPIEndpointConfigurationInitParameters struct { + + // List of endpoint types. This resource currently only supports managing a single value. Valid values: EDGE, REGIONAL or PRIVATE. If unspecified, defaults to EDGE. If set to PRIVATE recommend to set put_rest_api_mode = merge to not cause the endpoints and associated Route53 records to be deleted. Refer to the documentation for more information on the difference between edge-optimized and regional APIs. + Types []*string `json:"types,omitempty" tf:"types,omitempty"` + + // Set of VPC Endpoint identifiers. It is only supported for PRIVATE endpoint type. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-endpoint-configuration extension vpcEndpointIds property. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + // +listType=set + VPCEndpointIds []*string `json:"vpcEndpointIds,omitempty" tf:"vpc_endpoint_ids,omitempty"` +} + +type RestAPIEndpointConfigurationObservation struct { + + // List of endpoint types. This resource currently only supports managing a single value. Valid values: EDGE, REGIONAL or PRIVATE. 
If unspecified, defaults to EDGE. If set to PRIVATE recommend to set put_rest_api_mode = merge to not cause the endpoints and associated Route53 records to be deleted. Refer to the documentation for more information on the difference between edge-optimized and regional APIs. + Types []*string `json:"types,omitempty" tf:"types,omitempty"` + + // Set of VPC Endpoint identifiers. It is only supported for PRIVATE endpoint type. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-endpoint-configuration extension vpcEndpointIds property. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + // +listType=set + VPCEndpointIds []*string `json:"vpcEndpointIds,omitempty" tf:"vpc_endpoint_ids,omitempty"` +} + +type RestAPIEndpointConfigurationParameters struct { + + // List of endpoint types. This resource currently only supports managing a single value. Valid values: EDGE, REGIONAL or PRIVATE. If unspecified, defaults to EDGE. If set to PRIVATE recommend to set put_rest_api_mode = merge to not cause the endpoints and associated Route53 records to be deleted. Refer to the documentation for more information on the difference between edge-optimized and regional APIs. + // +kubebuilder:validation:Optional + Types []*string `json:"types" tf:"types,omitempty"` + + // Set of VPC Endpoint identifiers. It is only supported for PRIVATE endpoint type. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-endpoint-configuration extension vpcEndpointIds property. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. 
+ // +kubebuilder:validation:Optional + // +listType=set + VPCEndpointIds []*string `json:"vpcEndpointIds,omitempty" tf:"vpc_endpoint_ids,omitempty"` +} + +type RestAPIInitParameters struct { + + // Source of the API key for requests. Valid values are HEADER (default) and AUTHORIZER. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-api-key-source extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + APIKeySource *string `json:"apiKeySource,omitempty" tf:"api_key_source,omitempty"` + + // List of binary media types supported by the REST API. By default, the REST API supports only UTF-8-encoded text payloads. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-binary-media-types extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + BinaryMediaTypes []*string `json:"binaryMediaTypes,omitempty" tf:"binary_media_types,omitempty"` + + // OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. This configuration, and any updates to it, will replace all REST API configuration except values overridden in this resource configuration and other resource updates applied after this resource but before any aws_api_gateway_deployment creation. More information about REST API OpenAPI support can be found in the API Gateway Developer Guide. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // Description of the REST API. If importing an OpenAPI specification via the body argument, this corresponds to the info.description field. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint. Defaults to false. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-endpoint-configuration extension disableExecuteApiEndpoint property. If the argument value is true and is different than the OpenAPI value, the argument value will override the OpenAPI value. + DisableExecuteAPIEndpoint *bool `json:"disableExecuteApiEndpoint,omitempty" tf:"disable_execute_api_endpoint,omitempty"` + + // Configuration block defining API endpoint configuration including endpoint type. Defined below. + EndpointConfiguration *RestAPIEndpointConfigurationInitParameters `json:"endpointConfiguration,omitempty" tf:"endpoint_configuration,omitempty"` + + // Whether warnings while API Gateway is creating or updating the resource should return an error or not. Defaults to false + FailOnWarnings *bool `json:"failOnWarnings,omitempty" tf:"fail_on_warnings,omitempty"` + + // Minimum response size to compress for the REST API. String containing an integer value between -1 and 10485760 (10MB). -1 will disable an existing compression configuration, and all other values will enable compression with the configured size. New resources can simply omit this argument to disable compression, rather than setting the value to -1. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-minimum-compression-size extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. 
+ MinimumCompressionSize *string `json:"minimumCompressionSize,omitempty" tf:"minimum_compression_size,omitempty"` + + // Name of the REST API. If importing an OpenAPI specification via the body argument, this corresponds to the info.title field. If the argument value is different than the OpenAPI value, the argument value will override the OpenAPI value. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Map of customizations for importing the specification in the body argument. For example, to exclude DocumentationParts from an imported API, set ignore equal to documentation. Additional documentation, including other parameters such as basepath, can be found in the API Gateway Developer Guide. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Mode of the PutRestApi operation when importing an OpenAPI specification via the body argument (create or update operation). Valid values are merge and overwrite. If unspecificed, defaults to overwrite (for backwards compatibility). This corresponds to the x-amazon-apigateway-put-integration-method extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + PutRestAPIMode *string `json:"putRestApiMode,omitempty" tf:"put_rest_api_mode,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type RestAPIObservation struct { + + // Source of the API key for requests. Valid values are HEADER (default) and AUTHORIZER. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-api-key-source extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. 
+ APIKeySource *string `json:"apiKeySource,omitempty" tf:"api_key_source,omitempty"` + + // ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // List of binary media types supported by the REST API. By default, the REST API supports only UTF-8-encoded text payloads. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-binary-media-types extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + BinaryMediaTypes []*string `json:"binaryMediaTypes,omitempty" tf:"binary_media_types,omitempty"` + + // OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. This configuration, and any updates to it, will replace all REST API configuration except values overridden in this resource configuration and other resource updates applied after this resource but before any aws_api_gateway_deployment creation. More information about REST API OpenAPI support can be found in the API Gateway Developer Guide. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // Creation date of the REST API + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // Description of the REST API. If importing an OpenAPI specification via the body argument, this corresponds to the info.description field. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint. Defaults to false. 
If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-endpoint-configuration extension disableExecuteApiEndpoint property. If the argument value is true and is different than the OpenAPI value, the argument value will override the OpenAPI value. + DisableExecuteAPIEndpoint *bool `json:"disableExecuteApiEndpoint,omitempty" tf:"disable_execute_api_endpoint,omitempty"` + + // Configuration block defining API endpoint configuration including endpoint type. Defined below. + EndpointConfiguration *RestAPIEndpointConfigurationObservation `json:"endpointConfiguration,omitempty" tf:"endpoint_configuration,omitempty"` + + // Execution ARN part to be used in lambda_permission's source_arn + // when allowing API Gateway to invoke a Lambda function, + // e.g., arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j, which can be concatenated with allowed stage, method and resource path. + ExecutionArn *string `json:"executionArn,omitempty" tf:"execution_arn,omitempty"` + + // Whether warnings while API Gateway is creating or updating the resource should return an error or not. Defaults to false + FailOnWarnings *bool `json:"failOnWarnings,omitempty" tf:"fail_on_warnings,omitempty"` + + // ID of the REST API + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Minimum response size to compress for the REST API. String containing an integer value between -1 and 10485760 (10MB). -1 will disable an existing compression configuration, and all other values will enable compression with the configured size. New resources can simply omit this argument to disable compression, rather than setting the value to -1. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-minimum-compression-size extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. 
+ MinimumCompressionSize *string `json:"minimumCompressionSize,omitempty" tf:"minimum_compression_size,omitempty"` + + // Name of the REST API. If importing an OpenAPI specification via the body argument, this corresponds to the info.title field. If the argument value is different than the OpenAPI value, the argument value will override the OpenAPI value. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Map of customizations for importing the specification in the body argument. For example, to exclude DocumentationParts from an imported API, set ignore equal to documentation. Additional documentation, including other parameters such as basepath, can be found in the API Gateway Developer Guide. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // JSON formatted policy document that controls access to the API Gateway. We recommend using the aws_api_gateway_rest_api_policy resource instead. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-policy extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // Mode of the PutRestApi operation when importing an OpenAPI specification via the body argument (create or update operation). Valid values are merge and overwrite. If unspecificed, defaults to overwrite (for backwards compatibility). This corresponds to the x-amazon-apigateway-put-integration-method extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. 
+ PutRestAPIMode *string `json:"putRestApiMode,omitempty" tf:"put_rest_api_mode,omitempty"` + + // Resource ID of the REST API's root + RootResourceID *string `json:"rootResourceId,omitempty" tf:"root_resource_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type RestAPIParameters struct { + + // Source of the API key for requests. Valid values are HEADER (default) and AUTHORIZER. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-api-key-source extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + // +kubebuilder:validation:Optional + APIKeySource *string `json:"apiKeySource,omitempty" tf:"api_key_source,omitempty"` + + // List of binary media types supported by the REST API. By default, the REST API supports only UTF-8-encoded text payloads. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-binary-media-types extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + // +kubebuilder:validation:Optional + BinaryMediaTypes []*string `json:"binaryMediaTypes,omitempty" tf:"binary_media_types,omitempty"` + + // OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. This configuration, and any updates to it, will replace all REST API configuration except values overridden in this resource configuration and other resource updates applied after this resource but before any aws_api_gateway_deployment creation. 
More information about REST API OpenAPI support can be found in the API Gateway Developer Guide. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // Description of the REST API. If importing an OpenAPI specification via the body argument, this corresponds to the info.description field. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint. Defaults to false. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-endpoint-configuration extension disableExecuteApiEndpoint property. If the argument value is true and is different than the OpenAPI value, the argument value will override the OpenAPI value. + // +kubebuilder:validation:Optional + DisableExecuteAPIEndpoint *bool `json:"disableExecuteApiEndpoint,omitempty" tf:"disable_execute_api_endpoint,omitempty"` + + // Configuration block defining API endpoint configuration including endpoint type. Defined below. + // +kubebuilder:validation:Optional + EndpointConfiguration *RestAPIEndpointConfigurationParameters `json:"endpointConfiguration,omitempty" tf:"endpoint_configuration,omitempty"` + + // Whether warnings while API Gateway is creating or updating the resource should return an error or not. Defaults to false + // +kubebuilder:validation:Optional + FailOnWarnings *bool `json:"failOnWarnings,omitempty" tf:"fail_on_warnings,omitempty"` + + // Minimum response size to compress for the REST API. 
String containing an integer value between -1 and 10485760 (10MB). -1 will disable an existing compression configuration, and all other values will enable compression with the configured size. New resources can simply omit this argument to disable compression, rather than setting the value to -1. If importing an OpenAPI specification via the body argument, this corresponds to the x-amazon-apigateway-minimum-compression-size extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. + // +kubebuilder:validation:Optional + MinimumCompressionSize *string `json:"minimumCompressionSize,omitempty" tf:"minimum_compression_size,omitempty"` + + // Name of the REST API. If importing an OpenAPI specification via the body argument, this corresponds to the info.title field. If the argument value is different than the OpenAPI value, the argument value will override the OpenAPI value. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Map of customizations for importing the specification in the body argument. For example, to exclude DocumentationParts from an imported API, set ignore equal to documentation. Additional documentation, including other parameters such as basepath, can be found in the API Gateway Developer Guide. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Mode of the PutRestApi operation when importing an OpenAPI specification via the body argument (create or update operation). Valid values are merge and overwrite. If unspecified, defaults to overwrite (for backwards compatibility). This corresponds to the x-amazon-apigateway-put-integration-method extension. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. 
+ // +kubebuilder:validation:Optional + PutRestAPIMode *string `json:"putRestApiMode,omitempty" tf:"put_rest_api_mode,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// RestAPISpec defines the desired state of RestAPI +type RestAPISpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RestAPIParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RestAPIInitParameters `json:"initProvider,omitempty"` +} + +// RestAPIStatus defines the observed state of RestAPI. +type RestAPIStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RestAPIObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RestAPI is the Schema for the RestAPIs API. Manages an API Gateway REST API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type RestAPI struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec RestAPISpec `json:"spec"` + Status RestAPIStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RestAPIList contains a list of RestAPIs +type RestAPIList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RestAPI `json:"items"` +} + +// Repository type metadata. +var ( + RestAPI_Kind = "RestAPI" + RestAPI_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RestAPI_Kind}.String() + RestAPI_KindAPIVersion = RestAPI_Kind + "." + CRDGroupVersion.String() + RestAPI_GroupVersionKind = CRDGroupVersion.WithKind(RestAPI_Kind) +) + +func init() { + SchemeBuilder.Register(&RestAPI{}, &RestAPIList{}) +} diff --git a/apis/apigateway/v1beta2/zz_stage_terraformed.go b/apis/apigateway/v1beta2/zz_stage_terraformed.go new file mode 100755 index 0000000000..07680d840b --- /dev/null +++ b/apis/apigateway/v1beta2/zz_stage_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Stage +func (mg *Stage) GetTerraformResourceType() string { + return "aws_api_gateway_stage" +} + +// GetConnectionDetailsMapping for this Stage +func (tr *Stage) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Stage +func (tr *Stage) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Stage +func (tr *Stage) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Stage +func (tr *Stage) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Stage +func (tr *Stage) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Stage +func (tr *Stage) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Stage +func (tr *Stage) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of 
this Stage +func (tr *Stage) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Stage using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Stage) LateInitialize(attrs []byte) (bool, error) { + params := &StageParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Stage) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigateway/v1beta2/zz_stage_types.go b/apis/apigateway/v1beta2/zz_stage_types.go new file mode 100755 index 0000000000..11b8bc16b9 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_stage_types.go @@ -0,0 +1,361 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessLogSettingsInitParameters struct { + + // ARN of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. Automatically removes trailing :* if present. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` + + // Formatting and values recorded in the logs. + // For more information on configuring the log format rules visit the AWS documentation + Format *string `json:"format,omitempty" tf:"format,omitempty"` +} + +type AccessLogSettingsObservation struct { + + // ARN of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. Automatically removes trailing :* if present. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` + + // Formatting and values recorded in the logs. 
+ // For more information on configuring the log format rules visit the AWS documentation + Format *string `json:"format,omitempty" tf:"format,omitempty"` +} + +type AccessLogSettingsParameters struct { + + // ARN of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. Automatically removes trailing :* if present. + // +kubebuilder:validation:Optional + DestinationArn *string `json:"destinationArn" tf:"destination_arn,omitempty"` + + // Formatting and values recorded in the logs. + // For more information on configuring the log format rules visit the AWS documentation + // +kubebuilder:validation:Optional + Format *string `json:"format" tf:"format,omitempty"` +} + +type CanarySettingsInitParameters struct { + + // Percent 0.0 - 100.0 of traffic to divert to the canary deployment. + PercentTraffic *float64 `json:"percentTraffic,omitempty" tf:"percent_traffic,omitempty"` + + // Map of overridden stage variables (including new variables) for the canary deployment. + // +mapType=granular + StageVariableOverrides map[string]*string `json:"stageVariableOverrides,omitempty" tf:"stage_variable_overrides,omitempty"` + + // Whether the canary deployment uses the stage cache. Defaults to false. + UseStageCache *bool `json:"useStageCache,omitempty" tf:"use_stage_cache,omitempty"` +} + +type CanarySettingsObservation struct { + + // Percent 0.0 - 100.0 of traffic to divert to the canary deployment. + PercentTraffic *float64 `json:"percentTraffic,omitempty" tf:"percent_traffic,omitempty"` + + // Map of overridden stage variables (including new variables) for the canary deployment. + // +mapType=granular + StageVariableOverrides map[string]*string `json:"stageVariableOverrides,omitempty" tf:"stage_variable_overrides,omitempty"` + + // Whether the canary deployment uses the stage cache. Defaults to false. 
+ UseStageCache *bool `json:"useStageCache,omitempty" tf:"use_stage_cache,omitempty"` +} + +type CanarySettingsParameters struct { + + // Percent 0.0 - 100.0 of traffic to divert to the canary deployment. + // +kubebuilder:validation:Optional + PercentTraffic *float64 `json:"percentTraffic,omitempty" tf:"percent_traffic,omitempty"` + + // Map of overridden stage variables (including new variables) for the canary deployment. + // +kubebuilder:validation:Optional + // +mapType=granular + StageVariableOverrides map[string]*string `json:"stageVariableOverrides,omitempty" tf:"stage_variable_overrides,omitempty"` + + // Whether the canary deployment uses the stage cache. Defaults to false. + // +kubebuilder:validation:Optional + UseStageCache *bool `json:"useStageCache,omitempty" tf:"use_stage_cache,omitempty"` +} + +type StageInitParameters struct { + + // Enables access logs for the API stage. See Access Log Settings below. + AccessLogSettings *AccessLogSettingsInitParameters `json:"accessLogSettings,omitempty" tf:"access_log_settings,omitempty"` + + // Whether a cache cluster is enabled for the stage + CacheClusterEnabled *bool `json:"cacheClusterEnabled,omitempty" tf:"cache_cluster_enabled,omitempty"` + + // Size of the cache cluster for the stage, if enabled. Allowed values include 0.5, 1.6, 6.1, 13.5, 28.4, 58.2, 118 and 237. + CacheClusterSize *string `json:"cacheClusterSize,omitempty" tf:"cache_cluster_size,omitempty"` + + // Configuration settings of a canary deployment. See Canary Settings below. + CanarySettings *CanarySettingsInitParameters `json:"canarySettings,omitempty" tf:"canary_settings,omitempty"` + + // Identifier of a client certificate for the stage. 
+ ClientCertificateID *string `json:"clientCertificateId,omitempty" tf:"client_certificate_id,omitempty"` + + // ID of the deployment that the stage points to + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.Deployment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DeploymentID *string `json:"deploymentId,omitempty" tf:"deployment_id,omitempty"` + + // Reference to a Deployment in apigateway to populate deploymentId. + // +kubebuilder:validation:Optional + DeploymentIDRef *v1.Reference `json:"deploymentIdRef,omitempty" tf:"-"` + + // Selector for a Deployment in apigateway to populate deploymentId. + // +kubebuilder:validation:Optional + DeploymentIDSelector *v1.Selector `json:"deploymentIdSelector,omitempty" tf:"-"` + + // Description of the stage. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Version of the associated API documentation + DocumentationVersion *string `json:"documentationVersion,omitempty" tf:"documentation_version,omitempty"` + + // ID of the associated REST API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDRef *v1.Reference `json:"restApiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDSelector *v1.Selector `json:"restApiIdSelector,omitempty" tf:"-"` + + // Name of the stage + StageName *string `json:"stageName,omitempty" tf:"stage_name,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map that defines the stage variables + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` + + // Whether active tracing with X-ray is enabled. Defaults to false. + XrayTracingEnabled *bool `json:"xrayTracingEnabled,omitempty" tf:"xray_tracing_enabled,omitempty"` +} + +type StageObservation struct { + + // Enables access logs for the API stage. See Access Log Settings below. + AccessLogSettings *AccessLogSettingsObservation `json:"accessLogSettings,omitempty" tf:"access_log_settings,omitempty"` + + // ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether a cache cluster is enabled for the stage + CacheClusterEnabled *bool `json:"cacheClusterEnabled,omitempty" tf:"cache_cluster_enabled,omitempty"` + + // Size of the cache cluster for the stage, if enabled. Allowed values include 0.5, 1.6, 6.1, 13.5, 28.4, 58.2, 118 and 237. + CacheClusterSize *string `json:"cacheClusterSize,omitempty" tf:"cache_cluster_size,omitempty"` + + // Configuration settings of a canary deployment. See Canary Settings below. + CanarySettings *CanarySettingsObservation `json:"canarySettings,omitempty" tf:"canary_settings,omitempty"` + + // Identifier of a client certificate for the stage. + ClientCertificateID *string `json:"clientCertificateId,omitempty" tf:"client_certificate_id,omitempty"` + + // ID of the deployment that the stage points to + DeploymentID *string `json:"deploymentId,omitempty" tf:"deployment_id,omitempty"` + + // Description of the stage. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Version of the associated API documentation + DocumentationVersion *string `json:"documentationVersion,omitempty" tf:"documentation_version,omitempty"` + + // Execution ARN to be used in lambda_permission's source_arn + // when allowing API Gateway to invoke a Lambda function, + // e.g., arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j/prod + ExecutionArn *string `json:"executionArn,omitempty" tf:"execution_arn,omitempty"` + + // ID of the stage + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // URL to invoke the API pointing to the stage, + // e.g., https://z4675bid1j.execute-api.eu-west-2.amazonaws.com/prod + InvokeURL *string `json:"invokeUrl,omitempty" tf:"invoke_url,omitempty"` + + // ID of the associated REST API + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Name of the stage + StageName *string `json:"stageName,omitempty" tf:"stage_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Map that defines the stage variables + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` + + // ARN of the WebAcl associated with the Stage. + WebACLArn *string `json:"webAclArn,omitempty" tf:"web_acl_arn,omitempty"` + + // Whether active tracing with X-ray is enabled. Defaults to false. + XrayTracingEnabled *bool `json:"xrayTracingEnabled,omitempty" tf:"xray_tracing_enabled,omitempty"` +} + +type StageParameters struct { + + // Enables access logs for the API stage. See Access Log Settings below. 
+ // +kubebuilder:validation:Optional + AccessLogSettings *AccessLogSettingsParameters `json:"accessLogSettings,omitempty" tf:"access_log_settings,omitempty"` + + // Whether a cache cluster is enabled for the stage + // +kubebuilder:validation:Optional + CacheClusterEnabled *bool `json:"cacheClusterEnabled,omitempty" tf:"cache_cluster_enabled,omitempty"` + + // Size of the cache cluster for the stage, if enabled. Allowed values include 0.5, 1.6, 6.1, 13.5, 28.4, 58.2, 118 and 237. + // +kubebuilder:validation:Optional + CacheClusterSize *string `json:"cacheClusterSize,omitempty" tf:"cache_cluster_size,omitempty"` + + // Configuration settings of a canary deployment. See Canary Settings below. + // +kubebuilder:validation:Optional + CanarySettings *CanarySettingsParameters `json:"canarySettings,omitempty" tf:"canary_settings,omitempty"` + + // Identifier of a client certificate for the stage. + // +kubebuilder:validation:Optional + ClientCertificateID *string `json:"clientCertificateId,omitempty" tf:"client_certificate_id,omitempty"` + + // ID of the deployment that the stage points to + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta1.Deployment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DeploymentID *string `json:"deploymentId,omitempty" tf:"deployment_id,omitempty"` + + // Reference to a Deployment in apigateway to populate deploymentId. + // +kubebuilder:validation:Optional + DeploymentIDRef *v1.Reference `json:"deploymentIdRef,omitempty" tf:"-"` + + // Selector for a Deployment in apigateway to populate deploymentId. + // +kubebuilder:validation:Optional + DeploymentIDSelector *v1.Selector `json:"deploymentIdSelector,omitempty" tf:"-"` + + // Description of the stage. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Version of the associated API documentation + // +kubebuilder:validation:Optional + DocumentationVersion *string `json:"documentationVersion,omitempty" tf:"documentation_version,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ID of the associated REST API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RestAPIID *string `json:"restApiId,omitempty" tf:"rest_api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDRef *v1.Reference `json:"restApiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate restApiId. + // +kubebuilder:validation:Optional + RestAPIIDSelector *v1.Selector `json:"restApiIdSelector,omitempty" tf:"-"` + + // Name of the stage + // +kubebuilder:validation:Optional + StageName *string `json:"stageName,omitempty" tf:"stage_name,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map that defines the stage variables + // +kubebuilder:validation:Optional + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` + + // Whether active tracing with X-ray is enabled. Defaults to false. 
+ // +kubebuilder:validation:Optional + XrayTracingEnabled *bool `json:"xrayTracingEnabled,omitempty" tf:"xray_tracing_enabled,omitempty"` +} + +// StageSpec defines the desired state of Stage +type StageSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StageParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StageInitParameters `json:"initProvider,omitempty"` +} + +// StageStatus defines the observed state of Stage. +type StageStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StageObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Stage is the Schema for the Stages API. Manages an API Gateway Stage. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Stage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.stageName) || (has(self.initProvider) && has(self.initProvider.stageName))",message="spec.forProvider.stageName is a required parameter" + Spec StageSpec `json:"spec"` + Status StageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StageList contains a list of Stages +type StageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Stage `json:"items"` +} + +// Repository type metadata. +var ( + Stage_Kind = "Stage" + Stage_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Stage_Kind}.String() + Stage_KindAPIVersion = Stage_Kind + "." + CRDGroupVersion.String() + Stage_GroupVersionKind = CRDGroupVersion.WithKind(Stage_Kind) +) + +func init() { + SchemeBuilder.Register(&Stage{}, &StageList{}) +} diff --git a/apis/apigateway/v1beta2/zz_usageplan_terraformed.go b/apis/apigateway/v1beta2/zz_usageplan_terraformed.go new file mode 100755 index 0000000000..9b470f72d6 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_usageplan_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this UsagePlan +func (mg *UsagePlan) GetTerraformResourceType() string { + return "aws_api_gateway_usage_plan" +} + +// GetConnectionDetailsMapping for this UsagePlan +func (tr *UsagePlan) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this UsagePlan +func (tr *UsagePlan) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this UsagePlan +func (tr *UsagePlan) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this UsagePlan +func (tr *UsagePlan) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this UsagePlan +func (tr *UsagePlan) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this UsagePlan +func (tr *UsagePlan) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this UsagePlan +func (tr *UsagePlan) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this UsagePlan
+func (tr *UsagePlan) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
+
+// LateInitialize this UsagePlan using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *UsagePlan) LateInitialize(attrs []byte) (bool, error) {
+ params := &UsagePlanParameters{}
+ if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+ return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+ }
+ opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+ li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *UsagePlan) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigateway/v1beta2/zz_usageplan_types.go b/apis/apigateway/v1beta2/zz_usageplan_types.go new file mode 100755 index 0000000000..5d8de16a22 --- /dev/null +++ b/apis/apigateway/v1beta2/zz_usageplan_types.go @@ -0,0 +1,358 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type APIStagesInitParameters struct { + + // API Id of the associated API stage in a usage plan. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // API stage name of the associated API stage in a usage plan. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.Stage + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("stage_name",false) + Stage *string `json:"stage,omitempty" tf:"stage,omitempty"` + + // Reference to a Stage in apigateway to populate stage. 
+ // +kubebuilder:validation:Optional + StageRef *v1.Reference `json:"stageRef,omitempty" tf:"-"` + + // Selector for a Stage in apigateway to populate stage. + // +kubebuilder:validation:Optional + StageSelector *v1.Selector `json:"stageSelector,omitempty" tf:"-"` + + // The throttling limits of the usage plan. + Throttle []ThrottleInitParameters `json:"throttle,omitempty" tf:"throttle,omitempty"` +} + +type APIStagesObservation struct { + + // API Id of the associated API stage in a usage plan. + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // API stage name of the associated API stage in a usage plan. + Stage *string `json:"stage,omitempty" tf:"stage,omitempty"` + + // The throttling limits of the usage plan. + Throttle []ThrottleObservation `json:"throttle,omitempty" tf:"throttle,omitempty"` +} + +type APIStagesParameters struct { + + // API Id of the associated API stage in a usage plan. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.RestAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a RestAPI in apigateway to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a RestAPI in apigateway to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // API stage name of the associated API stage in a usage plan. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigateway/v1beta2.Stage + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("stage_name",false) + // +kubebuilder:validation:Optional + Stage *string `json:"stage,omitempty" tf:"stage,omitempty"` + + // Reference to a Stage in apigateway to populate stage. + // +kubebuilder:validation:Optional + StageRef *v1.Reference `json:"stageRef,omitempty" tf:"-"` + + // Selector for a Stage in apigateway to populate stage. + // +kubebuilder:validation:Optional + StageSelector *v1.Selector `json:"stageSelector,omitempty" tf:"-"` + + // The throttling limits of the usage plan. + // +kubebuilder:validation:Optional + Throttle []ThrottleParameters `json:"throttle,omitempty" tf:"throttle,omitempty"` +} + +type QuotaSettingsInitParameters struct { + + // Maximum number of requests that can be made in a given time period. + Limit *float64 `json:"limit,omitempty" tf:"limit,omitempty"` + + // Number of requests subtracted from the given limit in the initial time period. + Offset *float64 `json:"offset,omitempty" tf:"offset,omitempty"` + + // Time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH". + Period *string `json:"period,omitempty" tf:"period,omitempty"` +} + +type QuotaSettingsObservation struct { + + // Maximum number of requests that can be made in a given time period. + Limit *float64 `json:"limit,omitempty" tf:"limit,omitempty"` + + // Number of requests subtracted from the given limit in the initial time period. + Offset *float64 `json:"offset,omitempty" tf:"offset,omitempty"` + + // Time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH". + Period *string `json:"period,omitempty" tf:"period,omitempty"` +} + +type QuotaSettingsParameters struct { + + // Maximum number of requests that can be made in a given time period. 
+ // +kubebuilder:validation:Optional
+ Limit *float64 `json:"limit" tf:"limit,omitempty"`
+
+ // Number of requests subtracted from the given limit in the initial time period.
+ // +kubebuilder:validation:Optional
+ Offset *float64 `json:"offset,omitempty" tf:"offset,omitempty"`
+
+ // Time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
+ // +kubebuilder:validation:Optional
+ Period *string `json:"period" tf:"period,omitempty"`
+}
+
+type ThrottleInitParameters struct {
+
+ // The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
+ BurstLimit *float64 `json:"burstLimit,omitempty" tf:"burst_limit,omitempty"`
+
+ // Method to apply the throttle settings for. Specify the path and method, for example /test/GET.
+ Path *string `json:"path,omitempty" tf:"path,omitempty"`
+
+ // The API request steady-state rate limit.
+ RateLimit *float64 `json:"rateLimit,omitempty" tf:"rate_limit,omitempty"`
+}
+
+type ThrottleObservation struct {
+
+ // The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
+ BurstLimit *float64 `json:"burstLimit,omitempty" tf:"burst_limit,omitempty"`
+
+ // Method to apply the throttle settings for. Specify the path and method, for example /test/GET.
+ Path *string `json:"path,omitempty" tf:"path,omitempty"`
+
+ // The API request steady-state rate limit.
+ RateLimit *float64 `json:"rateLimit,omitempty" tf:"rate_limit,omitempty"`
+}
+
+type ThrottleParameters struct {
+
+ // The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
+ // +kubebuilder:validation:Optional
+ BurstLimit *float64 `json:"burstLimit,omitempty" tf:"burst_limit,omitempty"`
+
+ // Method to apply the throttle settings for. Specify the path and method, for example /test/GET.
+ // +kubebuilder:validation:Optional
+ Path *string `json:"path" tf:"path,omitempty"`
+
+ // The API request steady-state rate limit.
+ // +kubebuilder:validation:Optional
+ RateLimit *float64 `json:"rateLimit,omitempty" tf:"rate_limit,omitempty"`
+}
+
+type ThrottleSettingsInitParameters struct {
+
+ // The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
+ BurstLimit *float64 `json:"burstLimit,omitempty" tf:"burst_limit,omitempty"`
+
+ // The API request steady-state rate limit.
+ RateLimit *float64 `json:"rateLimit,omitempty" tf:"rate_limit,omitempty"`
+}
+
+type ThrottleSettingsObservation struct {
+
+ // The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
+ BurstLimit *float64 `json:"burstLimit,omitempty" tf:"burst_limit,omitempty"`
+
+ // The API request steady-state rate limit.
+ RateLimit *float64 `json:"rateLimit,omitempty" tf:"rate_limit,omitempty"`
+}
+
+type ThrottleSettingsParameters struct {
+
+ // The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
+ // +kubebuilder:validation:Optional
+ BurstLimit *float64 `json:"burstLimit,omitempty" tf:"burst_limit,omitempty"`
+
+ // The API request steady-state rate limit.
+ // +kubebuilder:validation:Optional
+ RateLimit *float64 `json:"rateLimit,omitempty" tf:"rate_limit,omitempty"`
+}
+
+type UsagePlanInitParameters struct {
+
+ // Associated API stages of the usage plan.
+ APIStages []APIStagesInitParameters `json:"apiStages,omitempty" tf:"api_stages,omitempty"` + + // Description of a usage plan. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the usage plan. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace. + ProductCode *string `json:"productCode,omitempty" tf:"product_code,omitempty"` + + // The quota settings of the usage plan. + QuotaSettings *QuotaSettingsInitParameters `json:"quotaSettings,omitempty" tf:"quota_settings,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The throttling limits of the usage plan. + ThrottleSettings *ThrottleSettingsInitParameters `json:"throttleSettings,omitempty" tf:"throttle_settings,omitempty"` +} + +type UsagePlanObservation struct { + + // Associated API stages of the usage plan. + APIStages []APIStagesObservation `json:"apiStages,omitempty" tf:"api_stages,omitempty"` + + // ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description of a usage plan. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the API resource + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the usage plan. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace. + ProductCode *string `json:"productCode,omitempty" tf:"product_code,omitempty"` + + // The quota settings of the usage plan. + QuotaSettings *QuotaSettingsObservation `json:"quotaSettings,omitempty" tf:"quota_settings,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The throttling limits of the usage plan. + ThrottleSettings *ThrottleSettingsObservation `json:"throttleSettings,omitempty" tf:"throttle_settings,omitempty"` +} + +type UsagePlanParameters struct { + + // Associated API stages of the usage plan. + // +kubebuilder:validation:Optional + APIStages []APIStagesParameters `json:"apiStages,omitempty" tf:"api_stages,omitempty"` + + // Description of a usage plan. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the usage plan. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace. + // +kubebuilder:validation:Optional + ProductCode *string `json:"productCode,omitempty" tf:"product_code,omitempty"` + + // The quota settings of the usage plan. + // +kubebuilder:validation:Optional + QuotaSettings *QuotaSettingsParameters `json:"quotaSettings,omitempty" tf:"quota_settings,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The throttling limits of the usage plan. 
+ // +kubebuilder:validation:Optional + ThrottleSettings *ThrottleSettingsParameters `json:"throttleSettings,omitempty" tf:"throttle_settings,omitempty"` +} + +// UsagePlanSpec defines the desired state of UsagePlan +type UsagePlanSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UsagePlanParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UsagePlanInitParameters `json:"initProvider,omitempty"` +} + +// UsagePlanStatus defines the observed state of UsagePlan. +type UsagePlanStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UsagePlanObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// UsagePlan is the Schema for the UsagePlans API. Provides an API Gateway Usage Plan. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type UsagePlan struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec UsagePlanSpec `json:"spec"` + Status UsagePlanStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UsagePlanList contains a list of UsagePlans +type UsagePlanList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UsagePlan `json:"items"` +} + +// Repository type metadata. +var ( + UsagePlan_Kind = "UsagePlan" + UsagePlan_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: UsagePlan_Kind}.String() + UsagePlan_KindAPIVersion = UsagePlan_Kind + "." + CRDGroupVersion.String() + UsagePlan_GroupVersionKind = CRDGroupVersion.WithKind(UsagePlan_Kind) +) + +func init() { + SchemeBuilder.Register(&UsagePlan{}, &UsagePlanList{}) +} diff --git a/apis/apigatewayv2/v1beta1/zz_apimapping_types.go b/apis/apigatewayv2/v1beta1/zz_apimapping_types.go index e0d6beed21..e48664d210 100755 --- a/apis/apigatewayv2/v1beta1/zz_apimapping_types.go +++ b/apis/apigatewayv2/v1beta1/zz_apimapping_types.go @@ -16,7 +16,7 @@ import ( type APIMappingInitParameters struct { // API identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` // Reference to a API in apigatewayv2 to populate apiId. @@ -31,7 +31,7 @@ type APIMappingInitParameters struct { APIMappingKey *string `json:"apiMappingKey,omitempty" tf:"api_mapping_key,omitempty"` // Domain name. Use the aws_apigatewayv2_domain_name resource to configure a domain name. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.DomainName + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.DomainName DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` // Reference to a DomainName in apigatewayv2 to populate domainName. @@ -43,7 +43,7 @@ type APIMappingInitParameters struct { DomainNameSelector *v1.Selector `json:"domainNameSelector,omitempty" tf:"-"` // API stage. Use the aws_apigatewayv2_stage resource to configure an API stage. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Stage + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.Stage // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() Stage *string `json:"stage,omitempty" tf:"stage,omitempty"` @@ -77,7 +77,7 @@ type APIMappingObservation struct { type APIMappingParameters struct { // API identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` @@ -94,7 +94,7 @@ type APIMappingParameters struct { APIMappingKey *string `json:"apiMappingKey,omitempty" tf:"api_mapping_key,omitempty"` // Domain name. Use the aws_apigatewayv2_domain_name resource to configure a domain name. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.DomainName + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.DomainName // +kubebuilder:validation:Optional DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` @@ -112,7 +112,7 @@ type APIMappingParameters struct { Region *string `json:"region" tf:"-"` // API stage. Use the aws_apigatewayv2_stage resource to configure an API stage. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Stage + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.Stage // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() // +kubebuilder:validation:Optional Stage *string `json:"stage,omitempty" tf:"stage,omitempty"` diff --git a/apis/apigatewayv2/v1beta1/zz_deployment_types.go b/apis/apigatewayv2/v1beta1/zz_deployment_types.go index 15cb24a90d..21853de9f6 100755 --- a/apis/apigatewayv2/v1beta1/zz_deployment_types.go +++ b/apis/apigatewayv2/v1beta1/zz_deployment_types.go @@ -16,7 +16,7 @@ import ( type DeploymentInitParameters struct { // API identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` // Reference to a API in apigatewayv2 to populate apiId. @@ -49,7 +49,7 @@ type DeploymentObservation struct { type DeploymentParameters struct { // API identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` diff --git a/apis/apigatewayv2/v1beta1/zz_generated.conversion_hubs.go b/apis/apigatewayv2/v1beta1/zz_generated.conversion_hubs.go index 6b13f9b7d8..b3f40b7366 100755 --- a/apis/apigatewayv2/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/apigatewayv2/v1beta1/zz_generated.conversion_hubs.go @@ -6,24 +6,12 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *API) Hub() {} - // Hub marks this type as a conversion hub. func (tr *APIMapping) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Authorizer) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Deployment) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *DomainName) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Integration) Hub() {} - // Hub marks this type as a conversion hub. func (tr *IntegrationResponse) Hub() {} @@ -36,8 +24,5 @@ func (tr *Route) Hub() {} // Hub marks this type as a conversion hub. func (tr *RouteResponse) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Stage) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *VPCLink) Hub() {} diff --git a/apis/apigatewayv2/v1beta1/zz_generated.conversion_spokes.go b/apis/apigatewayv2/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..3b24ff4cbf --- /dev/null +++ b/apis/apigatewayv2/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this API to the hub type. +func (tr *API) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the API type. +func (tr *API) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Authorizer to the hub type. 
+func (tr *Authorizer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Authorizer type. +func (tr *Authorizer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DomainName to the hub type. +func (tr *DomainName) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DomainName type. +func (tr *DomainName) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Integration to the hub type. 
+func (tr *Integration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Integration type. +func (tr *Integration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Stage to the hub type. +func (tr *Stage) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Stage type. 
+func (tr *Stage) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/apigatewayv2/v1beta1/zz_generated.resolvers.go b/apis/apigatewayv2/v1beta1/zz_generated.resolvers.go index d3eb9a7f90..3cc59dcdaf 100644 --- a/apis/apigatewayv2/v1beta1/zz_generated.resolvers.go +++ b/apis/apigatewayv2/v1beta1/zz_generated.resolvers.go @@ -29,7 +29,7 @@ func (mg *APIMapping) ResolveReferences( // ResolveReferences of this APIMapping var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -48,7 +48,7 @@ func (mg *APIMapping) ResolveReferences( // ResolveReferences of this APIMapping mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "DomainName", "DomainNameList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "DomainName", "DomainNameList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -67,7 +67,7 @@ func (mg *APIMapping) ResolveReferences( // ResolveReferences of this APIMapping mg.Spec.ForProvider.DomainName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DomainNameRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Stage", "StageList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "Stage", "StageList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -86,7 +86,7 @@ func (mg *APIMapping) ResolveReferences( // ResolveReferences of this APIMapping mg.Spec.ForProvider.Stage = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StageRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -105,7 +105,7 @@ func (mg *APIMapping) ResolveReferences( // ResolveReferences of this APIMapping mg.Spec.InitProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "DomainName", "DomainNameList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "DomainName", "DomainNameList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -124,7 +124,7 @@ func (mg *APIMapping) ResolveReferences( // ResolveReferences of this APIMapping mg.Spec.InitProvider.DomainName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DomainNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Stage", "StageList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "Stage", 
"StageList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -243,7 +243,7 @@ func (mg *Deployment) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -262,7 +262,7 @@ func (mg *Deployment) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -512,7 +512,7 @@ func (mg *IntegrationResponse) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -531,7 +531,7 @@ func (mg *IntegrationResponse) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Integration", "IntegrationList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "Integration", "IntegrationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -550,7 +550,7 @@ func (mg *IntegrationResponse) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.IntegrationID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.IntegrationIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -569,7 +569,7 @@ func (mg *IntegrationResponse) ResolveReferences(ctx context.Context, c client.R mg.Spec.InitProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Integration", "IntegrationList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "Integration", "IntegrationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -600,7 +600,7 @@ func (mg *Model) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") } @@ -619,7 +619,7 @@ func (mg *Model) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -650,7 +650,7 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -669,7 +669,7 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Authorizer", "AuthorizerList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "Authorizer", "AuthorizerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -688,7 +688,7 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.AuthorizerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AuthorizerIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Integration", "IntegrationList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "Integration", "IntegrationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -707,7 +707,7 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.Target = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -726,7 +726,7 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.InitProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Authorizer", "AuthorizerList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "Authorizer", "AuthorizerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -745,7 +745,7 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.InitProvider.AuthorizerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.AuthorizerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Integration", "IntegrationList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", 
"Integration", "IntegrationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -776,7 +776,7 @@ func (mg *RouteResponse) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -814,7 +814,7 @@ func (mg *RouteResponse) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.RouteID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RouteIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/apigatewayv2/v1beta1/zz_integrationresponse_types.go b/apis/apigatewayv2/v1beta1/zz_integrationresponse_types.go index 1f9d162ef8..0c7b0678de 100755 --- a/apis/apigatewayv2/v1beta1/zz_integrationresponse_types.go +++ b/apis/apigatewayv2/v1beta1/zz_integrationresponse_types.go @@ -16,7 +16,7 @@ import ( type IntegrationResponseInitParameters struct { // API identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` // Reference to a API in apigatewayv2 to populate apiId. 
@@ -31,7 +31,7 @@ type IntegrationResponseInitParameters struct { ContentHandlingStrategy *string `json:"contentHandlingStrategy,omitempty" tf:"content_handling_strategy,omitempty"` // Identifier of the aws_apigatewayv2_integration. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Integration + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.Integration IntegrationID *string `json:"integrationId,omitempty" tf:"integration_id,omitempty"` // Reference to a Integration in apigatewayv2 to populate integrationId. @@ -81,7 +81,7 @@ type IntegrationResponseObservation struct { type IntegrationResponseParameters struct { // API identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` @@ -98,7 +98,7 @@ type IntegrationResponseParameters struct { ContentHandlingStrategy *string `json:"contentHandlingStrategy,omitempty" tf:"content_handling_strategy,omitempty"` // Identifier of the aws_apigatewayv2_integration. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Integration + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.Integration // +kubebuilder:validation:Optional IntegrationID *string `json:"integrationId,omitempty" tf:"integration_id,omitempty"` diff --git a/apis/apigatewayv2/v1beta1/zz_model_types.go b/apis/apigatewayv2/v1beta1/zz_model_types.go index 29bce1cb21..40c2d9f653 100755 --- a/apis/apigatewayv2/v1beta1/zz_model_types.go +++ b/apis/apigatewayv2/v1beta1/zz_model_types.go @@ -16,7 +16,7 @@ import ( type ModelInitParameters struct { // API identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` // Reference to a API in apigatewayv2 to populate apiId. @@ -64,7 +64,7 @@ type ModelObservation struct { type ModelParameters struct { // API identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` diff --git a/apis/apigatewayv2/v1beta1/zz_route_types.go b/apis/apigatewayv2/v1beta1/zz_route_types.go index bef07b10a5..335cc74b00 100755 --- a/apis/apigatewayv2/v1beta1/zz_route_types.go +++ b/apis/apigatewayv2/v1beta1/zz_route_types.go @@ -45,7 +45,7 @@ type RequestParameterParameters struct { type RouteInitParameters struct { // API identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` // Reference to a API in apigatewayv2 to populate apiId. @@ -70,7 +70,7 @@ type RouteInitParameters struct { AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` // Identifier of the aws_apigatewayv2_authorizer resource to be associated with this route. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Authorizer + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.Authorizer AuthorizerID *string `json:"authorizerId,omitempty" tf:"authorizer_id,omitempty"` // Reference to a Authorizer in apigatewayv2 to populate authorizerId. @@ -101,7 +101,7 @@ type RouteInitParameters struct { RouteResponseSelectionExpression *string `json:"routeResponseSelectionExpression,omitempty" tf:"route_response_selection_expression,omitempty"` // Target for the route, of the form integrations/IntegrationID, where IntegrationID is the identifier of an aws_apigatewayv2_integration resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Integration + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.Integration // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common/apis.IntegrationIDPrefixed() Target *string `json:"target,omitempty" tf:"target,omitempty"` @@ -164,7 +164,7 @@ type RouteObservation struct { type RouteParameters struct { // API identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` @@ -193,7 +193,7 @@ type RouteParameters struct { AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` // Identifier of the aws_apigatewayv2_authorizer resource to be associated with this route. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Authorizer + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.Authorizer // +kubebuilder:validation:Optional AuthorizerID *string `json:"authorizerId,omitempty" tf:"authorizer_id,omitempty"` @@ -236,7 +236,7 @@ type RouteParameters struct { RouteResponseSelectionExpression *string `json:"routeResponseSelectionExpression,omitempty" tf:"route_response_selection_expression,omitempty"` // Target for the route, of the form integrations/IntegrationID, where IntegrationID is the identifier of an aws_apigatewayv2_integration resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Integration + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.Integration // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common/apis.IntegrationIDPrefixed() // +kubebuilder:validation:Optional Target *string `json:"target,omitempty" tf:"target,omitempty"` diff --git a/apis/apigatewayv2/v1beta1/zz_routeresponse_types.go b/apis/apigatewayv2/v1beta1/zz_routeresponse_types.go index 565bdfee82..c42afc1190 100755 --- a/apis/apigatewayv2/v1beta1/zz_routeresponse_types.go +++ b/apis/apigatewayv2/v1beta1/zz_routeresponse_types.go @@ -16,7 +16,7 @@ import ( type RouteResponseInitParameters struct { // API identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` // Reference to a API in apigatewayv2 to populate apiId. @@ -75,7 +75,7 @@ type RouteResponseObservation struct { type RouteResponseParameters struct { // API identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` diff --git a/apis/apigatewayv2/v1beta2/zz_api_terraformed.go b/apis/apigatewayv2/v1beta2/zz_api_terraformed.go new file mode 100755 index 0000000000..abe6cc891b --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_api_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this API +func (mg *API) GetTerraformResourceType() string { + return "aws_apigatewayv2_api" +} + +// GetConnectionDetailsMapping for this API +func (tr *API) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this API +func (tr *API) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this API +func (tr *API) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this API +func (tr *API) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this API +func (tr *API) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + 
if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this API +func (tr *API) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this API +func (tr *API) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this API +func (tr *API) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this API using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *API) LateInitialize(attrs []byte) (bool, error) { + params := &APIParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *API) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigatewayv2/v1beta2/zz_api_types.go b/apis/apigatewayv2/v1beta2/zz_api_types.go new file mode 100755 index 0000000000..ac69afcd76 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_api_types.go @@ -0,0 +1,353 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type APIInitParameters struct { + + // An API key selection expression. + // Valid values: $context.authorizer.usageIdentifierKey, $request.header.x-api-key. Defaults to $request.header.x-api-key. + // Applicable for WebSocket APIs. + APIKeySelectionExpression *string `json:"apiKeySelectionExpression,omitempty" tf:"api_key_selection_expression,omitempty"` + + // An OpenAPI specification that defines the set of routes and integrations to create as part of the HTTP APIs. Supported only for HTTP APIs. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // Cross-origin resource sharing (CORS) configuration. Applicable for HTTP APIs. 
+ CorsConfiguration *CorsConfigurationInitParameters `json:"corsConfiguration,omitempty" tf:"cors_configuration,omitempty"` + + // Part of quick create. Specifies any credentials required for the integration. Applicable for HTTP APIs. + CredentialsArn *string `json:"credentialsArn,omitempty" tf:"credentials_arn,omitempty"` + + // Description of the API. Must be less than or equal to 1024 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether clients can invoke the API by using the default execute-api endpoint. + // By default, clients can invoke the API with the default {api_id}.execute-api.{region}.amazonaws.com endpoint. + // To require that clients use a custom domain name to invoke the API, disable the default endpoint. + DisableExecuteAPIEndpoint *bool `json:"disableExecuteApiEndpoint,omitempty" tf:"disable_execute_api_endpoint,omitempty"` + + // Whether warnings should return an error while API Gateway is creating or updating the resource using an OpenAPI specification. Defaults to false. Applicable for HTTP APIs. + FailOnWarnings *bool `json:"failOnWarnings,omitempty" tf:"fail_on_warnings,omitempty"` + + // Name of the API. Must be less than or equal to 128 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // API protocol. Valid values: HTTP, WEBSOCKET. + ProtocolType *string `json:"protocolType,omitempty" tf:"protocol_type,omitempty"` + + // Part of quick create. Specifies any route key. Applicable for HTTP APIs. + RouteKey *string `json:"routeKey,omitempty" tf:"route_key,omitempty"` + + // The route selection expression for the API. + // Defaults to $request.method $request.path. + RouteSelectionExpression *string `json:"routeSelectionExpression,omitempty" tf:"route_selection_expression,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Part of quick create. 
Quick create produces an API with an integration, a default catch-all route, and a default stage which is configured to automatically deploy changes. + // For HTTP integrations, specify a fully qualified URL. For Lambda integrations, specify a function ARN. + // The type of the integration will be HTTP_PROXY or AWS_PROXY, respectively. Applicable for HTTP APIs. + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Version identifier for the API. Must be between 1 and 64 characters in length. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type APIObservation struct { + + // URI of the API, of the form https://{api-id}.execute-api.{region}.amazonaws.com for HTTP APIs and wss://{api-id}.execute-api.{region}.amazonaws.com for WebSocket APIs. + APIEndpoint *string `json:"apiEndpoint,omitempty" tf:"api_endpoint,omitempty"` + + // An API key selection expression. + // Valid values: $context.authorizer.usageIdentifierKey, $request.header.x-api-key. Defaults to $request.header.x-api-key. + // Applicable for WebSocket APIs. + APIKeySelectionExpression *string `json:"apiKeySelectionExpression,omitempty" tf:"api_key_selection_expression,omitempty"` + + // ARN of the API. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // An OpenAPI specification that defines the set of routes and integrations to create as part of the HTTP APIs. Supported only for HTTP APIs. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // Cross-origin resource sharing (CORS) configuration. Applicable for HTTP APIs. + CorsConfiguration *CorsConfigurationObservation `json:"corsConfiguration,omitempty" tf:"cors_configuration,omitempty"` + + // Part of quick create. Specifies any credentials required for the integration. Applicable for HTTP APIs. + CredentialsArn *string `json:"credentialsArn,omitempty" tf:"credentials_arn,omitempty"` + + // Description of the API. Must be less than or equal to 1024 characters in length. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether clients can invoke the API by using the default execute-api endpoint. + // By default, clients can invoke the API with the default {api_id}.execute-api.{region}.amazonaws.com endpoint. + // To require that clients use a custom domain name to invoke the API, disable the default endpoint. + DisableExecuteAPIEndpoint *bool `json:"disableExecuteApiEndpoint,omitempty" tf:"disable_execute_api_endpoint,omitempty"` + + // ARN prefix to be used in an aws_lambda_permission's source_arn attribute + // or in an aws_iam_policy to authorize access to the @connections API. + // See the Amazon API Gateway Developer Guide for details. + ExecutionArn *string `json:"executionArn,omitempty" tf:"execution_arn,omitempty"` + + // Whether warnings should return an error while API Gateway is creating or updating the resource using an OpenAPI specification. Defaults to false. Applicable for HTTP APIs. + FailOnWarnings *bool `json:"failOnWarnings,omitempty" tf:"fail_on_warnings,omitempty"` + + // API identifier. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the API. Must be less than or equal to 128 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // API protocol. Valid values: HTTP, WEBSOCKET. + ProtocolType *string `json:"protocolType,omitempty" tf:"protocol_type,omitempty"` + + // Part of quick create. Specifies any route key. Applicable for HTTP APIs. + RouteKey *string `json:"routeKey,omitempty" tf:"route_key,omitempty"` + + // The route selection expression for the API. + // Defaults to $request.method $request.path. + RouteSelectionExpression *string `json:"routeSelectionExpression,omitempty" tf:"route_selection_expression,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Part of quick create. Quick create produces an API with an integration, a default catch-all route, and a default stage which is configured to automatically deploy changes. + // For HTTP integrations, specify a fully qualified URL. For Lambda integrations, specify a function ARN. + // The type of the integration will be HTTP_PROXY or AWS_PROXY, respectively. Applicable for HTTP APIs. + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Version identifier for the API. Must be between 1 and 64 characters in length. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type APIParameters struct { + + // An API key selection expression. + // Valid values: $context.authorizer.usageIdentifierKey, $request.header.x-api-key. Defaults to $request.header.x-api-key. + // Applicable for WebSocket APIs. + // +kubebuilder:validation:Optional + APIKeySelectionExpression *string `json:"apiKeySelectionExpression,omitempty" tf:"api_key_selection_expression,omitempty"` + + // An OpenAPI specification that defines the set of routes and integrations to create as part of the HTTP APIs. Supported only for HTTP APIs. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // Cross-origin resource sharing (CORS) configuration. Applicable for HTTP APIs. + // +kubebuilder:validation:Optional + CorsConfiguration *CorsConfigurationParameters `json:"corsConfiguration,omitempty" tf:"cors_configuration,omitempty"` + + // Part of quick create. Specifies any credentials required for the integration. Applicable for HTTP APIs. 
+ // +kubebuilder:validation:Optional + CredentialsArn *string `json:"credentialsArn,omitempty" tf:"credentials_arn,omitempty"` + + // Description of the API. Must be less than or equal to 1024 characters in length. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether clients can invoke the API by using the default execute-api endpoint. + // By default, clients can invoke the API with the default {api_id}.execute-api.{region}.amazonaws.com endpoint. + // To require that clients use a custom domain name to invoke the API, disable the default endpoint. + // +kubebuilder:validation:Optional + DisableExecuteAPIEndpoint *bool `json:"disableExecuteApiEndpoint,omitempty" tf:"disable_execute_api_endpoint,omitempty"` + + // Whether warnings should return an error while API Gateway is creating or updating the resource using an OpenAPI specification. Defaults to false. Applicable for HTTP APIs. + // +kubebuilder:validation:Optional + FailOnWarnings *bool `json:"failOnWarnings,omitempty" tf:"fail_on_warnings,omitempty"` + + // Name of the API. Must be less than or equal to 128 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // API protocol. Valid values: HTTP, WEBSOCKET. + // +kubebuilder:validation:Optional + ProtocolType *string `json:"protocolType,omitempty" tf:"protocol_type,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Part of quick create. Specifies any route key. Applicable for HTTP APIs. + // +kubebuilder:validation:Optional + RouteKey *string `json:"routeKey,omitempty" tf:"route_key,omitempty"` + + // The route selection expression for the API. + // Defaults to $request.method $request.path. 
+ // +kubebuilder:validation:Optional + RouteSelectionExpression *string `json:"routeSelectionExpression,omitempty" tf:"route_selection_expression,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Part of quick create. Quick create produces an API with an integration, a default catch-all route, and a default stage which is configured to automatically deploy changes. + // For HTTP integrations, specify a fully qualified URL. For Lambda integrations, specify a function ARN. + // The type of the integration will be HTTP_PROXY or AWS_PROXY, respectively. Applicable for HTTP APIs. + // +kubebuilder:validation:Optional + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Version identifier for the API. Must be between 1 and 64 characters in length. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type CorsConfigurationInitParameters struct { + + // Whether credentials are included in the CORS request. + AllowCredentials *bool `json:"allowCredentials,omitempty" tf:"allow_credentials,omitempty"` + + // Set of allowed HTTP headers. + // +listType=set + AllowHeaders []*string `json:"allowHeaders,omitempty" tf:"allow_headers,omitempty"` + + // Set of allowed HTTP methods. + // +listType=set + AllowMethods []*string `json:"allowMethods,omitempty" tf:"allow_methods,omitempty"` + + // Set of allowed origins. + // +listType=set + AllowOrigins []*string `json:"allowOrigins,omitempty" tf:"allow_origins,omitempty"` + + // Set of exposed HTTP headers. + // +listType=set + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // Number of seconds that the browser should cache preflight request results. 
+ MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` +} + +type CorsConfigurationObservation struct { + + // Whether credentials are included in the CORS request. + AllowCredentials *bool `json:"allowCredentials,omitempty" tf:"allow_credentials,omitempty"` + + // Set of allowed HTTP headers. + // +listType=set + AllowHeaders []*string `json:"allowHeaders,omitempty" tf:"allow_headers,omitempty"` + + // Set of allowed HTTP methods. + // +listType=set + AllowMethods []*string `json:"allowMethods,omitempty" tf:"allow_methods,omitempty"` + + // Set of allowed origins. + // +listType=set + AllowOrigins []*string `json:"allowOrigins,omitempty" tf:"allow_origins,omitempty"` + + // Set of exposed HTTP headers. + // +listType=set + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // Number of seconds that the browser should cache preflight request results. + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` +} + +type CorsConfigurationParameters struct { + + // Whether credentials are included in the CORS request. + // +kubebuilder:validation:Optional + AllowCredentials *bool `json:"allowCredentials,omitempty" tf:"allow_credentials,omitempty"` + + // Set of allowed HTTP headers. + // +kubebuilder:validation:Optional + // +listType=set + AllowHeaders []*string `json:"allowHeaders,omitempty" tf:"allow_headers,omitempty"` + + // Set of allowed HTTP methods. + // +kubebuilder:validation:Optional + // +listType=set + AllowMethods []*string `json:"allowMethods,omitempty" tf:"allow_methods,omitempty"` + + // Set of allowed origins. + // +kubebuilder:validation:Optional + // +listType=set + AllowOrigins []*string `json:"allowOrigins,omitempty" tf:"allow_origins,omitempty"` + + // Set of exposed HTTP headers. 
+ // +kubebuilder:validation:Optional + // +listType=set + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // Number of seconds that the browser should cache preflight request results. + // +kubebuilder:validation:Optional + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` +} + +// APISpec defines the desired state of API +type APISpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider APIParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider APIInitParameters `json:"initProvider,omitempty"` +} + +// APIStatus defines the observed state of API. +type APIStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider APIObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// API is the Schema for the APIs API. Manages an Amazon API Gateway Version 2 API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type API struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.protocolType) || (has(self.initProvider) && has(self.initProvider.protocolType))",message="spec.forProvider.protocolType is a required parameter" + Spec APISpec `json:"spec"` + Status APIStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// APIList contains a list of APIs +type APIList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []API `json:"items"` +} + +// Repository type metadata. +var ( + API_Kind = "API" + API_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: API_Kind}.String() + API_KindAPIVersion = API_Kind + "." 
+ CRDGroupVersion.String() + API_GroupVersionKind = CRDGroupVersion.WithKind(API_Kind) +) + +func init() { + SchemeBuilder.Register(&API{}, &APIList{}) +} diff --git a/apis/apigatewayv2/v1beta2/zz_authorizer_terraformed.go b/apis/apigatewayv2/v1beta2/zz_authorizer_terraformed.go new file mode 100755 index 0000000000..8e865e3cca --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_authorizer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Authorizer +func (mg *Authorizer) GetTerraformResourceType() string { + return "aws_apigatewayv2_authorizer" +} + +// GetConnectionDetailsMapping for this Authorizer +func (tr *Authorizer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Authorizer +func (tr *Authorizer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Authorizer +func (tr *Authorizer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Authorizer +func (tr *Authorizer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Authorizer +func (tr *Authorizer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } 
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Authorizer
+func (tr *Authorizer) SetParameters(params map[string]any) error {
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Authorizer
+func (tr *Authorizer) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Authorizer
+func (tr *Authorizer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
+
+// LateInitialize this Authorizer using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Authorizer) LateInitialize(attrs []byte) (bool, error) { + params := &AuthorizerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Authorizer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigatewayv2/v1beta2/zz_authorizer_types.go b/apis/apigatewayv2/v1beta2/zz_authorizer_types.go new file mode 100755 index 0000000000..717794ad22 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_authorizer_types.go @@ -0,0 +1,302 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthorizerInitParameters struct { + + // API identifier. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // Required credentials as an IAM role for API Gateway to invoke the authorizer. + // Supported only for REQUEST authorizers. 
+ AuthorizerCredentialsArn *string `json:"authorizerCredentialsArn,omitempty" tf:"authorizer_credentials_arn,omitempty"` + + // Format of the payload sent to an HTTP API Lambda authorizer. Required for HTTP API Lambda authorizers. + // Valid values: 1.0, 2.0. + AuthorizerPayloadFormatVersion *string `json:"authorizerPayloadFormatVersion,omitempty" tf:"authorizer_payload_format_version,omitempty"` + + // Time to live (TTL) for cached authorizer results, in seconds. If it equals 0, authorization caching is disabled. + // If it is greater than 0, API Gateway caches authorizer responses. The maximum value is 3600, or 1 hour. Defaults to 300. + // Supported only for HTTP API Lambda authorizers. + AuthorizerResultTTLInSeconds *float64 `json:"authorizerResultTtlInSeconds,omitempty" tf:"authorizer_result_ttl_in_seconds,omitempty"` + + // Authorizer type. Valid values: JWT, REQUEST. + // Specify REQUEST for a Lambda function using incoming request parameters. + // For HTTP APIs, specify JWT to use JSON Web Tokens. + AuthorizerType *string `json:"authorizerType,omitempty" tf:"authorizer_type,omitempty"` + + // Authorizer's Uniform Resource Identifier (URI). + // For REQUEST authorizers this must be a well-formed Lambda function URI, such as the invoke_arn attribute of the aws_lambda_function resource. + // Supported only for REQUEST authorizers. Must be between 1 and 2048 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common/apis/lambda.FunctionInvokeARN() + AuthorizerURI *string `json:"authorizerUri,omitempty" tf:"authorizer_uri,omitempty"` + + // Reference to a Function in lambda to populate authorizerUri. + // +kubebuilder:validation:Optional + AuthorizerURIRef *v1.Reference `json:"authorizerUriRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate authorizerUri. 
+ // +kubebuilder:validation:Optional + AuthorizerURISelector *v1.Selector `json:"authorizerUriSelector,omitempty" tf:"-"` + + // Whether a Lambda authorizer returns a response in a simple format. If enabled, the Lambda authorizer can return a boolean value instead of an IAM policy. + // Supported only for HTTP APIs. + EnableSimpleResponses *bool `json:"enableSimpleResponses,omitempty" tf:"enable_simple_responses,omitempty"` + + // Identity sources for which authorization is requested. + // For REQUEST authorizers the value is a list of one or more mapping expressions of the specified request parameters. + // For JWT authorizers the single entry specifies where to extract the JSON Web Token (JWT) from inbound requests. + // +listType=set + IdentitySources []*string `json:"identitySources,omitempty" tf:"identity_sources,omitempty"` + + // Configuration of a JWT authorizer. Required for the JWT authorizer type. + // Supported only for HTTP APIs. + JwtConfiguration *JwtConfigurationInitParameters `json:"jwtConfiguration,omitempty" tf:"jwt_configuration,omitempty"` + + // Name of the authorizer. Must be between 1 and 128 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AuthorizerObservation struct { + + // API identifier. + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Required credentials as an IAM role for API Gateway to invoke the authorizer. + // Supported only for REQUEST authorizers. + AuthorizerCredentialsArn *string `json:"authorizerCredentialsArn,omitempty" tf:"authorizer_credentials_arn,omitempty"` + + // Format of the payload sent to an HTTP API Lambda authorizer. Required for HTTP API Lambda authorizers. + // Valid values: 1.0, 2.0. + AuthorizerPayloadFormatVersion *string `json:"authorizerPayloadFormatVersion,omitempty" tf:"authorizer_payload_format_version,omitempty"` + + // Time to live (TTL) for cached authorizer results, in seconds. If it equals 0, authorization caching is disabled. 
+ // If it is greater than 0, API Gateway caches authorizer responses. The maximum value is 3600, or 1 hour. Defaults to 300. + // Supported only for HTTP API Lambda authorizers. + AuthorizerResultTTLInSeconds *float64 `json:"authorizerResultTtlInSeconds,omitempty" tf:"authorizer_result_ttl_in_seconds,omitempty"` + + // Authorizer type. Valid values: JWT, REQUEST. + // Specify REQUEST for a Lambda function using incoming request parameters. + // For HTTP APIs, specify JWT to use JSON Web Tokens. + AuthorizerType *string `json:"authorizerType,omitempty" tf:"authorizer_type,omitempty"` + + // Authorizer's Uniform Resource Identifier (URI). + // For REQUEST authorizers this must be a well-formed Lambda function URI, such as the invoke_arn attribute of the aws_lambda_function resource. + // Supported only for REQUEST authorizers. Must be between 1 and 2048 characters in length. + AuthorizerURI *string `json:"authorizerUri,omitempty" tf:"authorizer_uri,omitempty"` + + // Whether a Lambda authorizer returns a response in a simple format. If enabled, the Lambda authorizer can return a boolean value instead of an IAM policy. + // Supported only for HTTP APIs. + EnableSimpleResponses *bool `json:"enableSimpleResponses,omitempty" tf:"enable_simple_responses,omitempty"` + + // Authorizer identifier. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Identity sources for which authorization is requested. + // For REQUEST authorizers the value is a list of one or more mapping expressions of the specified request parameters. + // For JWT authorizers the single entry specifies where to extract the JSON Web Token (JWT) from inbound requests. + // +listType=set + IdentitySources []*string `json:"identitySources,omitempty" tf:"identity_sources,omitempty"` + + // Configuration of a JWT authorizer. Required for the JWT authorizer type. + // Supported only for HTTP APIs. 
+ JwtConfiguration *JwtConfigurationObservation `json:"jwtConfiguration,omitempty" tf:"jwt_configuration,omitempty"` + + // Name of the authorizer. Must be between 1 and 128 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AuthorizerParameters struct { + + // API identifier. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API + // +kubebuilder:validation:Optional + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // Required credentials as an IAM role for API Gateway to invoke the authorizer. + // Supported only for REQUEST authorizers. + // +kubebuilder:validation:Optional + AuthorizerCredentialsArn *string `json:"authorizerCredentialsArn,omitempty" tf:"authorizer_credentials_arn,omitempty"` + + // Format of the payload sent to an HTTP API Lambda authorizer. Required for HTTP API Lambda authorizers. + // Valid values: 1.0, 2.0. + // +kubebuilder:validation:Optional + AuthorizerPayloadFormatVersion *string `json:"authorizerPayloadFormatVersion,omitempty" tf:"authorizer_payload_format_version,omitempty"` + + // Time to live (TTL) for cached authorizer results, in seconds. If it equals 0, authorization caching is disabled. + // If it is greater than 0, API Gateway caches authorizer responses. The maximum value is 3600, or 1 hour. Defaults to 300. + // Supported only for HTTP API Lambda authorizers. + // +kubebuilder:validation:Optional + AuthorizerResultTTLInSeconds *float64 `json:"authorizerResultTtlInSeconds,omitempty" tf:"authorizer_result_ttl_in_seconds,omitempty"` + + // Authorizer type. Valid values: JWT, REQUEST. 
+ // Specify REQUEST for a Lambda function using incoming request parameters. + // For HTTP APIs, specify JWT to use JSON Web Tokens. + // +kubebuilder:validation:Optional + AuthorizerType *string `json:"authorizerType,omitempty" tf:"authorizer_type,omitempty"` + + // Authorizer's Uniform Resource Identifier (URI). + // For REQUEST authorizers this must be a well-formed Lambda function URI, such as the invoke_arn attribute of the aws_lambda_function resource. + // Supported only for REQUEST authorizers. Must be between 1 and 2048 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common/apis/lambda.FunctionInvokeARN() + // +kubebuilder:validation:Optional + AuthorizerURI *string `json:"authorizerUri,omitempty" tf:"authorizer_uri,omitempty"` + + // Reference to a Function in lambda to populate authorizerUri. + // +kubebuilder:validation:Optional + AuthorizerURIRef *v1.Reference `json:"authorizerUriRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate authorizerUri. + // +kubebuilder:validation:Optional + AuthorizerURISelector *v1.Selector `json:"authorizerUriSelector,omitempty" tf:"-"` + + // Whether a Lambda authorizer returns a response in a simple format. If enabled, the Lambda authorizer can return a boolean value instead of an IAM policy. + // Supported only for HTTP APIs. + // +kubebuilder:validation:Optional + EnableSimpleResponses *bool `json:"enableSimpleResponses,omitempty" tf:"enable_simple_responses,omitempty"` + + // Identity sources for which authorization is requested. + // For REQUEST authorizers the value is a list of one or more mapping expressions of the specified request parameters. + // For JWT authorizers the single entry specifies where to extract the JSON Web Token (JWT) from inbound requests. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentitySources []*string `json:"identitySources,omitempty" tf:"identity_sources,omitempty"` + + // Configuration of a JWT authorizer. Required for the JWT authorizer type. + // Supported only for HTTP APIs. + // +kubebuilder:validation:Optional + JwtConfiguration *JwtConfigurationParameters `json:"jwtConfiguration,omitempty" tf:"jwt_configuration,omitempty"` + + // Name of the authorizer. Must be between 1 and 128 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type JwtConfigurationInitParameters struct { + + // List of the intended recipients of the JWT. A valid JWT must provide an aud that matches at least one entry in this list. + // +listType=set + Audience []*string `json:"audience,omitempty" tf:"audience,omitempty"` + + // Base domain of the identity provider that issues JSON Web Tokens, such as the endpoint attribute of the aws_cognito_user_pool resource. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` +} + +type JwtConfigurationObservation struct { + + // List of the intended recipients of the JWT. A valid JWT must provide an aud that matches at least one entry in this list. + // +listType=set + Audience []*string `json:"audience,omitempty" tf:"audience,omitempty"` + + // Base domain of the identity provider that issues JSON Web Tokens, such as the endpoint attribute of the aws_cognito_user_pool resource. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` +} + +type JwtConfigurationParameters struct { + + // List of the intended recipients of the JWT. A valid JWT must provide an aud that matches at least one entry in this list. 
+ // +kubebuilder:validation:Optional + // +listType=set + Audience []*string `json:"audience,omitempty" tf:"audience,omitempty"` + + // Base domain of the identity provider that issues JSON Web Tokens, such as the endpoint attribute of the aws_cognito_user_pool resource. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` +} + +// AuthorizerSpec defines the desired state of Authorizer +type AuthorizerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AuthorizerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AuthorizerInitParameters `json:"initProvider,omitempty"` +} + +// AuthorizerStatus defines the observed state of Authorizer. +type AuthorizerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AuthorizerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Authorizer is the Schema for the Authorizers API. Manages an Amazon API Gateway Version 2 authorizer. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Authorizer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authorizerType) || (has(self.initProvider) && has(self.initProvider.authorizerType))",message="spec.forProvider.authorizerType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec AuthorizerSpec `json:"spec"` + Status AuthorizerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AuthorizerList contains a list of Authorizers +type AuthorizerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Authorizer `json:"items"` +} + +// Repository type metadata. +var ( + Authorizer_Kind = "Authorizer" + Authorizer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Authorizer_Kind}.String() + Authorizer_KindAPIVersion = Authorizer_Kind + "." 
+ CRDGroupVersion.String() + Authorizer_GroupVersionKind = CRDGroupVersion.WithKind(Authorizer_Kind) +) + +func init() { + SchemeBuilder.Register(&Authorizer{}, &AuthorizerList{}) +} diff --git a/apis/apigatewayv2/v1beta2/zz_domainname_terraformed.go b/apis/apigatewayv2/v1beta2/zz_domainname_terraformed.go new file mode 100755 index 0000000000..5847e51cf0 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_domainname_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DomainName +func (mg *DomainName) GetTerraformResourceType() string { + return "aws_apigatewayv2_domain_name" +} + +// GetConnectionDetailsMapping for this DomainName +func (tr *DomainName) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DomainName +func (tr *DomainName) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DomainName +func (tr *DomainName) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DomainName +func (tr *DomainName) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DomainName +func (tr *DomainName) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err 
 != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this DomainName
+func (tr *DomainName) SetParameters(params map[string]any) error {
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this DomainName
+func (tr *DomainName) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this DomainName
+func (tr *DomainName) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
+
+// LateInitialize this DomainName using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *DomainName) LateInitialize(attrs []byte) (bool, error) { + params := &DomainNameParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DomainName) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigatewayv2/v1beta2/zz_domainname_types.go b/apis/apigatewayv2/v1beta2/zz_domainname_types.go new file mode 100755 index 0000000000..0c288e4f13 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_domainname_types.go @@ -0,0 +1,239 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DomainNameConfigurationInitParameters struct { + + // ARN of an AWS-managed certificate that will be used by the endpoint for the domain name. AWS Certificate Manager is the only supported source. Use the aws_acm_certificate resource to configure an ACM certificate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Reference to a Certificate in acm to populate certificateArn. 
+ // +kubebuilder:validation:Optional + CertificateArnRef *v1.Reference `json:"certificateArnRef,omitempty" tf:"-"` + + // Selector for a Certificate in acm to populate certificateArn. + // +kubebuilder:validation:Optional + CertificateArnSelector *v1.Selector `json:"certificateArnSelector,omitempty" tf:"-"` + + // Endpoint type. Valid values: REGIONAL. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // ARN of the AWS-issued certificate used to validate custom domain ownership (when certificate_arn is issued via an ACM Private CA or mutual_tls_authentication is configured with an ACM-imported certificate.) + OwnershipVerificationCertificateArn *string `json:"ownershipVerificationCertificateArn,omitempty" tf:"ownership_verification_certificate_arn,omitempty"` + + // Transport Layer Security (TLS) version of the security policy for the domain name. Valid values: TLS_1_2. + SecurityPolicy *string `json:"securityPolicy,omitempty" tf:"security_policy,omitempty"` +} + +type DomainNameConfigurationObservation struct { + + // ARN of an AWS-managed certificate that will be used by the endpoint for the domain name. AWS Certificate Manager is the only supported source. Use the aws_acm_certificate resource to configure an ACM certificate. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Endpoint type. Valid values: REGIONAL. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // (Computed) Amazon Route 53 Hosted Zone ID of the endpoint. + HostedZoneID *string `json:"hostedZoneId,omitempty" tf:"hosted_zone_id,omitempty"` + + // ARN of the AWS-issued certificate used to validate custom domain ownership (when certificate_arn is issued via an ACM Private CA or mutual_tls_authentication is configured with an ACM-imported certificate.) 
+ OwnershipVerificationCertificateArn *string `json:"ownershipVerificationCertificateArn,omitempty" tf:"ownership_verification_certificate_arn,omitempty"` + + // Transport Layer Security (TLS) version of the security policy for the domain name. Valid values: TLS_1_2. + SecurityPolicy *string `json:"securityPolicy,omitempty" tf:"security_policy,omitempty"` + + // (Computed) Target domain name. + TargetDomainName *string `json:"targetDomainName,omitempty" tf:"target_domain_name,omitempty"` +} + +type DomainNameConfigurationParameters struct { + + // ARN of an AWS-managed certificate that will be used by the endpoint for the domain name. AWS Certificate Manager is the only supported source. Use the aws_acm_certificate resource to configure an ACM certificate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Reference to a Certificate in acm to populate certificateArn. + // +kubebuilder:validation:Optional + CertificateArnRef *v1.Reference `json:"certificateArnRef,omitempty" tf:"-"` + + // Selector for a Certificate in acm to populate certificateArn. + // +kubebuilder:validation:Optional + CertificateArnSelector *v1.Selector `json:"certificateArnSelector,omitempty" tf:"-"` + + // Endpoint type. Valid values: REGIONAL. + // +kubebuilder:validation:Optional + EndpointType *string `json:"endpointType" tf:"endpoint_type,omitempty"` + + // ARN of the AWS-issued certificate used to validate custom domain ownership (when certificate_arn is issued via an ACM Private CA or mutual_tls_authentication is configured with an ACM-imported certificate.) 
+ // +kubebuilder:validation:Optional + OwnershipVerificationCertificateArn *string `json:"ownershipVerificationCertificateArn,omitempty" tf:"ownership_verification_certificate_arn,omitempty"` + + // Transport Layer Security (TLS) version of the security policy for the domain name. Valid values: TLS_1_2. + // +kubebuilder:validation:Optional + SecurityPolicy *string `json:"securityPolicy" tf:"security_policy,omitempty"` +} + +type DomainNameInitParameters struct { + + // Domain name configuration. See below. + DomainNameConfiguration *DomainNameConfigurationInitParameters `json:"domainNameConfiguration,omitempty" tf:"domain_name_configuration,omitempty"` + + // Mutual TLS authentication configuration for the domain name. + MutualTLSAuthentication *MutualTLSAuthenticationInitParameters `json:"mutualTlsAuthentication,omitempty" tf:"mutual_tls_authentication,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DomainNameObservation struct { + + // API mapping selection expression for the domain name. + APIMappingSelectionExpression *string `json:"apiMappingSelectionExpression,omitempty" tf:"api_mapping_selection_expression,omitempty"` + + // ARN of the domain name. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Domain name configuration. See below. + DomainNameConfiguration *DomainNameConfigurationObservation `json:"domainNameConfiguration,omitempty" tf:"domain_name_configuration,omitempty"` + + // Domain name identifier. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Mutual TLS authentication configuration for the domain name. + MutualTLSAuthentication *MutualTLSAuthenticationObservation `json:"mutualTlsAuthentication,omitempty" tf:"mutual_tls_authentication,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type DomainNameParameters struct { + + // Domain name configuration. See below. + // +kubebuilder:validation:Optional + DomainNameConfiguration *DomainNameConfigurationParameters `json:"domainNameConfiguration,omitempty" tf:"domain_name_configuration,omitempty"` + + // Mutual TLS authentication configuration for the domain name. + // +kubebuilder:validation:Optional + MutualTLSAuthentication *MutualTLSAuthenticationParameters `json:"mutualTlsAuthentication,omitempty" tf:"mutual_tls_authentication,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MutualTLSAuthenticationInitParameters struct { + + // Amazon S3 URL that specifies the truststore for mutual TLS authentication, for example, s3://bucket-name/key-name. The truststore can contain certificates from public or private certificate authorities. To update the truststore, upload a new version to S3, and then update your custom domain name to use the new version. + TruststoreURI *string `json:"truststoreUri,omitempty" tf:"truststore_uri,omitempty"` + + // Version of the S3 object that contains the truststore. To specify a version, you must have versioning enabled for the S3 bucket. 
+ TruststoreVersion *string `json:"truststoreVersion,omitempty" tf:"truststore_version,omitempty"` +} + +type MutualTLSAuthenticationObservation struct { + + // Amazon S3 URL that specifies the truststore for mutual TLS authentication, for example, s3://bucket-name/key-name. The truststore can contain certificates from public or private certificate authorities. To update the truststore, upload a new version to S3, and then update your custom domain name to use the new version. + TruststoreURI *string `json:"truststoreUri,omitempty" tf:"truststore_uri,omitempty"` + + // Version of the S3 object that contains the truststore. To specify a version, you must have versioning enabled for the S3 bucket. + TruststoreVersion *string `json:"truststoreVersion,omitempty" tf:"truststore_version,omitempty"` +} + +type MutualTLSAuthenticationParameters struct { + + // Amazon S3 URL that specifies the truststore for mutual TLS authentication, for example, s3://bucket-name/key-name. The truststore can contain certificates from public or private certificate authorities. To update the truststore, upload a new version to S3, and then update your custom domain name to use the new version. + // +kubebuilder:validation:Optional + TruststoreURI *string `json:"truststoreUri" tf:"truststore_uri,omitempty"` + + // Version of the S3 object that contains the truststore. To specify a version, you must have versioning enabled for the S3 bucket. + // +kubebuilder:validation:Optional + TruststoreVersion *string `json:"truststoreVersion,omitempty" tf:"truststore_version,omitempty"` +} + +// DomainNameSpec defines the desired state of DomainName +type DomainNameSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DomainNameParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DomainNameInitParameters `json:"initProvider,omitempty"` +} + +// DomainNameStatus defines the observed state of DomainName. +type DomainNameStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DomainNameObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DomainName is the Schema for the DomainNames API. Manages an Amazon API Gateway Version 2 domain name. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DomainName struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.domainNameConfiguration) || (has(self.initProvider) && has(self.initProvider.domainNameConfiguration))",message="spec.forProvider.domainNameConfiguration is a required parameter" + Spec DomainNameSpec `json:"spec"` + Status DomainNameStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// DomainNameList contains a list of DomainNames +type DomainNameList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DomainName `json:"items"` +} + +// Repository type metadata. +var ( + DomainName_Kind = "DomainName" + DomainName_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DomainName_Kind}.String() + DomainName_KindAPIVersion = DomainName_Kind + "." + CRDGroupVersion.String() + DomainName_GroupVersionKind = CRDGroupVersion.WithKind(DomainName_Kind) +) + +func init() { + SchemeBuilder.Register(&DomainName{}, &DomainNameList{}) +} diff --git a/apis/apigatewayv2/v1beta2/zz_generated.conversion_hubs.go b/apis/apigatewayv2/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..7335541202 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *API) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Authorizer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DomainName) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Integration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Stage) Hub() {} diff --git a/apis/apigatewayv2/v1beta2/zz_generated.deepcopy.go b/apis/apigatewayv2/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..13ad9ac46f --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3073 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *API) DeepCopyInto(out *API) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new API. +func (in *API) DeepCopy() *API { + if in == nil { + return nil + } + out := new(API) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *API) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIInitParameters) DeepCopyInto(out *APIInitParameters) { + *out = *in + if in.APIKeySelectionExpression != nil { + in, out := &in.APIKeySelectionExpression, &out.APIKeySelectionExpression + *out = new(string) + **out = **in + } + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.CorsConfiguration != nil { + in, out := &in.CorsConfiguration, &out.CorsConfiguration + *out = new(CorsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CredentialsArn != nil { + in, out := &in.CredentialsArn, &out.CredentialsArn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableExecuteAPIEndpoint != nil { + in, out := &in.DisableExecuteAPIEndpoint, &out.DisableExecuteAPIEndpoint + *out = new(bool) + **out = **in + } + if in.FailOnWarnings != nil { + in, out := &in.FailOnWarnings, &out.FailOnWarnings + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtocolType != nil { + in, out := &in.ProtocolType, &out.ProtocolType + *out = new(string) + **out = **in + } + if in.RouteKey != nil { + in, out := &in.RouteKey, &out.RouteKey + *out = new(string) + **out = **in + } + if in.RouteSelectionExpression != nil { + in, out := &in.RouteSelectionExpression, &out.RouteSelectionExpression + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = 
new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIInitParameters. +func (in *APIInitParameters) DeepCopy() *APIInitParameters { + if in == nil { + return nil + } + out := new(APIInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIList) DeepCopyInto(out *APIList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]API, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIList. +func (in *APIList) DeepCopy() *APIList { + if in == nil { + return nil + } + out := new(APIList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIObservation) DeepCopyInto(out *APIObservation) { + *out = *in + if in.APIEndpoint != nil { + in, out := &in.APIEndpoint, &out.APIEndpoint + *out = new(string) + **out = **in + } + if in.APIKeySelectionExpression != nil { + in, out := &in.APIKeySelectionExpression, &out.APIKeySelectionExpression + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.CorsConfiguration != nil { + in, out := &in.CorsConfiguration, &out.CorsConfiguration + *out = new(CorsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CredentialsArn != nil { + in, out := &in.CredentialsArn, &out.CredentialsArn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableExecuteAPIEndpoint != nil { + in, out := &in.DisableExecuteAPIEndpoint, &out.DisableExecuteAPIEndpoint + *out = new(bool) + **out = **in + } + if in.ExecutionArn != nil { + in, out := &in.ExecutionArn, &out.ExecutionArn + *out = new(string) + **out = **in + } + if in.FailOnWarnings != nil { + in, out := &in.FailOnWarnings, &out.FailOnWarnings + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtocolType != nil { + in, out := &in.ProtocolType, &out.ProtocolType + *out = new(string) + **out = **in + } + if in.RouteKey != nil { + in, out := &in.RouteKey, &out.RouteKey + *out = new(string) + **out = **in + } + if in.RouteSelectionExpression != nil { + in, out := &in.RouteSelectionExpression, &out.RouteSelectionExpression + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, 
len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIObservation. +func (in *APIObservation) DeepCopy() *APIObservation { + if in == nil { + return nil + } + out := new(APIObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIParameters) DeepCopyInto(out *APIParameters) { + *out = *in + if in.APIKeySelectionExpression != nil { + in, out := &in.APIKeySelectionExpression, &out.APIKeySelectionExpression + *out = new(string) + **out = **in + } + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.CorsConfiguration != nil { + in, out := &in.CorsConfiguration, &out.CorsConfiguration + *out = new(CorsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CredentialsArn != nil { + in, out := &in.CredentialsArn, &out.CredentialsArn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableExecuteAPIEndpoint != nil { + in, out := &in.DisableExecuteAPIEndpoint, &out.DisableExecuteAPIEndpoint + *out = new(bool) + **out = **in + } + if in.FailOnWarnings != nil { + in, out := &in.FailOnWarnings, &out.FailOnWarnings + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtocolType != nil { + in, out := &in.ProtocolType, &out.ProtocolType + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RouteKey != nil { + in, out := &in.RouteKey, &out.RouteKey + *out = new(string) + **out = **in + } + if in.RouteSelectionExpression != nil { + in, out := &in.RouteSelectionExpression, &out.RouteSelectionExpression + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + 
**out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIParameters. +func (in *APIParameters) DeepCopy() *APIParameters { + if in == nil { + return nil + } + out := new(APIParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APISpec) DeepCopyInto(out *APISpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISpec. +func (in *APISpec) DeepCopy() *APISpec { + if in == nil { + return nil + } + out := new(APISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIStatus) DeepCopyInto(out *APIStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIStatus. +func (in *APIStatus) DeepCopy() *APIStatus { + if in == nil { + return nil + } + out := new(APIStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessLogSettingsInitParameters) DeepCopyInto(out *AccessLogSettingsInitParameters) { + *out = *in + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogSettingsInitParameters. +func (in *AccessLogSettingsInitParameters) DeepCopy() *AccessLogSettingsInitParameters { + if in == nil { + return nil + } + out := new(AccessLogSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogSettingsObservation) DeepCopyInto(out *AccessLogSettingsObservation) { + *out = *in + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogSettingsObservation. +func (in *AccessLogSettingsObservation) DeepCopy() *AccessLogSettingsObservation { + if in == nil { + return nil + } + out := new(AccessLogSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessLogSettingsParameters) DeepCopyInto(out *AccessLogSettingsParameters) { + *out = *in + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogSettingsParameters. +func (in *AccessLogSettingsParameters) DeepCopy() *AccessLogSettingsParameters { + if in == nil { + return nil + } + out := new(AccessLogSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authorizer) DeepCopyInto(out *Authorizer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authorizer. +func (in *Authorizer) DeepCopy() *Authorizer { + if in == nil { + return nil + } + out := new(Authorizer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authorizer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthorizerInitParameters) DeepCopyInto(out *AuthorizerInitParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AuthorizerCredentialsArn != nil { + in, out := &in.AuthorizerCredentialsArn, &out.AuthorizerCredentialsArn + *out = new(string) + **out = **in + } + if in.AuthorizerPayloadFormatVersion != nil { + in, out := &in.AuthorizerPayloadFormatVersion, &out.AuthorizerPayloadFormatVersion + *out = new(string) + **out = **in + } + if in.AuthorizerResultTTLInSeconds != nil { + in, out := &in.AuthorizerResultTTLInSeconds, &out.AuthorizerResultTTLInSeconds + *out = new(float64) + **out = **in + } + if in.AuthorizerType != nil { + in, out := &in.AuthorizerType, &out.AuthorizerType + *out = new(string) + **out = **in + } + if in.AuthorizerURI != nil { + in, out := &in.AuthorizerURI, &out.AuthorizerURI + *out = new(string) + **out = **in + } + if in.AuthorizerURIRef != nil { + in, out := &in.AuthorizerURIRef, &out.AuthorizerURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AuthorizerURISelector != nil { + in, out := &in.AuthorizerURISelector, &out.AuthorizerURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EnableSimpleResponses != nil { + in, out := &in.EnableSimpleResponses, &out.EnableSimpleResponses + *out = new(bool) + **out = **in + } + if in.IdentitySources != nil { + in, out := &in.IdentitySources, &out.IdentitySources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtConfiguration != nil { + in, out := &in.JwtConfiguration, &out.JwtConfiguration 
+ *out = new(JwtConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerInitParameters. +func (in *AuthorizerInitParameters) DeepCopy() *AuthorizerInitParameters { + if in == nil { + return nil + } + out := new(AuthorizerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerList) DeepCopyInto(out *AuthorizerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authorizer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerList. +func (in *AuthorizerList) DeepCopy() *AuthorizerList { + if in == nil { + return nil + } + out := new(AuthorizerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthorizerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthorizerObservation) DeepCopyInto(out *AuthorizerObservation) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.AuthorizerCredentialsArn != nil { + in, out := &in.AuthorizerCredentialsArn, &out.AuthorizerCredentialsArn + *out = new(string) + **out = **in + } + if in.AuthorizerPayloadFormatVersion != nil { + in, out := &in.AuthorizerPayloadFormatVersion, &out.AuthorizerPayloadFormatVersion + *out = new(string) + **out = **in + } + if in.AuthorizerResultTTLInSeconds != nil { + in, out := &in.AuthorizerResultTTLInSeconds, &out.AuthorizerResultTTLInSeconds + *out = new(float64) + **out = **in + } + if in.AuthorizerType != nil { + in, out := &in.AuthorizerType, &out.AuthorizerType + *out = new(string) + **out = **in + } + if in.AuthorizerURI != nil { + in, out := &in.AuthorizerURI, &out.AuthorizerURI + *out = new(string) + **out = **in + } + if in.EnableSimpleResponses != nil { + in, out := &in.EnableSimpleResponses, &out.EnableSimpleResponses + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IdentitySources != nil { + in, out := &in.IdentitySources, &out.IdentitySources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtConfiguration != nil { + in, out := &in.JwtConfiguration, &out.JwtConfiguration + *out = new(JwtConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerObservation. 
+func (in *AuthorizerObservation) DeepCopy() *AuthorizerObservation { + if in == nil { + return nil + } + out := new(AuthorizerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerParameters) DeepCopyInto(out *AuthorizerParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AuthorizerCredentialsArn != nil { + in, out := &in.AuthorizerCredentialsArn, &out.AuthorizerCredentialsArn + *out = new(string) + **out = **in + } + if in.AuthorizerPayloadFormatVersion != nil { + in, out := &in.AuthorizerPayloadFormatVersion, &out.AuthorizerPayloadFormatVersion + *out = new(string) + **out = **in + } + if in.AuthorizerResultTTLInSeconds != nil { + in, out := &in.AuthorizerResultTTLInSeconds, &out.AuthorizerResultTTLInSeconds + *out = new(float64) + **out = **in + } + if in.AuthorizerType != nil { + in, out := &in.AuthorizerType, &out.AuthorizerType + *out = new(string) + **out = **in + } + if in.AuthorizerURI != nil { + in, out := &in.AuthorizerURI, &out.AuthorizerURI + *out = new(string) + **out = **in + } + if in.AuthorizerURIRef != nil { + in, out := &in.AuthorizerURIRef, &out.AuthorizerURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AuthorizerURISelector != nil { + in, out := &in.AuthorizerURISelector, &out.AuthorizerURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EnableSimpleResponses != nil { + in, out := &in.EnableSimpleResponses, &out.EnableSimpleResponses + *out = new(bool) + **out = **in + } + if in.IdentitySources != nil { + in, out := 
&in.IdentitySources, &out.IdentitySources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtConfiguration != nil { + in, out := &in.JwtConfiguration, &out.JwtConfiguration + *out = new(JwtConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerParameters. +func (in *AuthorizerParameters) DeepCopy() *AuthorizerParameters { + if in == nil { + return nil + } + out := new(AuthorizerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerSpec) DeepCopyInto(out *AuthorizerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerSpec. +func (in *AuthorizerSpec) DeepCopy() *AuthorizerSpec { + if in == nil { + return nil + } + out := new(AuthorizerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerStatus) DeepCopyInto(out *AuthorizerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerStatus. 
+func (in *AuthorizerStatus) DeepCopy() *AuthorizerStatus { + if in == nil { + return nil + } + out := new(AuthorizerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsConfigurationInitParameters) DeepCopyInto(out *CorsConfigurationInitParameters) { + *out = *in + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowOrigins != nil { + in, out := &in.AllowOrigins, &out.AllowOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsConfigurationInitParameters. 
+func (in *CorsConfigurationInitParameters) DeepCopy() *CorsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CorsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsConfigurationObservation) DeepCopyInto(out *CorsConfigurationObservation) { + *out = *in + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowOrigins != nil { + in, out := &in.AllowOrigins, &out.AllowOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsConfigurationObservation. 
+func (in *CorsConfigurationObservation) DeepCopy() *CorsConfigurationObservation { + if in == nil { + return nil + } + out := new(CorsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsConfigurationParameters) DeepCopyInto(out *CorsConfigurationParameters) { + *out = *in + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowOrigins != nil { + in, out := &in.AllowOrigins, &out.AllowOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsConfigurationParameters. 
+func (in *CorsConfigurationParameters) DeepCopy() *CorsConfigurationParameters { + if in == nil { + return nil + } + out := new(CorsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultRouteSettingsInitParameters) DeepCopyInto(out *DefaultRouteSettingsInitParameters) { + *out = *in + if in.DataTraceEnabled != nil { + in, out := &in.DataTraceEnabled, &out.DataTraceEnabled + *out = new(bool) + **out = **in + } + if in.DetailedMetricsEnabled != nil { + in, out := &in.DetailedMetricsEnabled, &out.DetailedMetricsEnabled + *out = new(bool) + **out = **in + } + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = new(string) + **out = **in + } + if in.ThrottlingBurstLimit != nil { + in, out := &in.ThrottlingBurstLimit, &out.ThrottlingBurstLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRateLimit != nil { + in, out := &in.ThrottlingRateLimit, &out.ThrottlingRateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRouteSettingsInitParameters. +func (in *DefaultRouteSettingsInitParameters) DeepCopy() *DefaultRouteSettingsInitParameters { + if in == nil { + return nil + } + out := new(DefaultRouteSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultRouteSettingsObservation) DeepCopyInto(out *DefaultRouteSettingsObservation) { + *out = *in + if in.DataTraceEnabled != nil { + in, out := &in.DataTraceEnabled, &out.DataTraceEnabled + *out = new(bool) + **out = **in + } + if in.DetailedMetricsEnabled != nil { + in, out := &in.DetailedMetricsEnabled, &out.DetailedMetricsEnabled + *out = new(bool) + **out = **in + } + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = new(string) + **out = **in + } + if in.ThrottlingBurstLimit != nil { + in, out := &in.ThrottlingBurstLimit, &out.ThrottlingBurstLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRateLimit != nil { + in, out := &in.ThrottlingRateLimit, &out.ThrottlingRateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRouteSettingsObservation. +func (in *DefaultRouteSettingsObservation) DeepCopy() *DefaultRouteSettingsObservation { + if in == nil { + return nil + } + out := new(DefaultRouteSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultRouteSettingsParameters) DeepCopyInto(out *DefaultRouteSettingsParameters) { + *out = *in + if in.DataTraceEnabled != nil { + in, out := &in.DataTraceEnabled, &out.DataTraceEnabled + *out = new(bool) + **out = **in + } + if in.DetailedMetricsEnabled != nil { + in, out := &in.DetailedMetricsEnabled, &out.DetailedMetricsEnabled + *out = new(bool) + **out = **in + } + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = new(string) + **out = **in + } + if in.ThrottlingBurstLimit != nil { + in, out := &in.ThrottlingBurstLimit, &out.ThrottlingBurstLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRateLimit != nil { + in, out := &in.ThrottlingRateLimit, &out.ThrottlingRateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRouteSettingsParameters. +func (in *DefaultRouteSettingsParameters) DeepCopy() *DefaultRouteSettingsParameters { + if in == nil { + return nil + } + out := new(DefaultRouteSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainName) DeepCopyInto(out *DomainName) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainName. +func (in *DomainName) DeepCopy() *DomainName { + if in == nil { + return nil + } + out := new(DomainName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DomainName) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameConfigurationInitParameters) DeepCopyInto(out *DomainNameConfigurationInitParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.CertificateArnRef != nil { + in, out := &in.CertificateArnRef, &out.CertificateArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateArnSelector != nil { + in, out := &in.CertificateArnSelector, &out.CertificateArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.OwnershipVerificationCertificateArn != nil { + in, out := &in.OwnershipVerificationCertificateArn, &out.OwnershipVerificationCertificateArn + *out = new(string) + **out = **in + } + if in.SecurityPolicy != nil { + in, out := &in.SecurityPolicy, &out.SecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameConfigurationInitParameters. +func (in *DomainNameConfigurationInitParameters) DeepCopy() *DomainNameConfigurationInitParameters { + if in == nil { + return nil + } + out := new(DomainNameConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainNameConfigurationObservation) DeepCopyInto(out *DomainNameConfigurationObservation) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } + if in.OwnershipVerificationCertificateArn != nil { + in, out := &in.OwnershipVerificationCertificateArn, &out.OwnershipVerificationCertificateArn + *out = new(string) + **out = **in + } + if in.SecurityPolicy != nil { + in, out := &in.SecurityPolicy, &out.SecurityPolicy + *out = new(string) + **out = **in + } + if in.TargetDomainName != nil { + in, out := &in.TargetDomainName, &out.TargetDomainName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameConfigurationObservation. +func (in *DomainNameConfigurationObservation) DeepCopy() *DomainNameConfigurationObservation { + if in == nil { + return nil + } + out := new(DomainNameConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainNameConfigurationParameters) DeepCopyInto(out *DomainNameConfigurationParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.CertificateArnRef != nil { + in, out := &in.CertificateArnRef, &out.CertificateArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateArnSelector != nil { + in, out := &in.CertificateArnSelector, &out.CertificateArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.OwnershipVerificationCertificateArn != nil { + in, out := &in.OwnershipVerificationCertificateArn, &out.OwnershipVerificationCertificateArn + *out = new(string) + **out = **in + } + if in.SecurityPolicy != nil { + in, out := &in.SecurityPolicy, &out.SecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameConfigurationParameters. +func (in *DomainNameConfigurationParameters) DeepCopy() *DomainNameConfigurationParameters { + if in == nil { + return nil + } + out := new(DomainNameConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainNameInitParameters) DeepCopyInto(out *DomainNameInitParameters) { + *out = *in + if in.DomainNameConfiguration != nil { + in, out := &in.DomainNameConfiguration, &out.DomainNameConfiguration + *out = new(DomainNameConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MutualTLSAuthentication != nil { + in, out := &in.MutualTLSAuthentication, &out.MutualTLSAuthentication + *out = new(MutualTLSAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameInitParameters. +func (in *DomainNameInitParameters) DeepCopy() *DomainNameInitParameters { + if in == nil { + return nil + } + out := new(DomainNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameList) DeepCopyInto(out *DomainNameList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DomainName, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameList. +func (in *DomainNameList) DeepCopy() *DomainNameList { + if in == nil { + return nil + } + out := new(DomainNameList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DomainNameList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameObservation) DeepCopyInto(out *DomainNameObservation) { + *out = *in + if in.APIMappingSelectionExpression != nil { + in, out := &in.APIMappingSelectionExpression, &out.APIMappingSelectionExpression + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DomainNameConfiguration != nil { + in, out := &in.DomainNameConfiguration, &out.DomainNameConfiguration + *out = new(DomainNameConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MutualTLSAuthentication != nil { + in, out := &in.MutualTLSAuthentication, &out.MutualTLSAuthentication + *out = new(MutualTLSAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameObservation. 
+func (in *DomainNameObservation) DeepCopy() *DomainNameObservation { + if in == nil { + return nil + } + out := new(DomainNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameParameters) DeepCopyInto(out *DomainNameParameters) { + *out = *in + if in.DomainNameConfiguration != nil { + in, out := &in.DomainNameConfiguration, &out.DomainNameConfiguration + *out = new(DomainNameConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.MutualTLSAuthentication != nil { + in, out := &in.MutualTLSAuthentication, &out.MutualTLSAuthentication + *out = new(MutualTLSAuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameParameters. +func (in *DomainNameParameters) DeepCopy() *DomainNameParameters { + if in == nil { + return nil + } + out := new(DomainNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameSpec) DeepCopyInto(out *DomainNameSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameSpec. 
+func (in *DomainNameSpec) DeepCopy() *DomainNameSpec { + if in == nil { + return nil + } + out := new(DomainNameSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainNameStatus) DeepCopyInto(out *DomainNameStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainNameStatus. +func (in *DomainNameStatus) DeepCopy() *DomainNameStatus { + if in == nil { + return nil + } + out := new(DomainNameStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Integration) DeepCopyInto(out *Integration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Integration. +func (in *Integration) DeepCopy() *Integration { + if in == nil { + return nil + } + out := new(Integration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Integration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationInitParameters) DeepCopyInto(out *IntegrationInitParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.ConnectionIDRef != nil { + in, out := &in.ConnectionIDRef, &out.ConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionIDSelector != nil { + in, out := &in.ConnectionIDSelector, &out.ConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.ContentHandlingStrategy != nil { + in, out := &in.ContentHandlingStrategy, &out.ContentHandlingStrategy + *out = new(string) + **out = **in + } + if in.CredentialsArn != nil { + in, out := &in.CredentialsArn, &out.CredentialsArn + *out = new(string) + **out = **in + } + if in.CredentialsArnRef != nil { + in, out := &in.CredentialsArnRef, &out.CredentialsArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CredentialsArnSelector != nil { + in, out := &in.CredentialsArnSelector, &out.CredentialsArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationMethod != nil { + in, out := &in.IntegrationMethod, &out.IntegrationMethod + *out = new(string) + **out = **in + } + if in.IntegrationSubtype != nil { + in, out := &in.IntegrationSubtype, &out.IntegrationSubtype + *out = new(string) + **out = **in + } + if 
in.IntegrationType != nil { + in, out := &in.IntegrationType, &out.IntegrationType + *out = new(string) + **out = **in + } + if in.IntegrationURI != nil { + in, out := &in.IntegrationURI, &out.IntegrationURI + *out = new(string) + **out = **in + } + if in.IntegrationURIRef != nil { + in, out := &in.IntegrationURIRef, &out.IntegrationURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IntegrationURISelector != nil { + in, out := &in.IntegrationURISelector, &out.IntegrationURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PassthroughBehavior != nil { + in, out := &in.PassthroughBehavior, &out.PassthroughBehavior + *out = new(string) + **out = **in + } + if in.PayloadFormatVersion != nil { + in, out := &in.PayloadFormatVersion, &out.PayloadFormatVersion + *out = new(string) + **out = **in + } + if in.RequestParameters != nil { + in, out := &in.RequestParameters, &out.RequestParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RequestTemplates != nil { + in, out := &in.RequestTemplates, &out.RequestTemplates + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResponseParameters != nil { + in, out := &in.ResponseParameters, &out.ResponseParameters + *out = make([]ResponseParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TemplateSelectionExpression != nil { + in, out := 
&in.TemplateSelectionExpression, &out.TemplateSelectionExpression + *out = new(string) + **out = **in + } + if in.TimeoutMilliseconds != nil { + in, out := &in.TimeoutMilliseconds, &out.TimeoutMilliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationInitParameters. +func (in *IntegrationInitParameters) DeepCopy() *IntegrationInitParameters { + if in == nil { + return nil + } + out := new(IntegrationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationList) DeepCopyInto(out *IntegrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Integration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationList. +func (in *IntegrationList) DeepCopy() *IntegrationList { + if in == nil { + return nil + } + out := new(IntegrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IntegrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationObservation) DeepCopyInto(out *IntegrationObservation) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.ContentHandlingStrategy != nil { + in, out := &in.ContentHandlingStrategy, &out.ContentHandlingStrategy + *out = new(string) + **out = **in + } + if in.CredentialsArn != nil { + in, out := &in.CredentialsArn, &out.CredentialsArn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationMethod != nil { + in, out := &in.IntegrationMethod, &out.IntegrationMethod + *out = new(string) + **out = **in + } + if in.IntegrationResponseSelectionExpression != nil { + in, out := &in.IntegrationResponseSelectionExpression, &out.IntegrationResponseSelectionExpression + *out = new(string) + **out = **in + } + if in.IntegrationSubtype != nil { + in, out := &in.IntegrationSubtype, &out.IntegrationSubtype + *out = new(string) + **out = **in + } + if in.IntegrationType != nil { + in, out := &in.IntegrationType, &out.IntegrationType + *out = new(string) + **out = **in + } + if in.IntegrationURI != nil { + in, out := &in.IntegrationURI, &out.IntegrationURI + *out = new(string) + **out = **in + } + if in.PassthroughBehavior != nil { + in, out := &in.PassthroughBehavior, &out.PassthroughBehavior + *out = new(string) + **out = **in + } + if in.PayloadFormatVersion != nil { + in, out := &in.PayloadFormatVersion, &out.PayloadFormatVersion + *out = new(string) + **out = **in + } + if in.RequestParameters != nil { + in, out := &in.RequestParameters, 
&out.RequestParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RequestTemplates != nil { + in, out := &in.RequestTemplates, &out.RequestTemplates + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResponseParameters != nil { + in, out := &in.ResponseParameters, &out.ResponseParameters + *out = make([]ResponseParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.TemplateSelectionExpression != nil { + in, out := &in.TemplateSelectionExpression, &out.TemplateSelectionExpression + *out = new(string) + **out = **in + } + if in.TimeoutMilliseconds != nil { + in, out := &in.TimeoutMilliseconds, &out.TimeoutMilliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationObservation. +func (in *IntegrationObservation) DeepCopy() *IntegrationObservation { + if in == nil { + return nil + } + out := new(IntegrationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationParameters) DeepCopyInto(out *IntegrationParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.ConnectionIDRef != nil { + in, out := &in.ConnectionIDRef, &out.ConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionIDSelector != nil { + in, out := &in.ConnectionIDSelector, &out.ConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.ContentHandlingStrategy != nil { + in, out := &in.ContentHandlingStrategy, &out.ContentHandlingStrategy + *out = new(string) + **out = **in + } + if in.CredentialsArn != nil { + in, out := &in.CredentialsArn, &out.CredentialsArn + *out = new(string) + **out = **in + } + if in.CredentialsArnRef != nil { + in, out := &in.CredentialsArnRef, &out.CredentialsArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CredentialsArnSelector != nil { + in, out := &in.CredentialsArnSelector, &out.CredentialsArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationMethod != nil { + in, out := &in.IntegrationMethod, &out.IntegrationMethod + *out = new(string) + **out = **in + } + if in.IntegrationSubtype != nil { + in, out := &in.IntegrationSubtype, &out.IntegrationSubtype + *out = new(string) + **out = **in + } + if 
in.IntegrationType != nil { + in, out := &in.IntegrationType, &out.IntegrationType + *out = new(string) + **out = **in + } + if in.IntegrationURI != nil { + in, out := &in.IntegrationURI, &out.IntegrationURI + *out = new(string) + **out = **in + } + if in.IntegrationURIRef != nil { + in, out := &in.IntegrationURIRef, &out.IntegrationURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IntegrationURISelector != nil { + in, out := &in.IntegrationURISelector, &out.IntegrationURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PassthroughBehavior != nil { + in, out := &in.PassthroughBehavior, &out.PassthroughBehavior + *out = new(string) + **out = **in + } + if in.PayloadFormatVersion != nil { + in, out := &in.PayloadFormatVersion, &out.PayloadFormatVersion + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RequestParameters != nil { + in, out := &in.RequestParameters, &out.RequestParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RequestTemplates != nil { + in, out := &in.RequestTemplates, &out.RequestTemplates + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResponseParameters != nil { + in, out := &in.ResponseParameters, &out.ResponseParameters + *out = make([]ResponseParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfigParameters) + 
(*in).DeepCopyInto(*out) + } + if in.TemplateSelectionExpression != nil { + in, out := &in.TemplateSelectionExpression, &out.TemplateSelectionExpression + *out = new(string) + **out = **in + } + if in.TimeoutMilliseconds != nil { + in, out := &in.TimeoutMilliseconds, &out.TimeoutMilliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationParameters. +func (in *IntegrationParameters) DeepCopy() *IntegrationParameters { + if in == nil { + return nil + } + out := new(IntegrationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationSpec) DeepCopyInto(out *IntegrationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSpec. +func (in *IntegrationSpec) DeepCopy() *IntegrationSpec { + if in == nil { + return nil + } + out := new(IntegrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationStatus) DeepCopyInto(out *IntegrationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationStatus. +func (in *IntegrationStatus) DeepCopy() *IntegrationStatus { + if in == nil { + return nil + } + out := new(IntegrationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JwtConfigurationInitParameters) DeepCopyInto(out *JwtConfigurationInitParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JwtConfigurationInitParameters. +func (in *JwtConfigurationInitParameters) DeepCopy() *JwtConfigurationInitParameters { + if in == nil { + return nil + } + out := new(JwtConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JwtConfigurationObservation) DeepCopyInto(out *JwtConfigurationObservation) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JwtConfigurationObservation. +func (in *JwtConfigurationObservation) DeepCopy() *JwtConfigurationObservation { + if in == nil { + return nil + } + out := new(JwtConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JwtConfigurationParameters) DeepCopyInto(out *JwtConfigurationParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JwtConfigurationParameters. +func (in *JwtConfigurationParameters) DeepCopy() *JwtConfigurationParameters { + if in == nil { + return nil + } + out := new(JwtConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutualTLSAuthenticationInitParameters) DeepCopyInto(out *MutualTLSAuthenticationInitParameters) { + *out = *in + if in.TruststoreURI != nil { + in, out := &in.TruststoreURI, &out.TruststoreURI + *out = new(string) + **out = **in + } + if in.TruststoreVersion != nil { + in, out := &in.TruststoreVersion, &out.TruststoreVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualTLSAuthenticationInitParameters. +func (in *MutualTLSAuthenticationInitParameters) DeepCopy() *MutualTLSAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(MutualTLSAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MutualTLSAuthenticationObservation) DeepCopyInto(out *MutualTLSAuthenticationObservation) { + *out = *in + if in.TruststoreURI != nil { + in, out := &in.TruststoreURI, &out.TruststoreURI + *out = new(string) + **out = **in + } + if in.TruststoreVersion != nil { + in, out := &in.TruststoreVersion, &out.TruststoreVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualTLSAuthenticationObservation. +func (in *MutualTLSAuthenticationObservation) DeepCopy() *MutualTLSAuthenticationObservation { + if in == nil { + return nil + } + out := new(MutualTLSAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutualTLSAuthenticationParameters) DeepCopyInto(out *MutualTLSAuthenticationParameters) { + *out = *in + if in.TruststoreURI != nil { + in, out := &in.TruststoreURI, &out.TruststoreURI + *out = new(string) + **out = **in + } + if in.TruststoreVersion != nil { + in, out := &in.TruststoreVersion, &out.TruststoreVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualTLSAuthenticationParameters. +func (in *MutualTLSAuthenticationParameters) DeepCopy() *MutualTLSAuthenticationParameters { + if in == nil { + return nil + } + out := new(MutualTLSAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseParametersInitParameters) DeepCopyInto(out *ResponseParametersInitParameters) { + *out = *in + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseParametersInitParameters. +func (in *ResponseParametersInitParameters) DeepCopy() *ResponseParametersInitParameters { + if in == nil { + return nil + } + out := new(ResponseParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseParametersObservation) DeepCopyInto(out *ResponseParametersObservation) { + *out = *in + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseParametersObservation. +func (in *ResponseParametersObservation) DeepCopy() *ResponseParametersObservation { + if in == nil { + return nil + } + out := new(ResponseParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ResponseParametersParameters) DeepCopyInto(out *ResponseParametersParameters) { + *out = *in + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseParametersParameters. +func (in *ResponseParametersParameters) DeepCopy() *ResponseParametersParameters { + if in == nil { + return nil + } + out := new(ResponseParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteSettingsInitParameters) DeepCopyInto(out *RouteSettingsInitParameters) { + *out = *in + if in.DataTraceEnabled != nil { + in, out := &in.DataTraceEnabled, &out.DataTraceEnabled + *out = new(bool) + **out = **in + } + if in.DetailedMetricsEnabled != nil { + in, out := &in.DetailedMetricsEnabled, &out.DetailedMetricsEnabled + *out = new(bool) + **out = **in + } + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = new(string) + **out = **in + } + if in.RouteKey != nil { + in, out := &in.RouteKey, &out.RouteKey + *out = new(string) + **out = **in + } + if in.ThrottlingBurstLimit != nil { + in, out := &in.ThrottlingBurstLimit, &out.ThrottlingBurstLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRateLimit != nil { + in, out := &in.ThrottlingRateLimit, &out.ThrottlingRateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSettingsInitParameters. +func (in *RouteSettingsInitParameters) DeepCopy() *RouteSettingsInitParameters { + if in == nil { + return nil + } + out := new(RouteSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteSettingsObservation) DeepCopyInto(out *RouteSettingsObservation) { + *out = *in + if in.DataTraceEnabled != nil { + in, out := &in.DataTraceEnabled, &out.DataTraceEnabled + *out = new(bool) + **out = **in + } + if in.DetailedMetricsEnabled != nil { + in, out := &in.DetailedMetricsEnabled, &out.DetailedMetricsEnabled + *out = new(bool) + **out = **in + } + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = new(string) + **out = **in + } + if in.RouteKey != nil { + in, out := &in.RouteKey, &out.RouteKey + *out = new(string) + **out = **in + } + if in.ThrottlingBurstLimit != nil { + in, out := &in.ThrottlingBurstLimit, &out.ThrottlingBurstLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRateLimit != nil { + in, out := &in.ThrottlingRateLimit, &out.ThrottlingRateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSettingsObservation. +func (in *RouteSettingsObservation) DeepCopy() *RouteSettingsObservation { + if in == nil { + return nil + } + out := new(RouteSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteSettingsParameters) DeepCopyInto(out *RouteSettingsParameters) { + *out = *in + if in.DataTraceEnabled != nil { + in, out := &in.DataTraceEnabled, &out.DataTraceEnabled + *out = new(bool) + **out = **in + } + if in.DetailedMetricsEnabled != nil { + in, out := &in.DetailedMetricsEnabled, &out.DetailedMetricsEnabled + *out = new(bool) + **out = **in + } + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = new(string) + **out = **in + } + if in.RouteKey != nil { + in, out := &in.RouteKey, &out.RouteKey + *out = new(string) + **out = **in + } + if in.ThrottlingBurstLimit != nil { + in, out := &in.ThrottlingBurstLimit, &out.ThrottlingBurstLimit + *out = new(float64) + **out = **in + } + if in.ThrottlingRateLimit != nil { + in, out := &in.ThrottlingRateLimit, &out.ThrottlingRateLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSettingsParameters. +func (in *RouteSettingsParameters) DeepCopy() *RouteSettingsParameters { + if in == nil { + return nil + } + out := new(RouteSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Stage) DeepCopyInto(out *Stage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stage. +func (in *Stage) DeepCopy() *Stage { + if in == nil { + return nil + } + out := new(Stage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Stage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageInitParameters) DeepCopyInto(out *StageInitParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AccessLogSettings != nil { + in, out := &in.AccessLogSettings, &out.AccessLogSettings + *out = new(AccessLogSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoDeploy != nil { + in, out := &in.AutoDeploy, &out.AutoDeploy + *out = new(bool) + **out = **in + } + if in.ClientCertificateID != nil { + in, out := &in.ClientCertificateID, &out.ClientCertificateID + *out = new(string) + **out = **in + } + if in.DefaultRouteSettings != nil { + in, out := &in.DefaultRouteSettings, &out.DefaultRouteSettings + *out = new(DefaultRouteSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeploymentID != nil { + in, out := &in.DeploymentID, &out.DeploymentID + *out = new(string) + **out = **in + } + if in.DeploymentIDRef != nil { + in, out := &in.DeploymentIDRef, &out.DeploymentIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeploymentIDSelector != nil { + in, out := &in.DeploymentIDSelector, &out.DeploymentIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.RouteSettings != nil { + in, out := &in.RouteSettings, &out.RouteSettings + *out = make([]RouteSettingsInitParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StageVariables != nil { + in, out := &in.StageVariables, &out.StageVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageInitParameters. +func (in *StageInitParameters) DeepCopy() *StageInitParameters { + if in == nil { + return nil + } + out := new(StageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageList) DeepCopyInto(out *StageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Stage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageList. +func (in *StageList) DeepCopy() *StageList { + if in == nil { + return nil + } + out := new(StageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StageObservation) DeepCopyInto(out *StageObservation) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.AccessLogSettings != nil { + in, out := &in.AccessLogSettings, &out.AccessLogSettings + *out = new(AccessLogSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoDeploy != nil { + in, out := &in.AutoDeploy, &out.AutoDeploy + *out = new(bool) + **out = **in + } + if in.ClientCertificateID != nil { + in, out := &in.ClientCertificateID, &out.ClientCertificateID + *out = new(string) + **out = **in + } + if in.DefaultRouteSettings != nil { + in, out := &in.DefaultRouteSettings, &out.DefaultRouteSettings + *out = new(DefaultRouteSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DeploymentID != nil { + in, out := &in.DeploymentID, &out.DeploymentID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionArn != nil { + in, out := &in.ExecutionArn, &out.ExecutionArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InvokeURL != nil { + in, out := &in.InvokeURL, &out.InvokeURL + *out = new(string) + **out = **in + } + if in.RouteSettings != nil { + in, out := &in.RouteSettings, &out.RouteSettings + *out = make([]RouteSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StageVariables != nil { + in, out := &in.StageVariables, &out.StageVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if 
in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageObservation. +func (in *StageObservation) DeepCopy() *StageObservation { + if in == nil { + return nil + } + out := new(StageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StageParameters) DeepCopyInto(out *StageParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AccessLogSettings != nil { + in, out := &in.AccessLogSettings, &out.AccessLogSettings + *out = new(AccessLogSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoDeploy != nil { + in, out := &in.AutoDeploy, &out.AutoDeploy + *out = new(bool) + **out = **in + } + if in.ClientCertificateID != nil { + in, out := &in.ClientCertificateID, &out.ClientCertificateID + *out = new(string) + **out = **in + } + if in.DefaultRouteSettings != nil { + in, out := &in.DefaultRouteSettings, &out.DefaultRouteSettings + *out = new(DefaultRouteSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DeploymentID != nil { + in, out := &in.DeploymentID, &out.DeploymentID + *out = new(string) + **out = **in + } + if in.DeploymentIDRef != nil { + in, out := &in.DeploymentIDRef, &out.DeploymentIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeploymentIDSelector != nil { + in, out := &in.DeploymentIDSelector, &out.DeploymentIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RouteSettings != nil { + in, out := &in.RouteSettings, &out.RouteSettings + *out = make([]RouteSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StageVariables != nil { + in, out := &in.StageVariables, &out.StageVariables + *out = make(map[string]*string, 
len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageParameters. +func (in *StageParameters) DeepCopy() *StageParameters { + if in == nil { + return nil + } + out := new(StageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageSpec) DeepCopyInto(out *StageSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageSpec. +func (in *StageSpec) DeepCopy() *StageSpec { + if in == nil { + return nil + } + out := new(StageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageStatus) DeepCopyInto(out *StageStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageStatus. 
+func (in *StageStatus) DeepCopy() *StageStatus { + if in == nil { + return nil + } + out := new(StageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfigInitParameters) DeepCopyInto(out *TLSConfigInitParameters) { + *out = *in + if in.ServerNameToVerify != nil { + in, out := &in.ServerNameToVerify, &out.ServerNameToVerify + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfigInitParameters. +func (in *TLSConfigInitParameters) DeepCopy() *TLSConfigInitParameters { + if in == nil { + return nil + } + out := new(TLSConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfigObservation) DeepCopyInto(out *TLSConfigObservation) { + *out = *in + if in.ServerNameToVerify != nil { + in, out := &in.ServerNameToVerify, &out.ServerNameToVerify + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfigObservation. +func (in *TLSConfigObservation) DeepCopy() *TLSConfigObservation { + if in == nil { + return nil + } + out := new(TLSConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfigParameters) DeepCopyInto(out *TLSConfigParameters) { + *out = *in + if in.ServerNameToVerify != nil { + in, out := &in.ServerNameToVerify, &out.ServerNameToVerify + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfigParameters. 
+func (in *TLSConfigParameters) DeepCopy() *TLSConfigParameters { + if in == nil { + return nil + } + out := new(TLSConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/apigatewayv2/v1beta2/zz_generated.managed.go b/apis/apigatewayv2/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..91ff91f886 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this API. +func (mg *API) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this API. +func (mg *API) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this API. +func (mg *API) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this API. +func (mg *API) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this API. +func (mg *API) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this API. +func (mg *API) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this API. +func (mg *API) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this API. +func (mg *API) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this API. 
+func (mg *API) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this API. +func (mg *API) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this API. +func (mg *API) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this API. +func (mg *API) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Authorizer. +func (mg *Authorizer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Authorizer. +func (mg *Authorizer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Authorizer. +func (mg *Authorizer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Authorizer. +func (mg *Authorizer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Authorizer. +func (mg *Authorizer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Authorizer. +func (mg *Authorizer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Authorizer. +func (mg *Authorizer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Authorizer. +func (mg *Authorizer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Authorizer. 
+func (mg *Authorizer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Authorizer. +func (mg *Authorizer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Authorizer. +func (mg *Authorizer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Authorizer. +func (mg *Authorizer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DomainName. +func (mg *DomainName) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DomainName. +func (mg *DomainName) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DomainName. +func (mg *DomainName) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DomainName. +func (mg *DomainName) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DomainName. +func (mg *DomainName) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DomainName. +func (mg *DomainName) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DomainName. +func (mg *DomainName) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DomainName. 
+func (mg *DomainName) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DomainName. +func (mg *DomainName) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DomainName. +func (mg *DomainName) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DomainName. +func (mg *DomainName) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DomainName. +func (mg *DomainName) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Integration. +func (mg *Integration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Integration. +func (mg *Integration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Integration. +func (mg *Integration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Integration. +func (mg *Integration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Integration. +func (mg *Integration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Integration. +func (mg *Integration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Integration. +func (mg *Integration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this Integration. +func (mg *Integration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Integration. +func (mg *Integration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Integration. +func (mg *Integration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Integration. +func (mg *Integration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Integration. +func (mg *Integration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Stage. +func (mg *Stage) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Stage. +func (mg *Stage) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Stage. +func (mg *Stage) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Stage. +func (mg *Stage) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Stage. +func (mg *Stage) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Stage. +func (mg *Stage) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Stage. +func (mg *Stage) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Stage. 
+func (mg *Stage) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Stage. +func (mg *Stage) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Stage. +func (mg *Stage) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Stage. +func (mg *Stage) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Stage. +func (mg *Stage) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/apigatewayv2/v1beta2/zz_generated.managedlist.go b/apis/apigatewayv2/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..9219f5c7e8 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this APIList. +func (l *APIList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this AuthorizerList. +func (l *AuthorizerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DomainNameList. +func (l *DomainNameList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IntegrationList. 
+func (l *IntegrationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StageList. +func (l *StageList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/apigatewayv2/v1beta2/zz_generated.resolvers.go b/apis/apigatewayv2/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..dc806be349 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,415 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + lambda "github.com/upbound/provider-aws/config/common/apis/lambda" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Authorizer) ResolveReferences( // ResolveReferences of this Authorizer. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIIDRef, + Selector: mg.Spec.ForProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIID") + } + mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AuthorizerURI), + Extract: lambda.FunctionInvokeARN(), + Reference: mg.Spec.ForProvider.AuthorizerURIRef, + Selector: mg.Spec.ForProvider.AuthorizerURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AuthorizerURI") + } + mg.Spec.ForProvider.AuthorizerURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AuthorizerURIRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.APIID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.APIIDRef, + Selector: mg.Spec.InitProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.APIID") + } + mg.Spec.InitProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AuthorizerURI), + Extract: lambda.FunctionInvokeARN(), + Reference: mg.Spec.InitProvider.AuthorizerURIRef, + Selector: mg.Spec.InitProvider.AuthorizerURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AuthorizerURI") + } + mg.Spec.InitProvider.AuthorizerURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AuthorizerURIRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DomainName. 
+func (mg *DomainName) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.DomainNameConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DomainNameConfiguration.CertificateArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.DomainNameConfiguration.CertificateArnRef, + Selector: mg.Spec.ForProvider.DomainNameConfiguration.CertificateArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DomainNameConfiguration.CertificateArn") + } + mg.Spec.ForProvider.DomainNameConfiguration.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DomainNameConfiguration.CertificateArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.DomainNameConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DomainNameConfiguration.CertificateArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.DomainNameConfiguration.CertificateArnRef, + Selector: mg.Spec.InitProvider.DomainNameConfiguration.CertificateArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + 
return errors.Wrap(err, "mg.Spec.InitProvider.DomainNameConfiguration.CertificateArn") + } + mg.Spec.InitProvider.DomainNameConfiguration.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DomainNameConfiguration.CertificateArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Integration. +func (mg *Integration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIIDRef, + Selector: mg.Spec.ForProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIID") + } + mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "VPCLink", "VPCLinkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ConnectionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ConnectionIDRef, + Selector: mg.Spec.ForProvider.ConnectionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.ConnectionID") + } + mg.Spec.ForProvider.ConnectionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ConnectionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CredentialsArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.CredentialsArnRef, + Selector: mg.Spec.ForProvider.CredentialsArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CredentialsArn") + } + mg.Spec.ForProvider.CredentialsArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CredentialsArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IntegrationURI), + Extract: resource.ExtractParamPath("invoke_arn", true), + Reference: mg.Spec.ForProvider.IntegrationURIRef, + Selector: mg.Spec.ForProvider.IntegrationURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IntegrationURI") + } + mg.Spec.ForProvider.IntegrationURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IntegrationURIRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") + if err != nil { + return errors.Wrap(err, "failed to get 
the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.APIID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.APIIDRef, + Selector: mg.Spec.InitProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.APIID") + } + mg.Spec.InitProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "VPCLink", "VPCLinkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ConnectionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ConnectionIDRef, + Selector: mg.Spec.InitProvider.ConnectionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ConnectionID") + } + mg.Spec.InitProvider.ConnectionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ConnectionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CredentialsArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.CredentialsArnRef, + Selector: mg.Spec.InitProvider.CredentialsArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } 
+ if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CredentialsArn") + } + mg.Spec.InitProvider.CredentialsArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CredentialsArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IntegrationURI), + Extract: resource.ExtractParamPath("invoke_arn", true), + Reference: mg.Spec.InitProvider.IntegrationURIRef, + Selector: mg.Spec.InitProvider.IntegrationURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IntegrationURI") + } + mg.Spec.InitProvider.IntegrationURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IntegrationURIRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Stage. 
+func (mg *Stage) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIIDRef, + Selector: mg.Spec.ForProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIID") + } + mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Deployment", "DeploymentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DeploymentID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DeploymentIDRef, + Selector: mg.Spec.ForProvider.DeploymentIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DeploymentID") + } + mg.Spec.ForProvider.DeploymentID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DeploymentIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta2", "API", "APIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.APIID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.APIIDRef, + Selector: mg.Spec.InitProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.APIID") + } + mg.Spec.InitProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apigatewayv2.aws.upbound.io", "v1beta1", "Deployment", "DeploymentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DeploymentID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DeploymentIDRef, + Selector: mg.Spec.InitProvider.DeploymentIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DeploymentID") + } + mg.Spec.InitProvider.DeploymentID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DeploymentIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/apigatewayv2/v1beta2/zz_groupversion_info.go b/apis/apigatewayv2/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..998306bce6 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=apigatewayv2.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "apigatewayv2.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/apigatewayv2/v1beta2/zz_integration_terraformed.go b/apis/apigatewayv2/v1beta2/zz_integration_terraformed.go new file mode 100755 index 0000000000..08a3117b67 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_integration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Integration +func (mg *Integration) GetTerraformResourceType() string { + return "aws_apigatewayv2_integration" +} + +// GetConnectionDetailsMapping for this Integration +func (tr *Integration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Integration +func (tr *Integration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Integration +func (tr *Integration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Integration +func (tr *Integration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Integration +func (tr *Integration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Integration +func (tr *Integration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Integration +func (tr *Integration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Integration
+func (tr *Integration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Integration using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Integration) LateInitialize(attrs []byte) (bool, error) {
+	params := &IntegrationParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Integration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigatewayv2/v1beta2/zz_integration_types.go b/apis/apigatewayv2/v1beta2/zz_integration_types.go new file mode 100755 index 0000000000..83b21aec35 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_integration_types.go @@ -0,0 +1,434 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IntegrationInitParameters struct { + + // API identifier. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // ID of the VPC link for a private integration. Supported only for HTTP APIs. Must be between 1 and 1024 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.VPCLink + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // Reference to a VPCLink in apigatewayv2 to populate connectionId. 
+ // +kubebuilder:validation:Optional + ConnectionIDRef *v1.Reference `json:"connectionIdRef,omitempty" tf:"-"` + + // Selector for a VPCLink in apigatewayv2 to populate connectionId. + // +kubebuilder:validation:Optional + ConnectionIDSelector *v1.Selector `json:"connectionIdSelector,omitempty" tf:"-"` + + // Type of the network connection to the integration endpoint. Valid values: INTERNET, VPC_LINK. Default is INTERNET. + ConnectionType *string `json:"connectionType,omitempty" tf:"connection_type,omitempty"` + + // How to handle response payload content type conversions. Valid values: CONVERT_TO_BINARY, CONVERT_TO_TEXT. Supported only for WebSocket APIs. + ContentHandlingStrategy *string `json:"contentHandlingStrategy,omitempty" tf:"content_handling_strategy,omitempty"` + + // Credentials required for the integration, if any. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + CredentialsArn *string `json:"credentialsArn,omitempty" tf:"credentials_arn,omitempty"` + + // Reference to a Role in iam to populate credentialsArn. + // +kubebuilder:validation:Optional + CredentialsArnRef *v1.Reference `json:"credentialsArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate credentialsArn. + // +kubebuilder:validation:Optional + CredentialsArnSelector *v1.Selector `json:"credentialsArnSelector,omitempty" tf:"-"` + + // Description of the integration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Integration's HTTP method. Must be specified if integration_type is not MOCK. + IntegrationMethod *string `json:"integrationMethod,omitempty" tf:"integration_method,omitempty"` + + // AWS service action to invoke. Supported only for HTTP APIs when integration_type is AWS_PROXY. See the AWS service integration reference documentation for supported values. 
Must be between 1 and 128 characters in length. + IntegrationSubtype *string `json:"integrationSubtype,omitempty" tf:"integration_subtype,omitempty"` + + // Integration type of an integration. + // Valid values: AWS (supported only for WebSocket APIs), AWS_PROXY, HTTP (supported only for WebSocket APIs), HTTP_PROXY, MOCK (supported only for WebSocket APIs). For an HTTP API private integration, use HTTP_PROXY. + IntegrationType *string `json:"integrationType,omitempty" tf:"integration_type,omitempty"` + + // URI of the Lambda function for a Lambda proxy integration, when integration_type is AWS_PROXY. + // For an HTTP integration, specify a fully-qualified URL. For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("invoke_arn",true) + IntegrationURI *string `json:"integrationUri,omitempty" tf:"integration_uri,omitempty"` + + // Reference to a Function in lambda to populate integrationUri. + // +kubebuilder:validation:Optional + IntegrationURIRef *v1.Reference `json:"integrationUriRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate integrationUri. + // +kubebuilder:validation:Optional + IntegrationURISelector *v1.Selector `json:"integrationUriSelector,omitempty" tf:"-"` + + // Pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the request_templates attribute. + // Valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, NEVER. Default is WHEN_NO_MATCH. Supported only for WebSocket APIs. + PassthroughBehavior *string `json:"passthroughBehavior,omitempty" tf:"passthrough_behavior,omitempty"` + + // The format of the payload sent to an integration. 
Valid values: 1.0, 2.0. Default is 1.0. + PayloadFormatVersion *string `json:"payloadFormatVersion,omitempty" tf:"payload_format_version,omitempty"` + + // For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. + // For HTTP APIs with a specified integration_subtype, a key-value map specifying parameters that are passed to AWS_PROXY integrations. + // For HTTP APIs without a specified integration_subtype, a key-value map specifying how to transform HTTP requests before sending them to the backend. + // See the Amazon API Gateway Developer Guide for details. + // +mapType=granular + RequestParameters map[string]*string `json:"requestParameters,omitempty" tf:"request_parameters,omitempty"` + + // Map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. Supported only for WebSocket APIs. + // +mapType=granular + RequestTemplates map[string]*string `json:"requestTemplates,omitempty" tf:"request_templates,omitempty"` + + // Mappings to transform the HTTP response from a backend integration before returning the response to clients. Supported only for HTTP APIs. + ResponseParameters []ResponseParametersInitParameters `json:"responseParameters,omitempty" tf:"response_parameters,omitempty"` + + // TLS configuration for a private integration. Supported only for HTTP APIs. + TLSConfig *TLSConfigInitParameters `json:"tlsConfig,omitempty" tf:"tls_config,omitempty"` + + // The template selection expression for the integration. + TemplateSelectionExpression *string `json:"templateSelectionExpression,omitempty" tf:"template_selection_expression,omitempty"` + + // Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. + // The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs. 
+ TimeoutMilliseconds *float64 `json:"timeoutMilliseconds,omitempty" tf:"timeout_milliseconds,omitempty"` +} + +type IntegrationObservation struct { + + // API identifier. + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // ID of the VPC link for a private integration. Supported only for HTTP APIs. Must be between 1 and 1024 characters in length. + ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // Type of the network connection to the integration endpoint. Valid values: INTERNET, VPC_LINK. Default is INTERNET. + ConnectionType *string `json:"connectionType,omitempty" tf:"connection_type,omitempty"` + + // How to handle response payload content type conversions. Valid values: CONVERT_TO_BINARY, CONVERT_TO_TEXT. Supported only for WebSocket APIs. + ContentHandlingStrategy *string `json:"contentHandlingStrategy,omitempty" tf:"content_handling_strategy,omitempty"` + + // Credentials required for the integration, if any. + CredentialsArn *string `json:"credentialsArn,omitempty" tf:"credentials_arn,omitempty"` + + // Description of the integration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Integration identifier. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Integration's HTTP method. Must be specified if integration_type is not MOCK. + IntegrationMethod *string `json:"integrationMethod,omitempty" tf:"integration_method,omitempty"` + + // The integration response selection expression for the integration. + IntegrationResponseSelectionExpression *string `json:"integrationResponseSelectionExpression,omitempty" tf:"integration_response_selection_expression,omitempty"` + + // AWS service action to invoke. Supported only for HTTP APIs when integration_type is AWS_PROXY. See the AWS service integration reference documentation for supported values. Must be between 1 and 128 characters in length. 
+ IntegrationSubtype *string `json:"integrationSubtype,omitempty" tf:"integration_subtype,omitempty"` + + // Integration type of an integration. + // Valid values: AWS (supported only for WebSocket APIs), AWS_PROXY, HTTP (supported only for WebSocket APIs), HTTP_PROXY, MOCK (supported only for WebSocket APIs). For an HTTP API private integration, use HTTP_PROXY. + IntegrationType *string `json:"integrationType,omitempty" tf:"integration_type,omitempty"` + + // URI of the Lambda function for a Lambda proxy integration, when integration_type is AWS_PROXY. + // For an HTTP integration, specify a fully-qualified URL. For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. + IntegrationURI *string `json:"integrationUri,omitempty" tf:"integration_uri,omitempty"` + + // Pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the request_templates attribute. + // Valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, NEVER. Default is WHEN_NO_MATCH. Supported only for WebSocket APIs. + PassthroughBehavior *string `json:"passthroughBehavior,omitempty" tf:"passthrough_behavior,omitempty"` + + // The format of the payload sent to an integration. Valid values: 1.0, 2.0. Default is 1.0. + PayloadFormatVersion *string `json:"payloadFormatVersion,omitempty" tf:"payload_format_version,omitempty"` + + // For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. + // For HTTP APIs with a specified integration_subtype, a key-value map specifying parameters that are passed to AWS_PROXY integrations. + // For HTTP APIs without a specified integration_subtype, a key-value map specifying how to transform HTTP requests before sending them to the backend. + // See the Amazon API Gateway Developer Guide for details. 
+ // +mapType=granular + RequestParameters map[string]*string `json:"requestParameters,omitempty" tf:"request_parameters,omitempty"` + + // Map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. Supported only for WebSocket APIs. + // +mapType=granular + RequestTemplates map[string]*string `json:"requestTemplates,omitempty" tf:"request_templates,omitempty"` + + // Mappings to transform the HTTP response from a backend integration before returning the response to clients. Supported only for HTTP APIs. + ResponseParameters []ResponseParametersObservation `json:"responseParameters,omitempty" tf:"response_parameters,omitempty"` + + // TLS configuration for a private integration. Supported only for HTTP APIs. + TLSConfig *TLSConfigObservation `json:"tlsConfig,omitempty" tf:"tls_config,omitempty"` + + // The template selection expression for the integration. + TemplateSelectionExpression *string `json:"templateSelectionExpression,omitempty" tf:"template_selection_expression,omitempty"` + + // Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. + // The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs. + TimeoutMilliseconds *float64 `json:"timeoutMilliseconds,omitempty" tf:"timeout_milliseconds,omitempty"` +} + +type IntegrationParameters struct { + + // API identifier. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API + // +kubebuilder:validation:Optional + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a API in apigatewayv2 to populate apiId. 
+ // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // ID of the VPC link for a private integration. Supported only for HTTP APIs. Must be between 1 and 1024 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.VPCLink + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // Reference to a VPCLink in apigatewayv2 to populate connectionId. + // +kubebuilder:validation:Optional + ConnectionIDRef *v1.Reference `json:"connectionIdRef,omitempty" tf:"-"` + + // Selector for a VPCLink in apigatewayv2 to populate connectionId. + // +kubebuilder:validation:Optional + ConnectionIDSelector *v1.Selector `json:"connectionIdSelector,omitempty" tf:"-"` + + // Type of the network connection to the integration endpoint. Valid values: INTERNET, VPC_LINK. Default is INTERNET. + // +kubebuilder:validation:Optional + ConnectionType *string `json:"connectionType,omitempty" tf:"connection_type,omitempty"` + + // How to handle response payload content type conversions. Valid values: CONVERT_TO_BINARY, CONVERT_TO_TEXT. Supported only for WebSocket APIs. + // +kubebuilder:validation:Optional + ContentHandlingStrategy *string `json:"contentHandlingStrategy,omitempty" tf:"content_handling_strategy,omitempty"` + + // Credentials required for the integration, if any. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + CredentialsArn *string `json:"credentialsArn,omitempty" tf:"credentials_arn,omitempty"` + + // Reference to a Role in iam to populate credentialsArn. 
+ // +kubebuilder:validation:Optional + CredentialsArnRef *v1.Reference `json:"credentialsArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate credentialsArn. + // +kubebuilder:validation:Optional + CredentialsArnSelector *v1.Selector `json:"credentialsArnSelector,omitempty" tf:"-"` + + // Description of the integration. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Integration's HTTP method. Must be specified if integration_type is not MOCK. + // +kubebuilder:validation:Optional + IntegrationMethod *string `json:"integrationMethod,omitempty" tf:"integration_method,omitempty"` + + // AWS service action to invoke. Supported only for HTTP APIs when integration_type is AWS_PROXY. See the AWS service integration reference documentation for supported values. Must be between 1 and 128 characters in length. + // +kubebuilder:validation:Optional + IntegrationSubtype *string `json:"integrationSubtype,omitempty" tf:"integration_subtype,omitempty"` + + // Integration type of an integration. + // Valid values: AWS (supported only for WebSocket APIs), AWS_PROXY, HTTP (supported only for WebSocket APIs), HTTP_PROXY, MOCK (supported only for WebSocket APIs). For an HTTP API private integration, use HTTP_PROXY. + // +kubebuilder:validation:Optional + IntegrationType *string `json:"integrationType,omitempty" tf:"integration_type,omitempty"` + + // URI of the Lambda function for a Lambda proxy integration, when integration_type is AWS_PROXY. + // For an HTTP integration, specify a fully-qualified URL. For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("invoke_arn",true) + // +kubebuilder:validation:Optional + IntegrationURI *string `json:"integrationUri,omitempty" tf:"integration_uri,omitempty"` + + // Reference to a Function in lambda to populate integrationUri. + // +kubebuilder:validation:Optional + IntegrationURIRef *v1.Reference `json:"integrationUriRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate integrationUri. + // +kubebuilder:validation:Optional + IntegrationURISelector *v1.Selector `json:"integrationUriSelector,omitempty" tf:"-"` + + // Pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the request_templates attribute. + // Valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, NEVER. Default is WHEN_NO_MATCH. Supported only for WebSocket APIs. + // +kubebuilder:validation:Optional + PassthroughBehavior *string `json:"passthroughBehavior,omitempty" tf:"passthrough_behavior,omitempty"` + + // The format of the payload sent to an integration. Valid values: 1.0, 2.0. Default is 1.0. + // +kubebuilder:validation:Optional + PayloadFormatVersion *string `json:"payloadFormatVersion,omitempty" tf:"payload_format_version,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. + // For HTTP APIs with a specified integration_subtype, a key-value map specifying parameters that are passed to AWS_PROXY integrations. 
+ // For HTTP APIs without a specified integration_subtype, a key-value map specifying how to transform HTTP requests before sending them to the backend. + // See the Amazon API Gateway Developer Guide for details. + // +kubebuilder:validation:Optional + // +mapType=granular + RequestParameters map[string]*string `json:"requestParameters,omitempty" tf:"request_parameters,omitempty"` + + // Map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. Supported only for WebSocket APIs. + // +kubebuilder:validation:Optional + // +mapType=granular + RequestTemplates map[string]*string `json:"requestTemplates,omitempty" tf:"request_templates,omitempty"` + + // Mappings to transform the HTTP response from a backend integration before returning the response to clients. Supported only for HTTP APIs. + // +kubebuilder:validation:Optional + ResponseParameters []ResponseParametersParameters `json:"responseParameters,omitempty" tf:"response_parameters,omitempty"` + + // TLS configuration for a private integration. Supported only for HTTP APIs. + // +kubebuilder:validation:Optional + TLSConfig *TLSConfigParameters `json:"tlsConfig,omitempty" tf:"tls_config,omitempty"` + + // The template selection expression for the integration. + // +kubebuilder:validation:Optional + TemplateSelectionExpression *string `json:"templateSelectionExpression,omitempty" tf:"template_selection_expression,omitempty"` + + // Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. + // The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs. + // +kubebuilder:validation:Optional + TimeoutMilliseconds *float64 `json:"timeoutMilliseconds,omitempty" tf:"timeout_milliseconds,omitempty"` +} + +type ResponseParametersInitParameters struct { + + // Key-value map. 
The key of this map identifies the location of the request parameter to change, and how to change it. The corresponding value specifies the new data for the parameter. + // See the Amazon API Gateway Developer Guide for details. + // +mapType=granular + Mappings map[string]*string `json:"mappings,omitempty" tf:"mappings,omitempty"` + + // HTTP status code in the range 200-599. + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ResponseParametersObservation struct { + + // Key-value map. The key of this map identifies the location of the request parameter to change, and how to change it. The corresponding value specifies the new data for the parameter. + // See the Amazon API Gateway Developer Guide for details. + // +mapType=granular + Mappings map[string]*string `json:"mappings,omitempty" tf:"mappings,omitempty"` + + // HTTP status code in the range 200-599. + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ResponseParametersParameters struct { + + // Key-value map. The key of this map identifies the location of the request parameter to change, and how to change it. The corresponding value specifies the new data for the parameter. + // See the Amazon API Gateway Developer Guide for details. + // +kubebuilder:validation:Optional + // +mapType=granular + Mappings map[string]*string `json:"mappings" tf:"mappings,omitempty"` + + // HTTP status code in the range 200-599. + // +kubebuilder:validation:Optional + StatusCode *string `json:"statusCode" tf:"status_code,omitempty"` +} + +type TLSConfigInitParameters struct { + + // If you specify a server name, API Gateway uses it to verify the hostname on the integration's certificate. The server name is also included in the TLS handshake to support Server Name Indication (SNI) or virtual hosting. 
+ ServerNameToVerify *string `json:"serverNameToVerify,omitempty" tf:"server_name_to_verify,omitempty"` +} + +type TLSConfigObservation struct { + + // If you specify a server name, API Gateway uses it to verify the hostname on the integration's certificate. The server name is also included in the TLS handshake to support Server Name Indication (SNI) or virtual hosting. + ServerNameToVerify *string `json:"serverNameToVerify,omitempty" tf:"server_name_to_verify,omitempty"` +} + +type TLSConfigParameters struct { + + // If you specify a server name, API Gateway uses it to verify the hostname on the integration's certificate. The server name is also included in the TLS handshake to support Server Name Indication (SNI) or virtual hosting. + // +kubebuilder:validation:Optional + ServerNameToVerify *string `json:"serverNameToVerify,omitempty" tf:"server_name_to_verify,omitempty"` +} + +// IntegrationSpec defines the desired state of Integration +type IntegrationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IntegrationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IntegrationInitParameters `json:"initProvider,omitempty"` +} + +// IntegrationStatus defines the observed state of Integration. 
+type IntegrationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IntegrationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Integration is the Schema for the Integrations API. Manages an Amazon API Gateway Version 2 integration. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Integration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.integrationType) || (has(self.initProvider) && has(self.initProvider.integrationType))",message="spec.forProvider.integrationType is a required parameter" + Spec IntegrationSpec `json:"spec"` + Status IntegrationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IntegrationList contains a list of Integrations +type IntegrationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Integration `json:"items"` +} + +// Repository type metadata. +var ( + Integration_Kind = "Integration" + Integration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Integration_Kind}.String() + Integration_KindAPIVersion = Integration_Kind + "." 
+ CRDGroupVersion.String() + Integration_GroupVersionKind = CRDGroupVersion.WithKind(Integration_Kind) +) + +func init() { + SchemeBuilder.Register(&Integration{}, &IntegrationList{}) +} diff --git a/apis/apigatewayv2/v1beta2/zz_stage_terraformed.go b/apis/apigatewayv2/v1beta2/zz_stage_terraformed.go new file mode 100755 index 0000000000..755b5fd716 --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_stage_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Stage +func (mg *Stage) GetTerraformResourceType() string { + return "aws_apigatewayv2_stage" +} + +// GetConnectionDetailsMapping for this Stage +func (tr *Stage) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Stage +func (tr *Stage) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Stage +func (tr *Stage) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Stage +func (tr *Stage) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Stage +func (tr *Stage) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Stage
+func (tr *Stage) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Stage
+func (tr *Stage) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Stage
+func (tr *Stage) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Stage using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Stage) LateInitialize(attrs []byte) (bool, error) { + params := &StageParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Stage) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apigatewayv2/v1beta2/zz_stage_types.go b/apis/apigatewayv2/v1beta2/zz_stage_types.go new file mode 100755 index 0000000000..7b8602f1db --- /dev/null +++ b/apis/apigatewayv2/v1beta2/zz_stage_types.go @@ -0,0 +1,424 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessLogSettingsInitParameters struct { + + // ARN of the CloudWatch Logs log group to receive access logs. Any trailing :* is trimmed from the ARN. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` + + // Single line format of the access logs of data. Refer to log settings for HTTP or Websocket. + Format *string `json:"format,omitempty" tf:"format,omitempty"` +} + +type AccessLogSettingsObservation struct { + + // ARN of the CloudWatch Logs log group to receive access logs. Any trailing :* is trimmed from the ARN. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` + + // Single line format of the access logs of data. Refer to log settings for HTTP or Websocket. 
+ Format *string `json:"format,omitempty" tf:"format,omitempty"` +} + +type AccessLogSettingsParameters struct { + + // ARN of the CloudWatch Logs log group to receive access logs. Any trailing :* is trimmed from the ARN. + // +kubebuilder:validation:Optional + DestinationArn *string `json:"destinationArn" tf:"destination_arn,omitempty"` + + // Single line format of the access logs of data. Refer to log settings for HTTP or Websocket. + // +kubebuilder:validation:Optional + Format *string `json:"format" tf:"format,omitempty"` +} + +type DefaultRouteSettingsInitParameters struct { + + // Whether data trace logging is enabled for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Defaults to false. Supported only for WebSocket APIs. + DataTraceEnabled *bool `json:"dataTraceEnabled,omitempty" tf:"data_trace_enabled,omitempty"` + + // Whether detailed metrics are enabled for the default route. Defaults to false. + DetailedMetricsEnabled *bool `json:"detailedMetricsEnabled,omitempty" tf:"detailed_metrics_enabled,omitempty"` + + // Logging level for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + LoggingLevel *string `json:"loggingLevel,omitempty" tf:"logging_level,omitempty"` + + // Throttling burst limit for the default route. + ThrottlingBurstLimit *float64 `json:"throttlingBurstLimit,omitempty" tf:"throttling_burst_limit,omitempty"` + + // Throttling rate limit for the default route. + ThrottlingRateLimit *float64 `json:"throttlingRateLimit,omitempty" tf:"throttling_rate_limit,omitempty"` +} + +type DefaultRouteSettingsObservation struct { + + // Whether data trace logging is enabled for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Defaults to false. Supported only for WebSocket APIs. 
+ DataTraceEnabled *bool `json:"dataTraceEnabled,omitempty" tf:"data_trace_enabled,omitempty"` + + // Whether detailed metrics are enabled for the default route. Defaults to false. + DetailedMetricsEnabled *bool `json:"detailedMetricsEnabled,omitempty" tf:"detailed_metrics_enabled,omitempty"` + + // Logging level for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + LoggingLevel *string `json:"loggingLevel,omitempty" tf:"logging_level,omitempty"` + + // Throttling burst limit for the default route. + ThrottlingBurstLimit *float64 `json:"throttlingBurstLimit,omitempty" tf:"throttling_burst_limit,omitempty"` + + // Throttling rate limit for the default route. + ThrottlingRateLimit *float64 `json:"throttlingRateLimit,omitempty" tf:"throttling_rate_limit,omitempty"` +} + +type DefaultRouteSettingsParameters struct { + + // Whether data trace logging is enabled for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Defaults to false. Supported only for WebSocket APIs. + // +kubebuilder:validation:Optional + DataTraceEnabled *bool `json:"dataTraceEnabled,omitempty" tf:"data_trace_enabled,omitempty"` + + // Whether detailed metrics are enabled for the default route. Defaults to false. + // +kubebuilder:validation:Optional + DetailedMetricsEnabled *bool `json:"detailedMetricsEnabled,omitempty" tf:"detailed_metrics_enabled,omitempty"` + + // Logging level for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + // +kubebuilder:validation:Optional + LoggingLevel *string `json:"loggingLevel,omitempty" tf:"logging_level,omitempty"` + + // Throttling burst limit for the default route. 
+ // +kubebuilder:validation:Optional + ThrottlingBurstLimit *float64 `json:"throttlingBurstLimit,omitempty" tf:"throttling_burst_limit,omitempty"` + + // Throttling rate limit for the default route. + // +kubebuilder:validation:Optional + ThrottlingRateLimit *float64 `json:"throttlingRateLimit,omitempty" tf:"throttling_rate_limit,omitempty"` +} + +type RouteSettingsInitParameters struct { + + // Whether data trace logging is enabled for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Defaults to false. Supported only for WebSocket APIs. + DataTraceEnabled *bool `json:"dataTraceEnabled,omitempty" tf:"data_trace_enabled,omitempty"` + + // Whether detailed metrics are enabled for the route. Defaults to false. + DetailedMetricsEnabled *bool `json:"detailedMetricsEnabled,omitempty" tf:"detailed_metrics_enabled,omitempty"` + + // Logging level for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + LoggingLevel *string `json:"loggingLevel,omitempty" tf:"logging_level,omitempty"` + + // Route key. + RouteKey *string `json:"routeKey,omitempty" tf:"route_key,omitempty"` + + // Throttling burst limit for the route. + ThrottlingBurstLimit *float64 `json:"throttlingBurstLimit,omitempty" tf:"throttling_burst_limit,omitempty"` + + // Throttling rate limit for the route. + ThrottlingRateLimit *float64 `json:"throttlingRateLimit,omitempty" tf:"throttling_rate_limit,omitempty"` +} + +type RouteSettingsObservation struct { + + // Whether data trace logging is enabled for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Defaults to false. Supported only for WebSocket APIs. + DataTraceEnabled *bool `json:"dataTraceEnabled,omitempty" tf:"data_trace_enabled,omitempty"` + + // Whether detailed metrics are enabled for the route. Defaults to false. 
+ DetailedMetricsEnabled *bool `json:"detailedMetricsEnabled,omitempty" tf:"detailed_metrics_enabled,omitempty"` + + // Logging level for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + LoggingLevel *string `json:"loggingLevel,omitempty" tf:"logging_level,omitempty"` + + // Route key. + RouteKey *string `json:"routeKey,omitempty" tf:"route_key,omitempty"` + + // Throttling burst limit for the route. + ThrottlingBurstLimit *float64 `json:"throttlingBurstLimit,omitempty" tf:"throttling_burst_limit,omitempty"` + + // Throttling rate limit for the route. + ThrottlingRateLimit *float64 `json:"throttlingRateLimit,omitempty" tf:"throttling_rate_limit,omitempty"` +} + +type RouteSettingsParameters struct { + + // Whether data trace logging is enabled for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Defaults to false. Supported only for WebSocket APIs. + // +kubebuilder:validation:Optional + DataTraceEnabled *bool `json:"dataTraceEnabled,omitempty" tf:"data_trace_enabled,omitempty"` + + // Whether detailed metrics are enabled for the route. Defaults to false. + // +kubebuilder:validation:Optional + DetailedMetricsEnabled *bool `json:"detailedMetricsEnabled,omitempty" tf:"detailed_metrics_enabled,omitempty"` + + // Logging level for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + // Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + // +kubebuilder:validation:Optional + LoggingLevel *string `json:"loggingLevel,omitempty" tf:"logging_level,omitempty"` + + // Route key. + // +kubebuilder:validation:Optional + RouteKey *string `json:"routeKey" tf:"route_key,omitempty"` + + // Throttling burst limit for the route. 
+ // +kubebuilder:validation:Optional + ThrottlingBurstLimit *float64 `json:"throttlingBurstLimit,omitempty" tf:"throttling_burst_limit,omitempty"` + + // Throttling rate limit for the route. + // +kubebuilder:validation:Optional + ThrottlingRateLimit *float64 `json:"throttlingRateLimit,omitempty" tf:"throttling_rate_limit,omitempty"` +} + +type StageInitParameters struct { + + // API identifier. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // Settings for logging access in this stage. + // Use the aws_api_gateway_account resource to configure permissions for CloudWatch Logging. + AccessLogSettings *AccessLogSettingsInitParameters `json:"accessLogSettings,omitempty" tf:"access_log_settings,omitempty"` + + // Whether updates to an API automatically trigger a new deployment. Defaults to false. Applicable for HTTP APIs. + AutoDeploy *bool `json:"autoDeploy,omitempty" tf:"auto_deploy,omitempty"` + + // Identifier of a client certificate for the stage. Use the aws_api_gateway_client_certificate resource to configure a client certificate. + // Supported only for WebSocket APIs. + ClientCertificateID *string `json:"clientCertificateId,omitempty" tf:"client_certificate_id,omitempty"` + + // Default route settings for the stage. + DefaultRouteSettings *DefaultRouteSettingsInitParameters `json:"defaultRouteSettings,omitempty" tf:"default_route_settings,omitempty"` + + // Deployment identifier of the stage. Use the aws_apigatewayv2_deployment resource to configure a deployment. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Deployment + DeploymentID *string `json:"deploymentId,omitempty" tf:"deployment_id,omitempty"` + + // Reference to a Deployment in apigatewayv2 to populate deploymentId. + // +kubebuilder:validation:Optional + DeploymentIDRef *v1.Reference `json:"deploymentIdRef,omitempty" tf:"-"` + + // Selector for a Deployment in apigatewayv2 to populate deploymentId. + // +kubebuilder:validation:Optional + DeploymentIDSelector *v1.Selector `json:"deploymentIdSelector,omitempty" tf:"-"` + + // Description for the stage. Must be less than or equal to 1024 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Route settings for the stage. + RouteSettings []RouteSettingsInitParameters `json:"routeSettings,omitempty" tf:"route_settings,omitempty"` + + // Map that defines the stage variables for the stage. + // +mapType=granular + StageVariables map[string]*string `json:"stageVariables,omitempty" tf:"stage_variables,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StageObservation struct { + + // API identifier. + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Settings for logging access in this stage. + // Use the aws_api_gateway_account resource to configure permissions for CloudWatch Logging. + AccessLogSettings *AccessLogSettingsObservation `json:"accessLogSettings,omitempty" tf:"access_log_settings,omitempty"` + + // ARN of the stage. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether updates to an API automatically trigger a new deployment. Defaults to false. Applicable for HTTP APIs. + AutoDeploy *bool `json:"autoDeploy,omitempty" tf:"auto_deploy,omitempty"` + + // Identifier of a client certificate for the stage. 
Use the aws_api_gateway_client_certificate resource to configure a client certificate. + // Supported only for WebSocket APIs. + ClientCertificateID *string `json:"clientCertificateId,omitempty" tf:"client_certificate_id,omitempty"` + + // Default route settings for the stage. + DefaultRouteSettings *DefaultRouteSettingsObservation `json:"defaultRouteSettings,omitempty" tf:"default_route_settings,omitempty"` + + // Deployment identifier of the stage. Use the aws_apigatewayv2_deployment resource to configure a deployment. + DeploymentID *string `json:"deploymentId,omitempty" tf:"deployment_id,omitempty"` + + // Description for the stage. Must be less than or equal to 1024 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ARN prefix to be used in an aws_lambda_permission's source_arn attribute. + // For WebSocket APIs this attribute can additionally be used in an aws_iam_policy to authorize access to the @connections API. + // See the Amazon API Gateway Developer Guide for details. + ExecutionArn *string `json:"executionArn,omitempty" tf:"execution_arn,omitempty"` + + // Stage identifier. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // URL to invoke the API pointing to the stage, + // e.g., wss://z4675bid1j.execute-api.eu-west-2.amazonaws.com/example-stage, or https://z4675bid1j.execute-api.eu-west-2.amazonaws.com/ + InvokeURL *string `json:"invokeUrl,omitempty" tf:"invoke_url,omitempty"` + + // Route settings for the stage. + RouteSettings []RouteSettingsObservation `json:"routeSettings,omitempty" tf:"route_settings,omitempty"` + + // Map that defines the stage variables for the stage. + // +mapType=granular + StageVariables map[string]*string `json:"stageVariables,omitempty" tf:"stage_variables,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type StageParameters struct { + + // API identifier. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2.API + // +kubebuilder:validation:Optional + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a API in apigatewayv2 to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // Settings for logging access in this stage. + // Use the aws_api_gateway_account resource to configure permissions for CloudWatch Logging. + // +kubebuilder:validation:Optional + AccessLogSettings *AccessLogSettingsParameters `json:"accessLogSettings,omitempty" tf:"access_log_settings,omitempty"` + + // Whether updates to an API automatically trigger a new deployment. Defaults to false. Applicable for HTTP APIs. + // +kubebuilder:validation:Optional + AutoDeploy *bool `json:"autoDeploy,omitempty" tf:"auto_deploy,omitempty"` + + // Identifier of a client certificate for the stage. Use the aws_api_gateway_client_certificate resource to configure a client certificate. + // Supported only for WebSocket APIs. + // +kubebuilder:validation:Optional + ClientCertificateID *string `json:"clientCertificateId,omitempty" tf:"client_certificate_id,omitempty"` + + // Default route settings for the stage. 
+ // +kubebuilder:validation:Optional + DefaultRouteSettings *DefaultRouteSettingsParameters `json:"defaultRouteSettings,omitempty" tf:"default_route_settings,omitempty"` + + // Deployment identifier of the stage. Use the aws_apigatewayv2_deployment resource to configure a deployment. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1.Deployment + // +kubebuilder:validation:Optional + DeploymentID *string `json:"deploymentId,omitempty" tf:"deployment_id,omitempty"` + + // Reference to a Deployment in apigatewayv2 to populate deploymentId. + // +kubebuilder:validation:Optional + DeploymentIDRef *v1.Reference `json:"deploymentIdRef,omitempty" tf:"-"` + + // Selector for a Deployment in apigatewayv2 to populate deploymentId. + // +kubebuilder:validation:Optional + DeploymentIDSelector *v1.Selector `json:"deploymentIdSelector,omitempty" tf:"-"` + + // Description for the stage. Must be less than or equal to 1024 characters in length. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Route settings for the stage. + // +kubebuilder:validation:Optional + RouteSettings []RouteSettingsParameters `json:"routeSettings,omitempty" tf:"route_settings,omitempty"` + + // Map that defines the stage variables for the stage. + // +kubebuilder:validation:Optional + // +mapType=granular + StageVariables map[string]*string `json:"stageVariables,omitempty" tf:"stage_variables,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// StageSpec defines the desired state of Stage +type StageSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StageParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StageInitParameters `json:"initProvider,omitempty"` +} + +// StageStatus defines the observed state of Stage. +type StageStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StageObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Stage is the Schema for the Stages API. Manages an Amazon API Gateway Version 2 stage. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Stage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec StageSpec `json:"spec"` + Status StageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StageList contains a list of Stages +type StageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Stage `json:"items"` +} + +// Repository type metadata. +var ( + Stage_Kind = "Stage" + Stage_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Stage_Kind}.String() + Stage_KindAPIVersion = Stage_Kind + "." + CRDGroupVersion.String() + Stage_GroupVersionKind = CRDGroupVersion.WithKind(Stage_Kind) +) + +func init() { + SchemeBuilder.Register(&Stage{}, &StageList{}) +} diff --git a/apis/appautoscaling/v1beta1/zz_generated.conversion_hubs.go b/apis/appautoscaling/v1beta1/zz_generated.conversion_hubs.go index 2a7255f7a4..9f9110ed75 100755 --- a/apis/appautoscaling/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/appautoscaling/v1beta1/zz_generated.conversion_hubs.go @@ -6,11 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Policy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ScheduledAction) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *Target) Hub() {} diff --git a/apis/appautoscaling/v1beta1/zz_generated.conversion_spokes.go b/apis/appautoscaling/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..82dd5923ed --- /dev/null +++ b/apis/appautoscaling/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Policy to the hub type. +func (tr *Policy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Policy type. +func (tr *Policy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ScheduledAction to the hub type. 
+func (tr *ScheduledAction) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ScheduledAction type. +func (tr *ScheduledAction) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/appautoscaling/v1beta2/zz_generated.conversion_hubs.go b/apis/appautoscaling/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..ac85a9eb23 --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Policy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ScheduledAction) Hub() {} diff --git a/apis/appautoscaling/v1beta2/zz_generated.deepcopy.go b/apis/appautoscaling/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6c71a23651 --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1737 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizedMetricSpecificationInitParameters) DeepCopyInto(out *CustomizedMetricSpecificationInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]DimensionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedMetricSpecificationInitParameters. +func (in *CustomizedMetricSpecificationInitParameters) DeepCopy() *CustomizedMetricSpecificationInitParameters { + if in == nil { + return nil + } + out := new(CustomizedMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedMetricSpecificationObservation) DeepCopyInto(out *CustomizedMetricSpecificationObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]DimensionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedMetricSpecificationObservation. +func (in *CustomizedMetricSpecificationObservation) DeepCopy() *CustomizedMetricSpecificationObservation { + if in == nil { + return nil + } + out := new(CustomizedMetricSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedMetricSpecificationParameters) DeepCopyInto(out *CustomizedMetricSpecificationParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]DimensionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedMetricSpecificationParameters. +func (in *CustomizedMetricSpecificationParameters) DeepCopy() *CustomizedMetricSpecificationParameters { + if in == nil { + return nil + } + out := new(CustomizedMetricSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionsInitParameters) DeepCopyInto(out *DimensionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionsInitParameters. 
+func (in *DimensionsInitParameters) DeepCopy() *DimensionsInitParameters { + if in == nil { + return nil + } + out := new(DimensionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionsObservation) DeepCopyInto(out *DimensionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionsObservation. +func (in *DimensionsObservation) DeepCopy() *DimensionsObservation { + if in == nil { + return nil + } + out := new(DimensionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionsParameters) DeepCopyInto(out *DimensionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionsParameters. +func (in *DimensionsParameters) DeepCopy() *DimensionsParameters { + if in == nil { + return nil + } + out := new(DimensionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricDimensionsInitParameters) DeepCopyInto(out *MetricDimensionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDimensionsInitParameters. +func (in *MetricDimensionsInitParameters) DeepCopy() *MetricDimensionsInitParameters { + if in == nil { + return nil + } + out := new(MetricDimensionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDimensionsObservation) DeepCopyInto(out *MetricDimensionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDimensionsObservation. +func (in *MetricDimensionsObservation) DeepCopy() *MetricDimensionsObservation { + if in == nil { + return nil + } + out := new(MetricDimensionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDimensionsParameters) DeepCopyInto(out *MetricDimensionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDimensionsParameters. 
+func (in *MetricDimensionsParameters) DeepCopy() *MetricDimensionsParameters { + if in == nil { + return nil + } + out := new(MetricDimensionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricInitParameters) DeepCopyInto(out *MetricInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricDimensionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricInitParameters. +func (in *MetricInitParameters) DeepCopy() *MetricInitParameters { + if in == nil { + return nil + } + out := new(MetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricObservation) DeepCopyInto(out *MetricObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricDimensionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricObservation. 
+func (in *MetricObservation) DeepCopy() *MetricObservation { + if in == nil { + return nil + } + out := new(MetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricParameters) DeepCopyInto(out *MetricParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricDimensionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricParameters. +func (in *MetricParameters) DeepCopy() *MetricParameters { + if in == nil { + return nil + } + out := new(MetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatInitParameters) DeepCopyInto(out *MetricStatInitParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatInitParameters. +func (in *MetricStatInitParameters) DeepCopy() *MetricStatInitParameters { + if in == nil { + return nil + } + out := new(MetricStatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MetricStatObservation) DeepCopyInto(out *MetricStatObservation) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricObservation) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatObservation. +func (in *MetricStatObservation) DeepCopy() *MetricStatObservation { + if in == nil { + return nil + } + out := new(MetricStatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatParameters) DeepCopyInto(out *MetricStatParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricParameters) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatParameters. +func (in *MetricStatParameters) DeepCopy() *MetricStatParameters { + if in == nil { + return nil + } + out := new(MetricStatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsInitParameters) DeepCopyInto(out *MetricsInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricStatInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsInitParameters. +func (in *MetricsInitParameters) DeepCopy() *MetricsInitParameters { + if in == nil { + return nil + } + out := new(MetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsObservation) DeepCopyInto(out *MetricsObservation) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricStatObservation) + (*in).DeepCopyInto(*out) + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsObservation. 
+func (in *MetricsObservation) DeepCopy() *MetricsObservation { + if in == nil { + return nil + } + out := new(MetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsParameters) DeepCopyInto(out *MetricsParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricStatParameters) + (*in).DeepCopyInto(*out) + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsParameters. +func (in *MetricsParameters) DeepCopy() *MetricsParameters { + if in == nil { + return nil + } + out := new(MetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyInitParameters) DeepCopyInto(out *PolicyInitParameters) { + *out = *in + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.StepScalingPolicyConfiguration != nil { + in, out := &in.StepScalingPolicyConfiguration, &out.StepScalingPolicyConfiguration + *out = new(StepScalingPolicyConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetTrackingScalingPolicyConfiguration != nil { + in, out := &in.TargetTrackingScalingPolicyConfiguration, &out.TargetTrackingScalingPolicyConfiguration + *out = new(TargetTrackingScalingPolicyConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyInitParameters. +func (in *PolicyInitParameters) DeepCopy() *PolicyInitParameters { + if in == nil { + return nil + } + out := new(PolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { + *out = *in + if in.AlarmArns != nil { + in, out := &in.AlarmArns, &out.AlarmArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ScalableDimension != nil { + in, out := &in.ScalableDimension, &out.ScalableDimension + *out = new(string) + **out = **in + } + if in.ServiceNamespace != nil { + in, out := &in.ServiceNamespace, &out.ServiceNamespace + *out = new(string) + **out = **in + } + if in.StepScalingPolicyConfiguration != nil { + in, out := &in.StepScalingPolicyConfiguration, &out.StepScalingPolicyConfiguration + *out = new(StepScalingPolicyConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetTrackingScalingPolicyConfiguration != nil { + in, out := &in.TargetTrackingScalingPolicyConfiguration, &out.TargetTrackingScalingPolicyConfiguration + *out = new(TargetTrackingScalingPolicyConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. 
+func (in *PolicyObservation) DeepCopy() *PolicyObservation { + if in == nil { + return nil + } + out := new(PolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { + *out = *in + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIDRef != nil { + in, out := &in.ResourceIDRef, &out.ResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceIDSelector != nil { + in, out := &in.ResourceIDSelector, &out.ResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ScalableDimension != nil { + in, out := &in.ScalableDimension, &out.ScalableDimension + *out = new(string) + **out = **in + } + if in.ScalableDimensionRef != nil { + in, out := &in.ScalableDimensionRef, &out.ScalableDimensionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ScalableDimensionSelector != nil { + in, out := &in.ScalableDimensionSelector, &out.ScalableDimensionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceNamespace != nil { + in, out := &in.ServiceNamespace, &out.ServiceNamespace + *out = new(string) + **out = **in + } + if in.ServiceNamespaceRef != nil { + in, out := &in.ServiceNamespaceRef, &out.ServiceNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceNamespaceSelector != nil { + in, out := &in.ServiceNamespaceSelector, &out.ServiceNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StepScalingPolicyConfiguration != nil { + in, out := 
&in.StepScalingPolicyConfiguration, &out.StepScalingPolicyConfiguration + *out = new(StepScalingPolicyConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetTrackingScalingPolicyConfiguration != nil { + in, out := &in.TargetTrackingScalingPolicyConfiguration, &out.TargetTrackingScalingPolicyConfiguration + *out = new(TargetTrackingScalingPolicyConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. +func (in *PolicyParameters) DeepCopy() *PolicyParameters { + if in == nil { + return nil + } + out := new(PolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec. +func (in *PolicySpec) DeepCopy() *PolicySpec { + if in == nil { + return nil + } + out := new(PolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. +func (in *PolicyStatus) DeepCopy() *PolicyStatus { + if in == nil { + return nil + } + out := new(PolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PredefinedMetricSpecificationInitParameters) DeepCopyInto(out *PredefinedMetricSpecificationInitParameters) { + *out = *in + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) + **out = **in + } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedMetricSpecificationInitParameters. +func (in *PredefinedMetricSpecificationInitParameters) DeepCopy() *PredefinedMetricSpecificationInitParameters { + if in == nil { + return nil + } + out := new(PredefinedMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredefinedMetricSpecificationObservation) DeepCopyInto(out *PredefinedMetricSpecificationObservation) { + *out = *in + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) + **out = **in + } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedMetricSpecificationObservation. +func (in *PredefinedMetricSpecificationObservation) DeepCopy() *PredefinedMetricSpecificationObservation { + if in == nil { + return nil + } + out := new(PredefinedMetricSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PredefinedMetricSpecificationParameters) DeepCopyInto(out *PredefinedMetricSpecificationParameters) { + *out = *in + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) + **out = **in + } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedMetricSpecificationParameters. +func (in *PredefinedMetricSpecificationParameters) DeepCopy() *PredefinedMetricSpecificationParameters { + if in == nil { + return nil + } + out := new(PredefinedMetricSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalableTargetActionInitParameters) DeepCopyInto(out *ScalableTargetActionInitParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(string) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalableTargetActionInitParameters. +func (in *ScalableTargetActionInitParameters) DeepCopy() *ScalableTargetActionInitParameters { + if in == nil { + return nil + } + out := new(ScalableTargetActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalableTargetActionObservation) DeepCopyInto(out *ScalableTargetActionObservation) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(string) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalableTargetActionObservation. +func (in *ScalableTargetActionObservation) DeepCopy() *ScalableTargetActionObservation { + if in == nil { + return nil + } + out := new(ScalableTargetActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalableTargetActionParameters) DeepCopyInto(out *ScalableTargetActionParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(string) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalableTargetActionParameters. +func (in *ScalableTargetActionParameters) DeepCopy() *ScalableTargetActionParameters { + if in == nil { + return nil + } + out := new(ScalableTargetActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledAction) DeepCopyInto(out *ScheduledAction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledAction. 
+func (in *ScheduledAction) DeepCopy() *ScheduledAction { + if in == nil { + return nil + } + out := new(ScheduledAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduledAction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledActionInitParameters) DeepCopyInto(out *ScheduledActionInitParameters) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIDRef != nil { + in, out := &in.ResourceIDRef, &out.ResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceIDSelector != nil { + in, out := &in.ResourceIDSelector, &out.ResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ScalableDimension != nil { + in, out := &in.ScalableDimension, &out.ScalableDimension + *out = new(string) + **out = **in + } + if in.ScalableDimensionRef != nil { + in, out := &in.ScalableDimensionRef, &out.ScalableDimensionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ScalableDimensionSelector != nil { + in, out := &in.ScalableDimensionSelector, &out.ScalableDimensionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ScalableTargetAction != nil { + in, out := &in.ScalableTargetAction, &out.ScalableTargetAction + *out = new(ScalableTargetActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if 
in.ServiceNamespace != nil { + in, out := &in.ServiceNamespace, &out.ServiceNamespace + *out = new(string) + **out = **in + } + if in.ServiceNamespaceRef != nil { + in, out := &in.ServiceNamespaceRef, &out.ServiceNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceNamespaceSelector != nil { + in, out := &in.ServiceNamespaceSelector, &out.ServiceNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionInitParameters. +func (in *ScheduledActionInitParameters) DeepCopy() *ScheduledActionInitParameters { + if in == nil { + return nil + } + out := new(ScheduledActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledActionList) DeepCopyInto(out *ScheduledActionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScheduledAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionList. +func (in *ScheduledActionList) DeepCopy() *ScheduledActionList { + if in == nil { + return nil + } + out := new(ScheduledActionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ScheduledActionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledActionObservation) DeepCopyInto(out *ScheduledActionObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ScalableDimension != nil { + in, out := &in.ScalableDimension, &out.ScalableDimension + *out = new(string) + **out = **in + } + if in.ScalableTargetAction != nil { + in, out := &in.ScalableTargetAction, &out.ScalableTargetAction + *out = new(ScalableTargetActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.ServiceNamespace != nil { + in, out := &in.ServiceNamespace, &out.ServiceNamespace + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionObservation. 
+func (in *ScheduledActionObservation) DeepCopy() *ScheduledActionObservation { + if in == nil { + return nil + } + out := new(ScheduledActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledActionParameters) DeepCopyInto(out *ScheduledActionParameters) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIDRef != nil { + in, out := &in.ResourceIDRef, &out.ResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceIDSelector != nil { + in, out := &in.ResourceIDSelector, &out.ResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ScalableDimension != nil { + in, out := &in.ScalableDimension, &out.ScalableDimension + *out = new(string) + **out = **in + } + if in.ScalableDimensionRef != nil { + in, out := &in.ScalableDimensionRef, &out.ScalableDimensionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ScalableDimensionSelector != nil { + in, out := &in.ScalableDimensionSelector, &out.ScalableDimensionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ScalableTargetAction != nil { + in, out := &in.ScalableTargetAction, &out.ScalableTargetAction + *out = new(ScalableTargetActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.ServiceNamespace != nil { + in, out := &in.ServiceNamespace, &out.ServiceNamespace + *out = new(string) + **out = **in 
+ } + if in.ServiceNamespaceRef != nil { + in, out := &in.ServiceNamespaceRef, &out.ServiceNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceNamespaceSelector != nil { + in, out := &in.ServiceNamespaceSelector, &out.ServiceNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionParameters. +func (in *ScheduledActionParameters) DeepCopy() *ScheduledActionParameters { + if in == nil { + return nil + } + out := new(ScheduledActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledActionSpec) DeepCopyInto(out *ScheduledActionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionSpec. +func (in *ScheduledActionSpec) DeepCopy() *ScheduledActionSpec { + if in == nil { + return nil + } + out := new(ScheduledActionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledActionStatus) DeepCopyInto(out *ScheduledActionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionStatus. 
+func (in *ScheduledActionStatus) DeepCopy() *ScheduledActionStatus { + if in == nil { + return nil + } + out := new(ScheduledActionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepAdjustmentInitParameters) DeepCopyInto(out *StepAdjustmentInitParameters) { + *out = *in + if in.MetricIntervalLowerBound != nil { + in, out := &in.MetricIntervalLowerBound, &out.MetricIntervalLowerBound + *out = new(string) + **out = **in + } + if in.MetricIntervalUpperBound != nil { + in, out := &in.MetricIntervalUpperBound, &out.MetricIntervalUpperBound + *out = new(string) + **out = **in + } + if in.ScalingAdjustment != nil { + in, out := &in.ScalingAdjustment, &out.ScalingAdjustment + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepAdjustmentInitParameters. +func (in *StepAdjustmentInitParameters) DeepCopy() *StepAdjustmentInitParameters { + if in == nil { + return nil + } + out := new(StepAdjustmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepAdjustmentObservation) DeepCopyInto(out *StepAdjustmentObservation) { + *out = *in + if in.MetricIntervalLowerBound != nil { + in, out := &in.MetricIntervalLowerBound, &out.MetricIntervalLowerBound + *out = new(string) + **out = **in + } + if in.MetricIntervalUpperBound != nil { + in, out := &in.MetricIntervalUpperBound, &out.MetricIntervalUpperBound + *out = new(string) + **out = **in + } + if in.ScalingAdjustment != nil { + in, out := &in.ScalingAdjustment, &out.ScalingAdjustment + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepAdjustmentObservation. 
+func (in *StepAdjustmentObservation) DeepCopy() *StepAdjustmentObservation { + if in == nil { + return nil + } + out := new(StepAdjustmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepAdjustmentParameters) DeepCopyInto(out *StepAdjustmentParameters) { + *out = *in + if in.MetricIntervalLowerBound != nil { + in, out := &in.MetricIntervalLowerBound, &out.MetricIntervalLowerBound + *out = new(string) + **out = **in + } + if in.MetricIntervalUpperBound != nil { + in, out := &in.MetricIntervalUpperBound, &out.MetricIntervalUpperBound + *out = new(string) + **out = **in + } + if in.ScalingAdjustment != nil { + in, out := &in.ScalingAdjustment, &out.ScalingAdjustment + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepAdjustmentParameters. +func (in *StepAdjustmentParameters) DeepCopy() *StepAdjustmentParameters { + if in == nil { + return nil + } + out := new(StepAdjustmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepScalingPolicyConfigurationInitParameters) DeepCopyInto(out *StepScalingPolicyConfigurationInitParameters) { + *out = *in + if in.AdjustmentType != nil { + in, out := &in.AdjustmentType, &out.AdjustmentType + *out = new(string) + **out = **in + } + if in.Cooldown != nil { + in, out := &in.Cooldown, &out.Cooldown + *out = new(float64) + **out = **in + } + if in.MetricAggregationType != nil { + in, out := &in.MetricAggregationType, &out.MetricAggregationType + *out = new(string) + **out = **in + } + if in.MinAdjustmentMagnitude != nil { + in, out := &in.MinAdjustmentMagnitude, &out.MinAdjustmentMagnitude + *out = new(float64) + **out = **in + } + if in.StepAdjustment != nil { + in, out := &in.StepAdjustment, &out.StepAdjustment + *out = make([]StepAdjustmentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepScalingPolicyConfigurationInitParameters. +func (in *StepScalingPolicyConfigurationInitParameters) DeepCopy() *StepScalingPolicyConfigurationInitParameters { + if in == nil { + return nil + } + out := new(StepScalingPolicyConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepScalingPolicyConfigurationObservation) DeepCopyInto(out *StepScalingPolicyConfigurationObservation) { + *out = *in + if in.AdjustmentType != nil { + in, out := &in.AdjustmentType, &out.AdjustmentType + *out = new(string) + **out = **in + } + if in.Cooldown != nil { + in, out := &in.Cooldown, &out.Cooldown + *out = new(float64) + **out = **in + } + if in.MetricAggregationType != nil { + in, out := &in.MetricAggregationType, &out.MetricAggregationType + *out = new(string) + **out = **in + } + if in.MinAdjustmentMagnitude != nil { + in, out := &in.MinAdjustmentMagnitude, &out.MinAdjustmentMagnitude + *out = new(float64) + **out = **in + } + if in.StepAdjustment != nil { + in, out := &in.StepAdjustment, &out.StepAdjustment + *out = make([]StepAdjustmentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepScalingPolicyConfigurationObservation. +func (in *StepScalingPolicyConfigurationObservation) DeepCopy() *StepScalingPolicyConfigurationObservation { + if in == nil { + return nil + } + out := new(StepScalingPolicyConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepScalingPolicyConfigurationParameters) DeepCopyInto(out *StepScalingPolicyConfigurationParameters) { + *out = *in + if in.AdjustmentType != nil { + in, out := &in.AdjustmentType, &out.AdjustmentType + *out = new(string) + **out = **in + } + if in.Cooldown != nil { + in, out := &in.Cooldown, &out.Cooldown + *out = new(float64) + **out = **in + } + if in.MetricAggregationType != nil { + in, out := &in.MetricAggregationType, &out.MetricAggregationType + *out = new(string) + **out = **in + } + if in.MinAdjustmentMagnitude != nil { + in, out := &in.MinAdjustmentMagnitude, &out.MinAdjustmentMagnitude + *out = new(float64) + **out = **in + } + if in.StepAdjustment != nil { + in, out := &in.StepAdjustment, &out.StepAdjustment + *out = make([]StepAdjustmentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepScalingPolicyConfigurationParameters. +func (in *StepScalingPolicyConfigurationParameters) DeepCopy() *StepScalingPolicyConfigurationParameters { + if in == nil { + return nil + } + out := new(StepScalingPolicyConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetTrackingScalingPolicyConfigurationInitParameters) DeepCopyInto(out *TargetTrackingScalingPolicyConfigurationInitParameters) { + *out = *in + if in.CustomizedMetricSpecification != nil { + in, out := &in.CustomizedMetricSpecification, &out.CustomizedMetricSpecification + *out = new(CustomizedMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableScaleIn != nil { + in, out := &in.DisableScaleIn, &out.DisableScaleIn + *out = new(bool) + **out = **in + } + if in.PredefinedMetricSpecification != nil { + in, out := &in.PredefinedMetricSpecification, &out.PredefinedMetricSpecification + *out = new(PredefinedMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleInCooldown != nil { + in, out := &in.ScaleInCooldown, &out.ScaleInCooldown + *out = new(float64) + **out = **in + } + if in.ScaleOutCooldown != nil { + in, out := &in.ScaleOutCooldown, &out.ScaleOutCooldown + *out = new(float64) + **out = **in + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTrackingScalingPolicyConfigurationInitParameters. +func (in *TargetTrackingScalingPolicyConfigurationInitParameters) DeepCopy() *TargetTrackingScalingPolicyConfigurationInitParameters { + if in == nil { + return nil + } + out := new(TargetTrackingScalingPolicyConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetTrackingScalingPolicyConfigurationObservation) DeepCopyInto(out *TargetTrackingScalingPolicyConfigurationObservation) { + *out = *in + if in.CustomizedMetricSpecification != nil { + in, out := &in.CustomizedMetricSpecification, &out.CustomizedMetricSpecification + *out = new(CustomizedMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.DisableScaleIn != nil { + in, out := &in.DisableScaleIn, &out.DisableScaleIn + *out = new(bool) + **out = **in + } + if in.PredefinedMetricSpecification != nil { + in, out := &in.PredefinedMetricSpecification, &out.PredefinedMetricSpecification + *out = new(PredefinedMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.ScaleInCooldown != nil { + in, out := &in.ScaleInCooldown, &out.ScaleInCooldown + *out = new(float64) + **out = **in + } + if in.ScaleOutCooldown != nil { + in, out := &in.ScaleOutCooldown, &out.ScaleOutCooldown + *out = new(float64) + **out = **in + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTrackingScalingPolicyConfigurationObservation. +func (in *TargetTrackingScalingPolicyConfigurationObservation) DeepCopy() *TargetTrackingScalingPolicyConfigurationObservation { + if in == nil { + return nil + } + out := new(TargetTrackingScalingPolicyConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetTrackingScalingPolicyConfigurationParameters) DeepCopyInto(out *TargetTrackingScalingPolicyConfigurationParameters) { + *out = *in + if in.CustomizedMetricSpecification != nil { + in, out := &in.CustomizedMetricSpecification, &out.CustomizedMetricSpecification + *out = new(CustomizedMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableScaleIn != nil { + in, out := &in.DisableScaleIn, &out.DisableScaleIn + *out = new(bool) + **out = **in + } + if in.PredefinedMetricSpecification != nil { + in, out := &in.PredefinedMetricSpecification, &out.PredefinedMetricSpecification + *out = new(PredefinedMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleInCooldown != nil { + in, out := &in.ScaleInCooldown, &out.ScaleInCooldown + *out = new(float64) + **out = **in + } + if in.ScaleOutCooldown != nil { + in, out := &in.ScaleOutCooldown, &out.ScaleOutCooldown + *out = new(float64) + **out = **in + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTrackingScalingPolicyConfigurationParameters. +func (in *TargetTrackingScalingPolicyConfigurationParameters) DeepCopy() *TargetTrackingScalingPolicyConfigurationParameters { + if in == nil { + return nil + } + out := new(TargetTrackingScalingPolicyConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/appautoscaling/v1beta2/zz_generated.managed.go b/apis/appautoscaling/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..43b59128b5 --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Policy. +func (mg *Policy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Policy. +func (mg *Policy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Policy. +func (mg *Policy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Policy. +func (mg *Policy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Policy. +func (mg *Policy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Policy. +func (mg *Policy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Policy. +func (mg *Policy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Policy. +func (mg *Policy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Policy. +func (mg *Policy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Policy. +func (mg *Policy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Policy. +func (mg *Policy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Policy. 
+func (mg *Policy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ScheduledAction. +func (mg *ScheduledAction) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ScheduledAction. +func (mg *ScheduledAction) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ScheduledAction. +func (mg *ScheduledAction) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ScheduledAction. +func (mg *ScheduledAction) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ScheduledAction. +func (mg *ScheduledAction) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ScheduledAction. +func (mg *ScheduledAction) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ScheduledAction. +func (mg *ScheduledAction) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ScheduledAction. +func (mg *ScheduledAction) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ScheduledAction. +func (mg *ScheduledAction) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ScheduledAction. +func (mg *ScheduledAction) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ScheduledAction. 
+func (mg *ScheduledAction) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ScheduledAction. +func (mg *ScheduledAction) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/appautoscaling/v1beta2/zz_generated.managedlist.go b/apis/appautoscaling/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..f4173f3c6c --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PolicyList. +func (l *PolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ScheduledActionList. +func (l *ScheduledActionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/appautoscaling/v1beta2/zz_generated.resolvers.go b/apis/appautoscaling/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..70388499f0 --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,214 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Policy. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Policy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appautoscaling.aws.upbound.io", "v1beta1", "Target", "TargetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceID), + Extract: resource.ExtractParamPath("resource_id", false), + Reference: mg.Spec.ForProvider.ResourceIDRef, + Selector: mg.Spec.ForProvider.ResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceID") + } + mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appautoscaling.aws.upbound.io", "v1beta1", "Target", "TargetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ScalableDimension), + Extract: resource.ExtractParamPath("scalable_dimension", false), + Reference: 
mg.Spec.ForProvider.ScalableDimensionRef, + Selector: mg.Spec.ForProvider.ScalableDimensionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ScalableDimension") + } + mg.Spec.ForProvider.ScalableDimension = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ScalableDimensionRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appautoscaling.aws.upbound.io", "v1beta1", "Target", "TargetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceNamespace), + Extract: resource.ExtractParamPath("service_namespace", false), + Reference: mg.Spec.ForProvider.ServiceNamespaceRef, + Selector: mg.Spec.ForProvider.ServiceNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceNamespace") + } + mg.Spec.ForProvider.ServiceNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceNamespaceRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ScheduledAction. 
+func (mg *ScheduledAction) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appautoscaling.aws.upbound.io", "v1beta1", "Target", "TargetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceID), + Extract: resource.ExtractParamPath("resource_id", false), + Reference: mg.Spec.ForProvider.ResourceIDRef, + Selector: mg.Spec.ForProvider.ResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceID") + } + mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appautoscaling.aws.upbound.io", "v1beta1", "Target", "TargetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ScalableDimension), + Extract: resource.ExtractParamPath("scalable_dimension", false), + Reference: mg.Spec.ForProvider.ScalableDimensionRef, + Selector: mg.Spec.ForProvider.ScalableDimensionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ScalableDimension") + } + mg.Spec.ForProvider.ScalableDimension = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ScalableDimensionRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("appautoscaling.aws.upbound.io", "v1beta1", "Target", "TargetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceNamespace), + Extract: resource.ExtractParamPath("service_namespace", false), + Reference: mg.Spec.ForProvider.ServiceNamespaceRef, + Selector: mg.Spec.ForProvider.ServiceNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceNamespace") + } + mg.Spec.ForProvider.ServiceNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceNamespaceRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appautoscaling.aws.upbound.io", "v1beta1", "Target", "TargetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceID), + Extract: resource.ExtractParamPath("resource_id", false), + Reference: mg.Spec.InitProvider.ResourceIDRef, + Selector: mg.Spec.InitProvider.ResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceID") + } + mg.Spec.InitProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appautoscaling.aws.upbound.io", "v1beta1", "Target", "TargetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ScalableDimension), + Extract: resource.ExtractParamPath("scalable_dimension", false), + Reference: mg.Spec.InitProvider.ScalableDimensionRef, + Selector: mg.Spec.InitProvider.ScalableDimensionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ScalableDimension") + } + mg.Spec.InitProvider.ScalableDimension = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ScalableDimensionRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appautoscaling.aws.upbound.io", "v1beta1", "Target", "TargetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceNamespace), + Extract: resource.ExtractParamPath("service_namespace", false), + Reference: mg.Spec.InitProvider.ServiceNamespaceRef, + Selector: mg.Spec.InitProvider.ServiceNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceNamespace") + } + mg.Spec.InitProvider.ServiceNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceNamespaceRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/appautoscaling/v1beta2/zz_groupversion_info.go b/apis/appautoscaling/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..3facbc0168 --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=appautoscaling.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "appautoscaling.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/appautoscaling/v1beta2/zz_policy_terraformed.go b/apis/appautoscaling/v1beta2/zz_policy_terraformed.go new file mode 100755 index 0000000000..333091dcac --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_policy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Policy +func (mg *Policy) GetTerraformResourceType() string { + return "aws_appautoscaling_policy" +} + +// GetConnectionDetailsMapping for this Policy +func (tr *Policy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Policy +func (tr *Policy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Policy +func (tr *Policy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Policy +func (tr *Policy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Policy +func (tr *Policy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Policy +func (tr *Policy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Policy +func (tr *Policy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetMergedParameters of this Policy
+func (tr *Policy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Policy using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Policy) LateInitialize(attrs []byte) (bool, error) {
+	params := &PolicyParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Policy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appautoscaling/v1beta2/zz_policy_types.go b/apis/appautoscaling/v1beta2/zz_policy_types.go new file mode 100755 index 0000000000..e0638f0fd9 --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_policy_types.go @@ -0,0 +1,638 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomizedMetricSpecificationInitParameters struct { + + // Configuration block(s) with the dimensions of the metric if the metric was published with dimensions. Detailed below. + Dimensions []DimensionsInitParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Metrics to include, as a metric data query. + Metrics []MetricsInitParameters `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. Valid values: Average, Minimum, Maximum, SampleCount, and Sum. + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedMetricSpecificationObservation struct { + + // Configuration block(s) with the dimensions of the metric if the metric was published with dimensions. Detailed below. + Dimensions []DimensionsObservation `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. 
+ MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Metrics to include, as a metric data query. + Metrics []MetricsObservation `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. Valid values: Average, Minimum, Maximum, SampleCount, and Sum. + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedMetricSpecificationParameters struct { + + // Configuration block(s) with the dimensions of the metric if the metric was published with dimensions. Detailed below. + // +kubebuilder:validation:Optional + Dimensions []DimensionsParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Metrics to include, as a metric data query. + // +kubebuilder:validation:Optional + Metrics []MetricsParameters `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Namespace of the metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. Valid values: Average, Minimum, Maximum, SampleCount, and Sum. + // +kubebuilder:validation:Optional + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type DimensionsInitParameters struct { + + // Name of the dimension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DimensionsObservation struct { + + // Name of the dimension. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DimensionsParameters struct { + + // Name of the dimension. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value of the dimension. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MetricDimensionsInitParameters struct { + + // Name of the dimension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricDimensionsObservation struct { + + // Name of the dimension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricDimensionsParameters struct { + + // Name of the dimension. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value of the dimension. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MetricInitParameters struct { + + // Configuration block(s) with the dimensions of the metric if the metric was published with dimensions. Detailed below. + Dimensions []MetricDimensionsInitParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricObservation struct { + + // Configuration block(s) with the dimensions of the metric if the metric was published with dimensions. Detailed below. + Dimensions []MetricDimensionsObservation `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. 
+ MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricParameters struct { + + // Configuration block(s) with the dimensions of the metric if the metric was published with dimensions. Detailed below. + // +kubebuilder:validation:Optional + Dimensions []MetricDimensionsParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // Namespace of the metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace" tf:"namespace,omitempty"` +} + +type MetricStatInitParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricInitParameters `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricStatObservation struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricObservation `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricStatParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + // +kubebuilder:validation:Optional + Metric *MetricParameters `json:"metric" tf:"metric,omitempty"` + + // Statistic of the metrics to return. 
+ // +kubebuilder:validation:Optional + Stat *string `json:"stat" tf:"stat,omitempty"` + + // Unit of the metric. + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricsInitParameters struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in target tracking scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in target tracking scaling policy. You must specify either expression or metric_stat, but not both. + MetricStat *MetricStatInitParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type MetricsObservation struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in target tracking scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in target tracking scaling policy. You must specify either expression or metric_stat, but not both. 
+ MetricStat *MetricStatObservation `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type MetricsParameters struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in target tracking scaling policy. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in target tracking scaling policy. You must specify either expression or metric_stat, but not both. + // +kubebuilder:validation:Optional + MetricStat *MetricStatParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + // +kubebuilder:validation:Optional + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type PolicyInitParameters struct { + + // Policy type. Valid values are StepScaling and TargetTrackingScaling. Defaults to StepScaling. Certain services only support only one policy type. For more information see the Target Tracking Scaling Policies and Step Scaling Policies documentation. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // Step scaling policy configuration, requires policy_type = "StepScaling" (default). See supported fields below. 
+ StepScalingPolicyConfiguration *StepScalingPolicyConfigurationInitParameters `json:"stepScalingPolicyConfiguration,omitempty" tf:"step_scaling_policy_configuration,omitempty"` + + // Target tracking policy, requires policy_type = "TargetTrackingScaling". See supported fields below. + TargetTrackingScalingPolicyConfiguration *TargetTrackingScalingPolicyConfigurationInitParameters `json:"targetTrackingScalingPolicyConfiguration,omitempty" tf:"target_tracking_scaling_policy_configuration,omitempty"` +} + +type PolicyObservation struct { + + // List of CloudWatch alarm ARNs associated with the scaling policy. + AlarmArns []*string `json:"alarmArns,omitempty" tf:"alarm_arns,omitempty"` + + // ARN assigned by AWS to the scaling policy. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Short name for the metric used in target tracking scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Policy type. Valid values are StepScaling and TargetTrackingScaling. Defaults to StepScaling. Certain services only support only one policy type. For more information see the Target Tracking Scaling Policies and Step Scaling Policies documentation. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // Resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the ResourceId parameter at: AWS Application Auto Scaling API Reference + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Scalable dimension of the scalable target. Documentation can be found in the ScalableDimension parameter at: AWS Application Auto Scaling API Reference + ScalableDimension *string `json:"scalableDimension,omitempty" tf:"scalable_dimension,omitempty"` + + // AWS service namespace of the scalable target. 
Documentation can be found in the ServiceNamespace parameter at: AWS Application Auto Scaling API Reference + ServiceNamespace *string `json:"serviceNamespace,omitempty" tf:"service_namespace,omitempty"` + + // Step scaling policy configuration, requires policy_type = "StepScaling" (default). See supported fields below. + StepScalingPolicyConfiguration *StepScalingPolicyConfigurationObservation `json:"stepScalingPolicyConfiguration,omitempty" tf:"step_scaling_policy_configuration,omitempty"` + + // Target tracking policy, requires policy_type = "TargetTrackingScaling". See supported fields below. + TargetTrackingScalingPolicyConfiguration *TargetTrackingScalingPolicyConfigurationObservation `json:"targetTrackingScalingPolicyConfiguration,omitempty" tf:"target_tracking_scaling_policy_configuration,omitempty"` +} + +type PolicyParameters struct { + + // Policy type. Valid values are StepScaling and TargetTrackingScaling. Defaults to StepScaling. Certain services only support only one policy type. For more information see the Target Tracking Scaling Policies and Step Scaling Policies documentation. + // +kubebuilder:validation:Optional + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Resource type and unique identifier string for the resource associated with the scaling policy. 
Documentation can be found in the ResourceId parameter at: AWS Application Auto Scaling API Reference + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appautoscaling/v1beta1.Target + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("resource_id",false) + // +kubebuilder:validation:Optional + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Reference to a Target in appautoscaling to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDRef *v1.Reference `json:"resourceIdRef,omitempty" tf:"-"` + + // Selector for a Target in appautoscaling to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` + + // Scalable dimension of the scalable target. Documentation can be found in the ScalableDimension parameter at: AWS Application Auto Scaling API Reference + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appautoscaling/v1beta1.Target + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("scalable_dimension",false) + // +kubebuilder:validation:Optional + ScalableDimension *string `json:"scalableDimension,omitempty" tf:"scalable_dimension,omitempty"` + + // Reference to a Target in appautoscaling to populate scalableDimension. + // +kubebuilder:validation:Optional + ScalableDimensionRef *v1.Reference `json:"scalableDimensionRef,omitempty" tf:"-"` + + // Selector for a Target in appautoscaling to populate scalableDimension. + // +kubebuilder:validation:Optional + ScalableDimensionSelector *v1.Selector `json:"scalableDimensionSelector,omitempty" tf:"-"` + + // AWS service namespace of the scalable target. 
Documentation can be found in the ServiceNamespace parameter at: AWS Application Auto Scaling API Reference + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appautoscaling/v1beta1.Target + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("service_namespace",false) + // +kubebuilder:validation:Optional + ServiceNamespace *string `json:"serviceNamespace,omitempty" tf:"service_namespace,omitempty"` + + // Reference to a Target in appautoscaling to populate serviceNamespace. + // +kubebuilder:validation:Optional + ServiceNamespaceRef *v1.Reference `json:"serviceNamespaceRef,omitempty" tf:"-"` + + // Selector for a Target in appautoscaling to populate serviceNamespace. + // +kubebuilder:validation:Optional + ServiceNamespaceSelector *v1.Selector `json:"serviceNamespaceSelector,omitempty" tf:"-"` + + // Step scaling policy configuration, requires policy_type = "StepScaling" (default). See supported fields below. + // +kubebuilder:validation:Optional + StepScalingPolicyConfiguration *StepScalingPolicyConfigurationParameters `json:"stepScalingPolicyConfiguration,omitempty" tf:"step_scaling_policy_configuration,omitempty"` + + // Target tracking policy, requires policy_type = "TargetTrackingScaling". See supported fields below. + // +kubebuilder:validation:Optional + TargetTrackingScalingPolicyConfiguration *TargetTrackingScalingPolicyConfigurationParameters `json:"targetTrackingScalingPolicyConfiguration,omitempty" tf:"target_tracking_scaling_policy_configuration,omitempty"` +} + +type PredefinedMetricSpecificationInitParameters struct { + + // Metric type. + PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Reserved for future use if the predefined_metric_type is not ALBRequestCountPerTarget. If the predefined_metric_type is ALBRequestCountPerTarget, you must specify this argument. 
Documentation can be found at: AWS Predefined Scaling Metric Specification. Must be less than or equal to 1023 characters in length. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedMetricSpecificationObservation struct { + + // Metric type. + PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Reserved for future use if the predefined_metric_type is not ALBRequestCountPerTarget. If the predefined_metric_type is ALBRequestCountPerTarget, you must specify this argument. Documentation can be found at: AWS Predefined Scaling Metric Specification. Must be less than or equal to 1023 characters in length. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedMetricSpecificationParameters struct { + + // Metric type. + // +kubebuilder:validation:Optional + PredefinedMetricType *string `json:"predefinedMetricType" tf:"predefined_metric_type,omitempty"` + + // Reserved for future use if the predefined_metric_type is not ALBRequestCountPerTarget. If the predefined_metric_type is ALBRequestCountPerTarget, you must specify this argument. Documentation can be found at: AWS Predefined Scaling Metric Specification. Must be less than or equal to 1023 characters in length. + // +kubebuilder:validation:Optional + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type StepAdjustmentInitParameters struct { + + // Lower bound for the difference between the alarm threshold and the CloudWatch metric. Without a value, AWS will treat this bound as negative infinity. + MetricIntervalLowerBound *string `json:"metricIntervalLowerBound,omitempty" tf:"metric_interval_lower_bound,omitempty"` + + // Upper bound for the difference between the alarm threshold and the CloudWatch metric. Without a value, AWS will treat this bound as infinity. 
The upper bound must be greater than the lower bound. + MetricIntervalUpperBound *string `json:"metricIntervalUpperBound,omitempty" tf:"metric_interval_upper_bound,omitempty"` + + // Number of members by which to scale, when the adjustment bounds are breached. A positive value scales up. A negative value scales down. + ScalingAdjustment *float64 `json:"scalingAdjustment,omitempty" tf:"scaling_adjustment,omitempty"` +} + +type StepAdjustmentObservation struct { + + // Lower bound for the difference between the alarm threshold and the CloudWatch metric. Without a value, AWS will treat this bound as negative infinity. + MetricIntervalLowerBound *string `json:"metricIntervalLowerBound,omitempty" tf:"metric_interval_lower_bound,omitempty"` + + // Upper bound for the difference between the alarm threshold and the CloudWatch metric. Without a value, AWS will treat this bound as infinity. The upper bound must be greater than the lower bound. + MetricIntervalUpperBound *string `json:"metricIntervalUpperBound,omitempty" tf:"metric_interval_upper_bound,omitempty"` + + // Number of members by which to scale, when the adjustment bounds are breached. A positive value scales up. A negative value scales down. + ScalingAdjustment *float64 `json:"scalingAdjustment,omitempty" tf:"scaling_adjustment,omitempty"` +} + +type StepAdjustmentParameters struct { + + // Lower bound for the difference between the alarm threshold and the CloudWatch metric. Without a value, AWS will treat this bound as negative infinity. + // +kubebuilder:validation:Optional + MetricIntervalLowerBound *string `json:"metricIntervalLowerBound,omitempty" tf:"metric_interval_lower_bound,omitempty"` + + // Upper bound for the difference between the alarm threshold and the CloudWatch metric. Without a value, AWS will treat this bound as infinity. The upper bound must be greater than the lower bound. 
+ // +kubebuilder:validation:Optional + MetricIntervalUpperBound *string `json:"metricIntervalUpperBound,omitempty" tf:"metric_interval_upper_bound,omitempty"` + + // Number of members by which to scale, when the adjustment bounds are breached. A positive value scales up. A negative value scales down. + // +kubebuilder:validation:Optional + ScalingAdjustment *float64 `json:"scalingAdjustment" tf:"scaling_adjustment,omitempty"` +} + +type StepScalingPolicyConfigurationInitParameters struct { + + // Whether the adjustment is an absolute number or a percentage of the current capacity. Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + AdjustmentType *string `json:"adjustmentType,omitempty" tf:"adjustment_type,omitempty"` + + // Amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. + Cooldown *float64 `json:"cooldown,omitempty" tf:"cooldown,omitempty"` + + // Aggregation type for the policy's metrics. Valid values are "Minimum", "Maximum", and "Average". Without a value, AWS will treat the aggregation type as "Average". + MetricAggregationType *string `json:"metricAggregationType,omitempty" tf:"metric_aggregation_type,omitempty"` + + // Minimum number to adjust your scalable dimension as a result of a scaling activity. If the adjustment type is PercentChangeInCapacity, the scaling policy changes the scalable dimension of the scalable target by this amount. + MinAdjustmentMagnitude *float64 `json:"minAdjustmentMagnitude,omitempty" tf:"min_adjustment_magnitude,omitempty"` + + // Set of adjustments that manage scaling. These have the following structure: + StepAdjustment []StepAdjustmentInitParameters `json:"stepAdjustment,omitempty" tf:"step_adjustment,omitempty"` +} + +type StepScalingPolicyConfigurationObservation struct { + + // Whether the adjustment is an absolute number or a percentage of the current capacity. 
Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + AdjustmentType *string `json:"adjustmentType,omitempty" tf:"adjustment_type,omitempty"` + + // Amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. + Cooldown *float64 `json:"cooldown,omitempty" tf:"cooldown,omitempty"` + + // Aggregation type for the policy's metrics. Valid values are "Minimum", "Maximum", and "Average". Without a value, AWS will treat the aggregation type as "Average". + MetricAggregationType *string `json:"metricAggregationType,omitempty" tf:"metric_aggregation_type,omitempty"` + + // Minimum number to adjust your scalable dimension as a result of a scaling activity. If the adjustment type is PercentChangeInCapacity, the scaling policy changes the scalable dimension of the scalable target by this amount. + MinAdjustmentMagnitude *float64 `json:"minAdjustmentMagnitude,omitempty" tf:"min_adjustment_magnitude,omitempty"` + + // Set of adjustments that manage scaling. These have the following structure: + StepAdjustment []StepAdjustmentObservation `json:"stepAdjustment,omitempty" tf:"step_adjustment,omitempty"` +} + +type StepScalingPolicyConfigurationParameters struct { + + // Whether the adjustment is an absolute number or a percentage of the current capacity. Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + // +kubebuilder:validation:Optional + AdjustmentType *string `json:"adjustmentType,omitempty" tf:"adjustment_type,omitempty"` + + // Amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. + // +kubebuilder:validation:Optional + Cooldown *float64 `json:"cooldown,omitempty" tf:"cooldown,omitempty"` + + // Aggregation type for the policy's metrics. Valid values are "Minimum", "Maximum", and "Average". Without a value, AWS will treat the aggregation type as "Average". 
+ // +kubebuilder:validation:Optional + MetricAggregationType *string `json:"metricAggregationType,omitempty" tf:"metric_aggregation_type,omitempty"` + + // Minimum number to adjust your scalable dimension as a result of a scaling activity. If the adjustment type is PercentChangeInCapacity, the scaling policy changes the scalable dimension of the scalable target by this amount. + // +kubebuilder:validation:Optional + MinAdjustmentMagnitude *float64 `json:"minAdjustmentMagnitude,omitempty" tf:"min_adjustment_magnitude,omitempty"` + + // Set of adjustments that manage scaling. These have the following structure: + // +kubebuilder:validation:Optional + StepAdjustment []StepAdjustmentParameters `json:"stepAdjustment,omitempty" tf:"step_adjustment,omitempty"` +} + +type TargetTrackingScalingPolicyConfigurationInitParameters struct { + + // Custom CloudWatch metric. Documentation can be found at: AWS Customized Metric Specification. See supported fields below. + CustomizedMetricSpecification *CustomizedMetricSpecificationInitParameters `json:"customizedMetricSpecification,omitempty" tf:"customized_metric_specification,omitempty"` + + // Whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false. + DisableScaleIn *bool `json:"disableScaleIn,omitempty" tf:"disable_scale_in,omitempty"` + + // Predefined metric. See supported fields below. + PredefinedMetricSpecification *PredefinedMetricSpecificationInitParameters `json:"predefinedMetricSpecification,omitempty" tf:"predefined_metric_specification,omitempty"` + + // Amount of time, in seconds, after a scale in activity completes before another scale in activity can start. 
+ ScaleInCooldown *float64 `json:"scaleInCooldown,omitempty" tf:"scale_in_cooldown,omitempty"` + + // Amount of time, in seconds, after a scale out activity completes before another scale out activity can start. + ScaleOutCooldown *float64 `json:"scaleOutCooldown,omitempty" tf:"scale_out_cooldown,omitempty"` + + // Target value for the metric. + TargetValue *float64 `json:"targetValue,omitempty" tf:"target_value,omitempty"` +} + +type TargetTrackingScalingPolicyConfigurationObservation struct { + + // Custom CloudWatch metric. Documentation can be found at: AWS Customized Metric Specification. See supported fields below. + CustomizedMetricSpecification *CustomizedMetricSpecificationObservation `json:"customizedMetricSpecification,omitempty" tf:"customized_metric_specification,omitempty"` + + // Whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false. + DisableScaleIn *bool `json:"disableScaleIn,omitempty" tf:"disable_scale_in,omitempty"` + + // Predefined metric. See supported fields below. + PredefinedMetricSpecification *PredefinedMetricSpecificationObservation `json:"predefinedMetricSpecification,omitempty" tf:"predefined_metric_specification,omitempty"` + + // Amount of time, in seconds, after a scale in activity completes before another scale in activity can start. + ScaleInCooldown *float64 `json:"scaleInCooldown,omitempty" tf:"scale_in_cooldown,omitempty"` + + // Amount of time, in seconds, after a scale out activity completes before another scale out activity can start. + ScaleOutCooldown *float64 `json:"scaleOutCooldown,omitempty" tf:"scale_out_cooldown,omitempty"` + + // Target value for the metric. 
+ TargetValue *float64 `json:"targetValue,omitempty" tf:"target_value,omitempty"` +} + +type TargetTrackingScalingPolicyConfigurationParameters struct { + + // Custom CloudWatch metric. Documentation can be found at: AWS Customized Metric Specification. See supported fields below. + // +kubebuilder:validation:Optional + CustomizedMetricSpecification *CustomizedMetricSpecificationParameters `json:"customizedMetricSpecification,omitempty" tf:"customized_metric_specification,omitempty"` + + // Whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false. + // +kubebuilder:validation:Optional + DisableScaleIn *bool `json:"disableScaleIn,omitempty" tf:"disable_scale_in,omitempty"` + + // Predefined metric. See supported fields below. + // +kubebuilder:validation:Optional + PredefinedMetricSpecification *PredefinedMetricSpecificationParameters `json:"predefinedMetricSpecification,omitempty" tf:"predefined_metric_specification,omitempty"` + + // Amount of time, in seconds, after a scale in activity completes before another scale in activity can start. + // +kubebuilder:validation:Optional + ScaleInCooldown *float64 `json:"scaleInCooldown,omitempty" tf:"scale_in_cooldown,omitempty"` + + // Amount of time, in seconds, after a scale out activity completes before another scale out activity can start. + // +kubebuilder:validation:Optional + ScaleOutCooldown *float64 `json:"scaleOutCooldown,omitempty" tf:"scale_out_cooldown,omitempty"` + + // Target value for the metric. 
+ // +kubebuilder:validation:Optional + TargetValue *float64 `json:"targetValue" tf:"target_value,omitempty"` +} + +// PolicySpec defines the desired state of Policy +type PolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PolicyInitParameters `json:"initProvider,omitempty"` +} + +// PolicyStatus defines the observed state of Policy. +type PolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Policy is the Schema for the Policys API. Provides an Application AutoScaling Policy resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Policy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec PolicySpec `json:"spec"` + Status PolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PolicyList contains a list of Policys +type PolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Policy `json:"items"` +} + +// Repository type metadata. +var ( + Policy_Kind = "Policy" + Policy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Policy_Kind}.String() + Policy_KindAPIVersion = Policy_Kind + "." + CRDGroupVersion.String() + Policy_GroupVersionKind = CRDGroupVersion.WithKind(Policy_Kind) +) + +func init() { + SchemeBuilder.Register(&Policy{}, &PolicyList{}) +} diff --git a/apis/appautoscaling/v1beta2/zz_scheduledaction_terraformed.go b/apis/appautoscaling/v1beta2/zz_scheduledaction_terraformed.go new file mode 100755 index 0000000000..a77f015965 --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_scheduledaction_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ScheduledAction +func (mg *ScheduledAction) GetTerraformResourceType() string { + return "aws_appautoscaling_scheduled_action" +} + +// GetConnectionDetailsMapping for this ScheduledAction +func (tr *ScheduledAction) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ScheduledAction +func (tr *ScheduledAction) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ScheduledAction +func (tr *ScheduledAction) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ScheduledAction +func (tr *ScheduledAction) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ScheduledAction +func (tr *ScheduledAction) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ScheduledAction +func (tr *ScheduledAction) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ScheduledAction +func (tr *ScheduledAction) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ScheduledAction
+func (tr *ScheduledAction) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this ScheduledAction using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *ScheduledAction) LateInitialize(attrs []byte) (bool, error) {
+	params := &ScheduledActionParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ScheduledAction) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appautoscaling/v1beta2/zz_scheduledaction_types.go b/apis/appautoscaling/v1beta2/zz_scheduledaction_types.go new file mode 100755 index 0000000000..0a57adc9fd --- /dev/null +++ b/apis/appautoscaling/v1beta2/zz_scheduledaction_types.go @@ -0,0 +1,275 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ScalableTargetActionInitParameters struct { + + // Maximum capacity. At least one of max_capacity or min_capacity must be set. + MaxCapacity *string `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity. At least one of min_capacity or max_capacity must be set. + MinCapacity *string `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type ScalableTargetActionObservation struct { + + // Maximum capacity. At least one of max_capacity or min_capacity must be set. + MaxCapacity *string `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity. At least one of min_capacity or max_capacity must be set. + MinCapacity *string `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type ScalableTargetActionParameters struct { + + // Maximum capacity. At least one of max_capacity or min_capacity must be set. + // +kubebuilder:validation:Optional + MaxCapacity *string `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity. At least one of min_capacity or max_capacity must be set. 
+ // +kubebuilder:validation:Optional + MinCapacity *string `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type ScheduledActionInitParameters struct { + + // Date and time for the scheduled action to end in RFC 3339 format. The timezone is not affected by the setting of timezone. + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Name of the scheduled action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Identifier of the resource associated with the scheduled action. Documentation can be found in the ResourceId parameter at: AWS Application Auto Scaling API Reference + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appautoscaling/v1beta1.Target + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("resource_id",false) + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Reference to a Target in appautoscaling to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDRef *v1.Reference `json:"resourceIdRef,omitempty" tf:"-"` + + // Selector for a Target in appautoscaling to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` + + // Scalable dimension. Documentation can be found in the ScalableDimension parameter at: AWS Application Auto Scaling API Reference Example: ecs:service:DesiredCount + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appautoscaling/v1beta1.Target + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("scalable_dimension",false) + ScalableDimension *string `json:"scalableDimension,omitempty" tf:"scalable_dimension,omitempty"` + + // Reference to a Target in appautoscaling to populate scalableDimension. 
+ // +kubebuilder:validation:Optional + ScalableDimensionRef *v1.Reference `json:"scalableDimensionRef,omitempty" tf:"-"` + + // Selector for a Target in appautoscaling to populate scalableDimension. + // +kubebuilder:validation:Optional + ScalableDimensionSelector *v1.Selector `json:"scalableDimensionSelector,omitempty" tf:"-"` + + // New minimum and maximum capacity. You can set both values or just one. See below + ScalableTargetAction *ScalableTargetActionInitParameters `json:"scalableTargetAction,omitempty" tf:"scalable_target_action,omitempty"` + + // Schedule for this action. The following formats are supported: At expressions - at(yyyy-mm-ddThh:mm:ss), Rate expressions - rate(valueunit), Cron expressions - cron(fields). Times for at expressions and cron expressions are evaluated using the time zone configured in timezone. Documentation can be found in the Timezone parameter at: AWS Application Auto Scaling API Reference + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Namespace of the AWS service. Documentation can be found in the ServiceNamespace parameter at: AWS Application Auto Scaling API Reference Example: ecs + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appautoscaling/v1beta1.Target + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("service_namespace",false) + ServiceNamespace *string `json:"serviceNamespace,omitempty" tf:"service_namespace,omitempty"` + + // Reference to a Target in appautoscaling to populate serviceNamespace. + // +kubebuilder:validation:Optional + ServiceNamespaceRef *v1.Reference `json:"serviceNamespaceRef,omitempty" tf:"-"` + + // Selector for a Target in appautoscaling to populate serviceNamespace. + // +kubebuilder:validation:Optional + ServiceNamespaceSelector *v1.Selector `json:"serviceNamespaceSelector,omitempty" tf:"-"` + + // Date and time for the scheduled action to start in RFC 3339 format. 
The timezone is not affected by the setting of timezone. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Time zone used when setting a scheduled action by using an at or cron expression. Does not affect timezone for start_time and end_time. Valid values are the canonical names of the IANA time zones supported by Joda-Time, such as Etc/GMT+9 or Pacific/Tahiti. Default is UTC. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type ScheduledActionObservation struct { + + // ARN of the scheduled action. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Date and time for the scheduled action to end in RFC 3339 format. The timezone is not affected by the setting of timezone. + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the scheduled action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Identifier of the resource associated with the scheduled action. Documentation can be found in the ResourceId parameter at: AWS Application Auto Scaling API Reference + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Scalable dimension. Documentation can be found in the ScalableDimension parameter at: AWS Application Auto Scaling API Reference Example: ecs:service:DesiredCount + ScalableDimension *string `json:"scalableDimension,omitempty" tf:"scalable_dimension,omitempty"` + + // New minimum and maximum capacity. You can set both values or just one. See below + ScalableTargetAction *ScalableTargetActionObservation `json:"scalableTargetAction,omitempty" tf:"scalable_target_action,omitempty"` + + // Schedule for this action. The following formats are supported: At expressions - at(yyyy-mm-ddThh:mm:ss), Rate expressions - rate(valueunit), Cron expressions - cron(fields). Times for at expressions and cron expressions are evaluated using the time zone configured in timezone. 
Documentation can be found in the Timezone parameter at: AWS Application Auto Scaling API Reference + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Namespace of the AWS service. Documentation can be found in the ServiceNamespace parameter at: AWS Application Auto Scaling API Reference Example: ecs + ServiceNamespace *string `json:"serviceNamespace,omitempty" tf:"service_namespace,omitempty"` + + // Date and time for the scheduled action to start in RFC 3339 format. The timezone is not affected by the setting of timezone. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Time zone used when setting a scheduled action by using an at or cron expression. Does not affect timezone for start_time and end_time. Valid values are the canonical names of the IANA time zones supported by Joda-Time, such as Etc/GMT+9 or Pacific/Tahiti. Default is UTC. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type ScheduledActionParameters struct { + + // Date and time for the scheduled action to end in RFC 3339 format. The timezone is not affected by the setting of timezone. + // +kubebuilder:validation:Optional + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Name of the scheduled action. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Identifier of the resource associated with the scheduled action. 
Documentation can be found in the ResourceId parameter at: AWS Application Auto Scaling API Reference + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appautoscaling/v1beta1.Target + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("resource_id",false) + // +kubebuilder:validation:Optional + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Reference to a Target in appautoscaling to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDRef *v1.Reference `json:"resourceIdRef,omitempty" tf:"-"` + + // Selector for a Target in appautoscaling to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` + + // Scalable dimension. Documentation can be found in the ScalableDimension parameter at: AWS Application Auto Scaling API Reference Example: ecs:service:DesiredCount + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appautoscaling/v1beta1.Target + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("scalable_dimension",false) + // +kubebuilder:validation:Optional + ScalableDimension *string `json:"scalableDimension,omitempty" tf:"scalable_dimension,omitempty"` + + // Reference to a Target in appautoscaling to populate scalableDimension. + // +kubebuilder:validation:Optional + ScalableDimensionRef *v1.Reference `json:"scalableDimensionRef,omitempty" tf:"-"` + + // Selector for a Target in appautoscaling to populate scalableDimension. + // +kubebuilder:validation:Optional + ScalableDimensionSelector *v1.Selector `json:"scalableDimensionSelector,omitempty" tf:"-"` + + // New minimum and maximum capacity. You can set both values or just one. 
See below + // +kubebuilder:validation:Optional + ScalableTargetAction *ScalableTargetActionParameters `json:"scalableTargetAction,omitempty" tf:"scalable_target_action,omitempty"` + + // Schedule for this action. The following formats are supported: At expressions - at(yyyy-mm-ddThh:mm:ss), Rate expressions - rate(valueunit), Cron expressions - cron(fields). Times for at expressions and cron expressions are evaluated using the time zone configured in timezone. Documentation can be found in the Timezone parameter at: AWS Application Auto Scaling API Reference + // +kubebuilder:validation:Optional + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Namespace of the AWS service. Documentation can be found in the ServiceNamespace parameter at: AWS Application Auto Scaling API Reference Example: ecs + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appautoscaling/v1beta1.Target + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("service_namespace",false) + // +kubebuilder:validation:Optional + ServiceNamespace *string `json:"serviceNamespace,omitempty" tf:"service_namespace,omitempty"` + + // Reference to a Target in appautoscaling to populate serviceNamespace. + // +kubebuilder:validation:Optional + ServiceNamespaceRef *v1.Reference `json:"serviceNamespaceRef,omitempty" tf:"-"` + + // Selector for a Target in appautoscaling to populate serviceNamespace. + // +kubebuilder:validation:Optional + ServiceNamespaceSelector *v1.Selector `json:"serviceNamespaceSelector,omitempty" tf:"-"` + + // Date and time for the scheduled action to start in RFC 3339 format. The timezone is not affected by the setting of timezone. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Time zone used when setting a scheduled action by using an at or cron expression. Does not affect timezone for start_time and end_time. 
Valid values are the canonical names of the IANA time zones supported by Joda-Time, such as Etc/GMT+9 or Pacific/Tahiti. Default is UTC. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +// ScheduledActionSpec defines the desired state of ScheduledAction +type ScheduledActionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScheduledActionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScheduledActionInitParameters `json:"initProvider,omitempty"` +} + +// ScheduledActionStatus defines the observed state of ScheduledAction. +type ScheduledActionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScheduledActionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ScheduledAction is the Schema for the ScheduledActions API. Provides an Application AutoScaling ScheduledAction resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ScheduledAction struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scalableTargetAction) || (has(self.initProvider) && has(self.initProvider.scalableTargetAction))",message="spec.forProvider.scalableTargetAction is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.schedule) || (has(self.initProvider) && has(self.initProvider.schedule))",message="spec.forProvider.schedule is a required parameter" + Spec ScheduledActionSpec `json:"spec"` + Status ScheduledActionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduledActionList contains a list of ScheduledActions +type ScheduledActionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScheduledAction `json:"items"` +} + +// Repository type 
metadata. +var ( + ScheduledAction_Kind = "ScheduledAction" + ScheduledAction_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ScheduledAction_Kind}.String() + ScheduledAction_KindAPIVersion = ScheduledAction_Kind + "." + CRDGroupVersion.String() + ScheduledAction_GroupVersionKind = CRDGroupVersion.WithKind(ScheduledAction_Kind) +) + +func init() { + SchemeBuilder.Register(&ScheduledAction{}, &ScheduledActionList{}) +} diff --git a/apis/appconfig/v1beta1/zz_environment_types.go b/apis/appconfig/v1beta1/zz_environment_types.go index 21b5d1ab93..869a63d34b 100755 --- a/apis/appconfig/v1beta1/zz_environment_types.go +++ b/apis/appconfig/v1beta1/zz_environment_types.go @@ -120,7 +120,7 @@ type EnvironmentParameters struct { type MonitorInitParameters struct { // ARN of the Amazon CloudWatch alarm. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta1.MetricAlarm + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta2.MetricAlarm // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) AlarmArn *string `json:"alarmArn,omitempty" tf:"alarm_arn,omitempty"` @@ -158,7 +158,7 @@ type MonitorObservation struct { type MonitorParameters struct { // ARN of the Amazon CloudWatch alarm. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta1.MetricAlarm + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta2.MetricAlarm // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional AlarmArn *string `json:"alarmArn,omitempty" tf:"alarm_arn,omitempty"` diff --git a/apis/appconfig/v1beta1/zz_generated.resolvers.go b/apis/appconfig/v1beta1/zz_generated.resolvers.go index cf84742c03..453d31bcad 100644 --- a/apis/appconfig/v1beta1/zz_generated.resolvers.go +++ b/apis/appconfig/v1beta1/zz_generated.resolvers.go @@ -376,7 +376,7 @@ func (mg *Environment) ResolveReferences(ctx context.Context, c client.Reader) e for i3 := 0; i3 < len(mg.Spec.ForProvider.Monitor); i3++ { { - m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta1", "MetricAlarm", "MetricAlarmList") + m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta2", "MetricAlarm", "MetricAlarmList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -437,7 +437,7 @@ func (mg *Environment) ResolveReferences(ctx context.Context, c client.Reader) e for i3 := 0; i3 < len(mg.Spec.InitProvider.Monitor); i3++ { { - m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta1", "MetricAlarm", "MetricAlarmList") + m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta2", "MetricAlarm", "MetricAlarmList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/appflow/v1beta1/zz_generated.conversion_spokes.go b/apis/appflow/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ce02cc9bcd --- /dev/null +++ 
b/apis/appflow/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Flow to the hub type. +func (tr *Flow) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Flow type. +func (tr *Flow) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/appflow/v1beta2/zz_flow_terraformed.go b/apis/appflow/v1beta2/zz_flow_terraformed.go new file mode 100755 index 0000000000..f710088ef1 --- /dev/null +++ b/apis/appflow/v1beta2/zz_flow_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+
+package v1beta2
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Flow
+func (mg *Flow) GetTerraformResourceType() string {
+	return "aws_appflow_flow"
+}
+
+// GetConnectionDetailsMapping for this Flow
+func (tr *Flow) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Flow
+func (tr *Flow) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Flow
+func (tr *Flow) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Flow
+func (tr *Flow) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Flow
+func (tr *Flow) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Flow
+func (tr *Flow) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Flow
+func (tr *Flow) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Flow
+func (tr *Flow) 
GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Flow using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Flow) LateInitialize(attrs []byte) (bool, error) { + params := &FlowParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Flow) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appflow/v1beta2/zz_flow_types.go b/apis/appflow/v1beta2/zz_flow_types.go new file mode 100755 index 0000000000..8afc69f9e1 --- /dev/null +++ b/apis/appflow/v1beta2/zz_flow_types.go @@ -0,0 +1,2512 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AggregationConfigInitParameters struct { + + // Whether Amazon AppFlow aggregates the flow records into a single file, or leave them unaggregated. Valid values are None and SingleFile. + AggregationType *string `json:"aggregationType,omitempty" tf:"aggregation_type,omitempty"` + + // The desired file size, in MB, for each output file that Amazon AppFlow writes to the flow destination. Integer value. + TargetFileSize *float64 `json:"targetFileSize,omitempty" tf:"target_file_size,omitempty"` +} + +type AggregationConfigObservation struct { + + // Whether Amazon AppFlow aggregates the flow records into a single file, or leave them unaggregated. Valid values are None and SingleFile. + AggregationType *string `json:"aggregationType,omitempty" tf:"aggregation_type,omitempty"` + + // The desired file size, in MB, for each output file that Amazon AppFlow writes to the flow destination. Integer value. + TargetFileSize *float64 `json:"targetFileSize,omitempty" tf:"target_file_size,omitempty"` +} + +type AggregationConfigParameters struct { + + // Whether Amazon AppFlow aggregates the flow records into a single file, or leave them unaggregated. Valid values are None and SingleFile. 
+ // +kubebuilder:validation:Optional + AggregationType *string `json:"aggregationType,omitempty" tf:"aggregation_type,omitempty"` + + // The desired file size, in MB, for each output file that Amazon AppFlow writes to the flow destination. Integer value. + // +kubebuilder:validation:Optional + TargetFileSize *float64 `json:"targetFileSize,omitempty" tf:"target_file_size,omitempty"` +} + +type AmplitudeInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type AmplitudeObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type AmplitudeParameters struct { + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type ConnectorOperatorInitParameters struct { + + // Information that is required for querying Amplitude. See Generic Source Properties for more details. + Amplitude *string `json:"amplitude,omitempty" tf:"amplitude,omitempty"` + + // Properties that are required to query the custom Connector. See Custom Connector Destination Properties for more details. + CustomConnector *string `json:"customConnector,omitempty" tf:"custom_connector,omitempty"` + + // Information that is required for querying Datadog. See Generic Source Properties for more details. + Datadog *string `json:"datadog,omitempty" tf:"datadog,omitempty"` + + // Operation to be performed on the provided Dynatrace source fields. Valid values are PROJECTION, BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + Dynatrace *string `json:"dynatrace,omitempty" tf:"dynatrace,omitempty"` + + // Operation to be performed on the provided Google Analytics source fields. 
Valid values are PROJECTION and BETWEEN. + GoogleAnalytics *string `json:"googleAnalytics,omitempty" tf:"google_analytics,omitempty"` + + // Information that is required for querying Infor Nexus. See Generic Source Properties for more details. + InforNexus *string `json:"inforNexus,omitempty" tf:"infor_nexus,omitempty"` + + // Properties that are required to query Marketo. See Generic Destination Properties for more details. + Marketo *string `json:"marketo,omitempty" tf:"marketo,omitempty"` + + // Properties that are required to query Amazon S3. See S3 Destination Properties for more details. + S3 *string `json:"s3,omitempty" tf:"s3,omitempty"` + + // Properties that are required to query Salesforce. See Salesforce Destination Properties for more details. + Salesforce *string `json:"salesforce,omitempty" tf:"salesforce,omitempty"` + + // Properties that are required to query SAPOData. See SAPOData Destination Properties for more details. + SapoData *string `json:"sapoData,omitempty" tf:"sapo_data,omitempty"` + + // Information that is required for querying ServiceNow. See Generic Source Properties for more details. + ServiceNow *string `json:"serviceNow,omitempty" tf:"service_now,omitempty"` + + // Information that is required for querying Singular. See Generic Source Properties for more details. + Singular *string `json:"singular,omitempty" tf:"singular,omitempty"` + + // Information that is required for querying Slack. See Generic Source Properties for more details. + Slack *string `json:"slack,omitempty" tf:"slack,omitempty"` + + // Operation to be performed on the provided Trend Micro source fields. Valid values are PROJECTION, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + Trendmicro *string `json:"trendmicro,omitempty" tf:"trendmicro,omitempty"` + + // Information that is required for querying Veeva. 
See Veeva Source Properties for more details. + Veeva *string `json:"veeva,omitempty" tf:"veeva,omitempty"` + + // Properties that are required to query Zendesk. See Zendesk Destination Properties for more details. + Zendesk *string `json:"zendesk,omitempty" tf:"zendesk,omitempty"` +} + +type ConnectorOperatorObservation struct { + + // Information that is required for querying Amplitude. See Generic Source Properties for more details. + Amplitude *string `json:"amplitude,omitempty" tf:"amplitude,omitempty"` + + // Properties that are required to query the custom Connector. See Custom Connector Destination Properties for more details. + CustomConnector *string `json:"customConnector,omitempty" tf:"custom_connector,omitempty"` + + // Information that is required for querying Datadog. See Generic Source Properties for more details. + Datadog *string `json:"datadog,omitempty" tf:"datadog,omitempty"` + + // Operation to be performed on the provided Dynatrace source fields. Valid values are PROJECTION, BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + Dynatrace *string `json:"dynatrace,omitempty" tf:"dynatrace,omitempty"` + + // Operation to be performed on the provided Google Analytics source fields. Valid values are PROJECTION and BETWEEN. + GoogleAnalytics *string `json:"googleAnalytics,omitempty" tf:"google_analytics,omitempty"` + + // Information that is required for querying Infor Nexus. See Generic Source Properties for more details. + InforNexus *string `json:"inforNexus,omitempty" tf:"infor_nexus,omitempty"` + + // Properties that are required to query Marketo. See Generic Destination Properties for more details. + Marketo *string `json:"marketo,omitempty" tf:"marketo,omitempty"` + + // Properties that are required to query Amazon S3. See S3 Destination Properties for more details. 
+ S3 *string `json:"s3,omitempty" tf:"s3,omitempty"` + + // Properties that are required to query Salesforce. See Salesforce Destination Properties for more details. + Salesforce *string `json:"salesforce,omitempty" tf:"salesforce,omitempty"` + + // Properties that are required to query SAPOData. See SAPOData Destination Properties for more details. + SapoData *string `json:"sapoData,omitempty" tf:"sapo_data,omitempty"` + + // Information that is required for querying ServiceNow. See Generic Source Properties for more details. + ServiceNow *string `json:"serviceNow,omitempty" tf:"service_now,omitempty"` + + // Information that is required for querying Singular. See Generic Source Properties for more details. + Singular *string `json:"singular,omitempty" tf:"singular,omitempty"` + + // Information that is required for querying Slack. See Generic Source Properties for more details. + Slack *string `json:"slack,omitempty" tf:"slack,omitempty"` + + // Operation to be performed on the provided Trend Micro source fields. Valid values are PROJECTION, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + Trendmicro *string `json:"trendmicro,omitempty" tf:"trendmicro,omitempty"` + + // Information that is required for querying Veeva. See Veeva Source Properties for more details. + Veeva *string `json:"veeva,omitempty" tf:"veeva,omitempty"` + + // Properties that are required to query Zendesk. See Zendesk Destination Properties for more details. + Zendesk *string `json:"zendesk,omitempty" tf:"zendesk,omitempty"` +} + +type ConnectorOperatorParameters struct { + + // Information that is required for querying Amplitude. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + Amplitude *string `json:"amplitude,omitempty" tf:"amplitude,omitempty"` + + // Properties that are required to query the custom Connector. 
See Custom Connector Destination Properties for more details. + // +kubebuilder:validation:Optional + CustomConnector *string `json:"customConnector,omitempty" tf:"custom_connector,omitempty"` + + // Information that is required for querying Datadog. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + Datadog *string `json:"datadog,omitempty" tf:"datadog,omitempty"` + + // Operation to be performed on the provided Dynatrace source fields. Valid values are PROJECTION, BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + // +kubebuilder:validation:Optional + Dynatrace *string `json:"dynatrace,omitempty" tf:"dynatrace,omitempty"` + + // Operation to be performed on the provided Google Analytics source fields. Valid values are PROJECTION and BETWEEN. + // +kubebuilder:validation:Optional + GoogleAnalytics *string `json:"googleAnalytics,omitempty" tf:"google_analytics,omitempty"` + + // Information that is required for querying Infor Nexus. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + InforNexus *string `json:"inforNexus,omitempty" tf:"infor_nexus,omitempty"` + + // Properties that are required to query Marketo. See Generic Destination Properties for more details. + // +kubebuilder:validation:Optional + Marketo *string `json:"marketo,omitempty" tf:"marketo,omitempty"` + + // Properties that are required to query Amazon S3. See S3 Destination Properties for more details. + // +kubebuilder:validation:Optional + S3 *string `json:"s3,omitempty" tf:"s3,omitempty"` + + // Properties that are required to query Salesforce. See Salesforce Destination Properties for more details. + // +kubebuilder:validation:Optional + Salesforce *string `json:"salesforce,omitempty" tf:"salesforce,omitempty"` + + // Properties that are required to query SAPOData. 
See SAPOData Destination Properties for more details. + // +kubebuilder:validation:Optional + SapoData *string `json:"sapoData,omitempty" tf:"sapo_data,omitempty"` + + // Information that is required for querying ServiceNow. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + ServiceNow *string `json:"serviceNow,omitempty" tf:"service_now,omitempty"` + + // Information that is required for querying Singular. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + Singular *string `json:"singular,omitempty" tf:"singular,omitempty"` + + // Information that is required for querying Slack. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + Slack *string `json:"slack,omitempty" tf:"slack,omitempty"` + + // Operation to be performed on the provided Trend Micro source fields. Valid values are PROJECTION, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + // +kubebuilder:validation:Optional + Trendmicro *string `json:"trendmicro,omitempty" tf:"trendmicro,omitempty"` + + // Information that is required for querying Veeva. See Veeva Source Properties for more details. + // +kubebuilder:validation:Optional + Veeva *string `json:"veeva,omitempty" tf:"veeva,omitempty"` + + // Properties that are required to query Zendesk. See Zendesk Destination Properties for more details. + // +kubebuilder:validation:Optional + Zendesk *string `json:"zendesk,omitempty" tf:"zendesk,omitempty"` +} + +type CustomConnectorInitParameters struct { + + // Custom properties that are specific to the connector when it's used as a destination in the flow. Maximum of 50 items. 
+ // +mapType=granular + CustomProperties map[string]*string `json:"customProperties,omitempty" tf:"custom_properties,omitempty"` + + // Entity specified in the custom connector as a destination in the flow. + EntityName *string `json:"entityName,omitempty" tf:"entity_name,omitempty"` + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *ErrorHandlingConfigInitParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. + IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type CustomConnectorObservation struct { + + // Custom properties that are specific to the connector when it's used as a destination in the flow. Maximum of 50 items. + // +mapType=granular + CustomProperties map[string]*string `json:"customProperties,omitempty" tf:"custom_properties,omitempty"` + + // Entity specified in the custom connector as a destination in the flow. + EntityName *string `json:"entityName,omitempty" tf:"entity_name,omitempty"` + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *ErrorHandlingConfigObservation `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. 
+ IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type CustomConnectorParameters struct { + + // Custom properties that are specific to the connector when it's used as a destination in the flow. Maximum of 50 items. + // +kubebuilder:validation:Optional + // +mapType=granular + CustomProperties map[string]*string `json:"customProperties,omitempty" tf:"custom_properties,omitempty"` + + // Entity specified in the custom connector as a destination in the flow. + // +kubebuilder:validation:Optional + EntityName *string `json:"entityName" tf:"entity_name,omitempty"` + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + // +kubebuilder:validation:Optional + ErrorHandlingConfig *ErrorHandlingConfigParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. + // +kubebuilder:validation:Optional + IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + // +kubebuilder:validation:Optional + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type CustomerProfilesInitParameters struct { + + // Unique name of the Amazon Connect Customer Profiles domain. 
+ DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Object specified in the Amazon Connect Customer Profiles flow destination. + ObjectTypeName *string `json:"objectTypeName,omitempty" tf:"object_type_name,omitempty"` +} + +type CustomerProfilesObservation struct { + + // Unique name of the Amazon Connect Customer Profiles domain. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Object specified in the Amazon Connect Customer Profiles flow destination. + ObjectTypeName *string `json:"objectTypeName,omitempty" tf:"object_type_name,omitempty"` +} + +type CustomerProfilesParameters struct { + + // Unique name of the Amazon Connect Customer Profiles domain. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // Object specified in the Amazon Connect Customer Profiles flow destination. + // +kubebuilder:validation:Optional + ObjectTypeName *string `json:"objectTypeName,omitempty" tf:"object_type_name,omitempty"` +} + +type DatadogInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type DatadogObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type DatadogParameters struct { + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type DestinationConnectorPropertiesInitParameters struct { + + // Properties that are required to query the custom Connector. See Custom Connector Destination Properties for more details. + CustomConnector *CustomConnectorInitParameters `json:"customConnector,omitempty" tf:"custom_connector,omitempty"` + + // Properties that are required to query Amazon Connect Customer Profiles. See Customer Profiles Destination Properties for more details. 
+ CustomerProfiles *CustomerProfilesInitParameters `json:"customerProfiles,omitempty" tf:"customer_profiles,omitempty"` + + // Properties that are required to query Amazon EventBridge. See Generic Destination Properties for more details. + EventBridge *EventBridgeInitParameters `json:"eventBridge,omitempty" tf:"event_bridge,omitempty"` + + // Properties that are required to query Amazon Honeycode. See Generic Destination Properties for more details. + Honeycode *HoneycodeInitParameters `json:"honeycode,omitempty" tf:"honeycode,omitempty"` + + LookoutMetrics *LookoutMetricsInitParameters `json:"lookoutMetrics,omitempty" tf:"lookout_metrics,omitempty"` + + // Properties that are required to query Marketo. See Generic Destination Properties for more details. + Marketo *MarketoInitParameters `json:"marketo,omitempty" tf:"marketo,omitempty"` + + // Properties that are required to query Amazon Redshift. See Redshift Destination Properties for more details. + Redshift *RedshiftInitParameters `json:"redshift,omitempty" tf:"redshift,omitempty"` + + // Properties that are required to query Amazon S3. See S3 Destination Properties for more details. + S3 *S3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // Properties that are required to query Salesforce. See Salesforce Destination Properties for more details. + Salesforce *SalesforceInitParameters `json:"salesforce,omitempty" tf:"salesforce,omitempty"` + + // Properties that are required to query SAPOData. See SAPOData Destination Properties for more details. + SapoData *SapoDataInitParameters `json:"sapoData,omitempty" tf:"sapo_data,omitempty"` + + // Properties that are required to query Snowflake. See Snowflake Destination Properties for more details. + Snowflake *SnowflakeInitParameters `json:"snowflake,omitempty" tf:"snowflake,omitempty"` + + // Properties that are required to query Upsolver. See Upsolver Destination Properties for more details. 
+ Upsolver *UpsolverInitParameters `json:"upsolver,omitempty" tf:"upsolver,omitempty"` + + // Properties that are required to query Zendesk. See Zendesk Destination Properties for more details. + Zendesk *ZendeskInitParameters `json:"zendesk,omitempty" tf:"zendesk,omitempty"` +} + +type DestinationConnectorPropertiesObservation struct { + + // Properties that are required to query the custom Connector. See Custom Connector Destination Properties for more details. + CustomConnector *CustomConnectorObservation `json:"customConnector,omitempty" tf:"custom_connector,omitempty"` + + // Properties that are required to query Amazon Connect Customer Profiles. See Customer Profiles Destination Properties for more details. + CustomerProfiles *CustomerProfilesObservation `json:"customerProfiles,omitempty" tf:"customer_profiles,omitempty"` + + // Properties that are required to query Amazon EventBridge. See Generic Destination Properties for more details. + EventBridge *EventBridgeObservation `json:"eventBridge,omitempty" tf:"event_bridge,omitempty"` + + // Properties that are required to query Amazon Honeycode. See Generic Destination Properties for more details. + Honeycode *HoneycodeObservation `json:"honeycode,omitempty" tf:"honeycode,omitempty"` + + LookoutMetrics *LookoutMetricsParameters `json:"lookoutMetrics,omitempty" tf:"lookout_metrics,omitempty"` + + // Properties that are required to query Marketo. See Generic Destination Properties for more details. + Marketo *MarketoObservation `json:"marketo,omitempty" tf:"marketo,omitempty"` + + // Properties that are required to query Amazon Redshift. See Redshift Destination Properties for more details. + Redshift *RedshiftObservation `json:"redshift,omitempty" tf:"redshift,omitempty"` + + // Properties that are required to query Amazon S3. See S3 Destination Properties for more details. + S3 *S3Observation `json:"s3,omitempty" tf:"s3,omitempty"` + + // Properties that are required to query Salesforce. 
See Salesforce Destination Properties for more details. + Salesforce *SalesforceObservation `json:"salesforce,omitempty" tf:"salesforce,omitempty"` + + // Properties that are required to query SAPOData. See SAPOData Destination Properties for more details. + SapoData *SapoDataObservation `json:"sapoData,omitempty" tf:"sapo_data,omitempty"` + + // Properties that are required to query Snowflake. See Snowflake Destination Properties for more details. + Snowflake *SnowflakeObservation `json:"snowflake,omitempty" tf:"snowflake,omitempty"` + + // Properties that are required to query Upsolver. See Upsolver Destination Properties for more details. + Upsolver *UpsolverObservation `json:"upsolver,omitempty" tf:"upsolver,omitempty"` + + // Properties that are required to query Zendesk. See Zendesk Destination Properties for more details. + Zendesk *ZendeskObservation `json:"zendesk,omitempty" tf:"zendesk,omitempty"` +} + +type DestinationConnectorPropertiesParameters struct { + + // Properties that are required to query the custom Connector. See Custom Connector Destination Properties for more details. + // +kubebuilder:validation:Optional + CustomConnector *CustomConnectorParameters `json:"customConnector,omitempty" tf:"custom_connector,omitempty"` + + // Properties that are required to query Amazon Connect Customer Profiles. See Customer Profiles Destination Properties for more details. + // +kubebuilder:validation:Optional + CustomerProfiles *CustomerProfilesParameters `json:"customerProfiles,omitempty" tf:"customer_profiles,omitempty"` + + // Properties that are required to query Amazon EventBridge. See Generic Destination Properties for more details. + // +kubebuilder:validation:Optional + EventBridge *EventBridgeParameters `json:"eventBridge,omitempty" tf:"event_bridge,omitempty"` + + // Properties that are required to query Amazon Honeycode. See Generic Destination Properties for more details. 
+ // +kubebuilder:validation:Optional + Honeycode *HoneycodeParameters `json:"honeycode,omitempty" tf:"honeycode,omitempty"` + + // +kubebuilder:validation:Optional + LookoutMetrics *LookoutMetricsParameters `json:"lookoutMetrics,omitempty" tf:"lookout_metrics,omitempty"` + + // Properties that are required to query Marketo. See Generic Destination Properties for more details. + // +kubebuilder:validation:Optional + Marketo *MarketoParameters `json:"marketo,omitempty" tf:"marketo,omitempty"` + + // Properties that are required to query Amazon Redshift. See Redshift Destination Properties for more details. + // +kubebuilder:validation:Optional + Redshift *RedshiftParameters `json:"redshift,omitempty" tf:"redshift,omitempty"` + + // Properties that are required to query Amazon S3. See S3 Destination Properties for more details. + // +kubebuilder:validation:Optional + S3 *S3Parameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // Properties that are required to query Salesforce. See Salesforce Destination Properties for more details. + // +kubebuilder:validation:Optional + Salesforce *SalesforceParameters `json:"salesforce,omitempty" tf:"salesforce,omitempty"` + + // Properties that are required to query SAPOData. See SAPOData Destination Properties for more details. + // +kubebuilder:validation:Optional + SapoData *SapoDataParameters `json:"sapoData,omitempty" tf:"sapo_data,omitempty"` + + // Properties that are required to query Snowflake. See Snowflake Destination Properties for more details. + // +kubebuilder:validation:Optional + Snowflake *SnowflakeParameters `json:"snowflake,omitempty" tf:"snowflake,omitempty"` + + // Properties that are required to query Upsolver. See Upsolver Destination Properties for more details. + // +kubebuilder:validation:Optional + Upsolver *UpsolverParameters `json:"upsolver,omitempty" tf:"upsolver,omitempty"` + + // Properties that are required to query Zendesk. See Zendesk Destination Properties for more details. 
+ // +kubebuilder:validation:Optional + Zendesk *ZendeskParameters `json:"zendesk,omitempty" tf:"zendesk,omitempty"` +} + +type DestinationFlowConfigInitParameters struct { + + // API version that the destination connector uses. + APIVersion *string `json:"apiVersion,omitempty" tf:"api_version,omitempty"` + + // Name of the connector profile. This name must be unique for each connector profile in the AWS account. + ConnectorProfileName *string `json:"connectorProfileName,omitempty" tf:"connector_profile_name,omitempty"` + + // Type of connector, such as Salesforce, Amplitude, and so on. Valid values are Salesforce, Singular, Slack, Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, Amplitude, Veeva, EventBridge, LookoutMetrics, Upsolver, Honeycode, CustomerProfiles, SAPOData, and CustomConnector. + ConnectorType *string `json:"connectorType,omitempty" tf:"connector_type,omitempty"` + + // This stores the information that is required to query a particular connector. See Destination Connector Properties for more information. + DestinationConnectorProperties *DestinationConnectorPropertiesInitParameters `json:"destinationConnectorProperties,omitempty" tf:"destination_connector_properties,omitempty"` +} + +type DestinationFlowConfigObservation struct { + + // API version that the destination connector uses. + APIVersion *string `json:"apiVersion,omitempty" tf:"api_version,omitempty"` + + // Name of the connector profile. This name must be unique for each connector profile in the AWS account. + ConnectorProfileName *string `json:"connectorProfileName,omitempty" tf:"connector_profile_name,omitempty"` + + // Type of connector, such as Salesforce, Amplitude, and so on. 
Valid values are Salesforce, Singular, Slack, Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, Amplitude, Veeva, EventBridge, LookoutMetrics, Upsolver, Honeycode, CustomerProfiles, SAPOData, and CustomConnector. + ConnectorType *string `json:"connectorType,omitempty" tf:"connector_type,omitempty"` + + // This stores the information that is required to query a particular connector. See Destination Connector Properties for more information. + DestinationConnectorProperties *DestinationConnectorPropertiesObservation `json:"destinationConnectorProperties,omitempty" tf:"destination_connector_properties,omitempty"` +} + +type DestinationFlowConfigParameters struct { + + // API version that the destination connector uses. + // +kubebuilder:validation:Optional + APIVersion *string `json:"apiVersion,omitempty" tf:"api_version,omitempty"` + + // Name of the connector profile. This name must be unique for each connector profile in the AWS account. + // +kubebuilder:validation:Optional + ConnectorProfileName *string `json:"connectorProfileName,omitempty" tf:"connector_profile_name,omitempty"` + + // Type of connector, such as Salesforce, Amplitude, and so on. Valid values are Salesforce, Singular, Slack, Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, Amplitude, Veeva, EventBridge, LookoutMetrics, Upsolver, Honeycode, CustomerProfiles, SAPOData, and CustomConnector. + // +kubebuilder:validation:Optional + ConnectorType *string `json:"connectorType" tf:"connector_type,omitempty"` + + // This stores the information that is required to query a particular connector. See Destination Connector Properties for more information. 
+ // +kubebuilder:validation:Optional + DestinationConnectorProperties *DestinationConnectorPropertiesParameters `json:"destinationConnectorProperties" tf:"destination_connector_properties,omitempty"` +} + +type DynatraceInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type DynatraceObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type DynatraceParameters struct { + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type ErrorHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type ErrorHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. 
+ FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type ErrorHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + // +kubebuilder:validation:Optional + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type EventBridgeErrorHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type EventBridgeErrorHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. 
+ BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type EventBridgeErrorHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + // +kubebuilder:validation:Optional + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type EventBridgeInitParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *EventBridgeErrorHandlingConfigInitParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type EventBridgeObservation struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *EventBridgeErrorHandlingConfigObservation `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Object specified in the flow destination. 
+	Object *string `json:"object,omitempty" tf:"object,omitempty"`
+}
+
+type EventBridgeParameters struct {
+
+	// Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details.
+	// +kubebuilder:validation:Optional
+	ErrorHandlingConfig *EventBridgeErrorHandlingConfigParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"`
+
+	// Object specified in the flow destination.
+	// +kubebuilder:validation:Optional
+	Object *string `json:"object" tf:"object,omitempty"`
+}
+
+type FlowInitParameters struct {
+
+	// Description of the flow you want to create.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// A Destination Flow Config that controls how Amazon AppFlow places data in the destination connector.
+	DestinationFlowConfig []DestinationFlowConfigInitParameters `json:"destinationFlowConfig,omitempty" tf:"destination_flow_config,omitempty"`
+
+	// ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key.
+	KMSArn *string `json:"kmsArn,omitempty" tf:"kms_arn,omitempty"`
+
+	// The Source Flow Config that controls how Amazon AppFlow retrieves data from the source connector.
+	SourceFlowConfig *SourceFlowConfigInitParameters `json:"sourceFlowConfig,omitempty" tf:"source_flow_config,omitempty"`
+
+	// Key-value map of resource tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// A Task that Amazon AppFlow performs while transferring the data in the flow run.
+	Task []TaskInitParameters `json:"task,omitempty" tf:"task,omitempty"`
+
+	// A Trigger that determines how and when the flow runs.
+	TriggerConfig *TriggerConfigInitParameters `json:"triggerConfig,omitempty" tf:"trigger_config,omitempty"`
+}
+
+type FlowObservation struct {
+
+	// Flow's ARN.
+	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`
+
+	// Description of the flow you want to create.
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// A Destination Flow Config that controls how Amazon AppFlow places data in the destination connector.
+	DestinationFlowConfig []DestinationFlowConfigObservation `json:"destinationFlowConfig,omitempty" tf:"destination_flow_config,omitempty"`
+
+	// The current status of the flow.
+	FlowStatus *string `json:"flowStatus,omitempty" tf:"flow_status,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key.
+	KMSArn *string `json:"kmsArn,omitempty" tf:"kms_arn,omitempty"`
+
+	// The Source Flow Config that controls how Amazon AppFlow retrieves data from the source connector.
+	SourceFlowConfig *SourceFlowConfigObservation `json:"sourceFlowConfig,omitempty" tf:"source_flow_config,omitempty"`
+
+	// Key-value map of resource tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
+	// +mapType=granular
+	TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"`
+
+	// A Task that Amazon AppFlow performs while transferring the data in the flow run.
+	Task []TaskObservation `json:"task,omitempty" tf:"task,omitempty"`
+
+	// A Trigger that determines how and when the flow runs.
+	TriggerConfig *TriggerConfigObservation `json:"triggerConfig,omitempty" tf:"trigger_config,omitempty"`
+}
+
+type FlowParameters struct {
+
+	// Description of the flow you want to create.
+	// +kubebuilder:validation:Optional
+	Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+	// A Destination Flow Config that controls how Amazon AppFlow places data in the destination connector.
+	// +kubebuilder:validation:Optional
+	DestinationFlowConfig []DestinationFlowConfigParameters `json:"destinationFlowConfig,omitempty" tf:"destination_flow_config,omitempty"`
+
+	// ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key.
+	// +kubebuilder:validation:Optional
+	KMSArn *string `json:"kmsArn,omitempty" tf:"kms_arn,omitempty"`
+
+	// Region is the region you'd like your resource to be created in.
+	// +upjet:crd:field:TFTag=-
+	// +kubebuilder:validation:Required
+	Region *string `json:"region" tf:"-"`
+
+	// The Source Flow Config that controls how Amazon AppFlow retrieves data from the source connector.
+	// +kubebuilder:validation:Optional
+	SourceFlowConfig *SourceFlowConfigParameters `json:"sourceFlowConfig,omitempty" tf:"source_flow_config,omitempty"`
+
+	// Key-value map of resource tags.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// A Task that Amazon AppFlow performs while transferring the data in the flow run.
+	// +kubebuilder:validation:Optional
+	Task []TaskParameters `json:"task,omitempty" tf:"task,omitempty"`
+
+	// A Trigger that determines how and when the flow runs.
+ // +kubebuilder:validation:Optional + TriggerConfig *TriggerConfigParameters `json:"triggerConfig,omitempty" tf:"trigger_config,omitempty"` +} + +type GoogleAnalyticsInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type GoogleAnalyticsObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type GoogleAnalyticsParameters struct { + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type HoneycodeErrorHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type HoneycodeErrorHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. 
+ FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type HoneycodeErrorHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + // +kubebuilder:validation:Optional + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type HoneycodeInitParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *HoneycodeErrorHandlingConfigInitParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type HoneycodeObservation struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *HoneycodeErrorHandlingConfigObservation `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type HoneycodeParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. 
+ // +kubebuilder:validation:Optional + ErrorHandlingConfig *HoneycodeErrorHandlingConfigParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type IncrementalPullConfigInitParameters struct { + + // Field that specifies the date time or timestamp field as the criteria to use when importing incremental records from the source. + DatetimeTypeFieldName *string `json:"datetimeTypeFieldName,omitempty" tf:"datetime_type_field_name,omitempty"` +} + +type IncrementalPullConfigObservation struct { + + // Field that specifies the date time or timestamp field as the criteria to use when importing incremental records from the source. + DatetimeTypeFieldName *string `json:"datetimeTypeFieldName,omitempty" tf:"datetime_type_field_name,omitempty"` +} + +type IncrementalPullConfigParameters struct { + + // Field that specifies the date time or timestamp field as the criteria to use when importing incremental records from the source. + // +kubebuilder:validation:Optional + DatetimeTypeFieldName *string `json:"datetimeTypeFieldName,omitempty" tf:"datetime_type_field_name,omitempty"` +} + +type InforNexusInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type InforNexusObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type InforNexusParameters struct { + + // Object specified in the flow destination. 
+ // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type LookoutMetricsInitParameters struct { +} + +type LookoutMetricsObservation struct { +} + +type LookoutMetricsParameters struct { +} + +type MarketoErrorHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type MarketoErrorHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type MarketoErrorHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. 
+ // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + // +kubebuilder:validation:Optional + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type MarketoInitParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *MarketoErrorHandlingConfigInitParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type MarketoObservation struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *MarketoErrorHandlingConfigObservation `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type MarketoParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + // +kubebuilder:validation:Optional + ErrorHandlingConfig *MarketoErrorHandlingConfigParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type PrefixConfigInitParameters struct { + + // Determines the level of granularity that's included in the prefix. Valid values are YEAR, MONTH, DAY, HOUR, and MINUTE. 
+ PrefixFormat *string `json:"prefixFormat,omitempty" tf:"prefix_format,omitempty"` + + // Determines the format of the prefix, and whether it applies to the file name, file path, or both. Valid values are FILENAME, PATH, and PATH_AND_FILENAME. + PrefixType *string `json:"prefixType,omitempty" tf:"prefix_type,omitempty"` +} + +type PrefixConfigObservation struct { + + // Determines the level of granularity that's included in the prefix. Valid values are YEAR, MONTH, DAY, HOUR, and MINUTE. + PrefixFormat *string `json:"prefixFormat,omitempty" tf:"prefix_format,omitempty"` + + // Determines the format of the prefix, and whether it applies to the file name, file path, or both. Valid values are FILENAME, PATH, and PATH_AND_FILENAME. + PrefixType *string `json:"prefixType,omitempty" tf:"prefix_type,omitempty"` +} + +type PrefixConfigParameters struct { + + // Determines the level of granularity that's included in the prefix. Valid values are YEAR, MONTH, DAY, HOUR, and MINUTE. + // +kubebuilder:validation:Optional + PrefixFormat *string `json:"prefixFormat,omitempty" tf:"prefix_format,omitempty"` + + // Determines the format of the prefix, and whether it applies to the file name, file path, or both. Valid values are FILENAME, PATH, and PATH_AND_FILENAME. + // +kubebuilder:validation:Optional + PrefixType *string `json:"prefixType,omitempty" tf:"prefix_type,omitempty"` +} + +type RedshiftErrorHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. 
+ FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type RedshiftErrorHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type RedshiftErrorHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + // +kubebuilder:validation:Optional + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type RedshiftInitParameters struct { + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. 
+ ErrorHandlingConfig *RedshiftErrorHandlingConfigInitParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Intermediate bucket that Amazon AppFlow uses when moving data into Amazon Redshift. + IntermediateBucketName *string `json:"intermediateBucketName,omitempty" tf:"intermediate_bucket_name,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type RedshiftObservation struct { + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *RedshiftErrorHandlingConfigObservation `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Intermediate bucket that Amazon AppFlow uses when moving data into Amazon Redshift. + IntermediateBucketName *string `json:"intermediateBucketName,omitempty" tf:"intermediate_bucket_name,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type RedshiftParameters struct { + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + // +kubebuilder:validation:Optional + ErrorHandlingConfig *RedshiftErrorHandlingConfigParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Intermediate bucket that Amazon AppFlow uses when moving data into Amazon Redshift. 
+ // +kubebuilder:validation:Optional + IntermediateBucketName *string `json:"intermediateBucketName" tf:"intermediate_bucket_name,omitempty"` + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type S3InitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.BucketPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a BucketPolicy in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a BucketPolicy in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Configuration that determines how Amazon AppFlow should format the flow output data when Amazon S3 is used as the destination. See S3 Output Format Config for more details. + S3OutputFormatConfig *S3OutputFormatConfigInitParameters `json:"s3OutputFormatConfig,omitempty" tf:"s3_output_format_config,omitempty"` +} + +type S3InputFormatConfigInitParameters struct { + + // File type that Amazon AppFlow gets from your Amazon S3 bucket. Valid values are CSV and JSON. + S3InputFileType *string `json:"s3InputFileType,omitempty" tf:"s3_input_file_type,omitempty"` +} + +type S3InputFormatConfigObservation struct { + + // File type that Amazon AppFlow gets from your Amazon S3 bucket. Valid values are CSV and JSON. 
+	S3InputFileType *string `json:"s3InputFileType,omitempty" tf:"s3_input_file_type,omitempty"`
+}
+
+type S3InputFormatConfigParameters struct {
+
+	// File type that Amazon AppFlow gets from your Amazon S3 bucket. Valid values are CSV and JSON.
+	// +kubebuilder:validation:Optional
+	S3InputFileType *string `json:"s3InputFileType,omitempty" tf:"s3_input_file_type,omitempty"`
+}
+
+type S3Observation struct {
+
+	// Amazon S3 bucket name in which Amazon AppFlow places the transferred data.
+	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
+
+	// Object key for the bucket in which Amazon AppFlow places the destination files.
+	BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"`
+
+	// Configuration that determines how Amazon AppFlow should format the flow output data when Amazon S3 is used as the destination. See S3 Output Format Config for more details.
+	S3OutputFormatConfig *S3OutputFormatConfigObservation `json:"s3OutputFormatConfig,omitempty" tf:"s3_output_format_config,omitempty"`
+}
+
+type S3OutputFormatConfigAggregationConfigInitParameters struct {
+
+	// Whether Amazon AppFlow aggregates the flow records into a single file, or leaves them unaggregated. Valid values are None and SingleFile.
+	AggregationType *string `json:"aggregationType,omitempty" tf:"aggregation_type,omitempty"`
+}
+
+type S3OutputFormatConfigAggregationConfigObservation struct {
+
+	// Whether Amazon AppFlow aggregates the flow records into a single file, or leaves them unaggregated. Valid values are None and SingleFile.
+	AggregationType *string `json:"aggregationType,omitempty" tf:"aggregation_type,omitempty"`
+}
+
+type S3OutputFormatConfigAggregationConfigParameters struct {
+
+	// Whether Amazon AppFlow aggregates the flow records into a single file, or leaves them unaggregated. Valid values are None and SingleFile.
+ // +kubebuilder:validation:Optional + AggregationType *string `json:"aggregationType,omitempty" tf:"aggregation_type,omitempty"` +} + +type S3OutputFormatConfigInitParameters struct { + + // Aggregation settings that you can use to customize the output format of your flow data. See Aggregation Config for more details. + AggregationConfig *AggregationConfigInitParameters `json:"aggregationConfig,omitempty" tf:"aggregation_config,omitempty"` + + // File type that Amazon AppFlow places in the Amazon S3 bucket. Valid values are CSV, JSON, and PARQUET. + FileType *string `json:"fileType,omitempty" tf:"file_type,omitempty"` + + // Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date. See Prefix Config for more details. + PrefixConfig *PrefixConfigInitParameters `json:"prefixConfig,omitempty" tf:"prefix_config,omitempty"` + + // Whether the data types from the source system need to be preserved (Only valid for Parquet file type) + PreserveSourceDataTyping *bool `json:"preserveSourceDataTyping,omitempty" tf:"preserve_source_data_typing,omitempty"` +} + +type S3OutputFormatConfigObservation struct { + + // Aggregation settings that you can use to customize the output format of your flow data. See Aggregation Config for more details. + AggregationConfig *AggregationConfigObservation `json:"aggregationConfig,omitempty" tf:"aggregation_config,omitempty"` + + // File type that Amazon AppFlow places in the Amazon S3 bucket. Valid values are CSV, JSON, and PARQUET. + FileType *string `json:"fileType,omitempty" tf:"file_type,omitempty"` + + // Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date. See Prefix Config for more details. 
+ PrefixConfig *PrefixConfigObservation `json:"prefixConfig,omitempty" tf:"prefix_config,omitempty"` + + // Whether the data types from the source system need to be preserved (Only valid for Parquet file type) + PreserveSourceDataTyping *bool `json:"preserveSourceDataTyping,omitempty" tf:"preserve_source_data_typing,omitempty"` +} + +type S3OutputFormatConfigParameters struct { + + // Aggregation settings that you can use to customize the output format of your flow data. See Aggregation Config for more details. + // +kubebuilder:validation:Optional + AggregationConfig *AggregationConfigParameters `json:"aggregationConfig,omitempty" tf:"aggregation_config,omitempty"` + + // File type that Amazon AppFlow places in the Amazon S3 bucket. Valid values are CSV, JSON, and PARQUET. + // +kubebuilder:validation:Optional + FileType *string `json:"fileType,omitempty" tf:"file_type,omitempty"` + + // Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date. See Prefix Config for more details. + // +kubebuilder:validation:Optional + PrefixConfig *PrefixConfigParameters `json:"prefixConfig,omitempty" tf:"prefix_config,omitempty"` + + // Whether the data types from the source system need to be preserved (Only valid for Parquet file type) + // +kubebuilder:validation:Optional + PreserveSourceDataTyping *bool `json:"preserveSourceDataTyping,omitempty" tf:"preserve_source_data_typing,omitempty"` +} + +type S3OutputFormatConfigPrefixConfigInitParameters struct { + + // Determines the level of granularity that's included in the prefix. Valid values are YEAR, MONTH, DAY, HOUR, and MINUTE. + PrefixFormat *string `json:"prefixFormat,omitempty" tf:"prefix_format,omitempty"` + + // Determines the format of the prefix, and whether it applies to the file name, file path, or both. Valid values are FILENAME, PATH, and PATH_AND_FILENAME. 
+ PrefixType *string `json:"prefixType,omitempty" tf:"prefix_type,omitempty"` +} + +type S3OutputFormatConfigPrefixConfigObservation struct { + + // Determines the level of granularity that's included in the prefix. Valid values are YEAR, MONTH, DAY, HOUR, and MINUTE. + PrefixFormat *string `json:"prefixFormat,omitempty" tf:"prefix_format,omitempty"` + + // Determines the format of the prefix, and whether it applies to the file name, file path, or both. Valid values are FILENAME, PATH, and PATH_AND_FILENAME. + PrefixType *string `json:"prefixType,omitempty" tf:"prefix_type,omitempty"` +} + +type S3OutputFormatConfigPrefixConfigParameters struct { + + // Determines the level of granularity that's included in the prefix. Valid values are YEAR, MONTH, DAY, HOUR, and MINUTE. + // +kubebuilder:validation:Optional + PrefixFormat *string `json:"prefixFormat,omitempty" tf:"prefix_format,omitempty"` + + // Determines the format of the prefix, and whether it applies to the file name, file path, or both. Valid values are FILENAME, PATH, and PATH_AND_FILENAME. + // +kubebuilder:validation:Optional + PrefixType *string `json:"prefixType" tf:"prefix_type,omitempty"` +} + +type S3Parameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.BucketPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a BucketPolicy in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a BucketPolicy in s3 to populate bucketName. 
+ // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Configuration that determines how Amazon AppFlow should format the flow output data when Amazon S3 is used as the destination. See S3 Output Format Config for more details. + // +kubebuilder:validation:Optional + S3OutputFormatConfig *S3OutputFormatConfigParameters `json:"s3OutputFormatConfig,omitempty" tf:"s3_output_format_config,omitempty"` +} + +type SalesforceErrorHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type SalesforceErrorHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. 
+ FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type SalesforceErrorHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + // +kubebuilder:validation:Optional + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type SalesforceInitParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *SalesforceErrorHandlingConfigInitParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. + IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type SalesforceObservation struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. 
+ ErrorHandlingConfig *SalesforceErrorHandlingConfigObservation `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. + IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type SalesforceParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + // +kubebuilder:validation:Optional + ErrorHandlingConfig *SalesforceErrorHandlingConfigParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. + // +kubebuilder:validation:Optional + IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + // +kubebuilder:validation:Optional + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type SapoDataErrorHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. 
+ BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type SapoDataErrorHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type SapoDataErrorHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. 
+ // +kubebuilder:validation:Optional + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type SapoDataInitParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *SapoDataErrorHandlingConfigInitParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. + IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Object path specified in the SAPOData flow destination. + ObjectPath *string `json:"objectPath,omitempty" tf:"object_path,omitempty"` + + // Determines how Amazon AppFlow handles the success response that it gets from the connector after placing data. See Success Response Handling Config for more details. + SuccessResponseHandlingConfig *SuccessResponseHandlingConfigInitParameters `json:"successResponseHandlingConfig,omitempty" tf:"success_response_handling_config,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type SapoDataObservation struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *SapoDataErrorHandlingConfigObservation `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. 
+ IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Object path specified in the SAPOData flow destination. + ObjectPath *string `json:"objectPath,omitempty" tf:"object_path,omitempty"` + + // Determines how Amazon AppFlow handles the success response that it gets from the connector after placing data. See Success Response Handling Config for more details. + SuccessResponseHandlingConfig *SuccessResponseHandlingConfigObservation `json:"successResponseHandlingConfig,omitempty" tf:"success_response_handling_config,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type SapoDataParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + // +kubebuilder:validation:Optional + ErrorHandlingConfig *SapoDataErrorHandlingConfigParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. + // +kubebuilder:validation:Optional + IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Object path specified in the SAPOData flow destination. + // +kubebuilder:validation:Optional + ObjectPath *string `json:"objectPath" tf:"object_path,omitempty"` + + // Determines how Amazon AppFlow handles the success response that it gets from the connector after placing data. See Success Response Handling Config for more details. 
+ // +kubebuilder:validation:Optional + SuccessResponseHandlingConfig *SuccessResponseHandlingConfigParameters `json:"successResponseHandlingConfig,omitempty" tf:"success_response_handling_config,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + // +kubebuilder:validation:Optional + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type ScheduledInitParameters struct { + + // Whether a scheduled flow has an incremental data transfer or a complete data transfer for each flow run. Valid values are Incremental and Complete. + DataPullMode *string `json:"dataPullMode,omitempty" tf:"data_pull_mode,omitempty"` + + // Date range for the records to import from the connector in the first flow run. Must be a valid RFC3339 timestamp. + FirstExecutionFrom *string `json:"firstExecutionFrom,omitempty" tf:"first_execution_from,omitempty"` + + // Scheduled end time for a schedule-triggered flow. Must be a valid RFC3339 timestamp. + ScheduleEndTime *string `json:"scheduleEndTime,omitempty" tf:"schedule_end_time,omitempty"` + + // Scheduling expression that determines the rate at which the schedule will run, for example rate(5minutes). + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // Optional offset that is added to the time interval for a schedule-triggered flow. Maximum value of 36000. + ScheduleOffset *float64 `json:"scheduleOffset,omitempty" tf:"schedule_offset,omitempty"` + + // Scheduled start time for a schedule-triggered flow. Must be a valid RFC3339 timestamp. + ScheduleStartTime *string `json:"scheduleStartTime,omitempty" tf:"schedule_start_time,omitempty"` + + // Time zone used when referring to the date and time of a scheduled-triggered flow, such as America/New_York. 
+ Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type ScheduledObservation struct { + + // Whether a scheduled flow has an incremental data transfer or a complete data transfer for each flow run. Valid values are Incremental and Complete. + DataPullMode *string `json:"dataPullMode,omitempty" tf:"data_pull_mode,omitempty"` + + // Date range for the records to import from the connector in the first flow run. Must be a valid RFC3339 timestamp. + FirstExecutionFrom *string `json:"firstExecutionFrom,omitempty" tf:"first_execution_from,omitempty"` + + // Scheduled end time for a schedule-triggered flow. Must be a valid RFC3339 timestamp. + ScheduleEndTime *string `json:"scheduleEndTime,omitempty" tf:"schedule_end_time,omitempty"` + + // Scheduling expression that determines the rate at which the schedule will run, for example rate(5minutes). + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // Optional offset that is added to the time interval for a schedule-triggered flow. Maximum value of 36000. + ScheduleOffset *float64 `json:"scheduleOffset,omitempty" tf:"schedule_offset,omitempty"` + + // Scheduled start time for a schedule-triggered flow. Must be a valid RFC3339 timestamp. + ScheduleStartTime *string `json:"scheduleStartTime,omitempty" tf:"schedule_start_time,omitempty"` + + // Time zone used when referring to the date and time of a scheduled-triggered flow, such as America/New_York. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type ScheduledParameters struct { + + // Whether a scheduled flow has an incremental data transfer or a complete data transfer for each flow run. Valid values are Incremental and Complete. + // +kubebuilder:validation:Optional + DataPullMode *string `json:"dataPullMode,omitempty" tf:"data_pull_mode,omitempty"` + + // Date range for the records to import from the connector in the first flow run. 
Must be a valid RFC3339 timestamp. + // +kubebuilder:validation:Optional + FirstExecutionFrom *string `json:"firstExecutionFrom,omitempty" tf:"first_execution_from,omitempty"` + + // Scheduled end time for a schedule-triggered flow. Must be a valid RFC3339 timestamp. + // +kubebuilder:validation:Optional + ScheduleEndTime *string `json:"scheduleEndTime,omitempty" tf:"schedule_end_time,omitempty"` + + // Scheduling expression that determines the rate at which the schedule will run, for example rate(5minutes). + // +kubebuilder:validation:Optional + ScheduleExpression *string `json:"scheduleExpression" tf:"schedule_expression,omitempty"` + + // Optional offset that is added to the time interval for a schedule-triggered flow. Maximum value of 36000. + // +kubebuilder:validation:Optional + ScheduleOffset *float64 `json:"scheduleOffset,omitempty" tf:"schedule_offset,omitempty"` + + // Scheduled start time for a schedule-triggered flow. Must be a valid RFC3339 timestamp. + // +kubebuilder:validation:Optional + ScheduleStartTime *string `json:"scheduleStartTime,omitempty" tf:"schedule_start_time,omitempty"` + + // Time zone used when referring to the date and time of a scheduled-triggered flow, such as America/New_York. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type ServiceNowInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type ServiceNowObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type ServiceNowParameters struct { + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type SingularInitParameters struct { + + // Object specified in the flow destination. 
+ Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SingularObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SingularParameters struct { + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type SlackInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SlackObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SlackParameters struct { + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type SnowflakeErrorHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type SnowflakeErrorHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. 
+ BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type SnowflakeErrorHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + // +kubebuilder:validation:Optional + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type SnowflakeInitParameters struct { + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *SnowflakeErrorHandlingConfigInitParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Intermediate bucket that Amazon AppFlow uses when moving data into Amazon Redshift. + IntermediateBucketName *string `json:"intermediateBucketName,omitempty" tf:"intermediate_bucket_name,omitempty"` + + // Object specified in the flow destination. 
+ Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SnowflakeObservation struct { + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *SnowflakeErrorHandlingConfigObservation `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Intermediate bucket that Amazon AppFlow uses when moving data into Amazon Redshift. + IntermediateBucketName *string `json:"intermediateBucketName,omitempty" tf:"intermediate_bucket_name,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SnowflakeParameters struct { + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + // +kubebuilder:validation:Optional + ErrorHandlingConfig *SnowflakeErrorHandlingConfigParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Intermediate bucket that Amazon AppFlow uses when moving data into Amazon Redshift. + // +kubebuilder:validation:Optional + IntermediateBucketName *string `json:"intermediateBucketName" tf:"intermediate_bucket_name,omitempty"` + + // Object specified in the flow destination. 
+ // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type SourceConnectorPropertiesCustomConnectorInitParameters struct { + + // Custom properties that are specific to the connector when it's used as a destination in the flow. Maximum of 50 items. + // +mapType=granular + CustomProperties map[string]*string `json:"customProperties,omitempty" tf:"custom_properties,omitempty"` + + // Entity specified in the custom connector as a destination in the flow. + EntityName *string `json:"entityName,omitempty" tf:"entity_name,omitempty"` +} + +type SourceConnectorPropertiesCustomConnectorObservation struct { + + // Custom properties that are specific to the connector when it's used as a destination in the flow. Maximum of 50 items. + // +mapType=granular + CustomProperties map[string]*string `json:"customProperties,omitempty" tf:"custom_properties,omitempty"` + + // Entity specified in the custom connector as a destination in the flow. + EntityName *string `json:"entityName,omitempty" tf:"entity_name,omitempty"` +} + +type SourceConnectorPropertiesCustomConnectorParameters struct { + + // Custom properties that are specific to the connector when it's used as a destination in the flow. Maximum of 50 items. + // +kubebuilder:validation:Optional + // +mapType=granular + CustomProperties map[string]*string `json:"customProperties,omitempty" tf:"custom_properties,omitempty"` + + // Entity specified in the custom connector as a destination in the flow. + // +kubebuilder:validation:Optional + EntityName *string `json:"entityName" tf:"entity_name,omitempty"` +} + +type SourceConnectorPropertiesInitParameters struct { + + // Information that is required for querying Amplitude. See Generic Source Properties for more details. + Amplitude *AmplitudeInitParameters `json:"amplitude,omitempty" tf:"amplitude,omitempty"` + + // Properties that are required to query the custom Connector. 
See Custom Connector Destination Properties for more details. + CustomConnector *SourceConnectorPropertiesCustomConnectorInitParameters `json:"customConnector,omitempty" tf:"custom_connector,omitempty"` + + // Information that is required for querying Datadog. See Generic Source Properties for more details. + Datadog *DatadogInitParameters `json:"datadog,omitempty" tf:"datadog,omitempty"` + + // Operation to be performed on the provided Dynatrace source fields. Valid values are PROJECTION, BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + Dynatrace *DynatraceInitParameters `json:"dynatrace,omitempty" tf:"dynatrace,omitempty"` + + // Operation to be performed on the provided Google Analytics source fields. Valid values are PROJECTION and BETWEEN. + GoogleAnalytics *GoogleAnalyticsInitParameters `json:"googleAnalytics,omitempty" tf:"google_analytics,omitempty"` + + // Information that is required for querying Infor Nexus. See Generic Source Properties for more details. + InforNexus *InforNexusInitParameters `json:"inforNexus,omitempty" tf:"infor_nexus,omitempty"` + + // Properties that are required to query Marketo. See Generic Destination Properties for more details. + Marketo *SourceConnectorPropertiesMarketoInitParameters `json:"marketo,omitempty" tf:"marketo,omitempty"` + + // Properties that are required to query Amazon S3. See S3 Destination Properties for more details. + S3 *SourceConnectorPropertiesS3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // Properties that are required to query Salesforce. See Salesforce Destination Properties for more details. + Salesforce *SourceConnectorPropertiesSalesforceInitParameters `json:"salesforce,omitempty" tf:"salesforce,omitempty"` + + // Properties that are required to query SAPOData. See SAPOData Destination Properties for more details. 
+ SapoData *SourceConnectorPropertiesSapoDataInitParameters `json:"sapoData,omitempty" tf:"sapo_data,omitempty"` + + // Information that is required for querying ServiceNow. See Generic Source Properties for more details. + ServiceNow *ServiceNowInitParameters `json:"serviceNow,omitempty" tf:"service_now,omitempty"` + + // Information that is required for querying Singular. See Generic Source Properties for more details. + Singular *SingularInitParameters `json:"singular,omitempty" tf:"singular,omitempty"` + + // Information that is required for querying Slack. See Generic Source Properties for more details. + Slack *SlackInitParameters `json:"slack,omitempty" tf:"slack,omitempty"` + + // Operation to be performed on the provided Trend Micro source fields. Valid values are PROJECTION, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + Trendmicro *TrendmicroInitParameters `json:"trendmicro,omitempty" tf:"trendmicro,omitempty"` + + // Information that is required for querying Veeva. See Veeva Source Properties for more details. + Veeva *VeevaInitParameters `json:"veeva,omitempty" tf:"veeva,omitempty"` + + // Properties that are required to query Zendesk. See Zendesk Destination Properties for more details. + Zendesk *SourceConnectorPropertiesZendeskInitParameters `json:"zendesk,omitempty" tf:"zendesk,omitempty"` +} + +type SourceConnectorPropertiesMarketoInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SourceConnectorPropertiesMarketoObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SourceConnectorPropertiesMarketoParameters struct { + + // Object specified in the flow destination. 
+ // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type SourceConnectorPropertiesObservation struct { + + // Information that is required for querying Amplitude. See Generic Source Properties for more details. + Amplitude *AmplitudeObservation `json:"amplitude,omitempty" tf:"amplitude,omitempty"` + + // Properties that are required to query the custom Connector. See Custom Connector Destination Properties for more details. + CustomConnector *SourceConnectorPropertiesCustomConnectorObservation `json:"customConnector,omitempty" tf:"custom_connector,omitempty"` + + // Information that is required for querying Datadog. See Generic Source Properties for more details. + Datadog *DatadogObservation `json:"datadog,omitempty" tf:"datadog,omitempty"` + + // Operation to be performed on the provided Dynatrace source fields. Valid values are PROJECTION, BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + Dynatrace *DynatraceObservation `json:"dynatrace,omitempty" tf:"dynatrace,omitempty"` + + // Operation to be performed on the provided Google Analytics source fields. Valid values are PROJECTION and BETWEEN. + GoogleAnalytics *GoogleAnalyticsObservation `json:"googleAnalytics,omitempty" tf:"google_analytics,omitempty"` + + // Information that is required for querying Infor Nexus. See Generic Source Properties for more details. + InforNexus *InforNexusObservation `json:"inforNexus,omitempty" tf:"infor_nexus,omitempty"` + + // Properties that are required to query Marketo. See Generic Destination Properties for more details. + Marketo *SourceConnectorPropertiesMarketoObservation `json:"marketo,omitempty" tf:"marketo,omitempty"` + + // Properties that are required to query Amazon S3. See S3 Destination Properties for more details. 
+ S3 *SourceConnectorPropertiesS3Observation `json:"s3,omitempty" tf:"s3,omitempty"` + + // Properties that are required to query Salesforce. See Salesforce Destination Properties for more details. + Salesforce *SourceConnectorPropertiesSalesforceObservation `json:"salesforce,omitempty" tf:"salesforce,omitempty"` + + // Properties that are required to query SAPOData. See SAPOData Destination Properties for more details. + SapoData *SourceConnectorPropertiesSapoDataObservation `json:"sapoData,omitempty" tf:"sapo_data,omitempty"` + + // Information that is required for querying ServiceNow. See Generic Source Properties for more details. + ServiceNow *ServiceNowObservation `json:"serviceNow,omitempty" tf:"service_now,omitempty"` + + // Information that is required for querying Singular. See Generic Source Properties for more details. + Singular *SingularObservation `json:"singular,omitempty" tf:"singular,omitempty"` + + // Information that is required for querying Slack. See Generic Source Properties for more details. + Slack *SlackObservation `json:"slack,omitempty" tf:"slack,omitempty"` + + // Operation to be performed on the provided Trend Micro source fields. Valid values are PROJECTION, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + Trendmicro *TrendmicroObservation `json:"trendmicro,omitempty" tf:"trendmicro,omitempty"` + + // Information that is required for querying Veeva. See Veeva Source Properties for more details. + Veeva *VeevaObservation `json:"veeva,omitempty" tf:"veeva,omitempty"` + + // Properties that are required to query Zendesk. See Zendesk Destination Properties for more details. + Zendesk *SourceConnectorPropertiesZendeskObservation `json:"zendesk,omitempty" tf:"zendesk,omitempty"` +} + +type SourceConnectorPropertiesParameters struct { + + // Information that is required for querying Amplitude. 
See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + Amplitude *AmplitudeParameters `json:"amplitude,omitempty" tf:"amplitude,omitempty"` + + // Properties that are required to query the custom Connector. See Custom Connector Destination Properties for more details. + // +kubebuilder:validation:Optional + CustomConnector *SourceConnectorPropertiesCustomConnectorParameters `json:"customConnector,omitempty" tf:"custom_connector,omitempty"` + + // Information that is required for querying Datadog. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + Datadog *DatadogParameters `json:"datadog,omitempty" tf:"datadog,omitempty"` + + // Operation to be performed on the provided Dynatrace source fields. Valid values are PROJECTION, BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + // +kubebuilder:validation:Optional + Dynatrace *DynatraceParameters `json:"dynatrace,omitempty" tf:"dynatrace,omitempty"` + + // Operation to be performed on the provided Google Analytics source fields. Valid values are PROJECTION and BETWEEN. + // +kubebuilder:validation:Optional + GoogleAnalytics *GoogleAnalyticsParameters `json:"googleAnalytics,omitempty" tf:"google_analytics,omitempty"` + + // Information that is required for querying Infor Nexus. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + InforNexus *InforNexusParameters `json:"inforNexus,omitempty" tf:"infor_nexus,omitempty"` + + // Properties that are required to query Marketo. See Generic Destination Properties for more details. + // +kubebuilder:validation:Optional + Marketo *SourceConnectorPropertiesMarketoParameters `json:"marketo,omitempty" tf:"marketo,omitempty"` + + // Properties that are required to query Amazon S3. See S3 Destination Properties for more details. 
+ // +kubebuilder:validation:Optional + S3 *SourceConnectorPropertiesS3Parameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // Properties that are required to query Salesforce. See Salesforce Destination Properties for more details. + // +kubebuilder:validation:Optional + Salesforce *SourceConnectorPropertiesSalesforceParameters `json:"salesforce,omitempty" tf:"salesforce,omitempty"` + + // Properties that are required to query SAPOData. See SAPOData Destination Properties for more details. + // +kubebuilder:validation:Optional + SapoData *SourceConnectorPropertiesSapoDataParameters `json:"sapoData,omitempty" tf:"sapo_data,omitempty"` + + // Information that is required for querying ServiceNow. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + ServiceNow *ServiceNowParameters `json:"serviceNow,omitempty" tf:"service_now,omitempty"` + + // Information that is required for querying Singular. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + Singular *SingularParameters `json:"singular,omitempty" tf:"singular,omitempty"` + + // Information that is required for querying Slack. See Generic Source Properties for more details. + // +kubebuilder:validation:Optional + Slack *SlackParameters `json:"slack,omitempty" tf:"slack,omitempty"` + + // Operation to be performed on the provided Trend Micro source fields. Valid values are PROJECTION, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, and NO_OP. + // +kubebuilder:validation:Optional + Trendmicro *TrendmicroParameters `json:"trendmicro,omitempty" tf:"trendmicro,omitempty"` + + // Information that is required for querying Veeva. See Veeva Source Properties for more details. 
+ // +kubebuilder:validation:Optional + Veeva *VeevaParameters `json:"veeva,omitempty" tf:"veeva,omitempty"` + + // Properties that are required to query Zendesk. See Zendesk Destination Properties for more details. + // +kubebuilder:validation:Optional + Zendesk *SourceConnectorPropertiesZendeskParameters `json:"zendesk,omitempty" tf:"zendesk,omitempty"` +} + +type SourceConnectorPropertiesS3InitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.BucketPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a BucketPolicy in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a BucketPolicy in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // When you use Amazon S3 as the source, the configuration format that you provide the flow input data. See S3 Input Format Config for details. + S3InputFormatConfig *S3InputFormatConfigInitParameters `json:"s3InputFormatConfig,omitempty" tf:"s3_input_format_config,omitempty"` +} + +type SourceConnectorPropertiesS3Observation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. 
+ BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // When you use Amazon S3 as the source, the configuration format that you provide the flow input data. See S3 Input Format Config for details. + S3InputFormatConfig *S3InputFormatConfigObservation `json:"s3InputFormatConfig,omitempty" tf:"s3_input_format_config,omitempty"` +} + +type SourceConnectorPropertiesS3Parameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.BucketPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a BucketPolicy in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a BucketPolicy in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix" tf:"bucket_prefix,omitempty"` + + // When you use Amazon S3 as the source, the configuration format that you provide the flow input data. See S3 Input Format Config for details. + // +kubebuilder:validation:Optional + S3InputFormatConfig *S3InputFormatConfigParameters `json:"s3InputFormatConfig,omitempty" tf:"s3_input_format_config,omitempty"` +} + +type SourceConnectorPropertiesSalesforceInitParameters struct { + + // Flag that enables dynamic fetching of new (recently added) fields in the Salesforce objects while running a flow. 
+ EnableDynamicFieldUpdate *bool `json:"enableDynamicFieldUpdate,omitempty" tf:"enable_dynamic_field_update,omitempty"` + + // Whether Amazon AppFlow includes deleted files in the flow run. + IncludeDeletedRecords *bool `json:"includeDeletedRecords,omitempty" tf:"include_deleted_records,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SourceConnectorPropertiesSalesforceObservation struct { + + // Flag that enables dynamic fetching of new (recently added) fields in the Salesforce objects while running a flow. + EnableDynamicFieldUpdate *bool `json:"enableDynamicFieldUpdate,omitempty" tf:"enable_dynamic_field_update,omitempty"` + + // Whether Amazon AppFlow includes deleted files in the flow run. + IncludeDeletedRecords *bool `json:"includeDeletedRecords,omitempty" tf:"include_deleted_records,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SourceConnectorPropertiesSalesforceParameters struct { + + // Flag that enables dynamic fetching of new (recently added) fields in the Salesforce objects while running a flow. + // +kubebuilder:validation:Optional + EnableDynamicFieldUpdate *bool `json:"enableDynamicFieldUpdate,omitempty" tf:"enable_dynamic_field_update,omitempty"` + + // Whether Amazon AppFlow includes deleted files in the flow run. + // +kubebuilder:validation:Optional + IncludeDeletedRecords *bool `json:"includeDeletedRecords,omitempty" tf:"include_deleted_records,omitempty"` + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type SourceConnectorPropertiesSapoDataInitParameters struct { + + // Object path specified in the SAPOData flow destination. 
+ ObjectPath *string `json:"objectPath,omitempty" tf:"object_path,omitempty"` +} + +type SourceConnectorPropertiesSapoDataObservation struct { + + // Object path specified in the SAPOData flow destination. + ObjectPath *string `json:"objectPath,omitempty" tf:"object_path,omitempty"` +} + +type SourceConnectorPropertiesSapoDataParameters struct { + + // Object path specified in the SAPOData flow destination. + // +kubebuilder:validation:Optional + ObjectPath *string `json:"objectPath" tf:"object_path,omitempty"` +} + +type SourceConnectorPropertiesZendeskInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SourceConnectorPropertiesZendeskObservation struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type SourceConnectorPropertiesZendeskParameters struct { + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type SourceFlowConfigInitParameters struct { + + // API version that the destination connector uses. + APIVersion *string `json:"apiVersion,omitempty" tf:"api_version,omitempty"` + + // Name of the connector profile. This name must be unique for each connector profile in the AWS account. + ConnectorProfileName *string `json:"connectorProfileName,omitempty" tf:"connector_profile_name,omitempty"` + + // Type of connector, such as Salesforce, Amplitude, and so on. Valid values are Salesforce, Singular, Slack, Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, Amplitude, Veeva, EventBridge, LookoutMetrics, Upsolver, Honeycode, CustomerProfiles, SAPOData, and CustomConnector. + ConnectorType *string `json:"connectorType,omitempty" tf:"connector_type,omitempty"` + + // Defines the configuration for a scheduled incremental data pull. 
If a valid configuration is provided, the fields specified in the configuration are used when querying for the incremental data pull. See Incremental Pull Config for more details. + IncrementalPullConfig *IncrementalPullConfigInitParameters `json:"incrementalPullConfig,omitempty" tf:"incremental_pull_config,omitempty"` + + // Information that is required to query a particular source connector. See Source Connector Properties for details. + SourceConnectorProperties *SourceConnectorPropertiesInitParameters `json:"sourceConnectorProperties,omitempty" tf:"source_connector_properties,omitempty"` +} + +type SourceFlowConfigObservation struct { + + // API version that the destination connector uses. + APIVersion *string `json:"apiVersion,omitempty" tf:"api_version,omitempty"` + + // Name of the connector profile. This name must be unique for each connector profile in the AWS account. + ConnectorProfileName *string `json:"connectorProfileName,omitempty" tf:"connector_profile_name,omitempty"` + + // Type of connector, such as Salesforce, Amplitude, and so on. Valid values are Salesforce, Singular, Slack, Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, Amplitude, Veeva, EventBridge, LookoutMetrics, Upsolver, Honeycode, CustomerProfiles, SAPOData, and CustomConnector. + ConnectorType *string `json:"connectorType,omitempty" tf:"connector_type,omitempty"` + + // Defines the configuration for a scheduled incremental data pull. If a valid configuration is provided, the fields specified in the configuration are used when querying for the incremental data pull. See Incremental Pull Config for more details. + IncrementalPullConfig *IncrementalPullConfigObservation `json:"incrementalPullConfig,omitempty" tf:"incremental_pull_config,omitempty"` + + // Information that is required to query a particular source connector. See Source Connector Properties for details. 
+ SourceConnectorProperties *SourceConnectorPropertiesObservation `json:"sourceConnectorProperties,omitempty" tf:"source_connector_properties,omitempty"` +} + +type SourceFlowConfigParameters struct { + + // API version that the destination connector uses. + // +kubebuilder:validation:Optional + APIVersion *string `json:"apiVersion,omitempty" tf:"api_version,omitempty"` + + // Name of the connector profile. This name must be unique for each connector profile in the AWS account. + // +kubebuilder:validation:Optional + ConnectorProfileName *string `json:"connectorProfileName,omitempty" tf:"connector_profile_name,omitempty"` + + // Type of connector, such as Salesforce, Amplitude, and so on. Valid values are Salesforce, Singular, Slack, Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, Amplitude, Veeva, EventBridge, LookoutMetrics, Upsolver, Honeycode, CustomerProfiles, SAPOData, and CustomConnector. + // +kubebuilder:validation:Optional + ConnectorType *string `json:"connectorType" tf:"connector_type,omitempty"` + + // Defines the configuration for a scheduled incremental data pull. If a valid configuration is provided, the fields specified in the configuration are used when querying for the incremental data pull. See Incremental Pull Config for more details. + // +kubebuilder:validation:Optional + IncrementalPullConfig *IncrementalPullConfigParameters `json:"incrementalPullConfig,omitempty" tf:"incremental_pull_config,omitempty"` + + // Information that is required to query a particular source connector. See Source Connector Properties for details. + // +kubebuilder:validation:Optional + SourceConnectorProperties *SourceConnectorPropertiesParameters `json:"sourceConnectorProperties" tf:"source_connector_properties,omitempty"` +} + +type SuccessResponseHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. 
+ BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` +} + +type SuccessResponseHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` +} + +type SuccessResponseHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` +} + +type TaskInitParameters struct { + + // Operation to be performed on the provided source fields. See Connector Operator for details. + ConnectorOperator []ConnectorOperatorInitParameters `json:"connectorOperator,omitempty" tf:"connector_operator,omitempty"` + + // Field in a destination connector, or a field value against which Amazon AppFlow validates a source field. + DestinationField *string `json:"destinationField,omitempty" tf:"destination_field,omitempty"` + + // Source fields to which a particular task is applied. + SourceFields []*string `json:"sourceFields,omitempty" tf:"source_fields,omitempty"` + + // Map used to store task-related information. The execution service looks for particular information based on the TaskType. 
Valid keys are VALUE, VALUES, DATA_TYPE, UPPER_BOUND, LOWER_BOUND, SOURCE_DATA_TYPE, DESTINATION_DATA_TYPE, VALIDATION_ACTION, MASK_VALUE, MASK_LENGTH, TRUNCATE_LENGTH, MATH_OPERATION_FIELDS_ORDER, CONCAT_FORMAT, SUBFIELD_CATEGORY_MAP, and EXCLUDE_SOURCE_FIELDS_LIST. + // +mapType=granular + TaskProperties map[string]*string `json:"taskProperties,omitempty" tf:"task_properties,omitempty"` + + // Particular task implementation that Amazon AppFlow performs. Valid values are Arithmetic, Filter, Map, Map_all, Mask, Merge, Passthrough, Truncate, and Validate. + TaskType *string `json:"taskType,omitempty" tf:"task_type,omitempty"` +} + +type TaskObservation struct { + + // Operation to be performed on the provided source fields. See Connector Operator for details. + ConnectorOperator []ConnectorOperatorObservation `json:"connectorOperator,omitempty" tf:"connector_operator,omitempty"` + + // Field in a destination connector, or a field value against which Amazon AppFlow validates a source field. + DestinationField *string `json:"destinationField,omitempty" tf:"destination_field,omitempty"` + + // Source fields to which a particular task is applied. + SourceFields []*string `json:"sourceFields,omitempty" tf:"source_fields,omitempty"` + + // Map used to store task-related information. The execution service looks for particular information based on the TaskType. Valid keys are VALUE, VALUES, DATA_TYPE, UPPER_BOUND, LOWER_BOUND, SOURCE_DATA_TYPE, DESTINATION_DATA_TYPE, VALIDATION_ACTION, MASK_VALUE, MASK_LENGTH, TRUNCATE_LENGTH, MATH_OPERATION_FIELDS_ORDER, CONCAT_FORMAT, SUBFIELD_CATEGORY_MAP, and EXCLUDE_SOURCE_FIELDS_LIST. + // +mapType=granular + TaskProperties map[string]*string `json:"taskProperties,omitempty" tf:"task_properties,omitempty"` + + // Particular task implementation that Amazon AppFlow performs. Valid values are Arithmetic, Filter, Map, Map_all, Mask, Merge, Passthrough, Truncate, and Validate. 
+ TaskType *string `json:"taskType,omitempty" tf:"task_type,omitempty"` +} + +type TaskParameters struct { + + // Operation to be performed on the provided source fields. See Connector Operator for details. + // +kubebuilder:validation:Optional + ConnectorOperator []ConnectorOperatorParameters `json:"connectorOperator,omitempty" tf:"connector_operator,omitempty"` + + // Field in a destination connector, or a field value against which Amazon AppFlow validates a source field. + // +kubebuilder:validation:Optional + DestinationField *string `json:"destinationField,omitempty" tf:"destination_field,omitempty"` + + // Source fields to which a particular task is applied. + // +kubebuilder:validation:Optional + SourceFields []*string `json:"sourceFields,omitempty" tf:"source_fields,omitempty"` + + // Map used to store task-related information. The execution service looks for particular information based on the TaskType. Valid keys are VALUE, VALUES, DATA_TYPE, UPPER_BOUND, LOWER_BOUND, SOURCE_DATA_TYPE, DESTINATION_DATA_TYPE, VALIDATION_ACTION, MASK_VALUE, MASK_LENGTH, TRUNCATE_LENGTH, MATH_OPERATION_FIELDS_ORDER, CONCAT_FORMAT, SUBFIELD_CATEGORY_MAP, and EXCLUDE_SOURCE_FIELDS_LIST. + // +kubebuilder:validation:Optional + // +mapType=granular + TaskProperties map[string]*string `json:"taskProperties,omitempty" tf:"task_properties,omitempty"` + + // Particular task implementation that Amazon AppFlow performs. Valid values are Arithmetic, Filter, Map, Map_all, Mask, Merge, Passthrough, Truncate, and Validate. + // +kubebuilder:validation:Optional + TaskType *string `json:"taskType" tf:"task_type,omitempty"` +} + +type TrendmicroInitParameters struct { + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type TrendmicroObservation struct { + + // Object specified in the flow destination. 
+ Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type TrendmicroParameters struct { + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type TriggerConfigInitParameters struct { + + // Configuration details of a schedule-triggered flow as defined by the user. Currently, these settings only apply to the Scheduled trigger type. See Scheduled Trigger Properties for details. + TriggerProperties *TriggerPropertiesInitParameters `json:"triggerProperties,omitempty" tf:"trigger_properties,omitempty"` + + // Type of flow trigger. Valid values are Scheduled, Event, and OnDemand. + TriggerType *string `json:"triggerType,omitempty" tf:"trigger_type,omitempty"` +} + +type TriggerConfigObservation struct { + + // Configuration details of a schedule-triggered flow as defined by the user. Currently, these settings only apply to the Scheduled trigger type. See Scheduled Trigger Properties for details. + TriggerProperties *TriggerPropertiesObservation `json:"triggerProperties,omitempty" tf:"trigger_properties,omitempty"` + + // Type of flow trigger. Valid values are Scheduled, Event, and OnDemand. + TriggerType *string `json:"triggerType,omitempty" tf:"trigger_type,omitempty"` +} + +type TriggerConfigParameters struct { + + // Configuration details of a schedule-triggered flow as defined by the user. Currently, these settings only apply to the Scheduled trigger type. See Scheduled Trigger Properties for details. + // +kubebuilder:validation:Optional + TriggerProperties *TriggerPropertiesParameters `json:"triggerProperties,omitempty" tf:"trigger_properties,omitempty"` + + // Type of flow trigger. Valid values are Scheduled, Event, and OnDemand. 
+ // +kubebuilder:validation:Optional + TriggerType *string `json:"triggerType" tf:"trigger_type,omitempty"` +} + +type TriggerPropertiesInitParameters struct { + Scheduled *ScheduledInitParameters `json:"scheduled,omitempty" tf:"scheduled,omitempty"` +} + +type TriggerPropertiesObservation struct { + Scheduled *ScheduledObservation `json:"scheduled,omitempty" tf:"scheduled,omitempty"` +} + +type TriggerPropertiesParameters struct { + + // +kubebuilder:validation:Optional + Scheduled *ScheduledParameters `json:"scheduled,omitempty" tf:"scheduled,omitempty"` +} + +type UpsolverInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Configuration that determines how Amazon AppFlow should format the flow output data when Amazon S3 is used as the destination. See S3 Output Format Config for more details. + S3OutputFormatConfig *UpsolverS3OutputFormatConfigInitParameters `json:"s3OutputFormatConfig,omitempty" tf:"s3_output_format_config,omitempty"` +} + +type UpsolverObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Configuration that determines how Amazon AppFlow should format the flow output data when Amazon S3 is used as the destination. See S3 Output Format Config for more details. 
+ S3OutputFormatConfig *UpsolverS3OutputFormatConfigObservation `json:"s3OutputFormatConfig,omitempty" tf:"s3_output_format_config,omitempty"` +} + +type UpsolverParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Configuration that determines how Amazon AppFlow should format the flow output data when Amazon S3 is used as the destination. See S3 Output Format Config for more details. + // +kubebuilder:validation:Optional + S3OutputFormatConfig *UpsolverS3OutputFormatConfigParameters `json:"s3OutputFormatConfig" tf:"s3_output_format_config,omitempty"` +} + +type UpsolverS3OutputFormatConfigInitParameters struct { + + // Aggregation settings that you can use to customize the output format of your flow data. See Aggregation Config for more details. + AggregationConfig *S3OutputFormatConfigAggregationConfigInitParameters `json:"aggregationConfig,omitempty" tf:"aggregation_config,omitempty"` + + // File type that Amazon AppFlow places in the Amazon S3 bucket. Valid values are CSV, JSON, and PARQUET. + FileType *string `json:"fileType,omitempty" tf:"file_type,omitempty"` + + // Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date. See Prefix Config for more details. + PrefixConfig *S3OutputFormatConfigPrefixConfigInitParameters `json:"prefixConfig,omitempty" tf:"prefix_config,omitempty"` +} + +type UpsolverS3OutputFormatConfigObservation struct { + + // Aggregation settings that you can use to customize the output format of your flow data. See Aggregation Config for more details. 
+ AggregationConfig *S3OutputFormatConfigAggregationConfigObservation `json:"aggregationConfig,omitempty" tf:"aggregation_config,omitempty"` + + // File type that Amazon AppFlow places in the Amazon S3 bucket. Valid values are CSV, JSON, and PARQUET. + FileType *string `json:"fileType,omitempty" tf:"file_type,omitempty"` + + // Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date. See Prefix Config for more details. + PrefixConfig *S3OutputFormatConfigPrefixConfigObservation `json:"prefixConfig,omitempty" tf:"prefix_config,omitempty"` +} + +type UpsolverS3OutputFormatConfigParameters struct { + + // Aggregation settings that you can use to customize the output format of your flow data. See Aggregation Config for more details. + // +kubebuilder:validation:Optional + AggregationConfig *S3OutputFormatConfigAggregationConfigParameters `json:"aggregationConfig,omitempty" tf:"aggregation_config,omitempty"` + + // File type that Amazon AppFlow places in the Amazon S3 bucket. Valid values are CSV, JSON, and PARQUET. + // +kubebuilder:validation:Optional + FileType *string `json:"fileType,omitempty" tf:"file_type,omitempty"` + + // Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date. See Prefix Config for more details. + // +kubebuilder:validation:Optional + PrefixConfig *S3OutputFormatConfigPrefixConfigParameters `json:"prefixConfig" tf:"prefix_config,omitempty"` +} + +type VeevaInitParameters struct { + + // Document type specified in the Veeva document extract flow. + DocumentType *string `json:"documentType,omitempty" tf:"document_type,omitempty"` + + // Boolean value to include All Versions of files in Veeva document extract flow. 
+ IncludeAllVersions *bool `json:"includeAllVersions,omitempty" tf:"include_all_versions,omitempty"` + + // Boolean value to include file renditions in Veeva document extract flow. + IncludeRenditions *bool `json:"includeRenditions,omitempty" tf:"include_renditions,omitempty"` + + // Boolean value to include source files in Veeva document extract flow. + IncludeSourceFiles *bool `json:"includeSourceFiles,omitempty" tf:"include_source_files,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type VeevaObservation struct { + + // Document type specified in the Veeva document extract flow. + DocumentType *string `json:"documentType,omitempty" tf:"document_type,omitempty"` + + // Boolean value to include All Versions of files in Veeva document extract flow. + IncludeAllVersions *bool `json:"includeAllVersions,omitempty" tf:"include_all_versions,omitempty"` + + // Boolean value to include file renditions in Veeva document extract flow. + IncludeRenditions *bool `json:"includeRenditions,omitempty" tf:"include_renditions,omitempty"` + + // Boolean value to include source files in Veeva document extract flow. + IncludeSourceFiles *bool `json:"includeSourceFiles,omitempty" tf:"include_source_files,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` +} + +type VeevaParameters struct { + + // Document type specified in the Veeva document extract flow. + // +kubebuilder:validation:Optional + DocumentType *string `json:"documentType,omitempty" tf:"document_type,omitempty"` + + // Boolean value to include All Versions of files in Veeva document extract flow. + // +kubebuilder:validation:Optional + IncludeAllVersions *bool `json:"includeAllVersions,omitempty" tf:"include_all_versions,omitempty"` + + // Boolean value to include file renditions in Veeva document extract flow. 
+ // +kubebuilder:validation:Optional + IncludeRenditions *bool `json:"includeRenditions,omitempty" tf:"include_renditions,omitempty"` + + // Boolean value to include source files in Veeva document extract flow. + // +kubebuilder:validation:Optional + IncludeSourceFiles *bool `json:"includeSourceFiles,omitempty" tf:"include_source_files,omitempty"` + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` +} + +type ZendeskErrorHandlingConfigInitParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type ZendeskErrorHandlingConfigObservation struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type ZendeskErrorHandlingConfigParameters struct { + + // Amazon S3 bucket name in which Amazon AppFlow places the transferred data. 
+ // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Object key for the bucket in which Amazon AppFlow places the destination files. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // If the flow should fail after the first instance of a failure when attempting to place data in the destination. + // +kubebuilder:validation:Optional + FailOnFirstDestinationError *bool `json:"failOnFirstDestinationError,omitempty" tf:"fail_on_first_destination_error,omitempty"` +} + +type ZendeskInitParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *ZendeskErrorHandlingConfigInitParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. + IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type ZendeskObservation struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + ErrorHandlingConfig *ZendeskErrorHandlingConfigObservation `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. 
+ IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Object specified in the flow destination. + Object *string `json:"object,omitempty" tf:"object,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +type ZendeskParameters struct { + + // Settings that determine how Amazon AppFlow handles an error when placing data in the destination. See Error Handling Config for more details. + // +kubebuilder:validation:Optional + ErrorHandlingConfig *ZendeskErrorHandlingConfigParameters `json:"errorHandlingConfig,omitempty" tf:"error_handling_config,omitempty"` + + // Name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert. + // +kubebuilder:validation:Optional + IDFieldNames []*string `json:"idFieldNames,omitempty" tf:"id_field_names,omitempty"` + + // Object specified in the flow destination. + // +kubebuilder:validation:Optional + Object *string `json:"object" tf:"object,omitempty"` + + // Type of write operation to be performed in the custom connector when it's used as destination. Valid values are INSERT, UPSERT, UPDATE, and DELETE. + // +kubebuilder:validation:Optional + WriteOperationType *string `json:"writeOperationType,omitempty" tf:"write_operation_type,omitempty"` +} + +// FlowSpec defines the desired state of Flow +type FlowSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FlowParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FlowInitParameters `json:"initProvider,omitempty"` +} + +// FlowStatus defines the observed state of Flow. +type FlowStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FlowObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Flow is the Schema for the Flows API. Provides an AppFlow Flow resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Flow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destinationFlowConfig) || (has(self.initProvider) && has(self.initProvider.destinationFlowConfig))",message="spec.forProvider.destinationFlowConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.sourceFlowConfig) || (has(self.initProvider) && has(self.initProvider.sourceFlowConfig))",message="spec.forProvider.sourceFlowConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.task) || (has(self.initProvider) && has(self.initProvider.task))",message="spec.forProvider.task is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.triggerConfig) || (has(self.initProvider) && has(self.initProvider.triggerConfig))",message="spec.forProvider.triggerConfig is a required parameter" + Spec FlowSpec `json:"spec"` + Status FlowStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FlowList contains a list of Flows +type FlowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Flow `json:"items"` +} + +// Repository type metadata. +var ( + Flow_Kind = "Flow" + Flow_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Flow_Kind}.String() + Flow_KindAPIVersion = Flow_Kind + "." + CRDGroupVersion.String() + Flow_GroupVersionKind = CRDGroupVersion.WithKind(Flow_Kind) +) + +func init() { + SchemeBuilder.Register(&Flow{}, &FlowList{}) +} diff --git a/apis/appflow/v1beta2/zz_generated.conversion_hubs.go b/apis/appflow/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..03eaa436b0 --- /dev/null +++ b/apis/appflow/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Flow) Hub() {} diff --git a/apis/appflow/v1beta2/zz_generated.deepcopy.go b/apis/appflow/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9f8c159393 --- /dev/null +++ b/apis/appflow/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,5779 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationConfigInitParameters) DeepCopyInto(out *AggregationConfigInitParameters) { + *out = *in + if in.AggregationType != nil { + in, out := &in.AggregationType, &out.AggregationType + *out = new(string) + **out = **in + } + if in.TargetFileSize != nil { + in, out := &in.TargetFileSize, &out.TargetFileSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationConfigInitParameters. +func (in *AggregationConfigInitParameters) DeepCopy() *AggregationConfigInitParameters { + if in == nil { + return nil + } + out := new(AggregationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AggregationConfigObservation) DeepCopyInto(out *AggregationConfigObservation) { + *out = *in + if in.AggregationType != nil { + in, out := &in.AggregationType, &out.AggregationType + *out = new(string) + **out = **in + } + if in.TargetFileSize != nil { + in, out := &in.TargetFileSize, &out.TargetFileSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationConfigObservation. +func (in *AggregationConfigObservation) DeepCopy() *AggregationConfigObservation { + if in == nil { + return nil + } + out := new(AggregationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationConfigParameters) DeepCopyInto(out *AggregationConfigParameters) { + *out = *in + if in.AggregationType != nil { + in, out := &in.AggregationType, &out.AggregationType + *out = new(string) + **out = **in + } + if in.TargetFileSize != nil { + in, out := &in.TargetFileSize, &out.TargetFileSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationConfigParameters. +func (in *AggregationConfigParameters) DeepCopy() *AggregationConfigParameters { + if in == nil { + return nil + } + out := new(AggregationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmplitudeInitParameters) DeepCopyInto(out *AmplitudeInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmplitudeInitParameters. 
+func (in *AmplitudeInitParameters) DeepCopy() *AmplitudeInitParameters { + if in == nil { + return nil + } + out := new(AmplitudeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmplitudeObservation) DeepCopyInto(out *AmplitudeObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmplitudeObservation. +func (in *AmplitudeObservation) DeepCopy() *AmplitudeObservation { + if in == nil { + return nil + } + out := new(AmplitudeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmplitudeParameters) DeepCopyInto(out *AmplitudeParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmplitudeParameters. +func (in *AmplitudeParameters) DeepCopy() *AmplitudeParameters { + if in == nil { + return nil + } + out := new(AmplitudeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectorOperatorInitParameters) DeepCopyInto(out *ConnectorOperatorInitParameters) { + *out = *in + if in.Amplitude != nil { + in, out := &in.Amplitude, &out.Amplitude + *out = new(string) + **out = **in + } + if in.CustomConnector != nil { + in, out := &in.CustomConnector, &out.CustomConnector + *out = new(string) + **out = **in + } + if in.Datadog != nil { + in, out := &in.Datadog, &out.Datadog + *out = new(string) + **out = **in + } + if in.Dynatrace != nil { + in, out := &in.Dynatrace, &out.Dynatrace + *out = new(string) + **out = **in + } + if in.GoogleAnalytics != nil { + in, out := &in.GoogleAnalytics, &out.GoogleAnalytics + *out = new(string) + **out = **in + } + if in.InforNexus != nil { + in, out := &in.InforNexus, &out.InforNexus + *out = new(string) + **out = **in + } + if in.Marketo != nil { + in, out := &in.Marketo, &out.Marketo + *out = new(string) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(string) + **out = **in + } + if in.Salesforce != nil { + in, out := &in.Salesforce, &out.Salesforce + *out = new(string) + **out = **in + } + if in.SapoData != nil { + in, out := &in.SapoData, &out.SapoData + *out = new(string) + **out = **in + } + if in.ServiceNow != nil { + in, out := &in.ServiceNow, &out.ServiceNow + *out = new(string) + **out = **in + } + if in.Singular != nil { + in, out := &in.Singular, &out.Singular + *out = new(string) + **out = **in + } + if in.Slack != nil { + in, out := &in.Slack, &out.Slack + *out = new(string) + **out = **in + } + if in.Trendmicro != nil { + in, out := &in.Trendmicro, &out.Trendmicro + *out = new(string) + **out = **in + } + if in.Veeva != nil { + in, out := &in.Veeva, &out.Veeva + *out = new(string) + **out = **in + } + if in.Zendesk != nil { + in, out := &in.Zendesk, &out.Zendesk + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorOperatorInitParameters. 
+func (in *ConnectorOperatorInitParameters) DeepCopy() *ConnectorOperatorInitParameters { + if in == nil { + return nil + } + out := new(ConnectorOperatorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorOperatorObservation) DeepCopyInto(out *ConnectorOperatorObservation) { + *out = *in + if in.Amplitude != nil { + in, out := &in.Amplitude, &out.Amplitude + *out = new(string) + **out = **in + } + if in.CustomConnector != nil { + in, out := &in.CustomConnector, &out.CustomConnector + *out = new(string) + **out = **in + } + if in.Datadog != nil { + in, out := &in.Datadog, &out.Datadog + *out = new(string) + **out = **in + } + if in.Dynatrace != nil { + in, out := &in.Dynatrace, &out.Dynatrace + *out = new(string) + **out = **in + } + if in.GoogleAnalytics != nil { + in, out := &in.GoogleAnalytics, &out.GoogleAnalytics + *out = new(string) + **out = **in + } + if in.InforNexus != nil { + in, out := &in.InforNexus, &out.InforNexus + *out = new(string) + **out = **in + } + if in.Marketo != nil { + in, out := &in.Marketo, &out.Marketo + *out = new(string) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(string) + **out = **in + } + if in.Salesforce != nil { + in, out := &in.Salesforce, &out.Salesforce + *out = new(string) + **out = **in + } + if in.SapoData != nil { + in, out := &in.SapoData, &out.SapoData + *out = new(string) + **out = **in + } + if in.ServiceNow != nil { + in, out := &in.ServiceNow, &out.ServiceNow + *out = new(string) + **out = **in + } + if in.Singular != nil { + in, out := &in.Singular, &out.Singular + *out = new(string) + **out = **in + } + if in.Slack != nil { + in, out := &in.Slack, &out.Slack + *out = new(string) + **out = **in + } + if in.Trendmicro != nil { + in, out := &in.Trendmicro, &out.Trendmicro + *out = new(string) + **out = **in + } + if in.Veeva != nil { + in, 
out := &in.Veeva, &out.Veeva + *out = new(string) + **out = **in + } + if in.Zendesk != nil { + in, out := &in.Zendesk, &out.Zendesk + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorOperatorObservation. +func (in *ConnectorOperatorObservation) DeepCopy() *ConnectorOperatorObservation { + if in == nil { + return nil + } + out := new(ConnectorOperatorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorOperatorParameters) DeepCopyInto(out *ConnectorOperatorParameters) { + *out = *in + if in.Amplitude != nil { + in, out := &in.Amplitude, &out.Amplitude + *out = new(string) + **out = **in + } + if in.CustomConnector != nil { + in, out := &in.CustomConnector, &out.CustomConnector + *out = new(string) + **out = **in + } + if in.Datadog != nil { + in, out := &in.Datadog, &out.Datadog + *out = new(string) + **out = **in + } + if in.Dynatrace != nil { + in, out := &in.Dynatrace, &out.Dynatrace + *out = new(string) + **out = **in + } + if in.GoogleAnalytics != nil { + in, out := &in.GoogleAnalytics, &out.GoogleAnalytics + *out = new(string) + **out = **in + } + if in.InforNexus != nil { + in, out := &in.InforNexus, &out.InforNexus + *out = new(string) + **out = **in + } + if in.Marketo != nil { + in, out := &in.Marketo, &out.Marketo + *out = new(string) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(string) + **out = **in + } + if in.Salesforce != nil { + in, out := &in.Salesforce, &out.Salesforce + *out = new(string) + **out = **in + } + if in.SapoData != nil { + in, out := &in.SapoData, &out.SapoData + *out = new(string) + **out = **in + } + if in.ServiceNow != nil { + in, out := &in.ServiceNow, &out.ServiceNow + *out = new(string) + **out = **in + } + if in.Singular != nil { + in, out := &in.Singular, 
&out.Singular + *out = new(string) + **out = **in + } + if in.Slack != nil { + in, out := &in.Slack, &out.Slack + *out = new(string) + **out = **in + } + if in.Trendmicro != nil { + in, out := &in.Trendmicro, &out.Trendmicro + *out = new(string) + **out = **in + } + if in.Veeva != nil { + in, out := &in.Veeva, &out.Veeva + *out = new(string) + **out = **in + } + if in.Zendesk != nil { + in, out := &in.Zendesk, &out.Zendesk + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorOperatorParameters. +func (in *ConnectorOperatorParameters) DeepCopy() *ConnectorOperatorParameters { + if in == nil { + return nil + } + out := new(ConnectorOperatorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomConnectorInitParameters) DeepCopyInto(out *CustomConnectorInitParameters) { + *out = *in + if in.CustomProperties != nil { + in, out := &in.CustomProperties, &out.CustomProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EntityName != nil { + in, out := &in.EntityName, &out.EntityName + *out = new(string) + **out = **in + } + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(ErrorHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType 
+ *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomConnectorInitParameters. +func (in *CustomConnectorInitParameters) DeepCopy() *CustomConnectorInitParameters { + if in == nil { + return nil + } + out := new(CustomConnectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomConnectorObservation) DeepCopyInto(out *CustomConnectorObservation) { + *out = *in + if in.CustomProperties != nil { + in, out := &in.CustomProperties, &out.CustomProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EntityName != nil { + in, out := &in.EntityName, &out.EntityName + *out = new(string) + **out = **in + } + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(ErrorHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomConnectorObservation. 
+func (in *CustomConnectorObservation) DeepCopy() *CustomConnectorObservation { + if in == nil { + return nil + } + out := new(CustomConnectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomConnectorParameters) DeepCopyInto(out *CustomConnectorParameters) { + *out = *in + if in.CustomProperties != nil { + in, out := &in.CustomProperties, &out.CustomProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EntityName != nil { + in, out := &in.EntityName, &out.EntityName + *out = new(string) + **out = **in + } + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(ErrorHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomConnectorParameters. +func (in *CustomConnectorParameters) DeepCopy() *CustomConnectorParameters { + if in == nil { + return nil + } + out := new(CustomConnectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerProfilesInitParameters) DeepCopyInto(out *CustomerProfilesInitParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.ObjectTypeName != nil { + in, out := &in.ObjectTypeName, &out.ObjectTypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerProfilesInitParameters. +func (in *CustomerProfilesInitParameters) DeepCopy() *CustomerProfilesInitParameters { + if in == nil { + return nil + } + out := new(CustomerProfilesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerProfilesObservation) DeepCopyInto(out *CustomerProfilesObservation) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.ObjectTypeName != nil { + in, out := &in.ObjectTypeName, &out.ObjectTypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerProfilesObservation. +func (in *CustomerProfilesObservation) DeepCopy() *CustomerProfilesObservation { + if in == nil { + return nil + } + out := new(CustomerProfilesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerProfilesParameters) DeepCopyInto(out *CustomerProfilesParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.ObjectTypeName != nil { + in, out := &in.ObjectTypeName, &out.ObjectTypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerProfilesParameters. +func (in *CustomerProfilesParameters) DeepCopy() *CustomerProfilesParameters { + if in == nil { + return nil + } + out := new(CustomerProfilesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatadogInitParameters) DeepCopyInto(out *DatadogInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatadogInitParameters. +func (in *DatadogInitParameters) DeepCopy() *DatadogInitParameters { + if in == nil { + return nil + } + out := new(DatadogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatadogObservation) DeepCopyInto(out *DatadogObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatadogObservation. +func (in *DatadogObservation) DeepCopy() *DatadogObservation { + if in == nil { + return nil + } + out := new(DatadogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatadogParameters) DeepCopyInto(out *DatadogParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatadogParameters. +func (in *DatadogParameters) DeepCopy() *DatadogParameters { + if in == nil { + return nil + } + out := new(DatadogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationConnectorPropertiesInitParameters) DeepCopyInto(out *DestinationConnectorPropertiesInitParameters) { + *out = *in + if in.CustomConnector != nil { + in, out := &in.CustomConnector, &out.CustomConnector + *out = new(CustomConnectorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomerProfiles != nil { + in, out := &in.CustomerProfiles, &out.CustomerProfiles + *out = new(CustomerProfilesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventBridge != nil { + in, out := &in.EventBridge, &out.EventBridge + *out = new(EventBridgeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Honeycode != nil { + in, out := &in.Honeycode, &out.Honeycode + *out = new(HoneycodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LookoutMetrics != nil { + in, out := &in.LookoutMetrics, &out.LookoutMetrics + *out = new(LookoutMetricsInitParameters) + **out = **in + } + if in.Marketo != nil { + in, out := &in.Marketo, &out.Marketo + *out = new(MarketoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Redshift != nil { + in, out := &in.Redshift, &out.Redshift + *out = new(RedshiftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Salesforce != nil { + in, out := &in.Salesforce, &out.Salesforce + *out = new(SalesforceInitParameters) + (*in).DeepCopyInto(*out) + } 
+ if in.SapoData != nil { + in, out := &in.SapoData, &out.SapoData + *out = new(SapoDataInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Snowflake != nil { + in, out := &in.Snowflake, &out.Snowflake + *out = new(SnowflakeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Upsolver != nil { + in, out := &in.Upsolver, &out.Upsolver + *out = new(UpsolverInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Zendesk != nil { + in, out := &in.Zendesk, &out.Zendesk + *out = new(ZendeskInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConnectorPropertiesInitParameters. +func (in *DestinationConnectorPropertiesInitParameters) DeepCopy() *DestinationConnectorPropertiesInitParameters { + if in == nil { + return nil + } + out := new(DestinationConnectorPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationConnectorPropertiesObservation) DeepCopyInto(out *DestinationConnectorPropertiesObservation) { + *out = *in + if in.CustomConnector != nil { + in, out := &in.CustomConnector, &out.CustomConnector + *out = new(CustomConnectorObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomerProfiles != nil { + in, out := &in.CustomerProfiles, &out.CustomerProfiles + *out = new(CustomerProfilesObservation) + (*in).DeepCopyInto(*out) + } + if in.EventBridge != nil { + in, out := &in.EventBridge, &out.EventBridge + *out = new(EventBridgeObservation) + (*in).DeepCopyInto(*out) + } + if in.Honeycode != nil { + in, out := &in.Honeycode, &out.Honeycode + *out = new(HoneycodeObservation) + (*in).DeepCopyInto(*out) + } + if in.LookoutMetrics != nil { + in, out := &in.LookoutMetrics, &out.LookoutMetrics + *out = new(LookoutMetricsParameters) + **out = **in + } + if in.Marketo != nil { + in, out := &in.Marketo, &out.Marketo + *out = new(MarketoObservation) + (*in).DeepCopyInto(*out) + } + if in.Redshift != nil { + in, out := &in.Redshift, &out.Redshift + *out = new(RedshiftObservation) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Observation) + (*in).DeepCopyInto(*out) + } + if in.Salesforce != nil { + in, out := &in.Salesforce, &out.Salesforce + *out = new(SalesforceObservation) + (*in).DeepCopyInto(*out) + } + if in.SapoData != nil { + in, out := &in.SapoData, &out.SapoData + *out = new(SapoDataObservation) + (*in).DeepCopyInto(*out) + } + if in.Snowflake != nil { + in, out := &in.Snowflake, &out.Snowflake + *out = new(SnowflakeObservation) + (*in).DeepCopyInto(*out) + } + if in.Upsolver != nil { + in, out := &in.Upsolver, &out.Upsolver + *out = new(UpsolverObservation) + (*in).DeepCopyInto(*out) + } + if in.Zendesk != nil { + in, out := &in.Zendesk, &out.Zendesk + *out = new(ZendeskObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new DestinationConnectorPropertiesObservation. +func (in *DestinationConnectorPropertiesObservation) DeepCopy() *DestinationConnectorPropertiesObservation { + if in == nil { + return nil + } + out := new(DestinationConnectorPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationConnectorPropertiesParameters) DeepCopyInto(out *DestinationConnectorPropertiesParameters) { + *out = *in + if in.CustomConnector != nil { + in, out := &in.CustomConnector, &out.CustomConnector + *out = new(CustomConnectorParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomerProfiles != nil { + in, out := &in.CustomerProfiles, &out.CustomerProfiles + *out = new(CustomerProfilesParameters) + (*in).DeepCopyInto(*out) + } + if in.EventBridge != nil { + in, out := &in.EventBridge, &out.EventBridge + *out = new(EventBridgeParameters) + (*in).DeepCopyInto(*out) + } + if in.Honeycode != nil { + in, out := &in.Honeycode, &out.Honeycode + *out = new(HoneycodeParameters) + (*in).DeepCopyInto(*out) + } + if in.LookoutMetrics != nil { + in, out := &in.LookoutMetrics, &out.LookoutMetrics + *out = new(LookoutMetricsParameters) + **out = **in + } + if in.Marketo != nil { + in, out := &in.Marketo, &out.Marketo + *out = new(MarketoParameters) + (*in).DeepCopyInto(*out) + } + if in.Redshift != nil { + in, out := &in.Redshift, &out.Redshift + *out = new(RedshiftParameters) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Parameters) + (*in).DeepCopyInto(*out) + } + if in.Salesforce != nil { + in, out := &in.Salesforce, &out.Salesforce + *out = new(SalesforceParameters) + (*in).DeepCopyInto(*out) + } + if in.SapoData != nil { + in, out := &in.SapoData, &out.SapoData + *out = new(SapoDataParameters) + (*in).DeepCopyInto(*out) + } + if in.Snowflake != nil { + in, out := &in.Snowflake, &out.Snowflake + *out = 
new(SnowflakeParameters) + (*in).DeepCopyInto(*out) + } + if in.Upsolver != nil { + in, out := &in.Upsolver, &out.Upsolver + *out = new(UpsolverParameters) + (*in).DeepCopyInto(*out) + } + if in.Zendesk != nil { + in, out := &in.Zendesk, &out.Zendesk + *out = new(ZendeskParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConnectorPropertiesParameters. +func (in *DestinationConnectorPropertiesParameters) DeepCopy() *DestinationConnectorPropertiesParameters { + if in == nil { + return nil + } + out := new(DestinationConnectorPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationFlowConfigInitParameters) DeepCopyInto(out *DestinationFlowConfigInitParameters) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.ConnectorProfileName != nil { + in, out := &in.ConnectorProfileName, &out.ConnectorProfileName + *out = new(string) + **out = **in + } + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.DestinationConnectorProperties != nil { + in, out := &in.DestinationConnectorProperties, &out.DestinationConnectorProperties + *out = new(DestinationConnectorPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFlowConfigInitParameters. +func (in *DestinationFlowConfigInitParameters) DeepCopy() *DestinationFlowConfigInitParameters { + if in == nil { + return nil + } + out := new(DestinationFlowConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationFlowConfigObservation) DeepCopyInto(out *DestinationFlowConfigObservation) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.ConnectorProfileName != nil { + in, out := &in.ConnectorProfileName, &out.ConnectorProfileName + *out = new(string) + **out = **in + } + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.DestinationConnectorProperties != nil { + in, out := &in.DestinationConnectorProperties, &out.DestinationConnectorProperties + *out = new(DestinationConnectorPropertiesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFlowConfigObservation. +func (in *DestinationFlowConfigObservation) DeepCopy() *DestinationFlowConfigObservation { + if in == nil { + return nil + } + out := new(DestinationFlowConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationFlowConfigParameters) DeepCopyInto(out *DestinationFlowConfigParameters) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.ConnectorProfileName != nil { + in, out := &in.ConnectorProfileName, &out.ConnectorProfileName + *out = new(string) + **out = **in + } + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.DestinationConnectorProperties != nil { + in, out := &in.DestinationConnectorProperties, &out.DestinationConnectorProperties + *out = new(DestinationConnectorPropertiesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFlowConfigParameters. +func (in *DestinationFlowConfigParameters) DeepCopy() *DestinationFlowConfigParameters { + if in == nil { + return nil + } + out := new(DestinationFlowConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynatraceInitParameters) DeepCopyInto(out *DynatraceInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynatraceInitParameters. +func (in *DynatraceInitParameters) DeepCopy() *DynatraceInitParameters { + if in == nil { + return nil + } + out := new(DynatraceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynatraceObservation) DeepCopyInto(out *DynatraceObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynatraceObservation. +func (in *DynatraceObservation) DeepCopy() *DynatraceObservation { + if in == nil { + return nil + } + out := new(DynatraceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynatraceParameters) DeepCopyInto(out *DynatraceParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynatraceParameters. +func (in *DynatraceParameters) DeepCopy() *DynatraceParameters { + if in == nil { + return nil + } + out := new(DynatraceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorHandlingConfigInitParameters) DeepCopyInto(out *ErrorHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorHandlingConfigInitParameters. 
+func (in *ErrorHandlingConfigInitParameters) DeepCopy() *ErrorHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(ErrorHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorHandlingConfigObservation) DeepCopyInto(out *ErrorHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorHandlingConfigObservation. +func (in *ErrorHandlingConfigObservation) DeepCopy() *ErrorHandlingConfigObservation { + if in == nil { + return nil + } + out := new(ErrorHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorHandlingConfigParameters) DeepCopyInto(out *ErrorHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorHandlingConfigParameters. 
+func (in *ErrorHandlingConfigParameters) DeepCopy() *ErrorHandlingConfigParameters { + if in == nil { + return nil + } + out := new(ErrorHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventBridgeErrorHandlingConfigInitParameters) DeepCopyInto(out *EventBridgeErrorHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBridgeErrorHandlingConfigInitParameters. +func (in *EventBridgeErrorHandlingConfigInitParameters) DeepCopy() *EventBridgeErrorHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(EventBridgeErrorHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventBridgeErrorHandlingConfigObservation) DeepCopyInto(out *EventBridgeErrorHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBridgeErrorHandlingConfigObservation. +func (in *EventBridgeErrorHandlingConfigObservation) DeepCopy() *EventBridgeErrorHandlingConfigObservation { + if in == nil { + return nil + } + out := new(EventBridgeErrorHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventBridgeErrorHandlingConfigParameters) DeepCopyInto(out *EventBridgeErrorHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBridgeErrorHandlingConfigParameters. 
+func (in *EventBridgeErrorHandlingConfigParameters) DeepCopy() *EventBridgeErrorHandlingConfigParameters { + if in == nil { + return nil + } + out := new(EventBridgeErrorHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventBridgeInitParameters) DeepCopyInto(out *EventBridgeInitParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(EventBridgeErrorHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBridgeInitParameters. +func (in *EventBridgeInitParameters) DeepCopy() *EventBridgeInitParameters { + if in == nil { + return nil + } + out := new(EventBridgeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventBridgeObservation) DeepCopyInto(out *EventBridgeObservation) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(EventBridgeErrorHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBridgeObservation. +func (in *EventBridgeObservation) DeepCopy() *EventBridgeObservation { + if in == nil { + return nil + } + out := new(EventBridgeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventBridgeParameters) DeepCopyInto(out *EventBridgeParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(EventBridgeErrorHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBridgeParameters. +func (in *EventBridgeParameters) DeepCopy() *EventBridgeParameters { + if in == nil { + return nil + } + out := new(EventBridgeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Flow) DeepCopyInto(out *Flow) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flow. +func (in *Flow) DeepCopy() *Flow { + if in == nil { + return nil + } + out := new(Flow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Flow) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowInitParameters) DeepCopyInto(out *FlowInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DestinationFlowConfig != nil { + in, out := &in.DestinationFlowConfig, &out.DestinationFlowConfig + *out = make([]DestinationFlowConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KMSArn != nil { + in, out := &in.KMSArn, &out.KMSArn + *out = new(string) + **out = **in + } + if in.SourceFlowConfig != nil { + in, out := &in.SourceFlowConfig, &out.SourceFlowConfig + *out = new(SourceFlowConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Task != nil { + in, out := &in.Task, &out.Task + *out = make([]TaskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TriggerConfig != nil { + in, out := &in.TriggerConfig, &out.TriggerConfig + *out = new(TriggerConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowInitParameters. +func (in *FlowInitParameters) DeepCopy() *FlowInitParameters { + if in == nil { + return nil + } + out := new(FlowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowList) DeepCopyInto(out *FlowList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Flow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowList. +func (in *FlowList) DeepCopy() *FlowList { + if in == nil { + return nil + } + out := new(FlowList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowObservation) DeepCopyInto(out *FlowObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DestinationFlowConfig != nil { + in, out := &in.DestinationFlowConfig, &out.DestinationFlowConfig + *out = make([]DestinationFlowConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FlowStatus != nil { + in, out := &in.FlowStatus, &out.FlowStatus + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSArn != nil { + in, out := &in.KMSArn, &out.KMSArn + *out = new(string) + **out = **in + } + if in.SourceFlowConfig != nil { + in, out := &in.SourceFlowConfig, &out.SourceFlowConfig + *out = new(SourceFlowConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range 
*in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Task != nil { + in, out := &in.Task, &out.Task + *out = make([]TaskObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TriggerConfig != nil { + in, out := &in.TriggerConfig, &out.TriggerConfig + *out = new(TriggerConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowObservation. +func (in *FlowObservation) DeepCopy() *FlowObservation { + if in == nil { + return nil + } + out := new(FlowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowParameters) DeepCopyInto(out *FlowParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DestinationFlowConfig != nil { + in, out := &in.DestinationFlowConfig, &out.DestinationFlowConfig + *out = make([]DestinationFlowConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KMSArn != nil { + in, out := &in.KMSArn, &out.KMSArn + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SourceFlowConfig != nil { + in, out := &in.SourceFlowConfig, &out.SourceFlowConfig + *out = new(SourceFlowConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Task != nil { + in, out := &in.Task, &out.Task + *out = make([]TaskParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TriggerConfig != nil { + in, out := &in.TriggerConfig, &out.TriggerConfig + *out = new(TriggerConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowParameters. +func (in *FlowParameters) DeepCopy() *FlowParameters { + if in == nil { + return nil + } + out := new(FlowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowSpec) DeepCopyInto(out *FlowSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSpec. +func (in *FlowSpec) DeepCopy() *FlowSpec { + if in == nil { + return nil + } + out := new(FlowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowStatus) DeepCopyInto(out *FlowStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowStatus. +func (in *FlowStatus) DeepCopy() *FlowStatus { + if in == nil { + return nil + } + out := new(FlowStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleAnalyticsInitParameters) DeepCopyInto(out *GoogleAnalyticsInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleAnalyticsInitParameters. +func (in *GoogleAnalyticsInitParameters) DeepCopy() *GoogleAnalyticsInitParameters { + if in == nil { + return nil + } + out := new(GoogleAnalyticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GoogleAnalyticsObservation) DeepCopyInto(out *GoogleAnalyticsObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleAnalyticsObservation. +func (in *GoogleAnalyticsObservation) DeepCopy() *GoogleAnalyticsObservation { + if in == nil { + return nil + } + out := new(GoogleAnalyticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleAnalyticsParameters) DeepCopyInto(out *GoogleAnalyticsParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleAnalyticsParameters. +func (in *GoogleAnalyticsParameters) DeepCopy() *GoogleAnalyticsParameters { + if in == nil { + return nil + } + out := new(GoogleAnalyticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HoneycodeErrorHandlingConfigInitParameters) DeepCopyInto(out *HoneycodeErrorHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoneycodeErrorHandlingConfigInitParameters. 
+func (in *HoneycodeErrorHandlingConfigInitParameters) DeepCopy() *HoneycodeErrorHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(HoneycodeErrorHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HoneycodeErrorHandlingConfigObservation) DeepCopyInto(out *HoneycodeErrorHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoneycodeErrorHandlingConfigObservation. +func (in *HoneycodeErrorHandlingConfigObservation) DeepCopy() *HoneycodeErrorHandlingConfigObservation { + if in == nil { + return nil + } + out := new(HoneycodeErrorHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HoneycodeErrorHandlingConfigParameters) DeepCopyInto(out *HoneycodeErrorHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoneycodeErrorHandlingConfigParameters. +func (in *HoneycodeErrorHandlingConfigParameters) DeepCopy() *HoneycodeErrorHandlingConfigParameters { + if in == nil { + return nil + } + out := new(HoneycodeErrorHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HoneycodeInitParameters) DeepCopyInto(out *HoneycodeInitParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(HoneycodeErrorHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoneycodeInitParameters. +func (in *HoneycodeInitParameters) DeepCopy() *HoneycodeInitParameters { + if in == nil { + return nil + } + out := new(HoneycodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HoneycodeObservation) DeepCopyInto(out *HoneycodeObservation) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(HoneycodeErrorHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoneycodeObservation. +func (in *HoneycodeObservation) DeepCopy() *HoneycodeObservation { + if in == nil { + return nil + } + out := new(HoneycodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HoneycodeParameters) DeepCopyInto(out *HoneycodeParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(HoneycodeErrorHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoneycodeParameters. +func (in *HoneycodeParameters) DeepCopy() *HoneycodeParameters { + if in == nil { + return nil + } + out := new(HoneycodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncrementalPullConfigInitParameters) DeepCopyInto(out *IncrementalPullConfigInitParameters) { + *out = *in + if in.DatetimeTypeFieldName != nil { + in, out := &in.DatetimeTypeFieldName, &out.DatetimeTypeFieldName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncrementalPullConfigInitParameters. 
+func (in *IncrementalPullConfigInitParameters) DeepCopy() *IncrementalPullConfigInitParameters { + if in == nil { + return nil + } + out := new(IncrementalPullConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncrementalPullConfigObservation) DeepCopyInto(out *IncrementalPullConfigObservation) { + *out = *in + if in.DatetimeTypeFieldName != nil { + in, out := &in.DatetimeTypeFieldName, &out.DatetimeTypeFieldName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncrementalPullConfigObservation. +func (in *IncrementalPullConfigObservation) DeepCopy() *IncrementalPullConfigObservation { + if in == nil { + return nil + } + out := new(IncrementalPullConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncrementalPullConfigParameters) DeepCopyInto(out *IncrementalPullConfigParameters) { + *out = *in + if in.DatetimeTypeFieldName != nil { + in, out := &in.DatetimeTypeFieldName, &out.DatetimeTypeFieldName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncrementalPullConfigParameters. +func (in *IncrementalPullConfigParameters) DeepCopy() *IncrementalPullConfigParameters { + if in == nil { + return nil + } + out := new(IncrementalPullConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InforNexusInitParameters) DeepCopyInto(out *InforNexusInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InforNexusInitParameters. +func (in *InforNexusInitParameters) DeepCopy() *InforNexusInitParameters { + if in == nil { + return nil + } + out := new(InforNexusInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InforNexusObservation) DeepCopyInto(out *InforNexusObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InforNexusObservation. +func (in *InforNexusObservation) DeepCopy() *InforNexusObservation { + if in == nil { + return nil + } + out := new(InforNexusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InforNexusParameters) DeepCopyInto(out *InforNexusParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InforNexusParameters. +func (in *InforNexusParameters) DeepCopy() *InforNexusParameters { + if in == nil { + return nil + } + out := new(InforNexusParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LookoutMetricsInitParameters) DeepCopyInto(out *LookoutMetricsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LookoutMetricsInitParameters. +func (in *LookoutMetricsInitParameters) DeepCopy() *LookoutMetricsInitParameters { + if in == nil { + return nil + } + out := new(LookoutMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LookoutMetricsObservation) DeepCopyInto(out *LookoutMetricsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LookoutMetricsObservation. +func (in *LookoutMetricsObservation) DeepCopy() *LookoutMetricsObservation { + if in == nil { + return nil + } + out := new(LookoutMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LookoutMetricsParameters) DeepCopyInto(out *LookoutMetricsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LookoutMetricsParameters. +func (in *LookoutMetricsParameters) DeepCopy() *LookoutMetricsParameters { + if in == nil { + return nil + } + out := new(LookoutMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MarketoErrorHandlingConfigInitParameters) DeepCopyInto(out *MarketoErrorHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MarketoErrorHandlingConfigInitParameters. +func (in *MarketoErrorHandlingConfigInitParameters) DeepCopy() *MarketoErrorHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(MarketoErrorHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MarketoErrorHandlingConfigObservation) DeepCopyInto(out *MarketoErrorHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MarketoErrorHandlingConfigObservation. +func (in *MarketoErrorHandlingConfigObservation) DeepCopy() *MarketoErrorHandlingConfigObservation { + if in == nil { + return nil + } + out := new(MarketoErrorHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MarketoErrorHandlingConfigParameters) DeepCopyInto(out *MarketoErrorHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MarketoErrorHandlingConfigParameters. +func (in *MarketoErrorHandlingConfigParameters) DeepCopy() *MarketoErrorHandlingConfigParameters { + if in == nil { + return nil + } + out := new(MarketoErrorHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MarketoInitParameters) DeepCopyInto(out *MarketoInitParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(MarketoErrorHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MarketoInitParameters. +func (in *MarketoInitParameters) DeepCopy() *MarketoInitParameters { + if in == nil { + return nil + } + out := new(MarketoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MarketoObservation) DeepCopyInto(out *MarketoObservation) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(MarketoErrorHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MarketoObservation. +func (in *MarketoObservation) DeepCopy() *MarketoObservation { + if in == nil { + return nil + } + out := new(MarketoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MarketoParameters) DeepCopyInto(out *MarketoParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(MarketoErrorHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MarketoParameters. +func (in *MarketoParameters) DeepCopy() *MarketoParameters { + if in == nil { + return nil + } + out := new(MarketoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixConfigInitParameters) DeepCopyInto(out *PrefixConfigInitParameters) { + *out = *in + if in.PrefixFormat != nil { + in, out := &in.PrefixFormat, &out.PrefixFormat + *out = new(string) + **out = **in + } + if in.PrefixType != nil { + in, out := &in.PrefixType, &out.PrefixType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixConfigInitParameters. 
+func (in *PrefixConfigInitParameters) DeepCopy() *PrefixConfigInitParameters { + if in == nil { + return nil + } + out := new(PrefixConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixConfigObservation) DeepCopyInto(out *PrefixConfigObservation) { + *out = *in + if in.PrefixFormat != nil { + in, out := &in.PrefixFormat, &out.PrefixFormat + *out = new(string) + **out = **in + } + if in.PrefixType != nil { + in, out := &in.PrefixType, &out.PrefixType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixConfigObservation. +func (in *PrefixConfigObservation) DeepCopy() *PrefixConfigObservation { + if in == nil { + return nil + } + out := new(PrefixConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixConfigParameters) DeepCopyInto(out *PrefixConfigParameters) { + *out = *in + if in.PrefixFormat != nil { + in, out := &in.PrefixFormat, &out.PrefixFormat + *out = new(string) + **out = **in + } + if in.PrefixType != nil { + in, out := &in.PrefixType, &out.PrefixType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixConfigParameters. +func (in *PrefixConfigParameters) DeepCopy() *PrefixConfigParameters { + if in == nil { + return nil + } + out := new(PrefixConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedshiftErrorHandlingConfigInitParameters) DeepCopyInto(out *RedshiftErrorHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftErrorHandlingConfigInitParameters. +func (in *RedshiftErrorHandlingConfigInitParameters) DeepCopy() *RedshiftErrorHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(RedshiftErrorHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedshiftErrorHandlingConfigObservation) DeepCopyInto(out *RedshiftErrorHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftErrorHandlingConfigObservation. +func (in *RedshiftErrorHandlingConfigObservation) DeepCopy() *RedshiftErrorHandlingConfigObservation { + if in == nil { + return nil + } + out := new(RedshiftErrorHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RedshiftErrorHandlingConfigParameters) DeepCopyInto(out *RedshiftErrorHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftErrorHandlingConfigParameters. +func (in *RedshiftErrorHandlingConfigParameters) DeepCopy() *RedshiftErrorHandlingConfigParameters { + if in == nil { + return nil + } + out := new(RedshiftErrorHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedshiftInitParameters) DeepCopyInto(out *RedshiftInitParameters) { + *out = *in + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(RedshiftErrorHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IntermediateBucketName != nil { + in, out := &in.IntermediateBucketName, &out.IntermediateBucketName + *out = new(string) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftInitParameters. 
+func (in *RedshiftInitParameters) DeepCopy() *RedshiftInitParameters { + if in == nil { + return nil + } + out := new(RedshiftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedshiftObservation) DeepCopyInto(out *RedshiftObservation) { + *out = *in + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(RedshiftErrorHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.IntermediateBucketName != nil { + in, out := &in.IntermediateBucketName, &out.IntermediateBucketName + *out = new(string) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftObservation. +func (in *RedshiftObservation) DeepCopy() *RedshiftObservation { + if in == nil { + return nil + } + out := new(RedshiftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedshiftParameters) DeepCopyInto(out *RedshiftParameters) { + *out = *in + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(RedshiftErrorHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.IntermediateBucketName != nil { + in, out := &in.IntermediateBucketName, &out.IntermediateBucketName + *out = new(string) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftParameters. +func (in *RedshiftParameters) DeepCopy() *RedshiftParameters { + if in == nil { + return nil + } + out := new(RedshiftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InitParameters) DeepCopyInto(out *S3InitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.S3OutputFormatConfig != nil { + in, out := &in.S3OutputFormatConfig, &out.S3OutputFormatConfig + *out = new(S3OutputFormatConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InitParameters. 
+func (in *S3InitParameters) DeepCopy() *S3InitParameters { + if in == nil { + return nil + } + out := new(S3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InputFormatConfigInitParameters) DeepCopyInto(out *S3InputFormatConfigInitParameters) { + *out = *in + if in.S3InputFileType != nil { + in, out := &in.S3InputFileType, &out.S3InputFileType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InputFormatConfigInitParameters. +func (in *S3InputFormatConfigInitParameters) DeepCopy() *S3InputFormatConfigInitParameters { + if in == nil { + return nil + } + out := new(S3InputFormatConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InputFormatConfigObservation) DeepCopyInto(out *S3InputFormatConfigObservation) { + *out = *in + if in.S3InputFileType != nil { + in, out := &in.S3InputFileType, &out.S3InputFileType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InputFormatConfigObservation. +func (in *S3InputFormatConfigObservation) DeepCopy() *S3InputFormatConfigObservation { + if in == nil { + return nil + } + out := new(S3InputFormatConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3InputFormatConfigParameters) DeepCopyInto(out *S3InputFormatConfigParameters) { + *out = *in + if in.S3InputFileType != nil { + in, out := &in.S3InputFileType, &out.S3InputFileType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InputFormatConfigParameters. +func (in *S3InputFormatConfigParameters) DeepCopy() *S3InputFormatConfigParameters { + if in == nil { + return nil + } + out := new(S3InputFormatConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Observation) DeepCopyInto(out *S3Observation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.S3OutputFormatConfig != nil { + in, out := &in.S3OutputFormatConfig, &out.S3OutputFormatConfig + *out = new(S3OutputFormatConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Observation. +func (in *S3Observation) DeepCopy() *S3Observation { + if in == nil { + return nil + } + out := new(S3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3OutputFormatConfigAggregationConfigInitParameters) DeepCopyInto(out *S3OutputFormatConfigAggregationConfigInitParameters) { + *out = *in + if in.AggregationType != nil { + in, out := &in.AggregationType, &out.AggregationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputFormatConfigAggregationConfigInitParameters. 
+func (in *S3OutputFormatConfigAggregationConfigInitParameters) DeepCopy() *S3OutputFormatConfigAggregationConfigInitParameters { + if in == nil { + return nil + } + out := new(S3OutputFormatConfigAggregationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3OutputFormatConfigAggregationConfigObservation) DeepCopyInto(out *S3OutputFormatConfigAggregationConfigObservation) { + *out = *in + if in.AggregationType != nil { + in, out := &in.AggregationType, &out.AggregationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputFormatConfigAggregationConfigObservation. +func (in *S3OutputFormatConfigAggregationConfigObservation) DeepCopy() *S3OutputFormatConfigAggregationConfigObservation { + if in == nil { + return nil + } + out := new(S3OutputFormatConfigAggregationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3OutputFormatConfigAggregationConfigParameters) DeepCopyInto(out *S3OutputFormatConfigAggregationConfigParameters) { + *out = *in + if in.AggregationType != nil { + in, out := &in.AggregationType, &out.AggregationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputFormatConfigAggregationConfigParameters. +func (in *S3OutputFormatConfigAggregationConfigParameters) DeepCopy() *S3OutputFormatConfigAggregationConfigParameters { + if in == nil { + return nil + } + out := new(S3OutputFormatConfigAggregationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3OutputFormatConfigInitParameters) DeepCopyInto(out *S3OutputFormatConfigInitParameters) { + *out = *in + if in.AggregationConfig != nil { + in, out := &in.AggregationConfig, &out.AggregationConfig + *out = new(AggregationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileType != nil { + in, out := &in.FileType, &out.FileType + *out = new(string) + **out = **in + } + if in.PrefixConfig != nil { + in, out := &in.PrefixConfig, &out.PrefixConfig + *out = new(PrefixConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PreserveSourceDataTyping != nil { + in, out := &in.PreserveSourceDataTyping, &out.PreserveSourceDataTyping + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputFormatConfigInitParameters. +func (in *S3OutputFormatConfigInitParameters) DeepCopy() *S3OutputFormatConfigInitParameters { + if in == nil { + return nil + } + out := new(S3OutputFormatConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3OutputFormatConfigObservation) DeepCopyInto(out *S3OutputFormatConfigObservation) { + *out = *in + if in.AggregationConfig != nil { + in, out := &in.AggregationConfig, &out.AggregationConfig + *out = new(AggregationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.FileType != nil { + in, out := &in.FileType, &out.FileType + *out = new(string) + **out = **in + } + if in.PrefixConfig != nil { + in, out := &in.PrefixConfig, &out.PrefixConfig + *out = new(PrefixConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.PreserveSourceDataTyping != nil { + in, out := &in.PreserveSourceDataTyping, &out.PreserveSourceDataTyping + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputFormatConfigObservation. 
+func (in *S3OutputFormatConfigObservation) DeepCopy() *S3OutputFormatConfigObservation { + if in == nil { + return nil + } + out := new(S3OutputFormatConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3OutputFormatConfigParameters) DeepCopyInto(out *S3OutputFormatConfigParameters) { + *out = *in + if in.AggregationConfig != nil { + in, out := &in.AggregationConfig, &out.AggregationConfig + *out = new(AggregationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.FileType != nil { + in, out := &in.FileType, &out.FileType + *out = new(string) + **out = **in + } + if in.PrefixConfig != nil { + in, out := &in.PrefixConfig, &out.PrefixConfig + *out = new(PrefixConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.PreserveSourceDataTyping != nil { + in, out := &in.PreserveSourceDataTyping, &out.PreserveSourceDataTyping + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputFormatConfigParameters. +func (in *S3OutputFormatConfigParameters) DeepCopy() *S3OutputFormatConfigParameters { + if in == nil { + return nil + } + out := new(S3OutputFormatConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3OutputFormatConfigPrefixConfigInitParameters) DeepCopyInto(out *S3OutputFormatConfigPrefixConfigInitParameters) { + *out = *in + if in.PrefixFormat != nil { + in, out := &in.PrefixFormat, &out.PrefixFormat + *out = new(string) + **out = **in + } + if in.PrefixType != nil { + in, out := &in.PrefixType, &out.PrefixType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputFormatConfigPrefixConfigInitParameters. 
+func (in *S3OutputFormatConfigPrefixConfigInitParameters) DeepCopy() *S3OutputFormatConfigPrefixConfigInitParameters { + if in == nil { + return nil + } + out := new(S3OutputFormatConfigPrefixConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3OutputFormatConfigPrefixConfigObservation) DeepCopyInto(out *S3OutputFormatConfigPrefixConfigObservation) { + *out = *in + if in.PrefixFormat != nil { + in, out := &in.PrefixFormat, &out.PrefixFormat + *out = new(string) + **out = **in + } + if in.PrefixType != nil { + in, out := &in.PrefixType, &out.PrefixType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputFormatConfigPrefixConfigObservation. +func (in *S3OutputFormatConfigPrefixConfigObservation) DeepCopy() *S3OutputFormatConfigPrefixConfigObservation { + if in == nil { + return nil + } + out := new(S3OutputFormatConfigPrefixConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3OutputFormatConfigPrefixConfigParameters) DeepCopyInto(out *S3OutputFormatConfigPrefixConfigParameters) { + *out = *in + if in.PrefixFormat != nil { + in, out := &in.PrefixFormat, &out.PrefixFormat + *out = new(string) + **out = **in + } + if in.PrefixType != nil { + in, out := &in.PrefixType, &out.PrefixType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputFormatConfigPrefixConfigParameters. 
+func (in *S3OutputFormatConfigPrefixConfigParameters) DeepCopy() *S3OutputFormatConfigPrefixConfigParameters { + if in == nil { + return nil + } + out := new(S3OutputFormatConfigPrefixConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Parameters) DeepCopyInto(out *S3Parameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.S3OutputFormatConfig != nil { + in, out := &in.S3OutputFormatConfig, &out.S3OutputFormatConfig + *out = new(S3OutputFormatConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Parameters. +func (in *S3Parameters) DeepCopy() *S3Parameters { + if in == nil { + return nil + } + out := new(S3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SalesforceErrorHandlingConfigInitParameters) DeepCopyInto(out *SalesforceErrorHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SalesforceErrorHandlingConfigInitParameters. +func (in *SalesforceErrorHandlingConfigInitParameters) DeepCopy() *SalesforceErrorHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(SalesforceErrorHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SalesforceErrorHandlingConfigObservation) DeepCopyInto(out *SalesforceErrorHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SalesforceErrorHandlingConfigObservation. 
+func (in *SalesforceErrorHandlingConfigObservation) DeepCopy() *SalesforceErrorHandlingConfigObservation { + if in == nil { + return nil + } + out := new(SalesforceErrorHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SalesforceErrorHandlingConfigParameters) DeepCopyInto(out *SalesforceErrorHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SalesforceErrorHandlingConfigParameters. +func (in *SalesforceErrorHandlingConfigParameters) DeepCopy() *SalesforceErrorHandlingConfigParameters { + if in == nil { + return nil + } + out := new(SalesforceErrorHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SalesforceInitParameters) DeepCopyInto(out *SalesforceInitParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(SalesforceErrorHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SalesforceInitParameters. +func (in *SalesforceInitParameters) DeepCopy() *SalesforceInitParameters { + if in == nil { + return nil + } + out := new(SalesforceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SalesforceObservation) DeepCopyInto(out *SalesforceObservation) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(SalesforceErrorHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SalesforceObservation. +func (in *SalesforceObservation) DeepCopy() *SalesforceObservation { + if in == nil { + return nil + } + out := new(SalesforceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SalesforceParameters) DeepCopyInto(out *SalesforceParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(SalesforceErrorHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SalesforceParameters. +func (in *SalesforceParameters) DeepCopy() *SalesforceParameters { + if in == nil { + return nil + } + out := new(SalesforceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SapoDataErrorHandlingConfigInitParameters) DeepCopyInto(out *SapoDataErrorHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SapoDataErrorHandlingConfigInitParameters. 
+func (in *SapoDataErrorHandlingConfigInitParameters) DeepCopy() *SapoDataErrorHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(SapoDataErrorHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SapoDataErrorHandlingConfigObservation) DeepCopyInto(out *SapoDataErrorHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SapoDataErrorHandlingConfigObservation. +func (in *SapoDataErrorHandlingConfigObservation) DeepCopy() *SapoDataErrorHandlingConfigObservation { + if in == nil { + return nil + } + out := new(SapoDataErrorHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SapoDataErrorHandlingConfigParameters) DeepCopyInto(out *SapoDataErrorHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SapoDataErrorHandlingConfigParameters. +func (in *SapoDataErrorHandlingConfigParameters) DeepCopy() *SapoDataErrorHandlingConfigParameters { + if in == nil { + return nil + } + out := new(SapoDataErrorHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SapoDataInitParameters) DeepCopyInto(out *SapoDataInitParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(SapoDataErrorHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ObjectPath != nil { + in, out := &in.ObjectPath, &out.ObjectPath + *out = new(string) + **out = **in + } + if in.SuccessResponseHandlingConfig != nil { + in, out := &in.SuccessResponseHandlingConfig, &out.SuccessResponseHandlingConfig + *out = new(SuccessResponseHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SapoDataInitParameters. +func (in *SapoDataInitParameters) DeepCopy() *SapoDataInitParameters { + if in == nil { + return nil + } + out := new(SapoDataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SapoDataObservation) DeepCopyInto(out *SapoDataObservation) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(SapoDataErrorHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ObjectPath != nil { + in, out := &in.ObjectPath, &out.ObjectPath + *out = new(string) + **out = **in + } + if in.SuccessResponseHandlingConfig != nil { + in, out := &in.SuccessResponseHandlingConfig, &out.SuccessResponseHandlingConfig + *out = new(SuccessResponseHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SapoDataObservation. +func (in *SapoDataObservation) DeepCopy() *SapoDataObservation { + if in == nil { + return nil + } + out := new(SapoDataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SapoDataParameters) DeepCopyInto(out *SapoDataParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(SapoDataErrorHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ObjectPath != nil { + in, out := &in.ObjectPath, &out.ObjectPath + *out = new(string) + **out = **in + } + if in.SuccessResponseHandlingConfig != nil { + in, out := &in.SuccessResponseHandlingConfig, &out.SuccessResponseHandlingConfig + *out = new(SuccessResponseHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SapoDataParameters. +func (in *SapoDataParameters) DeepCopy() *SapoDataParameters { + if in == nil { + return nil + } + out := new(SapoDataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduledInitParameters) DeepCopyInto(out *ScheduledInitParameters) { + *out = *in + if in.DataPullMode != nil { + in, out := &in.DataPullMode, &out.DataPullMode + *out = new(string) + **out = **in + } + if in.FirstExecutionFrom != nil { + in, out := &in.FirstExecutionFrom, &out.FirstExecutionFrom + *out = new(string) + **out = **in + } + if in.ScheduleEndTime != nil { + in, out := &in.ScheduleEndTime, &out.ScheduleEndTime + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.ScheduleOffset != nil { + in, out := &in.ScheduleOffset, &out.ScheduleOffset + *out = new(float64) + **out = **in + } + if in.ScheduleStartTime != nil { + in, out := &in.ScheduleStartTime, &out.ScheduleStartTime + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledInitParameters. +func (in *ScheduledInitParameters) DeepCopy() *ScheduledInitParameters { + if in == nil { + return nil + } + out := new(ScheduledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduledObservation) DeepCopyInto(out *ScheduledObservation) { + *out = *in + if in.DataPullMode != nil { + in, out := &in.DataPullMode, &out.DataPullMode + *out = new(string) + **out = **in + } + if in.FirstExecutionFrom != nil { + in, out := &in.FirstExecutionFrom, &out.FirstExecutionFrom + *out = new(string) + **out = **in + } + if in.ScheduleEndTime != nil { + in, out := &in.ScheduleEndTime, &out.ScheduleEndTime + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.ScheduleOffset != nil { + in, out := &in.ScheduleOffset, &out.ScheduleOffset + *out = new(float64) + **out = **in + } + if in.ScheduleStartTime != nil { + in, out := &in.ScheduleStartTime, &out.ScheduleStartTime + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledObservation. +func (in *ScheduledObservation) DeepCopy() *ScheduledObservation { + if in == nil { + return nil + } + out := new(ScheduledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduledParameters) DeepCopyInto(out *ScheduledParameters) { + *out = *in + if in.DataPullMode != nil { + in, out := &in.DataPullMode, &out.DataPullMode + *out = new(string) + **out = **in + } + if in.FirstExecutionFrom != nil { + in, out := &in.FirstExecutionFrom, &out.FirstExecutionFrom + *out = new(string) + **out = **in + } + if in.ScheduleEndTime != nil { + in, out := &in.ScheduleEndTime, &out.ScheduleEndTime + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.ScheduleOffset != nil { + in, out := &in.ScheduleOffset, &out.ScheduleOffset + *out = new(float64) + **out = **in + } + if in.ScheduleStartTime != nil { + in, out := &in.ScheduleStartTime, &out.ScheduleStartTime + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledParameters. +func (in *ScheduledParameters) DeepCopy() *ScheduledParameters { + if in == nil { + return nil + } + out := new(ScheduledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceNowInitParameters) DeepCopyInto(out *ServiceNowInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceNowInitParameters. +func (in *ServiceNowInitParameters) DeepCopy() *ServiceNowInitParameters { + if in == nil { + return nil + } + out := new(ServiceNowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ServiceNowObservation) DeepCopyInto(out *ServiceNowObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceNowObservation. +func (in *ServiceNowObservation) DeepCopy() *ServiceNowObservation { + if in == nil { + return nil + } + out := new(ServiceNowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceNowParameters) DeepCopyInto(out *ServiceNowParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceNowParameters. +func (in *ServiceNowParameters) DeepCopy() *ServiceNowParameters { + if in == nil { + return nil + } + out := new(ServiceNowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingularInitParameters) DeepCopyInto(out *SingularInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingularInitParameters. +func (in *SingularInitParameters) DeepCopy() *SingularInitParameters { + if in == nil { + return nil + } + out := new(SingularInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingularObservation) DeepCopyInto(out *SingularObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingularObservation. +func (in *SingularObservation) DeepCopy() *SingularObservation { + if in == nil { + return nil + } + out := new(SingularObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingularParameters) DeepCopyInto(out *SingularParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingularParameters. +func (in *SingularParameters) DeepCopy() *SingularParameters { + if in == nil { + return nil + } + out := new(SingularParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlackInitParameters) DeepCopyInto(out *SlackInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackInitParameters. +func (in *SlackInitParameters) DeepCopy() *SlackInitParameters { + if in == nil { + return nil + } + out := new(SlackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlackObservation) DeepCopyInto(out *SlackObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackObservation. +func (in *SlackObservation) DeepCopy() *SlackObservation { + if in == nil { + return nil + } + out := new(SlackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlackParameters) DeepCopyInto(out *SlackParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackParameters. +func (in *SlackParameters) DeepCopy() *SlackParameters { + if in == nil { + return nil + } + out := new(SlackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeErrorHandlingConfigInitParameters) DeepCopyInto(out *SnowflakeErrorHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeErrorHandlingConfigInitParameters. 
+func (in *SnowflakeErrorHandlingConfigInitParameters) DeepCopy() *SnowflakeErrorHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeErrorHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeErrorHandlingConfigObservation) DeepCopyInto(out *SnowflakeErrorHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeErrorHandlingConfigObservation. +func (in *SnowflakeErrorHandlingConfigObservation) DeepCopy() *SnowflakeErrorHandlingConfigObservation { + if in == nil { + return nil + } + out := new(SnowflakeErrorHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeErrorHandlingConfigParameters) DeepCopyInto(out *SnowflakeErrorHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeErrorHandlingConfigParameters. +func (in *SnowflakeErrorHandlingConfigParameters) DeepCopy() *SnowflakeErrorHandlingConfigParameters { + if in == nil { + return nil + } + out := new(SnowflakeErrorHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeInitParameters) DeepCopyInto(out *SnowflakeInitParameters) { + *out = *in + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(SnowflakeErrorHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IntermediateBucketName != nil { + in, out := &in.IntermediateBucketName, &out.IntermediateBucketName + *out = new(string) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeInitParameters. 
+func (in *SnowflakeInitParameters) DeepCopy() *SnowflakeInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeObservation) DeepCopyInto(out *SnowflakeObservation) { + *out = *in + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(SnowflakeErrorHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.IntermediateBucketName != nil { + in, out := &in.IntermediateBucketName, &out.IntermediateBucketName + *out = new(string) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeObservation. +func (in *SnowflakeObservation) DeepCopy() *SnowflakeObservation { + if in == nil { + return nil + } + out := new(SnowflakeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeParameters) DeepCopyInto(out *SnowflakeParameters) { + *out = *in + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(SnowflakeErrorHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.IntermediateBucketName != nil { + in, out := &in.IntermediateBucketName, &out.IntermediateBucketName + *out = new(string) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeParameters. +func (in *SnowflakeParameters) DeepCopy() *SnowflakeParameters { + if in == nil { + return nil + } + out := new(SnowflakeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesCustomConnectorInitParameters) DeepCopyInto(out *SourceConnectorPropertiesCustomConnectorInitParameters) { + *out = *in + if in.CustomProperties != nil { + in, out := &in.CustomProperties, &out.CustomProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EntityName != nil { + in, out := &in.EntityName, &out.EntityName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesCustomConnectorInitParameters. 
+func (in *SourceConnectorPropertiesCustomConnectorInitParameters) DeepCopy() *SourceConnectorPropertiesCustomConnectorInitParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesCustomConnectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesCustomConnectorObservation) DeepCopyInto(out *SourceConnectorPropertiesCustomConnectorObservation) { + *out = *in + if in.CustomProperties != nil { + in, out := &in.CustomProperties, &out.CustomProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EntityName != nil { + in, out := &in.EntityName, &out.EntityName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesCustomConnectorObservation. +func (in *SourceConnectorPropertiesCustomConnectorObservation) DeepCopy() *SourceConnectorPropertiesCustomConnectorObservation { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesCustomConnectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConnectorPropertiesCustomConnectorParameters) DeepCopyInto(out *SourceConnectorPropertiesCustomConnectorParameters) { + *out = *in + if in.CustomProperties != nil { + in, out := &in.CustomProperties, &out.CustomProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EntityName != nil { + in, out := &in.EntityName, &out.EntityName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesCustomConnectorParameters. +func (in *SourceConnectorPropertiesCustomConnectorParameters) DeepCopy() *SourceConnectorPropertiesCustomConnectorParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesCustomConnectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConnectorPropertiesInitParameters) DeepCopyInto(out *SourceConnectorPropertiesInitParameters) { + *out = *in + if in.Amplitude != nil { + in, out := &in.Amplitude, &out.Amplitude + *out = new(AmplitudeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConnector != nil { + in, out := &in.CustomConnector, &out.CustomConnector + *out = new(SourceConnectorPropertiesCustomConnectorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Datadog != nil { + in, out := &in.Datadog, &out.Datadog + *out = new(DatadogInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Dynatrace != nil { + in, out := &in.Dynatrace, &out.Dynatrace + *out = new(DynatraceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleAnalytics != nil { + in, out := &in.GoogleAnalytics, &out.GoogleAnalytics + *out = new(GoogleAnalyticsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InforNexus != nil { + in, out := &in.InforNexus, &out.InforNexus + *out = new(InforNexusInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Marketo != nil { + in, out := &in.Marketo, &out.Marketo + *out = new(SourceConnectorPropertiesMarketoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(SourceConnectorPropertiesS3InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Salesforce != nil { + in, out := &in.Salesforce, &out.Salesforce + *out = new(SourceConnectorPropertiesSalesforceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SapoData != nil { + in, out := &in.SapoData, &out.SapoData + *out = new(SourceConnectorPropertiesSapoDataInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceNow != nil { + in, out := &in.ServiceNow, &out.ServiceNow + *out = new(ServiceNowInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Singular != nil { + in, out := &in.Singular, &out.Singular + *out = new(SingularInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Slack != nil { + in, out := &in.Slack, &out.Slack + *out = 
new(SlackInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trendmicro != nil { + in, out := &in.Trendmicro, &out.Trendmicro + *out = new(TrendmicroInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Veeva != nil { + in, out := &in.Veeva, &out.Veeva + *out = new(VeevaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Zendesk != nil { + in, out := &in.Zendesk, &out.Zendesk + *out = new(SourceConnectorPropertiesZendeskInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesInitParameters. +func (in *SourceConnectorPropertiesInitParameters) DeepCopy() *SourceConnectorPropertiesInitParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesMarketoInitParameters) DeepCopyInto(out *SourceConnectorPropertiesMarketoInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesMarketoInitParameters. +func (in *SourceConnectorPropertiesMarketoInitParameters) DeepCopy() *SourceConnectorPropertiesMarketoInitParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesMarketoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConnectorPropertiesMarketoObservation) DeepCopyInto(out *SourceConnectorPropertiesMarketoObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesMarketoObservation. +func (in *SourceConnectorPropertiesMarketoObservation) DeepCopy() *SourceConnectorPropertiesMarketoObservation { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesMarketoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesMarketoParameters) DeepCopyInto(out *SourceConnectorPropertiesMarketoParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesMarketoParameters. +func (in *SourceConnectorPropertiesMarketoParameters) DeepCopy() *SourceConnectorPropertiesMarketoParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesMarketoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConnectorPropertiesObservation) DeepCopyInto(out *SourceConnectorPropertiesObservation) { + *out = *in + if in.Amplitude != nil { + in, out := &in.Amplitude, &out.Amplitude + *out = new(AmplitudeObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConnector != nil { + in, out := &in.CustomConnector, &out.CustomConnector + *out = new(SourceConnectorPropertiesCustomConnectorObservation) + (*in).DeepCopyInto(*out) + } + if in.Datadog != nil { + in, out := &in.Datadog, &out.Datadog + *out = new(DatadogObservation) + (*in).DeepCopyInto(*out) + } + if in.Dynatrace != nil { + in, out := &in.Dynatrace, &out.Dynatrace + *out = new(DynatraceObservation) + (*in).DeepCopyInto(*out) + } + if in.GoogleAnalytics != nil { + in, out := &in.GoogleAnalytics, &out.GoogleAnalytics + *out = new(GoogleAnalyticsObservation) + (*in).DeepCopyInto(*out) + } + if in.InforNexus != nil { + in, out := &in.InforNexus, &out.InforNexus + *out = new(InforNexusObservation) + (*in).DeepCopyInto(*out) + } + if in.Marketo != nil { + in, out := &in.Marketo, &out.Marketo + *out = new(SourceConnectorPropertiesMarketoObservation) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(SourceConnectorPropertiesS3Observation) + (*in).DeepCopyInto(*out) + } + if in.Salesforce != nil { + in, out := &in.Salesforce, &out.Salesforce + *out = new(SourceConnectorPropertiesSalesforceObservation) + (*in).DeepCopyInto(*out) + } + if in.SapoData != nil { + in, out := &in.SapoData, &out.SapoData + *out = new(SourceConnectorPropertiesSapoDataObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceNow != nil { + in, out := &in.ServiceNow, &out.ServiceNow + *out = new(ServiceNowObservation) + (*in).DeepCopyInto(*out) + } + if in.Singular != nil { + in, out := &in.Singular, &out.Singular + *out = new(SingularObservation) + (*in).DeepCopyInto(*out) + } + if in.Slack != nil { + in, out := &in.Slack, &out.Slack + *out = new(SlackObservation) + 
(*in).DeepCopyInto(*out) + } + if in.Trendmicro != nil { + in, out := &in.Trendmicro, &out.Trendmicro + *out = new(TrendmicroObservation) + (*in).DeepCopyInto(*out) + } + if in.Veeva != nil { + in, out := &in.Veeva, &out.Veeva + *out = new(VeevaObservation) + (*in).DeepCopyInto(*out) + } + if in.Zendesk != nil { + in, out := &in.Zendesk, &out.Zendesk + *out = new(SourceConnectorPropertiesZendeskObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesObservation. +func (in *SourceConnectorPropertiesObservation) DeepCopy() *SourceConnectorPropertiesObservation { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesParameters) DeepCopyInto(out *SourceConnectorPropertiesParameters) { + *out = *in + if in.Amplitude != nil { + in, out := &in.Amplitude, &out.Amplitude + *out = new(AmplitudeParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConnector != nil { + in, out := &in.CustomConnector, &out.CustomConnector + *out = new(SourceConnectorPropertiesCustomConnectorParameters) + (*in).DeepCopyInto(*out) + } + if in.Datadog != nil { + in, out := &in.Datadog, &out.Datadog + *out = new(DatadogParameters) + (*in).DeepCopyInto(*out) + } + if in.Dynatrace != nil { + in, out := &in.Dynatrace, &out.Dynatrace + *out = new(DynatraceParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleAnalytics != nil { + in, out := &in.GoogleAnalytics, &out.GoogleAnalytics + *out = new(GoogleAnalyticsParameters) + (*in).DeepCopyInto(*out) + } + if in.InforNexus != nil { + in, out := &in.InforNexus, &out.InforNexus + *out = new(InforNexusParameters) + (*in).DeepCopyInto(*out) + } + if in.Marketo != nil { + in, out := &in.Marketo, &out.Marketo + *out = 
new(SourceConnectorPropertiesMarketoParameters) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(SourceConnectorPropertiesS3Parameters) + (*in).DeepCopyInto(*out) + } + if in.Salesforce != nil { + in, out := &in.Salesforce, &out.Salesforce + *out = new(SourceConnectorPropertiesSalesforceParameters) + (*in).DeepCopyInto(*out) + } + if in.SapoData != nil { + in, out := &in.SapoData, &out.SapoData + *out = new(SourceConnectorPropertiesSapoDataParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceNow != nil { + in, out := &in.ServiceNow, &out.ServiceNow + *out = new(ServiceNowParameters) + (*in).DeepCopyInto(*out) + } + if in.Singular != nil { + in, out := &in.Singular, &out.Singular + *out = new(SingularParameters) + (*in).DeepCopyInto(*out) + } + if in.Slack != nil { + in, out := &in.Slack, &out.Slack + *out = new(SlackParameters) + (*in).DeepCopyInto(*out) + } + if in.Trendmicro != nil { + in, out := &in.Trendmicro, &out.Trendmicro + *out = new(TrendmicroParameters) + (*in).DeepCopyInto(*out) + } + if in.Veeva != nil { + in, out := &in.Veeva, &out.Veeva + *out = new(VeevaParameters) + (*in).DeepCopyInto(*out) + } + if in.Zendesk != nil { + in, out := &in.Zendesk, &out.Zendesk + *out = new(SourceConnectorPropertiesZendeskParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesParameters. +func (in *SourceConnectorPropertiesParameters) DeepCopy() *SourceConnectorPropertiesParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConnectorPropertiesS3InitParameters) DeepCopyInto(out *SourceConnectorPropertiesS3InitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.S3InputFormatConfig != nil { + in, out := &in.S3InputFormatConfig, &out.S3InputFormatConfig + *out = new(S3InputFormatConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesS3InitParameters. +func (in *SourceConnectorPropertiesS3InitParameters) DeepCopy() *SourceConnectorPropertiesS3InitParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesS3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesS3Observation) DeepCopyInto(out *SourceConnectorPropertiesS3Observation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.S3InputFormatConfig != nil { + in, out := &in.S3InputFormatConfig, &out.S3InputFormatConfig + *out = new(S3InputFormatConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesS3Observation. 
+func (in *SourceConnectorPropertiesS3Observation) DeepCopy() *SourceConnectorPropertiesS3Observation { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesS3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesS3Parameters) DeepCopyInto(out *SourceConnectorPropertiesS3Parameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.S3InputFormatConfig != nil { + in, out := &in.S3InputFormatConfig, &out.S3InputFormatConfig + *out = new(S3InputFormatConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesS3Parameters. +func (in *SourceConnectorPropertiesS3Parameters) DeepCopy() *SourceConnectorPropertiesS3Parameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesS3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConnectorPropertiesSalesforceInitParameters) DeepCopyInto(out *SourceConnectorPropertiesSalesforceInitParameters) { + *out = *in + if in.EnableDynamicFieldUpdate != nil { + in, out := &in.EnableDynamicFieldUpdate, &out.EnableDynamicFieldUpdate + *out = new(bool) + **out = **in + } + if in.IncludeDeletedRecords != nil { + in, out := &in.IncludeDeletedRecords, &out.IncludeDeletedRecords + *out = new(bool) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesSalesforceInitParameters. +func (in *SourceConnectorPropertiesSalesforceInitParameters) DeepCopy() *SourceConnectorPropertiesSalesforceInitParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesSalesforceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesSalesforceObservation) DeepCopyInto(out *SourceConnectorPropertiesSalesforceObservation) { + *out = *in + if in.EnableDynamicFieldUpdate != nil { + in, out := &in.EnableDynamicFieldUpdate, &out.EnableDynamicFieldUpdate + *out = new(bool) + **out = **in + } + if in.IncludeDeletedRecords != nil { + in, out := &in.IncludeDeletedRecords, &out.IncludeDeletedRecords + *out = new(bool) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesSalesforceObservation. 
+func (in *SourceConnectorPropertiesSalesforceObservation) DeepCopy() *SourceConnectorPropertiesSalesforceObservation { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesSalesforceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesSalesforceParameters) DeepCopyInto(out *SourceConnectorPropertiesSalesforceParameters) { + *out = *in + if in.EnableDynamicFieldUpdate != nil { + in, out := &in.EnableDynamicFieldUpdate, &out.EnableDynamicFieldUpdate + *out = new(bool) + **out = **in + } + if in.IncludeDeletedRecords != nil { + in, out := &in.IncludeDeletedRecords, &out.IncludeDeletedRecords + *out = new(bool) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesSalesforceParameters. +func (in *SourceConnectorPropertiesSalesforceParameters) DeepCopy() *SourceConnectorPropertiesSalesforceParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesSalesforceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesSapoDataInitParameters) DeepCopyInto(out *SourceConnectorPropertiesSapoDataInitParameters) { + *out = *in + if in.ObjectPath != nil { + in, out := &in.ObjectPath, &out.ObjectPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesSapoDataInitParameters. 
+func (in *SourceConnectorPropertiesSapoDataInitParameters) DeepCopy() *SourceConnectorPropertiesSapoDataInitParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesSapoDataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesSapoDataObservation) DeepCopyInto(out *SourceConnectorPropertiesSapoDataObservation) { + *out = *in + if in.ObjectPath != nil { + in, out := &in.ObjectPath, &out.ObjectPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesSapoDataObservation. +func (in *SourceConnectorPropertiesSapoDataObservation) DeepCopy() *SourceConnectorPropertiesSapoDataObservation { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesSapoDataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesSapoDataParameters) DeepCopyInto(out *SourceConnectorPropertiesSapoDataParameters) { + *out = *in + if in.ObjectPath != nil { + in, out := &in.ObjectPath, &out.ObjectPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesSapoDataParameters. +func (in *SourceConnectorPropertiesSapoDataParameters) DeepCopy() *SourceConnectorPropertiesSapoDataParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesSapoDataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConnectorPropertiesZendeskInitParameters) DeepCopyInto(out *SourceConnectorPropertiesZendeskInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesZendeskInitParameters. +func (in *SourceConnectorPropertiesZendeskInitParameters) DeepCopy() *SourceConnectorPropertiesZendeskInitParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesZendeskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesZendeskObservation) DeepCopyInto(out *SourceConnectorPropertiesZendeskObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesZendeskObservation. +func (in *SourceConnectorPropertiesZendeskObservation) DeepCopy() *SourceConnectorPropertiesZendeskObservation { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesZendeskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceConnectorPropertiesZendeskParameters) DeepCopyInto(out *SourceConnectorPropertiesZendeskParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConnectorPropertiesZendeskParameters. 
+func (in *SourceConnectorPropertiesZendeskParameters) DeepCopy() *SourceConnectorPropertiesZendeskParameters { + if in == nil { + return nil + } + out := new(SourceConnectorPropertiesZendeskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceFlowConfigInitParameters) DeepCopyInto(out *SourceFlowConfigInitParameters) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.ConnectorProfileName != nil { + in, out := &in.ConnectorProfileName, &out.ConnectorProfileName + *out = new(string) + **out = **in + } + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.IncrementalPullConfig != nil { + in, out := &in.IncrementalPullConfig, &out.IncrementalPullConfig + *out = new(IncrementalPullConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceConnectorProperties != nil { + in, out := &in.SourceConnectorProperties, &out.SourceConnectorProperties + *out = new(SourceConnectorPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceFlowConfigInitParameters. +func (in *SourceFlowConfigInitParameters) DeepCopy() *SourceFlowConfigInitParameters { + if in == nil { + return nil + } + out := new(SourceFlowConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceFlowConfigObservation) DeepCopyInto(out *SourceFlowConfigObservation) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.ConnectorProfileName != nil { + in, out := &in.ConnectorProfileName, &out.ConnectorProfileName + *out = new(string) + **out = **in + } + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.IncrementalPullConfig != nil { + in, out := &in.IncrementalPullConfig, &out.IncrementalPullConfig + *out = new(IncrementalPullConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SourceConnectorProperties != nil { + in, out := &in.SourceConnectorProperties, &out.SourceConnectorProperties + *out = new(SourceConnectorPropertiesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceFlowConfigObservation. +func (in *SourceFlowConfigObservation) DeepCopy() *SourceFlowConfigObservation { + if in == nil { + return nil + } + out := new(SourceFlowConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceFlowConfigParameters) DeepCopyInto(out *SourceFlowConfigParameters) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.ConnectorProfileName != nil { + in, out := &in.ConnectorProfileName, &out.ConnectorProfileName + *out = new(string) + **out = **in + } + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.IncrementalPullConfig != nil { + in, out := &in.IncrementalPullConfig, &out.IncrementalPullConfig + *out = new(IncrementalPullConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceConnectorProperties != nil { + in, out := &in.SourceConnectorProperties, &out.SourceConnectorProperties + *out = new(SourceConnectorPropertiesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceFlowConfigParameters. +func (in *SourceFlowConfigParameters) DeepCopy() *SourceFlowConfigParameters { + if in == nil { + return nil + } + out := new(SourceFlowConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuccessResponseHandlingConfigInitParameters) DeepCopyInto(out *SuccessResponseHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessResponseHandlingConfigInitParameters. 
+func (in *SuccessResponseHandlingConfigInitParameters) DeepCopy() *SuccessResponseHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(SuccessResponseHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuccessResponseHandlingConfigObservation) DeepCopyInto(out *SuccessResponseHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessResponseHandlingConfigObservation. +func (in *SuccessResponseHandlingConfigObservation) DeepCopy() *SuccessResponseHandlingConfigObservation { + if in == nil { + return nil + } + out := new(SuccessResponseHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuccessResponseHandlingConfigParameters) DeepCopyInto(out *SuccessResponseHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessResponseHandlingConfigParameters. 
+func (in *SuccessResponseHandlingConfigParameters) DeepCopy() *SuccessResponseHandlingConfigParameters { + if in == nil { + return nil + } + out := new(SuccessResponseHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskInitParameters) DeepCopyInto(out *TaskInitParameters) { + *out = *in + if in.ConnectorOperator != nil { + in, out := &in.ConnectorOperator, &out.ConnectorOperator + *out = make([]ConnectorOperatorInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationField != nil { + in, out := &in.DestinationField, &out.DestinationField + *out = new(string) + **out = **in + } + if in.SourceFields != nil { + in, out := &in.SourceFields, &out.SourceFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TaskProperties != nil { + in, out := &in.TaskProperties, &out.TaskProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskType != nil { + in, out := &in.TaskType, &out.TaskType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskInitParameters. +func (in *TaskInitParameters) DeepCopy() *TaskInitParameters { + if in == nil { + return nil + } + out := new(TaskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskObservation) DeepCopyInto(out *TaskObservation) { + *out = *in + if in.ConnectorOperator != nil { + in, out := &in.ConnectorOperator, &out.ConnectorOperator + *out = make([]ConnectorOperatorObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationField != nil { + in, out := &in.DestinationField, &out.DestinationField + *out = new(string) + **out = **in + } + if in.SourceFields != nil { + in, out := &in.SourceFields, &out.SourceFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TaskProperties != nil { + in, out := &in.TaskProperties, &out.TaskProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskType != nil { + in, out := &in.TaskType, &out.TaskType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskObservation. +func (in *TaskObservation) DeepCopy() *TaskObservation { + if in == nil { + return nil + } + out := new(TaskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskParameters) DeepCopyInto(out *TaskParameters) { + *out = *in + if in.ConnectorOperator != nil { + in, out := &in.ConnectorOperator, &out.ConnectorOperator + *out = make([]ConnectorOperatorParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationField != nil { + in, out := &in.DestinationField, &out.DestinationField + *out = new(string) + **out = **in + } + if in.SourceFields != nil { + in, out := &in.SourceFields, &out.SourceFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TaskProperties != nil { + in, out := &in.TaskProperties, &out.TaskProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskType != nil { + in, out := &in.TaskType, &out.TaskType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskParameters. +func (in *TaskParameters) DeepCopy() *TaskParameters { + if in == nil { + return nil + } + out := new(TaskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrendmicroInitParameters) DeepCopyInto(out *TrendmicroInitParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrendmicroInitParameters. 
+func (in *TrendmicroInitParameters) DeepCopy() *TrendmicroInitParameters { + if in == nil { + return nil + } + out := new(TrendmicroInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrendmicroObservation) DeepCopyInto(out *TrendmicroObservation) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrendmicroObservation. +func (in *TrendmicroObservation) DeepCopy() *TrendmicroObservation { + if in == nil { + return nil + } + out := new(TrendmicroObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrendmicroParameters) DeepCopyInto(out *TrendmicroParameters) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrendmicroParameters. +func (in *TrendmicroParameters) DeepCopy() *TrendmicroParameters { + if in == nil { + return nil + } + out := new(TrendmicroParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerConfigInitParameters) DeepCopyInto(out *TriggerConfigInitParameters) { + *out = *in + if in.TriggerProperties != nil { + in, out := &in.TriggerProperties, &out.TriggerProperties + *out = new(TriggerPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TriggerType != nil { + in, out := &in.TriggerType, &out.TriggerType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerConfigInitParameters. +func (in *TriggerConfigInitParameters) DeepCopy() *TriggerConfigInitParameters { + if in == nil { + return nil + } + out := new(TriggerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerConfigObservation) DeepCopyInto(out *TriggerConfigObservation) { + *out = *in + if in.TriggerProperties != nil { + in, out := &in.TriggerProperties, &out.TriggerProperties + *out = new(TriggerPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.TriggerType != nil { + in, out := &in.TriggerType, &out.TriggerType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerConfigObservation. +func (in *TriggerConfigObservation) DeepCopy() *TriggerConfigObservation { + if in == nil { + return nil + } + out := new(TriggerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerConfigParameters) DeepCopyInto(out *TriggerConfigParameters) { + *out = *in + if in.TriggerProperties != nil { + in, out := &in.TriggerProperties, &out.TriggerProperties + *out = new(TriggerPropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.TriggerType != nil { + in, out := &in.TriggerType, &out.TriggerType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerConfigParameters. +func (in *TriggerConfigParameters) DeepCopy() *TriggerConfigParameters { + if in == nil { + return nil + } + out := new(TriggerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerPropertiesInitParameters) DeepCopyInto(out *TriggerPropertiesInitParameters) { + *out = *in + if in.Scheduled != nil { + in, out := &in.Scheduled, &out.Scheduled + *out = new(ScheduledInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerPropertiesInitParameters. +func (in *TriggerPropertiesInitParameters) DeepCopy() *TriggerPropertiesInitParameters { + if in == nil { + return nil + } + out := new(TriggerPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerPropertiesObservation) DeepCopyInto(out *TriggerPropertiesObservation) { + *out = *in + if in.Scheduled != nil { + in, out := &in.Scheduled, &out.Scheduled + *out = new(ScheduledObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerPropertiesObservation. 
+func (in *TriggerPropertiesObservation) DeepCopy() *TriggerPropertiesObservation { + if in == nil { + return nil + } + out := new(TriggerPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerPropertiesParameters) DeepCopyInto(out *TriggerPropertiesParameters) { + *out = *in + if in.Scheduled != nil { + in, out := &in.Scheduled, &out.Scheduled + *out = new(ScheduledParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerPropertiesParameters. +func (in *TriggerPropertiesParameters) DeepCopy() *TriggerPropertiesParameters { + if in == nil { + return nil + } + out := new(TriggerPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpsolverInitParameters) DeepCopyInto(out *UpsolverInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.S3OutputFormatConfig != nil { + in, out := &in.S3OutputFormatConfig, &out.S3OutputFormatConfig + *out = new(UpsolverS3OutputFormatConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpsolverInitParameters. +func (in *UpsolverInitParameters) DeepCopy() *UpsolverInitParameters { + if in == nil { + return nil + } + out := new(UpsolverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpsolverObservation) DeepCopyInto(out *UpsolverObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.S3OutputFormatConfig != nil { + in, out := &in.S3OutputFormatConfig, &out.S3OutputFormatConfig + *out = new(UpsolverS3OutputFormatConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpsolverObservation. +func (in *UpsolverObservation) DeepCopy() *UpsolverObservation { + if in == nil { + return nil + } + out := new(UpsolverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpsolverParameters) DeepCopyInto(out *UpsolverParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.S3OutputFormatConfig != nil { + in, out := &in.S3OutputFormatConfig, &out.S3OutputFormatConfig + *out = new(UpsolverS3OutputFormatConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpsolverParameters. +func (in *UpsolverParameters) DeepCopy() *UpsolverParameters { + if in == nil { + return nil + } + out := new(UpsolverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpsolverS3OutputFormatConfigInitParameters) DeepCopyInto(out *UpsolverS3OutputFormatConfigInitParameters) { + *out = *in + if in.AggregationConfig != nil { + in, out := &in.AggregationConfig, &out.AggregationConfig + *out = new(S3OutputFormatConfigAggregationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileType != nil { + in, out := &in.FileType, &out.FileType + *out = new(string) + **out = **in + } + if in.PrefixConfig != nil { + in, out := &in.PrefixConfig, &out.PrefixConfig + *out = new(S3OutputFormatConfigPrefixConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpsolverS3OutputFormatConfigInitParameters. +func (in *UpsolverS3OutputFormatConfigInitParameters) DeepCopy() *UpsolverS3OutputFormatConfigInitParameters { + if in == nil { + return nil + } + out := new(UpsolverS3OutputFormatConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpsolverS3OutputFormatConfigObservation) DeepCopyInto(out *UpsolverS3OutputFormatConfigObservation) { + *out = *in + if in.AggregationConfig != nil { + in, out := &in.AggregationConfig, &out.AggregationConfig + *out = new(S3OutputFormatConfigAggregationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.FileType != nil { + in, out := &in.FileType, &out.FileType + *out = new(string) + **out = **in + } + if in.PrefixConfig != nil { + in, out := &in.PrefixConfig, &out.PrefixConfig + *out = new(S3OutputFormatConfigPrefixConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpsolverS3OutputFormatConfigObservation. 
+func (in *UpsolverS3OutputFormatConfigObservation) DeepCopy() *UpsolverS3OutputFormatConfigObservation { + if in == nil { + return nil + } + out := new(UpsolverS3OutputFormatConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpsolverS3OutputFormatConfigParameters) DeepCopyInto(out *UpsolverS3OutputFormatConfigParameters) { + *out = *in + if in.AggregationConfig != nil { + in, out := &in.AggregationConfig, &out.AggregationConfig + *out = new(S3OutputFormatConfigAggregationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.FileType != nil { + in, out := &in.FileType, &out.FileType + *out = new(string) + **out = **in + } + if in.PrefixConfig != nil { + in, out := &in.PrefixConfig, &out.PrefixConfig + *out = new(S3OutputFormatConfigPrefixConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpsolverS3OutputFormatConfigParameters. +func (in *UpsolverS3OutputFormatConfigParameters) DeepCopy() *UpsolverS3OutputFormatConfigParameters { + if in == nil { + return nil + } + out := new(UpsolverS3OutputFormatConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VeevaInitParameters) DeepCopyInto(out *VeevaInitParameters) { + *out = *in + if in.DocumentType != nil { + in, out := &in.DocumentType, &out.DocumentType + *out = new(string) + **out = **in + } + if in.IncludeAllVersions != nil { + in, out := &in.IncludeAllVersions, &out.IncludeAllVersions + *out = new(bool) + **out = **in + } + if in.IncludeRenditions != nil { + in, out := &in.IncludeRenditions, &out.IncludeRenditions + *out = new(bool) + **out = **in + } + if in.IncludeSourceFiles != nil { + in, out := &in.IncludeSourceFiles, &out.IncludeSourceFiles + *out = new(bool) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VeevaInitParameters. +func (in *VeevaInitParameters) DeepCopy() *VeevaInitParameters { + if in == nil { + return nil + } + out := new(VeevaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VeevaObservation) DeepCopyInto(out *VeevaObservation) { + *out = *in + if in.DocumentType != nil { + in, out := &in.DocumentType, &out.DocumentType + *out = new(string) + **out = **in + } + if in.IncludeAllVersions != nil { + in, out := &in.IncludeAllVersions, &out.IncludeAllVersions + *out = new(bool) + **out = **in + } + if in.IncludeRenditions != nil { + in, out := &in.IncludeRenditions, &out.IncludeRenditions + *out = new(bool) + **out = **in + } + if in.IncludeSourceFiles != nil { + in, out := &in.IncludeSourceFiles, &out.IncludeSourceFiles + *out = new(bool) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VeevaObservation. 
+func (in *VeevaObservation) DeepCopy() *VeevaObservation { + if in == nil { + return nil + } + out := new(VeevaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VeevaParameters) DeepCopyInto(out *VeevaParameters) { + *out = *in + if in.DocumentType != nil { + in, out := &in.DocumentType, &out.DocumentType + *out = new(string) + **out = **in + } + if in.IncludeAllVersions != nil { + in, out := &in.IncludeAllVersions, &out.IncludeAllVersions + *out = new(bool) + **out = **in + } + if in.IncludeRenditions != nil { + in, out := &in.IncludeRenditions, &out.IncludeRenditions + *out = new(bool) + **out = **in + } + if in.IncludeSourceFiles != nil { + in, out := &in.IncludeSourceFiles, &out.IncludeSourceFiles + *out = new(bool) + **out = **in + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VeevaParameters. +func (in *VeevaParameters) DeepCopy() *VeevaParameters { + if in == nil { + return nil + } + out := new(VeevaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZendeskErrorHandlingConfigInitParameters) DeepCopyInto(out *ZendeskErrorHandlingConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZendeskErrorHandlingConfigInitParameters. +func (in *ZendeskErrorHandlingConfigInitParameters) DeepCopy() *ZendeskErrorHandlingConfigInitParameters { + if in == nil { + return nil + } + out := new(ZendeskErrorHandlingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZendeskErrorHandlingConfigObservation) DeepCopyInto(out *ZendeskErrorHandlingConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZendeskErrorHandlingConfigObservation. +func (in *ZendeskErrorHandlingConfigObservation) DeepCopy() *ZendeskErrorHandlingConfigObservation { + if in == nil { + return nil + } + out := new(ZendeskErrorHandlingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ZendeskErrorHandlingConfigParameters) DeepCopyInto(out *ZendeskErrorHandlingConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.FailOnFirstDestinationError != nil { + in, out := &in.FailOnFirstDestinationError, &out.FailOnFirstDestinationError + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZendeskErrorHandlingConfigParameters. +func (in *ZendeskErrorHandlingConfigParameters) DeepCopy() *ZendeskErrorHandlingConfigParameters { + if in == nil { + return nil + } + out := new(ZendeskErrorHandlingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZendeskInitParameters) DeepCopyInto(out *ZendeskInitParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(ZendeskErrorHandlingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZendeskInitParameters. 
+func (in *ZendeskInitParameters) DeepCopy() *ZendeskInitParameters { + if in == nil { + return nil + } + out := new(ZendeskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZendeskObservation) DeepCopyInto(out *ZendeskObservation) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(ZendeskErrorHandlingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZendeskObservation. +func (in *ZendeskObservation) DeepCopy() *ZendeskObservation { + if in == nil { + return nil + } + out := new(ZendeskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZendeskParameters) DeepCopyInto(out *ZendeskParameters) { + *out = *in + if in.ErrorHandlingConfig != nil { + in, out := &in.ErrorHandlingConfig, &out.ErrorHandlingConfig + *out = new(ZendeskErrorHandlingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.IDFieldNames != nil { + in, out := &in.IDFieldNames, &out.IDFieldNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(string) + **out = **in + } + if in.WriteOperationType != nil { + in, out := &in.WriteOperationType, &out.WriteOperationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZendeskParameters. +func (in *ZendeskParameters) DeepCopy() *ZendeskParameters { + if in == nil { + return nil + } + out := new(ZendeskParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/appflow/v1beta2/zz_generated.managed.go b/apis/appflow/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..5f068fb0a3 --- /dev/null +++ b/apis/appflow/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Flow. +func (mg *Flow) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Flow. +func (mg *Flow) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Flow. +func (mg *Flow) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Flow. 
+func (mg *Flow) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Flow. +func (mg *Flow) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Flow. +func (mg *Flow) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Flow. +func (mg *Flow) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Flow. +func (mg *Flow) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Flow. +func (mg *Flow) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Flow. +func (mg *Flow) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Flow. +func (mg *Flow) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Flow. +func (mg *Flow) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/appflow/v1beta2/zz_generated.managedlist.go b/apis/appflow/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..dd5ef1e2e8 --- /dev/null +++ b/apis/appflow/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this FlowList. 
+func (l *FlowList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/appflow/v1beta2/zz_generated.resolvers.go b/apis/appflow/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..1f80830d2d --- /dev/null +++ b/apis/appflow/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,132 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Flow. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Flow) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.DestinationFlowConfig); i3++ { + if mg.Spec.ForProvider.DestinationFlowConfig[i3].DestinationConnectorProperties != nil { + if mg.Spec.ForProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "BucketPolicy", "BucketPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketName), + Extract: 
resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.ForProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketNameRef, + Selector: mg.Spec.ForProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketName") + } + mg.Spec.ForProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketNameRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.SourceFlowConfig != nil { + if mg.Spec.ForProvider.SourceFlowConfig.SourceConnectorProperties != nil { + if mg.Spec.ForProvider.SourceFlowConfig.SourceConnectorProperties.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "BucketPolicy", "BucketPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketName), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.ForProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketNameRef, + Selector: mg.Spec.ForProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketName") + } + mg.Spec.ForProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketNameRef = 
rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.DestinationFlowConfig); i3++ { + if mg.Spec.InitProvider.DestinationFlowConfig[i3].DestinationConnectorProperties != nil { + if mg.Spec.InitProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "BucketPolicy", "BucketPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketName), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.InitProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketNameRef, + Selector: mg.Spec.InitProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketName") + } + mg.Spec.InitProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DestinationFlowConfig[i3].DestinationConnectorProperties.S3.BucketNameRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.SourceFlowConfig != nil { + if mg.Spec.InitProvider.SourceFlowConfig.SourceConnectorProperties != nil { + if mg.Spec.InitProvider.SourceFlowConfig.SourceConnectorProperties.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "BucketPolicy", "BucketPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketName), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.InitProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketNameRef, + Selector: mg.Spec.InitProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketName") + } + mg.Spec.InitProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceFlowConfig.SourceConnectorProperties.S3.BucketNameRef = rsp.ResolvedReference + + } + } + } + + return nil +} diff --git a/apis/appflow/v1beta2/zz_groupversion_info.go b/apis/appflow/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..c3be1ad211 --- /dev/null +++ b/apis/appflow/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=appflow.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "appflow.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/appintegrations/v1beta1/zz_generated.conversion_spokes.go b/apis/appintegrations/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..11a13b0ded --- /dev/null +++ b/apis/appintegrations/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this EventIntegration to the hub type. +func (tr *EventIntegration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EventIntegration type. 
+func (tr *EventIntegration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/appintegrations/v1beta2/zz_eventintegration_terraformed.go b/apis/appintegrations/v1beta2/zz_eventintegration_terraformed.go new file mode 100755 index 0000000000..139b614c49 --- /dev/null +++ b/apis/appintegrations/v1beta2/zz_eventintegration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EventIntegration +func (mg *EventIntegration) GetTerraformResourceType() string { + return "aws_appintegrations_event_integration" +} + +// GetConnectionDetailsMapping for this EventIntegration +func (tr *EventIntegration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this EventIntegration +func (tr *EventIntegration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EventIntegration +func (tr *EventIntegration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform 
resource of this EventIntegration +func (tr *EventIntegration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this EventIntegration +func (tr *EventIntegration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EventIntegration +func (tr *EventIntegration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EventIntegration +func (tr *EventIntegration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this EventIntegration +func (tr *EventIntegration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this EventIntegration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *EventIntegration) LateInitialize(attrs []byte) (bool, error) { + params := &EventIntegrationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EventIntegration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appintegrations/v1beta2/zz_eventintegration_types.go b/apis/appintegrations/v1beta2/zz_eventintegration_types.go new file mode 100755 index 0000000000..287ed7ac8e --- /dev/null +++ b/apis/appintegrations/v1beta2/zz_eventintegration_types.go @@ -0,0 +1,162 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EventFilterInitParameters struct { + + // Source of the events. + Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type EventFilterObservation struct { + + // Source of the events. 
+ Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type EventFilterParameters struct { + + // Source of the events. + // +kubebuilder:validation:Optional + Source *string `json:"source" tf:"source,omitempty"` +} + +type EventIntegrationInitParameters struct { + + // Description of the Event Integration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Block that defines the configuration information for the event filter. The Event Filter block is documented below. + EventFilter *EventFilterInitParameters `json:"eventFilter,omitempty" tf:"event_filter,omitempty"` + + // EventBridge bus. + EventbridgeBus *string `json:"eventbridgeBus,omitempty" tf:"eventbridge_bus,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EventIntegrationObservation struct { + + // ARN of the Event Integration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description of the Event Integration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Block that defines the configuration information for the event filter. The Event Filter block is documented below. + EventFilter *EventFilterObservation `json:"eventFilter,omitempty" tf:"event_filter,omitempty"` + + // EventBridge bus. + EventbridgeBus *string `json:"eventbridgeBus,omitempty" tf:"eventbridge_bus,omitempty"` + + // Identifier of the Event Integration which is the name of the Event Integration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type EventIntegrationParameters struct { + + // Description of the Event Integration. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Block that defines the configuration information for the event filter. The Event Filter block is documented below. + // +kubebuilder:validation:Optional + EventFilter *EventFilterParameters `json:"eventFilter,omitempty" tf:"event_filter,omitempty"` + + // EventBridge bus. + // +kubebuilder:validation:Optional + EventbridgeBus *string `json:"eventbridgeBus,omitempty" tf:"eventbridge_bus,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// EventIntegrationSpec defines the desired state of EventIntegration +type EventIntegrationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EventIntegrationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider EventIntegrationInitParameters `json:"initProvider,omitempty"` +} + +// EventIntegrationStatus defines the observed state of EventIntegration. +type EventIntegrationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EventIntegrationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EventIntegration is the Schema for the EventIntegrations API. Provides details about a specific Amazon AppIntegrations Event Integration +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type EventIntegration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.eventFilter) || (has(self.initProvider) && has(self.initProvider.eventFilter))",message="spec.forProvider.eventFilter is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.eventbridgeBus) || (has(self.initProvider) && has(self.initProvider.eventbridgeBus))",message="spec.forProvider.eventbridgeBus is a required parameter" + Spec EventIntegrationSpec `json:"spec"` + Status EventIntegrationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EventIntegrationList contains a 
list of EventIntegrations +type EventIntegrationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EventIntegration `json:"items"` +} + +// Repository type metadata. +var ( + EventIntegration_Kind = "EventIntegration" + EventIntegration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: EventIntegration_Kind}.String() + EventIntegration_KindAPIVersion = EventIntegration_Kind + "." + CRDGroupVersion.String() + EventIntegration_GroupVersionKind = CRDGroupVersion.WithKind(EventIntegration_Kind) +) + +func init() { + SchemeBuilder.Register(&EventIntegration{}, &EventIntegrationList{}) +} diff --git a/apis/appintegrations/v1beta2/zz_generated.conversion_hubs.go b/apis/appintegrations/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9c91506c7b --- /dev/null +++ b/apis/appintegrations/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *EventIntegration) Hub() {} diff --git a/apis/appintegrations/v1beta2/zz_generated.deepcopy.go b/apis/appintegrations/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..06950a8d12 --- /dev/null +++ b/apis/appintegrations/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,336 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventFilterInitParameters) DeepCopyInto(out *EventFilterInitParameters) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventFilterInitParameters. +func (in *EventFilterInitParameters) DeepCopy() *EventFilterInitParameters { + if in == nil { + return nil + } + out := new(EventFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventFilterObservation) DeepCopyInto(out *EventFilterObservation) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventFilterObservation. +func (in *EventFilterObservation) DeepCopy() *EventFilterObservation { + if in == nil { + return nil + } + out := new(EventFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventFilterParameters) DeepCopyInto(out *EventFilterParameters) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventFilterParameters. +func (in *EventFilterParameters) DeepCopy() *EventFilterParameters { + if in == nil { + return nil + } + out := new(EventFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventIntegration) DeepCopyInto(out *EventIntegration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventIntegration. +func (in *EventIntegration) DeepCopy() *EventIntegration { + if in == nil { + return nil + } + out := new(EventIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventIntegration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventIntegrationInitParameters) DeepCopyInto(out *EventIntegrationInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EventFilter != nil { + in, out := &in.EventFilter, &out.EventFilter + *out = new(EventFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventbridgeBus != nil { + in, out := &in.EventbridgeBus, &out.EventbridgeBus + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventIntegrationInitParameters. 
+func (in *EventIntegrationInitParameters) DeepCopy() *EventIntegrationInitParameters { + if in == nil { + return nil + } + out := new(EventIntegrationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventIntegrationList) DeepCopyInto(out *EventIntegrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EventIntegration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventIntegrationList. +func (in *EventIntegrationList) DeepCopy() *EventIntegrationList { + if in == nil { + return nil + } + out := new(EventIntegrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventIntegrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventIntegrationObservation) DeepCopyInto(out *EventIntegrationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EventFilter != nil { + in, out := &in.EventFilter, &out.EventFilter + *out = new(EventFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.EventbridgeBus != nil { + in, out := &in.EventbridgeBus, &out.EventbridgeBus + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventIntegrationObservation. +func (in *EventIntegrationObservation) DeepCopy() *EventIntegrationObservation { + if in == nil { + return nil + } + out := new(EventIntegrationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventIntegrationParameters) DeepCopyInto(out *EventIntegrationParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EventFilter != nil { + in, out := &in.EventFilter, &out.EventFilter + *out = new(EventFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.EventbridgeBus != nil { + in, out := &in.EventbridgeBus, &out.EventbridgeBus + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventIntegrationParameters. +func (in *EventIntegrationParameters) DeepCopy() *EventIntegrationParameters { + if in == nil { + return nil + } + out := new(EventIntegrationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventIntegrationSpec) DeepCopyInto(out *EventIntegrationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventIntegrationSpec. +func (in *EventIntegrationSpec) DeepCopy() *EventIntegrationSpec { + if in == nil { + return nil + } + out := new(EventIntegrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventIntegrationStatus) DeepCopyInto(out *EventIntegrationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventIntegrationStatus. +func (in *EventIntegrationStatus) DeepCopy() *EventIntegrationStatus { + if in == nil { + return nil + } + out := new(EventIntegrationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/appintegrations/v1beta2/zz_generated.managed.go b/apis/appintegrations/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..d5521ad518 --- /dev/null +++ b/apis/appintegrations/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this EventIntegration. +func (mg *EventIntegration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EventIntegration. +func (mg *EventIntegration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EventIntegration. +func (mg *EventIntegration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EventIntegration. +func (mg *EventIntegration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EventIntegration. +func (mg *EventIntegration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EventIntegration. 
+func (mg *EventIntegration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EventIntegration. +func (mg *EventIntegration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EventIntegration. +func (mg *EventIntegration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EventIntegration. +func (mg *EventIntegration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EventIntegration. +func (mg *EventIntegration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EventIntegration. +func (mg *EventIntegration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EventIntegration. +func (mg *EventIntegration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/appintegrations/v1beta2/zz_generated.managedlist.go b/apis/appintegrations/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..35af06dc3c --- /dev/null +++ b/apis/appintegrations/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this EventIntegrationList. 
+func (l *EventIntegrationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/appintegrations/v1beta2/zz_groupversion_info.go b/apis/appintegrations/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..58440da2ce --- /dev/null +++ b/apis/appintegrations/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=appintegrations.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "appintegrations.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/appmesh/v1beta1/zz_generated.conversion_spokes.go b/apis/appmesh/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..7ab2ba7bec --- /dev/null +++ b/apis/appmesh/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,154 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this GatewayRoute to the hub type. +func (tr *GatewayRoute) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the GatewayRoute type. +func (tr *GatewayRoute) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Mesh to the hub type. +func (tr *Mesh) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Mesh type. 
+func (tr *Mesh) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Route to the hub type. +func (tr *Route) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Route type. +func (tr *Route) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualGateway to the hub type. +func (tr *VirtualGateway) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualGateway type. 
+func (tr *VirtualGateway) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualNode to the hub type. +func (tr *VirtualNode) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualNode type. +func (tr *VirtualNode) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualRouter to the hub type. +func (tr *VirtualRouter) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualRouter type. 
+func (tr *VirtualRouter) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualService to the hub type. +func (tr *VirtualService) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualService type. +func (tr *VirtualService) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/appmesh/v1beta2/zz_gatewayroute_terraformed.go b/apis/appmesh/v1beta2/zz_gatewayroute_terraformed.go new file mode 100755 index 0000000000..b240244fac --- /dev/null +++ b/apis/appmesh/v1beta2/zz_gatewayroute_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this GatewayRoute +func (mg *GatewayRoute) GetTerraformResourceType() string { + return "aws_appmesh_gateway_route" +} + +// GetConnectionDetailsMapping for this GatewayRoute +func (tr *GatewayRoute) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this GatewayRoute +func (tr *GatewayRoute) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this GatewayRoute +func (tr *GatewayRoute) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this GatewayRoute +func (tr *GatewayRoute) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this GatewayRoute +func (tr *GatewayRoute) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this GatewayRoute +func (tr *GatewayRoute) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this GatewayRoute +func (tr *GatewayRoute) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + 
base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this GatewayRoute +func (tr *GatewayRoute) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this GatewayRoute using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *GatewayRoute) LateInitialize(attrs []byte) (bool, error) { + params := &GatewayRouteParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GatewayRoute) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appmesh/v1beta2/zz_gatewayroute_types.go b/apis/appmesh/v1beta2/zz_gatewayroute_types.go new file mode 100755 index 0000000000..d16ad70865 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_gatewayroute_types.go @@ -0,0 +1,1415 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // Target that traffic is routed to when a request matches the gateway route. + Target *TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type ActionObservation struct { + + // Target that traffic is routed to when a request matches the gateway route. + Target *TargetObservation `json:"target,omitempty" tf:"target,omitempty"` +} + +type ActionParameters struct { + + // Target that traffic is routed to when a request matches the gateway route. + // +kubebuilder:validation:Optional + Target *TargetParameters `json:"target" tf:"target,omitempty"` +} + +type ActionRewriteInitParameters struct { + + // Host name to rewrite. + Hostname *RewriteHostnameInitParameters `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + Path *RewritePathInitParameters `json:"path,omitempty" tf:"path,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *RewritePrefixInitParameters `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type ActionRewriteObservation struct { + + // Host name to rewrite. 
+ Hostname *RewriteHostnameObservation `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + Path *RewritePathObservation `json:"path,omitempty" tf:"path,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *RewritePrefixObservation `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type ActionRewriteParameters struct { + + // Host name to rewrite. + // +kubebuilder:validation:Optional + Hostname *RewriteHostnameParameters `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + // +kubebuilder:validation:Optional + Path *RewritePathParameters `json:"path,omitempty" tf:"path,omitempty"` + + // Specified beginning characters to rewrite. + // +kubebuilder:validation:Optional + Prefix *RewritePrefixParameters `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type ActionTargetInitParameters struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual service gateway route target. + VirtualService *TargetVirtualServiceInitParameters `json:"virtualService,omitempty" tf:"virtual_service,omitempty"` +} + +type ActionTargetObservation struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual service gateway route target. + VirtualService *TargetVirtualServiceObservation `json:"virtualService,omitempty" tf:"virtual_service,omitempty"` +} + +type ActionTargetParameters struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. 
+ // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual service gateway route target. + // +kubebuilder:validation:Optional + VirtualService *TargetVirtualServiceParameters `json:"virtualService" tf:"virtual_service,omitempty"` +} + +type ActionTargetVirtualServiceInitParameters struct { + + // Name of the virtual service that traffic is routed to. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + VirtualServiceName *string `json:"virtualServiceName,omitempty" tf:"virtual_service_name,omitempty"` + + // Reference to a VirtualService in appmesh to populate virtualServiceName. + // +kubebuilder:validation:Optional + VirtualServiceNameRef *v1.Reference `json:"virtualServiceNameRef,omitempty" tf:"-"` + + // Selector for a VirtualService in appmesh to populate virtualServiceName. + // +kubebuilder:validation:Optional + VirtualServiceNameSelector *v1.Selector `json:"virtualServiceNameSelector,omitempty" tf:"-"` +} + +type ActionTargetVirtualServiceObservation struct { + + // Name of the virtual service that traffic is routed to. Must be between 1 and 255 characters in length. + VirtualServiceName *string `json:"virtualServiceName,omitempty" tf:"virtual_service_name,omitempty"` +} + +type ActionTargetVirtualServiceParameters struct { + + // Name of the virtual service that traffic is routed to. Must be between 1 and 255 characters in length. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + VirtualServiceName *string `json:"virtualServiceName,omitempty" tf:"virtual_service_name,omitempty"` + + // Reference to a VirtualService in appmesh to populate virtualServiceName. + // +kubebuilder:validation:Optional + VirtualServiceNameRef *v1.Reference `json:"virtualServiceNameRef,omitempty" tf:"-"` + + // Selector for a VirtualService in appmesh to populate virtualServiceName. + // +kubebuilder:validation:Optional + VirtualServiceNameSelector *v1.Selector `json:"virtualServiceNameSelector,omitempty" tf:"-"` +} + +type GRPCRouteInitParameters struct { + + // Action to take if a match is determined. + Action *ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining a request match. + Match *MatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` +} + +type GRPCRouteObservation struct { + + // Action to take if a match is determined. + Action *ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining a request match. + Match *MatchObservation `json:"match,omitempty" tf:"match,omitempty"` +} + +type GRPCRouteParameters struct { + + // Action to take if a match is determined. + // +kubebuilder:validation:Optional + Action *ActionParameters `json:"action" tf:"action,omitempty"` + + // Criteria for determining a request match. + // +kubebuilder:validation:Optional + Match *MatchParameters `json:"match" tf:"match,omitempty"` +} + +type GatewayRouteInitParameters struct { + + // Name of the service mesh in which to create the gateway route. Must be between 1 and 255 characters in length. + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. 
Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Gateway route specification to apply. + Spec *SpecInitParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Name of the virtual gateway to associate the gateway route with. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + VirtualGatewayName *string `json:"virtualGatewayName,omitempty" tf:"virtual_gateway_name,omitempty"` + + // Reference to a VirtualGateway in appmesh to populate virtualGatewayName. + // +kubebuilder:validation:Optional + VirtualGatewayNameRef *v1.Reference `json:"virtualGatewayNameRef,omitempty" tf:"-"` + + // Selector for a VirtualGateway in appmesh to populate virtualGatewayName. + // +kubebuilder:validation:Optional + VirtualGatewayNameSelector *v1.Selector `json:"virtualGatewayNameSelector,omitempty" tf:"-"` +} + +type GatewayRouteObservation struct { + + // ARN of the gateway route. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Creation date of the gateway route. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // ID of the gateway route. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Last update date of the gateway route. + LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` + + // Name of the service mesh in which to create the gateway route. 
Must be between 1 and 255 characters in length. + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Resource owner's AWS account ID. + ResourceOwner *string `json:"resourceOwner,omitempty" tf:"resource_owner,omitempty"` + + // Gateway route specification to apply. + Spec *SpecObservation `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Name of the virtual gateway to associate the gateway route with. Must be between 1 and 255 characters in length. + VirtualGatewayName *string `json:"virtualGatewayName,omitempty" tf:"virtual_gateway_name,omitempty"` +} + +type GatewayRouteParameters struct { + + // Name of the service mesh in which to create the gateway route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + // +kubebuilder:validation:Optional + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Gateway route specification to apply. + // +kubebuilder:validation:Optional + Spec *SpecParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Name of the virtual gateway to associate the gateway route with. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + VirtualGatewayName *string `json:"virtualGatewayName,omitempty" tf:"virtual_gateway_name,omitempty"` + + // Reference to a VirtualGateway in appmesh to populate virtualGatewayName. + // +kubebuilder:validation:Optional + VirtualGatewayNameRef *v1.Reference `json:"virtualGatewayNameRef,omitempty" tf:"-"` + + // Selector for a VirtualGateway in appmesh to populate virtualGatewayName. + // +kubebuilder:validation:Optional + VirtualGatewayNameSelector *v1.Selector `json:"virtualGatewayNameSelector,omitempty" tf:"-"` +} + +type HTTPRouteActionInitParameters struct { + + // Gateway route action to rewrite. + Rewrite *ActionRewriteInitParameters `json:"rewrite,omitempty" tf:"rewrite,omitempty"` + + // Target that traffic is routed to when a request matches the gateway route. + Target *HTTPRouteActionTargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type HTTPRouteActionObservation struct { + + // Gateway route action to rewrite. 
+ Rewrite *ActionRewriteObservation `json:"rewrite,omitempty" tf:"rewrite,omitempty"` + + // Target that traffic is routed to when a request matches the gateway route. + Target *HTTPRouteActionTargetObservation `json:"target,omitempty" tf:"target,omitempty"` +} + +type HTTPRouteActionParameters struct { + + // Gateway route action to rewrite. + // +kubebuilder:validation:Optional + Rewrite *ActionRewriteParameters `json:"rewrite,omitempty" tf:"rewrite,omitempty"` + + // Target that traffic is routed to when a request matches the gateway route. + // +kubebuilder:validation:Optional + Target *HTTPRouteActionTargetParameters `json:"target" tf:"target,omitempty"` +} + +type HTTPRouteActionTargetInitParameters struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual service gateway route target. + VirtualService *ActionTargetVirtualServiceInitParameters `json:"virtualService,omitempty" tf:"virtual_service,omitempty"` +} + +type HTTPRouteActionTargetObservation struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual service gateway route target. + VirtualService *ActionTargetVirtualServiceObservation `json:"virtualService,omitempty" tf:"virtual_service,omitempty"` +} + +type HTTPRouteActionTargetParameters struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual service gateway route target. 
+ // +kubebuilder:validation:Optional + VirtualService *ActionTargetVirtualServiceParameters `json:"virtualService" tf:"virtual_service,omitempty"` +} + +type HTTPRouteInitParameters struct { + + // Action to take if a match is determined. + Action *HTTPRouteActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining a request match. + Match *HTTPRouteMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` +} + +type HTTPRouteMatchHostnameInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must end with the specified characters. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type HTTPRouteMatchHostnameObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must end with the specified characters. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type HTTPRouteMatchHostnameParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must end with the specified characters. + // +kubebuilder:validation:Optional + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type HTTPRouteMatchInitParameters struct { + + // Client request headers to match on. + Header []MatchHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Host name to rewrite. + Hostname *HTTPRouteMatchHostnameInitParameters `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + Path *HTTPRouteMatchPathInitParameters `json:"path,omitempty" tf:"path,omitempty"` + + // The port number that corresponds to the target for Virtual Service provider port. 
This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + QueryParameter []MatchQueryParameterInitParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` +} + +type HTTPRouteMatchObservation struct { + + // Client request headers to match on. + Header []MatchHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // Host name to rewrite. + Hostname *HTTPRouteMatchHostnameObservation `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + Path *HTTPRouteMatchPathObservation `json:"path,omitempty" tf:"path,omitempty"` + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + QueryParameter []MatchQueryParameterObservation `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` +} + +type HTTPRouteMatchParameters struct { + + // Client request headers to match on. + // +kubebuilder:validation:Optional + Header []MatchHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Host name to rewrite. + // +kubebuilder:validation:Optional + Hostname *HTTPRouteMatchHostnameParameters `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + // +kubebuilder:validation:Optional + Path *HTTPRouteMatchPathParameters `json:"path,omitempty" tf:"path,omitempty"` + + // The port number that corresponds to the target for Virtual Service provider port. 
This is required when the provider (router or node) of the Virtual Service has multiple listeners. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Specified beginning characters to rewrite. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + // +kubebuilder:validation:Optional + QueryParameter []MatchQueryParameterParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` +} + +type HTTPRouteMatchPathInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must include the specified characters. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type HTTPRouteMatchPathObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must include the specified characters. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type HTTPRouteMatchPathParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must include the specified characters. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type HTTPRouteObservation struct { + + // Action to take if a match is determined. + Action *HTTPRouteActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining a request match. + Match *HTTPRouteMatchObservation `json:"match,omitempty" tf:"match,omitempty"` +} + +type HTTPRouteParameters struct { + + // Action to take if a match is determined. 
+ // +kubebuilder:validation:Optional + Action *HTTPRouteActionParameters `json:"action" tf:"action,omitempty"` + + // Criteria for determining a request match. + // +kubebuilder:validation:Optional + Match *HTTPRouteMatchParameters `json:"match" tf:"match,omitempty"` +} + +type HeaderInitParameters struct { + + // If true, the match is on the opposite of the match method and value. Default is false. + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining a request match. + Match *HeaderMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type HeaderMatchInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the header value sent by the client must be included in. + Range *RangeInitParameters `json:"range,omitempty" tf:"range,omitempty"` + + // Header value sent by the client must include the specified characters. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Header value sent by the client must end with the specified characters. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type HeaderMatchObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the header value sent by the client must be included in. 
+ Range *RangeObservation `json:"range,omitempty" tf:"range,omitempty"` + + // Header value sent by the client must include the specified characters. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Header value sent by the client must end with the specified characters. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type HeaderMatchParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Specified beginning characters to rewrite. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the header value sent by the client must be included in. + // +kubebuilder:validation:Optional + Range *RangeParameters `json:"range,omitempty" tf:"range,omitempty"` + + // Header value sent by the client must include the specified characters. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Header value sent by the client must end with the specified characters. + // +kubebuilder:validation:Optional + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type HeaderObservation struct { + + // If true, the match is on the opposite of the match method and value. Default is false. + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining a request match. + Match *HeaderMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type HeaderParameters struct { + + // If true, the match is on the opposite of the match method and value. Default is false. 
+ // +kubebuilder:validation:Optional + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining a request match. + // +kubebuilder:validation:Optional + Match *HeaderMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type HostnameInitParameters struct { + + // Default target host name to write to. Valid values: ENABLED, DISABLED. + DefaultTargetHostname *string `json:"defaultTargetHostname,omitempty" tf:"default_target_hostname,omitempty"` +} + +type HostnameObservation struct { + + // Default target host name to write to. Valid values: ENABLED, DISABLED. + DefaultTargetHostname *string `json:"defaultTargetHostname,omitempty" tf:"default_target_hostname,omitempty"` +} + +type HostnameParameters struct { + + // Default target host name to write to. Valid values: ENABLED, DISABLED. + // +kubebuilder:validation:Optional + DefaultTargetHostname *string `json:"defaultTargetHostname" tf:"default_target_hostname,omitempty"` +} + +type Http2RouteActionInitParameters struct { + + // Gateway route action to rewrite. + Rewrite *RewriteInitParameters `json:"rewrite,omitempty" tf:"rewrite,omitempty"` + + // Target that traffic is routed to when a request matches the gateway route. + Target *ActionTargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type Http2RouteActionObservation struct { + + // Gateway route action to rewrite. + Rewrite *RewriteObservation `json:"rewrite,omitempty" tf:"rewrite,omitempty"` + + // Target that traffic is routed to when a request matches the gateway route. + Target *ActionTargetObservation `json:"target,omitempty" tf:"target,omitempty"` +} + +type Http2RouteActionParameters struct { + + // Gateway route action to rewrite. 
+ // +kubebuilder:validation:Optional + Rewrite *RewriteParameters `json:"rewrite,omitempty" tf:"rewrite,omitempty"` + + // Target that traffic is routed to when a request matches the gateway route. + // +kubebuilder:validation:Optional + Target *ActionTargetParameters `json:"target" tf:"target,omitempty"` +} + +type Http2RouteInitParameters struct { + + // Action to take if a match is determined. + Action *Http2RouteActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining a request match. + Match *Http2RouteMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` +} + +type Http2RouteMatchInitParameters struct { + + // Client request headers to match on. + Header []HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Host name to rewrite. + Hostname *MatchHostnameInitParameters `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + Path *MatchPathInitParameters `json:"path,omitempty" tf:"path,omitempty"` + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + QueryParameter []QueryParameterInitParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` +} + +type Http2RouteMatchObservation struct { + + // Client request headers to match on. + Header []HeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // Host name to rewrite. + Hostname *MatchHostnameObservation `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. 
+ Path *MatchPathObservation `json:"path,omitempty" tf:"path,omitempty"` + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + QueryParameter []QueryParameterObservation `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` +} + +type Http2RouteMatchParameters struct { + + // Client request headers to match on. + // +kubebuilder:validation:Optional + Header []HeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Host name to rewrite. + // +kubebuilder:validation:Optional + Hostname *MatchHostnameParameters `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + // +kubebuilder:validation:Optional + Path *MatchPathParameters `json:"path,omitempty" tf:"path,omitempty"` + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Specified beginning characters to rewrite. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + // +kubebuilder:validation:Optional + QueryParameter []QueryParameterParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` +} + +type Http2RouteObservation struct { + + // Action to take if a match is determined. + Action *Http2RouteActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining a request match. 
+ Match *Http2RouteMatchObservation `json:"match,omitempty" tf:"match,omitempty"` +} + +type Http2RouteParameters struct { + + // Action to take if a match is determined. + // +kubebuilder:validation:Optional + Action *Http2RouteActionParameters `json:"action" tf:"action,omitempty"` + + // Criteria for determining a request match. + // +kubebuilder:validation:Optional + Match *Http2RouteMatchParameters `json:"match" tf:"match,omitempty"` +} + +type MatchHeaderInitParameters struct { + + // If true, the match is on the opposite of the match method and value. Default is false. + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining a request match. + Match *MatchHeaderMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type MatchHeaderMatchInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the header value sent by the client must be included in. + Range *MatchRangeInitParameters `json:"range,omitempty" tf:"range,omitempty"` + + // Header value sent by the client must include the specified characters. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Header value sent by the client must end with the specified characters. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type MatchHeaderMatchObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Specified beginning characters to rewrite. 
+ Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the header value sent by the client must be included in. + Range *MatchRangeObservation `json:"range,omitempty" tf:"range,omitempty"` + + // Header value sent by the client must include the specified characters. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Header value sent by the client must end with the specified characters. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type MatchHeaderMatchParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Specified beginning characters to rewrite. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the header value sent by the client must be included in. + // +kubebuilder:validation:Optional + Range *MatchRangeParameters `json:"range,omitempty" tf:"range,omitempty"` + + // Header value sent by the client must include the specified characters. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Header value sent by the client must end with the specified characters. + // +kubebuilder:validation:Optional + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type MatchHeaderObservation struct { + + // If true, the match is on the opposite of the match method and value. Default is false. + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining a request match. + Match *MatchHeaderMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type MatchHeaderParameters struct { + + // If true, the match is on the opposite of the match method and value. Default is false. + // +kubebuilder:validation:Optional + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining a request match. + // +kubebuilder:validation:Optional + Match *MatchHeaderMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type MatchHostnameInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must end with the specified characters. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type MatchHostnameObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must end with the specified characters. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type MatchHostnameParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must end with the specified characters. + // +kubebuilder:validation:Optional + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type MatchInitParameters struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Fully qualified domain name for the service to match from the request. 
+ ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type MatchObservation struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Fully qualified domain name for the service to match from the request. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type MatchParameters struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Fully qualified domain name for the service to match from the request. + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName" tf:"service_name,omitempty"` +} + +type MatchPathInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must include the specified characters. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type MatchPathObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must include the specified characters. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type MatchPathParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Header value sent by the client must include the specified characters. 
+ // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type MatchQueryParameterInitParameters struct { + + // Criteria for determining a request match. + Match *MatchQueryParameterMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type MatchQueryParameterMatchInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type MatchQueryParameterMatchObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type MatchQueryParameterMatchParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type MatchQueryParameterObservation struct { + + // Criteria for determining a request match. + Match *MatchQueryParameterMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type MatchQueryParameterParameters struct { + + // Criteria for determining a request match. + // +kubebuilder:validation:Optional + Match *MatchQueryParameterMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type MatchRangeInitParameters struct { + + // End of the range. + End *float64 `json:"end,omitempty" tf:"end,omitempty"` + + // (Requited) Start of the range. 
+ Start *float64 `json:"start,omitempty" tf:"start,omitempty"` +} + +type MatchRangeObservation struct { + + // End of the range. + End *float64 `json:"end,omitempty" tf:"end,omitempty"` + + // (Requited) Start of the range. + Start *float64 `json:"start,omitempty" tf:"start,omitempty"` +} + +type MatchRangeParameters struct { + + // End of the range. + // +kubebuilder:validation:Optional + End *float64 `json:"end" tf:"end,omitempty"` + + // (Requited) Start of the range. + // +kubebuilder:validation:Optional + Start *float64 `json:"start" tf:"start,omitempty"` +} + +type PathInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type PathObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type PathParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact" tf:"exact,omitempty"` +} + +type PrefixInitParameters struct { + + // Default prefix used to replace the incoming route prefix when rewritten. Valid values: ENABLED, DISABLED. + DefaultPrefix *string `json:"defaultPrefix,omitempty" tf:"default_prefix,omitempty"` + + // Value used to replace the incoming route prefix when rewritten. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PrefixObservation struct { + + // Default prefix used to replace the incoming route prefix when rewritten. Valid values: ENABLED, DISABLED. + DefaultPrefix *string `json:"defaultPrefix,omitempty" tf:"default_prefix,omitempty"` + + // Value used to replace the incoming route prefix when rewritten. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PrefixParameters struct { + + // Default prefix used to replace the incoming route prefix when rewritten. Valid values: ENABLED, DISABLED. 
+ // +kubebuilder:validation:Optional + DefaultPrefix *string `json:"defaultPrefix,omitempty" tf:"default_prefix,omitempty"` + + // Value used to replace the incoming route prefix when rewritten. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type QueryParameterInitParameters struct { + + // Criteria for determining a request match. + Match *QueryParameterMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type QueryParameterMatchInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type QueryParameterMatchObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type QueryParameterMatchParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type QueryParameterObservation struct { + + // Criteria for determining a request match. + Match *QueryParameterMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type QueryParameterParameters struct { + + // Criteria for determining a request match. + // +kubebuilder:validation:Optional + Match *QueryParameterMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the gateway route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type RangeInitParameters struct { + + // End of the range. 
+ End *float64 `json:"end,omitempty" tf:"end,omitempty"` + + // (Requited) Start of the range. + Start *float64 `json:"start,omitempty" tf:"start,omitempty"` +} + +type RangeObservation struct { + + // End of the range. + End *float64 `json:"end,omitempty" tf:"end,omitempty"` + + // (Requited) Start of the range. + Start *float64 `json:"start,omitempty" tf:"start,omitempty"` +} + +type RangeParameters struct { + + // End of the range. + // +kubebuilder:validation:Optional + End *float64 `json:"end" tf:"end,omitempty"` + + // (Requited) Start of the range. + // +kubebuilder:validation:Optional + Start *float64 `json:"start" tf:"start,omitempty"` +} + +type RewriteHostnameInitParameters struct { + + // Default target host name to write to. Valid values: ENABLED, DISABLED. + DefaultTargetHostname *string `json:"defaultTargetHostname,omitempty" tf:"default_target_hostname,omitempty"` +} + +type RewriteHostnameObservation struct { + + // Default target host name to write to. Valid values: ENABLED, DISABLED. + DefaultTargetHostname *string `json:"defaultTargetHostname,omitempty" tf:"default_target_hostname,omitempty"` +} + +type RewriteHostnameParameters struct { + + // Default target host name to write to. Valid values: ENABLED, DISABLED. + // +kubebuilder:validation:Optional + DefaultTargetHostname *string `json:"defaultTargetHostname" tf:"default_target_hostname,omitempty"` +} + +type RewriteInitParameters struct { + + // Host name to rewrite. + Hostname *HostnameInitParameters `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + Path *PathInitParameters `json:"path,omitempty" tf:"path,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *PrefixInitParameters `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type RewriteObservation struct { + + // Host name to rewrite. + Hostname *HostnameObservation `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. 
+ Path *PathObservation `json:"path,omitempty" tf:"path,omitempty"` + + // Specified beginning characters to rewrite. + Prefix *PrefixObservation `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type RewriteParameters struct { + + // Host name to rewrite. + // +kubebuilder:validation:Optional + Hostname *HostnameParameters `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Exact path to rewrite. + // +kubebuilder:validation:Optional + Path *PathParameters `json:"path,omitempty" tf:"path,omitempty"` + + // Specified beginning characters to rewrite. + // +kubebuilder:validation:Optional + Prefix *PrefixParameters `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type RewritePathInitParameters struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type RewritePathObservation struct { + + // Value used to replace matched path. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type RewritePathParameters struct { + + // Value used to replace matched path. + // +kubebuilder:validation:Optional + Exact *string `json:"exact" tf:"exact,omitempty"` +} + +type RewritePrefixInitParameters struct { + + // Default prefix used to replace the incoming route prefix when rewritten. Valid values: ENABLED, DISABLED. + DefaultPrefix *string `json:"defaultPrefix,omitempty" tf:"default_prefix,omitempty"` + + // Value used to replace the incoming route prefix when rewritten. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RewritePrefixObservation struct { + + // Default prefix used to replace the incoming route prefix when rewritten. Valid values: ENABLED, DISABLED. + DefaultPrefix *string `json:"defaultPrefix,omitempty" tf:"default_prefix,omitempty"` + + // Value used to replace the incoming route prefix when rewritten. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RewritePrefixParameters struct { + + // Default prefix used to replace the incoming route prefix when rewritten. Valid values: ENABLED, DISABLED. + // +kubebuilder:validation:Optional + DefaultPrefix *string `json:"defaultPrefix,omitempty" tf:"default_prefix,omitempty"` + + // Value used to replace the incoming route prefix when rewritten. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SpecInitParameters struct { + + // Specification of a gRPC gateway route. + GRPCRoute *GRPCRouteInitParameters `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"` + + // Specification of an HTTP gateway route. + HTTPRoute *HTTPRouteInitParameters `json:"httpRoute,omitempty" tf:"http_route,omitempty"` + + // Specification of an HTTP/2 gateway route. + Http2Route *Http2RouteInitParameters `json:"http2Route,omitempty" tf:"http2_route,omitempty"` + + // Priority for the gateway route, between 0 and 1000. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type SpecObservation struct { + + // Specification of a gRPC gateway route. + GRPCRoute *GRPCRouteObservation `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"` + + // Specification of an HTTP gateway route. + HTTPRoute *HTTPRouteObservation `json:"httpRoute,omitempty" tf:"http_route,omitempty"` + + // Specification of an HTTP/2 gateway route. + Http2Route *Http2RouteObservation `json:"http2Route,omitempty" tf:"http2_route,omitempty"` + + // Priority for the gateway route, between 0 and 1000. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type SpecParameters struct { + + // Specification of a gRPC gateway route. + // +kubebuilder:validation:Optional + GRPCRoute *GRPCRouteParameters `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"` + + // Specification of an HTTP gateway route. 
+ // +kubebuilder:validation:Optional + HTTPRoute *HTTPRouteParameters `json:"httpRoute,omitempty" tf:"http_route,omitempty"` + + // Specification of an HTTP/2 gateway route. + // +kubebuilder:validation:Optional + Http2Route *Http2RouteParameters `json:"http2Route,omitempty" tf:"http2_route,omitempty"` + + // Priority for the gateway route, between 0 and 1000. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type TargetInitParameters struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual service gateway route target. + VirtualService *VirtualServiceInitParameters `json:"virtualService,omitempty" tf:"virtual_service,omitempty"` +} + +type TargetObservation struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual service gateway route target. + VirtualService *VirtualServiceObservation `json:"virtualService,omitempty" tf:"virtual_service,omitempty"` +} + +type TargetParameters struct { + + // The port number that corresponds to the target for Virtual Service provider port. This is required when the provider (router or node) of the Virtual Service has multiple listeners. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual service gateway route target. + // +kubebuilder:validation:Optional + VirtualService *VirtualServiceParameters `json:"virtualService" tf:"virtual_service,omitempty"` +} + +type TargetVirtualServiceInitParameters struct { + + // Name of the virtual service that traffic is routed to. 
Must be between 1 and 255 characters in length. + VirtualServiceName *string `json:"virtualServiceName,omitempty" tf:"virtual_service_name,omitempty"` +} + +type TargetVirtualServiceObservation struct { + + // Name of the virtual service that traffic is routed to. Must be between 1 and 255 characters in length. + VirtualServiceName *string `json:"virtualServiceName,omitempty" tf:"virtual_service_name,omitempty"` +} + +type TargetVirtualServiceParameters struct { + + // Name of the virtual service that traffic is routed to. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + VirtualServiceName *string `json:"virtualServiceName" tf:"virtual_service_name,omitempty"` +} + +type VirtualServiceInitParameters struct { + + // Name of the virtual service that traffic is routed to. Must be between 1 and 255 characters in length. + VirtualServiceName *string `json:"virtualServiceName,omitempty" tf:"virtual_service_name,omitempty"` +} + +type VirtualServiceObservation struct { + + // Name of the virtual service that traffic is routed to. Must be between 1 and 255 characters in length. + VirtualServiceName *string `json:"virtualServiceName,omitempty" tf:"virtual_service_name,omitempty"` +} + +type VirtualServiceParameters struct { + + // Name of the virtual service that traffic is routed to. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + VirtualServiceName *string `json:"virtualServiceName" tf:"virtual_service_name,omitempty"` +} + +// GatewayRouteSpec defines the desired state of GatewayRoute +type GatewayRouteSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GatewayRouteParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GatewayRouteInitParameters `json:"initProvider,omitempty"` +} + +// GatewayRouteStatus defines the observed state of GatewayRoute. +type GatewayRouteStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GatewayRouteObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// GatewayRoute is the Schema for the GatewayRoutes API. Provides an AWS App Mesh gateway route resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type GatewayRoute struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.meshName) || (has(self.initProvider) && has(self.initProvider.meshName))",message="spec.forProvider.meshName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spec) || (has(self.initProvider) && has(self.initProvider.spec))",message="spec.forProvider.spec is a required parameter" + Spec GatewayRouteSpec `json:"spec"` + Status GatewayRouteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayRouteList contains a list of GatewayRoutes +type GatewayRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GatewayRoute `json:"items"` +} + +// Repository type metadata. +var ( + GatewayRoute_Kind = "GatewayRoute" + GatewayRoute_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: GatewayRoute_Kind}.String() + GatewayRoute_KindAPIVersion = GatewayRoute_Kind + "." + CRDGroupVersion.String() + GatewayRoute_GroupVersionKind = CRDGroupVersion.WithKind(GatewayRoute_Kind) +) + +func init() { + SchemeBuilder.Register(&GatewayRoute{}, &GatewayRouteList{}) +} diff --git a/apis/appmesh/v1beta2/zz_generated.conversion_hubs.go b/apis/appmesh/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..b22236f54f --- /dev/null +++ b/apis/appmesh/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,28 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *GatewayRoute) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Mesh) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Route) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *VirtualGateway) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VirtualNode) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VirtualRouter) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VirtualService) Hub() {} diff --git a/apis/appmesh/v1beta2/zz_generated.deepcopy.go b/apis/appmesh/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c427c83bf7 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,18659 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogFileInitParameters) DeepCopyInto(out *AccessLogFileInitParameters) { + *out = *in + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(FormatInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogFileInitParameters. +func (in *AccessLogFileInitParameters) DeepCopy() *AccessLogFileInitParameters { + if in == nil { + return nil + } + out := new(AccessLogFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessLogFileObservation) DeepCopyInto(out *AccessLogFileObservation) { + *out = *in + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(FormatObservation) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogFileObservation. +func (in *AccessLogFileObservation) DeepCopy() *AccessLogFileObservation { + if in == nil { + return nil + } + out := new(AccessLogFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogFileParameters) DeepCopyInto(out *AccessLogFileParameters) { + *out = *in + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(FormatParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogFileParameters. +func (in *AccessLogFileParameters) DeepCopy() *AccessLogFileParameters { + if in == nil { + return nil + } + out := new(AccessLogFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogInitParameters) DeepCopyInto(out *AccessLogInitParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(AccessLogFileInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogInitParameters. 
+func (in *AccessLogInitParameters) DeepCopy() *AccessLogInitParameters { + if in == nil { + return nil + } + out := new(AccessLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogObservation) DeepCopyInto(out *AccessLogObservation) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(AccessLogFileObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogObservation. +func (in *AccessLogObservation) DeepCopy() *AccessLogObservation { + if in == nil { + return nil + } + out := new(AccessLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogParameters) DeepCopyInto(out *AccessLogParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(AccessLogFileParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogParameters. +func (in *AccessLogParameters) DeepCopy() *AccessLogParameters { + if in == nil { + return nil + } + out := new(AccessLogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AcmInitParameters) DeepCopyInto(out *AcmInitParameters) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcmInitParameters. +func (in *AcmInitParameters) DeepCopy() *AcmInitParameters { + if in == nil { + return nil + } + out := new(AcmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcmObservation) DeepCopyInto(out *AcmObservation) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcmObservation. +func (in *AcmObservation) DeepCopy() *AcmObservation { + if in == nil { + return nil + } + out := new(AcmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcmParameters) DeepCopyInto(out *AcmParameters) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcmParameters. 
+func (in *AcmParameters) DeepCopy() *AcmParameters { + if in == nil { + return nil + } + out := new(AcmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. 
+func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionRewriteInitParameters) DeepCopyInto(out *ActionRewriteInitParameters) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(RewriteHostnameInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(RewritePathInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(RewritePrefixInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionRewriteInitParameters. +func (in *ActionRewriteInitParameters) DeepCopy() *ActionRewriteInitParameters { + if in == nil { + return nil + } + out := new(ActionRewriteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionRewriteObservation) DeepCopyInto(out *ActionRewriteObservation) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(RewriteHostnameObservation) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(RewritePathObservation) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(RewritePrefixObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionRewriteObservation. 
+func (in *ActionRewriteObservation) DeepCopy() *ActionRewriteObservation { + if in == nil { + return nil + } + out := new(ActionRewriteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionRewriteParameters) DeepCopyInto(out *ActionRewriteParameters) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(RewriteHostnameParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(RewritePathParameters) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(RewritePrefixParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionRewriteParameters. +func (in *ActionRewriteParameters) DeepCopy() *ActionRewriteParameters { + if in == nil { + return nil + } + out := new(ActionRewriteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionTargetInitParameters) DeepCopyInto(out *ActionTargetInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(TargetVirtualServiceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionTargetInitParameters. +func (in *ActionTargetInitParameters) DeepCopy() *ActionTargetInitParameters { + if in == nil { + return nil + } + out := new(ActionTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ActionTargetObservation) DeepCopyInto(out *ActionTargetObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(TargetVirtualServiceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionTargetObservation. +func (in *ActionTargetObservation) DeepCopy() *ActionTargetObservation { + if in == nil { + return nil + } + out := new(ActionTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionTargetParameters) DeepCopyInto(out *ActionTargetParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(TargetVirtualServiceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionTargetParameters. +func (in *ActionTargetParameters) DeepCopy() *ActionTargetParameters { + if in == nil { + return nil + } + out := new(ActionTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionTargetVirtualServiceInitParameters) DeepCopyInto(out *ActionTargetVirtualServiceInitParameters) { + *out = *in + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } + if in.VirtualServiceNameRef != nil { + in, out := &in.VirtualServiceNameRef, &out.VirtualServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualServiceNameSelector != nil { + in, out := &in.VirtualServiceNameSelector, &out.VirtualServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionTargetVirtualServiceInitParameters. +func (in *ActionTargetVirtualServiceInitParameters) DeepCopy() *ActionTargetVirtualServiceInitParameters { + if in == nil { + return nil + } + out := new(ActionTargetVirtualServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionTargetVirtualServiceObservation) DeepCopyInto(out *ActionTargetVirtualServiceObservation) { + *out = *in + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionTargetVirtualServiceObservation. +func (in *ActionTargetVirtualServiceObservation) DeepCopy() *ActionTargetVirtualServiceObservation { + if in == nil { + return nil + } + out := new(ActionTargetVirtualServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionTargetVirtualServiceParameters) DeepCopyInto(out *ActionTargetVirtualServiceParameters) { + *out = *in + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } + if in.VirtualServiceNameRef != nil { + in, out := &in.VirtualServiceNameRef, &out.VirtualServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualServiceNameSelector != nil { + in, out := &in.VirtualServiceNameSelector, &out.VirtualServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionTargetVirtualServiceParameters. +func (in *ActionTargetVirtualServiceParameters) DeepCopy() *ActionTargetVirtualServiceParameters { + if in == nil { + return nil + } + out := new(ActionTargetVirtualServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionWeightedTargetInitParameters) DeepCopyInto(out *ActionWeightedTargetInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionWeightedTargetInitParameters. +func (in *ActionWeightedTargetInitParameters) DeepCopy() *ActionWeightedTargetInitParameters { + if in == nil { + return nil + } + out := new(ActionWeightedTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionWeightedTargetObservation) DeepCopyInto(out *ActionWeightedTargetObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionWeightedTargetObservation. +func (in *ActionWeightedTargetObservation) DeepCopy() *ActionWeightedTargetObservation { + if in == nil { + return nil + } + out := new(ActionWeightedTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionWeightedTargetParameters) DeepCopyInto(out *ActionWeightedTargetParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionWeightedTargetParameters. +func (in *ActionWeightedTargetParameters) DeepCopy() *ActionWeightedTargetParameters { + if in == nil { + return nil + } + out := new(ActionWeightedTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AwsCloudMapInitParameters) DeepCopyInto(out *AwsCloudMapInitParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NamespaceName != nil { + in, out := &in.NamespaceName, &out.NamespaceName + *out = new(string) + **out = **in + } + if in.NamespaceNameRef != nil { + in, out := &in.NamespaceNameRef, &out.NamespaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NamespaceNameSelector != nil { + in, out := &in.NamespaceNameSelector, &out.NamespaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsCloudMapInitParameters. +func (in *AwsCloudMapInitParameters) DeepCopy() *AwsCloudMapInitParameters { + if in == nil { + return nil + } + out := new(AwsCloudMapInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AwsCloudMapObservation) DeepCopyInto(out *AwsCloudMapObservation) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NamespaceName != nil { + in, out := &in.NamespaceName, &out.NamespaceName + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsCloudMapObservation. +func (in *AwsCloudMapObservation) DeepCopy() *AwsCloudMapObservation { + if in == nil { + return nil + } + out := new(AwsCloudMapObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AwsCloudMapParameters) DeepCopyInto(out *AwsCloudMapParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NamespaceName != nil { + in, out := &in.NamespaceName, &out.NamespaceName + *out = new(string) + **out = **in + } + if in.NamespaceNameRef != nil { + in, out := &in.NamespaceNameRef, &out.NamespaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NamespaceNameSelector != nil { + in, out := &in.NamespaceNameSelector, &out.NamespaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsCloudMapParameters. +func (in *AwsCloudMapParameters) DeepCopy() *AwsCloudMapParameters { + if in == nil { + return nil + } + out := new(AwsCloudMapParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsClientPolicyInitParameters) DeepCopyInto(out *BackendDefaultsClientPolicyInitParameters) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(BackendDefaultsClientPolicyTLSInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyInitParameters. 
+func (in *BackendDefaultsClientPolicyInitParameters) DeepCopy() *BackendDefaultsClientPolicyInitParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsClientPolicyObservation) DeepCopyInto(out *BackendDefaultsClientPolicyObservation) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(BackendDefaultsClientPolicyTLSObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyObservation. +func (in *BackendDefaultsClientPolicyObservation) DeepCopy() *BackendDefaultsClientPolicyObservation { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsClientPolicyParameters) DeepCopyInto(out *BackendDefaultsClientPolicyParameters) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(BackendDefaultsClientPolicyTLSParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyParameters. +func (in *BackendDefaultsClientPolicyParameters) DeepCopy() *BackendDefaultsClientPolicyParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendDefaultsClientPolicyTLSCertificateInitParameters) DeepCopyInto(out *BackendDefaultsClientPolicyTLSCertificateInitParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ClientPolicyTLSCertificateFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ClientPolicyTLSCertificateSdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyTLSCertificateInitParameters. +func (in *BackendDefaultsClientPolicyTLSCertificateInitParameters) DeepCopy() *BackendDefaultsClientPolicyTLSCertificateInitParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyTLSCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsClientPolicyTLSCertificateObservation) DeepCopyInto(out *BackendDefaultsClientPolicyTLSCertificateObservation) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ClientPolicyTLSCertificateFileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ClientPolicyTLSCertificateSdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyTLSCertificateObservation. +func (in *BackendDefaultsClientPolicyTLSCertificateObservation) DeepCopy() *BackendDefaultsClientPolicyTLSCertificateObservation { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyTLSCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendDefaultsClientPolicyTLSCertificateParameters) DeepCopyInto(out *BackendDefaultsClientPolicyTLSCertificateParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ClientPolicyTLSCertificateFileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ClientPolicyTLSCertificateSdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyTLSCertificateParameters. +func (in *BackendDefaultsClientPolicyTLSCertificateParameters) DeepCopy() *BackendDefaultsClientPolicyTLSCertificateParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyTLSCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsClientPolicyTLSInitParameters) DeepCopyInto(out *BackendDefaultsClientPolicyTLSInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(BackendDefaultsClientPolicyTLSCertificateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(BackendDefaultsClientPolicyTLSValidationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyTLSInitParameters. 
+func (in *BackendDefaultsClientPolicyTLSInitParameters) DeepCopy() *BackendDefaultsClientPolicyTLSInitParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyTLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsClientPolicyTLSObservation) DeepCopyInto(out *BackendDefaultsClientPolicyTLSObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(BackendDefaultsClientPolicyTLSCertificateObservation) + (*in).DeepCopyInto(*out) + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(BackendDefaultsClientPolicyTLSValidationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyTLSObservation. +func (in *BackendDefaultsClientPolicyTLSObservation) DeepCopy() *BackendDefaultsClientPolicyTLSObservation { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyTLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendDefaultsClientPolicyTLSParameters) DeepCopyInto(out *BackendDefaultsClientPolicyTLSParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(BackendDefaultsClientPolicyTLSCertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(BackendDefaultsClientPolicyTLSValidationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyTLSParameters. +func (in *BackendDefaultsClientPolicyTLSParameters) DeepCopy() *BackendDefaultsClientPolicyTLSParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyTLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsClientPolicyTLSValidationInitParameters) DeepCopyInto(out *BackendDefaultsClientPolicyTLSValidationInitParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(ClientPolicyTLSValidationSubjectAlternativeNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(ClientPolicyTLSValidationTrustInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyTLSValidationInitParameters. 
+func (in *BackendDefaultsClientPolicyTLSValidationInitParameters) DeepCopy() *BackendDefaultsClientPolicyTLSValidationInitParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyTLSValidationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsClientPolicyTLSValidationObservation) DeepCopyInto(out *BackendDefaultsClientPolicyTLSValidationObservation) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(ClientPolicyTLSValidationSubjectAlternativeNamesObservation) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(ClientPolicyTLSValidationTrustObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyTLSValidationObservation. +func (in *BackendDefaultsClientPolicyTLSValidationObservation) DeepCopy() *BackendDefaultsClientPolicyTLSValidationObservation { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyTLSValidationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendDefaultsClientPolicyTLSValidationParameters) DeepCopyInto(out *BackendDefaultsClientPolicyTLSValidationParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(ClientPolicyTLSValidationSubjectAlternativeNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(ClientPolicyTLSValidationTrustParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsClientPolicyTLSValidationParameters. +func (in *BackendDefaultsClientPolicyTLSValidationParameters) DeepCopy() *BackendDefaultsClientPolicyTLSValidationParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsClientPolicyTLSValidationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsInitParameters) DeepCopyInto(out *BackendDefaultsInitParameters) { + *out = *in + if in.ClientPolicy != nil { + in, out := &in.ClientPolicy, &out.ClientPolicy + *out = new(ClientPolicyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsInitParameters. +func (in *BackendDefaultsInitParameters) DeepCopy() *BackendDefaultsInitParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendDefaultsObservation) DeepCopyInto(out *BackendDefaultsObservation) { + *out = *in + if in.ClientPolicy != nil { + in, out := &in.ClientPolicy, &out.ClientPolicy + *out = new(ClientPolicyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsObservation. +func (in *BackendDefaultsObservation) DeepCopy() *BackendDefaultsObservation { + if in == nil { + return nil + } + out := new(BackendDefaultsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendDefaultsParameters) DeepCopyInto(out *BackendDefaultsParameters) { + *out = *in + if in.ClientPolicy != nil { + in, out := &in.ClientPolicy, &out.ClientPolicy + *out = new(ClientPolicyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendDefaultsParameters. +func (in *BackendDefaultsParameters) DeepCopy() *BackendDefaultsParameters { + if in == nil { + return nil + } + out := new(BackendDefaultsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendInitParameters) DeepCopyInto(out *BackendInitParameters) { + *out = *in + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(BackendVirtualServiceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendInitParameters. 
+func (in *BackendInitParameters) DeepCopy() *BackendInitParameters { + if in == nil { + return nil + } + out := new(BackendInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendObservation) DeepCopyInto(out *BackendObservation) { + *out = *in + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(BackendVirtualServiceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendObservation. +func (in *BackendObservation) DeepCopy() *BackendObservation { + if in == nil { + return nil + } + out := new(BackendObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendParameters) DeepCopyInto(out *BackendParameters) { + *out = *in + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(BackendVirtualServiceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendParameters. +func (in *BackendParameters) DeepCopy() *BackendParameters { + if in == nil { + return nil + } + out := new(BackendParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendVirtualServiceInitParameters) DeepCopyInto(out *BackendVirtualServiceInitParameters) { + *out = *in + if in.ClientPolicy != nil { + in, out := &in.ClientPolicy, &out.ClientPolicy + *out = new(VirtualServiceClientPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendVirtualServiceInitParameters. +func (in *BackendVirtualServiceInitParameters) DeepCopy() *BackendVirtualServiceInitParameters { + if in == nil { + return nil + } + out := new(BackendVirtualServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendVirtualServiceObservation) DeepCopyInto(out *BackendVirtualServiceObservation) { + *out = *in + if in.ClientPolicy != nil { + in, out := &in.ClientPolicy, &out.ClientPolicy + *out = new(VirtualServiceClientPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendVirtualServiceObservation. +func (in *BackendVirtualServiceObservation) DeepCopy() *BackendVirtualServiceObservation { + if in == nil { + return nil + } + out := new(BackendVirtualServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendVirtualServiceParameters) DeepCopyInto(out *BackendVirtualServiceParameters) { + *out = *in + if in.ClientPolicy != nil { + in, out := &in.ClientPolicy, &out.ClientPolicy + *out = new(VirtualServiceClientPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendVirtualServiceParameters. +func (in *BackendVirtualServiceParameters) DeepCopy() *BackendVirtualServiceParameters { + if in == nil { + return nil + } + out := new(BackendVirtualServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseEjectionDurationInitParameters) DeepCopyInto(out *BaseEjectionDurationInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseEjectionDurationInitParameters. +func (in *BaseEjectionDurationInitParameters) DeepCopy() *BaseEjectionDurationInitParameters { + if in == nil { + return nil + } + out := new(BaseEjectionDurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BaseEjectionDurationObservation) DeepCopyInto(out *BaseEjectionDurationObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseEjectionDurationObservation. +func (in *BaseEjectionDurationObservation) DeepCopy() *BaseEjectionDurationObservation { + if in == nil { + return nil + } + out := new(BaseEjectionDurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseEjectionDurationParameters) DeepCopyInto(out *BaseEjectionDurationParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseEjectionDurationParameters. +func (in *BaseEjectionDurationParameters) DeepCopy() *BaseEjectionDurationParameters { + if in == nil { + return nil + } + out := new(BaseEjectionDurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAcmInitParameters) DeepCopyInto(out *CertificateAcmInitParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.CertificateArnRef != nil { + in, out := &in.CertificateArnRef, &out.CertificateArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateArnSelector != nil { + in, out := &in.CertificateArnSelector, &out.CertificateArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAcmInitParameters. +func (in *CertificateAcmInitParameters) DeepCopy() *CertificateAcmInitParameters { + if in == nil { + return nil + } + out := new(CertificateAcmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAcmObservation) DeepCopyInto(out *CertificateAcmObservation) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAcmObservation. +func (in *CertificateAcmObservation) DeepCopy() *CertificateAcmObservation { + if in == nil { + return nil + } + out := new(CertificateAcmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAcmParameters) DeepCopyInto(out *CertificateAcmParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.CertificateArnRef != nil { + in, out := &in.CertificateArnRef, &out.CertificateArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateArnSelector != nil { + in, out := &in.CertificateArnSelector, &out.CertificateArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAcmParameters. +func (in *CertificateAcmParameters) DeepCopy() *CertificateAcmParameters { + if in == nil { + return nil + } + out := new(CertificateAcmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateFileInitParameters) DeepCopyInto(out *CertificateFileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateFileInitParameters. +func (in *CertificateFileInitParameters) DeepCopy() *CertificateFileInitParameters { + if in == nil { + return nil + } + out := new(CertificateFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateFileObservation) DeepCopyInto(out *CertificateFileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateFileObservation. +func (in *CertificateFileObservation) DeepCopy() *CertificateFileObservation { + if in == nil { + return nil + } + out := new(CertificateFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateFileParameters) DeepCopyInto(out *CertificateFileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateFileParameters. +func (in *CertificateFileParameters) DeepCopy() *CertificateFileParameters { + if in == nil { + return nil + } + out := new(CertificateFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(FileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(SdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. +func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(FileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(SdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. +func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(FileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(SdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. 
+func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSdsInitParameters) DeepCopyInto(out *CertificateSdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSdsInitParameters. +func (in *CertificateSdsInitParameters) DeepCopy() *CertificateSdsInitParameters { + if in == nil { + return nil + } + out := new(CertificateSdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSdsObservation) DeepCopyInto(out *CertificateSdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSdsObservation. +func (in *CertificateSdsObservation) DeepCopy() *CertificateSdsObservation { + if in == nil { + return nil + } + out := new(CertificateSdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSdsParameters) DeepCopyInto(out *CertificateSdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSdsParameters. 
+func (in *CertificateSdsParameters) DeepCopy() *CertificateSdsParameters { + if in == nil { + return nil + } + out := new(CertificateSdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyInitParameters) DeepCopyInto(out *ClientPolicyInitParameters) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyInitParameters. +func (in *ClientPolicyInitParameters) DeepCopy() *ClientPolicyInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyObservation) DeepCopyInto(out *ClientPolicyObservation) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyObservation. +func (in *ClientPolicyObservation) DeepCopy() *ClientPolicyObservation { + if in == nil { + return nil + } + out := new(ClientPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyParameters) DeepCopyInto(out *ClientPolicyParameters) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyParameters. 
+func (in *ClientPolicyParameters) DeepCopy() *ClientPolicyParameters { + if in == nil { + return nil + } + out := new(ClientPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSCertificateFileInitParameters) DeepCopyInto(out *ClientPolicyTLSCertificateFileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSCertificateFileInitParameters. +func (in *ClientPolicyTLSCertificateFileInitParameters) DeepCopy() *ClientPolicyTLSCertificateFileInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSCertificateFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSCertificateFileObservation) DeepCopyInto(out *ClientPolicyTLSCertificateFileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSCertificateFileObservation. 
+func (in *ClientPolicyTLSCertificateFileObservation) DeepCopy() *ClientPolicyTLSCertificateFileObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSCertificateFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSCertificateFileParameters) DeepCopyInto(out *ClientPolicyTLSCertificateFileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSCertificateFileParameters. +func (in *ClientPolicyTLSCertificateFileParameters) DeepCopy() *ClientPolicyTLSCertificateFileParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSCertificateFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSCertificateInitParameters) DeepCopyInto(out *ClientPolicyTLSCertificateInitParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(TLSCertificateFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(TLSCertificateSdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSCertificateInitParameters. 
+func (in *ClientPolicyTLSCertificateInitParameters) DeepCopy() *ClientPolicyTLSCertificateInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSCertificateObservation) DeepCopyInto(out *ClientPolicyTLSCertificateObservation) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(TLSCertificateFileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(TLSCertificateSdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSCertificateObservation. +func (in *ClientPolicyTLSCertificateObservation) DeepCopy() *ClientPolicyTLSCertificateObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSCertificateParameters) DeepCopyInto(out *ClientPolicyTLSCertificateParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(TLSCertificateFileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(TLSCertificateSdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSCertificateParameters. 
+func (in *ClientPolicyTLSCertificateParameters) DeepCopy() *ClientPolicyTLSCertificateParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSCertificateSdsInitParameters) DeepCopyInto(out *ClientPolicyTLSCertificateSdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSCertificateSdsInitParameters. +func (in *ClientPolicyTLSCertificateSdsInitParameters) DeepCopy() *ClientPolicyTLSCertificateSdsInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSCertificateSdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSCertificateSdsObservation) DeepCopyInto(out *ClientPolicyTLSCertificateSdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSCertificateSdsObservation. +func (in *ClientPolicyTLSCertificateSdsObservation) DeepCopy() *ClientPolicyTLSCertificateSdsObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSCertificateSdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientPolicyTLSCertificateSdsParameters) DeepCopyInto(out *ClientPolicyTLSCertificateSdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSCertificateSdsParameters. +func (in *ClientPolicyTLSCertificateSdsParameters) DeepCopy() *ClientPolicyTLSCertificateSdsParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSCertificateSdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSInitParameters) DeepCopyInto(out *ClientPolicyTLSInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(ClientPolicyTLSCertificateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(ClientPolicyTLSValidationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSInitParameters. +func (in *ClientPolicyTLSInitParameters) DeepCopy() *ClientPolicyTLSInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientPolicyTLSObservation) DeepCopyInto(out *ClientPolicyTLSObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(ClientPolicyTLSCertificateObservation) + (*in).DeepCopyInto(*out) + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(ClientPolicyTLSValidationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSObservation. +func (in *ClientPolicyTLSObservation) DeepCopy() *ClientPolicyTLSObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientPolicyTLSParameters) DeepCopyInto(out *ClientPolicyTLSParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(ClientPolicyTLSCertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(ClientPolicyTLSValidationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSParameters. +func (in *ClientPolicyTLSParameters) DeepCopy() *ClientPolicyTLSParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationInitParameters) DeepCopyInto(out *ClientPolicyTLSValidationInitParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(TLSValidationSubjectAlternativeNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(TLSValidationTrustInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationInitParameters. 
+func (in *ClientPolicyTLSValidationInitParameters) DeepCopy() *ClientPolicyTLSValidationInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationObservation) DeepCopyInto(out *ClientPolicyTLSValidationObservation) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(TLSValidationSubjectAlternativeNamesObservation) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(TLSValidationTrustObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationObservation. +func (in *ClientPolicyTLSValidationObservation) DeepCopy() *ClientPolicyTLSValidationObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationParameters) DeepCopyInto(out *ClientPolicyTLSValidationParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(TLSValidationSubjectAlternativeNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(TLSValidationTrustParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationParameters. 
+func (in *ClientPolicyTLSValidationParameters) DeepCopy() *ClientPolicyTLSValidationParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationSubjectAlternativeNamesInitParameters) DeepCopyInto(out *ClientPolicyTLSValidationSubjectAlternativeNamesInitParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(ClientPolicyTLSValidationSubjectAlternativeNamesMatchInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationSubjectAlternativeNamesInitParameters. +func (in *ClientPolicyTLSValidationSubjectAlternativeNamesInitParameters) DeepCopy() *ClientPolicyTLSValidationSubjectAlternativeNamesInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationSubjectAlternativeNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationSubjectAlternativeNamesMatchInitParameters) DeepCopyInto(out *ClientPolicyTLSValidationSubjectAlternativeNamesMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationSubjectAlternativeNamesMatchInitParameters. 
+func (in *ClientPolicyTLSValidationSubjectAlternativeNamesMatchInitParameters) DeepCopy() *ClientPolicyTLSValidationSubjectAlternativeNamesMatchInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationSubjectAlternativeNamesMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationSubjectAlternativeNamesMatchObservation) DeepCopyInto(out *ClientPolicyTLSValidationSubjectAlternativeNamesMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationSubjectAlternativeNamesMatchObservation. +func (in *ClientPolicyTLSValidationSubjectAlternativeNamesMatchObservation) DeepCopy() *ClientPolicyTLSValidationSubjectAlternativeNamesMatchObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationSubjectAlternativeNamesMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationSubjectAlternativeNamesMatchParameters) DeepCopyInto(out *ClientPolicyTLSValidationSubjectAlternativeNamesMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationSubjectAlternativeNamesMatchParameters. 
+func (in *ClientPolicyTLSValidationSubjectAlternativeNamesMatchParameters) DeepCopy() *ClientPolicyTLSValidationSubjectAlternativeNamesMatchParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationSubjectAlternativeNamesMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationSubjectAlternativeNamesObservation) DeepCopyInto(out *ClientPolicyTLSValidationSubjectAlternativeNamesObservation) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(ClientPolicyTLSValidationSubjectAlternativeNamesMatchObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationSubjectAlternativeNamesObservation. +func (in *ClientPolicyTLSValidationSubjectAlternativeNamesObservation) DeepCopy() *ClientPolicyTLSValidationSubjectAlternativeNamesObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationSubjectAlternativeNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationSubjectAlternativeNamesParameters) DeepCopyInto(out *ClientPolicyTLSValidationSubjectAlternativeNamesParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(ClientPolicyTLSValidationSubjectAlternativeNamesMatchParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationSubjectAlternativeNamesParameters. 
+func (in *ClientPolicyTLSValidationSubjectAlternativeNamesParameters) DeepCopy() *ClientPolicyTLSValidationSubjectAlternativeNamesParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationSubjectAlternativeNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationTrustFileInitParameters) DeepCopyInto(out *ClientPolicyTLSValidationTrustFileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationTrustFileInitParameters. +func (in *ClientPolicyTLSValidationTrustFileInitParameters) DeepCopy() *ClientPolicyTLSValidationTrustFileInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationTrustFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationTrustFileObservation) DeepCopyInto(out *ClientPolicyTLSValidationTrustFileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationTrustFileObservation. +func (in *ClientPolicyTLSValidationTrustFileObservation) DeepCopy() *ClientPolicyTLSValidationTrustFileObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationTrustFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientPolicyTLSValidationTrustFileParameters) DeepCopyInto(out *ClientPolicyTLSValidationTrustFileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationTrustFileParameters. +func (in *ClientPolicyTLSValidationTrustFileParameters) DeepCopy() *ClientPolicyTLSValidationTrustFileParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationTrustFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationTrustInitParameters) DeepCopyInto(out *ClientPolicyTLSValidationTrustInitParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(ValidationTrustAcmInitParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ClientPolicyTLSValidationTrustFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ClientPolicyTLSValidationTrustSdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationTrustInitParameters. +func (in *ClientPolicyTLSValidationTrustInitParameters) DeepCopy() *ClientPolicyTLSValidationTrustInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationTrustInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientPolicyTLSValidationTrustObservation) DeepCopyInto(out *ClientPolicyTLSValidationTrustObservation) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(ValidationTrustAcmObservation) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ClientPolicyTLSValidationTrustFileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ClientPolicyTLSValidationTrustSdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationTrustObservation. +func (in *ClientPolicyTLSValidationTrustObservation) DeepCopy() *ClientPolicyTLSValidationTrustObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationTrustObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationTrustParameters) DeepCopyInto(out *ClientPolicyTLSValidationTrustParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(ValidationTrustAcmParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ClientPolicyTLSValidationTrustFileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ClientPolicyTLSValidationTrustSdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationTrustParameters. 
+func (in *ClientPolicyTLSValidationTrustParameters) DeepCopy() *ClientPolicyTLSValidationTrustParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationTrustParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationTrustSdsInitParameters) DeepCopyInto(out *ClientPolicyTLSValidationTrustSdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationTrustSdsInitParameters. +func (in *ClientPolicyTLSValidationTrustSdsInitParameters) DeepCopy() *ClientPolicyTLSValidationTrustSdsInitParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationTrustSdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPolicyTLSValidationTrustSdsObservation) DeepCopyInto(out *ClientPolicyTLSValidationTrustSdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationTrustSdsObservation. +func (in *ClientPolicyTLSValidationTrustSdsObservation) DeepCopy() *ClientPolicyTLSValidationTrustSdsObservation { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationTrustSdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientPolicyTLSValidationTrustSdsParameters) DeepCopyInto(out *ClientPolicyTLSValidationTrustSdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPolicyTLSValidationTrustSdsParameters. +func (in *ClientPolicyTLSValidationTrustSdsParameters) DeepCopy() *ClientPolicyTLSValidationTrustSdsParameters { + if in == nil { + return nil + } + out := new(ClientPolicyTLSValidationTrustSdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolGRPCInitParameters) DeepCopyInto(out *ConnectionPoolGRPCInitParameters) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolGRPCInitParameters. +func (in *ConnectionPoolGRPCInitParameters) DeepCopy() *ConnectionPoolGRPCInitParameters { + if in == nil { + return nil + } + out := new(ConnectionPoolGRPCInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolGRPCObservation) DeepCopyInto(out *ConnectionPoolGRPCObservation) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolGRPCObservation. 
+func (in *ConnectionPoolGRPCObservation) DeepCopy() *ConnectionPoolGRPCObservation { + if in == nil { + return nil + } + out := new(ConnectionPoolGRPCObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolGRPCParameters) DeepCopyInto(out *ConnectionPoolGRPCParameters) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolGRPCParameters. +func (in *ConnectionPoolGRPCParameters) DeepCopy() *ConnectionPoolGRPCParameters { + if in == nil { + return nil + } + out := new(ConnectionPoolGRPCParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolHTTPInitParameters) DeepCopyInto(out *ConnectionPoolHTTPInitParameters) { + *out = *in + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(float64) + **out = **in + } + if in.MaxPendingRequests != nil { + in, out := &in.MaxPendingRequests, &out.MaxPendingRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolHTTPInitParameters. +func (in *ConnectionPoolHTTPInitParameters) DeepCopy() *ConnectionPoolHTTPInitParameters { + if in == nil { + return nil + } + out := new(ConnectionPoolHTTPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionPoolHTTPObservation) DeepCopyInto(out *ConnectionPoolHTTPObservation) { + *out = *in + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(float64) + **out = **in + } + if in.MaxPendingRequests != nil { + in, out := &in.MaxPendingRequests, &out.MaxPendingRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolHTTPObservation. +func (in *ConnectionPoolHTTPObservation) DeepCopy() *ConnectionPoolHTTPObservation { + if in == nil { + return nil + } + out := new(ConnectionPoolHTTPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolHTTPParameters) DeepCopyInto(out *ConnectionPoolHTTPParameters) { + *out = *in + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(float64) + **out = **in + } + if in.MaxPendingRequests != nil { + in, out := &in.MaxPendingRequests, &out.MaxPendingRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolHTTPParameters. +func (in *ConnectionPoolHTTPParameters) DeepCopy() *ConnectionPoolHTTPParameters { + if in == nil { + return nil + } + out := new(ConnectionPoolHTTPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolHttp2InitParameters) DeepCopyInto(out *ConnectionPoolHttp2InitParameters) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolHttp2InitParameters. 
+func (in *ConnectionPoolHttp2InitParameters) DeepCopy() *ConnectionPoolHttp2InitParameters { + if in == nil { + return nil + } + out := new(ConnectionPoolHttp2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolHttp2Observation) DeepCopyInto(out *ConnectionPoolHttp2Observation) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolHttp2Observation. +func (in *ConnectionPoolHttp2Observation) DeepCopy() *ConnectionPoolHttp2Observation { + if in == nil { + return nil + } + out := new(ConnectionPoolHttp2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolHttp2Parameters) DeepCopyInto(out *ConnectionPoolHttp2Parameters) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolHttp2Parameters. +func (in *ConnectionPoolHttp2Parameters) DeepCopy() *ConnectionPoolHttp2Parameters { + if in == nil { + return nil + } + out := new(ConnectionPoolHttp2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionPoolInitParameters) DeepCopyInto(out *ConnectionPoolInitParameters) { + *out = *in + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(GRPCInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(Http2InitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolInitParameters. +func (in *ConnectionPoolInitParameters) DeepCopy() *ConnectionPoolInitParameters { + if in == nil { + return nil + } + out := new(ConnectionPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolObservation) DeepCopyInto(out *ConnectionPoolObservation) { + *out = *in + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(GRPCObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPObservation) + (*in).DeepCopyInto(*out) + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(Http2Observation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolObservation. +func (in *ConnectionPoolObservation) DeepCopy() *ConnectionPoolObservation { + if in == nil { + return nil + } + out := new(ConnectionPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionPoolParameters) DeepCopyInto(out *ConnectionPoolParameters) { + *out = *in + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(GRPCParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPParameters) + (*in).DeepCopyInto(*out) + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(Http2Parameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolParameters. +func (in *ConnectionPoolParameters) DeepCopy() *ConnectionPoolParameters { + if in == nil { + return nil + } + out := new(ConnectionPoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSInitParameters) DeepCopyInto(out *DNSInitParameters) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.IPPreference != nil { + in, out := &in.IPPreference, &out.IPPreference + *out = new(string) + **out = **in + } + if in.ResponseType != nil { + in, out := &in.ResponseType, &out.ResponseType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSInitParameters. +func (in *DNSInitParameters) DeepCopy() *DNSInitParameters { + if in == nil { + return nil + } + out := new(DNSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSObservation) DeepCopyInto(out *DNSObservation) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.IPPreference != nil { + in, out := &in.IPPreference, &out.IPPreference + *out = new(string) + **out = **in + } + if in.ResponseType != nil { + in, out := &in.ResponseType, &out.ResponseType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSObservation. +func (in *DNSObservation) DeepCopy() *DNSObservation { + if in == nil { + return nil + } + out := new(DNSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSParameters) DeepCopyInto(out *DNSParameters) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.IPPreference != nil { + in, out := &in.IPPreference, &out.IPPreference + *out = new(string) + **out = **in + } + if in.ResponseType != nil { + in, out := &in.ResponseType, &out.ResponseType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSParameters. +func (in *DNSParameters) DeepCopy() *DNSParameters { + if in == nil { + return nil + } + out := new(DNSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressFilterInitParameters) DeepCopyInto(out *EgressFilterInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressFilterInitParameters. 
+func (in *EgressFilterInitParameters) DeepCopy() *EgressFilterInitParameters { + if in == nil { + return nil + } + out := new(EgressFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressFilterObservation) DeepCopyInto(out *EgressFilterObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressFilterObservation. +func (in *EgressFilterObservation) DeepCopy() *EgressFilterObservation { + if in == nil { + return nil + } + out := new(EgressFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressFilterParameters) DeepCopyInto(out *EgressFilterParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressFilterParameters. +func (in *EgressFilterParameters) DeepCopy() *EgressFilterParameters { + if in == nil { + return nil + } + out := new(EgressFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileFormatInitParameters) DeepCopyInto(out *FileFormatInitParameters) { + *out = *in + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = make([]FormatJSONInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileFormatInitParameters. +func (in *FileFormatInitParameters) DeepCopy() *FileFormatInitParameters { + if in == nil { + return nil + } + out := new(FileFormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileFormatObservation) DeepCopyInto(out *FileFormatObservation) { + *out = *in + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = make([]FormatJSONObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileFormatObservation. +func (in *FileFormatObservation) DeepCopy() *FileFormatObservation { + if in == nil { + return nil + } + out := new(FileFormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileFormatParameters) DeepCopyInto(out *FileFormatParameters) { + *out = *in + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = make([]FormatJSONParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileFormatParameters. +func (in *FileFormatParameters) DeepCopy() *FileFormatParameters { + if in == nil { + return nil + } + out := new(FileFormatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileInitParameters) DeepCopyInto(out *FileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileInitParameters. +func (in *FileInitParameters) DeepCopy() *FileInitParameters { + if in == nil { + return nil + } + out := new(FileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileObservation) DeepCopyInto(out *FileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileObservation. 
+func (in *FileObservation) DeepCopy() *FileObservation { + if in == nil { + return nil + } + out := new(FileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileParameters) DeepCopyInto(out *FileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileParameters. +func (in *FileParameters) DeepCopy() *FileParameters { + if in == nil { + return nil + } + out := new(FileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatInitParameters) DeepCopyInto(out *FormatInitParameters) { + *out = *in + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = make([]JSONInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatInitParameters. +func (in *FormatInitParameters) DeepCopy() *FormatInitParameters { + if in == nil { + return nil + } + out := new(FormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FormatJSONInitParameters) DeepCopyInto(out *FormatJSONInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatJSONInitParameters. +func (in *FormatJSONInitParameters) DeepCopy() *FormatJSONInitParameters { + if in == nil { + return nil + } + out := new(FormatJSONInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatJSONObservation) DeepCopyInto(out *FormatJSONObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatJSONObservation. +func (in *FormatJSONObservation) DeepCopy() *FormatJSONObservation { + if in == nil { + return nil + } + out := new(FormatJSONObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatJSONParameters) DeepCopyInto(out *FormatJSONParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatJSONParameters. 
+func (in *FormatJSONParameters) DeepCopy() *FormatJSONParameters { + if in == nil { + return nil + } + out := new(FormatJSONParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatObservation) DeepCopyInto(out *FormatObservation) { + *out = *in + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = make([]JSONObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatObservation. +func (in *FormatObservation) DeepCopy() *FormatObservation { + if in == nil { + return nil + } + out := new(FormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatParameters) DeepCopyInto(out *FormatParameters) { + *out = *in + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = make([]JSONParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatParameters. +func (in *FormatParameters) DeepCopy() *FormatParameters { + if in == nil { + return nil + } + out := new(FormatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCIdleInitParameters) DeepCopyInto(out *GRPCIdleInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCIdleInitParameters. +func (in *GRPCIdleInitParameters) DeepCopy() *GRPCIdleInitParameters { + if in == nil { + return nil + } + out := new(GRPCIdleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCIdleObservation) DeepCopyInto(out *GRPCIdleObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCIdleObservation. +func (in *GRPCIdleObservation) DeepCopy() *GRPCIdleObservation { + if in == nil { + return nil + } + out := new(GRPCIdleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCIdleParameters) DeepCopyInto(out *GRPCIdleParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCIdleParameters. 
+func (in *GRPCIdleParameters) DeepCopy() *GRPCIdleParameters { + if in == nil { + return nil + } + out := new(GRPCIdleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCInitParameters) DeepCopyInto(out *GRPCInitParameters) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCInitParameters. +func (in *GRPCInitParameters) DeepCopy() *GRPCInitParameters { + if in == nil { + return nil + } + out := new(GRPCInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCObservation) DeepCopyInto(out *GRPCObservation) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCObservation. +func (in *GRPCObservation) DeepCopy() *GRPCObservation { + if in == nil { + return nil + } + out := new(GRPCObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCParameters) DeepCopyInto(out *GRPCParameters) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCParameters. 
+func (in *GRPCParameters) DeepCopy() *GRPCParameters { + if in == nil { + return nil + } + out := new(GRPCParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCPerRequestInitParameters) DeepCopyInto(out *GRPCPerRequestInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCPerRequestInitParameters. +func (in *GRPCPerRequestInitParameters) DeepCopy() *GRPCPerRequestInitParameters { + if in == nil { + return nil + } + out := new(GRPCPerRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCPerRequestObservation) DeepCopyInto(out *GRPCPerRequestObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCPerRequestObservation. +func (in *GRPCPerRequestObservation) DeepCopy() *GRPCPerRequestObservation { + if in == nil { + return nil + } + out := new(GRPCPerRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCPerRequestParameters) DeepCopyInto(out *GRPCPerRequestParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCPerRequestParameters. +func (in *GRPCPerRequestParameters) DeepCopy() *GRPCPerRequestParameters { + if in == nil { + return nil + } + out := new(GRPCPerRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteActionInitParameters) DeepCopyInto(out *GRPCRouteActionInitParameters) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]WeightedTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteActionInitParameters. +func (in *GRPCRouteActionInitParameters) DeepCopy() *GRPCRouteActionInitParameters { + if in == nil { + return nil + } + out := new(GRPCRouteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteActionObservation) DeepCopyInto(out *GRPCRouteActionObservation) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]WeightedTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteActionObservation. 
+func (in *GRPCRouteActionObservation) DeepCopy() *GRPCRouteActionObservation { + if in == nil { + return nil + } + out := new(GRPCRouteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteActionParameters) DeepCopyInto(out *GRPCRouteActionParameters) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]WeightedTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteActionParameters. +func (in *GRPCRouteActionParameters) DeepCopy() *GRPCRouteActionParameters { + if in == nil { + return nil + } + out := new(GRPCRouteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteInitParameters) DeepCopyInto(out *GRPCRouteInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteInitParameters. +func (in *GRPCRouteInitParameters) DeepCopy() *GRPCRouteInitParameters { + if in == nil { + return nil + } + out := new(GRPCRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPCRouteMatchInitParameters) DeepCopyInto(out *GRPCRouteMatchInitParameters) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]MetadataInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MethodName != nil { + in, out := &in.MethodName, &out.MethodName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteMatchInitParameters. +func (in *GRPCRouteMatchInitParameters) DeepCopy() *GRPCRouteMatchInitParameters { + if in == nil { + return nil + } + out := new(GRPCRouteMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteMatchObservation) DeepCopyInto(out *GRPCRouteMatchObservation) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]MetadataObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MethodName != nil { + in, out := &in.MethodName, &out.MethodName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteMatchObservation. 
+func (in *GRPCRouteMatchObservation) DeepCopy() *GRPCRouteMatchObservation { + if in == nil { + return nil + } + out := new(GRPCRouteMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteMatchParameters) DeepCopyInto(out *GRPCRouteMatchParameters) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]MetadataParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MethodName != nil { + in, out := &in.MethodName, &out.MethodName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteMatchParameters. +func (in *GRPCRouteMatchParameters) DeepCopy() *GRPCRouteMatchParameters { + if in == nil { + return nil + } + out := new(GRPCRouteMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteObservation) DeepCopyInto(out *GRPCRouteObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteObservation. 
+func (in *GRPCRouteObservation) DeepCopy() *GRPCRouteObservation { + if in == nil { + return nil + } + out := new(GRPCRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCRouteParameters) DeepCopyInto(out *GRPCRouteParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCRouteParameters. +func (in *GRPCRouteParameters) DeepCopy() *GRPCRouteParameters { + if in == nil { + return nil + } + out := new(GRPCRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayRoute) DeepCopyInto(out *GatewayRoute) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayRoute. +func (in *GatewayRoute) DeepCopy() *GatewayRoute { + if in == nil { + return nil + } + out := new(GatewayRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayRoute) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayRouteInitParameters) DeepCopyInto(out *GatewayRouteInitParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(SpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualGatewayName != nil { + in, out := &in.VirtualGatewayName, &out.VirtualGatewayName + *out = new(string) + **out = **in + } + if in.VirtualGatewayNameRef != nil { + in, out := &in.VirtualGatewayNameRef, &out.VirtualGatewayNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualGatewayNameSelector != nil { + in, out := &in.VirtualGatewayNameSelector, &out.VirtualGatewayNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayRouteInitParameters. +func (in *GatewayRouteInitParameters) DeepCopy() *GatewayRouteInitParameters { + if in == nil { + return nil + } + out := new(GatewayRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayRouteList) DeepCopyInto(out *GatewayRouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GatewayRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayRouteList. +func (in *GatewayRouteList) DeepCopy() *GatewayRouteList { + if in == nil { + return nil + } + out := new(GatewayRouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayRouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayRouteObservation) DeepCopyInto(out *GatewayRouteObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = new(string) + **out = **in + } + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceOwner != nil { + in, out := &in.ResourceOwner, &out.ResourceOwner + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(SpecObservation) + 
(*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualGatewayName != nil { + in, out := &in.VirtualGatewayName, &out.VirtualGatewayName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayRouteObservation. +func (in *GatewayRouteObservation) DeepCopy() *GatewayRouteObservation { + if in == nil { + return nil + } + out := new(GatewayRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayRouteParameters) DeepCopyInto(out *GatewayRouteParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(SpecParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualGatewayName != nil { + in, out := &in.VirtualGatewayName, &out.VirtualGatewayName + *out = new(string) + **out = **in + } + if in.VirtualGatewayNameRef != nil { + in, out := &in.VirtualGatewayNameRef, &out.VirtualGatewayNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualGatewayNameSelector != nil { + in, out := &in.VirtualGatewayNameSelector, &out.VirtualGatewayNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayRouteParameters. +func (in *GatewayRouteParameters) DeepCopy() *GatewayRouteParameters { + if in == nil { + return nil + } + out := new(GatewayRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayRouteSpec) DeepCopyInto(out *GatewayRouteSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayRouteSpec. +func (in *GatewayRouteSpec) DeepCopy() *GatewayRouteSpec { + if in == nil { + return nil + } + out := new(GatewayRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayRouteStatus) DeepCopyInto(out *GatewayRouteStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayRouteStatus. +func (in *GatewayRouteStatus) DeepCopy() *GatewayRouteStatus { + if in == nil { + return nil + } + out := new(GatewayRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIdleInitParameters) DeepCopyInto(out *HTTPIdleInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIdleInitParameters. +func (in *HTTPIdleInitParameters) DeepCopy() *HTTPIdleInitParameters { + if in == nil { + return nil + } + out := new(HTTPIdleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPIdleObservation) DeepCopyInto(out *HTTPIdleObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIdleObservation. +func (in *HTTPIdleObservation) DeepCopy() *HTTPIdleObservation { + if in == nil { + return nil + } + out := new(HTTPIdleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIdleParameters) DeepCopyInto(out *HTTPIdleParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIdleParameters. +func (in *HTTPIdleParameters) DeepCopy() *HTTPIdleParameters { + if in == nil { + return nil + } + out := new(HTTPIdleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPInitParameters) DeepCopyInto(out *HTTPInitParameters) { + *out = *in + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(float64) + **out = **in + } + if in.MaxPendingRequests != nil { + in, out := &in.MaxPendingRequests, &out.MaxPendingRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPInitParameters. 
+func (in *HTTPInitParameters) DeepCopy() *HTTPInitParameters { + if in == nil { + return nil + } + out := new(HTTPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPObservation) DeepCopyInto(out *HTTPObservation) { + *out = *in + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(float64) + **out = **in + } + if in.MaxPendingRequests != nil { + in, out := &in.MaxPendingRequests, &out.MaxPendingRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPObservation. +func (in *HTTPObservation) DeepCopy() *HTTPObservation { + if in == nil { + return nil + } + out := new(HTTPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPParameters) DeepCopyInto(out *HTTPParameters) { + *out = *in + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(float64) + **out = **in + } + if in.MaxPendingRequests != nil { + in, out := &in.MaxPendingRequests, &out.MaxPendingRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPParameters. +func (in *HTTPParameters) DeepCopy() *HTTPParameters { + if in == nil { + return nil + } + out := new(HTTPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
// NOTE(review): generated deepcopy code — regenerate instead of hand-editing.
func (in *HTTPPerRequestInitParameters) DeepCopyInto(out *HTTPPerRequestInitParameters) {
	*out = *in
	if in.Unit != nil {
		in, out := &in.Unit, &out.Unit
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPerRequestInitParameters.
func (in *HTTPPerRequestInitParameters) DeepCopy() *HTTPPerRequestInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPPerRequestInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPPerRequestObservation) DeepCopyInto(out *HTTPPerRequestObservation) {
	*out = *in
	if in.Unit != nil {
		in, out := &in.Unit, &out.Unit
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPerRequestObservation.
func (in *HTTPPerRequestObservation) DeepCopy() *HTTPPerRequestObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPPerRequestObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPPerRequestParameters) DeepCopyInto(out *HTTPPerRequestParameters) {
	*out = *in
	if in.Unit != nil {
		in, out := &in.Unit, &out.Unit
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPerRequestParameters.
func (in *HTTPPerRequestParameters) DeepCopy() *HTTPPerRequestParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPPerRequestParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteActionInitParameters) DeepCopyInto(out *HTTPRouteActionInitParameters) {
	*out = *in
	if in.Rewrite != nil {
		in, out := &in.Rewrite, &out.Rewrite
		*out = new(ActionRewriteInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Target != nil {
		in, out := &in.Target, &out.Target
		*out = new(HTTPRouteActionTargetInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionInitParameters.
func (in *HTTPRouteActionInitParameters) DeepCopy() *HTTPRouteActionInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteActionInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteActionObservation) DeepCopyInto(out *HTTPRouteActionObservation) {
	*out = *in
	if in.Rewrite != nil {
		in, out := &in.Rewrite, &out.Rewrite
		*out = new(ActionRewriteObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Target != nil {
		in, out := &in.Target, &out.Target
		*out = new(HTTPRouteActionTargetObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionObservation.
func (in *HTTPRouteActionObservation) DeepCopy() *HTTPRouteActionObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteActionObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — regenerate instead of hand-editing.
func (in *HTTPRouteActionParameters) DeepCopyInto(out *HTTPRouteActionParameters) {
	*out = *in
	if in.Rewrite != nil {
		in, out := &in.Rewrite, &out.Rewrite
		*out = new(ActionRewriteParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Target != nil {
		in, out := &in.Target, &out.Target
		*out = new(HTTPRouteActionTargetParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionParameters.
func (in *HTTPRouteActionParameters) DeepCopy() *HTTPRouteActionParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteActionParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteActionTargetInitParameters) DeepCopyInto(out *HTTPRouteActionTargetInitParameters) {
	*out = *in
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.VirtualService != nil {
		in, out := &in.VirtualService, &out.VirtualService
		*out = new(ActionTargetVirtualServiceInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionTargetInitParameters.
func (in *HTTPRouteActionTargetInitParameters) DeepCopy() *HTTPRouteActionTargetInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteActionTargetInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteActionTargetObservation) DeepCopyInto(out *HTTPRouteActionTargetObservation) {
	*out = *in
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.VirtualService != nil {
		in, out := &in.VirtualService, &out.VirtualService
		*out = new(ActionTargetVirtualServiceObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionTargetObservation.
func (in *HTTPRouteActionTargetObservation) DeepCopy() *HTTPRouteActionTargetObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteActionTargetObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteActionTargetParameters) DeepCopyInto(out *HTTPRouteActionTargetParameters) {
	*out = *in
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.VirtualService != nil {
		in, out := &in.VirtualService, &out.VirtualService
		*out = new(ActionTargetVirtualServiceParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionTargetParameters.
func (in *HTTPRouteActionTargetParameters) DeepCopy() *HTTPRouteActionTargetParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteActionTargetParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — regenerate instead of hand-editing.
// NOTE(review): v1.Reference/v1.Selector here are crossplane-runtime cross-resource
// reference types — TODO confirm against the file's import block (outside this view).
func (in *HTTPRouteActionWeightedTargetInitParameters) DeepCopyInto(out *HTTPRouteActionWeightedTargetInitParameters) {
	*out = *in
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.VirtualNode != nil {
		in, out := &in.VirtualNode, &out.VirtualNode
		*out = new(string)
		**out = **in
	}
	if in.VirtualNodeRef != nil {
		in, out := &in.VirtualNodeRef, &out.VirtualNodeRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.VirtualNodeSelector != nil {
		in, out := &in.VirtualNodeSelector, &out.VirtualNodeSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionWeightedTargetInitParameters.
func (in *HTTPRouteActionWeightedTargetInitParameters) DeepCopy() *HTTPRouteActionWeightedTargetInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteActionWeightedTargetInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): the Observation variant carries no Ref/Selector fields — they are
// spec-only, presumably by upjet convention; verify against the type definitions.
func (in *HTTPRouteActionWeightedTargetObservation) DeepCopyInto(out *HTTPRouteActionWeightedTargetObservation) {
	*out = *in
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.VirtualNode != nil {
		in, out := &in.VirtualNode, &out.VirtualNode
		*out = new(string)
		**out = **in
	}
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionWeightedTargetObservation.
func (in *HTTPRouteActionWeightedTargetObservation) DeepCopy() *HTTPRouteActionWeightedTargetObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteActionWeightedTargetObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteActionWeightedTargetParameters) DeepCopyInto(out *HTTPRouteActionWeightedTargetParameters) {
	*out = *in
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.VirtualNode != nil {
		in, out := &in.VirtualNode, &out.VirtualNode
		*out = new(string)
		**out = **in
	}
	if in.VirtualNodeRef != nil {
		in, out := &in.VirtualNodeRef, &out.VirtualNodeRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.VirtualNodeSelector != nil {
		in, out := &in.VirtualNodeSelector, &out.VirtualNodeSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Weight != nil {
		in, out := &in.Weight, &out.Weight
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteActionWeightedTargetParameters.
func (in *HTTPRouteActionWeightedTargetParameters) DeepCopy() *HTTPRouteActionWeightedTargetParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteActionWeightedTargetParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — regenerate instead of hand-editing.
func (in *HTTPRouteInitParameters) DeepCopyInto(out *HTTPRouteInitParameters) {
	*out = *in
	if in.Action != nil {
		in, out := &in.Action, &out.Action
		*out = new(HTTPRouteActionInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(HTTPRouteMatchInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteInitParameters.
func (in *HTTPRouteInitParameters) DeepCopy() *HTTPRouteInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchHeaderInitParameters) DeepCopyInto(out *HTTPRouteMatchHeaderInitParameters) {
	*out = *in
	if in.Invert != nil {
		in, out := &in.Invert, &out.Invert
		*out = new(bool)
		**out = **in
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(HTTPRouteMatchHeaderMatchInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchHeaderInitParameters.
func (in *HTTPRouteMatchHeaderInitParameters) DeepCopy() *HTTPRouteMatchHeaderInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchHeaderInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchHeaderMatchInitParameters) DeepCopyInto(out *HTTPRouteMatchHeaderMatchInitParameters) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
	if in.Prefix != nil {
		in, out := &in.Prefix, &out.Prefix
		*out = new(string)
		**out = **in
	}
	if in.Range != nil {
		in, out := &in.Range, &out.Range
		*out = new(MatchHeaderMatchRangeInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Regex != nil {
		in, out := &in.Regex, &out.Regex
		*out = new(string)
		**out = **in
	}
	if in.Suffix != nil {
		in, out := &in.Suffix, &out.Suffix
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchHeaderMatchInitParameters.
func (in *HTTPRouteMatchHeaderMatchInitParameters) DeepCopy() *HTTPRouteMatchHeaderMatchInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchHeaderMatchInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchHeaderMatchObservation) DeepCopyInto(out *HTTPRouteMatchHeaderMatchObservation) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
	if in.Prefix != nil {
		in, out := &in.Prefix, &out.Prefix
		*out = new(string)
		**out = **in
	}
	if in.Range != nil {
		in, out := &in.Range, &out.Range
		*out = new(MatchHeaderMatchRangeObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Regex != nil {
		in, out := &in.Regex, &out.Regex
		*out = new(string)
		**out = **in
	}
	if in.Suffix != nil {
		in, out := &in.Suffix, &out.Suffix
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchHeaderMatchObservation.
func (in *HTTPRouteMatchHeaderMatchObservation) DeepCopy() *HTTPRouteMatchHeaderMatchObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchHeaderMatchObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchHeaderMatchParameters) DeepCopyInto(out *HTTPRouteMatchHeaderMatchParameters) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
	if in.Prefix != nil {
		in, out := &in.Prefix, &out.Prefix
		*out = new(string)
		**out = **in
	}
	if in.Range != nil {
		in, out := &in.Range, &out.Range
		*out = new(MatchHeaderMatchRangeParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Regex != nil {
		in, out := &in.Regex, &out.Regex
		*out = new(string)
		**out = **in
	}
	if in.Suffix != nil {
		in, out := &in.Suffix, &out.Suffix
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchHeaderMatchParameters.
func (in *HTTPRouteMatchHeaderMatchParameters) DeepCopy() *HTTPRouteMatchHeaderMatchParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchHeaderMatchParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — regenerate instead of hand-editing.
func (in *HTTPRouteMatchHeaderObservation) DeepCopyInto(out *HTTPRouteMatchHeaderObservation) {
	*out = *in
	if in.Invert != nil {
		in, out := &in.Invert, &out.Invert
		*out = new(bool)
		**out = **in
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(HTTPRouteMatchHeaderMatchObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchHeaderObservation.
func (in *HTTPRouteMatchHeaderObservation) DeepCopy() *HTTPRouteMatchHeaderObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchHeaderObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchHeaderParameters) DeepCopyInto(out *HTTPRouteMatchHeaderParameters) {
	*out = *in
	if in.Invert != nil {
		in, out := &in.Invert, &out.Invert
		*out = new(bool)
		**out = **in
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(HTTPRouteMatchHeaderMatchParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchHeaderParameters.
func (in *HTTPRouteMatchHeaderParameters) DeepCopy() *HTTPRouteMatchHeaderParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchHeaderParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchHostnameInitParameters) DeepCopyInto(out *HTTPRouteMatchHostnameInitParameters) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
	if in.Suffix != nil {
		in, out := &in.Suffix, &out.Suffix
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchHostnameInitParameters.
func (in *HTTPRouteMatchHostnameInitParameters) DeepCopy() *HTTPRouteMatchHostnameInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchHostnameInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchHostnameObservation) DeepCopyInto(out *HTTPRouteMatchHostnameObservation) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
	if in.Suffix != nil {
		in, out := &in.Suffix, &out.Suffix
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchHostnameObservation.
func (in *HTTPRouteMatchHostnameObservation) DeepCopy() *HTTPRouteMatchHostnameObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchHostnameObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchHostnameParameters) DeepCopyInto(out *HTTPRouteMatchHostnameParameters) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
	if in.Suffix != nil {
		in, out := &in.Suffix, &out.Suffix
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchHostnameParameters.
func (in *HTTPRouteMatchHostnameParameters) DeepCopy() *HTTPRouteMatchHostnameParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchHostnameParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchInitParameters) DeepCopyInto(out *HTTPRouteMatchInitParameters) {
	*out = *in
	if in.Header != nil {
		in, out := &in.Header, &out.Header
		*out = make([]MatchHeaderInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Hostname != nil {
		in, out := &in.Hostname, &out.Hostname
		*out = new(HTTPRouteMatchHostnameInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Path != nil {
		in, out := &in.Path, &out.Path
		*out = new(HTTPRouteMatchPathInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.Prefix != nil {
		in, out := &in.Prefix, &out.Prefix
		*out = new(string)
		**out = **in
	}
	if in.QueryParameter != nil {
		in, out := &in.QueryParameter, &out.QueryParameter
		*out = make([]MatchQueryParameterInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchInitParameters.
// NOTE(review): generated deepcopy code — regenerate instead of hand-editing.
func (in *HTTPRouteMatchInitParameters) DeepCopy() *HTTPRouteMatchInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchObservation) DeepCopyInto(out *HTTPRouteMatchObservation) {
	*out = *in
	if in.Header != nil {
		in, out := &in.Header, &out.Header
		*out = make([]MatchHeaderObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Hostname != nil {
		in, out := &in.Hostname, &out.Hostname
		*out = new(HTTPRouteMatchHostnameObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Path != nil {
		in, out := &in.Path, &out.Path
		*out = new(HTTPRouteMatchPathObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.Prefix != nil {
		in, out := &in.Prefix, &out.Prefix
		*out = new(string)
		**out = **in
	}
	if in.QueryParameter != nil {
		in, out := &in.QueryParameter, &out.QueryParameter
		*out = make([]MatchQueryParameterObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchObservation.
func (in *HTTPRouteMatchObservation) DeepCopy() *HTTPRouteMatchObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchParameters) DeepCopyInto(out *HTTPRouteMatchParameters) {
	*out = *in
	if in.Header != nil {
		in, out := &in.Header, &out.Header
		*out = make([]MatchHeaderParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Hostname != nil {
		in, out := &in.Hostname, &out.Hostname
		*out = new(HTTPRouteMatchHostnameParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Path != nil {
		in, out := &in.Path, &out.Path
		*out = new(HTTPRouteMatchPathParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.Prefix != nil {
		in, out := &in.Prefix, &out.Prefix
		*out = new(string)
		**out = **in
	}
	if in.QueryParameter != nil {
		in, out := &in.QueryParameter, &out.QueryParameter
		*out = make([]MatchQueryParameterParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchParameters.
func (in *HTTPRouteMatchParameters) DeepCopy() *HTTPRouteMatchParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchPathInitParameters) DeepCopyInto(out *HTTPRouteMatchPathInitParameters) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
	if in.Regex != nil {
		in, out := &in.Regex, &out.Regex
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchPathInitParameters.
func (in *HTTPRouteMatchPathInitParameters) DeepCopy() *HTTPRouteMatchPathInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchPathInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchPathObservation) DeepCopyInto(out *HTTPRouteMatchPathObservation) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
	if in.Regex != nil {
		in, out := &in.Regex, &out.Regex
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchPathObservation.
func (in *HTTPRouteMatchPathObservation) DeepCopy() *HTTPRouteMatchPathObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchPathObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchPathParameters) DeepCopyInto(out *HTTPRouteMatchPathParameters) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
	if in.Regex != nil {
		in, out := &in.Regex, &out.Regex
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchPathParameters.
func (in *HTTPRouteMatchPathParameters) DeepCopy() *HTTPRouteMatchPathParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchPathParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — regenerate instead of hand-editing.
func (in *HTTPRouteMatchQueryParameterInitParameters) DeepCopyInto(out *HTTPRouteMatchQueryParameterInitParameters) {
	*out = *in
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(HTTPRouteMatchQueryParameterMatchInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchQueryParameterInitParameters.
func (in *HTTPRouteMatchQueryParameterInitParameters) DeepCopy() *HTTPRouteMatchQueryParameterInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchQueryParameterInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchQueryParameterMatchInitParameters) DeepCopyInto(out *HTTPRouteMatchQueryParameterMatchInitParameters) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchQueryParameterMatchInitParameters.
func (in *HTTPRouteMatchQueryParameterMatchInitParameters) DeepCopy() *HTTPRouteMatchQueryParameterMatchInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchQueryParameterMatchInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchQueryParameterMatchObservation) DeepCopyInto(out *HTTPRouteMatchQueryParameterMatchObservation) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchQueryParameterMatchObservation.
func (in *HTTPRouteMatchQueryParameterMatchObservation) DeepCopy() *HTTPRouteMatchQueryParameterMatchObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchQueryParameterMatchObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchQueryParameterMatchParameters) DeepCopyInto(out *HTTPRouteMatchQueryParameterMatchParameters) {
	*out = *in
	if in.Exact != nil {
		in, out := &in.Exact, &out.Exact
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchQueryParameterMatchParameters.
func (in *HTTPRouteMatchQueryParameterMatchParameters) DeepCopy() *HTTPRouteMatchQueryParameterMatchParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchQueryParameterMatchParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchQueryParameterObservation) DeepCopyInto(out *HTTPRouteMatchQueryParameterObservation) {
	*out = *in
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(HTTPRouteMatchQueryParameterMatchObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchQueryParameterObservation.
func (in *HTTPRouteMatchQueryParameterObservation) DeepCopy() *HTTPRouteMatchQueryParameterObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchQueryParameterObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteMatchQueryParameterParameters) DeepCopyInto(out *HTTPRouteMatchQueryParameterParameters) {
	*out = *in
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(HTTPRouteMatchQueryParameterMatchParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteMatchQueryParameterParameters.
func (in *HTTPRouteMatchQueryParameterParameters) DeepCopy() *HTTPRouteMatchQueryParameterParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteMatchQueryParameterParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteObservation) DeepCopyInto(out *HTTPRouteObservation) {
	*out = *in
	if in.Action != nil {
		in, out := &in.Action, &out.Action
		*out = new(HTTPRouteActionObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(HTTPRouteMatchObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteObservation.
func (in *HTTPRouteObservation) DeepCopy() *HTTPRouteObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — regenerate instead of hand-editing.
func (in *HTTPRouteParameters) DeepCopyInto(out *HTTPRouteParameters) {
	*out = *in
	if in.Action != nil {
		in, out := &in.Action, &out.Action
		*out = new(HTTPRouteActionParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = new(HTTPRouteMatchParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteParameters.
func (in *HTTPRouteParameters) DeepCopy() *HTTPRouteParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): retry-event fields are []*string (element-wise nil-checked copy),
// presumably matching the generated CRD schema — confirm against the type definitions.
func (in *HTTPRouteRetryPolicyInitParameters) DeepCopyInto(out *HTTPRouteRetryPolicyInitParameters) {
	*out = *in
	if in.HTTPRetryEvents != nil {
		in, out := &in.HTTPRetryEvents, &out.HTTPRetryEvents
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.MaxRetries != nil {
		in, out := &in.MaxRetries, &out.MaxRetries
		*out = new(float64)
		**out = **in
	}
	if in.PerRetryTimeout != nil {
		in, out := &in.PerRetryTimeout, &out.PerRetryTimeout
		*out = new(HTTPRouteRetryPolicyPerRetryTimeoutInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TCPRetryEvents != nil {
		in, out := &in.TCPRetryEvents, &out.TCPRetryEvents
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetryPolicyInitParameters.
func (in *HTTPRouteRetryPolicyInitParameters) DeepCopy() *HTTPRouteRetryPolicyInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRetryPolicyInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteRetryPolicyObservation) DeepCopyInto(out *HTTPRouteRetryPolicyObservation) {
	*out = *in
	if in.HTTPRetryEvents != nil {
		in, out := &in.HTTPRetryEvents, &out.HTTPRetryEvents
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.MaxRetries != nil {
		in, out := &in.MaxRetries, &out.MaxRetries
		*out = new(float64)
		**out = **in
	}
	if in.PerRetryTimeout != nil {
		in, out := &in.PerRetryTimeout, &out.PerRetryTimeout
		*out = new(HTTPRouteRetryPolicyPerRetryTimeoutObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.TCPRetryEvents != nil {
		in, out := &in.TCPRetryEvents, &out.TCPRetryEvents
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetryPolicyObservation.
func (in *HTTPRouteRetryPolicyObservation) DeepCopy() *HTTPRouteRetryPolicyObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRetryPolicyObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteRetryPolicyParameters) DeepCopyInto(out *HTTPRouteRetryPolicyParameters) {
	*out = *in
	if in.HTTPRetryEvents != nil {
		in, out := &in.HTTPRetryEvents, &out.HTTPRetryEvents
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.MaxRetries != nil {
		in, out := &in.MaxRetries, &out.MaxRetries
		*out = new(float64)
		**out = **in
	}
	if in.PerRetryTimeout != nil {
		in, out := &in.PerRetryTimeout, &out.PerRetryTimeout
		*out = new(HTTPRouteRetryPolicyPerRetryTimeoutParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TCPRetryEvents != nil {
		in, out := &in.TCPRetryEvents, &out.TCPRetryEvents
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetryPolicyParameters.
func (in *HTTPRouteRetryPolicyParameters) DeepCopy() *HTTPRouteRetryPolicyParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRetryPolicyParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteRetryPolicyPerRetryTimeoutInitParameters) DeepCopyInto(out *HTTPRouteRetryPolicyPerRetryTimeoutInitParameters) {
	*out = *in
	if in.Unit != nil {
		in, out := &in.Unit, &out.Unit
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetryPolicyPerRetryTimeoutInitParameters.
func (in *HTTPRouteRetryPolicyPerRetryTimeoutInitParameters) DeepCopy() *HTTPRouteRetryPolicyPerRetryTimeoutInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRetryPolicyPerRetryTimeoutInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteRetryPolicyPerRetryTimeoutObservation) DeepCopyInto(out *HTTPRouteRetryPolicyPerRetryTimeoutObservation) {
	*out = *in
	if in.Unit != nil {
		in, out := &in.Unit, &out.Unit
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetryPolicyPerRetryTimeoutObservation.
func (in *HTTPRouteRetryPolicyPerRetryTimeoutObservation) DeepCopy() *HTTPRouteRetryPolicyPerRetryTimeoutObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPRouteRetryPolicyPerRetryTimeoutObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRouteRetryPolicyPerRetryTimeoutParameters) DeepCopyInto(out *HTTPRouteRetryPolicyPerRetryTimeoutParameters) {
	*out = *in
	if in.Unit != nil {
		in, out := &in.Unit, &out.Unit
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetryPolicyPerRetryTimeoutParameters.
+func (in *HTTPRouteRetryPolicyPerRetryTimeoutParameters) DeepCopy() *HTTPRouteRetryPolicyPerRetryTimeoutParameters { + if in == nil { + return nil + } + out := new(HTTPRouteRetryPolicyPerRetryTimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteTimeoutIdleInitParameters) DeepCopyInto(out *HTTPRouteTimeoutIdleInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeoutIdleInitParameters. +func (in *HTTPRouteTimeoutIdleInitParameters) DeepCopy() *HTTPRouteTimeoutIdleInitParameters { + if in == nil { + return nil + } + out := new(HTTPRouteTimeoutIdleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteTimeoutIdleObservation) DeepCopyInto(out *HTTPRouteTimeoutIdleObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeoutIdleObservation. +func (in *HTTPRouteTimeoutIdleObservation) DeepCopy() *HTTPRouteTimeoutIdleObservation { + if in == nil { + return nil + } + out := new(HTTPRouteTimeoutIdleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteTimeoutIdleParameters) DeepCopyInto(out *HTTPRouteTimeoutIdleParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeoutIdleParameters. +func (in *HTTPRouteTimeoutIdleParameters) DeepCopy() *HTTPRouteTimeoutIdleParameters { + if in == nil { + return nil + } + out := new(HTTPRouteTimeoutIdleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteTimeoutInitParameters) DeepCopyInto(out *HTTPRouteTimeoutInitParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(HTTPRouteTimeoutIdleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(HTTPRouteTimeoutPerRequestInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeoutInitParameters. +func (in *HTTPRouteTimeoutInitParameters) DeepCopy() *HTTPRouteTimeoutInitParameters { + if in == nil { + return nil + } + out := new(HTTPRouteTimeoutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteTimeoutObservation) DeepCopyInto(out *HTTPRouteTimeoutObservation) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(HTTPRouteTimeoutIdleObservation) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(HTTPRouteTimeoutPerRequestObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeoutObservation. +func (in *HTTPRouteTimeoutObservation) DeepCopy() *HTTPRouteTimeoutObservation { + if in == nil { + return nil + } + out := new(HTTPRouteTimeoutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteTimeoutParameters) DeepCopyInto(out *HTTPRouteTimeoutParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(HTTPRouteTimeoutIdleParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(HTTPRouteTimeoutPerRequestParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeoutParameters. +func (in *HTTPRouteTimeoutParameters) DeepCopy() *HTTPRouteTimeoutParameters { + if in == nil { + return nil + } + out := new(HTTPRouteTimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteTimeoutPerRequestInitParameters) DeepCopyInto(out *HTTPRouteTimeoutPerRequestInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeoutPerRequestInitParameters. +func (in *HTTPRouteTimeoutPerRequestInitParameters) DeepCopy() *HTTPRouteTimeoutPerRequestInitParameters { + if in == nil { + return nil + } + out := new(HTTPRouteTimeoutPerRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteTimeoutPerRequestObservation) DeepCopyInto(out *HTTPRouteTimeoutPerRequestObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeoutPerRequestObservation. +func (in *HTTPRouteTimeoutPerRequestObservation) DeepCopy() *HTTPRouteTimeoutPerRequestObservation { + if in == nil { + return nil + } + out := new(HTTPRouteTimeoutPerRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRouteTimeoutPerRequestParameters) DeepCopyInto(out *HTTPRouteTimeoutPerRequestParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteTimeoutPerRequestParameters. +func (in *HTTPRouteTimeoutPerRequestParameters) DeepCopy() *HTTPRouteTimeoutPerRequestParameters { + if in == nil { + return nil + } + out := new(HTTPRouteTimeoutPerRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderInitParameters) DeepCopyInto(out *HeaderInitParameters) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(HeaderMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderInitParameters. +func (in *HeaderInitParameters) DeepCopy() *HeaderInitParameters { + if in == nil { + return nil + } + out := new(HeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderMatchInitParameters) DeepCopyInto(out *HeaderMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(RangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatchInitParameters. +func (in *HeaderMatchInitParameters) DeepCopy() *HeaderMatchInitParameters { + if in == nil { + return nil + } + out := new(HeaderMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderMatchObservation) DeepCopyInto(out *HeaderMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(RangeObservation) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatchObservation. 
+func (in *HeaderMatchObservation) DeepCopy() *HeaderMatchObservation { + if in == nil { + return nil + } + out := new(HeaderMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderMatchParameters) DeepCopyInto(out *HeaderMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(RangeParameters) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatchParameters. +func (in *HeaderMatchParameters) DeepCopy() *HeaderMatchParameters { + if in == nil { + return nil + } + out := new(HeaderMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderMatchRangeInitParameters) DeepCopyInto(out *HeaderMatchRangeInitParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatchRangeInitParameters. 
+func (in *HeaderMatchRangeInitParameters) DeepCopy() *HeaderMatchRangeInitParameters { + if in == nil { + return nil + } + out := new(HeaderMatchRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderMatchRangeObservation) DeepCopyInto(out *HeaderMatchRangeObservation) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatchRangeObservation. +func (in *HeaderMatchRangeObservation) DeepCopy() *HeaderMatchRangeObservation { + if in == nil { + return nil + } + out := new(HeaderMatchRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderMatchRangeParameters) DeepCopyInto(out *HeaderMatchRangeParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatchRangeParameters. +func (in *HeaderMatchRangeParameters) DeepCopy() *HeaderMatchRangeParameters { + if in == nil { + return nil + } + out := new(HeaderMatchRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderObservation) DeepCopyInto(out *HeaderObservation) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(HeaderMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderObservation. +func (in *HeaderObservation) DeepCopy() *HeaderObservation { + if in == nil { + return nil + } + out := new(HeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderParameters) DeepCopyInto(out *HeaderParameters) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(HeaderMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderParameters. +func (in *HeaderParameters) DeepCopy() *HeaderParameters { + if in == nil { + return nil + } + out := new(HeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckInitParameters) DeepCopyInto(out *HealthCheckInitParameters) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.IntervalMillis != nil { + in, out := &in.IntervalMillis, &out.IntervalMillis + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TimeoutMillis != nil { + in, out := &in.TimeoutMillis, &out.TimeoutMillis + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckInitParameters. +func (in *HealthCheckInitParameters) DeepCopy() *HealthCheckInitParameters { + if in == nil { + return nil + } + out := new(HealthCheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckObservation) DeepCopyInto(out *HealthCheckObservation) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.IntervalMillis != nil { + in, out := &in.IntervalMillis, &out.IntervalMillis + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TimeoutMillis != nil { + in, out := &in.TimeoutMillis, &out.TimeoutMillis + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckObservation. +func (in *HealthCheckObservation) DeepCopy() *HealthCheckObservation { + if in == nil { + return nil + } + out := new(HealthCheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckParameters) DeepCopyInto(out *HealthCheckParameters) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.IntervalMillis != nil { + in, out := &in.IntervalMillis, &out.IntervalMillis + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TimeoutMillis != nil { + in, out := &in.TimeoutMillis, &out.TimeoutMillis + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckParameters. +func (in *HealthCheckParameters) DeepCopy() *HealthCheckParameters { + if in == nil { + return nil + } + out := new(HealthCheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostnameInitParameters) DeepCopyInto(out *HostnameInitParameters) { + *out = *in + if in.DefaultTargetHostname != nil { + in, out := &in.DefaultTargetHostname, &out.DefaultTargetHostname + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostnameInitParameters. +func (in *HostnameInitParameters) DeepCopy() *HostnameInitParameters { + if in == nil { + return nil + } + out := new(HostnameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostnameObservation) DeepCopyInto(out *HostnameObservation) { + *out = *in + if in.DefaultTargetHostname != nil { + in, out := &in.DefaultTargetHostname, &out.DefaultTargetHostname + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostnameObservation. +func (in *HostnameObservation) DeepCopy() *HostnameObservation { + if in == nil { + return nil + } + out := new(HostnameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostnameParameters) DeepCopyInto(out *HostnameParameters) { + *out = *in + if in.DefaultTargetHostname != nil { + in, out := &in.DefaultTargetHostname, &out.DefaultTargetHostname + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostnameParameters. +func (in *HostnameParameters) DeepCopy() *HostnameParameters { + if in == nil { + return nil + } + out := new(HostnameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2IdleInitParameters) DeepCopyInto(out *Http2IdleInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2IdleInitParameters. +func (in *Http2IdleInitParameters) DeepCopy() *Http2IdleInitParameters { + if in == nil { + return nil + } + out := new(Http2IdleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2IdleObservation) DeepCopyInto(out *Http2IdleObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2IdleObservation. +func (in *Http2IdleObservation) DeepCopy() *Http2IdleObservation { + if in == nil { + return nil + } + out := new(Http2IdleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2IdleParameters) DeepCopyInto(out *Http2IdleParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2IdleParameters. +func (in *Http2IdleParameters) DeepCopy() *Http2IdleParameters { + if in == nil { + return nil + } + out := new(Http2IdleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2InitParameters) DeepCopyInto(out *Http2InitParameters) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2InitParameters. +func (in *Http2InitParameters) DeepCopy() *Http2InitParameters { + if in == nil { + return nil + } + out := new(Http2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2Observation) DeepCopyInto(out *Http2Observation) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2Observation. +func (in *Http2Observation) DeepCopy() *Http2Observation { + if in == nil { + return nil + } + out := new(Http2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2Parameters) DeepCopyInto(out *Http2Parameters) { + *out = *in + if in.MaxRequests != nil { + in, out := &in.MaxRequests, &out.MaxRequests + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2Parameters. +func (in *Http2Parameters) DeepCopy() *Http2Parameters { + if in == nil { + return nil + } + out := new(Http2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2PerRequestInitParameters) DeepCopyInto(out *Http2PerRequestInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2PerRequestInitParameters. +func (in *Http2PerRequestInitParameters) DeepCopy() *Http2PerRequestInitParameters { + if in == nil { + return nil + } + out := new(Http2PerRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2PerRequestObservation) DeepCopyInto(out *Http2PerRequestObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2PerRequestObservation. +func (in *Http2PerRequestObservation) DeepCopy() *Http2PerRequestObservation { + if in == nil { + return nil + } + out := new(Http2PerRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2PerRequestParameters) DeepCopyInto(out *Http2PerRequestParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2PerRequestParameters. +func (in *Http2PerRequestParameters) DeepCopy() *Http2PerRequestParameters { + if in == nil { + return nil + } + out := new(Http2PerRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteActionInitParameters) DeepCopyInto(out *Http2RouteActionInitParameters) { + *out = *in + if in.Rewrite != nil { + in, out := &in.Rewrite, &out.Rewrite + *out = new(RewriteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(ActionTargetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteActionInitParameters. 
+func (in *Http2RouteActionInitParameters) DeepCopy() *Http2RouteActionInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteActionObservation) DeepCopyInto(out *Http2RouteActionObservation) { + *out = *in + if in.Rewrite != nil { + in, out := &in.Rewrite, &out.Rewrite + *out = new(RewriteObservation) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(ActionTargetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteActionObservation. +func (in *Http2RouteActionObservation) DeepCopy() *Http2RouteActionObservation { + if in == nil { + return nil + } + out := new(Http2RouteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteActionParameters) DeepCopyInto(out *Http2RouteActionParameters) { + *out = *in + if in.Rewrite != nil { + in, out := &in.Rewrite, &out.Rewrite + *out = new(RewriteParameters) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(ActionTargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteActionParameters. +func (in *Http2RouteActionParameters) DeepCopy() *Http2RouteActionParameters { + if in == nil { + return nil + } + out := new(Http2RouteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteInitParameters) DeepCopyInto(out *Http2RouteInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(Http2RouteActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(Http2RouteMatchInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteInitParameters. +func (in *Http2RouteInitParameters) DeepCopy() *Http2RouteInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchHeaderInitParameters) DeepCopyInto(out *Http2RouteMatchHeaderInitParameters) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(Http2RouteMatchHeaderMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchHeaderInitParameters. +func (in *Http2RouteMatchHeaderInitParameters) DeepCopy() *Http2RouteMatchHeaderInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteMatchHeaderMatchInitParameters) DeepCopyInto(out *Http2RouteMatchHeaderMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(HeaderMatchRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchHeaderMatchInitParameters. +func (in *Http2RouteMatchHeaderMatchInitParameters) DeepCopy() *Http2RouteMatchHeaderMatchInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchHeaderMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchHeaderMatchObservation) DeepCopyInto(out *Http2RouteMatchHeaderMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(HeaderMatchRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchHeaderMatchObservation. 
+func (in *Http2RouteMatchHeaderMatchObservation) DeepCopy() *Http2RouteMatchHeaderMatchObservation { + if in == nil { + return nil + } + out := new(Http2RouteMatchHeaderMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchHeaderMatchParameters) DeepCopyInto(out *Http2RouteMatchHeaderMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(HeaderMatchRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchHeaderMatchParameters. +func (in *Http2RouteMatchHeaderMatchParameters) DeepCopy() *Http2RouteMatchHeaderMatchParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchHeaderMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteMatchHeaderObservation) DeepCopyInto(out *Http2RouteMatchHeaderObservation) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(Http2RouteMatchHeaderMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchHeaderObservation. +func (in *Http2RouteMatchHeaderObservation) DeepCopy() *Http2RouteMatchHeaderObservation { + if in == nil { + return nil + } + out := new(Http2RouteMatchHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchHeaderParameters) DeepCopyInto(out *Http2RouteMatchHeaderParameters) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(Http2RouteMatchHeaderMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchHeaderParameters. +func (in *Http2RouteMatchHeaderParameters) DeepCopy() *Http2RouteMatchHeaderParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteMatchInitParameters) DeepCopyInto(out *Http2RouteMatchInitParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(MatchHostnameInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(MatchPathInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]QueryParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchInitParameters. +func (in *Http2RouteMatchInitParameters) DeepCopy() *Http2RouteMatchInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteMatchObservation) DeepCopyInto(out *Http2RouteMatchObservation) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(MatchHostnameObservation) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(MatchPathObservation) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]QueryParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchObservation. +func (in *Http2RouteMatchObservation) DeepCopy() *Http2RouteMatchObservation { + if in == nil { + return nil + } + out := new(Http2RouteMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteMatchParameters) DeepCopyInto(out *Http2RouteMatchParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(MatchHostnameParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(MatchPathParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]QueryParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchParameters. +func (in *Http2RouteMatchParameters) DeepCopy() *Http2RouteMatchParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchPathInitParameters) DeepCopyInto(out *Http2RouteMatchPathInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchPathInitParameters. 
+func (in *Http2RouteMatchPathInitParameters) DeepCopy() *Http2RouteMatchPathInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchPathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchPathObservation) DeepCopyInto(out *Http2RouteMatchPathObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchPathObservation. +func (in *Http2RouteMatchPathObservation) DeepCopy() *Http2RouteMatchPathObservation { + if in == nil { + return nil + } + out := new(Http2RouteMatchPathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchPathParameters) DeepCopyInto(out *Http2RouteMatchPathParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchPathParameters. +func (in *Http2RouteMatchPathParameters) DeepCopy() *Http2RouteMatchPathParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchPathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteMatchQueryParameterInitParameters) DeepCopyInto(out *Http2RouteMatchQueryParameterInitParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(Http2RouteMatchQueryParameterMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchQueryParameterInitParameters. +func (in *Http2RouteMatchQueryParameterInitParameters) DeepCopy() *Http2RouteMatchQueryParameterInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchQueryParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchQueryParameterMatchInitParameters) DeepCopyInto(out *Http2RouteMatchQueryParameterMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchQueryParameterMatchInitParameters. +func (in *Http2RouteMatchQueryParameterMatchInitParameters) DeepCopy() *Http2RouteMatchQueryParameterMatchInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchQueryParameterMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteMatchQueryParameterMatchObservation) DeepCopyInto(out *Http2RouteMatchQueryParameterMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchQueryParameterMatchObservation. +func (in *Http2RouteMatchQueryParameterMatchObservation) DeepCopy() *Http2RouteMatchQueryParameterMatchObservation { + if in == nil { + return nil + } + out := new(Http2RouteMatchQueryParameterMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchQueryParameterMatchParameters) DeepCopyInto(out *Http2RouteMatchQueryParameterMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchQueryParameterMatchParameters. +func (in *Http2RouteMatchQueryParameterMatchParameters) DeepCopy() *Http2RouteMatchQueryParameterMatchParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchQueryParameterMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchQueryParameterObservation) DeepCopyInto(out *Http2RouteMatchQueryParameterObservation) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(Http2RouteMatchQueryParameterMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchQueryParameterObservation. 
+func (in *Http2RouteMatchQueryParameterObservation) DeepCopy() *Http2RouteMatchQueryParameterObservation { + if in == nil { + return nil + } + out := new(Http2RouteMatchQueryParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteMatchQueryParameterParameters) DeepCopyInto(out *Http2RouteMatchQueryParameterParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(Http2RouteMatchQueryParameterMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteMatchQueryParameterParameters. +func (in *Http2RouteMatchQueryParameterParameters) DeepCopy() *Http2RouteMatchQueryParameterParameters { + if in == nil { + return nil + } + out := new(Http2RouteMatchQueryParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteObservation) DeepCopyInto(out *Http2RouteObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(Http2RouteActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(Http2RouteMatchObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteObservation. +func (in *Http2RouteObservation) DeepCopy() *Http2RouteObservation { + if in == nil { + return nil + } + out := new(Http2RouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteParameters) DeepCopyInto(out *Http2RouteParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(Http2RouteActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(Http2RouteMatchParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteParameters. +func (in *Http2RouteParameters) DeepCopy() *Http2RouteParameters { + if in == nil { + return nil + } + out := new(Http2RouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteRetryPolicyInitParameters) DeepCopyInto(out *Http2RouteRetryPolicyInitParameters) { + *out = *in + if in.HTTPRetryEvents != nil { + in, out := &in.HTTPRetryEvents, &out.HTTPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(float64) + **out = **in + } + if in.PerRetryTimeout != nil { + in, out := &in.PerRetryTimeout, &out.PerRetryTimeout + *out = new(RetryPolicyPerRetryTimeoutInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TCPRetryEvents != nil { + in, out := &in.TCPRetryEvents, &out.TCPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteRetryPolicyInitParameters. 
+func (in *Http2RouteRetryPolicyInitParameters) DeepCopy() *Http2RouteRetryPolicyInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteRetryPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteRetryPolicyObservation) DeepCopyInto(out *Http2RouteRetryPolicyObservation) { + *out = *in + if in.HTTPRetryEvents != nil { + in, out := &in.HTTPRetryEvents, &out.HTTPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(float64) + **out = **in + } + if in.PerRetryTimeout != nil { + in, out := &in.PerRetryTimeout, &out.PerRetryTimeout + *out = new(RetryPolicyPerRetryTimeoutObservation) + (*in).DeepCopyInto(*out) + } + if in.TCPRetryEvents != nil { + in, out := &in.TCPRetryEvents, &out.TCPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteRetryPolicyObservation. +func (in *Http2RouteRetryPolicyObservation) DeepCopy() *Http2RouteRetryPolicyObservation { + if in == nil { + return nil + } + out := new(Http2RouteRetryPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Http2RouteRetryPolicyParameters) DeepCopyInto(out *Http2RouteRetryPolicyParameters) { + *out = *in + if in.HTTPRetryEvents != nil { + in, out := &in.HTTPRetryEvents, &out.HTTPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(float64) + **out = **in + } + if in.PerRetryTimeout != nil { + in, out := &in.PerRetryTimeout, &out.PerRetryTimeout + *out = new(RetryPolicyPerRetryTimeoutParameters) + (*in).DeepCopyInto(*out) + } + if in.TCPRetryEvents != nil { + in, out := &in.TCPRetryEvents, &out.TCPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteRetryPolicyParameters. +func (in *Http2RouteRetryPolicyParameters) DeepCopy() *Http2RouteRetryPolicyParameters { + if in == nil { + return nil + } + out := new(Http2RouteRetryPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteTimeoutInitParameters) DeepCopyInto(out *Http2RouteTimeoutInitParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(TimeoutIdleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(TimeoutPerRequestInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteTimeoutInitParameters. 
+func (in *Http2RouteTimeoutInitParameters) DeepCopy() *Http2RouteTimeoutInitParameters { + if in == nil { + return nil + } + out := new(Http2RouteTimeoutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteTimeoutObservation) DeepCopyInto(out *Http2RouteTimeoutObservation) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(TimeoutIdleObservation) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(TimeoutPerRequestObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteTimeoutObservation. +func (in *Http2RouteTimeoutObservation) DeepCopy() *Http2RouteTimeoutObservation { + if in == nil { + return nil + } + out := new(Http2RouteTimeoutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Http2RouteTimeoutParameters) DeepCopyInto(out *Http2RouteTimeoutParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(TimeoutIdleParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(TimeoutPerRequestParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Http2RouteTimeoutParameters. +func (in *Http2RouteTimeoutParameters) DeepCopy() *Http2RouteTimeoutParameters { + if in == nil { + return nil + } + out := new(Http2RouteTimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdleInitParameters) DeepCopyInto(out *IdleInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdleInitParameters. +func (in *IdleInitParameters) DeepCopy() *IdleInitParameters { + if in == nil { + return nil + } + out := new(IdleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdleObservation) DeepCopyInto(out *IdleObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdleObservation. +func (in *IdleObservation) DeepCopy() *IdleObservation { + if in == nil { + return nil + } + out := new(IdleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdleParameters) DeepCopyInto(out *IdleParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdleParameters. +func (in *IdleParameters) DeepCopy() *IdleParameters { + if in == nil { + return nil + } + out := new(IdleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntervalInitParameters) DeepCopyInto(out *IntervalInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntervalInitParameters. +func (in *IntervalInitParameters) DeepCopy() *IntervalInitParameters { + if in == nil { + return nil + } + out := new(IntervalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntervalObservation) DeepCopyInto(out *IntervalObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntervalObservation. +func (in *IntervalObservation) DeepCopy() *IntervalObservation { + if in == nil { + return nil + } + out := new(IntervalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntervalParameters) DeepCopyInto(out *IntervalParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntervalParameters. 
+func (in *IntervalParameters) DeepCopy() *IntervalParameters { + if in == nil { + return nil + } + out := new(IntervalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONInitParameters) DeepCopyInto(out *JSONInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONInitParameters. +func (in *JSONInitParameters) DeepCopy() *JSONInitParameters { + if in == nil { + return nil + } + out := new(JSONInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONObservation) DeepCopyInto(out *JSONObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONObservation. +func (in *JSONObservation) DeepCopy() *JSONObservation { + if in == nil { + return nil + } + out := new(JSONObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParameters) DeepCopyInto(out *JSONParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParameters. 
+func (in *JSONParameters) DeepCopy() *JSONParameters { + if in == nil { + return nil + } + out := new(JSONParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerConnectionPoolInitParameters) DeepCopyInto(out *ListenerConnectionPoolInitParameters) { + *out = *in + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(ConnectionPoolGRPCInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = make([]ConnectionPoolHTTPInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = make([]ConnectionPoolHttp2InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = make([]TCPInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerConnectionPoolInitParameters. +func (in *ListenerConnectionPoolInitParameters) DeepCopy() *ListenerConnectionPoolInitParameters { + if in == nil { + return nil + } + out := new(ListenerConnectionPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerConnectionPoolObservation) DeepCopyInto(out *ListenerConnectionPoolObservation) { + *out = *in + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(ConnectionPoolGRPCObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = make([]ConnectionPoolHTTPObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = make([]ConnectionPoolHttp2Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = make([]TCPObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerConnectionPoolObservation. +func (in *ListenerConnectionPoolObservation) DeepCopy() *ListenerConnectionPoolObservation { + if in == nil { + return nil + } + out := new(ListenerConnectionPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerConnectionPoolParameters) DeepCopyInto(out *ListenerConnectionPoolParameters) { + *out = *in + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(ConnectionPoolGRPCParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = make([]ConnectionPoolHTTPParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = make([]ConnectionPoolHttp2Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = make([]TCPParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerConnectionPoolParameters. +func (in *ListenerConnectionPoolParameters) DeepCopy() *ListenerConnectionPoolParameters { + if in == nil { + return nil + } + out := new(ListenerConnectionPoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerHealthCheckInitParameters) DeepCopyInto(out *ListenerHealthCheckInitParameters) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.IntervalMillis != nil { + in, out := &in.IntervalMillis, &out.IntervalMillis + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TimeoutMillis != nil { + in, out := &in.TimeoutMillis, &out.TimeoutMillis + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerHealthCheckInitParameters. +func (in *ListenerHealthCheckInitParameters) DeepCopy() *ListenerHealthCheckInitParameters { + if in == nil { + return nil + } + out := new(ListenerHealthCheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerHealthCheckObservation) DeepCopyInto(out *ListenerHealthCheckObservation) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.IntervalMillis != nil { + in, out := &in.IntervalMillis, &out.IntervalMillis + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TimeoutMillis != nil { + in, out := &in.TimeoutMillis, &out.TimeoutMillis + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerHealthCheckObservation. +func (in *ListenerHealthCheckObservation) DeepCopy() *ListenerHealthCheckObservation { + if in == nil { + return nil + } + out := new(ListenerHealthCheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerHealthCheckParameters) DeepCopyInto(out *ListenerHealthCheckParameters) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.IntervalMillis != nil { + in, out := &in.IntervalMillis, &out.IntervalMillis + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TimeoutMillis != nil { + in, out := &in.TimeoutMillis, &out.TimeoutMillis + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerHealthCheckParameters. +func (in *ListenerHealthCheckParameters) DeepCopy() *ListenerHealthCheckParameters { + if in == nil { + return nil + } + out := new(ListenerHealthCheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerInitParameters) DeepCopyInto(out *ListenerInitParameters) { + *out = *in + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ConnectionPoolInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(HealthCheckInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PortMapping != nil { + in, out := &in.PortMapping, &out.PortMapping + *out = new(PortMappingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(ListenerTLSInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerInitParameters. +func (in *ListenerInitParameters) DeepCopy() *ListenerInitParameters { + if in == nil { + return nil + } + out := new(ListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerObservation) DeepCopyInto(out *ListenerObservation) { + *out = *in + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ConnectionPoolObservation) + (*in).DeepCopyInto(*out) + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(HealthCheckObservation) + (*in).DeepCopyInto(*out) + } + if in.PortMapping != nil { + in, out := &in.PortMapping, &out.PortMapping + *out = new(PortMappingObservation) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(ListenerTLSObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerObservation. 
+func (in *ListenerObservation) DeepCopy() *ListenerObservation { + if in == nil { + return nil + } + out := new(ListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerParameters) DeepCopyInto(out *ListenerParameters) { + *out = *in + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ConnectionPoolParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(HealthCheckParameters) + (*in).DeepCopyInto(*out) + } + if in.PortMapping != nil { + in, out := &in.PortMapping, &out.PortMapping + *out = new(PortMappingParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(ListenerTLSParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerParameters. +func (in *ListenerParameters) DeepCopy() *ListenerParameters { + if in == nil { + return nil + } + out := new(ListenerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerPortMappingInitParameters) DeepCopyInto(out *ListenerPortMappingInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerPortMappingInitParameters. 
+func (in *ListenerPortMappingInitParameters) DeepCopy() *ListenerPortMappingInitParameters { + if in == nil { + return nil + } + out := new(ListenerPortMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerPortMappingObservation) DeepCopyInto(out *ListenerPortMappingObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerPortMappingObservation. +func (in *ListenerPortMappingObservation) DeepCopy() *ListenerPortMappingObservation { + if in == nil { + return nil + } + out := new(ListenerPortMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerPortMappingParameters) DeepCopyInto(out *ListenerPortMappingParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerPortMappingParameters. +func (in *ListenerPortMappingParameters) DeepCopy() *ListenerPortMappingParameters { + if in == nil { + return nil + } + out := new(ListenerPortMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerTLSCertificateFileInitParameters) DeepCopyInto(out *ListenerTLSCertificateFileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSCertificateFileInitParameters. +func (in *ListenerTLSCertificateFileInitParameters) DeepCopy() *ListenerTLSCertificateFileInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSCertificateFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSCertificateFileObservation) DeepCopyInto(out *ListenerTLSCertificateFileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSCertificateFileObservation. +func (in *ListenerTLSCertificateFileObservation) DeepCopy() *ListenerTLSCertificateFileObservation { + if in == nil { + return nil + } + out := new(ListenerTLSCertificateFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerTLSCertificateFileParameters) DeepCopyInto(out *ListenerTLSCertificateFileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSCertificateFileParameters. +func (in *ListenerTLSCertificateFileParameters) DeepCopy() *ListenerTLSCertificateFileParameters { + if in == nil { + return nil + } + out := new(ListenerTLSCertificateFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSCertificateInitParameters) DeepCopyInto(out *ListenerTLSCertificateInitParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(TLSCertificateAcmInitParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ListenerTLSCertificateFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ListenerTLSCertificateSdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSCertificateInitParameters. +func (in *ListenerTLSCertificateInitParameters) DeepCopy() *ListenerTLSCertificateInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerTLSCertificateObservation) DeepCopyInto(out *ListenerTLSCertificateObservation) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(TLSCertificateAcmObservation) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ListenerTLSCertificateFileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ListenerTLSCertificateSdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSCertificateObservation. +func (in *ListenerTLSCertificateObservation) DeepCopy() *ListenerTLSCertificateObservation { + if in == nil { + return nil + } + out := new(ListenerTLSCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSCertificateParameters) DeepCopyInto(out *ListenerTLSCertificateParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(TLSCertificateAcmParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ListenerTLSCertificateFileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ListenerTLSCertificateSdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSCertificateParameters. +func (in *ListenerTLSCertificateParameters) DeepCopy() *ListenerTLSCertificateParameters { + if in == nil { + return nil + } + out := new(ListenerTLSCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerTLSCertificateSdsInitParameters) DeepCopyInto(out *ListenerTLSCertificateSdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSCertificateSdsInitParameters. +func (in *ListenerTLSCertificateSdsInitParameters) DeepCopy() *ListenerTLSCertificateSdsInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSCertificateSdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSCertificateSdsObservation) DeepCopyInto(out *ListenerTLSCertificateSdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSCertificateSdsObservation. +func (in *ListenerTLSCertificateSdsObservation) DeepCopy() *ListenerTLSCertificateSdsObservation { + if in == nil { + return nil + } + out := new(ListenerTLSCertificateSdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSCertificateSdsParameters) DeepCopyInto(out *ListenerTLSCertificateSdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSCertificateSdsParameters. 
+func (in *ListenerTLSCertificateSdsParameters) DeepCopy() *ListenerTLSCertificateSdsParameters { + if in == nil { + return nil + } + out := new(ListenerTLSCertificateSdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSInitParameters) DeepCopyInto(out *ListenerTLSInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(TLSCertificateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(TLSValidationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSInitParameters. +func (in *ListenerTLSInitParameters) DeepCopy() *ListenerTLSInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSObservation) DeepCopyInto(out *ListenerTLSObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(TLSCertificateObservation) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(TLSValidationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSObservation. 
+func (in *ListenerTLSObservation) DeepCopy() *ListenerTLSObservation { + if in == nil { + return nil + } + out := new(ListenerTLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSParameters) DeepCopyInto(out *ListenerTLSParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(TLSCertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(TLSValidationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSParameters. +func (in *ListenerTLSParameters) DeepCopy() *ListenerTLSParameters { + if in == nil { + return nil + } + out := new(ListenerTLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationInitParameters) DeepCopyInto(out *ListenerTLSValidationInitParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(ListenerTLSValidationSubjectAlternativeNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(ListenerTLSValidationTrustInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationInitParameters. 
+func (in *ListenerTLSValidationInitParameters) DeepCopy() *ListenerTLSValidationInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationObservation) DeepCopyInto(out *ListenerTLSValidationObservation) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(ListenerTLSValidationSubjectAlternativeNamesObservation) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(ListenerTLSValidationTrustObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationObservation. +func (in *ListenerTLSValidationObservation) DeepCopy() *ListenerTLSValidationObservation { + if in == nil { + return nil + } + out := new(ListenerTLSValidationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationParameters) DeepCopyInto(out *ListenerTLSValidationParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(ListenerTLSValidationSubjectAlternativeNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(ListenerTLSValidationTrustParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationParameters. 
+func (in *ListenerTLSValidationParameters) DeepCopy() *ListenerTLSValidationParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationSubjectAlternativeNamesInitParameters) DeepCopyInto(out *ListenerTLSValidationSubjectAlternativeNamesInitParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(ListenerTLSValidationSubjectAlternativeNamesMatchInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationSubjectAlternativeNamesInitParameters. +func (in *ListenerTLSValidationSubjectAlternativeNamesInitParameters) DeepCopy() *ListenerTLSValidationSubjectAlternativeNamesInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationSubjectAlternativeNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationSubjectAlternativeNamesMatchInitParameters) DeepCopyInto(out *ListenerTLSValidationSubjectAlternativeNamesMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationSubjectAlternativeNamesMatchInitParameters. 
+func (in *ListenerTLSValidationSubjectAlternativeNamesMatchInitParameters) DeepCopy() *ListenerTLSValidationSubjectAlternativeNamesMatchInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationSubjectAlternativeNamesMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationSubjectAlternativeNamesMatchObservation) DeepCopyInto(out *ListenerTLSValidationSubjectAlternativeNamesMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationSubjectAlternativeNamesMatchObservation. +func (in *ListenerTLSValidationSubjectAlternativeNamesMatchObservation) DeepCopy() *ListenerTLSValidationSubjectAlternativeNamesMatchObservation { + if in == nil { + return nil + } + out := new(ListenerTLSValidationSubjectAlternativeNamesMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationSubjectAlternativeNamesMatchParameters) DeepCopyInto(out *ListenerTLSValidationSubjectAlternativeNamesMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationSubjectAlternativeNamesMatchParameters. 
+func (in *ListenerTLSValidationSubjectAlternativeNamesMatchParameters) DeepCopy() *ListenerTLSValidationSubjectAlternativeNamesMatchParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationSubjectAlternativeNamesMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationSubjectAlternativeNamesObservation) DeepCopyInto(out *ListenerTLSValidationSubjectAlternativeNamesObservation) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(ListenerTLSValidationSubjectAlternativeNamesMatchObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationSubjectAlternativeNamesObservation. +func (in *ListenerTLSValidationSubjectAlternativeNamesObservation) DeepCopy() *ListenerTLSValidationSubjectAlternativeNamesObservation { + if in == nil { + return nil + } + out := new(ListenerTLSValidationSubjectAlternativeNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationSubjectAlternativeNamesParameters) DeepCopyInto(out *ListenerTLSValidationSubjectAlternativeNamesParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(ListenerTLSValidationSubjectAlternativeNamesMatchParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationSubjectAlternativeNamesParameters. 
+func (in *ListenerTLSValidationSubjectAlternativeNamesParameters) DeepCopy() *ListenerTLSValidationSubjectAlternativeNamesParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationSubjectAlternativeNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationTrustFileInitParameters) DeepCopyInto(out *ListenerTLSValidationTrustFileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationTrustFileInitParameters. +func (in *ListenerTLSValidationTrustFileInitParameters) DeepCopy() *ListenerTLSValidationTrustFileInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationTrustFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationTrustFileObservation) DeepCopyInto(out *ListenerTLSValidationTrustFileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationTrustFileObservation. +func (in *ListenerTLSValidationTrustFileObservation) DeepCopy() *ListenerTLSValidationTrustFileObservation { + if in == nil { + return nil + } + out := new(ListenerTLSValidationTrustFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerTLSValidationTrustFileParameters) DeepCopyInto(out *ListenerTLSValidationTrustFileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationTrustFileParameters. +func (in *ListenerTLSValidationTrustFileParameters) DeepCopy() *ListenerTLSValidationTrustFileParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationTrustFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationTrustInitParameters) DeepCopyInto(out *ListenerTLSValidationTrustInitParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ListenerTLSValidationTrustFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ListenerTLSValidationTrustSdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationTrustInitParameters. +func (in *ListenerTLSValidationTrustInitParameters) DeepCopy() *ListenerTLSValidationTrustInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationTrustInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerTLSValidationTrustObservation) DeepCopyInto(out *ListenerTLSValidationTrustObservation) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ListenerTLSValidationTrustFileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ListenerTLSValidationTrustSdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationTrustObservation. +func (in *ListenerTLSValidationTrustObservation) DeepCopy() *ListenerTLSValidationTrustObservation { + if in == nil { + return nil + } + out := new(ListenerTLSValidationTrustObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationTrustParameters) DeepCopyInto(out *ListenerTLSValidationTrustParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ListenerTLSValidationTrustFileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ListenerTLSValidationTrustSdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationTrustParameters. +func (in *ListenerTLSValidationTrustParameters) DeepCopy() *ListenerTLSValidationTrustParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationTrustParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerTLSValidationTrustSdsInitParameters) DeepCopyInto(out *ListenerTLSValidationTrustSdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationTrustSdsInitParameters. +func (in *ListenerTLSValidationTrustSdsInitParameters) DeepCopy() *ListenerTLSValidationTrustSdsInitParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationTrustSdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationTrustSdsObservation) DeepCopyInto(out *ListenerTLSValidationTrustSdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationTrustSdsObservation. +func (in *ListenerTLSValidationTrustSdsObservation) DeepCopy() *ListenerTLSValidationTrustSdsObservation { + if in == nil { + return nil + } + out := new(ListenerTLSValidationTrustSdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSValidationTrustSdsParameters) DeepCopyInto(out *ListenerTLSValidationTrustSdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSValidationTrustSdsParameters. 
+func (in *ListenerTLSValidationTrustSdsParameters) DeepCopy() *ListenerTLSValidationTrustSdsParameters { + if in == nil { + return nil + } + out := new(ListenerTLSValidationTrustSdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTimeoutInitParameters) DeepCopyInto(out *ListenerTimeoutInitParameters) { + *out = *in + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(TimeoutGRPCInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(TimeoutHTTPInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(TimeoutHttp2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = new(TimeoutTCPInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTimeoutInitParameters. +func (in *ListenerTimeoutInitParameters) DeepCopy() *ListenerTimeoutInitParameters { + if in == nil { + return nil + } + out := new(ListenerTimeoutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerTimeoutObservation) DeepCopyInto(out *ListenerTimeoutObservation) { + *out = *in + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(TimeoutGRPCObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(TimeoutHTTPObservation) + (*in).DeepCopyInto(*out) + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(TimeoutHttp2Observation) + (*in).DeepCopyInto(*out) + } + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = new(TimeoutTCPObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTimeoutObservation. +func (in *ListenerTimeoutObservation) DeepCopy() *ListenerTimeoutObservation { + if in == nil { + return nil + } + out := new(ListenerTimeoutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTimeoutParameters) DeepCopyInto(out *ListenerTimeoutParameters) { + *out = *in + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(TimeoutGRPCParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(TimeoutHTTPParameters) + (*in).DeepCopyInto(*out) + } + if in.Http2 != nil { + in, out := &in.Http2, &out.Http2 + *out = new(TimeoutHttp2Parameters) + (*in).DeepCopyInto(*out) + } + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = new(TimeoutTCPParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTimeoutParameters. 
+func (in *ListenerTimeoutParameters) DeepCopy() *ListenerTimeoutParameters { + if in == nil { + return nil + } + out := new(ListenerTimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingAccessLogFileInitParameters) DeepCopyInto(out *LoggingAccessLogFileInitParameters) { + *out = *in + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(FileFormatInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingAccessLogFileInitParameters. +func (in *LoggingAccessLogFileInitParameters) DeepCopy() *LoggingAccessLogFileInitParameters { + if in == nil { + return nil + } + out := new(LoggingAccessLogFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingAccessLogFileObservation) DeepCopyInto(out *LoggingAccessLogFileObservation) { + *out = *in + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(FileFormatObservation) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingAccessLogFileObservation. +func (in *LoggingAccessLogFileObservation) DeepCopy() *LoggingAccessLogFileObservation { + if in == nil { + return nil + } + out := new(LoggingAccessLogFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingAccessLogFileParameters) DeepCopyInto(out *LoggingAccessLogFileParameters) { + *out = *in + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(FileFormatParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingAccessLogFileParameters. +func (in *LoggingAccessLogFileParameters) DeepCopy() *LoggingAccessLogFileParameters { + if in == nil { + return nil + } + out := new(LoggingAccessLogFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingAccessLogInitParameters) DeepCopyInto(out *LoggingAccessLogInitParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(LoggingAccessLogFileInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingAccessLogInitParameters. +func (in *LoggingAccessLogInitParameters) DeepCopy() *LoggingAccessLogInitParameters { + if in == nil { + return nil + } + out := new(LoggingAccessLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingAccessLogObservation) DeepCopyInto(out *LoggingAccessLogObservation) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(LoggingAccessLogFileObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingAccessLogObservation. 
+func (in *LoggingAccessLogObservation) DeepCopy() *LoggingAccessLogObservation { + if in == nil { + return nil + } + out := new(LoggingAccessLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingAccessLogParameters) DeepCopyInto(out *LoggingAccessLogParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(LoggingAccessLogFileParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingAccessLogParameters. +func (in *LoggingAccessLogParameters) DeepCopy() *LoggingAccessLogParameters { + if in == nil { + return nil + } + out := new(LoggingAccessLogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingInitParameters) DeepCopyInto(out *LoggingInitParameters) { + *out = *in + if in.AccessLog != nil { + in, out := &in.AccessLog, &out.AccessLog + *out = new(AccessLogInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInitParameters. +func (in *LoggingInitParameters) DeepCopy() *LoggingInitParameters { + if in == nil { + return nil + } + out := new(LoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingObservation) DeepCopyInto(out *LoggingObservation) { + *out = *in + if in.AccessLog != nil { + in, out := &in.AccessLog, &out.AccessLog + *out = new(AccessLogObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingObservation. 
+func (in *LoggingObservation) DeepCopy() *LoggingObservation { + if in == nil { + return nil + } + out := new(LoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingParameters) DeepCopyInto(out *LoggingParameters) { + *out = *in + if in.AccessLog != nil { + in, out := &in.AccessLog, &out.AccessLog + *out = new(AccessLogParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingParameters. +func (in *LoggingParameters) DeepCopy() *LoggingParameters { + if in == nil { + return nil + } + out := new(LoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchHeaderInitParameters) DeepCopyInto(out *MatchHeaderInitParameters) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchHeaderMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHeaderInitParameters. +func (in *MatchHeaderInitParameters) DeepCopy() *MatchHeaderInitParameters { + if in == nil { + return nil + } + out := new(MatchHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchHeaderMatchInitParameters) DeepCopyInto(out *MatchHeaderMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(MatchRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHeaderMatchInitParameters. +func (in *MatchHeaderMatchInitParameters) DeepCopy() *MatchHeaderMatchInitParameters { + if in == nil { + return nil + } + out := new(MatchHeaderMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchHeaderMatchObservation) DeepCopyInto(out *MatchHeaderMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(MatchRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHeaderMatchObservation. 
+func (in *MatchHeaderMatchObservation) DeepCopy() *MatchHeaderMatchObservation { + if in == nil { + return nil + } + out := new(MatchHeaderMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchHeaderMatchParameters) DeepCopyInto(out *MatchHeaderMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(MatchRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHeaderMatchParameters. +func (in *MatchHeaderMatchParameters) DeepCopy() *MatchHeaderMatchParameters { + if in == nil { + return nil + } + out := new(MatchHeaderMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchHeaderMatchRangeInitParameters) DeepCopyInto(out *MatchHeaderMatchRangeInitParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHeaderMatchRangeInitParameters. 
+func (in *MatchHeaderMatchRangeInitParameters) DeepCopy() *MatchHeaderMatchRangeInitParameters { + if in == nil { + return nil + } + out := new(MatchHeaderMatchRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchHeaderMatchRangeObservation) DeepCopyInto(out *MatchHeaderMatchRangeObservation) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHeaderMatchRangeObservation. +func (in *MatchHeaderMatchRangeObservation) DeepCopy() *MatchHeaderMatchRangeObservation { + if in == nil { + return nil + } + out := new(MatchHeaderMatchRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchHeaderMatchRangeParameters) DeepCopyInto(out *MatchHeaderMatchRangeParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHeaderMatchRangeParameters. +func (in *MatchHeaderMatchRangeParameters) DeepCopy() *MatchHeaderMatchRangeParameters { + if in == nil { + return nil + } + out := new(MatchHeaderMatchRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchHeaderObservation) DeepCopyInto(out *MatchHeaderObservation) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchHeaderMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHeaderObservation. +func (in *MatchHeaderObservation) DeepCopy() *MatchHeaderObservation { + if in == nil { + return nil + } + out := new(MatchHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchHeaderParameters) DeepCopyInto(out *MatchHeaderParameters) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchHeaderMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHeaderParameters. +func (in *MatchHeaderParameters) DeepCopy() *MatchHeaderParameters { + if in == nil { + return nil + } + out := new(MatchHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchHostnameInitParameters) DeepCopyInto(out *MatchHostnameInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHostnameInitParameters. +func (in *MatchHostnameInitParameters) DeepCopy() *MatchHostnameInitParameters { + if in == nil { + return nil + } + out := new(MatchHostnameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchHostnameObservation) DeepCopyInto(out *MatchHostnameObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHostnameObservation. +func (in *MatchHostnameObservation) DeepCopy() *MatchHostnameObservation { + if in == nil { + return nil + } + out := new(MatchHostnameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchHostnameParameters) DeepCopyInto(out *MatchHostnameParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchHostnameParameters. 
+func (in *MatchHostnameParameters) DeepCopy() *MatchHostnameParameters { + if in == nil { + return nil + } + out := new(MatchHostnameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchInitParameters) DeepCopyInto(out *MatchInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchInitParameters. +func (in *MatchInitParameters) DeepCopy() *MatchInitParameters { + if in == nil { + return nil + } + out := new(MatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchObservation) DeepCopyInto(out *MatchObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchObservation. +func (in *MatchObservation) DeepCopy() *MatchObservation { + if in == nil { + return nil + } + out := new(MatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchParameters) DeepCopyInto(out *MatchParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchParameters. +func (in *MatchParameters) DeepCopy() *MatchParameters { + if in == nil { + return nil + } + out := new(MatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchPathInitParameters) DeepCopyInto(out *MatchPathInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchPathInitParameters. +func (in *MatchPathInitParameters) DeepCopy() *MatchPathInitParameters { + if in == nil { + return nil + } + out := new(MatchPathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchPathObservation) DeepCopyInto(out *MatchPathObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchPathObservation. 
+func (in *MatchPathObservation) DeepCopy() *MatchPathObservation { + if in == nil { + return nil + } + out := new(MatchPathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchPathParameters) DeepCopyInto(out *MatchPathParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchPathParameters. +func (in *MatchPathParameters) DeepCopy() *MatchPathParameters { + if in == nil { + return nil + } + out := new(MatchPathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchQueryParameterInitParameters) DeepCopyInto(out *MatchQueryParameterInitParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchQueryParameterMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchQueryParameterInitParameters. +func (in *MatchQueryParameterInitParameters) DeepCopy() *MatchQueryParameterInitParameters { + if in == nil { + return nil + } + out := new(MatchQueryParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchQueryParameterMatchInitParameters) DeepCopyInto(out *MatchQueryParameterMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchQueryParameterMatchInitParameters. +func (in *MatchQueryParameterMatchInitParameters) DeepCopy() *MatchQueryParameterMatchInitParameters { + if in == nil { + return nil + } + out := new(MatchQueryParameterMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchQueryParameterMatchObservation) DeepCopyInto(out *MatchQueryParameterMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchQueryParameterMatchObservation. +func (in *MatchQueryParameterMatchObservation) DeepCopy() *MatchQueryParameterMatchObservation { + if in == nil { + return nil + } + out := new(MatchQueryParameterMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchQueryParameterMatchParameters) DeepCopyInto(out *MatchQueryParameterMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchQueryParameterMatchParameters. 
+func (in *MatchQueryParameterMatchParameters) DeepCopy() *MatchQueryParameterMatchParameters { + if in == nil { + return nil + } + out := new(MatchQueryParameterMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchQueryParameterObservation) DeepCopyInto(out *MatchQueryParameterObservation) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchQueryParameterMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchQueryParameterObservation. +func (in *MatchQueryParameterObservation) DeepCopy() *MatchQueryParameterObservation { + if in == nil { + return nil + } + out := new(MatchQueryParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchQueryParameterParameters) DeepCopyInto(out *MatchQueryParameterParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchQueryParameterMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchQueryParameterParameters. +func (in *MatchQueryParameterParameters) DeepCopy() *MatchQueryParameterParameters { + if in == nil { + return nil + } + out := new(MatchQueryParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchRangeInitParameters) DeepCopyInto(out *MatchRangeInitParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchRangeInitParameters. +func (in *MatchRangeInitParameters) DeepCopy() *MatchRangeInitParameters { + if in == nil { + return nil + } + out := new(MatchRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchRangeObservation) DeepCopyInto(out *MatchRangeObservation) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchRangeObservation. +func (in *MatchRangeObservation) DeepCopy() *MatchRangeObservation { + if in == nil { + return nil + } + out := new(MatchRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchRangeParameters) DeepCopyInto(out *MatchRangeParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchRangeParameters. 
+func (in *MatchRangeParameters) DeepCopy() *MatchRangeParameters { + if in == nil { + return nil + } + out := new(MatchRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mesh) DeepCopyInto(out *Mesh) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mesh. +func (in *Mesh) DeepCopy() *Mesh { + if in == nil { + return nil + } + out := new(Mesh) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Mesh) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MeshInitParameters) DeepCopyInto(out *MeshInitParameters) { + *out = *in + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(MeshSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshInitParameters. +func (in *MeshInitParameters) DeepCopy() *MeshInitParameters { + if in == nil { + return nil + } + out := new(MeshInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MeshList) DeepCopyInto(out *MeshList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Mesh, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshList. +func (in *MeshList) DeepCopy() *MeshList { + if in == nil { + return nil + } + out := new(MeshList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MeshList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MeshObservation) DeepCopyInto(out *MeshObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.ResourceOwner != nil { + in, out := &in.ResourceOwner, &out.ResourceOwner + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(MeshSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := 
&inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshObservation. +func (in *MeshObservation) DeepCopy() *MeshObservation { + if in == nil { + return nil + } + out := new(MeshObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MeshParameters) DeepCopyInto(out *MeshParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(MeshSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshParameters. +func (in *MeshParameters) DeepCopy() *MeshParameters { + if in == nil { + return nil + } + out := new(MeshParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MeshSpec) DeepCopyInto(out *MeshSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshSpec. +func (in *MeshSpec) DeepCopy() *MeshSpec { + if in == nil { + return nil + } + out := new(MeshSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MeshSpecInitParameters) DeepCopyInto(out *MeshSpecInitParameters) { + *out = *in + if in.EgressFilter != nil { + in, out := &in.EgressFilter, &out.EgressFilter + *out = new(EgressFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceDiscovery != nil { + in, out := &in.ServiceDiscovery, &out.ServiceDiscovery + *out = new(ServiceDiscoveryInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshSpecInitParameters. +func (in *MeshSpecInitParameters) DeepCopy() *MeshSpecInitParameters { + if in == nil { + return nil + } + out := new(MeshSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MeshSpecObservation) DeepCopyInto(out *MeshSpecObservation) { + *out = *in + if in.EgressFilter != nil { + in, out := &in.EgressFilter, &out.EgressFilter + *out = new(EgressFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceDiscovery != nil { + in, out := &in.ServiceDiscovery, &out.ServiceDiscovery + *out = new(ServiceDiscoveryObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshSpecObservation. 
+func (in *MeshSpecObservation) DeepCopy() *MeshSpecObservation { + if in == nil { + return nil + } + out := new(MeshSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MeshSpecParameters) DeepCopyInto(out *MeshSpecParameters) { + *out = *in + if in.EgressFilter != nil { + in, out := &in.EgressFilter, &out.EgressFilter + *out = new(EgressFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceDiscovery != nil { + in, out := &in.ServiceDiscovery, &out.ServiceDiscovery + *out = new(ServiceDiscoveryParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshSpecParameters. +func (in *MeshSpecParameters) DeepCopy() *MeshSpecParameters { + if in == nil { + return nil + } + out := new(MeshSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MeshStatus) DeepCopyInto(out *MeshStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshStatus. +func (in *MeshStatus) DeepCopy() *MeshStatus { + if in == nil { + return nil + } + out := new(MeshStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataInitParameters) DeepCopyInto(out *MetadataInitParameters) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MetadataMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataInitParameters. +func (in *MetadataInitParameters) DeepCopy() *MetadataInitParameters { + if in == nil { + return nil + } + out := new(MetadataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataMatchInitParameters) DeepCopyInto(out *MetadataMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(MetadataMatchRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataMatchInitParameters. +func (in *MetadataMatchInitParameters) DeepCopy() *MetadataMatchInitParameters { + if in == nil { + return nil + } + out := new(MetadataMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataMatchObservation) DeepCopyInto(out *MetadataMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(MetadataMatchRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataMatchObservation. +func (in *MetadataMatchObservation) DeepCopy() *MetadataMatchObservation { + if in == nil { + return nil + } + out := new(MetadataMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataMatchParameters) DeepCopyInto(out *MetadataMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(MetadataMatchRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataMatchParameters. 
+func (in *MetadataMatchParameters) DeepCopy() *MetadataMatchParameters { + if in == nil { + return nil + } + out := new(MetadataMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataMatchRangeInitParameters) DeepCopyInto(out *MetadataMatchRangeInitParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataMatchRangeInitParameters. +func (in *MetadataMatchRangeInitParameters) DeepCopy() *MetadataMatchRangeInitParameters { + if in == nil { + return nil + } + out := new(MetadataMatchRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataMatchRangeObservation) DeepCopyInto(out *MetadataMatchRangeObservation) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataMatchRangeObservation. +func (in *MetadataMatchRangeObservation) DeepCopy() *MetadataMatchRangeObservation { + if in == nil { + return nil + } + out := new(MetadataMatchRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataMatchRangeParameters) DeepCopyInto(out *MetadataMatchRangeParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataMatchRangeParameters. +func (in *MetadataMatchRangeParameters) DeepCopy() *MetadataMatchRangeParameters { + if in == nil { + return nil + } + out := new(MetadataMatchRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataObservation) DeepCopyInto(out *MetadataObservation) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MetadataMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataObservation. +func (in *MetadataObservation) DeepCopy() *MetadataObservation { + if in == nil { + return nil + } + out := new(MetadataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataParameters) DeepCopyInto(out *MetadataParameters) { + *out = *in + if in.Invert != nil { + in, out := &in.Invert, &out.Invert + *out = new(bool) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MetadataMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataParameters. +func (in *MetadataParameters) DeepCopy() *MetadataParameters { + if in == nil { + return nil + } + out := new(MetadataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutlierDetectionInitParameters) DeepCopyInto(out *OutlierDetectionInitParameters) { + *out = *in + if in.BaseEjectionDuration != nil { + in, out := &in.BaseEjectionDuration, &out.BaseEjectionDuration + *out = new(BaseEjectionDurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(IntervalInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxEjectionPercent != nil { + in, out := &in.MaxEjectionPercent, &out.MaxEjectionPercent + *out = new(float64) + **out = **in + } + if in.MaxServerErrors != nil { + in, out := &in.MaxServerErrors, &out.MaxServerErrors + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetectionInitParameters. +func (in *OutlierDetectionInitParameters) DeepCopy() *OutlierDetectionInitParameters { + if in == nil { + return nil + } + out := new(OutlierDetectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutlierDetectionObservation) DeepCopyInto(out *OutlierDetectionObservation) { + *out = *in + if in.BaseEjectionDuration != nil { + in, out := &in.BaseEjectionDuration, &out.BaseEjectionDuration + *out = new(BaseEjectionDurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(IntervalObservation) + (*in).DeepCopyInto(*out) + } + if in.MaxEjectionPercent != nil { + in, out := &in.MaxEjectionPercent, &out.MaxEjectionPercent + *out = new(float64) + **out = **in + } + if in.MaxServerErrors != nil { + in, out := &in.MaxServerErrors, &out.MaxServerErrors + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetectionObservation. +func (in *OutlierDetectionObservation) DeepCopy() *OutlierDetectionObservation { + if in == nil { + return nil + } + out := new(OutlierDetectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutlierDetectionParameters) DeepCopyInto(out *OutlierDetectionParameters) { + *out = *in + if in.BaseEjectionDuration != nil { + in, out := &in.BaseEjectionDuration, &out.BaseEjectionDuration + *out = new(BaseEjectionDurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(IntervalParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxEjectionPercent != nil { + in, out := &in.MaxEjectionPercent, &out.MaxEjectionPercent + *out = new(float64) + **out = **in + } + if in.MaxServerErrors != nil { + in, out := &in.MaxServerErrors, &out.MaxServerErrors + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetectionParameters. 
+func (in *OutlierDetectionParameters) DeepCopy() *OutlierDetectionParameters { + if in == nil { + return nil + } + out := new(OutlierDetectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathInitParameters) DeepCopyInto(out *PathInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathInitParameters. +func (in *PathInitParameters) DeepCopy() *PathInitParameters { + if in == nil { + return nil + } + out := new(PathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathObservation) DeepCopyInto(out *PathObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathObservation. +func (in *PathObservation) DeepCopy() *PathObservation { + if in == nil { + return nil + } + out := new(PathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathParameters) DeepCopyInto(out *PathParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathParameters. 
+func (in *PathParameters) DeepCopy() *PathParameters { + if in == nil { + return nil + } + out := new(PathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerRequestInitParameters) DeepCopyInto(out *PerRequestInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerRequestInitParameters. +func (in *PerRequestInitParameters) DeepCopy() *PerRequestInitParameters { + if in == nil { + return nil + } + out := new(PerRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerRequestObservation) DeepCopyInto(out *PerRequestObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerRequestObservation. +func (in *PerRequestObservation) DeepCopy() *PerRequestObservation { + if in == nil { + return nil + } + out := new(PerRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerRequestParameters) DeepCopyInto(out *PerRequestParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerRequestParameters. +func (in *PerRequestParameters) DeepCopy() *PerRequestParameters { + if in == nil { + return nil + } + out := new(PerRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerRetryTimeoutInitParameters) DeepCopyInto(out *PerRetryTimeoutInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerRetryTimeoutInitParameters. +func (in *PerRetryTimeoutInitParameters) DeepCopy() *PerRetryTimeoutInitParameters { + if in == nil { + return nil + } + out := new(PerRetryTimeoutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerRetryTimeoutObservation) DeepCopyInto(out *PerRetryTimeoutObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerRetryTimeoutObservation. 
+func (in *PerRetryTimeoutObservation) DeepCopy() *PerRetryTimeoutObservation { + if in == nil { + return nil + } + out := new(PerRetryTimeoutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerRetryTimeoutParameters) DeepCopyInto(out *PerRetryTimeoutParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerRetryTimeoutParameters. +func (in *PerRetryTimeoutParameters) DeepCopy() *PerRetryTimeoutParameters { + if in == nil { + return nil + } + out := new(PerRetryTimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMappingInitParameters) DeepCopyInto(out *PortMappingInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMappingInitParameters. +func (in *PortMappingInitParameters) DeepCopy() *PortMappingInitParameters { + if in == nil { + return nil + } + out := new(PortMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PortMappingObservation) DeepCopyInto(out *PortMappingObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMappingObservation. +func (in *PortMappingObservation) DeepCopy() *PortMappingObservation { + if in == nil { + return nil + } + out := new(PortMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMappingParameters) DeepCopyInto(out *PortMappingParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMappingParameters. +func (in *PortMappingParameters) DeepCopy() *PortMappingParameters { + if in == nil { + return nil + } + out := new(PortMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixInitParameters) DeepCopyInto(out *PrefixInitParameters) { + *out = *in + if in.DefaultPrefix != nil { + in, out := &in.DefaultPrefix, &out.DefaultPrefix + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixInitParameters. 
+func (in *PrefixInitParameters) DeepCopy() *PrefixInitParameters { + if in == nil { + return nil + } + out := new(PrefixInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixObservation) DeepCopyInto(out *PrefixObservation) { + *out = *in + if in.DefaultPrefix != nil { + in, out := &in.DefaultPrefix, &out.DefaultPrefix + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixObservation. +func (in *PrefixObservation) DeepCopy() *PrefixObservation { + if in == nil { + return nil + } + out := new(PrefixObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixParameters) DeepCopyInto(out *PrefixParameters) { + *out = *in + if in.DefaultPrefix != nil { + in, out := &in.DefaultPrefix, &out.DefaultPrefix + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixParameters. +func (in *PrefixParameters) DeepCopy() *PrefixParameters { + if in == nil { + return nil + } + out := new(PrefixParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderInitParameters) DeepCopyInto(out *ProviderInitParameters) { + *out = *in + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(ProviderVirtualNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VirtualRouter != nil { + in, out := &in.VirtualRouter, &out.VirtualRouter + *out = new(ProviderVirtualRouterInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderInitParameters. +func (in *ProviderInitParameters) DeepCopy() *ProviderInitParameters { + if in == nil { + return nil + } + out := new(ProviderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderObservation) DeepCopyInto(out *ProviderObservation) { + *out = *in + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(ProviderVirtualNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.VirtualRouter != nil { + in, out := &in.VirtualRouter, &out.VirtualRouter + *out = new(ProviderVirtualRouterObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderObservation. +func (in *ProviderObservation) DeepCopy() *ProviderObservation { + if in == nil { + return nil + } + out := new(ProviderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderParameters) DeepCopyInto(out *ProviderParameters) { + *out = *in + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(ProviderVirtualNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.VirtualRouter != nil { + in, out := &in.VirtualRouter, &out.VirtualRouter + *out = new(ProviderVirtualRouterParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderParameters. +func (in *ProviderParameters) DeepCopy() *ProviderParameters { + if in == nil { + return nil + } + out := new(ProviderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderVirtualNodeInitParameters) DeepCopyInto(out *ProviderVirtualNodeInitParameters) { + *out = *in + if in.VirtualNodeName != nil { + in, out := &in.VirtualNodeName, &out.VirtualNodeName + *out = new(string) + **out = **in + } + if in.VirtualNodeNameRef != nil { + in, out := &in.VirtualNodeNameRef, &out.VirtualNodeNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNodeNameSelector != nil { + in, out := &in.VirtualNodeNameSelector, &out.VirtualNodeNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderVirtualNodeInitParameters. +func (in *ProviderVirtualNodeInitParameters) DeepCopy() *ProviderVirtualNodeInitParameters { + if in == nil { + return nil + } + out := new(ProviderVirtualNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderVirtualNodeObservation) DeepCopyInto(out *ProviderVirtualNodeObservation) { + *out = *in + if in.VirtualNodeName != nil { + in, out := &in.VirtualNodeName, &out.VirtualNodeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderVirtualNodeObservation. +func (in *ProviderVirtualNodeObservation) DeepCopy() *ProviderVirtualNodeObservation { + if in == nil { + return nil + } + out := new(ProviderVirtualNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderVirtualNodeParameters) DeepCopyInto(out *ProviderVirtualNodeParameters) { + *out = *in + if in.VirtualNodeName != nil { + in, out := &in.VirtualNodeName, &out.VirtualNodeName + *out = new(string) + **out = **in + } + if in.VirtualNodeNameRef != nil { + in, out := &in.VirtualNodeNameRef, &out.VirtualNodeNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNodeNameSelector != nil { + in, out := &in.VirtualNodeNameSelector, &out.VirtualNodeNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderVirtualNodeParameters. +func (in *ProviderVirtualNodeParameters) DeepCopy() *ProviderVirtualNodeParameters { + if in == nil { + return nil + } + out := new(ProviderVirtualNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderVirtualRouterInitParameters) DeepCopyInto(out *ProviderVirtualRouterInitParameters) { + *out = *in + if in.VirtualRouterName != nil { + in, out := &in.VirtualRouterName, &out.VirtualRouterName + *out = new(string) + **out = **in + } + if in.VirtualRouterNameRef != nil { + in, out := &in.VirtualRouterNameRef, &out.VirtualRouterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualRouterNameSelector != nil { + in, out := &in.VirtualRouterNameSelector, &out.VirtualRouterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderVirtualRouterInitParameters. +func (in *ProviderVirtualRouterInitParameters) DeepCopy() *ProviderVirtualRouterInitParameters { + if in == nil { + return nil + } + out := new(ProviderVirtualRouterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderVirtualRouterObservation) DeepCopyInto(out *ProviderVirtualRouterObservation) { + *out = *in + if in.VirtualRouterName != nil { + in, out := &in.VirtualRouterName, &out.VirtualRouterName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderVirtualRouterObservation. +func (in *ProviderVirtualRouterObservation) DeepCopy() *ProviderVirtualRouterObservation { + if in == nil { + return nil + } + out := new(ProviderVirtualRouterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderVirtualRouterParameters) DeepCopyInto(out *ProviderVirtualRouterParameters) { + *out = *in + if in.VirtualRouterName != nil { + in, out := &in.VirtualRouterName, &out.VirtualRouterName + *out = new(string) + **out = **in + } + if in.VirtualRouterNameRef != nil { + in, out := &in.VirtualRouterNameRef, &out.VirtualRouterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualRouterNameSelector != nil { + in, out := &in.VirtualRouterNameSelector, &out.VirtualRouterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderVirtualRouterParameters. +func (in *ProviderVirtualRouterParameters) DeepCopy() *ProviderVirtualRouterParameters { + if in == nil { + return nil + } + out := new(ProviderVirtualRouterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryParameterInitParameters) DeepCopyInto(out *QueryParameterInitParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(QueryParameterMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterInitParameters. +func (in *QueryParameterInitParameters) DeepCopy() *QueryParameterInitParameters { + if in == nil { + return nil + } + out := new(QueryParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryParameterMatchInitParameters) DeepCopyInto(out *QueryParameterMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterMatchInitParameters. +func (in *QueryParameterMatchInitParameters) DeepCopy() *QueryParameterMatchInitParameters { + if in == nil { + return nil + } + out := new(QueryParameterMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryParameterMatchObservation) DeepCopyInto(out *QueryParameterMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterMatchObservation. +func (in *QueryParameterMatchObservation) DeepCopy() *QueryParameterMatchObservation { + if in == nil { + return nil + } + out := new(QueryParameterMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryParameterMatchParameters) DeepCopyInto(out *QueryParameterMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterMatchParameters. +func (in *QueryParameterMatchParameters) DeepCopy() *QueryParameterMatchParameters { + if in == nil { + return nil + } + out := new(QueryParameterMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryParameterObservation) DeepCopyInto(out *QueryParameterObservation) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(QueryParameterMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterObservation. +func (in *QueryParameterObservation) DeepCopy() *QueryParameterObservation { + if in == nil { + return nil + } + out := new(QueryParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryParameterParameters) DeepCopyInto(out *QueryParameterParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(QueryParameterMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterParameters. +func (in *QueryParameterParameters) DeepCopy() *QueryParameterParameters { + if in == nil { + return nil + } + out := new(QueryParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeInitParameters) DeepCopyInto(out *RangeInitParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeInitParameters. 
+func (in *RangeInitParameters) DeepCopy() *RangeInitParameters { + if in == nil { + return nil + } + out := new(RangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeObservation) DeepCopyInto(out *RangeObservation) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeObservation. +func (in *RangeObservation) DeepCopy() *RangeObservation { + if in == nil { + return nil + } + out := new(RangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeParameters) DeepCopyInto(out *RangeParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeParameters. +func (in *RangeParameters) DeepCopy() *RangeParameters { + if in == nil { + return nil + } + out := new(RangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetryPolicyInitParameters) DeepCopyInto(out *RetryPolicyInitParameters) { + *out = *in + if in.GRPCRetryEvents != nil { + in, out := &in.GRPCRetryEvents, &out.GRPCRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPRetryEvents != nil { + in, out := &in.HTTPRetryEvents, &out.HTTPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(float64) + **out = **in + } + if in.PerRetryTimeout != nil { + in, out := &in.PerRetryTimeout, &out.PerRetryTimeout + *out = new(PerRetryTimeoutInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TCPRetryEvents != nil { + in, out := &in.TCPRetryEvents, &out.TCPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyInitParameters. +func (in *RetryPolicyInitParameters) DeepCopy() *RetryPolicyInitParameters { + if in == nil { + return nil + } + out := new(RetryPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetryPolicyObservation) DeepCopyInto(out *RetryPolicyObservation) { + *out = *in + if in.GRPCRetryEvents != nil { + in, out := &in.GRPCRetryEvents, &out.GRPCRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPRetryEvents != nil { + in, out := &in.HTTPRetryEvents, &out.HTTPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(float64) + **out = **in + } + if in.PerRetryTimeout != nil { + in, out := &in.PerRetryTimeout, &out.PerRetryTimeout + *out = new(PerRetryTimeoutObservation) + (*in).DeepCopyInto(*out) + } + if in.TCPRetryEvents != nil { + in, out := &in.TCPRetryEvents, &out.TCPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyObservation. +func (in *RetryPolicyObservation) DeepCopy() *RetryPolicyObservation { + if in == nil { + return nil + } + out := new(RetryPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetryPolicyParameters) DeepCopyInto(out *RetryPolicyParameters) { + *out = *in + if in.GRPCRetryEvents != nil { + in, out := &in.GRPCRetryEvents, &out.GRPCRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPRetryEvents != nil { + in, out := &in.HTTPRetryEvents, &out.HTTPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(float64) + **out = **in + } + if in.PerRetryTimeout != nil { + in, out := &in.PerRetryTimeout, &out.PerRetryTimeout + *out = new(PerRetryTimeoutParameters) + (*in).DeepCopyInto(*out) + } + if in.TCPRetryEvents != nil { + in, out := &in.TCPRetryEvents, &out.TCPRetryEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyParameters. +func (in *RetryPolicyParameters) DeepCopy() *RetryPolicyParameters { + if in == nil { + return nil + } + out := new(RetryPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyPerRetryTimeoutInitParameters) DeepCopyInto(out *RetryPolicyPerRetryTimeoutInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyPerRetryTimeoutInitParameters. 
+func (in *RetryPolicyPerRetryTimeoutInitParameters) DeepCopy() *RetryPolicyPerRetryTimeoutInitParameters { + if in == nil { + return nil + } + out := new(RetryPolicyPerRetryTimeoutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyPerRetryTimeoutObservation) DeepCopyInto(out *RetryPolicyPerRetryTimeoutObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyPerRetryTimeoutObservation. +func (in *RetryPolicyPerRetryTimeoutObservation) DeepCopy() *RetryPolicyPerRetryTimeoutObservation { + if in == nil { + return nil + } + out := new(RetryPolicyPerRetryTimeoutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyPerRetryTimeoutParameters) DeepCopyInto(out *RetryPolicyPerRetryTimeoutParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyPerRetryTimeoutParameters. +func (in *RetryPolicyPerRetryTimeoutParameters) DeepCopy() *RetryPolicyPerRetryTimeoutParameters { + if in == nil { + return nil + } + out := new(RetryPolicyPerRetryTimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RewriteHostnameInitParameters) DeepCopyInto(out *RewriteHostnameInitParameters) { + *out = *in + if in.DefaultTargetHostname != nil { + in, out := &in.DefaultTargetHostname, &out.DefaultTargetHostname + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteHostnameInitParameters. +func (in *RewriteHostnameInitParameters) DeepCopy() *RewriteHostnameInitParameters { + if in == nil { + return nil + } + out := new(RewriteHostnameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewriteHostnameObservation) DeepCopyInto(out *RewriteHostnameObservation) { + *out = *in + if in.DefaultTargetHostname != nil { + in, out := &in.DefaultTargetHostname, &out.DefaultTargetHostname + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteHostnameObservation. +func (in *RewriteHostnameObservation) DeepCopy() *RewriteHostnameObservation { + if in == nil { + return nil + } + out := new(RewriteHostnameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewriteHostnameParameters) DeepCopyInto(out *RewriteHostnameParameters) { + *out = *in + if in.DefaultTargetHostname != nil { + in, out := &in.DefaultTargetHostname, &out.DefaultTargetHostname + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteHostnameParameters. 
+func (in *RewriteHostnameParameters) DeepCopy() *RewriteHostnameParameters { + if in == nil { + return nil + } + out := new(RewriteHostnameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewriteInitParameters) DeepCopyInto(out *RewriteInitParameters) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(HostnameInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(PathInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(PrefixInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteInitParameters. +func (in *RewriteInitParameters) DeepCopy() *RewriteInitParameters { + if in == nil { + return nil + } + out := new(RewriteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewriteObservation) DeepCopyInto(out *RewriteObservation) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(HostnameObservation) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(PathObservation) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(PrefixObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteObservation. 
+func (in *RewriteObservation) DeepCopy() *RewriteObservation { + if in == nil { + return nil + } + out := new(RewriteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewriteParameters) DeepCopyInto(out *RewriteParameters) { + *out = *in + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(HostnameParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(PathParameters) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(PrefixParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteParameters. +func (in *RewriteParameters) DeepCopy() *RewriteParameters { + if in == nil { + return nil + } + out := new(RewriteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewritePathInitParameters) DeepCopyInto(out *RewritePathInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewritePathInitParameters. +func (in *RewritePathInitParameters) DeepCopy() *RewritePathInitParameters { + if in == nil { + return nil + } + out := new(RewritePathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RewritePathObservation) DeepCopyInto(out *RewritePathObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewritePathObservation. +func (in *RewritePathObservation) DeepCopy() *RewritePathObservation { + if in == nil { + return nil + } + out := new(RewritePathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewritePathParameters) DeepCopyInto(out *RewritePathParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewritePathParameters. +func (in *RewritePathParameters) DeepCopy() *RewritePathParameters { + if in == nil { + return nil + } + out := new(RewritePathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewritePrefixInitParameters) DeepCopyInto(out *RewritePrefixInitParameters) { + *out = *in + if in.DefaultPrefix != nil { + in, out := &in.DefaultPrefix, &out.DefaultPrefix + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewritePrefixInitParameters. +func (in *RewritePrefixInitParameters) DeepCopy() *RewritePrefixInitParameters { + if in == nil { + return nil + } + out := new(RewritePrefixInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RewritePrefixObservation) DeepCopyInto(out *RewritePrefixObservation) { + *out = *in + if in.DefaultPrefix != nil { + in, out := &in.DefaultPrefix, &out.DefaultPrefix + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewritePrefixObservation. +func (in *RewritePrefixObservation) DeepCopy() *RewritePrefixObservation { + if in == nil { + return nil + } + out := new(RewritePrefixObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RewritePrefixParameters) DeepCopyInto(out *RewritePrefixParameters) { + *out = *in + if in.DefaultPrefix != nil { + in, out := &in.DefaultPrefix, &out.DefaultPrefix + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewritePrefixParameters. +func (in *RewritePrefixParameters) DeepCopy() *RewritePrefixParameters { + if in == nil { + return nil + } + out := new(RewritePrefixParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Route) DeepCopyInto(out *Route) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. 
+func (in *Route) DeepCopy() *Route { + if in == nil { + return nil + } + out := new(Route) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Route) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteInitParameters) DeepCopyInto(out *RouteInitParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshNameRef != nil { + in, out := &in.MeshNameRef, &out.MeshNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MeshNameSelector != nil { + in, out := &in.MeshNameSelector, &out.MeshNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(RouteSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualRouterName != nil { + in, out := &in.VirtualRouterName, &out.VirtualRouterName + *out = new(string) + **out = **in + } + if in.VirtualRouterNameRef != nil { + in, out := &in.VirtualRouterNameRef, &out.VirtualRouterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualRouterNameSelector != nil { + in, out := &in.VirtualRouterNameSelector, 
&out.VirtualRouterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteInitParameters. +func (in *RouteInitParameters) DeepCopy() *RouteInitParameters { + if in == nil { + return nil + } + out := new(RouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteList) DeepCopyInto(out *RouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Route, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteList. +func (in *RouteList) DeepCopy() *RouteList { + if in == nil { + return nil + } + out := new(RouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteObservation) DeepCopyInto(out *RouteObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = new(string) + **out = **in + } + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceOwner != nil { + in, out := &in.ResourceOwner, &out.ResourceOwner + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(RouteSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualRouterName != nil { + in, out := &in.VirtualRouterName, &out.VirtualRouterName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteObservation. 
+func (in *RouteObservation) DeepCopy() *RouteObservation { + if in == nil { + return nil + } + out := new(RouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteParameters) DeepCopyInto(out *RouteParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshNameRef != nil { + in, out := &in.MeshNameRef, &out.MeshNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MeshNameSelector != nil { + in, out := &in.MeshNameSelector, &out.MeshNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(RouteSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualRouterName != nil { + in, out := &in.VirtualRouterName, &out.VirtualRouterName + *out = new(string) + **out = **in + } + if in.VirtualRouterNameRef != nil { + in, out := &in.VirtualRouterNameRef, &out.VirtualRouterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualRouterNameSelector != nil { + in, out := &in.VirtualRouterNameSelector, &out.VirtualRouterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new RouteParameters. +func (in *RouteParameters) DeepCopy() *RouteParameters { + if in == nil { + return nil + } + out := new(RouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSpec) DeepCopyInto(out *RouteSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpec. +func (in *RouteSpec) DeepCopy() *RouteSpec { + if in == nil { + return nil + } + out := new(RouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSpecInitParameters) DeepCopyInto(out *RouteSpecInitParameters) { + *out = *in + if in.GRPCRoute != nil { + in, out := &in.GRPCRoute, &out.GRPCRoute + *out = new(SpecGRPCRouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRoute != nil { + in, out := &in.HTTPRoute, &out.HTTPRoute + *out = new(SpecHTTPRouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Http2Route != nil { + in, out := &in.Http2Route, &out.Http2Route + *out = new(SpecHttp2RouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.TCPRoute != nil { + in, out := &in.TCPRoute, &out.TCPRoute + *out = new(TCPRouteInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpecInitParameters. 
+func (in *RouteSpecInitParameters) DeepCopy() *RouteSpecInitParameters { + if in == nil { + return nil + } + out := new(RouteSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSpecObservation) DeepCopyInto(out *RouteSpecObservation) { + *out = *in + if in.GRPCRoute != nil { + in, out := &in.GRPCRoute, &out.GRPCRoute + *out = new(SpecGRPCRouteObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRoute != nil { + in, out := &in.HTTPRoute, &out.HTTPRoute + *out = new(SpecHTTPRouteObservation) + (*in).DeepCopyInto(*out) + } + if in.Http2Route != nil { + in, out := &in.Http2Route, &out.Http2Route + *out = new(SpecHttp2RouteObservation) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.TCPRoute != nil { + in, out := &in.TCPRoute, &out.TCPRoute + *out = new(TCPRouteObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpecObservation. +func (in *RouteSpecObservation) DeepCopy() *RouteSpecObservation { + if in == nil { + return nil + } + out := new(RouteSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteSpecParameters) DeepCopyInto(out *RouteSpecParameters) { + *out = *in + if in.GRPCRoute != nil { + in, out := &in.GRPCRoute, &out.GRPCRoute + *out = new(SpecGRPCRouteParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRoute != nil { + in, out := &in.HTTPRoute, &out.HTTPRoute + *out = new(SpecHTTPRouteParameters) + (*in).DeepCopyInto(*out) + } + if in.Http2Route != nil { + in, out := &in.Http2Route, &out.Http2Route + *out = new(SpecHttp2RouteParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.TCPRoute != nil { + in, out := &in.TCPRoute, &out.TCPRoute + *out = new(TCPRouteParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpecParameters. +func (in *RouteSpecParameters) DeepCopy() *RouteSpecParameters { + if in == nil { + return nil + } + out := new(RouteSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. +func (in *RouteStatus) DeepCopy() *RouteStatus { + if in == nil { + return nil + } + out := new(RouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SdsInitParameters) DeepCopyInto(out *SdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SdsInitParameters. +func (in *SdsInitParameters) DeepCopy() *SdsInitParameters { + if in == nil { + return nil + } + out := new(SdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SdsObservation) DeepCopyInto(out *SdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SdsObservation. +func (in *SdsObservation) DeepCopy() *SdsObservation { + if in == nil { + return nil + } + out := new(SdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SdsParameters) DeepCopyInto(out *SdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SdsParameters. +func (in *SdsParameters) DeepCopy() *SdsParameters { + if in == nil { + return nil + } + out := new(SdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceDiscoveryInitParameters) DeepCopyInto(out *ServiceDiscoveryInitParameters) { + *out = *in + if in.IPPreference != nil { + in, out := &in.IPPreference, &out.IPPreference + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDiscoveryInitParameters. +func (in *ServiceDiscoveryInitParameters) DeepCopy() *ServiceDiscoveryInitParameters { + if in == nil { + return nil + } + out := new(ServiceDiscoveryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceDiscoveryObservation) DeepCopyInto(out *ServiceDiscoveryObservation) { + *out = *in + if in.IPPreference != nil { + in, out := &in.IPPreference, &out.IPPreference + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDiscoveryObservation. +func (in *ServiceDiscoveryObservation) DeepCopy() *ServiceDiscoveryObservation { + if in == nil { + return nil + } + out := new(ServiceDiscoveryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceDiscoveryParameters) DeepCopyInto(out *ServiceDiscoveryParameters) { + *out = *in + if in.IPPreference != nil { + in, out := &in.IPPreference, &out.IPPreference + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDiscoveryParameters. +func (in *ServiceDiscoveryParameters) DeepCopy() *ServiceDiscoveryParameters { + if in == nil { + return nil + } + out := new(ServiceDiscoveryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecBackendDefaultsInitParameters) DeepCopyInto(out *SpecBackendDefaultsInitParameters) { + *out = *in + if in.ClientPolicy != nil { + in, out := &in.ClientPolicy, &out.ClientPolicy + *out = new(BackendDefaultsClientPolicyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecBackendDefaultsInitParameters. +func (in *SpecBackendDefaultsInitParameters) DeepCopy() *SpecBackendDefaultsInitParameters { + if in == nil { + return nil + } + out := new(SpecBackendDefaultsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecBackendDefaultsObservation) DeepCopyInto(out *SpecBackendDefaultsObservation) { + *out = *in + if in.ClientPolicy != nil { + in, out := &in.ClientPolicy, &out.ClientPolicy + *out = new(BackendDefaultsClientPolicyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecBackendDefaultsObservation. +func (in *SpecBackendDefaultsObservation) DeepCopy() *SpecBackendDefaultsObservation { + if in == nil { + return nil + } + out := new(SpecBackendDefaultsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecBackendDefaultsParameters) DeepCopyInto(out *SpecBackendDefaultsParameters) { + *out = *in + if in.ClientPolicy != nil { + in, out := &in.ClientPolicy, &out.ClientPolicy + *out = new(BackendDefaultsClientPolicyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecBackendDefaultsParameters. 
+func (in *SpecBackendDefaultsParameters) DeepCopy() *SpecBackendDefaultsParameters { + if in == nil { + return nil + } + out := new(SpecBackendDefaultsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecGRPCRouteInitParameters) DeepCopyInto(out *SpecGRPCRouteInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(GRPCRouteActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(GRPCRouteMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TimeoutInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecGRPCRouteInitParameters. +func (in *SpecGRPCRouteInitParameters) DeepCopy() *SpecGRPCRouteInitParameters { + if in == nil { + return nil + } + out := new(SpecGRPCRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecGRPCRouteObservation) DeepCopyInto(out *SpecGRPCRouteObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(GRPCRouteActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(GRPCRouteMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TimeoutObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecGRPCRouteObservation. +func (in *SpecGRPCRouteObservation) DeepCopy() *SpecGRPCRouteObservation { + if in == nil { + return nil + } + out := new(SpecGRPCRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecGRPCRouteParameters) DeepCopyInto(out *SpecGRPCRouteParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(GRPCRouteActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(GRPCRouteMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TimeoutParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecGRPCRouteParameters. 
+func (in *SpecGRPCRouteParameters) DeepCopy() *SpecGRPCRouteParameters { + if in == nil { + return nil + } + out := new(SpecGRPCRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHTTPRouteActionInitParameters) DeepCopyInto(out *SpecHTTPRouteActionInitParameters) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]HTTPRouteActionWeightedTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteActionInitParameters. +func (in *SpecHTTPRouteActionInitParameters) DeepCopy() *SpecHTTPRouteActionInitParameters { + if in == nil { + return nil + } + out := new(SpecHTTPRouteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHTTPRouteActionObservation) DeepCopyInto(out *SpecHTTPRouteActionObservation) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]HTTPRouteActionWeightedTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteActionObservation. +func (in *SpecHTTPRouteActionObservation) DeepCopy() *SpecHTTPRouteActionObservation { + if in == nil { + return nil + } + out := new(SpecHTTPRouteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHTTPRouteActionParameters) DeepCopyInto(out *SpecHTTPRouteActionParameters) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]HTTPRouteActionWeightedTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteActionParameters. +func (in *SpecHTTPRouteActionParameters) DeepCopy() *SpecHTTPRouteActionParameters { + if in == nil { + return nil + } + out := new(SpecHTTPRouteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHTTPRouteInitParameters) DeepCopyInto(out *SpecHTTPRouteInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(SpecHTTPRouteActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(SpecHTTPRouteMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(HTTPRouteRetryPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(HTTPRouteTimeoutInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteInitParameters. +func (in *SpecHTTPRouteInitParameters) DeepCopy() *SpecHTTPRouteInitParameters { + if in == nil { + return nil + } + out := new(SpecHTTPRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHTTPRouteMatchInitParameters) DeepCopyInto(out *SpecHTTPRouteMatchInitParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HTTPRouteMatchHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(SpecHTTPRouteMatchPathInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]HTTPRouteMatchQueryParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteMatchInitParameters. +func (in *SpecHTTPRouteMatchInitParameters) DeepCopy() *SpecHTTPRouteMatchInitParameters { + if in == nil { + return nil + } + out := new(SpecHTTPRouteMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHTTPRouteMatchObservation) DeepCopyInto(out *SpecHTTPRouteMatchObservation) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HTTPRouteMatchHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(SpecHTTPRouteMatchPathObservation) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]HTTPRouteMatchQueryParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteMatchObservation. +func (in *SpecHTTPRouteMatchObservation) DeepCopy() *SpecHTTPRouteMatchObservation { + if in == nil { + return nil + } + out := new(SpecHTTPRouteMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHTTPRouteMatchParameters) DeepCopyInto(out *SpecHTTPRouteMatchParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HTTPRouteMatchHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(SpecHTTPRouteMatchPathParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]HTTPRouteMatchQueryParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteMatchParameters. +func (in *SpecHTTPRouteMatchParameters) DeepCopy() *SpecHTTPRouteMatchParameters { + if in == nil { + return nil + } + out := new(SpecHTTPRouteMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHTTPRouteMatchPathInitParameters) DeepCopyInto(out *SpecHTTPRouteMatchPathInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteMatchPathInitParameters. 
+func (in *SpecHTTPRouteMatchPathInitParameters) DeepCopy() *SpecHTTPRouteMatchPathInitParameters { + if in == nil { + return nil + } + out := new(SpecHTTPRouteMatchPathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHTTPRouteMatchPathObservation) DeepCopyInto(out *SpecHTTPRouteMatchPathObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteMatchPathObservation. +func (in *SpecHTTPRouteMatchPathObservation) DeepCopy() *SpecHTTPRouteMatchPathObservation { + if in == nil { + return nil + } + out := new(SpecHTTPRouteMatchPathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHTTPRouteMatchPathParameters) DeepCopyInto(out *SpecHTTPRouteMatchPathParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = new(string) + **out = **in + } + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteMatchPathParameters. +func (in *SpecHTTPRouteMatchPathParameters) DeepCopy() *SpecHTTPRouteMatchPathParameters { + if in == nil { + return nil + } + out := new(SpecHTTPRouteMatchPathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHTTPRouteObservation) DeepCopyInto(out *SpecHTTPRouteObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(SpecHTTPRouteActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(SpecHTTPRouteMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(HTTPRouteRetryPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(HTTPRouteTimeoutObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteObservation. +func (in *SpecHTTPRouteObservation) DeepCopy() *SpecHTTPRouteObservation { + if in == nil { + return nil + } + out := new(SpecHTTPRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHTTPRouteParameters) DeepCopyInto(out *SpecHTTPRouteParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(SpecHTTPRouteActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(SpecHTTPRouteMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(HTTPRouteRetryPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(HTTPRouteTimeoutParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHTTPRouteParameters. 
+func (in *SpecHTTPRouteParameters) DeepCopy() *SpecHTTPRouteParameters { + if in == nil { + return nil + } + out := new(SpecHTTPRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHttp2RouteActionInitParameters) DeepCopyInto(out *SpecHttp2RouteActionInitParameters) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]ActionWeightedTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHttp2RouteActionInitParameters. +func (in *SpecHttp2RouteActionInitParameters) DeepCopy() *SpecHttp2RouteActionInitParameters { + if in == nil { + return nil + } + out := new(SpecHttp2RouteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHttp2RouteActionObservation) DeepCopyInto(out *SpecHttp2RouteActionObservation) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]ActionWeightedTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHttp2RouteActionObservation. +func (in *SpecHttp2RouteActionObservation) DeepCopy() *SpecHttp2RouteActionObservation { + if in == nil { + return nil + } + out := new(SpecHttp2RouteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHttp2RouteActionParameters) DeepCopyInto(out *SpecHttp2RouteActionParameters) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]ActionWeightedTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHttp2RouteActionParameters. +func (in *SpecHttp2RouteActionParameters) DeepCopy() *SpecHttp2RouteActionParameters { + if in == nil { + return nil + } + out := new(SpecHttp2RouteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHttp2RouteInitParameters) DeepCopyInto(out *SpecHttp2RouteInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(SpecHttp2RouteActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(SpecHttp2RouteMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(Http2RouteRetryPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Http2RouteTimeoutInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHttp2RouteInitParameters. +func (in *SpecHttp2RouteInitParameters) DeepCopy() *SpecHttp2RouteInitParameters { + if in == nil { + return nil + } + out := new(SpecHttp2RouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHttp2RouteMatchInitParameters) DeepCopyInto(out *SpecHttp2RouteMatchInitParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]Http2RouteMatchHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(Http2RouteMatchPathInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]Http2RouteMatchQueryParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHttp2RouteMatchInitParameters. +func (in *SpecHttp2RouteMatchInitParameters) DeepCopy() *SpecHttp2RouteMatchInitParameters { + if in == nil { + return nil + } + out := new(SpecHttp2RouteMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHttp2RouteMatchObservation) DeepCopyInto(out *SpecHttp2RouteMatchObservation) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]Http2RouteMatchHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(Http2RouteMatchPathObservation) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]Http2RouteMatchQueryParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHttp2RouteMatchObservation. +func (in *SpecHttp2RouteMatchObservation) DeepCopy() *SpecHttp2RouteMatchObservation { + if in == nil { + return nil + } + out := new(SpecHttp2RouteMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHttp2RouteMatchParameters) DeepCopyInto(out *SpecHttp2RouteMatchParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]Http2RouteMatchHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(Http2RouteMatchPathParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]Http2RouteMatchQueryParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHttp2RouteMatchParameters. +func (in *SpecHttp2RouteMatchParameters) DeepCopy() *SpecHttp2RouteMatchParameters { + if in == nil { + return nil + } + out := new(SpecHttp2RouteMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecHttp2RouteObservation) DeepCopyInto(out *SpecHttp2RouteObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(SpecHttp2RouteActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(SpecHttp2RouteMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(Http2RouteRetryPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Http2RouteTimeoutObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHttp2RouteObservation. +func (in *SpecHttp2RouteObservation) DeepCopy() *SpecHttp2RouteObservation { + if in == nil { + return nil + } + out := new(SpecHttp2RouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecHttp2RouteParameters) DeepCopyInto(out *SpecHttp2RouteParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(SpecHttp2RouteActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(SpecHttp2RouteMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(Http2RouteRetryPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(Http2RouteTimeoutParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecHttp2RouteParameters. 
+func (in *SpecHttp2RouteParameters) DeepCopy() *SpecHttp2RouteParameters { + if in == nil { + return nil + } + out := new(SpecHttp2RouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecInitParameters) DeepCopyInto(out *SpecInitParameters) { + *out = *in + if in.GRPCRoute != nil { + in, out := &in.GRPCRoute, &out.GRPCRoute + *out = new(GRPCRouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRoute != nil { + in, out := &in.HTTPRoute, &out.HTTPRoute + *out = new(HTTPRouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Http2Route != nil { + in, out := &in.Http2Route, &out.Http2Route + *out = new(Http2RouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecInitParameters. +func (in *SpecInitParameters) DeepCopy() *SpecInitParameters { + if in == nil { + return nil + } + out := new(SpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecListenerInitParameters) DeepCopyInto(out *SpecListenerInitParameters) { + *out = *in + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ListenerConnectionPoolInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(ListenerHealthCheckInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutlierDetection != nil { + in, out := &in.OutlierDetection, &out.OutlierDetection + *out = new(OutlierDetectionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PortMapping != nil { + in, out := &in.PortMapping, &out.PortMapping + *out = new(ListenerPortMappingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(SpecListenerTLSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(ListenerTimeoutInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecListenerInitParameters. +func (in *SpecListenerInitParameters) DeepCopy() *SpecListenerInitParameters { + if in == nil { + return nil + } + out := new(SpecListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecListenerObservation) DeepCopyInto(out *SpecListenerObservation) { + *out = *in + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ListenerConnectionPoolObservation) + (*in).DeepCopyInto(*out) + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(ListenerHealthCheckObservation) + (*in).DeepCopyInto(*out) + } + if in.OutlierDetection != nil { + in, out := &in.OutlierDetection, &out.OutlierDetection + *out = new(OutlierDetectionObservation) + (*in).DeepCopyInto(*out) + } + if in.PortMapping != nil { + in, out := &in.PortMapping, &out.PortMapping + *out = new(ListenerPortMappingObservation) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(SpecListenerTLSObservation) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(ListenerTimeoutObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecListenerObservation. +func (in *SpecListenerObservation) DeepCopy() *SpecListenerObservation { + if in == nil { + return nil + } + out := new(SpecListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecListenerParameters) DeepCopyInto(out *SpecListenerParameters) { + *out = *in + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ListenerConnectionPoolParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(ListenerHealthCheckParameters) + (*in).DeepCopyInto(*out) + } + if in.OutlierDetection != nil { + in, out := &in.OutlierDetection, &out.OutlierDetection + *out = new(OutlierDetectionParameters) + (*in).DeepCopyInto(*out) + } + if in.PortMapping != nil { + in, out := &in.PortMapping, &out.PortMapping + *out = new(ListenerPortMappingParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(SpecListenerTLSParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(ListenerTimeoutParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecListenerParameters. +func (in *SpecListenerParameters) DeepCopy() *SpecListenerParameters { + if in == nil { + return nil + } + out := new(SpecListenerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecListenerPortMappingInitParameters) DeepCopyInto(out *SpecListenerPortMappingInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecListenerPortMappingInitParameters. 
+func (in *SpecListenerPortMappingInitParameters) DeepCopy() *SpecListenerPortMappingInitParameters { + if in == nil { + return nil + } + out := new(SpecListenerPortMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecListenerPortMappingObservation) DeepCopyInto(out *SpecListenerPortMappingObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecListenerPortMappingObservation. +func (in *SpecListenerPortMappingObservation) DeepCopy() *SpecListenerPortMappingObservation { + if in == nil { + return nil + } + out := new(SpecListenerPortMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecListenerPortMappingParameters) DeepCopyInto(out *SpecListenerPortMappingParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecListenerPortMappingParameters. +func (in *SpecListenerPortMappingParameters) DeepCopy() *SpecListenerPortMappingParameters { + if in == nil { + return nil + } + out := new(SpecListenerPortMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecListenerTLSInitParameters) DeepCopyInto(out *SpecListenerTLSInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(ListenerTLSCertificateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(ListenerTLSValidationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecListenerTLSInitParameters. +func (in *SpecListenerTLSInitParameters) DeepCopy() *SpecListenerTLSInitParameters { + if in == nil { + return nil + } + out := new(SpecListenerTLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecListenerTLSObservation) DeepCopyInto(out *SpecListenerTLSObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(ListenerTLSCertificateObservation) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(ListenerTLSValidationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecListenerTLSObservation. +func (in *SpecListenerTLSObservation) DeepCopy() *SpecListenerTLSObservation { + if in == nil { + return nil + } + out := new(SpecListenerTLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecListenerTLSParameters) DeepCopyInto(out *SpecListenerTLSParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(ListenerTLSCertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(ListenerTLSValidationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecListenerTLSParameters. +func (in *SpecListenerTLSParameters) DeepCopy() *SpecListenerTLSParameters { + if in == nil { + return nil + } + out := new(SpecListenerTLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecLoggingInitParameters) DeepCopyInto(out *SpecLoggingInitParameters) { + *out = *in + if in.AccessLog != nil { + in, out := &in.AccessLog, &out.AccessLog + *out = new(LoggingAccessLogInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecLoggingInitParameters. +func (in *SpecLoggingInitParameters) DeepCopy() *SpecLoggingInitParameters { + if in == nil { + return nil + } + out := new(SpecLoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecLoggingObservation) DeepCopyInto(out *SpecLoggingObservation) { + *out = *in + if in.AccessLog != nil { + in, out := &in.AccessLog, &out.AccessLog + *out = new(LoggingAccessLogObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecLoggingObservation. 
+func (in *SpecLoggingObservation) DeepCopy() *SpecLoggingObservation { + if in == nil { + return nil + } + out := new(SpecLoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecLoggingParameters) DeepCopyInto(out *SpecLoggingParameters) { + *out = *in + if in.AccessLog != nil { + in, out := &in.AccessLog, &out.AccessLog + *out = new(LoggingAccessLogParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecLoggingParameters. +func (in *SpecLoggingParameters) DeepCopy() *SpecLoggingParameters { + if in == nil { + return nil + } + out := new(SpecLoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecObservation) DeepCopyInto(out *SpecObservation) { + *out = *in + if in.GRPCRoute != nil { + in, out := &in.GRPCRoute, &out.GRPCRoute + *out = new(GRPCRouteObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRoute != nil { + in, out := &in.HTTPRoute, &out.HTTPRoute + *out = new(HTTPRouteObservation) + (*in).DeepCopyInto(*out) + } + if in.Http2Route != nil { + in, out := &in.Http2Route, &out.Http2Route + *out = new(Http2RouteObservation) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecObservation. +func (in *SpecObservation) DeepCopy() *SpecObservation { + if in == nil { + return nil + } + out := new(SpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecParameters) DeepCopyInto(out *SpecParameters) { + *out = *in + if in.GRPCRoute != nil { + in, out := &in.GRPCRoute, &out.GRPCRoute + *out = new(GRPCRouteParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRoute != nil { + in, out := &in.HTTPRoute, &out.HTTPRoute + *out = new(HTTPRouteParameters) + (*in).DeepCopyInto(*out) + } + if in.Http2Route != nil { + in, out := &in.Http2Route, &out.Http2Route + *out = new(Http2RouteParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecParameters. +func (in *SpecParameters) DeepCopy() *SpecParameters { + if in == nil { + return nil + } + out := new(SpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecServiceDiscoveryInitParameters) DeepCopyInto(out *SpecServiceDiscoveryInitParameters) { + *out = *in + if in.AwsCloudMap != nil { + in, out := &in.AwsCloudMap, &out.AwsCloudMap + *out = new(AwsCloudMapInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecServiceDiscoveryInitParameters. +func (in *SpecServiceDiscoveryInitParameters) DeepCopy() *SpecServiceDiscoveryInitParameters { + if in == nil { + return nil + } + out := new(SpecServiceDiscoveryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpecServiceDiscoveryObservation) DeepCopyInto(out *SpecServiceDiscoveryObservation) { + *out = *in + if in.AwsCloudMap != nil { + in, out := &in.AwsCloudMap, &out.AwsCloudMap + *out = new(AwsCloudMapObservation) + (*in).DeepCopyInto(*out) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecServiceDiscoveryObservation. +func (in *SpecServiceDiscoveryObservation) DeepCopy() *SpecServiceDiscoveryObservation { + if in == nil { + return nil + } + out := new(SpecServiceDiscoveryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecServiceDiscoveryParameters) DeepCopyInto(out *SpecServiceDiscoveryParameters) { + *out = *in + if in.AwsCloudMap != nil { + in, out := &in.AwsCloudMap, &out.AwsCloudMap + *out = new(AwsCloudMapParameters) + (*in).DeepCopyInto(*out) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecServiceDiscoveryParameters. +func (in *SpecServiceDiscoveryParameters) DeepCopy() *SpecServiceDiscoveryParameters { + if in == nil { + return nil + } + out := new(SpecServiceDiscoveryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectAlternativeNamesInitParameters) DeepCopyInto(out *SubjectAlternativeNamesInitParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(SubjectAlternativeNamesMatchInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAlternativeNamesInitParameters. +func (in *SubjectAlternativeNamesInitParameters) DeepCopy() *SubjectAlternativeNamesInitParameters { + if in == nil { + return nil + } + out := new(SubjectAlternativeNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAlternativeNamesMatchInitParameters) DeepCopyInto(out *SubjectAlternativeNamesMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAlternativeNamesMatchInitParameters. +func (in *SubjectAlternativeNamesMatchInitParameters) DeepCopy() *SubjectAlternativeNamesMatchInitParameters { + if in == nil { + return nil + } + out := new(SubjectAlternativeNamesMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectAlternativeNamesMatchObservation) DeepCopyInto(out *SubjectAlternativeNamesMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAlternativeNamesMatchObservation. +func (in *SubjectAlternativeNamesMatchObservation) DeepCopy() *SubjectAlternativeNamesMatchObservation { + if in == nil { + return nil + } + out := new(SubjectAlternativeNamesMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAlternativeNamesMatchParameters) DeepCopyInto(out *SubjectAlternativeNamesMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAlternativeNamesMatchParameters. +func (in *SubjectAlternativeNamesMatchParameters) DeepCopy() *SubjectAlternativeNamesMatchParameters { + if in == nil { + return nil + } + out := new(SubjectAlternativeNamesMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectAlternativeNamesObservation) DeepCopyInto(out *SubjectAlternativeNamesObservation) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(SubjectAlternativeNamesMatchObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAlternativeNamesObservation. +func (in *SubjectAlternativeNamesObservation) DeepCopy() *SubjectAlternativeNamesObservation { + if in == nil { + return nil + } + out := new(SubjectAlternativeNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAlternativeNamesParameters) DeepCopyInto(out *SubjectAlternativeNamesParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(SubjectAlternativeNamesMatchParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAlternativeNamesParameters. +func (in *SubjectAlternativeNamesParameters) DeepCopy() *SubjectAlternativeNamesParameters { + if in == nil { + return nil + } + out := new(SubjectAlternativeNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPIdleInitParameters) DeepCopyInto(out *TCPIdleInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPIdleInitParameters. 
+func (in *TCPIdleInitParameters) DeepCopy() *TCPIdleInitParameters { + if in == nil { + return nil + } + out := new(TCPIdleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPIdleObservation) DeepCopyInto(out *TCPIdleObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPIdleObservation. +func (in *TCPIdleObservation) DeepCopy() *TCPIdleObservation { + if in == nil { + return nil + } + out := new(TCPIdleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPIdleParameters) DeepCopyInto(out *TCPIdleParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPIdleParameters. +func (in *TCPIdleParameters) DeepCopy() *TCPIdleParameters { + if in == nil { + return nil + } + out := new(TCPIdleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPInitParameters) DeepCopyInto(out *TCPInitParameters) { + *out = *in + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPInitParameters. 
+func (in *TCPInitParameters) DeepCopy() *TCPInitParameters { + if in == nil { + return nil + } + out := new(TCPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPObservation) DeepCopyInto(out *TCPObservation) { + *out = *in + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPObservation. +func (in *TCPObservation) DeepCopy() *TCPObservation { + if in == nil { + return nil + } + out := new(TCPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPParameters) DeepCopyInto(out *TCPParameters) { + *out = *in + if in.MaxConnections != nil { + in, out := &in.MaxConnections, &out.MaxConnections + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPParameters. +func (in *TCPParameters) DeepCopy() *TCPParameters { + if in == nil { + return nil + } + out := new(TCPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteActionInitParameters) DeepCopyInto(out *TCPRouteActionInitParameters) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]TCPRouteActionWeightedTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteActionInitParameters. 
+func (in *TCPRouteActionInitParameters) DeepCopy() *TCPRouteActionInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteActionObservation) DeepCopyInto(out *TCPRouteActionObservation) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]TCPRouteActionWeightedTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteActionObservation. +func (in *TCPRouteActionObservation) DeepCopy() *TCPRouteActionObservation { + if in == nil { + return nil + } + out := new(TCPRouteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteActionParameters) DeepCopyInto(out *TCPRouteActionParameters) { + *out = *in + if in.WeightedTarget != nil { + in, out := &in.WeightedTarget, &out.WeightedTarget + *out = make([]TCPRouteActionWeightedTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteActionParameters. +func (in *TCPRouteActionParameters) DeepCopy() *TCPRouteActionParameters { + if in == nil { + return nil + } + out := new(TCPRouteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRouteActionWeightedTargetInitParameters) DeepCopyInto(out *TCPRouteActionWeightedTargetInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(string) + **out = **in + } + if in.VirtualNodeRef != nil { + in, out := &in.VirtualNodeRef, &out.VirtualNodeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNodeSelector != nil { + in, out := &in.VirtualNodeSelector, &out.VirtualNodeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteActionWeightedTargetInitParameters. +func (in *TCPRouteActionWeightedTargetInitParameters) DeepCopy() *TCPRouteActionWeightedTargetInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteActionWeightedTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteActionWeightedTargetObservation) DeepCopyInto(out *TCPRouteActionWeightedTargetObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteActionWeightedTargetObservation. 
+func (in *TCPRouteActionWeightedTargetObservation) DeepCopy() *TCPRouteActionWeightedTargetObservation { + if in == nil { + return nil + } + out := new(TCPRouteActionWeightedTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteActionWeightedTargetParameters) DeepCopyInto(out *TCPRouteActionWeightedTargetParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(string) + **out = **in + } + if in.VirtualNodeRef != nil { + in, out := &in.VirtualNodeRef, &out.VirtualNodeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNodeSelector != nil { + in, out := &in.VirtualNodeSelector, &out.VirtualNodeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteActionWeightedTargetParameters. +func (in *TCPRouteActionWeightedTargetParameters) DeepCopy() *TCPRouteActionWeightedTargetParameters { + if in == nil { + return nil + } + out := new(TCPRouteActionWeightedTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRouteInitParameters) DeepCopyInto(out *TCPRouteInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(TCPRouteActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(TCPRouteMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TCPRouteTimeoutInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteInitParameters. +func (in *TCPRouteInitParameters) DeepCopy() *TCPRouteInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteMatchInitParameters) DeepCopyInto(out *TCPRouteMatchInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteMatchInitParameters. +func (in *TCPRouteMatchInitParameters) DeepCopy() *TCPRouteMatchInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteMatchObservation) DeepCopyInto(out *TCPRouteMatchObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteMatchObservation. 
+func (in *TCPRouteMatchObservation) DeepCopy() *TCPRouteMatchObservation { + if in == nil { + return nil + } + out := new(TCPRouteMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteMatchParameters) DeepCopyInto(out *TCPRouteMatchParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteMatchParameters. +func (in *TCPRouteMatchParameters) DeepCopy() *TCPRouteMatchParameters { + if in == nil { + return nil + } + out := new(TCPRouteMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteObservation) DeepCopyInto(out *TCPRouteObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(TCPRouteActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(TCPRouteMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TCPRouteTimeoutObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteObservation. +func (in *TCPRouteObservation) DeepCopy() *TCPRouteObservation { + if in == nil { + return nil + } + out := new(TCPRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRouteParameters) DeepCopyInto(out *TCPRouteParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(TCPRouteActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(TCPRouteMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TCPRouteTimeoutParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteParameters. +func (in *TCPRouteParameters) DeepCopy() *TCPRouteParameters { + if in == nil { + return nil + } + out := new(TCPRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteTimeoutIdleInitParameters) DeepCopyInto(out *TCPRouteTimeoutIdleInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteTimeoutIdleInitParameters. +func (in *TCPRouteTimeoutIdleInitParameters) DeepCopy() *TCPRouteTimeoutIdleInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteTimeoutIdleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TCPRouteTimeoutIdleObservation) DeepCopyInto(out *TCPRouteTimeoutIdleObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteTimeoutIdleObservation. +func (in *TCPRouteTimeoutIdleObservation) DeepCopy() *TCPRouteTimeoutIdleObservation { + if in == nil { + return nil + } + out := new(TCPRouteTimeoutIdleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteTimeoutIdleParameters) DeepCopyInto(out *TCPRouteTimeoutIdleParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteTimeoutIdleParameters. +func (in *TCPRouteTimeoutIdleParameters) DeepCopy() *TCPRouteTimeoutIdleParameters { + if in == nil { + return nil + } + out := new(TCPRouteTimeoutIdleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteTimeoutInitParameters) DeepCopyInto(out *TCPRouteTimeoutInitParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(TCPRouteTimeoutIdleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteTimeoutInitParameters. 
+func (in *TCPRouteTimeoutInitParameters) DeepCopy() *TCPRouteTimeoutInitParameters { + if in == nil { + return nil + } + out := new(TCPRouteTimeoutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteTimeoutObservation) DeepCopyInto(out *TCPRouteTimeoutObservation) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(TCPRouteTimeoutIdleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteTimeoutObservation. +func (in *TCPRouteTimeoutObservation) DeepCopy() *TCPRouteTimeoutObservation { + if in == nil { + return nil + } + out := new(TCPRouteTimeoutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRouteTimeoutParameters) DeepCopyInto(out *TCPRouteTimeoutParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(TCPRouteTimeoutIdleParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRouteTimeoutParameters. +func (in *TCPRouteTimeoutParameters) DeepCopy() *TCPRouteTimeoutParameters { + if in == nil { + return nil + } + out := new(TCPRouteTimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSCertificateAcmInitParameters) DeepCopyInto(out *TLSCertificateAcmInitParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateAcmInitParameters. +func (in *TLSCertificateAcmInitParameters) DeepCopy() *TLSCertificateAcmInitParameters { + if in == nil { + return nil + } + out := new(TLSCertificateAcmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCertificateAcmObservation) DeepCopyInto(out *TLSCertificateAcmObservation) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateAcmObservation. +func (in *TLSCertificateAcmObservation) DeepCopy() *TLSCertificateAcmObservation { + if in == nil { + return nil + } + out := new(TLSCertificateAcmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCertificateAcmParameters) DeepCopyInto(out *TLSCertificateAcmParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateAcmParameters. 
+func (in *TLSCertificateAcmParameters) DeepCopy() *TLSCertificateAcmParameters { + if in == nil { + return nil + } + out := new(TLSCertificateAcmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCertificateFileInitParameters) DeepCopyInto(out *TLSCertificateFileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateFileInitParameters. +func (in *TLSCertificateFileInitParameters) DeepCopy() *TLSCertificateFileInitParameters { + if in == nil { + return nil + } + out := new(TLSCertificateFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCertificateFileObservation) DeepCopyInto(out *TLSCertificateFileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateFileObservation. +func (in *TLSCertificateFileObservation) DeepCopy() *TLSCertificateFileObservation { + if in == nil { + return nil + } + out := new(TLSCertificateFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSCertificateFileParameters) DeepCopyInto(out *TLSCertificateFileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } + if in.PrivateKey != nil { + in, out := &in.PrivateKey, &out.PrivateKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateFileParameters. +func (in *TLSCertificateFileParameters) DeepCopy() *TLSCertificateFileParameters { + if in == nil { + return nil + } + out := new(TLSCertificateFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCertificateInitParameters) DeepCopyInto(out *TLSCertificateInitParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(CertificateAcmInitParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(CertificateFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(CertificateSdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateInitParameters. +func (in *TLSCertificateInitParameters) DeepCopy() *TLSCertificateInitParameters { + if in == nil { + return nil + } + out := new(TLSCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSCertificateObservation) DeepCopyInto(out *TLSCertificateObservation) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(CertificateAcmObservation) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(CertificateFileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(CertificateSdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateObservation. +func (in *TLSCertificateObservation) DeepCopy() *TLSCertificateObservation { + if in == nil { + return nil + } + out := new(TLSCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCertificateParameters) DeepCopyInto(out *TLSCertificateParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(CertificateAcmParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(CertificateFileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(CertificateSdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateParameters. +func (in *TLSCertificateParameters) DeepCopy() *TLSCertificateParameters { + if in == nil { + return nil + } + out := new(TLSCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSCertificateSdsInitParameters) DeepCopyInto(out *TLSCertificateSdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateSdsInitParameters. +func (in *TLSCertificateSdsInitParameters) DeepCopy() *TLSCertificateSdsInitParameters { + if in == nil { + return nil + } + out := new(TLSCertificateSdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCertificateSdsObservation) DeepCopyInto(out *TLSCertificateSdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateSdsObservation. +func (in *TLSCertificateSdsObservation) DeepCopy() *TLSCertificateSdsObservation { + if in == nil { + return nil + } + out := new(TLSCertificateSdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCertificateSdsParameters) DeepCopyInto(out *TLSCertificateSdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateSdsParameters. +func (in *TLSCertificateSdsParameters) DeepCopy() *TLSCertificateSdsParameters { + if in == nil { + return nil + } + out := new(TLSCertificateSdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSInitParameters) DeepCopyInto(out *TLSInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(CertificateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(ValidationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSInitParameters. +func (in *TLSInitParameters) DeepCopy() *TLSInitParameters { + if in == nil { + return nil + } + out := new(TLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSObservation) DeepCopyInto(out *TLSObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(CertificateObservation) + (*in).DeepCopyInto(*out) + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(ValidationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSObservation. 
+func (in *TLSObservation) DeepCopy() *TLSObservation { + if in == nil { + return nil + } + out := new(TLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSParameters) DeepCopyInto(out *TLSParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(CertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(ValidationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSParameters. +func (in *TLSParameters) DeepCopy() *TLSParameters { + if in == nil { + return nil + } + out := new(TLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationInitParameters) DeepCopyInto(out *TLSValidationInitParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(ValidationSubjectAlternativeNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(ValidationTrustInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationInitParameters. 
+func (in *TLSValidationInitParameters) DeepCopy() *TLSValidationInitParameters { + if in == nil { + return nil + } + out := new(TLSValidationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationObservation) DeepCopyInto(out *TLSValidationObservation) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(ValidationSubjectAlternativeNamesObservation) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(ValidationTrustObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationObservation. +func (in *TLSValidationObservation) DeepCopy() *TLSValidationObservation { + if in == nil { + return nil + } + out := new(TLSValidationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationParameters) DeepCopyInto(out *TLSValidationParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(ValidationSubjectAlternativeNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(ValidationTrustParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationParameters. +func (in *TLSValidationParameters) DeepCopy() *TLSValidationParameters { + if in == nil { + return nil + } + out := new(TLSValidationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TLSValidationSubjectAlternativeNamesInitParameters) DeepCopyInto(out *TLSValidationSubjectAlternativeNamesInitParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(TLSValidationSubjectAlternativeNamesMatchInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationSubjectAlternativeNamesInitParameters. +func (in *TLSValidationSubjectAlternativeNamesInitParameters) DeepCopy() *TLSValidationSubjectAlternativeNamesInitParameters { + if in == nil { + return nil + } + out := new(TLSValidationSubjectAlternativeNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationSubjectAlternativeNamesMatchInitParameters) DeepCopyInto(out *TLSValidationSubjectAlternativeNamesMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationSubjectAlternativeNamesMatchInitParameters. +func (in *TLSValidationSubjectAlternativeNamesMatchInitParameters) DeepCopy() *TLSValidationSubjectAlternativeNamesMatchInitParameters { + if in == nil { + return nil + } + out := new(TLSValidationSubjectAlternativeNamesMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSValidationSubjectAlternativeNamesMatchObservation) DeepCopyInto(out *TLSValidationSubjectAlternativeNamesMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationSubjectAlternativeNamesMatchObservation. +func (in *TLSValidationSubjectAlternativeNamesMatchObservation) DeepCopy() *TLSValidationSubjectAlternativeNamesMatchObservation { + if in == nil { + return nil + } + out := new(TLSValidationSubjectAlternativeNamesMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationSubjectAlternativeNamesMatchParameters) DeepCopyInto(out *TLSValidationSubjectAlternativeNamesMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationSubjectAlternativeNamesMatchParameters. +func (in *TLSValidationSubjectAlternativeNamesMatchParameters) DeepCopy() *TLSValidationSubjectAlternativeNamesMatchParameters { + if in == nil { + return nil + } + out := new(TLSValidationSubjectAlternativeNamesMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSValidationSubjectAlternativeNamesObservation) DeepCopyInto(out *TLSValidationSubjectAlternativeNamesObservation) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(TLSValidationSubjectAlternativeNamesMatchObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationSubjectAlternativeNamesObservation. +func (in *TLSValidationSubjectAlternativeNamesObservation) DeepCopy() *TLSValidationSubjectAlternativeNamesObservation { + if in == nil { + return nil + } + out := new(TLSValidationSubjectAlternativeNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationSubjectAlternativeNamesParameters) DeepCopyInto(out *TLSValidationSubjectAlternativeNamesParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(TLSValidationSubjectAlternativeNamesMatchParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationSubjectAlternativeNamesParameters. +func (in *TLSValidationSubjectAlternativeNamesParameters) DeepCopy() *TLSValidationSubjectAlternativeNamesParameters { + if in == nil { + return nil + } + out := new(TLSValidationSubjectAlternativeNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSValidationTrustFileInitParameters) DeepCopyInto(out *TLSValidationTrustFileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationTrustFileInitParameters. +func (in *TLSValidationTrustFileInitParameters) DeepCopy() *TLSValidationTrustFileInitParameters { + if in == nil { + return nil + } + out := new(TLSValidationTrustFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationTrustFileObservation) DeepCopyInto(out *TLSValidationTrustFileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationTrustFileObservation. +func (in *TLSValidationTrustFileObservation) DeepCopy() *TLSValidationTrustFileObservation { + if in == nil { + return nil + } + out := new(TLSValidationTrustFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationTrustFileParameters) DeepCopyInto(out *TLSValidationTrustFileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationTrustFileParameters. 
+func (in *TLSValidationTrustFileParameters) DeepCopy() *TLSValidationTrustFileParameters { + if in == nil { + return nil + } + out := new(TLSValidationTrustFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationTrustInitParameters) DeepCopyInto(out *TLSValidationTrustInitParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(TrustAcmInitParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(TLSValidationTrustFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(TLSValidationTrustSdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationTrustInitParameters. +func (in *TLSValidationTrustInitParameters) DeepCopy() *TLSValidationTrustInitParameters { + if in == nil { + return nil + } + out := new(TLSValidationTrustInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationTrustObservation) DeepCopyInto(out *TLSValidationTrustObservation) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(TrustAcmObservation) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(TLSValidationTrustFileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(TLSValidationTrustSdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationTrustObservation. 
+func (in *TLSValidationTrustObservation) DeepCopy() *TLSValidationTrustObservation { + if in == nil { + return nil + } + out := new(TLSValidationTrustObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationTrustParameters) DeepCopyInto(out *TLSValidationTrustParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(TrustAcmParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(TLSValidationTrustFileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(TLSValidationTrustSdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationTrustParameters. +func (in *TLSValidationTrustParameters) DeepCopy() *TLSValidationTrustParameters { + if in == nil { + return nil + } + out := new(TLSValidationTrustParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationTrustSdsInitParameters) DeepCopyInto(out *TLSValidationTrustSdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationTrustSdsInitParameters. +func (in *TLSValidationTrustSdsInitParameters) DeepCopy() *TLSValidationTrustSdsInitParameters { + if in == nil { + return nil + } + out := new(TLSValidationTrustSdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSValidationTrustSdsObservation) DeepCopyInto(out *TLSValidationTrustSdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationTrustSdsObservation. +func (in *TLSValidationTrustSdsObservation) DeepCopy() *TLSValidationTrustSdsObservation { + if in == nil { + return nil + } + out := new(TLSValidationTrustSdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSValidationTrustSdsParameters) DeepCopyInto(out *TLSValidationTrustSdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSValidationTrustSdsParameters. +func (in *TLSValidationTrustSdsParameters) DeepCopy() *TLSValidationTrustSdsParameters { + if in == nil { + return nil + } + out := new(TLSValidationTrustSdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetInitParameters) DeepCopyInto(out *TargetInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(VirtualServiceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetInitParameters. 
+func (in *TargetInitParameters) DeepCopy() *TargetInitParameters { + if in == nil { + return nil + } + out := new(TargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetObservation) DeepCopyInto(out *TargetObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(VirtualServiceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObservation. +func (in *TargetObservation) DeepCopy() *TargetObservation { + if in == nil { + return nil + } + out := new(TargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetParameters) DeepCopyInto(out *TargetParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(VirtualServiceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetParameters. +func (in *TargetParameters) DeepCopy() *TargetParameters { + if in == nil { + return nil + } + out := new(TargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetVirtualServiceInitParameters) DeepCopyInto(out *TargetVirtualServiceInitParameters) { + *out = *in + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetVirtualServiceInitParameters. +func (in *TargetVirtualServiceInitParameters) DeepCopy() *TargetVirtualServiceInitParameters { + if in == nil { + return nil + } + out := new(TargetVirtualServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetVirtualServiceObservation) DeepCopyInto(out *TargetVirtualServiceObservation) { + *out = *in + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetVirtualServiceObservation. +func (in *TargetVirtualServiceObservation) DeepCopy() *TargetVirtualServiceObservation { + if in == nil { + return nil + } + out := new(TargetVirtualServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetVirtualServiceParameters) DeepCopyInto(out *TargetVirtualServiceParameters) { + *out = *in + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetVirtualServiceParameters. 
+func (in *TargetVirtualServiceParameters) DeepCopy() *TargetVirtualServiceParameters { + if in == nil { + return nil + } + out := new(TargetVirtualServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutGRPCInitParameters) DeepCopyInto(out *TimeoutGRPCInitParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(GRPCIdleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(GRPCPerRequestInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutGRPCInitParameters. +func (in *TimeoutGRPCInitParameters) DeepCopy() *TimeoutGRPCInitParameters { + if in == nil { + return nil + } + out := new(TimeoutGRPCInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutGRPCObservation) DeepCopyInto(out *TimeoutGRPCObservation) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(GRPCIdleObservation) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(GRPCPerRequestObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutGRPCObservation. +func (in *TimeoutGRPCObservation) DeepCopy() *TimeoutGRPCObservation { + if in == nil { + return nil + } + out := new(TimeoutGRPCObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeoutGRPCParameters) DeepCopyInto(out *TimeoutGRPCParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(GRPCIdleParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(GRPCPerRequestParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutGRPCParameters. +func (in *TimeoutGRPCParameters) DeepCopy() *TimeoutGRPCParameters { + if in == nil { + return nil + } + out := new(TimeoutGRPCParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutHTTPInitParameters) DeepCopyInto(out *TimeoutHTTPInitParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(HTTPIdleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(HTTPPerRequestInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutHTTPInitParameters. +func (in *TimeoutHTTPInitParameters) DeepCopy() *TimeoutHTTPInitParameters { + if in == nil { + return nil + } + out := new(TimeoutHTTPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeoutHTTPObservation) DeepCopyInto(out *TimeoutHTTPObservation) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(HTTPIdleObservation) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(HTTPPerRequestObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutHTTPObservation. +func (in *TimeoutHTTPObservation) DeepCopy() *TimeoutHTTPObservation { + if in == nil { + return nil + } + out := new(TimeoutHTTPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutHTTPParameters) DeepCopyInto(out *TimeoutHTTPParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(HTTPIdleParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(HTTPPerRequestParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutHTTPParameters. +func (in *TimeoutHTTPParameters) DeepCopy() *TimeoutHTTPParameters { + if in == nil { + return nil + } + out := new(TimeoutHTTPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeoutHttp2InitParameters) DeepCopyInto(out *TimeoutHttp2InitParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(Http2IdleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(Http2PerRequestInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutHttp2InitParameters. +func (in *TimeoutHttp2InitParameters) DeepCopy() *TimeoutHttp2InitParameters { + if in == nil { + return nil + } + out := new(TimeoutHttp2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutHttp2Observation) DeepCopyInto(out *TimeoutHttp2Observation) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(Http2IdleObservation) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(Http2PerRequestObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutHttp2Observation. +func (in *TimeoutHttp2Observation) DeepCopy() *TimeoutHttp2Observation { + if in == nil { + return nil + } + out := new(TimeoutHttp2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeoutHttp2Parameters) DeepCopyInto(out *TimeoutHttp2Parameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(Http2IdleParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(Http2PerRequestParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutHttp2Parameters. +func (in *TimeoutHttp2Parameters) DeepCopy() *TimeoutHttp2Parameters { + if in == nil { + return nil + } + out := new(TimeoutHttp2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutIdleInitParameters) DeepCopyInto(out *TimeoutIdleInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutIdleInitParameters. +func (in *TimeoutIdleInitParameters) DeepCopy() *TimeoutIdleInitParameters { + if in == nil { + return nil + } + out := new(TimeoutIdleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutIdleObservation) DeepCopyInto(out *TimeoutIdleObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutIdleObservation. 
+func (in *TimeoutIdleObservation) DeepCopy() *TimeoutIdleObservation { + if in == nil { + return nil + } + out := new(TimeoutIdleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutIdleParameters) DeepCopyInto(out *TimeoutIdleParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutIdleParameters. +func (in *TimeoutIdleParameters) DeepCopy() *TimeoutIdleParameters { + if in == nil { + return nil + } + out := new(TimeoutIdleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutInitParameters) DeepCopyInto(out *TimeoutInitParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(IdleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(PerRequestInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutInitParameters. +func (in *TimeoutInitParameters) DeepCopy() *TimeoutInitParameters { + if in == nil { + return nil + } + out := new(TimeoutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeoutObservation) DeepCopyInto(out *TimeoutObservation) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(IdleObservation) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(PerRequestObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutObservation. +func (in *TimeoutObservation) DeepCopy() *TimeoutObservation { + if in == nil { + return nil + } + out := new(TimeoutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutParameters) DeepCopyInto(out *TimeoutParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(IdleParameters) + (*in).DeepCopyInto(*out) + } + if in.PerRequest != nil { + in, out := &in.PerRequest, &out.PerRequest + *out = new(PerRequestParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutParameters. +func (in *TimeoutParameters) DeepCopy() *TimeoutParameters { + if in == nil { + return nil + } + out := new(TimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutPerRequestInitParameters) DeepCopyInto(out *TimeoutPerRequestInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutPerRequestInitParameters. 
+func (in *TimeoutPerRequestInitParameters) DeepCopy() *TimeoutPerRequestInitParameters { + if in == nil { + return nil + } + out := new(TimeoutPerRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutPerRequestObservation) DeepCopyInto(out *TimeoutPerRequestObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutPerRequestObservation. +func (in *TimeoutPerRequestObservation) DeepCopy() *TimeoutPerRequestObservation { + if in == nil { + return nil + } + out := new(TimeoutPerRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutPerRequestParameters) DeepCopyInto(out *TimeoutPerRequestParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutPerRequestParameters. +func (in *TimeoutPerRequestParameters) DeepCopy() *TimeoutPerRequestParameters { + if in == nil { + return nil + } + out := new(TimeoutPerRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeoutTCPInitParameters) DeepCopyInto(out *TimeoutTCPInitParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(TCPIdleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutTCPInitParameters. +func (in *TimeoutTCPInitParameters) DeepCopy() *TimeoutTCPInitParameters { + if in == nil { + return nil + } + out := new(TimeoutTCPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutTCPObservation) DeepCopyInto(out *TimeoutTCPObservation) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(TCPIdleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutTCPObservation. +func (in *TimeoutTCPObservation) DeepCopy() *TimeoutTCPObservation { + if in == nil { + return nil + } + out := new(TimeoutTCPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutTCPParameters) DeepCopyInto(out *TimeoutTCPParameters) { + *out = *in + if in.Idle != nil { + in, out := &in.Idle, &out.Idle + *out = new(TCPIdleParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutTCPParameters. +func (in *TimeoutTCPParameters) DeepCopy() *TimeoutTCPParameters { + if in == nil { + return nil + } + out := new(TimeoutTCPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrustAcmInitParameters) DeepCopyInto(out *TrustAcmInitParameters) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustAcmInitParameters. +func (in *TrustAcmInitParameters) DeepCopy() *TrustAcmInitParameters { + if in == nil { + return nil + } + out := new(TrustAcmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustAcmObservation) DeepCopyInto(out *TrustAcmObservation) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustAcmObservation. +func (in *TrustAcmObservation) DeepCopy() *TrustAcmObservation { + if in == nil { + return nil + } + out := new(TrustAcmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrustAcmParameters) DeepCopyInto(out *TrustAcmParameters) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustAcmParameters. +func (in *TrustAcmParameters) DeepCopy() *TrustAcmParameters { + if in == nil { + return nil + } + out := new(TrustAcmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustFileInitParameters) DeepCopyInto(out *TrustFileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustFileInitParameters. +func (in *TrustFileInitParameters) DeepCopy() *TrustFileInitParameters { + if in == nil { + return nil + } + out := new(TrustFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustFileObservation) DeepCopyInto(out *TrustFileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustFileObservation. 
+func (in *TrustFileObservation) DeepCopy() *TrustFileObservation { + if in == nil { + return nil + } + out := new(TrustFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustFileParameters) DeepCopyInto(out *TrustFileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustFileParameters. +func (in *TrustFileParameters) DeepCopy() *TrustFileParameters { + if in == nil { + return nil + } + out := new(TrustFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustInitParameters) DeepCopyInto(out *TrustInitParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(AcmInitParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(TrustFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(TrustSdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustInitParameters. +func (in *TrustInitParameters) DeepCopy() *TrustInitParameters { + if in == nil { + return nil + } + out := new(TrustInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrustObservation) DeepCopyInto(out *TrustObservation) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(AcmObservation) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(TrustFileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(TrustSdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustObservation. +func (in *TrustObservation) DeepCopy() *TrustObservation { + if in == nil { + return nil + } + out := new(TrustObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustParameters) DeepCopyInto(out *TrustParameters) { + *out = *in + if in.Acm != nil { + in, out := &in.Acm, &out.Acm + *out = new(AcmParameters) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(TrustFileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(TrustSdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustParameters. +func (in *TrustParameters) DeepCopy() *TrustParameters { + if in == nil { + return nil + } + out := new(TrustParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustSdsInitParameters) DeepCopyInto(out *TrustSdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustSdsInitParameters. 
+func (in *TrustSdsInitParameters) DeepCopy() *TrustSdsInitParameters { + if in == nil { + return nil + } + out := new(TrustSdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustSdsObservation) DeepCopyInto(out *TrustSdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustSdsObservation. +func (in *TrustSdsObservation) DeepCopy() *TrustSdsObservation { + if in == nil { + return nil + } + out := new(TrustSdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustSdsParameters) DeepCopyInto(out *TrustSdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustSdsParameters. +func (in *TrustSdsParameters) DeepCopy() *TrustSdsParameters { + if in == nil { + return nil + } + out := new(TrustSdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationInitParameters) DeepCopyInto(out *ValidationInitParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(SubjectAlternativeNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(TrustInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationInitParameters. +func (in *ValidationInitParameters) DeepCopy() *ValidationInitParameters { + if in == nil { + return nil + } + out := new(ValidationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationObservation) DeepCopyInto(out *ValidationObservation) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(SubjectAlternativeNamesObservation) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(TrustObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationObservation. +func (in *ValidationObservation) DeepCopy() *ValidationObservation { + if in == nil { + return nil + } + out := new(ValidationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationParameters) DeepCopyInto(out *ValidationParameters) { + *out = *in + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(SubjectAlternativeNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.Trust != nil { + in, out := &in.Trust, &out.Trust + *out = new(TrustParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationParameters. +func (in *ValidationParameters) DeepCopy() *ValidationParameters { + if in == nil { + return nil + } + out := new(ValidationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationSubjectAlternativeNamesInitParameters) DeepCopyInto(out *ValidationSubjectAlternativeNamesInitParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(ValidationSubjectAlternativeNamesMatchInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationSubjectAlternativeNamesInitParameters. +func (in *ValidationSubjectAlternativeNamesInitParameters) DeepCopy() *ValidationSubjectAlternativeNamesInitParameters { + if in == nil { + return nil + } + out := new(ValidationSubjectAlternativeNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationSubjectAlternativeNamesMatchInitParameters) DeepCopyInto(out *ValidationSubjectAlternativeNamesMatchInitParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationSubjectAlternativeNamesMatchInitParameters. +func (in *ValidationSubjectAlternativeNamesMatchInitParameters) DeepCopy() *ValidationSubjectAlternativeNamesMatchInitParameters { + if in == nil { + return nil + } + out := new(ValidationSubjectAlternativeNamesMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationSubjectAlternativeNamesMatchObservation) DeepCopyInto(out *ValidationSubjectAlternativeNamesMatchObservation) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationSubjectAlternativeNamesMatchObservation. +func (in *ValidationSubjectAlternativeNamesMatchObservation) DeepCopy() *ValidationSubjectAlternativeNamesMatchObservation { + if in == nil { + return nil + } + out := new(ValidationSubjectAlternativeNamesMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationSubjectAlternativeNamesMatchParameters) DeepCopyInto(out *ValidationSubjectAlternativeNamesMatchParameters) { + *out = *in + if in.Exact != nil { + in, out := &in.Exact, &out.Exact + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationSubjectAlternativeNamesMatchParameters. +func (in *ValidationSubjectAlternativeNamesMatchParameters) DeepCopy() *ValidationSubjectAlternativeNamesMatchParameters { + if in == nil { + return nil + } + out := new(ValidationSubjectAlternativeNamesMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationSubjectAlternativeNamesObservation) DeepCopyInto(out *ValidationSubjectAlternativeNamesObservation) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(ValidationSubjectAlternativeNamesMatchObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationSubjectAlternativeNamesObservation. +func (in *ValidationSubjectAlternativeNamesObservation) DeepCopy() *ValidationSubjectAlternativeNamesObservation { + if in == nil { + return nil + } + out := new(ValidationSubjectAlternativeNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationSubjectAlternativeNamesParameters) DeepCopyInto(out *ValidationSubjectAlternativeNamesParameters) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(ValidationSubjectAlternativeNamesMatchParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationSubjectAlternativeNamesParameters. +func (in *ValidationSubjectAlternativeNamesParameters) DeepCopy() *ValidationSubjectAlternativeNamesParameters { + if in == nil { + return nil + } + out := new(ValidationSubjectAlternativeNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationTrustAcmInitParameters) DeepCopyInto(out *ValidationTrustAcmInitParameters) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustAcmInitParameters. +func (in *ValidationTrustAcmInitParameters) DeepCopy() *ValidationTrustAcmInitParameters { + if in == nil { + return nil + } + out := new(ValidationTrustAcmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationTrustAcmObservation) DeepCopyInto(out *ValidationTrustAcmObservation) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustAcmObservation. +func (in *ValidationTrustAcmObservation) DeepCopy() *ValidationTrustAcmObservation { + if in == nil { + return nil + } + out := new(ValidationTrustAcmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationTrustAcmParameters) DeepCopyInto(out *ValidationTrustAcmParameters) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustAcmParameters. +func (in *ValidationTrustAcmParameters) DeepCopy() *ValidationTrustAcmParameters { + if in == nil { + return nil + } + out := new(ValidationTrustAcmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationTrustFileInitParameters) DeepCopyInto(out *ValidationTrustFileInitParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustFileInitParameters. +func (in *ValidationTrustFileInitParameters) DeepCopy() *ValidationTrustFileInitParameters { + if in == nil { + return nil + } + out := new(ValidationTrustFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationTrustFileObservation) DeepCopyInto(out *ValidationTrustFileObservation) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustFileObservation. +func (in *ValidationTrustFileObservation) DeepCopy() *ValidationTrustFileObservation { + if in == nil { + return nil + } + out := new(ValidationTrustFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationTrustFileParameters) DeepCopyInto(out *ValidationTrustFileParameters) { + *out = *in + if in.CertificateChain != nil { + in, out := &in.CertificateChain, &out.CertificateChain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustFileParameters. 
+func (in *ValidationTrustFileParameters) DeepCopy() *ValidationTrustFileParameters { + if in == nil { + return nil + } + out := new(ValidationTrustFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationTrustInitParameters) DeepCopyInto(out *ValidationTrustInitParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ValidationTrustFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ValidationTrustSdsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustInitParameters. +func (in *ValidationTrustInitParameters) DeepCopy() *ValidationTrustInitParameters { + if in == nil { + return nil + } + out := new(ValidationTrustInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationTrustObservation) DeepCopyInto(out *ValidationTrustObservation) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ValidationTrustFileObservation) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ValidationTrustSdsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustObservation. +func (in *ValidationTrustObservation) DeepCopy() *ValidationTrustObservation { + if in == nil { + return nil + } + out := new(ValidationTrustObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationTrustParameters) DeepCopyInto(out *ValidationTrustParameters) { + *out = *in + if in.File != nil { + in, out := &in.File, &out.File + *out = new(ValidationTrustFileParameters) + (*in).DeepCopyInto(*out) + } + if in.Sds != nil { + in, out := &in.Sds, &out.Sds + *out = new(ValidationTrustSdsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustParameters. +func (in *ValidationTrustParameters) DeepCopy() *ValidationTrustParameters { + if in == nil { + return nil + } + out := new(ValidationTrustParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationTrustSdsInitParameters) DeepCopyInto(out *ValidationTrustSdsInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustSdsInitParameters. +func (in *ValidationTrustSdsInitParameters) DeepCopy() *ValidationTrustSdsInitParameters { + if in == nil { + return nil + } + out := new(ValidationTrustSdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationTrustSdsObservation) DeepCopyInto(out *ValidationTrustSdsObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustSdsObservation. 
+func (in *ValidationTrustSdsObservation) DeepCopy() *ValidationTrustSdsObservation { + if in == nil { + return nil + } + out := new(ValidationTrustSdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationTrustSdsParameters) DeepCopyInto(out *ValidationTrustSdsParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationTrustSdsParameters. +func (in *ValidationTrustSdsParameters) DeepCopy() *ValidationTrustSdsParameters { + if in == nil { + return nil + } + out := new(ValidationTrustSdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualGateway) DeepCopyInto(out *VirtualGateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGateway. +func (in *VirtualGateway) DeepCopy() *VirtualGateway { + if in == nil { + return nil + } + out := new(VirtualGateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualGateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualGatewayInitParameters) DeepCopyInto(out *VirtualGatewayInitParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualGatewaySpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGatewayInitParameters. +func (in *VirtualGatewayInitParameters) DeepCopy() *VirtualGatewayInitParameters { + if in == nil { + return nil + } + out := new(VirtualGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualGatewayList) DeepCopyInto(out *VirtualGatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualGateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGatewayList. 
+func (in *VirtualGatewayList) DeepCopy() *VirtualGatewayList { + if in == nil { + return nil + } + out := new(VirtualGatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualGatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualGatewayObservation) DeepCopyInto(out *VirtualGatewayObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = new(string) + **out = **in + } + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceOwner != nil { + in, out := &in.ResourceOwner, &out.ResourceOwner + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualGatewaySpecObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, 
&out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGatewayObservation. +func (in *VirtualGatewayObservation) DeepCopy() *VirtualGatewayObservation { + if in == nil { + return nil + } + out := new(VirtualGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualGatewayParameters) DeepCopyInto(out *VirtualGatewayParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualGatewaySpecParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGatewayParameters. 
+func (in *VirtualGatewayParameters) DeepCopy() *VirtualGatewayParameters { + if in == nil { + return nil + } + out := new(VirtualGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualGatewaySpec) DeepCopyInto(out *VirtualGatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGatewaySpec. +func (in *VirtualGatewaySpec) DeepCopy() *VirtualGatewaySpec { + if in == nil { + return nil + } + out := new(VirtualGatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualGatewaySpecInitParameters) DeepCopyInto(out *VirtualGatewaySpecInitParameters) { + *out = *in + if in.BackendDefaults != nil { + in, out := &in.BackendDefaults, &out.BackendDefaults + *out = new(BackendDefaultsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]ListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGatewaySpecInitParameters. +func (in *VirtualGatewaySpecInitParameters) DeepCopy() *VirtualGatewaySpecInitParameters { + if in == nil { + return nil + } + out := new(VirtualGatewaySpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualGatewaySpecObservation) DeepCopyInto(out *VirtualGatewaySpecObservation) { + *out = *in + if in.BackendDefaults != nil { + in, out := &in.BackendDefaults, &out.BackendDefaults + *out = new(BackendDefaultsObservation) + (*in).DeepCopyInto(*out) + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]ListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGatewaySpecObservation. +func (in *VirtualGatewaySpecObservation) DeepCopy() *VirtualGatewaySpecObservation { + if in == nil { + return nil + } + out := new(VirtualGatewaySpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualGatewaySpecParameters) DeepCopyInto(out *VirtualGatewaySpecParameters) { + *out = *in + if in.BackendDefaults != nil { + in, out := &in.BackendDefaults, &out.BackendDefaults + *out = new(BackendDefaultsParameters) + (*in).DeepCopyInto(*out) + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]ListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGatewaySpecParameters. 
+func (in *VirtualGatewaySpecParameters) DeepCopy() *VirtualGatewaySpecParameters { + if in == nil { + return nil + } + out := new(VirtualGatewaySpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualGatewayStatus) DeepCopyInto(out *VirtualGatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualGatewayStatus. +func (in *VirtualGatewayStatus) DeepCopy() *VirtualGatewayStatus { + if in == nil { + return nil + } + out := new(VirtualGatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNode) DeepCopyInto(out *VirtualNode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNode. +func (in *VirtualNode) DeepCopy() *VirtualNode { + if in == nil { + return nil + } + out := new(VirtualNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualNode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNodeInitParameters) DeepCopyInto(out *VirtualNodeInitParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshNameRef != nil { + in, out := &in.MeshNameRef, &out.MeshNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MeshNameSelector != nil { + in, out := &in.MeshNameSelector, &out.MeshNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualNodeSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeInitParameters. +func (in *VirtualNodeInitParameters) DeepCopy() *VirtualNodeInitParameters { + if in == nil { + return nil + } + out := new(VirtualNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNodeList) DeepCopyInto(out *VirtualNodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeList. 
+func (in *VirtualNodeList) DeepCopy() *VirtualNodeList { + if in == nil { + return nil + } + out := new(VirtualNodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualNodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNodeObservation) DeepCopyInto(out *VirtualNodeObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = new(string) + **out = **in + } + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceOwner != nil { + in, out := &in.ResourceOwner, &out.ResourceOwner + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualNodeSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeObservation. +func (in *VirtualNodeObservation) DeepCopy() *VirtualNodeObservation { + if in == nil { + return nil + } + out := new(VirtualNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNodeParameters) DeepCopyInto(out *VirtualNodeParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshNameRef != nil { + in, out := &in.MeshNameRef, &out.MeshNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MeshNameSelector != nil { + in, out := &in.MeshNameSelector, &out.MeshNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualNodeSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new VirtualNodeParameters. +func (in *VirtualNodeParameters) DeepCopy() *VirtualNodeParameters { + if in == nil { + return nil + } + out := new(VirtualNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNodeSpec) DeepCopyInto(out *VirtualNodeSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeSpec. +func (in *VirtualNodeSpec) DeepCopy() *VirtualNodeSpec { + if in == nil { + return nil + } + out := new(VirtualNodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNodeSpecInitParameters) DeepCopyInto(out *VirtualNodeSpecInitParameters) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = make([]BackendInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendDefaults != nil { + in, out := &in.BackendDefaults, &out.BackendDefaults + *out = new(SpecBackendDefaultsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]SpecListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(SpecLoggingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceDiscovery != nil { + in, out := &in.ServiceDiscovery, &out.ServiceDiscovery + *out = new(SpecServiceDiscoveryInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
VirtualNodeSpecInitParameters. +func (in *VirtualNodeSpecInitParameters) DeepCopy() *VirtualNodeSpecInitParameters { + if in == nil { + return nil + } + out := new(VirtualNodeSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNodeSpecObservation) DeepCopyInto(out *VirtualNodeSpecObservation) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = make([]BackendObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendDefaults != nil { + in, out := &in.BackendDefaults, &out.BackendDefaults + *out = new(SpecBackendDefaultsObservation) + (*in).DeepCopyInto(*out) + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]SpecListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(SpecLoggingObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceDiscovery != nil { + in, out := &in.ServiceDiscovery, &out.ServiceDiscovery + *out = new(SpecServiceDiscoveryObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeSpecObservation. +func (in *VirtualNodeSpecObservation) DeepCopy() *VirtualNodeSpecObservation { + if in == nil { + return nil + } + out := new(VirtualNodeSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNodeSpecParameters) DeepCopyInto(out *VirtualNodeSpecParameters) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = make([]BackendParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendDefaults != nil { + in, out := &in.BackendDefaults, &out.BackendDefaults + *out = new(SpecBackendDefaultsParameters) + (*in).DeepCopyInto(*out) + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]SpecListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(SpecLoggingParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceDiscovery != nil { + in, out := &in.ServiceDiscovery, &out.ServiceDiscovery + *out = new(SpecServiceDiscoveryParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeSpecParameters. +func (in *VirtualNodeSpecParameters) DeepCopy() *VirtualNodeSpecParameters { + if in == nil { + return nil + } + out := new(VirtualNodeSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNodeStatus) DeepCopyInto(out *VirtualNodeStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeStatus. +func (in *VirtualNodeStatus) DeepCopy() *VirtualNodeStatus { + if in == nil { + return nil + } + out := new(VirtualNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualRouter) DeepCopyInto(out *VirtualRouter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouter. +func (in *VirtualRouter) DeepCopy() *VirtualRouter { + if in == nil { + return nil + } + out := new(VirtualRouter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualRouter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualRouterInitParameters) DeepCopyInto(out *VirtualRouterInitParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshNameRef != nil { + in, out := &in.MeshNameRef, &out.MeshNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MeshNameSelector != nil { + in, out := &in.MeshNameSelector, &out.MeshNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualRouterSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + 
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterInitParameters. +func (in *VirtualRouterInitParameters) DeepCopy() *VirtualRouterInitParameters { + if in == nil { + return nil + } + out := new(VirtualRouterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualRouterList) DeepCopyInto(out *VirtualRouterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualRouter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterList. +func (in *VirtualRouterList) DeepCopy() *VirtualRouterList { + if in == nil { + return nil + } + out := new(VirtualRouterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualRouterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualRouterObservation) DeepCopyInto(out *VirtualRouterObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = new(string) + **out = **in + } + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceOwner != nil { + in, out := &in.ResourceOwner, &out.ResourceOwner + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualRouterSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterObservation. 
+func (in *VirtualRouterObservation) DeepCopy() *VirtualRouterObservation { + if in == nil { + return nil + } + out := new(VirtualRouterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualRouterParameters) DeepCopyInto(out *VirtualRouterParameters) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshNameRef != nil { + in, out := &in.MeshNameRef, &out.MeshNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MeshNameSelector != nil { + in, out := &in.MeshNameSelector, &out.MeshNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualRouterSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterParameters. +func (in *VirtualRouterParameters) DeepCopy() *VirtualRouterParameters { + if in == nil { + return nil + } + out := new(VirtualRouterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualRouterSpec) DeepCopyInto(out *VirtualRouterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterSpec. +func (in *VirtualRouterSpec) DeepCopy() *VirtualRouterSpec { + if in == nil { + return nil + } + out := new(VirtualRouterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualRouterSpecInitParameters) DeepCopyInto(out *VirtualRouterSpecInitParameters) { + *out = *in + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]VirtualRouterSpecListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterSpecInitParameters. +func (in *VirtualRouterSpecInitParameters) DeepCopy() *VirtualRouterSpecInitParameters { + if in == nil { + return nil + } + out := new(VirtualRouterSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualRouterSpecListenerInitParameters) DeepCopyInto(out *VirtualRouterSpecListenerInitParameters) { + *out = *in + if in.PortMapping != nil { + in, out := &in.PortMapping, &out.PortMapping + *out = new(SpecListenerPortMappingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterSpecListenerInitParameters. 
+func (in *VirtualRouterSpecListenerInitParameters) DeepCopy() *VirtualRouterSpecListenerInitParameters { + if in == nil { + return nil + } + out := new(VirtualRouterSpecListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualRouterSpecListenerObservation) DeepCopyInto(out *VirtualRouterSpecListenerObservation) { + *out = *in + if in.PortMapping != nil { + in, out := &in.PortMapping, &out.PortMapping + *out = new(SpecListenerPortMappingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterSpecListenerObservation. +func (in *VirtualRouterSpecListenerObservation) DeepCopy() *VirtualRouterSpecListenerObservation { + if in == nil { + return nil + } + out := new(VirtualRouterSpecListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualRouterSpecListenerParameters) DeepCopyInto(out *VirtualRouterSpecListenerParameters) { + *out = *in + if in.PortMapping != nil { + in, out := &in.PortMapping, &out.PortMapping + *out = new(SpecListenerPortMappingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterSpecListenerParameters. +func (in *VirtualRouterSpecListenerParameters) DeepCopy() *VirtualRouterSpecListenerParameters { + if in == nil { + return nil + } + out := new(VirtualRouterSpecListenerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualRouterSpecObservation) DeepCopyInto(out *VirtualRouterSpecObservation) { + *out = *in + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]VirtualRouterSpecListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterSpecObservation. +func (in *VirtualRouterSpecObservation) DeepCopy() *VirtualRouterSpecObservation { + if in == nil { + return nil + } + out := new(VirtualRouterSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualRouterSpecParameters) DeepCopyInto(out *VirtualRouterSpecParameters) { + *out = *in + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]VirtualRouterSpecListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterSpecParameters. +func (in *VirtualRouterSpecParameters) DeepCopy() *VirtualRouterSpecParameters { + if in == nil { + return nil + } + out := new(VirtualRouterSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualRouterStatus) DeepCopyInto(out *VirtualRouterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterStatus. 
+func (in *VirtualRouterStatus) DeepCopy() *VirtualRouterStatus { + if in == nil { + return nil + } + out := new(VirtualRouterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualService) DeepCopyInto(out *VirtualService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualService. +func (in *VirtualService) DeepCopy() *VirtualService { + if in == nil { + return nil + } + out := new(VirtualService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceClientPolicyInitParameters) DeepCopyInto(out *VirtualServiceClientPolicyInitParameters) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(ClientPolicyTLSInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceClientPolicyInitParameters. +func (in *VirtualServiceClientPolicyInitParameters) DeepCopy() *VirtualServiceClientPolicyInitParameters { + if in == nil { + return nil + } + out := new(VirtualServiceClientPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualServiceClientPolicyObservation) DeepCopyInto(out *VirtualServiceClientPolicyObservation) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(ClientPolicyTLSObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceClientPolicyObservation. +func (in *VirtualServiceClientPolicyObservation) DeepCopy() *VirtualServiceClientPolicyObservation { + if in == nil { + return nil + } + out := new(VirtualServiceClientPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceClientPolicyParameters) DeepCopyInto(out *VirtualServiceClientPolicyParameters) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(ClientPolicyTLSParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceClientPolicyParameters. +func (in *VirtualServiceClientPolicyParameters) DeepCopy() *VirtualServiceClientPolicyParameters { + if in == nil { + return nil + } + out := new(VirtualServiceClientPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceInitParameters) DeepCopyInto(out *VirtualServiceInitParameters) { + *out = *in + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceInitParameters. 
+func (in *VirtualServiceInitParameters) DeepCopy() *VirtualServiceInitParameters { + if in == nil { + return nil + } + out := new(VirtualServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceInitParameters_2) DeepCopyInto(out *VirtualServiceInitParameters_2) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshNameRef != nil { + in, out := &in.MeshNameRef, &out.MeshNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MeshNameSelector != nil { + in, out := &in.MeshNameSelector, &out.MeshNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualServiceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceInitParameters_2. +func (in *VirtualServiceInitParameters_2) DeepCopy() *VirtualServiceInitParameters_2 { + if in == nil { + return nil + } + out := new(VirtualServiceInitParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualServiceList) DeepCopyInto(out *VirtualServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceList. +func (in *VirtualServiceList) DeepCopy() *VirtualServiceList { + if in == nil { + return nil + } + out := new(VirtualServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceObservation) DeepCopyInto(out *VirtualServiceObservation) { + *out = *in + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceObservation. +func (in *VirtualServiceObservation) DeepCopy() *VirtualServiceObservation { + if in == nil { + return nil + } + out := new(VirtualServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualServiceObservation_2) DeepCopyInto(out *VirtualServiceObservation_2) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = new(string) + **out = **in + } + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceOwner != nil { + in, out := &in.ResourceOwner, &out.ResourceOwner + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualServiceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceObservation_2. 
+func (in *VirtualServiceObservation_2) DeepCopy() *VirtualServiceObservation_2 { + if in == nil { + return nil + } + out := new(VirtualServiceObservation_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceParameters) DeepCopyInto(out *VirtualServiceParameters) { + *out = *in + if in.VirtualServiceName != nil { + in, out := &in.VirtualServiceName, &out.VirtualServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceParameters. +func (in *VirtualServiceParameters) DeepCopy() *VirtualServiceParameters { + if in == nil { + return nil + } + out := new(VirtualServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceParameters_2) DeepCopyInto(out *VirtualServiceParameters_2) { + *out = *in + if in.MeshName != nil { + in, out := &in.MeshName, &out.MeshName + *out = new(string) + **out = **in + } + if in.MeshNameRef != nil { + in, out := &in.MeshNameRef, &out.MeshNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MeshNameSelector != nil { + in, out := &in.MeshNameSelector, &out.MeshNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MeshOwner != nil { + in, out := &in.MeshOwner, &out.MeshOwner + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(VirtualServiceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + 
for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceParameters_2. +func (in *VirtualServiceParameters_2) DeepCopy() *VirtualServiceParameters_2 { + if in == nil { + return nil + } + out := new(VirtualServiceParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceSpec) DeepCopyInto(out *VirtualServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceSpec. +func (in *VirtualServiceSpec) DeepCopy() *VirtualServiceSpec { + if in == nil { + return nil + } + out := new(VirtualServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceSpecInitParameters) DeepCopyInto(out *VirtualServiceSpecInitParameters) { + *out = *in + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(ProviderInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceSpecInitParameters. +func (in *VirtualServiceSpecInitParameters) DeepCopy() *VirtualServiceSpecInitParameters { + if in == nil { + return nil + } + out := new(VirtualServiceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualServiceSpecObservation) DeepCopyInto(out *VirtualServiceSpecObservation) { + *out = *in + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(ProviderObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceSpecObservation. +func (in *VirtualServiceSpecObservation) DeepCopy() *VirtualServiceSpecObservation { + if in == nil { + return nil + } + out := new(VirtualServiceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceSpecParameters) DeepCopyInto(out *VirtualServiceSpecParameters) { + *out = *in + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(ProviderParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceSpecParameters. +func (in *VirtualServiceSpecParameters) DeepCopy() *VirtualServiceSpecParameters { + if in == nil { + return nil + } + out := new(VirtualServiceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceStatus) DeepCopyInto(out *VirtualServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceStatus. +func (in *VirtualServiceStatus) DeepCopy() *VirtualServiceStatus { + if in == nil { + return nil + } + out := new(VirtualServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WeightedTargetInitParameters) DeepCopyInto(out *WeightedTargetInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedTargetInitParameters. +func (in *WeightedTargetInitParameters) DeepCopy() *WeightedTargetInitParameters { + if in == nil { + return nil + } + out := new(WeightedTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedTargetObservation) DeepCopyInto(out *WeightedTargetObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedTargetObservation. +func (in *WeightedTargetObservation) DeepCopy() *WeightedTargetObservation { + if in == nil { + return nil + } + out := new(WeightedTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WeightedTargetParameters) DeepCopyInto(out *WeightedTargetParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.VirtualNode != nil { + in, out := &in.VirtualNode, &out.VirtualNode + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedTargetParameters. +func (in *WeightedTargetParameters) DeepCopy() *WeightedTargetParameters { + if in == nil { + return nil + } + out := new(WeightedTargetParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/appmesh/v1beta2/zz_generated.managed.go b/apis/appmesh/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..b36e37b42d --- /dev/null +++ b/apis/appmesh/v1beta2/zz_generated.managed.go @@ -0,0 +1,428 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this GatewayRoute. +func (mg *GatewayRoute) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this GatewayRoute. +func (mg *GatewayRoute) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this GatewayRoute. +func (mg *GatewayRoute) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this GatewayRoute. +func (mg *GatewayRoute) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this GatewayRoute. 
+func (mg *GatewayRoute) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this GatewayRoute. +func (mg *GatewayRoute) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this GatewayRoute. +func (mg *GatewayRoute) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this GatewayRoute. +func (mg *GatewayRoute) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this GatewayRoute. +func (mg *GatewayRoute) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this GatewayRoute. +func (mg *GatewayRoute) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this GatewayRoute. +func (mg *GatewayRoute) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this GatewayRoute. +func (mg *GatewayRoute) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Mesh. +func (mg *Mesh) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Mesh. +func (mg *Mesh) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Mesh. +func (mg *Mesh) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Mesh. +func (mg *Mesh) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Mesh. 
+func (mg *Mesh) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Mesh. +func (mg *Mesh) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Mesh. +func (mg *Mesh) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Mesh. +func (mg *Mesh) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Mesh. +func (mg *Mesh) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Mesh. +func (mg *Mesh) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Mesh. +func (mg *Mesh) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Mesh. +func (mg *Mesh) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Route. +func (mg *Route) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Route. +func (mg *Route) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Route. +func (mg *Route) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Route. +func (mg *Route) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Route. 
+func (mg *Route) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Route. +func (mg *Route) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Route. +func (mg *Route) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Route. +func (mg *Route) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Route. +func (mg *Route) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Route. +func (mg *Route) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Route. +func (mg *Route) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Route. +func (mg *Route) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualGateway. +func (mg *VirtualGateway) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualGateway. +func (mg *VirtualGateway) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualGateway. +func (mg *VirtualGateway) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualGateway. +func (mg *VirtualGateway) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualGateway. 
+func (mg *VirtualGateway) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualGateway. +func (mg *VirtualGateway) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualGateway. +func (mg *VirtualGateway) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualGateway. +func (mg *VirtualGateway) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualGateway. +func (mg *VirtualGateway) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualGateway. +func (mg *VirtualGateway) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualGateway. +func (mg *VirtualGateway) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualGateway. +func (mg *VirtualGateway) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualNode. +func (mg *VirtualNode) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualNode. +func (mg *VirtualNode) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualNode. +func (mg *VirtualNode) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualNode. 
+func (mg *VirtualNode) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualNode. +func (mg *VirtualNode) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualNode. +func (mg *VirtualNode) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualNode. +func (mg *VirtualNode) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualNode. +func (mg *VirtualNode) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualNode. +func (mg *VirtualNode) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualNode. +func (mg *VirtualNode) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualNode. +func (mg *VirtualNode) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualNode. +func (mg *VirtualNode) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualRouter. +func (mg *VirtualRouter) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualRouter. +func (mg *VirtualRouter) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualRouter. 
+func (mg *VirtualRouter) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualRouter. +func (mg *VirtualRouter) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualRouter. +func (mg *VirtualRouter) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualRouter. +func (mg *VirtualRouter) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualRouter. +func (mg *VirtualRouter) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualRouter. +func (mg *VirtualRouter) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualRouter. +func (mg *VirtualRouter) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualRouter. +func (mg *VirtualRouter) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualRouter. +func (mg *VirtualRouter) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualRouter. +func (mg *VirtualRouter) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualService. +func (mg *VirtualService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualService. 
+func (mg *VirtualService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualService. +func (mg *VirtualService) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualService. +func (mg *VirtualService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualService. +func (mg *VirtualService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualService. +func (mg *VirtualService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualService. +func (mg *VirtualService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualService. +func (mg *VirtualService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualService. +func (mg *VirtualService) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualService. +func (mg *VirtualService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualService. +func (mg *VirtualService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualService. 
+func (mg *VirtualService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/appmesh/v1beta2/zz_generated.managedlist.go b/apis/appmesh/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..30b717329b --- /dev/null +++ b/apis/appmesh/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this GatewayRouteList. +func (l *GatewayRouteList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MeshList. +func (l *MeshList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RouteList. +func (l *RouteList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualGatewayList. +func (l *VirtualGatewayList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualNodeList. +func (l *VirtualNodeList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualRouterList. +func (l *VirtualRouterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualServiceList. 
+func (l *VirtualServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/appmesh/v1beta2/zz_generated.resolvers.go b/apis/appmesh/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..220158538e --- /dev/null +++ b/apis/appmesh/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,695 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *GatewayRoute) ResolveReferences( // ResolveReferences of this GatewayRoute. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Spec != nil { + if mg.Spec.ForProvider.Spec.HTTPRoute != nil { + if mg.Spec.ForProvider.Spec.HTTPRoute.Action != nil { + if mg.Spec.ForProvider.Spec.HTTPRoute.Action.Target != nil { + if mg.Spec.ForProvider.Spec.HTTPRoute.Action.Target.VirtualService != nil { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualService", "VirtualServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceNameRef, + Selector: mg.Spec.ForProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceName") + } + mg.Spec.ForProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceNameRef = rsp.ResolvedReference + + } + } + } + } + } + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualGateway", "VirtualGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.VirtualGatewayName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.VirtualGatewayNameRef, + Selector: mg.Spec.ForProvider.VirtualGatewayNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualGatewayName") + } + mg.Spec.ForProvider.VirtualGatewayName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualGatewayNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Spec != nil { + if mg.Spec.InitProvider.Spec.HTTPRoute != nil { + if mg.Spec.InitProvider.Spec.HTTPRoute.Action != nil { + if mg.Spec.InitProvider.Spec.HTTPRoute.Action.Target != nil { + if mg.Spec.InitProvider.Spec.HTTPRoute.Action.Target.VirtualService != nil { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualService", "VirtualServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceNameRef, + Selector: mg.Spec.InitProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceName") + } + mg.Spec.InitProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Spec.HTTPRoute.Action.Target.VirtualService.VirtualServiceNameRef = rsp.ResolvedReference + + } + } + } + } + } + { + m, l, err = 
apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualGateway", "VirtualGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualGatewayName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.VirtualGatewayNameRef, + Selector: mg.Spec.InitProvider.VirtualGatewayNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualGatewayName") + } + mg.Spec.InitProvider.VirtualGatewayName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualGatewayNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Route. +func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "Mesh", "MeshList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MeshName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MeshNameRef, + Selector: mg.Spec.ForProvider.MeshNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MeshName") + } + mg.Spec.ForProvider.MeshName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MeshNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Spec != nil { + if 
mg.Spec.ForProvider.Spec.HTTPRoute != nil { + if mg.Spec.ForProvider.Spec.HTTPRoute.Action != nil { + for i6 := 0; i6 < len(mg.Spec.ForProvider.Spec.HTTPRoute.Action.WeightedTarget); i6++ { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualNode", "VirtualNodeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNode), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNodeRef, + Selector: mg.Spec.ForProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNodeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNode") + } + mg.Spec.ForProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNode = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNodeRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.ForProvider.Spec != nil { + if mg.Spec.ForProvider.Spec.TCPRoute != nil { + if mg.Spec.ForProvider.Spec.TCPRoute.Action != nil { + for i6 := 0; i6 < len(mg.Spec.ForProvider.Spec.TCPRoute.Action.WeightedTarget); i6++ { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualNode", "VirtualNodeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNode), + Extract: resource.ExtractParamPath("name", false), + Reference: 
mg.Spec.ForProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNodeRef, + Selector: mg.Spec.ForProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNodeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNode") + } + mg.Spec.ForProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNode = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNodeRef = rsp.ResolvedReference + + } + } + } + } + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualRouter", "VirtualRouterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualRouterName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.VirtualRouterNameRef, + Selector: mg.Spec.ForProvider.VirtualRouterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualRouterName") + } + mg.Spec.ForProvider.VirtualRouterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualRouterNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "Mesh", "MeshList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MeshName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.MeshNameRef, + Selector: mg.Spec.InitProvider.MeshNameSelector, + To: reference.To{List: l, Managed: m}, + }) + 
} + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MeshName") + } + mg.Spec.InitProvider.MeshName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MeshNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Spec != nil { + if mg.Spec.InitProvider.Spec.HTTPRoute != nil { + if mg.Spec.InitProvider.Spec.HTTPRoute.Action != nil { + for i6 := 0; i6 < len(mg.Spec.InitProvider.Spec.HTTPRoute.Action.WeightedTarget); i6++ { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualNode", "VirtualNodeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNode), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNodeRef, + Selector: mg.Spec.InitProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNodeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNode") + } + mg.Spec.InitProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNode = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Spec.HTTPRoute.Action.WeightedTarget[i6].VirtualNodeRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.Spec != nil { + if mg.Spec.InitProvider.Spec.TCPRoute != nil { + if mg.Spec.InitProvider.Spec.TCPRoute.Action != nil { + for i6 := 0; i6 < len(mg.Spec.InitProvider.Spec.TCPRoute.Action.WeightedTarget); i6++ { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualNode", "VirtualNodeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and 
its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNode), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNodeRef, + Selector: mg.Spec.InitProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNodeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNode") + } + mg.Spec.InitProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNode = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Spec.TCPRoute.Action.WeightedTarget[i6].VirtualNodeRef = rsp.ResolvedReference + + } + } + } + } + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualRouter", "VirtualRouterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualRouterName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.VirtualRouterNameRef, + Selector: mg.Spec.InitProvider.VirtualRouterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualRouterName") + } + mg.Spec.InitProvider.VirtualRouterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualRouterNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VirtualGateway. 
+func (mg *VirtualGateway) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Spec != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Spec.Listener); i4++ { + if mg.Spec.ForProvider.Spec.Listener[i4].TLS != nil { + if mg.Spec.ForProvider.Spec.Listener[i4].TLS.Certificate != nil { + if mg.Spec.ForProvider.Spec.Listener[i4].TLS.Certificate.Acm != nil { + { + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArnRef, + Selector: mg.Spec.ForProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArn") + } + mg.Spec.ForProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArnRef = rsp.ResolvedReference + + } + } + } + } + } + if mg.Spec.InitProvider.Spec != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Spec.Listener); i4++ { + if mg.Spec.InitProvider.Spec.Listener[i4].TLS != nil { + if mg.Spec.InitProvider.Spec.Listener[i4].TLS.Certificate != nil { + if mg.Spec.InitProvider.Spec.Listener[i4].TLS.Certificate.Acm != nil { + { + m, l, err = 
apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArnRef, + Selector: mg.Spec.InitProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArn") + } + mg.Spec.InitProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Spec.Listener[i4].TLS.Certificate.Acm.CertificateArnRef = rsp.ResolvedReference + + } + } + } + } + } + + return nil +} + +// ResolveReferences of this VirtualNode. 
+func (mg *VirtualNode) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "Mesh", "MeshList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MeshName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MeshNameRef, + Selector: mg.Spec.ForProvider.MeshNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MeshName") + } + mg.Spec.ForProvider.MeshName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MeshNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Spec != nil { + if mg.Spec.ForProvider.Spec.ServiceDiscovery != nil { + if mg.Spec.ForProvider.Spec.ServiceDiscovery.AwsCloudMap != nil { + { + m, l, err = apisresolver.GetManagedResource("servicediscovery.aws.upbound.io", "v1beta1", "HTTPNamespace", "HTTPNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceNameRef, + Selector: mg.Spec.ForProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceName") + } + mg.Spec.ForProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceNameRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "Mesh", "MeshList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MeshName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.MeshNameRef, + Selector: mg.Spec.InitProvider.MeshNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MeshName") + } + mg.Spec.InitProvider.MeshName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MeshNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Spec != nil { + if mg.Spec.InitProvider.Spec.ServiceDiscovery != nil { + if mg.Spec.InitProvider.Spec.ServiceDiscovery.AwsCloudMap != nil { + { + m, l, err = apisresolver.GetManagedResource("servicediscovery.aws.upbound.io", "v1beta1", "HTTPNamespace", "HTTPNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceNameRef, + Selector: mg.Spec.InitProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.InitProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceName") + } + mg.Spec.InitProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Spec.ServiceDiscovery.AwsCloudMap.NamespaceNameRef = rsp.ResolvedReference + + } + } + } + + return nil +} + +// ResolveReferences of this VirtualRouter. +func (mg *VirtualRouter) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "Mesh", "MeshList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MeshName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MeshNameRef, + Selector: mg.Spec.ForProvider.MeshNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MeshName") + } + mg.Spec.ForProvider.MeshName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MeshNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "Mesh", "MeshList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MeshName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.MeshNameRef, + Selector: mg.Spec.InitProvider.MeshNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.InitProvider.MeshName") + } + mg.Spec.InitProvider.MeshName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MeshNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VirtualService. +func (mg *VirtualService) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "Mesh", "MeshList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MeshName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MeshNameRef, + Selector: mg.Spec.ForProvider.MeshNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MeshName") + } + mg.Spec.ForProvider.MeshName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MeshNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Spec != nil { + if mg.Spec.ForProvider.Spec.Provider != nil { + if mg.Spec.ForProvider.Spec.Provider.VirtualNode != nil { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualNode", "VirtualNodeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Spec.Provider.VirtualNode.VirtualNodeName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.Spec.Provider.VirtualNode.VirtualNodeNameRef, + Selector: 
mg.Spec.ForProvider.Spec.Provider.VirtualNode.VirtualNodeNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Spec.Provider.VirtualNode.VirtualNodeName") + } + mg.Spec.ForProvider.Spec.Provider.VirtualNode.VirtualNodeName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Spec.Provider.VirtualNode.VirtualNodeNameRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.Spec != nil { + if mg.Spec.ForProvider.Spec.Provider != nil { + if mg.Spec.ForProvider.Spec.Provider.VirtualRouter != nil { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualRouter", "VirtualRouterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Spec.Provider.VirtualRouter.VirtualRouterName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.Spec.Provider.VirtualRouter.VirtualRouterNameRef, + Selector: mg.Spec.ForProvider.Spec.Provider.VirtualRouter.VirtualRouterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Spec.Provider.VirtualRouter.VirtualRouterName") + } + mg.Spec.ForProvider.Spec.Provider.VirtualRouter.VirtualRouterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Spec.Provider.VirtualRouter.VirtualRouterNameRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "Mesh", "MeshList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.MeshName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.MeshNameRef, + Selector: mg.Spec.InitProvider.MeshNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MeshName") + } + mg.Spec.InitProvider.MeshName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MeshNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Spec != nil { + if mg.Spec.InitProvider.Spec.Provider != nil { + if mg.Spec.InitProvider.Spec.Provider.VirtualNode != nil { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualNode", "VirtualNodeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Spec.Provider.VirtualNode.VirtualNodeName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.Spec.Provider.VirtualNode.VirtualNodeNameRef, + Selector: mg.Spec.InitProvider.Spec.Provider.VirtualNode.VirtualNodeNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Spec.Provider.VirtualNode.VirtualNodeName") + } + mg.Spec.InitProvider.Spec.Provider.VirtualNode.VirtualNodeName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Spec.Provider.VirtualNode.VirtualNodeNameRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.Spec != nil { + if mg.Spec.InitProvider.Spec.Provider != nil { + if mg.Spec.InitProvider.Spec.Provider.VirtualRouter != nil { + { + m, l, err = apisresolver.GetManagedResource("appmesh.aws.upbound.io", "v1beta2", "VirtualRouter", "VirtualRouterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed 
resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Spec.Provider.VirtualRouter.VirtualRouterName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.Spec.Provider.VirtualRouter.VirtualRouterNameRef, + Selector: mg.Spec.InitProvider.Spec.Provider.VirtualRouter.VirtualRouterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Spec.Provider.VirtualRouter.VirtualRouterName") + } + mg.Spec.InitProvider.Spec.Provider.VirtualRouter.VirtualRouterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Spec.Provider.VirtualRouter.VirtualRouterNameRef = rsp.ResolvedReference + + } + } + } + + return nil +} diff --git a/apis/appmesh/v1beta2/zz_groupversion_info.go b/apis/appmesh/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..0494ce95e8 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=appmesh.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "appmesh.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/appmesh/v1beta2/zz_mesh_terraformed.go b/apis/appmesh/v1beta2/zz_mesh_terraformed.go new file mode 100755 index 0000000000..9c9fa0be89 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_mesh_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Mesh +func (mg *Mesh) GetTerraformResourceType() string { + return "aws_appmesh_mesh" +} + +// GetConnectionDetailsMapping for this Mesh +func (tr *Mesh) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Mesh +func (tr *Mesh) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Mesh +func (tr *Mesh) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Mesh +func (tr *Mesh) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Mesh +func (tr *Mesh) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Mesh +func (tr *Mesh) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err 
!= nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Mesh +func (tr *Mesh) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Mesh +func (tr *Mesh) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Mesh using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Mesh) LateInitialize(attrs []byte) (bool, error) { + params := &MeshParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Mesh) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appmesh/v1beta2/zz_mesh_types.go b/apis/appmesh/v1beta2/zz_mesh_types.go new file mode 100755 index 0000000000..2cbb5098c3 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_mesh_types.go @@ -0,0 +1,200 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EgressFilterInitParameters struct { + + // Egress filter type. By default, the type is DROP_ALL. Valid values are ALLOW_ALL and DROP_ALL. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EgressFilterObservation struct { + + // Egress filter type. By default, the type is DROP_ALL. Valid values are ALLOW_ALL and DROP_ALL. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EgressFilterParameters struct { + + // Egress filter type. By default, the type is DROP_ALL. Valid values are ALLOW_ALL and DROP_ALL. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MeshInitParameters struct { + + // Service mesh specification to apply. + Spec *MeshSpecInitParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MeshObservation struct { + + // ARN of the service mesh. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Creation date of the service mesh. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // ID of the service mesh. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Last update date of the service mesh. + LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` + + // AWS account ID of the service mesh's owner. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Resource owner's AWS account ID. + ResourceOwner *string `json:"resourceOwner,omitempty" tf:"resource_owner,omitempty"` + + // Service mesh specification to apply. + Spec *MeshSpecObservation `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type MeshParameters struct { + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Service mesh specification to apply. + // +kubebuilder:validation:Optional + Spec *MeshSpecParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MeshSpecInitParameters struct { + + // Egress filter rules for the service mesh. + EgressFilter *EgressFilterInitParameters `json:"egressFilter,omitempty" tf:"egress_filter,omitempty"` + + // The service discovery information for the service mesh. + ServiceDiscovery *ServiceDiscoveryInitParameters `json:"serviceDiscovery,omitempty" tf:"service_discovery,omitempty"` +} + +type MeshSpecObservation struct { + + // Egress filter rules for the service mesh. 
+ EgressFilter *EgressFilterObservation `json:"egressFilter,omitempty" tf:"egress_filter,omitempty"` + + // The service discovery information for the service mesh. + ServiceDiscovery *ServiceDiscoveryObservation `json:"serviceDiscovery,omitempty" tf:"service_discovery,omitempty"` +} + +type MeshSpecParameters struct { + + // Egress filter rules for the service mesh. + // +kubebuilder:validation:Optional + EgressFilter *EgressFilterParameters `json:"egressFilter,omitempty" tf:"egress_filter,omitempty"` + + // The service discovery information for the service mesh. + // +kubebuilder:validation:Optional + ServiceDiscovery *ServiceDiscoveryParameters `json:"serviceDiscovery,omitempty" tf:"service_discovery,omitempty"` +} + +type ServiceDiscoveryInitParameters struct { + + // The IP version to use to control traffic within the mesh. Valid values are IPv6_PREFERRED, IPv4_PREFERRED, IPv4_ONLY, and IPv6_ONLY. + IPPreference *string `json:"ipPreference,omitempty" tf:"ip_preference,omitempty"` +} + +type ServiceDiscoveryObservation struct { + + // The IP version to use to control traffic within the mesh. Valid values are IPv6_PREFERRED, IPv4_PREFERRED, IPv4_ONLY, and IPv6_ONLY. + IPPreference *string `json:"ipPreference,omitempty" tf:"ip_preference,omitempty"` +} + +type ServiceDiscoveryParameters struct { + + // The IP version to use to control traffic within the mesh. Valid values are IPv6_PREFERRED, IPv4_PREFERRED, IPv4_ONLY, and IPv6_ONLY. + // +kubebuilder:validation:Optional + IPPreference *string `json:"ipPreference,omitempty" tf:"ip_preference,omitempty"` +} + +// MeshSpec defines the desired state of Mesh +type MeshSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MeshParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MeshInitParameters `json:"initProvider,omitempty"` +} + +// MeshStatus defines the observed state of Mesh. +type MeshStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MeshObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Mesh is the Schema for the Meshs API. Provides an AWS App Mesh service mesh resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Mesh struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MeshSpec `json:"spec"` + Status MeshStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MeshList contains a list of Meshs +type MeshList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Mesh `json:"items"` +} + +// Repository type metadata. +var ( + Mesh_Kind = "Mesh" + Mesh_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Mesh_Kind}.String() + Mesh_KindAPIVersion = Mesh_Kind + "." 
+ CRDGroupVersion.String() + Mesh_GroupVersionKind = CRDGroupVersion.WithKind(Mesh_Kind) +) + +func init() { + SchemeBuilder.Register(&Mesh{}, &MeshList{}) +} diff --git a/apis/appmesh/v1beta2/zz_route_terraformed.go b/apis/appmesh/v1beta2/zz_route_terraformed.go new file mode 100755 index 0000000000..7f3c66efc3 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_route_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Route +func (mg *Route) GetTerraformResourceType() string { + return "aws_appmesh_route" +} + +// GetConnectionDetailsMapping for this Route +func (tr *Route) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Route +func (tr *Route) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Route +func (tr *Route) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Route +func (tr *Route) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Route +func (tr *Route) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
SetParameters for this Route +func (tr *Route) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Route +func (tr *Route) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Route +func (tr *Route) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Route using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Route) LateInitialize(attrs []byte) (bool, error) { + params := &RouteParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Route) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appmesh/v1beta2/zz_route_types.go b/apis/appmesh/v1beta2/zz_route_types.go new file mode 100755 index 0000000000..0ecd348f8f --- /dev/null +++ b/apis/appmesh/v1beta2/zz_route_types.go @@ -0,0 +1,2101 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionWeightedTargetInitParameters struct { + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. + VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Relative weight of the weighted target. An integer between 0 and 100. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type ActionWeightedTargetObservation struct { + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. 
+ VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Relative weight of the weighted target. An integer between 0 and 100. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type ActionWeightedTargetParameters struct { + + // The port number to match from the request. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + VirtualNode *string `json:"virtualNode" tf:"virtual_node,omitempty"` + + // Relative weight of the weighted target. An integer between 0 and 100. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight" tf:"weight,omitempty"` +} + +type GRPCRouteActionInitParameters struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + WeightedTarget []WeightedTargetInitParameters `json:"weightedTarget,omitempty" tf:"weighted_target,omitempty"` +} + +type GRPCRouteActionObservation struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + WeightedTarget []WeightedTargetObservation `json:"weightedTarget,omitempty" tf:"weighted_target,omitempty"` +} + +type GRPCRouteActionParameters struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + // +kubebuilder:validation:Optional + WeightedTarget []WeightedTargetParameters `json:"weightedTarget" tf:"weighted_target,omitempty"` +} + +type GRPCRouteMatchInitParameters struct { + + // Data to match from the gRPC request. 
+ Metadata []MetadataInitParameters `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Method name to match from the request. If you specify a name, you must also specify a service_name. + MethodName *string `json:"methodName,omitempty" tf:"method_name,omitempty"` + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Fully qualified domain name for the service to match from the request. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type GRPCRouteMatchObservation struct { + + // Data to match from the gRPC request. + Metadata []MetadataObservation `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Method name to match from the request. If you specify a name, you must also specify a service_name. + MethodName *string `json:"methodName,omitempty" tf:"method_name,omitempty"` + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Fully qualified domain name for the service to match from the request. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type GRPCRouteMatchParameters struct { + + // Data to match from the gRPC request. + // +kubebuilder:validation:Optional + Metadata []MetadataParameters `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Method name to match from the request. If you specify a name, you must also specify a service_name. 
+ // +kubebuilder:validation:Optional + MethodName *string `json:"methodName,omitempty" tf:"method_name,omitempty"` + + // The port number to match from the request. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Fully qualified domain name for the service to match from the request. + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type HTTPRouteActionWeightedTargetInitParameters struct { + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualNode + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Reference to a VirtualNode in appmesh to populate virtualNode. + // +kubebuilder:validation:Optional + VirtualNodeRef *v1.Reference `json:"virtualNodeRef,omitempty" tf:"-"` + + // Selector for a VirtualNode in appmesh to populate virtualNode. + // +kubebuilder:validation:Optional + VirtualNodeSelector *v1.Selector `json:"virtualNodeSelector,omitempty" tf:"-"` + + // Relative weight of the weighted target. An integer between 0 and 100. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type HTTPRouteActionWeightedTargetObservation struct { + + // The port number to match from the request. 
+ Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. + VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Relative weight of the weighted target. An integer between 0 and 100. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type HTTPRouteActionWeightedTargetParameters struct { + + // The port number to match from the request. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualNode + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Reference to a VirtualNode in appmesh to populate virtualNode. + // +kubebuilder:validation:Optional + VirtualNodeRef *v1.Reference `json:"virtualNodeRef,omitempty" tf:"-"` + + // Selector for a VirtualNode in appmesh to populate virtualNode. + // +kubebuilder:validation:Optional + VirtualNodeSelector *v1.Selector `json:"virtualNodeSelector,omitempty" tf:"-"` + + // Relative weight of the weighted target. An integer between 0 and 100. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight" tf:"weight,omitempty"` +} + +type HTTPRouteMatchHeaderInitParameters struct { + + // If true, the match is on the opposite of the match criteria. Default is false. + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining an gRPC request match. + Match *HTTPRouteMatchHeaderMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. 
Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type HTTPRouteMatchHeaderMatchInitParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the value sent by the client must be included in. + Range *MatchHeaderMatchRangeInitParameters `json:"range,omitempty" tf:"range,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Value sent by the client must end with the specified characters. Must be between 1 and 255 characters in length. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type HTTPRouteMatchHeaderMatchObservation struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the value sent by the client must be included in. + Range *MatchHeaderMatchRangeObservation `json:"range,omitempty" tf:"range,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Value sent by the client must end with the specified characters. 
Must be between 1 and 255 characters in length. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type HTTPRouteMatchHeaderMatchParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the value sent by the client must be included in. + // +kubebuilder:validation:Optional + Range *MatchHeaderMatchRangeParameters `json:"range,omitempty" tf:"range,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Value sent by the client must end with the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type HTTPRouteMatchHeaderObservation struct { + + // If true, the match is on the opposite of the match criteria. Default is false. + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining an gRPC request match. + Match *HTTPRouteMatchHeaderMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type HTTPRouteMatchHeaderParameters struct { + + // If true, the match is on the opposite of the match criteria. Default is false. 
+ // +kubebuilder:validation:Optional + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining an gRPC request match. + // +kubebuilder:validation:Optional + Match *HTTPRouteMatchHeaderMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type HTTPRouteMatchQueryParameterInitParameters struct { + + // Criteria for determining an gRPC request match. + Match *HTTPRouteMatchQueryParameterMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type HTTPRouteMatchQueryParameterMatchInitParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type HTTPRouteMatchQueryParameterMatchObservation struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type HTTPRouteMatchQueryParameterMatchParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type HTTPRouteMatchQueryParameterObservation struct { + + // Criteria for determining an gRPC request match. + Match *HTTPRouteMatchQueryParameterMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type HTTPRouteMatchQueryParameterParameters struct { + + // Criteria for determining an gRPC request match. + // +kubebuilder:validation:Optional + Match *HTTPRouteMatchQueryParameterMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type HTTPRouteRetryPolicyInitParameters struct { + + // List of HTTP retry events. + // Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + // +listType=set + HTTPRetryEvents []*string `json:"httpRetryEvents,omitempty" tf:"http_retry_events,omitempty"` + + // Maximum number of retries. + MaxRetries *float64 `json:"maxRetries,omitempty" tf:"max_retries,omitempty"` + + // Per-retry timeout. + PerRetryTimeout *HTTPRouteRetryPolicyPerRetryTimeoutInitParameters `json:"perRetryTimeout,omitempty" tf:"per_retry_timeout,omitempty"` + + // List of TCP retry events. The only valid value is connection-error. + // +listType=set + TCPRetryEvents []*string `json:"tcpRetryEvents,omitempty" tf:"tcp_retry_events,omitempty"` +} + +type HTTPRouteRetryPolicyObservation struct { + + // List of HTTP retry events. + // Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + // +listType=set + HTTPRetryEvents []*string `json:"httpRetryEvents,omitempty" tf:"http_retry_events,omitempty"` + + // Maximum number of retries. + MaxRetries *float64 `json:"maxRetries,omitempty" tf:"max_retries,omitempty"` + + // Per-retry timeout. 
+ PerRetryTimeout *HTTPRouteRetryPolicyPerRetryTimeoutObservation `json:"perRetryTimeout,omitempty" tf:"per_retry_timeout,omitempty"` + + // List of TCP retry events. The only valid value is connection-error. + // +listType=set + TCPRetryEvents []*string `json:"tcpRetryEvents,omitempty" tf:"tcp_retry_events,omitempty"` +} + +type HTTPRouteRetryPolicyParameters struct { + + // List of HTTP retry events. + // Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + // +kubebuilder:validation:Optional + // +listType=set + HTTPRetryEvents []*string `json:"httpRetryEvents,omitempty" tf:"http_retry_events,omitempty"` + + // Maximum number of retries. + // +kubebuilder:validation:Optional + MaxRetries *float64 `json:"maxRetries" tf:"max_retries,omitempty"` + + // Per-retry timeout. + // +kubebuilder:validation:Optional + PerRetryTimeout *HTTPRouteRetryPolicyPerRetryTimeoutParameters `json:"perRetryTimeout" tf:"per_retry_timeout,omitempty"` + + // List of TCP retry events. The only valid value is connection-error. + // +kubebuilder:validation:Optional + // +listType=set + TCPRetryEvents []*string `json:"tcpRetryEvents,omitempty" tf:"tcp_retry_events,omitempty"` +} + +type HTTPRouteRetryPolicyPerRetryTimeoutInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPRouteRetryPolicyPerRetryTimeoutObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPRouteRetryPolicyPerRetryTimeoutParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type HTTPRouteTimeoutIdleInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPRouteTimeoutIdleObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPRouteTimeoutIdleParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type HTTPRouteTimeoutInitParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *HTTPRouteTimeoutIdleInitParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + PerRequest *HTTPRouteTimeoutPerRequestInitParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type HTTPRouteTimeoutObservation struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *HTTPRouteTimeoutIdleObservation `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. 
+	PerRequest *HTTPRouteTimeoutPerRequestObservation `json:"perRequest,omitempty" tf:"per_request,omitempty"`
+}
+
+type HTTPRouteTimeoutParameters struct {
+
+	// Idle timeout. An idle timeout bounds the amount of time that a connection may be idle.
+	// +kubebuilder:validation:Optional
+	Idle *HTTPRouteTimeoutIdleParameters `json:"idle,omitempty" tf:"idle,omitempty"`
+
+	// Per request timeout.
+	// +kubebuilder:validation:Optional
+	PerRequest *HTTPRouteTimeoutPerRequestParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"`
+}
+
+type HTTPRouteTimeoutPerRequestInitParameters struct {
+
+	// Unit of time. Valid values: ms, s.
+	Unit *string `json:"unit,omitempty" tf:"unit,omitempty"`
+
+	// Number of time units. Minimum value of 0.
+	Value *float64 `json:"value,omitempty" tf:"value,omitempty"`
+}
+
+type HTTPRouteTimeoutPerRequestObservation struct {
+
+	// Unit of time. Valid values: ms, s.
+	Unit *string `json:"unit,omitempty" tf:"unit,omitempty"`
+
+	// Number of time units. Minimum value of 0.
+	Value *float64 `json:"value,omitempty" tf:"value,omitempty"`
+}
+
+type HTTPRouteTimeoutPerRequestParameters struct {
+
+	// Unit of time. Valid values: ms, s.
+	// +kubebuilder:validation:Optional
+	Unit *string `json:"unit" tf:"unit,omitempty"`
+
+	// Number of time units. Minimum value of 0.
+	// +kubebuilder:validation:Optional
+	Value *float64 `json:"value" tf:"value,omitempty"`
+}
+
+type HeaderMatchRangeInitParameters struct {
+
+	// End of the range.
+	End *float64 `json:"end,omitempty" tf:"end,omitempty"`
+
+	// (Required) Start of the range.
+	Start *float64 `json:"start,omitempty" tf:"start,omitempty"`
+}
+
+type HeaderMatchRangeObservation struct {
+
+	// End of the range.
+	End *float64 `json:"end,omitempty" tf:"end,omitempty"`
+
+	// (Required) Start of the range.
+	Start *float64 `json:"start,omitempty" tf:"start,omitempty"`
+}
+
+type HeaderMatchRangeParameters struct {
+
+	// End of the range.
+	// +kubebuilder:validation:Optional
+	End *float64 `json:"end" tf:"end,omitempty"`
+
+	// (Required) Start of the range.
+	// +kubebuilder:validation:Optional
+	Start *float64 `json:"start" tf:"start,omitempty"`
+}
+
+type Http2RouteMatchHeaderInitParameters struct {
+
+	// If true, the match is on the opposite of the match criteria. Default is false.
+	Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"`
+
+	// Criteria for determining an gRPC request match.
+	Match *Http2RouteMatchHeaderMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"`
+
+	// Name to use for the route. Must be between 1 and 255 characters in length.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type Http2RouteMatchHeaderMatchInitParameters struct {
+
+	// Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length.
+	Exact *string `json:"exact,omitempty" tf:"exact,omitempty"`
+
+	// Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length.
+	Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"`
+
+	// Object that specifies the range of numbers that the value sent by the client must be included in.
+	Range *HeaderMatchRangeInitParameters `json:"range,omitempty" tf:"range,omitempty"`
+
+	// Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length.
+	Regex *string `json:"regex,omitempty" tf:"regex,omitempty"`
+
+	// Value sent by the client must end with the specified characters. Must be between 1 and 255 characters in length.
+	Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"`
+}
+
+type Http2RouteMatchHeaderMatchObservation struct {
+
+	// Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length.
+ Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the value sent by the client must be included in. + Range *HeaderMatchRangeObservation `json:"range,omitempty" tf:"range,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Value sent by the client must end with the specified characters. Must be between 1 and 255 characters in length. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type Http2RouteMatchHeaderMatchParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the value sent by the client must be included in. + // +kubebuilder:validation:Optional + Range *HeaderMatchRangeParameters `json:"range,omitempty" tf:"range,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Value sent by the client must end with the specified characters. Must be between 1 and 255 characters in length. 
+ // +kubebuilder:validation:Optional + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type Http2RouteMatchHeaderObservation struct { + + // If true, the match is on the opposite of the match criteria. Default is false. + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining an gRPC request match. + Match *Http2RouteMatchHeaderMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type Http2RouteMatchHeaderParameters struct { + + // If true, the match is on the opposite of the match criteria. Default is false. + // +kubebuilder:validation:Optional + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining an gRPC request match. + // +kubebuilder:validation:Optional + Match *Http2RouteMatchHeaderMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type Http2RouteMatchPathInitParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type Http2RouteMatchPathObservation struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. 
+ Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type Http2RouteMatchPathParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type Http2RouteMatchQueryParameterInitParameters struct { + + // Criteria for determining an gRPC request match. + Match *Http2RouteMatchQueryParameterMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type Http2RouteMatchQueryParameterMatchInitParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type Http2RouteMatchQueryParameterMatchObservation struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type Http2RouteMatchQueryParameterMatchParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type Http2RouteMatchQueryParameterObservation struct { + + // Criteria for determining an gRPC request match. + Match *Http2RouteMatchQueryParameterMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type Http2RouteMatchQueryParameterParameters struct { + + // Criteria for determining an gRPC request match. + // +kubebuilder:validation:Optional + Match *Http2RouteMatchQueryParameterMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type Http2RouteRetryPolicyInitParameters struct { + + // List of HTTP retry events. + // Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + // +listType=set + HTTPRetryEvents []*string `json:"httpRetryEvents,omitempty" tf:"http_retry_events,omitempty"` + + // Maximum number of retries. + MaxRetries *float64 `json:"maxRetries,omitempty" tf:"max_retries,omitempty"` + + // Per-retry timeout. + PerRetryTimeout *RetryPolicyPerRetryTimeoutInitParameters `json:"perRetryTimeout,omitempty" tf:"per_retry_timeout,omitempty"` + + // List of TCP retry events. The only valid value is connection-error. + // +listType=set + TCPRetryEvents []*string `json:"tcpRetryEvents,omitempty" tf:"tcp_retry_events,omitempty"` +} + +type Http2RouteRetryPolicyObservation struct { + + // List of HTTP retry events. + // Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + // +listType=set + HTTPRetryEvents []*string `json:"httpRetryEvents,omitempty" tf:"http_retry_events,omitempty"` + + // Maximum number of retries. + MaxRetries *float64 `json:"maxRetries,omitempty" tf:"max_retries,omitempty"` + + // Per-retry timeout. 
+ PerRetryTimeout *RetryPolicyPerRetryTimeoutObservation `json:"perRetryTimeout,omitempty" tf:"per_retry_timeout,omitempty"` + + // List of TCP retry events. The only valid value is connection-error. + // +listType=set + TCPRetryEvents []*string `json:"tcpRetryEvents,omitempty" tf:"tcp_retry_events,omitempty"` +} + +type Http2RouteRetryPolicyParameters struct { + + // List of HTTP retry events. + // Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + // +kubebuilder:validation:Optional + // +listType=set + HTTPRetryEvents []*string `json:"httpRetryEvents,omitempty" tf:"http_retry_events,omitempty"` + + // Maximum number of retries. + // +kubebuilder:validation:Optional + MaxRetries *float64 `json:"maxRetries" tf:"max_retries,omitempty"` + + // Per-retry timeout. + // +kubebuilder:validation:Optional + PerRetryTimeout *RetryPolicyPerRetryTimeoutParameters `json:"perRetryTimeout" tf:"per_retry_timeout,omitempty"` + + // List of TCP retry events. The only valid value is connection-error. + // +kubebuilder:validation:Optional + // +listType=set + TCPRetryEvents []*string `json:"tcpRetryEvents,omitempty" tf:"tcp_retry_events,omitempty"` +} + +type Http2RouteTimeoutInitParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *TimeoutIdleInitParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + PerRequest *TimeoutPerRequestInitParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type Http2RouteTimeoutObservation struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *TimeoutIdleObservation `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. 
+ PerRequest *TimeoutPerRequestObservation `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type Http2RouteTimeoutParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + // +kubebuilder:validation:Optional + Idle *TimeoutIdleParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + // +kubebuilder:validation:Optional + PerRequest *TimeoutPerRequestParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type IdleInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type IdleObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type IdleParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type MatchHeaderMatchRangeInitParameters struct { + + // End of the range. + End *float64 `json:"end,omitempty" tf:"end,omitempty"` + + // (Requited) Start of the range. + Start *float64 `json:"start,omitempty" tf:"start,omitempty"` +} + +type MatchHeaderMatchRangeObservation struct { + + // End of the range. + End *float64 `json:"end,omitempty" tf:"end,omitempty"` + + // (Requited) Start of the range. + Start *float64 `json:"start,omitempty" tf:"start,omitempty"` +} + +type MatchHeaderMatchRangeParameters struct { + + // End of the range. + // +kubebuilder:validation:Optional + End *float64 `json:"end" tf:"end,omitempty"` + + // (Requited) Start of the range. 
+ // +kubebuilder:validation:Optional + Start *float64 `json:"start" tf:"start,omitempty"` +} + +type MetadataInitParameters struct { + + // If true, the match is on the opposite of the match criteria. Default is false. + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining an gRPC request match. + Match *MetadataMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type MetadataMatchInitParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the value sent by the client must be included in. + Range *MetadataMatchRangeInitParameters `json:"range,omitempty" tf:"range,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Value sent by the client must end with the specified characters. Must be between 1 and 255 characters in length. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type MetadataMatchObservation struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. 
+ Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the value sent by the client must be included in. + Range *MetadataMatchRangeObservation `json:"range,omitempty" tf:"range,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Value sent by the client must end with the specified characters. Must be between 1 and 255 characters in length. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type MetadataMatchParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Object that specifies the range of numbers that the value sent by the client must be included in. + // +kubebuilder:validation:Optional + Range *MetadataMatchRangeParameters `json:"range,omitempty" tf:"range,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` + + // Value sent by the client must end with the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type MetadataMatchRangeInitParameters struct { + + // End of the range. + End *float64 `json:"end,omitempty" tf:"end,omitempty"` + + // (Requited) Start of the range. 
+ Start *float64 `json:"start,omitempty" tf:"start,omitempty"` +} + +type MetadataMatchRangeObservation struct { + + // End of the range. + End *float64 `json:"end,omitempty" tf:"end,omitempty"` + + // (Requited) Start of the range. + Start *float64 `json:"start,omitempty" tf:"start,omitempty"` +} + +type MetadataMatchRangeParameters struct { + + // End of the range. + // +kubebuilder:validation:Optional + End *float64 `json:"end" tf:"end,omitempty"` + + // (Requited) Start of the range. + // +kubebuilder:validation:Optional + Start *float64 `json:"start" tf:"start,omitempty"` +} + +type MetadataObservation struct { + + // If true, the match is on the opposite of the match criteria. Default is false. + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining an gRPC request match. + Match *MetadataMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type MetadataParameters struct { + + // If true, the match is on the opposite of the match criteria. Default is false. + // +kubebuilder:validation:Optional + Invert *bool `json:"invert,omitempty" tf:"invert,omitempty"` + + // Criteria for determining an gRPC request match. + // +kubebuilder:validation:Optional + Match *MetadataMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type PerRequestInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type PerRequestObservation struct { + + // Unit of time. Valid values: ms, s. 
+ Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type PerRequestParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type PerRetryTimeoutInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type PerRetryTimeoutObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type PerRetryTimeoutParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type RetryPolicyInitParameters struct { + + // List of gRPC retry events. + // Valid values: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. + // +listType=set + GRPCRetryEvents []*string `json:"grpcRetryEvents,omitempty" tf:"grpc_retry_events,omitempty"` + + // List of HTTP retry events. + // Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). 
+ // +listType=set + HTTPRetryEvents []*string `json:"httpRetryEvents,omitempty" tf:"http_retry_events,omitempty"` + + // Maximum number of retries. + MaxRetries *float64 `json:"maxRetries,omitempty" tf:"max_retries,omitempty"` + + // Per-retry timeout. + PerRetryTimeout *PerRetryTimeoutInitParameters `json:"perRetryTimeout,omitempty" tf:"per_retry_timeout,omitempty"` + + // List of TCP retry events. The only valid value is connection-error. + // +listType=set + TCPRetryEvents []*string `json:"tcpRetryEvents,omitempty" tf:"tcp_retry_events,omitempty"` +} + +type RetryPolicyObservation struct { + + // List of gRPC retry events. + // Valid values: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. + // +listType=set + GRPCRetryEvents []*string `json:"grpcRetryEvents,omitempty" tf:"grpc_retry_events,omitempty"` + + // List of HTTP retry events. + // Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + // +listType=set + HTTPRetryEvents []*string `json:"httpRetryEvents,omitempty" tf:"http_retry_events,omitempty"` + + // Maximum number of retries. + MaxRetries *float64 `json:"maxRetries,omitempty" tf:"max_retries,omitempty"` + + // Per-retry timeout. + PerRetryTimeout *PerRetryTimeoutObservation `json:"perRetryTimeout,omitempty" tf:"per_retry_timeout,omitempty"` + + // List of TCP retry events. The only valid value is connection-error. + // +listType=set + TCPRetryEvents []*string `json:"tcpRetryEvents,omitempty" tf:"tcp_retry_events,omitempty"` +} + +type RetryPolicyParameters struct { + + // List of gRPC retry events. + // Valid values: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. 
+ // +kubebuilder:validation:Optional + // +listType=set + GRPCRetryEvents []*string `json:"grpcRetryEvents,omitempty" tf:"grpc_retry_events,omitempty"` + + // List of HTTP retry events. + // Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + // +kubebuilder:validation:Optional + // +listType=set + HTTPRetryEvents []*string `json:"httpRetryEvents,omitempty" tf:"http_retry_events,omitempty"` + + // Maximum number of retries. + // +kubebuilder:validation:Optional + MaxRetries *float64 `json:"maxRetries" tf:"max_retries,omitempty"` + + // Per-retry timeout. + // +kubebuilder:validation:Optional + PerRetryTimeout *PerRetryTimeoutParameters `json:"perRetryTimeout" tf:"per_retry_timeout,omitempty"` + + // List of TCP retry events. The only valid value is connection-error. + // +kubebuilder:validation:Optional + // +listType=set + TCPRetryEvents []*string `json:"tcpRetryEvents,omitempty" tf:"tcp_retry_events,omitempty"` +} + +type RetryPolicyPerRetryTimeoutInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type RetryPolicyPerRetryTimeoutObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type RetryPolicyPerRetryTimeoutParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. 
+ // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type RouteInitParameters struct { + + // Name of the service mesh in which to create the route. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.Mesh + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // Reference to a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameRef *v1.Reference `json:"meshNameRef,omitempty" tf:"-"` + + // Selector for a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameSelector *v1.Selector `json:"meshNameSelector,omitempty" tf:"-"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Route specification to apply. + Spec *RouteSpecInitParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Name of the virtual router in which to create the route. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualRouter + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + VirtualRouterName *string `json:"virtualRouterName,omitempty" tf:"virtual_router_name,omitempty"` + + // Reference to a VirtualRouter in appmesh to populate virtualRouterName. 
+ // +kubebuilder:validation:Optional + VirtualRouterNameRef *v1.Reference `json:"virtualRouterNameRef,omitempty" tf:"-"` + + // Selector for a VirtualRouter in appmesh to populate virtualRouterName. + // +kubebuilder:validation:Optional + VirtualRouterNameSelector *v1.Selector `json:"virtualRouterNameSelector,omitempty" tf:"-"` +} + +type RouteObservation struct { + + // ARN of the route. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Creation date of the route. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // ID of the route. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Last update date of the route. + LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` + + // Name of the service mesh in which to create the route. Must be between 1 and 255 characters in length. + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Resource owner's AWS account ID. + ResourceOwner *string `json:"resourceOwner,omitempty" tf:"resource_owner,omitempty"` + + // Route specification to apply. + Spec *RouteSpecObservation `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Name of the virtual router in which to create the route. 
Must be between 1 and 255 characters in length. + VirtualRouterName *string `json:"virtualRouterName,omitempty" tf:"virtual_router_name,omitempty"` +} + +type RouteParameters struct { + + // Name of the service mesh in which to create the route. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.Mesh + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // Reference to a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameRef *v1.Reference `json:"meshNameRef,omitempty" tf:"-"` + + // Selector for a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameSelector *v1.Selector `json:"meshNameSelector,omitempty" tf:"-"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + // +kubebuilder:validation:Optional + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the route. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Route specification to apply. + // +kubebuilder:validation:Optional + Spec *RouteSpecParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Name of the virtual router in which to create the route. Must be between 1 and 255 characters in length. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualRouter + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + VirtualRouterName *string `json:"virtualRouterName,omitempty" tf:"virtual_router_name,omitempty"` + + // Reference to a VirtualRouter in appmesh to populate virtualRouterName. + // +kubebuilder:validation:Optional + VirtualRouterNameRef *v1.Reference `json:"virtualRouterNameRef,omitempty" tf:"-"` + + // Selector for a VirtualRouter in appmesh to populate virtualRouterName. + // +kubebuilder:validation:Optional + VirtualRouterNameSelector *v1.Selector `json:"virtualRouterNameSelector,omitempty" tf:"-"` +} + +type RouteSpecInitParameters struct { + + // GRPC routing information for the route. + GRPCRoute *SpecGRPCRouteInitParameters `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"` + + // HTTP routing information for the route. + HTTPRoute *SpecHTTPRouteInitParameters `json:"httpRoute,omitempty" tf:"http_route,omitempty"` + + // HTTP/2 routing information for the route. + Http2Route *SpecHttp2RouteInitParameters `json:"http2Route,omitempty" tf:"http2_route,omitempty"` + + // Priority for the route, between 0 and 1000. + // Routes are matched based on the specified value, where 0 is the highest priority. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // TCP routing information for the route. + TCPRoute *TCPRouteInitParameters `json:"tcpRoute,omitempty" tf:"tcp_route,omitempty"` +} + +type RouteSpecObservation struct { + + // GRPC routing information for the route. + GRPCRoute *SpecGRPCRouteObservation `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"` + + // HTTP routing information for the route. + HTTPRoute *SpecHTTPRouteObservation `json:"httpRoute,omitempty" tf:"http_route,omitempty"` + + // HTTP/2 routing information for the route. 
+ Http2Route *SpecHttp2RouteObservation `json:"http2Route,omitempty" tf:"http2_route,omitempty"` + + // Priority for the route, between 0 and 1000. + // Routes are matched based on the specified value, where 0 is the highest priority. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // TCP routing information for the route. + TCPRoute *TCPRouteObservation `json:"tcpRoute,omitempty" tf:"tcp_route,omitempty"` +} + +type RouteSpecParameters struct { + + // GRPC routing information for the route. + // +kubebuilder:validation:Optional + GRPCRoute *SpecGRPCRouteParameters `json:"grpcRoute,omitempty" tf:"grpc_route,omitempty"` + + // HTTP routing information for the route. + // +kubebuilder:validation:Optional + HTTPRoute *SpecHTTPRouteParameters `json:"httpRoute,omitempty" tf:"http_route,omitempty"` + + // HTTP/2 routing information for the route. + // +kubebuilder:validation:Optional + Http2Route *SpecHttp2RouteParameters `json:"http2Route,omitempty" tf:"http2_route,omitempty"` + + // Priority for the route, between 0 and 1000. + // Routes are matched based on the specified value, where 0 is the highest priority. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // TCP routing information for the route. + // +kubebuilder:validation:Optional + TCPRoute *TCPRouteParameters `json:"tcpRoute,omitempty" tf:"tcp_route,omitempty"` +} + +type SpecGRPCRouteInitParameters struct { + + // Action to take if a match is determined. + Action *GRPCRouteActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + Match *GRPCRouteMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Retry policy. + RetryPolicy *RetryPolicyInitParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Types of timeouts. 
+ Timeout *TimeoutInitParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecGRPCRouteObservation struct { + + // Action to take if a match is determined. + Action *GRPCRouteActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + Match *GRPCRouteMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Retry policy. + RetryPolicy *RetryPolicyObservation `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Types of timeouts. + Timeout *TimeoutObservation `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecGRPCRouteParameters struct { + + // Action to take if a match is determined. + // +kubebuilder:validation:Optional + Action *GRPCRouteActionParameters `json:"action" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + // +kubebuilder:validation:Optional + Match *GRPCRouteMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Retry policy. + // +kubebuilder:validation:Optional + RetryPolicy *RetryPolicyParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Types of timeouts. + // +kubebuilder:validation:Optional + Timeout *TimeoutParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecHTTPRouteActionInitParameters struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + WeightedTarget []HTTPRouteActionWeightedTargetInitParameters `json:"weightedTarget,omitempty" tf:"weighted_target,omitempty"` +} + +type SpecHTTPRouteActionObservation struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. 
+ WeightedTarget []HTTPRouteActionWeightedTargetObservation `json:"weightedTarget,omitempty" tf:"weighted_target,omitempty"` +} + +type SpecHTTPRouteActionParameters struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + // +kubebuilder:validation:Optional + WeightedTarget []HTTPRouteActionWeightedTargetParameters `json:"weightedTarget" tf:"weighted_target,omitempty"` +} + +type SpecHTTPRouteInitParameters struct { + + // Action to take if a match is determined. + Action *SpecHTTPRouteActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + Match *SpecHTTPRouteMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Retry policy. + RetryPolicy *HTTPRouteRetryPolicyInitParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Types of timeouts. + Timeout *HTTPRouteTimeoutInitParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecHTTPRouteMatchInitParameters struct { + + // Client request headers to match on. + Header []HTTPRouteMatchHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Client request header method to match on. Valid values: GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Client request path to match on. + Path *SpecHTTPRouteMatchPathInitParameters `json:"path,omitempty" tf:"path,omitempty"` + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. 
+ QueryParameter []HTTPRouteMatchQueryParameterInitParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` + + // Client request header scheme to match on. Valid values: http, https. + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` +} + +type SpecHTTPRouteMatchObservation struct { + + // Client request headers to match on. + Header []HTTPRouteMatchHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // Client request header method to match on. Valid values: GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Client request path to match on. + Path *SpecHTTPRouteMatchPathObservation `json:"path,omitempty" tf:"path,omitempty"` + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + QueryParameter []HTTPRouteMatchQueryParameterObservation `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` + + // Client request header scheme to match on. Valid values: http, https. + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` +} + +type SpecHTTPRouteMatchParameters struct { + + // Client request headers to match on. + // +kubebuilder:validation:Optional + Header []HTTPRouteMatchHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Client request header method to match on. Valid values: GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH. + // +kubebuilder:validation:Optional + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Client request path to match on. 
+ // +kubebuilder:validation:Optional + Path *SpecHTTPRouteMatchPathParameters `json:"path,omitempty" tf:"path,omitempty"` + + // The port number to match from the request. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + // +kubebuilder:validation:Optional + QueryParameter []HTTPRouteMatchQueryParameterParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` + + // Client request header scheme to match on. Valid values: http, https. + // +kubebuilder:validation:Optional + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` +} + +type SpecHTTPRouteMatchPathInitParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type SpecHTTPRouteMatchPathObservation struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type SpecHTTPRouteMatchPathParameters struct { + + // Value sent by the client must match the specified value exactly. Must be between 1 and 255 characters in length. 
+ // +kubebuilder:validation:Optional + Exact *string `json:"exact,omitempty" tf:"exact,omitempty"` + + // Value sent by the client must include the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Regex *string `json:"regex,omitempty" tf:"regex,omitempty"` +} + +type SpecHTTPRouteObservation struct { + + // Action to take if a match is determined. + Action *SpecHTTPRouteActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + Match *SpecHTTPRouteMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Retry policy. + RetryPolicy *HTTPRouteRetryPolicyObservation `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Types of timeouts. + Timeout *HTTPRouteTimeoutObservation `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecHTTPRouteParameters struct { + + // Action to take if a match is determined. + // +kubebuilder:validation:Optional + Action *SpecHTTPRouteActionParameters `json:"action" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + // +kubebuilder:validation:Optional + Match *SpecHTTPRouteMatchParameters `json:"match" tf:"match,omitempty"` + + // Retry policy. + // +kubebuilder:validation:Optional + RetryPolicy *HTTPRouteRetryPolicyParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Types of timeouts. + // +kubebuilder:validation:Optional + Timeout *HTTPRouteTimeoutParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecHttp2RouteActionInitParameters struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. 
+ WeightedTarget []ActionWeightedTargetInitParameters `json:"weightedTarget,omitempty" tf:"weighted_target,omitempty"` +} + +type SpecHttp2RouteActionObservation struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + WeightedTarget []ActionWeightedTargetObservation `json:"weightedTarget,omitempty" tf:"weighted_target,omitempty"` +} + +type SpecHttp2RouteActionParameters struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + // +kubebuilder:validation:Optional + WeightedTarget []ActionWeightedTargetParameters `json:"weightedTarget" tf:"weighted_target,omitempty"` +} + +type SpecHttp2RouteInitParameters struct { + + // Action to take if a match is determined. + Action *SpecHttp2RouteActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + Match *SpecHttp2RouteMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Retry policy. + RetryPolicy *Http2RouteRetryPolicyInitParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Types of timeouts. + Timeout *Http2RouteTimeoutInitParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecHttp2RouteMatchInitParameters struct { + + // Client request headers to match on. + Header []Http2RouteMatchHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Client request header method to match on. Valid values: GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Client request path to match on. + Path *Http2RouteMatchPathInitParameters `json:"path,omitempty" tf:"path,omitempty"` + + // The port number to match from the request. 
+ Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + QueryParameter []Http2RouteMatchQueryParameterInitParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` + + // Client request header scheme to match on. Valid values: http, https. + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` +} + +type SpecHttp2RouteMatchObservation struct { + + // Client request headers to match on. + Header []Http2RouteMatchHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // Client request header method to match on. Valid values: GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Client request path to match on. + Path *Http2RouteMatchPathObservation `json:"path,omitempty" tf:"path,omitempty"` + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + QueryParameter []Http2RouteMatchQueryParameterObservation `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` + + // Client request header scheme to match on. Valid values: http, https. + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` +} + +type SpecHttp2RouteMatchParameters struct { + + // Client request headers to match on. + // +kubebuilder:validation:Optional + Header []Http2RouteMatchHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Client request header method to match on. 
Valid values: GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH. + // +kubebuilder:validation:Optional + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Client request path to match on. + // +kubebuilder:validation:Optional + Path *Http2RouteMatchPathParameters `json:"path,omitempty" tf:"path,omitempty"` + + // The port number to match from the request. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Value sent by the client must begin with the specified characters. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Client request query parameters to match on. + // +kubebuilder:validation:Optional + QueryParameter []Http2RouteMatchQueryParameterParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` + + // Client request header scheme to match on. Valid values: http, https. + // +kubebuilder:validation:Optional + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` +} + +type SpecHttp2RouteObservation struct { + + // Action to take if a match is determined. + Action *SpecHttp2RouteActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + Match *SpecHttp2RouteMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Retry policy. + RetryPolicy *Http2RouteRetryPolicyObservation `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Types of timeouts. + Timeout *Http2RouteTimeoutObservation `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecHttp2RouteParameters struct { + + // Action to take if a match is determined. + // +kubebuilder:validation:Optional + Action *SpecHttp2RouteActionParameters `json:"action" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. 
+ // +kubebuilder:validation:Optional + Match *SpecHttp2RouteMatchParameters `json:"match" tf:"match,omitempty"` + + // Retry policy. + // +kubebuilder:validation:Optional + RetryPolicy *Http2RouteRetryPolicyParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Types of timeouts. + // +kubebuilder:validation:Optional + Timeout *Http2RouteTimeoutParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type TCPRouteActionInitParameters struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + WeightedTarget []TCPRouteActionWeightedTargetInitParameters `json:"weightedTarget,omitempty" tf:"weighted_target,omitempty"` +} + +type TCPRouteActionObservation struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + WeightedTarget []TCPRouteActionWeightedTargetObservation `json:"weightedTarget,omitempty" tf:"weighted_target,omitempty"` +} + +type TCPRouteActionParameters struct { + + // Targets that traffic is routed to when a request matches the route. + // You can specify one or more targets and their relative weights with which to distribute traffic. + // +kubebuilder:validation:Optional + WeightedTarget []TCPRouteActionWeightedTargetParameters `json:"weightedTarget" tf:"weighted_target,omitempty"` +} + +type TCPRouteActionWeightedTargetInitParameters struct { + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualNode + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Reference to a VirtualNode in appmesh to populate virtualNode. + // +kubebuilder:validation:Optional + VirtualNodeRef *v1.Reference `json:"virtualNodeRef,omitempty" tf:"-"` + + // Selector for a VirtualNode in appmesh to populate virtualNode. + // +kubebuilder:validation:Optional + VirtualNodeSelector *v1.Selector `json:"virtualNodeSelector,omitempty" tf:"-"` + + // Relative weight of the weighted target. An integer between 0 and 100. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type TCPRouteActionWeightedTargetObservation struct { + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. + VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Relative weight of the weighted target. An integer between 0 and 100. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type TCPRouteActionWeightedTargetParameters struct { + + // The port number to match from the request. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualNode + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Reference to a VirtualNode in appmesh to populate virtualNode. + // +kubebuilder:validation:Optional + VirtualNodeRef *v1.Reference `json:"virtualNodeRef,omitempty" tf:"-"` + + // Selector for a VirtualNode in appmesh to populate virtualNode. + // +kubebuilder:validation:Optional + VirtualNodeSelector *v1.Selector `json:"virtualNodeSelector,omitempty" tf:"-"` + + // Relative weight of the weighted target. An integer between 0 and 100. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight" tf:"weight,omitempty"` +} + +type TCPRouteInitParameters struct { + + // Action to take if a match is determined. + Action *TCPRouteActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + Match *TCPRouteMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Types of timeouts. + Timeout *TCPRouteTimeoutInitParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type TCPRouteMatchInitParameters struct { + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type TCPRouteMatchObservation struct { + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type TCPRouteMatchParameters struct { + + // The port number to match from the request. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type TCPRouteObservation struct { + + // Action to take if a match is determined. 
+ Action *TCPRouteActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + Match *TCPRouteMatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // Types of timeouts. + Timeout *TCPRouteTimeoutObservation `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type TCPRouteParameters struct { + + // Action to take if a match is determined. + // +kubebuilder:validation:Optional + Action *TCPRouteActionParameters `json:"action" tf:"action,omitempty"` + + // Criteria for determining an gRPC request match. + // +kubebuilder:validation:Optional + Match *TCPRouteMatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // Types of timeouts. + // +kubebuilder:validation:Optional + Timeout *TCPRouteTimeoutParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type TCPRouteTimeoutIdleInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TCPRouteTimeoutIdleObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TCPRouteTimeoutIdleParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type TCPRouteTimeoutInitParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. 
+ Idle *TCPRouteTimeoutIdleInitParameters `json:"idle,omitempty" tf:"idle,omitempty"` +} + +type TCPRouteTimeoutObservation struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *TCPRouteTimeoutIdleObservation `json:"idle,omitempty" tf:"idle,omitempty"` +} + +type TCPRouteTimeoutParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + // +kubebuilder:validation:Optional + Idle *TCPRouteTimeoutIdleParameters `json:"idle,omitempty" tf:"idle,omitempty"` +} + +type TimeoutIdleInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimeoutIdleObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimeoutIdleParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type TimeoutInitParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *IdleInitParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + PerRequest *PerRequestInitParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutObservation struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *IdleObservation `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. 
+ PerRequest *PerRequestObservation `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + // +kubebuilder:validation:Optional + Idle *IdleParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + // +kubebuilder:validation:Optional + PerRequest *PerRequestParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutPerRequestInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimeoutPerRequestObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimeoutPerRequestParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // Number of time units. Minimum value of 0. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type WeightedTargetInitParameters struct { + + // The port number to match from the request. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. + VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Relative weight of the weighted target. An integer between 0 and 100. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type WeightedTargetObservation struct { + + // The port number to match from the request. 
+ Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. + VirtualNode *string `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Relative weight of the weighted target. An integer between 0 and 100. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type WeightedTargetParameters struct { + + // The port number to match from the request. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Virtual node to associate with the weighted target. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + VirtualNode *string `json:"virtualNode" tf:"virtual_node,omitempty"` + + // Relative weight of the weighted target. An integer between 0 and 100. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight" tf:"weight,omitempty"` +} + +// RouteSpec defines the desired state of Route +type RouteSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RouteParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RouteInitParameters `json:"initProvider,omitempty"` +} + +// RouteStatus defines the observed state of Route. 
+type RouteStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RouteObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Route is the Schema for the Routes API. Provides an AWS App Mesh route resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Route struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spec) || (has(self.initProvider) && has(self.initProvider.spec))",message="spec.forProvider.spec is a required parameter" + Spec RouteSpec `json:"spec"` + Status RouteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RouteList contains a list of Routes +type RouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Route `json:"items"` +} + +// Repository type metadata. 
+var ( + Route_Kind = "Route" + Route_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Route_Kind}.String() + Route_KindAPIVersion = Route_Kind + "." + CRDGroupVersion.String() + Route_GroupVersionKind = CRDGroupVersion.WithKind(Route_Kind) +) + +func init() { + SchemeBuilder.Register(&Route{}, &RouteList{}) +} diff --git a/apis/appmesh/v1beta2/zz_virtualgateway_terraformed.go b/apis/appmesh/v1beta2/zz_virtualgateway_terraformed.go new file mode 100755 index 0000000000..e06ee0c265 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_virtualgateway_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualGateway +func (mg *VirtualGateway) GetTerraformResourceType() string { + return "aws_appmesh_virtual_gateway" +} + +// GetConnectionDetailsMapping for this VirtualGateway +func (tr *VirtualGateway) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VirtualGateway +func (tr *VirtualGateway) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualGateway +func (tr *VirtualGateway) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualGateway +func (tr *VirtualGateway) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} 
+ +// GetParameters of this VirtualGateway +func (tr *VirtualGateway) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualGateway +func (tr *VirtualGateway) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualGateway +func (tr *VirtualGateway) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VirtualGateway +func (tr *VirtualGateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualGateway using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *VirtualGateway) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualGatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualGateway) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appmesh/v1beta2/zz_virtualgateway_types.go b/apis/appmesh/v1beta2/zz_virtualgateway_types.go new file mode 100755 index 0000000000..6ba176f9ac --- /dev/null +++ b/apis/appmesh/v1beta2/zz_virtualgateway_types.go @@ -0,0 +1,1228 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessLogFileInitParameters struct { + + // The specified format for the logs. + Format *FormatInitParameters `json:"format,omitempty" tf:"format,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type AccessLogFileObservation struct { + + // The specified format for the logs. + Format *FormatObservation `json:"format,omitempty" tf:"format,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. 
Must be between 1 and 255 characters in length. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type AccessLogFileParameters struct { + + // The specified format for the logs. + // +kubebuilder:validation:Optional + Format *FormatParameters `json:"format,omitempty" tf:"format,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type AccessLogInitParameters struct { + + // Local file certificate. + File *AccessLogFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` +} + +type AccessLogObservation struct { + + // Local file certificate. + File *AccessLogFileObservation `json:"file,omitempty" tf:"file,omitempty"` +} + +type AccessLogParameters struct { + + // Local file certificate. + // +kubebuilder:validation:Optional + File *AccessLogFileParameters `json:"file,omitempty" tf:"file,omitempty"` +} + +type AcmInitParameters struct { + + // One or more ACM ARNs. + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns,omitempty" tf:"certificate_authority_arns,omitempty"` +} + +type AcmObservation struct { + + // One or more ACM ARNs. + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns,omitempty" tf:"certificate_authority_arns,omitempty"` +} + +type AcmParameters struct { + + // One or more ACM ARNs. + // +kubebuilder:validation:Optional + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns" tf:"certificate_authority_arns,omitempty"` +} + +type BackendDefaultsInitParameters struct { + + // Default client policy for virtual gateway backends. + ClientPolicy *ClientPolicyInitParameters `json:"clientPolicy,omitempty" tf:"client_policy,omitempty"` +} + +type BackendDefaultsObservation struct { + + // Default client policy for virtual gateway backends. 
+ ClientPolicy *ClientPolicyObservation `json:"clientPolicy,omitempty" tf:"client_policy,omitempty"` +} + +type BackendDefaultsParameters struct { + + // Default client policy for virtual gateway backends. + // +kubebuilder:validation:Optional + ClientPolicy *ClientPolicyParameters `json:"clientPolicy,omitempty" tf:"client_policy,omitempty"` +} + +type CertificateAcmInitParameters struct { + + // ARN for the certificate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Reference to a Certificate in acm to populate certificateArn. + // +kubebuilder:validation:Optional + CertificateArnRef *v1.Reference `json:"certificateArnRef,omitempty" tf:"-"` + + // Selector for a Certificate in acm to populate certificateArn. + // +kubebuilder:validation:Optional + CertificateArnSelector *v1.Selector `json:"certificateArnSelector,omitempty" tf:"-"` +} + +type CertificateAcmObservation struct { + + // ARN for the certificate. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` +} + +type CertificateAcmParameters struct { + + // ARN for the certificate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Reference to a Certificate in acm to populate certificateArn. + // +kubebuilder:validation:Optional + CertificateArnRef *v1.Reference `json:"certificateArnRef,omitempty" tf:"-"` + + // Selector for a Certificate in acm to populate certificateArn. 
+ // +kubebuilder:validation:Optional + CertificateArnSelector *v1.Selector `json:"certificateArnSelector,omitempty" tf:"-"` +} + +type CertificateFileInitParameters struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type CertificateFileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type CertificateFileParameters struct { + + // Certificate chain for the certificate. + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + // +kubebuilder:validation:Optional + PrivateKey *string `json:"privateKey" tf:"private_key,omitempty"` +} + +type CertificateInitParameters struct { + + // Local file certificate. + File *FileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *SdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type CertificateObservation struct { + + // Local file certificate. + File *FileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *SdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type CertificateParameters struct { + + // Local file certificate. 
+ // +kubebuilder:validation:Optional + File *FileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *SdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type CertificateSdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type CertificateSdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type CertificateSdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type ClientPolicyInitParameters struct { + + // Transport Layer Security (TLS) client policy. + TLS *TLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type ClientPolicyObservation struct { + + // Transport Layer Security (TLS) client policy. + TLS *TLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type ClientPolicyParameters struct { + + // Transport Layer Security (TLS) client policy. + // +kubebuilder:validation:Optional + TLS *TLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type ConnectionPoolInitParameters struct { + + // Connection pool information for gRPC listeners. + GRPC *GRPCInitParameters `json:"grpc,omitempty" tf:"grpc,omitempty"` + + // Connection pool information for HTTP listeners. 
+ HTTP *HTTPInitParameters `json:"http,omitempty" tf:"http,omitempty"` + + // Connection pool information for HTTP2 listeners. + Http2 *Http2InitParameters `json:"http2,omitempty" tf:"http2,omitempty"` +} + +type ConnectionPoolObservation struct { + + // Connection pool information for gRPC listeners. + GRPC *GRPCObservation `json:"grpc,omitempty" tf:"grpc,omitempty"` + + // Connection pool information for HTTP listeners. + HTTP *HTTPObservation `json:"http,omitempty" tf:"http,omitempty"` + + // Connection pool information for HTTP2 listeners. + Http2 *Http2Observation `json:"http2,omitempty" tf:"http2,omitempty"` +} + +type ConnectionPoolParameters struct { + + // Connection pool information for gRPC listeners. + // +kubebuilder:validation:Optional + GRPC *GRPCParameters `json:"grpc,omitempty" tf:"grpc,omitempty"` + + // Connection pool information for HTTP listeners. + // +kubebuilder:validation:Optional + HTTP *HTTPParameters `json:"http,omitempty" tf:"http,omitempty"` + + // Connection pool information for HTTP2 listeners. + // +kubebuilder:validation:Optional + Http2 *Http2Parameters `json:"http2,omitempty" tf:"http2,omitempty"` +} + +type FileInitParameters struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type FileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type FileParameters struct { + + // Certificate chain for the certificate. 
+ // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + // +kubebuilder:validation:Optional + PrivateKey *string `json:"privateKey" tf:"private_key,omitempty"` +} + +type FormatInitParameters struct { + + // The logging format for JSON. + JSON []JSONInitParameters `json:"json,omitempty" tf:"json,omitempty"` + + // The logging format for text. Must be between 1 and 1000 characters in length. + Text *string `json:"text,omitempty" tf:"text,omitempty"` +} + +type FormatObservation struct { + + // The logging format for JSON. + JSON []JSONObservation `json:"json,omitempty" tf:"json,omitempty"` + + // The logging format for text. Must be between 1 and 1000 characters in length. + Text *string `json:"text,omitempty" tf:"text,omitempty"` +} + +type FormatParameters struct { + + // The logging format for JSON. + // +kubebuilder:validation:Optional + JSON []JSONParameters `json:"json,omitempty" tf:"json,omitempty"` + + // The logging format for text. Must be between 1 and 1000 characters in length. + // +kubebuilder:validation:Optional + Text *string `json:"text,omitempty" tf:"text,omitempty"` +} + +type GRPCInitParameters struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + MaxRequests *float64 `json:"maxRequests,omitempty" tf:"max_requests,omitempty"` +} + +type GRPCObservation struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + MaxRequests *float64 `json:"maxRequests,omitempty" tf:"max_requests,omitempty"` +} + +type GRPCParameters struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. 
+ // +kubebuilder:validation:Optional + MaxRequests *float64 `json:"maxRequests" tf:"max_requests,omitempty"` +} + +type HTTPInitParameters struct { + + // Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in upstream cluster. Minimum value of 1. + MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` + + // Number of overflowing requests after max_connections Envoy will queue to upstream cluster. Minimum value of 1. + MaxPendingRequests *float64 `json:"maxPendingRequests,omitempty" tf:"max_pending_requests,omitempty"` +} + +type HTTPObservation struct { + + // Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in upstream cluster. Minimum value of 1. + MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` + + // Number of overflowing requests after max_connections Envoy will queue to upstream cluster. Minimum value of 1. + MaxPendingRequests *float64 `json:"maxPendingRequests,omitempty" tf:"max_pending_requests,omitempty"` +} + +type HTTPParameters struct { + + // Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in upstream cluster. Minimum value of 1. + // +kubebuilder:validation:Optional + MaxConnections *float64 `json:"maxConnections" tf:"max_connections,omitempty"` + + // Number of overflowing requests after max_connections Envoy will queue to upstream cluster. Minimum value of 1. + // +kubebuilder:validation:Optional + MaxPendingRequests *float64 `json:"maxPendingRequests,omitempty" tf:"max_pending_requests,omitempty"` +} + +type HealthCheckInitParameters struct { + + // Number of consecutive successful health checks that must occur before declaring listener healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // Time period in milliseconds between each health check execution. 
+ IntervalMillis *float64 `json:"intervalMillis,omitempty" tf:"interval_millis,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Amount of time to wait when receiving a response from the health check, in milliseconds. + TimeoutMillis *float64 `json:"timeoutMillis,omitempty" tf:"timeout_millis,omitempty"` + + // Number of consecutive failed health checks that must occur before declaring a virtual gateway unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type HealthCheckObservation struct { + + // Number of consecutive successful health checks that must occur before declaring listener healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // Time period in milliseconds between each health check execution. + IntervalMillis *float64 `json:"intervalMillis,omitempty" tf:"interval_millis,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Amount of time to wait when receiving a response from the health check, in milliseconds. 
+ TimeoutMillis *float64 `json:"timeoutMillis,omitempty" tf:"timeout_millis,omitempty"` + + // Number of consecutive failed health checks that must occur before declaring a virtual gateway unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type HealthCheckParameters struct { + + // Number of consecutive successful health checks that must occur before declaring listener healthy. + // +kubebuilder:validation:Optional + HealthyThreshold *float64 `json:"healthyThreshold" tf:"healthy_threshold,omitempty"` + + // Time period in milliseconds between each health check execution. + // +kubebuilder:validation:Optional + IntervalMillis *float64 `json:"intervalMillis" tf:"interval_millis,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Port used for the port mapping. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Amount of time to wait when receiving a response from the health check, in milliseconds. + // +kubebuilder:validation:Optional + TimeoutMillis *float64 `json:"timeoutMillis" tf:"timeout_millis,omitempty"` + + // Number of consecutive failed health checks that must occur before declaring a virtual gateway unhealthy. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold" tf:"unhealthy_threshold,omitempty"` +} + +type Http2InitParameters struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. 
+ MaxRequests *float64 `json:"maxRequests,omitempty" tf:"max_requests,omitempty"` +} + +type Http2Observation struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + MaxRequests *float64 `json:"maxRequests,omitempty" tf:"max_requests,omitempty"` +} + +type Http2Parameters struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + // +kubebuilder:validation:Optional + MaxRequests *float64 `json:"maxRequests" tf:"max_requests,omitempty"` +} + +type JSONInitParameters struct { + + // The specified key for the JSON. Must be between 1 and 100 characters in length. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type JSONObservation struct { + + // The specified key for the JSON. Must be between 1 and 100 characters in length. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type JSONParameters struct { + + // The specified key for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ListenerInitParameters struct { + + // Connection pool information for the listener. + ConnectionPool *ConnectionPoolInitParameters `json:"connectionPool,omitempty" tf:"connection_pool,omitempty"` + + // Health check information for the listener. 
+ HealthCheck *HealthCheckInitParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // Port mapping information for the listener. + PortMapping *PortMappingInitParameters `json:"portMapping,omitempty" tf:"port_mapping,omitempty"` + + // Transport Layer Security (TLS) client policy. + TLS *ListenerTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type ListenerObservation struct { + + // Connection pool information for the listener. + ConnectionPool *ConnectionPoolObservation `json:"connectionPool,omitempty" tf:"connection_pool,omitempty"` + + // Health check information for the listener. + HealthCheck *HealthCheckObservation `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // Port mapping information for the listener. + PortMapping *PortMappingObservation `json:"portMapping,omitempty" tf:"port_mapping,omitempty"` + + // Transport Layer Security (TLS) client policy. + TLS *ListenerTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type ListenerParameters struct { + + // Connection pool information for the listener. + // +kubebuilder:validation:Optional + ConnectionPool *ConnectionPoolParameters `json:"connectionPool,omitempty" tf:"connection_pool,omitempty"` + + // Health check information for the listener. + // +kubebuilder:validation:Optional + HealthCheck *HealthCheckParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // Port mapping information for the listener. + // +kubebuilder:validation:Optional + PortMapping *PortMappingParameters `json:"portMapping" tf:"port_mapping,omitempty"` + + // Transport Layer Security (TLS) client policy. + // +kubebuilder:validation:Optional + TLS *ListenerTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type ListenerTLSInitParameters struct { + + // Virtual gateway's client's Transport Layer Security (TLS) certificate. 
+ Certificate *TLSCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Listener's TLS mode. Valid values: DISABLED, PERMISSIVE, STRICT. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // TLS validation context. + Validation *TLSValidationInitParameters `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type ListenerTLSObservation struct { + + // Virtual gateway's client's Transport Layer Security (TLS) certificate. + Certificate *TLSCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Listener's TLS mode. Valid values: DISABLED, PERMISSIVE, STRICT. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // TLS validation context. + Validation *TLSValidationObservation `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type ListenerTLSParameters struct { + + // Virtual gateway's client's Transport Layer Security (TLS) certificate. + // +kubebuilder:validation:Optional + Certificate *TLSCertificateParameters `json:"certificate" tf:"certificate,omitempty"` + + // Listener's TLS mode. Valid values: DISABLED, PERMISSIVE, STRICT. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // TLS validation context. + // +kubebuilder:validation:Optional + Validation *TLSValidationParameters `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type LoggingInitParameters struct { + + // Access log configuration for a virtual gateway. + AccessLog *AccessLogInitParameters `json:"accessLog,omitempty" tf:"access_log,omitempty"` +} + +type LoggingObservation struct { + + // Access log configuration for a virtual gateway. + AccessLog *AccessLogObservation `json:"accessLog,omitempty" tf:"access_log,omitempty"` +} + +type LoggingParameters struct { + + // Access log configuration for a virtual gateway. 
+ // +kubebuilder:validation:Optional + AccessLog *AccessLogParameters `json:"accessLog,omitempty" tf:"access_log,omitempty"` +} + +type PortMappingInitParameters struct { + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type PortMappingObservation struct { + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type PortMappingParameters struct { + + // Port used for the port mapping. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` +} + +type SdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type SdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type SdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. 
+ // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type SubjectAlternativeNamesInitParameters struct { + + // Criteria for determining a SAN's match. + Match *SubjectAlternativeNamesMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` +} + +type SubjectAlternativeNamesMatchInitParameters struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type SubjectAlternativeNamesMatchObservation struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type SubjectAlternativeNamesMatchParameters struct { + + // Values sent must match the specified values exactly. + // +kubebuilder:validation:Optional + // +listType=set + Exact []*string `json:"exact" tf:"exact,omitempty"` +} + +type SubjectAlternativeNamesObservation struct { + + // Criteria for determining a SAN's match. + Match *SubjectAlternativeNamesMatchObservation `json:"match,omitempty" tf:"match,omitempty"` +} + +type SubjectAlternativeNamesParameters struct { + + // Criteria for determining a SAN's match. + // +kubebuilder:validation:Optional + Match *SubjectAlternativeNamesMatchParameters `json:"match" tf:"match,omitempty"` +} + +type TLSCertificateInitParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + Acm *CertificateAcmInitParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *CertificateFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *CertificateSdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type TLSCertificateObservation struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. 
+ Acm *CertificateAcmObservation `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *CertificateFileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *CertificateSdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type TLSCertificateParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + // +kubebuilder:validation:Optional + Acm *CertificateAcmParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + // +kubebuilder:validation:Optional + File *CertificateFileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *CertificateSdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type TLSInitParameters struct { + + // Virtual gateway's client's Transport Layer Security (TLS) certificate. + Certificate *CertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Whether the policy is enforced. Default is true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // One or more ports that the policy is enforced for. + // +listType=set + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + + // TLS validation context. + Validation *ValidationInitParameters `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type TLSObservation struct { + + // Virtual gateway's client's Transport Layer Security (TLS) certificate. + Certificate *CertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Whether the policy is enforced. Default is true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // One or more ports that the policy is enforced for. + // +listType=set + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + + // TLS validation context. 
+ Validation *ValidationObservation `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type TLSParameters struct { + + // Virtual gateway's client's Transport Layer Security (TLS) certificate. + // +kubebuilder:validation:Optional + Certificate *CertificateParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Whether the policy is enforced. Default is true. + // +kubebuilder:validation:Optional + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // One or more ports that the policy is enforced for. + // +kubebuilder:validation:Optional + // +listType=set + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + + // TLS validation context. + // +kubebuilder:validation:Optional + Validation *ValidationParameters `json:"validation" tf:"validation,omitempty"` +} + +type TLSValidationInitParameters struct { + + // SANs for a virtual gateway's listener's Transport Layer Security (TLS) validation context. + SubjectAlternativeNames *ValidationSubjectAlternativeNamesInitParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + Trust *ValidationTrustInitParameters `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type TLSValidationObservation struct { + + // SANs for a virtual gateway's listener's Transport Layer Security (TLS) validation context. + SubjectAlternativeNames *ValidationSubjectAlternativeNamesObservation `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + Trust *ValidationTrustObservation `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type TLSValidationParameters struct { + + // SANs for a virtual gateway's listener's Transport Layer Security (TLS) validation context. 
+ // +kubebuilder:validation:Optional + SubjectAlternativeNames *ValidationSubjectAlternativeNamesParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + // +kubebuilder:validation:Optional + Trust *ValidationTrustParameters `json:"trust" tf:"trust,omitempty"` +} + +type TrustFileInitParameters struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type TrustFileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type TrustFileParameters struct { + + // Certificate chain for the certificate. + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` +} + +type TrustInitParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + Acm *AcmInitParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *TrustFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *TrustSdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type TrustObservation struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + Acm *AcmObservation `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *TrustFileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *TrustSdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type TrustParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + // +kubebuilder:validation:Optional + Acm *AcmParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. 
+ // +kubebuilder:validation:Optional + File *TrustFileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *TrustSdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type TrustSdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type TrustSdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type TrustSdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type ValidationInitParameters struct { + + // SANs for a virtual gateway's listener's Transport Layer Security (TLS) validation context. + SubjectAlternativeNames *SubjectAlternativeNamesInitParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + Trust *TrustInitParameters `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type ValidationObservation struct { + + // SANs for a virtual gateway's listener's Transport Layer Security (TLS) validation context. + SubjectAlternativeNames *SubjectAlternativeNamesObservation `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. 
+ Trust *TrustObservation `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type ValidationParameters struct { + + // SANs for a virtual gateway's listener's Transport Layer Security (TLS) validation context. + // +kubebuilder:validation:Optional + SubjectAlternativeNames *SubjectAlternativeNamesParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + // +kubebuilder:validation:Optional + Trust *TrustParameters `json:"trust" tf:"trust,omitempty"` +} + +type ValidationSubjectAlternativeNamesInitParameters struct { + + // Criteria for determining a SAN's match. + Match *ValidationSubjectAlternativeNamesMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` +} + +type ValidationSubjectAlternativeNamesMatchInitParameters struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type ValidationSubjectAlternativeNamesMatchObservation struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type ValidationSubjectAlternativeNamesMatchParameters struct { + + // Values sent must match the specified values exactly. + // +kubebuilder:validation:Optional + // +listType=set + Exact []*string `json:"exact" tf:"exact,omitempty"` +} + +type ValidationSubjectAlternativeNamesObservation struct { + + // Criteria for determining a SAN's match. + Match *ValidationSubjectAlternativeNamesMatchObservation `json:"match,omitempty" tf:"match,omitempty"` +} + +type ValidationSubjectAlternativeNamesParameters struct { + + // Criteria for determining a SAN's match. + // +kubebuilder:validation:Optional + Match *ValidationSubjectAlternativeNamesMatchParameters `json:"match" tf:"match,omitempty"` +} + +type ValidationTrustFileInitParameters struct { + + // Certificate chain for the certificate. 
+ CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type ValidationTrustFileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type ValidationTrustFileParameters struct { + + // Certificate chain for the certificate. + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` +} + +type ValidationTrustInitParameters struct { + + // Local file certificate. + File *ValidationTrustFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *ValidationTrustSdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ValidationTrustObservation struct { + + // Local file certificate. + File *ValidationTrustFileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *ValidationTrustSdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ValidationTrustParameters struct { + + // Local file certificate. + // +kubebuilder:validation:Optional + File *ValidationTrustFileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *ValidationTrustSdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ValidationTrustSdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ValidationTrustSdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. 
+ SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ValidationTrustSdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type VirtualGatewayInitParameters struct { + + // Name of the service mesh in which to create the virtual gateway. Must be between 1 and 255 characters in length. + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual gateway. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Virtual gateway specification to apply. + Spec *VirtualGatewaySpecInitParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualGatewayObservation struct { + + // ARN of the virtual gateway. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Creation date of the virtual gateway. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // ID of the virtual gateway. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Last update date of the virtual gateway. + LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` + + // Name of the service mesh in which to create the virtual gateway. Must be between 1 and 255 characters in length. 
+ MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual gateway. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Resource owner's AWS account ID. + ResourceOwner *string `json:"resourceOwner,omitempty" tf:"resource_owner,omitempty"` + + // Virtual gateway specification to apply. + Spec *VirtualGatewaySpecObservation `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type VirtualGatewayParameters struct { + + // Name of the service mesh in which to create the virtual gateway. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + // +kubebuilder:validation:Optional + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual gateway. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Virtual gateway specification to apply. 
+ // +kubebuilder:validation:Optional + Spec *VirtualGatewaySpecParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualGatewaySpecInitParameters struct { + + // Defaults for backends. + BackendDefaults *BackendDefaultsInitParameters `json:"backendDefaults,omitempty" tf:"backend_defaults,omitempty"` + + // Listeners that the mesh endpoint is expected to receive inbound traffic from. You can specify one listener. + Listener []ListenerInitParameters `json:"listener,omitempty" tf:"listener,omitempty"` + + // Inbound and outbound access logging information for the virtual gateway. + Logging *LoggingInitParameters `json:"logging,omitempty" tf:"logging,omitempty"` +} + +type VirtualGatewaySpecObservation struct { + + // Defaults for backends. + BackendDefaults *BackendDefaultsObservation `json:"backendDefaults,omitempty" tf:"backend_defaults,omitempty"` + + // Listeners that the mesh endpoint is expected to receive inbound traffic from. You can specify one listener. + Listener []ListenerObservation `json:"listener,omitempty" tf:"listener,omitempty"` + + // Inbound and outbound access logging information for the virtual gateway. + Logging *LoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"` +} + +type VirtualGatewaySpecParameters struct { + + // Defaults for backends. + // +kubebuilder:validation:Optional + BackendDefaults *BackendDefaultsParameters `json:"backendDefaults,omitempty" tf:"backend_defaults,omitempty"` + + // Listeners that the mesh endpoint is expected to receive inbound traffic from. You can specify one listener. + // +kubebuilder:validation:Optional + Listener []ListenerParameters `json:"listener" tf:"listener,omitempty"` + + // Inbound and outbound access logging information for the virtual gateway. 
+ // +kubebuilder:validation:Optional + Logging *LoggingParameters `json:"logging,omitempty" tf:"logging,omitempty"` +} + +// VirtualGatewaySpec defines the desired state of VirtualGateway +type VirtualGatewaySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualGatewayParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VirtualGatewayInitParameters `json:"initProvider,omitempty"` +} + +// VirtualGatewayStatus defines the observed state of VirtualGateway. +type VirtualGatewayStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualGatewayObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualGateway is the Schema for the VirtualGateways API. Provides an AWS App Mesh virtual gateway resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VirtualGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.meshName) || (has(self.initProvider) && has(self.initProvider.meshName))",message="spec.forProvider.meshName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spec) || (has(self.initProvider) && has(self.initProvider.spec))",message="spec.forProvider.spec is a required parameter" + Spec VirtualGatewaySpec `json:"spec"` + Status VirtualGatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualGatewayList contains a list of VirtualGateways +type VirtualGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualGateway `json:"items"` +} + +// Repository type metadata. 
+var ( + VirtualGateway_Kind = "VirtualGateway" + VirtualGateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualGateway_Kind}.String() + VirtualGateway_KindAPIVersion = VirtualGateway_Kind + "." + CRDGroupVersion.String() + VirtualGateway_GroupVersionKind = CRDGroupVersion.WithKind(VirtualGateway_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualGateway{}, &VirtualGatewayList{}) +} diff --git a/apis/appmesh/v1beta2/zz_virtualnode_terraformed.go b/apis/appmesh/v1beta2/zz_virtualnode_terraformed.go new file mode 100755 index 0000000000..9f1e66b03a --- /dev/null +++ b/apis/appmesh/v1beta2/zz_virtualnode_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualNode +func (mg *VirtualNode) GetTerraformResourceType() string { + return "aws_appmesh_virtual_node" +} + +// GetConnectionDetailsMapping for this VirtualNode +func (tr *VirtualNode) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VirtualNode +func (tr *VirtualNode) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualNode +func (tr *VirtualNode) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualNode +func (tr *VirtualNode) GetID() string { + if tr.Status.AtProvider.ID == nil { + 
return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualNode +func (tr *VirtualNode) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualNode +func (tr *VirtualNode) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualNode +func (tr *VirtualNode) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VirtualNode +func (tr *VirtualNode) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualNode using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VirtualNode) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualNodeParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualNode) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appmesh/v1beta2/zz_virtualnode_types.go b/apis/appmesh/v1beta2/zz_virtualnode_types.go new file mode 100755 index 0000000000..0f642abaa7 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_virtualnode_types.go @@ -0,0 +1,2262 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AwsCloudMapInitParameters struct { + + // String map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned. 
+ // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // Name of the AWS Cloud Map namespace to use. + // Use the aws_service_discovery_http_namespace resource to configure a Cloud Map namespace. Must be between 1 and 1024 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicediscovery/v1beta1.HTTPNamespace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` + + // Reference to a HTTPNamespace in servicediscovery to populate namespaceName. + // +kubebuilder:validation:Optional + NamespaceNameRef *v1.Reference `json:"namespaceNameRef,omitempty" tf:"-"` + + // Selector for a HTTPNamespace in servicediscovery to populate namespaceName. + // +kubebuilder:validation:Optional + NamespaceNameSelector *v1.Selector `json:"namespaceNameSelector,omitempty" tf:"-"` + + // attribute of the dns object to hostname. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type AwsCloudMapObservation struct { + + // String map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned. + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // Name of the AWS Cloud Map namespace to use. + // Use the aws_service_discovery_http_namespace resource to configure a Cloud Map namespace. Must be between 1 and 1024 characters in length. + NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` + + // attribute of the dns object to hostname. 
+ ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type AwsCloudMapParameters struct { + + // String map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned. + // +kubebuilder:validation:Optional + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // Name of the AWS Cloud Map namespace to use. + // Use the aws_service_discovery_http_namespace resource to configure a Cloud Map namespace. Must be between 1 and 1024 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicediscovery/v1beta1.HTTPNamespace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` + + // Reference to a HTTPNamespace in servicediscovery to populate namespaceName. + // +kubebuilder:validation:Optional + NamespaceNameRef *v1.Reference `json:"namespaceNameRef,omitempty" tf:"-"` + + // Selector for a HTTPNamespace in servicediscovery to populate namespaceName. + // +kubebuilder:validation:Optional + NamespaceNameSelector *v1.Selector `json:"namespaceNameSelector,omitempty" tf:"-"` + + // attribute of the dns object to hostname. + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName" tf:"service_name,omitempty"` +} + +type BackendDefaultsClientPolicyInitParameters struct { + + // Transport Layer Security (TLS) client policy. + TLS *BackendDefaultsClientPolicyTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type BackendDefaultsClientPolicyObservation struct { + + // Transport Layer Security (TLS) client policy. 
+ TLS *BackendDefaultsClientPolicyTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type BackendDefaultsClientPolicyParameters struct { + + // Transport Layer Security (TLS) client policy. + // +kubebuilder:validation:Optional + TLS *BackendDefaultsClientPolicyTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type BackendDefaultsClientPolicyTLSCertificateInitParameters struct { + + // Local file certificate. + File *ClientPolicyTLSCertificateFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *ClientPolicyTLSCertificateSdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type BackendDefaultsClientPolicyTLSCertificateObservation struct { + + // Local file certificate. + File *ClientPolicyTLSCertificateFileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *ClientPolicyTLSCertificateSdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type BackendDefaultsClientPolicyTLSCertificateParameters struct { + + // Local file certificate. + // +kubebuilder:validation:Optional + File *ClientPolicyTLSCertificateFileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *ClientPolicyTLSCertificateSdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type BackendDefaultsClientPolicyTLSInitParameters struct { + + // Virtual node's client's Transport Layer Security (TLS) certificate. + Certificate *BackendDefaultsClientPolicyTLSCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Whether the policy is enforced. Default is true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // One or more ports that the policy is enforced for. + // +listType=set + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + + // TLS validation context. 
+ Validation *BackendDefaultsClientPolicyTLSValidationInitParameters `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type BackendDefaultsClientPolicyTLSObservation struct { + + // Virtual node's client's Transport Layer Security (TLS) certificate. + Certificate *BackendDefaultsClientPolicyTLSCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Whether the policy is enforced. Default is true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // One or more ports that the policy is enforced for. + // +listType=set + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + + // TLS validation context. + Validation *BackendDefaultsClientPolicyTLSValidationObservation `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type BackendDefaultsClientPolicyTLSParameters struct { + + // Virtual node's client's Transport Layer Security (TLS) certificate. + // +kubebuilder:validation:Optional + Certificate *BackendDefaultsClientPolicyTLSCertificateParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Whether the policy is enforced. Default is true. + // +kubebuilder:validation:Optional + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // One or more ports that the policy is enforced for. + // +kubebuilder:validation:Optional + // +listType=set + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + + // TLS validation context. + // +kubebuilder:validation:Optional + Validation *BackendDefaultsClientPolicyTLSValidationParameters `json:"validation" tf:"validation,omitempty"` +} + +type BackendDefaultsClientPolicyTLSValidationInitParameters struct { + + // SANs for a TLS validation context. + SubjectAlternativeNames *ClientPolicyTLSValidationSubjectAlternativeNamesInitParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. 
+ Trust *ClientPolicyTLSValidationTrustInitParameters `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type BackendDefaultsClientPolicyTLSValidationObservation struct { + + // SANs for a TLS validation context. + SubjectAlternativeNames *ClientPolicyTLSValidationSubjectAlternativeNamesObservation `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + Trust *ClientPolicyTLSValidationTrustObservation `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type BackendDefaultsClientPolicyTLSValidationParameters struct { + + // SANs for a TLS validation context. + // +kubebuilder:validation:Optional + SubjectAlternativeNames *ClientPolicyTLSValidationSubjectAlternativeNamesParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + // +kubebuilder:validation:Optional + Trust *ClientPolicyTLSValidationTrustParameters `json:"trust" tf:"trust,omitempty"` +} + +type BackendInitParameters struct { + + // Virtual service to use as a backend for a virtual node. + VirtualService *BackendVirtualServiceInitParameters `json:"virtualService,omitempty" tf:"virtual_service,omitempty"` +} + +type BackendObservation struct { + + // Virtual service to use as a backend for a virtual node. + VirtualService *BackendVirtualServiceObservation `json:"virtualService,omitempty" tf:"virtual_service,omitempty"` +} + +type BackendParameters struct { + + // Virtual service to use as a backend for a virtual node. + // +kubebuilder:validation:Optional + VirtualService *BackendVirtualServiceParameters `json:"virtualService" tf:"virtual_service,omitempty"` +} + +type BackendVirtualServiceInitParameters struct { + + // Client policy for the backend. + ClientPolicy *VirtualServiceClientPolicyInitParameters `json:"clientPolicy,omitempty" tf:"client_policy,omitempty"` + + // Name of the virtual service that is acting as a virtual node backend. 
Must be between 1 and 255 characters in length. + VirtualServiceName *string `json:"virtualServiceName,omitempty" tf:"virtual_service_name,omitempty"` +} + +type BackendVirtualServiceObservation struct { + + // Client policy for the backend. + ClientPolicy *VirtualServiceClientPolicyObservation `json:"clientPolicy,omitempty" tf:"client_policy,omitempty"` + + // Name of the virtual service that is acting as a virtual node backend. Must be between 1 and 255 characters in length. + VirtualServiceName *string `json:"virtualServiceName,omitempty" tf:"virtual_service_name,omitempty"` +} + +type BackendVirtualServiceParameters struct { + + // Client policy for the backend. + // +kubebuilder:validation:Optional + ClientPolicy *VirtualServiceClientPolicyParameters `json:"clientPolicy,omitempty" tf:"client_policy,omitempty"` + + // Name of the virtual service that is acting as a virtual node backend. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + VirtualServiceName *string `json:"virtualServiceName" tf:"virtual_service_name,omitempty"` +} + +type BaseEjectionDurationInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type BaseEjectionDurationObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type BaseEjectionDurationParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. 
+ // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type ClientPolicyTLSCertificateFileInitParameters struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type ClientPolicyTLSCertificateFileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type ClientPolicyTLSCertificateFileParameters struct { + + // Certificate chain for the certificate. + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + // +kubebuilder:validation:Optional + PrivateKey *string `json:"privateKey" tf:"private_key,omitempty"` +} + +type ClientPolicyTLSCertificateInitParameters struct { + + // Local file certificate. + File *TLSCertificateFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *TLSCertificateSdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ClientPolicyTLSCertificateObservation struct { + + // Local file certificate. + File *TLSCertificateFileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. 
+ Sds *TLSCertificateSdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ClientPolicyTLSCertificateParameters struct { + + // Local file certificate. + // +kubebuilder:validation:Optional + File *TLSCertificateFileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *TLSCertificateSdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ClientPolicyTLSCertificateSdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ClientPolicyTLSCertificateSdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ClientPolicyTLSCertificateSdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type ClientPolicyTLSInitParameters struct { + + // Virtual node's client's Transport Layer Security (TLS) certificate. + Certificate *ClientPolicyTLSCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Whether the policy is enforced. Default is true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // One or more ports that the policy is enforced for. + // +listType=set + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + + // TLS validation context. 
+ Validation *ClientPolicyTLSValidationInitParameters `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type ClientPolicyTLSObservation struct { + + // Virtual node's client's Transport Layer Security (TLS) certificate. + Certificate *ClientPolicyTLSCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Whether the policy is enforced. Default is true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // One or more ports that the policy is enforced for. + // +listType=set + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + + // TLS validation context. + Validation *ClientPolicyTLSValidationObservation `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type ClientPolicyTLSParameters struct { + + // Virtual node's client's Transport Layer Security (TLS) certificate. + // +kubebuilder:validation:Optional + Certificate *ClientPolicyTLSCertificateParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Whether the policy is enforced. Default is true. + // +kubebuilder:validation:Optional + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // One or more ports that the policy is enforced for. + // +kubebuilder:validation:Optional + // +listType=set + Ports []*float64 `json:"ports,omitempty" tf:"ports,omitempty"` + + // TLS validation context. + // +kubebuilder:validation:Optional + Validation *ClientPolicyTLSValidationParameters `json:"validation" tf:"validation,omitempty"` +} + +type ClientPolicyTLSValidationInitParameters struct { + + // SANs for a TLS validation context. + SubjectAlternativeNames *TLSValidationSubjectAlternativeNamesInitParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. 
+ Trust *TLSValidationTrustInitParameters `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type ClientPolicyTLSValidationObservation struct { + + // SANs for a TLS validation context. + SubjectAlternativeNames *TLSValidationSubjectAlternativeNamesObservation `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + Trust *TLSValidationTrustObservation `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type ClientPolicyTLSValidationParameters struct { + + // SANs for a TLS validation context. + // +kubebuilder:validation:Optional + SubjectAlternativeNames *TLSValidationSubjectAlternativeNamesParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + // +kubebuilder:validation:Optional + Trust *TLSValidationTrustParameters `json:"trust" tf:"trust,omitempty"` +} + +type ClientPolicyTLSValidationSubjectAlternativeNamesInitParameters struct { + + // Criteria for determining a SAN's match. + Match *ClientPolicyTLSValidationSubjectAlternativeNamesMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` +} + +type ClientPolicyTLSValidationSubjectAlternativeNamesMatchInitParameters struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type ClientPolicyTLSValidationSubjectAlternativeNamesMatchObservation struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type ClientPolicyTLSValidationSubjectAlternativeNamesMatchParameters struct { + + // Values sent must match the specified values exactly. 
+ // +kubebuilder:validation:Optional + // +listType=set + Exact []*string `json:"exact" tf:"exact,omitempty"` +} + +type ClientPolicyTLSValidationSubjectAlternativeNamesObservation struct { + + // Criteria for determining a SAN's match. + Match *ClientPolicyTLSValidationSubjectAlternativeNamesMatchObservation `json:"match,omitempty" tf:"match,omitempty"` +} + +type ClientPolicyTLSValidationSubjectAlternativeNamesParameters struct { + + // Criteria for determining a SAN's match. + // +kubebuilder:validation:Optional + Match *ClientPolicyTLSValidationSubjectAlternativeNamesMatchParameters `json:"match" tf:"match,omitempty"` +} + +type ClientPolicyTLSValidationTrustFileInitParameters struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type ClientPolicyTLSValidationTrustFileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type ClientPolicyTLSValidationTrustFileParameters struct { + + // Certificate chain for the certificate. + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` +} + +type ClientPolicyTLSValidationTrustInitParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + Acm *ValidationTrustAcmInitParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *ClientPolicyTLSValidationTrustFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *ClientPolicyTLSValidationTrustSdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ClientPolicyTLSValidationTrustObservation struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. 
+ Acm *ValidationTrustAcmObservation `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *ClientPolicyTLSValidationTrustFileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *ClientPolicyTLSValidationTrustSdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ClientPolicyTLSValidationTrustParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + // +kubebuilder:validation:Optional + Acm *ValidationTrustAcmParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + // +kubebuilder:validation:Optional + File *ClientPolicyTLSValidationTrustFileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *ClientPolicyTLSValidationTrustSdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ClientPolicyTLSValidationTrustSdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ClientPolicyTLSValidationTrustSdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ClientPolicyTLSValidationTrustSdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. 
+ // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type ConnectionPoolGRPCInitParameters struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + MaxRequests *float64 `json:"maxRequests,omitempty" tf:"max_requests,omitempty"` +} + +type ConnectionPoolGRPCObservation struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + MaxRequests *float64 `json:"maxRequests,omitempty" tf:"max_requests,omitempty"` +} + +type ConnectionPoolGRPCParameters struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + // +kubebuilder:validation:Optional + MaxRequests *float64 `json:"maxRequests" tf:"max_requests,omitempty"` +} + +type ConnectionPoolHTTPInitParameters struct { + + // Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in upstream cluster. Minimum value of 1. + MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` + + // Number of overflowing requests after max_connections Envoy will queue to upstream cluster. Minimum value of 1. + MaxPendingRequests *float64 `json:"maxPendingRequests,omitempty" tf:"max_pending_requests,omitempty"` +} + +type ConnectionPoolHTTPObservation struct { + + // Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in upstream cluster. Minimum value of 1. + MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` + + // Number of overflowing requests after max_connections Envoy will queue to upstream cluster. Minimum value of 1. 
+ MaxPendingRequests *float64 `json:"maxPendingRequests,omitempty" tf:"max_pending_requests,omitempty"` +} + +type ConnectionPoolHTTPParameters struct { + + // Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in upstream cluster. Minimum value of 1. + // +kubebuilder:validation:Optional + MaxConnections *float64 `json:"maxConnections" tf:"max_connections,omitempty"` + + // Number of overflowing requests after max_connections Envoy will queue to upstream cluster. Minimum value of 1. + // +kubebuilder:validation:Optional + MaxPendingRequests *float64 `json:"maxPendingRequests,omitempty" tf:"max_pending_requests,omitempty"` +} + +type ConnectionPoolHttp2InitParameters struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + MaxRequests *float64 `json:"maxRequests,omitempty" tf:"max_requests,omitempty"` +} + +type ConnectionPoolHttp2Observation struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + MaxRequests *float64 `json:"maxRequests,omitempty" tf:"max_requests,omitempty"` +} + +type ConnectionPoolHttp2Parameters struct { + + // Maximum number of inflight requests Envoy can concurrently support across hosts in upstream cluster. Minimum value of 1. + // +kubebuilder:validation:Optional + MaxRequests *float64 `json:"maxRequests" tf:"max_requests,omitempty"` +} + +type DNSInitParameters struct { + + // DNS host name for your virtual node. + Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // The preferred IP version that this virtual node uses. Valid values: IPv6_PREFERRED, IPv4_PREFERRED, IPv4_ONLY, IPv6_ONLY. + IPPreference *string `json:"ipPreference,omitempty" tf:"ip_preference,omitempty"` + + // The DNS response type for the virtual node. Valid values: LOADBALANCER, ENDPOINTS. 
+ ResponseType *string `json:"responseType,omitempty" tf:"response_type,omitempty"` +} + +type DNSObservation struct { + + // DNS host name for your virtual node. + Hostname *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // The preferred IP version that this virtual node uses. Valid values: IPv6_PREFERRED, IPv4_PREFERRED, IPv4_ONLY, IPv6_ONLY. + IPPreference *string `json:"ipPreference,omitempty" tf:"ip_preference,omitempty"` + + // The DNS response type for the virtual node. Valid values: LOADBALANCER, ENDPOINTS. + ResponseType *string `json:"responseType,omitempty" tf:"response_type,omitempty"` +} + +type DNSParameters struct { + + // DNS host name for your virtual node. + // +kubebuilder:validation:Optional + Hostname *string `json:"hostname" tf:"hostname,omitempty"` + + // The preferred IP version that this virtual node uses. Valid values: IPv6_PREFERRED, IPv4_PREFERRED, IPv4_ONLY, IPv6_ONLY. + // +kubebuilder:validation:Optional + IPPreference *string `json:"ipPreference,omitempty" tf:"ip_preference,omitempty"` + + // The DNS response type for the virtual node. Valid values: LOADBALANCER, ENDPOINTS. + // +kubebuilder:validation:Optional + ResponseType *string `json:"responseType,omitempty" tf:"response_type,omitempty"` +} + +type FileFormatInitParameters struct { + + // The logging format for JSON. + JSON []FormatJSONInitParameters `json:"json,omitempty" tf:"json,omitempty"` + + // The logging format for text. Must be between 1 and 1000 characters in length. + Text *string `json:"text,omitempty" tf:"text,omitempty"` +} + +type FileFormatObservation struct { + + // The logging format for JSON. + JSON []FormatJSONObservation `json:"json,omitempty" tf:"json,omitempty"` + + // The logging format for text. Must be between 1 and 1000 characters in length. + Text *string `json:"text,omitempty" tf:"text,omitempty"` +} + +type FileFormatParameters struct { + + // The logging format for JSON. 
+ // +kubebuilder:validation:Optional + JSON []FormatJSONParameters `json:"json,omitempty" tf:"json,omitempty"` + + // The logging format for text. Must be between 1 and 1000 characters in length. + // +kubebuilder:validation:Optional + Text *string `json:"text,omitempty" tf:"text,omitempty"` +} + +type FormatJSONInitParameters struct { + + // The specified key for the JSON. Must be between 1 and 100 characters in length. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FormatJSONObservation struct { + + // The specified key for the JSON. Must be between 1 and 100 characters in length. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FormatJSONParameters struct { + + // The specified key for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type GRPCIdleInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type GRPCIdleObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type GRPCIdleParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type GRPCPerRequestInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type GRPCPerRequestObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type GRPCPerRequestParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type HTTPIdleInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPIdleObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPIdleParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type HTTPPerRequestInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPPerRequestObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPPerRequestParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type Http2IdleInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type Http2IdleObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type Http2IdleParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type Http2PerRequestInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type Http2PerRequestObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type Http2PerRequestParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type IntervalInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type IntervalObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type IntervalParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type ListenerConnectionPoolInitParameters struct { + + // Connection pool information for gRPC listeners. + GRPC *ConnectionPoolGRPCInitParameters `json:"grpc,omitempty" tf:"grpc,omitempty"` + + // Connection pool information for HTTP listeners. + HTTP []ConnectionPoolHTTPInitParameters `json:"http,omitempty" tf:"http,omitempty"` + + // Connection pool information for HTTP2 listeners. + Http2 []ConnectionPoolHttp2InitParameters `json:"http2,omitempty" tf:"http2,omitempty"` + + // Connection pool information for TCP listeners. + TCP []TCPInitParameters `json:"tcp,omitempty" tf:"tcp,omitempty"` +} + +type ListenerConnectionPoolObservation struct { + + // Connection pool information for gRPC listeners. + GRPC *ConnectionPoolGRPCObservation `json:"grpc,omitempty" tf:"grpc,omitempty"` + + // Connection pool information for HTTP listeners. + HTTP []ConnectionPoolHTTPObservation `json:"http,omitempty" tf:"http,omitempty"` + + // Connection pool information for HTTP2 listeners. + Http2 []ConnectionPoolHttp2Observation `json:"http2,omitempty" tf:"http2,omitempty"` + + // Connection pool information for TCP listeners. + TCP []TCPObservation `json:"tcp,omitempty" tf:"tcp,omitempty"` +} + +type ListenerConnectionPoolParameters struct { + + // Connection pool information for gRPC listeners. + // +kubebuilder:validation:Optional + GRPC *ConnectionPoolGRPCParameters `json:"grpc,omitempty" tf:"grpc,omitempty"` + + // Connection pool information for HTTP listeners. 
+ // +kubebuilder:validation:Optional + HTTP []ConnectionPoolHTTPParameters `json:"http,omitempty" tf:"http,omitempty"` + + // Connection pool information for HTTP2 listeners. + // +kubebuilder:validation:Optional + Http2 []ConnectionPoolHttp2Parameters `json:"http2,omitempty" tf:"http2,omitempty"` + + // Connection pool information for TCP listeners. + // +kubebuilder:validation:Optional + TCP []TCPParameters `json:"tcp,omitempty" tf:"tcp,omitempty"` +} + +type ListenerHealthCheckInitParameters struct { + + // Number of consecutive successful health checks that must occur before declaring listener healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // Time period in milliseconds between each health check execution. + IntervalMillis *float64 `json:"intervalMillis,omitempty" tf:"interval_millis,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Amount of time to wait when receiving a response from the health check, in milliseconds. + TimeoutMillis *float64 `json:"timeoutMillis,omitempty" tf:"timeout_millis,omitempty"` + + // Number of consecutive failed health checks that must occur before declaring a virtual node unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type ListenerHealthCheckObservation struct { + + // Number of consecutive successful health checks that must occur before declaring listener healthy. 
+ HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // Time period in milliseconds between each health check execution. + IntervalMillis *float64 `json:"intervalMillis,omitempty" tf:"interval_millis,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Amount of time to wait when receiving a response from the health check, in milliseconds. + TimeoutMillis *float64 `json:"timeoutMillis,omitempty" tf:"timeout_millis,omitempty"` + + // Number of consecutive failed health checks that must occur before declaring a virtual node unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type ListenerHealthCheckParameters struct { + + // Number of consecutive successful health checks that must occur before declaring listener healthy. + // +kubebuilder:validation:Optional + HealthyThreshold *float64 `json:"healthyThreshold" tf:"healthy_threshold,omitempty"` + + // Time period in milliseconds between each health check execution. + // +kubebuilder:validation:Optional + IntervalMillis *float64 `json:"intervalMillis" tf:"interval_millis,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Port used for the port mapping. 
+ // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Amount of time to wait when receiving a response from the health check, in milliseconds. + // +kubebuilder:validation:Optional + TimeoutMillis *float64 `json:"timeoutMillis" tf:"timeout_millis,omitempty"` + + // Number of consecutive failed health checks that must occur before declaring a virtual node unhealthy. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold" tf:"unhealthy_threshold,omitempty"` +} + +type ListenerPortMappingInitParameters struct { + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type ListenerPortMappingObservation struct { + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type ListenerPortMappingParameters struct { + + // Port used for the port mapping. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http, http2, tcp and grpc. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` +} + +type ListenerTLSCertificateFileInitParameters struct { + + // Certificate chain for the certificate. 
+ CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type ListenerTLSCertificateFileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type ListenerTLSCertificateFileParameters struct { + + // Certificate chain for the certificate. + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + // +kubebuilder:validation:Optional + PrivateKey *string `json:"privateKey" tf:"private_key,omitempty"` +} + +type ListenerTLSCertificateInitParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + Acm *TLSCertificateAcmInitParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *ListenerTLSCertificateFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *ListenerTLSCertificateSdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ListenerTLSCertificateObservation struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + Acm *TLSCertificateAcmObservation `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *ListenerTLSCertificateFileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. 
+ Sds *ListenerTLSCertificateSdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ListenerTLSCertificateParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + // +kubebuilder:validation:Optional + Acm *TLSCertificateAcmParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + // +kubebuilder:validation:Optional + File *ListenerTLSCertificateFileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *ListenerTLSCertificateSdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ListenerTLSCertificateSdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ListenerTLSCertificateSdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ListenerTLSCertificateSdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type ListenerTLSValidationInitParameters struct { + + // SANs for a TLS validation context. + SubjectAlternativeNames *ListenerTLSValidationSubjectAlternativeNamesInitParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. 
+ Trust *ListenerTLSValidationTrustInitParameters `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type ListenerTLSValidationObservation struct { + + // SANs for a TLS validation context. + SubjectAlternativeNames *ListenerTLSValidationSubjectAlternativeNamesObservation `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + Trust *ListenerTLSValidationTrustObservation `json:"trust,omitempty" tf:"trust,omitempty"` +} + +type ListenerTLSValidationParameters struct { + + // SANs for a TLS validation context. + // +kubebuilder:validation:Optional + SubjectAlternativeNames *ListenerTLSValidationSubjectAlternativeNamesParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // TLS validation context trust. + // +kubebuilder:validation:Optional + Trust *ListenerTLSValidationTrustParameters `json:"trust" tf:"trust,omitempty"` +} + +type ListenerTLSValidationSubjectAlternativeNamesInitParameters struct { + + // Criteria for determining a SAN's match. + Match *ListenerTLSValidationSubjectAlternativeNamesMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` +} + +type ListenerTLSValidationSubjectAlternativeNamesMatchInitParameters struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type ListenerTLSValidationSubjectAlternativeNamesMatchObservation struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type ListenerTLSValidationSubjectAlternativeNamesMatchParameters struct { + + // Values sent must match the specified values exactly. 
+ // +kubebuilder:validation:Optional + // +listType=set + Exact []*string `json:"exact" tf:"exact,omitempty"` +} + +type ListenerTLSValidationSubjectAlternativeNamesObservation struct { + + // Criteria for determining a SAN's match. + Match *ListenerTLSValidationSubjectAlternativeNamesMatchObservation `json:"match,omitempty" tf:"match,omitempty"` +} + +type ListenerTLSValidationSubjectAlternativeNamesParameters struct { + + // Criteria for determining a SAN's match. + // +kubebuilder:validation:Optional + Match *ListenerTLSValidationSubjectAlternativeNamesMatchParameters `json:"match" tf:"match,omitempty"` +} + +type ListenerTLSValidationTrustFileInitParameters struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type ListenerTLSValidationTrustFileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type ListenerTLSValidationTrustFileParameters struct { + + // Certificate chain for the certificate. + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` +} + +type ListenerTLSValidationTrustInitParameters struct { + + // Local file certificate. + File *ListenerTLSValidationTrustFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *ListenerTLSValidationTrustSdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ListenerTLSValidationTrustObservation struct { + + // Local file certificate. + File *ListenerTLSValidationTrustFileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *ListenerTLSValidationTrustSdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ListenerTLSValidationTrustParameters struct { + + // Local file certificate. 
+ // +kubebuilder:validation:Optional + File *ListenerTLSValidationTrustFileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *ListenerTLSValidationTrustSdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type ListenerTLSValidationTrustSdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ListenerTLSValidationTrustSdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ListenerTLSValidationTrustSdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type ListenerTimeoutInitParameters struct { + + // Connection pool information for gRPC listeners. + GRPC *TimeoutGRPCInitParameters `json:"grpc,omitempty" tf:"grpc,omitempty"` + + // Connection pool information for HTTP listeners. + HTTP *TimeoutHTTPInitParameters `json:"http,omitempty" tf:"http,omitempty"` + + // Connection pool information for HTTP2 listeners. + Http2 *TimeoutHttp2InitParameters `json:"http2,omitempty" tf:"http2,omitempty"` + + // Connection pool information for TCP listeners. + TCP *TimeoutTCPInitParameters `json:"tcp,omitempty" tf:"tcp,omitempty"` +} + +type ListenerTimeoutObservation struct { + + // Connection pool information for gRPC listeners. 
+ GRPC *TimeoutGRPCObservation `json:"grpc,omitempty" tf:"grpc,omitempty"` + + // Connection pool information for HTTP listeners. + HTTP *TimeoutHTTPObservation `json:"http,omitempty" tf:"http,omitempty"` + + // Connection pool information for HTTP2 listeners. + Http2 *TimeoutHttp2Observation `json:"http2,omitempty" tf:"http2,omitempty"` + + // Connection pool information for TCP listeners. + TCP *TimeoutTCPObservation `json:"tcp,omitempty" tf:"tcp,omitempty"` +} + +type ListenerTimeoutParameters struct { + + // Connection pool information for gRPC listeners. + // +kubebuilder:validation:Optional + GRPC *TimeoutGRPCParameters `json:"grpc,omitempty" tf:"grpc,omitempty"` + + // Connection pool information for HTTP listeners. + // +kubebuilder:validation:Optional + HTTP *TimeoutHTTPParameters `json:"http,omitempty" tf:"http,omitempty"` + + // Connection pool information for HTTP2 listeners. + // +kubebuilder:validation:Optional + Http2 *TimeoutHttp2Parameters `json:"http2,omitempty" tf:"http2,omitempty"` + + // Connection pool information for TCP listeners. + // +kubebuilder:validation:Optional + TCP *TimeoutTCPParameters `json:"tcp,omitempty" tf:"tcp,omitempty"` +} + +type LoggingAccessLogFileInitParameters struct { + + // The specified format for the logs. + Format *FileFormatInitParameters `json:"format,omitempty" tf:"format,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type LoggingAccessLogFileObservation struct { + + // The specified format for the logs. + Format *FileFormatObservation `json:"format,omitempty" tf:"format,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type LoggingAccessLogFileParameters struct { + + // The specified format for the logs. + // +kubebuilder:validation:Optional + Format *FileFormatParameters `json:"format,omitempty" tf:"format,omitempty"` + + // File path to write access logs to. You can use /dev/stdout to send access logs to standard out. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type LoggingAccessLogInitParameters struct { + + // Local file certificate. + File *LoggingAccessLogFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` +} + +type LoggingAccessLogObservation struct { + + // Local file certificate. + File *LoggingAccessLogFileObservation `json:"file,omitempty" tf:"file,omitempty"` +} + +type LoggingAccessLogParameters struct { + + // Local file certificate. + // +kubebuilder:validation:Optional + File *LoggingAccessLogFileParameters `json:"file,omitempty" tf:"file,omitempty"` +} + +type OutlierDetectionInitParameters struct { + + // Base amount of time for which a host is ejected. + BaseEjectionDuration *BaseEjectionDurationInitParameters `json:"baseEjectionDuration,omitempty" tf:"base_ejection_duration,omitempty"` + + // Time interval between ejection sweep analysis. + Interval *IntervalInitParameters `json:"interval,omitempty" tf:"interval,omitempty"` + + // Maximum percentage of hosts in load balancing pool for upstream service that can be ejected. Will eject at least one host regardless of the value. + // Minimum value of 0. Maximum value of 100. + MaxEjectionPercent *float64 `json:"maxEjectionPercent,omitempty" tf:"max_ejection_percent,omitempty"` + + // Number of consecutive 5xx errors required for ejection. Minimum value of 1. 
+ MaxServerErrors *float64 `json:"maxServerErrors,omitempty" tf:"max_server_errors,omitempty"` +} + +type OutlierDetectionObservation struct { + + // Base amount of time for which a host is ejected. + BaseEjectionDuration *BaseEjectionDurationObservation `json:"baseEjectionDuration,omitempty" tf:"base_ejection_duration,omitempty"` + + // Time interval between ejection sweep analysis. + Interval *IntervalObservation `json:"interval,omitempty" tf:"interval,omitempty"` + + // Maximum percentage of hosts in load balancing pool for upstream service that can be ejected. Will eject at least one host regardless of the value. + // Minimum value of 0. Maximum value of 100. + MaxEjectionPercent *float64 `json:"maxEjectionPercent,omitempty" tf:"max_ejection_percent,omitempty"` + + // Number of consecutive 5xx errors required for ejection. Minimum value of 1. + MaxServerErrors *float64 `json:"maxServerErrors,omitempty" tf:"max_server_errors,omitempty"` +} + +type OutlierDetectionParameters struct { + + // Base amount of time for which a host is ejected. + // +kubebuilder:validation:Optional + BaseEjectionDuration *BaseEjectionDurationParameters `json:"baseEjectionDuration" tf:"base_ejection_duration,omitempty"` + + // Time interval between ejection sweep analysis. + // +kubebuilder:validation:Optional + Interval *IntervalParameters `json:"interval" tf:"interval,omitempty"` + + // Maximum percentage of hosts in load balancing pool for upstream service that can be ejected. Will eject at least one host regardless of the value. + // Minimum value of 0. Maximum value of 100. + // +kubebuilder:validation:Optional + MaxEjectionPercent *float64 `json:"maxEjectionPercent" tf:"max_ejection_percent,omitempty"` + + // Number of consecutive 5xx errors required for ejection. Minimum value of 1. 
+ // +kubebuilder:validation:Optional + MaxServerErrors *float64 `json:"maxServerErrors" tf:"max_server_errors,omitempty"` +} + +type SpecBackendDefaultsInitParameters struct { + + // Client policy for the backend. + ClientPolicy *BackendDefaultsClientPolicyInitParameters `json:"clientPolicy,omitempty" tf:"client_policy,omitempty"` +} + +type SpecBackendDefaultsObservation struct { + + // Client policy for the backend. + ClientPolicy *BackendDefaultsClientPolicyObservation `json:"clientPolicy,omitempty" tf:"client_policy,omitempty"` +} + +type SpecBackendDefaultsParameters struct { + + // Client policy for the backend. + // +kubebuilder:validation:Optional + ClientPolicy *BackendDefaultsClientPolicyParameters `json:"clientPolicy,omitempty" tf:"client_policy,omitempty"` +} + +type SpecListenerInitParameters struct { + + // Connection pool information for the listener. + ConnectionPool *ListenerConnectionPoolInitParameters `json:"connectionPool,omitempty" tf:"connection_pool,omitempty"` + + // Health check information for the listener. + HealthCheck *ListenerHealthCheckInitParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // Outlier detection information for the listener. + OutlierDetection *OutlierDetectionInitParameters `json:"outlierDetection,omitempty" tf:"outlier_detection,omitempty"` + + // Port mapping information for the listener. + PortMapping *ListenerPortMappingInitParameters `json:"portMapping,omitempty" tf:"port_mapping,omitempty"` + + // Transport Layer Security (TLS) client policy. + TLS *SpecListenerTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` + + // Timeouts for different protocols. + Timeout *ListenerTimeoutInitParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecListenerObservation struct { + + // Connection pool information for the listener. 
+ ConnectionPool *ListenerConnectionPoolObservation `json:"connectionPool,omitempty" tf:"connection_pool,omitempty"` + + // Health check information for the listener. + HealthCheck *ListenerHealthCheckObservation `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // Outlier detection information for the listener. + OutlierDetection *OutlierDetectionObservation `json:"outlierDetection,omitempty" tf:"outlier_detection,omitempty"` + + // Port mapping information for the listener. + PortMapping *ListenerPortMappingObservation `json:"portMapping,omitempty" tf:"port_mapping,omitempty"` + + // Transport Layer Security (TLS) client policy. + TLS *SpecListenerTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` + + // Timeouts for different protocols. + Timeout *ListenerTimeoutObservation `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecListenerParameters struct { + + // Connection pool information for the listener. + // +kubebuilder:validation:Optional + ConnectionPool *ListenerConnectionPoolParameters `json:"connectionPool,omitempty" tf:"connection_pool,omitempty"` + + // Health check information for the listener. + // +kubebuilder:validation:Optional + HealthCheck *ListenerHealthCheckParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // Outlier detection information for the listener. + // +kubebuilder:validation:Optional + OutlierDetection *OutlierDetectionParameters `json:"outlierDetection,omitempty" tf:"outlier_detection,omitempty"` + + // Port mapping information for the listener. + // +kubebuilder:validation:Optional + PortMapping *ListenerPortMappingParameters `json:"portMapping" tf:"port_mapping,omitempty"` + + // Transport Layer Security (TLS) client policy. + // +kubebuilder:validation:Optional + TLS *SpecListenerTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` + + // Timeouts for different protocols. 
+ // +kubebuilder:validation:Optional + Timeout *ListenerTimeoutParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpecListenerTLSInitParameters struct { + + // Virtual node's client's Transport Layer Security (TLS) certificate. + Certificate *ListenerTLSCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Listener's TLS mode. Valid values: DISABLED, PERMISSIVE, STRICT. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // TLS validation context. + Validation *ListenerTLSValidationInitParameters `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type SpecListenerTLSObservation struct { + + // Virtual node's client's Transport Layer Security (TLS) certificate. + Certificate *ListenerTLSCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Listener's TLS mode. Valid values: DISABLED, PERMISSIVE, STRICT. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // TLS validation context. + Validation *ListenerTLSValidationObservation `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type SpecListenerTLSParameters struct { + + // Virtual node's client's Transport Layer Security (TLS) certificate. + // +kubebuilder:validation:Optional + Certificate *ListenerTLSCertificateParameters `json:"certificate" tf:"certificate,omitempty"` + + // Listener's TLS mode. Valid values: DISABLED, PERMISSIVE, STRICT. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // TLS validation context. + // +kubebuilder:validation:Optional + Validation *ListenerTLSValidationParameters `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type SpecLoggingInitParameters struct { + + // Access log configuration for a virtual node. + AccessLog *LoggingAccessLogInitParameters `json:"accessLog,omitempty" tf:"access_log,omitempty"` +} + +type SpecLoggingObservation struct { + + // Access log configuration for a virtual node. 
+ AccessLog *LoggingAccessLogObservation `json:"accessLog,omitempty" tf:"access_log,omitempty"` +} + +type SpecLoggingParameters struct { + + // Access log configuration for a virtual node. + // +kubebuilder:validation:Optional + AccessLog *LoggingAccessLogParameters `json:"accessLog,omitempty" tf:"access_log,omitempty"` +} + +type SpecServiceDiscoveryInitParameters struct { + + // Any AWS Cloud Map information for the virtual node. + AwsCloudMap *AwsCloudMapInitParameters `json:"awsCloudMap,omitempty" tf:"aws_cloud_map,omitempty"` + + // DNS service name for the virtual node. + DNS *DNSInitParameters `json:"dns,omitempty" tf:"dns,omitempty"` +} + +type SpecServiceDiscoveryObservation struct { + + // Any AWS Cloud Map information for the virtual node. + AwsCloudMap *AwsCloudMapObservation `json:"awsCloudMap,omitempty" tf:"aws_cloud_map,omitempty"` + + // DNS service name for the virtual node. + DNS *DNSObservation `json:"dns,omitempty" tf:"dns,omitempty"` +} + +type SpecServiceDiscoveryParameters struct { + + // Any AWS Cloud Map information for the virtual node. + // +kubebuilder:validation:Optional + AwsCloudMap *AwsCloudMapParameters `json:"awsCloudMap,omitempty" tf:"aws_cloud_map,omitempty"` + + // DNS service name for the virtual node. + // +kubebuilder:validation:Optional + DNS *DNSParameters `json:"dns,omitempty" tf:"dns,omitempty"` +} + +type TCPIdleInitParameters struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TCPIdleObservation struct { + + // Unit of time. Valid values: ms, s. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TCPIdleParameters struct { + + // Unit of time. Valid values: ms, s. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The specified value for the JSON. Must be between 1 and 100 characters in length. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type TCPInitParameters struct { + + // Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in upstream cluster. Minimum value of 1. + MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` +} + +type TCPObservation struct { + + // Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in upstream cluster. Minimum value of 1. + MaxConnections *float64 `json:"maxConnections,omitempty" tf:"max_connections,omitempty"` +} + +type TCPParameters struct { + + // Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in upstream cluster. Minimum value of 1. + // +kubebuilder:validation:Optional + MaxConnections *float64 `json:"maxConnections" tf:"max_connections,omitempty"` +} + +type TLSCertificateAcmInitParameters struct { + + // ARN for the certificate. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` +} + +type TLSCertificateAcmObservation struct { + + // ARN for the certificate. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` +} + +type TLSCertificateAcmParameters struct { + + // ARN for the certificate. + // +kubebuilder:validation:Optional + CertificateArn *string `json:"certificateArn" tf:"certificate_arn,omitempty"` +} + +type TLSCertificateFileInitParameters struct { + + // Certificate chain for the certificate. 
+ CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type TLSCertificateFileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + PrivateKey *string `json:"privateKey,omitempty" tf:"private_key,omitempty"` +} + +type TLSCertificateFileParameters struct { + + // Certificate chain for the certificate. + // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` + + // Private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on. + // +kubebuilder:validation:Optional + PrivateKey *string `json:"privateKey" tf:"private_key,omitempty"` +} + +type TLSCertificateSdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type TLSCertificateSdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type TLSCertificateSdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. 
+ // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type TLSValidationSubjectAlternativeNamesInitParameters struct { + + // Criteria for determining a SAN's match. + Match *TLSValidationSubjectAlternativeNamesMatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` +} + +type TLSValidationSubjectAlternativeNamesMatchInitParameters struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type TLSValidationSubjectAlternativeNamesMatchObservation struct { + + // Values sent must match the specified values exactly. + // +listType=set + Exact []*string `json:"exact,omitempty" tf:"exact,omitempty"` +} + +type TLSValidationSubjectAlternativeNamesMatchParameters struct { + + // Values sent must match the specified values exactly. + // +kubebuilder:validation:Optional + // +listType=set + Exact []*string `json:"exact" tf:"exact,omitempty"` +} + +type TLSValidationSubjectAlternativeNamesObservation struct { + + // Criteria for determining a SAN's match. + Match *TLSValidationSubjectAlternativeNamesMatchObservation `json:"match,omitempty" tf:"match,omitempty"` +} + +type TLSValidationSubjectAlternativeNamesParameters struct { + + // Criteria for determining a SAN's match. + // +kubebuilder:validation:Optional + Match *TLSValidationSubjectAlternativeNamesMatchParameters `json:"match" tf:"match,omitempty"` +} + +type TLSValidationTrustFileInitParameters struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type TLSValidationTrustFileObservation struct { + + // Certificate chain for the certificate. + CertificateChain *string `json:"certificateChain,omitempty" tf:"certificate_chain,omitempty"` +} + +type TLSValidationTrustFileParameters struct { + + // Certificate chain for the certificate. 
+ // +kubebuilder:validation:Optional + CertificateChain *string `json:"certificateChain" tf:"certificate_chain,omitempty"` +} + +type TLSValidationTrustInitParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + Acm *TrustAcmInitParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *TLSValidationTrustFileInitParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *TLSValidationTrustSdsInitParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type TLSValidationTrustObservation struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + Acm *TrustAcmObservation `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + File *TLSValidationTrustFileObservation `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + Sds *TLSValidationTrustSdsObservation `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type TLSValidationTrustParameters struct { + + // TLS validation context trust for an AWS Certificate Manager (ACM) certificate. + // +kubebuilder:validation:Optional + Acm *TrustAcmParameters `json:"acm,omitempty" tf:"acm,omitempty"` + + // Local file certificate. + // +kubebuilder:validation:Optional + File *TLSValidationTrustFileParameters `json:"file,omitempty" tf:"file,omitempty"` + + // A Secret Discovery Service certificate. + // +kubebuilder:validation:Optional + Sds *TLSValidationTrustSdsParameters `json:"sds,omitempty" tf:"sds,omitempty"` +} + +type TLSValidationTrustSdsInitParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. 
+ SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type TLSValidationTrustSdsObservation struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type TLSValidationTrustSdsParameters struct { + + // Name of the secret secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type TimeoutGRPCInitParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *GRPCIdleInitParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + PerRequest *GRPCPerRequestInitParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutGRPCObservation struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *GRPCIdleObservation `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + PerRequest *GRPCPerRequestObservation `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutGRPCParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + // +kubebuilder:validation:Optional + Idle *GRPCIdleParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + // +kubebuilder:validation:Optional + PerRequest *GRPCPerRequestParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutHTTPInitParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. 
+ Idle *HTTPIdleInitParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + PerRequest *HTTPPerRequestInitParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutHTTPObservation struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *HTTPIdleObservation `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + PerRequest *HTTPPerRequestObservation `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutHTTPParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + // +kubebuilder:validation:Optional + Idle *HTTPIdleParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + // +kubebuilder:validation:Optional + PerRequest *HTTPPerRequestParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutHttp2InitParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *Http2IdleInitParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + PerRequest *Http2PerRequestInitParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutHttp2Observation struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *Http2IdleObservation `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. + PerRequest *Http2PerRequestObservation `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutHttp2Parameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + // +kubebuilder:validation:Optional + Idle *Http2IdleParameters `json:"idle,omitempty" tf:"idle,omitempty"` + + // Per request timeout. 
+ // +kubebuilder:validation:Optional + PerRequest *Http2PerRequestParameters `json:"perRequest,omitempty" tf:"per_request,omitempty"` +} + +type TimeoutTCPInitParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *TCPIdleInitParameters `json:"idle,omitempty" tf:"idle,omitempty"` +} + +type TimeoutTCPObservation struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + Idle *TCPIdleObservation `json:"idle,omitempty" tf:"idle,omitempty"` +} + +type TimeoutTCPParameters struct { + + // Idle timeout. An idle timeout bounds the amount of time that a connection may be idle. + // +kubebuilder:validation:Optional + Idle *TCPIdleParameters `json:"idle,omitempty" tf:"idle,omitempty"` +} + +type TrustAcmInitParameters struct { + + // One or more ACM ARNs. + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns,omitempty" tf:"certificate_authority_arns,omitempty"` +} + +type TrustAcmObservation struct { + + // One or more ACM ARNs. + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns,omitempty" tf:"certificate_authority_arns,omitempty"` +} + +type TrustAcmParameters struct { + + // One or more ACM ARNs. + // +kubebuilder:validation:Optional + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns" tf:"certificate_authority_arns,omitempty"` +} + +type ValidationTrustAcmInitParameters struct { + + // One or more ACM ARNs. + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns,omitempty" tf:"certificate_authority_arns,omitempty"` +} + +type ValidationTrustAcmObservation struct { + + // One or more ACM ARNs. + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns,omitempty" tf:"certificate_authority_arns,omitempty"` +} + +type ValidationTrustAcmParameters struct { + + // One or more ACM ARNs. 
+ // +kubebuilder:validation:Optional + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns" tf:"certificate_authority_arns,omitempty"` +} + +type VirtualNodeInitParameters struct { + + // Name of the service mesh in which to create the virtual node. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.Mesh + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // Reference to a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameRef *v1.Reference `json:"meshNameRef,omitempty" tf:"-"` + + // Selector for a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameSelector *v1.Selector `json:"meshNameSelector,omitempty" tf:"-"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual node. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Virtual node specification to apply. + Spec *VirtualNodeSpecInitParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualNodeObservation struct { + + // ARN of the virtual node. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Creation date of the virtual node. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // ID of the virtual node. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Last update date of the virtual node. 
+ LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` + + // Name of the service mesh in which to create the virtual node. Must be between 1 and 255 characters in length. + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual node. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Resource owner's AWS account ID. + ResourceOwner *string `json:"resourceOwner,omitempty" tf:"resource_owner,omitempty"` + + // Virtual node specification to apply. + Spec *VirtualNodeSpecObservation `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type VirtualNodeParameters struct { + + // Name of the service mesh in which to create the virtual node. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.Mesh + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // Reference to a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameRef *v1.Reference `json:"meshNameRef,omitempty" tf:"-"` + + // Selector for a Mesh in appmesh to populate meshName. 
+ // +kubebuilder:validation:Optional + MeshNameSelector *v1.Selector `json:"meshNameSelector,omitempty" tf:"-"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + // +kubebuilder:validation:Optional + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual node. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Virtual node specification to apply. + // +kubebuilder:validation:Optional + Spec *VirtualNodeSpecParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualNodeSpecInitParameters struct { + + // Backends to which the virtual node is expected to send outbound traffic. + Backend []BackendInitParameters `json:"backend,omitempty" tf:"backend,omitempty"` + + // Defaults for backends. + BackendDefaults *SpecBackendDefaultsInitParameters `json:"backendDefaults,omitempty" tf:"backend_defaults,omitempty"` + + // Listeners from which the virtual node is expected to receive inbound traffic. + Listener []SpecListenerInitParameters `json:"listener,omitempty" tf:"listener,omitempty"` + + // Inbound and outbound access logging information for the virtual node. + Logging *SpecLoggingInitParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // Service discovery information for the virtual node. 
+ ServiceDiscovery *SpecServiceDiscoveryInitParameters `json:"serviceDiscovery,omitempty" tf:"service_discovery,omitempty"` +} + +type VirtualNodeSpecObservation struct { + + // Backends to which the virtual node is expected to send outbound traffic. + Backend []BackendObservation `json:"backend,omitempty" tf:"backend,omitempty"` + + // Defaults for backends. + BackendDefaults *SpecBackendDefaultsObservation `json:"backendDefaults,omitempty" tf:"backend_defaults,omitempty"` + + // Listeners from which the virtual node is expected to receive inbound traffic. + Listener []SpecListenerObservation `json:"listener,omitempty" tf:"listener,omitempty"` + + // Inbound and outbound access logging information for the virtual node. + Logging *SpecLoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"` + + // Service discovery information for the virtual node. + ServiceDiscovery *SpecServiceDiscoveryObservation `json:"serviceDiscovery,omitempty" tf:"service_discovery,omitempty"` +} + +type VirtualNodeSpecParameters struct { + + // Backends to which the virtual node is expected to send outbound traffic. + // +kubebuilder:validation:Optional + Backend []BackendParameters `json:"backend,omitempty" tf:"backend,omitempty"` + + // Defaults for backends. + // +kubebuilder:validation:Optional + BackendDefaults *SpecBackendDefaultsParameters `json:"backendDefaults,omitempty" tf:"backend_defaults,omitempty"` + + // Listeners from which the virtual node is expected to receive inbound traffic. + // +kubebuilder:validation:Optional + Listener []SpecListenerParameters `json:"listener,omitempty" tf:"listener,omitempty"` + + // Inbound and outbound access logging information for the virtual node. + // +kubebuilder:validation:Optional + Logging *SpecLoggingParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // Service discovery information for the virtual node. 
+ // +kubebuilder:validation:Optional + ServiceDiscovery *SpecServiceDiscoveryParameters `json:"serviceDiscovery,omitempty" tf:"service_discovery,omitempty"` +} + +type VirtualServiceClientPolicyInitParameters struct { + + // Transport Layer Security (TLS) client policy. + TLS *ClientPolicyTLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type VirtualServiceClientPolicyObservation struct { + + // Transport Layer Security (TLS) client policy. + TLS *ClientPolicyTLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type VirtualServiceClientPolicyParameters struct { + + // Transport Layer Security (TLS) client policy. + // +kubebuilder:validation:Optional + TLS *ClientPolicyTLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +// VirtualNodeSpec defines the desired state of VirtualNode +type VirtualNodeSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualNodeParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VirtualNodeInitParameters `json:"initProvider,omitempty"` +} + +// VirtualNodeStatus defines the observed state of VirtualNode. 
+type VirtualNodeStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualNodeObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualNode is the Schema for the VirtualNodes API. Provides an AWS App Mesh virtual node resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VirtualNode struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spec) || (has(self.initProvider) && has(self.initProvider.spec))",message="spec.forProvider.spec is a required parameter" + Spec VirtualNodeSpec `json:"spec"` + Status VirtualNodeStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualNodeList contains a list of VirtualNodes +type VirtualNodeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualNode `json:"items"` +} + +// Repository type metadata. 
+var ( + VirtualNode_Kind = "VirtualNode" + VirtualNode_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualNode_Kind}.String() + VirtualNode_KindAPIVersion = VirtualNode_Kind + "." + CRDGroupVersion.String() + VirtualNode_GroupVersionKind = CRDGroupVersion.WithKind(VirtualNode_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualNode{}, &VirtualNodeList{}) +} diff --git a/apis/appmesh/v1beta2/zz_virtualrouter_terraformed.go b/apis/appmesh/v1beta2/zz_virtualrouter_terraformed.go new file mode 100755 index 0000000000..8c881c6f16 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_virtualrouter_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualRouter +func (mg *VirtualRouter) GetTerraformResourceType() string { + return "aws_appmesh_virtual_router" +} + +// GetConnectionDetailsMapping for this VirtualRouter +func (tr *VirtualRouter) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VirtualRouter +func (tr *VirtualRouter) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualRouter +func (tr *VirtualRouter) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualRouter +func (tr *VirtualRouter) GetID() string { + if tr.Status.AtProvider.ID == nil { + 
return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualRouter +func (tr *VirtualRouter) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualRouter +func (tr *VirtualRouter) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualRouter +func (tr *VirtualRouter) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this VirtualRouter +func (tr *VirtualRouter) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualRouter using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VirtualRouter) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualRouterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualRouter) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appmesh/v1beta2/zz_virtualrouter_types.go b/apis/appmesh/v1beta2/zz_virtualrouter_types.go new file mode 100755 index 0000000000..40f5793b53 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_virtualrouter_types.go @@ -0,0 +1,249 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SpecListenerPortMappingInitParameters struct { + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http,http2, tcp and grpc. 
+ Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type SpecListenerPortMappingObservation struct { + + // Port used for the port mapping. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http,http2, tcp and grpc. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type SpecListenerPortMappingParameters struct { + + // Port used for the port mapping. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` + + // Protocol used for the port mapping. Valid values are http,http2, tcp and grpc. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` +} + +type VirtualRouterInitParameters struct { + + // Name of the service mesh in which to create the virtual router. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.Mesh + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // Reference to a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameRef *v1.Reference `json:"meshNameRef,omitempty" tf:"-"` + + // Selector for a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameSelector *v1.Selector `json:"meshNameSelector,omitempty" tf:"-"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual router. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Virtual router specification to apply. 
+ Spec *VirtualRouterSpecInitParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualRouterObservation struct { + + // ARN of the virtual router. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Creation date of the virtual router. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // ID of the virtual router. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Last update date of the virtual router. + LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` + + // Name of the service mesh in which to create the virtual router. Must be between 1 and 255 characters in length. + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual router. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Resource owner's AWS account ID. + ResourceOwner *string `json:"resourceOwner,omitempty" tf:"resource_owner,omitempty"` + + // Virtual router specification to apply. + Spec *VirtualRouterSpecObservation `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type VirtualRouterParameters struct { + + // Name of the service mesh in which to create the virtual router. 
Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.Mesh + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // Reference to a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameRef *v1.Reference `json:"meshNameRef,omitempty" tf:"-"` + + // Selector for a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameSelector *v1.Selector `json:"meshNameSelector,omitempty" tf:"-"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + // +kubebuilder:validation:Optional + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual router. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Virtual router specification to apply. + // +kubebuilder:validation:Optional + Spec *VirtualRouterSpecParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualRouterSpecInitParameters struct { + + // configuration block to the spec argument. + Listener []VirtualRouterSpecListenerInitParameters `json:"listener,omitempty" tf:"listener,omitempty"` +} + +type VirtualRouterSpecListenerInitParameters struct { + + // Port mapping information for the listener. 
+ PortMapping *SpecListenerPortMappingInitParameters `json:"portMapping,omitempty" tf:"port_mapping,omitempty"` +} + +type VirtualRouterSpecListenerObservation struct { + + // Port mapping information for the listener. + PortMapping *SpecListenerPortMappingObservation `json:"portMapping,omitempty" tf:"port_mapping,omitempty"` +} + +type VirtualRouterSpecListenerParameters struct { + + // Port mapping information for the listener. + // +kubebuilder:validation:Optional + PortMapping *SpecListenerPortMappingParameters `json:"portMapping" tf:"port_mapping,omitempty"` +} + +type VirtualRouterSpecObservation struct { + + // configuration block to the spec argument. + Listener []VirtualRouterSpecListenerObservation `json:"listener,omitempty" tf:"listener,omitempty"` +} + +type VirtualRouterSpecParameters struct { + + // configuration block to the spec argument. + // +kubebuilder:validation:Optional + Listener []VirtualRouterSpecListenerParameters `json:"listener,omitempty" tf:"listener,omitempty"` +} + +// VirtualRouterSpec defines the desired state of VirtualRouter +type VirtualRouterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualRouterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider VirtualRouterInitParameters `json:"initProvider,omitempty"` +} + +// VirtualRouterStatus defines the observed state of VirtualRouter. +type VirtualRouterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualRouterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualRouter is the Schema for the VirtualRouters API. Provides an AWS App Mesh virtual router resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VirtualRouter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spec) || (has(self.initProvider) && has(self.initProvider.spec))",message="spec.forProvider.spec is a required parameter" + Spec VirtualRouterSpec `json:"spec"` + Status VirtualRouterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualRouterList contains a list of VirtualRouters +type VirtualRouterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta 
`json:"metadata,omitempty"` + Items []VirtualRouter `json:"items"` +} + +// Repository type metadata. +var ( + VirtualRouter_Kind = "VirtualRouter" + VirtualRouter_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualRouter_Kind}.String() + VirtualRouter_KindAPIVersion = VirtualRouter_Kind + "." + CRDGroupVersion.String() + VirtualRouter_GroupVersionKind = CRDGroupVersion.WithKind(VirtualRouter_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualRouter{}, &VirtualRouterList{}) +} diff --git a/apis/appmesh/v1beta2/zz_virtualservice_terraformed.go b/apis/appmesh/v1beta2/zz_virtualservice_terraformed.go new file mode 100755 index 0000000000..bb6b8fa720 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_virtualservice_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualService +func (mg *VirtualService) GetTerraformResourceType() string { + return "aws_appmesh_virtual_service" +} + +// GetConnectionDetailsMapping for this VirtualService +func (tr *VirtualService) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VirtualService +func (tr *VirtualService) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualService +func (tr *VirtualService) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of 
underlying Terraform resource of this VirtualService +func (tr *VirtualService) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualService +func (tr *VirtualService) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualService +func (tr *VirtualService) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualService +func (tr *VirtualService) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VirtualService +func (tr *VirtualService) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualService using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VirtualService) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualServiceParameters_2{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualService) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appmesh/v1beta2/zz_virtualservice_types.go b/apis/appmesh/v1beta2/zz_virtualservice_types.go new file mode 100755 index 0000000000..06f17063d1 --- /dev/null +++ b/apis/appmesh/v1beta2/zz_virtualservice_types.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProviderInitParameters struct { + + // Virtual node associated with a virtual service. + VirtualNode *ProviderVirtualNodeInitParameters `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Virtual router associated with a virtual service. 
+ VirtualRouter *ProviderVirtualRouterInitParameters `json:"virtualRouter,omitempty" tf:"virtual_router,omitempty"` +} + +type ProviderObservation struct { + + // Virtual node associated with a virtual service. + VirtualNode *ProviderVirtualNodeObservation `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Virtual router associated with a virtual service. + VirtualRouter *ProviderVirtualRouterObservation `json:"virtualRouter,omitempty" tf:"virtual_router,omitempty"` +} + +type ProviderParameters struct { + + // Virtual node associated with a virtual service. + // +kubebuilder:validation:Optional + VirtualNode *ProviderVirtualNodeParameters `json:"virtualNode,omitempty" tf:"virtual_node,omitempty"` + + // Virtual router associated with a virtual service. + // +kubebuilder:validation:Optional + VirtualRouter *ProviderVirtualRouterParameters `json:"virtualRouter,omitempty" tf:"virtual_router,omitempty"` +} + +type ProviderVirtualNodeInitParameters struct { + + // Name of the virtual node that is acting as a service provider. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualNode + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + VirtualNodeName *string `json:"virtualNodeName,omitempty" tf:"virtual_node_name,omitempty"` + + // Reference to a VirtualNode in appmesh to populate virtualNodeName. + // +kubebuilder:validation:Optional + VirtualNodeNameRef *v1.Reference `json:"virtualNodeNameRef,omitempty" tf:"-"` + + // Selector for a VirtualNode in appmesh to populate virtualNodeName. + // +kubebuilder:validation:Optional + VirtualNodeNameSelector *v1.Selector `json:"virtualNodeNameSelector,omitempty" tf:"-"` +} + +type ProviderVirtualNodeObservation struct { + + // Name of the virtual node that is acting as a service provider. Must be between 1 and 255 characters in length. 
+ VirtualNodeName *string `json:"virtualNodeName,omitempty" tf:"virtual_node_name,omitempty"` +} + +type ProviderVirtualNodeParameters struct { + + // Name of the virtual node that is acting as a service provider. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualNode + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + VirtualNodeName *string `json:"virtualNodeName,omitempty" tf:"virtual_node_name,omitempty"` + + // Reference to a VirtualNode in appmesh to populate virtualNodeName. + // +kubebuilder:validation:Optional + VirtualNodeNameRef *v1.Reference `json:"virtualNodeNameRef,omitempty" tf:"-"` + + // Selector for a VirtualNode in appmesh to populate virtualNodeName. + // +kubebuilder:validation:Optional + VirtualNodeNameSelector *v1.Selector `json:"virtualNodeNameSelector,omitempty" tf:"-"` +} + +type ProviderVirtualRouterInitParameters struct { + + // Name of the virtual router that is acting as a service provider. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualRouter + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + VirtualRouterName *string `json:"virtualRouterName,omitempty" tf:"virtual_router_name,omitempty"` + + // Reference to a VirtualRouter in appmesh to populate virtualRouterName. + // +kubebuilder:validation:Optional + VirtualRouterNameRef *v1.Reference `json:"virtualRouterNameRef,omitempty" tf:"-"` + + // Selector for a VirtualRouter in appmesh to populate virtualRouterName. 
+ // +kubebuilder:validation:Optional + VirtualRouterNameSelector *v1.Selector `json:"virtualRouterNameSelector,omitempty" tf:"-"` +} + +type ProviderVirtualRouterObservation struct { + + // Name of the virtual router that is acting as a service provider. Must be between 1 and 255 characters in length. + VirtualRouterName *string `json:"virtualRouterName,omitempty" tf:"virtual_router_name,omitempty"` +} + +type ProviderVirtualRouterParameters struct { + + // Name of the virtual router that is acting as a service provider. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.VirtualRouter + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + VirtualRouterName *string `json:"virtualRouterName,omitempty" tf:"virtual_router_name,omitempty"` + + // Reference to a VirtualRouter in appmesh to populate virtualRouterName. + // +kubebuilder:validation:Optional + VirtualRouterNameRef *v1.Reference `json:"virtualRouterNameRef,omitempty" tf:"-"` + + // Selector for a VirtualRouter in appmesh to populate virtualRouterName. + // +kubebuilder:validation:Optional + VirtualRouterNameSelector *v1.Selector `json:"virtualRouterNameSelector,omitempty" tf:"-"` +} + +type VirtualServiceInitParameters_2 struct { + + // Name of the service mesh in which to create the virtual service. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.Mesh + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // Reference to a Mesh in appmesh to populate meshName. 
+ // +kubebuilder:validation:Optional + MeshNameRef *v1.Reference `json:"meshNameRef,omitempty" tf:"-"` + + // Selector for a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameSelector *v1.Selector `json:"meshNameSelector,omitempty" tf:"-"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual service. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Virtual service specification to apply. + Spec *VirtualServiceSpecInitParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualServiceObservation_2 struct { + + // ARN of the virtual service. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Creation date of the virtual service. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // ID of the virtual service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Last update date of the virtual service. + LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` + + // Name of the service mesh in which to create the virtual service. Must be between 1 and 255 characters in length. + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual service. Must be between 1 and 255 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Resource owner's AWS account ID. 
+ ResourceOwner *string `json:"resourceOwner,omitempty" tf:"resource_owner,omitempty"` + + // Virtual service specification to apply. + Spec *VirtualServiceSpecObservation `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type VirtualServiceParameters_2 struct { + + // Name of the service mesh in which to create the virtual service. Must be between 1 and 255 characters in length. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appmesh/v1beta2.Mesh + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MeshName *string `json:"meshName,omitempty" tf:"mesh_name,omitempty"` + + // Reference to a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameRef *v1.Reference `json:"meshNameRef,omitempty" tf:"-"` + + // Selector for a Mesh in appmesh to populate meshName. + // +kubebuilder:validation:Optional + MeshNameSelector *v1.Selector `json:"meshNameSelector,omitempty" tf:"-"` + + // AWS account ID of the service mesh's owner. Defaults to the account ID the AWS provider is currently connected to. + // +kubebuilder:validation:Optional + MeshOwner *string `json:"meshOwner,omitempty" tf:"mesh_owner,omitempty"` + + // Name to use for the virtual service. Must be between 1 and 255 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Virtual service specification to apply. + // +kubebuilder:validation:Optional + Spec *VirtualServiceSpecParameters `json:"spec,omitempty" tf:"spec,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualServiceSpecInitParameters struct { + + // App Mesh object that is acting as the provider for a virtual service. You can specify a single virtual node or virtual router. + Provider *ProviderInitParameters `json:"provider,omitempty" tf:"provider,omitempty"` +} + +type VirtualServiceSpecObservation struct { + + // App Mesh object that is acting as the provider for a virtual service. You can specify a single virtual node or virtual router. + Provider *ProviderObservation `json:"provider,omitempty" tf:"provider,omitempty"` +} + +type VirtualServiceSpecParameters struct { + + // App Mesh object that is acting as the provider for a virtual service. You can specify a single virtual node or virtual router. + // +kubebuilder:validation:Optional + Provider *ProviderParameters `json:"provider,omitempty" tf:"provider,omitempty"` +} + +// VirtualServiceSpec defines the desired state of VirtualService +type VirtualServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualServiceParameters_2 `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VirtualServiceInitParameters_2 `json:"initProvider,omitempty"` +} + +// VirtualServiceStatus defines the observed state of VirtualService. +type VirtualServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualServiceObservation_2 `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualService is the Schema for the VirtualServices API. Provides an AWS App Mesh virtual service resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VirtualService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spec) || (has(self.initProvider) && has(self.initProvider.spec))",message="spec.forProvider.spec is a required parameter" + Spec VirtualServiceSpec `json:"spec"` + Status 
VirtualServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualServiceList contains a list of VirtualServices +type VirtualServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualService `json:"items"` +} + +// Repository type metadata. +var ( + VirtualService_Kind = "VirtualService" + VirtualService_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualService_Kind}.String() + VirtualService_KindAPIVersion = VirtualService_Kind + "." + CRDGroupVersion.String() + VirtualService_GroupVersionKind = CRDGroupVersion.WithKind(VirtualService_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualService{}, &VirtualServiceList{}) +} diff --git a/apis/apprunner/v1beta1/zz_generated.conversion_hubs.go b/apis/apprunner/v1beta1/zz_generated.conversion_hubs.go index 3196a9c795..d4ffe102ef 100755 --- a/apis/apprunner/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/apprunner/v1beta1/zz_generated.conversion_hubs.go @@ -12,11 +12,5 @@ func (tr *AutoScalingConfigurationVersion) Hub() {} // Hub marks this type as a conversion hub. func (tr *Connection) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ObservabilityConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Service) Hub() {} - // Hub marks this type as a conversion hub. func (tr *VPCConnector) Hub() {} diff --git a/apis/apprunner/v1beta1/zz_generated.conversion_spokes.go b/apis/apprunner/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..d86f90de95 --- /dev/null +++ b/apis/apprunner/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ObservabilityConfiguration to the hub type. +func (tr *ObservabilityConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ObservabilityConfiguration type. +func (tr *ObservabilityConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Service to the hub type. +func (tr *Service) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Service type. 
+func (tr *Service) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/apprunner/v1beta2/zz_generated.conversion_hubs.go b/apis/apprunner/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..3bf19c8e00 --- /dev/null +++ b/apis/apprunner/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ObservabilityConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Service) Hub() {} diff --git a/apis/apprunner/v1beta2/zz_generated.deepcopy.go b/apis/apprunner/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..226d5ed78a --- /dev/null +++ b/apis/apprunner/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2232 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationConfigurationInitParameters) DeepCopyInto(out *AuthenticationConfigurationInitParameters) { + *out = *in + if in.AccessRoleArn != nil { + in, out := &in.AccessRoleArn, &out.AccessRoleArn + *out = new(string) + **out = **in + } + if in.ConnectionArn != nil { + in, out := &in.ConnectionArn, &out.ConnectionArn + *out = new(string) + **out = **in + } + if in.ConnectionArnRef != nil { + in, out := &in.ConnectionArnRef, &out.ConnectionArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionArnSelector != nil { + in, out := &in.ConnectionArnSelector, &out.ConnectionArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationInitParameters. +func (in *AuthenticationConfigurationInitParameters) DeepCopy() *AuthenticationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationObservation) DeepCopyInto(out *AuthenticationConfigurationObservation) { + *out = *in + if in.AccessRoleArn != nil { + in, out := &in.AccessRoleArn, &out.AccessRoleArn + *out = new(string) + **out = **in + } + if in.ConnectionArn != nil { + in, out := &in.ConnectionArn, &out.ConnectionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationObservation. 
+func (in *AuthenticationConfigurationObservation) DeepCopy() *AuthenticationConfigurationObservation { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationParameters) DeepCopyInto(out *AuthenticationConfigurationParameters) { + *out = *in + if in.AccessRoleArn != nil { + in, out := &in.AccessRoleArn, &out.AccessRoleArn + *out = new(string) + **out = **in + } + if in.ConnectionArn != nil { + in, out := &in.ConnectionArn, &out.ConnectionArn + *out = new(string) + **out = **in + } + if in.ConnectionArnRef != nil { + in, out := &in.ConnectionArnRef, &out.ConnectionArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionArnSelector != nil { + in, out := &in.ConnectionArnSelector, &out.ConnectionArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationParameters. +func (in *AuthenticationConfigurationParameters) DeepCopy() *AuthenticationConfigurationParameters { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeConfigurationInitParameters) DeepCopyInto(out *CodeConfigurationInitParameters) { + *out = *in + if in.CodeConfigurationValues != nil { + in, out := &in.CodeConfigurationValues, &out.CodeConfigurationValues + *out = new(CodeConfigurationValuesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationSource != nil { + in, out := &in.ConfigurationSource, &out.ConfigurationSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeConfigurationInitParameters. +func (in *CodeConfigurationInitParameters) DeepCopy() *CodeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CodeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeConfigurationObservation) DeepCopyInto(out *CodeConfigurationObservation) { + *out = *in + if in.CodeConfigurationValues != nil { + in, out := &in.CodeConfigurationValues, &out.CodeConfigurationValues + *out = new(CodeConfigurationValuesObservation) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationSource != nil { + in, out := &in.ConfigurationSource, &out.ConfigurationSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeConfigurationObservation. +func (in *CodeConfigurationObservation) DeepCopy() *CodeConfigurationObservation { + if in == nil { + return nil + } + out := new(CodeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeConfigurationParameters) DeepCopyInto(out *CodeConfigurationParameters) { + *out = *in + if in.CodeConfigurationValues != nil { + in, out := &in.CodeConfigurationValues, &out.CodeConfigurationValues + *out = new(CodeConfigurationValuesParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationSource != nil { + in, out := &in.ConfigurationSource, &out.ConfigurationSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeConfigurationParameters. +func (in *CodeConfigurationParameters) DeepCopy() *CodeConfigurationParameters { + if in == nil { + return nil + } + out := new(CodeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeConfigurationValuesInitParameters) DeepCopyInto(out *CodeConfigurationValuesInitParameters) { + *out = *in + if in.BuildCommand != nil { + in, out := &in.BuildCommand, &out.BuildCommand + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.RuntimeEnvironmentSecrets != nil { + in, out := &in.RuntimeEnvironmentSecrets, &out.RuntimeEnvironmentSecrets + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RuntimeEnvironmentVariables != nil { + in, out := &in.RuntimeEnvironmentVariables, &out.RuntimeEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := 
&inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StartCommand != nil { + in, out := &in.StartCommand, &out.StartCommand + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeConfigurationValuesInitParameters. +func (in *CodeConfigurationValuesInitParameters) DeepCopy() *CodeConfigurationValuesInitParameters { + if in == nil { + return nil + } + out := new(CodeConfigurationValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeConfigurationValuesObservation) DeepCopyInto(out *CodeConfigurationValuesObservation) { + *out = *in + if in.BuildCommand != nil { + in, out := &in.BuildCommand, &out.BuildCommand + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.RuntimeEnvironmentSecrets != nil { + in, out := &in.RuntimeEnvironmentSecrets, &out.RuntimeEnvironmentSecrets + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RuntimeEnvironmentVariables != nil { + in, out := &in.RuntimeEnvironmentVariables, &out.RuntimeEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StartCommand != nil { + in, out := &in.StartCommand, &out.StartCommand + *out = new(string) + **out 
= **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeConfigurationValuesObservation. +func (in *CodeConfigurationValuesObservation) DeepCopy() *CodeConfigurationValuesObservation { + if in == nil { + return nil + } + out := new(CodeConfigurationValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeConfigurationValuesParameters) DeepCopyInto(out *CodeConfigurationValuesParameters) { + *out = *in + if in.BuildCommand != nil { + in, out := &in.BuildCommand, &out.BuildCommand + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.RuntimeEnvironmentSecrets != nil { + in, out := &in.RuntimeEnvironmentSecrets, &out.RuntimeEnvironmentSecrets + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RuntimeEnvironmentVariables != nil { + in, out := &in.RuntimeEnvironmentVariables, &out.RuntimeEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StartCommand != nil { + in, out := &in.StartCommand, &out.StartCommand + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeConfigurationValuesParameters. 
+func (in *CodeConfigurationValuesParameters) DeepCopy() *CodeConfigurationValuesParameters { + if in == nil { + return nil + } + out := new(CodeConfigurationValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeRepositoryInitParameters) DeepCopyInto(out *CodeRepositoryInitParameters) { + *out = *in + if in.CodeConfiguration != nil { + in, out := &in.CodeConfiguration, &out.CodeConfiguration + *out = new(CodeConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } + if in.SourceCodeVersion != nil { + in, out := &in.SourceCodeVersion, &out.SourceCodeVersion + *out = new(SourceCodeVersionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceDirectory != nil { + in, out := &in.SourceDirectory, &out.SourceDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepositoryInitParameters. +func (in *CodeRepositoryInitParameters) DeepCopy() *CodeRepositoryInitParameters { + if in == nil { + return nil + } + out := new(CodeRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeRepositoryObservation) DeepCopyInto(out *CodeRepositoryObservation) { + *out = *in + if in.CodeConfiguration != nil { + in, out := &in.CodeConfiguration, &out.CodeConfiguration + *out = new(CodeConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } + if in.SourceCodeVersion != nil { + in, out := &in.SourceCodeVersion, &out.SourceCodeVersion + *out = new(SourceCodeVersionObservation) + (*in).DeepCopyInto(*out) + } + if in.SourceDirectory != nil { + in, out := &in.SourceDirectory, &out.SourceDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepositoryObservation. +func (in *CodeRepositoryObservation) DeepCopy() *CodeRepositoryObservation { + if in == nil { + return nil + } + out := new(CodeRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeRepositoryParameters) DeepCopyInto(out *CodeRepositoryParameters) { + *out = *in + if in.CodeConfiguration != nil { + in, out := &in.CodeConfiguration, &out.CodeConfiguration + *out = new(CodeConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } + if in.SourceCodeVersion != nil { + in, out := &in.SourceCodeVersion, &out.SourceCodeVersion + *out = new(SourceCodeVersionParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceDirectory != nil { + in, out := &in.SourceDirectory, &out.SourceDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepositoryParameters. 
+func (in *CodeRepositoryParameters) DeepCopy() *CodeRepositoryParameters { + if in == nil { + return nil + } + out := new(CodeRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressConfigurationInitParameters) DeepCopyInto(out *EgressConfigurationInitParameters) { + *out = *in + if in.EgressType != nil { + in, out := &in.EgressType, &out.EgressType + *out = new(string) + **out = **in + } + if in.VPCConnectorArn != nil { + in, out := &in.VPCConnectorArn, &out.VPCConnectorArn + *out = new(string) + **out = **in + } + if in.VPCConnectorArnRef != nil { + in, out := &in.VPCConnectorArnRef, &out.VPCConnectorArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCConnectorArnSelector != nil { + in, out := &in.VPCConnectorArnSelector, &out.VPCConnectorArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressConfigurationInitParameters. +func (in *EgressConfigurationInitParameters) DeepCopy() *EgressConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EgressConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressConfigurationObservation) DeepCopyInto(out *EgressConfigurationObservation) { + *out = *in + if in.EgressType != nil { + in, out := &in.EgressType, &out.EgressType + *out = new(string) + **out = **in + } + if in.VPCConnectorArn != nil { + in, out := &in.VPCConnectorArn, &out.VPCConnectorArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressConfigurationObservation. 
+func (in *EgressConfigurationObservation) DeepCopy() *EgressConfigurationObservation { + if in == nil { + return nil + } + out := new(EgressConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressConfigurationParameters) DeepCopyInto(out *EgressConfigurationParameters) { + *out = *in + if in.EgressType != nil { + in, out := &in.EgressType, &out.EgressType + *out = new(string) + **out = **in + } + if in.VPCConnectorArn != nil { + in, out := &in.VPCConnectorArn, &out.VPCConnectorArn + *out = new(string) + **out = **in + } + if in.VPCConnectorArnRef != nil { + in, out := &in.VPCConnectorArnRef, &out.VPCConnectorArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCConnectorArnSelector != nil { + in, out := &in.VPCConnectorArnSelector, &out.VPCConnectorArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressConfigurationParameters. +func (in *EgressConfigurationParameters) DeepCopy() *EgressConfigurationParameters { + if in == nil { + return nil + } + out := new(EgressConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationInitParameters) DeepCopyInto(out *EncryptionConfigurationInitParameters) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationInitParameters. 
+func (in *EncryptionConfigurationInitParameters) DeepCopy() *EncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationObservation) DeepCopyInto(out *EncryptionConfigurationObservation) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationObservation. +func (in *EncryptionConfigurationObservation) DeepCopy() *EncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationParameters) DeepCopyInto(out *EncryptionConfigurationParameters) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationParameters. +func (in *EncryptionConfigurationParameters) DeepCopy() *EncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckConfigurationInitParameters) DeepCopyInto(out *HealthCheckConfigurationInitParameters) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckConfigurationInitParameters. +func (in *HealthCheckConfigurationInitParameters) DeepCopy() *HealthCheckConfigurationInitParameters { + if in == nil { + return nil + } + out := new(HealthCheckConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckConfigurationObservation) DeepCopyInto(out *HealthCheckConfigurationObservation) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckConfigurationObservation. +func (in *HealthCheckConfigurationObservation) DeepCopy() *HealthCheckConfigurationObservation { + if in == nil { + return nil + } + out := new(HealthCheckConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckConfigurationParameters) DeepCopyInto(out *HealthCheckConfigurationParameters) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckConfigurationParameters. +func (in *HealthCheckConfigurationParameters) DeepCopy() *HealthCheckConfigurationParameters { + if in == nil { + return nil + } + out := new(HealthCheckConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageConfigurationInitParameters) DeepCopyInto(out *ImageConfigurationInitParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.RuntimeEnvironmentSecrets != nil { + in, out := &in.RuntimeEnvironmentSecrets, &out.RuntimeEnvironmentSecrets + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RuntimeEnvironmentVariables != nil { + in, out := &in.RuntimeEnvironmentVariables, &out.RuntimeEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StartCommand != nil { + in, out := &in.StartCommand, &out.StartCommand + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigurationInitParameters. +func (in *ImageConfigurationInitParameters) DeepCopy() *ImageConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ImageConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageConfigurationObservation) DeepCopyInto(out *ImageConfigurationObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.RuntimeEnvironmentSecrets != nil { + in, out := &in.RuntimeEnvironmentSecrets, &out.RuntimeEnvironmentSecrets + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RuntimeEnvironmentVariables != nil { + in, out := &in.RuntimeEnvironmentVariables, &out.RuntimeEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StartCommand != nil { + in, out := &in.StartCommand, &out.StartCommand + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigurationObservation. +func (in *ImageConfigurationObservation) DeepCopy() *ImageConfigurationObservation { + if in == nil { + return nil + } + out := new(ImageConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageConfigurationParameters) DeepCopyInto(out *ImageConfigurationParameters) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.RuntimeEnvironmentSecrets != nil { + in, out := &in.RuntimeEnvironmentSecrets, &out.RuntimeEnvironmentSecrets + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RuntimeEnvironmentVariables != nil { + in, out := &in.RuntimeEnvironmentVariables, &out.RuntimeEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StartCommand != nil { + in, out := &in.StartCommand, &out.StartCommand + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigurationParameters. +func (in *ImageConfigurationParameters) DeepCopy() *ImageConfigurationParameters { + if in == nil { + return nil + } + out := new(ImageConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRepositoryInitParameters) DeepCopyInto(out *ImageRepositoryInitParameters) { + *out = *in + if in.ImageConfiguration != nil { + in, out := &in.ImageConfiguration, &out.ImageConfiguration + *out = new(ImageConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageIdentifier != nil { + in, out := &in.ImageIdentifier, &out.ImageIdentifier + *out = new(string) + **out = **in + } + if in.ImageRepositoryType != nil { + in, out := &in.ImageRepositoryType, &out.ImageRepositoryType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRepositoryInitParameters. +func (in *ImageRepositoryInitParameters) DeepCopy() *ImageRepositoryInitParameters { + if in == nil { + return nil + } + out := new(ImageRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRepositoryObservation) DeepCopyInto(out *ImageRepositoryObservation) { + *out = *in + if in.ImageConfiguration != nil { + in, out := &in.ImageConfiguration, &out.ImageConfiguration + *out = new(ImageConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageIdentifier != nil { + in, out := &in.ImageIdentifier, &out.ImageIdentifier + *out = new(string) + **out = **in + } + if in.ImageRepositoryType != nil { + in, out := &in.ImageRepositoryType, &out.ImageRepositoryType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRepositoryObservation. +func (in *ImageRepositoryObservation) DeepCopy() *ImageRepositoryObservation { + if in == nil { + return nil + } + out := new(ImageRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRepositoryParameters) DeepCopyInto(out *ImageRepositoryParameters) { + *out = *in + if in.ImageConfiguration != nil { + in, out := &in.ImageConfiguration, &out.ImageConfiguration + *out = new(ImageConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageIdentifier != nil { + in, out := &in.ImageIdentifier, &out.ImageIdentifier + *out = new(string) + **out = **in + } + if in.ImageRepositoryType != nil { + in, out := &in.ImageRepositoryType, &out.ImageRepositoryType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRepositoryParameters. +func (in *ImageRepositoryParameters) DeepCopy() *ImageRepositoryParameters { + if in == nil { + return nil + } + out := new(ImageRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressConfigurationInitParameters) DeepCopyInto(out *IngressConfigurationInitParameters) { + *out = *in + if in.IsPubliclyAccessible != nil { + in, out := &in.IsPubliclyAccessible, &out.IsPubliclyAccessible + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressConfigurationInitParameters. +func (in *IngressConfigurationInitParameters) DeepCopy() *IngressConfigurationInitParameters { + if in == nil { + return nil + } + out := new(IngressConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressConfigurationObservation) DeepCopyInto(out *IngressConfigurationObservation) { + *out = *in + if in.IsPubliclyAccessible != nil { + in, out := &in.IsPubliclyAccessible, &out.IsPubliclyAccessible + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressConfigurationObservation. +func (in *IngressConfigurationObservation) DeepCopy() *IngressConfigurationObservation { + if in == nil { + return nil + } + out := new(IngressConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressConfigurationParameters) DeepCopyInto(out *IngressConfigurationParameters) { + *out = *in + if in.IsPubliclyAccessible != nil { + in, out := &in.IsPubliclyAccessible, &out.IsPubliclyAccessible + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressConfigurationParameters. +func (in *IngressConfigurationParameters) DeepCopy() *IngressConfigurationParameters { + if in == nil { + return nil + } + out := new(IngressConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceConfigurationInitParameters) DeepCopyInto(out *InstanceConfigurationInitParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.InstanceRoleArn != nil { + in, out := &in.InstanceRoleArn, &out.InstanceRoleArn + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceConfigurationInitParameters. 
+func (in *InstanceConfigurationInitParameters) DeepCopy() *InstanceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(InstanceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceConfigurationObservation) DeepCopyInto(out *InstanceConfigurationObservation) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.InstanceRoleArn != nil { + in, out := &in.InstanceRoleArn, &out.InstanceRoleArn + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceConfigurationObservation. +func (in *InstanceConfigurationObservation) DeepCopy() *InstanceConfigurationObservation { + if in == nil { + return nil + } + out := new(InstanceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceConfigurationParameters) DeepCopyInto(out *InstanceConfigurationParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.InstanceRoleArn != nil { + in, out := &in.InstanceRoleArn, &out.InstanceRoleArn + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceConfigurationParameters. 
+func (in *InstanceConfigurationParameters) DeepCopy() *InstanceConfigurationParameters { + if in == nil { + return nil + } + out := new(InstanceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfigurationInitParameters) DeepCopyInto(out *NetworkConfigurationInitParameters) { + *out = *in + if in.EgressConfiguration != nil { + in, out := &in.EgressConfiguration, &out.EgressConfiguration + *out = new(EgressConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.IngressConfiguration != nil { + in, out := &in.IngressConfiguration, &out.IngressConfiguration + *out = new(IngressConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationInitParameters. +func (in *NetworkConfigurationInitParameters) DeepCopy() *NetworkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationObservation) DeepCopyInto(out *NetworkConfigurationObservation) { + *out = *in + if in.EgressConfiguration != nil { + in, out := &in.EgressConfiguration, &out.EgressConfiguration + *out = new(EgressConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.IngressConfiguration != nil { + in, out := &in.IngressConfiguration, &out.IngressConfiguration + *out = new(IngressConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationObservation. +func (in *NetworkConfigurationObservation) DeepCopy() *NetworkConfigurationObservation { + if in == nil { + return nil + } + out := new(NetworkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfigurationParameters) DeepCopyInto(out *NetworkConfigurationParameters) { + *out = *in + if in.EgressConfiguration != nil { + in, out := &in.EgressConfiguration, &out.EgressConfiguration + *out = new(EgressConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.IngressConfiguration != nil { + in, out := &in.IngressConfiguration, &out.IngressConfiguration + *out = new(IngressConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationParameters. 
+func (in *NetworkConfigurationParameters) DeepCopy() *NetworkConfigurationParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityConfiguration) DeepCopyInto(out *ObservabilityConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfiguration. +func (in *ObservabilityConfiguration) DeepCopy() *ObservabilityConfiguration { + if in == nil { + return nil + } + out := new(ObservabilityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObservabilityConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObservabilityConfigurationInitParameters) DeepCopyInto(out *ObservabilityConfigurationInitParameters) { + *out = *in + if in.ObservabilityConfigurationName != nil { + in, out := &in.ObservabilityConfigurationName, &out.ObservabilityConfigurationName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TraceConfiguration != nil { + in, out := &in.TraceConfiguration, &out.TraceConfiguration + *out = new(TraceConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfigurationInitParameters. +func (in *ObservabilityConfigurationInitParameters) DeepCopy() *ObservabilityConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ObservabilityConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityConfigurationList) DeepCopyInto(out *ObservabilityConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ObservabilityConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfigurationList. 
+func (in *ObservabilityConfigurationList) DeepCopy() *ObservabilityConfigurationList { + if in == nil { + return nil + } + out := new(ObservabilityConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObservabilityConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityConfigurationObservation) DeepCopyInto(out *ObservabilityConfigurationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Latest != nil { + in, out := &in.Latest, &out.Latest + *out = new(bool) + **out = **in + } + if in.ObservabilityConfigurationName != nil { + in, out := &in.ObservabilityConfigurationName, &out.ObservabilityConfigurationName + *out = new(string) + **out = **in + } + if in.ObservabilityConfigurationRevision != nil { + in, out := &in.ObservabilityConfigurationRevision, &out.ObservabilityConfigurationRevision + *out = new(float64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, 
out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TraceConfiguration != nil { + in, out := &in.TraceConfiguration, &out.TraceConfiguration + *out = new(TraceConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfigurationObservation. +func (in *ObservabilityConfigurationObservation) DeepCopy() *ObservabilityConfigurationObservation { + if in == nil { + return nil + } + out := new(ObservabilityConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityConfigurationParameters) DeepCopyInto(out *ObservabilityConfigurationParameters) { + *out = *in + if in.ObservabilityConfigurationName != nil { + in, out := &in.ObservabilityConfigurationName, &out.ObservabilityConfigurationName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TraceConfiguration != nil { + in, out := &in.TraceConfiguration, &out.TraceConfiguration + *out = new(TraceConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfigurationParameters. 
+func (in *ObservabilityConfigurationParameters) DeepCopy() *ObservabilityConfigurationParameters { + if in == nil { + return nil + } + out := new(ObservabilityConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityConfigurationSpec) DeepCopyInto(out *ObservabilityConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfigurationSpec. +func (in *ObservabilityConfigurationSpec) DeepCopy() *ObservabilityConfigurationSpec { + if in == nil { + return nil + } + out := new(ObservabilityConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityConfigurationStatus) DeepCopyInto(out *ObservabilityConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfigurationStatus. +func (in *ObservabilityConfigurationStatus) DeepCopy() *ObservabilityConfigurationStatus { + if in == nil { + return nil + } + out := new(ObservabilityConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceInitParameters) DeepCopyInto(out *ServiceInitParameters) { + *out = *in + if in.AutoScalingConfigurationArn != nil { + in, out := &in.AutoScalingConfigurationArn, &out.AutoScalingConfigurationArn + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckConfiguration != nil { + in, out := &in.HealthCheckConfiguration, &out.HealthCheckConfiguration + *out = new(HealthCheckConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceConfiguration != nil { + in, out := &in.InstanceConfiguration, &out.InstanceConfiguration + *out = new(InstanceConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ObservabilityConfiguration != nil { + in, out := &in.ObservabilityConfiguration, &out.ObservabilityConfiguration + *out = 
new(ServiceObservabilityConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SourceConfiguration != nil { + in, out := &in.SourceConfiguration, &out.SourceConfiguration + *out = new(SourceConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceInitParameters. +func (in *ServiceInitParameters) DeepCopy() *ServiceInitParameters { + if in == nil { + return nil + } + out := new(ServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceObservabilityConfigurationInitParameters) DeepCopyInto(out *ServiceObservabilityConfigurationInitParameters) { + *out = *in + if in.ObservabilityConfigurationArn != nil { + in, out := &in.ObservabilityConfigurationArn, &out.ObservabilityConfigurationArn + *out = new(string) + **out = **in + } + if in.ObservabilityConfigurationArnRef != nil { + in, out := &in.ObservabilityConfigurationArnRef, &out.ObservabilityConfigurationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ObservabilityConfigurationArnSelector != nil { + in, out := &in.ObservabilityConfigurationArnSelector, &out.ObservabilityConfigurationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObservabilityEnabled != nil { + in, out := &in.ObservabilityEnabled, &out.ObservabilityEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceObservabilityConfigurationInitParameters. +func (in *ServiceObservabilityConfigurationInitParameters) DeepCopy() *ServiceObservabilityConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ServiceObservabilityConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceObservabilityConfigurationObservation) DeepCopyInto(out *ServiceObservabilityConfigurationObservation) { + *out = *in + if in.ObservabilityConfigurationArn != nil { + in, out := &in.ObservabilityConfigurationArn, &out.ObservabilityConfigurationArn + *out = new(string) + **out = **in + } + if in.ObservabilityEnabled != nil { + in, out := &in.ObservabilityEnabled, &out.ObservabilityEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceObservabilityConfigurationObservation. +func (in *ServiceObservabilityConfigurationObservation) DeepCopy() *ServiceObservabilityConfigurationObservation { + if in == nil { + return nil + } + out := new(ServiceObservabilityConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceObservabilityConfigurationParameters) DeepCopyInto(out *ServiceObservabilityConfigurationParameters) { + *out = *in + if in.ObservabilityConfigurationArn != nil { + in, out := &in.ObservabilityConfigurationArn, &out.ObservabilityConfigurationArn + *out = new(string) + **out = **in + } + if in.ObservabilityConfigurationArnRef != nil { + in, out := &in.ObservabilityConfigurationArnRef, &out.ObservabilityConfigurationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ObservabilityConfigurationArnSelector != nil { + in, out := &in.ObservabilityConfigurationArnSelector, &out.ObservabilityConfigurationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObservabilityEnabled != nil { + in, out := &in.ObservabilityEnabled, &out.ObservabilityEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceObservabilityConfigurationParameters. 
+func (in *ServiceObservabilityConfigurationParameters) DeepCopy() *ServiceObservabilityConfigurationParameters { + if in == nil { + return nil + } + out := new(ServiceObservabilityConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceObservation) DeepCopyInto(out *ServiceObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoScalingConfigurationArn != nil { + in, out := &in.AutoScalingConfigurationArn, &out.AutoScalingConfigurationArn + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckConfiguration != nil { + in, out := &in.HealthCheckConfiguration, &out.HealthCheckConfiguration + *out = new(HealthCheckConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceConfiguration != nil { + in, out := &in.InstanceConfiguration, &out.InstanceConfiguration + *out = new(InstanceConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ObservabilityConfiguration != nil { + in, out := &in.ObservabilityConfiguration, &out.ObservabilityConfiguration + *out = new(ServiceObservabilityConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceID != nil { + in, out := &in.ServiceID, &out.ServiceID + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceURL 
!= nil { + in, out := &in.ServiceURL, &out.ServiceURL + *out = new(string) + **out = **in + } + if in.SourceConfiguration != nil { + in, out := &in.SourceConfiguration, &out.SourceConfiguration + *out = new(SourceConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceObservation. +func (in *ServiceObservation) DeepCopy() *ServiceObservation { + if in == nil { + return nil + } + out := new(ServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceParameters) DeepCopyInto(out *ServiceParameters) { + *out = *in + if in.AutoScalingConfigurationArn != nil { + in, out := &in.AutoScalingConfigurationArn, &out.AutoScalingConfigurationArn + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckConfiguration != nil { + in, out := &in.HealthCheckConfiguration, &out.HealthCheckConfiguration + *out = new(HealthCheckConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceConfiguration != nil { + in, out := &in.InstanceConfiguration, &out.InstanceConfiguration + *out = new(InstanceConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ObservabilityConfiguration != nil { + in, out := &in.ObservabilityConfiguration, &out.ObservabilityConfiguration + *out = new(ServiceObservabilityConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.SourceConfiguration != nil { + in, out := &in.SourceConfiguration, &out.SourceConfiguration + *out = new(SourceConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new ServiceParameters. +func (in *ServiceParameters) DeepCopy() *ServiceParameters { + if in == nil { + return nil + } + out := new(ServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceCodeVersionInitParameters) DeepCopyInto(out *SourceCodeVersionInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceCodeVersionInitParameters. 
+func (in *SourceCodeVersionInitParameters) DeepCopy() *SourceCodeVersionInitParameters { + if in == nil { + return nil + } + out := new(SourceCodeVersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceCodeVersionObservation) DeepCopyInto(out *SourceCodeVersionObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceCodeVersionObservation. +func (in *SourceCodeVersionObservation) DeepCopy() *SourceCodeVersionObservation { + if in == nil { + return nil + } + out := new(SourceCodeVersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceCodeVersionParameters) DeepCopyInto(out *SourceCodeVersionParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceCodeVersionParameters. +func (in *SourceCodeVersionParameters) DeepCopy() *SourceCodeVersionParameters { + if in == nil { + return nil + } + out := new(SourceCodeVersionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConfigurationInitParameters) DeepCopyInto(out *SourceConfigurationInitParameters) { + *out = *in + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoDeploymentsEnabled != nil { + in, out := &in.AutoDeploymentsEnabled, &out.AutoDeploymentsEnabled + *out = new(bool) + **out = **in + } + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = new(CodeRepositoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageRepository != nil { + in, out := &in.ImageRepository, &out.ImageRepository + *out = new(ImageRepositoryInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConfigurationInitParameters. +func (in *SourceConfigurationInitParameters) DeepCopy() *SourceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SourceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConfigurationObservation) DeepCopyInto(out *SourceConfigurationObservation) { + *out = *in + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoDeploymentsEnabled != nil { + in, out := &in.AutoDeploymentsEnabled, &out.AutoDeploymentsEnabled + *out = new(bool) + **out = **in + } + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = new(CodeRepositoryObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageRepository != nil { + in, out := &in.ImageRepository, &out.ImageRepository + *out = new(ImageRepositoryObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConfigurationObservation. +func (in *SourceConfigurationObservation) DeepCopy() *SourceConfigurationObservation { + if in == nil { + return nil + } + out := new(SourceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceConfigurationParameters) DeepCopyInto(out *SourceConfigurationParameters) { + *out = *in + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoDeploymentsEnabled != nil { + in, out := &in.AutoDeploymentsEnabled, &out.AutoDeploymentsEnabled + *out = new(bool) + **out = **in + } + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = new(CodeRepositoryParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageRepository != nil { + in, out := &in.ImageRepository, &out.ImageRepository + *out = new(ImageRepositoryParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceConfigurationParameters. +func (in *SourceConfigurationParameters) DeepCopy() *SourceConfigurationParameters { + if in == nil { + return nil + } + out := new(SourceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TraceConfigurationInitParameters) DeepCopyInto(out *TraceConfigurationInitParameters) { + *out = *in + if in.Vendor != nil { + in, out := &in.Vendor, &out.Vendor + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceConfigurationInitParameters. +func (in *TraceConfigurationInitParameters) DeepCopy() *TraceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(TraceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TraceConfigurationObservation) DeepCopyInto(out *TraceConfigurationObservation) { + *out = *in + if in.Vendor != nil { + in, out := &in.Vendor, &out.Vendor + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceConfigurationObservation. +func (in *TraceConfigurationObservation) DeepCopy() *TraceConfigurationObservation { + if in == nil { + return nil + } + out := new(TraceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TraceConfigurationParameters) DeepCopyInto(out *TraceConfigurationParameters) { + *out = *in + if in.Vendor != nil { + in, out := &in.Vendor, &out.Vendor + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceConfigurationParameters. +func (in *TraceConfigurationParameters) DeepCopy() *TraceConfigurationParameters { + if in == nil { + return nil + } + out := new(TraceConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/apprunner/v1beta2/zz_generated.managed.go b/apis/apprunner/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..b1b7f9036f --- /dev/null +++ b/apis/apprunner/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ObservabilityConfiguration. 
+func (mg *ObservabilityConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ObservabilityConfiguration. +func (mg *ObservabilityConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ObservabilityConfiguration. 
+func (mg *ObservabilityConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Service. +func (mg *Service) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Service. +func (mg *Service) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Service. +func (mg *Service) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Service. +func (mg *Service) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Service. +func (mg *Service) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Service. +func (mg *Service) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Service. +func (mg *Service) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Service. +func (mg *Service) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Service. +func (mg *Service) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Service. +func (mg *Service) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Service. +func (mg *Service) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Service. 
+func (mg *Service) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/apprunner/v1beta2/zz_generated.managedlist.go b/apis/apprunner/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..d0d9c9fbd2 --- /dev/null +++ b/apis/apprunner/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ObservabilityConfigurationList. +func (l *ObservabilityConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ServiceList. +func (l *ServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/apprunner/v1beta2/zz_generated.resolvers.go b/apis/apprunner/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..6c23cad36b --- /dev/null +++ b/apis/apprunner/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,166 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Service. 
	apisresolver "github.com/upbound/provider-aws/internal/apis"
)

// ResolveReferences resolves cross-resource references (VPCConnector, Connection,
// ObservabilityConfiguration ARNs) for both spec.forProvider and spec.initProvider.
func (mg *Service) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error

	if mg.Spec.ForProvider.NetworkConfiguration != nil {
		if mg.Spec.ForProvider.NetworkConfiguration.EgressConfiguration != nil {
			{
				m, l, err = apisresolver.GetManagedResource("apprunner.aws.upbound.io", "v1beta1", "VPCConnector", "VPCConnectorList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArn),
					Extract:      resource.ExtractParamPath("arn", true),
					Reference:    mg.Spec.ForProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArnRef,
					Selector:     mg.Spec.ForProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArnSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArn")
			}
			mg.Spec.ForProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArn = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArnRef = rsp.ResolvedReference

		}
	}
	if mg.Spec.ForProvider.ObservabilityConfiguration != nil {
		{
			m, l, err = apisresolver.GetManagedResource("apprunner.aws.upbound.io", "v1beta2", "ObservabilityConfiguration", "ObservabilityConfigurationList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ObservabilityConfiguration.ObservabilityConfigurationArn),
				Extract:      resource.ExtractParamPath("arn", true),
				Reference:    mg.Spec.ForProvider.ObservabilityConfiguration.ObservabilityConfigurationArnRef,
				Selector:     mg.Spec.ForProvider.ObservabilityConfiguration.ObservabilityConfigurationArnSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.ForProvider.ObservabilityConfiguration.ObservabilityConfigurationArn")
		}
		mg.Spec.ForProvider.ObservabilityConfiguration.ObservabilityConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.ForProvider.ObservabilityConfiguration.ObservabilityConfigurationArnRef = rsp.ResolvedReference

	}
	if mg.Spec.ForProvider.SourceConfiguration != nil {
		if mg.Spec.ForProvider.SourceConfiguration.AuthenticationConfiguration != nil {
			{
				m, l, err = apisresolver.GetManagedResource("apprunner.aws.upbound.io", "v1beta1", "Connection", "ConnectionList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArn),
					Extract:      resource.ExtractParamPath("arn", true),
					Reference:    mg.Spec.ForProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArnRef,
					Selector:     mg.Spec.ForProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArnSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArn")
			}
			mg.Spec.ForProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArn = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArnRef = rsp.ResolvedReference

		}
	}
	if mg.Spec.InitProvider.NetworkConfiguration != nil {
		if mg.Spec.InitProvider.NetworkConfiguration.EgressConfiguration != nil {
			{
				m, l, err = apisresolver.GetManagedResource("apprunner.aws.upbound.io", "v1beta1", "VPCConnector", "VPCConnectorList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArn),
					Extract:      resource.ExtractParamPath("arn", true),
					Reference:    mg.Spec.InitProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArnRef,
					Selector:     mg.Spec.InitProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArnSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArn")
			}
			mg.Spec.InitProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArn = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.NetworkConfiguration.EgressConfiguration.VPCConnectorArnRef = rsp.ResolvedReference

		}
	}
	if mg.Spec.InitProvider.ObservabilityConfiguration != nil {
		{
			m, l, err = apisresolver.GetManagedResource("apprunner.aws.upbound.io", "v1beta2", "ObservabilityConfiguration", "ObservabilityConfigurationList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ObservabilityConfiguration.ObservabilityConfigurationArn),
				Extract:      resource.ExtractParamPath("arn", true),
				Reference:    mg.Spec.InitProvider.ObservabilityConfiguration.ObservabilityConfigurationArnRef,
				Selector:     mg.Spec.InitProvider.ObservabilityConfiguration.ObservabilityConfigurationArnSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.InitProvider.ObservabilityConfiguration.ObservabilityConfigurationArn")
		}
		mg.Spec.InitProvider.ObservabilityConfiguration.ObservabilityConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.InitProvider.ObservabilityConfiguration.ObservabilityConfigurationArnRef = rsp.ResolvedReference

	}
	if mg.Spec.InitProvider.SourceConfiguration != nil {
		if mg.Spec.InitProvider.SourceConfiguration.AuthenticationConfiguration != nil {
			{
				m, l, err = apisresolver.GetManagedResource("apprunner.aws.upbound.io", "v1beta1", "Connection", "ConnectionList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArn),
					Extract:      resource.ExtractParamPath("arn", true),
					Reference:    mg.Spec.InitProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArnRef,
					Selector:     mg.Spec.InitProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArnSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArn")
			}
			mg.Spec.InitProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArn = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.SourceConfiguration.AuthenticationConfiguration.ConnectionArnRef = rsp.ResolvedReference

		}
	}

	return nil
}

// ---- patch file boundary: apis/apprunner/v1beta2/zz_groupversion_info.go (new file mode 100755, index 0000000000..ad084d29c6; filename continues on the next line) ----
b/apis/apprunner/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=apprunner.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "apprunner.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/apprunner/v1beta2/zz_observabilityconfiguration_terraformed.go b/apis/apprunner/v1beta2/zz_observabilityconfiguration_terraformed.go new file mode 100755 index 0000000000..226dd0a26b --- /dev/null +++ b/apis/apprunner/v1beta2/zz_observabilityconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 

package v1beta2

import (
	"dario.cat/mergo"
	"github.com/pkg/errors"

	"github.com/crossplane/upjet/pkg/resource"
	"github.com/crossplane/upjet/pkg/resource/json"
)

// GetTerraformResourceType returns Terraform resource type for this ObservabilityConfiguration
func (mg *ObservabilityConfiguration) GetTerraformResourceType() string {
	return "aws_apprunner_observability_configuration"
}

// GetConnectionDetailsMapping for this ObservabilityConfiguration
func (tr *ObservabilityConfiguration) GetConnectionDetailsMapping() map[string]string {
	return nil
}

// GetObservation of this ObservabilityConfiguration
func (tr *ObservabilityConfiguration) GetObservation() (map[string]any, error) {
	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.TFParser.Unmarshal(o, &base)
}

// SetObservation for this ObservabilityConfiguration
func (tr *ObservabilityConfiguration) SetObservation(obs map[string]any) error {
	p, err := json.TFParser.Marshal(obs)
	if err != nil {
		return err
	}
	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
}

// GetID returns ID of underlying Terraform resource of this ObservabilityConfiguration
func (tr *ObservabilityConfiguration) GetID() string {
	if tr.Status.AtProvider.ID == nil {
		return ""
	}
	return *tr.Status.AtProvider.ID
}

// GetParameters of this ObservabilityConfiguration
func (tr *ObservabilityConfiguration) GetParameters() (map[string]any, error) {
	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.TFParser.Unmarshal(p, &base)
}

// SetParameters for this ObservabilityConfiguration
func (tr *ObservabilityConfiguration) SetParameters(params map[string]any) error {
	p, err := json.TFParser.Marshal(params)
	if err != nil {
		return err
	}
	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
}

// GetInitParameters of this ObservabilityConfiguration
func (tr *ObservabilityConfiguration) GetInitParameters() (map[string]any, error) {
	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.TFParser.Unmarshal(p, &base)
}

// GetMergedParameters of this ObservabilityConfiguration.
// NOTE(review): the generator emitted "GetInitParameters" here by mistake;
// this method merges spec.initProvider into spec.forProvider parameters.
func (tr *ObservabilityConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
	params, err := tr.GetParameters()
	if err != nil {
		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
	}
	if !shouldMergeInitProvider {
		return params, nil
	}

	initParams, err := tr.GetInitParameters()
	if err != nil {
		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
	}

	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
	// slices from the initProvider to forProvider. As it also sets
	// overwrite to true, we need to set it back to false, we don't
	// want to overwrite the forProvider fields with the initProvider
	// fields.
	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
	}

	return params, nil
}

// LateInitialize this ObservabilityConfiguration using its observed tfState.
// returns True if there are any spec changes for the resource.
func (tr *ObservabilityConfiguration) LateInitialize(attrs []byte) (bool, error) {
	params := &ObservabilityConfigurationParameters{}
	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
	}
	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}

	li := resource.NewGenericLateInitializer(opts...)
	return li.LateInitialize(&tr.Spec.ForProvider, params)
}

// GetTerraformSchemaVersion returns the associated Terraform schema version
func (tr *ObservabilityConfiguration) GetTerraformSchemaVersion() int {
	return 0
}

// ---- patch file boundary: apis/apprunner/v1beta2/zz_observabilityconfiguration_types.go (new file mode 100755, hunk @@ -0,0 +1,159 @@) ----

// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta2

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

type ObservabilityConfigurationInitParameters struct {

	// Name of the observability configuration.
	ObservabilityConfigurationName *string `json:"observabilityConfigurationName,omitempty" tf:"observability_configuration_name,omitempty"`

	// Key-value map of resource tags.
	// +mapType=granular
	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`

	// Configuration of the tracing feature within this observability configuration. If you don't specify it, App Runner doesn't enable tracing. See Trace Configuration below for more details.
	TraceConfiguration *TraceConfigurationInitParameters `json:"traceConfiguration,omitempty" tf:"trace_configuration,omitempty"`
}

type ObservabilityConfigurationObservation struct {

	// ARN of this observability configuration.
	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`

	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// Whether the observability configuration has the highest observability_configuration_revision among all configurations that share the same observability_configuration_name.
	Latest *bool `json:"latest,omitempty" tf:"latest,omitempty"`

	// Name of the observability configuration.
	ObservabilityConfigurationName *string `json:"observabilityConfigurationName,omitempty" tf:"observability_configuration_name,omitempty"`

	// The revision of this observability configuration.
	ObservabilityConfigurationRevision *float64 `json:"observabilityConfigurationRevision,omitempty" tf:"observability_configuration_revision,omitempty"`

	// Current state of the observability configuration. An INACTIVE configuration revision has been deleted and can't be used. It is permanently removed some time after deletion.
	Status *string `json:"status,omitempty" tf:"status,omitempty"`

	// Key-value map of resource tags.
	// +mapType=granular
	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`

	// Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
	// +mapType=granular
	TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"`

	// Configuration of the tracing feature within this observability configuration. If you don't specify it, App Runner doesn't enable tracing. See Trace Configuration below for more details.
	TraceConfiguration *TraceConfigurationObservation `json:"traceConfiguration,omitempty" tf:"trace_configuration,omitempty"`
}

type ObservabilityConfigurationParameters struct {

	// Name of the observability configuration.
	// +kubebuilder:validation:Optional
	ObservabilityConfigurationName *string `json:"observabilityConfigurationName,omitempty" tf:"observability_configuration_name,omitempty"`

	// Region is the region you'd like your resource to be created in.
	// +upjet:crd:field:TFTag=-
	// +kubebuilder:validation:Required
	Region *string `json:"region" tf:"-"`

	// Key-value map of resource tags.
	// +kubebuilder:validation:Optional
	// +mapType=granular
	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`

	// Configuration of the tracing feature within this observability configuration. If you don't specify it, App Runner doesn't enable tracing. See Trace Configuration below for more details.
	// +kubebuilder:validation:Optional
	TraceConfiguration *TraceConfigurationParameters `json:"traceConfiguration,omitempty" tf:"trace_configuration,omitempty"`
}

type TraceConfigurationInitParameters struct {

	// Implementation provider chosen for tracing App Runner services. Valid values: AWSXRAY.
	Vendor *string `json:"vendor,omitempty" tf:"vendor,omitempty"`
}

type TraceConfigurationObservation struct {

	// Implementation provider chosen for tracing App Runner services. Valid values: AWSXRAY.
	Vendor *string `json:"vendor,omitempty" tf:"vendor,omitempty"`
}

type TraceConfigurationParameters struct {

	// Implementation provider chosen for tracing App Runner services. Valid values: AWSXRAY.
	// +kubebuilder:validation:Optional
	Vendor *string `json:"vendor,omitempty" tf:"vendor,omitempty"`
}

// ObservabilityConfigurationSpec defines the desired state of ObservabilityConfiguration
type ObservabilityConfigurationSpec struct {
	v1.ResourceSpec `json:",inline"`
	ForProvider     ObservabilityConfigurationParameters `json:"forProvider"`
	// THIS IS A BETA FIELD. It will be honored
	// unless the Management Policies feature flag is disabled.
	// InitProvider holds the same fields as ForProvider, with the exception
	// of Identifier and other resource reference fields. The fields that are
	// in InitProvider are merged into ForProvider when the resource is created.
	// The same fields are also added to the terraform ignore_changes hook, to
	// avoid updating them after creation. This is useful for fields that are
	// required on creation, but we do not desire to update them after creation,
	// for example because of an external controller is managing them, like an
	// autoscaler.
	InitProvider ObservabilityConfigurationInitParameters `json:"initProvider,omitempty"`
}

// ObservabilityConfigurationStatus defines the observed state of ObservabilityConfiguration.
type ObservabilityConfigurationStatus struct {
	v1.ResourceStatus `json:",inline"`
	AtProvider        ObservabilityConfigurationObservation `json:"atProvider,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// ObservabilityConfiguration is the Schema for the ObservabilityConfigurations API. Manages an App Runner Observability Configuration.
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
type ObservabilityConfiguration struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.observabilityConfigurationName) || (has(self.initProvider) && has(self.initProvider.observabilityConfigurationName))",message="spec.forProvider.observabilityConfigurationName is a required parameter"
	Spec   ObservabilityConfigurationSpec   `json:"spec"`
	Status ObservabilityConfigurationStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// ObservabilityConfigurationList contains a list of ObservabilityConfigurations
type ObservabilityConfigurationList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ObservabilityConfiguration `json:"items"`
}

// Repository type metadata.
var (
	ObservabilityConfiguration_Kind             = "ObservabilityConfiguration"
	ObservabilityConfiguration_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: ObservabilityConfiguration_Kind}.String()
	ObservabilityConfiguration_KindAPIVersion   = ObservabilityConfiguration_Kind + "." + CRDGroupVersion.String()
	ObservabilityConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(ObservabilityConfiguration_Kind)
)

func init() {
	SchemeBuilder.Register(&ObservabilityConfiguration{}, &ObservabilityConfigurationList{})
}

// ---- patch file boundary: apis/apprunner/v1beta2/zz_service_terraformed.go (new file mode 100755, hunk @@ -0,0 +1,129 @@) ----

// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta2

import (
	"dario.cat/mergo"
	"github.com/pkg/errors"

	"github.com/crossplane/upjet/pkg/resource"
	"github.com/crossplane/upjet/pkg/resource/json"
)

// GetTerraformResourceType returns Terraform resource type for this Service
func (mg *Service) GetTerraformResourceType() string {
	return "aws_apprunner_service"
}

// GetConnectionDetailsMapping for this Service
func (tr *Service) GetConnectionDetailsMapping() map[string]string {
	return nil
}

// GetObservation of this Service
func (tr *Service) GetObservation() (map[string]any, error) {
	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.TFParser.Unmarshal(o, &base)
}

// SetObservation for this Service
func (tr *Service) SetObservation(obs map[string]any) error {
	p, err := json.TFParser.Marshal(obs)
	if err != nil {
		return err
	}
	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
}

// GetID returns ID of underlying Terraform resource of this Service
func (tr *Service) GetID() string {
	if tr.Status.AtProvider.ID == nil {
		return ""
	}
	return *tr.Status.AtProvider.ID
}

// GetParameters of this Service
func (tr *Service) GetParameters() (map[string]any, error) {
	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.TFParser.Unmarshal(p, &base)
}

// SetParameters for this Service
func (tr *Service) SetParameters(params map[string]any) error {
	p, err := json.TFParser.Marshal(params)
	if err != nil {
		return err
	}
	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
}

// GetInitParameters of this Service
func (tr *Service) GetInitParameters() (map[string]any, error) {
	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.TFParser.Unmarshal(p, &base)
}

// GetMergedParameters of this Service.
// NOTE(review): the generator emitted "GetInitParameters" here by mistake;
// this method merges spec.initProvider into spec.forProvider parameters.
func (tr *Service) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
	params, err := tr.GetParameters()
	if err != nil {
		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
	}
	if !shouldMergeInitProvider {
		return params, nil
	}

	initParams, err := tr.GetInitParameters()
	if err != nil {
		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
	}

	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
	// slices from the initProvider to forProvider. As it also sets
	// overwrite to true, we need to set it back to false, we don't
	// want to overwrite the forProvider fields with the initProvider
	// fields.
	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
	}

	return params, nil
}

// LateInitialize this Service using its observed tfState.
// returns True if there are any spec changes for the resource.
func (tr *Service) LateInitialize(attrs []byte) (bool, error) {
	params := &ServiceParameters{}
	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
	}
	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}

	li := resource.NewGenericLateInitializer(opts...)
	return li.LateInitialize(&tr.Spec.ForProvider, params)
}

// GetTerraformSchemaVersion returns the associated Terraform schema version
func (tr *Service) GetTerraformSchemaVersion() int {
	return 0
}

// ---- patch file boundary: apis/apprunner/v1beta2/zz_service_types.go (new file mode 100755, hunk @@ -0,0 +1,861 @@) ----

// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta2

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

type AuthenticationConfigurationInitParameters struct {

	// ARN of the IAM role that grants the App Runner service access to a source repository. Required for ECR image repositories (but not for ECR Public)
	AccessRoleArn *string `json:"accessRoleArn,omitempty" tf:"access_role_arn,omitempty"`

	// ARN of the App Runner connection that enables the App Runner service to connect to a source repository. Required for GitHub code repositories.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apprunner/v1beta1.Connection
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true)
	ConnectionArn *string `json:"connectionArn,omitempty" tf:"connection_arn,omitempty"`

	// Reference to a Connection in apprunner to populate connectionArn.
	// +kubebuilder:validation:Optional
	ConnectionArnRef *v1.Reference `json:"connectionArnRef,omitempty" tf:"-"`

	// Selector for a Connection in apprunner to populate connectionArn.
	// +kubebuilder:validation:Optional
	ConnectionArnSelector *v1.Selector `json:"connectionArnSelector,omitempty" tf:"-"`
}

type AuthenticationConfigurationObservation struct {

	// ARN of the IAM role that grants the App Runner service access to a source repository. Required for ECR image repositories (but not for ECR Public)
	AccessRoleArn *string `json:"accessRoleArn,omitempty" tf:"access_role_arn,omitempty"`

	// ARN of the App Runner connection that enables the App Runner service to connect to a source repository. Required for GitHub code repositories.
	ConnectionArn *string `json:"connectionArn,omitempty" tf:"connection_arn,omitempty"`
}

type AuthenticationConfigurationParameters struct {

	// ARN of the IAM role that grants the App Runner service access to a source repository. Required for ECR image repositories (but not for ECR Public)
	// +kubebuilder:validation:Optional
	AccessRoleArn *string `json:"accessRoleArn,omitempty" tf:"access_role_arn,omitempty"`

	// ARN of the App Runner connection that enables the App Runner service to connect to a source repository. Required for GitHub code repositories.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apprunner/v1beta1.Connection
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true)
	// +kubebuilder:validation:Optional
	ConnectionArn *string `json:"connectionArn,omitempty" tf:"connection_arn,omitempty"`

	// Reference to a Connection in apprunner to populate connectionArn.
	// +kubebuilder:validation:Optional
	ConnectionArnRef *v1.Reference `json:"connectionArnRef,omitempty" tf:"-"`

	// Selector for a Connection in apprunner to populate connectionArn.
	// +kubebuilder:validation:Optional
	ConnectionArnSelector *v1.Selector `json:"connectionArnSelector,omitempty" tf:"-"`
}

type CodeConfigurationInitParameters struct {

	// Basic configuration for building and running the App Runner service. Use this parameter to quickly launch an App Runner service without providing an apprunner.yaml file in the source code repository (or ignoring the file if it exists). See Code Configuration Values below for more details.
	CodeConfigurationValues *CodeConfigurationValuesInitParameters `json:"codeConfigurationValues,omitempty" tf:"code_configuration_values,omitempty"`

	// Source of the App Runner configuration. Valid values: REPOSITORY, API. Values are interpreted as follows:
	ConfigurationSource *string `json:"configurationSource,omitempty" tf:"configuration_source,omitempty"`
}

type CodeConfigurationObservation struct {

	// Basic configuration for building and running the App Runner service. Use this parameter to quickly launch an App Runner service without providing an apprunner.yaml file in the source code repository (or ignoring the file if it exists). See Code Configuration Values below for more details.
	CodeConfigurationValues *CodeConfigurationValuesObservation `json:"codeConfigurationValues,omitempty" tf:"code_configuration_values,omitempty"`

	// Source of the App Runner configuration. Valid values: REPOSITORY, API. Values are interpreted as follows:
	ConfigurationSource *string `json:"configurationSource,omitempty" tf:"configuration_source,omitempty"`
}

type CodeConfigurationParameters struct {

	// Basic configuration for building and running the App Runner service. Use this parameter to quickly launch an App Runner service without providing an apprunner.yaml file in the source code repository (or ignoring the file if it exists). See Code Configuration Values below for more details.
	// +kubebuilder:validation:Optional
	CodeConfigurationValues *CodeConfigurationValuesParameters `json:"codeConfigurationValues,omitempty" tf:"code_configuration_values,omitempty"`

	// Source of the App Runner configuration. Valid values: REPOSITORY, API. Values are interpreted as follows:
	// +kubebuilder:validation:Optional
	ConfigurationSource *string `json:"configurationSource" tf:"configuration_source,omitempty"`
}

type CodeConfigurationValuesInitParameters struct {

	// Command App Runner runs to build your application.
	BuildCommand *string `json:"buildCommand,omitempty" tf:"build_command,omitempty"`

	// Port that your application listens to in the container. Defaults to "8080".
	Port *string `json:"port,omitempty" tf:"port,omitempty"`

	// Runtime environment type for building and running an App Runner service. Represents a programming language runtime. Valid values: PYTHON_3, NODEJS_12, NODEJS_14, NODEJS_16, CORRETTO_8, CORRETTO_11, GO_1, DOTNET_6, PHP_81, RUBY_31.
	Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"`

	// Secrets and parameters available to your service as environment variables. A map of key/value pairs, where the key is the desired name of the Secret in the environment (i.e. it does not have to match the name of the secret in Secrets Manager or SSM Parameter Store), and the value is the ARN of the secret from AWS Secrets Manager or the ARN of the parameter in AWS SSM Parameter Store.
	// +mapType=granular
	RuntimeEnvironmentSecrets map[string]*string `json:"runtimeEnvironmentSecrets,omitempty" tf:"runtime_environment_secrets,omitempty"`

	// Environment variables available to your running App Runner service. A map of key/value pairs. Keys with a prefix of AWSAPPRUNNER are reserved for system use and aren't valid.
	// +mapType=granular
	RuntimeEnvironmentVariables map[string]*string `json:"runtimeEnvironmentVariables,omitempty" tf:"runtime_environment_variables,omitempty"`

	// Command App Runner runs to start the application in the source image. If specified, this command overrides the Docker image’s default start command.
	StartCommand *string `json:"startCommand,omitempty" tf:"start_command,omitempty"`
}

type CodeConfigurationValuesObservation struct {

	// Command App Runner runs to build your application.
	BuildCommand *string `json:"buildCommand,omitempty" tf:"build_command,omitempty"`

	// Port that your application listens to in the container. Defaults to "8080".
	Port *string `json:"port,omitempty" tf:"port,omitempty"`

	// Runtime environment type for building and running an App Runner service. Represents a programming language runtime. Valid values: PYTHON_3, NODEJS_12, NODEJS_14, NODEJS_16, CORRETTO_8, CORRETTO_11, GO_1, DOTNET_6, PHP_81, RUBY_31.
	Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"`

	// Secrets and parameters available to your service as environment variables. A map of key/value pairs, where the key is the desired name of the Secret in the environment (i.e. it does not have to match the name of the secret in Secrets Manager or SSM Parameter Store), and the value is the ARN of the secret from AWS Secrets Manager or the ARN of the parameter in AWS SSM Parameter Store.
+ // +mapType=granular + RuntimeEnvironmentSecrets map[string]*string `json:"runtimeEnvironmentSecrets,omitempty" tf:"runtime_environment_secrets,omitempty"` + + // Environment variables available to your running App Runner service. A map of key/value pairs. Keys with a prefix of AWSAPPRUNNER are reserved for system use and aren't valid. + // +mapType=granular + RuntimeEnvironmentVariables map[string]*string `json:"runtimeEnvironmentVariables,omitempty" tf:"runtime_environment_variables,omitempty"` + + // Command App Runner runs to start the application in the source image. If specified, this command overrides the Docker image’s default start command. + StartCommand *string `json:"startCommand,omitempty" tf:"start_command,omitempty"` +} + +type CodeConfigurationValuesParameters struct { + + // Command App Runner runs to build your application. + // +kubebuilder:validation:Optional + BuildCommand *string `json:"buildCommand,omitempty" tf:"build_command,omitempty"` + + // Port that your application listens to in the container. Defaults to "8080". + // +kubebuilder:validation:Optional + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // Runtime environment type for building and running an App Runner service. Represents a programming language runtime. Valid values: PYTHON_3, NODEJS_12, NODEJS_14, NODEJS_16, CORRETTO_8, CORRETTO_11, GO_1, DOTNET_6, PHP_81, RUBY_31. + // +kubebuilder:validation:Optional + Runtime *string `json:"runtime" tf:"runtime,omitempty"` + + // Secrets and parameters available to your service as environment variables. A map of key/value pairs, where the key is the desired name of the Secret in the environment (i.e. it does not have to match the name of the secret in Secrets Manager or SSM Parameter Store), and the value is the ARN of the secret from AWS Secrets Manager or the ARN of the parameter in AWS SSM Parameter Store. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + RuntimeEnvironmentSecrets map[string]*string `json:"runtimeEnvironmentSecrets,omitempty" tf:"runtime_environment_secrets,omitempty"` + + // Environment variables available to your running App Runner service. A map of key/value pairs. Keys with a prefix of AWSAPPRUNNER are reserved for system use and aren't valid. + // +kubebuilder:validation:Optional + // +mapType=granular + RuntimeEnvironmentVariables map[string]*string `json:"runtimeEnvironmentVariables,omitempty" tf:"runtime_environment_variables,omitempty"` + + // Command App Runner runs to start the application in the source image. If specified, this command overrides the Docker image’s default start command. + // +kubebuilder:validation:Optional + StartCommand *string `json:"startCommand,omitempty" tf:"start_command,omitempty"` +} + +type CodeRepositoryInitParameters struct { + + // Configuration for building and running the service from a source code repository. See Code Configuration below for more details. + CodeConfiguration *CodeConfigurationInitParameters `json:"codeConfiguration,omitempty" tf:"code_configuration,omitempty"` + + // Location of the repository that contains the source code. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` + + // Version that should be used within the source code repository. See Source Code Version below for more details. + SourceCodeVersion *SourceCodeVersionInitParameters `json:"sourceCodeVersion,omitempty" tf:"source_code_version,omitempty"` + + // The path of the directory that stores source code and configuration files. The build and start commands also execute from here. The path is absolute from root and, if not specified, defaults to the repository root. 
+ SourceDirectory *string `json:"sourceDirectory,omitempty" tf:"source_directory,omitempty"` +} + +type CodeRepositoryObservation struct { + + // Configuration for building and running the service from a source code repository. See Code Configuration below for more details. + CodeConfiguration *CodeConfigurationObservation `json:"codeConfiguration,omitempty" tf:"code_configuration,omitempty"` + + // Location of the repository that contains the source code. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` + + // Version that should be used within the source code repository. See Source Code Version below for more details. + SourceCodeVersion *SourceCodeVersionObservation `json:"sourceCodeVersion,omitempty" tf:"source_code_version,omitempty"` + + // The path of the directory that stores source code and configuration files. The build and start commands also execute from here. The path is absolute from root and, if not specified, defaults to the repository root. + SourceDirectory *string `json:"sourceDirectory,omitempty" tf:"source_directory,omitempty"` +} + +type CodeRepositoryParameters struct { + + // Configuration for building and running the service from a source code repository. See Code Configuration below for more details. + // +kubebuilder:validation:Optional + CodeConfiguration *CodeConfigurationParameters `json:"codeConfiguration,omitempty" tf:"code_configuration,omitempty"` + + // Location of the repository that contains the source code. + // +kubebuilder:validation:Optional + RepositoryURL *string `json:"repositoryUrl" tf:"repository_url,omitempty"` + + // Version that should be used within the source code repository. See Source Code Version below for more details. + // +kubebuilder:validation:Optional + SourceCodeVersion *SourceCodeVersionParameters `json:"sourceCodeVersion" tf:"source_code_version,omitempty"` + + // The path of the directory that stores source code and configuration files. 
The build and start commands also execute from here. The path is absolute from root and, if not specified, defaults to the repository root. + // +kubebuilder:validation:Optional + SourceDirectory *string `json:"sourceDirectory,omitempty" tf:"source_directory,omitempty"` +} + +type EgressConfigurationInitParameters struct { + + // The type of egress configuration. Valid values are: DEFAULT and VPC. + EgressType *string `json:"egressType,omitempty" tf:"egress_type,omitempty"` + + // The Amazon Resource Name (ARN) of the App Runner VPC connector that you want to associate with your App Runner service. Only valid when EgressType = VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apprunner/v1beta1.VPCConnector + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + VPCConnectorArn *string `json:"vpcConnectorArn,omitempty" tf:"vpc_connector_arn,omitempty"` + + // Reference to a VPCConnector in apprunner to populate vpcConnectorArn. + // +kubebuilder:validation:Optional + VPCConnectorArnRef *v1.Reference `json:"vpcConnectorArnRef,omitempty" tf:"-"` + + // Selector for a VPCConnector in apprunner to populate vpcConnectorArn. + // +kubebuilder:validation:Optional + VPCConnectorArnSelector *v1.Selector `json:"vpcConnectorArnSelector,omitempty" tf:"-"` +} + +type EgressConfigurationObservation struct { + + // The type of egress configuration. Valid values are: DEFAULT and VPC. + EgressType *string `json:"egressType,omitempty" tf:"egress_type,omitempty"` + + // The Amazon Resource Name (ARN) of the App Runner VPC connector that you want to associate with your App Runner service. Only valid when EgressType = VPC. + VPCConnectorArn *string `json:"vpcConnectorArn,omitempty" tf:"vpc_connector_arn,omitempty"` +} + +type EgressConfigurationParameters struct { + + // The type of egress configuration. Valid values are: DEFAULT and VPC. 
+ // +kubebuilder:validation:Optional + EgressType *string `json:"egressType,omitempty" tf:"egress_type,omitempty"` + + // The Amazon Resource Name (ARN) of the App Runner VPC connector that you want to associate with your App Runner service. Only valid when EgressType = VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apprunner/v1beta1.VPCConnector + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + VPCConnectorArn *string `json:"vpcConnectorArn,omitempty" tf:"vpc_connector_arn,omitempty"` + + // Reference to a VPCConnector in apprunner to populate vpcConnectorArn. + // +kubebuilder:validation:Optional + VPCConnectorArnRef *v1.Reference `json:"vpcConnectorArnRef,omitempty" tf:"-"` + + // Selector for a VPCConnector in apprunner to populate vpcConnectorArn. + // +kubebuilder:validation:Optional + VPCConnectorArnSelector *v1.Selector `json:"vpcConnectorArnSelector,omitempty" tf:"-"` +} + +type EncryptionConfigurationInitParameters struct { + + // ARN of the KMS key used for encryption. + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` +} + +type EncryptionConfigurationObservation struct { + + // ARN of the KMS key used for encryption. + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` +} + +type EncryptionConfigurationParameters struct { + + // ARN of the KMS key used for encryption. + // +kubebuilder:validation:Optional + KMSKey *string `json:"kmsKey" tf:"kms_key,omitempty"` +} + +type HealthCheckConfigurationInitParameters struct { + + // Number of consecutive checks that must succeed before App Runner decides that the service is healthy. Defaults to 1. Minimum value of 1. Maximum value of 20. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // Time interval, in seconds, between health checks. Defaults to 5. Minimum value of 1. 
Maximum value of 20. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // URL to send requests to for health checks. Defaults to /. Minimum length of 0. Maximum length of 51200. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // IP protocol that App Runner uses to perform health checks for your service. Valid values: TCP, HTTP. Defaults to TCP. If you set protocol to HTTP, App Runner sends health check requests to the HTTP path specified by path. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Time, in seconds, to wait for a health check response before deciding it failed. Defaults to 2. Minimum value of 1. Maximum value of 20. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Number of consecutive checks that must fail before App Runner decides that the service is unhealthy. Defaults to 5. Minimum value of 1. Maximum value of 20. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type HealthCheckConfigurationObservation struct { + + // Number of consecutive checks that must succeed before App Runner decides that the service is healthy. Defaults to 1. Minimum value of 1. Maximum value of 20. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // Time interval, in seconds, between health checks. Defaults to 5. Minimum value of 1. Maximum value of 20. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // URL to send requests to for health checks. Defaults to /. Minimum length of 0. Maximum length of 51200. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // IP protocol that App Runner uses to perform health checks for your service. Valid values: TCP, HTTP. Defaults to TCP. If you set protocol to HTTP, App Runner sends health check requests to the HTTP path specified by path. 
+ Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Time, in seconds, to wait for a health check response before deciding it failed. Defaults to 2. Minimum value of 1. Maximum value of 20. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Number of consecutive checks that must fail before App Runner decides that the service is unhealthy. Defaults to 5. Minimum value of 1. Maximum value of 20. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type HealthCheckConfigurationParameters struct { + + // Number of consecutive checks that must succeed before App Runner decides that the service is healthy. Defaults to 1. Minimum value of 1. Maximum value of 20. + // +kubebuilder:validation:Optional + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // Time interval, in seconds, between health checks. Defaults to 5. Minimum value of 1. Maximum value of 20. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // URL to send requests to for health checks. Defaults to /. Minimum length of 0. Maximum length of 51200. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // IP protocol that App Runner uses to perform health checks for your service. Valid values: TCP, HTTP. Defaults to TCP. If you set protocol to HTTP, App Runner sends health check requests to the HTTP path specified by path. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Time, in seconds, to wait for a health check response before deciding it failed. Defaults to 2. Minimum value of 1. Maximum value of 20. 
+ // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Number of consecutive checks that must fail before App Runner decides that the service is unhealthy. Defaults to 5. Minimum value of 1. Maximum value of 20. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type ImageConfigurationInitParameters struct { + + // Port that your application listens to in the container. Defaults to "8080". + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // Secrets and parameters available to your service as environment variables. A map of key/value pairs, where the key is the desired name of the Secret in the environment (i.e. it does not have to match the name of the secret in Secrets Manager or SSM Parameter Store), and the value is the ARN of the secret from AWS Secrets Manager or the ARN of the parameter in AWS SSM Parameter Store. + // +mapType=granular + RuntimeEnvironmentSecrets map[string]*string `json:"runtimeEnvironmentSecrets,omitempty" tf:"runtime_environment_secrets,omitempty"` + + // Environment variables available to your running App Runner service. A map of key/value pairs. Keys with a prefix of AWSAPPRUNNER are reserved for system use and aren't valid. + // +mapType=granular + RuntimeEnvironmentVariables map[string]*string `json:"runtimeEnvironmentVariables,omitempty" tf:"runtime_environment_variables,omitempty"` + + // Command App Runner runs to start the application in the source image. If specified, this command overrides the Docker image’s default start command. + StartCommand *string `json:"startCommand,omitempty" tf:"start_command,omitempty"` +} + +type ImageConfigurationObservation struct { + + // Port that your application listens to in the container. Defaults to "8080". 
+ Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // Secrets and parameters available to your service as environment variables. A map of key/value pairs, where the key is the desired name of the Secret in the environment (i.e. it does not have to match the name of the secret in Secrets Manager or SSM Parameter Store), and the value is the ARN of the secret from AWS Secrets Manager or the ARN of the parameter in AWS SSM Parameter Store. + // +mapType=granular + RuntimeEnvironmentSecrets map[string]*string `json:"runtimeEnvironmentSecrets,omitempty" tf:"runtime_environment_secrets,omitempty"` + + // Environment variables available to your running App Runner service. A map of key/value pairs. Keys with a prefix of AWSAPPRUNNER are reserved for system use and aren't valid. + // +mapType=granular + RuntimeEnvironmentVariables map[string]*string `json:"runtimeEnvironmentVariables,omitempty" tf:"runtime_environment_variables,omitempty"` + + // Command App Runner runs to start the application in the source image. If specified, this command overrides the Docker image’s default start command. + StartCommand *string `json:"startCommand,omitempty" tf:"start_command,omitempty"` +} + +type ImageConfigurationParameters struct { + + // Port that your application listens to in the container. Defaults to "8080". + // +kubebuilder:validation:Optional + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // Secrets and parameters available to your service as environment variables. A map of key/value pairs, where the key is the desired name of the Secret in the environment (i.e. it does not have to match the name of the secret in Secrets Manager or SSM Parameter Store), and the value is the ARN of the secret from AWS Secrets Manager or the ARN of the parameter in AWS SSM Parameter Store. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + RuntimeEnvironmentSecrets map[string]*string `json:"runtimeEnvironmentSecrets,omitempty" tf:"runtime_environment_secrets,omitempty"` + + // Environment variables available to your running App Runner service. A map of key/value pairs. Keys with a prefix of AWSAPPRUNNER are reserved for system use and aren't valid. + // +kubebuilder:validation:Optional + // +mapType=granular + RuntimeEnvironmentVariables map[string]*string `json:"runtimeEnvironmentVariables,omitempty" tf:"runtime_environment_variables,omitempty"` + + // Command App Runner runs to start the application in the source image. If specified, this command overrides the Docker image’s default start command. + // +kubebuilder:validation:Optional + StartCommand *string `json:"startCommand,omitempty" tf:"start_command,omitempty"` +} + +type ImageRepositoryInitParameters struct { + + // Configuration for running the identified image. See Image Configuration below for more details. + ImageConfiguration *ImageConfigurationInitParameters `json:"imageConfiguration,omitempty" tf:"image_configuration,omitempty"` + + // Identifier of an image. For an image in Amazon Elastic Container Registry (Amazon ECR), this is an image name. For the + // image name format, see Pulling an image in the Amazon ECR User Guide. + ImageIdentifier *string `json:"imageIdentifier,omitempty" tf:"image_identifier,omitempty"` + + // Type of the image repository. This reflects the repository provider and whether the repository is private or public. Valid values: ECR , ECR_PUBLIC. + ImageRepositoryType *string `json:"imageRepositoryType,omitempty" tf:"image_repository_type,omitempty"` +} + +type ImageRepositoryObservation struct { + + // Configuration for running the identified image. See Image Configuration below for more details. 
+ ImageConfiguration *ImageConfigurationObservation `json:"imageConfiguration,omitempty" tf:"image_configuration,omitempty"` + + // Identifier of an image. For an image in Amazon Elastic Container Registry (Amazon ECR), this is an image name. For the + // image name format, see Pulling an image in the Amazon ECR User Guide. + ImageIdentifier *string `json:"imageIdentifier,omitempty" tf:"image_identifier,omitempty"` + + // Type of the image repository. This reflects the repository provider and whether the repository is private or public. Valid values: ECR , ECR_PUBLIC. + ImageRepositoryType *string `json:"imageRepositoryType,omitempty" tf:"image_repository_type,omitempty"` +} + +type ImageRepositoryParameters struct { + + // Configuration for running the identified image. See Image Configuration below for more details. + // +kubebuilder:validation:Optional + ImageConfiguration *ImageConfigurationParameters `json:"imageConfiguration,omitempty" tf:"image_configuration,omitempty"` + + // Identifier of an image. For an image in Amazon Elastic Container Registry (Amazon ECR), this is an image name. For the + // image name format, see Pulling an image in the Amazon ECR User Guide. + // +kubebuilder:validation:Optional + ImageIdentifier *string `json:"imageIdentifier" tf:"image_identifier,omitempty"` + + // Type of the image repository. This reflects the repository provider and whether the repository is private or public. Valid values: ECR , ECR_PUBLIC. + // +kubebuilder:validation:Optional + ImageRepositoryType *string `json:"imageRepositoryType" tf:"image_repository_type,omitempty"` +} + +type IngressConfigurationInitParameters struct { + + // Specifies whether your App Runner service is publicly accessible. To make the service publicly accessible set it to True. To make the service privately accessible, from only within an Amazon VPC set it to False. 
+ IsPubliclyAccessible *bool `json:"isPubliclyAccessible,omitempty" tf:"is_publicly_accessible,omitempty"` +} + +type IngressConfigurationObservation struct { + + // Specifies whether your App Runner service is publicly accessible. To make the service publicly accessible set it to True. To make the service privately accessible, from only within an Amazon VPC set it to False. + IsPubliclyAccessible *bool `json:"isPubliclyAccessible,omitempty" tf:"is_publicly_accessible,omitempty"` +} + +type IngressConfigurationParameters struct { + + // Specifies whether your App Runner service is publicly accessible. To make the service publicly accessible set it to True. To make the service privately accessible, from only within an Amazon VPC set it to False. + // +kubebuilder:validation:Optional + IsPubliclyAccessible *bool `json:"isPubliclyAccessible,omitempty" tf:"is_publicly_accessible,omitempty"` +} + +type InstanceConfigurationInitParameters struct { + + // Number of CPU units reserved for each instance of your App Runner service represented as a String. Defaults to 1024. Valid values: 256|512|1024|2048|4096|(0.25|0.5|1|2|4) vCPU. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // ARN of an IAM role that provides permissions to your App Runner service. These are permissions that your code needs when it calls any AWS APIs. + InstanceRoleArn *string `json:"instanceRoleArn,omitempty" tf:"instance_role_arn,omitempty"` + + // Amount of memory, in MB or GB, reserved for each instance of your App Runner service. Defaults to 2048. Valid values: 512|1024|2048|3072|4096|6144|8192|10240|12288|(0.5|1|2|3|4|6|8|10|12) GB. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type InstanceConfigurationObservation struct { + + // Number of CPU units reserved for each instance of your App Runner service represented as a String. Defaults to 1024. Valid values: 256|512|1024|2048|4096|(0.25|0.5|1|2|4) vCPU. 
+ CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // ARN of an IAM role that provides permissions to your App Runner service. These are permissions that your code needs when it calls any AWS APIs. + InstanceRoleArn *string `json:"instanceRoleArn,omitempty" tf:"instance_role_arn,omitempty"` + + // Amount of memory, in MB or GB, reserved for each instance of your App Runner service. Defaults to 2048. Valid values: 512|1024|2048|3072|4096|6144|8192|10240|12288|(0.5|1|2|3|4|6|8|10|12) GB. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type InstanceConfigurationParameters struct { + + // Number of CPU units reserved for each instance of your App Runner service represented as a String. Defaults to 1024. Valid values: 256|512|1024|2048|4096|(0.25|0.5|1|2|4) vCPU. + // +kubebuilder:validation:Optional + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // ARN of an IAM role that provides permissions to your App Runner service. These are permissions that your code needs when it calls any AWS APIs. + // +kubebuilder:validation:Optional + InstanceRoleArn *string `json:"instanceRoleArn,omitempty" tf:"instance_role_arn,omitempty"` + + // Amount of memory, in MB or GB, reserved for each instance of your App Runner service. Defaults to 2048. Valid values: 512|1024|2048|3072|4096|6144|8192|10240|12288|(0.5|1|2|3|4|6|8|10|12) GB. + // +kubebuilder:validation:Optional + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type NetworkConfigurationInitParameters struct { + + // Network configuration settings for outbound message traffic. See Egress Configuration below for more details. + EgressConfiguration *EgressConfigurationInitParameters `json:"egressConfiguration,omitempty" tf:"egress_configuration,omitempty"` + + // App Runner provides you with the option to choose between Internet Protocol version 4 (IPv4) and dual stack (IPv4 and IPv6) for your incoming public network configuration. Valid values: IPV4, DUAL_STACK. 
Default: IPV4. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // Network configuration settings for inbound network traffic. See Ingress Configuration below for more details. + IngressConfiguration *IngressConfigurationInitParameters `json:"ingressConfiguration,omitempty" tf:"ingress_configuration,omitempty"` +} + +type NetworkConfigurationObservation struct { + + // Network configuration settings for outbound message traffic. See Egress Configuration below for more details. + EgressConfiguration *EgressConfigurationObservation `json:"egressConfiguration,omitempty" tf:"egress_configuration,omitempty"` + + // App Runner provides you with the option to choose between Internet Protocol version 4 (IPv4) and dual stack (IPv4 and IPv6) for your incoming public network configuration. Valid values: IPV4, DUAL_STACK. Default: IPV4. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // Network configuration settings for inbound network traffic. See Ingress Configuration below for more details. + IngressConfiguration *IngressConfigurationObservation `json:"ingressConfiguration,omitempty" tf:"ingress_configuration,omitempty"` +} + +type NetworkConfigurationParameters struct { + + // Network configuration settings for outbound message traffic. See Egress Configuration below for more details. + // +kubebuilder:validation:Optional + EgressConfiguration *EgressConfigurationParameters `json:"egressConfiguration,omitempty" tf:"egress_configuration,omitempty"` + + // App Runner provides you with the option to choose between Internet Protocol version 4 (IPv4) and dual stack (IPv4 and IPv6) for your incoming public network configuration. Valid values: IPV4, DUAL_STACK. Default: IPV4. + // +kubebuilder:validation:Optional + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // Network configuration settings for inbound network traffic. 
See Ingress Configuration below for more details. + // +kubebuilder:validation:Optional + IngressConfiguration *IngressConfigurationParameters `json:"ingressConfiguration,omitempty" tf:"ingress_configuration,omitempty"` +} + +type ServiceInitParameters struct { + + // ARN of an App Runner automatic scaling configuration resource that you want to associate with your service. If not provided, App Runner associates the latest revision of a default auto scaling configuration. + AutoScalingConfigurationArn *string `json:"autoScalingConfigurationArn,omitempty" tf:"auto_scaling_configuration_arn,omitempty"` + + // (Forces new resource) An optional custom encryption key that App Runner uses to encrypt the copy of your source repository that it maintains and your service logs. By default, App Runner uses an AWS managed CMK. See Encryption Configuration below for more details. + EncryptionConfiguration *EncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Settings of the health check that AWS App Runner performs to monitor the health of your service. See Health Check Configuration below for more details. + HealthCheckConfiguration *HealthCheckConfigurationInitParameters `json:"healthCheckConfiguration,omitempty" tf:"health_check_configuration,omitempty"` + + // The runtime configuration of instances (scaling units) of the App Runner service. See Instance Configuration below for more details. + InstanceConfiguration *InstanceConfigurationInitParameters `json:"instanceConfiguration,omitempty" tf:"instance_configuration,omitempty"` + + // Configuration settings related to network traffic of the web application that the App Runner service runs. See Network Configuration below for more details. + NetworkConfiguration *NetworkConfigurationInitParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // The observability configuration of your service. 
See Observability Configuration below for more details. + ObservabilityConfiguration *ServiceObservabilityConfigurationInitParameters `json:"observabilityConfiguration,omitempty" tf:"observability_configuration,omitempty"` + + // (Forces new resource) Name of the service. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // The source to deploy to the App Runner service. Can be a code or an image repository. See Source Configuration below for more details. + SourceConfiguration *SourceConfigurationInitParameters `json:"sourceConfiguration,omitempty" tf:"source_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ServiceObservabilityConfigurationInitParameters struct { + + // ARN of the observability configuration that is associated with the service. Specified only when observability_enabled is true. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apprunner/v1beta2.ObservabilityConfiguration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ObservabilityConfigurationArn *string `json:"observabilityConfigurationArn,omitempty" tf:"observability_configuration_arn,omitempty"` + + // Reference to a ObservabilityConfiguration in apprunner to populate observabilityConfigurationArn. + // +kubebuilder:validation:Optional + ObservabilityConfigurationArnRef *v1.Reference `json:"observabilityConfigurationArnRef,omitempty" tf:"-"` + + // Selector for a ObservabilityConfiguration in apprunner to populate observabilityConfigurationArn. + // +kubebuilder:validation:Optional + ObservabilityConfigurationArnSelector *v1.Selector `json:"observabilityConfigurationArnSelector,omitempty" tf:"-"` + + // When true, an observability configuration resource is associated with the service. 
+ ObservabilityEnabled *bool `json:"observabilityEnabled,omitempty" tf:"observability_enabled,omitempty"` +} + +type ServiceObservabilityConfigurationObservation struct { + + // ARN of the observability configuration that is associated with the service. Specified only when observability_enabled is true. + ObservabilityConfigurationArn *string `json:"observabilityConfigurationArn,omitempty" tf:"observability_configuration_arn,omitempty"` + + // When true, an observability configuration resource is associated with the service. + ObservabilityEnabled *bool `json:"observabilityEnabled,omitempty" tf:"observability_enabled,omitempty"` +} + +type ServiceObservabilityConfigurationParameters struct { + + // ARN of the observability configuration that is associated with the service. Specified only when observability_enabled is true. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/apprunner/v1beta2.ObservabilityConfiguration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ObservabilityConfigurationArn *string `json:"observabilityConfigurationArn,omitempty" tf:"observability_configuration_arn,omitempty"` + + // Reference to a ObservabilityConfiguration in apprunner to populate observabilityConfigurationArn. + // +kubebuilder:validation:Optional + ObservabilityConfigurationArnRef *v1.Reference `json:"observabilityConfigurationArnRef,omitempty" tf:"-"` + + // Selector for a ObservabilityConfiguration in apprunner to populate observabilityConfigurationArn. + // +kubebuilder:validation:Optional + ObservabilityConfigurationArnSelector *v1.Selector `json:"observabilityConfigurationArnSelector,omitempty" tf:"-"` + + // When true, an observability configuration resource is associated with the service. 
+ // +kubebuilder:validation:Optional + ObservabilityEnabled *bool `json:"observabilityEnabled" tf:"observability_enabled,omitempty"` +} + +type ServiceObservation struct { + + // ARN of the App Runner service. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN of an App Runner automatic scaling configuration resource that you want to associate with your service. If not provided, App Runner associates the latest revision of a default auto scaling configuration. + AutoScalingConfigurationArn *string `json:"autoScalingConfigurationArn,omitempty" tf:"auto_scaling_configuration_arn,omitempty"` + + // (Forces new resource) An optional custom encryption key that App Runner uses to encrypt the copy of your source repository that it maintains and your service logs. By default, App Runner uses an AWS managed CMK. See Encryption Configuration below for more details. + EncryptionConfiguration *EncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Settings of the health check that AWS App Runner performs to monitor the health of your service. See Health Check Configuration below for more details. + HealthCheckConfiguration *HealthCheckConfigurationObservation `json:"healthCheckConfiguration,omitempty" tf:"health_check_configuration,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The runtime configuration of instances (scaling units) of the App Runner service. See Instance Configuration below for more details. + InstanceConfiguration *InstanceConfigurationObservation `json:"instanceConfiguration,omitempty" tf:"instance_configuration,omitempty"` + + // Configuration settings related to network traffic of the web application that the App Runner service runs. See Network Configuration below for more details. 
+ NetworkConfiguration *NetworkConfigurationObservation `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // The observability configuration of your service. See Observability Configuration below for more details. + ObservabilityConfiguration *ServiceObservabilityConfigurationObservation `json:"observabilityConfiguration,omitempty" tf:"observability_configuration,omitempty"` + + // An alphanumeric ID that App Runner generated for this service. Unique within the AWS Region. + ServiceID *string `json:"serviceId,omitempty" tf:"service_id,omitempty"` + + // (Forces new resource) Name of the service. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Subdomain URL that App Runner generated for this service. You can use this URL to access your service web application. + ServiceURL *string `json:"serviceUrl,omitempty" tf:"service_url,omitempty"` + + // The source to deploy to the App Runner service. Can be a code or an image repository. See Source Configuration below for more details. + SourceConfiguration *SourceConfigurationObservation `json:"sourceConfiguration,omitempty" tf:"source_configuration,omitempty"` + + // Current state of the App Runner service. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ServiceParameters struct { + + // ARN of an App Runner automatic scaling configuration resource that you want to associate with your service. If not provided, App Runner associates the latest revision of a default auto scaling configuration. 
+ // +kubebuilder:validation:Optional + AutoScalingConfigurationArn *string `json:"autoScalingConfigurationArn,omitempty" tf:"auto_scaling_configuration_arn,omitempty"` + + // (Forces new resource) An optional custom encryption key that App Runner uses to encrypt the copy of your source repository that it maintains and your service logs. By default, App Runner uses an AWS managed CMK. See Encryption Configuration below for more details. + // +kubebuilder:validation:Optional + EncryptionConfiguration *EncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Settings of the health check that AWS App Runner performs to monitor the health of your service. See Health Check Configuration below for more details. + // +kubebuilder:validation:Optional + HealthCheckConfiguration *HealthCheckConfigurationParameters `json:"healthCheckConfiguration,omitempty" tf:"health_check_configuration,omitempty"` + + // The runtime configuration of instances (scaling units) of the App Runner service. See Instance Configuration below for more details. + // +kubebuilder:validation:Optional + InstanceConfiguration *InstanceConfigurationParameters `json:"instanceConfiguration,omitempty" tf:"instance_configuration,omitempty"` + + // Configuration settings related to network traffic of the web application that the App Runner service runs. See Network Configuration below for more details. + // +kubebuilder:validation:Optional + NetworkConfiguration *NetworkConfigurationParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // The observability configuration of your service. See Observability Configuration below for more details. 
+ // +kubebuilder:validation:Optional + ObservabilityConfiguration *ServiceObservabilityConfigurationParameters `json:"observabilityConfiguration,omitempty" tf:"observability_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // (Forces new resource) Name of the service. + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // The source to deploy to the App Runner service. Can be a code or an image repository. See Source Configuration below for more details. + // +kubebuilder:validation:Optional + SourceConfiguration *SourceConfigurationParameters `json:"sourceConfiguration,omitempty" tf:"source_configuration,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SourceCodeVersionInitParameters struct { + + // Type of version identifier. For a git-based repository, branches represent versions. Valid values: BRANCH. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Source code version. For a git-based repository, a branch name maps to a specific version. App Runner uses the most recent commit to the branch. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SourceCodeVersionObservation struct { + + // Type of version identifier. For a git-based repository, branches represent versions. Valid values: BRANCH. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Source code version. For a git-based repository, a branch name maps to a specific version. App Runner uses the most recent commit to the branch. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SourceCodeVersionParameters struct { + + // Type of version identifier. 
For a git-based repository, branches represent versions. Valid values: BRANCH. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Source code version. For a git-based repository, a branch name maps to a specific version. App Runner uses the most recent commit to the branch. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type SourceConfigurationInitParameters struct { + + // Describes resources needed to authenticate access to some source repositories. See Authentication Configuration below for more details. + AuthenticationConfiguration *AuthenticationConfigurationInitParameters `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // Whether continuous integration from the source repository is enabled for the App Runner service. If set to true, each repository change (source code commit or new image version) starts a deployment. Defaults to true. + AutoDeploymentsEnabled *bool `json:"autoDeploymentsEnabled,omitempty" tf:"auto_deployments_enabled,omitempty"` + + // Description of a source code repository. See Code Repository below for more details. + CodeRepository *CodeRepositoryInitParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // Description of a source image repository. See Image Repository below for more details. + ImageRepository *ImageRepositoryInitParameters `json:"imageRepository,omitempty" tf:"image_repository,omitempty"` +} + +type SourceConfigurationObservation struct { + + // Describes resources needed to authenticate access to some source repositories. See Authentication Configuration below for more details. + AuthenticationConfiguration *AuthenticationConfigurationObservation `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // Whether continuous integration from the source repository is enabled for the App Runner service. 
If set to true, each repository change (source code commit or new image version) starts a deployment. Defaults to true. + AutoDeploymentsEnabled *bool `json:"autoDeploymentsEnabled,omitempty" tf:"auto_deployments_enabled,omitempty"` + + // Description of a source code repository. See Code Repository below for more details. + CodeRepository *CodeRepositoryObservation `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // Description of a source image repository. See Image Repository below for more details. + ImageRepository *ImageRepositoryObservation `json:"imageRepository,omitempty" tf:"image_repository,omitempty"` +} + +type SourceConfigurationParameters struct { + + // Describes resources needed to authenticate access to some source repositories. See Authentication Configuration below for more details. + // +kubebuilder:validation:Optional + AuthenticationConfiguration *AuthenticationConfigurationParameters `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // Whether continuous integration from the source repository is enabled for the App Runner service. If set to true, each repository change (source code commit or new image version) starts a deployment. Defaults to true. + // +kubebuilder:validation:Optional + AutoDeploymentsEnabled *bool `json:"autoDeploymentsEnabled,omitempty" tf:"auto_deployments_enabled,omitempty"` + + // Description of a source code repository. See Code Repository below for more details. + // +kubebuilder:validation:Optional + CodeRepository *CodeRepositoryParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // Description of a source image repository. See Image Repository below for more details. 
+ // +kubebuilder:validation:Optional + ImageRepository *ImageRepositoryParameters `json:"imageRepository,omitempty" tf:"image_repository,omitempty"` +} + +// ServiceSpec defines the desired state of Service +type ServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceInitParameters `json:"initProvider,omitempty"` +} + +// ServiceStatus defines the observed state of Service. +type ServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Service is the Schema for the Services API. Manages an App Runner Service. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Service struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceName) || (has(self.initProvider) && has(self.initProvider.serviceName))",message="spec.forProvider.serviceName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sourceConfiguration) || (has(self.initProvider) && has(self.initProvider.sourceConfiguration))",message="spec.forProvider.sourceConfiguration is a required parameter" + Spec ServiceSpec `json:"spec"` + Status ServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceList contains a list of Services +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Service `json:"items"` +} + +// Repository type metadata. +var ( + Service_Kind = "Service" + Service_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Service_Kind}.String() + Service_KindAPIVersion = Service_Kind + "." 
+ CRDGroupVersion.String() + Service_GroupVersionKind = CRDGroupVersion.WithKind(Service_Kind) +) + +func init() { + SchemeBuilder.Register(&Service{}, &ServiceList{}) +} diff --git a/apis/appstream/v1beta1/zz_fleetstackassociation_types.go b/apis/appstream/v1beta1/zz_fleetstackassociation_types.go index 5ad7e32f43..a28d23d66d 100755 --- a/apis/appstream/v1beta1/zz_fleetstackassociation_types.go +++ b/apis/appstream/v1beta1/zz_fleetstackassociation_types.go @@ -31,7 +31,7 @@ type FleetStackAssociationObservation struct { type FleetStackAssociationParameters struct { // Name of the fleet. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appstream/v1beta1.Fleet + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appstream/v1beta2.Fleet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) // +kubebuilder:validation:Optional FleetName *string `json:"fleetName,omitempty" tf:"fleet_name,omitempty"` @@ -50,7 +50,7 @@ type FleetStackAssociationParameters struct { Region *string `json:"region" tf:"-"` // Name of the stack. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appstream/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appstream/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) // +kubebuilder:validation:Optional StackName *string `json:"stackName,omitempty" tf:"stack_name,omitempty"` diff --git a/apis/appstream/v1beta1/zz_generated.conversion_hubs.go b/apis/appstream/v1beta1/zz_generated.conversion_hubs.go index 186fa94130..7009a0c5b7 100755 --- a/apis/appstream/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/appstream/v1beta1/zz_generated.conversion_hubs.go @@ -6,21 +6,9 @@ package v1beta1 -// Hub marks this type as a conversion hub. 
-func (tr *DirectoryConfig) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Fleet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FleetStackAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ImageBuilder) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Stack) Hub() {} - // Hub marks this type as a conversion hub. func (tr *User) Hub() {} diff --git a/apis/appstream/v1beta1/zz_generated.conversion_spokes.go b/apis/appstream/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..84db49e8fb --- /dev/null +++ b/apis/appstream/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this DirectoryConfig to the hub type. +func (tr *DirectoryConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DirectoryConfig type. 
+func (tr *DirectoryConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Fleet to the hub type. +func (tr *Fleet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Fleet type. +func (tr *Fleet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ImageBuilder to the hub type. +func (tr *ImageBuilder) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ImageBuilder type. 
+func (tr *ImageBuilder) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Stack to the hub type. +func (tr *Stack) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Stack type. +func (tr *Stack) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/appstream/v1beta1/zz_generated.resolvers.go b/apis/appstream/v1beta1/zz_generated.resolvers.go index 3315b4ba58..d1d70515c1 100644 --- a/apis/appstream/v1beta1/zz_generated.resolvers.go +++ b/apis/appstream/v1beta1/zz_generated.resolvers.go @@ -122,7 +122,7 @@ func (mg *FleetStackAssociation) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appstream.aws.upbound.io", "v1beta1", "Fleet", "FleetList") + m, l, err = apisresolver.GetManagedResource("appstream.aws.upbound.io", "v1beta2", "Fleet", "FleetList") if err 
!= nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -141,7 +141,7 @@ func (mg *FleetStackAssociation) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.FleetName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.FleetNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appstream.aws.upbound.io", "v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("appstream.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -285,7 +285,7 @@ func (mg *UserStackAssociation) ResolveReferences(ctx context.Context, c client. mg.Spec.ForProvider.AuthenticationType = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AuthenticationTypeRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appstream.aws.upbound.io", "v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("appstream.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/appstream/v1beta1/zz_userstackassociation_types.go b/apis/appstream/v1beta1/zz_userstackassociation_types.go index 5e7b352dbd..7d7fa89c50 100755 --- a/apis/appstream/v1beta1/zz_userstackassociation_types.go +++ b/apis/appstream/v1beta1/zz_userstackassociation_types.go @@ -63,7 +63,7 @@ type UserStackAssociationParameters struct { SendEmailNotification *bool `json:"sendEmailNotification,omitempty" tf:"send_email_notification,omitempty"` // Name of the stack that is associated with the user. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appstream/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appstream/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) // +kubebuilder:validation:Optional StackName *string `json:"stackName,omitempty" tf:"stack_name,omitempty"` diff --git a/apis/appstream/v1beta2/zz_directoryconfig_terraformed.go b/apis/appstream/v1beta2/zz_directoryconfig_terraformed.go new file mode 100755 index 0000000000..02da7afe50 --- /dev/null +++ b/apis/appstream/v1beta2/zz_directoryconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DirectoryConfig +func (mg *DirectoryConfig) GetTerraformResourceType() string { + return "aws_appstream_directory_config" +} + +// GetConnectionDetailsMapping for this DirectoryConfig +func (tr *DirectoryConfig) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"service_account_credentials[*].account_password": "serviceAccountCredentials[*].accountPasswordSecretRef"} +} + +// GetObservation of this DirectoryConfig +func (tr *DirectoryConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DirectoryConfig +func (tr *DirectoryConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DirectoryConfig +func (tr *DirectoryConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DirectoryConfig +func (tr *DirectoryConfig) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DirectoryConfig +func (tr *DirectoryConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DirectoryConfig +func (tr *DirectoryConfig) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this DirectoryConfig +func (tr *DirectoryConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DirectoryConfig using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DirectoryConfig) LateInitialize(attrs []byte) (bool, error) { + params := &DirectoryConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DirectoryConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appstream/v1beta2/zz_directoryconfig_types.go b/apis/appstream/v1beta2/zz_directoryconfig_types.go new file mode 100755 index 0000000000..5e7a3b9c6f --- /dev/null +++ b/apis/appstream/v1beta2/zz_directoryconfig_types.go @@ -0,0 +1,156 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DirectoryConfigInitParameters struct { + + // Fully qualified name of the directory. + DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // Distinguished names of the organizational units for computer accounts. 
+ // +listType=set + OrganizationalUnitDistinguishedNames []*string `json:"organizationalUnitDistinguishedNames,omitempty" tf:"organizational_unit_distinguished_names,omitempty"` + + // Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See service_account_credentials below. + ServiceAccountCredentials *ServiceAccountCredentialsInitParameters `json:"serviceAccountCredentials,omitempty" tf:"service_account_credentials,omitempty"` +} + +type DirectoryConfigObservation struct { + + // Date and time, in UTC and extended RFC 3339 format, when the directory config was created. + CreatedTime *string `json:"createdTime,omitempty" tf:"created_time,omitempty"` + + // Fully qualified name of the directory. + DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // Unique identifier (ID) of the appstream directory config. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Distinguished names of the organizational units for computer accounts. + // +listType=set + OrganizationalUnitDistinguishedNames []*string `json:"organizationalUnitDistinguishedNames,omitempty" tf:"organizational_unit_distinguished_names,omitempty"` + + // Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See service_account_credentials below. + ServiceAccountCredentials *ServiceAccountCredentialsObservation `json:"serviceAccountCredentials,omitempty" tf:"service_account_credentials,omitempty"` +} + +type DirectoryConfigParameters struct { + + // Fully qualified name of the directory. + // +kubebuilder:validation:Optional + DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // Distinguished names of the organizational units for computer accounts. 
+ // +kubebuilder:validation:Optional + // +listType=set + OrganizationalUnitDistinguishedNames []*string `json:"organizationalUnitDistinguishedNames,omitempty" tf:"organizational_unit_distinguished_names,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See service_account_credentials below. + // +kubebuilder:validation:Optional + ServiceAccountCredentials *ServiceAccountCredentialsParameters `json:"serviceAccountCredentials,omitempty" tf:"service_account_credentials,omitempty"` +} + +type ServiceAccountCredentialsInitParameters struct { + + // User name of the account. This account must have the following privileges: create computer objects, join computers to the domain, and change/reset the password on descendant computer objects for the organizational units specified. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Password for the account. + AccountPasswordSecretRef v1.SecretKeySelector `json:"accountPasswordSecretRef" tf:"-"` +} + +type ServiceAccountCredentialsObservation struct { + + // User name of the account. This account must have the following privileges: create computer objects, join computers to the domain, and change/reset the password on descendant computer objects for the organizational units specified. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` +} + +type ServiceAccountCredentialsParameters struct { + + // User name of the account. This account must have the following privileges: create computer objects, join computers to the domain, and change/reset the password on descendant computer objects for the organizational units specified. 
+ // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // Password for the account. + // +kubebuilder:validation:Optional + AccountPasswordSecretRef v1.SecretKeySelector `json:"accountPasswordSecretRef" tf:"-"` +} + +// DirectoryConfigSpec defines the desired state of DirectoryConfig +type DirectoryConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DirectoryConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DirectoryConfigInitParameters `json:"initProvider,omitempty"` +} + +// DirectoryConfigStatus defines the observed state of DirectoryConfig. +type DirectoryConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DirectoryConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DirectoryConfig is the Schema for the DirectoryConfigs API. 
Provides an AppStream Directory Config +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DirectoryConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.directoryName) || (has(self.initProvider) && has(self.initProvider.directoryName))",message="spec.forProvider.directoryName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.organizationalUnitDistinguishedNames) || (has(self.initProvider) && has(self.initProvider.organizationalUnitDistinguishedNames))",message="spec.forProvider.organizationalUnitDistinguishedNames is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceAccountCredentials) || (has(self.initProvider) && has(self.initProvider.serviceAccountCredentials))",message="spec.forProvider.serviceAccountCredentials is a required parameter" + Spec DirectoryConfigSpec `json:"spec"` + Status DirectoryConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DirectoryConfigList contains a list of DirectoryConfigs +type 
DirectoryConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DirectoryConfig `json:"items"` +} + +// Repository type metadata. +var ( + DirectoryConfig_Kind = "DirectoryConfig" + DirectoryConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DirectoryConfig_Kind}.String() + DirectoryConfig_KindAPIVersion = DirectoryConfig_Kind + "." + CRDGroupVersion.String() + DirectoryConfig_GroupVersionKind = CRDGroupVersion.WithKind(DirectoryConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&DirectoryConfig{}, &DirectoryConfigList{}) +} diff --git a/apis/appstream/v1beta2/zz_fleet_terraformed.go b/apis/appstream/v1beta2/zz_fleet_terraformed.go new file mode 100755 index 0000000000..806e4cda97 --- /dev/null +++ b/apis/appstream/v1beta2/zz_fleet_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Fleet +func (mg *Fleet) GetTerraformResourceType() string { + return "aws_appstream_fleet" +} + +// GetConnectionDetailsMapping for this Fleet +func (tr *Fleet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Fleet +func (tr *Fleet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Fleet +func (tr *Fleet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of 
underlying Terraform resource of this Fleet +func (tr *Fleet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Fleet +func (tr *Fleet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Fleet +func (tr *Fleet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Fleet +func (tr *Fleet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Fleet +func (tr *Fleet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Fleet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Fleet) LateInitialize(attrs []byte) (bool, error) { + params := &FleetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Fleet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appstream/v1beta2/zz_fleet_types.go b/apis/appstream/v1beta2/zz_fleet_types.go new file mode 100755 index 0000000000..cb20f4382b --- /dev/null +++ b/apis/appstream/v1beta2/zz_fleet_types.go @@ -0,0 +1,427 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ComputeCapacityInitParameters struct { + + // Desired number of streaming instances. + DesiredInstances *float64 `json:"desiredInstances,omitempty" tf:"desired_instances,omitempty"` + + // Desired number of user sessions for a multi-session fleet. This is not allowed for single-session fleets. 
+ DesiredSessions *float64 `json:"desiredSessions,omitempty" tf:"desired_sessions,omitempty"` +} + +type ComputeCapacityObservation struct { + + // Number of currently available instances that can be used to stream sessions. + Available *float64 `json:"available,omitempty" tf:"available,omitempty"` + + // Desired number of streaming instances. + DesiredInstances *float64 `json:"desiredInstances,omitempty" tf:"desired_instances,omitempty"` + + // Desired number of user sessions for a multi-session fleet. This is not allowed for single-session fleets. + DesiredSessions *float64 `json:"desiredSessions,omitempty" tf:"desired_sessions,omitempty"` + + // Number of instances in use for streaming. + InUse *float64 `json:"inUse,omitempty" tf:"in_use,omitempty"` + + // Total number of simultaneous streaming instances that are running. + Running *float64 `json:"running,omitempty" tf:"running,omitempty"` +} + +type ComputeCapacityParameters struct { + + // Desired number of streaming instances. + // +kubebuilder:validation:Optional + DesiredInstances *float64 `json:"desiredInstances,omitempty" tf:"desired_instances,omitempty"` + + // Desired number of user sessions for a multi-session fleet. This is not allowed for single-session fleets. + // +kubebuilder:validation:Optional + DesiredSessions *float64 `json:"desiredSessions,omitempty" tf:"desired_sessions,omitempty"` +} + +type DomainJoinInfoInitParameters struct { + + // Fully qualified name of the directory (for example, corp.example.com). + DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // Distinguished name of the organizational unit for computer accounts. + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` +} + +type DomainJoinInfoObservation struct { + + // Fully qualified name of the directory (for example, corp.example.com). 
+ DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // Distinguished name of the organizational unit for computer accounts. + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` +} + +type DomainJoinInfoParameters struct { + + // Fully qualified name of the directory (for example, corp.example.com). + // +kubebuilder:validation:Optional + DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // Distinguished name of the organizational unit for computer accounts. + // +kubebuilder:validation:Optional + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` +} + +type FleetInitParameters struct { + + // Configuration block for the desired capacity of the fleet. See below. + ComputeCapacity *ComputeCapacityInitParameters `json:"computeCapacity,omitempty" tf:"compute_capacity,omitempty"` + + // Description to display. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amount of time that a streaming session remains active after users disconnect. + DisconnectTimeoutInSeconds *float64 `json:"disconnectTimeoutInSeconds,omitempty" tf:"disconnect_timeout_in_seconds,omitempty"` + + // Human-readable friendly name for the AppStream fleet. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below. + DomainJoinInfo *DomainJoinInfoInitParameters `json:"domainJoinInfo,omitempty" tf:"domain_join_info,omitempty"` + + // Enables or disables default internet access for the fleet. 
+ EnableDefaultInternetAccess *bool `json:"enableDefaultInternetAccess,omitempty" tf:"enable_default_internet_access,omitempty"` + + // Fleet type. Valid values are: ON_DEMAND, ALWAYS_ON + FleetType *string `json:"fleetType,omitempty" tf:"fleet_type,omitempty"` + + // ARN of the IAM role to apply to the fleet. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` + + // Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect_timeout_in_seconds time interval begins. Defaults to 60 seconds. + IdleDisconnectTimeoutInSeconds *float64 `json:"idleDisconnectTimeoutInSeconds,omitempty" tf:"idle_disconnect_timeout_in_seconds,omitempty"` + + // ARN of the public, private, or shared image to use. + ImageArn *string `json:"imageArn,omitempty" tf:"image_arn,omitempty"` + + // Name of the image used to create the fleet. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // Instance type to use when launching fleet instances. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The maximum number of user sessions on an instance. This only applies to multi-session fleets. + MaxSessionsPerInstance *float64 `json:"maxSessionsPerInstance,omitempty" tf:"max_sessions_per_instance,omitempty"` + + // Maximum amount of time that a streaming session can remain active, in seconds. 
+ MaxUserDurationInSeconds *float64 `json:"maxUserDurationInSeconds,omitempty" tf:"max_user_duration_in_seconds,omitempty"` + + // Unique name for the fleet. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // AppStream 2.0 view that is displayed to your users when they stream from the fleet. When APP is specified, only the windows of applications opened by users display. When DESKTOP is specified, the standard desktop that is provided by the operating system displays. If not specified, defaults to APP. + StreamView *string `json:"streamView,omitempty" tf:"stream_view,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for the VPC configuration for the image builder. See below. + VPCConfig *VPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type FleetObservation struct { + + // ARN of the appstream fleet. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration block for the desired capacity of the fleet. See below. + ComputeCapacity *ComputeCapacityObservation `json:"computeCapacity,omitempty" tf:"compute_capacity,omitempty"` + + // Date and time, in UTC and extended RFC 3339 format, when the fleet was created. + CreatedTime *string `json:"createdTime,omitempty" tf:"created_time,omitempty"` + + // Description to display. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amount of time that a streaming session remains active after users disconnect. + DisconnectTimeoutInSeconds *float64 `json:"disconnectTimeoutInSeconds,omitempty" tf:"disconnect_timeout_in_seconds,omitempty"` + + // Human-readable friendly name for the AppStream fleet. 
+ DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below. + DomainJoinInfo *DomainJoinInfoObservation `json:"domainJoinInfo,omitempty" tf:"domain_join_info,omitempty"` + + // Enables or disables default internet access for the fleet. + EnableDefaultInternetAccess *bool `json:"enableDefaultInternetAccess,omitempty" tf:"enable_default_internet_access,omitempty"` + + // Fleet type. Valid values are: ON_DEMAND, ALWAYS_ON + FleetType *string `json:"fleetType,omitempty" tf:"fleet_type,omitempty"` + + // ARN of the IAM role to apply to the fleet. + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Unique identifier (ID) of the appstream fleet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect_timeout_in_seconds time interval begins. Defaults to 60 seconds. + IdleDisconnectTimeoutInSeconds *float64 `json:"idleDisconnectTimeoutInSeconds,omitempty" tf:"idle_disconnect_timeout_in_seconds,omitempty"` + + // ARN of the public, private, or shared image to use. + ImageArn *string `json:"imageArn,omitempty" tf:"image_arn,omitempty"` + + // Name of the image used to create the fleet. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // Instance type to use when launching fleet instances. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The maximum number of user sessions on an instance. This only applies to multi-session fleets. + MaxSessionsPerInstance *float64 `json:"maxSessionsPerInstance,omitempty" tf:"max_sessions_per_instance,omitempty"` + + // Maximum amount of time that a streaming session can remain active, in seconds. 
+ MaxUserDurationInSeconds *float64 `json:"maxUserDurationInSeconds,omitempty" tf:"max_user_duration_in_seconds,omitempty"` + + // Unique name for the fleet. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // State of the fleet. Can be STARTING, RUNNING, STOPPING or STOPPED + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // AppStream 2.0 view that is displayed to your users when they stream from the fleet. When APP is specified, only the windows of applications opened by users display. When DESKTOP is specified, the standard desktop that is provided by the operating system displays. If not specified, defaults to APP. + StreamView *string `json:"streamView,omitempty" tf:"stream_view,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block for the VPC configuration for the image builder. See below. + VPCConfig *VPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type FleetParameters struct { + + // Configuration block for the desired capacity of the fleet. See below. + // +kubebuilder:validation:Optional + ComputeCapacity *ComputeCapacityParameters `json:"computeCapacity,omitempty" tf:"compute_capacity,omitempty"` + + // Description to display. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amount of time that a streaming session remains active after users disconnect. + // +kubebuilder:validation:Optional + DisconnectTimeoutInSeconds *float64 `json:"disconnectTimeoutInSeconds,omitempty" tf:"disconnect_timeout_in_seconds,omitempty"` + + // Human-readable friendly name for the AppStream fleet. 
+ // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below. + // +kubebuilder:validation:Optional + DomainJoinInfo *DomainJoinInfoParameters `json:"domainJoinInfo,omitempty" tf:"domain_join_info,omitempty"` + + // Enables or disables default internet access for the fleet. + // +kubebuilder:validation:Optional + EnableDefaultInternetAccess *bool `json:"enableDefaultInternetAccess,omitempty" tf:"enable_default_internet_access,omitempty"` + + // Fleet type. Valid values are: ON_DEMAND, ALWAYS_ON + // +kubebuilder:validation:Optional + FleetType *string `json:"fleetType,omitempty" tf:"fleet_type,omitempty"` + + // ARN of the IAM role to apply to the fleet. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` + + // Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect_timeout_in_seconds time interval begins. Defaults to 60 seconds. + // +kubebuilder:validation:Optional + IdleDisconnectTimeoutInSeconds *float64 `json:"idleDisconnectTimeoutInSeconds,omitempty" tf:"idle_disconnect_timeout_in_seconds,omitempty"` + + // ARN of the public, private, or shared image to use. 
+ // +kubebuilder:validation:Optional + ImageArn *string `json:"imageArn,omitempty" tf:"image_arn,omitempty"` + + // Name of the image used to create the fleet. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // Instance type to use when launching fleet instances. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The maximum number of user sessions on an instance. This only applies to multi-session fleets. + // +kubebuilder:validation:Optional + MaxSessionsPerInstance *float64 `json:"maxSessionsPerInstance,omitempty" tf:"max_sessions_per_instance,omitempty"` + + // Maximum amount of time that a streaming session can remain active, in seconds. + // +kubebuilder:validation:Optional + MaxUserDurationInSeconds *float64 `json:"maxUserDurationInSeconds,omitempty" tf:"max_user_duration_in_seconds,omitempty"` + + // Unique name for the fleet. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // AppStream 2.0 view that is displayed to your users when they stream from the fleet. When APP is specified, only the windows of applications opened by users display. When DESKTOP is specified, the standard desktop that is provided by the operating system displays. If not specified, defaults to APP. + // +kubebuilder:validation:Optional + StreamView *string `json:"streamView,omitempty" tf:"stream_view,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for the VPC configuration for the image builder. See below. 
+ // +kubebuilder:validation:Optional + VPCConfig *VPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type VPCConfigInitParameters struct { + + // Identifiers of the security groups for the fleet or image builder. + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Identifiers of the subnets to which a network interface is attached from the fleet instance or image builder instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigObservation struct { + + // Identifiers of the security groups for the fleet or image builder. + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Identifiers of the subnets to which a network interface is attached from the fleet instance or image builder instance. + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigParameters struct { + + // Identifiers of the security groups for the fleet or image builder. + // +kubebuilder:validation:Optional + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. 
+ // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Identifiers of the subnets to which a network interface is attached from the fleet instance or image builder instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +// FleetSpec defines the desired state of Fleet +type FleetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FleetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FleetInitParameters `json:"initProvider,omitempty"` +} + +// FleetStatus defines the observed state of Fleet. +type FleetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FleetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Fleet is the Schema for the Fleets API. 
Provides an AppStream fleet +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws},path=fleet +type Fleet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.computeCapacity) || (has(self.initProvider) && has(self.initProvider.computeCapacity))",message="spec.forProvider.computeCapacity is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instanceType) || (has(self.initProvider) && has(self.initProvider.instanceType))",message="spec.forProvider.instanceType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec FleetSpec `json:"spec"` + Status FleetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FleetList contains a list of Fleets +type FleetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Fleet `json:"items"` +} + +// Repository type metadata. 
+var ( + Fleet_Kind = "Fleet" + Fleet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Fleet_Kind}.String() + Fleet_KindAPIVersion = Fleet_Kind + "." + CRDGroupVersion.String() + Fleet_GroupVersionKind = CRDGroupVersion.WithKind(Fleet_Kind) +) + +func init() { + SchemeBuilder.Register(&Fleet{}, &FleetList{}) +} diff --git a/apis/appstream/v1beta2/zz_generated.conversion_hubs.go b/apis/appstream/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..a517dc5805 --- /dev/null +++ b/apis/appstream/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *DirectoryConfig) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Fleet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ImageBuilder) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Stack) Hub() {} diff --git a/apis/appstream/v1beta2/zz_generated.deepcopy.go b/apis/appstream/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..7465e61362 --- /dev/null +++ b/apis/appstream/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2607 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessEndpointInitParameters) DeepCopyInto(out *AccessEndpointInitParameters) { + *out = *in + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.VpceID != nil { + in, out := &in.VpceID, &out.VpceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessEndpointInitParameters. +func (in *AccessEndpointInitParameters) DeepCopy() *AccessEndpointInitParameters { + if in == nil { + return nil + } + out := new(AccessEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessEndpointObservation) DeepCopyInto(out *AccessEndpointObservation) { + *out = *in + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.VpceID != nil { + in, out := &in.VpceID, &out.VpceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessEndpointObservation. +func (in *AccessEndpointObservation) DeepCopy() *AccessEndpointObservation { + if in == nil { + return nil + } + out := new(AccessEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessEndpointParameters) DeepCopyInto(out *AccessEndpointParameters) { + *out = *in + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.VpceID != nil { + in, out := &in.VpceID, &out.VpceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessEndpointParameters. 
+func (in *AccessEndpointParameters) DeepCopy() *AccessEndpointParameters { + if in == nil { + return nil + } + out := new(AccessEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessEndpointsInitParameters) DeepCopyInto(out *AccessEndpointsInitParameters) { + *out = *in + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.VpceID != nil { + in, out := &in.VpceID, &out.VpceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessEndpointsInitParameters. +func (in *AccessEndpointsInitParameters) DeepCopy() *AccessEndpointsInitParameters { + if in == nil { + return nil + } + out := new(AccessEndpointsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessEndpointsObservation) DeepCopyInto(out *AccessEndpointsObservation) { + *out = *in + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.VpceID != nil { + in, out := &in.VpceID, &out.VpceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessEndpointsObservation. +func (in *AccessEndpointsObservation) DeepCopy() *AccessEndpointsObservation { + if in == nil { + return nil + } + out := new(AccessEndpointsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessEndpointsParameters) DeepCopyInto(out *AccessEndpointsParameters) { + *out = *in + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.VpceID != nil { + in, out := &in.VpceID, &out.VpceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessEndpointsParameters. +func (in *AccessEndpointsParameters) DeepCopy() *AccessEndpointsParameters { + if in == nil { + return nil + } + out := new(AccessEndpointsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSettingsInitParameters) DeepCopyInto(out *ApplicationSettingsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SettingsGroup != nil { + in, out := &in.SettingsGroup, &out.SettingsGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSettingsInitParameters. +func (in *ApplicationSettingsInitParameters) DeepCopy() *ApplicationSettingsInitParameters { + if in == nil { + return nil + } + out := new(ApplicationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationSettingsObservation) DeepCopyInto(out *ApplicationSettingsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SettingsGroup != nil { + in, out := &in.SettingsGroup, &out.SettingsGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSettingsObservation. +func (in *ApplicationSettingsObservation) DeepCopy() *ApplicationSettingsObservation { + if in == nil { + return nil + } + out := new(ApplicationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSettingsParameters) DeepCopyInto(out *ApplicationSettingsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SettingsGroup != nil { + in, out := &in.SettingsGroup, &out.SettingsGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSettingsParameters. +func (in *ApplicationSettingsParameters) DeepCopy() *ApplicationSettingsParameters { + if in == nil { + return nil + } + out := new(ApplicationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeCapacityInitParameters) DeepCopyInto(out *ComputeCapacityInitParameters) { + *out = *in + if in.DesiredInstances != nil { + in, out := &in.DesiredInstances, &out.DesiredInstances + *out = new(float64) + **out = **in + } + if in.DesiredSessions != nil { + in, out := &in.DesiredSessions, &out.DesiredSessions + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeCapacityInitParameters. +func (in *ComputeCapacityInitParameters) DeepCopy() *ComputeCapacityInitParameters { + if in == nil { + return nil + } + out := new(ComputeCapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeCapacityObservation) DeepCopyInto(out *ComputeCapacityObservation) { + *out = *in + if in.Available != nil { + in, out := &in.Available, &out.Available + *out = new(float64) + **out = **in + } + if in.DesiredInstances != nil { + in, out := &in.DesiredInstances, &out.DesiredInstances + *out = new(float64) + **out = **in + } + if in.DesiredSessions != nil { + in, out := &in.DesiredSessions, &out.DesiredSessions + *out = new(float64) + **out = **in + } + if in.InUse != nil { + in, out := &in.InUse, &out.InUse + *out = new(float64) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeCapacityObservation. +func (in *ComputeCapacityObservation) DeepCopy() *ComputeCapacityObservation { + if in == nil { + return nil + } + out := new(ComputeCapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeCapacityParameters) DeepCopyInto(out *ComputeCapacityParameters) { + *out = *in + if in.DesiredInstances != nil { + in, out := &in.DesiredInstances, &out.DesiredInstances + *out = new(float64) + **out = **in + } + if in.DesiredSessions != nil { + in, out := &in.DesiredSessions, &out.DesiredSessions + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeCapacityParameters. +func (in *ComputeCapacityParameters) DeepCopy() *ComputeCapacityParameters { + if in == nil { + return nil + } + out := new(ComputeCapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryConfig) DeepCopyInto(out *DirectoryConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryConfig. +func (in *DirectoryConfig) DeepCopy() *DirectoryConfig { + if in == nil { + return nil + } + out := new(DirectoryConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DirectoryConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryConfigInitParameters) DeepCopyInto(out *DirectoryConfigInitParameters) { + *out = *in + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedNames != nil { + in, out := &in.OrganizationalUnitDistinguishedNames, &out.OrganizationalUnitDistinguishedNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountCredentials != nil { + in, out := &in.ServiceAccountCredentials, &out.ServiceAccountCredentials + *out = new(ServiceAccountCredentialsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryConfigInitParameters. +func (in *DirectoryConfigInitParameters) DeepCopy() *DirectoryConfigInitParameters { + if in == nil { + return nil + } + out := new(DirectoryConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryConfigList) DeepCopyInto(out *DirectoryConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DirectoryConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryConfigList. +func (in *DirectoryConfigList) DeepCopy() *DirectoryConfigList { + if in == nil { + return nil + } + out := new(DirectoryConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DirectoryConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryConfigObservation) DeepCopyInto(out *DirectoryConfigObservation) { + *out = *in + if in.CreatedTime != nil { + in, out := &in.CreatedTime, &out.CreatedTime + *out = new(string) + **out = **in + } + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedNames != nil { + in, out := &in.OrganizationalUnitDistinguishedNames, &out.OrganizationalUnitDistinguishedNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountCredentials != nil { + in, out := &in.ServiceAccountCredentials, &out.ServiceAccountCredentials + *out = new(ServiceAccountCredentialsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryConfigObservation. +func (in *DirectoryConfigObservation) DeepCopy() *DirectoryConfigObservation { + if in == nil { + return nil + } + out := new(DirectoryConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryConfigParameters) DeepCopyInto(out *DirectoryConfigParameters) { + *out = *in + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedNames != nil { + in, out := &in.OrganizationalUnitDistinguishedNames, &out.OrganizationalUnitDistinguishedNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceAccountCredentials != nil { + in, out := &in.ServiceAccountCredentials, &out.ServiceAccountCredentials + *out = new(ServiceAccountCredentialsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryConfigParameters. +func (in *DirectoryConfigParameters) DeepCopy() *DirectoryConfigParameters { + if in == nil { + return nil + } + out := new(DirectoryConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryConfigSpec) DeepCopyInto(out *DirectoryConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryConfigSpec. +func (in *DirectoryConfigSpec) DeepCopy() *DirectoryConfigSpec { + if in == nil { + return nil + } + out := new(DirectoryConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryConfigStatus) DeepCopyInto(out *DirectoryConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryConfigStatus. +func (in *DirectoryConfigStatus) DeepCopy() *DirectoryConfigStatus { + if in == nil { + return nil + } + out := new(DirectoryConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainJoinInfoInitParameters) DeepCopyInto(out *DomainJoinInfoInitParameters) { + *out = *in + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainJoinInfoInitParameters. +func (in *DomainJoinInfoInitParameters) DeepCopy() *DomainJoinInfoInitParameters { + if in == nil { + return nil + } + out := new(DomainJoinInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainJoinInfoObservation) DeepCopyInto(out *DomainJoinInfoObservation) { + *out = *in + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainJoinInfoObservation. 
+func (in *DomainJoinInfoObservation) DeepCopy() *DomainJoinInfoObservation { + if in == nil { + return nil + } + out := new(DomainJoinInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainJoinInfoParameters) DeepCopyInto(out *DomainJoinInfoParameters) { + *out = *in + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainJoinInfoParameters. +func (in *DomainJoinInfoParameters) DeepCopy() *DomainJoinInfoParameters { + if in == nil { + return nil + } + out := new(DomainJoinInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Fleet) DeepCopyInto(out *Fleet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fleet. +func (in *Fleet) DeepCopy() *Fleet { + if in == nil { + return nil + } + out := new(Fleet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Fleet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FleetInitParameters) DeepCopyInto(out *FleetInitParameters) { + *out = *in + if in.ComputeCapacity != nil { + in, out := &in.ComputeCapacity, &out.ComputeCapacity + *out = new(ComputeCapacityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisconnectTimeoutInSeconds != nil { + in, out := &in.DisconnectTimeoutInSeconds, &out.DisconnectTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.DomainJoinInfo != nil { + in, out := &in.DomainJoinInfo, &out.DomainJoinInfo + *out = new(DomainJoinInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableDefaultInternetAccess != nil { + in, out := &in.EnableDefaultInternetAccess, &out.EnableDefaultInternetAccess + *out = new(bool) + **out = **in + } + if in.FleetType != nil { + in, out := &in.FleetType, &out.FleetType + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IdleDisconnectTimeoutInSeconds != nil { + in, out := &in.IdleDisconnectTimeoutInSeconds, &out.IdleDisconnectTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ImageArn != nil { + in, out := &in.ImageArn, &out.ImageArn + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if 
in.MaxSessionsPerInstance != nil { + in, out := &in.MaxSessionsPerInstance, &out.MaxSessionsPerInstance + *out = new(float64) + **out = **in + } + if in.MaxUserDurationInSeconds != nil { + in, out := &in.MaxUserDurationInSeconds, &out.MaxUserDurationInSeconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StreamView != nil { + in, out := &in.StreamView, &out.StreamView + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetInitParameters. +func (in *FleetInitParameters) DeepCopy() *FleetInitParameters { + if in == nil { + return nil + } + out := new(FleetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FleetList) DeepCopyInto(out *FleetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Fleet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetList. 
+func (in *FleetList) DeepCopy() *FleetList { + if in == nil { + return nil + } + out := new(FleetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FleetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FleetObservation) DeepCopyInto(out *FleetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ComputeCapacity != nil { + in, out := &in.ComputeCapacity, &out.ComputeCapacity + *out = new(ComputeCapacityObservation) + (*in).DeepCopyInto(*out) + } + if in.CreatedTime != nil { + in, out := &in.CreatedTime, &out.CreatedTime + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisconnectTimeoutInSeconds != nil { + in, out := &in.DisconnectTimeoutInSeconds, &out.DisconnectTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.DomainJoinInfo != nil { + in, out := &in.DomainJoinInfo, &out.DomainJoinInfo + *out = new(DomainJoinInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.EnableDefaultInternetAccess != nil { + in, out := &in.EnableDefaultInternetAccess, &out.EnableDefaultInternetAccess + *out = new(bool) + **out = **in + } + if in.FleetType != nil { + in, out := &in.FleetType, &out.FleetType + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if 
in.IdleDisconnectTimeoutInSeconds != nil { + in, out := &in.IdleDisconnectTimeoutInSeconds, &out.IdleDisconnectTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ImageArn != nil { + in, out := &in.ImageArn, &out.ImageArn + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.MaxSessionsPerInstance != nil { + in, out := &in.MaxSessionsPerInstance, &out.MaxSessionsPerInstance + *out = new(float64) + **out = **in + } + if in.MaxUserDurationInSeconds != nil { + in, out := &in.MaxUserDurationInSeconds, &out.MaxUserDurationInSeconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.StreamView != nil { + in, out := &in.StreamView, &out.StreamView + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
FleetObservation. +func (in *FleetObservation) DeepCopy() *FleetObservation { + if in == nil { + return nil + } + out := new(FleetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FleetParameters) DeepCopyInto(out *FleetParameters) { + *out = *in + if in.ComputeCapacity != nil { + in, out := &in.ComputeCapacity, &out.ComputeCapacity + *out = new(ComputeCapacityParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisconnectTimeoutInSeconds != nil { + in, out := &in.DisconnectTimeoutInSeconds, &out.DisconnectTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.DomainJoinInfo != nil { + in, out := &in.DomainJoinInfo, &out.DomainJoinInfo + *out = new(DomainJoinInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableDefaultInternetAccess != nil { + in, out := &in.EnableDefaultInternetAccess, &out.EnableDefaultInternetAccess + *out = new(bool) + **out = **in + } + if in.FleetType != nil { + in, out := &in.FleetType, &out.FleetType + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IdleDisconnectTimeoutInSeconds != nil { + in, out := &in.IdleDisconnectTimeoutInSeconds, &out.IdleDisconnectTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ImageArn != nil { + in, out := &in.ImageArn, 
&out.ImageArn + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.MaxSessionsPerInstance != nil { + in, out := &in.MaxSessionsPerInstance, &out.MaxSessionsPerInstance + *out = new(float64) + **out = **in + } + if in.MaxUserDurationInSeconds != nil { + in, out := &in.MaxUserDurationInSeconds, &out.MaxUserDurationInSeconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StreamView != nil { + in, out := &in.StreamView, &out.StreamView + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetParameters. +func (in *FleetParameters) DeepCopy() *FleetParameters { + if in == nil { + return nil + } + out := new(FleetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FleetSpec) DeepCopyInto(out *FleetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetSpec. +func (in *FleetSpec) DeepCopy() *FleetSpec { + if in == nil { + return nil + } + out := new(FleetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FleetStatus) DeepCopyInto(out *FleetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetStatus. +func (in *FleetStatus) DeepCopy() *FleetStatus { + if in == nil { + return nil + } + out := new(FleetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBuilder) DeepCopyInto(out *ImageBuilder) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilder. +func (in *ImageBuilder) DeepCopy() *ImageBuilder { + if in == nil { + return nil + } + out := new(ImageBuilder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageBuilder) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageBuilderDomainJoinInfoInitParameters) DeepCopyInto(out *ImageBuilderDomainJoinInfoInitParameters) { + *out = *in + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderDomainJoinInfoInitParameters. +func (in *ImageBuilderDomainJoinInfoInitParameters) DeepCopy() *ImageBuilderDomainJoinInfoInitParameters { + if in == nil { + return nil + } + out := new(ImageBuilderDomainJoinInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBuilderDomainJoinInfoObservation) DeepCopyInto(out *ImageBuilderDomainJoinInfoObservation) { + *out = *in + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderDomainJoinInfoObservation. +func (in *ImageBuilderDomainJoinInfoObservation) DeepCopy() *ImageBuilderDomainJoinInfoObservation { + if in == nil { + return nil + } + out := new(ImageBuilderDomainJoinInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageBuilderDomainJoinInfoParameters) DeepCopyInto(out *ImageBuilderDomainJoinInfoParameters) { + *out = *in + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderDomainJoinInfoParameters. +func (in *ImageBuilderDomainJoinInfoParameters) DeepCopy() *ImageBuilderDomainJoinInfoParameters { + if in == nil { + return nil + } + out := new(ImageBuilderDomainJoinInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBuilderInitParameters) DeepCopyInto(out *ImageBuilderInitParameters) { + *out = *in + if in.AccessEndpoint != nil { + in, out := &in.AccessEndpoint, &out.AccessEndpoint + *out = make([]AccessEndpointInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AppstreamAgentVersion != nil { + in, out := &in.AppstreamAgentVersion, &out.AppstreamAgentVersion + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.DomainJoinInfo != nil { + in, out := &in.DomainJoinInfo, &out.DomainJoinInfo + *out = new(ImageBuilderDomainJoinInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableDefaultInternetAccess != nil { + in, out := &in.EnableDefaultInternetAccess, &out.EnableDefaultInternetAccess + *out = new(bool) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + 
*out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageArn != nil { + in, out := &in.ImageArn, &out.ImageArn + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(ImageBuilderVPCConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderInitParameters. +func (in *ImageBuilderInitParameters) DeepCopy() *ImageBuilderInitParameters { + if in == nil { + return nil + } + out := new(ImageBuilderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBuilderList) DeepCopyInto(out *ImageBuilderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageBuilder, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderList. 
+func (in *ImageBuilderList) DeepCopy() *ImageBuilderList { + if in == nil { + return nil + } + out := new(ImageBuilderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageBuilderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBuilderObservation) DeepCopyInto(out *ImageBuilderObservation) { + *out = *in + if in.AccessEndpoint != nil { + in, out := &in.AccessEndpoint, &out.AccessEndpoint + *out = make([]AccessEndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AppstreamAgentVersion != nil { + in, out := &in.AppstreamAgentVersion, &out.AppstreamAgentVersion + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedTime != nil { + in, out := &in.CreatedTime, &out.CreatedTime + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.DomainJoinInfo != nil { + in, out := &in.DomainJoinInfo, &out.DomainJoinInfo + *out = new(ImageBuilderDomainJoinInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.EnableDefaultInternetAccess != nil { + in, out := &in.EnableDefaultInternetAccess, &out.EnableDefaultInternetAccess + *out = new(bool) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageArn != nil { + in, out := &in.ImageArn, 
&out.ImageArn + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(ImageBuilderVPCConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderObservation. +func (in *ImageBuilderObservation) DeepCopy() *ImageBuilderObservation { + if in == nil { + return nil + } + out := new(ImageBuilderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageBuilderParameters) DeepCopyInto(out *ImageBuilderParameters) { + *out = *in + if in.AccessEndpoint != nil { + in, out := &in.AccessEndpoint, &out.AccessEndpoint + *out = make([]AccessEndpointParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AppstreamAgentVersion != nil { + in, out := &in.AppstreamAgentVersion, &out.AppstreamAgentVersion + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.DomainJoinInfo != nil { + in, out := &in.DomainJoinInfo, &out.DomainJoinInfo + *out = new(ImageBuilderDomainJoinInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableDefaultInternetAccess != nil { + in, out := &in.EnableDefaultInternetAccess, &out.EnableDefaultInternetAccess + *out = new(bool) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageArn != nil { + in, out := &in.ImageArn, &out.ImageArn + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = 
new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(ImageBuilderVPCConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderParameters. +func (in *ImageBuilderParameters) DeepCopy() *ImageBuilderParameters { + if in == nil { + return nil + } + out := new(ImageBuilderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBuilderSpec) DeepCopyInto(out *ImageBuilderSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderSpec. +func (in *ImageBuilderSpec) DeepCopy() *ImageBuilderSpec { + if in == nil { + return nil + } + out := new(ImageBuilderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBuilderStatus) DeepCopyInto(out *ImageBuilderStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderStatus. +func (in *ImageBuilderStatus) DeepCopy() *ImageBuilderStatus { + if in == nil { + return nil + } + out := new(ImageBuilderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageBuilderVPCConfigInitParameters) DeepCopyInto(out *ImageBuilderVPCConfigInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderVPCConfigInitParameters. +func (in *ImageBuilderVPCConfigInitParameters) DeepCopy() *ImageBuilderVPCConfigInitParameters { + if in == nil { + return nil + } + out := new(ImageBuilderVPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageBuilderVPCConfigObservation) DeepCopyInto(out *ImageBuilderVPCConfigObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderVPCConfigObservation. +func (in *ImageBuilderVPCConfigObservation) DeepCopy() *ImageBuilderVPCConfigObservation { + if in == nil { + return nil + } + out := new(ImageBuilderVPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageBuilderVPCConfigParameters) DeepCopyInto(out *ImageBuilderVPCConfigParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBuilderVPCConfigParameters. +func (in *ImageBuilderVPCConfigParameters) DeepCopy() *ImageBuilderVPCConfigParameters { + if in == nil { + return nil + } + out := new(ImageBuilderVPCConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountCredentialsInitParameters) DeepCopyInto(out *ServiceAccountCredentialsInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + out.AccountPasswordSecretRef = in.AccountPasswordSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountCredentialsInitParameters. 
+func (in *ServiceAccountCredentialsInitParameters) DeepCopy() *ServiceAccountCredentialsInitParameters { + if in == nil { + return nil + } + out := new(ServiceAccountCredentialsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountCredentialsObservation) DeepCopyInto(out *ServiceAccountCredentialsObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountCredentialsObservation. +func (in *ServiceAccountCredentialsObservation) DeepCopy() *ServiceAccountCredentialsObservation { + if in == nil { + return nil + } + out := new(ServiceAccountCredentialsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountCredentialsParameters) DeepCopyInto(out *ServiceAccountCredentialsParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + out.AccountPasswordSecretRef = in.AccountPasswordSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountCredentialsParameters. +func (in *ServiceAccountCredentialsParameters) DeepCopy() *ServiceAccountCredentialsParameters { + if in == nil { + return nil + } + out := new(ServiceAccountCredentialsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Stack) DeepCopyInto(out *Stack) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stack. +func (in *Stack) DeepCopy() *Stack { + if in == nil { + return nil + } + out := new(Stack) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Stack) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackInitParameters) DeepCopyInto(out *StackInitParameters) { + *out = *in + if in.AccessEndpoints != nil { + in, out := &in.AccessEndpoints, &out.AccessEndpoints + *out = make([]AccessEndpointsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationSettings != nil { + in, out := &in.ApplicationSettings, &out.ApplicationSettings + *out = new(ApplicationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.EmbedHostDomains != nil { + in, out := &in.EmbedHostDomains, &out.EmbedHostDomains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FeedbackURL != nil { + in, out := &in.FeedbackURL, &out.FeedbackURL + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RedirectURL != nil { + in, 
out := &in.RedirectURL, &out.RedirectURL + *out = new(string) + **out = **in + } + if in.StorageConnectors != nil { + in, out := &in.StorageConnectors, &out.StorageConnectors + *out = make([]StorageConnectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamingExperienceSettings != nil { + in, out := &in.StreamingExperienceSettings, &out.StreamingExperienceSettings + *out = new(StreamingExperienceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserSettings != nil { + in, out := &in.UserSettings, &out.UserSettings + *out = make([]UserSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackInitParameters. +func (in *StackInitParameters) DeepCopy() *StackInitParameters { + if in == nil { + return nil + } + out := new(StackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackList) DeepCopyInto(out *StackList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Stack, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackList. 
+func (in *StackList) DeepCopy() *StackList { + if in == nil { + return nil + } + out := new(StackList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StackList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackObservation) DeepCopyInto(out *StackObservation) { + *out = *in + if in.AccessEndpoints != nil { + in, out := &in.AccessEndpoints, &out.AccessEndpoints + *out = make([]AccessEndpointsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationSettings != nil { + in, out := &in.ApplicationSettings, &out.ApplicationSettings + *out = new(ApplicationSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedTime != nil { + in, out := &in.CreatedTime, &out.CreatedTime + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.EmbedHostDomains != nil { + in, out := &in.EmbedHostDomains, &out.EmbedHostDomains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FeedbackURL != nil { + in, out := &in.FeedbackURL, &out.FeedbackURL + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RedirectURL != nil { + in, out := 
&in.RedirectURL, &out.RedirectURL + *out = new(string) + **out = **in + } + if in.StorageConnectors != nil { + in, out := &in.StorageConnectors, &out.StorageConnectors + *out = make([]StorageConnectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamingExperienceSettings != nil { + in, out := &in.StreamingExperienceSettings, &out.StreamingExperienceSettings + *out = new(StreamingExperienceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserSettings != nil { + in, out := &in.UserSettings, &out.UserSettings + *out = make([]UserSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackObservation. +func (in *StackObservation) DeepCopy() *StackObservation { + if in == nil { + return nil + } + out := new(StackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StackParameters) DeepCopyInto(out *StackParameters) { + *out = *in + if in.AccessEndpoints != nil { + in, out := &in.AccessEndpoints, &out.AccessEndpoints + *out = make([]AccessEndpointsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationSettings != nil { + in, out := &in.ApplicationSettings, &out.ApplicationSettings + *out = new(ApplicationSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.EmbedHostDomains != nil { + in, out := &in.EmbedHostDomains, &out.EmbedHostDomains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FeedbackURL != nil { + in, out := &in.FeedbackURL, &out.FeedbackURL + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RedirectURL != nil { + in, out := &in.RedirectURL, &out.RedirectURL + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StorageConnectors != nil { + in, out := &in.StorageConnectors, &out.StorageConnectors + *out = make([]StorageConnectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamingExperienceSettings != nil { + in, out := &in.StreamingExperienceSettings, &out.StreamingExperienceSettings + *out = new(StreamingExperienceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserSettings != nil { + in, out := &in.UserSettings, &out.UserSettings + *out = make([]UserSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackParameters. +func (in *StackParameters) DeepCopy() *StackParameters { + if in == nil { + return nil + } + out := new(StackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSpec) DeepCopyInto(out *StackSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSpec. +func (in *StackSpec) DeepCopy() *StackSpec { + if in == nil { + return nil + } + out := new(StackSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackStatus) DeepCopyInto(out *StackStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackStatus. +func (in *StackStatus) DeepCopy() *StackStatus { + if in == nil { + return nil + } + out := new(StackStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageConnectorsInitParameters) DeepCopyInto(out *StorageConnectorsInitParameters) { + *out = *in + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceIdentifier != nil { + in, out := &in.ResourceIdentifier, &out.ResourceIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConnectorsInitParameters. +func (in *StorageConnectorsInitParameters) DeepCopy() *StorageConnectorsInitParameters { + if in == nil { + return nil + } + out := new(StorageConnectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageConnectorsObservation) DeepCopyInto(out *StorageConnectorsObservation) { + *out = *in + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceIdentifier != nil { + in, out := &in.ResourceIdentifier, &out.ResourceIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConnectorsObservation. 
+func (in *StorageConnectorsObservation) DeepCopy() *StorageConnectorsObservation { + if in == nil { + return nil + } + out := new(StorageConnectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageConnectorsParameters) DeepCopyInto(out *StorageConnectorsParameters) { + *out = *in + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceIdentifier != nil { + in, out := &in.ResourceIdentifier, &out.ResourceIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConnectorsParameters. +func (in *StorageConnectorsParameters) DeepCopy() *StorageConnectorsParameters { + if in == nil { + return nil + } + out := new(StorageConnectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingExperienceSettingsInitParameters) DeepCopyInto(out *StreamingExperienceSettingsInitParameters) { + *out = *in + if in.PreferredProtocol != nil { + in, out := &in.PreferredProtocol, &out.PreferredProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingExperienceSettingsInitParameters. 
+func (in *StreamingExperienceSettingsInitParameters) DeepCopy() *StreamingExperienceSettingsInitParameters { + if in == nil { + return nil + } + out := new(StreamingExperienceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingExperienceSettingsObservation) DeepCopyInto(out *StreamingExperienceSettingsObservation) { + *out = *in + if in.PreferredProtocol != nil { + in, out := &in.PreferredProtocol, &out.PreferredProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingExperienceSettingsObservation. +func (in *StreamingExperienceSettingsObservation) DeepCopy() *StreamingExperienceSettingsObservation { + if in == nil { + return nil + } + out := new(StreamingExperienceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingExperienceSettingsParameters) DeepCopyInto(out *StreamingExperienceSettingsParameters) { + *out = *in + if in.PreferredProtocol != nil { + in, out := &in.PreferredProtocol, &out.PreferredProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingExperienceSettingsParameters. +func (in *StreamingExperienceSettingsParameters) DeepCopy() *StreamingExperienceSettingsParameters { + if in == nil { + return nil + } + out := new(StreamingExperienceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsInitParameters) DeepCopyInto(out *UserSettingsInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsInitParameters. +func (in *UserSettingsInitParameters) DeepCopy() *UserSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsObservation) DeepCopyInto(out *UserSettingsObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsObservation. +func (in *UserSettingsObservation) DeepCopy() *UserSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsParameters) DeepCopyInto(out *UserSettingsParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsParameters. 
+func (in *UserSettingsParameters) DeepCopy() *UserSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigInitParameters) DeepCopyInto(out *VPCConfigInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigInitParameters. +func (in *VPCConfigInitParameters) DeepCopy() *VPCConfigInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigObservation) DeepCopyInto(out *VPCConfigObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigObservation. +func (in *VPCConfigObservation) DeepCopy() *VPCConfigObservation { + if in == nil { + return nil + } + out := new(VPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigParameters) DeepCopyInto(out *VPCConfigParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
VPCConfigParameters. +func (in *VPCConfigParameters) DeepCopy() *VPCConfigParameters { + if in == nil { + return nil + } + out := new(VPCConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/appstream/v1beta2/zz_generated.managed.go b/apis/appstream/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..d04a25856a --- /dev/null +++ b/apis/appstream/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this DirectoryConfig. +func (mg *DirectoryConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DirectoryConfig. +func (mg *DirectoryConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DirectoryConfig. +func (mg *DirectoryConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DirectoryConfig. +func (mg *DirectoryConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DirectoryConfig. +func (mg *DirectoryConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DirectoryConfig. +func (mg *DirectoryConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DirectoryConfig. +func (mg *DirectoryConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DirectoryConfig. 
+func (mg *DirectoryConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DirectoryConfig. +func (mg *DirectoryConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DirectoryConfig. +func (mg *DirectoryConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DirectoryConfig. +func (mg *DirectoryConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DirectoryConfig. +func (mg *DirectoryConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Fleet. +func (mg *Fleet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Fleet. +func (mg *Fleet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Fleet. +func (mg *Fleet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Fleet. +func (mg *Fleet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Fleet. +func (mg *Fleet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Fleet. +func (mg *Fleet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Fleet. +func (mg *Fleet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Fleet. 
+func (mg *Fleet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Fleet. +func (mg *Fleet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Fleet. +func (mg *Fleet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Fleet. +func (mg *Fleet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Fleet. +func (mg *Fleet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ImageBuilder. +func (mg *ImageBuilder) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ImageBuilder. +func (mg *ImageBuilder) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ImageBuilder. +func (mg *ImageBuilder) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ImageBuilder. +func (mg *ImageBuilder) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ImageBuilder. +func (mg *ImageBuilder) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ImageBuilder. +func (mg *ImageBuilder) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ImageBuilder. +func (mg *ImageBuilder) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ImageBuilder. 
+func (mg *ImageBuilder) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ImageBuilder. +func (mg *ImageBuilder) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ImageBuilder. +func (mg *ImageBuilder) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ImageBuilder. +func (mg *ImageBuilder) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ImageBuilder. +func (mg *ImageBuilder) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Stack. +func (mg *Stack) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Stack. +func (mg *Stack) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Stack. +func (mg *Stack) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Stack. +func (mg *Stack) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Stack. +func (mg *Stack) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Stack. +func (mg *Stack) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Stack. +func (mg *Stack) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Stack. 
+func (mg *Stack) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Stack. +func (mg *Stack) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Stack. +func (mg *Stack) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Stack. +func (mg *Stack) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Stack. +func (mg *Stack) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/appstream/v1beta2/zz_generated.managedlist.go b/apis/appstream/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..08dbf69fbd --- /dev/null +++ b/apis/appstream/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DirectoryConfigList. +func (l *DirectoryConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FleetList. +func (l *FleetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ImageBuilderList. +func (l *ImageBuilderList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StackList. 
+func (l *StackList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/appstream/v1beta2/zz_generated.resolvers.go b/apis/appstream/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..ce9a2b3cb1 --- /dev/null +++ b/apis/appstream/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,207 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Fleet. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Fleet) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.IAMRoleArnRef, + Selector: mg.Spec.ForProvider.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMRoleArn") + } + 
mg.Spec.ForProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IAMRoleArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig.SubnetIds") + } + mg.Spec.ForProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.IAMRoleArnRef, + Selector: mg.Spec.InitProvider.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMRoleArn") + } + mg.Spec.InitProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IAMRoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig.SubnetIds") + } + mg.Spec.InitProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + + return nil +} + +// ResolveReferences of this ImageBuilder. +func (mg *ImageBuilder) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.IAMRoleArnRef, + Selector: mg.Spec.ForProvider.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMRoleArn") + } + mg.Spec.ForProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IAMRoleArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", 
"Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig.SubnetIds") + } + mg.Spec.ForProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.IAMRoleArnRef, + Selector: mg.Spec.InitProvider.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMRoleArn") + } + mg.Spec.InitProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IAMRoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: 
reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig.SubnetIds") + } + mg.Spec.InitProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + + return nil +} diff --git a/apis/appstream/v1beta2/zz_groupversion_info.go b/apis/appstream/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..670ce53e2c --- /dev/null +++ b/apis/appstream/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=appstream.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "appstream.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/appstream/v1beta2/zz_imagebuilder_terraformed.go b/apis/appstream/v1beta2/zz_imagebuilder_terraformed.go new file mode 100755 index 0000000000..b3bcb8027f --- /dev/null +++ b/apis/appstream/v1beta2/zz_imagebuilder_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ImageBuilder +func (mg *ImageBuilder) GetTerraformResourceType() string { + return "aws_appstream_image_builder" +} + +// GetConnectionDetailsMapping for this ImageBuilder +func (tr *ImageBuilder) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ImageBuilder +func (tr *ImageBuilder) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ImageBuilder +func (tr *ImageBuilder) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ImageBuilder +func (tr *ImageBuilder) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ImageBuilder +func (tr *ImageBuilder) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
SetParameters for this ImageBuilder +func (tr *ImageBuilder) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ImageBuilder +func (tr *ImageBuilder) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ImageBuilder +func (tr *ImageBuilder) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ImageBuilder using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ImageBuilder) LateInitialize(attrs []byte) (bool, error) { + params := &ImageBuilderParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ImageBuilder) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appstream/v1beta2/zz_imagebuilder_types.go b/apis/appstream/v1beta2/zz_imagebuilder_types.go new file mode 100755 index 0000000000..8aa490bd73 --- /dev/null +++ b/apis/appstream/v1beta2/zz_imagebuilder_types.go @@ -0,0 +1,356 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessEndpointInitParameters struct { + + // Type of interface endpoint. For valid values, refer to the AWS documentation. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // Identifier (ID) of the interface VPC endpoint. + VpceID *string `json:"vpceId,omitempty" tf:"vpce_id,omitempty"` +} + +type AccessEndpointObservation struct { + + // Type of interface endpoint. For valid values, refer to the AWS documentation. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // Identifier (ID) of the interface VPC endpoint. + VpceID *string `json:"vpceId,omitempty" tf:"vpce_id,omitempty"` +} + +type AccessEndpointParameters struct { + + // Type of interface endpoint. 
For valid values, refer to the AWS documentation. + // +kubebuilder:validation:Optional + EndpointType *string `json:"endpointType" tf:"endpoint_type,omitempty"` + + // Identifier (ID) of the interface VPC endpoint. + // +kubebuilder:validation:Optional + VpceID *string `json:"vpceId,omitempty" tf:"vpce_id,omitempty"` +} + +type ImageBuilderDomainJoinInfoInitParameters struct { + + // Fully qualified name of the directory (for example, corp.example.com). + DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // Distinguished name of the organizational unit for computer accounts. + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` +} + +type ImageBuilderDomainJoinInfoObservation struct { + + // Fully qualified name of the directory (for example, corp.example.com). + DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // Distinguished name of the organizational unit for computer accounts. + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` +} + +type ImageBuilderDomainJoinInfoParameters struct { + + // Fully qualified name of the directory (for example, corp.example.com). + // +kubebuilder:validation:Optional + DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // Distinguished name of the organizational unit for computer accounts. + // +kubebuilder:validation:Optional + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` +} + +type ImageBuilderInitParameters struct { + + // Set of interface VPC endpoint (interface endpoint) objects. Maximum of 4. See below. 
+ AccessEndpoint []AccessEndpointInitParameters `json:"accessEndpoint,omitempty" tf:"access_endpoint,omitempty"` + + // Version of the AppStream 2.0 agent to use for this image builder. + AppstreamAgentVersion *string `json:"appstreamAgentVersion,omitempty" tf:"appstream_agent_version,omitempty"` + + // Description to display. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Human-readable friendly name for the AppStream image builder. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Configuration block for the name of the directory and organizational unit (OU) to use to join the image builder to a Microsoft Active Directory domain. See below. + DomainJoinInfo *ImageBuilderDomainJoinInfoInitParameters `json:"domainJoinInfo,omitempty" tf:"domain_join_info,omitempty"` + + // Enables or disables default internet access for the image builder. + EnableDefaultInternetAccess *bool `json:"enableDefaultInternetAccess,omitempty" tf:"enable_default_internet_access,omitempty"` + + // ARN of the IAM role to apply to the image builder. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` + + // ARN of the public, private, or shared image to use. + ImageArn *string `json:"imageArn,omitempty" tf:"image_arn,omitempty"` + + // Instance type to use when launching the image builder. 
+ InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for the VPC configuration for the image builder. See below. + VPCConfig *ImageBuilderVPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ImageBuilderObservation struct { + + // Set of interface VPC endpoint (interface endpoint) objects. Maximum of 4. See below. + AccessEndpoint []AccessEndpointObservation `json:"accessEndpoint,omitempty" tf:"access_endpoint,omitempty"` + + // Version of the AppStream 2.0 agent to use for this image builder. + AppstreamAgentVersion *string `json:"appstreamAgentVersion,omitempty" tf:"appstream_agent_version,omitempty"` + + // ARN of the appstream image builder. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Date and time, in UTC and extended RFC 3339 format, when the image builder was created. + CreatedTime *string `json:"createdTime,omitempty" tf:"created_time,omitempty"` + + // Description to display. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Human-readable friendly name for the AppStream image builder. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Configuration block for the name of the directory and organizational unit (OU) to use to join the image builder to a Microsoft Active Directory domain. See below. + DomainJoinInfo *ImageBuilderDomainJoinInfoObservation `json:"domainJoinInfo,omitempty" tf:"domain_join_info,omitempty"` + + // Enables or disables default internet access for the image builder. + EnableDefaultInternetAccess *bool `json:"enableDefaultInternetAccess,omitempty" tf:"enable_default_internet_access,omitempty"` + + // ARN of the IAM role to apply to the image builder. 
+ IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Name of the image builder. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ARN of the public, private, or shared image to use. + ImageArn *string `json:"imageArn,omitempty" tf:"image_arn,omitempty"` + + // Name of the image used to create the image builder. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // Instance type to use when launching the image builder. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // State of the image builder. For valid values, refer to the AWS documentation. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block for the VPC configuration for the image builder. See below. + VPCConfig *ImageBuilderVPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ImageBuilderParameters struct { + + // Set of interface VPC endpoint (interface endpoint) objects. Maximum of 4. See below. + // +kubebuilder:validation:Optional + AccessEndpoint []AccessEndpointParameters `json:"accessEndpoint,omitempty" tf:"access_endpoint,omitempty"` + + // Version of the AppStream 2.0 agent to use for this image builder. + // +kubebuilder:validation:Optional + AppstreamAgentVersion *string `json:"appstreamAgentVersion,omitempty" tf:"appstream_agent_version,omitempty"` + + // Description to display. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Human-readable friendly name for the AppStream image builder. 
+ // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Configuration block for the name of the directory and organizational unit (OU) to use to join the image builder to a Microsoft Active Directory domain. See below. + // +kubebuilder:validation:Optional + DomainJoinInfo *ImageBuilderDomainJoinInfoParameters `json:"domainJoinInfo,omitempty" tf:"domain_join_info,omitempty"` + + // Enables or disables default internet access for the image builder. + // +kubebuilder:validation:Optional + EnableDefaultInternetAccess *bool `json:"enableDefaultInternetAccess,omitempty" tf:"enable_default_internet_access,omitempty"` + + // ARN of the IAM role to apply to the image builder. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` + + // ARN of the public, private, or shared image to use. + // +kubebuilder:validation:Optional + ImageArn *string `json:"imageArn,omitempty" tf:"image_arn,omitempty"` + + // Instance type to use when launching the image builder. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for the VPC configuration for the image builder. See below. + // +kubebuilder:validation:Optional + VPCConfig *ImageBuilderVPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ImageBuilderVPCConfigInitParameters struct { + + // Identifiers of the security groups for the image builder or image builder. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Identifier of the subnet to which a network interface is attached from the image builder instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type ImageBuilderVPCConfigObservation struct { + + // Identifiers of the security groups for the image builder or image builder. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Identifier of the subnet to which a network interface is attached from the image builder instance. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type ImageBuilderVPCConfigParameters struct { + + // Identifiers of the security groups for the image builder or image builder. 
+ // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Identifier of the subnet to which a network interface is attached from the image builder instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +// ImageBuilderSpec defines the desired state of ImageBuilder +type ImageBuilderSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ImageBuilderParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ImageBuilderInitParameters `json:"initProvider,omitempty"` +} + +// ImageBuilderStatus defines the observed state of ImageBuilder. 
+type ImageBuilderStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ImageBuilderObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ImageBuilder is the Schema for the ImageBuilders API. Provides an AppStream image builder +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ImageBuilder struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instanceType) || (has(self.initProvider) && has(self.initProvider.instanceType))",message="spec.forProvider.instanceType is a required parameter" + Spec ImageBuilderSpec `json:"spec"` + Status ImageBuilderStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ImageBuilderList contains a list of ImageBuilders +type ImageBuilderList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ImageBuilder `json:"items"` +} + +// Repository type metadata. +var ( + ImageBuilder_Kind = "ImageBuilder" + ImageBuilder_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ImageBuilder_Kind}.String() + ImageBuilder_KindAPIVersion = ImageBuilder_Kind + "." 
+ CRDGroupVersion.String() + ImageBuilder_GroupVersionKind = CRDGroupVersion.WithKind(ImageBuilder_Kind) +) + +func init() { + SchemeBuilder.Register(&ImageBuilder{}, &ImageBuilderList{}) +} diff --git a/apis/appstream/v1beta2/zz_stack_terraformed.go b/apis/appstream/v1beta2/zz_stack_terraformed.go new file mode 100755 index 0000000000..72a7a114e1 --- /dev/null +++ b/apis/appstream/v1beta2/zz_stack_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Stack +func (mg *Stack) GetTerraformResourceType() string { + return "aws_appstream_stack" +} + +// GetConnectionDetailsMapping for this Stack +func (tr *Stack) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Stack +func (tr *Stack) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Stack +func (tr *Stack) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Stack +func (tr *Stack) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Stack +func (tr *Stack) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Stack +func (tr *Stack) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Stack +func (tr *Stack) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Stack +func (tr *Stack) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Stack using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Stack) LateInitialize(attrs []byte) (bool, error) { + params := &StackParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Stack) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appstream/v1beta2/zz_stack_types.go b/apis/appstream/v1beta2/zz_stack_types.go new file mode 100755 index 0000000000..3fc7afa435 --- /dev/null +++ b/apis/appstream/v1beta2/zz_stack_types.go @@ -0,0 +1,408 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessEndpointsInitParameters struct { + + // Type of the interface endpoint. + // See the AccessEndpoint AWS API documentation for valid values. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // ID of the VPC in which the interface endpoint is used. + VpceID *string `json:"vpceId,omitempty" tf:"vpce_id,omitempty"` +} + +type AccessEndpointsObservation struct { + + // Type of the interface endpoint. + // See the AccessEndpoint AWS API documentation for valid values. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // ID of the VPC in which the interface endpoint is used. 
+ VpceID *string `json:"vpceId,omitempty" tf:"vpce_id,omitempty"` +} + +type AccessEndpointsParameters struct { + + // Type of the interface endpoint. + // See the AccessEndpoint AWS API documentation for valid values. + // +kubebuilder:validation:Optional + EndpointType *string `json:"endpointType" tf:"endpoint_type,omitempty"` + + // ID of the VPC in which the interface endpoint is used. + // +kubebuilder:validation:Optional + VpceID *string `json:"vpceId,omitempty" tf:"vpce_id,omitempty"` +} + +type ApplicationSettingsInitParameters struct { + + // Whether application settings should be persisted. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Name of the settings group. + // Required when enabled is true. + // Can be up to 100 characters. + SettingsGroup *string `json:"settingsGroup,omitempty" tf:"settings_group,omitempty"` +} + +type ApplicationSettingsObservation struct { + + // Whether application settings should be persisted. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Name of the settings group. + // Required when enabled is true. + // Can be up to 100 characters. + SettingsGroup *string `json:"settingsGroup,omitempty" tf:"settings_group,omitempty"` +} + +type ApplicationSettingsParameters struct { + + // Whether application settings should be persisted. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Name of the settings group. + // Required when enabled is true. + // Can be up to 100 characters. + // +kubebuilder:validation:Optional + SettingsGroup *string `json:"settingsGroup,omitempty" tf:"settings_group,omitempty"` +} + +type StackInitParameters struct { + + // Set of configuration blocks defining the interface VPC endpoints. Users of the stack can connect to AppStream 2.0 only through the specified endpoints. + // See access_endpoints below. 
+ AccessEndpoints []AccessEndpointsInitParameters `json:"accessEndpoints,omitempty" tf:"access_endpoints,omitempty"` + + // Settings for application settings persistence. + // See application_settings below. + ApplicationSettings *ApplicationSettingsInitParameters `json:"applicationSettings,omitempty" tf:"application_settings,omitempty"` + + // Description for the AppStream stack. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Stack name to display. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Domains where AppStream 2.0 streaming sessions can be embedded in an iframe. You must approve the domains that you want to host embedded AppStream 2.0 streaming sessions. + // +listType=set + EmbedHostDomains []*string `json:"embedHostDomains,omitempty" tf:"embed_host_domains,omitempty"` + + // URL that users are redirected to after they click the Send Feedback link. If no URL is specified, no Send Feedback link is displayed. . + FeedbackURL *string `json:"feedbackUrl,omitempty" tf:"feedback_url,omitempty"` + + // Unique name for the AppStream stack. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // URL that users are redirected to after their streaming session ends. + RedirectURL *string `json:"redirectUrl,omitempty" tf:"redirect_url,omitempty"` + + // Configuration block for the storage connectors to enable. + // See storage_connectors below. + StorageConnectors []StorageConnectorsInitParameters `json:"storageConnectors,omitempty" tf:"storage_connectors,omitempty"` + + // The streaming protocol you want your stack to prefer. This can be UDP or TCP. Currently, UDP is only supported in the Windows native client. + // See streaming_experience_settings below. + StreamingExperienceSettings *StreamingExperienceSettingsInitParameters `json:"streamingExperienceSettings,omitempty" tf:"streaming_experience_settings,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for the actions that are enabled or disabled for users during their streaming sessions. If not provided, these settings are configured automatically by AWS. + // See user_settings below. + UserSettings []UserSettingsInitParameters `json:"userSettings,omitempty" tf:"user_settings,omitempty"` +} + +type StackObservation struct { + + // Set of configuration blocks defining the interface VPC endpoints. Users of the stack can connect to AppStream 2.0 only through the specified endpoints. + // See access_endpoints below. + AccessEndpoints []AccessEndpointsObservation `json:"accessEndpoints,omitempty" tf:"access_endpoints,omitempty"` + + // Settings for application settings persistence. + // See application_settings below. + ApplicationSettings *ApplicationSettingsObservation `json:"applicationSettings,omitempty" tf:"application_settings,omitempty"` + + // ARN of the appstream stack. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Date and time, in UTC and extended RFC 3339 format, when the stack was created. + CreatedTime *string `json:"createdTime,omitempty" tf:"created_time,omitempty"` + + // Description for the AppStream stack. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Stack name to display. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Domains where AppStream 2.0 streaming sessions can be embedded in an iframe. You must approve the domains that you want to host embedded AppStream 2.0 streaming sessions. + // +listType=set + EmbedHostDomains []*string `json:"embedHostDomains,omitempty" tf:"embed_host_domains,omitempty"` + + // URL that users are redirected to after they click the Send Feedback link. If no URL is specified, no Send Feedback link is displayed. . 
+ FeedbackURL *string `json:"feedbackUrl,omitempty" tf:"feedback_url,omitempty"` + + // Unique ID of the appstream stack. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Unique name for the AppStream stack. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // URL that users are redirected to after their streaming session ends. + RedirectURL *string `json:"redirectUrl,omitempty" tf:"redirect_url,omitempty"` + + // Configuration block for the storage connectors to enable. + // See storage_connectors below. + StorageConnectors []StorageConnectorsObservation `json:"storageConnectors,omitempty" tf:"storage_connectors,omitempty"` + + // The streaming protocol you want your stack to prefer. This can be UDP or TCP. Currently, UDP is only supported in the Windows native client. + // See streaming_experience_settings below. + StreamingExperienceSettings *StreamingExperienceSettingsObservation `json:"streamingExperienceSettings,omitempty" tf:"streaming_experience_settings,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block for the actions that are enabled or disabled for users during their streaming sessions. If not provided, these settings are configured automatically by AWS. + // See user_settings below. + UserSettings []UserSettingsObservation `json:"userSettings,omitempty" tf:"user_settings,omitempty"` +} + +type StackParameters struct { + + // Set of configuration blocks defining the interface VPC endpoints. Users of the stack can connect to AppStream 2.0 only through the specified endpoints. + // See access_endpoints below. + // +kubebuilder:validation:Optional + AccessEndpoints []AccessEndpointsParameters `json:"accessEndpoints,omitempty" tf:"access_endpoints,omitempty"` + + // Settings for application settings persistence. 
+ // See application_settings below. + // +kubebuilder:validation:Optional + ApplicationSettings *ApplicationSettingsParameters `json:"applicationSettings,omitempty" tf:"application_settings,omitempty"` + + // Description for the AppStream stack. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Stack name to display. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Domains where AppStream 2.0 streaming sessions can be embedded in an iframe. You must approve the domains that you want to host embedded AppStream 2.0 streaming sessions. + // +kubebuilder:validation:Optional + // +listType=set + EmbedHostDomains []*string `json:"embedHostDomains,omitempty" tf:"embed_host_domains,omitempty"` + + // URL that users are redirected to after they click the Send Feedback link. If no URL is specified, no Send Feedback link is displayed. . + // +kubebuilder:validation:Optional + FeedbackURL *string `json:"feedbackUrl,omitempty" tf:"feedback_url,omitempty"` + + // Unique name for the AppStream stack. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // URL that users are redirected to after their streaming session ends. + // +kubebuilder:validation:Optional + RedirectURL *string `json:"redirectUrl,omitempty" tf:"redirect_url,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for the storage connectors to enable. + // See storage_connectors below. + // +kubebuilder:validation:Optional + StorageConnectors []StorageConnectorsParameters `json:"storageConnectors,omitempty" tf:"storage_connectors,omitempty"` + + // The streaming protocol you want your stack to prefer. This can be UDP or TCP. 
Currently, UDP is only supported in the Windows native client. + // See streaming_experience_settings below. + // +kubebuilder:validation:Optional + StreamingExperienceSettings *StreamingExperienceSettingsParameters `json:"streamingExperienceSettings,omitempty" tf:"streaming_experience_settings,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for the actions that are enabled or disabled for users during their streaming sessions. If not provided, these settings are configured automatically by AWS. + // See user_settings below. + // +kubebuilder:validation:Optional + UserSettings []UserSettingsParameters `json:"userSettings,omitempty" tf:"user_settings,omitempty"` +} + +type StorageConnectorsInitParameters struct { + + // Type of storage connector. + // Valid values are HOMEFOLDERS, GOOGLE_DRIVE, or ONE_DRIVE. + ConnectorType *string `json:"connectorType,omitempty" tf:"connector_type,omitempty"` + + // Names of the domains for the account. + Domains []*string `json:"domains,omitempty" tf:"domains,omitempty"` + + // ARN of the storage connector. + ResourceIdentifier *string `json:"resourceIdentifier,omitempty" tf:"resource_identifier,omitempty"` +} + +type StorageConnectorsObservation struct { + + // Type of storage connector. + // Valid values are HOMEFOLDERS, GOOGLE_DRIVE, or ONE_DRIVE. + ConnectorType *string `json:"connectorType,omitempty" tf:"connector_type,omitempty"` + + // Names of the domains for the account. + Domains []*string `json:"domains,omitempty" tf:"domains,omitempty"` + + // ARN of the storage connector. + ResourceIdentifier *string `json:"resourceIdentifier,omitempty" tf:"resource_identifier,omitempty"` +} + +type StorageConnectorsParameters struct { + + // Type of storage connector. + // Valid values are HOMEFOLDERS, GOOGLE_DRIVE, or ONE_DRIVE. 
+ // +kubebuilder:validation:Optional + ConnectorType *string `json:"connectorType" tf:"connector_type,omitempty"` + + // Names of the domains for the account. + // +kubebuilder:validation:Optional + Domains []*string `json:"domains,omitempty" tf:"domains,omitempty"` + + // ARN of the storage connector. + // +kubebuilder:validation:Optional + ResourceIdentifier *string `json:"resourceIdentifier,omitempty" tf:"resource_identifier,omitempty"` +} + +type StreamingExperienceSettingsInitParameters struct { + + // The preferred protocol that you want to use while streaming your application. + // Valid values are TCP and UDP. + PreferredProtocol *string `json:"preferredProtocol,omitempty" tf:"preferred_protocol,omitempty"` +} + +type StreamingExperienceSettingsObservation struct { + + // The preferred protocol that you want to use while streaming your application. + // Valid values are TCP and UDP. + PreferredProtocol *string `json:"preferredProtocol,omitempty" tf:"preferred_protocol,omitempty"` +} + +type StreamingExperienceSettingsParameters struct { + + // The preferred protocol that you want to use while streaming your application. + // Valid values are TCP and UDP. + // +kubebuilder:validation:Optional + PreferredProtocol *string `json:"preferredProtocol,omitempty" tf:"preferred_protocol,omitempty"` +} + +type UserSettingsInitParameters struct { + + // Action that is enabled or disabled. + // Valid values are CLIPBOARD_COPY_FROM_LOCAL_DEVICE, CLIPBOARD_COPY_TO_LOCAL_DEVICE, FILE_UPLOAD, FILE_DOWNLOAD, PRINTING_TO_LOCAL_DEVICE, DOMAIN_PASSWORD_SIGNIN, or DOMAIN_SMART_CARD_SIGNIN. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Whether the action is enabled or disabled. + // Valid values are ENABLED or DISABLED. + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type UserSettingsObservation struct { + + // Action that is enabled or disabled. 
+ // Valid values are CLIPBOARD_COPY_FROM_LOCAL_DEVICE, CLIPBOARD_COPY_TO_LOCAL_DEVICE, FILE_UPLOAD, FILE_DOWNLOAD, PRINTING_TO_LOCAL_DEVICE, DOMAIN_PASSWORD_SIGNIN, or DOMAIN_SMART_CARD_SIGNIN. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Whether the action is enabled or disabled. + // Valid values are ENABLED or DISABLED. + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type UserSettingsParameters struct { + + // Action that is enabled or disabled. + // Valid values are CLIPBOARD_COPY_FROM_LOCAL_DEVICE, CLIPBOARD_COPY_TO_LOCAL_DEVICE, FILE_UPLOAD, FILE_DOWNLOAD, PRINTING_TO_LOCAL_DEVICE, DOMAIN_PASSWORD_SIGNIN, or DOMAIN_SMART_CARD_SIGNIN. + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // Whether the action is enabled or disabled. + // Valid values are ENABLED or DISABLED. + // +kubebuilder:validation:Optional + Permission *string `json:"permission" tf:"permission,omitempty"` +} + +// StackSpec defines the desired state of Stack +type StackSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StackParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StackInitParameters `json:"initProvider,omitempty"` +} + +// StackStatus defines the observed state of Stack. 
+type StackStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StackObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Stack is the Schema for the Stacks API. Provides an AppStream stack +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Stack struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec StackSpec `json:"spec"` + Status StackStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StackList contains a list of Stacks +type StackList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Stack `json:"items"` +} + +// Repository type metadata. +var ( + Stack_Kind = "Stack" + Stack_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Stack_Kind}.String() + Stack_KindAPIVersion = Stack_Kind + "." 
+ CRDGroupVersion.String() + Stack_GroupVersionKind = CRDGroupVersion.WithKind(Stack_Kind) +) + +func init() { + SchemeBuilder.Register(&Stack{}, &StackList{}) +} diff --git a/apis/appsync/v1beta1/zz_apicache_types.go b/apis/appsync/v1beta1/zz_apicache_types.go index e06ffcb83f..d8d10c3c45 100755 --- a/apis/appsync/v1beta1/zz_apicache_types.go +++ b/apis/appsync/v1beta1/zz_apicache_types.go @@ -19,7 +19,7 @@ type APICacheInitParameters struct { APICachingBehavior *string `json:"apiCachingBehavior,omitempty" tf:"api_caching_behavior,omitempty"` // GraphQL API ID. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta1.GraphQLAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.GraphQLAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` @@ -75,7 +75,7 @@ type APICacheParameters struct { APICachingBehavior *string `json:"apiCachingBehavior,omitempty" tf:"api_caching_behavior,omitempty"` // GraphQL API ID. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta1.GraphQLAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.GraphQLAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` diff --git a/apis/appsync/v1beta1/zz_apikey_types.go b/apis/appsync/v1beta1/zz_apikey_types.go index 02a036cf81..c1d5a636bc 100755 --- a/apis/appsync/v1beta1/zz_apikey_types.go +++ b/apis/appsync/v1beta1/zz_apikey_types.go @@ -40,7 +40,7 @@ type APIKeyObservation struct { type APIKeyParameters struct { // ID of the associated AppSync API - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta1.GraphQLAPI + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.GraphQLAPI // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` diff --git a/apis/appsync/v1beta1/zz_generated.conversion_hubs.go b/apis/appsync/v1beta1/zz_generated.conversion_hubs.go index e0477490be..7ea169e73c 100755 --- a/apis/appsync/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/appsync/v1beta1/zz_generated.conversion_hubs.go @@ -11,15 +11,3 @@ func (tr *APICache) Hub() {} // Hub marks this type as a conversion hub. func (tr *APIKey) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Datasource) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Function) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *GraphQLAPI) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *Resolver) Hub() {} diff --git a/apis/appsync/v1beta1/zz_generated.conversion_spokes.go b/apis/appsync/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ee1274b909 --- /dev/null +++ b/apis/appsync/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Datasource to the hub type. +func (tr *Datasource) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Datasource type. +func (tr *Datasource) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Function to the hub type. 
+func (tr *Function) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Function type. +func (tr *Function) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this GraphQLAPI to the hub type. +func (tr *GraphQLAPI) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the GraphQLAPI type. +func (tr *GraphQLAPI) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Resolver to the hub type. 
+func (tr *Resolver) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Resolver type. +func (tr *Resolver) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/appsync/v1beta1/zz_generated.resolvers.go b/apis/appsync/v1beta1/zz_generated.resolvers.go index 1cd705c52f..28b6b9f3c3 100644 --- a/apis/appsync/v1beta1/zz_generated.resolvers.go +++ b/apis/appsync/v1beta1/zz_generated.resolvers.go @@ -28,7 +28,7 @@ func (mg *APICache) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta1", "GraphQLAPI", "GraphQLAPIList") + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "GraphQLAPI", "GraphQLAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -47,7 +47,7 @@ func (mg *APICache) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta1", "GraphQLAPI", 
"GraphQLAPIList") + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "GraphQLAPI", "GraphQLAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -78,7 +78,7 @@ func (mg *APIKey) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta1", "GraphQLAPI", "GraphQLAPIList") + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "GraphQLAPI", "GraphQLAPIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/appsync/v1beta2/zz_datasource_terraformed.go b/apis/appsync/v1beta2/zz_datasource_terraformed.go new file mode 100755 index 0000000000..c17a8b1262 --- /dev/null +++ b/apis/appsync/v1beta2/zz_datasource_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Datasource +func (mg *Datasource) GetTerraformResourceType() string { + return "aws_appsync_datasource" +} + +// GetConnectionDetailsMapping for this Datasource +func (tr *Datasource) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Datasource +func (tr *Datasource) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Datasource +func (tr *Datasource) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Datasource +func (tr *Datasource) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Datasource +func (tr *Datasource) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Datasource +func (tr *Datasource) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Datasource +func (tr *Datasource) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Datasource +func (tr *Datasource) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Datasource using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Datasource) LateInitialize(attrs []byte) (bool, error) { + params := &DatasourceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Datasource) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appsync/v1beta2/zz_datasource_types.go b/apis/appsync/v1beta2/zz_datasource_types.go new file mode 100755 index 0000000000..92d294a2ea --- /dev/null +++ b/apis/appsync/v1beta2/zz_datasource_types.go @@ -0,0 +1,607 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthorizationConfigInitParameters struct { + + // Authorization type that the HTTP endpoint requires. Default values is AWS_IAM. + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // Identity and Access Management (IAM) settings. See AWS IAM Config. + AwsIAMConfig *AwsIAMConfigInitParameters `json:"awsIamConfig,omitempty" tf:"aws_iam_config,omitempty"` +} + +type AuthorizationConfigObservation struct { + + // Authorization type that the HTTP endpoint requires. Default values is AWS_IAM. + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // Identity and Access Management (IAM) settings. See AWS IAM Config. + AwsIAMConfig *AwsIAMConfigObservation `json:"awsIamConfig,omitempty" tf:"aws_iam_config,omitempty"` +} + +type AuthorizationConfigParameters struct { + + // Authorization type that the HTTP endpoint requires. Default values is AWS_IAM. + // +kubebuilder:validation:Optional + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // Identity and Access Management (IAM) settings. See AWS IAM Config. 
+ // +kubebuilder:validation:Optional + AwsIAMConfig *AwsIAMConfigParameters `json:"awsIamConfig,omitempty" tf:"aws_iam_config,omitempty"` +} + +type AwsIAMConfigInitParameters struct { + + // Signing Amazon Web Services Region for IAM authorization. + SigningRegion *string `json:"signingRegion,omitempty" tf:"signing_region,omitempty"` + + // Signing service name for IAM authorization. + SigningServiceName *string `json:"signingServiceName,omitempty" tf:"signing_service_name,omitempty"` +} + +type AwsIAMConfigObservation struct { + + // Signing Amazon Web Services Region for IAM authorization. + SigningRegion *string `json:"signingRegion,omitempty" tf:"signing_region,omitempty"` + + // Signing service name for IAM authorization. + SigningServiceName *string `json:"signingServiceName,omitempty" tf:"signing_service_name,omitempty"` +} + +type AwsIAMConfigParameters struct { + + // Signing Amazon Web Services Region for IAM authorization. + // +kubebuilder:validation:Optional + SigningRegion *string `json:"signingRegion,omitempty" tf:"signing_region,omitempty"` + + // Signing service name for IAM authorization. + // +kubebuilder:validation:Optional + SigningServiceName *string `json:"signingServiceName,omitempty" tf:"signing_service_name,omitempty"` +} + +type DatasourceInitParameters struct { + + // Description of the data source. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // DynamoDB settings. See DynamoDB Config + DynamodbConfig *DynamodbConfigInitParameters `json:"dynamodbConfig,omitempty" tf:"dynamodb_config,omitempty"` + + // Amazon Elasticsearch settings. See ElasticSearch Config + ElasticsearchConfig *ElasticsearchConfigInitParameters `json:"elasticsearchConfig,omitempty" tf:"elasticsearch_config,omitempty"` + + // AWS EventBridge settings. See Event Bridge Config + EventBridgeConfig *EventBridgeConfigInitParameters `json:"eventBridgeConfig,omitempty" tf:"event_bridge_config,omitempty"` + + // HTTP settings. 
See HTTP Config + HTTPConfig *HTTPConfigInitParameters `json:"httpConfig,omitempty" tf:"http_config,omitempty"` + + // AWS Lambda settings. See Lambda Config + LambdaConfig *LambdaConfigInitParameters `json:"lambdaConfig,omitempty" tf:"lambda_config,omitempty"` + + // Amazon OpenSearch Service settings. See OpenSearch Service Config + OpensearchserviceConfig *OpensearchserviceConfigInitParameters `json:"opensearchserviceConfig,omitempty" tf:"opensearchservice_config,omitempty"` + + // AWS RDS settings. See Relational Database Config + RelationalDatabaseConfig *RelationalDatabaseConfigInitParameters `json:"relationalDatabaseConfig,omitempty" tf:"relational_database_config,omitempty"` + + // IAM service role ARN for the data source. Required if type is specified as AWS_LAMBDA, AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, AMAZON_EVENTBRIDGE, or AMAZON_OPENSEARCH_SERVICE. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // Type of the Data Source. Valid values: AWS_LAMBDA, AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, HTTP, NONE, RELATIONAL_DATABASE, AMAZON_EVENTBRIDGE, AMAZON_OPENSEARCH_SERVICE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DatasourceObservation struct { + + // API ID for the GraphQL API for the data source. 
+ APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description of the data source. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // DynamoDB settings. See DynamoDB Config + DynamodbConfig *DynamodbConfigObservation `json:"dynamodbConfig,omitempty" tf:"dynamodb_config,omitempty"` + + // Amazon Elasticsearch settings. See ElasticSearch Config + ElasticsearchConfig *ElasticsearchConfigObservation `json:"elasticsearchConfig,omitempty" tf:"elasticsearch_config,omitempty"` + + // AWS EventBridge settings. See Event Bridge Config + EventBridgeConfig *EventBridgeConfigObservation `json:"eventBridgeConfig,omitempty" tf:"event_bridge_config,omitempty"` + + // HTTP settings. See HTTP Config + HTTPConfig *HTTPConfigObservation `json:"httpConfig,omitempty" tf:"http_config,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // AWS Lambda settings. See Lambda Config + LambdaConfig *LambdaConfigObservation `json:"lambdaConfig,omitempty" tf:"lambda_config,omitempty"` + + // Amazon OpenSearch Service settings. See OpenSearch Service Config + OpensearchserviceConfig *OpensearchserviceConfigObservation `json:"opensearchserviceConfig,omitempty" tf:"opensearchservice_config,omitempty"` + + // AWS RDS settings. See Relational Database Config + RelationalDatabaseConfig *RelationalDatabaseConfigObservation `json:"relationalDatabaseConfig,omitempty" tf:"relational_database_config,omitempty"` + + // IAM service role ARN for the data source. Required if type is specified as AWS_LAMBDA, AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, AMAZON_EVENTBRIDGE, or AMAZON_OPENSEARCH_SERVICE. + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Type of the Data Source. Valid values: AWS_LAMBDA, AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, HTTP, NONE, RELATIONAL_DATABASE, AMAZON_EVENTBRIDGE, AMAZON_OPENSEARCH_SERVICE. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DatasourceParameters struct { + + // API ID for the GraphQL API for the data source. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.GraphQLAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a GraphQLAPI in appsync to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a GraphQLAPI in appsync to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // Description of the data source. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // DynamoDB settings. See DynamoDB Config + // +kubebuilder:validation:Optional + DynamodbConfig *DynamodbConfigParameters `json:"dynamodbConfig,omitempty" tf:"dynamodb_config,omitempty"` + + // Amazon Elasticsearch settings. See ElasticSearch Config + // +kubebuilder:validation:Optional + ElasticsearchConfig *ElasticsearchConfigParameters `json:"elasticsearchConfig,omitempty" tf:"elasticsearch_config,omitempty"` + + // AWS EventBridge settings. See Event Bridge Config + // +kubebuilder:validation:Optional + EventBridgeConfig *EventBridgeConfigParameters `json:"eventBridgeConfig,omitempty" tf:"event_bridge_config,omitempty"` + + // HTTP settings. See HTTP Config + // +kubebuilder:validation:Optional + HTTPConfig *HTTPConfigParameters `json:"httpConfig,omitempty" tf:"http_config,omitempty"` + + // AWS Lambda settings. See Lambda Config + // +kubebuilder:validation:Optional + LambdaConfig *LambdaConfigParameters `json:"lambdaConfig,omitempty" tf:"lambda_config,omitempty"` + + // Amazon OpenSearch Service settings. 
See OpenSearch Service Config + // +kubebuilder:validation:Optional + OpensearchserviceConfig *OpensearchserviceConfigParameters `json:"opensearchserviceConfig,omitempty" tf:"opensearchservice_config,omitempty"` + + // AWS region of the DynamoDB table. Defaults to current region. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // AWS RDS settings. See Relational Database Config + // +kubebuilder:validation:Optional + RelationalDatabaseConfig *RelationalDatabaseConfigParameters `json:"relationalDatabaseConfig,omitempty" tf:"relational_database_config,omitempty"` + + // IAM service role ARN for the data source. Required if type is specified as AWS_LAMBDA, AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, AMAZON_EVENTBRIDGE, or AMAZON_OPENSEARCH_SERVICE. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // Type of the Data Source. Valid values: AWS_LAMBDA, AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, HTTP, NONE, RELATIONAL_DATABASE, AMAZON_EVENTBRIDGE, AMAZON_OPENSEARCH_SERVICE. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DeltaSyncConfigInitParameters struct { + + // The number of minutes that an Item is stored in the data source. 
+ BaseTableTTL *float64 `json:"baseTableTtl,omitempty" tf:"base_table_ttl,omitempty"` + + // The table name. + DeltaSyncTableName *string `json:"deltaSyncTableName,omitempty" tf:"delta_sync_table_name,omitempty"` + + // The number of minutes that a Delta Sync log entry is stored in the Delta Sync table. + DeltaSyncTableTTL *float64 `json:"deltaSyncTableTtl,omitempty" tf:"delta_sync_table_ttl,omitempty"` +} + +type DeltaSyncConfigObservation struct { + + // The number of minutes that an Item is stored in the data source. + BaseTableTTL *float64 `json:"baseTableTtl,omitempty" tf:"base_table_ttl,omitempty"` + + // The table name. + DeltaSyncTableName *string `json:"deltaSyncTableName,omitempty" tf:"delta_sync_table_name,omitempty"` + + // The number of minutes that a Delta Sync log entry is stored in the Delta Sync table. + DeltaSyncTableTTL *float64 `json:"deltaSyncTableTtl,omitempty" tf:"delta_sync_table_ttl,omitempty"` +} + +type DeltaSyncConfigParameters struct { + + // The number of minutes that an Item is stored in the data source. + // +kubebuilder:validation:Optional + BaseTableTTL *float64 `json:"baseTableTtl,omitempty" tf:"base_table_ttl,omitempty"` + + // The table name. + // +kubebuilder:validation:Optional + DeltaSyncTableName *string `json:"deltaSyncTableName" tf:"delta_sync_table_name,omitempty"` + + // The number of minutes that a Delta Sync log entry is stored in the Delta Sync table. + // +kubebuilder:validation:Optional + DeltaSyncTableTTL *float64 `json:"deltaSyncTableTtl,omitempty" tf:"delta_sync_table_ttl,omitempty"` +} + +type DynamodbConfigInitParameters struct { + + // The DeltaSyncConfig for a versioned data source. See Delta Sync Config + DeltaSyncConfig *DeltaSyncConfigInitParameters `json:"deltaSyncConfig,omitempty" tf:"delta_sync_config,omitempty"` + + // Name of the DynamoDB table. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Reference to a Table in dynamodb to populate tableName. + // +kubebuilder:validation:Optional + TableNameRef *v1.Reference `json:"tableNameRef,omitempty" tf:"-"` + + // Selector for a Table in dynamodb to populate tableName. + // +kubebuilder:validation:Optional + TableNameSelector *v1.Selector `json:"tableNameSelector,omitempty" tf:"-"` + + // Set to true to use Amazon Cognito credentials with this data source. + UseCallerCredentials *bool `json:"useCallerCredentials,omitempty" tf:"use_caller_credentials,omitempty"` + + // Detects Conflict Detection and Resolution with this data source. + Versioned *bool `json:"versioned,omitempty" tf:"versioned,omitempty"` +} + +type DynamodbConfigObservation struct { + + // The DeltaSyncConfig for a versioned data source. See Delta Sync Config + DeltaSyncConfig *DeltaSyncConfigObservation `json:"deltaSyncConfig,omitempty" tf:"delta_sync_config,omitempty"` + + // AWS region of the DynamoDB table. Defaults to current region. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Set to true to use Amazon Cognito credentials with this data source. + UseCallerCredentials *bool `json:"useCallerCredentials,omitempty" tf:"use_caller_credentials,omitempty"` + + // Detects Conflict Detection and Resolution with this data source. + Versioned *bool `json:"versioned,omitempty" tf:"versioned,omitempty"` +} + +type DynamodbConfigParameters struct { + + // The DeltaSyncConfig for a versioned data source. See Delta Sync Config + // +kubebuilder:validation:Optional + DeltaSyncConfig *DeltaSyncConfigParameters `json:"deltaSyncConfig,omitempty" tf:"delta_sync_config,omitempty"` + + // AWS region of the DynamoDB table. 
Defaults to current region. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Name of the DynamoDB table. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table + // +kubebuilder:validation:Optional + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Reference to a Table in dynamodb to populate tableName. + // +kubebuilder:validation:Optional + TableNameRef *v1.Reference `json:"tableNameRef,omitempty" tf:"-"` + + // Selector for a Table in dynamodb to populate tableName. + // +kubebuilder:validation:Optional + TableNameSelector *v1.Selector `json:"tableNameSelector,omitempty" tf:"-"` + + // Set to true to use Amazon Cognito credentials with this data source. + // +kubebuilder:validation:Optional + UseCallerCredentials *bool `json:"useCallerCredentials,omitempty" tf:"use_caller_credentials,omitempty"` + + // Detects Conflict Detection and Resolution with this data source. + // +kubebuilder:validation:Optional + Versioned *bool `json:"versioned,omitempty" tf:"versioned,omitempty"` +} + +type ElasticsearchConfigInitParameters struct { + + // HTTP endpoint of the Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` +} + +type ElasticsearchConfigObservation struct { + + // HTTP endpoint of the Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // AWS region of the DynamoDB table. Defaults to current region. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type ElasticsearchConfigParameters struct { + + // HTTP endpoint of the Elasticsearch domain. + // +kubebuilder:validation:Optional + Endpoint *string `json:"endpoint" tf:"endpoint,omitempty"` + + // AWS region of the DynamoDB table. Defaults to current region. 
+ // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type EventBridgeConfigInitParameters struct { + + // ARN for the EventBridge bus. + EventBusArn *string `json:"eventBusArn,omitempty" tf:"event_bus_arn,omitempty"` +} + +type EventBridgeConfigObservation struct { + + // ARN for the EventBridge bus. + EventBusArn *string `json:"eventBusArn,omitempty" tf:"event_bus_arn,omitempty"` +} + +type EventBridgeConfigParameters struct { + + // ARN for the EventBridge bus. + // +kubebuilder:validation:Optional + EventBusArn *string `json:"eventBusArn" tf:"event_bus_arn,omitempty"` +} + +type HTTPConfigInitParameters struct { + + // Authorization configuration in case the HTTP endpoint requires authorization. See Authorization Config. + AuthorizationConfig *AuthorizationConfigInitParameters `json:"authorizationConfig,omitempty" tf:"authorization_config,omitempty"` + + // HTTP endpoint of the Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` +} + +type HTTPConfigObservation struct { + + // Authorization configuration in case the HTTP endpoint requires authorization. See Authorization Config. + AuthorizationConfig *AuthorizationConfigObservation `json:"authorizationConfig,omitempty" tf:"authorization_config,omitempty"` + + // HTTP endpoint of the Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` +} + +type HTTPConfigParameters struct { + + // Authorization configuration in case the HTTP endpoint requires authorization. See Authorization Config. + // +kubebuilder:validation:Optional + AuthorizationConfig *AuthorizationConfigParameters `json:"authorizationConfig,omitempty" tf:"authorization_config,omitempty"` + + // HTTP endpoint of the Elasticsearch domain. 
+ // +kubebuilder:validation:Optional + Endpoint *string `json:"endpoint" tf:"endpoint,omitempty"` +} + +type HTTPEndpointConfigInitParameters struct { + + // AWS secret store ARN for database credentials. + AwsSecretStoreArn *string `json:"awsSecretStoreArn,omitempty" tf:"aws_secret_store_arn,omitempty"` + + // Amazon RDS cluster identifier. + DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` + + // Logical database name. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Logical schema name. + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` +} + +type HTTPEndpointConfigObservation struct { + + // AWS secret store ARN for database credentials. + AwsSecretStoreArn *string `json:"awsSecretStoreArn,omitempty" tf:"aws_secret_store_arn,omitempty"` + + // Amazon RDS cluster identifier. + DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` + + // Logical database name. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // AWS region of the DynamoDB table. Defaults to current region. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Logical schema name. + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` +} + +type HTTPEndpointConfigParameters struct { + + // AWS secret store ARN for database credentials. + // +kubebuilder:validation:Optional + AwsSecretStoreArn *string `json:"awsSecretStoreArn" tf:"aws_secret_store_arn,omitempty"` + + // Amazon RDS cluster identifier. + // +kubebuilder:validation:Optional + DBClusterIdentifier *string `json:"dbClusterIdentifier" tf:"db_cluster_identifier,omitempty"` + + // Logical database name. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // AWS region of the DynamoDB table. Defaults to current region. 
+ // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Logical schema name. + // +kubebuilder:validation:Optional + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` +} + +type LambdaConfigInitParameters struct { + + // ARN for the Lambda function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` +} + +type LambdaConfigObservation struct { + + // ARN for the Lambda function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` +} + +type LambdaConfigParameters struct { + + // ARN for the Lambda function. + // +kubebuilder:validation:Optional + FunctionArn *string `json:"functionArn" tf:"function_arn,omitempty"` +} + +type OpensearchserviceConfigInitParameters struct { + + // HTTP endpoint of the Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` +} + +type OpensearchserviceConfigObservation struct { + + // HTTP endpoint of the Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // AWS region of the DynamoDB table. Defaults to current region. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type OpensearchserviceConfigParameters struct { + + // HTTP endpoint of the Elasticsearch domain. + // +kubebuilder:validation:Optional + Endpoint *string `json:"endpoint" tf:"endpoint,omitempty"` + + // AWS region of the DynamoDB table. Defaults to current region. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type RelationalDatabaseConfigInitParameters struct { + + // Amazon RDS HTTP endpoint configuration. See HTTP Endpoint Config. + HTTPEndpointConfig *HTTPEndpointConfigInitParameters `json:"httpEndpointConfig,omitempty" tf:"http_endpoint_config,omitempty"` + + // Source type for the relational database. Valid values: RDS_HTTP_ENDPOINT. 
+ SourceType *string `json:"sourceType,omitempty" tf:"source_type,omitempty"` +} + +type RelationalDatabaseConfigObservation struct { + + // Amazon RDS HTTP endpoint configuration. See HTTP Endpoint Config. + HTTPEndpointConfig *HTTPEndpointConfigObservation `json:"httpEndpointConfig,omitempty" tf:"http_endpoint_config,omitempty"` + + // Source type for the relational database. Valid values: RDS_HTTP_ENDPOINT. + SourceType *string `json:"sourceType,omitempty" tf:"source_type,omitempty"` +} + +type RelationalDatabaseConfigParameters struct { + + // Amazon RDS HTTP endpoint configuration. See HTTP Endpoint Config. + // +kubebuilder:validation:Optional + HTTPEndpointConfig *HTTPEndpointConfigParameters `json:"httpEndpointConfig,omitempty" tf:"http_endpoint_config,omitempty"` + + // Source type for the relational database. Valid values: RDS_HTTP_ENDPOINT. + // +kubebuilder:validation:Optional + SourceType *string `json:"sourceType,omitempty" tf:"source_type,omitempty"` +} + +// DatasourceSpec defines the desired state of Datasource +type DatasourceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DatasourceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DatasourceInitParameters `json:"initProvider,omitempty"` +} + +// DatasourceStatus defines the observed state of Datasource. 
+type DatasourceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DatasourceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Datasource is the Schema for the Datasources API. Provides an AppSync Data Source. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Datasource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec DatasourceSpec `json:"spec"` + Status DatasourceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DatasourceList contains a list of Datasources +type DatasourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Datasource `json:"items"` +} + +// Repository type metadata. +var ( + Datasource_Kind = "Datasource" + Datasource_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Datasource_Kind}.String() + Datasource_KindAPIVersion = Datasource_Kind + "." 
+ CRDGroupVersion.String() + Datasource_GroupVersionKind = CRDGroupVersion.WithKind(Datasource_Kind) +) + +func init() { + SchemeBuilder.Register(&Datasource{}, &DatasourceList{}) +} diff --git a/apis/appsync/v1beta2/zz_function_terraformed.go b/apis/appsync/v1beta2/zz_function_terraformed.go new file mode 100755 index 0000000000..863766860d --- /dev/null +++ b/apis/appsync/v1beta2/zz_function_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Function +func (mg *Function) GetTerraformResourceType() string { + return "aws_appsync_function" +} + +// GetConnectionDetailsMapping for this Function +func (tr *Function) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Function +func (tr *Function) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Function +func (tr *Function) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Function +func (tr *Function) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Function +func (tr *Function) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Function +func (tr *Function) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Function +func (tr *Function) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Function +func (tr *Function) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Function using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Function) LateInitialize(attrs []byte) (bool, error) { + params := &FunctionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Function) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appsync/v1beta2/zz_function_types.go b/apis/appsync/v1beta2/zz_function_types.go new file mode 100755 index 0000000000..92da675d3c --- /dev/null +++ b/apis/appsync/v1beta2/zz_function_types.go @@ -0,0 +1,333 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FunctionInitParameters struct { + + // ID of the associated AppSync API. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.GraphQLAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a GraphQLAPI in appsync to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a GraphQLAPI in appsync to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // The function code that contains the request and response functions. 
When code is used, the runtime is required. The runtime value must be APPSYNC_JS. + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Function data source name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.Datasource + DataSource *string `json:"dataSource,omitempty" tf:"data_source,omitempty"` + + // Reference to a Datasource in appsync to populate dataSource. + // +kubebuilder:validation:Optional + DataSourceRef *v1.Reference `json:"dataSourceRef,omitempty" tf:"-"` + + // Selector for a Datasource in appsync to populate dataSource. + // +kubebuilder:validation:Optional + DataSourceSelector *v1.Selector `json:"dataSourceSelector,omitempty" tf:"-"` + + // Function description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Version of the request mapping template. Currently the supported value is 2018-05-29. Does not apply when specifying code. + FunctionVersion *string `json:"functionVersion,omitempty" tf:"function_version,omitempty"` + + // Maximum batching size for a resolver. Valid values are between 0 and 2000. + MaxBatchSize *float64 `json:"maxBatchSize,omitempty" tf:"max_batch_size,omitempty"` + + // Function name. The function name does not have to be unique. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Function request mapping template. Functions support only the 2018-05-29 version of the request mapping template. + RequestMappingTemplate *string `json:"requestMappingTemplate,omitempty" tf:"request_mapping_template,omitempty"` + + // Function response mapping template. + ResponseMappingTemplate *string `json:"responseMappingTemplate,omitempty" tf:"response_mapping_template,omitempty"` + + // Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See Runtime. 
+ Runtime *RuntimeInitParameters `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Describes a Sync configuration for a resolver. See Sync Config. + SyncConfig *SyncConfigInitParameters `json:"syncConfig,omitempty" tf:"sync_config,omitempty"` +} + +type FunctionObservation struct { + + // ID of the associated AppSync API. + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // ARN of the Function object. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The function code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS. + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Function data source name. + DataSource *string `json:"dataSource,omitempty" tf:"data_source,omitempty"` + + // Function description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Unique ID representing the Function object. + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + + // Version of the request mapping template. Currently the supported value is 2018-05-29. Does not apply when specifying code. + FunctionVersion *string `json:"functionVersion,omitempty" tf:"function_version,omitempty"` + + // API Function ID (Formatted as ApiId-FunctionId) + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Maximum batching size for a resolver. Valid values are between 0 and 2000. + MaxBatchSize *float64 `json:"maxBatchSize,omitempty" tf:"max_batch_size,omitempty"` + + // Function name. The function name does not have to be unique. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Function request mapping template. Functions support only the 2018-05-29 version of the request mapping template. + RequestMappingTemplate *string `json:"requestMappingTemplate,omitempty" tf:"request_mapping_template,omitempty"` + + // Function response mapping template. 
+ ResponseMappingTemplate *string `json:"responseMappingTemplate,omitempty" tf:"response_mapping_template,omitempty"` + + // Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See Runtime. + Runtime *RuntimeObservation `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Describes a Sync configuration for a resolver. See Sync Config. + SyncConfig *SyncConfigObservation `json:"syncConfig,omitempty" tf:"sync_config,omitempty"` +} + +type FunctionParameters struct { + + // ID of the associated AppSync API. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.GraphQLAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a GraphQLAPI in appsync to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a GraphQLAPI in appsync to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // The function code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS. + // +kubebuilder:validation:Optional + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Function data source name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.Datasource + // +kubebuilder:validation:Optional + DataSource *string `json:"dataSource,omitempty" tf:"data_source,omitempty"` + + // Reference to a Datasource in appsync to populate dataSource. 
+ // +kubebuilder:validation:Optional + DataSourceRef *v1.Reference `json:"dataSourceRef,omitempty" tf:"-"` + + // Selector for a Datasource in appsync to populate dataSource. + // +kubebuilder:validation:Optional + DataSourceSelector *v1.Selector `json:"dataSourceSelector,omitempty" tf:"-"` + + // Function description. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Version of the request mapping template. Currently the supported value is 2018-05-29. Does not apply when specifying code. + // +kubebuilder:validation:Optional + FunctionVersion *string `json:"functionVersion,omitempty" tf:"function_version,omitempty"` + + // Maximum batching size for a resolver. Valid values are between 0 and 2000. + // +kubebuilder:validation:Optional + MaxBatchSize *float64 `json:"maxBatchSize,omitempty" tf:"max_batch_size,omitempty"` + + // Function name. The function name does not have to be unique. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Function request mapping template. Functions support only the 2018-05-29 version of the request mapping template. + // +kubebuilder:validation:Optional + RequestMappingTemplate *string `json:"requestMappingTemplate,omitempty" tf:"request_mapping_template,omitempty"` + + // Function response mapping template. + // +kubebuilder:validation:Optional + ResponseMappingTemplate *string `json:"responseMappingTemplate,omitempty" tf:"response_mapping_template,omitempty"` + + // Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See Runtime. 
+ // +kubebuilder:validation:Optional + Runtime *RuntimeParameters `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Describes a Sync configuration for a resolver. See Sync Config. + // +kubebuilder:validation:Optional + SyncConfig *SyncConfigParameters `json:"syncConfig,omitempty" tf:"sync_config,omitempty"` +} + +type LambdaConflictHandlerConfigInitParameters struct { + + // ARN for the Lambda function to use as the Conflict Handler. + LambdaConflictHandlerArn *string `json:"lambdaConflictHandlerArn,omitempty" tf:"lambda_conflict_handler_arn,omitempty"` +} + +type LambdaConflictHandlerConfigObservation struct { + + // ARN for the Lambda function to use as the Conflict Handler. + LambdaConflictHandlerArn *string `json:"lambdaConflictHandlerArn,omitempty" tf:"lambda_conflict_handler_arn,omitempty"` +} + +type LambdaConflictHandlerConfigParameters struct { + + // ARN for the Lambda function to use as the Conflict Handler. + // +kubebuilder:validation:Optional + LambdaConflictHandlerArn *string `json:"lambdaConflictHandlerArn,omitempty" tf:"lambda_conflict_handler_arn,omitempty"` +} + +type RuntimeInitParameters struct { + + // Function name. The function name does not have to be unique. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of the runtime to use. Currently, the only allowed version is 1.0.0. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` +} + +type RuntimeObservation struct { + + // Function name. The function name does not have to be unique. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of the runtime to use. Currently, the only allowed version is 1.0.0. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` +} + +type RuntimeParameters struct { + + // Function name. The function name does not have to be unique. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The version of the runtime to use. Currently, the only allowed version is 1.0.0. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion" tf:"runtime_version,omitempty"` +} + +type SyncConfigInitParameters struct { + + // Conflict Detection strategy to use. Valid values are NONE and VERSION. + ConflictDetection *string `json:"conflictDetection,omitempty" tf:"conflict_detection,omitempty"` + + // Conflict Resolution strategy to perform in the event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, AUTOMERGE, and LAMBDA. + ConflictHandler *string `json:"conflictHandler,omitempty" tf:"conflict_handler,omitempty"` + + // Lambda Conflict Handler Config when configuring LAMBDA as the Conflict Handler. See Lambda Conflict Handler Config. + LambdaConflictHandlerConfig *LambdaConflictHandlerConfigInitParameters `json:"lambdaConflictHandlerConfig,omitempty" tf:"lambda_conflict_handler_config,omitempty"` +} + +type SyncConfigObservation struct { + + // Conflict Detection strategy to use. Valid values are NONE and VERSION. + ConflictDetection *string `json:"conflictDetection,omitempty" tf:"conflict_detection,omitempty"` + + // Conflict Resolution strategy to perform in the event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, AUTOMERGE, and LAMBDA. + ConflictHandler *string `json:"conflictHandler,omitempty" tf:"conflict_handler,omitempty"` + + // Lambda Conflict Handler Config when configuring LAMBDA as the Conflict Handler. See Lambda Conflict Handler Config. + LambdaConflictHandlerConfig *LambdaConflictHandlerConfigObservation `json:"lambdaConflictHandlerConfig,omitempty" tf:"lambda_conflict_handler_config,omitempty"` +} + +type SyncConfigParameters struct { + + // Conflict Detection strategy to use. Valid values are NONE and VERSION. 
+ // +kubebuilder:validation:Optional + ConflictDetection *string `json:"conflictDetection,omitempty" tf:"conflict_detection,omitempty"` + + // Conflict Resolution strategy to perform in the event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, AUTOMERGE, and LAMBDA. + // +kubebuilder:validation:Optional + ConflictHandler *string `json:"conflictHandler,omitempty" tf:"conflict_handler,omitempty"` + + // Lambda Conflict Handler Config when configuring LAMBDA as the Conflict Handler. See Lambda Conflict Handler Config. + // +kubebuilder:validation:Optional + LambdaConflictHandlerConfig *LambdaConflictHandlerConfigParameters `json:"lambdaConflictHandlerConfig,omitempty" tf:"lambda_conflict_handler_config,omitempty"` +} + +// FunctionSpec defines the desired state of Function +type FunctionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FunctionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FunctionInitParameters `json:"initProvider,omitempty"` +} + +// FunctionStatus defines the observed state of Function. +type FunctionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FunctionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Function is the Schema for the Functions API. Provides an AppSync Function. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec FunctionSpec `json:"spec"` + Status FunctionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionList contains a list of Functions +type FunctionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Function `json:"items"` +} + +// Repository type metadata. +var ( + Function_Kind = "Function" + Function_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Function_Kind}.String() + Function_KindAPIVersion = Function_Kind + "." 
+ CRDGroupVersion.String() + Function_GroupVersionKind = CRDGroupVersion.WithKind(Function_Kind) +) + +func init() { + SchemeBuilder.Register(&Function{}, &FunctionList{}) +} diff --git a/apis/appsync/v1beta2/zz_generated.conversion_hubs.go b/apis/appsync/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..914d9bb684 --- /dev/null +++ b/apis/appsync/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Datasource) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Function) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GraphQLAPI) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Resolver) Hub() {} diff --git a/apis/appsync/v1beta2/zz_generated.deepcopy.go b/apis/appsync/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..ef7c2a592d --- /dev/null +++ b/apis/appsync/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3862 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdditionalAuthenticationProviderInitParameters) DeepCopyInto(out *AdditionalAuthenticationProviderInitParameters) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.LambdaAuthorizerConfig != nil { + in, out := &in.LambdaAuthorizerConfig, &out.LambdaAuthorizerConfig + *out = new(LambdaAuthorizerConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OpenIDConnectConfig != nil { + in, out := &in.OpenIDConnectConfig, &out.OpenIDConnectConfig + *out = new(OpenIDConnectConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UserPoolConfig != nil { + in, out := &in.UserPoolConfig, &out.UserPoolConfig + *out = new(UserPoolConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalAuthenticationProviderInitParameters. +func (in *AdditionalAuthenticationProviderInitParameters) DeepCopy() *AdditionalAuthenticationProviderInitParameters { + if in == nil { + return nil + } + out := new(AdditionalAuthenticationProviderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdditionalAuthenticationProviderObservation) DeepCopyInto(out *AdditionalAuthenticationProviderObservation) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.LambdaAuthorizerConfig != nil { + in, out := &in.LambdaAuthorizerConfig, &out.LambdaAuthorizerConfig + *out = new(LambdaAuthorizerConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.OpenIDConnectConfig != nil { + in, out := &in.OpenIDConnectConfig, &out.OpenIDConnectConfig + *out = new(OpenIDConnectConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.UserPoolConfig != nil { + in, out := &in.UserPoolConfig, &out.UserPoolConfig + *out = new(UserPoolConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalAuthenticationProviderObservation. +func (in *AdditionalAuthenticationProviderObservation) DeepCopy() *AdditionalAuthenticationProviderObservation { + if in == nil { + return nil + } + out := new(AdditionalAuthenticationProviderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdditionalAuthenticationProviderParameters) DeepCopyInto(out *AdditionalAuthenticationProviderParameters) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.LambdaAuthorizerConfig != nil { + in, out := &in.LambdaAuthorizerConfig, &out.LambdaAuthorizerConfig + *out = new(LambdaAuthorizerConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.OpenIDConnectConfig != nil { + in, out := &in.OpenIDConnectConfig, &out.OpenIDConnectConfig + *out = new(OpenIDConnectConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.UserPoolConfig != nil { + in, out := &in.UserPoolConfig, &out.UserPoolConfig + *out = new(UserPoolConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalAuthenticationProviderParameters. +func (in *AdditionalAuthenticationProviderParameters) DeepCopy() *AdditionalAuthenticationProviderParameters { + if in == nil { + return nil + } + out := new(AdditionalAuthenticationProviderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfigInitParameters) DeepCopyInto(out *AuthorizationConfigInitParameters) { + *out = *in + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.AwsIAMConfig != nil { + in, out := &in.AwsIAMConfig, &out.AwsIAMConfig + *out = new(AwsIAMConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfigInitParameters. 
+func (in *AuthorizationConfigInitParameters) DeepCopy() *AuthorizationConfigInitParameters { + if in == nil { + return nil + } + out := new(AuthorizationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfigObservation) DeepCopyInto(out *AuthorizationConfigObservation) { + *out = *in + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.AwsIAMConfig != nil { + in, out := &in.AwsIAMConfig, &out.AwsIAMConfig + *out = new(AwsIAMConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfigObservation. +func (in *AuthorizationConfigObservation) DeepCopy() *AuthorizationConfigObservation { + if in == nil { + return nil + } + out := new(AuthorizationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfigParameters) DeepCopyInto(out *AuthorizationConfigParameters) { + *out = *in + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.AwsIAMConfig != nil { + in, out := &in.AwsIAMConfig, &out.AwsIAMConfig + *out = new(AwsIAMConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfigParameters. +func (in *AuthorizationConfigParameters) DeepCopy() *AuthorizationConfigParameters { + if in == nil { + return nil + } + out := new(AuthorizationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AwsIAMConfigInitParameters) DeepCopyInto(out *AwsIAMConfigInitParameters) { + *out = *in + if in.SigningRegion != nil { + in, out := &in.SigningRegion, &out.SigningRegion + *out = new(string) + **out = **in + } + if in.SigningServiceName != nil { + in, out := &in.SigningServiceName, &out.SigningServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsIAMConfigInitParameters. +func (in *AwsIAMConfigInitParameters) DeepCopy() *AwsIAMConfigInitParameters { + if in == nil { + return nil + } + out := new(AwsIAMConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsIAMConfigObservation) DeepCopyInto(out *AwsIAMConfigObservation) { + *out = *in + if in.SigningRegion != nil { + in, out := &in.SigningRegion, &out.SigningRegion + *out = new(string) + **out = **in + } + if in.SigningServiceName != nil { + in, out := &in.SigningServiceName, &out.SigningServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsIAMConfigObservation. +func (in *AwsIAMConfigObservation) DeepCopy() *AwsIAMConfigObservation { + if in == nil { + return nil + } + out := new(AwsIAMConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AwsIAMConfigParameters) DeepCopyInto(out *AwsIAMConfigParameters) { + *out = *in + if in.SigningRegion != nil { + in, out := &in.SigningRegion, &out.SigningRegion + *out = new(string) + **out = **in + } + if in.SigningServiceName != nil { + in, out := &in.SigningServiceName, &out.SigningServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsIAMConfigParameters. +func (in *AwsIAMConfigParameters) DeepCopy() *AwsIAMConfigParameters { + if in == nil { + return nil + } + out := new(AwsIAMConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CachingConfigInitParameters) DeepCopyInto(out *CachingConfigInitParameters) { + *out = *in + if in.CachingKeys != nil { + in, out := &in.CachingKeys, &out.CachingKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachingConfigInitParameters. +func (in *CachingConfigInitParameters) DeepCopy() *CachingConfigInitParameters { + if in == nil { + return nil + } + out := new(CachingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CachingConfigObservation) DeepCopyInto(out *CachingConfigObservation) { + *out = *in + if in.CachingKeys != nil { + in, out := &in.CachingKeys, &out.CachingKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachingConfigObservation. +func (in *CachingConfigObservation) DeepCopy() *CachingConfigObservation { + if in == nil { + return nil + } + out := new(CachingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CachingConfigParameters) DeepCopyInto(out *CachingConfigParameters) { + *out = *in + if in.CachingKeys != nil { + in, out := &in.CachingKeys, &out.CachingKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachingConfigParameters. +func (in *CachingConfigParameters) DeepCopy() *CachingConfigParameters { + if in == nil { + return nil + } + out := new(CachingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Datasource) DeepCopyInto(out *Datasource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Datasource. +func (in *Datasource) DeepCopy() *Datasource { + if in == nil { + return nil + } + out := new(Datasource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Datasource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatasourceInitParameters) DeepCopyInto(out *DatasourceInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DynamodbConfig != nil { + in, out := &in.DynamodbConfig, &out.DynamodbConfig + *out = new(DynamodbConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ElasticsearchConfig != nil { + in, out := &in.ElasticsearchConfig, &out.ElasticsearchConfig + *out = new(ElasticsearchConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventBridgeConfig != nil { + in, out := &in.EventBridgeConfig, &out.EventBridgeConfig + *out = new(EventBridgeConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaConfig != nil { + in, out := &in.LambdaConfig, &out.LambdaConfig + *out = new(LambdaConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OpensearchserviceConfig != nil { + in, out := &in.OpensearchserviceConfig, &out.OpensearchserviceConfig + *out = 
new(OpensearchserviceConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RelationalDatabaseConfig != nil { + in, out := &in.RelationalDatabaseConfig, &out.RelationalDatabaseConfig + *out = new(RelationalDatabaseConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasourceInitParameters. +func (in *DatasourceInitParameters) DeepCopy() *DatasourceInitParameters { + if in == nil { + return nil + } + out := new(DatasourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatasourceList) DeepCopyInto(out *DatasourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Datasource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasourceList. +func (in *DatasourceList) DeepCopy() *DatasourceList { + if in == nil { + return nil + } + out := new(DatasourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DatasourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatasourceObservation) DeepCopyInto(out *DatasourceObservation) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DynamodbConfig != nil { + in, out := &in.DynamodbConfig, &out.DynamodbConfig + *out = new(DynamodbConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ElasticsearchConfig != nil { + in, out := &in.ElasticsearchConfig, &out.ElasticsearchConfig + *out = new(ElasticsearchConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.EventBridgeConfig != nil { + in, out := &in.EventBridgeConfig, &out.EventBridgeConfig + *out = new(EventBridgeConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LambdaConfig != nil { + in, out := &in.LambdaConfig, &out.LambdaConfig + *out = new(LambdaConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.OpensearchserviceConfig != nil { + in, out := &in.OpensearchserviceConfig, &out.OpensearchserviceConfig + *out = new(OpensearchserviceConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.RelationalDatabaseConfig != nil { + in, out := &in.RelationalDatabaseConfig, &out.RelationalDatabaseConfig + *out = new(RelationalDatabaseConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out 
= new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasourceObservation. +func (in *DatasourceObservation) DeepCopy() *DatasourceObservation { + if in == nil { + return nil + } + out := new(DatasourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatasourceParameters) DeepCopyInto(out *DatasourceParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DynamodbConfig != nil { + in, out := &in.DynamodbConfig, &out.DynamodbConfig + *out = new(DynamodbConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.ElasticsearchConfig != nil { + in, out := &in.ElasticsearchConfig, &out.ElasticsearchConfig + *out = new(ElasticsearchConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.EventBridgeConfig != nil { + in, out := &in.EventBridgeConfig, &out.EventBridgeConfig + *out = new(EventBridgeConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPConfig != nil { + in, out := &in.HTTPConfig, &out.HTTPConfig + *out = new(HTTPConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaConfig != nil { + in, out := &in.LambdaConfig, &out.LambdaConfig + *out = new(LambdaConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.OpensearchserviceConfig != nil { + in, out := &in.OpensearchserviceConfig, 
&out.OpensearchserviceConfig + *out = new(OpensearchserviceConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RelationalDatabaseConfig != nil { + in, out := &in.RelationalDatabaseConfig, &out.RelationalDatabaseConfig + *out = new(RelationalDatabaseConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasourceParameters. +func (in *DatasourceParameters) DeepCopy() *DatasourceParameters { + if in == nil { + return nil + } + out := new(DatasourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatasourceSpec) DeepCopyInto(out *DatasourceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasourceSpec. +func (in *DatasourceSpec) DeepCopy() *DatasourceSpec { + if in == nil { + return nil + } + out := new(DatasourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatasourceStatus) DeepCopyInto(out *DatasourceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasourceStatus. +func (in *DatasourceStatus) DeepCopy() *DatasourceStatus { + if in == nil { + return nil + } + out := new(DatasourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeltaSyncConfigInitParameters) DeepCopyInto(out *DeltaSyncConfigInitParameters) { + *out = *in + if in.BaseTableTTL != nil { + in, out := &in.BaseTableTTL, &out.BaseTableTTL + *out = new(float64) + **out = **in + } + if in.DeltaSyncTableName != nil { + in, out := &in.DeltaSyncTableName, &out.DeltaSyncTableName + *out = new(string) + **out = **in + } + if in.DeltaSyncTableTTL != nil { + in, out := &in.DeltaSyncTableTTL, &out.DeltaSyncTableTTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeltaSyncConfigInitParameters. +func (in *DeltaSyncConfigInitParameters) DeepCopy() *DeltaSyncConfigInitParameters { + if in == nil { + return nil + } + out := new(DeltaSyncConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeltaSyncConfigObservation) DeepCopyInto(out *DeltaSyncConfigObservation) { + *out = *in + if in.BaseTableTTL != nil { + in, out := &in.BaseTableTTL, &out.BaseTableTTL + *out = new(float64) + **out = **in + } + if in.DeltaSyncTableName != nil { + in, out := &in.DeltaSyncTableName, &out.DeltaSyncTableName + *out = new(string) + **out = **in + } + if in.DeltaSyncTableTTL != nil { + in, out := &in.DeltaSyncTableTTL, &out.DeltaSyncTableTTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeltaSyncConfigObservation. +func (in *DeltaSyncConfigObservation) DeepCopy() *DeltaSyncConfigObservation { + if in == nil { + return nil + } + out := new(DeltaSyncConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeltaSyncConfigParameters) DeepCopyInto(out *DeltaSyncConfigParameters) { + *out = *in + if in.BaseTableTTL != nil { + in, out := &in.BaseTableTTL, &out.BaseTableTTL + *out = new(float64) + **out = **in + } + if in.DeltaSyncTableName != nil { + in, out := &in.DeltaSyncTableName, &out.DeltaSyncTableName + *out = new(string) + **out = **in + } + if in.DeltaSyncTableTTL != nil { + in, out := &in.DeltaSyncTableTTL, &out.DeltaSyncTableTTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeltaSyncConfigParameters. +func (in *DeltaSyncConfigParameters) DeepCopy() *DeltaSyncConfigParameters { + if in == nil { + return nil + } + out := new(DeltaSyncConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamodbConfigInitParameters) DeepCopyInto(out *DynamodbConfigInitParameters) { + *out = *in + if in.DeltaSyncConfig != nil { + in, out := &in.DeltaSyncConfig, &out.DeltaSyncConfig + *out = new(DeltaSyncConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.TableNameRef != nil { + in, out := &in.TableNameRef, &out.TableNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TableNameSelector != nil { + in, out := &in.TableNameSelector, &out.TableNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UseCallerCredentials != nil { + in, out := &in.UseCallerCredentials, &out.UseCallerCredentials + *out = new(bool) + **out = **in + } + if in.Versioned != nil { + in, out := &in.Versioned, &out.Versioned + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamodbConfigInitParameters. +func (in *DynamodbConfigInitParameters) DeepCopy() *DynamodbConfigInitParameters { + if in == nil { + return nil + } + out := new(DynamodbConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamodbConfigObservation) DeepCopyInto(out *DynamodbConfigObservation) { + *out = *in + if in.DeltaSyncConfig != nil { + in, out := &in.DeltaSyncConfig, &out.DeltaSyncConfig + *out = new(DeltaSyncConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.UseCallerCredentials != nil { + in, out := &in.UseCallerCredentials, &out.UseCallerCredentials + *out = new(bool) + **out = **in + } + if in.Versioned != nil { + in, out := &in.Versioned, &out.Versioned + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamodbConfigObservation. +func (in *DynamodbConfigObservation) DeepCopy() *DynamodbConfigObservation { + if in == nil { + return nil + } + out := new(DynamodbConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamodbConfigParameters) DeepCopyInto(out *DynamodbConfigParameters) { + *out = *in + if in.DeltaSyncConfig != nil { + in, out := &in.DeltaSyncConfig, &out.DeltaSyncConfig + *out = new(DeltaSyncConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.TableNameRef != nil { + in, out := &in.TableNameRef, &out.TableNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TableNameSelector != nil { + in, out := &in.TableNameSelector, &out.TableNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UseCallerCredentials != nil { + in, out := &in.UseCallerCredentials, &out.UseCallerCredentials + *out = new(bool) + **out = **in + } + if in.Versioned != nil { + in, out := &in.Versioned, &out.Versioned + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamodbConfigParameters. +func (in *DynamodbConfigParameters) DeepCopy() *DynamodbConfigParameters { + if in == nil { + return nil + } + out := new(DynamodbConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchConfigInitParameters) DeepCopyInto(out *ElasticsearchConfigInitParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchConfigInitParameters. 
+func (in *ElasticsearchConfigInitParameters) DeepCopy() *ElasticsearchConfigInitParameters { + if in == nil { + return nil + } + out := new(ElasticsearchConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchConfigObservation) DeepCopyInto(out *ElasticsearchConfigObservation) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchConfigObservation. +func (in *ElasticsearchConfigObservation) DeepCopy() *ElasticsearchConfigObservation { + if in == nil { + return nil + } + out := new(ElasticsearchConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchConfigParameters) DeepCopyInto(out *ElasticsearchConfigParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchConfigParameters. +func (in *ElasticsearchConfigParameters) DeepCopy() *ElasticsearchConfigParameters { + if in == nil { + return nil + } + out := new(ElasticsearchConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventBridgeConfigInitParameters) DeepCopyInto(out *EventBridgeConfigInitParameters) { + *out = *in + if in.EventBusArn != nil { + in, out := &in.EventBusArn, &out.EventBusArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBridgeConfigInitParameters. +func (in *EventBridgeConfigInitParameters) DeepCopy() *EventBridgeConfigInitParameters { + if in == nil { + return nil + } + out := new(EventBridgeConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventBridgeConfigObservation) DeepCopyInto(out *EventBridgeConfigObservation) { + *out = *in + if in.EventBusArn != nil { + in, out := &in.EventBusArn, &out.EventBusArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBridgeConfigObservation. +func (in *EventBridgeConfigObservation) DeepCopy() *EventBridgeConfigObservation { + if in == nil { + return nil + } + out := new(EventBridgeConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventBridgeConfigParameters) DeepCopyInto(out *EventBridgeConfigParameters) { + *out = *in + if in.EventBusArn != nil { + in, out := &in.EventBusArn, &out.EventBusArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBridgeConfigParameters. +func (in *EventBridgeConfigParameters) DeepCopy() *EventBridgeConfigParameters { + if in == nil { + return nil + } + out := new(EventBridgeConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionInitParameters) DeepCopyInto(out *FunctionInitParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(string) + **out = **in + } + if in.DataSourceRef != nil { + in, out := &in.DataSourceRef, &out.DataSourceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataSourceSelector != nil { + in, out := &in.DataSourceSelector, &out.DataSourceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FunctionVersion != nil { + in, out := &in.FunctionVersion, &out.FunctionVersion + *out = new(string) + **out = 
**in + } + if in.MaxBatchSize != nil { + in, out := &in.MaxBatchSize, &out.MaxBatchSize + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RequestMappingTemplate != nil { + in, out := &in.RequestMappingTemplate, &out.RequestMappingTemplate + *out = new(string) + **out = **in + } + if in.ResponseMappingTemplate != nil { + in, out := &in.ResponseMappingTemplate, &out.ResponseMappingTemplate + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(RuntimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SyncConfig != nil { + in, out := &in.SyncConfig, &out.SyncConfig + *out = new(SyncConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionInitParameters. +func (in *FunctionInitParameters) DeepCopy() *FunctionInitParameters { + if in == nil { + return nil + } + out := new(FunctionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionList) DeepCopyInto(out *FunctionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Function, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList. +func (in *FunctionList) DeepCopy() *FunctionList { + if in == nil { + return nil + } + out := new(FunctionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FunctionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionObservation) DeepCopyInto(out *FunctionObservation) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.FunctionVersion != nil { + in, out := &in.FunctionVersion, &out.FunctionVersion + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxBatchSize != nil { + in, out := &in.MaxBatchSize, &out.MaxBatchSize + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RequestMappingTemplate != nil { + in, out := &in.RequestMappingTemplate, &out.RequestMappingTemplate + *out = new(string) + **out = **in + } + if in.ResponseMappingTemplate != nil { + in, out := &in.ResponseMappingTemplate, &out.ResponseMappingTemplate + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(RuntimeObservation) + (*in).DeepCopyInto(*out) + } + if in.SyncConfig != nil { + in, out := &in.SyncConfig, &out.SyncConfig + *out = new(SyncConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new FunctionObservation. +func (in *FunctionObservation) DeepCopy() *FunctionObservation { + if in == nil { + return nil + } + out := new(FunctionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionParameters) DeepCopyInto(out *FunctionParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(string) + **out = **in + } + if in.DataSourceRef != nil { + in, out := &in.DataSourceRef, &out.DataSourceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataSourceSelector != nil { + in, out := &in.DataSourceSelector, &out.DataSourceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FunctionVersion != nil { + in, out := &in.FunctionVersion, &out.FunctionVersion + *out = new(string) + **out = **in + } + if in.MaxBatchSize != nil { + in, out := &in.MaxBatchSize, &out.MaxBatchSize + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RequestMappingTemplate != nil { + in, out := &in.RequestMappingTemplate, &out.RequestMappingTemplate + *out = 
new(string) + **out = **in + } + if in.ResponseMappingTemplate != nil { + in, out := &in.ResponseMappingTemplate, &out.ResponseMappingTemplate + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(RuntimeParameters) + (*in).DeepCopyInto(*out) + } + if in.SyncConfig != nil { + in, out := &in.SyncConfig, &out.SyncConfig + *out = new(SyncConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionParameters. +func (in *FunctionParameters) DeepCopy() *FunctionParameters { + if in == nil { + return nil + } + out := new(FunctionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. +func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. +func (in *FunctionStatus) DeepCopy() *FunctionStatus { + if in == nil { + return nil + } + out := new(FunctionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *GraphQLAPI) DeepCopyInto(out *GraphQLAPI) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPI. +func (in *GraphQLAPI) DeepCopy() *GraphQLAPI { + if in == nil { + return nil + } + out := new(GraphQLAPI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GraphQLAPI) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPIInitParameters) DeepCopyInto(out *GraphQLAPIInitParameters) { + *out = *in + if in.AdditionalAuthenticationProvider != nil { + in, out := &in.AdditionalAuthenticationProvider, &out.AdditionalAuthenticationProvider + *out = make([]AdditionalAuthenticationProviderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.IntrospectionConfig != nil { + in, out := &in.IntrospectionConfig, &out.IntrospectionConfig + *out = new(string) + **out = **in + } + if in.LambdaAuthorizerConfig != nil { + in, out := &in.LambdaAuthorizerConfig, &out.LambdaAuthorizerConfig + *out = new(GraphQLAPILambdaAuthorizerConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LogConfig != nil { + in, out := &in.LogConfig, &out.LogConfig + *out = new(LogConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OpenIDConnectConfig != nil { + in, out := 
&in.OpenIDConnectConfig, &out.OpenIDConnectConfig + *out = new(GraphQLAPIOpenIDConnectConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryDepthLimit != nil { + in, out := &in.QueryDepthLimit, &out.QueryDepthLimit + *out = new(float64) + **out = **in + } + if in.ResolverCountLimit != nil { + in, out := &in.ResolverCountLimit, &out.ResolverCountLimit + *out = new(float64) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserPoolConfig != nil { + in, out := &in.UserPoolConfig, &out.UserPoolConfig + *out = new(GraphQLAPIUserPoolConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Visibility != nil { + in, out := &in.Visibility, &out.Visibility + *out = new(string) + **out = **in + } + if in.XrayEnabled != nil { + in, out := &in.XrayEnabled, &out.XrayEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIInitParameters. +func (in *GraphQLAPIInitParameters) DeepCopy() *GraphQLAPIInitParameters { + if in == nil { + return nil + } + out := new(GraphQLAPIInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GraphQLAPILambdaAuthorizerConfigInitParameters) DeepCopyInto(out *GraphQLAPILambdaAuthorizerConfigInitParameters) { + *out = *in + if in.AuthorizerResultTTLInSeconds != nil { + in, out := &in.AuthorizerResultTTLInSeconds, &out.AuthorizerResultTTLInSeconds + *out = new(float64) + **out = **in + } + if in.AuthorizerURI != nil { + in, out := &in.AuthorizerURI, &out.AuthorizerURI + *out = new(string) + **out = **in + } + if in.IdentityValidationExpression != nil { + in, out := &in.IdentityValidationExpression, &out.IdentityValidationExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPILambdaAuthorizerConfigInitParameters. +func (in *GraphQLAPILambdaAuthorizerConfigInitParameters) DeepCopy() *GraphQLAPILambdaAuthorizerConfigInitParameters { + if in == nil { + return nil + } + out := new(GraphQLAPILambdaAuthorizerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPILambdaAuthorizerConfigObservation) DeepCopyInto(out *GraphQLAPILambdaAuthorizerConfigObservation) { + *out = *in + if in.AuthorizerResultTTLInSeconds != nil { + in, out := &in.AuthorizerResultTTLInSeconds, &out.AuthorizerResultTTLInSeconds + *out = new(float64) + **out = **in + } + if in.AuthorizerURI != nil { + in, out := &in.AuthorizerURI, &out.AuthorizerURI + *out = new(string) + **out = **in + } + if in.IdentityValidationExpression != nil { + in, out := &in.IdentityValidationExpression, &out.IdentityValidationExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPILambdaAuthorizerConfigObservation. 
+func (in *GraphQLAPILambdaAuthorizerConfigObservation) DeepCopy() *GraphQLAPILambdaAuthorizerConfigObservation { + if in == nil { + return nil + } + out := new(GraphQLAPILambdaAuthorizerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPILambdaAuthorizerConfigParameters) DeepCopyInto(out *GraphQLAPILambdaAuthorizerConfigParameters) { + *out = *in + if in.AuthorizerResultTTLInSeconds != nil { + in, out := &in.AuthorizerResultTTLInSeconds, &out.AuthorizerResultTTLInSeconds + *out = new(float64) + **out = **in + } + if in.AuthorizerURI != nil { + in, out := &in.AuthorizerURI, &out.AuthorizerURI + *out = new(string) + **out = **in + } + if in.IdentityValidationExpression != nil { + in, out := &in.IdentityValidationExpression, &out.IdentityValidationExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPILambdaAuthorizerConfigParameters. +func (in *GraphQLAPILambdaAuthorizerConfigParameters) DeepCopy() *GraphQLAPILambdaAuthorizerConfigParameters { + if in == nil { + return nil + } + out := new(GraphQLAPILambdaAuthorizerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPIList) DeepCopyInto(out *GraphQLAPIList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GraphQLAPI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIList. 
+func (in *GraphQLAPIList) DeepCopy() *GraphQLAPIList { + if in == nil { + return nil + } + out := new(GraphQLAPIList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GraphQLAPIList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPIObservation) DeepCopyInto(out *GraphQLAPIObservation) { + *out = *in + if in.AdditionalAuthenticationProvider != nil { + in, out := &in.AdditionalAuthenticationProvider, &out.AdditionalAuthenticationProvider + *out = make([]AdditionalAuthenticationProviderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntrospectionConfig != nil { + in, out := &in.IntrospectionConfig, &out.IntrospectionConfig + *out = new(string) + **out = **in + } + if in.LambdaAuthorizerConfig != nil { + in, out := &in.LambdaAuthorizerConfig, &out.LambdaAuthorizerConfig + *out = new(GraphQLAPILambdaAuthorizerConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.LogConfig != nil { + in, out := &in.LogConfig, &out.LogConfig + *out = new(LogConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OpenIDConnectConfig != nil { + in, out := &in.OpenIDConnectConfig, &out.OpenIDConnectConfig + *out = new(GraphQLAPIOpenIDConnectConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.QueryDepthLimit != nil { + in, out 
:= &in.QueryDepthLimit, &out.QueryDepthLimit + *out = new(float64) + **out = **in + } + if in.ResolverCountLimit != nil { + in, out := &in.ResolverCountLimit, &out.ResolverCountLimit + *out = new(float64) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Uris != nil { + in, out := &in.Uris, &out.Uris + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserPoolConfig != nil { + in, out := &in.UserPoolConfig, &out.UserPoolConfig + *out = new(GraphQLAPIUserPoolConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Visibility != nil { + in, out := &in.Visibility, &out.Visibility + *out = new(string) + **out = **in + } + if in.XrayEnabled != nil { + in, out := &in.XrayEnabled, &out.XrayEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIObservation. 
+func (in *GraphQLAPIObservation) DeepCopy() *GraphQLAPIObservation { + if in == nil { + return nil + } + out := new(GraphQLAPIObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPIOpenIDConnectConfigInitParameters) DeepCopyInto(out *GraphQLAPIOpenIDConnectConfigInitParameters) { + *out = *in + if in.AuthTTL != nil { + in, out := &in.AuthTTL, &out.AuthTTL + *out = new(float64) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IatTTL != nil { + in, out := &in.IatTTL, &out.IatTTL + *out = new(float64) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIOpenIDConnectConfigInitParameters. +func (in *GraphQLAPIOpenIDConnectConfigInitParameters) DeepCopy() *GraphQLAPIOpenIDConnectConfigInitParameters { + if in == nil { + return nil + } + out := new(GraphQLAPIOpenIDConnectConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GraphQLAPIOpenIDConnectConfigObservation) DeepCopyInto(out *GraphQLAPIOpenIDConnectConfigObservation) { + *out = *in + if in.AuthTTL != nil { + in, out := &in.AuthTTL, &out.AuthTTL + *out = new(float64) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IatTTL != nil { + in, out := &in.IatTTL, &out.IatTTL + *out = new(float64) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIOpenIDConnectConfigObservation. +func (in *GraphQLAPIOpenIDConnectConfigObservation) DeepCopy() *GraphQLAPIOpenIDConnectConfigObservation { + if in == nil { + return nil + } + out := new(GraphQLAPIOpenIDConnectConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPIOpenIDConnectConfigParameters) DeepCopyInto(out *GraphQLAPIOpenIDConnectConfigParameters) { + *out = *in + if in.AuthTTL != nil { + in, out := &in.AuthTTL, &out.AuthTTL + *out = new(float64) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IatTTL != nil { + in, out := &in.IatTTL, &out.IatTTL + *out = new(float64) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIOpenIDConnectConfigParameters. 
+func (in *GraphQLAPIOpenIDConnectConfigParameters) DeepCopy() *GraphQLAPIOpenIDConnectConfigParameters { + if in == nil { + return nil + } + out := new(GraphQLAPIOpenIDConnectConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPIParameters) DeepCopyInto(out *GraphQLAPIParameters) { + *out = *in + if in.AdditionalAuthenticationProvider != nil { + in, out := &in.AdditionalAuthenticationProvider, &out.AdditionalAuthenticationProvider + *out = make([]AdditionalAuthenticationProviderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.IntrospectionConfig != nil { + in, out := &in.IntrospectionConfig, &out.IntrospectionConfig + *out = new(string) + **out = **in + } + if in.LambdaAuthorizerConfig != nil { + in, out := &in.LambdaAuthorizerConfig, &out.LambdaAuthorizerConfig + *out = new(GraphQLAPILambdaAuthorizerConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.LogConfig != nil { + in, out := &in.LogConfig, &out.LogConfig + *out = new(LogConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OpenIDConnectConfig != nil { + in, out := &in.OpenIDConnectConfig, &out.OpenIDConnectConfig + *out = new(GraphQLAPIOpenIDConnectConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryDepthLimit != nil { + in, out := &in.QueryDepthLimit, &out.QueryDepthLimit + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ResolverCountLimit != nil { + in, out := &in.ResolverCountLimit, &out.ResolverCountLimit + *out = new(float64) + **out = **in + } + if in.Schema != nil { + 
in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserPoolConfig != nil { + in, out := &in.UserPoolConfig, &out.UserPoolConfig + *out = new(GraphQLAPIUserPoolConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Visibility != nil { + in, out := &in.Visibility, &out.Visibility + *out = new(string) + **out = **in + } + if in.XrayEnabled != nil { + in, out := &in.XrayEnabled, &out.XrayEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIParameters. +func (in *GraphQLAPIParameters) DeepCopy() *GraphQLAPIParameters { + if in == nil { + return nil + } + out := new(GraphQLAPIParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPISpec) DeepCopyInto(out *GraphQLAPISpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPISpec. +func (in *GraphQLAPISpec) DeepCopy() *GraphQLAPISpec { + if in == nil { + return nil + } + out := new(GraphQLAPISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GraphQLAPIStatus) DeepCopyInto(out *GraphQLAPIStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIStatus. +func (in *GraphQLAPIStatus) DeepCopy() *GraphQLAPIStatus { + if in == nil { + return nil + } + out := new(GraphQLAPIStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphQLAPIUserPoolConfigInitParameters) DeepCopyInto(out *GraphQLAPIUserPoolConfigInitParameters) { + *out = *in + if in.AppIDClientRegex != nil { + in, out := &in.AppIDClientRegex, &out.AppIDClientRegex + *out = new(string) + **out = **in + } + if in.AwsRegion != nil { + in, out := &in.AwsRegion, &out.AwsRegion + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } + if in.UserPoolIDRef != nil { + in, out := &in.UserPoolIDRef, &out.UserPoolIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolIDSelector != nil { + in, out := &in.UserPoolIDSelector, &out.UserPoolIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIUserPoolConfigInitParameters. +func (in *GraphQLAPIUserPoolConfigInitParameters) DeepCopy() *GraphQLAPIUserPoolConfigInitParameters { + if in == nil { + return nil + } + out := new(GraphQLAPIUserPoolConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GraphQLAPIUserPoolConfigObservation) DeepCopyInto(out *GraphQLAPIUserPoolConfigObservation) { + *out = *in + if in.AppIDClientRegex != nil { + in, out := &in.AppIDClientRegex, &out.AppIDClientRegex + *out = new(string) + **out = **in + } + if in.AwsRegion != nil { + in, out := &in.AwsRegion, &out.AwsRegion + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIUserPoolConfigObservation. +func (in *GraphQLAPIUserPoolConfigObservation) DeepCopy() *GraphQLAPIUserPoolConfigObservation { + if in == nil { + return nil + } + out := new(GraphQLAPIUserPoolConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GraphQLAPIUserPoolConfigParameters) DeepCopyInto(out *GraphQLAPIUserPoolConfigParameters) { + *out = *in + if in.AppIDClientRegex != nil { + in, out := &in.AppIDClientRegex, &out.AppIDClientRegex + *out = new(string) + **out = **in + } + if in.AwsRegion != nil { + in, out := &in.AwsRegion, &out.AwsRegion + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } + if in.UserPoolIDRef != nil { + in, out := &in.UserPoolIDRef, &out.UserPoolIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolIDSelector != nil { + in, out := &in.UserPoolIDSelector, &out.UserPoolIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphQLAPIUserPoolConfigParameters. +func (in *GraphQLAPIUserPoolConfigParameters) DeepCopy() *GraphQLAPIUserPoolConfigParameters { + if in == nil { + return nil + } + out := new(GraphQLAPIUserPoolConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigInitParameters) DeepCopyInto(out *HTTPConfigInitParameters) { + *out = *in + if in.AuthorizationConfig != nil { + in, out := &in.AuthorizationConfig, &out.AuthorizationConfig + *out = new(AuthorizationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigInitParameters. 
+func (in *HTTPConfigInitParameters) DeepCopy() *HTTPConfigInitParameters { + if in == nil { + return nil + } + out := new(HTTPConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigObservation) DeepCopyInto(out *HTTPConfigObservation) { + *out = *in + if in.AuthorizationConfig != nil { + in, out := &in.AuthorizationConfig, &out.AuthorizationConfig + *out = new(AuthorizationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigObservation. +func (in *HTTPConfigObservation) DeepCopy() *HTTPConfigObservation { + if in == nil { + return nil + } + out := new(HTTPConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigParameters) DeepCopyInto(out *HTTPConfigParameters) { + *out = *in + if in.AuthorizationConfig != nil { + in, out := &in.AuthorizationConfig, &out.AuthorizationConfig + *out = new(AuthorizationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigParameters. +func (in *HTTPConfigParameters) DeepCopy() *HTTPConfigParameters { + if in == nil { + return nil + } + out := new(HTTPConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPEndpointConfigInitParameters) DeepCopyInto(out *HTTPEndpointConfigInitParameters) { + *out = *in + if in.AwsSecretStoreArn != nil { + in, out := &in.AwsSecretStoreArn, &out.AwsSecretStoreArn + *out = new(string) + **out = **in + } + if in.DBClusterIdentifier != nil { + in, out := &in.DBClusterIdentifier, &out.DBClusterIdentifier + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigInitParameters. +func (in *HTTPEndpointConfigInitParameters) DeepCopy() *HTTPEndpointConfigInitParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigObservation) DeepCopyInto(out *HTTPEndpointConfigObservation) { + *out = *in + if in.AwsSecretStoreArn != nil { + in, out := &in.AwsSecretStoreArn, &out.AwsSecretStoreArn + *out = new(string) + **out = **in + } + if in.DBClusterIdentifier != nil { + in, out := &in.DBClusterIdentifier, &out.DBClusterIdentifier + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigObservation. 
+func (in *HTTPEndpointConfigObservation) DeepCopy() *HTTPEndpointConfigObservation { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigParameters) DeepCopyInto(out *HTTPEndpointConfigParameters) { + *out = *in + if in.AwsSecretStoreArn != nil { + in, out := &in.AwsSecretStoreArn, &out.AwsSecretStoreArn + *out = new(string) + **out = **in + } + if in.DBClusterIdentifier != nil { + in, out := &in.DBClusterIdentifier, &out.DBClusterIdentifier + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigParameters. +func (in *HTTPEndpointConfigParameters) DeepCopy() *HTTPEndpointConfigParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LambdaAuthorizerConfigInitParameters) DeepCopyInto(out *LambdaAuthorizerConfigInitParameters) { + *out = *in + if in.AuthorizerResultTTLInSeconds != nil { + in, out := &in.AuthorizerResultTTLInSeconds, &out.AuthorizerResultTTLInSeconds + *out = new(float64) + **out = **in + } + if in.AuthorizerURI != nil { + in, out := &in.AuthorizerURI, &out.AuthorizerURI + *out = new(string) + **out = **in + } + if in.IdentityValidationExpression != nil { + in, out := &in.IdentityValidationExpression, &out.IdentityValidationExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaAuthorizerConfigInitParameters. +func (in *LambdaAuthorizerConfigInitParameters) DeepCopy() *LambdaAuthorizerConfigInitParameters { + if in == nil { + return nil + } + out := new(LambdaAuthorizerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaAuthorizerConfigObservation) DeepCopyInto(out *LambdaAuthorizerConfigObservation) { + *out = *in + if in.AuthorizerResultTTLInSeconds != nil { + in, out := &in.AuthorizerResultTTLInSeconds, &out.AuthorizerResultTTLInSeconds + *out = new(float64) + **out = **in + } + if in.AuthorizerURI != nil { + in, out := &in.AuthorizerURI, &out.AuthorizerURI + *out = new(string) + **out = **in + } + if in.IdentityValidationExpression != nil { + in, out := &in.IdentityValidationExpression, &out.IdentityValidationExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaAuthorizerConfigObservation. 
+func (in *LambdaAuthorizerConfigObservation) DeepCopy() *LambdaAuthorizerConfigObservation { + if in == nil { + return nil + } + out := new(LambdaAuthorizerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaAuthorizerConfigParameters) DeepCopyInto(out *LambdaAuthorizerConfigParameters) { + *out = *in + if in.AuthorizerResultTTLInSeconds != nil { + in, out := &in.AuthorizerResultTTLInSeconds, &out.AuthorizerResultTTLInSeconds + *out = new(float64) + **out = **in + } + if in.AuthorizerURI != nil { + in, out := &in.AuthorizerURI, &out.AuthorizerURI + *out = new(string) + **out = **in + } + if in.IdentityValidationExpression != nil { + in, out := &in.IdentityValidationExpression, &out.IdentityValidationExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaAuthorizerConfigParameters. +func (in *LambdaAuthorizerConfigParameters) DeepCopy() *LambdaAuthorizerConfigParameters { + if in == nil { + return nil + } + out := new(LambdaAuthorizerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaConfigInitParameters) DeepCopyInto(out *LambdaConfigInitParameters) { + *out = *in + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaConfigInitParameters. +func (in *LambdaConfigInitParameters) DeepCopy() *LambdaConfigInitParameters { + if in == nil { + return nil + } + out := new(LambdaConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *LambdaConfigObservation) DeepCopyInto(out *LambdaConfigObservation) { + *out = *in + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaConfigObservation. +func (in *LambdaConfigObservation) DeepCopy() *LambdaConfigObservation { + if in == nil { + return nil + } + out := new(LambdaConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaConfigParameters) DeepCopyInto(out *LambdaConfigParameters) { + *out = *in + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaConfigParameters. +func (in *LambdaConfigParameters) DeepCopy() *LambdaConfigParameters { + if in == nil { + return nil + } + out := new(LambdaConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaConflictHandlerConfigInitParameters) DeepCopyInto(out *LambdaConflictHandlerConfigInitParameters) { + *out = *in + if in.LambdaConflictHandlerArn != nil { + in, out := &in.LambdaConflictHandlerArn, &out.LambdaConflictHandlerArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaConflictHandlerConfigInitParameters. 
+func (in *LambdaConflictHandlerConfigInitParameters) DeepCopy() *LambdaConflictHandlerConfigInitParameters { + if in == nil { + return nil + } + out := new(LambdaConflictHandlerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaConflictHandlerConfigObservation) DeepCopyInto(out *LambdaConflictHandlerConfigObservation) { + *out = *in + if in.LambdaConflictHandlerArn != nil { + in, out := &in.LambdaConflictHandlerArn, &out.LambdaConflictHandlerArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaConflictHandlerConfigObservation. +func (in *LambdaConflictHandlerConfigObservation) DeepCopy() *LambdaConflictHandlerConfigObservation { + if in == nil { + return nil + } + out := new(LambdaConflictHandlerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaConflictHandlerConfigParameters) DeepCopyInto(out *LambdaConflictHandlerConfigParameters) { + *out = *in + if in.LambdaConflictHandlerArn != nil { + in, out := &in.LambdaConflictHandlerArn, &out.LambdaConflictHandlerArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaConflictHandlerConfigParameters. +func (in *LambdaConflictHandlerConfigParameters) DeepCopy() *LambdaConflictHandlerConfigParameters { + if in == nil { + return nil + } + out := new(LambdaConflictHandlerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogConfigInitParameters) DeepCopyInto(out *LogConfigInitParameters) { + *out = *in + if in.CloudwatchLogsRoleArn != nil { + in, out := &in.CloudwatchLogsRoleArn, &out.CloudwatchLogsRoleArn + *out = new(string) + **out = **in + } + if in.CloudwatchLogsRoleArnRef != nil { + in, out := &in.CloudwatchLogsRoleArnRef, &out.CloudwatchLogsRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLogsRoleArnSelector != nil { + in, out := &in.CloudwatchLogsRoleArnSelector, &out.CloudwatchLogsRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExcludeVerboseContent != nil { + in, out := &in.ExcludeVerboseContent, &out.ExcludeVerboseContent + *out = new(bool) + **out = **in + } + if in.FieldLogLevel != nil { + in, out := &in.FieldLogLevel, &out.FieldLogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigInitParameters. +func (in *LogConfigInitParameters) DeepCopy() *LogConfigInitParameters { + if in == nil { + return nil + } + out := new(LogConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogConfigObservation) DeepCopyInto(out *LogConfigObservation) { + *out = *in + if in.CloudwatchLogsRoleArn != nil { + in, out := &in.CloudwatchLogsRoleArn, &out.CloudwatchLogsRoleArn + *out = new(string) + **out = **in + } + if in.ExcludeVerboseContent != nil { + in, out := &in.ExcludeVerboseContent, &out.ExcludeVerboseContent + *out = new(bool) + **out = **in + } + if in.FieldLogLevel != nil { + in, out := &in.FieldLogLevel, &out.FieldLogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigObservation. 
+func (in *LogConfigObservation) DeepCopy() *LogConfigObservation { + if in == nil { + return nil + } + out := new(LogConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogConfigParameters) DeepCopyInto(out *LogConfigParameters) { + *out = *in + if in.CloudwatchLogsRoleArn != nil { + in, out := &in.CloudwatchLogsRoleArn, &out.CloudwatchLogsRoleArn + *out = new(string) + **out = **in + } + if in.CloudwatchLogsRoleArnRef != nil { + in, out := &in.CloudwatchLogsRoleArnRef, &out.CloudwatchLogsRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLogsRoleArnSelector != nil { + in, out := &in.CloudwatchLogsRoleArnSelector, &out.CloudwatchLogsRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExcludeVerboseContent != nil { + in, out := &in.ExcludeVerboseContent, &out.ExcludeVerboseContent + *out = new(bool) + **out = **in + } + if in.FieldLogLevel != nil { + in, out := &in.FieldLogLevel, &out.FieldLogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigParameters. +func (in *LogConfigParameters) DeepCopy() *LogConfigParameters { + if in == nil { + return nil + } + out := new(LogConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDConnectConfigInitParameters) DeepCopyInto(out *OpenIDConnectConfigInitParameters) { + *out = *in + if in.AuthTTL != nil { + in, out := &in.AuthTTL, &out.AuthTTL + *out = new(float64) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IatTTL != nil { + in, out := &in.IatTTL, &out.IatTTL + *out = new(float64) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectConfigInitParameters. +func (in *OpenIDConnectConfigInitParameters) DeepCopy() *OpenIDConnectConfigInitParameters { + if in == nil { + return nil + } + out := new(OpenIDConnectConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectConfigObservation) DeepCopyInto(out *OpenIDConnectConfigObservation) { + *out = *in + if in.AuthTTL != nil { + in, out := &in.AuthTTL, &out.AuthTTL + *out = new(float64) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IatTTL != nil { + in, out := &in.IatTTL, &out.IatTTL + *out = new(float64) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectConfigObservation. +func (in *OpenIDConnectConfigObservation) DeepCopy() *OpenIDConnectConfigObservation { + if in == nil { + return nil + } + out := new(OpenIDConnectConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDConnectConfigParameters) DeepCopyInto(out *OpenIDConnectConfigParameters) { + *out = *in + if in.AuthTTL != nil { + in, out := &in.AuthTTL, &out.AuthTTL + *out = new(float64) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IatTTL != nil { + in, out := &in.IatTTL, &out.IatTTL + *out = new(float64) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectConfigParameters. +func (in *OpenIDConnectConfigParameters) DeepCopy() *OpenIDConnectConfigParameters { + if in == nil { + return nil + } + out := new(OpenIDConnectConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserviceConfigInitParameters) DeepCopyInto(out *OpensearchserviceConfigInitParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserviceConfigInitParameters. +func (in *OpensearchserviceConfigInitParameters) DeepCopy() *OpensearchserviceConfigInitParameters { + if in == nil { + return nil + } + out := new(OpensearchserviceConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserviceConfigObservation) DeepCopyInto(out *OpensearchserviceConfigObservation) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserviceConfigObservation. +func (in *OpensearchserviceConfigObservation) DeepCopy() *OpensearchserviceConfigObservation { + if in == nil { + return nil + } + out := new(OpensearchserviceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserviceConfigParameters) DeepCopyInto(out *OpensearchserviceConfigParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserviceConfigParameters. +func (in *OpensearchserviceConfigParameters) DeepCopy() *OpensearchserviceConfigParameters { + if in == nil { + return nil + } + out := new(OpensearchserviceConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineConfigInitParameters) DeepCopyInto(out *PipelineConfigInitParameters) { + *out = *in + if in.Functions != nil { + in, out := &in.Functions, &out.Functions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineConfigInitParameters. +func (in *PipelineConfigInitParameters) DeepCopy() *PipelineConfigInitParameters { + if in == nil { + return nil + } + out := new(PipelineConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineConfigObservation) DeepCopyInto(out *PipelineConfigObservation) { + *out = *in + if in.Functions != nil { + in, out := &in.Functions, &out.Functions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineConfigObservation. +func (in *PipelineConfigObservation) DeepCopy() *PipelineConfigObservation { + if in == nil { + return nil + } + out := new(PipelineConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineConfigParameters) DeepCopyInto(out *PipelineConfigParameters) { + *out = *in + if in.Functions != nil { + in, out := &in.Functions, &out.Functions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineConfigParameters. 
+func (in *PipelineConfigParameters) DeepCopy() *PipelineConfigParameters { + if in == nil { + return nil + } + out := new(PipelineConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelationalDatabaseConfigInitParameters) DeepCopyInto(out *RelationalDatabaseConfigInitParameters) { + *out = *in + if in.HTTPEndpointConfig != nil { + in, out := &in.HTTPEndpointConfig, &out.HTTPEndpointConfig + *out = new(HTTPEndpointConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceType != nil { + in, out := &in.SourceType, &out.SourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelationalDatabaseConfigInitParameters. +func (in *RelationalDatabaseConfigInitParameters) DeepCopy() *RelationalDatabaseConfigInitParameters { + if in == nil { + return nil + } + out := new(RelationalDatabaseConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelationalDatabaseConfigObservation) DeepCopyInto(out *RelationalDatabaseConfigObservation) { + *out = *in + if in.HTTPEndpointConfig != nil { + in, out := &in.HTTPEndpointConfig, &out.HTTPEndpointConfig + *out = new(HTTPEndpointConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SourceType != nil { + in, out := &in.SourceType, &out.SourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelationalDatabaseConfigObservation. 
+func (in *RelationalDatabaseConfigObservation) DeepCopy() *RelationalDatabaseConfigObservation { + if in == nil { + return nil + } + out := new(RelationalDatabaseConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelationalDatabaseConfigParameters) DeepCopyInto(out *RelationalDatabaseConfigParameters) { + *out = *in + if in.HTTPEndpointConfig != nil { + in, out := &in.HTTPEndpointConfig, &out.HTTPEndpointConfig + *out = new(HTTPEndpointConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceType != nil { + in, out := &in.SourceType, &out.SourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelationalDatabaseConfigParameters. +func (in *RelationalDatabaseConfigParameters) DeepCopy() *RelationalDatabaseConfigParameters { + if in == nil { + return nil + } + out := new(RelationalDatabaseConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Resolver) DeepCopyInto(out *Resolver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resolver. +func (in *Resolver) DeepCopy() *Resolver { + if in == nil { + return nil + } + out := new(Resolver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Resolver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ResolverInitParameters) DeepCopyInto(out *ResolverInitParameters) { + *out = *in + if in.CachingConfig != nil { + in, out := &in.CachingConfig, &out.CachingConfig + *out = new(CachingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(string) + **out = **in + } + if in.DataSourceRef != nil { + in, out := &in.DataSourceRef, &out.DataSourceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataSourceSelector != nil { + in, out := &in.DataSourceSelector, &out.DataSourceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.MaxBatchSize != nil { + in, out := &in.MaxBatchSize, &out.MaxBatchSize + *out = new(float64) + **out = **in + } + if in.PipelineConfig != nil { + in, out := &in.PipelineConfig, &out.PipelineConfig + *out = new(PipelineConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequestTemplate != nil { + in, out := &in.RequestTemplate, &out.RequestTemplate + *out = new(string) + **out = **in + } + if in.ResponseTemplate != nil { + in, out := &in.ResponseTemplate, &out.ResponseTemplate + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(ResolverRuntimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SyncConfig != nil { + in, out := &in.SyncConfig, &out.SyncConfig + *out = new(ResolverSyncConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverInitParameters. 
+func (in *ResolverInitParameters) DeepCopy() *ResolverInitParameters { + if in == nil { + return nil + } + out := new(ResolverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverList) DeepCopyInto(out *ResolverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Resolver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverList. +func (in *ResolverList) DeepCopy() *ResolverList { + if in == nil { + return nil + } + out := new(ResolverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResolverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResolverObservation) DeepCopyInto(out *ResolverObservation) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CachingConfig != nil { + in, out := &in.CachingConfig, &out.CachingConfig + *out = new(CachingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(string) + **out = **in + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.MaxBatchSize != nil { + in, out := &in.MaxBatchSize, &out.MaxBatchSize + *out = new(float64) + **out = **in + } + if in.PipelineConfig != nil { + in, out := &in.PipelineConfig, &out.PipelineConfig + *out = new(PipelineConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.RequestTemplate != nil { + in, out := &in.RequestTemplate, &out.RequestTemplate + *out = new(string) + **out = **in + } + if in.ResponseTemplate != nil { + in, out := &in.ResponseTemplate, &out.ResponseTemplate + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(ResolverRuntimeObservation) + (*in).DeepCopyInto(*out) + } + if in.SyncConfig != nil { + in, out := &in.SyncConfig, &out.SyncConfig + *out = new(ResolverSyncConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverObservation. 
+func (in *ResolverObservation) DeepCopy() *ResolverObservation { + if in == nil { + return nil + } + out := new(ResolverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverParameters) DeepCopyInto(out *ResolverParameters) { + *out = *in + if in.APIID != nil { + in, out := &in.APIID, &out.APIID + *out = new(string) + **out = **in + } + if in.APIIDRef != nil { + in, out := &in.APIIDRef, &out.APIIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIIDSelector != nil { + in, out := &in.APIIDSelector, &out.APIIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CachingConfig != nil { + in, out := &in.CachingConfig, &out.CachingConfig + *out = new(CachingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(string) + **out = **in + } + if in.DataSourceRef != nil { + in, out := &in.DataSourceRef, &out.DataSourceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataSourceSelector != nil { + in, out := &in.DataSourceSelector, &out.DataSourceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.MaxBatchSize != nil { + in, out := &in.MaxBatchSize, &out.MaxBatchSize + *out = new(float64) + **out = **in + } + if in.PipelineConfig != nil { + in, out := &in.PipelineConfig, &out.PipelineConfig + *out = new(PipelineConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RequestTemplate != nil { + in, 
out := &in.RequestTemplate, &out.RequestTemplate + *out = new(string) + **out = **in + } + if in.ResponseTemplate != nil { + in, out := &in.ResponseTemplate, &out.ResponseTemplate + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(ResolverRuntimeParameters) + (*in).DeepCopyInto(*out) + } + if in.SyncConfig != nil { + in, out := &in.SyncConfig, &out.SyncConfig + *out = new(ResolverSyncConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverParameters. +func (in *ResolverParameters) DeepCopy() *ResolverParameters { + if in == nil { + return nil + } + out := new(ResolverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverRuntimeInitParameters) DeepCopyInto(out *ResolverRuntimeInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverRuntimeInitParameters. +func (in *ResolverRuntimeInitParameters) DeepCopy() *ResolverRuntimeInitParameters { + if in == nil { + return nil + } + out := new(ResolverRuntimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResolverRuntimeObservation) DeepCopyInto(out *ResolverRuntimeObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverRuntimeObservation. +func (in *ResolverRuntimeObservation) DeepCopy() *ResolverRuntimeObservation { + if in == nil { + return nil + } + out := new(ResolverRuntimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverRuntimeParameters) DeepCopyInto(out *ResolverRuntimeParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverRuntimeParameters. +func (in *ResolverRuntimeParameters) DeepCopy() *ResolverRuntimeParameters { + if in == nil { + return nil + } + out := new(ResolverRuntimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverSpec) DeepCopyInto(out *ResolverSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverSpec. 
+func (in *ResolverSpec) DeepCopy() *ResolverSpec { + if in == nil { + return nil + } + out := new(ResolverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverStatus) DeepCopyInto(out *ResolverStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverStatus. +func (in *ResolverStatus) DeepCopy() *ResolverStatus { + if in == nil { + return nil + } + out := new(ResolverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverSyncConfigInitParameters) DeepCopyInto(out *ResolverSyncConfigInitParameters) { + *out = *in + if in.ConflictDetection != nil { + in, out := &in.ConflictDetection, &out.ConflictDetection + *out = new(string) + **out = **in + } + if in.ConflictHandler != nil { + in, out := &in.ConflictHandler, &out.ConflictHandler + *out = new(string) + **out = **in + } + if in.LambdaConflictHandlerConfig != nil { + in, out := &in.LambdaConflictHandlerConfig, &out.LambdaConflictHandlerConfig + *out = new(SyncConfigLambdaConflictHandlerConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverSyncConfigInitParameters. +func (in *ResolverSyncConfigInitParameters) DeepCopy() *ResolverSyncConfigInitParameters { + if in == nil { + return nil + } + out := new(ResolverSyncConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResolverSyncConfigObservation) DeepCopyInto(out *ResolverSyncConfigObservation) { + *out = *in + if in.ConflictDetection != nil { + in, out := &in.ConflictDetection, &out.ConflictDetection + *out = new(string) + **out = **in + } + if in.ConflictHandler != nil { + in, out := &in.ConflictHandler, &out.ConflictHandler + *out = new(string) + **out = **in + } + if in.LambdaConflictHandlerConfig != nil { + in, out := &in.LambdaConflictHandlerConfig, &out.LambdaConflictHandlerConfig + *out = new(SyncConfigLambdaConflictHandlerConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverSyncConfigObservation. +func (in *ResolverSyncConfigObservation) DeepCopy() *ResolverSyncConfigObservation { + if in == nil { + return nil + } + out := new(ResolverSyncConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverSyncConfigParameters) DeepCopyInto(out *ResolverSyncConfigParameters) { + *out = *in + if in.ConflictDetection != nil { + in, out := &in.ConflictDetection, &out.ConflictDetection + *out = new(string) + **out = **in + } + if in.ConflictHandler != nil { + in, out := &in.ConflictHandler, &out.ConflictHandler + *out = new(string) + **out = **in + } + if in.LambdaConflictHandlerConfig != nil { + in, out := &in.LambdaConflictHandlerConfig, &out.LambdaConflictHandlerConfig + *out = new(SyncConfigLambdaConflictHandlerConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverSyncConfigParameters. 
+func (in *ResolverSyncConfigParameters) DeepCopy() *ResolverSyncConfigParameters { + if in == nil { + return nil + } + out := new(ResolverSyncConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeInitParameters) DeepCopyInto(out *RuntimeInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeInitParameters. +func (in *RuntimeInitParameters) DeepCopy() *RuntimeInitParameters { + if in == nil { + return nil + } + out := new(RuntimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeObservation) DeepCopyInto(out *RuntimeObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeObservation. +func (in *RuntimeObservation) DeepCopy() *RuntimeObservation { + if in == nil { + return nil + } + out := new(RuntimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuntimeParameters) DeepCopyInto(out *RuntimeParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeParameters. +func (in *RuntimeParameters) DeepCopy() *RuntimeParameters { + if in == nil { + return nil + } + out := new(RuntimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncConfigInitParameters) DeepCopyInto(out *SyncConfigInitParameters) { + *out = *in + if in.ConflictDetection != nil { + in, out := &in.ConflictDetection, &out.ConflictDetection + *out = new(string) + **out = **in + } + if in.ConflictHandler != nil { + in, out := &in.ConflictHandler, &out.ConflictHandler + *out = new(string) + **out = **in + } + if in.LambdaConflictHandlerConfig != nil { + in, out := &in.LambdaConflictHandlerConfig, &out.LambdaConflictHandlerConfig + *out = new(LambdaConflictHandlerConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfigInitParameters. +func (in *SyncConfigInitParameters) DeepCopy() *SyncConfigInitParameters { + if in == nil { + return nil + } + out := new(SyncConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyncConfigLambdaConflictHandlerConfigInitParameters) DeepCopyInto(out *SyncConfigLambdaConflictHandlerConfigInitParameters) { + *out = *in + if in.LambdaConflictHandlerArn != nil { + in, out := &in.LambdaConflictHandlerArn, &out.LambdaConflictHandlerArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfigLambdaConflictHandlerConfigInitParameters. +func (in *SyncConfigLambdaConflictHandlerConfigInitParameters) DeepCopy() *SyncConfigLambdaConflictHandlerConfigInitParameters { + if in == nil { + return nil + } + out := new(SyncConfigLambdaConflictHandlerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncConfigLambdaConflictHandlerConfigObservation) DeepCopyInto(out *SyncConfigLambdaConflictHandlerConfigObservation) { + *out = *in + if in.LambdaConflictHandlerArn != nil { + in, out := &in.LambdaConflictHandlerArn, &out.LambdaConflictHandlerArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfigLambdaConflictHandlerConfigObservation. +func (in *SyncConfigLambdaConflictHandlerConfigObservation) DeepCopy() *SyncConfigLambdaConflictHandlerConfigObservation { + if in == nil { + return nil + } + out := new(SyncConfigLambdaConflictHandlerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyncConfigLambdaConflictHandlerConfigParameters) DeepCopyInto(out *SyncConfigLambdaConflictHandlerConfigParameters) { + *out = *in + if in.LambdaConflictHandlerArn != nil { + in, out := &in.LambdaConflictHandlerArn, &out.LambdaConflictHandlerArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfigLambdaConflictHandlerConfigParameters. +func (in *SyncConfigLambdaConflictHandlerConfigParameters) DeepCopy() *SyncConfigLambdaConflictHandlerConfigParameters { + if in == nil { + return nil + } + out := new(SyncConfigLambdaConflictHandlerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncConfigObservation) DeepCopyInto(out *SyncConfigObservation) { + *out = *in + if in.ConflictDetection != nil { + in, out := &in.ConflictDetection, &out.ConflictDetection + *out = new(string) + **out = **in + } + if in.ConflictHandler != nil { + in, out := &in.ConflictHandler, &out.ConflictHandler + *out = new(string) + **out = **in + } + if in.LambdaConflictHandlerConfig != nil { + in, out := &in.LambdaConflictHandlerConfig, &out.LambdaConflictHandlerConfig + *out = new(LambdaConflictHandlerConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfigObservation. +func (in *SyncConfigObservation) DeepCopy() *SyncConfigObservation { + if in == nil { + return nil + } + out := new(SyncConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyncConfigParameters) DeepCopyInto(out *SyncConfigParameters) { + *out = *in + if in.ConflictDetection != nil { + in, out := &in.ConflictDetection, &out.ConflictDetection + *out = new(string) + **out = **in + } + if in.ConflictHandler != nil { + in, out := &in.ConflictHandler, &out.ConflictHandler + *out = new(string) + **out = **in + } + if in.LambdaConflictHandlerConfig != nil { + in, out := &in.LambdaConflictHandlerConfig, &out.LambdaConflictHandlerConfig + *out = new(LambdaConflictHandlerConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfigParameters. +func (in *SyncConfigParameters) DeepCopy() *SyncConfigParameters { + if in == nil { + return nil + } + out := new(SyncConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPoolConfigInitParameters) DeepCopyInto(out *UserPoolConfigInitParameters) { + *out = *in + if in.AppIDClientRegex != nil { + in, out := &in.AppIDClientRegex, &out.AppIDClientRegex + *out = new(string) + **out = **in + } + if in.AwsRegion != nil { + in, out := &in.AwsRegion, &out.AwsRegion + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolConfigInitParameters. +func (in *UserPoolConfigInitParameters) DeepCopy() *UserPoolConfigInitParameters { + if in == nil { + return nil + } + out := new(UserPoolConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserPoolConfigObservation) DeepCopyInto(out *UserPoolConfigObservation) { + *out = *in + if in.AppIDClientRegex != nil { + in, out := &in.AppIDClientRegex, &out.AppIDClientRegex + *out = new(string) + **out = **in + } + if in.AwsRegion != nil { + in, out := &in.AwsRegion, &out.AwsRegion + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolConfigObservation. +func (in *UserPoolConfigObservation) DeepCopy() *UserPoolConfigObservation { + if in == nil { + return nil + } + out := new(UserPoolConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPoolConfigParameters) DeepCopyInto(out *UserPoolConfigParameters) { + *out = *in + if in.AppIDClientRegex != nil { + in, out := &in.AppIDClientRegex, &out.AppIDClientRegex + *out = new(string) + **out = **in + } + if in.AwsRegion != nil { + in, out := &in.AwsRegion, &out.AwsRegion + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolConfigParameters. 
+func (in *UserPoolConfigParameters) DeepCopy() *UserPoolConfigParameters { + if in == nil { + return nil + } + out := new(UserPoolConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/appsync/v1beta2/zz_generated.managed.go b/apis/appsync/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..0b7e7781a4 --- /dev/null +++ b/apis/appsync/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Datasource. +func (mg *Datasource) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Datasource. +func (mg *Datasource) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Datasource. +func (mg *Datasource) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Datasource. +func (mg *Datasource) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Datasource. +func (mg *Datasource) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Datasource. +func (mg *Datasource) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Datasource. +func (mg *Datasource) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Datasource. 
+func (mg *Datasource) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Datasource. +func (mg *Datasource) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Datasource. +func (mg *Datasource) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Datasource. +func (mg *Datasource) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Datasource. +func (mg *Datasource) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Function. +func (mg *Function) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Function. +func (mg *Function) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Function. +func (mg *Function) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Function. +func (mg *Function) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Function. +func (mg *Function) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Function. +func (mg *Function) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Function. +func (mg *Function) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Function. 
+func (mg *Function) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Function. +func (mg *Function) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Function. +func (mg *Function) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Function. +func (mg *Function) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Function. +func (mg *Function) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this GraphQLAPI. +func (mg *GraphQLAPI) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this GraphQLAPI. +func (mg *GraphQLAPI) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this GraphQLAPI. +func (mg *GraphQLAPI) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this GraphQLAPI. +func (mg *GraphQLAPI) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this GraphQLAPI. +func (mg *GraphQLAPI) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this GraphQLAPI. +func (mg *GraphQLAPI) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this GraphQLAPI. +func (mg *GraphQLAPI) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this GraphQLAPI. 
+func (mg *GraphQLAPI) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this GraphQLAPI. +func (mg *GraphQLAPI) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this GraphQLAPI. +func (mg *GraphQLAPI) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this GraphQLAPI. +func (mg *GraphQLAPI) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this GraphQLAPI. +func (mg *GraphQLAPI) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Resolver. +func (mg *Resolver) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Resolver. +func (mg *Resolver) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Resolver. +func (mg *Resolver) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Resolver. +func (mg *Resolver) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Resolver. +func (mg *Resolver) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Resolver. +func (mg *Resolver) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Resolver. +func (mg *Resolver) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Resolver. 
+func (mg *Resolver) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Resolver. +func (mg *Resolver) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Resolver. +func (mg *Resolver) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Resolver. +func (mg *Resolver) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Resolver. +func (mg *Resolver) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/appsync/v1beta2/zz_generated.managedlist.go b/apis/appsync/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..ccd89522ea --- /dev/null +++ b/apis/appsync/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DatasourceList. +func (l *DatasourceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FunctionList. +func (l *FunctionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this GraphQLAPIList. +func (l *GraphQLAPIList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ResolverList. 
+func (l *ResolverList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/appsync/v1beta2/zz_generated.resolvers.go b/apis/appsync/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..0b2e570008 --- /dev/null +++ b/apis/appsync/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,384 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Datasource) ResolveReferences( // ResolveReferences of this Datasource. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "GraphQLAPI", "GraphQLAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.APIIDRef, + Selector: mg.Spec.ForProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIID") + } + mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.DynamodbConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", "Table", "TableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DynamodbConfig.TableName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DynamodbConfig.TableNameRef, + Selector: mg.Spec.ForProvider.DynamodbConfig.TableNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DynamodbConfig.TableName") + } + mg.Spec.ForProvider.DynamodbConfig.TableName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DynamodbConfig.TableNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") 
+ if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ServiceRoleArnRef, + Selector: mg.Spec.ForProvider.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceRoleArn") + } + mg.Spec.ForProvider.ServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceRoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.DynamodbConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", "Table", "TableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DynamodbConfig.TableName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DynamodbConfig.TableNameRef, + Selector: mg.Spec.InitProvider.DynamodbConfig.TableNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DynamodbConfig.TableName") + } + mg.Spec.InitProvider.DynamodbConfig.TableName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DynamodbConfig.TableNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ServiceRoleArnRef, + Selector: mg.Spec.InitProvider.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceRoleArn") + } + mg.Spec.InitProvider.ServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceRoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Function. +func (mg *Function) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "GraphQLAPI", "GraphQLAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.APIIDRef, + Selector: mg.Spec.ForProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIID") + } + mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "Datasource", "DatasourceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataSource), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.ForProvider.DataSourceRef, + Selector: mg.Spec.ForProvider.DataSourceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataSource") + } + mg.Spec.ForProvider.DataSource = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataSourceRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "GraphQLAPI", "GraphQLAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.APIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.APIIDRef, + Selector: mg.Spec.InitProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.APIID") + } + mg.Spec.InitProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "Datasource", "DatasourceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataSource), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DataSourceRef, + Selector: mg.Spec.InitProvider.DataSourceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataSource") + } + mg.Spec.InitProvider.DataSource = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataSourceRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this GraphQLAPI. 
+func (mg *GraphQLAPI) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.LogConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LogConfig.CloudwatchLogsRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.LogConfig.CloudwatchLogsRoleArnRef, + Selector: mg.Spec.ForProvider.LogConfig.CloudwatchLogsRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogConfig.CloudwatchLogsRoleArn") + } + mg.Spec.ForProvider.LogConfig.CloudwatchLogsRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogConfig.CloudwatchLogsRoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.UserPoolConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.UserPoolConfig.UserPoolID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.UserPoolConfig.UserPoolIDRef, + Selector: mg.Spec.ForProvider.UserPoolConfig.UserPoolIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.UserPoolConfig.UserPoolID") + } + 
mg.Spec.ForProvider.UserPoolConfig.UserPoolID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.UserPoolConfig.UserPoolIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LogConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LogConfig.CloudwatchLogsRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.LogConfig.CloudwatchLogsRoleArnRef, + Selector: mg.Spec.InitProvider.LogConfig.CloudwatchLogsRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LogConfig.CloudwatchLogsRoleArn") + } + mg.Spec.InitProvider.LogConfig.CloudwatchLogsRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LogConfig.CloudwatchLogsRoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.UserPoolConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.UserPoolConfig.UserPoolID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.UserPoolConfig.UserPoolIDRef, + Selector: mg.Spec.InitProvider.UserPoolConfig.UserPoolIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.UserPoolConfig.UserPoolID") + } + mg.Spec.InitProvider.UserPoolConfig.UserPoolID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.UserPoolConfig.UserPoolIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Resolver. +func (mg *Resolver) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "GraphQLAPI", "GraphQLAPIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.APIIDRef, + Selector: mg.Spec.ForProvider.APIIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIID") + } + mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "Datasource", "DatasourceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataSource), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DataSourceRef, + Selector: mg.Spec.ForProvider.DataSourceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataSource") + } + mg.Spec.ForProvider.DataSource = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataSourceRef = rsp.ResolvedReference + { + m, l, 
err = apisresolver.GetManagedResource("appsync.aws.upbound.io", "v1beta2", "Datasource", "DatasourceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataSource), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DataSourceRef, + Selector: mg.Spec.InitProvider.DataSourceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataSource") + } + mg.Spec.InitProvider.DataSource = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataSourceRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/appsync/v1beta2/zz_graphqlapi_terraformed.go b/apis/appsync/v1beta2/zz_graphqlapi_terraformed.go new file mode 100755 index 0000000000..f01e098c26 --- /dev/null +++ b/apis/appsync/v1beta2/zz_graphqlapi_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this GraphQLAPI +func (mg *GraphQLAPI) GetTerraformResourceType() string { + return "aws_appsync_graphql_api" +} + +// GetConnectionDetailsMapping for this GraphQLAPI +func (tr *GraphQLAPI) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this GraphQLAPI +func (tr *GraphQLAPI) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this GraphQLAPI +func (tr *GraphQLAPI) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this GraphQLAPI +func (tr *GraphQLAPI) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this GraphQLAPI +func (tr *GraphQLAPI) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this GraphQLAPI +func (tr *GraphQLAPI) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this GraphQLAPI +func (tr *GraphQLAPI) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this GraphQLAPI +func (tr *GraphQLAPI) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this GraphQLAPI using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *GraphQLAPI) LateInitialize(attrs []byte) (bool, error) { + params := &GraphQLAPIParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GraphQLAPI) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appsync/v1beta2/zz_graphqlapi_types.go b/apis/appsync/v1beta2/zz_graphqlapi_types.go new file mode 100755 index 0000000000..53a4ed5ebb --- /dev/null +++ b/apis/appsync/v1beta2/zz_graphqlapi_types.go @@ -0,0 +1,639 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdditionalAuthenticationProviderInitParameters struct { + + // Authentication type. Valid values: API_KEY, AWS_IAM, AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // Nested argument containing Lambda authorizer configuration. Defined below. + LambdaAuthorizerConfig *LambdaAuthorizerConfigInitParameters `json:"lambdaAuthorizerConfig,omitempty" tf:"lambda_authorizer_config,omitempty"` + + // Nested argument containing OpenID Connect configuration. Defined below. + OpenIDConnectConfig *OpenIDConnectConfigInitParameters `json:"openidConnectConfig,omitempty" tf:"openid_connect_config,omitempty"` + + // Amazon Cognito User Pool configuration. Defined below. + UserPoolConfig *UserPoolConfigInitParameters `json:"userPoolConfig,omitempty" tf:"user_pool_config,omitempty"` +} + +type AdditionalAuthenticationProviderObservation struct { + + // Authentication type. 
Valid values: API_KEY, AWS_IAM, AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // Nested argument containing Lambda authorizer configuration. Defined below. + LambdaAuthorizerConfig *LambdaAuthorizerConfigObservation `json:"lambdaAuthorizerConfig,omitempty" tf:"lambda_authorizer_config,omitempty"` + + // Nested argument containing OpenID Connect configuration. Defined below. + OpenIDConnectConfig *OpenIDConnectConfigObservation `json:"openidConnectConfig,omitempty" tf:"openid_connect_config,omitempty"` + + // Amazon Cognito User Pool configuration. Defined below. + UserPoolConfig *UserPoolConfigObservation `json:"userPoolConfig,omitempty" tf:"user_pool_config,omitempty"` +} + +type AdditionalAuthenticationProviderParameters struct { + + // Authentication type. Valid values: API_KEY, AWS_IAM, AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA + // +kubebuilder:validation:Optional + AuthenticationType *string `json:"authenticationType" tf:"authentication_type,omitempty"` + + // Nested argument containing Lambda authorizer configuration. Defined below. + // +kubebuilder:validation:Optional + LambdaAuthorizerConfig *LambdaAuthorizerConfigParameters `json:"lambdaAuthorizerConfig,omitempty" tf:"lambda_authorizer_config,omitempty"` + + // Nested argument containing OpenID Connect configuration. Defined below. + // +kubebuilder:validation:Optional + OpenIDConnectConfig *OpenIDConnectConfigParameters `json:"openidConnectConfig,omitempty" tf:"openid_connect_config,omitempty"` + + // Amazon Cognito User Pool configuration. Defined below. + // +kubebuilder:validation:Optional + UserPoolConfig *UserPoolConfigParameters `json:"userPoolConfig,omitempty" tf:"user_pool_config,omitempty"` +} + +type GraphQLAPIInitParameters struct { + + // One or more additional authentication providers for the GraphqlApi. Defined below. 
+ AdditionalAuthenticationProvider []AdditionalAuthenticationProviderInitParameters `json:"additionalAuthenticationProvider,omitempty" tf:"additional_authentication_provider,omitempty"` + + // Authentication type. Valid values: API_KEY, AWS_IAM, AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // Sets the value of the GraphQL API to enable (ENABLED) or disable (DISABLED) introspection. If no value is provided, the introspection configuration will be set to ENABLED by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled. For more information about introspection, see GraphQL introspection. + IntrospectionConfig *string `json:"introspectionConfig,omitempty" tf:"introspection_config,omitempty"` + + // Nested argument containing Lambda authorizer configuration. Defined below. + LambdaAuthorizerConfig *GraphQLAPILambdaAuthorizerConfigInitParameters `json:"lambdaAuthorizerConfig,omitempty" tf:"lambda_authorizer_config,omitempty"` + + // Nested argument containing logging configuration. Defined below. + LogConfig *LogConfigInitParameters `json:"logConfig,omitempty" tf:"log_config,omitempty"` + + // User-supplied name for the GraphqlApi. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Nested argument containing OpenID Connect configuration. Defined below. + OpenIDConnectConfig *GraphQLAPIOpenIDConnectConfigInitParameters `json:"openidConnectConfig,omitempty" tf:"openid_connect_config,omitempty"` + + // The maximum depth a query can have in a single request. Depth refers to the amount of nested levels allowed in the body of query. The default value is 0 (or unspecified), which indicates there's no depth limit. If you set a limit, it can be between 1 and 75 nested levels. This field will produce a limit error if the operation falls out of bounds. 
+ QueryDepthLimit *float64 `json:"queryDepthLimit,omitempty" tf:"query_depth_limit,omitempty"` + + // The maximum number of resolvers that can be invoked in a single request. The default value is 0 (or unspecified), which will set the limit to 10000. When specified, the limit value can be between 1 and 10000. This field will produce a limit error if the operation falls out of bounds. + ResolverCountLimit *float64 `json:"resolverCountLimit,omitempty" tf:"resolver_count_limit,omitempty"` + + // Schema definition, in GraphQL schema language format. + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Amazon Cognito User Pool configuration. Defined below. + UserPoolConfig *GraphQLAPIUserPoolConfigInitParameters `json:"userPoolConfig,omitempty" tf:"user_pool_config,omitempty"` + + // Sets the value of the GraphQL API to public (GLOBAL) or private (PRIVATE). If no value is provided, the visibility will be set to GLOBAL by default. This value cannot be changed once the API has been created. + Visibility *string `json:"visibility,omitempty" tf:"visibility,omitempty"` + + // Whether tracing with X-ray is enabled. Defaults to false. + XrayEnabled *bool `json:"xrayEnabled,omitempty" tf:"xray_enabled,omitempty"` +} + +type GraphQLAPILambdaAuthorizerConfigInitParameters struct { + + // Number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a ttlOverride key in its response. A value of 0 disables caching of responses. Minimum value of 0. Maximum value of 3600. + AuthorizerResultTTLInSeconds *float64 `json:"authorizerResultTtlInSeconds,omitempty" tf:"authorizer_result_ttl_in_seconds,omitempty"` + + // ARN of the Lambda function to be called for authorization. 
Note: This Lambda function must have a resource-based policy assigned to it, to allow lambda:InvokeFunction from service principal appsync.amazonaws.com. + AuthorizerURI *string `json:"authorizerUri,omitempty" tf:"authorizer_uri,omitempty"` + + // Regular expression for validation of tokens before the Lambda function is called. + IdentityValidationExpression *string `json:"identityValidationExpression,omitempty" tf:"identity_validation_expression,omitempty"` +} + +type GraphQLAPILambdaAuthorizerConfigObservation struct { + + // Number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a ttlOverride key in its response. A value of 0 disables caching of responses. Minimum value of 0. Maximum value of 3600. + AuthorizerResultTTLInSeconds *float64 `json:"authorizerResultTtlInSeconds,omitempty" tf:"authorizer_result_ttl_in_seconds,omitempty"` + + // ARN of the Lambda function to be called for authorization. Note: This Lambda function must have a resource-based policy assigned to it, to allow lambda:InvokeFunction from service principal appsync.amazonaws.com. + AuthorizerURI *string `json:"authorizerUri,omitempty" tf:"authorizer_uri,omitempty"` + + // Regular expression for validation of tokens before the Lambda function is called. + IdentityValidationExpression *string `json:"identityValidationExpression,omitempty" tf:"identity_validation_expression,omitempty"` +} + +type GraphQLAPILambdaAuthorizerConfigParameters struct { + + // Number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a ttlOverride key in its response. A value of 0 disables caching of responses. Minimum value of 0. Maximum value of 3600. 
+ // +kubebuilder:validation:Optional + AuthorizerResultTTLInSeconds *float64 `json:"authorizerResultTtlInSeconds,omitempty" tf:"authorizer_result_ttl_in_seconds,omitempty"` + + // ARN of the Lambda function to be called for authorization. Note: This Lambda function must have a resource-based policy assigned to it, to allow lambda:InvokeFunction from service principal appsync.amazonaws.com. + // +kubebuilder:validation:Optional + AuthorizerURI *string `json:"authorizerUri" tf:"authorizer_uri,omitempty"` + + // Regular expression for validation of tokens before the Lambda function is called. + // +kubebuilder:validation:Optional + IdentityValidationExpression *string `json:"identityValidationExpression,omitempty" tf:"identity_validation_expression,omitempty"` +} + +type GraphQLAPIObservation struct { + + // One or more additional authentication providers for the GraphqlApi. Defined below. + AdditionalAuthenticationProvider []AdditionalAuthenticationProviderObservation `json:"additionalAuthenticationProvider,omitempty" tf:"additional_authentication_provider,omitempty"` + + // ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Authentication type. Valid values: API_KEY, AWS_IAM, AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // API ID + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Sets the value of the GraphQL API to enable (ENABLED) or disable (DISABLED) introspection. If no value is provided, the introspection configuration will be set to ENABLED by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled. For more information about introspection, see GraphQL introspection. + IntrospectionConfig *string `json:"introspectionConfig,omitempty" tf:"introspection_config,omitempty"` + + // Nested argument containing Lambda authorizer configuration. 
Defined below. + LambdaAuthorizerConfig *GraphQLAPILambdaAuthorizerConfigObservation `json:"lambdaAuthorizerConfig,omitempty" tf:"lambda_authorizer_config,omitempty"` + + // Nested argument containing logging configuration. Defined below. + LogConfig *LogConfigObservation `json:"logConfig,omitempty" tf:"log_config,omitempty"` + + // User-supplied name for the GraphqlApi. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Nested argument containing OpenID Connect configuration. Defined below. + OpenIDConnectConfig *GraphQLAPIOpenIDConnectConfigObservation `json:"openidConnectConfig,omitempty" tf:"openid_connect_config,omitempty"` + + // The maximum depth a query can have in a single request. Depth refers to the amount of nested levels allowed in the body of query. The default value is 0 (or unspecified), which indicates there's no depth limit. If you set a limit, it can be between 1 and 75 nested levels. This field will produce a limit error if the operation falls out of bounds. + QueryDepthLimit *float64 `json:"queryDepthLimit,omitempty" tf:"query_depth_limit,omitempty"` + + // The maximum number of resolvers that can be invoked in a single request. The default value is 0 (or unspecified), which will set the limit to 10000. When specified, the limit value can be between 1 and 10000. This field will produce a limit error if the operation falls out of bounds. + ResolverCountLimit *float64 `json:"resolverCountLimit,omitempty" tf:"resolver_count_limit,omitempty"` + + // Schema definition, in GraphQL schema language format. + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+	// +mapType=granular
+	TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"`
+
+	// Map of URIs associated with the API. E.g., uris["GRAPHQL"] = https://ID.appsync-api.REGION.amazonaws.com/graphql
+	// +mapType=granular
+	Uris map[string]*string `json:"uris,omitempty" tf:"uris,omitempty"`
+
+	// Amazon Cognito User Pool configuration. Defined below.
+	UserPoolConfig *GraphQLAPIUserPoolConfigObservation `json:"userPoolConfig,omitempty" tf:"user_pool_config,omitempty"`
+
+	// Sets the value of the GraphQL API to public (GLOBAL) or private (PRIVATE). If no value is provided, the visibility will be set to GLOBAL by default. This value cannot be changed once the API has been created.
+	Visibility *string `json:"visibility,omitempty" tf:"visibility,omitempty"`
+
+	// Whether tracing with X-ray is enabled. Defaults to false.
+	XrayEnabled *bool `json:"xrayEnabled,omitempty" tf:"xray_enabled,omitempty"`
+}
+
+type GraphQLAPIOpenIDConnectConfigInitParameters struct {
+
+	// Number of milliseconds a token is valid after being authenticated.
+	AuthTTL *float64 `json:"authTtl,omitempty" tf:"auth_ttl,omitempty"`
+
+	// Client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time.
+	ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"`
+
+	// Number of milliseconds a token is valid after being issued to a user.
+	IatTTL *float64 `json:"iatTtl,omitempty" tf:"iat_ttl,omitempty"`
+
+	// Issuer for the OpenID Connect configuration. The issuer returned by discovery MUST exactly match the value of iss in the ID Token.
+	Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"`
+}
+
+type GraphQLAPIOpenIDConnectConfigObservation struct {
+
+	// Number of milliseconds a token is valid after being authenticated.
+ AuthTTL *float64 `json:"authTtl,omitempty" tf:"auth_ttl,omitempty"` + + // Client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Number of milliseconds a token is valid after being issued to a user. + IatTTL *float64 `json:"iatTtl,omitempty" tf:"iat_ttl,omitempty"` + + // Issuer for the OpenID Connect configuration. The issuer returned by discovery MUST exactly match the value of iss in the ID Token. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` +} + +type GraphQLAPIOpenIDConnectConfigParameters struct { + + // Number of milliseconds a token is valid after being authenticated. + // +kubebuilder:validation:Optional + AuthTTL *float64 `json:"authTtl,omitempty" tf:"auth_ttl,omitempty"` + + // Client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Number of milliseconds a token is valid after being issued to a user. + // +kubebuilder:validation:Optional + IatTTL *float64 `json:"iatTtl,omitempty" tf:"iat_ttl,omitempty"` + + // Issuer for the OpenID Connect configuration. The issuer returned by discovery MUST exactly match the value of iss in the ID Token. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer" tf:"issuer,omitempty"` +} + +type GraphQLAPIParameters struct { + + // One or more additional authentication providers for the GraphqlApi. Defined below. 
+ // +kubebuilder:validation:Optional + AdditionalAuthenticationProvider []AdditionalAuthenticationProviderParameters `json:"additionalAuthenticationProvider,omitempty" tf:"additional_authentication_provider,omitempty"` + + // Authentication type. Valid values: API_KEY, AWS_IAM, AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA + // +kubebuilder:validation:Optional + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // Sets the value of the GraphQL API to enable (ENABLED) or disable (DISABLED) introspection. If no value is provided, the introspection configuration will be set to ENABLED by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled. For more information about introspection, see GraphQL introspection. + // +kubebuilder:validation:Optional + IntrospectionConfig *string `json:"introspectionConfig,omitempty" tf:"introspection_config,omitempty"` + + // Nested argument containing Lambda authorizer configuration. Defined below. + // +kubebuilder:validation:Optional + LambdaAuthorizerConfig *GraphQLAPILambdaAuthorizerConfigParameters `json:"lambdaAuthorizerConfig,omitempty" tf:"lambda_authorizer_config,omitempty"` + + // Nested argument containing logging configuration. Defined below. + // +kubebuilder:validation:Optional + LogConfig *LogConfigParameters `json:"logConfig,omitempty" tf:"log_config,omitempty"` + + // User-supplied name for the GraphqlApi. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Nested argument containing OpenID Connect configuration. Defined below. + // +kubebuilder:validation:Optional + OpenIDConnectConfig *GraphQLAPIOpenIDConnectConfigParameters `json:"openidConnectConfig,omitempty" tf:"openid_connect_config,omitempty"` + + // The maximum depth a query can have in a single request. 
Depth refers to the amount of nested levels allowed in the body of query. The default value is 0 (or unspecified), which indicates there's no depth limit. If you set a limit, it can be between 1 and 75 nested levels. This field will produce a limit error if the operation falls out of bounds. + // +kubebuilder:validation:Optional + QueryDepthLimit *float64 `json:"queryDepthLimit,omitempty" tf:"query_depth_limit,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The maximum number of resolvers that can be invoked in a single request. The default value is 0 (or unspecified), which will set the limit to 10000. When specified, the limit value can be between 1 and 10000. This field will produce a limit error if the operation falls out of bounds. + // +kubebuilder:validation:Optional + ResolverCountLimit *float64 `json:"resolverCountLimit,omitempty" tf:"resolver_count_limit,omitempty"` + + // Schema definition, in GraphQL schema language format. + // +kubebuilder:validation:Optional + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Amazon Cognito User Pool configuration. Defined below. + // +kubebuilder:validation:Optional + UserPoolConfig *GraphQLAPIUserPoolConfigParameters `json:"userPoolConfig,omitempty" tf:"user_pool_config,omitempty"` + + // Sets the value of the GraphQL API to public (GLOBAL) or private (PRIVATE). If no value is provided, the visibility will be set to GLOBAL by default. This value cannot be changed once the API has been created. + // +kubebuilder:validation:Optional + Visibility *string `json:"visibility,omitempty" tf:"visibility,omitempty"` + + // Whether tracing with X-ray is enabled. Defaults to false. 
+ // +kubebuilder:validation:Optional + XrayEnabled *bool `json:"xrayEnabled,omitempty" tf:"xray_enabled,omitempty"` +} + +type GraphQLAPIUserPoolConfigInitParameters struct { + + // Regular expression for validating the incoming Amazon Cognito User Pool app client ID. + AppIDClientRegex *string `json:"appIdClientRegex,omitempty" tf:"app_id_client_regex,omitempty"` + + // AWS region in which the user pool was created. + AwsRegion *string `json:"awsRegion,omitempty" tf:"aws_region,omitempty"` + + // Action that you want your GraphQL API to take when a request that uses Amazon Cognito User Pool authentication doesn't match the Amazon Cognito User Pool configuration. Valid: ALLOW and DENY + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // User pool ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` + + // Reference to a UserPool in cognitoidp to populate userPoolId. + // +kubebuilder:validation:Optional + UserPoolIDRef *v1.Reference `json:"userPoolIdRef,omitempty" tf:"-"` + + // Selector for a UserPool in cognitoidp to populate userPoolId. + // +kubebuilder:validation:Optional + UserPoolIDSelector *v1.Selector `json:"userPoolIdSelector,omitempty" tf:"-"` +} + +type GraphQLAPIUserPoolConfigObservation struct { + + // Regular expression for validating the incoming Amazon Cognito User Pool app client ID. + AppIDClientRegex *string `json:"appIdClientRegex,omitempty" tf:"app_id_client_regex,omitempty"` + + // AWS region in which the user pool was created. 
+ AwsRegion *string `json:"awsRegion,omitempty" tf:"aws_region,omitempty"` + + // Action that you want your GraphQL API to take when a request that uses Amazon Cognito User Pool authentication doesn't match the Amazon Cognito User Pool configuration. Valid: ALLOW and DENY + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // User pool ID. + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` +} + +type GraphQLAPIUserPoolConfigParameters struct { + + // Regular expression for validating the incoming Amazon Cognito User Pool app client ID. + // +kubebuilder:validation:Optional + AppIDClientRegex *string `json:"appIdClientRegex,omitempty" tf:"app_id_client_regex,omitempty"` + + // AWS region in which the user pool was created. + // +kubebuilder:validation:Optional + AwsRegion *string `json:"awsRegion,omitempty" tf:"aws_region,omitempty"` + + // Action that you want your GraphQL API to take when a request that uses Amazon Cognito User Pool authentication doesn't match the Amazon Cognito User Pool configuration. Valid: ALLOW and DENY + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction" tf:"default_action,omitempty"` + + // User pool ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` + + // Reference to a UserPool in cognitoidp to populate userPoolId. + // +kubebuilder:validation:Optional + UserPoolIDRef *v1.Reference `json:"userPoolIdRef,omitempty" tf:"-"` + + // Selector for a UserPool in cognitoidp to populate userPoolId. 
+ // +kubebuilder:validation:Optional + UserPoolIDSelector *v1.Selector `json:"userPoolIdSelector,omitempty" tf:"-"` +} + +type LambdaAuthorizerConfigInitParameters struct { + + // Number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a ttlOverride key in its response. A value of 0 disables caching of responses. Minimum value of 0. Maximum value of 3600. + AuthorizerResultTTLInSeconds *float64 `json:"authorizerResultTtlInSeconds,omitempty" tf:"authorizer_result_ttl_in_seconds,omitempty"` + + // ARN of the Lambda function to be called for authorization. Note: This Lambda function must have a resource-based policy assigned to it, to allow lambda:InvokeFunction from service principal appsync.amazonaws.com. + AuthorizerURI *string `json:"authorizerUri,omitempty" tf:"authorizer_uri,omitempty"` + + // Regular expression for validation of tokens before the Lambda function is called. + IdentityValidationExpression *string `json:"identityValidationExpression,omitempty" tf:"identity_validation_expression,omitempty"` +} + +type LambdaAuthorizerConfigObservation struct { + + // Number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a ttlOverride key in its response. A value of 0 disables caching of responses. Minimum value of 0. Maximum value of 3600. + AuthorizerResultTTLInSeconds *float64 `json:"authorizerResultTtlInSeconds,omitempty" tf:"authorizer_result_ttl_in_seconds,omitempty"` + + // ARN of the Lambda function to be called for authorization. Note: This Lambda function must have a resource-based policy assigned to it, to allow lambda:InvokeFunction from service principal appsync.amazonaws.com. + AuthorizerURI *string `json:"authorizerUri,omitempty" tf:"authorizer_uri,omitempty"` + + // Regular expression for validation of tokens before the Lambda function is called. 
+ IdentityValidationExpression *string `json:"identityValidationExpression,omitempty" tf:"identity_validation_expression,omitempty"` +} + +type LambdaAuthorizerConfigParameters struct { + + // Number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a ttlOverride key in its response. A value of 0 disables caching of responses. Minimum value of 0. Maximum value of 3600. + // +kubebuilder:validation:Optional + AuthorizerResultTTLInSeconds *float64 `json:"authorizerResultTtlInSeconds,omitempty" tf:"authorizer_result_ttl_in_seconds,omitempty"` + + // ARN of the Lambda function to be called for authorization. Note: This Lambda function must have a resource-based policy assigned to it, to allow lambda:InvokeFunction from service principal appsync.amazonaws.com. + // +kubebuilder:validation:Optional + AuthorizerURI *string `json:"authorizerUri" tf:"authorizer_uri,omitempty"` + + // Regular expression for validation of tokens before the Lambda function is called. + // +kubebuilder:validation:Optional + IdentityValidationExpression *string `json:"identityValidationExpression,omitempty" tf:"identity_validation_expression,omitempty"` +} + +type LogConfigInitParameters struct { + + // Amazon Resource Name of the service role that AWS AppSync will assume to publish to Amazon CloudWatch logs in your account. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + CloudwatchLogsRoleArn *string `json:"cloudwatchLogsRoleArn,omitempty" tf:"cloudwatch_logs_role_arn,omitempty"` + + // Reference to a Role in iam to populate cloudwatchLogsRoleArn. + // +kubebuilder:validation:Optional + CloudwatchLogsRoleArnRef *v1.Reference `json:"cloudwatchLogsRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate cloudwatchLogsRoleArn. 
+ // +kubebuilder:validation:Optional + CloudwatchLogsRoleArnSelector *v1.Selector `json:"cloudwatchLogsRoleArnSelector,omitempty" tf:"-"` + + // Set to TRUE to exclude sections that contain information such as headers, context, and evaluated mapping templates, regardless of logging level. Valid values: true, false. Default value: false + ExcludeVerboseContent *bool `json:"excludeVerboseContent,omitempty" tf:"exclude_verbose_content,omitempty"` + + // Field logging level. Valid values: ALL, ERROR, NONE. + FieldLogLevel *string `json:"fieldLogLevel,omitempty" tf:"field_log_level,omitempty"` +} + +type LogConfigObservation struct { + + // Amazon Resource Name of the service role that AWS AppSync will assume to publish to Amazon CloudWatch logs in your account. + CloudwatchLogsRoleArn *string `json:"cloudwatchLogsRoleArn,omitempty" tf:"cloudwatch_logs_role_arn,omitempty"` + + // Set to TRUE to exclude sections that contain information such as headers, context, and evaluated mapping templates, regardless of logging level. Valid values: true, false. Default value: false + ExcludeVerboseContent *bool `json:"excludeVerboseContent,omitempty" tf:"exclude_verbose_content,omitempty"` + + // Field logging level. Valid values: ALL, ERROR, NONE. + FieldLogLevel *string `json:"fieldLogLevel,omitempty" tf:"field_log_level,omitempty"` +} + +type LogConfigParameters struct { + + // Amazon Resource Name of the service role that AWS AppSync will assume to publish to Amazon CloudWatch logs in your account. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + CloudwatchLogsRoleArn *string `json:"cloudwatchLogsRoleArn,omitempty" tf:"cloudwatch_logs_role_arn,omitempty"` + + // Reference to a Role in iam to populate cloudwatchLogsRoleArn. 
+ // +kubebuilder:validation:Optional + CloudwatchLogsRoleArnRef *v1.Reference `json:"cloudwatchLogsRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate cloudwatchLogsRoleArn. + // +kubebuilder:validation:Optional + CloudwatchLogsRoleArnSelector *v1.Selector `json:"cloudwatchLogsRoleArnSelector,omitempty" tf:"-"` + + // Set to TRUE to exclude sections that contain information such as headers, context, and evaluated mapping templates, regardless of logging level. Valid values: true, false. Default value: false + // +kubebuilder:validation:Optional + ExcludeVerboseContent *bool `json:"excludeVerboseContent,omitempty" tf:"exclude_verbose_content,omitempty"` + + // Field logging level. Valid values: ALL, ERROR, NONE. + // +kubebuilder:validation:Optional + FieldLogLevel *string `json:"fieldLogLevel" tf:"field_log_level,omitempty"` +} + +type OpenIDConnectConfigInitParameters struct { + + // Number of milliseconds a token is valid after being authenticated. + AuthTTL *float64 `json:"authTtl,omitempty" tf:"auth_ttl,omitempty"` + + // Client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Number of milliseconds a token is valid after being issued to a user. + IatTTL *float64 `json:"iatTtl,omitempty" tf:"iat_ttl,omitempty"` + + // Issuer for the OpenID Connect configuration. The issuer returned by discovery MUST exactly match the value of iss in the ID Token. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` +} + +type OpenIDConnectConfigObservation struct { + + // Number of milliseconds a token is valid after being authenticated. 
+ AuthTTL *float64 `json:"authTtl,omitempty" tf:"auth_ttl,omitempty"` + + // Client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Number of milliseconds a token is valid after being issued to a user. + IatTTL *float64 `json:"iatTtl,omitempty" tf:"iat_ttl,omitempty"` + + // Issuer for the OpenID Connect configuration. The issuer returned by discovery MUST exactly match the value of iss in the ID Token. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` +} + +type OpenIDConnectConfigParameters struct { + + // Number of milliseconds a token is valid after being authenticated. + // +kubebuilder:validation:Optional + AuthTTL *float64 `json:"authTtl,omitempty" tf:"auth_ttl,omitempty"` + + // Client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Number of milliseconds a token is valid after being issued to a user. + // +kubebuilder:validation:Optional + IatTTL *float64 `json:"iatTtl,omitempty" tf:"iat_ttl,omitempty"` + + // Issuer for the OpenID Connect configuration. The issuer returned by discovery MUST exactly match the value of iss in the ID Token. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer" tf:"issuer,omitempty"` +} + +type UserPoolConfigInitParameters struct { + + // Regular expression for validating the incoming Amazon Cognito User Pool app client ID. 
+ AppIDClientRegex *string `json:"appIdClientRegex,omitempty" tf:"app_id_client_regex,omitempty"` + + // AWS region in which the user pool was created. + AwsRegion *string `json:"awsRegion,omitempty" tf:"aws_region,omitempty"` + + // User pool ID. + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` +} + +type UserPoolConfigObservation struct { + + // Regular expression for validating the incoming Amazon Cognito User Pool app client ID. + AppIDClientRegex *string `json:"appIdClientRegex,omitempty" tf:"app_id_client_regex,omitempty"` + + // AWS region in which the user pool was created. + AwsRegion *string `json:"awsRegion,omitempty" tf:"aws_region,omitempty"` + + // User pool ID. + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` +} + +type UserPoolConfigParameters struct { + + // Regular expression for validating the incoming Amazon Cognito User Pool app client ID. + // +kubebuilder:validation:Optional + AppIDClientRegex *string `json:"appIdClientRegex,omitempty" tf:"app_id_client_regex,omitempty"` + + // AWS region in which the user pool was created. + // +kubebuilder:validation:Optional + AwsRegion *string `json:"awsRegion,omitempty" tf:"aws_region,omitempty"` + + // User pool ID. + // +kubebuilder:validation:Optional + UserPoolID *string `json:"userPoolId" tf:"user_pool_id,omitempty"` +} + +// GraphQLAPISpec defines the desired state of GraphQLAPI +type GraphQLAPISpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GraphQLAPIParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GraphQLAPIInitParameters `json:"initProvider,omitempty"` +} + +// GraphQLAPIStatus defines the observed state of GraphQLAPI. +type GraphQLAPIStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GraphQLAPIObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// GraphQLAPI is the Schema for the GraphQLAPIs API. Provides an AppSync GraphQL API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type GraphQLAPI struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authenticationType) || (has(self.initProvider) && has(self.initProvider.authenticationType))",message="spec.forProvider.authenticationType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && 
has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec GraphQLAPISpec `json:"spec"` + Status GraphQLAPIStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GraphQLAPIList contains a list of GraphQLAPIs +type GraphQLAPIList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GraphQLAPI `json:"items"` +} + +// Repository type metadata. +var ( + GraphQLAPI_Kind = "GraphQLAPI" + GraphQLAPI_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: GraphQLAPI_Kind}.String() + GraphQLAPI_KindAPIVersion = GraphQLAPI_Kind + "." + CRDGroupVersion.String() + GraphQLAPI_GroupVersionKind = CRDGroupVersion.WithKind(GraphQLAPI_Kind) +) + +func init() { + SchemeBuilder.Register(&GraphQLAPI{}, &GraphQLAPIList{}) +} diff --git a/apis/appsync/v1beta2/zz_groupversion_info.go b/apis/appsync/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..5bacbc75cd --- /dev/null +++ b/apis/appsync/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=appsync.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "appsync.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/appsync/v1beta2/zz_resolver_terraformed.go b/apis/appsync/v1beta2/zz_resolver_terraformed.go new file mode 100755 index 0000000000..fb83e284f7 --- /dev/null +++ b/apis/appsync/v1beta2/zz_resolver_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Resolver +func (mg *Resolver) GetTerraformResourceType() string { + return "aws_appsync_resolver" +} + +// GetConnectionDetailsMapping for this Resolver +func (tr *Resolver) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Resolver +func (tr *Resolver) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Resolver +func (tr *Resolver) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Resolver +func (tr *Resolver) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Resolver +func (tr *Resolver) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Resolver +func (tr *Resolver) SetParameters(params 
map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Resolver +func (tr *Resolver) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Resolver +func (tr *Resolver) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Resolver using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Resolver) LateInitialize(attrs []byte) (bool, error) { + params := &ResolverParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Resolver) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appsync/v1beta2/zz_resolver_types.go b/apis/appsync/v1beta2/zz_resolver_types.go new file mode 100755 index 0000000000..83ea363cbd --- /dev/null +++ b/apis/appsync/v1beta2/zz_resolver_types.go @@ -0,0 +1,380 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CachingConfigInitParameters struct { + + // The caching keys for a resolver that has caching activated. Valid values are entries from the $context.arguments, $context.source, and $context.identity maps. + // +listType=set + CachingKeys []*string `json:"cachingKeys,omitempty" tf:"caching_keys,omitempty"` + + // The TTL in seconds for a resolver that has caching activated. Valid values are between 1 and 3600 seconds. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type CachingConfigObservation struct { + + // The caching keys for a resolver that has caching activated. Valid values are entries from the $context.arguments, $context.source, and $context.identity maps. 
+ // +listType=set + CachingKeys []*string `json:"cachingKeys,omitempty" tf:"caching_keys,omitempty"` + + // The TTL in seconds for a resolver that has caching activated. Valid values are between 1 and 3600 seconds. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type CachingConfigParameters struct { + + // The caching keys for a resolver that has caching activated. Valid values are entries from the $context.arguments, $context.source, and $context.identity maps. + // +kubebuilder:validation:Optional + // +listType=set + CachingKeys []*string `json:"cachingKeys,omitempty" tf:"caching_keys,omitempty"` + + // The TTL in seconds for a resolver that has caching activated. Valid values are between 1 and 3600 seconds. + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type PipelineConfigInitParameters struct { + + // A list of Function objects. + Functions []*string `json:"functions,omitempty" tf:"functions,omitempty"` +} + +type PipelineConfigObservation struct { + + // A list of Function objects. + Functions []*string `json:"functions,omitempty" tf:"functions,omitempty"` +} + +type PipelineConfigParameters struct { + + // A list of Function objects. + // +kubebuilder:validation:Optional + Functions []*string `json:"functions,omitempty" tf:"functions,omitempty"` +} + +type ResolverInitParameters struct { + + // The Caching Config. See Caching Config. + CachingConfig *CachingConfigInitParameters `json:"cachingConfig,omitempty" tf:"caching_config,omitempty"` + + // The function code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS. + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Data source name. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.Datasource + DataSource *string `json:"dataSource,omitempty" tf:"data_source,omitempty"` + + // Reference to a Datasource in appsync to populate dataSource. + // +kubebuilder:validation:Optional + DataSourceRef *v1.Reference `json:"dataSourceRef,omitempty" tf:"-"` + + // Selector for a Datasource in appsync to populate dataSource. + // +kubebuilder:validation:Optional + DataSourceSelector *v1.Selector `json:"dataSourceSelector,omitempty" tf:"-"` + + // Resolver type. Valid values are UNIT and PIPELINE. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Maximum batching size for a resolver. Valid values are between 0 and 2000. + MaxBatchSize *float64 `json:"maxBatchSize,omitempty" tf:"max_batch_size,omitempty"` + + // The caching configuration for the resolver. See Pipeline Config. + PipelineConfig *PipelineConfigInitParameters `json:"pipelineConfig,omitempty" tf:"pipeline_config,omitempty"` + + // Request mapping template for UNIT resolver or 'before mapping template' for PIPELINE resolver. Required for non-Lambda resolvers. + RequestTemplate *string `json:"requestTemplate,omitempty" tf:"request_template,omitempty"` + + // Response mapping template for UNIT resolver or 'after mapping template' for PIPELINE resolver. Required for non-Lambda resolvers. + ResponseTemplate *string `json:"responseTemplate,omitempty" tf:"response_template,omitempty"` + + // Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See Runtime. + Runtime *ResolverRuntimeInitParameters `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Describes a Sync configuration for a resolver. See Sync Config. 
+ SyncConfig *ResolverSyncConfigInitParameters `json:"syncConfig,omitempty" tf:"sync_config,omitempty"` +} + +type ResolverObservation struct { + + // API ID for the GraphQL API. + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The Caching Config. See Caching Config. + CachingConfig *CachingConfigObservation `json:"cachingConfig,omitempty" tf:"caching_config,omitempty"` + + // The function code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS. + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Data source name. + DataSource *string `json:"dataSource,omitempty" tf:"data_source,omitempty"` + + // Field name from the schema defined in the GraphQL API. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Resolver type. Valid values are UNIT and PIPELINE. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Maximum batching size for a resolver. Valid values are between 0 and 2000. + MaxBatchSize *float64 `json:"maxBatchSize,omitempty" tf:"max_batch_size,omitempty"` + + // The caching configuration for the resolver. See Pipeline Config. + PipelineConfig *PipelineConfigObservation `json:"pipelineConfig,omitempty" tf:"pipeline_config,omitempty"` + + // Request mapping template for UNIT resolver or 'before mapping template' for PIPELINE resolver. Required for non-Lambda resolvers. + RequestTemplate *string `json:"requestTemplate,omitempty" tf:"request_template,omitempty"` + + // Response mapping template for UNIT resolver or 'after mapping template' for PIPELINE resolver. Required for non-Lambda resolvers. + ResponseTemplate *string `json:"responseTemplate,omitempty" tf:"response_template,omitempty"` + + // Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. 
Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See Runtime. + Runtime *ResolverRuntimeObservation `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Describes a Sync configuration for a resolver. See Sync Config. + SyncConfig *ResolverSyncConfigObservation `json:"syncConfig,omitempty" tf:"sync_config,omitempty"` + + // Type name from the schema defined in the GraphQL API. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ResolverParameters struct { + + // API ID for the GraphQL API. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.GraphQLAPI + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` + + // Reference to a GraphQLAPI in appsync to populate apiId. + // +kubebuilder:validation:Optional + APIIDRef *v1.Reference `json:"apiIdRef,omitempty" tf:"-"` + + // Selector for a GraphQLAPI in appsync to populate apiId. + // +kubebuilder:validation:Optional + APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` + + // The Caching Config. See Caching Config. + // +kubebuilder:validation:Optional + CachingConfig *CachingConfigParameters `json:"cachingConfig,omitempty" tf:"caching_config,omitempty"` + + // The function code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS. + // +kubebuilder:validation:Optional + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Data source name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/appsync/v1beta2.Datasource + // +kubebuilder:validation:Optional + DataSource *string `json:"dataSource,omitempty" tf:"data_source,omitempty"` + + // Reference to a Datasource in appsync to populate dataSource. 
+ // +kubebuilder:validation:Optional + DataSourceRef *v1.Reference `json:"dataSourceRef,omitempty" tf:"-"` + + // Selector for a Datasource in appsync to populate dataSource. + // +kubebuilder:validation:Optional + DataSourceSelector *v1.Selector `json:"dataSourceSelector,omitempty" tf:"-"` + + // Field name from the schema defined in the GraphQL API. + // +kubebuilder:validation:Required + Field *string `json:"field" tf:"field,omitempty"` + + // Resolver type. Valid values are UNIT and PIPELINE. + // +kubebuilder:validation:Optional + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Maximum batching size for a resolver. Valid values are between 0 and 2000. + // +kubebuilder:validation:Optional + MaxBatchSize *float64 `json:"maxBatchSize,omitempty" tf:"max_batch_size,omitempty"` + + // The caching configuration for the resolver. See Pipeline Config. + // +kubebuilder:validation:Optional + PipelineConfig *PipelineConfigParameters `json:"pipelineConfig,omitempty" tf:"pipeline_config,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Request mapping template for UNIT resolver or 'before mapping template' for PIPELINE resolver. Required for non-Lambda resolvers. + // +kubebuilder:validation:Optional + RequestTemplate *string `json:"requestTemplate,omitempty" tf:"request_template,omitempty"` + + // Response mapping template for UNIT resolver or 'after mapping template' for PIPELINE resolver. Required for non-Lambda resolvers. + // +kubebuilder:validation:Optional + ResponseTemplate *string `json:"responseTemplate,omitempty" tf:"response_template,omitempty"` + + // Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See Runtime. 
+ // +kubebuilder:validation:Optional + Runtime *ResolverRuntimeParameters `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Describes a Sync configuration for a resolver. See Sync Config. + // +kubebuilder:validation:Optional + SyncConfig *ResolverSyncConfigParameters `json:"syncConfig,omitempty" tf:"sync_config,omitempty"` + + // Type name from the schema defined in the GraphQL API. + // +kubebuilder:validation:Required + Type *string `json:"type" tf:"type,omitempty"` +} + +type ResolverRuntimeInitParameters struct { + + // The name of the runtime to use. Currently, the only allowed value is APPSYNC_JS. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of the runtime to use. Currently, the only allowed version is 1.0.0. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` +} + +type ResolverRuntimeObservation struct { + + // The name of the runtime to use. Currently, the only allowed value is APPSYNC_JS. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of the runtime to use. Currently, the only allowed version is 1.0.0. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` +} + +type ResolverRuntimeParameters struct { + + // The name of the runtime to use. Currently, the only allowed value is APPSYNC_JS. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The version of the runtime to use. Currently, the only allowed version is 1.0.0. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion" tf:"runtime_version,omitempty"` +} + +type ResolverSyncConfigInitParameters struct { + + // Conflict Detection strategy to use. Valid values are NONE and VERSION. + ConflictDetection *string `json:"conflictDetection,omitempty" tf:"conflict_detection,omitempty"` + + // Conflict Resolution strategy to perform in the event of a conflict. 
Valid values are NONE, OPTIMISTIC_CONCURRENCY, AUTOMERGE, and LAMBDA. + ConflictHandler *string `json:"conflictHandler,omitempty" tf:"conflict_handler,omitempty"` + + // Lambda Conflict Handler Config when configuring LAMBDA as the Conflict Handler. See Lambda Conflict Handler Config. + LambdaConflictHandlerConfig *SyncConfigLambdaConflictHandlerConfigInitParameters `json:"lambdaConflictHandlerConfig,omitempty" tf:"lambda_conflict_handler_config,omitempty"` +} + +type ResolverSyncConfigObservation struct { + + // Conflict Detection strategy to use. Valid values are NONE and VERSION. + ConflictDetection *string `json:"conflictDetection,omitempty" tf:"conflict_detection,omitempty"` + + // Conflict Resolution strategy to perform in the event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, AUTOMERGE, and LAMBDA. + ConflictHandler *string `json:"conflictHandler,omitempty" tf:"conflict_handler,omitempty"` + + // Lambda Conflict Handler Config when configuring LAMBDA as the Conflict Handler. See Lambda Conflict Handler Config. + LambdaConflictHandlerConfig *SyncConfigLambdaConflictHandlerConfigObservation `json:"lambdaConflictHandlerConfig,omitempty" tf:"lambda_conflict_handler_config,omitempty"` +} + +type ResolverSyncConfigParameters struct { + + // Conflict Detection strategy to use. Valid values are NONE and VERSION. + // +kubebuilder:validation:Optional + ConflictDetection *string `json:"conflictDetection,omitempty" tf:"conflict_detection,omitempty"` + + // Conflict Resolution strategy to perform in the event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, AUTOMERGE, and LAMBDA. + // +kubebuilder:validation:Optional + ConflictHandler *string `json:"conflictHandler,omitempty" tf:"conflict_handler,omitempty"` + + // Lambda Conflict Handler Config when configuring LAMBDA as the Conflict Handler. See Lambda Conflict Handler Config. 
+ // +kubebuilder:validation:Optional + LambdaConflictHandlerConfig *SyncConfigLambdaConflictHandlerConfigParameters `json:"lambdaConflictHandlerConfig,omitempty" tf:"lambda_conflict_handler_config,omitempty"` +} + +type SyncConfigLambdaConflictHandlerConfigInitParameters struct { + + // ARN for the Lambda function to use as the Conflict Handler. + LambdaConflictHandlerArn *string `json:"lambdaConflictHandlerArn,omitempty" tf:"lambda_conflict_handler_arn,omitempty"` +} + +type SyncConfigLambdaConflictHandlerConfigObservation struct { + + // ARN for the Lambda function to use as the Conflict Handler. + LambdaConflictHandlerArn *string `json:"lambdaConflictHandlerArn,omitempty" tf:"lambda_conflict_handler_arn,omitempty"` +} + +type SyncConfigLambdaConflictHandlerConfigParameters struct { + + // ARN for the Lambda function to use as the Conflict Handler. + // +kubebuilder:validation:Optional + LambdaConflictHandlerArn *string `json:"lambdaConflictHandlerArn,omitempty" tf:"lambda_conflict_handler_arn,omitempty"` +} + +// ResolverSpec defines the desired state of Resolver +type ResolverSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResolverParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ResolverInitParameters `json:"initProvider,omitempty"` +} + +// ResolverStatus defines the observed state of Resolver. 
+type ResolverStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResolverObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Resolver is the Schema for the Resolvers API. Provides an AppSync Resolver. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Resolver struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ResolverSpec `json:"spec"` + Status ResolverStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResolverList contains a list of Resolvers +type ResolverList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Resolver `json:"items"` +} + +// Repository type metadata. +var ( + Resolver_Kind = "Resolver" + Resolver_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Resolver_Kind}.String() + Resolver_KindAPIVersion = Resolver_Kind + "." + CRDGroupVersion.String() + Resolver_GroupVersionKind = CRDGroupVersion.WithKind(Resolver_Kind) +) + +func init() { + SchemeBuilder.Register(&Resolver{}, &ResolverList{}) +} diff --git a/apis/athena/v1beta1/zz_generated.conversion_hubs.go b/apis/athena/v1beta1/zz_generated.conversion_hubs.go index 7f8ad66f3b..77a63857cb 100755 --- a/apis/athena/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/athena/v1beta1/zz_generated.conversion_hubs.go @@ -6,14 +6,8 @@ package v1beta1 -// Hub marks this type as a conversion hub. 
-func (tr *Database) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DataCatalog) Hub() {} // Hub marks this type as a conversion hub. func (tr *NamedQuery) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Workgroup) Hub() {} diff --git a/apis/athena/v1beta1/zz_generated.conversion_spokes.go b/apis/athena/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..4aa8abe4a5 --- /dev/null +++ b/apis/athena/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Database to the hub type. +func (tr *Database) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Database type. +func (tr *Database) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Workgroup to the hub type. 
+func (tr *Workgroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workgroup type. +func (tr *Workgroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/athena/v1beta1/zz_generated.resolvers.go b/apis/athena/v1beta1/zz_generated.resolvers.go index 5fadf68483..cdae015f48 100644 --- a/apis/athena/v1beta1/zz_generated.resolvers.go +++ b/apis/athena/v1beta1/zz_generated.resolvers.go @@ -78,7 +78,7 @@ func (mg *NamedQuery) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("athena.aws.upbound.io", "v1beta1", "Database", "DatabaseList") + m, l, err = apisresolver.GetManagedResource("athena.aws.upbound.io", "v1beta2", "Database", "DatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -97,7 +97,7 @@ func (mg *NamedQuery) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.Database = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DatabaseRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("athena.aws.upbound.io", "v1beta1", "Workgroup", "WorkgroupList") + 
m, l, err = apisresolver.GetManagedResource("athena.aws.upbound.io", "v1beta2", "Workgroup", "WorkgroupList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -116,7 +116,7 @@ func (mg *NamedQuery) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.Workgroup = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkgroupRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("athena.aws.upbound.io", "v1beta1", "Database", "DatabaseList") + m, l, err = apisresolver.GetManagedResource("athena.aws.upbound.io", "v1beta2", "Database", "DatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -135,7 +135,7 @@ func (mg *NamedQuery) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.InitProvider.Database = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DatabaseRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("athena.aws.upbound.io", "v1beta1", "Workgroup", "WorkgroupList") + m, l, err = apisresolver.GetManagedResource("athena.aws.upbound.io", "v1beta2", "Workgroup", "WorkgroupList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/athena/v1beta1/zz_namedquery_types.go b/apis/athena/v1beta1/zz_namedquery_types.go index 8397b1994f..e25916cf99 100755 --- a/apis/athena/v1beta1/zz_namedquery_types.go +++ b/apis/athena/v1beta1/zz_namedquery_types.go @@ -16,7 +16,7 @@ import ( type NamedQueryInitParameters struct { // Database to which the query belongs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/athena/v1beta1.Database + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/athena/v1beta2.Database Database *string `json:"database,omitempty" tf:"database,omitempty"` // Reference to a Database in athena to populate database. @@ -37,7 +37,7 @@ type NamedQueryInitParameters struct { Query *string `json:"query,omitempty" tf:"query,omitempty"` // Workgroup to which the query belongs. Defaults to primary - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/athena/v1beta1.Workgroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/athena/v1beta2.Workgroup // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Workgroup *string `json:"workgroup,omitempty" tf:"workgroup,omitempty"` @@ -74,7 +74,7 @@ type NamedQueryObservation struct { type NamedQueryParameters struct { // Database to which the query belongs. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/athena/v1beta1.Database + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/athena/v1beta2.Database // +kubebuilder:validation:Optional Database *string `json:"database,omitempty" tf:"database,omitempty"` @@ -104,7 +104,7 @@ type NamedQueryParameters struct { Region *string `json:"region" tf:"-"` // Workgroup to which the query belongs. 
Defaults to primary - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/athena/v1beta1.Workgroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/athena/v1beta2.Workgroup // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Workgroup *string `json:"workgroup,omitempty" tf:"workgroup,omitempty"` diff --git a/apis/athena/v1beta2/zz_database_terraformed.go b/apis/athena/v1beta2/zz_database_terraformed.go new file mode 100755 index 0000000000..9e652c1983 --- /dev/null +++ b/apis/athena/v1beta2/zz_database_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Database +func (mg *Database) GetTerraformResourceType() string { + return "aws_athena_database" +} + +// GetConnectionDetailsMapping for this Database +func (tr *Database) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Database +func (tr *Database) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Database +func (tr *Database) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Database +func (tr *Database) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + 
return *tr.Status.AtProvider.ID +} + +// GetParameters of this Database +func (tr *Database) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Database +func (tr *Database) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Database +func (tr *Database) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Database +func (tr *Database) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Database using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Database) LateInitialize(attrs []byte) (bool, error) { + params := &DatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Database) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/athena/v1beta2/zz_database_types.go b/apis/athena/v1beta2/zz_database_types.go new file mode 100755 index 0000000000..0872fcbb4b --- /dev/null +++ b/apis/athena/v1beta2/zz_database_types.go @@ -0,0 +1,232 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ACLConfigurationInitParameters struct { + + // Amazon S3 canned ACL that Athena should specify when storing query results. Valid value is BUCKET_OWNER_FULL_CONTROL. + S3ACLOption *string `json:"s3AclOption,omitempty" tf:"s3_acl_option,omitempty"` +} + +type ACLConfigurationObservation struct { + + // Amazon S3 canned ACL that Athena should specify when storing query results. Valid value is BUCKET_OWNER_FULL_CONTROL. + S3ACLOption *string `json:"s3AclOption,omitempty" tf:"s3_acl_option,omitempty"` +} + +type ACLConfigurationParameters struct { + + // Amazon S3 canned ACL that Athena should specify when storing query results. Valid value is BUCKET_OWNER_FULL_CONTROL. 
+ // +kubebuilder:validation:Optional + S3ACLOption *string `json:"s3AclOption" tf:"s3_acl_option,omitempty"` +} + +type DatabaseInitParameters struct { + + // That an Amazon S3 canned ACL should be set to control ownership of stored query results. See ACL Configuration below. + ACLConfiguration *ACLConfigurationInitParameters `json:"aclConfiguration,omitempty" tf:"acl_configuration,omitempty"` + + // Name of S3 bucket to save the results of the query execution. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Description of the database. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Encryption key block AWS Athena uses to decrypt the data in S3, such as an AWS Key Management Service (AWS KMS) key. See Encryption Configuration below. + EncryptionConfiguration *EncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // AWS account ID that you expect to be the owner of the Amazon S3 bucket. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are not recoverable. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Key-value map of custom metadata properties for the database definition. 
+ // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type DatabaseObservation struct { + + // That an Amazon S3 canned ACL should be set to control ownership of stored query results. See ACL Configuration below. + ACLConfiguration *ACLConfigurationObservation `json:"aclConfiguration,omitempty" tf:"acl_configuration,omitempty"` + + // Name of S3 bucket to save the results of the query execution. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Description of the database. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Encryption key block AWS Athena uses to decrypt the data in S3, such as an AWS Key Management Service (AWS KMS) key. See Encryption Configuration below. + EncryptionConfiguration *EncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // AWS account ID that you expect to be the owner of the Amazon S3 bucket. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are not recoverable. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Database name + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of custom metadata properties for the database definition. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type DatabaseParameters struct { + + // That an Amazon S3 canned ACL should be set to control ownership of stored query results. See ACL Configuration below. 
+ // +kubebuilder:validation:Optional + ACLConfiguration *ACLConfigurationParameters `json:"aclConfiguration,omitempty" tf:"acl_configuration,omitempty"` + + // Name of S3 bucket to save the results of the query execution. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Description of the database. + // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Encryption key block AWS Athena uses to decrypt the data in S3, such as an AWS Key Management Service (AWS KMS) key. See Encryption Configuration below. + // +kubebuilder:validation:Optional + EncryptionConfiguration *EncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // AWS account ID that you expect to be the owner of the Amazon S3 bucket. + // +kubebuilder:validation:Optional + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are not recoverable. + // +kubebuilder:validation:Optional + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Key-value map of custom metadata properties for the database definition. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type EncryptionConfigurationInitParameters struct { + + // Type of key; one of SSE_S3, SSE_KMS, CSE_KMS + EncryptionOption *string `json:"encryptionOption,omitempty" tf:"encryption_option,omitempty"` + + // KMS key ARN or ID; required for key types SSE_KMS and CSE_KMS. + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` +} + +type EncryptionConfigurationObservation struct { + + // Type of key; one of SSE_S3, SSE_KMS, CSE_KMS + EncryptionOption *string `json:"encryptionOption,omitempty" tf:"encryption_option,omitempty"` + + // KMS key ARN or ID; required for key types SSE_KMS and CSE_KMS. + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` +} + +type EncryptionConfigurationParameters struct { + + // Type of key; one of SSE_S3, SSE_KMS, CSE_KMS + // +kubebuilder:validation:Optional + EncryptionOption *string `json:"encryptionOption" tf:"encryption_option,omitempty"` + + // KMS key ARN or ID; required for key types SSE_KMS and CSE_KMS. + // +kubebuilder:validation:Optional + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` +} + +// DatabaseSpec defines the desired state of Database +type DatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DatabaseInitParameters `json:"initProvider,omitempty"` +} + +// DatabaseStatus defines the observed state of Database. +type DatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Database is the Schema for the Databases API. Provides an Athena database. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Database struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DatabaseSpec `json:"spec"` + Status DatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DatabaseList contains a list of Databases +type DatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Database `json:"items"` +} + +// Repository type metadata. +var ( + Database_Kind = "Database" + Database_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Database_Kind}.String() + Database_KindAPIVersion = Database_Kind + "." 
+ CRDGroupVersion.String() + Database_GroupVersionKind = CRDGroupVersion.WithKind(Database_Kind) +) + +func init() { + SchemeBuilder.Register(&Database{}, &DatabaseList{}) +} diff --git a/apis/athena/v1beta2/zz_generated.conversion_hubs.go b/apis/athena/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..90346cc6e1 --- /dev/null +++ b/apis/athena/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Database) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Workgroup) Hub() {} diff --git a/apis/athena/v1beta2/zz_generated.deepcopy.go b/apis/athena/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0de6174a17 --- /dev/null +++ b/apis/athena/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1209 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACLConfigurationInitParameters) DeepCopyInto(out *ACLConfigurationInitParameters) { + *out = *in + if in.S3ACLOption != nil { + in, out := &in.S3ACLOption, &out.S3ACLOption + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACLConfigurationInitParameters. 
+func (in *ACLConfigurationInitParameters) DeepCopy() *ACLConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ACLConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACLConfigurationObservation) DeepCopyInto(out *ACLConfigurationObservation) { + *out = *in + if in.S3ACLOption != nil { + in, out := &in.S3ACLOption, &out.S3ACLOption + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACLConfigurationObservation. +func (in *ACLConfigurationObservation) DeepCopy() *ACLConfigurationObservation { + if in == nil { + return nil + } + out := new(ACLConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACLConfigurationParameters) DeepCopyInto(out *ACLConfigurationParameters) { + *out = *in + if in.S3ACLOption != nil { + in, out := &in.S3ACLOption, &out.S3ACLOption + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACLConfigurationParameters. +func (in *ACLConfigurationParameters) DeepCopy() *ACLConfigurationParameters { + if in == nil { + return nil + } + out := new(ACLConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.BytesScannedCutoffPerQuery != nil { + in, out := &in.BytesScannedCutoffPerQuery, &out.BytesScannedCutoffPerQuery + *out = new(float64) + **out = **in + } + if in.EnforceWorkgroupConfiguration != nil { + in, out := &in.EnforceWorkgroupConfiguration, &out.EnforceWorkgroupConfiguration + *out = new(bool) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(EngineVersionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.PublishCloudwatchMetricsEnabled != nil { + in, out := &in.PublishCloudwatchMetricsEnabled, &out.PublishCloudwatchMetricsEnabled + *out = new(bool) + **out = **in + } + if in.RequesterPaysEnabled != nil { + in, out := &in.RequesterPaysEnabled, &out.RequesterPaysEnabled + *out = new(bool) + **out = **in + } + if in.ResultConfiguration != nil { + in, out := &in.ResultConfiguration, &out.ResultConfiguration + *out = new(ResultConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.BytesScannedCutoffPerQuery != nil { + in, out := &in.BytesScannedCutoffPerQuery, &out.BytesScannedCutoffPerQuery + *out = new(float64) + **out = **in + } + if in.EnforceWorkgroupConfiguration != nil { + in, out := &in.EnforceWorkgroupConfiguration, &out.EnforceWorkgroupConfiguration + *out = new(bool) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(EngineVersionObservation) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.PublishCloudwatchMetricsEnabled != nil { + in, out := &in.PublishCloudwatchMetricsEnabled, &out.PublishCloudwatchMetricsEnabled + *out = new(bool) + **out = **in + } + if in.RequesterPaysEnabled != nil { + in, out := &in.RequesterPaysEnabled, &out.RequesterPaysEnabled + *out = new(bool) + **out = **in + } + if in.ResultConfiguration != nil { + in, out := &in.ResultConfiguration, &out.ResultConfiguration + *out = new(ResultConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.BytesScannedCutoffPerQuery != nil { + in, out := &in.BytesScannedCutoffPerQuery, &out.BytesScannedCutoffPerQuery + *out = new(float64) + **out = **in + } + if in.EnforceWorkgroupConfiguration != nil { + in, out := &in.EnforceWorkgroupConfiguration, &out.EnforceWorkgroupConfiguration + *out = new(bool) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(EngineVersionParameters) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.PublishCloudwatchMetricsEnabled != nil { + in, out := &in.PublishCloudwatchMetricsEnabled, &out.PublishCloudwatchMetricsEnabled + *out = new(bool) + **out = **in + } + if in.RequesterPaysEnabled != nil { + in, out := &in.RequesterPaysEnabled, &out.RequesterPaysEnabled + *out = new(bool) + **out = **in + } + if in.ResultConfiguration != nil { + in, out := &in.ResultConfiguration, &out.ResultConfiguration + *out = new(ResultConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Database) DeepCopyInto(out *Database) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database. 
+func (in *Database) DeepCopy() *Database { + if in == nil { + return nil + } + out := new(Database) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Database) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseInitParameters) DeepCopyInto(out *DatabaseInitParameters) { + *out = *in + if in.ACLConfiguration != nil { + in, out := &in.ACLConfiguration, &out.ACLConfiguration + *out = new(ACLConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseInitParameters. +func (in *DatabaseInitParameters) DeepCopy() *DatabaseInitParameters { + if in == nil { + return nil + } + out := new(DatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseList) DeepCopyInto(out *DatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Database, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseList. +func (in *DatabaseList) DeepCopy() *DatabaseList { + if in == nil { + return nil + } + out := new(DatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObservation) DeepCopyInto(out *DatabaseObservation) { + *out = *in + if in.ACLConfiguration != nil { + in, out := &in.ACLConfiguration, &out.ACLConfiguration + *out = new(ACLConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObservation. +func (in *DatabaseObservation) DeepCopy() *DatabaseObservation { + if in == nil { + return nil + } + out := new(DatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseParameters) DeepCopyInto(out *DatabaseParameters) { + *out = *in + if in.ACLConfiguration != nil { + in, out := &in.ACLConfiguration, &out.ACLConfiguration + *out = new(ACLConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseParameters. +func (in *DatabaseParameters) DeepCopy() *DatabaseParameters { + if in == nil { + return nil + } + out := new(DatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DatabaseSpec) DeepCopyInto(out *DatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseSpec. +func (in *DatabaseSpec) DeepCopy() *DatabaseSpec { + if in == nil { + return nil + } + out := new(DatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus. +func (in *DatabaseStatus) DeepCopy() *DatabaseStatus { + if in == nil { + return nil + } + out := new(DatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationInitParameters) DeepCopyInto(out *EncryptionConfigurationInitParameters) { + *out = *in + if in.EncryptionOption != nil { + in, out := &in.EncryptionOption, &out.EncryptionOption + *out = new(string) + **out = **in + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationInitParameters. +func (in *EncryptionConfigurationInitParameters) DeepCopy() *EncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *EncryptionConfigurationObservation) DeepCopyInto(out *EncryptionConfigurationObservation) { + *out = *in + if in.EncryptionOption != nil { + in, out := &in.EncryptionOption, &out.EncryptionOption + *out = new(string) + **out = **in + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationObservation. +func (in *EncryptionConfigurationObservation) DeepCopy() *EncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationParameters) DeepCopyInto(out *EncryptionConfigurationParameters) { + *out = *in + if in.EncryptionOption != nil { + in, out := &in.EncryptionOption, &out.EncryptionOption + *out = new(string) + **out = **in + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationParameters. +func (in *EncryptionConfigurationParameters) DeepCopy() *EncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EngineVersionInitParameters) DeepCopyInto(out *EngineVersionInitParameters) { + *out = *in + if in.SelectedEngineVersion != nil { + in, out := &in.SelectedEngineVersion, &out.SelectedEngineVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EngineVersionInitParameters. +func (in *EngineVersionInitParameters) DeepCopy() *EngineVersionInitParameters { + if in == nil { + return nil + } + out := new(EngineVersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EngineVersionObservation) DeepCopyInto(out *EngineVersionObservation) { + *out = *in + if in.EffectiveEngineVersion != nil { + in, out := &in.EffectiveEngineVersion, &out.EffectiveEngineVersion + *out = new(string) + **out = **in + } + if in.SelectedEngineVersion != nil { + in, out := &in.SelectedEngineVersion, &out.SelectedEngineVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EngineVersionObservation. +func (in *EngineVersionObservation) DeepCopy() *EngineVersionObservation { + if in == nil { + return nil + } + out := new(EngineVersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EngineVersionParameters) DeepCopyInto(out *EngineVersionParameters) { + *out = *in + if in.SelectedEngineVersion != nil { + in, out := &in.SelectedEngineVersion, &out.SelectedEngineVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EngineVersionParameters. 
+func (in *EngineVersionParameters) DeepCopy() *EngineVersionParameters { + if in == nil { + return nil + } + out := new(EngineVersionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResultConfigurationACLConfigurationInitParameters) DeepCopyInto(out *ResultConfigurationACLConfigurationInitParameters) { + *out = *in + if in.S3ACLOption != nil { + in, out := &in.S3ACLOption, &out.S3ACLOption + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultConfigurationACLConfigurationInitParameters. +func (in *ResultConfigurationACLConfigurationInitParameters) DeepCopy() *ResultConfigurationACLConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ResultConfigurationACLConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResultConfigurationACLConfigurationObservation) DeepCopyInto(out *ResultConfigurationACLConfigurationObservation) { + *out = *in + if in.S3ACLOption != nil { + in, out := &in.S3ACLOption, &out.S3ACLOption + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultConfigurationACLConfigurationObservation. +func (in *ResultConfigurationACLConfigurationObservation) DeepCopy() *ResultConfigurationACLConfigurationObservation { + if in == nil { + return nil + } + out := new(ResultConfigurationACLConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResultConfigurationACLConfigurationParameters) DeepCopyInto(out *ResultConfigurationACLConfigurationParameters) { + *out = *in + if in.S3ACLOption != nil { + in, out := &in.S3ACLOption, &out.S3ACLOption + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultConfigurationACLConfigurationParameters. +func (in *ResultConfigurationACLConfigurationParameters) DeepCopy() *ResultConfigurationACLConfigurationParameters { + if in == nil { + return nil + } + out := new(ResultConfigurationACLConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResultConfigurationEncryptionConfigurationInitParameters) DeepCopyInto(out *ResultConfigurationEncryptionConfigurationInitParameters) { + *out = *in + if in.EncryptionOption != nil { + in, out := &in.EncryptionOption, &out.EncryptionOption + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultConfigurationEncryptionConfigurationInitParameters. +func (in *ResultConfigurationEncryptionConfigurationInitParameters) DeepCopy() *ResultConfigurationEncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ResultConfigurationEncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ResultConfigurationEncryptionConfigurationObservation) DeepCopyInto(out *ResultConfigurationEncryptionConfigurationObservation) { + *out = *in + if in.EncryptionOption != nil { + in, out := &in.EncryptionOption, &out.EncryptionOption + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultConfigurationEncryptionConfigurationObservation. +func (in *ResultConfigurationEncryptionConfigurationObservation) DeepCopy() *ResultConfigurationEncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(ResultConfigurationEncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResultConfigurationEncryptionConfigurationParameters) DeepCopyInto(out *ResultConfigurationEncryptionConfigurationParameters) { + *out = *in + if in.EncryptionOption != nil { + in, out := &in.EncryptionOption, &out.EncryptionOption + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultConfigurationEncryptionConfigurationParameters. 
+func (in *ResultConfigurationEncryptionConfigurationParameters) DeepCopy() *ResultConfigurationEncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(ResultConfigurationEncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResultConfigurationInitParameters) DeepCopyInto(out *ResultConfigurationInitParameters) { + *out = *in + if in.ACLConfiguration != nil { + in, out := &in.ACLConfiguration, &out.ACLConfiguration + *out = new(ResultConfigurationACLConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(ResultConfigurationEncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.OutputLocation != nil { + in, out := &in.OutputLocation, &out.OutputLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultConfigurationInitParameters. +func (in *ResultConfigurationInitParameters) DeepCopy() *ResultConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ResultConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResultConfigurationObservation) DeepCopyInto(out *ResultConfigurationObservation) { + *out = *in + if in.ACLConfiguration != nil { + in, out := &in.ACLConfiguration, &out.ACLConfiguration + *out = new(ResultConfigurationACLConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(ResultConfigurationEncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.OutputLocation != nil { + in, out := &in.OutputLocation, &out.OutputLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultConfigurationObservation. +func (in *ResultConfigurationObservation) DeepCopy() *ResultConfigurationObservation { + if in == nil { + return nil + } + out := new(ResultConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResultConfigurationParameters) DeepCopyInto(out *ResultConfigurationParameters) { + *out = *in + if in.ACLConfiguration != nil { + in, out := &in.ACLConfiguration, &out.ACLConfiguration + *out = new(ResultConfigurationACLConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(ResultConfigurationEncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.OutputLocation != nil { + in, out := &in.OutputLocation, &out.OutputLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResultConfigurationParameters. +func (in *ResultConfigurationParameters) DeepCopy() *ResultConfigurationParameters { + if in == nil { + return nil + } + out := new(ResultConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workgroup) DeepCopyInto(out *Workgroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workgroup. +func (in *Workgroup) DeepCopy() *Workgroup { + if in == nil { + return nil + } + out := new(Workgroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Workgroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkgroupInitParameters) DeepCopyInto(out *WorkgroupInitParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkgroupInitParameters. +func (in *WorkgroupInitParameters) DeepCopy() *WorkgroupInitParameters { + if in == nil { + return nil + } + out := new(WorkgroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkgroupList) DeepCopyInto(out *WorkgroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workgroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkgroupList. 
+func (in *WorkgroupList) DeepCopy() *WorkgroupList { + if in == nil { + return nil + } + out := new(WorkgroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkgroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkgroupObservation) DeepCopyInto(out *WorkgroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new WorkgroupObservation. +func (in *WorkgroupObservation) DeepCopy() *WorkgroupObservation { + if in == nil { + return nil + } + out := new(WorkgroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkgroupParameters) DeepCopyInto(out *WorkgroupParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkgroupParameters. +func (in *WorkgroupParameters) DeepCopy() *WorkgroupParameters { + if in == nil { + return nil + } + out := new(WorkgroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkgroupSpec) DeepCopyInto(out *WorkgroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkgroupSpec. +func (in *WorkgroupSpec) DeepCopy() *WorkgroupSpec { + if in == nil { + return nil + } + out := new(WorkgroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkgroupStatus) DeepCopyInto(out *WorkgroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkgroupStatus. +func (in *WorkgroupStatus) DeepCopy() *WorkgroupStatus { + if in == nil { + return nil + } + out := new(WorkgroupStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/athena/v1beta2/zz_generated.managed.go b/apis/athena/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..962c269a5b --- /dev/null +++ b/apis/athena/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Database. +func (mg *Database) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Database. +func (mg *Database) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Database. 
+func (mg *Database) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Database. +func (mg *Database) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Database. +func (mg *Database) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Database. +func (mg *Database) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Database. +func (mg *Database) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Database. +func (mg *Database) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Database. +func (mg *Database) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Database. +func (mg *Database) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Database. +func (mg *Database) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Database. +func (mg *Database) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Workgroup. +func (mg *Workgroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workgroup. +func (mg *Workgroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workgroup. 
+func (mg *Workgroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workgroup. +func (mg *Workgroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workgroup. +func (mg *Workgroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workgroup. +func (mg *Workgroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workgroup. +func (mg *Workgroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workgroup. +func (mg *Workgroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workgroup. +func (mg *Workgroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workgroup. +func (mg *Workgroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workgroup. +func (mg *Workgroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workgroup. 
+func (mg *Workgroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/athena/v1beta2/zz_generated.managedlist.go b/apis/athena/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..f798e36a61 --- /dev/null +++ b/apis/athena/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DatabaseList. +func (l *DatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WorkgroupList. +func (l *WorkgroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/athena/v1beta2/zz_generated.resolvers.go b/apis/athena/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..8df16915ec --- /dev/null +++ b/apis/athena/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,133 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Database. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Database) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Workgroup. 
+func (mg *Workgroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Configuration != nil { + if mg.Spec.ForProvider.Configuration.ResultConfiguration != nil { + if mg.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArn") + } + mg.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArnRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.Configuration != nil { + if mg.Spec.InitProvider.Configuration.ResultConfiguration != nil { + if mg.Spec.InitProvider.Configuration.ResultConfiguration.EncryptionConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list 
for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArn") + } + mg.Spec.InitProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Configuration.ResultConfiguration.EncryptionConfiguration.KMSKeyArnRef = rsp.ResolvedReference + + } + } + } + + return nil +} diff --git a/apis/athena/v1beta2/zz_groupversion_info.go b/apis/athena/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..fafc8a4fad --- /dev/null +++ b/apis/athena/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=athena.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "athena.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/athena/v1beta2/zz_workgroup_terraformed.go b/apis/athena/v1beta2/zz_workgroup_terraformed.go new file mode 100755 index 0000000000..48d7729a00 --- /dev/null +++ b/apis/athena/v1beta2/zz_workgroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workgroup +func (mg *Workgroup) GetTerraformResourceType() string { + return "aws_athena_workgroup" +} + +// GetConnectionDetailsMapping for this Workgroup +func (tr *Workgroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Workgroup +func (tr *Workgroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workgroup +func (tr *Workgroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workgroup +func (tr *Workgroup) 
GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workgroup +func (tr *Workgroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Workgroup +func (tr *Workgroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Workgroup +func (tr *Workgroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Workgroup +func (tr *Workgroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Workgroup using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Workgroup) LateInitialize(attrs []byte) (bool, error) { + params := &WorkgroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workgroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/athena/v1beta2/zz_workgroup_types.go b/apis/athena/v1beta2/zz_workgroup_types.go new file mode 100755 index 0000000000..3b3e31afa6 --- /dev/null +++ b/apis/athena/v1beta2/zz_workgroup_types.go @@ -0,0 +1,369 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationInitParameters struct { + + // Integer for the upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan. Must be at least 10485760. 
+ BytesScannedCutoffPerQuery *float64 `json:"bytesScannedCutoffPerQuery,omitempty" tf:"bytes_scanned_cutoff_per_query,omitempty"` + + // Boolean whether the settings for the workgroup override client-side settings. For more information, see Workgroup Settings Override Client-Side Settings. Defaults to true. + EnforceWorkgroupConfiguration *bool `json:"enforceWorkgroupConfiguration,omitempty" tf:"enforce_workgroup_configuration,omitempty"` + + // Configuration block for the Athena Engine Versioning. For more information, see Athena Engine Versioning. See Engine Version below. + EngineVersion *EngineVersionInitParameters `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Role used in a notebook session for accessing the user's resources. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // Boolean whether Amazon CloudWatch metrics are enabled for the workgroup. Defaults to true. + PublishCloudwatchMetricsEnabled *bool `json:"publishCloudwatchMetricsEnabled,omitempty" tf:"publish_cloudwatch_metrics_enabled,omitempty"` + + // If set to true , allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false . For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide. + RequesterPaysEnabled *bool `json:"requesterPaysEnabled,omitempty" tf:"requester_pays_enabled,omitempty"` + + // Configuration block with result settings. See Result Configuration below. 
+ ResultConfiguration *ResultConfigurationInitParameters `json:"resultConfiguration,omitempty" tf:"result_configuration,omitempty"` +} + +type ConfigurationObservation struct { + + // Integer for the upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan. Must be at least 10485760. + BytesScannedCutoffPerQuery *float64 `json:"bytesScannedCutoffPerQuery,omitempty" tf:"bytes_scanned_cutoff_per_query,omitempty"` + + // Boolean whether the settings for the workgroup override client-side settings. For more information, see Workgroup Settings Override Client-Side Settings. Defaults to true. + EnforceWorkgroupConfiguration *bool `json:"enforceWorkgroupConfiguration,omitempty" tf:"enforce_workgroup_configuration,omitempty"` + + // Configuration block for the Athena Engine Versioning. For more information, see Athena Engine Versioning. See Engine Version below. + EngineVersion *EngineVersionObservation `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Role used in a notebook session for accessing the user's resources. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // Boolean whether Amazon CloudWatch metrics are enabled for the workgroup. Defaults to true. + PublishCloudwatchMetricsEnabled *bool `json:"publishCloudwatchMetricsEnabled,omitempty" tf:"publish_cloudwatch_metrics_enabled,omitempty"` + + // If set to true , allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false . For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide. 
+ RequesterPaysEnabled *bool `json:"requesterPaysEnabled,omitempty" tf:"requester_pays_enabled,omitempty"` + + // Configuration block with result settings. See Result Configuration below. + ResultConfiguration *ResultConfigurationObservation `json:"resultConfiguration,omitempty" tf:"result_configuration,omitempty"` +} + +type ConfigurationParameters struct { + + // Integer for the upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan. Must be at least 10485760. + // +kubebuilder:validation:Optional + BytesScannedCutoffPerQuery *float64 `json:"bytesScannedCutoffPerQuery,omitempty" tf:"bytes_scanned_cutoff_per_query,omitempty"` + + // Boolean whether the settings for the workgroup override client-side settings. For more information, see Workgroup Settings Override Client-Side Settings. Defaults to true. + // +kubebuilder:validation:Optional + EnforceWorkgroupConfiguration *bool `json:"enforceWorkgroupConfiguration,omitempty" tf:"enforce_workgroup_configuration,omitempty"` + + // Configuration block for the Athena Engine Versioning. For more information, see Athena Engine Versioning. See Engine Version below. + // +kubebuilder:validation:Optional + EngineVersion *EngineVersionParameters `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Role used in a notebook session for accessing the user's resources. + // +kubebuilder:validation:Optional + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // Boolean whether Amazon CloudWatch metrics are enabled for the workgroup. Defaults to true. + // +kubebuilder:validation:Optional + PublishCloudwatchMetricsEnabled *bool `json:"publishCloudwatchMetricsEnabled,omitempty" tf:"publish_cloudwatch_metrics_enabled,omitempty"` + + // If set to true , allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. 
If set to false , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false . For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide. + // +kubebuilder:validation:Optional + RequesterPaysEnabled *bool `json:"requesterPaysEnabled,omitempty" tf:"requester_pays_enabled,omitempty"` + + // Configuration block with result settings. See Result Configuration below. + // +kubebuilder:validation:Optional + ResultConfiguration *ResultConfigurationParameters `json:"resultConfiguration,omitempty" tf:"result_configuration,omitempty"` +} + +type EngineVersionInitParameters struct { + + // Requested engine version. Defaults to AUTO. + SelectedEngineVersion *string `json:"selectedEngineVersion,omitempty" tf:"selected_engine_version,omitempty"` +} + +type EngineVersionObservation struct { + + // The engine version on which the query runs. If selected_engine_version is set to AUTO, the effective engine version is chosen by Athena. + EffectiveEngineVersion *string `json:"effectiveEngineVersion,omitempty" tf:"effective_engine_version,omitempty"` + + // Requested engine version. Defaults to AUTO. + SelectedEngineVersion *string `json:"selectedEngineVersion,omitempty" tf:"selected_engine_version,omitempty"` +} + +type EngineVersionParameters struct { + + // Requested engine version. Defaults to AUTO. + // +kubebuilder:validation:Optional + SelectedEngineVersion *string `json:"selectedEngineVersion,omitempty" tf:"selected_engine_version,omitempty"` +} + +type ResultConfigurationACLConfigurationInitParameters struct { + + // Amazon S3 canned ACL that Athena should specify when storing query results. Valid value is BUCKET_OWNER_FULL_CONTROL. 
+ S3ACLOption *string `json:"s3AclOption,omitempty" tf:"s3_acl_option,omitempty"` +} + +type ResultConfigurationACLConfigurationObservation struct { + + // Amazon S3 canned ACL that Athena should specify when storing query results. Valid value is BUCKET_OWNER_FULL_CONTROL. + S3ACLOption *string `json:"s3AclOption,omitempty" tf:"s3_acl_option,omitempty"` +} + +type ResultConfigurationACLConfigurationParameters struct { + + // Amazon S3 canned ACL that Athena should specify when storing query results. Valid value is BUCKET_OWNER_FULL_CONTROL. + // +kubebuilder:validation:Optional + S3ACLOption *string `json:"s3AclOption" tf:"s3_acl_option,omitempty"` +} + +type ResultConfigurationEncryptionConfigurationInitParameters struct { + + // Whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE_S3), server-side encryption with KMS-managed keys (SSE_KMS), or client-side encryption with KMS-managed keys (CSE_KMS) is used. If a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup's setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup. + EncryptionOption *string `json:"encryptionOption,omitempty" tf:"encryption_option,omitempty"` + + // For SSE_KMS and CSE_KMS, this is the KMS key ARN. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. 
+ // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` +} + +type ResultConfigurationEncryptionConfigurationObservation struct { + + // Whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE_S3), server-side encryption with KMS-managed keys (SSE_KMS), or client-side encryption with KMS-managed keys (CSE_KMS) is used. If a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup's setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup. + EncryptionOption *string `json:"encryptionOption,omitempty" tf:"encryption_option,omitempty"` + + // For SSE_KMS and CSE_KMS, this is the KMS key ARN. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` +} + +type ResultConfigurationEncryptionConfigurationParameters struct { + + // Whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE_S3), server-side encryption with KMS-managed keys (SSE_KMS), or client-side encryption with KMS-managed keys (CSE_KMS) is used. If a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup's setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup. + // +kubebuilder:validation:Optional + EncryptionOption *string `json:"encryptionOption,omitempty" tf:"encryption_option,omitempty"` + + // For SSE_KMS and CSE_KMS, this is the KMS key ARN. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. 
+ // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` +} + +type ResultConfigurationInitParameters struct { + + // That an Amazon S3 canned ACL should be set to control ownership of stored query results. See ACL Configuration below. + ACLConfiguration *ResultConfigurationACLConfigurationInitParameters `json:"aclConfiguration,omitempty" tf:"acl_configuration,omitempty"` + + // Configuration block with encryption settings. See Encryption Configuration below. + EncryptionConfiguration *ResultConfigurationEncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // AWS account ID that you expect to be the owner of the Amazon S3 bucket. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files. + OutputLocation *string `json:"outputLocation,omitempty" tf:"output_location,omitempty"` +} + +type ResultConfigurationObservation struct { + + // That an Amazon S3 canned ACL should be set to control ownership of stored query results. See ACL Configuration below. + ACLConfiguration *ResultConfigurationACLConfigurationObservation `json:"aclConfiguration,omitempty" tf:"acl_configuration,omitempty"` + + // Configuration block with encryption settings. See Encryption Configuration below. + EncryptionConfiguration *ResultConfigurationEncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // AWS account ID that you expect to be the owner of the Amazon S3 bucket. 
+ ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files. + OutputLocation *string `json:"outputLocation,omitempty" tf:"output_location,omitempty"` +} + +type ResultConfigurationParameters struct { + + // That an Amazon S3 canned ACL should be set to control ownership of stored query results. See ACL Configuration below. + // +kubebuilder:validation:Optional + ACLConfiguration *ResultConfigurationACLConfigurationParameters `json:"aclConfiguration,omitempty" tf:"acl_configuration,omitempty"` + + // Configuration block with encryption settings. See Encryption Configuration below. + // +kubebuilder:validation:Optional + EncryptionConfiguration *ResultConfigurationEncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // AWS account ID that you expect to be the owner of the Amazon S3 bucket. + // +kubebuilder:validation:Optional + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. For more information, see Queries and Query Result Files. + // +kubebuilder:validation:Optional + OutputLocation *string `json:"outputLocation,omitempty" tf:"output_location,omitempty"` +} + +type WorkgroupInitParameters struct { + + // Configuration block with various settings for the workgroup. Documented below. + Configuration *ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Description of the workgroup. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Option to delete the workgroup and its contents even if the workgroup contains any named queries. 
+ ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // State of the workgroup. Valid values are DISABLED or ENABLED. Defaults to ENABLED. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WorkgroupObservation struct { + + // ARN of the workgroup + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration block with various settings for the workgroup. Documented below. + Configuration *ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Description of the workgroup. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Option to delete the workgroup and its contents even if the workgroup contains any named queries. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Workgroup name + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // State of the workgroup. Valid values are DISABLED or ENABLED. Defaults to ENABLED. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type WorkgroupParameters struct { + + // Configuration block with various settings for the workgroup. Documented below. + // +kubebuilder:validation:Optional + Configuration *ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Description of the workgroup. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Option to delete the workgroup and its contents even if the workgroup contains any named queries. + // +kubebuilder:validation:Optional + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // State of the workgroup. Valid values are DISABLED or ENABLED. Defaults to ENABLED. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WorkgroupSpec defines the desired state of Workgroup +type WorkgroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkgroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WorkgroupInitParameters `json:"initProvider,omitempty"` +} + +// WorkgroupStatus defines the observed state of Workgroup. 
+type WorkgroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkgroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workgroup is the Schema for the Workgroups API. Manages an Athena Workgroup. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Workgroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec WorkgroupSpec `json:"spec"` + Status WorkgroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkgroupList contains a list of Workgroups +type WorkgroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workgroup `json:"items"` +} + +// Repository type metadata. +var ( + Workgroup_Kind = "Workgroup" + Workgroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workgroup_Kind}.String() + Workgroup_KindAPIVersion = Workgroup_Kind + "." 
+ CRDGroupVersion.String() + Workgroup_GroupVersionKind = CRDGroupVersion.WithKind(Workgroup_Kind) +) + +func init() { + SchemeBuilder.Register(&Workgroup{}, &WorkgroupList{}) +} diff --git a/apis/autoscaling/v1beta1/zz_generated.conversion_hubs.go b/apis/autoscaling/v1beta1/zz_generated.conversion_hubs.go index 52039f1cc5..167b2ccde4 100755 --- a/apis/autoscaling/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/autoscaling/v1beta1/zz_generated.conversion_hubs.go @@ -6,20 +6,11 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *GroupTag) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LaunchConfiguration) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LifecycleHook) Hub() {} // Hub marks this type as a conversion hub. func (tr *Notification) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Policy) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Schedule) Hub() {} diff --git a/apis/autoscaling/v1beta1/zz_generated.conversion_spokes.go b/apis/autoscaling/v1beta1/zz_generated.conversion_spokes.go index 6371b47432..859b24ff17 100755 --- a/apis/autoscaling/v1beta1/zz_generated.conversion_spokes.go +++ b/apis/autoscaling/v1beta1/zz_generated.conversion_spokes.go @@ -52,3 +52,63 @@ func (tr *AutoscalingGroup) ConvertFrom(srcRaw conversion.Hub) error { } return nil } + +// ConvertTo converts this GroupTag to the hub type. +func (tr *GroupTag) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the GroupTag type. 
+func (tr *GroupTag) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LaunchConfiguration to the hub type. +func (tr *LaunchConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LaunchConfiguration type. +func (tr *LaunchConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Policy to the hub type. +func (tr *Policy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Policy type. 
+func (tr *Policy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/autoscaling/v1beta1/zz_generated.resolvers.go b/apis/autoscaling/v1beta1/zz_generated.resolvers.go index e7cb2734ae..46001a1e38 100644 --- a/apis/autoscaling/v1beta1/zz_generated.resolvers.go +++ b/apis/autoscaling/v1beta1/zz_generated.resolvers.go @@ -552,7 +552,7 @@ func (mg *LifecycleHook) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta2", "AutoscalingGroup", "AutoscalingGroupList") + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta3", "AutoscalingGroup", "AutoscalingGroupList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -702,7 +702,7 @@ func (mg *Schedule) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta2", "AutoscalingGroup", "AutoscalingGroupList") + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta3", "AutoscalingGroup", "AutoscalingGroupList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/autoscaling/v1beta1/zz_lifecyclehook_types.go b/apis/autoscaling/v1beta1/zz_lifecyclehook_types.go index 72e8720e6d..6ee886d258 100755 --- a/apis/autoscaling/v1beta1/zz_lifecyclehook_types.go +++ 
b/apis/autoscaling/v1beta1/zz_lifecyclehook_types.go @@ -73,7 +73,7 @@ type LifecycleHookObservation struct { type LifecycleHookParameters struct { // Name of the Auto Scaling group to which you want to assign the lifecycle hook - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta2.AutoscalingGroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta3.AutoscalingGroup // +kubebuilder:validation:Optional AutoscalingGroupName *string `json:"autoscalingGroupName,omitempty" tf:"autoscaling_group_name,omitempty"` diff --git a/apis/autoscaling/v1beta1/zz_schedule_types.go b/apis/autoscaling/v1beta1/zz_schedule_types.go index dd38457afa..2e963da668 100755 --- a/apis/autoscaling/v1beta1/zz_schedule_types.go +++ b/apis/autoscaling/v1beta1/zz_schedule_types.go @@ -72,7 +72,7 @@ type ScheduleObservation struct { type ScheduleParameters struct { // The name of the Auto Scaling group. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta2.AutoscalingGroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta3.AutoscalingGroup // +kubebuilder:validation:Optional AutoscalingGroupName *string `json:"autoscalingGroupName,omitempty" tf:"autoscaling_group_name,omitempty"` diff --git a/apis/autoscaling/v1beta2/zz_attachment_types.go b/apis/autoscaling/v1beta2/zz_attachment_types.go index ede236e139..2b364d463b 100755 --- a/apis/autoscaling/v1beta2/zz_attachment_types.go +++ b/apis/autoscaling/v1beta2/zz_attachment_types.go @@ -16,7 +16,7 @@ import ( type AttachmentInitParameters struct { // Name of ASG to associate with the ELB. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta2.AutoscalingGroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta3.AutoscalingGroup AutoscalingGroupName *string `json:"autoscalingGroupName,omitempty" tf:"autoscaling_group_name,omitempty"` // Reference to a AutoscalingGroup in autoscaling to populate autoscalingGroupName. @@ -28,7 +28,7 @@ type AttachmentInitParameters struct { AutoscalingGroupNameSelector *v1.Selector `json:"autoscalingGroupNameSelector,omitempty" tf:"-"` // Name of the ELB. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ELB *string `json:"elb,omitempty" tf:"elb,omitempty"` @@ -41,7 +41,7 @@ type AttachmentInitParameters struct { ELBSelector *v1.Selector `json:"elbSelector,omitempty" tf:"-"` // ARN of a load balancer target group. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta1.LBTargetGroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) LBTargetGroupArn *string `json:"lbTargetGroupArn,omitempty" tf:"lb_target_group_arn,omitempty"` @@ -71,7 +71,7 @@ type AttachmentObservation struct { type AttachmentParameters struct { // Name of ASG to associate with the ELB. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta2.AutoscalingGroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta3.AutoscalingGroup // +kubebuilder:validation:Optional AutoscalingGroupName *string `json:"autoscalingGroupName,omitempty" tf:"autoscaling_group_name,omitempty"` @@ -84,7 +84,7 @@ type AttachmentParameters struct { AutoscalingGroupNameSelector *v1.Selector `json:"autoscalingGroupNameSelector,omitempty" tf:"-"` // Name of the ELB. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ELB *string `json:"elb,omitempty" tf:"elb,omitempty"` @@ -98,7 +98,7 @@ type AttachmentParameters struct { ELBSelector *v1.Selector `json:"elbSelector,omitempty" tf:"-"` // ARN of a load balancer target group. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta1.LBTargetGroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional LBTargetGroupArn *string `json:"lbTargetGroupArn,omitempty" tf:"lb_target_group_arn,omitempty"` diff --git a/apis/autoscaling/v1beta2/zz_generated.conversion_hubs.go b/apis/autoscaling/v1beta2/zz_generated.conversion_hubs.go index 6cdf181c5f..f571b655a4 100755 --- a/apis/autoscaling/v1beta2/zz_generated.conversion_hubs.go +++ b/apis/autoscaling/v1beta2/zz_generated.conversion_hubs.go @@ -10,4 +10,10 @@ package v1beta2 func (tr *Attachment) Hub() {} // Hub marks this type as a conversion hub. 
-func (tr *AutoscalingGroup) Hub() {} +func (tr *GroupTag) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LaunchConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Policy) Hub() {} diff --git a/apis/autoscaling/v1beta2/zz_generated.conversion_spokes.go b/apis/autoscaling/v1beta2/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..4cd20e6ac3 --- /dev/null +++ b/apis/autoscaling/v1beta2/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AutoscalingGroup to the hub type. +func (tr *AutoscalingGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AutoscalingGroup type. 
+func (tr *AutoscalingGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/autoscaling/v1beta2/zz_generated.deepcopy.go b/apis/autoscaling/v1beta2/zz_generated.deepcopy.go index 4f149cb1d5..d91e9aefa7 100644 --- a/apis/autoscaling/v1beta2/zz_generated.deepcopy.go +++ b/apis/autoscaling/v1beta2/zz_generated.deepcopy.go @@ -1504,1239 +1504,5029 @@ func (in *BaselineEBSBandwidthMbpsParameters) DeepCopy() *BaselineEBSBandwidthMb } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitialLifecycleHookInitParameters) DeepCopyInto(out *InitialLifecycleHookInitParameters) { +func (in *CustomizedCapacityMetricSpecificationInitParameters) DeepCopyInto(out *CustomizedCapacityMetricSpecificationInitParameters) { *out = *in - if in.DefaultResult != nil { - in, out := &in.DefaultResult, &out.DefaultResult - *out = new(string) - **out = **in - } - if in.HeartbeatTimeout != nil { - in, out := &in.HeartbeatTimeout, &out.HeartbeatTimeout - *out = new(float64) - **out = **in - } - if in.LifecycleTransition != nil { - in, out := &in.LifecycleTransition, &out.LifecycleTransition - *out = new(string) - **out = **in + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]MetricDataQueriesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
CustomizedCapacityMetricSpecificationInitParameters. +func (in *CustomizedCapacityMetricSpecificationInitParameters) DeepCopy() *CustomizedCapacityMetricSpecificationInitParameters { + if in == nil { + return nil } - if in.NotificationMetadata != nil { - in, out := &in.NotificationMetadata, &out.NotificationMetadata - *out = new(string) - **out = **in + out := new(CustomizedCapacityMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizedCapacityMetricSpecificationObservation) DeepCopyInto(out *CustomizedCapacityMetricSpecificationObservation) { + *out = *in + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]MetricDataQueriesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } - if in.NotificationTargetArn != nil { - in, out := &in.NotificationTargetArn, &out.NotificationTargetArn - *out = new(string) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedCapacityMetricSpecificationObservation. +func (in *CustomizedCapacityMetricSpecificationObservation) DeepCopy() *CustomizedCapacityMetricSpecificationObservation { + if in == nil { + return nil } - if in.RoleArn != nil { - in, out := &in.RoleArn, &out.RoleArn - *out = new(string) - **out = **in + out := new(CustomizedCapacityMetricSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedCapacityMetricSpecificationParameters) DeepCopyInto(out *CustomizedCapacityMetricSpecificationParameters) { + *out = *in + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]MetricDataQueriesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialLifecycleHookInitParameters. -func (in *InitialLifecycleHookInitParameters) DeepCopy() *InitialLifecycleHookInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedCapacityMetricSpecificationParameters. +func (in *CustomizedCapacityMetricSpecificationParameters) DeepCopy() *CustomizedCapacityMetricSpecificationParameters { if in == nil { return nil } - out := new(InitialLifecycleHookInitParameters) + out := new(CustomizedCapacityMetricSpecificationParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InitialLifecycleHookObservation) DeepCopyInto(out *InitialLifecycleHookObservation) { +func (in *CustomizedLoadMetricSpecificationInitParameters) DeepCopyInto(out *CustomizedLoadMetricSpecificationInitParameters) { *out = *in - if in.DefaultResult != nil { - in, out := &in.DefaultResult, &out.DefaultResult - *out = new(string) - **out = **in + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]CustomizedLoadMetricSpecificationMetricDataQueriesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } - if in.HeartbeatTimeout != nil { - in, out := &in.HeartbeatTimeout, &out.HeartbeatTimeout - *out = new(float64) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedLoadMetricSpecificationInitParameters. +func (in *CustomizedLoadMetricSpecificationInitParameters) DeepCopy() *CustomizedLoadMetricSpecificationInitParameters { + if in == nil { + return nil } - if in.LifecycleTransition != nil { - in, out := &in.LifecycleTransition, &out.LifecycleTransition + out := new(CustomizedLoadMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedLoadMetricSpecificationMetricDataQueriesInitParameters) DeepCopyInto(out *CustomizedLoadMetricSpecificationMetricDataQueriesInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression *out = new(string) **out = **in } - if in.Name != nil { - in, out := &in.Name, &out.Name + if in.ID != nil { + in, out := &in.ID, &out.ID *out = new(string) **out = **in } - if in.NotificationMetadata != nil { - in, out := &in.NotificationMetadata, &out.NotificationMetadata + if in.Label != nil { + in, out := &in.Label, &out.Label *out = new(string) **out = **in } - if in.NotificationTargetArn != nil { - in, out := &in.NotificationTargetArn, &out.NotificationTargetArn - *out = new(string) - **out = **in + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricDataQueriesMetricStatInitParameters) + (*in).DeepCopyInto(*out) } - if in.RoleArn != nil { - in, out := &in.RoleArn, &out.RoleArn - *out = new(string) + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialLifecycleHookObservation. -func (in *InitialLifecycleHookObservation) DeepCopy() *InitialLifecycleHookObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedLoadMetricSpecificationMetricDataQueriesInitParameters. +func (in *CustomizedLoadMetricSpecificationMetricDataQueriesInitParameters) DeepCopy() *CustomizedLoadMetricSpecificationMetricDataQueriesInitParameters { if in == nil { return nil } - out := new(InitialLifecycleHookObservation) + out := new(CustomizedLoadMetricSpecificationMetricDataQueriesInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InitialLifecycleHookParameters) DeepCopyInto(out *InitialLifecycleHookParameters) { +func (in *CustomizedLoadMetricSpecificationMetricDataQueriesObservation) DeepCopyInto(out *CustomizedLoadMetricSpecificationMetricDataQueriesObservation) { *out = *in - if in.DefaultResult != nil { - in, out := &in.DefaultResult, &out.DefaultResult - *out = new(string) - **out = **in - } - if in.HeartbeatTimeout != nil { - in, out := &in.HeartbeatTimeout, &out.HeartbeatTimeout - *out = new(float64) - **out = **in - } - if in.LifecycleTransition != nil { - in, out := &in.LifecycleTransition, &out.LifecycleTransition + if in.Expression != nil { + in, out := &in.Expression, &out.Expression *out = new(string) **out = **in } - if in.Name != nil { - in, out := &in.Name, &out.Name + if in.ID != nil { + in, out := &in.ID, &out.ID *out = new(string) **out = **in } - if in.NotificationMetadata != nil { - in, out := &in.NotificationMetadata, &out.NotificationMetadata + if in.Label != nil { + in, out := &in.Label, &out.Label *out = new(string) **out = **in } - if in.NotificationTargetArn != nil { - in, out := &in.NotificationTargetArn, &out.NotificationTargetArn - *out = new(string) - **out = **in + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricDataQueriesMetricStatObservation) + (*in).DeepCopyInto(*out) } - if in.RoleArn != nil { - in, out := &in.RoleArn, &out.RoleArn - *out = new(string) + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialLifecycleHookParameters. -func (in *InitialLifecycleHookParameters) DeepCopy() *InitialLifecycleHookParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedLoadMetricSpecificationMetricDataQueriesObservation. 
+func (in *CustomizedLoadMetricSpecificationMetricDataQueriesObservation) DeepCopy() *CustomizedLoadMetricSpecificationMetricDataQueriesObservation { if in == nil { return nil } - out := new(InitialLifecycleHookParameters) + out := new(CustomizedLoadMetricSpecificationMetricDataQueriesObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceMaintenancePolicyInitParameters) DeepCopyInto(out *InstanceMaintenancePolicyInitParameters) { +func (in *CustomizedLoadMetricSpecificationMetricDataQueriesParameters) DeepCopyInto(out *CustomizedLoadMetricSpecificationMetricDataQueriesParameters) { *out = *in - if in.MaxHealthyPercentage != nil { - in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage - *out = new(float64) + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) **out = **in } - if in.MinHealthyPercentage != nil { - in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage - *out = new(float64) + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricDataQueriesMetricStatParameters) + (*in).DeepCopyInto(*out) + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMaintenancePolicyInitParameters. -func (in *InstanceMaintenancePolicyInitParameters) DeepCopy() *InstanceMaintenancePolicyInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedLoadMetricSpecificationMetricDataQueriesParameters. 
+func (in *CustomizedLoadMetricSpecificationMetricDataQueriesParameters) DeepCopy() *CustomizedLoadMetricSpecificationMetricDataQueriesParameters { if in == nil { return nil } - out := new(InstanceMaintenancePolicyInitParameters) + out := new(CustomizedLoadMetricSpecificationMetricDataQueriesParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceMaintenancePolicyObservation) DeepCopyInto(out *InstanceMaintenancePolicyObservation) { +func (in *CustomizedLoadMetricSpecificationObservation) DeepCopyInto(out *CustomizedLoadMetricSpecificationObservation) { *out = *in - if in.MaxHealthyPercentage != nil { - in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage - *out = new(float64) - **out = **in - } - if in.MinHealthyPercentage != nil { - in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage - *out = new(float64) - **out = **in + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]CustomizedLoadMetricSpecificationMetricDataQueriesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMaintenancePolicyObservation. -func (in *InstanceMaintenancePolicyObservation) DeepCopy() *InstanceMaintenancePolicyObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedLoadMetricSpecificationObservation. +func (in *CustomizedLoadMetricSpecificationObservation) DeepCopy() *CustomizedLoadMetricSpecificationObservation { if in == nil { return nil } - out := new(InstanceMaintenancePolicyObservation) + out := new(CustomizedLoadMetricSpecificationObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InstanceMaintenancePolicyParameters) DeepCopyInto(out *InstanceMaintenancePolicyParameters) { +func (in *CustomizedLoadMetricSpecificationParameters) DeepCopyInto(out *CustomizedLoadMetricSpecificationParameters) { *out = *in - if in.MaxHealthyPercentage != nil { - in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage - *out = new(float64) - **out = **in - } - if in.MinHealthyPercentage != nil { - in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage - *out = new(float64) - **out = **in + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]CustomizedLoadMetricSpecificationMetricDataQueriesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMaintenancePolicyParameters. -func (in *InstanceMaintenancePolicyParameters) DeepCopy() *InstanceMaintenancePolicyParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedLoadMetricSpecificationParameters. +func (in *CustomizedLoadMetricSpecificationParameters) DeepCopy() *CustomizedLoadMetricSpecificationParameters { if in == nil { return nil } - out := new(InstanceMaintenancePolicyParameters) + out := new(CustomizedLoadMetricSpecificationParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InstanceRefreshInitParameters) DeepCopyInto(out *InstanceRefreshInitParameters) { +func (in *CustomizedMetricSpecificationInitParameters) DeepCopyInto(out *CustomizedMetricSpecificationInitParameters) { *out = *in - if in.Preferences != nil { - in, out := &in.Preferences, &out.Preferences - *out = make([]PreferencesInitParameters, len(*in)) + if in.MetricDimension != nil { + in, out := &in.MetricDimension, &out.MetricDimension + *out = make([]MetricDimensionInitParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Strategy != nil { - in, out := &in.Strategy, &out.Strategy + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName *out = new(string) **out = **in } - if in.Triggers != nil { - in, out := &in.Triggers, &out.Triggers - *out = make([]*string, len(*in)) + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricsInitParameters, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRefreshInitParameters. -func (in *InstanceRefreshInitParameters) DeepCopy() *InstanceRefreshInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedMetricSpecificationInitParameters. 
+func (in *CustomizedMetricSpecificationInitParameters) DeepCopy() *CustomizedMetricSpecificationInitParameters { if in == nil { return nil } - out := new(InstanceRefreshInitParameters) + out := new(CustomizedMetricSpecificationInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceRefreshObservation) DeepCopyInto(out *InstanceRefreshObservation) { +func (in *CustomizedMetricSpecificationObservation) DeepCopyInto(out *CustomizedMetricSpecificationObservation) { *out = *in - if in.Preferences != nil { - in, out := &in.Preferences, &out.Preferences - *out = make([]PreferencesObservation, len(*in)) + if in.MetricDimension != nil { + in, out := &in.MetricDimension, &out.MetricDimension + *out = make([]MetricDimensionObservation, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Strategy != nil { - in, out := &in.Strategy, &out.Strategy + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName *out = new(string) **out = **in } - if in.Triggers != nil { - in, out := &in.Triggers, &out.Triggers - *out = make([]*string, len(*in)) + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricsObservation, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRefreshObservation. 
-func (in *InstanceRefreshObservation) DeepCopy() *InstanceRefreshObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedMetricSpecificationObservation. +func (in *CustomizedMetricSpecificationObservation) DeepCopy() *CustomizedMetricSpecificationObservation { if in == nil { return nil } - out := new(InstanceRefreshObservation) + out := new(CustomizedMetricSpecificationObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceRefreshParameters) DeepCopyInto(out *InstanceRefreshParameters) { +func (in *CustomizedMetricSpecificationParameters) DeepCopyInto(out *CustomizedMetricSpecificationParameters) { *out = *in - if in.Preferences != nil { - in, out := &in.Preferences, &out.Preferences - *out = make([]PreferencesParameters, len(*in)) + if in.MetricDimension != nil { + in, out := &in.MetricDimension, &out.MetricDimension + *out = make([]MetricDimensionParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Strategy != nil { - in, out := &in.Strategy, &out.Strategy + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName *out = new(string) **out = **in } - if in.Triggers != nil { - in, out := &in.Triggers, &out.Triggers - *out = make([]*string, len(*in)) + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricsParameters, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } } -// DeepCopy 
is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRefreshParameters. -func (in *InstanceRefreshParameters) DeepCopy() *InstanceRefreshParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedMetricSpecificationParameters. +func (in *CustomizedMetricSpecificationParameters) DeepCopy() *CustomizedMetricSpecificationParameters { if in == nil { return nil } - out := new(InstanceRefreshParameters) + out := new(CustomizedMetricSpecificationParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceRequirementsInitParameters) DeepCopyInto(out *InstanceRequirementsInitParameters) { +func (in *CustomizedScalingMetricSpecificationInitParameters) DeepCopyInto(out *CustomizedScalingMetricSpecificationInitParameters) { *out = *in - if in.AcceleratorCount != nil { - in, out := &in.AcceleratorCount, &out.AcceleratorCount - *out = make([]AcceleratorCountInitParameters, len(*in)) + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]CustomizedScalingMetricSpecificationMetricDataQueriesInitParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.AcceleratorManufacturers != nil { - in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationInitParameters. 
+func (in *CustomizedScalingMetricSpecificationInitParameters) DeepCopy() *CustomizedScalingMetricSpecificationInitParameters { + if in == nil { + return nil } - if in.AcceleratorNames != nil { - in, out := &in.AcceleratorNames, &out.AcceleratorNames - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + out := new(CustomizedScalingMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizedScalingMetricSpecificationMetricDataQueriesInitParameters) DeepCopyInto(out *CustomizedScalingMetricSpecificationMetricDataQueriesInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in } - if in.AcceleratorTotalMemoryMib != nil { - in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib - *out = make([]AcceleratorTotalMemoryMibInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in } - if in.AcceleratorTypes != nil { - in, out := &in.AcceleratorTypes, &out.AcceleratorTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in } - if in.AllowedInstanceTypes != nil { - in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = 
new(CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatInitParameters) + (*in).DeepCopyInto(*out) } - if in.BareMetal != nil { - in, out := &in.BareMetal, &out.BareMetal - *out = new(string) + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) **out = **in } - if in.BaselineEBSBandwidthMbps != nil { - in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps - *out = make([]BaselineEBSBandwidthMbpsInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationMetricDataQueriesInitParameters. +func (in *CustomizedScalingMetricSpecificationMetricDataQueriesInitParameters) DeepCopy() *CustomizedScalingMetricSpecificationMetricDataQueriesInitParameters { + if in == nil { + return nil } - if in.BurstablePerformance != nil { - in, out := &in.BurstablePerformance, &out.BurstablePerformance + out := new(CustomizedScalingMetricSpecificationMetricDataQueriesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatInitParameters) DeepCopyInto(out *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatInitParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricDataQueriesMetricStatMetricInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat *out = new(string) **out = **in } - if in.CPUManufacturers != nil { - in, out := &in.CPUManufacturers, &out.CPUManufacturers - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in } - if in.ExcludedInstanceTypes != nil { - in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatInitParameters. +func (in *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatInitParameters) DeepCopy() *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatInitParameters { + if in == nil { + return nil } - if in.InstanceGenerations != nil { - in, out := &in.InstanceGenerations, &out.InstanceGenerations - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + out := new(CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatObservation) DeepCopyInto(out *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatObservation) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricDataQueriesMetricStatMetricObservation) + (*in).DeepCopyInto(*out) } - if in.LocalStorage != nil { - in, out := &in.LocalStorage, &out.LocalStorage + if in.Stat != nil { + in, out := &in.Stat, &out.Stat *out = new(string) **out = **in } - if in.LocalStorageTypes != nil { - in, out := &in.LocalStorageTypes, &out.LocalStorageTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in } - if in.MemoryGibPerVcpu != nil { - in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu - *out = make([]MemoryGibPerVcpuInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatObservation. +func (in *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatObservation) DeepCopy() *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatObservation { + if in == nil { + return nil } - if in.MemoryMib != nil { - in, out := &in.MemoryMib, &out.MemoryMib - *out = make([]MemoryMibInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + out := new(CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatParameters) DeepCopyInto(out *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricDataQueriesMetricStatMetricParameters) + (*in).DeepCopyInto(*out) } - if in.NetworkBandwidthGbps != nil { - in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps - *out = make([]NetworkBandwidthGbpsInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in } - if in.NetworkInterfaceCount != nil { - in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount - *out = make([]NetworkInterfaceCountInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in } - if in.OnDemandMaxPricePercentageOverLowestPrice != nil { - in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice - *out = new(float64) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatParameters. +func (in *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatParameters) DeepCopy() *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatParameters { + if in == nil { + return nil + } + out := new(CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedScalingMetricSpecificationMetricDataQueriesObservation) DeepCopyInto(out *CustomizedScalingMetricSpecificationMetricDataQueriesObservation) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) **out = **in } - if in.RequireHibernateSupport != nil { - in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport - *out = new(bool) + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) **out = **in } - if in.SpotMaxPricePercentageOverLowestPrice != nil { - in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice - *out = new(float64) + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) **out = **in } - if in.TotalLocalStorageGb != nil { - in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb - *out = make([]TotalLocalStorageGbInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatObservation) + (*in).DeepCopyInto(*out) } - if in.VcpuCount != nil { - in, out := &in.VcpuCount, &out.VcpuCount - *out = make([]VcpuCountInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsInitParameters. -func (in *InstanceRequirementsInitParameters) DeepCopy() *InstanceRequirementsInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationMetricDataQueriesObservation. 
+func (in *CustomizedScalingMetricSpecificationMetricDataQueriesObservation) DeepCopy() *CustomizedScalingMetricSpecificationMetricDataQueriesObservation { if in == nil { return nil } - out := new(InstanceRequirementsInitParameters) + out := new(CustomizedScalingMetricSpecificationMetricDataQueriesObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceRequirementsObservation) DeepCopyInto(out *InstanceRequirementsObservation) { +func (in *CustomizedScalingMetricSpecificationMetricDataQueriesParameters) DeepCopyInto(out *CustomizedScalingMetricSpecificationMetricDataQueriesParameters) { *out = *in - if in.AcceleratorCount != nil { - in, out := &in.AcceleratorCount, &out.AcceleratorCount - *out = make([]AcceleratorCountObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatParameters) + (*in).DeepCopyInto(*out) + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationMetricDataQueriesParameters. 
+func (in *CustomizedScalingMetricSpecificationMetricDataQueriesParameters) DeepCopy() *CustomizedScalingMetricSpecificationMetricDataQueriesParameters { + if in == nil { + return nil + } + out := new(CustomizedScalingMetricSpecificationMetricDataQueriesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizedScalingMetricSpecificationObservation) DeepCopyInto(out *CustomizedScalingMetricSpecificationObservation) { + *out = *in + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]CustomizedScalingMetricSpecificationMetricDataQueriesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationObservation. +func (in *CustomizedScalingMetricSpecificationObservation) DeepCopy() *CustomizedScalingMetricSpecificationObservation { + if in == nil { + return nil + } + out := new(CustomizedScalingMetricSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizedScalingMetricSpecificationParameters) DeepCopyInto(out *CustomizedScalingMetricSpecificationParameters) { + *out = *in + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]CustomizedScalingMetricSpecificationMetricDataQueriesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationParameters. 
+func (in *CustomizedScalingMetricSpecificationParameters) DeepCopy() *CustomizedScalingMetricSpecificationParameters { + if in == nil { + return nil + } + out := new(CustomizedScalingMetricSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionsInitParameters) DeepCopyInto(out *DimensionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionsInitParameters. +func (in *DimensionsInitParameters) DeepCopy() *DimensionsInitParameters { + if in == nil { + return nil + } + out := new(DimensionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionsObservation) DeepCopyInto(out *DimensionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionsObservation. +func (in *DimensionsObservation) DeepCopy() *DimensionsObservation { + if in == nil { + return nil + } + out := new(DimensionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DimensionsParameters) DeepCopyInto(out *DimensionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionsParameters. +func (in *DimensionsParameters) DeepCopy() *DimensionsParameters { + if in == nil { + return nil + } + out := new(DimensionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSBlockDeviceInitParameters) DeepCopyInto(out *EBSBlockDeviceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceInitParameters. 
+func (in *EBSBlockDeviceInitParameters) DeepCopy() *EBSBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(EBSBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSBlockDeviceObservation) DeepCopyInto(out *EBSBlockDeviceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceObservation. +func (in *EBSBlockDeviceObservation) DeepCopy() *EBSBlockDeviceObservation { + if in == nil { + return nil + } + out := new(EBSBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSBlockDeviceParameters) DeepCopyInto(out *EBSBlockDeviceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceParameters. +func (in *EBSBlockDeviceParameters) DeepCopy() *EBSBlockDeviceParameters { + if in == nil { + return nil + } + out := new(EBSBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralBlockDeviceInitParameters) DeepCopyInto(out *EphemeralBlockDeviceInitParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralBlockDeviceInitParameters. +func (in *EphemeralBlockDeviceInitParameters) DeepCopy() *EphemeralBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(EphemeralBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralBlockDeviceObservation) DeepCopyInto(out *EphemeralBlockDeviceObservation) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralBlockDeviceObservation. +func (in *EphemeralBlockDeviceObservation) DeepCopy() *EphemeralBlockDeviceObservation { + if in == nil { + return nil + } + out := new(EphemeralBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralBlockDeviceParameters) DeepCopyInto(out *EphemeralBlockDeviceParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralBlockDeviceParameters. +func (in *EphemeralBlockDeviceParameters) DeepCopy() *EphemeralBlockDeviceParameters { + if in == nil { + return nil + } + out := new(EphemeralBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupTag) DeepCopyInto(out *GroupTag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTag. +func (in *GroupTag) DeepCopy() *GroupTag { + if in == nil { + return nil + } + out := new(GroupTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupTag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupTagInitParameters) DeepCopyInto(out *GroupTagInitParameters) { + *out = *in + if in.AutoscalingGroupName != nil { + in, out := &in.AutoscalingGroupName, &out.AutoscalingGroupName + *out = new(string) + **out = **in + } + if in.AutoscalingGroupNameRef != nil { + in, out := &in.AutoscalingGroupNameRef, &out.AutoscalingGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AutoscalingGroupNameSelector != nil { + in, out := &in.AutoscalingGroupNameSelector, &out.AutoscalingGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(GroupTagTagInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTagInitParameters. +func (in *GroupTagInitParameters) DeepCopy() *GroupTagInitParameters { + if in == nil { + return nil + } + out := new(GroupTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupTagList) DeepCopyInto(out *GroupTagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GroupTag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTagList. +func (in *GroupTagList) DeepCopy() *GroupTagList { + if in == nil { + return nil + } + out := new(GroupTagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *GroupTagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupTagObservation) DeepCopyInto(out *GroupTagObservation) { + *out = *in + if in.AutoscalingGroupName != nil { + in, out := &in.AutoscalingGroupName, &out.AutoscalingGroupName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(GroupTagTagObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTagObservation. +func (in *GroupTagObservation) DeepCopy() *GroupTagObservation { + if in == nil { + return nil + } + out := new(GroupTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupTagParameters) DeepCopyInto(out *GroupTagParameters) { + *out = *in + if in.AutoscalingGroupName != nil { + in, out := &in.AutoscalingGroupName, &out.AutoscalingGroupName + *out = new(string) + **out = **in + } + if in.AutoscalingGroupNameRef != nil { + in, out := &in.AutoscalingGroupNameRef, &out.AutoscalingGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AutoscalingGroupNameSelector != nil { + in, out := &in.AutoscalingGroupNameSelector, &out.AutoscalingGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(GroupTagTagParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTagParameters. +func (in *GroupTagParameters) DeepCopy() *GroupTagParameters { + if in == nil { + return nil + } + out := new(GroupTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupTagSpec) DeepCopyInto(out *GroupTagSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTagSpec. +func (in *GroupTagSpec) DeepCopy() *GroupTagSpec { + if in == nil { + return nil + } + out := new(GroupTagSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupTagStatus) DeepCopyInto(out *GroupTagStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTagStatus. +func (in *GroupTagStatus) DeepCopy() *GroupTagStatus { + if in == nil { + return nil + } + out := new(GroupTagStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupTagTagInitParameters) DeepCopyInto(out *GroupTagTagInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.PropagateAtLaunch != nil { + in, out := &in.PropagateAtLaunch, &out.PropagateAtLaunch + *out = new(bool) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTagTagInitParameters. +func (in *GroupTagTagInitParameters) DeepCopy() *GroupTagTagInitParameters { + if in == nil { + return nil + } + out := new(GroupTagTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupTagTagObservation) DeepCopyInto(out *GroupTagTagObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.PropagateAtLaunch != nil { + in, out := &in.PropagateAtLaunch, &out.PropagateAtLaunch + *out = new(bool) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTagTagObservation. 
+func (in *GroupTagTagObservation) DeepCopy() *GroupTagTagObservation { + if in == nil { + return nil + } + out := new(GroupTagTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupTagTagParameters) DeepCopyInto(out *GroupTagTagParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.PropagateAtLaunch != nil { + in, out := &in.PropagateAtLaunch, &out.PropagateAtLaunch + *out = new(bool) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupTagTagParameters. +func (in *GroupTagTagParameters) DeepCopy() *GroupTagTagParameters { + if in == nil { + return nil + } + out := new(GroupTagTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitialLifecycleHookInitParameters) DeepCopyInto(out *InitialLifecycleHookInitParameters) { + *out = *in + if in.DefaultResult != nil { + in, out := &in.DefaultResult, &out.DefaultResult + *out = new(string) + **out = **in + } + if in.HeartbeatTimeout != nil { + in, out := &in.HeartbeatTimeout, &out.HeartbeatTimeout + *out = new(float64) + **out = **in + } + if in.LifecycleTransition != nil { + in, out := &in.LifecycleTransition, &out.LifecycleTransition + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotificationMetadata != nil { + in, out := &in.NotificationMetadata, &out.NotificationMetadata + *out = new(string) + **out = **in + } + if in.NotificationTargetArn != nil { + in, out := &in.NotificationTargetArn, &out.NotificationTargetArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialLifecycleHookInitParameters. +func (in *InitialLifecycleHookInitParameters) DeepCopy() *InitialLifecycleHookInitParameters { + if in == nil { + return nil + } + out := new(InitialLifecycleHookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitialLifecycleHookObservation) DeepCopyInto(out *InitialLifecycleHookObservation) { + *out = *in + if in.DefaultResult != nil { + in, out := &in.DefaultResult, &out.DefaultResult + *out = new(string) + **out = **in + } + if in.HeartbeatTimeout != nil { + in, out := &in.HeartbeatTimeout, &out.HeartbeatTimeout + *out = new(float64) + **out = **in + } + if in.LifecycleTransition != nil { + in, out := &in.LifecycleTransition, &out.LifecycleTransition + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotificationMetadata != nil { + in, out := &in.NotificationMetadata, &out.NotificationMetadata + *out = new(string) + **out = **in + } + if in.NotificationTargetArn != nil { + in, out := &in.NotificationTargetArn, &out.NotificationTargetArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialLifecycleHookObservation. +func (in *InitialLifecycleHookObservation) DeepCopy() *InitialLifecycleHookObservation { + if in == nil { + return nil + } + out := new(InitialLifecycleHookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitialLifecycleHookParameters) DeepCopyInto(out *InitialLifecycleHookParameters) { + *out = *in + if in.DefaultResult != nil { + in, out := &in.DefaultResult, &out.DefaultResult + *out = new(string) + **out = **in + } + if in.HeartbeatTimeout != nil { + in, out := &in.HeartbeatTimeout, &out.HeartbeatTimeout + *out = new(float64) + **out = **in + } + if in.LifecycleTransition != nil { + in, out := &in.LifecycleTransition, &out.LifecycleTransition + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotificationMetadata != nil { + in, out := &in.NotificationMetadata, &out.NotificationMetadata + *out = new(string) + **out = **in + } + if in.NotificationTargetArn != nil { + in, out := &in.NotificationTargetArn, &out.NotificationTargetArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialLifecycleHookParameters. +func (in *InitialLifecycleHookParameters) DeepCopy() *InitialLifecycleHookParameters { + if in == nil { + return nil + } + out := new(InitialLifecycleHookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMaintenancePolicyInitParameters) DeepCopyInto(out *InstanceMaintenancePolicyInitParameters) { + *out = *in + if in.MaxHealthyPercentage != nil { + in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage + *out = new(float64) + **out = **in + } + if in.MinHealthyPercentage != nil { + in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMaintenancePolicyInitParameters. 
+func (in *InstanceMaintenancePolicyInitParameters) DeepCopy() *InstanceMaintenancePolicyInitParameters { + if in == nil { + return nil + } + out := new(InstanceMaintenancePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMaintenancePolicyObservation) DeepCopyInto(out *InstanceMaintenancePolicyObservation) { + *out = *in + if in.MaxHealthyPercentage != nil { + in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage + *out = new(float64) + **out = **in + } + if in.MinHealthyPercentage != nil { + in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMaintenancePolicyObservation. +func (in *InstanceMaintenancePolicyObservation) DeepCopy() *InstanceMaintenancePolicyObservation { + if in == nil { + return nil + } + out := new(InstanceMaintenancePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMaintenancePolicyParameters) DeepCopyInto(out *InstanceMaintenancePolicyParameters) { + *out = *in + if in.MaxHealthyPercentage != nil { + in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage + *out = new(float64) + **out = **in + } + if in.MinHealthyPercentage != nil { + in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMaintenancePolicyParameters. 
+func (in *InstanceMaintenancePolicyParameters) DeepCopy() *InstanceMaintenancePolicyParameters { + if in == nil { + return nil + } + out := new(InstanceMaintenancePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRefreshInitParameters) DeepCopyInto(out *InstanceRefreshInitParameters) { + *out = *in + if in.Preferences != nil { + in, out := &in.Preferences, &out.Preferences + *out = make([]PreferencesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(string) + **out = **in + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRefreshInitParameters. +func (in *InstanceRefreshInitParameters) DeepCopy() *InstanceRefreshInitParameters { + if in == nil { + return nil + } + out := new(InstanceRefreshInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRefreshObservation) DeepCopyInto(out *InstanceRefreshObservation) { + *out = *in + if in.Preferences != nil { + in, out := &in.Preferences, &out.Preferences + *out = make([]PreferencesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(string) + **out = **in + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRefreshObservation. +func (in *InstanceRefreshObservation) DeepCopy() *InstanceRefreshObservation { + if in == nil { + return nil + } + out := new(InstanceRefreshObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRefreshParameters) DeepCopyInto(out *InstanceRefreshParameters) { + *out = *in + if in.Preferences != nil { + in, out := &in.Preferences, &out.Preferences + *out = make([]PreferencesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(string) + **out = **in + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRefreshParameters. 
+func (in *InstanceRefreshParameters) DeepCopy() *InstanceRefreshParameters { + if in == nil { + return nil + } + out := new(InstanceRefreshParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsInitParameters) DeepCopyInto(out *InstanceRequirementsInitParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = make([]AcceleratorCountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = make([]AcceleratorTotalMemoryMibInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := 
&in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = make([]BaselineEBSBandwidthMbpsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = make([]MemoryGibPerVcpuInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = make([]MemoryMibInitParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = make([]NetworkBandwidthGbpsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = make([]NetworkInterfaceCountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = make([]TotalLocalStorageGbInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = make([]VcpuCountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsInitParameters. +func (in *InstanceRequirementsInitParameters) DeepCopy() *InstanceRequirementsInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsObservation) DeepCopyInto(out *InstanceRequirementsObservation) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = make([]AcceleratorCountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = make([]AcceleratorTotalMemoryMibObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = make([]BaselineEBSBandwidthMbpsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = make([]MemoryGibPerVcpuObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = make([]MemoryMibObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = make([]NetworkBandwidthGbpsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkInterfaceCount != nil { + in, out := 
&in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = make([]NetworkInterfaceCountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = make([]TotalLocalStorageGbObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = make([]VcpuCountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsObservation. +func (in *InstanceRequirementsObservation) DeepCopy() *InstanceRequirementsObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsParameters) DeepCopyInto(out *InstanceRequirementsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = make([]AcceleratorCountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.AcceleratorManufacturers != nil { in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers *out = make([]*string, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = make([]AcceleratorTotalMemoryMibParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = make([]BaselineEBSBandwidthMbpsParameters, len(*in)) 
+ for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = make([]MemoryGibPerVcpuParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = make([]MemoryMibParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = make([]NetworkBandwidthGbpsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } 
+ if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = make([]NetworkInterfaceCountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = make([]TotalLocalStorageGbParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = make([]VcpuCountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsParameters. +func (in *InstanceRequirementsParameters) DeepCopy() *InstanceRequirementsParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceReusePolicyInitParameters) DeepCopyInto(out *InstanceReusePolicyInitParameters) { + *out = *in + if in.ReuseOnScaleIn != nil { + in, out := &in.ReuseOnScaleIn, &out.ReuseOnScaleIn + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReusePolicyInitParameters. +func (in *InstanceReusePolicyInitParameters) DeepCopy() *InstanceReusePolicyInitParameters { + if in == nil { + return nil + } + out := new(InstanceReusePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceReusePolicyObservation) DeepCopyInto(out *InstanceReusePolicyObservation) { + *out = *in + if in.ReuseOnScaleIn != nil { + in, out := &in.ReuseOnScaleIn, &out.ReuseOnScaleIn + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReusePolicyObservation. +func (in *InstanceReusePolicyObservation) DeepCopy() *InstanceReusePolicyObservation { + if in == nil { + return nil + } + out := new(InstanceReusePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceReusePolicyParameters) DeepCopyInto(out *InstanceReusePolicyParameters) { + *out = *in + if in.ReuseOnScaleIn != nil { + in, out := &in.ReuseOnScaleIn, &out.ReuseOnScaleIn + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReusePolicyParameters. 
+func (in *InstanceReusePolicyParameters) DeepCopy() *InstanceReusePolicyParameters { + if in == nil { + return nil + } + out := new(InstanceReusePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstancesDistributionInitParameters) DeepCopyInto(out *InstancesDistributionInitParameters) { + *out = *in + if in.OnDemandAllocationStrategy != nil { + in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy + *out = new(string) + **out = **in + } + if in.OnDemandBaseCapacity != nil { + in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity + *out = new(float64) + **out = **in + } + if in.OnDemandPercentageAboveBaseCapacity != nil { + in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity + *out = new(float64) + **out = **in + } + if in.SpotAllocationStrategy != nil { + in, out := &in.SpotAllocationStrategy, &out.SpotAllocationStrategy + *out = new(string) + **out = **in + } + if in.SpotInstancePools != nil { + in, out := &in.SpotInstancePools, &out.SpotInstancePools + *out = new(float64) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistributionInitParameters. +func (in *InstancesDistributionInitParameters) DeepCopy() *InstancesDistributionInitParameters { + if in == nil { + return nil + } + out := new(InstancesDistributionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstancesDistributionObservation) DeepCopyInto(out *InstancesDistributionObservation) { + *out = *in + if in.OnDemandAllocationStrategy != nil { + in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy + *out = new(string) + **out = **in + } + if in.OnDemandBaseCapacity != nil { + in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity + *out = new(float64) + **out = **in + } + if in.OnDemandPercentageAboveBaseCapacity != nil { + in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity + *out = new(float64) + **out = **in + } + if in.SpotAllocationStrategy != nil { + in, out := &in.SpotAllocationStrategy, &out.SpotAllocationStrategy + *out = new(string) + **out = **in + } + if in.SpotInstancePools != nil { + in, out := &in.SpotInstancePools, &out.SpotInstancePools + *out = new(float64) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistributionObservation. +func (in *InstancesDistributionObservation) DeepCopy() *InstancesDistributionObservation { + if in == nil { + return nil + } + out := new(InstancesDistributionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstancesDistributionParameters) DeepCopyInto(out *InstancesDistributionParameters) { + *out = *in + if in.OnDemandAllocationStrategy != nil { + in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy + *out = new(string) + **out = **in + } + if in.OnDemandBaseCapacity != nil { + in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity + *out = new(float64) + **out = **in + } + if in.OnDemandPercentageAboveBaseCapacity != nil { + in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity + *out = new(float64) + **out = **in + } + if in.SpotAllocationStrategy != nil { + in, out := &in.SpotAllocationStrategy, &out.SpotAllocationStrategy + *out = new(string) + **out = **in + } + if in.SpotInstancePools != nil { + in, out := &in.SpotInstancePools, &out.SpotInstancePools + *out = new(float64) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistributionParameters. +func (in *InstancesDistributionParameters) DeepCopy() *InstancesDistributionParameters { + if in == nil { + return nil + } + out := new(InstancesDistributionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchConfiguration) DeepCopyInto(out *LaunchConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchConfiguration. 
+func (in *LaunchConfiguration) DeepCopy() *LaunchConfiguration { + if in == nil { + return nil + } + out := new(LaunchConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LaunchConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchConfigurationInitParameters) DeepCopyInto(out *LaunchConfigurationInitParameters) { + *out = *in + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]EBSBlockDeviceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EnableMonitoring != nil { + in, out := &in.EnableMonitoring, &out.EnableMonitoring + *out = new(bool) + **out = **in + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]EphemeralBlockDeviceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.MetadataOptions != nil { + in, out := 
&in.MetadataOptions, &out.MetadataOptions + *out = new(MetadataOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PlacementTenancy != nil { + in, out := &in.PlacementTenancy, &out.PlacementTenancy + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = new(RootBlockDeviceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchConfigurationInitParameters. +func (in *LaunchConfigurationInitParameters) DeepCopy() *LaunchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LaunchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchConfigurationList) DeepCopyInto(out *LaunchConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LaunchConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchConfigurationList. 
+func (in *LaunchConfigurationList) DeepCopy() *LaunchConfigurationList { + if in == nil { + return nil + } + out := new(LaunchConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LaunchConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchConfigurationObservation) DeepCopyInto(out *LaunchConfigurationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]EBSBlockDeviceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EnableMonitoring != nil { + in, out := &in.EnableMonitoring, &out.EnableMonitoring + *out = new(bool) + **out = **in + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]EphemeralBlockDeviceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + 
*out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(MetadataOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.PlacementTenancy != nil { + in, out := &in.PlacementTenancy, &out.PlacementTenancy + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = new(RootBlockDeviceObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchConfigurationObservation. +func (in *LaunchConfigurationObservation) DeepCopy() *LaunchConfigurationObservation { + if in == nil { + return nil + } + out := new(LaunchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchConfigurationParameters) DeepCopyInto(out *LaunchConfigurationParameters) { + *out = *in + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]EBSBlockDeviceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EnableMonitoring != nil { + in, out := &in.EnableMonitoring, &out.EnableMonitoring + *out = new(bool) + **out = **in + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]EphemeralBlockDeviceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(MetadataOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.PlacementTenancy != nil { + in, out := &in.PlacementTenancy, &out.PlacementTenancy + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = new(RootBlockDeviceParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := 
&in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchConfigurationParameters. +func (in *LaunchConfigurationParameters) DeepCopy() *LaunchConfigurationParameters { + if in == nil { + return nil + } + out := new(LaunchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchConfigurationSpec) DeepCopyInto(out *LaunchConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchConfigurationSpec. +func (in *LaunchConfigurationSpec) DeepCopy() *LaunchConfigurationSpec { + if in == nil { + return nil + } + out := new(LaunchConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchConfigurationStatus) DeepCopyInto(out *LaunchConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchConfigurationStatus. 
+func (in *LaunchConfigurationStatus) DeepCopy() *LaunchConfigurationStatus { + if in == nil { + return nil + } + out := new(LaunchConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateInitParameters) DeepCopyInto(out *LaunchTemplateInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInitParameters. +func (in *LaunchTemplateInitParameters) DeepCopy() *LaunchTemplateInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateObservation) DeepCopyInto(out *LaunchTemplateObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateObservation. 
+func (in *LaunchTemplateObservation) DeepCopy() *LaunchTemplateObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateParameters) DeepCopyInto(out *LaunchTemplateParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateParameters. +func (in *LaunchTemplateParameters) DeepCopy() *LaunchTemplateParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateSpecificationInitParameters) DeepCopyInto(out *LaunchTemplateSpecificationInitParameters) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateIDRef != nil { + in, out := &in.LaunchTemplateIDRef, &out.LaunchTemplateIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateIDSelector != nil { + in, out := &in.LaunchTemplateIDSelector, &out.LaunchTemplateIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationInitParameters. +func (in *LaunchTemplateSpecificationInitParameters) DeepCopy() *LaunchTemplateSpecificationInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateSpecificationObservation) DeepCopyInto(out *LaunchTemplateSpecificationObservation) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationObservation. 
+func (in *LaunchTemplateSpecificationObservation) DeepCopy() *LaunchTemplateSpecificationObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateSpecificationParameters) DeepCopyInto(out *LaunchTemplateSpecificationParameters) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateIDRef != nil { + in, out := &in.LaunchTemplateIDRef, &out.LaunchTemplateIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateIDSelector != nil { + in, out := &in.LaunchTemplateIDSelector, &out.LaunchTemplateIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationParameters. +func (in *LaunchTemplateSpecificationParameters) DeepCopy() *LaunchTemplateSpecificationParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemoryGibPerVcpuInitParameters) DeepCopyInto(out *MemoryGibPerVcpuInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuInitParameters. +func (in *MemoryGibPerVcpuInitParameters) DeepCopy() *MemoryGibPerVcpuInitParameters { + if in == nil { + return nil + } + out := new(MemoryGibPerVcpuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryGibPerVcpuObservation) DeepCopyInto(out *MemoryGibPerVcpuObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuObservation. +func (in *MemoryGibPerVcpuObservation) DeepCopy() *MemoryGibPerVcpuObservation { + if in == nil { + return nil + } + out := new(MemoryGibPerVcpuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryGibPerVcpuParameters) DeepCopyInto(out *MemoryGibPerVcpuParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuParameters. 
+func (in *MemoryGibPerVcpuParameters) DeepCopy() *MemoryGibPerVcpuParameters { + if in == nil { + return nil + } + out := new(MemoryGibPerVcpuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryMibInitParameters) DeepCopyInto(out *MemoryMibInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibInitParameters. +func (in *MemoryMibInitParameters) DeepCopy() *MemoryMibInitParameters { + if in == nil { + return nil + } + out := new(MemoryMibInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryMibObservation) DeepCopyInto(out *MemoryMibObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibObservation. +func (in *MemoryMibObservation) DeepCopy() *MemoryMibObservation { + if in == nil { + return nil + } + out := new(MemoryMibObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemoryMibParameters) DeepCopyInto(out *MemoryMibParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibParameters. +func (in *MemoryMibParameters) DeepCopy() *MemoryMibParameters { + if in == nil { + return nil + } + out := new(MemoryMibParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataOptionsInitParameters) DeepCopyInto(out *MetadataOptionsInitParameters) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataOptionsInitParameters. +func (in *MetadataOptionsInitParameters) DeepCopy() *MetadataOptionsInitParameters { + if in == nil { + return nil + } + out := new(MetadataOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataOptionsObservation) DeepCopyInto(out *MetadataOptionsObservation) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataOptionsObservation. +func (in *MetadataOptionsObservation) DeepCopy() *MetadataOptionsObservation { + if in == nil { + return nil + } + out := new(MetadataOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataOptionsParameters) DeepCopyInto(out *MetadataOptionsParameters) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataOptionsParameters. +func (in *MetadataOptionsParameters) DeepCopy() *MetadataOptionsParameters { + if in == nil { + return nil + } + out := new(MetadataOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricDataQueriesInitParameters) DeepCopyInto(out *MetricDataQueriesInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricStatInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDataQueriesInitParameters. +func (in *MetricDataQueriesInitParameters) DeepCopy() *MetricDataQueriesInitParameters { + if in == nil { + return nil + } + out := new(MetricDataQueriesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDataQueriesMetricStatInitParameters) DeepCopyInto(out *MetricDataQueriesMetricStatInitParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricStatMetricInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDataQueriesMetricStatInitParameters. 
+func (in *MetricDataQueriesMetricStatInitParameters) DeepCopy() *MetricDataQueriesMetricStatInitParameters { + if in == nil { + return nil + } + out := new(MetricDataQueriesMetricStatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDataQueriesMetricStatMetricInitParameters) DeepCopyInto(out *MetricDataQueriesMetricStatMetricInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricStatMetricDimensionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDataQueriesMetricStatMetricInitParameters. +func (in *MetricDataQueriesMetricStatMetricInitParameters) DeepCopy() *MetricDataQueriesMetricStatMetricInitParameters { + if in == nil { + return nil + } + out := new(MetricDataQueriesMetricStatMetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricDataQueriesMetricStatMetricObservation) DeepCopyInto(out *MetricDataQueriesMetricStatMetricObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricStatMetricDimensionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDataQueriesMetricStatMetricObservation. +func (in *MetricDataQueriesMetricStatMetricObservation) DeepCopy() *MetricDataQueriesMetricStatMetricObservation { + if in == nil { + return nil + } + out := new(MetricDataQueriesMetricStatMetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDataQueriesMetricStatMetricParameters) DeepCopyInto(out *MetricDataQueriesMetricStatMetricParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricStatMetricDimensionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDataQueriesMetricStatMetricParameters. 
+func (in *MetricDataQueriesMetricStatMetricParameters) DeepCopy() *MetricDataQueriesMetricStatMetricParameters { + if in == nil { + return nil + } + out := new(MetricDataQueriesMetricStatMetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDataQueriesMetricStatObservation) DeepCopyInto(out *MetricDataQueriesMetricStatObservation) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricStatMetricObservation) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDataQueriesMetricStatObservation. +func (in *MetricDataQueriesMetricStatObservation) DeepCopy() *MetricDataQueriesMetricStatObservation { + if in == nil { + return nil + } + out := new(MetricDataQueriesMetricStatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDataQueriesMetricStatParameters) DeepCopyInto(out *MetricDataQueriesMetricStatParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricStatMetricParameters) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDataQueriesMetricStatParameters. 
+func (in *MetricDataQueriesMetricStatParameters) DeepCopy() *MetricDataQueriesMetricStatParameters { + if in == nil { + return nil + } + out := new(MetricDataQueriesMetricStatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDataQueriesObservation) DeepCopyInto(out *MetricDataQueriesObservation) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricStatObservation) + (*in).DeepCopyInto(*out) + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDataQueriesObservation. +func (in *MetricDataQueriesObservation) DeepCopy() *MetricDataQueriesObservation { + if in == nil { + return nil + } + out := new(MetricDataQueriesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricDataQueriesParameters) DeepCopyInto(out *MetricDataQueriesParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricStatParameters) + (*in).DeepCopyInto(*out) + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDataQueriesParameters. +func (in *MetricDataQueriesParameters) DeepCopy() *MetricDataQueriesParameters { + if in == nil { + return nil + } + out := new(MetricDataQueriesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDimensionInitParameters) DeepCopyInto(out *MetricDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDimensionInitParameters. +func (in *MetricDimensionInitParameters) DeepCopy() *MetricDimensionInitParameters { + if in == nil { + return nil + } + out := new(MetricDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricDimensionObservation) DeepCopyInto(out *MetricDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDimensionObservation. +func (in *MetricDimensionObservation) DeepCopy() *MetricDimensionObservation { + if in == nil { + return nil + } + out := new(MetricDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDimensionParameters) DeepCopyInto(out *MetricDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDimensionParameters. +func (in *MetricDimensionParameters) DeepCopy() *MetricDimensionParameters { + if in == nil { + return nil + } + out := new(MetricDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDimensionsInitParameters) DeepCopyInto(out *MetricDimensionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDimensionsInitParameters. 
+func (in *MetricDimensionsInitParameters) DeepCopy() *MetricDimensionsInitParameters { + if in == nil { + return nil + } + out := new(MetricDimensionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDimensionsObservation) DeepCopyInto(out *MetricDimensionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDimensionsObservation. +func (in *MetricDimensionsObservation) DeepCopy() *MetricDimensionsObservation { + if in == nil { + return nil + } + out := new(MetricDimensionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricDimensionsParameters) DeepCopyInto(out *MetricDimensionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricDimensionsParameters. +func (in *MetricDimensionsParameters) DeepCopy() *MetricDimensionsParameters { + if in == nil { + return nil + } + out := new(MetricDimensionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricInitParameters) DeepCopyInto(out *MetricInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]DimensionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricInitParameters. +func (in *MetricInitParameters) DeepCopy() *MetricInitParameters { + if in == nil { + return nil + } + out := new(MetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricObservation) DeepCopyInto(out *MetricObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]DimensionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricObservation. +func (in *MetricObservation) DeepCopy() *MetricObservation { + if in == nil { + return nil + } + out := new(MetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricParameters) DeepCopyInto(out *MetricParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]DimensionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.AcceleratorNames != nil { - in, out := &in.AcceleratorNames, &out.AcceleratorNames - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricParameters. +func (in *MetricParameters) DeepCopy() *MetricParameters { + if in == nil { + return nil + } + out := new(MetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricSpecificationInitParameters) DeepCopyInto(out *MetricSpecificationInitParameters) { + *out = *in + if in.CustomizedCapacityMetricSpecification != nil { + in, out := &in.CustomizedCapacityMetricSpecification, &out.CustomizedCapacityMetricSpecification + *out = new(CustomizedCapacityMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomizedLoadMetricSpecification != nil { + in, out := &in.CustomizedLoadMetricSpecification, &out.CustomizedLoadMetricSpecification + *out = new(CustomizedLoadMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomizedScalingMetricSpecification != nil { + in, out := &in.CustomizedScalingMetricSpecification, &out.CustomizedScalingMetricSpecification + *out = new(CustomizedScalingMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PredefinedLoadMetricSpecification != nil { + in, out := &in.PredefinedLoadMetricSpecification, &out.PredefinedLoadMetricSpecification + *out = new(PredefinedLoadMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PredefinedMetricPairSpecification != nil { + in, out := &in.PredefinedMetricPairSpecification, &out.PredefinedMetricPairSpecification + *out = new(PredefinedMetricPairSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PredefinedScalingMetricSpecification != nil { + in, out := &in.PredefinedScalingMetricSpecification, &out.PredefinedScalingMetricSpecification + *out = new(PredefinedScalingMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpecificationInitParameters. 
+func (in *MetricSpecificationInitParameters) DeepCopy() *MetricSpecificationInitParameters { + if in == nil { + return nil + } + out := new(MetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSpecificationObservation) DeepCopyInto(out *MetricSpecificationObservation) { + *out = *in + if in.CustomizedCapacityMetricSpecification != nil { + in, out := &in.CustomizedCapacityMetricSpecification, &out.CustomizedCapacityMetricSpecification + *out = new(CustomizedCapacityMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomizedLoadMetricSpecification != nil { + in, out := &in.CustomizedLoadMetricSpecification, &out.CustomizedLoadMetricSpecification + *out = new(CustomizedLoadMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomizedScalingMetricSpecification != nil { + in, out := &in.CustomizedScalingMetricSpecification, &out.CustomizedScalingMetricSpecification + *out = new(CustomizedScalingMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.PredefinedLoadMetricSpecification != nil { + in, out := &in.PredefinedLoadMetricSpecification, &out.PredefinedLoadMetricSpecification + *out = new(PredefinedLoadMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.PredefinedMetricPairSpecification != nil { + in, out := &in.PredefinedMetricPairSpecification, &out.PredefinedMetricPairSpecification + *out = new(PredefinedMetricPairSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.PredefinedScalingMetricSpecification != nil { + in, out := &in.PredefinedScalingMetricSpecification, &out.PredefinedScalingMetricSpecification + *out = new(PredefinedScalingMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpecificationObservation. +func (in *MetricSpecificationObservation) DeepCopy() *MetricSpecificationObservation { + if in == nil { + return nil + } + out := new(MetricSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSpecificationParameters) DeepCopyInto(out *MetricSpecificationParameters) { + *out = *in + if in.CustomizedCapacityMetricSpecification != nil { + in, out := &in.CustomizedCapacityMetricSpecification, &out.CustomizedCapacityMetricSpecification + *out = new(CustomizedCapacityMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomizedLoadMetricSpecification != nil { + in, out := &in.CustomizedLoadMetricSpecification, &out.CustomizedLoadMetricSpecification + *out = new(CustomizedLoadMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomizedScalingMetricSpecification != nil { + in, out := &in.CustomizedScalingMetricSpecification, &out.CustomizedScalingMetricSpecification + *out = new(CustomizedScalingMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.PredefinedLoadMetricSpecification != nil { + in, out := &in.PredefinedLoadMetricSpecification, &out.PredefinedLoadMetricSpecification + *out = new(PredefinedLoadMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.PredefinedMetricPairSpecification != nil { + in, out := &in.PredefinedMetricPairSpecification, &out.PredefinedMetricPairSpecification + *out = new(PredefinedMetricPairSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.PredefinedScalingMetricSpecification != nil { + in, out := &in.PredefinedScalingMetricSpecification, &out.PredefinedScalingMetricSpecification + *out = new(PredefinedScalingMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetValue != nil 
{ + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpecificationParameters. +func (in *MetricSpecificationParameters) DeepCopy() *MetricSpecificationParameters { + if in == nil { + return nil + } + out := new(MetricSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatInitParameters) DeepCopyInto(out *MetricStatInitParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatInitParameters. +func (in *MetricStatInitParameters) DeepCopy() *MetricStatInitParameters { + if in == nil { + return nil + } + out := new(MetricStatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricStatMetricDimensionsInitParameters) DeepCopyInto(out *MetricStatMetricDimensionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in } - if in.AcceleratorTotalMemoryMib != nil { - in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib - *out = make([]AcceleratorTotalMemoryMibObservation, len(*in)) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatMetricDimensionsInitParameters. +func (in *MetricStatMetricDimensionsInitParameters) DeepCopy() *MetricStatMetricDimensionsInitParameters { + if in == nil { + return nil + } + out := new(MetricStatMetricDimensionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatMetricDimensionsObservation) DeepCopyInto(out *MetricStatMetricDimensionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatMetricDimensionsObservation. +func (in *MetricStatMetricDimensionsObservation) DeepCopy() *MetricStatMetricDimensionsObservation { + if in == nil { + return nil + } + out := new(MetricStatMetricDimensionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricStatMetricDimensionsParameters) DeepCopyInto(out *MetricStatMetricDimensionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatMetricDimensionsParameters. +func (in *MetricStatMetricDimensionsParameters) DeepCopy() *MetricStatMetricDimensionsParameters { + if in == nil { + return nil + } + out := new(MetricStatMetricDimensionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatMetricInitParameters) DeepCopyInto(out *MetricStatMetricInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricDimensionsInitParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.AcceleratorTypes != nil { - in, out := &in.AcceleratorTypes, &out.AcceleratorTypes - *out = make([]*string, len(*in)) + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatMetricInitParameters. +func (in *MetricStatMetricInitParameters) DeepCopy() *MetricStatMetricInitParameters { + if in == nil { + return nil + } + out := new(MetricStatMetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricStatMetricObservation) DeepCopyInto(out *MetricStatMetricObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricDimensionsObservation, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.AllowedInstanceTypes != nil { - in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes - *out = make([]*string, len(*in)) + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatMetricObservation. +func (in *MetricStatMetricObservation) DeepCopy() *MetricStatMetricObservation { + if in == nil { + return nil + } + out := new(MetricStatMetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatMetricParameters) DeepCopyInto(out *MetricStatMetricParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricDimensionsParameters, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.BareMetal != nil { - in, out := &in.BareMetal, &out.BareMetal + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatMetricParameters. 
+func (in *MetricStatMetricParameters) DeepCopy() *MetricStatMetricParameters { + if in == nil { + return nil + } + out := new(MetricStatMetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatObservation) DeepCopyInto(out *MetricStatObservation) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricObservation) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatObservation. +func (in *MetricStatObservation) DeepCopy() *MetricStatObservation { + if in == nil { + return nil + } + out := new(MetricStatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatParameters) DeepCopyInto(out *MetricStatParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricParameters) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatParameters. +func (in *MetricStatParameters) DeepCopy() *MetricStatParameters { + if in == nil { + return nil + } + out := new(MetricStatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsInitParameters) DeepCopyInto(out *MetricsInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricsMetricStatInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsInitParameters. +func (in *MetricsInitParameters) DeepCopy() *MetricsInitParameters { + if in == nil { + return nil + } + out := new(MetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsMetricStatInitParameters) DeepCopyInto(out *MetricsMetricStatInitParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricsMetricStatMetricInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsMetricStatInitParameters. +func (in *MetricsMetricStatInitParameters) DeepCopy() *MetricsMetricStatInitParameters { + if in == nil { + return nil + } + out := new(MetricsMetricStatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsMetricStatMetricDimensionsInitParameters) DeepCopyInto(out *MetricsMetricStatMetricDimensionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsMetricStatMetricDimensionsInitParameters. +func (in *MetricsMetricStatMetricDimensionsInitParameters) DeepCopy() *MetricsMetricStatMetricDimensionsInitParameters { + if in == nil { + return nil + } + out := new(MetricsMetricStatMetricDimensionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsMetricStatMetricDimensionsObservation) DeepCopyInto(out *MetricsMetricStatMetricDimensionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value *out = new(string) **out = **in } - if in.BaselineEBSBandwidthMbps != nil { - in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps - *out = make([]BaselineEBSBandwidthMbpsObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsMetricStatMetricDimensionsObservation. 
+func (in *MetricsMetricStatMetricDimensionsObservation) DeepCopy() *MetricsMetricStatMetricDimensionsObservation { + if in == nil { + return nil } - if in.BurstablePerformance != nil { - in, out := &in.BurstablePerformance, &out.BurstablePerformance + out := new(MetricsMetricStatMetricDimensionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsMetricStatMetricDimensionsParameters) DeepCopyInto(out *MetricsMetricStatMetricDimensionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name *out = new(string) **out = **in } - if in.CPUManufacturers != nil { - in, out := &in.CPUManufacturers, &out.CPUManufacturers - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in } - if in.ExcludedInstanceTypes != nil { - in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsMetricStatMetricDimensionsParameters. +func (in *MetricsMetricStatMetricDimensionsParameters) DeepCopy() *MetricsMetricStatMetricDimensionsParameters { + if in == nil { + return nil } - if in.InstanceGenerations != nil { - in, out := &in.InstanceGenerations, &out.InstanceGenerations - *out = make([]*string, len(*in)) + out := new(MetricsMetricStatMetricDimensionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsMetricStatMetricInitParameters) DeepCopyInto(out *MetricsMetricStatMetricInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricsMetricStatMetricDimensionsInitParameters, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.LocalStorage != nil { - in, out := &in.LocalStorage, &out.LocalStorage + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName *out = new(string) **out = **in } - if in.LocalStorageTypes != nil { - in, out := &in.LocalStorageTypes, &out.LocalStorageTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in } - if in.MemoryGibPerVcpu != nil { - in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu - *out = make([]MemoryGibPerVcpuObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsMetricStatMetricInitParameters. +func (in *MetricsMetricStatMetricInitParameters) DeepCopy() *MetricsMetricStatMetricInitParameters { + if in == nil { + return nil } - if in.MemoryMib != nil { - in, out := &in.MemoryMib, &out.MemoryMib - *out = make([]MemoryMibObservation, len(*in)) + out := new(MetricsMetricStatMetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsMetricStatMetricObservation) DeepCopyInto(out *MetricsMetricStatMetricObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricsMetricStatMetricDimensionsObservation, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.NetworkBandwidthGbps != nil { - in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps - *out = make([]NetworkBandwidthGbpsObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in } - if in.NetworkInterfaceCount != nil { - in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount - *out = make([]NetworkInterfaceCountObservation, len(*in)) + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsMetricStatMetricObservation. +func (in *MetricsMetricStatMetricObservation) DeepCopy() *MetricsMetricStatMetricObservation { + if in == nil { + return nil + } + out := new(MetricsMetricStatMetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsMetricStatMetricParameters) DeepCopyInto(out *MetricsMetricStatMetricParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]MetricsMetricStatMetricDimensionsParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.OnDemandMaxPricePercentageOverLowestPrice != nil { - in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice - *out = new(float64) + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) **out = **in } - if in.RequireHibernateSupport != nil { - in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport - *out = new(bool) + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) **out = **in } - if in.SpotMaxPricePercentageOverLowestPrice != nil { - in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice - *out = new(float64) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsMetricStatMetricParameters. +func (in *MetricsMetricStatMetricParameters) DeepCopy() *MetricsMetricStatMetricParameters { + if in == nil { + return nil + } + out := new(MetricsMetricStatMetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsMetricStatObservation) DeepCopyInto(out *MetricsMetricStatObservation) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricsMetricStatMetricObservation) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) **out = **in } - if in.TotalLocalStorageGb != nil { - in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb - *out = make([]TotalLocalStorageGbObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in } - if in.VcpuCount != nil { - in, out := &in.VcpuCount, &out.VcpuCount - *out = make([]VcpuCountObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsMetricStatObservation. +func (in *MetricsMetricStatObservation) DeepCopy() *MetricsMetricStatObservation { + if in == nil { + return nil } + out := new(MetricsMetricStatObservation) + in.DeepCopyInto(out) + return out } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsObservation. -func (in *InstanceRequirementsObservation) DeepCopy() *InstanceRequirementsObservation { +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsMetricStatParameters) DeepCopyInto(out *MetricsMetricStatParameters) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricsMetricStatMetricParameters) + (*in).DeepCopyInto(*out) + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsMetricStatParameters. +func (in *MetricsMetricStatParameters) DeepCopy() *MetricsMetricStatParameters { if in == nil { return nil } - out := new(InstanceRequirementsObservation) + out := new(MetricsMetricStatParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceRequirementsParameters) DeepCopyInto(out *InstanceRequirementsParameters) { +func (in *MetricsObservation) DeepCopyInto(out *MetricsObservation) { *out = *in - if in.AcceleratorCount != nil { - in, out := &in.AcceleratorCount, &out.AcceleratorCount - *out = make([]AcceleratorCountParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in } - if in.AcceleratorManufacturers != nil { - in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in } - if in.AcceleratorNames != nil { - in, out := &in.AcceleratorNames, &out.AcceleratorNames - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = 
**in - } - } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in } - if in.AcceleratorTotalMemoryMib != nil { - in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib - *out = make([]AcceleratorTotalMemoryMibParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricsMetricStatObservation) + (*in).DeepCopyInto(*out) } - if in.AcceleratorTypes != nil { - in, out := &in.AcceleratorTypes, &out.AcceleratorTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in } - if in.AllowedInstanceTypes != nil { - in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsObservation. +func (in *MetricsObservation) DeepCopy() *MetricsObservation { + if in == nil { + return nil } - if in.BareMetal != nil { - in, out := &in.BareMetal, &out.BareMetal + out := new(MetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsParameters) DeepCopyInto(out *MetricsParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression *out = new(string) **out = **in } - if in.BaselineEBSBandwidthMbps != nil { - in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps - *out = make([]BaselineEBSBandwidthMbpsParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.BurstablePerformance != nil { - in, out := &in.BurstablePerformance, &out.BurstablePerformance + if in.ID != nil { + in, out := &in.ID, &out.ID *out = new(string) **out = **in } - if in.CPUManufacturers != nil { - in, out := &in.CPUManufacturers, &out.CPUManufacturers - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } - if in.ExcludedInstanceTypes != nil { - in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in } - if in.InstanceGenerations != nil { - in, out := &in.InstanceGenerations, &out.InstanceGenerations - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(MetricsMetricStatParameters) + (*in).DeepCopyInto(*out) } - if in.LocalStorage != nil { - in, out := &in.LocalStorage, &out.LocalStorage - *out = new(string) + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) **out = **in } - if in.LocalStorageTypes != nil { - in, out := &in.LocalStorageTypes, &out.LocalStorageTypes - *out = make([]*string, len(*in)) - for i := range *in { 
- if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsParameters. +func (in *MetricsParameters) DeepCopy() *MetricsParameters { + if in == nil { + return nil } - if in.MemoryGibPerVcpu != nil { - in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu - *out = make([]MemoryGibPerVcpuParameters, len(*in)) + out := new(MetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MixedInstancesPolicyInitParameters) DeepCopyInto(out *MixedInstancesPolicyInitParameters) { + *out = *in + if in.InstancesDistribution != nil { + in, out := &in.InstancesDistribution, &out.InstancesDistribution + *out = make([]InstancesDistributionInitParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.MemoryMib != nil { - in, out := &in.MemoryMib, &out.MemoryMib - *out = make([]MemoryMibParameters, len(*in)) + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = make([]MixedInstancesPolicyLaunchTemplateInitParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.NetworkBandwidthGbps != nil { - in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps - *out = make([]NetworkBandwidthGbpsParameters, len(*in)) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyInitParameters. +func (in *MixedInstancesPolicyInitParameters) DeepCopy() *MixedInstancesPolicyInitParameters { + if in == nil { + return nil + } + out := new(MixedInstancesPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MixedInstancesPolicyLaunchTemplateInitParameters) DeepCopyInto(out *MixedInstancesPolicyLaunchTemplateInitParameters) { + *out = *in + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = make([]LaunchTemplateSpecificationInitParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.NetworkInterfaceCount != nil { - in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount - *out = make([]NetworkInterfaceCountParameters, len(*in)) + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = make([]OverrideInitParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.OnDemandMaxPricePercentageOverLowestPrice != nil { - in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice - *out = new(float64) - **out = **in - } - if in.RequireHibernateSupport != nil { - in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport - *out = new(bool) - **out = **in - } - if in.SpotMaxPricePercentageOverLowestPrice != nil { - in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice - *out = new(float64) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyLaunchTemplateInitParameters. +func (in *MixedInstancesPolicyLaunchTemplateInitParameters) DeepCopy() *MixedInstancesPolicyLaunchTemplateInitParameters { + if in == nil { + return nil } - if in.TotalLocalStorageGb != nil { - in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb - *out = make([]TotalLocalStorageGbParameters, len(*in)) + out := new(MixedInstancesPolicyLaunchTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MixedInstancesPolicyLaunchTemplateObservation) DeepCopyInto(out *MixedInstancesPolicyLaunchTemplateObservation) { + *out = *in + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = make([]LaunchTemplateSpecificationObservation, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.VcpuCount != nil { - in, out := &in.VcpuCount, &out.VcpuCount - *out = make([]VcpuCountParameters, len(*in)) + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = make([]OverrideObservation, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsParameters. -func (in *InstanceRequirementsParameters) DeepCopy() *InstanceRequirementsParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyLaunchTemplateObservation. +func (in *MixedInstancesPolicyLaunchTemplateObservation) DeepCopy() *MixedInstancesPolicyLaunchTemplateObservation { if in == nil { return nil } - out := new(InstanceRequirementsParameters) + out := new(MixedInstancesPolicyLaunchTemplateObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InstanceReusePolicyInitParameters) DeepCopyInto(out *InstanceReusePolicyInitParameters) { +func (in *MixedInstancesPolicyLaunchTemplateParameters) DeepCopyInto(out *MixedInstancesPolicyLaunchTemplateParameters) { *out = *in - if in.ReuseOnScaleIn != nil { - in, out := &in.ReuseOnScaleIn, &out.ReuseOnScaleIn - *out = new(bool) - **out = **in + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = make([]LaunchTemplateSpecificationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = make([]OverrideParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReusePolicyInitParameters. -func (in *InstanceReusePolicyInitParameters) DeepCopy() *InstanceReusePolicyInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyLaunchTemplateParameters. +func (in *MixedInstancesPolicyLaunchTemplateParameters) DeepCopy() *MixedInstancesPolicyLaunchTemplateParameters { if in == nil { return nil } - out := new(InstanceReusePolicyInitParameters) + out := new(MixedInstancesPolicyLaunchTemplateParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InstanceReusePolicyObservation) DeepCopyInto(out *InstanceReusePolicyObservation) { +func (in *MixedInstancesPolicyObservation) DeepCopyInto(out *MixedInstancesPolicyObservation) { *out = *in - if in.ReuseOnScaleIn != nil { - in, out := &in.ReuseOnScaleIn, &out.ReuseOnScaleIn - *out = new(bool) - **out = **in + if in.InstancesDistribution != nil { + in, out := &in.InstancesDistribution, &out.InstancesDistribution + *out = make([]InstancesDistributionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = make([]MixedInstancesPolicyLaunchTemplateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReusePolicyObservation. -func (in *InstanceReusePolicyObservation) DeepCopy() *InstanceReusePolicyObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyObservation. +func (in *MixedInstancesPolicyObservation) DeepCopy() *MixedInstancesPolicyObservation { if in == nil { return nil } - out := new(InstanceReusePolicyObservation) + out := new(MixedInstancesPolicyObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InstanceReusePolicyParameters) DeepCopyInto(out *InstanceReusePolicyParameters) { +func (in *MixedInstancesPolicyParameters) DeepCopyInto(out *MixedInstancesPolicyParameters) { *out = *in - if in.ReuseOnScaleIn != nil { - in, out := &in.ReuseOnScaleIn, &out.ReuseOnScaleIn - *out = new(bool) - **out = **in + if in.InstancesDistribution != nil { + in, out := &in.InstancesDistribution, &out.InstancesDistribution + *out = make([]InstancesDistributionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = make([]MixedInstancesPolicyLaunchTemplateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReusePolicyParameters. -func (in *InstanceReusePolicyParameters) DeepCopy() *InstanceReusePolicyParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyParameters. +func (in *MixedInstancesPolicyParameters) DeepCopy() *MixedInstancesPolicyParameters { if in == nil { return nil } - out := new(InstanceReusePolicyParameters) + out := new(MixedInstancesPolicyParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InstancesDistributionInitParameters) DeepCopyInto(out *InstancesDistributionInitParameters) { +func (in *NetworkBandwidthGbpsInitParameters) DeepCopyInto(out *NetworkBandwidthGbpsInitParameters) { *out = *in - if in.OnDemandAllocationStrategy != nil { - in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy - *out = new(string) - **out = **in - } - if in.OnDemandBaseCapacity != nil { - in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity - *out = new(float64) - **out = **in - } - if in.OnDemandPercentageAboveBaseCapacity != nil { - in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity + if in.Max != nil { + in, out := &in.Max, &out.Max *out = new(float64) **out = **in } - if in.SpotAllocationStrategy != nil { - in, out := &in.SpotAllocationStrategy, &out.SpotAllocationStrategy - *out = new(string) - **out = **in - } - if in.SpotInstancePools != nil { - in, out := &in.SpotInstancePools, &out.SpotInstancePools + if in.Min != nil { + in, out := &in.Min, &out.Min *out = new(float64) **out = **in } - if in.SpotMaxPrice != nil { - in, out := &in.SpotMaxPrice, &out.SpotMaxPrice - *out = new(string) - **out = **in - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistributionInitParameters. -func (in *InstancesDistributionInitParameters) DeepCopy() *InstancesDistributionInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsInitParameters. +func (in *NetworkBandwidthGbpsInitParameters) DeepCopy() *NetworkBandwidthGbpsInitParameters { if in == nil { return nil } - out := new(InstancesDistributionInitParameters) + out := new(NetworkBandwidthGbpsInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InstancesDistributionObservation) DeepCopyInto(out *InstancesDistributionObservation) { +func (in *NetworkBandwidthGbpsObservation) DeepCopyInto(out *NetworkBandwidthGbpsObservation) { *out = *in - if in.OnDemandAllocationStrategy != nil { - in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy - *out = new(string) - **out = **in - } - if in.OnDemandBaseCapacity != nil { - in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity - *out = new(float64) - **out = **in - } - if in.OnDemandPercentageAboveBaseCapacity != nil { - in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity + if in.Max != nil { + in, out := &in.Max, &out.Max *out = new(float64) **out = **in } - if in.SpotAllocationStrategy != nil { - in, out := &in.SpotAllocationStrategy, &out.SpotAllocationStrategy - *out = new(string) - **out = **in - } - if in.SpotInstancePools != nil { - in, out := &in.SpotInstancePools, &out.SpotInstancePools + if in.Min != nil { + in, out := &in.Min, &out.Min *out = new(float64) **out = **in } - if in.SpotMaxPrice != nil { - in, out := &in.SpotMaxPrice, &out.SpotMaxPrice - *out = new(string) - **out = **in - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistributionObservation. -func (in *InstancesDistributionObservation) DeepCopy() *InstancesDistributionObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsObservation. +func (in *NetworkBandwidthGbpsObservation) DeepCopy() *NetworkBandwidthGbpsObservation { if in == nil { return nil } - out := new(InstancesDistributionObservation) + out := new(NetworkBandwidthGbpsObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InstancesDistributionParameters) DeepCopyInto(out *InstancesDistributionParameters) { +func (in *NetworkBandwidthGbpsParameters) DeepCopyInto(out *NetworkBandwidthGbpsParameters) { *out = *in - if in.OnDemandAllocationStrategy != nil { - in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy - *out = new(string) - **out = **in - } - if in.OnDemandBaseCapacity != nil { - in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity + if in.Max != nil { + in, out := &in.Max, &out.Max *out = new(float64) **out = **in } - if in.OnDemandPercentageAboveBaseCapacity != nil { - in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity + if in.Min != nil { + in, out := &in.Min, &out.Min *out = new(float64) **out = **in } - if in.SpotAllocationStrategy != nil { - in, out := &in.SpotAllocationStrategy, &out.SpotAllocationStrategy - *out = new(string) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsParameters. +func (in *NetworkBandwidthGbpsParameters) DeepCopy() *NetworkBandwidthGbpsParameters { + if in == nil { + return nil } - if in.SpotInstancePools != nil { - in, out := &in.SpotInstancePools, &out.SpotInstancePools + out := new(NetworkBandwidthGbpsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceCountInitParameters) DeepCopyInto(out *NetworkInterfaceCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max *out = new(float64) **out = **in } - if in.SpotMaxPrice != nil { - in, out := &in.SpotMaxPrice, &out.SpotMaxPrice - *out = new(string) + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistributionParameters. 
-func (in *InstancesDistributionParameters) DeepCopy() *InstancesDistributionParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountInitParameters. +func (in *NetworkInterfaceCountInitParameters) DeepCopy() *NetworkInterfaceCountInitParameters { if in == nil { return nil } - out := new(InstancesDistributionParameters) + out := new(NetworkInterfaceCountInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LaunchTemplateInitParameters) DeepCopyInto(out *LaunchTemplateInitParameters) { +func (in *NetworkInterfaceCountObservation) DeepCopyInto(out *NetworkInterfaceCountObservation) { *out = *in - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.IDRef != nil { - in, out := &in.IDRef, &out.IDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.IDSelector != nil { - in, out := &in.IDSelector, &out.IDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) **out = **in } - if in.Version != nil { - in, out := &in.Version, &out.Version - *out = new(string) + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInitParameters. -func (in *LaunchTemplateInitParameters) DeepCopy() *LaunchTemplateInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountObservation. 
+func (in *NetworkInterfaceCountObservation) DeepCopy() *NetworkInterfaceCountObservation { if in == nil { return nil } - out := new(LaunchTemplateInitParameters) + out := new(NetworkInterfaceCountObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LaunchTemplateObservation) DeepCopyInto(out *LaunchTemplateObservation) { +func (in *NetworkInterfaceCountParameters) DeepCopyInto(out *NetworkInterfaceCountParameters) { *out = *in - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) **out = **in } - if in.Version != nil { - in, out := &in.Version, &out.Version - *out = new(string) + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateObservation. -func (in *LaunchTemplateObservation) DeepCopy() *LaunchTemplateObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountParameters. +func (in *NetworkInterfaceCountParameters) DeepCopy() *NetworkInterfaceCountParameters { if in == nil { return nil } - out := new(LaunchTemplateObservation) + out := new(NetworkInterfaceCountParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *LaunchTemplateParameters) DeepCopyInto(out *LaunchTemplateParameters) { +func (in *OverrideInitParameters) DeepCopyInto(out *OverrideInitParameters) { *out = *in - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.IDRef != nil { - in, out := &in.IDRef, &out.IDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.IDSelector != nil { - in, out := &in.IDSelector, &out.IDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = make([]InstanceRequirementsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } - if in.Name != nil { - in, out := &in.Name, &out.Name + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType *out = new(string) **out = **in } - if in.Version != nil { - in, out := &in.Version, &out.Version + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = make([]OverrideLaunchTemplateSpecificationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateParameters. -func (in *LaunchTemplateParameters) DeepCopy() *LaunchTemplateParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideInitParameters. +func (in *OverrideInitParameters) DeepCopy() *OverrideInitParameters { if in == nil { return nil } - out := new(LaunchTemplateParameters) + out := new(OverrideInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *LaunchTemplateSpecificationInitParameters) DeepCopyInto(out *LaunchTemplateSpecificationInitParameters) { +func (in *OverrideLaunchTemplateSpecificationInitParameters) DeepCopyInto(out *OverrideLaunchTemplateSpecificationInitParameters) { *out = *in if in.LaunchTemplateID != nil { in, out := &in.LaunchTemplateID, &out.LaunchTemplateID @@ -2765,18 +6555,18 @@ func (in *LaunchTemplateSpecificationInitParameters) DeepCopyInto(out *LaunchTem } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationInitParameters. -func (in *LaunchTemplateSpecificationInitParameters) DeepCopy() *LaunchTemplateSpecificationInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideLaunchTemplateSpecificationInitParameters. +func (in *OverrideLaunchTemplateSpecificationInitParameters) DeepCopy() *OverrideLaunchTemplateSpecificationInitParameters { if in == nil { return nil } - out := new(LaunchTemplateSpecificationInitParameters) + out := new(OverrideLaunchTemplateSpecificationInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LaunchTemplateSpecificationObservation) DeepCopyInto(out *LaunchTemplateSpecificationObservation) { +func (in *OverrideLaunchTemplateSpecificationObservation) DeepCopyInto(out *OverrideLaunchTemplateSpecificationObservation) { *out = *in if in.LaunchTemplateID != nil { in, out := &in.LaunchTemplateID, &out.LaunchTemplateID @@ -2795,18 +6585,18 @@ func (in *LaunchTemplateSpecificationObservation) DeepCopyInto(out *LaunchTempla } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationObservation. 
-func (in *LaunchTemplateSpecificationObservation) DeepCopy() *LaunchTemplateSpecificationObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideLaunchTemplateSpecificationObservation. +func (in *OverrideLaunchTemplateSpecificationObservation) DeepCopy() *OverrideLaunchTemplateSpecificationObservation { if in == nil { return nil } - out := new(LaunchTemplateSpecificationObservation) + out := new(OverrideLaunchTemplateSpecificationObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LaunchTemplateSpecificationParameters) DeepCopyInto(out *LaunchTemplateSpecificationParameters) { +func (in *OverrideLaunchTemplateSpecificationParameters) DeepCopyInto(out *OverrideLaunchTemplateSpecificationParameters) { *out = *in if in.LaunchTemplateID != nil { in, out := &in.LaunchTemplateID, &out.LaunchTemplateID @@ -2828,720 +6618,862 @@ func (in *LaunchTemplateSpecificationParameters) DeepCopyInto(out *LaunchTemplat *out = new(string) **out = **in } - if in.Version != nil { - in, out := &in.Version, &out.Version + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideLaunchTemplateSpecificationParameters. +func (in *OverrideLaunchTemplateSpecificationParameters) DeepCopy() *OverrideLaunchTemplateSpecificationParameters { + if in == nil { + return nil + } + out := new(OverrideLaunchTemplateSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverrideObservation) DeepCopyInto(out *OverrideObservation) { + *out = *in + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = make([]InstanceRequirementsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = make([]OverrideLaunchTemplateSpecificationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationParameters. -func (in *LaunchTemplateSpecificationParameters) DeepCopy() *LaunchTemplateSpecificationParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideObservation. +func (in *OverrideObservation) DeepCopy() *OverrideObservation { if in == nil { return nil } - out := new(LaunchTemplateSpecificationParameters) + out := new(OverrideObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MemoryGibPerVcpuInitParameters) DeepCopyInto(out *MemoryGibPerVcpuInitParameters) { +func (in *OverrideParameters) DeepCopyInto(out *OverrideParameters) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = new(float64) + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = make([]InstanceRequirementsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(float64) + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = make([]OverrideLaunchTemplateSpecificationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuInitParameters. -func (in *MemoryGibPerVcpuInitParameters) DeepCopy() *MemoryGibPerVcpuInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideParameters. +func (in *OverrideParameters) DeepCopy() *OverrideParameters { if in == nil { return nil } - out := new(MemoryGibPerVcpuInitParameters) + out := new(OverrideParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MemoryGibPerVcpuObservation) DeepCopyInto(out *MemoryGibPerVcpuObservation) { +func (in *Policy) DeepCopyInto(out *Policy) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = new(float64) - **out = **in - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(float64) - **out = **in - } + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuObservation. -func (in *MemoryGibPerVcpuObservation) DeepCopy() *MemoryGibPerVcpuObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { if in == nil { return nil } - out := new(MemoryGibPerVcpuObservation) + out := new(Policy) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MemoryGibPerVcpuParameters) DeepCopyInto(out *MemoryGibPerVcpuParameters) { +func (in *PolicyInitParameters) DeepCopyInto(out *PolicyInitParameters) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max + if in.AdjustmentType != nil { + in, out := &in.AdjustmentType, &out.AdjustmentType + *out = new(string) + **out = **in + } + if in.Cooldown != nil { + in, out := &in.Cooldown, &out.Cooldown *out = new(float64) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EstimatedInstanceWarmup != nil { + in, out := &in.EstimatedInstanceWarmup, &out.EstimatedInstanceWarmup *out = new(float64) **out = **in } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuParameters. -func (in *MemoryGibPerVcpuParameters) DeepCopy() *MemoryGibPerVcpuParameters { - if in == nil { - return nil + if in.MetricAggregationType != nil { + in, out := &in.MetricAggregationType, &out.MetricAggregationType + *out = new(string) + **out = **in } - out := new(MemoryGibPerVcpuParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MemoryMibInitParameters) DeepCopyInto(out *MemoryMibInitParameters) { - *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max + if in.MinAdjustmentMagnitude != nil { + in, out := &in.MinAdjustmentMagnitude, &out.MinAdjustmentMagnitude *out = new(float64) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.PredictiveScalingConfiguration != nil { + in, out := &in.PredictiveScalingConfiguration, &out.PredictiveScalingConfiguration + *out = new(PredictiveScalingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScalingAdjustment != nil { + in, out := &in.ScalingAdjustment, &out.ScalingAdjustment *out = new(float64) **out = **in } + if in.StepAdjustment != nil { + in, out := &in.StepAdjustment, &out.StepAdjustment + *out = make([]StepAdjustmentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetTrackingConfiguration != nil { + in, out := &in.TargetTrackingConfiguration, &out.TargetTrackingConfiguration + *out = new(TargetTrackingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibInitParameters. -func (in *MemoryMibInitParameters) DeepCopy() *MemoryMibInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyInitParameters. +func (in *PolicyInitParameters) DeepCopy() *PolicyInitParameters { if in == nil { return nil } - out := new(MemoryMibInitParameters) + out := new(PolicyInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MemoryMibObservation) DeepCopyInto(out *MemoryMibObservation) { +func (in *PolicyList) DeepCopyInto(out *PolicyList) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = new(float64) - **out = **in - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(float64) - **out = **in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibObservation. -func (in *MemoryMibObservation) DeepCopy() *MemoryMibObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { if in == nil { return nil } - out := new(MemoryMibObservation) + out := new(PolicyList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MemoryMibParameters) DeepCopyInto(out *MemoryMibParameters) { +func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max + if in.AdjustmentType != nil { + in, out := &in.AdjustmentType, &out.AdjustmentType + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoscalingGroupName != nil { + in, out := &in.AutoscalingGroupName, &out.AutoscalingGroupName + *out = new(string) + **out = **in + } + if in.Cooldown != nil { + in, out := &in.Cooldown, &out.Cooldown *out = new(float64) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EstimatedInstanceWarmup != nil { + in, out := &in.EstimatedInstanceWarmup, &out.EstimatedInstanceWarmup *out = new(float64) **out = **in } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibParameters. -func (in *MemoryMibParameters) DeepCopy() *MemoryMibParameters { - if in == nil { - return nil + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in } - out := new(MemoryMibParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MixedInstancesPolicyInitParameters) DeepCopyInto(out *MixedInstancesPolicyInitParameters) { - *out = *in - if in.InstancesDistribution != nil { - in, out := &in.InstancesDistribution, &out.InstancesDistribution - *out = make([]InstancesDistributionInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.MetricAggregationType != nil { + in, out := &in.MetricAggregationType, &out.MetricAggregationType + *out = new(string) + **out = **in } - if in.LaunchTemplate != nil { - in, out := &in.LaunchTemplate, &out.LaunchTemplate - *out = make([]MixedInstancesPolicyLaunchTemplateInitParameters, len(*in)) + if in.MinAdjustmentMagnitude != nil { + in, out := &in.MinAdjustmentMagnitude, &out.MinAdjustmentMagnitude + *out = new(float64) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.PredictiveScalingConfiguration != nil { + in, out := &in.PredictiveScalingConfiguration, &out.PredictiveScalingConfiguration + *out = new(PredictiveScalingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ScalingAdjustment != nil { + in, out := &in.ScalingAdjustment, &out.ScalingAdjustment + *out = new(float64) + **out = **in + } + if in.StepAdjustment != nil { + in, out := &in.StepAdjustment, &out.StepAdjustment + *out = make([]StepAdjustmentObservation, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.TargetTrackingConfiguration != nil { + in, out := &in.TargetTrackingConfiguration, &out.TargetTrackingConfiguration + *out = new(TargetTrackingConfigurationObservation) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyInitParameters. 
-func (in *MixedInstancesPolicyInitParameters) DeepCopy() *MixedInstancesPolicyInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. +func (in *PolicyObservation) DeepCopy() *PolicyObservation { if in == nil { return nil } - out := new(MixedInstancesPolicyInitParameters) + out := new(PolicyObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MixedInstancesPolicyLaunchTemplateInitParameters) DeepCopyInto(out *MixedInstancesPolicyLaunchTemplateInitParameters) { +func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { *out = *in - if in.LaunchTemplateSpecification != nil { - in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification - *out = make([]LaunchTemplateSpecificationInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.AdjustmentType != nil { + in, out := &in.AdjustmentType, &out.AdjustmentType + *out = new(string) + **out = **in + } + if in.AutoscalingGroupName != nil { + in, out := &in.AutoscalingGroupName, &out.AutoscalingGroupName + *out = new(string) + **out = **in + } + if in.AutoscalingGroupNameRef != nil { + in, out := &in.AutoscalingGroupNameRef, &out.AutoscalingGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AutoscalingGroupNameSelector != nil { + in, out := &in.AutoscalingGroupNameSelector, &out.AutoscalingGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Cooldown != nil { + in, out := &in.Cooldown, &out.Cooldown + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EstimatedInstanceWarmup != nil { + in, out := &in.EstimatedInstanceWarmup, &out.EstimatedInstanceWarmup + *out = new(float64) + **out = **in + } + if 
in.MetricAggregationType != nil { + in, out := &in.MetricAggregationType, &out.MetricAggregationType + *out = new(string) + **out = **in + } + if in.MinAdjustmentMagnitude != nil { + in, out := &in.MinAdjustmentMagnitude, &out.MinAdjustmentMagnitude + *out = new(float64) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.PredictiveScalingConfiguration != nil { + in, out := &in.PredictiveScalingConfiguration, &out.PredictiveScalingConfiguration + *out = new(PredictiveScalingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in } - if in.Override != nil { - in, out := &in.Override, &out.Override - *out = make([]OverrideInitParameters, len(*in)) + if in.ScalingAdjustment != nil { + in, out := &in.ScalingAdjustment, &out.ScalingAdjustment + *out = new(float64) + **out = **in + } + if in.StepAdjustment != nil { + in, out := &in.StepAdjustment, &out.StepAdjustment + *out = make([]StepAdjustmentParameters, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.TargetTrackingConfiguration != nil { + in, out := &in.TargetTrackingConfiguration, &out.TargetTrackingConfiguration + *out = new(TargetTrackingConfigurationParameters) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyLaunchTemplateInitParameters. -func (in *MixedInstancesPolicyLaunchTemplateInitParameters) DeepCopy() *MixedInstancesPolicyLaunchTemplateInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. 
+func (in *PolicyParameters) DeepCopy() *PolicyParameters { if in == nil { return nil } - out := new(MixedInstancesPolicyLaunchTemplateInitParameters) + out := new(PolicyParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MixedInstancesPolicyLaunchTemplateObservation) DeepCopyInto(out *MixedInstancesPolicyLaunchTemplateObservation) { +func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { *out = *in - if in.LaunchTemplateSpecification != nil { - in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification - *out = make([]LaunchTemplateSpecificationObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Override != nil { - in, out := &in.Override, &out.Override - *out = make([]OverrideObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyLaunchTemplateObservation. -func (in *MixedInstancesPolicyLaunchTemplateObservation) DeepCopy() *MixedInstancesPolicyLaunchTemplateObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec. +func (in *PolicySpec) DeepCopy() *PolicySpec { if in == nil { return nil } - out := new(MixedInstancesPolicyLaunchTemplateObservation) + out := new(PolicySpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MixedInstancesPolicyLaunchTemplateParameters) DeepCopyInto(out *MixedInstancesPolicyLaunchTemplateParameters) { +func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { *out = *in - if in.LaunchTemplateSpecification != nil { - in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification - *out = make([]LaunchTemplateSpecificationParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Override != nil { - in, out := &in.Override, &out.Override - *out = make([]OverrideParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyLaunchTemplateParameters. -func (in *MixedInstancesPolicyLaunchTemplateParameters) DeepCopy() *MixedInstancesPolicyLaunchTemplateParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. +func (in *PolicyStatus) DeepCopy() *PolicyStatus { if in == nil { return nil } - out := new(MixedInstancesPolicyLaunchTemplateParameters) + out := new(PolicyStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MixedInstancesPolicyObservation) DeepCopyInto(out *MixedInstancesPolicyObservation) { +func (in *PredefinedLoadMetricSpecificationInitParameters) DeepCopyInto(out *PredefinedLoadMetricSpecificationInitParameters) { *out = *in - if in.InstancesDistribution != nil { - in, out := &in.InstancesDistribution, &out.InstancesDistribution - *out = make([]InstancesDistributionObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) + **out = **in } - if in.LaunchTemplate != nil { - in, out := &in.LaunchTemplate, &out.LaunchTemplate - *out = make([]MixedInstancesPolicyLaunchTemplateObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyObservation. -func (in *MixedInstancesPolicyObservation) DeepCopy() *MixedInstancesPolicyObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedLoadMetricSpecificationInitParameters. +func (in *PredefinedLoadMetricSpecificationInitParameters) DeepCopy() *PredefinedLoadMetricSpecificationInitParameters { if in == nil { return nil } - out := new(MixedInstancesPolicyObservation) + out := new(PredefinedLoadMetricSpecificationInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MixedInstancesPolicyParameters) DeepCopyInto(out *MixedInstancesPolicyParameters) { +func (in *PredefinedLoadMetricSpecificationObservation) DeepCopyInto(out *PredefinedLoadMetricSpecificationObservation) { *out = *in - if in.InstancesDistribution != nil { - in, out := &in.InstancesDistribution, &out.InstancesDistribution - *out = make([]InstancesDistributionParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) + **out = **in } - if in.LaunchTemplate != nil { - in, out := &in.LaunchTemplate, &out.LaunchTemplate - *out = make([]MixedInstancesPolicyLaunchTemplateParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyParameters. -func (in *MixedInstancesPolicyParameters) DeepCopy() *MixedInstancesPolicyParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedLoadMetricSpecificationObservation. +func (in *PredefinedLoadMetricSpecificationObservation) DeepCopy() *PredefinedLoadMetricSpecificationObservation { if in == nil { return nil } - out := new(MixedInstancesPolicyParameters) + out := new(PredefinedLoadMetricSpecificationObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkBandwidthGbpsInitParameters) DeepCopyInto(out *NetworkBandwidthGbpsInitParameters) { +func (in *PredefinedLoadMetricSpecificationParameters) DeepCopyInto(out *PredefinedLoadMetricSpecificationParameters) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = new(float64) + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(float64) + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsInitParameters. -func (in *NetworkBandwidthGbpsInitParameters) DeepCopy() *NetworkBandwidthGbpsInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedLoadMetricSpecificationParameters. +func (in *PredefinedLoadMetricSpecificationParameters) DeepCopy() *PredefinedLoadMetricSpecificationParameters { if in == nil { return nil } - out := new(NetworkBandwidthGbpsInitParameters) + out := new(PredefinedLoadMetricSpecificationParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkBandwidthGbpsObservation) DeepCopyInto(out *NetworkBandwidthGbpsObservation) { +func (in *PredefinedMetricPairSpecificationInitParameters) DeepCopyInto(out *PredefinedMetricPairSpecificationInitParameters) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = new(float64) + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(float64) + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsObservation. -func (in *NetworkBandwidthGbpsObservation) DeepCopy() *NetworkBandwidthGbpsObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedMetricPairSpecificationInitParameters. +func (in *PredefinedMetricPairSpecificationInitParameters) DeepCopy() *PredefinedMetricPairSpecificationInitParameters { if in == nil { return nil } - out := new(NetworkBandwidthGbpsObservation) + out := new(PredefinedMetricPairSpecificationInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkBandwidthGbpsParameters) DeepCopyInto(out *NetworkBandwidthGbpsParameters) { +func (in *PredefinedMetricPairSpecificationObservation) DeepCopyInto(out *PredefinedMetricPairSpecificationObservation) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = new(float64) + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(float64) + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsParameters. -func (in *NetworkBandwidthGbpsParameters) DeepCopy() *NetworkBandwidthGbpsParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedMetricPairSpecificationObservation. +func (in *PredefinedMetricPairSpecificationObservation) DeepCopy() *PredefinedMetricPairSpecificationObservation { if in == nil { return nil } - out := new(NetworkBandwidthGbpsParameters) + out := new(PredefinedMetricPairSpecificationObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkInterfaceCountInitParameters) DeepCopyInto(out *NetworkInterfaceCountInitParameters) { +func (in *PredefinedMetricPairSpecificationParameters) DeepCopyInto(out *PredefinedMetricPairSpecificationParameters) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = new(float64) + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(float64) + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountInitParameters. -func (in *NetworkInterfaceCountInitParameters) DeepCopy() *NetworkInterfaceCountInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedMetricPairSpecificationParameters. +func (in *PredefinedMetricPairSpecificationParameters) DeepCopy() *PredefinedMetricPairSpecificationParameters { if in == nil { return nil } - out := new(NetworkInterfaceCountInitParameters) + out := new(PredefinedMetricPairSpecificationParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkInterfaceCountObservation) DeepCopyInto(out *NetworkInterfaceCountObservation) { +func (in *PredefinedMetricSpecificationInitParameters) DeepCopyInto(out *PredefinedMetricSpecificationInitParameters) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = new(float64) + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(float64) + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountObservation. -func (in *NetworkInterfaceCountObservation) DeepCopy() *NetworkInterfaceCountObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedMetricSpecificationInitParameters. +func (in *PredefinedMetricSpecificationInitParameters) DeepCopy() *PredefinedMetricSpecificationInitParameters { if in == nil { return nil } - out := new(NetworkInterfaceCountObservation) + out := new(PredefinedMetricSpecificationInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkInterfaceCountParameters) DeepCopyInto(out *NetworkInterfaceCountParameters) { +func (in *PredefinedMetricSpecificationObservation) DeepCopyInto(out *PredefinedMetricSpecificationObservation) { *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = new(float64) + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType + *out = new(string) **out = **in } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(float64) + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountParameters. -func (in *NetworkInterfaceCountParameters) DeepCopy() *NetworkInterfaceCountParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedMetricSpecificationObservation. +func (in *PredefinedMetricSpecificationObservation) DeepCopy() *PredefinedMetricSpecificationObservation { if in == nil { return nil } - out := new(NetworkInterfaceCountParameters) + out := new(PredefinedMetricSpecificationObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OverrideInitParameters) DeepCopyInto(out *OverrideInitParameters) { +func (in *PredefinedMetricSpecificationParameters) DeepCopyInto(out *PredefinedMetricSpecificationParameters) { *out = *in - if in.InstanceRequirements != nil { - in, out := &in.InstanceRequirements, &out.InstanceRequirements - *out = make([]InstanceRequirementsInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InstanceType != nil { - in, out := &in.InstanceType, &out.InstanceType + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType *out = new(string) **out = **in } - if in.LaunchTemplateSpecification != nil { - in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification - *out = make([]OverrideLaunchTemplateSpecificationInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.WeightedCapacity != nil { - in, out := &in.WeightedCapacity, &out.WeightedCapacity + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideInitParameters. -func (in *OverrideInitParameters) DeepCopy() *OverrideInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedMetricSpecificationParameters. +func (in *PredefinedMetricSpecificationParameters) DeepCopy() *PredefinedMetricSpecificationParameters { if in == nil { return nil } - out := new(OverrideInitParameters) + out := new(PredefinedMetricSpecificationParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OverrideLaunchTemplateSpecificationInitParameters) DeepCopyInto(out *OverrideLaunchTemplateSpecificationInitParameters) { +func (in *PredefinedScalingMetricSpecificationInitParameters) DeepCopyInto(out *PredefinedScalingMetricSpecificationInitParameters) { *out = *in - if in.LaunchTemplateID != nil { - in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType *out = new(string) **out = **in } - if in.LaunchTemplateIDRef != nil { - in, out := &in.LaunchTemplateIDRef, &out.LaunchTemplateIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in } - if in.LaunchTemplateIDSelector != nil { - in, out := &in.LaunchTemplateIDSelector, &out.LaunchTemplateIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedScalingMetricSpecificationInitParameters. +func (in *PredefinedScalingMetricSpecificationInitParameters) DeepCopy() *PredefinedScalingMetricSpecificationInitParameters { + if in == nil { + return nil } - if in.LaunchTemplateName != nil { - in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + out := new(PredefinedScalingMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PredefinedScalingMetricSpecificationObservation) DeepCopyInto(out *PredefinedScalingMetricSpecificationObservation) { + *out = *in + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType *out = new(string) **out = **in } - if in.Version != nil { - in, out := &in.Version, &out.Version + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideLaunchTemplateSpecificationInitParameters. -func (in *OverrideLaunchTemplateSpecificationInitParameters) DeepCopy() *OverrideLaunchTemplateSpecificationInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedScalingMetricSpecificationObservation. +func (in *PredefinedScalingMetricSpecificationObservation) DeepCopy() *PredefinedScalingMetricSpecificationObservation { if in == nil { return nil } - out := new(OverrideLaunchTemplateSpecificationInitParameters) + out := new(PredefinedScalingMetricSpecificationObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OverrideLaunchTemplateSpecificationObservation) DeepCopyInto(out *OverrideLaunchTemplateSpecificationObservation) { +func (in *PredefinedScalingMetricSpecificationParameters) DeepCopyInto(out *PredefinedScalingMetricSpecificationParameters) { *out = *in - if in.LaunchTemplateID != nil { - in, out := &in.LaunchTemplateID, &out.LaunchTemplateID - *out = new(string) - **out = **in - } - if in.LaunchTemplateName != nil { - in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + if in.PredefinedMetricType != nil { + in, out := &in.PredefinedMetricType, &out.PredefinedMetricType *out = new(string) **out = **in } - if in.Version != nil { - in, out := &in.Version, &out.Version + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideLaunchTemplateSpecificationObservation. -func (in *OverrideLaunchTemplateSpecificationObservation) DeepCopy() *OverrideLaunchTemplateSpecificationObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedScalingMetricSpecificationParameters. +func (in *PredefinedScalingMetricSpecificationParameters) DeepCopy() *PredefinedScalingMetricSpecificationParameters { if in == nil { return nil } - out := new(OverrideLaunchTemplateSpecificationObservation) + out := new(PredefinedScalingMetricSpecificationParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OverrideLaunchTemplateSpecificationParameters) DeepCopyInto(out *OverrideLaunchTemplateSpecificationParameters) { +func (in *PredictiveScalingConfigurationInitParameters) DeepCopyInto(out *PredictiveScalingConfigurationInitParameters) { *out = *in - if in.LaunchTemplateID != nil { - in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + if in.MaxCapacityBreachBehavior != nil { + in, out := &in.MaxCapacityBreachBehavior, &out.MaxCapacityBreachBehavior *out = new(string) **out = **in } - if in.LaunchTemplateIDRef != nil { - in, out := &in.LaunchTemplateIDRef, &out.LaunchTemplateIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.MaxCapacityBuffer != nil { + in, out := &in.MaxCapacityBuffer, &out.MaxCapacityBuffer + *out = new(string) + **out = **in } - if in.LaunchTemplateIDSelector != nil { - in, out := &in.LaunchTemplateIDSelector, &out.LaunchTemplateIDSelector - *out = new(v1.Selector) + if in.MetricSpecification != nil { + in, out := &in.MetricSpecification, &out.MetricSpecification + *out = new(MetricSpecificationInitParameters) (*in).DeepCopyInto(*out) } - if in.LaunchTemplateName != nil { - in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + if in.Mode != nil { + in, out := &in.Mode, &out.Mode *out = new(string) **out = **in } - if in.Version != nil { - in, out := &in.Version, &out.Version + if in.SchedulingBufferTime != nil { + in, out := &in.SchedulingBufferTime, &out.SchedulingBufferTime *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideLaunchTemplateSpecificationParameters. -func (in *OverrideLaunchTemplateSpecificationParameters) DeepCopy() *OverrideLaunchTemplateSpecificationParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredictiveScalingConfigurationInitParameters. 
+func (in *PredictiveScalingConfigurationInitParameters) DeepCopy() *PredictiveScalingConfigurationInitParameters { if in == nil { return nil } - out := new(OverrideLaunchTemplateSpecificationParameters) + out := new(PredictiveScalingConfigurationInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OverrideObservation) DeepCopyInto(out *OverrideObservation) { - *out = *in - if in.InstanceRequirements != nil { - in, out := &in.InstanceRequirements, &out.InstanceRequirements - *out = make([]InstanceRequirementsObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } +func (in *PredictiveScalingConfigurationObservation) DeepCopyInto(out *PredictiveScalingConfigurationObservation) { + *out = *in + if in.MaxCapacityBreachBehavior != nil { + in, out := &in.MaxCapacityBreachBehavior, &out.MaxCapacityBreachBehavior + *out = new(string) + **out = **in } - if in.InstanceType != nil { - in, out := &in.InstanceType, &out.InstanceType + if in.MaxCapacityBuffer != nil { + in, out := &in.MaxCapacityBuffer, &out.MaxCapacityBuffer *out = new(string) **out = **in } - if in.LaunchTemplateSpecification != nil { - in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification - *out = make([]OverrideLaunchTemplateSpecificationObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.MetricSpecification != nil { + in, out := &in.MetricSpecification, &out.MetricSpecification + *out = new(MetricSpecificationObservation) + (*in).DeepCopyInto(*out) } - if in.WeightedCapacity != nil { - in, out := &in.WeightedCapacity, &out.WeightedCapacity + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.SchedulingBufferTime != nil { + in, out := &in.SchedulingBufferTime, &out.SchedulingBufferTime *out = new(string) **out = **in } } -// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new OverrideObservation. -func (in *OverrideObservation) DeepCopy() *OverrideObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredictiveScalingConfigurationObservation. +func (in *PredictiveScalingConfigurationObservation) DeepCopy() *PredictiveScalingConfigurationObservation { if in == nil { return nil } - out := new(OverrideObservation) + out := new(PredictiveScalingConfigurationObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OverrideParameters) DeepCopyInto(out *OverrideParameters) { +func (in *PredictiveScalingConfigurationParameters) DeepCopyInto(out *PredictiveScalingConfigurationParameters) { *out = *in - if in.InstanceRequirements != nil { - in, out := &in.InstanceRequirements, &out.InstanceRequirements - *out = make([]InstanceRequirementsParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.MaxCapacityBreachBehavior != nil { + in, out := &in.MaxCapacityBreachBehavior, &out.MaxCapacityBreachBehavior + *out = new(string) + **out = **in } - if in.InstanceType != nil { - in, out := &in.InstanceType, &out.InstanceType + if in.MaxCapacityBuffer != nil { + in, out := &in.MaxCapacityBuffer, &out.MaxCapacityBuffer *out = new(string) **out = **in } - if in.LaunchTemplateSpecification != nil { - in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification - *out = make([]OverrideLaunchTemplateSpecificationParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.MetricSpecification != nil { + in, out := &in.MetricSpecification, &out.MetricSpecification + *out = new(MetricSpecificationParameters) + (*in).DeepCopyInto(*out) } - if in.WeightedCapacity != nil { - in, out := &in.WeightedCapacity, &out.WeightedCapacity + if in.Mode != nil { + 
in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.SchedulingBufferTime != nil { + in, out := &in.SchedulingBufferTime, &out.SchedulingBufferTime *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideParameters. -func (in *OverrideParameters) DeepCopy() *OverrideParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredictiveScalingConfigurationParameters. +func (in *PredictiveScalingConfigurationParameters) DeepCopy() *PredictiveScalingConfigurationParameters { if in == nil { return nil } - out := new(OverrideParameters) + out := new(PredictiveScalingConfigurationParameters) in.DeepCopyInto(out) return out } @@ -3765,6 +7697,231 @@ func (in *PreferencesParameters) DeepCopy() *PreferencesParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootBlockDeviceInitParameters) DeepCopyInto(out *RootBlockDeviceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootBlockDeviceInitParameters. 
+func (in *RootBlockDeviceInitParameters) DeepCopy() *RootBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(RootBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootBlockDeviceObservation) DeepCopyInto(out *RootBlockDeviceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootBlockDeviceObservation. +func (in *RootBlockDeviceObservation) DeepCopy() *RootBlockDeviceObservation { + if in == nil { + return nil + } + out := new(RootBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RootBlockDeviceParameters) DeepCopyInto(out *RootBlockDeviceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootBlockDeviceParameters. +func (in *RootBlockDeviceParameters) DeepCopy() *RootBlockDeviceParameters { + if in == nil { + return nil + } + out := new(RootBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepAdjustmentInitParameters) DeepCopyInto(out *StepAdjustmentInitParameters) { + *out = *in + if in.MetricIntervalLowerBound != nil { + in, out := &in.MetricIntervalLowerBound, &out.MetricIntervalLowerBound + *out = new(string) + **out = **in + } + if in.MetricIntervalUpperBound != nil { + in, out := &in.MetricIntervalUpperBound, &out.MetricIntervalUpperBound + *out = new(string) + **out = **in + } + if in.ScalingAdjustment != nil { + in, out := &in.ScalingAdjustment, &out.ScalingAdjustment + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepAdjustmentInitParameters. 
+func (in *StepAdjustmentInitParameters) DeepCopy() *StepAdjustmentInitParameters { + if in == nil { + return nil + } + out := new(StepAdjustmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepAdjustmentObservation) DeepCopyInto(out *StepAdjustmentObservation) { + *out = *in + if in.MetricIntervalLowerBound != nil { + in, out := &in.MetricIntervalLowerBound, &out.MetricIntervalLowerBound + *out = new(string) + **out = **in + } + if in.MetricIntervalUpperBound != nil { + in, out := &in.MetricIntervalUpperBound, &out.MetricIntervalUpperBound + *out = new(string) + **out = **in + } + if in.ScalingAdjustment != nil { + in, out := &in.ScalingAdjustment, &out.ScalingAdjustment + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepAdjustmentObservation. +func (in *StepAdjustmentObservation) DeepCopy() *StepAdjustmentObservation { + if in == nil { + return nil + } + out := new(StepAdjustmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepAdjustmentParameters) DeepCopyInto(out *StepAdjustmentParameters) { + *out = *in + if in.MetricIntervalLowerBound != nil { + in, out := &in.MetricIntervalLowerBound, &out.MetricIntervalLowerBound + *out = new(string) + **out = **in + } + if in.MetricIntervalUpperBound != nil { + in, out := &in.MetricIntervalUpperBound, &out.MetricIntervalUpperBound + *out = new(string) + **out = **in + } + if in.ScalingAdjustment != nil { + in, out := &in.ScalingAdjustment, &out.ScalingAdjustment + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepAdjustmentParameters. 
+func (in *StepAdjustmentParameters) DeepCopy() *StepAdjustmentParameters { + if in == nil { + return nil + } + out := new(StepAdjustmentParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TagInitParameters) DeepCopyInto(out *TagInitParameters) { *out = *in @@ -3855,6 +8012,111 @@ func (in *TagParameters) DeepCopy() *TagParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetTrackingConfigurationInitParameters) DeepCopyInto(out *TargetTrackingConfigurationInitParameters) { + *out = *in + if in.CustomizedMetricSpecification != nil { + in, out := &in.CustomizedMetricSpecification, &out.CustomizedMetricSpecification + *out = new(CustomizedMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableScaleIn != nil { + in, out := &in.DisableScaleIn, &out.DisableScaleIn + *out = new(bool) + **out = **in + } + if in.PredefinedMetricSpecification != nil { + in, out := &in.PredefinedMetricSpecification, &out.PredefinedMetricSpecification + *out = new(PredefinedMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTrackingConfigurationInitParameters. +func (in *TargetTrackingConfigurationInitParameters) DeepCopy() *TargetTrackingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(TargetTrackingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetTrackingConfigurationObservation) DeepCopyInto(out *TargetTrackingConfigurationObservation) { + *out = *in + if in.CustomizedMetricSpecification != nil { + in, out := &in.CustomizedMetricSpecification, &out.CustomizedMetricSpecification + *out = new(CustomizedMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.DisableScaleIn != nil { + in, out := &in.DisableScaleIn, &out.DisableScaleIn + *out = new(bool) + **out = **in + } + if in.PredefinedMetricSpecification != nil { + in, out := &in.PredefinedMetricSpecification, &out.PredefinedMetricSpecification + *out = new(PredefinedMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTrackingConfigurationObservation. +func (in *TargetTrackingConfigurationObservation) DeepCopy() *TargetTrackingConfigurationObservation { + if in == nil { + return nil + } + out := new(TargetTrackingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetTrackingConfigurationParameters) DeepCopyInto(out *TargetTrackingConfigurationParameters) { + *out = *in + if in.CustomizedMetricSpecification != nil { + in, out := &in.CustomizedMetricSpecification, &out.CustomizedMetricSpecification + *out = new(CustomizedMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableScaleIn != nil { + in, out := &in.DisableScaleIn, &out.DisableScaleIn + *out = new(bool) + **out = **in + } + if in.PredefinedMetricSpecification != nil { + in, out := &in.PredefinedMetricSpecification, &out.PredefinedMetricSpecification + *out = new(PredefinedMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTrackingConfigurationParameters. +func (in *TargetTrackingConfigurationParameters) DeepCopy() *TargetTrackingConfigurationParameters { + if in == nil { + return nil + } + out := new(TargetTrackingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TotalLocalStorageGbInitParameters) DeepCopyInto(out *TotalLocalStorageGbInitParameters) { *out = *in diff --git a/apis/autoscaling/v1beta2/zz_generated.managed.go b/apis/autoscaling/v1beta2/zz_generated.managed.go index 9f1ad20531..66575799f5 100644 --- a/apis/autoscaling/v1beta2/zz_generated.managed.go +++ b/apis/autoscaling/v1beta2/zz_generated.managed.go @@ -126,3 +126,183 @@ func (mg *AutoscalingGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnect func (mg *AutoscalingGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } + +// GetCondition of this GroupTag. 
+func (mg *GroupTag) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this GroupTag. +func (mg *GroupTag) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this GroupTag. +func (mg *GroupTag) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this GroupTag. +func (mg *GroupTag) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this GroupTag. +func (mg *GroupTag) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this GroupTag. +func (mg *GroupTag) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this GroupTag. +func (mg *GroupTag) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this GroupTag. +func (mg *GroupTag) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this GroupTag. +func (mg *GroupTag) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this GroupTag. +func (mg *GroupTag) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this GroupTag. +func (mg *GroupTag) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this GroupTag. +func (mg *GroupTag) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LaunchConfiguration. 
+func (mg *LaunchConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LaunchConfiguration. +func (mg *LaunchConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LaunchConfiguration. +func (mg *LaunchConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LaunchConfiguration. +func (mg *LaunchConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LaunchConfiguration. +func (mg *LaunchConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LaunchConfiguration. +func (mg *LaunchConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LaunchConfiguration. +func (mg *LaunchConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LaunchConfiguration. +func (mg *LaunchConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LaunchConfiguration. +func (mg *LaunchConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LaunchConfiguration. +func (mg *LaunchConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LaunchConfiguration. 
+func (mg *LaunchConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LaunchConfiguration. +func (mg *LaunchConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Policy. +func (mg *Policy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Policy. +func (mg *Policy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Policy. +func (mg *Policy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Policy. +func (mg *Policy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Policy. +func (mg *Policy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Policy. +func (mg *Policy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Policy. +func (mg *Policy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Policy. +func (mg *Policy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Policy. +func (mg *Policy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Policy. +func (mg *Policy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Policy. 
+func (mg *Policy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Policy. +func (mg *Policy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/autoscaling/v1beta2/zz_generated.managedlist.go b/apis/autoscaling/v1beta2/zz_generated.managedlist.go index 7346cbb648..fa0e2c74cc 100644 --- a/apis/autoscaling/v1beta2/zz_generated.managedlist.go +++ b/apis/autoscaling/v1beta2/zz_generated.managedlist.go @@ -24,3 +24,30 @@ func (l *AutoscalingGroupList) GetItems() []resource.Managed { } return items } + +// GetItems of this GroupTagList. +func (l *GroupTagList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LaunchConfigurationList. +func (l *LaunchConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PolicyList. 
+func (l *PolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/autoscaling/v1beta2/zz_generated.resolvers.go b/apis/autoscaling/v1beta2/zz_generated.resolvers.go index 159bc3fa8f..a5b4f97cf4 100644 --- a/apis/autoscaling/v1beta2/zz_generated.resolvers.go +++ b/apis/autoscaling/v1beta2/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *Attachment) ResolveReferences( // ResolveReferences of this Attachment var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta2", "AutoscalingGroup", "AutoscalingGroupList") + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta3", "AutoscalingGroup", "AutoscalingGroupList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -46,7 +46,7 @@ func (mg *Attachment) ResolveReferences( // ResolveReferences of this Attachment mg.Spec.ForProvider.AutoscalingGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AutoscalingGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -65,7 +65,7 @@ func (mg *Attachment) ResolveReferences( // ResolveReferences of this Attachment mg.Spec.ForProvider.ELB = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ELBRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta1", "LBTargetGroup", "LBTargetGroupList") + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", 
"LBTargetGroup", "LBTargetGroupList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -84,7 +84,7 @@ func (mg *Attachment) ResolveReferences( // ResolveReferences of this Attachment mg.Spec.ForProvider.LBTargetGroupArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LBTargetGroupArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta2", "AutoscalingGroup", "AutoscalingGroupList") + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta3", "AutoscalingGroup", "AutoscalingGroupList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -103,7 +103,7 @@ func (mg *Attachment) ResolveReferences( // ResolveReferences of this Attachment mg.Spec.InitProvider.AutoscalingGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.AutoscalingGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -122,7 +122,7 @@ func (mg *Attachment) ResolveReferences( // ResolveReferences of this Attachment mg.Spec.InitProvider.ELB = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ELBRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta1", "LBTargetGroup", "LBTargetGroupList") + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ 
-454,3 +454,84 @@ func (mg *AutoscalingGroup) ResolveReferences(ctx context.Context, c client.Read return nil } + +// ResolveReferences of this GroupTag. +func (mg *GroupTag) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta3", "AutoscalingGroup", "AutoscalingGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AutoscalingGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AutoscalingGroupNameRef, + Selector: mg.Spec.ForProvider.AutoscalingGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AutoscalingGroupName") + } + mg.Spec.ForProvider.AutoscalingGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AutoscalingGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta3", "AutoscalingGroup", "AutoscalingGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AutoscalingGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.AutoscalingGroupNameRef, + Selector: mg.Spec.InitProvider.AutoscalingGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AutoscalingGroupName") + } + 
mg.Spec.InitProvider.AutoscalingGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AutoscalingGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Policy. +func (mg *Policy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta3", "AutoscalingGroup", "AutoscalingGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AutoscalingGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AutoscalingGroupNameRef, + Selector: mg.Spec.ForProvider.AutoscalingGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AutoscalingGroupName") + } + mg.Spec.ForProvider.AutoscalingGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AutoscalingGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/autoscaling/v1beta2/zz_grouptag_terraformed.go b/apis/autoscaling/v1beta2/zz_grouptag_terraformed.go new file mode 100755 index 0000000000..88f21efa03 --- /dev/null +++ b/apis/autoscaling/v1beta2/zz_grouptag_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this GroupTag +func (mg *GroupTag) GetTerraformResourceType() string { + return "aws_autoscaling_group_tag" +} + +// GetConnectionDetailsMapping for this GroupTag +func (tr *GroupTag) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this GroupTag +func (tr *GroupTag) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this GroupTag +func (tr *GroupTag) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this GroupTag +func (tr *GroupTag) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this GroupTag +func (tr *GroupTag) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this GroupTag +func (tr *GroupTag) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this GroupTag +func (tr *GroupTag) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this GroupTag +func (tr *GroupTag) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this GroupTag using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *GroupTag) LateInitialize(attrs []byte) (bool, error) { + params := &GroupTagParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GroupTag) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/autoscaling/v1beta2/zz_grouptag_types.go b/apis/autoscaling/v1beta2/zz_grouptag_types.go new file mode 100755 index 0000000000..2e7140592c --- /dev/null +++ b/apis/autoscaling/v1beta2/zz_grouptag_types.go @@ -0,0 +1,169 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GroupTagInitParameters struct { + + // Name of the Autoscaling Group to apply the tag to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta3.AutoscalingGroup + AutoscalingGroupName *string `json:"autoscalingGroupName,omitempty" tf:"autoscaling_group_name,omitempty"` + + // Reference to a AutoscalingGroup in autoscaling to populate autoscalingGroupName. + // +kubebuilder:validation:Optional + AutoscalingGroupNameRef *v1.Reference `json:"autoscalingGroupNameRef,omitempty" tf:"-"` + + // Selector for a AutoscalingGroup in autoscaling to populate autoscalingGroupName. + // +kubebuilder:validation:Optional + AutoscalingGroupNameSelector *v1.Selector `json:"autoscalingGroupNameSelector,omitempty" tf:"-"` + + // Tag to create. The tag block is documented below. + Tag *GroupTagTagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type GroupTagObservation struct { + + // Name of the Autoscaling Group to apply the tag to. 
+ AutoscalingGroupName *string `json:"autoscalingGroupName,omitempty" tf:"autoscaling_group_name,omitempty"` + + // ASG name and key, separated by a comma (,) + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Tag to create. The tag block is documented below. + Tag *GroupTagTagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type GroupTagParameters struct { + + // Name of the Autoscaling Group to apply the tag to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta3.AutoscalingGroup + // +kubebuilder:validation:Optional + AutoscalingGroupName *string `json:"autoscalingGroupName,omitempty" tf:"autoscaling_group_name,omitempty"` + + // Reference to a AutoscalingGroup in autoscaling to populate autoscalingGroupName. + // +kubebuilder:validation:Optional + AutoscalingGroupNameRef *v1.Reference `json:"autoscalingGroupNameRef,omitempty" tf:"-"` + + // Selector for a AutoscalingGroup in autoscaling to populate autoscalingGroupName. + // +kubebuilder:validation:Optional + AutoscalingGroupNameSelector *v1.Selector `json:"autoscalingGroupNameSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Tag to create. The tag block is documented below. + // +kubebuilder:validation:Optional + Tag *GroupTagTagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type GroupTagTagInitParameters struct { + + // Tag name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Whether to propagate the tags to instances launched by the ASG. + PropagateAtLaunch *bool `json:"propagateAtLaunch,omitempty" tf:"propagate_at_launch,omitempty"` + + // Tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GroupTagTagObservation struct { + + // Tag name. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Whether to propagate the tags to instances launched by the ASG. + PropagateAtLaunch *bool `json:"propagateAtLaunch,omitempty" tf:"propagate_at_launch,omitempty"` + + // Tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GroupTagTagParameters struct { + + // Tag name. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Whether to propagate the tags to instances launched by the ASG. + // +kubebuilder:validation:Optional + PropagateAtLaunch *bool `json:"propagateAtLaunch" tf:"propagate_at_launch,omitempty"` + + // Tag value. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +// GroupTagSpec defines the desired state of GroupTag +type GroupTagSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GroupTagParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GroupTagInitParameters `json:"initProvider,omitempty"` +} + +// GroupTagStatus defines the observed state of GroupTag. +type GroupTagStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GroupTagObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// GroupTag is the Schema for the GroupTags API. 
Manages an individual Autoscaling Group tag +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type GroupTag struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tag) || (has(self.initProvider) && has(self.initProvider.tag))",message="spec.forProvider.tag is a required parameter" + Spec GroupTagSpec `json:"spec"` + Status GroupTagStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GroupTagList contains a list of GroupTags +type GroupTagList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GroupTag `json:"items"` +} + +// Repository type metadata. +var ( + GroupTag_Kind = "GroupTag" + GroupTag_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: GroupTag_Kind}.String() + GroupTag_KindAPIVersion = GroupTag_Kind + "." 
+ CRDGroupVersion.String() + GroupTag_GroupVersionKind = CRDGroupVersion.WithKind(GroupTag_Kind) +) + +func init() { + SchemeBuilder.Register(&GroupTag{}, &GroupTagList{}) +} diff --git a/apis/autoscaling/v1beta2/zz_launchconfiguration_terraformed.go b/apis/autoscaling/v1beta2/zz_launchconfiguration_terraformed.go new file mode 100755 index 0000000000..66b4f98295 --- /dev/null +++ b/apis/autoscaling/v1beta2/zz_launchconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LaunchConfiguration +func (mg *LaunchConfiguration) GetTerraformResourceType() string { + return "aws_launch_configuration" +} + +// GetConnectionDetailsMapping for this LaunchConfiguration +func (tr *LaunchConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LaunchConfiguration +func (tr *LaunchConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LaunchConfiguration +func (tr *LaunchConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LaunchConfiguration +func (tr *LaunchConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LaunchConfiguration +func (tr 
*LaunchConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LaunchConfiguration +func (tr *LaunchConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LaunchConfiguration +func (tr *LaunchConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LaunchConfiguration +func (tr *LaunchConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LaunchConfiguration using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *LaunchConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &LaunchConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LaunchConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/autoscaling/v1beta2/zz_launchconfiguration_types.go b/apis/autoscaling/v1beta2/zz_launchconfiguration_types.go new file mode 100755 index 0000000000..9d4b9fec58 --- /dev/null +++ b/apis/autoscaling/v1beta2/zz_launchconfiguration_types.go @@ -0,0 +1,514 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EBSBlockDeviceInitParameters struct { + + // Whether the volume should be destroyed + // on instance termination (Default: true). + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // The name of the device to mount. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Whether the volume should be encrypted or not. Defaults to false. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The amount of provisioned + // IOPS. + // This must be set with a volume_type of "io1". 
+ Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Whether the device in the block device mapping of the AMI is suppressed. + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // The Snapshot ID to mount. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The throughput (MiBps) to provision for a gp3 volume. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The size of the volume in gigabytes. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // The type of volume. Can be standard, gp2, gp3, st1, sc1 or io1. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSBlockDeviceObservation struct { + + // Whether the volume should be destroyed + // on instance termination (Default: true). + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // The name of the device to mount. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Whether the volume should be encrypted or not. Defaults to false. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The amount of provisioned + // IOPS. + // This must be set with a volume_type of "io1". + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Whether the device in the block device mapping of the AMI is suppressed. + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // The Snapshot ID to mount. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The throughput (MiBps) to provision for a gp3 volume. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The size of the volume in gigabytes. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // The type of volume. Can be standard, gp2, gp3, st1, sc1 or io1. 
+ VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSBlockDeviceParameters struct { + + // Whether the volume should be destroyed + // on instance termination (Default: true). + // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // The name of the device to mount. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // Whether the volume should be encrypted or not. Defaults to false. + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The amount of provisioned + // IOPS. + // This must be set with a volume_type of "io1". + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Whether the device in the block device mapping of the AMI is suppressed. + // +kubebuilder:validation:Optional + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // The Snapshot ID to mount. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The throughput (MiBps) to provision for a gp3 volume. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // The type of volume. Can be standard, gp2, gp3, st1, sc1 or io1. + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EphemeralBlockDeviceInitParameters struct { + + // The name of the block device to mount on the instance. 
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Whether the device in the block device mapping of the AMI is suppressed. + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // The Instance Store Device Name. + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type EphemeralBlockDeviceObservation struct { + + // The name of the block device to mount on the instance. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Whether the device in the block device mapping of the AMI is suppressed. + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // The Instance Store Device Name. + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type EphemeralBlockDeviceParameters struct { + + // The name of the block device to mount on the instance. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // Whether the device in the block device mapping of the AMI is suppressed. + // +kubebuilder:validation:Optional + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // The Instance Store Device Name. + // +kubebuilder:validation:Optional + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type LaunchConfigurationInitParameters struct { + + // Associate a public ip address with an instance in a VPC. + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // Additional EBS block devices to attach to the instance. See Block Devices below for details. + EBSBlockDevice []EBSBlockDeviceInitParameters `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + // If true, the launched EC2 instance will be EBS-optimized. 
+ EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // Enables/disables detailed monitoring. This is enabled by default. + EnableMonitoring *bool `json:"enableMonitoring,omitempty" tf:"enable_monitoring,omitempty"` + + // Customize Ephemeral (also known as "Instance Store") volumes on the instance. See Block Devices below for details. + EphemeralBlockDevice []EphemeralBlockDeviceInitParameters `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + // The name attribute of the IAM instance profile to associate with launched instances. + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // The EC2 image ID to launch. + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // The size of instance to launch. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The key name that should be used for the instance. + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // The metadata options for the instance. + MetadataOptions *MetadataOptionsInitParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // The tenancy of the instance. Valid values are default or dedicated, see AWS's Create Launch Configuration for more details. + PlacementTenancy *string `json:"placementTenancy,omitempty" tf:"placement_tenancy,omitempty"` + + // Customize details about the root block device of the instance. See Block Devices below for details. + RootBlockDevice *RootBlockDeviceInitParameters `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // A list of associated security group IDS. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The maximum price to use for reserving spot instances. 
+ SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The user data to provide when launching the instance. Do not pass gzip-compressed data via this argument; see user_data_base64 instead. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Can be used instead of user_data to pass base64-encoded binary data directly. Use this instead of user_data whenever the value is not a valid UTF-8 string. For example, gzip-encoded user data must be base64-encoded and passed via this argument to avoid corruption. + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` +} + +type LaunchConfigurationObservation struct { + + // The Amazon Resource Name of the launch configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Associate a public ip address with an instance in a VPC. + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // Additional EBS block devices to attach to the instance. See Block Devices below for details. + EBSBlockDevice []EBSBlockDeviceObservation `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + // If true, the launched EC2 instance will be EBS-optimized. + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // Enables/disables detailed monitoring. This is enabled by default. + EnableMonitoring *bool `json:"enableMonitoring,omitempty" tf:"enable_monitoring,omitempty"` + + // Customize Ephemeral (also known as "Instance Store") volumes on the instance. See Block Devices below for details. + EphemeralBlockDevice []EphemeralBlockDeviceObservation `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + // The name attribute of the IAM instance profile to associate with launched instances. 
+ IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // The ID of the launch configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The EC2 image ID to launch. + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // The size of instance to launch. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The key name that should be used for the instance. + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // The metadata options for the instance. + MetadataOptions *MetadataOptionsObservation `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // The tenancy of the instance. Valid values are default or dedicated, see AWS's Create Launch Configuration for more details. + PlacementTenancy *string `json:"placementTenancy,omitempty" tf:"placement_tenancy,omitempty"` + + // Customize details about the root block device of the instance. See Block Devices below for details. + RootBlockDevice *RootBlockDeviceObservation `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // A list of associated security group IDS. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The maximum price to use for reserving spot instances. + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The user data to provide when launching the instance. Do not pass gzip-compressed data via this argument; see user_data_base64 instead. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Can be used instead of user_data to pass base64-encoded binary data directly. Use this instead of user_data whenever the value is not a valid UTF-8 string. For example, gzip-encoded user data must be base64-encoded and passed via this argument to avoid corruption. 
+ UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` +} + +type LaunchConfigurationParameters struct { + + // Associate a public ip address with an instance in a VPC. + // +kubebuilder:validation:Optional + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // Additional EBS block devices to attach to the instance. See Block Devices below for details. + // +kubebuilder:validation:Optional + EBSBlockDevice []EBSBlockDeviceParameters `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + // If true, the launched EC2 instance will be EBS-optimized. + // +kubebuilder:validation:Optional + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // Enables/disables detailed monitoring. This is enabled by default. + // +kubebuilder:validation:Optional + EnableMonitoring *bool `json:"enableMonitoring,omitempty" tf:"enable_monitoring,omitempty"` + + // Customize Ephemeral (also known as "Instance Store") volumes on the instance. See Block Devices below for details. + // +kubebuilder:validation:Optional + EphemeralBlockDevice []EphemeralBlockDeviceParameters `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + // The name attribute of the IAM instance profile to associate with launched instances. + // +kubebuilder:validation:Optional + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // The EC2 image ID to launch. + // +kubebuilder:validation:Optional + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // The size of instance to launch. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The key name that should be used for the instance. 
+ // +kubebuilder:validation:Optional + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // The metadata options for the instance. + // +kubebuilder:validation:Optional + MetadataOptions *MetadataOptionsParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // The tenancy of the instance. Valid values are default or dedicated, see AWS's Create Launch Configuration for more details. + // +kubebuilder:validation:Optional + PlacementTenancy *string `json:"placementTenancy,omitempty" tf:"placement_tenancy,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Customize details about the root block device of the instance. See Block Devices below for details. + // +kubebuilder:validation:Optional + RootBlockDevice *RootBlockDeviceParameters `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // A list of associated security group IDS. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The maximum price to use for reserving spot instances. + // +kubebuilder:validation:Optional + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The user data to provide when launching the instance. Do not pass gzip-compressed data via this argument; see user_data_base64 instead. + // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Can be used instead of user_data to pass base64-encoded binary data directly. Use this instead of user_data whenever the value is not a valid UTF-8 string. For example, gzip-encoded user data must be base64-encoded and passed via this argument to avoid corruption. 
+ // +kubebuilder:validation:Optional + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` +} + +type MetadataOptionsInitParameters struct { + + // The state of the metadata service: enabled, disabled. + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // The desired HTTP PUT response hop limit for instance metadata requests. + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // If session tokens are required: optional, required. + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` +} + +type MetadataOptionsObservation struct { + + // The state of the metadata service: enabled, disabled. + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // The desired HTTP PUT response hop limit for instance metadata requests. + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // If session tokens are required: optional, required. + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` +} + +type MetadataOptionsParameters struct { + + // The state of the metadata service: enabled, disabled. + // +kubebuilder:validation:Optional + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // The desired HTTP PUT response hop limit for instance metadata requests. + // +kubebuilder:validation:Optional + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // If session tokens are required: optional, required. + // +kubebuilder:validation:Optional + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` +} + +type RootBlockDeviceInitParameters struct { + + // Whether the volume should be destroyed on instance termination. Defaults to true. 
+ DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether the volume should be encrypted or not. Defaults to false. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The amount of provisioned IOPS. This must be set with a volume_type of io1. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The throughput (MiBps) to provision for a gp3 volume. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The size of the volume in gigabytes. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // The type of volume. Can be standard, gp2, gp3, st1, sc1 or io1. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type RootBlockDeviceObservation struct { + + // Whether the volume should be destroyed on instance termination. Defaults to true. + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether the volume should be encrypted or not. Defaults to false. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The amount of provisioned IOPS. This must be set with a volume_type of io1. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The throughput (MiBps) to provision for a gp3 volume. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The size of the volume in gigabytes. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // The type of volume. Can be standard, gp2, gp3, st1, sc1 or io1. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type RootBlockDeviceParameters struct { + + // Whether the volume should be destroyed on instance termination. Defaults to true. 
+ // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether the volume should be encrypted or not. Defaults to false. + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The amount of provisioned IOPS. This must be set with a volume_type of io1. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The throughput (MiBps) to provision for a gp3 volume. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // The type of volume. Can be standard, gp2, gp3, st1, sc1 or io1. + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +// LaunchConfigurationSpec defines the desired state of LaunchConfiguration +type LaunchConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LaunchConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider LaunchConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// LaunchConfigurationStatus defines the observed state of LaunchConfiguration. +type LaunchConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LaunchConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LaunchConfiguration is the Schema for the LaunchConfigurations API. Provides a resource to create a new launch configuration, used for autoscaling groups. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LaunchConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.imageId) || (has(self.initProvider) && has(self.initProvider.imageId))",message="spec.forProvider.imageId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instanceType) || (has(self.initProvider) && has(self.initProvider.instanceType))",message="spec.forProvider.instanceType is a required parameter" + Spec LaunchConfigurationSpec `json:"spec"` + Status LaunchConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// 
LaunchConfigurationList contains a list of LaunchConfigurations +type LaunchConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LaunchConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + LaunchConfiguration_Kind = "LaunchConfiguration" + LaunchConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LaunchConfiguration_Kind}.String() + LaunchConfiguration_KindAPIVersion = LaunchConfiguration_Kind + "." + CRDGroupVersion.String() + LaunchConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(LaunchConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&LaunchConfiguration{}, &LaunchConfigurationList{}) +} diff --git a/apis/autoscaling/v1beta2/zz_policy_terraformed.go b/apis/autoscaling/v1beta2/zz_policy_terraformed.go new file mode 100755 index 0000000000..a1de437347 --- /dev/null +++ b/apis/autoscaling/v1beta2/zz_policy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+
+package v1beta2
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Policy
+func (mg *Policy) GetTerraformResourceType() string {
+	return "aws_autoscaling_policy"
+}
+
+// GetConnectionDetailsMapping for this Policy
+func (tr *Policy) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Policy
+func (tr *Policy) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Policy
+func (tr *Policy) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Policy
+func (tr *Policy) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Policy
+func (tr *Policy) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Policy
+func (tr *Policy) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Policy
+func (tr *Policy) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters 
of this Policy +func (tr *Policy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Policy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Policy) LateInitialize(attrs []byte) (bool, error) { + params := &PolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Policy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/autoscaling/v1beta2/zz_policy_types.go b/apis/autoscaling/v1beta2/zz_policy_types.go new file mode 100755 index 0000000000..fab2859836 --- /dev/null +++ b/apis/autoscaling/v1beta2/zz_policy_types.go @@ -0,0 +1,1399 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomizedCapacityMetricSpecificationInitParameters struct { + + // List of up to 10 structures that defines custom capacity metric in predictive scaling policy + MetricDataQueries []MetricDataQueriesInitParameters `json:"metricDataQueries,omitempty" tf:"metric_data_queries,omitempty"` +} + +type CustomizedCapacityMetricSpecificationObservation struct { + + // List of up to 10 structures that defines custom capacity metric in predictive scaling policy + MetricDataQueries []MetricDataQueriesObservation `json:"metricDataQueries,omitempty" tf:"metric_data_queries,omitempty"` +} + +type CustomizedCapacityMetricSpecificationParameters struct { + + // List of up to 10 structures that defines custom capacity metric in predictive scaling policy + // +kubebuilder:validation:Optional + MetricDataQueries []MetricDataQueriesParameters `json:"metricDataQueries" tf:"metric_data_queries,omitempty"` +} + +type CustomizedLoadMetricSpecificationInitParameters struct { + + // List of up to 10 structures that defines custom load metric in predictive scaling policy + MetricDataQueries []CustomizedLoadMetricSpecificationMetricDataQueriesInitParameters `json:"metricDataQueries,omitempty" 
tf:"metric_data_queries,omitempty"` +} + +type CustomizedLoadMetricSpecificationMetricDataQueriesInitParameters struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. + MetricStat *MetricDataQueriesMetricStatInitParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type CustomizedLoadMetricSpecificationMetricDataQueriesObservation struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. 
+ MetricStat *MetricDataQueriesMetricStatObservation `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type CustomizedLoadMetricSpecificationMetricDataQueriesParameters struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. 
+ // +kubebuilder:validation:Optional + MetricStat *MetricDataQueriesMetricStatParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + // +kubebuilder:validation:Optional + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type CustomizedLoadMetricSpecificationObservation struct { + + // List of up to 10 structures that defines custom load metric in predictive scaling policy + MetricDataQueries []CustomizedLoadMetricSpecificationMetricDataQueriesObservation `json:"metricDataQueries,omitempty" tf:"metric_data_queries,omitempty"` +} + +type CustomizedLoadMetricSpecificationParameters struct { + + // List of up to 10 structures that defines custom load metric in predictive scaling policy + // +kubebuilder:validation:Optional + MetricDataQueries []CustomizedLoadMetricSpecificationMetricDataQueriesParameters `json:"metricDataQueries" tf:"metric_data_queries,omitempty"` +} + +type CustomizedMetricSpecificationInitParameters struct { + + // Dimensions of the metric. + MetricDimension []MetricDimensionInitParameters `json:"metricDimension,omitempty" tf:"metric_dimension,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Metrics to include, as a metric data query. + Metrics []MetricsInitParameters `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedMetricSpecificationObservation struct { + + // Dimensions of the metric. 
+ MetricDimension []MetricDimensionObservation `json:"metricDimension,omitempty" tf:"metric_dimension,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Metrics to include, as a metric data query. + Metrics []MetricsObservation `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedMetricSpecificationParameters struct { + + // Dimensions of the metric. + // +kubebuilder:validation:Optional + MetricDimension []MetricDimensionParameters `json:"metricDimension,omitempty" tf:"metric_dimension,omitempty"` + + // Name of the metric. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Metrics to include, as a metric data query. + // +kubebuilder:validation:Optional + Metrics []MetricsParameters `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Namespace of the metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. + // +kubebuilder:validation:Optional + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. 
+ // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedScalingMetricSpecificationInitParameters struct { + + // List of up to 10 structures that defines custom scaling metric in predictive scaling policy + MetricDataQueries []CustomizedScalingMetricSpecificationMetricDataQueriesInitParameters `json:"metricDataQueries,omitempty" tf:"metric_data_queries,omitempty"` +} + +type CustomizedScalingMetricSpecificationMetricDataQueriesInitParameters struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. + MetricStat *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatInitParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatInitParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricDataQueriesMetricStatMetricInitParameters `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. 
+ Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatObservation struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricDataQueriesMetricStatMetricObservation `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + // +kubebuilder:validation:Optional + Metric *MetricDataQueriesMetricStatMetricParameters `json:"metric" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + // +kubebuilder:validation:Optional + Stat *string `json:"stat" tf:"stat,omitempty"` + + // Unit of the metric. + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedScalingMetricSpecificationMetricDataQueriesObservation struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. 
+ MetricStat *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatObservation `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type CustomizedScalingMetricSpecificationMetricDataQueriesParameters struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. 
+ // +kubebuilder:validation:Optional + MetricStat *CustomizedScalingMetricSpecificationMetricDataQueriesMetricStatParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + // +kubebuilder:validation:Optional + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type CustomizedScalingMetricSpecificationObservation struct { + + // List of up to 10 structures that defines custom scaling metric in predictive scaling policy + MetricDataQueries []CustomizedScalingMetricSpecificationMetricDataQueriesObservation `json:"metricDataQueries,omitempty" tf:"metric_data_queries,omitempty"` +} + +type CustomizedScalingMetricSpecificationParameters struct { + + // List of up to 10 structures that defines custom scaling metric in predictive scaling policy + // +kubebuilder:validation:Optional + MetricDataQueries []CustomizedScalingMetricSpecificationMetricDataQueriesParameters `json:"metricDataQueries" tf:"metric_data_queries,omitempty"` +} + +type DimensionsInitParameters struct { + + // Name of the policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DimensionsObservation struct { + + // Name of the policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DimensionsParameters struct { + + // Name of the policy. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value of the dimension. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MetricDataQueriesInitParameters struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. 
+ Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. + MetricStat *MetricStatInitParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type MetricDataQueriesMetricStatInitParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricStatMetricInitParameters `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricDataQueriesMetricStatMetricInitParameters struct { + + // Dimensions of the metric. + Dimensions []MetricStatMetricDimensionsInitParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricDataQueriesMetricStatMetricObservation struct { + + // Dimensions of the metric. + Dimensions []MetricStatMetricDimensionsObservation `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. 
+ Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricDataQueriesMetricStatMetricParameters struct { + + // Dimensions of the metric. + // +kubebuilder:validation:Optional + Dimensions []MetricStatMetricDimensionsParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // Namespace of the metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace" tf:"namespace,omitempty"` +} + +type MetricDataQueriesMetricStatObservation struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricStatMetricObservation `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricDataQueriesMetricStatParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + // +kubebuilder:validation:Optional + Metric *MetricStatMetricParameters `json:"metric" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + // +kubebuilder:validation:Optional + Stat *string `json:"stat" tf:"stat,omitempty"` + + // Unit of the metric. + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricDataQueriesObservation struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. 
+ Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. + MetricStat *MetricStatObservation `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type MetricDataQueriesParameters struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. + // +kubebuilder:validation:Optional + MetricStat *MetricStatParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + // +kubebuilder:validation:Optional + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type MetricDimensionInitParameters struct { + + // Name of the policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricDimensionObservation struct { + + // Name of the policy. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricDimensionParameters struct { + + // Name of the policy. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value of the dimension. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MetricDimensionsInitParameters struct { + + // Name of the policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricDimensionsObservation struct { + + // Name of the policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricDimensionsParameters struct { + + // Name of the policy. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value of the dimension. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MetricInitParameters struct { + + // Dimensions of the metric. + Dimensions []DimensionsInitParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricObservation struct { + + // Dimensions of the metric. + Dimensions []DimensionsObservation `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricParameters struct { + + // Dimensions of the metric. 
+ // +kubebuilder:validation:Optional + Dimensions []DimensionsParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // Namespace of the metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace" tf:"namespace,omitempty"` +} + +type MetricSpecificationInitParameters struct { + + // Customized capacity metric specification. The field is only valid when you use customized_load_metric_specification + CustomizedCapacityMetricSpecification *CustomizedCapacityMetricSpecificationInitParameters `json:"customizedCapacityMetricSpecification,omitempty" tf:"customized_capacity_metric_specification,omitempty"` + + // Customized load metric specification. + CustomizedLoadMetricSpecification *CustomizedLoadMetricSpecificationInitParameters `json:"customizedLoadMetricSpecification,omitempty" tf:"customized_load_metric_specification,omitempty"` + + // Customized scaling metric specification. + CustomizedScalingMetricSpecification *CustomizedScalingMetricSpecificationInitParameters `json:"customizedScalingMetricSpecification,omitempty" tf:"customized_scaling_metric_specification,omitempty"` + + // Predefined load metric specification. + PredefinedLoadMetricSpecification *PredefinedLoadMetricSpecificationInitParameters `json:"predefinedLoadMetricSpecification,omitempty" tf:"predefined_load_metric_specification,omitempty"` + + // Metric pair specification from which Amazon EC2 Auto Scaling determines the appropriate scaling metric and load metric to use. + PredefinedMetricPairSpecification *PredefinedMetricPairSpecificationInitParameters `json:"predefinedMetricPairSpecification,omitempty" tf:"predefined_metric_pair_specification,omitempty"` + + // Predefined scaling metric specification. 
+ PredefinedScalingMetricSpecification *PredefinedScalingMetricSpecificationInitParameters `json:"predefinedScalingMetricSpecification,omitempty" tf:"predefined_scaling_metric_specification,omitempty"` + + // Target value for the metric. + TargetValue *float64 `json:"targetValue,omitempty" tf:"target_value,omitempty"` +} + +type MetricSpecificationObservation struct { + + // Customized capacity metric specification. The field is only valid when you use customized_load_metric_specification + CustomizedCapacityMetricSpecification *CustomizedCapacityMetricSpecificationObservation `json:"customizedCapacityMetricSpecification,omitempty" tf:"customized_capacity_metric_specification,omitempty"` + + // Customized load metric specification. + CustomizedLoadMetricSpecification *CustomizedLoadMetricSpecificationObservation `json:"customizedLoadMetricSpecification,omitempty" tf:"customized_load_metric_specification,omitempty"` + + // Customized scaling metric specification. + CustomizedScalingMetricSpecification *CustomizedScalingMetricSpecificationObservation `json:"customizedScalingMetricSpecification,omitempty" tf:"customized_scaling_metric_specification,omitempty"` + + // Predefined load metric specification. + PredefinedLoadMetricSpecification *PredefinedLoadMetricSpecificationObservation `json:"predefinedLoadMetricSpecification,omitempty" tf:"predefined_load_metric_specification,omitempty"` + + // Metric pair specification from which Amazon EC2 Auto Scaling determines the appropriate scaling metric and load metric to use. + PredefinedMetricPairSpecification *PredefinedMetricPairSpecificationObservation `json:"predefinedMetricPairSpecification,omitempty" tf:"predefined_metric_pair_specification,omitempty"` + + // Predefined scaling metric specification. 
+ PredefinedScalingMetricSpecification *PredefinedScalingMetricSpecificationObservation `json:"predefinedScalingMetricSpecification,omitempty" tf:"predefined_scaling_metric_specification,omitempty"` + + // Target value for the metric. + TargetValue *float64 `json:"targetValue,omitempty" tf:"target_value,omitempty"` +} + +type MetricSpecificationParameters struct { + + // Customized capacity metric specification. The field is only valid when you use customized_load_metric_specification + // +kubebuilder:validation:Optional + CustomizedCapacityMetricSpecification *CustomizedCapacityMetricSpecificationParameters `json:"customizedCapacityMetricSpecification,omitempty" tf:"customized_capacity_metric_specification,omitempty"` + + // Customized load metric specification. + // +kubebuilder:validation:Optional + CustomizedLoadMetricSpecification *CustomizedLoadMetricSpecificationParameters `json:"customizedLoadMetricSpecification,omitempty" tf:"customized_load_metric_specification,omitempty"` + + // Customized scaling metric specification. + // +kubebuilder:validation:Optional + CustomizedScalingMetricSpecification *CustomizedScalingMetricSpecificationParameters `json:"customizedScalingMetricSpecification,omitempty" tf:"customized_scaling_metric_specification,omitempty"` + + // Predefined load metric specification. + // +kubebuilder:validation:Optional + PredefinedLoadMetricSpecification *PredefinedLoadMetricSpecificationParameters `json:"predefinedLoadMetricSpecification,omitempty" tf:"predefined_load_metric_specification,omitempty"` + + // Metric pair specification from which Amazon EC2 Auto Scaling determines the appropriate scaling metric and load metric to use. + // +kubebuilder:validation:Optional + PredefinedMetricPairSpecification *PredefinedMetricPairSpecificationParameters `json:"predefinedMetricPairSpecification,omitempty" tf:"predefined_metric_pair_specification,omitempty"` + + // Predefined scaling metric specification. 
+ // +kubebuilder:validation:Optional + PredefinedScalingMetricSpecification *PredefinedScalingMetricSpecificationParameters `json:"predefinedScalingMetricSpecification,omitempty" tf:"predefined_scaling_metric_specification,omitempty"` + + // Target value for the metric. + // +kubebuilder:validation:Optional + TargetValue *float64 `json:"targetValue" tf:"target_value,omitempty"` +} + +type MetricStatInitParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricInitParameters `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricStatMetricDimensionsInitParameters struct { + + // Name of the policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricStatMetricDimensionsObservation struct { + + // Name of the policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricStatMetricDimensionsParameters struct { + + // Name of the policy. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value of the dimension. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MetricStatMetricInitParameters struct { + + // Dimensions of the metric. + Dimensions []MetricDimensionsInitParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. 
+ Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricStatMetricObservation struct { + + // Dimensions of the metric. + Dimensions []MetricDimensionsObservation `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricStatMetricParameters struct { + + // Dimensions of the metric. + // +kubebuilder:validation:Optional + Dimensions []MetricDimensionsParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // Namespace of the metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace" tf:"namespace,omitempty"` +} + +type MetricStatObservation struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricObservation `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricStatParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + // +kubebuilder:validation:Optional + Metric *MetricParameters `json:"metric" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + // +kubebuilder:validation:Optional + Stat *string `json:"stat" tf:"stat,omitempty"` + + // Unit of the metric. + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricsInitParameters struct { + + // Math expression used on the returned metric. 
You must specify either expression or metric_stat, but not both. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. + MetricStat *MetricsMetricStatInitParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type MetricsMetricStatInitParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricsMetricStatMetricInitParameters `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricsMetricStatMetricDimensionsInitParameters struct { + + // Name of the policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricsMetricStatMetricDimensionsObservation struct { + + // Name of the policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of the dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricsMetricStatMetricDimensionsParameters struct { + + // Name of the policy. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value of the dimension. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MetricsMetricStatMetricInitParameters struct { + + // Dimensions of the metric. + Dimensions []MetricsMetricStatMetricDimensionsInitParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricsMetricStatMetricObservation struct { + + // Dimensions of the metric. + Dimensions []MetricsMetricStatMetricDimensionsObservation `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type MetricsMetricStatMetricParameters struct { + + // Dimensions of the metric. + // +kubebuilder:validation:Optional + Dimensions []MetricsMetricStatMetricDimensionsParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // Namespace of the metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace" tf:"namespace,omitempty"` +} + +type MetricsMetricStatObservation struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + Metric *MetricsMetricStatMetricObservation `json:"metric,omitempty" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // Unit of the metric. 
+ Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricsMetricStatParameters struct { + + // Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. + // +kubebuilder:validation:Optional + Metric *MetricsMetricStatMetricParameters `json:"metric" tf:"metric,omitempty"` + + // Statistic of the metrics to return. + // +kubebuilder:validation:Optional + Stat *string `json:"stat" tf:"stat,omitempty"` + + // Unit of the metric. + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricsObservation struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. + MetricStat *MetricsMetricStatObservation `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type MetricsParameters struct { + + // Math expression used on the returned metric. You must specify either expression or metric_stat, but not both. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Short name for the metric used in predictive scaling policy. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Human-readable label for this metric or expression. 
+ // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Structure that defines CloudWatch metric to be used in predictive scaling policy. You must specify either expression or metric_stat, but not both. + // +kubebuilder:validation:Optional + MetricStat *MetricsMetricStatParameters `json:"metricStat,omitempty" tf:"metric_stat,omitempty"` + + // Boolean that indicates whether to return the timestamps and raw data values of this metric, the default is true + // +kubebuilder:validation:Optional + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type PolicyInitParameters struct { + + // Whether the adjustment is an absolute number or a percentage of the current capacity. Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + AdjustmentType *string `json:"adjustmentType,omitempty" tf:"adjustment_type,omitempty"` + + // Amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. + Cooldown *float64 `json:"cooldown,omitempty" tf:"cooldown,omitempty"` + + // Whether the scaling policy is enabled or disabled. Default: true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Estimated time, in seconds, until a newly launched instance will contribute CloudWatch metrics. Without a value, AWS will default to the group's specified cooldown period. + EstimatedInstanceWarmup *float64 `json:"estimatedInstanceWarmup,omitempty" tf:"estimated_instance_warmup,omitempty"` + + // Aggregation type for the policy's metrics. Valid values are "Minimum", "Maximum", and "Average". Without a value, AWS will treat the aggregation type as "Average". + MetricAggregationType *string `json:"metricAggregationType,omitempty" tf:"metric_aggregation_type,omitempty"` + + // Minimum value to scale by when adjustment_type is set to PercentChangeInCapacity. 
+ MinAdjustmentMagnitude *float64 `json:"minAdjustmentMagnitude,omitempty" tf:"min_adjustment_magnitude,omitempty"` + + // Policy type, either "SimpleScaling", "StepScaling", "TargetTrackingScaling", or "PredictiveScaling". If this value isn't provided, AWS will default to "SimpleScaling." + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // Predictive scaling policy configuration to use with Amazon EC2 Auto Scaling. + PredictiveScalingConfiguration *PredictiveScalingConfigurationInitParameters `json:"predictiveScalingConfiguration,omitempty" tf:"predictive_scaling_configuration,omitempty"` + + // Number of instances by which to scale. adjustment_type determines the interpretation of this number (e.g., as an absolute number or as a percentage of the existing Auto Scaling group size). A positive increment adds to the current capacity and a negative value removes from the current capacity. + ScalingAdjustment *float64 `json:"scalingAdjustment,omitempty" tf:"scaling_adjustment,omitempty"` + + // Set of adjustments that manage + // group scaling. These have the following structure: + StepAdjustment []StepAdjustmentInitParameters `json:"stepAdjustment,omitempty" tf:"step_adjustment,omitempty"` + + // Target tracking policy. These have the following structure: + TargetTrackingConfiguration *TargetTrackingConfigurationInitParameters `json:"targetTrackingConfiguration,omitempty" tf:"target_tracking_configuration,omitempty"` +} + +type PolicyObservation struct { + + // Whether the adjustment is an absolute number or a percentage of the current capacity. Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + AdjustmentType *string `json:"adjustmentType,omitempty" tf:"adjustment_type,omitempty"` + + // ARN assigned by AWS to the scaling policy. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Name of the autoscaling group. 
+ AutoscalingGroupName *string `json:"autoscalingGroupName,omitempty" tf:"autoscaling_group_name,omitempty"` + + // Amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. + Cooldown *float64 `json:"cooldown,omitempty" tf:"cooldown,omitempty"` + + // Whether the scaling policy is enabled or disabled. Default: true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Estimated time, in seconds, until a newly launched instance will contribute CloudWatch metrics. Without a value, AWS will default to the group's specified cooldown period. + EstimatedInstanceWarmup *float64 `json:"estimatedInstanceWarmup,omitempty" tf:"estimated_instance_warmup,omitempty"` + + // Short name for the metric used in predictive scaling policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Aggregation type for the policy's metrics. Valid values are "Minimum", "Maximum", and "Average". Without a value, AWS will treat the aggregation type as "Average". + MetricAggregationType *string `json:"metricAggregationType,omitempty" tf:"metric_aggregation_type,omitempty"` + + // Minimum value to scale by when adjustment_type is set to PercentChangeInCapacity. + MinAdjustmentMagnitude *float64 `json:"minAdjustmentMagnitude,omitempty" tf:"min_adjustment_magnitude,omitempty"` + + // Policy type, either "SimpleScaling", "StepScaling", "TargetTrackingScaling", or "PredictiveScaling". If this value isn't provided, AWS will default to "SimpleScaling." + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // Predictive scaling policy configuration to use with Amazon EC2 Auto Scaling. + PredictiveScalingConfiguration *PredictiveScalingConfigurationObservation `json:"predictiveScalingConfiguration,omitempty" tf:"predictive_scaling_configuration,omitempty"` + + // Number of instances by which to scale. 
adjustment_type determines the interpretation of this number (e.g., as an absolute number or as a percentage of the existing Auto Scaling group size). A positive increment adds to the current capacity and a negative value removes from the current capacity. + ScalingAdjustment *float64 `json:"scalingAdjustment,omitempty" tf:"scaling_adjustment,omitempty"` + + // Set of adjustments that manage + // group scaling. These have the following structure: + StepAdjustment []StepAdjustmentObservation `json:"stepAdjustment,omitempty" tf:"step_adjustment,omitempty"` + + // Target tracking policy. These have the following structure: + TargetTrackingConfiguration *TargetTrackingConfigurationObservation `json:"targetTrackingConfiguration,omitempty" tf:"target_tracking_configuration,omitempty"` +} + +type PolicyParameters struct { + + // Whether the adjustment is an absolute number or a percentage of the current capacity. Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + // +kubebuilder:validation:Optional + AdjustmentType *string `json:"adjustmentType,omitempty" tf:"adjustment_type,omitempty"` + + // Name of the autoscaling group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta3.AutoscalingGroup + // +kubebuilder:validation:Optional + AutoscalingGroupName *string `json:"autoscalingGroupName,omitempty" tf:"autoscaling_group_name,omitempty"` + + // Reference to a AutoscalingGroup in autoscaling to populate autoscalingGroupName. + // +kubebuilder:validation:Optional + AutoscalingGroupNameRef *v1.Reference `json:"autoscalingGroupNameRef,omitempty" tf:"-"` + + // Selector for a AutoscalingGroup in autoscaling to populate autoscalingGroupName. + // +kubebuilder:validation:Optional + AutoscalingGroupNameSelector *v1.Selector `json:"autoscalingGroupNameSelector,omitempty" tf:"-"` + + // Amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. 
+ // +kubebuilder:validation:Optional + Cooldown *float64 `json:"cooldown,omitempty" tf:"cooldown,omitempty"` + + // Whether the scaling policy is enabled or disabled. Default: true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Estimated time, in seconds, until a newly launched instance will contribute CloudWatch metrics. Without a value, AWS will default to the group's specified cooldown period. + // +kubebuilder:validation:Optional + EstimatedInstanceWarmup *float64 `json:"estimatedInstanceWarmup,omitempty" tf:"estimated_instance_warmup,omitempty"` + + // Aggregation type for the policy's metrics. Valid values are "Minimum", "Maximum", and "Average". Without a value, AWS will treat the aggregation type as "Average". + // +kubebuilder:validation:Optional + MetricAggregationType *string `json:"metricAggregationType,omitempty" tf:"metric_aggregation_type,omitempty"` + + // Minimum value to scale by when adjustment_type is set to PercentChangeInCapacity. + // +kubebuilder:validation:Optional + MinAdjustmentMagnitude *float64 `json:"minAdjustmentMagnitude,omitempty" tf:"min_adjustment_magnitude,omitempty"` + + // Policy type, either "SimpleScaling", "StepScaling", "TargetTrackingScaling", or "PredictiveScaling". If this value isn't provided, AWS will default to "SimpleScaling." + // +kubebuilder:validation:Optional + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // Predictive scaling policy configuration to use with Amazon EC2 Auto Scaling. + // +kubebuilder:validation:Optional + PredictiveScalingConfiguration *PredictiveScalingConfigurationParameters `json:"predictiveScalingConfiguration,omitempty" tf:"predictive_scaling_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Number of instances by which to scale. 
adjustment_type determines the interpretation of this number (e.g., as an absolute number or as a percentage of the existing Auto Scaling group size). A positive increment adds to the current capacity and a negative value removes from the current capacity. + // +kubebuilder:validation:Optional + ScalingAdjustment *float64 `json:"scalingAdjustment,omitempty" tf:"scaling_adjustment,omitempty"` + + // Set of adjustments that manage + // group scaling. These have the following structure: + // +kubebuilder:validation:Optional + StepAdjustment []StepAdjustmentParameters `json:"stepAdjustment,omitempty" tf:"step_adjustment,omitempty"` + + // Target tracking policy. These have the following structure: + // +kubebuilder:validation:Optional + TargetTrackingConfiguration *TargetTrackingConfigurationParameters `json:"targetTrackingConfiguration,omitempty" tf:"target_tracking_configuration,omitempty"` +} + +type PredefinedLoadMetricSpecificationInitParameters struct { + + // Metric type. Valid values are ASGTotalCPUUtilization, ASGTotalNetworkIn, ASGTotalNetworkOut, or ALBTargetGroupRequestCount. + PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedLoadMetricSpecificationObservation struct { + + // Metric type. Valid values are ASGTotalCPUUtilization, ASGTotalNetworkIn, ASGTotalNetworkOut, or ALBTargetGroupRequestCount. 
+ PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedLoadMetricSpecificationParameters struct { + + // Metric type. Valid values are ASGTotalCPUUtilization, ASGTotalNetworkIn, ASGTotalNetworkOut, or ALBTargetGroupRequestCount. + // +kubebuilder:validation:Optional + PredefinedMetricType *string `json:"predefinedMetricType" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + // +kubebuilder:validation:Optional + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedMetricPairSpecificationInitParameters struct { + + // Which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric. For example, if the metric type is ASGCPUUtilization, the Auto Scaling group's total CPU metric is used as the load metric, and the average CPU metric is used for the scaling metric. Valid values are ASGCPUUtilization, ASGNetworkIn, ASGNetworkOut, or ALBRequestCount. 
+ PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedMetricPairSpecificationObservation struct { + + // Which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric. For example, if the metric type is ASGCPUUtilization, the Auto Scaling group's total CPU metric is used as the load metric, and the average CPU metric is used for the scaling metric. Valid values are ASGCPUUtilization, ASGNetworkIn, ASGNetworkOut, or ALBRequestCount. + PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedMetricPairSpecificationParameters struct { + + // Which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric. 
For example, if the metric type is ASGCPUUtilization, the Auto Scaling group's total CPU metric is used as the load metric, and the average CPU metric is used for the scaling metric. Valid values are ASGCPUUtilization, ASGNetworkIn, ASGNetworkOut, or ALBRequestCount. + // +kubebuilder:validation:Optional + PredefinedMetricType *string `json:"predefinedMetricType" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + // +kubebuilder:validation:Optional + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedMetricSpecificationInitParameters struct { + + // Describes a scaling metric for a predictive scaling policy. Valid values are ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedMetricSpecificationObservation struct { + + // Describes a scaling metric for a predictive scaling policy. 
Valid values are ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedMetricSpecificationParameters struct { + + // Describes a scaling metric for a predictive scaling policy. Valid values are ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + // +kubebuilder:validation:Optional + PredefinedMetricType *string `json:"predefinedMetricType" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + // +kubebuilder:validation:Optional + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedScalingMetricSpecificationInitParameters struct { + + // Describes a scaling metric for a predictive scaling policy. Valid values are ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. 
+ PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedScalingMetricSpecificationObservation struct { + + // Describes a scaling metric for a predictive scaling policy. Valid values are ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + PredefinedMetricType *string `json:"predefinedMetricType,omitempty" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedScalingMetricSpecificationParameters struct { + + // Describes a scaling metric for a predictive scaling policy. Valid values are ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. 
+ // +kubebuilder:validation:Optional + PredefinedMetricType *string `json:"predefinedMetricType" tf:"predefined_metric_type,omitempty"` + + // Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to PredefinedMetricSpecification for more information. + // +kubebuilder:validation:Optional + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredictiveScalingConfigurationInitParameters struct { + + // Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Valid values are HonorMaxCapacity or IncreaseMaxCapacity. Default is HonorMaxCapacity. + MaxCapacityBreachBehavior *string `json:"maxCapacityBreachBehavior,omitempty" tf:"max_capacity_breach_behavior,omitempty"` + + // Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. Valid range is 0 to 100. If set to 0, Amazon EC2 Auto Scaling may scale capacity higher than the maximum capacity to equal but not exceed forecast capacity. + MaxCapacityBuffer *string `json:"maxCapacityBuffer,omitempty" tf:"max_capacity_buffer,omitempty"` + + // This structure includes the metrics and target utilization to use for predictive scaling. + MetricSpecification *MetricSpecificationInitParameters `json:"metricSpecification,omitempty" tf:"metric_specification,omitempty"` + + // Predictive scaling mode. Valid values are ForecastAndScale and ForecastOnly. Default is ForecastOnly. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Amount of time, in seconds, by which the instance launch time can be advanced. Minimum is 0. 
+ SchedulingBufferTime *string `json:"schedulingBufferTime,omitempty" tf:"scheduling_buffer_time,omitempty"` +} + +type PredictiveScalingConfigurationObservation struct { + + // Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Valid values are HonorMaxCapacity or IncreaseMaxCapacity. Default is HonorMaxCapacity. + MaxCapacityBreachBehavior *string `json:"maxCapacityBreachBehavior,omitempty" tf:"max_capacity_breach_behavior,omitempty"` + + // Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. Valid range is 0 to 100. If set to 0, Amazon EC2 Auto Scaling may scale capacity higher than the maximum capacity to equal but not exceed forecast capacity. + MaxCapacityBuffer *string `json:"maxCapacityBuffer,omitempty" tf:"max_capacity_buffer,omitempty"` + + // This structure includes the metrics and target utilization to use for predictive scaling. + MetricSpecification *MetricSpecificationObservation `json:"metricSpecification,omitempty" tf:"metric_specification,omitempty"` + + // Predictive scaling mode. Valid values are ForecastAndScale and ForecastOnly. Default is ForecastOnly. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Amount of time, in seconds, by which the instance launch time can be advanced. Minimum is 0. + SchedulingBufferTime *string `json:"schedulingBufferTime,omitempty" tf:"scheduling_buffer_time,omitempty"` +} + +type PredictiveScalingConfigurationParameters struct { + + // Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Valid values are HonorMaxCapacity or IncreaseMaxCapacity. Default is HonorMaxCapacity. 
+ // +kubebuilder:validation:Optional + MaxCapacityBreachBehavior *string `json:"maxCapacityBreachBehavior,omitempty" tf:"max_capacity_breach_behavior,omitempty"` + + // Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. Valid range is 0 to 100. If set to 0, Amazon EC2 Auto Scaling may scale capacity higher than the maximum capacity to equal but not exceed forecast capacity. + // +kubebuilder:validation:Optional + MaxCapacityBuffer *string `json:"maxCapacityBuffer,omitempty" tf:"max_capacity_buffer,omitempty"` + + // This structure includes the metrics and target utilization to use for predictive scaling. + // +kubebuilder:validation:Optional + MetricSpecification *MetricSpecificationParameters `json:"metricSpecification" tf:"metric_specification,omitempty"` + + // Predictive scaling mode. Valid values are ForecastAndScale and ForecastOnly. Default is ForecastOnly. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Amount of time, in seconds, by which the instance launch time can be advanced. Minimum is 0. + // +kubebuilder:validation:Optional + SchedulingBufferTime *string `json:"schedulingBufferTime,omitempty" tf:"scheduling_buffer_time,omitempty"` +} + +type StepAdjustmentInitParameters struct { + + // Lower bound for the + // difference between the alarm threshold and the CloudWatch metric. + // Without a value, AWS will treat this bound as negative infinity. + MetricIntervalLowerBound *string `json:"metricIntervalLowerBound,omitempty" tf:"metric_interval_lower_bound,omitempty"` + + // Upper bound for the + // difference between the alarm threshold and the CloudWatch metric. + // Without a value, AWS will treat this bound as positive infinity. The upper bound + // must be greater than the lower bound. 
+ MetricIntervalUpperBound *string `json:"metricIntervalUpperBound,omitempty" tf:"metric_interval_upper_bound,omitempty"` + + // Number of instances by which to scale. adjustment_type determines the interpretation of this number (e.g., as an absolute number or as a percentage of the existing Auto Scaling group size). A positive increment adds to the current capacity and a negative value removes from the current capacity. + ScalingAdjustment *float64 `json:"scalingAdjustment,omitempty" tf:"scaling_adjustment,omitempty"` +} + +type StepAdjustmentObservation struct { + + // Lower bound for the + // difference between the alarm threshold and the CloudWatch metric. + // Without a value, AWS will treat this bound as negative infinity. + MetricIntervalLowerBound *string `json:"metricIntervalLowerBound,omitempty" tf:"metric_interval_lower_bound,omitempty"` + + // Upper bound for the + // difference between the alarm threshold and the CloudWatch metric. + // Without a value, AWS will treat this bound as positive infinity. The upper bound + // must be greater than the lower bound. + MetricIntervalUpperBound *string `json:"metricIntervalUpperBound,omitempty" tf:"metric_interval_upper_bound,omitempty"` + + // Number of instances by which to scale. adjustment_type determines the interpretation of this number (e.g., as an absolute number or as a percentage of the existing Auto Scaling group size). A positive increment adds to the current capacity and a negative value removes from the current capacity. + ScalingAdjustment *float64 `json:"scalingAdjustment,omitempty" tf:"scaling_adjustment,omitempty"` +} + +type StepAdjustmentParameters struct { + + // Lower bound for the + // difference between the alarm threshold and the CloudWatch metric. + // Without a value, AWS will treat this bound as negative infinity. 
+ // +kubebuilder:validation:Optional + MetricIntervalLowerBound *string `json:"metricIntervalLowerBound,omitempty" tf:"metric_interval_lower_bound,omitempty"` + + // Upper bound for the + // difference between the alarm threshold and the CloudWatch metric. + // Without a value, AWS will treat this bound as positive infinity. The upper bound + // must be greater than the lower bound. + // +kubebuilder:validation:Optional + MetricIntervalUpperBound *string `json:"metricIntervalUpperBound,omitempty" tf:"metric_interval_upper_bound,omitempty"` + + // Number of instances by which to scale. adjustment_type determines the interpretation of this number (e.g., as an absolute number or as a percentage of the existing Auto Scaling group size). A positive increment adds to the current capacity and a negative value removes from the current capacity. + // +kubebuilder:validation:Optional + ScalingAdjustment *float64 `json:"scalingAdjustment" tf:"scaling_adjustment,omitempty"` +} + +type TargetTrackingConfigurationInitParameters struct { + + // Customized metric. Conflicts with predefined_metric_specification. + CustomizedMetricSpecification *CustomizedMetricSpecificationInitParameters `json:"customizedMetricSpecification,omitempty" tf:"customized_metric_specification,omitempty"` + + // Whether scale in by the target tracking policy is disabled. + DisableScaleIn *bool `json:"disableScaleIn,omitempty" tf:"disable_scale_in,omitempty"` + + // Predefined metric. Conflicts with customized_metric_specification. + PredefinedMetricSpecification *PredefinedMetricSpecificationInitParameters `json:"predefinedMetricSpecification,omitempty" tf:"predefined_metric_specification,omitempty"` + + // Target value for the metric. + TargetValue *float64 `json:"targetValue,omitempty" tf:"target_value,omitempty"` +} + +type TargetTrackingConfigurationObservation struct { + + // Customized metric. Conflicts with predefined_metric_specification. 
+ CustomizedMetricSpecification *CustomizedMetricSpecificationObservation `json:"customizedMetricSpecification,omitempty" tf:"customized_metric_specification,omitempty"` + + // Whether scale in by the target tracking policy is disabled. + DisableScaleIn *bool `json:"disableScaleIn,omitempty" tf:"disable_scale_in,omitempty"` + + // Predefined metric. Conflicts with customized_metric_specification. + PredefinedMetricSpecification *PredefinedMetricSpecificationObservation `json:"predefinedMetricSpecification,omitempty" tf:"predefined_metric_specification,omitempty"` + + // Target value for the metric. + TargetValue *float64 `json:"targetValue,omitempty" tf:"target_value,omitempty"` +} + +type TargetTrackingConfigurationParameters struct { + + // Customized metric. Conflicts with predefined_metric_specification. + // +kubebuilder:validation:Optional + CustomizedMetricSpecification *CustomizedMetricSpecificationParameters `json:"customizedMetricSpecification,omitempty" tf:"customized_metric_specification,omitempty"` + + // Whether scale in by the target tracking policy is disabled. + // +kubebuilder:validation:Optional + DisableScaleIn *bool `json:"disableScaleIn,omitempty" tf:"disable_scale_in,omitempty"` + + // Predefined metric. Conflicts with customized_metric_specification. + // +kubebuilder:validation:Optional + PredefinedMetricSpecification *PredefinedMetricSpecificationParameters `json:"predefinedMetricSpecification,omitempty" tf:"predefined_metric_specification,omitempty"` + + // Target value for the metric. + // +kubebuilder:validation:Optional + TargetValue *float64 `json:"targetValue" tf:"target_value,omitempty"` +} + +// PolicySpec defines the desired state of Policy +type PolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PolicyInitParameters `json:"initProvider,omitempty"` +} + +// PolicyStatus defines the observed state of Policy. +type PolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Policy is the Schema for the Policys API. Provides an AutoScaling Scaling Group resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Policy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec PolicySpec `json:"spec"` + Status PolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PolicyList contains a list of Policys +type PolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Policy `json:"items"` +} + +// Repository type metadata. 
+var ( + Policy_Kind = "Policy" + Policy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Policy_Kind}.String() + Policy_KindAPIVersion = Policy_Kind + "." + CRDGroupVersion.String() + Policy_GroupVersionKind = CRDGroupVersion.WithKind(Policy_Kind) +) + +func init() { + SchemeBuilder.Register(&Policy{}, &PolicyList{}) +} diff --git a/apis/autoscaling/v1beta3/zz_autoscalinggroup_terraformed.go b/apis/autoscaling/v1beta3/zz_autoscalinggroup_terraformed.go new file mode 100755 index 0000000000..4bc0b40af1 --- /dev/null +++ b/apis/autoscaling/v1beta3/zz_autoscalinggroup_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AutoscalingGroup +func (mg *AutoscalingGroup) GetTerraformResourceType() string { + return "aws_autoscaling_group" +} + +// GetConnectionDetailsMapping for this AutoscalingGroup +func (tr *AutoscalingGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AutoscalingGroup +func (tr *AutoscalingGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AutoscalingGroup +func (tr *AutoscalingGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AutoscalingGroup +func (tr *AutoscalingGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" 
+ } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AutoscalingGroup +func (tr *AutoscalingGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AutoscalingGroup +func (tr *AutoscalingGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AutoscalingGroup +func (tr *AutoscalingGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this AutoscalingGroup +func (tr *AutoscalingGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AutoscalingGroup using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AutoscalingGroup) LateInitialize(attrs []byte) (bool, error) { + params := &AutoscalingGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("AvailabilityZones")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AutoscalingGroup) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/autoscaling/v1beta3/zz_autoscalinggroup_types.go b/apis/autoscaling/v1beta3/zz_autoscalinggroup_types.go new file mode 100755 index 0000000000..49d2efab91 --- /dev/null +++ b/apis/autoscaling/v1beta3/zz_autoscalinggroup_types.go @@ -0,0 +1,1916 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AcceleratorCountInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum.
+ Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorCountObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorCountParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorTotalMemoryMibInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorTotalMemoryMibObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorTotalMemoryMibParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AlarmSpecificationInitParameters struct { + + // List of Cloudwatch alarms. If any of these alarms goes into ALARM state, Instance Refresh is failed. + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` +} + +type AlarmSpecificationObservation struct { + + // List of Cloudwatch alarms. If any of these alarms goes into ALARM state, Instance Refresh is failed. + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` +} + +type AlarmSpecificationParameters struct { + + // List of Cloudwatch alarms. If any of these alarms goes into ALARM state, Instance Refresh is failed. 
+ // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` +} + +type AutoscalingGroupInitParameters struct { + + // A list of Availability Zones where instances in the Auto Scaling group can be created. Used for launching into the default VPC subnet in each Availability Zone when not using the vpc_zone_identifier attribute, or for attaching a network interface when an existing network interface ID is specified in a launch template. Conflicts with vpc_zone_identifier. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled. + CapacityRebalance *bool `json:"capacityRebalance,omitempty" tf:"capacity_rebalance,omitempty"` + + // Reserved. + Context *string `json:"context,omitempty" tf:"context,omitempty"` + + // Amount of time, in seconds, after a scaling activity completes before another scaling activity can start. + DefaultCooldown *float64 `json:"defaultCooldown,omitempty" tf:"default_cooldown,omitempty"` + + // Amount of time, in seconds, until a newly launched instance can contribute to the Amazon CloudWatch metrics. This delay lets an instance finish initializing before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in more reliable usage data. Set this value equal to the amount of time that it takes for resource consumption to become stable after an instance reaches the InService state. (See Set the default instance warmup for an Auto Scaling group) + DefaultInstanceWarmup *float64 `json:"defaultInstanceWarmup,omitempty" tf:"default_instance_warmup,omitempty"` + + // Number of Amazon EC2 instances that + // should be running in the group. (See also Waiting for + // Capacity below.) + DesiredCapacity *float64 `json:"desiredCapacity,omitempty" tf:"desired_capacity,omitempty"` + + // The unit of measurement for the value specified for desired_capacity. 
Supported for attribute-based instance type selection only. Valid values: "units", "vcpu", "memory-mib". + DesiredCapacityType *string `json:"desiredCapacityType,omitempty" tf:"desired_capacity_type,omitempty"` + + // List of metrics to collect. The allowed values are defined by the underlying AWS API. + // +listType=set + EnabledMetrics []*string `json:"enabledMetrics,omitempty" tf:"enabled_metrics,omitempty"` + + // Allows deleting the Auto Scaling Group without waiting + // for all instances in the pool to terminate. You can force an Auto Scaling Group to delete + // even if it's in the process of scaling a resource. This bypasses that + // behavior and potentially leaves resources dangling. + ForceDelete *bool `json:"forceDelete,omitempty" tf:"force_delete,omitempty"` + + // Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate. + ForceDeleteWarmPool *bool `json:"forceDeleteWarmPool,omitempty" tf:"force_delete_warm_pool,omitempty"` + + // Time (in seconds) after instance comes into service before checking health. + HealthCheckGracePeriod *float64 `json:"healthCheckGracePeriod,omitempty" tf:"health_check_grace_period,omitempty"` + + // "EC2" or "ELB". Controls how health checking is done. + HealthCheckType *string `json:"healthCheckType,omitempty" tf:"health_check_type,omitempty"` + + // Whether to ignore failed Auto Scaling scaling activities while waiting for capacity. The default is false -- failed scaling activities cause errors to be returned. + IgnoreFailedScalingActivities *bool `json:"ignoreFailedScalingActivities,omitempty" tf:"ignore_failed_scaling_activities,omitempty"` + + // One or more + // Lifecycle Hooks + // to attach to the Auto Scaling Group before instances are launched. The + // syntax is exactly the same as the separate + // aws_autoscaling_lifecycle_hook + // resource, without the autoscaling_group_name attribute. 
Please note that this will only work when creating + // a new Auto Scaling Group. For all other use-cases, please use aws_autoscaling_lifecycle_hook resource. + InitialLifecycleHook []InitialLifecycleHookInitParameters `json:"initialLifecycleHook,omitempty" tf:"initial_lifecycle_hook,omitempty"` + + // If this block is configured, add a instance maintenance policy to the specified Auto Scaling group. Defined below. + InstanceMaintenancePolicy *InstanceMaintenancePolicyInitParameters `json:"instanceMaintenancePolicy,omitempty" tf:"instance_maintenance_policy,omitempty"` + + // If this block is configured, start an + // Instance Refresh + // when this Auto Scaling Group is updated. Defined below. + InstanceRefresh *InstanceRefreshInitParameters `json:"instanceRefresh,omitempty" tf:"instance_refresh,omitempty"` + + // Name of the launch configuration to use. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta2.LaunchConfiguration + LaunchConfiguration *string `json:"launchConfiguration,omitempty" tf:"launch_configuration,omitempty"` + + // Reference to a LaunchConfiguration in autoscaling to populate launchConfiguration. + // +kubebuilder:validation:Optional + LaunchConfigurationRef *v1.Reference `json:"launchConfigurationRef,omitempty" tf:"-"` + + // Selector for a LaunchConfiguration in autoscaling to populate launchConfiguration. + // +kubebuilder:validation:Optional + LaunchConfigurationSelector *v1.Selector `json:"launchConfigurationSelector,omitempty" tf:"-"` + + // Nested argument with Launch template specification to use to launch instances. See Launch Template below for more details. + LaunchTemplate *LaunchTemplateInitParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // Maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 86400 and 31536000 seconds. 
+ MaxInstanceLifetime *float64 `json:"maxInstanceLifetime,omitempty" tf:"max_instance_lifetime,omitempty"` + + // Maximum size of the Auto Scaling Group. + MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"` + + // Granularity to associate with the metrics to collect. The only valid value is 1Minute. Default is 1Minute. + MetricsGranularity *string `json:"metricsGranularity,omitempty" tf:"metrics_granularity,omitempty"` + + // Updates will not wait on ELB instance number changes. + // (See also Waiting for Capacity below.) + MinELBCapacity *float64 `json:"minElbCapacity,omitempty" tf:"min_elb_capacity,omitempty"` + + // Minimum size of the Auto Scaling Group. + // (See also Waiting for Capacity below.) + MinSize *float64 `json:"minSize,omitempty" tf:"min_size,omitempty"` + + // Configuration block containing settings to define launch targets for Auto Scaling groups. See Mixed Instances Policy below for more details. + MixedInstancesPolicy *MixedInstancesPolicyInitParameters `json:"mixedInstancesPolicy,omitempty" tf:"mixed_instances_policy,omitempty"` + + // Name of the placement group into which you'll launch your instances, if any. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.PlacementGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + // Reference to a PlacementGroup in ec2 to populate placementGroup. + // +kubebuilder:validation:Optional + PlacementGroupRef *v1.Reference `json:"placementGroupRef,omitempty" tf:"-"` + + // Selector for a PlacementGroup in ec2 to populate placementGroup. + // +kubebuilder:validation:Optional + PlacementGroupSelector *v1.Selector `json:"placementGroupSelector,omitempty" tf:"-"` + + // Whether newly launched instances + // are automatically protected from termination by Amazon EC2 Auto Scaling when + // scaling in. 
For more information about preventing instances from terminating + // on scale in, see Using instance scale-in protection + // in the Amazon EC2 Auto Scaling User Guide. + ProtectFromScaleIn *bool `json:"protectFromScaleIn,omitempty" tf:"protect_from_scale_in,omitempty"` + + // ARN of the service-linked role that the ASG will use to call other AWS services + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ServiceLinkedRoleArn *string `json:"serviceLinkedRoleArn,omitempty" tf:"service_linked_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceLinkedRoleArn. + // +kubebuilder:validation:Optional + ServiceLinkedRoleArnRef *v1.Reference `json:"serviceLinkedRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceLinkedRoleArn. + // +kubebuilder:validation:Optional + ServiceLinkedRoleArnSelector *v1.Selector `json:"serviceLinkedRoleArnSelector,omitempty" tf:"-"` + + // List of processes to suspend for the Auto Scaling Group. The allowed values are Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions, AddToLoadBalancer, InstanceRefresh. + // Note that if you suspend either the Launch or Terminate process types, it can prevent your Auto Scaling Group from functioning properly. + // +listType=set + SuspendedProcesses []*string `json:"suspendedProcesses,omitempty" tf:"suspended_processes,omitempty"` + + // Configuration block(s) containing resource tags. See Tag below for more details. + Tag []TagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` + + // List of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are OldestInstance, NewestInstance, OldestLaunchConfiguration, ClosestToNextInstanceHour, OldestLaunchTemplate, AllocationStrategy, Default. 
Additionally, the ARN of a Lambda function can be specified for custom termination policies. + TerminationPolicies []*string `json:"terminationPolicies,omitempty" tf:"termination_policies,omitempty"` + + // Attaches one or more traffic sources to the specified Auto Scaling group. + TrafficSource []TrafficSourceInitParameters `json:"trafficSource,omitempty" tf:"traffic_source,omitempty"` + + // List of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with availability_zones. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +listType=set + VPCZoneIdentifier []*string `json:"vpcZoneIdentifier,omitempty" tf:"vpc_zone_identifier,omitempty"` + + // References to Subnet in ec2 to populate vpcZoneIdentifier. + // +kubebuilder:validation:Optional + VPCZoneIdentifierRefs []v1.Reference `json:"vpcZoneIdentifierRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate vpcZoneIdentifier. + // +kubebuilder:validation:Optional + VPCZoneIdentifierSelector *v1.Selector `json:"vpcZoneIdentifierSelector,omitempty" tf:"-"` + + // (See also Waiting + // for Capacity below. + WaitForCapacityTimeout *string `json:"waitForCapacityTimeout,omitempty" tf:"wait_for_capacity_timeout,omitempty"` + + // (Takes + // precedence over min_elb_capacity behavior.) + // (See also Waiting for Capacity below.) + WaitForELBCapacity *float64 `json:"waitForElbCapacity,omitempty" tf:"wait_for_elb_capacity,omitempty"` + + // If this block is configured, add a Warm Pool + // to the specified Auto Scaling group. Defined below + WarmPool *WarmPoolInitParameters `json:"warmPool,omitempty" tf:"warm_pool,omitempty"` +} + +type AutoscalingGroupObservation struct { + + // ARN for this Auto Scaling Group + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A list of Availability Zones where instances in the Auto Scaling group can be created. 
Used for launching into the default VPC subnet in each Availability Zone when not using the vpc_zone_identifier attribute, or for attaching a network interface when an existing network interface ID is specified in a launch template. Conflicts with vpc_zone_identifier. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled. + CapacityRebalance *bool `json:"capacityRebalance,omitempty" tf:"capacity_rebalance,omitempty"` + + // Reserved. + Context *string `json:"context,omitempty" tf:"context,omitempty"` + + // Amount of time, in seconds, after a scaling activity completes before another scaling activity can start. + DefaultCooldown *float64 `json:"defaultCooldown,omitempty" tf:"default_cooldown,omitempty"` + + // Amount of time, in seconds, until a newly launched instance can contribute to the Amazon CloudWatch metrics. This delay lets an instance finish initializing before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in more reliable usage data. Set this value equal to the amount of time that it takes for resource consumption to become stable after an instance reaches the InService state. (See Set the default instance warmup for an Auto Scaling group) + DefaultInstanceWarmup *float64 `json:"defaultInstanceWarmup,omitempty" tf:"default_instance_warmup,omitempty"` + + // Number of Amazon EC2 instances that + // should be running in the group. (See also Waiting for + // Capacity below.) + DesiredCapacity *float64 `json:"desiredCapacity,omitempty" tf:"desired_capacity,omitempty"` + + // The unit of measurement for the value specified for desired_capacity. Supported for attribute-based instance type selection only. Valid values: "units", "vcpu", "memory-mib". + DesiredCapacityType *string `json:"desiredCapacityType,omitempty" tf:"desired_capacity_type,omitempty"` + + // List of metrics to collect. 
The allowed values are defined by the underlying AWS API. + // +listType=set + EnabledMetrics []*string `json:"enabledMetrics,omitempty" tf:"enabled_metrics,omitempty"` + + // Allows deleting the Auto Scaling Group without waiting + // for all instances in the pool to terminate. You can force an Auto Scaling Group to delete + // even if it's in the process of scaling a resource. This bypasses that + // behavior and potentially leaves resources dangling. + ForceDelete *bool `json:"forceDelete,omitempty" tf:"force_delete,omitempty"` + + // Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate. + ForceDeleteWarmPool *bool `json:"forceDeleteWarmPool,omitempty" tf:"force_delete_warm_pool,omitempty"` + + // Time (in seconds) after instance comes into service before checking health. + HealthCheckGracePeriod *float64 `json:"healthCheckGracePeriod,omitempty" tf:"health_check_grace_period,omitempty"` + + // "EC2" or "ELB". Controls how health checking is done. + HealthCheckType *string `json:"healthCheckType,omitempty" tf:"health_check_type,omitempty"` + + // Auto Scaling Group id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to ignore failed Auto Scaling scaling activities while waiting for capacity. The default is false -- failed scaling activities cause errors to be returned. + IgnoreFailedScalingActivities *bool `json:"ignoreFailedScalingActivities,omitempty" tf:"ignore_failed_scaling_activities,omitempty"` + + // One or more + // Lifecycle Hooks + // to attach to the Auto Scaling Group before instances are launched. The + // syntax is exactly the same as the separate + // aws_autoscaling_lifecycle_hook + // resource, without the autoscaling_group_name attribute. Please note that this will only work when creating + // a new Auto Scaling Group. For all other use-cases, please use aws_autoscaling_lifecycle_hook resource. 
+ InitialLifecycleHook []InitialLifecycleHookObservation `json:"initialLifecycleHook,omitempty" tf:"initial_lifecycle_hook,omitempty"` + + // If this block is configured, add a instance maintenance policy to the specified Auto Scaling group. Defined below. + InstanceMaintenancePolicy *InstanceMaintenancePolicyObservation `json:"instanceMaintenancePolicy,omitempty" tf:"instance_maintenance_policy,omitempty"` + + // If this block is configured, start an + // Instance Refresh + // when this Auto Scaling Group is updated. Defined below. + InstanceRefresh *InstanceRefreshObservation `json:"instanceRefresh,omitempty" tf:"instance_refresh,omitempty"` + + // Name of the launch configuration to use. + LaunchConfiguration *string `json:"launchConfiguration,omitempty" tf:"launch_configuration,omitempty"` + + // Nested argument with Launch template specification to use to launch instances. See Launch Template below for more details. + LaunchTemplate *LaunchTemplateObservation `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // List of elastic load balancer names to add to the autoscaling + // group names. Only valid for classic load balancers. For ALBs, use target_group_arns instead. To remove all load balancer attachments an empty list should be specified. + // +listType=set + LoadBalancers []*string `json:"loadBalancers,omitempty" tf:"load_balancers,omitempty"` + + // Maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 86400 and 31536000 seconds. + MaxInstanceLifetime *float64 `json:"maxInstanceLifetime,omitempty" tf:"max_instance_lifetime,omitempty"` + + // Maximum size of the Auto Scaling Group. + MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"` + + // Granularity to associate with the metrics to collect. The only valid value is 1Minute. Default is 1Minute. 
+ MetricsGranularity *string `json:"metricsGranularity,omitempty" tf:"metrics_granularity,omitempty"` + + // Updates will not wait on ELB instance number changes. + // (See also Waiting for Capacity below.) + MinELBCapacity *float64 `json:"minElbCapacity,omitempty" tf:"min_elb_capacity,omitempty"` + + // Minimum size of the Auto Scaling Group. + // (See also Waiting for Capacity below.) + MinSize *float64 `json:"minSize,omitempty" tf:"min_size,omitempty"` + + // Configuration block containing settings to define launch targets for Auto Scaling groups. See Mixed Instances Policy below for more details. + MixedInstancesPolicy *MixedInstancesPolicyObservation `json:"mixedInstancesPolicy,omitempty" tf:"mixed_instances_policy,omitempty"` + + // Name of the placement group into which you'll launch your instances, if any. + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + // Predicted capacity of the group. + PredictedCapacity *float64 `json:"predictedCapacity,omitempty" tf:"predicted_capacity,omitempty"` + + // Whether newly launched instances + // are automatically protected from termination by Amazon EC2 Auto Scaling when + // scaling in. For more information about preventing instances from terminating + // on scale in, see Using instance scale-in protection + // in the Amazon EC2 Auto Scaling User Guide. + ProtectFromScaleIn *bool `json:"protectFromScaleIn,omitempty" tf:"protect_from_scale_in,omitempty"` + + // ARN of the service-linked role that the ASG will use to call other AWS services + ServiceLinkedRoleArn *string `json:"serviceLinkedRoleArn,omitempty" tf:"service_linked_role_arn,omitempty"` + + // List of processes to suspend for the Auto Scaling Group. The allowed values are Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions, AddToLoadBalancer, InstanceRefresh. 
+ // Note that if you suspend either the Launch or Terminate process types, it can prevent your Auto Scaling Group from functioning properly. + // +listType=set + SuspendedProcesses []*string `json:"suspendedProcesses,omitempty" tf:"suspended_processes,omitempty"` + + // Configuration block(s) containing resource tags. See Tag below for more details. + Tag []TagObservation `json:"tag,omitempty" tf:"tag,omitempty"` + + // Set of aws_alb_target_group ARNs, for use with Application or Network Load Balancing. To remove all target group attachments an empty list should be specified. + // +listType=set + TargetGroupArns []*string `json:"targetGroupArns,omitempty" tf:"target_group_arns,omitempty"` + + // List of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are OldestInstance, NewestInstance, OldestLaunchConfiguration, ClosestToNextInstanceHour, OldestLaunchTemplate, AllocationStrategy, Default. Additionally, the ARN of a Lambda function can be specified for custom termination policies. + TerminationPolicies []*string `json:"terminationPolicies,omitempty" tf:"termination_policies,omitempty"` + + // Attaches one or more traffic sources to the specified Auto Scaling group. + TrafficSource []TrafficSourceObservation `json:"trafficSource,omitempty" tf:"traffic_source,omitempty"` + + // List of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with availability_zones. + // +listType=set + VPCZoneIdentifier []*string `json:"vpcZoneIdentifier,omitempty" tf:"vpc_zone_identifier,omitempty"` + + // (See also Waiting + // for Capacity below. + WaitForCapacityTimeout *string `json:"waitForCapacityTimeout,omitempty" tf:"wait_for_capacity_timeout,omitempty"` + + // (Takes + // precedence over min_elb_capacity behavior.) + // (See also Waiting for Capacity below.) 
+ WaitForELBCapacity *float64 `json:"waitForElbCapacity,omitempty" tf:"wait_for_elb_capacity,omitempty"` + + // If this block is configured, add a Warm Pool + // to the specified Auto Scaling group. Defined below + WarmPool *WarmPoolObservation `json:"warmPool,omitempty" tf:"warm_pool,omitempty"` + + // Current size of the warm pool. + WarmPoolSize *float64 `json:"warmPoolSize,omitempty" tf:"warm_pool_size,omitempty"` +} + +type AutoscalingGroupParameters struct { + + // A list of Availability Zones where instances in the Auto Scaling group can be created. Used for launching into the default VPC subnet in each Availability Zone when not using the vpc_zone_identifier attribute, or for attaching a network interface when an existing network interface ID is specified in a launch template. Conflicts with vpc_zone_identifier. + // +kubebuilder:validation:Optional + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled. + // +kubebuilder:validation:Optional + CapacityRebalance *bool `json:"capacityRebalance,omitempty" tf:"capacity_rebalance,omitempty"` + + // Reserved. + // +kubebuilder:validation:Optional + Context *string `json:"context,omitempty" tf:"context,omitempty"` + + // Amount of time, in seconds, after a scaling activity completes before another scaling activity can start. + // +kubebuilder:validation:Optional + DefaultCooldown *float64 `json:"defaultCooldown,omitempty" tf:"default_cooldown,omitempty"` + + // Amount of time, in seconds, until a newly launched instance can contribute to the Amazon CloudWatch metrics. This delay lets an instance finish initializing before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in more reliable usage data. Set this value equal to the amount of time that it takes for resource consumption to become stable after an instance reaches the InService state. 
(See Set the default instance warmup for an Auto Scaling group) + // +kubebuilder:validation:Optional + DefaultInstanceWarmup *float64 `json:"defaultInstanceWarmup,omitempty" tf:"default_instance_warmup,omitempty"` + + // Number of Amazon EC2 instances that + // should be running in the group. (See also Waiting for + // Capacity below.) + // +kubebuilder:validation:Optional + DesiredCapacity *float64 `json:"desiredCapacity,omitempty" tf:"desired_capacity,omitempty"` + + // The unit of measurement for the value specified for desired_capacity. Supported for attribute-based instance type selection only. Valid values: "units", "vcpu", "memory-mib". + // +kubebuilder:validation:Optional + DesiredCapacityType *string `json:"desiredCapacityType,omitempty" tf:"desired_capacity_type,omitempty"` + + // List of metrics to collect. The allowed values are defined by the underlying AWS API. + // +kubebuilder:validation:Optional + // +listType=set + EnabledMetrics []*string `json:"enabledMetrics,omitempty" tf:"enabled_metrics,omitempty"` + + // Allows deleting the Auto Scaling Group without waiting + // for all instances in the pool to terminate. You can force an Auto Scaling Group to delete + // even if it's in the process of scaling a resource. This bypasses that + // behavior and potentially leaves resources dangling. + // +kubebuilder:validation:Optional + ForceDelete *bool `json:"forceDelete,omitempty" tf:"force_delete,omitempty"` + + // Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate. + // +kubebuilder:validation:Optional + ForceDeleteWarmPool *bool `json:"forceDeleteWarmPool,omitempty" tf:"force_delete_warm_pool,omitempty"` + + // Time (in seconds) after instance comes into service before checking health. + // +kubebuilder:validation:Optional + HealthCheckGracePeriod *float64 `json:"healthCheckGracePeriod,omitempty" tf:"health_check_grace_period,omitempty"` + + // "EC2" or "ELB". Controls how health checking is done. 
+ // +kubebuilder:validation:Optional + HealthCheckType *string `json:"healthCheckType,omitempty" tf:"health_check_type,omitempty"` + + // Whether to ignore failed Auto Scaling scaling activities while waiting for capacity. The default is false -- failed scaling activities cause errors to be returned. + // +kubebuilder:validation:Optional + IgnoreFailedScalingActivities *bool `json:"ignoreFailedScalingActivities,omitempty" tf:"ignore_failed_scaling_activities,omitempty"` + + // One or more + // Lifecycle Hooks + // to attach to the Auto Scaling Group before instances are launched. The + // syntax is exactly the same as the separate + // aws_autoscaling_lifecycle_hook + // resource, without the autoscaling_group_name attribute. Please note that this will only work when creating + // a new Auto Scaling Group. For all other use-cases, please use aws_autoscaling_lifecycle_hook resource. + // +kubebuilder:validation:Optional + InitialLifecycleHook []InitialLifecycleHookParameters `json:"initialLifecycleHook,omitempty" tf:"initial_lifecycle_hook,omitempty"` + + // If this block is configured, add an instance maintenance policy to the specified Auto Scaling group. Defined below. + // +kubebuilder:validation:Optional + InstanceMaintenancePolicy *InstanceMaintenancePolicyParameters `json:"instanceMaintenancePolicy,omitempty" tf:"instance_maintenance_policy,omitempty"` + + // If this block is configured, start an + // Instance Refresh + // when this Auto Scaling Group is updated. Defined below. + // +kubebuilder:validation:Optional + InstanceRefresh *InstanceRefreshParameters `json:"instanceRefresh,omitempty" tf:"instance_refresh,omitempty"` + + // Name of the launch configuration to use. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta2.LaunchConfiguration + // +kubebuilder:validation:Optional + LaunchConfiguration *string `json:"launchConfiguration,omitempty" tf:"launch_configuration,omitempty"` + + // Reference to a LaunchConfiguration in autoscaling to populate launchConfiguration. + // +kubebuilder:validation:Optional + LaunchConfigurationRef *v1.Reference `json:"launchConfigurationRef,omitempty" tf:"-"` + + // Selector for a LaunchConfiguration in autoscaling to populate launchConfiguration. + // +kubebuilder:validation:Optional + LaunchConfigurationSelector *v1.Selector `json:"launchConfigurationSelector,omitempty" tf:"-"` + + // Nested argument with Launch template specification to use to launch instances. See Launch Template below for more details. + // +kubebuilder:validation:Optional + LaunchTemplate *LaunchTemplateParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // Maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 86400 and 31536000 seconds. + // +kubebuilder:validation:Optional + MaxInstanceLifetime *float64 `json:"maxInstanceLifetime,omitempty" tf:"max_instance_lifetime,omitempty"` + + // Maximum size of the Auto Scaling Group. + // +kubebuilder:validation:Optional + MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"` + + // Granularity to associate with the metrics to collect. The only valid value is 1Minute. Default is 1Minute. + // +kubebuilder:validation:Optional + MetricsGranularity *string `json:"metricsGranularity,omitempty" tf:"metrics_granularity,omitempty"` + + // Updates will not wait on ELB instance number changes. + // (See also Waiting for Capacity below.) + // +kubebuilder:validation:Optional + MinELBCapacity *float64 `json:"minElbCapacity,omitempty" tf:"min_elb_capacity,omitempty"` + + // Minimum size of the Auto Scaling Group. 
+ // (See also Waiting for Capacity below.) + // +kubebuilder:validation:Optional + MinSize *float64 `json:"minSize,omitempty" tf:"min_size,omitempty"` + + // Configuration block containing settings to define launch targets for Auto Scaling groups. See Mixed Instances Policy below for more details. + // +kubebuilder:validation:Optional + MixedInstancesPolicy *MixedInstancesPolicyParameters `json:"mixedInstancesPolicy,omitempty" tf:"mixed_instances_policy,omitempty"` + + // Name of the placement group into which you'll launch your instances, if any. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.PlacementGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + // Reference to a PlacementGroup in ec2 to populate placementGroup. + // +kubebuilder:validation:Optional + PlacementGroupRef *v1.Reference `json:"placementGroupRef,omitempty" tf:"-"` + + // Selector for a PlacementGroup in ec2 to populate placementGroup. + // +kubebuilder:validation:Optional + PlacementGroupSelector *v1.Selector `json:"placementGroupSelector,omitempty" tf:"-"` + + // Whether newly launched instances + // are automatically protected from termination by Amazon EC2 Auto Scaling when + // scaling in. For more information about preventing instances from terminating + // on scale in, see Using instance scale-in protection + // in the Amazon EC2 Auto Scaling User Guide. + // +kubebuilder:validation:Optional + ProtectFromScaleIn *bool `json:"protectFromScaleIn,omitempty" tf:"protect_from_scale_in,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ARN of the service-linked role that the ASG will use to call other AWS services + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ServiceLinkedRoleArn *string `json:"serviceLinkedRoleArn,omitempty" tf:"service_linked_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceLinkedRoleArn. + // +kubebuilder:validation:Optional + ServiceLinkedRoleArnRef *v1.Reference `json:"serviceLinkedRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceLinkedRoleArn. + // +kubebuilder:validation:Optional + ServiceLinkedRoleArnSelector *v1.Selector `json:"serviceLinkedRoleArnSelector,omitempty" tf:"-"` + + // List of processes to suspend for the Auto Scaling Group. The allowed values are Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions, AddToLoadBalancer, InstanceRefresh. + // Note that if you suspend either the Launch or Terminate process types, it can prevent your Auto Scaling Group from functioning properly. + // +kubebuilder:validation:Optional + // +listType=set + SuspendedProcesses []*string `json:"suspendedProcesses,omitempty" tf:"suspended_processes,omitempty"` + + // Configuration block(s) containing resource tags. See Tag below for more details. + // +kubebuilder:validation:Optional + Tag []TagParameters `json:"tag,omitempty" tf:"tag,omitempty"` + + // List of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are OldestInstance, NewestInstance, OldestLaunchConfiguration, ClosestToNextInstanceHour, OldestLaunchTemplate, AllocationStrategy, Default. 
Additionally, the ARN of a Lambda function can be specified for custom termination policies. + // +kubebuilder:validation:Optional + TerminationPolicies []*string `json:"terminationPolicies,omitempty" tf:"termination_policies,omitempty"` + + // Attaches one or more traffic sources to the specified Auto Scaling group. + // +kubebuilder:validation:Optional + TrafficSource []TrafficSourceParameters `json:"trafficSource,omitempty" tf:"traffic_source,omitempty"` + + // List of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with availability_zones. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + // +listType=set + VPCZoneIdentifier []*string `json:"vpcZoneIdentifier,omitempty" tf:"vpc_zone_identifier,omitempty"` + + // References to Subnet in ec2 to populate vpcZoneIdentifier. + // +kubebuilder:validation:Optional + VPCZoneIdentifierRefs []v1.Reference `json:"vpcZoneIdentifierRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate vpcZoneIdentifier. + // +kubebuilder:validation:Optional + VPCZoneIdentifierSelector *v1.Selector `json:"vpcZoneIdentifierSelector,omitempty" tf:"-"` + + // (See also Waiting + // for Capacity below. + // +kubebuilder:validation:Optional + WaitForCapacityTimeout *string `json:"waitForCapacityTimeout,omitempty" tf:"wait_for_capacity_timeout,omitempty"` + + // (Takes + // precedence over min_elb_capacity behavior.) + // (See also Waiting for Capacity below.) + // +kubebuilder:validation:Optional + WaitForELBCapacity *float64 `json:"waitForElbCapacity,omitempty" tf:"wait_for_elb_capacity,omitempty"` + + // If this block is configured, add a Warm Pool + // to the specified Auto Scaling group. 
Defined below + // +kubebuilder:validation:Optional + WarmPool *WarmPoolParameters `json:"warmPool,omitempty" tf:"warm_pool,omitempty"` +} + +type BaselineEBSBandwidthMbpsInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type BaselineEBSBandwidthMbpsObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type BaselineEBSBandwidthMbpsParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InitialLifecycleHookInitParameters struct { + DefaultResult *string `json:"defaultResult,omitempty" tf:"default_result,omitempty"` + + HeartbeatTimeout *float64 `json:"heartbeatTimeout,omitempty" tf:"heartbeat_timeout,omitempty"` + + LifecycleTransition *string `json:"lifecycleTransition,omitempty" tf:"lifecycle_transition,omitempty"` + + // Name of the Auto Scaling Group. Conflicts with name_prefix. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + NotificationMetadata *string `json:"notificationMetadata,omitempty" tf:"notification_metadata,omitempty"` + + // ARN for this Auto Scaling Group + NotificationTargetArn *string `json:"notificationTargetArn,omitempty" tf:"notification_target_arn,omitempty"` + + // ARN for this Auto Scaling Group + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type InitialLifecycleHookObservation struct { + DefaultResult *string `json:"defaultResult,omitempty" tf:"default_result,omitempty"` + + HeartbeatTimeout *float64 `json:"heartbeatTimeout,omitempty" tf:"heartbeat_timeout,omitempty"` + + LifecycleTransition *string `json:"lifecycleTransition,omitempty" tf:"lifecycle_transition,omitempty"` + + // Name of the Auto Scaling Group. Conflicts with name_prefix. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + NotificationMetadata *string `json:"notificationMetadata,omitempty" tf:"notification_metadata,omitempty"` + + // ARN for this Auto Scaling Group + NotificationTargetArn *string `json:"notificationTargetArn,omitempty" tf:"notification_target_arn,omitempty"` + + // ARN for this Auto Scaling Group + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type InitialLifecycleHookParameters struct { + + // +kubebuilder:validation:Optional + DefaultResult *string `json:"defaultResult,omitempty" tf:"default_result,omitempty"` + + // +kubebuilder:validation:Optional + HeartbeatTimeout *float64 `json:"heartbeatTimeout,omitempty" tf:"heartbeat_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LifecycleTransition *string `json:"lifecycleTransition" tf:"lifecycle_transition,omitempty"` + + // Name of the Auto Scaling Group. Conflicts with name_prefix. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // +kubebuilder:validation:Optional + NotificationMetadata *string `json:"notificationMetadata,omitempty" tf:"notification_metadata,omitempty"` + + // ARN for this Auto Scaling Group + // +kubebuilder:validation:Optional + NotificationTargetArn *string `json:"notificationTargetArn,omitempty" tf:"notification_target_arn,omitempty"` + + // ARN for this Auto Scaling Group + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type InstanceMaintenancePolicyInitParameters struct { + + // Amount of capacity in the Auto Scaling group that can be in service and healthy, or pending, to support your workload when an instance refresh is in place, as a percentage of the desired capacity of the Auto Scaling group. Values must be between 100 and 200, defaults to 100. + MaxHealthyPercentage *float64 `json:"maxHealthyPercentage,omitempty" tf:"max_healthy_percentage,omitempty"` + + // Amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to 90. + MinHealthyPercentage *float64 `json:"minHealthyPercentage,omitempty" tf:"min_healthy_percentage,omitempty"` +} + +type InstanceMaintenancePolicyObservation struct { + + // Amount of capacity in the Auto Scaling group that can be in service and healthy, or pending, to support your workload when an instance refresh is in place, as a percentage of the desired capacity of the Auto Scaling group. Values must be between 100 and 200, defaults to 100. 
+ MaxHealthyPercentage *float64 `json:"maxHealthyPercentage,omitempty" tf:"max_healthy_percentage,omitempty"` + + // Amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to 90. + MinHealthyPercentage *float64 `json:"minHealthyPercentage,omitempty" tf:"min_healthy_percentage,omitempty"` +} + +type InstanceMaintenancePolicyParameters struct { + + // Amount of capacity in the Auto Scaling group that can be in service and healthy, or pending, to support your workload when an instance refresh is in place, as a percentage of the desired capacity of the Auto Scaling group. Values must be between 100 and 200, defaults to 100. + // +kubebuilder:validation:Optional + MaxHealthyPercentage *float64 `json:"maxHealthyPercentage" tf:"max_healthy_percentage,omitempty"` + + // Amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to 90. + // +kubebuilder:validation:Optional + MinHealthyPercentage *float64 `json:"minHealthyPercentage" tf:"min_healthy_percentage,omitempty"` +} + +type InstanceRefreshInitParameters struct { + + // Override default parameters for Instance Refresh. + Preferences *PreferencesInitParameters `json:"preferences,omitempty" tf:"preferences,omitempty"` + + // Strategy to use for instance refresh. The only allowed value is Rolling. See StartInstanceRefresh Action for more information. + Strategy *string `json:"strategy,omitempty" tf:"strategy,omitempty"` + + // Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of launch_configuration, launch_template, or mixed_instances_policy. 
+ // +listType=set + Triggers []*string `json:"triggers,omitempty" tf:"triggers,omitempty"` +} + +type InstanceRefreshObservation struct { + + // Override default parameters for Instance Refresh. + Preferences *PreferencesObservation `json:"preferences,omitempty" tf:"preferences,omitempty"` + + // Strategy to use for instance refresh. The only allowed value is Rolling. See StartInstanceRefresh Action for more information. + Strategy *string `json:"strategy,omitempty" tf:"strategy,omitempty"` + + // Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of launch_configuration, launch_template, or mixed_instances_policy. + // +listType=set + Triggers []*string `json:"triggers,omitempty" tf:"triggers,omitempty"` +} + +type InstanceRefreshParameters struct { + + // Override default parameters for Instance Refresh. + // +kubebuilder:validation:Optional + Preferences *PreferencesParameters `json:"preferences,omitempty" tf:"preferences,omitempty"` + + // Strategy to use for instance refresh. The only allowed value is Rolling. See StartInstanceRefresh Action for more information. + // +kubebuilder:validation:Optional + Strategy *string `json:"strategy" tf:"strategy,omitempty"` + + // Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of launch_configuration, launch_template, or mixed_instances_policy. + // +kubebuilder:validation:Optional + // +listType=set + Triggers []*string `json:"triggers,omitempty" tf:"triggers,omitempty"` +} + +type InstanceRequirementsInitParameters struct { + + // Block describing the minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips). Default is no minimum or maximum. + AcceleratorCount *AcceleratorCountInitParameters `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // List of accelerator manufacturer names. Default is any manufacturer. 
+ // +listType=set + AcceleratorManufacturers []*string `json:"acceleratorManufacturers,omitempty" tf:"accelerator_manufacturers,omitempty"` + + // List of accelerator names. Default is any accelerator. + // +listType=set + AcceleratorNames []*string `json:"acceleratorNames,omitempty" tf:"accelerator_names,omitempty"` + + // Block describing the minimum and maximum total memory of the accelerators. Default is no minimum or maximum. + AcceleratorTotalMemoryMib *AcceleratorTotalMemoryMibInitParameters `json:"acceleratorTotalMemoryMib,omitempty" tf:"accelerator_total_memory_mib,omitempty"` + + // List of accelerator types. Default is any accelerator type. + // +listType=set + AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"` + + // List of instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are allowing the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are allowing all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is all instance types. + // +listType=set + AllowedInstanceTypes []*string `json:"allowedInstanceTypes,omitempty" tf:"allowed_instance_types,omitempty"` + + // Indicate whether bare metal instance types should be included, excluded, or required. Default is excluded. + BareMetal *string `json:"bareMetal,omitempty" tf:"bare_metal,omitempty"` + + // Block describing the minimum and maximum baseline EBS bandwidth, in Mbps. Default is no minimum or maximum. 
+ BaselineEBSBandwidthMbps *BaselineEBSBandwidthMbpsInitParameters `json:"baselineEbsBandwidthMbps,omitempty" tf:"baseline_ebs_bandwidth_mbps,omitempty"` + + // Indicate whether burstable performance instance types should be included, excluded, or required. Default is excluded. + BurstablePerformance *string `json:"burstablePerformance,omitempty" tf:"burstable_performance,omitempty"` + + // List of CPU manufacturer names. Default is any manufacturer. + // +listType=set + CPUManufacturers []*string `json:"cpuManufacturers,omitempty" tf:"cpu_manufacturers,omitempty"` + + // List of instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are excluding all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is no excluded instance types. + // +listType=set + ExcludedInstanceTypes []*string `json:"excludedInstanceTypes,omitempty" tf:"excluded_instance_types,omitempty"` + + // List of instance generation names. Default is any generation. + // +listType=set + InstanceGenerations []*string `json:"instanceGenerations,omitempty" tf:"instance_generations,omitempty"` + + // Indicate whether instance types with local storage volumes are included, excluded, or required. Default is included. + LocalStorage *string `json:"localStorage,omitempty" tf:"local_storage,omitempty"` + + // List of local storage type names. Default any storage type. + // +listType=set + LocalStorageTypes []*string `json:"localStorageTypes,omitempty" tf:"local_storage_types,omitempty"` + + // Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. 
+ MemoryGibPerVcpu *MemoryGibPerVcpuInitParameters `json:"memoryGibPerVcpu,omitempty" tf:"memory_gib_per_vcpu,omitempty"` + + // Block describing the minimum and maximum amount of memory (MiB). Default is no maximum. + MemoryMib *MemoryMibInitParameters `json:"memoryMib,omitempty" tf:"memory_mib,omitempty"` + + // Block describing the minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default is no minimum or maximum. + NetworkBandwidthGbps *NetworkBandwidthGbpsInitParameters `json:"networkBandwidthGbps,omitempty" tf:"network_bandwidth_gbps,omitempty"` + + // Block describing the minimum and maximum number of network interfaces. Default is no minimum or maximum. + NetworkInterfaceCount *NetworkInterfaceCountInitParameters `json:"networkInterfaceCount,omitempty" tf:"network_interface_count,omitempty"` + + // Price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 20. + OnDemandMaxPricePercentageOverLowestPrice *float64 `json:"onDemandMaxPricePercentageOverLowestPrice,omitempty" tf:"on_demand_max_price_percentage_over_lowest_price,omitempty"` + + // Indicate whether instance types must support On-Demand Instance Hibernation, either true or false. Default is false. + RequireHibernateSupport *bool `json:"requireHibernateSupport,omitempty" tf:"require_hibernate_support,omitempty"` + + // Price protection threshold for Spot Instances. 
This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. + SpotMaxPricePercentageOverLowestPrice *float64 `json:"spotMaxPricePercentageOverLowestPrice,omitempty" tf:"spot_max_price_percentage_over_lowest_price,omitempty"` + + // Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. + TotalLocalStorageGb *TotalLocalStorageGbInitParameters `json:"totalLocalStorageGb,omitempty" tf:"total_local_storage_gb,omitempty"` + + // Block describing the minimum and maximum number of vCPUs. Default is no maximum. + VcpuCount *VcpuCountInitParameters `json:"vcpuCount,omitempty" tf:"vcpu_count,omitempty"` +} + +type InstanceRequirementsObservation struct { + + // Block describing the minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips). Default is no minimum or maximum. + AcceleratorCount *AcceleratorCountObservation `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // List of accelerator manufacturer names. Default is any manufacturer. + // +listType=set + AcceleratorManufacturers []*string `json:"acceleratorManufacturers,omitempty" tf:"accelerator_manufacturers,omitempty"` + + // List of accelerator names. Default is any accelerator. + // +listType=set + AcceleratorNames []*string `json:"acceleratorNames,omitempty" tf:"accelerator_names,omitempty"` + + // Block describing the minimum and maximum total memory of the accelerators. Default is no minimum or maximum. 
+ AcceleratorTotalMemoryMib *AcceleratorTotalMemoryMibObservation `json:"acceleratorTotalMemoryMib,omitempty" tf:"accelerator_total_memory_mib,omitempty"` + + // List of accelerator types. Default is any accelerator type. + // +listType=set + AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"` + + // List of instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are allowing the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are allowing all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is all instance types. + // +listType=set + AllowedInstanceTypes []*string `json:"allowedInstanceTypes,omitempty" tf:"allowed_instance_types,omitempty"` + + // Indicate whether bare metal instance types should be included, excluded, or required. Default is excluded. + BareMetal *string `json:"bareMetal,omitempty" tf:"bare_metal,omitempty"` + + // Block describing the minimum and maximum baseline EBS bandwidth, in Mbps. Default is no minimum or maximum. + BaselineEBSBandwidthMbps *BaselineEBSBandwidthMbpsObservation `json:"baselineEbsBandwidthMbps,omitempty" tf:"baseline_ebs_bandwidth_mbps,omitempty"` + + // Indicate whether burstable performance instance types should be included, excluded, or required. Default is excluded. + BurstablePerformance *string `json:"burstablePerformance,omitempty" tf:"burstable_performance,omitempty"` + + // List of CPU manufacturer names. Default is any manufacturer. 
+ // +listType=set + CPUManufacturers []*string `json:"cpuManufacturers,omitempty" tf:"cpu_manufacturers,omitempty"` + + // List of instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are excluding all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is no excluded instance types. + // +listType=set + ExcludedInstanceTypes []*string `json:"excludedInstanceTypes,omitempty" tf:"excluded_instance_types,omitempty"` + + // List of instance generation names. Default is any generation. + // +listType=set + InstanceGenerations []*string `json:"instanceGenerations,omitempty" tf:"instance_generations,omitempty"` + + // Indicate whether instance types with local storage volumes are included, excluded, or required. Default is included. + LocalStorage *string `json:"localStorage,omitempty" tf:"local_storage,omitempty"` + + // List of local storage type names. Default any storage type. + // +listType=set + LocalStorageTypes []*string `json:"localStorageTypes,omitempty" tf:"local_storage_types,omitempty"` + + // Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. + MemoryGibPerVcpu *MemoryGibPerVcpuObservation `json:"memoryGibPerVcpu,omitempty" tf:"memory_gib_per_vcpu,omitempty"` + + // Block describing the minimum and maximum amount of memory (MiB). Default is no maximum. + MemoryMib *MemoryMibObservation `json:"memoryMib,omitempty" tf:"memory_mib,omitempty"` + + // Block describing the minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default is no minimum or maximum. 
+ NetworkBandwidthGbps *NetworkBandwidthGbpsObservation `json:"networkBandwidthGbps,omitempty" tf:"network_bandwidth_gbps,omitempty"` + + // Block describing the minimum and maximum number of network interfaces. Default is no minimum or maximum. + NetworkInterfaceCount *NetworkInterfaceCountObservation `json:"networkInterfaceCount,omitempty" tf:"network_interface_count,omitempty"` + + // Price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 20. + OnDemandMaxPricePercentageOverLowestPrice *float64 `json:"onDemandMaxPricePercentageOverLowestPrice,omitempty" tf:"on_demand_max_price_percentage_over_lowest_price,omitempty"` + + // Indicate whether instance types must support On-Demand Instance Hibernation, either true or false. Default is false. + RequireHibernateSupport *bool `json:"requireHibernateSupport,omitempty" tf:"require_hibernate_support,omitempty"` + + // Price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. 
+ SpotMaxPricePercentageOverLowestPrice *float64 `json:"spotMaxPricePercentageOverLowestPrice,omitempty" tf:"spot_max_price_percentage_over_lowest_price,omitempty"` + + // Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. + TotalLocalStorageGb *TotalLocalStorageGbObservation `json:"totalLocalStorageGb,omitempty" tf:"total_local_storage_gb,omitempty"` + + // Block describing the minimum and maximum number of vCPUs. Default is no maximum. + VcpuCount *VcpuCountObservation `json:"vcpuCount,omitempty" tf:"vcpu_count,omitempty"` +} + +type InstanceRequirementsParameters struct { + + // Block describing the minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips). Default is no minimum or maximum. + // +kubebuilder:validation:Optional + AcceleratorCount *AcceleratorCountParameters `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // List of accelerator manufacturer names. Default is any manufacturer. + // +kubebuilder:validation:Optional + // +listType=set + AcceleratorManufacturers []*string `json:"acceleratorManufacturers,omitempty" tf:"accelerator_manufacturers,omitempty"` + + // List of accelerator names. Default is any accelerator. + // +kubebuilder:validation:Optional + // +listType=set + AcceleratorNames []*string `json:"acceleratorNames,omitempty" tf:"accelerator_names,omitempty"` + + // Block describing the minimum and maximum total memory of the accelerators. Default is no minimum or maximum. + // +kubebuilder:validation:Optional + AcceleratorTotalMemoryMib *AcceleratorTotalMemoryMibParameters `json:"acceleratorTotalMemoryMib,omitempty" tf:"accelerator_total_memory_mib,omitempty"` + + // List of accelerator types. Default is any accelerator type. 
+ // +kubebuilder:validation:Optional + // +listType=set + AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"` + + // List of instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are allowing the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are allowing all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is all instance types. + // +kubebuilder:validation:Optional + // +listType=set + AllowedInstanceTypes []*string `json:"allowedInstanceTypes,omitempty" tf:"allowed_instance_types,omitempty"` + + // Indicate whether bare metal instance types should be included, excluded, or required. Default is excluded. + // +kubebuilder:validation:Optional + BareMetal *string `json:"bareMetal,omitempty" tf:"bare_metal,omitempty"` + + // Block describing the minimum and maximum baseline EBS bandwidth, in Mbps. Default is no minimum or maximum. + // +kubebuilder:validation:Optional + BaselineEBSBandwidthMbps *BaselineEBSBandwidthMbpsParameters `json:"baselineEbsBandwidthMbps,omitempty" tf:"baseline_ebs_bandwidth_mbps,omitempty"` + + // Indicate whether burstable performance instance types should be included, excluded, or required. Default is excluded. + // +kubebuilder:validation:Optional + BurstablePerformance *string `json:"burstablePerformance,omitempty" tf:"burstable_performance,omitempty"` + + // List of CPU manufacturer names. Default is any manufacturer. 
+ // +kubebuilder:validation:Optional + // +listType=set + CPUManufacturers []*string `json:"cpuManufacturers,omitempty" tf:"cpu_manufacturers,omitempty"` + + // List of instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are excluding all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is no excluded instance types. + // +kubebuilder:validation:Optional + // +listType=set + ExcludedInstanceTypes []*string `json:"excludedInstanceTypes,omitempty" tf:"excluded_instance_types,omitempty"` + + // List of instance generation names. Default is any generation. + // +kubebuilder:validation:Optional + // +listType=set + InstanceGenerations []*string `json:"instanceGenerations,omitempty" tf:"instance_generations,omitempty"` + + // Indicate whether instance types with local storage volumes are included, excluded, or required. Default is included. + // +kubebuilder:validation:Optional + LocalStorage *string `json:"localStorage,omitempty" tf:"local_storage,omitempty"` + + // List of local storage type names. Default any storage type. + // +kubebuilder:validation:Optional + // +listType=set + LocalStorageTypes []*string `json:"localStorageTypes,omitempty" tf:"local_storage_types,omitempty"` + + // Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. + // +kubebuilder:validation:Optional + MemoryGibPerVcpu *MemoryGibPerVcpuParameters `json:"memoryGibPerVcpu,omitempty" tf:"memory_gib_per_vcpu,omitempty"` + + // Block describing the minimum and maximum amount of memory (MiB). Default is no maximum. 
+ // +kubebuilder:validation:Optional + MemoryMib *MemoryMibParameters `json:"memoryMib,omitempty" tf:"memory_mib,omitempty"` + + // Block describing the minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default is no minimum or maximum. + // +kubebuilder:validation:Optional + NetworkBandwidthGbps *NetworkBandwidthGbpsParameters `json:"networkBandwidthGbps,omitempty" tf:"network_bandwidth_gbps,omitempty"` + + // Block describing the minimum and maximum number of network interfaces. Default is no minimum or maximum. + // +kubebuilder:validation:Optional + NetworkInterfaceCount *NetworkInterfaceCountParameters `json:"networkInterfaceCount,omitempty" tf:"network_interface_count,omitempty"` + + // Price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 20. + // +kubebuilder:validation:Optional + OnDemandMaxPricePercentageOverLowestPrice *float64 `json:"onDemandMaxPricePercentageOverLowestPrice,omitempty" tf:"on_demand_max_price_percentage_over_lowest_price,omitempty"` + + // Indicate whether instance types must support On-Demand Instance Hibernation, either true or false. Default is false. + // +kubebuilder:validation:Optional + RequireHibernateSupport *bool `json:"requireHibernateSupport,omitempty" tf:"require_hibernate_support,omitempty"` + + // Price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. 
When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. + // +kubebuilder:validation:Optional + SpotMaxPricePercentageOverLowestPrice *float64 `json:"spotMaxPricePercentageOverLowestPrice,omitempty" tf:"spot_max_price_percentage_over_lowest_price,omitempty"` + + // Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. + // +kubebuilder:validation:Optional + TotalLocalStorageGb *TotalLocalStorageGbParameters `json:"totalLocalStorageGb,omitempty" tf:"total_local_storage_gb,omitempty"` + + // Block describing the minimum and maximum number of vCPUs. Default is no maximum. + // +kubebuilder:validation:Optional + VcpuCount *VcpuCountParameters `json:"vcpuCount,omitempty" tf:"vcpu_count,omitempty"` +} + +type InstanceReusePolicyInitParameters struct { + + // Whether instances in the Auto Scaling group can be returned to the warm pool on scale in. + ReuseOnScaleIn *bool `json:"reuseOnScaleIn,omitempty" tf:"reuse_on_scale_in,omitempty"` +} + +type InstanceReusePolicyObservation struct { + + // Whether instances in the Auto Scaling group can be returned to the warm pool on scale in. + ReuseOnScaleIn *bool `json:"reuseOnScaleIn,omitempty" tf:"reuse_on_scale_in,omitempty"` +} + +type InstanceReusePolicyParameters struct { + + // Whether instances in the Auto Scaling group can be returned to the warm pool on scale in. + // +kubebuilder:validation:Optional + ReuseOnScaleIn *bool `json:"reuseOnScaleIn,omitempty" tf:"reuse_on_scale_in,omitempty"` +} + +type InstancesDistributionInitParameters struct { + + // Strategy to use when launching on-demand instances. Valid values: prioritized, lowest-price. Default: prioritized. 
+ OnDemandAllocationStrategy *string `json:"onDemandAllocationStrategy,omitempty" tf:"on_demand_allocation_strategy,omitempty"` + + // Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances. Default: 0. + OnDemandBaseCapacity *float64 `json:"onDemandBaseCapacity,omitempty" tf:"on_demand_base_capacity,omitempty"` + + // Percentage split between on-demand and Spot instances above the base on-demand capacity. Default: 100. + OnDemandPercentageAboveBaseCapacity *float64 `json:"onDemandPercentageAboveBaseCapacity,omitempty" tf:"on_demand_percentage_above_base_capacity,omitempty"` + + // How to allocate capacity across the Spot pools. Valid values: lowest-price, capacity-optimized, capacity-optimized-prioritized, and price-capacity-optimized. Default: lowest-price. + SpotAllocationStrategy *string `json:"spotAllocationStrategy,omitempty" tf:"spot_allocation_strategy,omitempty"` + + // Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify. Only available with spot_allocation_strategy set to lowest-price. Otherwise it must be set to 0, if it has been defined before. Default: 2. + SpotInstancePools *float64 `json:"spotInstancePools,omitempty" tf:"spot_instance_pools,omitempty"` + + // Maximum price per unit hour that the user is willing to pay for the Spot instances. Default: an empty string which means the on-demand price. + SpotMaxPrice *string `json:"spotMaxPrice,omitempty" tf:"spot_max_price,omitempty"` +} + +type InstancesDistributionObservation struct { + + // Strategy to use when launching on-demand instances. Valid values: prioritized, lowest-price. Default: prioritized. 
+ OnDemandAllocationStrategy *string `json:"onDemandAllocationStrategy,omitempty" tf:"on_demand_allocation_strategy,omitempty"` + + // Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances. Default: 0. + OnDemandBaseCapacity *float64 `json:"onDemandBaseCapacity,omitempty" tf:"on_demand_base_capacity,omitempty"` + + // Percentage split between on-demand and Spot instances above the base on-demand capacity. Default: 100. + OnDemandPercentageAboveBaseCapacity *float64 `json:"onDemandPercentageAboveBaseCapacity,omitempty" tf:"on_demand_percentage_above_base_capacity,omitempty"` + + // How to allocate capacity across the Spot pools. Valid values: lowest-price, capacity-optimized, capacity-optimized-prioritized, and price-capacity-optimized. Default: lowest-price. + SpotAllocationStrategy *string `json:"spotAllocationStrategy,omitempty" tf:"spot_allocation_strategy,omitempty"` + + // Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify. Only available with spot_allocation_strategy set to lowest-price. Otherwise it must be set to 0, if it has been defined before. Default: 2. + SpotInstancePools *float64 `json:"spotInstancePools,omitempty" tf:"spot_instance_pools,omitempty"` + + // Maximum price per unit hour that the user is willing to pay for the Spot instances. Default: an empty string which means the on-demand price. + SpotMaxPrice *string `json:"spotMaxPrice,omitempty" tf:"spot_max_price,omitempty"` +} + +type InstancesDistributionParameters struct { + + // Strategy to use when launching on-demand instances. Valid values: prioritized, lowest-price. Default: prioritized. 
+ // +kubebuilder:validation:Optional + OnDemandAllocationStrategy *string `json:"onDemandAllocationStrategy,omitempty" tf:"on_demand_allocation_strategy,omitempty"` + + // Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances. Default: 0. + // +kubebuilder:validation:Optional + OnDemandBaseCapacity *float64 `json:"onDemandBaseCapacity,omitempty" tf:"on_demand_base_capacity,omitempty"` + + // Percentage split between on-demand and Spot instances above the base on-demand capacity. Default: 100. + // +kubebuilder:validation:Optional + OnDemandPercentageAboveBaseCapacity *float64 `json:"onDemandPercentageAboveBaseCapacity,omitempty" tf:"on_demand_percentage_above_base_capacity,omitempty"` + + // How to allocate capacity across the Spot pools. Valid values: lowest-price, capacity-optimized, capacity-optimized-prioritized, and price-capacity-optimized. Default: lowest-price. + // +kubebuilder:validation:Optional + SpotAllocationStrategy *string `json:"spotAllocationStrategy,omitempty" tf:"spot_allocation_strategy,omitempty"` + + // Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify. Only available with spot_allocation_strategy set to lowest-price. Otherwise it must be set to 0, if it has been defined before. Default: 2. + // +kubebuilder:validation:Optional + SpotInstancePools *float64 `json:"spotInstancePools,omitempty" tf:"spot_instance_pools,omitempty"` + + // Maximum price per unit hour that the user is willing to pay for the Spot instances. Default: an empty string which means the on-demand price. + // +kubebuilder:validation:Optional + SpotMaxPrice *string `json:"spotMaxPrice,omitempty" tf:"spot_max_price,omitempty"` +} + +type LaunchTemplateInitParameters struct { + + // ID of the launch template. Conflicts with name. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + + // Name of the launch template. Conflicts with id. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template version. Can be version number, $Latest, or $Default. (Default: $Default). + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateObservation struct { + + // ID of the launch template. Conflicts with name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the launch template. Conflicts with id. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template version. Can be version number, $Latest, or $Default. (Default: $Default). + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateParameters struct { + + // ID of the launch template. Conflicts with name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate id. 
+ // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + + // Name of the launch template. Conflicts with id. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template version. Can be version number, $Latest, or $Default. (Default: $Default). + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateSpecificationInitParameters struct { + + // ID of the launch template. Conflicts with launch_template_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate launchTemplateId. + // +kubebuilder:validation:Optional + LaunchTemplateIDRef *v1.Reference `json:"launchTemplateIdRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate launchTemplateId. + // +kubebuilder:validation:Optional + LaunchTemplateIDSelector *v1.Selector `json:"launchTemplateIdSelector,omitempty" tf:"-"` + + // Name of the launch template. Conflicts with launch_template_id. + LaunchTemplateName *string `json:"launchTemplateName,omitempty" tf:"launch_template_name,omitempty"` + + // Template version. Can be version number, $Latest, or $Default. (Default: $Default). + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateSpecificationObservation struct { + + // ID of the launch template. Conflicts with launch_template_name. + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` + + // Name of the launch template. Conflicts with launch_template_id. 
+ LaunchTemplateName *string `json:"launchTemplateName,omitempty" tf:"launch_template_name,omitempty"` + + // Template version. Can be version number, $Latest, or $Default. (Default: $Default). + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateSpecificationParameters struct { + + // ID of the launch template. Conflicts with launch_template_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate launchTemplateId. + // +kubebuilder:validation:Optional + LaunchTemplateIDRef *v1.Reference `json:"launchTemplateIdRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate launchTemplateId. + // +kubebuilder:validation:Optional + LaunchTemplateIDSelector *v1.Selector `json:"launchTemplateIdSelector,omitempty" tf:"-"` + + // Name of the launch template. Conflicts with launch_template_id. + // +kubebuilder:validation:Optional + LaunchTemplateName *string `json:"launchTemplateName,omitempty" tf:"launch_template_name,omitempty"` + + // Template version. Can be version number, $Latest, or $Default. (Default: $Default). + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type MemoryGibPerVcpuInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryGibPerVcpuObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryGibPerVcpuParameters struct { + + // Maximum. 
+ // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryMibInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryMibObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryMibParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MixedInstancesPolicyInitParameters struct { + + // Nested argument containing settings on how to mix on-demand and Spot instances in the Auto Scaling group. Defined below. + InstancesDistribution *InstancesDistributionInitParameters `json:"instancesDistribution,omitempty" tf:"instances_distribution,omitempty"` + + // Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below. + LaunchTemplate *MixedInstancesPolicyLaunchTemplateInitParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` +} + +type MixedInstancesPolicyLaunchTemplateInitParameters struct { + + // Nested argument defines the Launch Template. Defined below. + LaunchTemplateSpecification *LaunchTemplateSpecificationInitParameters `json:"launchTemplateSpecification,omitempty" tf:"launch_template_specification,omitempty"` + + // List of nested arguments provides the ability to specify multiple instance types. This will override the same parameter in the launch template. 
For on-demand instances, Auto Scaling considers the order of preference of instance types to launch based on the order specified in the overrides list. Defined below. + Override []OverrideInitParameters `json:"override,omitempty" tf:"override,omitempty"` +} + +type MixedInstancesPolicyLaunchTemplateObservation struct { + + // Nested argument defines the Launch Template. Defined below. + LaunchTemplateSpecification *LaunchTemplateSpecificationObservation `json:"launchTemplateSpecification,omitempty" tf:"launch_template_specification,omitempty"` + + // List of nested arguments provides the ability to specify multiple instance types. This will override the same parameter in the launch template. For on-demand instances, Auto Scaling considers the order of preference of instance types to launch based on the order specified in the overrides list. Defined below. + Override []OverrideObservation `json:"override,omitempty" tf:"override,omitempty"` +} + +type MixedInstancesPolicyLaunchTemplateParameters struct { + + // Nested argument defines the Launch Template. Defined below. + // +kubebuilder:validation:Optional + LaunchTemplateSpecification *LaunchTemplateSpecificationParameters `json:"launchTemplateSpecification" tf:"launch_template_specification,omitempty"` + + // List of nested arguments provides the ability to specify multiple instance types. This will override the same parameter in the launch template. For on-demand instances, Auto Scaling considers the order of preference of instance types to launch based on the order specified in the overrides list. Defined below. + // +kubebuilder:validation:Optional + Override []OverrideParameters `json:"override,omitempty" tf:"override,omitempty"` +} + +type MixedInstancesPolicyObservation struct { + + // Nested argument containing settings on how to mix on-demand and Spot instances in the Auto Scaling group. Defined below. 
+ InstancesDistribution *InstancesDistributionObservation `json:"instancesDistribution,omitempty" tf:"instances_distribution,omitempty"` + + // Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below. + LaunchTemplate *MixedInstancesPolicyLaunchTemplateObservation `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` +} + +type MixedInstancesPolicyParameters struct { + + // Nested argument containing settings on how to mix on-demand and Spot instances in the Auto Scaling group. Defined below. + // +kubebuilder:validation:Optional + InstancesDistribution *InstancesDistributionParameters `json:"instancesDistribution,omitempty" tf:"instances_distribution,omitempty"` + + // Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below. + // +kubebuilder:validation:Optional + LaunchTemplate *MixedInstancesPolicyLaunchTemplateParameters `json:"launchTemplate" tf:"launch_template,omitempty"` +} + +type NetworkBandwidthGbpsInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkBandwidthGbpsObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkBandwidthGbpsParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkInterfaceCountInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. 
+ Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkInterfaceCountObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkInterfaceCountParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type OverrideInitParameters struct { + + // Override the instance type in the Launch Template with instance types that satisfy the requirements. + InstanceRequirements *InstanceRequirementsInitParameters `json:"instanceRequirements,omitempty" tf:"instance_requirements,omitempty"` + + // Override the instance type in the Launch Template. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Nested argument defines the Launch Template. Defined below. + LaunchTemplateSpecification *OverrideLaunchTemplateSpecificationInitParameters `json:"launchTemplateSpecification,omitempty" tf:"launch_template_specification,omitempty"` + + // Number of capacity units, which gives the instance type a proportional weight to other instance types. + WeightedCapacity *string `json:"weightedCapacity,omitempty" tf:"weighted_capacity,omitempty"` +} + +type OverrideLaunchTemplateSpecificationInitParameters struct { + + // ID of the launch template. Conflicts with launch_template_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate launchTemplateId. 
+ // +kubebuilder:validation:Optional + LaunchTemplateIDRef *v1.Reference `json:"launchTemplateIdRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate launchTemplateId. + // +kubebuilder:validation:Optional + LaunchTemplateIDSelector *v1.Selector `json:"launchTemplateIdSelector,omitempty" tf:"-"` + + // Name of the launch template. Conflicts with launch_template_id. + LaunchTemplateName *string `json:"launchTemplateName,omitempty" tf:"launch_template_name,omitempty"` + + // Template version. Can be version number, $Latest, or $Default. (Default: $Default). + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type OverrideLaunchTemplateSpecificationObservation struct { + + // ID of the launch template. Conflicts with launch_template_name. + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` + + // Name of the launch template. Conflicts with launch_template_id. + LaunchTemplateName *string `json:"launchTemplateName,omitempty" tf:"launch_template_name,omitempty"` + + // Template version. Can be version number, $Latest, or $Default. (Default: $Default). + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type OverrideLaunchTemplateSpecificationParameters struct { + + // ID of the launch template. Conflicts with launch_template_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate launchTemplateId. + // +kubebuilder:validation:Optional + LaunchTemplateIDRef *v1.Reference `json:"launchTemplateIdRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate launchTemplateId. 
+ // +kubebuilder:validation:Optional + LaunchTemplateIDSelector *v1.Selector `json:"launchTemplateIdSelector,omitempty" tf:"-"` + + // Name of the launch template. Conflicts with launch_template_id. + // +kubebuilder:validation:Optional + LaunchTemplateName *string `json:"launchTemplateName,omitempty" tf:"launch_template_name,omitempty"` + + // Template version. Can be version number, $Latest, or $Default. (Default: $Default). + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type OverrideObservation struct { + + // Override the instance type in the Launch Template with instance types that satisfy the requirements. + InstanceRequirements *InstanceRequirementsObservation `json:"instanceRequirements,omitempty" tf:"instance_requirements,omitempty"` + + // Override the instance type in the Launch Template. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Nested argument defines the Launch Template. Defined below. + LaunchTemplateSpecification *OverrideLaunchTemplateSpecificationObservation `json:"launchTemplateSpecification,omitempty" tf:"launch_template_specification,omitempty"` + + // Number of capacity units, which gives the instance type a proportional weight to other instance types. + WeightedCapacity *string `json:"weightedCapacity,omitempty" tf:"weighted_capacity,omitempty"` +} + +type OverrideParameters struct { + + // Override the instance type in the Launch Template with instance types that satisfy the requirements. + // +kubebuilder:validation:Optional + InstanceRequirements *InstanceRequirementsParameters `json:"instanceRequirements,omitempty" tf:"instance_requirements,omitempty"` + + // Override the instance type in the Launch Template. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Nested argument defines the Launch Template. Defined below. 
+ // +kubebuilder:validation:Optional + LaunchTemplateSpecification *OverrideLaunchTemplateSpecificationParameters `json:"launchTemplateSpecification,omitempty" tf:"launch_template_specification,omitempty"` + + // Number of capacity units, which gives the instance type a proportional weight to other instance types. + // +kubebuilder:validation:Optional + WeightedCapacity *string `json:"weightedCapacity,omitempty" tf:"weighted_capacity,omitempty"` +} + +type PreferencesInitParameters struct { + + // Alarm Specification for Instance Refresh. + AlarmSpecification *AlarmSpecificationInitParameters `json:"alarmSpecification,omitempty" tf:"alarm_specification,omitempty"` + + // Automatically rollback if instance refresh fails. Defaults to false. This option may only be set to true when specifying a launch_template or mixed_instances_policy. + AutoRollback *bool `json:"autoRollback,omitempty" tf:"auto_rollback,omitempty"` + + // Number of seconds to wait after a checkpoint. Defaults to 3600. + CheckpointDelay *string `json:"checkpointDelay,omitempty" tf:"checkpoint_delay,omitempty"` + + // List of percentages for each checkpoint. Values must be unique and in ascending order. To replace all instances, the final number must be 100. + CheckpointPercentages []*float64 `json:"checkpointPercentages,omitempty" tf:"checkpoint_percentages,omitempty"` + + // Number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. + InstanceWarmup *string `json:"instanceWarmup,omitempty" tf:"instance_warmup,omitempty"` + + // Amount of capacity in the Auto Scaling group that can be in service and healthy, or pending, to support your workload when an instance refresh is in place, as a percentage of the desired capacity of the Auto Scaling group. Values must be between 100 and 200, defaults to 100. 
+ MaxHealthyPercentage *float64 `json:"maxHealthyPercentage,omitempty" tf:"max_healthy_percentage,omitempty"` + + // Amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to 90. + MinHealthyPercentage *float64 `json:"minHealthyPercentage,omitempty" tf:"min_healthy_percentage,omitempty"` + + // Behavior when encountering instances protected from scale in are found. Available behaviors are Refresh, Ignore, and Wait. Default is Ignore. + ScaleInProtectedInstances *string `json:"scaleInProtectedInstances,omitempty" tf:"scale_in_protected_instances,omitempty"` + + // Replace instances that already have your desired configuration. Defaults to false. + SkipMatching *bool `json:"skipMatching,omitempty" tf:"skip_matching,omitempty"` + + // Behavior when encountering instances in the Standby state in are found. Available behaviors are Terminate, Ignore, and Wait. Default is Ignore. + StandbyInstances *string `json:"standbyInstances,omitempty" tf:"standby_instances,omitempty"` +} + +type PreferencesObservation struct { + + // Alarm Specification for Instance Refresh. + AlarmSpecification *AlarmSpecificationObservation `json:"alarmSpecification,omitempty" tf:"alarm_specification,omitempty"` + + // Automatically rollback if instance refresh fails. Defaults to false. This option may only be set to true when specifying a launch_template or mixed_instances_policy. + AutoRollback *bool `json:"autoRollback,omitempty" tf:"auto_rollback,omitempty"` + + // Number of seconds to wait after a checkpoint. Defaults to 3600. + CheckpointDelay *string `json:"checkpointDelay,omitempty" tf:"checkpoint_delay,omitempty"` + + // List of percentages for each checkpoint. Values must be unique and in ascending order. To replace all instances, the final number must be 100. 
+ CheckpointPercentages []*float64 `json:"checkpointPercentages,omitempty" tf:"checkpoint_percentages,omitempty"` + + // Number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. + InstanceWarmup *string `json:"instanceWarmup,omitempty" tf:"instance_warmup,omitempty"` + + // Amount of capacity in the Auto Scaling group that can be in service and healthy, or pending, to support your workload when an instance refresh is in place, as a percentage of the desired capacity of the Auto Scaling group. Values must be between 100 and 200, defaults to 100. + MaxHealthyPercentage *float64 `json:"maxHealthyPercentage,omitempty" tf:"max_healthy_percentage,omitempty"` + + // Amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to 90. + MinHealthyPercentage *float64 `json:"minHealthyPercentage,omitempty" tf:"min_healthy_percentage,omitempty"` + + // Behavior when encountering instances protected from scale in are found. Available behaviors are Refresh, Ignore, and Wait. Default is Ignore. + ScaleInProtectedInstances *string `json:"scaleInProtectedInstances,omitempty" tf:"scale_in_protected_instances,omitempty"` + + // Replace instances that already have your desired configuration. Defaults to false. + SkipMatching *bool `json:"skipMatching,omitempty" tf:"skip_matching,omitempty"` + + // Behavior when encountering instances in the Standby state in are found. Available behaviors are Terminate, Ignore, and Wait. Default is Ignore. + StandbyInstances *string `json:"standbyInstances,omitempty" tf:"standby_instances,omitempty"` +} + +type PreferencesParameters struct { + + // Alarm Specification for Instance Refresh. 
+ // +kubebuilder:validation:Optional + AlarmSpecification *AlarmSpecificationParameters `json:"alarmSpecification,omitempty" tf:"alarm_specification,omitempty"` + + // Automatically rollback if instance refresh fails. Defaults to false. This option may only be set to true when specifying a launch_template or mixed_instances_policy. + // +kubebuilder:validation:Optional + AutoRollback *bool `json:"autoRollback,omitempty" tf:"auto_rollback,omitempty"` + + // Number of seconds to wait after a checkpoint. Defaults to 3600. + // +kubebuilder:validation:Optional + CheckpointDelay *string `json:"checkpointDelay,omitempty" tf:"checkpoint_delay,omitempty"` + + // List of percentages for each checkpoint. Values must be unique and in ascending order. To replace all instances, the final number must be 100. + // +kubebuilder:validation:Optional + CheckpointPercentages []*float64 `json:"checkpointPercentages,omitempty" tf:"checkpoint_percentages,omitempty"` + + // Number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. + // +kubebuilder:validation:Optional + InstanceWarmup *string `json:"instanceWarmup,omitempty" tf:"instance_warmup,omitempty"` + + // Amount of capacity in the Auto Scaling group that can be in service and healthy, or pending, to support your workload when an instance refresh is in place, as a percentage of the desired capacity of the Auto Scaling group. Values must be between 100 and 200, defaults to 100. + // +kubebuilder:validation:Optional + MaxHealthyPercentage *float64 `json:"maxHealthyPercentage,omitempty" tf:"max_healthy_percentage,omitempty"` + + // Amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to 90. 
+ // +kubebuilder:validation:Optional + MinHealthyPercentage *float64 `json:"minHealthyPercentage,omitempty" tf:"min_healthy_percentage,omitempty"` + + // Behavior when encountering instances protected from scale in are found. Available behaviors are Refresh, Ignore, and Wait. Default is Ignore. + // +kubebuilder:validation:Optional + ScaleInProtectedInstances *string `json:"scaleInProtectedInstances,omitempty" tf:"scale_in_protected_instances,omitempty"` + + // Replace instances that already have your desired configuration. Defaults to false. + // +kubebuilder:validation:Optional + SkipMatching *bool `json:"skipMatching,omitempty" tf:"skip_matching,omitempty"` + + // Behavior when encountering instances in the Standby state in are found. Available behaviors are Terminate, Ignore, and Wait. Default is Ignore. + // +kubebuilder:validation:Optional + StandbyInstances *string `json:"standbyInstances,omitempty" tf:"standby_instances,omitempty"` +} + +type TagInitParameters struct { + + // Key + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Enables propagation of the tag to + // Amazon EC2 instances launched via this ASG + PropagateAtLaunch *bool `json:"propagateAtLaunch,omitempty" tf:"propagate_at_launch,omitempty"` + + // Value + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagObservation struct { + + // Key + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Enables propagation of the tag to + // Amazon EC2 instances launched via this ASG + PropagateAtLaunch *bool `json:"propagateAtLaunch,omitempty" tf:"propagate_at_launch,omitempty"` + + // Value + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagParameters struct { + + // Key + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Enables propagation of the tag to + // Amazon EC2 instances launched via this ASG + // +kubebuilder:validation:Optional + PropagateAtLaunch *bool `json:"propagateAtLaunch" 
tf:"propagate_at_launch,omitempty"` + + // Value + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TotalLocalStorageGbInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type TotalLocalStorageGbObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type TotalLocalStorageGbParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type TrafficSourceInitParameters struct { + + // Identifies the traffic source. For Application Load Balancers, Gateway Load Balancers, Network Load Balancers, and VPC Lattice, this will be the Amazon Resource Name (ARN) for a target group in this account and Region. For Classic Load Balancers, this will be the name of the Classic Load Balancer in this account and Region. + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` + + // Provides additional context for the value of Identifier. + // The following lists the valid values: + // elb if identifier is the name of a Classic Load Balancer. + // elbv2 if identifier is the ARN of an Application Load Balancer, Gateway Load Balancer, or Network Load Balancer target group. + // vpc-lattice if identifier is the ARN of a VPC Lattice target group. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TrafficSourceObservation struct { + + // Identifies the traffic source. For Application Load Balancers, Gateway Load Balancers, Network Load Balancers, and VPC Lattice, this will be the Amazon Resource Name (ARN) for a target group in this account and Region. 
For Classic Load Balancers, this will be the name of the Classic Load Balancer in this account and Region. + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` + + // Provides additional context for the value of Identifier. + // The following lists the valid values: + // elb if identifier is the name of a Classic Load Balancer. + // elbv2 if identifier is the ARN of an Application Load Balancer, Gateway Load Balancer, or Network Load Balancer target group. + // vpc-lattice if identifier is the ARN of a VPC Lattice target group. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TrafficSourceParameters struct { + + // Identifies the traffic source. For Application Load Balancers, Gateway Load Balancers, Network Load Balancers, and VPC Lattice, this will be the Amazon Resource Name (ARN) for a target group in this account and Region. For Classic Load Balancers, this will be the name of the Classic Load Balancer in this account and Region. + // +kubebuilder:validation:Optional + Identifier *string `json:"identifier" tf:"identifier,omitempty"` + + // Provides additional context for the value of Identifier. + // The following lists the valid values: + // elb if identifier is the name of a Classic Load Balancer. + // elbv2 if identifier is the ARN of an Application Load Balancer, Gateway Load Balancer, or Network Load Balancer target group. + // vpc-lattice if identifier is the ARN of a VPC Lattice target group. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type VcpuCountInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type VcpuCountObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. 
+ Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type VcpuCountParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type WarmPoolInitParameters struct { + + // Whether instances in the Auto Scaling group can be returned to the warm pool on scale in. The default is to terminate instances in the Auto Scaling group when the group scales in. + InstanceReusePolicy *InstanceReusePolicyInitParameters `json:"instanceReusePolicy,omitempty" tf:"instance_reuse_policy,omitempty"` + + // Total maximum number of instances that are allowed to be in the warm pool or in any state except Terminated for the Auto Scaling group. + MaxGroupPreparedCapacity *float64 `json:"maxGroupPreparedCapacity,omitempty" tf:"max_group_prepared_capacity,omitempty"` + + // Minimum size of the Auto Scaling Group. + // (See also Waiting for Capacity below.) + MinSize *float64 `json:"minSize,omitempty" tf:"min_size,omitempty"` + + // Sets the instance state to transition to after the lifecycle hooks finish. Valid values are: Stopped (default), Running or Hibernated. + PoolState *string `json:"poolState,omitempty" tf:"pool_state,omitempty"` +} + +type WarmPoolObservation struct { + + // Whether instances in the Auto Scaling group can be returned to the warm pool on scale in. The default is to terminate instances in the Auto Scaling group when the group scales in. + InstanceReusePolicy *InstanceReusePolicyObservation `json:"instanceReusePolicy,omitempty" tf:"instance_reuse_policy,omitempty"` + + // Total maximum number of instances that are allowed to be in the warm pool or in any state except Terminated for the Auto Scaling group. + MaxGroupPreparedCapacity *float64 `json:"maxGroupPreparedCapacity,omitempty" tf:"max_group_prepared_capacity,omitempty"` + + // Minimum size of the Auto Scaling Group. 
+ // (See also Waiting for Capacity below.) + MinSize *float64 `json:"minSize,omitempty" tf:"min_size,omitempty"` + + // Sets the instance state to transition to after the lifecycle hooks finish. Valid values are: Stopped (default), Running or Hibernated. + PoolState *string `json:"poolState,omitempty" tf:"pool_state,omitempty"` +} + +type WarmPoolParameters struct { + + // Whether instances in the Auto Scaling group can be returned to the warm pool on scale in. The default is to terminate instances in the Auto Scaling group when the group scales in. + // +kubebuilder:validation:Optional + InstanceReusePolicy *InstanceReusePolicyParameters `json:"instanceReusePolicy,omitempty" tf:"instance_reuse_policy,omitempty"` + + // Total maximum number of instances that are allowed to be in the warm pool or in any state except Terminated for the Auto Scaling group. + // +kubebuilder:validation:Optional + MaxGroupPreparedCapacity *float64 `json:"maxGroupPreparedCapacity,omitempty" tf:"max_group_prepared_capacity,omitempty"` + + // Minimum size of the Auto Scaling Group. + // (See also Waiting for Capacity below.) + // +kubebuilder:validation:Optional + MinSize *float64 `json:"minSize,omitempty" tf:"min_size,omitempty"` + + // Sets the instance state to transition to after the lifecycle hooks finish. Valid values are: Stopped (default), Running or Hibernated. + // +kubebuilder:validation:Optional + PoolState *string `json:"poolState,omitempty" tf:"pool_state,omitempty"` +} + +// AutoscalingGroupSpec defines the desired state of AutoscalingGroup +type AutoscalingGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AutoscalingGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AutoscalingGroupInitParameters `json:"initProvider,omitempty"` +} + +// AutoscalingGroupStatus defines the observed state of AutoscalingGroup. +type AutoscalingGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AutoscalingGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AutoscalingGroup is the Schema for the AutoscalingGroups API. Provides an Auto Scaling Group resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type AutoscalingGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.maxSize) || (has(self.initProvider) && has(self.initProvider.maxSize))",message="spec.forProvider.maxSize is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.minSize) || (has(self.initProvider) && has(self.initProvider.minSize))",message="spec.forProvider.minSize is a required parameter" + Spec AutoscalingGroupSpec `json:"spec"` + Status AutoscalingGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AutoscalingGroupList contains a list of AutoscalingGroups +type AutoscalingGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutoscalingGroup `json:"items"` +} + +// Repository type metadata. +var ( + AutoscalingGroup_Kind = "AutoscalingGroup" + AutoscalingGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AutoscalingGroup_Kind}.String() + AutoscalingGroup_KindAPIVersion = AutoscalingGroup_Kind + "." + CRDGroupVersion.String() + AutoscalingGroup_GroupVersionKind = CRDGroupVersion.WithKind(AutoscalingGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&AutoscalingGroup{}, &AutoscalingGroupList{}) +} diff --git a/apis/autoscaling/v1beta3/zz_generated.conversion_hubs.go b/apis/autoscaling/v1beta3/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9fbb7419f4 --- /dev/null +++ b/apis/autoscaling/v1beta3/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +// Hub marks this type as a conversion hub. +func (tr *AutoscalingGroup) Hub() {} diff --git a/apis/autoscaling/v1beta3/zz_generated.deepcopy.go b/apis/autoscaling/v1beta3/zz_generated.deepcopy.go new file mode 100644 index 0000000000..41b05e162e --- /dev/null +++ b/apis/autoscaling/v1beta3/zz_generated.deepcopy.go @@ -0,0 +1,3806 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta3 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorCountInitParameters) DeepCopyInto(out *AcceleratorCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorCountInitParameters. +func (in *AcceleratorCountInitParameters) DeepCopy() *AcceleratorCountInitParameters { + if in == nil { + return nil + } + out := new(AcceleratorCountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorCountObservation) DeepCopyInto(out *AcceleratorCountObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorCountObservation. +func (in *AcceleratorCountObservation) DeepCopy() *AcceleratorCountObservation { + if in == nil { + return nil + } + out := new(AcceleratorCountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AcceleratorCountParameters) DeepCopyInto(out *AcceleratorCountParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorCountParameters. +func (in *AcceleratorCountParameters) DeepCopy() *AcceleratorCountParameters { + if in == nil { + return nil + } + out := new(AcceleratorCountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorTotalMemoryMibInitParameters) DeepCopyInto(out *AcceleratorTotalMemoryMibInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorTotalMemoryMibInitParameters. +func (in *AcceleratorTotalMemoryMibInitParameters) DeepCopy() *AcceleratorTotalMemoryMibInitParameters { + if in == nil { + return nil + } + out := new(AcceleratorTotalMemoryMibInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorTotalMemoryMibObservation) DeepCopyInto(out *AcceleratorTotalMemoryMibObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorTotalMemoryMibObservation. 
+func (in *AcceleratorTotalMemoryMibObservation) DeepCopy() *AcceleratorTotalMemoryMibObservation { + if in == nil { + return nil + } + out := new(AcceleratorTotalMemoryMibObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorTotalMemoryMibParameters) DeepCopyInto(out *AcceleratorTotalMemoryMibParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorTotalMemoryMibParameters. +func (in *AcceleratorTotalMemoryMibParameters) DeepCopy() *AcceleratorTotalMemoryMibParameters { + if in == nil { + return nil + } + out := new(AcceleratorTotalMemoryMibParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlarmSpecificationInitParameters) DeepCopyInto(out *AlarmSpecificationInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmSpecificationInitParameters. +func (in *AlarmSpecificationInitParameters) DeepCopy() *AlarmSpecificationInitParameters { + if in == nil { + return nil + } + out := new(AlarmSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlarmSpecificationObservation) DeepCopyInto(out *AlarmSpecificationObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmSpecificationObservation. +func (in *AlarmSpecificationObservation) DeepCopy() *AlarmSpecificationObservation { + if in == nil { + return nil + } + out := new(AlarmSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlarmSpecificationParameters) DeepCopyInto(out *AlarmSpecificationParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmSpecificationParameters. +func (in *AlarmSpecificationParameters) DeepCopy() *AlarmSpecificationParameters { + if in == nil { + return nil + } + out := new(AlarmSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingGroup) DeepCopyInto(out *AutoscalingGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroup. 
+func (in *AutoscalingGroup) DeepCopy() *AutoscalingGroup { + if in == nil { + return nil + } + out := new(AutoscalingGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutoscalingGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingGroupInitParameters) DeepCopyInto(out *AutoscalingGroupInitParameters) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CapacityRebalance != nil { + in, out := &in.CapacityRebalance, &out.CapacityRebalance + *out = new(bool) + **out = **in + } + if in.Context != nil { + in, out := &in.Context, &out.Context + *out = new(string) + **out = **in + } + if in.DefaultCooldown != nil { + in, out := &in.DefaultCooldown, &out.DefaultCooldown + *out = new(float64) + **out = **in + } + if in.DefaultInstanceWarmup != nil { + in, out := &in.DefaultInstanceWarmup, &out.DefaultInstanceWarmup + *out = new(float64) + **out = **in + } + if in.DesiredCapacity != nil { + in, out := &in.DesiredCapacity, &out.DesiredCapacity + *out = new(float64) + **out = **in + } + if in.DesiredCapacityType != nil { + in, out := &in.DesiredCapacityType, &out.DesiredCapacityType + *out = new(string) + **out = **in + } + if in.EnabledMetrics != nil { + in, out := &in.EnabledMetrics, &out.EnabledMetrics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ForceDelete != nil { + in, out := &in.ForceDelete, &out.ForceDelete + 
*out = new(bool) + **out = **in + } + if in.ForceDeleteWarmPool != nil { + in, out := &in.ForceDeleteWarmPool, &out.ForceDeleteWarmPool + *out = new(bool) + **out = **in + } + if in.HealthCheckGracePeriod != nil { + in, out := &in.HealthCheckGracePeriod, &out.HealthCheckGracePeriod + *out = new(float64) + **out = **in + } + if in.HealthCheckType != nil { + in, out := &in.HealthCheckType, &out.HealthCheckType + *out = new(string) + **out = **in + } + if in.IgnoreFailedScalingActivities != nil { + in, out := &in.IgnoreFailedScalingActivities, &out.IgnoreFailedScalingActivities + *out = new(bool) + **out = **in + } + if in.InitialLifecycleHook != nil { + in, out := &in.InitialLifecycleHook, &out.InitialLifecycleHook + *out = make([]InitialLifecycleHookInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstanceMaintenancePolicy != nil { + in, out := &in.InstanceMaintenancePolicy, &out.InstanceMaintenancePolicy + *out = new(InstanceMaintenancePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceRefresh != nil { + in, out := &in.InstanceRefresh, &out.InstanceRefresh + *out = new(InstanceRefreshInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LaunchConfiguration != nil { + in, out := &in.LaunchConfiguration, &out.LaunchConfiguration + *out = new(string) + **out = **in + } + if in.LaunchConfigurationRef != nil { + in, out := &in.LaunchConfigurationRef, &out.LaunchConfigurationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LaunchConfigurationSelector != nil { + in, out := &in.LaunchConfigurationSelector, &out.LaunchConfigurationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxInstanceLifetime != nil { + in, out := &in.MaxInstanceLifetime, &out.MaxInstanceLifetime + *out = new(float64) + **out = 
**in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MetricsGranularity != nil { + in, out := &in.MetricsGranularity, &out.MetricsGranularity + *out = new(string) + **out = **in + } + if in.MinELBCapacity != nil { + in, out := &in.MinELBCapacity, &out.MinELBCapacity + *out = new(float64) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(float64) + **out = **in + } + if in.MixedInstancesPolicy != nil { + in, out := &in.MixedInstancesPolicy, &out.MixedInstancesPolicy + *out = new(MixedInstancesPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementGroupRef != nil { + in, out := &in.PlacementGroupRef, &out.PlacementGroupRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PlacementGroupSelector != nil { + in, out := &in.PlacementGroupSelector, &out.PlacementGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ProtectFromScaleIn != nil { + in, out := &in.ProtectFromScaleIn, &out.ProtectFromScaleIn + *out = new(bool) + **out = **in + } + if in.ServiceLinkedRoleArn != nil { + in, out := &in.ServiceLinkedRoleArn, &out.ServiceLinkedRoleArn + *out = new(string) + **out = **in + } + if in.ServiceLinkedRoleArnRef != nil { + in, out := &in.ServiceLinkedRoleArnRef, &out.ServiceLinkedRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceLinkedRoleArnSelector != nil { + in, out := &in.ServiceLinkedRoleArnSelector, &out.ServiceLinkedRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SuspendedProcesses != nil { + in, out := &in.SuspendedProcesses, &out.SuspendedProcesses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + 
} + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]TagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationPolicies != nil { + in, out := &in.TerminationPolicies, &out.TerminationPolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TrafficSource != nil { + in, out := &in.TrafficSource, &out.TrafficSource + *out = make([]TrafficSourceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCZoneIdentifier != nil { + in, out := &in.VPCZoneIdentifier, &out.VPCZoneIdentifier + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCZoneIdentifierRefs != nil { + in, out := &in.VPCZoneIdentifierRefs, &out.VPCZoneIdentifierRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCZoneIdentifierSelector != nil { + in, out := &in.VPCZoneIdentifierSelector, &out.VPCZoneIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WaitForCapacityTimeout != nil { + in, out := &in.WaitForCapacityTimeout, &out.WaitForCapacityTimeout + *out = new(string) + **out = **in + } + if in.WaitForELBCapacity != nil { + in, out := &in.WaitForELBCapacity, &out.WaitForELBCapacity + *out = new(float64) + **out = **in + } + if in.WarmPool != nil { + in, out := &in.WarmPool, &out.WarmPool + *out = new(WarmPoolInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroupInitParameters. 
+func (in *AutoscalingGroupInitParameters) DeepCopy() *AutoscalingGroupInitParameters { + if in == nil { + return nil + } + out := new(AutoscalingGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingGroupList) DeepCopyInto(out *AutoscalingGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutoscalingGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroupList. +func (in *AutoscalingGroupList) DeepCopy() *AutoscalingGroupList { + if in == nil { + return nil + } + out := new(AutoscalingGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutoscalingGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingGroupObservation) DeepCopyInto(out *AutoscalingGroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CapacityRebalance != nil { + in, out := &in.CapacityRebalance, &out.CapacityRebalance + *out = new(bool) + **out = **in + } + if in.Context != nil { + in, out := &in.Context, &out.Context + *out = new(string) + **out = **in + } + if in.DefaultCooldown != nil { + in, out := &in.DefaultCooldown, &out.DefaultCooldown + *out = new(float64) + **out = **in + } + if in.DefaultInstanceWarmup != nil { + in, out := &in.DefaultInstanceWarmup, &out.DefaultInstanceWarmup + *out = new(float64) + **out = **in + } + if in.DesiredCapacity != nil { + in, out := &in.DesiredCapacity, &out.DesiredCapacity + *out = new(float64) + **out = **in + } + if in.DesiredCapacityType != nil { + in, out := &in.DesiredCapacityType, &out.DesiredCapacityType + *out = new(string) + **out = **in + } + if in.EnabledMetrics != nil { + in, out := &in.EnabledMetrics, &out.EnabledMetrics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ForceDelete != nil { + in, out := &in.ForceDelete, &out.ForceDelete + *out = new(bool) + **out = **in + } + if in.ForceDeleteWarmPool != nil { + in, out := &in.ForceDeleteWarmPool, &out.ForceDeleteWarmPool + *out = new(bool) + **out = **in + } + if in.HealthCheckGracePeriod != nil { + in, out := &in.HealthCheckGracePeriod, &out.HealthCheckGracePeriod + *out = new(float64) + **out = **in + } + if in.HealthCheckType != nil { + in, out := &in.HealthCheckType, &out.HealthCheckType + *out = new(string) + 
**out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IgnoreFailedScalingActivities != nil { + in, out := &in.IgnoreFailedScalingActivities, &out.IgnoreFailedScalingActivities + *out = new(bool) + **out = **in + } + if in.InitialLifecycleHook != nil { + in, out := &in.InitialLifecycleHook, &out.InitialLifecycleHook + *out = make([]InitialLifecycleHookObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstanceMaintenancePolicy != nil { + in, out := &in.InstanceMaintenancePolicy, &out.InstanceMaintenancePolicy + *out = new(InstanceMaintenancePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.InstanceRefresh != nil { + in, out := &in.InstanceRefresh, &out.InstanceRefresh + *out = new(InstanceRefreshObservation) + (*in).DeepCopyInto(*out) + } + if in.LaunchConfiguration != nil { + in, out := &in.LaunchConfiguration, &out.LaunchConfiguration + *out = new(string) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateObservation) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancers != nil { + in, out := &in.LoadBalancers, &out.LoadBalancers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxInstanceLifetime != nil { + in, out := &in.MaxInstanceLifetime, &out.MaxInstanceLifetime + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MetricsGranularity != nil { + in, out := &in.MetricsGranularity, &out.MetricsGranularity + *out = new(string) + **out = **in + } + if in.MinELBCapacity != nil { + in, out := &in.MinELBCapacity, &out.MinELBCapacity + *out = new(float64) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(float64) 
+ **out = **in + } + if in.MixedInstancesPolicy != nil { + in, out := &in.MixedInstancesPolicy, &out.MixedInstancesPolicy + *out = new(MixedInstancesPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PredictedCapacity != nil { + in, out := &in.PredictedCapacity, &out.PredictedCapacity + *out = new(float64) + **out = **in + } + if in.ProtectFromScaleIn != nil { + in, out := &in.ProtectFromScaleIn, &out.ProtectFromScaleIn + *out = new(bool) + **out = **in + } + if in.ServiceLinkedRoleArn != nil { + in, out := &in.ServiceLinkedRoleArn, &out.ServiceLinkedRoleArn + *out = new(string) + **out = **in + } + if in.SuspendedProcesses != nil { + in, out := &in.SuspendedProcesses, &out.SuspendedProcesses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]TagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupArns != nil { + in, out := &in.TargetGroupArns, &out.TargetGroupArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TerminationPolicies != nil { + in, out := &in.TerminationPolicies, &out.TerminationPolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TrafficSource != nil { + in, out := &in.TrafficSource, &out.TrafficSource + *out = make([]TrafficSourceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCZoneIdentifier != nil { + in, out := &in.VPCZoneIdentifier, &out.VPCZoneIdentifier + *out = make([]*string, 
len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WaitForCapacityTimeout != nil { + in, out := &in.WaitForCapacityTimeout, &out.WaitForCapacityTimeout + *out = new(string) + **out = **in + } + if in.WaitForELBCapacity != nil { + in, out := &in.WaitForELBCapacity, &out.WaitForELBCapacity + *out = new(float64) + **out = **in + } + if in.WarmPool != nil { + in, out := &in.WarmPool, &out.WarmPool + *out = new(WarmPoolObservation) + (*in).DeepCopyInto(*out) + } + if in.WarmPoolSize != nil { + in, out := &in.WarmPoolSize, &out.WarmPoolSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroupObservation. +func (in *AutoscalingGroupObservation) DeepCopy() *AutoscalingGroupObservation { + if in == nil { + return nil + } + out := new(AutoscalingGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingGroupParameters) DeepCopyInto(out *AutoscalingGroupParameters) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CapacityRebalance != nil { + in, out := &in.CapacityRebalance, &out.CapacityRebalance + *out = new(bool) + **out = **in + } + if in.Context != nil { + in, out := &in.Context, &out.Context + *out = new(string) + **out = **in + } + if in.DefaultCooldown != nil { + in, out := &in.DefaultCooldown, &out.DefaultCooldown + *out = new(float64) + **out = **in + } + if in.DefaultInstanceWarmup != nil { + in, out := &in.DefaultInstanceWarmup, &out.DefaultInstanceWarmup + *out = new(float64) + **out = **in + } + if in.DesiredCapacity != nil { + in, out := &in.DesiredCapacity, &out.DesiredCapacity + *out = new(float64) + **out = **in + } + if in.DesiredCapacityType != nil { + in, out := &in.DesiredCapacityType, &out.DesiredCapacityType + *out = new(string) + **out = **in + } + if in.EnabledMetrics != nil { + in, out := &in.EnabledMetrics, &out.EnabledMetrics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ForceDelete != nil { + in, out := &in.ForceDelete, &out.ForceDelete + *out = new(bool) + **out = **in + } + if in.ForceDeleteWarmPool != nil { + in, out := &in.ForceDeleteWarmPool, &out.ForceDeleteWarmPool + *out = new(bool) + **out = **in + } + if in.HealthCheckGracePeriod != nil { + in, out := &in.HealthCheckGracePeriod, &out.HealthCheckGracePeriod + *out = new(float64) + **out = **in + } + if in.HealthCheckType != nil { + in, out := &in.HealthCheckType, &out.HealthCheckType + *out = new(string) + **out = **in + } + if in.IgnoreFailedScalingActivities != nil { + in, out := 
&in.IgnoreFailedScalingActivities, &out.IgnoreFailedScalingActivities + *out = new(bool) + **out = **in + } + if in.InitialLifecycleHook != nil { + in, out := &in.InitialLifecycleHook, &out.InitialLifecycleHook + *out = make([]InitialLifecycleHookParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstanceMaintenancePolicy != nil { + in, out := &in.InstanceMaintenancePolicy, &out.InstanceMaintenancePolicy + *out = new(InstanceMaintenancePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceRefresh != nil { + in, out := &in.InstanceRefresh, &out.InstanceRefresh + *out = new(InstanceRefreshParameters) + (*in).DeepCopyInto(*out) + } + if in.LaunchConfiguration != nil { + in, out := &in.LaunchConfiguration, &out.LaunchConfiguration + *out = new(string) + **out = **in + } + if in.LaunchConfigurationRef != nil { + in, out := &in.LaunchConfigurationRef, &out.LaunchConfigurationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LaunchConfigurationSelector != nil { + in, out := &in.LaunchConfigurationSelector, &out.LaunchConfigurationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxInstanceLifetime != nil { + in, out := &in.MaxInstanceLifetime, &out.MaxInstanceLifetime + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MetricsGranularity != nil { + in, out := &in.MetricsGranularity, &out.MetricsGranularity + *out = new(string) + **out = **in + } + if in.MinELBCapacity != nil { + in, out := &in.MinELBCapacity, &out.MinELBCapacity + *out = new(float64) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(float64) + **out = **in + } + if in.MixedInstancesPolicy != nil { + in, out 
:= &in.MixedInstancesPolicy, &out.MixedInstancesPolicy + *out = new(MixedInstancesPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementGroupRef != nil { + in, out := &in.PlacementGroupRef, &out.PlacementGroupRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PlacementGroupSelector != nil { + in, out := &in.PlacementGroupSelector, &out.PlacementGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ProtectFromScaleIn != nil { + in, out := &in.ProtectFromScaleIn, &out.ProtectFromScaleIn + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceLinkedRoleArn != nil { + in, out := &in.ServiceLinkedRoleArn, &out.ServiceLinkedRoleArn + *out = new(string) + **out = **in + } + if in.ServiceLinkedRoleArnRef != nil { + in, out := &in.ServiceLinkedRoleArnRef, &out.ServiceLinkedRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceLinkedRoleArnSelector != nil { + in, out := &in.ServiceLinkedRoleArnSelector, &out.ServiceLinkedRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SuspendedProcesses != nil { + in, out := &in.SuspendedProcesses, &out.SuspendedProcesses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]TagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationPolicies != nil { + in, out := &in.TerminationPolicies, &out.TerminationPolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if 
in.TrafficSource != nil { + in, out := &in.TrafficSource, &out.TrafficSource + *out = make([]TrafficSourceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCZoneIdentifier != nil { + in, out := &in.VPCZoneIdentifier, &out.VPCZoneIdentifier + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCZoneIdentifierRefs != nil { + in, out := &in.VPCZoneIdentifierRefs, &out.VPCZoneIdentifierRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCZoneIdentifierSelector != nil { + in, out := &in.VPCZoneIdentifierSelector, &out.VPCZoneIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WaitForCapacityTimeout != nil { + in, out := &in.WaitForCapacityTimeout, &out.WaitForCapacityTimeout + *out = new(string) + **out = **in + } + if in.WaitForELBCapacity != nil { + in, out := &in.WaitForELBCapacity, &out.WaitForELBCapacity + *out = new(float64) + **out = **in + } + if in.WarmPool != nil { + in, out := &in.WarmPool, &out.WarmPool + *out = new(WarmPoolParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroupParameters. +func (in *AutoscalingGroupParameters) DeepCopy() *AutoscalingGroupParameters { + if in == nil { + return nil + } + out := new(AutoscalingGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingGroupSpec) DeepCopyInto(out *AutoscalingGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroupSpec. +func (in *AutoscalingGroupSpec) DeepCopy() *AutoscalingGroupSpec { + if in == nil { + return nil + } + out := new(AutoscalingGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingGroupStatus) DeepCopyInto(out *AutoscalingGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroupStatus. +func (in *AutoscalingGroupStatus) DeepCopy() *AutoscalingGroupStatus { + if in == nil { + return nil + } + out := new(AutoscalingGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaselineEBSBandwidthMbpsInitParameters) DeepCopyInto(out *BaselineEBSBandwidthMbpsInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaselineEBSBandwidthMbpsInitParameters. 
+func (in *BaselineEBSBandwidthMbpsInitParameters) DeepCopy() *BaselineEBSBandwidthMbpsInitParameters { + if in == nil { + return nil + } + out := new(BaselineEBSBandwidthMbpsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaselineEBSBandwidthMbpsObservation) DeepCopyInto(out *BaselineEBSBandwidthMbpsObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaselineEBSBandwidthMbpsObservation. +func (in *BaselineEBSBandwidthMbpsObservation) DeepCopy() *BaselineEBSBandwidthMbpsObservation { + if in == nil { + return nil + } + out := new(BaselineEBSBandwidthMbpsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaselineEBSBandwidthMbpsParameters) DeepCopyInto(out *BaselineEBSBandwidthMbpsParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaselineEBSBandwidthMbpsParameters. +func (in *BaselineEBSBandwidthMbpsParameters) DeepCopy() *BaselineEBSBandwidthMbpsParameters { + if in == nil { + return nil + } + out := new(BaselineEBSBandwidthMbpsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitialLifecycleHookInitParameters) DeepCopyInto(out *InitialLifecycleHookInitParameters) { + *out = *in + if in.DefaultResult != nil { + in, out := &in.DefaultResult, &out.DefaultResult + *out = new(string) + **out = **in + } + if in.HeartbeatTimeout != nil { + in, out := &in.HeartbeatTimeout, &out.HeartbeatTimeout + *out = new(float64) + **out = **in + } + if in.LifecycleTransition != nil { + in, out := &in.LifecycleTransition, &out.LifecycleTransition + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotificationMetadata != nil { + in, out := &in.NotificationMetadata, &out.NotificationMetadata + *out = new(string) + **out = **in + } + if in.NotificationTargetArn != nil { + in, out := &in.NotificationTargetArn, &out.NotificationTargetArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialLifecycleHookInitParameters. +func (in *InitialLifecycleHookInitParameters) DeepCopy() *InitialLifecycleHookInitParameters { + if in == nil { + return nil + } + out := new(InitialLifecycleHookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitialLifecycleHookObservation) DeepCopyInto(out *InitialLifecycleHookObservation) { + *out = *in + if in.DefaultResult != nil { + in, out := &in.DefaultResult, &out.DefaultResult + *out = new(string) + **out = **in + } + if in.HeartbeatTimeout != nil { + in, out := &in.HeartbeatTimeout, &out.HeartbeatTimeout + *out = new(float64) + **out = **in + } + if in.LifecycleTransition != nil { + in, out := &in.LifecycleTransition, &out.LifecycleTransition + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotificationMetadata != nil { + in, out := &in.NotificationMetadata, &out.NotificationMetadata + *out = new(string) + **out = **in + } + if in.NotificationTargetArn != nil { + in, out := &in.NotificationTargetArn, &out.NotificationTargetArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialLifecycleHookObservation. +func (in *InitialLifecycleHookObservation) DeepCopy() *InitialLifecycleHookObservation { + if in == nil { + return nil + } + out := new(InitialLifecycleHookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitialLifecycleHookParameters) DeepCopyInto(out *InitialLifecycleHookParameters) { + *out = *in + if in.DefaultResult != nil { + in, out := &in.DefaultResult, &out.DefaultResult + *out = new(string) + **out = **in + } + if in.HeartbeatTimeout != nil { + in, out := &in.HeartbeatTimeout, &out.HeartbeatTimeout + *out = new(float64) + **out = **in + } + if in.LifecycleTransition != nil { + in, out := &in.LifecycleTransition, &out.LifecycleTransition + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotificationMetadata != nil { + in, out := &in.NotificationMetadata, &out.NotificationMetadata + *out = new(string) + **out = **in + } + if in.NotificationTargetArn != nil { + in, out := &in.NotificationTargetArn, &out.NotificationTargetArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialLifecycleHookParameters. +func (in *InitialLifecycleHookParameters) DeepCopy() *InitialLifecycleHookParameters { + if in == nil { + return nil + } + out := new(InitialLifecycleHookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMaintenancePolicyInitParameters) DeepCopyInto(out *InstanceMaintenancePolicyInitParameters) { + *out = *in + if in.MaxHealthyPercentage != nil { + in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage + *out = new(float64) + **out = **in + } + if in.MinHealthyPercentage != nil { + in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMaintenancePolicyInitParameters. 
+func (in *InstanceMaintenancePolicyInitParameters) DeepCopy() *InstanceMaintenancePolicyInitParameters { + if in == nil { + return nil + } + out := new(InstanceMaintenancePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMaintenancePolicyObservation) DeepCopyInto(out *InstanceMaintenancePolicyObservation) { + *out = *in + if in.MaxHealthyPercentage != nil { + in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage + *out = new(float64) + **out = **in + } + if in.MinHealthyPercentage != nil { + in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMaintenancePolicyObservation. +func (in *InstanceMaintenancePolicyObservation) DeepCopy() *InstanceMaintenancePolicyObservation { + if in == nil { + return nil + } + out := new(InstanceMaintenancePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMaintenancePolicyParameters) DeepCopyInto(out *InstanceMaintenancePolicyParameters) { + *out = *in + if in.MaxHealthyPercentage != nil { + in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage + *out = new(float64) + **out = **in + } + if in.MinHealthyPercentage != nil { + in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMaintenancePolicyParameters. 
+func (in *InstanceMaintenancePolicyParameters) DeepCopy() *InstanceMaintenancePolicyParameters { + if in == nil { + return nil + } + out := new(InstanceMaintenancePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRefreshInitParameters) DeepCopyInto(out *InstanceRefreshInitParameters) { + *out = *in + if in.Preferences != nil { + in, out := &in.Preferences, &out.Preferences + *out = new(PreferencesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(string) + **out = **in + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRefreshInitParameters. +func (in *InstanceRefreshInitParameters) DeepCopy() *InstanceRefreshInitParameters { + if in == nil { + return nil + } + out := new(InstanceRefreshInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRefreshObservation) DeepCopyInto(out *InstanceRefreshObservation) { + *out = *in + if in.Preferences != nil { + in, out := &in.Preferences, &out.Preferences + *out = new(PreferencesObservation) + (*in).DeepCopyInto(*out) + } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(string) + **out = **in + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRefreshObservation. +func (in *InstanceRefreshObservation) DeepCopy() *InstanceRefreshObservation { + if in == nil { + return nil + } + out := new(InstanceRefreshObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRefreshParameters) DeepCopyInto(out *InstanceRefreshParameters) { + *out = *in + if in.Preferences != nil { + in, out := &in.Preferences, &out.Preferences + *out = new(PreferencesParameters) + (*in).DeepCopyInto(*out) + } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(string) + **out = **in + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRefreshParameters. 
+func (in *InstanceRefreshParameters) DeepCopy() *InstanceRefreshParameters { + if in == nil { + return nil + } + out := new(InstanceRefreshParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsInitParameters) DeepCopyInto(out *InstanceRequirementsInitParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(AcceleratorCountInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = new(AcceleratorTotalMemoryMibInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != 
nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = new(BaselineEBSBandwidthMbpsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = new(MemoryGibPerVcpuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = new(MemoryMibInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = new(NetworkBandwidthGbpsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceCount != nil { 
+ in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = new(NetworkInterfaceCountInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = new(TotalLocalStorageGbInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = new(VcpuCountInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsInitParameters. +func (in *InstanceRequirementsInitParameters) DeepCopy() *InstanceRequirementsInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsObservation) DeepCopyInto(out *InstanceRequirementsObservation) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(AcceleratorCountObservation) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = new(AcceleratorTotalMemoryMibObservation) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = new(BaselineEBSBandwidthMbpsObservation) + (*in).DeepCopyInto(*out) + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if 
in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = new(MemoryGibPerVcpuObservation) + (*in).DeepCopyInto(*out) + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = new(MemoryMibObservation) + (*in).DeepCopyInto(*out) + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = new(NetworkBandwidthGbpsObservation) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = new(NetworkInterfaceCountObservation) + (*in).DeepCopyInto(*out) + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = 
new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = new(TotalLocalStorageGbObservation) + (*in).DeepCopyInto(*out) + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = new(VcpuCountObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsObservation. +func (in *InstanceRequirementsObservation) DeepCopy() *InstanceRequirementsObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsParameters) DeepCopyInto(out *InstanceRequirementsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(AcceleratorCountParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = new(AcceleratorTotalMemoryMibParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = new(BaselineEBSBandwidthMbpsParameters) + (*in).DeepCopyInto(*out) + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if in.CPUManufacturers 
!= nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = new(MemoryGibPerVcpuParameters) + (*in).DeepCopyInto(*out) + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = new(MemoryMibParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = new(NetworkBandwidthGbpsParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = new(NetworkInterfaceCountParameters) + (*in).DeepCopyInto(*out) + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = 
**in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = new(TotalLocalStorageGbParameters) + (*in).DeepCopyInto(*out) + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = new(VcpuCountParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsParameters. +func (in *InstanceRequirementsParameters) DeepCopy() *InstanceRequirementsParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceReusePolicyInitParameters) DeepCopyInto(out *InstanceReusePolicyInitParameters) { + *out = *in + if in.ReuseOnScaleIn != nil { + in, out := &in.ReuseOnScaleIn, &out.ReuseOnScaleIn + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReusePolicyInitParameters. +func (in *InstanceReusePolicyInitParameters) DeepCopy() *InstanceReusePolicyInitParameters { + if in == nil { + return nil + } + out := new(InstanceReusePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceReusePolicyObservation) DeepCopyInto(out *InstanceReusePolicyObservation) { + *out = *in + if in.ReuseOnScaleIn != nil { + in, out := &in.ReuseOnScaleIn, &out.ReuseOnScaleIn + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReusePolicyObservation. +func (in *InstanceReusePolicyObservation) DeepCopy() *InstanceReusePolicyObservation { + if in == nil { + return nil + } + out := new(InstanceReusePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceReusePolicyParameters) DeepCopyInto(out *InstanceReusePolicyParameters) { + *out = *in + if in.ReuseOnScaleIn != nil { + in, out := &in.ReuseOnScaleIn, &out.ReuseOnScaleIn + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReusePolicyParameters. +func (in *InstanceReusePolicyParameters) DeepCopy() *InstanceReusePolicyParameters { + if in == nil { + return nil + } + out := new(InstanceReusePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstancesDistributionInitParameters) DeepCopyInto(out *InstancesDistributionInitParameters) { + *out = *in + if in.OnDemandAllocationStrategy != nil { + in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy + *out = new(string) + **out = **in + } + if in.OnDemandBaseCapacity != nil { + in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity + *out = new(float64) + **out = **in + } + if in.OnDemandPercentageAboveBaseCapacity != nil { + in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity + *out = new(float64) + **out = **in + } + if in.SpotAllocationStrategy != nil { + in, out := &in.SpotAllocationStrategy, &out.SpotAllocationStrategy + *out = new(string) + **out = **in + } + if in.SpotInstancePools != nil { + in, out := &in.SpotInstancePools, &out.SpotInstancePools + *out = new(float64) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistributionInitParameters. +func (in *InstancesDistributionInitParameters) DeepCopy() *InstancesDistributionInitParameters { + if in == nil { + return nil + } + out := new(InstancesDistributionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstancesDistributionObservation) DeepCopyInto(out *InstancesDistributionObservation) { + *out = *in + if in.OnDemandAllocationStrategy != nil { + in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy + *out = new(string) + **out = **in + } + if in.OnDemandBaseCapacity != nil { + in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity + *out = new(float64) + **out = **in + } + if in.OnDemandPercentageAboveBaseCapacity != nil { + in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity + *out = new(float64) + **out = **in + } + if in.SpotAllocationStrategy != nil { + in, out := &in.SpotAllocationStrategy, &out.SpotAllocationStrategy + *out = new(string) + **out = **in + } + if in.SpotInstancePools != nil { + in, out := &in.SpotInstancePools, &out.SpotInstancePools + *out = new(float64) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistributionObservation. +func (in *InstancesDistributionObservation) DeepCopy() *InstancesDistributionObservation { + if in == nil { + return nil + } + out := new(InstancesDistributionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstancesDistributionParameters) DeepCopyInto(out *InstancesDistributionParameters) { + *out = *in + if in.OnDemandAllocationStrategy != nil { + in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy + *out = new(string) + **out = **in + } + if in.OnDemandBaseCapacity != nil { + in, out := &in.OnDemandBaseCapacity, &out.OnDemandBaseCapacity + *out = new(float64) + **out = **in + } + if in.OnDemandPercentageAboveBaseCapacity != nil { + in, out := &in.OnDemandPercentageAboveBaseCapacity, &out.OnDemandPercentageAboveBaseCapacity + *out = new(float64) + **out = **in + } + if in.SpotAllocationStrategy != nil { + in, out := &in.SpotAllocationStrategy, &out.SpotAllocationStrategy + *out = new(string) + **out = **in + } + if in.SpotInstancePools != nil { + in, out := &in.SpotInstancePools, &out.SpotInstancePools + *out = new(float64) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesDistributionParameters. +func (in *InstancesDistributionParameters) DeepCopy() *InstancesDistributionParameters { + if in == nil { + return nil + } + out := new(InstancesDistributionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateInitParameters) DeepCopyInto(out *LaunchTemplateInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInitParameters. +func (in *LaunchTemplateInitParameters) DeepCopy() *LaunchTemplateInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateObservation) DeepCopyInto(out *LaunchTemplateObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateObservation. +func (in *LaunchTemplateObservation) DeepCopy() *LaunchTemplateObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateParameters) DeepCopyInto(out *LaunchTemplateParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateParameters. +func (in *LaunchTemplateParameters) DeepCopy() *LaunchTemplateParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateSpecificationInitParameters) DeepCopyInto(out *LaunchTemplateSpecificationInitParameters) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateIDRef != nil { + in, out := &in.LaunchTemplateIDRef, &out.LaunchTemplateIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateIDSelector != nil { + in, out := &in.LaunchTemplateIDSelector, &out.LaunchTemplateIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationInitParameters. +func (in *LaunchTemplateSpecificationInitParameters) DeepCopy() *LaunchTemplateSpecificationInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateSpecificationObservation) DeepCopyInto(out *LaunchTemplateSpecificationObservation) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationObservation. 
+func (in *LaunchTemplateSpecificationObservation) DeepCopy() *LaunchTemplateSpecificationObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateSpecificationParameters) DeepCopyInto(out *LaunchTemplateSpecificationParameters) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateIDRef != nil { + in, out := &in.LaunchTemplateIDRef, &out.LaunchTemplateIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateIDSelector != nil { + in, out := &in.LaunchTemplateIDSelector, &out.LaunchTemplateIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationParameters. +func (in *LaunchTemplateSpecificationParameters) DeepCopy() *LaunchTemplateSpecificationParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemoryGibPerVcpuInitParameters) DeepCopyInto(out *MemoryGibPerVcpuInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuInitParameters. +func (in *MemoryGibPerVcpuInitParameters) DeepCopy() *MemoryGibPerVcpuInitParameters { + if in == nil { + return nil + } + out := new(MemoryGibPerVcpuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryGibPerVcpuObservation) DeepCopyInto(out *MemoryGibPerVcpuObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuObservation. +func (in *MemoryGibPerVcpuObservation) DeepCopy() *MemoryGibPerVcpuObservation { + if in == nil { + return nil + } + out := new(MemoryGibPerVcpuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryGibPerVcpuParameters) DeepCopyInto(out *MemoryGibPerVcpuParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuParameters. 
+func (in *MemoryGibPerVcpuParameters) DeepCopy() *MemoryGibPerVcpuParameters { + if in == nil { + return nil + } + out := new(MemoryGibPerVcpuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryMibInitParameters) DeepCopyInto(out *MemoryMibInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibInitParameters. +func (in *MemoryMibInitParameters) DeepCopy() *MemoryMibInitParameters { + if in == nil { + return nil + } + out := new(MemoryMibInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryMibObservation) DeepCopyInto(out *MemoryMibObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibObservation. +func (in *MemoryMibObservation) DeepCopy() *MemoryMibObservation { + if in == nil { + return nil + } + out := new(MemoryMibObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemoryMibParameters) DeepCopyInto(out *MemoryMibParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibParameters. +func (in *MemoryMibParameters) DeepCopy() *MemoryMibParameters { + if in == nil { + return nil + } + out := new(MemoryMibParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MixedInstancesPolicyInitParameters) DeepCopyInto(out *MixedInstancesPolicyInitParameters) { + *out = *in + if in.InstancesDistribution != nil { + in, out := &in.InstancesDistribution, &out.InstancesDistribution + *out = new(InstancesDistributionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(MixedInstancesPolicyLaunchTemplateInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyInitParameters. +func (in *MixedInstancesPolicyInitParameters) DeepCopy() *MixedInstancesPolicyInitParameters { + if in == nil { + return nil + } + out := new(MixedInstancesPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MixedInstancesPolicyLaunchTemplateInitParameters) DeepCopyInto(out *MixedInstancesPolicyLaunchTemplateInitParameters) { + *out = *in + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = new(LaunchTemplateSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = make([]OverrideInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyLaunchTemplateInitParameters. +func (in *MixedInstancesPolicyLaunchTemplateInitParameters) DeepCopy() *MixedInstancesPolicyLaunchTemplateInitParameters { + if in == nil { + return nil + } + out := new(MixedInstancesPolicyLaunchTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MixedInstancesPolicyLaunchTemplateObservation) DeepCopyInto(out *MixedInstancesPolicyLaunchTemplateObservation) { + *out = *in + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = new(LaunchTemplateSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = make([]OverrideObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyLaunchTemplateObservation. 
+func (in *MixedInstancesPolicyLaunchTemplateObservation) DeepCopy() *MixedInstancesPolicyLaunchTemplateObservation { + if in == nil { + return nil + } + out := new(MixedInstancesPolicyLaunchTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MixedInstancesPolicyLaunchTemplateParameters) DeepCopyInto(out *MixedInstancesPolicyLaunchTemplateParameters) { + *out = *in + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = new(LaunchTemplateSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = make([]OverrideParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyLaunchTemplateParameters. +func (in *MixedInstancesPolicyLaunchTemplateParameters) DeepCopy() *MixedInstancesPolicyLaunchTemplateParameters { + if in == nil { + return nil + } + out := new(MixedInstancesPolicyLaunchTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MixedInstancesPolicyObservation) DeepCopyInto(out *MixedInstancesPolicyObservation) { + *out = *in + if in.InstancesDistribution != nil { + in, out := &in.InstancesDistribution, &out.InstancesDistribution + *out = new(InstancesDistributionObservation) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(MixedInstancesPolicyLaunchTemplateObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyObservation. 
+func (in *MixedInstancesPolicyObservation) DeepCopy() *MixedInstancesPolicyObservation { + if in == nil { + return nil + } + out := new(MixedInstancesPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MixedInstancesPolicyParameters) DeepCopyInto(out *MixedInstancesPolicyParameters) { + *out = *in + if in.InstancesDistribution != nil { + in, out := &in.InstancesDistribution, &out.InstancesDistribution + *out = new(InstancesDistributionParameters) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(MixedInstancesPolicyLaunchTemplateParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MixedInstancesPolicyParameters. +func (in *MixedInstancesPolicyParameters) DeepCopy() *MixedInstancesPolicyParameters { + if in == nil { + return nil + } + out := new(MixedInstancesPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkBandwidthGbpsInitParameters) DeepCopyInto(out *NetworkBandwidthGbpsInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsInitParameters. +func (in *NetworkBandwidthGbpsInitParameters) DeepCopy() *NetworkBandwidthGbpsInitParameters { + if in == nil { + return nil + } + out := new(NetworkBandwidthGbpsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *NetworkBandwidthGbpsObservation) DeepCopyInto(out *NetworkBandwidthGbpsObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsObservation. +func (in *NetworkBandwidthGbpsObservation) DeepCopy() *NetworkBandwidthGbpsObservation { + if in == nil { + return nil + } + out := new(NetworkBandwidthGbpsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkBandwidthGbpsParameters) DeepCopyInto(out *NetworkBandwidthGbpsParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsParameters. +func (in *NetworkBandwidthGbpsParameters) DeepCopy() *NetworkBandwidthGbpsParameters { + if in == nil { + return nil + } + out := new(NetworkBandwidthGbpsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceCountInitParameters) DeepCopyInto(out *NetworkInterfaceCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountInitParameters. 
+func (in *NetworkInterfaceCountInitParameters) DeepCopy() *NetworkInterfaceCountInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceCountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceCountObservation) DeepCopyInto(out *NetworkInterfaceCountObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountObservation. +func (in *NetworkInterfaceCountObservation) DeepCopy() *NetworkInterfaceCountObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceCountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceCountParameters) DeepCopyInto(out *NetworkInterfaceCountParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountParameters. +func (in *NetworkInterfaceCountParameters) DeepCopy() *NetworkInterfaceCountParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceCountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverrideInitParameters) DeepCopyInto(out *OverrideInitParameters) { + *out = *in + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = new(InstanceRequirementsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = new(OverrideLaunchTemplateSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideInitParameters. +func (in *OverrideInitParameters) DeepCopy() *OverrideInitParameters { + if in == nil { + return nil + } + out := new(OverrideInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverrideLaunchTemplateSpecificationInitParameters) DeepCopyInto(out *OverrideLaunchTemplateSpecificationInitParameters) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateIDRef != nil { + in, out := &in.LaunchTemplateIDRef, &out.LaunchTemplateIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateIDSelector != nil { + in, out := &in.LaunchTemplateIDSelector, &out.LaunchTemplateIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideLaunchTemplateSpecificationInitParameters. +func (in *OverrideLaunchTemplateSpecificationInitParameters) DeepCopy() *OverrideLaunchTemplateSpecificationInitParameters { + if in == nil { + return nil + } + out := new(OverrideLaunchTemplateSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverrideLaunchTemplateSpecificationObservation) DeepCopyInto(out *OverrideLaunchTemplateSpecificationObservation) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideLaunchTemplateSpecificationObservation. +func (in *OverrideLaunchTemplateSpecificationObservation) DeepCopy() *OverrideLaunchTemplateSpecificationObservation { + if in == nil { + return nil + } + out := new(OverrideLaunchTemplateSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverrideLaunchTemplateSpecificationParameters) DeepCopyInto(out *OverrideLaunchTemplateSpecificationParameters) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateIDRef != nil { + in, out := &in.LaunchTemplateIDRef, &out.LaunchTemplateIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateIDSelector != nil { + in, out := &in.LaunchTemplateIDSelector, &out.LaunchTemplateIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideLaunchTemplateSpecificationParameters. +func (in *OverrideLaunchTemplateSpecificationParameters) DeepCopy() *OverrideLaunchTemplateSpecificationParameters { + if in == nil { + return nil + } + out := new(OverrideLaunchTemplateSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverrideObservation) DeepCopyInto(out *OverrideObservation) { + *out = *in + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = new(InstanceRequirementsObservation) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = new(OverrideLaunchTemplateSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideObservation. +func (in *OverrideObservation) DeepCopy() *OverrideObservation { + if in == nil { + return nil + } + out := new(OverrideObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideParameters) DeepCopyInto(out *OverrideParameters) { + *out = *in + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = new(InstanceRequirementsParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = new(OverrideLaunchTemplateSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideParameters. 
+func (in *OverrideParameters) DeepCopy() *OverrideParameters { + if in == nil { + return nil + } + out := new(OverrideParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferencesInitParameters) DeepCopyInto(out *PreferencesInitParameters) { + *out = *in + if in.AlarmSpecification != nil { + in, out := &in.AlarmSpecification, &out.AlarmSpecification + *out = new(AlarmSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoRollback != nil { + in, out := &in.AutoRollback, &out.AutoRollback + *out = new(bool) + **out = **in + } + if in.CheckpointDelay != nil { + in, out := &in.CheckpointDelay, &out.CheckpointDelay + *out = new(string) + **out = **in + } + if in.CheckpointPercentages != nil { + in, out := &in.CheckpointPercentages, &out.CheckpointPercentages + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.InstanceWarmup != nil { + in, out := &in.InstanceWarmup, &out.InstanceWarmup + *out = new(string) + **out = **in + } + if in.MaxHealthyPercentage != nil { + in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage + *out = new(float64) + **out = **in + } + if in.MinHealthyPercentage != nil { + in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage + *out = new(float64) + **out = **in + } + if in.ScaleInProtectedInstances != nil { + in, out := &in.ScaleInProtectedInstances, &out.ScaleInProtectedInstances + *out = new(string) + **out = **in + } + if in.SkipMatching != nil { + in, out := &in.SkipMatching, &out.SkipMatching + *out = new(bool) + **out = **in + } + if in.StandbyInstances != nil { + in, out := &in.StandbyInstances, &out.StandbyInstances + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
PreferencesInitParameters. +func (in *PreferencesInitParameters) DeepCopy() *PreferencesInitParameters { + if in == nil { + return nil + } + out := new(PreferencesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferencesObservation) DeepCopyInto(out *PreferencesObservation) { + *out = *in + if in.AlarmSpecification != nil { + in, out := &in.AlarmSpecification, &out.AlarmSpecification + *out = new(AlarmSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoRollback != nil { + in, out := &in.AutoRollback, &out.AutoRollback + *out = new(bool) + **out = **in + } + if in.CheckpointDelay != nil { + in, out := &in.CheckpointDelay, &out.CheckpointDelay + *out = new(string) + **out = **in + } + if in.CheckpointPercentages != nil { + in, out := &in.CheckpointPercentages, &out.CheckpointPercentages + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.InstanceWarmup != nil { + in, out := &in.InstanceWarmup, &out.InstanceWarmup + *out = new(string) + **out = **in + } + if in.MaxHealthyPercentage != nil { + in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage + *out = new(float64) + **out = **in + } + if in.MinHealthyPercentage != nil { + in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage + *out = new(float64) + **out = **in + } + if in.ScaleInProtectedInstances != nil { + in, out := &in.ScaleInProtectedInstances, &out.ScaleInProtectedInstances + *out = new(string) + **out = **in + } + if in.SkipMatching != nil { + in, out := &in.SkipMatching, &out.SkipMatching + *out = new(bool) + **out = **in + } + if in.StandbyInstances != nil { + in, out := &in.StandbyInstances, &out.StandbyInstances + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new PreferencesObservation. +func (in *PreferencesObservation) DeepCopy() *PreferencesObservation { + if in == nil { + return nil + } + out := new(PreferencesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferencesParameters) DeepCopyInto(out *PreferencesParameters) { + *out = *in + if in.AlarmSpecification != nil { + in, out := &in.AlarmSpecification, &out.AlarmSpecification + *out = new(AlarmSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoRollback != nil { + in, out := &in.AutoRollback, &out.AutoRollback + *out = new(bool) + **out = **in + } + if in.CheckpointDelay != nil { + in, out := &in.CheckpointDelay, &out.CheckpointDelay + *out = new(string) + **out = **in + } + if in.CheckpointPercentages != nil { + in, out := &in.CheckpointPercentages, &out.CheckpointPercentages + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.InstanceWarmup != nil { + in, out := &in.InstanceWarmup, &out.InstanceWarmup + *out = new(string) + **out = **in + } + if in.MaxHealthyPercentage != nil { + in, out := &in.MaxHealthyPercentage, &out.MaxHealthyPercentage + *out = new(float64) + **out = **in + } + if in.MinHealthyPercentage != nil { + in, out := &in.MinHealthyPercentage, &out.MinHealthyPercentage + *out = new(float64) + **out = **in + } + if in.ScaleInProtectedInstances != nil { + in, out := &in.ScaleInProtectedInstances, &out.ScaleInProtectedInstances + *out = new(string) + **out = **in + } + if in.SkipMatching != nil { + in, out := &in.SkipMatching, &out.SkipMatching + *out = new(bool) + **out = **in + } + if in.StandbyInstances != nil { + in, out := &in.StandbyInstances, &out.StandbyInstances + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new PreferencesParameters. +func (in *PreferencesParameters) DeepCopy() *PreferencesParameters { + if in == nil { + return nil + } + out := new(PreferencesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagInitParameters) DeepCopyInto(out *TagInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.PropagateAtLaunch != nil { + in, out := &in.PropagateAtLaunch, &out.PropagateAtLaunch + *out = new(bool) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagInitParameters. +func (in *TagInitParameters) DeepCopy() *TagInitParameters { + if in == nil { + return nil + } + out := new(TagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagObservation) DeepCopyInto(out *TagObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.PropagateAtLaunch != nil { + in, out := &in.PropagateAtLaunch, &out.PropagateAtLaunch + *out = new(bool) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagObservation. +func (in *TagObservation) DeepCopy() *TagObservation { + if in == nil { + return nil + } + out := new(TagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TagParameters) DeepCopyInto(out *TagParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.PropagateAtLaunch != nil { + in, out := &in.PropagateAtLaunch, &out.PropagateAtLaunch + *out = new(bool) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagParameters. +func (in *TagParameters) DeepCopy() *TagParameters { + if in == nil { + return nil + } + out := new(TagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TotalLocalStorageGbInitParameters) DeepCopyInto(out *TotalLocalStorageGbInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotalLocalStorageGbInitParameters. +func (in *TotalLocalStorageGbInitParameters) DeepCopy() *TotalLocalStorageGbInitParameters { + if in == nil { + return nil + } + out := new(TotalLocalStorageGbInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TotalLocalStorageGbObservation) DeepCopyInto(out *TotalLocalStorageGbObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotalLocalStorageGbObservation. 
+func (in *TotalLocalStorageGbObservation) DeepCopy() *TotalLocalStorageGbObservation { + if in == nil { + return nil + } + out := new(TotalLocalStorageGbObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TotalLocalStorageGbParameters) DeepCopyInto(out *TotalLocalStorageGbParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotalLocalStorageGbParameters. +func (in *TotalLocalStorageGbParameters) DeepCopy() *TotalLocalStorageGbParameters { + if in == nil { + return nil + } + out := new(TotalLocalStorageGbParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficSourceInitParameters) DeepCopyInto(out *TrafficSourceInitParameters) { + *out = *in + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSourceInitParameters. +func (in *TrafficSourceInitParameters) DeepCopy() *TrafficSourceInitParameters { + if in == nil { + return nil + } + out := new(TrafficSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrafficSourceObservation) DeepCopyInto(out *TrafficSourceObservation) { + *out = *in + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSourceObservation. +func (in *TrafficSourceObservation) DeepCopy() *TrafficSourceObservation { + if in == nil { + return nil + } + out := new(TrafficSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficSourceParameters) DeepCopyInto(out *TrafficSourceParameters) { + *out = *in + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSourceParameters. +func (in *TrafficSourceParameters) DeepCopy() *TrafficSourceParameters { + if in == nil { + return nil + } + out := new(TrafficSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VcpuCountInitParameters) DeepCopyInto(out *VcpuCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VcpuCountInitParameters. 
+func (in *VcpuCountInitParameters) DeepCopy() *VcpuCountInitParameters { + if in == nil { + return nil + } + out := new(VcpuCountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VcpuCountObservation) DeepCopyInto(out *VcpuCountObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VcpuCountObservation. +func (in *VcpuCountObservation) DeepCopy() *VcpuCountObservation { + if in == nil { + return nil + } + out := new(VcpuCountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VcpuCountParameters) DeepCopyInto(out *VcpuCountParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VcpuCountParameters. +func (in *VcpuCountParameters) DeepCopy() *VcpuCountParameters { + if in == nil { + return nil + } + out := new(VcpuCountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WarmPoolInitParameters) DeepCopyInto(out *WarmPoolInitParameters) { + *out = *in + if in.InstanceReusePolicy != nil { + in, out := &in.InstanceReusePolicy, &out.InstanceReusePolicy + *out = new(InstanceReusePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxGroupPreparedCapacity != nil { + in, out := &in.MaxGroupPreparedCapacity, &out.MaxGroupPreparedCapacity + *out = new(float64) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(float64) + **out = **in + } + if in.PoolState != nil { + in, out := &in.PoolState, &out.PoolState + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WarmPoolInitParameters. +func (in *WarmPoolInitParameters) DeepCopy() *WarmPoolInitParameters { + if in == nil { + return nil + } + out := new(WarmPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WarmPoolObservation) DeepCopyInto(out *WarmPoolObservation) { + *out = *in + if in.InstanceReusePolicy != nil { + in, out := &in.InstanceReusePolicy, &out.InstanceReusePolicy + *out = new(InstanceReusePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.MaxGroupPreparedCapacity != nil { + in, out := &in.MaxGroupPreparedCapacity, &out.MaxGroupPreparedCapacity + *out = new(float64) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(float64) + **out = **in + } + if in.PoolState != nil { + in, out := &in.PoolState, &out.PoolState + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WarmPoolObservation. 
+func (in *WarmPoolObservation) DeepCopy() *WarmPoolObservation { + if in == nil { + return nil + } + out := new(WarmPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WarmPoolParameters) DeepCopyInto(out *WarmPoolParameters) { + *out = *in + if in.InstanceReusePolicy != nil { + in, out := &in.InstanceReusePolicy, &out.InstanceReusePolicy + *out = new(InstanceReusePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxGroupPreparedCapacity != nil { + in, out := &in.MaxGroupPreparedCapacity, &out.MaxGroupPreparedCapacity + *out = new(float64) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(float64) + **out = **in + } + if in.PoolState != nil { + in, out := &in.PoolState, &out.PoolState + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WarmPoolParameters. +func (in *WarmPoolParameters) DeepCopy() *WarmPoolParameters { + if in == nil { + return nil + } + out := new(WarmPoolParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/autoscaling/v1beta3/zz_generated.managed.go b/apis/autoscaling/v1beta3/zz_generated.managed.go new file mode 100644 index 0000000000..72f3d0df5e --- /dev/null +++ b/apis/autoscaling/v1beta3/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta3 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AutoscalingGroup. +func (mg *AutoscalingGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AutoscalingGroup. 
+func (mg *AutoscalingGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AutoscalingGroup. +func (mg *AutoscalingGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AutoscalingGroup. +func (mg *AutoscalingGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AutoscalingGroup. +func (mg *AutoscalingGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AutoscalingGroup. +func (mg *AutoscalingGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AutoscalingGroup. +func (mg *AutoscalingGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AutoscalingGroup. +func (mg *AutoscalingGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AutoscalingGroup. +func (mg *AutoscalingGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AutoscalingGroup. +func (mg *AutoscalingGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AutoscalingGroup. +func (mg *AutoscalingGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AutoscalingGroup. 
+func (mg *AutoscalingGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/autoscaling/v1beta3/zz_generated.managedlist.go b/apis/autoscaling/v1beta3/zz_generated.managedlist.go new file mode 100644 index 0000000000..bb929852fd --- /dev/null +++ b/apis/autoscaling/v1beta3/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta3 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AutoscalingGroupList. +func (l *AutoscalingGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/autoscaling/v1beta3/zz_generated.resolvers.go b/apis/autoscaling/v1beta3/zz_generated.resolvers.go new file mode 100644 index 0000000000..d55f66e47a --- /dev/null +++ b/apis/autoscaling/v1beta3/zz_generated.resolvers.go @@ -0,0 +1,330 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *AutoscalingGroup) ResolveReferences( // ResolveReferences of this AutoscalingGroup. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta2", "LaunchConfiguration", "LaunchConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LaunchConfiguration), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LaunchConfigurationRef, + Selector: mg.Spec.ForProvider.LaunchConfigurationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LaunchConfiguration") + } + mg.Spec.ForProvider.LaunchConfiguration = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LaunchConfigurationRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.LaunchTemplate != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LaunchTemplate.ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.LaunchTemplate.IDRef, + Selector: mg.Spec.ForProvider.LaunchTemplate.IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LaunchTemplate.ID") + } + mg.Spec.ForProvider.LaunchTemplate.ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LaunchTemplate.IDRef = 
rsp.ResolvedReference + + } + if mg.Spec.ForProvider.MixedInstancesPolicy != nil { + if mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate != nil { + if mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateIDRef, + Selector: mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateID") + } + mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateIDRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.MixedInstancesPolicy != nil { + if mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.Override); i5++ { + if mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to 
get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateIDRef, + Selector: mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateID") + } + mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateIDRef = rsp.ResolvedReference + + } + } + } + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "PlacementGroup", "PlacementGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PlacementGroup), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PlacementGroupRef, + Selector: mg.Spec.ForProvider.PlacementGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PlacementGroup") + } + mg.Spec.ForProvider.PlacementGroup = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PlacementGroupRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceLinkedRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ServiceLinkedRoleArnRef, + Selector: mg.Spec.ForProvider.ServiceLinkedRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceLinkedRoleArn") + } + mg.Spec.ForProvider.ServiceLinkedRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceLinkedRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCZoneIdentifier), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCZoneIdentifierRefs, + Selector: mg.Spec.ForProvider.VPCZoneIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCZoneIdentifier") + } + mg.Spec.ForProvider.VPCZoneIdentifier = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCZoneIdentifierRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta2", "LaunchConfiguration", "LaunchConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LaunchConfiguration), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LaunchConfigurationRef, + Selector: mg.Spec.InitProvider.LaunchConfigurationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LaunchConfiguration") + } + mg.Spec.InitProvider.LaunchConfiguration = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LaunchConfigurationRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.LaunchTemplate != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LaunchTemplate.ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.LaunchTemplate.IDRef, + Selector: mg.Spec.InitProvider.LaunchTemplate.IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LaunchTemplate.ID") + } + mg.Spec.InitProvider.LaunchTemplate.ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LaunchTemplate.IDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.MixedInstancesPolicy != nil { + if mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate != nil { + if mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateIDRef, + Selector: mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateID") + } + mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateIDRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.MixedInstancesPolicy != nil { + if mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.Override); i5++ { + if mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateIDRef, + Selector: 
mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateID") + } + mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MixedInstancesPolicy.LaunchTemplate.Override[i5].LaunchTemplateSpecification.LaunchTemplateIDRef = rsp.ResolvedReference + + } + } + } + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "PlacementGroup", "PlacementGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PlacementGroup), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PlacementGroupRef, + Selector: mg.Spec.InitProvider.PlacementGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PlacementGroup") + } + mg.Spec.InitProvider.PlacementGroup = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PlacementGroupRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceLinkedRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ServiceLinkedRoleArnRef, + Selector: 
mg.Spec.InitProvider.ServiceLinkedRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceLinkedRoleArn") + } + mg.Spec.InitProvider.ServiceLinkedRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceLinkedRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCZoneIdentifier), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCZoneIdentifierRefs, + Selector: mg.Spec.InitProvider.VPCZoneIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCZoneIdentifier") + } + mg.Spec.InitProvider.VPCZoneIdentifier = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCZoneIdentifierRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/autoscaling/v1beta3/zz_groupversion_info.go b/apis/autoscaling/v1beta3/zz_groupversion_info.go new file mode 100755 index 0000000000..c4d4b78059 --- /dev/null +++ b/apis/autoscaling/v1beta3/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=autoscaling.aws.upbound.io +// +versionName=v1beta3 +package v1beta3 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "autoscaling.aws.upbound.io" + CRDVersion = "v1beta3" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/autoscalingplans/v1beta1/zz_generated.conversion_spokes.go b/apis/autoscalingplans/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..eaf031e485 --- /dev/null +++ b/apis/autoscalingplans/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ScalingPlan to the hub type. +func (tr *ScalingPlan) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ScalingPlan type. 
+func (tr *ScalingPlan) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/autoscalingplans/v1beta2/zz_generated.conversion_hubs.go b/apis/autoscalingplans/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2f8e611d8b --- /dev/null +++ b/apis/autoscalingplans/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ScalingPlan) Hub() {} diff --git a/apis/autoscalingplans/v1beta2/zz_generated.deepcopy.go b/apis/autoscalingplans/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..d9292f3b1c --- /dev/null +++ b/apis/autoscalingplans/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1259 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationSourceInitParameters) DeepCopyInto(out *ApplicationSourceInitParameters) { + *out = *in + if in.CloudFormationStackArn != nil { + in, out := &in.CloudFormationStackArn, &out.CloudFormationStackArn + *out = new(string) + **out = **in + } + if in.TagFilter != nil { + in, out := &in.TagFilter, &out.TagFilter + *out = make([]TagFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSourceInitParameters. +func (in *ApplicationSourceInitParameters) DeepCopy() *ApplicationSourceInitParameters { + if in == nil { + return nil + } + out := new(ApplicationSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSourceObservation) DeepCopyInto(out *ApplicationSourceObservation) { + *out = *in + if in.CloudFormationStackArn != nil { + in, out := &in.CloudFormationStackArn, &out.CloudFormationStackArn + *out = new(string) + **out = **in + } + if in.TagFilter != nil { + in, out := &in.TagFilter, &out.TagFilter + *out = make([]TagFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSourceObservation. +func (in *ApplicationSourceObservation) DeepCopy() *ApplicationSourceObservation { + if in == nil { + return nil + } + out := new(ApplicationSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationSourceParameters) DeepCopyInto(out *ApplicationSourceParameters) { + *out = *in + if in.CloudFormationStackArn != nil { + in, out := &in.CloudFormationStackArn, &out.CloudFormationStackArn + *out = new(string) + **out = **in + } + if in.TagFilter != nil { + in, out := &in.TagFilter, &out.TagFilter + *out = make([]TagFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSourceParameters. +func (in *ApplicationSourceParameters) DeepCopy() *ApplicationSourceParameters { + if in == nil { + return nil + } + out := new(ApplicationSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizedLoadMetricSpecificationInitParameters) DeepCopyInto(out *CustomizedLoadMetricSpecificationInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedLoadMetricSpecificationInitParameters. 
+func (in *CustomizedLoadMetricSpecificationInitParameters) DeepCopy() *CustomizedLoadMetricSpecificationInitParameters { + if in == nil { + return nil + } + out := new(CustomizedLoadMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizedLoadMetricSpecificationObservation) DeepCopyInto(out *CustomizedLoadMetricSpecificationObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedLoadMetricSpecificationObservation. +func (in *CustomizedLoadMetricSpecificationObservation) DeepCopy() *CustomizedLoadMetricSpecificationObservation { + if in == nil { + return nil + } + out := new(CustomizedLoadMetricSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedLoadMetricSpecificationParameters) DeepCopyInto(out *CustomizedLoadMetricSpecificationParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedLoadMetricSpecificationParameters. +func (in *CustomizedLoadMetricSpecificationParameters) DeepCopy() *CustomizedLoadMetricSpecificationParameters { + if in == nil { + return nil + } + out := new(CustomizedLoadMetricSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedScalingMetricSpecificationInitParameters) DeepCopyInto(out *CustomizedScalingMetricSpecificationInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationInitParameters. +func (in *CustomizedScalingMetricSpecificationInitParameters) DeepCopy() *CustomizedScalingMetricSpecificationInitParameters { + if in == nil { + return nil + } + out := new(CustomizedScalingMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedScalingMetricSpecificationObservation) DeepCopyInto(out *CustomizedScalingMetricSpecificationObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationObservation. +func (in *CustomizedScalingMetricSpecificationObservation) DeepCopy() *CustomizedScalingMetricSpecificationObservation { + if in == nil { + return nil + } + out := new(CustomizedScalingMetricSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizedScalingMetricSpecificationParameters) DeepCopyInto(out *CustomizedScalingMetricSpecificationParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizedScalingMetricSpecificationParameters. +func (in *CustomizedScalingMetricSpecificationParameters) DeepCopy() *CustomizedScalingMetricSpecificationParameters { + if in == nil { + return nil + } + out := new(CustomizedScalingMetricSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredefinedLoadMetricSpecificationInitParameters) DeepCopyInto(out *PredefinedLoadMetricSpecificationInitParameters) { + *out = *in + if in.PredefinedLoadMetricType != nil { + in, out := &in.PredefinedLoadMetricType, &out.PredefinedLoadMetricType + *out = new(string) + **out = **in + } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedLoadMetricSpecificationInitParameters. 
+func (in *PredefinedLoadMetricSpecificationInitParameters) DeepCopy() *PredefinedLoadMetricSpecificationInitParameters { + if in == nil { + return nil + } + out := new(PredefinedLoadMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredefinedLoadMetricSpecificationObservation) DeepCopyInto(out *PredefinedLoadMetricSpecificationObservation) { + *out = *in + if in.PredefinedLoadMetricType != nil { + in, out := &in.PredefinedLoadMetricType, &out.PredefinedLoadMetricType + *out = new(string) + **out = **in + } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedLoadMetricSpecificationObservation. +func (in *PredefinedLoadMetricSpecificationObservation) DeepCopy() *PredefinedLoadMetricSpecificationObservation { + if in == nil { + return nil + } + out := new(PredefinedLoadMetricSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredefinedLoadMetricSpecificationParameters) DeepCopyInto(out *PredefinedLoadMetricSpecificationParameters) { + *out = *in + if in.PredefinedLoadMetricType != nil { + in, out := &in.PredefinedLoadMetricType, &out.PredefinedLoadMetricType + *out = new(string) + **out = **in + } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedLoadMetricSpecificationParameters. 
+func (in *PredefinedLoadMetricSpecificationParameters) DeepCopy() *PredefinedLoadMetricSpecificationParameters { + if in == nil { + return nil + } + out := new(PredefinedLoadMetricSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredefinedScalingMetricSpecificationInitParameters) DeepCopyInto(out *PredefinedScalingMetricSpecificationInitParameters) { + *out = *in + if in.PredefinedScalingMetricType != nil { + in, out := &in.PredefinedScalingMetricType, &out.PredefinedScalingMetricType + *out = new(string) + **out = **in + } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedScalingMetricSpecificationInitParameters. +func (in *PredefinedScalingMetricSpecificationInitParameters) DeepCopy() *PredefinedScalingMetricSpecificationInitParameters { + if in == nil { + return nil + } + out := new(PredefinedScalingMetricSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredefinedScalingMetricSpecificationObservation) DeepCopyInto(out *PredefinedScalingMetricSpecificationObservation) { + *out = *in + if in.PredefinedScalingMetricType != nil { + in, out := &in.PredefinedScalingMetricType, &out.PredefinedScalingMetricType + *out = new(string) + **out = **in + } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedScalingMetricSpecificationObservation. 
+func (in *PredefinedScalingMetricSpecificationObservation) DeepCopy() *PredefinedScalingMetricSpecificationObservation { + if in == nil { + return nil + } + out := new(PredefinedScalingMetricSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredefinedScalingMetricSpecificationParameters) DeepCopyInto(out *PredefinedScalingMetricSpecificationParameters) { + *out = *in + if in.PredefinedScalingMetricType != nil { + in, out := &in.PredefinedScalingMetricType, &out.PredefinedScalingMetricType + *out = new(string) + **out = **in + } + if in.ResourceLabel != nil { + in, out := &in.ResourceLabel, &out.ResourceLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredefinedScalingMetricSpecificationParameters. +func (in *PredefinedScalingMetricSpecificationParameters) DeepCopy() *PredefinedScalingMetricSpecificationParameters { + if in == nil { + return nil + } + out := new(PredefinedScalingMetricSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingInstructionInitParameters) DeepCopyInto(out *ScalingInstructionInitParameters) { + *out = *in + if in.CustomizedLoadMetricSpecification != nil { + in, out := &in.CustomizedLoadMetricSpecification, &out.CustomizedLoadMetricSpecification + *out = new(CustomizedLoadMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableDynamicScaling != nil { + in, out := &in.DisableDynamicScaling, &out.DisableDynamicScaling + *out = new(bool) + **out = **in + } + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } + if in.PredefinedLoadMetricSpecification != nil { + in, out := &in.PredefinedLoadMetricSpecification, &out.PredefinedLoadMetricSpecification + *out = new(PredefinedLoadMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PredictiveScalingMaxCapacityBehavior != nil { + in, out := &in.PredictiveScalingMaxCapacityBehavior, &out.PredictiveScalingMaxCapacityBehavior + *out = new(string) + **out = **in + } + if in.PredictiveScalingMaxCapacityBuffer != nil { + in, out := &in.PredictiveScalingMaxCapacityBuffer, &out.PredictiveScalingMaxCapacityBuffer + *out = new(float64) + **out = **in + } + if in.PredictiveScalingMode != nil { + in, out := &in.PredictiveScalingMode, &out.PredictiveScalingMode + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ScalableDimension != nil { + in, out := &in.ScalableDimension, &out.ScalableDimension + *out = new(string) + **out = **in + } + if in.ScalingPolicyUpdateBehavior != nil { + in, out := &in.ScalingPolicyUpdateBehavior, &out.ScalingPolicyUpdateBehavior + *out = new(string) + **out = **in + } + if in.ScheduledActionBufferTime != nil { + in, out := &in.ScheduledActionBufferTime, 
&out.ScheduledActionBufferTime + *out = new(float64) + **out = **in + } + if in.ServiceNamespace != nil { + in, out := &in.ServiceNamespace, &out.ServiceNamespace + *out = new(string) + **out = **in + } + if in.TargetTrackingConfiguration != nil { + in, out := &in.TargetTrackingConfiguration, &out.TargetTrackingConfiguration + *out = make([]TargetTrackingConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingInstructionInitParameters. +func (in *ScalingInstructionInitParameters) DeepCopy() *ScalingInstructionInitParameters { + if in == nil { + return nil + } + out := new(ScalingInstructionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingInstructionObservation) DeepCopyInto(out *ScalingInstructionObservation) { + *out = *in + if in.CustomizedLoadMetricSpecification != nil { + in, out := &in.CustomizedLoadMetricSpecification, &out.CustomizedLoadMetricSpecification + *out = new(CustomizedLoadMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.DisableDynamicScaling != nil { + in, out := &in.DisableDynamicScaling, &out.DisableDynamicScaling + *out = new(bool) + **out = **in + } + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } + if in.PredefinedLoadMetricSpecification != nil { + in, out := &in.PredefinedLoadMetricSpecification, &out.PredefinedLoadMetricSpecification + *out = new(PredefinedLoadMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.PredictiveScalingMaxCapacityBehavior != nil { + in, out := &in.PredictiveScalingMaxCapacityBehavior, 
&out.PredictiveScalingMaxCapacityBehavior + *out = new(string) + **out = **in + } + if in.PredictiveScalingMaxCapacityBuffer != nil { + in, out := &in.PredictiveScalingMaxCapacityBuffer, &out.PredictiveScalingMaxCapacityBuffer + *out = new(float64) + **out = **in + } + if in.PredictiveScalingMode != nil { + in, out := &in.PredictiveScalingMode, &out.PredictiveScalingMode + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ScalableDimension != nil { + in, out := &in.ScalableDimension, &out.ScalableDimension + *out = new(string) + **out = **in + } + if in.ScalingPolicyUpdateBehavior != nil { + in, out := &in.ScalingPolicyUpdateBehavior, &out.ScalingPolicyUpdateBehavior + *out = new(string) + **out = **in + } + if in.ScheduledActionBufferTime != nil { + in, out := &in.ScheduledActionBufferTime, &out.ScheduledActionBufferTime + *out = new(float64) + **out = **in + } + if in.ServiceNamespace != nil { + in, out := &in.ServiceNamespace, &out.ServiceNamespace + *out = new(string) + **out = **in + } + if in.TargetTrackingConfiguration != nil { + in, out := &in.TargetTrackingConfiguration, &out.TargetTrackingConfiguration + *out = make([]TargetTrackingConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingInstructionObservation. +func (in *ScalingInstructionObservation) DeepCopy() *ScalingInstructionObservation { + if in == nil { + return nil + } + out := new(ScalingInstructionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingInstructionParameters) DeepCopyInto(out *ScalingInstructionParameters) { + *out = *in + if in.CustomizedLoadMetricSpecification != nil { + in, out := &in.CustomizedLoadMetricSpecification, &out.CustomizedLoadMetricSpecification + *out = new(CustomizedLoadMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableDynamicScaling != nil { + in, out := &in.DisableDynamicScaling, &out.DisableDynamicScaling + *out = new(bool) + **out = **in + } + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } + if in.PredefinedLoadMetricSpecification != nil { + in, out := &in.PredefinedLoadMetricSpecification, &out.PredefinedLoadMetricSpecification + *out = new(PredefinedLoadMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.PredictiveScalingMaxCapacityBehavior != nil { + in, out := &in.PredictiveScalingMaxCapacityBehavior, &out.PredictiveScalingMaxCapacityBehavior + *out = new(string) + **out = **in + } + if in.PredictiveScalingMaxCapacityBuffer != nil { + in, out := &in.PredictiveScalingMaxCapacityBuffer, &out.PredictiveScalingMaxCapacityBuffer + *out = new(float64) + **out = **in + } + if in.PredictiveScalingMode != nil { + in, out := &in.PredictiveScalingMode, &out.PredictiveScalingMode + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ScalableDimension != nil { + in, out := &in.ScalableDimension, &out.ScalableDimension + *out = new(string) + **out = **in + } + if in.ScalingPolicyUpdateBehavior != nil { + in, out := &in.ScalingPolicyUpdateBehavior, &out.ScalingPolicyUpdateBehavior + *out = new(string) + **out = **in + } + if in.ScheduledActionBufferTime != nil { + in, out := &in.ScheduledActionBufferTime, &out.ScheduledActionBufferTime + 
*out = new(float64) + **out = **in + } + if in.ServiceNamespace != nil { + in, out := &in.ServiceNamespace, &out.ServiceNamespace + *out = new(string) + **out = **in + } + if in.TargetTrackingConfiguration != nil { + in, out := &in.TargetTrackingConfiguration, &out.TargetTrackingConfiguration + *out = make([]TargetTrackingConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingInstructionParameters. +func (in *ScalingInstructionParameters) DeepCopy() *ScalingInstructionParameters { + if in == nil { + return nil + } + out := new(ScalingInstructionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPlan) DeepCopyInto(out *ScalingPlan) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPlan. +func (in *ScalingPlan) DeepCopy() *ScalingPlan { + if in == nil { + return nil + } + out := new(ScalingPlan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalingPlan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingPlanInitParameters) DeepCopyInto(out *ScalingPlanInitParameters) { + *out = *in + if in.ApplicationSource != nil { + in, out := &in.ApplicationSource, &out.ApplicationSource + *out = new(ApplicationSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ScalingInstruction != nil { + in, out := &in.ScalingInstruction, &out.ScalingInstruction + *out = make([]ScalingInstructionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPlanInitParameters. +func (in *ScalingPlanInitParameters) DeepCopy() *ScalingPlanInitParameters { + if in == nil { + return nil + } + out := new(ScalingPlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPlanList) DeepCopyInto(out *ScalingPlanList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScalingPlan, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPlanList. +func (in *ScalingPlanList) DeepCopy() *ScalingPlanList { + if in == nil { + return nil + } + out := new(ScalingPlanList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalingPlanList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingPlanObservation) DeepCopyInto(out *ScalingPlanObservation) { + *out = *in + if in.ApplicationSource != nil { + in, out := &in.ApplicationSource, &out.ApplicationSource + *out = new(ApplicationSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ScalingInstruction != nil { + in, out := &in.ScalingInstruction, &out.ScalingInstruction + *out = make([]ScalingInstructionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScalingPlanVersion != nil { + in, out := &in.ScalingPlanVersion, &out.ScalingPlanVersion + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPlanObservation. +func (in *ScalingPlanObservation) DeepCopy() *ScalingPlanObservation { + if in == nil { + return nil + } + out := new(ScalingPlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingPlanParameters) DeepCopyInto(out *ScalingPlanParameters) { + *out = *in + if in.ApplicationSource != nil { + in, out := &in.ApplicationSource, &out.ApplicationSource + *out = new(ApplicationSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ScalingInstruction != nil { + in, out := &in.ScalingInstruction, &out.ScalingInstruction + *out = make([]ScalingInstructionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPlanParameters. +func (in *ScalingPlanParameters) DeepCopy() *ScalingPlanParameters { + if in == nil { + return nil + } + out := new(ScalingPlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPlanSpec) DeepCopyInto(out *ScalingPlanSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPlanSpec. +func (in *ScalingPlanSpec) DeepCopy() *ScalingPlanSpec { + if in == nil { + return nil + } + out := new(ScalingPlanSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPlanStatus) DeepCopyInto(out *ScalingPlanStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPlanStatus. 
+func (in *ScalingPlanStatus) DeepCopy() *ScalingPlanStatus { + if in == nil { + return nil + } + out := new(ScalingPlanStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagFilterInitParameters) DeepCopyInto(out *TagFilterInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagFilterInitParameters. +func (in *TagFilterInitParameters) DeepCopy() *TagFilterInitParameters { + if in == nil { + return nil + } + out := new(TagFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagFilterObservation) DeepCopyInto(out *TagFilterObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagFilterObservation. +func (in *TagFilterObservation) DeepCopy() *TagFilterObservation { + if in == nil { + return nil + } + out := new(TagFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TagFilterParameters) DeepCopyInto(out *TagFilterParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagFilterParameters. +func (in *TagFilterParameters) DeepCopy() *TagFilterParameters { + if in == nil { + return nil + } + out := new(TagFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetTrackingConfigurationInitParameters) DeepCopyInto(out *TargetTrackingConfigurationInitParameters) { + *out = *in + if in.CustomizedScalingMetricSpecification != nil { + in, out := &in.CustomizedScalingMetricSpecification, &out.CustomizedScalingMetricSpecification + *out = new(CustomizedScalingMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableScaleIn != nil { + in, out := &in.DisableScaleIn, &out.DisableScaleIn + *out = new(bool) + **out = **in + } + if in.EstimatedInstanceWarmup != nil { + in, out := &in.EstimatedInstanceWarmup, &out.EstimatedInstanceWarmup + *out = new(float64) + **out = **in + } + if in.PredefinedScalingMetricSpecification != nil { + in, out := &in.PredefinedScalingMetricSpecification, &out.PredefinedScalingMetricSpecification + *out = new(PredefinedScalingMetricSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleInCooldown != nil { + in, out := &in.ScaleInCooldown, &out.ScaleInCooldown + *out = new(float64) + **out = **in + } + if in.ScaleOutCooldown != nil { + in, out := &in.ScaleOutCooldown, &out.ScaleOutCooldown + *out = new(float64) + **out = **in + } + if in.TargetValue != nil { 
+ in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTrackingConfigurationInitParameters. +func (in *TargetTrackingConfigurationInitParameters) DeepCopy() *TargetTrackingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(TargetTrackingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetTrackingConfigurationObservation) DeepCopyInto(out *TargetTrackingConfigurationObservation) { + *out = *in + if in.CustomizedScalingMetricSpecification != nil { + in, out := &in.CustomizedScalingMetricSpecification, &out.CustomizedScalingMetricSpecification + *out = new(CustomizedScalingMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.DisableScaleIn != nil { + in, out := &in.DisableScaleIn, &out.DisableScaleIn + *out = new(bool) + **out = **in + } + if in.EstimatedInstanceWarmup != nil { + in, out := &in.EstimatedInstanceWarmup, &out.EstimatedInstanceWarmup + *out = new(float64) + **out = **in + } + if in.PredefinedScalingMetricSpecification != nil { + in, out := &in.PredefinedScalingMetricSpecification, &out.PredefinedScalingMetricSpecification + *out = new(PredefinedScalingMetricSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.ScaleInCooldown != nil { + in, out := &in.ScaleInCooldown, &out.ScaleInCooldown + *out = new(float64) + **out = **in + } + if in.ScaleOutCooldown != nil { + in, out := &in.ScaleOutCooldown, &out.ScaleOutCooldown + *out = new(float64) + **out = **in + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTrackingConfigurationObservation. 
+func (in *TargetTrackingConfigurationObservation) DeepCopy() *TargetTrackingConfigurationObservation { + if in == nil { + return nil + } + out := new(TargetTrackingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetTrackingConfigurationParameters) DeepCopyInto(out *TargetTrackingConfigurationParameters) { + *out = *in + if in.CustomizedScalingMetricSpecification != nil { + in, out := &in.CustomizedScalingMetricSpecification, &out.CustomizedScalingMetricSpecification + *out = new(CustomizedScalingMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableScaleIn != nil { + in, out := &in.DisableScaleIn, &out.DisableScaleIn + *out = new(bool) + **out = **in + } + if in.EstimatedInstanceWarmup != nil { + in, out := &in.EstimatedInstanceWarmup, &out.EstimatedInstanceWarmup + *out = new(float64) + **out = **in + } + if in.PredefinedScalingMetricSpecification != nil { + in, out := &in.PredefinedScalingMetricSpecification, &out.PredefinedScalingMetricSpecification + *out = new(PredefinedScalingMetricSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleInCooldown != nil { + in, out := &in.ScaleInCooldown, &out.ScaleInCooldown + *out = new(float64) + **out = **in + } + if in.ScaleOutCooldown != nil { + in, out := &in.ScaleOutCooldown, &out.ScaleOutCooldown + *out = new(float64) + **out = **in + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTrackingConfigurationParameters. 
+func (in *TargetTrackingConfigurationParameters) DeepCopy() *TargetTrackingConfigurationParameters { + if in == nil { + return nil + } + out := new(TargetTrackingConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/autoscalingplans/v1beta2/zz_generated.managed.go b/apis/autoscalingplans/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..79d9260781 --- /dev/null +++ b/apis/autoscalingplans/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ScalingPlan. +func (mg *ScalingPlan) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ScalingPlan. +func (mg *ScalingPlan) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ScalingPlan. +func (mg *ScalingPlan) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ScalingPlan. +func (mg *ScalingPlan) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ScalingPlan. +func (mg *ScalingPlan) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ScalingPlan. +func (mg *ScalingPlan) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ScalingPlan. +func (mg *ScalingPlan) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ScalingPlan. 
+func (mg *ScalingPlan) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ScalingPlan. +func (mg *ScalingPlan) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ScalingPlan. +func (mg *ScalingPlan) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ScalingPlan. +func (mg *ScalingPlan) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ScalingPlan. +func (mg *ScalingPlan) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/autoscalingplans/v1beta2/zz_generated.managedlist.go b/apis/autoscalingplans/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..610cbf8d2c --- /dev/null +++ b/apis/autoscalingplans/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ScalingPlanList. +func (l *ScalingPlanList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/autoscalingplans/v1beta2/zz_groupversion_info.go b/apis/autoscalingplans/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..68747dfd01 --- /dev/null +++ b/apis/autoscalingplans/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=autoscalingplans.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "autoscalingplans.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/autoscalingplans/v1beta2/zz_scalingplan_terraformed.go b/apis/autoscalingplans/v1beta2/zz_scalingplan_terraformed.go new file mode 100755 index 0000000000..56240ec5e9 --- /dev/null +++ b/apis/autoscalingplans/v1beta2/zz_scalingplan_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ScalingPlan +func (mg *ScalingPlan) GetTerraformResourceType() string { + return "aws_autoscalingplans_scaling_plan" +} + +// GetConnectionDetailsMapping for this ScalingPlan +func (tr *ScalingPlan) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ScalingPlan +func (tr *ScalingPlan) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ScalingPlan +func (tr *ScalingPlan) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ScalingPlan +func (tr *ScalingPlan) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ScalingPlan +func (tr *ScalingPlan) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ScalingPlan +func (tr *ScalingPlan) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ScalingPlan +func (tr *ScalingPlan) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ScalingPlan +func (tr *ScalingPlan) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ScalingPlan using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ScalingPlan) LateInitialize(attrs []byte) (bool, error) { + params := &ScalingPlanParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ScalingPlan) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/autoscalingplans/v1beta2/zz_scalingplan_types.go b/apis/autoscalingplans/v1beta2/zz_scalingplan_types.go new file mode 100755 index 0000000000..232a092025 --- /dev/null +++ b/apis/autoscalingplans/v1beta2/zz_scalingplan_types.go @@ -0,0 +1,625 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationSourceInitParameters struct { + + // ARN of a AWS CloudFormation stack. + CloudFormationStackArn *string `json:"cloudformationStackArn,omitempty" tf:"cloudformation_stack_arn,omitempty"` + + // Set of tags. + TagFilter []TagFilterInitParameters `json:"tagFilter,omitempty" tf:"tag_filter,omitempty"` +} + +type ApplicationSourceObservation struct { + + // ARN of a AWS CloudFormation stack. + CloudFormationStackArn *string `json:"cloudformationStackArn,omitempty" tf:"cloudformation_stack_arn,omitempty"` + + // Set of tags. + TagFilter []TagFilterObservation `json:"tagFilter,omitempty" tf:"tag_filter,omitempty"` +} + +type ApplicationSourceParameters struct { + + // ARN of a AWS CloudFormation stack. + // +kubebuilder:validation:Optional + CloudFormationStackArn *string `json:"cloudformationStackArn,omitempty" tf:"cloudformation_stack_arn,omitempty"` + + // Set of tags. + // +kubebuilder:validation:Optional + TagFilter []TagFilterParameters `json:"tagFilter,omitempty" tf:"tag_filter,omitempty"` +} + +type CustomizedLoadMetricSpecificationInitParameters struct { + + // Dimensions of the metric. 
+ // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. Currently, the value must always be Sum. + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedLoadMetricSpecificationObservation struct { + + // Dimensions of the metric. + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. Currently, the value must always be Sum. + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedLoadMetricSpecificationParameters struct { + + // Dimensions of the metric. + // +kubebuilder:validation:Optional + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // Namespace of the metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace" tf:"namespace,omitempty"` + + // Statistic of the metric. Currently, the value must always be Sum. + // +kubebuilder:validation:Optional + Statistic *string `json:"statistic" tf:"statistic,omitempty"` + + // Unit of the metric. 
+ // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedScalingMetricSpecificationInitParameters struct { + + // Dimensions of the metric. + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. Currently, the value must always be Sum. + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedScalingMetricSpecificationObservation struct { + + // Dimensions of the metric. + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Namespace of the metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Statistic of the metric. Currently, the value must always be Sum. + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Unit of the metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type CustomizedScalingMetricSpecificationParameters struct { + + // Dimensions of the metric. + // +kubebuilder:validation:Optional + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Name of the metric. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // Namespace of the metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace" tf:"namespace,omitempty"` + + // Statistic of the metric. 
Currently, the value must always be Sum. + // +kubebuilder:validation:Optional + Statistic *string `json:"statistic" tf:"statistic,omitempty"` + + // Unit of the metric. + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type PredefinedLoadMetricSpecificationInitParameters struct { + + // Metric type. Valid values: ALBTargetGroupRequestCount, ASGTotalCPUUtilization, ASGTotalNetworkIn, ASGTotalNetworkOut. + PredefinedLoadMetricType *string `json:"predefinedLoadMetricType,omitempty" tf:"predefined_load_metric_type,omitempty"` + + // Identifies the resource associated with the metric type. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedLoadMetricSpecificationObservation struct { + + // Metric type. Valid values: ALBTargetGroupRequestCount, ASGTotalCPUUtilization, ASGTotalNetworkIn, ASGTotalNetworkOut. + PredefinedLoadMetricType *string `json:"predefinedLoadMetricType,omitempty" tf:"predefined_load_metric_type,omitempty"` + + // Identifies the resource associated with the metric type. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedLoadMetricSpecificationParameters struct { + + // Metric type. Valid values: ALBTargetGroupRequestCount, ASGTotalCPUUtilization, ASGTotalNetworkIn, ASGTotalNetworkOut. + // +kubebuilder:validation:Optional + PredefinedLoadMetricType *string `json:"predefinedLoadMetricType" tf:"predefined_load_metric_type,omitempty"` + + // Identifies the resource associated with the metric type. + // +kubebuilder:validation:Optional + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedScalingMetricSpecificationInitParameters struct { + + // Metric type. 
Valid values: ALBRequestCountPerTarget, ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, DynamoDBReadCapacityUtilization, DynamoDBWriteCapacityUtilization, ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization, EC2SpotFleetRequestAverageCPUUtilization, EC2SpotFleetRequestAverageNetworkIn, EC2SpotFleetRequestAverageNetworkOut, RDSReaderAverageCPUUtilization, RDSReaderAverageDatabaseConnections. + PredefinedScalingMetricType *string `json:"predefinedScalingMetricType,omitempty" tf:"predefined_scaling_metric_type,omitempty"` + + // Identifies the resource associated with the metric type. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedScalingMetricSpecificationObservation struct { + + // Metric type. Valid values: ALBRequestCountPerTarget, ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, DynamoDBReadCapacityUtilization, DynamoDBWriteCapacityUtilization, ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization, EC2SpotFleetRequestAverageCPUUtilization, EC2SpotFleetRequestAverageNetworkIn, EC2SpotFleetRequestAverageNetworkOut, RDSReaderAverageCPUUtilization, RDSReaderAverageDatabaseConnections. + PredefinedScalingMetricType *string `json:"predefinedScalingMetricType,omitempty" tf:"predefined_scaling_metric_type,omitempty"` + + // Identifies the resource associated with the metric type. + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type PredefinedScalingMetricSpecificationParameters struct { + + // Metric type. 
Valid values: ALBRequestCountPerTarget, ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, DynamoDBReadCapacityUtilization, DynamoDBWriteCapacityUtilization, ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization, EC2SpotFleetRequestAverageCPUUtilization, EC2SpotFleetRequestAverageNetworkIn, EC2SpotFleetRequestAverageNetworkOut, RDSReaderAverageCPUUtilization, RDSReaderAverageDatabaseConnections. + // +kubebuilder:validation:Optional + PredefinedScalingMetricType *string `json:"predefinedScalingMetricType" tf:"predefined_scaling_metric_type,omitempty"` + + // Identifies the resource associated with the metric type. + // +kubebuilder:validation:Optional + ResourceLabel *string `json:"resourceLabel,omitempty" tf:"resource_label,omitempty"` +} + +type ScalingInstructionInitParameters struct { + + // Customized load metric to use for predictive scaling. You must specify either customized_load_metric_specification or predefined_load_metric_specification when configuring predictive scaling. + // More details can be found in the AWS Auto Scaling API Reference. + CustomizedLoadMetricSpecification *CustomizedLoadMetricSpecificationInitParameters `json:"customizedLoadMetricSpecification,omitempty" tf:"customized_load_metric_specification,omitempty"` + + // Boolean controlling whether dynamic scaling by AWS Auto Scaling is disabled. Defaults to false. + DisableDynamicScaling *bool `json:"disableDynamicScaling,omitempty" tf:"disable_dynamic_scaling,omitempty"` + + // Maximum capacity of the resource. The exception to this upper limit is if you specify a non-default setting for predictive_scaling_max_capacity_behavior. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity of the resource. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` + + // Predefined load metric to use for predictive scaling. 
You must specify either predefined_load_metric_specification or customized_load_metric_specification when configuring predictive scaling. + // More details can be found in the AWS Auto Scaling API Reference. + PredefinedLoadMetricSpecification *PredefinedLoadMetricSpecificationInitParameters `json:"predefinedLoadMetricSpecification,omitempty" tf:"predefined_load_metric_specification,omitempty"` + + // Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity specified for the resource. + // Valid values: SetForecastCapacityToMaxCapacity, SetMaxCapacityAboveForecastCapacity, SetMaxCapacityToForecastCapacity. + PredictiveScalingMaxCapacityBehavior *string `json:"predictiveScalingMaxCapacityBehavior,omitempty" tf:"predictive_scaling_max_capacity_behavior,omitempty"` + + // Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. + PredictiveScalingMaxCapacityBuffer *float64 `json:"predictiveScalingMaxCapacityBuffer,omitempty" tf:"predictive_scaling_max_capacity_buffer,omitempty"` + + // Predictive scaling mode. Valid values: ForecastAndScale, ForecastOnly. + PredictiveScalingMode *string `json:"predictiveScalingMode,omitempty" tf:"predictive_scaling_mode,omitempty"` + + // ID of the resource. This string consists of the resource type and unique identifier. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Scalable dimension associated with the resource. Valid values: autoscaling:autoScalingGroup:DesiredCapacity, dynamodb:index:ReadCapacityUnits, dynamodb:index:WriteCapacityUnits, dynamodb:table:ReadCapacityUnits, dynamodb:table:WriteCapacityUnits, ecs:service:DesiredCount, ec2:spot-fleet-request:TargetCapacity, rds:cluster:ReadReplicaCount. + ScalableDimension *string `json:"scalableDimension,omitempty" tf:"scalable_dimension,omitempty"` + + // Controls whether a resource's externally created scaling policies are kept or replaced. 
Valid values: KeepExternalPolicies, ReplaceExternalPolicies. Defaults to KeepExternalPolicies. + ScalingPolicyUpdateBehavior *string `json:"scalingPolicyUpdateBehavior,omitempty" tf:"scaling_policy_update_behavior,omitempty"` + + // Amount of time, in seconds, to buffer the run time of scheduled scaling actions when scaling out. + ScheduledActionBufferTime *float64 `json:"scheduledActionBufferTime,omitempty" tf:"scheduled_action_buffer_time,omitempty"` + + // Namespace of the AWS service. Valid values: autoscaling, dynamodb, ecs, ec2, rds. + ServiceNamespace *string `json:"serviceNamespace,omitempty" tf:"service_namespace,omitempty"` + + // Structure that defines new target tracking configurations. Each of these structures includes a specific scaling metric and a target value for the metric, along with various parameters to use with dynamic scaling. + // More details can be found in the AWS Auto Scaling API Reference. + TargetTrackingConfiguration []TargetTrackingConfigurationInitParameters `json:"targetTrackingConfiguration,omitempty" tf:"target_tracking_configuration,omitempty"` +} + +type ScalingInstructionObservation struct { + + // Customized load metric to use for predictive scaling. You must specify either customized_load_metric_specification or predefined_load_metric_specification when configuring predictive scaling. + // More details can be found in the AWS Auto Scaling API Reference. + CustomizedLoadMetricSpecification *CustomizedLoadMetricSpecificationObservation `json:"customizedLoadMetricSpecification,omitempty" tf:"customized_load_metric_specification,omitempty"` + + // Boolean controlling whether dynamic scaling by AWS Auto Scaling is disabled. Defaults to false. + DisableDynamicScaling *bool `json:"disableDynamicScaling,omitempty" tf:"disable_dynamic_scaling,omitempty"` + + // Maximum capacity of the resource. The exception to this upper limit is if you specify a non-default setting for predictive_scaling_max_capacity_behavior. 
+ MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity of the resource. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` + + // Predefined load metric to use for predictive scaling. You must specify either predefined_load_metric_specification or customized_load_metric_specification when configuring predictive scaling. + // More details can be found in the AWS Auto Scaling API Reference. + PredefinedLoadMetricSpecification *PredefinedLoadMetricSpecificationObservation `json:"predefinedLoadMetricSpecification,omitempty" tf:"predefined_load_metric_specification,omitempty"` + + // Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity specified for the resource. + // Valid values: SetForecastCapacityToMaxCapacity, SetMaxCapacityAboveForecastCapacity, SetMaxCapacityToForecastCapacity. + PredictiveScalingMaxCapacityBehavior *string `json:"predictiveScalingMaxCapacityBehavior,omitempty" tf:"predictive_scaling_max_capacity_behavior,omitempty"` + + // Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. + PredictiveScalingMaxCapacityBuffer *float64 `json:"predictiveScalingMaxCapacityBuffer,omitempty" tf:"predictive_scaling_max_capacity_buffer,omitempty"` + + // Predictive scaling mode. Valid values: ForecastAndScale, ForecastOnly. + PredictiveScalingMode *string `json:"predictiveScalingMode,omitempty" tf:"predictive_scaling_mode,omitempty"` + + // ID of the resource. This string consists of the resource type and unique identifier. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Scalable dimension associated with the resource. 
Valid values: autoscaling:autoScalingGroup:DesiredCapacity, dynamodb:index:ReadCapacityUnits, dynamodb:index:WriteCapacityUnits, dynamodb:table:ReadCapacityUnits, dynamodb:table:WriteCapacityUnits, ecs:service:DesiredCount, ec2:spot-fleet-request:TargetCapacity, rds:cluster:ReadReplicaCount. + ScalableDimension *string `json:"scalableDimension,omitempty" tf:"scalable_dimension,omitempty"` + + // Controls whether a resource's externally created scaling policies are kept or replaced. Valid values: KeepExternalPolicies, ReplaceExternalPolicies. Defaults to KeepExternalPolicies. + ScalingPolicyUpdateBehavior *string `json:"scalingPolicyUpdateBehavior,omitempty" tf:"scaling_policy_update_behavior,omitempty"` + + // Amount of time, in seconds, to buffer the run time of scheduled scaling actions when scaling out. + ScheduledActionBufferTime *float64 `json:"scheduledActionBufferTime,omitempty" tf:"scheduled_action_buffer_time,omitempty"` + + // Namespace of the AWS service. Valid values: autoscaling, dynamodb, ecs, ec2, rds. + ServiceNamespace *string `json:"serviceNamespace,omitempty" tf:"service_namespace,omitempty"` + + // Structure that defines new target tracking configurations. Each of these structures includes a specific scaling metric and a target value for the metric, along with various parameters to use with dynamic scaling. + // More details can be found in the AWS Auto Scaling API Reference. + TargetTrackingConfiguration []TargetTrackingConfigurationObservation `json:"targetTrackingConfiguration,omitempty" tf:"target_tracking_configuration,omitempty"` +} + +type ScalingInstructionParameters struct { + + // Customized load metric to use for predictive scaling. You must specify either customized_load_metric_specification or predefined_load_metric_specification when configuring predictive scaling. + // More details can be found in the AWS Auto Scaling API Reference. 
+ // +kubebuilder:validation:Optional + CustomizedLoadMetricSpecification *CustomizedLoadMetricSpecificationParameters `json:"customizedLoadMetricSpecification,omitempty" tf:"customized_load_metric_specification,omitempty"` + + // Boolean controlling whether dynamic scaling by AWS Auto Scaling is disabled. Defaults to false. + // +kubebuilder:validation:Optional + DisableDynamicScaling *bool `json:"disableDynamicScaling,omitempty" tf:"disable_dynamic_scaling,omitempty"` + + // Maximum capacity of the resource. The exception to this upper limit is if you specify a non-default setting for predictive_scaling_max_capacity_behavior. + // +kubebuilder:validation:Optional + MaxCapacity *float64 `json:"maxCapacity" tf:"max_capacity,omitempty"` + + // Minimum capacity of the resource. + // +kubebuilder:validation:Optional + MinCapacity *float64 `json:"minCapacity" tf:"min_capacity,omitempty"` + + // Predefined load metric to use for predictive scaling. You must specify either predefined_load_metric_specification or customized_load_metric_specification when configuring predictive scaling. + // More details can be found in the AWS Auto Scaling API Reference. + // +kubebuilder:validation:Optional + PredefinedLoadMetricSpecification *PredefinedLoadMetricSpecificationParameters `json:"predefinedLoadMetricSpecification,omitempty" tf:"predefined_load_metric_specification,omitempty"` + + // Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity specified for the resource. + // Valid values: SetForecastCapacityToMaxCapacity, SetMaxCapacityAboveForecastCapacity, SetMaxCapacityToForecastCapacity. + // +kubebuilder:validation:Optional + PredictiveScalingMaxCapacityBehavior *string `json:"predictiveScalingMaxCapacityBehavior,omitempty" tf:"predictive_scaling_max_capacity_behavior,omitempty"` + + // Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. 
+ // +kubebuilder:validation:Optional + PredictiveScalingMaxCapacityBuffer *float64 `json:"predictiveScalingMaxCapacityBuffer,omitempty" tf:"predictive_scaling_max_capacity_buffer,omitempty"` + + // Predictive scaling mode. Valid values: ForecastAndScale, ForecastOnly. + // +kubebuilder:validation:Optional + PredictiveScalingMode *string `json:"predictiveScalingMode,omitempty" tf:"predictive_scaling_mode,omitempty"` + + // ID of the resource. This string consists of the resource type and unique identifier. + // +kubebuilder:validation:Optional + ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"` + + // Scalable dimension associated with the resource. Valid values: autoscaling:autoScalingGroup:DesiredCapacity, dynamodb:index:ReadCapacityUnits, dynamodb:index:WriteCapacityUnits, dynamodb:table:ReadCapacityUnits, dynamodb:table:WriteCapacityUnits, ecs:service:DesiredCount, ec2:spot-fleet-request:TargetCapacity, rds:cluster:ReadReplicaCount. + // +kubebuilder:validation:Optional + ScalableDimension *string `json:"scalableDimension" tf:"scalable_dimension,omitempty"` + + // Controls whether a resource's externally created scaling policies are kept or replaced. Valid values: KeepExternalPolicies, ReplaceExternalPolicies. Defaults to KeepExternalPolicies. + // +kubebuilder:validation:Optional + ScalingPolicyUpdateBehavior *string `json:"scalingPolicyUpdateBehavior,omitempty" tf:"scaling_policy_update_behavior,omitempty"` + + // Amount of time, in seconds, to buffer the run time of scheduled scaling actions when scaling out. + // +kubebuilder:validation:Optional + ScheduledActionBufferTime *float64 `json:"scheduledActionBufferTime,omitempty" tf:"scheduled_action_buffer_time,omitempty"` + + // Namespace of the AWS service. Valid values: autoscaling, dynamodb, ecs, ec2, rds. 
+ // +kubebuilder:validation:Optional + ServiceNamespace *string `json:"serviceNamespace" tf:"service_namespace,omitempty"` + + // Structure that defines new target tracking configurations. Each of these structures includes a specific scaling metric and a target value for the metric, along with various parameters to use with dynamic scaling. + // More details can be found in the AWS Auto Scaling API Reference. + // +kubebuilder:validation:Optional + TargetTrackingConfiguration []TargetTrackingConfigurationParameters `json:"targetTrackingConfiguration" tf:"target_tracking_configuration,omitempty"` +} + +type ScalingPlanInitParameters struct { + + // CloudFormation stack or set of tags. You can create one scaling plan per application source. + ApplicationSource *ApplicationSourceInitParameters `json:"applicationSource,omitempty" tf:"application_source,omitempty"` + + // Name of the scaling plan. Names cannot contain vertical bars, colons, or forward slashes. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Scaling instructions. More details can be found in the AWS Auto Scaling API Reference. + ScalingInstruction []ScalingInstructionInitParameters `json:"scalingInstruction,omitempty" tf:"scaling_instruction,omitempty"` +} + +type ScalingPlanObservation struct { + + // CloudFormation stack or set of tags. You can create one scaling plan per application source. + ApplicationSource *ApplicationSourceObservation `json:"applicationSource,omitempty" tf:"application_source,omitempty"` + + // Scaling plan identifier. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the scaling plan. Names cannot contain vertical bars, colons, or forward slashes. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Scaling instructions. More details can be found in the AWS Auto Scaling API Reference. 
+ ScalingInstruction []ScalingInstructionObservation `json:"scalingInstruction,omitempty" tf:"scaling_instruction,omitempty"` + + // The version number of the scaling plan. This value is always 1. + ScalingPlanVersion *float64 `json:"scalingPlanVersion,omitempty" tf:"scaling_plan_version,omitempty"` +} + +type ScalingPlanParameters struct { + + // CloudFormation stack or set of tags. You can create one scaling plan per application source. + // +kubebuilder:validation:Optional + ApplicationSource *ApplicationSourceParameters `json:"applicationSource,omitempty" tf:"application_source,omitempty"` + + // Name of the scaling plan. Names cannot contain vertical bars, colons, or forward slashes. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Scaling instructions. More details can be found in the AWS Auto Scaling API Reference. + // +kubebuilder:validation:Optional + ScalingInstruction []ScalingInstructionParameters `json:"scalingInstruction,omitempty" tf:"scaling_instruction,omitempty"` +} + +type TagFilterInitParameters struct { + + // Tag key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Tag values. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TagFilterObservation struct { + + // Tag key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Tag values. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TagFilterParameters struct { + + // Tag key. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Tag values. 
+ // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TargetTrackingConfigurationInitParameters struct { + + // Customized metric. You can specify either customized_scaling_metric_specification or predefined_scaling_metric_specification. + // More details can be found in the AWS Auto Scaling API Reference. + CustomizedScalingMetricSpecification *CustomizedScalingMetricSpecificationInitParameters `json:"customizedScalingMetricSpecification,omitempty" tf:"customized_scaling_metric_specification,omitempty"` + + // Boolean indicating whether scale in by the target tracking scaling policy is disabled. Defaults to false. + DisableScaleIn *bool `json:"disableScaleIn,omitempty" tf:"disable_scale_in,omitempty"` + + // Estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. + // This value is used only if the resource is an Auto Scaling group. + EstimatedInstanceWarmup *float64 `json:"estimatedInstanceWarmup,omitempty" tf:"estimated_instance_warmup,omitempty"` + + // Predefined metric. You can specify either predefined_scaling_metric_specification or customized_scaling_metric_specification. + // More details can be found in the AWS Auto Scaling API Reference. + PredefinedScalingMetricSpecification *PredefinedScalingMetricSpecificationInitParameters `json:"predefinedScalingMetricSpecification,omitempty" tf:"predefined_scaling_metric_specification,omitempty"` + + // Amount of time, in seconds, after a scale in activity completes before another scale in activity can start. + // This value is not used if the scalable resource is an Auto Scaling group. + ScaleInCooldown *float64 `json:"scaleInCooldown,omitempty" tf:"scale_in_cooldown,omitempty"` + + // Amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start. + // This value is not used if the scalable resource is an Auto Scaling group. 
+ ScaleOutCooldown *float64 `json:"scaleOutCooldown,omitempty" tf:"scale_out_cooldown,omitempty"` + + // Target value for the metric. + TargetValue *float64 `json:"targetValue,omitempty" tf:"target_value,omitempty"` +} + +type TargetTrackingConfigurationObservation struct { + + // Customized metric. You can specify either customized_scaling_metric_specification or predefined_scaling_metric_specification. + // More details can be found in the AWS Auto Scaling API Reference. + CustomizedScalingMetricSpecification *CustomizedScalingMetricSpecificationObservation `json:"customizedScalingMetricSpecification,omitempty" tf:"customized_scaling_metric_specification,omitempty"` + + // Boolean indicating whether scale in by the target tracking scaling policy is disabled. Defaults to false. + DisableScaleIn *bool `json:"disableScaleIn,omitempty" tf:"disable_scale_in,omitempty"` + + // Estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. + // This value is used only if the resource is an Auto Scaling group. + EstimatedInstanceWarmup *float64 `json:"estimatedInstanceWarmup,omitempty" tf:"estimated_instance_warmup,omitempty"` + + // Predefined metric. You can specify either predefined_scaling_metric_specification or customized_scaling_metric_specification. + // More details can be found in the AWS Auto Scaling API Reference. + PredefinedScalingMetricSpecification *PredefinedScalingMetricSpecificationObservation `json:"predefinedScalingMetricSpecification,omitempty" tf:"predefined_scaling_metric_specification,omitempty"` + + // Amount of time, in seconds, after a scale in activity completes before another scale in activity can start. + // This value is not used if the scalable resource is an Auto Scaling group. + ScaleInCooldown *float64 `json:"scaleInCooldown,omitempty" tf:"scale_in_cooldown,omitempty"` + + // Amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start. 
+ // This value is not used if the scalable resource is an Auto Scaling group. + ScaleOutCooldown *float64 `json:"scaleOutCooldown,omitempty" tf:"scale_out_cooldown,omitempty"` + + // Target value for the metric. + TargetValue *float64 `json:"targetValue,omitempty" tf:"target_value,omitempty"` +} + +type TargetTrackingConfigurationParameters struct { + + // Customized metric. You can specify either customized_scaling_metric_specification or predefined_scaling_metric_specification. + // More details can be found in the AWS Auto Scaling API Reference. + // +kubebuilder:validation:Optional + CustomizedScalingMetricSpecification *CustomizedScalingMetricSpecificationParameters `json:"customizedScalingMetricSpecification,omitempty" tf:"customized_scaling_metric_specification,omitempty"` + + // Boolean indicating whether scale in by the target tracking scaling policy is disabled. Defaults to false. + // +kubebuilder:validation:Optional + DisableScaleIn *bool `json:"disableScaleIn,omitempty" tf:"disable_scale_in,omitempty"` + + // Estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. + // This value is used only if the resource is an Auto Scaling group. + // +kubebuilder:validation:Optional + EstimatedInstanceWarmup *float64 `json:"estimatedInstanceWarmup,omitempty" tf:"estimated_instance_warmup,omitempty"` + + // Predefined metric. You can specify either predefined_scaling_metric_specification or customized_scaling_metric_specification. + // More details can be found in the AWS Auto Scaling API Reference. + // +kubebuilder:validation:Optional + PredefinedScalingMetricSpecification *PredefinedScalingMetricSpecificationParameters `json:"predefinedScalingMetricSpecification,omitempty" tf:"predefined_scaling_metric_specification,omitempty"` + + // Amount of time, in seconds, after a scale in activity completes before another scale in activity can start. 
+ // This value is not used if the scalable resource is an Auto Scaling group. + // +kubebuilder:validation:Optional + ScaleInCooldown *float64 `json:"scaleInCooldown,omitempty" tf:"scale_in_cooldown,omitempty"` + + // Amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start. + // This value is not used if the scalable resource is an Auto Scaling group. + // +kubebuilder:validation:Optional + ScaleOutCooldown *float64 `json:"scaleOutCooldown,omitempty" tf:"scale_out_cooldown,omitempty"` + + // Target value for the metric. + // +kubebuilder:validation:Optional + TargetValue *float64 `json:"targetValue" tf:"target_value,omitempty"` +} + +// ScalingPlanSpec defines the desired state of ScalingPlan +type ScalingPlanSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScalingPlanParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScalingPlanInitParameters `json:"initProvider,omitempty"` +} + +// ScalingPlanStatus defines the observed state of ScalingPlan. +type ScalingPlanStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScalingPlanObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ScalingPlan is the Schema for the ScalingPlans API. Manages an AWS Auto Scaling scaling plan. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ScalingPlan struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.applicationSource) || (has(self.initProvider) && has(self.initProvider.applicationSource))",message="spec.forProvider.applicationSource is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scalingInstruction) || (has(self.initProvider) && has(self.initProvider.scalingInstruction))",message="spec.forProvider.scalingInstruction is a required parameter" + Spec ScalingPlanSpec `json:"spec"` + Status ScalingPlanStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScalingPlanList contains a list of ScalingPlans +type ScalingPlanList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScalingPlan `json:"items"` +} + +// Repository type metadata. 
+var ( + ScalingPlan_Kind = "ScalingPlan" + ScalingPlan_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ScalingPlan_Kind}.String() + ScalingPlan_KindAPIVersion = ScalingPlan_Kind + "." + CRDGroupVersion.String() + ScalingPlan_GroupVersionKind = CRDGroupVersion.WithKind(ScalingPlan_Kind) +) + +func init() { + SchemeBuilder.Register(&ScalingPlan{}, &ScalingPlanList{}) +} diff --git a/apis/backup/v1beta1/zz_generated.conversion_hubs.go b/apis/backup/v1beta1/zz_generated.conversion_hubs.go index 62b6d4d8b4..edf314ff8a 100755 --- a/apis/backup/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/backup/v1beta1/zz_generated.conversion_hubs.go @@ -6,21 +6,12 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Framework) Hub() {} - // Hub marks this type as a conversion hub. func (tr *GlobalSettings) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Plan) Hub() {} - // Hub marks this type as a conversion hub. func (tr *RegionSettings) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ReportPlan) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Selection) Hub() {} diff --git a/apis/backup/v1beta1/zz_generated.conversion_spokes.go b/apis/backup/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..f6396e1128 --- /dev/null +++ b/apis/backup/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Framework to the hub type. 
+func (tr *Framework) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Framework type. +func (tr *Framework) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Plan to the hub type. +func (tr *Plan) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Plan type. +func (tr *Plan) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ReportPlan to the hub type. 
+func (tr *ReportPlan) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ReportPlan type. +func (tr *ReportPlan) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/backup/v1beta1/zz_generated.resolvers.go b/apis/backup/v1beta1/zz_generated.resolvers.go index 4133c300b0..922798f2b1 100644 --- a/apis/backup/v1beta1/zz_generated.resolvers.go +++ b/apis/backup/v1beta1/zz_generated.resolvers.go @@ -101,7 +101,7 @@ func (mg *Selection) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.IAMRoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("backup.aws.upbound.io", "v1beta1", "Plan", "PlanList") + m, l, err = apisresolver.GetManagedResource("backup.aws.upbound.io", "v1beta2", "Plan", "PlanList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -139,7 +139,7 @@ func (mg *Selection) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.InitProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.IAMRoleArnRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("backup.aws.upbound.io", "v1beta1", "Plan", "PlanList") + m, l, err = apisresolver.GetManagedResource("backup.aws.upbound.io", "v1beta2", "Plan", "PlanList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/backup/v1beta1/zz_selection_types.go b/apis/backup/v1beta1/zz_selection_types.go index a57ce1b692..ae0a1d9789 100755 --- a/apis/backup/v1beta1/zz_selection_types.go +++ b/apis/backup/v1beta1/zz_selection_types.go @@ -74,7 +74,7 @@ type SelectionInitParameters struct { NotResources []*string `json:"notResources,omitempty" tf:"not_resources,omitempty"` // The backup plan ID to be associated with the selection of resources. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/backup/v1beta1.Plan + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/backup/v1beta2.Plan PlanID *string `json:"planId,omitempty" tf:"plan_id,omitempty"` // Reference to a Plan in backup to populate planId. @@ -152,7 +152,7 @@ type SelectionParameters struct { NotResources []*string `json:"notResources,omitempty" tf:"not_resources,omitempty"` // The backup plan ID to be associated with the selection of resources. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/backup/v1beta1.Plan + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/backup/v1beta2.Plan // +kubebuilder:validation:Optional PlanID *string `json:"planId,omitempty" tf:"plan_id,omitempty"` diff --git a/apis/backup/v1beta2/zz_framework_terraformed.go b/apis/backup/v1beta2/zz_framework_terraformed.go new file mode 100755 index 0000000000..1449088f0d --- /dev/null +++ b/apis/backup/v1beta2/zz_framework_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Framework +func (mg *Framework) GetTerraformResourceType() string { + return "aws_backup_framework" +} + +// GetConnectionDetailsMapping for this Framework +func (tr *Framework) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Framework +func (tr *Framework) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Framework +func (tr *Framework) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Framework +func (tr *Framework) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Framework +func (tr *Framework) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Framework +func (tr *Framework) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Framework +func (tr *Framework) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Framework +func (tr *Framework) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Framework using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Framework) LateInitialize(attrs []byte) (bool, error) { + params := &FrameworkParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Framework) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/backup/v1beta2/zz_framework_types.go b/apis/backup/v1beta2/zz_framework_types.go new file mode 100755 index 0000000000..0f0fde6ee9 --- /dev/null +++ b/apis/backup/v1beta2/zz_framework_types.go @@ -0,0 +1,268 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ControlInitParameters struct { + + // One or more input parameter blocks. An example of a control with two parameters is: "backup plan frequency is at least daily and the retention period is at least 1 year". The first parameter is daily. The second parameter is 1 year. Detailed below. + InputParameter []InputParameterInitParameters `json:"inputParameter,omitempty" tf:"input_parameter,omitempty"` + + // The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The scope of a control. The control scope defines what the control will evaluate. Three examples of control scopes are: a specific backup plan, all backup plans with a specific tag, or all backup plans. Detailed below. + Scope *ScopeInitParameters `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type ControlObservation struct { + + // One or more input parameter blocks. An example of a control with two parameters is: "backup plan frequency is at least daily and the retention period is at least 1 year". The first parameter is daily. The second parameter is 1 year. 
Detailed below. + InputParameter []InputParameterObservation `json:"inputParameter,omitempty" tf:"input_parameter,omitempty"` + + // The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The scope of a control. The control scope defines what the control will evaluate. Three examples of control scopes are: a specific backup plan, all backup plans with a specific tag, or all backup plans. Detailed below. + Scope *ScopeObservation `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type ControlParameters struct { + + // One or more input parameter blocks. An example of a control with two parameters is: "backup plan frequency is at least daily and the retention period is at least 1 year". The first parameter is daily. The second parameter is 1 year. Detailed below. + // +kubebuilder:validation:Optional + InputParameter []InputParameterParameters `json:"inputParameter,omitempty" tf:"input_parameter,omitempty"` + + // The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The scope of a control. The control scope defines what the control will evaluate. Three examples of control scopes are: a specific backup plan, all backup plans with a specific tag, or all backup plans. Detailed below. + // +kubebuilder:validation:Optional + Scope *ScopeParameters `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type FrameworkInitParameters struct { + + // One or more control blocks that make up the framework. Each control in the list has a name, input parameters, and scope. Detailed below. 
+ Control []ControlInitParameters `json:"control,omitempty" tf:"control,omitempty"` + + // The description of the framework with a maximum of 1,024 characters + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FrameworkObservation struct { + + // The ARN of the backup framework. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // One or more control blocks that make up the framework. Each control in the list has a name, input parameters, and scope. Detailed below. + Control []ControlObservation `json:"control,omitempty" tf:"control,omitempty"` + + // The date and time that a framework is created, in Unix format and Coordinated Universal Time (UTC). + CreationTime *string `json:"creationTime,omitempty" tf:"creation_time,omitempty"` + + // The deployment status of a framework. The statuses are: CREATE_IN_PROGRESS | UPDATE_IN_PROGRESS | DELETE_IN_PROGRESS | COMPLETED | FAILED. + DeploymentStatus *string `json:"deploymentStatus,omitempty" tf:"deployment_status,omitempty"` + + // The description of the framework with a maximum of 1,024 characters + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The id of the backup framework. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A framework consists of one or more controls. 
Each control governs a resource, such as backup plans, backup selections, backup vaults, or recovery points. You can also turn AWS Config recording on or off for each resource. For more information refer to the AWS documentation for Framework Status + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type FrameworkParameters struct { + + // One or more control blocks that make up the framework. Each control in the list has a name, input parameters, and scope. Detailed below. + // +kubebuilder:validation:Optional + Control []ControlParameters `json:"control,omitempty" tf:"control,omitempty"` + + // The description of the framework with a maximum of 1,024 characters + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type InputParameterInitParameters struct { + + // The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of parameter, for example, hourly. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type InputParameterObservation struct { + + // The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of parameter, for example, hourly. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type InputParameterParameters struct { + + // The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of parameter, for example, hourly. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ScopeInitParameters struct { + + // The ID of the only AWS resource that you want your control scope to contain. Minimum number of 1 item. Maximum number of 100 items. + // +listType=set + ComplianceResourceIds []*string `json:"complianceResourceIds,omitempty" tf:"compliance_resource_ids,omitempty"` + + // Describes whether the control scope includes one or more types of resources, such as EFS or RDS. + // +listType=set + ComplianceResourceTypes []*string `json:"complianceResourceTypes,omitempty" tf:"compliance_resource_types,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ScopeObservation struct { + + // The ID of the only AWS resource that you want your control scope to contain. Minimum number of 1 item. Maximum number of 100 items. 
+ // +listType=set + ComplianceResourceIds []*string `json:"complianceResourceIds,omitempty" tf:"compliance_resource_ids,omitempty"` + + // Describes whether the control scope includes one or more types of resources, such as EFS or RDS. + // +listType=set + ComplianceResourceTypes []*string `json:"complianceResourceTypes,omitempty" tf:"compliance_resource_types,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ScopeParameters struct { + + // The ID of the only AWS resource that you want your control scope to contain. Minimum number of 1 item. Maximum number of 100 items. + // +kubebuilder:validation:Optional + // +listType=set + ComplianceResourceIds []*string `json:"complianceResourceIds,omitempty" tf:"compliance_resource_ids,omitempty"` + + // Describes whether the control scope includes one or more types of resources, such as EFS or RDS. + // +kubebuilder:validation:Optional + // +listType=set + ComplianceResourceTypes []*string `json:"complianceResourceTypes,omitempty" tf:"compliance_resource_types,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// FrameworkSpec defines the desired state of Framework +type FrameworkSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrameworkParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrameworkInitParameters `json:"initProvider,omitempty"` +} + +// FrameworkStatus defines the observed state of Framework. +type FrameworkStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrameworkObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Framework is the Schema for the Frameworks API. Provides an AWS Backup Framework resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Framework struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.control) || (has(self.initProvider) && has(self.initProvider.control))",message="spec.forProvider.control is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec FrameworkSpec `json:"spec"` + Status FrameworkStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// FrameworkList contains a list of Frameworks +type FrameworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Framework `json:"items"` +} + +// Repository type metadata. +var ( + Framework_Kind = "Framework" + Framework_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Framework_Kind}.String() + Framework_KindAPIVersion = Framework_Kind + "." + CRDGroupVersion.String() + Framework_GroupVersionKind = CRDGroupVersion.WithKind(Framework_Kind) +) + +func init() { + SchemeBuilder.Register(&Framework{}, &FrameworkList{}) +} diff --git a/apis/backup/v1beta2/zz_generated.conversion_hubs.go b/apis/backup/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..c7887371eb --- /dev/null +++ b/apis/backup/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Framework) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Plan) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ReportPlan) Hub() {} diff --git a/apis/backup/v1beta2/zz_generated.deepcopy.go b/apis/backup/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..71af3f1414 --- /dev/null +++ b/apis/backup/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2113 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedBackupSettingInitParameters) DeepCopyInto(out *AdvancedBackupSettingInitParameters) { + *out = *in + if in.BackupOptions != nil { + in, out := &in.BackupOptions, &out.BackupOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedBackupSettingInitParameters. +func (in *AdvancedBackupSettingInitParameters) DeepCopy() *AdvancedBackupSettingInitParameters { + if in == nil { + return nil + } + out := new(AdvancedBackupSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedBackupSettingObservation) DeepCopyInto(out *AdvancedBackupSettingObservation) { + *out = *in + if in.BackupOptions != nil { + in, out := &in.BackupOptions, &out.BackupOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedBackupSettingObservation. 
+func (in *AdvancedBackupSettingObservation) DeepCopy() *AdvancedBackupSettingObservation { + if in == nil { + return nil + } + out := new(AdvancedBackupSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedBackupSettingParameters) DeepCopyInto(out *AdvancedBackupSettingParameters) { + *out = *in + if in.BackupOptions != nil { + in, out := &in.BackupOptions, &out.BackupOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedBackupSettingParameters. +func (in *AdvancedBackupSettingParameters) DeepCopy() *AdvancedBackupSettingParameters { + if in == nil { + return nil + } + out := new(AdvancedBackupSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlInitParameters) DeepCopyInto(out *ControlInitParameters) { + *out = *in + if in.InputParameter != nil { + in, out := &in.InputParameter, &out.InputParameter + *out = make([]InputParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlInitParameters. 
+func (in *ControlInitParameters) DeepCopy() *ControlInitParameters { + if in == nil { + return nil + } + out := new(ControlInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlObservation) DeepCopyInto(out *ControlObservation) { + *out = *in + if in.InputParameter != nil { + in, out := &in.InputParameter, &out.InputParameter + *out = make([]InputParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlObservation. +func (in *ControlObservation) DeepCopy() *ControlObservation { + if in == nil { + return nil + } + out := new(ControlObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlParameters) DeepCopyInto(out *ControlParameters) { + *out = *in + if in.InputParameter != nil { + in, out := &in.InputParameter, &out.InputParameter + *out = make([]InputParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlParameters. 
+func (in *ControlParameters) DeepCopy() *ControlParameters { + if in == nil { + return nil + } + out := new(ControlParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyActionInitParameters) DeepCopyInto(out *CopyActionInitParameters) { + *out = *in + if in.DestinationVaultArn != nil { + in, out := &in.DestinationVaultArn, &out.DestinationVaultArn + *out = new(string) + **out = **in + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(LifecycleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyActionInitParameters. +func (in *CopyActionInitParameters) DeepCopy() *CopyActionInitParameters { + if in == nil { + return nil + } + out := new(CopyActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyActionObservation) DeepCopyInto(out *CopyActionObservation) { + *out = *in + if in.DestinationVaultArn != nil { + in, out := &in.DestinationVaultArn, &out.DestinationVaultArn + *out = new(string) + **out = **in + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(LifecycleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyActionObservation. +func (in *CopyActionObservation) DeepCopy() *CopyActionObservation { + if in == nil { + return nil + } + out := new(CopyActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CopyActionParameters) DeepCopyInto(out *CopyActionParameters) { + *out = *in + if in.DestinationVaultArn != nil { + in, out := &in.DestinationVaultArn, &out.DestinationVaultArn + *out = new(string) + **out = **in + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(LifecycleParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyActionParameters. +func (in *CopyActionParameters) DeepCopy() *CopyActionParameters { + if in == nil { + return nil + } + out := new(CopyActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Framework) DeepCopyInto(out *Framework) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Framework. +func (in *Framework) DeepCopy() *Framework { + if in == nil { + return nil + } + out := new(Framework) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Framework) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrameworkInitParameters) DeepCopyInto(out *FrameworkInitParameters) { + *out = *in + if in.Control != nil { + in, out := &in.Control, &out.Control + *out = make([]ControlInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameworkInitParameters. +func (in *FrameworkInitParameters) DeepCopy() *FrameworkInitParameters { + if in == nil { + return nil + } + out := new(FrameworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameworkList) DeepCopyInto(out *FrameworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Framework, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameworkList. +func (in *FrameworkList) DeepCopy() *FrameworkList { + if in == nil { + return nil + } + out := new(FrameworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FrameworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameworkObservation) DeepCopyInto(out *FrameworkObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Control != nil { + in, out := &in.Control, &out.Control + *out = make([]ControlObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = new(string) + **out = **in + } + if in.DeploymentStatus != nil { + in, out := &in.DeploymentStatus, &out.DeploymentStatus + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
FrameworkObservation. +func (in *FrameworkObservation) DeepCopy() *FrameworkObservation { + if in == nil { + return nil + } + out := new(FrameworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameworkParameters) DeepCopyInto(out *FrameworkParameters) { + *out = *in + if in.Control != nil { + in, out := &in.Control, &out.Control + *out = make([]ControlParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameworkParameters. +func (in *FrameworkParameters) DeepCopy() *FrameworkParameters { + if in == nil { + return nil + } + out := new(FrameworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameworkSpec) DeepCopyInto(out *FrameworkSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameworkSpec. 
+func (in *FrameworkSpec) DeepCopy() *FrameworkSpec { + if in == nil { + return nil + } + out := new(FrameworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameworkStatus) DeepCopyInto(out *FrameworkStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameworkStatus. +func (in *FrameworkStatus) DeepCopy() *FrameworkStatus { + if in == nil { + return nil + } + out := new(FrameworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputParameterInitParameters) DeepCopyInto(out *InputParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputParameterInitParameters. +func (in *InputParameterInitParameters) DeepCopy() *InputParameterInitParameters { + if in == nil { + return nil + } + out := new(InputParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputParameterObservation) DeepCopyInto(out *InputParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputParameterObservation. 
+func (in *InputParameterObservation) DeepCopy() *InputParameterObservation { + if in == nil { + return nil + } + out := new(InputParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputParameterParameters) DeepCopyInto(out *InputParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputParameterParameters. +func (in *InputParameterParameters) DeepCopy() *InputParameterParameters { + if in == nil { + return nil + } + out := new(InputParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleInitParameters) DeepCopyInto(out *LifecycleInitParameters) { + *out = *in + if in.ColdStorageAfter != nil { + in, out := &in.ColdStorageAfter, &out.ColdStorageAfter + *out = new(float64) + **out = **in + } + if in.DeleteAfter != nil { + in, out := &in.DeleteAfter, &out.DeleteAfter + *out = new(float64) + **out = **in + } + if in.OptInToArchiveForSupportedResources != nil { + in, out := &in.OptInToArchiveForSupportedResources, &out.OptInToArchiveForSupportedResources + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleInitParameters. +func (in *LifecycleInitParameters) DeepCopy() *LifecycleInitParameters { + if in == nil { + return nil + } + out := new(LifecycleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LifecycleObservation) DeepCopyInto(out *LifecycleObservation) { + *out = *in + if in.ColdStorageAfter != nil { + in, out := &in.ColdStorageAfter, &out.ColdStorageAfter + *out = new(float64) + **out = **in + } + if in.DeleteAfter != nil { + in, out := &in.DeleteAfter, &out.DeleteAfter + *out = new(float64) + **out = **in + } + if in.OptInToArchiveForSupportedResources != nil { + in, out := &in.OptInToArchiveForSupportedResources, &out.OptInToArchiveForSupportedResources + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleObservation. +func (in *LifecycleObservation) DeepCopy() *LifecycleObservation { + if in == nil { + return nil + } + out := new(LifecycleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleParameters) DeepCopyInto(out *LifecycleParameters) { + *out = *in + if in.ColdStorageAfter != nil { + in, out := &in.ColdStorageAfter, &out.ColdStorageAfter + *out = new(float64) + **out = **in + } + if in.DeleteAfter != nil { + in, out := &in.DeleteAfter, &out.DeleteAfter + *out = new(float64) + **out = **in + } + if in.OptInToArchiveForSupportedResources != nil { + in, out := &in.OptInToArchiveForSupportedResources, &out.OptInToArchiveForSupportedResources + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleParameters. +func (in *LifecycleParameters) DeepCopy() *LifecycleParameters { + if in == nil { + return nil + } + out := new(LifecycleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Plan) DeepCopyInto(out *Plan) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plan. +func (in *Plan) DeepCopy() *Plan { + if in == nil { + return nil + } + out := new(Plan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Plan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanInitParameters) DeepCopyInto(out *PlanInitParameters) { + *out = *in + if in.AdvancedBackupSetting != nil { + in, out := &in.AdvancedBackupSetting, &out.AdvancedBackupSetting + *out = make([]AdvancedBackupSettingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanInitParameters. 
+func (in *PlanInitParameters) DeepCopy() *PlanInitParameters { + if in == nil { + return nil + } + out := new(PlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanList) DeepCopyInto(out *PlanList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Plan, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanList. +func (in *PlanList) DeepCopy() *PlanList { + if in == nil { + return nil + } + out := new(PlanList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlanList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanObservation) DeepCopyInto(out *PlanObservation) { + *out = *in + if in.AdvancedBackupSetting != nil { + in, out := &in.AdvancedBackupSetting, &out.AdvancedBackupSetting + *out = make([]AdvancedBackupSettingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanObservation. +func (in *PlanObservation) DeepCopy() *PlanObservation { + if in == nil { + return nil + } + out := new(PlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanParameters) DeepCopyInto(out *PlanParameters) { + *out = *in + if in.AdvancedBackupSetting != nil { + in, out := &in.AdvancedBackupSetting, &out.AdvancedBackupSetting + *out = make([]AdvancedBackupSettingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanParameters. +func (in *PlanParameters) DeepCopy() *PlanParameters { + if in == nil { + return nil + } + out := new(PlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanSpec) DeepCopyInto(out *PlanSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanSpec. +func (in *PlanSpec) DeepCopy() *PlanSpec { + if in == nil { + return nil + } + out := new(PlanSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanStatus) DeepCopyInto(out *PlanStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanStatus. +func (in *PlanStatus) DeepCopy() *PlanStatus { + if in == nil { + return nil + } + out := new(PlanStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportDeliveryChannelInitParameters) DeepCopyInto(out *ReportDeliveryChannelInitParameters) { + *out = *in + if in.Formats != nil { + in, out := &in.Formats, &out.Formats + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportDeliveryChannelInitParameters. +func (in *ReportDeliveryChannelInitParameters) DeepCopy() *ReportDeliveryChannelInitParameters { + if in == nil { + return nil + } + out := new(ReportDeliveryChannelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReportDeliveryChannelObservation) DeepCopyInto(out *ReportDeliveryChannelObservation) { + *out = *in + if in.Formats != nil { + in, out := &in.Formats, &out.Formats + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportDeliveryChannelObservation. +func (in *ReportDeliveryChannelObservation) DeepCopy() *ReportDeliveryChannelObservation { + if in == nil { + return nil + } + out := new(ReportDeliveryChannelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportDeliveryChannelParameters) DeepCopyInto(out *ReportDeliveryChannelParameters) { + *out = *in + if in.Formats != nil { + in, out := &in.Formats, &out.Formats + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportDeliveryChannelParameters. 
+func (in *ReportDeliveryChannelParameters) DeepCopy() *ReportDeliveryChannelParameters { + if in == nil { + return nil + } + out := new(ReportDeliveryChannelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportPlan) DeepCopyInto(out *ReportPlan) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportPlan. +func (in *ReportPlan) DeepCopy() *ReportPlan { + if in == nil { + return nil + } + out := new(ReportPlan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReportPlan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReportPlanInitParameters) DeepCopyInto(out *ReportPlanInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReportDeliveryChannel != nil { + in, out := &in.ReportDeliveryChannel, &out.ReportDeliveryChannel + *out = new(ReportDeliveryChannelInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReportSetting != nil { + in, out := &in.ReportSetting, &out.ReportSetting + *out = new(ReportSettingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportPlanInitParameters. +func (in *ReportPlanInitParameters) DeepCopy() *ReportPlanInitParameters { + if in == nil { + return nil + } + out := new(ReportPlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportPlanList) DeepCopyInto(out *ReportPlanList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReportPlan, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportPlanList. 
+func (in *ReportPlanList) DeepCopy() *ReportPlanList { + if in == nil { + return nil + } + out := new(ReportPlanList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReportPlanList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportPlanObservation) DeepCopyInto(out *ReportPlanObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = new(string) + **out = **in + } + if in.DeploymentStatus != nil { + in, out := &in.DeploymentStatus, &out.DeploymentStatus + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReportDeliveryChannel != nil { + in, out := &in.ReportDeliveryChannel, &out.ReportDeliveryChannel + *out = new(ReportDeliveryChannelObservation) + (*in).DeepCopyInto(*out) + } + if in.ReportSetting != nil { + in, out := &in.ReportSetting, &out.ReportSetting + *out = new(ReportSettingObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportPlanObservation. +func (in *ReportPlanObservation) DeepCopy() *ReportPlanObservation { + if in == nil { + return nil + } + out := new(ReportPlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportPlanParameters) DeepCopyInto(out *ReportPlanParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReportDeliveryChannel != nil { + in, out := &in.ReportDeliveryChannel, &out.ReportDeliveryChannel + *out = new(ReportDeliveryChannelParameters) + (*in).DeepCopyInto(*out) + } + if in.ReportSetting != nil { + in, out := &in.ReportSetting, &out.ReportSetting + *out = new(ReportSettingParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportPlanParameters. 
+func (in *ReportPlanParameters) DeepCopy() *ReportPlanParameters { + if in == nil { + return nil + } + out := new(ReportPlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportPlanSpec) DeepCopyInto(out *ReportPlanSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportPlanSpec. +func (in *ReportPlanSpec) DeepCopy() *ReportPlanSpec { + if in == nil { + return nil + } + out := new(ReportPlanSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportPlanStatus) DeepCopyInto(out *ReportPlanStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportPlanStatus. +func (in *ReportPlanStatus) DeepCopy() *ReportPlanStatus { + if in == nil { + return nil + } + out := new(ReportPlanStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReportSettingInitParameters) DeepCopyInto(out *ReportSettingInitParameters) { + *out = *in + if in.Accounts != nil { + in, out := &in.Accounts, &out.Accounts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FrameworkArns != nil { + in, out := &in.FrameworkArns, &out.FrameworkArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NumberOfFrameworks != nil { + in, out := &in.NumberOfFrameworks, &out.NumberOfFrameworks + *out = new(float64) + **out = **in + } + if in.OrganizationUnits != nil { + in, out := &in.OrganizationUnits, &out.OrganizationUnits + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReportTemplate != nil { + in, out := &in.ReportTemplate, &out.ReportTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportSettingInitParameters. +func (in *ReportSettingInitParameters) DeepCopy() *ReportSettingInitParameters { + if in == nil { + return nil + } + out := new(ReportSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReportSettingObservation) DeepCopyInto(out *ReportSettingObservation) { + *out = *in + if in.Accounts != nil { + in, out := &in.Accounts, &out.Accounts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FrameworkArns != nil { + in, out := &in.FrameworkArns, &out.FrameworkArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NumberOfFrameworks != nil { + in, out := &in.NumberOfFrameworks, &out.NumberOfFrameworks + *out = new(float64) + **out = **in + } + if in.OrganizationUnits != nil { + in, out := &in.OrganizationUnits, &out.OrganizationUnits + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReportTemplate != nil { + in, out := &in.ReportTemplate, &out.ReportTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportSettingObservation. +func (in *ReportSettingObservation) DeepCopy() *ReportSettingObservation { + if in == nil { + return nil + } + out := new(ReportSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReportSettingParameters) DeepCopyInto(out *ReportSettingParameters) { + *out = *in + if in.Accounts != nil { + in, out := &in.Accounts, &out.Accounts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FrameworkArns != nil { + in, out := &in.FrameworkArns, &out.FrameworkArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NumberOfFrameworks != nil { + in, out := &in.NumberOfFrameworks, &out.NumberOfFrameworks + *out = new(float64) + **out = **in + } + if in.OrganizationUnits != nil { + in, out := &in.OrganizationUnits, &out.OrganizationUnits + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReportTemplate != nil { + in, out := &in.ReportTemplate, &out.ReportTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportSettingParameters. +func (in *ReportSettingParameters) DeepCopy() *ReportSettingParameters { + if in == nil { + return nil + } + out := new(ReportSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) { + *out = *in + if in.CompletionWindow != nil { + in, out := &in.CompletionWindow, &out.CompletionWindow + *out = new(float64) + **out = **in + } + if in.CopyAction != nil { + in, out := &in.CopyAction, &out.CopyAction + *out = make([]CopyActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableContinuousBackup != nil { + in, out := &in.EnableContinuousBackup, &out.EnableContinuousBackup + *out = new(bool) + **out = **in + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(RuleLifecycleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RecoveryPointTags != nil { + in, out := &in.RecoveryPointTags, &out.RecoveryPointTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RuleName != nil { + in, out := &in.RuleName, &out.RuleName + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.StartWindow != nil { + in, out := &in.StartWindow, &out.StartWindow + *out = new(float64) + **out = **in + } + if in.TargetVaultName != nil { + in, out := &in.TargetVaultName, &out.TargetVaultName + *out = new(string) + **out = **in + } + if in.TargetVaultNameRef != nil { + in, out := &in.TargetVaultNameRef, &out.TargetVaultNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetVaultNameSelector != nil { + in, out := &in.TargetVaultNameSelector, &out.TargetVaultNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleInitParameters. 
+func (in *RuleInitParameters) DeepCopy() *RuleInitParameters { + if in == nil { + return nil + } + out := new(RuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleLifecycleInitParameters) DeepCopyInto(out *RuleLifecycleInitParameters) { + *out = *in + if in.ColdStorageAfter != nil { + in, out := &in.ColdStorageAfter, &out.ColdStorageAfter + *out = new(float64) + **out = **in + } + if in.DeleteAfter != nil { + in, out := &in.DeleteAfter, &out.DeleteAfter + *out = new(float64) + **out = **in + } + if in.OptInToArchiveForSupportedResources != nil { + in, out := &in.OptInToArchiveForSupportedResources, &out.OptInToArchiveForSupportedResources + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleLifecycleInitParameters. +func (in *RuleLifecycleInitParameters) DeepCopy() *RuleLifecycleInitParameters { + if in == nil { + return nil + } + out := new(RuleLifecycleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleLifecycleObservation) DeepCopyInto(out *RuleLifecycleObservation) { + *out = *in + if in.ColdStorageAfter != nil { + in, out := &in.ColdStorageAfter, &out.ColdStorageAfter + *out = new(float64) + **out = **in + } + if in.DeleteAfter != nil { + in, out := &in.DeleteAfter, &out.DeleteAfter + *out = new(float64) + **out = **in + } + if in.OptInToArchiveForSupportedResources != nil { + in, out := &in.OptInToArchiveForSupportedResources, &out.OptInToArchiveForSupportedResources + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleLifecycleObservation. 
+func (in *RuleLifecycleObservation) DeepCopy() *RuleLifecycleObservation { + if in == nil { + return nil + } + out := new(RuleLifecycleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleLifecycleParameters) DeepCopyInto(out *RuleLifecycleParameters) { + *out = *in + if in.ColdStorageAfter != nil { + in, out := &in.ColdStorageAfter, &out.ColdStorageAfter + *out = new(float64) + **out = **in + } + if in.DeleteAfter != nil { + in, out := &in.DeleteAfter, &out.DeleteAfter + *out = new(float64) + **out = **in + } + if in.OptInToArchiveForSupportedResources != nil { + in, out := &in.OptInToArchiveForSupportedResources, &out.OptInToArchiveForSupportedResources + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleLifecycleParameters. +func (in *RuleLifecycleParameters) DeepCopy() *RuleLifecycleParameters { + if in == nil { + return nil + } + out := new(RuleLifecycleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleObservation) DeepCopyInto(out *RuleObservation) { + *out = *in + if in.CompletionWindow != nil { + in, out := &in.CompletionWindow, &out.CompletionWindow + *out = new(float64) + **out = **in + } + if in.CopyAction != nil { + in, out := &in.CopyAction, &out.CopyAction + *out = make([]CopyActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableContinuousBackup != nil { + in, out := &in.EnableContinuousBackup, &out.EnableContinuousBackup + *out = new(bool) + **out = **in + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(RuleLifecycleObservation) + (*in).DeepCopyInto(*out) + } + if in.RecoveryPointTags != nil { + in, out := &in.RecoveryPointTags, &out.RecoveryPointTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RuleName != nil { + in, out := &in.RuleName, &out.RuleName + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.StartWindow != nil { + in, out := &in.StartWindow, &out.StartWindow + *out = new(float64) + **out = **in + } + if in.TargetVaultName != nil { + in, out := &in.TargetVaultName, &out.TargetVaultName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleObservation. +func (in *RuleObservation) DeepCopy() *RuleObservation { + if in == nil { + return nil + } + out := new(RuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleParameters) DeepCopyInto(out *RuleParameters) { + *out = *in + if in.CompletionWindow != nil { + in, out := &in.CompletionWindow, &out.CompletionWindow + *out = new(float64) + **out = **in + } + if in.CopyAction != nil { + in, out := &in.CopyAction, &out.CopyAction + *out = make([]CopyActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableContinuousBackup != nil { + in, out := &in.EnableContinuousBackup, &out.EnableContinuousBackup + *out = new(bool) + **out = **in + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(RuleLifecycleParameters) + (*in).DeepCopyInto(*out) + } + if in.RecoveryPointTags != nil { + in, out := &in.RecoveryPointTags, &out.RecoveryPointTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RuleName != nil { + in, out := &in.RuleName, &out.RuleName + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.StartWindow != nil { + in, out := &in.StartWindow, &out.StartWindow + *out = new(float64) + **out = **in + } + if in.TargetVaultName != nil { + in, out := &in.TargetVaultName, &out.TargetVaultName + *out = new(string) + **out = **in + } + if in.TargetVaultNameRef != nil { + in, out := &in.TargetVaultNameRef, &out.TargetVaultNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetVaultNameSelector != nil { + in, out := &in.TargetVaultNameSelector, &out.TargetVaultNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleParameters. 
+func (in *RuleParameters) DeepCopy() *RuleParameters { + if in == nil { + return nil + } + out := new(RuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeInitParameters) DeepCopyInto(out *ScopeInitParameters) { + *out = *in + if in.ComplianceResourceIds != nil { + in, out := &in.ComplianceResourceIds, &out.ComplianceResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ComplianceResourceTypes != nil { + in, out := &in.ComplianceResourceTypes, &out.ComplianceResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeInitParameters. +func (in *ScopeInitParameters) DeepCopy() *ScopeInitParameters { + if in == nil { + return nil + } + out := new(ScopeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScopeObservation) DeepCopyInto(out *ScopeObservation) { + *out = *in + if in.ComplianceResourceIds != nil { + in, out := &in.ComplianceResourceIds, &out.ComplianceResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ComplianceResourceTypes != nil { + in, out := &in.ComplianceResourceTypes, &out.ComplianceResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeObservation. +func (in *ScopeObservation) DeepCopy() *ScopeObservation { + if in == nil { + return nil + } + out := new(ScopeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScopeParameters) DeepCopyInto(out *ScopeParameters) { + *out = *in + if in.ComplianceResourceIds != nil { + in, out := &in.ComplianceResourceIds, &out.ComplianceResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ComplianceResourceTypes != nil { + in, out := &in.ComplianceResourceTypes, &out.ComplianceResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeParameters. +func (in *ScopeParameters) DeepCopy() *ScopeParameters { + if in == nil { + return nil + } + out := new(ScopeParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/backup/v1beta2/zz_generated.managed.go b/apis/backup/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..c0d288309c --- /dev/null +++ b/apis/backup/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Framework. +func (mg *Framework) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Framework. 
+func (mg *Framework) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Framework. +func (mg *Framework) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Framework. +func (mg *Framework) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Framework. +func (mg *Framework) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Framework. +func (mg *Framework) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Framework. +func (mg *Framework) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Framework. +func (mg *Framework) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Framework. +func (mg *Framework) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Framework. +func (mg *Framework) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Framework. +func (mg *Framework) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Framework. +func (mg *Framework) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Plan. +func (mg *Plan) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Plan. 
+func (mg *Plan) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Plan. +func (mg *Plan) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Plan. +func (mg *Plan) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Plan. +func (mg *Plan) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Plan. +func (mg *Plan) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Plan. +func (mg *Plan) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Plan. +func (mg *Plan) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Plan. +func (mg *Plan) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Plan. +func (mg *Plan) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Plan. +func (mg *Plan) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Plan. +func (mg *Plan) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ReportPlan. +func (mg *ReportPlan) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ReportPlan. 
+func (mg *ReportPlan) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ReportPlan. +func (mg *ReportPlan) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ReportPlan. +func (mg *ReportPlan) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ReportPlan. +func (mg *ReportPlan) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ReportPlan. +func (mg *ReportPlan) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ReportPlan. +func (mg *ReportPlan) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ReportPlan. +func (mg *ReportPlan) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ReportPlan. +func (mg *ReportPlan) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ReportPlan. +func (mg *ReportPlan) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ReportPlan. +func (mg *ReportPlan) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ReportPlan. 
+func (mg *ReportPlan) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/backup/v1beta2/zz_generated.managedlist.go b/apis/backup/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..7348dd758a --- /dev/null +++ b/apis/backup/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this FrameworkList. +func (l *FrameworkList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PlanList. +func (l *PlanList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ReportPlanList. +func (l *ReportPlanList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/backup/v1beta2/zz_generated.resolvers.go b/apis/backup/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..2c9cd3050e --- /dev/null +++ b/apis/backup/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,73 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Plan. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Plan) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Rule); i3++ { + { + m, l, err = apisresolver.GetManagedResource("backup.aws.upbound.io", "v1beta1", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Rule[i3].TargetVaultName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Rule[i3].TargetVaultNameRef, + Selector: mg.Spec.ForProvider.Rule[i3].TargetVaultNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Rule[i3].TargetVaultName") + } + mg.Spec.ForProvider.Rule[i3].TargetVaultName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Rule[i3].TargetVaultNameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Rule); i3++ { + { + m, l, err = apisresolver.GetManagedResource("backup.aws.upbound.io", "v1beta1", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Rule[i3].TargetVaultName), + 
Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Rule[i3].TargetVaultNameRef, + Selector: mg.Spec.InitProvider.Rule[i3].TargetVaultNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Rule[i3].TargetVaultName") + } + mg.Spec.InitProvider.Rule[i3].TargetVaultName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Rule[i3].TargetVaultNameRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/backup/v1beta2/zz_groupversion_info.go b/apis/backup/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..ecdf628c3f --- /dev/null +++ b/apis/backup/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=backup.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "backup.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/backup/v1beta2/zz_plan_terraformed.go b/apis/backup/v1beta2/zz_plan_terraformed.go new file mode 100755 index 0000000000..061168a6aa --- /dev/null +++ b/apis/backup/v1beta2/zz_plan_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Plan +func (mg *Plan) GetTerraformResourceType() string { + return "aws_backup_plan" +} + +// GetConnectionDetailsMapping for this Plan +func (tr *Plan) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Plan +func (tr *Plan) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Plan +func (tr *Plan) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Plan +func (tr *Plan) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Plan +func (tr *Plan) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Plan +func (tr *Plan) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Plan +func (tr *Plan) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Plan +func (tr 
*Plan) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Plan using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Plan) LateInitialize(attrs []byte) (bool, error) { + params := &PlanParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Plan) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/backup/v1beta2/zz_plan_types.go b/apis/backup/v1beta2/zz_plan_types.go new file mode 100755 index 0000000000..5ff2cd583f --- /dev/null +++ b/apis/backup/v1beta2/zz_plan_types.go @@ -0,0 +1,405 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdvancedBackupSettingInitParameters struct { + + // Specifies the backup option for a selected resource. This option is only available for Windows VSS backup jobs. Set to { WindowsVSS = "enabled" } to enable Windows VSS backup option and create a VSS Windows backup. + // +mapType=granular + BackupOptions map[string]*string `json:"backupOptions,omitempty" tf:"backup_options,omitempty"` + + // The type of AWS resource to be backed up. For VSS Windows backups, the only supported resource type is Amazon EC2. Valid values: EC2. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` +} + +type AdvancedBackupSettingObservation struct { + + // Specifies the backup option for a selected resource. This option is only available for Windows VSS backup jobs. Set to { WindowsVSS = "enabled" } to enable Windows VSS backup option and create a VSS Windows backup. + // +mapType=granular + BackupOptions map[string]*string `json:"backupOptions,omitempty" tf:"backup_options,omitempty"` + + // The type of AWS resource to be backed up. For VSS Windows backups, the only supported resource type is Amazon EC2. Valid values: EC2. 
+ ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` +} + +type AdvancedBackupSettingParameters struct { + + // Specifies the backup option for a selected resource. This option is only available for Windows VSS backup jobs. Set to { WindowsVSS = "enabled" } to enable Windows VSS backup option and create a VSS Windows backup. + // +kubebuilder:validation:Optional + // +mapType=granular + BackupOptions map[string]*string `json:"backupOptions" tf:"backup_options,omitempty"` + + // The type of AWS resource to be backed up. For VSS Windows backups, the only supported resource type is Amazon EC2. Valid values: EC2. + // +kubebuilder:validation:Optional + ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"` +} + +type CopyActionInitParameters struct { + + // An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. + DestinationVaultArn *string `json:"destinationVaultArn,omitempty" tf:"destination_vault_arn,omitempty"` + + // The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Fields documented below. + Lifecycle *LifecycleInitParameters `json:"lifecycle,omitempty" tf:"lifecycle,omitempty"` +} + +type CopyActionObservation struct { + + // An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. + DestinationVaultArn *string `json:"destinationVaultArn,omitempty" tf:"destination_vault_arn,omitempty"` + + // The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Fields documented below. + Lifecycle *LifecycleObservation `json:"lifecycle,omitempty" tf:"lifecycle,omitempty"` +} + +type CopyActionParameters struct { + + // An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. 
+ // +kubebuilder:validation:Optional + DestinationVaultArn *string `json:"destinationVaultArn" tf:"destination_vault_arn,omitempty"` + + // The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Fields documented below. + // +kubebuilder:validation:Optional + Lifecycle *LifecycleParameters `json:"lifecycle,omitempty" tf:"lifecycle,omitempty"` +} + +type LifecycleInitParameters struct { + + // Specifies the number of days after creation that a recovery point is moved to cold storage. + ColdStorageAfter *float64 `json:"coldStorageAfter,omitempty" tf:"cold_storage_after,omitempty"` + + // Specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than cold_storage_after. + DeleteAfter *float64 `json:"deleteAfter,omitempty" tf:"delete_after,omitempty"` + + // This setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings. + OptInToArchiveForSupportedResources *bool `json:"optInToArchiveForSupportedResources,omitempty" tf:"opt_in_to_archive_for_supported_resources,omitempty"` +} + +type LifecycleObservation struct { + + // Specifies the number of days after creation that a recovery point is moved to cold storage. + ColdStorageAfter *float64 `json:"coldStorageAfter,omitempty" tf:"cold_storage_after,omitempty"` + + // Specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than cold_storage_after. + DeleteAfter *float64 `json:"deleteAfter,omitempty" tf:"delete_after,omitempty"` + + // This setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings. 
+ OptInToArchiveForSupportedResources *bool `json:"optInToArchiveForSupportedResources,omitempty" tf:"opt_in_to_archive_for_supported_resources,omitempty"` +} + +type LifecycleParameters struct { + + // Specifies the number of days after creation that a recovery point is moved to cold storage. + // +kubebuilder:validation:Optional + ColdStorageAfter *float64 `json:"coldStorageAfter,omitempty" tf:"cold_storage_after,omitempty"` + + // Specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than cold_storage_after. + // +kubebuilder:validation:Optional + DeleteAfter *float64 `json:"deleteAfter,omitempty" tf:"delete_after,omitempty"` + + // This setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings. + // +kubebuilder:validation:Optional + OptInToArchiveForSupportedResources *bool `json:"optInToArchiveForSupportedResources,omitempty" tf:"opt_in_to_archive_for_supported_resources,omitempty"` +} + +type PlanInitParameters struct { + + // An object that specifies backup options for each resource type. + AdvancedBackupSetting []AdvancedBackupSettingInitParameters `json:"advancedBackupSetting,omitempty" tf:"advanced_backup_setting,omitempty"` + + // The display name of a backup plan. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A rule object that specifies a scheduled task that is used to back up a selection of resources. + Rule []RuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PlanObservation struct { + + // An object that specifies backup options for each resource type. + AdvancedBackupSetting []AdvancedBackupSettingObservation `json:"advancedBackupSetting,omitempty" tf:"advanced_backup_setting,omitempty"` + + // The ARN of the backup plan. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The id of the backup plan. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The display name of a backup plan. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A rule object that specifies a scheduled task that is used to back up a selection of resources. + Rule []RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Unique, randomly generated, Unicode, UTF-8 encoded string that serves as the version ID of the backup plan. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PlanParameters struct { + + // An object that specifies backup options for each resource type. + // +kubebuilder:validation:Optional + AdvancedBackupSetting []AdvancedBackupSettingParameters `json:"advancedBackupSetting,omitempty" tf:"advanced_backup_setting,omitempty"` + + // The display name of a backup plan. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A rule object that specifies a scheduled task that is used to back up a selection of resources. + // +kubebuilder:validation:Optional + Rule []RuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+}
+
+type RuleInitParameters struct {
+
+ // The amount of time in minutes AWS Backup attempts a backup before canceling the job and returning an error.
+ CompletionWindow *float64 `json:"completionWindow,omitempty" tf:"completion_window,omitempty"`
+
+ // Configuration block(s) with copy operation settings. Detailed below.
+ CopyAction []CopyActionInitParameters `json:"copyAction,omitempty" tf:"copy_action,omitempty"`
+
+ // Enable continuous backups for supported resources.
+ EnableContinuousBackup *bool `json:"enableContinuousBackup,omitempty" tf:"enable_continuous_backup,omitempty"`
+
+ // The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Fields documented below.
+ Lifecycle *RuleLifecycleInitParameters `json:"lifecycle,omitempty" tf:"lifecycle,omitempty"`
+
+ // Metadata that you can assign to help organize the resources that you create.
+ // +mapType=granular
+ RecoveryPointTags map[string]*string `json:"recoveryPointTags,omitempty" tf:"recovery_point_tags,omitempty"`
+
+ // A display name for a backup rule.
+ RuleName *string `json:"ruleName,omitempty" tf:"rule_name,omitempty"`
+
+ // A CRON expression specifying when AWS Backup initiates a backup job.
+ Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"`
+
+ // The amount of time in minutes before beginning a backup.
+ StartWindow *float64 `json:"startWindow,omitempty" tf:"start_window,omitempty"`
+
+ // The name of a logical container where backups are stored.
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/backup/v1beta1.Vault
+ TargetVaultName *string `json:"targetVaultName,omitempty" tf:"target_vault_name,omitempty"`
+
+ // Reference to a Vault in backup to populate targetVaultName.
+ // +kubebuilder:validation:Optional + TargetVaultNameRef *v1.Reference `json:"targetVaultNameRef,omitempty" tf:"-"` + + // Selector for a Vault in backup to populate targetVaultName. + // +kubebuilder:validation:Optional + TargetVaultNameSelector *v1.Selector `json:"targetVaultNameSelector,omitempty" tf:"-"` +} + +type RuleLifecycleInitParameters struct { + + // Specifies the number of days after creation that a recovery point is moved to cold storage. + ColdStorageAfter *float64 `json:"coldStorageAfter,omitempty" tf:"cold_storage_after,omitempty"` + + // Specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than cold_storage_after. + DeleteAfter *float64 `json:"deleteAfter,omitempty" tf:"delete_after,omitempty"` + + // This setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings. + OptInToArchiveForSupportedResources *bool `json:"optInToArchiveForSupportedResources,omitempty" tf:"opt_in_to_archive_for_supported_resources,omitempty"` +} + +type RuleLifecycleObservation struct { + + // Specifies the number of days after creation that a recovery point is moved to cold storage. + ColdStorageAfter *float64 `json:"coldStorageAfter,omitempty" tf:"cold_storage_after,omitempty"` + + // Specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than cold_storage_after. + DeleteAfter *float64 `json:"deleteAfter,omitempty" tf:"delete_after,omitempty"` + + // This setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings. 
+ OptInToArchiveForSupportedResources *bool `json:"optInToArchiveForSupportedResources,omitempty" tf:"opt_in_to_archive_for_supported_resources,omitempty"` +} + +type RuleLifecycleParameters struct { + + // Specifies the number of days after creation that a recovery point is moved to cold storage. + // +kubebuilder:validation:Optional + ColdStorageAfter *float64 `json:"coldStorageAfter,omitempty" tf:"cold_storage_after,omitempty"` + + // Specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than cold_storage_after. + // +kubebuilder:validation:Optional + DeleteAfter *float64 `json:"deleteAfter,omitempty" tf:"delete_after,omitempty"` + + // This setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings. + // +kubebuilder:validation:Optional + OptInToArchiveForSupportedResources *bool `json:"optInToArchiveForSupportedResources,omitempty" tf:"opt_in_to_archive_for_supported_resources,omitempty"` +} + +type RuleObservation struct { + + // The amount of time in minutes AWS Backup attempts a backup before canceling the job and returning an error. + CompletionWindow *float64 `json:"completionWindow,omitempty" tf:"completion_window,omitempty"` + + // Configuration block(s) with copy operation settings. Detailed below. + CopyAction []CopyActionObservation `json:"copyAction,omitempty" tf:"copy_action,omitempty"` + + // Enable continuous backups for supported resources. + EnableContinuousBackup *bool `json:"enableContinuousBackup,omitempty" tf:"enable_continuous_backup,omitempty"` + + // The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Fields documented below. + Lifecycle *RuleLifecycleObservation `json:"lifecycle,omitempty" tf:"lifecycle,omitempty"` + + // Metadata that you can assign to help organize the resources that you create. 
+ // +mapType=granular
+ RecoveryPointTags map[string]*string `json:"recoveryPointTags,omitempty" tf:"recovery_point_tags,omitempty"`
+
+ // A display name for a backup rule.
+ RuleName *string `json:"ruleName,omitempty" tf:"rule_name,omitempty"`
+
+ // A CRON expression specifying when AWS Backup initiates a backup job.
+ Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"`
+
+ // The amount of time in minutes before beginning a backup.
+ StartWindow *float64 `json:"startWindow,omitempty" tf:"start_window,omitempty"`
+
+ // The name of a logical container where backups are stored.
+ TargetVaultName *string `json:"targetVaultName,omitempty" tf:"target_vault_name,omitempty"`
+}
+
+type RuleParameters struct {
+
+ // The amount of time in minutes AWS Backup attempts a backup before canceling the job and returning an error.
+ // +kubebuilder:validation:Optional
+ CompletionWindow *float64 `json:"completionWindow,omitempty" tf:"completion_window,omitempty"`
+
+ // Configuration block(s) with copy operation settings. Detailed below.
+ // +kubebuilder:validation:Optional
+ CopyAction []CopyActionParameters `json:"copyAction,omitempty" tf:"copy_action,omitempty"`
+
+ // Enable continuous backups for supported resources.
+ // +kubebuilder:validation:Optional
+ EnableContinuousBackup *bool `json:"enableContinuousBackup,omitempty" tf:"enable_continuous_backup,omitempty"`
+
+ // The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Fields documented below.
+ // +kubebuilder:validation:Optional
+ Lifecycle *RuleLifecycleParameters `json:"lifecycle,omitempty" tf:"lifecycle,omitempty"`
+
+ // Metadata that you can assign to help organize the resources that you create.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ RecoveryPointTags map[string]*string `json:"recoveryPointTags,omitempty" tf:"recovery_point_tags,omitempty"`
+
+ // A display name for a backup rule.
+ // +kubebuilder:validation:Optional + RuleName *string `json:"ruleName" tf:"rule_name,omitempty"` + + // A CRON expression specifying when AWS Backup initiates a backup job. + // +kubebuilder:validation:Optional + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The amount of time in minutes before beginning a backup. + // +kubebuilder:validation:Optional + StartWindow *float64 `json:"startWindow,omitempty" tf:"start_window,omitempty"` + + // The name of a logical container where backups are stored. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/backup/v1beta1.Vault + // +kubebuilder:validation:Optional + TargetVaultName *string `json:"targetVaultName,omitempty" tf:"target_vault_name,omitempty"` + + // Reference to a Vault in backup to populate targetVaultName. + // +kubebuilder:validation:Optional + TargetVaultNameRef *v1.Reference `json:"targetVaultNameRef,omitempty" tf:"-"` + + // Selector for a Vault in backup to populate targetVaultName. + // +kubebuilder:validation:Optional + TargetVaultNameSelector *v1.Selector `json:"targetVaultNameSelector,omitempty" tf:"-"` +} + +// PlanSpec defines the desired state of Plan +type PlanSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PlanParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider PlanInitParameters `json:"initProvider,omitempty"` +} + +// PlanStatus defines the observed state of Plan. +type PlanStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PlanObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Plan is the Schema for the Plans API. Provides an AWS Backup plan resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Plan struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.rule) || (has(self.initProvider) && has(self.initProvider.rule))",message="spec.forProvider.rule is a required parameter" + Spec PlanSpec `json:"spec"` + Status PlanStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PlanList contains a list of Plans +type PlanList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Plan `json:"items"` +} + +// Repository type metadata. 
+var ( + Plan_Kind = "Plan" + Plan_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Plan_Kind}.String() + Plan_KindAPIVersion = Plan_Kind + "." + CRDGroupVersion.String() + Plan_GroupVersionKind = CRDGroupVersion.WithKind(Plan_Kind) +) + +func init() { + SchemeBuilder.Register(&Plan{}, &PlanList{}) +} diff --git a/apis/backup/v1beta2/zz_reportplan_terraformed.go b/apis/backup/v1beta2/zz_reportplan_terraformed.go new file mode 100755 index 0000000000..24e7931bf0 --- /dev/null +++ b/apis/backup/v1beta2/zz_reportplan_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ReportPlan +func (mg *ReportPlan) GetTerraformResourceType() string { + return "aws_backup_report_plan" +} + +// GetConnectionDetailsMapping for this ReportPlan +func (tr *ReportPlan) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ReportPlan +func (tr *ReportPlan) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ReportPlan +func (tr *ReportPlan) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ReportPlan +func (tr *ReportPlan) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ReportPlan +func (tr *ReportPlan) 
GetParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this ReportPlan
+func (tr *ReportPlan) SetParameters(params map[string]any) error {
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this ReportPlan
+func (tr *ReportPlan) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ReportPlan
+func (tr *ReportPlan) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
+
+// LateInitialize this ReportPlan using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *ReportPlan) LateInitialize(attrs []byte) (bool, error) { + params := &ReportPlanParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ReportPlan) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/backup/v1beta2/zz_reportplan_types.go b/apis/backup/v1beta2/zz_reportplan_types.go new file mode 100755 index 0000000000..67c840b43a --- /dev/null +++ b/apis/backup/v1beta2/zz_reportplan_types.go @@ -0,0 +1,283 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ReportDeliveryChannelInitParameters struct { + + // A list of the format of your reports: CSV, JSON, or both. If not specified, the default format is CSV. + // +listType=set + Formats []*string `json:"formats,omitempty" tf:"formats,omitempty"` + + // The unique name of the S3 bucket that receives your reports. + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // The prefix for where Backup Audit Manager delivers your reports to Amazon S3. The prefix is this part of the following path: s3://your-bucket-name/prefix/Backup/us-west-2/year/month/day/report-name. If not specified, there is no prefix. 
+ S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type ReportDeliveryChannelObservation struct { + + // A list of the format of your reports: CSV, JSON, or both. If not specified, the default format is CSV. + // +listType=set + Formats []*string `json:"formats,omitempty" tf:"formats,omitempty"` + + // The unique name of the S3 bucket that receives your reports. + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // The prefix for where Backup Audit Manager delivers your reports to Amazon S3. The prefix is this part of the following path: s3://your-bucket-name/prefix/Backup/us-west-2/year/month/day/report-name. If not specified, there is no prefix. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type ReportDeliveryChannelParameters struct { + + // A list of the format of your reports: CSV, JSON, or both. If not specified, the default format is CSV. + // +kubebuilder:validation:Optional + // +listType=set + Formats []*string `json:"formats,omitempty" tf:"formats,omitempty"` + + // The unique name of the S3 bucket that receives your reports. + // +kubebuilder:validation:Optional + S3BucketName *string `json:"s3BucketName" tf:"s3_bucket_name,omitempty"` + + // The prefix for where Backup Audit Manager delivers your reports to Amazon S3. The prefix is this part of the following path: s3://your-bucket-name/prefix/Backup/us-west-2/year/month/day/report-name. If not specified, there is no prefix. + // +kubebuilder:validation:Optional + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type ReportPlanInitParameters struct { + + // The description of the report plan with a maximum of 1,024 characters + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The unique name of the report plan. 
The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An object that contains information about where and how to deliver your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports. Detailed below. + ReportDeliveryChannel *ReportDeliveryChannelInitParameters `json:"reportDeliveryChannel,omitempty" tf:"report_delivery_channel,omitempty"` + + // An object that identifies the report template for the report. Reports are built using a report template. Detailed below. + ReportSetting *ReportSettingInitParameters `json:"reportSetting,omitempty" tf:"report_setting,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ReportPlanObservation struct { + + // The ARN of the backup report plan. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The date and time that a report plan is created, in Unix format and Coordinated Universal Time (UTC). + CreationTime *string `json:"creationTime,omitempty" tf:"creation_time,omitempty"` + + // The deployment status of a report plan. The statuses are: CREATE_IN_PROGRESS | UPDATE_IN_PROGRESS | DELETE_IN_PROGRESS | COMPLETED. + DeploymentStatus *string `json:"deploymentStatus,omitempty" tf:"deployment_status,omitempty"` + + // The description of the report plan with a maximum of 1,024 characters + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The id of the backup report plan. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The unique name of the report plan. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An object that contains information about where and how to deliver your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports. Detailed below. + ReportDeliveryChannel *ReportDeliveryChannelObservation `json:"reportDeliveryChannel,omitempty" tf:"report_delivery_channel,omitempty"` + + // An object that identifies the report template for the report. Reports are built using a report template. Detailed below. + ReportSetting *ReportSettingObservation `json:"reportSetting,omitempty" tf:"report_setting,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ReportPlanParameters struct { + + // The description of the report plan with a maximum of 1,024 characters + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The unique name of the report plan. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // An object that contains information about where and how to deliver your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports. Detailed below. 
+ // +kubebuilder:validation:Optional + ReportDeliveryChannel *ReportDeliveryChannelParameters `json:"reportDeliveryChannel,omitempty" tf:"report_delivery_channel,omitempty"` + + // An object that identifies the report template for the report. Reports are built using a report template. Detailed below. + // +kubebuilder:validation:Optional + ReportSetting *ReportSettingParameters `json:"reportSetting,omitempty" tf:"report_setting,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ReportSettingInitParameters struct { + + // Specifies the list of accounts a report covers. + // +listType=set + Accounts []*string `json:"accounts,omitempty" tf:"accounts,omitempty"` + + // Specifies the Amazon Resource Names (ARNs) of the frameworks a report covers. + // +listType=set + FrameworkArns []*string `json:"frameworkArns,omitempty" tf:"framework_arns,omitempty"` + + // Specifies the number of frameworks a report covers. + NumberOfFrameworks *float64 `json:"numberOfFrameworks,omitempty" tf:"number_of_frameworks,omitempty"` + + // Specifies the list of Organizational Units a report covers. + // +listType=set + OrganizationUnits []*string `json:"organizationUnits,omitempty" tf:"organization_units,omitempty"` + + // Specifies the list of regions a report covers. + // +listType=set + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` + + // Identifies the report template for the report. Reports are built using a report template. The report templates are: RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT. + ReportTemplate *string `json:"reportTemplate,omitempty" tf:"report_template,omitempty"` +} + +type ReportSettingObservation struct { + + // Specifies the list of accounts a report covers. 
+ // +listType=set + Accounts []*string `json:"accounts,omitempty" tf:"accounts,omitempty"` + + // Specifies the Amazon Resource Names (ARNs) of the frameworks a report covers. + // +listType=set + FrameworkArns []*string `json:"frameworkArns,omitempty" tf:"framework_arns,omitempty"` + + // Specifies the number of frameworks a report covers. + NumberOfFrameworks *float64 `json:"numberOfFrameworks,omitempty" tf:"number_of_frameworks,omitempty"` + + // Specifies the list of Organizational Units a report covers. + // +listType=set + OrganizationUnits []*string `json:"organizationUnits,omitempty" tf:"organization_units,omitempty"` + + // Specifies the list of regions a report covers. + // +listType=set + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` + + // Identifies the report template for the report. Reports are built using a report template. The report templates are: RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT. + ReportTemplate *string `json:"reportTemplate,omitempty" tf:"report_template,omitempty"` +} + +type ReportSettingParameters struct { + + // Specifies the list of accounts a report covers. + // +kubebuilder:validation:Optional + // +listType=set + Accounts []*string `json:"accounts,omitempty" tf:"accounts,omitempty"` + + // Specifies the Amazon Resource Names (ARNs) of the frameworks a report covers. + // +kubebuilder:validation:Optional + // +listType=set + FrameworkArns []*string `json:"frameworkArns,omitempty" tf:"framework_arns,omitempty"` + + // Specifies the number of frameworks a report covers. + // +kubebuilder:validation:Optional + NumberOfFrameworks *float64 `json:"numberOfFrameworks,omitempty" tf:"number_of_frameworks,omitempty"` + + // Specifies the list of Organizational Units a report covers. 
+ // +kubebuilder:validation:Optional + // +listType=set + OrganizationUnits []*string `json:"organizationUnits,omitempty" tf:"organization_units,omitempty"` + + // Specifies the list of regions a report covers. + // +kubebuilder:validation:Optional + // +listType=set + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` + + // Identifies the report template for the report. Reports are built using a report template. The report templates are: RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT. + // +kubebuilder:validation:Optional + ReportTemplate *string `json:"reportTemplate" tf:"report_template,omitempty"` +} + +// ReportPlanSpec defines the desired state of ReportPlan +type ReportPlanSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ReportPlanParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ReportPlanInitParameters `json:"initProvider,omitempty"` +} + +// ReportPlanStatus defines the observed state of ReportPlan. +type ReportPlanStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ReportPlanObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ReportPlan is the Schema for the ReportPlans API. Provides an AWS Backup Report Plan resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ReportPlan struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.reportDeliveryChannel) || (has(self.initProvider) && has(self.initProvider.reportDeliveryChannel))",message="spec.forProvider.reportDeliveryChannel is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.reportSetting) || (has(self.initProvider) && has(self.initProvider.reportSetting))",message="spec.forProvider.reportSetting is a required parameter" + Spec ReportPlanSpec `json:"spec"` + Status ReportPlanStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ReportPlanList contains a list of ReportPlans +type ReportPlanList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ReportPlan `json:"items"` +} + +// Repository type metadata. 
+var ( + ReportPlan_Kind = "ReportPlan" + ReportPlan_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ReportPlan_Kind}.String() + ReportPlan_KindAPIVersion = ReportPlan_Kind + "." + CRDGroupVersion.String() + ReportPlan_GroupVersionKind = CRDGroupVersion.WithKind(ReportPlan_Kind) +) + +func init() { + SchemeBuilder.Register(&ReportPlan{}, &ReportPlanList{}) +} diff --git a/apis/batch/v1beta1/zz_generated.conversion_spokes.go b/apis/batch/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..db138f96e2 --- /dev/null +++ b/apis/batch/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this JobDefinition to the hub type. +func (tr *JobDefinition) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the JobDefinition type. 
+func (tr *JobDefinition) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SchedulingPolicy to the hub type. +func (tr *SchedulingPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SchedulingPolicy type. +func (tr *SchedulingPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/batch/v1beta2/zz_generated.conversion_hubs.go b/apis/batch/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9b7a38f0cd --- /dev/null +++ b/apis/batch/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *JobDefinition) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *SchedulingPolicy) Hub() {} diff --git a/apis/batch/v1beta2/zz_generated.deepcopy.go b/apis/batch/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..05db7367db --- /dev/null +++ b/apis/batch/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2385 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainersInitParameters) DeepCopyInto(out *ContainersInitParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullPolicy != nil { + in, out := &in.ImagePullPolicy, &out.ImagePullPolicy + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourcesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContextInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMountsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainersInitParameters. +func (in *ContainersInitParameters) DeepCopy() *ContainersInitParameters { + if in == nil { + return nil + } + out := new(ContainersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainersObservation) DeepCopyInto(out *ContainersObservation) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullPolicy != nil { + in, out := &in.ImagePullPolicy, &out.ImagePullPolicy + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourcesObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContextObservation) + (*in).DeepCopyInto(*out) + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, 
&out.VolumeMounts + *out = make([]VolumeMountsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainersObservation. +func (in *ContainersObservation) DeepCopy() *ContainersObservation { + if in == nil { + return nil + } + out := new(ContainersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainersParameters) DeepCopyInto(out *ContainersParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImagePullPolicy != nil { + in, out := &in.ImagePullPolicy, &out.ImagePullPolicy + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourcesParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContextParameters) + (*in).DeepCopyInto(*out) + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMountsParameters, len(*in)) + for i := 
range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainersParameters. +func (in *ContainersParameters) DeepCopy() *ContainersParameters { + if in == nil { + return nil + } + out := new(ContainersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EksPropertiesInitParameters) DeepCopyInto(out *EksPropertiesInitParameters) { + *out = *in + if in.PodProperties != nil { + in, out := &in.PodProperties, &out.PodProperties + *out = new(PodPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EksPropertiesInitParameters. +func (in *EksPropertiesInitParameters) DeepCopy() *EksPropertiesInitParameters { + if in == nil { + return nil + } + out := new(EksPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EksPropertiesObservation) DeepCopyInto(out *EksPropertiesObservation) { + *out = *in + if in.PodProperties != nil { + in, out := &in.PodProperties, &out.PodProperties + *out = new(PodPropertiesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EksPropertiesObservation. +func (in *EksPropertiesObservation) DeepCopy() *EksPropertiesObservation { + if in == nil { + return nil + } + out := new(EksPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EksPropertiesParameters) DeepCopyInto(out *EksPropertiesParameters) { + *out = *in + if in.PodProperties != nil { + in, out := &in.PodProperties, &out.PodProperties + *out = new(PodPropertiesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EksPropertiesParameters. +func (in *EksPropertiesParameters) DeepCopy() *EksPropertiesParameters { + if in == nil { + return nil + } + out := new(EksPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirInitParameters) DeepCopyInto(out *EmptyDirInitParameters) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirInitParameters. +func (in *EmptyDirInitParameters) DeepCopy() *EmptyDirInitParameters { + if in == nil { + return nil + } + out := new(EmptyDirInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirObservation) DeepCopyInto(out *EmptyDirObservation) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirObservation. 
+func (in *EmptyDirObservation) DeepCopy() *EmptyDirObservation { + if in == nil { + return nil + } + out := new(EmptyDirObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirParameters) DeepCopyInto(out *EmptyDirParameters) { + *out = *in + if in.Medium != nil { + in, out := &in.Medium, &out.Medium + *out = new(string) + **out = **in + } + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirParameters. +func (in *EmptyDirParameters) DeepCopy() *EmptyDirParameters { + if in == nil { + return nil + } + out := new(EmptyDirParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvInitParameters) DeepCopyInto(out *EnvInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvInitParameters. +func (in *EnvInitParameters) DeepCopy() *EnvInitParameters { + if in == nil { + return nil + } + out := new(EnvInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvObservation) DeepCopyInto(out *EnvObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvObservation. +func (in *EnvObservation) DeepCopy() *EnvObservation { + if in == nil { + return nil + } + out := new(EnvObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvParameters) DeepCopyInto(out *EnvParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvParameters. +func (in *EnvParameters) DeepCopy() *EnvParameters { + if in == nil { + return nil + } + out := new(EnvParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EvaluateOnExitInitParameters) DeepCopyInto(out *EvaluateOnExitInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.OnExitCode != nil { + in, out := &in.OnExitCode, &out.OnExitCode + *out = new(string) + **out = **in + } + if in.OnReason != nil { + in, out := &in.OnReason, &out.OnReason + *out = new(string) + **out = **in + } + if in.OnStatusReason != nil { + in, out := &in.OnStatusReason, &out.OnStatusReason + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluateOnExitInitParameters. 
+func (in *EvaluateOnExitInitParameters) DeepCopy() *EvaluateOnExitInitParameters { + if in == nil { + return nil + } + out := new(EvaluateOnExitInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EvaluateOnExitObservation) DeepCopyInto(out *EvaluateOnExitObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.OnExitCode != nil { + in, out := &in.OnExitCode, &out.OnExitCode + *out = new(string) + **out = **in + } + if in.OnReason != nil { + in, out := &in.OnReason, &out.OnReason + *out = new(string) + **out = **in + } + if in.OnStatusReason != nil { + in, out := &in.OnStatusReason, &out.OnStatusReason + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluateOnExitObservation. +func (in *EvaluateOnExitObservation) DeepCopy() *EvaluateOnExitObservation { + if in == nil { + return nil + } + out := new(EvaluateOnExitObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EvaluateOnExitParameters) DeepCopyInto(out *EvaluateOnExitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.OnExitCode != nil { + in, out := &in.OnExitCode, &out.OnExitCode + *out = new(string) + **out = **in + } + if in.OnReason != nil { + in, out := &in.OnReason, &out.OnReason + *out = new(string) + **out = **in + } + if in.OnStatusReason != nil { + in, out := &in.OnStatusReason, &out.OnStatusReason + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluateOnExitParameters. 
+func (in *EvaluateOnExitParameters) DeepCopy() *EvaluateOnExitParameters { + if in == nil { + return nil + } + out := new(EvaluateOnExitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FairSharePolicyInitParameters) DeepCopyInto(out *FairSharePolicyInitParameters) { + *out = *in + if in.ComputeReservation != nil { + in, out := &in.ComputeReservation, &out.ComputeReservation + *out = new(float64) + **out = **in + } + if in.ShareDecaySeconds != nil { + in, out := &in.ShareDecaySeconds, &out.ShareDecaySeconds + *out = new(float64) + **out = **in + } + if in.ShareDistribution != nil { + in, out := &in.ShareDistribution, &out.ShareDistribution + *out = make([]ShareDistributionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FairSharePolicyInitParameters. +func (in *FairSharePolicyInitParameters) DeepCopy() *FairSharePolicyInitParameters { + if in == nil { + return nil + } + out := new(FairSharePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FairSharePolicyObservation) DeepCopyInto(out *FairSharePolicyObservation) { + *out = *in + if in.ComputeReservation != nil { + in, out := &in.ComputeReservation, &out.ComputeReservation + *out = new(float64) + **out = **in + } + if in.ShareDecaySeconds != nil { + in, out := &in.ShareDecaySeconds, &out.ShareDecaySeconds + *out = new(float64) + **out = **in + } + if in.ShareDistribution != nil { + in, out := &in.ShareDistribution, &out.ShareDistribution + *out = make([]ShareDistributionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FairSharePolicyObservation. +func (in *FairSharePolicyObservation) DeepCopy() *FairSharePolicyObservation { + if in == nil { + return nil + } + out := new(FairSharePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FairSharePolicyParameters) DeepCopyInto(out *FairSharePolicyParameters) { + *out = *in + if in.ComputeReservation != nil { + in, out := &in.ComputeReservation, &out.ComputeReservation + *out = new(float64) + **out = **in + } + if in.ShareDecaySeconds != nil { + in, out := &in.ShareDecaySeconds, &out.ShareDecaySeconds + *out = new(float64) + **out = **in + } + if in.ShareDistribution != nil { + in, out := &in.ShareDistribution, &out.ShareDistribution + *out = make([]ShareDistributionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FairSharePolicyParameters. 
+func (in *FairSharePolicyParameters) DeepCopy() *FairSharePolicyParameters { + if in == nil { + return nil + } + out := new(FairSharePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathInitParameters) DeepCopyInto(out *HostPathInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathInitParameters. +func (in *HostPathInitParameters) DeepCopy() *HostPathInitParameters { + if in == nil { + return nil + } + out := new(HostPathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathObservation) DeepCopyInto(out *HostPathObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathObservation. +func (in *HostPathObservation) DeepCopy() *HostPathObservation { + if in == nil { + return nil + } + out := new(HostPathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathParameters) DeepCopyInto(out *HostPathParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathParameters. 
+func (in *HostPathParameters) DeepCopy() *HostPathParameters { + if in == nil { + return nil + } + out := new(HostPathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobDefinition) DeepCopyInto(out *JobDefinition) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinition. +func (in *JobDefinition) DeepCopy() *JobDefinition { + if in == nil { + return nil + } + out := new(JobDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobDefinition) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobDefinitionInitParameters) DeepCopyInto(out *JobDefinitionInitParameters) { + *out = *in + if in.ContainerProperties != nil { + in, out := &in.ContainerProperties, &out.ContainerProperties + *out = new(string) + **out = **in + } + if in.DeregisterOnNewRevision != nil { + in, out := &in.DeregisterOnNewRevision, &out.DeregisterOnNewRevision + *out = new(bool) + **out = **in + } + if in.EksProperties != nil { + in, out := &in.EksProperties, &out.EksProperties + *out = new(EksPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeProperties != nil { + in, out := &in.NodeProperties, &out.NodeProperties + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PlatformCapabilities != nil { + in, out := &in.PlatformCapabilities, &out.PlatformCapabilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(bool) + **out = **in + } + if in.RetryStrategy != nil { + in, out := &in.RetryStrategy, &out.RetryStrategy + *out = new(RetryStrategyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SchedulingPriority != nil { + in, out := &in.SchedulingPriority, &out.SchedulingPriority + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TimeoutInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionInitParameters. +func (in *JobDefinitionInitParameters) DeepCopy() *JobDefinitionInitParameters { + if in == nil { + return nil + } + out := new(JobDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobDefinitionList) DeepCopyInto(out *JobDefinitionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]JobDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionList. +func (in *JobDefinitionList) DeepCopy() *JobDefinitionList { + if in == nil { + return nil + } + out := new(JobDefinitionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobDefinitionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobDefinitionObservation) DeepCopyInto(out *JobDefinitionObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnPrefix != nil { + in, out := &in.ArnPrefix, &out.ArnPrefix + *out = new(string) + **out = **in + } + if in.ContainerProperties != nil { + in, out := &in.ContainerProperties, &out.ContainerProperties + *out = new(string) + **out = **in + } + if in.DeregisterOnNewRevision != nil { + in, out := &in.DeregisterOnNewRevision, &out.DeregisterOnNewRevision + *out = new(bool) + **out = **in + } + if in.EksProperties != nil { + in, out := &in.EksProperties, &out.EksProperties + *out = new(EksPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeProperties != nil { + in, out := &in.NodeProperties, &out.NodeProperties + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PlatformCapabilities != nil { + in, out := &in.PlatformCapabilities, &out.PlatformCapabilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(bool) + **out = **in + } + if in.RetryStrategy != nil { + in, out := &in.RetryStrategy, &out.RetryStrategy + *out = new(RetryStrategyObservation) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + 
*out = new(float64) + **out = **in + } + if in.SchedulingPriority != nil { + in, out := &in.SchedulingPriority, &out.SchedulingPriority + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TimeoutObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionObservation. +func (in *JobDefinitionObservation) DeepCopy() *JobDefinitionObservation { + if in == nil { + return nil + } + out := new(JobDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobDefinitionParameters) DeepCopyInto(out *JobDefinitionParameters) { + *out = *in + if in.ContainerProperties != nil { + in, out := &in.ContainerProperties, &out.ContainerProperties + *out = new(string) + **out = **in + } + if in.DeregisterOnNewRevision != nil { + in, out := &in.DeregisterOnNewRevision, &out.DeregisterOnNewRevision + *out = new(bool) + **out = **in + } + if in.EksProperties != nil { + in, out := &in.EksProperties, &out.EksProperties + *out = new(EksPropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeProperties != nil { + in, out := &in.NodeProperties, &out.NodeProperties + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PlatformCapabilities != nil { + in, out := &in.PlatformCapabilities, &out.PlatformCapabilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RetryStrategy != nil { + in, out := &in.RetryStrategy, &out.RetryStrategy + *out = new(RetryStrategyParameters) + (*in).DeepCopyInto(*out) + } + if in.SchedulingPriority != nil { + in, out := &in.SchedulingPriority, &out.SchedulingPriority + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { 
+ var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TimeoutParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionParameters. +func (in *JobDefinitionParameters) DeepCopy() *JobDefinitionParameters { + if in == nil { + return nil + } + out := new(JobDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobDefinitionSpec) DeepCopyInto(out *JobDefinitionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionSpec. +func (in *JobDefinitionSpec) DeepCopy() *JobDefinitionSpec { + if in == nil { + return nil + } + out := new(JobDefinitionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobDefinitionStatus) DeepCopyInto(out *JobDefinitionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionStatus. 
+func (in *JobDefinitionStatus) DeepCopy() *JobDefinitionStatus { + if in == nil { + return nil + } + out := new(JobDefinitionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataInitParameters) DeepCopyInto(out *MetadataInitParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataInitParameters. +func (in *MetadataInitParameters) DeepCopy() *MetadataInitParameters { + if in == nil { + return nil + } + out := new(MetadataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataObservation) DeepCopyInto(out *MetadataObservation) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataObservation. +func (in *MetadataObservation) DeepCopy() *MetadataObservation { + if in == nil { + return nil + } + out := new(MetadataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataParameters) DeepCopyInto(out *MetadataParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataParameters. +func (in *MetadataParameters) DeepCopy() *MetadataParameters { + if in == nil { + return nil + } + out := new(MetadataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPropertiesInitParameters) DeepCopyInto(out *PodPropertiesInitParameters) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = new(ContainersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(string) + **out = **in + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(MetadataInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountName != nil { + in, out := &in.ServiceAccountName, &out.ServiceAccountName + *out = new(string) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]VolumesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPropertiesInitParameters. 
+func (in *PodPropertiesInitParameters) DeepCopy() *PodPropertiesInitParameters { + if in == nil { + return nil + } + out := new(PodPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPropertiesObservation) DeepCopyInto(out *PodPropertiesObservation) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = new(ContainersObservation) + (*in).DeepCopyInto(*out) + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(string) + **out = **in + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(MetadataObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountName != nil { + in, out := &in.ServiceAccountName, &out.ServiceAccountName + *out = new(string) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]VolumesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPropertiesObservation. +func (in *PodPropertiesObservation) DeepCopy() *PodPropertiesObservation { + if in == nil { + return nil + } + out := new(PodPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodPropertiesParameters) DeepCopyInto(out *PodPropertiesParameters) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = new(ContainersParameters) + (*in).DeepCopyInto(*out) + } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(string) + **out = **in + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(MetadataParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountName != nil { + in, out := &in.ServiceAccountName, &out.ServiceAccountName + *out = new(string) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]VolumesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPropertiesParameters. +func (in *PodPropertiesParameters) DeepCopy() *PodPropertiesParameters { + if in == nil { + return nil + } + out := new(PodPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesInitParameters) DeepCopyInto(out *ResourcesInitParameters) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesInitParameters. +func (in *ResourcesInitParameters) DeepCopy() *ResourcesInitParameters { + if in == nil { + return nil + } + out := new(ResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesObservation) DeepCopyInto(out *ResourcesObservation) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesObservation. +func (in *ResourcesObservation) DeepCopy() *ResourcesObservation { + if in == nil { + return nil + } + out := new(ResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesParameters) DeepCopyInto(out *ResourcesParameters) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesParameters. +func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { + if in == nil { + return nil + } + out := new(ResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryStrategyInitParameters) DeepCopyInto(out *RetryStrategyInitParameters) { + *out = *in + if in.Attempts != nil { + in, out := &in.Attempts, &out.Attempts + *out = new(float64) + **out = **in + } + if in.EvaluateOnExit != nil { + in, out := &in.EvaluateOnExit, &out.EvaluateOnExit + *out = make([]EvaluateOnExitInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryStrategyInitParameters. +func (in *RetryStrategyInitParameters) DeepCopy() *RetryStrategyInitParameters { + if in == nil { + return nil + } + out := new(RetryStrategyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RetryStrategyObservation) DeepCopyInto(out *RetryStrategyObservation) { + *out = *in + if in.Attempts != nil { + in, out := &in.Attempts, &out.Attempts + *out = new(float64) + **out = **in + } + if in.EvaluateOnExit != nil { + in, out := &in.EvaluateOnExit, &out.EvaluateOnExit + *out = make([]EvaluateOnExitObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryStrategyObservation. +func (in *RetryStrategyObservation) DeepCopy() *RetryStrategyObservation { + if in == nil { + return nil + } + out := new(RetryStrategyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryStrategyParameters) DeepCopyInto(out *RetryStrategyParameters) { + *out = *in + if in.Attempts != nil { + in, out := &in.Attempts, &out.Attempts + *out = new(float64) + **out = **in + } + if in.EvaluateOnExit != nil { + in, out := &in.EvaluateOnExit, &out.EvaluateOnExit + *out = make([]EvaluateOnExitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryStrategyParameters. +func (in *RetryStrategyParameters) DeepCopy() *RetryStrategyParameters { + if in == nil { + return nil + } + out := new(RetryStrategyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulingPolicy) DeepCopyInto(out *SchedulingPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicy. +func (in *SchedulingPolicy) DeepCopy() *SchedulingPolicy { + if in == nil { + return nil + } + out := new(SchedulingPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchedulingPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyInitParameters) DeepCopyInto(out *SchedulingPolicyInitParameters) { + *out = *in + if in.FairSharePolicy != nil { + in, out := &in.FairSharePolicy, &out.FairSharePolicy + *out = new(FairSharePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyInitParameters. +func (in *SchedulingPolicyInitParameters) DeepCopy() *SchedulingPolicyInitParameters { + if in == nil { + return nil + } + out := new(SchedulingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulingPolicyList) DeepCopyInto(out *SchedulingPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SchedulingPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyList. +func (in *SchedulingPolicyList) DeepCopy() *SchedulingPolicyList { + if in == nil { + return nil + } + out := new(SchedulingPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchedulingPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyObservation) DeepCopyInto(out *SchedulingPolicyObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.FairSharePolicy != nil { + in, out := &in.FairSharePolicy, &out.FairSharePolicy + *out = new(FairSharePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, 
&outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyObservation. +func (in *SchedulingPolicyObservation) DeepCopy() *SchedulingPolicyObservation { + if in == nil { + return nil + } + out := new(SchedulingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyParameters) DeepCopyInto(out *SchedulingPolicyParameters) { + *out = *in + if in.FairSharePolicy != nil { + in, out := &in.FairSharePolicy, &out.FairSharePolicy + *out = new(FairSharePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyParameters. +func (in *SchedulingPolicyParameters) DeepCopy() *SchedulingPolicyParameters { + if in == nil { + return nil + } + out := new(SchedulingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicySpec) DeepCopyInto(out *SchedulingPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicySpec. 
+func (in *SchedulingPolicySpec) DeepCopy() *SchedulingPolicySpec { + if in == nil { + return nil + } + out := new(SchedulingPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicyStatus) DeepCopyInto(out *SchedulingPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicyStatus. +func (in *SchedulingPolicyStatus) DeepCopy() *SchedulingPolicyStatus { + if in == nil { + return nil + } + out := new(SchedulingPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretInitParameters) DeepCopyInto(out *SecretInitParameters) { + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInitParameters. +func (in *SecretInitParameters) DeepCopy() *SecretInitParameters { + if in == nil { + return nil + } + out := new(SecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretObservation) DeepCopyInto(out *SecretObservation) { + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretObservation. +func (in *SecretObservation) DeepCopy() *SecretObservation { + if in == nil { + return nil + } + out := new(SecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretParameters) DeepCopyInto(out *SecretParameters) { + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretParameters. +func (in *SecretParameters) DeepCopy() *SecretParameters { + if in == nil { + return nil + } + out := new(SecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityContextInitParameters) DeepCopyInto(out *SecurityContextInitParameters) { + *out = *in + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFileSystem != nil { + in, out := &in.ReadOnlyRootFileSystem, &out.ReadOnlyRootFileSystem + *out = new(bool) + **out = **in + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(float64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextInitParameters. +func (in *SecurityContextInitParameters) DeepCopy() *SecurityContextInitParameters { + if in == nil { + return nil + } + out := new(SecurityContextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContextObservation) DeepCopyInto(out *SecurityContextObservation) { + *out = *in + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFileSystem != nil { + in, out := &in.ReadOnlyRootFileSystem, &out.ReadOnlyRootFileSystem + *out = new(bool) + **out = **in + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(float64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextObservation. 
+func (in *SecurityContextObservation) DeepCopy() *SecurityContextObservation { + if in == nil { + return nil + } + out := new(SecurityContextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContextParameters) DeepCopyInto(out *SecurityContextParameters) { + *out = *in + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFileSystem != nil { + in, out := &in.ReadOnlyRootFileSystem, &out.ReadOnlyRootFileSystem + *out = new(bool) + **out = **in + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(float64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextParameters. +func (in *SecurityContextParameters) DeepCopy() *SecurityContextParameters { + if in == nil { + return nil + } + out := new(SecurityContextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShareDistributionInitParameters) DeepCopyInto(out *ShareDistributionInitParameters) { + *out = *in + if in.ShareIdentifier != nil { + in, out := &in.ShareIdentifier, &out.ShareIdentifier + *out = new(string) + **out = **in + } + if in.WeightFactor != nil { + in, out := &in.WeightFactor, &out.WeightFactor + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShareDistributionInitParameters. 
+func (in *ShareDistributionInitParameters) DeepCopy() *ShareDistributionInitParameters { + if in == nil { + return nil + } + out := new(ShareDistributionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShareDistributionObservation) DeepCopyInto(out *ShareDistributionObservation) { + *out = *in + if in.ShareIdentifier != nil { + in, out := &in.ShareIdentifier, &out.ShareIdentifier + *out = new(string) + **out = **in + } + if in.WeightFactor != nil { + in, out := &in.WeightFactor, &out.WeightFactor + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShareDistributionObservation. +func (in *ShareDistributionObservation) DeepCopy() *ShareDistributionObservation { + if in == nil { + return nil + } + out := new(ShareDistributionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShareDistributionParameters) DeepCopyInto(out *ShareDistributionParameters) { + *out = *in + if in.ShareIdentifier != nil { + in, out := &in.ShareIdentifier, &out.ShareIdentifier + *out = new(string) + **out = **in + } + if in.WeightFactor != nil { + in, out := &in.WeightFactor, &out.WeightFactor + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShareDistributionParameters. +func (in *ShareDistributionParameters) DeepCopy() *ShareDistributionParameters { + if in == nil { + return nil + } + out := new(ShareDistributionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeoutInitParameters) DeepCopyInto(out *TimeoutInitParameters) { + *out = *in + if in.AttemptDurationSeconds != nil { + in, out := &in.AttemptDurationSeconds, &out.AttemptDurationSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutInitParameters. +func (in *TimeoutInitParameters) DeepCopy() *TimeoutInitParameters { + if in == nil { + return nil + } + out := new(TimeoutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutObservation) DeepCopyInto(out *TimeoutObservation) { + *out = *in + if in.AttemptDurationSeconds != nil { + in, out := &in.AttemptDurationSeconds, &out.AttemptDurationSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutObservation. +func (in *TimeoutObservation) DeepCopy() *TimeoutObservation { + if in == nil { + return nil + } + out := new(TimeoutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutParameters) DeepCopyInto(out *TimeoutParameters) { + *out = *in + if in.AttemptDurationSeconds != nil { + in, out := &in.AttemptDurationSeconds, &out.AttemptDurationSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutParameters. +func (in *TimeoutParameters) DeepCopy() *TimeoutParameters { + if in == nil { + return nil + } + out := new(TimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeMountsInitParameters) DeepCopyInto(out *VolumeMountsInitParameters) { + *out = *in + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountsInitParameters. +func (in *VolumeMountsInitParameters) DeepCopy() *VolumeMountsInitParameters { + if in == nil { + return nil + } + out := new(VolumeMountsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMountsObservation) DeepCopyInto(out *VolumeMountsObservation) { + *out = *in + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountsObservation. +func (in *VolumeMountsObservation) DeepCopy() *VolumeMountsObservation { + if in == nil { + return nil + } + out := new(VolumeMountsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeMountsParameters) DeepCopyInto(out *VolumeMountsParameters) { + *out = *in + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountsParameters. +func (in *VolumeMountsParameters) DeepCopy() *VolumeMountsParameters { + if in == nil { + return nil + } + out := new(VolumeMountsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumesInitParameters) DeepCopyInto(out *VolumesInitParameters) { + *out = *in + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesInitParameters. +func (in *VolumesInitParameters) DeepCopy() *VolumesInitParameters { + if in == nil { + return nil + } + out := new(VolumesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumesObservation) DeepCopyInto(out *VolumesObservation) { + *out = *in + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirObservation) + (*in).DeepCopyInto(*out) + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesObservation. +func (in *VolumesObservation) DeepCopy() *VolumesObservation { + if in == nil { + return nil + } + out := new(VolumesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumesParameters) DeepCopyInto(out *VolumesParameters) { + *out = *in + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirParameters) + (*in).DeepCopyInto(*out) + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumesParameters. 
+func (in *VolumesParameters) DeepCopy() *VolumesParameters { + if in == nil { + return nil + } + out := new(VolumesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/batch/v1beta2/zz_generated.managed.go b/apis/batch/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..594d24ca39 --- /dev/null +++ b/apis/batch/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this JobDefinition. +func (mg *JobDefinition) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this JobDefinition. +func (mg *JobDefinition) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this JobDefinition. +func (mg *JobDefinition) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this JobDefinition. +func (mg *JobDefinition) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this JobDefinition. +func (mg *JobDefinition) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this JobDefinition. +func (mg *JobDefinition) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this JobDefinition. +func (mg *JobDefinition) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this JobDefinition. 
+func (mg *JobDefinition) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this JobDefinition. +func (mg *JobDefinition) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this JobDefinition. +func (mg *JobDefinition) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this JobDefinition. +func (mg *JobDefinition) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this JobDefinition. +func (mg *JobDefinition) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SchedulingPolicy. +func (mg *SchedulingPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SchedulingPolicy. +func (mg *SchedulingPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SchedulingPolicy. +func (mg *SchedulingPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SchedulingPolicy. +func (mg *SchedulingPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SchedulingPolicy. +func (mg *SchedulingPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SchedulingPolicy. +func (mg *SchedulingPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SchedulingPolicy. 
+func (mg *SchedulingPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SchedulingPolicy. +func (mg *SchedulingPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SchedulingPolicy. +func (mg *SchedulingPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SchedulingPolicy. +func (mg *SchedulingPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SchedulingPolicy. +func (mg *SchedulingPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SchedulingPolicy. +func (mg *SchedulingPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/batch/v1beta2/zz_generated.managedlist.go b/apis/batch/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..eefee17d80 --- /dev/null +++ b/apis/batch/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this JobDefinitionList. +func (l *JobDefinitionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SchedulingPolicyList. 
+func (l *SchedulingPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/batch/v1beta2/zz_groupversion_info.go b/apis/batch/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..33551218da --- /dev/null +++ b/apis/batch/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=batch.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "batch.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/batch/v1beta2/zz_jobdefinition_terraformed.go b/apis/batch/v1beta2/zz_jobdefinition_terraformed.go new file mode 100755 index 0000000000..2c337b9ecf --- /dev/null +++ b/apis/batch/v1beta2/zz_jobdefinition_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this JobDefinition +func (mg *JobDefinition) GetTerraformResourceType() string { + return "aws_batch_job_definition" +} + +// GetConnectionDetailsMapping for this JobDefinition +func (tr *JobDefinition) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this JobDefinition +func (tr *JobDefinition) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this JobDefinition +func (tr *JobDefinition) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this JobDefinition +func (tr *JobDefinition) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this JobDefinition +func (tr *JobDefinition) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this JobDefinition +func (tr *JobDefinition) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this JobDefinition +func (tr *JobDefinition) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return 
nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this JobDefinition +func (tr *JobDefinition) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this JobDefinition using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *JobDefinition) LateInitialize(attrs []byte) (bool, error) { + params := &JobDefinitionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *JobDefinition) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/batch/v1beta2/zz_jobdefinition_types.go b/apis/batch/v1beta2/zz_jobdefinition_types.go new file mode 100755 index 0000000000..eebf33af1c --- /dev/null +++ b/apis/batch/v1beta2/zz_jobdefinition_types.go @@ -0,0 +1,801 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContainersInitParameters struct { + + // An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment. + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // The environment variables to pass to a container. See EKS Environment below. + Env []EnvInitParameters `json:"env,omitempty" tf:"env,omitempty"` + + // The Docker image used to start the container. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // The image pull policy for the container. Supported values are Always, IfNotPresent, and Never. + ImagePullPolicy *string `json:"imagePullPolicy,omitempty" tf:"image_pull_policy,omitempty"` + + // The name of the container. 
If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type and amount of resources to assign to a container. The supported resources include memory, cpu, and nvidia.com/gpu. + Resources *ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + + // The security context for a job. + SecurityContext *SecurityContextInitParameters `json:"securityContext,omitempty" tf:"security_context,omitempty"` + + // The volume mounts for the container. + VolumeMounts []VolumeMountsInitParameters `json:"volumeMounts,omitempty" tf:"volume_mounts,omitempty"` +} + +type ContainersObservation struct { + + // An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment. + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // The environment variables to pass to a container. See EKS Environment below. + Env []EnvObservation `json:"env,omitempty" tf:"env,omitempty"` + + // The Docker image used to start the container. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // The image pull policy for the container. Supported values are Always, IfNotPresent, and Never. + ImagePullPolicy *string `json:"imagePullPolicy,omitempty" tf:"image_pull_policy,omitempty"` + + // The name of the container. If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type and amount of resources to assign to a container. The supported resources include memory, cpu, and nvidia.com/gpu. + Resources *ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + + // The security context for a job. + SecurityContext *SecurityContextObservation `json:"securityContext,omitempty" tf:"security_context,omitempty"` + + // The volume mounts for the container. + VolumeMounts []VolumeMountsObservation `json:"volumeMounts,omitempty" tf:"volume_mounts,omitempty"` +} + +type ContainersParameters struct { + + // An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment. + // +kubebuilder:validation:Optional + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // The environment variables to pass to a container. See EKS Environment below. + // +kubebuilder:validation:Optional + Env []EnvParameters `json:"env,omitempty" tf:"env,omitempty"` + + // The Docker image used to start the container. + // +kubebuilder:validation:Optional + Image *string `json:"image" tf:"image,omitempty"` + + // The image pull policy for the container. Supported values are Always, IfNotPresent, and Never. + // +kubebuilder:validation:Optional + ImagePullPolicy *string `json:"imagePullPolicy,omitempty" tf:"image_pull_policy,omitempty"` + + // The name of the container. If the name isn't specified, the default name "Default" is used. 
Each container in a pod must have a unique name. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type and amount of resources to assign to a container. The supported resources include memory, cpu, and nvidia.com/gpu. + // +kubebuilder:validation:Optional + Resources *ResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"` + + // The security context for a job. + // +kubebuilder:validation:Optional + SecurityContext *SecurityContextParameters `json:"securityContext,omitempty" tf:"security_context,omitempty"` + + // The volume mounts for the container. + // +kubebuilder:validation:Optional + VolumeMounts []VolumeMountsParameters `json:"volumeMounts,omitempty" tf:"volume_mounts,omitempty"` +} + +type EksPropertiesInitParameters struct { + + // The properties for the Kubernetes pod resources of a job. See pod_properties below. + PodProperties *PodPropertiesInitParameters `json:"podProperties,omitempty" tf:"pod_properties,omitempty"` +} + +type EksPropertiesObservation struct { + + // The properties for the Kubernetes pod resources of a job. See pod_properties below. + PodProperties *PodPropertiesObservation `json:"podProperties,omitempty" tf:"pod_properties,omitempty"` +} + +type EksPropertiesParameters struct { + + // The properties for the Kubernetes pod resources of a job. See pod_properties below. + // +kubebuilder:validation:Optional + PodProperties *PodPropertiesParameters `json:"podProperties" tf:"pod_properties,omitempty"` +} + +type EmptyDirInitParameters struct { + + // The medium to store the volume. The default value is an empty string, which uses the storage of the node. + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // The maximum size of the volume. By default, there's no maximum size defined. + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type EmptyDirObservation struct { + + // The medium to store the volume. 
The default value is an empty string, which uses the storage of the node. + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // The maximum size of the volume. By default, there's no maximum size defined. + SizeLimit *string `json:"sizeLimit,omitempty" tf:"size_limit,omitempty"` +} + +type EmptyDirParameters struct { + + // The medium to store the volume. The default value is an empty string, which uses the storage of the node. + // +kubebuilder:validation:Optional + Medium *string `json:"medium,omitempty" tf:"medium,omitempty"` + + // The maximum size of the volume. By default, there's no maximum size defined. + // +kubebuilder:validation:Optional + SizeLimit *string `json:"sizeLimit" tf:"size_limit,omitempty"` +} + +type EnvInitParameters struct { + + // Specifies the name of the job definition. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the environment variable. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EnvObservation struct { + + // Specifies the name of the job definition. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the environment variable. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EnvParameters struct { + + // Specifies the name of the job definition. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the environment variable. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type EvaluateOnExitInitParameters struct { + + // Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: retry, exit. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A glob pattern to match against the decimal representation of the exit code returned for a job. 
+ OnExitCode *string `json:"onExitCode,omitempty" tf:"on_exit_code,omitempty"` + + // A glob pattern to match against the reason returned for a job. + OnReason *string `json:"onReason,omitempty" tf:"on_reason,omitempty"` + + // A glob pattern to match against the status reason returned for a job. + OnStatusReason *string `json:"onStatusReason,omitempty" tf:"on_status_reason,omitempty"` +} + +type EvaluateOnExitObservation struct { + + // Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: retry, exit. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A glob pattern to match against the decimal representation of the exit code returned for a job. + OnExitCode *string `json:"onExitCode,omitempty" tf:"on_exit_code,omitempty"` + + // A glob pattern to match against the reason returned for a job. + OnReason *string `json:"onReason,omitempty" tf:"on_reason,omitempty"` + + // A glob pattern to match against the status reason returned for a job. + OnStatusReason *string `json:"onStatusReason,omitempty" tf:"on_status_reason,omitempty"` +} + +type EvaluateOnExitParameters struct { + + // Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: retry, exit. + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // A glob pattern to match against the decimal representation of the exit code returned for a job. + // +kubebuilder:validation:Optional + OnExitCode *string `json:"onExitCode,omitempty" tf:"on_exit_code,omitempty"` + + // A glob pattern to match against the reason returned for a job. + // +kubebuilder:validation:Optional + OnReason *string `json:"onReason,omitempty" tf:"on_reason,omitempty"` + + // A glob pattern to match against the status reason returned for a job. 
+ // +kubebuilder:validation:Optional + OnStatusReason *string `json:"onStatusReason,omitempty" tf:"on_status_reason,omitempty"` +} + +type HostPathInitParameters struct { + + // The path of the file or directory on the host to mount into containers on the pod. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type HostPathObservation struct { + + // The path of the file or directory on the host to mount into containers on the pod. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type HostPathParameters struct { + + // The path of the file or directory on the host to mount into containers on the pod. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type JobDefinitionInitParameters struct { + + // A valid container properties provided as a single valid JSON document. This parameter is only valid if the type parameter is container. + ContainerProperties *string `json:"containerProperties,omitempty" tf:"container_properties,omitempty"` + + // When updating a job definition a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true. + DeregisterOnNewRevision *bool `json:"deregisterOnNewRevision,omitempty" tf:"deregister_on_new_revision,omitempty"` + + // A valid eks properties. This parameter is only valid if the type parameter is container. + EksProperties *EksPropertiesInitParameters `json:"eksProperties,omitempty" tf:"eks_properties,omitempty"` + + // Specifies the name of the job definition. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A valid node properties provided as a single valid JSON document. This parameter is required if the type parameter is multinode. + NodeProperties *string `json:"nodeProperties,omitempty" tf:"node_properties,omitempty"` + + // Specifies the parameter substitution placeholders to set in the job definition. 
+	// +mapType=granular
+	Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"`
+
+	// The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
+	// +listType=set
+	PlatformCapabilities []*string `json:"platformCapabilities,omitempty" tf:"platform_capabilities,omitempty"`
+
+	// Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
+	PropagateTags *bool `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"`
+
+	// Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
+	RetryStrategy *RetryStrategyInitParameters `json:"retryStrategy,omitempty" tf:"retry_strategy,omitempty"`
+
+	// The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
+	SchedulingPriority *float64 `json:"schedulingPriority,omitempty" tf:"scheduling_priority,omitempty"`
+
+	// Key-value map of resource tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
+	Timeout *TimeoutInitParameters `json:"timeout,omitempty" tf:"timeout,omitempty"`
+
+	// The type of job definition. Must be container or multinode.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type JobDefinitionObservation struct {
+
+	// The Amazon Resource Name of the job definition, includes revision (:#).
+	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`
+
+	// The ARN without the revision number.
+	ArnPrefix *string `json:"arnPrefix,omitempty" tf:"arn_prefix,omitempty"`
+
+	// A valid container properties provided as a single valid JSON document. This parameter is only valid if the type parameter is container.
+	ContainerProperties *string `json:"containerProperties,omitempty" tf:"container_properties,omitempty"`
+
+	// When updating a job definition, a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
+	DeregisterOnNewRevision *bool `json:"deregisterOnNewRevision,omitempty" tf:"deregister_on_new_revision,omitempty"`
+
+	// A valid eks properties. This parameter is only valid if the type parameter is container.
+	EksProperties *EksPropertiesObservation `json:"eksProperties,omitempty" tf:"eks_properties,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Specifies the name of the job definition.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// A valid node properties provided as a single valid JSON document. This parameter is required if the type parameter is multinode.
+	NodeProperties *string `json:"nodeProperties,omitempty" tf:"node_properties,omitempty"`
+
+	// Specifies the parameter substitution placeholders to set in the job definition.
+	// +mapType=granular
+	Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"`
+
+	// The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
+	// +listType=set
+	PlatformCapabilities []*string `json:"platformCapabilities,omitempty" tf:"platform_capabilities,omitempty"`
+
+	// Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
+	PropagateTags *bool `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"`
+
+	// Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
+	RetryStrategy *RetryStrategyObservation `json:"retryStrategy,omitempty" tf:"retry_strategy,omitempty"`
+
+	// The revision of the job definition.
+	Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"`
+
+	// The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
+	SchedulingPriority *float64 `json:"schedulingPriority,omitempty" tf:"scheduling_priority,omitempty"`
+
+	// Key-value map of resource tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
+	// +mapType=granular
+	TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"`
+
+	// Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
+	Timeout *TimeoutObservation `json:"timeout,omitempty" tf:"timeout,omitempty"`
+
+	// The type of job definition. Must be container or multinode.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type JobDefinitionParameters struct {
+
+	// A valid container properties provided as a single valid JSON document. This parameter is only valid if the type parameter is container.
+	// +kubebuilder:validation:Optional
+	ContainerProperties *string `json:"containerProperties,omitempty" tf:"container_properties,omitempty"`
+
+	// When updating a job definition, a new revision is created. This parameter determines if the previous version is deregistered (INACTIVE) or left ACTIVE. Defaults to true.
+	// +kubebuilder:validation:Optional
+	DeregisterOnNewRevision *bool `json:"deregisterOnNewRevision,omitempty" tf:"deregister_on_new_revision,omitempty"`
+
+	// A valid eks properties. This parameter is only valid if the type parameter is container.
+	// +kubebuilder:validation:Optional
+	EksProperties *EksPropertiesParameters `json:"eksProperties,omitempty" tf:"eks_properties,omitempty"`
+
+	// Specifies the name of the job definition.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// A valid node properties provided as a single valid JSON document. This parameter is required if the type parameter is multinode.
+	// +kubebuilder:validation:Optional
+	NodeProperties *string `json:"nodeProperties,omitempty" tf:"node_properties,omitempty"`
+
+	// Specifies the parameter substitution placeholders to set in the job definition.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"`
+
+	// The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	PlatformCapabilities []*string `json:"platformCapabilities,omitempty" tf:"platform_capabilities,omitempty"`
+
+	// Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is false.
+	// +kubebuilder:validation:Optional
+	PropagateTags *bool `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"`
+
+	// Region is the region you'd like your resource to be created in.
+	// +upjet:crd:field:TFTag=-
+	// +kubebuilder:validation:Required
+	Region *string `json:"region" tf:"-"`
+
+	// Specifies the retry strategy to use for failed jobs that are submitted with this job definition. Maximum number of retry_strategy is 1. Defined below.
+	// +kubebuilder:validation:Optional
+	RetryStrategy *RetryStrategyParameters `json:"retryStrategy,omitempty" tf:"retry_strategy,omitempty"`
+
+	// The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. Allowed values 0 through 9999.
+	// +kubebuilder:validation:Optional
+	SchedulingPriority *float64 `json:"schedulingPriority,omitempty" tf:"scheduling_priority,omitempty"`
+
+	// Key-value map of resource tags.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of timeout is 1. Defined below.
+	// +kubebuilder:validation:Optional
+	Timeout *TimeoutParameters `json:"timeout,omitempty" tf:"timeout,omitempty"`
+
+	// The type of job definition. Must be container or multinode.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type MetadataInitParameters struct {
+
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+}
+
+type MetadataObservation struct {
+
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+}
+
+type MetadataParameters struct {
+
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
+}
+
+type PodPropertiesInitParameters struct {
+
+	// The properties of the container that's used on the Amazon EKS pod. See containers below.
+	Containers *ContainersInitParameters `json:"containers,omitempty" tf:"containers,omitempty"`
+
+	// The DNS policy for the pod. The default value is ClusterFirst. If the host_network argument is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation.
+	DNSPolicy *string `json:"dnsPolicy,omitempty" tf:"dns_policy,omitempty"`
+
+	// Indicates if the pod uses the host's network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
+	HostNetwork *bool `json:"hostNetwork,omitempty" tf:"host_network,omitempty"`
+
+	// Metadata about the Kubernetes pod.
+	Metadata *MetadataInitParameters `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+	// The name of the service account that's used to run the pod.
+	ServiceAccountName *string `json:"serviceAccountName,omitempty" tf:"service_account_name,omitempty"`
+
+	// Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports emptyDir, hostPath, and secret volume types.
+	Volumes []VolumesInitParameters `json:"volumes,omitempty" tf:"volumes,omitempty"`
+}
+
+type PodPropertiesObservation struct {
+
+	// The properties of the container that's used on the Amazon EKS pod. See containers below.
+	Containers *ContainersObservation `json:"containers,omitempty" tf:"containers,omitempty"`
+
+	// The DNS policy for the pod. The default value is ClusterFirst. If the host_network argument is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation.
+	DNSPolicy *string `json:"dnsPolicy,omitempty" tf:"dns_policy,omitempty"`
+
+	// Indicates if the pod uses the host's network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
+	HostNetwork *bool `json:"hostNetwork,omitempty" tf:"host_network,omitempty"`
+
+	// Metadata about the Kubernetes pod.
+	Metadata *MetadataObservation `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+	// The name of the service account that's used to run the pod.
+	ServiceAccountName *string `json:"serviceAccountName,omitempty" tf:"service_account_name,omitempty"`
+
+	// Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports emptyDir, hostPath, and secret volume types.
+	Volumes []VolumesObservation `json:"volumes,omitempty" tf:"volumes,omitempty"`
+}
+
+type PodPropertiesParameters struct {
+
+	// The properties of the container that's used on the Amazon EKS pod. See containers below.
+	// +kubebuilder:validation:Optional
+	Containers *ContainersParameters `json:"containers" tf:"containers,omitempty"`
+
+	// The DNS policy for the pod. The default value is ClusterFirst. If the host_network argument is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation.
+	// +kubebuilder:validation:Optional
+	DNSPolicy *string `json:"dnsPolicy,omitempty" tf:"dns_policy,omitempty"`
+
+	// Indicates if the pod uses the host's network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
+	// +kubebuilder:validation:Optional
+	HostNetwork *bool `json:"hostNetwork,omitempty" tf:"host_network,omitempty"`
+
+	// Metadata about the Kubernetes pod.
+	// +kubebuilder:validation:Optional
+	Metadata *MetadataParameters `json:"metadata,omitempty" tf:"metadata,omitempty"`
+
+	// The name of the service account that's used to run the pod.
+	// +kubebuilder:validation:Optional
+	ServiceAccountName *string `json:"serviceAccountName,omitempty" tf:"service_account_name,omitempty"`
+
+	// Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports emptyDir, hostPath, and secret volume types.
+	// +kubebuilder:validation:Optional
+	Volumes []VolumesParameters `json:"volumes,omitempty" tf:"volumes,omitempty"`
+}
+
+type ResourcesInitParameters struct {
+
+	// +mapType=granular
+	Limits map[string]*string `json:"limits,omitempty" tf:"limits,omitempty"`
+
+	// +mapType=granular
+	Requests map[string]*string `json:"requests,omitempty" tf:"requests,omitempty"`
+}
+
+type ResourcesObservation struct {
+
+	// +mapType=granular
+	Limits map[string]*string `json:"limits,omitempty" tf:"limits,omitempty"`
+
+	// +mapType=granular
+	Requests map[string]*string `json:"requests,omitempty" tf:"requests,omitempty"`
+}
+
+type ResourcesParameters struct {
+
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Limits map[string]*string `json:"limits,omitempty" tf:"limits,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Requests map[string]*string `json:"requests,omitempty" tf:"requests,omitempty"`
+}
+
+type RetryStrategyInitParameters struct {
+
+	// The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts.
+	Attempts *float64 `json:"attempts,omitempty" tf:"attempts,omitempty"`
+
+	// The evaluate on exit conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified. You may specify up to 5 configuration blocks.
+	EvaluateOnExit []EvaluateOnExitInitParameters `json:"evaluateOnExit,omitempty" tf:"evaluate_on_exit,omitempty"`
+}
+
+type RetryStrategyObservation struct {
+
+	// The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts.
+	Attempts *float64 `json:"attempts,omitempty" tf:"attempts,omitempty"`
+
+	// The evaluate on exit conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified. You may specify up to 5 configuration blocks.
+	EvaluateOnExit []EvaluateOnExitObservation `json:"evaluateOnExit,omitempty" tf:"evaluate_on_exit,omitempty"`
+}
+
+type RetryStrategyParameters struct {
+
+	// The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts.
+	// +kubebuilder:validation:Optional
+	Attempts *float64 `json:"attempts,omitempty" tf:"attempts,omitempty"`
+
+	// The evaluate on exit conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified. You may specify up to 5 configuration blocks.
+	// +kubebuilder:validation:Optional
+	EvaluateOnExit []EvaluateOnExitParameters `json:"evaluateOnExit,omitempty" tf:"evaluate_on_exit,omitempty"`
+}
+
+type SecretInitParameters struct {
+
+	// Specifies whether the secret or the secret's keys must be defined.
+	Optional *bool `json:"optional,omitempty" tf:"optional,omitempty"`
+
+	// The name of the secret. The name must be allowed as a DNS subdomain name.
+	SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"`
+}
+
+type SecretObservation struct {
+
+	// Specifies whether the secret or the secret's keys must be defined.
+	Optional *bool `json:"optional,omitempty" tf:"optional,omitempty"`
+
+	// The name of the secret. The name must be allowed as a DNS subdomain name.
+	SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"`
+}
+
+type SecretParameters struct {
+
+	// Specifies whether the secret or the secret's keys must be defined.
+	// +kubebuilder:validation:Optional
+	Optional *bool `json:"optional,omitempty" tf:"optional,omitempty"`
+
+	// The name of the secret. The name must be allowed as a DNS subdomain name.
+	// +kubebuilder:validation:Optional
+	SecretName *string `json:"secretName" tf:"secret_name,omitempty"`
+}
+
+type SecurityContextInitParameters struct {
+	Privileged *bool `json:"privileged,omitempty" tf:"privileged,omitempty"`
+
+	ReadOnlyRootFileSystem *bool `json:"readOnlyRootFileSystem,omitempty" tf:"read_only_root_file_system,omitempty"`
+
+	RunAsGroup *float64 `json:"runAsGroup,omitempty" tf:"run_as_group,omitempty"`
+
+	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" tf:"run_as_non_root,omitempty"`
+
+	RunAsUser *float64 `json:"runAsUser,omitempty" tf:"run_as_user,omitempty"`
+}
+
+type SecurityContextObservation struct {
+	Privileged *bool `json:"privileged,omitempty" tf:"privileged,omitempty"`
+
+	ReadOnlyRootFileSystem *bool `json:"readOnlyRootFileSystem,omitempty" tf:"read_only_root_file_system,omitempty"`
+
+	RunAsGroup *float64 `json:"runAsGroup,omitempty" tf:"run_as_group,omitempty"`
+
+	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" tf:"run_as_non_root,omitempty"`
+
+	RunAsUser *float64 `json:"runAsUser,omitempty" tf:"run_as_user,omitempty"`
+}
+
+type SecurityContextParameters struct {
+
+	// +kubebuilder:validation:Optional
+	Privileged *bool `json:"privileged,omitempty" tf:"privileged,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	ReadOnlyRootFileSystem *bool `json:"readOnlyRootFileSystem,omitempty" tf:"read_only_root_file_system,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	RunAsGroup *float64 `json:"runAsGroup,omitempty" tf:"run_as_group,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" tf:"run_as_non_root,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	RunAsUser *float64 `json:"runAsUser,omitempty" tf:"run_as_user,omitempty"`
+}
+
+type TimeoutInitParameters struct {
+
+	// The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.
+	AttemptDurationSeconds *float64 `json:"attemptDurationSeconds,omitempty" tf:"attempt_duration_seconds,omitempty"`
+}
+
+type TimeoutObservation struct {
+
+	// The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.
+	AttemptDurationSeconds *float64 `json:"attemptDurationSeconds,omitempty" tf:"attempt_duration_seconds,omitempty"`
+}
+
+type TimeoutParameters struct {
+
+	// The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.
+	// +kubebuilder:validation:Optional
+	AttemptDurationSeconds *float64 `json:"attemptDurationSeconds,omitempty" tf:"attempt_duration_seconds,omitempty"`
+}
+
+type VolumeMountsInitParameters struct {
+
+	// The path of the file or directory on the host to mount into containers on the pod.
+	MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"`
+
+	// Specifies the name of the job definition.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"`
+}
+
+type VolumeMountsObservation struct {
+
+	// The path of the file or directory on the host to mount into containers on the pod.
+	MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"`
+
+	// Specifies the name of the job definition.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"`
+}
+
+type VolumeMountsParameters struct {
+
+	// The path of the file or directory on the host to mount into containers on the pod.
+	// +kubebuilder:validation:Optional
+	MountPath *string `json:"mountPath" tf:"mount_path,omitempty"`
+
+	// Specifies the name of the job definition.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"`
+}
+
+type VolumesInitParameters struct {
+	EmptyDir *EmptyDirInitParameters `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"`
+
+	// The path of the file or directory on the host to mount into containers on the pod.
+	HostPath *HostPathInitParameters `json:"hostPath,omitempty" tf:"host_path,omitempty"`
+
+	// Specifies the name of the job definition.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	Secret *SecretInitParameters `json:"secret,omitempty" tf:"secret,omitempty"`
+}
+
+type VolumesObservation struct {
+	EmptyDir *EmptyDirObservation `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"`
+
+	// The path of the file or directory on the host to mount into containers on the pod.
+	HostPath *HostPathObservation `json:"hostPath,omitempty" tf:"host_path,omitempty"`
+
+	// Specifies the name of the job definition.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	Secret *SecretObservation `json:"secret,omitempty" tf:"secret,omitempty"`
+}
+
+type VolumesParameters struct {
+
+	// +kubebuilder:validation:Optional
+	EmptyDir *EmptyDirParameters `json:"emptyDir,omitempty" tf:"empty_dir,omitempty"`
+
+	// The path of the file or directory on the host to mount into containers on the pod.
+	// +kubebuilder:validation:Optional
+	HostPath *HostPathParameters `json:"hostPath,omitempty" tf:"host_path,omitempty"`
+
+	// Specifies the name of the job definition.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	Secret *SecretParameters `json:"secret,omitempty" tf:"secret,omitempty"`
+}
+
+// JobDefinitionSpec defines the desired state of JobDefinition
+type JobDefinitionSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     JobDefinitionParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider JobDefinitionInitParameters `json:"initProvider,omitempty"`
+}
+
+// JobDefinitionStatus defines the observed state of JobDefinition.
+type JobDefinitionStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        JobDefinitionObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// JobDefinition is the Schema for the JobDefinitions API. Provides a Batch Job Definition resource.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
+type JobDefinition struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter"
+	Spec   JobDefinitionSpec   `json:"spec"`
+	Status JobDefinitionStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// JobDefinitionList contains a list of JobDefinitions
+type JobDefinitionList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []JobDefinition `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	JobDefinition_Kind             = "JobDefinition"
+	JobDefinition_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: JobDefinition_Kind}.String()
+	JobDefinition_KindAPIVersion   = JobDefinition_Kind + "." + CRDGroupVersion.String()
+	JobDefinition_GroupVersionKind = CRDGroupVersion.WithKind(JobDefinition_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&JobDefinition{}, &JobDefinitionList{})
+}
diff --git a/apis/batch/v1beta2/zz_schedulingpolicy_terraformed.go b/apis/batch/v1beta2/zz_schedulingpolicy_terraformed.go
new file mode 100755
index 0000000000..d934dd8513
--- /dev/null
+++ b/apis/batch/v1beta2/zz_schedulingpolicy_terraformed.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this SchedulingPolicy
+func (mg *SchedulingPolicy) GetTerraformResourceType() string {
+	return "aws_batch_scheduling_policy"
+}
+
+// GetConnectionDetailsMapping for this SchedulingPolicy
+func (tr *SchedulingPolicy) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this SchedulingPolicy
+func (tr *SchedulingPolicy) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this SchedulingPolicy
+func (tr *SchedulingPolicy) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this SchedulingPolicy
+func (tr *SchedulingPolicy) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this SchedulingPolicy
+func (tr *SchedulingPolicy) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this SchedulingPolicy
+func (tr *SchedulingPolicy) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SchedulingPolicy
+func (tr *SchedulingPolicy) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SchedulingPolicy
+func (tr *SchedulingPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SchedulingPolicy using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *SchedulingPolicy) LateInitialize(attrs []byte) (bool, error) {
+	params := &SchedulingPolicyParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *SchedulingPolicy) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/batch/v1beta2/zz_schedulingpolicy_types.go b/apis/batch/v1beta2/zz_schedulingpolicy_types.go
new file mode 100755
index 0000000000..a2475a25b5
--- /dev/null
+++ b/apis/batch/v1beta2/zz_schedulingpolicy_types.go
@@ -0,0 +1,181 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type FairSharePolicyInitParameters struct {
+
+	// A value used to reserve some of the available maximum vCPU for fair share identifiers that have not yet been used. For more information, see FairsharePolicy.
+	ComputeReservation *float64 `json:"computeReservation,omitempty" tf:"compute_reservation,omitempty"`
+
+	ShareDecaySeconds *float64 `json:"shareDecaySeconds,omitempty" tf:"share_decay_seconds,omitempty"`
+
+	// One or more share distribution blocks which define the weights for the fair share identifiers for the fair share policy. For more information, see FairsharePolicy. The share_distribution block is documented below.
+	ShareDistribution []ShareDistributionInitParameters `json:"shareDistribution,omitempty" tf:"share_distribution,omitempty"`
+}
+
+type FairSharePolicyObservation struct {
+
+	// A value used to reserve some of the available maximum vCPU for fair share identifiers that have not yet been used. For more information, see FairsharePolicy.
+	ComputeReservation *float64 `json:"computeReservation,omitempty" tf:"compute_reservation,omitempty"`
+
+	ShareDecaySeconds *float64 `json:"shareDecaySeconds,omitempty" tf:"share_decay_seconds,omitempty"`
+
+	// One or more share distribution blocks which define the weights for the fair share identifiers for the fair share policy. For more information, see FairsharePolicy. The share_distribution block is documented below.
+	ShareDistribution []ShareDistributionObservation `json:"shareDistribution,omitempty" tf:"share_distribution,omitempty"`
+}
+
+type FairSharePolicyParameters struct {
+
+	// A value used to reserve some of the available maximum vCPU for fair share identifiers that have not yet been used. For more information, see FairsharePolicy.
+	// +kubebuilder:validation:Optional
+	ComputeReservation *float64 `json:"computeReservation,omitempty" tf:"compute_reservation,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	ShareDecaySeconds *float64 `json:"shareDecaySeconds,omitempty" tf:"share_decay_seconds,omitempty"`
+
+	// One or more share distribution blocks which define the weights for the fair share identifiers for the fair share policy. For more information, see FairsharePolicy. The share_distribution block is documented below.
+	// +kubebuilder:validation:Optional
+	ShareDistribution []ShareDistributionParameters `json:"shareDistribution,omitempty" tf:"share_distribution,omitempty"`
+}
+
+type SchedulingPolicyInitParameters struct {
+	FairSharePolicy *FairSharePolicyInitParameters `json:"fairSharePolicy,omitempty" tf:"fair_share_policy,omitempty"`
+
+	// Key-value map of resource tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+}
+
+type SchedulingPolicyObservation struct {
+
+	// The Amazon Resource Name of the scheduling policy.
+	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`
+
+	FairSharePolicy *FairSharePolicyObservation `json:"fairSharePolicy,omitempty" tf:"fair_share_policy,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Key-value map of resource tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
+	// +mapType=granular
+	TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"`
+}
+
+type SchedulingPolicyParameters struct {
+
+	// +kubebuilder:validation:Optional
+	FairSharePolicy *FairSharePolicyParameters `json:"fairSharePolicy,omitempty" tf:"fair_share_policy,omitempty"`
+
+	// Region is the region you'd like your resource to be created in.
+	// +upjet:crd:field:TFTag=-
+	// +kubebuilder:validation:Required
+	Region *string `json:"region" tf:"-"`
+
+	// Key-value map of resource tags.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+}
+
+type ShareDistributionInitParameters struct {
+
+	// A fair share identifier or fair share identifier prefix. For more information, see ShareAttributes.
+	ShareIdentifier *string `json:"shareIdentifier,omitempty" tf:"share_identifier,omitempty"`
+
+	// The weight factor for the fair share identifier. For more information, see ShareAttributes.
+	WeightFactor *float64 `json:"weightFactor,omitempty" tf:"weight_factor,omitempty"`
+}
+
+type ShareDistributionObservation struct {
+
+	// A fair share identifier or fair share identifier prefix. For more information, see ShareAttributes.
+	ShareIdentifier *string `json:"shareIdentifier,omitempty" tf:"share_identifier,omitempty"`
+
+	// The weight factor for the fair share identifier. For more information, see ShareAttributes.
+	WeightFactor *float64 `json:"weightFactor,omitempty" tf:"weight_factor,omitempty"`
+}
+
+type ShareDistributionParameters struct {
+
+	// A fair share identifier or fair share identifier prefix. For more information, see ShareAttributes.
+	// +kubebuilder:validation:Optional
+	ShareIdentifier *string `json:"shareIdentifier" tf:"share_identifier,omitempty"`
+
+	// The weight factor for the fair share identifier. For more information, see ShareAttributes.
+	// +kubebuilder:validation:Optional
+	WeightFactor *float64 `json:"weightFactor,omitempty" tf:"weight_factor,omitempty"`
+}
+
+// SchedulingPolicySpec defines the desired state of SchedulingPolicy
+type SchedulingPolicySpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     SchedulingPolicyParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider SchedulingPolicyInitParameters `json:"initProvider,omitempty"`
+}
+
+// SchedulingPolicyStatus defines the observed state of SchedulingPolicy.
+type SchedulingPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SchedulingPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SchedulingPolicy is the Schema for the SchedulingPolicys API. Provides a Batch Scheduling Policy resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SchedulingPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SchedulingPolicySpec `json:"spec"` + Status SchedulingPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SchedulingPolicyList contains a list of SchedulingPolicys +type SchedulingPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SchedulingPolicy `json:"items"` +} + +// Repository type metadata. +var ( + SchedulingPolicy_Kind = "SchedulingPolicy" + SchedulingPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SchedulingPolicy_Kind}.String() + SchedulingPolicy_KindAPIVersion = SchedulingPolicy_Kind + "." 
+ CRDGroupVersion.String() + SchedulingPolicy_GroupVersionKind = CRDGroupVersion.WithKind(SchedulingPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&SchedulingPolicy{}, &SchedulingPolicyList{}) +} diff --git a/apis/budgets/v1beta1/zz_generated.conversion_spokes.go b/apis/budgets/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..f9d7bec1ab --- /dev/null +++ b/apis/budgets/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Budget to the hub type. +func (tr *Budget) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Budget type. +func (tr *Budget) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BudgetAction to the hub type. 
+func (tr *BudgetAction) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BudgetAction type. +func (tr *BudgetAction) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/budgets/v1beta2/zz_budget_terraformed.go b/apis/budgets/v1beta2/zz_budget_terraformed.go new file mode 100755 index 0000000000..3f11a3dd26 --- /dev/null +++ b/apis/budgets/v1beta2/zz_budget_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Budget +func (mg *Budget) GetTerraformResourceType() string { + return "aws_budgets_budget" +} + +// GetConnectionDetailsMapping for this Budget +func (tr *Budget) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Budget +func (tr *Budget) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Budget +func (tr *Budget) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Budget +func (tr *Budget) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Budget +func (tr *Budget) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Budget +func (tr *Budget) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Budget +func (tr *Budget) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of 
this Budget +func (tr *Budget) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Budget using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Budget) LateInitialize(attrs []byte) (bool, error) { + params := &BudgetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Budget) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/budgets/v1beta2/zz_budget_types.go b/apis/budgets/v1beta2/zz_budget_types.go new file mode 100755 index 0000000000..f52bf0bbd6 --- /dev/null +++ b/apis/budgets/v1beta2/zz_budget_types.go @@ -0,0 +1,546 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoAdjustDataInitParameters struct { + + // The string that defines whether your budget auto-adjusts based on historical or forecasted data. Valid values: FORECAST,HISTORICAL + AutoAdjustType *string `json:"autoAdjustType,omitempty" tf:"auto_adjust_type,omitempty"` + + // Configuration block of Historical Options. Required for auto_adjust_type of HISTORICAL Configuration block that defines the historical data that your auto-adjusting budget is based on. + HistoricalOptions *HistoricalOptionsInitParameters `json:"historicalOptions,omitempty" tf:"historical_options,omitempty"` +} + +type AutoAdjustDataObservation struct { + + // The string that defines whether your budget auto-adjusts based on historical or forecasted data. Valid values: FORECAST,HISTORICAL + AutoAdjustType *string `json:"autoAdjustType,omitempty" tf:"auto_adjust_type,omitempty"` + + // Configuration block of Historical Options. Required for auto_adjust_type of HISTORICAL Configuration block that defines the historical data that your auto-adjusting budget is based on. + HistoricalOptions *HistoricalOptionsObservation `json:"historicalOptions,omitempty" tf:"historical_options,omitempty"` + + // The last time that your budget was auto-adjusted. 
+ LastAutoAdjustTime *string `json:"lastAutoAdjustTime,omitempty" tf:"last_auto_adjust_time,omitempty"` +} + +type AutoAdjustDataParameters struct { + + // The string that defines whether your budget auto-adjusts based on historical or forecasted data. Valid values: FORECAST,HISTORICAL + // +kubebuilder:validation:Optional + AutoAdjustType *string `json:"autoAdjustType" tf:"auto_adjust_type,omitempty"` + + // Configuration block of Historical Options. Required for auto_adjust_type of HISTORICAL Configuration block that defines the historical data that your auto-adjusting budget is based on. + // +kubebuilder:validation:Optional + HistoricalOptions *HistoricalOptionsParameters `json:"historicalOptions,omitempty" tf:"historical_options,omitempty"` +} + +type BudgetInitParameters struct { + + // The ID of the target account for budget. Will use current user's account_id by default if omitted. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Object containing AutoAdjustData which determines the budget amount for an auto-adjusting budget. + AutoAdjustData *AutoAdjustDataInitParameters `json:"autoAdjustData,omitempty" tf:"auto_adjust_data,omitempty"` + + // Whether this budget tracks monetary cost or usage. + BudgetType *string `json:"budgetType,omitempty" tf:"budget_type,omitempty"` + + // A list of CostFilter name/values pair to apply to budget. + CostFilter []CostFilterInitParameters `json:"costFilter,omitempty" tf:"cost_filter,omitempty"` + + // Object containing CostTypes The types of cost included in a budget, such as tax and subscriptions. + CostTypes *CostTypesInitParameters `json:"costTypes,omitempty" tf:"cost_types,omitempty"` + + // The amount of cost or usage being measured for a budget. + LimitAmount *string `json:"limitAmount,omitempty" tf:"limit_amount,omitempty"` + + // The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See Spend documentation. 
+ LimitUnit *string `json:"limitUnit,omitempty" tf:"limit_unit,omitempty"` + + // Object containing Budget Notifications. Can be used multiple times to define more than one budget notification. + Notification []NotificationInitParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // Object containing Planned Budget Limits. Can be used multiple times to plan more than one budget limit. See PlannedBudgetLimits documentation. + PlannedLimit []PlannedLimitInitParameters `json:"plannedLimit,omitempty" tf:"planned_limit,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The end of the time period covered by the budget. There are no restrictions on the end date. Format: 2017-01-01_12:00. + TimePeriodEnd *string `json:"timePeriodEnd,omitempty" tf:"time_period_end,omitempty"` + + // The start of the time period covered by the budget. If you don't specify a start date, AWS defaults to the start of your chosen time period. The start date must come before the end date. Format: 2017-01-01_12:00. + TimePeriodStart *string `json:"timePeriodStart,omitempty" tf:"time_period_start,omitempty"` + + // The length of time until a budget resets the actual and forecasted spend. Valid values: MONTHLY, QUARTERLY, ANNUALLY, and DAILY. + TimeUnit *string `json:"timeUnit,omitempty" tf:"time_unit,omitempty"` +} + +type BudgetObservation struct { + + // The ID of the target account for budget. Will use current user's account_id by default if omitted. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The ARN of the budget. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Object containing AutoAdjustData which determines the budget amount for an auto-adjusting budget. + AutoAdjustData *AutoAdjustDataObservation `json:"autoAdjustData,omitempty" tf:"auto_adjust_data,omitempty"` + + // Whether this budget tracks monetary cost or usage. 
+ BudgetType *string `json:"budgetType,omitempty" tf:"budget_type,omitempty"` + + // A list of CostFilter name/values pair to apply to budget. + CostFilter []CostFilterObservation `json:"costFilter,omitempty" tf:"cost_filter,omitempty"` + + // Object containing CostTypes The types of cost included in a budget, such as tax and subscriptions. + CostTypes *CostTypesObservation `json:"costTypes,omitempty" tf:"cost_types,omitempty"` + + // id of resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The amount of cost or usage being measured for a budget. + LimitAmount *string `json:"limitAmount,omitempty" tf:"limit_amount,omitempty"` + + // The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See Spend documentation. + LimitUnit *string `json:"limitUnit,omitempty" tf:"limit_unit,omitempty"` + + // Object containing Budget Notifications. Can be used multiple times to define more than one budget notification. + Notification []NotificationObservation `json:"notification,omitempty" tf:"notification,omitempty"` + + // Object containing Planned Budget Limits. Can be used multiple times to plan more than one budget limit. See PlannedBudgetLimits documentation. + PlannedLimit []PlannedLimitObservation `json:"plannedLimit,omitempty" tf:"planned_limit,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The end of the time period covered by the budget. There are no restrictions on the end date. Format: 2017-01-01_12:00. + TimePeriodEnd *string `json:"timePeriodEnd,omitempty" tf:"time_period_end,omitempty"` + + // The start of the time period covered by the budget. 
If you don't specify a start date, AWS defaults to the start of your chosen time period. The start date must come before the end date. Format: 2017-01-01_12:00. + TimePeriodStart *string `json:"timePeriodStart,omitempty" tf:"time_period_start,omitempty"` + + // The length of time until a budget resets the actual and forecasted spend. Valid values: MONTHLY, QUARTERLY, ANNUALLY, and DAILY. + TimeUnit *string `json:"timeUnit,omitempty" tf:"time_unit,omitempty"` +} + +type BudgetParameters struct { + + // The ID of the target account for budget. Will use current user's account_id by default if omitted. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Object containing AutoAdjustData which determines the budget amount for an auto-adjusting budget. + // +kubebuilder:validation:Optional + AutoAdjustData *AutoAdjustDataParameters `json:"autoAdjustData,omitempty" tf:"auto_adjust_data,omitempty"` + + // Whether this budget tracks monetary cost or usage. + // +kubebuilder:validation:Optional + BudgetType *string `json:"budgetType,omitempty" tf:"budget_type,omitempty"` + + // A list of CostFilter name/values pair to apply to budget. + // +kubebuilder:validation:Optional + CostFilter []CostFilterParameters `json:"costFilter,omitempty" tf:"cost_filter,omitempty"` + + // Object containing CostTypes The types of cost included in a budget, such as tax and subscriptions. + // +kubebuilder:validation:Optional + CostTypes *CostTypesParameters `json:"costTypes,omitempty" tf:"cost_types,omitempty"` + + // The amount of cost or usage being measured for a budget. + // +kubebuilder:validation:Optional + LimitAmount *string `json:"limitAmount,omitempty" tf:"limit_amount,omitempty"` + + // The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See Spend documentation. 
+ // +kubebuilder:validation:Optional + LimitUnit *string `json:"limitUnit,omitempty" tf:"limit_unit,omitempty"` + + // Object containing Budget Notifications. Can be used multiple times to define more than one budget notification. + // +kubebuilder:validation:Optional + Notification []NotificationParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // Object containing Planned Budget Limits. Can be used multiple times to plan more than one budget limit. See PlannedBudgetLimits documentation. + // +kubebuilder:validation:Optional + PlannedLimit []PlannedLimitParameters `json:"plannedLimit,omitempty" tf:"planned_limit,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The end of the time period covered by the budget. There are no restrictions on the end date. Format: 2017-01-01_12:00. + // +kubebuilder:validation:Optional + TimePeriodEnd *string `json:"timePeriodEnd,omitempty" tf:"time_period_end,omitempty"` + + // The start of the time period covered by the budget. If you don't specify a start date, AWS defaults to the start of your chosen time period. The start date must come before the end date. Format: 2017-01-01_12:00. + // +kubebuilder:validation:Optional + TimePeriodStart *string `json:"timePeriodStart,omitempty" tf:"time_period_start,omitempty"` + + // The length of time until a budget resets the actual and forecasted spend. Valid values: MONTHLY, QUARTERLY, ANNUALLY, and DAILY. + // +kubebuilder:validation:Optional + TimeUnit *string `json:"timeUnit,omitempty" tf:"time_unit,omitempty"` +} + +type CostFilterInitParameters struct { + + // The name of a budget. Unique within accounts. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type CostFilterObservation struct { + + // The name of a budget. Unique within accounts. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type CostFilterParameters struct { + + // The name of a budget. Unique within accounts. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type CostTypesInitParameters struct { + + // A boolean value whether to include credits in the cost budget. Defaults to true + IncludeCredit *bool `json:"includeCredit,omitempty" tf:"include_credit,omitempty"` + + // Whether a budget includes discounts. Defaults to true + IncludeDiscount *bool `json:"includeDiscount,omitempty" tf:"include_discount,omitempty"` + + // A boolean value whether to include other subscription costs in the cost budget. Defaults to true + IncludeOtherSubscription *bool `json:"includeOtherSubscription,omitempty" tf:"include_other_subscription,omitempty"` + + // A boolean value whether to include recurring costs in the cost budget. Defaults to true + IncludeRecurring *bool `json:"includeRecurring,omitempty" tf:"include_recurring,omitempty"` + + // A boolean value whether to include refunds in the cost budget. Defaults to true + IncludeRefund *bool `json:"includeRefund,omitempty" tf:"include_refund,omitempty"` + + // A boolean value whether to include subscriptions in the cost budget. Defaults to true + IncludeSubscription *bool `json:"includeSubscription,omitempty" tf:"include_subscription,omitempty"` + + // A boolean value whether to include support costs in the cost budget. 
Defaults to true + IncludeSupport *bool `json:"includeSupport,omitempty" tf:"include_support,omitempty"` + + // A boolean value whether to include tax in the cost budget. Defaults to true + IncludeTax *bool `json:"includeTax,omitempty" tf:"include_tax,omitempty"` + + // A boolean value whether to include upfront costs in the cost budget. Defaults to true + IncludeUpfront *bool `json:"includeUpfront,omitempty" tf:"include_upfront,omitempty"` + + // Whether a budget uses the amortized rate. Defaults to false + UseAmortized *bool `json:"useAmortized,omitempty" tf:"use_amortized,omitempty"` + + // A boolean value whether to use blended costs in the cost budget. Defaults to false + UseBlended *bool `json:"useBlended,omitempty" tf:"use_blended,omitempty"` +} + +type CostTypesObservation struct { + + // A boolean value whether to include credits in the cost budget. Defaults to true + IncludeCredit *bool `json:"includeCredit,omitempty" tf:"include_credit,omitempty"` + + // Whether a budget includes discounts. Defaults to true + IncludeDiscount *bool `json:"includeDiscount,omitempty" tf:"include_discount,omitempty"` + + // A boolean value whether to include other subscription costs in the cost budget. Defaults to true + IncludeOtherSubscription *bool `json:"includeOtherSubscription,omitempty" tf:"include_other_subscription,omitempty"` + + // A boolean value whether to include recurring costs in the cost budget. Defaults to true + IncludeRecurring *bool `json:"includeRecurring,omitempty" tf:"include_recurring,omitempty"` + + // A boolean value whether to include refunds in the cost budget. Defaults to true + IncludeRefund *bool `json:"includeRefund,omitempty" tf:"include_refund,omitempty"` + + // A boolean value whether to include subscriptions in the cost budget. Defaults to true + IncludeSubscription *bool `json:"includeSubscription,omitempty" tf:"include_subscription,omitempty"` + + // A boolean value whether to include support costs in the cost budget. 
Defaults to true + IncludeSupport *bool `json:"includeSupport,omitempty" tf:"include_support,omitempty"` + + // A boolean value whether to include tax in the cost budget. Defaults to true + IncludeTax *bool `json:"includeTax,omitempty" tf:"include_tax,omitempty"` + + // A boolean value whether to include upfront costs in the cost budget. Defaults to true + IncludeUpfront *bool `json:"includeUpfront,omitempty" tf:"include_upfront,omitempty"` + + // Whether a budget uses the amortized rate. Defaults to false + UseAmortized *bool `json:"useAmortized,omitempty" tf:"use_amortized,omitempty"` + + // A boolean value whether to use blended costs in the cost budget. Defaults to false + UseBlended *bool `json:"useBlended,omitempty" tf:"use_blended,omitempty"` +} + +type CostTypesParameters struct { + + // A boolean value whether to include credits in the cost budget. Defaults to true + // +kubebuilder:validation:Optional + IncludeCredit *bool `json:"includeCredit,omitempty" tf:"include_credit,omitempty"` + + // Whether a budget includes discounts. Defaults to true + // +kubebuilder:validation:Optional + IncludeDiscount *bool `json:"includeDiscount,omitempty" tf:"include_discount,omitempty"` + + // A boolean value whether to include other subscription costs in the cost budget. Defaults to true + // +kubebuilder:validation:Optional + IncludeOtherSubscription *bool `json:"includeOtherSubscription,omitempty" tf:"include_other_subscription,omitempty"` + + // A boolean value whether to include recurring costs in the cost budget. Defaults to true + // +kubebuilder:validation:Optional + IncludeRecurring *bool `json:"includeRecurring,omitempty" tf:"include_recurring,omitempty"` + + // A boolean value whether to include refunds in the cost budget. Defaults to true + // +kubebuilder:validation:Optional + IncludeRefund *bool `json:"includeRefund,omitempty" tf:"include_refund,omitempty"` + + // A boolean value whether to include subscriptions in the cost budget. 
Defaults to true + // +kubebuilder:validation:Optional + IncludeSubscription *bool `json:"includeSubscription,omitempty" tf:"include_subscription,omitempty"` + + // A boolean value whether to include support costs in the cost budget. Defaults to true + // +kubebuilder:validation:Optional + IncludeSupport *bool `json:"includeSupport,omitempty" tf:"include_support,omitempty"` + + // A boolean value whether to include tax in the cost budget. Defaults to true + // +kubebuilder:validation:Optional + IncludeTax *bool `json:"includeTax,omitempty" tf:"include_tax,omitempty"` + + // A boolean value whether to include upfront costs in the cost budget. Defaults to true + // +kubebuilder:validation:Optional + IncludeUpfront *bool `json:"includeUpfront,omitempty" tf:"include_upfront,omitempty"` + + // Whether a budget uses the amortized rate. Defaults to false + // +kubebuilder:validation:Optional + UseAmortized *bool `json:"useAmortized,omitempty" tf:"use_amortized,omitempty"` + + // A boolean value whether to use blended costs in the cost budget. Defaults to false + // +kubebuilder:validation:Optional + UseBlended *bool `json:"useBlended,omitempty" tf:"use_blended,omitempty"` +} + +type HistoricalOptionsInitParameters struct { + + // The number of budget periods included in the moving-average calculation that determines your auto-adjusted budget amount. + BudgetAdjustmentPeriod *float64 `json:"budgetAdjustmentPeriod,omitempty" tf:"budget_adjustment_period,omitempty"` +} + +type HistoricalOptionsObservation struct { + + // The number of budget periods included in the moving-average calculation that determines your auto-adjusted budget amount. + BudgetAdjustmentPeriod *float64 `json:"budgetAdjustmentPeriod,omitempty" tf:"budget_adjustment_period,omitempty"` + + // The integer that describes how many budget periods in your BudgetAdjustmentPeriod are included in the calculation of your current budget limit. 
If the first budget period in your BudgetAdjustmentPeriod has no cost data, then that budget period isn’t included in the average that determines your budget limit. You can’t set your own LookBackAvailablePeriods. The value is automatically calculated from the budget_adjustment_period and your historical cost data. + LookbackAvailablePeriods *float64 `json:"lookbackAvailablePeriods,omitempty" tf:"lookback_available_periods,omitempty"` +} + +type HistoricalOptionsParameters struct { + + // The number of budget periods included in the moving-average calculation that determines your auto-adjusted budget amount. + // +kubebuilder:validation:Optional + BudgetAdjustmentPeriod *float64 `json:"budgetAdjustmentPeriod" tf:"budget_adjustment_period,omitempty"` +} + +type NotificationInitParameters struct { + + // Comparison operator to use to evaluate the condition. Can be LESS_THAN, EQUAL_TO or GREATER_THAN. + ComparisonOperator *string `json:"comparisonOperator,omitempty" tf:"comparison_operator,omitempty"` + + // What kind of budget value to notify on. Can be ACTUAL or FORECASTED + NotificationType *string `json:"notificationType,omitempty" tf:"notification_type,omitempty"` + + // E-Mail addresses to notify. Either this or subscriber_sns_topic_arns is required. + // +listType=set + SubscriberEmailAddresses []*string `json:"subscriberEmailAddresses,omitempty" tf:"subscriber_email_addresses,omitempty"` + + // SNS topics to notify. Either this or subscriber_email_addresses is required. + // +listType=set + SubscriberSnsTopicArns []*string `json:"subscriberSnsTopicArns,omitempty" tf:"subscriber_sns_topic_arns,omitempty"` + + // Threshold when the notification should be sent. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // What kind of threshold is defined. Can be PERCENTAGE OR ABSOLUTE_VALUE. 
+ ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type NotificationObservation struct { + + // Comparison operator to use to evaluate the condition. Can be LESS_THAN, EQUAL_TO or GREATER_THAN. + ComparisonOperator *string `json:"comparisonOperator,omitempty" tf:"comparison_operator,omitempty"` + + // What kind of budget value to notify on. Can be ACTUAL or FORECASTED + NotificationType *string `json:"notificationType,omitempty" tf:"notification_type,omitempty"` + + // E-Mail addresses to notify. Either this or subscriber_sns_topic_arns is required. + // +listType=set + SubscriberEmailAddresses []*string `json:"subscriberEmailAddresses,omitempty" tf:"subscriber_email_addresses,omitempty"` + + // SNS topics to notify. Either this or subscriber_email_addresses is required. + // +listType=set + SubscriberSnsTopicArns []*string `json:"subscriberSnsTopicArns,omitempty" tf:"subscriber_sns_topic_arns,omitempty"` + + // Threshold when the notification should be sent. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // What kind of threshold is defined. Can be PERCENTAGE OR ABSOLUTE_VALUE. + ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type NotificationParameters struct { + + // Comparison operator to use to evaluate the condition. Can be LESS_THAN, EQUAL_TO or GREATER_THAN. + // +kubebuilder:validation:Optional + ComparisonOperator *string `json:"comparisonOperator" tf:"comparison_operator,omitempty"` + + // What kind of budget value to notify on. Can be ACTUAL or FORECASTED + // +kubebuilder:validation:Optional + NotificationType *string `json:"notificationType" tf:"notification_type,omitempty"` + + // E-Mail addresses to notify. Either this or subscriber_sns_topic_arns is required. 
+ // +kubebuilder:validation:Optional + // +listType=set + SubscriberEmailAddresses []*string `json:"subscriberEmailAddresses,omitempty" tf:"subscriber_email_addresses,omitempty"` + + // SNS topics to notify. Either this or subscriber_email_addresses is required. + // +kubebuilder:validation:Optional + // +listType=set + SubscriberSnsTopicArns []*string `json:"subscriberSnsTopicArns,omitempty" tf:"subscriber_sns_topic_arns,omitempty"` + + // Threshold when the notification should be sent. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` + + // What kind of threshold is defined. Can be PERCENTAGE OR ABSOLUTE_VALUE. + // +kubebuilder:validation:Optional + ThresholdType *string `json:"thresholdType" tf:"threshold_type,omitempty"` +} + +type PlannedLimitInitParameters struct { + + // The amount of cost or usage being measured for a budget. + Amount *string `json:"amount,omitempty" tf:"amount,omitempty"` + + // The start time of the budget limit. Format: 2017-01-01_12:00. See PlannedBudgetLimits documentation. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See Spend documentation. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type PlannedLimitObservation struct { + + // The amount of cost or usage being measured for a budget. + Amount *string `json:"amount,omitempty" tf:"amount,omitempty"` + + // The start time of the budget limit. Format: 2017-01-01_12:00. See PlannedBudgetLimits documentation. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See Spend documentation. 
+ Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type PlannedLimitParameters struct { + + // The amount of cost or usage being measured for a budget. + // +kubebuilder:validation:Optional + Amount *string `json:"amount" tf:"amount,omitempty"` + + // The start time of the budget limit. Format: 2017-01-01_12:00. See PlannedBudgetLimits documentation. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime" tf:"start_time,omitempty"` + + // The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See Spend documentation. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` +} + +// BudgetSpec defines the desired state of Budget +type BudgetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BudgetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BudgetInitParameters `json:"initProvider,omitempty"` +} + +// BudgetStatus defines the observed state of Budget. +type BudgetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BudgetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Budget is the Schema for the Budgets API. Provides a budgets budget resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Budget struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.budgetType) || (has(self.initProvider) && has(self.initProvider.budgetType))",message="spec.forProvider.budgetType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timeUnit) || (has(self.initProvider) && has(self.initProvider.timeUnit))",message="spec.forProvider.timeUnit is a required parameter" + Spec BudgetSpec `json:"spec"` + Status BudgetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BudgetList contains a list of Budgets +type BudgetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Budget `json:"items"` +} + +// Repository type metadata. +var ( + Budget_Kind = "Budget" + Budget_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Budget_Kind}.String() + Budget_KindAPIVersion = Budget_Kind + "." 
+ CRDGroupVersion.String() + Budget_GroupVersionKind = CRDGroupVersion.WithKind(Budget_Kind) +) + +func init() { + SchemeBuilder.Register(&Budget{}, &BudgetList{}) +} diff --git a/apis/budgets/v1beta2/zz_budgetaction_terraformed.go b/apis/budgets/v1beta2/zz_budgetaction_terraformed.go new file mode 100755 index 0000000000..fa08957d80 --- /dev/null +++ b/apis/budgets/v1beta2/zz_budgetaction_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BudgetAction +func (mg *BudgetAction) GetTerraformResourceType() string { + return "aws_budgets_budget_action" +} + +// GetConnectionDetailsMapping for this BudgetAction +func (tr *BudgetAction) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BudgetAction +func (tr *BudgetAction) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BudgetAction +func (tr *BudgetAction) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BudgetAction +func (tr *BudgetAction) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BudgetAction +func (tr *BudgetAction) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BudgetAction +func (tr *BudgetAction) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BudgetAction +func (tr *BudgetAction) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BudgetAction +func (tr *BudgetAction) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BudgetAction using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *BudgetAction) LateInitialize(attrs []byte) (bool, error) { + params := &BudgetActionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BudgetAction) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/budgets/v1beta2/zz_budgetaction_types.go b/apis/budgets/v1beta2/zz_budgetaction_types.go new file mode 100755 index 0000000000..138d5f70f9 --- /dev/null +++ b/apis/budgets/v1beta2/zz_budgetaction_types.go @@ -0,0 +1,498 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionThresholdInitParameters struct { + + // The type of threshold for a notification. Valid values are PERCENTAGE or ABSOLUTE_VALUE. + ActionThresholdType *string `json:"actionThresholdType,omitempty" tf:"action_threshold_type,omitempty"` + + // The threshold of a notification. + ActionThresholdValue *float64 `json:"actionThresholdValue,omitempty" tf:"action_threshold_value,omitempty"` +} + +type ActionThresholdObservation struct { + + // The type of threshold for a notification. Valid values are PERCENTAGE or ABSOLUTE_VALUE. + ActionThresholdType *string `json:"actionThresholdType,omitempty" tf:"action_threshold_type,omitempty"` + + // The threshold of a notification. 
+ ActionThresholdValue *float64 `json:"actionThresholdValue,omitempty" tf:"action_threshold_value,omitempty"` +} + +type ActionThresholdParameters struct { + + // The type of threshold for a notification. Valid values are PERCENTAGE or ABSOLUTE_VALUE. + // +kubebuilder:validation:Optional + ActionThresholdType *string `json:"actionThresholdType" tf:"action_threshold_type,omitempty"` + + // The threshold of a notification. + // +kubebuilder:validation:Optional + ActionThresholdValue *float64 `json:"actionThresholdValue" tf:"action_threshold_value,omitempty"` +} + +type BudgetActionInitParameters struct { + + // The ID of the target account for budget. Will use current user's account_id by default if omitted. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The trigger threshold of the action. See Action Threshold. + ActionThreshold *ActionThresholdInitParameters `json:"actionThreshold,omitempty" tf:"action_threshold,omitempty"` + + // The type of action. This defines the type of tasks that can be carried out by this action. This field also determines the format for definition. Valid values are APPLY_IAM_POLICY, APPLY_SCP_POLICY, and RUN_SSM_DOCUMENTS. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // This specifies if the action needs manual or automatic approval. Valid values are AUTOMATIC and MANUAL. + ApprovalModel *string `json:"approvalModel,omitempty" tf:"approval_model,omitempty"` + + // The name of a budget. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/budgets/v1beta2.Budget + BudgetName *string `json:"budgetName,omitempty" tf:"budget_name,omitempty"` + + // Reference to a Budget in budgets to populate budgetName. + // +kubebuilder:validation:Optional + BudgetNameRef *v1.Reference `json:"budgetNameRef,omitempty" tf:"-"` + + // Selector for a Budget in budgets to populate budgetName. 
+ // +kubebuilder:validation:Optional + BudgetNameSelector *v1.Selector `json:"budgetNameSelector,omitempty" tf:"-"` + + // Specifies all of the type-specific parameters. See Definition. + Definition *DefinitionInitParameters `json:"definition,omitempty" tf:"definition,omitempty"` + + // The role passed for action execution and reversion. Roles and actions must be in the same account. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // The type of a notification. Valid values are ACTUAL or FORECASTED. + NotificationType *string `json:"notificationType,omitempty" tf:"notification_type,omitempty"` + + // A list of subscribers. See Subscriber. + Subscriber []SubscriberInitParameters `json:"subscriber,omitempty" tf:"subscriber,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BudgetActionObservation struct { + + // The ID of the target account for budget. Will use current user's account_id by default if omitted. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The id of the budget action. + ActionID *string `json:"actionId,omitempty" tf:"action_id,omitempty"` + + // The trigger threshold of the action. See Action Threshold. 
+ ActionThreshold *ActionThresholdObservation `json:"actionThreshold,omitempty" tf:"action_threshold,omitempty"` + + // The type of action. This defines the type of tasks that can be carried out by this action. This field also determines the format for definition. Valid values are APPLY_IAM_POLICY, APPLY_SCP_POLICY, and RUN_SSM_DOCUMENTS. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // This specifies if the action needs manual or automatic approval. Valid values are AUTOMATIC and MANUAL. + ApprovalModel *string `json:"approvalModel,omitempty" tf:"approval_model,omitempty"` + + // The ARN of the budget action. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The name of a budget. + BudgetName *string `json:"budgetName,omitempty" tf:"budget_name,omitempty"` + + // Specifies all of the type-specific parameters. See Definition. + Definition *DefinitionObservation `json:"definition,omitempty" tf:"definition,omitempty"` + + // The role passed for action execution and reversion. Roles and actions must be in the same account. + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // ID of resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The type of a notification. Valid values are ACTUAL or FORECASTED. + NotificationType *string `json:"notificationType,omitempty" tf:"notification_type,omitempty"` + + // The status of the budget action. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // A list of subscribers. See Subscriber. + Subscriber []SubscriberObservation `json:"subscriber,omitempty" tf:"subscriber,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type BudgetActionParameters struct { + + // The ID of the target account for budget. Will use current user's account_id by default if omitted. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The trigger threshold of the action. See Action Threshold. + // +kubebuilder:validation:Optional + ActionThreshold *ActionThresholdParameters `json:"actionThreshold,omitempty" tf:"action_threshold,omitempty"` + + // The type of action. This defines the type of tasks that can be carried out by this action. This field also determines the format for definition. Valid values are APPLY_IAM_POLICY, APPLY_SCP_POLICY, and RUN_SSM_DOCUMENTS. + // +kubebuilder:validation:Optional + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // This specifies if the action needs manual or automatic approval. Valid values are AUTOMATIC and MANUAL. + // +kubebuilder:validation:Optional + ApprovalModel *string `json:"approvalModel,omitempty" tf:"approval_model,omitempty"` + + // The name of a budget. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/budgets/v1beta2.Budget + // +kubebuilder:validation:Optional + BudgetName *string `json:"budgetName,omitempty" tf:"budget_name,omitempty"` + + // Reference to a Budget in budgets to populate budgetName. + // +kubebuilder:validation:Optional + BudgetNameRef *v1.Reference `json:"budgetNameRef,omitempty" tf:"-"` + + // Selector for a Budget in budgets to populate budgetName. + // +kubebuilder:validation:Optional + BudgetNameSelector *v1.Selector `json:"budgetNameSelector,omitempty" tf:"-"` + + // Specifies all of the type-specific parameters. See Definition. 
+ // +kubebuilder:validation:Optional + Definition *DefinitionParameters `json:"definition,omitempty" tf:"definition,omitempty"` + + // The role passed for action execution and reversion. Roles and actions must be in the same account. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // The type of a notification. Valid values are ACTUAL or FORECASTED. + // +kubebuilder:validation:Optional + NotificationType *string `json:"notificationType,omitempty" tf:"notification_type,omitempty"` + + // The Region to run the SSM document. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A list of subscribers. See Subscriber. + // +kubebuilder:validation:Optional + Subscriber []SubscriberParameters `json:"subscriber,omitempty" tf:"subscriber,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DefinitionInitParameters struct { + + // The AWS Identity and Access Management (IAM) action definition details. See IAM Action Definition. 
+ IAMActionDefinition *IAMActionDefinitionInitParameters `json:"iamActionDefinition,omitempty" tf:"iam_action_definition,omitempty"` + + // The service control policies (SCPs) action definition details. See SCP Action Definition. + ScpActionDefinition *ScpActionDefinitionInitParameters `json:"scpActionDefinition,omitempty" tf:"scp_action_definition,omitempty"` + + // The AWS Systems Manager (SSM) action definition details. See SSM Action Definition. + SsmActionDefinition *SsmActionDefinitionInitParameters `json:"ssmActionDefinition,omitempty" tf:"ssm_action_definition,omitempty"` +} + +type DefinitionObservation struct { + + // The AWS Identity and Access Management (IAM) action definition details. See IAM Action Definition. + IAMActionDefinition *IAMActionDefinitionObservation `json:"iamActionDefinition,omitempty" tf:"iam_action_definition,omitempty"` + + // The service control policies (SCPs) action definition details. See SCP Action Definition. + ScpActionDefinition *ScpActionDefinitionObservation `json:"scpActionDefinition,omitempty" tf:"scp_action_definition,omitempty"` + + // The AWS Systems Manager (SSM) action definition details. See SSM Action Definition. + SsmActionDefinition *SsmActionDefinitionObservation `json:"ssmActionDefinition,omitempty" tf:"ssm_action_definition,omitempty"` +} + +type DefinitionParameters struct { + + // The AWS Identity and Access Management (IAM) action definition details. See IAM Action Definition. + // +kubebuilder:validation:Optional + IAMActionDefinition *IAMActionDefinitionParameters `json:"iamActionDefinition,omitempty" tf:"iam_action_definition,omitempty"` + + // The service control policies (SCPs) action definition details. See SCP Action Definition. + // +kubebuilder:validation:Optional + ScpActionDefinition *ScpActionDefinitionParameters `json:"scpActionDefinition,omitempty" tf:"scp_action_definition,omitempty"` + + // The AWS Systems Manager (SSM) action definition details. See SSM Action Definition. 
+ // +kubebuilder:validation:Optional + SsmActionDefinition *SsmActionDefinitionParameters `json:"ssmActionDefinition,omitempty" tf:"ssm_action_definition,omitempty"` +} + +type IAMActionDefinitionInitParameters struct { + + // A list of groups to be attached. There must be at least one group. + // +listType=set + Groups []*string `json:"groups,omitempty" tf:"groups,omitempty"` + + // The Amazon Resource Name (ARN) of the policy to be attached. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Policy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + PolicyArn *string `json:"policyArn,omitempty" tf:"policy_arn,omitempty"` + + // Reference to a Policy in iam to populate policyArn. + // +kubebuilder:validation:Optional + PolicyArnRef *v1.Reference `json:"policyArnRef,omitempty" tf:"-"` + + // Selector for a Policy in iam to populate policyArn. + // +kubebuilder:validation:Optional + PolicyArnSelector *v1.Selector `json:"policyArnSelector,omitempty" tf:"-"` + + // A list of roles to be attached. There must be at least one role. + // +listType=set + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + + // A list of users to be attached. There must be at least one user. + // +listType=set + Users []*string `json:"users,omitempty" tf:"users,omitempty"` +} + +type IAMActionDefinitionObservation struct { + + // A list of groups to be attached. There must be at least one group. + // +listType=set + Groups []*string `json:"groups,omitempty" tf:"groups,omitempty"` + + // The Amazon Resource Name (ARN) of the policy to be attached. + PolicyArn *string `json:"policyArn,omitempty" tf:"policy_arn,omitempty"` + + // A list of roles to be attached. There must be at least one role. + // +listType=set + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + + // A list of users to be attached. There must be at least one user. 
+ // +listType=set + Users []*string `json:"users,omitempty" tf:"users,omitempty"` +} + +type IAMActionDefinitionParameters struct { + + // A list of groups to be attached. There must be at least one group. + // +kubebuilder:validation:Optional + // +listType=set + Groups []*string `json:"groups,omitempty" tf:"groups,omitempty"` + + // The Amazon Resource Name (ARN) of the policy to be attached. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Policy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + PolicyArn *string `json:"policyArn,omitempty" tf:"policy_arn,omitempty"` + + // Reference to a Policy in iam to populate policyArn. + // +kubebuilder:validation:Optional + PolicyArnRef *v1.Reference `json:"policyArnRef,omitempty" tf:"-"` + + // Selector for a Policy in iam to populate policyArn. + // +kubebuilder:validation:Optional + PolicyArnSelector *v1.Selector `json:"policyArnSelector,omitempty" tf:"-"` + + // A list of roles to be attached. There must be at least one role. + // +kubebuilder:validation:Optional + // +listType=set + Roles []*string `json:"roles,omitempty" tf:"roles,omitempty"` + + // A list of users to be attached. There must be at least one user. + // +kubebuilder:validation:Optional + // +listType=set + Users []*string `json:"users,omitempty" tf:"users,omitempty"` +} + +type ScpActionDefinitionInitParameters struct { + + // The policy ID attached. + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + + // A list of target IDs. + // +listType=set + TargetIds []*string `json:"targetIds,omitempty" tf:"target_ids,omitempty"` +} + +type ScpActionDefinitionObservation struct { + + // The policy ID attached. + PolicyID *string `json:"policyId,omitempty" tf:"policy_id,omitempty"` + + // A list of target IDs. 
+ // +listType=set + TargetIds []*string `json:"targetIds,omitempty" tf:"target_ids,omitempty"` +} + +type ScpActionDefinitionParameters struct { + + // The policy ID attached. + // +kubebuilder:validation:Optional + PolicyID *string `json:"policyId" tf:"policy_id,omitempty"` + + // A list of target IDs. + // +kubebuilder:validation:Optional + // +listType=set + TargetIds []*string `json:"targetIds" tf:"target_ids,omitempty"` +} + +type SsmActionDefinitionInitParameters struct { + + // The action subType. Valid values are STOP_EC2_INSTANCES or STOP_RDS_INSTANCES. + ActionSubType *string `json:"actionSubType,omitempty" tf:"action_sub_type,omitempty"` + + // The EC2 and RDS instance IDs. + // +listType=set + InstanceIds []*string `json:"instanceIds,omitempty" tf:"instance_ids,omitempty"` +} + +type SsmActionDefinitionObservation struct { + + // The action subType. Valid values are STOP_EC2_INSTANCES or STOP_RDS_INSTANCES. + ActionSubType *string `json:"actionSubType,omitempty" tf:"action_sub_type,omitempty"` + + // The EC2 and RDS instance IDs. + // +listType=set + InstanceIds []*string `json:"instanceIds,omitempty" tf:"instance_ids,omitempty"` + + // The Region to run the SSM document. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type SsmActionDefinitionParameters struct { + + // The action subType. Valid values are STOP_EC2_INSTANCES or STOP_RDS_INSTANCES. + // +kubebuilder:validation:Optional + ActionSubType *string `json:"actionSubType" tf:"action_sub_type,omitempty"` + + // The EC2 and RDS instance IDs. + // +kubebuilder:validation:Optional + // +listType=set + InstanceIds []*string `json:"instanceIds" tf:"instance_ids,omitempty"` + + // The Region to run the SSM document. + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"region,omitempty"` +} + +type SubscriberInitParameters struct { + + // The address that AWS sends budget notifications to, either an SNS topic or an email. 
+ Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The type of notification that AWS sends to a subscriber. Valid values are SNS or EMAIL. + SubscriptionType *string `json:"subscriptionType,omitempty" tf:"subscription_type,omitempty"` +} + +type SubscriberObservation struct { + + // The address that AWS sends budget notifications to, either an SNS topic or an email. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The type of notification that AWS sends to a subscriber. Valid values are SNS or EMAIL. + SubscriptionType *string `json:"subscriptionType,omitempty" tf:"subscription_type,omitempty"` +} + +type SubscriberParameters struct { + + // The address that AWS sends budget notifications to, either an SNS topic or an email. + // +kubebuilder:validation:Optional + Address *string `json:"address" tf:"address,omitempty"` + + // The type of notification that AWS sends to a subscriber. Valid values are SNS or EMAIL. + // +kubebuilder:validation:Optional + SubscriptionType *string `json:"subscriptionType" tf:"subscription_type,omitempty"` +} + +// BudgetActionSpec defines the desired state of BudgetAction +type BudgetActionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BudgetActionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider BudgetActionInitParameters `json:"initProvider,omitempty"` +} + +// BudgetActionStatus defines the observed state of BudgetAction. +type BudgetActionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BudgetActionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BudgetAction is the Schema for the BudgetActions API. Provides a budget action resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BudgetAction struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.actionThreshold) || (has(self.initProvider) && has(self.initProvider.actionThreshold))",message="spec.forProvider.actionThreshold is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.actionType) || (has(self.initProvider) && has(self.initProvider.actionType))",message="spec.forProvider.actionType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.approvalModel) || (has(self.initProvider) && 
has(self.initProvider.approvalModel))",message="spec.forProvider.approvalModel is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.definition) || (has(self.initProvider) && has(self.initProvider.definition))",message="spec.forProvider.definition is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.notificationType) || (has(self.initProvider) && has(self.initProvider.notificationType))",message="spec.forProvider.notificationType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.subscriber) || (has(self.initProvider) && has(self.initProvider.subscriber))",message="spec.forProvider.subscriber is a required parameter" + Spec BudgetActionSpec `json:"spec"` + Status BudgetActionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BudgetActionList contains a list of BudgetActions +type BudgetActionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BudgetAction `json:"items"` +} + +// Repository type metadata. +var ( + BudgetAction_Kind = "BudgetAction" + BudgetAction_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BudgetAction_Kind}.String() + BudgetAction_KindAPIVersion = BudgetAction_Kind + "." 
+ CRDGroupVersion.String() + BudgetAction_GroupVersionKind = CRDGroupVersion.WithKind(BudgetAction_Kind) +) + +func init() { + SchemeBuilder.Register(&BudgetAction{}, &BudgetActionList{}) +} diff --git a/apis/budgets/v1beta2/zz_generated.conversion_hubs.go b/apis/budgets/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..bf84420adb --- /dev/null +++ b/apis/budgets/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Budget) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BudgetAction) Hub() {} diff --git a/apis/budgets/v1beta2/zz_generated.deepcopy.go b/apis/budgets/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8770576e60 --- /dev/null +++ b/apis/budgets/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2163 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionThresholdInitParameters) DeepCopyInto(out *ActionThresholdInitParameters) { + *out = *in + if in.ActionThresholdType != nil { + in, out := &in.ActionThresholdType, &out.ActionThresholdType + *out = new(string) + **out = **in + } + if in.ActionThresholdValue != nil { + in, out := &in.ActionThresholdValue, &out.ActionThresholdValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionThresholdInitParameters. 
+func (in *ActionThresholdInitParameters) DeepCopy() *ActionThresholdInitParameters { + if in == nil { + return nil + } + out := new(ActionThresholdInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionThresholdObservation) DeepCopyInto(out *ActionThresholdObservation) { + *out = *in + if in.ActionThresholdType != nil { + in, out := &in.ActionThresholdType, &out.ActionThresholdType + *out = new(string) + **out = **in + } + if in.ActionThresholdValue != nil { + in, out := &in.ActionThresholdValue, &out.ActionThresholdValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionThresholdObservation. +func (in *ActionThresholdObservation) DeepCopy() *ActionThresholdObservation { + if in == nil { + return nil + } + out := new(ActionThresholdObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionThresholdParameters) DeepCopyInto(out *ActionThresholdParameters) { + *out = *in + if in.ActionThresholdType != nil { + in, out := &in.ActionThresholdType, &out.ActionThresholdType + *out = new(string) + **out = **in + } + if in.ActionThresholdValue != nil { + in, out := &in.ActionThresholdValue, &out.ActionThresholdValue + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionThresholdParameters. +func (in *ActionThresholdParameters) DeepCopy() *ActionThresholdParameters { + if in == nil { + return nil + } + out := new(ActionThresholdParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoAdjustDataInitParameters) DeepCopyInto(out *AutoAdjustDataInitParameters) { + *out = *in + if in.AutoAdjustType != nil { + in, out := &in.AutoAdjustType, &out.AutoAdjustType + *out = new(string) + **out = **in + } + if in.HistoricalOptions != nil { + in, out := &in.HistoricalOptions, &out.HistoricalOptions + *out = new(HistoricalOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoAdjustDataInitParameters. +func (in *AutoAdjustDataInitParameters) DeepCopy() *AutoAdjustDataInitParameters { + if in == nil { + return nil + } + out := new(AutoAdjustDataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoAdjustDataObservation) DeepCopyInto(out *AutoAdjustDataObservation) { + *out = *in + if in.AutoAdjustType != nil { + in, out := &in.AutoAdjustType, &out.AutoAdjustType + *out = new(string) + **out = **in + } + if in.HistoricalOptions != nil { + in, out := &in.HistoricalOptions, &out.HistoricalOptions + *out = new(HistoricalOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.LastAutoAdjustTime != nil { + in, out := &in.LastAutoAdjustTime, &out.LastAutoAdjustTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoAdjustDataObservation. +func (in *AutoAdjustDataObservation) DeepCopy() *AutoAdjustDataObservation { + if in == nil { + return nil + } + out := new(AutoAdjustDataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoAdjustDataParameters) DeepCopyInto(out *AutoAdjustDataParameters) { + *out = *in + if in.AutoAdjustType != nil { + in, out := &in.AutoAdjustType, &out.AutoAdjustType + *out = new(string) + **out = **in + } + if in.HistoricalOptions != nil { + in, out := &in.HistoricalOptions, &out.HistoricalOptions + *out = new(HistoricalOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoAdjustDataParameters. +func (in *AutoAdjustDataParameters) DeepCopy() *AutoAdjustDataParameters { + if in == nil { + return nil + } + out := new(AutoAdjustDataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Budget) DeepCopyInto(out *Budget) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Budget. +func (in *Budget) DeepCopy() *Budget { + if in == nil { + return nil + } + out := new(Budget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Budget) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetAction) DeepCopyInto(out *BudgetAction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetAction. 
+func (in *BudgetAction) DeepCopy() *BudgetAction { + if in == nil { + return nil + } + out := new(BudgetAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BudgetAction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetActionInitParameters) DeepCopyInto(out *BudgetActionInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.ActionThreshold != nil { + in, out := &in.ActionThreshold, &out.ActionThreshold + *out = new(ActionThresholdInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.ApprovalModel != nil { + in, out := &in.ApprovalModel, &out.ApprovalModel + *out = new(string) + **out = **in + } + if in.BudgetName != nil { + in, out := &in.BudgetName, &out.BudgetName + *out = new(string) + **out = **in + } + if in.BudgetNameRef != nil { + in, out := &in.BudgetNameRef, &out.BudgetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BudgetNameSelector != nil { + in, out := &in.BudgetNameSelector, &out.BudgetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = new(DefinitionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + 
in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NotificationType != nil { + in, out := &in.NotificationType, &out.NotificationType + *out = new(string) + **out = **in + } + if in.Subscriber != nil { + in, out := &in.Subscriber, &out.Subscriber + *out = make([]SubscriberInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetActionInitParameters. +func (in *BudgetActionInitParameters) DeepCopy() *BudgetActionInitParameters { + if in == nil { + return nil + } + out := new(BudgetActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetActionList) DeepCopyInto(out *BudgetActionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BudgetAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetActionList. +func (in *BudgetActionList) DeepCopy() *BudgetActionList { + if in == nil { + return nil + } + out := new(BudgetActionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BudgetActionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetActionObservation) DeepCopyInto(out *BudgetActionObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.ActionID != nil { + in, out := &in.ActionID, &out.ActionID + *out = new(string) + **out = **in + } + if in.ActionThreshold != nil { + in, out := &in.ActionThreshold, &out.ActionThreshold + *out = new(ActionThresholdObservation) + (*in).DeepCopyInto(*out) + } + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.ApprovalModel != nil { + in, out := &in.ApprovalModel, &out.ApprovalModel + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BudgetName != nil { + in, out := &in.BudgetName, &out.BudgetName + *out = new(string) + **out = **in + } + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = new(DefinitionObservation) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.NotificationType != nil { + in, out := &in.NotificationType, &out.NotificationType + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Subscriber != nil { + in, out := &in.Subscriber, &out.Subscriber + *out = make([]SubscriberObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + 
*out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetActionObservation. +func (in *BudgetActionObservation) DeepCopy() *BudgetActionObservation { + if in == nil { + return nil + } + out := new(BudgetActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetActionParameters) DeepCopyInto(out *BudgetActionParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.ActionThreshold != nil { + in, out := &in.ActionThreshold, &out.ActionThreshold + *out = new(ActionThresholdParameters) + (*in).DeepCopyInto(*out) + } + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.ApprovalModel != nil { + in, out := &in.ApprovalModel, &out.ApprovalModel + *out = new(string) + **out = **in + } + if in.BudgetName != nil { + in, out := &in.BudgetName, &out.BudgetName + *out = new(string) + **out = **in + } + if in.BudgetNameRef != nil { + in, out := &in.BudgetNameRef, &out.BudgetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BudgetNameSelector != nil { + in, out := &in.BudgetNameSelector, &out.BudgetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = new(DefinitionParameters) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NotificationType != nil { + in, out := &in.NotificationType, &out.NotificationType + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Subscriber != nil { + in, out := &in.Subscriber, &out.Subscriber + *out = make([]SubscriberParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetActionParameters. +func (in *BudgetActionParameters) DeepCopy() *BudgetActionParameters { + if in == nil { + return nil + } + out := new(BudgetActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetActionSpec) DeepCopyInto(out *BudgetActionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetActionSpec. +func (in *BudgetActionSpec) DeepCopy() *BudgetActionSpec { + if in == nil { + return nil + } + out := new(BudgetActionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetActionStatus) DeepCopyInto(out *BudgetActionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetActionStatus. +func (in *BudgetActionStatus) DeepCopy() *BudgetActionStatus { + if in == nil { + return nil + } + out := new(BudgetActionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetInitParameters) DeepCopyInto(out *BudgetInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.AutoAdjustData != nil { + in, out := &in.AutoAdjustData, &out.AutoAdjustData + *out = new(AutoAdjustDataInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BudgetType != nil { + in, out := &in.BudgetType, &out.BudgetType + *out = new(string) + **out = **in + } + if in.CostFilter != nil { + in, out := &in.CostFilter, &out.CostFilter + *out = make([]CostFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CostTypes != nil { + in, out := &in.CostTypes, &out.CostTypes + *out = new(CostTypesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LimitAmount != nil { + in, out := &in.LimitAmount, &out.LimitAmount + *out = new(string) + **out = **in + } + if in.LimitUnit != nil { + in, out := &in.LimitUnit, &out.LimitUnit + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]NotificationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlannedLimit != nil { + in, out := &in.PlannedLimit, &out.PlannedLimit + *out = make([]PlannedLimitInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TimePeriodEnd != nil { + in, out := &in.TimePeriodEnd, &out.TimePeriodEnd + *out = new(string) + **out = **in + } + if in.TimePeriodStart != nil { + in, out := &in.TimePeriodStart, &out.TimePeriodStart + *out = new(string) + **out = **in + } + 
if in.TimeUnit != nil { + in, out := &in.TimeUnit, &out.TimeUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetInitParameters. +func (in *BudgetInitParameters) DeepCopy() *BudgetInitParameters { + if in == nil { + return nil + } + out := new(BudgetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetList) DeepCopyInto(out *BudgetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Budget, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetList. +func (in *BudgetList) DeepCopy() *BudgetList { + if in == nil { + return nil + } + out := new(BudgetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BudgetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetObservation) DeepCopyInto(out *BudgetObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAdjustData != nil { + in, out := &in.AutoAdjustData, &out.AutoAdjustData + *out = new(AutoAdjustDataObservation) + (*in).DeepCopyInto(*out) + } + if in.BudgetType != nil { + in, out := &in.BudgetType, &out.BudgetType + *out = new(string) + **out = **in + } + if in.CostFilter != nil { + in, out := &in.CostFilter, &out.CostFilter + *out = make([]CostFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CostTypes != nil { + in, out := &in.CostTypes, &out.CostTypes + *out = new(CostTypesObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LimitAmount != nil { + in, out := &in.LimitAmount, &out.LimitAmount + *out = new(string) + **out = **in + } + if in.LimitUnit != nil { + in, out := &in.LimitUnit, &out.LimitUnit + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]NotificationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlannedLimit != nil { + in, out := &in.PlannedLimit, &out.PlannedLimit + *out = make([]PlannedLimitObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TimePeriodEnd != nil { + in, out := &in.TimePeriodEnd, &out.TimePeriodEnd + *out = new(string) + **out = **in + } + if in.TimePeriodStart != nil { + in, out := &in.TimePeriodStart, &out.TimePeriodStart + *out = new(string) + **out = **in + } + if in.TimeUnit != nil { + in, out := &in.TimeUnit, &out.TimeUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetObservation. +func (in *BudgetObservation) DeepCopy() *BudgetObservation { + if in == nil { + return nil + } + out := new(BudgetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetParameters) DeepCopyInto(out *BudgetParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.AutoAdjustData != nil { + in, out := &in.AutoAdjustData, &out.AutoAdjustData + *out = new(AutoAdjustDataParameters) + (*in).DeepCopyInto(*out) + } + if in.BudgetType != nil { + in, out := &in.BudgetType, &out.BudgetType + *out = new(string) + **out = **in + } + if in.CostFilter != nil { + in, out := &in.CostFilter, &out.CostFilter + *out = make([]CostFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CostTypes != nil { + in, out := &in.CostTypes, &out.CostTypes + *out = new(CostTypesParameters) + (*in).DeepCopyInto(*out) + } + if in.LimitAmount != nil { + in, out := &in.LimitAmount, &out.LimitAmount + *out = new(string) + **out = **in + } + if in.LimitUnit != nil { + in, out := &in.LimitUnit, &out.LimitUnit + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]NotificationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlannedLimit != nil { + in, out := &in.PlannedLimit, &out.PlannedLimit + *out = make([]PlannedLimitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TimePeriodEnd != nil { + in, out := &in.TimePeriodEnd, &out.TimePeriodEnd + *out = new(string) + **out = **in + } + if in.TimePeriodStart != nil { + in, out := 
&in.TimePeriodStart, &out.TimePeriodStart + *out = new(string) + **out = **in + } + if in.TimeUnit != nil { + in, out := &in.TimeUnit, &out.TimeUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetParameters. +func (in *BudgetParameters) DeepCopy() *BudgetParameters { + if in == nil { + return nil + } + out := new(BudgetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSpec) DeepCopyInto(out *BudgetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSpec. +func (in *BudgetSpec) DeepCopy() *BudgetSpec { + if in == nil { + return nil + } + out := new(BudgetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetStatus) DeepCopyInto(out *BudgetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetStatus. +func (in *BudgetStatus) DeepCopy() *BudgetStatus { + if in == nil { + return nil + } + out := new(BudgetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CostFilterInitParameters) DeepCopyInto(out *CostFilterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CostFilterInitParameters. +func (in *CostFilterInitParameters) DeepCopy() *CostFilterInitParameters { + if in == nil { + return nil + } + out := new(CostFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CostFilterObservation) DeepCopyInto(out *CostFilterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CostFilterObservation. +func (in *CostFilterObservation) DeepCopy() *CostFilterObservation { + if in == nil { + return nil + } + out := new(CostFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CostFilterParameters) DeepCopyInto(out *CostFilterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CostFilterParameters. +func (in *CostFilterParameters) DeepCopy() *CostFilterParameters { + if in == nil { + return nil + } + out := new(CostFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CostTypesInitParameters) DeepCopyInto(out *CostTypesInitParameters) { + *out = *in + if in.IncludeCredit != nil { + in, out := &in.IncludeCredit, &out.IncludeCredit + *out = new(bool) + **out = **in + } + if in.IncludeDiscount != nil { + in, out := &in.IncludeDiscount, &out.IncludeDiscount + *out = new(bool) + **out = **in + } + if in.IncludeOtherSubscription != nil { + in, out := &in.IncludeOtherSubscription, &out.IncludeOtherSubscription + *out = new(bool) + **out = **in + } + if in.IncludeRecurring != nil { + in, out := &in.IncludeRecurring, &out.IncludeRecurring + *out = new(bool) + **out = **in + } + if in.IncludeRefund != nil { + in, out := &in.IncludeRefund, &out.IncludeRefund + *out = new(bool) + **out = **in + } + if in.IncludeSubscription != nil { + in, out := &in.IncludeSubscription, &out.IncludeSubscription + *out = new(bool) + **out = **in + } + if in.IncludeSupport != nil { + in, out := &in.IncludeSupport, &out.IncludeSupport + *out = new(bool) + **out = **in + } + if in.IncludeTax != nil { + in, out := &in.IncludeTax, &out.IncludeTax + *out = new(bool) + **out = **in + } + if in.IncludeUpfront != nil { + in, out := &in.IncludeUpfront, 
&out.IncludeUpfront + *out = new(bool) + **out = **in + } + if in.UseAmortized != nil { + in, out := &in.UseAmortized, &out.UseAmortized + *out = new(bool) + **out = **in + } + if in.UseBlended != nil { + in, out := &in.UseBlended, &out.UseBlended + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CostTypesInitParameters. +func (in *CostTypesInitParameters) DeepCopy() *CostTypesInitParameters { + if in == nil { + return nil + } + out := new(CostTypesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CostTypesObservation) DeepCopyInto(out *CostTypesObservation) { + *out = *in + if in.IncludeCredit != nil { + in, out := &in.IncludeCredit, &out.IncludeCredit + *out = new(bool) + **out = **in + } + if in.IncludeDiscount != nil { + in, out := &in.IncludeDiscount, &out.IncludeDiscount + *out = new(bool) + **out = **in + } + if in.IncludeOtherSubscription != nil { + in, out := &in.IncludeOtherSubscription, &out.IncludeOtherSubscription + *out = new(bool) + **out = **in + } + if in.IncludeRecurring != nil { + in, out := &in.IncludeRecurring, &out.IncludeRecurring + *out = new(bool) + **out = **in + } + if in.IncludeRefund != nil { + in, out := &in.IncludeRefund, &out.IncludeRefund + *out = new(bool) + **out = **in + } + if in.IncludeSubscription != nil { + in, out := &in.IncludeSubscription, &out.IncludeSubscription + *out = new(bool) + **out = **in + } + if in.IncludeSupport != nil { + in, out := &in.IncludeSupport, &out.IncludeSupport + *out = new(bool) + **out = **in + } + if in.IncludeTax != nil { + in, out := &in.IncludeTax, &out.IncludeTax + *out = new(bool) + **out = **in + } + if in.IncludeUpfront != nil { + in, out := &in.IncludeUpfront, &out.IncludeUpfront + *out = new(bool) + **out = **in + } + if in.UseAmortized != nil { + in, out := &in.UseAmortized, 
&out.UseAmortized + *out = new(bool) + **out = **in + } + if in.UseBlended != nil { + in, out := &in.UseBlended, &out.UseBlended + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CostTypesObservation. +func (in *CostTypesObservation) DeepCopy() *CostTypesObservation { + if in == nil { + return nil + } + out := new(CostTypesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CostTypesParameters) DeepCopyInto(out *CostTypesParameters) { + *out = *in + if in.IncludeCredit != nil { + in, out := &in.IncludeCredit, &out.IncludeCredit + *out = new(bool) + **out = **in + } + if in.IncludeDiscount != nil { + in, out := &in.IncludeDiscount, &out.IncludeDiscount + *out = new(bool) + **out = **in + } + if in.IncludeOtherSubscription != nil { + in, out := &in.IncludeOtherSubscription, &out.IncludeOtherSubscription + *out = new(bool) + **out = **in + } + if in.IncludeRecurring != nil { + in, out := &in.IncludeRecurring, &out.IncludeRecurring + *out = new(bool) + **out = **in + } + if in.IncludeRefund != nil { + in, out := &in.IncludeRefund, &out.IncludeRefund + *out = new(bool) + **out = **in + } + if in.IncludeSubscription != nil { + in, out := &in.IncludeSubscription, &out.IncludeSubscription + *out = new(bool) + **out = **in + } + if in.IncludeSupport != nil { + in, out := &in.IncludeSupport, &out.IncludeSupport + *out = new(bool) + **out = **in + } + if in.IncludeTax != nil { + in, out := &in.IncludeTax, &out.IncludeTax + *out = new(bool) + **out = **in + } + if in.IncludeUpfront != nil { + in, out := &in.IncludeUpfront, &out.IncludeUpfront + *out = new(bool) + **out = **in + } + if in.UseAmortized != nil { + in, out := &in.UseAmortized, &out.UseAmortized + *out = new(bool) + **out = **in + } + if in.UseBlended != nil { + in, out := &in.UseBlended, &out.UseBlended + 
*out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CostTypesParameters. +func (in *CostTypesParameters) DeepCopy() *CostTypesParameters { + if in == nil { + return nil + } + out := new(CostTypesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefinitionInitParameters) DeepCopyInto(out *DefinitionInitParameters) { + *out = *in + if in.IAMActionDefinition != nil { + in, out := &in.IAMActionDefinition, &out.IAMActionDefinition + *out = new(IAMActionDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScpActionDefinition != nil { + in, out := &in.ScpActionDefinition, &out.ScpActionDefinition + *out = new(ScpActionDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SsmActionDefinition != nil { + in, out := &in.SsmActionDefinition, &out.SsmActionDefinition + *out = new(SsmActionDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefinitionInitParameters. +func (in *DefinitionInitParameters) DeepCopy() *DefinitionInitParameters { + if in == nil { + return nil + } + out := new(DefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefinitionObservation) DeepCopyInto(out *DefinitionObservation) { + *out = *in + if in.IAMActionDefinition != nil { + in, out := &in.IAMActionDefinition, &out.IAMActionDefinition + *out = new(IAMActionDefinitionObservation) + (*in).DeepCopyInto(*out) + } + if in.ScpActionDefinition != nil { + in, out := &in.ScpActionDefinition, &out.ScpActionDefinition + *out = new(ScpActionDefinitionObservation) + (*in).DeepCopyInto(*out) + } + if in.SsmActionDefinition != nil { + in, out := &in.SsmActionDefinition, &out.SsmActionDefinition + *out = new(SsmActionDefinitionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefinitionObservation. +func (in *DefinitionObservation) DeepCopy() *DefinitionObservation { + if in == nil { + return nil + } + out := new(DefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefinitionParameters) DeepCopyInto(out *DefinitionParameters) { + *out = *in + if in.IAMActionDefinition != nil { + in, out := &in.IAMActionDefinition, &out.IAMActionDefinition + *out = new(IAMActionDefinitionParameters) + (*in).DeepCopyInto(*out) + } + if in.ScpActionDefinition != nil { + in, out := &in.ScpActionDefinition, &out.ScpActionDefinition + *out = new(ScpActionDefinitionParameters) + (*in).DeepCopyInto(*out) + } + if in.SsmActionDefinition != nil { + in, out := &in.SsmActionDefinition, &out.SsmActionDefinition + *out = new(SsmActionDefinitionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefinitionParameters. 
+func (in *DefinitionParameters) DeepCopy() *DefinitionParameters { + if in == nil { + return nil + } + out := new(DefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HistoricalOptionsInitParameters) DeepCopyInto(out *HistoricalOptionsInitParameters) { + *out = *in + if in.BudgetAdjustmentPeriod != nil { + in, out := &in.BudgetAdjustmentPeriod, &out.BudgetAdjustmentPeriod + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HistoricalOptionsInitParameters. +func (in *HistoricalOptionsInitParameters) DeepCopy() *HistoricalOptionsInitParameters { + if in == nil { + return nil + } + out := new(HistoricalOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HistoricalOptionsObservation) DeepCopyInto(out *HistoricalOptionsObservation) { + *out = *in + if in.BudgetAdjustmentPeriod != nil { + in, out := &in.BudgetAdjustmentPeriod, &out.BudgetAdjustmentPeriod + *out = new(float64) + **out = **in + } + if in.LookbackAvailablePeriods != nil { + in, out := &in.LookbackAvailablePeriods, &out.LookbackAvailablePeriods + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HistoricalOptionsObservation. +func (in *HistoricalOptionsObservation) DeepCopy() *HistoricalOptionsObservation { + if in == nil { + return nil + } + out := new(HistoricalOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HistoricalOptionsParameters) DeepCopyInto(out *HistoricalOptionsParameters) { + *out = *in + if in.BudgetAdjustmentPeriod != nil { + in, out := &in.BudgetAdjustmentPeriod, &out.BudgetAdjustmentPeriod + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HistoricalOptionsParameters. +func (in *HistoricalOptionsParameters) DeepCopy() *HistoricalOptionsParameters { + if in == nil { + return nil + } + out := new(HistoricalOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMActionDefinitionInitParameters) DeepCopyInto(out *IAMActionDefinitionInitParameters) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyArn != nil { + in, out := &in.PolicyArn, &out.PolicyArn + *out = new(string) + **out = **in + } + if in.PolicyArnRef != nil { + in, out := &in.PolicyArnRef, &out.PolicyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyArnSelector != nil { + in, out := &in.PolicyArnSelector, &out.PolicyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMActionDefinitionInitParameters. 
+func (in *IAMActionDefinitionInitParameters) DeepCopy() *IAMActionDefinitionInitParameters { + if in == nil { + return nil + } + out := new(IAMActionDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMActionDefinitionObservation) DeepCopyInto(out *IAMActionDefinitionObservation) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyArn != nil { + in, out := &in.PolicyArn, &out.PolicyArn + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMActionDefinitionObservation. +func (in *IAMActionDefinitionObservation) DeepCopy() *IAMActionDefinitionObservation { + if in == nil { + return nil + } + out := new(IAMActionDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IAMActionDefinitionParameters) DeepCopyInto(out *IAMActionDefinitionParameters) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyArn != nil { + in, out := &in.PolicyArn, &out.PolicyArn + *out = new(string) + **out = **in + } + if in.PolicyArnRef != nil { + in, out := &in.PolicyArnRef, &out.PolicyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyArnSelector != nil { + in, out := &in.PolicyArnSelector, &out.PolicyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMActionDefinitionParameters. +func (in *IAMActionDefinitionParameters) DeepCopy() *IAMActionDefinitionParameters { + if in == nil { + return nil + } + out := new(IAMActionDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationInitParameters) DeepCopyInto(out *NotificationInitParameters) { + *out = *in + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.NotificationType != nil { + in, out := &in.NotificationType, &out.NotificationType + *out = new(string) + **out = **in + } + if in.SubscriberEmailAddresses != nil { + in, out := &in.SubscriberEmailAddresses, &out.SubscriberEmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubscriberSnsTopicArns != nil { + in, out := &in.SubscriberSnsTopicArns, &out.SubscriberSnsTopicArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationInitParameters. +func (in *NotificationInitParameters) DeepCopy() *NotificationInitParameters { + if in == nil { + return nil + } + out := new(NotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationObservation) DeepCopyInto(out *NotificationObservation) { + *out = *in + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.NotificationType != nil { + in, out := &in.NotificationType, &out.NotificationType + *out = new(string) + **out = **in + } + if in.SubscriberEmailAddresses != nil { + in, out := &in.SubscriberEmailAddresses, &out.SubscriberEmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubscriberSnsTopicArns != nil { + in, out := &in.SubscriberSnsTopicArns, &out.SubscriberSnsTopicArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationObservation. +func (in *NotificationObservation) DeepCopy() *NotificationObservation { + if in == nil { + return nil + } + out := new(NotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationParameters) DeepCopyInto(out *NotificationParameters) { + *out = *in + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.NotificationType != nil { + in, out := &in.NotificationType, &out.NotificationType + *out = new(string) + **out = **in + } + if in.SubscriberEmailAddresses != nil { + in, out := &in.SubscriberEmailAddresses, &out.SubscriberEmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubscriberSnsTopicArns != nil { + in, out := &in.SubscriberSnsTopicArns, &out.SubscriberSnsTopicArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationParameters. +func (in *NotificationParameters) DeepCopy() *NotificationParameters { + if in == nil { + return nil + } + out := new(NotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlannedLimitInitParameters) DeepCopyInto(out *PlannedLimitInitParameters) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlannedLimitInitParameters. +func (in *PlannedLimitInitParameters) DeepCopy() *PlannedLimitInitParameters { + if in == nil { + return nil + } + out := new(PlannedLimitInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlannedLimitObservation) DeepCopyInto(out *PlannedLimitObservation) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlannedLimitObservation. +func (in *PlannedLimitObservation) DeepCopy() *PlannedLimitObservation { + if in == nil { + return nil + } + out := new(PlannedLimitObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlannedLimitParameters) DeepCopyInto(out *PlannedLimitParameters) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlannedLimitParameters. +func (in *PlannedLimitParameters) DeepCopy() *PlannedLimitParameters { + if in == nil { + return nil + } + out := new(PlannedLimitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScpActionDefinitionInitParameters) DeepCopyInto(out *ScpActionDefinitionInitParameters) { + *out = *in + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.TargetIds != nil { + in, out := &in.TargetIds, &out.TargetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScpActionDefinitionInitParameters. +func (in *ScpActionDefinitionInitParameters) DeepCopy() *ScpActionDefinitionInitParameters { + if in == nil { + return nil + } + out := new(ScpActionDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScpActionDefinitionObservation) DeepCopyInto(out *ScpActionDefinitionObservation) { + *out = *in + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.TargetIds != nil { + in, out := &in.TargetIds, &out.TargetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScpActionDefinitionObservation. +func (in *ScpActionDefinitionObservation) DeepCopy() *ScpActionDefinitionObservation { + if in == nil { + return nil + } + out := new(ScpActionDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScpActionDefinitionParameters) DeepCopyInto(out *ScpActionDefinitionParameters) { + *out = *in + if in.PolicyID != nil { + in, out := &in.PolicyID, &out.PolicyID + *out = new(string) + **out = **in + } + if in.TargetIds != nil { + in, out := &in.TargetIds, &out.TargetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScpActionDefinitionParameters. +func (in *ScpActionDefinitionParameters) DeepCopy() *ScpActionDefinitionParameters { + if in == nil { + return nil + } + out := new(ScpActionDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SsmActionDefinitionInitParameters) DeepCopyInto(out *SsmActionDefinitionInitParameters) { + *out = *in + if in.ActionSubType != nil { + in, out := &in.ActionSubType, &out.ActionSubType + *out = new(string) + **out = **in + } + if in.InstanceIds != nil { + in, out := &in.InstanceIds, &out.InstanceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SsmActionDefinitionInitParameters. +func (in *SsmActionDefinitionInitParameters) DeepCopy() *SsmActionDefinitionInitParameters { + if in == nil { + return nil + } + out := new(SsmActionDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SsmActionDefinitionObservation) DeepCopyInto(out *SsmActionDefinitionObservation) { + *out = *in + if in.ActionSubType != nil { + in, out := &in.ActionSubType, &out.ActionSubType + *out = new(string) + **out = **in + } + if in.InstanceIds != nil { + in, out := &in.InstanceIds, &out.InstanceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SsmActionDefinitionObservation. +func (in *SsmActionDefinitionObservation) DeepCopy() *SsmActionDefinitionObservation { + if in == nil { + return nil + } + out := new(SsmActionDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SsmActionDefinitionParameters) DeepCopyInto(out *SsmActionDefinitionParameters) { + *out = *in + if in.ActionSubType != nil { + in, out := &in.ActionSubType, &out.ActionSubType + *out = new(string) + **out = **in + } + if in.InstanceIds != nil { + in, out := &in.InstanceIds, &out.InstanceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SsmActionDefinitionParameters. +func (in *SsmActionDefinitionParameters) DeepCopy() *SsmActionDefinitionParameters { + if in == nil { + return nil + } + out := new(SsmActionDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriberInitParameters) DeepCopyInto(out *SubscriberInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.SubscriptionType != nil { + in, out := &in.SubscriptionType, &out.SubscriptionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriberInitParameters. +func (in *SubscriberInitParameters) DeepCopy() *SubscriberInitParameters { + if in == nil { + return nil + } + out := new(SubscriberInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriberObservation) DeepCopyInto(out *SubscriberObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.SubscriptionType != nil { + in, out := &in.SubscriptionType, &out.SubscriptionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriberObservation. +func (in *SubscriberObservation) DeepCopy() *SubscriberObservation { + if in == nil { + return nil + } + out := new(SubscriberObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriberParameters) DeepCopyInto(out *SubscriberParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.SubscriptionType != nil { + in, out := &in.SubscriptionType, &out.SubscriptionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriberParameters. +func (in *SubscriberParameters) DeepCopy() *SubscriberParameters { + if in == nil { + return nil + } + out := new(SubscriberParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/budgets/v1beta2/zz_generated.managed.go b/apis/budgets/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..48b99e1f81 --- /dev/null +++ b/apis/budgets/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Budget. 
+func (mg *Budget) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Budget. +func (mg *Budget) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Budget. +func (mg *Budget) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Budget. +func (mg *Budget) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Budget. +func (mg *Budget) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Budget. +func (mg *Budget) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Budget. +func (mg *Budget) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Budget. +func (mg *Budget) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Budget. +func (mg *Budget) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Budget. +func (mg *Budget) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Budget. +func (mg *Budget) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Budget. +func (mg *Budget) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BudgetAction. 
+func (mg *BudgetAction) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BudgetAction. +func (mg *BudgetAction) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BudgetAction. +func (mg *BudgetAction) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BudgetAction. +func (mg *BudgetAction) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BudgetAction. +func (mg *BudgetAction) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BudgetAction. +func (mg *BudgetAction) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BudgetAction. +func (mg *BudgetAction) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BudgetAction. +func (mg *BudgetAction) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BudgetAction. +func (mg *BudgetAction) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BudgetAction. +func (mg *BudgetAction) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BudgetAction. +func (mg *BudgetAction) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BudgetAction. 
+func (mg *BudgetAction) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/budgets/v1beta2/zz_generated.managedlist.go b/apis/budgets/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..5aa5739e76 --- /dev/null +++ b/apis/budgets/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this BudgetActionList. +func (l *BudgetActionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BudgetList. +func (l *BudgetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/budgets/v1beta2/zz_generated.resolvers.go b/apis/budgets/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..5fa7da7225 --- /dev/null +++ b/apis/budgets/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,153 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *BudgetAction) ResolveReferences( // ResolveReferences of this BudgetAction. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("budgets.aws.upbound.io", "v1beta2", "Budget", "BudgetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.BudgetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.BudgetNameRef, + Selector: mg.Spec.ForProvider.BudgetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.BudgetName") + } + mg.Spec.ForProvider.BudgetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BudgetNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Definition != nil { + if mg.Spec.ForProvider.Definition.IAMActionDefinition != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Policy", "PolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.Definition.IAMActionDefinition.PolicyArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Definition.IAMActionDefinition.PolicyArnRef, + Selector: mg.Spec.ForProvider.Definition.IAMActionDefinition.PolicyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Definition.IAMActionDefinition.PolicyArn") + } + mg.Spec.ForProvider.Definition.IAMActionDefinition.PolicyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Definition.IAMActionDefinition.PolicyArnRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ExecutionRoleArnRef, + Selector: mg.Spec.ForProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExecutionRoleArn") + } + mg.Spec.ForProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExecutionRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("budgets.aws.upbound.io", "v1beta2", "Budget", "BudgetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.BudgetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.BudgetNameRef, + Selector: mg.Spec.InitProvider.BudgetNameSelector, + 
To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.BudgetName") + } + mg.Spec.InitProvider.BudgetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BudgetNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Definition != nil { + if mg.Spec.InitProvider.Definition.IAMActionDefinition != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Policy", "PolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Definition.IAMActionDefinition.PolicyArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Definition.IAMActionDefinition.PolicyArnRef, + Selector: mg.Spec.InitProvider.Definition.IAMActionDefinition.PolicyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Definition.IAMActionDefinition.PolicyArn") + } + mg.Spec.InitProvider.Definition.IAMActionDefinition.PolicyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Definition.IAMActionDefinition.PolicyArnRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ExecutionRoleArnRef, + Selector: mg.Spec.InitProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.ExecutionRoleArn") + } + mg.Spec.InitProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExecutionRoleArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/budgets/v1beta2/zz_groupversion_info.go b/apis/budgets/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..0aa5a415ea --- /dev/null +++ b/apis/budgets/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=budgets.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "budgets.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/chime/v1beta1/zz_generated.conversion_hubs.go b/apis/chime/v1beta1/zz_generated.conversion_hubs.go index 1d3802661f..979287745b 100755 --- a/apis/chime/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/chime/v1beta1/zz_generated.conversion_hubs.go @@ -18,9 +18,6 @@ func (tr *VoiceConnectorLogging) Hub() {} // Hub marks this type as a conversion hub. func (tr *VoiceConnectorOrigination) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *VoiceConnectorStreaming) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *VoiceConnectorTermination) Hub() {} diff --git a/apis/chime/v1beta1/zz_generated.conversion_spokes.go b/apis/chime/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..c31c235d3a --- /dev/null +++ b/apis/chime/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this VoiceConnectorStreaming to the hub type. +func (tr *VoiceConnectorStreaming) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VoiceConnectorStreaming type. 
+func (tr *VoiceConnectorStreaming) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/chime/v1beta2/zz_generated.conversion_hubs.go b/apis/chime/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..117c8645e3 --- /dev/null +++ b/apis/chime/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *VoiceConnectorStreaming) Hub() {} diff --git a/apis/chime/v1beta2/zz_generated.deepcopy.go b/apis/chime/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..185916aad2 --- /dev/null +++ b/apis/chime/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,351 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MediaInsightsConfigurationInitParameters) DeepCopyInto(out *MediaInsightsConfigurationInitParameters) { + *out = *in + if in.ConfigurationArn != nil { + in, out := &in.ConfigurationArn, &out.ConfigurationArn + *out = new(string) + **out = **in + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaInsightsConfigurationInitParameters. +func (in *MediaInsightsConfigurationInitParameters) DeepCopy() *MediaInsightsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(MediaInsightsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaInsightsConfigurationObservation) DeepCopyInto(out *MediaInsightsConfigurationObservation) { + *out = *in + if in.ConfigurationArn != nil { + in, out := &in.ConfigurationArn, &out.ConfigurationArn + *out = new(string) + **out = **in + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaInsightsConfigurationObservation. +func (in *MediaInsightsConfigurationObservation) DeepCopy() *MediaInsightsConfigurationObservation { + if in == nil { + return nil + } + out := new(MediaInsightsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MediaInsightsConfigurationParameters) DeepCopyInto(out *MediaInsightsConfigurationParameters) { + *out = *in + if in.ConfigurationArn != nil { + in, out := &in.ConfigurationArn, &out.ConfigurationArn + *out = new(string) + **out = **in + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaInsightsConfigurationParameters. +func (in *MediaInsightsConfigurationParameters) DeepCopy() *MediaInsightsConfigurationParameters { + if in == nil { + return nil + } + out := new(MediaInsightsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VoiceConnectorStreaming) DeepCopyInto(out *VoiceConnectorStreaming) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceConnectorStreaming. +func (in *VoiceConnectorStreaming) DeepCopy() *VoiceConnectorStreaming { + if in == nil { + return nil + } + out := new(VoiceConnectorStreaming) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VoiceConnectorStreaming) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VoiceConnectorStreamingInitParameters) DeepCopyInto(out *VoiceConnectorStreamingInitParameters) { + *out = *in + if in.DataRetention != nil { + in, out := &in.DataRetention, &out.DataRetention + *out = new(float64) + **out = **in + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.MediaInsightsConfiguration != nil { + in, out := &in.MediaInsightsConfiguration, &out.MediaInsightsConfiguration + *out = new(MediaInsightsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamingNotificationTargets != nil { + in, out := &in.StreamingNotificationTargets, &out.StreamingNotificationTargets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VoiceConnectorID != nil { + in, out := &in.VoiceConnectorID, &out.VoiceConnectorID + *out = new(string) + **out = **in + } + if in.VoiceConnectorIDRef != nil { + in, out := &in.VoiceConnectorIDRef, &out.VoiceConnectorIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VoiceConnectorIDSelector != nil { + in, out := &in.VoiceConnectorIDSelector, &out.VoiceConnectorIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceConnectorStreamingInitParameters. +func (in *VoiceConnectorStreamingInitParameters) DeepCopy() *VoiceConnectorStreamingInitParameters { + if in == nil { + return nil + } + out := new(VoiceConnectorStreamingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VoiceConnectorStreamingList) DeepCopyInto(out *VoiceConnectorStreamingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VoiceConnectorStreaming, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceConnectorStreamingList. +func (in *VoiceConnectorStreamingList) DeepCopy() *VoiceConnectorStreamingList { + if in == nil { + return nil + } + out := new(VoiceConnectorStreamingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VoiceConnectorStreamingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VoiceConnectorStreamingObservation) DeepCopyInto(out *VoiceConnectorStreamingObservation) { + *out = *in + if in.DataRetention != nil { + in, out := &in.DataRetention, &out.DataRetention + *out = new(float64) + **out = **in + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MediaInsightsConfiguration != nil { + in, out := &in.MediaInsightsConfiguration, &out.MediaInsightsConfiguration + *out = new(MediaInsightsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.StreamingNotificationTargets != nil { + in, out := &in.StreamingNotificationTargets, &out.StreamingNotificationTargets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VoiceConnectorID != nil { + in, out := &in.VoiceConnectorID, &out.VoiceConnectorID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceConnectorStreamingObservation. +func (in *VoiceConnectorStreamingObservation) DeepCopy() *VoiceConnectorStreamingObservation { + if in == nil { + return nil + } + out := new(VoiceConnectorStreamingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VoiceConnectorStreamingParameters) DeepCopyInto(out *VoiceConnectorStreamingParameters) { + *out = *in + if in.DataRetention != nil { + in, out := &in.DataRetention, &out.DataRetention + *out = new(float64) + **out = **in + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.MediaInsightsConfiguration != nil { + in, out := &in.MediaInsightsConfiguration, &out.MediaInsightsConfiguration + *out = new(MediaInsightsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StreamingNotificationTargets != nil { + in, out := &in.StreamingNotificationTargets, &out.StreamingNotificationTargets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VoiceConnectorID != nil { + in, out := &in.VoiceConnectorID, &out.VoiceConnectorID + *out = new(string) + **out = **in + } + if in.VoiceConnectorIDRef != nil { + in, out := &in.VoiceConnectorIDRef, &out.VoiceConnectorIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VoiceConnectorIDSelector != nil { + in, out := &in.VoiceConnectorIDSelector, &out.VoiceConnectorIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceConnectorStreamingParameters. +func (in *VoiceConnectorStreamingParameters) DeepCopy() *VoiceConnectorStreamingParameters { + if in == nil { + return nil + } + out := new(VoiceConnectorStreamingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VoiceConnectorStreamingSpec) DeepCopyInto(out *VoiceConnectorStreamingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceConnectorStreamingSpec. +func (in *VoiceConnectorStreamingSpec) DeepCopy() *VoiceConnectorStreamingSpec { + if in == nil { + return nil + } + out := new(VoiceConnectorStreamingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VoiceConnectorStreamingStatus) DeepCopyInto(out *VoiceConnectorStreamingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceConnectorStreamingStatus. +func (in *VoiceConnectorStreamingStatus) DeepCopy() *VoiceConnectorStreamingStatus { + if in == nil { + return nil + } + out := new(VoiceConnectorStreamingStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/chime/v1beta2/zz_generated.managed.go b/apis/chime/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..e15e0fbcdc --- /dev/null +++ b/apis/chime/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VoiceConnectorStreaming. 
+func (mg *VoiceConnectorStreaming) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VoiceConnectorStreaming. +func (mg *VoiceConnectorStreaming) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VoiceConnectorStreaming. 
+func (mg *VoiceConnectorStreaming) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/chime/v1beta2/zz_generated.managedlist.go b/apis/chime/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..2d659f32a0 --- /dev/null +++ b/apis/chime/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this VoiceConnectorStreamingList. +func (l *VoiceConnectorStreamingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/chime/v1beta2/zz_generated.resolvers.go b/apis/chime/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..ad40be0d0f --- /dev/null +++ b/apis/chime/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *VoiceConnectorStreaming) ResolveReferences( // ResolveReferences of this VoiceConnectorStreaming. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("chime.aws.upbound.io", "v1beta1", "VoiceConnector", "VoiceConnectorList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VoiceConnectorID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VoiceConnectorIDRef, + Selector: mg.Spec.ForProvider.VoiceConnectorIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VoiceConnectorID") + } + mg.Spec.ForProvider.VoiceConnectorID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VoiceConnectorIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("chime.aws.upbound.io", "v1beta1", "VoiceConnector", "VoiceConnectorList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VoiceConnectorID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VoiceConnectorIDRef, + Selector: mg.Spec.InitProvider.VoiceConnectorIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VoiceConnectorID") + } + mg.Spec.InitProvider.VoiceConnectorID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VoiceConnectorIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/chime/v1beta2/zz_groupversion_info.go 
b/apis/chime/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..6e5118016f --- /dev/null +++ b/apis/chime/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=chime.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "chime.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/chime/v1beta2/zz_voiceconnectorstreaming_terraformed.go b/apis/chime/v1beta2/zz_voiceconnectorstreaming_terraformed.go new file mode 100755 index 0000000000..f185587fad --- /dev/null +++ b/apis/chime/v1beta2/zz_voiceconnectorstreaming_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VoiceConnectorStreaming +func (mg *VoiceConnectorStreaming) GetTerraformResourceType() string { + return "aws_chime_voice_connector_streaming" +} + +// GetConnectionDetailsMapping for this VoiceConnectorStreaming +func (tr *VoiceConnectorStreaming) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VoiceConnectorStreaming +func (tr *VoiceConnectorStreaming) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VoiceConnectorStreaming +func (tr *VoiceConnectorStreaming) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VoiceConnectorStreaming +func (tr *VoiceConnectorStreaming) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VoiceConnectorStreaming +func (tr *VoiceConnectorStreaming) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VoiceConnectorStreaming +func (tr *VoiceConnectorStreaming) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
VoiceConnectorStreaming +func (tr *VoiceConnectorStreaming) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VoiceConnectorStreaming +func (tr *VoiceConnectorStreaming) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VoiceConnectorStreaming using its observed tfState. +// returns True if there are any spec changes for the resource. 
func (tr *VoiceConnectorStreaming) LateInitialize(attrs []byte) (bool, error) {
	// attrs is the observed Terraform state; fields that are unset in
	// spec.forProvider are filled in from it.
	params := &VoiceConnectorStreamingParameters{}
	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
	}
	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}

	li := resource.NewGenericLateInitializer(opts...)
	return li.LateInitialize(&tr.Spec.ForProvider, params)
}

// GetTerraformSchemaVersion returns the associated Terraform schema version
func (tr *VoiceConnectorStreaming) GetTerraformSchemaVersion() int {
	return 0
}

// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta2

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

// MediaInsightsConfigurationInitParameters holds the init-time (management
// policy) fields of the media_insights_configuration block.
type MediaInsightsConfigurationInitParameters struct {

	// The media insights configuration that will be invoked by the Voice Connector.
	ConfigurationArn *string `json:"configurationArn,omitempty" tf:"configuration_arn,omitempty"`

	// When true, the media insights configuration is not enabled. Defaults to false.
	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`
}

// MediaInsightsConfigurationObservation mirrors the observed state of the
// media_insights_configuration block.
type MediaInsightsConfigurationObservation struct {

	// The media insights configuration that will be invoked by the Voice Connector.
	ConfigurationArn *string `json:"configurationArn,omitempty" tf:"configuration_arn,omitempty"`

	// When true, the media insights configuration is not enabled. Defaults to false.
	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`
}

// MediaInsightsConfigurationParameters holds the desired-state fields of the
// media_insights_configuration block.
type MediaInsightsConfigurationParameters struct {

	// The media insights configuration that will be invoked by the Voice Connector.
	// +kubebuilder:validation:Optional
	ConfigurationArn *string `json:"configurationArn,omitempty" tf:"configuration_arn,omitempty"`

	// When true, the media insights configuration is not enabled. Defaults to false.
	// +kubebuilder:validation:Optional
	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`
}

// VoiceConnectorStreamingInitParameters holds the init-time fields of a
// VoiceConnectorStreaming.
type VoiceConnectorStreamingInitParameters struct {

	// The retention period, in hours, for the Amazon Kinesis data.
	DataRetention *float64 `json:"dataRetention,omitempty" tf:"data_retention,omitempty"`

	// When true, media streaming to Amazon Kinesis is turned off. Default: false
	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`

	// The media insights configuration. See media_insights_configuration.
	MediaInsightsConfiguration *MediaInsightsConfigurationInitParameters `json:"mediaInsightsConfiguration,omitempty" tf:"media_insights_configuration,omitempty"`

	// The streaming notification targets. Valid Values: EventBridge | SNS | SQS
	// +listType=set
	StreamingNotificationTargets []*string `json:"streamingNotificationTargets,omitempty" tf:"streaming_notification_targets,omitempty"`

	// The Amazon Chime Voice Connector ID.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/chime/v1beta1.VoiceConnector
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
	VoiceConnectorID *string `json:"voiceConnectorId,omitempty" tf:"voice_connector_id,omitempty"`

	// Reference to a VoiceConnector in chime to populate voiceConnectorId.
	// +kubebuilder:validation:Optional
	VoiceConnectorIDRef *v1.Reference `json:"voiceConnectorIdRef,omitempty" tf:"-"`

	// Selector for a VoiceConnector in chime to populate voiceConnectorId.
	// +kubebuilder:validation:Optional
	VoiceConnectorIDSelector *v1.Selector `json:"voiceConnectorIdSelector,omitempty" tf:"-"`
}

// VoiceConnectorStreamingObservation mirrors the observed Terraform state of a
// VoiceConnectorStreaming.
type VoiceConnectorStreamingObservation struct {

	// The retention period, in hours, for the Amazon Kinesis data.
	DataRetention *float64 `json:"dataRetention,omitempty" tf:"data_retention,omitempty"`

	// When true, media streaming to Amazon Kinesis is turned off. Default: false
	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`

	// The Amazon Chime Voice Connector ID.
	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// The media insights configuration. See media_insights_configuration.
	MediaInsightsConfiguration *MediaInsightsConfigurationObservation `json:"mediaInsightsConfiguration,omitempty" tf:"media_insights_configuration,omitempty"`

	// The streaming notification targets. Valid Values: EventBridge | SNS | SQS
	// +listType=set
	StreamingNotificationTargets []*string `json:"streamingNotificationTargets,omitempty" tf:"streaming_notification_targets,omitempty"`

	// The Amazon Chime Voice Connector ID.
	VoiceConnectorID *string `json:"voiceConnectorId,omitempty" tf:"voice_connector_id,omitempty"`
}

// VoiceConnectorStreamingParameters holds the desired-state fields of a
// VoiceConnectorStreaming.
type VoiceConnectorStreamingParameters struct {

	// The retention period, in hours, for the Amazon Kinesis data.
	// +kubebuilder:validation:Optional
	DataRetention *float64 `json:"dataRetention,omitempty" tf:"data_retention,omitempty"`

	// When true, media streaming to Amazon Kinesis is turned off. Default: false
	// +kubebuilder:validation:Optional
	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`

	// The media insights configuration. See media_insights_configuration.
	// +kubebuilder:validation:Optional
	MediaInsightsConfiguration *MediaInsightsConfigurationParameters `json:"mediaInsightsConfiguration,omitempty" tf:"media_insights_configuration,omitempty"`

	// Region is the region you'd like your resource to be created in.
	// +upjet:crd:field:TFTag=-
	// +kubebuilder:validation:Required
	Region *string `json:"region" tf:"-"`

	// The streaming notification targets. Valid Values: EventBridge | SNS | SQS
	// +kubebuilder:validation:Optional
	// +listType=set
	StreamingNotificationTargets []*string `json:"streamingNotificationTargets,omitempty" tf:"streaming_notification_targets,omitempty"`

	// The Amazon Chime Voice Connector ID.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/chime/v1beta1.VoiceConnector
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
	// +kubebuilder:validation:Optional
	VoiceConnectorID *string `json:"voiceConnectorId,omitempty" tf:"voice_connector_id,omitempty"`

	// Reference to a VoiceConnector in chime to populate voiceConnectorId.
	// +kubebuilder:validation:Optional
	VoiceConnectorIDRef *v1.Reference `json:"voiceConnectorIdRef,omitempty" tf:"-"`

	// Selector for a VoiceConnector in chime to populate voiceConnectorId.
	// +kubebuilder:validation:Optional
	VoiceConnectorIDSelector *v1.Selector `json:"voiceConnectorIdSelector,omitempty" tf:"-"`
}

// VoiceConnectorStreamingSpec defines the desired state of VoiceConnectorStreaming
type VoiceConnectorStreamingSpec struct {
	v1.ResourceSpec `json:",inline"`
	ForProvider     VoiceConnectorStreamingParameters `json:"forProvider"`
	// THIS IS A BETA FIELD. It will be honored
	// unless the Management Policies feature flag is disabled.
	// InitProvider holds the same fields as ForProvider, with the exception
	// of Identifier and other resource reference fields. The fields that are
	// in InitProvider are merged into ForProvider when the resource is created.
	// The same fields are also added to the terraform ignore_changes hook, to
	// avoid updating them after creation. This is useful for fields that are
	// required on creation, but we do not desire to update them after creation,
	// for example because of an external controller is managing them, like an
	// autoscaler.
	InitProvider VoiceConnectorStreamingInitParameters `json:"initProvider,omitempty"`
}

// VoiceConnectorStreamingStatus defines the observed state of VoiceConnectorStreaming.
type VoiceConnectorStreamingStatus struct {
	v1.ResourceStatus `json:",inline"`
	AtProvider        VoiceConnectorStreamingObservation `json:"atProvider,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// VoiceConnectorStreaming is the Schema for the VoiceConnectorStreamings API. The streaming configuration associated with an Amazon Chime Voice Connector. Specifies whether media streaming is enabled for sending to Amazon Kinesis, and shows the retention period for the Amazon Kinesis data, in hours.
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
type VoiceConnectorStreaming struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dataRetention) || (has(self.initProvider) && has(self.initProvider.dataRetention))",message="spec.forProvider.dataRetention is a required parameter"
	Spec   VoiceConnectorStreamingSpec   `json:"spec"`
	Status VoiceConnectorStreamingStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// VoiceConnectorStreamingList contains a list of VoiceConnectorStreamings
type VoiceConnectorStreamingList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []VoiceConnectorStreaming `json:"items"`
}

// Repository type metadata.
var (
	VoiceConnectorStreaming_Kind             = "VoiceConnectorStreaming"
	VoiceConnectorStreaming_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: VoiceConnectorStreaming_Kind}.String()
	VoiceConnectorStreaming_KindAPIVersion   = VoiceConnectorStreaming_Kind + "."
+ CRDGroupVersion.String() + VoiceConnectorStreaming_GroupVersionKind = CRDGroupVersion.WithKind(VoiceConnectorStreaming_Kind) +) + +func init() { + SchemeBuilder.Register(&VoiceConnectorStreaming{}, &VoiceConnectorStreamingList{}) +} diff --git a/apis/cloudformation/v1beta1/zz_generated.conversion_hubs.go b/apis/cloudformation/v1beta1/zz_generated.conversion_hubs.go index 4cc153bada..10da92e334 100755 --- a/apis/cloudformation/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cloudformation/v1beta1/zz_generated.conversion_hubs.go @@ -8,9 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Stack) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *StackSet) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *StackSetInstance) Hub() {} diff --git a/apis/cloudformation/v1beta1/zz_generated.conversion_spokes.go b/apis/cloudformation/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..875aaf0a20 --- /dev/null +++ b/apis/cloudformation/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this StackSet to the hub type. +func (tr *StackSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StackSet type. 
+func (tr *StackSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this StackSetInstance to the hub type. +func (tr *StackSetInstance) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StackSetInstance type. +func (tr *StackSetInstance) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cloudformation/v1beta2/zz_generated.conversion_hubs.go b/apis/cloudformation/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..32875616e0 --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *StackSet) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *StackSetInstance) Hub() {} diff --git a/apis/cloudformation/v1beta2/zz_generated.deepcopy.go b/apis/cloudformation/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a0e0bc1de6 --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1391 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoDeploymentInitParameters) DeepCopyInto(out *AutoDeploymentInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetainStacksOnAccountRemoval != nil { + in, out := &in.RetainStacksOnAccountRemoval, &out.RetainStacksOnAccountRemoval + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoDeploymentInitParameters. +func (in *AutoDeploymentInitParameters) DeepCopy() *AutoDeploymentInitParameters { + if in == nil { + return nil + } + out := new(AutoDeploymentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoDeploymentObservation) DeepCopyInto(out *AutoDeploymentObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetainStacksOnAccountRemoval != nil { + in, out := &in.RetainStacksOnAccountRemoval, &out.RetainStacksOnAccountRemoval + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoDeploymentObservation. +func (in *AutoDeploymentObservation) DeepCopy() *AutoDeploymentObservation { + if in == nil { + return nil + } + out := new(AutoDeploymentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoDeploymentParameters) DeepCopyInto(out *AutoDeploymentParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetainStacksOnAccountRemoval != nil { + in, out := &in.RetainStacksOnAccountRemoval, &out.RetainStacksOnAccountRemoval + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoDeploymentParameters. +func (in *AutoDeploymentParameters) DeepCopy() *AutoDeploymentParameters { + if in == nil { + return nil + } + out := new(AutoDeploymentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentTargetsInitParameters) DeepCopyInto(out *DeploymentTargetsInitParameters) { + *out = *in + if in.OrganizationalUnitIds != nil { + in, out := &in.OrganizationalUnitIds, &out.OrganizationalUnitIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTargetsInitParameters. +func (in *DeploymentTargetsInitParameters) DeepCopy() *DeploymentTargetsInitParameters { + if in == nil { + return nil + } + out := new(DeploymentTargetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTargetsObservation) DeepCopyInto(out *DeploymentTargetsObservation) { + *out = *in + if in.OrganizationalUnitIds != nil { + in, out := &in.OrganizationalUnitIds, &out.OrganizationalUnitIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTargetsObservation. +func (in *DeploymentTargetsObservation) DeepCopy() *DeploymentTargetsObservation { + if in == nil { + return nil + } + out := new(DeploymentTargetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentTargetsParameters) DeepCopyInto(out *DeploymentTargetsParameters) { + *out = *in + if in.OrganizationalUnitIds != nil { + in, out := &in.OrganizationalUnitIds, &out.OrganizationalUnitIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTargetsParameters. +func (in *DeploymentTargetsParameters) DeepCopy() *DeploymentTargetsParameters { + if in == nil { + return nil + } + out := new(DeploymentTargetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedExecutionInitParameters) DeepCopyInto(out *ManagedExecutionInitParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedExecutionInitParameters. +func (in *ManagedExecutionInitParameters) DeepCopy() *ManagedExecutionInitParameters { + if in == nil { + return nil + } + out := new(ManagedExecutionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedExecutionObservation) DeepCopyInto(out *ManagedExecutionObservation) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedExecutionObservation. 
+func (in *ManagedExecutionObservation) DeepCopy() *ManagedExecutionObservation { + if in == nil { + return nil + } + out := new(ManagedExecutionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedExecutionParameters) DeepCopyInto(out *ManagedExecutionParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedExecutionParameters. +func (in *ManagedExecutionParameters) DeepCopy() *ManagedExecutionParameters { + if in == nil { + return nil + } + out := new(ManagedExecutionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperationPreferencesInitParameters) DeepCopyInto(out *OperationPreferencesInitParameters) { + *out = *in + if in.FailureToleranceCount != nil { + in, out := &in.FailureToleranceCount, &out.FailureToleranceCount + *out = new(float64) + **out = **in + } + if in.FailureTolerancePercentage != nil { + in, out := &in.FailureTolerancePercentage, &out.FailureTolerancePercentage + *out = new(float64) + **out = **in + } + if in.MaxConcurrentCount != nil { + in, out := &in.MaxConcurrentCount, &out.MaxConcurrentCount + *out = new(float64) + **out = **in + } + if in.MaxConcurrentPercentage != nil { + in, out := &in.MaxConcurrentPercentage, &out.MaxConcurrentPercentage + *out = new(float64) + **out = **in + } + if in.RegionConcurrencyType != nil { + in, out := &in.RegionConcurrencyType, &out.RegionConcurrencyType + *out = new(string) + **out = **in + } + if in.RegionOrder != nil { + in, out := &in.RegionOrder, &out.RegionOrder + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], 
&(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationPreferencesInitParameters. +func (in *OperationPreferencesInitParameters) DeepCopy() *OperationPreferencesInitParameters { + if in == nil { + return nil + } + out := new(OperationPreferencesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperationPreferencesObservation) DeepCopyInto(out *OperationPreferencesObservation) { + *out = *in + if in.FailureToleranceCount != nil { + in, out := &in.FailureToleranceCount, &out.FailureToleranceCount + *out = new(float64) + **out = **in + } + if in.FailureTolerancePercentage != nil { + in, out := &in.FailureTolerancePercentage, &out.FailureTolerancePercentage + *out = new(float64) + **out = **in + } + if in.MaxConcurrentCount != nil { + in, out := &in.MaxConcurrentCount, &out.MaxConcurrentCount + *out = new(float64) + **out = **in + } + if in.MaxConcurrentPercentage != nil { + in, out := &in.MaxConcurrentPercentage, &out.MaxConcurrentPercentage + *out = new(float64) + **out = **in + } + if in.RegionConcurrencyType != nil { + in, out := &in.RegionConcurrencyType, &out.RegionConcurrencyType + *out = new(string) + **out = **in + } + if in.RegionOrder != nil { + in, out := &in.RegionOrder, &out.RegionOrder + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationPreferencesObservation. 
+func (in *OperationPreferencesObservation) DeepCopy() *OperationPreferencesObservation { + if in == nil { + return nil + } + out := new(OperationPreferencesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperationPreferencesParameters) DeepCopyInto(out *OperationPreferencesParameters) { + *out = *in + if in.FailureToleranceCount != nil { + in, out := &in.FailureToleranceCount, &out.FailureToleranceCount + *out = new(float64) + **out = **in + } + if in.FailureTolerancePercentage != nil { + in, out := &in.FailureTolerancePercentage, &out.FailureTolerancePercentage + *out = new(float64) + **out = **in + } + if in.MaxConcurrentCount != nil { + in, out := &in.MaxConcurrentCount, &out.MaxConcurrentCount + *out = new(float64) + **out = **in + } + if in.MaxConcurrentPercentage != nil { + in, out := &in.MaxConcurrentPercentage, &out.MaxConcurrentPercentage + *out = new(float64) + **out = **in + } + if in.RegionConcurrencyType != nil { + in, out := &in.RegionConcurrencyType, &out.RegionConcurrencyType + *out = new(string) + **out = **in + } + if in.RegionOrder != nil { + in, out := &in.RegionOrder, &out.RegionOrder + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationPreferencesParameters. +func (in *OperationPreferencesParameters) DeepCopy() *OperationPreferencesParameters { + if in == nil { + return nil + } + out := new(OperationPreferencesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StackInstanceSummariesInitParameters) DeepCopyInto(out *StackInstanceSummariesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackInstanceSummariesInitParameters. +func (in *StackInstanceSummariesInitParameters) DeepCopy() *StackInstanceSummariesInitParameters { + if in == nil { + return nil + } + out := new(StackInstanceSummariesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackInstanceSummariesObservation) DeepCopyInto(out *StackInstanceSummariesObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.OrganizationalUnitID != nil { + in, out := &in.OrganizationalUnitID, &out.OrganizationalUnitID + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackInstanceSummariesObservation. +func (in *StackInstanceSummariesObservation) DeepCopy() *StackInstanceSummariesObservation { + if in == nil { + return nil + } + out := new(StackInstanceSummariesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackInstanceSummariesParameters) DeepCopyInto(out *StackInstanceSummariesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackInstanceSummariesParameters. 
+func (in *StackInstanceSummariesParameters) DeepCopy() *StackInstanceSummariesParameters { + if in == nil { + return nil + } + out := new(StackInstanceSummariesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSet) DeepCopyInto(out *StackSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSet. +func (in *StackSet) DeepCopy() *StackSet { + if in == nil { + return nil + } + out := new(StackSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StackSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StackSetInitParameters) DeepCopyInto(out *StackSetInitParameters) { + *out = *in + if in.AdministrationRoleArn != nil { + in, out := &in.AdministrationRoleArn, &out.AdministrationRoleArn + *out = new(string) + **out = **in + } + if in.AdministrationRoleArnRef != nil { + in, out := &in.AdministrationRoleArnRef, &out.AdministrationRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AdministrationRoleArnSelector != nil { + in, out := &in.AdministrationRoleArnSelector, &out.AdministrationRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AutoDeployment != nil { + in, out := &in.AutoDeployment, &out.AutoDeployment + *out = new(AutoDeploymentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CallAs != nil { + in, out := &in.CallAs, &out.CallAs + *out = new(string) + **out = **in + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionRoleName != nil { + in, out := &in.ExecutionRoleName, &out.ExecutionRoleName + *out = new(string) + **out = **in + } + if in.ManagedExecution != nil { + in, out := &in.ManagedExecution, &out.ManagedExecution + *out = new(ManagedExecutionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OperationPreferences != nil { + in, out := &in.OperationPreferences, &out.OperationPreferences + *out = new(OperationPreferencesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = 
**in + } + (*out)[key] = outVal + } + } + if in.PermissionModel != nil { + in, out := &in.PermissionModel, &out.PermissionModel + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemplateBody != nil { + in, out := &in.TemplateBody, &out.TemplateBody + *out = new(string) + **out = **in + } + if in.TemplateURL != nil { + in, out := &in.TemplateURL, &out.TemplateURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInitParameters. +func (in *StackSetInitParameters) DeepCopy() *StackSetInitParameters { + if in == nil { + return nil + } + out := new(StackSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSetInstance) DeepCopyInto(out *StackSetInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstance. +func (in *StackSetInstance) DeepCopy() *StackSetInstance { + if in == nil { + return nil + } + out := new(StackSetInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StackSetInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StackSetInstanceInitParameters) DeepCopyInto(out *StackSetInstanceInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.CallAs != nil { + in, out := &in.CallAs, &out.CallAs + *out = new(string) + **out = **in + } + if in.DeploymentTargets != nil { + in, out := &in.DeploymentTargets, &out.DeploymentTargets + *out = new(DeploymentTargetsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OperationPreferences != nil { + in, out := &in.OperationPreferences, &out.OperationPreferences + *out = new(StackSetInstanceOperationPreferencesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ParameterOverrides != nil { + in, out := &in.ParameterOverrides, &out.ParameterOverrides + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RetainStack != nil { + in, out := &in.RetainStack, &out.RetainStack + *out = new(bool) + **out = **in + } + if in.StackSetName != nil { + in, out := &in.StackSetName, &out.StackSetName + *out = new(string) + **out = **in + } + if in.StackSetNameRef != nil { + in, out := &in.StackSetNameRef, &out.StackSetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackSetNameSelector != nil { + in, out := &in.StackSetNameSelector, &out.StackSetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstanceInitParameters. 
+func (in *StackSetInstanceInitParameters) DeepCopy() *StackSetInstanceInitParameters { + if in == nil { + return nil + } + out := new(StackSetInstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSetInstanceList) DeepCopyInto(out *StackSetInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StackSetInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstanceList. +func (in *StackSetInstanceList) DeepCopy() *StackSetInstanceList { + if in == nil { + return nil + } + out := new(StackSetInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StackSetInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StackSetInstanceObservation) DeepCopyInto(out *StackSetInstanceObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.CallAs != nil { + in, out := &in.CallAs, &out.CallAs + *out = new(string) + **out = **in + } + if in.DeploymentTargets != nil { + in, out := &in.DeploymentTargets, &out.DeploymentTargets + *out = new(DeploymentTargetsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OperationPreferences != nil { + in, out := &in.OperationPreferences, &out.OperationPreferences + *out = new(StackSetInstanceOperationPreferencesObservation) + (*in).DeepCopyInto(*out) + } + if in.OrganizationalUnitID != nil { + in, out := &in.OrganizationalUnitID, &out.OrganizationalUnitID + *out = new(string) + **out = **in + } + if in.ParameterOverrides != nil { + in, out := &in.ParameterOverrides, &out.ParameterOverrides + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RetainStack != nil { + in, out := &in.RetainStack, &out.RetainStack + *out = new(bool) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackInstanceSummaries != nil { + in, out := &in.StackInstanceSummaries, &out.StackInstanceSummaries + *out = make([]StackInstanceSummariesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StackSetName != nil { + in, out := &in.StackSetName, &out.StackSetName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstanceObservation. 
+func (in *StackSetInstanceObservation) DeepCopy() *StackSetInstanceObservation { + if in == nil { + return nil + } + out := new(StackSetInstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSetInstanceOperationPreferencesInitParameters) DeepCopyInto(out *StackSetInstanceOperationPreferencesInitParameters) { + *out = *in + if in.FailureToleranceCount != nil { + in, out := &in.FailureToleranceCount, &out.FailureToleranceCount + *out = new(float64) + **out = **in + } + if in.FailureTolerancePercentage != nil { + in, out := &in.FailureTolerancePercentage, &out.FailureTolerancePercentage + *out = new(float64) + **out = **in + } + if in.MaxConcurrentCount != nil { + in, out := &in.MaxConcurrentCount, &out.MaxConcurrentCount + *out = new(float64) + **out = **in + } + if in.MaxConcurrentPercentage != nil { + in, out := &in.MaxConcurrentPercentage, &out.MaxConcurrentPercentage + *out = new(float64) + **out = **in + } + if in.RegionConcurrencyType != nil { + in, out := &in.RegionConcurrencyType, &out.RegionConcurrencyType + *out = new(string) + **out = **in + } + if in.RegionOrder != nil { + in, out := &in.RegionOrder, &out.RegionOrder + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstanceOperationPreferencesInitParameters. +func (in *StackSetInstanceOperationPreferencesInitParameters) DeepCopy() *StackSetInstanceOperationPreferencesInitParameters { + if in == nil { + return nil + } + out := new(StackSetInstanceOperationPreferencesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StackSetInstanceOperationPreferencesObservation) DeepCopyInto(out *StackSetInstanceOperationPreferencesObservation) { + *out = *in + if in.FailureToleranceCount != nil { + in, out := &in.FailureToleranceCount, &out.FailureToleranceCount + *out = new(float64) + **out = **in + } + if in.FailureTolerancePercentage != nil { + in, out := &in.FailureTolerancePercentage, &out.FailureTolerancePercentage + *out = new(float64) + **out = **in + } + if in.MaxConcurrentCount != nil { + in, out := &in.MaxConcurrentCount, &out.MaxConcurrentCount + *out = new(float64) + **out = **in + } + if in.MaxConcurrentPercentage != nil { + in, out := &in.MaxConcurrentPercentage, &out.MaxConcurrentPercentage + *out = new(float64) + **out = **in + } + if in.RegionConcurrencyType != nil { + in, out := &in.RegionConcurrencyType, &out.RegionConcurrencyType + *out = new(string) + **out = **in + } + if in.RegionOrder != nil { + in, out := &in.RegionOrder, &out.RegionOrder + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstanceOperationPreferencesObservation. +func (in *StackSetInstanceOperationPreferencesObservation) DeepCopy() *StackSetInstanceOperationPreferencesObservation { + if in == nil { + return nil + } + out := new(StackSetInstanceOperationPreferencesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StackSetInstanceOperationPreferencesParameters) DeepCopyInto(out *StackSetInstanceOperationPreferencesParameters) { + *out = *in + if in.FailureToleranceCount != nil { + in, out := &in.FailureToleranceCount, &out.FailureToleranceCount + *out = new(float64) + **out = **in + } + if in.FailureTolerancePercentage != nil { + in, out := &in.FailureTolerancePercentage, &out.FailureTolerancePercentage + *out = new(float64) + **out = **in + } + if in.MaxConcurrentCount != nil { + in, out := &in.MaxConcurrentCount, &out.MaxConcurrentCount + *out = new(float64) + **out = **in + } + if in.MaxConcurrentPercentage != nil { + in, out := &in.MaxConcurrentPercentage, &out.MaxConcurrentPercentage + *out = new(float64) + **out = **in + } + if in.RegionConcurrencyType != nil { + in, out := &in.RegionConcurrencyType, &out.RegionConcurrencyType + *out = new(string) + **out = **in + } + if in.RegionOrder != nil { + in, out := &in.RegionOrder, &out.RegionOrder + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstanceOperationPreferencesParameters. +func (in *StackSetInstanceOperationPreferencesParameters) DeepCopy() *StackSetInstanceOperationPreferencesParameters { + if in == nil { + return nil + } + out := new(StackSetInstanceOperationPreferencesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StackSetInstanceParameters) DeepCopyInto(out *StackSetInstanceParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.CallAs != nil { + in, out := &in.CallAs, &out.CallAs + *out = new(string) + **out = **in + } + if in.DeploymentTargets != nil { + in, out := &in.DeploymentTargets, &out.DeploymentTargets + *out = new(DeploymentTargetsParameters) + (*in).DeepCopyInto(*out) + } + if in.OperationPreferences != nil { + in, out := &in.OperationPreferences, &out.OperationPreferences + *out = new(StackSetInstanceOperationPreferencesParameters) + (*in).DeepCopyInto(*out) + } + if in.ParameterOverrides != nil { + in, out := &in.ParameterOverrides, &out.ParameterOverrides + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RetainStack != nil { + in, out := &in.RetainStack, &out.RetainStack + *out = new(bool) + **out = **in + } + if in.StackSetName != nil { + in, out := &in.StackSetName, &out.StackSetName + *out = new(string) + **out = **in + } + if in.StackSetNameRef != nil { + in, out := &in.StackSetNameRef, &out.StackSetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackSetNameSelector != nil { + in, out := &in.StackSetNameSelector, &out.StackSetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstanceParameters. 
+func (in *StackSetInstanceParameters) DeepCopy() *StackSetInstanceParameters { + if in == nil { + return nil + } + out := new(StackSetInstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSetInstanceSpec) DeepCopyInto(out *StackSetInstanceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstanceSpec. +func (in *StackSetInstanceSpec) DeepCopy() *StackSetInstanceSpec { + if in == nil { + return nil + } + out := new(StackSetInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSetInstanceStatus) DeepCopyInto(out *StackSetInstanceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetInstanceStatus. +func (in *StackSetInstanceStatus) DeepCopy() *StackSetInstanceStatus { + if in == nil { + return nil + } + out := new(StackSetInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSetList) DeepCopyInto(out *StackSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StackSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetList. 
+func (in *StackSetList) DeepCopy() *StackSetList { + if in == nil { + return nil + } + out := new(StackSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StackSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSetObservation) DeepCopyInto(out *StackSetObservation) { + *out = *in + if in.AdministrationRoleArn != nil { + in, out := &in.AdministrationRoleArn, &out.AdministrationRoleArn + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoDeployment != nil { + in, out := &in.AutoDeployment, &out.AutoDeployment + *out = new(AutoDeploymentObservation) + (*in).DeepCopyInto(*out) + } + if in.CallAs != nil { + in, out := &in.CallAs, &out.CallAs + *out = new(string) + **out = **in + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionRoleName != nil { + in, out := &in.ExecutionRoleName, &out.ExecutionRoleName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ManagedExecution != nil { + in, out := &in.ManagedExecution, &out.ManagedExecution + *out = new(ManagedExecutionObservation) + (*in).DeepCopyInto(*out) + } + if in.OperationPreferences != nil { + in, out := &in.OperationPreferences, &out.OperationPreferences + *out = new(OperationPreferencesObservation) + 
(*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PermissionModel != nil { + in, out := &in.PermissionModel, &out.PermissionModel + *out = new(string) + **out = **in + } + if in.StackSetID != nil { + in, out := &in.StackSetID, &out.StackSetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemplateBody != nil { + in, out := &in.TemplateBody, &out.TemplateBody + *out = new(string) + **out = **in + } + if in.TemplateURL != nil { + in, out := &in.TemplateURL, &out.TemplateURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetObservation. +func (in *StackSetObservation) DeepCopy() *StackSetObservation { + if in == nil { + return nil + } + out := new(StackSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StackSetParameters) DeepCopyInto(out *StackSetParameters) { + *out = *in + if in.AdministrationRoleArn != nil { + in, out := &in.AdministrationRoleArn, &out.AdministrationRoleArn + *out = new(string) + **out = **in + } + if in.AdministrationRoleArnRef != nil { + in, out := &in.AdministrationRoleArnRef, &out.AdministrationRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AdministrationRoleArnSelector != nil { + in, out := &in.AdministrationRoleArnSelector, &out.AdministrationRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AutoDeployment != nil { + in, out := &in.AutoDeployment, &out.AutoDeployment + *out = new(AutoDeploymentParameters) + (*in).DeepCopyInto(*out) + } + if in.CallAs != nil { + in, out := &in.CallAs, &out.CallAs + *out = new(string) + **out = **in + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionRoleName != nil { + in, out := &in.ExecutionRoleName, &out.ExecutionRoleName + *out = new(string) + **out = **in + } + if in.ManagedExecution != nil { + in, out := &in.ManagedExecution, &out.ManagedExecution + *out = new(ManagedExecutionParameters) + (*in).DeepCopyInto(*out) + } + if in.OperationPreferences != nil { + in, out := &in.OperationPreferences, &out.OperationPreferences + *out = new(OperationPreferencesParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } + if in.PermissionModel != nil { + in, out := &in.PermissionModel, &out.PermissionModel + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemplateBody != nil { + in, out := &in.TemplateBody, &out.TemplateBody + *out = new(string) + **out = **in + } + if in.TemplateURL != nil { + in, out := &in.TemplateURL, &out.TemplateURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetParameters. +func (in *StackSetParameters) DeepCopy() *StackSetParameters { + if in == nil { + return nil + } + out := new(StackSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSetSpec) DeepCopyInto(out *StackSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetSpec. +func (in *StackSetSpec) DeepCopy() *StackSetSpec { + if in == nil { + return nil + } + out := new(StackSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StackSetStatus) DeepCopyInto(out *StackSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSetStatus. +func (in *StackSetStatus) DeepCopy() *StackSetStatus { + if in == nil { + return nil + } + out := new(StackSetStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cloudformation/v1beta2/zz_generated.managed.go b/apis/cloudformation/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..93a4ec6e5f --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this StackSet. +func (mg *StackSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StackSet. +func (mg *StackSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StackSet. +func (mg *StackSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StackSet. +func (mg *StackSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StackSet. +func (mg *StackSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StackSet. +func (mg *StackSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StackSet. 
+func (mg *StackSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StackSet. +func (mg *StackSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StackSet. +func (mg *StackSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StackSet. +func (mg *StackSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StackSet. +func (mg *StackSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StackSet. +func (mg *StackSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this StackSetInstance. +func (mg *StackSetInstance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StackSetInstance. +func (mg *StackSetInstance) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StackSetInstance. +func (mg *StackSetInstance) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StackSetInstance. +func (mg *StackSetInstance) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StackSetInstance. +func (mg *StackSetInstance) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StackSetInstance. 
+func (mg *StackSetInstance) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StackSetInstance. +func (mg *StackSetInstance) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StackSetInstance. +func (mg *StackSetInstance) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StackSetInstance. +func (mg *StackSetInstance) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StackSetInstance. +func (mg *StackSetInstance) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StackSetInstance. +func (mg *StackSetInstance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StackSetInstance. +func (mg *StackSetInstance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cloudformation/v1beta2/zz_generated.managedlist.go b/apis/cloudformation/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..2333585844 --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this StackSetInstanceList. +func (l *StackSetInstanceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StackSetList. 
+func (l *StackSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cloudformation/v1beta2/zz_generated.resolvers.go b/apis/cloudformation/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..57af761a5f --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,119 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this StackSet. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *StackSet) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AdministrationRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.AdministrationRoleArnRef, + Selector: mg.Spec.ForProvider.AdministrationRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AdministrationRoleArn") + } + mg.Spec.ForProvider.AdministrationRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AdministrationRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AdministrationRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.AdministrationRoleArnRef, + Selector: mg.Spec.InitProvider.AdministrationRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AdministrationRoleArn") + } + mg.Spec.InitProvider.AdministrationRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AdministrationRoleArnRef = rsp.ResolvedReference + 
+ return nil +} + +// ResolveReferences of this StackSetInstance. +func (mg *StackSetInstance) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cloudformation.aws.upbound.io", "v1beta2", "StackSet", "StackSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackSetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StackSetNameRef, + Selector: mg.Spec.ForProvider.StackSetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackSetName") + } + mg.Spec.ForProvider.StackSetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackSetNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cloudformation.aws.upbound.io", "v1beta2", "StackSet", "StackSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackSetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StackSetNameRef, + Selector: mg.Spec.InitProvider.StackSetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackSetName") + } + mg.Spec.InitProvider.StackSetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackSetNameRef = rsp.ResolvedReference + + return nil +} diff --git 
a/apis/cloudformation/v1beta2/zz_groupversion_info.go b/apis/cloudformation/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..8d8f6b1b83 --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cloudformation.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "cloudformation.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cloudformation/v1beta2/zz_stackset_terraformed.go b/apis/cloudformation/v1beta2/zz_stackset_terraformed.go new file mode 100755 index 0000000000..e2033137f4 --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_stackset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StackSet +func (mg *StackSet) GetTerraformResourceType() string { + return "aws_cloudformation_stack_set" +} + +// GetConnectionDetailsMapping for this StackSet +func (tr *StackSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this StackSet +func (tr *StackSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StackSet +func (tr *StackSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StackSet +func (tr *StackSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this StackSet +func (tr *StackSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this StackSet +func (tr *StackSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this StackSet +func (tr *StackSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this StackSet +func (tr *StackSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StackSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StackSet) LateInitialize(attrs []byte) (bool, error) { + params := &StackSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StackSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudformation/v1beta2/zz_stackset_types.go b/apis/cloudformation/v1beta2/zz_stackset_types.go new file mode 100755 index 0000000000..8149304859 --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_stackset_types.go @@ -0,0 +1,377 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoDeploymentInitParameters struct { + + // Whether or not auto-deployment is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether or not to retain stacks when the account is removed. + RetainStacksOnAccountRemoval *bool `json:"retainStacksOnAccountRemoval,omitempty" tf:"retain_stacks_on_account_removal,omitempty"` +} + +type AutoDeploymentObservation struct { + + // Whether or not auto-deployment is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether or not to retain stacks when the account is removed. + RetainStacksOnAccountRemoval *bool `json:"retainStacksOnAccountRemoval,omitempty" tf:"retain_stacks_on_account_removal,omitempty"` +} + +type AutoDeploymentParameters struct { + + // Whether or not auto-deployment is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether or not to retain stacks when the account is removed. 
+ // +kubebuilder:validation:Optional + RetainStacksOnAccountRemoval *bool `json:"retainStacksOnAccountRemoval,omitempty" tf:"retain_stacks_on_account_removal,omitempty"` +} + +type ManagedExecutionInitParameters struct { + + // When set to true, StackSets performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, StackSets starts queued operations in request order. Default is false. + Active *bool `json:"active,omitempty" tf:"active,omitempty"` +} + +type ManagedExecutionObservation struct { + + // When set to true, StackSets performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, StackSets starts queued operations in request order. Default is false. + Active *bool `json:"active,omitempty" tf:"active,omitempty"` +} + +type ManagedExecutionParameters struct { + + // When set to true, StackSets performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, StackSets starts queued operations in request order. Default is false. + // +kubebuilder:validation:Optional + Active *bool `json:"active,omitempty" tf:"active,omitempty"` +} + +type OperationPreferencesInitParameters struct { + + // The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. + FailureToleranceCount *float64 `json:"failureToleranceCount,omitempty" tf:"failure_tolerance_count,omitempty"` + + // The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. + FailureTolerancePercentage *float64 `json:"failureTolerancePercentage,omitempty" tf:"failure_tolerance_percentage,omitempty"` + + // The maximum number of accounts in which to perform this operation at one time. 
+ MaxConcurrentCount *float64 `json:"maxConcurrentCount,omitempty" tf:"max_concurrent_count,omitempty"` + + // The maximum percentage of accounts in which to perform this operation at one time. + MaxConcurrentPercentage *float64 `json:"maxConcurrentPercentage,omitempty" tf:"max_concurrent_percentage,omitempty"` + + // The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. + RegionConcurrencyType *string `json:"regionConcurrencyType,omitempty" tf:"region_concurrency_type,omitempty"` + + // The order of the Regions in where you want to perform the stack operation. + RegionOrder []*string `json:"regionOrder,omitempty" tf:"region_order,omitempty"` +} + +type OperationPreferencesObservation struct { + + // The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. + FailureToleranceCount *float64 `json:"failureToleranceCount,omitempty" tf:"failure_tolerance_count,omitempty"` + + // The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. + FailureTolerancePercentage *float64 `json:"failureTolerancePercentage,omitempty" tf:"failure_tolerance_percentage,omitempty"` + + // The maximum number of accounts in which to perform this operation at one time. + MaxConcurrentCount *float64 `json:"maxConcurrentCount,omitempty" tf:"max_concurrent_count,omitempty"` + + // The maximum percentage of accounts in which to perform this operation at one time. + MaxConcurrentPercentage *float64 `json:"maxConcurrentPercentage,omitempty" tf:"max_concurrent_percentage,omitempty"` + + // The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. 
+ RegionConcurrencyType *string `json:"regionConcurrencyType,omitempty" tf:"region_concurrency_type,omitempty"` + + // The order of the Regions in where you want to perform the stack operation. + RegionOrder []*string `json:"regionOrder,omitempty" tf:"region_order,omitempty"` +} + +type OperationPreferencesParameters struct { + + // The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. + // +kubebuilder:validation:Optional + FailureToleranceCount *float64 `json:"failureToleranceCount,omitempty" tf:"failure_tolerance_count,omitempty"` + + // The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. + // +kubebuilder:validation:Optional + FailureTolerancePercentage *float64 `json:"failureTolerancePercentage,omitempty" tf:"failure_tolerance_percentage,omitempty"` + + // The maximum number of accounts in which to perform this operation at one time. + // +kubebuilder:validation:Optional + MaxConcurrentCount *float64 `json:"maxConcurrentCount,omitempty" tf:"max_concurrent_count,omitempty"` + + // The maximum percentage of accounts in which to perform this operation at one time. + // +kubebuilder:validation:Optional + MaxConcurrentPercentage *float64 `json:"maxConcurrentPercentage,omitempty" tf:"max_concurrent_percentage,omitempty"` + + // The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. + // +kubebuilder:validation:Optional + RegionConcurrencyType *string `json:"regionConcurrencyType,omitempty" tf:"region_concurrency_type,omitempty"` + + // The order of the Regions in where you want to perform the stack operation. 
+ // +kubebuilder:validation:Optional + RegionOrder []*string `json:"regionOrder,omitempty" tf:"region_order,omitempty"` +} + +type StackSetInitParameters struct { + + // Amazon Resource Number (ARN) of the IAM Role in the administrator account. This must be defined when using the SELF_MANAGED permission model. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + AdministrationRoleArn *string `json:"administrationRoleArn,omitempty" tf:"administration_role_arn,omitempty"` + + // Reference to a Role in iam to populate administrationRoleArn. + // +kubebuilder:validation:Optional + AdministrationRoleArnRef *v1.Reference `json:"administrationRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate administrationRoleArn. + // +kubebuilder:validation:Optional + AdministrationRoleArnSelector *v1.Selector `json:"administrationRoleArnSelector,omitempty" tf:"-"` + + // Configuration block containing the auto-deployment model for your StackSet. This can only be defined when using the SERVICE_MANAGED permission model. + AutoDeployment *AutoDeploymentInitParameters `json:"autoDeployment,omitempty" tf:"auto_deployment,omitempty"` + + // Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: SELF (default), DELEGATED_ADMIN. + CallAs *string `json:"callAs,omitempty" tf:"call_as,omitempty"` + + // A list of capabilities. Valid values: CAPABILITY_IAM, CAPABILITY_NAMED_IAM, CAPABILITY_AUTO_EXPAND. + // +listType=set + Capabilities []*string `json:"capabilities,omitempty" tf:"capabilities,omitempty"` + + // Description of the StackSet. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the IAM Role in all target accounts for StackSet operations. 
Defaults to AWSCloudFormationStackSetExecutionRole when using the SELF_MANAGED permission model. This should not be defined when using the SERVICE_MANAGED permission model. + ExecutionRoleName *string `json:"executionRoleName,omitempty" tf:"execution_role_name,omitempty"` + + // Configuration block to allow StackSets to perform non-conflicting operations concurrently and queues conflicting operations. + ManagedExecution *ManagedExecutionInitParameters `json:"managedExecution,omitempty" tf:"managed_execution,omitempty"` + + // Preferences for how AWS CloudFormation performs a stack set update. + OperationPreferences *OperationPreferencesInitParameters `json:"operationPreferences,omitempty" tf:"operation_preferences,omitempty"` + + // Key-value map of input parameters for the StackSet template. All template parameters, including those with a Default, must be configured or ignored with lifecycle configuration block ignore_changes argument. All NoEcho template parameters must be ignored with the lifecycle configuration block ignore_changes argument. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Describes how the IAM roles required for your StackSet are created. Valid values: SELF_MANAGED (default), SERVICE_MANAGED. + PermissionModel *string `json:"permissionModel,omitempty" tf:"permission_model,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with template_url. + TemplateBody *string `json:"templateBody,omitempty" tf:"template_body,omitempty"` + + // String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with template_body. 
+ TemplateURL *string `json:"templateUrl,omitempty" tf:"template_url,omitempty"` +} + +type StackSetObservation struct { + + // Amazon Resource Number (ARN) of the IAM Role in the administrator account. This must be defined when using the SELF_MANAGED permission model. + AdministrationRoleArn *string `json:"administrationRoleArn,omitempty" tf:"administration_role_arn,omitempty"` + + // Amazon Resource Name (ARN) of the StackSet. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration block containing the auto-deployment model for your StackSet. This can only be defined when using the SERVICE_MANAGED permission model. + AutoDeployment *AutoDeploymentObservation `json:"autoDeployment,omitempty" tf:"auto_deployment,omitempty"` + + // Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: SELF (default), DELEGATED_ADMIN. + CallAs *string `json:"callAs,omitempty" tf:"call_as,omitempty"` + + // A list of capabilities. Valid values: CAPABILITY_IAM, CAPABILITY_NAMED_IAM, CAPABILITY_AUTO_EXPAND. + // +listType=set + Capabilities []*string `json:"capabilities,omitempty" tf:"capabilities,omitempty"` + + // Description of the StackSet. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the IAM Role in all target accounts for StackSet operations. Defaults to AWSCloudFormationStackSetExecutionRole when using the SELF_MANAGED permission model. This should not be defined when using the SERVICE_MANAGED permission model. + ExecutionRoleName *string `json:"executionRoleName,omitempty" tf:"execution_role_name,omitempty"` + + // Name of the StackSet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block to allow StackSets to perform non-conflicting operations concurrently and queues conflicting operations. 
+ ManagedExecution *ManagedExecutionObservation `json:"managedExecution,omitempty" tf:"managed_execution,omitempty"` + + // Preferences for how AWS CloudFormation performs a stack set update. + OperationPreferences *OperationPreferencesObservation `json:"operationPreferences,omitempty" tf:"operation_preferences,omitempty"` + + // Key-value map of input parameters for the StackSet template. All template parameters, including those with a Default, must be configured or ignored with lifecycle configuration block ignore_changes argument. All NoEcho template parameters must be ignored with the lifecycle configuration block ignore_changes argument. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Describes how the IAM roles required for your StackSet are created. Valid values: SELF_MANAGED (default), SERVICE_MANAGED. + PermissionModel *string `json:"permissionModel,omitempty" tf:"permission_model,omitempty"` + + // Unique identifier of the StackSet. + StackSetID *string `json:"stackSetId,omitempty" tf:"stack_set_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with template_url. + TemplateBody *string `json:"templateBody,omitempty" tf:"template_body,omitempty"` + + // String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with template_body. 
+ TemplateURL *string `json:"templateUrl,omitempty" tf:"template_url,omitempty"` +} + +type StackSetParameters struct { + + // Amazon Resource Number (ARN) of the IAM Role in the administrator account. This must be defined when using the SELF_MANAGED permission model. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + AdministrationRoleArn *string `json:"administrationRoleArn,omitempty" tf:"administration_role_arn,omitempty"` + + // Reference to a Role in iam to populate administrationRoleArn. + // +kubebuilder:validation:Optional + AdministrationRoleArnRef *v1.Reference `json:"administrationRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate administrationRoleArn. + // +kubebuilder:validation:Optional + AdministrationRoleArnSelector *v1.Selector `json:"administrationRoleArnSelector,omitempty" tf:"-"` + + // Configuration block containing the auto-deployment model for your StackSet. This can only be defined when using the SERVICE_MANAGED permission model. + // +kubebuilder:validation:Optional + AutoDeployment *AutoDeploymentParameters `json:"autoDeployment,omitempty" tf:"auto_deployment,omitempty"` + + // Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: SELF (default), DELEGATED_ADMIN. + // +kubebuilder:validation:Optional + CallAs *string `json:"callAs,omitempty" tf:"call_as,omitempty"` + + // A list of capabilities. Valid values: CAPABILITY_IAM, CAPABILITY_NAMED_IAM, CAPABILITY_AUTO_EXPAND. + // +kubebuilder:validation:Optional + // +listType=set + Capabilities []*string `json:"capabilities,omitempty" tf:"capabilities,omitempty"` + + // Description of the StackSet. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the IAM Role in all target accounts for StackSet operations. Defaults to AWSCloudFormationStackSetExecutionRole when using the SELF_MANAGED permission model. This should not be defined when using the SERVICE_MANAGED permission model. + // +kubebuilder:validation:Optional + ExecutionRoleName *string `json:"executionRoleName,omitempty" tf:"execution_role_name,omitempty"` + + // Configuration block to allow StackSets to perform non-conflicting operations concurrently and queues conflicting operations. + // +kubebuilder:validation:Optional + ManagedExecution *ManagedExecutionParameters `json:"managedExecution,omitempty" tf:"managed_execution,omitempty"` + + // Preferences for how AWS CloudFormation performs a stack set update. + // +kubebuilder:validation:Optional + OperationPreferences *OperationPreferencesParameters `json:"operationPreferences,omitempty" tf:"operation_preferences,omitempty"` + + // Key-value map of input parameters for the StackSet template. All template parameters, including those with a Default, must be configured or ignored with lifecycle configuration block ignore_changes argument. All NoEcho template parameters must be ignored with the lifecycle configuration block ignore_changes argument. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Describes how the IAM roles required for your StackSet are created. Valid values: SELF_MANAGED (default), SERVICE_MANAGED. + // +kubebuilder:validation:Optional + PermissionModel *string `json:"permissionModel,omitempty" tf:"permission_model,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with template_url. + // +kubebuilder:validation:Optional + TemplateBody *string `json:"templateBody,omitempty" tf:"template_body,omitempty"` + + // String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with template_body. + // +kubebuilder:validation:Optional + TemplateURL *string `json:"templateUrl,omitempty" tf:"template_url,omitempty"` +} + +// StackSetSpec defines the desired state of StackSet +type StackSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StackSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StackSetInitParameters `json:"initProvider,omitempty"` +} + +// StackSetStatus defines the observed state of StackSet. +type StackSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StackSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StackSet is the Schema for the StackSets API. Manages a CloudFormation StackSet. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type StackSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec StackSetSpec `json:"spec"` + Status StackSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StackSetList contains a list of StackSets +type StackSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StackSet `json:"items"` +} + +// Repository type metadata. +var ( + StackSet_Kind = "StackSet" + StackSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StackSet_Kind}.String() + StackSet_KindAPIVersion = StackSet_Kind + "." + CRDGroupVersion.String() + StackSet_GroupVersionKind = CRDGroupVersion.WithKind(StackSet_Kind) +) + +func init() { + SchemeBuilder.Register(&StackSet{}, &StackSetList{}) +} diff --git a/apis/cloudformation/v1beta2/zz_stacksetinstance_terraformed.go b/apis/cloudformation/v1beta2/zz_stacksetinstance_terraformed.go new file mode 100755 index 0000000000..9b0d345fb5 --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_stacksetinstance_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StackSetInstance +func (mg *StackSetInstance) GetTerraformResourceType() string { + return "aws_cloudformation_stack_set_instance" +} + +// GetConnectionDetailsMapping for this StackSetInstance +func (tr *StackSetInstance) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this StackSetInstance +func (tr *StackSetInstance) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StackSetInstance +func (tr *StackSetInstance) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StackSetInstance +func (tr *StackSetInstance) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this StackSetInstance +func (tr *StackSetInstance) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this StackSetInstance +func (tr *StackSetInstance) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this StackSetInstance +func (tr *StackSetInstance) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this StackSetInstance +func (tr *StackSetInstance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StackSetInstance using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StackSetInstance) LateInitialize(attrs []byte) (bool, error) { + params := &StackSetInstanceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StackSetInstance) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudformation/v1beta2/zz_stacksetinstance_types.go b/apis/cloudformation/v1beta2/zz_stacksetinstance_types.go new file mode 100755 index 0000000000..96141e738a --- /dev/null +++ b/apis/cloudformation/v1beta2/zz_stacksetinstance_types.go @@ -0,0 +1,302 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DeploymentTargetsInitParameters struct { + + // The organization root ID or organizational unit (OU) IDs to which StackSets deploys. + // +listType=set + OrganizationalUnitIds []*string `json:"organizationalUnitIds,omitempty" tf:"organizational_unit_ids,omitempty"` +} + +type DeploymentTargetsObservation struct { + + // The organization root ID or organizational unit (OU) IDs to which StackSets deploys. + // +listType=set + OrganizationalUnitIds []*string `json:"organizationalUnitIds,omitempty" tf:"organizational_unit_ids,omitempty"` +} + +type DeploymentTargetsParameters struct { + + // The organization root ID or organizational unit (OU) IDs to which StackSets deploys. + // +kubebuilder:validation:Optional + // +listType=set + OrganizationalUnitIds []*string `json:"organizationalUnitIds,omitempty" tf:"organizational_unit_ids,omitempty"` +} + +type StackInstanceSummariesInitParameters struct { +} + +type StackInstanceSummariesObservation struct { + + // Target AWS Account ID to create a Stack based on the StackSet. Defaults to current account. 
+ AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The organization root ID or organizational unit (OU) ID in which the stack is deployed. + OrganizationalUnitID *string `json:"organizationalUnitId,omitempty" tf:"organizational_unit_id,omitempty"` + + // Stack identifier. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` +} + +type StackInstanceSummariesParameters struct { +} + +type StackSetInstanceInitParameters struct { + + // Target AWS Account ID to create a Stack based on the StackSet. Defaults to current account. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: SELF (default), DELEGATED_ADMIN. + CallAs *string `json:"callAs,omitempty" tf:"call_as,omitempty"` + + // The AWS Organizations accounts to which StackSets deploys. StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. See deployment_targets below. + DeploymentTargets *DeploymentTargetsInitParameters `json:"deploymentTargets,omitempty" tf:"deployment_targets,omitempty"` + + // Preferences for how AWS CloudFormation performs a stack set operation. + OperationPreferences *StackSetInstanceOperationPreferencesInitParameters `json:"operationPreferences,omitempty" tf:"operation_preferences,omitempty"` + + // Key-value map of input parameters to override from the StackSet for this Instance. + // +mapType=granular + ParameterOverrides map[string]*string `json:"parameterOverrides,omitempty" tf:"parameter_overrides,omitempty"` + + // You cannot reassociate a retained Stack or add an existing, saved Stack to a new StackSet. Defaults to false. 
+ RetainStack *bool `json:"retainStack,omitempty" tf:"retain_stack,omitempty"` + + // Name of the StackSet. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudformation/v1beta2.StackSet + StackSetName *string `json:"stackSetName,omitempty" tf:"stack_set_name,omitempty"` + + // Reference to a StackSet in cloudformation to populate stackSetName. + // +kubebuilder:validation:Optional + StackSetNameRef *v1.Reference `json:"stackSetNameRef,omitempty" tf:"-"` + + // Selector for a StackSet in cloudformation to populate stackSetName. + // +kubebuilder:validation:Optional + StackSetNameSelector *v1.Selector `json:"stackSetNameSelector,omitempty" tf:"-"` +} + +type StackSetInstanceObservation struct { + + // Target AWS Account ID to create a Stack based on the StackSet. Defaults to current account. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: SELF (default), DELEGATED_ADMIN. + CallAs *string `json:"callAs,omitempty" tf:"call_as,omitempty"` + + // The AWS Organizations accounts to which StackSets deploys. StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. See deployment_targets below. + DeploymentTargets *DeploymentTargetsObservation `json:"deploymentTargets,omitempty" tf:"deployment_targets,omitempty"` + + // Unique identifier for the resource. If deployment_targets is set, this is a comma-delimited string combining stack set name, organizational unit IDs (/-delimited), and region (ie. mystack,ou-123/ou-456,us-east-1). Otherwise, this is a comma-delimited string combining stack set name, AWS account ID, and region (ie. mystack,123456789012,us-east-1). 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Preferences for how AWS CloudFormation performs a stack set operation. + OperationPreferences *StackSetInstanceOperationPreferencesObservation `json:"operationPreferences,omitempty" tf:"operation_preferences,omitempty"` + + // The organization root ID or organizational unit (OU) ID in which the stack is deployed. + OrganizationalUnitID *string `json:"organizationalUnitId,omitempty" tf:"organizational_unit_id,omitempty"` + + // Key-value map of input parameters to override from the StackSet for this Instance. + // +mapType=granular + ParameterOverrides map[string]*string `json:"parameterOverrides,omitempty" tf:"parameter_overrides,omitempty"` + + // You cannot reassociate a retained Stack or add an existing, saved Stack to a new StackSet. Defaults to false. + RetainStack *bool `json:"retainStack,omitempty" tf:"retain_stack,omitempty"` + + // Stack identifier. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // List of stack instances created from an organizational unit deployment target. This will only be populated when deployment_targets is set. See stack_instance_summaries. + // List of stack instances created from an organizational unit deployment target. This will only be populated when `deployment_targets` is set. + StackInstanceSummaries []StackInstanceSummariesObservation `json:"stackInstanceSummaries,omitempty" tf:"stack_instance_summaries,omitempty"` + + // Name of the StackSet. + StackSetName *string `json:"stackSetName,omitempty" tf:"stack_set_name,omitempty"` +} + +type StackSetInstanceOperationPreferencesInitParameters struct { + + // The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. 
+ FailureToleranceCount *float64 `json:"failureToleranceCount,omitempty" tf:"failure_tolerance_count,omitempty"` + + // The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. + FailureTolerancePercentage *float64 `json:"failureTolerancePercentage,omitempty" tf:"failure_tolerance_percentage,omitempty"` + + // The maximum number of accounts in which to perform this operation at one time. + MaxConcurrentCount *float64 `json:"maxConcurrentCount,omitempty" tf:"max_concurrent_count,omitempty"` + + // The maximum percentage of accounts in which to perform this operation at one time. + MaxConcurrentPercentage *float64 `json:"maxConcurrentPercentage,omitempty" tf:"max_concurrent_percentage,omitempty"` + + // The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are SEQUENTIAL and PARALLEL. + RegionConcurrencyType *string `json:"regionConcurrencyType,omitempty" tf:"region_concurrency_type,omitempty"` + + // The order of the Regions in where you want to perform the stack operation. + RegionOrder []*string `json:"regionOrder,omitempty" tf:"region_order,omitempty"` +} + +type StackSetInstanceOperationPreferencesObservation struct { + + // The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. + FailureToleranceCount *float64 `json:"failureToleranceCount,omitempty" tf:"failure_tolerance_count,omitempty"` + + // The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. + FailureTolerancePercentage *float64 `json:"failureTolerancePercentage,omitempty" tf:"failure_tolerance_percentage,omitempty"` + + // The maximum number of accounts in which to perform this operation at one time. 
+ MaxConcurrentCount *float64 `json:"maxConcurrentCount,omitempty" tf:"max_concurrent_count,omitempty"` + + // The maximum percentage of accounts in which to perform this operation at one time. + MaxConcurrentPercentage *float64 `json:"maxConcurrentPercentage,omitempty" tf:"max_concurrent_percentage,omitempty"` + + // The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are SEQUENTIAL and PARALLEL. + RegionConcurrencyType *string `json:"regionConcurrencyType,omitempty" tf:"region_concurrency_type,omitempty"` + + // The order of the Regions in where you want to perform the stack operation. + RegionOrder []*string `json:"regionOrder,omitempty" tf:"region_order,omitempty"` +} + +type StackSetInstanceOperationPreferencesParameters struct { + + // The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. + // +kubebuilder:validation:Optional + FailureToleranceCount *float64 `json:"failureToleranceCount,omitempty" tf:"failure_tolerance_count,omitempty"` + + // The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. + // +kubebuilder:validation:Optional + FailureTolerancePercentage *float64 `json:"failureTolerancePercentage,omitempty" tf:"failure_tolerance_percentage,omitempty"` + + // The maximum number of accounts in which to perform this operation at one time. + // +kubebuilder:validation:Optional + MaxConcurrentCount *float64 `json:"maxConcurrentCount,omitempty" tf:"max_concurrent_count,omitempty"` + + // The maximum percentage of accounts in which to perform this operation at one time. 
+ // +kubebuilder:validation:Optional + MaxConcurrentPercentage *float64 `json:"maxConcurrentPercentage,omitempty" tf:"max_concurrent_percentage,omitempty"` + + // The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are SEQUENTIAL and PARALLEL. + // +kubebuilder:validation:Optional + RegionConcurrencyType *string `json:"regionConcurrencyType,omitempty" tf:"region_concurrency_type,omitempty"` + + // The order of the Regions in where you want to perform the stack operation. + // +kubebuilder:validation:Optional + RegionOrder []*string `json:"regionOrder,omitempty" tf:"region_order,omitempty"` +} + +type StackSetInstanceParameters struct { + + // Target AWS Account ID to create a Stack based on the StackSet. Defaults to current account. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: SELF (default), DELEGATED_ADMIN. + // +kubebuilder:validation:Optional + CallAs *string `json:"callAs,omitempty" tf:"call_as,omitempty"` + + // The AWS Organizations accounts to which StackSets deploys. StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. See deployment_targets below. + // +kubebuilder:validation:Optional + DeploymentTargets *DeploymentTargetsParameters `json:"deploymentTargets,omitempty" tf:"deployment_targets,omitempty"` + + // Preferences for how AWS CloudFormation performs a stack set operation. 
+ // +kubebuilder:validation:Optional + OperationPreferences *StackSetInstanceOperationPreferencesParameters `json:"operationPreferences,omitempty" tf:"operation_preferences,omitempty"` + + // Key-value map of input parameters to override from the StackSet for this Instance. + // +kubebuilder:validation:Optional + // +mapType=granular + ParameterOverrides map[string]*string `json:"parameterOverrides,omitempty" tf:"parameter_overrides,omitempty"` + + // Target AWS Region to create a Stack based on the StackSet. Defaults to current region. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // You cannot reassociate a retained Stack or add an existing, saved Stack to a new StackSet. Defaults to false. + // +kubebuilder:validation:Optional + RetainStack *bool `json:"retainStack,omitempty" tf:"retain_stack,omitempty"` + + // Name of the StackSet. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudformation/v1beta2.StackSet + // +kubebuilder:validation:Optional + StackSetName *string `json:"stackSetName,omitempty" tf:"stack_set_name,omitempty"` + + // Reference to a StackSet in cloudformation to populate stackSetName. + // +kubebuilder:validation:Optional + StackSetNameRef *v1.Reference `json:"stackSetNameRef,omitempty" tf:"-"` + + // Selector for a StackSet in cloudformation to populate stackSetName. + // +kubebuilder:validation:Optional + StackSetNameSelector *v1.Selector `json:"stackSetNameSelector,omitempty" tf:"-"` +} + +// StackSetInstanceSpec defines the desired state of StackSetInstance +type StackSetInstanceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StackSetInstanceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StackSetInstanceInitParameters `json:"initProvider,omitempty"` +} + +// StackSetInstanceStatus defines the observed state of StackSetInstance. +type StackSetInstanceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StackSetInstanceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StackSetInstance is the Schema for the StackSetInstances API. Manages a CloudFormation StackSet Instance. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type StackSetInstance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec StackSetInstanceSpec `json:"spec"` + Status StackSetInstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StackSetInstanceList contains a list of StackSetInstances +type StackSetInstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StackSetInstance `json:"items"` +} + +// Repository type metadata. +var ( + StackSetInstance_Kind = "StackSetInstance" + StackSetInstance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StackSetInstance_Kind}.String() + StackSetInstance_KindAPIVersion = StackSetInstance_Kind + "." + CRDGroupVersion.String() + StackSetInstance_GroupVersionKind = CRDGroupVersion.WithKind(StackSetInstance_Kind) +) + +func init() { + SchemeBuilder.Register(&StackSetInstance{}, &StackSetInstanceList{}) +} diff --git a/apis/cloudfront/v1beta1/zz_generated.conversion_hubs.go b/apis/cloudfront/v1beta1/zz_generated.conversion_hubs.go index 7567db5090..31f1720fca 100755 --- a/apis/cloudfront/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cloudfront/v1beta1/zz_generated.conversion_hubs.go @@ -6,41 +6,17 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *CachePolicy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Distribution) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *FieldLevelEncryptionConfig) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FieldLevelEncryptionProfile) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Function) Hub() {} // Hub marks this type as a conversion hub. func (tr *KeyGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MonitoringSubscription) Hub() {} - // Hub marks this type as a conversion hub. func (tr *OriginAccessControl) Hub() {} // Hub marks this type as a conversion hub. func (tr *OriginAccessIdentity) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *OriginRequestPolicy) Hub() {} - // Hub marks this type as a conversion hub. func (tr *PublicKey) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *RealtimeLogConfig) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ResponseHeadersPolicy) Hub() {} diff --git a/apis/cloudfront/v1beta1/zz_generated.conversion_spokes.go b/apis/cloudfront/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..2dd6f4b3fb --- /dev/null +++ b/apis/cloudfront/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,174 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this CachePolicy to the hub type. 
+func (tr *CachePolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CachePolicy type. +func (tr *CachePolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Distribution to the hub type. +func (tr *Distribution) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Distribution type. +func (tr *Distribution) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FieldLevelEncryptionConfig to the hub type. 
+func (tr *FieldLevelEncryptionConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FieldLevelEncryptionConfig type. +func (tr *FieldLevelEncryptionConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FieldLevelEncryptionProfile to the hub type. +func (tr *FieldLevelEncryptionProfile) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FieldLevelEncryptionProfile type. 
+func (tr *FieldLevelEncryptionProfile) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitoringSubscription to the hub type. +func (tr *MonitoringSubscription) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitoringSubscription type. +func (tr *MonitoringSubscription) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this OriginRequestPolicy to the hub type. 
+func (tr *OriginRequestPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the OriginRequestPolicy type. +func (tr *OriginRequestPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this RealtimeLogConfig to the hub type. +func (tr *RealtimeLogConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RealtimeLogConfig type. +func (tr *RealtimeLogConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ResponseHeadersPolicy to the hub type. 
+func (tr *ResponseHeadersPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ResponseHeadersPolicy type. +func (tr *ResponseHeadersPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cloudfront/v1beta2/zz_cachepolicy_terraformed.go b/apis/cloudfront/v1beta2/zz_cachepolicy_terraformed.go new file mode 100755 index 0000000000..9eb6fb2044 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_cachepolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CachePolicy +func (mg *CachePolicy) GetTerraformResourceType() string { + return "aws_cloudfront_cache_policy" +} + +// GetConnectionDetailsMapping for this CachePolicy +func (tr *CachePolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CachePolicy +func (tr *CachePolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CachePolicy +func (tr *CachePolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CachePolicy +func (tr *CachePolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CachePolicy +func (tr *CachePolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CachePolicy +func (tr *CachePolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CachePolicy +func (tr *CachePolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CachePolicy +func (tr *CachePolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CachePolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CachePolicy) LateInitialize(attrs []byte) (bool, error) { + params := &CachePolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CachePolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudfront/v1beta2/zz_cachepolicy_types.go b/apis/cloudfront/v1beta2/zz_cachepolicy_types.go new file mode 100755 index 0000000000..c216e5db59 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_cachepolicy_types.go @@ -0,0 +1,368 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CachePolicyInitParameters struct { + + // Description for the cache policy. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Amount of time, in seconds, that objects are allowed to remain in the CloudFront cache before CloudFront sends a new request to the origin server to check if the object has been updated. + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // Maximum amount of time, in seconds, that objects stay in the CloudFront cache before CloudFront sends another request to the origin to see if the object has been updated. + MaxTTL *float64 `json:"maxTtl,omitempty" tf:"max_ttl,omitempty"` + + // Minimum amount of time, in seconds, that objects should remain in the CloudFront cache before a new request is sent to the origin to check for updates. + MinTTL *float64 `json:"minTtl,omitempty" tf:"min_ttl,omitempty"` + + // Unique name used to identify the cache policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration for including HTTP headers, cookies, and URL query strings in the cache key. For more information, refer to the Parameters In Cache Key And Forwarded To Origin section. 
+ ParametersInCacheKeyAndForwardedToOrigin *ParametersInCacheKeyAndForwardedToOriginInitParameters `json:"parametersInCacheKeyAndForwardedToOrigin,omitempty" tf:"parameters_in_cache_key_and_forwarded_to_origin,omitempty"` +} + +type CachePolicyObservation struct { + + // Description for the cache policy. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Amount of time, in seconds, that objects are allowed to remain in the CloudFront cache before CloudFront sends a new request to the origin server to check if the object has been updated. + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // Current version of the cache policy. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // Identifier for the cache policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Maximum amount of time, in seconds, that objects stay in the CloudFront cache before CloudFront sends another request to the origin to see if the object has been updated. + MaxTTL *float64 `json:"maxTtl,omitempty" tf:"max_ttl,omitempty"` + + // Minimum amount of time, in seconds, that objects should remain in the CloudFront cache before a new request is sent to the origin to check for updates. + MinTTL *float64 `json:"minTtl,omitempty" tf:"min_ttl,omitempty"` + + // Unique name used to identify the cache policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration for including HTTP headers, cookies, and URL query strings in the cache key. For more information, refer to the Parameters In Cache Key And Forwarded To Origin section. + ParametersInCacheKeyAndForwardedToOrigin *ParametersInCacheKeyAndForwardedToOriginObservation `json:"parametersInCacheKeyAndForwardedToOrigin,omitempty" tf:"parameters_in_cache_key_and_forwarded_to_origin,omitempty"` +} + +type CachePolicyParameters struct { + + // Description for the cache policy. 
+ // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Amount of time, in seconds, that objects are allowed to remain in the CloudFront cache before CloudFront sends a new request to the origin server to check if the object has been updated. + // +kubebuilder:validation:Optional + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // Maximum amount of time, in seconds, that objects stay in the CloudFront cache before CloudFront sends another request to the origin to see if the object has been updated. + // +kubebuilder:validation:Optional + MaxTTL *float64 `json:"maxTtl,omitempty" tf:"max_ttl,omitempty"` + + // Minimum amount of time, in seconds, that objects should remain in the CloudFront cache before a new request is sent to the origin to check for updates. + // +kubebuilder:validation:Optional + MinTTL *float64 `json:"minTtl,omitempty" tf:"min_ttl,omitempty"` + + // Unique name used to identify the cache policy. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration for including HTTP headers, cookies, and URL query strings in the cache key. For more information, refer to the Parameters In Cache Key And Forwarded To Origin section. + // +kubebuilder:validation:Optional + ParametersInCacheKeyAndForwardedToOrigin *ParametersInCacheKeyAndForwardedToOriginParameters `json:"parametersInCacheKeyAndForwardedToOrigin,omitempty" tf:"parameters_in_cache_key_and_forwarded_to_origin,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type CookiesConfigInitParameters struct { + + // Whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. 
Valid values for cookie_behavior are none, whitelist, allExcept, and all. + CookieBehavior *string `json:"cookieBehavior,omitempty" tf:"cookie_behavior,omitempty"` + + // Object that contains a list of cookie names. See Items for more information. + Cookies *CookiesInitParameters `json:"cookies,omitempty" tf:"cookies,omitempty"` +} + +type CookiesConfigObservation struct { + + // Whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values for cookie_behavior are none, whitelist, allExcept, and all. + CookieBehavior *string `json:"cookieBehavior,omitempty" tf:"cookie_behavior,omitempty"` + + // Object that contains a list of cookie names. See Items for more information. + Cookies *CookiesObservation `json:"cookies,omitempty" tf:"cookies,omitempty"` +} + +type CookiesConfigParameters struct { + + // Whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values for cookie_behavior are none, whitelist, allExcept, and all. + // +kubebuilder:validation:Optional + CookieBehavior *string `json:"cookieBehavior" tf:"cookie_behavior,omitempty"` + + // Object that contains a list of cookie names. See Items for more information. + // +kubebuilder:validation:Optional + Cookies *CookiesParameters `json:"cookies,omitempty" tf:"cookies,omitempty"` +} + +type CookiesInitParameters struct { + + // List of item names, such as cookies, headers, or query strings. + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type CookiesObservation struct { + + // List of item names, such as cookies, headers, or query strings. + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type CookiesParameters struct { + + // List of item names, such as cookies, headers, or query strings. 
+ // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type HeadersConfigInitParameters struct { + + // Whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values for header_behavior are none and whitelist. + HeaderBehavior *string `json:"headerBehavior,omitempty" tf:"header_behavior,omitempty"` + + // Object contains a list of header names. See Items for more information. + Headers *HeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` +} + +type HeadersConfigObservation struct { + + // Whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values for header_behavior are none and whitelist. + HeaderBehavior *string `json:"headerBehavior,omitempty" tf:"header_behavior,omitempty"` + + // Object contains a list of header names. See Items for more information. + Headers *HeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` +} + +type HeadersConfigParameters struct { + + // Whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values for header_behavior are none and whitelist. + // +kubebuilder:validation:Optional + HeaderBehavior *string `json:"headerBehavior,omitempty" tf:"header_behavior,omitempty"` + + // Object contains a list of header names. See Items for more information. + // +kubebuilder:validation:Optional + Headers *HeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` +} + +type HeadersInitParameters struct { + + // List of item names, such as cookies, headers, or query strings. + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type HeadersObservation struct { + + // List of item names, such as cookies, headers, or query strings. 
+ // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type HeadersParameters struct { + + // List of item names, such as cookies, headers, or query strings. + // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type ParametersInCacheKeyAndForwardedToOriginInitParameters struct { + + // Whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. See Cookies Config for more information. + CookiesConfig *CookiesConfigInitParameters `json:"cookiesConfig,omitempty" tf:"cookies_config,omitempty"` + + // Flag determines whether the Accept-Encoding HTTP header is included in the cache key and in requests that CloudFront sends to the origin. + EnableAcceptEncodingBrotli *bool `json:"enableAcceptEncodingBrotli,omitempty" tf:"enable_accept_encoding_brotli,omitempty"` + + // Whether the Accept-Encoding HTTP header is included in the cache key and in requests sent to the origin by CloudFront. + EnableAcceptEncodingGzip *bool `json:"enableAcceptEncodingGzip,omitempty" tf:"enable_accept_encoding_gzip,omitempty"` + + // Whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. See Headers Config for more information. + HeadersConfig *HeadersConfigInitParameters `json:"headersConfig,omitempty" tf:"headers_config,omitempty"` + + // Whether any URL query strings in viewer requests are included in the cache key. It also automatically includes these query strings in requests that CloudFront sends to the origin. Please refer to the Query String Config for more information. 
+ QueryStringsConfig *QueryStringsConfigInitParameters `json:"queryStringsConfig,omitempty" tf:"query_strings_config,omitempty"` +} + +type ParametersInCacheKeyAndForwardedToOriginObservation struct { + + // Whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. See Cookies Config for more information. + CookiesConfig *CookiesConfigObservation `json:"cookiesConfig,omitempty" tf:"cookies_config,omitempty"` + + // Flag determines whether the Accept-Encoding HTTP header is included in the cache key and in requests that CloudFront sends to the origin. + EnableAcceptEncodingBrotli *bool `json:"enableAcceptEncodingBrotli,omitempty" tf:"enable_accept_encoding_brotli,omitempty"` + + // Whether the Accept-Encoding HTTP header is included in the cache key and in requests sent to the origin by CloudFront. + EnableAcceptEncodingGzip *bool `json:"enableAcceptEncodingGzip,omitempty" tf:"enable_accept_encoding_gzip,omitempty"` + + // Whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. See Headers Config for more information. + HeadersConfig *HeadersConfigObservation `json:"headersConfig,omitempty" tf:"headers_config,omitempty"` + + // Whether any URL query strings in viewer requests are included in the cache key. It also automatically includes these query strings in requests that CloudFront sends to the origin. Please refer to the Query String Config for more information. + QueryStringsConfig *QueryStringsConfigObservation `json:"queryStringsConfig,omitempty" tf:"query_strings_config,omitempty"` +} + +type ParametersInCacheKeyAndForwardedToOriginParameters struct { + + // Whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. See Cookies Config for more information. 
+ // +kubebuilder:validation:Optional + CookiesConfig *CookiesConfigParameters `json:"cookiesConfig" tf:"cookies_config,omitempty"` + + // Flag determines whether the Accept-Encoding HTTP header is included in the cache key and in requests that CloudFront sends to the origin. + // +kubebuilder:validation:Optional + EnableAcceptEncodingBrotli *bool `json:"enableAcceptEncodingBrotli,omitempty" tf:"enable_accept_encoding_brotli,omitempty"` + + // Whether the Accept-Encoding HTTP header is included in the cache key and in requests sent to the origin by CloudFront. + // +kubebuilder:validation:Optional + EnableAcceptEncodingGzip *bool `json:"enableAcceptEncodingGzip,omitempty" tf:"enable_accept_encoding_gzip,omitempty"` + + // Whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. See Headers Config for more information. + // +kubebuilder:validation:Optional + HeadersConfig *HeadersConfigParameters `json:"headersConfig" tf:"headers_config,omitempty"` + + // Whether any URL query strings in viewer requests are included in the cache key. It also automatically includes these query strings in requests that CloudFront sends to the origin. Please refer to the Query String Config for more information. + // +kubebuilder:validation:Optional + QueryStringsConfig *QueryStringsConfigParameters `json:"queryStringsConfig" tf:"query_strings_config,omitempty"` +} + +type QueryStringsConfigInitParameters struct { + + // Whether URL query strings in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values for query_string_behavior are none, whitelist, allExcept, and all. + QueryStringBehavior *string `json:"queryStringBehavior,omitempty" tf:"query_string_behavior,omitempty"` + + // Configuration parameter that contains a list of query string names. See Items for more information. 
+ QueryStrings *QueryStringsInitParameters `json:"queryStrings,omitempty" tf:"query_strings,omitempty"` +} + +type QueryStringsConfigObservation struct { + + // Whether URL query strings in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values for query_string_behavior are none, whitelist, allExcept, and all. + QueryStringBehavior *string `json:"queryStringBehavior,omitempty" tf:"query_string_behavior,omitempty"` + + // Configuration parameter that contains a list of query string names. See Items for more information. + QueryStrings *QueryStringsObservation `json:"queryStrings,omitempty" tf:"query_strings,omitempty"` +} + +type QueryStringsConfigParameters struct { + + // Whether URL query strings in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values for query_string_behavior are none, whitelist, allExcept, and all. + // +kubebuilder:validation:Optional + QueryStringBehavior *string `json:"queryStringBehavior" tf:"query_string_behavior,omitempty"` + + // Configuration parameter that contains a list of query string names. See Items for more information. + // +kubebuilder:validation:Optional + QueryStrings *QueryStringsParameters `json:"queryStrings,omitempty" tf:"query_strings,omitempty"` +} + +type QueryStringsInitParameters struct { + + // List of item names, such as cookies, headers, or query strings. + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type QueryStringsObservation struct { + + // List of item names, such as cookies, headers, or query strings. + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type QueryStringsParameters struct { + + // List of item names, such as cookies, headers, or query strings. 
+ // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +// CachePolicySpec defines the desired state of CachePolicy +type CachePolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CachePolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CachePolicyInitParameters `json:"initProvider,omitempty"` +} + +// CachePolicyStatus defines the observed state of CachePolicy. +type CachePolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CachePolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CachePolicy is the Schema for the CachePolicys API. 
Use the +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CachePolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.parametersInCacheKeyAndForwardedToOrigin) || (has(self.initProvider) && has(self.initProvider.parametersInCacheKeyAndForwardedToOrigin))",message="spec.forProvider.parametersInCacheKeyAndForwardedToOrigin is a required parameter" + Spec CachePolicySpec `json:"spec"` + Status CachePolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CachePolicyList contains a list of CachePolicys +type CachePolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CachePolicy `json:"items"` +} + +// Repository type metadata. +var ( + CachePolicy_Kind = "CachePolicy" + CachePolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CachePolicy_Kind}.String() + CachePolicy_KindAPIVersion = CachePolicy_Kind + "." 
+ CRDGroupVersion.String() + CachePolicy_GroupVersionKind = CRDGroupVersion.WithKind(CachePolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&CachePolicy{}, &CachePolicyList{}) +} diff --git a/apis/cloudfront/v1beta2/zz_distribution_terraformed.go b/apis/cloudfront/v1beta2/zz_distribution_terraformed.go new file mode 100755 index 0000000000..f7eda87d9c --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_distribution_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Distribution +func (mg *Distribution) GetTerraformResourceType() string { + return "aws_cloudfront_distribution" +} + +// GetConnectionDetailsMapping for this Distribution +func (tr *Distribution) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Distribution +func (tr *Distribution) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Distribution +func (tr *Distribution) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Distribution +func (tr *Distribution) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Distribution +func (tr *Distribution) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Distribution
+func (tr *Distribution) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Distribution
+func (tr *Distribution) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Distribution
+func (tr *Distribution) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Distribution using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Distribution) LateInitialize(attrs []byte) (bool, error) { + params := &DistributionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Distribution) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cloudfront/v1beta2/zz_distribution_types.go b/apis/cloudfront/v1beta2/zz_distribution_types.go new file mode 100755 index 0000000000..f0725c7fae --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_distribution_types.go @@ -0,0 +1,1735 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomErrorResponseInitParameters struct { + + // Minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. + ErrorCachingMinTTL *float64 `json:"errorCachingMinTtl,omitempty" tf:"error_caching_min_ttl,omitempty"` + + // 4xx or 5xx HTTP status code that you want to customize. + ErrorCode *float64 `json:"errorCode,omitempty" tf:"error_code,omitempty"` + + // HTTP status code that you want CloudFront to return with the custom error page to the viewer. + ResponseCode *float64 `json:"responseCode,omitempty" tf:"response_code,omitempty"` + + // Path of the custom error page (for example, /custom_404.html). 
+ ResponsePagePath *string `json:"responsePagePath,omitempty" tf:"response_page_path,omitempty"` +} + +type CustomErrorResponseObservation struct { + + // Minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. + ErrorCachingMinTTL *float64 `json:"errorCachingMinTtl,omitempty" tf:"error_caching_min_ttl,omitempty"` + + // 4xx or 5xx HTTP status code that you want to customize. + ErrorCode *float64 `json:"errorCode,omitempty" tf:"error_code,omitempty"` + + // HTTP status code that you want CloudFront to return with the custom error page to the viewer. + ResponseCode *float64 `json:"responseCode,omitempty" tf:"response_code,omitempty"` + + // Path of the custom error page (for example, /custom_404.html). + ResponsePagePath *string `json:"responsePagePath,omitempty" tf:"response_page_path,omitempty"` +} + +type CustomErrorResponseParameters struct { + + // Minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. + // +kubebuilder:validation:Optional + ErrorCachingMinTTL *float64 `json:"errorCachingMinTtl,omitempty" tf:"error_caching_min_ttl,omitempty"` + + // 4xx or 5xx HTTP status code that you want to customize. + // +kubebuilder:validation:Optional + ErrorCode *float64 `json:"errorCode" tf:"error_code,omitempty"` + + // HTTP status code that you want CloudFront to return with the custom error page to the viewer. + // +kubebuilder:validation:Optional + ResponseCode *float64 `json:"responseCode,omitempty" tf:"response_code,omitempty"` + + // Path of the custom error page (for example, /custom_404.html). 
+ // +kubebuilder:validation:Optional + ResponsePagePath *string `json:"responsePagePath,omitempty" tf:"response_page_path,omitempty"` +} + +type CustomHeaderInitParameters struct { + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CustomHeaderObservation struct { + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CustomHeaderParameters struct { + + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type CustomOriginConfigInitParameters struct { + + // HTTP port the custom origin listens on. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // HTTPS port the custom origin listens on. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // The Custom KeepAlive timeout, in seconds. By default, AWS enforces an upper limit of 60. But you can request an increase. Defaults to 5. + OriginKeepaliveTimeout *float64 `json:"originKeepaliveTimeout,omitempty" tf:"origin_keepalive_timeout,omitempty"` + + // Origin protocol policy to apply to your origin. One of http-only, https-only, or match-viewer. + OriginProtocolPolicy *string `json:"originProtocolPolicy,omitempty" tf:"origin_protocol_policy,omitempty"` + + // The Custom Read timeout, in seconds. By default, AWS enforces an upper limit of 60. But you can request an increase. Defaults to 30. + OriginReadTimeout *float64 `json:"originReadTimeout,omitempty" tf:"origin_read_timeout,omitempty"` + + // List of SSL/TLS protocols that CloudFront can use when connecting to your origin over HTTPS. Valid values: SSLv3, TLSv1, TLSv1.1, TLSv1.2. For more information, see Minimum Origin SSL Protocol in the Amazon CloudFront Developer Guide. 
+ // +listType=set + OriginSSLProtocols []*string `json:"originSslProtocols,omitempty" tf:"origin_ssl_protocols,omitempty"` +} + +type CustomOriginConfigObservation struct { + + // HTTP port the custom origin listens on. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // HTTPS port the custom origin listens on. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // The Custom KeepAlive timeout, in seconds. By default, AWS enforces an upper limit of 60. But you can request an increase. Defaults to 5. + OriginKeepaliveTimeout *float64 `json:"originKeepaliveTimeout,omitempty" tf:"origin_keepalive_timeout,omitempty"` + + // Origin protocol policy to apply to your origin. One of http-only, https-only, or match-viewer. + OriginProtocolPolicy *string `json:"originProtocolPolicy,omitempty" tf:"origin_protocol_policy,omitempty"` + + // The Custom Read timeout, in seconds. By default, AWS enforces an upper limit of 60. But you can request an increase. Defaults to 30. + OriginReadTimeout *float64 `json:"originReadTimeout,omitempty" tf:"origin_read_timeout,omitempty"` + + // List of SSL/TLS protocols that CloudFront can use when connecting to your origin over HTTPS. Valid values: SSLv3, TLSv1, TLSv1.1, TLSv1.2. For more information, see Minimum Origin SSL Protocol in the Amazon CloudFront Developer Guide. + // +listType=set + OriginSSLProtocols []*string `json:"originSslProtocols,omitempty" tf:"origin_ssl_protocols,omitempty"` +} + +type CustomOriginConfigParameters struct { + + // HTTP port the custom origin listens on. + // +kubebuilder:validation:Optional + HTTPPort *float64 `json:"httpPort" tf:"http_port,omitempty"` + + // HTTPS port the custom origin listens on. + // +kubebuilder:validation:Optional + HTTPSPort *float64 `json:"httpsPort" tf:"https_port,omitempty"` + + // The Custom KeepAlive timeout, in seconds. By default, AWS enforces an upper limit of 60. But you can request an increase. Defaults to 5. 
+ // +kubebuilder:validation:Optional + OriginKeepaliveTimeout *float64 `json:"originKeepaliveTimeout,omitempty" tf:"origin_keepalive_timeout,omitempty"` + + // Origin protocol policy to apply to your origin. One of http-only, https-only, or match-viewer. + // +kubebuilder:validation:Optional + OriginProtocolPolicy *string `json:"originProtocolPolicy" tf:"origin_protocol_policy,omitempty"` + + // The Custom Read timeout, in seconds. By default, AWS enforces an upper limit of 60. But you can request an increase. Defaults to 30. + // +kubebuilder:validation:Optional + OriginReadTimeout *float64 `json:"originReadTimeout,omitempty" tf:"origin_read_timeout,omitempty"` + + // List of SSL/TLS protocols that CloudFront can use when connecting to your origin over HTTPS. Valid values: SSLv3, TLSv1, TLSv1.1, TLSv1.2. For more information, see Minimum Origin SSL Protocol in the Amazon CloudFront Developer Guide. + // +kubebuilder:validation:Optional + // +listType=set + OriginSSLProtocols []*string `json:"originSslProtocols" tf:"origin_ssl_protocols,omitempty"` +} + +type DefaultCacheBehaviorInitParameters struct { + + // Controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. + // +listType=set + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // Unique identifier of the cache policy that is attached to the cache behavior. If configuring the default_cache_behavior either cache_policy_id or forwarded_values must be set. + CachePolicyID *string `json:"cachePolicyId,omitempty" tf:"cache_policy_id,omitempty"` + + // Controls whether CloudFront caches the response to requests using the specified HTTP methods. + // +listType=set + CachedMethods []*string `json:"cachedMethods,omitempty" tf:"cached_methods,omitempty"` + + // Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header (default: false). 
+	Compress *bool `json:"compress,omitempty" tf:"compress,omitempty"`
+
+	// Default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request in the absence of a Cache-Control max-age or Expires header.
+	DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"`
+
+	// Field level encryption configuration ID.
+	FieldLevelEncryptionID *string `json:"fieldLevelEncryptionId,omitempty" tf:"field_level_encryption_id,omitempty"`
+
+	// The forwarded values configuration that specifies how CloudFront handles query strings, cookies and headers (maximum one).
+	ForwardedValues *ForwardedValuesInitParameters `json:"forwardedValues,omitempty" tf:"forwarded_values,omitempty"`
+
+	// A config block that triggers a cloudfront function with specific actions (maximum 2).
+	FunctionAssociation []FunctionAssociationInitParameters `json:"functionAssociation,omitempty" tf:"function_association,omitempty"`
+
+	// A config block that triggers a lambda function with specific actions (maximum 4).
+	LambdaFunctionAssociation []LambdaFunctionAssociationInitParameters `json:"lambdaFunctionAssociation,omitempty" tf:"lambda_function_association,omitempty"`
+
+	// Maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. Only effective in the presence of Cache-Control max-age, Cache-Control s-maxage, and Expires headers.
+	MaxTTL *float64 `json:"maxTtl,omitempty" tf:"max_ttl,omitempty"`
+
+	// Minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. Defaults to 0 seconds.
+	MinTTL *float64 `json:"minTtl,omitempty" tf:"min_ttl,omitempty"`
+
+	// Unique identifier of the origin request policy that is attached to the behavior.
+ OriginRequestPolicyID *string `json:"originRequestPolicyId,omitempty" tf:"origin_request_policy_id,omitempty"` + + // ARN of the real-time log configuration that is attached to this cache behavior. + RealtimeLogConfigArn *string `json:"realtimeLogConfigArn,omitempty" tf:"realtime_log_config_arn,omitempty"` + + // Identifier for a response headers policy. + ResponseHeadersPolicyID *string `json:"responseHeadersPolicyId,omitempty" tf:"response_headers_policy_id,omitempty"` + + // Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` + + // Value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior. + TargetOriginID *string `json:"targetOriginId,omitempty" tf:"target_origin_id,omitempty"` + + // List of key group IDs that CloudFront can use to validate signed URLs or signed cookies. See the CloudFront User Guide for more information about this feature. + TrustedKeyGroups []*string `json:"trustedKeyGroups,omitempty" tf:"trusted_key_groups,omitempty"` + + // List of AWS account IDs (or self) that you want to allow to create signed URLs for private content. See the CloudFront User Guide for more information about this feature. + TrustedSigners []*string `json:"trustedSigners,omitempty" tf:"trusted_signers,omitempty"` + + // Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. One of allow-all, https-only, or redirect-to-https. 
+	ViewerProtocolPolicy *string `json:"viewerProtocolPolicy,omitempty" tf:"viewer_protocol_policy,omitempty"`
+}
+
+type DefaultCacheBehaviorObservation struct {
+
+	// Controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin.
+	// +listType=set
+	AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"`
+
+	// Unique identifier of the cache policy that is attached to the cache behavior. If configuring the default_cache_behavior either cache_policy_id or forwarded_values must be set.
+	CachePolicyID *string `json:"cachePolicyId,omitempty" tf:"cache_policy_id,omitempty"`
+
+	// Controls whether CloudFront caches the response to requests using the specified HTTP methods.
+	// +listType=set
+	CachedMethods []*string `json:"cachedMethods,omitempty" tf:"cached_methods,omitempty"`
+
+	// Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header (default: false).
+	Compress *bool `json:"compress,omitempty" tf:"compress,omitempty"`
+
+	// Default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request in the absence of a Cache-Control max-age or Expires header.
+	DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"`
+
+	// Field level encryption configuration ID.
+	FieldLevelEncryptionID *string `json:"fieldLevelEncryptionId,omitempty" tf:"field_level_encryption_id,omitempty"`
+
+	// The forwarded values configuration that specifies how CloudFront handles query strings, cookies and headers (maximum one).
+	ForwardedValues *ForwardedValuesObservation `json:"forwardedValues,omitempty" tf:"forwarded_values,omitempty"`
+
+	// A config block that triggers a cloudfront function with specific actions (maximum 2).
+ FunctionAssociation []FunctionAssociationObservation `json:"functionAssociation,omitempty" tf:"function_association,omitempty"` + + // A config block that triggers a lambda function with specific actions (maximum 4). + LambdaFunctionAssociation []LambdaFunctionAssociationObservation `json:"lambdaFunctionAssociation,omitempty" tf:"lambda_function_association,omitempty"` + + // Maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. Only effective in the presence of Cache-Control max-age, Cache-Control s-maxage, and Expires headers. + MaxTTL *float64 `json:"maxTtl,omitempty" tf:"max_ttl,omitempty"` + + // Minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. Defaults to 0 seconds. + MinTTL *float64 `json:"minTtl,omitempty" tf:"min_ttl,omitempty"` + + // Unique identifier of the origin request policy that is attached to the behavior. + OriginRequestPolicyID *string `json:"originRequestPolicyId,omitempty" tf:"origin_request_policy_id,omitempty"` + + // ARN of the real-time log configuration that is attached to this cache behavior. + RealtimeLogConfigArn *string `json:"realtimeLogConfigArn,omitempty" tf:"realtime_log_config_arn,omitempty"` + + // Identifier for a response headers policy. + ResponseHeadersPolicyID *string `json:"responseHeadersPolicyId,omitempty" tf:"response_headers_policy_id,omitempty"` + + // Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` + + // Value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior. 
+ TargetOriginID *string `json:"targetOriginId,omitempty" tf:"target_origin_id,omitempty"` + + // List of key group IDs that CloudFront can use to validate signed URLs or signed cookies. See the CloudFront User Guide for more information about this feature. + TrustedKeyGroups []*string `json:"trustedKeyGroups,omitempty" tf:"trusted_key_groups,omitempty"` + + // List of AWS account IDs (or self) that you want to allow to create signed URLs for private content. See the CloudFront User Guide for more information about this feature. + TrustedSigners []*string `json:"trustedSigners,omitempty" tf:"trusted_signers,omitempty"` + + // Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. One of allow-all, https-only, or redirect-to-https. + ViewerProtocolPolicy *string `json:"viewerProtocolPolicy,omitempty" tf:"viewer_protocol_policy,omitempty"` +} + +type DefaultCacheBehaviorParameters struct { + + // Controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. + // +kubebuilder:validation:Optional + // +listType=set + AllowedMethods []*string `json:"allowedMethods" tf:"allowed_methods,omitempty"` + + // Unique identifier of the cache policy that is attached to the cache behavior. If configuring the default_cache_behavior either cache_policy_id or forwarded_values must be set. + // +kubebuilder:validation:Optional + CachePolicyID *string `json:"cachePolicyId,omitempty" tf:"cache_policy_id,omitempty"` + + // Controls whether CloudFront caches the response to requests using the specified HTTP methods. + // +kubebuilder:validation:Optional + // +listType=set + CachedMethods []*string `json:"cachedMethods" tf:"cached_methods,omitempty"` + + // Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header (default: false). 
+ // +kubebuilder:validation:Optional + Compress *bool `json:"compress,omitempty" tf:"compress,omitempty"` + + // Default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request in the absence of an Cache-Control max-age or Expires header. + // +kubebuilder:validation:Optional + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // Field level encryption configuration ID. + // +kubebuilder:validation:Optional + FieldLevelEncryptionID *string `json:"fieldLevelEncryptionId,omitempty" tf:"field_level_encryption_id,omitempty"` + + // The forwarded values configuration that specifies how CloudFront handles query strings, cookies and headers (maximum one). + // +kubebuilder:validation:Optional + ForwardedValues *ForwardedValuesParameters `json:"forwardedValues,omitempty" tf:"forwarded_values,omitempty"` + + // A config block that triggers a cloudfront function with specific actions (maximum 2). + // +kubebuilder:validation:Optional + FunctionAssociation []FunctionAssociationParameters `json:"functionAssociation,omitempty" tf:"function_association,omitempty"` + + // A config block that triggers a lambda function with specific actions (maximum 4). + // +kubebuilder:validation:Optional + LambdaFunctionAssociation []LambdaFunctionAssociationParameters `json:"lambdaFunctionAssociation,omitempty" tf:"lambda_function_association,omitempty"` + + // Maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. Only effective in the presence of Cache-Control max-age, Cache-Control s-maxage, and Expires headers. + // +kubebuilder:validation:Optional + MaxTTL *float64 `json:"maxTtl,omitempty" tf:"max_ttl,omitempty"` + + // Minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. 
Defaults to 0 seconds. + // +kubebuilder:validation:Optional + MinTTL *float64 `json:"minTtl,omitempty" tf:"min_ttl,omitempty"` + + // Unique identifier of the origin request policy that is attached to the behavior. + // +kubebuilder:validation:Optional + OriginRequestPolicyID *string `json:"originRequestPolicyId,omitempty" tf:"origin_request_policy_id,omitempty"` + + // ARN of the real-time log configuration that is attached to this cache behavior. + // +kubebuilder:validation:Optional + RealtimeLogConfigArn *string `json:"realtimeLogConfigArn,omitempty" tf:"realtime_log_config_arn,omitempty"` + + // Identifier for a response headers policy. + // +kubebuilder:validation:Optional + ResponseHeadersPolicyID *string `json:"responseHeadersPolicyId,omitempty" tf:"response_headers_policy_id,omitempty"` + + // Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. + // +kubebuilder:validation:Optional + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` + + // Value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior. + // +kubebuilder:validation:Optional + TargetOriginID *string `json:"targetOriginId" tf:"target_origin_id,omitempty"` + + // List of key group IDs that CloudFront can use to validate signed URLs or signed cookies. See the CloudFront User Guide for more information about this feature. + // +kubebuilder:validation:Optional + TrustedKeyGroups []*string `json:"trustedKeyGroups,omitempty" tf:"trusted_key_groups,omitempty"` + + // List of AWS account IDs (or self) that you want to allow to create signed URLs for private content. See the CloudFront User Guide for more information about this feature. 
+ // +kubebuilder:validation:Optional + TrustedSigners []*string `json:"trustedSigners,omitempty" tf:"trusted_signers,omitempty"` + + // Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. One of allow-all, https-only, or redirect-to-https. + // +kubebuilder:validation:Optional + ViewerProtocolPolicy *string `json:"viewerProtocolPolicy" tf:"viewer_protocol_policy,omitempty"` +} + +type DistributionInitParameters struct { + + // Extra CNAMEs (alternate domain names), if any, for this distribution. + // +listType=set + Aliases []*string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // Any comments you want to include about the distribution. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Identifier of a continuous deployment policy. This argument should only be set on a production distribution. See the aws_cloudfront_continuous_deployment_policy resource for additional details. + ContinuousDeploymentPolicyID *string `json:"continuousDeploymentPolicyId,omitempty" tf:"continuous_deployment_policy_id,omitempty"` + + // One or more custom error response elements (multiples allowed). + CustomErrorResponse []CustomErrorResponseInitParameters `json:"customErrorResponse,omitempty" tf:"custom_error_response,omitempty"` + + // Default cache behavior for this distribution (maximum one). Requires either cache_policy_id (preferred) or forwarded_values (deprecated) be set. + DefaultCacheBehavior *DefaultCacheBehaviorInitParameters `json:"defaultCacheBehavior,omitempty" tf:"default_cache_behavior,omitempty"` + + // Object that you want CloudFront to return (for example, index.html) when an end user requests the root URL. + DefaultRootObject *string `json:"defaultRootObject,omitempty" tf:"default_root_object,omitempty"` + + // Whether the distribution is enabled to accept end user requests for content. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Maximum HTTP version to support on the distribution. Allowed values are http1.1, http2, http2and3 and http3. The default is http2. + HTTPVersion *string `json:"httpVersion,omitempty" tf:"http_version,omitempty"` + + // Whether the IPv6 is enabled for the distribution. + IsIPv6Enabled *bool `json:"isIpv6Enabled,omitempty" tf:"is_ipv6_enabled,omitempty"` + + // The logging configuration that controls how logs are written to your distribution (maximum one). + LoggingConfig *LoggingConfigInitParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Ordered list of cache behaviors resource for this distribution. List from top to bottom in order of precedence. The topmost cache behavior will have precedence 0. + OrderedCacheBehavior []OrderedCacheBehaviorInitParameters `json:"orderedCacheBehavior,omitempty" tf:"ordered_cache_behavior,omitempty"` + + // One or more origins for this distribution (multiples allowed). + Origin []OriginInitParameters `json:"origin,omitempty" tf:"origin,omitempty"` + + // One or more origin_group for this distribution (multiples allowed). + OriginGroup []OriginGroupInitParameters `json:"originGroup,omitempty" tf:"origin_group,omitempty"` + + // Price class for this distribution. One of PriceClass_All, PriceClass_200, PriceClass_100. + PriceClass *string `json:"priceClass,omitempty" tf:"price_class,omitempty"` + + // The restriction configuration for this distribution (maximum one). + Restrictions *RestrictionsInitParameters `json:"restrictions,omitempty" tf:"restrictions,omitempty"` + + // If this is set, the distribution needs to be deleted manually afterwards. Default: false. + RetainOnDelete *bool `json:"retainOnDelete,omitempty" tf:"retain_on_delete,omitempty"` + + // A Boolean that indicates whether this is a staging distribution. Defaults to false. 
+	Staging *bool `json:"staging,omitempty" tf:"staging,omitempty"`
+
+	// Key-value map of resource tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// The SSL configuration for this distribution (maximum one).
+	ViewerCertificate *ViewerCertificateInitParameters `json:"viewerCertificate,omitempty" tf:"viewer_certificate,omitempty"`
+
+	// If enabled, the resource will wait for the distribution status to change from InProgress to Deployed. Setting this to false will skip the process. Default: true.
+	WaitForDeployment *bool `json:"waitForDeployment,omitempty" tf:"wait_for_deployment,omitempty"`
+
+	// Unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF (WAFv2), use the ACL ARN, for example aws_wafv2_web_acl.example.arn. To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example aws_waf_web_acl.example.id. The WAF Web ACL must exist in the WAF Global (CloudFront) region and the credentials configuring this argument must have waf:GetWebACL permissions assigned.
+	WebACLID *string `json:"webAclId,omitempty" tf:"web_acl_id,omitempty"`
+}
+
+type DistributionObservation struct {
+
+	// Extra CNAMEs (alternate domain names), if any, for this distribution.
+	// +listType=set
+	Aliases []*string `json:"aliases,omitempty" tf:"aliases,omitempty"`
+
+	// ARN for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, where 123456789012 is your AWS account ID.
+	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`
+
+	// Internal value used by CloudFront to allow future updates to the distribution configuration.
+	CallerReference *string `json:"callerReference,omitempty" tf:"caller_reference,omitempty"`
+
+	// Any comments you want to include about the distribution.
+ Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Identifier of a continuous deployment policy. This argument should only be set on a production distribution. See the aws_cloudfront_continuous_deployment_policy resource for additional details. + ContinuousDeploymentPolicyID *string `json:"continuousDeploymentPolicyId,omitempty" tf:"continuous_deployment_policy_id,omitempty"` + + // One or more custom error response elements (multiples allowed). + CustomErrorResponse []CustomErrorResponseObservation `json:"customErrorResponse,omitempty" tf:"custom_error_response,omitempty"` + + // Default cache behavior for this distribution (maximum one). Requires either cache_policy_id (preferred) or forwarded_values (deprecated) be set. + DefaultCacheBehavior *DefaultCacheBehaviorObservation `json:"defaultCacheBehavior,omitempty" tf:"default_cache_behavior,omitempty"` + + // Object that you want CloudFront to return (for example, index.html) when an end user requests the root URL. + DefaultRootObject *string `json:"defaultRootObject,omitempty" tf:"default_root_object,omitempty"` + + // DNS domain name of either the S3 bucket, or web site of your custom origin. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Whether the distribution is enabled to accept end user requests for content. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Current version of the distribution's information. For example: E2QWRUHAPOMQZL. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // Maximum HTTP version to support on the distribution. Allowed values are http1.1, http2, http2and3 and http3. The default is http2. + HTTPVersion *string `json:"httpVersion,omitempty" tf:"http_version,omitempty"` + + // CloudFront Route 53 zone ID that can be used to route an Alias Resource Record Set to. This attribute is simply an alias for the zone ID Z2FDTNDATAQYW2. 
+ HostedZoneID *string `json:"hostedZoneId,omitempty" tf:"hosted_zone_id,omitempty"` + + // Identifier for the distribution. For example: EDFDVBD632BHDS5. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Number of invalidation batches currently in progress. + InProgressValidationBatches *float64 `json:"inProgressValidationBatches,omitempty" tf:"in_progress_validation_batches,omitempty"` + + // Whether the IPv6 is enabled for the distribution. + IsIPv6Enabled *bool `json:"isIpv6Enabled,omitempty" tf:"is_ipv6_enabled,omitempty"` + + // Date and time the distribution was last modified. + LastModifiedTime *string `json:"lastModifiedTime,omitempty" tf:"last_modified_time,omitempty"` + + // The logging configuration that controls how logs are written to your distribution (maximum one). + LoggingConfig *LoggingConfigObservation `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Ordered list of cache behaviors resource for this distribution. List from top to bottom in order of precedence. The topmost cache behavior will have precedence 0. + OrderedCacheBehavior []OrderedCacheBehaviorObservation `json:"orderedCacheBehavior,omitempty" tf:"ordered_cache_behavior,omitempty"` + + // One or more origins for this distribution (multiples allowed). + Origin []OriginObservation `json:"origin,omitempty" tf:"origin,omitempty"` + + // One or more origin_group for this distribution (multiples allowed). + OriginGroup []OriginGroupObservation `json:"originGroup,omitempty" tf:"origin_group,omitempty"` + + // Price class for this distribution. One of PriceClass_All, PriceClass_200, PriceClass_100. + PriceClass *string `json:"priceClass,omitempty" tf:"price_class,omitempty"` + + // The restriction configuration for this distribution (maximum one). + Restrictions *RestrictionsObservation `json:"restrictions,omitempty" tf:"restrictions,omitempty"` + + // If this is set, the distribution needs to be deleted manually afterwards. Default: false. 
+ RetainOnDelete *bool `json:"retainOnDelete,omitempty" tf:"retain_on_delete,omitempty"` + + // A Boolean that indicates whether this is a staging distribution. Defaults to false. + Staging *bool `json:"staging,omitempty" tf:"staging,omitempty"` + + // Current status of the distribution. Deployed if the distribution's information is fully propagated throughout the Amazon CloudFront system. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // List of key group IDs that CloudFront can use to validate signed URLs or signed cookies. See the CloudFront User Guide for more information about this feature. + TrustedKeyGroups []TrustedKeyGroupsObservation `json:"trustedKeyGroups,omitempty" tf:"trusted_key_groups,omitempty"` + + // List of AWS account IDs (or self) that you want to allow to create signed URLs for private content. See the CloudFront User Guide for more information about this feature. + TrustedSigners []TrustedSignersObservation `json:"trustedSigners,omitempty" tf:"trusted_signers,omitempty"` + + // The SSL configuration for this distribution (maximum one). + ViewerCertificate *ViewerCertificateObservation `json:"viewerCertificate,omitempty" tf:"viewer_certificate,omitempty"` + + // If enabled, the resource will wait for the distribution status to change from InProgress to Deployed. Setting this tofalse will skip the process. Default: true. + WaitForDeployment *bool `json:"waitForDeployment,omitempty" tf:"wait_for_deployment,omitempty"` + + // Unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. 
To specify a web ACL created using the latest version of AWS WAF (WAFv2), use the ACL ARN, for example aws_wafv2_web_acl.example.arn. To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example aws_waf_web_acl.example.id. The WAF Web ACL must exist in the WAF Global (CloudFront) region and the credentials configuring this argument must have waf:GetWebACL permissions assigned. + WebACLID *string `json:"webAclId,omitempty" tf:"web_acl_id,omitempty"` +} + +type DistributionParameters struct { + + // Extra CNAMEs (alternate domain names), if any, for this distribution. + // +kubebuilder:validation:Optional + // +listType=set + Aliases []*string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // Any comments you want to include about the distribution. + // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Identifier of a continuous deployment policy. This argument should only be set on a production distribution. See the aws_cloudfront_continuous_deployment_policy resource for additional details. + // +kubebuilder:validation:Optional + ContinuousDeploymentPolicyID *string `json:"continuousDeploymentPolicyId,omitempty" tf:"continuous_deployment_policy_id,omitempty"` + + // One or more custom error response elements (multiples allowed). + // +kubebuilder:validation:Optional + CustomErrorResponse []CustomErrorResponseParameters `json:"customErrorResponse,omitempty" tf:"custom_error_response,omitempty"` + + // Default cache behavior for this distribution (maximum one). Requires either cache_policy_id (preferred) or forwarded_values (deprecated) be set. + // +kubebuilder:validation:Optional + DefaultCacheBehavior *DefaultCacheBehaviorParameters `json:"defaultCacheBehavior,omitempty" tf:"default_cache_behavior,omitempty"` + + // Object that you want CloudFront to return (for example, index.html) when an end user requests the root URL. 
+ // +kubebuilder:validation:Optional + DefaultRootObject *string `json:"defaultRootObject,omitempty" tf:"default_root_object,omitempty"` + + // Whether the distribution is enabled to accept end user requests for content. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Maximum HTTP version to support on the distribution. Allowed values are http1.1, http2, http2and3 and http3. The default is http2. + // +kubebuilder:validation:Optional + HTTPVersion *string `json:"httpVersion,omitempty" tf:"http_version,omitempty"` + + // Whether the IPv6 is enabled for the distribution. + // +kubebuilder:validation:Optional + IsIPv6Enabled *bool `json:"isIpv6Enabled,omitempty" tf:"is_ipv6_enabled,omitempty"` + + // The logging configuration that controls how logs are written to your distribution (maximum one). + // +kubebuilder:validation:Optional + LoggingConfig *LoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Ordered list of cache behaviors resource for this distribution. List from top to bottom in order of precedence. The topmost cache behavior will have precedence 0. + // +kubebuilder:validation:Optional + OrderedCacheBehavior []OrderedCacheBehaviorParameters `json:"orderedCacheBehavior,omitempty" tf:"ordered_cache_behavior,omitempty"` + + // One or more origins for this distribution (multiples allowed). + // +kubebuilder:validation:Optional + Origin []OriginParameters `json:"origin,omitempty" tf:"origin,omitempty"` + + // One or more origin_group for this distribution (multiples allowed). + // +kubebuilder:validation:Optional + OriginGroup []OriginGroupParameters `json:"originGroup,omitempty" tf:"origin_group,omitempty"` + + // Price class for this distribution. One of PriceClass_All, PriceClass_200, PriceClass_100. 
+ // +kubebuilder:validation:Optional
+ PriceClass *string `json:"priceClass,omitempty" tf:"price_class,omitempty"`
+
+ // Region is the region you'd like your resource to be created in.
+ // +upjet:crd:field:TFTag=-
+ // +kubebuilder:validation:Required
+ Region *string `json:"region" tf:"-"`
+
+ // The restriction configuration for this distribution (maximum one).
+ // +kubebuilder:validation:Optional
+ Restrictions *RestrictionsParameters `json:"restrictions,omitempty" tf:"restrictions,omitempty"`
+
+ // If this is set, the distribution needs to be deleted manually afterwards. Default: false.
+ // +kubebuilder:validation:Optional
+ RetainOnDelete *bool `json:"retainOnDelete,omitempty" tf:"retain_on_delete,omitempty"`
+
+ // A Boolean that indicates whether this is a staging distribution. Defaults to false.
+ // +kubebuilder:validation:Optional
+ Staging *bool `json:"staging,omitempty" tf:"staging,omitempty"`
+
+ // Key-value map of resource tags.
+ // +kubebuilder:validation:Optional
+ // +mapType=granular
+ Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+ // The SSL configuration for this distribution (maximum one).
+ // +kubebuilder:validation:Optional
+ ViewerCertificate *ViewerCertificateParameters `json:"viewerCertificate,omitempty" tf:"viewer_certificate,omitempty"`
+
+ // If enabled, the resource will wait for the distribution status to change from InProgress to Deployed. Setting this to false will skip the process. Default: true.
+ // +kubebuilder:validation:Optional
+ WaitForDeployment *bool `json:"waitForDeployment,omitempty" tf:"wait_for_deployment,omitempty"`
+
+ // Unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF (WAFv2), use the ACL ARN, for example aws_wafv2_web_acl.example.arn. To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example aws_waf_web_acl.example.id. The WAF Web ACL must exist in the WAF Global (CloudFront) region and the credentials configuring this argument must have waf:GetWebACL permissions assigned.
+ // +kubebuilder:validation:Optional
+ WebACLID *string `json:"webAclId,omitempty" tf:"web_acl_id,omitempty"`
+}
+
+type FailoverCriteriaInitParameters struct {
+
+ // List of HTTP status codes for the origin group.
+ // +listType=set
+ StatusCodes []*float64 `json:"statusCodes,omitempty" tf:"status_codes,omitempty"`
+}
+
+type FailoverCriteriaObservation struct {
+
+ // List of HTTP status codes for the origin group.
+ // +listType=set
+ StatusCodes []*float64 `json:"statusCodes,omitempty" tf:"status_codes,omitempty"`
+}
+
+type FailoverCriteriaParameters struct {
+
+ // List of HTTP status codes for the origin group.
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ StatusCodes []*float64 `json:"statusCodes" tf:"status_codes,omitempty"`
+}
+
+type ForwardedValuesCookiesInitParameters struct {
+
+ // Whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If whitelist, you must include the subsequent whitelisted_names.
+ Forward *string `json:"forward,omitempty" tf:"forward,omitempty"`
+
+ // If you have specified whitelist to forward, the whitelisted cookies that you want CloudFront to forward to your origin.
+ // +listType=set
+ WhitelistedNames []*string `json:"whitelistedNames,omitempty" tf:"whitelisted_names,omitempty"`
+}
+
+type ForwardedValuesCookiesObservation struct {
+
+ // Whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If whitelist, you must include the subsequent whitelisted_names.
+ Forward *string `json:"forward,omitempty" tf:"forward,omitempty"`
+
+ // If you have specified whitelist to forward, the whitelisted cookies that you want CloudFront to forward to your origin.
+ // +listType=set + WhitelistedNames []*string `json:"whitelistedNames,omitempty" tf:"whitelisted_names,omitempty"` +} + +type ForwardedValuesCookiesParameters struct { + + // Whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If whitelist, you must include the subsequent whitelisted_names. + // +kubebuilder:validation:Optional + Forward *string `json:"forward" tf:"forward,omitempty"` + + // If you have specified whitelist to forward, the whitelisted cookies that you want CloudFront to forward to your origin. + // +kubebuilder:validation:Optional + // +listType=set + WhitelistedNames []*string `json:"whitelistedNames,omitempty" tf:"whitelisted_names,omitempty"` +} + +type ForwardedValuesInitParameters struct { + + // The forwarded values cookies that specifies how CloudFront handles cookies (maximum one). + Cookies *ForwardedValuesCookiesInitParameters `json:"cookies,omitempty" tf:"cookies,omitempty"` + + // Headers, if any, that you want CloudFront to vary upon for this cache behavior. Specify * to include all headers. + // +listType=set + Headers []*string `json:"headers,omitempty" tf:"headers,omitempty"` + + // Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. + QueryString *bool `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // When specified, along with a value of true for query_string, all query strings are forwarded, however only the query string keys listed in this argument are cached. When omitted with a value of true for query_string, all query string keys are cached. + QueryStringCacheKeys []*string `json:"queryStringCacheKeys,omitempty" tf:"query_string_cache_keys,omitempty"` +} + +type ForwardedValuesObservation struct { + + // The forwarded values cookies that specifies how CloudFront handles cookies (maximum one). 
+ Cookies *ForwardedValuesCookiesObservation `json:"cookies,omitempty" tf:"cookies,omitempty"` + + // Headers, if any, that you want CloudFront to vary upon for this cache behavior. Specify * to include all headers. + // +listType=set + Headers []*string `json:"headers,omitempty" tf:"headers,omitempty"` + + // Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. + QueryString *bool `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // When specified, along with a value of true for query_string, all query strings are forwarded, however only the query string keys listed in this argument are cached. When omitted with a value of true for query_string, all query string keys are cached. + QueryStringCacheKeys []*string `json:"queryStringCacheKeys,omitempty" tf:"query_string_cache_keys,omitempty"` +} + +type ForwardedValuesParameters struct { + + // The forwarded values cookies that specifies how CloudFront handles cookies (maximum one). + // +kubebuilder:validation:Optional + Cookies *ForwardedValuesCookiesParameters `json:"cookies" tf:"cookies,omitempty"` + + // Headers, if any, that you want CloudFront to vary upon for this cache behavior. Specify * to include all headers. + // +kubebuilder:validation:Optional + // +listType=set + Headers []*string `json:"headers,omitempty" tf:"headers,omitempty"` + + // Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. + // +kubebuilder:validation:Optional + QueryString *bool `json:"queryString" tf:"query_string,omitempty"` + + // When specified, along with a value of true for query_string, all query strings are forwarded, however only the query string keys listed in this argument are cached. When omitted with a value of true for query_string, all query string keys are cached. 
+ // +kubebuilder:validation:Optional + QueryStringCacheKeys []*string `json:"queryStringCacheKeys,omitempty" tf:"query_string_cache_keys,omitempty"` +} + +type FunctionAssociationInitParameters struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // ARN of the CloudFront function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` +} + +type FunctionAssociationObservation struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // ARN of the CloudFront function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` +} + +type FunctionAssociationParameters struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType" tf:"event_type,omitempty"` + + // ARN of the CloudFront function. + // +kubebuilder:validation:Optional + FunctionArn *string `json:"functionArn" tf:"function_arn,omitempty"` +} + +type GeoRestrictionInitParameters struct { + + // ISO 3166-1-alpha-2 codes for which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). If the type is specified as none an empty array can be used. + // +listType=set + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + + // Method that you want to use to restrict distribution of your content by country: none, whitelist, or blacklist. 
+ RestrictionType *string `json:"restrictionType,omitempty" tf:"restriction_type,omitempty"` +} + +type GeoRestrictionObservation struct { + + // ISO 3166-1-alpha-2 codes for which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). If the type is specified as none an empty array can be used. + // +listType=set + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + + // Method that you want to use to restrict distribution of your content by country: none, whitelist, or blacklist. + RestrictionType *string `json:"restrictionType,omitempty" tf:"restriction_type,omitempty"` +} + +type GeoRestrictionParameters struct { + + // ISO 3166-1-alpha-2 codes for which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). If the type is specified as none an empty array can be used. + // +kubebuilder:validation:Optional + // +listType=set + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + + // Method that you want to use to restrict distribution of your content by country: none, whitelist, or blacklist. + // +kubebuilder:validation:Optional + RestrictionType *string `json:"restrictionType" tf:"restriction_type,omitempty"` +} + +type ItemsInitParameters struct { +} + +type ItemsObservation struct { + + // ID of the key group that contains the public keys. + KeyGroupID *string `json:"keyGroupId,omitempty" tf:"key_group_id,omitempty"` + + // Set of CloudFront key pair IDs. + // +listType=set + KeyPairIds []*string `json:"keyPairIds,omitempty" tf:"key_pair_ids,omitempty"` +} + +type ItemsParameters struct { +} + +type LambdaFunctionAssociationInitParameters struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. 
+ EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // When set to true it exposes the request body to the lambda function. Defaults to false. Valid values: true, false. + IncludeBody *bool `json:"includeBody,omitempty" tf:"include_body,omitempty"` + + // ARN of the Lambda function. + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` +} + +type LambdaFunctionAssociationObservation struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // When set to true it exposes the request body to the lambda function. Defaults to false. Valid values: true, false. + IncludeBody *bool `json:"includeBody,omitempty" tf:"include_body,omitempty"` + + // ARN of the Lambda function. + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` +} + +type LambdaFunctionAssociationParameters struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType" tf:"event_type,omitempty"` + + // When set to true it exposes the request body to the lambda function. Defaults to false. Valid values: true, false. + // +kubebuilder:validation:Optional + IncludeBody *bool `json:"includeBody,omitempty" tf:"include_body,omitempty"` + + // ARN of the Lambda function. + // +kubebuilder:validation:Optional + LambdaArn *string `json:"lambdaArn" tf:"lambda_arn,omitempty"` +} + +type LoggingConfigInitParameters struct { + + // Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Whether to include cookies in access logs (default: false). 
+ IncludeCookies *bool `json:"includeCookies,omitempty" tf:"include_cookies,omitempty"` + + // Prefix to the access log filenames for this distribution, for example, myprefix/. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type LoggingConfigObservation struct { + + // Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Whether to include cookies in access logs (default: false). + IncludeCookies *bool `json:"includeCookies,omitempty" tf:"include_cookies,omitempty"` + + // Prefix to the access log filenames for this distribution, for example, myprefix/. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type LoggingConfigParameters struct { + + // Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // Whether to include cookies in access logs (default: false). + // +kubebuilder:validation:Optional + IncludeCookies *bool `json:"includeCookies,omitempty" tf:"include_cookies,omitempty"` + + // Prefix to the access log filenames for this distribution, for example, myprefix/. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type MemberInitParameters struct { + + // Unique identifier for the origin. + OriginID *string `json:"originId,omitempty" tf:"origin_id,omitempty"` +} + +type MemberObservation struct { + + // Unique identifier for the origin. + OriginID *string `json:"originId,omitempty" tf:"origin_id,omitempty"` +} + +type MemberParameters struct { + + // Unique identifier for the origin. 
+ // +kubebuilder:validation:Optional + OriginID *string `json:"originId" tf:"origin_id,omitempty"` +} + +type OrderedCacheBehaviorForwardedValuesCookiesInitParameters struct { + + // Whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If whitelist, you must include the subsequent whitelisted_names. + Forward *string `json:"forward,omitempty" tf:"forward,omitempty"` + + // If you have specified whitelist to forward, the whitelisted cookies that you want CloudFront to forward to your origin. + // +listType=set + WhitelistedNames []*string `json:"whitelistedNames,omitempty" tf:"whitelisted_names,omitempty"` +} + +type OrderedCacheBehaviorForwardedValuesCookiesObservation struct { + + // Whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If whitelist, you must include the subsequent whitelisted_names. + Forward *string `json:"forward,omitempty" tf:"forward,omitempty"` + + // If you have specified whitelist to forward, the whitelisted cookies that you want CloudFront to forward to your origin. + // +listType=set + WhitelistedNames []*string `json:"whitelistedNames,omitempty" tf:"whitelisted_names,omitempty"` +} + +type OrderedCacheBehaviorForwardedValuesCookiesParameters struct { + + // Whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If whitelist, you must include the subsequent whitelisted_names. + // +kubebuilder:validation:Optional + Forward *string `json:"forward" tf:"forward,omitempty"` + + // If you have specified whitelist to forward, the whitelisted cookies that you want CloudFront to forward to your origin. 
+ // +kubebuilder:validation:Optional + // +listType=set + WhitelistedNames []*string `json:"whitelistedNames,omitempty" tf:"whitelisted_names,omitempty"` +} + +type OrderedCacheBehaviorForwardedValuesInitParameters struct { + + // The forwarded values cookies that specifies how CloudFront handles cookies (maximum one). + Cookies *OrderedCacheBehaviorForwardedValuesCookiesInitParameters `json:"cookies,omitempty" tf:"cookies,omitempty"` + + // Headers, if any, that you want CloudFront to vary upon for this cache behavior. Specify * to include all headers. + // +listType=set + Headers []*string `json:"headers,omitempty" tf:"headers,omitempty"` + + // Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. + QueryString *bool `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // When specified, along with a value of true for query_string, all query strings are forwarded, however only the query string keys listed in this argument are cached. When omitted with a value of true for query_string, all query string keys are cached. + QueryStringCacheKeys []*string `json:"queryStringCacheKeys,omitempty" tf:"query_string_cache_keys,omitempty"` +} + +type OrderedCacheBehaviorForwardedValuesObservation struct { + + // The forwarded values cookies that specifies how CloudFront handles cookies (maximum one). + Cookies *OrderedCacheBehaviorForwardedValuesCookiesObservation `json:"cookies,omitempty" tf:"cookies,omitempty"` + + // Headers, if any, that you want CloudFront to vary upon for this cache behavior. Specify * to include all headers. + // +listType=set + Headers []*string `json:"headers,omitempty" tf:"headers,omitempty"` + + // Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. 
+ QueryString *bool `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // When specified, along with a value of true for query_string, all query strings are forwarded, however only the query string keys listed in this argument are cached. When omitted with a value of true for query_string, all query string keys are cached. + QueryStringCacheKeys []*string `json:"queryStringCacheKeys,omitempty" tf:"query_string_cache_keys,omitempty"` +} + +type OrderedCacheBehaviorForwardedValuesParameters struct { + + // The forwarded values cookies that specifies how CloudFront handles cookies (maximum one). + // +kubebuilder:validation:Optional + Cookies *OrderedCacheBehaviorForwardedValuesCookiesParameters `json:"cookies" tf:"cookies,omitempty"` + + // Headers, if any, that you want CloudFront to vary upon for this cache behavior. Specify * to include all headers. + // +kubebuilder:validation:Optional + // +listType=set + Headers []*string `json:"headers,omitempty" tf:"headers,omitempty"` + + // Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. + // +kubebuilder:validation:Optional + QueryString *bool `json:"queryString" tf:"query_string,omitempty"` + + // When specified, along with a value of true for query_string, all query strings are forwarded, however only the query string keys listed in this argument are cached. When omitted with a value of true for query_string, all query string keys are cached. + // +kubebuilder:validation:Optional + QueryStringCacheKeys []*string `json:"queryStringCacheKeys,omitempty" tf:"query_string_cache_keys,omitempty"` +} + +type OrderedCacheBehaviorFunctionAssociationInitParameters struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // ARN of the CloudFront function. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta1.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` + + // Reference to a Function in cloudfront to populate functionArn. + // +kubebuilder:validation:Optional + FunctionArnRef *v1.Reference `json:"functionArnRef,omitempty" tf:"-"` + + // Selector for a Function in cloudfront to populate functionArn. + // +kubebuilder:validation:Optional + FunctionArnSelector *v1.Selector `json:"functionArnSelector,omitempty" tf:"-"` +} + +type OrderedCacheBehaviorFunctionAssociationObservation struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // ARN of the CloudFront function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` +} + +type OrderedCacheBehaviorFunctionAssociationParameters struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType" tf:"event_type,omitempty"` + + // ARN of the CloudFront function. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta1.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` + + // Reference to a Function in cloudfront to populate functionArn. + // +kubebuilder:validation:Optional + FunctionArnRef *v1.Reference `json:"functionArnRef,omitempty" tf:"-"` + + // Selector for a Function in cloudfront to populate functionArn. 
+ // +kubebuilder:validation:Optional + FunctionArnSelector *v1.Selector `json:"functionArnSelector,omitempty" tf:"-"` +} + +type OrderedCacheBehaviorInitParameters struct { + + // Controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. + // +listType=set + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // Unique identifier of the cache policy that is attached to the cache behavior. If configuring the default_cache_behavior either cache_policy_id or forwarded_values must be set. + CachePolicyID *string `json:"cachePolicyId,omitempty" tf:"cache_policy_id,omitempty"` + + // Controls whether CloudFront caches the response to requests using the specified HTTP methods. + // +listType=set + CachedMethods []*string `json:"cachedMethods,omitempty" tf:"cached_methods,omitempty"` + + // Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header (default: false). + Compress *bool `json:"compress,omitempty" tf:"compress,omitempty"` + + // Default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request in the absence of an Cache-Control max-age or Expires header. + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // Field level encryption configuration ID. + FieldLevelEncryptionID *string `json:"fieldLevelEncryptionId,omitempty" tf:"field_level_encryption_id,omitempty"` + + // The forwarded values configuration that specifies how CloudFront handles query strings, cookies and headers (maximum one). + ForwardedValues *OrderedCacheBehaviorForwardedValuesInitParameters `json:"forwardedValues,omitempty" tf:"forwarded_values,omitempty"` + + // A config block that triggers a cloudfront function with specific actions (maximum 2). 
+ FunctionAssociation []OrderedCacheBehaviorFunctionAssociationInitParameters `json:"functionAssociation,omitempty" tf:"function_association,omitempty"` + + // A config block that triggers a lambda function with specific actions (maximum 4). + LambdaFunctionAssociation []OrderedCacheBehaviorLambdaFunctionAssociationInitParameters `json:"lambdaFunctionAssociation,omitempty" tf:"lambda_function_association,omitempty"` + + // Maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. Only effective in the presence of Cache-Control max-age, Cache-Control s-maxage, and Expires headers. + MaxTTL *float64 `json:"maxTtl,omitempty" tf:"max_ttl,omitempty"` + + // Minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. Defaults to 0 seconds. + MinTTL *float64 `json:"minTtl,omitempty" tf:"min_ttl,omitempty"` + + // Unique identifier of the origin request policy that is attached to the behavior. + OriginRequestPolicyID *string `json:"originRequestPolicyId,omitempty" tf:"origin_request_policy_id,omitempty"` + + // Pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // ARN of the real-time log configuration that is attached to this cache behavior. + RealtimeLogConfigArn *string `json:"realtimeLogConfigArn,omitempty" tf:"realtime_log_config_arn,omitempty"` + + // Identifier for a response headers policy. + ResponseHeadersPolicyID *string `json:"responseHeadersPolicyId,omitempty" tf:"response_headers_policy_id,omitempty"` + + // Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. 
+ SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` + + // Value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior. + TargetOriginID *string `json:"targetOriginId,omitempty" tf:"target_origin_id,omitempty"` + + // List of key group IDs that CloudFront can use to validate signed URLs or signed cookies. See the CloudFront User Guide for more information about this feature. + TrustedKeyGroups []*string `json:"trustedKeyGroups,omitempty" tf:"trusted_key_groups,omitempty"` + + // List of AWS account IDs (or self) that you want to allow to create signed URLs for private content. See the CloudFront User Guide for more information about this feature. + TrustedSigners []*string `json:"trustedSigners,omitempty" tf:"trusted_signers,omitempty"` + + // Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. One of allow-all, https-only, or redirect-to-https. + ViewerProtocolPolicy *string `json:"viewerProtocolPolicy,omitempty" tf:"viewer_protocol_policy,omitempty"` +} + +type OrderedCacheBehaviorLambdaFunctionAssociationInitParameters struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // When set to true it exposes the request body to the lambda function. Defaults to false. Valid values: true, false. + IncludeBody *bool `json:"includeBody,omitempty" tf:"include_body,omitempty"` + + // ARN of the Lambda function. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("qualified_arn",true) + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Reference to a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnRef *v1.Reference `json:"lambdaArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnSelector *v1.Selector `json:"lambdaArnSelector,omitempty" tf:"-"` +} + +type OrderedCacheBehaviorLambdaFunctionAssociationObservation struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // When set to true it exposes the request body to the lambda function. Defaults to false. Valid values: true, false. + IncludeBody *bool `json:"includeBody,omitempty" tf:"include_body,omitempty"` + + // ARN of the Lambda function. + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` +} + +type OrderedCacheBehaviorLambdaFunctionAssociationParameters struct { + + // Specific event to trigger this function. Valid values: viewer-request, origin-request, viewer-response, origin-response. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType" tf:"event_type,omitempty"` + + // When set to true it exposes the request body to the lambda function. Defaults to false. Valid values: true, false. + // +kubebuilder:validation:Optional + IncludeBody *bool `json:"includeBody,omitempty" tf:"include_body,omitempty"` + + // ARN of the Lambda function. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("qualified_arn",true) + // +kubebuilder:validation:Optional + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Reference to a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnRef *v1.Reference `json:"lambdaArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnSelector *v1.Selector `json:"lambdaArnSelector,omitempty" tf:"-"` +} + +type OrderedCacheBehaviorObservation struct { + + // Controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. + // +listType=set + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // Unique identifier of the cache policy that is attached to the cache behavior. If configuring the default_cache_behavior either cache_policy_id or forwarded_values must be set. + CachePolicyID *string `json:"cachePolicyId,omitempty" tf:"cache_policy_id,omitempty"` + + // Controls whether CloudFront caches the response to requests using the specified HTTP methods. + // +listType=set + CachedMethods []*string `json:"cachedMethods,omitempty" tf:"cached_methods,omitempty"` + + // Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header (default: false). + Compress *bool `json:"compress,omitempty" tf:"compress,omitempty"` + + // Default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request in the absence of an Cache-Control max-age or Expires header. + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // Field level encryption configuration ID. 
+ FieldLevelEncryptionID *string `json:"fieldLevelEncryptionId,omitempty" tf:"field_level_encryption_id,omitempty"` + + // The forwarded values configuration that specifies how CloudFront handles query strings, cookies and headers (maximum one). + ForwardedValues *OrderedCacheBehaviorForwardedValuesObservation `json:"forwardedValues,omitempty" tf:"forwarded_values,omitempty"` + + // A config block that triggers a cloudfront function with specific actions (maximum 2). + FunctionAssociation []OrderedCacheBehaviorFunctionAssociationObservation `json:"functionAssociation,omitempty" tf:"function_association,omitempty"` + + // A config block that triggers a lambda function with specific actions (maximum 4). + LambdaFunctionAssociation []OrderedCacheBehaviorLambdaFunctionAssociationObservation `json:"lambdaFunctionAssociation,omitempty" tf:"lambda_function_association,omitempty"` + + // Maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. Only effective in the presence of Cache-Control max-age, Cache-Control s-maxage, and Expires headers. + MaxTTL *float64 `json:"maxTtl,omitempty" tf:"max_ttl,omitempty"` + + // Minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. Defaults to 0 seconds. + MinTTL *float64 `json:"minTtl,omitempty" tf:"min_ttl,omitempty"` + + // Unique identifier of the origin request policy that is attached to the behavior. + OriginRequestPolicyID *string `json:"originRequestPolicyId,omitempty" tf:"origin_request_policy_id,omitempty"` + + // Pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // ARN of the real-time log configuration that is attached to this cache behavior. 
+ RealtimeLogConfigArn *string `json:"realtimeLogConfigArn,omitempty" tf:"realtime_log_config_arn,omitempty"` + + // Identifier for a response headers policy. + ResponseHeadersPolicyID *string `json:"responseHeadersPolicyId,omitempty" tf:"response_headers_policy_id,omitempty"` + + // Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` + + // Value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior. + TargetOriginID *string `json:"targetOriginId,omitempty" tf:"target_origin_id,omitempty"` + + // List of key group IDs that CloudFront can use to validate signed URLs or signed cookies. See the CloudFront User Guide for more information about this feature. + TrustedKeyGroups []*string `json:"trustedKeyGroups,omitempty" tf:"trusted_key_groups,omitempty"` + + // List of AWS account IDs (or self) that you want to allow to create signed URLs for private content. See the CloudFront User Guide for more information about this feature. + TrustedSigners []*string `json:"trustedSigners,omitempty" tf:"trusted_signers,omitempty"` + + // Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. One of allow-all, https-only, or redirect-to-https. + ViewerProtocolPolicy *string `json:"viewerProtocolPolicy,omitempty" tf:"viewer_protocol_policy,omitempty"` +} + +type OrderedCacheBehaviorParameters struct { + + // Controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. 
+ // +kubebuilder:validation:Optional + // +listType=set + AllowedMethods []*string `json:"allowedMethods" tf:"allowed_methods,omitempty"` + + // Unique identifier of the cache policy that is attached to the cache behavior. If configuring the default_cache_behavior either cache_policy_id or forwarded_values must be set. + // +kubebuilder:validation:Optional + CachePolicyID *string `json:"cachePolicyId,omitempty" tf:"cache_policy_id,omitempty"` + + // Controls whether CloudFront caches the response to requests using the specified HTTP methods. + // +kubebuilder:validation:Optional + // +listType=set + CachedMethods []*string `json:"cachedMethods" tf:"cached_methods,omitempty"` + + // Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header (default: false). + // +kubebuilder:validation:Optional + Compress *bool `json:"compress,omitempty" tf:"compress,omitempty"` + + // Default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request in the absence of an Cache-Control max-age or Expires header. + // +kubebuilder:validation:Optional + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // Field level encryption configuration ID. + // +kubebuilder:validation:Optional + FieldLevelEncryptionID *string `json:"fieldLevelEncryptionId,omitempty" tf:"field_level_encryption_id,omitempty"` + + // The forwarded values configuration that specifies how CloudFront handles query strings, cookies and headers (maximum one). + // +kubebuilder:validation:Optional + ForwardedValues *OrderedCacheBehaviorForwardedValuesParameters `json:"forwardedValues,omitempty" tf:"forwarded_values,omitempty"` + + // A config block that triggers a cloudfront function with specific actions (maximum 2). 
+ // +kubebuilder:validation:Optional + FunctionAssociation []OrderedCacheBehaviorFunctionAssociationParameters `json:"functionAssociation,omitempty" tf:"function_association,omitempty"` + + // A config block that triggers a lambda function with specific actions (maximum 4). + // +kubebuilder:validation:Optional + LambdaFunctionAssociation []OrderedCacheBehaviorLambdaFunctionAssociationParameters `json:"lambdaFunctionAssociation,omitempty" tf:"lambda_function_association,omitempty"` + + // Maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. Only effective in the presence of Cache-Control max-age, Cache-Control s-maxage, and Expires headers. + // +kubebuilder:validation:Optional + MaxTTL *float64 `json:"maxTtl,omitempty" tf:"max_ttl,omitempty"` + + // Minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. Defaults to 0 seconds. + // +kubebuilder:validation:Optional + MinTTL *float64 `json:"minTtl,omitempty" tf:"min_ttl,omitempty"` + + // Unique identifier of the origin request policy that is attached to the behavior. + // +kubebuilder:validation:Optional + OriginRequestPolicyID *string `json:"originRequestPolicyId,omitempty" tf:"origin_request_policy_id,omitempty"` + + // Pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. + // +kubebuilder:validation:Optional + PathPattern *string `json:"pathPattern" tf:"path_pattern,omitempty"` + + // ARN of the real-time log configuration that is attached to this cache behavior. + // +kubebuilder:validation:Optional + RealtimeLogConfigArn *string `json:"realtimeLogConfigArn,omitempty" tf:"realtime_log_config_arn,omitempty"` + + // Identifier for a response headers policy. 
+ // +kubebuilder:validation:Optional + ResponseHeadersPolicyID *string `json:"responseHeadersPolicyId,omitempty" tf:"response_headers_policy_id,omitempty"` + + // Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. + // +kubebuilder:validation:Optional + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` + + // Value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior. + // +kubebuilder:validation:Optional + TargetOriginID *string `json:"targetOriginId" tf:"target_origin_id,omitempty"` + + // List of key group IDs that CloudFront can use to validate signed URLs or signed cookies. See the CloudFront User Guide for more information about this feature. + // +kubebuilder:validation:Optional + TrustedKeyGroups []*string `json:"trustedKeyGroups,omitempty" tf:"trusted_key_groups,omitempty"` + + // List of AWS account IDs (or self) that you want to allow to create signed URLs for private content. See the CloudFront User Guide for more information about this feature. + // +kubebuilder:validation:Optional + TrustedSigners []*string `json:"trustedSigners,omitempty" tf:"trusted_signers,omitempty"` + + // Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. One of allow-all, https-only, or redirect-to-https. + // +kubebuilder:validation:Optional + ViewerProtocolPolicy *string `json:"viewerProtocolPolicy" tf:"viewer_protocol_policy,omitempty"` +} + +type OriginGroupInitParameters struct { + + // The failover criteria for when to failover to the secondary origin. 
+ FailoverCriteria *FailoverCriteriaInitParameters `json:"failoverCriteria,omitempty" tf:"failover_criteria,omitempty"` + + // Ordered member configuration blocks assigned to the origin group, where the first member is the primary origin. You must specify two members. + Member []MemberInitParameters `json:"member,omitempty" tf:"member,omitempty"` + + // Unique identifier for the origin. + OriginID *string `json:"originId,omitempty" tf:"origin_id,omitempty"` +} + +type OriginGroupObservation struct { + + // The failover criteria for when to failover to the secondary origin. + FailoverCriteria *FailoverCriteriaObservation `json:"failoverCriteria,omitempty" tf:"failover_criteria,omitempty"` + + // Ordered member configuration blocks assigned to the origin group, where the first member is the primary origin. You must specify two members. + Member []MemberObservation `json:"member,omitempty" tf:"member,omitempty"` + + // Unique identifier for the origin. + OriginID *string `json:"originId,omitempty" tf:"origin_id,omitempty"` +} + +type OriginGroupParameters struct { + + // The failover criteria for when to failover to the secondary origin. + // +kubebuilder:validation:Optional + FailoverCriteria *FailoverCriteriaParameters `json:"failoverCriteria" tf:"failover_criteria,omitempty"` + + // Ordered member configuration blocks assigned to the origin group, where the first member is the primary origin. You must specify two members. + // +kubebuilder:validation:Optional + Member []MemberParameters `json:"member" tf:"member,omitempty"` + + // Unique identifier for the origin. + // +kubebuilder:validation:Optional + OriginID *string `json:"originId" tf:"origin_id,omitempty"` +} + +type OriginInitParameters struct { + + // Number of times that CloudFront attempts to connect to the origin. Must be between 1-3. Defaults to 3. 
+ ConnectionAttempts *float64 `json:"connectionAttempts,omitempty" tf:"connection_attempts,omitempty"` + + // Number of seconds that CloudFront waits when trying to establish a connection to the origin. Must be between 1-10. Defaults to 10. + ConnectionTimeout *float64 `json:"connectionTimeout,omitempty" tf:"connection_timeout,omitempty"` + + // One or more sub-resources with name and value parameters that specify header data that will be sent to the origin (multiples allowed). + CustomHeader []CustomHeaderInitParameters `json:"customHeader,omitempty" tf:"custom_header,omitempty"` + + // The CloudFront custom origin configuration information. If an S3 origin is required, use origin_access_control_id or s3_origin_config instead. + CustomOriginConfig *CustomOriginConfigInitParameters `json:"customOriginConfig,omitempty" tf:"custom_origin_config,omitempty"` + + // DNS domain name of either the S3 bucket, or web site of your custom origin. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Unique identifier of a CloudFront origin access control for this origin. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta1.OriginAccessControl + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + OriginAccessControlID *string `json:"originAccessControlId,omitempty" tf:"origin_access_control_id,omitempty"` + + // Reference to a OriginAccessControl in cloudfront to populate originAccessControlId. + // +kubebuilder:validation:Optional + OriginAccessControlIDRef *v1.Reference `json:"originAccessControlIdRef,omitempty" tf:"-"` + + // Selector for a OriginAccessControl in cloudfront to populate originAccessControlId. + // +kubebuilder:validation:Optional + OriginAccessControlIDSelector *v1.Selector `json:"originAccessControlIdSelector,omitempty" tf:"-"` + + // Unique identifier for the origin. 
+ OriginID *string `json:"originId,omitempty" tf:"origin_id,omitempty"` + + // Optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. + OriginPath *string `json:"originPath,omitempty" tf:"origin_path,omitempty"` + + // CloudFront Origin Shield configuration information. Using Origin Shield can help reduce the load on your origin. For more information, see Using Origin Shield in the Amazon CloudFront Developer Guide. + OriginShield *OriginShieldInitParameters `json:"originShield,omitempty" tf:"origin_shield,omitempty"` + + // CloudFront S3 origin configuration information. If a custom origin is required, use custom_origin_config instead. + S3OriginConfig *S3OriginConfigInitParameters `json:"s3OriginConfig,omitempty" tf:"s3_origin_config,omitempty"` +} + +type OriginObservation struct { + + // Number of times that CloudFront attempts to connect to the origin. Must be between 1-3. Defaults to 3. + ConnectionAttempts *float64 `json:"connectionAttempts,omitempty" tf:"connection_attempts,omitempty"` + + // Number of seconds that CloudFront waits when trying to establish a connection to the origin. Must be between 1-10. Defaults to 10. + ConnectionTimeout *float64 `json:"connectionTimeout,omitempty" tf:"connection_timeout,omitempty"` + + // One or more sub-resources with name and value parameters that specify header data that will be sent to the origin (multiples allowed). + CustomHeader []CustomHeaderObservation `json:"customHeader,omitempty" tf:"custom_header,omitempty"` + + // The CloudFront custom origin configuration information. If an S3 origin is required, use origin_access_control_id or s3_origin_config instead. + CustomOriginConfig *CustomOriginConfigObservation `json:"customOriginConfig,omitempty" tf:"custom_origin_config,omitempty"` + + // DNS domain name of either the S3 bucket, or web site of your custom origin. 
+ DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Unique identifier of a CloudFront origin access control for this origin. + OriginAccessControlID *string `json:"originAccessControlId,omitempty" tf:"origin_access_control_id,omitempty"` + + // Unique identifier for the origin. + OriginID *string `json:"originId,omitempty" tf:"origin_id,omitempty"` + + // Optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. + OriginPath *string `json:"originPath,omitempty" tf:"origin_path,omitempty"` + + // CloudFront Origin Shield configuration information. Using Origin Shield can help reduce the load on your origin. For more information, see Using Origin Shield in the Amazon CloudFront Developer Guide. + OriginShield *OriginShieldObservation `json:"originShield,omitempty" tf:"origin_shield,omitempty"` + + // CloudFront S3 origin configuration information. If a custom origin is required, use custom_origin_config instead. + S3OriginConfig *S3OriginConfigObservation `json:"s3OriginConfig,omitempty" tf:"s3_origin_config,omitempty"` +} + +type OriginParameters struct { + + // Number of times that CloudFront attempts to connect to the origin. Must be between 1-3. Defaults to 3. + // +kubebuilder:validation:Optional + ConnectionAttempts *float64 `json:"connectionAttempts,omitempty" tf:"connection_attempts,omitempty"` + + // Number of seconds that CloudFront waits when trying to establish a connection to the origin. Must be between 1-10. Defaults to 10. + // +kubebuilder:validation:Optional + ConnectionTimeout *float64 `json:"connectionTimeout,omitempty" tf:"connection_timeout,omitempty"` + + // One or more sub-resources with name and value parameters that specify header data that will be sent to the origin (multiples allowed). 
+ // +kubebuilder:validation:Optional + CustomHeader []CustomHeaderParameters `json:"customHeader,omitempty" tf:"custom_header,omitempty"` + + // The CloudFront custom origin configuration information. If an S3 origin is required, use origin_access_control_id or s3_origin_config instead. + // +kubebuilder:validation:Optional + CustomOriginConfig *CustomOriginConfigParameters `json:"customOriginConfig,omitempty" tf:"custom_origin_config,omitempty"` + + // DNS domain name of either the S3 bucket, or web site of your custom origin. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // Unique identifier of a CloudFront origin access control for this origin. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta1.OriginAccessControl + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + OriginAccessControlID *string `json:"originAccessControlId,omitempty" tf:"origin_access_control_id,omitempty"` + + // Reference to a OriginAccessControl in cloudfront to populate originAccessControlId. + // +kubebuilder:validation:Optional + OriginAccessControlIDRef *v1.Reference `json:"originAccessControlIdRef,omitempty" tf:"-"` + + // Selector for a OriginAccessControl in cloudfront to populate originAccessControlId. + // +kubebuilder:validation:Optional + OriginAccessControlIDSelector *v1.Selector `json:"originAccessControlIdSelector,omitempty" tf:"-"` + + // Unique identifier for the origin. + // +kubebuilder:validation:Optional + OriginID *string `json:"originId" tf:"origin_id,omitempty"` + + // Optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. + // +kubebuilder:validation:Optional + OriginPath *string `json:"originPath,omitempty" tf:"origin_path,omitempty"` + + // CloudFront Origin Shield configuration information. 
Using Origin Shield can help reduce the load on your origin. For more information, see Using Origin Shield in the Amazon CloudFront Developer Guide. + // +kubebuilder:validation:Optional + OriginShield *OriginShieldParameters `json:"originShield,omitempty" tf:"origin_shield,omitempty"` + + // CloudFront S3 origin configuration information. If a custom origin is required, use custom_origin_config instead. + // +kubebuilder:validation:Optional + S3OriginConfig *S3OriginConfigParameters `json:"s3OriginConfig,omitempty" tf:"s3_origin_config,omitempty"` +} + +type OriginShieldInitParameters struct { + + // Whether the distribution is enabled to accept end user requests for content. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // AWS Region for Origin Shield. To specify a region, use the region code, not the region name. For example, specify the US East (Ohio) region as us-east-2. + OriginShieldRegion *string `json:"originShieldRegion,omitempty" tf:"origin_shield_region,omitempty"` +} + +type OriginShieldObservation struct { + + // Whether the distribution is enabled to accept end user requests for content. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // AWS Region for Origin Shield. To specify a region, use the region code, not the region name. For example, specify the US East (Ohio) region as us-east-2. + OriginShieldRegion *string `json:"originShieldRegion,omitempty" tf:"origin_shield_region,omitempty"` +} + +type OriginShieldParameters struct { + + // Whether the distribution is enabled to accept end user requests for content. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // AWS Region for Origin Shield. To specify a region, use the region code, not the region name. For example, specify the US East (Ohio) region as us-east-2. 
+ // +kubebuilder:validation:Optional + OriginShieldRegion *string `json:"originShieldRegion,omitempty" tf:"origin_shield_region,omitempty"` +} + +type RestrictionsInitParameters struct { + GeoRestriction *GeoRestrictionInitParameters `json:"geoRestriction,omitempty" tf:"geo_restriction,omitempty"` +} + +type RestrictionsObservation struct { + GeoRestriction *GeoRestrictionObservation `json:"geoRestriction,omitempty" tf:"geo_restriction,omitempty"` +} + +type RestrictionsParameters struct { + + // +kubebuilder:validation:Optional + GeoRestriction *GeoRestrictionParameters `json:"geoRestriction" tf:"geo_restriction,omitempty"` +} + +type S3OriginConfigInitParameters struct { + + // The CloudFront origin access identity to associate with the origin. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta1.OriginAccessIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("cloudfront_access_identity_path",true) + OriginAccessIdentity *string `json:"originAccessIdentity,omitempty" tf:"origin_access_identity,omitempty"` + + // Reference to a OriginAccessIdentity in cloudfront to populate originAccessIdentity. + // +kubebuilder:validation:Optional + OriginAccessIdentityRef *v1.Reference `json:"originAccessIdentityRef,omitempty" tf:"-"` + + // Selector for a OriginAccessIdentity in cloudfront to populate originAccessIdentity. + // +kubebuilder:validation:Optional + OriginAccessIdentitySelector *v1.Selector `json:"originAccessIdentitySelector,omitempty" tf:"-"` +} + +type S3OriginConfigObservation struct { + + // The CloudFront origin access identity to associate with the origin. + OriginAccessIdentity *string `json:"originAccessIdentity,omitempty" tf:"origin_access_identity,omitempty"` +} + +type S3OriginConfigParameters struct { + + // The CloudFront origin access identity to associate with the origin. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta1.OriginAccessIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("cloudfront_access_identity_path",true) + // +kubebuilder:validation:Optional + OriginAccessIdentity *string `json:"originAccessIdentity,omitempty" tf:"origin_access_identity,omitempty"` + + // Reference to a OriginAccessIdentity in cloudfront to populate originAccessIdentity. + // +kubebuilder:validation:Optional + OriginAccessIdentityRef *v1.Reference `json:"originAccessIdentityRef,omitempty" tf:"-"` + + // Selector for a OriginAccessIdentity in cloudfront to populate originAccessIdentity. + // +kubebuilder:validation:Optional + OriginAccessIdentitySelector *v1.Selector `json:"originAccessIdentitySelector,omitempty" tf:"-"` +} + +type TrustedKeyGroupsInitParameters struct { +} + +type TrustedKeyGroupsObservation struct { + + // Whether the distribution is enabled to accept end user requests for content. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // List of nested attributes for each key group. + Items []ItemsObservation `json:"items,omitempty" tf:"items,omitempty"` +} + +type TrustedKeyGroupsParameters struct { +} + +type TrustedSignersInitParameters struct { +} + +type TrustedSignersItemsInitParameters struct { +} + +type TrustedSignersItemsObservation struct { + + // AWS account ID or self + AwsAccountNumber *string `json:"awsAccountNumber,omitempty" tf:"aws_account_number,omitempty"` + + // Set of CloudFront key pair IDs. + // +listType=set + KeyPairIds []*string `json:"keyPairIds,omitempty" tf:"key_pair_ids,omitempty"` +} + +type TrustedSignersItemsParameters struct { +} + +type TrustedSignersObservation struct { + + // Whether the distribution is enabled to accept end user requests for content. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // List of nested attributes for each key group. 
+ Items []TrustedSignersItemsObservation `json:"items,omitempty" tf:"items,omitempty"` +} + +type TrustedSignersParameters struct { +} + +type ViewerCertificateInitParameters struct { + + // ARN of the AWS Certificate Manager certificate that you wish to use with this distribution. Specify this, cloudfront_default_certificate, or iam_certificate_id. The ACM certificate must be in US-EAST-1. + AcmCertificateArn *string `json:"acmCertificateArn,omitempty" tf:"acm_certificate_arn,omitempty"` + + // true if you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name for your distribution. Specify this, acm_certificate_arn, or iam_certificate_id. + CloudfrontDefaultCertificate *bool `json:"cloudfrontDefaultCertificate,omitempty" tf:"cloudfront_default_certificate,omitempty"` + + // IAM certificate identifier of the custom viewer certificate for this distribution if you are using a custom domain. Specify this, acm_certificate_arn, or cloudfront_default_certificate. + IAMCertificateID *string `json:"iamCertificateId,omitempty" tf:"iam_certificate_id,omitempty"` + + // Minimum version of the SSL protocol that you want CloudFront to use for HTTPS connections. Can only be set if cloudfront_default_certificate = false. See all possible values in this table under "Security policy." Some examples include: TLSv1.2_2019 and TLSv1.2_2021. Default: TLSv1. NOTE: If you are using a custom certificate (specified with acm_certificate_arn or iam_certificate_id), and have specified sni-only in ssl_support_method, TLSv1 or later must be specified. If you have specified vip in ssl_support_method, only SSLv3 or TLSv1 can be specified. If you have specified cloudfront_default_certificate, TLSv1 must be specified. + MinimumProtocolVersion *string `json:"minimumProtocolVersion,omitempty" tf:"minimum_protocol_version,omitempty"` + + // How you want CloudFront to serve HTTPS requests. One of vip, sni-only, or static-ip. 
Required if you specify acm_certificate_arn or iam_certificate_id. NOTE: vip causes CloudFront to use a dedicated IP address and may incur extra charges. + SSLSupportMethod *string `json:"sslSupportMethod,omitempty" tf:"ssl_support_method,omitempty"` +} + +type ViewerCertificateObservation struct { + + // ARN of the AWS Certificate Manager certificate that you wish to use with this distribution. Specify this, cloudfront_default_certificate, or iam_certificate_id. The ACM certificate must be in US-EAST-1. + AcmCertificateArn *string `json:"acmCertificateArn,omitempty" tf:"acm_certificate_arn,omitempty"` + + // true if you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name for your distribution. Specify this, acm_certificate_arn, or iam_certificate_id. + CloudfrontDefaultCertificate *bool `json:"cloudfrontDefaultCertificate,omitempty" tf:"cloudfront_default_certificate,omitempty"` + + // IAM certificate identifier of the custom viewer certificate for this distribution if you are using a custom domain. Specify this, acm_certificate_arn, or cloudfront_default_certificate. + IAMCertificateID *string `json:"iamCertificateId,omitempty" tf:"iam_certificate_id,omitempty"` + + // Minimum version of the SSL protocol that you want CloudFront to use for HTTPS connections. Can only be set if cloudfront_default_certificate = false. See all possible values in this table under "Security policy." Some examples include: TLSv1.2_2019 and TLSv1.2_2021. Default: TLSv1. NOTE: If you are using a custom certificate (specified with acm_certificate_arn or iam_certificate_id), and have specified sni-only in ssl_support_method, TLSv1 or later must be specified. If you have specified vip in ssl_support_method, only SSLv3 or TLSv1 can be specified. If you have specified cloudfront_default_certificate, TLSv1 must be specified. 
+ MinimumProtocolVersion *string `json:"minimumProtocolVersion,omitempty" tf:"minimum_protocol_version,omitempty"` + + // How you want CloudFront to serve HTTPS requests. One of vip, sni-only, or static-ip. Required if you specify acm_certificate_arn or iam_certificate_id. NOTE: vip causes CloudFront to use a dedicated IP address and may incur extra charges. + SSLSupportMethod *string `json:"sslSupportMethod,omitempty" tf:"ssl_support_method,omitempty"` +} + +type ViewerCertificateParameters struct { + + // ARN of the AWS Certificate Manager certificate that you wish to use with this distribution. Specify this, cloudfront_default_certificate, or iam_certificate_id. The ACM certificate must be in US-EAST-1. + // +kubebuilder:validation:Optional + AcmCertificateArn *string `json:"acmCertificateArn,omitempty" tf:"acm_certificate_arn,omitempty"` + + // true if you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name for your distribution. Specify this, acm_certificate_arn, or iam_certificate_id. + // +kubebuilder:validation:Optional + CloudfrontDefaultCertificate *bool `json:"cloudfrontDefaultCertificate,omitempty" tf:"cloudfront_default_certificate,omitempty"` + + // IAM certificate identifier of the custom viewer certificate for this distribution if you are using a custom domain. Specify this, acm_certificate_arn, or cloudfront_default_certificate. + // +kubebuilder:validation:Optional + IAMCertificateID *string `json:"iamCertificateId,omitempty" tf:"iam_certificate_id,omitempty"` + + // Minimum version of the SSL protocol that you want CloudFront to use for HTTPS connections. Can only be set if cloudfront_default_certificate = false. See all possible values in this table under "Security policy." Some examples include: TLSv1.2_2019 and TLSv1.2_2021. Default: TLSv1. 
NOTE: If you are using a custom certificate (specified with acm_certificate_arn or iam_certificate_id), and have specified sni-only in ssl_support_method, TLSv1 or later must be specified. If you have specified vip in ssl_support_method, only SSLv3 or TLSv1 can be specified. If you have specified cloudfront_default_certificate, TLSv1 must be specified. + // +kubebuilder:validation:Optional + MinimumProtocolVersion *string `json:"minimumProtocolVersion,omitempty" tf:"minimum_protocol_version,omitempty"` + + // How you want CloudFront to serve HTTPS requests. One of vip, sni-only, or static-ip. Required if you specify acm_certificate_arn or iam_certificate_id. NOTE: vip causes CloudFront to use a dedicated IP address and may incur extra charges. + // +kubebuilder:validation:Optional + SSLSupportMethod *string `json:"sslSupportMethod,omitempty" tf:"ssl_support_method,omitempty"` +} + +// DistributionSpec defines the desired state of Distribution +type DistributionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DistributionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DistributionInitParameters `json:"initProvider,omitempty"` +} + +// DistributionStatus defines the observed state of Distribution. 
+type DistributionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DistributionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Distribution is the Schema for the Distributions API. Provides a CloudFront web distribution resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Distribution struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultCacheBehavior) || (has(self.initProvider) && has(self.initProvider.defaultCacheBehavior))",message="spec.forProvider.defaultCacheBehavior is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.enabled) || (has(self.initProvider) && has(self.initProvider.enabled))",message="spec.forProvider.enabled is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.origin) || (has(self.initProvider) && has(self.initProvider.origin))",message="spec.forProvider.origin is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.restrictions) || (has(self.initProvider) && has(self.initProvider.restrictions))",message="spec.forProvider.restrictions is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.viewerCertificate) || (has(self.initProvider) && has(self.initProvider.viewerCertificate))",message="spec.forProvider.viewerCertificate is a required parameter" + Spec DistributionSpec `json:"spec"` + Status DistributionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DistributionList contains a list of Distributions +type DistributionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Distribution `json:"items"` +} + +// Repository type metadata. +var ( + Distribution_Kind = "Distribution" + Distribution_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Distribution_Kind}.String() + Distribution_KindAPIVersion = Distribution_Kind + "." + CRDGroupVersion.String() + Distribution_GroupVersionKind = CRDGroupVersion.WithKind(Distribution_Kind) +) + +func init() { + SchemeBuilder.Register(&Distribution{}, &DistributionList{}) +} diff --git a/apis/cloudfront/v1beta2/zz_fieldlevelencryptionconfig_terraformed.go b/apis/cloudfront/v1beta2/zz_fieldlevelencryptionconfig_terraformed.go new file mode 100755 index 0000000000..818a7e69e5 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_fieldlevelencryptionconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FieldLevelEncryptionConfig +func (mg *FieldLevelEncryptionConfig) GetTerraformResourceType() string { + return "aws_cloudfront_field_level_encryption_config" +} + +// GetConnectionDetailsMapping for this FieldLevelEncryptionConfig +func (tr *FieldLevelEncryptionConfig) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FieldLevelEncryptionConfig +func (tr *FieldLevelEncryptionConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FieldLevelEncryptionConfig +func (tr *FieldLevelEncryptionConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FieldLevelEncryptionConfig +func (tr *FieldLevelEncryptionConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FieldLevelEncryptionConfig +func (tr *FieldLevelEncryptionConfig) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FieldLevelEncryptionConfig +func (tr *FieldLevelEncryptionConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} 
+ +// GetInitParameters of this FieldLevelEncryptionConfig +func (tr *FieldLevelEncryptionConfig) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FieldLevelEncryptionConfig +func (tr *FieldLevelEncryptionConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FieldLevelEncryptionConfig using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *FieldLevelEncryptionConfig) LateInitialize(attrs []byte) (bool, error) { + params := &FieldLevelEncryptionConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FieldLevelEncryptionConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudfront/v1beta2/zz_fieldlevelencryptionconfig_types.go b/apis/cloudfront/v1beta2/zz_fieldlevelencryptionconfig_types.go new file mode 100755 index 0000000000..567e1cc7f9 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_fieldlevelencryptionconfig_types.go @@ -0,0 +1,303 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContentTypeProfileConfigInitParameters struct { + + // Object that contains an attribute items that contains the list of configurations for a field-level encryption content type-profile. See Content Type Profile. + ContentTypeProfiles *ContentTypeProfilesInitParameters `json:"contentTypeProfiles,omitempty" tf:"content_type_profiles,omitempty"` + + // specifies what to do when an unknown content type is provided for the profile. If true, content is forwarded without being encrypted when the content type is unknown. If false (the default), an error is returned when the content type is unknown. 
+ ForwardWhenContentTypeIsUnknown *bool `json:"forwardWhenContentTypeIsUnknown,omitempty" tf:"forward_when_content_type_is_unknown,omitempty"` +} + +type ContentTypeProfileConfigObservation struct { + + // Object that contains an attribute items that contains the list of configurations for a field-level encryption content type-profile. See Content Type Profile. + ContentTypeProfiles *ContentTypeProfilesObservation `json:"contentTypeProfiles,omitempty" tf:"content_type_profiles,omitempty"` + + // specifies what to do when an unknown content type is provided for the profile. If true, content is forwarded without being encrypted when the content type is unknown. If false (the default), an error is returned when the content type is unknown. + ForwardWhenContentTypeIsUnknown *bool `json:"forwardWhenContentTypeIsUnknown,omitempty" tf:"forward_when_content_type_is_unknown,omitempty"` +} + +type ContentTypeProfileConfigParameters struct { + + // Object that contains an attribute items that contains the list of configurations for a field-level encryption content type-profile. See Content Type Profile. + // +kubebuilder:validation:Optional + ContentTypeProfiles *ContentTypeProfilesParameters `json:"contentTypeProfiles" tf:"content_type_profiles,omitempty"` + + // specifies what to do when an unknown content type is provided for the profile. If true, content is forwarded without being encrypted when the content type is unknown. If false (the default), an error is returned when the content type is unknown. + // +kubebuilder:validation:Optional + ForwardWhenContentTypeIsUnknown *bool `json:"forwardWhenContentTypeIsUnknown" tf:"forward_when_content_type_is_unknown,omitempty"` +} + +type ContentTypeProfilesInitParameters struct { + Items []ContentTypeProfilesItemsInitParameters `json:"items,omitempty" tf:"items,omitempty"` +} + +type ContentTypeProfilesItemsInitParameters struct { + + // The content type for a field-level encryption content type-profile mapping. 
Valid value is application/x-www-form-urlencoded. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // The format for a field-level encryption content type-profile mapping. Valid value is URLEncoded. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The profile ID for a field-level encryption content type-profile mapping. + ProfileID *string `json:"profileId,omitempty" tf:"profile_id,omitempty"` +} + +type ContentTypeProfilesItemsObservation struct { + + // The content type for a field-level encryption content type-profile mapping. Valid value is application/x-www-form-urlencoded. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // The format for a field-level encryption content type-profile mapping. Valid value is URLEncoded. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The profile ID for a field-level encryption content type-profile mapping. + ProfileID *string `json:"profileId,omitempty" tf:"profile_id,omitempty"` +} + +type ContentTypeProfilesItemsParameters struct { + + // The content type for a field-level encryption content type-profile mapping. Valid value is application/x-www-form-urlencoded. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // The format for a field-level encryption content type-profile mapping. Valid value is URLEncoded. + // +kubebuilder:validation:Optional + Format *string `json:"format" tf:"format,omitempty"` + + // The profile ID for a field-level encryption content type-profile mapping. 
+ // +kubebuilder:validation:Optional + ProfileID *string `json:"profileId,omitempty" tf:"profile_id,omitempty"` +} + +type ContentTypeProfilesObservation struct { + Items []ContentTypeProfilesItemsObservation `json:"items,omitempty" tf:"items,omitempty"` +} + +type ContentTypeProfilesParameters struct { + + // +kubebuilder:validation:Optional + Items []ContentTypeProfilesItemsParameters `json:"items" tf:"items,omitempty"` +} + +type FieldLevelEncryptionConfigInitParameters struct { + + // An optional comment about the Field Level Encryption Config. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Content Type Profile Config specifies when to forward content if a content type isn't recognized and profiles to use as by default in a request if a query argument doesn't specify a profile to use. + ContentTypeProfileConfig *ContentTypeProfileConfigInitParameters `json:"contentTypeProfileConfig,omitempty" tf:"content_type_profile_config,omitempty"` + + // Query Arg Profile Config that specifies when to forward content if a profile isn't found and the profile that can be provided as a query argument in a request. + QueryArgProfileConfig *QueryArgProfileConfigInitParameters `json:"queryArgProfileConfig,omitempty" tf:"query_arg_profile_config,omitempty"` +} + +type FieldLevelEncryptionConfigObservation struct { + + // Internal value used by CloudFront to allow future updates to the Field Level Encryption Config. + CallerReference *string `json:"callerReference,omitempty" tf:"caller_reference,omitempty"` + + // An optional comment about the Field Level Encryption Config. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Content Type Profile Config specifies when to forward content if a content type isn't recognized and profiles to use as by default in a request if a query argument doesn't specify a profile to use. 
+ ContentTypeProfileConfig *ContentTypeProfileConfigObservation `json:"contentTypeProfileConfig,omitempty" tf:"content_type_profile_config,omitempty"` + + // The current version of the Field Level Encryption Config. For example: E2QWRUHAPOMQZL. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // The identifier for the Field Level Encryption Config. For example: K3D5EWEUDCCXON. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Query Arg Profile Config that specifies when to forward content if a profile isn't found and the profile that can be provided as a query argument in a request. + QueryArgProfileConfig *QueryArgProfileConfigObservation `json:"queryArgProfileConfig,omitempty" tf:"query_arg_profile_config,omitempty"` +} + +type FieldLevelEncryptionConfigParameters struct { + + // An optional comment about the Field Level Encryption Config. + // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Content Type Profile Config specifies when to forward content if a content type isn't recognized and profiles to use as by default in a request if a query argument doesn't specify a profile to use. + // +kubebuilder:validation:Optional + ContentTypeProfileConfig *ContentTypeProfileConfigParameters `json:"contentTypeProfileConfig,omitempty" tf:"content_type_profile_config,omitempty"` + + // Query Arg Profile Config that specifies when to forward content if a profile isn't found and the profile that can be provided as a query argument in a request. + // +kubebuilder:validation:Optional + QueryArgProfileConfig *QueryArgProfileConfigParameters `json:"queryArgProfileConfig,omitempty" tf:"query_arg_profile_config,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type QueryArgProfileConfigInitParameters struct { + + // Flag to set if you want a request to be forwarded to the origin even if the profile specified by the field-level encryption query argument, fle-profile, is unknown. + ForwardWhenQueryArgProfileIsUnknown *bool `json:"forwardWhenQueryArgProfileIsUnknown,omitempty" tf:"forward_when_query_arg_profile_is_unknown,omitempty"` + + // Object that contains an attribute items that contains the list of profiles specified for query argument-profile mapping for field-level encryption. see Query Arg Profile. + QueryArgProfiles *QueryArgProfilesInitParameters `json:"queryArgProfiles,omitempty" tf:"query_arg_profiles,omitempty"` +} + +type QueryArgProfileConfigObservation struct { + + // Flag to set if you want a request to be forwarded to the origin even if the profile specified by the field-level encryption query argument, fle-profile, is unknown. + ForwardWhenQueryArgProfileIsUnknown *bool `json:"forwardWhenQueryArgProfileIsUnknown,omitempty" tf:"forward_when_query_arg_profile_is_unknown,omitempty"` + + // Object that contains an attribute items that contains the list of profiles specified for query argument-profile mapping for field-level encryption. see Query Arg Profile. + QueryArgProfiles *QueryArgProfilesObservation `json:"queryArgProfiles,omitempty" tf:"query_arg_profiles,omitempty"` +} + +type QueryArgProfileConfigParameters struct { + + // Flag to set if you want a request to be forwarded to the origin even if the profile specified by the field-level encryption query argument, fle-profile, is unknown. 
+ // +kubebuilder:validation:Optional + ForwardWhenQueryArgProfileIsUnknown *bool `json:"forwardWhenQueryArgProfileIsUnknown" tf:"forward_when_query_arg_profile_is_unknown,omitempty"` + + // Object that contains an attribute items that contains the list of profiles specified for query argument-profile mapping for field-level encryption. see Query Arg Profile. + // +kubebuilder:validation:Optional + QueryArgProfiles *QueryArgProfilesParameters `json:"queryArgProfiles,omitempty" tf:"query_arg_profiles,omitempty"` +} + +type QueryArgProfilesInitParameters struct { + Items []QueryArgProfilesItemsInitParameters `json:"items,omitempty" tf:"items,omitempty"` +} + +type QueryArgProfilesItemsInitParameters struct { + + // The profile ID for a field-level encryption content type-profile mapping. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta2.FieldLevelEncryptionProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ProfileID *string `json:"profileId,omitempty" tf:"profile_id,omitempty"` + + // Reference to a FieldLevelEncryptionProfile in cloudfront to populate profileId. + // +kubebuilder:validation:Optional + ProfileIDRef *v1.Reference `json:"profileIdRef,omitempty" tf:"-"` + + // Selector for a FieldLevelEncryptionProfile in cloudfront to populate profileId. + // +kubebuilder:validation:Optional + ProfileIDSelector *v1.Selector `json:"profileIdSelector,omitempty" tf:"-"` + + // Query argument for field-level encryption query argument-profile mapping. + QueryArg *string `json:"queryArg,omitempty" tf:"query_arg,omitempty"` +} + +type QueryArgProfilesItemsObservation struct { + + // The profile ID for a field-level encryption content type-profile mapping. + ProfileID *string `json:"profileId,omitempty" tf:"profile_id,omitempty"` + + // Query argument for field-level encryption query argument-profile mapping. 
+ QueryArg *string `json:"queryArg,omitempty" tf:"query_arg,omitempty"` +} + +type QueryArgProfilesItemsParameters struct { + + // The profile ID for a field-level encryption content type-profile mapping. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta2.FieldLevelEncryptionProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ProfileID *string `json:"profileId,omitempty" tf:"profile_id,omitempty"` + + // Reference to a FieldLevelEncryptionProfile in cloudfront to populate profileId. + // +kubebuilder:validation:Optional + ProfileIDRef *v1.Reference `json:"profileIdRef,omitempty" tf:"-"` + + // Selector for a FieldLevelEncryptionProfile in cloudfront to populate profileId. + // +kubebuilder:validation:Optional + ProfileIDSelector *v1.Selector `json:"profileIdSelector,omitempty" tf:"-"` + + // Query argument for field-level encryption query argument-profile mapping. + // +kubebuilder:validation:Optional + QueryArg *string `json:"queryArg" tf:"query_arg,omitempty"` +} + +type QueryArgProfilesObservation struct { + Items []QueryArgProfilesItemsObservation `json:"items,omitempty" tf:"items,omitempty"` +} + +type QueryArgProfilesParameters struct { + + // +kubebuilder:validation:Optional + Items []QueryArgProfilesItemsParameters `json:"items,omitempty" tf:"items,omitempty"` +} + +// FieldLevelEncryptionConfigSpec defines the desired state of FieldLevelEncryptionConfig +type FieldLevelEncryptionConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FieldLevelEncryptionConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FieldLevelEncryptionConfigInitParameters `json:"initProvider,omitempty"` +} + +// FieldLevelEncryptionConfigStatus defines the observed state of FieldLevelEncryptionConfig. +type FieldLevelEncryptionConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FieldLevelEncryptionConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FieldLevelEncryptionConfig is the Schema for the FieldLevelEncryptionConfigs API. Provides a CloudFront Field-level Encryption Config resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type FieldLevelEncryptionConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.contentTypeProfileConfig) || (has(self.initProvider) && has(self.initProvider.contentTypeProfileConfig))",message="spec.forProvider.contentTypeProfileConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.queryArgProfileConfig) || (has(self.initProvider) && has(self.initProvider.queryArgProfileConfig))",message="spec.forProvider.queryArgProfileConfig is a required parameter" + Spec FieldLevelEncryptionConfigSpec `json:"spec"` + Status FieldLevelEncryptionConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FieldLevelEncryptionConfigList contains a list of FieldLevelEncryptionConfigs +type FieldLevelEncryptionConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FieldLevelEncryptionConfig `json:"items"` +} + +// Repository type metadata. 
+var ( + FieldLevelEncryptionConfig_Kind = "FieldLevelEncryptionConfig" + FieldLevelEncryptionConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FieldLevelEncryptionConfig_Kind}.String() + FieldLevelEncryptionConfig_KindAPIVersion = FieldLevelEncryptionConfig_Kind + "." + CRDGroupVersion.String() + FieldLevelEncryptionConfig_GroupVersionKind = CRDGroupVersion.WithKind(FieldLevelEncryptionConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&FieldLevelEncryptionConfig{}, &FieldLevelEncryptionConfigList{}) +} diff --git a/apis/cloudfront/v1beta2/zz_fieldlevelencryptionprofile_terraformed.go b/apis/cloudfront/v1beta2/zz_fieldlevelencryptionprofile_terraformed.go new file mode 100755 index 0000000000..73e3905440 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_fieldlevelencryptionprofile_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FieldLevelEncryptionProfile +func (mg *FieldLevelEncryptionProfile) GetTerraformResourceType() string { + return "aws_cloudfront_field_level_encryption_profile" +} + +// GetConnectionDetailsMapping for this FieldLevelEncryptionProfile +func (tr *FieldLevelEncryptionProfile) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FieldLevelEncryptionProfile +func (tr *FieldLevelEncryptionProfile) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FieldLevelEncryptionProfile +func (tr *FieldLevelEncryptionProfile) 
SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FieldLevelEncryptionProfile +func (tr *FieldLevelEncryptionProfile) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FieldLevelEncryptionProfile +func (tr *FieldLevelEncryptionProfile) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FieldLevelEncryptionProfile +func (tr *FieldLevelEncryptionProfile) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FieldLevelEncryptionProfile +func (tr *FieldLevelEncryptionProfile) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FieldLevelEncryptionProfile +func (tr *FieldLevelEncryptionProfile) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FieldLevelEncryptionProfile using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FieldLevelEncryptionProfile) LateInitialize(attrs []byte) (bool, error) { + params := &FieldLevelEncryptionProfileParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FieldLevelEncryptionProfile) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudfront/v1beta2/zz_fieldlevelencryptionprofile_types.go b/apis/cloudfront/v1beta2/zz_fieldlevelencryptionprofile_types.go new file mode 100755 index 0000000000..69211426df --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_fieldlevelencryptionprofile_types.go @@ -0,0 +1,221 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EncryptionEntitiesInitParameters struct { + Items []EncryptionEntitiesItemsInitParameters `json:"items,omitempty" tf:"items,omitempty"` +} + +type EncryptionEntitiesItemsInitParameters struct { + + // Object that contains an attribute items that contains the list of field patterns in a field-level encryption content type profile specify the fields that you want to be encrypted. + FieldPatterns *FieldPatternsInitParameters `json:"fieldPatterns,omitempty" tf:"field_patterns,omitempty"` + + // The provider associated with the public key being used for encryption. + ProviderID *string `json:"providerId,omitempty" tf:"provider_id,omitempty"` + + // The public key associated with a set of field-level encryption patterns, to be used when encrypting the fields that match the patterns. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta1.PublicKey + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PublicKeyID *string `json:"publicKeyId,omitempty" tf:"public_key_id,omitempty"` + + // Reference to a PublicKey in cloudfront to populate publicKeyId. + // +kubebuilder:validation:Optional + PublicKeyIDRef *v1.Reference `json:"publicKeyIdRef,omitempty" tf:"-"` + + // Selector for a PublicKey in cloudfront to populate publicKeyId. + // +kubebuilder:validation:Optional + PublicKeyIDSelector *v1.Selector `json:"publicKeyIdSelector,omitempty" tf:"-"` +} + +type EncryptionEntitiesItemsObservation struct { + + // Object that contains an attribute items that contains the list of field patterns in a field-level encryption content type profile specify the fields that you want to be encrypted. 
+ FieldPatterns *FieldPatternsObservation `json:"fieldPatterns,omitempty" tf:"field_patterns,omitempty"` + + // The provider associated with the public key being used for encryption. + ProviderID *string `json:"providerId,omitempty" tf:"provider_id,omitempty"` + + // The public key associated with a set of field-level encryption patterns, to be used when encrypting the fields that match the patterns. + PublicKeyID *string `json:"publicKeyId,omitempty" tf:"public_key_id,omitempty"` +} + +type EncryptionEntitiesItemsParameters struct { + + // Object that contains an attribute items that contains the list of field patterns in a field-level encryption content type profile specify the fields that you want to be encrypted. + // +kubebuilder:validation:Optional + FieldPatterns *FieldPatternsParameters `json:"fieldPatterns" tf:"field_patterns,omitempty"` + + // The provider associated with the public key being used for encryption. + // +kubebuilder:validation:Optional + ProviderID *string `json:"providerId" tf:"provider_id,omitempty"` + + // The public key associated with a set of field-level encryption patterns, to be used when encrypting the fields that match the patterns. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta1.PublicKey + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PublicKeyID *string `json:"publicKeyId,omitempty" tf:"public_key_id,omitempty"` + + // Reference to a PublicKey in cloudfront to populate publicKeyId. + // +kubebuilder:validation:Optional + PublicKeyIDRef *v1.Reference `json:"publicKeyIdRef,omitempty" tf:"-"` + + // Selector for a PublicKey in cloudfront to populate publicKeyId. 
+ // +kubebuilder:validation:Optional + PublicKeyIDSelector *v1.Selector `json:"publicKeyIdSelector,omitempty" tf:"-"` +} + +type EncryptionEntitiesObservation struct { + Items []EncryptionEntitiesItemsObservation `json:"items,omitempty" tf:"items,omitempty"` +} + +type EncryptionEntitiesParameters struct { + + // +kubebuilder:validation:Optional + Items []EncryptionEntitiesItemsParameters `json:"items,omitempty" tf:"items,omitempty"` +} + +type FieldLevelEncryptionProfileInitParameters struct { + + // An optional comment about the Field Level Encryption Profile. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // The encryption entities config block for field-level encryption profiles that contains an attribute items which includes the encryption key and field pattern specifications. + EncryptionEntities *EncryptionEntitiesInitParameters `json:"encryptionEntities,omitempty" tf:"encryption_entities,omitempty"` + + // The name of the Field Level Encryption Profile. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type FieldLevelEncryptionProfileObservation struct { + + // Internal value used by CloudFront to allow future updates to the Field Level Encryption Profile. + CallerReference *string `json:"callerReference,omitempty" tf:"caller_reference,omitempty"` + + // An optional comment about the Field Level Encryption Profile. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // The encryption entities config block for field-level encryption profiles that contains an attribute items which includes the encryption key and field pattern specifications. + EncryptionEntities *EncryptionEntitiesObservation `json:"encryptionEntities,omitempty" tf:"encryption_entities,omitempty"` + + // The current version of the Field Level Encryption Profile. For example: E2QWRUHAPOMQZL. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // The identifier for the Field Level Encryption Profile. 
For example: K3D5EWEUDCCXON. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Field Level Encryption Profile. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type FieldLevelEncryptionProfileParameters struct { + + // An optional comment about the Field Level Encryption Profile. + // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // The encryption entities config block for field-level encryption profiles that contains an attribute items which includes the encryption key and field pattern specifications. + // +kubebuilder:validation:Optional + EncryptionEntities *EncryptionEntitiesParameters `json:"encryptionEntities,omitempty" tf:"encryption_entities,omitempty"` + + // The name of the Field Level Encryption Profile. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type FieldPatternsInitParameters struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type FieldPatternsObservation struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type FieldPatternsParameters struct { + + // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +// FieldLevelEncryptionProfileSpec defines the desired state of FieldLevelEncryptionProfile +type FieldLevelEncryptionProfileSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FieldLevelEncryptionProfileParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FieldLevelEncryptionProfileInitParameters `json:"initProvider,omitempty"` +} + +// FieldLevelEncryptionProfileStatus defines the observed state of FieldLevelEncryptionProfile. +type FieldLevelEncryptionProfileStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FieldLevelEncryptionProfileObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FieldLevelEncryptionProfile is the Schema for the FieldLevelEncryptionProfiles API. Provides a CloudFront Field-level Encryption Profile resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type FieldLevelEncryptionProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.encryptionEntities) || (has(self.initProvider) && has(self.initProvider.encryptionEntities))",message="spec.forProvider.encryptionEntities is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec FieldLevelEncryptionProfileSpec `json:"spec"` + Status FieldLevelEncryptionProfileStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FieldLevelEncryptionProfileList contains a list of FieldLevelEncryptionProfiles +type FieldLevelEncryptionProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FieldLevelEncryptionProfile `json:"items"` +} + +// Repository type metadata. 
+var ( + FieldLevelEncryptionProfile_Kind = "FieldLevelEncryptionProfile" + FieldLevelEncryptionProfile_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FieldLevelEncryptionProfile_Kind}.String() + FieldLevelEncryptionProfile_KindAPIVersion = FieldLevelEncryptionProfile_Kind + "." + CRDGroupVersion.String() + FieldLevelEncryptionProfile_GroupVersionKind = CRDGroupVersion.WithKind(FieldLevelEncryptionProfile_Kind) +) + +func init() { + SchemeBuilder.Register(&FieldLevelEncryptionProfile{}, &FieldLevelEncryptionProfileList{}) +} diff --git a/apis/cloudfront/v1beta2/zz_generated.conversion_hubs.go b/apis/cloudfront/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..7ccb794eb2 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,31 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *CachePolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Distribution) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FieldLevelEncryptionConfig) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FieldLevelEncryptionProfile) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MonitoringSubscription) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OriginRequestPolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RealtimeLogConfig) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *ResponseHeadersPolicy) Hub() {} diff --git a/apis/cloudfront/v1beta2/zz_generated.deepcopy.go b/apis/cloudfront/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..4ca54b1751 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,8930 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlAllowHeadersInitParameters) DeepCopyInto(out *AccessControlAllowHeadersInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlAllowHeadersInitParameters. +func (in *AccessControlAllowHeadersInitParameters) DeepCopy() *AccessControlAllowHeadersInitParameters { + if in == nil { + return nil + } + out := new(AccessControlAllowHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlAllowHeadersObservation) DeepCopyInto(out *AccessControlAllowHeadersObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlAllowHeadersObservation. +func (in *AccessControlAllowHeadersObservation) DeepCopy() *AccessControlAllowHeadersObservation { + if in == nil { + return nil + } + out := new(AccessControlAllowHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlAllowHeadersParameters) DeepCopyInto(out *AccessControlAllowHeadersParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlAllowHeadersParameters. +func (in *AccessControlAllowHeadersParameters) DeepCopy() *AccessControlAllowHeadersParameters { + if in == nil { + return nil + } + out := new(AccessControlAllowHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlAllowMethodsInitParameters) DeepCopyInto(out *AccessControlAllowMethodsInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlAllowMethodsInitParameters. +func (in *AccessControlAllowMethodsInitParameters) DeepCopy() *AccessControlAllowMethodsInitParameters { + if in == nil { + return nil + } + out := new(AccessControlAllowMethodsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlAllowMethodsObservation) DeepCopyInto(out *AccessControlAllowMethodsObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlAllowMethodsObservation. +func (in *AccessControlAllowMethodsObservation) DeepCopy() *AccessControlAllowMethodsObservation { + if in == nil { + return nil + } + out := new(AccessControlAllowMethodsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlAllowMethodsParameters) DeepCopyInto(out *AccessControlAllowMethodsParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlAllowMethodsParameters. +func (in *AccessControlAllowMethodsParameters) DeepCopy() *AccessControlAllowMethodsParameters { + if in == nil { + return nil + } + out := new(AccessControlAllowMethodsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlAllowOriginsInitParameters) DeepCopyInto(out *AccessControlAllowOriginsInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlAllowOriginsInitParameters. +func (in *AccessControlAllowOriginsInitParameters) DeepCopy() *AccessControlAllowOriginsInitParameters { + if in == nil { + return nil + } + out := new(AccessControlAllowOriginsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlAllowOriginsObservation) DeepCopyInto(out *AccessControlAllowOriginsObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlAllowOriginsObservation. +func (in *AccessControlAllowOriginsObservation) DeepCopy() *AccessControlAllowOriginsObservation { + if in == nil { + return nil + } + out := new(AccessControlAllowOriginsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlAllowOriginsParameters) DeepCopyInto(out *AccessControlAllowOriginsParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlAllowOriginsParameters. +func (in *AccessControlAllowOriginsParameters) DeepCopy() *AccessControlAllowOriginsParameters { + if in == nil { + return nil + } + out := new(AccessControlAllowOriginsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlExposeHeadersInitParameters) DeepCopyInto(out *AccessControlExposeHeadersInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlExposeHeadersInitParameters. +func (in *AccessControlExposeHeadersInitParameters) DeepCopy() *AccessControlExposeHeadersInitParameters { + if in == nil { + return nil + } + out := new(AccessControlExposeHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlExposeHeadersObservation) DeepCopyInto(out *AccessControlExposeHeadersObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlExposeHeadersObservation. +func (in *AccessControlExposeHeadersObservation) DeepCopy() *AccessControlExposeHeadersObservation { + if in == nil { + return nil + } + out := new(AccessControlExposeHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlExposeHeadersParameters) DeepCopyInto(out *AccessControlExposeHeadersParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlExposeHeadersParameters. +func (in *AccessControlExposeHeadersParameters) DeepCopy() *AccessControlExposeHeadersParameters { + if in == nil { + return nil + } + out := new(AccessControlExposeHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CachePolicy) DeepCopyInto(out *CachePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachePolicy. +func (in *CachePolicy) DeepCopy() *CachePolicy { + if in == nil { + return nil + } + out := new(CachePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CachePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CachePolicyInitParameters) DeepCopyInto(out *CachePolicyInitParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.MaxTTL != nil { + in, out := &in.MaxTTL, &out.MaxTTL + *out = new(float64) + **out = **in + } + if in.MinTTL != nil { + in, out := &in.MinTTL, &out.MinTTL + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ParametersInCacheKeyAndForwardedToOrigin != nil { + in, out := &in.ParametersInCacheKeyAndForwardedToOrigin, &out.ParametersInCacheKeyAndForwardedToOrigin + *out = new(ParametersInCacheKeyAndForwardedToOriginInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachePolicyInitParameters. +func (in *CachePolicyInitParameters) DeepCopy() *CachePolicyInitParameters { + if in == nil { + return nil + } + out := new(CachePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CachePolicyList) DeepCopyInto(out *CachePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CachePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachePolicyList. 
+func (in *CachePolicyList) DeepCopy() *CachePolicyList { + if in == nil { + return nil + } + out := new(CachePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CachePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CachePolicyObservation) DeepCopyInto(out *CachePolicyObservation) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxTTL != nil { + in, out := &in.MaxTTL, &out.MaxTTL + *out = new(float64) + **out = **in + } + if in.MinTTL != nil { + in, out := &in.MinTTL, &out.MinTTL + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ParametersInCacheKeyAndForwardedToOrigin != nil { + in, out := &in.ParametersInCacheKeyAndForwardedToOrigin, &out.ParametersInCacheKeyAndForwardedToOrigin + *out = new(ParametersInCacheKeyAndForwardedToOriginObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachePolicyObservation. +func (in *CachePolicyObservation) DeepCopy() *CachePolicyObservation { + if in == nil { + return nil + } + out := new(CachePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CachePolicyParameters) DeepCopyInto(out *CachePolicyParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.MaxTTL != nil { + in, out := &in.MaxTTL, &out.MaxTTL + *out = new(float64) + **out = **in + } + if in.MinTTL != nil { + in, out := &in.MinTTL, &out.MinTTL + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ParametersInCacheKeyAndForwardedToOrigin != nil { + in, out := &in.ParametersInCacheKeyAndForwardedToOrigin, &out.ParametersInCacheKeyAndForwardedToOrigin + *out = new(ParametersInCacheKeyAndForwardedToOriginParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachePolicyParameters. +func (in *CachePolicyParameters) DeepCopy() *CachePolicyParameters { + if in == nil { + return nil + } + out := new(CachePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CachePolicySpec) DeepCopyInto(out *CachePolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachePolicySpec. +func (in *CachePolicySpec) DeepCopy() *CachePolicySpec { + if in == nil { + return nil + } + out := new(CachePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CachePolicyStatus) DeepCopyInto(out *CachePolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CachePolicyStatus. +func (in *CachePolicyStatus) DeepCopy() *CachePolicyStatus { + if in == nil { + return nil + } + out := new(CachePolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentSecurityPolicyInitParameters) DeepCopyInto(out *ContentSecurityPolicyInitParameters) { + *out = *in + if in.ContentSecurityPolicy != nil { + in, out := &in.ContentSecurityPolicy, &out.ContentSecurityPolicy + *out = new(string) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentSecurityPolicyInitParameters. +func (in *ContentSecurityPolicyInitParameters) DeepCopy() *ContentSecurityPolicyInitParameters { + if in == nil { + return nil + } + out := new(ContentSecurityPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentSecurityPolicyObservation) DeepCopyInto(out *ContentSecurityPolicyObservation) { + *out = *in + if in.ContentSecurityPolicy != nil { + in, out := &in.ContentSecurityPolicy, &out.ContentSecurityPolicy + *out = new(string) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentSecurityPolicyObservation. 
+func (in *ContentSecurityPolicyObservation) DeepCopy() *ContentSecurityPolicyObservation { + if in == nil { + return nil + } + out := new(ContentSecurityPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentSecurityPolicyParameters) DeepCopyInto(out *ContentSecurityPolicyParameters) { + *out = *in + if in.ContentSecurityPolicy != nil { + in, out := &in.ContentSecurityPolicy, &out.ContentSecurityPolicy + *out = new(string) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentSecurityPolicyParameters. +func (in *ContentSecurityPolicyParameters) DeepCopy() *ContentSecurityPolicyParameters { + if in == nil { + return nil + } + out := new(ContentSecurityPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeOptionsInitParameters) DeepCopyInto(out *ContentTypeOptionsInitParameters) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeOptionsInitParameters. +func (in *ContentTypeOptionsInitParameters) DeepCopy() *ContentTypeOptionsInitParameters { + if in == nil { + return nil + } + out := new(ContentTypeOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentTypeOptionsObservation) DeepCopyInto(out *ContentTypeOptionsObservation) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeOptionsObservation. +func (in *ContentTypeOptionsObservation) DeepCopy() *ContentTypeOptionsObservation { + if in == nil { + return nil + } + out := new(ContentTypeOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeOptionsParameters) DeepCopyInto(out *ContentTypeOptionsParameters) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeOptionsParameters. +func (in *ContentTypeOptionsParameters) DeepCopy() *ContentTypeOptionsParameters { + if in == nil { + return nil + } + out := new(ContentTypeOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeProfileConfigInitParameters) DeepCopyInto(out *ContentTypeProfileConfigInitParameters) { + *out = *in + if in.ContentTypeProfiles != nil { + in, out := &in.ContentTypeProfiles, &out.ContentTypeProfiles + *out = new(ContentTypeProfilesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardWhenContentTypeIsUnknown != nil { + in, out := &in.ForwardWhenContentTypeIsUnknown, &out.ForwardWhenContentTypeIsUnknown + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeProfileConfigInitParameters. 
+func (in *ContentTypeProfileConfigInitParameters) DeepCopy() *ContentTypeProfileConfigInitParameters { + if in == nil { + return nil + } + out := new(ContentTypeProfileConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeProfileConfigObservation) DeepCopyInto(out *ContentTypeProfileConfigObservation) { + *out = *in + if in.ContentTypeProfiles != nil { + in, out := &in.ContentTypeProfiles, &out.ContentTypeProfiles + *out = new(ContentTypeProfilesObservation) + (*in).DeepCopyInto(*out) + } + if in.ForwardWhenContentTypeIsUnknown != nil { + in, out := &in.ForwardWhenContentTypeIsUnknown, &out.ForwardWhenContentTypeIsUnknown + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeProfileConfigObservation. +func (in *ContentTypeProfileConfigObservation) DeepCopy() *ContentTypeProfileConfigObservation { + if in == nil { + return nil + } + out := new(ContentTypeProfileConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeProfileConfigParameters) DeepCopyInto(out *ContentTypeProfileConfigParameters) { + *out = *in + if in.ContentTypeProfiles != nil { + in, out := &in.ContentTypeProfiles, &out.ContentTypeProfiles + *out = new(ContentTypeProfilesParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardWhenContentTypeIsUnknown != nil { + in, out := &in.ForwardWhenContentTypeIsUnknown, &out.ForwardWhenContentTypeIsUnknown + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeProfileConfigParameters. 
+func (in *ContentTypeProfileConfigParameters) DeepCopy() *ContentTypeProfileConfigParameters { + if in == nil { + return nil + } + out := new(ContentTypeProfileConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeProfilesInitParameters) DeepCopyInto(out *ContentTypeProfilesInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContentTypeProfilesItemsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeProfilesInitParameters. +func (in *ContentTypeProfilesInitParameters) DeepCopy() *ContentTypeProfilesInitParameters { + if in == nil { + return nil + } + out := new(ContentTypeProfilesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeProfilesItemsInitParameters) DeepCopyInto(out *ContentTypeProfilesItemsInitParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.ProfileID != nil { + in, out := &in.ProfileID, &out.ProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeProfilesItemsInitParameters. 
+func (in *ContentTypeProfilesItemsInitParameters) DeepCopy() *ContentTypeProfilesItemsInitParameters { + if in == nil { + return nil + } + out := new(ContentTypeProfilesItemsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeProfilesItemsObservation) DeepCopyInto(out *ContentTypeProfilesItemsObservation) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.ProfileID != nil { + in, out := &in.ProfileID, &out.ProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeProfilesItemsObservation. +func (in *ContentTypeProfilesItemsObservation) DeepCopy() *ContentTypeProfilesItemsObservation { + if in == nil { + return nil + } + out := new(ContentTypeProfilesItemsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeProfilesItemsParameters) DeepCopyInto(out *ContentTypeProfilesItemsParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.ProfileID != nil { + in, out := &in.ProfileID, &out.ProfileID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeProfilesItemsParameters. 
+func (in *ContentTypeProfilesItemsParameters) DeepCopy() *ContentTypeProfilesItemsParameters { + if in == nil { + return nil + } + out := new(ContentTypeProfilesItemsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeProfilesObservation) DeepCopyInto(out *ContentTypeProfilesObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContentTypeProfilesItemsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeProfilesObservation. +func (in *ContentTypeProfilesObservation) DeepCopy() *ContentTypeProfilesObservation { + if in == nil { + return nil + } + out := new(ContentTypeProfilesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTypeProfilesParameters) DeepCopyInto(out *ContentTypeProfilesParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContentTypeProfilesItemsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTypeProfilesParameters. +func (in *ContentTypeProfilesParameters) DeepCopy() *ContentTypeProfilesParameters { + if in == nil { + return nil + } + out := new(ContentTypeProfilesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CookiesConfigCookiesInitParameters) DeepCopyInto(out *CookiesConfigCookiesInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesConfigCookiesInitParameters. +func (in *CookiesConfigCookiesInitParameters) DeepCopy() *CookiesConfigCookiesInitParameters { + if in == nil { + return nil + } + out := new(CookiesConfigCookiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CookiesConfigCookiesObservation) DeepCopyInto(out *CookiesConfigCookiesObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesConfigCookiesObservation. +func (in *CookiesConfigCookiesObservation) DeepCopy() *CookiesConfigCookiesObservation { + if in == nil { + return nil + } + out := new(CookiesConfigCookiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CookiesConfigCookiesParameters) DeepCopyInto(out *CookiesConfigCookiesParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesConfigCookiesParameters. +func (in *CookiesConfigCookiesParameters) DeepCopy() *CookiesConfigCookiesParameters { + if in == nil { + return nil + } + out := new(CookiesConfigCookiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CookiesConfigInitParameters) DeepCopyInto(out *CookiesConfigInitParameters) { + *out = *in + if in.CookieBehavior != nil { + in, out := &in.CookieBehavior, &out.CookieBehavior + *out = new(string) + **out = **in + } + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(CookiesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesConfigInitParameters. +func (in *CookiesConfigInitParameters) DeepCopy() *CookiesConfigInitParameters { + if in == nil { + return nil + } + out := new(CookiesConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CookiesConfigObservation) DeepCopyInto(out *CookiesConfigObservation) { + *out = *in + if in.CookieBehavior != nil { + in, out := &in.CookieBehavior, &out.CookieBehavior + *out = new(string) + **out = **in + } + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(CookiesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesConfigObservation. +func (in *CookiesConfigObservation) DeepCopy() *CookiesConfigObservation { + if in == nil { + return nil + } + out := new(CookiesConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CookiesConfigParameters) DeepCopyInto(out *CookiesConfigParameters) { + *out = *in + if in.CookieBehavior != nil { + in, out := &in.CookieBehavior, &out.CookieBehavior + *out = new(string) + **out = **in + } + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(CookiesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesConfigParameters. +func (in *CookiesConfigParameters) DeepCopy() *CookiesConfigParameters { + if in == nil { + return nil + } + out := new(CookiesConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CookiesInitParameters) DeepCopyInto(out *CookiesInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesInitParameters. 
+func (in *CookiesInitParameters) DeepCopy() *CookiesInitParameters { + if in == nil { + return nil + } + out := new(CookiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CookiesObservation) DeepCopyInto(out *CookiesObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesObservation. +func (in *CookiesObservation) DeepCopy() *CookiesObservation { + if in == nil { + return nil + } + out := new(CookiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CookiesParameters) DeepCopyInto(out *CookiesParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesParameters. +func (in *CookiesParameters) DeepCopy() *CookiesParameters { + if in == nil { + return nil + } + out := new(CookiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsConfigInitParameters) DeepCopyInto(out *CorsConfigInitParameters) { + *out = *in + if in.AccessControlAllowCredentials != nil { + in, out := &in.AccessControlAllowCredentials, &out.AccessControlAllowCredentials + *out = new(bool) + **out = **in + } + if in.AccessControlAllowHeaders != nil { + in, out := &in.AccessControlAllowHeaders, &out.AccessControlAllowHeaders + *out = new(AccessControlAllowHeadersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AccessControlAllowMethods != nil { + in, out := &in.AccessControlAllowMethods, &out.AccessControlAllowMethods + *out = new(AccessControlAllowMethodsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AccessControlAllowOrigins != nil { + in, out := &in.AccessControlAllowOrigins, &out.AccessControlAllowOrigins + *out = new(AccessControlAllowOriginsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AccessControlExposeHeaders != nil { + in, out := &in.AccessControlExposeHeaders, &out.AccessControlExposeHeaders + *out = new(AccessControlExposeHeadersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AccessControlMaxAgeSec != nil { + in, out := &in.AccessControlMaxAgeSec, &out.AccessControlMaxAgeSec + *out = new(float64) + **out = **in + } + if in.OriginOverride != nil { + in, out := &in.OriginOverride, &out.OriginOverride + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsConfigInitParameters. +func (in *CorsConfigInitParameters) DeepCopy() *CorsConfigInitParameters { + if in == nil { + return nil + } + out := new(CorsConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsConfigObservation) DeepCopyInto(out *CorsConfigObservation) { + *out = *in + if in.AccessControlAllowCredentials != nil { + in, out := &in.AccessControlAllowCredentials, &out.AccessControlAllowCredentials + *out = new(bool) + **out = **in + } + if in.AccessControlAllowHeaders != nil { + in, out := &in.AccessControlAllowHeaders, &out.AccessControlAllowHeaders + *out = new(AccessControlAllowHeadersObservation) + (*in).DeepCopyInto(*out) + } + if in.AccessControlAllowMethods != nil { + in, out := &in.AccessControlAllowMethods, &out.AccessControlAllowMethods + *out = new(AccessControlAllowMethodsObservation) + (*in).DeepCopyInto(*out) + } + if in.AccessControlAllowOrigins != nil { + in, out := &in.AccessControlAllowOrigins, &out.AccessControlAllowOrigins + *out = new(AccessControlAllowOriginsObservation) + (*in).DeepCopyInto(*out) + } + if in.AccessControlExposeHeaders != nil { + in, out := &in.AccessControlExposeHeaders, &out.AccessControlExposeHeaders + *out = new(AccessControlExposeHeadersObservation) + (*in).DeepCopyInto(*out) + } + if in.AccessControlMaxAgeSec != nil { + in, out := &in.AccessControlMaxAgeSec, &out.AccessControlMaxAgeSec + *out = new(float64) + **out = **in + } + if in.OriginOverride != nil { + in, out := &in.OriginOverride, &out.OriginOverride + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsConfigObservation. +func (in *CorsConfigObservation) DeepCopy() *CorsConfigObservation { + if in == nil { + return nil + } + out := new(CorsConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsConfigParameters) DeepCopyInto(out *CorsConfigParameters) { + *out = *in + if in.AccessControlAllowCredentials != nil { + in, out := &in.AccessControlAllowCredentials, &out.AccessControlAllowCredentials + *out = new(bool) + **out = **in + } + if in.AccessControlAllowHeaders != nil { + in, out := &in.AccessControlAllowHeaders, &out.AccessControlAllowHeaders + *out = new(AccessControlAllowHeadersParameters) + (*in).DeepCopyInto(*out) + } + if in.AccessControlAllowMethods != nil { + in, out := &in.AccessControlAllowMethods, &out.AccessControlAllowMethods + *out = new(AccessControlAllowMethodsParameters) + (*in).DeepCopyInto(*out) + } + if in.AccessControlAllowOrigins != nil { + in, out := &in.AccessControlAllowOrigins, &out.AccessControlAllowOrigins + *out = new(AccessControlAllowOriginsParameters) + (*in).DeepCopyInto(*out) + } + if in.AccessControlExposeHeaders != nil { + in, out := &in.AccessControlExposeHeaders, &out.AccessControlExposeHeaders + *out = new(AccessControlExposeHeadersParameters) + (*in).DeepCopyInto(*out) + } + if in.AccessControlMaxAgeSec != nil { + in, out := &in.AccessControlMaxAgeSec, &out.AccessControlMaxAgeSec + *out = new(float64) + **out = **in + } + if in.OriginOverride != nil { + in, out := &in.OriginOverride, &out.OriginOverride + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsConfigParameters. +func (in *CorsConfigParameters) DeepCopy() *CorsConfigParameters { + if in == nil { + return nil + } + out := new(CorsConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomErrorResponseInitParameters) DeepCopyInto(out *CustomErrorResponseInitParameters) { + *out = *in + if in.ErrorCachingMinTTL != nil { + in, out := &in.ErrorCachingMinTTL, &out.ErrorCachingMinTTL + *out = new(float64) + **out = **in + } + if in.ErrorCode != nil { + in, out := &in.ErrorCode, &out.ErrorCode + *out = new(float64) + **out = **in + } + if in.ResponseCode != nil { + in, out := &in.ResponseCode, &out.ResponseCode + *out = new(float64) + **out = **in + } + if in.ResponsePagePath != nil { + in, out := &in.ResponsePagePath, &out.ResponsePagePath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomErrorResponseInitParameters. +func (in *CustomErrorResponseInitParameters) DeepCopy() *CustomErrorResponseInitParameters { + if in == nil { + return nil + } + out := new(CustomErrorResponseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomErrorResponseObservation) DeepCopyInto(out *CustomErrorResponseObservation) { + *out = *in + if in.ErrorCachingMinTTL != nil { + in, out := &in.ErrorCachingMinTTL, &out.ErrorCachingMinTTL + *out = new(float64) + **out = **in + } + if in.ErrorCode != nil { + in, out := &in.ErrorCode, &out.ErrorCode + *out = new(float64) + **out = **in + } + if in.ResponseCode != nil { + in, out := &in.ResponseCode, &out.ResponseCode + *out = new(float64) + **out = **in + } + if in.ResponsePagePath != nil { + in, out := &in.ResponsePagePath, &out.ResponsePagePath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomErrorResponseObservation. 
+func (in *CustomErrorResponseObservation) DeepCopy() *CustomErrorResponseObservation { + if in == nil { + return nil + } + out := new(CustomErrorResponseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomErrorResponseParameters) DeepCopyInto(out *CustomErrorResponseParameters) { + *out = *in + if in.ErrorCachingMinTTL != nil { + in, out := &in.ErrorCachingMinTTL, &out.ErrorCachingMinTTL + *out = new(float64) + **out = **in + } + if in.ErrorCode != nil { + in, out := &in.ErrorCode, &out.ErrorCode + *out = new(float64) + **out = **in + } + if in.ResponseCode != nil { + in, out := &in.ResponseCode, &out.ResponseCode + *out = new(float64) + **out = **in + } + if in.ResponsePagePath != nil { + in, out := &in.ResponsePagePath, &out.ResponsePagePath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomErrorResponseParameters. +func (in *CustomErrorResponseParameters) DeepCopy() *CustomErrorResponseParameters { + if in == nil { + return nil + } + out := new(CustomErrorResponseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHeaderInitParameters) DeepCopyInto(out *CustomHeaderInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeaderInitParameters. 
+func (in *CustomHeaderInitParameters) DeepCopy() *CustomHeaderInitParameters { + if in == nil { + return nil + } + out := new(CustomHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHeaderObservation) DeepCopyInto(out *CustomHeaderObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeaderObservation. +func (in *CustomHeaderObservation) DeepCopy() *CustomHeaderObservation { + if in == nil { + return nil + } + out := new(CustomHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHeaderParameters) DeepCopyInto(out *CustomHeaderParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeaderParameters. +func (in *CustomHeaderParameters) DeepCopy() *CustomHeaderParameters { + if in == nil { + return nil + } + out := new(CustomHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomHeadersConfigInitParameters) DeepCopyInto(out *CustomHeadersConfigInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomHeadersConfigItemsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeadersConfigInitParameters. +func (in *CustomHeadersConfigInitParameters) DeepCopy() *CustomHeadersConfigInitParameters { + if in == nil { + return nil + } + out := new(CustomHeadersConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHeadersConfigItemsInitParameters) DeepCopyInto(out *CustomHeadersConfigItemsInitParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(string) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeadersConfigItemsInitParameters. +func (in *CustomHeadersConfigItemsInitParameters) DeepCopy() *CustomHeadersConfigItemsInitParameters { + if in == nil { + return nil + } + out := new(CustomHeadersConfigItemsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomHeadersConfigItemsObservation) DeepCopyInto(out *CustomHeadersConfigItemsObservation) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(string) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeadersConfigItemsObservation. +func (in *CustomHeadersConfigItemsObservation) DeepCopy() *CustomHeadersConfigItemsObservation { + if in == nil { + return nil + } + out := new(CustomHeadersConfigItemsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHeadersConfigItemsParameters) DeepCopyInto(out *CustomHeadersConfigItemsParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(string) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeadersConfigItemsParameters. +func (in *CustomHeadersConfigItemsParameters) DeepCopy() *CustomHeadersConfigItemsParameters { + if in == nil { + return nil + } + out := new(CustomHeadersConfigItemsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomHeadersConfigObservation) DeepCopyInto(out *CustomHeadersConfigObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomHeadersConfigItemsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeadersConfigObservation. +func (in *CustomHeadersConfigObservation) DeepCopy() *CustomHeadersConfigObservation { + if in == nil { + return nil + } + out := new(CustomHeadersConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHeadersConfigParameters) DeepCopyInto(out *CustomHeadersConfigParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomHeadersConfigItemsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeadersConfigParameters. +func (in *CustomHeadersConfigParameters) DeepCopy() *CustomHeadersConfigParameters { + if in == nil { + return nil + } + out := new(CustomHeadersConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomOriginConfigInitParameters) DeepCopyInto(out *CustomOriginConfigInitParameters) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.OriginKeepaliveTimeout != nil { + in, out := &in.OriginKeepaliveTimeout, &out.OriginKeepaliveTimeout + *out = new(float64) + **out = **in + } + if in.OriginProtocolPolicy != nil { + in, out := &in.OriginProtocolPolicy, &out.OriginProtocolPolicy + *out = new(string) + **out = **in + } + if in.OriginReadTimeout != nil { + in, out := &in.OriginReadTimeout, &out.OriginReadTimeout + *out = new(float64) + **out = **in + } + if in.OriginSSLProtocols != nil { + in, out := &in.OriginSSLProtocols, &out.OriginSSLProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomOriginConfigInitParameters. +func (in *CustomOriginConfigInitParameters) DeepCopy() *CustomOriginConfigInitParameters { + if in == nil { + return nil + } + out := new(CustomOriginConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomOriginConfigObservation) DeepCopyInto(out *CustomOriginConfigObservation) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.OriginKeepaliveTimeout != nil { + in, out := &in.OriginKeepaliveTimeout, &out.OriginKeepaliveTimeout + *out = new(float64) + **out = **in + } + if in.OriginProtocolPolicy != nil { + in, out := &in.OriginProtocolPolicy, &out.OriginProtocolPolicy + *out = new(string) + **out = **in + } + if in.OriginReadTimeout != nil { + in, out := &in.OriginReadTimeout, &out.OriginReadTimeout + *out = new(float64) + **out = **in + } + if in.OriginSSLProtocols != nil { + in, out := &in.OriginSSLProtocols, &out.OriginSSLProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomOriginConfigObservation. +func (in *CustomOriginConfigObservation) DeepCopy() *CustomOriginConfigObservation { + if in == nil { + return nil + } + out := new(CustomOriginConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomOriginConfigParameters) DeepCopyInto(out *CustomOriginConfigParameters) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.OriginKeepaliveTimeout != nil { + in, out := &in.OriginKeepaliveTimeout, &out.OriginKeepaliveTimeout + *out = new(float64) + **out = **in + } + if in.OriginProtocolPolicy != nil { + in, out := &in.OriginProtocolPolicy, &out.OriginProtocolPolicy + *out = new(string) + **out = **in + } + if in.OriginReadTimeout != nil { + in, out := &in.OriginReadTimeout, &out.OriginReadTimeout + *out = new(float64) + **out = **in + } + if in.OriginSSLProtocols != nil { + in, out := &in.OriginSSLProtocols, &out.OriginSSLProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomOriginConfigParameters. +func (in *CustomOriginConfigParameters) DeepCopy() *CustomOriginConfigParameters { + if in == nil { + return nil + } + out := new(CustomOriginConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultCacheBehaviorInitParameters) DeepCopyInto(out *DefaultCacheBehaviorInitParameters) { + *out = *in + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CachePolicyID != nil { + in, out := &in.CachePolicyID, &out.CachePolicyID + *out = new(string) + **out = **in + } + if in.CachedMethods != nil { + in, out := &in.CachedMethods, &out.CachedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Compress != nil { + in, out := &in.Compress, &out.Compress + *out = new(bool) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.FieldLevelEncryptionID != nil { + in, out := &in.FieldLevelEncryptionID, &out.FieldLevelEncryptionID + *out = new(string) + **out = **in + } + if in.ForwardedValues != nil { + in, out := &in.ForwardedValues, &out.ForwardedValues + *out = new(ForwardedValuesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionAssociation != nil { + in, out := &in.FunctionAssociation, &out.FunctionAssociation + *out = make([]FunctionAssociationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LambdaFunctionAssociation != nil { + in, out := &in.LambdaFunctionAssociation, &out.LambdaFunctionAssociation + *out = make([]LambdaFunctionAssociationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxTTL != nil { + in, out := &in.MaxTTL, &out.MaxTTL + *out = new(float64) + **out = **in + } + if in.MinTTL != nil { + in, out := &in.MinTTL, &out.MinTTL + *out = new(float64) + **out = **in + } + if in.OriginRequestPolicyID != nil { + in, out := 
&in.OriginRequestPolicyID, &out.OriginRequestPolicyID + *out = new(string) + **out = **in + } + if in.RealtimeLogConfigArn != nil { + in, out := &in.RealtimeLogConfigArn, &out.RealtimeLogConfigArn + *out = new(string) + **out = **in + } + if in.ResponseHeadersPolicyID != nil { + in, out := &in.ResponseHeadersPolicyID, &out.ResponseHeadersPolicyID + *out = new(string) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } + if in.TargetOriginID != nil { + in, out := &in.TargetOriginID, &out.TargetOriginID + *out = new(string) + **out = **in + } + if in.TrustedKeyGroups != nil { + in, out := &in.TrustedKeyGroups, &out.TrustedKeyGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TrustedSigners != nil { + in, out := &in.TrustedSigners, &out.TrustedSigners + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ViewerProtocolPolicy != nil { + in, out := &in.ViewerProtocolPolicy, &out.ViewerProtocolPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultCacheBehaviorInitParameters. +func (in *DefaultCacheBehaviorInitParameters) DeepCopy() *DefaultCacheBehaviorInitParameters { + if in == nil { + return nil + } + out := new(DefaultCacheBehaviorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultCacheBehaviorObservation) DeepCopyInto(out *DefaultCacheBehaviorObservation) { + *out = *in + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CachePolicyID != nil { + in, out := &in.CachePolicyID, &out.CachePolicyID + *out = new(string) + **out = **in + } + if in.CachedMethods != nil { + in, out := &in.CachedMethods, &out.CachedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Compress != nil { + in, out := &in.Compress, &out.Compress + *out = new(bool) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.FieldLevelEncryptionID != nil { + in, out := &in.FieldLevelEncryptionID, &out.FieldLevelEncryptionID + *out = new(string) + **out = **in + } + if in.ForwardedValues != nil { + in, out := &in.ForwardedValues, &out.ForwardedValues + *out = new(ForwardedValuesObservation) + (*in).DeepCopyInto(*out) + } + if in.FunctionAssociation != nil { + in, out := &in.FunctionAssociation, &out.FunctionAssociation + *out = make([]FunctionAssociationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LambdaFunctionAssociation != nil { + in, out := &in.LambdaFunctionAssociation, &out.LambdaFunctionAssociation + *out = make([]LambdaFunctionAssociationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxTTL != nil { + in, out := &in.MaxTTL, &out.MaxTTL + *out = new(float64) + **out = **in + } + if in.MinTTL != nil { + in, out := &in.MinTTL, &out.MinTTL + *out = new(float64) + **out = **in + } + if in.OriginRequestPolicyID != nil { + in, out := 
&in.OriginRequestPolicyID, &out.OriginRequestPolicyID + *out = new(string) + **out = **in + } + if in.RealtimeLogConfigArn != nil { + in, out := &in.RealtimeLogConfigArn, &out.RealtimeLogConfigArn + *out = new(string) + **out = **in + } + if in.ResponseHeadersPolicyID != nil { + in, out := &in.ResponseHeadersPolicyID, &out.ResponseHeadersPolicyID + *out = new(string) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } + if in.TargetOriginID != nil { + in, out := &in.TargetOriginID, &out.TargetOriginID + *out = new(string) + **out = **in + } + if in.TrustedKeyGroups != nil { + in, out := &in.TrustedKeyGroups, &out.TrustedKeyGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TrustedSigners != nil { + in, out := &in.TrustedSigners, &out.TrustedSigners + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ViewerProtocolPolicy != nil { + in, out := &in.ViewerProtocolPolicy, &out.ViewerProtocolPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultCacheBehaviorObservation. +func (in *DefaultCacheBehaviorObservation) DeepCopy() *DefaultCacheBehaviorObservation { + if in == nil { + return nil + } + out := new(DefaultCacheBehaviorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultCacheBehaviorParameters) DeepCopyInto(out *DefaultCacheBehaviorParameters) { + *out = *in + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CachePolicyID != nil { + in, out := &in.CachePolicyID, &out.CachePolicyID + *out = new(string) + **out = **in + } + if in.CachedMethods != nil { + in, out := &in.CachedMethods, &out.CachedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Compress != nil { + in, out := &in.Compress, &out.Compress + *out = new(bool) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.FieldLevelEncryptionID != nil { + in, out := &in.FieldLevelEncryptionID, &out.FieldLevelEncryptionID + *out = new(string) + **out = **in + } + if in.ForwardedValues != nil { + in, out := &in.ForwardedValues, &out.ForwardedValues + *out = new(ForwardedValuesParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionAssociation != nil { + in, out := &in.FunctionAssociation, &out.FunctionAssociation + *out = make([]FunctionAssociationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LambdaFunctionAssociation != nil { + in, out := &in.LambdaFunctionAssociation, &out.LambdaFunctionAssociation + *out = make([]LambdaFunctionAssociationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxTTL != nil { + in, out := &in.MaxTTL, &out.MaxTTL + *out = new(float64) + **out = **in + } + if in.MinTTL != nil { + in, out := &in.MinTTL, &out.MinTTL + *out = new(float64) + **out = **in + } + if in.OriginRequestPolicyID != nil { + in, out := 
&in.OriginRequestPolicyID, &out.OriginRequestPolicyID + *out = new(string) + **out = **in + } + if in.RealtimeLogConfigArn != nil { + in, out := &in.RealtimeLogConfigArn, &out.RealtimeLogConfigArn + *out = new(string) + **out = **in + } + if in.ResponseHeadersPolicyID != nil { + in, out := &in.ResponseHeadersPolicyID, &out.ResponseHeadersPolicyID + *out = new(string) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } + if in.TargetOriginID != nil { + in, out := &in.TargetOriginID, &out.TargetOriginID + *out = new(string) + **out = **in + } + if in.TrustedKeyGroups != nil { + in, out := &in.TrustedKeyGroups, &out.TrustedKeyGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TrustedSigners != nil { + in, out := &in.TrustedSigners, &out.TrustedSigners + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ViewerProtocolPolicy != nil { + in, out := &in.ViewerProtocolPolicy, &out.ViewerProtocolPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultCacheBehaviorParameters. +func (in *DefaultCacheBehaviorParameters) DeepCopy() *DefaultCacheBehaviorParameters { + if in == nil { + return nil + } + out := new(DefaultCacheBehaviorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Distribution) DeepCopyInto(out *Distribution) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Distribution. +func (in *Distribution) DeepCopy() *Distribution { + if in == nil { + return nil + } + out := new(Distribution) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Distribution) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionInitParameters) DeepCopyInto(out *DistributionInitParameters) { + *out = *in + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.ContinuousDeploymentPolicyID != nil { + in, out := &in.ContinuousDeploymentPolicyID, &out.ContinuousDeploymentPolicyID + *out = new(string) + **out = **in + } + if in.CustomErrorResponse != nil { + in, out := &in.CustomErrorResponse, &out.CustomErrorResponse + *out = make([]CustomErrorResponseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultCacheBehavior != nil { + in, out := &in.DefaultCacheBehavior, &out.DefaultCacheBehavior + *out = new(DefaultCacheBehaviorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultRootObject != nil { + in, out := &in.DefaultRootObject, &out.DefaultRootObject + *out = new(string) + **out = **in + } + if in.Enabled != 
nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPVersion != nil { + in, out := &in.HTTPVersion, &out.HTTPVersion + *out = new(string) + **out = **in + } + if in.IsIPv6Enabled != nil { + in, out := &in.IsIPv6Enabled, &out.IsIPv6Enabled + *out = new(bool) + **out = **in + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = new(LoggingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OrderedCacheBehavior != nil { + in, out := &in.OrderedCacheBehavior, &out.OrderedCacheBehavior + *out = make([]OrderedCacheBehaviorInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginGroup != nil { + in, out := &in.OriginGroup, &out.OriginGroup + *out = make([]OriginGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PriceClass != nil { + in, out := &in.PriceClass, &out.PriceClass + *out = new(string) + **out = **in + } + if in.Restrictions != nil { + in, out := &in.Restrictions, &out.Restrictions + *out = new(RestrictionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetainOnDelete != nil { + in, out := &in.RetainOnDelete, &out.RetainOnDelete + *out = new(bool) + **out = **in + } + if in.Staging != nil { + in, out := &in.Staging, &out.Staging + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ViewerCertificate != nil { + in, out := &in.ViewerCertificate, &out.ViewerCertificate + *out = 
new(ViewerCertificateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WaitForDeployment != nil { + in, out := &in.WaitForDeployment, &out.WaitForDeployment + *out = new(bool) + **out = **in + } + if in.WebACLID != nil { + in, out := &in.WebACLID, &out.WebACLID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionInitParameters. +func (in *DistributionInitParameters) DeepCopy() *DistributionInitParameters { + if in == nil { + return nil + } + out := new(DistributionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionList) DeepCopyInto(out *DistributionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Distribution, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionList. +func (in *DistributionList) DeepCopy() *DistributionList { + if in == nil { + return nil + } + out := new(DistributionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DistributionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DistributionObservation) DeepCopyInto(out *DistributionObservation) { + *out = *in + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CallerReference != nil { + in, out := &in.CallerReference, &out.CallerReference + *out = new(string) + **out = **in + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.ContinuousDeploymentPolicyID != nil { + in, out := &in.ContinuousDeploymentPolicyID, &out.ContinuousDeploymentPolicyID + *out = new(string) + **out = **in + } + if in.CustomErrorResponse != nil { + in, out := &in.CustomErrorResponse, &out.CustomErrorResponse + *out = make([]CustomErrorResponseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultCacheBehavior != nil { + in, out := &in.DefaultCacheBehavior, &out.DefaultCacheBehavior + *out = new(DefaultCacheBehaviorObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultRootObject != nil { + in, out := &in.DefaultRootObject, &out.DefaultRootObject + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.HTTPVersion != nil { + in, out := &in.HTTPVersion, &out.HTTPVersion + *out = new(string) + **out = **in + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if 
in.InProgressValidationBatches != nil { + in, out := &in.InProgressValidationBatches, &out.InProgressValidationBatches + *out = new(float64) + **out = **in + } + if in.IsIPv6Enabled != nil { + in, out := &in.IsIPv6Enabled, &out.IsIPv6Enabled + *out = new(bool) + **out = **in + } + if in.LastModifiedTime != nil { + in, out := &in.LastModifiedTime, &out.LastModifiedTime + *out = new(string) + **out = **in + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = new(LoggingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.OrderedCacheBehavior != nil { + in, out := &in.OrderedCacheBehavior, &out.OrderedCacheBehavior + *out = make([]OrderedCacheBehaviorObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginGroup != nil { + in, out := &in.OriginGroup, &out.OriginGroup + *out = make([]OriginGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PriceClass != nil { + in, out := &in.PriceClass, &out.PriceClass + *out = new(string) + **out = **in + } + if in.Restrictions != nil { + in, out := &in.Restrictions, &out.Restrictions + *out = new(RestrictionsObservation) + (*in).DeepCopyInto(*out) + } + if in.RetainOnDelete != nil { + in, out := &in.RetainOnDelete, &out.RetainOnDelete + *out = new(bool) + **out = **in + } + if in.Staging != nil { + in, out := &in.Staging, &out.Staging + *out = new(bool) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + 
*out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedKeyGroups != nil { + in, out := &in.TrustedKeyGroups, &out.TrustedKeyGroups + *out = make([]TrustedKeyGroupsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrustedSigners != nil { + in, out := &in.TrustedSigners, &out.TrustedSigners + *out = make([]TrustedSignersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ViewerCertificate != nil { + in, out := &in.ViewerCertificate, &out.ViewerCertificate + *out = new(ViewerCertificateObservation) + (*in).DeepCopyInto(*out) + } + if in.WaitForDeployment != nil { + in, out := &in.WaitForDeployment, &out.WaitForDeployment + *out = new(bool) + **out = **in + } + if in.WebACLID != nil { + in, out := &in.WebACLID, &out.WebACLID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionObservation. +func (in *DistributionObservation) DeepCopy() *DistributionObservation { + if in == nil { + return nil + } + out := new(DistributionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DistributionParameters) DeepCopyInto(out *DistributionParameters) { + *out = *in + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.ContinuousDeploymentPolicyID != nil { + in, out := &in.ContinuousDeploymentPolicyID, &out.ContinuousDeploymentPolicyID + *out = new(string) + **out = **in + } + if in.CustomErrorResponse != nil { + in, out := &in.CustomErrorResponse, &out.CustomErrorResponse + *out = make([]CustomErrorResponseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultCacheBehavior != nil { + in, out := &in.DefaultCacheBehavior, &out.DefaultCacheBehavior + *out = new(DefaultCacheBehaviorParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultRootObject != nil { + in, out := &in.DefaultRootObject, &out.DefaultRootObject + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPVersion != nil { + in, out := &in.HTTPVersion, &out.HTTPVersion + *out = new(string) + **out = **in + } + if in.IsIPv6Enabled != nil { + in, out := &in.IsIPv6Enabled, &out.IsIPv6Enabled + *out = new(bool) + **out = **in + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = new(LoggingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.OrderedCacheBehavior != nil { + in, out := &in.OrderedCacheBehavior, &out.OrderedCacheBehavior + *out = make([]OrderedCacheBehaviorParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginGroup != nil { + in, out := &in.OriginGroup, &out.OriginGroup + *out = make([]OriginGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PriceClass != nil { + in, out := &in.PriceClass, &out.PriceClass + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Restrictions != nil { + in, out := &in.Restrictions, &out.Restrictions + *out = new(RestrictionsParameters) + (*in).DeepCopyInto(*out) + } + if in.RetainOnDelete != nil { + in, out := &in.RetainOnDelete, &out.RetainOnDelete + *out = new(bool) + **out = **in + } + if in.Staging != nil { + in, out := &in.Staging, &out.Staging + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ViewerCertificate != nil { + in, out := &in.ViewerCertificate, &out.ViewerCertificate + *out = new(ViewerCertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.WaitForDeployment != nil { + in, out := &in.WaitForDeployment, &out.WaitForDeployment + *out = new(bool) + **out = **in + } + if in.WebACLID != nil { + in, out := &in.WebACLID, &out.WebACLID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionParameters. +func (in *DistributionParameters) DeepCopy() *DistributionParameters { + if in == nil { + return nil + } + out := new(DistributionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DistributionSpec) DeepCopyInto(out *DistributionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionSpec. +func (in *DistributionSpec) DeepCopy() *DistributionSpec { + if in == nil { + return nil + } + out := new(DistributionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionStatus) DeepCopyInto(out *DistributionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionStatus. +func (in *DistributionStatus) DeepCopy() *DistributionStatus { + if in == nil { + return nil + } + out := new(DistributionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionEntitiesInitParameters) DeepCopyInto(out *EncryptionEntitiesInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EncryptionEntitiesItemsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionEntitiesInitParameters. +func (in *EncryptionEntitiesInitParameters) DeepCopy() *EncryptionEntitiesInitParameters { + if in == nil { + return nil + } + out := new(EncryptionEntitiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionEntitiesItemsInitParameters) DeepCopyInto(out *EncryptionEntitiesItemsInitParameters) { + *out = *in + if in.FieldPatterns != nil { + in, out := &in.FieldPatterns, &out.FieldPatterns + *out = new(FieldPatternsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } + if in.PublicKeyID != nil { + in, out := &in.PublicKeyID, &out.PublicKeyID + *out = new(string) + **out = **in + } + if in.PublicKeyIDRef != nil { + in, out := &in.PublicKeyIDRef, &out.PublicKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicKeyIDSelector != nil { + in, out := &in.PublicKeyIDSelector, &out.PublicKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionEntitiesItemsInitParameters. +func (in *EncryptionEntitiesItemsInitParameters) DeepCopy() *EncryptionEntitiesItemsInitParameters { + if in == nil { + return nil + } + out := new(EncryptionEntitiesItemsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionEntitiesItemsObservation) DeepCopyInto(out *EncryptionEntitiesItemsObservation) { + *out = *in + if in.FieldPatterns != nil { + in, out := &in.FieldPatterns, &out.FieldPatterns + *out = new(FieldPatternsObservation) + (*in).DeepCopyInto(*out) + } + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } + if in.PublicKeyID != nil { + in, out := &in.PublicKeyID, &out.PublicKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionEntitiesItemsObservation. 
+func (in *EncryptionEntitiesItemsObservation) DeepCopy() *EncryptionEntitiesItemsObservation { + if in == nil { + return nil + } + out := new(EncryptionEntitiesItemsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionEntitiesItemsParameters) DeepCopyInto(out *EncryptionEntitiesItemsParameters) { + *out = *in + if in.FieldPatterns != nil { + in, out := &in.FieldPatterns, &out.FieldPatterns + *out = new(FieldPatternsParameters) + (*in).DeepCopyInto(*out) + } + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } + if in.PublicKeyID != nil { + in, out := &in.PublicKeyID, &out.PublicKeyID + *out = new(string) + **out = **in + } + if in.PublicKeyIDRef != nil { + in, out := &in.PublicKeyIDRef, &out.PublicKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicKeyIDSelector != nil { + in, out := &in.PublicKeyIDSelector, &out.PublicKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionEntitiesItemsParameters. +func (in *EncryptionEntitiesItemsParameters) DeepCopy() *EncryptionEntitiesItemsParameters { + if in == nil { + return nil + } + out := new(EncryptionEntitiesItemsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionEntitiesObservation) DeepCopyInto(out *EncryptionEntitiesObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EncryptionEntitiesItemsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionEntitiesObservation. +func (in *EncryptionEntitiesObservation) DeepCopy() *EncryptionEntitiesObservation { + if in == nil { + return nil + } + out := new(EncryptionEntitiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionEntitiesParameters) DeepCopyInto(out *EncryptionEntitiesParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EncryptionEntitiesItemsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionEntitiesParameters. +func (in *EncryptionEntitiesParameters) DeepCopy() *EncryptionEntitiesParameters { + if in == nil { + return nil + } + out := new(EncryptionEntitiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters) { + *out = *in + if in.KinesisStreamConfig != nil { + in, out := &in.KinesisStreamConfig, &out.KinesisStreamConfig + *out = new(KinesisStreamConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamType != nil { + in, out := &in.StreamType, &out.StreamType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters. 
+func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters { + if in == nil { + return nil + } + out := new(EndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation) { + *out = *in + if in.KinesisStreamConfig != nil { + in, out := &in.KinesisStreamConfig, &out.KinesisStreamConfig + *out = new(KinesisStreamConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StreamType != nil { + in, out := &in.StreamType, &out.StreamType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation. +func (in *EndpointObservation) DeepCopy() *EndpointObservation { + if in == nil { + return nil + } + out := new(EndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters) { + *out = *in + if in.KinesisStreamConfig != nil { + in, out := &in.KinesisStreamConfig, &out.KinesisStreamConfig + *out = new(KinesisStreamConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamType != nil { + in, out := &in.StreamType, &out.StreamType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters. +func (in *EndpointParameters) DeepCopy() *EndpointParameters { + if in == nil { + return nil + } + out := new(EndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FailoverCriteriaInitParameters) DeepCopyInto(out *FailoverCriteriaInitParameters) { + *out = *in + if in.StatusCodes != nil { + in, out := &in.StatusCodes, &out.StatusCodes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverCriteriaInitParameters. +func (in *FailoverCriteriaInitParameters) DeepCopy() *FailoverCriteriaInitParameters { + if in == nil { + return nil + } + out := new(FailoverCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverCriteriaObservation) DeepCopyInto(out *FailoverCriteriaObservation) { + *out = *in + if in.StatusCodes != nil { + in, out := &in.StatusCodes, &out.StatusCodes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverCriteriaObservation. +func (in *FailoverCriteriaObservation) DeepCopy() *FailoverCriteriaObservation { + if in == nil { + return nil + } + out := new(FailoverCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FailoverCriteriaParameters) DeepCopyInto(out *FailoverCriteriaParameters) { + *out = *in + if in.StatusCodes != nil { + in, out := &in.StatusCodes, &out.StatusCodes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverCriteriaParameters. +func (in *FailoverCriteriaParameters) DeepCopy() *FailoverCriteriaParameters { + if in == nil { + return nil + } + out := new(FailoverCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldLevelEncryptionConfig) DeepCopyInto(out *FieldLevelEncryptionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionConfig. +func (in *FieldLevelEncryptionConfig) DeepCopy() *FieldLevelEncryptionConfig { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FieldLevelEncryptionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FieldLevelEncryptionConfigInitParameters) DeepCopyInto(out *FieldLevelEncryptionConfigInitParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.ContentTypeProfileConfig != nil { + in, out := &in.ContentTypeProfileConfig, &out.ContentTypeProfileConfig + *out = new(ContentTypeProfileConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryArgProfileConfig != nil { + in, out := &in.QueryArgProfileConfig, &out.QueryArgProfileConfig + *out = new(QueryArgProfileConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionConfigInitParameters. +func (in *FieldLevelEncryptionConfigInitParameters) DeepCopy() *FieldLevelEncryptionConfigInitParameters { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldLevelEncryptionConfigList) DeepCopyInto(out *FieldLevelEncryptionConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FieldLevelEncryptionConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionConfigList. +func (in *FieldLevelEncryptionConfigList) DeepCopy() *FieldLevelEncryptionConfigList { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FieldLevelEncryptionConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldLevelEncryptionConfigObservation) DeepCopyInto(out *FieldLevelEncryptionConfigObservation) { + *out = *in + if in.CallerReference != nil { + in, out := &in.CallerReference, &out.CallerReference + *out = new(string) + **out = **in + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.ContentTypeProfileConfig != nil { + in, out := &in.ContentTypeProfileConfig, &out.ContentTypeProfileConfig + *out = new(ContentTypeProfileConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.QueryArgProfileConfig != nil { + in, out := &in.QueryArgProfileConfig, &out.QueryArgProfileConfig + *out = new(QueryArgProfileConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionConfigObservation. +func (in *FieldLevelEncryptionConfigObservation) DeepCopy() *FieldLevelEncryptionConfigObservation { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FieldLevelEncryptionConfigParameters) DeepCopyInto(out *FieldLevelEncryptionConfigParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.ContentTypeProfileConfig != nil { + in, out := &in.ContentTypeProfileConfig, &out.ContentTypeProfileConfig + *out = new(ContentTypeProfileConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryArgProfileConfig != nil { + in, out := &in.QueryArgProfileConfig, &out.QueryArgProfileConfig + *out = new(QueryArgProfileConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionConfigParameters. +func (in *FieldLevelEncryptionConfigParameters) DeepCopy() *FieldLevelEncryptionConfigParameters { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldLevelEncryptionConfigSpec) DeepCopyInto(out *FieldLevelEncryptionConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionConfigSpec. +func (in *FieldLevelEncryptionConfigSpec) DeepCopy() *FieldLevelEncryptionConfigSpec { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FieldLevelEncryptionConfigStatus) DeepCopyInto(out *FieldLevelEncryptionConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionConfigStatus. +func (in *FieldLevelEncryptionConfigStatus) DeepCopy() *FieldLevelEncryptionConfigStatus { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldLevelEncryptionProfile) DeepCopyInto(out *FieldLevelEncryptionProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionProfile. +func (in *FieldLevelEncryptionProfile) DeepCopy() *FieldLevelEncryptionProfile { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FieldLevelEncryptionProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FieldLevelEncryptionProfileInitParameters) DeepCopyInto(out *FieldLevelEncryptionProfileInitParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.EncryptionEntities != nil { + in, out := &in.EncryptionEntities, &out.EncryptionEntities + *out = new(EncryptionEntitiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionProfileInitParameters. +func (in *FieldLevelEncryptionProfileInitParameters) DeepCopy() *FieldLevelEncryptionProfileInitParameters { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldLevelEncryptionProfileList) DeepCopyInto(out *FieldLevelEncryptionProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FieldLevelEncryptionProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionProfileList. +func (in *FieldLevelEncryptionProfileList) DeepCopy() *FieldLevelEncryptionProfileList { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FieldLevelEncryptionProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldLevelEncryptionProfileObservation) DeepCopyInto(out *FieldLevelEncryptionProfileObservation) { + *out = *in + if in.CallerReference != nil { + in, out := &in.CallerReference, &out.CallerReference + *out = new(string) + **out = **in + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.EncryptionEntities != nil { + in, out := &in.EncryptionEntities, &out.EncryptionEntities + *out = new(EncryptionEntitiesObservation) + (*in).DeepCopyInto(*out) + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionProfileObservation. +func (in *FieldLevelEncryptionProfileObservation) DeepCopy() *FieldLevelEncryptionProfileObservation { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FieldLevelEncryptionProfileParameters) DeepCopyInto(out *FieldLevelEncryptionProfileParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.EncryptionEntities != nil { + in, out := &in.EncryptionEntities, &out.EncryptionEntities + *out = new(EncryptionEntitiesParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionProfileParameters. +func (in *FieldLevelEncryptionProfileParameters) DeepCopy() *FieldLevelEncryptionProfileParameters { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldLevelEncryptionProfileSpec) DeepCopyInto(out *FieldLevelEncryptionProfileSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionProfileSpec. +func (in *FieldLevelEncryptionProfileSpec) DeepCopy() *FieldLevelEncryptionProfileSpec { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FieldLevelEncryptionProfileStatus) DeepCopyInto(out *FieldLevelEncryptionProfileStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldLevelEncryptionProfileStatus. +func (in *FieldLevelEncryptionProfileStatus) DeepCopy() *FieldLevelEncryptionProfileStatus { + if in == nil { + return nil + } + out := new(FieldLevelEncryptionProfileStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldPatternsInitParameters) DeepCopyInto(out *FieldPatternsInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldPatternsInitParameters. +func (in *FieldPatternsInitParameters) DeepCopy() *FieldPatternsInitParameters { + if in == nil { + return nil + } + out := new(FieldPatternsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldPatternsObservation) DeepCopyInto(out *FieldPatternsObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldPatternsObservation. 
+func (in *FieldPatternsObservation) DeepCopy() *FieldPatternsObservation { + if in == nil { + return nil + } + out := new(FieldPatternsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldPatternsParameters) DeepCopyInto(out *FieldPatternsParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldPatternsParameters. +func (in *FieldPatternsParameters) DeepCopy() *FieldPatternsParameters { + if in == nil { + return nil + } + out := new(FieldPatternsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardedValuesCookiesInitParameters) DeepCopyInto(out *ForwardedValuesCookiesInitParameters) { + *out = *in + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(string) + **out = **in + } + if in.WhitelistedNames != nil { + in, out := &in.WhitelistedNames, &out.WhitelistedNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardedValuesCookiesInitParameters. +func (in *ForwardedValuesCookiesInitParameters) DeepCopy() *ForwardedValuesCookiesInitParameters { + if in == nil { + return nil + } + out := new(ForwardedValuesCookiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForwardedValuesCookiesObservation) DeepCopyInto(out *ForwardedValuesCookiesObservation) { + *out = *in + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(string) + **out = **in + } + if in.WhitelistedNames != nil { + in, out := &in.WhitelistedNames, &out.WhitelistedNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardedValuesCookiesObservation. +func (in *ForwardedValuesCookiesObservation) DeepCopy() *ForwardedValuesCookiesObservation { + if in == nil { + return nil + } + out := new(ForwardedValuesCookiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardedValuesCookiesParameters) DeepCopyInto(out *ForwardedValuesCookiesParameters) { + *out = *in + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(string) + **out = **in + } + if in.WhitelistedNames != nil { + in, out := &in.WhitelistedNames, &out.WhitelistedNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardedValuesCookiesParameters. +func (in *ForwardedValuesCookiesParameters) DeepCopy() *ForwardedValuesCookiesParameters { + if in == nil { + return nil + } + out := new(ForwardedValuesCookiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForwardedValuesInitParameters) DeepCopyInto(out *ForwardedValuesInitParameters) { + *out = *in + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(ForwardedValuesCookiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(bool) + **out = **in + } + if in.QueryStringCacheKeys != nil { + in, out := &in.QueryStringCacheKeys, &out.QueryStringCacheKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardedValuesInitParameters. +func (in *ForwardedValuesInitParameters) DeepCopy() *ForwardedValuesInitParameters { + if in == nil { + return nil + } + out := new(ForwardedValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForwardedValuesObservation) DeepCopyInto(out *ForwardedValuesObservation) { + *out = *in + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(ForwardedValuesCookiesObservation) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(bool) + **out = **in + } + if in.QueryStringCacheKeys != nil { + in, out := &in.QueryStringCacheKeys, &out.QueryStringCacheKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardedValuesObservation. +func (in *ForwardedValuesObservation) DeepCopy() *ForwardedValuesObservation { + if in == nil { + return nil + } + out := new(ForwardedValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForwardedValuesParameters) DeepCopyInto(out *ForwardedValuesParameters) { + *out = *in + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(ForwardedValuesCookiesParameters) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(bool) + **out = **in + } + if in.QueryStringCacheKeys != nil { + in, out := &in.QueryStringCacheKeys, &out.QueryStringCacheKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardedValuesParameters. +func (in *ForwardedValuesParameters) DeepCopy() *ForwardedValuesParameters { + if in == nil { + return nil + } + out := new(ForwardedValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameOptionsInitParameters) DeepCopyInto(out *FrameOptionsInitParameters) { + *out = *in + if in.FrameOption != nil { + in, out := &in.FrameOption, &out.FrameOption + *out = new(string) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameOptionsInitParameters. 
+func (in *FrameOptionsInitParameters) DeepCopy() *FrameOptionsInitParameters { + if in == nil { + return nil + } + out := new(FrameOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameOptionsObservation) DeepCopyInto(out *FrameOptionsObservation) { + *out = *in + if in.FrameOption != nil { + in, out := &in.FrameOption, &out.FrameOption + *out = new(string) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameOptionsObservation. +func (in *FrameOptionsObservation) DeepCopy() *FrameOptionsObservation { + if in == nil { + return nil + } + out := new(FrameOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameOptionsParameters) DeepCopyInto(out *FrameOptionsParameters) { + *out = *in + if in.FrameOption != nil { + in, out := &in.FrameOption, &out.FrameOption + *out = new(string) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameOptionsParameters. +func (in *FrameOptionsParameters) DeepCopy() *FrameOptionsParameters { + if in == nil { + return nil + } + out := new(FrameOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAssociationInitParameters) DeepCopyInto(out *FunctionAssociationInitParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAssociationInitParameters. +func (in *FunctionAssociationInitParameters) DeepCopy() *FunctionAssociationInitParameters { + if in == nil { + return nil + } + out := new(FunctionAssociationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAssociationObservation) DeepCopyInto(out *FunctionAssociationObservation) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAssociationObservation. +func (in *FunctionAssociationObservation) DeepCopy() *FunctionAssociationObservation { + if in == nil { + return nil + } + out := new(FunctionAssociationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAssociationParameters) DeepCopyInto(out *FunctionAssociationParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAssociationParameters. +func (in *FunctionAssociationParameters) DeepCopy() *FunctionAssociationParameters { + if in == nil { + return nil + } + out := new(FunctionAssociationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeoRestrictionInitParameters) DeepCopyInto(out *GeoRestrictionInitParameters) { + *out = *in + if in.Locations != nil { + in, out := &in.Locations, &out.Locations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RestrictionType != nil { + in, out := &in.RestrictionType, &out.RestrictionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoRestrictionInitParameters. +func (in *GeoRestrictionInitParameters) DeepCopy() *GeoRestrictionInitParameters { + if in == nil { + return nil + } + out := new(GeoRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GeoRestrictionObservation) DeepCopyInto(out *GeoRestrictionObservation) { + *out = *in + if in.Locations != nil { + in, out := &in.Locations, &out.Locations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RestrictionType != nil { + in, out := &in.RestrictionType, &out.RestrictionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoRestrictionObservation. +func (in *GeoRestrictionObservation) DeepCopy() *GeoRestrictionObservation { + if in == nil { + return nil + } + out := new(GeoRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeoRestrictionParameters) DeepCopyInto(out *GeoRestrictionParameters) { + *out = *in + if in.Locations != nil { + in, out := &in.Locations, &out.Locations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RestrictionType != nil { + in, out := &in.RestrictionType, &out.RestrictionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoRestrictionParameters. +func (in *GeoRestrictionParameters) DeepCopy() *GeoRestrictionParameters { + if in == nil { + return nil + } + out := new(GeoRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadersConfigHeadersInitParameters) DeepCopyInto(out *HeadersConfigHeadersInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersConfigHeadersInitParameters. +func (in *HeadersConfigHeadersInitParameters) DeepCopy() *HeadersConfigHeadersInitParameters { + if in == nil { + return nil + } + out := new(HeadersConfigHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersConfigHeadersObservation) DeepCopyInto(out *HeadersConfigHeadersObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersConfigHeadersObservation. +func (in *HeadersConfigHeadersObservation) DeepCopy() *HeadersConfigHeadersObservation { + if in == nil { + return nil + } + out := new(HeadersConfigHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadersConfigHeadersParameters) DeepCopyInto(out *HeadersConfigHeadersParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersConfigHeadersParameters. +func (in *HeadersConfigHeadersParameters) DeepCopy() *HeadersConfigHeadersParameters { + if in == nil { + return nil + } + out := new(HeadersConfigHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersConfigInitParameters) DeepCopyInto(out *HeadersConfigInitParameters) { + *out = *in + if in.HeaderBehavior != nil { + in, out := &in.HeaderBehavior, &out.HeaderBehavior + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(HeadersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersConfigInitParameters. +func (in *HeadersConfigInitParameters) DeepCopy() *HeadersConfigInitParameters { + if in == nil { + return nil + } + out := new(HeadersConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadersConfigObservation) DeepCopyInto(out *HeadersConfigObservation) { + *out = *in + if in.HeaderBehavior != nil { + in, out := &in.HeaderBehavior, &out.HeaderBehavior + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(HeadersObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersConfigObservation. +func (in *HeadersConfigObservation) DeepCopy() *HeadersConfigObservation { + if in == nil { + return nil + } + out := new(HeadersConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersConfigParameters) DeepCopyInto(out *HeadersConfigParameters) { + *out = *in + if in.HeaderBehavior != nil { + in, out := &in.HeaderBehavior, &out.HeaderBehavior + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(HeadersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersConfigParameters. +func (in *HeadersConfigParameters) DeepCopy() *HeadersConfigParameters { + if in == nil { + return nil + } + out := new(HeadersConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersInitParameters) DeepCopyInto(out *HeadersInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersInitParameters. 
+func (in *HeadersInitParameters) DeepCopy() *HeadersInitParameters { + if in == nil { + return nil + } + out := new(HeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersObservation) DeepCopyInto(out *HeadersObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersObservation. +func (in *HeadersObservation) DeepCopy() *HeadersObservation { + if in == nil { + return nil + } + out := new(HeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersParameters) DeepCopyInto(out *HeadersParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersParameters. +func (in *HeadersParameters) DeepCopy() *HeadersParameters { + if in == nil { + return nil + } + out := new(HeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ItemsInitParameters) DeepCopyInto(out *ItemsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItemsInitParameters. 
+func (in *ItemsInitParameters) DeepCopy() *ItemsInitParameters { + if in == nil { + return nil + } + out := new(ItemsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ItemsObservation) DeepCopyInto(out *ItemsObservation) { + *out = *in + if in.KeyGroupID != nil { + in, out := &in.KeyGroupID, &out.KeyGroupID + *out = new(string) + **out = **in + } + if in.KeyPairIds != nil { + in, out := &in.KeyPairIds, &out.KeyPairIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItemsObservation. +func (in *ItemsObservation) DeepCopy() *ItemsObservation { + if in == nil { + return nil + } + out := new(ItemsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ItemsParameters) DeepCopyInto(out *ItemsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItemsParameters. +func (in *ItemsParameters) DeepCopy() *ItemsParameters { + if in == nil { + return nil + } + out := new(ItemsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisStreamConfigInitParameters) DeepCopyInto(out *KinesisStreamConfigInitParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamArnRef != nil { + in, out := &in.StreamArnRef, &out.StreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamArnSelector != nil { + in, out := &in.StreamArnSelector, &out.StreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamConfigInitParameters. +func (in *KinesisStreamConfigInitParameters) DeepCopy() *KinesisStreamConfigInitParameters { + if in == nil { + return nil + } + out := new(KinesisStreamConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamConfigObservation) DeepCopyInto(out *KinesisStreamConfigObservation) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamConfigObservation. 
+func (in *KinesisStreamConfigObservation) DeepCopy() *KinesisStreamConfigObservation { + if in == nil { + return nil + } + out := new(KinesisStreamConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamConfigParameters) DeepCopyInto(out *KinesisStreamConfigParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamArnRef != nil { + in, out := &in.StreamArnRef, &out.StreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamArnSelector != nil { + in, out := &in.StreamArnSelector, &out.StreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamConfigParameters. +func (in *KinesisStreamConfigParameters) DeepCopy() *KinesisStreamConfigParameters { + if in == nil { + return nil + } + out := new(KinesisStreamConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LambdaFunctionAssociationInitParameters) DeepCopyInto(out *LambdaFunctionAssociationInitParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.IncludeBody != nil { + in, out := &in.IncludeBody, &out.IncludeBody + *out = new(bool) + **out = **in + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaFunctionAssociationInitParameters. +func (in *LambdaFunctionAssociationInitParameters) DeepCopy() *LambdaFunctionAssociationInitParameters { + if in == nil { + return nil + } + out := new(LambdaFunctionAssociationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaFunctionAssociationObservation) DeepCopyInto(out *LambdaFunctionAssociationObservation) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.IncludeBody != nil { + in, out := &in.IncludeBody, &out.IncludeBody + *out = new(bool) + **out = **in + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaFunctionAssociationObservation. +func (in *LambdaFunctionAssociationObservation) DeepCopy() *LambdaFunctionAssociationObservation { + if in == nil { + return nil + } + out := new(LambdaFunctionAssociationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LambdaFunctionAssociationParameters) DeepCopyInto(out *LambdaFunctionAssociationParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.IncludeBody != nil { + in, out := &in.IncludeBody, &out.IncludeBody + *out = new(bool) + **out = **in + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaFunctionAssociationParameters. +func (in *LambdaFunctionAssociationParameters) DeepCopy() *LambdaFunctionAssociationParameters { + if in == nil { + return nil + } + out := new(LambdaFunctionAssociationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigInitParameters) DeepCopyInto(out *LoggingConfigInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.IncludeCookies != nil { + in, out := &in.IncludeCookies, &out.IncludeCookies + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigInitParameters. +func (in *LoggingConfigInitParameters) DeepCopy() *LoggingConfigInitParameters { + if in == nil { + return nil + } + out := new(LoggingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingConfigObservation) DeepCopyInto(out *LoggingConfigObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.IncludeCookies != nil { + in, out := &in.IncludeCookies, &out.IncludeCookies + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigObservation. +func (in *LoggingConfigObservation) DeepCopy() *LoggingConfigObservation { + if in == nil { + return nil + } + out := new(LoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigParameters) DeepCopyInto(out *LoggingConfigParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.IncludeCookies != nil { + in, out := &in.IncludeCookies, &out.IncludeCookies + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigParameters. +func (in *LoggingConfigParameters) DeepCopy() *LoggingConfigParameters { + if in == nil { + return nil + } + out := new(LoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemberInitParameters) DeepCopyInto(out *MemberInitParameters) { + *out = *in + if in.OriginID != nil { + in, out := &in.OriginID, &out.OriginID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberInitParameters. 
+func (in *MemberInitParameters) DeepCopy() *MemberInitParameters { + if in == nil { + return nil + } + out := new(MemberInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemberObservation) DeepCopyInto(out *MemberObservation) { + *out = *in + if in.OriginID != nil { + in, out := &in.OriginID, &out.OriginID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberObservation. +func (in *MemberObservation) DeepCopy() *MemberObservation { + if in == nil { + return nil + } + out := new(MemberObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemberParameters) DeepCopyInto(out *MemberParameters) { + *out = *in + if in.OriginID != nil { + in, out := &in.OriginID, &out.OriginID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberParameters. +func (in *MemberParameters) DeepCopy() *MemberParameters { + if in == nil { + return nil + } + out := new(MemberParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSubscription) DeepCopyInto(out *MonitoringSubscription) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscription. 
+func (in *MonitoringSubscription) DeepCopy() *MonitoringSubscription { + if in == nil { + return nil + } + out := new(MonitoringSubscription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitoringSubscription) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSubscriptionInitParameters) DeepCopyInto(out *MonitoringSubscriptionInitParameters) { + *out = *in + if in.DistributionID != nil { + in, out := &in.DistributionID, &out.DistributionID + *out = new(string) + **out = **in + } + if in.DistributionIDRef != nil { + in, out := &in.DistributionIDRef, &out.DistributionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DistributionIDSelector != nil { + in, out := &in.DistributionIDSelector, &out.DistributionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MonitoringSubscription != nil { + in, out := &in.MonitoringSubscription, &out.MonitoringSubscription + *out = new(MonitoringSubscriptionMonitoringSubscriptionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscriptionInitParameters. +func (in *MonitoringSubscriptionInitParameters) DeepCopy() *MonitoringSubscriptionInitParameters { + if in == nil { + return nil + } + out := new(MonitoringSubscriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringSubscriptionList) DeepCopyInto(out *MonitoringSubscriptionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitoringSubscription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscriptionList. +func (in *MonitoringSubscriptionList) DeepCopy() *MonitoringSubscriptionList { + if in == nil { + return nil + } + out := new(MonitoringSubscriptionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitoringSubscriptionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSubscriptionMonitoringSubscriptionInitParameters) DeepCopyInto(out *MonitoringSubscriptionMonitoringSubscriptionInitParameters) { + *out = *in + if in.RealtimeMetricsSubscriptionConfig != nil { + in, out := &in.RealtimeMetricsSubscriptionConfig, &out.RealtimeMetricsSubscriptionConfig + *out = new(RealtimeMetricsSubscriptionConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscriptionMonitoringSubscriptionInitParameters. +func (in *MonitoringSubscriptionMonitoringSubscriptionInitParameters) DeepCopy() *MonitoringSubscriptionMonitoringSubscriptionInitParameters { + if in == nil { + return nil + } + out := new(MonitoringSubscriptionMonitoringSubscriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringSubscriptionMonitoringSubscriptionObservation) DeepCopyInto(out *MonitoringSubscriptionMonitoringSubscriptionObservation) { + *out = *in + if in.RealtimeMetricsSubscriptionConfig != nil { + in, out := &in.RealtimeMetricsSubscriptionConfig, &out.RealtimeMetricsSubscriptionConfig + *out = new(RealtimeMetricsSubscriptionConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscriptionMonitoringSubscriptionObservation. +func (in *MonitoringSubscriptionMonitoringSubscriptionObservation) DeepCopy() *MonitoringSubscriptionMonitoringSubscriptionObservation { + if in == nil { + return nil + } + out := new(MonitoringSubscriptionMonitoringSubscriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSubscriptionMonitoringSubscriptionParameters) DeepCopyInto(out *MonitoringSubscriptionMonitoringSubscriptionParameters) { + *out = *in + if in.RealtimeMetricsSubscriptionConfig != nil { + in, out := &in.RealtimeMetricsSubscriptionConfig, &out.RealtimeMetricsSubscriptionConfig + *out = new(RealtimeMetricsSubscriptionConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscriptionMonitoringSubscriptionParameters. +func (in *MonitoringSubscriptionMonitoringSubscriptionParameters) DeepCopy() *MonitoringSubscriptionMonitoringSubscriptionParameters { + if in == nil { + return nil + } + out := new(MonitoringSubscriptionMonitoringSubscriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringSubscriptionObservation) DeepCopyInto(out *MonitoringSubscriptionObservation) { + *out = *in + if in.DistributionID != nil { + in, out := &in.DistributionID, &out.DistributionID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MonitoringSubscription != nil { + in, out := &in.MonitoringSubscription, &out.MonitoringSubscription + *out = new(MonitoringSubscriptionMonitoringSubscriptionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscriptionObservation. +func (in *MonitoringSubscriptionObservation) DeepCopy() *MonitoringSubscriptionObservation { + if in == nil { + return nil + } + out := new(MonitoringSubscriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSubscriptionParameters) DeepCopyInto(out *MonitoringSubscriptionParameters) { + *out = *in + if in.DistributionID != nil { + in, out := &in.DistributionID, &out.DistributionID + *out = new(string) + **out = **in + } + if in.DistributionIDRef != nil { + in, out := &in.DistributionIDRef, &out.DistributionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DistributionIDSelector != nil { + in, out := &in.DistributionIDSelector, &out.DistributionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MonitoringSubscription != nil { + in, out := &in.MonitoringSubscription, &out.MonitoringSubscription + *out = new(MonitoringSubscriptionMonitoringSubscriptionParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscriptionParameters. 
+func (in *MonitoringSubscriptionParameters) DeepCopy() *MonitoringSubscriptionParameters { + if in == nil { + return nil + } + out := new(MonitoringSubscriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSubscriptionSpec) DeepCopyInto(out *MonitoringSubscriptionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscriptionSpec. +func (in *MonitoringSubscriptionSpec) DeepCopy() *MonitoringSubscriptionSpec { + if in == nil { + return nil + } + out := new(MonitoringSubscriptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSubscriptionStatus) DeepCopyInto(out *MonitoringSubscriptionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSubscriptionStatus. +func (in *MonitoringSubscriptionStatus) DeepCopy() *MonitoringSubscriptionStatus { + if in == nil { + return nil + } + out := new(MonitoringSubscriptionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedCacheBehaviorForwardedValuesCookiesInitParameters) DeepCopyInto(out *OrderedCacheBehaviorForwardedValuesCookiesInitParameters) { + *out = *in + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(string) + **out = **in + } + if in.WhitelistedNames != nil { + in, out := &in.WhitelistedNames, &out.WhitelistedNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorForwardedValuesCookiesInitParameters. +func (in *OrderedCacheBehaviorForwardedValuesCookiesInitParameters) DeepCopy() *OrderedCacheBehaviorForwardedValuesCookiesInitParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorForwardedValuesCookiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedCacheBehaviorForwardedValuesCookiesObservation) DeepCopyInto(out *OrderedCacheBehaviorForwardedValuesCookiesObservation) { + *out = *in + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(string) + **out = **in + } + if in.WhitelistedNames != nil { + in, out := &in.WhitelistedNames, &out.WhitelistedNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorForwardedValuesCookiesObservation. 
+func (in *OrderedCacheBehaviorForwardedValuesCookiesObservation) DeepCopy() *OrderedCacheBehaviorForwardedValuesCookiesObservation { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorForwardedValuesCookiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedCacheBehaviorForwardedValuesCookiesParameters) DeepCopyInto(out *OrderedCacheBehaviorForwardedValuesCookiesParameters) { + *out = *in + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(string) + **out = **in + } + if in.WhitelistedNames != nil { + in, out := &in.WhitelistedNames, &out.WhitelistedNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorForwardedValuesCookiesParameters. +func (in *OrderedCacheBehaviorForwardedValuesCookiesParameters) DeepCopy() *OrderedCacheBehaviorForwardedValuesCookiesParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorForwardedValuesCookiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedCacheBehaviorForwardedValuesInitParameters) DeepCopyInto(out *OrderedCacheBehaviorForwardedValuesInitParameters) { + *out = *in + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(OrderedCacheBehaviorForwardedValuesCookiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(bool) + **out = **in + } + if in.QueryStringCacheKeys != nil { + in, out := &in.QueryStringCacheKeys, &out.QueryStringCacheKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorForwardedValuesInitParameters. +func (in *OrderedCacheBehaviorForwardedValuesInitParameters) DeepCopy() *OrderedCacheBehaviorForwardedValuesInitParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorForwardedValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedCacheBehaviorForwardedValuesObservation) DeepCopyInto(out *OrderedCacheBehaviorForwardedValuesObservation) { + *out = *in + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(OrderedCacheBehaviorForwardedValuesCookiesObservation) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(bool) + **out = **in + } + if in.QueryStringCacheKeys != nil { + in, out := &in.QueryStringCacheKeys, &out.QueryStringCacheKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorForwardedValuesObservation. +func (in *OrderedCacheBehaviorForwardedValuesObservation) DeepCopy() *OrderedCacheBehaviorForwardedValuesObservation { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorForwardedValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedCacheBehaviorForwardedValuesParameters) DeepCopyInto(out *OrderedCacheBehaviorForwardedValuesParameters) { + *out = *in + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(OrderedCacheBehaviorForwardedValuesCookiesParameters) + (*in).DeepCopyInto(*out) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(bool) + **out = **in + } + if in.QueryStringCacheKeys != nil { + in, out := &in.QueryStringCacheKeys, &out.QueryStringCacheKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorForwardedValuesParameters. +func (in *OrderedCacheBehaviorForwardedValuesParameters) DeepCopy() *OrderedCacheBehaviorForwardedValuesParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorForwardedValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedCacheBehaviorFunctionAssociationInitParameters) DeepCopyInto(out *OrderedCacheBehaviorFunctionAssociationInitParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } + if in.FunctionArnRef != nil { + in, out := &in.FunctionArnRef, &out.FunctionArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionArnSelector != nil { + in, out := &in.FunctionArnSelector, &out.FunctionArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorFunctionAssociationInitParameters. +func (in *OrderedCacheBehaviorFunctionAssociationInitParameters) DeepCopy() *OrderedCacheBehaviorFunctionAssociationInitParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorFunctionAssociationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedCacheBehaviorFunctionAssociationObservation) DeepCopyInto(out *OrderedCacheBehaviorFunctionAssociationObservation) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorFunctionAssociationObservation. 
+func (in *OrderedCacheBehaviorFunctionAssociationObservation) DeepCopy() *OrderedCacheBehaviorFunctionAssociationObservation { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorFunctionAssociationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedCacheBehaviorFunctionAssociationParameters) DeepCopyInto(out *OrderedCacheBehaviorFunctionAssociationParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } + if in.FunctionArnRef != nil { + in, out := &in.FunctionArnRef, &out.FunctionArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionArnSelector != nil { + in, out := &in.FunctionArnSelector, &out.FunctionArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorFunctionAssociationParameters. +func (in *OrderedCacheBehaviorFunctionAssociationParameters) DeepCopy() *OrderedCacheBehaviorFunctionAssociationParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorFunctionAssociationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedCacheBehaviorInitParameters) DeepCopyInto(out *OrderedCacheBehaviorInitParameters) { + *out = *in + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CachePolicyID != nil { + in, out := &in.CachePolicyID, &out.CachePolicyID + *out = new(string) + **out = **in + } + if in.CachedMethods != nil { + in, out := &in.CachedMethods, &out.CachedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Compress != nil { + in, out := &in.Compress, &out.Compress + *out = new(bool) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.FieldLevelEncryptionID != nil { + in, out := &in.FieldLevelEncryptionID, &out.FieldLevelEncryptionID + *out = new(string) + **out = **in + } + if in.ForwardedValues != nil { + in, out := &in.ForwardedValues, &out.ForwardedValues + *out = new(OrderedCacheBehaviorForwardedValuesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionAssociation != nil { + in, out := &in.FunctionAssociation, &out.FunctionAssociation + *out = make([]OrderedCacheBehaviorFunctionAssociationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LambdaFunctionAssociation != nil { + in, out := &in.LambdaFunctionAssociation, &out.LambdaFunctionAssociation + *out = make([]OrderedCacheBehaviorLambdaFunctionAssociationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxTTL != nil { + in, out := &in.MaxTTL, &out.MaxTTL + *out = new(float64) + **out = **in + } + if in.MinTTL != nil { + in, out := &in.MinTTL, &out.MinTTL + *out = new(float64) + **out = 
**in + } + if in.OriginRequestPolicyID != nil { + in, out := &in.OriginRequestPolicyID, &out.OriginRequestPolicyID + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.RealtimeLogConfigArn != nil { + in, out := &in.RealtimeLogConfigArn, &out.RealtimeLogConfigArn + *out = new(string) + **out = **in + } + if in.ResponseHeadersPolicyID != nil { + in, out := &in.ResponseHeadersPolicyID, &out.ResponseHeadersPolicyID + *out = new(string) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } + if in.TargetOriginID != nil { + in, out := &in.TargetOriginID, &out.TargetOriginID + *out = new(string) + **out = **in + } + if in.TrustedKeyGroups != nil { + in, out := &in.TrustedKeyGroups, &out.TrustedKeyGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TrustedSigners != nil { + in, out := &in.TrustedSigners, &out.TrustedSigners + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ViewerProtocolPolicy != nil { + in, out := &in.ViewerProtocolPolicy, &out.ViewerProtocolPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorInitParameters. +func (in *OrderedCacheBehaviorInitParameters) DeepCopy() *OrderedCacheBehaviorInitParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedCacheBehaviorLambdaFunctionAssociationInitParameters) DeepCopyInto(out *OrderedCacheBehaviorLambdaFunctionAssociationInitParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.IncludeBody != nil { + in, out := &in.IncludeBody, &out.IncludeBody + *out = new(bool) + **out = **in + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaArnRef != nil { + in, out := &in.LambdaArnRef, &out.LambdaArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LambdaArnSelector != nil { + in, out := &in.LambdaArnSelector, &out.LambdaArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorLambdaFunctionAssociationInitParameters. +func (in *OrderedCacheBehaviorLambdaFunctionAssociationInitParameters) DeepCopy() *OrderedCacheBehaviorLambdaFunctionAssociationInitParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorLambdaFunctionAssociationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedCacheBehaviorLambdaFunctionAssociationObservation) DeepCopyInto(out *OrderedCacheBehaviorLambdaFunctionAssociationObservation) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.IncludeBody != nil { + in, out := &in.IncludeBody, &out.IncludeBody + *out = new(bool) + **out = **in + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorLambdaFunctionAssociationObservation. 
+func (in *OrderedCacheBehaviorLambdaFunctionAssociationObservation) DeepCopy() *OrderedCacheBehaviorLambdaFunctionAssociationObservation { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorLambdaFunctionAssociationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedCacheBehaviorLambdaFunctionAssociationParameters) DeepCopyInto(out *OrderedCacheBehaviorLambdaFunctionAssociationParameters) { + *out = *in + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.IncludeBody != nil { + in, out := &in.IncludeBody, &out.IncludeBody + *out = new(bool) + **out = **in + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaArnRef != nil { + in, out := &in.LambdaArnRef, &out.LambdaArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LambdaArnSelector != nil { + in, out := &in.LambdaArnSelector, &out.LambdaArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorLambdaFunctionAssociationParameters. +func (in *OrderedCacheBehaviorLambdaFunctionAssociationParameters) DeepCopy() *OrderedCacheBehaviorLambdaFunctionAssociationParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorLambdaFunctionAssociationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedCacheBehaviorObservation) DeepCopyInto(out *OrderedCacheBehaviorObservation) { + *out = *in + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CachePolicyID != nil { + in, out := &in.CachePolicyID, &out.CachePolicyID + *out = new(string) + **out = **in + } + if in.CachedMethods != nil { + in, out := &in.CachedMethods, &out.CachedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Compress != nil { + in, out := &in.Compress, &out.Compress + *out = new(bool) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.FieldLevelEncryptionID != nil { + in, out := &in.FieldLevelEncryptionID, &out.FieldLevelEncryptionID + *out = new(string) + **out = **in + } + if in.ForwardedValues != nil { + in, out := &in.ForwardedValues, &out.ForwardedValues + *out = new(OrderedCacheBehaviorForwardedValuesObservation) + (*in).DeepCopyInto(*out) + } + if in.FunctionAssociation != nil { + in, out := &in.FunctionAssociation, &out.FunctionAssociation + *out = make([]OrderedCacheBehaviorFunctionAssociationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LambdaFunctionAssociation != nil { + in, out := &in.LambdaFunctionAssociation, &out.LambdaFunctionAssociation + *out = make([]OrderedCacheBehaviorLambdaFunctionAssociationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxTTL != nil { + in, out := &in.MaxTTL, &out.MaxTTL + *out = new(float64) + **out = **in + } + if in.MinTTL != nil { + in, out := &in.MinTTL, &out.MinTTL + *out = new(float64) + **out = **in + } + if 
in.OriginRequestPolicyID != nil { + in, out := &in.OriginRequestPolicyID, &out.OriginRequestPolicyID + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.RealtimeLogConfigArn != nil { + in, out := &in.RealtimeLogConfigArn, &out.RealtimeLogConfigArn + *out = new(string) + **out = **in + } + if in.ResponseHeadersPolicyID != nil { + in, out := &in.ResponseHeadersPolicyID, &out.ResponseHeadersPolicyID + *out = new(string) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } + if in.TargetOriginID != nil { + in, out := &in.TargetOriginID, &out.TargetOriginID + *out = new(string) + **out = **in + } + if in.TrustedKeyGroups != nil { + in, out := &in.TrustedKeyGroups, &out.TrustedKeyGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TrustedSigners != nil { + in, out := &in.TrustedSigners, &out.TrustedSigners + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ViewerProtocolPolicy != nil { + in, out := &in.ViewerProtocolPolicy, &out.ViewerProtocolPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorObservation. +func (in *OrderedCacheBehaviorObservation) DeepCopy() *OrderedCacheBehaviorObservation { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedCacheBehaviorParameters) DeepCopyInto(out *OrderedCacheBehaviorParameters) { + *out = *in + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CachePolicyID != nil { + in, out := &in.CachePolicyID, &out.CachePolicyID + *out = new(string) + **out = **in + } + if in.CachedMethods != nil { + in, out := &in.CachedMethods, &out.CachedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Compress != nil { + in, out := &in.Compress, &out.Compress + *out = new(bool) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.FieldLevelEncryptionID != nil { + in, out := &in.FieldLevelEncryptionID, &out.FieldLevelEncryptionID + *out = new(string) + **out = **in + } + if in.ForwardedValues != nil { + in, out := &in.ForwardedValues, &out.ForwardedValues + *out = new(OrderedCacheBehaviorForwardedValuesParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionAssociation != nil { + in, out := &in.FunctionAssociation, &out.FunctionAssociation + *out = make([]OrderedCacheBehaviorFunctionAssociationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LambdaFunctionAssociation != nil { + in, out := &in.LambdaFunctionAssociation, &out.LambdaFunctionAssociation + *out = make([]OrderedCacheBehaviorLambdaFunctionAssociationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxTTL != nil { + in, out := &in.MaxTTL, &out.MaxTTL + *out = new(float64) + **out = **in + } + if in.MinTTL != nil { + in, out := &in.MinTTL, &out.MinTTL + *out = new(float64) + **out = **in + } + if 
in.OriginRequestPolicyID != nil { + in, out := &in.OriginRequestPolicyID, &out.OriginRequestPolicyID + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.RealtimeLogConfigArn != nil { + in, out := &in.RealtimeLogConfigArn, &out.RealtimeLogConfigArn + *out = new(string) + **out = **in + } + if in.ResponseHeadersPolicyID != nil { + in, out := &in.ResponseHeadersPolicyID, &out.ResponseHeadersPolicyID + *out = new(string) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } + if in.TargetOriginID != nil { + in, out := &in.TargetOriginID, &out.TargetOriginID + *out = new(string) + **out = **in + } + if in.TrustedKeyGroups != nil { + in, out := &in.TrustedKeyGroups, &out.TrustedKeyGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TrustedSigners != nil { + in, out := &in.TrustedSigners, &out.TrustedSigners + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ViewerProtocolPolicy != nil { + in, out := &in.ViewerProtocolPolicy, &out.ViewerProtocolPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedCacheBehaviorParameters. +func (in *OrderedCacheBehaviorParameters) DeepCopy() *OrderedCacheBehaviorParameters { + if in == nil { + return nil + } + out := new(OrderedCacheBehaviorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginGroupInitParameters) DeepCopyInto(out *OriginGroupInitParameters) { + *out = *in + if in.FailoverCriteria != nil { + in, out := &in.FailoverCriteria, &out.FailoverCriteria + *out = new(FailoverCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = make([]MemberInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginID != nil { + in, out := &in.OriginID, &out.OriginID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupInitParameters. +func (in *OriginGroupInitParameters) DeepCopy() *OriginGroupInitParameters { + if in == nil { + return nil + } + out := new(OriginGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroupObservation) DeepCopyInto(out *OriginGroupObservation) { + *out = *in + if in.FailoverCriteria != nil { + in, out := &in.FailoverCriteria, &out.FailoverCriteria + *out = new(FailoverCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = make([]MemberObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginID != nil { + in, out := &in.OriginID, &out.OriginID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupObservation. +func (in *OriginGroupObservation) DeepCopy() *OriginGroupObservation { + if in == nil { + return nil + } + out := new(OriginGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginGroupParameters) DeepCopyInto(out *OriginGroupParameters) { + *out = *in + if in.FailoverCriteria != nil { + in, out := &in.FailoverCriteria, &out.FailoverCriteria + *out = new(FailoverCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = make([]MemberParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginID != nil { + in, out := &in.OriginID, &out.OriginID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupParameters. +func (in *OriginGroupParameters) DeepCopy() *OriginGroupParameters { + if in == nil { + return nil + } + out := new(OriginGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginInitParameters) DeepCopyInto(out *OriginInitParameters) { + *out = *in + if in.ConnectionAttempts != nil { + in, out := &in.ConnectionAttempts, &out.ConnectionAttempts + *out = new(float64) + **out = **in + } + if in.ConnectionTimeout != nil { + in, out := &in.ConnectionTimeout, &out.ConnectionTimeout + *out = new(float64) + **out = **in + } + if in.CustomHeader != nil { + in, out := &in.CustomHeader, &out.CustomHeader + *out = make([]CustomHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomOriginConfig != nil { + in, out := &in.CustomOriginConfig, &out.CustomOriginConfig + *out = new(CustomOriginConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.OriginAccessControlID != nil { + in, out := &in.OriginAccessControlID, &out.OriginAccessControlID + *out = new(string) + **out = **in + } + if in.OriginAccessControlIDRef != nil { + in, out := 
&in.OriginAccessControlIDRef, &out.OriginAccessControlIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginAccessControlIDSelector != nil { + in, out := &in.OriginAccessControlIDSelector, &out.OriginAccessControlIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OriginID != nil { + in, out := &in.OriginID, &out.OriginID + *out = new(string) + **out = **in + } + if in.OriginPath != nil { + in, out := &in.OriginPath, &out.OriginPath + *out = new(string) + **out = **in + } + if in.OriginShield != nil { + in, out := &in.OriginShield, &out.OriginShield + *out = new(OriginShieldInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3OriginConfig != nil { + in, out := &in.S3OriginConfig, &out.S3OriginConfig + *out = new(S3OriginConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginInitParameters. +func (in *OriginInitParameters) DeepCopy() *OriginInitParameters { + if in == nil { + return nil + } + out := new(OriginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginObservation) DeepCopyInto(out *OriginObservation) { + *out = *in + if in.ConnectionAttempts != nil { + in, out := &in.ConnectionAttempts, &out.ConnectionAttempts + *out = new(float64) + **out = **in + } + if in.ConnectionTimeout != nil { + in, out := &in.ConnectionTimeout, &out.ConnectionTimeout + *out = new(float64) + **out = **in + } + if in.CustomHeader != nil { + in, out := &in.CustomHeader, &out.CustomHeader + *out = make([]CustomHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomOriginConfig != nil { + in, out := &in.CustomOriginConfig, &out.CustomOriginConfig + *out = new(CustomOriginConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.OriginAccessControlID != nil { + in, out := &in.OriginAccessControlID, &out.OriginAccessControlID + *out = new(string) + **out = **in + } + if in.OriginID != nil { + in, out := &in.OriginID, &out.OriginID + *out = new(string) + **out = **in + } + if in.OriginPath != nil { + in, out := &in.OriginPath, &out.OriginPath + *out = new(string) + **out = **in + } + if in.OriginShield != nil { + in, out := &in.OriginShield, &out.OriginShield + *out = new(OriginShieldObservation) + (*in).DeepCopyInto(*out) + } + if in.S3OriginConfig != nil { + in, out := &in.S3OriginConfig, &out.S3OriginConfig + *out = new(S3OriginConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginObservation. +func (in *OriginObservation) DeepCopy() *OriginObservation { + if in == nil { + return nil + } + out := new(OriginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginParameters) DeepCopyInto(out *OriginParameters) { + *out = *in + if in.ConnectionAttempts != nil { + in, out := &in.ConnectionAttempts, &out.ConnectionAttempts + *out = new(float64) + **out = **in + } + if in.ConnectionTimeout != nil { + in, out := &in.ConnectionTimeout, &out.ConnectionTimeout + *out = new(float64) + **out = **in + } + if in.CustomHeader != nil { + in, out := &in.CustomHeader, &out.CustomHeader + *out = make([]CustomHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomOriginConfig != nil { + in, out := &in.CustomOriginConfig, &out.CustomOriginConfig + *out = new(CustomOriginConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.OriginAccessControlID != nil { + in, out := &in.OriginAccessControlID, &out.OriginAccessControlID + *out = new(string) + **out = **in + } + if in.OriginAccessControlIDRef != nil { + in, out := &in.OriginAccessControlIDRef, &out.OriginAccessControlIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginAccessControlIDSelector != nil { + in, out := &in.OriginAccessControlIDSelector, &out.OriginAccessControlIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OriginID != nil { + in, out := &in.OriginID, &out.OriginID + *out = new(string) + **out = **in + } + if in.OriginPath != nil { + in, out := &in.OriginPath, &out.OriginPath + *out = new(string) + **out = **in + } + if in.OriginShield != nil { + in, out := &in.OriginShield, &out.OriginShield + *out = new(OriginShieldParameters) + (*in).DeepCopyInto(*out) + } + if in.S3OriginConfig != nil { + in, out := &in.S3OriginConfig, &out.S3OriginConfig + *out = new(S3OriginConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginParameters. 
+func (in *OriginParameters) DeepCopy() *OriginParameters { + if in == nil { + return nil + } + out := new(OriginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicy) DeepCopyInto(out *OriginRequestPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicy. +func (in *OriginRequestPolicy) DeepCopy() *OriginRequestPolicy { + if in == nil { + return nil + } + out := new(OriginRequestPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OriginRequestPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyCookiesConfigInitParameters) DeepCopyInto(out *OriginRequestPolicyCookiesConfigInitParameters) { + *out = *in + if in.CookieBehavior != nil { + in, out := &in.CookieBehavior, &out.CookieBehavior + *out = new(string) + **out = **in + } + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(CookiesConfigCookiesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyCookiesConfigInitParameters. 
+func (in *OriginRequestPolicyCookiesConfigInitParameters) DeepCopy() *OriginRequestPolicyCookiesConfigInitParameters { + if in == nil { + return nil + } + out := new(OriginRequestPolicyCookiesConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyCookiesConfigObservation) DeepCopyInto(out *OriginRequestPolicyCookiesConfigObservation) { + *out = *in + if in.CookieBehavior != nil { + in, out := &in.CookieBehavior, &out.CookieBehavior + *out = new(string) + **out = **in + } + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(CookiesConfigCookiesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyCookiesConfigObservation. +func (in *OriginRequestPolicyCookiesConfigObservation) DeepCopy() *OriginRequestPolicyCookiesConfigObservation { + if in == nil { + return nil + } + out := new(OriginRequestPolicyCookiesConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyCookiesConfigParameters) DeepCopyInto(out *OriginRequestPolicyCookiesConfigParameters) { + *out = *in + if in.CookieBehavior != nil { + in, out := &in.CookieBehavior, &out.CookieBehavior + *out = new(string) + **out = **in + } + if in.Cookies != nil { + in, out := &in.Cookies, &out.Cookies + *out = new(CookiesConfigCookiesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyCookiesConfigParameters. 
+func (in *OriginRequestPolicyCookiesConfigParameters) DeepCopy() *OriginRequestPolicyCookiesConfigParameters { + if in == nil { + return nil + } + out := new(OriginRequestPolicyCookiesConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyHeadersConfigInitParameters) DeepCopyInto(out *OriginRequestPolicyHeadersConfigInitParameters) { + *out = *in + if in.HeaderBehavior != nil { + in, out := &in.HeaderBehavior, &out.HeaderBehavior + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(HeadersConfigHeadersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyHeadersConfigInitParameters. +func (in *OriginRequestPolicyHeadersConfigInitParameters) DeepCopy() *OriginRequestPolicyHeadersConfigInitParameters { + if in == nil { + return nil + } + out := new(OriginRequestPolicyHeadersConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyHeadersConfigObservation) DeepCopyInto(out *OriginRequestPolicyHeadersConfigObservation) { + *out = *in + if in.HeaderBehavior != nil { + in, out := &in.HeaderBehavior, &out.HeaderBehavior + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(HeadersConfigHeadersObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyHeadersConfigObservation. 
+func (in *OriginRequestPolicyHeadersConfigObservation) DeepCopy() *OriginRequestPolicyHeadersConfigObservation { + if in == nil { + return nil + } + out := new(OriginRequestPolicyHeadersConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyHeadersConfigParameters) DeepCopyInto(out *OriginRequestPolicyHeadersConfigParameters) { + *out = *in + if in.HeaderBehavior != nil { + in, out := &in.HeaderBehavior, &out.HeaderBehavior + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(HeadersConfigHeadersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyHeadersConfigParameters. +func (in *OriginRequestPolicyHeadersConfigParameters) DeepCopy() *OriginRequestPolicyHeadersConfigParameters { + if in == nil { + return nil + } + out := new(OriginRequestPolicyHeadersConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginRequestPolicyInitParameters) DeepCopyInto(out *OriginRequestPolicyInitParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.CookiesConfig != nil { + in, out := &in.CookiesConfig, &out.CookiesConfig + *out = new(OriginRequestPolicyCookiesConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersConfig != nil { + in, out := &in.HeadersConfig, &out.HeadersConfig + *out = new(OriginRequestPolicyHeadersConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryStringsConfig != nil { + in, out := &in.QueryStringsConfig, &out.QueryStringsConfig + *out = new(OriginRequestPolicyQueryStringsConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyInitParameters. +func (in *OriginRequestPolicyInitParameters) DeepCopy() *OriginRequestPolicyInitParameters { + if in == nil { + return nil + } + out := new(OriginRequestPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyList) DeepCopyInto(out *OriginRequestPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OriginRequestPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyList. +func (in *OriginRequestPolicyList) DeepCopy() *OriginRequestPolicyList { + if in == nil { + return nil + } + out := new(OriginRequestPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OriginRequestPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyObservation) DeepCopyInto(out *OriginRequestPolicyObservation) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.CookiesConfig != nil { + in, out := &in.CookiesConfig, &out.CookiesConfig + *out = new(OriginRequestPolicyCookiesConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.HeadersConfig != nil { + in, out := &in.HeadersConfig, &out.HeadersConfig + *out = new(OriginRequestPolicyHeadersConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.QueryStringsConfig != nil { + in, out := &in.QueryStringsConfig, &out.QueryStringsConfig + *out = new(OriginRequestPolicyQueryStringsConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyObservation. +func (in *OriginRequestPolicyObservation) DeepCopy() *OriginRequestPolicyObservation { + if in == nil { + return nil + } + out := new(OriginRequestPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginRequestPolicyParameters) DeepCopyInto(out *OriginRequestPolicyParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.CookiesConfig != nil { + in, out := &in.CookiesConfig, &out.CookiesConfig + *out = new(OriginRequestPolicyCookiesConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersConfig != nil { + in, out := &in.HeadersConfig, &out.HeadersConfig + *out = new(OriginRequestPolicyHeadersConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryStringsConfig != nil { + in, out := &in.QueryStringsConfig, &out.QueryStringsConfig + *out = new(OriginRequestPolicyQueryStringsConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyParameters. +func (in *OriginRequestPolicyParameters) DeepCopy() *OriginRequestPolicyParameters { + if in == nil { + return nil + } + out := new(OriginRequestPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyQueryStringsConfigInitParameters) DeepCopyInto(out *OriginRequestPolicyQueryStringsConfigInitParameters) { + *out = *in + if in.QueryStringBehavior != nil { + in, out := &in.QueryStringBehavior, &out.QueryStringBehavior + *out = new(string) + **out = **in + } + if in.QueryStrings != nil { + in, out := &in.QueryStrings, &out.QueryStrings + *out = new(QueryStringsConfigQueryStringsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyQueryStringsConfigInitParameters. 
+func (in *OriginRequestPolicyQueryStringsConfigInitParameters) DeepCopy() *OriginRequestPolicyQueryStringsConfigInitParameters { + if in == nil { + return nil + } + out := new(OriginRequestPolicyQueryStringsConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyQueryStringsConfigObservation) DeepCopyInto(out *OriginRequestPolicyQueryStringsConfigObservation) { + *out = *in + if in.QueryStringBehavior != nil { + in, out := &in.QueryStringBehavior, &out.QueryStringBehavior + *out = new(string) + **out = **in + } + if in.QueryStrings != nil { + in, out := &in.QueryStrings, &out.QueryStrings + *out = new(QueryStringsConfigQueryStringsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyQueryStringsConfigObservation. +func (in *OriginRequestPolicyQueryStringsConfigObservation) DeepCopy() *OriginRequestPolicyQueryStringsConfigObservation { + if in == nil { + return nil + } + out := new(OriginRequestPolicyQueryStringsConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyQueryStringsConfigParameters) DeepCopyInto(out *OriginRequestPolicyQueryStringsConfigParameters) { + *out = *in + if in.QueryStringBehavior != nil { + in, out := &in.QueryStringBehavior, &out.QueryStringBehavior + *out = new(string) + **out = **in + } + if in.QueryStrings != nil { + in, out := &in.QueryStrings, &out.QueryStrings + *out = new(QueryStringsConfigQueryStringsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyQueryStringsConfigParameters. 
+func (in *OriginRequestPolicyQueryStringsConfigParameters) DeepCopy() *OriginRequestPolicyQueryStringsConfigParameters { + if in == nil { + return nil + } + out := new(OriginRequestPolicyQueryStringsConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicySpec) DeepCopyInto(out *OriginRequestPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicySpec. +func (in *OriginRequestPolicySpec) DeepCopy() *OriginRequestPolicySpec { + if in == nil { + return nil + } + out := new(OriginRequestPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginRequestPolicyStatus) DeepCopyInto(out *OriginRequestPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginRequestPolicyStatus. +func (in *OriginRequestPolicyStatus) DeepCopy() *OriginRequestPolicyStatus { + if in == nil { + return nil + } + out := new(OriginRequestPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginShieldInitParameters) DeepCopyInto(out *OriginShieldInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OriginShieldRegion != nil { + in, out := &in.OriginShieldRegion, &out.OriginShieldRegion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginShieldInitParameters. +func (in *OriginShieldInitParameters) DeepCopy() *OriginShieldInitParameters { + if in == nil { + return nil + } + out := new(OriginShieldInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginShieldObservation) DeepCopyInto(out *OriginShieldObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OriginShieldRegion != nil { + in, out := &in.OriginShieldRegion, &out.OriginShieldRegion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginShieldObservation. +func (in *OriginShieldObservation) DeepCopy() *OriginShieldObservation { + if in == nil { + return nil + } + out := new(OriginShieldObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginShieldParameters) DeepCopyInto(out *OriginShieldParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OriginShieldRegion != nil { + in, out := &in.OriginShieldRegion, &out.OriginShieldRegion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginShieldParameters. 
+func (in *OriginShieldParameters) DeepCopy() *OriginShieldParameters { + if in == nil { + return nil + } + out := new(OriginShieldParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersInCacheKeyAndForwardedToOriginInitParameters) DeepCopyInto(out *ParametersInCacheKeyAndForwardedToOriginInitParameters) { + *out = *in + if in.CookiesConfig != nil { + in, out := &in.CookiesConfig, &out.CookiesConfig + *out = new(CookiesConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableAcceptEncodingBrotli != nil { + in, out := &in.EnableAcceptEncodingBrotli, &out.EnableAcceptEncodingBrotli + *out = new(bool) + **out = **in + } + if in.EnableAcceptEncodingGzip != nil { + in, out := &in.EnableAcceptEncodingGzip, &out.EnableAcceptEncodingGzip + *out = new(bool) + **out = **in + } + if in.HeadersConfig != nil { + in, out := &in.HeadersConfig, &out.HeadersConfig + *out = new(HeadersConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryStringsConfig != nil { + in, out := &in.QueryStringsConfig, &out.QueryStringsConfig + *out = new(QueryStringsConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersInCacheKeyAndForwardedToOriginInitParameters. +func (in *ParametersInCacheKeyAndForwardedToOriginInitParameters) DeepCopy() *ParametersInCacheKeyAndForwardedToOriginInitParameters { + if in == nil { + return nil + } + out := new(ParametersInCacheKeyAndForwardedToOriginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParametersInCacheKeyAndForwardedToOriginObservation) DeepCopyInto(out *ParametersInCacheKeyAndForwardedToOriginObservation) { + *out = *in + if in.CookiesConfig != nil { + in, out := &in.CookiesConfig, &out.CookiesConfig + *out = new(CookiesConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.EnableAcceptEncodingBrotli != nil { + in, out := &in.EnableAcceptEncodingBrotli, &out.EnableAcceptEncodingBrotli + *out = new(bool) + **out = **in + } + if in.EnableAcceptEncodingGzip != nil { + in, out := &in.EnableAcceptEncodingGzip, &out.EnableAcceptEncodingGzip + *out = new(bool) + **out = **in + } + if in.HeadersConfig != nil { + in, out := &in.HeadersConfig, &out.HeadersConfig + *out = new(HeadersConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.QueryStringsConfig != nil { + in, out := &in.QueryStringsConfig, &out.QueryStringsConfig + *out = new(QueryStringsConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersInCacheKeyAndForwardedToOriginObservation. +func (in *ParametersInCacheKeyAndForwardedToOriginObservation) DeepCopy() *ParametersInCacheKeyAndForwardedToOriginObservation { + if in == nil { + return nil + } + out := new(ParametersInCacheKeyAndForwardedToOriginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParametersInCacheKeyAndForwardedToOriginParameters) DeepCopyInto(out *ParametersInCacheKeyAndForwardedToOriginParameters) { + *out = *in + if in.CookiesConfig != nil { + in, out := &in.CookiesConfig, &out.CookiesConfig + *out = new(CookiesConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableAcceptEncodingBrotli != nil { + in, out := &in.EnableAcceptEncodingBrotli, &out.EnableAcceptEncodingBrotli + *out = new(bool) + **out = **in + } + if in.EnableAcceptEncodingGzip != nil { + in, out := &in.EnableAcceptEncodingGzip, &out.EnableAcceptEncodingGzip + *out = new(bool) + **out = **in + } + if in.HeadersConfig != nil { + in, out := &in.HeadersConfig, &out.HeadersConfig + *out = new(HeadersConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryStringsConfig != nil { + in, out := &in.QueryStringsConfig, &out.QueryStringsConfig + *out = new(QueryStringsConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersInCacheKeyAndForwardedToOriginParameters. +func (in *ParametersInCacheKeyAndForwardedToOriginParameters) DeepCopy() *ParametersInCacheKeyAndForwardedToOriginParameters { + if in == nil { + return nil + } + out := new(ParametersInCacheKeyAndForwardedToOriginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryArgProfileConfigInitParameters) DeepCopyInto(out *QueryArgProfileConfigInitParameters) { + *out = *in + if in.ForwardWhenQueryArgProfileIsUnknown != nil { + in, out := &in.ForwardWhenQueryArgProfileIsUnknown, &out.ForwardWhenQueryArgProfileIsUnknown + *out = new(bool) + **out = **in + } + if in.QueryArgProfiles != nil { + in, out := &in.QueryArgProfiles, &out.QueryArgProfiles + *out = new(QueryArgProfilesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryArgProfileConfigInitParameters. +func (in *QueryArgProfileConfigInitParameters) DeepCopy() *QueryArgProfileConfigInitParameters { + if in == nil { + return nil + } + out := new(QueryArgProfileConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryArgProfileConfigObservation) DeepCopyInto(out *QueryArgProfileConfigObservation) { + *out = *in + if in.ForwardWhenQueryArgProfileIsUnknown != nil { + in, out := &in.ForwardWhenQueryArgProfileIsUnknown, &out.ForwardWhenQueryArgProfileIsUnknown + *out = new(bool) + **out = **in + } + if in.QueryArgProfiles != nil { + in, out := &in.QueryArgProfiles, &out.QueryArgProfiles + *out = new(QueryArgProfilesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryArgProfileConfigObservation. +func (in *QueryArgProfileConfigObservation) DeepCopy() *QueryArgProfileConfigObservation { + if in == nil { + return nil + } + out := new(QueryArgProfileConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryArgProfileConfigParameters) DeepCopyInto(out *QueryArgProfileConfigParameters) { + *out = *in + if in.ForwardWhenQueryArgProfileIsUnknown != nil { + in, out := &in.ForwardWhenQueryArgProfileIsUnknown, &out.ForwardWhenQueryArgProfileIsUnknown + *out = new(bool) + **out = **in + } + if in.QueryArgProfiles != nil { + in, out := &in.QueryArgProfiles, &out.QueryArgProfiles + *out = new(QueryArgProfilesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryArgProfileConfigParameters. +func (in *QueryArgProfileConfigParameters) DeepCopy() *QueryArgProfileConfigParameters { + if in == nil { + return nil + } + out := new(QueryArgProfileConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryArgProfilesInitParameters) DeepCopyInto(out *QueryArgProfilesInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]QueryArgProfilesItemsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryArgProfilesInitParameters. +func (in *QueryArgProfilesInitParameters) DeepCopy() *QueryArgProfilesInitParameters { + if in == nil { + return nil + } + out := new(QueryArgProfilesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryArgProfilesItemsInitParameters) DeepCopyInto(out *QueryArgProfilesItemsInitParameters) { + *out = *in + if in.ProfileID != nil { + in, out := &in.ProfileID, &out.ProfileID + *out = new(string) + **out = **in + } + if in.ProfileIDRef != nil { + in, out := &in.ProfileIDRef, &out.ProfileIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProfileIDSelector != nil { + in, out := &in.ProfileIDSelector, &out.ProfileIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.QueryArg != nil { + in, out := &in.QueryArg, &out.QueryArg + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryArgProfilesItemsInitParameters. +func (in *QueryArgProfilesItemsInitParameters) DeepCopy() *QueryArgProfilesItemsInitParameters { + if in == nil { + return nil + } + out := new(QueryArgProfilesItemsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryArgProfilesItemsObservation) DeepCopyInto(out *QueryArgProfilesItemsObservation) { + *out = *in + if in.ProfileID != nil { + in, out := &in.ProfileID, &out.ProfileID + *out = new(string) + **out = **in + } + if in.QueryArg != nil { + in, out := &in.QueryArg, &out.QueryArg + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryArgProfilesItemsObservation. +func (in *QueryArgProfilesItemsObservation) DeepCopy() *QueryArgProfilesItemsObservation { + if in == nil { + return nil + } + out := new(QueryArgProfilesItemsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryArgProfilesItemsParameters) DeepCopyInto(out *QueryArgProfilesItemsParameters) { + *out = *in + if in.ProfileID != nil { + in, out := &in.ProfileID, &out.ProfileID + *out = new(string) + **out = **in + } + if in.ProfileIDRef != nil { + in, out := &in.ProfileIDRef, &out.ProfileIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProfileIDSelector != nil { + in, out := &in.ProfileIDSelector, &out.ProfileIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.QueryArg != nil { + in, out := &in.QueryArg, &out.QueryArg + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryArgProfilesItemsParameters. +func (in *QueryArgProfilesItemsParameters) DeepCopy() *QueryArgProfilesItemsParameters { + if in == nil { + return nil + } + out := new(QueryArgProfilesItemsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryArgProfilesObservation) DeepCopyInto(out *QueryArgProfilesObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]QueryArgProfilesItemsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryArgProfilesObservation. +func (in *QueryArgProfilesObservation) DeepCopy() *QueryArgProfilesObservation { + if in == nil { + return nil + } + out := new(QueryArgProfilesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryArgProfilesParameters) DeepCopyInto(out *QueryArgProfilesParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]QueryArgProfilesItemsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryArgProfilesParameters. +func (in *QueryArgProfilesParameters) DeepCopy() *QueryArgProfilesParameters { + if in == nil { + return nil + } + out := new(QueryArgProfilesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryStringsConfigInitParameters) DeepCopyInto(out *QueryStringsConfigInitParameters) { + *out = *in + if in.QueryStringBehavior != nil { + in, out := &in.QueryStringBehavior, &out.QueryStringBehavior + *out = new(string) + **out = **in + } + if in.QueryStrings != nil { + in, out := &in.QueryStrings, &out.QueryStrings + *out = new(QueryStringsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringsConfigInitParameters. +func (in *QueryStringsConfigInitParameters) DeepCopy() *QueryStringsConfigInitParameters { + if in == nil { + return nil + } + out := new(QueryStringsConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryStringsConfigObservation) DeepCopyInto(out *QueryStringsConfigObservation) { + *out = *in + if in.QueryStringBehavior != nil { + in, out := &in.QueryStringBehavior, &out.QueryStringBehavior + *out = new(string) + **out = **in + } + if in.QueryStrings != nil { + in, out := &in.QueryStrings, &out.QueryStrings + *out = new(QueryStringsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringsConfigObservation. +func (in *QueryStringsConfigObservation) DeepCopy() *QueryStringsConfigObservation { + if in == nil { + return nil + } + out := new(QueryStringsConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryStringsConfigParameters) DeepCopyInto(out *QueryStringsConfigParameters) { + *out = *in + if in.QueryStringBehavior != nil { + in, out := &in.QueryStringBehavior, &out.QueryStringBehavior + *out = new(string) + **out = **in + } + if in.QueryStrings != nil { + in, out := &in.QueryStrings, &out.QueryStrings + *out = new(QueryStringsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringsConfigParameters. +func (in *QueryStringsConfigParameters) DeepCopy() *QueryStringsConfigParameters { + if in == nil { + return nil + } + out := new(QueryStringsConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryStringsConfigQueryStringsInitParameters) DeepCopyInto(out *QueryStringsConfigQueryStringsInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringsConfigQueryStringsInitParameters. +func (in *QueryStringsConfigQueryStringsInitParameters) DeepCopy() *QueryStringsConfigQueryStringsInitParameters { + if in == nil { + return nil + } + out := new(QueryStringsConfigQueryStringsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryStringsConfigQueryStringsObservation) DeepCopyInto(out *QueryStringsConfigQueryStringsObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringsConfigQueryStringsObservation. +func (in *QueryStringsConfigQueryStringsObservation) DeepCopy() *QueryStringsConfigQueryStringsObservation { + if in == nil { + return nil + } + out := new(QueryStringsConfigQueryStringsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryStringsConfigQueryStringsParameters) DeepCopyInto(out *QueryStringsConfigQueryStringsParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringsConfigQueryStringsParameters. +func (in *QueryStringsConfigQueryStringsParameters) DeepCopy() *QueryStringsConfigQueryStringsParameters { + if in == nil { + return nil + } + out := new(QueryStringsConfigQueryStringsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryStringsInitParameters) DeepCopyInto(out *QueryStringsInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringsInitParameters. +func (in *QueryStringsInitParameters) DeepCopy() *QueryStringsInitParameters { + if in == nil { + return nil + } + out := new(QueryStringsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryStringsObservation) DeepCopyInto(out *QueryStringsObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringsObservation. +func (in *QueryStringsObservation) DeepCopy() *QueryStringsObservation { + if in == nil { + return nil + } + out := new(QueryStringsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryStringsParameters) DeepCopyInto(out *QueryStringsParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringsParameters. +func (in *QueryStringsParameters) DeepCopy() *QueryStringsParameters { + if in == nil { + return nil + } + out := new(QueryStringsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RealtimeLogConfig) DeepCopyInto(out *RealtimeLogConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeLogConfig. 
+func (in *RealtimeLogConfig) DeepCopy() *RealtimeLogConfig { + if in == nil { + return nil + } + out := new(RealtimeLogConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RealtimeLogConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RealtimeLogConfigInitParameters) DeepCopyInto(out *RealtimeLogConfigInitParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(EndpointInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeLogConfigInitParameters. +func (in *RealtimeLogConfigInitParameters) DeepCopy() *RealtimeLogConfigInitParameters { + if in == nil { + return nil + } + out := new(RealtimeLogConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RealtimeLogConfigList) DeepCopyInto(out *RealtimeLogConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RealtimeLogConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeLogConfigList. +func (in *RealtimeLogConfigList) DeepCopy() *RealtimeLogConfigList { + if in == nil { + return nil + } + out := new(RealtimeLogConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RealtimeLogConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RealtimeLogConfigObservation) DeepCopyInto(out *RealtimeLogConfigObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(EndpointObservation) + (*in).DeepCopyInto(*out) + } + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeLogConfigObservation. 
+func (in *RealtimeLogConfigObservation) DeepCopy() *RealtimeLogConfigObservation { + if in == nil { + return nil + } + out := new(RealtimeLogConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RealtimeLogConfigParameters) DeepCopyInto(out *RealtimeLogConfigParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(EndpointParameters) + (*in).DeepCopyInto(*out) + } + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeLogConfigParameters. +func (in *RealtimeLogConfigParameters) DeepCopy() *RealtimeLogConfigParameters { + if in == nil { + return nil + } + out := new(RealtimeLogConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RealtimeLogConfigSpec) DeepCopyInto(out *RealtimeLogConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeLogConfigSpec. 
+func (in *RealtimeLogConfigSpec) DeepCopy() *RealtimeLogConfigSpec { + if in == nil { + return nil + } + out := new(RealtimeLogConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RealtimeLogConfigStatus) DeepCopyInto(out *RealtimeLogConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeLogConfigStatus. +func (in *RealtimeLogConfigStatus) DeepCopy() *RealtimeLogConfigStatus { + if in == nil { + return nil + } + out := new(RealtimeLogConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RealtimeMetricsSubscriptionConfigInitParameters) DeepCopyInto(out *RealtimeMetricsSubscriptionConfigInitParameters) { + *out = *in + if in.RealtimeMetricsSubscriptionStatus != nil { + in, out := &in.RealtimeMetricsSubscriptionStatus, &out.RealtimeMetricsSubscriptionStatus + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeMetricsSubscriptionConfigInitParameters. +func (in *RealtimeMetricsSubscriptionConfigInitParameters) DeepCopy() *RealtimeMetricsSubscriptionConfigInitParameters { + if in == nil { + return nil + } + out := new(RealtimeMetricsSubscriptionConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RealtimeMetricsSubscriptionConfigObservation) DeepCopyInto(out *RealtimeMetricsSubscriptionConfigObservation) { + *out = *in + if in.RealtimeMetricsSubscriptionStatus != nil { + in, out := &in.RealtimeMetricsSubscriptionStatus, &out.RealtimeMetricsSubscriptionStatus + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeMetricsSubscriptionConfigObservation. +func (in *RealtimeMetricsSubscriptionConfigObservation) DeepCopy() *RealtimeMetricsSubscriptionConfigObservation { + if in == nil { + return nil + } + out := new(RealtimeMetricsSubscriptionConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RealtimeMetricsSubscriptionConfigParameters) DeepCopyInto(out *RealtimeMetricsSubscriptionConfigParameters) { + *out = *in + if in.RealtimeMetricsSubscriptionStatus != nil { + in, out := &in.RealtimeMetricsSubscriptionStatus, &out.RealtimeMetricsSubscriptionStatus + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealtimeMetricsSubscriptionConfigParameters. +func (in *RealtimeMetricsSubscriptionConfigParameters) DeepCopy() *RealtimeMetricsSubscriptionConfigParameters { + if in == nil { + return nil + } + out := new(RealtimeMetricsSubscriptionConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferrerPolicyInitParameters) DeepCopyInto(out *ReferrerPolicyInitParameters) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.ReferrerPolicy != nil { + in, out := &in.ReferrerPolicy, &out.ReferrerPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferrerPolicyInitParameters. +func (in *ReferrerPolicyInitParameters) DeepCopy() *ReferrerPolicyInitParameters { + if in == nil { + return nil + } + out := new(ReferrerPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferrerPolicyObservation) DeepCopyInto(out *ReferrerPolicyObservation) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.ReferrerPolicy != nil { + in, out := &in.ReferrerPolicy, &out.ReferrerPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferrerPolicyObservation. +func (in *ReferrerPolicyObservation) DeepCopy() *ReferrerPolicyObservation { + if in == nil { + return nil + } + out := new(ReferrerPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferrerPolicyParameters) DeepCopyInto(out *ReferrerPolicyParameters) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.ReferrerPolicy != nil { + in, out := &in.ReferrerPolicy, &out.ReferrerPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferrerPolicyParameters. 
+func (in *ReferrerPolicyParameters) DeepCopy() *ReferrerPolicyParameters { + if in == nil { + return nil + } + out := new(ReferrerPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoveHeadersConfigInitParameters) DeepCopyInto(out *RemoveHeadersConfigInitParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RemoveHeadersConfigItemsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveHeadersConfigInitParameters. +func (in *RemoveHeadersConfigInitParameters) DeepCopy() *RemoveHeadersConfigInitParameters { + if in == nil { + return nil + } + out := new(RemoveHeadersConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoveHeadersConfigItemsInitParameters) DeepCopyInto(out *RemoveHeadersConfigItemsInitParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveHeadersConfigItemsInitParameters. +func (in *RemoveHeadersConfigItemsInitParameters) DeepCopy() *RemoveHeadersConfigItemsInitParameters { + if in == nil { + return nil + } + out := new(RemoveHeadersConfigItemsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemoveHeadersConfigItemsObservation) DeepCopyInto(out *RemoveHeadersConfigItemsObservation) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveHeadersConfigItemsObservation. +func (in *RemoveHeadersConfigItemsObservation) DeepCopy() *RemoveHeadersConfigItemsObservation { + if in == nil { + return nil + } + out := new(RemoveHeadersConfigItemsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoveHeadersConfigItemsParameters) DeepCopyInto(out *RemoveHeadersConfigItemsParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveHeadersConfigItemsParameters. +func (in *RemoveHeadersConfigItemsParameters) DeepCopy() *RemoveHeadersConfigItemsParameters { + if in == nil { + return nil + } + out := new(RemoveHeadersConfigItemsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoveHeadersConfigObservation) DeepCopyInto(out *RemoveHeadersConfigObservation) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RemoveHeadersConfigItemsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveHeadersConfigObservation. 
+func (in *RemoveHeadersConfigObservation) DeepCopy() *RemoveHeadersConfigObservation { + if in == nil { + return nil + } + out := new(RemoveHeadersConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoveHeadersConfigParameters) DeepCopyInto(out *RemoveHeadersConfigParameters) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RemoveHeadersConfigItemsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveHeadersConfigParameters. +func (in *RemoveHeadersConfigParameters) DeepCopy() *RemoveHeadersConfigParameters { + if in == nil { + return nil + } + out := new(RemoveHeadersConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseHeadersPolicy) DeepCopyInto(out *ResponseHeadersPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeadersPolicy. +func (in *ResponseHeadersPolicy) DeepCopy() *ResponseHeadersPolicy { + if in == nil { + return nil + } + out := new(ResponseHeadersPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResponseHeadersPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseHeadersPolicyInitParameters) DeepCopyInto(out *ResponseHeadersPolicyInitParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.CorsConfig != nil { + in, out := &in.CorsConfig, &out.CorsConfig + *out = new(CorsConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomHeadersConfig != nil { + in, out := &in.CustomHeadersConfig, &out.CustomHeadersConfig + *out = new(CustomHeadersConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RemoveHeadersConfig != nil { + in, out := &in.RemoveHeadersConfig, &out.RemoveHeadersConfig + *out = new(RemoveHeadersConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityHeadersConfig != nil { + in, out := &in.SecurityHeadersConfig, &out.SecurityHeadersConfig + *out = new(SecurityHeadersConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServerTimingHeadersConfig != nil { + in, out := &in.ServerTimingHeadersConfig, &out.ServerTimingHeadersConfig + *out = new(ServerTimingHeadersConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeadersPolicyInitParameters. +func (in *ResponseHeadersPolicyInitParameters) DeepCopy() *ResponseHeadersPolicyInitParameters { + if in == nil { + return nil + } + out := new(ResponseHeadersPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseHeadersPolicyList) DeepCopyInto(out *ResponseHeadersPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResponseHeadersPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeadersPolicyList. +func (in *ResponseHeadersPolicyList) DeepCopy() *ResponseHeadersPolicyList { + if in == nil { + return nil + } + out := new(ResponseHeadersPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResponseHeadersPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseHeadersPolicyObservation) DeepCopyInto(out *ResponseHeadersPolicyObservation) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.CorsConfig != nil { + in, out := &in.CorsConfig, &out.CorsConfig + *out = new(CorsConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomHeadersConfig != nil { + in, out := &in.CustomHeadersConfig, &out.CustomHeadersConfig + *out = new(CustomHeadersConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RemoveHeadersConfig != nil { + in, out := &in.RemoveHeadersConfig, &out.RemoveHeadersConfig + *out = new(RemoveHeadersConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityHeadersConfig != nil { + in, out := &in.SecurityHeadersConfig, &out.SecurityHeadersConfig + *out = new(SecurityHeadersConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ServerTimingHeadersConfig != nil { + in, out := &in.ServerTimingHeadersConfig, &out.ServerTimingHeadersConfig + *out = new(ServerTimingHeadersConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeadersPolicyObservation. +func (in *ResponseHeadersPolicyObservation) DeepCopy() *ResponseHeadersPolicyObservation { + if in == nil { + return nil + } + out := new(ResponseHeadersPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseHeadersPolicyParameters) DeepCopyInto(out *ResponseHeadersPolicyParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.CorsConfig != nil { + in, out := &in.CorsConfig, &out.CorsConfig + *out = new(CorsConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomHeadersConfig != nil { + in, out := &in.CustomHeadersConfig, &out.CustomHeadersConfig + *out = new(CustomHeadersConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RemoveHeadersConfig != nil { + in, out := &in.RemoveHeadersConfig, &out.RemoveHeadersConfig + *out = new(RemoveHeadersConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityHeadersConfig != nil { + in, out := &in.SecurityHeadersConfig, &out.SecurityHeadersConfig + *out = new(SecurityHeadersConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.ServerTimingHeadersConfig != nil { + in, out := &in.ServerTimingHeadersConfig, &out.ServerTimingHeadersConfig + *out = new(ServerTimingHeadersConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeadersPolicyParameters. +func (in *ResponseHeadersPolicyParameters) DeepCopy() *ResponseHeadersPolicyParameters { + if in == nil { + return nil + } + out := new(ResponseHeadersPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseHeadersPolicySpec) DeepCopyInto(out *ResponseHeadersPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeadersPolicySpec. +func (in *ResponseHeadersPolicySpec) DeepCopy() *ResponseHeadersPolicySpec { + if in == nil { + return nil + } + out := new(ResponseHeadersPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseHeadersPolicyStatus) DeepCopyInto(out *ResponseHeadersPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeadersPolicyStatus. +func (in *ResponseHeadersPolicyStatus) DeepCopy() *ResponseHeadersPolicyStatus { + if in == nil { + return nil + } + out := new(ResponseHeadersPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestrictionsInitParameters) DeepCopyInto(out *RestrictionsInitParameters) { + *out = *in + if in.GeoRestriction != nil { + in, out := &in.GeoRestriction, &out.GeoRestriction + *out = new(GeoRestrictionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestrictionsInitParameters. +func (in *RestrictionsInitParameters) DeepCopy() *RestrictionsInitParameters { + if in == nil { + return nil + } + out := new(RestrictionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RestrictionsObservation) DeepCopyInto(out *RestrictionsObservation) { + *out = *in + if in.GeoRestriction != nil { + in, out := &in.GeoRestriction, &out.GeoRestriction + *out = new(GeoRestrictionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestrictionsObservation. +func (in *RestrictionsObservation) DeepCopy() *RestrictionsObservation { + if in == nil { + return nil + } + out := new(RestrictionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestrictionsParameters) DeepCopyInto(out *RestrictionsParameters) { + *out = *in + if in.GeoRestriction != nil { + in, out := &in.GeoRestriction, &out.GeoRestriction + *out = new(GeoRestrictionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestrictionsParameters. +func (in *RestrictionsParameters) DeepCopy() *RestrictionsParameters { + if in == nil { + return nil + } + out := new(RestrictionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3OriginConfigInitParameters) DeepCopyInto(out *S3OriginConfigInitParameters) { + *out = *in + if in.OriginAccessIdentity != nil { + in, out := &in.OriginAccessIdentity, &out.OriginAccessIdentity + *out = new(string) + **out = **in + } + if in.OriginAccessIdentityRef != nil { + in, out := &in.OriginAccessIdentityRef, &out.OriginAccessIdentityRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginAccessIdentitySelector != nil { + in, out := &in.OriginAccessIdentitySelector, &out.OriginAccessIdentitySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OriginConfigInitParameters. +func (in *S3OriginConfigInitParameters) DeepCopy() *S3OriginConfigInitParameters { + if in == nil { + return nil + } + out := new(S3OriginConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3OriginConfigObservation) DeepCopyInto(out *S3OriginConfigObservation) { + *out = *in + if in.OriginAccessIdentity != nil { + in, out := &in.OriginAccessIdentity, &out.OriginAccessIdentity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OriginConfigObservation. +func (in *S3OriginConfigObservation) DeepCopy() *S3OriginConfigObservation { + if in == nil { + return nil + } + out := new(S3OriginConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3OriginConfigParameters) DeepCopyInto(out *S3OriginConfigParameters) { + *out = *in + if in.OriginAccessIdentity != nil { + in, out := &in.OriginAccessIdentity, &out.OriginAccessIdentity + *out = new(string) + **out = **in + } + if in.OriginAccessIdentityRef != nil { + in, out := &in.OriginAccessIdentityRef, &out.OriginAccessIdentityRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginAccessIdentitySelector != nil { + in, out := &in.OriginAccessIdentitySelector, &out.OriginAccessIdentitySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OriginConfigParameters. +func (in *S3OriginConfigParameters) DeepCopy() *S3OriginConfigParameters { + if in == nil { + return nil + } + out := new(S3OriginConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityHeadersConfigInitParameters) DeepCopyInto(out *SecurityHeadersConfigInitParameters) { + *out = *in + if in.ContentSecurityPolicy != nil { + in, out := &in.ContentSecurityPolicy, &out.ContentSecurityPolicy + *out = new(ContentSecurityPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ContentTypeOptions != nil { + in, out := &in.ContentTypeOptions, &out.ContentTypeOptions + *out = new(ContentTypeOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FrameOptions != nil { + in, out := &in.FrameOptions, &out.FrameOptions + *out = new(FrameOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReferrerPolicy != nil { + in, out := &in.ReferrerPolicy, &out.ReferrerPolicy + *out = new(ReferrerPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StrictTransportSecurity != nil { + in, out := &in.StrictTransportSecurity, &out.StrictTransportSecurity + *out = new(StrictTransportSecurityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.XSSProtection != nil { + in, out := &in.XSSProtection, &out.XSSProtection + *out = new(XSSProtectionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityHeadersConfigInitParameters. +func (in *SecurityHeadersConfigInitParameters) DeepCopy() *SecurityHeadersConfigInitParameters { + if in == nil { + return nil + } + out := new(SecurityHeadersConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityHeadersConfigObservation) DeepCopyInto(out *SecurityHeadersConfigObservation) { + *out = *in + if in.ContentSecurityPolicy != nil { + in, out := &in.ContentSecurityPolicy, &out.ContentSecurityPolicy + *out = new(ContentSecurityPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ContentTypeOptions != nil { + in, out := &in.ContentTypeOptions, &out.ContentTypeOptions + *out = new(ContentTypeOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.FrameOptions != nil { + in, out := &in.FrameOptions, &out.FrameOptions + *out = new(FrameOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ReferrerPolicy != nil { + in, out := &in.ReferrerPolicy, &out.ReferrerPolicy + *out = new(ReferrerPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.StrictTransportSecurity != nil { + in, out := &in.StrictTransportSecurity, &out.StrictTransportSecurity + *out = new(StrictTransportSecurityObservation) + (*in).DeepCopyInto(*out) + } + if in.XSSProtection != nil { + in, out := &in.XSSProtection, &out.XSSProtection + *out = new(XSSProtectionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityHeadersConfigObservation. +func (in *SecurityHeadersConfigObservation) DeepCopy() *SecurityHeadersConfigObservation { + if in == nil { + return nil + } + out := new(SecurityHeadersConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityHeadersConfigParameters) DeepCopyInto(out *SecurityHeadersConfigParameters) { + *out = *in + if in.ContentSecurityPolicy != nil { + in, out := &in.ContentSecurityPolicy, &out.ContentSecurityPolicy + *out = new(ContentSecurityPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ContentTypeOptions != nil { + in, out := &in.ContentTypeOptions, &out.ContentTypeOptions + *out = new(ContentTypeOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.FrameOptions != nil { + in, out := &in.FrameOptions, &out.FrameOptions + *out = new(FrameOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.ReferrerPolicy != nil { + in, out := &in.ReferrerPolicy, &out.ReferrerPolicy + *out = new(ReferrerPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.StrictTransportSecurity != nil { + in, out := &in.StrictTransportSecurity, &out.StrictTransportSecurity + *out = new(StrictTransportSecurityParameters) + (*in).DeepCopyInto(*out) + } + if in.XSSProtection != nil { + in, out := &in.XSSProtection, &out.XSSProtection + *out = new(XSSProtectionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityHeadersConfigParameters. +func (in *SecurityHeadersConfigParameters) DeepCopy() *SecurityHeadersConfigParameters { + if in == nil { + return nil + } + out := new(SecurityHeadersConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerTimingHeadersConfigInitParameters) DeepCopyInto(out *ServerTimingHeadersConfigInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerTimingHeadersConfigInitParameters. +func (in *ServerTimingHeadersConfigInitParameters) DeepCopy() *ServerTimingHeadersConfigInitParameters { + if in == nil { + return nil + } + out := new(ServerTimingHeadersConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerTimingHeadersConfigObservation) DeepCopyInto(out *ServerTimingHeadersConfigObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerTimingHeadersConfigObservation. +func (in *ServerTimingHeadersConfigObservation) DeepCopy() *ServerTimingHeadersConfigObservation { + if in == nil { + return nil + } + out := new(ServerTimingHeadersConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerTimingHeadersConfigParameters) DeepCopyInto(out *ServerTimingHeadersConfigParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerTimingHeadersConfigParameters. +func (in *ServerTimingHeadersConfigParameters) DeepCopy() *ServerTimingHeadersConfigParameters { + if in == nil { + return nil + } + out := new(ServerTimingHeadersConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrictTransportSecurityInitParameters) DeepCopyInto(out *StrictTransportSecurityInitParameters) { + *out = *in + if in.AccessControlMaxAgeSec != nil { + in, out := &in.AccessControlMaxAgeSec, &out.AccessControlMaxAgeSec + *out = new(float64) + **out = **in + } + if in.IncludeSubdomains != nil { + in, out := &in.IncludeSubdomains, &out.IncludeSubdomains + *out = new(bool) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.Preload != nil { + in, out := &in.Preload, &out.Preload + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrictTransportSecurityInitParameters. +func (in *StrictTransportSecurityInitParameters) DeepCopy() *StrictTransportSecurityInitParameters { + if in == nil { + return nil + } + out := new(StrictTransportSecurityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StrictTransportSecurityObservation) DeepCopyInto(out *StrictTransportSecurityObservation) { + *out = *in + if in.AccessControlMaxAgeSec != nil { + in, out := &in.AccessControlMaxAgeSec, &out.AccessControlMaxAgeSec + *out = new(float64) + **out = **in + } + if in.IncludeSubdomains != nil { + in, out := &in.IncludeSubdomains, &out.IncludeSubdomains + *out = new(bool) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.Preload != nil { + in, out := &in.Preload, &out.Preload + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrictTransportSecurityObservation. +func (in *StrictTransportSecurityObservation) DeepCopy() *StrictTransportSecurityObservation { + if in == nil { + return nil + } + out := new(StrictTransportSecurityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrictTransportSecurityParameters) DeepCopyInto(out *StrictTransportSecurityParameters) { + *out = *in + if in.AccessControlMaxAgeSec != nil { + in, out := &in.AccessControlMaxAgeSec, &out.AccessControlMaxAgeSec + *out = new(float64) + **out = **in + } + if in.IncludeSubdomains != nil { + in, out := &in.IncludeSubdomains, &out.IncludeSubdomains + *out = new(bool) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.Preload != nil { + in, out := &in.Preload, &out.Preload + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrictTransportSecurityParameters. 
+func (in *StrictTransportSecurityParameters) DeepCopy() *StrictTransportSecurityParameters { + if in == nil { + return nil + } + out := new(StrictTransportSecurityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedKeyGroupsInitParameters) DeepCopyInto(out *TrustedKeyGroupsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedKeyGroupsInitParameters. +func (in *TrustedKeyGroupsInitParameters) DeepCopy() *TrustedKeyGroupsInitParameters { + if in == nil { + return nil + } + out := new(TrustedKeyGroupsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedKeyGroupsObservation) DeepCopyInto(out *TrustedKeyGroupsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ItemsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedKeyGroupsObservation. +func (in *TrustedKeyGroupsObservation) DeepCopy() *TrustedKeyGroupsObservation { + if in == nil { + return nil + } + out := new(TrustedKeyGroupsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedKeyGroupsParameters) DeepCopyInto(out *TrustedKeyGroupsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedKeyGroupsParameters. 
+func (in *TrustedKeyGroupsParameters) DeepCopy() *TrustedKeyGroupsParameters { + if in == nil { + return nil + } + out := new(TrustedKeyGroupsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedSignersInitParameters) DeepCopyInto(out *TrustedSignersInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedSignersInitParameters. +func (in *TrustedSignersInitParameters) DeepCopy() *TrustedSignersInitParameters { + if in == nil { + return nil + } + out := new(TrustedSignersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedSignersItemsInitParameters) DeepCopyInto(out *TrustedSignersItemsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedSignersItemsInitParameters. +func (in *TrustedSignersItemsInitParameters) DeepCopy() *TrustedSignersItemsInitParameters { + if in == nil { + return nil + } + out := new(TrustedSignersItemsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrustedSignersItemsObservation) DeepCopyInto(out *TrustedSignersItemsObservation) { + *out = *in + if in.AwsAccountNumber != nil { + in, out := &in.AwsAccountNumber, &out.AwsAccountNumber + *out = new(string) + **out = **in + } + if in.KeyPairIds != nil { + in, out := &in.KeyPairIds, &out.KeyPairIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedSignersItemsObservation. +func (in *TrustedSignersItemsObservation) DeepCopy() *TrustedSignersItemsObservation { + if in == nil { + return nil + } + out := new(TrustedSignersItemsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedSignersItemsParameters) DeepCopyInto(out *TrustedSignersItemsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedSignersItemsParameters. +func (in *TrustedSignersItemsParameters) DeepCopy() *TrustedSignersItemsParameters { + if in == nil { + return nil + } + out := new(TrustedSignersItemsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedSignersObservation) DeepCopyInto(out *TrustedSignersObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TrustedSignersItemsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedSignersObservation. 
+func (in *TrustedSignersObservation) DeepCopy() *TrustedSignersObservation { + if in == nil { + return nil + } + out := new(TrustedSignersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedSignersParameters) DeepCopyInto(out *TrustedSignersParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedSignersParameters. +func (in *TrustedSignersParameters) DeepCopy() *TrustedSignersParameters { + if in == nil { + return nil + } + out := new(TrustedSignersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ViewerCertificateInitParameters) DeepCopyInto(out *ViewerCertificateInitParameters) { + *out = *in + if in.AcmCertificateArn != nil { + in, out := &in.AcmCertificateArn, &out.AcmCertificateArn + *out = new(string) + **out = **in + } + if in.CloudfrontDefaultCertificate != nil { + in, out := &in.CloudfrontDefaultCertificate, &out.CloudfrontDefaultCertificate + *out = new(bool) + **out = **in + } + if in.IAMCertificateID != nil { + in, out := &in.IAMCertificateID, &out.IAMCertificateID + *out = new(string) + **out = **in + } + if in.MinimumProtocolVersion != nil { + in, out := &in.MinimumProtocolVersion, &out.MinimumProtocolVersion + *out = new(string) + **out = **in + } + if in.SSLSupportMethod != nil { + in, out := &in.SSLSupportMethod, &out.SSLSupportMethod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ViewerCertificateInitParameters. 
+func (in *ViewerCertificateInitParameters) DeepCopy() *ViewerCertificateInitParameters { + if in == nil { + return nil + } + out := new(ViewerCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ViewerCertificateObservation) DeepCopyInto(out *ViewerCertificateObservation) { + *out = *in + if in.AcmCertificateArn != nil { + in, out := &in.AcmCertificateArn, &out.AcmCertificateArn + *out = new(string) + **out = **in + } + if in.CloudfrontDefaultCertificate != nil { + in, out := &in.CloudfrontDefaultCertificate, &out.CloudfrontDefaultCertificate + *out = new(bool) + **out = **in + } + if in.IAMCertificateID != nil { + in, out := &in.IAMCertificateID, &out.IAMCertificateID + *out = new(string) + **out = **in + } + if in.MinimumProtocolVersion != nil { + in, out := &in.MinimumProtocolVersion, &out.MinimumProtocolVersion + *out = new(string) + **out = **in + } + if in.SSLSupportMethod != nil { + in, out := &in.SSLSupportMethod, &out.SSLSupportMethod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ViewerCertificateObservation. +func (in *ViewerCertificateObservation) DeepCopy() *ViewerCertificateObservation { + if in == nil { + return nil + } + out := new(ViewerCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ViewerCertificateParameters) DeepCopyInto(out *ViewerCertificateParameters) { + *out = *in + if in.AcmCertificateArn != nil { + in, out := &in.AcmCertificateArn, &out.AcmCertificateArn + *out = new(string) + **out = **in + } + if in.CloudfrontDefaultCertificate != nil { + in, out := &in.CloudfrontDefaultCertificate, &out.CloudfrontDefaultCertificate + *out = new(bool) + **out = **in + } + if in.IAMCertificateID != nil { + in, out := &in.IAMCertificateID, &out.IAMCertificateID + *out = new(string) + **out = **in + } + if in.MinimumProtocolVersion != nil { + in, out := &in.MinimumProtocolVersion, &out.MinimumProtocolVersion + *out = new(string) + **out = **in + } + if in.SSLSupportMethod != nil { + in, out := &in.SSLSupportMethod, &out.SSLSupportMethod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ViewerCertificateParameters. +func (in *ViewerCertificateParameters) DeepCopy() *ViewerCertificateParameters { + if in == nil { + return nil + } + out := new(ViewerCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSProtectionInitParameters) DeepCopyInto(out *XSSProtectionInitParameters) { + *out = *in + if in.ModeBlock != nil { + in, out := &in.ModeBlock, &out.ModeBlock + *out = new(bool) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.Protection != nil { + in, out := &in.Protection, &out.Protection + *out = new(bool) + **out = **in + } + if in.ReportURI != nil { + in, out := &in.ReportURI, &out.ReportURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSProtectionInitParameters. 
+func (in *XSSProtectionInitParameters) DeepCopy() *XSSProtectionInitParameters { + if in == nil { + return nil + } + out := new(XSSProtectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSProtectionObservation) DeepCopyInto(out *XSSProtectionObservation) { + *out = *in + if in.ModeBlock != nil { + in, out := &in.ModeBlock, &out.ModeBlock + *out = new(bool) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.Protection != nil { + in, out := &in.Protection, &out.Protection + *out = new(bool) + **out = **in + } + if in.ReportURI != nil { + in, out := &in.ReportURI, &out.ReportURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSProtectionObservation. +func (in *XSSProtectionObservation) DeepCopy() *XSSProtectionObservation { + if in == nil { + return nil + } + out := new(XSSProtectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSProtectionParameters) DeepCopyInto(out *XSSProtectionParameters) { + *out = *in + if in.ModeBlock != nil { + in, out := &in.ModeBlock, &out.ModeBlock + *out = new(bool) + **out = **in + } + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(bool) + **out = **in + } + if in.Protection != nil { + in, out := &in.Protection, &out.Protection + *out = new(bool) + **out = **in + } + if in.ReportURI != nil { + in, out := &in.ReportURI, &out.ReportURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSProtectionParameters. 
+func (in *XSSProtectionParameters) DeepCopy() *XSSProtectionParameters { + if in == nil { + return nil + } + out := new(XSSProtectionParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cloudfront/v1beta2/zz_generated.managed.go b/apis/cloudfront/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..1a63a67968 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_generated.managed.go @@ -0,0 +1,488 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CachePolicy. +func (mg *CachePolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CachePolicy. +func (mg *CachePolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CachePolicy. +func (mg *CachePolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CachePolicy. +func (mg *CachePolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CachePolicy. +func (mg *CachePolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CachePolicy. +func (mg *CachePolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CachePolicy. +func (mg *CachePolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CachePolicy. 
+func (mg *CachePolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CachePolicy. +func (mg *CachePolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CachePolicy. +func (mg *CachePolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CachePolicy. +func (mg *CachePolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CachePolicy. +func (mg *CachePolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Distribution. +func (mg *Distribution) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Distribution. +func (mg *Distribution) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Distribution. +func (mg *Distribution) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Distribution. +func (mg *Distribution) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Distribution. +func (mg *Distribution) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Distribution. +func (mg *Distribution) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Distribution. +func (mg *Distribution) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this Distribution. +func (mg *Distribution) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Distribution. +func (mg *Distribution) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Distribution. +func (mg *Distribution) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Distribution. +func (mg *Distribution) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Distribution. +func (mg *Distribution) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FieldLevelEncryptionConfig. 
+func (mg *FieldLevelEncryptionConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FieldLevelEncryptionProfile. 
+func (mg *FieldLevelEncryptionProfile) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FieldLevelEncryptionProfile. +func (mg *FieldLevelEncryptionProfile) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitoringSubscription. 
+func (mg *MonitoringSubscription) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitoringSubscription. +func (mg *MonitoringSubscription) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitoringSubscription. +func (mg *MonitoringSubscription) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitoringSubscription. +func (mg *MonitoringSubscription) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitoringSubscription. +func (mg *MonitoringSubscription) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitoringSubscription. +func (mg *MonitoringSubscription) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitoringSubscription. +func (mg *MonitoringSubscription) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitoringSubscription. +func (mg *MonitoringSubscription) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitoringSubscription. +func (mg *MonitoringSubscription) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitoringSubscription. +func (mg *MonitoringSubscription) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitoringSubscription. 
+func (mg *MonitoringSubscription) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitoringSubscription. +func (mg *MonitoringSubscription) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this OriginRequestPolicy. 
+func (mg *OriginRequestPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this OriginRequestPolicy. +func (mg *OriginRequestPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ResponseHeadersPolicy. 
+func (mg *ResponseHeadersPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ResponseHeadersPolicy. +func (mg *ResponseHeadersPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cloudfront/v1beta2/zz_generated.managedlist.go b/apis/cloudfront/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..519bc6e09f --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,80 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CachePolicyList. 
+func (l *CachePolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DistributionList. +func (l *DistributionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FieldLevelEncryptionConfigList. +func (l *FieldLevelEncryptionConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FieldLevelEncryptionProfileList. +func (l *FieldLevelEncryptionProfileList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitoringSubscriptionList. +func (l *MonitoringSubscriptionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this OriginRequestPolicyList. +func (l *OriginRequestPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RealtimeLogConfigList. +func (l *RealtimeLogConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ResponseHeadersPolicyList. 
+func (l *ResponseHeadersPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cloudfront/v1beta2/zz_generated.resolvers.go b/apis/cloudfront/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..46ba224be1 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,489 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Distribution) ResolveReferences( // ResolveReferences of this Distribution. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.OrderedCacheBehavior); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.OrderedCacheBehavior[i3].FunctionAssociation); i4++ { + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta1", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArnRef, + Selector: mg.Spec.ForProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArn") + } + mg.Spec.ForProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.OrderedCacheBehavior); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation); i4++ { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArn), + Extract: resource.ExtractParamPath("qualified_arn", true), + Reference: mg.Spec.ForProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArnRef, + Selector: mg.Spec.ForProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArn") + } + mg.Spec.ForProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Origin); i3++ { + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta1", "OriginAccessControl", "OriginAccessControlList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Origin[i3].OriginAccessControlID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Origin[i3].OriginAccessControlIDRef, + Selector: mg.Spec.ForProvider.Origin[i3].OriginAccessControlIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Origin[i3].OriginAccessControlID") + } + mg.Spec.ForProvider.Origin[i3].OriginAccessControlID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Origin[i3].OriginAccessControlIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Origin); i3++ { + if mg.Spec.ForProvider.Origin[i3].S3OriginConfig != nil { + { + m, l, err = 
apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta1", "OriginAccessIdentity", "OriginAccessIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Origin[i3].S3OriginConfig.OriginAccessIdentity), + Extract: resource.ExtractParamPath("cloudfront_access_identity_path", true), + Reference: mg.Spec.ForProvider.Origin[i3].S3OriginConfig.OriginAccessIdentityRef, + Selector: mg.Spec.ForProvider.Origin[i3].S3OriginConfig.OriginAccessIdentitySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Origin[i3].S3OriginConfig.OriginAccessIdentity") + } + mg.Spec.ForProvider.Origin[i3].S3OriginConfig.OriginAccessIdentity = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Origin[i3].S3OriginConfig.OriginAccessIdentityRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.OrderedCacheBehavior); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.OrderedCacheBehavior[i3].FunctionAssociation); i4++ { + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta1", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArnRef, + Selector: mg.Spec.InitProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { 
+ return errors.Wrap(err, "mg.Spec.InitProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArn") + } + mg.Spec.InitProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OrderedCacheBehavior[i3].FunctionAssociation[i4].FunctionArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.OrderedCacheBehavior); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation); i4++ { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArn), + Extract: resource.ExtractParamPath("qualified_arn", true), + Reference: mg.Spec.InitProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArnRef, + Selector: mg.Spec.InitProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArn") + } + mg.Spec.InitProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OrderedCacheBehavior[i3].LambdaFunctionAssociation[i4].LambdaArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Origin); i3++ { + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta1", "OriginAccessControl", "OriginAccessControlList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Origin[i3].OriginAccessControlID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Origin[i3].OriginAccessControlIDRef, + Selector: mg.Spec.InitProvider.Origin[i3].OriginAccessControlIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Origin[i3].OriginAccessControlID") + } + mg.Spec.InitProvider.Origin[i3].OriginAccessControlID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Origin[i3].OriginAccessControlIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Origin); i3++ { + if mg.Spec.InitProvider.Origin[i3].S3OriginConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta1", "OriginAccessIdentity", "OriginAccessIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Origin[i3].S3OriginConfig.OriginAccessIdentity), + Extract: resource.ExtractParamPath("cloudfront_access_identity_path", true), + Reference: mg.Spec.InitProvider.Origin[i3].S3OriginConfig.OriginAccessIdentityRef, + Selector: mg.Spec.InitProvider.Origin[i3].S3OriginConfig.OriginAccessIdentitySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Origin[i3].S3OriginConfig.OriginAccessIdentity") + } + mg.Spec.InitProvider.Origin[i3].S3OriginConfig.OriginAccessIdentity = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Origin[i3].S3OriginConfig.OriginAccessIdentityRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this 
FieldLevelEncryptionConfig. +func (mg *FieldLevelEncryptionConfig) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.QueryArgProfileConfig != nil { + if mg.Spec.ForProvider.QueryArgProfileConfig.QueryArgProfiles != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.QueryArgProfileConfig.QueryArgProfiles.Items); i5++ { + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta2", "FieldLevelEncryptionProfile", "FieldLevelEncryptionProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileIDRef, + Selector: mg.Spec.ForProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileID") + } + mg.Spec.ForProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileIDRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.QueryArgProfileConfig != nil { + if mg.Spec.InitProvider.QueryArgProfileConfig.QueryArgProfiles != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.QueryArgProfileConfig.QueryArgProfiles.Items); i5++ { + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta2", 
"FieldLevelEncryptionProfile", "FieldLevelEncryptionProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileIDRef, + Selector: mg.Spec.InitProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileID") + } + mg.Spec.InitProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.QueryArgProfileConfig.QueryArgProfiles.Items[i5].ProfileIDRef = rsp.ResolvedReference + + } + } + } + + return nil +} + +// ResolveReferences of this FieldLevelEncryptionProfile. 
+func (mg *FieldLevelEncryptionProfile) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.EncryptionEntities != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.EncryptionEntities.Items); i4++ { + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta1", "PublicKey", "PublicKeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EncryptionEntities.Items[i4].PublicKeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.EncryptionEntities.Items[i4].PublicKeyIDRef, + Selector: mg.Spec.ForProvider.EncryptionEntities.Items[i4].PublicKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EncryptionEntities.Items[i4].PublicKeyID") + } + mg.Spec.ForProvider.EncryptionEntities.Items[i4].PublicKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EncryptionEntities.Items[i4].PublicKeyIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.EncryptionEntities != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.EncryptionEntities.Items); i4++ { + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta1", "PublicKey", "PublicKeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EncryptionEntities.Items[i4].PublicKeyID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.EncryptionEntities.Items[i4].PublicKeyIDRef, + Selector: mg.Spec.InitProvider.EncryptionEntities.Items[i4].PublicKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EncryptionEntities.Items[i4].PublicKeyID") + } + mg.Spec.InitProvider.EncryptionEntities.Items[i4].PublicKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EncryptionEntities.Items[i4].PublicKeyIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this MonitoringSubscription. +func (mg *MonitoringSubscription) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta2", "Distribution", "DistributionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DistributionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DistributionIDRef, + Selector: mg.Spec.ForProvider.DistributionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DistributionID") + } + mg.Spec.ForProvider.DistributionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DistributionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cloudfront.aws.upbound.io", "v1beta2", "Distribution", "DistributionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DistributionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DistributionIDRef, + Selector: mg.Spec.InitProvider.DistributionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DistributionID") + } + mg.Spec.InitProvider.DistributionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DistributionIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RealtimeLogConfig. +func (mg *RealtimeLogConfig) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Endpoint != nil { + if mg.Spec.ForProvider.Endpoint.KinesisStreamConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.RoleArnRef, + Selector: mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.RoleArn") + } + mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Endpoint != nil { + if 
mg.Spec.ForProvider.Endpoint.KinesisStreamConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.StreamArn), + Extract: common.TerraformID(), + Reference: mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.StreamArnRef, + Selector: mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.StreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.StreamArn") + } + mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Endpoint.KinesisStreamConfig.StreamArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Endpoint != nil { + if mg.Spec.InitProvider.Endpoint.KinesisStreamConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.RoleArnRef, + Selector: mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.RoleArn") + } + mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Endpoint != nil { + if mg.Spec.InitProvider.Endpoint.KinesisStreamConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.StreamArn), + Extract: common.TerraformID(), + Reference: mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.StreamArnRef, + Selector: mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.StreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.StreamArn") + } + mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Endpoint.KinesisStreamConfig.StreamArnRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/cloudfront/v1beta2/zz_groupversion_info.go b/apis/cloudfront/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..fec01f8bed --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cloudfront.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "cloudfront.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cloudfront/v1beta2/zz_monitoringsubscription_terraformed.go b/apis/cloudfront/v1beta2/zz_monitoringsubscription_terraformed.go new file mode 100755 index 0000000000..ecbb601cdb --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_monitoringsubscription_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitoringSubscription +func (mg *MonitoringSubscription) GetTerraformResourceType() string { + return "aws_cloudfront_monitoring_subscription" +} + +// GetConnectionDetailsMapping for this MonitoringSubscription +func (tr *MonitoringSubscription) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitoringSubscription +func (tr *MonitoringSubscription) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitoringSubscription +func (tr *MonitoringSubscription) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil 
{ + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitoringSubscription +func (tr *MonitoringSubscription) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitoringSubscription +func (tr *MonitoringSubscription) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitoringSubscription +func (tr *MonitoringSubscription) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitoringSubscription +func (tr *MonitoringSubscription) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitoringSubscription +func (tr *MonitoringSubscription) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitoringSubscription using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MonitoringSubscription) LateInitialize(attrs []byte) (bool, error) { + params := &MonitoringSubscriptionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitoringSubscription) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudfront/v1beta2/zz_monitoringsubscription_types.go b/apis/cloudfront/v1beta2/zz_monitoringsubscription_types.go new file mode 100755 index 0000000000..bea131f865 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_monitoringsubscription_types.go @@ -0,0 +1,170 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MonitoringSubscriptionInitParameters struct { + + // The ID of the distribution that you are enabling metrics for. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta2.Distribution + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DistributionID *string `json:"distributionId,omitempty" tf:"distribution_id,omitempty"` + + // Reference to a Distribution in cloudfront to populate distributionId. + // +kubebuilder:validation:Optional + DistributionIDRef *v1.Reference `json:"distributionIdRef,omitempty" tf:"-"` + + // Selector for a Distribution in cloudfront to populate distributionId. + // +kubebuilder:validation:Optional + DistributionIDSelector *v1.Selector `json:"distributionIdSelector,omitempty" tf:"-"` + + // A monitoring subscription. This structure contains information about whether additional CloudWatch metrics are enabled for a given CloudFront distribution. + MonitoringSubscription *MonitoringSubscriptionMonitoringSubscriptionInitParameters `json:"monitoringSubscription,omitempty" tf:"monitoring_subscription,omitempty"` +} + +type MonitoringSubscriptionMonitoringSubscriptionInitParameters struct { + + // A subscription configuration for additional CloudWatch metrics. See below. + RealtimeMetricsSubscriptionConfig *RealtimeMetricsSubscriptionConfigInitParameters `json:"realtimeMetricsSubscriptionConfig,omitempty" tf:"realtime_metrics_subscription_config,omitempty"` +} + +type MonitoringSubscriptionMonitoringSubscriptionObservation struct { + + // A subscription configuration for additional CloudWatch metrics. See below. 
+ RealtimeMetricsSubscriptionConfig *RealtimeMetricsSubscriptionConfigObservation `json:"realtimeMetricsSubscriptionConfig,omitempty" tf:"realtime_metrics_subscription_config,omitempty"` +} + +type MonitoringSubscriptionMonitoringSubscriptionParameters struct { + + // A subscription configuration for additional CloudWatch metrics. See below. + // +kubebuilder:validation:Optional + RealtimeMetricsSubscriptionConfig *RealtimeMetricsSubscriptionConfigParameters `json:"realtimeMetricsSubscriptionConfig" tf:"realtime_metrics_subscription_config,omitempty"` +} + +type MonitoringSubscriptionObservation struct { + + // The ID of the distribution that you are enabling metrics for. + DistributionID *string `json:"distributionId,omitempty" tf:"distribution_id,omitempty"` + + // The ID of the CloudFront monitoring subscription, which corresponds to the distribution_id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A monitoring subscription. This structure contains information about whether additional CloudWatch metrics are enabled for a given CloudFront distribution. + MonitoringSubscription *MonitoringSubscriptionMonitoringSubscriptionObservation `json:"monitoringSubscription,omitempty" tf:"monitoring_subscription,omitempty"` +} + +type MonitoringSubscriptionParameters struct { + + // The ID of the distribution that you are enabling metrics for. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudfront/v1beta2.Distribution + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DistributionID *string `json:"distributionId,omitempty" tf:"distribution_id,omitempty"` + + // Reference to a Distribution in cloudfront to populate distributionId. + // +kubebuilder:validation:Optional + DistributionIDRef *v1.Reference `json:"distributionIdRef,omitempty" tf:"-"` + + // Selector for a Distribution in cloudfront to populate distributionId. 
+ // +kubebuilder:validation:Optional + DistributionIDSelector *v1.Selector `json:"distributionIdSelector,omitempty" tf:"-"` + + // A monitoring subscription. This structure contains information about whether additional CloudWatch metrics are enabled for a given CloudFront distribution. + // +kubebuilder:validation:Optional + MonitoringSubscription *MonitoringSubscriptionMonitoringSubscriptionParameters `json:"monitoringSubscription,omitempty" tf:"monitoring_subscription,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type RealtimeMetricsSubscriptionConfigInitParameters struct { + + // A flag that indicates whether additional CloudWatch metrics are enabled for a given CloudFront distribution. Valid values are Enabled and Disabled. See below. + RealtimeMetricsSubscriptionStatus *string `json:"realtimeMetricsSubscriptionStatus,omitempty" tf:"realtime_metrics_subscription_status,omitempty"` +} + +type RealtimeMetricsSubscriptionConfigObservation struct { + + // A flag that indicates whether additional CloudWatch metrics are enabled for a given CloudFront distribution. Valid values are Enabled and Disabled. See below. + RealtimeMetricsSubscriptionStatus *string `json:"realtimeMetricsSubscriptionStatus,omitempty" tf:"realtime_metrics_subscription_status,omitempty"` +} + +type RealtimeMetricsSubscriptionConfigParameters struct { + + // A flag that indicates whether additional CloudWatch metrics are enabled for a given CloudFront distribution. Valid values are Enabled and Disabled. See below. 
+ // +kubebuilder:validation:Optional + RealtimeMetricsSubscriptionStatus *string `json:"realtimeMetricsSubscriptionStatus" tf:"realtime_metrics_subscription_status,omitempty"` +} + +// MonitoringSubscriptionSpec defines the desired state of MonitoringSubscription +type MonitoringSubscriptionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitoringSubscriptionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitoringSubscriptionInitParameters `json:"initProvider,omitempty"` +} + +// MonitoringSubscriptionStatus defines the observed state of MonitoringSubscription. +type MonitoringSubscriptionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitoringSubscriptionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitoringSubscription is the Schema for the MonitoringSubscriptions API. Provides a CloudFront monitoring subscription resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type MonitoringSubscription struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.monitoringSubscription) || (has(self.initProvider) && has(self.initProvider.monitoringSubscription))",message="spec.forProvider.monitoringSubscription is a required parameter" + Spec MonitoringSubscriptionSpec `json:"spec"` + Status MonitoringSubscriptionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitoringSubscriptionList contains a list of MonitoringSubscriptions +type MonitoringSubscriptionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitoringSubscription `json:"items"` +} + +// Repository type metadata. +var ( + MonitoringSubscription_Kind = "MonitoringSubscription" + MonitoringSubscription_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitoringSubscription_Kind}.String() + MonitoringSubscription_KindAPIVersion = MonitoringSubscription_Kind + "." 
+ CRDGroupVersion.String() + MonitoringSubscription_GroupVersionKind = CRDGroupVersion.WithKind(MonitoringSubscription_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitoringSubscription{}, &MonitoringSubscriptionList{}) +} diff --git a/apis/cloudfront/v1beta2/zz_originrequestpolicy_terraformed.go b/apis/cloudfront/v1beta2/zz_originrequestpolicy_terraformed.go new file mode 100755 index 0000000000..fda082531a --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_originrequestpolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OriginRequestPolicy +func (mg *OriginRequestPolicy) GetTerraformResourceType() string { + return "aws_cloudfront_origin_request_policy" +} + +// GetConnectionDetailsMapping for this OriginRequestPolicy +func (tr *OriginRequestPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this OriginRequestPolicy +func (tr *OriginRequestPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OriginRequestPolicy +func (tr *OriginRequestPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OriginRequestPolicy +func (tr *OriginRequestPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + 
+// GetParameters of this OriginRequestPolicy +func (tr *OriginRequestPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OriginRequestPolicy +func (tr *OriginRequestPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OriginRequestPolicy +func (tr *OriginRequestPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this OriginRequestPolicy +func (tr *OriginRequestPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this OriginRequestPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *OriginRequestPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &OriginRequestPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OriginRequestPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudfront/v1beta2/zz_originrequestpolicy_types.go b/apis/cloudfront/v1beta2/zz_originrequestpolicy_types.go new file mode 100755 index 0000000000..bed7962c59 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_originrequestpolicy_types.go @@ -0,0 +1,257 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CookiesConfigCookiesInitParameters struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type CookiesConfigCookiesObservation struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type CookiesConfigCookiesParameters struct { + + // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type HeadersConfigHeadersInitParameters struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type HeadersConfigHeadersObservation struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type HeadersConfigHeadersParameters struct { + + // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type OriginRequestPolicyCookiesConfigInitParameters struct { + CookieBehavior *string `json:"cookieBehavior,omitempty" tf:"cookie_behavior,omitempty"` + + Cookies *CookiesConfigCookiesInitParameters `json:"cookies,omitempty" tf:"cookies,omitempty"` +} + +type OriginRequestPolicyCookiesConfigObservation struct { + CookieBehavior *string `json:"cookieBehavior,omitempty" tf:"cookie_behavior,omitempty"` + + Cookies *CookiesConfigCookiesObservation `json:"cookies,omitempty" tf:"cookies,omitempty"` +} + +type OriginRequestPolicyCookiesConfigParameters struct { + + // +kubebuilder:validation:Optional + CookieBehavior *string `json:"cookieBehavior" tf:"cookie_behavior,omitempty"` + + // +kubebuilder:validation:Optional + Cookies *CookiesConfigCookiesParameters `json:"cookies,omitempty" tf:"cookies,omitempty"` +} + +type OriginRequestPolicyHeadersConfigInitParameters struct { + HeaderBehavior *string 
`json:"headerBehavior,omitempty" tf:"header_behavior,omitempty"` + + Headers *HeadersConfigHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` +} + +type OriginRequestPolicyHeadersConfigObservation struct { + HeaderBehavior *string `json:"headerBehavior,omitempty" tf:"header_behavior,omitempty"` + + Headers *HeadersConfigHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` +} + +type OriginRequestPolicyHeadersConfigParameters struct { + + // +kubebuilder:validation:Optional + HeaderBehavior *string `json:"headerBehavior,omitempty" tf:"header_behavior,omitempty"` + + // +kubebuilder:validation:Optional + Headers *HeadersConfigHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` +} + +type OriginRequestPolicyInitParameters struct { + + // Comment to describe the origin request policy. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See Cookies Config for more information. + CookiesConfig *OriginRequestPolicyCookiesConfigInitParameters `json:"cookiesConfig,omitempty" tf:"cookies_config,omitempty"` + + // Object that determines whether any HTTP headers (and if so, which headers) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See Headers Config for more information. + HeadersConfig *OriginRequestPolicyHeadersConfigInitParameters `json:"headersConfig,omitempty" tf:"headers_config,omitempty"` + + // Object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See Query String Config for more information. 
+ QueryStringsConfig *OriginRequestPolicyQueryStringsConfigInitParameters `json:"queryStringsConfig,omitempty" tf:"query_strings_config,omitempty"` +} + +type OriginRequestPolicyObservation struct { + + // Comment to describe the origin request policy. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See Cookies Config for more information. + CookiesConfig *OriginRequestPolicyCookiesConfigObservation `json:"cookiesConfig,omitempty" tf:"cookies_config,omitempty"` + + // The current version of the origin request policy. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // Object that determines whether any HTTP headers (and if so, which headers) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See Headers Config for more information. + HeadersConfig *OriginRequestPolicyHeadersConfigObservation `json:"headersConfig,omitempty" tf:"headers_config,omitempty"` + + // The identifier for the origin request policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See Query String Config for more information. + QueryStringsConfig *OriginRequestPolicyQueryStringsConfigObservation `json:"queryStringsConfig,omitempty" tf:"query_strings_config,omitempty"` +} + +type OriginRequestPolicyParameters struct { + + // Comment to describe the origin request policy. 
+ // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See Cookies Config for more information. + // +kubebuilder:validation:Optional + CookiesConfig *OriginRequestPolicyCookiesConfigParameters `json:"cookiesConfig,omitempty" tf:"cookies_config,omitempty"` + + // Object that determines whether any HTTP headers (and if so, which headers) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See Headers Config for more information. + // +kubebuilder:validation:Optional + HeadersConfig *OriginRequestPolicyHeadersConfigParameters `json:"headersConfig,omitempty" tf:"headers_config,omitempty"` + + // Object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See Query String Config for more information. + // +kubebuilder:validation:Optional + QueryStringsConfig *OriginRequestPolicyQueryStringsConfigParameters `json:"queryStringsConfig,omitempty" tf:"query_strings_config,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type OriginRequestPolicyQueryStringsConfigInitParameters struct { + QueryStringBehavior *string `json:"queryStringBehavior,omitempty" tf:"query_string_behavior,omitempty"` + + QueryStrings *QueryStringsConfigQueryStringsInitParameters `json:"queryStrings,omitempty" tf:"query_strings,omitempty"` +} + +type OriginRequestPolicyQueryStringsConfigObservation struct { + QueryStringBehavior *string `json:"queryStringBehavior,omitempty" tf:"query_string_behavior,omitempty"` + + QueryStrings *QueryStringsConfigQueryStringsObservation `json:"queryStrings,omitempty" tf:"query_strings,omitempty"` +} + +type OriginRequestPolicyQueryStringsConfigParameters struct { + + // +kubebuilder:validation:Optional + QueryStringBehavior *string `json:"queryStringBehavior" tf:"query_string_behavior,omitempty"` + + // +kubebuilder:validation:Optional + QueryStrings *QueryStringsConfigQueryStringsParameters `json:"queryStrings,omitempty" tf:"query_strings,omitempty"` +} + +type QueryStringsConfigQueryStringsInitParameters struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type QueryStringsConfigQueryStringsObservation struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type QueryStringsConfigQueryStringsParameters struct { + + // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +// OriginRequestPolicySpec defines the desired state of OriginRequestPolicy +type OriginRequestPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OriginRequestPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider OriginRequestPolicyInitParameters `json:"initProvider,omitempty"` +} + +// OriginRequestPolicyStatus defines the observed state of OriginRequestPolicy. +type OriginRequestPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OriginRequestPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// OriginRequestPolicy is the Schema for the OriginRequestPolicys API. Determines the values that CloudFront includes in requests that it sends to the origin. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type OriginRequestPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.cookiesConfig) || (has(self.initProvider) && has(self.initProvider.cookiesConfig))",message="spec.forProvider.cookiesConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.headersConfig) || (has(self.initProvider) && has(self.initProvider.headersConfig))",message="spec.forProvider.headersConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.queryStringsConfig) || (has(self.initProvider) && has(self.initProvider.queryStringsConfig))",message="spec.forProvider.queryStringsConfig is a required parameter" + Spec OriginRequestPolicySpec `json:"spec"` + Status OriginRequestPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OriginRequestPolicyList contains a list of OriginRequestPolicys +type OriginRequestPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]OriginRequestPolicy `json:"items"` +} + +// Repository type metadata. +var ( + OriginRequestPolicy_Kind = "OriginRequestPolicy" + OriginRequestPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OriginRequestPolicy_Kind}.String() + OriginRequestPolicy_KindAPIVersion = OriginRequestPolicy_Kind + "." + CRDGroupVersion.String() + OriginRequestPolicy_GroupVersionKind = CRDGroupVersion.WithKind(OriginRequestPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&OriginRequestPolicy{}, &OriginRequestPolicyList{}) +} diff --git a/apis/cloudfront/v1beta2/zz_realtimelogconfig_terraformed.go b/apis/cloudfront/v1beta2/zz_realtimelogconfig_terraformed.go new file mode 100755 index 0000000000..e454f1a5dc --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_realtimelogconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RealtimeLogConfig +func (mg *RealtimeLogConfig) GetTerraformResourceType() string { + return "aws_cloudfront_realtime_log_config" +} + +// GetConnectionDetailsMapping for this RealtimeLogConfig +func (tr *RealtimeLogConfig) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RealtimeLogConfig +func (tr *RealtimeLogConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RealtimeLogConfig +func (tr *RealtimeLogConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return 
 json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this RealtimeLogConfig +func (tr *RealtimeLogConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RealtimeLogConfig +func (tr *RealtimeLogConfig) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RealtimeLogConfig +func (tr *RealtimeLogConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RealtimeLogConfig +func (tr *RealtimeLogConfig) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this RealtimeLogConfig +func (tr *RealtimeLogConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RealtimeLogConfig using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *RealtimeLogConfig) LateInitialize(attrs []byte) (bool, error) { + params := &RealtimeLogConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RealtimeLogConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudfront/v1beta2/zz_realtimelogconfig_types.go b/apis/cloudfront/v1beta2/zz_realtimelogconfig_types.go new file mode 100755 index 0000000000..478259fa78 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_realtimelogconfig_types.go @@ -0,0 +1,242 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EndpointInitParameters struct { + + // The Amazon Kinesis data stream configuration. 
+ KinesisStreamConfig *KinesisStreamConfigInitParameters `json:"kinesisStreamConfig,omitempty" tf:"kinesis_stream_config,omitempty"` + + // The type of data stream where real-time log data is sent. The only valid value is Kinesis. + StreamType *string `json:"streamType,omitempty" tf:"stream_type,omitempty"` +} + +type EndpointObservation struct { + + // The Amazon Kinesis data stream configuration. + KinesisStreamConfig *KinesisStreamConfigObservation `json:"kinesisStreamConfig,omitempty" tf:"kinesis_stream_config,omitempty"` + + // The type of data stream where real-time log data is sent. The only valid value is Kinesis. + StreamType *string `json:"streamType,omitempty" tf:"stream_type,omitempty"` +} + +type EndpointParameters struct { + + // The Amazon Kinesis data stream configuration. + // +kubebuilder:validation:Optional + KinesisStreamConfig *KinesisStreamConfigParameters `json:"kinesisStreamConfig" tf:"kinesis_stream_config,omitempty"` + + // The type of data stream where real-time log data is sent. The only valid value is Kinesis. + // +kubebuilder:validation:Optional + StreamType *string `json:"streamType" tf:"stream_type,omitempty"` +} + +type KinesisStreamConfigInitParameters struct { + + // The ARN of an IAM role that CloudFront can use to send real-time log data to the Kinesis data stream. + // See the AWS documentation for more information. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The ARN of the Kinesis data stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` + + // Reference to a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnRef *v1.Reference `json:"streamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnSelector *v1.Selector `json:"streamArnSelector,omitempty" tf:"-"` +} + +type KinesisStreamConfigObservation struct { + + // The ARN of an IAM role that CloudFront can use to send real-time log data to the Kinesis data stream. + // See the AWS documentation for more information. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The ARN of the Kinesis data stream. + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` +} + +type KinesisStreamConfigParameters struct { + + // The ARN of an IAM role that CloudFront can use to send real-time log data to the Kinesis data stream. + // See the AWS documentation for more information. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The ARN of the Kinesis data stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + // +kubebuilder:validation:Optional + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` + + // Reference to a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnRef *v1.Reference `json:"streamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnSelector *v1.Selector `json:"streamArnSelector,omitempty" tf:"-"` +} + +type RealtimeLogConfigInitParameters struct { + + // The Amazon Kinesis data streams where real-time log data is sent. + Endpoint *EndpointInitParameters `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The fields that are included in each real-time log record. See the AWS documentation for supported values. + // +listType=set + Fields []*string `json:"fields,omitempty" tf:"fields,omitempty"` + + // The unique name to identify this real-time log configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The sampling rate for this real-time log configuration. The sampling rate determines the percentage of viewer requests that are represented in the real-time log data. An integer between 1 and 100, inclusive. + SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type RealtimeLogConfigObservation struct { + + // The ARN (Amazon Resource Name) of the CloudFront real-time log configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The Amazon Kinesis data streams where real-time log data is sent. 
+ Endpoint *EndpointObservation `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The fields that are included in each real-time log record. See the AWS documentation for supported values. + // +listType=set + Fields []*string `json:"fields,omitempty" tf:"fields,omitempty"` + + // The ID of the CloudFront real-time log configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The unique name to identify this real-time log configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The sampling rate for this real-time log configuration. The sampling rate determines the percentage of viewer requests that are represented in the real-time log data. An integer between 1 and 100, inclusive. + SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type RealtimeLogConfigParameters struct { + + // The Amazon Kinesis data streams where real-time log data is sent. + // +kubebuilder:validation:Optional + Endpoint *EndpointParameters `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The fields that are included in each real-time log record. See the AWS documentation for supported values. + // +kubebuilder:validation:Optional + // +listType=set + Fields []*string `json:"fields,omitempty" tf:"fields,omitempty"` + + // The unique name to identify this real-time log configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The sampling rate for this real-time log configuration. The sampling rate determines the percentage of viewer requests that are represented in the real-time log data. An integer between 1 and 100, inclusive. 
+ // +kubebuilder:validation:Optional + SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +// RealtimeLogConfigSpec defines the desired state of RealtimeLogConfig +type RealtimeLogConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RealtimeLogConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RealtimeLogConfigInitParameters `json:"initProvider,omitempty"` +} + +// RealtimeLogConfigStatus defines the observed state of RealtimeLogConfig. +type RealtimeLogConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RealtimeLogConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RealtimeLogConfig is the Schema for the RealtimeLogConfigs API. Provides a CloudFront real-time log configuration resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type RealtimeLogConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.endpoint) || (has(self.initProvider) && has(self.initProvider.endpoint))",message="spec.forProvider.endpoint is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.fields) || (has(self.initProvider) && has(self.initProvider.fields))",message="spec.forProvider.fields is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.samplingRate) || (has(self.initProvider) && has(self.initProvider.samplingRate))",message="spec.forProvider.samplingRate is a required parameter" + Spec RealtimeLogConfigSpec `json:"spec"` + Status RealtimeLogConfigStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RealtimeLogConfigList contains a list of RealtimeLogConfigs +type RealtimeLogConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RealtimeLogConfig `json:"items"` +} + +// Repository type metadata. +var ( + RealtimeLogConfig_Kind = "RealtimeLogConfig" + RealtimeLogConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RealtimeLogConfig_Kind}.String() + RealtimeLogConfig_KindAPIVersion = RealtimeLogConfig_Kind + "." + CRDGroupVersion.String() + RealtimeLogConfig_GroupVersionKind = CRDGroupVersion.WithKind(RealtimeLogConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&RealtimeLogConfig{}, &RealtimeLogConfigList{}) +} diff --git a/apis/cloudfront/v1beta2/zz_responseheaderspolicy_terraformed.go b/apis/cloudfront/v1beta2/zz_responseheaderspolicy_terraformed.go new file mode 100755 index 0000000000..0b0e0dadd1 --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_responseheaderspolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ResponseHeadersPolicy +func (mg *ResponseHeadersPolicy) GetTerraformResourceType() string { + return "aws_cloudfront_response_headers_policy" +} + +// GetConnectionDetailsMapping for this ResponseHeadersPolicy +func (tr *ResponseHeadersPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ResponseHeadersPolicy +func (tr *ResponseHeadersPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ResponseHeadersPolicy +func (tr *ResponseHeadersPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ResponseHeadersPolicy +func (tr *ResponseHeadersPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ResponseHeadersPolicy +func (tr *ResponseHeadersPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ResponseHeadersPolicy +func (tr *ResponseHeadersPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ResponseHeadersPolicy +func (tr 
 *ResponseHeadersPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this ResponseHeadersPolicy +func (tr *ResponseHeadersPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ResponseHeadersPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ResponseHeadersPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &ResponseHeadersPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ResponseHeadersPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudfront/v1beta2/zz_responseheaderspolicy_types.go b/apis/cloudfront/v1beta2/zz_responseheaderspolicy_types.go new file mode 100755 index 0000000000..657c70206f --- /dev/null +++ b/apis/cloudfront/v1beta2/zz_responseheaderspolicy_types.go @@ -0,0 +1,715 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessControlAllowHeadersInitParameters struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlAllowHeadersObservation struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlAllowHeadersParameters struct { + + // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlAllowMethodsInitParameters struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlAllowMethodsObservation struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlAllowMethodsParameters struct { + + // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlAllowOriginsInitParameters struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlAllowOriginsObservation struct { + + // +listType=set + Items 
[]*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlAllowOriginsParameters struct { + + // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlExposeHeadersInitParameters struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlExposeHeadersObservation struct { + + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type AccessControlExposeHeadersParameters struct { + + // +kubebuilder:validation:Optional + // +listType=set + Items []*string `json:"items,omitempty" tf:"items,omitempty"` +} + +type ContentSecurityPolicyInitParameters struct { + + // The policy directives and their values that CloudFront includes as values for the Content-Security-Policy HTTP response header. See Content Security Policy for more information. + ContentSecurityPolicy *string `json:"contentSecurityPolicy,omitempty" tf:"content_security_policy,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` +} + +type ContentSecurityPolicyObservation struct { + + // The policy directives and their values that CloudFront includes as values for the Content-Security-Policy HTTP response header. See Content Security Policy for more information. + ContentSecurityPolicy *string `json:"contentSecurityPolicy,omitempty" tf:"content_security_policy,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` +} + +type ContentSecurityPolicyParameters struct { + + // The policy directives and their values that CloudFront includes as values for the Content-Security-Policy HTTP response header. 
See Content Security Policy for more information. + // +kubebuilder:validation:Optional + ContentSecurityPolicy *string `json:"contentSecurityPolicy" tf:"content_security_policy,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + // +kubebuilder:validation:Optional + Override *bool `json:"override" tf:"override,omitempty"` +} + +type ContentTypeOptionsInitParameters struct { + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` +} + +type ContentTypeOptionsObservation struct { + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` +} + +type ContentTypeOptionsParameters struct { + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + // +kubebuilder:validation:Optional + Override *bool `json:"override" tf:"override,omitempty"` +} + +type CorsConfigInitParameters struct { + + // A Boolean value that CloudFront uses as the value for the Access-Control-Allow-Credentials HTTP response header. + AccessControlAllowCredentials *bool `json:"accessControlAllowCredentials,omitempty" tf:"access_control_allow_credentials,omitempty"` + + // Object that contains an attribute items that contains a list of HTTP header names that CloudFront includes as values for the Access-Control-Allow-Headers HTTP response header. 
+ AccessControlAllowHeaders *AccessControlAllowHeadersInitParameters `json:"accessControlAllowHeaders,omitempty" tf:"access_control_allow_headers,omitempty"` + + // Object that contains an attribute items that contains a list of HTTP methods that CloudFront includes as values for the Access-Control-Allow-Methods HTTP response header. Valid values: GET | POST | OPTIONS | PUT | DELETE | HEAD | ALL + AccessControlAllowMethods *AccessControlAllowMethodsInitParameters `json:"accessControlAllowMethods,omitempty" tf:"access_control_allow_methods,omitempty"` + + // Object that contains an attribute items that contains a list of origins that CloudFront can use as the value for the Access-Control-Allow-Origin HTTP response header. + AccessControlAllowOrigins *AccessControlAllowOriginsInitParameters `json:"accessControlAllowOrigins,omitempty" tf:"access_control_allow_origins,omitempty"` + + // Object that contains an attribute items that contains a list of HTTP headers that CloudFront includes as values for the Access-Control-Expose-Headers HTTP response header. + AccessControlExposeHeaders *AccessControlExposeHeadersInitParameters `json:"accessControlExposeHeaders,omitempty" tf:"access_control_expose_headers,omitempty"` + + // A number that CloudFront uses as the value for the Access-Control-Max-Age HTTP response header. + AccessControlMaxAgeSec *float64 `json:"accessControlMaxAgeSec,omitempty" tf:"access_control_max_age_sec,omitempty"` + + // A Boolean value that determines how CloudFront behaves for the HTTP response header. + OriginOverride *bool `json:"originOverride,omitempty" tf:"origin_override,omitempty"` +} + +type CorsConfigObservation struct { + + // A Boolean value that CloudFront uses as the value for the Access-Control-Allow-Credentials HTTP response header. 
+ AccessControlAllowCredentials *bool `json:"accessControlAllowCredentials,omitempty" tf:"access_control_allow_credentials,omitempty"` + + // Object that contains an attribute items that contains a list of HTTP header names that CloudFront includes as values for the Access-Control-Allow-Headers HTTP response header. + AccessControlAllowHeaders *AccessControlAllowHeadersObservation `json:"accessControlAllowHeaders,omitempty" tf:"access_control_allow_headers,omitempty"` + + // Object that contains an attribute items that contains a list of HTTP methods that CloudFront includes as values for the Access-Control-Allow-Methods HTTP response header. Valid values: GET | POST | OPTIONS | PUT | DELETE | HEAD | ALL + AccessControlAllowMethods *AccessControlAllowMethodsObservation `json:"accessControlAllowMethods,omitempty" tf:"access_control_allow_methods,omitempty"` + + // Object that contains an attribute items that contains a list of origins that CloudFront can use as the value for the Access-Control-Allow-Origin HTTP response header. + AccessControlAllowOrigins *AccessControlAllowOriginsObservation `json:"accessControlAllowOrigins,omitempty" tf:"access_control_allow_origins,omitempty"` + + // Object that contains an attribute items that contains a list of HTTP headers that CloudFront includes as values for the Access-Control-Expose-Headers HTTP response header. + AccessControlExposeHeaders *AccessControlExposeHeadersObservation `json:"accessControlExposeHeaders,omitempty" tf:"access_control_expose_headers,omitempty"` + + // A number that CloudFront uses as the value for the Access-Control-Max-Age HTTP response header. + AccessControlMaxAgeSec *float64 `json:"accessControlMaxAgeSec,omitempty" tf:"access_control_max_age_sec,omitempty"` + + // A Boolean value that determines how CloudFront behaves for the HTTP response header. 
+ OriginOverride *bool `json:"originOverride,omitempty" tf:"origin_override,omitempty"` +} + +type CorsConfigParameters struct { + + // A Boolean value that CloudFront uses as the value for the Access-Control-Allow-Credentials HTTP response header. + // +kubebuilder:validation:Optional + AccessControlAllowCredentials *bool `json:"accessControlAllowCredentials" tf:"access_control_allow_credentials,omitempty"` + + // Object that contains an attribute items that contains a list of HTTP header names that CloudFront includes as values for the Access-Control-Allow-Headers HTTP response header. + // +kubebuilder:validation:Optional + AccessControlAllowHeaders *AccessControlAllowHeadersParameters `json:"accessControlAllowHeaders" tf:"access_control_allow_headers,omitempty"` + + // Object that contains an attribute items that contains a list of HTTP methods that CloudFront includes as values for the Access-Control-Allow-Methods HTTP response header. Valid values: GET | POST | OPTIONS | PUT | DELETE | HEAD | ALL + // +kubebuilder:validation:Optional + AccessControlAllowMethods *AccessControlAllowMethodsParameters `json:"accessControlAllowMethods" tf:"access_control_allow_methods,omitempty"` + + // Object that contains an attribute items that contains a list of origins that CloudFront can use as the value for the Access-Control-Allow-Origin HTTP response header. + // +kubebuilder:validation:Optional + AccessControlAllowOrigins *AccessControlAllowOriginsParameters `json:"accessControlAllowOrigins" tf:"access_control_allow_origins,omitempty"` + + // Object that contains an attribute items that contains a list of HTTP headers that CloudFront includes as values for the Access-Control-Expose-Headers HTTP response header. 
+ // +kubebuilder:validation:Optional + AccessControlExposeHeaders *AccessControlExposeHeadersParameters `json:"accessControlExposeHeaders,omitempty" tf:"access_control_expose_headers,omitempty"` + + // A number that CloudFront uses as the value for the Access-Control-Max-Age HTTP response header. + // +kubebuilder:validation:Optional + AccessControlMaxAgeSec *float64 `json:"accessControlMaxAgeSec,omitempty" tf:"access_control_max_age_sec,omitempty"` + + // A Boolean value that determines how CloudFront behaves for the HTTP response header. + // +kubebuilder:validation:Optional + OriginOverride *bool `json:"originOverride" tf:"origin_override,omitempty"` +} + +type CustomHeadersConfigInitParameters struct { + Items []CustomHeadersConfigItemsInitParameters `json:"items,omitempty" tf:"items,omitempty"` +} + +type CustomHeadersConfigItemsInitParameters struct { + + // The HTTP response header name. + Header *string `json:"header,omitempty" tf:"header,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` + + // The value for the HTTP response header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CustomHeadersConfigItemsObservation struct { + + // The HTTP response header name. + Header *string `json:"header,omitempty" tf:"header,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` + + // The value for the HTTP response header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CustomHeadersConfigItemsParameters struct { + + // The HTTP response header name. 
+ // +kubebuilder:validation:Optional + Header *string `json:"header" tf:"header,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + // +kubebuilder:validation:Optional + Override *bool `json:"override" tf:"override,omitempty"` + + // The value for the HTTP response header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type CustomHeadersConfigObservation struct { + Items []CustomHeadersConfigItemsObservation `json:"items,omitempty" tf:"items,omitempty"` +} + +type CustomHeadersConfigParameters struct { + + // +kubebuilder:validation:Optional + Items []CustomHeadersConfigItemsParameters `json:"items,omitempty" tf:"items,omitempty"` +} + +type FrameOptionsInitParameters struct { + + // The value of the X-Frame-Options HTTP response header. Valid values: DENY | SAMEORIGIN + FrameOption *string `json:"frameOption,omitempty" tf:"frame_option,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` +} + +type FrameOptionsObservation struct { + + // The value of the X-Frame-Options HTTP response header. Valid values: DENY | SAMEORIGIN + FrameOption *string `json:"frameOption,omitempty" tf:"frame_option,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` +} + +type FrameOptionsParameters struct { + + // The value of the X-Frame-Options HTTP response header. Valid values: DENY | SAMEORIGIN + // +kubebuilder:validation:Optional + FrameOption *string `json:"frameOption" tf:"frame_option,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. 
+ // +kubebuilder:validation:Optional + Override *bool `json:"override" tf:"override,omitempty"` +} + +type ReferrerPolicyInitParameters struct { + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` + + // Determines whether CloudFront includes the Referrer-Policy HTTP response header and the header’s value. See Referrer Policy for more information. + ReferrerPolicy *string `json:"referrerPolicy,omitempty" tf:"referrer_policy,omitempty"` +} + +type ReferrerPolicyObservation struct { + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` + + // Determines whether CloudFront includes the Referrer-Policy HTTP response header and the header’s value. See Referrer Policy for more information. + ReferrerPolicy *string `json:"referrerPolicy,omitempty" tf:"referrer_policy,omitempty"` +} + +type ReferrerPolicyParameters struct { + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + // +kubebuilder:validation:Optional + Override *bool `json:"override" tf:"override,omitempty"` + + // Determines whether CloudFront includes the Referrer-Policy HTTP response header and the header’s value. See Referrer Policy for more information. + // +kubebuilder:validation:Optional + ReferrerPolicy *string `json:"referrerPolicy" tf:"referrer_policy,omitempty"` +} + +type RemoveHeadersConfigInitParameters struct { + Items []RemoveHeadersConfigItemsInitParameters `json:"items,omitempty" tf:"items,omitempty"` +} + +type RemoveHeadersConfigItemsInitParameters struct { + + // The HTTP response header name. 
+ Header *string `json:"header,omitempty" tf:"header,omitempty"` +} + +type RemoveHeadersConfigItemsObservation struct { + + // The HTTP response header name. + Header *string `json:"header,omitempty" tf:"header,omitempty"` +} + +type RemoveHeadersConfigItemsParameters struct { + + // The HTTP response header name. + // +kubebuilder:validation:Optional + Header *string `json:"header" tf:"header,omitempty"` +} + +type RemoveHeadersConfigObservation struct { + Items []RemoveHeadersConfigItemsObservation `json:"items,omitempty" tf:"items,omitempty"` +} + +type RemoveHeadersConfigParameters struct { + + // +kubebuilder:validation:Optional + Items []RemoveHeadersConfigItemsParameters `json:"items,omitempty" tf:"items,omitempty"` +} + +type ResponseHeadersPolicyInitParameters struct { + + // A comment to describe the response headers policy. The comment cannot be longer than 128 characters. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // A configuration for a set of HTTP response headers that are used for Cross-Origin Resource Sharing (CORS). See Cors Config for more information. + CorsConfig *CorsConfigInitParameters `json:"corsConfig,omitempty" tf:"cors_config,omitempty"` + + // Object that contains an attribute items that contains a list of custom headers. See Custom Header for more information. + CustomHeadersConfig *CustomHeadersConfigInitParameters `json:"customHeadersConfig,omitempty" tf:"custom_headers_config,omitempty"` + + // The current version of the response headers policy. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A unique name to identify the response headers policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A configuration for a set of HTTP headers to remove from the HTTP response. Object that contains an attribute items that contains a list of headers. See Remove Header for more information. 
+ RemoveHeadersConfig *RemoveHeadersConfigInitParameters `json:"removeHeadersConfig,omitempty" tf:"remove_headers_config,omitempty"` + + // A configuration for a set of security-related HTTP response headers. See Security Headers Config for more information. + SecurityHeadersConfig *SecurityHeadersConfigInitParameters `json:"securityHeadersConfig,omitempty" tf:"security_headers_config,omitempty"` + + // A configuration for enabling the Server-Timing header in HTTP responses sent from CloudFront. See Server Timing Headers Config for more information. + ServerTimingHeadersConfig *ServerTimingHeadersConfigInitParameters `json:"serverTimingHeadersConfig,omitempty" tf:"server_timing_headers_config,omitempty"` +} + +type ResponseHeadersPolicyObservation struct { + + // A comment to describe the response headers policy. The comment cannot be longer than 128 characters. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // A configuration for a set of HTTP response headers that are used for Cross-Origin Resource Sharing (CORS). See Cors Config for more information. + CorsConfig *CorsConfigObservation `json:"corsConfig,omitempty" tf:"cors_config,omitempty"` + + // Object that contains an attribute items that contains a list of custom headers. See Custom Header for more information. + CustomHeadersConfig *CustomHeadersConfigObservation `json:"customHeadersConfig,omitempty" tf:"custom_headers_config,omitempty"` + + // The current version of the response headers policy. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // The identifier for the response headers policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A unique name to identify the response headers policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A configuration for a set of HTTP headers to remove from the HTTP response. Object that contains an attribute items that contains a list of headers. See Remove Header for more information. 
+ RemoveHeadersConfig *RemoveHeadersConfigObservation `json:"removeHeadersConfig,omitempty" tf:"remove_headers_config,omitempty"` + + // A configuration for a set of security-related HTTP response headers. See Security Headers Config for more information. + SecurityHeadersConfig *SecurityHeadersConfigObservation `json:"securityHeadersConfig,omitempty" tf:"security_headers_config,omitempty"` + + // A configuration for enabling the Server-Timing header in HTTP responses sent from CloudFront. See Server Timing Headers Config for more information. + ServerTimingHeadersConfig *ServerTimingHeadersConfigObservation `json:"serverTimingHeadersConfig,omitempty" tf:"server_timing_headers_config,omitempty"` +} + +type ResponseHeadersPolicyParameters struct { + + // A comment to describe the response headers policy. The comment cannot be longer than 128 characters. + // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // A configuration for a set of HTTP response headers that are used for Cross-Origin Resource Sharing (CORS). See Cors Config for more information. + // +kubebuilder:validation:Optional + CorsConfig *CorsConfigParameters `json:"corsConfig,omitempty" tf:"cors_config,omitempty"` + + // Object that contains an attribute items that contains a list of custom headers. See Custom Header for more information. + // +kubebuilder:validation:Optional + CustomHeadersConfig *CustomHeadersConfigParameters `json:"customHeadersConfig,omitempty" tf:"custom_headers_config,omitempty"` + + // The current version of the response headers policy. + // +kubebuilder:validation:Optional + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A unique name to identify the response headers policy. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A configuration for a set of HTTP headers to remove from the HTTP response. Object that contains an attribute items that contains a list of headers. See Remove Header for more information. + // +kubebuilder:validation:Optional + RemoveHeadersConfig *RemoveHeadersConfigParameters `json:"removeHeadersConfig,omitempty" tf:"remove_headers_config,omitempty"` + + // A configuration for a set of security-related HTTP response headers. See Security Headers Config for more information. + // +kubebuilder:validation:Optional + SecurityHeadersConfig *SecurityHeadersConfigParameters `json:"securityHeadersConfig,omitempty" tf:"security_headers_config,omitempty"` + + // A configuration for enabling the Server-Timing header in HTTP responses sent from CloudFront. See Server Timing Headers Config for more information. + // +kubebuilder:validation:Optional + ServerTimingHeadersConfig *ServerTimingHeadersConfigParameters `json:"serverTimingHeadersConfig,omitempty" tf:"server_timing_headers_config,omitempty"` +} + +type SecurityHeadersConfigInitParameters struct { + + // The policy directives and their values that CloudFront includes as values for the Content-Security-Policy HTTP response header. See Content Security Policy for more information. + ContentSecurityPolicy *ContentSecurityPolicyInitParameters `json:"contentSecurityPolicy,omitempty" tf:"content_security_policy,omitempty"` + + // Determines whether CloudFront includes the X-Content-Type-Options HTTP response header with its value set to nosniff. See Content Type Options for more information. + ContentTypeOptions *ContentTypeOptionsInitParameters `json:"contentTypeOptions,omitempty" tf:"content_type_options,omitempty"` + + // Determines whether CloudFront includes the X-Frame-Options HTTP response header and the header’s value. See Frame Options for more information. 
+ FrameOptions *FrameOptionsInitParameters `json:"frameOptions,omitempty" tf:"frame_options,omitempty"` + + // Determines whether CloudFront includes the Referrer-Policy HTTP response header and the header’s value. See Referrer Policy for more information. + ReferrerPolicy *ReferrerPolicyInitParameters `json:"referrerPolicy,omitempty" tf:"referrer_policy,omitempty"` + + // Determines whether CloudFront includes the Strict-Transport-Security HTTP response header and the header’s value. See Strict Transport Security for more information. + StrictTransportSecurity *StrictTransportSecurityInitParameters `json:"strictTransportSecurity,omitempty" tf:"strict_transport_security,omitempty"` + + // Determine whether CloudFront includes the X-XSS-Protection HTTP response header and the header’s value. See XSS Protection for more information. + XSSProtection *XSSProtectionInitParameters `json:"xssProtection,omitempty" tf:"xss_protection,omitempty"` +} + +type SecurityHeadersConfigObservation struct { + + // The policy directives and their values that CloudFront includes as values for the Content-Security-Policy HTTP response header. See Content Security Policy for more information. + ContentSecurityPolicy *ContentSecurityPolicyObservation `json:"contentSecurityPolicy,omitempty" tf:"content_security_policy,omitempty"` + + // Determines whether CloudFront includes the X-Content-Type-Options HTTP response header with its value set to nosniff. See Content Type Options for more information. + ContentTypeOptions *ContentTypeOptionsObservation `json:"contentTypeOptions,omitempty" tf:"content_type_options,omitempty"` + + // Determines whether CloudFront includes the X-Frame-Options HTTP response header and the header’s value. See Frame Options for more information. + FrameOptions *FrameOptionsObservation `json:"frameOptions,omitempty" tf:"frame_options,omitempty"` + + // Determines whether CloudFront includes the Referrer-Policy HTTP response header and the header’s value. 
See Referrer Policy for more information. + ReferrerPolicy *ReferrerPolicyObservation `json:"referrerPolicy,omitempty" tf:"referrer_policy,omitempty"` + + // Determines whether CloudFront includes the Strict-Transport-Security HTTP response header and the header’s value. See Strict Transport Security for more information. + StrictTransportSecurity *StrictTransportSecurityObservation `json:"strictTransportSecurity,omitempty" tf:"strict_transport_security,omitempty"` + + // Determine whether CloudFront includes the X-XSS-Protection HTTP response header and the header’s value. See XSS Protection for more information. + XSSProtection *XSSProtectionObservation `json:"xssProtection,omitempty" tf:"xss_protection,omitempty"` +} + +type SecurityHeadersConfigParameters struct { + + // The policy directives and their values that CloudFront includes as values for the Content-Security-Policy HTTP response header. See Content Security Policy for more information. + // +kubebuilder:validation:Optional + ContentSecurityPolicy *ContentSecurityPolicyParameters `json:"contentSecurityPolicy,omitempty" tf:"content_security_policy,omitempty"` + + // Determines whether CloudFront includes the X-Content-Type-Options HTTP response header with its value set to nosniff. See Content Type Options for more information. + // +kubebuilder:validation:Optional + ContentTypeOptions *ContentTypeOptionsParameters `json:"contentTypeOptions,omitempty" tf:"content_type_options,omitempty"` + + // Determines whether CloudFront includes the X-Frame-Options HTTP response header and the header’s value. See Frame Options for more information. + // +kubebuilder:validation:Optional + FrameOptions *FrameOptionsParameters `json:"frameOptions,omitempty" tf:"frame_options,omitempty"` + + // Determines whether CloudFront includes the Referrer-Policy HTTP response header and the header’s value. See Referrer Policy for more information. 
+ // +kubebuilder:validation:Optional + ReferrerPolicy *ReferrerPolicyParameters `json:"referrerPolicy,omitempty" tf:"referrer_policy,omitempty"` + + // Determines whether CloudFront includes the Strict-Transport-Security HTTP response header and the header’s value. See Strict Transport Security for more information. + // +kubebuilder:validation:Optional + StrictTransportSecurity *StrictTransportSecurityParameters `json:"strictTransportSecurity,omitempty" tf:"strict_transport_security,omitempty"` + + // Determine whether CloudFront includes the X-XSS-Protection HTTP response header and the header’s value. See XSS Protection for more information. + // +kubebuilder:validation:Optional + XSSProtection *XSSProtectionParameters `json:"xssProtection,omitempty" tf:"xss_protection,omitempty"` +} + +type ServerTimingHeadersConfigInitParameters struct { + + // A Whether CloudFront adds the Server-Timing header to HTTP responses that it sends in response to requests that match a cache behavior that's associated with this response headers policy. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A number 0–100 (inclusive) that specifies the percentage of responses that you want CloudFront to add the Server-Timing header to. Valid range: Minimum value of 0.0. Maximum value of 100.0. + SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type ServerTimingHeadersConfigObservation struct { + + // A Whether CloudFront adds the Server-Timing header to HTTP responses that it sends in response to requests that match a cache behavior that's associated with this response headers policy. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A number 0–100 (inclusive) that specifies the percentage of responses that you want CloudFront to add the Server-Timing header to. Valid range: Minimum value of 0.0. Maximum value of 100.0. 
+ SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type ServerTimingHeadersConfigParameters struct { + + // A Whether CloudFront adds the Server-Timing header to HTTP responses that it sends in response to requests that match a cache behavior that's associated with this response headers policy. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A number 0–100 (inclusive) that specifies the percentage of responses that you want CloudFront to add the Server-Timing header to. Valid range: Minimum value of 0.0. Maximum value of 100.0. + // +kubebuilder:validation:Optional + SamplingRate *float64 `json:"samplingRate" tf:"sampling_rate,omitempty"` +} + +type StrictTransportSecurityInitParameters struct { + + // A number that CloudFront uses as the value for the Access-Control-Max-Age HTTP response header. + AccessControlMaxAgeSec *float64 `json:"accessControlMaxAgeSec,omitempty" tf:"access_control_max_age_sec,omitempty"` + + // Whether CloudFront includes the includeSubDomains directive in the Strict-Transport-Security HTTP response header. + IncludeSubdomains *bool `json:"includeSubdomains,omitempty" tf:"include_subdomains,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` + + // Whether CloudFront includes the preload directive in the Strict-Transport-Security HTTP response header. + Preload *bool `json:"preload,omitempty" tf:"preload,omitempty"` +} + +type StrictTransportSecurityObservation struct { + + // A number that CloudFront uses as the value for the Access-Control-Max-Age HTTP response header. 
+ AccessControlMaxAgeSec *float64 `json:"accessControlMaxAgeSec,omitempty" tf:"access_control_max_age_sec,omitempty"` + + // Whether CloudFront includes the includeSubDomains directive in the Strict-Transport-Security HTTP response header. + IncludeSubdomains *bool `json:"includeSubdomains,omitempty" tf:"include_subdomains,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` + + // Whether CloudFront includes the preload directive in the Strict-Transport-Security HTTP response header. + Preload *bool `json:"preload,omitempty" tf:"preload,omitempty"` +} + +type StrictTransportSecurityParameters struct { + + // A number that CloudFront uses as the value for the Access-Control-Max-Age HTTP response header. + // +kubebuilder:validation:Optional + AccessControlMaxAgeSec *float64 `json:"accessControlMaxAgeSec" tf:"access_control_max_age_sec,omitempty"` + + // Whether CloudFront includes the includeSubDomains directive in the Strict-Transport-Security HTTP response header. + // +kubebuilder:validation:Optional + IncludeSubdomains *bool `json:"includeSubdomains,omitempty" tf:"include_subdomains,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + // +kubebuilder:validation:Optional + Override *bool `json:"override" tf:"override,omitempty"` + + // Whether CloudFront includes the preload directive in the Strict-Transport-Security HTTP response header. + // +kubebuilder:validation:Optional + Preload *bool `json:"preload,omitempty" tf:"preload,omitempty"` +} + +type XSSProtectionInitParameters struct { + + // Whether CloudFront includes the mode=block directive in the X-XSS-Protection header. 
+ ModeBlock *bool `json:"modeBlock,omitempty" tf:"mode_block,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` + + // A Boolean value that determines the value of the X-XSS-Protection HTTP response header. When this setting is true, the value of the X-XSS-Protection header is 1. When this setting is false, the value of the X-XSS-Protection header is 0. + Protection *bool `json:"protection,omitempty" tf:"protection,omitempty"` + + // A reporting URI, which CloudFront uses as the value of the report directive in the X-XSS-Protection header. You cannot specify a report_uri when mode_block is true. + ReportURI *string `json:"reportUri,omitempty" tf:"report_uri,omitempty"` +} + +type XSSProtectionObservation struct { + + // Whether CloudFront includes the mode=block directive in the X-XSS-Protection header. + ModeBlock *bool `json:"modeBlock,omitempty" tf:"mode_block,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + Override *bool `json:"override,omitempty" tf:"override,omitempty"` + + // A Boolean value that determines the value of the X-XSS-Protection HTTP response header. When this setting is true, the value of the X-XSS-Protection header is 1. When this setting is false, the value of the X-XSS-Protection header is 0. + Protection *bool `json:"protection,omitempty" tf:"protection,omitempty"` + + // A reporting URI, which CloudFront uses as the value of the report directive in the X-XSS-Protection header. You cannot specify a report_uri when mode_block is true. + ReportURI *string `json:"reportUri,omitempty" tf:"report_uri,omitempty"` +} + +type XSSProtectionParameters struct { + + // Whether CloudFront includes the mode=block directive in the X-XSS-Protection header. 
+ // +kubebuilder:validation:Optional + ModeBlock *bool `json:"modeBlock,omitempty" tf:"mode_block,omitempty"` + + // Whether CloudFront overrides a response header with the same name received from the origin with the header specifies here. + // +kubebuilder:validation:Optional + Override *bool `json:"override" tf:"override,omitempty"` + + // A Boolean value that determines the value of the X-XSS-Protection HTTP response header. When this setting is true, the value of the X-XSS-Protection header is 1. When this setting is false, the value of the X-XSS-Protection header is 0. + // +kubebuilder:validation:Optional + Protection *bool `json:"protection" tf:"protection,omitempty"` + + // A reporting URI, which CloudFront uses as the value of the report directive in the X-XSS-Protection header. You cannot specify a report_uri when mode_block is true. + // +kubebuilder:validation:Optional + ReportURI *string `json:"reportUri,omitempty" tf:"report_uri,omitempty"` +} + +// ResponseHeadersPolicySpec defines the desired state of ResponseHeadersPolicy +type ResponseHeadersPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResponseHeadersPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ResponseHeadersPolicyInitParameters `json:"initProvider,omitempty"` +} + +// ResponseHeadersPolicyStatus defines the observed state of ResponseHeadersPolicy. +type ResponseHeadersPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResponseHeadersPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ResponseHeadersPolicy is the Schema for the ResponseHeadersPolicys API. Provides a CloudFront response headers policy resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ResponseHeadersPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ResponseHeadersPolicySpec `json:"spec"` + Status ResponseHeadersPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResponseHeadersPolicyList contains a list of ResponseHeadersPolicys +type ResponseHeadersPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResponseHeadersPolicy `json:"items"` +} + +// Repository type metadata. 
+var ( + ResponseHeadersPolicy_Kind = "ResponseHeadersPolicy" + ResponseHeadersPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ResponseHeadersPolicy_Kind}.String() + ResponseHeadersPolicy_KindAPIVersion = ResponseHeadersPolicy_Kind + "." + CRDGroupVersion.String() + ResponseHeadersPolicy_GroupVersionKind = CRDGroupVersion.WithKind(ResponseHeadersPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&ResponseHeadersPolicy{}, &ResponseHeadersPolicyList{}) +} diff --git a/apis/cloudsearch/v1beta1/zz_domainserviceaccesspolicy_types.go b/apis/cloudsearch/v1beta1/zz_domainserviceaccesspolicy_types.go index 1a0da4fcf3..2cf010428a 100755 --- a/apis/cloudsearch/v1beta1/zz_domainserviceaccesspolicy_types.go +++ b/apis/cloudsearch/v1beta1/zz_domainserviceaccesspolicy_types.go @@ -19,7 +19,7 @@ type DomainServiceAccessPolicyInitParameters struct { AccessPolicy *string `json:"accessPolicy,omitempty" tf:"access_policy,omitempty"` // The CloudSearch domain name the policy applies to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudsearch/v1beta1.Domain + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudsearch/v1beta2.Domain // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` @@ -50,7 +50,7 @@ type DomainServiceAccessPolicyParameters struct { AccessPolicy *string `json:"accessPolicy,omitempty" tf:"access_policy,omitempty"` // The CloudSearch domain name the policy applies to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudsearch/v1beta1.Domain + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudsearch/v1beta2.Domain // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` diff --git a/apis/cloudsearch/v1beta1/zz_generated.conversion_hubs.go b/apis/cloudsearch/v1beta1/zz_generated.conversion_hubs.go index 6e484f0b2b..954c10b17a 100755 --- a/apis/cloudsearch/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cloudsearch/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Domain) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DomainServiceAccessPolicy) Hub() {} diff --git a/apis/cloudsearch/v1beta1/zz_generated.conversion_spokes.go b/apis/cloudsearch/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..f4ea7317b9 --- /dev/null +++ b/apis/cloudsearch/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Domain to the hub type. 
+func (tr *Domain) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Domain type. +func (tr *Domain) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cloudsearch/v1beta1/zz_generated.resolvers.go b/apis/cloudsearch/v1beta1/zz_generated.resolvers.go index 03092ca766..fda027cff1 100644 --- a/apis/cloudsearch/v1beta1/zz_generated.resolvers.go +++ b/apis/cloudsearch/v1beta1/zz_generated.resolvers.go @@ -9,9 +9,10 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" apisresolver "github.com/upbound/provider-aws/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -25,7 +26,7 @@ func (mg *DomainServiceAccessPolicy) ResolveReferences( // ResolveReferences of var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cloudsearch.aws.upbound.io", "v1beta1", "Domain", "DomainList") + m, l, err = apisresolver.GetManagedResource("cloudsearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") if err != 
nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -44,7 +45,7 @@ func (mg *DomainServiceAccessPolicy) ResolveReferences( // ResolveReferences of mg.Spec.ForProvider.DomainName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DomainNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cloudsearch.aws.upbound.io", "v1beta1", "Domain", "DomainList") + m, l, err = apisresolver.GetManagedResource("cloudsearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cloudsearch/v1beta2/zz_domain_terraformed.go b/apis/cloudsearch/v1beta2/zz_domain_terraformed.go new file mode 100755 index 0000000000..25155d488d --- /dev/null +++ b/apis/cloudsearch/v1beta2/zz_domain_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Domain +func (mg *Domain) GetTerraformResourceType() string { + return "aws_cloudsearch_domain" +} + +// GetConnectionDetailsMapping for this Domain +func (tr *Domain) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Domain +func (tr *Domain) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Domain +func (tr *Domain) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Domain +func (tr *Domain) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Domain +func (tr *Domain) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Domain +func (tr *Domain) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Domain +func (tr *Domain) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters 
of this Domain +func (tr *Domain) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Domain using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Domain) LateInitialize(attrs []byte) (bool, error) { + params := &DomainParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Domain) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudsearch/v1beta2/zz_domain_types.go b/apis/cloudsearch/v1beta2/zz_domain_types.go new file mode 100755 index 0000000000..ba57238dcf --- /dev/null +++ b/apis/cloudsearch/v1beta2/zz_domain_types.go @@ -0,0 +1,319 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DomainInitParameters struct { + + // Domain endpoint options. Documented below. + EndpointOptions *EndpointOptionsInitParameters `json:"endpointOptions,omitempty" tf:"endpoint_options,omitempty"` + + // The index fields for documents added to the domain. Documented below. + IndexField []IndexFieldInitParameters `json:"indexField,omitempty" tf:"index_field,omitempty"` + + // Whether or not to maintain extra instances for the domain in a second Availability Zone to ensure high availability. + MultiAz *bool `json:"multiAz,omitempty" tf:"multi_az,omitempty"` + + // Domain scaling parameters. Documented below. + ScalingParameters *ScalingParametersInitParameters `json:"scalingParameters,omitempty" tf:"scaling_parameters,omitempty"` +} + +type DomainObservation struct { + + // The domain's ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The service endpoint for updating documents in a search domain. + DocumentServiceEndpoint *string `json:"documentServiceEndpoint,omitempty" tf:"document_service_endpoint,omitempty"` + + // An internally generated unique identifier for the domain. + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // Domain endpoint options. 
Documented below. + EndpointOptions *EndpointOptionsObservation `json:"endpointOptions,omitempty" tf:"endpoint_options,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The index fields for documents added to the domain. Documented below. + IndexField []IndexFieldObservation `json:"indexField,omitempty" tf:"index_field,omitempty"` + + // Whether or not to maintain extra instances for the domain in a second Availability Zone to ensure high availability. + MultiAz *bool `json:"multiAz,omitempty" tf:"multi_az,omitempty"` + + // Domain scaling parameters. Documented below. + ScalingParameters *ScalingParametersObservation `json:"scalingParameters,omitempty" tf:"scaling_parameters,omitempty"` + + // The service endpoint for requesting search results from a search domain. + SearchServiceEndpoint *string `json:"searchServiceEndpoint,omitempty" tf:"search_service_endpoint,omitempty"` +} + +type DomainParameters struct { + + // Domain endpoint options. Documented below. + // +kubebuilder:validation:Optional + EndpointOptions *EndpointOptionsParameters `json:"endpointOptions,omitempty" tf:"endpoint_options,omitempty"` + + // The index fields for documents added to the domain. Documented below. + // +kubebuilder:validation:Optional + IndexField []IndexFieldParameters `json:"indexField,omitempty" tf:"index_field,omitempty"` + + // Whether or not to maintain extra instances for the domain in a second Availability Zone to ensure high availability. + // +kubebuilder:validation:Optional + MultiAz *bool `json:"multiAz,omitempty" tf:"multi_az,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Domain scaling parameters. Documented below. 
+ // +kubebuilder:validation:Optional + ScalingParameters *ScalingParametersParameters `json:"scalingParameters,omitempty" tf:"scaling_parameters,omitempty"` +} + +type EndpointOptionsInitParameters struct { + + // Enables or disables the requirement that all requests to the domain arrive over HTTPS. + EnforceHTTPS *bool `json:"enforceHttps,omitempty" tf:"enforce_https,omitempty"` + + // The minimum required TLS version. See the AWS documentation for valid values. + TLSSecurityPolicy *string `json:"tlsSecurityPolicy,omitempty" tf:"tls_security_policy,omitempty"` +} + +type EndpointOptionsObservation struct { + + // Enables or disables the requirement that all requests to the domain arrive over HTTPS. + EnforceHTTPS *bool `json:"enforceHttps,omitempty" tf:"enforce_https,omitempty"` + + // The minimum required TLS version. See the AWS documentation for valid values. + TLSSecurityPolicy *string `json:"tlsSecurityPolicy,omitempty" tf:"tls_security_policy,omitempty"` +} + +type EndpointOptionsParameters struct { + + // Enables or disables the requirement that all requests to the domain arrive over HTTPS. + // +kubebuilder:validation:Optional + EnforceHTTPS *bool `json:"enforceHttps,omitempty" tf:"enforce_https,omitempty"` + + // The minimum required TLS version. See the AWS documentation for valid values. + // +kubebuilder:validation:Optional + TLSSecurityPolicy *string `json:"tlsSecurityPolicy,omitempty" tf:"tls_security_policy,omitempty"` +} + +type IndexFieldInitParameters struct { + + // The analysis scheme you want to use for a text field. The analysis scheme specifies the language-specific text processing options that are used during indexing. + AnalysisScheme *string `json:"analysisScheme,omitempty" tf:"analysis_scheme,omitempty"` + + // The default value for the field. This value is used when no value is specified for the field in the document data. 
+ DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // You can get facet information by enabling this. + Facet *bool `json:"facet,omitempty" tf:"facet,omitempty"` + + // You can highlight information. + Highlight *bool `json:"highlight,omitempty" tf:"highlight,omitempty"` + + // The name of the CloudSearch domain. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // You can enable returning the value of all searchable fields. + Return *bool `json:"return,omitempty" tf:"return,omitempty"` + + // You can set whether this index should be searchable or not. + Search *bool `json:"search,omitempty" tf:"search,omitempty"` + + // You can enable the property to be sortable. + Sort *bool `json:"sort,omitempty" tf:"sort,omitempty"` + + // A comma-separated list of source fields to map to the field. Specifying a source field copies data from one field to another, enabling you to use the same source data in different ways by configuring different options for the fields. + SourceFields *string `json:"sourceFields,omitempty" tf:"source_fields,omitempty"` + + // The field type. Valid values: date, date-array, double, double-array, int, int-array, literal, literal-array, text, text-array. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IndexFieldObservation struct { + + // The analysis scheme you want to use for a text field. The analysis scheme specifies the language-specific text processing options that are used during indexing. + AnalysisScheme *string `json:"analysisScheme,omitempty" tf:"analysis_scheme,omitempty"` + + // The default value for the field. This value is used when no value is specified for the field in the document data. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // You can get facet information by enabling this. + Facet *bool `json:"facet,omitempty" tf:"facet,omitempty"` + + // You can highlight information. 
+ Highlight *bool `json:"highlight,omitempty" tf:"highlight,omitempty"` + + // The name of the CloudSearch domain. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // You can enable returning the value of all searchable fields. + Return *bool `json:"return,omitempty" tf:"return,omitempty"` + + // You can set whether this index should be searchable or not. + Search *bool `json:"search,omitempty" tf:"search,omitempty"` + + // You can enable the property to be sortable. + Sort *bool `json:"sort,omitempty" tf:"sort,omitempty"` + + // A comma-separated list of source fields to map to the field. Specifying a source field copies data from one field to another, enabling you to use the same source data in different ways by configuring different options for the fields. + SourceFields *string `json:"sourceFields,omitempty" tf:"source_fields,omitempty"` + + // The field type. Valid values: date, date-array, double, double-array, int, int-array, literal, literal-array, text, text-array. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IndexFieldParameters struct { + + // The analysis scheme you want to use for a text field. The analysis scheme specifies the language-specific text processing options that are used during indexing. + // +kubebuilder:validation:Optional + AnalysisScheme *string `json:"analysisScheme,omitempty" tf:"analysis_scheme,omitempty"` + + // The default value for the field. This value is used when no value is specified for the field in the document data. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // You can get facet information by enabling this. + // +kubebuilder:validation:Optional + Facet *bool `json:"facet,omitempty" tf:"facet,omitempty"` + + // You can highlight information. + // +kubebuilder:validation:Optional + Highlight *bool `json:"highlight,omitempty" tf:"highlight,omitempty"` + + // The name of the CloudSearch domain. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // You can enable returning the value of all searchable fields. + // +kubebuilder:validation:Optional + Return *bool `json:"return,omitempty" tf:"return,omitempty"` + + // You can set whether this index should be searchable or not. + // +kubebuilder:validation:Optional + Search *bool `json:"search,omitempty" tf:"search,omitempty"` + + // You can enable the property to be sortable. + // +kubebuilder:validation:Optional + Sort *bool `json:"sort,omitempty" tf:"sort,omitempty"` + + // A comma-separated list of source fields to map to the field. Specifying a source field copies data from one field to another, enabling you to use the same source data in different ways by configuring different options for the fields. + // +kubebuilder:validation:Optional + SourceFields *string `json:"sourceFields,omitempty" tf:"source_fields,omitempty"` + + // The field type. Valid values: date, date-array, double, double-array, int, int-array, literal, literal-array, text, text-array. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ScalingParametersInitParameters struct { + + // The instance type that you want to preconfigure for your domain. See the AWS documentation for valid values. + DesiredInstanceType *string `json:"desiredInstanceType,omitempty" tf:"desired_instance_type,omitempty"` + + // The number of partitions you want to preconfigure for your domain. Only valid when you select search.2xlarge as the instance type. + DesiredPartitionCount *float64 `json:"desiredPartitionCount,omitempty" tf:"desired_partition_count,omitempty"` + + // The number of replicas you want to preconfigure for each index partition. 
+ DesiredReplicationCount *float64 `json:"desiredReplicationCount,omitempty" tf:"desired_replication_count,omitempty"` +} + +type ScalingParametersObservation struct { + + // The instance type that you want to preconfigure for your domain. See the AWS documentation for valid values. + DesiredInstanceType *string `json:"desiredInstanceType,omitempty" tf:"desired_instance_type,omitempty"` + + // The number of partitions you want to preconfigure for your domain. Only valid when you select search.2xlarge as the instance type. + DesiredPartitionCount *float64 `json:"desiredPartitionCount,omitempty" tf:"desired_partition_count,omitempty"` + + // The number of replicas you want to preconfigure for each index partition. + DesiredReplicationCount *float64 `json:"desiredReplicationCount,omitempty" tf:"desired_replication_count,omitempty"` +} + +type ScalingParametersParameters struct { + + // The instance type that you want to preconfigure for your domain. See the AWS documentation for valid values. + // +kubebuilder:validation:Optional + DesiredInstanceType *string `json:"desiredInstanceType,omitempty" tf:"desired_instance_type,omitempty"` + + // The number of partitions you want to preconfigure for your domain. Only valid when you select search.2xlarge as the instance type. + // +kubebuilder:validation:Optional + DesiredPartitionCount *float64 `json:"desiredPartitionCount,omitempty" tf:"desired_partition_count,omitempty"` + + // The number of replicas you want to preconfigure for each index partition. + // +kubebuilder:validation:Optional + DesiredReplicationCount *float64 `json:"desiredReplicationCount,omitempty" tf:"desired_replication_count,omitempty"` +} + +// DomainSpec defines the desired state of Domain +type DomainSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DomainParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DomainInitParameters `json:"initProvider,omitempty"` +} + +// DomainStatus defines the observed state of Domain. +type DomainStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DomainObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Domain is the Schema for the Domains API. Provides an CloudSearch domain resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Domain struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DomainSpec `json:"spec"` + Status DomainStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DomainList contains a list of Domains +type DomainList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Domain `json:"items"` +} + +// Repository type metadata. 
+var ( + Domain_Kind = "Domain" + Domain_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Domain_Kind}.String() + Domain_KindAPIVersion = Domain_Kind + "." + CRDGroupVersion.String() + Domain_GroupVersionKind = CRDGroupVersion.WithKind(Domain_Kind) +) + +func init() { + SchemeBuilder.Register(&Domain{}, &DomainList{}) +} diff --git a/apis/cloudsearch/v1beta2/zz_generated.conversion_hubs.go b/apis/cloudsearch/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2721e98cb8 --- /dev/null +++ b/apis/cloudsearch/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Domain) Hub() {} diff --git a/apis/cloudsearch/v1beta2/zz_generated.deepcopy.go b/apis/cloudsearch/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..d8949de992 --- /dev/null +++ b/apis/cloudsearch/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,608 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Domain) DeepCopyInto(out *Domain) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Domain. 
+func (in *Domain) DeepCopy() *Domain { + if in == nil { + return nil + } + out := new(Domain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Domain) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainInitParameters) DeepCopyInto(out *DomainInitParameters) { + *out = *in + if in.EndpointOptions != nil { + in, out := &in.EndpointOptions, &out.EndpointOptions + *out = new(EndpointOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IndexField != nil { + in, out := &in.IndexField, &out.IndexField + *out = make([]IndexFieldInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MultiAz != nil { + in, out := &in.MultiAz, &out.MultiAz + *out = new(bool) + **out = **in + } + if in.ScalingParameters != nil { + in, out := &in.ScalingParameters, &out.ScalingParameters + *out = new(ScalingParametersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainInitParameters. +func (in *DomainInitParameters) DeepCopy() *DomainInitParameters { + if in == nil { + return nil + } + out := new(DomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainList) DeepCopyInto(out *DomainList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Domain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainList. +func (in *DomainList) DeepCopy() *DomainList { + if in == nil { + return nil + } + out := new(DomainList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainObservation) DeepCopyInto(out *DomainObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DocumentServiceEndpoint != nil { + in, out := &in.DocumentServiceEndpoint, &out.DocumentServiceEndpoint + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.EndpointOptions != nil { + in, out := &in.EndpointOptions, &out.EndpointOptions + *out = new(EndpointOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexField != nil { + in, out := &in.IndexField, &out.IndexField + *out = make([]IndexFieldObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MultiAz != nil { + in, out := &in.MultiAz, &out.MultiAz + *out = new(bool) + **out = **in + } + if in.ScalingParameters != nil { + in, out := &in.ScalingParameters, &out.ScalingParameters + *out = 
new(ScalingParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.SearchServiceEndpoint != nil { + in, out := &in.SearchServiceEndpoint, &out.SearchServiceEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainObservation. +func (in *DomainObservation) DeepCopy() *DomainObservation { + if in == nil { + return nil + } + out := new(DomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainParameters) DeepCopyInto(out *DomainParameters) { + *out = *in + if in.EndpointOptions != nil { + in, out := &in.EndpointOptions, &out.EndpointOptions + *out = new(EndpointOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.IndexField != nil { + in, out := &in.IndexField, &out.IndexField + *out = make([]IndexFieldParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MultiAz != nil { + in, out := &in.MultiAz, &out.MultiAz + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ScalingParameters != nil { + in, out := &in.ScalingParameters, &out.ScalingParameters + *out = new(ScalingParametersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainParameters. +func (in *DomainParameters) DeepCopy() *DomainParameters { + if in == nil { + return nil + } + out := new(DomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSpec) DeepCopyInto(out *DomainSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSpec. +func (in *DomainSpec) DeepCopy() *DomainSpec { + if in == nil { + return nil + } + out := new(DomainSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainStatus) DeepCopyInto(out *DomainStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainStatus. +func (in *DomainStatus) DeepCopy() *DomainStatus { + if in == nil { + return nil + } + out := new(DomainStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointOptionsInitParameters) DeepCopyInto(out *EndpointOptionsInitParameters) { + *out = *in + if in.EnforceHTTPS != nil { + in, out := &in.EnforceHTTPS, &out.EnforceHTTPS + *out = new(bool) + **out = **in + } + if in.TLSSecurityPolicy != nil { + in, out := &in.TLSSecurityPolicy, &out.TLSSecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointOptionsInitParameters. +func (in *EndpointOptionsInitParameters) DeepCopy() *EndpointOptionsInitParameters { + if in == nil { + return nil + } + out := new(EndpointOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointOptionsObservation) DeepCopyInto(out *EndpointOptionsObservation) { + *out = *in + if in.EnforceHTTPS != nil { + in, out := &in.EnforceHTTPS, &out.EnforceHTTPS + *out = new(bool) + **out = **in + } + if in.TLSSecurityPolicy != nil { + in, out := &in.TLSSecurityPolicy, &out.TLSSecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointOptionsObservation. +func (in *EndpointOptionsObservation) DeepCopy() *EndpointOptionsObservation { + if in == nil { + return nil + } + out := new(EndpointOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointOptionsParameters) DeepCopyInto(out *EndpointOptionsParameters) { + *out = *in + if in.EnforceHTTPS != nil { + in, out := &in.EnforceHTTPS, &out.EnforceHTTPS + *out = new(bool) + **out = **in + } + if in.TLSSecurityPolicy != nil { + in, out := &in.TLSSecurityPolicy, &out.TLSSecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointOptionsParameters. +func (in *EndpointOptionsParameters) DeepCopy() *EndpointOptionsParameters { + if in == nil { + return nil + } + out := new(EndpointOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexFieldInitParameters) DeepCopyInto(out *IndexFieldInitParameters) { + *out = *in + if in.AnalysisScheme != nil { + in, out := &in.AnalysisScheme, &out.AnalysisScheme + *out = new(string) + **out = **in + } + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Facet != nil { + in, out := &in.Facet, &out.Facet + *out = new(bool) + **out = **in + } + if in.Highlight != nil { + in, out := &in.Highlight, &out.Highlight + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Return != nil { + in, out := &in.Return, &out.Return + *out = new(bool) + **out = **in + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = new(bool) + **out = **in + } + if in.Sort != nil { + in, out := &in.Sort, &out.Sort + *out = new(bool) + **out = **in + } + if in.SourceFields != nil { + in, out := &in.SourceFields, &out.SourceFields + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexFieldInitParameters. +func (in *IndexFieldInitParameters) DeepCopy() *IndexFieldInitParameters { + if in == nil { + return nil + } + out := new(IndexFieldInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexFieldObservation) DeepCopyInto(out *IndexFieldObservation) { + *out = *in + if in.AnalysisScheme != nil { + in, out := &in.AnalysisScheme, &out.AnalysisScheme + *out = new(string) + **out = **in + } + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Facet != nil { + in, out := &in.Facet, &out.Facet + *out = new(bool) + **out = **in + } + if in.Highlight != nil { + in, out := &in.Highlight, &out.Highlight + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Return != nil { + in, out := &in.Return, &out.Return + *out = new(bool) + **out = **in + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = new(bool) + **out = **in + } + if in.Sort != nil { + in, out := &in.Sort, &out.Sort + *out = new(bool) + **out = **in + } + if in.SourceFields != nil { + in, out := &in.SourceFields, &out.SourceFields + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexFieldObservation. +func (in *IndexFieldObservation) DeepCopy() *IndexFieldObservation { + if in == nil { + return nil + } + out := new(IndexFieldObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexFieldParameters) DeepCopyInto(out *IndexFieldParameters) { + *out = *in + if in.AnalysisScheme != nil { + in, out := &in.AnalysisScheme, &out.AnalysisScheme + *out = new(string) + **out = **in + } + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Facet != nil { + in, out := &in.Facet, &out.Facet + *out = new(bool) + **out = **in + } + if in.Highlight != nil { + in, out := &in.Highlight, &out.Highlight + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Return != nil { + in, out := &in.Return, &out.Return + *out = new(bool) + **out = **in + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = new(bool) + **out = **in + } + if in.Sort != nil { + in, out := &in.Sort, &out.Sort + *out = new(bool) + **out = **in + } + if in.SourceFields != nil { + in, out := &in.SourceFields, &out.SourceFields + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexFieldParameters. +func (in *IndexFieldParameters) DeepCopy() *IndexFieldParameters { + if in == nil { + return nil + } + out := new(IndexFieldParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingParametersInitParameters) DeepCopyInto(out *ScalingParametersInitParameters) { + *out = *in + if in.DesiredInstanceType != nil { + in, out := &in.DesiredInstanceType, &out.DesiredInstanceType + *out = new(string) + **out = **in + } + if in.DesiredPartitionCount != nil { + in, out := &in.DesiredPartitionCount, &out.DesiredPartitionCount + *out = new(float64) + **out = **in + } + if in.DesiredReplicationCount != nil { + in, out := &in.DesiredReplicationCount, &out.DesiredReplicationCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingParametersInitParameters. +func (in *ScalingParametersInitParameters) DeepCopy() *ScalingParametersInitParameters { + if in == nil { + return nil + } + out := new(ScalingParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingParametersObservation) DeepCopyInto(out *ScalingParametersObservation) { + *out = *in + if in.DesiredInstanceType != nil { + in, out := &in.DesiredInstanceType, &out.DesiredInstanceType + *out = new(string) + **out = **in + } + if in.DesiredPartitionCount != nil { + in, out := &in.DesiredPartitionCount, &out.DesiredPartitionCount + *out = new(float64) + **out = **in + } + if in.DesiredReplicationCount != nil { + in, out := &in.DesiredReplicationCount, &out.DesiredReplicationCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingParametersObservation. +func (in *ScalingParametersObservation) DeepCopy() *ScalingParametersObservation { + if in == nil { + return nil + } + out := new(ScalingParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingParametersParameters) DeepCopyInto(out *ScalingParametersParameters) { + *out = *in + if in.DesiredInstanceType != nil { + in, out := &in.DesiredInstanceType, &out.DesiredInstanceType + *out = new(string) + **out = **in + } + if in.DesiredPartitionCount != nil { + in, out := &in.DesiredPartitionCount, &out.DesiredPartitionCount + *out = new(float64) + **out = **in + } + if in.DesiredReplicationCount != nil { + in, out := &in.DesiredReplicationCount, &out.DesiredReplicationCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingParametersParameters. +func (in *ScalingParametersParameters) DeepCopy() *ScalingParametersParameters { + if in == nil { + return nil + } + out := new(ScalingParametersParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cloudsearch/v1beta2/zz_generated.managed.go b/apis/cloudsearch/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..b88fe6bf7e --- /dev/null +++ b/apis/cloudsearch/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Domain. +func (mg *Domain) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Domain. +func (mg *Domain) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Domain. +func (mg *Domain) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Domain. +func (mg *Domain) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Domain. 
+func (mg *Domain) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Domain. +func (mg *Domain) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Domain. +func (mg *Domain) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Domain. +func (mg *Domain) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Domain. +func (mg *Domain) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Domain. +func (mg *Domain) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Domain. +func (mg *Domain) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Domain. +func (mg *Domain) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cloudsearch/v1beta2/zz_generated.managedlist.go b/apis/cloudsearch/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..af7c9f32e4 --- /dev/null +++ b/apis/cloudsearch/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DomainList. 
+func (l *DomainList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cloudsearch/v1beta2/zz_groupversion_info.go b/apis/cloudsearch/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..c463e90354 --- /dev/null +++ b/apis/cloudsearch/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cloudsearch.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "cloudsearch.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cloudtrail/v1beta1/zz_generated.resolvers.go b/apis/cloudtrail/v1beta1/zz_generated.resolvers.go index 5b639a3d50..9a48545474 100644 --- a/apis/cloudtrail/v1beta1/zz_generated.resolvers.go +++ b/apis/cloudtrail/v1beta1/zz_generated.resolvers.go @@ -115,7 +115,7 @@ func (mg *Trail) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -172,7 +172,7 @@ func (mg *Trail) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cloudtrail/v1beta1/zz_trail_types.go b/apis/cloudtrail/v1beta1/zz_trail_types.go index 5ad31a9a0c..a49b8df1a1 100755 --- a/apis/cloudtrail/v1beta1/zz_trail_types.go +++ b/apis/cloudtrail/v1beta1/zz_trail_types.go @@ -276,7 +276,7 @@ type TrailInitParameters struct { KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` // Name of the S3 bucket designated for publishing log files. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` @@ -432,7 +432,7 @@ type TrailParameters struct { Region *string `json:"region" tf:"-"` // Name of the S3 bucket designated for publishing log files. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` diff --git a/apis/cloudwatch/v1beta1/zz_generated.conversion_hubs.go b/apis/cloudwatch/v1beta1/zz_generated.conversion_hubs.go index 6e95c574d8..b4f8c65179 100755 --- a/apis/cloudwatch/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cloudwatch/v1beta1/zz_generated.conversion_hubs.go @@ -6,14 +6,8 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *CompositeAlarm) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Dashboard) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MetricAlarm) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MetricStream) Hub() {} diff --git a/apis/cloudwatch/v1beta1/zz_generated.conversion_spokes.go b/apis/cloudwatch/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..8ad76fd360 --- /dev/null +++ b/apis/cloudwatch/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this CompositeAlarm to the hub type. +func (tr *CompositeAlarm) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CompositeAlarm type. +func (tr *CompositeAlarm) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MetricAlarm to the hub type. +func (tr *MetricAlarm) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MetricAlarm type. 
+func (tr *MetricAlarm) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cloudwatch/v1beta1/zz_generated.resolvers.go b/apis/cloudwatch/v1beta1/zz_generated.resolvers.go index 6045ec7c83..3e89b1af23 100644 --- a/apis/cloudwatch/v1beta1/zz_generated.resolvers.go +++ b/apis/cloudwatch/v1beta1/zz_generated.resolvers.go @@ -115,7 +115,7 @@ func (mg *MetricStream) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta1", "DeliveryStream", "DeliveryStreamList") + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -153,7 +153,7 @@ func (mg *MetricStream) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta1", "DeliveryStream", "DeliveryStreamList") + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cloudwatch/v1beta1/zz_metricstream_types.go b/apis/cloudwatch/v1beta1/zz_metricstream_types.go index 7c0f702f44..dc372d964d 100755 --- 
a/apis/cloudwatch/v1beta1/zz_metricstream_types.go +++ b/apis/cloudwatch/v1beta1/zz_metricstream_types.go @@ -112,7 +112,7 @@ type MetricStreamInitParameters struct { ExcludeFilter []ExcludeFilterInitParameters `json:"excludeFilter,omitempty" tf:"exclude_filter,omitempty"` // ARN of the Amazon Kinesis Firehose delivery stream to use for this metric stream. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta1.DeliveryStream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) FirehoseArn *string `json:"firehoseArn,omitempty" tf:"firehose_arn,omitempty"` @@ -213,7 +213,7 @@ type MetricStreamParameters struct { ExcludeFilter []ExcludeFilterParameters `json:"excludeFilter,omitempty" tf:"exclude_filter,omitempty"` // ARN of the Amazon Kinesis Firehose delivery stream to use for this metric stream. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta1.DeliveryStream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) // +kubebuilder:validation:Optional FirehoseArn *string `json:"firehoseArn,omitempty" tf:"firehose_arn,omitempty"` diff --git a/apis/cloudwatch/v1beta2/zz_compositealarm_terraformed.go b/apis/cloudwatch/v1beta2/zz_compositealarm_terraformed.go new file mode 100755 index 0000000000..1b13798514 --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_compositealarm_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CompositeAlarm +func (mg *CompositeAlarm) GetTerraformResourceType() string { + return "aws_cloudwatch_composite_alarm" +} + +// GetConnectionDetailsMapping for this CompositeAlarm +func (tr *CompositeAlarm) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CompositeAlarm +func (tr *CompositeAlarm) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CompositeAlarm +func (tr *CompositeAlarm) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CompositeAlarm +func (tr *CompositeAlarm) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CompositeAlarm +func (tr *CompositeAlarm) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CompositeAlarm +func (tr *CompositeAlarm) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CompositeAlarm +func (tr *CompositeAlarm) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if 
err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this CompositeAlarm +func (tr *CompositeAlarm) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CompositeAlarm using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CompositeAlarm) LateInitialize(attrs []byte) (bool, error) { + params := &CompositeAlarmParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CompositeAlarm) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudwatch/v1beta2/zz_compositealarm_types.go b/apis/cloudwatch/v1beta2/zz_compositealarm_types.go new file mode 100755 index 0000000000..ef5fa16e86 --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_compositealarm_types.go @@ -0,0 +1,270 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionsSuppressorInitParameters struct { + + // Can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm. + Alarm *string `json:"alarm,omitempty" tf:"alarm,omitempty"` + + // The maximum time in seconds that the composite alarm waits after suppressor alarm goes out of the ALARM state. After this time, the composite alarm performs its actions. + ExtensionPeriod *float64 `json:"extensionPeriod,omitempty" tf:"extension_period,omitempty"` + + // The maximum time in seconds that the composite alarm waits for the suppressor alarm to go into the ALARM state. After this time, the composite alarm performs its actions. + WaitPeriod *float64 `json:"waitPeriod,omitempty" tf:"wait_period,omitempty"` +} + +type ActionsSuppressorObservation struct { + + // Can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm. + Alarm *string `json:"alarm,omitempty" tf:"alarm,omitempty"` + + // The maximum time in seconds that the composite alarm waits after suppressor alarm goes out of the ALARM state. After this time, the composite alarm performs its actions. 
+ ExtensionPeriod *float64 `json:"extensionPeriod,omitempty" tf:"extension_period,omitempty"` + + // The maximum time in seconds that the composite alarm waits for the suppressor alarm to go into the ALARM state. After this time, the composite alarm performs its actions. + WaitPeriod *float64 `json:"waitPeriod,omitempty" tf:"wait_period,omitempty"` +} + +type ActionsSuppressorParameters struct { + + // Can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm. + // +kubebuilder:validation:Optional + Alarm *string `json:"alarm" tf:"alarm,omitempty"` + + // The maximum time in seconds that the composite alarm waits after suppressor alarm goes out of the ALARM state. After this time, the composite alarm performs its actions. + // +kubebuilder:validation:Optional + ExtensionPeriod *float64 `json:"extensionPeriod" tf:"extension_period,omitempty"` + + // The maximum time in seconds that the composite alarm waits for the suppressor alarm to go into the ALARM state. After this time, the composite alarm performs its actions. + // +kubebuilder:validation:Optional + WaitPeriod *float64 `json:"waitPeriod" tf:"wait_period,omitempty"` +} + +type CompositeAlarmInitParameters struct { + + // Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. Defaults to true. + ActionsEnabled *bool `json:"actionsEnabled,omitempty" tf:"actions_enabled,omitempty"` + + // Actions will be suppressed if the suppressor alarm is in the ALARM state. + ActionsSuppressor *ActionsSuppressorInitParameters `json:"actionsSuppressor,omitempty" tf:"actions_suppressor,omitempty"` + + // The set of actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +listType=set + AlarmActions []*string `json:"alarmActions,omitempty" tf:"alarm_actions,omitempty"` + + // References to Topic in sns to populate alarmActions. + // +kubebuilder:validation:Optional + AlarmActionsRefs []v1.Reference `json:"alarmActionsRefs,omitempty" tf:"-"` + + // Selector for a list of Topic in sns to populate alarmActions. + // +kubebuilder:validation:Optional + AlarmActionsSelector *v1.Selector `json:"alarmActionsSelector,omitempty" tf:"-"` + + // The description for the composite alarm. + AlarmDescription *string `json:"alarmDescription,omitempty" tf:"alarm_description,omitempty"` + + // An expression that specifies which other alarms are to be evaluated to determine this composite alarm's state. For syntax, see Creating a Composite Alarm. The maximum length is 10240 characters. + AlarmRule *string `json:"alarmRule,omitempty" tf:"alarm_rule,omitempty"` + + // The set of actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. + // +listType=set + InsufficientDataActions []*string `json:"insufficientDataActions,omitempty" tf:"insufficient_data_actions,omitempty"` + + // The set of actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +listType=set + OkActions []*string `json:"okActions,omitempty" tf:"ok_actions,omitempty"` + + // References to Topic in sns to populate okActions. 
+ // +kubebuilder:validation:Optional + OkActionsRefs []v1.Reference `json:"okActionsRefs,omitempty" tf:"-"` + + // Selector for a list of Topic in sns to populate okActions. + // +kubebuilder:validation:Optional + OkActionsSelector *v1.Selector `json:"okActionsSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CompositeAlarmObservation struct { + + // Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. Defaults to true. + ActionsEnabled *bool `json:"actionsEnabled,omitempty" tf:"actions_enabled,omitempty"` + + // Actions will be suppressed if the suppressor alarm is in the ALARM state. + ActionsSuppressor *ActionsSuppressorObservation `json:"actionsSuppressor,omitempty" tf:"actions_suppressor,omitempty"` + + // The set of actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. + // +listType=set + AlarmActions []*string `json:"alarmActions,omitempty" tf:"alarm_actions,omitempty"` + + // The description for the composite alarm. + AlarmDescription *string `json:"alarmDescription,omitempty" tf:"alarm_description,omitempty"` + + // An expression that specifies which other alarms are to be evaluated to determine this composite alarm's state. For syntax, see Creating a Composite Alarm. The maximum length is 10240 characters. + AlarmRule *string `json:"alarmRule,omitempty" tf:"alarm_rule,omitempty"` + + // The ARN of the composite alarm. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ID of the composite alarm resource, which is equivalent to its alarm_name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The set of actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an ARN. 
Up to 5 actions are allowed. + // +listType=set + InsufficientDataActions []*string `json:"insufficientDataActions,omitempty" tf:"insufficient_data_actions,omitempty"` + + // The set of actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. + // +listType=set + OkActions []*string `json:"okActions,omitempty" tf:"ok_actions,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type CompositeAlarmParameters struct { + + // Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. Defaults to true. + // +kubebuilder:validation:Optional + ActionsEnabled *bool `json:"actionsEnabled,omitempty" tf:"actions_enabled,omitempty"` + + // Actions will be suppressed if the suppressor alarm is in the ALARM state. + // +kubebuilder:validation:Optional + ActionsSuppressor *ActionsSuppressorParameters `json:"actionsSuppressor,omitempty" tf:"actions_suppressor,omitempty"` + + // The set of actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + // +listType=set + AlarmActions []*string `json:"alarmActions,omitempty" tf:"alarm_actions,omitempty"` + + // References to Topic in sns to populate alarmActions. 
+ // +kubebuilder:validation:Optional + AlarmActionsRefs []v1.Reference `json:"alarmActionsRefs,omitempty" tf:"-"` + + // Selector for a list of Topic in sns to populate alarmActions. + // +kubebuilder:validation:Optional + AlarmActionsSelector *v1.Selector `json:"alarmActionsSelector,omitempty" tf:"-"` + + // The description for the composite alarm. + // +kubebuilder:validation:Optional + AlarmDescription *string `json:"alarmDescription,omitempty" tf:"alarm_description,omitempty"` + + // An expression that specifies which other alarms are to be evaluated to determine this composite alarm's state. For syntax, see Creating a Composite Alarm. The maximum length is 10240 characters. + // +kubebuilder:validation:Optional + AlarmRule *string `json:"alarmRule,omitempty" tf:"alarm_rule,omitempty"` + + // The set of actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. + // +kubebuilder:validation:Optional + // +listType=set + InsufficientDataActions []*string `json:"insufficientDataActions,omitempty" tf:"insufficient_data_actions,omitempty"` + + // The set of actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + // +listType=set + OkActions []*string `json:"okActions,omitempty" tf:"ok_actions,omitempty"` + + // References to Topic in sns to populate okActions. + // +kubebuilder:validation:Optional + OkActionsRefs []v1.Reference `json:"okActionsRefs,omitempty" tf:"-"` + + // Selector for a list of Topic in sns to populate okActions. 
+ // +kubebuilder:validation:Optional + OkActionsSelector *v1.Selector `json:"okActionsSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// CompositeAlarmSpec defines the desired state of CompositeAlarm +type CompositeAlarmSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CompositeAlarmParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CompositeAlarmInitParameters `json:"initProvider,omitempty"` +} + +// CompositeAlarmStatus defines the observed state of CompositeAlarm. +type CompositeAlarmStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CompositeAlarmObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CompositeAlarm is the Schema for the CompositeAlarms API. Provides a CloudWatch Composite Alarm resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CompositeAlarm struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.alarmRule) || (has(self.initProvider) && has(self.initProvider.alarmRule))",message="spec.forProvider.alarmRule is a required parameter" + Spec CompositeAlarmSpec `json:"spec"` + Status CompositeAlarmStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CompositeAlarmList contains a list of CompositeAlarms +type CompositeAlarmList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CompositeAlarm `json:"items"` +} + +// Repository type metadata. +var ( + CompositeAlarm_Kind = "CompositeAlarm" + CompositeAlarm_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CompositeAlarm_Kind}.String() + CompositeAlarm_KindAPIVersion = CompositeAlarm_Kind + "." 
+ CRDGroupVersion.String() + CompositeAlarm_GroupVersionKind = CRDGroupVersion.WithKind(CompositeAlarm_Kind) +) + +func init() { + SchemeBuilder.Register(&CompositeAlarm{}, &CompositeAlarmList{}) +} diff --git a/apis/cloudwatch/v1beta2/zz_generated.conversion_hubs.go b/apis/cloudwatch/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..3ee4acbbbd --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *CompositeAlarm) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MetricAlarm) Hub() {} diff --git a/apis/cloudwatch/v1beta2/zz_generated.deepcopy.go b/apis/cloudwatch/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..4e052b3f7d --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1458 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsSuppressorInitParameters) DeepCopyInto(out *ActionsSuppressorInitParameters) { + *out = *in + if in.Alarm != nil { + in, out := &in.Alarm, &out.Alarm + *out = new(string) + **out = **in + } + if in.ExtensionPeriod != nil { + in, out := &in.ExtensionPeriod, &out.ExtensionPeriod + *out = new(float64) + **out = **in + } + if in.WaitPeriod != nil { + in, out := &in.WaitPeriod, &out.WaitPeriod + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsSuppressorInitParameters. +func (in *ActionsSuppressorInitParameters) DeepCopy() *ActionsSuppressorInitParameters { + if in == nil { + return nil + } + out := new(ActionsSuppressorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsSuppressorObservation) DeepCopyInto(out *ActionsSuppressorObservation) { + *out = *in + if in.Alarm != nil { + in, out := &in.Alarm, &out.Alarm + *out = new(string) + **out = **in + } + if in.ExtensionPeriod != nil { + in, out := &in.ExtensionPeriod, &out.ExtensionPeriod + *out = new(float64) + **out = **in + } + if in.WaitPeriod != nil { + in, out := &in.WaitPeriod, &out.WaitPeriod + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsSuppressorObservation. +func (in *ActionsSuppressorObservation) DeepCopy() *ActionsSuppressorObservation { + if in == nil { + return nil + } + out := new(ActionsSuppressorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsSuppressorParameters) DeepCopyInto(out *ActionsSuppressorParameters) { + *out = *in + if in.Alarm != nil { + in, out := &in.Alarm, &out.Alarm + *out = new(string) + **out = **in + } + if in.ExtensionPeriod != nil { + in, out := &in.ExtensionPeriod, &out.ExtensionPeriod + *out = new(float64) + **out = **in + } + if in.WaitPeriod != nil { + in, out := &in.WaitPeriod, &out.WaitPeriod + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsSuppressorParameters. +func (in *ActionsSuppressorParameters) DeepCopy() *ActionsSuppressorParameters { + if in == nil { + return nil + } + out := new(ActionsSuppressorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeAlarm) DeepCopyInto(out *CompositeAlarm) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeAlarm. +func (in *CompositeAlarm) DeepCopy() *CompositeAlarm { + if in == nil { + return nil + } + out := new(CompositeAlarm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CompositeAlarm) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompositeAlarmInitParameters) DeepCopyInto(out *CompositeAlarmInitParameters) { + *out = *in + if in.ActionsEnabled != nil { + in, out := &in.ActionsEnabled, &out.ActionsEnabled + *out = new(bool) + **out = **in + } + if in.ActionsSuppressor != nil { + in, out := &in.ActionsSuppressor, &out.ActionsSuppressor + *out = new(ActionsSuppressorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AlarmActions != nil { + in, out := &in.AlarmActions, &out.AlarmActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AlarmActionsRefs != nil { + in, out := &in.AlarmActionsRefs, &out.AlarmActionsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AlarmActionsSelector != nil { + in, out := &in.AlarmActionsSelector, &out.AlarmActionsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AlarmDescription != nil { + in, out := &in.AlarmDescription, &out.AlarmDescription + *out = new(string) + **out = **in + } + if in.AlarmRule != nil { + in, out := &in.AlarmRule, &out.AlarmRule + *out = new(string) + **out = **in + } + if in.InsufficientDataActions != nil { + in, out := &in.InsufficientDataActions, &out.InsufficientDataActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OkActions != nil { + in, out := &in.OkActions, &out.OkActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OkActionsRefs != nil { + in, out := &in.OkActionsRefs, &out.OkActionsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OkActionsSelector != nil { + in, out := 
&in.OkActionsSelector, &out.OkActionsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeAlarmInitParameters. +func (in *CompositeAlarmInitParameters) DeepCopy() *CompositeAlarmInitParameters { + if in == nil { + return nil + } + out := new(CompositeAlarmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeAlarmList) DeepCopyInto(out *CompositeAlarmList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CompositeAlarm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeAlarmList. +func (in *CompositeAlarmList) DeepCopy() *CompositeAlarmList { + if in == nil { + return nil + } + out := new(CompositeAlarmList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CompositeAlarmList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompositeAlarmObservation) DeepCopyInto(out *CompositeAlarmObservation) { + *out = *in + if in.ActionsEnabled != nil { + in, out := &in.ActionsEnabled, &out.ActionsEnabled + *out = new(bool) + **out = **in + } + if in.ActionsSuppressor != nil { + in, out := &in.ActionsSuppressor, &out.ActionsSuppressor + *out = new(ActionsSuppressorObservation) + (*in).DeepCopyInto(*out) + } + if in.AlarmActions != nil { + in, out := &in.AlarmActions, &out.AlarmActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AlarmDescription != nil { + in, out := &in.AlarmDescription, &out.AlarmDescription + *out = new(string) + **out = **in + } + if in.AlarmRule != nil { + in, out := &in.AlarmRule, &out.AlarmRule + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InsufficientDataActions != nil { + in, out := &in.InsufficientDataActions, &out.InsufficientDataActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OkActions != nil { + in, out := &in.OkActions, &out.OkActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for 
key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeAlarmObservation. +func (in *CompositeAlarmObservation) DeepCopy() *CompositeAlarmObservation { + if in == nil { + return nil + } + out := new(CompositeAlarmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeAlarmParameters) DeepCopyInto(out *CompositeAlarmParameters) { + *out = *in + if in.ActionsEnabled != nil { + in, out := &in.ActionsEnabled, &out.ActionsEnabled + *out = new(bool) + **out = **in + } + if in.ActionsSuppressor != nil { + in, out := &in.ActionsSuppressor, &out.ActionsSuppressor + *out = new(ActionsSuppressorParameters) + (*in).DeepCopyInto(*out) + } + if in.AlarmActions != nil { + in, out := &in.AlarmActions, &out.AlarmActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AlarmActionsRefs != nil { + in, out := &in.AlarmActionsRefs, &out.AlarmActionsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AlarmActionsSelector != nil { + in, out := &in.AlarmActionsSelector, &out.AlarmActionsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AlarmDescription != nil { + in, out := &in.AlarmDescription, &out.AlarmDescription + *out = new(string) + **out = **in + } + if in.AlarmRule != nil { + in, out := &in.AlarmRule, &out.AlarmRule + *out = new(string) + **out = **in + } + if in.InsufficientDataActions != nil { + in, out := &in.InsufficientDataActions, &out.InsufficientDataActions + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OkActions != nil { + in, out := &in.OkActions, &out.OkActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OkActionsRefs != nil { + in, out := &in.OkActionsRefs, &out.OkActionsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OkActionsSelector != nil { + in, out := &in.OkActionsSelector, &out.OkActionsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeAlarmParameters. +func (in *CompositeAlarmParameters) DeepCopy() *CompositeAlarmParameters { + if in == nil { + return nil + } + out := new(CompositeAlarmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeAlarmSpec) DeepCopyInto(out *CompositeAlarmSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeAlarmSpec. 
+func (in *CompositeAlarmSpec) DeepCopy() *CompositeAlarmSpec { + if in == nil { + return nil + } + out := new(CompositeAlarmSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeAlarmStatus) DeepCopyInto(out *CompositeAlarmStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeAlarmStatus. +func (in *CompositeAlarmStatus) DeepCopy() *CompositeAlarmStatus { + if in == nil { + return nil + } + out := new(CompositeAlarmStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricAlarm) DeepCopyInto(out *MetricAlarm) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricAlarm. +func (in *MetricAlarm) DeepCopy() *MetricAlarm { + if in == nil { + return nil + } + out := new(MetricAlarm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MetricAlarm) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricAlarmInitParameters) DeepCopyInto(out *MetricAlarmInitParameters) { + *out = *in + if in.ActionsEnabled != nil { + in, out := &in.ActionsEnabled, &out.ActionsEnabled + *out = new(bool) + **out = **in + } + if in.AlarmActions != nil { + in, out := &in.AlarmActions, &out.AlarmActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AlarmDescription != nil { + in, out := &in.AlarmDescription, &out.AlarmDescription + *out = new(string) + **out = **in + } + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.DatapointsToAlarm != nil { + in, out := &in.DatapointsToAlarm, &out.DatapointsToAlarm + *out = new(float64) + **out = **in + } + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EvaluateLowSampleCountPercentiles != nil { + in, out := &in.EvaluateLowSampleCountPercentiles, &out.EvaluateLowSampleCountPercentiles + *out = new(string) + **out = **in + } + if in.EvaluationPeriods != nil { + in, out := &in.EvaluationPeriods, &out.EvaluationPeriods + *out = new(float64) + **out = **in + } + if in.ExtendedStatistic != nil { + in, out := &in.ExtendedStatistic, &out.ExtendedStatistic + *out = new(string) + **out = **in + } + if in.InsufficientDataActions != nil { + in, out := &in.InsufficientDataActions, &out.InsufficientDataActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MetricName != nil { + in, out := &in.MetricName, 
&out.MetricName + *out = new(string) + **out = **in + } + if in.MetricQuery != nil { + in, out := &in.MetricQuery, &out.MetricQuery + *out = make([]MetricQueryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.OkActions != nil { + in, out := &in.OkActions, &out.OkActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(float64) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdMetricID != nil { + in, out := &in.ThresholdMetricID, &out.ThresholdMetricID + *out = new(string) + **out = **in + } + if in.TreatMissingData != nil { + in, out := &in.TreatMissingData, &out.TreatMissingData + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricAlarmInitParameters. 
+func (in *MetricAlarmInitParameters) DeepCopy() *MetricAlarmInitParameters { + if in == nil { + return nil + } + out := new(MetricAlarmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricAlarmList) DeepCopyInto(out *MetricAlarmList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MetricAlarm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricAlarmList. +func (in *MetricAlarmList) DeepCopy() *MetricAlarmList { + if in == nil { + return nil + } + out := new(MetricAlarmList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MetricAlarmList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricAlarmObservation) DeepCopyInto(out *MetricAlarmObservation) { + *out = *in + if in.ActionsEnabled != nil { + in, out := &in.ActionsEnabled, &out.ActionsEnabled + *out = new(bool) + **out = **in + } + if in.AlarmActions != nil { + in, out := &in.AlarmActions, &out.AlarmActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AlarmDescription != nil { + in, out := &in.AlarmDescription, &out.AlarmDescription + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.DatapointsToAlarm != nil { + in, out := &in.DatapointsToAlarm, &out.DatapointsToAlarm + *out = new(float64) + **out = **in + } + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EvaluateLowSampleCountPercentiles != nil { + in, out := &in.EvaluateLowSampleCountPercentiles, &out.EvaluateLowSampleCountPercentiles + *out = new(string) + **out = **in + } + if in.EvaluationPeriods != nil { + in, out := &in.EvaluationPeriods, &out.EvaluationPeriods + *out = new(float64) + **out = **in + } + if in.ExtendedStatistic != nil { + in, out := &in.ExtendedStatistic, &out.ExtendedStatistic + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InsufficientDataActions != nil { + in, out := &in.InsufficientDataActions, &out.InsufficientDataActions + *out = make([]*string, len(*in)) + for i := range *in 
{ + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricQuery != nil { + in, out := &in.MetricQuery, &out.MetricQuery + *out = make([]MetricQueryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.OkActions != nil { + in, out := &in.OkActions, &out.OkActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(float64) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdMetricID != nil { + in, out := &in.ThresholdMetricID, &out.ThresholdMetricID + *out = new(string) + **out = **in + } + if in.TreatMissingData != nil { + in, out := &in.TreatMissingData, &out.TreatMissingData + *out = new(string) + **out = **in + } + 
if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricAlarmObservation. +func (in *MetricAlarmObservation) DeepCopy() *MetricAlarmObservation { + if in == nil { + return nil + } + out := new(MetricAlarmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricAlarmParameters) DeepCopyInto(out *MetricAlarmParameters) { + *out = *in + if in.ActionsEnabled != nil { + in, out := &in.ActionsEnabled, &out.ActionsEnabled + *out = new(bool) + **out = **in + } + if in.AlarmActions != nil { + in, out := &in.AlarmActions, &out.AlarmActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AlarmDescription != nil { + in, out := &in.AlarmDescription, &out.AlarmDescription + *out = new(string) + **out = **in + } + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.DatapointsToAlarm != nil { + in, out := &in.DatapointsToAlarm, &out.DatapointsToAlarm + *out = new(float64) + **out = **in + } + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EvaluateLowSampleCountPercentiles != nil { + in, out := &in.EvaluateLowSampleCountPercentiles, &out.EvaluateLowSampleCountPercentiles + *out = new(string) + **out = **in + } + if in.EvaluationPeriods != nil { + in, out := &in.EvaluationPeriods, &out.EvaluationPeriods + *out = 
new(float64) + **out = **in + } + if in.ExtendedStatistic != nil { + in, out := &in.ExtendedStatistic, &out.ExtendedStatistic + *out = new(string) + **out = **in + } + if in.InsufficientDataActions != nil { + in, out := &in.InsufficientDataActions, &out.InsufficientDataActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricQuery != nil { + in, out := &in.MetricQuery, &out.MetricQuery + *out = make([]MetricQueryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.OkActions != nil { + in, out := &in.OkActions, &out.OkActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdMetricID != nil { + in, out := &in.ThresholdMetricID, &out.ThresholdMetricID + *out = new(string) + **out = **in + } + if in.TreatMissingData != 
nil { + in, out := &in.TreatMissingData, &out.TreatMissingData + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricAlarmParameters. +func (in *MetricAlarmParameters) DeepCopy() *MetricAlarmParameters { + if in == nil { + return nil + } + out := new(MetricAlarmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricAlarmSpec) DeepCopyInto(out *MetricAlarmSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricAlarmSpec. +func (in *MetricAlarmSpec) DeepCopy() *MetricAlarmSpec { + if in == nil { + return nil + } + out := new(MetricAlarmSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricAlarmStatus) DeepCopyInto(out *MetricAlarmStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricAlarmStatus. +func (in *MetricAlarmStatus) DeepCopy() *MetricAlarmStatus { + if in == nil { + return nil + } + out := new(MetricAlarmStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricInitParameters) DeepCopyInto(out *MetricInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(float64) + **out = **in + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricInitParameters. +func (in *MetricInitParameters) DeepCopy() *MetricInitParameters { + if in == nil { + return nil + } + out := new(MetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricObservation) DeepCopyInto(out *MetricObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(float64) + **out = **in + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricObservation. +func (in *MetricObservation) DeepCopy() *MetricObservation { + if in == nil { + return nil + } + out := new(MetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricParameters) DeepCopyInto(out *MetricParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(float64) + **out = **in + } + if in.Stat != nil { + in, out := &in.Stat, &out.Stat + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricParameters. +func (in *MetricParameters) DeepCopy() *MetricParameters { + if in == nil { + return nil + } + out := new(MetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricQueryInitParameters) DeepCopyInto(out *MetricQueryInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(float64) + **out = **in + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricQueryInitParameters. +func (in *MetricQueryInitParameters) DeepCopy() *MetricQueryInitParameters { + if in == nil { + return nil + } + out := new(MetricQueryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricQueryObservation) DeepCopyInto(out *MetricQueryObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricObservation) + (*in).DeepCopyInto(*out) + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(float64) + **out = **in + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricQueryObservation. +func (in *MetricQueryObservation) DeepCopy() *MetricQueryObservation { + if in == nil { + return nil + } + out := new(MetricQueryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricQueryParameters) DeepCopyInto(out *MetricQueryParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(MetricParameters) + (*in).DeepCopyInto(*out) + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(float64) + **out = **in + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricQueryParameters. +func (in *MetricQueryParameters) DeepCopy() *MetricQueryParameters { + if in == nil { + return nil + } + out := new(MetricQueryParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cloudwatch/v1beta2/zz_generated.managed.go b/apis/cloudwatch/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..dd56045206 --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CompositeAlarm. +func (mg *CompositeAlarm) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CompositeAlarm. +func (mg *CompositeAlarm) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CompositeAlarm. 
+func (mg *CompositeAlarm) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CompositeAlarm. +func (mg *CompositeAlarm) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CompositeAlarm. +func (mg *CompositeAlarm) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CompositeAlarm. +func (mg *CompositeAlarm) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CompositeAlarm. +func (mg *CompositeAlarm) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CompositeAlarm. +func (mg *CompositeAlarm) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CompositeAlarm. +func (mg *CompositeAlarm) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CompositeAlarm. +func (mg *CompositeAlarm) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CompositeAlarm. +func (mg *CompositeAlarm) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CompositeAlarm. +func (mg *CompositeAlarm) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MetricAlarm. +func (mg *MetricAlarm) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MetricAlarm. 
+func (mg *MetricAlarm) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MetricAlarm. +func (mg *MetricAlarm) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MetricAlarm. +func (mg *MetricAlarm) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MetricAlarm. +func (mg *MetricAlarm) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MetricAlarm. +func (mg *MetricAlarm) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MetricAlarm. +func (mg *MetricAlarm) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MetricAlarm. +func (mg *MetricAlarm) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MetricAlarm. +func (mg *MetricAlarm) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MetricAlarm. +func (mg *MetricAlarm) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MetricAlarm. +func (mg *MetricAlarm) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MetricAlarm. 
+func (mg *MetricAlarm) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cloudwatch/v1beta2/zz_generated.managedlist.go b/apis/cloudwatch/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..879279283d --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CompositeAlarmList. +func (l *CompositeAlarmList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MetricAlarmList. +func (l *MetricAlarmList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cloudwatch/v1beta2/zz_generated.resolvers.go b/apis/cloudwatch/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..c901794afb --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *CompositeAlarm) ResolveReferences( // ResolveReferences of this CompositeAlarm. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.AlarmActions), + Extract: resource.ExtractParamPath("arn", true), + References: mg.Spec.ForProvider.AlarmActionsRefs, + Selector: mg.Spec.ForProvider.AlarmActionsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AlarmActions") + } + mg.Spec.ForProvider.AlarmActions = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.AlarmActionsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.OkActions), + Extract: resource.ExtractParamPath("arn", true), + References: 
mg.Spec.ForProvider.OkActionsRefs, + Selector: mg.Spec.ForProvider.OkActionsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OkActions") + } + mg.Spec.ForProvider.OkActions = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.OkActionsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.AlarmActions), + Extract: resource.ExtractParamPath("arn", true), + References: mg.Spec.InitProvider.AlarmActionsRefs, + Selector: mg.Spec.InitProvider.AlarmActionsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AlarmActions") + } + mg.Spec.InitProvider.AlarmActions = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.AlarmActionsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.OkActions), + Extract: resource.ExtractParamPath("arn", true), + References: mg.Spec.InitProvider.OkActionsRefs, + Selector: mg.Spec.InitProvider.OkActionsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OkActions") + } + mg.Spec.InitProvider.OkActions = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.OkActionsRefs = 
mrsp.ResolvedReferences + + return nil +} diff --git a/apis/cloudwatch/v1beta2/zz_groupversion_info.go b/apis/cloudwatch/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..26f51044ec --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cloudwatch.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "cloudwatch.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cloudwatch/v1beta2/zz_metricalarm_terraformed.go b/apis/cloudwatch/v1beta2/zz_metricalarm_terraformed.go new file mode 100755 index 0000000000..33b7b14360 --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_metricalarm_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MetricAlarm +func (mg *MetricAlarm) GetTerraformResourceType() string { + return "aws_cloudwatch_metric_alarm" +} + +// GetConnectionDetailsMapping for this MetricAlarm +func (tr *MetricAlarm) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MetricAlarm +func (tr *MetricAlarm) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MetricAlarm +func (tr *MetricAlarm) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MetricAlarm +func (tr *MetricAlarm) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MetricAlarm +func (tr *MetricAlarm) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MetricAlarm +func (tr *MetricAlarm) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MetricAlarm +func (tr *MetricAlarm) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MetricAlarm +func (tr *MetricAlarm) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MetricAlarm using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MetricAlarm) LateInitialize(attrs []byte) (bool, error) { + params := &MetricAlarmParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MetricAlarm) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cloudwatch/v1beta2/zz_metricalarm_types.go b/apis/cloudwatch/v1beta2/zz_metricalarm_types.go new file mode 100755 index 0000000000..db58dc5f52 --- /dev/null +++ b/apis/cloudwatch/v1beta2/zz_metricalarm_types.go @@ -0,0 +1,518 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MetricAlarmInitParameters struct { + + // Indicates whether or not actions should be executed during any changes to the alarm's state. Defaults to true. + ActionsEnabled *bool `json:"actionsEnabled,omitempty" tf:"actions_enabled,omitempty"` + + // The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN). + // +listType=set + AlarmActions []*string `json:"alarmActions,omitempty" tf:"alarm_actions,omitempty"` + + // The description for the alarm. + AlarmDescription *string `json:"alarmDescription,omitempty" tf:"alarm_description,omitempty"` + + // The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Either of the following is supported: GreaterThanOrEqualToThreshold, GreaterThanThreshold, LessThanThreshold, LessThanOrEqualToThreshold. Additionally, the values LessThanLowerOrGreaterThanUpperThreshold, LessThanLowerThreshold, and GreaterThanUpperThreshold are used only for alarms based on anomaly detection models. 
+ ComparisonOperator *string `json:"comparisonOperator,omitempty" tf:"comparison_operator,omitempty"` + + // The number of datapoints that must be breaching to trigger the alarm. + DatapointsToAlarm *float64 `json:"datapointsToAlarm,omitempty" tf:"datapoints_to_alarm,omitempty"` + + // The dimensions for the alarm's associated metric. For the list of available dimensions see the AWS documentation here. + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Used only for alarms based on percentiles. + // If you specify ignore, the alarm state will not change during periods with too few data points to be statistically significant. + // If you specify evaluate or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. + // The following values are supported: ignore, and evaluate. + EvaluateLowSampleCountPercentiles *string `json:"evaluateLowSampleCountPercentiles,omitempty" tf:"evaluate_low_sample_count_percentiles,omitempty"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *float64 `json:"evaluationPeriods,omitempty" tf:"evaluation_periods,omitempty"` + + // The percentile statistic for the metric associated with the alarm. Specify a value between p0.0 and p100. + ExtendedStatistic *string `json:"extendedStatistic,omitempty" tf:"extended_statistic,omitempty"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN). + // +listType=set + InsufficientDataActions []*string `json:"insufficientDataActions,omitempty" tf:"insufficient_data_actions,omitempty"` + + // The name for the alarm's associated metric. + // See docs for supported metrics. 
+ MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Enables you to create an alarm based on a metric math expression. You may specify at most 20. + MetricQuery []MetricQueryInitParameters `json:"metricQuery,omitempty" tf:"metric_query,omitempty"` + + // The namespace for the alarm's associated metric. See docs for the list of namespaces. + // See docs for supported metrics. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). + // +listType=set + OkActions []*string `json:"okActions,omitempty" tf:"ok_actions,omitempty"` + + // The period in seconds over which the specified statistic is applied. + // Valid values are 10, 30, or any multiple of 60. + Period *float64 `json:"period,omitempty" tf:"period,omitempty"` + + // The statistic to apply to the alarm's associated metric. + // Either of the following is supported: SampleCount, Average, Sum, Minimum, Maximum + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The value against which the specified statistic is compared. This parameter is required for alarms based on static thresholds, but should not be used for alarms based on anomaly detection models. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // If this is an alarm based on an anomaly detection model, make this value match the ID of the ANOMALY_DETECTION_BAND function. + ThresholdMetricID *string `json:"thresholdMetricId,omitempty" tf:"threshold_metric_id,omitempty"` + + // Sets how this alarm is to handle missing data points. The following values are supported: missing, ignore, breaching and notBreaching. Defaults to missing. 
+ TreatMissingData *string `json:"treatMissingData,omitempty" tf:"treat_missing_data,omitempty"` + + // The unit for the alarm's associated metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricAlarmObservation struct { + + // Indicates whether or not actions should be executed during any changes to the alarm's state. Defaults to true. + ActionsEnabled *bool `json:"actionsEnabled,omitempty" tf:"actions_enabled,omitempty"` + + // The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN). + // +listType=set + AlarmActions []*string `json:"alarmActions,omitempty" tf:"alarm_actions,omitempty"` + + // The description for the alarm. + AlarmDescription *string `json:"alarmDescription,omitempty" tf:"alarm_description,omitempty"` + + // The ARN of the CloudWatch Metric Alarm. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Either of the following is supported: GreaterThanOrEqualToThreshold, GreaterThanThreshold, LessThanThreshold, LessThanOrEqualToThreshold. Additionally, the values LessThanLowerOrGreaterThanUpperThreshold, LessThanLowerThreshold, and GreaterThanUpperThreshold are used only for alarms based on anomaly detection models. + ComparisonOperator *string `json:"comparisonOperator,omitempty" tf:"comparison_operator,omitempty"` + + // The number of datapoints that must be breaching to trigger the alarm. + DatapointsToAlarm *float64 `json:"datapointsToAlarm,omitempty" tf:"datapoints_to_alarm,omitempty"` + + // The dimensions for the alarm's associated metric. For the list of available dimensions see the AWS documentation here. 
+ // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Used only for alarms based on percentiles. + // If you specify ignore, the alarm state will not change during periods with too few data points to be statistically significant. + // If you specify evaluate or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. + // The following values are supported: ignore, and evaluate. + EvaluateLowSampleCountPercentiles *string `json:"evaluateLowSampleCountPercentiles,omitempty" tf:"evaluate_low_sample_count_percentiles,omitempty"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *float64 `json:"evaluationPeriods,omitempty" tf:"evaluation_periods,omitempty"` + + // The percentile statistic for the metric associated with the alarm. Specify a value between p0.0 and p100. + ExtendedStatistic *string `json:"extendedStatistic,omitempty" tf:"extended_statistic,omitempty"` + + // The ID of the health check. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN). + // +listType=set + InsufficientDataActions []*string `json:"insufficientDataActions,omitempty" tf:"insufficient_data_actions,omitempty"` + + // The name for the alarm's associated metric. + // See docs for supported metrics. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Enables you to create an alarm based on a metric math expression. You may specify at most 20. + MetricQuery []MetricQueryObservation `json:"metricQuery,omitempty" tf:"metric_query,omitempty"` + + // The namespace for the alarm's associated metric. See docs for the list of namespaces. + // See docs for supported metrics. 
+ Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). + // +listType=set + OkActions []*string `json:"okActions,omitempty" tf:"ok_actions,omitempty"` + + // The period in seconds over which the specified statistic is applied. + // Valid values are 10, 30, or any multiple of 60. + Period *float64 `json:"period,omitempty" tf:"period,omitempty"` + + // The statistic to apply to the alarm's associated metric. + // Either of the following is supported: SampleCount, Average, Sum, Minimum, Maximum + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The value against which the specified statistic is compared. This parameter is required for alarms based on static thresholds, but should not be used for alarms based on anomaly detection models. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // If this is an alarm based on an anomaly detection model, make this value match the ID of the ANOMALY_DETECTION_BAND function. + ThresholdMetricID *string `json:"thresholdMetricId,omitempty" tf:"threshold_metric_id,omitempty"` + + // Sets how this alarm is to handle missing data points. The following values are supported: missing, ignore, breaching and notBreaching. Defaults to missing. + TreatMissingData *string `json:"treatMissingData,omitempty" tf:"treat_missing_data,omitempty"` + + // The unit for the alarm's associated metric. 
+ Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricAlarmParameters struct { + + // Indicates whether or not actions should be executed during any changes to the alarm's state. Defaults to true. + // +kubebuilder:validation:Optional + ActionsEnabled *bool `json:"actionsEnabled,omitempty" tf:"actions_enabled,omitempty"` + + // The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN). + // +kubebuilder:validation:Optional + // +listType=set + AlarmActions []*string `json:"alarmActions,omitempty" tf:"alarm_actions,omitempty"` + + // The description for the alarm. + // +kubebuilder:validation:Optional + AlarmDescription *string `json:"alarmDescription,omitempty" tf:"alarm_description,omitempty"` + + // The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Either of the following is supported: GreaterThanOrEqualToThreshold, GreaterThanThreshold, LessThanThreshold, LessThanOrEqualToThreshold. Additionally, the values LessThanLowerOrGreaterThanUpperThreshold, LessThanLowerThreshold, and GreaterThanUpperThreshold are used only for alarms based on anomaly detection models. + // +kubebuilder:validation:Optional + ComparisonOperator *string `json:"comparisonOperator,omitempty" tf:"comparison_operator,omitempty"` + + // The number of datapoints that must be breaching to trigger the alarm. + // +kubebuilder:validation:Optional + DatapointsToAlarm *float64 `json:"datapointsToAlarm,omitempty" tf:"datapoints_to_alarm,omitempty"` + + // The dimensions for the alarm's associated metric. For the list of available dimensions see the AWS documentation here. + // +kubebuilder:validation:Optional + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Used only for alarms based on percentiles. 
+ // If you specify ignore, the alarm state will not change during periods with too few data points to be statistically significant. + // If you specify evaluate or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. + // The following values are supported: ignore, and evaluate. + // +kubebuilder:validation:Optional + EvaluateLowSampleCountPercentiles *string `json:"evaluateLowSampleCountPercentiles,omitempty" tf:"evaluate_low_sample_count_percentiles,omitempty"` + + // The number of periods over which data is compared to the specified threshold. + // +kubebuilder:validation:Optional + EvaluationPeriods *float64 `json:"evaluationPeriods,omitempty" tf:"evaluation_periods,omitempty"` + + // The percentile statistic for the metric associated with the alarm. Specify a value between p0.0 and p100. + // +kubebuilder:validation:Optional + ExtendedStatistic *string `json:"extendedStatistic,omitempty" tf:"extended_statistic,omitempty"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN). + // +kubebuilder:validation:Optional + // +listType=set + InsufficientDataActions []*string `json:"insufficientDataActions,omitempty" tf:"insufficient_data_actions,omitempty"` + + // The name for the alarm's associated metric. + // See docs for supported metrics. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // Enables you to create an alarm based on a metric math expression. You may specify at most 20. + // +kubebuilder:validation:Optional + MetricQuery []MetricQueryParameters `json:"metricQuery,omitempty" tf:"metric_query,omitempty"` + + // The namespace for the alarm's associated metric. See docs for the list of namespaces. + // See docs for supported metrics. 
+ // +kubebuilder:validation:Optional + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). + // +kubebuilder:validation:Optional + // +listType=set + OkActions []*string `json:"okActions,omitempty" tf:"ok_actions,omitempty"` + + // The period in seconds over which the specified statistic is applied. + // Valid values are 10, 30, or any multiple of 60. + // +kubebuilder:validation:Optional + Period *float64 `json:"period,omitempty" tf:"period,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The statistic to apply to the alarm's associated metric. + // Either of the following is supported: SampleCount, Average, Sum, Minimum, Maximum + // +kubebuilder:validation:Optional + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The value against which the specified statistic is compared. This parameter is required for alarms based on static thresholds, but should not be used for alarms based on anomaly detection models. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // If this is an alarm based on an anomaly detection model, make this value match the ID of the ANOMALY_DETECTION_BAND function. + // +kubebuilder:validation:Optional + ThresholdMetricID *string `json:"thresholdMetricId,omitempty" tf:"threshold_metric_id,omitempty"` + + // Sets how this alarm is to handle missing data points. The following values are supported: missing, ignore, breaching and notBreaching. 
Defaults to missing. + // +kubebuilder:validation:Optional + TreatMissingData *string `json:"treatMissingData,omitempty" tf:"treat_missing_data,omitempty"` + + // The unit for the alarm's associated metric. + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricInitParameters struct { + + // The dimensions for this metric. For the list of available dimensions see the AWS documentation here. + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // The name for this metric. + // See docs for supported metrics. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The namespace for this metric. See docs for the list of namespaces. + // See docs for supported metrics. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Granularity in seconds of returned data points. + // For metrics with regular resolution, valid values are any multiple of 60. + // For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + Period *float64 `json:"period,omitempty" tf:"period,omitempty"` + + // The statistic to apply to this metric. + // See docs for supported statistics. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // The unit for this metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricObservation struct { + + // The dimensions for this metric. For the list of available dimensions see the AWS documentation here. + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // The name for this metric. + // See docs for supported metrics. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The namespace for this metric. See docs for the list of namespaces. + // See docs for supported metrics. 
+ Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Granularity in seconds of returned data points. + // For metrics with regular resolution, valid values are any multiple of 60. + // For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + Period *float64 `json:"period,omitempty" tf:"period,omitempty"` + + // The statistic to apply to this metric. + // See docs for supported statistics. + Stat *string `json:"stat,omitempty" tf:"stat,omitempty"` + + // The unit for this metric. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricParameters struct { + + // The dimensions for this metric. For the list of available dimensions see the AWS documentation here. + // +kubebuilder:validation:Optional + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // The name for this metric. + // See docs for supported metrics. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // The namespace for this metric. See docs for the list of namespaces. + // See docs for supported metrics. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // Granularity in seconds of returned data points. + // For metrics with regular resolution, valid values are any multiple of 60. + // For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + // +kubebuilder:validation:Optional + Period *float64 `json:"period" tf:"period,omitempty"` + + // The statistic to apply to this metric. + // See docs for supported statistics. + // +kubebuilder:validation:Optional + Stat *string `json:"stat" tf:"stat,omitempty"` + + // The unit for this metric. 
+ // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` +} + +type MetricQueryInitParameters struct { + + // The ID of the account where the metrics are located, if this is a cross-account alarm. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the id of the other metrics to refer to those metrics, and can also use the id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // A short name used to tie this object to the results in the response. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The metric to be returned, along with statistics, period, and units. Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data. + Metric *MetricInitParameters `json:"metric,omitempty" tf:"metric,omitempty"` + + // Granularity in seconds of returned data points. + // For metrics with regular resolution, valid values are any multiple of 60. + // For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. 
+ Period *float64 `json:"period,omitempty" tf:"period,omitempty"` + + // Specify exactly one metric_query to be true to use that metric_query result as the alarm. + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type MetricQueryObservation struct { + + // The ID of the account where the metrics are located, if this is a cross-account alarm. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the id of the other metrics to refer to those metrics, and can also use the id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // A short name used to tie this object to the results in the response. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The metric to be returned, along with statistics, period, and units. Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data. + Metric *MetricObservation `json:"metric,omitempty" tf:"metric,omitempty"` + + // Granularity in seconds of returned data points. + // For metrics with regular resolution, valid values are any multiple of 60. 
+ // For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + Period *float64 `json:"period,omitempty" tf:"period,omitempty"` + + // Specify exactly one metric_query to be true to use that metric_query result as the alarm. + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +type MetricQueryParameters struct { + + // The ID of the account where the metrics are located, if this is a cross-account alarm. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the id of the other metrics to refer to those metrics, and can also use the id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // A short name used to tie this object to the results in the response. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The metric to be returned, along with statistics, period, and units. Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data. 
+ // +kubebuilder:validation:Optional + Metric *MetricParameters `json:"metric,omitempty" tf:"metric,omitempty"` + + // Granularity in seconds of returned data points. + // For metrics with regular resolution, valid values are any multiple of 60. + // For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + // +kubebuilder:validation:Optional + Period *float64 `json:"period,omitempty" tf:"period,omitempty"` + + // Specify exactly one metric_query to be true to use that metric_query result as the alarm. + // +kubebuilder:validation:Optional + ReturnData *bool `json:"returnData,omitempty" tf:"return_data,omitempty"` +} + +// MetricAlarmSpec defines the desired state of MetricAlarm +type MetricAlarmSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MetricAlarmParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MetricAlarmInitParameters `json:"initProvider,omitempty"` +} + +// MetricAlarmStatus defines the observed state of MetricAlarm. +type MetricAlarmStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MetricAlarmObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MetricAlarm is the Schema for the MetricAlarms API. Provides a CloudWatch Metric Alarm resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type MetricAlarm struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.comparisonOperator) || (has(self.initProvider) && has(self.initProvider.comparisonOperator))",message="spec.forProvider.comparisonOperator is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.evaluationPeriods) || (has(self.initProvider) && has(self.initProvider.evaluationPeriods))",message="spec.forProvider.evaluationPeriods is a required parameter" + Spec MetricAlarmSpec `json:"spec"` + Status MetricAlarmStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MetricAlarmList contains a list of MetricAlarms +type MetricAlarmList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MetricAlarm `json:"items"` +} + +// Repository type metadata. +var ( + MetricAlarm_Kind = "MetricAlarm" + MetricAlarm_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MetricAlarm_Kind}.String() + MetricAlarm_KindAPIVersion = MetricAlarm_Kind + "." 
+ CRDGroupVersion.String() + MetricAlarm_GroupVersionKind = CRDGroupVersion.WithKind(MetricAlarm_Kind) +) + +func init() { + SchemeBuilder.Register(&MetricAlarm{}, &MetricAlarmList{}) +} diff --git a/apis/cloudwatchevents/v1beta1/zz_apidestination_types.go b/apis/cloudwatchevents/v1beta1/zz_apidestination_types.go index aa9858b179..a51c06f064 100755 --- a/apis/cloudwatchevents/v1beta1/zz_apidestination_types.go +++ b/apis/cloudwatchevents/v1beta1/zz_apidestination_types.go @@ -16,7 +16,7 @@ import ( type APIDestinationInitParameters struct { // ARN of the EventBridge Connection to use for the API Destination. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta1.Connection + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta2.Connection // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) ConnectionArn *string `json:"connectionArn,omitempty" tf:"connection_arn,omitempty"` @@ -67,7 +67,7 @@ type APIDestinationObservation struct { type APIDestinationParameters struct { // ARN of the EventBridge Connection to use for the API Destination. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta1.Connection + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta2.Connection // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional ConnectionArn *string `json:"connectionArn,omitempty" tf:"connection_arn,omitempty"` diff --git a/apis/cloudwatchevents/v1beta1/zz_generated.conversion_hubs.go b/apis/cloudwatchevents/v1beta1/zz_generated.conversion_hubs.go index fd231d3e05..4f71649f55 100755 --- a/apis/cloudwatchevents/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cloudwatchevents/v1beta1/zz_generated.conversion_hubs.go @@ -18,14 +18,5 @@ func (tr *Bus) Hub() {} // Hub marks this type as a conversion hub. func (tr *BusPolicy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Connection) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Permission) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Rule) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Target) Hub() {} diff --git a/apis/cloudwatchevents/v1beta1/zz_generated.conversion_spokes.go b/apis/cloudwatchevents/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..4552655979 --- /dev/null +++ b/apis/cloudwatchevents/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Connection to the hub type. 
+func (tr *Connection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Connection type. +func (tr *Connection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Permission to the hub type. +func (tr *Permission) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Permission type. +func (tr *Permission) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Target to the hub type. 
+func (tr *Target) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Target type. +func (tr *Target) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cloudwatchevents/v1beta1/zz_generated.resolvers.go b/apis/cloudwatchevents/v1beta1/zz_generated.resolvers.go index 85192b06bd..88b65bcdd0 100644 --- a/apis/cloudwatchevents/v1beta1/zz_generated.resolvers.go +++ b/apis/cloudwatchevents/v1beta1/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *APIDestination) ResolveReferences( // ResolveReferences of this APIDes var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta1", "Connection", "ConnectionList") + m, l, err = apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta2", "Connection", "ConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -46,7 +46,7 @@ func (mg *APIDestination) ResolveReferences( // ResolveReferences of this APIDes mg.Spec.ForProvider.ConnectionArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ConnectionArnRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta1", "Connection", "ConnectionList") + m, l, err = apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta2", "Connection", "ConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cloudwatchevents/v1beta2/zz_connection_terraformed.go b/apis/cloudwatchevents/v1beta2/zz_connection_terraformed.go new file mode 100755 index 0000000000..ad938c7237 --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_connection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Connection +func (mg *Connection) GetTerraformResourceType() string { + return "aws_cloudwatch_event_connection" +} + +// GetConnectionDetailsMapping for this Connection +func (tr *Connection) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_parameters[*].api_key[*].value": "authParameters[*].apiKey[*].valueSecretRef", "auth_parameters[*].basic[*].password": "authParameters[*].basic[*].passwordSecretRef", "auth_parameters[*].invocation_http_parameters[*].body[*].value": "authParameters[*].invocationHttpParameters[*].body[*].valueSecretRef", "auth_parameters[*].invocation_http_parameters[*].header[*].value": "authParameters[*].invocationHttpParameters[*].header[*].valueSecretRef", "auth_parameters[*].invocation_http_parameters[*].query_string[*].value": "authParameters[*].invocationHttpParameters[*].queryString[*].valueSecretRef", "auth_parameters[*].oauth[*].client_parameters[*].client_secret": 
"authParameters[*].oauth[*].clientParameters[*].clientSecretSecretRef", "auth_parameters[*].oauth[*].oauth_http_parameters[*].body[*].value": "authParameters[*].oauth[*].oauthHttpParameters[*].body[*].valueSecretRef", "auth_parameters[*].oauth[*].oauth_http_parameters[*].header[*].value": "authParameters[*].oauth[*].oauthHttpParameters[*].header[*].valueSecretRef", "auth_parameters[*].oauth[*].oauth_http_parameters[*].query_string[*].value": "authParameters[*].oauth[*].oauthHttpParameters[*].queryString[*].valueSecretRef"} +} + +// GetObservation of this Connection +func (tr *Connection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Connection +func (tr *Connection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Connection +func (tr *Connection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Connection +func (tr *Connection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Connection +func (tr *Connection) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Connection +func (tr *Connection) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + 
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Connection. It returns spec.forProvider,
+// optionally merged with spec.initProvider.
+func (tr *Connection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Connection using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Connection) LateInitialize(attrs []byte) (bool, error) {
+	params := &ConnectionParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Connection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudwatchevents/v1beta2/zz_connection_types.go b/apis/cloudwatchevents/v1beta2/zz_connection_types.go new file mode 100755 index 0000000000..b7e2c42b7a --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_connection_types.go @@ -0,0 +1,598 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type APIKeyInitParameters struct { + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type APIKeyObservation struct { + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type APIKeyParameters struct { + + // Header Name. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type AuthParametersInitParameters struct { + + // Parameters used for API_KEY authorization. An API key to include in the header for each authentication request. A maximum of 1 are allowed. Conflicts with basic and oauth. Documented below. + APIKey *APIKeyInitParameters `json:"apiKey,omitempty" tf:"api_key,omitempty"` + + // Parameters used for BASIC authorization. A maximum of 1 are allowed. Conflicts with api_key and oauth. Documented below. 
+ Basic *BasicInitParameters `json:"basic,omitempty" tf:"basic,omitempty"` + + // Invocation Http Parameters are additional credentials used to sign each Invocation of the ApiDestination created from this Connection. If the ApiDestination Rule Target has additional HttpParameters, the values will be merged together, with the Connection Invocation Http Parameters taking precedence. Secret values are stored and managed by AWS Secrets Manager. A maximum of 1 are allowed. Documented below. + InvocationHTTPParameters *InvocationHTTPParametersInitParameters `json:"invocationHttpParameters,omitempty" tf:"invocation_http_parameters,omitempty"` + + // Parameters used for OAUTH_CLIENT_CREDENTIALS authorization. A maximum of 1 are allowed. Conflicts with basic and api_key. Documented below. + Oauth *OauthInitParameters `json:"oauth,omitempty" tf:"oauth,omitempty"` +} + +type AuthParametersObservation struct { + + // Parameters used for API_KEY authorization. An API key to include in the header for each authentication request. A maximum of 1 are allowed. Conflicts with basic and oauth. Documented below. + APIKey *APIKeyObservation `json:"apiKey,omitempty" tf:"api_key,omitempty"` + + // Parameters used for BASIC authorization. A maximum of 1 are allowed. Conflicts with api_key and oauth. Documented below. + Basic *BasicObservation `json:"basic,omitempty" tf:"basic,omitempty"` + + // Invocation Http Parameters are additional credentials used to sign each Invocation of the ApiDestination created from this Connection. If the ApiDestination Rule Target has additional HttpParameters, the values will be merged together, with the Connection Invocation Http Parameters taking precedence. Secret values are stored and managed by AWS Secrets Manager. A maximum of 1 are allowed. Documented below. 
+ InvocationHTTPParameters *InvocationHTTPParametersObservation `json:"invocationHttpParameters,omitempty" tf:"invocation_http_parameters,omitempty"` + + // Parameters used for OAUTH_CLIENT_CREDENTIALS authorization. A maximum of 1 are allowed. Conflicts with basic and api_key. Documented below. + Oauth *OauthObservation `json:"oauth,omitempty" tf:"oauth,omitempty"` +} + +type AuthParametersParameters struct { + + // Parameters used for API_KEY authorization. An API key to include in the header for each authentication request. A maximum of 1 are allowed. Conflicts with basic and oauth. Documented below. + // +kubebuilder:validation:Optional + APIKey *APIKeyParameters `json:"apiKey,omitempty" tf:"api_key,omitempty"` + + // Parameters used for BASIC authorization. A maximum of 1 are allowed. Conflicts with api_key and oauth. Documented below. + // +kubebuilder:validation:Optional + Basic *BasicParameters `json:"basic,omitempty" tf:"basic,omitempty"` + + // Invocation Http Parameters are additional credentials used to sign each Invocation of the ApiDestination created from this Connection. If the ApiDestination Rule Target has additional HttpParameters, the values will be merged together, with the Connection Invocation Http Parameters taking precedence. Secret values are stored and managed by AWS Secrets Manager. A maximum of 1 are allowed. Documented below. + // +kubebuilder:validation:Optional + InvocationHTTPParameters *InvocationHTTPParametersParameters `json:"invocationHttpParameters,omitempty" tf:"invocation_http_parameters,omitempty"` + + // Parameters used for OAUTH_CLIENT_CREDENTIALS authorization. A maximum of 1 are allowed. Conflicts with basic and api_key. Documented below. + // +kubebuilder:validation:Optional + Oauth *OauthParameters `json:"oauth,omitempty" tf:"oauth,omitempty"` +} + +type BasicInitParameters struct { + + // A password for the authorization. Created and stored in AWS Secrets Manager. 
+ PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // A username for the authorization. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type BasicObservation struct { + + // A username for the authorization. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type BasicParameters struct { + + // A password for the authorization. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // A username for the authorization. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type BodyInitParameters struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type BodyObservation struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type BodyParameters struct { + + // Specified whether the value is secret. + // +kubebuilder:validation:Optional + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type ClientParametersInitParameters struct { + + // The client ID for the credentials to use for authorization. 
Created and stored in AWS Secrets Manager. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The client secret for the credentials to use for authorization. Created and stored in AWS Secrets Manager. + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` +} + +type ClientParametersObservation struct { + + // The client ID for the credentials to use for authorization. Created and stored in AWS Secrets Manager. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type ClientParametersParameters struct { + + // The client ID for the credentials to use for authorization. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The client secret for the credentials to use for authorization. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` +} + +type ConnectionInitParameters struct { + + // Parameters used for authorization. A maximum of 1 are allowed. Documented below. + AuthParameters *AuthParametersInitParameters `json:"authParameters,omitempty" tf:"auth_parameters,omitempty"` + + // Choose the type of authorization to use for the connection. One of API_KEY,BASIC,OAUTH_CLIENT_CREDENTIALS. + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // Enter a description for the connection. Maximum of 512 characters. + Description *string `json:"description,omitempty" tf:"description,omitempty"` +} + +type ConnectionObservation struct { + + // The Amazon Resource Name (ARN) of the connection. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Parameters used for authorization. A maximum of 1 are allowed. Documented below. 
+ AuthParameters *AuthParametersObservation `json:"authParameters,omitempty" tf:"auth_parameters,omitempty"` + + // Choose the type of authorization to use for the connection. One of API_KEY,BASIC,OAUTH_CLIENT_CREDENTIALS. + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // Enter a description for the connection. Maximum of 512 characters. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Amazon Resource Name (ARN) of the secret created from the authorization parameters specified for the connection. + SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` +} + +type ConnectionParameters struct { + + // Parameters used for authorization. A maximum of 1 are allowed. Documented below. + // +kubebuilder:validation:Optional + AuthParameters *AuthParametersParameters `json:"authParameters,omitempty" tf:"auth_parameters,omitempty"` + + // Choose the type of authorization to use for the connection. One of API_KEY,BASIC,OAUTH_CLIENT_CREDENTIALS. + // +kubebuilder:validation:Optional + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // Enter a description for the connection. Maximum of 512 characters. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type HeaderInitParameters struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. 
+ ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type HeaderObservation struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type HeaderParameters struct { + + // Specified whether the value is secret. + // +kubebuilder:validation:Optional + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type InvocationHTTPParametersInitParameters struct { + + // Contains additional body string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + Body []BodyInitParameters `json:"body,omitempty" tf:"body,omitempty"` + + // Contains additional header parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + Header []HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Contains additional query string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. 
Each parameter can contain the following: + QueryString []QueryStringInitParameters `json:"queryString,omitempty" tf:"query_string,omitempty"` +} + +type InvocationHTTPParametersObservation struct { + + // Contains additional body string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + Body []BodyObservation `json:"body,omitempty" tf:"body,omitempty"` + + // Contains additional header parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + Header []HeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // Contains additional query string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + QueryString []QueryStringObservation `json:"queryString,omitempty" tf:"query_string,omitempty"` +} + +type InvocationHTTPParametersParameters struct { + + // Contains additional body string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + // +kubebuilder:validation:Optional + Body []BodyParameters `json:"body,omitempty" tf:"body,omitempty"` + + // Contains additional header parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. 
Each parameter can contain the following: + // +kubebuilder:validation:Optional + Header []HeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Contains additional query string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + // +kubebuilder:validation:Optional + QueryString []QueryStringParameters `json:"queryString,omitempty" tf:"query_string,omitempty"` +} + +type OauthHTTPParametersBodyInitParameters struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type OauthHTTPParametersBodyObservation struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type OauthHTTPParametersBodyParameters struct { + + // Specified whether the value is secret. + // +kubebuilder:validation:Optional + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type OauthHTTPParametersHeaderInitParameters struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type OauthHTTPParametersHeaderObservation struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type OauthHTTPParametersHeaderParameters struct { + + // Specified whether the value is secret. + // +kubebuilder:validation:Optional + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type OauthHTTPParametersInitParameters struct { + + // Contains additional body string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + Body []OauthHTTPParametersBodyInitParameters `json:"body,omitempty" tf:"body,omitempty"` + + // Contains additional header parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + Header []OauthHTTPParametersHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Contains additional query string parameters for the connection. You can include up to 100 additional body string parameters per request. 
Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + QueryString []OauthHTTPParametersQueryStringInitParameters `json:"queryString,omitempty" tf:"query_string,omitempty"` +} + +type OauthHTTPParametersObservation struct { + + // Contains additional body string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + Body []OauthHTTPParametersBodyObservation `json:"body,omitempty" tf:"body,omitempty"` + + // Contains additional header parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + Header []OauthHTTPParametersHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // Contains additional query string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + QueryString []OauthHTTPParametersQueryStringObservation `json:"queryString,omitempty" tf:"query_string,omitempty"` +} + +type OauthHTTPParametersParameters struct { + + // Contains additional body string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + // +kubebuilder:validation:Optional + Body []OauthHTTPParametersBodyParameters `json:"body,omitempty" tf:"body,omitempty"` + + // Contains additional header parameters for the connection. 
You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + // +kubebuilder:validation:Optional + Header []OauthHTTPParametersHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Contains additional query string parameters for the connection. You can include up to 100 additional body string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB. Each parameter can contain the following: + // +kubebuilder:validation:Optional + QueryString []OauthHTTPParametersQueryStringParameters `json:"queryString,omitempty" tf:"query_string,omitempty"` +} + +type OauthHTTPParametersQueryStringInitParameters struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type OauthHTTPParametersQueryStringObservation struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type OauthHTTPParametersQueryStringParameters struct { + + // Specified whether the value is secret. + // +kubebuilder:validation:Optional + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. 
+ // +kubebuilder:validation:Optional + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type OauthInitParameters struct { + + // The URL to the authorization endpoint. + AuthorizationEndpoint *string `json:"authorizationEndpoint,omitempty" tf:"authorization_endpoint,omitempty"` + + // Contains the client parameters for OAuth authorization. Contains the following two parameters. + ClientParameters *ClientParametersInitParameters `json:"clientParameters,omitempty" tf:"client_parameters,omitempty"` + + // A password for the authorization. Created and stored in AWS Secrets Manager. + HTTPMethod *string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + + // OAuth Http Parameters are additional credentials used to sign the request to the authorization endpoint to exchange the OAuth Client information for an access token. Secret values are stored and managed by AWS Secrets Manager. A maximum of 1 are allowed. Documented below. + OauthHTTPParameters *OauthHTTPParametersInitParameters `json:"oauthHttpParameters,omitempty" tf:"oauth_http_parameters,omitempty"` +} + +type OauthObservation struct { + + // The URL to the authorization endpoint. + AuthorizationEndpoint *string `json:"authorizationEndpoint,omitempty" tf:"authorization_endpoint,omitempty"` + + // Contains the client parameters for OAuth authorization. Contains the following two parameters. + ClientParameters *ClientParametersObservation `json:"clientParameters,omitempty" tf:"client_parameters,omitempty"` + + // A password for the authorization. Created and stored in AWS Secrets Manager. + HTTPMethod *string `json:"httpMethod,omitempty" tf:"http_method,omitempty"` + + // OAuth Http Parameters are additional credentials used to sign the request to the authorization endpoint to exchange the OAuth Client information for an access token. Secret values are stored and managed by AWS Secrets Manager. A maximum of 1 are allowed. Documented below. 
+ OauthHTTPParameters *OauthHTTPParametersObservation `json:"oauthHttpParameters,omitempty" tf:"oauth_http_parameters,omitempty"` +} + +type OauthParameters struct { + + // The URL to the authorization endpoint. + // +kubebuilder:validation:Optional + AuthorizationEndpoint *string `json:"authorizationEndpoint" tf:"authorization_endpoint,omitempty"` + + // Contains the client parameters for OAuth authorization. Contains the following two parameters. + // +kubebuilder:validation:Optional + ClientParameters *ClientParametersParameters `json:"clientParameters,omitempty" tf:"client_parameters,omitempty"` + + // A password for the authorization. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + HTTPMethod *string `json:"httpMethod" tf:"http_method,omitempty"` + + // OAuth Http Parameters are additional credentials used to sign the request to the authorization endpoint to exchange the OAuth Client information for an access token. Secret values are stored and managed by AWS Secrets Manager. A maximum of 1 are allowed. Documented below. + // +kubebuilder:validation:Optional + OauthHTTPParameters *OauthHTTPParametersParameters `json:"oauthHttpParameters" tf:"oauth_http_parameters,omitempty"` +} + +type QueryStringInitParameters struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type QueryStringObservation struct { + + // Specified whether the value is secret. + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type QueryStringParameters struct { + + // Specified whether the value is secret. 
+ // +kubebuilder:validation:Optional + IsValueSecret *bool `json:"isValueSecret,omitempty" tf:"is_value_secret,omitempty"` + + // Header Name. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Header Value. Created and stored in AWS Secrets Manager. + // +kubebuilder:validation:Optional + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +// ConnectionSpec defines the desired state of Connection +type ConnectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConnectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConnectionInitParameters `json:"initProvider,omitempty"` +} + +// ConnectionStatus defines the observed state of Connection. +type ConnectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConnectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Connection is the Schema for the Connections API. Provides an EventBridge connection resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Connection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authParameters) || (has(self.initProvider) && has(self.initProvider.authParameters))",message="spec.forProvider.authParameters is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authorizationType) || (has(self.initProvider) && has(self.initProvider.authorizationType))",message="spec.forProvider.authorizationType is a required parameter" + Spec ConnectionSpec `json:"spec"` + Status ConnectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConnectionList contains a list of Connections +type ConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Connection `json:"items"` +} + +// Repository type metadata. +var ( + Connection_Kind = "Connection" + Connection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Connection_Kind}.String() + Connection_KindAPIVersion = Connection_Kind + "." 
+ CRDGroupVersion.String() + Connection_GroupVersionKind = CRDGroupVersion.WithKind(Connection_Kind) +) + +func init() { + SchemeBuilder.Register(&Connection{}, &ConnectionList{}) +} diff --git a/apis/cloudwatchevents/v1beta2/zz_generated.conversion_hubs.go b/apis/cloudwatchevents/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..e0dfc4e624 --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Connection) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Permission) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Target) Hub() {} diff --git a/apis/cloudwatchevents/v1beta2/zz_generated.deepcopy.go b/apis/cloudwatchevents/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9995f1b3a0 --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3882 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIKeyInitParameters) DeepCopyInto(out *APIKeyInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIKeyInitParameters. 
+func (in *APIKeyInitParameters) DeepCopy() *APIKeyInitParameters { + if in == nil { + return nil + } + out := new(APIKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIKeyObservation) DeepCopyInto(out *APIKeyObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIKeyObservation. +func (in *APIKeyObservation) DeepCopy() *APIKeyObservation { + if in == nil { + return nil + } + out := new(APIKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIKeyParameters) DeepCopyInto(out *APIKeyParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIKeyParameters. +func (in *APIKeyParameters) DeepCopy() *APIKeyParameters { + if in == nil { + return nil + } + out := new(APIKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthParametersInitParameters) DeepCopyInto(out *AuthParametersInitParameters) { + *out = *in + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(APIKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InvocationHTTPParameters != nil { + in, out := &in.InvocationHTTPParameters, &out.InvocationHTTPParameters + *out = new(InvocationHTTPParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Oauth != nil { + in, out := &in.Oauth, &out.Oauth + *out = new(OauthInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthParametersInitParameters. +func (in *AuthParametersInitParameters) DeepCopy() *AuthParametersInitParameters { + if in == nil { + return nil + } + out := new(AuthParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthParametersObservation) DeepCopyInto(out *AuthParametersObservation) { + *out = *in + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(APIKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicObservation) + (*in).DeepCopyInto(*out) + } + if in.InvocationHTTPParameters != nil { + in, out := &in.InvocationHTTPParameters, &out.InvocationHTTPParameters + *out = new(InvocationHTTPParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.Oauth != nil { + in, out := &in.Oauth, &out.Oauth + *out = new(OauthObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthParametersObservation. 
+func (in *AuthParametersObservation) DeepCopy() *AuthParametersObservation { + if in == nil { + return nil + } + out := new(AuthParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthParametersParameters) DeepCopyInto(out *AuthParametersParameters) { + *out = *in + if in.APIKey != nil { + in, out := &in.APIKey, &out.APIKey + *out = new(APIKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicParameters) + (*in).DeepCopyInto(*out) + } + if in.InvocationHTTPParameters != nil { + in, out := &in.InvocationHTTPParameters, &out.InvocationHTTPParameters + *out = new(InvocationHTTPParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.Oauth != nil { + in, out := &in.Oauth, &out.Oauth + *out = new(OauthParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthParametersParameters. +func (in *AuthParametersParameters) DeepCopy() *AuthParametersParameters { + if in == nil { + return nil + } + out := new(AuthParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicInitParameters) DeepCopyInto(out *BasicInitParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicInitParameters. 
+func (in *BasicInitParameters) DeepCopy() *BasicInitParameters { + if in == nil { + return nil + } + out := new(BasicInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicObservation) DeepCopyInto(out *BasicObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicObservation. +func (in *BasicObservation) DeepCopy() *BasicObservation { + if in == nil { + return nil + } + out := new(BasicObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicParameters) DeepCopyInto(out *BasicParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicParameters. +func (in *BasicParameters) DeepCopy() *BasicParameters { + if in == nil { + return nil + } + out := new(BasicParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BatchTargetInitParameters) DeepCopyInto(out *BatchTargetInitParameters) { + *out = *in + if in.ArraySize != nil { + in, out := &in.ArraySize, &out.ArraySize + *out = new(float64) + **out = **in + } + if in.JobAttempts != nil { + in, out := &in.JobAttempts, &out.JobAttempts + *out = new(float64) + **out = **in + } + if in.JobDefinition != nil { + in, out := &in.JobDefinition, &out.JobDefinition + *out = new(string) + **out = **in + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchTargetInitParameters. +func (in *BatchTargetInitParameters) DeepCopy() *BatchTargetInitParameters { + if in == nil { + return nil + } + out := new(BatchTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BatchTargetObservation) DeepCopyInto(out *BatchTargetObservation) { + *out = *in + if in.ArraySize != nil { + in, out := &in.ArraySize, &out.ArraySize + *out = new(float64) + **out = **in + } + if in.JobAttempts != nil { + in, out := &in.JobAttempts, &out.JobAttempts + *out = new(float64) + **out = **in + } + if in.JobDefinition != nil { + in, out := &in.JobDefinition, &out.JobDefinition + *out = new(string) + **out = **in + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchTargetObservation. +func (in *BatchTargetObservation) DeepCopy() *BatchTargetObservation { + if in == nil { + return nil + } + out := new(BatchTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BatchTargetParameters) DeepCopyInto(out *BatchTargetParameters) { + *out = *in + if in.ArraySize != nil { + in, out := &in.ArraySize, &out.ArraySize + *out = new(float64) + **out = **in + } + if in.JobAttempts != nil { + in, out := &in.JobAttempts, &out.JobAttempts + *out = new(float64) + **out = **in + } + if in.JobDefinition != nil { + in, out := &in.JobDefinition, &out.JobDefinition + *out = new(string) + **out = **in + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchTargetParameters. +func (in *BatchTargetParameters) DeepCopy() *BatchTargetParameters { + if in == nil { + return nil + } + out := new(BatchTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BodyInitParameters) DeepCopyInto(out *BodyInitParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BodyInitParameters. +func (in *BodyInitParameters) DeepCopy() *BodyInitParameters { + if in == nil { + return nil + } + out := new(BodyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BodyObservation) DeepCopyInto(out *BodyObservation) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BodyObservation. +func (in *BodyObservation) DeepCopy() *BodyObservation { + if in == nil { + return nil + } + out := new(BodyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BodyParameters) DeepCopyInto(out *BodyParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BodyParameters. +func (in *BodyParameters) DeepCopy() *BodyParameters { + if in == nil { + return nil + } + out := new(BodyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityProviderStrategyInitParameters) DeepCopyInto(out *CapacityProviderStrategyInitParameters) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(float64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyInitParameters. +func (in *CapacityProviderStrategyInitParameters) DeepCopy() *CapacityProviderStrategyInitParameters { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderStrategyObservation) DeepCopyInto(out *CapacityProviderStrategyObservation) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(float64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyObservation. +func (in *CapacityProviderStrategyObservation) DeepCopy() *CapacityProviderStrategyObservation { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityProviderStrategyParameters) DeepCopyInto(out *CapacityProviderStrategyParameters) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(float64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyParameters. +func (in *CapacityProviderStrategyParameters) DeepCopy() *CapacityProviderStrategyParameters { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientParametersInitParameters) DeepCopyInto(out *ClientParametersInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientParametersInitParameters. +func (in *ClientParametersInitParameters) DeepCopy() *ClientParametersInitParameters { + if in == nil { + return nil + } + out := new(ClientParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientParametersObservation) DeepCopyInto(out *ClientParametersObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientParametersObservation. 
+func (in *ClientParametersObservation) DeepCopy() *ClientParametersObservation { + if in == nil { + return nil + } + out := new(ClientParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientParametersParameters) DeepCopyInto(out *ClientParametersParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientParametersParameters. +func (in *ClientParametersParameters) DeepCopy() *ClientParametersParameters { + if in == nil { + return nil + } + out := new(ClientParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + if in.ValueRef != nil { + in, out := &in.ValueRef, &out.ValueRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ValueSelector != nil { + in, out := &in.ValueSelector, &out.ValueSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters. 
+func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation. +func (in *ConditionObservation) DeepCopy() *ConditionObservation { + if in == nil { + return nil + } + out := new(ConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + if in.ValueRef != nil { + in, out := &in.ValueRef, &out.ValueRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ValueSelector != nil { + in, out := &in.ValueSelector, &out.ValueSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters. 
+func (in *ConditionParameters) DeepCopy() *ConditionParameters { + if in == nil { + return nil + } + out := new(ConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Connection) DeepCopyInto(out *Connection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection. +func (in *Connection) DeepCopy() *Connection { + if in == nil { + return nil + } + out := new(Connection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Connection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionInitParameters) DeepCopyInto(out *ConnectionInitParameters) { + *out = *in + if in.AuthParameters != nil { + in, out := &in.AuthParameters, &out.AuthParameters + *out = new(AuthParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionInitParameters. 
+func (in *ConnectionInitParameters) DeepCopy() *ConnectionInitParameters { + if in == nil { + return nil + } + out := new(ConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionList) DeepCopyInto(out *ConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Connection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionList. +func (in *ConnectionList) DeepCopy() *ConnectionList { + if in == nil { + return nil + } + out := new(ConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionObservation) DeepCopyInto(out *ConnectionObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AuthParameters != nil { + in, out := &in.AuthParameters, &out.AuthParameters + *out = new(AuthParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionObservation. +func (in *ConnectionObservation) DeepCopy() *ConnectionObservation { + if in == nil { + return nil + } + out := new(ConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionParameters) DeepCopyInto(out *ConnectionParameters) { + *out = *in + if in.AuthParameters != nil { + in, out := &in.AuthParameters, &out.AuthParameters + *out = new(AuthParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionParameters. 
+func (in *ConnectionParameters) DeepCopy() *ConnectionParameters { + if in == nil { + return nil + } + out := new(ConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionSpec) DeepCopyInto(out *ConnectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionSpec. +func (in *ConnectionSpec) DeepCopy() *ConnectionSpec { + if in == nil { + return nil + } + out := new(ConnectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionStatus) DeepCopyInto(out *ConnectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStatus. +func (in *ConnectionStatus) DeepCopy() *ConnectionStatus { + if in == nil { + return nil + } + out := new(ConnectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterConfigInitParameters) DeepCopyInto(out *DeadLetterConfigInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfigInitParameters. 
+func (in *DeadLetterConfigInitParameters) DeepCopy() *DeadLetterConfigInitParameters { + if in == nil { + return nil + } + out := new(DeadLetterConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterConfigObservation) DeepCopyInto(out *DeadLetterConfigObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfigObservation. +func (in *DeadLetterConfigObservation) DeepCopy() *DeadLetterConfigObservation { + if in == nil { + return nil + } + out := new(DeadLetterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterConfigParameters) DeepCopyInto(out *DeadLetterConfigParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfigParameters. +func (in *DeadLetterConfigParameters) DeepCopy() *DeadLetterConfigParameters { + if in == nil { + return nil + } + out := new(DeadLetterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsTargetInitParameters) DeepCopyInto(out *EcsTargetInitParameters) { + *out = *in + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]CapacityProviderStrategyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableEcsManagedTags != nil { + in, out := &in.EnableEcsManagedTags, &out.EnableEcsManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OrderedPlacementStrategy != nil { + in, out := &in.OrderedPlacementStrategy, &out.OrderedPlacementStrategy + *out = make([]OrderedPlacementStrategyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementConstraint != nil { + in, out := &in.PlacementConstraint, &out.PlacementConstraint + *out = make([]PlacementConstraintInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + 
*out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskCount != nil { + in, out := &in.TaskCount, &out.TaskCount + *out = new(float64) + **out = **in + } + if in.TaskDefinitionArn != nil { + in, out := &in.TaskDefinitionArn, &out.TaskDefinitionArn + *out = new(string) + **out = **in + } + if in.TaskDefinitionArnRef != nil { + in, out := &in.TaskDefinitionArnRef, &out.TaskDefinitionArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TaskDefinitionArnSelector != nil { + in, out := &in.TaskDefinitionArnSelector, &out.TaskDefinitionArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsTargetInitParameters. +func (in *EcsTargetInitParameters) DeepCopy() *EcsTargetInitParameters { + if in == nil { + return nil + } + out := new(EcsTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsTargetObservation) DeepCopyInto(out *EcsTargetObservation) { + *out = *in + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]CapacityProviderStrategyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableEcsManagedTags != nil { + in, out := &in.EnableEcsManagedTags, &out.EnableEcsManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.OrderedPlacementStrategy != nil { + in, out := &in.OrderedPlacementStrategy, &out.OrderedPlacementStrategy + *out = make([]OrderedPlacementStrategyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementConstraint != nil { + in, out := &in.PlacementConstraint, &out.PlacementConstraint + *out = make([]PlacementConstraintObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + 
**out = **in + } + (*out)[key] = outVal + } + } + if in.TaskCount != nil { + in, out := &in.TaskCount, &out.TaskCount + *out = new(float64) + **out = **in + } + if in.TaskDefinitionArn != nil { + in, out := &in.TaskDefinitionArn, &out.TaskDefinitionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsTargetObservation. +func (in *EcsTargetObservation) DeepCopy() *EcsTargetObservation { + if in == nil { + return nil + } + out := new(EcsTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsTargetParameters) DeepCopyInto(out *EcsTargetParameters) { + *out = *in + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]CapacityProviderStrategyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableEcsManagedTags != nil { + in, out := &in.EnableEcsManagedTags, &out.EnableEcsManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.OrderedPlacementStrategy != nil { + in, out := &in.OrderedPlacementStrategy, &out.OrderedPlacementStrategy + *out = make([]OrderedPlacementStrategyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementConstraint != nil { + in, out := 
&in.PlacementConstraint, &out.PlacementConstraint + *out = make([]PlacementConstraintParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskCount != nil { + in, out := &in.TaskCount, &out.TaskCount + *out = new(float64) + **out = **in + } + if in.TaskDefinitionArn != nil { + in, out := &in.TaskDefinitionArn, &out.TaskDefinitionArn + *out = new(string) + **out = **in + } + if in.TaskDefinitionArnRef != nil { + in, out := &in.TaskDefinitionArnRef, &out.TaskDefinitionArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TaskDefinitionArnSelector != nil { + in, out := &in.TaskDefinitionArnSelector, &out.TaskDefinitionArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsTargetParameters. +func (in *EcsTargetParameters) DeepCopy() *EcsTargetParameters { + if in == nil { + return nil + } + out := new(EcsTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPTargetInitParameters) DeepCopyInto(out *HTTPTargetInitParameters) { + *out = *in + if in.HeaderParameters != nil { + in, out := &in.HeaderParameters, &out.HeaderParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PathParameterValues != nil { + in, out := &in.PathParameterValues, &out.PathParameterValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryStringParameters != nil { + in, out := &in.QueryStringParameters, &out.QueryStringParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPTargetInitParameters. +func (in *HTTPTargetInitParameters) DeepCopy() *HTTPTargetInitParameters { + if in == nil { + return nil + } + out := new(HTTPTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPTargetObservation) DeepCopyInto(out *HTTPTargetObservation) { + *out = *in + if in.HeaderParameters != nil { + in, out := &in.HeaderParameters, &out.HeaderParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PathParameterValues != nil { + in, out := &in.PathParameterValues, &out.PathParameterValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryStringParameters != nil { + in, out := &in.QueryStringParameters, &out.QueryStringParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPTargetObservation. +func (in *HTTPTargetObservation) DeepCopy() *HTTPTargetObservation { + if in == nil { + return nil + } + out := new(HTTPTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPTargetParameters) DeepCopyInto(out *HTTPTargetParameters) { + *out = *in + if in.HeaderParameters != nil { + in, out := &in.HeaderParameters, &out.HeaderParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PathParameterValues != nil { + in, out := &in.PathParameterValues, &out.PathParameterValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryStringParameters != nil { + in, out := &in.QueryStringParameters, &out.QueryStringParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPTargetParameters. +func (in *HTTPTargetParameters) DeepCopy() *HTTPTargetParameters { + if in == nil { + return nil + } + out := new(HTTPTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderInitParameters) DeepCopyInto(out *HeaderInitParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderInitParameters. +func (in *HeaderInitParameters) DeepCopy() *HeaderInitParameters { + if in == nil { + return nil + } + out := new(HeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderObservation) DeepCopyInto(out *HeaderObservation) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderObservation. +func (in *HeaderObservation) DeepCopy() *HeaderObservation { + if in == nil { + return nil + } + out := new(HeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderParameters) DeepCopyInto(out *HeaderParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderParameters. +func (in *HeaderParameters) DeepCopy() *HeaderParameters { + if in == nil { + return nil + } + out := new(HeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputTransformerInitParameters) DeepCopyInto(out *InputTransformerInitParameters) { + *out = *in + if in.InputPaths != nil { + in, out := &in.InputPaths, &out.InputPaths + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InputTemplate != nil { + in, out := &in.InputTemplate, &out.InputTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputTransformerInitParameters. +func (in *InputTransformerInitParameters) DeepCopy() *InputTransformerInitParameters { + if in == nil { + return nil + } + out := new(InputTransformerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputTransformerObservation) DeepCopyInto(out *InputTransformerObservation) { + *out = *in + if in.InputPaths != nil { + in, out := &in.InputPaths, &out.InputPaths + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InputTemplate != nil { + in, out := &in.InputTemplate, &out.InputTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputTransformerObservation. +func (in *InputTransformerObservation) DeepCopy() *InputTransformerObservation { + if in == nil { + return nil + } + out := new(InputTransformerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputTransformerParameters) DeepCopyInto(out *InputTransformerParameters) { + *out = *in + if in.InputPaths != nil { + in, out := &in.InputPaths, &out.InputPaths + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InputTemplate != nil { + in, out := &in.InputTemplate, &out.InputTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputTransformerParameters. +func (in *InputTransformerParameters) DeepCopy() *InputTransformerParameters { + if in == nil { + return nil + } + out := new(InputTransformerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InvocationHTTPParametersInitParameters) DeepCopyInto(out *InvocationHTTPParametersInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = make([]BodyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = make([]QueryStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvocationHTTPParametersInitParameters. +func (in *InvocationHTTPParametersInitParameters) DeepCopy() *InvocationHTTPParametersInitParameters { + if in == nil { + return nil + } + out := new(InvocationHTTPParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InvocationHTTPParametersObservation) DeepCopyInto(out *InvocationHTTPParametersObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = make([]BodyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = make([]QueryStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvocationHTTPParametersObservation. 
+func (in *InvocationHTTPParametersObservation) DeepCopy() *InvocationHTTPParametersObservation { + if in == nil { + return nil + } + out := new(InvocationHTTPParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InvocationHTTPParametersParameters) DeepCopyInto(out *InvocationHTTPParametersParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = make([]BodyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = make([]QueryStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvocationHTTPParametersParameters. +func (in *InvocationHTTPParametersParameters) DeepCopy() *InvocationHTTPParametersParameters { + if in == nil { + return nil + } + out := new(InvocationHTTPParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisTargetInitParameters) DeepCopyInto(out *KinesisTargetInitParameters) { + *out = *in + if in.PartitionKeyPath != nil { + in, out := &in.PartitionKeyPath, &out.PartitionKeyPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisTargetInitParameters. 
+func (in *KinesisTargetInitParameters) DeepCopy() *KinesisTargetInitParameters { + if in == nil { + return nil + } + out := new(KinesisTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisTargetObservation) DeepCopyInto(out *KinesisTargetObservation) { + *out = *in + if in.PartitionKeyPath != nil { + in, out := &in.PartitionKeyPath, &out.PartitionKeyPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisTargetObservation. +func (in *KinesisTargetObservation) DeepCopy() *KinesisTargetObservation { + if in == nil { + return nil + } + out := new(KinesisTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisTargetParameters) DeepCopyInto(out *KinesisTargetParameters) { + *out = *in + if in.PartitionKeyPath != nil { + in, out := &in.PartitionKeyPath, &out.PartitionKeyPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisTargetParameters. +func (in *KinesisTargetParameters) DeepCopy() *KinesisTargetParameters { + if in == nil { + return nil + } + out := new(KinesisTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationInitParameters) DeepCopyInto(out *NetworkConfigurationInitParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationInitParameters. +func (in *NetworkConfigurationInitParameters) DeepCopy() *NetworkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationObservation) DeepCopyInto(out *NetworkConfigurationObservation) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationObservation. +func (in *NetworkConfigurationObservation) DeepCopy() *NetworkConfigurationObservation { + if in == nil { + return nil + } + out := new(NetworkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfigurationParameters) DeepCopyInto(out *NetworkConfigurationParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationParameters. 
+func (in *NetworkConfigurationParameters) DeepCopy() *NetworkConfigurationParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersBodyInitParameters) DeepCopyInto(out *OauthHTTPParametersBodyInitParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersBodyInitParameters. +func (in *OauthHTTPParametersBodyInitParameters) DeepCopy() *OauthHTTPParametersBodyInitParameters { + if in == nil { + return nil + } + out := new(OauthHTTPParametersBodyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersBodyObservation) DeepCopyInto(out *OauthHTTPParametersBodyObservation) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersBodyObservation. 
+func (in *OauthHTTPParametersBodyObservation) DeepCopy() *OauthHTTPParametersBodyObservation { + if in == nil { + return nil + } + out := new(OauthHTTPParametersBodyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersBodyParameters) DeepCopyInto(out *OauthHTTPParametersBodyParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersBodyParameters. +func (in *OauthHTTPParametersBodyParameters) DeepCopy() *OauthHTTPParametersBodyParameters { + if in == nil { + return nil + } + out := new(OauthHTTPParametersBodyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersHeaderInitParameters) DeepCopyInto(out *OauthHTTPParametersHeaderInitParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersHeaderInitParameters. 
+func (in *OauthHTTPParametersHeaderInitParameters) DeepCopy() *OauthHTTPParametersHeaderInitParameters { + if in == nil { + return nil + } + out := new(OauthHTTPParametersHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersHeaderObservation) DeepCopyInto(out *OauthHTTPParametersHeaderObservation) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersHeaderObservation. +func (in *OauthHTTPParametersHeaderObservation) DeepCopy() *OauthHTTPParametersHeaderObservation { + if in == nil { + return nil + } + out := new(OauthHTTPParametersHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersHeaderParameters) DeepCopyInto(out *OauthHTTPParametersHeaderParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersHeaderParameters. 
+func (in *OauthHTTPParametersHeaderParameters) DeepCopy() *OauthHTTPParametersHeaderParameters { + if in == nil { + return nil + } + out := new(OauthHTTPParametersHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersInitParameters) DeepCopyInto(out *OauthHTTPParametersInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = make([]OauthHTTPParametersBodyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]OauthHTTPParametersHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = make([]OauthHTTPParametersQueryStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersInitParameters. +func (in *OauthHTTPParametersInitParameters) DeepCopy() *OauthHTTPParametersInitParameters { + if in == nil { + return nil + } + out := new(OauthHTTPParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OauthHTTPParametersObservation) DeepCopyInto(out *OauthHTTPParametersObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = make([]OauthHTTPParametersBodyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]OauthHTTPParametersHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = make([]OauthHTTPParametersQueryStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersObservation. +func (in *OauthHTTPParametersObservation) DeepCopy() *OauthHTTPParametersObservation { + if in == nil { + return nil + } + out := new(OauthHTTPParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersParameters) DeepCopyInto(out *OauthHTTPParametersParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = make([]OauthHTTPParametersBodyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]OauthHTTPParametersHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = make([]OauthHTTPParametersQueryStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersParameters. 
+func (in *OauthHTTPParametersParameters) DeepCopy() *OauthHTTPParametersParameters { + if in == nil { + return nil + } + out := new(OauthHTTPParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersQueryStringInitParameters) DeepCopyInto(out *OauthHTTPParametersQueryStringInitParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersQueryStringInitParameters. +func (in *OauthHTTPParametersQueryStringInitParameters) DeepCopy() *OauthHTTPParametersQueryStringInitParameters { + if in == nil { + return nil + } + out := new(OauthHTTPParametersQueryStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersQueryStringObservation) DeepCopyInto(out *OauthHTTPParametersQueryStringObservation) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersQueryStringObservation. 
+func (in *OauthHTTPParametersQueryStringObservation) DeepCopy() *OauthHTTPParametersQueryStringObservation { + if in == nil { + return nil + } + out := new(OauthHTTPParametersQueryStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthHTTPParametersQueryStringParameters) DeepCopyInto(out *OauthHTTPParametersQueryStringParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthHTTPParametersQueryStringParameters. +func (in *OauthHTTPParametersQueryStringParameters) DeepCopy() *OauthHTTPParametersQueryStringParameters { + if in == nil { + return nil + } + out := new(OauthHTTPParametersQueryStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OauthInitParameters) DeepCopyInto(out *OauthInitParameters) { + *out = *in + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientParameters != nil { + in, out := &in.ClientParameters, &out.ClientParameters + *out = new(ClientParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = new(string) + **out = **in + } + if in.OauthHTTPParameters != nil { + in, out := &in.OauthHTTPParameters, &out.OauthHTTPParameters + *out = new(OauthHTTPParametersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthInitParameters. +func (in *OauthInitParameters) DeepCopy() *OauthInitParameters { + if in == nil { + return nil + } + out := new(OauthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthObservation) DeepCopyInto(out *OauthObservation) { + *out = *in + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientParameters != nil { + in, out := &in.ClientParameters, &out.ClientParameters + *out = new(ClientParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = new(string) + **out = **in + } + if in.OauthHTTPParameters != nil { + in, out := &in.OauthHTTPParameters, &out.OauthHTTPParameters + *out = new(OauthHTTPParametersObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthObservation. 
+func (in *OauthObservation) DeepCopy() *OauthObservation { + if in == nil { + return nil + } + out := new(OauthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OauthParameters) DeepCopyInto(out *OauthParameters) { + *out = *in + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientParameters != nil { + in, out := &in.ClientParameters, &out.ClientParameters + *out = new(ClientParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPMethod != nil { + in, out := &in.HTTPMethod, &out.HTTPMethod + *out = new(string) + **out = **in + } + if in.OauthHTTPParameters != nil { + in, out := &in.OauthHTTPParameters, &out.OauthHTTPParameters + *out = new(OauthHTTPParametersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OauthParameters. +func (in *OauthParameters) DeepCopy() *OauthParameters { + if in == nil { + return nil + } + out := new(OauthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedPlacementStrategyInitParameters) DeepCopyInto(out *OrderedPlacementStrategyInitParameters) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedPlacementStrategyInitParameters. 
+func (in *OrderedPlacementStrategyInitParameters) DeepCopy() *OrderedPlacementStrategyInitParameters { + if in == nil { + return nil + } + out := new(OrderedPlacementStrategyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedPlacementStrategyObservation) DeepCopyInto(out *OrderedPlacementStrategyObservation) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedPlacementStrategyObservation. +func (in *OrderedPlacementStrategyObservation) DeepCopy() *OrderedPlacementStrategyObservation { + if in == nil { + return nil + } + out := new(OrderedPlacementStrategyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedPlacementStrategyParameters) DeepCopyInto(out *OrderedPlacementStrategyParameters) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedPlacementStrategyParameters. +func (in *OrderedPlacementStrategyParameters) DeepCopy() *OrderedPlacementStrategyParameters { + if in == nil { + return nil + } + out := new(OrderedPlacementStrategyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Permission) DeepCopyInto(out *Permission) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Permission. +func (in *Permission) DeepCopy() *Permission { + if in == nil { + return nil + } + out := new(Permission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Permission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionInitParameters) DeepCopyInto(out *PermissionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventBusName != nil { + in, out := &in.EventBusName, &out.EventBusName + *out = new(string) + **out = **in + } + if in.EventBusNameRef != nil { + in, out := &in.EventBusNameRef, &out.EventBusNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventBusNameSelector != nil { + in, out := &in.EventBusNameSelector, &out.EventBusNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in + } + if in.StatementID != nil { + in, out := &in.StatementID, &out.StatementID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionInitParameters. 
+func (in *PermissionInitParameters) DeepCopy() *PermissionInitParameters { + if in == nil { + return nil + } + out := new(PermissionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionList) DeepCopyInto(out *PermissionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Permission, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionList. +func (in *PermissionList) DeepCopy() *PermissionList { + if in == nil { + return nil + } + out := new(PermissionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PermissionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PermissionObservation) DeepCopyInto(out *PermissionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.EventBusName != nil { + in, out := &in.EventBusName, &out.EventBusName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in + } + if in.StatementID != nil { + in, out := &in.StatementID, &out.StatementID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionObservation. +func (in *PermissionObservation) DeepCopy() *PermissionObservation { + if in == nil { + return nil + } + out := new(PermissionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PermissionParameters) DeepCopyInto(out *PermissionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.EventBusName != nil { + in, out := &in.EventBusName, &out.EventBusName + *out = new(string) + **out = **in + } + if in.EventBusNameRef != nil { + in, out := &in.EventBusNameRef, &out.EventBusNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventBusNameSelector != nil { + in, out := &in.EventBusNameSelector, &out.EventBusNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StatementID != nil { + in, out := &in.StatementID, &out.StatementID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionParameters. +func (in *PermissionParameters) DeepCopy() *PermissionParameters { + if in == nil { + return nil + } + out := new(PermissionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionSpec) DeepCopyInto(out *PermissionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionSpec. 
+func (in *PermissionSpec) DeepCopy() *PermissionSpec { + if in == nil { + return nil + } + out := new(PermissionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionStatus) DeepCopyInto(out *PermissionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionStatus. +func (in *PermissionStatus) DeepCopy() *PermissionStatus { + if in == nil { + return nil + } + out := new(PermissionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineParameterListInitParameters) DeepCopyInto(out *PipelineParameterListInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameterListInitParameters. +func (in *PipelineParameterListInitParameters) DeepCopy() *PipelineParameterListInitParameters { + if in == nil { + return nil + } + out := new(PipelineParameterListInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineParameterListObservation) DeepCopyInto(out *PipelineParameterListObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameterListObservation. +func (in *PipelineParameterListObservation) DeepCopy() *PipelineParameterListObservation { + if in == nil { + return nil + } + out := new(PipelineParameterListObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineParameterListParameters) DeepCopyInto(out *PipelineParameterListParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameterListParameters. +func (in *PipelineParameterListParameters) DeepCopy() *PipelineParameterListParameters { + if in == nil { + return nil + } + out := new(PipelineParameterListParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementConstraintInitParameters) DeepCopyInto(out *PlacementConstraintInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraintInitParameters. 
+func (in *PlacementConstraintInitParameters) DeepCopy() *PlacementConstraintInitParameters { + if in == nil { + return nil + } + out := new(PlacementConstraintInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementConstraintObservation) DeepCopyInto(out *PlacementConstraintObservation) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraintObservation. +func (in *PlacementConstraintObservation) DeepCopy() *PlacementConstraintObservation { + if in == nil { + return nil + } + out := new(PlacementConstraintObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementConstraintParameters) DeepCopyInto(out *PlacementConstraintParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraintParameters. +func (in *PlacementConstraintParameters) DeepCopy() *PlacementConstraintParameters { + if in == nil { + return nil + } + out := new(PlacementConstraintParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryStringInitParameters) DeepCopyInto(out *QueryStringInitParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringInitParameters. +func (in *QueryStringInitParameters) DeepCopy() *QueryStringInitParameters { + if in == nil { + return nil + } + out := new(QueryStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryStringObservation) DeepCopyInto(out *QueryStringObservation) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringObservation. +func (in *QueryStringObservation) DeepCopy() *QueryStringObservation { + if in == nil { + return nil + } + out := new(QueryStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryStringParameters) DeepCopyInto(out *QueryStringParameters) { + *out = *in + if in.IsValueSecret != nil { + in, out := &in.IsValueSecret, &out.IsValueSecret + *out = new(bool) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringParameters. +func (in *QueryStringParameters) DeepCopy() *QueryStringParameters { + if in == nil { + return nil + } + out := new(QueryStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedshiftTargetInitParameters) DeepCopyInto(out *RedshiftTargetInitParameters) { + *out = *in + if in.DBUser != nil { + in, out := &in.DBUser, &out.DBUser + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.SQL != nil { + in, out := &in.SQL, &out.SQL + *out = new(string) + **out = **in + } + if in.SecretsManagerArn != nil { + in, out := &in.SecretsManagerArn, &out.SecretsManagerArn + *out = new(string) + **out = **in + } + if in.StatementName != nil { + in, out := &in.StatementName, &out.StatementName + *out = new(string) + **out = **in + } + if in.WithEvent != nil { + in, out := &in.WithEvent, &out.WithEvent + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftTargetInitParameters. 
+func (in *RedshiftTargetInitParameters) DeepCopy() *RedshiftTargetInitParameters { + if in == nil { + return nil + } + out := new(RedshiftTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedshiftTargetObservation) DeepCopyInto(out *RedshiftTargetObservation) { + *out = *in + if in.DBUser != nil { + in, out := &in.DBUser, &out.DBUser + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.SQL != nil { + in, out := &in.SQL, &out.SQL + *out = new(string) + **out = **in + } + if in.SecretsManagerArn != nil { + in, out := &in.SecretsManagerArn, &out.SecretsManagerArn + *out = new(string) + **out = **in + } + if in.StatementName != nil { + in, out := &in.StatementName, &out.StatementName + *out = new(string) + **out = **in + } + if in.WithEvent != nil { + in, out := &in.WithEvent, &out.WithEvent + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftTargetObservation. +func (in *RedshiftTargetObservation) DeepCopy() *RedshiftTargetObservation { + if in == nil { + return nil + } + out := new(RedshiftTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedshiftTargetParameters) DeepCopyInto(out *RedshiftTargetParameters) { + *out = *in + if in.DBUser != nil { + in, out := &in.DBUser, &out.DBUser + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.SQL != nil { + in, out := &in.SQL, &out.SQL + *out = new(string) + **out = **in + } + if in.SecretsManagerArn != nil { + in, out := &in.SecretsManagerArn, &out.SecretsManagerArn + *out = new(string) + **out = **in + } + if in.StatementName != nil { + in, out := &in.StatementName, &out.StatementName + *out = new(string) + **out = **in + } + if in.WithEvent != nil { + in, out := &in.WithEvent, &out.WithEvent + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftTargetParameters. +func (in *RedshiftTargetParameters) DeepCopy() *RedshiftTargetParameters { + if in == nil { + return nil + } + out := new(RedshiftTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyInitParameters) DeepCopyInto(out *RetryPolicyInitParameters) { + *out = *in + if in.MaximumEventAgeInSeconds != nil { + in, out := &in.MaximumEventAgeInSeconds, &out.MaximumEventAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyInitParameters. +func (in *RetryPolicyInitParameters) DeepCopy() *RetryPolicyInitParameters { + if in == nil { + return nil + } + out := new(RetryPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RetryPolicyObservation) DeepCopyInto(out *RetryPolicyObservation) { + *out = *in + if in.MaximumEventAgeInSeconds != nil { + in, out := &in.MaximumEventAgeInSeconds, &out.MaximumEventAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyObservation. +func (in *RetryPolicyObservation) DeepCopy() *RetryPolicyObservation { + if in == nil { + return nil + } + out := new(RetryPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyParameters) DeepCopyInto(out *RetryPolicyParameters) { + *out = *in + if in.MaximumEventAgeInSeconds != nil { + in, out := &in.MaximumEventAgeInSeconds, &out.MaximumEventAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyParameters. +func (in *RetryPolicyParameters) DeepCopy() *RetryPolicyParameters { + if in == nil { + return nil + } + out := new(RetryPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunCommandTargetsInitParameters) DeepCopyInto(out *RunCommandTargetsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunCommandTargetsInitParameters. +func (in *RunCommandTargetsInitParameters) DeepCopy() *RunCommandTargetsInitParameters { + if in == nil { + return nil + } + out := new(RunCommandTargetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunCommandTargetsObservation) DeepCopyInto(out *RunCommandTargetsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunCommandTargetsObservation. +func (in *RunCommandTargetsObservation) DeepCopy() *RunCommandTargetsObservation { + if in == nil { + return nil + } + out := new(RunCommandTargetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunCommandTargetsParameters) DeepCopyInto(out *RunCommandTargetsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunCommandTargetsParameters. +func (in *RunCommandTargetsParameters) DeepCopy() *RunCommandTargetsParameters { + if in == nil { + return nil + } + out := new(RunCommandTargetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SagemakerPipelineTargetInitParameters) DeepCopyInto(out *SagemakerPipelineTargetInitParameters) { + *out = *in + if in.PipelineParameterList != nil { + in, out := &in.PipelineParameterList, &out.PipelineParameterList + *out = make([]PipelineParameterListInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SagemakerPipelineTargetInitParameters. +func (in *SagemakerPipelineTargetInitParameters) DeepCopy() *SagemakerPipelineTargetInitParameters { + if in == nil { + return nil + } + out := new(SagemakerPipelineTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SagemakerPipelineTargetObservation) DeepCopyInto(out *SagemakerPipelineTargetObservation) { + *out = *in + if in.PipelineParameterList != nil { + in, out := &in.PipelineParameterList, &out.PipelineParameterList + *out = make([]PipelineParameterListObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SagemakerPipelineTargetObservation. +func (in *SagemakerPipelineTargetObservation) DeepCopy() *SagemakerPipelineTargetObservation { + if in == nil { + return nil + } + out := new(SagemakerPipelineTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SagemakerPipelineTargetParameters) DeepCopyInto(out *SagemakerPipelineTargetParameters) { + *out = *in + if in.PipelineParameterList != nil { + in, out := &in.PipelineParameterList, &out.PipelineParameterList + *out = make([]PipelineParameterListParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SagemakerPipelineTargetParameters. +func (in *SagemakerPipelineTargetParameters) DeepCopy() *SagemakerPipelineTargetParameters { + if in == nil { + return nil + } + out := new(SagemakerPipelineTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqsTargetInitParameters) DeepCopyInto(out *SqsTargetInitParameters) { + *out = *in + if in.MessageGroupID != nil { + in, out := &in.MessageGroupID, &out.MessageGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqsTargetInitParameters. 
+func (in *SqsTargetInitParameters) DeepCopy() *SqsTargetInitParameters { + if in == nil { + return nil + } + out := new(SqsTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqsTargetObservation) DeepCopyInto(out *SqsTargetObservation) { + *out = *in + if in.MessageGroupID != nil { + in, out := &in.MessageGroupID, &out.MessageGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqsTargetObservation. +func (in *SqsTargetObservation) DeepCopy() *SqsTargetObservation { + if in == nil { + return nil + } + out := new(SqsTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqsTargetParameters) DeepCopyInto(out *SqsTargetParameters) { + *out = *in + if in.MessageGroupID != nil { + in, out := &in.MessageGroupID, &out.MessageGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqsTargetParameters. +func (in *SqsTargetParameters) DeepCopy() *SqsTargetParameters { + if in == nil { + return nil + } + out := new(SqsTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Target) DeepCopyInto(out *Target) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Target. 
+func (in *Target) DeepCopy() *Target { + if in == nil { + return nil + } + out := new(Target) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Target) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetInitParameters) DeepCopyInto(out *TargetInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BatchTarget != nil { + in, out := &in.BatchTarget, &out.BatchTarget + *out = new(BatchTargetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EcsTarget != nil { + in, out := &in.EcsTarget, &out.EcsTarget + *out = new(EcsTargetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventBusName != nil { + in, out := &in.EventBusName, &out.EventBusName + *out = new(string) + **out = **in + } + if in.EventBusNameRef != nil { + in, out := &in.EventBusNameRef, &out.EventBusNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventBusNameSelector != nil { + in, out := &in.EventBusNameSelector, &out.EventBusNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.HTTPTarget != nil { + in, out := &in.HTTPTarget, &out.HTTPTarget + *out = new(HTTPTargetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(string) + **out = **in + } + if in.InputPath != nil { + in, out := &in.InputPath, &out.InputPath + *out = new(string) + **out 
= **in + } + if in.InputTransformer != nil { + in, out := &in.InputTransformer, &out.InputTransformer + *out = new(InputTransformerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisTarget != nil { + in, out := &in.KinesisTarget, &out.KinesisTarget + *out = new(KinesisTargetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RedshiftTarget != nil { + in, out := &in.RedshiftTarget, &out.RedshiftTarget + *out = new(RedshiftTargetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(string) + **out = **in + } + if in.RuleRef != nil { + in, out := &in.RuleRef, &out.RuleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RuleSelector != nil { + in, out := &in.RuleSelector, &out.RuleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RunCommandTargets != nil { + in, out := &in.RunCommandTargets, &out.RunCommandTargets + *out = make([]RunCommandTargetsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SagemakerPipelineTarget != nil { + in, out := &in.SagemakerPipelineTarget, &out.SagemakerPipelineTarget + *out = new(SagemakerPipelineTargetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SqsTarget != nil { + in, out := &in.SqsTarget, &out.SqsTarget + *out = new(SqsTargetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetID != nil { + in, out := &in.TargetID, &out.TargetID + *out 
= new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetInitParameters. +func (in *TargetInitParameters) DeepCopy() *TargetInitParameters { + if in == nil { + return nil + } + out := new(TargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetList) DeepCopyInto(out *TargetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Target, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetList. +func (in *TargetList) DeepCopy() *TargetList { + if in == nil { + return nil + } + out := new(TargetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TargetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetObservation) DeepCopyInto(out *TargetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BatchTarget != nil { + in, out := &in.BatchTarget, &out.BatchTarget + *out = new(BatchTargetObservation) + (*in).DeepCopyInto(*out) + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.EcsTarget != nil { + in, out := &in.EcsTarget, &out.EcsTarget + *out = new(EcsTargetObservation) + (*in).DeepCopyInto(*out) + } + if in.EventBusName != nil { + in, out := &in.EventBusName, &out.EventBusName + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.HTTPTarget != nil { + in, out := &in.HTTPTarget, &out.HTTPTarget + *out = new(HTTPTargetObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(string) + **out = **in + } + if in.InputPath != nil { + in, out := &in.InputPath, &out.InputPath + *out = new(string) + **out = **in + } + if in.InputTransformer != nil { + in, out := &in.InputTransformer, &out.InputTransformer + *out = new(InputTransformerObservation) + (*in).DeepCopyInto(*out) + } + if in.KinesisTarget != nil { + in, out := &in.KinesisTarget, &out.KinesisTarget + *out = new(KinesisTargetObservation) + (*in).DeepCopyInto(*out) + } + if in.RedshiftTarget != nil { + in, out := &in.RedshiftTarget, &out.RedshiftTarget + *out = new(RedshiftTargetObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out 
= **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(string) + **out = **in + } + if in.RunCommandTargets != nil { + in, out := &in.RunCommandTargets, &out.RunCommandTargets + *out = make([]RunCommandTargetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SagemakerPipelineTarget != nil { + in, out := &in.SagemakerPipelineTarget, &out.SagemakerPipelineTarget + *out = new(SagemakerPipelineTargetObservation) + (*in).DeepCopyInto(*out) + } + if in.SqsTarget != nil { + in, out := &in.SqsTarget, &out.SqsTarget + *out = new(SqsTargetObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetID != nil { + in, out := &in.TargetID, &out.TargetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObservation. +func (in *TargetObservation) DeepCopy() *TargetObservation { + if in == nil { + return nil + } + out := new(TargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetParameters) DeepCopyInto(out *TargetParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BatchTarget != nil { + in, out := &in.BatchTarget, &out.BatchTarget + *out = new(BatchTargetParameters) + (*in).DeepCopyInto(*out) + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.EcsTarget != nil { + in, out := &in.EcsTarget, &out.EcsTarget + *out = new(EcsTargetParameters) + (*in).DeepCopyInto(*out) + } + if in.EventBusName != nil { + in, out := &in.EventBusName, &out.EventBusName + *out = new(string) + **out = **in + } + if in.EventBusNameRef != nil { + in, out := &in.EventBusNameRef, &out.EventBusNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventBusNameSelector != nil { + in, out := &in.EventBusNameSelector, &out.EventBusNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.HTTPTarget != nil { + in, out := &in.HTTPTarget, &out.HTTPTarget + *out = new(HTTPTargetParameters) + (*in).DeepCopyInto(*out) + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(string) + **out = **in + } + if in.InputPath != nil { + in, out := &in.InputPath, &out.InputPath + *out = new(string) + **out = **in + } + if in.InputTransformer != nil { + in, out := &in.InputTransformer, &out.InputTransformer + *out = new(InputTransformerParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisTarget != nil { + in, out := &in.KinesisTarget, &out.KinesisTarget + *out = new(KinesisTargetParameters) + (*in).DeepCopyInto(*out) + } + if in.RedshiftTarget != nil { + in, out := &in.RedshiftTarget, &out.RedshiftTarget + *out = new(RedshiftTargetParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + 
in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(string) + **out = **in + } + if in.RuleRef != nil { + in, out := &in.RuleRef, &out.RuleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RuleSelector != nil { + in, out := &in.RuleSelector, &out.RuleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RunCommandTargets != nil { + in, out := &in.RunCommandTargets, &out.RunCommandTargets + *out = make([]RunCommandTargetsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SagemakerPipelineTarget != nil { + in, out := &in.SagemakerPipelineTarget, &out.SagemakerPipelineTarget + *out = new(SagemakerPipelineTargetParameters) + (*in).DeepCopyInto(*out) + } + if in.SqsTarget != nil { + in, out := &in.SqsTarget, &out.SqsTarget + *out = new(SqsTargetParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetID != nil { + in, out := &in.TargetID, &out.TargetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetParameters. +func (in *TargetParameters) DeepCopy() *TargetParameters { + if in == nil { + return nil + } + out := new(TargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetSpec) DeepCopyInto(out *TargetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSpec. +func (in *TargetSpec) DeepCopy() *TargetSpec { + if in == nil { + return nil + } + out := new(TargetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetStatus. +func (in *TargetStatus) DeepCopy() *TargetStatus { + if in == nil { + return nil + } + out := new(TargetStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cloudwatchevents/v1beta2/zz_generated.managed.go b/apis/cloudwatchevents/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..79071d37bc --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Connection. +func (mg *Connection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Connection. +func (mg *Connection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Connection. 
+func (mg *Connection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Connection. +func (mg *Connection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Connection. +func (mg *Connection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Connection. +func (mg *Connection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Connection. +func (mg *Connection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Connection. +func (mg *Connection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Connection. +func (mg *Connection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Connection. +func (mg *Connection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Connection. +func (mg *Connection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Connection. +func (mg *Connection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Permission. +func (mg *Permission) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Permission. 
+func (mg *Permission) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Permission. +func (mg *Permission) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Permission. +func (mg *Permission) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Permission. +func (mg *Permission) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Permission. +func (mg *Permission) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Permission. +func (mg *Permission) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Permission. +func (mg *Permission) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Permission. +func (mg *Permission) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Permission. +func (mg *Permission) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Permission. +func (mg *Permission) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Permission. +func (mg *Permission) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Target. +func (mg *Target) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Target. 
+func (mg *Target) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Target. +func (mg *Target) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Target. +func (mg *Target) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Target. +func (mg *Target) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Target. +func (mg *Target) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Target. +func (mg *Target) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Target. +func (mg *Target) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Target. +func (mg *Target) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Target. +func (mg *Target) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Target. +func (mg *Target) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Target. 
+func (mg *Target) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cloudwatchevents/v1beta2/zz_generated.managedlist.go b/apis/cloudwatchevents/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..34975ae6c4 --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConnectionList. +func (l *ConnectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PermissionList. +func (l *PermissionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TargetList. +func (l *TargetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cloudwatchevents/v1beta2/zz_generated.resolvers.go b/apis/cloudwatchevents/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..985f3a50a7 --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,279 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Permission) ResolveReferences( // ResolveReferences of this Permission. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Condition != nil { + { + m, l, err = apisresolver.GetManagedResource("organizations.aws.upbound.io", "v1beta1", "Organization", "OrganizationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Condition.Value), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Condition.ValueRef, + Selector: mg.Spec.ForProvider.Condition.ValueSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Condition.Value") + } + mg.Spec.ForProvider.Condition.Value = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Condition.ValueRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta1", "Bus", "BusList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.EventBusName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventBusNameRef, + Selector: mg.Spec.ForProvider.EventBusNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventBusName") + } + mg.Spec.ForProvider.EventBusName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventBusNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Condition != nil { + { + m, l, err = apisresolver.GetManagedResource("organizations.aws.upbound.io", "v1beta1", "Organization", "OrganizationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Condition.Value), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Condition.ValueRef, + Selector: mg.Spec.InitProvider.Condition.ValueSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Condition.Value") + } + mg.Spec.InitProvider.Condition.Value = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Condition.ValueRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta1", "Bus", "BusList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventBusName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventBusNameRef, + Selector: mg.Spec.InitProvider.EventBusNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.EventBusName") + } + mg.Spec.InitProvider.EventBusName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventBusNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Target. +func (mg *Target) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.EcsTarget != nil { + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "TaskDefinition", "TaskDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EcsTarget.TaskDefinitionArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.EcsTarget.TaskDefinitionArnRef, + Selector: mg.Spec.ForProvider.EcsTarget.TaskDefinitionArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EcsTarget.TaskDefinitionArn") + } + mg.Spec.ForProvider.EcsTarget.TaskDefinitionArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EcsTarget.TaskDefinitionArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta1", "Bus", "BusList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventBusName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventBusNameRef, + Selector: mg.Spec.ForProvider.EventBusNameSelector, + To: reference.To{List: l, 
Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventBusName") + } + mg.Spec.ForProvider.EventBusName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventBusNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta1", "Rule", "RuleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Rule), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RuleRef, + Selector: mg.Spec.ForProvider.RuleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Rule") + } + mg.Spec.ForProvider.Rule = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RuleRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.EcsTarget != nil { + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "TaskDefinition", "TaskDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the 
reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EcsTarget.TaskDefinitionArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.EcsTarget.TaskDefinitionArnRef, + Selector: mg.Spec.InitProvider.EcsTarget.TaskDefinitionArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EcsTarget.TaskDefinitionArn") + } + mg.Spec.InitProvider.EcsTarget.TaskDefinitionArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EcsTarget.TaskDefinitionArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta1", "Bus", "BusList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventBusName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventBusNameRef, + Selector: mg.Spec.InitProvider.EventBusNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventBusName") + } + mg.Spec.InitProvider.EventBusName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventBusNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + 
Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cloudwatchevents.aws.upbound.io", "v1beta1", "Rule", "RuleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Rule), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.RuleRef, + Selector: mg.Spec.InitProvider.RuleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Rule") + } + mg.Spec.InitProvider.Rule = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RuleRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/cloudwatchevents/v1beta2/zz_groupversion_info.go b/apis/cloudwatchevents/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..268479bf11 --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cloudwatchevents.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "cloudwatchevents.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cloudwatchevents/v1beta2/zz_permission_terraformed.go b/apis/cloudwatchevents/v1beta2/zz_permission_terraformed.go new file mode 100755 index 0000000000..bb67c5a8d4 --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_permission_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Permission +func (mg *Permission) GetTerraformResourceType() string { + return "aws_cloudwatch_event_permission" +} + +// GetConnectionDetailsMapping for this Permission +func (tr *Permission) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Permission +func (tr *Permission) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Permission +func (tr *Permission) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of 
underlying Terraform resource of this Permission +func (tr *Permission) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Permission +func (tr *Permission) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Permission +func (tr *Permission) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Permission +func (tr *Permission) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Permission +func (tr *Permission) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Permission using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Permission) LateInitialize(attrs []byte) (bool, error) { + params := &PermissionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Permission) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudwatchevents/v1beta2/zz_permission_types.go b/apis/cloudwatchevents/v1beta2/zz_permission_types.go new file mode 100755 index 0000000000..e3dbdacd3d --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_permission_types.go @@ -0,0 +1,223 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConditionInitParameters struct { + + // Key for the condition. Valid values: aws:PrincipalOrgID. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Type of condition. Value values: StringEquals. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Value for the key. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/organizations/v1beta1.Organization + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Value *string `json:"value,omitempty" tf:"value,omitempty"` + + // Reference to a Organization in organizations to populate value. + // +kubebuilder:validation:Optional + ValueRef *v1.Reference `json:"valueRef,omitempty" tf:"-"` + + // Selector for a Organization in organizations to populate value. + // +kubebuilder:validation:Optional + ValueSelector *v1.Selector `json:"valueSelector,omitempty" tf:"-"` +} + +type ConditionObservation struct { + + // Key for the condition. Valid values: aws:PrincipalOrgID. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Type of condition. Value values: StringEquals. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Value for the key. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ConditionParameters struct { + + // Key for the condition. Valid values: aws:PrincipalOrgID. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Type of condition. Value values: StringEquals. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Value for the key. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/organizations/v1beta1.Organization + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` + + // Reference to a Organization in organizations to populate value. + // +kubebuilder:validation:Optional + ValueRef *v1.Reference `json:"valueRef,omitempty" tf:"-"` + + // Selector for a Organization in organizations to populate value. 
+ // +kubebuilder:validation:Optional + ValueSelector *v1.Selector `json:"valueSelector,omitempty" tf:"-"` +} + +type PermissionInitParameters struct { + + // The action that you are enabling the other account to perform. Defaults to events:PutEvents. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Configuration block to limit the event bus permissions you are granting to only accounts that fulfill the condition. Specified below. + Condition *ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // The name of the event bus to set the permissions on. + // If you omit this, the permissions are set on the default event bus. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta1.Bus + EventBusName *string `json:"eventBusName,omitempty" tf:"event_bus_name,omitempty"` + + // Reference to a Bus in cloudwatchevents to populate eventBusName. + // +kubebuilder:validation:Optional + EventBusNameRef *v1.Reference `json:"eventBusNameRef,omitempty" tf:"-"` + + // Selector for a Bus in cloudwatchevents to populate eventBusName. + // +kubebuilder:validation:Optional + EventBusNameSelector *v1.Selector `json:"eventBusNameSelector,omitempty" tf:"-"` + + // The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify * to permit any account to put events to your default event bus, optionally limited by condition. + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // An identifier string for the external account that you are granting permissions to. + StatementID *string `json:"statementId,omitempty" tf:"statement_id,omitempty"` +} + +type PermissionObservation struct { + + // The action that you are enabling the other account to perform. Defaults to events:PutEvents. 
+ Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Configuration block to limit the event bus permissions you are granting to only accounts that fulfill the condition. Specified below. + Condition *ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + // The name of the event bus to set the permissions on. + // If you omit this, the permissions are set on the default event bus. + EventBusName *string `json:"eventBusName,omitempty" tf:"event_bus_name,omitempty"` + + // The statement ID of the EventBridge permission. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify * to permit any account to put events to your default event bus, optionally limited by condition. + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // An identifier string for the external account that you are granting permissions to. + StatementID *string `json:"statementId,omitempty" tf:"statement_id,omitempty"` +} + +type PermissionParameters struct { + + // The action that you are enabling the other account to perform. Defaults to events:PutEvents. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Configuration block to limit the event bus permissions you are granting to only accounts that fulfill the condition. Specified below. + // +kubebuilder:validation:Optional + Condition *ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // The name of the event bus to set the permissions on. + // If you omit this, the permissions are set on the default event bus. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta1.Bus + // +kubebuilder:validation:Optional + EventBusName *string `json:"eventBusName,omitempty" tf:"event_bus_name,omitempty"` + + // Reference to a Bus in cloudwatchevents to populate eventBusName. + // +kubebuilder:validation:Optional + EventBusNameRef *v1.Reference `json:"eventBusNameRef,omitempty" tf:"-"` + + // Selector for a Bus in cloudwatchevents to populate eventBusName. + // +kubebuilder:validation:Optional + EventBusNameSelector *v1.Selector `json:"eventBusNameSelector,omitempty" tf:"-"` + + // The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify * to permit any account to put events to your default event bus, optionally limited by condition. + // +kubebuilder:validation:Optional + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // An identifier string for the external account that you are granting permissions to. + // +kubebuilder:validation:Optional + StatementID *string `json:"statementId,omitempty" tf:"statement_id,omitempty"` +} + +// PermissionSpec defines the desired state of Permission +type PermissionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PermissionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PermissionInitParameters `json:"initProvider,omitempty"` +} + +// PermissionStatus defines the observed state of Permission. +type PermissionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PermissionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Permission is the Schema for the Permissions API. Provides a resource to create an EventBridge permission to support cross-account events in the current account default event bus. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Permission struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.principal) || (has(self.initProvider) && has(self.initProvider.principal))",message="spec.forProvider.principal is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.statementId) || (has(self.initProvider) && has(self.initProvider.statementId))",message="spec.forProvider.statementId is a 
required parameter" + Spec PermissionSpec `json:"spec"` + Status PermissionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PermissionList contains a list of Permissions +type PermissionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Permission `json:"items"` +} + +// Repository type metadata. +var ( + Permission_Kind = "Permission" + Permission_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Permission_Kind}.String() + Permission_KindAPIVersion = Permission_Kind + "." + CRDGroupVersion.String() + Permission_GroupVersionKind = CRDGroupVersion.WithKind(Permission_Kind) +) + +func init() { + SchemeBuilder.Register(&Permission{}, &PermissionList{}) +} diff --git a/apis/cloudwatchevents/v1beta2/zz_target_terraformed.go b/apis/cloudwatchevents/v1beta2/zz_target_terraformed.go new file mode 100755 index 0000000000..8ca32f2c7c --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_target_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Target +func (mg *Target) GetTerraformResourceType() string { + return "aws_cloudwatch_event_target" +} + +// GetConnectionDetailsMapping for this Target +func (tr *Target) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Target +func (tr *Target) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Target +func (tr *Target) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Target +func (tr *Target) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Target +func (tr *Target) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Target +func (tr *Target) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Target +func (tr *Target) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetInitParameters of this Target +func (tr *Target) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Target using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Target) LateInitialize(attrs []byte) (bool, error) { + params := &TargetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Target) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cloudwatchevents/v1beta2/zz_target_types.go b/apis/cloudwatchevents/v1beta2/zz_target_types.go new file mode 100755 index 0000000000..532e1838b1 --- /dev/null +++ b/apis/cloudwatchevents/v1beta2/zz_target_types.go @@ -0,0 +1,1002 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BatchTargetInitParameters struct { + + // The size of the array, if this is an array batch job. Valid values are integers between 2 and 10,000. + ArraySize *float64 `json:"arraySize,omitempty" tf:"array_size,omitempty"` + + // The number of times to attempt to retry, if the job fails. Valid values are 1 to 10. + JobAttempts *float64 `json:"jobAttempts,omitempty" tf:"job_attempts,omitempty"` + + // The ARN or name of the job definition to use if the event target is an AWS Batch job. This job definition must already exist. + JobDefinition *string `json:"jobDefinition,omitempty" tf:"job_definition,omitempty"` + + // The name to use for this execution of the job, if the target is an AWS Batch job. + JobName *string `json:"jobName,omitempty" tf:"job_name,omitempty"` +} + +type BatchTargetObservation struct { + + // The size of the array, if this is an array batch job. Valid values are integers between 2 and 10,000. + ArraySize *float64 `json:"arraySize,omitempty" tf:"array_size,omitempty"` + + // The number of times to attempt to retry, if the job fails. Valid values are 1 to 10. 
+ JobAttempts *float64 `json:"jobAttempts,omitempty" tf:"job_attempts,omitempty"` + + // The ARN or name of the job definition to use if the event target is an AWS Batch job. This job definition must already exist. + JobDefinition *string `json:"jobDefinition,omitempty" tf:"job_definition,omitempty"` + + // The name to use for this execution of the job, if the target is an AWS Batch job. + JobName *string `json:"jobName,omitempty" tf:"job_name,omitempty"` +} + +type BatchTargetParameters struct { + + // The size of the array, if this is an array batch job. Valid values are integers between 2 and 10,000. + // +kubebuilder:validation:Optional + ArraySize *float64 `json:"arraySize,omitempty" tf:"array_size,omitempty"` + + // The number of times to attempt to retry, if the job fails. Valid values are 1 to 10. + // +kubebuilder:validation:Optional + JobAttempts *float64 `json:"jobAttempts,omitempty" tf:"job_attempts,omitempty"` + + // The ARN or name of the job definition to use if the event target is an AWS Batch job. This job definition must already exist. + // +kubebuilder:validation:Optional + JobDefinition *string `json:"jobDefinition" tf:"job_definition,omitempty"` + + // The name to use for this execution of the job, if the target is an AWS Batch job. + // +kubebuilder:validation:Optional + JobName *string `json:"jobName" tf:"job_name,omitempty"` +} + +type CapacityProviderStrategyInitParameters struct { + + // The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. Defaults to 0. + Base *float64 `json:"base,omitempty" tf:"base,omitempty"` + + // Short name of the capacity provider. + CapacityProvider *string `json:"capacityProvider,omitempty" tf:"capacity_provider,omitempty"` + + // The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. 
The weight value is taken into consideration after the base value, if defined, is satisfied. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type CapacityProviderStrategyObservation struct { + + // The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. Defaults to 0. + Base *float64 `json:"base,omitempty" tf:"base,omitempty"` + + // Short name of the capacity provider. + CapacityProvider *string `json:"capacityProvider,omitempty" tf:"capacity_provider,omitempty"` + + // The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type CapacityProviderStrategyParameters struct { + + // The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. Defaults to 0. + // +kubebuilder:validation:Optional + Base *float64 `json:"base,omitempty" tf:"base,omitempty"` + + // Short name of the capacity provider. + // +kubebuilder:validation:Optional + CapacityProvider *string `json:"capacityProvider" tf:"capacity_provider,omitempty"` + + // The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type DeadLetterConfigInitParameters struct { + + // - ARN of the SQS queue specified as the target for the dead-letter queue. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type DeadLetterConfigObservation struct { + + // - ARN of the SQS queue specified as the target for the dead-letter queue. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type DeadLetterConfigParameters struct { + + // - ARN of the SQS queue specified as the target for the dead-letter queue. + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type EcsTargetInitParameters struct { + + // The capacity provider strategy to use for the task. If a capacity_provider_strategy specified, the launch_type parameter must be omitted. If no capacity_provider_strategy or launch_type is specified, the default capacity provider strategy for the cluster is used. Can be one or more. See below. + CapacityProviderStrategy []CapacityProviderStrategyInitParameters `json:"capacityProviderStrategy,omitempty" tf:"capacity_provider_strategy,omitempty"` + + // Specifies whether to enable Amazon ECS managed tags for the task. + EnableEcsManagedTags *bool `json:"enableEcsManagedTags,omitempty" tf:"enable_ecs_managed_tags,omitempty"` + + // Whether or not to enable the execute command functionality for the containers in this task. If true, this enables execute command functionality on all containers in the task. + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty" tf:"enable_execute_command,omitempty"` + + // Specifies an ECS task group for the task. The maximum length is 255 characters. + Group *string `json:"group,omitempty" tf:"group,omitempty"` + + // Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. Valid values include: EC2, EXTERNAL, or FARGATE. + LaunchType *string `json:"launchType,omitempty" tf:"launch_type,omitempty"` + + // Use this if the ECS task uses the awsvpc network mode. 
This specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. Required if launch_type is FARGATE because the awsvpc mode is required for Fargate tasks. + NetworkConfiguration *NetworkConfigurationInitParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // An array of placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task. + OrderedPlacementStrategy []OrderedPlacementStrategyInitParameters `json:"orderedPlacementStrategy,omitempty" tf:"ordered_placement_strategy,omitempty"` + + // An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at runtime). See Below. + PlacementConstraint []PlacementConstraintInitParameters `json:"placementConstraint,omitempty" tf:"placement_constraint,omitempty"` + + // Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. This is used only if LaunchType is FARGATE. For more information about valid platform versions, see AWS Fargate Platform Versions. + PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags are not propagated. Tags can only be propagated to the task during task creation. The only valid value is: TASK_DEFINITION. + PropagateTags *string `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // A map of tags to assign to ecs resources. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The number of tasks to create based on the TaskDefinition. Defaults to 1. 
+ TaskCount *float64 `json:"taskCount,omitempty" tf:"task_count,omitempty"` + + // The ARN of the task definition to use if the event target is an Amazon ECS cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.TaskDefinition + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + TaskDefinitionArn *string `json:"taskDefinitionArn,omitempty" tf:"task_definition_arn,omitempty"` + + // Reference to a TaskDefinition in ecs to populate taskDefinitionArn. + // +kubebuilder:validation:Optional + TaskDefinitionArnRef *v1.Reference `json:"taskDefinitionArnRef,omitempty" tf:"-"` + + // Selector for a TaskDefinition in ecs to populate taskDefinitionArn. + // +kubebuilder:validation:Optional + TaskDefinitionArnSelector *v1.Selector `json:"taskDefinitionArnSelector,omitempty" tf:"-"` +} + +type EcsTargetObservation struct { + + // The capacity provider strategy to use for the task. If a capacity_provider_strategy specified, the launch_type parameter must be omitted. If no capacity_provider_strategy or launch_type is specified, the default capacity provider strategy for the cluster is used. Can be one or more. See below. + CapacityProviderStrategy []CapacityProviderStrategyObservation `json:"capacityProviderStrategy,omitempty" tf:"capacity_provider_strategy,omitempty"` + + // Specifies whether to enable Amazon ECS managed tags for the task. + EnableEcsManagedTags *bool `json:"enableEcsManagedTags,omitempty" tf:"enable_ecs_managed_tags,omitempty"` + + // Whether or not to enable the execute command functionality for the containers in this task. If true, this enables execute command functionality on all containers in the task. + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty" tf:"enable_execute_command,omitempty"` + + // Specifies an ECS task group for the task. The maximum length is 255 characters. 
+ Group *string `json:"group,omitempty" tf:"group,omitempty"` + + // Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. Valid values include: EC2, EXTERNAL, or FARGATE. + LaunchType *string `json:"launchType,omitempty" tf:"launch_type,omitempty"` + + // Use this if the ECS task uses the awsvpc network mode. This specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. Required if launch_type is FARGATE because the awsvpc mode is required for Fargate tasks. + NetworkConfiguration *NetworkConfigurationObservation `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // An array of placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task. + OrderedPlacementStrategy []OrderedPlacementStrategyObservation `json:"orderedPlacementStrategy,omitempty" tf:"ordered_placement_strategy,omitempty"` + + // An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at runtime). See Below. + PlacementConstraint []PlacementConstraintObservation `json:"placementConstraint,omitempty" tf:"placement_constraint,omitempty"` + + // Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. This is used only if LaunchType is FARGATE. For more information about valid platform versions, see AWS Fargate Platform Versions. + PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags are not propagated. Tags can only be propagated to the task during task creation. The only valid value is: TASK_DEFINITION. 
+ PropagateTags *string `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // A map of tags to assign to ecs resources. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The number of tasks to create based on the TaskDefinition. Defaults to 1. + TaskCount *float64 `json:"taskCount,omitempty" tf:"task_count,omitempty"` + + // The ARN of the task definition to use if the event target is an Amazon ECS cluster. + TaskDefinitionArn *string `json:"taskDefinitionArn,omitempty" tf:"task_definition_arn,omitempty"` +} + +type EcsTargetParameters struct { + + // The capacity provider strategy to use for the task. If a capacity_provider_strategy specified, the launch_type parameter must be omitted. If no capacity_provider_strategy or launch_type is specified, the default capacity provider strategy for the cluster is used. Can be one or more. See below. + // +kubebuilder:validation:Optional + CapacityProviderStrategy []CapacityProviderStrategyParameters `json:"capacityProviderStrategy,omitempty" tf:"capacity_provider_strategy,omitempty"` + + // Specifies whether to enable Amazon ECS managed tags for the task. + // +kubebuilder:validation:Optional + EnableEcsManagedTags *bool `json:"enableEcsManagedTags,omitempty" tf:"enable_ecs_managed_tags,omitempty"` + + // Whether or not to enable the execute command functionality for the containers in this task. If true, this enables execute command functionality on all containers in the task. + // +kubebuilder:validation:Optional + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty" tf:"enable_execute_command,omitempty"` + + // Specifies an ECS task group for the task. The maximum length is 255 characters. + // +kubebuilder:validation:Optional + Group *string `json:"group,omitempty" tf:"group,omitempty"` + + // Specifies the launch type on which your task is running. 
The launch type that you specify here must match one of the launch type (compatibilities) of the target task. Valid values include: EC2, EXTERNAL, or FARGATE. + // +kubebuilder:validation:Optional + LaunchType *string `json:"launchType,omitempty" tf:"launch_type,omitempty"` + + // Use this if the ECS task uses the awsvpc network mode. This specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. Required if launch_type is FARGATE because the awsvpc mode is required for Fargate tasks. + // +kubebuilder:validation:Optional + NetworkConfiguration *NetworkConfigurationParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // An array of placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task. + // +kubebuilder:validation:Optional + OrderedPlacementStrategy []OrderedPlacementStrategyParameters `json:"orderedPlacementStrategy,omitempty" tf:"ordered_placement_strategy,omitempty"` + + // An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at runtime). See Below. + // +kubebuilder:validation:Optional + PlacementConstraint []PlacementConstraintParameters `json:"placementConstraint,omitempty" tf:"placement_constraint,omitempty"` + + // Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. This is used only if LaunchType is FARGATE. For more information about valid platform versions, see AWS Fargate Platform Versions. + // +kubebuilder:validation:Optional + PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags are not propagated. Tags can only be propagated to the task during task creation. 
The only valid value is: TASK_DEFINITION. + // +kubebuilder:validation:Optional + PropagateTags *string `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // A map of tags to assign to ecs resources. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The number of tasks to create based on the TaskDefinition. Defaults to 1. + // +kubebuilder:validation:Optional + TaskCount *float64 `json:"taskCount,omitempty" tf:"task_count,omitempty"` + + // The ARN of the task definition to use if the event target is an Amazon ECS cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.TaskDefinition + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + TaskDefinitionArn *string `json:"taskDefinitionArn,omitempty" tf:"task_definition_arn,omitempty"` + + // Reference to a TaskDefinition in ecs to populate taskDefinitionArn. + // +kubebuilder:validation:Optional + TaskDefinitionArnRef *v1.Reference `json:"taskDefinitionArnRef,omitempty" tf:"-"` + + // Selector for a TaskDefinition in ecs to populate taskDefinitionArn. + // +kubebuilder:validation:Optional + TaskDefinitionArnSelector *v1.Selector `json:"taskDefinitionArnSelector,omitempty" tf:"-"` +} + +type HTTPTargetInitParameters struct { + + // Enables you to specify HTTP headers to add to the request. + // +mapType=granular + HeaderParameters map[string]*string `json:"headerParameters,omitempty" tf:"header_parameters,omitempty"` + + // The list of values that correspond sequentially to any path variables in your endpoint ARN (for example arn:aws:execute-api:us-east-1:123456:myapi/*/POST/pets/*). 
+ PathParameterValues []*string `json:"pathParameterValues,omitempty" tf:"path_parameter_values,omitempty"` + + // Represents keys/values of query string parameters that are appended to the invoked endpoint. + // +mapType=granular + QueryStringParameters map[string]*string `json:"queryStringParameters,omitempty" tf:"query_string_parameters,omitempty"` +} + +type HTTPTargetObservation struct { + + // Enables you to specify HTTP headers to add to the request. + // +mapType=granular + HeaderParameters map[string]*string `json:"headerParameters,omitempty" tf:"header_parameters,omitempty"` + + // The list of values that correspond sequentially to any path variables in your endpoint ARN (for example arn:aws:execute-api:us-east-1:123456:myapi/*/POST/pets/*). + PathParameterValues []*string `json:"pathParameterValues,omitempty" tf:"path_parameter_values,omitempty"` + + // Represents keys/values of query string parameters that are appended to the invoked endpoint. + // +mapType=granular + QueryStringParameters map[string]*string `json:"queryStringParameters,omitempty" tf:"query_string_parameters,omitempty"` +} + +type HTTPTargetParameters struct { + + // Enables you to specify HTTP headers to add to the request. + // +kubebuilder:validation:Optional + // +mapType=granular + HeaderParameters map[string]*string `json:"headerParameters,omitempty" tf:"header_parameters,omitempty"` + + // The list of values that correspond sequentially to any path variables in your endpoint ARN (for example arn:aws:execute-api:us-east-1:123456:myapi/*/POST/pets/*). + // +kubebuilder:validation:Optional + PathParameterValues []*string `json:"pathParameterValues,omitempty" tf:"path_parameter_values,omitempty"` + + // Represents keys/values of query string parameters that are appended to the invoked endpoint. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + QueryStringParameters map[string]*string `json:"queryStringParameters,omitempty" tf:"query_string_parameters,omitempty"` +} + +type InputTransformerInitParameters struct { + + // Key value pairs specified in the form of JSONPath (for example, time = $.time) + // +mapType=granular + InputPaths map[string]*string `json:"inputPaths,omitempty" tf:"input_paths,omitempty"` + + // Template to customize data sent to the target. Must be valid JSON. To send a string value, the string value must include double quotes.g., "\"Your string goes here.\\nA new line.\"" + InputTemplate *string `json:"inputTemplate,omitempty" tf:"input_template,omitempty"` +} + +type InputTransformerObservation struct { + + // Key value pairs specified in the form of JSONPath (for example, time = $.time) + // +mapType=granular + InputPaths map[string]*string `json:"inputPaths,omitempty" tf:"input_paths,omitempty"` + + // Template to customize data sent to the target. Must be valid JSON. To send a string value, the string value must include double quotes.g., "\"Your string goes here.\\nA new line.\"" + InputTemplate *string `json:"inputTemplate,omitempty" tf:"input_template,omitempty"` +} + +type InputTransformerParameters struct { + + // Key value pairs specified in the form of JSONPath (for example, time = $.time) + // +kubebuilder:validation:Optional + // +mapType=granular + InputPaths map[string]*string `json:"inputPaths,omitempty" tf:"input_paths,omitempty"` + + // Template to customize data sent to the target. Must be valid JSON. To send a string value, the string value must include double quotes.g., "\"Your string goes here.\\nA new line.\"" + // +kubebuilder:validation:Optional + InputTemplate *string `json:"inputTemplate" tf:"input_template,omitempty"` +} + +type KinesisTargetInitParameters struct { + + // The JSON path to be extracted from the event and used as the partition key. 
+ PartitionKeyPath *string `json:"partitionKeyPath,omitempty" tf:"partition_key_path,omitempty"` +} + +type KinesisTargetObservation struct { + + // The JSON path to be extracted from the event and used as the partition key. + PartitionKeyPath *string `json:"partitionKeyPath,omitempty" tf:"partition_key_path,omitempty"` +} + +type KinesisTargetParameters struct { + + // The JSON path to be extracted from the event and used as the partition key. + // +kubebuilder:validation:Optional + PartitionKeyPath *string `json:"partitionKeyPath,omitempty" tf:"partition_key_path,omitempty"` +} + +type NetworkConfigurationInitParameters struct { + + // Assign a public IP address to the ENI (Fargate launch type only). Valid values are true or false. Defaults to false. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The subnets associated with the task or service. + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type NetworkConfigurationObservation struct { + + // Assign a public IP address to the ENI (Fargate launch type only). Valid values are true or false. Defaults to false. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The subnets associated with the task or service. 
+ // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type NetworkConfigurationParameters struct { + + // Assign a public IP address to the ENI (Fargate launch type only). Valid values are true or false. Defaults to false. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The subnets associated with the task or service. + // +kubebuilder:validation:Optional + // +listType=set + Subnets []*string `json:"subnets" tf:"subnets,omitempty"` +} + +type OrderedPlacementStrategyInitParameters struct { + + // The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used. For more information, see Amazon ECS task placement strategies. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // Type of placement strategy. The only valid values at this time are binpack, random and spread. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OrderedPlacementStrategyObservation struct { + + // The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. 
For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used. For more information, see Amazon ECS task placement strategies. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // Type of placement strategy. The only valid values at this time are binpack, random and spread. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OrderedPlacementStrategyParameters struct { + + // The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used. For more information, see Amazon ECS task placement strategies. + // +kubebuilder:validation:Optional + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // Type of placement strategy. The only valid values at this time are binpack, random and spread. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type PipelineParameterListInitParameters struct { + + // Name of parameter to start execution of a SageMaker Model Building Pipeline. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of parameter to start execution of a SageMaker Model Building Pipeline. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PipelineParameterListObservation struct { + + // Name of parameter to start execution of a SageMaker Model Building Pipeline. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of parameter to start execution of a SageMaker Model Building Pipeline. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PipelineParameterListParameters struct { + + // Name of parameter to start execution of a SageMaker Model Building Pipeline. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value of parameter to start execution of a SageMaker Model Building Pipeline. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type PlacementConstraintInitParameters struct { + + // Cluster Query Language expression to apply to the constraint. Does not need to be specified for the distinctInstance type. For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Type of constraint. The only valid values at this time are memberOf and distinctInstance. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PlacementConstraintObservation struct { + + // Cluster Query Language expression to apply to the constraint. Does not need to be specified for the distinctInstance type. For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Type of constraint. The only valid values at this time are memberOf and distinctInstance. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PlacementConstraintParameters struct { + + // Cluster Query Language expression to apply to the constraint. Does not need to be specified for the distinctInstance type. For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Type of constraint. The only valid values at this time are memberOf and distinctInstance. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RedshiftTargetInitParameters struct { + + // The database user name. + DBUser *string `json:"dbUser,omitempty" tf:"db_user,omitempty"` + + // The name of the database. + Database *string `json:"database,omitempty" tf:"database,omitempty"` + + // The SQL statement text to run. + SQL *string `json:"sql,omitempty" tf:"sql,omitempty"` + + // The name or ARN of the secret that enables access to the database. + SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"` + + // The name of the SQL statement. + StatementName *string `json:"statementName,omitempty" tf:"statement_name,omitempty"` + + // Indicates whether to send an event back to EventBridge after the SQL statement runs. + WithEvent *bool `json:"withEvent,omitempty" tf:"with_event,omitempty"` +} + +type RedshiftTargetObservation struct { + + // The database user name. + DBUser *string `json:"dbUser,omitempty" tf:"db_user,omitempty"` + + // The name of the database. + Database *string `json:"database,omitempty" tf:"database,omitempty"` + + // The SQL statement text to run. + SQL *string `json:"sql,omitempty" tf:"sql,omitempty"` + + // The name or ARN of the secret that enables access to the database. + SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"` + + // The name of the SQL statement. + StatementName *string `json:"statementName,omitempty" tf:"statement_name,omitempty"` + + // Indicates whether to send an event back to EventBridge after the SQL statement runs. + WithEvent *bool `json:"withEvent,omitempty" tf:"with_event,omitempty"` +} + +type RedshiftTargetParameters struct { + + // The database user name. + // +kubebuilder:validation:Optional + DBUser *string `json:"dbUser,omitempty" tf:"db_user,omitempty"` + + // The name of the database. 
+ // +kubebuilder:validation:Optional + Database *string `json:"database" tf:"database,omitempty"` + + // The SQL statement text to run. + // +kubebuilder:validation:Optional + SQL *string `json:"sql,omitempty" tf:"sql,omitempty"` + + // The name or ARN of the secret that enables access to the database. + // +kubebuilder:validation:Optional + SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"` + + // The name of the SQL statement. + // +kubebuilder:validation:Optional + StatementName *string `json:"statementName,omitempty" tf:"statement_name,omitempty"` + + // Indicates whether to send an event back to EventBridge after the SQL statement runs. + // +kubebuilder:validation:Optional + WithEvent *bool `json:"withEvent,omitempty" tf:"with_event,omitempty"` +} + +type RetryPolicyInitParameters struct { + + // The age in seconds to continue to make retry attempts. + MaximumEventAgeInSeconds *float64 `json:"maximumEventAgeInSeconds,omitempty" tf:"maximum_event_age_in_seconds,omitempty"` + + // maximum number of retry attempts to make before the request fails + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` +} + +type RetryPolicyObservation struct { + + // The age in seconds to continue to make retry attempts. + MaximumEventAgeInSeconds *float64 `json:"maximumEventAgeInSeconds,omitempty" tf:"maximum_event_age_in_seconds,omitempty"` + + // maximum number of retry attempts to make before the request fails + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` +} + +type RetryPolicyParameters struct { + + // The age in seconds to continue to make retry attempts. 
+ // +kubebuilder:validation:Optional + MaximumEventAgeInSeconds *float64 `json:"maximumEventAgeInSeconds,omitempty" tf:"maximum_event_age_in_seconds,omitempty"` + + // maximum number of retry attempts to make before the request fails + // +kubebuilder:validation:Optional + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` +} + +type RunCommandTargetsInitParameters struct { + + // Can be either tag:tag-key or InstanceIds. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // If Key is tag:tag-key, Values is a list of tag values. If Key is InstanceIds, Values is a list of Amazon EC2 instance IDs. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type RunCommandTargetsObservation struct { + + // Can be either tag:tag-key or InstanceIds. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // If Key is tag:tag-key, Values is a list of tag values. If Key is InstanceIds, Values is a list of Amazon EC2 instance IDs. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type RunCommandTargetsParameters struct { + + // Can be either tag:tag-key or InstanceIds. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // If Key is tag:tag-key, Values is a list of tag values. If Key is InstanceIds, Values is a list of Amazon EC2 instance IDs. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type SagemakerPipelineTargetInitParameters struct { + + // List of Parameter names and values for SageMaker Model Building Pipeline execution. + PipelineParameterList []PipelineParameterListInitParameters `json:"pipelineParameterList,omitempty" tf:"pipeline_parameter_list,omitempty"` +} + +type SagemakerPipelineTargetObservation struct { + + // List of Parameter names and values for SageMaker Model Building Pipeline execution. 
+ PipelineParameterList []PipelineParameterListObservation `json:"pipelineParameterList,omitempty" tf:"pipeline_parameter_list,omitempty"` +} + +type SagemakerPipelineTargetParameters struct { + + // List of Parameter names and values for SageMaker Model Building Pipeline execution. + // +kubebuilder:validation:Optional + PipelineParameterList []PipelineParameterListParameters `json:"pipelineParameterList,omitempty" tf:"pipeline_parameter_list,omitempty"` +} + +type SqsTargetInitParameters struct { + + // The FIFO message group ID to use as the target. + MessageGroupID *string `json:"messageGroupId,omitempty" tf:"message_group_id,omitempty"` +} + +type SqsTargetObservation struct { + + // The FIFO message group ID to use as the target. + MessageGroupID *string `json:"messageGroupId,omitempty" tf:"message_group_id,omitempty"` +} + +type SqsTargetParameters struct { + + // The FIFO message group ID to use as the target. + // +kubebuilder:validation:Optional + MessageGroupID *string `json:"messageGroupId,omitempty" tf:"message_group_id,omitempty"` +} + +type TargetInitParameters struct { + + // The Amazon Resource Name (ARN) of the target. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon Batch Job. Documented below. A maximum of 1 are allowed. + BatchTarget *BatchTargetInitParameters `json:"batchTarget,omitempty" tf:"batch_target,omitempty"` + + // Parameters used when you are providing a dead letter config. Documented below. A maximum of 1 are allowed. + DeadLetterConfig *DeadLetterConfigInitParameters `json:"deadLetterConfig,omitempty" tf:"dead_letter_config,omitempty"` + + // Parameters used when you are using the rule to invoke Amazon ECS Task. Documented below. A maximum of 1 are allowed. + EcsTarget *EcsTargetInitParameters `json:"ecsTarget,omitempty" tf:"ecs_target,omitempty"` + + // The name or ARN of the event bus to associate with the rule. 
+ // If you omit this, the default event bus is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta1.Bus + EventBusName *string `json:"eventBusName,omitempty" tf:"event_bus_name,omitempty"` + + // Reference to a Bus in cloudwatchevents to populate eventBusName. + // +kubebuilder:validation:Optional + EventBusNameRef *v1.Reference `json:"eventBusNameRef,omitempty" tf:"-"` + + // Selector for a Bus in cloudwatchevents to populate eventBusName. + // +kubebuilder:validation:Optional + EventBusNameSelector *v1.Selector `json:"eventBusNameSelector,omitempty" tf:"-"` + + // Used to delete managed rules created by AWS. Defaults to false. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Parameters used when you are using the rule to invoke an API Gateway REST endpoint. Documented below. A maximum of 1 is allowed. + HTTPTarget *HTTPTargetInitParameters `json:"httpTarget,omitempty" tf:"http_target,omitempty"` + + // Valid JSON text passed to the target. Conflicts with input_path and input_transformer. + Input *string `json:"input,omitempty" tf:"input,omitempty"` + + // The value of the JSONPath that is used for extracting part of the matched event when passing it to the target. Conflicts with input and input_transformer. + InputPath *string `json:"inputPath,omitempty" tf:"input_path,omitempty"` + + // Parameters used when you are providing a custom input to a target based on certain event data. Conflicts with input and input_path. + InputTransformer *InputTransformerInitParameters `json:"inputTransformer,omitempty" tf:"input_transformer,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon Kinesis Stream. Documented below. A maximum of 1 are allowed. + KinesisTarget *KinesisTargetInitParameters `json:"kinesisTarget,omitempty" tf:"kinesis_target,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon Redshift Statement. 
Documented below. A maximum of 1 are allowed. + RedshiftTarget *RedshiftTargetInitParameters `json:"redshiftTarget,omitempty" tf:"redshift_target,omitempty"` + + // Parameters used when you are providing retry policies. Documented below. A maximum of 1 are allowed. + RetryPolicy *RetryPolicyInitParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered. Required if ecs_target is used or target in arn is EC2 instance, Kinesis data stream, Step Functions state machine, or Event Bus in different account or region. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The name of the rule you want to add targets to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta1.Rule + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` + + // Reference to a Rule in cloudwatchevents to populate rule. + // +kubebuilder:validation:Optional + RuleRef *v1.Reference `json:"ruleRef,omitempty" tf:"-"` + + // Selector for a Rule in cloudwatchevents to populate rule. + // +kubebuilder:validation:Optional + RuleSelector *v1.Selector `json:"ruleSelector,omitempty" tf:"-"` + + // Parameters used when you are using the rule to invoke Amazon EC2 Run Command. Documented below. A maximum of 5 are allowed. 
+ RunCommandTargets []RunCommandTargetsInitParameters `json:"runCommandTargets,omitempty" tf:"run_command_targets,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon SageMaker Pipeline. Documented below. A maximum of 1 are allowed. + SagemakerPipelineTarget *SagemakerPipelineTargetInitParameters `json:"sagemakerPipelineTarget,omitempty" tf:"sagemaker_pipeline_target,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. + SqsTarget *SqsTargetInitParameters `json:"sqsTarget,omitempty" tf:"sqs_target,omitempty"` + + // The unique target assignment ID. If missing, will generate a random, unique id. + TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"` +} + +type TargetObservation struct { + + // The Amazon Resource Name (ARN) of the target. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon Batch Job. Documented below. A maximum of 1 are allowed. + BatchTarget *BatchTargetObservation `json:"batchTarget,omitempty" tf:"batch_target,omitempty"` + + // Parameters used when you are providing a dead letter config. Documented below. A maximum of 1 are allowed. + DeadLetterConfig *DeadLetterConfigObservation `json:"deadLetterConfig,omitempty" tf:"dead_letter_config,omitempty"` + + // Parameters used when you are using the rule to invoke Amazon ECS Task. Documented below. A maximum of 1 are allowed. + EcsTarget *EcsTargetObservation `json:"ecsTarget,omitempty" tf:"ecs_target,omitempty"` + + // The name or ARN of the event bus to associate with the rule. + // If you omit this, the default event bus is used. + EventBusName *string `json:"eventBusName,omitempty" tf:"event_bus_name,omitempty"` + + // Used to delete managed rules created by AWS. Defaults to false. 
+ ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Parameters used when you are using the rule to invoke an API Gateway REST endpoint. Documented below. A maximum of 1 is allowed. + HTTPTarget *HTTPTargetObservation `json:"httpTarget,omitempty" tf:"http_target,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Valid JSON text passed to the target. Conflicts with input_path and input_transformer. + Input *string `json:"input,omitempty" tf:"input,omitempty"` + + // The value of the JSONPath that is used for extracting part of the matched event when passing it to the target. Conflicts with input and input_transformer. + InputPath *string `json:"inputPath,omitempty" tf:"input_path,omitempty"` + + // Parameters used when you are providing a custom input to a target based on certain event data. Conflicts with input and input_path. + InputTransformer *InputTransformerObservation `json:"inputTransformer,omitempty" tf:"input_transformer,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon Kinesis Stream. Documented below. A maximum of 1 are allowed. + KinesisTarget *KinesisTargetObservation `json:"kinesisTarget,omitempty" tf:"kinesis_target,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon Redshift Statement. Documented below. A maximum of 1 are allowed. + RedshiftTarget *RedshiftTargetObservation `json:"redshiftTarget,omitempty" tf:"redshift_target,omitempty"` + + // Parameters used when you are providing retry policies. Documented below. A maximum of 1 are allowed. + RetryPolicy *RetryPolicyObservation `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered. Required if ecs_target is used or target in arn is EC2 instance, Kinesis data stream, Step Functions state machine, or Event Bus in different account or region. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the rule you want to add targets to. + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` + + // Parameters used when you are using the rule to invoke Amazon EC2 Run Command. Documented below. A maximum of 5 are allowed. + RunCommandTargets []RunCommandTargetsObservation `json:"runCommandTargets,omitempty" tf:"run_command_targets,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon SageMaker Pipeline. Documented below. A maximum of 1 are allowed. + SagemakerPipelineTarget *SagemakerPipelineTargetObservation `json:"sagemakerPipelineTarget,omitempty" tf:"sagemaker_pipeline_target,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. + SqsTarget *SqsTargetObservation `json:"sqsTarget,omitempty" tf:"sqs_target,omitempty"` + + // The unique target assignment ID. If missing, will generate a random, unique id. + TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"` +} + +type TargetParameters struct { + + // The Amazon Resource Name (ARN) of the target. + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon Batch Job. Documented below. A maximum of 1 are allowed. + // +kubebuilder:validation:Optional + BatchTarget *BatchTargetParameters `json:"batchTarget,omitempty" tf:"batch_target,omitempty"` + + // Parameters used when you are providing a dead letter config. Documented below. A maximum of 1 are allowed. + // +kubebuilder:validation:Optional + DeadLetterConfig *DeadLetterConfigParameters `json:"deadLetterConfig,omitempty" tf:"dead_letter_config,omitempty"` + + // Parameters used when you are using the rule to invoke Amazon ECS Task. Documented below. A maximum of 1 are allowed. 
+ // +kubebuilder:validation:Optional + EcsTarget *EcsTargetParameters `json:"ecsTarget,omitempty" tf:"ecs_target,omitempty"` + + // The name or ARN of the event bus to associate with the rule. + // If you omit this, the default event bus is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta1.Bus + // +kubebuilder:validation:Optional + EventBusName *string `json:"eventBusName,omitempty" tf:"event_bus_name,omitempty"` + + // Reference to a Bus in cloudwatchevents to populate eventBusName. + // +kubebuilder:validation:Optional + EventBusNameRef *v1.Reference `json:"eventBusNameRef,omitempty" tf:"-"` + + // Selector for a Bus in cloudwatchevents to populate eventBusName. + // +kubebuilder:validation:Optional + EventBusNameSelector *v1.Selector `json:"eventBusNameSelector,omitempty" tf:"-"` + + // Used to delete managed rules created by AWS. Defaults to false. + // +kubebuilder:validation:Optional + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Parameters used when you are using the rule to invoke an API Gateway REST endpoint. Documented below. A maximum of 1 is allowed. + // +kubebuilder:validation:Optional + HTTPTarget *HTTPTargetParameters `json:"httpTarget,omitempty" tf:"http_target,omitempty"` + + // Valid JSON text passed to the target. Conflicts with input_path and input_transformer. + // +kubebuilder:validation:Optional + Input *string `json:"input,omitempty" tf:"input,omitempty"` + + // The value of the JSONPath that is used for extracting part of the matched event when passing it to the target. Conflicts with input and input_transformer. + // +kubebuilder:validation:Optional + InputPath *string `json:"inputPath,omitempty" tf:"input_path,omitempty"` + + // Parameters used when you are providing a custom input to a target based on certain event data. Conflicts with input and input_path. 
+ // +kubebuilder:validation:Optional + InputTransformer *InputTransformerParameters `json:"inputTransformer,omitempty" tf:"input_transformer,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon Kinesis Stream. Documented below. A maximum of 1 are allowed. + // +kubebuilder:validation:Optional + KinesisTarget *KinesisTargetParameters `json:"kinesisTarget,omitempty" tf:"kinesis_target,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon Redshift Statement. Documented below. A maximum of 1 are allowed. + // +kubebuilder:validation:Optional + RedshiftTarget *RedshiftTargetParameters `json:"redshiftTarget,omitempty" tf:"redshift_target,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Parameters used when you are providing retry policies. Documented below. A maximum of 1 are allowed. + // +kubebuilder:validation:Optional + RetryPolicy *RetryPolicyParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered. Required if ecs_target is used or target in arn is EC2 instance, Kinesis data stream, Step Functions state machine, or Event Bus in different account or region. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The name of the rule you want to add targets to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta1.Rule + // +kubebuilder:validation:Optional + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` + + // Reference to a Rule in cloudwatchevents to populate rule. + // +kubebuilder:validation:Optional + RuleRef *v1.Reference `json:"ruleRef,omitempty" tf:"-"` + + // Selector for a Rule in cloudwatchevents to populate rule. + // +kubebuilder:validation:Optional + RuleSelector *v1.Selector `json:"ruleSelector,omitempty" tf:"-"` + + // Parameters used when you are using the rule to invoke Amazon EC2 Run Command. Documented below. A maximum of 5 are allowed. + // +kubebuilder:validation:Optional + RunCommandTargets []RunCommandTargetsParameters `json:"runCommandTargets,omitempty" tf:"run_command_targets,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon SageMaker Pipeline. Documented below. A maximum of 1 are allowed. + // +kubebuilder:validation:Optional + SagemakerPipelineTarget *SagemakerPipelineTargetParameters `json:"sagemakerPipelineTarget,omitempty" tf:"sagemaker_pipeline_target,omitempty"` + + // Parameters used when you are using the rule to invoke an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. + // +kubebuilder:validation:Optional + SqsTarget *SqsTargetParameters `json:"sqsTarget,omitempty" tf:"sqs_target,omitempty"` + + // The unique target assignment ID. If missing, will generate a random, unique id. + // +kubebuilder:validation:Optional + TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"` +} + +// TargetSpec defines the desired state of Target +type TargetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TargetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TargetInitParameters `json:"initProvider,omitempty"` +} + +// TargetStatus defines the observed state of Target. +type TargetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TargetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Target is the Schema for the Targets API. Provides an EventBridge Target resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Target struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.arn) || (has(self.initProvider) && has(self.initProvider.arn))",message="spec.forProvider.arn is a required parameter" + Spec TargetSpec `json:"spec"` + Status TargetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TargetList contains a list of Targets +type TargetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Target `json:"items"` +} + +// Repository type metadata. +var ( + Target_Kind = "Target" + Target_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Target_Kind}.String() + Target_KindAPIVersion = Target_Kind + "." 
+ CRDGroupVersion.String() + Target_GroupVersionKind = CRDGroupVersion.WithKind(Target_Kind) +) + +func init() { + SchemeBuilder.Register(&Target{}, &TargetList{}) +} diff --git a/apis/cloudwatchlogs/v1beta1/zz_destination_types.go b/apis/cloudwatchlogs/v1beta1/zz_destination_types.go index 3a1b35a9e8..57e92cc69a 100755 --- a/apis/cloudwatchlogs/v1beta1/zz_destination_types.go +++ b/apis/cloudwatchlogs/v1beta1/zz_destination_types.go @@ -33,7 +33,7 @@ type DestinationInitParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // The ARN of the target Amazon Kinesis stream resource for the destination. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta1.Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` @@ -95,7 +95,7 @@ type DestinationParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // The ARN of the target Amazon Kinesis stream resource for the destination. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta1.Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() // +kubebuilder:validation:Optional TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` diff --git a/apis/cloudwatchlogs/v1beta1/zz_generated.conversion_hubs.go b/apis/cloudwatchlogs/v1beta1/zz_generated.conversion_hubs.go index 13bcc98351..909a5e93b2 100755 --- a/apis/cloudwatchlogs/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cloudwatchlogs/v1beta1/zz_generated.conversion_hubs.go @@ -18,9 +18,6 @@ func (tr *DestinationPolicy) Hub() {} // Hub marks this type as a conversion hub. func (tr *Group) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MetricFilter) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ResourcePolicy) Hub() {} diff --git a/apis/cloudwatchlogs/v1beta1/zz_generated.conversion_spokes.go b/apis/cloudwatchlogs/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..599907b1b2 --- /dev/null +++ b/apis/cloudwatchlogs/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this MetricFilter to the hub type. 
+func (tr *MetricFilter) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MetricFilter type. +func (tr *MetricFilter) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cloudwatchlogs/v1beta1/zz_generated.resolvers.go b/apis/cloudwatchlogs/v1beta1/zz_generated.resolvers.go index 1e348d687a..dc9cb48ac6 100644 --- a/apis/cloudwatchlogs/v1beta1/zz_generated.resolvers.go +++ b/apis/cloudwatchlogs/v1beta1/zz_generated.resolvers.go @@ -45,7 +45,7 @@ func (mg *Destination) ResolveReferences( // ResolveReferences of this Destinati mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta1", "Stream", "StreamList") + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -83,7 +83,7 @@ func (mg *Destination) ResolveReferences( // ResolveReferences of this Destinati mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference { - m, 
l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta1", "Stream", "StreamList") + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -264,7 +264,7 @@ func (mg *SubscriptionFilter) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta1", "Stream", "StreamList") + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -302,7 +302,7 @@ func (mg *SubscriptionFilter) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta1", "Stream", "StreamList") + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cloudwatchlogs/v1beta1/zz_subscriptionfilter_types.go b/apis/cloudwatchlogs/v1beta1/zz_subscriptionfilter_types.go index 863ef13184..7537ad69b4 100755 --- a/apis/cloudwatchlogs/v1beta1/zz_subscriptionfilter_types.go +++ b/apis/cloudwatchlogs/v1beta1/zz_subscriptionfilter_types.go @@ -16,7 +16,7 @@ import ( type SubscriptionFilterInitParameters struct { // The ARN of the destination to deliver matching log events to. Kinesis stream or Lambda function ARN. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta1.Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` @@ -80,7 +80,7 @@ type SubscriptionFilterObservation struct { type SubscriptionFilterParameters struct { // The ARN of the destination to deliver matching log events to. Kinesis stream or Lambda function ARN. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta1.Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() // +kubebuilder:validation:Optional DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` diff --git a/apis/cloudwatchlogs/v1beta2/zz_generated.conversion_hubs.go b/apis/cloudwatchlogs/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9c9f5a0dee --- /dev/null +++ b/apis/cloudwatchlogs/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *MetricFilter) Hub() {} diff --git a/apis/cloudwatchlogs/v1beta2/zz_generated.deepcopy.go b/apis/cloudwatchlogs/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..2886edd9fe --- /dev/null +++ b/apis/cloudwatchlogs/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,396 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. 
DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricFilter) DeepCopyInto(out *MetricFilter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricFilter. +func (in *MetricFilter) DeepCopy() *MetricFilter { + if in == nil { + return nil + } + out := new(MetricFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MetricFilter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricFilterInitParameters) DeepCopyInto(out *MetricFilterInitParameters) { + *out = *in + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogGroupNameRef != nil { + in, out := &in.LogGroupNameRef, &out.LogGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogGroupNameSelector != nil { + in, out := &in.LogGroupNameSelector, &out.LogGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MetricTransformation != nil { + in, out := &in.MetricTransformation, &out.MetricTransformation + *out = new(MetricTransformationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricFilterInitParameters. +func (in *MetricFilterInitParameters) DeepCopy() *MetricFilterInitParameters { + if in == nil { + return nil + } + out := new(MetricFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricFilterList) DeepCopyInto(out *MetricFilterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MetricFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricFilterList. +func (in *MetricFilterList) DeepCopy() *MetricFilterList { + if in == nil { + return nil + } + out := new(MetricFilterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MetricFilterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricFilterObservation) DeepCopyInto(out *MetricFilterObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MetricTransformation != nil { + in, out := &in.MetricTransformation, &out.MetricTransformation + *out = new(MetricTransformationObservation) + (*in).DeepCopyInto(*out) + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricFilterObservation. +func (in *MetricFilterObservation) DeepCopy() *MetricFilterObservation { + if in == nil { + return nil + } + out := new(MetricFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricFilterParameters) DeepCopyInto(out *MetricFilterParameters) { + *out = *in + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogGroupNameRef != nil { + in, out := &in.LogGroupNameRef, &out.LogGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogGroupNameSelector != nil { + in, out := &in.LogGroupNameSelector, &out.LogGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MetricTransformation != nil { + in, out := &in.MetricTransformation, &out.MetricTransformation + *out = new(MetricTransformationParameters) + (*in).DeepCopyInto(*out) + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricFilterParameters. +func (in *MetricFilterParameters) DeepCopy() *MetricFilterParameters { + if in == nil { + return nil + } + out := new(MetricFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricFilterSpec) DeepCopyInto(out *MetricFilterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricFilterSpec. +func (in *MetricFilterSpec) DeepCopy() *MetricFilterSpec { + if in == nil { + return nil + } + out := new(MetricFilterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricFilterStatus) DeepCopyInto(out *MetricFilterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricFilterStatus. +func (in *MetricFilterStatus) DeepCopy() *MetricFilterStatus { + if in == nil { + return nil + } + out := new(MetricFilterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricTransformationInitParameters) DeepCopyInto(out *MetricTransformationInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTransformationInitParameters. 
+func (in *MetricTransformationInitParameters) DeepCopy() *MetricTransformationInitParameters { + if in == nil { + return nil + } + out := new(MetricTransformationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricTransformationObservation) DeepCopyInto(out *MetricTransformationObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTransformationObservation. +func (in *MetricTransformationObservation) DeepCopy() *MetricTransformationObservation { + if in == nil { + return nil + } + out := new(MetricTransformationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricTransformationParameters) DeepCopyInto(out *MetricTransformationParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTransformationParameters. +func (in *MetricTransformationParameters) DeepCopy() *MetricTransformationParameters { + if in == nil { + return nil + } + out := new(MetricTransformationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cloudwatchlogs/v1beta2/zz_generated.managed.go b/apis/cloudwatchlogs/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..7f3f222592 --- /dev/null +++ b/apis/cloudwatchlogs/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this MetricFilter. +func (mg *MetricFilter) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MetricFilter. 
+func (mg *MetricFilter) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MetricFilter. +func (mg *MetricFilter) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MetricFilter. +func (mg *MetricFilter) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MetricFilter. +func (mg *MetricFilter) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MetricFilter. +func (mg *MetricFilter) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MetricFilter. +func (mg *MetricFilter) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MetricFilter. +func (mg *MetricFilter) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MetricFilter. +func (mg *MetricFilter) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MetricFilter. +func (mg *MetricFilter) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MetricFilter. +func (mg *MetricFilter) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MetricFilter. 
+func (mg *MetricFilter) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cloudwatchlogs/v1beta2/zz_generated.managedlist.go b/apis/cloudwatchlogs/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..e8200e16b9 --- /dev/null +++ b/apis/cloudwatchlogs/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this MetricFilterList. +func (l *MetricFilterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cloudwatchlogs/v1beta2/zz_generated.resolvers.go b/apis/cloudwatchlogs/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..c74dac8e9f --- /dev/null +++ b/apis/cloudwatchlogs/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *MetricFilter) ResolveReferences( // ResolveReferences of this MetricFilter. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LogGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LogGroupNameRef, + Selector: mg.Spec.ForProvider.LogGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogGroupName") + } + mg.Spec.ForProvider.LogGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LogGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LogGroupNameRef, + Selector: mg.Spec.InitProvider.LogGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LogGroupName") + } + mg.Spec.InitProvider.LogGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LogGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/cloudwatchlogs/v1beta2/zz_groupversion_info.go b/apis/cloudwatchlogs/v1beta2/zz_groupversion_info.go new file mode 100755 index 
0000000000..36455bfc0a --- /dev/null +++ b/apis/cloudwatchlogs/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cloudwatchlogs.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "cloudwatchlogs.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cloudwatchlogs/v1beta2/zz_metricfilter_terraformed.go b/apis/cloudwatchlogs/v1beta2/zz_metricfilter_terraformed.go new file mode 100755 index 0000000000..a441e3360e --- /dev/null +++ b/apis/cloudwatchlogs/v1beta2/zz_metricfilter_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MetricFilter +func (mg *MetricFilter) GetTerraformResourceType() string { + return "aws_cloudwatch_log_metric_filter" +} + +// GetConnectionDetailsMapping for this MetricFilter +func (tr *MetricFilter) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MetricFilter +func (tr *MetricFilter) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MetricFilter +func (tr *MetricFilter) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MetricFilter +func (tr *MetricFilter) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MetricFilter +func (tr *MetricFilter) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MetricFilter +func (tr *MetricFilter) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MetricFilter +func (tr *MetricFilter) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err 
+ } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MetricFilter +func (tr *MetricFilter) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MetricFilter using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MetricFilter) LateInitialize(attrs []byte) (bool, error) { + params := &MetricFilterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MetricFilter) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cloudwatchlogs/v1beta2/zz_metricfilter_types.go b/apis/cloudwatchlogs/v1beta2/zz_metricfilter_types.go new file mode 100755 index 0000000000..a16addb32f --- /dev/null +++ b/apis/cloudwatchlogs/v1beta2/zz_metricfilter_types.go @@ -0,0 +1,216 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MetricFilterInitParameters struct { + + // The name of the log group to associate the metric filter with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate logGroupName. + // +kubebuilder:validation:Optional + LogGroupNameRef *v1.Reference `json:"logGroupNameRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate logGroupName. + // +kubebuilder:validation:Optional + LogGroupNameSelector *v1.Selector `json:"logGroupNameSelector,omitempty" tf:"-"` + + // A block defining collection of information needed to define how metric data gets emitted. See below. + MetricTransformation *MetricTransformationInitParameters `json:"metricTransformation,omitempty" tf:"metric_transformation,omitempty"` + + // A valid CloudWatch Logs filter pattern + // for extracting metric data out of ingested log events. + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` +} + +type MetricFilterObservation struct { + + // The name of the metric filter. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the log group to associate the metric filter with. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // A block defining collection of information needed to define how metric data gets emitted. See below. + MetricTransformation *MetricTransformationObservation `json:"metricTransformation,omitempty" tf:"metric_transformation,omitempty"` + + // A valid CloudWatch Logs filter pattern + // for extracting metric data out of ingested log events. + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` +} + +type MetricFilterParameters struct { + + // The name of the log group to associate the metric filter with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate logGroupName. + // +kubebuilder:validation:Optional + LogGroupNameRef *v1.Reference `json:"logGroupNameRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate logGroupName. + // +kubebuilder:validation:Optional + LogGroupNameSelector *v1.Selector `json:"logGroupNameSelector,omitempty" tf:"-"` + + // A block defining collection of information needed to define how metric data gets emitted. See below. + // +kubebuilder:validation:Optional + MetricTransformation *MetricTransformationParameters `json:"metricTransformation,omitempty" tf:"metric_transformation,omitempty"` + + // A valid CloudWatch Logs filter pattern + // for extracting metric data out of ingested log events. + // +kubebuilder:validation:Optional + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type MetricTransformationInitParameters struct { + + // The value to emit when a filter pattern does not match a log event. Conflicts with dimensions. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // Map of fields to use as dimensions for the metric. Up to 3 dimensions are allowed. Conflicts with default_value. + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // The name of the CloudWatch metric to which the monitored log information should be published (e.g., ErrorCount) + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The destination namespace of the CloudWatch metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // The unit to assign to the metric. If you omit this, the unit is set as None. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // What to publish to the metric. For example, if you're counting the occurrences of a particular term like "Error", the value will be "1" for each occurrence. If you're counting the bytes transferred the published value will be the value in the log event. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricTransformationObservation struct { + + // The value to emit when a filter pattern does not match a log event. Conflicts with dimensions. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // Map of fields to use as dimensions for the metric. Up to 3 dimensions are allowed. Conflicts with default_value. 
+ // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // The name of the CloudWatch metric to which the monitored log information should be published (e.g., ErrorCount) + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The destination namespace of the CloudWatch metric. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // The unit to assign to the metric. If you omit this, the unit is set as None. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // What to publish to the metric. For example, if you're counting the occurrences of a particular term like "Error", the value will be "1" for each occurrence. If you're counting the bytes transferred the published value will be the value in the log event. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MetricTransformationParameters struct { + + // The value to emit when a filter pattern does not match a log event. Conflicts with dimensions. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // Map of fields to use as dimensions for the metric. Up to 3 dimensions are allowed. Conflicts with default_value. + // +kubebuilder:validation:Optional + // +mapType=granular + Dimensions map[string]*string `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // The name of the CloudWatch metric to which the monitored log information should be published (e.g., ErrorCount) + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The destination namespace of the CloudWatch metric. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace" tf:"namespace,omitempty"` + + // The unit to assign to the metric. If you omit this, the unit is set as None. 
+ // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // What to publish to the metric. For example, if you're counting the occurrences of a particular term like "Error", the value will be "1" for each occurrence. If you're counting the bytes transferred the published value will be the value in the log event. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +// MetricFilterSpec defines the desired state of MetricFilter +type MetricFilterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MetricFilterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MetricFilterInitParameters `json:"initProvider,omitempty"` +} + +// MetricFilterStatus defines the observed state of MetricFilter. +type MetricFilterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MetricFilterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MetricFilter is the Schema for the MetricFilters API. Provides a CloudWatch Log Metric Filter resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type MetricFilter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.metricTransformation) || (has(self.initProvider) && has(self.initProvider.metricTransformation))",message="spec.forProvider.metricTransformation is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.pattern) || (has(self.initProvider) && has(self.initProvider.pattern))",message="spec.forProvider.pattern is a required parameter" + Spec MetricFilterSpec `json:"spec"` + Status MetricFilterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MetricFilterList contains a list of MetricFilters +type MetricFilterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MetricFilter `json:"items"` +} + +// Repository type metadata. +var ( + MetricFilter_Kind = "MetricFilter" + MetricFilter_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MetricFilter_Kind}.String() + MetricFilter_KindAPIVersion = MetricFilter_Kind + "." 
+ CRDGroupVersion.String() + MetricFilter_GroupVersionKind = CRDGroupVersion.WithKind(MetricFilter_Kind) +) + +func init() { + SchemeBuilder.Register(&MetricFilter{}, &MetricFilterList{}) +} diff --git a/apis/codepipeline/v1beta1/zz_generated.conversion_spokes.go b/apis/codepipeline/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..1bb78d9881 --- /dev/null +++ b/apis/codepipeline/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Codepipeline to the hub type. +func (tr *Codepipeline) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Codepipeline type. +func (tr *Codepipeline) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this CustomActionType to the hub type. 
+func (tr *CustomActionType) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CustomActionType type. +func (tr *CustomActionType) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Webhook to the hub type. +func (tr *Webhook) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Webhook type. 
+func (tr *Webhook) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/codepipeline/v1beta2/zz_codepipeline_terraformed.go b/apis/codepipeline/v1beta2/zz_codepipeline_terraformed.go new file mode 100755 index 0000000000..fc5806edd2 --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_codepipeline_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Codepipeline +func (mg *Codepipeline) GetTerraformResourceType() string { + return "aws_codepipeline" +} + +// GetConnectionDetailsMapping for this Codepipeline +func (tr *Codepipeline) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Codepipeline +func (tr *Codepipeline) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Codepipeline +func (tr *Codepipeline) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Codepipeline +func (tr *Codepipeline) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Codepipeline +func (tr *Codepipeline) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Codepipeline +func (tr *Codepipeline) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Codepipeline +func (tr *Codepipeline) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Codepipeline +func (tr *Codepipeline) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Codepipeline using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Codepipeline) LateInitialize(attrs []byte) (bool, error) { + params := &CodepipelineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Codepipeline) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/codepipeline/v1beta2/zz_codepipeline_types.go b/apis/codepipeline/v1beta2/zz_codepipeline_types.go new file mode 100755 index 0000000000..6ab78dac1f --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_codepipeline_types.go @@ -0,0 +1,785 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Possible values are Approval, Build, Deploy, Invoke, Source and Test. 
+ Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // A map of the action declaration's configuration. Configurations options for action types and providers can be found in the Pipeline Structure Reference and Action Structure Reference documentation. + // +mapType=granular + Configuration map[string]*string `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A list of artifact names to be worked on. + InputArtifacts []*string `json:"inputArtifacts,omitempty" tf:"input_artifacts,omitempty"` + + // The action declaration's name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The namespace all output variables will be accessed from. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // A list of artifact names to output. Output artifact names must be unique within a pipeline. + OutputArtifacts []*string `json:"outputArtifacts,omitempty" tf:"output_artifacts,omitempty"` + + // The creator of the action being called. Possible values are AWS, Custom and ThirdParty. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // The provider of the service being called by the action. Valid providers are determined by the action category. Provider names are listed in the Action Structure Reference documentation. + Provider *string `json:"provider,omitempty" tf:"provider,omitempty"` + + // The ARN of the IAM service role that will perform the declared action. This is assumed through the roleArn for the pipeline. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The order in which actions are run. + RunOrder *float64 `json:"runOrder,omitempty" tf:"run_order,omitempty"` + + // A string that identifies the action type. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ActionObservation struct { + + // A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. 
Possible values are Approval, Build, Deploy, Invoke, Source and Test. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // A map of the action declaration's configuration. Configurations options for action types and providers can be found in the Pipeline Structure Reference and Action Structure Reference documentation. + // +mapType=granular + Configuration map[string]*string `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A list of artifact names to be worked on. + InputArtifacts []*string `json:"inputArtifacts,omitempty" tf:"input_artifacts,omitempty"` + + // The action declaration's name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The namespace all output variables will be accessed from. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // A list of artifact names to output. Output artifact names must be unique within a pipeline. + OutputArtifacts []*string `json:"outputArtifacts,omitempty" tf:"output_artifacts,omitempty"` + + // The creator of the action being called. Possible values are AWS, Custom and ThirdParty. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // The provider of the service being called by the action. Valid providers are determined by the action category. Provider names are listed in the Action Structure Reference documentation. + Provider *string `json:"provider,omitempty" tf:"provider,omitempty"` + + // The region in which to run the action. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The ARN of the IAM service role that will perform the declared action. This is assumed through the roleArn for the pipeline. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The order in which actions are run. + RunOrder *float64 `json:"runOrder,omitempty" tf:"run_order,omitempty"` + + // A string that identifies the action type. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ActionParameters struct { + + // A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Possible values are Approval, Build, Deploy, Invoke, Source and Test. + // +kubebuilder:validation:Optional + Category *string `json:"category" tf:"category,omitempty"` + + // A map of the action declaration's configuration. Configurations options for action types and providers can be found in the Pipeline Structure Reference and Action Structure Reference documentation. + // +kubebuilder:validation:Optional + // +mapType=granular + Configuration map[string]*string `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A list of artifact names to be worked on. + // +kubebuilder:validation:Optional + InputArtifacts []*string `json:"inputArtifacts,omitempty" tf:"input_artifacts,omitempty"` + + // The action declaration's name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The namespace all output variables will be accessed from. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // A list of artifact names to output. Output artifact names must be unique within a pipeline. + // +kubebuilder:validation:Optional + OutputArtifacts []*string `json:"outputArtifacts,omitempty" tf:"output_artifacts,omitempty"` + + // The creator of the action being called. Possible values are AWS, Custom and ThirdParty. + // +kubebuilder:validation:Optional + Owner *string `json:"owner" tf:"owner,omitempty"` + + // The provider of the service being called by the action. Valid providers are determined by the action category. Provider names are listed in the Action Structure Reference documentation. + // +kubebuilder:validation:Optional + Provider *string `json:"provider" tf:"provider,omitempty"` + + // The region in which to run the action. 
+ // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The ARN of the IAM service role that will perform the declared action. This is assumed through the roleArn for the pipeline. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The order in which actions are run. + // +kubebuilder:validation:Optional + RunOrder *float64 `json:"runOrder,omitempty" tf:"run_order,omitempty"` + + // A string that identifies the action type. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type ArtifactStoreInitParameters struct { + + // The encryption key block AWS CodePipeline uses to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If you don't specify a key, AWS CodePipeline uses the default key for Amazon Simple Storage Service (Amazon S3). An encryption_key block is documented below. + EncryptionKey *EncryptionKeyInitParameters `json:"encryptionKey,omitempty" tf:"encryption_key,omitempty"` + + // The location where AWS CodePipeline stores artifacts for a pipeline; currently only S3 is supported. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Reference to a Bucket in s3 to populate location. + // +kubebuilder:validation:Optional + LocationRef *v1.Reference `json:"locationRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate location. 
+ // +kubebuilder:validation:Optional + LocationSelector *v1.Selector `json:"locationSelector,omitempty" tf:"-"` + + // The type of the artifact store, such as Amazon S3 + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ArtifactStoreObservation struct { + + // The encryption key block AWS CodePipeline uses to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If you don't specify a key, AWS CodePipeline uses the default key for Amazon Simple Storage Service (Amazon S3). An encryption_key block is documented below. + EncryptionKey *EncryptionKeyObservation `json:"encryptionKey,omitempty" tf:"encryption_key,omitempty"` + + // The location where AWS CodePipeline stores artifacts for a pipeline; currently only S3 is supported. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The region where the artifact store is located. Required for a cross-region CodePipeline, do not provide for a single-region CodePipeline. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The type of the artifact store, such as Amazon S3 + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ArtifactStoreParameters struct { + + // The encryption key block AWS CodePipeline uses to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If you don't specify a key, AWS CodePipeline uses the default key for Amazon Simple Storage Service (Amazon S3). An encryption_key block is documented below. + // +kubebuilder:validation:Optional + EncryptionKey *EncryptionKeyParameters `json:"encryptionKey,omitempty" tf:"encryption_key,omitempty"` + + // The location where AWS CodePipeline stores artifacts for a pipeline; currently only S3 is supported. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Reference to a Bucket in s3 to populate location. + // +kubebuilder:validation:Optional + LocationRef *v1.Reference `json:"locationRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate location. + // +kubebuilder:validation:Optional + LocationSelector *v1.Selector `json:"locationSelector,omitempty" tf:"-"` + + // The region where the artifact store is located. Required for a cross-region CodePipeline, do not provide for a single-region CodePipeline. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The type of the artifact store, such as Amazon S3 + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type BranchesInitParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type BranchesObservation struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type BranchesParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. 
+ // +kubebuilder:validation:Optional + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + // +kubebuilder:validation:Optional + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type CodepipelineInitParameters struct { + + // One or more artifact_store blocks. Artifact stores are documented below. + ArtifactStore []ArtifactStoreInitParameters `json:"artifactStore,omitempty" tf:"artifact_store,omitempty"` + + // The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED. For value values, refer to the AWS documentation. + ExecutionMode *string `json:"executionMode,omitempty" tf:"execution_mode,omitempty"` + + // Type of the pipeline. Possible values are: V1 and V2. Default value is V1. + PipelineType *string `json:"pipelineType,omitempty" tf:"pipeline_type,omitempty"` + + // A service role Amazon Resource Name (ARN) that grants AWS CodePipeline permission to make calls to AWS services on your behalf. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // (Minimum of at least two stage blocks is required) A stage block. Stages are documented below. + Stage []StageInitParameters `json:"stage,omitempty" tf:"stage,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A trigger block. Valid only when pipeline_type is V2. Triggers are documented below. + Trigger []TriggerInitParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` + + // A pipeline-level variable block. Valid only when pipeline_type is V2. Variable are documented below. + Variable []VariableInitParameters `json:"variable,omitempty" tf:"variable,omitempty"` +} + +type CodepipelineObservation struct { + + // The codepipeline ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // One or more artifact_store blocks. Artifact stores are documented below. + ArtifactStore []ArtifactStoreObservation `json:"artifactStore,omitempty" tf:"artifact_store,omitempty"` + + // The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED. For value values, refer to the AWS documentation. + ExecutionMode *string `json:"executionMode,omitempty" tf:"execution_mode,omitempty"` + + // The codepipeline ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of the pipeline. Possible values are: V1 and V2. Default value is V1. + PipelineType *string `json:"pipelineType,omitempty" tf:"pipeline_type,omitempty"` + + // A service role Amazon Resource Name (ARN) that grants AWS CodePipeline permission to make calls to AWS services on your behalf. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // (Minimum of at least two stage blocks is required) A stage block. Stages are documented below. + Stage []StageObservation `json:"stage,omitempty" tf:"stage,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // A trigger block. Valid only when pipeline_type is V2. Triggers are documented below. + Trigger []TriggerObservation `json:"trigger,omitempty" tf:"trigger,omitempty"` + + // A pipeline-level variable block. Valid only when pipeline_type is V2. Variable are documented below. + Variable []VariableObservation `json:"variable,omitempty" tf:"variable,omitempty"` +} + +type CodepipelineParameters struct { + + // One or more artifact_store blocks. Artifact stores are documented below. + // +kubebuilder:validation:Optional + ArtifactStore []ArtifactStoreParameters `json:"artifactStore,omitempty" tf:"artifact_store,omitempty"` + + // The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED. For value values, refer to the AWS documentation. + // +kubebuilder:validation:Optional + ExecutionMode *string `json:"executionMode,omitempty" tf:"execution_mode,omitempty"` + + // Type of the pipeline. Possible values are: V1 and V2. Default value is V1. + // +kubebuilder:validation:Optional + PipelineType *string `json:"pipelineType,omitempty" tf:"pipeline_type,omitempty"` + + // The region in which to run the action. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A service role Amazon Resource Name (ARN) that grants AWS CodePipeline permission to make calls to AWS services on your behalf. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // (Minimum of at least two stage blocks is required) A stage block. Stages are documented below. + // +kubebuilder:validation:Optional + Stage []StageParameters `json:"stage,omitempty" tf:"stage,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A trigger block. Valid only when pipeline_type is V2. Triggers are documented below. + // +kubebuilder:validation:Optional + Trigger []TriggerParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` + + // A pipeline-level variable block. Valid only when pipeline_type is V2. Variable are documented below. + // +kubebuilder:validation:Optional + Variable []VariableParameters `json:"variable,omitempty" tf:"variable,omitempty"` +} + +type EncryptionKeyInitParameters struct { + + // The KMS key ARN or ID + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The type of key; currently only KMS is supported + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncryptionKeyObservation struct { + + // The KMS key ARN or ID + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The type of key; currently only KMS is supported + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncryptionKeyParameters struct { + + // The KMS key ARN or ID + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // The type of key; currently only KMS is supported + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type FilePathsInitParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from 
starting the pipeline. + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type FilePathsObservation struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type FilePathsParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + // +kubebuilder:validation:Optional + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + // +kubebuilder:validation:Optional + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type GitConfigurationInitParameters struct { + + // The field where the repository event that will start the pipeline is specified as pull requests. A pull_request block is documented below. + PullRequest []PullRequestInitParameters `json:"pullRequest,omitempty" tf:"pull_request,omitempty"` + + // The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details. A push block is documented below. + Push []PushInitParameters `json:"push,omitempty" tf:"push,omitempty"` + + // The name of the pipeline source action where the trigger configuration. 
+ SourceActionName *string `json:"sourceActionName,omitempty" tf:"source_action_name,omitempty"` +} + +type GitConfigurationObservation struct { + + // The field where the repository event that will start the pipeline is specified as pull requests. A pull_request block is documented below. + PullRequest []PullRequestObservation `json:"pullRequest,omitempty" tf:"pull_request,omitempty"` + + // The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details. A push block is documented below. + Push []PushObservation `json:"push,omitempty" tf:"push,omitempty"` + + // The name of the pipeline source action where the trigger configuration. + SourceActionName *string `json:"sourceActionName,omitempty" tf:"source_action_name,omitempty"` +} + +type GitConfigurationParameters struct { + + // The field where the repository event that will start the pipeline is specified as pull requests. A pull_request block is documented below. + // +kubebuilder:validation:Optional + PullRequest []PullRequestParameters `json:"pullRequest,omitempty" tf:"pull_request,omitempty"` + + // The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details. A push block is documented below. + // +kubebuilder:validation:Optional + Push []PushParameters `json:"push,omitempty" tf:"push,omitempty"` + + // The name of the pipeline source action where the trigger configuration. + // +kubebuilder:validation:Optional + SourceActionName *string `json:"sourceActionName" tf:"source_action_name,omitempty"` +} + +type PullRequestInitParameters struct { + + // The field that specifies to filter on branches for the pull request trigger configuration. A branches block is documented below. + Branches *BranchesInitParameters `json:"branches,omitempty" tf:"branches,omitempty"` + + // A list that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration. 
Possible values are OPEN, UPDATED and CLOSED. + Events []*string `json:"events,omitempty" tf:"events,omitempty"` + + // The field that specifies to filter on file paths for the pull request trigger configuration. A file_paths block is documented below. + FilePaths *FilePathsInitParameters `json:"filePaths,omitempty" tf:"file_paths,omitempty"` +} + +type PullRequestObservation struct { + + // The field that specifies to filter on branches for the pull request trigger configuration. A branches block is documented below. + Branches *BranchesObservation `json:"branches,omitempty" tf:"branches,omitempty"` + + // A list that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration. Possible values are OPEN, UPDATED and CLOSED. + Events []*string `json:"events,omitempty" tf:"events,omitempty"` + + // The field that specifies to filter on file paths for the pull request trigger configuration. A file_paths block is documented below. + FilePaths *FilePathsObservation `json:"filePaths,omitempty" tf:"file_paths,omitempty"` +} + +type PullRequestParameters struct { + + // The field that specifies to filter on branches for the pull request trigger configuration. A branches block is documented below. + // +kubebuilder:validation:Optional + Branches *BranchesParameters `json:"branches,omitempty" tf:"branches,omitempty"` + + // A list that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration. Possible values are OPEN, UPDATED and CLOSED. + // +kubebuilder:validation:Optional + Events []*string `json:"events,omitempty" tf:"events,omitempty"` + + // The field that specifies to filter on file paths for the pull request trigger configuration. A file_paths block is documented below. 
+ // +kubebuilder:validation:Optional + FilePaths *FilePathsParameters `json:"filePaths,omitempty" tf:"file_paths,omitempty"` +} + +type PushBranchesInitParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type PushBranchesObservation struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type PushBranchesParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + // +kubebuilder:validation:Optional + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + // +kubebuilder:validation:Optional + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type PushFilePathsInitParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type PushFilePathsObservation struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. 
+ Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type PushFilePathsParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + // +kubebuilder:validation:Optional + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + // +kubebuilder:validation:Optional + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type PushInitParameters struct { + + // The field that specifies to filter on branches for the pull request trigger configuration. A branches block is documented below. + Branches *PushBranchesInitParameters `json:"branches,omitempty" tf:"branches,omitempty"` + + // The field that specifies to filter on file paths for the pull request trigger configuration. A file_paths block is documented below. + FilePaths *PushFilePathsInitParameters `json:"filePaths,omitempty" tf:"file_paths,omitempty"` + + // Key-value map of resource tags. + Tags *TagsInitParameters `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PushObservation struct { + + // The field that specifies to filter on branches for the pull request trigger configuration. A branches block is documented below. + Branches *PushBranchesObservation `json:"branches,omitempty" tf:"branches,omitempty"` + + // The field that specifies to filter on file paths for the pull request trigger configuration. A file_paths block is documented below. + FilePaths *PushFilePathsObservation `json:"filePaths,omitempty" tf:"file_paths,omitempty"` + + // Key-value map of resource tags. 
+ Tags *TagsObservation `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PushParameters struct { + + // The field that specifies to filter on branches for the pull request trigger configuration. A branches block is documented below. + // +kubebuilder:validation:Optional + Branches *PushBranchesParameters `json:"branches,omitempty" tf:"branches,omitempty"` + + // The field that specifies to filter on file paths for the pull request trigger configuration. A file_paths block is documented below. + // +kubebuilder:validation:Optional + FilePaths *PushFilePathsParameters `json:"filePaths,omitempty" tf:"file_paths,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + Tags *TagsParameters `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StageInitParameters struct { + + // The action(s) to include in the stage. Defined as an action block below + Action []ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the stage. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StageObservation struct { + + // The action(s) to include in the stage. Defined as an action block below + Action []ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the stage. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StageParameters struct { + + // The action(s) to include in the stage. Defined as an action block below + // +kubebuilder:validation:Optional + Action []ActionParameters `json:"action" tf:"action,omitempty"` + + // The name of the stage. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type TagsInitParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. 
+ Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type TagsObservation struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type TagsParameters struct { + + // A list of patterns of Git tags that, when pushed, are to be excluded from starting the pipeline. + // +kubebuilder:validation:Optional + Excludes []*string `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // A list of patterns of Git tags that, when pushed, are to be included as criteria that starts the pipeline. + // +kubebuilder:validation:Optional + Includes []*string `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type TriggerInitParameters struct { + + // Provides the filter criteria and the source stage for the repository event that starts the pipeline. For more information, refer to the AWS documentation. A git_configuration block is documented below. + GitConfiguration *GitConfigurationInitParameters `json:"gitConfiguration,omitempty" tf:"git_configuration,omitempty"` + + // The source provider for the event. Possible value is CodeStarSourceConnection. + ProviderType *string `json:"providerType,omitempty" tf:"provider_type,omitempty"` +} + +type TriggerObservation struct { + + // Provides the filter criteria and the source stage for the repository event that starts the pipeline. For more information, refer to the AWS documentation. A git_configuration block is documented below. 
+ GitConfiguration *GitConfigurationObservation `json:"gitConfiguration,omitempty" tf:"git_configuration,omitempty"` + + // The source provider for the event. Possible value is CodeStarSourceConnection. + ProviderType *string `json:"providerType,omitempty" tf:"provider_type,omitempty"` +} + +type TriggerParameters struct { + + // Provides the filter criteria and the source stage for the repository event that starts the pipeline. For more information, refer to the AWS documentation. A git_configuration block is documented below. + // +kubebuilder:validation:Optional + GitConfiguration *GitConfigurationParameters `json:"gitConfiguration" tf:"git_configuration,omitempty"` + + // The source provider for the event. Possible value is CodeStarSourceConnection. + // +kubebuilder:validation:Optional + ProviderType *string `json:"providerType" tf:"provider_type,omitempty"` +} + +type VariableInitParameters struct { + + // The default value of a pipeline-level variable. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // The description of a pipeline-level variable. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of a pipeline-level variable. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type VariableObservation struct { + + // The default value of a pipeline-level variable. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // The description of a pipeline-level variable. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of a pipeline-level variable. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type VariableParameters struct { + + // The default value of a pipeline-level variable. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // The description of a pipeline-level variable. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of a pipeline-level variable. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +// CodepipelineSpec defines the desired state of Codepipeline +type CodepipelineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CodepipelineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CodepipelineInitParameters `json:"initProvider,omitempty"` +} + +// CodepipelineStatus defines the observed state of Codepipeline. +type CodepipelineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CodepipelineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Codepipeline is the Schema for the Codepipelines API. 
Provides a CodePipeline +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Codepipeline struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.artifactStore) || (has(self.initProvider) && has(self.initProvider.artifactStore))",message="spec.forProvider.artifactStore is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.stage) || (has(self.initProvider) && has(self.initProvider.stage))",message="spec.forProvider.stage is a required parameter" + Spec CodepipelineSpec `json:"spec"` + Status CodepipelineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CodepipelineList contains a list of Codepipelines +type CodepipelineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Codepipeline `json:"items"` +} + +// Repository type metadata. +var ( + Codepipeline_Kind = "Codepipeline" + Codepipeline_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Codepipeline_Kind}.String() + Codepipeline_KindAPIVersion = Codepipeline_Kind + "." 
+ CRDGroupVersion.String() + Codepipeline_GroupVersionKind = CRDGroupVersion.WithKind(Codepipeline_Kind) +) + +func init() { + SchemeBuilder.Register(&Codepipeline{}, &CodepipelineList{}) +} diff --git a/apis/codepipeline/v1beta2/zz_customactiontype_terraformed.go b/apis/codepipeline/v1beta2/zz_customactiontype_terraformed.go new file mode 100755 index 0000000000..d3d8283a47 --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_customactiontype_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CustomActionType +func (mg *CustomActionType) GetTerraformResourceType() string { + return "aws_codepipeline_custom_action_type" +} + +// GetConnectionDetailsMapping for this CustomActionType +func (tr *CustomActionType) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CustomActionType +func (tr *CustomActionType) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CustomActionType +func (tr *CustomActionType) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CustomActionType +func (tr *CustomActionType) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CustomActionType +func (tr *CustomActionType) 
GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CustomActionType +func (tr *CustomActionType) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CustomActionType +func (tr *CustomActionType) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CustomActionType +func (tr *CustomActionType) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CustomActionType using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *CustomActionType) LateInitialize(attrs []byte) (bool, error) { + params := &CustomActionTypeParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CustomActionType) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/codepipeline/v1beta2/zz_customactiontype_types.go b/apis/codepipeline/v1beta2/zz_customactiontype_types.go new file mode 100755 index 0000000000..77411a7835 --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_customactiontype_types.go @@ -0,0 +1,375 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationPropertyInitParameters struct { + + // The description of the action configuration property. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether the configuration property is a key. + Key *bool `json:"key,omitempty" tf:"key,omitempty"` + + // The name of the action configuration property. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Indicates that the property will be used in conjunction with PollForJobs. + Queryable *bool `json:"queryable,omitempty" tf:"queryable,omitempty"` + + // Whether the configuration property is a required value. 
+ Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // Whether the configuration property is secret. + Secret *bool `json:"secret,omitempty" tf:"secret,omitempty"` + + // The type of the configuration property. Valid values: String, Number, Boolean + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ConfigurationPropertyObservation struct { + + // The description of the action configuration property. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether the configuration property is a key. + Key *bool `json:"key,omitempty" tf:"key,omitempty"` + + // The name of the action configuration property. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Indicates that the property will be used in conjunction with PollForJobs. + Queryable *bool `json:"queryable,omitempty" tf:"queryable,omitempty"` + + // Whether the configuration property is a required value. + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // Whether the configuration property is secret. + Secret *bool `json:"secret,omitempty" tf:"secret,omitempty"` + + // The type of the configuration property. Valid values: String, Number, Boolean + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ConfigurationPropertyParameters struct { + + // The description of the action configuration property. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether the configuration property is a key. + // +kubebuilder:validation:Optional + Key *bool `json:"key" tf:"key,omitempty"` + + // The name of the action configuration property. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Indicates that the property will be used in conjunction with PollForJobs. 
+ // +kubebuilder:validation:Optional + Queryable *bool `json:"queryable,omitempty" tf:"queryable,omitempty"` + + // Whether the configuration property is a required value. + // +kubebuilder:validation:Optional + Required *bool `json:"required" tf:"required,omitempty"` + + // Whether the configuration property is secret. + // +kubebuilder:validation:Optional + Secret *bool `json:"secret" tf:"secret,omitempty"` + + // The type of the configuration property. Valid values: String, Number, Boolean + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type CustomActionTypeInitParameters struct { + + // The category of the custom action. Valid values: Source, Build, Deploy, Test, Invoke, Approval + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The configuration properties for the custom action. Max 10 items. + ConfigurationProperty []ConfigurationPropertyInitParameters `json:"configurationProperty,omitempty" tf:"configuration_property,omitempty"` + + // The details of the input artifact for the action. + InputArtifactDetails *InputArtifactDetailsInitParameters `json:"inputArtifactDetails,omitempty" tf:"input_artifact_details,omitempty"` + + // The details of the output artifact of the action. + OutputArtifactDetails *OutputArtifactDetailsInitParameters `json:"outputArtifactDetails,omitempty" tf:"output_artifact_details,omitempty"` + + // The provider of the service used in the custom action + ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` + + // The settings for an action type. + Settings *SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version identifier of the custom action. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type CustomActionTypeObservation struct { + + // The action ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The category of the custom action. Valid values: Source, Build, Deploy, Test, Invoke, Approval + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The configuration properties for the custom action. Max 10 items. + ConfigurationProperty []ConfigurationPropertyObservation `json:"configurationProperty,omitempty" tf:"configuration_property,omitempty"` + + // Composed of category, provider and version + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The details of the input artifact for the action. + InputArtifactDetails *InputArtifactDetailsObservation `json:"inputArtifactDetails,omitempty" tf:"input_artifact_details,omitempty"` + + // The details of the output artifact of the action. + OutputArtifactDetails *OutputArtifactDetailsObservation `json:"outputArtifactDetails,omitempty" tf:"output_artifact_details,omitempty"` + + // The creator of the action being called. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // The provider of the service used in the custom action + ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` + + // The settings for an action type. + Settings *SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The version identifier of the custom action. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type CustomActionTypeParameters struct { + + // The category of the custom action. Valid values: Source, Build, Deploy, Test, Invoke, Approval + // +kubebuilder:validation:Optional + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The configuration properties for the custom action. Max 10 items. + // +kubebuilder:validation:Optional + ConfigurationProperty []ConfigurationPropertyParameters `json:"configurationProperty,omitempty" tf:"configuration_property,omitempty"` + + // The details of the input artifact for the action. + // +kubebuilder:validation:Optional + InputArtifactDetails *InputArtifactDetailsParameters `json:"inputArtifactDetails,omitempty" tf:"input_artifact_details,omitempty"` + + // The details of the output artifact of the action. + // +kubebuilder:validation:Optional + OutputArtifactDetails *OutputArtifactDetailsParameters `json:"outputArtifactDetails,omitempty" tf:"output_artifact_details,omitempty"` + + // The provider of the service used in the custom action + // +kubebuilder:validation:Optional + ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The settings for an action type. + // +kubebuilder:validation:Optional + Settings *SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version identifier of the custom action. 
+ // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type InputArtifactDetailsInitParameters struct { + + // The maximum number of artifacts allowed for the action type. Min: 0, Max: 5 + MaximumCount *float64 `json:"maximumCount,omitempty" tf:"maximum_count,omitempty"` + + // The minimum number of artifacts allowed for the action type. Min: 0, Max: 5 + MinimumCount *float64 `json:"minimumCount,omitempty" tf:"minimum_count,omitempty"` +} + +type InputArtifactDetailsObservation struct { + + // The maximum number of artifacts allowed for the action type. Min: 0, Max: 5 + MaximumCount *float64 `json:"maximumCount,omitempty" tf:"maximum_count,omitempty"` + + // The minimum number of artifacts allowed for the action type. Min: 0, Max: 5 + MinimumCount *float64 `json:"minimumCount,omitempty" tf:"minimum_count,omitempty"` +} + +type InputArtifactDetailsParameters struct { + + // The maximum number of artifacts allowed for the action type. Min: 0, Max: 5 + // +kubebuilder:validation:Optional + MaximumCount *float64 `json:"maximumCount" tf:"maximum_count,omitempty"` + + // The minimum number of artifacts allowed for the action type. Min: 0, Max: 5 + // +kubebuilder:validation:Optional + MinimumCount *float64 `json:"minimumCount" tf:"minimum_count,omitempty"` +} + +type OutputArtifactDetailsInitParameters struct { + + // The maximum number of artifacts allowed for the action type. Min: 0, Max: 5 + MaximumCount *float64 `json:"maximumCount,omitempty" tf:"maximum_count,omitempty"` + + // The minimum number of artifacts allowed for the action type. Min: 0, Max: 5 + MinimumCount *float64 `json:"minimumCount,omitempty" tf:"minimum_count,omitempty"` +} + +type OutputArtifactDetailsObservation struct { + + // The maximum number of artifacts allowed for the action type. 
Min: 0, Max: 5 + MaximumCount *float64 `json:"maximumCount,omitempty" tf:"maximum_count,omitempty"` + + // The minimum number of artifacts allowed for the action type. Min: 0, Max: 5 + MinimumCount *float64 `json:"minimumCount,omitempty" tf:"minimum_count,omitempty"` +} + +type OutputArtifactDetailsParameters struct { + + // The maximum number of artifacts allowed for the action type. Min: 0, Max: 5 + // +kubebuilder:validation:Optional + MaximumCount *float64 `json:"maximumCount" tf:"maximum_count,omitempty"` + + // The minimum number of artifacts allowed for the action type. Min: 0, Max: 5 + // +kubebuilder:validation:Optional + MinimumCount *float64 `json:"minimumCount" tf:"minimum_count,omitempty"` +} + +type SettingsInitParameters struct { + + // The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system. + EntityURLTemplate *string `json:"entityUrlTemplate,omitempty" tf:"entity_url_template,omitempty"` + + // The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system. + ExecutionURLTemplate *string `json:"executionUrlTemplate,omitempty" tf:"execution_url_template,omitempty"` + + // The URL returned to the AWS CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action. + RevisionURLTemplate *string `json:"revisionUrlTemplate,omitempty" tf:"revision_url_template,omitempty"` + + // The URL of a sign-up page where users can sign up for an external service and perform initial configuration of the action provided by that service. + ThirdPartyConfigurationURL *string `json:"thirdPartyConfigurationUrl,omitempty" tf:"third_party_configuration_url,omitempty"` +} + +type SettingsObservation struct { + + // The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system. 
+ EntityURLTemplate *string `json:"entityUrlTemplate,omitempty" tf:"entity_url_template,omitempty"` + + // The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system. + ExecutionURLTemplate *string `json:"executionUrlTemplate,omitempty" tf:"execution_url_template,omitempty"` + + // The URL returned to the AWS CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action. + RevisionURLTemplate *string `json:"revisionUrlTemplate,omitempty" tf:"revision_url_template,omitempty"` + + // The URL of a sign-up page where users can sign up for an external service and perform initial configuration of the action provided by that service. + ThirdPartyConfigurationURL *string `json:"thirdPartyConfigurationUrl,omitempty" tf:"third_party_configuration_url,omitempty"` +} + +type SettingsParameters struct { + + // The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system. + // +kubebuilder:validation:Optional + EntityURLTemplate *string `json:"entityUrlTemplate,omitempty" tf:"entity_url_template,omitempty"` + + // The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system. + // +kubebuilder:validation:Optional + ExecutionURLTemplate *string `json:"executionUrlTemplate,omitempty" tf:"execution_url_template,omitempty"` + + // The URL returned to the AWS CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action. + // +kubebuilder:validation:Optional + RevisionURLTemplate *string `json:"revisionUrlTemplate,omitempty" tf:"revision_url_template,omitempty"` + + // The URL of a sign-up page where users can sign up for an external service and perform initial configuration of the action provided by that service. 
+ // +kubebuilder:validation:Optional + ThirdPartyConfigurationURL *string `json:"thirdPartyConfigurationUrl,omitempty" tf:"third_party_configuration_url,omitempty"` +} + +// CustomActionTypeSpec defines the desired state of CustomActionType +type CustomActionTypeSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CustomActionTypeParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CustomActionTypeInitParameters `json:"initProvider,omitempty"` +} + +// CustomActionTypeStatus defines the observed state of CustomActionType. +type CustomActionTypeStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CustomActionTypeObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CustomActionType is the Schema for the CustomActionTypes API. Provides a CodePipeline CustomActionType. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CustomActionType struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.category) || (has(self.initProvider) && has(self.initProvider.category))",message="spec.forProvider.category is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.inputArtifactDetails) || (has(self.initProvider) && has(self.initProvider.inputArtifactDetails))",message="spec.forProvider.inputArtifactDetails is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.outputArtifactDetails) || (has(self.initProvider) && has(self.initProvider.outputArtifactDetails))",message="spec.forProvider.outputArtifactDetails is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.providerName) || (has(self.initProvider) && has(self.initProvider.providerName))",message="spec.forProvider.providerName is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec CustomActionTypeSpec `json:"spec"` + Status CustomActionTypeStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CustomActionTypeList contains a list of CustomActionTypes +type CustomActionTypeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CustomActionType `json:"items"` +} + +// Repository type metadata. +var ( + CustomActionType_Kind = "CustomActionType" + CustomActionType_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CustomActionType_Kind}.String() + CustomActionType_KindAPIVersion = CustomActionType_Kind + "." + CRDGroupVersion.String() + CustomActionType_GroupVersionKind = CRDGroupVersion.WithKind(CustomActionType_Kind) +) + +func init() { + SchemeBuilder.Register(&CustomActionType{}, &CustomActionTypeList{}) +} diff --git a/apis/codepipeline/v1beta2/zz_generated.conversion_hubs.go b/apis/codepipeline/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..a7cf9796ab --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Codepipeline) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *CustomActionType) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *Webhook) Hub() {} diff --git a/apis/codepipeline/v1beta2/zz_generated.deepcopy.go b/apis/codepipeline/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a1fc1d1399 --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3174 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InputArtifacts != nil { + in, out := &in.InputArtifacts, &out.InputArtifacts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.OutputArtifacts != nil { + in, out := &in.OutputArtifacts, &out.OutputArtifacts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Owner != 
nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RunOrder != nil { + in, out := &in.RunOrder, &out.RunOrder + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InputArtifacts != nil { + in, out := &in.InputArtifacts, &out.InputArtifacts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.OutputArtifacts != nil { + in, out := &in.OutputArtifacts, &out.OutputArtifacts + *out 
= make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RunOrder != nil { + in, out := &in.RunOrder, &out.RunOrder + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InputArtifacts != nil { + in, out := &in.InputArtifacts, &out.InputArtifacts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.OutputArtifacts != nil { + in, out := &in.OutputArtifacts, &out.OutputArtifacts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RunOrder != nil { + in, out := &in.RunOrder, &out.RunOrder + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. 
+func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactStoreInitParameters) DeepCopyInto(out *ArtifactStoreInitParameters) { + *out = *in + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(EncryptionKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LocationRef != nil { + in, out := &in.LocationRef, &out.LocationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LocationSelector != nil { + in, out := &in.LocationSelector, &out.LocationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactStoreInitParameters. +func (in *ArtifactStoreInitParameters) DeepCopy() *ArtifactStoreInitParameters { + if in == nil { + return nil + } + out := new(ArtifactStoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ArtifactStoreObservation) DeepCopyInto(out *ArtifactStoreObservation) { + *out = *in + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(EncryptionKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactStoreObservation. +func (in *ArtifactStoreObservation) DeepCopy() *ArtifactStoreObservation { + if in == nil { + return nil + } + out := new(ArtifactStoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactStoreParameters) DeepCopyInto(out *ArtifactStoreParameters) { + *out = *in + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(EncryptionKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LocationRef != nil { + in, out := &in.LocationRef, &out.LocationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LocationSelector != nil { + in, out := &in.LocationSelector, &out.LocationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactStoreParameters. 
+func (in *ArtifactStoreParameters) DeepCopy() *ArtifactStoreParameters { + if in == nil { + return nil + } + out := new(ArtifactStoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationInitParameters) DeepCopyInto(out *AuthenticationConfigurationInitParameters) { + *out = *in + if in.AllowedIPRange != nil { + in, out := &in.AllowedIPRange, &out.AllowedIPRange + *out = new(string) + **out = **in + } + if in.SecretTokenSecretRef != nil { + in, out := &in.SecretTokenSecretRef, &out.SecretTokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationInitParameters. +func (in *AuthenticationConfigurationInitParameters) DeepCopy() *AuthenticationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationObservation) DeepCopyInto(out *AuthenticationConfigurationObservation) { + *out = *in + if in.AllowedIPRange != nil { + in, out := &in.AllowedIPRange, &out.AllowedIPRange + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationObservation. +func (in *AuthenticationConfigurationObservation) DeepCopy() *AuthenticationConfigurationObservation { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationConfigurationParameters) DeepCopyInto(out *AuthenticationConfigurationParameters) { + *out = *in + if in.AllowedIPRange != nil { + in, out := &in.AllowedIPRange, &out.AllowedIPRange + *out = new(string) + **out = **in + } + if in.SecretTokenSecretRef != nil { + in, out := &in.SecretTokenSecretRef, &out.SecretTokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationParameters. +func (in *AuthenticationConfigurationParameters) DeepCopy() *AuthenticationConfigurationParameters { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BranchesInitParameters) DeepCopyInto(out *BranchesInitParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BranchesInitParameters. +func (in *BranchesInitParameters) DeepCopy() *BranchesInitParameters { + if in == nil { + return nil + } + out := new(BranchesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BranchesObservation) DeepCopyInto(out *BranchesObservation) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BranchesObservation. +func (in *BranchesObservation) DeepCopy() *BranchesObservation { + if in == nil { + return nil + } + out := new(BranchesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BranchesParameters) DeepCopyInto(out *BranchesParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BranchesParameters. +func (in *BranchesParameters) DeepCopy() *BranchesParameters { + if in == nil { + return nil + } + out := new(BranchesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Codepipeline) DeepCopyInto(out *Codepipeline) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Codepipeline. +func (in *Codepipeline) DeepCopy() *Codepipeline { + if in == nil { + return nil + } + out := new(Codepipeline) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Codepipeline) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodepipelineInitParameters) DeepCopyInto(out *CodepipelineInitParameters) { + *out = *in + if in.ArtifactStore != nil { + in, out := &in.ArtifactStore, &out.ArtifactStore + *out = make([]ArtifactStoreInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExecutionMode != nil { + in, out := &in.ExecutionMode, &out.ExecutionMode + *out = new(string) + **out = **in + } + if in.PipelineType != nil { + in, out := &in.PipelineType, &out.PipelineType + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Stage != nil { + in, out := &in.Stage, &out.Stage + *out = make([]StageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = make([]TriggerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Variable != nil { + in, out := &in.Variable, &out.Variable + *out = make([]VariableInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodepipelineInitParameters. +func (in *CodepipelineInitParameters) DeepCopy() *CodepipelineInitParameters { + if in == nil { + return nil + } + out := new(CodepipelineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodepipelineList) DeepCopyInto(out *CodepipelineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Codepipeline, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodepipelineList. +func (in *CodepipelineList) DeepCopy() *CodepipelineList { + if in == nil { + return nil + } + out := new(CodepipelineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CodepipelineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CodepipelineObservation) DeepCopyInto(out *CodepipelineObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArtifactStore != nil { + in, out := &in.ArtifactStore, &out.ArtifactStore + *out = make([]ArtifactStoreObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExecutionMode != nil { + in, out := &in.ExecutionMode, &out.ExecutionMode + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PipelineType != nil { + in, out := &in.PipelineType, &out.PipelineType + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Stage != nil { + in, out := &in.Stage, &out.Stage + *out = make([]StageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = make([]TriggerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Variable != nil { + in, out := &in.Variable, &out.Variable + *out = make([]VariableObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodepipelineObservation. +func (in *CodepipelineObservation) DeepCopy() *CodepipelineObservation { + if in == nil { + return nil + } + out := new(CodepipelineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodepipelineParameters) DeepCopyInto(out *CodepipelineParameters) { + *out = *in + if in.ArtifactStore != nil { + in, out := &in.ArtifactStore, &out.ArtifactStore + *out = make([]ArtifactStoreParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExecutionMode != nil { + in, out := &in.ExecutionMode, &out.ExecutionMode + *out = new(string) + **out = **in + } + if in.PipelineType != nil { + in, out := &in.PipelineType, &out.PipelineType + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Stage != nil { + in, out := &in.Stage, &out.Stage + *out = make([]StageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Trigger != nil { + in, 
out := &in.Trigger, &out.Trigger + *out = make([]TriggerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Variable != nil { + in, out := &in.Variable, &out.Variable + *out = make([]VariableParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodepipelineParameters. +func (in *CodepipelineParameters) DeepCopy() *CodepipelineParameters { + if in == nil { + return nil + } + out := new(CodepipelineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodepipelineSpec) DeepCopyInto(out *CodepipelineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodepipelineSpec. +func (in *CodepipelineSpec) DeepCopy() *CodepipelineSpec { + if in == nil { + return nil + } + out := new(CodepipelineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodepipelineStatus) DeepCopyInto(out *CodepipelineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodepipelineStatus. +func (in *CodepipelineStatus) DeepCopy() *CodepipelineStatus { + if in == nil { + return nil + } + out := new(CodepipelineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationPropertyInitParameters) DeepCopyInto(out *ConfigurationPropertyInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Queryable != nil { + in, out := &in.Queryable, &out.Queryable + *out = new(bool) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationPropertyInitParameters. +func (in *ConfigurationPropertyInitParameters) DeepCopy() *ConfigurationPropertyInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationPropertyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationPropertyObservation) DeepCopyInto(out *ConfigurationPropertyObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Queryable != nil { + in, out := &in.Queryable, &out.Queryable + *out = new(bool) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationPropertyObservation. +func (in *ConfigurationPropertyObservation) DeepCopy() *ConfigurationPropertyObservation { + if in == nil { + return nil + } + out := new(ConfigurationPropertyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationPropertyParameters) DeepCopyInto(out *ConfigurationPropertyParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Queryable != nil { + in, out := &in.Queryable, &out.Queryable + *out = new(bool) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationPropertyParameters. +func (in *ConfigurationPropertyParameters) DeepCopy() *ConfigurationPropertyParameters { + if in == nil { + return nil + } + out := new(ConfigurationPropertyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionType) DeepCopyInto(out *CustomActionType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionType. +func (in *CustomActionType) DeepCopy() *CustomActionType { + if in == nil { + return nil + } + out := new(CustomActionType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CustomActionType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionTypeInitParameters) DeepCopyInto(out *CustomActionTypeInitParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.ConfigurationProperty != nil { + in, out := &in.ConfigurationProperty, &out.ConfigurationProperty + *out = make([]ConfigurationPropertyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputArtifactDetails != nil { + in, out := &in.InputArtifactDetails, &out.InputArtifactDetails + *out = new(InputArtifactDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputArtifactDetails != nil { + in, out := &in.OutputArtifactDetails, &out.OutputArtifactDetails + *out = new(OutputArtifactDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProviderName != nil { + in, out := &in.ProviderName, &out.ProviderName + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionTypeInitParameters. 
+func (in *CustomActionTypeInitParameters) DeepCopy() *CustomActionTypeInitParameters { + if in == nil { + return nil + } + out := new(CustomActionTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionTypeList) DeepCopyInto(out *CustomActionTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomActionType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionTypeList. +func (in *CustomActionTypeList) DeepCopy() *CustomActionTypeList { + if in == nil { + return nil + } + out := new(CustomActionTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomActionTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomActionTypeObservation) DeepCopyInto(out *CustomActionTypeObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.ConfigurationProperty != nil { + in, out := &in.ConfigurationProperty, &out.ConfigurationProperty + *out = make([]ConfigurationPropertyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InputArtifactDetails != nil { + in, out := &in.InputArtifactDetails, &out.InputArtifactDetails + *out = new(InputArtifactDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.OutputArtifactDetails != nil { + in, out := &in.OutputArtifactDetails, &out.OutputArtifactDetails + *out = new(OutputArtifactDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.ProviderName != nil { + in, out := &in.ProviderName, &out.ProviderName + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if 
in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionTypeObservation. +func (in *CustomActionTypeObservation) DeepCopy() *CustomActionTypeObservation { + if in == nil { + return nil + } + out := new(CustomActionTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionTypeParameters) DeepCopyInto(out *CustomActionTypeParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.ConfigurationProperty != nil { + in, out := &in.ConfigurationProperty, &out.ConfigurationProperty + *out = make([]ConfigurationPropertyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputArtifactDetails != nil { + in, out := &in.InputArtifactDetails, &out.InputArtifactDetails + *out = new(InputArtifactDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputArtifactDetails != nil { + in, out := &in.OutputArtifactDetails, &out.OutputArtifactDetails + *out = new(OutputArtifactDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.ProviderName != nil { + in, out := &in.ProviderName, &out.ProviderName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionTypeParameters. +func (in *CustomActionTypeParameters) DeepCopy() *CustomActionTypeParameters { + if in == nil { + return nil + } + out := new(CustomActionTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionTypeSpec) DeepCopyInto(out *CustomActionTypeSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionTypeSpec. +func (in *CustomActionTypeSpec) DeepCopy() *CustomActionTypeSpec { + if in == nil { + return nil + } + out := new(CustomActionTypeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionTypeStatus) DeepCopyInto(out *CustomActionTypeStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionTypeStatus. +func (in *CustomActionTypeStatus) DeepCopy() *CustomActionTypeStatus { + if in == nil { + return nil + } + out := new(CustomActionTypeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionKeyInitParameters) DeepCopyInto(out *EncryptionKeyInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionKeyInitParameters. +func (in *EncryptionKeyInitParameters) DeepCopy() *EncryptionKeyInitParameters { + if in == nil { + return nil + } + out := new(EncryptionKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionKeyObservation) DeepCopyInto(out *EncryptionKeyObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionKeyObservation. +func (in *EncryptionKeyObservation) DeepCopy() *EncryptionKeyObservation { + if in == nil { + return nil + } + out := new(EncryptionKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionKeyParameters) DeepCopyInto(out *EncryptionKeyParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionKeyParameters. 
+func (in *EncryptionKeyParameters) DeepCopy() *EncryptionKeyParameters { + if in == nil { + return nil + } + out := new(EncryptionKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilePathsInitParameters) DeepCopyInto(out *FilePathsInitParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilePathsInitParameters. +func (in *FilePathsInitParameters) DeepCopy() *FilePathsInitParameters { + if in == nil { + return nil + } + out := new(FilePathsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilePathsObservation) DeepCopyInto(out *FilePathsObservation) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilePathsObservation. 
+func (in *FilePathsObservation) DeepCopy() *FilePathsObservation { + if in == nil { + return nil + } + out := new(FilePathsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilePathsParameters) DeepCopyInto(out *FilePathsParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilePathsParameters. +func (in *FilePathsParameters) DeepCopy() *FilePathsParameters { + if in == nil { + return nil + } + out := new(FilePathsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in + if in.JSONPath != nil { + in, out := &in.JSONPath, &out.JSONPath + *out = new(string) + **out = **in + } + if in.MatchEquals != nil { + in, out := &in.MatchEquals, &out.MatchEquals + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.JSONPath != nil { + in, out := &in.JSONPath, &out.JSONPath + *out = new(string) + **out = **in + } + if in.MatchEquals != nil { + in, out := &in.MatchEquals, &out.MatchEquals + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. +func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.JSONPath != nil { + in, out := &in.JSONPath, &out.JSONPath + *out = new(string) + **out = **in + } + if in.MatchEquals != nil { + in, out := &in.MatchEquals, &out.MatchEquals + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. +func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitConfigurationInitParameters) DeepCopyInto(out *GitConfigurationInitParameters) { + *out = *in + if in.PullRequest != nil { + in, out := &in.PullRequest, &out.PullRequest + *out = make([]PullRequestInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Push != nil { + in, out := &in.Push, &out.Push + *out = make([]PushInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceActionName != nil { + in, out := &in.SourceActionName, &out.SourceActionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitConfigurationInitParameters. +func (in *GitConfigurationInitParameters) DeepCopy() *GitConfigurationInitParameters { + if in == nil { + return nil + } + out := new(GitConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitConfigurationObservation) DeepCopyInto(out *GitConfigurationObservation) { + *out = *in + if in.PullRequest != nil { + in, out := &in.PullRequest, &out.PullRequest + *out = make([]PullRequestObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Push != nil { + in, out := &in.Push, &out.Push + *out = make([]PushObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceActionName != nil { + in, out := &in.SourceActionName, &out.SourceActionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitConfigurationObservation. 
+func (in *GitConfigurationObservation) DeepCopy() *GitConfigurationObservation { + if in == nil { + return nil + } + out := new(GitConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitConfigurationParameters) DeepCopyInto(out *GitConfigurationParameters) { + *out = *in + if in.PullRequest != nil { + in, out := &in.PullRequest, &out.PullRequest + *out = make([]PullRequestParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Push != nil { + in, out := &in.Push, &out.Push + *out = make([]PushParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceActionName != nil { + in, out := &in.SourceActionName, &out.SourceActionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitConfigurationParameters. +func (in *GitConfigurationParameters) DeepCopy() *GitConfigurationParameters { + if in == nil { + return nil + } + out := new(GitConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputArtifactDetailsInitParameters) DeepCopyInto(out *InputArtifactDetailsInitParameters) { + *out = *in + if in.MaximumCount != nil { + in, out := &in.MaximumCount, &out.MaximumCount + *out = new(float64) + **out = **in + } + if in.MinimumCount != nil { + in, out := &in.MinimumCount, &out.MinimumCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputArtifactDetailsInitParameters. 
+func (in *InputArtifactDetailsInitParameters) DeepCopy() *InputArtifactDetailsInitParameters { + if in == nil { + return nil + } + out := new(InputArtifactDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputArtifactDetailsObservation) DeepCopyInto(out *InputArtifactDetailsObservation) { + *out = *in + if in.MaximumCount != nil { + in, out := &in.MaximumCount, &out.MaximumCount + *out = new(float64) + **out = **in + } + if in.MinimumCount != nil { + in, out := &in.MinimumCount, &out.MinimumCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputArtifactDetailsObservation. +func (in *InputArtifactDetailsObservation) DeepCopy() *InputArtifactDetailsObservation { + if in == nil { + return nil + } + out := new(InputArtifactDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputArtifactDetailsParameters) DeepCopyInto(out *InputArtifactDetailsParameters) { + *out = *in + if in.MaximumCount != nil { + in, out := &in.MaximumCount, &out.MaximumCount + *out = new(float64) + **out = **in + } + if in.MinimumCount != nil { + in, out := &in.MinimumCount, &out.MinimumCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputArtifactDetailsParameters. +func (in *InputArtifactDetailsParameters) DeepCopy() *InputArtifactDetailsParameters { + if in == nil { + return nil + } + out := new(InputArtifactDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputArtifactDetailsInitParameters) DeepCopyInto(out *OutputArtifactDetailsInitParameters) { + *out = *in + if in.MaximumCount != nil { + in, out := &in.MaximumCount, &out.MaximumCount + *out = new(float64) + **out = **in + } + if in.MinimumCount != nil { + in, out := &in.MinimumCount, &out.MinimumCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputArtifactDetailsInitParameters. +func (in *OutputArtifactDetailsInitParameters) DeepCopy() *OutputArtifactDetailsInitParameters { + if in == nil { + return nil + } + out := new(OutputArtifactDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputArtifactDetailsObservation) DeepCopyInto(out *OutputArtifactDetailsObservation) { + *out = *in + if in.MaximumCount != nil { + in, out := &in.MaximumCount, &out.MaximumCount + *out = new(float64) + **out = **in + } + if in.MinimumCount != nil { + in, out := &in.MinimumCount, &out.MinimumCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputArtifactDetailsObservation. +func (in *OutputArtifactDetailsObservation) DeepCopy() *OutputArtifactDetailsObservation { + if in == nil { + return nil + } + out := new(OutputArtifactDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputArtifactDetailsParameters) DeepCopyInto(out *OutputArtifactDetailsParameters) { + *out = *in + if in.MaximumCount != nil { + in, out := &in.MaximumCount, &out.MaximumCount + *out = new(float64) + **out = **in + } + if in.MinimumCount != nil { + in, out := &in.MinimumCount, &out.MinimumCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputArtifactDetailsParameters. +func (in *OutputArtifactDetailsParameters) DeepCopy() *OutputArtifactDetailsParameters { + if in == nil { + return nil + } + out := new(OutputArtifactDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PullRequestInitParameters) DeepCopyInto(out *PullRequestInitParameters) { + *out = *in + if in.Branches != nil { + in, out := &in.Branches, &out.Branches + *out = new(BranchesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FilePaths != nil { + in, out := &in.FilePaths, &out.FilePaths + *out = new(FilePathsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullRequestInitParameters. +func (in *PullRequestInitParameters) DeepCopy() *PullRequestInitParameters { + if in == nil { + return nil + } + out := new(PullRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PullRequestObservation) DeepCopyInto(out *PullRequestObservation) { + *out = *in + if in.Branches != nil { + in, out := &in.Branches, &out.Branches + *out = new(BranchesObservation) + (*in).DeepCopyInto(*out) + } + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FilePaths != nil { + in, out := &in.FilePaths, &out.FilePaths + *out = new(FilePathsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullRequestObservation. +func (in *PullRequestObservation) DeepCopy() *PullRequestObservation { + if in == nil { + return nil + } + out := new(PullRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PullRequestParameters) DeepCopyInto(out *PullRequestParameters) { + *out = *in + if in.Branches != nil { + in, out := &in.Branches, &out.Branches + *out = new(BranchesParameters) + (*in).DeepCopyInto(*out) + } + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FilePaths != nil { + in, out := &in.FilePaths, &out.FilePaths + *out = new(FilePathsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullRequestParameters. +func (in *PullRequestParameters) DeepCopy() *PullRequestParameters { + if in == nil { + return nil + } + out := new(PullRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PushBranchesInitParameters) DeepCopyInto(out *PushBranchesInitParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushBranchesInitParameters. +func (in *PushBranchesInitParameters) DeepCopy() *PushBranchesInitParameters { + if in == nil { + return nil + } + out := new(PushBranchesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PushBranchesObservation) DeepCopyInto(out *PushBranchesObservation) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushBranchesObservation. +func (in *PushBranchesObservation) DeepCopy() *PushBranchesObservation { + if in == nil { + return nil + } + out := new(PushBranchesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PushBranchesParameters) DeepCopyInto(out *PushBranchesParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushBranchesParameters. +func (in *PushBranchesParameters) DeepCopy() *PushBranchesParameters { + if in == nil { + return nil + } + out := new(PushBranchesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PushFilePathsInitParameters) DeepCopyInto(out *PushFilePathsInitParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushFilePathsInitParameters. +func (in *PushFilePathsInitParameters) DeepCopy() *PushFilePathsInitParameters { + if in == nil { + return nil + } + out := new(PushFilePathsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PushFilePathsObservation) DeepCopyInto(out *PushFilePathsObservation) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushFilePathsObservation. +func (in *PushFilePathsObservation) DeepCopy() *PushFilePathsObservation { + if in == nil { + return nil + } + out := new(PushFilePathsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PushFilePathsParameters) DeepCopyInto(out *PushFilePathsParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushFilePathsParameters. +func (in *PushFilePathsParameters) DeepCopy() *PushFilePathsParameters { + if in == nil { + return nil + } + out := new(PushFilePathsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PushInitParameters) DeepCopyInto(out *PushInitParameters) { + *out = *in + if in.Branches != nil { + in, out := &in.Branches, &out.Branches + *out = new(PushBranchesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FilePaths != nil { + in, out := &in.FilePaths, &out.FilePaths + *out = new(PushFilePathsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = new(TagsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushInitParameters. +func (in *PushInitParameters) DeepCopy() *PushInitParameters { + if in == nil { + return nil + } + out := new(PushInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PushObservation) DeepCopyInto(out *PushObservation) { + *out = *in + if in.Branches != nil { + in, out := &in.Branches, &out.Branches + *out = new(PushBranchesObservation) + (*in).DeepCopyInto(*out) + } + if in.FilePaths != nil { + in, out := &in.FilePaths, &out.FilePaths + *out = new(PushFilePathsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = new(TagsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushObservation. +func (in *PushObservation) DeepCopy() *PushObservation { + if in == nil { + return nil + } + out := new(PushObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PushParameters) DeepCopyInto(out *PushParameters) { + *out = *in + if in.Branches != nil { + in, out := &in.Branches, &out.Branches + *out = new(PushBranchesParameters) + (*in).DeepCopyInto(*out) + } + if in.FilePaths != nil { + in, out := &in.FilePaths, &out.FilePaths + *out = new(PushFilePathsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = new(TagsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushParameters. +func (in *PushParameters) DeepCopy() *PushParameters { + if in == nil { + return nil + } + out := new(PushParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsInitParameters) DeepCopyInto(out *SettingsInitParameters) { + *out = *in + if in.EntityURLTemplate != nil { + in, out := &in.EntityURLTemplate, &out.EntityURLTemplate + *out = new(string) + **out = **in + } + if in.ExecutionURLTemplate != nil { + in, out := &in.ExecutionURLTemplate, &out.ExecutionURLTemplate + *out = new(string) + **out = **in + } + if in.RevisionURLTemplate != nil { + in, out := &in.RevisionURLTemplate, &out.RevisionURLTemplate + *out = new(string) + **out = **in + } + if in.ThirdPartyConfigurationURL != nil { + in, out := &in.ThirdPartyConfigurationURL, &out.ThirdPartyConfigurationURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsInitParameters. +func (in *SettingsInitParameters) DeepCopy() *SettingsInitParameters { + if in == nil { + return nil + } + out := new(SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingsObservation) DeepCopyInto(out *SettingsObservation) { + *out = *in + if in.EntityURLTemplate != nil { + in, out := &in.EntityURLTemplate, &out.EntityURLTemplate + *out = new(string) + **out = **in + } + if in.ExecutionURLTemplate != nil { + in, out := &in.ExecutionURLTemplate, &out.ExecutionURLTemplate + *out = new(string) + **out = **in + } + if in.RevisionURLTemplate != nil { + in, out := &in.RevisionURLTemplate, &out.RevisionURLTemplate + *out = new(string) + **out = **in + } + if in.ThirdPartyConfigurationURL != nil { + in, out := &in.ThirdPartyConfigurationURL, &out.ThirdPartyConfigurationURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsObservation. +func (in *SettingsObservation) DeepCopy() *SettingsObservation { + if in == nil { + return nil + } + out := new(SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsParameters) DeepCopyInto(out *SettingsParameters) { + *out = *in + if in.EntityURLTemplate != nil { + in, out := &in.EntityURLTemplate, &out.EntityURLTemplate + *out = new(string) + **out = **in + } + if in.ExecutionURLTemplate != nil { + in, out := &in.ExecutionURLTemplate, &out.ExecutionURLTemplate + *out = new(string) + **out = **in + } + if in.RevisionURLTemplate != nil { + in, out := &in.RevisionURLTemplate, &out.RevisionURLTemplate + *out = new(string) + **out = **in + } + if in.ThirdPartyConfigurationURL != nil { + in, out := &in.ThirdPartyConfigurationURL, &out.ThirdPartyConfigurationURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsParameters. 
+func (in *SettingsParameters) DeepCopy() *SettingsParameters { + if in == nil { + return nil + } + out := new(SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageInitParameters) DeepCopyInto(out *StageInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageInitParameters. +func (in *StageInitParameters) DeepCopy() *StageInitParameters { + if in == nil { + return nil + } + out := new(StageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageObservation) DeepCopyInto(out *StageObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageObservation. +func (in *StageObservation) DeepCopy() *StageObservation { + if in == nil { + return nil + } + out := new(StageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StageParameters) DeepCopyInto(out *StageParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageParameters. +func (in *StageParameters) DeepCopy() *StageParameters { + if in == nil { + return nil + } + out := new(StageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagsInitParameters) DeepCopyInto(out *TagsInitParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagsInitParameters. +func (in *TagsInitParameters) DeepCopy() *TagsInitParameters { + if in == nil { + return nil + } + out := new(TagsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TagsObservation) DeepCopyInto(out *TagsObservation) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagsObservation. +func (in *TagsObservation) DeepCopy() *TagsObservation { + if in == nil { + return nil + } + out := new(TagsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagsParameters) DeepCopyInto(out *TagsParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagsParameters. +func (in *TagsParameters) DeepCopy() *TagsParameters { + if in == nil { + return nil + } + out := new(TagsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerInitParameters) DeepCopyInto(out *TriggerInitParameters) { + *out = *in + if in.GitConfiguration != nil { + in, out := &in.GitConfiguration, &out.GitConfiguration + *out = new(GitConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProviderType != nil { + in, out := &in.ProviderType, &out.ProviderType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerInitParameters. +func (in *TriggerInitParameters) DeepCopy() *TriggerInitParameters { + if in == nil { + return nil + } + out := new(TriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerObservation) DeepCopyInto(out *TriggerObservation) { + *out = *in + if in.GitConfiguration != nil { + in, out := &in.GitConfiguration, &out.GitConfiguration + *out = new(GitConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ProviderType != nil { + in, out := &in.ProviderType, &out.ProviderType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerObservation. +func (in *TriggerObservation) DeepCopy() *TriggerObservation { + if in == nil { + return nil + } + out := new(TriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerParameters) DeepCopyInto(out *TriggerParameters) { + *out = *in + if in.GitConfiguration != nil { + in, out := &in.GitConfiguration, &out.GitConfiguration + *out = new(GitConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ProviderType != nil { + in, out := &in.ProviderType, &out.ProviderType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerParameters. +func (in *TriggerParameters) DeepCopy() *TriggerParameters { + if in == nil { + return nil + } + out := new(TriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VariableInitParameters) DeepCopyInto(out *VariableInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VariableInitParameters. +func (in *VariableInitParameters) DeepCopy() *VariableInitParameters { + if in == nil { + return nil + } + out := new(VariableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VariableObservation) DeepCopyInto(out *VariableObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VariableObservation. +func (in *VariableObservation) DeepCopy() *VariableObservation { + if in == nil { + return nil + } + out := new(VariableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VariableParameters) DeepCopyInto(out *VariableParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VariableParameters. +func (in *VariableParameters) DeepCopy() *VariableParameters { + if in == nil { + return nil + } + out := new(VariableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. 
+func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Webhook) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookInitParameters) DeepCopyInto(out *WebhookInitParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(string) + **out = **in + } + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]FilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetAction != nil { + in, out := &in.TargetAction, &out.TargetAction + *out = new(string) + **out = **in + } + if in.TargetPipeline != nil { + in, out := &in.TargetPipeline, &out.TargetPipeline + *out = new(string) + **out = **in + } + if in.TargetPipelineRef != nil { + in, out := &in.TargetPipelineRef, &out.TargetPipelineRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetPipelineSelector != nil { + in, out := &in.TargetPipelineSelector, &out.TargetPipelineSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new WebhookInitParameters. +func (in *WebhookInitParameters) DeepCopy() *WebhookInitParameters { + if in == nil { + return nil + } + out := new(WebhookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookList) DeepCopyInto(out *WebhookList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookList. +func (in *WebhookList) DeepCopy() *WebhookList { + if in == nil { + return nil + } + out := new(WebhookList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebhookList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookObservation) DeepCopyInto(out *WebhookObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(string) + **out = **in + } + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]FilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetAction != nil { + in, out := &in.TargetAction, &out.TargetAction + *out = new(string) + **out = **in + } + if in.TargetPipeline != nil { + in, out := &in.TargetPipeline, &out.TargetPipeline + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookObservation. 
+func (in *WebhookObservation) DeepCopy() *WebhookObservation { + if in == nil { + return nil + } + out := new(WebhookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookParameters) DeepCopyInto(out *WebhookParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(string) + **out = **in + } + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]FilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetAction != nil { + in, out := &in.TargetAction, &out.TargetAction + *out = new(string) + **out = **in + } + if in.TargetPipeline != nil { + in, out := &in.TargetPipeline, &out.TargetPipeline + *out = new(string) + **out = **in + } + if in.TargetPipelineRef != nil { + in, out := &in.TargetPipelineRef, &out.TargetPipelineRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetPipelineSelector != nil { + in, out := &in.TargetPipelineSelector, &out.TargetPipelineSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookParameters. 
+func (in *WebhookParameters) DeepCopy() *WebhookParameters { + if in == nil { + return nil + } + out := new(WebhookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookSpec) DeepCopyInto(out *WebhookSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookSpec. +func (in *WebhookSpec) DeepCopy() *WebhookSpec { + if in == nil { + return nil + } + out := new(WebhookSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookStatus) DeepCopyInto(out *WebhookStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookStatus. +func (in *WebhookStatus) DeepCopy() *WebhookStatus { + if in == nil { + return nil + } + out := new(WebhookStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/codepipeline/v1beta2/zz_generated.managed.go b/apis/codepipeline/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..80a9658ea7 --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Codepipeline. 
+func (mg *Codepipeline) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Codepipeline. +func (mg *Codepipeline) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Codepipeline. +func (mg *Codepipeline) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Codepipeline. +func (mg *Codepipeline) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Codepipeline. +func (mg *Codepipeline) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Codepipeline. +func (mg *Codepipeline) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Codepipeline. +func (mg *Codepipeline) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Codepipeline. +func (mg *Codepipeline) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Codepipeline. +func (mg *Codepipeline) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Codepipeline. +func (mg *Codepipeline) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Codepipeline. +func (mg *Codepipeline) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Codepipeline. 
+func (mg *Codepipeline) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CustomActionType. +func (mg *CustomActionType) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CustomActionType. +func (mg *CustomActionType) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CustomActionType. +func (mg *CustomActionType) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CustomActionType. +func (mg *CustomActionType) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CustomActionType. +func (mg *CustomActionType) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CustomActionType. +func (mg *CustomActionType) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CustomActionType. +func (mg *CustomActionType) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CustomActionType. +func (mg *CustomActionType) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CustomActionType. +func (mg *CustomActionType) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CustomActionType. +func (mg *CustomActionType) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CustomActionType. 
+func (mg *CustomActionType) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CustomActionType. +func (mg *CustomActionType) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Webhook. +func (mg *Webhook) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Webhook. +func (mg *Webhook) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Webhook. +func (mg *Webhook) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Webhook. +func (mg *Webhook) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Webhook. +func (mg *Webhook) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Webhook. +func (mg *Webhook) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Webhook. +func (mg *Webhook) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Webhook. +func (mg *Webhook) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Webhook. +func (mg *Webhook) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Webhook. +func (mg *Webhook) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Webhook. 
+func (mg *Webhook) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Webhook. +func (mg *Webhook) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/codepipeline/v1beta2/zz_generated.managedlist.go b/apis/codepipeline/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..89a4572181 --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CodepipelineList. +func (l *CodepipelineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CustomActionTypeList. +func (l *CustomActionTypeList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WebhookList. +func (l *WebhookList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/codepipeline/v1beta2/zz_generated.resolvers.go b/apis/codepipeline/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..9c176fc48c --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,160 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Codepipeline) ResolveReferences( // ResolveReferences of this Codepipeline. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.ArtifactStore); i3++ { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ArtifactStore[i3].Location), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ArtifactStore[i3].LocationRef, + Selector: mg.Spec.ForProvider.ArtifactStore[i3].LocationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ArtifactStore[i3].Location") + } + mg.Spec.ForProvider.ArtifactStore[i3].Location = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ArtifactStore[i3].LocationRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.ArtifactStore); i3++ { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ArtifactStore[i3].Location), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ArtifactStore[i3].LocationRef, + Selector: mg.Spec.InitProvider.ArtifactStore[i3].LocationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ArtifactStore[i3].Location") + } + mg.Spec.InitProvider.ArtifactStore[i3].Location = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ArtifactStore[i3].LocationRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Webhook. +func (mg *Webhook) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("codepipeline.aws.upbound.io", "v1beta2", "Codepipeline", "CodepipelineList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TargetPipeline), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.TargetPipelineRef, + Selector: mg.Spec.ForProvider.TargetPipelineSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TargetPipeline") + } + mg.Spec.ForProvider.TargetPipeline = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TargetPipelineRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("codepipeline.aws.upbound.io", "v1beta2", "Codepipeline", "CodepipelineList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TargetPipeline), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.TargetPipelineRef, + Selector: mg.Spec.InitProvider.TargetPipelineSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TargetPipeline") + } + 
mg.Spec.InitProvider.TargetPipeline = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TargetPipelineRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/codepipeline/v1beta2/zz_groupversion_info.go b/apis/codepipeline/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..7960d658fc --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=codepipeline.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "codepipeline.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/codepipeline/v1beta2/zz_webhook_terraformed.go b/apis/codepipeline/v1beta2/zz_webhook_terraformed.go new file mode 100755 index 0000000000..107bda313c --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_webhook_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Webhook +func (mg *Webhook) GetTerraformResourceType() string { + return "aws_codepipeline_webhook" +} + +// GetConnectionDetailsMapping for this Webhook +func (tr *Webhook) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"authentication_configuration[*].secret_token": "authenticationConfiguration[*].secretTokenSecretRef"} +} + +// GetObservation of this Webhook +func (tr *Webhook) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Webhook +func (tr *Webhook) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Webhook +func (tr *Webhook) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Webhook +func (tr *Webhook) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Webhook +func (tr *Webhook) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Webhook +func (tr *Webhook) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if 
err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Webhook +func (tr *Webhook) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Webhook using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Webhook) LateInitialize(attrs []byte) (bool, error) { + params := &WebhookParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Webhook) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/codepipeline/v1beta2/zz_webhook_types.go b/apis/codepipeline/v1beta2/zz_webhook_types.go new file mode 100755 index 0000000000..38006707e7 --- /dev/null +++ b/apis/codepipeline/v1beta2/zz_webhook_types.go @@ -0,0 +1,240 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthenticationConfigurationInitParameters struct { + + // A valid CIDR block for IP filtering. Required for IP. + AllowedIPRange *string `json:"allowedIpRange,omitempty" tf:"allowed_ip_range,omitempty"` + + // The shared secret for the GitHub repository webhook. Set this as secret in your github_repository_webhook's configuration block. Required for GITHUB_HMAC. + SecretTokenSecretRef *v1.SecretKeySelector `json:"secretTokenSecretRef,omitempty" tf:"-"` +} + +type AuthenticationConfigurationObservation struct { + + // A valid CIDR block for IP filtering. Required for IP. + AllowedIPRange *string `json:"allowedIpRange,omitempty" tf:"allowed_ip_range,omitempty"` +} + +type AuthenticationConfigurationParameters struct { + + // A valid CIDR block for IP filtering. Required for IP. + // +kubebuilder:validation:Optional + AllowedIPRange *string `json:"allowedIpRange,omitempty" tf:"allowed_ip_range,omitempty"` + + // The shared secret for the GitHub repository webhook. Set this as secret in your github_repository_webhook's configuration block. Required for GITHUB_HMAC. 
+ // +kubebuilder:validation:Optional + SecretTokenSecretRef *v1.SecretKeySelector `json:"secretTokenSecretRef,omitempty" tf:"-"` +} + +type FilterInitParameters struct { + + // The JSON path to filter on. + JSONPath *string `json:"jsonPath,omitempty" tf:"json_path,omitempty"` + + // The value to match on (e.g., refs/heads/{Branch}). See AWS docs for details. + MatchEquals *string `json:"matchEquals,omitempty" tf:"match_equals,omitempty"` +} + +type FilterObservation struct { + + // The JSON path to filter on. + JSONPath *string `json:"jsonPath,omitempty" tf:"json_path,omitempty"` + + // The value to match on (e.g., refs/heads/{Branch}). See AWS docs for details. + MatchEquals *string `json:"matchEquals,omitempty" tf:"match_equals,omitempty"` +} + +type FilterParameters struct { + + // The JSON path to filter on. + // +kubebuilder:validation:Optional + JSONPath *string `json:"jsonPath" tf:"json_path,omitempty"` + + // The value to match on (e.g., refs/heads/{Branch}). See AWS docs for details. + // +kubebuilder:validation:Optional + MatchEquals *string `json:"matchEquals" tf:"match_equals,omitempty"` +} + +type WebhookInitParameters struct { + + // The type of authentication to use. One of IP, GITHUB_HMAC, or UNAUTHENTICATED. + Authentication *string `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // An auth block. Required for IP and GITHUB_HMAC. Auth blocks are documented below. + AuthenticationConfiguration *AuthenticationConfigurationInitParameters `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // One or more filter blocks. Filter blocks are documented below. + Filter []FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The name of the action in a pipeline you want to connect to the webhook. 
The action must be from the source (first) stage of the pipeline. + TargetAction *string `json:"targetAction,omitempty" tf:"target_action,omitempty"` + + // The name of the pipeline. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/codepipeline/v1beta2.Codepipeline + TargetPipeline *string `json:"targetPipeline,omitempty" tf:"target_pipeline,omitempty"` + + // Reference to a Codepipeline in codepipeline to populate targetPipeline. + // +kubebuilder:validation:Optional + TargetPipelineRef *v1.Reference `json:"targetPipelineRef,omitempty" tf:"-"` + + // Selector for a Codepipeline in codepipeline to populate targetPipeline. + // +kubebuilder:validation:Optional + TargetPipelineSelector *v1.Selector `json:"targetPipelineSelector,omitempty" tf:"-"` +} + +type WebhookObservation struct { + + // The CodePipeline webhook's ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The type of authentication to use. One of IP, GITHUB_HMAC, or UNAUTHENTICATED. + Authentication *string `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // An auth block. Required for IP and GITHUB_HMAC. Auth blocks are documented below. + AuthenticationConfiguration *AuthenticationConfigurationObservation `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // One or more filter blocks. Filter blocks are documented below. + Filter []FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // The CodePipeline webhook's ARN. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The name of the action in a pipeline you want to connect to the webhook. The action must be from the source (first) stage of the pipeline. + TargetAction *string `json:"targetAction,omitempty" tf:"target_action,omitempty"` + + // The name of the pipeline. + TargetPipeline *string `json:"targetPipeline,omitempty" tf:"target_pipeline,omitempty"` + + // The CodePipeline webhook's URL. POST events to this endpoint to trigger the target. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type WebhookParameters struct { + + // The type of authentication to use. One of IP, GITHUB_HMAC, or UNAUTHENTICATED. + // +kubebuilder:validation:Optional + Authentication *string `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // An auth block. Required for IP and GITHUB_HMAC. Auth blocks are documented below. + // +kubebuilder:validation:Optional + AuthenticationConfiguration *AuthenticationConfigurationParameters `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // One or more filter blocks. Filter blocks are documented below. + // +kubebuilder:validation:Optional + Filter []FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The name of the action in a pipeline you want to connect to the webhook. The action must be from the source (first) stage of the pipeline. + // +kubebuilder:validation:Optional + TargetAction *string `json:"targetAction,omitempty" tf:"target_action,omitempty"` + + // The name of the pipeline. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/codepipeline/v1beta2.Codepipeline + // +kubebuilder:validation:Optional + TargetPipeline *string `json:"targetPipeline,omitempty" tf:"target_pipeline,omitempty"` + + // Reference to a Codepipeline in codepipeline to populate targetPipeline. + // +kubebuilder:validation:Optional + TargetPipelineRef *v1.Reference `json:"targetPipelineRef,omitempty" tf:"-"` + + // Selector for a Codepipeline in codepipeline to populate targetPipeline. + // +kubebuilder:validation:Optional + TargetPipelineSelector *v1.Selector `json:"targetPipelineSelector,omitempty" tf:"-"` +} + +// WebhookSpec defines the desired state of Webhook +type WebhookSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WebhookParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WebhookInitParameters `json:"initProvider,omitempty"` +} + +// WebhookStatus defines the observed state of Webhook. +type WebhookStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WebhookObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Webhook is the Schema for the Webhooks API. 
Provides a CodePipeline Webhook +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Webhook struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authentication) || (has(self.initProvider) && has(self.initProvider.authentication))",message="spec.forProvider.authentication is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.filter) || (has(self.initProvider) && has(self.initProvider.filter))",message="spec.forProvider.filter is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetAction) || (has(self.initProvider) && has(self.initProvider.targetAction))",message="spec.forProvider.targetAction is a required parameter" + Spec WebhookSpec `json:"spec"` + Status WebhookStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WebhookList contains a list of Webhooks +type WebhookList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Webhook `json:"items"` +} + +// Repository type metadata. 
+var ( + Webhook_Kind = "Webhook" + Webhook_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Webhook_Kind}.String() + Webhook_KindAPIVersion = Webhook_Kind + "." + CRDGroupVersion.String() + Webhook_GroupVersionKind = CRDGroupVersion.WithKind(Webhook_Kind) +) + +func init() { + SchemeBuilder.Register(&Webhook{}, &WebhookList{}) +} diff --git a/apis/codestarconnections/v1beta1/zz_generated.conversion_hubs.go b/apis/codestarconnections/v1beta1/zz_generated.conversion_hubs.go index 18706d2f12..840464aee2 100755 --- a/apis/codestarconnections/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/codestarconnections/v1beta1/zz_generated.conversion_hubs.go @@ -8,6 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Connection) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Host) Hub() {} diff --git a/apis/codestarconnections/v1beta1/zz_generated.conversion_spokes.go b/apis/codestarconnections/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..078b0f8aec --- /dev/null +++ b/apis/codestarconnections/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Host to the hub type. 
+func (tr *Host) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Host type. +func (tr *Host) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/codestarconnections/v1beta2/zz_generated.conversion_hubs.go b/apis/codestarconnections/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..cbce267f9c --- /dev/null +++ b/apis/codestarconnections/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Host) Hub() {} diff --git a/apis/codestarconnections/v1beta2/zz_generated.deepcopy.go b/apis/codestarconnections/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..920966e923 --- /dev/null +++ b/apis/codestarconnections/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,373 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Host) DeepCopyInto(out *Host) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Host. +func (in *Host) DeepCopy() *Host { + if in == nil { + return nil + } + out := new(Host) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Host) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostInitParameters) DeepCopyInto(out *HostInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProviderEndpoint != nil { + in, out := &in.ProviderEndpoint, &out.ProviderEndpoint + *out = new(string) + **out = **in + } + if in.ProviderType != nil { + in, out := &in.ProviderType, &out.ProviderType + *out = new(string) + **out = **in + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostInitParameters. +func (in *HostInitParameters) DeepCopy() *HostInitParameters { + if in == nil { + return nil + } + out := new(HostInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostList) DeepCopyInto(out *HostList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Host, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostList. +func (in *HostList) DeepCopy() *HostList { + if in == nil { + return nil + } + out := new(HostList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostObservation) DeepCopyInto(out *HostObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProviderEndpoint != nil { + in, out := &in.ProviderEndpoint, &out.ProviderEndpoint + *out = new(string) + **out = **in + } + if in.ProviderType != nil { + in, out := &in.ProviderType, &out.ProviderType + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostObservation. 
+func (in *HostObservation) DeepCopy() *HostObservation { + if in == nil { + return nil + } + out := new(HostObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostParameters) DeepCopyInto(out *HostParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProviderEndpoint != nil { + in, out := &in.ProviderEndpoint, &out.ProviderEndpoint + *out = new(string) + **out = **in + } + if in.ProviderType != nil { + in, out := &in.ProviderType, &out.ProviderType + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostParameters. +func (in *HostParameters) DeepCopy() *HostParameters { + if in == nil { + return nil + } + out := new(HostParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostSpec) DeepCopyInto(out *HostSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSpec. +func (in *HostSpec) DeepCopy() *HostSpec { + if in == nil { + return nil + } + out := new(HostSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostStatus) DeepCopyInto(out *HostStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostStatus. +func (in *HostStatus) DeepCopy() *HostStatus { + if in == nil { + return nil + } + out := new(HostStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationInitParameters) DeepCopyInto(out *VPCConfigurationInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TLSCertificate != nil { + in, out := &in.TLSCertificate, &out.TLSCertificate + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationInitParameters. +func (in *VPCConfigurationInitParameters) DeepCopy() *VPCConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigurationObservation) DeepCopyInto(out *VPCConfigurationObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TLSCertificate != nil { + in, out := &in.TLSCertificate, &out.TLSCertificate + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationObservation. +func (in *VPCConfigurationObservation) DeepCopy() *VPCConfigurationObservation { + if in == nil { + return nil + } + out := new(VPCConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigurationParameters) DeepCopyInto(out *VPCConfigurationParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TLSCertificate != nil { + in, out := &in.TLSCertificate, &out.TLSCertificate + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationParameters. +func (in *VPCConfigurationParameters) DeepCopy() *VPCConfigurationParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/codestarconnections/v1beta2/zz_generated.managed.go b/apis/codestarconnections/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..4d938c8ea2 --- /dev/null +++ b/apis/codestarconnections/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Host. +func (mg *Host) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Host. +func (mg *Host) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Host. 
+func (mg *Host) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Host. +func (mg *Host) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Host. +func (mg *Host) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Host. +func (mg *Host) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Host. +func (mg *Host) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Host. +func (mg *Host) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Host. +func (mg *Host) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Host. +func (mg *Host) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Host. +func (mg *Host) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Host. +func (mg *Host) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/codestarconnections/v1beta2/zz_generated.managedlist.go b/apis/codestarconnections/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..5bf499673d --- /dev/null +++ b/apis/codestarconnections/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this HostList. +func (l *HostList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/codestarconnections/v1beta2/zz_groupversion_info.go b/apis/codestarconnections/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..316c4e6da4 --- /dev/null +++ b/apis/codestarconnections/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=codestarconnections.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "codestarconnections.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/codestarconnections/v1beta2/zz_host_terraformed.go b/apis/codestarconnections/v1beta2/zz_host_terraformed.go new file mode 100755 index 0000000000..fb2c30c238 --- /dev/null +++ b/apis/codestarconnections/v1beta2/zz_host_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Host +func (mg *Host) GetTerraformResourceType() string { + return "aws_codestarconnections_host" +} + +// GetConnectionDetailsMapping for this Host +func (tr *Host) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Host +func (tr *Host) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Host +func (tr *Host) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Host +func (tr *Host) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Host +func (tr *Host) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Host +func (tr *Host) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Host +func (tr *Host) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Host +func (tr 
*Host) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Host using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Host) LateInitialize(attrs []byte) (bool, error) { + params := &HostParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Host) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/codestarconnections/v1beta2/zz_host_types.go b/apis/codestarconnections/v1beta2/zz_host_types.go new file mode 100755 index 0000000000..5be7d35d51 --- /dev/null +++ b/apis/codestarconnections/v1beta2/zz_host_types.go @@ -0,0 +1,195 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HostInitParameters struct { + + // The name of the host to be created. The name must be unique in the calling AWS account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The endpoint of the infrastructure to be represented by the host after it is created. + ProviderEndpoint *string `json:"providerEndpoint,omitempty" tf:"provider_endpoint,omitempty"` + + // The name of the external provider where your third-party code repository is configured. + ProviderType *string `json:"providerType,omitempty" tf:"provider_type,omitempty"` + + // The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC. + VPCConfiguration *VPCConfigurationInitParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type HostObservation struct { + + // The CodeStar Host ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The CodeStar Host ARN. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the host to be created. The name must be unique in the calling AWS account. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The endpoint of the infrastructure to be represented by the host after it is created. + ProviderEndpoint *string `json:"providerEndpoint,omitempty" tf:"provider_endpoint,omitempty"` + + // The name of the external provider where your third-party code repository is configured. + ProviderType *string `json:"providerType,omitempty" tf:"provider_type,omitempty"` + + // The CodeStar Host status. Possible values are PENDING, AVAILABLE, VPC_CONFIG_DELETING, VPC_CONFIG_INITIALIZING, and VPC_CONFIG_FAILED_INITIALIZATION. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC. + VPCConfiguration *VPCConfigurationObservation `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type HostParameters struct { + + // The name of the host to be created. The name must be unique in the calling AWS account. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The endpoint of the infrastructure to be represented by the host after it is created. + // +kubebuilder:validation:Optional + ProviderEndpoint *string `json:"providerEndpoint,omitempty" tf:"provider_endpoint,omitempty"` + + // The name of the external provider where your third-party code repository is configured. + // +kubebuilder:validation:Optional + ProviderType *string `json:"providerType,omitempty" tf:"provider_type,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The VPC configuration to be provisioned for the host. A VPC must be configured, and the infrastructure to be represented by the host must already be connected to the VPC. 
+ // +kubebuilder:validation:Optional + VPCConfiguration *VPCConfigurationParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type VPCConfigurationInitParameters struct { + + // ID of the security group or security groups associated with the Amazon VPC connected to the infrastructure where your provider type is installed. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The ID of the subnet or subnets associated with the Amazon VPC connected to the infrastructure where your provider type is installed. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The value of the Transport Layer Security (TLS) certificate associated with the infrastructure where your provider type is installed. + TLSCertificate *string `json:"tlsCertificate,omitempty" tf:"tls_certificate,omitempty"` + + // The ID of the Amazon VPC connected to the infrastructure where your provider type is installed. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCConfigurationObservation struct { + + // ID of the security group or security groups associated with the Amazon VPC connected to the infrastructure where your provider type is installed. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The ID of the subnet or subnets associated with the Amazon VPC connected to the infrastructure where your provider type is installed. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The value of the Transport Layer Security (TLS) certificate associated with the infrastructure where your provider type is installed. + TLSCertificate *string `json:"tlsCertificate,omitempty" tf:"tls_certificate,omitempty"` + + // The ID of the Amazon VPC connected to the infrastructure where your provider type is installed. 
+ VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCConfigurationParameters struct { + + // ID of the security group or security groups associated with the Amazon VPC connected to the infrastructure where your provider type is installed. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds" tf:"security_group_ids,omitempty"` + + // The ID of the subnet or subnets associated with the Amazon VPC connected to the infrastructure where your provider type is installed. + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"` + + // The value of the Transport Layer Security (TLS) certificate associated with the infrastructure where your provider type is installed. + // +kubebuilder:validation:Optional + TLSCertificate *string `json:"tlsCertificate,omitempty" tf:"tls_certificate,omitempty"` + + // The ID of the Amazon VPC connected to the infrastructure where your provider type is installed. + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId" tf:"vpc_id,omitempty"` +} + +// HostSpec defines the desired state of Host +type HostSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HostParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider HostInitParameters `json:"initProvider,omitempty"` +} + +// HostStatus defines the observed state of Host. +type HostStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HostObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Host is the Schema for the Hosts API. Provides a CodeStar Host +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Host struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.providerEndpoint) || (has(self.initProvider) && has(self.initProvider.providerEndpoint))",message="spec.forProvider.providerEndpoint is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.providerType) || (has(self.initProvider) && has(self.initProvider.providerType))",message="spec.forProvider.providerType is a required parameter" + Spec 
HostSpec `json:"spec"` + Status HostStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HostList contains a list of Hosts +type HostList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Host `json:"items"` +} + +// Repository type metadata. +var ( + Host_Kind = "Host" + Host_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Host_Kind}.String() + Host_KindAPIVersion = Host_Kind + "." + CRDGroupVersion.String() + Host_GroupVersionKind = CRDGroupVersion.WithKind(Host_Kind) +) + +func init() { + SchemeBuilder.Register(&Host{}, &HostList{}) +} diff --git a/apis/cognitoidentity/v1beta1/zz_cognitoidentitypoolproviderprincipaltag_types.go b/apis/cognitoidentity/v1beta1/zz_cognitoidentitypoolproviderprincipaltag_types.go index ae5b008c88..5ad5da720b 100755 --- a/apis/cognitoidentity/v1beta1/zz_cognitoidentitypoolproviderprincipaltag_types.go +++ b/apis/cognitoidentity/v1beta1/zz_cognitoidentitypoolproviderprincipaltag_types.go @@ -29,7 +29,7 @@ type CognitoIdentityPoolProviderPrincipalTagInitParameters struct { IdentityPoolIDSelector *v1.Selector `json:"identityPoolIdSelector,omitempty" tf:"-"` // The name of the identity provider. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("endpoint",true) IdentityProviderName *string `json:"identityProviderName,omitempty" tf:"identity_provider_name,omitempty"` @@ -83,7 +83,7 @@ type CognitoIdentityPoolProviderPrincipalTagParameters struct { IdentityPoolIDSelector *v1.Selector `json:"identityPoolIdSelector,omitempty" tf:"-"` // The name of the identity provider. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("endpoint",true) // +kubebuilder:validation:Optional IdentityProviderName *string `json:"identityProviderName,omitempty" tf:"identity_provider_name,omitempty"` diff --git a/apis/cognitoidentity/v1beta1/zz_generated.resolvers.go b/apis/cognitoidentity/v1beta1/zz_generated.resolvers.go index 9699896f87..43ff32641c 100644 --- a/apis/cognitoidentity/v1beta1/zz_generated.resolvers.go +++ b/apis/cognitoidentity/v1beta1/zz_generated.resolvers.go @@ -46,7 +46,7 @@ func (mg *CognitoIdentityPoolProviderPrincipalTag) ResolveReferences( // Resolve mg.Spec.ForProvider.IdentityPoolID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.IdentityPoolIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -84,7 +84,7 @@ func (mg *CognitoIdentityPoolProviderPrincipalTag) ResolveReferences( // Resolve mg.Spec.InitProvider.IdentityPoolID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.IdentityPoolIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git 
a/apis/cognitoidp/v1beta1/zz_generated.conversion_hubs.go b/apis/cognitoidp/v1beta1/zz_generated.conversion_hubs.go index 2eee34db2b..ff8d395ba0 100755 --- a/apis/cognitoidp/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cognitoidp/v1beta1/zz_generated.conversion_hubs.go @@ -12,9 +12,6 @@ func (tr *IdentityProvider) Hub() {} // Hub marks this type as a conversion hub. func (tr *ResourceServer) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *RiskConfiguration) Hub() {} - // Hub marks this type as a conversion hub. func (tr *User) Hub() {} @@ -24,9 +21,6 @@ func (tr *UserGroup) Hub() {} // Hub marks this type as a conversion hub. func (tr *UserInGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *UserPool) Hub() {} - // Hub marks this type as a conversion hub. func (tr *UserPoolClient) Hub() {} diff --git a/apis/cognitoidp/v1beta1/zz_generated.conversion_spokes.go b/apis/cognitoidp/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..275cfe07dd --- /dev/null +++ b/apis/cognitoidp/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this RiskConfiguration to the hub type. 
+func (tr *RiskConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RiskConfiguration type. +func (tr *RiskConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this UserPool to the hub type. +func (tr *UserPool) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the UserPool type. 
+func (tr *UserPool) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cognitoidp/v1beta1/zz_generated.resolvers.go b/apis/cognitoidp/v1beta1/zz_generated.resolvers.go index cba956df05..b4e47863d9 100644 --- a/apis/cognitoidp/v1beta1/zz_generated.resolvers.go +++ b/apis/cognitoidp/v1beta1/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *IdentityProvider) ResolveReferences( // ResolveReferences of this Iden var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -46,7 +46,7 @@ func (mg *IdentityProvider) ResolveReferences( // ResolveReferences of this Iden mg.Spec.ForProvider.UserPoolID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.UserPoolIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -77,7 +77,7 @@ func (mg *ResourceServer) ResolveReferences(ctx context.Context, c client.Reader var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -96,7 +96,7 @@ func (mg *ResourceServer) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.UserPoolID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.UserPoolIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -177,7 +177,7 @@ func (mg *User) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -227,7 +227,7 @@ func (mg *UserGroup) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed 
resource and its list for reference resolution") } @@ -265,7 +265,7 @@ func (mg *UserGroup) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -315,7 +315,7 @@ func (mg *UserInGroup) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.GroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.GroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -372,7 +372,7 @@ func (mg *UserInGroup) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.InitProvider.GroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.GroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1033,7 +1033,7 @@ func (mg *UserPoolClient) ResolveReferences(ctx context.Context, c client.Reader for i3 := 0; i3 < 
len(mg.Spec.ForProvider.AnalyticsConfiguration); i3++ { { - m, l, err = apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta1", "App", "AppList") + m, l, err = apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta2", "App", "AppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1074,7 +1074,7 @@ func (mg *UserPoolClient) ResolveReferences(ctx context.Context, c client.Reader } { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1094,7 +1094,7 @@ func (mg *UserPoolClient) ResolveReferences(ctx context.Context, c client.Reader for i3 := 0; i3 < len(mg.Spec.InitProvider.AnalyticsConfiguration); i3++ { { - m, l, err = apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta1", "App", "AppList") + m, l, err = apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta2", "App", "AppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1135,7 +1135,7 @@ func (mg *UserPoolClient) ResolveReferences(ctx context.Context, c client.Reader } { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1165,7 +1165,7 @@ func (mg *UserPoolDomain) ResolveReferences(ctx context.Context, c client.Reader var rsp reference.ResolutionResponse var 
err error { - m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1184,7 +1184,7 @@ func (mg *UserPoolDomain) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CertificateArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1203,7 +1203,7 @@ func (mg *UserPoolDomain) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.UserPoolID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.UserPoolIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1222,7 +1222,7 @@ func (mg *UserPoolDomain) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.InitProvider.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.CertificateArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = 
apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1272,7 +1272,7 @@ func (mg *UserPoolUICustomization) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.ClientID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ClientIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1310,7 +1310,7 @@ func (mg *UserPoolUICustomization) ResolveReferences(ctx context.Context, c clie mg.Spec.InitProvider.ClientID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ClientIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPool", "UserPoolList") + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cognitoidp/v1beta1/zz_identityprovider_types.go b/apis/cognitoidp/v1beta1/zz_identityprovider_types.go index fcff71603d..6da98926c3 100755 --- a/apis/cognitoidp/v1beta1/zz_identityprovider_types.go +++ b/apis/cognitoidp/v1beta1/zz_identityprovider_types.go @@ -33,7 +33,7 @@ type IdentityProviderInitParameters struct { ProviderType *string `json:"providerType,omitempty" tf:"provider_type,omitempty"` // The user pool id - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // 
+crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` // Reference to a UserPool in cognitoidp to populate userPoolId. @@ -100,7 +100,7 @@ type IdentityProviderParameters struct { Region *string `json:"region" tf:"-"` // The user pool id - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +kubebuilder:validation:Optional UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` diff --git a/apis/cognitoidp/v1beta1/zz_resourceserver_types.go b/apis/cognitoidp/v1beta1/zz_resourceserver_types.go index 0b7c7d53d7..7db8a4e7d4 100755 --- a/apis/cognitoidp/v1beta1/zz_resourceserver_types.go +++ b/apis/cognitoidp/v1beta1/zz_resourceserver_types.go @@ -25,7 +25,7 @@ type ResourceServerInitParameters struct { Scope []ScopeInitParameters `json:"scope,omitempty" tf:"scope,omitempty"` // User pool the client belongs to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` // Reference to a UserPool in cognitoidp to populate userPoolId. @@ -76,7 +76,7 @@ type ResourceServerParameters struct { Scope []ScopeParameters `json:"scope,omitempty" tf:"scope,omitempty"` // User pool the client belongs to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +kubebuilder:validation:Optional UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` diff --git a/apis/cognitoidp/v1beta1/zz_user_types.go b/apis/cognitoidp/v1beta1/zz_user_types.go index a6452b80a4..dd090749d8 100755 --- a/apis/cognitoidp/v1beta1/zz_user_types.go +++ b/apis/cognitoidp/v1beta1/zz_user_types.go @@ -138,7 +138,7 @@ type UserParameters struct { TemporaryPasswordSecretRef *v1.SecretKeySelector `json:"temporaryPasswordSecretRef,omitempty" tf:"-"` // The user pool ID for the user pool where the user will be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` diff --git a/apis/cognitoidp/v1beta1/zz_usergroup_types.go b/apis/cognitoidp/v1beta1/zz_usergroup_types.go index be9cd71565..8abf08c1af 100755 --- a/apis/cognitoidp/v1beta1/zz_usergroup_types.go +++ b/apis/cognitoidp/v1beta1/zz_usergroup_types.go @@ -38,7 +38,7 @@ type UserGroupInitParameters struct { RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` // The user pool ID. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` // Reference to a UserPool in cognitoidp to populate userPoolId. 
@@ -104,7 +104,7 @@ type UserGroupParameters struct { RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` // The user pool ID. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +kubebuilder:validation:Optional UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` diff --git a/apis/cognitoidp/v1beta1/zz_useringroup_types.go b/apis/cognitoidp/v1beta1/zz_useringroup_types.go index 5cc45c2af1..5d9a1652d6 100755 --- a/apis/cognitoidp/v1beta1/zz_useringroup_types.go +++ b/apis/cognitoidp/v1beta1/zz_useringroup_types.go @@ -29,7 +29,7 @@ type UserInGroupInitParameters struct { GroupNameSelector *v1.Selector `json:"groupNameSelector,omitempty" tf:"-"` // The user pool ID of the user and group. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` @@ -90,7 +90,7 @@ type UserInGroupParameters struct { Region *string `json:"region" tf:"-"` // The user pool ID of the user and group. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` diff --git a/apis/cognitoidp/v1beta1/zz_userpoolclient_types.go b/apis/cognitoidp/v1beta1/zz_userpoolclient_types.go index 1fba0bcce6..3c5486bc65 100755 --- a/apis/cognitoidp/v1beta1/zz_userpoolclient_types.go +++ b/apis/cognitoidp/v1beta1/zz_userpoolclient_types.go @@ -19,7 +19,7 @@ type AnalyticsConfigurationInitParameters struct { ApplicationArn *string `json:"applicationArn,omitempty" tf:"application_arn,omitempty"` // Application ID for an Amazon Pinpoint application. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta1.App + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta2.App // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("application_id",true) ApplicationID *string `json:"applicationId,omitempty" tf:"application_id,omitempty"` @@ -76,7 +76,7 @@ type AnalyticsConfigurationParameters struct { ApplicationArn *string `json:"applicationArn,omitempty" tf:"application_arn,omitempty"` // Application ID for an Amazon Pinpoint application. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta1.App + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta2.App // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("application_id",true) // +kubebuilder:validation:Optional ApplicationID *string `json:"applicationId,omitempty" tf:"application_id,omitempty"` @@ -227,7 +227,7 @@ type UserPoolClientInitParameters struct { TokenValidityUnits []TokenValidityUnitsInitParameters `json:"tokenValidityUnits,omitempty" tf:"token_validity_units,omitempty"` // User pool the client belongs to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` // Reference to a UserPool in cognitoidp to populate userPoolId. @@ -430,7 +430,7 @@ type UserPoolClientParameters struct { TokenValidityUnits []TokenValidityUnitsParameters `json:"tokenValidityUnits,omitempty" tf:"token_validity_units,omitempty"` // User pool the client belongs to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +kubebuilder:validation:Optional UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` diff --git a/apis/cognitoidp/v1beta1/zz_userpooldomain_types.go b/apis/cognitoidp/v1beta1/zz_userpooldomain_types.go index 39cc92faba..d80c5c8c90 100755 --- a/apis/cognitoidp/v1beta1/zz_userpooldomain_types.go +++ b/apis/cognitoidp/v1beta1/zz_userpooldomain_types.go @@ -16,7 +16,7 @@ import ( type UserPoolDomainInitParameters struct { // The ARN of an ISSUED ACM certificate in us-east-1 for a custom domain. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` @@ -32,7 +32,7 @@ type UserPoolDomainInitParameters struct { Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` // The user pool ID. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` // Reference to a UserPool in cognitoidp to populate userPoolId. @@ -79,7 +79,7 @@ type UserPoolDomainObservation struct { type UserPoolDomainParameters struct { // The ARN of an ISSUED ACM certificate in us-east-1 for a custom domain. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` @@ -102,7 +102,7 @@ type UserPoolDomainParameters struct { Region *string `json:"region" tf:"-"` // The user pool ID. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +kubebuilder:validation:Optional UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` diff --git a/apis/cognitoidp/v1beta1/zz_userpooluicustomization_types.go b/apis/cognitoidp/v1beta1/zz_userpooluicustomization_types.go index 4e358c48c6..f5301485fe 100755 --- a/apis/cognitoidp/v1beta1/zz_userpooluicustomization_types.go +++ b/apis/cognitoidp/v1beta1/zz_userpooluicustomization_types.go @@ -34,7 +34,7 @@ type UserPoolUICustomizationInitParameters struct { ImageFile *string `json:"imageFile,omitempty" tf:"image_file,omitempty"` // The user pool ID for the user pool. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` // Reference to a UserPool in cognitoidp to populate userPoolId. @@ -104,7 +104,7 @@ type UserPoolUICustomizationParameters struct { Region *string `json:"region" tf:"-"` // The user pool ID for the user pool. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPool + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool // +kubebuilder:validation:Optional UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` diff --git a/apis/cognitoidp/v1beta2/zz_generated.conversion_hubs.go b/apis/cognitoidp/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..a34adbf7d7 --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *RiskConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *UserPool) Hub() {} diff --git a/apis/cognitoidp/v1beta2/zz_generated.deepcopy.go b/apis/cognitoidp/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..e5dac18278 --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,4149 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountRecoverySettingInitParameters) DeepCopyInto(out *AccountRecoverySettingInitParameters) { + *out = *in + if in.RecoveryMechanism != nil { + in, out := &in.RecoveryMechanism, &out.RecoveryMechanism + *out = make([]RecoveryMechanismInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountRecoverySettingInitParameters. +func (in *AccountRecoverySettingInitParameters) DeepCopy() *AccountRecoverySettingInitParameters { + if in == nil { + return nil + } + out := new(AccountRecoverySettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountRecoverySettingObservation) DeepCopyInto(out *AccountRecoverySettingObservation) { + *out = *in + if in.RecoveryMechanism != nil { + in, out := &in.RecoveryMechanism, &out.RecoveryMechanism + *out = make([]RecoveryMechanismObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountRecoverySettingObservation. +func (in *AccountRecoverySettingObservation) DeepCopy() *AccountRecoverySettingObservation { + if in == nil { + return nil + } + out := new(AccountRecoverySettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountRecoverySettingParameters) DeepCopyInto(out *AccountRecoverySettingParameters) { + *out = *in + if in.RecoveryMechanism != nil { + in, out := &in.RecoveryMechanism, &out.RecoveryMechanism + *out = make([]RecoveryMechanismParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountRecoverySettingParameters. +func (in *AccountRecoverySettingParameters) DeepCopy() *AccountRecoverySettingParameters { + if in == nil { + return nil + } + out := new(AccountRecoverySettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountTakeoverRiskConfigurationInitParameters) DeepCopyInto(out *AccountTakeoverRiskConfigurationInitParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(ActionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NotifyConfiguration != nil { + in, out := &in.NotifyConfiguration, &out.NotifyConfiguration + *out = new(NotifyConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountTakeoverRiskConfigurationInitParameters. +func (in *AccountTakeoverRiskConfigurationInitParameters) DeepCopy() *AccountTakeoverRiskConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AccountTakeoverRiskConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountTakeoverRiskConfigurationObservation) DeepCopyInto(out *AccountTakeoverRiskConfigurationObservation) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(ActionsObservation) + (*in).DeepCopyInto(*out) + } + if in.NotifyConfiguration != nil { + in, out := &in.NotifyConfiguration, &out.NotifyConfiguration + *out = new(NotifyConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountTakeoverRiskConfigurationObservation. +func (in *AccountTakeoverRiskConfigurationObservation) DeepCopy() *AccountTakeoverRiskConfigurationObservation { + if in == nil { + return nil + } + out := new(AccountTakeoverRiskConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountTakeoverRiskConfigurationParameters) DeepCopyInto(out *AccountTakeoverRiskConfigurationParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(ActionsParameters) + (*in).DeepCopyInto(*out) + } + if in.NotifyConfiguration != nil { + in, out := &in.NotifyConfiguration, &out.NotifyConfiguration + *out = new(NotifyConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountTakeoverRiskConfigurationParameters. +func (in *AccountTakeoverRiskConfigurationParameters) DeepCopy() *AccountTakeoverRiskConfigurationParameters { + if in == nil { + return nil + } + out := new(AccountTakeoverRiskConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsInitParameters) DeepCopyInto(out *ActionsInitParameters) { + *out = *in + if in.HighAction != nil { + in, out := &in.HighAction, &out.HighAction + *out = new(HighActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LowAction != nil { + in, out := &in.LowAction, &out.LowAction + *out = new(LowActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MediumAction != nil { + in, out := &in.MediumAction, &out.MediumAction + *out = new(MediumActionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsInitParameters. +func (in *ActionsInitParameters) DeepCopy() *ActionsInitParameters { + if in == nil { + return nil + } + out := new(ActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsObservation) DeepCopyInto(out *ActionsObservation) { + *out = *in + if in.HighAction != nil { + in, out := &in.HighAction, &out.HighAction + *out = new(HighActionObservation) + (*in).DeepCopyInto(*out) + } + if in.LowAction != nil { + in, out := &in.LowAction, &out.LowAction + *out = new(LowActionObservation) + (*in).DeepCopyInto(*out) + } + if in.MediumAction != nil { + in, out := &in.MediumAction, &out.MediumAction + *out = new(MediumActionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsObservation. +func (in *ActionsObservation) DeepCopy() *ActionsObservation { + if in == nil { + return nil + } + out := new(ActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsParameters) DeepCopyInto(out *ActionsParameters) { + *out = *in + if in.HighAction != nil { + in, out := &in.HighAction, &out.HighAction + *out = new(HighActionParameters) + (*in).DeepCopyInto(*out) + } + if in.LowAction != nil { + in, out := &in.LowAction, &out.LowAction + *out = new(LowActionParameters) + (*in).DeepCopyInto(*out) + } + if in.MediumAction != nil { + in, out := &in.MediumAction, &out.MediumAction + *out = new(MediumActionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsParameters. +func (in *ActionsParameters) DeepCopy() *ActionsParameters { + if in == nil { + return nil + } + out := new(ActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminCreateUserConfigInitParameters) DeepCopyInto(out *AdminCreateUserConfigInitParameters) { + *out = *in + if in.AllowAdminCreateUserOnly != nil { + in, out := &in.AllowAdminCreateUserOnly, &out.AllowAdminCreateUserOnly + *out = new(bool) + **out = **in + } + if in.InviteMessageTemplate != nil { + in, out := &in.InviteMessageTemplate, &out.InviteMessageTemplate + *out = new(InviteMessageTemplateInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminCreateUserConfigInitParameters. +func (in *AdminCreateUserConfigInitParameters) DeepCopy() *AdminCreateUserConfigInitParameters { + if in == nil { + return nil + } + out := new(AdminCreateUserConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdminCreateUserConfigObservation) DeepCopyInto(out *AdminCreateUserConfigObservation) { + *out = *in + if in.AllowAdminCreateUserOnly != nil { + in, out := &in.AllowAdminCreateUserOnly, &out.AllowAdminCreateUserOnly + *out = new(bool) + **out = **in + } + if in.InviteMessageTemplate != nil { + in, out := &in.InviteMessageTemplate, &out.InviteMessageTemplate + *out = new(InviteMessageTemplateObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminCreateUserConfigObservation. +func (in *AdminCreateUserConfigObservation) DeepCopy() *AdminCreateUserConfigObservation { + if in == nil { + return nil + } + out := new(AdminCreateUserConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminCreateUserConfigParameters) DeepCopyInto(out *AdminCreateUserConfigParameters) { + *out = *in + if in.AllowAdminCreateUserOnly != nil { + in, out := &in.AllowAdminCreateUserOnly, &out.AllowAdminCreateUserOnly + *out = new(bool) + **out = **in + } + if in.InviteMessageTemplate != nil { + in, out := &in.InviteMessageTemplate, &out.InviteMessageTemplate + *out = new(InviteMessageTemplateParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminCreateUserConfigParameters. +func (in *AdminCreateUserConfigParameters) DeepCopy() *AdminCreateUserConfigParameters { + if in == nil { + return nil + } + out := new(AdminCreateUserConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockEmailInitParameters) DeepCopyInto(out *BlockEmailInitParameters) { + *out = *in + if in.HTMLBody != nil { + in, out := &in.HTMLBody, &out.HTMLBody + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.TextBody != nil { + in, out := &in.TextBody, &out.TextBody + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockEmailInitParameters. +func (in *BlockEmailInitParameters) DeepCopy() *BlockEmailInitParameters { + if in == nil { + return nil + } + out := new(BlockEmailInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockEmailObservation) DeepCopyInto(out *BlockEmailObservation) { + *out = *in + if in.HTMLBody != nil { + in, out := &in.HTMLBody, &out.HTMLBody + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.TextBody != nil { + in, out := &in.TextBody, &out.TextBody + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockEmailObservation. +func (in *BlockEmailObservation) DeepCopy() *BlockEmailObservation { + if in == nil { + return nil + } + out := new(BlockEmailObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockEmailParameters) DeepCopyInto(out *BlockEmailParameters) { + *out = *in + if in.HTMLBody != nil { + in, out := &in.HTMLBody, &out.HTMLBody + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.TextBody != nil { + in, out := &in.TextBody, &out.TextBody + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockEmailParameters. +func (in *BlockEmailParameters) DeepCopy() *BlockEmailParameters { + if in == nil { + return nil + } + out := new(BlockEmailParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompromisedCredentialsRiskConfigurationActionsInitParameters) DeepCopyInto(out *CompromisedCredentialsRiskConfigurationActionsInitParameters) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompromisedCredentialsRiskConfigurationActionsInitParameters. +func (in *CompromisedCredentialsRiskConfigurationActionsInitParameters) DeepCopy() *CompromisedCredentialsRiskConfigurationActionsInitParameters { + if in == nil { + return nil + } + out := new(CompromisedCredentialsRiskConfigurationActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompromisedCredentialsRiskConfigurationActionsObservation) DeepCopyInto(out *CompromisedCredentialsRiskConfigurationActionsObservation) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompromisedCredentialsRiskConfigurationActionsObservation. +func (in *CompromisedCredentialsRiskConfigurationActionsObservation) DeepCopy() *CompromisedCredentialsRiskConfigurationActionsObservation { + if in == nil { + return nil + } + out := new(CompromisedCredentialsRiskConfigurationActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompromisedCredentialsRiskConfigurationActionsParameters) DeepCopyInto(out *CompromisedCredentialsRiskConfigurationActionsParameters) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompromisedCredentialsRiskConfigurationActionsParameters. +func (in *CompromisedCredentialsRiskConfigurationActionsParameters) DeepCopy() *CompromisedCredentialsRiskConfigurationActionsParameters { + if in == nil { + return nil + } + out := new(CompromisedCredentialsRiskConfigurationActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompromisedCredentialsRiskConfigurationInitParameters) DeepCopyInto(out *CompromisedCredentialsRiskConfigurationInitParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(CompromisedCredentialsRiskConfigurationActionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventFilter != nil { + in, out := &in.EventFilter, &out.EventFilter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompromisedCredentialsRiskConfigurationInitParameters. +func (in *CompromisedCredentialsRiskConfigurationInitParameters) DeepCopy() *CompromisedCredentialsRiskConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CompromisedCredentialsRiskConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompromisedCredentialsRiskConfigurationObservation) DeepCopyInto(out *CompromisedCredentialsRiskConfigurationObservation) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(CompromisedCredentialsRiskConfigurationActionsObservation) + (*in).DeepCopyInto(*out) + } + if in.EventFilter != nil { + in, out := &in.EventFilter, &out.EventFilter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompromisedCredentialsRiskConfigurationObservation. 
+func (in *CompromisedCredentialsRiskConfigurationObservation) DeepCopy() *CompromisedCredentialsRiskConfigurationObservation { + if in == nil { + return nil + } + out := new(CompromisedCredentialsRiskConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompromisedCredentialsRiskConfigurationParameters) DeepCopyInto(out *CompromisedCredentialsRiskConfigurationParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(CompromisedCredentialsRiskConfigurationActionsParameters) + (*in).DeepCopyInto(*out) + } + if in.EventFilter != nil { + in, out := &in.EventFilter, &out.EventFilter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompromisedCredentialsRiskConfigurationParameters. +func (in *CompromisedCredentialsRiskConfigurationParameters) DeepCopy() *CompromisedCredentialsRiskConfigurationParameters { + if in == nil { + return nil + } + out := new(CompromisedCredentialsRiskConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomEmailSenderInitParameters) DeepCopyInto(out *CustomEmailSenderInitParameters) { + *out = *in + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaArnRef != nil { + in, out := &in.LambdaArnRef, &out.LambdaArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LambdaArnSelector != nil { + in, out := &in.LambdaArnSelector, &out.LambdaArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LambdaVersion != nil { + in, out := &in.LambdaVersion, &out.LambdaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomEmailSenderInitParameters. +func (in *CustomEmailSenderInitParameters) DeepCopy() *CustomEmailSenderInitParameters { + if in == nil { + return nil + } + out := new(CustomEmailSenderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomEmailSenderObservation) DeepCopyInto(out *CustomEmailSenderObservation) { + *out = *in + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaVersion != nil { + in, out := &in.LambdaVersion, &out.LambdaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomEmailSenderObservation. +func (in *CustomEmailSenderObservation) DeepCopy() *CustomEmailSenderObservation { + if in == nil { + return nil + } + out := new(CustomEmailSenderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomEmailSenderParameters) DeepCopyInto(out *CustomEmailSenderParameters) { + *out = *in + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaArnRef != nil { + in, out := &in.LambdaArnRef, &out.LambdaArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LambdaArnSelector != nil { + in, out := &in.LambdaArnSelector, &out.LambdaArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LambdaVersion != nil { + in, out := &in.LambdaVersion, &out.LambdaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomEmailSenderParameters. +func (in *CustomEmailSenderParameters) DeepCopy() *CustomEmailSenderParameters { + if in == nil { + return nil + } + out := new(CustomEmailSenderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomSMSSenderInitParameters) DeepCopyInto(out *CustomSMSSenderInitParameters) { + *out = *in + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaArnRef != nil { + in, out := &in.LambdaArnRef, &out.LambdaArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LambdaArnSelector != nil { + in, out := &in.LambdaArnSelector, &out.LambdaArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LambdaVersion != nil { + in, out := &in.LambdaVersion, &out.LambdaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomSMSSenderInitParameters. 
+func (in *CustomSMSSenderInitParameters) DeepCopy() *CustomSMSSenderInitParameters { + if in == nil { + return nil + } + out := new(CustomSMSSenderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomSMSSenderObservation) DeepCopyInto(out *CustomSMSSenderObservation) { + *out = *in + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaVersion != nil { + in, out := &in.LambdaVersion, &out.LambdaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomSMSSenderObservation. +func (in *CustomSMSSenderObservation) DeepCopy() *CustomSMSSenderObservation { + if in == nil { + return nil + } + out := new(CustomSMSSenderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomSMSSenderParameters) DeepCopyInto(out *CustomSMSSenderParameters) { + *out = *in + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaArnRef != nil { + in, out := &in.LambdaArnRef, &out.LambdaArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LambdaArnSelector != nil { + in, out := &in.LambdaArnSelector, &out.LambdaArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LambdaVersion != nil { + in, out := &in.LambdaVersion, &out.LambdaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomSMSSenderParameters. 
+func (in *CustomSMSSenderParameters) DeepCopy() *CustomSMSSenderParameters { + if in == nil { + return nil + } + out := new(CustomSMSSenderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConfigurationInitParameters) DeepCopyInto(out *DeviceConfigurationInitParameters) { + *out = *in + if in.ChallengeRequiredOnNewDevice != nil { + in, out := &in.ChallengeRequiredOnNewDevice, &out.ChallengeRequiredOnNewDevice + *out = new(bool) + **out = **in + } + if in.DeviceOnlyRememberedOnUserPrompt != nil { + in, out := &in.DeviceOnlyRememberedOnUserPrompt, &out.DeviceOnlyRememberedOnUserPrompt + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfigurationInitParameters. +func (in *DeviceConfigurationInitParameters) DeepCopy() *DeviceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(DeviceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConfigurationObservation) DeepCopyInto(out *DeviceConfigurationObservation) { + *out = *in + if in.ChallengeRequiredOnNewDevice != nil { + in, out := &in.ChallengeRequiredOnNewDevice, &out.ChallengeRequiredOnNewDevice + *out = new(bool) + **out = **in + } + if in.DeviceOnlyRememberedOnUserPrompt != nil { + in, out := &in.DeviceOnlyRememberedOnUserPrompt, &out.DeviceOnlyRememberedOnUserPrompt + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfigurationObservation. 
+func (in *DeviceConfigurationObservation) DeepCopy() *DeviceConfigurationObservation { + if in == nil { + return nil + } + out := new(DeviceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConfigurationParameters) DeepCopyInto(out *DeviceConfigurationParameters) { + *out = *in + if in.ChallengeRequiredOnNewDevice != nil { + in, out := &in.ChallengeRequiredOnNewDevice, &out.ChallengeRequiredOnNewDevice + *out = new(bool) + **out = **in + } + if in.DeviceOnlyRememberedOnUserPrompt != nil { + in, out := &in.DeviceOnlyRememberedOnUserPrompt, &out.DeviceOnlyRememberedOnUserPrompt + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfigurationParameters. +func (in *DeviceConfigurationParameters) DeepCopy() *DeviceConfigurationParameters { + if in == nil { + return nil + } + out := new(DeviceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EmailConfigurationInitParameters) DeepCopyInto(out *EmailConfigurationInitParameters) { + *out = *in + if in.ConfigurationSet != nil { + in, out := &in.ConfigurationSet, &out.ConfigurationSet + *out = new(string) + **out = **in + } + if in.EmailSendingAccount != nil { + in, out := &in.EmailSendingAccount, &out.EmailSendingAccount + *out = new(string) + **out = **in + } + if in.FromEmailAddress != nil { + in, out := &in.FromEmailAddress, &out.FromEmailAddress + *out = new(string) + **out = **in + } + if in.ReplyToEmailAddress != nil { + in, out := &in.ReplyToEmailAddress, &out.ReplyToEmailAddress + *out = new(string) + **out = **in + } + if in.SourceArn != nil { + in, out := &in.SourceArn, &out.SourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailConfigurationInitParameters. +func (in *EmailConfigurationInitParameters) DeepCopy() *EmailConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EmailConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EmailConfigurationObservation) DeepCopyInto(out *EmailConfigurationObservation) { + *out = *in + if in.ConfigurationSet != nil { + in, out := &in.ConfigurationSet, &out.ConfigurationSet + *out = new(string) + **out = **in + } + if in.EmailSendingAccount != nil { + in, out := &in.EmailSendingAccount, &out.EmailSendingAccount + *out = new(string) + **out = **in + } + if in.FromEmailAddress != nil { + in, out := &in.FromEmailAddress, &out.FromEmailAddress + *out = new(string) + **out = **in + } + if in.ReplyToEmailAddress != nil { + in, out := &in.ReplyToEmailAddress, &out.ReplyToEmailAddress + *out = new(string) + **out = **in + } + if in.SourceArn != nil { + in, out := &in.SourceArn, &out.SourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailConfigurationObservation. +func (in *EmailConfigurationObservation) DeepCopy() *EmailConfigurationObservation { + if in == nil { + return nil + } + out := new(EmailConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EmailConfigurationParameters) DeepCopyInto(out *EmailConfigurationParameters) { + *out = *in + if in.ConfigurationSet != nil { + in, out := &in.ConfigurationSet, &out.ConfigurationSet + *out = new(string) + **out = **in + } + if in.EmailSendingAccount != nil { + in, out := &in.EmailSendingAccount, &out.EmailSendingAccount + *out = new(string) + **out = **in + } + if in.FromEmailAddress != nil { + in, out := &in.FromEmailAddress, &out.FromEmailAddress + *out = new(string) + **out = **in + } + if in.ReplyToEmailAddress != nil { + in, out := &in.ReplyToEmailAddress, &out.ReplyToEmailAddress + *out = new(string) + **out = **in + } + if in.SourceArn != nil { + in, out := &in.SourceArn, &out.SourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailConfigurationParameters. +func (in *EmailConfigurationParameters) DeepCopy() *EmailConfigurationParameters { + if in == nil { + return nil + } + out := new(EmailConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HighActionInitParameters) DeepCopyInto(out *HighActionInitParameters) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } + if in.Notify != nil { + in, out := &in.Notify, &out.Notify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighActionInitParameters. +func (in *HighActionInitParameters) DeepCopy() *HighActionInitParameters { + if in == nil { + return nil + } + out := new(HighActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HighActionObservation) DeepCopyInto(out *HighActionObservation) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } + if in.Notify != nil { + in, out := &in.Notify, &out.Notify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighActionObservation. +func (in *HighActionObservation) DeepCopy() *HighActionObservation { + if in == nil { + return nil + } + out := new(HighActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HighActionParameters) DeepCopyInto(out *HighActionParameters) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } + if in.Notify != nil { + in, out := &in.Notify, &out.Notify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighActionParameters. +func (in *HighActionParameters) DeepCopy() *HighActionParameters { + if in == nil { + return nil + } + out := new(HighActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InviteMessageTemplateInitParameters) DeepCopyInto(out *InviteMessageTemplateInitParameters) { + *out = *in + if in.EmailMessage != nil { + in, out := &in.EmailMessage, &out.EmailMessage + *out = new(string) + **out = **in + } + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } + if in.SMSMessage != nil { + in, out := &in.SMSMessage, &out.SMSMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InviteMessageTemplateInitParameters. +func (in *InviteMessageTemplateInitParameters) DeepCopy() *InviteMessageTemplateInitParameters { + if in == nil { + return nil + } + out := new(InviteMessageTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InviteMessageTemplateObservation) DeepCopyInto(out *InviteMessageTemplateObservation) { + *out = *in + if in.EmailMessage != nil { + in, out := &in.EmailMessage, &out.EmailMessage + *out = new(string) + **out = **in + } + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } + if in.SMSMessage != nil { + in, out := &in.SMSMessage, &out.SMSMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InviteMessageTemplateObservation. +func (in *InviteMessageTemplateObservation) DeepCopy() *InviteMessageTemplateObservation { + if in == nil { + return nil + } + out := new(InviteMessageTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InviteMessageTemplateParameters) DeepCopyInto(out *InviteMessageTemplateParameters) { + *out = *in + if in.EmailMessage != nil { + in, out := &in.EmailMessage, &out.EmailMessage + *out = new(string) + **out = **in + } + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } + if in.SMSMessage != nil { + in, out := &in.SMSMessage, &out.SMSMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InviteMessageTemplateParameters. +func (in *InviteMessageTemplateParameters) DeepCopy() *InviteMessageTemplateParameters { + if in == nil { + return nil + } + out := new(InviteMessageTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaConfigInitParameters) DeepCopyInto(out *LambdaConfigInitParameters) { + *out = *in + if in.CreateAuthChallenge != nil { + in, out := &in.CreateAuthChallenge, &out.CreateAuthChallenge + *out = new(string) + **out = **in + } + if in.CreateAuthChallengeRef != nil { + in, out := &in.CreateAuthChallengeRef, &out.CreateAuthChallengeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CreateAuthChallengeSelector != nil { + in, out := &in.CreateAuthChallengeSelector, &out.CreateAuthChallengeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomEmailSender != nil { + in, out := &in.CustomEmailSender, &out.CustomEmailSender + *out = new(CustomEmailSenderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomMessage != nil { + in, out := &in.CustomMessage, &out.CustomMessage + *out = new(string) + **out = **in + } + if in.CustomMessageRef != nil { + in, out := &in.CustomMessageRef, &out.CustomMessageRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CustomMessageSelector != nil { + in, out := 
&in.CustomMessageSelector, &out.CustomMessageSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSMSSender != nil { + in, out := &in.CustomSMSSender, &out.CustomSMSSender + *out = new(CustomSMSSenderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefineAuthChallenge != nil { + in, out := &in.DefineAuthChallenge, &out.DefineAuthChallenge + *out = new(string) + **out = **in + } + if in.DefineAuthChallengeRef != nil { + in, out := &in.DefineAuthChallengeRef, &out.DefineAuthChallengeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefineAuthChallengeSelector != nil { + in, out := &in.DefineAuthChallengeSelector, &out.DefineAuthChallengeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PostAuthentication != nil { + in, out := &in.PostAuthentication, &out.PostAuthentication + *out = new(string) + **out = **in + } + if in.PostAuthenticationRef != nil { + in, out := &in.PostAuthenticationRef, &out.PostAuthenticationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PostAuthenticationSelector != nil { + in, out := &in.PostAuthenticationSelector, &out.PostAuthenticationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PostConfirmation != nil { + in, out := &in.PostConfirmation, &out.PostConfirmation + *out = new(string) + **out = **in + } + if in.PostConfirmationRef != nil { + in, out := &in.PostConfirmationRef, &out.PostConfirmationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PostConfirmationSelector != nil { + in, out := 
&in.PostConfirmationSelector, &out.PostConfirmationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreAuthentication != nil { + in, out := &in.PreAuthentication, &out.PreAuthentication + *out = new(string) + **out = **in + } + if in.PreAuthenticationRef != nil { + in, out := &in.PreAuthenticationRef, &out.PreAuthenticationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PreAuthenticationSelector != nil { + in, out := &in.PreAuthenticationSelector, &out.PreAuthenticationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreSignUp != nil { + in, out := &in.PreSignUp, &out.PreSignUp + *out = new(string) + **out = **in + } + if in.PreSignUpRef != nil { + in, out := &in.PreSignUpRef, &out.PreSignUpRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PreSignUpSelector != nil { + in, out := &in.PreSignUpSelector, &out.PreSignUpSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreTokenGeneration != nil { + in, out := &in.PreTokenGeneration, &out.PreTokenGeneration + *out = new(string) + **out = **in + } + if in.PreTokenGenerationConfig != nil { + in, out := &in.PreTokenGenerationConfig, &out.PreTokenGenerationConfig + *out = new(PreTokenGenerationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PreTokenGenerationRef != nil { + in, out := &in.PreTokenGenerationRef, &out.PreTokenGenerationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PreTokenGenerationSelector != nil { + in, out := &in.PreTokenGenerationSelector, &out.PreTokenGenerationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserMigration != nil { + in, out := &in.UserMigration, &out.UserMigration + *out = new(string) + **out = **in + } + if in.UserMigrationRef != nil { + in, out := &in.UserMigrationRef, &out.UserMigrationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserMigrationSelector != nil { + in, out := 
&in.UserMigrationSelector, &out.UserMigrationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VerifyAuthChallengeResponse != nil { + in, out := &in.VerifyAuthChallengeResponse, &out.VerifyAuthChallengeResponse + *out = new(string) + **out = **in + } + if in.VerifyAuthChallengeResponseRef != nil { + in, out := &in.VerifyAuthChallengeResponseRef, &out.VerifyAuthChallengeResponseRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VerifyAuthChallengeResponseSelector != nil { + in, out := &in.VerifyAuthChallengeResponseSelector, &out.VerifyAuthChallengeResponseSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaConfigInitParameters. +func (in *LambdaConfigInitParameters) DeepCopy() *LambdaConfigInitParameters { + if in == nil { + return nil + } + out := new(LambdaConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LambdaConfigObservation) DeepCopyInto(out *LambdaConfigObservation) { + *out = *in + if in.CreateAuthChallenge != nil { + in, out := &in.CreateAuthChallenge, &out.CreateAuthChallenge + *out = new(string) + **out = **in + } + if in.CustomEmailSender != nil { + in, out := &in.CustomEmailSender, &out.CustomEmailSender + *out = new(CustomEmailSenderObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomMessage != nil { + in, out := &in.CustomMessage, &out.CustomMessage + *out = new(string) + **out = **in + } + if in.CustomSMSSender != nil { + in, out := &in.CustomSMSSender, &out.CustomSMSSender + *out = new(CustomSMSSenderObservation) + (*in).DeepCopyInto(*out) + } + if in.DefineAuthChallenge != nil { + in, out := &in.DefineAuthChallenge, &out.DefineAuthChallenge + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.PostAuthentication != nil { + in, out := &in.PostAuthentication, &out.PostAuthentication + *out = new(string) + **out = **in + } + if in.PostConfirmation != nil { + in, out := &in.PostConfirmation, &out.PostConfirmation + *out = new(string) + **out = **in + } + if in.PreAuthentication != nil { + in, out := &in.PreAuthentication, &out.PreAuthentication + *out = new(string) + **out = **in + } + if in.PreSignUp != nil { + in, out := &in.PreSignUp, &out.PreSignUp + *out = new(string) + **out = **in + } + if in.PreTokenGeneration != nil { + in, out := &in.PreTokenGeneration, &out.PreTokenGeneration + *out = new(string) + **out = **in + } + if in.PreTokenGenerationConfig != nil { + in, out := &in.PreTokenGenerationConfig, &out.PreTokenGenerationConfig + *out = new(PreTokenGenerationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.UserMigration != nil { + in, out := &in.UserMigration, &out.UserMigration + *out = new(string) + **out = **in + } + if in.VerifyAuthChallengeResponse != nil { + in, out := &in.VerifyAuthChallengeResponse, 
&out.VerifyAuthChallengeResponse + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaConfigObservation. +func (in *LambdaConfigObservation) DeepCopy() *LambdaConfigObservation { + if in == nil { + return nil + } + out := new(LambdaConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaConfigParameters) DeepCopyInto(out *LambdaConfigParameters) { + *out = *in + if in.CreateAuthChallenge != nil { + in, out := &in.CreateAuthChallenge, &out.CreateAuthChallenge + *out = new(string) + **out = **in + } + if in.CreateAuthChallengeRef != nil { + in, out := &in.CreateAuthChallengeRef, &out.CreateAuthChallengeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CreateAuthChallengeSelector != nil { + in, out := &in.CreateAuthChallengeSelector, &out.CreateAuthChallengeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomEmailSender != nil { + in, out := &in.CustomEmailSender, &out.CustomEmailSender + *out = new(CustomEmailSenderParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomMessage != nil { + in, out := &in.CustomMessage, &out.CustomMessage + *out = new(string) + **out = **in + } + if in.CustomMessageRef != nil { + in, out := &in.CustomMessageRef, &out.CustomMessageRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CustomMessageSelector != nil { + in, out := &in.CustomMessageSelector, &out.CustomMessageSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSMSSender != nil { + in, out := &in.CustomSMSSender, &out.CustomSMSSender + *out = new(CustomSMSSenderParameters) + (*in).DeepCopyInto(*out) + } + if in.DefineAuthChallenge != nil { + in, out := &in.DefineAuthChallenge, &out.DefineAuthChallenge + *out = new(string) + **out = **in + } + if 
in.DefineAuthChallengeRef != nil { + in, out := &in.DefineAuthChallengeRef, &out.DefineAuthChallengeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefineAuthChallengeSelector != nil { + in, out := &in.DefineAuthChallengeSelector, &out.DefineAuthChallengeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PostAuthentication != nil { + in, out := &in.PostAuthentication, &out.PostAuthentication + *out = new(string) + **out = **in + } + if in.PostAuthenticationRef != nil { + in, out := &in.PostAuthenticationRef, &out.PostAuthenticationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PostAuthenticationSelector != nil { + in, out := &in.PostAuthenticationSelector, &out.PostAuthenticationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PostConfirmation != nil { + in, out := &in.PostConfirmation, &out.PostConfirmation + *out = new(string) + **out = **in + } + if in.PostConfirmationRef != nil { + in, out := &in.PostConfirmationRef, &out.PostConfirmationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PostConfirmationSelector != nil { + in, out := &in.PostConfirmationSelector, &out.PostConfirmationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreAuthentication != nil { + in, out := &in.PreAuthentication, &out.PreAuthentication + *out = new(string) + **out = **in + } + if in.PreAuthenticationRef != nil { + in, out := &in.PreAuthenticationRef, &out.PreAuthenticationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.PreAuthenticationSelector != nil { + in, out := &in.PreAuthenticationSelector, &out.PreAuthenticationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreSignUp != nil { + in, out := &in.PreSignUp, &out.PreSignUp + *out = new(string) + **out = **in + } + if in.PreSignUpRef != nil { + in, out := &in.PreSignUpRef, &out.PreSignUpRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PreSignUpSelector != nil { + in, out := &in.PreSignUpSelector, &out.PreSignUpSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreTokenGeneration != nil { + in, out := &in.PreTokenGeneration, &out.PreTokenGeneration + *out = new(string) + **out = **in + } + if in.PreTokenGenerationConfig != nil { + in, out := &in.PreTokenGenerationConfig, &out.PreTokenGenerationConfig + *out = new(PreTokenGenerationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.PreTokenGenerationRef != nil { + in, out := &in.PreTokenGenerationRef, &out.PreTokenGenerationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PreTokenGenerationSelector != nil { + in, out := &in.PreTokenGenerationSelector, &out.PreTokenGenerationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserMigration != nil { + in, out := &in.UserMigration, &out.UserMigration + *out = new(string) + **out = **in + } + if in.UserMigrationRef != nil { + in, out := &in.UserMigrationRef, &out.UserMigrationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserMigrationSelector != nil { + in, out := &in.UserMigrationSelector, &out.UserMigrationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VerifyAuthChallengeResponse != nil { + in, out := &in.VerifyAuthChallengeResponse, &out.VerifyAuthChallengeResponse + *out = new(string) + **out = **in + } + if in.VerifyAuthChallengeResponseRef != nil { + in, out := &in.VerifyAuthChallengeResponseRef, &out.VerifyAuthChallengeResponseRef + *out = new(v1.Reference) 
+ (*in).DeepCopyInto(*out) + } + if in.VerifyAuthChallengeResponseSelector != nil { + in, out := &in.VerifyAuthChallengeResponseSelector, &out.VerifyAuthChallengeResponseSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaConfigParameters. +func (in *LambdaConfigParameters) DeepCopy() *LambdaConfigParameters { + if in == nil { + return nil + } + out := new(LambdaConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LowActionInitParameters) DeepCopyInto(out *LowActionInitParameters) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } + if in.Notify != nil { + in, out := &in.Notify, &out.Notify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LowActionInitParameters. +func (in *LowActionInitParameters) DeepCopy() *LowActionInitParameters { + if in == nil { + return nil + } + out := new(LowActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LowActionObservation) DeepCopyInto(out *LowActionObservation) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } + if in.Notify != nil { + in, out := &in.Notify, &out.Notify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LowActionObservation. 
+func (in *LowActionObservation) DeepCopy() *LowActionObservation { + if in == nil { + return nil + } + out := new(LowActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LowActionParameters) DeepCopyInto(out *LowActionParameters) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } + if in.Notify != nil { + in, out := &in.Notify, &out.Notify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LowActionParameters. +func (in *LowActionParameters) DeepCopy() *LowActionParameters { + if in == nil { + return nil + } + out := new(LowActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediumActionInitParameters) DeepCopyInto(out *MediumActionInitParameters) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } + if in.Notify != nil { + in, out := &in.Notify, &out.Notify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediumActionInitParameters. +func (in *MediumActionInitParameters) DeepCopy() *MediumActionInitParameters { + if in == nil { + return nil + } + out := new(MediumActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MediumActionObservation) DeepCopyInto(out *MediumActionObservation) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } + if in.Notify != nil { + in, out := &in.Notify, &out.Notify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediumActionObservation. +func (in *MediumActionObservation) DeepCopy() *MediumActionObservation { + if in == nil { + return nil + } + out := new(MediumActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediumActionParameters) DeepCopyInto(out *MediumActionParameters) { + *out = *in + if in.EventAction != nil { + in, out := &in.EventAction, &out.EventAction + *out = new(string) + **out = **in + } + if in.Notify != nil { + in, out := &in.Notify, &out.Notify + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediumActionParameters. +func (in *MediumActionParameters) DeepCopy() *MediumActionParameters { + if in == nil { + return nil + } + out := new(MediumActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MfaEmailInitParameters) DeepCopyInto(out *MfaEmailInitParameters) { + *out = *in + if in.HTMLBody != nil { + in, out := &in.HTMLBody, &out.HTMLBody + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.TextBody != nil { + in, out := &in.TextBody, &out.TextBody + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MfaEmailInitParameters. 
+func (in *MfaEmailInitParameters) DeepCopy() *MfaEmailInitParameters { + if in == nil { + return nil + } + out := new(MfaEmailInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MfaEmailObservation) DeepCopyInto(out *MfaEmailObservation) { + *out = *in + if in.HTMLBody != nil { + in, out := &in.HTMLBody, &out.HTMLBody + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.TextBody != nil { + in, out := &in.TextBody, &out.TextBody + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MfaEmailObservation. +func (in *MfaEmailObservation) DeepCopy() *MfaEmailObservation { + if in == nil { + return nil + } + out := new(MfaEmailObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MfaEmailParameters) DeepCopyInto(out *MfaEmailParameters) { + *out = *in + if in.HTMLBody != nil { + in, out := &in.HTMLBody, &out.HTMLBody + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.TextBody != nil { + in, out := &in.TextBody, &out.TextBody + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MfaEmailParameters. +func (in *MfaEmailParameters) DeepCopy() *MfaEmailParameters { + if in == nil { + return nil + } + out := new(MfaEmailParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NoActionEmailInitParameters) DeepCopyInto(out *NoActionEmailInitParameters) { + *out = *in + if in.HTMLBody != nil { + in, out := &in.HTMLBody, &out.HTMLBody + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.TextBody != nil { + in, out := &in.TextBody, &out.TextBody + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoActionEmailInitParameters. +func (in *NoActionEmailInitParameters) DeepCopy() *NoActionEmailInitParameters { + if in == nil { + return nil + } + out := new(NoActionEmailInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoActionEmailObservation) DeepCopyInto(out *NoActionEmailObservation) { + *out = *in + if in.HTMLBody != nil { + in, out := &in.HTMLBody, &out.HTMLBody + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.TextBody != nil { + in, out := &in.TextBody, &out.TextBody + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoActionEmailObservation. +func (in *NoActionEmailObservation) DeepCopy() *NoActionEmailObservation { + if in == nil { + return nil + } + out := new(NoActionEmailObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NoActionEmailParameters) DeepCopyInto(out *NoActionEmailParameters) { + *out = *in + if in.HTMLBody != nil { + in, out := &in.HTMLBody, &out.HTMLBody + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.TextBody != nil { + in, out := &in.TextBody, &out.TextBody + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoActionEmailParameters. +func (in *NoActionEmailParameters) DeepCopy() *NoActionEmailParameters { + if in == nil { + return nil + } + out := new(NoActionEmailParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotifyConfigurationInitParameters) DeepCopyInto(out *NotifyConfigurationInitParameters) { + *out = *in + if in.BlockEmail != nil { + in, out := &in.BlockEmail, &out.BlockEmail + *out = new(BlockEmailInitParameters) + (*in).DeepCopyInto(*out) + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(string) + **out = **in + } + if in.MfaEmail != nil { + in, out := &in.MfaEmail, &out.MfaEmail + *out = new(MfaEmailInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NoActionEmail != nil { + in, out := &in.NoActionEmail, &out.NoActionEmail + *out = new(NoActionEmailInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReplyTo != nil { + in, out := &in.ReplyTo, &out.ReplyTo + *out = new(string) + **out = **in + } + if in.SourceArn != nil { + in, out := &in.SourceArn, &out.SourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotifyConfigurationInitParameters. 
+func (in *NotifyConfigurationInitParameters) DeepCopy() *NotifyConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NotifyConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotifyConfigurationObservation) DeepCopyInto(out *NotifyConfigurationObservation) { + *out = *in + if in.BlockEmail != nil { + in, out := &in.BlockEmail, &out.BlockEmail + *out = new(BlockEmailObservation) + (*in).DeepCopyInto(*out) + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(string) + **out = **in + } + if in.MfaEmail != nil { + in, out := &in.MfaEmail, &out.MfaEmail + *out = new(MfaEmailObservation) + (*in).DeepCopyInto(*out) + } + if in.NoActionEmail != nil { + in, out := &in.NoActionEmail, &out.NoActionEmail + *out = new(NoActionEmailObservation) + (*in).DeepCopyInto(*out) + } + if in.ReplyTo != nil { + in, out := &in.ReplyTo, &out.ReplyTo + *out = new(string) + **out = **in + } + if in.SourceArn != nil { + in, out := &in.SourceArn, &out.SourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotifyConfigurationObservation. +func (in *NotifyConfigurationObservation) DeepCopy() *NotifyConfigurationObservation { + if in == nil { + return nil + } + out := new(NotifyConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotifyConfigurationParameters) DeepCopyInto(out *NotifyConfigurationParameters) { + *out = *in + if in.BlockEmail != nil { + in, out := &in.BlockEmail, &out.BlockEmail + *out = new(BlockEmailParameters) + (*in).DeepCopyInto(*out) + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(string) + **out = **in + } + if in.MfaEmail != nil { + in, out := &in.MfaEmail, &out.MfaEmail + *out = new(MfaEmailParameters) + (*in).DeepCopyInto(*out) + } + if in.NoActionEmail != nil { + in, out := &in.NoActionEmail, &out.NoActionEmail + *out = new(NoActionEmailParameters) + (*in).DeepCopyInto(*out) + } + if in.ReplyTo != nil { + in, out := &in.ReplyTo, &out.ReplyTo + *out = new(string) + **out = **in + } + if in.SourceArn != nil { + in, out := &in.SourceArn, &out.SourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotifyConfigurationParameters. +func (in *NotifyConfigurationParameters) DeepCopy() *NotifyConfigurationParameters { + if in == nil { + return nil + } + out := new(NotifyConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberAttributeConstraintsInitParameters) DeepCopyInto(out *NumberAttributeConstraintsInitParameters) { + *out = *in + if in.MaxValue != nil { + in, out := &in.MaxValue, &out.MaxValue + *out = new(string) + **out = **in + } + if in.MinValue != nil { + in, out := &in.MinValue, &out.MinValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberAttributeConstraintsInitParameters. 
+func (in *NumberAttributeConstraintsInitParameters) DeepCopy() *NumberAttributeConstraintsInitParameters { + if in == nil { + return nil + } + out := new(NumberAttributeConstraintsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberAttributeConstraintsObservation) DeepCopyInto(out *NumberAttributeConstraintsObservation) { + *out = *in + if in.MaxValue != nil { + in, out := &in.MaxValue, &out.MaxValue + *out = new(string) + **out = **in + } + if in.MinValue != nil { + in, out := &in.MinValue, &out.MinValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberAttributeConstraintsObservation. +func (in *NumberAttributeConstraintsObservation) DeepCopy() *NumberAttributeConstraintsObservation { + if in == nil { + return nil + } + out := new(NumberAttributeConstraintsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberAttributeConstraintsParameters) DeepCopyInto(out *NumberAttributeConstraintsParameters) { + *out = *in + if in.MaxValue != nil { + in, out := &in.MaxValue, &out.MaxValue + *out = new(string) + **out = **in + } + if in.MinValue != nil { + in, out := &in.MinValue, &out.MinValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberAttributeConstraintsParameters. +func (in *NumberAttributeConstraintsParameters) DeepCopy() *NumberAttributeConstraintsParameters { + if in == nil { + return nil + } + out := new(NumberAttributeConstraintsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PasswordPolicyInitParameters) DeepCopyInto(out *PasswordPolicyInitParameters) { + *out = *in + if in.MinimumLength != nil { + in, out := &in.MinimumLength, &out.MinimumLength + *out = new(float64) + **out = **in + } + if in.RequireLowercase != nil { + in, out := &in.RequireLowercase, &out.RequireLowercase + *out = new(bool) + **out = **in + } + if in.RequireNumbers != nil { + in, out := &in.RequireNumbers, &out.RequireNumbers + *out = new(bool) + **out = **in + } + if in.RequireSymbols != nil { + in, out := &in.RequireSymbols, &out.RequireSymbols + *out = new(bool) + **out = **in + } + if in.RequireUppercase != nil { + in, out := &in.RequireUppercase, &out.RequireUppercase + *out = new(bool) + **out = **in + } + if in.TemporaryPasswordValidityDays != nil { + in, out := &in.TemporaryPasswordValidityDays, &out.TemporaryPasswordValidityDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordPolicyInitParameters. +func (in *PasswordPolicyInitParameters) DeepCopy() *PasswordPolicyInitParameters { + if in == nil { + return nil + } + out := new(PasswordPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PasswordPolicyObservation) DeepCopyInto(out *PasswordPolicyObservation) { + *out = *in + if in.MinimumLength != nil { + in, out := &in.MinimumLength, &out.MinimumLength + *out = new(float64) + **out = **in + } + if in.RequireLowercase != nil { + in, out := &in.RequireLowercase, &out.RequireLowercase + *out = new(bool) + **out = **in + } + if in.RequireNumbers != nil { + in, out := &in.RequireNumbers, &out.RequireNumbers + *out = new(bool) + **out = **in + } + if in.RequireSymbols != nil { + in, out := &in.RequireSymbols, &out.RequireSymbols + *out = new(bool) + **out = **in + } + if in.RequireUppercase != nil { + in, out := &in.RequireUppercase, &out.RequireUppercase + *out = new(bool) + **out = **in + } + if in.TemporaryPasswordValidityDays != nil { + in, out := &in.TemporaryPasswordValidityDays, &out.TemporaryPasswordValidityDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordPolicyObservation. +func (in *PasswordPolicyObservation) DeepCopy() *PasswordPolicyObservation { + if in == nil { + return nil + } + out := new(PasswordPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PasswordPolicyParameters) DeepCopyInto(out *PasswordPolicyParameters) { + *out = *in + if in.MinimumLength != nil { + in, out := &in.MinimumLength, &out.MinimumLength + *out = new(float64) + **out = **in + } + if in.RequireLowercase != nil { + in, out := &in.RequireLowercase, &out.RequireLowercase + *out = new(bool) + **out = **in + } + if in.RequireNumbers != nil { + in, out := &in.RequireNumbers, &out.RequireNumbers + *out = new(bool) + **out = **in + } + if in.RequireSymbols != nil { + in, out := &in.RequireSymbols, &out.RequireSymbols + *out = new(bool) + **out = **in + } + if in.RequireUppercase != nil { + in, out := &in.RequireUppercase, &out.RequireUppercase + *out = new(bool) + **out = **in + } + if in.TemporaryPasswordValidityDays != nil { + in, out := &in.TemporaryPasswordValidityDays, &out.TemporaryPasswordValidityDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordPolicyParameters. +func (in *PasswordPolicyParameters) DeepCopy() *PasswordPolicyParameters { + if in == nil { + return nil + } + out := new(PasswordPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreTokenGenerationConfigInitParameters) DeepCopyInto(out *PreTokenGenerationConfigInitParameters) { + *out = *in + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaVersion != nil { + in, out := &in.LambdaVersion, &out.LambdaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreTokenGenerationConfigInitParameters. 
+func (in *PreTokenGenerationConfigInitParameters) DeepCopy() *PreTokenGenerationConfigInitParameters { + if in == nil { + return nil + } + out := new(PreTokenGenerationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreTokenGenerationConfigObservation) DeepCopyInto(out *PreTokenGenerationConfigObservation) { + *out = *in + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaVersion != nil { + in, out := &in.LambdaVersion, &out.LambdaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreTokenGenerationConfigObservation. +func (in *PreTokenGenerationConfigObservation) DeepCopy() *PreTokenGenerationConfigObservation { + if in == nil { + return nil + } + out := new(PreTokenGenerationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreTokenGenerationConfigParameters) DeepCopyInto(out *PreTokenGenerationConfigParameters) { + *out = *in + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.LambdaVersion != nil { + in, out := &in.LambdaVersion, &out.LambdaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreTokenGenerationConfigParameters. +func (in *PreTokenGenerationConfigParameters) DeepCopy() *PreTokenGenerationConfigParameters { + if in == nil { + return nil + } + out := new(PreTokenGenerationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecoveryMechanismInitParameters) DeepCopyInto(out *RecoveryMechanismInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecoveryMechanismInitParameters. +func (in *RecoveryMechanismInitParameters) DeepCopy() *RecoveryMechanismInitParameters { + if in == nil { + return nil + } + out := new(RecoveryMechanismInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecoveryMechanismObservation) DeepCopyInto(out *RecoveryMechanismObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecoveryMechanismObservation. +func (in *RecoveryMechanismObservation) DeepCopy() *RecoveryMechanismObservation { + if in == nil { + return nil + } + out := new(RecoveryMechanismObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecoveryMechanismParameters) DeepCopyInto(out *RecoveryMechanismParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecoveryMechanismParameters. 
+func (in *RecoveryMechanismParameters) DeepCopy() *RecoveryMechanismParameters { + if in == nil { + return nil + } + out := new(RecoveryMechanismParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RiskConfiguration) DeepCopyInto(out *RiskConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RiskConfiguration. +func (in *RiskConfiguration) DeepCopy() *RiskConfiguration { + if in == nil { + return nil + } + out := new(RiskConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RiskConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RiskConfigurationInitParameters) DeepCopyInto(out *RiskConfigurationInitParameters) { + *out = *in + if in.AccountTakeoverRiskConfiguration != nil { + in, out := &in.AccountTakeoverRiskConfiguration, &out.AccountTakeoverRiskConfiguration + *out = new(AccountTakeoverRiskConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.CompromisedCredentialsRiskConfiguration != nil { + in, out := &in.CompromisedCredentialsRiskConfiguration, &out.CompromisedCredentialsRiskConfiguration + *out = new(CompromisedCredentialsRiskConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RiskExceptionConfiguration != nil { + in, out := &in.RiskExceptionConfiguration, &out.RiskExceptionConfiguration + *out = new(RiskExceptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } + if in.UserPoolIDRef != nil { + in, out := &in.UserPoolIDRef, &out.UserPoolIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolIDSelector != nil { + in, out := &in.UserPoolIDSelector, &out.UserPoolIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RiskConfigurationInitParameters. +func (in *RiskConfigurationInitParameters) DeepCopy() *RiskConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RiskConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RiskConfigurationList) DeepCopyInto(out *RiskConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RiskConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RiskConfigurationList. +func (in *RiskConfigurationList) DeepCopy() *RiskConfigurationList { + if in == nil { + return nil + } + out := new(RiskConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RiskConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RiskConfigurationObservation) DeepCopyInto(out *RiskConfigurationObservation) { + *out = *in + if in.AccountTakeoverRiskConfiguration != nil { + in, out := &in.AccountTakeoverRiskConfiguration, &out.AccountTakeoverRiskConfiguration + *out = new(AccountTakeoverRiskConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.CompromisedCredentialsRiskConfiguration != nil { + in, out := &in.CompromisedCredentialsRiskConfiguration, &out.CompromisedCredentialsRiskConfiguration + *out = new(CompromisedCredentialsRiskConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RiskExceptionConfiguration != nil { + in, out := &in.RiskExceptionConfiguration, &out.RiskExceptionConfiguration + *out = new(RiskExceptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.UserPoolID 
!= nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RiskConfigurationObservation. +func (in *RiskConfigurationObservation) DeepCopy() *RiskConfigurationObservation { + if in == nil { + return nil + } + out := new(RiskConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RiskConfigurationParameters) DeepCopyInto(out *RiskConfigurationParameters) { + *out = *in + if in.AccountTakeoverRiskConfiguration != nil { + in, out := &in.AccountTakeoverRiskConfiguration, &out.AccountTakeoverRiskConfiguration + *out = new(AccountTakeoverRiskConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.CompromisedCredentialsRiskConfiguration != nil { + in, out := &in.CompromisedCredentialsRiskConfiguration, &out.CompromisedCredentialsRiskConfiguration + *out = new(CompromisedCredentialsRiskConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RiskExceptionConfiguration != nil { + in, out := &in.RiskExceptionConfiguration, &out.RiskExceptionConfiguration + *out = new(RiskExceptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } + if in.UserPoolIDRef != nil { + in, out := &in.UserPoolIDRef, &out.UserPoolIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolIDSelector != nil { + in, out := &in.UserPoolIDSelector, &out.UserPoolIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new RiskConfigurationParameters. +func (in *RiskConfigurationParameters) DeepCopy() *RiskConfigurationParameters { + if in == nil { + return nil + } + out := new(RiskConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RiskConfigurationSpec) DeepCopyInto(out *RiskConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RiskConfigurationSpec. +func (in *RiskConfigurationSpec) DeepCopy() *RiskConfigurationSpec { + if in == nil { + return nil + } + out := new(RiskConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RiskConfigurationStatus) DeepCopyInto(out *RiskConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RiskConfigurationStatus. +func (in *RiskConfigurationStatus) DeepCopy() *RiskConfigurationStatus { + if in == nil { + return nil + } + out := new(RiskConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RiskExceptionConfigurationInitParameters) DeepCopyInto(out *RiskExceptionConfigurationInitParameters) { + *out = *in + if in.BlockedIPRangeList != nil { + in, out := &in.BlockedIPRangeList, &out.BlockedIPRangeList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SkippedIPRangeList != nil { + in, out := &in.SkippedIPRangeList, &out.SkippedIPRangeList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RiskExceptionConfigurationInitParameters. +func (in *RiskExceptionConfigurationInitParameters) DeepCopy() *RiskExceptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RiskExceptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RiskExceptionConfigurationObservation) DeepCopyInto(out *RiskExceptionConfigurationObservation) { + *out = *in + if in.BlockedIPRangeList != nil { + in, out := &in.BlockedIPRangeList, &out.BlockedIPRangeList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SkippedIPRangeList != nil { + in, out := &in.SkippedIPRangeList, &out.SkippedIPRangeList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RiskExceptionConfigurationObservation. 
+func (in *RiskExceptionConfigurationObservation) DeepCopy() *RiskExceptionConfigurationObservation { + if in == nil { + return nil + } + out := new(RiskExceptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RiskExceptionConfigurationParameters) DeepCopyInto(out *RiskExceptionConfigurationParameters) { + *out = *in + if in.BlockedIPRangeList != nil { + in, out := &in.BlockedIPRangeList, &out.BlockedIPRangeList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SkippedIPRangeList != nil { + in, out := &in.SkippedIPRangeList, &out.SkippedIPRangeList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RiskExceptionConfigurationParameters. +func (in *RiskExceptionConfigurationParameters) DeepCopy() *RiskExceptionConfigurationParameters { + if in == nil { + return nil + } + out := new(RiskExceptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SMSConfigurationInitParameters) DeepCopyInto(out *SMSConfigurationInitParameters) { + *out = *in + if in.ExternalID != nil { + in, out := &in.ExternalID, &out.ExternalID + *out = new(string) + **out = **in + } + if in.SnsCallerArn != nil { + in, out := &in.SnsCallerArn, &out.SnsCallerArn + *out = new(string) + **out = **in + } + if in.SnsCallerArnRef != nil { + in, out := &in.SnsCallerArnRef, &out.SnsCallerArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnsCallerArnSelector != nil { + in, out := &in.SnsCallerArnSelector, &out.SnsCallerArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SnsRegion != nil { + in, out := &in.SnsRegion, &out.SnsRegion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMSConfigurationInitParameters. +func (in *SMSConfigurationInitParameters) DeepCopy() *SMSConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SMSConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMSConfigurationObservation) DeepCopyInto(out *SMSConfigurationObservation) { + *out = *in + if in.ExternalID != nil { + in, out := &in.ExternalID, &out.ExternalID + *out = new(string) + **out = **in + } + if in.SnsCallerArn != nil { + in, out := &in.SnsCallerArn, &out.SnsCallerArn + *out = new(string) + **out = **in + } + if in.SnsRegion != nil { + in, out := &in.SnsRegion, &out.SnsRegion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMSConfigurationObservation. 
+func (in *SMSConfigurationObservation) DeepCopy() *SMSConfigurationObservation { + if in == nil { + return nil + } + out := new(SMSConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMSConfigurationParameters) DeepCopyInto(out *SMSConfigurationParameters) { + *out = *in + if in.ExternalID != nil { + in, out := &in.ExternalID, &out.ExternalID + *out = new(string) + **out = **in + } + if in.SnsCallerArn != nil { + in, out := &in.SnsCallerArn, &out.SnsCallerArn + *out = new(string) + **out = **in + } + if in.SnsCallerArnRef != nil { + in, out := &in.SnsCallerArnRef, &out.SnsCallerArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnsCallerArnSelector != nil { + in, out := &in.SnsCallerArnSelector, &out.SnsCallerArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SnsRegion != nil { + in, out := &in.SnsRegion, &out.SnsRegion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMSConfigurationParameters. +func (in *SMSConfigurationParameters) DeepCopy() *SMSConfigurationParameters { + if in == nil { + return nil + } + out := new(SMSConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaInitParameters) DeepCopyInto(out *SchemaInitParameters) { + *out = *in + if in.AttributeDataType != nil { + in, out := &in.AttributeDataType, &out.AttributeDataType + *out = new(string) + **out = **in + } + if in.DeveloperOnlyAttribute != nil { + in, out := &in.DeveloperOnlyAttribute, &out.DeveloperOnlyAttribute + *out = new(bool) + **out = **in + } + if in.Mutable != nil { + in, out := &in.Mutable, &out.Mutable + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NumberAttributeConstraints != nil { + in, out := &in.NumberAttributeConstraints, &out.NumberAttributeConstraints + *out = new(NumberAttributeConstraintsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.StringAttributeConstraints != nil { + in, out := &in.StringAttributeConstraints, &out.StringAttributeConstraints + *out = new(StringAttributeConstraintsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaInitParameters. +func (in *SchemaInitParameters) DeepCopy() *SchemaInitParameters { + if in == nil { + return nil + } + out := new(SchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaObservation) DeepCopyInto(out *SchemaObservation) { + *out = *in + if in.AttributeDataType != nil { + in, out := &in.AttributeDataType, &out.AttributeDataType + *out = new(string) + **out = **in + } + if in.DeveloperOnlyAttribute != nil { + in, out := &in.DeveloperOnlyAttribute, &out.DeveloperOnlyAttribute + *out = new(bool) + **out = **in + } + if in.Mutable != nil { + in, out := &in.Mutable, &out.Mutable + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NumberAttributeConstraints != nil { + in, out := &in.NumberAttributeConstraints, &out.NumberAttributeConstraints + *out = new(NumberAttributeConstraintsObservation) + (*in).DeepCopyInto(*out) + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.StringAttributeConstraints != nil { + in, out := &in.StringAttributeConstraints, &out.StringAttributeConstraints + *out = new(StringAttributeConstraintsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaObservation. +func (in *SchemaObservation) DeepCopy() *SchemaObservation { + if in == nil { + return nil + } + out := new(SchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaParameters) DeepCopyInto(out *SchemaParameters) { + *out = *in + if in.AttributeDataType != nil { + in, out := &in.AttributeDataType, &out.AttributeDataType + *out = new(string) + **out = **in + } + if in.DeveloperOnlyAttribute != nil { + in, out := &in.DeveloperOnlyAttribute, &out.DeveloperOnlyAttribute + *out = new(bool) + **out = **in + } + if in.Mutable != nil { + in, out := &in.Mutable, &out.Mutable + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NumberAttributeConstraints != nil { + in, out := &in.NumberAttributeConstraints, &out.NumberAttributeConstraints + *out = new(NumberAttributeConstraintsParameters) + (*in).DeepCopyInto(*out) + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.StringAttributeConstraints != nil { + in, out := &in.StringAttributeConstraints, &out.StringAttributeConstraints + *out = new(StringAttributeConstraintsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaParameters. +func (in *SchemaParameters) DeepCopy() *SchemaParameters { + if in == nil { + return nil + } + out := new(SchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoftwareTokenMfaConfigurationInitParameters) DeepCopyInto(out *SoftwareTokenMfaConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareTokenMfaConfigurationInitParameters. 
+func (in *SoftwareTokenMfaConfigurationInitParameters) DeepCopy() *SoftwareTokenMfaConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SoftwareTokenMfaConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoftwareTokenMfaConfigurationObservation) DeepCopyInto(out *SoftwareTokenMfaConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareTokenMfaConfigurationObservation. +func (in *SoftwareTokenMfaConfigurationObservation) DeepCopy() *SoftwareTokenMfaConfigurationObservation { + if in == nil { + return nil + } + out := new(SoftwareTokenMfaConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoftwareTokenMfaConfigurationParameters) DeepCopyInto(out *SoftwareTokenMfaConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareTokenMfaConfigurationParameters. +func (in *SoftwareTokenMfaConfigurationParameters) DeepCopy() *SoftwareTokenMfaConfigurationParameters { + if in == nil { + return nil + } + out := new(SoftwareTokenMfaConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringAttributeConstraintsInitParameters) DeepCopyInto(out *StringAttributeConstraintsInitParameters) { + *out = *in + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + *out = new(string) + **out = **in + } + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringAttributeConstraintsInitParameters. +func (in *StringAttributeConstraintsInitParameters) DeepCopy() *StringAttributeConstraintsInitParameters { + if in == nil { + return nil + } + out := new(StringAttributeConstraintsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringAttributeConstraintsObservation) DeepCopyInto(out *StringAttributeConstraintsObservation) { + *out = *in + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + *out = new(string) + **out = **in + } + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringAttributeConstraintsObservation. +func (in *StringAttributeConstraintsObservation) DeepCopy() *StringAttributeConstraintsObservation { + if in == nil { + return nil + } + out := new(StringAttributeConstraintsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringAttributeConstraintsParameters) DeepCopyInto(out *StringAttributeConstraintsParameters) { + *out = *in + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + *out = new(string) + **out = **in + } + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringAttributeConstraintsParameters. +func (in *StringAttributeConstraintsParameters) DeepCopy() *StringAttributeConstraintsParameters { + if in == nil { + return nil + } + out := new(StringAttributeConstraintsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAttributeUpdateSettingsInitParameters) DeepCopyInto(out *UserAttributeUpdateSettingsInitParameters) { + *out = *in + if in.AttributesRequireVerificationBeforeUpdate != nil { + in, out := &in.AttributesRequireVerificationBeforeUpdate, &out.AttributesRequireVerificationBeforeUpdate + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAttributeUpdateSettingsInitParameters. +func (in *UserAttributeUpdateSettingsInitParameters) DeepCopy() *UserAttributeUpdateSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserAttributeUpdateSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserAttributeUpdateSettingsObservation) DeepCopyInto(out *UserAttributeUpdateSettingsObservation) { + *out = *in + if in.AttributesRequireVerificationBeforeUpdate != nil { + in, out := &in.AttributesRequireVerificationBeforeUpdate, &out.AttributesRequireVerificationBeforeUpdate + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAttributeUpdateSettingsObservation. +func (in *UserAttributeUpdateSettingsObservation) DeepCopy() *UserAttributeUpdateSettingsObservation { + if in == nil { + return nil + } + out := new(UserAttributeUpdateSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAttributeUpdateSettingsParameters) DeepCopyInto(out *UserAttributeUpdateSettingsParameters) { + *out = *in + if in.AttributesRequireVerificationBeforeUpdate != nil { + in, out := &in.AttributesRequireVerificationBeforeUpdate, &out.AttributesRequireVerificationBeforeUpdate + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAttributeUpdateSettingsParameters. +func (in *UserAttributeUpdateSettingsParameters) DeepCopy() *UserAttributeUpdateSettingsParameters { + if in == nil { + return nil + } + out := new(UserAttributeUpdateSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserPool) DeepCopyInto(out *UserPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPool. +func (in *UserPool) DeepCopy() *UserPool { + if in == nil { + return nil + } + out := new(UserPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPoolAddOnsInitParameters) DeepCopyInto(out *UserPoolAddOnsInitParameters) { + *out = *in + if in.AdvancedSecurityMode != nil { + in, out := &in.AdvancedSecurityMode, &out.AdvancedSecurityMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolAddOnsInitParameters. +func (in *UserPoolAddOnsInitParameters) DeepCopy() *UserPoolAddOnsInitParameters { + if in == nil { + return nil + } + out := new(UserPoolAddOnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPoolAddOnsObservation) DeepCopyInto(out *UserPoolAddOnsObservation) { + *out = *in + if in.AdvancedSecurityMode != nil { + in, out := &in.AdvancedSecurityMode, &out.AdvancedSecurityMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolAddOnsObservation. 
+func (in *UserPoolAddOnsObservation) DeepCopy() *UserPoolAddOnsObservation { + if in == nil { + return nil + } + out := new(UserPoolAddOnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPoolAddOnsParameters) DeepCopyInto(out *UserPoolAddOnsParameters) { + *out = *in + if in.AdvancedSecurityMode != nil { + in, out := &in.AdvancedSecurityMode, &out.AdvancedSecurityMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolAddOnsParameters. +func (in *UserPoolAddOnsParameters) DeepCopy() *UserPoolAddOnsParameters { + if in == nil { + return nil + } + out := new(UserPoolAddOnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPoolInitParameters) DeepCopyInto(out *UserPoolInitParameters) { + *out = *in + if in.AccountRecoverySetting != nil { + in, out := &in.AccountRecoverySetting, &out.AccountRecoverySetting + *out = new(AccountRecoverySettingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdminCreateUserConfig != nil { + in, out := &in.AdminCreateUserConfig, &out.AdminCreateUserConfig + *out = new(AdminCreateUserConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AliasAttributes != nil { + in, out := &in.AliasAttributes, &out.AliasAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutoVerifiedAttributes != nil { + in, out := &in.AutoVerifiedAttributes, &out.AutoVerifiedAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeletionProtection != 
nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(string) + **out = **in + } + if in.DeviceConfiguration != nil { + in, out := &in.DeviceConfiguration, &out.DeviceConfiguration + *out = new(DeviceConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EmailConfiguration != nil { + in, out := &in.EmailConfiguration, &out.EmailConfiguration + *out = new(EmailConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EmailVerificationMessage != nil { + in, out := &in.EmailVerificationMessage, &out.EmailVerificationMessage + *out = new(string) + **out = **in + } + if in.EmailVerificationSubject != nil { + in, out := &in.EmailVerificationSubject, &out.EmailVerificationSubject + *out = new(string) + **out = **in + } + if in.LambdaConfig != nil { + in, out := &in.LambdaConfig, &out.LambdaConfig + *out = new(LambdaConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MfaConfiguration != nil { + in, out := &in.MfaConfiguration, &out.MfaConfiguration + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PasswordPolicy != nil { + in, out := &in.PasswordPolicy, &out.PasswordPolicy + *out = new(PasswordPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SMSAuthenticationMessage != nil { + in, out := &in.SMSAuthenticationMessage, &out.SMSAuthenticationMessage + *out = new(string) + **out = **in + } + if in.SMSConfiguration != nil { + in, out := &in.SMSConfiguration, &out.SMSConfiguration + *out = new(SMSConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SMSVerificationMessage != nil { + in, out := &in.SMSVerificationMessage, &out.SMSVerificationMessage + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = make([]SchemaInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SoftwareTokenMfaConfiguration != nil 
{ + in, out := &in.SoftwareTokenMfaConfiguration, &out.SoftwareTokenMfaConfiguration + *out = new(SoftwareTokenMfaConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserAttributeUpdateSettings != nil { + in, out := &in.UserAttributeUpdateSettings, &out.UserAttributeUpdateSettings + *out = new(UserAttributeUpdateSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UserPoolAddOns != nil { + in, out := &in.UserPoolAddOns, &out.UserPoolAddOns + *out = new(UserPoolAddOnsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UsernameAttributes != nil { + in, out := &in.UsernameAttributes, &out.UsernameAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UsernameConfiguration != nil { + in, out := &in.UsernameConfiguration, &out.UsernameConfiguration + *out = new(UsernameConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VerificationMessageTemplate != nil { + in, out := &in.VerificationMessageTemplate, &out.VerificationMessageTemplate + *out = new(VerificationMessageTemplateInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolInitParameters. +func (in *UserPoolInitParameters) DeepCopy() *UserPoolInitParameters { + if in == nil { + return nil + } + out := new(UserPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserPoolList) DeepCopyInto(out *UserPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]UserPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolList. +func (in *UserPoolList) DeepCopy() *UserPoolList { + if in == nil { + return nil + } + out := new(UserPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPoolObservation) DeepCopyInto(out *UserPoolObservation) { + *out = *in + if in.AccountRecoverySetting != nil { + in, out := &in.AccountRecoverySetting, &out.AccountRecoverySetting + *out = new(AccountRecoverySettingObservation) + (*in).DeepCopyInto(*out) + } + if in.AdminCreateUserConfig != nil { + in, out := &in.AdminCreateUserConfig, &out.AdminCreateUserConfig + *out = new(AdminCreateUserConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.AliasAttributes != nil { + in, out := &in.AliasAttributes, &out.AliasAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoVerifiedAttributes != nil { + in, out := &in.AutoVerifiedAttributes, &out.AutoVerifiedAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + 
if in.CreationDate != nil { + in, out := &in.CreationDate, &out.CreationDate + *out = new(string) + **out = **in + } + if in.CustomDomain != nil { + in, out := &in.CustomDomain, &out.CustomDomain + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(string) + **out = **in + } + if in.DeviceConfiguration != nil { + in, out := &in.DeviceConfiguration, &out.DeviceConfiguration + *out = new(DeviceConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.EmailConfiguration != nil { + in, out := &in.EmailConfiguration, &out.EmailConfiguration + *out = new(EmailConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.EmailVerificationMessage != nil { + in, out := &in.EmailVerificationMessage, &out.EmailVerificationMessage + *out = new(string) + **out = **in + } + if in.EmailVerificationSubject != nil { + in, out := &in.EmailVerificationSubject, &out.EmailVerificationSubject + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EstimatedNumberOfUsers != nil { + in, out := &in.EstimatedNumberOfUsers, &out.EstimatedNumberOfUsers + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LambdaConfig != nil { + in, out := &in.LambdaConfig, &out.LambdaConfig + *out = new(LambdaConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.LastModifiedDate != nil { + in, out := &in.LastModifiedDate, &out.LastModifiedDate + *out = new(string) + **out = **in + } + if in.MfaConfiguration != nil { + in, out := &in.MfaConfiguration, &out.MfaConfiguration + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if 
in.PasswordPolicy != nil { + in, out := &in.PasswordPolicy, &out.PasswordPolicy + *out = new(PasswordPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.SMSAuthenticationMessage != nil { + in, out := &in.SMSAuthenticationMessage, &out.SMSAuthenticationMessage + *out = new(string) + **out = **in + } + if in.SMSConfiguration != nil { + in, out := &in.SMSConfiguration, &out.SMSConfiguration + *out = new(SMSConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.SMSVerificationMessage != nil { + in, out := &in.SMSVerificationMessage, &out.SMSVerificationMessage + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = make([]SchemaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SoftwareTokenMfaConfiguration != nil { + in, out := &in.SoftwareTokenMfaConfiguration, &out.SoftwareTokenMfaConfiguration + *out = new(SoftwareTokenMfaConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserAttributeUpdateSettings != nil { + in, out := &in.UserAttributeUpdateSettings, &out.UserAttributeUpdateSettings + *out = new(UserAttributeUpdateSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.UserPoolAddOns != nil { + in, out := &in.UserPoolAddOns, &out.UserPoolAddOns + *out = new(UserPoolAddOnsObservation) + 
(*in).DeepCopyInto(*out) + } + if in.UsernameAttributes != nil { + in, out := &in.UsernameAttributes, &out.UsernameAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UsernameConfiguration != nil { + in, out := &in.UsernameConfiguration, &out.UsernameConfiguration + *out = new(UsernameConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.VerificationMessageTemplate != nil { + in, out := &in.VerificationMessageTemplate, &out.VerificationMessageTemplate + *out = new(VerificationMessageTemplateObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolObservation. +func (in *UserPoolObservation) DeepCopy() *UserPoolObservation { + if in == nil { + return nil + } + out := new(UserPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserPoolParameters) DeepCopyInto(out *UserPoolParameters) { + *out = *in + if in.AccountRecoverySetting != nil { + in, out := &in.AccountRecoverySetting, &out.AccountRecoverySetting + *out = new(AccountRecoverySettingParameters) + (*in).DeepCopyInto(*out) + } + if in.AdminCreateUserConfig != nil { + in, out := &in.AdminCreateUserConfig, &out.AdminCreateUserConfig + *out = new(AdminCreateUserConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.AliasAttributes != nil { + in, out := &in.AliasAttributes, &out.AliasAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutoVerifiedAttributes != nil { + in, out := &in.AutoVerifiedAttributes, &out.AutoVerifiedAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(string) + **out = **in + } + if in.DeviceConfiguration != nil { + in, out := &in.DeviceConfiguration, &out.DeviceConfiguration + *out = new(DeviceConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.EmailConfiguration != nil { + in, out := &in.EmailConfiguration, &out.EmailConfiguration + *out = new(EmailConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.EmailVerificationMessage != nil { + in, out := &in.EmailVerificationMessage, &out.EmailVerificationMessage + *out = new(string) + **out = **in + } + if in.EmailVerificationSubject != nil { + in, out := &in.EmailVerificationSubject, &out.EmailVerificationSubject + *out = new(string) + **out = **in + } + if in.LambdaConfig != nil { + in, out := &in.LambdaConfig, &out.LambdaConfig + *out = new(LambdaConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.MfaConfiguration != nil { + in, out := &in.MfaConfiguration, 
&out.MfaConfiguration + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PasswordPolicy != nil { + in, out := &in.PasswordPolicy, &out.PasswordPolicy + *out = new(PasswordPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SMSAuthenticationMessage != nil { + in, out := &in.SMSAuthenticationMessage, &out.SMSAuthenticationMessage + *out = new(string) + **out = **in + } + if in.SMSConfiguration != nil { + in, out := &in.SMSConfiguration, &out.SMSConfiguration + *out = new(SMSConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.SMSVerificationMessage != nil { + in, out := &in.SMSVerificationMessage, &out.SMSVerificationMessage + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = make([]SchemaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SoftwareTokenMfaConfiguration != nil { + in, out := &in.SoftwareTokenMfaConfiguration, &out.SoftwareTokenMfaConfiguration + *out = new(SoftwareTokenMfaConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserAttributeUpdateSettings != nil { + in, out := &in.UserAttributeUpdateSettings, &out.UserAttributeUpdateSettings + *out = new(UserAttributeUpdateSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.UserPoolAddOns != nil { + in, out := &in.UserPoolAddOns, &out.UserPoolAddOns + *out = new(UserPoolAddOnsParameters) + (*in).DeepCopyInto(*out) + } + if in.UsernameAttributes != nil { + in, out := 
&in.UsernameAttributes, &out.UsernameAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UsernameConfiguration != nil { + in, out := &in.UsernameConfiguration, &out.UsernameConfiguration + *out = new(UsernameConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.VerificationMessageTemplate != nil { + in, out := &in.VerificationMessageTemplate, &out.VerificationMessageTemplate + *out = new(VerificationMessageTemplateParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolParameters. +func (in *UserPoolParameters) DeepCopy() *UserPoolParameters { + if in == nil { + return nil + } + out := new(UserPoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPoolSpec) DeepCopyInto(out *UserPoolSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolSpec. +func (in *UserPoolSpec) DeepCopy() *UserPoolSpec { + if in == nil { + return nil + } + out := new(UserPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPoolStatus) DeepCopyInto(out *UserPoolStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPoolStatus. 
+func (in *UserPoolStatus) DeepCopy() *UserPoolStatus { + if in == nil { + return nil + } + out := new(UserPoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernameConfigurationInitParameters) DeepCopyInto(out *UsernameConfigurationInitParameters) { + *out = *in + if in.CaseSensitive != nil { + in, out := &in.CaseSensitive, &out.CaseSensitive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameConfigurationInitParameters. +func (in *UsernameConfigurationInitParameters) DeepCopy() *UsernameConfigurationInitParameters { + if in == nil { + return nil + } + out := new(UsernameConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernameConfigurationObservation) DeepCopyInto(out *UsernameConfigurationObservation) { + *out = *in + if in.CaseSensitive != nil { + in, out := &in.CaseSensitive, &out.CaseSensitive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameConfigurationObservation. +func (in *UsernameConfigurationObservation) DeepCopy() *UsernameConfigurationObservation { + if in == nil { + return nil + } + out := new(UsernameConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UsernameConfigurationParameters) DeepCopyInto(out *UsernameConfigurationParameters) { + *out = *in + if in.CaseSensitive != nil { + in, out := &in.CaseSensitive, &out.CaseSensitive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameConfigurationParameters. +func (in *UsernameConfigurationParameters) DeepCopy() *UsernameConfigurationParameters { + if in == nil { + return nil + } + out := new(UsernameConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerificationMessageTemplateInitParameters) DeepCopyInto(out *VerificationMessageTemplateInitParameters) { + *out = *in + if in.DefaultEmailOption != nil { + in, out := &in.DefaultEmailOption, &out.DefaultEmailOption + *out = new(string) + **out = **in + } + if in.EmailMessage != nil { + in, out := &in.EmailMessage, &out.EmailMessage + *out = new(string) + **out = **in + } + if in.EmailMessageByLink != nil { + in, out := &in.EmailMessageByLink, &out.EmailMessageByLink + *out = new(string) + **out = **in + } + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } + if in.EmailSubjectByLink != nil { + in, out := &in.EmailSubjectByLink, &out.EmailSubjectByLink + *out = new(string) + **out = **in + } + if in.SMSMessage != nil { + in, out := &in.SMSMessage, &out.SMSMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationMessageTemplateInitParameters. 
+func (in *VerificationMessageTemplateInitParameters) DeepCopy() *VerificationMessageTemplateInitParameters { + if in == nil { + return nil + } + out := new(VerificationMessageTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerificationMessageTemplateObservation) DeepCopyInto(out *VerificationMessageTemplateObservation) { + *out = *in + if in.DefaultEmailOption != nil { + in, out := &in.DefaultEmailOption, &out.DefaultEmailOption + *out = new(string) + **out = **in + } + if in.EmailMessage != nil { + in, out := &in.EmailMessage, &out.EmailMessage + *out = new(string) + **out = **in + } + if in.EmailMessageByLink != nil { + in, out := &in.EmailMessageByLink, &out.EmailMessageByLink + *out = new(string) + **out = **in + } + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } + if in.EmailSubjectByLink != nil { + in, out := &in.EmailSubjectByLink, &out.EmailSubjectByLink + *out = new(string) + **out = **in + } + if in.SMSMessage != nil { + in, out := &in.SMSMessage, &out.SMSMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationMessageTemplateObservation. +func (in *VerificationMessageTemplateObservation) DeepCopy() *VerificationMessageTemplateObservation { + if in == nil { + return nil + } + out := new(VerificationMessageTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VerificationMessageTemplateParameters) DeepCopyInto(out *VerificationMessageTemplateParameters) { + *out = *in + if in.DefaultEmailOption != nil { + in, out := &in.DefaultEmailOption, &out.DefaultEmailOption + *out = new(string) + **out = **in + } + if in.EmailMessage != nil { + in, out := &in.EmailMessage, &out.EmailMessage + *out = new(string) + **out = **in + } + if in.EmailMessageByLink != nil { + in, out := &in.EmailMessageByLink, &out.EmailMessageByLink + *out = new(string) + **out = **in + } + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } + if in.EmailSubjectByLink != nil { + in, out := &in.EmailSubjectByLink, &out.EmailSubjectByLink + *out = new(string) + **out = **in + } + if in.SMSMessage != nil { + in, out := &in.SMSMessage, &out.SMSMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationMessageTemplateParameters. +func (in *VerificationMessageTemplateParameters) DeepCopy() *VerificationMessageTemplateParameters { + if in == nil { + return nil + } + out := new(VerificationMessageTemplateParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cognitoidp/v1beta2/zz_generated.managed.go b/apis/cognitoidp/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..235bedae1e --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this RiskConfiguration. +func (mg *RiskConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RiskConfiguration. 
+func (mg *RiskConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RiskConfiguration. +func (mg *RiskConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RiskConfiguration. +func (mg *RiskConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RiskConfiguration. +func (mg *RiskConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RiskConfiguration. +func (mg *RiskConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RiskConfiguration. +func (mg *RiskConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RiskConfiguration. +func (mg *RiskConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RiskConfiguration. +func (mg *RiskConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RiskConfiguration. +func (mg *RiskConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RiskConfiguration. +func (mg *RiskConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RiskConfiguration. +func (mg *RiskConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this UserPool. 
+func (mg *UserPool) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this UserPool. +func (mg *UserPool) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this UserPool. +func (mg *UserPool) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this UserPool. +func (mg *UserPool) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this UserPool. +func (mg *UserPool) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this UserPool. +func (mg *UserPool) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this UserPool. +func (mg *UserPool) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this UserPool. +func (mg *UserPool) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this UserPool. +func (mg *UserPool) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this UserPool. +func (mg *UserPool) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this UserPool. +func (mg *UserPool) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this UserPool. 
+func (mg *UserPool) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cognitoidp/v1beta2/zz_generated.managedlist.go b/apis/cognitoidp/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..a836b9f81f --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this RiskConfigurationList. +func (l *RiskConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this UserPoolList. +func (l *UserPoolList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cognitoidp/v1beta2/zz_generated.resolvers.go b/apis/cognitoidp/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..e17948dc8c --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,678 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *RiskConfiguration) ResolveReferences( // ResolveReferences of this RiskConfiguration. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.UserPoolID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.UserPoolIDRef, + Selector: mg.Spec.ForProvider.UserPoolIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.UserPoolID") + } + mg.Spec.ForProvider.UserPoolID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.UserPoolIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.UserPoolID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.UserPoolIDRef, + Selector: mg.Spec.InitProvider.UserPoolIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.UserPoolID") + } + mg.Spec.InitProvider.UserPoolID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.UserPoolIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this UserPool. +func (mg *UserPool) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.CreateAuthChallenge), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.CreateAuthChallengeRef, + Selector: mg.Spec.ForProvider.LambdaConfig.CreateAuthChallengeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.CreateAuthChallenge") + } + mg.Spec.ForProvider.LambdaConfig.CreateAuthChallenge = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.CreateAuthChallengeRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + if mg.Spec.ForProvider.LambdaConfig.CustomEmailSender != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + 
rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.CustomEmailSender.LambdaArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.CustomEmailSender.LambdaArnRef, + Selector: mg.Spec.ForProvider.LambdaConfig.CustomEmailSender.LambdaArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.CustomEmailSender.LambdaArn") + } + mg.Spec.ForProvider.LambdaConfig.CustomEmailSender.LambdaArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.CustomEmailSender.LambdaArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.CustomMessage), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.CustomMessageRef, + Selector: mg.Spec.ForProvider.LambdaConfig.CustomMessageSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.CustomMessage") + } + mg.Spec.ForProvider.LambdaConfig.CustomMessage = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.CustomMessageRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + if mg.Spec.ForProvider.LambdaConfig.CustomSMSSender != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.CustomSMSSender.LambdaArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.CustomSMSSender.LambdaArnRef, + Selector: mg.Spec.ForProvider.LambdaConfig.CustomSMSSender.LambdaArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.CustomSMSSender.LambdaArn") + } + mg.Spec.ForProvider.LambdaConfig.CustomSMSSender.LambdaArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.CustomSMSSender.LambdaArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.DefineAuthChallenge), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.DefineAuthChallengeRef, + Selector: mg.Spec.ForProvider.LambdaConfig.DefineAuthChallengeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.DefineAuthChallenge") + } + mg.Spec.ForProvider.LambdaConfig.DefineAuthChallenge = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.DefineAuthChallengeRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = 
r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.LambdaConfig.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.KMSKeyID") + } + mg.Spec.ForProvider.LambdaConfig.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.KMSKeyIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.PostAuthentication), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.PostAuthenticationRef, + Selector: mg.Spec.ForProvider.LambdaConfig.PostAuthenticationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.PostAuthentication") + } + mg.Spec.ForProvider.LambdaConfig.PostAuthentication = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.PostAuthenticationRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.PostConfirmation), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.PostConfirmationRef, + Selector: mg.Spec.ForProvider.LambdaConfig.PostConfirmationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.PostConfirmation") + } + mg.Spec.ForProvider.LambdaConfig.PostConfirmation = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.PostConfirmationRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.PreAuthentication), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.PreAuthenticationRef, + Selector: mg.Spec.ForProvider.LambdaConfig.PreAuthenticationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.PreAuthentication") + } + mg.Spec.ForProvider.LambdaConfig.PreAuthentication = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.PreAuthenticationRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.PreSignUp), + Extract: 
common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.PreSignUpRef, + Selector: mg.Spec.ForProvider.LambdaConfig.PreSignUpSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.PreSignUp") + } + mg.Spec.ForProvider.LambdaConfig.PreSignUp = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.PreSignUpRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.PreTokenGeneration), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.PreTokenGenerationRef, + Selector: mg.Spec.ForProvider.LambdaConfig.PreTokenGenerationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.PreTokenGeneration") + } + mg.Spec.ForProvider.LambdaConfig.PreTokenGeneration = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.PreTokenGenerationRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.UserMigration), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.UserMigrationRef, + Selector: 
mg.Spec.ForProvider.LambdaConfig.UserMigrationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.UserMigration") + } + mg.Spec.ForProvider.LambdaConfig.UserMigration = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.UserMigrationRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LambdaConfig.VerifyAuthChallengeResponse), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.LambdaConfig.VerifyAuthChallengeResponseRef, + Selector: mg.Spec.ForProvider.LambdaConfig.VerifyAuthChallengeResponseSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LambdaConfig.VerifyAuthChallengeResponse") + } + mg.Spec.ForProvider.LambdaConfig.VerifyAuthChallengeResponse = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LambdaConfig.VerifyAuthChallengeResponseRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.SMSConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SMSConfiguration.SnsCallerArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.SMSConfiguration.SnsCallerArnRef, + Selector: 
mg.Spec.ForProvider.SMSConfiguration.SnsCallerArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SMSConfiguration.SnsCallerArn") + } + mg.Spec.ForProvider.SMSConfiguration.SnsCallerArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SMSConfiguration.SnsCallerArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.CreateAuthChallenge), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.CreateAuthChallengeRef, + Selector: mg.Spec.InitProvider.LambdaConfig.CreateAuthChallengeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.CreateAuthChallenge") + } + mg.Spec.InitProvider.LambdaConfig.CreateAuthChallenge = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.CreateAuthChallengeRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + if mg.Spec.InitProvider.LambdaConfig.CustomEmailSender != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.CustomEmailSender.LambdaArn), + Extract: common.ARNExtractor(), + Reference: 
mg.Spec.InitProvider.LambdaConfig.CustomEmailSender.LambdaArnRef, + Selector: mg.Spec.InitProvider.LambdaConfig.CustomEmailSender.LambdaArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.CustomEmailSender.LambdaArn") + } + mg.Spec.InitProvider.LambdaConfig.CustomEmailSender.LambdaArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.CustomEmailSender.LambdaArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.CustomMessage), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.CustomMessageRef, + Selector: mg.Spec.InitProvider.LambdaConfig.CustomMessageSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.CustomMessage") + } + mg.Spec.InitProvider.LambdaConfig.CustomMessage = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.CustomMessageRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + if mg.Spec.InitProvider.LambdaConfig.CustomSMSSender != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.CustomSMSSender.LambdaArn), + Extract: 
common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.CustomSMSSender.LambdaArnRef, + Selector: mg.Spec.InitProvider.LambdaConfig.CustomSMSSender.LambdaArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.CustomSMSSender.LambdaArn") + } + mg.Spec.InitProvider.LambdaConfig.CustomSMSSender.LambdaArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.CustomSMSSender.LambdaArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.DefineAuthChallenge), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.DefineAuthChallengeRef, + Selector: mg.Spec.InitProvider.LambdaConfig.DefineAuthChallengeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.DefineAuthChallenge") + } + mg.Spec.InitProvider.LambdaConfig.DefineAuthChallenge = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.DefineAuthChallengeRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: 
mg.Spec.InitProvider.LambdaConfig.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.LambdaConfig.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.KMSKeyID") + } + mg.Spec.InitProvider.LambdaConfig.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.KMSKeyIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.PostAuthentication), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.PostAuthenticationRef, + Selector: mg.Spec.InitProvider.LambdaConfig.PostAuthenticationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.PostAuthentication") + } + mg.Spec.InitProvider.LambdaConfig.PostAuthentication = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.PostAuthenticationRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.PostConfirmation), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.PostConfirmationRef, + Selector: 
mg.Spec.InitProvider.LambdaConfig.PostConfirmationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.PostConfirmation") + } + mg.Spec.InitProvider.LambdaConfig.PostConfirmation = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.PostConfirmationRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.PreAuthentication), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.PreAuthenticationRef, + Selector: mg.Spec.InitProvider.LambdaConfig.PreAuthenticationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.PreAuthentication") + } + mg.Spec.InitProvider.LambdaConfig.PreAuthentication = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.PreAuthenticationRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.PreSignUp), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.PreSignUpRef, + Selector: mg.Spec.InitProvider.LambdaConfig.PreSignUpSelector, + To: reference.To{List: l, Managed: 
m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.PreSignUp") + } + mg.Spec.InitProvider.LambdaConfig.PreSignUp = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.PreSignUpRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.PreTokenGeneration), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.PreTokenGenerationRef, + Selector: mg.Spec.InitProvider.LambdaConfig.PreTokenGenerationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.PreTokenGeneration") + } + mg.Spec.InitProvider.LambdaConfig.PreTokenGeneration = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.PreTokenGenerationRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.UserMigration), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.UserMigrationRef, + Selector: mg.Spec.InitProvider.LambdaConfig.UserMigrationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.LambdaConfig.UserMigration") + } + mg.Spec.InitProvider.LambdaConfig.UserMigration = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.UserMigrationRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LambdaConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LambdaConfig.VerifyAuthChallengeResponse), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.LambdaConfig.VerifyAuthChallengeResponseRef, + Selector: mg.Spec.InitProvider.LambdaConfig.VerifyAuthChallengeResponseSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LambdaConfig.VerifyAuthChallengeResponse") + } + mg.Spec.InitProvider.LambdaConfig.VerifyAuthChallengeResponse = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LambdaConfig.VerifyAuthChallengeResponseRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.SMSConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SMSConfiguration.SnsCallerArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.SMSConfiguration.SnsCallerArnRef, + Selector: mg.Spec.InitProvider.SMSConfiguration.SnsCallerArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.SMSConfiguration.SnsCallerArn") + } + mg.Spec.InitProvider.SMSConfiguration.SnsCallerArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SMSConfiguration.SnsCallerArnRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/cognitoidp/v1beta2/zz_groupversion_info.go b/apis/cognitoidp/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..0d092a0d89 --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cognitoidp.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "cognitoidp.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cognitoidp/v1beta2/zz_riskconfiguration_terraformed.go b/apis/cognitoidp/v1beta2/zz_riskconfiguration_terraformed.go new file mode 100755 index 0000000000..12e9b5769d --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_riskconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RiskConfiguration +func (mg *RiskConfiguration) GetTerraformResourceType() string { + return "aws_cognito_risk_configuration" +} + +// GetConnectionDetailsMapping for this RiskConfiguration +func (tr *RiskConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RiskConfiguration +func (tr *RiskConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RiskConfiguration +func (tr *RiskConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this RiskConfiguration +func (tr *RiskConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RiskConfiguration +func (tr *RiskConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RiskConfiguration +func (tr *RiskConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RiskConfiguration +func (tr *RiskConfiguration) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this RiskConfiguration +func (tr *RiskConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RiskConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *RiskConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &RiskConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RiskConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cognitoidp/v1beta2/zz_riskconfiguration_types.go b/apis/cognitoidp/v1beta2/zz_riskconfiguration_types.go new file mode 100755 index 0000000000..79ccef815d --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_riskconfiguration_types.go @@ -0,0 +1,600 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountTakeoverRiskConfigurationInitParameters struct { + + // Account takeover risk configuration actions. See details below. + Actions *ActionsInitParameters `json:"actions,omitempty" tf:"actions,omitempty"` + + // The notify configuration used to construct email notifications. See details below. + NotifyConfiguration *NotifyConfigurationInitParameters `json:"notifyConfiguration,omitempty" tf:"notify_configuration,omitempty"` +} + +type AccountTakeoverRiskConfigurationObservation struct { + + // Account takeover risk configuration actions. See details below. + Actions *ActionsObservation `json:"actions,omitempty" tf:"actions,omitempty"` + + // The notify configuration used to construct email notifications. See details below. + NotifyConfiguration *NotifyConfigurationObservation `json:"notifyConfiguration,omitempty" tf:"notify_configuration,omitempty"` +} + +type AccountTakeoverRiskConfigurationParameters struct { + + // Account takeover risk configuration actions. See details below. 
+ // +kubebuilder:validation:Optional + Actions *ActionsParameters `json:"actions" tf:"actions,omitempty"` + + // The notify configuration used to construct email notifications. See details below. + // +kubebuilder:validation:Optional + NotifyConfiguration *NotifyConfigurationParameters `json:"notifyConfiguration" tf:"notify_configuration,omitempty"` +} + +type ActionsInitParameters struct { + + // Action to take for a high risk. See action block below. + HighAction *HighActionInitParameters `json:"highAction,omitempty" tf:"high_action,omitempty"` + + // Action to take for a low risk. See action block below. + LowAction *LowActionInitParameters `json:"lowAction,omitempty" tf:"low_action,omitempty"` + + // Action to take for a medium risk. See action block below. + MediumAction *MediumActionInitParameters `json:"mediumAction,omitempty" tf:"medium_action,omitempty"` +} + +type ActionsObservation struct { + + // Action to take for a high risk. See action block below. + HighAction *HighActionObservation `json:"highAction,omitempty" tf:"high_action,omitempty"` + + // Action to take for a low risk. See action block below. + LowAction *LowActionObservation `json:"lowAction,omitempty" tf:"low_action,omitempty"` + + // Action to take for a medium risk. See action block below. + MediumAction *MediumActionObservation `json:"mediumAction,omitempty" tf:"medium_action,omitempty"` +} + +type ActionsParameters struct { + + // Action to take for a high risk. See action block below. + // +kubebuilder:validation:Optional + HighAction *HighActionParameters `json:"highAction,omitempty" tf:"high_action,omitempty"` + + // Action to take for a low risk. See action block below. + // +kubebuilder:validation:Optional + LowAction *LowActionParameters `json:"lowAction,omitempty" tf:"low_action,omitempty"` + + // Action to take for a medium risk. See action block below. 
+ // +kubebuilder:validation:Optional + MediumAction *MediumActionParameters `json:"mediumAction,omitempty" tf:"medium_action,omitempty"` +} + +type BlockEmailInitParameters struct { + + // The email HTML body. + HTMLBody *string `json:"htmlBody,omitempty" tf:"html_body,omitempty"` + + // The email subject. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // The email text body. + TextBody *string `json:"textBody,omitempty" tf:"text_body,omitempty"` +} + +type BlockEmailObservation struct { + + // The email HTML body. + HTMLBody *string `json:"htmlBody,omitempty" tf:"html_body,omitempty"` + + // The email subject. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // The email text body. + TextBody *string `json:"textBody,omitempty" tf:"text_body,omitempty"` +} + +type BlockEmailParameters struct { + + // The email HTML body. + // +kubebuilder:validation:Optional + HTMLBody *string `json:"htmlBody" tf:"html_body,omitempty"` + + // The email subject. + // +kubebuilder:validation:Optional + Subject *string `json:"subject" tf:"subject,omitempty"` + + // The email text body. + // +kubebuilder:validation:Optional + TextBody *string `json:"textBody" tf:"text_body,omitempty"` +} + +type CompromisedCredentialsRiskConfigurationActionsInitParameters struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + EventAction *string `json:"eventAction,omitempty" tf:"event_action,omitempty"` +} + +type CompromisedCredentialsRiskConfigurationActionsObservation struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + EventAction *string `json:"eventAction,omitempty" tf:"event_action,omitempty"` +} + +type CompromisedCredentialsRiskConfigurationActionsParameters struct { + + // The action to take in response to the account takeover action. 
Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + // +kubebuilder:validation:Optional + EventAction *string `json:"eventAction" tf:"event_action,omitempty"` +} + +type CompromisedCredentialsRiskConfigurationInitParameters struct { + + // The compromised credentials risk configuration actions. See details below. + Actions *CompromisedCredentialsRiskConfigurationActionsInitParameters `json:"actions,omitempty" tf:"actions,omitempty"` + + // Perform the action for these events. The default is to perform all events if no event filter is specified. Valid values are SIGN_IN, PASSWORD_CHANGE, and SIGN_UP. + // +listType=set + EventFilter []*string `json:"eventFilter,omitempty" tf:"event_filter,omitempty"` +} + +type CompromisedCredentialsRiskConfigurationObservation struct { + + // The compromised credentials risk configuration actions. See details below. + Actions *CompromisedCredentialsRiskConfigurationActionsObservation `json:"actions,omitempty" tf:"actions,omitempty"` + + // Perform the action for these events. The default is to perform all events if no event filter is specified. Valid values are SIGN_IN, PASSWORD_CHANGE, and SIGN_UP. + // +listType=set + EventFilter []*string `json:"eventFilter,omitempty" tf:"event_filter,omitempty"` +} + +type CompromisedCredentialsRiskConfigurationParameters struct { + + // The compromised credentials risk configuration actions. See details below. + // +kubebuilder:validation:Optional + Actions *CompromisedCredentialsRiskConfigurationActionsParameters `json:"actions" tf:"actions,omitempty"` + + // Perform the action for these events. The default is to perform all events if no event filter is specified. Valid values are SIGN_IN, PASSWORD_CHANGE, and SIGN_UP. + // +kubebuilder:validation:Optional + // +listType=set + EventFilter []*string `json:"eventFilter,omitempty" tf:"event_filter,omitempty"` +} + +type HighActionInitParameters struct { + + // The action to take in response to the account takeover action. 
Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + EventAction *string `json:"eventAction,omitempty" tf:"event_action,omitempty"` + + // Whether to send a notification. + Notify *bool `json:"notify,omitempty" tf:"notify,omitempty"` +} + +type HighActionObservation struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + EventAction *string `json:"eventAction,omitempty" tf:"event_action,omitempty"` + + // Whether to send a notification. + Notify *bool `json:"notify,omitempty" tf:"notify,omitempty"` +} + +type HighActionParameters struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + // +kubebuilder:validation:Optional + EventAction *string `json:"eventAction" tf:"event_action,omitempty"` + + // Whether to send a notification. + // +kubebuilder:validation:Optional + Notify *bool `json:"notify" tf:"notify,omitempty"` +} + +type LowActionInitParameters struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + EventAction *string `json:"eventAction,omitempty" tf:"event_action,omitempty"` + + // Whether to send a notification. + Notify *bool `json:"notify,omitempty" tf:"notify,omitempty"` +} + +type LowActionObservation struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + EventAction *string `json:"eventAction,omitempty" tf:"event_action,omitempty"` + + // Whether to send a notification. + Notify *bool `json:"notify,omitempty" tf:"notify,omitempty"` +} + +type LowActionParameters struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. 
+ // +kubebuilder:validation:Optional + EventAction *string `json:"eventAction" tf:"event_action,omitempty"` + + // Whether to send a notification. + // +kubebuilder:validation:Optional + Notify *bool `json:"notify" tf:"notify,omitempty"` +} + +type MediumActionInitParameters struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + EventAction *string `json:"eventAction,omitempty" tf:"event_action,omitempty"` + + // Whether to send a notification. + Notify *bool `json:"notify,omitempty" tf:"notify,omitempty"` +} + +type MediumActionObservation struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + EventAction *string `json:"eventAction,omitempty" tf:"event_action,omitempty"` + + // Whether to send a notification. + Notify *bool `json:"notify,omitempty" tf:"notify,omitempty"` +} + +type MediumActionParameters struct { + + // The action to take in response to the account takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + // +kubebuilder:validation:Optional + EventAction *string `json:"eventAction" tf:"event_action,omitempty"` + + // Whether to send a notification. + // +kubebuilder:validation:Optional + Notify *bool `json:"notify" tf:"notify,omitempty"` +} + +type MfaEmailInitParameters struct { + + // The email HTML body. + HTMLBody *string `json:"htmlBody,omitempty" tf:"html_body,omitempty"` + + // The email subject. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // The email text body. + TextBody *string `json:"textBody,omitempty" tf:"text_body,omitempty"` +} + +type MfaEmailObservation struct { + + // The email HTML body. + HTMLBody *string `json:"htmlBody,omitempty" tf:"html_body,omitempty"` + + // The email subject. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // The email text body. 
+ TextBody *string `json:"textBody,omitempty" tf:"text_body,omitempty"` +} + +type MfaEmailParameters struct { + + // The email HTML body. + // +kubebuilder:validation:Optional + HTMLBody *string `json:"htmlBody" tf:"html_body,omitempty"` + + // The email subject. + // +kubebuilder:validation:Optional + Subject *string `json:"subject" tf:"subject,omitempty"` + + // The email text body. + // +kubebuilder:validation:Optional + TextBody *string `json:"textBody" tf:"text_body,omitempty"` +} + +type NoActionEmailInitParameters struct { + + // The email HTML body. + HTMLBody *string `json:"htmlBody,omitempty" tf:"html_body,omitempty"` + + // The email subject. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // The email text body. + TextBody *string `json:"textBody,omitempty" tf:"text_body,omitempty"` +} + +type NoActionEmailObservation struct { + + // The email HTML body. + HTMLBody *string `json:"htmlBody,omitempty" tf:"html_body,omitempty"` + + // The email subject. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // The email text body. + TextBody *string `json:"textBody,omitempty" tf:"text_body,omitempty"` +} + +type NoActionEmailParameters struct { + + // The email HTML body. + // +kubebuilder:validation:Optional + HTMLBody *string `json:"htmlBody" tf:"html_body,omitempty"` + + // The email subject. + // +kubebuilder:validation:Optional + Subject *string `json:"subject" tf:"subject,omitempty"` + + // The email text body. + // +kubebuilder:validation:Optional + TextBody *string `json:"textBody" tf:"text_body,omitempty"` +} + +type NotifyConfigurationInitParameters struct { + + // Email template used when a detected risk event is blocked. See notify email type below. + BlockEmail *BlockEmailInitParameters `json:"blockEmail,omitempty" tf:"block_email,omitempty"` + + // The email address that is sending the email. 
The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES. + From *string `json:"from,omitempty" tf:"from,omitempty"` + + // The multi-factor authentication (MFA) email template used when MFA is challenged as part of a detected risk. See notify email type below. + MfaEmail *MfaEmailInitParameters `json:"mfaEmail,omitempty" tf:"mfa_email,omitempty"` + + // The email template used when a detected risk event is allowed. See notify email type below. + NoActionEmail *NoActionEmailInitParameters `json:"noActionEmail,omitempty" tf:"no_action_email,omitempty"` + + // The destination to which the receiver of an email should reply to. + ReplyTo *string `json:"replyTo,omitempty" tf:"reply_to,omitempty"` + + // The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. This identity permits Amazon Cognito to send for the email address specified in the From parameter. + SourceArn *string `json:"sourceArn,omitempty" tf:"source_arn,omitempty"` +} + +type NotifyConfigurationObservation struct { + + // Email template used when a detected risk event is blocked. See notify email type below. + BlockEmail *BlockEmailObservation `json:"blockEmail,omitempty" tf:"block_email,omitempty"` + + // The email address that is sending the email. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES. + From *string `json:"from,omitempty" tf:"from,omitempty"` + + // The multi-factor authentication (MFA) email template used when MFA is challenged as part of a detected risk. See notify email type below. + MfaEmail *MfaEmailObservation `json:"mfaEmail,omitempty" tf:"mfa_email,omitempty"` + + // The email template used when a detected risk event is allowed. See notify email type below. 
+ NoActionEmail *NoActionEmailObservation `json:"noActionEmail,omitempty" tf:"no_action_email,omitempty"` + + // The destination to which the receiver of an email should reply to. + ReplyTo *string `json:"replyTo,omitempty" tf:"reply_to,omitempty"` + + // The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. This identity permits Amazon Cognito to send for the email address specified in the From parameter. + SourceArn *string `json:"sourceArn,omitempty" tf:"source_arn,omitempty"` +} + +type NotifyConfigurationParameters struct { + + // Email template used when a detected risk event is blocked. See notify email type below. + // +kubebuilder:validation:Optional + BlockEmail *BlockEmailParameters `json:"blockEmail,omitempty" tf:"block_email,omitempty"` + + // The email address that is sending the email. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES. + // +kubebuilder:validation:Optional + From *string `json:"from,omitempty" tf:"from,omitempty"` + + // The multi-factor authentication (MFA) email template used when MFA is challenged as part of a detected risk. See notify email type below. + // +kubebuilder:validation:Optional + MfaEmail *MfaEmailParameters `json:"mfaEmail,omitempty" tf:"mfa_email,omitempty"` + + // The email template used when a detected risk event is allowed. See notify email type below. + // +kubebuilder:validation:Optional + NoActionEmail *NoActionEmailParameters `json:"noActionEmail,omitempty" tf:"no_action_email,omitempty"` + + // The destination to which the receiver of an email should reply to. + // +kubebuilder:validation:Optional + ReplyTo *string `json:"replyTo,omitempty" tf:"reply_to,omitempty"` + + // The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. 
This identity permits Amazon Cognito to send for the email address specified in the From parameter. + // +kubebuilder:validation:Optional + SourceArn *string `json:"sourceArn" tf:"source_arn,omitempty"` +} + +type RiskConfigurationInitParameters struct { + + // The account takeover risk configuration. See details below. + AccountTakeoverRiskConfiguration *AccountTakeoverRiskConfigurationInitParameters `json:"accountTakeoverRiskConfiguration,omitempty" tf:"account_takeover_risk_configuration,omitempty"` + + // The app client ID. When the client ID is not provided, the same risk configuration is applied to all the clients in the User Pool. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The compromised credentials risk configuration. See details below. + CompromisedCredentialsRiskConfiguration *CompromisedCredentialsRiskConfigurationInitParameters `json:"compromisedCredentialsRiskConfiguration,omitempty" tf:"compromised_credentials_risk_configuration,omitempty"` + + // The configuration to override the risk decision. See details below. + RiskExceptionConfiguration *RiskExceptionConfigurationInitParameters `json:"riskExceptionConfiguration,omitempty" tf:"risk_exception_configuration,omitempty"` + + // The user pool ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` + + // Reference to a UserPool in cognitoidp to populate userPoolId. + // +kubebuilder:validation:Optional + UserPoolIDRef *v1.Reference `json:"userPoolIdRef,omitempty" tf:"-"` + + // Selector for a UserPool in cognitoidp to populate userPoolId. 
+ // +kubebuilder:validation:Optional + UserPoolIDSelector *v1.Selector `json:"userPoolIdSelector,omitempty" tf:"-"` +} + +type RiskConfigurationObservation struct { + + // The account takeover risk configuration. See details below. + AccountTakeoverRiskConfiguration *AccountTakeoverRiskConfigurationObservation `json:"accountTakeoverRiskConfiguration,omitempty" tf:"account_takeover_risk_configuration,omitempty"` + + // The app client ID. When the client ID is not provided, the same risk configuration is applied to all the clients in the User Pool. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The compromised credentials risk configuration. See details below. + CompromisedCredentialsRiskConfiguration *CompromisedCredentialsRiskConfigurationObservation `json:"compromisedCredentialsRiskConfiguration,omitempty" tf:"compromised_credentials_risk_configuration,omitempty"` + + // The user pool ID or the user pool ID and Client Id separated by a : if the configuration is client specific. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The configuration to override the risk decision. See details below. + RiskExceptionConfiguration *RiskExceptionConfigurationObservation `json:"riskExceptionConfiguration,omitempty" tf:"risk_exception_configuration,omitempty"` + + // The user pool ID. + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` +} + +type RiskConfigurationParameters struct { + + // The account takeover risk configuration. See details below. + // +kubebuilder:validation:Optional + AccountTakeoverRiskConfiguration *AccountTakeoverRiskConfigurationParameters `json:"accountTakeoverRiskConfiguration,omitempty" tf:"account_takeover_risk_configuration,omitempty"` + + // The app client ID. When the client ID is not provided, the same risk configuration is applied to all the clients in the User Pool. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The compromised credentials risk configuration. See details below. + // +kubebuilder:validation:Optional + CompromisedCredentialsRiskConfiguration *CompromisedCredentialsRiskConfigurationParameters `json:"compromisedCredentialsRiskConfiguration,omitempty" tf:"compromised_credentials_risk_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The configuration to override the risk decision. See details below. + // +kubebuilder:validation:Optional + RiskExceptionConfiguration *RiskExceptionConfigurationParameters `json:"riskExceptionConfiguration,omitempty" tf:"risk_exception_configuration,omitempty"` + + // The user pool ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` + + // Reference to a UserPool in cognitoidp to populate userPoolId. + // +kubebuilder:validation:Optional + UserPoolIDRef *v1.Reference `json:"userPoolIdRef,omitempty" tf:"-"` + + // Selector for a UserPool in cognitoidp to populate userPoolId. + // +kubebuilder:validation:Optional + UserPoolIDSelector *v1.Selector `json:"userPoolIdSelector,omitempty" tf:"-"` +} + +type RiskExceptionConfigurationInitParameters struct { + + // Overrides the risk decision to always block the pre-authentication requests. + // The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix. + // Can contain a maximum of 200 items. 
+ // +listType=set + BlockedIPRangeList []*string `json:"blockedIpRangeList,omitempty" tf:"blocked_ip_range_list,omitempty"` + + // Risk detection isn't performed on the IP addresses in this range list. + // The IP range is in CIDR notation. + // Can contain a maximum of 200 items. + // +listType=set + SkippedIPRangeList []*string `json:"skippedIpRangeList,omitempty" tf:"skipped_ip_range_list,omitempty"` +} + +type RiskExceptionConfigurationObservation struct { + + // Overrides the risk decision to always block the pre-authentication requests. + // The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix. + // Can contain a maximum of 200 items. + // +listType=set + BlockedIPRangeList []*string `json:"blockedIpRangeList,omitempty" tf:"blocked_ip_range_list,omitempty"` + + // Risk detection isn't performed on the IP addresses in this range list. + // The IP range is in CIDR notation. + // Can contain a maximum of 200 items. + // +listType=set + SkippedIPRangeList []*string `json:"skippedIpRangeList,omitempty" tf:"skipped_ip_range_list,omitempty"` +} + +type RiskExceptionConfigurationParameters struct { + + // Overrides the risk decision to always block the pre-authentication requests. + // The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix. + // Can contain a maximum of 200 items. + // +kubebuilder:validation:Optional + // +listType=set + BlockedIPRangeList []*string `json:"blockedIpRangeList,omitempty" tf:"blocked_ip_range_list,omitempty"` + + // Risk detection isn't performed on the IP addresses in this range list. + // The IP range is in CIDR notation. + // Can contain a maximum of 200 items. 
+ // +kubebuilder:validation:Optional + // +listType=set + SkippedIPRangeList []*string `json:"skippedIpRangeList,omitempty" tf:"skipped_ip_range_list,omitempty"` +} + +// RiskConfigurationSpec defines the desired state of RiskConfiguration +type RiskConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RiskConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RiskConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// RiskConfigurationStatus defines the observed state of RiskConfiguration. +type RiskConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RiskConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RiskConfiguration is the Schema for the RiskConfigurations API. Provides a Cognito Risk Configuration resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type RiskConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec RiskConfigurationSpec `json:"spec"` + Status RiskConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RiskConfigurationList contains a list of RiskConfigurations +type RiskConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RiskConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + RiskConfiguration_Kind = "RiskConfiguration" + RiskConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RiskConfiguration_Kind}.String() + RiskConfiguration_KindAPIVersion = RiskConfiguration_Kind + "." + CRDGroupVersion.String() + RiskConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(RiskConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&RiskConfiguration{}, &RiskConfigurationList{}) +} diff --git a/apis/cognitoidp/v1beta2/zz_userpool_terraformed.go b/apis/cognitoidp/v1beta2/zz_userpool_terraformed.go new file mode 100755 index 0000000000..670a11d28e --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_userpool_terraformed.go @@ -0,0 +1,133 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this UserPool +func (mg *UserPool) GetTerraformResourceType() string { + return "aws_cognito_user_pool" +} + +// GetConnectionDetailsMapping for this UserPool +func (tr *UserPool) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this UserPool +func (tr *UserPool) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this UserPool +func (tr *UserPool) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this UserPool +func (tr *UserPool) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this UserPool +func (tr *UserPool) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this UserPool +func (tr *UserPool) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this UserPool +func (tr *UserPool) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, 
&base) +} + +// GetInitParameters of this UserPool +func (tr *UserPool) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this UserPool using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *UserPool) LateInitialize(attrs []byte) (bool, error) { + params := &UserPoolParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("EmailVerificationMessage")) + opts = append(opts, resource.WithNameFilter("EmailVerificationSubject")) + opts = append(opts, resource.WithNameFilter("SMSVerificationMessage")) + opts = append(opts, resource.WithNameFilter("VerificationMessageTemplate")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *UserPool) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cognitoidp/v1beta2/zz_userpool_types.go b/apis/cognitoidp/v1beta2/zz_userpool_types.go new file mode 100755 index 0000000000..88c48cebdc --- /dev/null +++ b/apis/cognitoidp/v1beta2/zz_userpool_types.go @@ -0,0 +1,1482 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountRecoverySettingInitParameters struct { + + // List of Account Recovery Options of the following structure: + RecoveryMechanism []RecoveryMechanismInitParameters `json:"recoveryMechanism,omitempty" tf:"recovery_mechanism,omitempty"` +} + +type AccountRecoverySettingObservation struct { + + // List of Account Recovery Options of the following structure: + RecoveryMechanism []RecoveryMechanismObservation `json:"recoveryMechanism,omitempty" tf:"recovery_mechanism,omitempty"` +} + +type AccountRecoverySettingParameters struct { + + // List of Account Recovery Options of the following structure: + // +kubebuilder:validation:Optional + RecoveryMechanism []RecoveryMechanismParameters `json:"recoveryMechanism,omitempty" tf:"recovery_mechanism,omitempty"` +} + +type AdminCreateUserConfigInitParameters struct { + + // Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app. + AllowAdminCreateUserOnly *bool `json:"allowAdminCreateUserOnly,omitempty" tf:"allow_admin_create_user_only,omitempty"` + + // Invite message template structure. Detailed below. 
+ InviteMessageTemplate *InviteMessageTemplateInitParameters `json:"inviteMessageTemplate,omitempty" tf:"invite_message_template,omitempty"` +} + +type AdminCreateUserConfigObservation struct { + + // Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app. + AllowAdminCreateUserOnly *bool `json:"allowAdminCreateUserOnly,omitempty" tf:"allow_admin_create_user_only,omitempty"` + + // Invite message template structure. Detailed below. + InviteMessageTemplate *InviteMessageTemplateObservation `json:"inviteMessageTemplate,omitempty" tf:"invite_message_template,omitempty"` +} + +type AdminCreateUserConfigParameters struct { + + // Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app. + // +kubebuilder:validation:Optional + AllowAdminCreateUserOnly *bool `json:"allowAdminCreateUserOnly,omitempty" tf:"allow_admin_create_user_only,omitempty"` + + // Invite message template structure. Detailed below. + // +kubebuilder:validation:Optional + InviteMessageTemplate *InviteMessageTemplateParameters `json:"inviteMessageTemplate,omitempty" tf:"invite_message_template,omitempty"` +} + +type CustomEmailSenderInitParameters struct { + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send email notifications to users. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Reference to a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnRef *v1.Reference `json:"lambdaArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate lambdaArn. 
+ // +kubebuilder:validation:Optional + LambdaArnSelector *v1.Selector `json:"lambdaArnSelector,omitempty" tf:"-"` + + // The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom email Lambda function. The only supported value is V1_0. + LambdaVersion *string `json:"lambdaVersion,omitempty" tf:"lambda_version,omitempty"` +} + +type CustomEmailSenderObservation struct { + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send email notifications to users. + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom email Lambda function. The only supported value is V1_0. + LambdaVersion *string `json:"lambdaVersion,omitempty" tf:"lambda_version,omitempty"` +} + +type CustomEmailSenderParameters struct { + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send email notifications to users. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Reference to a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnRef *v1.Reference `json:"lambdaArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnSelector *v1.Selector `json:"lambdaArnSelector,omitempty" tf:"-"` + + // The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom email Lambda function. The only supported value is V1_0. 
+ // +kubebuilder:validation:Optional + LambdaVersion *string `json:"lambdaVersion" tf:"lambda_version,omitempty"` +} + +type CustomSMSSenderInitParameters struct { + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send SMS notifications to users. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Reference to a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnRef *v1.Reference `json:"lambdaArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnSelector *v1.Selector `json:"lambdaArnSelector,omitempty" tf:"-"` + + // The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom SMS Lambda function. The only supported value is V1_0. + LambdaVersion *string `json:"lambdaVersion,omitempty" tf:"lambda_version,omitempty"` +} + +type CustomSMSSenderObservation struct { + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send SMS notifications to users. + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom SMS Lambda function. The only supported value is V1_0. + LambdaVersion *string `json:"lambdaVersion,omitempty" tf:"lambda_version,omitempty"` +} + +type CustomSMSSenderParameters struct { + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send SMS notifications to users. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Reference to a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnRef *v1.Reference `json:"lambdaArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate lambdaArn. + // +kubebuilder:validation:Optional + LambdaArnSelector *v1.Selector `json:"lambdaArnSelector,omitempty" tf:"-"` + + // The Lambda version represents the signature of the "request" attribute in the "event" information Amazon Cognito passes to your custom SMS Lambda function. The only supported value is V1_0. + // +kubebuilder:validation:Optional + LambdaVersion *string `json:"lambdaVersion" tf:"lambda_version,omitempty"` +} + +type DeviceConfigurationInitParameters struct { + + // Whether a challenge is required on a new device. Only applicable to a new device. + ChallengeRequiredOnNewDevice *bool `json:"challengeRequiredOnNewDevice,omitempty" tf:"challenge_required_on_new_device,omitempty"` + + // Whether a device is only remembered on user prompt. false equates to "Always" remember, true is "User Opt In," and not using a device_configuration block is "No." + DeviceOnlyRememberedOnUserPrompt *bool `json:"deviceOnlyRememberedOnUserPrompt,omitempty" tf:"device_only_remembered_on_user_prompt,omitempty"` +} + +type DeviceConfigurationObservation struct { + + // Whether a challenge is required on a new device. Only applicable to a new device. + ChallengeRequiredOnNewDevice *bool `json:"challengeRequiredOnNewDevice,omitempty" tf:"challenge_required_on_new_device,omitempty"` + + // Whether a device is only remembered on user prompt. 
false equates to "Always" remember, true is "User Opt In," and not using a device_configuration block is "No." + DeviceOnlyRememberedOnUserPrompt *bool `json:"deviceOnlyRememberedOnUserPrompt,omitempty" tf:"device_only_remembered_on_user_prompt,omitempty"` +} + +type DeviceConfigurationParameters struct { + + // Whether a challenge is required on a new device. Only applicable to a new device. + // +kubebuilder:validation:Optional + ChallengeRequiredOnNewDevice *bool `json:"challengeRequiredOnNewDevice,omitempty" tf:"challenge_required_on_new_device,omitempty"` + + // Whether a device is only remembered on user prompt. false equates to "Always" remember, true is "User Opt In," and not using a device_configuration block is "No." + // +kubebuilder:validation:Optional + DeviceOnlyRememberedOnUserPrompt *bool `json:"deviceOnlyRememberedOnUserPrompt,omitempty" tf:"device_only_remembered_on_user_prompt,omitempty"` +} + +type EmailConfigurationInitParameters struct { + + // Email configuration set name from SES. + ConfigurationSet *string `json:"configurationSet,omitempty" tf:"configuration_set,omitempty"` + + // Email delivery method to use. COGNITO_DEFAULT for the default email functionality built into Cognito or DEVELOPER to use your Amazon SES configuration. Required to be DEVELOPER if from_email_address is set. + EmailSendingAccount *string `json:"emailSendingAccount,omitempty" tf:"email_sending_account,omitempty"` + + // Sender’s email address or sender’s display name with their email address (e.g., john@example.com, John Smith or \"John Smith Ph.D.\" ). Escaped double quotes are required around display names that contain certain characters as specified in RFC 5322. + FromEmailAddress *string `json:"fromEmailAddress,omitempty" tf:"from_email_address,omitempty"` + + // REPLY-TO email address. + ReplyToEmailAddress *string `json:"replyToEmailAddress,omitempty" tf:"reply_to_email_address,omitempty"` + + // ARN of the SES verified email identity to use. 
Required if email_sending_account is set to DEVELOPER. + SourceArn *string `json:"sourceArn,omitempty" tf:"source_arn,omitempty"` +} + +type EmailConfigurationObservation struct { + + // Email configuration set name from SES. + ConfigurationSet *string `json:"configurationSet,omitempty" tf:"configuration_set,omitempty"` + + // Email delivery method to use. COGNITO_DEFAULT for the default email functionality built into Cognito or DEVELOPER to use your Amazon SES configuration. Required to be DEVELOPER if from_email_address is set. + EmailSendingAccount *string `json:"emailSendingAccount,omitempty" tf:"email_sending_account,omitempty"` + + // Sender’s email address or sender’s display name with their email address (e.g., john@example.com, John Smith or \"John Smith Ph.D.\" ). Escaped double quotes are required around display names that contain certain characters as specified in RFC 5322. + FromEmailAddress *string `json:"fromEmailAddress,omitempty" tf:"from_email_address,omitempty"` + + // REPLY-TO email address. + ReplyToEmailAddress *string `json:"replyToEmailAddress,omitempty" tf:"reply_to_email_address,omitempty"` + + // ARN of the SES verified email identity to use. Required if email_sending_account is set to DEVELOPER. + SourceArn *string `json:"sourceArn,omitempty" tf:"source_arn,omitempty"` +} + +type EmailConfigurationParameters struct { + + // Email configuration set name from SES. + // +kubebuilder:validation:Optional + ConfigurationSet *string `json:"configurationSet,omitempty" tf:"configuration_set,omitempty"` + + // Email delivery method to use. COGNITO_DEFAULT for the default email functionality built into Cognito or DEVELOPER to use your Amazon SES configuration. Required to be DEVELOPER if from_email_address is set. 
+ // +kubebuilder:validation:Optional + EmailSendingAccount *string `json:"emailSendingAccount,omitempty" tf:"email_sending_account,omitempty"` + + // Sender’s email address or sender’s display name with their email address (e.g., john@example.com, John Smith or \"John Smith Ph.D.\" ). Escaped double quotes are required around display names that contain certain characters as specified in RFC 5322. + // +kubebuilder:validation:Optional + FromEmailAddress *string `json:"fromEmailAddress,omitempty" tf:"from_email_address,omitempty"` + + // REPLY-TO email address. + // +kubebuilder:validation:Optional + ReplyToEmailAddress *string `json:"replyToEmailAddress,omitempty" tf:"reply_to_email_address,omitempty"` + + // ARN of the SES verified email identity to use. Required if email_sending_account is set to DEVELOPER. + // +kubebuilder:validation:Optional + SourceArn *string `json:"sourceArn,omitempty" tf:"source_arn,omitempty"` +} + +type InviteMessageTemplateInitParameters struct { + + // Message template for email messages. Must contain {username} and {####} placeholders, for username and temporary password, respectively. + EmailMessage *string `json:"emailMessage,omitempty" tf:"email_message,omitempty"` + + // Subject line for email messages. + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` + + // Message template for SMS messages. Must contain {username} and {####} placeholders, for username and temporary password, respectively. + SMSMessage *string `json:"smsMessage,omitempty" tf:"sms_message,omitempty"` +} + +type InviteMessageTemplateObservation struct { + + // Message template for email messages. Must contain {username} and {####} placeholders, for username and temporary password, respectively. + EmailMessage *string `json:"emailMessage,omitempty" tf:"email_message,omitempty"` + + // Subject line for email messages. 
+ EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` + + // Message template for SMS messages. Must contain {username} and {####} placeholders, for username and temporary password, respectively. + SMSMessage *string `json:"smsMessage,omitempty" tf:"sms_message,omitempty"` +} + +type InviteMessageTemplateParameters struct { + + // Message template for email messages. Must contain {username} and {####} placeholders, for username and temporary password, respectively. + // +kubebuilder:validation:Optional + EmailMessage *string `json:"emailMessage,omitempty" tf:"email_message,omitempty"` + + // Subject line for email messages. + // +kubebuilder:validation:Optional + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` + + // Message template for SMS messages. Must contain {username} and {####} placeholders, for username and temporary password, respectively. + // +kubebuilder:validation:Optional + SMSMessage *string `json:"smsMessage,omitempty" tf:"sms_message,omitempty"` +} + +type LambdaConfigInitParameters struct { + + // ARN of the lambda creating an authentication challenge. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + CreateAuthChallenge *string `json:"createAuthChallenge,omitempty" tf:"create_auth_challenge,omitempty"` + + // Reference to a Function in lambda to populate createAuthChallenge. + // +kubebuilder:validation:Optional + CreateAuthChallengeRef *v1.Reference `json:"createAuthChallengeRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate createAuthChallenge. + // +kubebuilder:validation:Optional + CreateAuthChallengeSelector *v1.Selector `json:"createAuthChallengeSelector,omitempty" tf:"-"` + + // A custom email sender AWS Lambda trigger. See custom_email_sender Below. 
+ CustomEmailSender *CustomEmailSenderInitParameters `json:"customEmailSender,omitempty" tf:"custom_email_sender,omitempty"` + + // Custom Message AWS Lambda trigger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + CustomMessage *string `json:"customMessage,omitempty" tf:"custom_message,omitempty"` + + // Reference to a Function in lambda to populate customMessage. + // +kubebuilder:validation:Optional + CustomMessageRef *v1.Reference `json:"customMessageRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate customMessage. + // +kubebuilder:validation:Optional + CustomMessageSelector *v1.Selector `json:"customMessageSelector,omitempty" tf:"-"` + + // A custom SMS sender AWS Lambda trigger. See custom_sms_sender Below. + CustomSMSSender *CustomSMSSenderInitParameters `json:"customSmsSender,omitempty" tf:"custom_sms_sender,omitempty"` + + // Defines the authentication challenge. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + DefineAuthChallenge *string `json:"defineAuthChallenge,omitempty" tf:"define_auth_challenge,omitempty"` + + // Reference to a Function in lambda to populate defineAuthChallenge. + // +kubebuilder:validation:Optional + DefineAuthChallengeRef *v1.Reference `json:"defineAuthChallengeRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate defineAuthChallenge. + // +kubebuilder:validation:Optional + DefineAuthChallengeSelector *v1.Selector `json:"defineAuthChallengeSelector,omitempty" tf:"-"` + + // The Amazon Resource Name of Key Management Service Customer master keys. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to CustomEmailSender and CustomSMSSender. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Post-authentication AWS Lambda trigger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + PostAuthentication *string `json:"postAuthentication,omitempty" tf:"post_authentication,omitempty"` + + // Reference to a Function in lambda to populate postAuthentication. + // +kubebuilder:validation:Optional + PostAuthenticationRef *v1.Reference `json:"postAuthenticationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate postAuthentication. + // +kubebuilder:validation:Optional + PostAuthenticationSelector *v1.Selector `json:"postAuthenticationSelector,omitempty" tf:"-"` + + // Post-confirmation AWS Lambda trigger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + PostConfirmation *string `json:"postConfirmation,omitempty" tf:"post_confirmation,omitempty"` + + // Reference to a Function in lambda to populate postConfirmation. + // +kubebuilder:validation:Optional + PostConfirmationRef *v1.Reference `json:"postConfirmationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate postConfirmation. 
+ // +kubebuilder:validation:Optional + PostConfirmationSelector *v1.Selector `json:"postConfirmationSelector,omitempty" tf:"-"` + + // Pre-authentication AWS Lambda trigger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + PreAuthentication *string `json:"preAuthentication,omitempty" tf:"pre_authentication,omitempty"` + + // Reference to a Function in lambda to populate preAuthentication. + // +kubebuilder:validation:Optional + PreAuthenticationRef *v1.Reference `json:"preAuthenticationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate preAuthentication. + // +kubebuilder:validation:Optional + PreAuthenticationSelector *v1.Selector `json:"preAuthenticationSelector,omitempty" tf:"-"` + + // Pre-registration AWS Lambda trigger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + PreSignUp *string `json:"preSignUp,omitempty" tf:"pre_sign_up,omitempty"` + + // Reference to a Function in lambda to populate preSignUp. + // +kubebuilder:validation:Optional + PreSignUpRef *v1.Reference `json:"preSignUpRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate preSignUp. + // +kubebuilder:validation:Optional + PreSignUpSelector *v1.Selector `json:"preSignUpSelector,omitempty" tf:"-"` + + // Allow to customize identity token claims before token generation. Set this parameter for legacy purposes; for new instances of pre token generation triggers, set the lambda_arn of pre_token_generation_config. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + PreTokenGeneration *string `json:"preTokenGeneration,omitempty" tf:"pre_token_generation,omitempty"` + + // Allow to customize access tokens. See pre_token_configuration_type + PreTokenGenerationConfig *PreTokenGenerationConfigInitParameters `json:"preTokenGenerationConfig,omitempty" tf:"pre_token_generation_config,omitempty"` + + // Reference to a Function in lambda to populate preTokenGeneration. + // +kubebuilder:validation:Optional + PreTokenGenerationRef *v1.Reference `json:"preTokenGenerationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate preTokenGeneration. + // +kubebuilder:validation:Optional + PreTokenGenerationSelector *v1.Selector `json:"preTokenGenerationSelector,omitempty" tf:"-"` + + // User migration Lambda config type. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + UserMigration *string `json:"userMigration,omitempty" tf:"user_migration,omitempty"` + + // Reference to a Function in lambda to populate userMigration. + // +kubebuilder:validation:Optional + UserMigrationRef *v1.Reference `json:"userMigrationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate userMigration. + // +kubebuilder:validation:Optional + UserMigrationSelector *v1.Selector `json:"userMigrationSelector,omitempty" tf:"-"` + + // Verifies the authentication challenge response. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + VerifyAuthChallengeResponse *string `json:"verifyAuthChallengeResponse,omitempty" tf:"verify_auth_challenge_response,omitempty"` + + // Reference to a Function in lambda to populate verifyAuthChallengeResponse. + // +kubebuilder:validation:Optional + VerifyAuthChallengeResponseRef *v1.Reference `json:"verifyAuthChallengeResponseRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate verifyAuthChallengeResponse. + // +kubebuilder:validation:Optional + VerifyAuthChallengeResponseSelector *v1.Selector `json:"verifyAuthChallengeResponseSelector,omitempty" tf:"-"` +} + +type LambdaConfigObservation struct { + + // ARN of the lambda creating an authentication challenge. + CreateAuthChallenge *string `json:"createAuthChallenge,omitempty" tf:"create_auth_challenge,omitempty"` + + // A custom email sender AWS Lambda trigger. See custom_email_sender Below. + CustomEmailSender *CustomEmailSenderObservation `json:"customEmailSender,omitempty" tf:"custom_email_sender,omitempty"` + + // Custom Message AWS Lambda trigger. + CustomMessage *string `json:"customMessage,omitempty" tf:"custom_message,omitempty"` + + // A custom SMS sender AWS Lambda trigger. See custom_sms_sender Below. + CustomSMSSender *CustomSMSSenderObservation `json:"customSmsSender,omitempty" tf:"custom_sms_sender,omitempty"` + + // Defines the authentication challenge. + DefineAuthChallenge *string `json:"defineAuthChallenge,omitempty" tf:"define_auth_challenge,omitempty"` + + // The Amazon Resource Name of Key Management Service Customer master keys. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to CustomEmailSender and CustomSMSSender. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Post-authentication AWS Lambda trigger. 
+ PostAuthentication *string `json:"postAuthentication,omitempty" tf:"post_authentication,omitempty"` + + // Post-confirmation AWS Lambda trigger. + PostConfirmation *string `json:"postConfirmation,omitempty" tf:"post_confirmation,omitempty"` + + // Pre-authentication AWS Lambda trigger. + PreAuthentication *string `json:"preAuthentication,omitempty" tf:"pre_authentication,omitempty"` + + // Pre-registration AWS Lambda trigger. + PreSignUp *string `json:"preSignUp,omitempty" tf:"pre_sign_up,omitempty"` + + // Allow to customize identity token claims before token generation. Set this parameter for legacy purposes; for new instances of pre token generation triggers, set the lambda_arn of pre_token_generation_config. + PreTokenGeneration *string `json:"preTokenGeneration,omitempty" tf:"pre_token_generation,omitempty"` + + // Allow to customize access tokens. See pre_token_configuration_type + PreTokenGenerationConfig *PreTokenGenerationConfigObservation `json:"preTokenGenerationConfig,omitempty" tf:"pre_token_generation_config,omitempty"` + + // User migration Lambda config type. + UserMigration *string `json:"userMigration,omitempty" tf:"user_migration,omitempty"` + + // Verifies the authentication challenge response. + VerifyAuthChallengeResponse *string `json:"verifyAuthChallengeResponse,omitempty" tf:"verify_auth_challenge_response,omitempty"` +} + +type LambdaConfigParameters struct { + + // ARN of the lambda creating an authentication challenge. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + CreateAuthChallenge *string `json:"createAuthChallenge,omitempty" tf:"create_auth_challenge,omitempty"` + + // Reference to a Function in lambda to populate createAuthChallenge. 
+ // +kubebuilder:validation:Optional + CreateAuthChallengeRef *v1.Reference `json:"createAuthChallengeRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate createAuthChallenge. + // +kubebuilder:validation:Optional + CreateAuthChallengeSelector *v1.Selector `json:"createAuthChallengeSelector,omitempty" tf:"-"` + + // A custom email sender AWS Lambda trigger. See custom_email_sender Below. + // +kubebuilder:validation:Optional + CustomEmailSender *CustomEmailSenderParameters `json:"customEmailSender,omitempty" tf:"custom_email_sender,omitempty"` + + // Custom Message AWS Lambda trigger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + CustomMessage *string `json:"customMessage,omitempty" tf:"custom_message,omitempty"` + + // Reference to a Function in lambda to populate customMessage. + // +kubebuilder:validation:Optional + CustomMessageRef *v1.Reference `json:"customMessageRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate customMessage. + // +kubebuilder:validation:Optional + CustomMessageSelector *v1.Selector `json:"customMessageSelector,omitempty" tf:"-"` + + // A custom SMS sender AWS Lambda trigger. See custom_sms_sender Below. + // +kubebuilder:validation:Optional + CustomSMSSender *CustomSMSSenderParameters `json:"customSmsSender,omitempty" tf:"custom_sms_sender,omitempty"` + + // Defines the authentication challenge. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + DefineAuthChallenge *string `json:"defineAuthChallenge,omitempty" tf:"define_auth_challenge,omitempty"` + + // Reference to a Function in lambda to populate defineAuthChallenge. + // +kubebuilder:validation:Optional + DefineAuthChallengeRef *v1.Reference `json:"defineAuthChallengeRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate defineAuthChallenge. + // +kubebuilder:validation:Optional + DefineAuthChallengeSelector *v1.Selector `json:"defineAuthChallengeSelector,omitempty" tf:"-"` + + // The Amazon Resource Name of Key Management Service Customer master keys. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to CustomEmailSender and CustomSMSSender. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Post-authentication AWS Lambda trigger. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + PostAuthentication *string `json:"postAuthentication,omitempty" tf:"post_authentication,omitempty"` + + // Reference to a Function in lambda to populate postAuthentication. + // +kubebuilder:validation:Optional + PostAuthenticationRef *v1.Reference `json:"postAuthenticationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate postAuthentication. + // +kubebuilder:validation:Optional + PostAuthenticationSelector *v1.Selector `json:"postAuthenticationSelector,omitempty" tf:"-"` + + // Post-confirmation AWS Lambda trigger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + PostConfirmation *string `json:"postConfirmation,omitempty" tf:"post_confirmation,omitempty"` + + // Reference to a Function in lambda to populate postConfirmation. + // +kubebuilder:validation:Optional + PostConfirmationRef *v1.Reference `json:"postConfirmationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate postConfirmation. + // +kubebuilder:validation:Optional + PostConfirmationSelector *v1.Selector `json:"postConfirmationSelector,omitempty" tf:"-"` + + // Pre-authentication AWS Lambda trigger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + PreAuthentication *string `json:"preAuthentication,omitempty" tf:"pre_authentication,omitempty"` + + // Reference to a Function in lambda to populate preAuthentication. 
+ // +kubebuilder:validation:Optional + PreAuthenticationRef *v1.Reference `json:"preAuthenticationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate preAuthentication. + // +kubebuilder:validation:Optional + PreAuthenticationSelector *v1.Selector `json:"preAuthenticationSelector,omitempty" tf:"-"` + + // Pre-registration AWS Lambda trigger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + PreSignUp *string `json:"preSignUp,omitempty" tf:"pre_sign_up,omitempty"` + + // Reference to a Function in lambda to populate preSignUp. + // +kubebuilder:validation:Optional + PreSignUpRef *v1.Reference `json:"preSignUpRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate preSignUp. + // +kubebuilder:validation:Optional + PreSignUpSelector *v1.Selector `json:"preSignUpSelector,omitempty" tf:"-"` + + // Allow to customize identity token claims before token generation. Set this parameter for legacy purposes; for new instances of pre token generation triggers, set the lambda_arn of pre_token_generation_config. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + PreTokenGeneration *string `json:"preTokenGeneration,omitempty" tf:"pre_token_generation,omitempty"` + + // Allow to customize access tokens. See pre_token_configuration_type + // +kubebuilder:validation:Optional + PreTokenGenerationConfig *PreTokenGenerationConfigParameters `json:"preTokenGenerationConfig,omitempty" tf:"pre_token_generation_config,omitempty"` + + // Reference to a Function in lambda to populate preTokenGeneration. 
+ // +kubebuilder:validation:Optional + PreTokenGenerationRef *v1.Reference `json:"preTokenGenerationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate preTokenGeneration. + // +kubebuilder:validation:Optional + PreTokenGenerationSelector *v1.Selector `json:"preTokenGenerationSelector,omitempty" tf:"-"` + + // User migration Lambda config type. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + UserMigration *string `json:"userMigration,omitempty" tf:"user_migration,omitempty"` + + // Reference to a Function in lambda to populate userMigration. + // +kubebuilder:validation:Optional + UserMigrationRef *v1.Reference `json:"userMigrationRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate userMigration. + // +kubebuilder:validation:Optional + UserMigrationSelector *v1.Selector `json:"userMigrationSelector,omitempty" tf:"-"` + + // Verifies the authentication challenge response. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + VerifyAuthChallengeResponse *string `json:"verifyAuthChallengeResponse,omitempty" tf:"verify_auth_challenge_response,omitempty"` + + // Reference to a Function in lambda to populate verifyAuthChallengeResponse. + // +kubebuilder:validation:Optional + VerifyAuthChallengeResponseRef *v1.Reference `json:"verifyAuthChallengeResponseRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate verifyAuthChallengeResponse. 
+	// +kubebuilder:validation:Optional
+	VerifyAuthChallengeResponseSelector *v1.Selector `json:"verifyAuthChallengeResponseSelector,omitempty" tf:"-"`
+}
+
+// NOTE(review): the types below appear to be upjet-generated CRD API types
+// (InitParameters/Observation/Parameters triplets with json/tf tags derived
+// from the Terraform schema) — presumably maintained by code generation;
+// confirm before editing by hand, as regeneration would overwrite changes.
+type NumberAttributeConstraintsInitParameters struct {
+
+	// Maximum value of an attribute that is of the number data type.
+	MaxValue *string `json:"maxValue,omitempty" tf:"max_value,omitempty"`
+
+	// Minimum value of an attribute that is of the number data type.
+	MinValue *string `json:"minValue,omitempty" tf:"min_value,omitempty"`
+}
+
+type NumberAttributeConstraintsObservation struct {
+
+	// Maximum value of an attribute that is of the number data type.
+	MaxValue *string `json:"maxValue,omitempty" tf:"max_value,omitempty"`
+
+	// Minimum value of an attribute that is of the number data type.
+	MinValue *string `json:"minValue,omitempty" tf:"min_value,omitempty"`
+}
+
+type NumberAttributeConstraintsParameters struct {
+
+	// Maximum value of an attribute that is of the number data type.
+	// +kubebuilder:validation:Optional
+	MaxValue *string `json:"maxValue,omitempty" tf:"max_value,omitempty"`
+
+	// Minimum value of an attribute that is of the number data type.
+	// +kubebuilder:validation:Optional
+	MinValue *string `json:"minValue,omitempty" tf:"min_value,omitempty"`
+}
+
+type PasswordPolicyInitParameters struct {
+
+	// Minimum length of the password policy that you have set.
+	MinimumLength *float64 `json:"minimumLength,omitempty" tf:"minimum_length,omitempty"`
+
+	// Whether you have required users to use at least one lowercase letter in their password.
+	RequireLowercase *bool `json:"requireLowercase,omitempty" tf:"require_lowercase,omitempty"`
+
+	// Whether you have required users to use at least one number in their password.
+	RequireNumbers *bool `json:"requireNumbers,omitempty" tf:"require_numbers,omitempty"`
+
+	// Whether you have required users to use at least one symbol in their password.
+	RequireSymbols *bool `json:"requireSymbols,omitempty" tf:"require_symbols,omitempty"`
+
+	// Whether you have required users to use at least one uppercase letter in their password.
+	RequireUppercase *bool `json:"requireUppercase,omitempty" tf:"require_uppercase,omitempty"`
+
+	// In the password policy you have set, refers to the number of days a temporary password is valid. If the user does not sign-in during this time, their password will need to be reset by an administrator.
+	TemporaryPasswordValidityDays *float64 `json:"temporaryPasswordValidityDays,omitempty" tf:"temporary_password_validity_days,omitempty"`
+}
+
+type PasswordPolicyObservation struct {
+
+	// Minimum length of the password policy that you have set.
+	MinimumLength *float64 `json:"minimumLength,omitempty" tf:"minimum_length,omitempty"`
+
+	// Whether you have required users to use at least one lowercase letter in their password.
+	RequireLowercase *bool `json:"requireLowercase,omitempty" tf:"require_lowercase,omitempty"`
+
+	// Whether you have required users to use at least one number in their password.
+	RequireNumbers *bool `json:"requireNumbers,omitempty" tf:"require_numbers,omitempty"`
+
+	// Whether you have required users to use at least one symbol in their password.
+	RequireSymbols *bool `json:"requireSymbols,omitempty" tf:"require_symbols,omitempty"`
+
+	// Whether you have required users to use at least one uppercase letter in their password.
+	RequireUppercase *bool `json:"requireUppercase,omitempty" tf:"require_uppercase,omitempty"`
+
+	// In the password policy you have set, refers to the number of days a temporary password is valid. If the user does not sign-in during this time, their password will need to be reset by an administrator.
+	TemporaryPasswordValidityDays *float64 `json:"temporaryPasswordValidityDays,omitempty" tf:"temporary_password_validity_days,omitempty"`
+}
+
+type PasswordPolicyParameters struct {
+
+	// Minimum length of the password policy that you have set.
+	// +kubebuilder:validation:Optional
+	MinimumLength *float64 `json:"minimumLength,omitempty" tf:"minimum_length,omitempty"`
+
+	// Whether you have required users to use at least one lowercase letter in their password.
+	// +kubebuilder:validation:Optional
+	RequireLowercase *bool `json:"requireLowercase,omitempty" tf:"require_lowercase,omitempty"`
+
+	// Whether you have required users to use at least one number in their password.
+	// +kubebuilder:validation:Optional
+	RequireNumbers *bool `json:"requireNumbers,omitempty" tf:"require_numbers,omitempty"`
+
+	// Whether you have required users to use at least one symbol in their password.
+	// +kubebuilder:validation:Optional
+	RequireSymbols *bool `json:"requireSymbols,omitempty" tf:"require_symbols,omitempty"`
+
+	// Whether you have required users to use at least one uppercase letter in their password.
+	// +kubebuilder:validation:Optional
+	RequireUppercase *bool `json:"requireUppercase,omitempty" tf:"require_uppercase,omitempty"`
+
+	// In the password policy you have set, refers to the number of days a temporary password is valid. If the user does not sign-in during this time, their password will need to be reset by an administrator.
+	// +kubebuilder:validation:Optional
+	TemporaryPasswordValidityDays *float64 `json:"temporaryPasswordValidityDays,omitempty" tf:"temporary_password_validity_days,omitempty"`
+}
+
+type PreTokenGenerationConfigInitParameters struct {
+
+	// The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send SMS notifications to users.
+	LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"`
+
+	// The Lambda version represents the signature of the "version" attribute in the "event" information Amazon Cognito passes to your pre Token Generation Lambda function. The supported values are V1_0, V2_0.
+	LambdaVersion *string `json:"lambdaVersion,omitempty" tf:"lambda_version,omitempty"`
+}
+
+type PreTokenGenerationConfigObservation struct {
+
+	// The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send SMS notifications to users.
+	LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"`
+
+	// The Lambda version represents the signature of the "version" attribute in the "event" information Amazon Cognito passes to your pre Token Generation Lambda function. The supported values are V1_0, V2_0.
+	LambdaVersion *string `json:"lambdaVersion,omitempty" tf:"lambda_version,omitempty"`
+}
+
+type PreTokenGenerationConfigParameters struct {
+
+	// The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send SMS notifications to users.
+	// +kubebuilder:validation:Optional
+	LambdaArn *string `json:"lambdaArn" tf:"lambda_arn,omitempty"`
+
+	// The Lambda version represents the signature of the "version" attribute in the "event" information Amazon Cognito passes to your pre Token Generation Lambda function. The supported values are V1_0, V2_0.
+	// +kubebuilder:validation:Optional
+	LambdaVersion *string `json:"lambdaVersion" tf:"lambda_version,omitempty"`
+}
+
+type RecoveryMechanismInitParameters struct {
+
+	// Name of the user pool.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Positive integer specifying priority of a method with 1 being the highest priority.
+	Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`
+}
+
+type RecoveryMechanismObservation struct {
+
+	// Name of the user pool.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Positive integer specifying priority of a method with 1 being the highest priority.
+	Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`
+}
+
+type RecoveryMechanismParameters struct {
+
+	// Name of the user pool.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
+
+	// Positive integer specifying priority of a method with 1 being the highest priority.
+	// +kubebuilder:validation:Optional
+	Priority *float64 `json:"priority" tf:"priority,omitempty"`
+}
+
+type SMSConfigurationInitParameters struct {
+
+	// External ID used in IAM role trust relationships. For more information about using external IDs, see How to Use an External ID When Granting Access to Your AWS Resources to a Third Party.
+	ExternalID *string `json:"externalId,omitempty" tf:"external_id,omitempty"`
+
+	// ARN of the Amazon SNS caller. This is usually the IAM role that you've given Cognito permission to assume.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role
+	// +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor()
+	SnsCallerArn *string `json:"snsCallerArn,omitempty" tf:"sns_caller_arn,omitempty"`
+
+	// Reference to a Role in iam to populate snsCallerArn.
+	// +kubebuilder:validation:Optional
+	SnsCallerArnRef *v1.Reference `json:"snsCallerArnRef,omitempty" tf:"-"`
+
+	// Selector for a Role in iam to populate snsCallerArn.
+	// +kubebuilder:validation:Optional
+	SnsCallerArnSelector *v1.Selector `json:"snsCallerArnSelector,omitempty" tf:"-"`
+
+	// The AWS Region to use with Amazon SNS integration. You can choose the same Region as your user pool, or a supported Legacy Amazon SNS alternate Region. Amazon Cognito resources in the Asia Pacific (Seoul) AWS Region must use your Amazon SNS configuration in the Asia Pacific (Tokyo) Region. For more information, see SMS message settings for Amazon Cognito user pools.
+	SnsRegion *string `json:"snsRegion,omitempty" tf:"sns_region,omitempty"`
+}
+
+type SMSConfigurationObservation struct {
+
+	// External ID used in IAM role trust relationships. For more information about using external IDs, see How to Use an External ID When Granting Access to Your AWS Resources to a Third Party.
+	ExternalID *string `json:"externalId,omitempty" tf:"external_id,omitempty"`
+
+	// ARN of the Amazon SNS caller. This is usually the IAM role that you've given Cognito permission to assume.
+	SnsCallerArn *string `json:"snsCallerArn,omitempty" tf:"sns_caller_arn,omitempty"`
+
+	// The AWS Region to use with Amazon SNS integration. You can choose the same Region as your user pool, or a supported Legacy Amazon SNS alternate Region. Amazon Cognito resources in the Asia Pacific (Seoul) AWS Region must use your Amazon SNS configuration in the Asia Pacific (Tokyo) Region. For more information, see SMS message settings for Amazon Cognito user pools.
+	SnsRegion *string `json:"snsRegion,omitempty" tf:"sns_region,omitempty"`
+}
+
+type SMSConfigurationParameters struct {
+
+	// External ID used in IAM role trust relationships. For more information about using external IDs, see How to Use an External ID When Granting Access to Your AWS Resources to a Third Party.
+	// +kubebuilder:validation:Optional
+	ExternalID *string `json:"externalId" tf:"external_id,omitempty"`
+
+	// ARN of the Amazon SNS caller. This is usually the IAM role that you've given Cognito permission to assume.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role
+	// +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor()
+	// +kubebuilder:validation:Optional
+	SnsCallerArn *string `json:"snsCallerArn,omitempty" tf:"sns_caller_arn,omitempty"`
+
+	// Reference to a Role in iam to populate snsCallerArn.
+	// +kubebuilder:validation:Optional
+	SnsCallerArnRef *v1.Reference `json:"snsCallerArnRef,omitempty" tf:"-"`
+
+	// Selector for a Role in iam to populate snsCallerArn.
+	// +kubebuilder:validation:Optional
+	SnsCallerArnSelector *v1.Selector `json:"snsCallerArnSelector,omitempty" tf:"-"`
+
+	// The AWS Region to use with Amazon SNS integration. You can choose the same Region as your user pool, or a supported Legacy Amazon SNS alternate Region. Amazon Cognito resources in the Asia Pacific (Seoul) AWS Region must use your Amazon SNS configuration in the Asia Pacific (Tokyo) Region. For more information, see SMS message settings for Amazon Cognito user pools.
+	// +kubebuilder:validation:Optional
+	SnsRegion *string `json:"snsRegion,omitempty" tf:"sns_region,omitempty"`
+}
+
+type SchemaInitParameters struct {
+
+	// Attribute data type. Must be one of Boolean, Number, String, DateTime.
+	AttributeDataType *string `json:"attributeDataType,omitempty" tf:"attribute_data_type,omitempty"`
+
+	// Whether the attribute type is developer only.
+	DeveloperOnlyAttribute *bool `json:"developerOnlyAttribute,omitempty" tf:"developer_only_attribute,omitempty"`
+
+	// Whether the attribute can be changed once it has been created.
+	Mutable *bool `json:"mutable,omitempty" tf:"mutable,omitempty"`
+
+	// Name of the user pool.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Configuration block for the constraints for an attribute of the number type. Detailed below.
+	NumberAttributeConstraints *NumberAttributeConstraintsInitParameters `json:"numberAttributeConstraints,omitempty" tf:"number_attribute_constraints,omitempty"`
+
+	// Whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+
+	// Constraints for an attribute of the string type. Detailed below.
+	StringAttributeConstraints *StringAttributeConstraintsInitParameters `json:"stringAttributeConstraints,omitempty" tf:"string_attribute_constraints,omitempty"`
+}
+
+type SchemaObservation struct {
+
+	// Attribute data type. Must be one of Boolean, Number, String, DateTime.
+	AttributeDataType *string `json:"attributeDataType,omitempty" tf:"attribute_data_type,omitempty"`
+
+	// Whether the attribute type is developer only.
+	DeveloperOnlyAttribute *bool `json:"developerOnlyAttribute,omitempty" tf:"developer_only_attribute,omitempty"`
+
+	// Whether the attribute can be changed once it has been created.
+	Mutable *bool `json:"mutable,omitempty" tf:"mutable,omitempty"`
+
+	// Name of the user pool.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Configuration block for the constraints for an attribute of the number type. Detailed below.
+	NumberAttributeConstraints *NumberAttributeConstraintsObservation `json:"numberAttributeConstraints,omitempty" tf:"number_attribute_constraints,omitempty"`
+
+	// Whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+
+	// Constraints for an attribute of the string type. Detailed below.
+	StringAttributeConstraints *StringAttributeConstraintsObservation `json:"stringAttributeConstraints,omitempty" tf:"string_attribute_constraints,omitempty"`
+}
+
+type SchemaParameters struct {
+
+	// Attribute data type. Must be one of Boolean, Number, String, DateTime.
+	// +kubebuilder:validation:Optional
+	AttributeDataType *string `json:"attributeDataType" tf:"attribute_data_type,omitempty"`
+
+	// Whether the attribute type is developer only.
+	// +kubebuilder:validation:Optional
+	DeveloperOnlyAttribute *bool `json:"developerOnlyAttribute,omitempty" tf:"developer_only_attribute,omitempty"`
+
+	// Whether the attribute can be changed once it has been created.
+	// +kubebuilder:validation:Optional
+	Mutable *bool `json:"mutable,omitempty" tf:"mutable,omitempty"`
+
+	// Name of the user pool.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
+
+	// Configuration block for the constraints for an attribute of the number type. Detailed below.
+	// +kubebuilder:validation:Optional
+	NumberAttributeConstraints *NumberAttributeConstraintsParameters `json:"numberAttributeConstraints,omitempty" tf:"number_attribute_constraints,omitempty"`
+
+	// Whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
+	// +kubebuilder:validation:Optional
+	Required *bool `json:"required,omitempty" tf:"required,omitempty"`
+
+	// Constraints for an attribute of the string type. Detailed below.
+	// +kubebuilder:validation:Optional
+	StringAttributeConstraints *StringAttributeConstraintsParameters `json:"stringAttributeConstraints,omitempty" tf:"string_attribute_constraints,omitempty"`
+}
+
+type SoftwareTokenMfaConfigurationInitParameters struct {
+
+	// Boolean whether to enable software token Multi-Factor (MFA) tokens, such as Time-based One-Time Password (TOTP). To disable software token MFA When sms_configuration is not present, the mfa_configuration argument must be set to OFF and the software_token_mfa_configuration configuration block must be fully removed.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+}
+
+type SoftwareTokenMfaConfigurationObservation struct {
+
+	// Boolean whether to enable software token Multi-Factor (MFA) tokens, such as Time-based One-Time Password (TOTP). To disable software token MFA When sms_configuration is not present, the mfa_configuration argument must be set to OFF and the software_token_mfa_configuration configuration block must be fully removed.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+}
+
+type SoftwareTokenMfaConfigurationParameters struct {
+
+	// Boolean whether to enable software token Multi-Factor (MFA) tokens, such as Time-based One-Time Password (TOTP). To disable software token MFA When sms_configuration is not present, the mfa_configuration argument must be set to OFF and the software_token_mfa_configuration configuration block must be fully removed.
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled" tf:"enabled,omitempty"`
+}
+
+type StringAttributeConstraintsInitParameters struct {
+
+	// Maximum length of an attribute value of the string type.
+	MaxLength *string `json:"maxLength,omitempty" tf:"max_length,omitempty"`
+
+	// Minimum length of an attribute value of the string type.
+	MinLength *string `json:"minLength,omitempty" tf:"min_length,omitempty"`
+}
+
+type StringAttributeConstraintsObservation struct {
+
+	// Maximum length of an attribute value of the string type.
+	MaxLength *string `json:"maxLength,omitempty" tf:"max_length,omitempty"`
+
+	// Minimum length of an attribute value of the string type.
+	MinLength *string `json:"minLength,omitempty" tf:"min_length,omitempty"`
+}
+
+type StringAttributeConstraintsParameters struct {
+
+	// Maximum length of an attribute value of the string type.
+	// +kubebuilder:validation:Optional
+	MaxLength *string `json:"maxLength,omitempty" tf:"max_length,omitempty"`
+
+	// Minimum length of an attribute value of the string type.
+	// +kubebuilder:validation:Optional
+	MinLength *string `json:"minLength,omitempty" tf:"min_length,omitempty"`
+}
+
+type UserAttributeUpdateSettingsInitParameters struct {
+
+	// A list of attributes requiring verification before update. If set, the provided value(s) must also be set in auto_verified_attributes. Valid values: email, phone_number.
+	// +listType=set
+	AttributesRequireVerificationBeforeUpdate []*string `json:"attributesRequireVerificationBeforeUpdate,omitempty" tf:"attributes_require_verification_before_update,omitempty"`
+}
+
+type UserAttributeUpdateSettingsObservation struct {
+
+	// A list of attributes requiring verification before update. If set, the provided value(s) must also be set in auto_verified_attributes. Valid values: email, phone_number.
+	// +listType=set
+	AttributesRequireVerificationBeforeUpdate []*string `json:"attributesRequireVerificationBeforeUpdate,omitempty" tf:"attributes_require_verification_before_update,omitempty"`
+}
+
+type UserAttributeUpdateSettingsParameters struct {
+
+	// A list of attributes requiring verification before update. If set, the provided value(s) must also be set in auto_verified_attributes. Valid values: email, phone_number.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	AttributesRequireVerificationBeforeUpdate []*string `json:"attributesRequireVerificationBeforeUpdate" tf:"attributes_require_verification_before_update,omitempty"`
+}
+
+type UserPoolAddOnsInitParameters struct {
+
+	// Mode for advanced security, must be one of OFF, AUDIT or ENFORCED.
+	AdvancedSecurityMode *string `json:"advancedSecurityMode,omitempty" tf:"advanced_security_mode,omitempty"`
+}
+
+type UserPoolAddOnsObservation struct {
+
+	// Mode for advanced security, must be one of OFF, AUDIT or ENFORCED.
+	AdvancedSecurityMode *string `json:"advancedSecurityMode,omitempty" tf:"advanced_security_mode,omitempty"`
+}
+
+type UserPoolAddOnsParameters struct {
+
+	// Mode for advanced security, must be one of OFF, AUDIT or ENFORCED.
+	// +kubebuilder:validation:Optional
+	AdvancedSecurityMode *string `json:"advancedSecurityMode" tf:"advanced_security_mode,omitempty"`
+}
+
+type UserPoolInitParameters struct {
+
+	// Configuration block to define which verified available method a user can use to recover their forgotten password. Detailed below.
+	AccountRecoverySetting *AccountRecoverySettingInitParameters `json:"accountRecoverySetting,omitempty" tf:"account_recovery_setting,omitempty"`
+
+	// Configuration block for creating a new user profile. Detailed below.
+	AdminCreateUserConfig *AdminCreateUserConfigInitParameters `json:"adminCreateUserConfig,omitempty" tf:"admin_create_user_config,omitempty"`
+
+	// Attributes supported as an alias for this user pool. Valid values: phone_number, email, or preferred_username. Conflicts with username_attributes.
+	// +listType=set
+	AliasAttributes []*string `json:"aliasAttributes,omitempty" tf:"alias_attributes,omitempty"`
+
+	// Attributes to be auto-verified. Valid values: email, phone_number.
+	// +listType=set
+	AutoVerifiedAttributes []*string `json:"autoVerifiedAttributes,omitempty" tf:"auto_verified_attributes,omitempty"`
+
+	// When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. Valid values are ACTIVE and INACTIVE, Default value is INACTIVE.
+	DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Configuration block for the user pool's device tracking. Detailed below.
+	DeviceConfiguration *DeviceConfigurationInitParameters `json:"deviceConfiguration,omitempty" tf:"device_configuration,omitempty"`
+
+	// Configuration block for configuring email. Detailed below.
+	EmailConfiguration *EmailConfigurationInitParameters `json:"emailConfiguration,omitempty" tf:"email_configuration,omitempty"`
+
+	// String representing the email verification message. Conflicts with verification_message_template configuration block email_message argument.
+	EmailVerificationMessage *string `json:"emailVerificationMessage,omitempty" tf:"email_verification_message,omitempty"`
+
+	// String representing the email verification subject. Conflicts with verification_message_template configuration block email_subject argument.
+	EmailVerificationSubject *string `json:"emailVerificationSubject,omitempty" tf:"email_verification_subject,omitempty"`
+
+	// Configuration block for the AWS Lambda triggers associated with the user pool. Detailed below.
+	LambdaConfig *LambdaConfigInitParameters `json:"lambdaConfig,omitempty" tf:"lambda_config,omitempty"`
+
+	// Multi-Factor Authentication (MFA) configuration for the User Pool. Defaults of OFF. Valid values are OFF (MFA Tokens are not required), ON (MFA is required for all users to sign in; requires at least one of sms_configuration or software_token_mfa_configuration to be configured), or OPTIONAL (MFA Will be required only for individual users who have MFA Enabled; requires at least one of sms_configuration or software_token_mfa_configuration to be configured).
+	MfaConfiguration *string `json:"mfaConfiguration,omitempty" tf:"mfa_configuration,omitempty"`
+
+	// Name of the user pool.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Configuration block for information about the user pool password policy. Detailed below.
+	PasswordPolicy *PasswordPolicyInitParameters `json:"passwordPolicy,omitempty" tf:"password_policy,omitempty"`
+
+	// String representing the SMS authentication message. The Message must contain the {####} placeholder, which will be replaced with the code.
+	SMSAuthenticationMessage *string `json:"smsAuthenticationMessage,omitempty" tf:"sms_authentication_message,omitempty"`
+
+	// Configuration block for Short Message Service (SMS) settings. Detailed below. These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the taint command.
+	SMSConfiguration *SMSConfigurationInitParameters `json:"smsConfiguration,omitempty" tf:"sms_configuration,omitempty"`
+
+	// String representing the SMS verification message. Conflicts with verification_message_template configuration block sms_message argument.
+	SMSVerificationMessage *string `json:"smsVerificationMessage,omitempty" tf:"sms_verification_message,omitempty"`
+
+	// Configuration block for the schema attributes of a user pool. Detailed below. Schema attributes from the standard attribute set only need to be specified if they are different from the default configuration. Attributes can be added, but not modified or removed. Maximum of 50 attributes.
+	Schema []SchemaInitParameters `json:"schema,omitempty" tf:"schema,omitempty"`
+
+	// Configuration block for software token Mult-Factor Authentication (MFA) settings. Detailed below.
+	SoftwareTokenMfaConfiguration *SoftwareTokenMfaConfigurationInitParameters `json:"softwareTokenMfaConfiguration,omitempty" tf:"software_token_mfa_configuration,omitempty"`
+
+	// Key-value map of resource tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// Configuration block for user attribute update settings. Detailed below.
+	UserAttributeUpdateSettings *UserAttributeUpdateSettingsInitParameters `json:"userAttributeUpdateSettings,omitempty" tf:"user_attribute_update_settings,omitempty"`
+
+	// Configuration block for user pool add-ons to enable user pool advanced security mode features. Detailed below.
+	UserPoolAddOns *UserPoolAddOnsInitParameters `json:"userPoolAddOns,omitempty" tf:"user_pool_add_ons,omitempty"`
+
+	// Whether email addresses or phone numbers can be specified as usernames when a user signs up. Conflicts with alias_attributes.
+	// +listType=set
+	UsernameAttributes []*string `json:"usernameAttributes,omitempty" tf:"username_attributes,omitempty"`
+
+	// Configuration block for username configuration. Detailed below.
+	UsernameConfiguration *UsernameConfigurationInitParameters `json:"usernameConfiguration,omitempty" tf:"username_configuration,omitempty"`
+
+	// Configuration block for verification message templates. Detailed below.
+	VerificationMessageTemplate *VerificationMessageTemplateInitParameters `json:"verificationMessageTemplate,omitempty" tf:"verification_message_template,omitempty"`
+}
+
+type UserPoolObservation struct {
+
+	// Configuration block to define which verified available method a user can use to recover their forgotten password. Detailed below.
+	AccountRecoverySetting *AccountRecoverySettingObservation `json:"accountRecoverySetting,omitempty" tf:"account_recovery_setting,omitempty"`
+
+	// Configuration block for creating a new user profile. Detailed below.
+	AdminCreateUserConfig *AdminCreateUserConfigObservation `json:"adminCreateUserConfig,omitempty" tf:"admin_create_user_config,omitempty"`
+
+	// Attributes supported as an alias for this user pool. Valid values: phone_number, email, or preferred_username. Conflicts with username_attributes.
+	// +listType=set
+	AliasAttributes []*string `json:"aliasAttributes,omitempty" tf:"alias_attributes,omitempty"`
+
+	// ARN of the user pool.
+	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`
+
+	// Attributes to be auto-verified. Valid values: email, phone_number.
+	// +listType=set
+	AutoVerifiedAttributes []*string `json:"autoVerifiedAttributes,omitempty" tf:"auto_verified_attributes,omitempty"`
+
+	// Date the user pool was created.
+	CreationDate *string `json:"creationDate,omitempty" tf:"creation_date,omitempty"`
+
+	// A custom domain name that you provide to Amazon Cognito. This parameter applies only if you use a custom domain to host the sign-up and sign-in pages for your application. For example: auth.example.com.
+	CustomDomain *string `json:"customDomain,omitempty" tf:"custom_domain,omitempty"`
+
+	// When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. Valid values are ACTIVE and INACTIVE, Default value is INACTIVE.
+	DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Configuration block for the user pool's device tracking. Detailed below.
+	DeviceConfiguration *DeviceConfigurationObservation `json:"deviceConfiguration,omitempty" tf:"device_configuration,omitempty"`
+
+	// Holds the domain prefix if the user pool has a domain associated with it.
+	Domain *string `json:"domain,omitempty" tf:"domain,omitempty"`
+
+	// Configuration block for configuring email. Detailed below.
+	EmailConfiguration *EmailConfigurationObservation `json:"emailConfiguration,omitempty" tf:"email_configuration,omitempty"`
+
+	// String representing the email verification message. Conflicts with verification_message_template configuration block email_message argument.
+	EmailVerificationMessage *string `json:"emailVerificationMessage,omitempty" tf:"email_verification_message,omitempty"`
+
+	// String representing the email verification subject. Conflicts with verification_message_template configuration block email_subject argument.
+	EmailVerificationSubject *string `json:"emailVerificationSubject,omitempty" tf:"email_verification_subject,omitempty"`
+
+	// Endpoint name of the user pool. Example format: cognito-idp.REGION.amazonaws.com/xxxx_yyyyy
+	Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"`
+
+	// A number estimating the size of the user pool.
+	EstimatedNumberOfUsers *float64 `json:"estimatedNumberOfUsers,omitempty" tf:"estimated_number_of_users,omitempty"`
+
+	// ID of the user pool.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Configuration block for the AWS Lambda triggers associated with the user pool. Detailed below.
+	LambdaConfig *LambdaConfigObservation `json:"lambdaConfig,omitempty" tf:"lambda_config,omitempty"`
+
+	// Date the user pool was last modified.
+	LastModifiedDate *string `json:"lastModifiedDate,omitempty" tf:"last_modified_date,omitempty"`
+
+	// Multi-Factor Authentication (MFA) configuration for the User Pool. Defaults of OFF. Valid values are OFF (MFA Tokens are not required), ON (MFA is required for all users to sign in; requires at least one of sms_configuration or software_token_mfa_configuration to be configured), or OPTIONAL (MFA Will be required only for individual users who have MFA Enabled; requires at least one of sms_configuration or software_token_mfa_configuration to be configured).
+	MfaConfiguration *string `json:"mfaConfiguration,omitempty" tf:"mfa_configuration,omitempty"`
+
+	// Name of the user pool.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Configuration block for information about the user pool password policy. Detailed below.
+	PasswordPolicy *PasswordPolicyObservation `json:"passwordPolicy,omitempty" tf:"password_policy,omitempty"`
+
+	// String representing the SMS authentication message. The Message must contain the {####} placeholder, which will be replaced with the code.
+	SMSAuthenticationMessage *string `json:"smsAuthenticationMessage,omitempty" tf:"sms_authentication_message,omitempty"`
+
+	// Configuration block for Short Message Service (SMS) settings. Detailed below. These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the taint command.
+	SMSConfiguration *SMSConfigurationObservation `json:"smsConfiguration,omitempty" tf:"sms_configuration,omitempty"`
+
+	// String representing the SMS verification message. Conflicts with verification_message_template configuration block sms_message argument.
+	SMSVerificationMessage *string `json:"smsVerificationMessage,omitempty" tf:"sms_verification_message,omitempty"`
+
+	// Configuration block for the schema attributes of a user pool. Detailed below. Schema attributes from the standard attribute set only need to be specified if they are different from the default configuration. Attributes can be added, but not modified or removed. Maximum of 50 attributes.
+	Schema []SchemaObservation `json:"schema,omitempty" tf:"schema,omitempty"`
+
+	// Configuration block for software token Mult-Factor Authentication (MFA) settings. Detailed below.
+	SoftwareTokenMfaConfiguration *SoftwareTokenMfaConfigurationObservation `json:"softwareTokenMfaConfiguration,omitempty" tf:"software_token_mfa_configuration,omitempty"`
+
+	// Key-value map of resource tags.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
+	// +mapType=granular
+	TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"`
+
+	// Configuration block for user attribute update settings. Detailed below.
+	UserAttributeUpdateSettings *UserAttributeUpdateSettingsObservation `json:"userAttributeUpdateSettings,omitempty" tf:"user_attribute_update_settings,omitempty"`
+
+	// Configuration block for user pool add-ons to enable user pool advanced security mode features. Detailed below.
+	UserPoolAddOns *UserPoolAddOnsObservation `json:"userPoolAddOns,omitempty" tf:"user_pool_add_ons,omitempty"`
+
+	// Whether email addresses or phone numbers can be specified as usernames when a user signs up. Conflicts with alias_attributes.
+	// +listType=set
+	UsernameAttributes []*string `json:"usernameAttributes,omitempty" tf:"username_attributes,omitempty"`
+
+	// Configuration block for username configuration. Detailed below.
+	UsernameConfiguration *UsernameConfigurationObservation `json:"usernameConfiguration,omitempty" tf:"username_configuration,omitempty"`
+
+	// Configuration block for verification message templates. Detailed below.
+	VerificationMessageTemplate *VerificationMessageTemplateObservation `json:"verificationMessageTemplate,omitempty" tf:"verification_message_template,omitempty"`
+}
+
+type UserPoolParameters struct {
+
+	// Configuration block to define which verified available method a user can use to recover their forgotten password. Detailed below.
+	// +kubebuilder:validation:Optional
+	AccountRecoverySetting *AccountRecoverySettingParameters `json:"accountRecoverySetting,omitempty" tf:"account_recovery_setting,omitempty"`
+
+	// Configuration block for creating a new user profile. Detailed below.
+	// +kubebuilder:validation:Optional
+	AdminCreateUserConfig *AdminCreateUserConfigParameters `json:"adminCreateUserConfig,omitempty" tf:"admin_create_user_config,omitempty"`
+
+	// Attributes supported as an alias for this user pool. Valid values: phone_number, email, or preferred_username. Conflicts with username_attributes.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	AliasAttributes []*string `json:"aliasAttributes,omitempty" tf:"alias_attributes,omitempty"`
+
+	// Attributes to be auto-verified. Valid values: email, phone_number.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	AutoVerifiedAttributes []*string `json:"autoVerifiedAttributes,omitempty" tf:"auto_verified_attributes,omitempty"`
+
+	// When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. Valid values are ACTIVE and INACTIVE, Default value is INACTIVE.
+	// +kubebuilder:validation:Optional
+	DeletionProtection *string `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"`
+
+	// Configuration block for the user pool's device tracking. Detailed below.
+	// +kubebuilder:validation:Optional
+	DeviceConfiguration *DeviceConfigurationParameters `json:"deviceConfiguration,omitempty" tf:"device_configuration,omitempty"`
+
+	// Configuration block for configuring email. Detailed below.
+	// +kubebuilder:validation:Optional
+	EmailConfiguration *EmailConfigurationParameters `json:"emailConfiguration,omitempty" tf:"email_configuration,omitempty"`
+
+	// String representing the email verification message. Conflicts with verification_message_template configuration block email_message argument.
+	// +kubebuilder:validation:Optional
+	EmailVerificationMessage *string `json:"emailVerificationMessage,omitempty" tf:"email_verification_message,omitempty"`
+
+	// String representing the email verification subject. Conflicts with verification_message_template configuration block email_subject argument.
+	// +kubebuilder:validation:Optional
+	EmailVerificationSubject *string `json:"emailVerificationSubject,omitempty" tf:"email_verification_subject,omitempty"`
+
+	// Configuration block for the AWS Lambda triggers associated with the user pool. Detailed below.
+	// +kubebuilder:validation:Optional
+	LambdaConfig *LambdaConfigParameters `json:"lambdaConfig,omitempty" tf:"lambda_config,omitempty"`
+
+	// Multi-Factor Authentication (MFA) configuration for the User Pool. Defaults of OFF. Valid values are OFF (MFA Tokens are not required), ON (MFA is required for all users to sign in; requires at least one of sms_configuration or software_token_mfa_configuration to be configured), or OPTIONAL (MFA Will be required only for individual users who have MFA Enabled; requires at least one of sms_configuration or software_token_mfa_configuration to be configured).
+	// +kubebuilder:validation:Optional
+	MfaConfiguration *string `json:"mfaConfiguration,omitempty" tf:"mfa_configuration,omitempty"`
+
+	// Name of the user pool.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Configuration block for information about the user pool password policy. Detailed below.
+	// +kubebuilder:validation:Optional
+	PasswordPolicy *PasswordPolicyParameters `json:"passwordPolicy,omitempty" tf:"password_policy,omitempty"`
+
+	// Region is the region you'd like your resource to be created in.
+	// +upjet:crd:field:TFTag=-
+	// +kubebuilder:validation:Required
+	Region *string `json:"region" tf:"-"`
+
+	// String representing the SMS authentication message. The Message must contain the {####} placeholder, which will be replaced with the code.
+	// +kubebuilder:validation:Optional
+	SMSAuthenticationMessage *string `json:"smsAuthenticationMessage,omitempty" tf:"sms_authentication_message,omitempty"`
+
+	// Configuration block for Short Message Service (SMS) settings. Detailed below. These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the taint command.
+ // +kubebuilder:validation:Optional + SMSConfiguration *SMSConfigurationParameters `json:"smsConfiguration,omitempty" tf:"sms_configuration,omitempty"` + + // String representing the SMS verification message. Conflicts with verification_message_template configuration block sms_message argument. + // +kubebuilder:validation:Optional + SMSVerificationMessage *string `json:"smsVerificationMessage,omitempty" tf:"sms_verification_message,omitempty"` + + // Configuration block for the schema attributes of a user pool. Detailed below. Schema attributes from the standard attribute set only need to be specified if they are different from the default configuration. Attributes can be added, but not modified or removed. Maximum of 50 attributes. + // +kubebuilder:validation:Optional + Schema []SchemaParameters `json:"schema,omitempty" tf:"schema,omitempty"` + + // Configuration block for software token Mult-Factor Authentication (MFA) settings. Detailed below. + // +kubebuilder:validation:Optional + SoftwareTokenMfaConfiguration *SoftwareTokenMfaConfigurationParameters `json:"softwareTokenMfaConfiguration,omitempty" tf:"software_token_mfa_configuration,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for user attribute update settings. Detailed below. + // +kubebuilder:validation:Optional + UserAttributeUpdateSettings *UserAttributeUpdateSettingsParameters `json:"userAttributeUpdateSettings,omitempty" tf:"user_attribute_update_settings,omitempty"` + + // Configuration block for user pool add-ons to enable user pool advanced security mode features. Detailed below. + // +kubebuilder:validation:Optional + UserPoolAddOns *UserPoolAddOnsParameters `json:"userPoolAddOns,omitempty" tf:"user_pool_add_ons,omitempty"` + + // Whether email addresses or phone numbers can be specified as usernames when a user signs up. 
Conflicts with alias_attributes. + // +kubebuilder:validation:Optional + // +listType=set + UsernameAttributes []*string `json:"usernameAttributes,omitempty" tf:"username_attributes,omitempty"` + + // Configuration block for username configuration. Detailed below. + // +kubebuilder:validation:Optional + UsernameConfiguration *UsernameConfigurationParameters `json:"usernameConfiguration,omitempty" tf:"username_configuration,omitempty"` + + // Configuration block for verification message templates. Detailed below. + // +kubebuilder:validation:Optional + VerificationMessageTemplate *VerificationMessageTemplateParameters `json:"verificationMessageTemplate,omitempty" tf:"verification_message_template,omitempty"` +} + +type UsernameConfigurationInitParameters struct { + + // Whether username case sensitivity will be applied for all users in the user pool through Cognito APIs. + CaseSensitive *bool `json:"caseSensitive,omitempty" tf:"case_sensitive,omitempty"` +} + +type UsernameConfigurationObservation struct { + + // Whether username case sensitivity will be applied for all users in the user pool through Cognito APIs. + CaseSensitive *bool `json:"caseSensitive,omitempty" tf:"case_sensitive,omitempty"` +} + +type UsernameConfigurationParameters struct { + + // Whether username case sensitivity will be applied for all users in the user pool through Cognito APIs. + // +kubebuilder:validation:Optional + CaseSensitive *bool `json:"caseSensitive" tf:"case_sensitive,omitempty"` +} + +type VerificationMessageTemplateInitParameters struct { + + // Default email option. Must be either CONFIRM_WITH_CODE or CONFIRM_WITH_LINK. Defaults to CONFIRM_WITH_CODE. + DefaultEmailOption *string `json:"defaultEmailOption,omitempty" tf:"default_email_option,omitempty"` + + // Email message template. Must contain the {####} placeholder. Conflicts with email_verification_message argument. 
+ EmailMessage *string `json:"emailMessage,omitempty" tf:"email_message,omitempty"` + + // Email message template for sending a confirmation link to the user, it must contain the {##Click Here##} placeholder. + EmailMessageByLink *string `json:"emailMessageByLink,omitempty" tf:"email_message_by_link,omitempty"` + + // Subject line for the email message template. Conflicts with email_verification_subject argument. + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` + + // Subject line for the email message template for sending a confirmation link to the user. + EmailSubjectByLink *string `json:"emailSubjectByLink,omitempty" tf:"email_subject_by_link,omitempty"` + + // SMS message template. Must contain the {####} placeholder. Conflicts with sms_verification_message argument. + SMSMessage *string `json:"smsMessage,omitempty" tf:"sms_message,omitempty"` +} + +type VerificationMessageTemplateObservation struct { + + // Default email option. Must be either CONFIRM_WITH_CODE or CONFIRM_WITH_LINK. Defaults to CONFIRM_WITH_CODE. + DefaultEmailOption *string `json:"defaultEmailOption,omitempty" tf:"default_email_option,omitempty"` + + // Email message template. Must contain the {####} placeholder. Conflicts with email_verification_message argument. + EmailMessage *string `json:"emailMessage,omitempty" tf:"email_message,omitempty"` + + // Email message template for sending a confirmation link to the user, it must contain the {##Click Here##} placeholder. + EmailMessageByLink *string `json:"emailMessageByLink,omitempty" tf:"email_message_by_link,omitempty"` + + // Subject line for the email message template. Conflicts with email_verification_subject argument. + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` + + // Subject line for the email message template for sending a confirmation link to the user. 
+ EmailSubjectByLink *string `json:"emailSubjectByLink,omitempty" tf:"email_subject_by_link,omitempty"` + + // SMS message template. Must contain the {####} placeholder. Conflicts with sms_verification_message argument. + SMSMessage *string `json:"smsMessage,omitempty" tf:"sms_message,omitempty"` +} + +type VerificationMessageTemplateParameters struct { + + // Default email option. Must be either CONFIRM_WITH_CODE or CONFIRM_WITH_LINK. Defaults to CONFIRM_WITH_CODE. + // +kubebuilder:validation:Optional + DefaultEmailOption *string `json:"defaultEmailOption,omitempty" tf:"default_email_option,omitempty"` + + // Email message template. Must contain the {####} placeholder. Conflicts with email_verification_message argument. + // +kubebuilder:validation:Optional + EmailMessage *string `json:"emailMessage,omitempty" tf:"email_message,omitempty"` + + // Email message template for sending a confirmation link to the user, it must contain the {##Click Here##} placeholder. + // +kubebuilder:validation:Optional + EmailMessageByLink *string `json:"emailMessageByLink,omitempty" tf:"email_message_by_link,omitempty"` + + // Subject line for the email message template. Conflicts with email_verification_subject argument. + // +kubebuilder:validation:Optional + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` + + // Subject line for the email message template for sending a confirmation link to the user. + // +kubebuilder:validation:Optional + EmailSubjectByLink *string `json:"emailSubjectByLink,omitempty" tf:"email_subject_by_link,omitempty"` + + // SMS message template. Must contain the {####} placeholder. Conflicts with sms_verification_message argument. 
+ // +kubebuilder:validation:Optional + SMSMessage *string `json:"smsMessage,omitempty" tf:"sms_message,omitempty"` +} + +// UserPoolSpec defines the desired state of UserPool +type UserPoolSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserPoolParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UserPoolInitParameters `json:"initProvider,omitempty"` +} + +// UserPoolStatus defines the observed state of UserPool. +type UserPoolStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserPoolObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// UserPool is the Schema for the UserPools API. Provides a Cognito User Pool resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type UserPool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec UserPoolSpec `json:"spec"` + Status UserPoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserPoolList contains a list of UserPools +type UserPoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UserPool `json:"items"` +} + +// Repository type metadata. +var ( + UserPool_Kind = "UserPool" + UserPool_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: UserPool_Kind}.String() + UserPool_KindAPIVersion = UserPool_Kind + "." 
+ CRDGroupVersion.String() + UserPool_GroupVersionKind = CRDGroupVersion.WithKind(UserPool_Kind) +) + +func init() { + SchemeBuilder.Register(&UserPool{}, &UserPoolList{}) +} diff --git a/apis/configservice/v1beta1/zz_generated.conversion_hubs.go b/apis/configservice/v1beta1/zz_generated.conversion_hubs.go index c628540d52..117e1abd6d 100755 --- a/apis/configservice/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/configservice/v1beta1/zz_generated.conversion_hubs.go @@ -9,20 +9,5 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *AWSConfigurationRecorderStatus) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ConfigRule) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ConfigurationAggregator) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ConfigurationRecorder) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ConformancePack) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DeliveryChannel) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *RemediationConfiguration) Hub() {} diff --git a/apis/configservice/v1beta1/zz_generated.conversion_spokes.go b/apis/configservice/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..b1e2d70bdb --- /dev/null +++ b/apis/configservice/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ConfigRule to the hub type. 
+func (tr *ConfigRule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ConfigRule type. +func (tr *ConfigRule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ConfigurationAggregator to the hub type. +func (tr *ConfigurationAggregator) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ConfigurationAggregator type. +func (tr *ConfigurationAggregator) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ConfigurationRecorder to the hub type. 
+func (tr *ConfigurationRecorder) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ConfigurationRecorder type. +func (tr *ConfigurationRecorder) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DeliveryChannel to the hub type. +func (tr *DeliveryChannel) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DeliveryChannel type. +func (tr *DeliveryChannel) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this RemediationConfiguration to the hub type. 
+func (tr *RemediationConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RemediationConfiguration type. +func (tr *RemediationConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/configservice/v1beta2/zz_configrule_terraformed.go b/apis/configservice/v1beta2/zz_configrule_terraformed.go new file mode 100755 index 0000000000..631fa87ffd --- /dev/null +++ b/apis/configservice/v1beta2/zz_configrule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ConfigRule +func (mg *ConfigRule) GetTerraformResourceType() string { + return "aws_config_config_rule" +} + +// GetConnectionDetailsMapping for this ConfigRule +func (tr *ConfigRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ConfigRule +func (tr *ConfigRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ConfigRule +func (tr *ConfigRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ConfigRule +func (tr *ConfigRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ConfigRule +func (tr *ConfigRule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ConfigRule +func (tr *ConfigRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ConfigRule +func (tr *ConfigRule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ConfigRule +func (tr *ConfigRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ConfigRule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ConfigRule) LateInitialize(attrs []byte) (bool, error) { + params := &ConfigRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ConfigRule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/configservice/v1beta2/zz_configrule_types.go b/apis/configservice/v1beta2/zz_configrule_types.go new file mode 100755 index 0000000000..c8e18f8774 --- /dev/null +++ b/apis/configservice/v1beta2/zz_configrule_types.go @@ -0,0 +1,392 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigRuleInitParameters struct { + + // Description of the rule + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The modes the Config rule can be evaluated in. See Evaluation Mode for more details. + EvaluationMode []EvaluationModeInitParameters `json:"evaluationMode,omitempty" tf:"evaluation_mode,omitempty"` + + // A string in JSON format that is passed to the AWS Config rule Lambda function. + InputParameters *string `json:"inputParameters,omitempty" tf:"input_parameters,omitempty"` + + // The maximum frequency with which AWS Config runs evaluations for a rule. + MaximumExecutionFrequency *string `json:"maximumExecutionFrequency,omitempty" tf:"maximum_execution_frequency,omitempty"` + + // Scope defines which resources can trigger an evaluation for the rule. See Scope Below. + Scope *ScopeInitParameters `json:"scope,omitempty" tf:"scope,omitempty"` + + // Source specifies the rule owner, the rule identifier, and the notifications that cause the function to evaluate your AWS resources. See Source Below. + Source *SourceInitParameters `json:"source,omitempty" tf:"source,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ConfigRuleObservation struct { + + // The ARN of the config rule + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description of the rule + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The modes the Config rule can be evaluated in. See Evaluation Mode for more details. + EvaluationMode []EvaluationModeObservation `json:"evaluationMode,omitempty" tf:"evaluation_mode,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A string in JSON format that is passed to the AWS Config rule Lambda function. + InputParameters *string `json:"inputParameters,omitempty" tf:"input_parameters,omitempty"` + + // The maximum frequency with which AWS Config runs evaluations for a rule. + MaximumExecutionFrequency *string `json:"maximumExecutionFrequency,omitempty" tf:"maximum_execution_frequency,omitempty"` + + // The ID of the config rule + RuleID *string `json:"ruleId,omitempty" tf:"rule_id,omitempty"` + + // Scope defines which resources can trigger an evaluation for the rule. See Scope Below. + Scope *ScopeObservation `json:"scope,omitempty" tf:"scope,omitempty"` + + // Source specifies the rule owner, the rule identifier, and the notifications that cause the function to evaluate your AWS resources. See Source Below. + Source *SourceObservation `json:"source,omitempty" tf:"source,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ConfigRuleParameters struct { + + // Description of the rule + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The modes the Config rule can be evaluated in. See Evaluation Mode for more details. + // +kubebuilder:validation:Optional + EvaluationMode []EvaluationModeParameters `json:"evaluationMode,omitempty" tf:"evaluation_mode,omitempty"` + + // A string in JSON format that is passed to the AWS Config rule Lambda function. + // +kubebuilder:validation:Optional + InputParameters *string `json:"inputParameters,omitempty" tf:"input_parameters,omitempty"` + + // The maximum frequency with which AWS Config runs evaluations for a rule. + // +kubebuilder:validation:Optional + MaximumExecutionFrequency *string `json:"maximumExecutionFrequency,omitempty" tf:"maximum_execution_frequency,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Scope defines which resources can trigger an evaluation for the rule. See Scope Below. + // +kubebuilder:validation:Optional + Scope *ScopeParameters `json:"scope,omitempty" tf:"scope,omitempty"` + + // Source specifies the rule owner, the rule identifier, and the notifications that cause the function to evaluate your AWS resources. See Source Below. + // +kubebuilder:validation:Optional + Source *SourceParameters `json:"source,omitempty" tf:"source,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CustomPolicyDetailsInitParameters struct { + + // The boolean expression for enabling debug logging for your Config Custom Policy rule. The default value is false. 
+ EnableDebugLogDelivery *bool `json:"enableDebugLogDelivery,omitempty" tf:"enable_debug_log_delivery,omitempty"` + + // The runtime system for your Config Custom Policy rule. Guard is a policy-as-code language that allows you to write policies that are enforced by Config Custom Policy rules. For more information about Guard, see the Guard GitHub Repository. + PolicyRuntime *string `json:"policyRuntime,omitempty" tf:"policy_runtime,omitempty"` + + // The policy definition containing the logic for your Config Custom Policy rule. + PolicyText *string `json:"policyText,omitempty" tf:"policy_text,omitempty"` +} + +type CustomPolicyDetailsObservation struct { + + // The boolean expression for enabling debug logging for your Config Custom Policy rule. The default value is false. + EnableDebugLogDelivery *bool `json:"enableDebugLogDelivery,omitempty" tf:"enable_debug_log_delivery,omitempty"` + + // The runtime system for your Config Custom Policy rule. Guard is a policy-as-code language that allows you to write policies that are enforced by Config Custom Policy rules. For more information about Guard, see the Guard GitHub Repository. + PolicyRuntime *string `json:"policyRuntime,omitempty" tf:"policy_runtime,omitempty"` + + // The policy definition containing the logic for your Config Custom Policy rule. + PolicyText *string `json:"policyText,omitempty" tf:"policy_text,omitempty"` +} + +type CustomPolicyDetailsParameters struct { + + // The boolean expression for enabling debug logging for your Config Custom Policy rule. The default value is false. + // +kubebuilder:validation:Optional + EnableDebugLogDelivery *bool `json:"enableDebugLogDelivery,omitempty" tf:"enable_debug_log_delivery,omitempty"` + + // The runtime system for your Config Custom Policy rule. Guard is a policy-as-code language that allows you to write policies that are enforced by Config Custom Policy rules. For more information about Guard, see the Guard GitHub Repository. 
+ // +kubebuilder:validation:Optional + PolicyRuntime *string `json:"policyRuntime" tf:"policy_runtime,omitempty"` + + // The policy definition containing the logic for your Config Custom Policy rule. + // +kubebuilder:validation:Optional + PolicyText *string `json:"policyText" tf:"policy_text,omitempty"` +} + +type EvaluationModeInitParameters struct { + + // The mode of an evaluation. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type EvaluationModeObservation struct { + + // The mode of an evaluation. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type EvaluationModeParameters struct { + + // The mode of an evaluation. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type ScopeInitParameters struct { + + // The IDs of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID, you must specify one resource type for compliance_resource_types. + ComplianceResourceID *string `json:"complianceResourceId,omitempty" tf:"compliance_resource_id,omitempty"` + + // A list of resource types of only those AWS resources that you want to trigger an evaluation for the ruleE.g., AWS::EC2::Instance. You can only specify one type if you also specify a resource ID for compliance_resource_id. See relevant part of AWS Docs for available types. + // +listType=set + ComplianceResourceTypes []*string `json:"complianceResourceTypes,omitempty" tf:"compliance_resource_types,omitempty"` + + // The tag key that is applied to only those AWS resources that you want you want to trigger an evaluation for the rule. + TagKey *string `json:"tagKey,omitempty" tf:"tag_key,omitempty"` + + // The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule. 
+ TagValue *string `json:"tagValue,omitempty" tf:"tag_value,omitempty"` +} + +type ScopeObservation struct { + + // The IDs of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID, you must specify one resource type for compliance_resource_types. + ComplianceResourceID *string `json:"complianceResourceId,omitempty" tf:"compliance_resource_id,omitempty"` + + // A list of resource types of only those AWS resources that you want to trigger an evaluation for the ruleE.g., AWS::EC2::Instance. You can only specify one type if you also specify a resource ID for compliance_resource_id. See relevant part of AWS Docs for available types. + // +listType=set + ComplianceResourceTypes []*string `json:"complianceResourceTypes,omitempty" tf:"compliance_resource_types,omitempty"` + + // The tag key that is applied to only those AWS resources that you want you want to trigger an evaluation for the rule. + TagKey *string `json:"tagKey,omitempty" tf:"tag_key,omitempty"` + + // The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule. + TagValue *string `json:"tagValue,omitempty" tf:"tag_value,omitempty"` +} + +type ScopeParameters struct { + + // The IDs of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID, you must specify one resource type for compliance_resource_types. + // +kubebuilder:validation:Optional + ComplianceResourceID *string `json:"complianceResourceId,omitempty" tf:"compliance_resource_id,omitempty"` + + // A list of resource types of only those AWS resources that you want to trigger an evaluation for the ruleE.g., AWS::EC2::Instance. You can only specify one type if you also specify a resource ID for compliance_resource_id. See relevant part of AWS Docs for available types. 
+ // +kubebuilder:validation:Optional + // +listType=set + ComplianceResourceTypes []*string `json:"complianceResourceTypes,omitempty" tf:"compliance_resource_types,omitempty"` + + // The tag key that is applied to only those AWS resources that you want you want to trigger an evaluation for the rule. + // +kubebuilder:validation:Optional + TagKey *string `json:"tagKey,omitempty" tf:"tag_key,omitempty"` + + // The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule. + // +kubebuilder:validation:Optional + TagValue *string `json:"tagValue,omitempty" tf:"tag_value,omitempty"` +} + +type SourceDetailInitParameters struct { + + // The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWSresources. This defaults to aws.config and is the only valid value. + EventSource *string `json:"eventSource,omitempty" tf:"event_source,omitempty"` + + // The maximum frequency with which AWS Config runs evaluations for a rule. + MaximumExecutionFrequency *string `json:"maximumExecutionFrequency,omitempty" tf:"maximum_execution_frequency,omitempty"` + + // The type of notification that triggers AWS Config to run an evaluation for a rule. You canspecify the following notification types: + MessageType *string `json:"messageType,omitempty" tf:"message_type,omitempty"` +} + +type SourceDetailObservation struct { + + // The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWSresources. This defaults to aws.config and is the only valid value. + EventSource *string `json:"eventSource,omitempty" tf:"event_source,omitempty"` + + // The maximum frequency with which AWS Config runs evaluations for a rule. + MaximumExecutionFrequency *string `json:"maximumExecutionFrequency,omitempty" tf:"maximum_execution_frequency,omitempty"` + + // The type of notification that triggers AWS Config to run an evaluation for a rule. 
You canspecify the following notification types: + MessageType *string `json:"messageType,omitempty" tf:"message_type,omitempty"` +} + +type SourceDetailParameters struct { + + // The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWSresources. This defaults to aws.config and is the only valid value. + // +kubebuilder:validation:Optional + EventSource *string `json:"eventSource,omitempty" tf:"event_source,omitempty"` + + // The maximum frequency with which AWS Config runs evaluations for a rule. + // +kubebuilder:validation:Optional + MaximumExecutionFrequency *string `json:"maximumExecutionFrequency,omitempty" tf:"maximum_execution_frequency,omitempty"` + + // The type of notification that triggers AWS Config to run an evaluation for a rule. You canspecify the following notification types: + // +kubebuilder:validation:Optional + MessageType *string `json:"messageType,omitempty" tf:"message_type,omitempty"` +} + +type SourceInitParameters struct { + + // Provides the runtime system, policy definition, and whether debug logging is enabled. Required when owner is set to CUSTOM_POLICY. See Custom Policy Details Below. + CustomPolicyDetails *CustomPolicyDetailsInitParameters `json:"customPolicyDetails,omitempty" tf:"custom_policy_details,omitempty"` + + // Indicates whether AWS or the customer owns and manages the AWS Config rule. Valid values are AWS, CUSTOM_LAMBDA or CUSTOM_POLICY. For more information about managed rules, see the AWS Config Managed Rules documentation. For more information about custom rules, see the AWS Config Custom Rules documentation. Custom Lambda Functions require permissions to allow the AWS Config service to invoke them, e.g., via the aws_lambda_permission resource. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. Only valid if owner is CUSTOM_LAMBDA or CUSTOM_POLICY. 
See Source Detail Below. + SourceDetail []SourceDetailInitParameters `json:"sourceDetail,omitempty" tf:"source_detail,omitempty"` + + // For AWS Config managed rules, a predefined identifier, e.g IAM_PASSWORD_POLICY. For custom Lambda rules, the identifier is the ARN of the Lambda Function, such as arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name or the arn attribute of the aws_lambda_function resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + SourceIdentifier *string `json:"sourceIdentifier,omitempty" tf:"source_identifier,omitempty"` + + // Reference to a Function in lambda to populate sourceIdentifier. + // +kubebuilder:validation:Optional + SourceIdentifierRef *v1.Reference `json:"sourceIdentifierRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate sourceIdentifier. + // +kubebuilder:validation:Optional + SourceIdentifierSelector *v1.Selector `json:"sourceIdentifierSelector,omitempty" tf:"-"` +} + +type SourceObservation struct { + + // Provides the runtime system, policy definition, and whether debug logging is enabled. Required when owner is set to CUSTOM_POLICY. See Custom Policy Details Below. + CustomPolicyDetails *CustomPolicyDetailsObservation `json:"customPolicyDetails,omitempty" tf:"custom_policy_details,omitempty"` + + // Indicates whether AWS or the customer owns and manages the AWS Config rule. Valid values are AWS, CUSTOM_LAMBDA or CUSTOM_POLICY. For more information about managed rules, see the AWS Config Managed Rules documentation. For more information about custom rules, see the AWS Config Custom Rules documentation. Custom Lambda Functions require permissions to allow the AWS Config service to invoke them, e.g., via the aws_lambda_permission resource. 
+ Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. Only valid if owner is CUSTOM_LAMBDA or CUSTOM_POLICY. See Source Detail Below. + SourceDetail []SourceDetailObservation `json:"sourceDetail,omitempty" tf:"source_detail,omitempty"` + + // For AWS Config managed rules, a predefined identifier, e.g IAM_PASSWORD_POLICY. For custom Lambda rules, the identifier is the ARN of the Lambda Function, such as arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name or the arn attribute of the aws_lambda_function resource. + SourceIdentifier *string `json:"sourceIdentifier,omitempty" tf:"source_identifier,omitempty"` +} + +type SourceParameters struct { + + // Provides the runtime system, policy definition, and whether debug logging is enabled. Required when owner is set to CUSTOM_POLICY. See Custom Policy Details Below. + // +kubebuilder:validation:Optional + CustomPolicyDetails *CustomPolicyDetailsParameters `json:"customPolicyDetails,omitempty" tf:"custom_policy_details,omitempty"` + + // Indicates whether AWS or the customer owns and manages the AWS Config rule. Valid values are AWS, CUSTOM_LAMBDA or CUSTOM_POLICY. For more information about managed rules, see the AWS Config Managed Rules documentation. For more information about custom rules, see the AWS Config Custom Rules documentation. Custom Lambda Functions require permissions to allow the AWS Config service to invoke them, e.g., via the aws_lambda_permission resource. + // +kubebuilder:validation:Optional + Owner *string `json:"owner" tf:"owner,omitempty"` + + // Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. Only valid if owner is CUSTOM_LAMBDA or CUSTOM_POLICY. See Source Detail Below. 
+ // +kubebuilder:validation:Optional + SourceDetail []SourceDetailParameters `json:"sourceDetail,omitempty" tf:"source_detail,omitempty"` + + // For AWS Config managed rules, a predefined identifier, e.g IAM_PASSWORD_POLICY. For custom Lambda rules, the identifier is the ARN of the Lambda Function, such as arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name or the arn attribute of the aws_lambda_function resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + SourceIdentifier *string `json:"sourceIdentifier,omitempty" tf:"source_identifier,omitempty"` + + // Reference to a Function in lambda to populate sourceIdentifier. + // +kubebuilder:validation:Optional + SourceIdentifierRef *v1.Reference `json:"sourceIdentifierRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate sourceIdentifier. + // +kubebuilder:validation:Optional + SourceIdentifierSelector *v1.Selector `json:"sourceIdentifierSelector,omitempty" tf:"-"` +} + +// ConfigRuleSpec defines the desired state of ConfigRule +type ConfigRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConfigRuleInitParameters `json:"initProvider,omitempty"` +} + +// ConfigRuleStatus defines the observed state of ConfigRule. +type ConfigRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ConfigRule is the Schema for the ConfigRules API. Provides an AWS Config Rule. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ConfigRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.source) || (has(self.initProvider) && has(self.initProvider.source))",message="spec.forProvider.source is a required parameter" + Spec ConfigRuleSpec `json:"spec"` + Status ConfigRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigRuleList contains a list of ConfigRules +type ConfigRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ConfigRule `json:"items"` +} + +// Repository type metadata. 
+var ( + ConfigRule_Kind = "ConfigRule" + ConfigRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ConfigRule_Kind}.String() + ConfigRule_KindAPIVersion = ConfigRule_Kind + "." + CRDGroupVersion.String() + ConfigRule_GroupVersionKind = CRDGroupVersion.WithKind(ConfigRule_Kind) +) + +func init() { + SchemeBuilder.Register(&ConfigRule{}, &ConfigRuleList{}) +} diff --git a/apis/configservice/v1beta2/zz_configurationaggregator_terraformed.go b/apis/configservice/v1beta2/zz_configurationaggregator_terraformed.go new file mode 100755 index 0000000000..a09eca5441 --- /dev/null +++ b/apis/configservice/v1beta2/zz_configurationaggregator_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ConfigurationAggregator +func (mg *ConfigurationAggregator) GetTerraformResourceType() string { + return "aws_config_configuration_aggregator" +} + +// GetConnectionDetailsMapping for this ConfigurationAggregator +func (tr *ConfigurationAggregator) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ConfigurationAggregator +func (tr *ConfigurationAggregator) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ConfigurationAggregator +func (tr *ConfigurationAggregator) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying 
Terraform resource of this ConfigurationAggregator
+func (tr *ConfigurationAggregator) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this ConfigurationAggregator
+func (tr *ConfigurationAggregator) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this ConfigurationAggregator
+func (tr *ConfigurationAggregator) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this ConfigurationAggregator
+func (tr *ConfigurationAggregator) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ConfigurationAggregator
+func (tr *ConfigurationAggregator) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this ConfigurationAggregator using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *ConfigurationAggregator) LateInitialize(attrs []byte) (bool, error) {
+	params := &ConfigurationAggregatorParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *ConfigurationAggregator) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/configservice/v1beta2/zz_configurationaggregator_types.go b/apis/configservice/v1beta2/zz_configurationaggregator_types.go
new file mode 100755
index 0000000000..cd20584796
--- /dev/null
+++ b/apis/configservice/v1beta2/zz_configurationaggregator_types.go
@@ -0,0 +1,228 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type AccountAggregationSourceInitParameters struct {
+
+	// List of 12-digit account IDs of the account(s) being aggregated.
+ AccountIds []*string `json:"accountIds,omitempty" tf:"account_ids,omitempty"` + + // If true, aggregate existing AWS Config regions and future regions. + AllRegions *bool `json:"allRegions,omitempty" tf:"all_regions,omitempty"` + + // List of source regions being aggregated. + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` +} + +type AccountAggregationSourceObservation struct { + + // List of 12-digit account IDs of the account(s) being aggregated. + AccountIds []*string `json:"accountIds,omitempty" tf:"account_ids,omitempty"` + + // If true, aggregate existing AWS Config regions and future regions. + AllRegions *bool `json:"allRegions,omitempty" tf:"all_regions,omitempty"` + + // List of source regions being aggregated. + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` +} + +type AccountAggregationSourceParameters struct { + + // List of 12-digit account IDs of the account(s) being aggregated. + // +kubebuilder:validation:Optional + AccountIds []*string `json:"accountIds" tf:"account_ids,omitempty"` + + // If true, aggregate existing AWS Config regions and future regions. + // +kubebuilder:validation:Optional + AllRegions *bool `json:"allRegions,omitempty" tf:"all_regions,omitempty"` + + // List of source regions being aggregated. + // +kubebuilder:validation:Optional + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` +} + +type ConfigurationAggregatorInitParameters struct { + + // The account(s) to aggregate config data from as documented below. + AccountAggregationSource *AccountAggregationSourceInitParameters `json:"accountAggregationSource,omitempty" tf:"account_aggregation_source,omitempty"` + + // The organization to aggregate config data from as documented below. + OrganizationAggregationSource *OrganizationAggregationSourceInitParameters `json:"organizationAggregationSource,omitempty" tf:"organization_aggregation_source,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ConfigurationAggregatorObservation struct { + + // The account(s) to aggregate config data from as documented below. + AccountAggregationSource *AccountAggregationSourceObservation `json:"accountAggregationSource,omitempty" tf:"account_aggregation_source,omitempty"` + + // The ARN of the aggregator + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The organization to aggregate config data from as documented below. + OrganizationAggregationSource *OrganizationAggregationSourceObservation `json:"organizationAggregationSource,omitempty" tf:"organization_aggregation_source,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ConfigurationAggregatorParameters struct { + + // The account(s) to aggregate config data from as documented below. + // +kubebuilder:validation:Optional + AccountAggregationSource *AccountAggregationSourceParameters `json:"accountAggregationSource,omitempty" tf:"account_aggregation_source,omitempty"` + + // The organization to aggregate config data from as documented below. + // +kubebuilder:validation:Optional + OrganizationAggregationSource *OrganizationAggregationSourceParameters `json:"organizationAggregationSource,omitempty" tf:"organization_aggregation_source,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type OrganizationAggregationSourceInitParameters struct { + + // If true, aggregate existing AWS Config regions and future regions. + AllRegions *bool `json:"allRegions,omitempty" tf:"all_regions,omitempty"` + + // List of source regions being aggregated. + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` + + // ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type OrganizationAggregationSourceObservation struct { + + // If true, aggregate existing AWS Config regions and future regions. + AllRegions *bool `json:"allRegions,omitempty" tf:"all_regions,omitempty"` + + // List of source regions being aggregated. + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` + + // ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type OrganizationAggregationSourceParameters struct { + + // If true, aggregate existing AWS Config regions and future regions. + // +kubebuilder:validation:Optional + AllRegions *bool `json:"allRegions,omitempty" tf:"all_regions,omitempty"` + + // List of source regions being aggregated. 
+ // +kubebuilder:validation:Optional + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` + + // ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +// ConfigurationAggregatorSpec defines the desired state of ConfigurationAggregator +type ConfigurationAggregatorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigurationAggregatorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConfigurationAggregatorInitParameters `json:"initProvider,omitempty"` +} + +// ConfigurationAggregatorStatus defines the observed state of ConfigurationAggregator. 
+type ConfigurationAggregatorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigurationAggregatorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ConfigurationAggregator is the Schema for the ConfigurationAggregators API. Manages an AWS Config Configuration Aggregator. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ConfigurationAggregator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ConfigurationAggregatorSpec `json:"spec"` + Status ConfigurationAggregatorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigurationAggregatorList contains a list of ConfigurationAggregators +type ConfigurationAggregatorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ConfigurationAggregator `json:"items"` +} + +// Repository type metadata. +var ( + ConfigurationAggregator_Kind = "ConfigurationAggregator" + ConfigurationAggregator_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ConfigurationAggregator_Kind}.String() + ConfigurationAggregator_KindAPIVersion = ConfigurationAggregator_Kind + "." 
+ CRDGroupVersion.String() + ConfigurationAggregator_GroupVersionKind = CRDGroupVersion.WithKind(ConfigurationAggregator_Kind) +) + +func init() { + SchemeBuilder.Register(&ConfigurationAggregator{}, &ConfigurationAggregatorList{}) +} diff --git a/apis/configservice/v1beta2/zz_configurationrecorder_terraformed.go b/apis/configservice/v1beta2/zz_configurationrecorder_terraformed.go new file mode 100755 index 0000000000..3db7d13671 --- /dev/null +++ b/apis/configservice/v1beta2/zz_configurationrecorder_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ConfigurationRecorder +func (mg *ConfigurationRecorder) GetTerraformResourceType() string { + return "aws_config_configuration_recorder" +} + +// GetConnectionDetailsMapping for this ConfigurationRecorder +func (tr *ConfigurationRecorder) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ConfigurationRecorder +func (tr *ConfigurationRecorder) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ConfigurationRecorder +func (tr *ConfigurationRecorder) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ConfigurationRecorder +func (tr *ConfigurationRecorder) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + 
return *tr.Status.AtProvider.ID +} + +// GetParameters of this ConfigurationRecorder +func (tr *ConfigurationRecorder) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ConfigurationRecorder +func (tr *ConfigurationRecorder) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ConfigurationRecorder +func (tr *ConfigurationRecorder) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ConfigurationRecorder +func (tr *ConfigurationRecorder) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ConfigurationRecorder using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ConfigurationRecorder) LateInitialize(attrs []byte) (bool, error) { + params := &ConfigurationRecorderParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ConfigurationRecorder) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/configservice/v1beta2/zz_configurationrecorder_types.go b/apis/configservice/v1beta2/zz_configurationrecorder_types.go new file mode 100755 index 0000000000..4a95b22045 --- /dev/null +++ b/apis/configservice/v1beta2/zz_configurationrecorder_types.go @@ -0,0 +1,310 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationRecorderInitParameters struct { + + // Recording group - see below. + RecordingGroup *RecordingGroupInitParameters `json:"recordingGroup,omitempty" tf:"recording_group,omitempty"` + + // Recording mode - see below. 
+ RecordingMode *RecordingModeInitParameters `json:"recordingMode,omitempty" tf:"recording_mode,omitempty"` + + // Amazon Resource Name (ARN) of the IAM role. Used to make read or write requests to the delivery channel and to describe the AWS resources associated with the account. See AWS Docs for more details. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type ConfigurationRecorderObservation struct { + + // Name of the recorder + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Recording group - see below. + RecordingGroup *RecordingGroupObservation `json:"recordingGroup,omitempty" tf:"recording_group,omitempty"` + + // Recording mode - see below. + RecordingMode *RecordingModeObservation `json:"recordingMode,omitempty" tf:"recording_mode,omitempty"` + + // Amazon Resource Name (ARN) of the IAM role. Used to make read or write requests to the delivery channel and to describe the AWS resources associated with the account. See AWS Docs for more details. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type ConfigurationRecorderParameters struct { + + // Recording group - see below. + // +kubebuilder:validation:Optional + RecordingGroup *RecordingGroupParameters `json:"recordingGroup,omitempty" tf:"recording_group,omitempty"` + + // Recording mode - see below. 
+ // +kubebuilder:validation:Optional + RecordingMode *RecordingModeParameters `json:"recordingMode,omitempty" tf:"recording_mode,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Amazon Resource Name (ARN) of the IAM role. Used to make read or write requests to the delivery channel and to describe the AWS resources associated with the account. See AWS Docs for more details. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type ExclusionByResourceTypesInitParameters struct { + + // A list that specifies the types of AWS resources for which AWS Config excludes records configuration changes. See relevant part of AWS Docs for available types. + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` +} + +type ExclusionByResourceTypesObservation struct { + + // A list that specifies the types of AWS resources for which AWS Config excludes records configuration changes. See relevant part of AWS Docs for available types. + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` +} + +type ExclusionByResourceTypesParameters struct { + + // A list that specifies the types of AWS resources for which AWS Config excludes records configuration changes. 
See relevant part of AWS Docs for available types. + // +kubebuilder:validation:Optional + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` +} + +type RecordingGroupInitParameters struct { + + // Specifies whether AWS Config records configuration changes for every supported type of regional resource (which includes any new type that will become supported in the future). Conflicts with resource_types. Defaults to true. + AllSupported *bool `json:"allSupported,omitempty" tf:"all_supported,omitempty"` + + // An object that specifies how AWS Config excludes resource types from being recorded by the configuration recorder.To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES Requires all_supported = false. Conflicts with resource_types. + ExclusionByResourceTypes []ExclusionByResourceTypesInitParameters `json:"exclusionByResourceTypes,omitempty" tf:"exclusion_by_resource_types,omitempty"` + + // Specifies whether AWS Config includes all supported types of global resources with the resources that it records. Requires all_supported = true. Conflicts with resource_types. + IncludeGlobalResourceTypes *bool `json:"includeGlobalResourceTypes,omitempty" tf:"include_global_resource_types,omitempty"` + + // Recording Strategy. Detailed below. + RecordingStrategy []RecordingStrategyInitParameters `json:"recordingStrategy,omitempty" tf:"recording_strategy,omitempty"` + + // A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, AWS::EC2::Instance or AWS::CloudTrail::Trail). See relevant part of AWS Docs for available types. In order to use this attribute, all_supported must be set to false. 
+ // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` +} + +type RecordingGroupObservation struct { + + // Specifies whether AWS Config records configuration changes for every supported type of regional resource (which includes any new type that will become supported in the future). Conflicts with resource_types. Defaults to true. + AllSupported *bool `json:"allSupported,omitempty" tf:"all_supported,omitempty"` + + // An object that specifies how AWS Config excludes resource types from being recorded by the configuration recorder.To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES Requires all_supported = false. Conflicts with resource_types. + ExclusionByResourceTypes []ExclusionByResourceTypesObservation `json:"exclusionByResourceTypes,omitempty" tf:"exclusion_by_resource_types,omitempty"` + + // Specifies whether AWS Config includes all supported types of global resources with the resources that it records. Requires all_supported = true. Conflicts with resource_types. + IncludeGlobalResourceTypes *bool `json:"includeGlobalResourceTypes,omitempty" tf:"include_global_resource_types,omitempty"` + + // Recording Strategy. Detailed below. + RecordingStrategy []RecordingStrategyObservation `json:"recordingStrategy,omitempty" tf:"recording_strategy,omitempty"` + + // A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, AWS::EC2::Instance or AWS::CloudTrail::Trail). See relevant part of AWS Docs for available types. In order to use this attribute, all_supported must be set to false. 
+ // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` +} + +type RecordingGroupParameters struct { + + // Specifies whether AWS Config records configuration changes for every supported type of regional resource (which includes any new type that will become supported in the future). Conflicts with resource_types. Defaults to true. + // +kubebuilder:validation:Optional + AllSupported *bool `json:"allSupported,omitempty" tf:"all_supported,omitempty"` + + // An object that specifies how AWS Config excludes resource types from being recorded by the configuration recorder.To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES Requires all_supported = false. Conflicts with resource_types. + // +kubebuilder:validation:Optional + ExclusionByResourceTypes []ExclusionByResourceTypesParameters `json:"exclusionByResourceTypes,omitempty" tf:"exclusion_by_resource_types,omitempty"` + + // Specifies whether AWS Config includes all supported types of global resources with the resources that it records. Requires all_supported = true. Conflicts with resource_types. + // +kubebuilder:validation:Optional + IncludeGlobalResourceTypes *bool `json:"includeGlobalResourceTypes,omitempty" tf:"include_global_resource_types,omitempty"` + + // Recording Strategy. Detailed below. + // +kubebuilder:validation:Optional + RecordingStrategy []RecordingStrategyParameters `json:"recordingStrategy,omitempty" tf:"recording_strategy,omitempty"` + + // A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, AWS::EC2::Instance or AWS::CloudTrail::Trail). See relevant part of AWS Docs for available types. In order to use this attribute, all_supported must be set to false. 
+ // +kubebuilder:validation:Optional + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` +} + +type RecordingModeInitParameters struct { + + // Default reecording frequency. CONTINUOUS or DAILY. + RecordingFrequency *string `json:"recordingFrequency,omitempty" tf:"recording_frequency,omitempty"` + + // Recording mode overrides. Detailed below. + RecordingModeOverride *RecordingModeOverrideInitParameters `json:"recordingModeOverride,omitempty" tf:"recording_mode_override,omitempty"` +} + +type RecordingModeObservation struct { + + // Default reecording frequency. CONTINUOUS or DAILY. + RecordingFrequency *string `json:"recordingFrequency,omitempty" tf:"recording_frequency,omitempty"` + + // Recording mode overrides. Detailed below. + RecordingModeOverride *RecordingModeOverrideObservation `json:"recordingModeOverride,omitempty" tf:"recording_mode_override,omitempty"` +} + +type RecordingModeOverrideInitParameters struct { + + // A description you provide of the override. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Default reecording frequency. CONTINUOUS or DAILY. + RecordingFrequency *string `json:"recordingFrequency,omitempty" tf:"recording_frequency,omitempty"` + + // A list that specifies the types of AWS resources for which AWS Config excludes records configuration changes. See relevant part of AWS Docs for available types. + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` +} + +type RecordingModeOverrideObservation struct { + + // A description you provide of the override. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Default reecording frequency. CONTINUOUS or DAILY. 
+ RecordingFrequency *string `json:"recordingFrequency,omitempty" tf:"recording_frequency,omitempty"` + + // A list that specifies the types of AWS resources for which AWS Config excludes records configuration changes. See relevant part of AWS Docs for available types. + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` +} + +type RecordingModeOverrideParameters struct { + + // A description you provide of the override. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Default reecording frequency. CONTINUOUS or DAILY. + // +kubebuilder:validation:Optional + RecordingFrequency *string `json:"recordingFrequency" tf:"recording_frequency,omitempty"` + + // A list that specifies the types of AWS resources for which AWS Config excludes records configuration changes. See relevant part of AWS Docs for available types. + // +kubebuilder:validation:Optional + // +listType=set + ResourceTypes []*string `json:"resourceTypes" tf:"resource_types,omitempty"` +} + +type RecordingModeParameters struct { + + // Default reecording frequency. CONTINUOUS or DAILY. + // +kubebuilder:validation:Optional + RecordingFrequency *string `json:"recordingFrequency,omitempty" tf:"recording_frequency,omitempty"` + + // Recording mode overrides. Detailed below. 
+ // +kubebuilder:validation:Optional + RecordingModeOverride *RecordingModeOverrideParameters `json:"recordingModeOverride,omitempty" tf:"recording_mode_override,omitempty"` +} + +type RecordingStrategyInitParameters struct { + UseOnly *string `json:"useOnly,omitempty" tf:"use_only,omitempty"` +} + +type RecordingStrategyObservation struct { + UseOnly *string `json:"useOnly,omitempty" tf:"use_only,omitempty"` +} + +type RecordingStrategyParameters struct { + + // +kubebuilder:validation:Optional + UseOnly *string `json:"useOnly,omitempty" tf:"use_only,omitempty"` +} + +// ConfigurationRecorderSpec defines the desired state of ConfigurationRecorder +type ConfigurationRecorderSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigurationRecorderParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConfigurationRecorderInitParameters `json:"initProvider,omitempty"` +} + +// ConfigurationRecorderStatus defines the observed state of ConfigurationRecorder. +type ConfigurationRecorderStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigurationRecorderObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ConfigurationRecorder is the Schema for the ConfigurationRecorders API. Provides an AWS Config Configuration Recorder. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ConfigurationRecorder struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ConfigurationRecorderSpec `json:"spec"` + Status ConfigurationRecorderStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigurationRecorderList contains a list of ConfigurationRecorders +type ConfigurationRecorderList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ConfigurationRecorder `json:"items"` +} + +// Repository type metadata. +var ( + ConfigurationRecorder_Kind = "ConfigurationRecorder" + ConfigurationRecorder_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ConfigurationRecorder_Kind}.String() + ConfigurationRecorder_KindAPIVersion = ConfigurationRecorder_Kind + "." + CRDGroupVersion.String() + ConfigurationRecorder_GroupVersionKind = CRDGroupVersion.WithKind(ConfigurationRecorder_Kind) +) + +func init() { + SchemeBuilder.Register(&ConfigurationRecorder{}, &ConfigurationRecorderList{}) +} diff --git a/apis/configservice/v1beta2/zz_deliverychannel_terraformed.go b/apis/configservice/v1beta2/zz_deliverychannel_terraformed.go new file mode 100755 index 0000000000..8f97bbba5a --- /dev/null +++ b/apis/configservice/v1beta2/zz_deliverychannel_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DeliveryChannel +func (mg *DeliveryChannel) GetTerraformResourceType() string { + return "aws_config_delivery_channel" +} + +// GetConnectionDetailsMapping for this DeliveryChannel +func (tr *DeliveryChannel) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DeliveryChannel +func (tr *DeliveryChannel) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DeliveryChannel +func (tr *DeliveryChannel) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DeliveryChannel +func (tr *DeliveryChannel) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DeliveryChannel +func (tr *DeliveryChannel) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DeliveryChannel +func (tr *DeliveryChannel) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DeliveryChannel +func (tr *DeliveryChannel) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DeliveryChannel +func (tr *DeliveryChannel) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DeliveryChannel using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DeliveryChannel) LateInitialize(attrs []byte) (bool, error) { + params := &DeliveryChannelParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DeliveryChannel) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/configservice/v1beta2/zz_deliverychannel_types.go b/apis/configservice/v1beta2/zz_deliverychannel_types.go new file mode 100755 index 0000000000..4673a20881 --- /dev/null +++ b/apis/configservice/v1beta2/zz_deliverychannel_types.go @@ -0,0 +1,178 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DeliveryChannelInitParameters struct { + + // The name of the S3 bucket used to store the configuration history. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate s3BucketName. + // +kubebuilder:validation:Optional + S3BucketNameRef *v1.Reference `json:"s3BucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate s3BucketName. + // +kubebuilder:validation:Optional + S3BucketNameSelector *v1.Selector `json:"s3BucketNameSelector,omitempty" tf:"-"` + + // The ARN of the AWS KMS key used to encrypt objects delivered by AWS Config. Must belong to the same Region as the destination S3 bucket. + S3KMSKeyArn *string `json:"s3KmsKeyArn,omitempty" tf:"s3_kms_key_arn,omitempty"` + + // The prefix for the specified S3 bucket. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` + + // Options for how AWS Config delivers configuration snapshots. 
See below + SnapshotDeliveryProperties *SnapshotDeliveryPropertiesInitParameters `json:"snapshotDeliveryProperties,omitempty" tf:"snapshot_delivery_properties,omitempty"` + + // The ARN of the SNS topic that AWS Config delivers notifications to. + SnsTopicArn *string `json:"snsTopicArn,omitempty" tf:"sns_topic_arn,omitempty"` +} + +type DeliveryChannelObservation struct { + + // The name of the delivery channel. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the S3 bucket used to store the configuration history. + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // The ARN of the AWS KMS key used to encrypt objects delivered by AWS Config. Must belong to the same Region as the destination S3 bucket. + S3KMSKeyArn *string `json:"s3KmsKeyArn,omitempty" tf:"s3_kms_key_arn,omitempty"` + + // The prefix for the specified S3 bucket. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` + + // Options for how AWS Config delivers configuration snapshots. See below + SnapshotDeliveryProperties *SnapshotDeliveryPropertiesObservation `json:"snapshotDeliveryProperties,omitempty" tf:"snapshot_delivery_properties,omitempty"` + + // The ARN of the SNS topic that AWS Config delivers notifications to. + SnsTopicArn *string `json:"snsTopicArn,omitempty" tf:"sns_topic_arn,omitempty"` +} + +type DeliveryChannelParameters struct { + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The name of the S3 bucket used to store the configuration history. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate s3BucketName. 
+ // +kubebuilder:validation:Optional + S3BucketNameRef *v1.Reference `json:"s3BucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate s3BucketName. + // +kubebuilder:validation:Optional + S3BucketNameSelector *v1.Selector `json:"s3BucketNameSelector,omitempty" tf:"-"` + + // The ARN of the AWS KMS key used to encrypt objects delivered by AWS Config. Must belong to the same Region as the destination S3 bucket. + // +kubebuilder:validation:Optional + S3KMSKeyArn *string `json:"s3KmsKeyArn,omitempty" tf:"s3_kms_key_arn,omitempty"` + + // The prefix for the specified S3 bucket. + // +kubebuilder:validation:Optional + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` + + // Options for how AWS Config delivers configuration snapshots. See below + // +kubebuilder:validation:Optional + SnapshotDeliveryProperties *SnapshotDeliveryPropertiesParameters `json:"snapshotDeliveryProperties,omitempty" tf:"snapshot_delivery_properties,omitempty"` + + // The ARN of the SNS topic that AWS Config delivers notifications to. + // +kubebuilder:validation:Optional + SnsTopicArn *string `json:"snsTopicArn,omitempty" tf:"sns_topic_arn,omitempty"` +} + +type SnapshotDeliveryPropertiesInitParameters struct { + + // - The frequency with which AWS Config recurringly delivers configuration snapshotsE.g., One_Hour or Three_Hours. Valid values are listed here. + DeliveryFrequency *string `json:"deliveryFrequency,omitempty" tf:"delivery_frequency,omitempty"` +} + +type SnapshotDeliveryPropertiesObservation struct { + + // - The frequency with which AWS Config recurringly delivers configuration snapshotsE.g., One_Hour or Three_Hours. Valid values are listed here. + DeliveryFrequency *string `json:"deliveryFrequency,omitempty" tf:"delivery_frequency,omitempty"` +} + +type SnapshotDeliveryPropertiesParameters struct { + + // - The frequency with which AWS Config recurringly delivers configuration snapshotsE.g., One_Hour or Three_Hours. 
Valid values are listed here. + // +kubebuilder:validation:Optional + DeliveryFrequency *string `json:"deliveryFrequency,omitempty" tf:"delivery_frequency,omitempty"` +} + +// DeliveryChannelSpec defines the desired state of DeliveryChannel +type DeliveryChannelSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DeliveryChannelParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DeliveryChannelInitParameters `json:"initProvider,omitempty"` +} + +// DeliveryChannelStatus defines the observed state of DeliveryChannel. +type DeliveryChannelStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DeliveryChannelObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DeliveryChannel is the Schema for the DeliveryChannels API. Provides an AWS Config Delivery Channel. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DeliveryChannel struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DeliveryChannelSpec `json:"spec"` + Status DeliveryChannelStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeliveryChannelList contains a list of DeliveryChannels +type DeliveryChannelList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DeliveryChannel `json:"items"` +} + +// Repository type metadata. +var ( + DeliveryChannel_Kind = "DeliveryChannel" + DeliveryChannel_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DeliveryChannel_Kind}.String() + DeliveryChannel_KindAPIVersion = DeliveryChannel_Kind + "." + CRDGroupVersion.String() + DeliveryChannel_GroupVersionKind = CRDGroupVersion.WithKind(DeliveryChannel_Kind) +) + +func init() { + SchemeBuilder.Register(&DeliveryChannel{}, &DeliveryChannelList{}) +} diff --git a/apis/configservice/v1beta2/zz_generated.conversion_hubs.go b/apis/configservice/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..a7ec986150 --- /dev/null +++ b/apis/configservice/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *ConfigRule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ConfigurationAggregator) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ConfigurationRecorder) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DeliveryChannel) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RemediationConfiguration) Hub() {} diff --git a/apis/configservice/v1beta2/zz_generated.deepcopy.go b/apis/configservice/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..ebe2a883e0 --- /dev/null +++ b/apis/configservice/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2871 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountAggregationSourceInitParameters) DeepCopyInto(out *AccountAggregationSourceInitParameters) { + *out = *in + if in.AccountIds != nil { + in, out := &in.AccountIds, &out.AccountIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllRegions != nil { + in, out := &in.AllRegions, &out.AllRegions + *out = new(bool) + **out = **in + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountAggregationSourceInitParameters. 
+func (in *AccountAggregationSourceInitParameters) DeepCopy() *AccountAggregationSourceInitParameters { + if in == nil { + return nil + } + out := new(AccountAggregationSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountAggregationSourceObservation) DeepCopyInto(out *AccountAggregationSourceObservation) { + *out = *in + if in.AccountIds != nil { + in, out := &in.AccountIds, &out.AccountIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllRegions != nil { + in, out := &in.AllRegions, &out.AllRegions + *out = new(bool) + **out = **in + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountAggregationSourceObservation. +func (in *AccountAggregationSourceObservation) DeepCopy() *AccountAggregationSourceObservation { + if in == nil { + return nil + } + out := new(AccountAggregationSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountAggregationSourceParameters) DeepCopyInto(out *AccountAggregationSourceParameters) { + *out = *in + if in.AccountIds != nil { + in, out := &in.AccountIds, &out.AccountIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllRegions != nil { + in, out := &in.AllRegions, &out.AllRegions + *out = new(bool) + **out = **in + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountAggregationSourceParameters. +func (in *AccountAggregationSourceParameters) DeepCopy() *AccountAggregationSourceParameters { + if in == nil { + return nil + } + out := new(AccountAggregationSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigRule) DeepCopyInto(out *ConfigRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigRule. +func (in *ConfigRule) DeepCopy() *ConfigRule { + if in == nil { + return nil + } + out := new(ConfigRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigRuleInitParameters) DeepCopyInto(out *ConfigRuleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EvaluationMode != nil { + in, out := &in.EvaluationMode, &out.EvaluationMode + *out = make([]EvaluationModeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputParameters != nil { + in, out := &in.InputParameters, &out.InputParameters + *out = new(string) + **out = **in + } + if in.MaximumExecutionFrequency != nil { + in, out := &in.MaximumExecutionFrequency, &out.MaximumExecutionFrequency + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigRuleInitParameters. +func (in *ConfigRuleInitParameters) DeepCopy() *ConfigRuleInitParameters { + if in == nil { + return nil + } + out := new(ConfigRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigRuleList) DeepCopyInto(out *ConfigRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigRuleList. +func (in *ConfigRuleList) DeepCopy() *ConfigRuleList { + if in == nil { + return nil + } + out := new(ConfigRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigRuleObservation) DeepCopyInto(out *ConfigRuleObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EvaluationMode != nil { + in, out := &in.EvaluationMode, &out.EvaluationMode + *out = make([]EvaluationModeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InputParameters != nil { + in, out := &in.InputParameters, &out.InputParameters + *out = new(string) + **out = **in + } + if in.MaximumExecutionFrequency != nil { + in, out := &in.MaximumExecutionFrequency, &out.MaximumExecutionFrequency + *out = new(string) + **out = **in + } + if in.RuleID != nil { + in, out := &in.RuleID, &out.RuleID + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = 
new(ScopeObservation) + (*in).DeepCopyInto(*out) + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigRuleObservation. +func (in *ConfigRuleObservation) DeepCopy() *ConfigRuleObservation { + if in == nil { + return nil + } + out := new(ConfigRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigRuleParameters) DeepCopyInto(out *ConfigRuleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EvaluationMode != nil { + in, out := &in.EvaluationMode, &out.EvaluationMode + *out = make([]EvaluationModeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputParameters != nil { + in, out := &in.InputParameters, &out.InputParameters + *out = new(string) + **out = **in + } + if in.MaximumExecutionFrequency != nil { + in, out := &in.MaximumExecutionFrequency, &out.MaximumExecutionFrequency + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeParameters) + (*in).DeepCopyInto(*out) + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigRuleParameters. +func (in *ConfigRuleParameters) DeepCopy() *ConfigRuleParameters { + if in == nil { + return nil + } + out := new(ConfigRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigRuleSpec) DeepCopyInto(out *ConfigRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigRuleSpec. +func (in *ConfigRuleSpec) DeepCopy() *ConfigRuleSpec { + if in == nil { + return nil + } + out := new(ConfigRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigRuleStatus) DeepCopyInto(out *ConfigRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigRuleStatus. +func (in *ConfigRuleStatus) DeepCopy() *ConfigRuleStatus { + if in == nil { + return nil + } + out := new(ConfigRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationAggregator) DeepCopyInto(out *ConfigurationAggregator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationAggregator. +func (in *ConfigurationAggregator) DeepCopy() *ConfigurationAggregator { + if in == nil { + return nil + } + out := new(ConfigurationAggregator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ConfigurationAggregator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationAggregatorInitParameters) DeepCopyInto(out *ConfigurationAggregatorInitParameters) { + *out = *in + if in.AccountAggregationSource != nil { + in, out := &in.AccountAggregationSource, &out.AccountAggregationSource + *out = new(AccountAggregationSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OrganizationAggregationSource != nil { + in, out := &in.OrganizationAggregationSource, &out.OrganizationAggregationSource + *out = new(OrganizationAggregationSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationAggregatorInitParameters. +func (in *ConfigurationAggregatorInitParameters) DeepCopy() *ConfigurationAggregatorInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationAggregatorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationAggregatorList) DeepCopyInto(out *ConfigurationAggregatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigurationAggregator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationAggregatorList. +func (in *ConfigurationAggregatorList) DeepCopy() *ConfigurationAggregatorList { + if in == nil { + return nil + } + out := new(ConfigurationAggregatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigurationAggregatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationAggregatorObservation) DeepCopyInto(out *ConfigurationAggregatorObservation) { + *out = *in + if in.AccountAggregationSource != nil { + in, out := &in.AccountAggregationSource, &out.AccountAggregationSource + *out = new(AccountAggregationSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OrganizationAggregationSource != nil { + in, out := &in.OrganizationAggregationSource, &out.OrganizationAggregationSource + *out = new(OrganizationAggregationSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationAggregatorObservation. +func (in *ConfigurationAggregatorObservation) DeepCopy() *ConfigurationAggregatorObservation { + if in == nil { + return nil + } + out := new(ConfigurationAggregatorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationAggregatorParameters) DeepCopyInto(out *ConfigurationAggregatorParameters) { + *out = *in + if in.AccountAggregationSource != nil { + in, out := &in.AccountAggregationSource, &out.AccountAggregationSource + *out = new(AccountAggregationSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.OrganizationAggregationSource != nil { + in, out := &in.OrganizationAggregationSource, &out.OrganizationAggregationSource + *out = new(OrganizationAggregationSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationAggregatorParameters. +func (in *ConfigurationAggregatorParameters) DeepCopy() *ConfigurationAggregatorParameters { + if in == nil { + return nil + } + out := new(ConfigurationAggregatorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationAggregatorSpec) DeepCopyInto(out *ConfigurationAggregatorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationAggregatorSpec. 
+func (in *ConfigurationAggregatorSpec) DeepCopy() *ConfigurationAggregatorSpec { + if in == nil { + return nil + } + out := new(ConfigurationAggregatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationAggregatorStatus) DeepCopyInto(out *ConfigurationAggregatorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationAggregatorStatus. +func (in *ConfigurationAggregatorStatus) DeepCopy() *ConfigurationAggregatorStatus { + if in == nil { + return nil + } + out := new(ConfigurationAggregatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationRecorder) DeepCopyInto(out *ConfigurationRecorder) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationRecorder. +func (in *ConfigurationRecorder) DeepCopy() *ConfigurationRecorder { + if in == nil { + return nil + } + out := new(ConfigurationRecorder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigurationRecorder) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationRecorderInitParameters) DeepCopyInto(out *ConfigurationRecorderInitParameters) { + *out = *in + if in.RecordingGroup != nil { + in, out := &in.RecordingGroup, &out.RecordingGroup + *out = new(RecordingGroupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RecordingMode != nil { + in, out := &in.RecordingMode, &out.RecordingMode + *out = new(RecordingModeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationRecorderInitParameters. +func (in *ConfigurationRecorderInitParameters) DeepCopy() *ConfigurationRecorderInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationRecorderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationRecorderList) DeepCopyInto(out *ConfigurationRecorderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigurationRecorder, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationRecorderList. 
+func (in *ConfigurationRecorderList) DeepCopy() *ConfigurationRecorderList { + if in == nil { + return nil + } + out := new(ConfigurationRecorderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigurationRecorderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationRecorderObservation) DeepCopyInto(out *ConfigurationRecorderObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RecordingGroup != nil { + in, out := &in.RecordingGroup, &out.RecordingGroup + *out = new(RecordingGroupObservation) + (*in).DeepCopyInto(*out) + } + if in.RecordingMode != nil { + in, out := &in.RecordingMode, &out.RecordingMode + *out = new(RecordingModeObservation) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationRecorderObservation. +func (in *ConfigurationRecorderObservation) DeepCopy() *ConfigurationRecorderObservation { + if in == nil { + return nil + } + out := new(ConfigurationRecorderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationRecorderParameters) DeepCopyInto(out *ConfigurationRecorderParameters) { + *out = *in + if in.RecordingGroup != nil { + in, out := &in.RecordingGroup, &out.RecordingGroup + *out = new(RecordingGroupParameters) + (*in).DeepCopyInto(*out) + } + if in.RecordingMode != nil { + in, out := &in.RecordingMode, &out.RecordingMode + *out = new(RecordingModeParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationRecorderParameters. +func (in *ConfigurationRecorderParameters) DeepCopy() *ConfigurationRecorderParameters { + if in == nil { + return nil + } + out := new(ConfigurationRecorderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationRecorderSpec) DeepCopyInto(out *ConfigurationRecorderSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationRecorderSpec. 
+func (in *ConfigurationRecorderSpec) DeepCopy() *ConfigurationRecorderSpec { + if in == nil { + return nil + } + out := new(ConfigurationRecorderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationRecorderStatus) DeepCopyInto(out *ConfigurationRecorderStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationRecorderStatus. +func (in *ConfigurationRecorderStatus) DeepCopy() *ConfigurationRecorderStatus { + if in == nil { + return nil + } + out := new(ConfigurationRecorderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPolicyDetailsInitParameters) DeepCopyInto(out *CustomPolicyDetailsInitParameters) { + *out = *in + if in.EnableDebugLogDelivery != nil { + in, out := &in.EnableDebugLogDelivery, &out.EnableDebugLogDelivery + *out = new(bool) + **out = **in + } + if in.PolicyRuntime != nil { + in, out := &in.PolicyRuntime, &out.PolicyRuntime + *out = new(string) + **out = **in + } + if in.PolicyText != nil { + in, out := &in.PolicyText, &out.PolicyText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPolicyDetailsInitParameters. +func (in *CustomPolicyDetailsInitParameters) DeepCopy() *CustomPolicyDetailsInitParameters { + if in == nil { + return nil + } + out := new(CustomPolicyDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomPolicyDetailsObservation) DeepCopyInto(out *CustomPolicyDetailsObservation) { + *out = *in + if in.EnableDebugLogDelivery != nil { + in, out := &in.EnableDebugLogDelivery, &out.EnableDebugLogDelivery + *out = new(bool) + **out = **in + } + if in.PolicyRuntime != nil { + in, out := &in.PolicyRuntime, &out.PolicyRuntime + *out = new(string) + **out = **in + } + if in.PolicyText != nil { + in, out := &in.PolicyText, &out.PolicyText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPolicyDetailsObservation. +func (in *CustomPolicyDetailsObservation) DeepCopy() *CustomPolicyDetailsObservation { + if in == nil { + return nil + } + out := new(CustomPolicyDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPolicyDetailsParameters) DeepCopyInto(out *CustomPolicyDetailsParameters) { + *out = *in + if in.EnableDebugLogDelivery != nil { + in, out := &in.EnableDebugLogDelivery, &out.EnableDebugLogDelivery + *out = new(bool) + **out = **in + } + if in.PolicyRuntime != nil { + in, out := &in.PolicyRuntime, &out.PolicyRuntime + *out = new(string) + **out = **in + } + if in.PolicyText != nil { + in, out := &in.PolicyText, &out.PolicyText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPolicyDetailsParameters. +func (in *CustomPolicyDetailsParameters) DeepCopy() *CustomPolicyDetailsParameters { + if in == nil { + return nil + } + out := new(CustomPolicyDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeliveryChannel) DeepCopyInto(out *DeliveryChannel) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryChannel. +func (in *DeliveryChannel) DeepCopy() *DeliveryChannel { + if in == nil { + return nil + } + out := new(DeliveryChannel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeliveryChannel) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryChannelInitParameters) DeepCopyInto(out *DeliveryChannelInitParameters) { + *out = *in + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3BucketNameRef != nil { + in, out := &in.S3BucketNameRef, &out.S3BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.S3BucketNameSelector != nil { + in, out := &in.S3BucketNameSelector, &out.S3BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3KMSKeyArn != nil { + in, out := &in.S3KMSKeyArn, &out.S3KMSKeyArn + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } + if in.SnapshotDeliveryProperties != nil { + in, out := &in.SnapshotDeliveryProperties, &out.SnapshotDeliveryProperties + *out = new(SnapshotDeliveryPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SnsTopicArn != nil { + in, out := &in.SnsTopicArn, &out.SnsTopicArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new DeliveryChannelInitParameters. +func (in *DeliveryChannelInitParameters) DeepCopy() *DeliveryChannelInitParameters { + if in == nil { + return nil + } + out := new(DeliveryChannelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryChannelList) DeepCopyInto(out *DeliveryChannelList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeliveryChannel, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryChannelList. +func (in *DeliveryChannelList) DeepCopy() *DeliveryChannelList { + if in == nil { + return nil + } + out := new(DeliveryChannelList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeliveryChannelList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeliveryChannelObservation) DeepCopyInto(out *DeliveryChannelObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KMSKeyArn != nil { + in, out := &in.S3KMSKeyArn, &out.S3KMSKeyArn + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } + if in.SnapshotDeliveryProperties != nil { + in, out := &in.SnapshotDeliveryProperties, &out.SnapshotDeliveryProperties + *out = new(SnapshotDeliveryPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.SnsTopicArn != nil { + in, out := &in.SnsTopicArn, &out.SnsTopicArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryChannelObservation. +func (in *DeliveryChannelObservation) DeepCopy() *DeliveryChannelObservation { + if in == nil { + return nil + } + out := new(DeliveryChannelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeliveryChannelParameters) DeepCopyInto(out *DeliveryChannelParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3BucketNameRef != nil { + in, out := &in.S3BucketNameRef, &out.S3BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.S3BucketNameSelector != nil { + in, out := &in.S3BucketNameSelector, &out.S3BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3KMSKeyArn != nil { + in, out := &in.S3KMSKeyArn, &out.S3KMSKeyArn + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } + if in.SnapshotDeliveryProperties != nil { + in, out := &in.SnapshotDeliveryProperties, &out.SnapshotDeliveryProperties + *out = new(SnapshotDeliveryPropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.SnsTopicArn != nil { + in, out := &in.SnsTopicArn, &out.SnsTopicArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryChannelParameters. +func (in *DeliveryChannelParameters) DeepCopy() *DeliveryChannelParameters { + if in == nil { + return nil + } + out := new(DeliveryChannelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryChannelSpec) DeepCopyInto(out *DeliveryChannelSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryChannelSpec. 
+func (in *DeliveryChannelSpec) DeepCopy() *DeliveryChannelSpec { + if in == nil { + return nil + } + out := new(DeliveryChannelSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryChannelStatus) DeepCopyInto(out *DeliveryChannelStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryChannelStatus. +func (in *DeliveryChannelStatus) DeepCopy() *DeliveryChannelStatus { + if in == nil { + return nil + } + out := new(DeliveryChannelStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EvaluationModeInitParameters) DeepCopyInto(out *EvaluationModeInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluationModeInitParameters. +func (in *EvaluationModeInitParameters) DeepCopy() *EvaluationModeInitParameters { + if in == nil { + return nil + } + out := new(EvaluationModeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EvaluationModeObservation) DeepCopyInto(out *EvaluationModeObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluationModeObservation. 
+func (in *EvaluationModeObservation) DeepCopy() *EvaluationModeObservation { + if in == nil { + return nil + } + out := new(EvaluationModeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EvaluationModeParameters) DeepCopyInto(out *EvaluationModeParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluationModeParameters. +func (in *EvaluationModeParameters) DeepCopy() *EvaluationModeParameters { + if in == nil { + return nil + } + out := new(EvaluationModeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExclusionByResourceTypesInitParameters) DeepCopyInto(out *ExclusionByResourceTypesInitParameters) { + *out = *in + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionByResourceTypesInitParameters. +func (in *ExclusionByResourceTypesInitParameters) DeepCopy() *ExclusionByResourceTypesInitParameters { + if in == nil { + return nil + } + out := new(ExclusionByResourceTypesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExclusionByResourceTypesObservation) DeepCopyInto(out *ExclusionByResourceTypesObservation) { + *out = *in + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionByResourceTypesObservation. +func (in *ExclusionByResourceTypesObservation) DeepCopy() *ExclusionByResourceTypesObservation { + if in == nil { + return nil + } + out := new(ExclusionByResourceTypesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExclusionByResourceTypesParameters) DeepCopyInto(out *ExclusionByResourceTypesParameters) { + *out = *in + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionByResourceTypesParameters. +func (in *ExclusionByResourceTypesParameters) DeepCopy() *ExclusionByResourceTypesParameters { + if in == nil { + return nil + } + out := new(ExclusionByResourceTypesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExecutionControlsInitParameters) DeepCopyInto(out *ExecutionControlsInitParameters) { + *out = *in + if in.SsmControls != nil { + in, out := &in.SsmControls, &out.SsmControls + *out = new(SsmControlsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionControlsInitParameters. +func (in *ExecutionControlsInitParameters) DeepCopy() *ExecutionControlsInitParameters { + if in == nil { + return nil + } + out := new(ExecutionControlsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecutionControlsObservation) DeepCopyInto(out *ExecutionControlsObservation) { + *out = *in + if in.SsmControls != nil { + in, out := &in.SsmControls, &out.SsmControls + *out = new(SsmControlsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionControlsObservation. +func (in *ExecutionControlsObservation) DeepCopy() *ExecutionControlsObservation { + if in == nil { + return nil + } + out := new(ExecutionControlsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecutionControlsParameters) DeepCopyInto(out *ExecutionControlsParameters) { + *out = *in + if in.SsmControls != nil { + in, out := &in.SsmControls, &out.SsmControls + *out = new(SsmControlsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionControlsParameters. 
+func (in *ExecutionControlsParameters) DeepCopy() *ExecutionControlsParameters { + if in == nil { + return nil + } + out := new(ExecutionControlsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationAggregationSourceInitParameters) DeepCopyInto(out *OrganizationAggregationSourceInitParameters) { + *out = *in + if in.AllRegions != nil { + in, out := &in.AllRegions, &out.AllRegions + *out = new(bool) + **out = **in + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationAggregationSourceInitParameters. +func (in *OrganizationAggregationSourceInitParameters) DeepCopy() *OrganizationAggregationSourceInitParameters { + if in == nil { + return nil + } + out := new(OrganizationAggregationSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrganizationAggregationSourceObservation) DeepCopyInto(out *OrganizationAggregationSourceObservation) { + *out = *in + if in.AllRegions != nil { + in, out := &in.AllRegions, &out.AllRegions + *out = new(bool) + **out = **in + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationAggregationSourceObservation. +func (in *OrganizationAggregationSourceObservation) DeepCopy() *OrganizationAggregationSourceObservation { + if in == nil { + return nil + } + out := new(OrganizationAggregationSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrganizationAggregationSourceParameters) DeepCopyInto(out *OrganizationAggregationSourceParameters) { + *out = *in + if in.AllRegions != nil { + in, out := &in.AllRegions, &out.AllRegions + *out = new(bool) + **out = **in + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationAggregationSourceParameters. +func (in *OrganizationAggregationSourceParameters) DeepCopy() *OrganizationAggregationSourceParameters { + if in == nil { + return nil + } + out := new(OrganizationAggregationSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParameterInitParameters) DeepCopyInto(out *ParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceValue != nil { + in, out := &in.ResourceValue, &out.ResourceValue + *out = new(string) + **out = **in + } + if in.StaticValue != nil { + in, out := &in.StaticValue, &out.StaticValue + *out = new(string) + **out = **in + } + if in.StaticValues != nil { + in, out := &in.StaticValues, &out.StaticValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterInitParameters. +func (in *ParameterInitParameters) DeepCopy() *ParameterInitParameters { + if in == nil { + return nil + } + out := new(ParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterObservation) DeepCopyInto(out *ParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceValue != nil { + in, out := &in.ResourceValue, &out.ResourceValue + *out = new(string) + **out = **in + } + if in.StaticValue != nil { + in, out := &in.StaticValue, &out.StaticValue + *out = new(string) + **out = **in + } + if in.StaticValues != nil { + in, out := &in.StaticValues, &out.StaticValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterObservation. 
+func (in *ParameterObservation) DeepCopy() *ParameterObservation { + if in == nil { + return nil + } + out := new(ParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterParameters) DeepCopyInto(out *ParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceValue != nil { + in, out := &in.ResourceValue, &out.ResourceValue + *out = new(string) + **out = **in + } + if in.StaticValue != nil { + in, out := &in.StaticValue, &out.StaticValue + *out = new(string) + **out = **in + } + if in.StaticValues != nil { + in, out := &in.StaticValues, &out.StaticValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterParameters. +func (in *ParameterParameters) DeepCopy() *ParameterParameters { + if in == nil { + return nil + } + out := new(ParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordingGroupInitParameters) DeepCopyInto(out *RecordingGroupInitParameters) { + *out = *in + if in.AllSupported != nil { + in, out := &in.AllSupported, &out.AllSupported + *out = new(bool) + **out = **in + } + if in.ExclusionByResourceTypes != nil { + in, out := &in.ExclusionByResourceTypes, &out.ExclusionByResourceTypes + *out = make([]ExclusionByResourceTypesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IncludeGlobalResourceTypes != nil { + in, out := &in.IncludeGlobalResourceTypes, &out.IncludeGlobalResourceTypes + *out = new(bool) + **out = **in + } + if in.RecordingStrategy != nil { + in, out := &in.RecordingStrategy, &out.RecordingStrategy + *out = make([]RecordingStrategyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingGroupInitParameters. +func (in *RecordingGroupInitParameters) DeepCopy() *RecordingGroupInitParameters { + if in == nil { + return nil + } + out := new(RecordingGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordingGroupObservation) DeepCopyInto(out *RecordingGroupObservation) { + *out = *in + if in.AllSupported != nil { + in, out := &in.AllSupported, &out.AllSupported + *out = new(bool) + **out = **in + } + if in.ExclusionByResourceTypes != nil { + in, out := &in.ExclusionByResourceTypes, &out.ExclusionByResourceTypes + *out = make([]ExclusionByResourceTypesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IncludeGlobalResourceTypes != nil { + in, out := &in.IncludeGlobalResourceTypes, &out.IncludeGlobalResourceTypes + *out = new(bool) + **out = **in + } + if in.RecordingStrategy != nil { + in, out := &in.RecordingStrategy, &out.RecordingStrategy + *out = make([]RecordingStrategyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingGroupObservation. +func (in *RecordingGroupObservation) DeepCopy() *RecordingGroupObservation { + if in == nil { + return nil + } + out := new(RecordingGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordingGroupParameters) DeepCopyInto(out *RecordingGroupParameters) { + *out = *in + if in.AllSupported != nil { + in, out := &in.AllSupported, &out.AllSupported + *out = new(bool) + **out = **in + } + if in.ExclusionByResourceTypes != nil { + in, out := &in.ExclusionByResourceTypes, &out.ExclusionByResourceTypes + *out = make([]ExclusionByResourceTypesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IncludeGlobalResourceTypes != nil { + in, out := &in.IncludeGlobalResourceTypes, &out.IncludeGlobalResourceTypes + *out = new(bool) + **out = **in + } + if in.RecordingStrategy != nil { + in, out := &in.RecordingStrategy, &out.RecordingStrategy + *out = make([]RecordingStrategyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingGroupParameters. +func (in *RecordingGroupParameters) DeepCopy() *RecordingGroupParameters { + if in == nil { + return nil + } + out := new(RecordingGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordingModeInitParameters) DeepCopyInto(out *RecordingModeInitParameters) { + *out = *in + if in.RecordingFrequency != nil { + in, out := &in.RecordingFrequency, &out.RecordingFrequency + *out = new(string) + **out = **in + } + if in.RecordingModeOverride != nil { + in, out := &in.RecordingModeOverride, &out.RecordingModeOverride + *out = new(RecordingModeOverrideInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingModeInitParameters. +func (in *RecordingModeInitParameters) DeepCopy() *RecordingModeInitParameters { + if in == nil { + return nil + } + out := new(RecordingModeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingModeObservation) DeepCopyInto(out *RecordingModeObservation) { + *out = *in + if in.RecordingFrequency != nil { + in, out := &in.RecordingFrequency, &out.RecordingFrequency + *out = new(string) + **out = **in + } + if in.RecordingModeOverride != nil { + in, out := &in.RecordingModeOverride, &out.RecordingModeOverride + *out = new(RecordingModeOverrideObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingModeObservation. +func (in *RecordingModeObservation) DeepCopy() *RecordingModeObservation { + if in == nil { + return nil + } + out := new(RecordingModeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordingModeOverrideInitParameters) DeepCopyInto(out *RecordingModeOverrideInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.RecordingFrequency != nil { + in, out := &in.RecordingFrequency, &out.RecordingFrequency + *out = new(string) + **out = **in + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingModeOverrideInitParameters. +func (in *RecordingModeOverrideInitParameters) DeepCopy() *RecordingModeOverrideInitParameters { + if in == nil { + return nil + } + out := new(RecordingModeOverrideInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingModeOverrideObservation) DeepCopyInto(out *RecordingModeOverrideObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.RecordingFrequency != nil { + in, out := &in.RecordingFrequency, &out.RecordingFrequency + *out = new(string) + **out = **in + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingModeOverrideObservation. 
+func (in *RecordingModeOverrideObservation) DeepCopy() *RecordingModeOverrideObservation { + if in == nil { + return nil + } + out := new(RecordingModeOverrideObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingModeOverrideParameters) DeepCopyInto(out *RecordingModeOverrideParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.RecordingFrequency != nil { + in, out := &in.RecordingFrequency, &out.RecordingFrequency + *out = new(string) + **out = **in + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingModeOverrideParameters. +func (in *RecordingModeOverrideParameters) DeepCopy() *RecordingModeOverrideParameters { + if in == nil { + return nil + } + out := new(RecordingModeOverrideParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingModeParameters) DeepCopyInto(out *RecordingModeParameters) { + *out = *in + if in.RecordingFrequency != nil { + in, out := &in.RecordingFrequency, &out.RecordingFrequency + *out = new(string) + **out = **in + } + if in.RecordingModeOverride != nil { + in, out := &in.RecordingModeOverride, &out.RecordingModeOverride + *out = new(RecordingModeOverrideParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingModeParameters. 
+func (in *RecordingModeParameters) DeepCopy() *RecordingModeParameters { + if in == nil { + return nil + } + out := new(RecordingModeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingStrategyInitParameters) DeepCopyInto(out *RecordingStrategyInitParameters) { + *out = *in + if in.UseOnly != nil { + in, out := &in.UseOnly, &out.UseOnly + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingStrategyInitParameters. +func (in *RecordingStrategyInitParameters) DeepCopy() *RecordingStrategyInitParameters { + if in == nil { + return nil + } + out := new(RecordingStrategyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingStrategyObservation) DeepCopyInto(out *RecordingStrategyObservation) { + *out = *in + if in.UseOnly != nil { + in, out := &in.UseOnly, &out.UseOnly + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingStrategyObservation. +func (in *RecordingStrategyObservation) DeepCopy() *RecordingStrategyObservation { + if in == nil { + return nil + } + out := new(RecordingStrategyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingStrategyParameters) DeepCopyInto(out *RecordingStrategyParameters) { + *out = *in + if in.UseOnly != nil { + in, out := &in.UseOnly, &out.UseOnly + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingStrategyParameters. 
+func (in *RecordingStrategyParameters) DeepCopy() *RecordingStrategyParameters { + if in == nil { + return nil + } + out := new(RecordingStrategyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemediationConfiguration) DeepCopyInto(out *RemediationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemediationConfiguration. +func (in *RemediationConfiguration) DeepCopy() *RemediationConfiguration { + if in == nil { + return nil + } + out := new(RemediationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RemediationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemediationConfigurationInitParameters) DeepCopyInto(out *RemediationConfigurationInitParameters) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = new(bool) + **out = **in + } + if in.ExecutionControls != nil { + in, out := &in.ExecutionControls, &out.ExecutionControls + *out = new(ExecutionControlsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaximumAutomaticAttempts != nil { + in, out := &in.MaximumAutomaticAttempts, &out.MaximumAutomaticAttempts + *out = new(float64) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.RetryAttemptSeconds != nil { + in, out := &in.RetryAttemptSeconds, &out.RetryAttemptSeconds + *out = new(float64) + **out = **in + } + if in.TargetID != nil { + in, out := &in.TargetID, &out.TargetID + *out = new(string) + **out = **in + } + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(string) + **out = **in + } + if in.TargetVersion != nil { + in, out := &in.TargetVersion, &out.TargetVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemediationConfigurationInitParameters. +func (in *RemediationConfigurationInitParameters) DeepCopy() *RemediationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RemediationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemediationConfigurationList) DeepCopyInto(out *RemediationConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RemediationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemediationConfigurationList. +func (in *RemediationConfigurationList) DeepCopy() *RemediationConfigurationList { + if in == nil { + return nil + } + out := new(RemediationConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RemediationConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemediationConfigurationObservation) DeepCopyInto(out *RemediationConfigurationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = new(bool) + **out = **in + } + if in.ExecutionControls != nil { + in, out := &in.ExecutionControls, &out.ExecutionControls + *out = new(ExecutionControlsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaximumAutomaticAttempts != nil { + in, out := &in.MaximumAutomaticAttempts, &out.MaximumAutomaticAttempts + *out = new(float64) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.RetryAttemptSeconds != nil { + in, out := &in.RetryAttemptSeconds, &out.RetryAttemptSeconds + *out = new(float64) + **out = **in + } + if in.TargetID != nil { + in, out := &in.TargetID, &out.TargetID + *out = new(string) + **out = **in + } + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(string) + **out = **in + } + if in.TargetVersion != nil { + in, out := &in.TargetVersion, &out.TargetVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemediationConfigurationObservation. +func (in *RemediationConfigurationObservation) DeepCopy() *RemediationConfigurationObservation { + if in == nil { + return nil + } + out := new(RemediationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemediationConfigurationParameters) DeepCopyInto(out *RemediationConfigurationParameters) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = new(bool) + **out = **in + } + if in.ExecutionControls != nil { + in, out := &in.ExecutionControls, &out.ExecutionControls + *out = new(ExecutionControlsParameters) + (*in).DeepCopyInto(*out) + } + if in.MaximumAutomaticAttempts != nil { + in, out := &in.MaximumAutomaticAttempts, &out.MaximumAutomaticAttempts + *out = new(float64) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.RetryAttemptSeconds != nil { + in, out := &in.RetryAttemptSeconds, &out.RetryAttemptSeconds + *out = new(float64) + **out = **in + } + if in.TargetID != nil { + in, out := &in.TargetID, &out.TargetID + *out = new(string) + **out = **in + } + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(string) + **out = **in + } + if in.TargetVersion != nil { + in, out := &in.TargetVersion, &out.TargetVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemediationConfigurationParameters. +func (in *RemediationConfigurationParameters) DeepCopy() *RemediationConfigurationParameters { + if in == nil { + return nil + } + out := new(RemediationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemediationConfigurationSpec) DeepCopyInto(out *RemediationConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemediationConfigurationSpec. +func (in *RemediationConfigurationSpec) DeepCopy() *RemediationConfigurationSpec { + if in == nil { + return nil + } + out := new(RemediationConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemediationConfigurationStatus) DeepCopyInto(out *RemediationConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemediationConfigurationStatus. +func (in *RemediationConfigurationStatus) DeepCopy() *RemediationConfigurationStatus { + if in == nil { + return nil + } + out := new(RemediationConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScopeInitParameters) DeepCopyInto(out *ScopeInitParameters) { + *out = *in + if in.ComplianceResourceID != nil { + in, out := &in.ComplianceResourceID, &out.ComplianceResourceID + *out = new(string) + **out = **in + } + if in.ComplianceResourceTypes != nil { + in, out := &in.ComplianceResourceTypes, &out.ComplianceResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TagKey != nil { + in, out := &in.TagKey, &out.TagKey + *out = new(string) + **out = **in + } + if in.TagValue != nil { + in, out := &in.TagValue, &out.TagValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeInitParameters. +func (in *ScopeInitParameters) DeepCopy() *ScopeInitParameters { + if in == nil { + return nil + } + out := new(ScopeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeObservation) DeepCopyInto(out *ScopeObservation) { + *out = *in + if in.ComplianceResourceID != nil { + in, out := &in.ComplianceResourceID, &out.ComplianceResourceID + *out = new(string) + **out = **in + } + if in.ComplianceResourceTypes != nil { + in, out := &in.ComplianceResourceTypes, &out.ComplianceResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TagKey != nil { + in, out := &in.TagKey, &out.TagKey + *out = new(string) + **out = **in + } + if in.TagValue != nil { + in, out := &in.TagValue, &out.TagValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeObservation. 
+func (in *ScopeObservation) DeepCopy() *ScopeObservation { + if in == nil { + return nil + } + out := new(ScopeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeParameters) DeepCopyInto(out *ScopeParameters) { + *out = *in + if in.ComplianceResourceID != nil { + in, out := &in.ComplianceResourceID, &out.ComplianceResourceID + *out = new(string) + **out = **in + } + if in.ComplianceResourceTypes != nil { + in, out := &in.ComplianceResourceTypes, &out.ComplianceResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TagKey != nil { + in, out := &in.TagKey, &out.TagKey + *out = new(string) + **out = **in + } + if in.TagValue != nil { + in, out := &in.TagValue, &out.TagValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeParameters. +func (in *ScopeParameters) DeepCopy() *ScopeParameters { + if in == nil { + return nil + } + out := new(ScopeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotDeliveryPropertiesInitParameters) DeepCopyInto(out *SnapshotDeliveryPropertiesInitParameters) { + *out = *in + if in.DeliveryFrequency != nil { + in, out := &in.DeliveryFrequency, &out.DeliveryFrequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotDeliveryPropertiesInitParameters. 
+func (in *SnapshotDeliveryPropertiesInitParameters) DeepCopy() *SnapshotDeliveryPropertiesInitParameters { + if in == nil { + return nil + } + out := new(SnapshotDeliveryPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotDeliveryPropertiesObservation) DeepCopyInto(out *SnapshotDeliveryPropertiesObservation) { + *out = *in + if in.DeliveryFrequency != nil { + in, out := &in.DeliveryFrequency, &out.DeliveryFrequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotDeliveryPropertiesObservation. +func (in *SnapshotDeliveryPropertiesObservation) DeepCopy() *SnapshotDeliveryPropertiesObservation { + if in == nil { + return nil + } + out := new(SnapshotDeliveryPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotDeliveryPropertiesParameters) DeepCopyInto(out *SnapshotDeliveryPropertiesParameters) { + *out = *in + if in.DeliveryFrequency != nil { + in, out := &in.DeliveryFrequency, &out.DeliveryFrequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotDeliveryPropertiesParameters. +func (in *SnapshotDeliveryPropertiesParameters) DeepCopy() *SnapshotDeliveryPropertiesParameters { + if in == nil { + return nil + } + out := new(SnapshotDeliveryPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceDetailInitParameters) DeepCopyInto(out *SourceDetailInitParameters) { + *out = *in + if in.EventSource != nil { + in, out := &in.EventSource, &out.EventSource + *out = new(string) + **out = **in + } + if in.MaximumExecutionFrequency != nil { + in, out := &in.MaximumExecutionFrequency, &out.MaximumExecutionFrequency + *out = new(string) + **out = **in + } + if in.MessageType != nil { + in, out := &in.MessageType, &out.MessageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceDetailInitParameters. +func (in *SourceDetailInitParameters) DeepCopy() *SourceDetailInitParameters { + if in == nil { + return nil + } + out := new(SourceDetailInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceDetailObservation) DeepCopyInto(out *SourceDetailObservation) { + *out = *in + if in.EventSource != nil { + in, out := &in.EventSource, &out.EventSource + *out = new(string) + **out = **in + } + if in.MaximumExecutionFrequency != nil { + in, out := &in.MaximumExecutionFrequency, &out.MaximumExecutionFrequency + *out = new(string) + **out = **in + } + if in.MessageType != nil { + in, out := &in.MessageType, &out.MessageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceDetailObservation. +func (in *SourceDetailObservation) DeepCopy() *SourceDetailObservation { + if in == nil { + return nil + } + out := new(SourceDetailObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceDetailParameters) DeepCopyInto(out *SourceDetailParameters) { + *out = *in + if in.EventSource != nil { + in, out := &in.EventSource, &out.EventSource + *out = new(string) + **out = **in + } + if in.MaximumExecutionFrequency != nil { + in, out := &in.MaximumExecutionFrequency, &out.MaximumExecutionFrequency + *out = new(string) + **out = **in + } + if in.MessageType != nil { + in, out := &in.MessageType, &out.MessageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceDetailParameters. +func (in *SourceDetailParameters) DeepCopy() *SourceDetailParameters { + if in == nil { + return nil + } + out := new(SourceDetailParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceInitParameters) DeepCopyInto(out *SourceInitParameters) { + *out = *in + if in.CustomPolicyDetails != nil { + in, out := &in.CustomPolicyDetails, &out.CustomPolicyDetails + *out = new(CustomPolicyDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.SourceDetail != nil { + in, out := &in.SourceDetail, &out.SourceDetail + *out = make([]SourceDetailInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIdentifier != nil { + in, out := &in.SourceIdentifier, &out.SourceIdentifier + *out = new(string) + **out = **in + } + if in.SourceIdentifierRef != nil { + in, out := &in.SourceIdentifierRef, &out.SourceIdentifierRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceIdentifierSelector != nil { + in, out := &in.SourceIdentifierSelector, &out.SourceIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new SourceInitParameters. +func (in *SourceInitParameters) DeepCopy() *SourceInitParameters { + if in == nil { + return nil + } + out := new(SourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceObservation) DeepCopyInto(out *SourceObservation) { + *out = *in + if in.CustomPolicyDetails != nil { + in, out := &in.CustomPolicyDetails, &out.CustomPolicyDetails + *out = new(CustomPolicyDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.SourceDetail != nil { + in, out := &in.SourceDetail, &out.SourceDetail + *out = make([]SourceDetailObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIdentifier != nil { + in, out := &in.SourceIdentifier, &out.SourceIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceObservation. +func (in *SourceObservation) DeepCopy() *SourceObservation { + if in == nil { + return nil + } + out := new(SourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceParameters) DeepCopyInto(out *SourceParameters) { + *out = *in + if in.CustomPolicyDetails != nil { + in, out := &in.CustomPolicyDetails, &out.CustomPolicyDetails + *out = new(CustomPolicyDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.SourceDetail != nil { + in, out := &in.SourceDetail, &out.SourceDetail + *out = make([]SourceDetailParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIdentifier != nil { + in, out := &in.SourceIdentifier, &out.SourceIdentifier + *out = new(string) + **out = **in + } + if in.SourceIdentifierRef != nil { + in, out := &in.SourceIdentifierRef, &out.SourceIdentifierRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceIdentifierSelector != nil { + in, out := &in.SourceIdentifierSelector, &out.SourceIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceParameters. +func (in *SourceParameters) DeepCopy() *SourceParameters { + if in == nil { + return nil + } + out := new(SourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SsmControlsInitParameters) DeepCopyInto(out *SsmControlsInitParameters) { + *out = *in + if in.ConcurrentExecutionRatePercentage != nil { + in, out := &in.ConcurrentExecutionRatePercentage, &out.ConcurrentExecutionRatePercentage + *out = new(float64) + **out = **in + } + if in.ErrorPercentage != nil { + in, out := &in.ErrorPercentage, &out.ErrorPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SsmControlsInitParameters. 
+func (in *SsmControlsInitParameters) DeepCopy() *SsmControlsInitParameters { + if in == nil { + return nil + } + out := new(SsmControlsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SsmControlsObservation) DeepCopyInto(out *SsmControlsObservation) { + *out = *in + if in.ConcurrentExecutionRatePercentage != nil { + in, out := &in.ConcurrentExecutionRatePercentage, &out.ConcurrentExecutionRatePercentage + *out = new(float64) + **out = **in + } + if in.ErrorPercentage != nil { + in, out := &in.ErrorPercentage, &out.ErrorPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SsmControlsObservation. +func (in *SsmControlsObservation) DeepCopy() *SsmControlsObservation { + if in == nil { + return nil + } + out := new(SsmControlsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SsmControlsParameters) DeepCopyInto(out *SsmControlsParameters) { + *out = *in + if in.ConcurrentExecutionRatePercentage != nil { + in, out := &in.ConcurrentExecutionRatePercentage, &out.ConcurrentExecutionRatePercentage + *out = new(float64) + **out = **in + } + if in.ErrorPercentage != nil { + in, out := &in.ErrorPercentage, &out.ErrorPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SsmControlsParameters. 
+func (in *SsmControlsParameters) DeepCopy() *SsmControlsParameters { + if in == nil { + return nil + } + out := new(SsmControlsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/configservice/v1beta2/zz_generated.managed.go b/apis/configservice/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..eeea6ef77f --- /dev/null +++ b/apis/configservice/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ConfigRule. +func (mg *ConfigRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ConfigRule. +func (mg *ConfigRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ConfigRule. +func (mg *ConfigRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ConfigRule. +func (mg *ConfigRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ConfigRule. +func (mg *ConfigRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ConfigRule. +func (mg *ConfigRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ConfigRule. +func (mg *ConfigRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ConfigRule. 
+func (mg *ConfigRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ConfigRule. +func (mg *ConfigRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ConfigRule. +func (mg *ConfigRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ConfigRule. +func (mg *ConfigRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ConfigRule. +func (mg *ConfigRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ConfigurationAggregator. 
+func (mg *ConfigurationAggregator) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ConfigurationRecorder. 
+func (mg *ConfigurationRecorder) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DeliveryChannel. +func (mg *DeliveryChannel) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DeliveryChannel. 
+func (mg *DeliveryChannel) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DeliveryChannel. +func (mg *DeliveryChannel) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DeliveryChannel. +func (mg *DeliveryChannel) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DeliveryChannel. +func (mg *DeliveryChannel) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DeliveryChannel. +func (mg *DeliveryChannel) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DeliveryChannel. +func (mg *DeliveryChannel) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DeliveryChannel. +func (mg *DeliveryChannel) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DeliveryChannel. +func (mg *DeliveryChannel) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DeliveryChannel. +func (mg *DeliveryChannel) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DeliveryChannel. +func (mg *DeliveryChannel) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DeliveryChannel. +func (mg *DeliveryChannel) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this RemediationConfiguration. 
+func (mg *RemediationConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RemediationConfiguration. +func (mg *RemediationConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RemediationConfiguration. +func (mg *RemediationConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RemediationConfiguration. +func (mg *RemediationConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RemediationConfiguration. +func (mg *RemediationConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RemediationConfiguration. +func (mg *RemediationConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RemediationConfiguration. +func (mg *RemediationConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RemediationConfiguration. +func (mg *RemediationConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RemediationConfiguration. +func (mg *RemediationConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RemediationConfiguration. +func (mg *RemediationConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RemediationConfiguration. 
+func (mg *RemediationConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RemediationConfiguration. +func (mg *RemediationConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/configservice/v1beta2/zz_generated.managedlist.go b/apis/configservice/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..d37d925463 --- /dev/null +++ b/apis/configservice/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConfigRuleList. +func (l *ConfigRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ConfigurationAggregatorList. +func (l *ConfigurationAggregatorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ConfigurationRecorderList. +func (l *ConfigurationRecorderList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DeliveryChannelList. +func (l *DeliveryChannelList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RemediationConfigurationList. 
+func (l *RemediationConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/configservice/v1beta2/zz_generated.resolvers.go b/apis/configservice/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..c5abaf8e1e --- /dev/null +++ b/apis/configservice/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,229 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ConfigRule) ResolveReferences( // ResolveReferences of this ConfigRule. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Source != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Source.SourceIdentifier), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Source.SourceIdentifierRef, + Selector: mg.Spec.ForProvider.Source.SourceIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Source.SourceIdentifier") + } + mg.Spec.ForProvider.Source.SourceIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Source.SourceIdentifierRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Source != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Source.SourceIdentifier), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Source.SourceIdentifierRef, + Selector: mg.Spec.InitProvider.Source.SourceIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Source.SourceIdentifier") + } + mg.Spec.InitProvider.Source.SourceIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.Source.SourceIdentifierRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this ConfigurationAggregator. +func (mg *ConfigurationAggregator) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.OrganizationAggregationSource != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OrganizationAggregationSource.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.OrganizationAggregationSource.RoleArnRef, + Selector: mg.Spec.ForProvider.OrganizationAggregationSource.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OrganizationAggregationSource.RoleArn") + } + mg.Spec.ForProvider.OrganizationAggregationSource.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OrganizationAggregationSource.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.OrganizationAggregationSource != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OrganizationAggregationSource.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: 
mg.Spec.InitProvider.OrganizationAggregationSource.RoleArnRef, + Selector: mg.Spec.InitProvider.OrganizationAggregationSource.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OrganizationAggregationSource.RoleArn") + } + mg.Spec.InitProvider.OrganizationAggregationSource.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OrganizationAggregationSource.RoleArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this ConfigurationRecorder. +func (mg *ConfigurationRecorder) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + 
Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DeliveryChannel. +func (mg *DeliveryChannel) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.S3BucketName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.S3BucketNameRef, + Selector: mg.Spec.ForProvider.S3BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.S3BucketName") + } + mg.Spec.ForProvider.S3BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.S3BucketNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.S3BucketName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.S3BucketNameRef, + Selector: mg.Spec.InitProvider.S3BucketNameSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.S3BucketName") + } + mg.Spec.InitProvider.S3BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.S3BucketNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/configservice/v1beta2/zz_groupversion_info.go b/apis/configservice/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..299c12fb3c --- /dev/null +++ b/apis/configservice/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=configservice.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "configservice.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/configservice/v1beta2/zz_remediationconfiguration_terraformed.go b/apis/configservice/v1beta2/zz_remediationconfiguration_terraformed.go new file mode 100755 index 0000000000..1012da6640 --- /dev/null +++ b/apis/configservice/v1beta2/zz_remediationconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RemediationConfiguration +func (mg *RemediationConfiguration) GetTerraformResourceType() string { + return "aws_config_remediation_configuration" +} + +// GetConnectionDetailsMapping for this RemediationConfiguration +func (tr *RemediationConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RemediationConfiguration +func (tr *RemediationConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RemediationConfiguration +func (tr *RemediationConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this RemediationConfiguration +func (tr *RemediationConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RemediationConfiguration +func (tr *RemediationConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RemediationConfiguration +func (tr *RemediationConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
 RemediationConfiguration
+func (tr *RemediationConfiguration) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this RemediationConfiguration
+func (tr *RemediationConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this RemediationConfiguration using its observed tfState.
+// returns True if there are any spec changes for the resource. 
+func (tr *RemediationConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &RemediationConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RemediationConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/configservice/v1beta2/zz_remediationconfiguration_types.go b/apis/configservice/v1beta2/zz_remediationconfiguration_types.go new file mode 100755 index 0000000000..58a6c085d8 --- /dev/null +++ b/apis/configservice/v1beta2/zz_remediationconfiguration_types.go @@ -0,0 +1,282 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ExecutionControlsInitParameters struct { + + // Configuration block for SSM controls. See below. + SsmControls *SsmControlsInitParameters `json:"ssmControls,omitempty" tf:"ssm_controls,omitempty"` +} + +type ExecutionControlsObservation struct { + + // Configuration block for SSM controls. See below. + SsmControls *SsmControlsObservation `json:"ssmControls,omitempty" tf:"ssm_controls,omitempty"` +} + +type ExecutionControlsParameters struct { + + // Configuration block for SSM controls. See below. 
+ // +kubebuilder:validation:Optional + SsmControls *SsmControlsParameters `json:"ssmControls,omitempty" tf:"ssm_controls,omitempty"` +} + +type ParameterInitParameters struct { + + // Name of the attribute. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value is dynamic and changes at run-time. + ResourceValue *string `json:"resourceValue,omitempty" tf:"resource_value,omitempty"` + + // Value is static and does not change at run-time. + StaticValue *string `json:"staticValue,omitempty" tf:"static_value,omitempty"` + + // List of static values. + StaticValues []*string `json:"staticValues,omitempty" tf:"static_values,omitempty"` +} + +type ParameterObservation struct { + + // Name of the attribute. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value is dynamic and changes at run-time. + ResourceValue *string `json:"resourceValue,omitempty" tf:"resource_value,omitempty"` + + // Value is static and does not change at run-time. + StaticValue *string `json:"staticValue,omitempty" tf:"static_value,omitempty"` + + // List of static values. + StaticValues []*string `json:"staticValues,omitempty" tf:"static_values,omitempty"` +} + +type ParameterParameters struct { + + // Name of the attribute. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value is dynamic and changes at run-time. + // +kubebuilder:validation:Optional + ResourceValue *string `json:"resourceValue,omitempty" tf:"resource_value,omitempty"` + + // Value is static and does not change at run-time. + // +kubebuilder:validation:Optional + StaticValue *string `json:"staticValue,omitempty" tf:"static_value,omitempty"` + + // List of static values. + // +kubebuilder:validation:Optional + StaticValues []*string `json:"staticValues,omitempty" tf:"static_values,omitempty"` +} + +type RemediationConfigurationInitParameters struct { + + // Remediation is triggered automatically if true. 
+ Automatic *bool `json:"automatic,omitempty" tf:"automatic,omitempty"` + + // Configuration block for execution controls. See below. + ExecutionControls *ExecutionControlsInitParameters `json:"executionControls,omitempty" tf:"execution_controls,omitempty"` + + // Maximum number of failed attempts for auto-remediation. If you do not select a number, the default is 5. + MaximumAutomaticAttempts *float64 `json:"maximumAutomaticAttempts,omitempty" tf:"maximum_automatic_attempts,omitempty"` + + // Can be specified multiple times for each parameter. Each parameter block supports arguments below. + Parameter []ParameterInitParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Type of resource. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // Maximum time in seconds that AWS Config runs auto-remediation. If you do not select a number, the default is 60 seconds. + RetryAttemptSeconds *float64 `json:"retryAttemptSeconds,omitempty" tf:"retry_attempt_seconds,omitempty"` + + // Target ID is the name of the public document. + TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"` + + // Type of the target. Target executes remediation. For example, SSM document. + TargetType *string `json:"targetType,omitempty" tf:"target_type,omitempty"` + + // Version of the target. For example, version of the SSM document + TargetVersion *string `json:"targetVersion,omitempty" tf:"target_version,omitempty"` +} + +type RemediationConfigurationObservation struct { + + // ARN of the Config Remediation Configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Remediation is triggered automatically if true. + Automatic *bool `json:"automatic,omitempty" tf:"automatic,omitempty"` + + // Configuration block for execution controls. See below. 
+ ExecutionControls *ExecutionControlsObservation `json:"executionControls,omitempty" tf:"execution_controls,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Maximum number of failed attempts for auto-remediation. If you do not select a number, the default is 5. + MaximumAutomaticAttempts *float64 `json:"maximumAutomaticAttempts,omitempty" tf:"maximum_automatic_attempts,omitempty"` + + // Can be specified multiple times for each parameter. Each parameter block supports arguments below. + Parameter []ParameterObservation `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Type of resource. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // Maximum time in seconds that AWS Config runs auto-remediation. If you do not select a number, the default is 60 seconds. + RetryAttemptSeconds *float64 `json:"retryAttemptSeconds,omitempty" tf:"retry_attempt_seconds,omitempty"` + + // Target ID is the name of the public document. + TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"` + + // Type of the target. Target executes remediation. For example, SSM document. + TargetType *string `json:"targetType,omitempty" tf:"target_type,omitempty"` + + // Version of the target. For example, version of the SSM document + TargetVersion *string `json:"targetVersion,omitempty" tf:"target_version,omitempty"` +} + +type RemediationConfigurationParameters struct { + + // Remediation is triggered automatically if true. + // +kubebuilder:validation:Optional + Automatic *bool `json:"automatic,omitempty" tf:"automatic,omitempty"` + + // Configuration block for execution controls. See below. + // +kubebuilder:validation:Optional + ExecutionControls *ExecutionControlsParameters `json:"executionControls,omitempty" tf:"execution_controls,omitempty"` + + // Maximum number of failed attempts for auto-remediation. If you do not select a number, the default is 5. 
+ // +kubebuilder:validation:Optional + MaximumAutomaticAttempts *float64 `json:"maximumAutomaticAttempts,omitempty" tf:"maximum_automatic_attempts,omitempty"` + + // Can be specified multiple times for each parameter. Each parameter block supports arguments below. + // +kubebuilder:validation:Optional + Parameter []ParameterParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Type of resource. + // +kubebuilder:validation:Optional + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // Maximum time in seconds that AWS Config runs auto-remediation. If you do not select a number, the default is 60 seconds. + // +kubebuilder:validation:Optional + RetryAttemptSeconds *float64 `json:"retryAttemptSeconds,omitempty" tf:"retry_attempt_seconds,omitempty"` + + // Target ID is the name of the public document. + // +kubebuilder:validation:Optional + TargetID *string `json:"targetId,omitempty" tf:"target_id,omitempty"` + + // Type of the target. Target executes remediation. For example, SSM document. + // +kubebuilder:validation:Optional + TargetType *string `json:"targetType,omitempty" tf:"target_type,omitempty"` + + // Version of the target. For example, version of the SSM document + // +kubebuilder:validation:Optional + TargetVersion *string `json:"targetVersion,omitempty" tf:"target_version,omitempty"` +} + +type SsmControlsInitParameters struct { + + // Maximum percentage of remediation actions allowed to run in parallel on the non-compliant resources for that specific rule. The default value is 10%. 
+ ConcurrentExecutionRatePercentage *float64 `json:"concurrentExecutionRatePercentage,omitempty" tf:"concurrent_execution_rate_percentage,omitempty"` + + // Percentage of errors that are allowed before SSM stops running automations on non-compliant resources for that specific rule. The default is 50%. + ErrorPercentage *float64 `json:"errorPercentage,omitempty" tf:"error_percentage,omitempty"` +} + +type SsmControlsObservation struct { + + // Maximum percentage of remediation actions allowed to run in parallel on the non-compliant resources for that specific rule. The default value is 10%. + ConcurrentExecutionRatePercentage *float64 `json:"concurrentExecutionRatePercentage,omitempty" tf:"concurrent_execution_rate_percentage,omitempty"` + + // Percentage of errors that are allowed before SSM stops running automations on non-compliant resources for that specific rule. The default is 50%. + ErrorPercentage *float64 `json:"errorPercentage,omitempty" tf:"error_percentage,omitempty"` +} + +type SsmControlsParameters struct { + + // Maximum percentage of remediation actions allowed to run in parallel on the non-compliant resources for that specific rule. The default value is 10%. + // +kubebuilder:validation:Optional + ConcurrentExecutionRatePercentage *float64 `json:"concurrentExecutionRatePercentage,omitempty" tf:"concurrent_execution_rate_percentage,omitempty"` + + // Percentage of errors that are allowed before SSM stops running automations on non-compliant resources for that specific rule. The default is 50%. + // +kubebuilder:validation:Optional + ErrorPercentage *float64 `json:"errorPercentage,omitempty" tf:"error_percentage,omitempty"` +} + +// RemediationConfigurationSpec defines the desired state of RemediationConfiguration +type RemediationConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RemediationConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RemediationConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// RemediationConfigurationStatus defines the observed state of RemediationConfiguration. +type RemediationConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RemediationConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RemediationConfiguration is the Schema for the RemediationConfigurations API. Provides an AWS Config Remediation Configuration. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type RemediationConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetId) || (has(self.initProvider) && has(self.initProvider.targetId))",message="spec.forProvider.targetId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetType) || (has(self.initProvider) && has(self.initProvider.targetType))",message="spec.forProvider.targetType is a required parameter" + Spec RemediationConfigurationSpec `json:"spec"` + Status RemediationConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RemediationConfigurationList contains a list of RemediationConfigurations +type RemediationConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RemediationConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + RemediationConfiguration_Kind = "RemediationConfiguration" + RemediationConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RemediationConfiguration_Kind}.String() + RemediationConfiguration_KindAPIVersion = RemediationConfiguration_Kind + "." 
+ CRDGroupVersion.String() + RemediationConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(RemediationConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&RemediationConfiguration{}, &RemediationConfigurationList{}) +} diff --git a/apis/connect/v1beta1/zz_generated.conversion_hubs.go b/apis/connect/v1beta1/zz_generated.conversion_hubs.go index c723f4571e..dada8409c2 100755 --- a/apis/connect/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/connect/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *BotAssociation) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ContactFlow) Hub() {} @@ -18,26 +15,14 @@ func (tr *ContactFlowModule) Hub() {} // Hub marks this type as a conversion hub. func (tr *Instance) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *InstanceStorageConfig) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LambdaFunctionAssociation) Hub() {} // Hub marks this type as a conversion hub. func (tr *PhoneNumber) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *QuickConnect) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SecurityProfile) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *User) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *UserHierarchyStructure) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Vocabulary) Hub() {} diff --git a/apis/connect/v1beta1/zz_generated.conversion_spokes.go b/apis/connect/v1beta1/zz_generated.conversion_spokes.go index f289fc679c..d3c9f99565 100755 --- a/apis/connect/v1beta1/zz_generated.conversion_spokes.go +++ b/apis/connect/v1beta1/zz_generated.conversion_spokes.go @@ -13,6 +13,26 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" ) +// ConvertTo converts this BotAssociation to the hub type. 
+func (tr *BotAssociation) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BotAssociation type. +func (tr *BotAssociation) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + // ConvertTo converts this HoursOfOperation to the hub type. func (tr *HoursOfOperation) ConvertTo(dstRaw conversion.Hub) error { spokeVersion := tr.GetObjectKind().GroupVersionKind().Version @@ -33,6 +53,26 @@ func (tr *HoursOfOperation) ConvertFrom(srcRaw conversion.Hub) error { return nil } +// ConvertTo converts this InstanceStorageConfig to the hub type. +func (tr *InstanceStorageConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the InstanceStorageConfig type. 
+func (tr *InstanceStorageConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + // ConvertTo converts this Queue to the hub type. func (tr *Queue) ConvertTo(dstRaw conversion.Hub) error { spokeVersion := tr.GetObjectKind().GroupVersionKind().Version @@ -53,6 +93,26 @@ func (tr *Queue) ConvertFrom(srcRaw conversion.Hub) error { return nil } +// ConvertTo converts this QuickConnect to the hub type. +func (tr *QuickConnect) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the QuickConnect type. +func (tr *QuickConnect) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + // ConvertTo converts this RoutingProfile to the hub type. 
func (tr *RoutingProfile) ConvertTo(dstRaw conversion.Hub) error { spokeVersion := tr.GetObjectKind().GroupVersionKind().Version @@ -72,3 +132,43 @@ func (tr *RoutingProfile) ConvertFrom(srcRaw conversion.Hub) error { } return nil } + +// ConvertTo converts this User to the hub type. +func (tr *User) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the User type. +func (tr *User) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this UserHierarchyStructure to the hub type. +func (tr *UserHierarchyStructure) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the UserHierarchyStructure type. 
+func (tr *UserHierarchyStructure) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/connect/v1beta1/zz_generated.resolvers.go b/apis/connect/v1beta1/zz_generated.resolvers.go index d4e6158c03..ec617a22d8 100644 --- a/apis/connect/v1beta1/zz_generated.resolvers.go +++ b/apis/connect/v1beta1/zz_generated.resolvers.go @@ -250,7 +250,7 @@ func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta1", "Directory", "DirectoryList") + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -269,7 +269,7 @@ func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.DirectoryID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DirectoryIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta1", "Directory", "DirectoryList") + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -589,7 +589,7 @@ func (mg *LambdaFunctionAssociation) ResolveReferences(ctx context.Context, c cl var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", 
"v1beta1", "Function", "FunctionList") + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/connect/v1beta1/zz_instance_types.go b/apis/connect/v1beta1/zz_instance_types.go index ce30536d96..1b1a40ec87 100755 --- a/apis/connect/v1beta1/zz_instance_types.go +++ b/apis/connect/v1beta1/zz_instance_types.go @@ -25,7 +25,7 @@ type InstanceInitParameters struct { ContactLensEnabled *bool `json:"contactLensEnabled,omitempty" tf:"contact_lens_enabled,omitempty"` // The identifier for the directory if identity_management_type is EXISTING_DIRECTORY. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta1.Directory + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` @@ -119,7 +119,7 @@ type InstanceParameters struct { ContactLensEnabled *bool `json:"contactLensEnabled,omitempty" tf:"contact_lens_enabled,omitempty"` // The identifier for the directory if identity_management_type is EXISTING_DIRECTORY. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta1.Directory + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` diff --git a/apis/connect/v1beta1/zz_lambdafunctionassociation_types.go b/apis/connect/v1beta1/zz_lambdafunctionassociation_types.go index 23261774af..0e49b9dd61 100755 --- a/apis/connect/v1beta1/zz_lambdafunctionassociation_types.go +++ b/apis/connect/v1beta1/zz_lambdafunctionassociation_types.go @@ -31,7 +31,7 @@ type LambdaFunctionAssociationObservation struct { type LambdaFunctionAssociationParameters struct { // Amazon Resource Name (ARN) of the Lambda Function, omitting any version or alias qualifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta1.Function + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` diff --git a/apis/connect/v1beta2/zz_botassociation_terraformed.go b/apis/connect/v1beta2/zz_botassociation_terraformed.go new file mode 100755 index 0000000000..9b23a8588e --- /dev/null +++ b/apis/connect/v1beta2/zz_botassociation_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BotAssociation +func (mg *BotAssociation) GetTerraformResourceType() string { + return "aws_connect_bot_association" +} + +// GetConnectionDetailsMapping for this BotAssociation +func (tr *BotAssociation) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BotAssociation +func (tr *BotAssociation) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BotAssociation +func (tr *BotAssociation) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BotAssociation +func (tr *BotAssociation) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BotAssociation +func (tr *BotAssociation) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BotAssociation +func (tr *BotAssociation) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BotAssociation +func (tr *BotAssociation) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err 
!= nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this BotAssociation
+func (tr *BotAssociation) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this BotAssociation using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *BotAssociation) LateInitialize(attrs []byte) (bool, error) {
+	params := &BotAssociationParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BotAssociation) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/connect/v1beta2/zz_botassociation_types.go b/apis/connect/v1beta2/zz_botassociation_types.go new file mode 100755 index 0000000000..3a7669ea6d --- /dev/null +++ b/apis/connect/v1beta2/zz_botassociation_types.go @@ -0,0 +1,166 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BotAssociationInitParameters struct { + + // Configuration information of an Amazon Lex (V1) bot. Detailed below. + LexBot *LexBotInitParameters `json:"lexBot,omitempty" tf:"lex_bot,omitempty"` +} + +type BotAssociationObservation struct { + + // The Amazon Connect instance ID, Lex (V1) bot name, and Lex (V1) bot region separated by colons (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Configuration information of an Amazon Lex (V1) bot. Detailed below. + LexBot *LexBotObservation `json:"lexBot,omitempty" tf:"lex_bot,omitempty"` +} + +type BotAssociationParameters struct { + + // The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // Configuration information of an Amazon Lex (V1) bot. Detailed below. + // +kubebuilder:validation:Optional + LexBot *LexBotParameters `json:"lexBot,omitempty" tf:"lex_bot,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type LexBotInitParameters struct { + + // The Region that the Amazon Lex (V1) bot was created in. Defaults to current region. + LexRegion *string `json:"lexRegion,omitempty" tf:"lex_region,omitempty"` + + // The name of the Amazon Lex (V1) bot. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lexmodels/v1beta2.Bot + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Bot in lexmodels to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Bot in lexmodels to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type LexBotObservation struct { + + // The Region that the Amazon Lex (V1) bot was created in. Defaults to current region. 
+ LexRegion *string `json:"lexRegion,omitempty" tf:"lex_region,omitempty"` + + // The name of the Amazon Lex (V1) bot. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LexBotParameters struct { + + // The Region that the Amazon Lex (V1) bot was created in. Defaults to current region. + // +kubebuilder:validation:Optional + LexRegion *string `json:"lexRegion,omitempty" tf:"lex_region,omitempty"` + + // The name of the Amazon Lex (V1) bot. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lexmodels/v1beta2.Bot + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Bot in lexmodels to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Bot in lexmodels to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +// BotAssociationSpec defines the desired state of BotAssociation +type BotAssociationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BotAssociationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BotAssociationInitParameters `json:"initProvider,omitempty"` +} + +// BotAssociationStatus defines the observed state of BotAssociation. 
+type BotAssociationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BotAssociationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BotAssociation is the Schema for the BotAssociations API. Associates an Amazon Connect instance to an Amazon Lex (V1) bot +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BotAssociation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.lexBot) || (has(self.initProvider) && has(self.initProvider.lexBot))",message="spec.forProvider.lexBot is a required parameter" + Spec BotAssociationSpec `json:"spec"` + Status BotAssociationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BotAssociationList contains a list of BotAssociations +type BotAssociationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BotAssociation `json:"items"` +} + +// Repository type metadata. +var ( + BotAssociation_Kind = "BotAssociation" + BotAssociation_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BotAssociation_Kind}.String() + BotAssociation_KindAPIVersion = BotAssociation_Kind + "." 
+ CRDGroupVersion.String() + BotAssociation_GroupVersionKind = CRDGroupVersion.WithKind(BotAssociation_Kind) +) + +func init() { + SchemeBuilder.Register(&BotAssociation{}, &BotAssociationList{}) +} diff --git a/apis/connect/v1beta2/zz_generated.conversion_hubs.go b/apis/connect/v1beta2/zz_generated.conversion_hubs.go index 38621898fc..2bcb758a3a 100755 --- a/apis/connect/v1beta2/zz_generated.conversion_hubs.go +++ b/apis/connect/v1beta2/zz_generated.conversion_hubs.go @@ -7,10 +7,19 @@ package v1beta2 // Hub marks this type as a conversion hub. -func (tr *HoursOfOperation) Hub() {} +func (tr *BotAssociation) Hub() {} // Hub marks this type as a conversion hub. -func (tr *Queue) Hub() {} +func (tr *InstanceStorageConfig) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *QuickConnect) Hub() {} // Hub marks this type as a conversion hub. func (tr *RoutingProfile) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *User) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *UserHierarchyStructure) Hub() {} diff --git a/apis/connect/v1beta2/zz_generated.conversion_spokes.go b/apis/connect/v1beta2/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..53784c549e --- /dev/null +++ b/apis/connect/v1beta2/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this HoursOfOperation to the hub type. 
+func (tr *HoursOfOperation) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the HoursOfOperation type. +func (tr *HoursOfOperation) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Queue to the hub type. +func (tr *Queue) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Queue type. 
+func (tr *Queue) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/connect/v1beta2/zz_generated.deepcopy.go b/apis/connect/v1beta2/zz_generated.deepcopy.go index 3cf0c9e64a..0ee98e6fcb 100644 --- a/apis/connect/v1beta2/zz_generated.deepcopy.go +++ b/apis/connect/v1beta2/zz_generated.deepcopy.go @@ -13,6 +13,190 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAssociation) DeepCopyInto(out *BotAssociation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAssociation. +func (in *BotAssociation) DeepCopy() *BotAssociation { + if in == nil { + return nil + } + out := new(BotAssociation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BotAssociation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BotAssociationInitParameters) DeepCopyInto(out *BotAssociationInitParameters) { + *out = *in + if in.LexBot != nil { + in, out := &in.LexBot, &out.LexBot + *out = new(LexBotInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAssociationInitParameters. +func (in *BotAssociationInitParameters) DeepCopy() *BotAssociationInitParameters { + if in == nil { + return nil + } + out := new(BotAssociationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAssociationList) DeepCopyInto(out *BotAssociationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BotAssociation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAssociationList. +func (in *BotAssociationList) DeepCopy() *BotAssociationList { + if in == nil { + return nil + } + out := new(BotAssociationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BotAssociationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BotAssociationObservation) DeepCopyInto(out *BotAssociationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.LexBot != nil { + in, out := &in.LexBot, &out.LexBot + *out = new(LexBotObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAssociationObservation. +func (in *BotAssociationObservation) DeepCopy() *BotAssociationObservation { + if in == nil { + return nil + } + out := new(BotAssociationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAssociationParameters) DeepCopyInto(out *BotAssociationParameters) { + *out = *in + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LexBot != nil { + in, out := &in.LexBot, &out.LexBot + *out = new(LexBotParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAssociationParameters. +func (in *BotAssociationParameters) DeepCopy() *BotAssociationParameters { + if in == nil { + return nil + } + out := new(BotAssociationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BotAssociationSpec) DeepCopyInto(out *BotAssociationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAssociationSpec. +func (in *BotAssociationSpec) DeepCopy() *BotAssociationSpec { + if in == nil { + return nil + } + out := new(BotAssociationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAssociationStatus) DeepCopyInto(out *BotAssociationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAssociationStatus. +func (in *BotAssociationStatus) DeepCopy() *BotAssociationStatus { + if in == nil { + return nil + } + out := new(BotAssociationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigInitParameters) DeepCopyInto(out *ConfigInitParameters) { *out = *in @@ -115,6 +299,101 @@ func (in *ConfigParameters) DeepCopy() *ConfigParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfigInitParameters) DeepCopyInto(out *EncryptionConfigInitParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigInitParameters. +func (in *EncryptionConfigInitParameters) DeepCopy() *EncryptionConfigInitParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigObservation) DeepCopyInto(out *EncryptionConfigObservation) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigObservation. +func (in *EncryptionConfigObservation) DeepCopy() *EncryptionConfigObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfigParameters) DeepCopyInto(out *EncryptionConfigParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigParameters. +func (in *EncryptionConfigParameters) DeepCopy() *EncryptionConfigParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EndTimeInitParameters) DeepCopyInto(out *EndTimeInitParameters) { *out = *in @@ -191,59 +470,179 @@ func (in *EndTimeParameters) DeepCopy() *EndTimeParameters { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HoursOfOperation) DeepCopyInto(out *HoursOfOperation) { +func (in *HierarchyStructureInitParameters) DeepCopyInto(out *HierarchyStructureInitParameters) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + if in.LevelFive != nil { + in, out := &in.LevelFive, &out.LevelFive + *out = new(LevelFiveInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LevelFour != nil { + in, out := &in.LevelFour, &out.LevelFour + *out = new(LevelFourInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LevelOne != nil { + in, out := &in.LevelOne, &out.LevelOne + *out = new(LevelOneInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LevelThree != nil { + in, out := &in.LevelThree, &out.LevelThree + *out = new(LevelThreeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LevelTwo != nil { + in, out := &in.LevelTwo, &out.LevelTwo + *out = new(LevelTwoInitParameters) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoursOfOperation. -func (in *HoursOfOperation) DeepCopy() *HoursOfOperation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchyStructureInitParameters. +func (in *HierarchyStructureInitParameters) DeepCopy() *HierarchyStructureInitParameters { if in == nil { return nil } - out := new(HoursOfOperation) + out := new(HierarchyStructureInitParameters) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HoursOfOperation) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HoursOfOperationInitParameters) DeepCopyInto(out *HoursOfOperationInitParameters) { +func (in *HierarchyStructureObservation) DeepCopyInto(out *HierarchyStructureObservation) { *out = *in - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = make([]ConfigInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.LevelFive != nil { + in, out := &in.LevelFive, &out.LevelFive + *out = new(LevelFiveObservation) + (*in).DeepCopyInto(*out) } - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in + if in.LevelFour != nil { + in, out := &in.LevelFour, &out.LevelFour + *out = new(LevelFourObservation) + (*in).DeepCopyInto(*out) } - if in.InstanceID != nil { - in, out := &in.InstanceID, &out.InstanceID - *out = new(string) - **out = **in + if in.LevelOne != nil { + in, out := &in.LevelOne, &out.LevelOne + *out = new(LevelOneObservation) + (*in).DeepCopyInto(*out) } - if in.InstanceIDRef != nil { - in, out := &in.InstanceIDRef, &out.InstanceIDRef - *out = new(v1.Reference) + if in.LevelThree != nil { + in, out := &in.LevelThree, &out.LevelThree + *out = new(LevelThreeObservation) (*in).DeepCopyInto(*out) } - if in.InstanceIDSelector != nil { - in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + if in.LevelTwo != nil { + in, out := &in.LevelTwo, &out.LevelTwo + *out = new(LevelTwoObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchyStructureObservation. +func (in *HierarchyStructureObservation) DeepCopy() *HierarchyStructureObservation { + if in == nil { + return nil + } + out := new(HierarchyStructureObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HierarchyStructureParameters) DeepCopyInto(out *HierarchyStructureParameters) { + *out = *in + if in.LevelFive != nil { + in, out := &in.LevelFive, &out.LevelFive + *out = new(LevelFiveParameters) + (*in).DeepCopyInto(*out) + } + if in.LevelFour != nil { + in, out := &in.LevelFour, &out.LevelFour + *out = new(LevelFourParameters) + (*in).DeepCopyInto(*out) + } + if in.LevelOne != nil { + in, out := &in.LevelOne, &out.LevelOne + *out = new(LevelOneParameters) + (*in).DeepCopyInto(*out) + } + if in.LevelThree != nil { + in, out := &in.LevelThree, &out.LevelThree + *out = new(LevelThreeParameters) + (*in).DeepCopyInto(*out) + } + if in.LevelTwo != nil { + in, out := &in.LevelTwo, &out.LevelTwo + *out = new(LevelTwoParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchyStructureParameters. +func (in *HierarchyStructureParameters) DeepCopy() *HierarchyStructureParameters { + if in == nil { + return nil + } + out := new(HierarchyStructureParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HoursOfOperation) DeepCopyInto(out *HoursOfOperation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoursOfOperation. +func (in *HoursOfOperation) DeepCopy() *HoursOfOperation { + if in == nil { + return nil + } + out := new(HoursOfOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *HoursOfOperation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HoursOfOperationInitParameters) DeepCopyInto(out *HoursOfOperationInitParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector *out = new(v1.Selector) (*in).DeepCopyInto(*out) } @@ -515,191 +914,188 @@ func (in *HoursOfOperationStatus) DeepCopy() *HoursOfOperationStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MediaConcurrenciesInitParameters) DeepCopyInto(out *MediaConcurrenciesInitParameters) { +func (in *IdentityInfoInitParameters) DeepCopyInto(out *IdentityInfoInitParameters) { *out = *in - if in.Channel != nil { - in, out := &in.Channel, &out.Channel + if in.Email != nil { + in, out := &in.Email, &out.Email *out = new(string) **out = **in } - if in.Concurrency != nil { - in, out := &in.Concurrency, &out.Concurrency - *out = new(float64) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaConcurrenciesInitParameters. 
-func (in *MediaConcurrenciesInitParameters) DeepCopy() *MediaConcurrenciesInitParameters { - if in == nil { - return nil - } - out := new(MediaConcurrenciesInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MediaConcurrenciesObservation) DeepCopyInto(out *MediaConcurrenciesObservation) { - *out = *in - if in.Channel != nil { - in, out := &in.Channel, &out.Channel + if in.FirstName != nil { + in, out := &in.FirstName, &out.FirstName *out = new(string) **out = **in } - if in.Concurrency != nil { - in, out := &in.Concurrency, &out.Concurrency - *out = new(float64) + if in.LastName != nil { + in, out := &in.LastName, &out.LastName + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaConcurrenciesObservation. -func (in *MediaConcurrenciesObservation) DeepCopy() *MediaConcurrenciesObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInfoInitParameters. +func (in *IdentityInfoInitParameters) DeepCopy() *IdentityInfoInitParameters { if in == nil { return nil } - out := new(MediaConcurrenciesObservation) + out := new(IdentityInfoInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MediaConcurrenciesParameters) DeepCopyInto(out *MediaConcurrenciesParameters) { +func (in *IdentityInfoObservation) DeepCopyInto(out *IdentityInfoObservation) { *out = *in - if in.Channel != nil { - in, out := &in.Channel, &out.Channel + if in.Email != nil { + in, out := &in.Email, &out.Email *out = new(string) **out = **in } - if in.Concurrency != nil { - in, out := &in.Concurrency, &out.Concurrency - *out = new(float64) + if in.FirstName != nil { + in, out := &in.FirstName, &out.FirstName + *out = new(string) + **out = **in + } + if in.LastName != nil { + in, out := &in.LastName, &out.LastName + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaConcurrenciesParameters. -func (in *MediaConcurrenciesParameters) DeepCopy() *MediaConcurrenciesParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInfoObservation. +func (in *IdentityInfoObservation) DeepCopy() *IdentityInfoObservation { if in == nil { return nil } - out := new(MediaConcurrenciesParameters) + out := new(IdentityInfoObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OutboundCallerConfigInitParameters) DeepCopyInto(out *OutboundCallerConfigInitParameters) { +func (in *IdentityInfoParameters) DeepCopyInto(out *IdentityInfoParameters) { *out = *in - if in.OutboundCallerIDName != nil { - in, out := &in.OutboundCallerIDName, &out.OutboundCallerIDName + if in.Email != nil { + in, out := &in.Email, &out.Email *out = new(string) **out = **in } - if in.OutboundCallerIDNumberID != nil { - in, out := &in.OutboundCallerIDNumberID, &out.OutboundCallerIDNumberID + if in.FirstName != nil { + in, out := &in.FirstName, &out.FirstName *out = new(string) **out = **in } - if in.OutboundFlowID != nil { - in, out := &in.OutboundFlowID, &out.OutboundFlowID + if in.LastName != nil { + in, out := &in.LastName, &out.LastName *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundCallerConfigInitParameters. -func (in *OutboundCallerConfigInitParameters) DeepCopy() *OutboundCallerConfigInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInfoParameters. +func (in *IdentityInfoParameters) DeepCopy() *IdentityInfoParameters { if in == nil { return nil } - out := new(OutboundCallerConfigInitParameters) + out := new(IdentityInfoParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OutboundCallerConfigObservation) DeepCopyInto(out *OutboundCallerConfigObservation) { +func (in *InstanceStorageConfig) DeepCopyInto(out *InstanceStorageConfig) { *out = *in - if in.OutboundCallerIDName != nil { - in, out := &in.OutboundCallerIDName, &out.OutboundCallerIDName - *out = new(string) - **out = **in - } - if in.OutboundCallerIDNumberID != nil { - in, out := &in.OutboundCallerIDNumberID, &out.OutboundCallerIDNumberID - *out = new(string) - **out = **in - } - if in.OutboundFlowID != nil { - in, out := &in.OutboundFlowID, &out.OutboundFlowID - *out = new(string) - **out = **in - } + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundCallerConfigObservation. -func (in *OutboundCallerConfigObservation) DeepCopy() *OutboundCallerConfigObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStorageConfig. +func (in *InstanceStorageConfig) DeepCopy() *InstanceStorageConfig { if in == nil { return nil } - out := new(OutboundCallerConfigObservation) + out := new(InstanceStorageConfig) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceStorageConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OutboundCallerConfigParameters) DeepCopyInto(out *OutboundCallerConfigParameters) { +func (in *InstanceStorageConfigInitParameters) DeepCopyInto(out *InstanceStorageConfigInitParameters) { *out = *in - if in.OutboundCallerIDName != nil { - in, out := &in.OutboundCallerIDName, &out.OutboundCallerIDName + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID *out = new(string) **out = **in } - if in.OutboundCallerIDNumberID != nil { - in, out := &in.OutboundCallerIDNumberID, &out.OutboundCallerIDNumberID - *out = new(string) - **out = **in + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) } - if in.OutboundFlowID != nil { - in, out := &in.OutboundFlowID, &out.OutboundFlowID + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType *out = new(string) **out = **in } + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = new(StorageConfigInitParameters) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundCallerConfigParameters. -func (in *OutboundCallerConfigParameters) DeepCopy() *OutboundCallerConfigParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStorageConfigInitParameters. +func (in *InstanceStorageConfigInitParameters) DeepCopy() *InstanceStorageConfigInitParameters { if in == nil { return nil } - out := new(OutboundCallerConfigParameters) + out := new(InstanceStorageConfigInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Queue) DeepCopyInto(out *Queue) { +func (in *InstanceStorageConfigList) DeepCopyInto(out *InstanceStorageConfigList) { *out = *in out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InstanceStorageConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. -func (in *Queue) DeepCopy() *Queue { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStorageConfigList. +func (in *InstanceStorageConfigList) DeepCopy() *InstanceStorageConfigList { if in == nil { return nil } - out := new(Queue) + out := new(InstanceStorageConfigList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Queue) DeepCopyObject() runtime.Object { +func (in *InstanceStorageConfigList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -707,141 +1103,2968 @@ func (in *Queue) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *QueueConfigsInitParameters) DeepCopyInto(out *QueueConfigsInitParameters) { +func (in *InstanceStorageConfigObservation) DeepCopyInto(out *InstanceStorageConfigObservation) { *out = *in - if in.Channel != nil { - in, out := &in.Channel, &out.Channel + if in.AssociationID != nil { + in, out := &in.AssociationID, &out.AssociationID *out = new(string) **out = **in } - if in.Delay != nil { - in, out := &in.Delay, &out.Delay - *out = new(float64) + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) **out = **in } - if in.Priority != nil { - in, out := &in.Priority, &out.Priority - *out = new(float64) + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) **out = **in } - if in.QueueID != nil { - in, out := &in.QueueID, &out.QueueID + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType *out = new(string) **out = **in } + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = new(StorageConfigObservation) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfigsInitParameters. -func (in *QueueConfigsInitParameters) DeepCopy() *QueueConfigsInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStorageConfigObservation. +func (in *InstanceStorageConfigObservation) DeepCopy() *InstanceStorageConfigObservation { if in == nil { return nil } - out := new(QueueConfigsInitParameters) + out := new(InstanceStorageConfigObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *QueueConfigsObservation) DeepCopyInto(out *QueueConfigsObservation) { +func (in *InstanceStorageConfigParameters) DeepCopyInto(out *InstanceStorageConfigParameters) { *out = *in - if in.Channel != nil { - in, out := &in.Channel, &out.Channel + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID *out = new(string) **out = **in } - if in.Delay != nil { - in, out := &in.Delay, &out.Delay - *out = new(float64) - **out = **in + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) } - if in.Priority != nil { - in, out := &in.Priority, &out.Priority - *out = new(float64) - **out = **in + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) } - if in.QueueArn != nil { - in, out := &in.QueueArn, &out.QueueArn + if in.Region != nil { + in, out := &in.Region, &out.Region *out = new(string) **out = **in } - if in.QueueID != nil { - in, out := &in.QueueID, &out.QueueID + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType *out = new(string) **out = **in } - if in.QueueName != nil { - in, out := &in.QueueName, &out.QueueName + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = new(StorageConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStorageConfigParameters. +func (in *InstanceStorageConfigParameters) DeepCopy() *InstanceStorageConfigParameters { + if in == nil { + return nil + } + out := new(InstanceStorageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceStorageConfigSpec) DeepCopyInto(out *InstanceStorageConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStorageConfigSpec. +func (in *InstanceStorageConfigSpec) DeepCopy() *InstanceStorageConfigSpec { + if in == nil { + return nil + } + out := new(InstanceStorageConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceStorageConfigStatus) DeepCopyInto(out *InstanceStorageConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStorageConfigStatus. +func (in *InstanceStorageConfigStatus) DeepCopy() *InstanceStorageConfigStatus { + if in == nil { + return nil + } + out := new(InstanceStorageConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisFirehoseConfigInitParameters) DeepCopyInto(out *KinesisFirehoseConfigInitParameters) { + *out = *in + if in.FirehoseArn != nil { + in, out := &in.FirehoseArn, &out.FirehoseArn *out = new(string) **out = **in } + if in.FirehoseArnRef != nil { + in, out := &in.FirehoseArnRef, &out.FirehoseArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FirehoseArnSelector != nil { + in, out := &in.FirehoseArnSelector, &out.FirehoseArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfigsObservation. 
-func (in *QueueConfigsObservation) DeepCopy() *QueueConfigsObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseConfigInitParameters. +func (in *KinesisFirehoseConfigInitParameters) DeepCopy() *KinesisFirehoseConfigInitParameters { if in == nil { return nil } - out := new(QueueConfigsObservation) + out := new(KinesisFirehoseConfigInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QueueConfigsParameters) DeepCopyInto(out *QueueConfigsParameters) { +func (in *KinesisFirehoseConfigObservation) DeepCopyInto(out *KinesisFirehoseConfigObservation) { *out = *in - if in.Channel != nil { - in, out := &in.Channel, &out.Channel + if in.FirehoseArn != nil { + in, out := &in.FirehoseArn, &out.FirehoseArn *out = new(string) **out = **in } - if in.Delay != nil { - in, out := &in.Delay, &out.Delay +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseConfigObservation. +func (in *KinesisFirehoseConfigObservation) DeepCopy() *KinesisFirehoseConfigObservation { + if in == nil { + return nil + } + out := new(KinesisFirehoseConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisFirehoseConfigParameters) DeepCopyInto(out *KinesisFirehoseConfigParameters) { + *out = *in + if in.FirehoseArn != nil { + in, out := &in.FirehoseArn, &out.FirehoseArn + *out = new(string) + **out = **in + } + if in.FirehoseArnRef != nil { + in, out := &in.FirehoseArnRef, &out.FirehoseArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FirehoseArnSelector != nil { + in, out := &in.FirehoseArnSelector, &out.FirehoseArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseConfigParameters. +func (in *KinesisFirehoseConfigParameters) DeepCopy() *KinesisFirehoseConfigParameters { + if in == nil { + return nil + } + out := new(KinesisFirehoseConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamConfigInitParameters) DeepCopyInto(out *KinesisStreamConfigInitParameters) { + *out = *in + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamArnRef != nil { + in, out := &in.StreamArnRef, &out.StreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamArnSelector != nil { + in, out := &in.StreamArnSelector, &out.StreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamConfigInitParameters. +func (in *KinesisStreamConfigInitParameters) DeepCopy() *KinesisStreamConfigInitParameters { + if in == nil { + return nil + } + out := new(KinesisStreamConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisStreamConfigObservation) DeepCopyInto(out *KinesisStreamConfigObservation) { + *out = *in + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamConfigObservation. +func (in *KinesisStreamConfigObservation) DeepCopy() *KinesisStreamConfigObservation { + if in == nil { + return nil + } + out := new(KinesisStreamConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamConfigParameters) DeepCopyInto(out *KinesisStreamConfigParameters) { + *out = *in + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamArnRef != nil { + in, out := &in.StreamArnRef, &out.StreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamArnSelector != nil { + in, out := &in.StreamArnSelector, &out.StreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamConfigParameters. +func (in *KinesisStreamConfigParameters) DeepCopy() *KinesisStreamConfigParameters { + if in == nil { + return nil + } + out := new(KinesisStreamConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisVideoStreamConfigInitParameters) DeepCopyInto(out *KinesisVideoStreamConfigInitParameters) { + *out = *in + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(EncryptionConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RetentionPeriodHours != nil { + in, out := &in.RetentionPeriodHours, &out.RetentionPeriodHours *out = new(float64) **out = **in } - if in.Priority != nil { - in, out := &in.Priority, &out.Priority +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisVideoStreamConfigInitParameters. +func (in *KinesisVideoStreamConfigInitParameters) DeepCopy() *KinesisVideoStreamConfigInitParameters { + if in == nil { + return nil + } + out := new(KinesisVideoStreamConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisVideoStreamConfigObservation) DeepCopyInto(out *KinesisVideoStreamConfigObservation) { + *out = *in + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(EncryptionConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RetentionPeriodHours != nil { + in, out := &in.RetentionPeriodHours, &out.RetentionPeriodHours *out = new(float64) **out = **in } - if in.QueueID != nil { - in, out := &in.QueueID, &out.QueueID +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisVideoStreamConfigObservation. 
+func (in *KinesisVideoStreamConfigObservation) DeepCopy() *KinesisVideoStreamConfigObservation { + if in == nil { + return nil + } + out := new(KinesisVideoStreamConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisVideoStreamConfigParameters) DeepCopyInto(out *KinesisVideoStreamConfigParameters) { + *out = *in + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(EncryptionConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix *out = new(string) **out = **in } + if in.RetentionPeriodHours != nil { + in, out := &in.RetentionPeriodHours, &out.RetentionPeriodHours + *out = new(float64) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfigsParameters. -func (in *QueueConfigsParameters) DeepCopy() *QueueConfigsParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisVideoStreamConfigParameters. +func (in *KinesisVideoStreamConfigParameters) DeepCopy() *KinesisVideoStreamConfigParameters { if in == nil { return nil } - out := new(QueueConfigsParameters) + out := new(KinesisVideoStreamConfigParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *QueueInitParameters) DeepCopyInto(out *QueueInitParameters) { +func (in *LevelFiveInitParameters) DeepCopyInto(out *LevelFiveInitParameters) { *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description + if in.Name != nil { + in, out := &in.Name, &out.Name *out = new(string) **out = **in } - if in.HoursOfOperationID != nil { - in, out := &in.HoursOfOperationID, &out.HoursOfOperationID +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelFiveInitParameters. +func (in *LevelFiveInitParameters) DeepCopy() *LevelFiveInitParameters { + if in == nil { + return nil + } + out := new(LevelFiveInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LevelFiveObservation) DeepCopyInto(out *LevelFiveObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn *out = new(string) **out = **in } - if in.HoursOfOperationIDRef != nil { - in, out := &in.HoursOfOperationIDRef, &out.HoursOfOperationIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelFiveObservation. +func (in *LevelFiveObservation) DeepCopy() *LevelFiveObservation { + if in == nil { + return nil + } + out := new(LevelFiveObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LevelFiveParameters) DeepCopyInto(out *LevelFiveParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelFiveParameters. +func (in *LevelFiveParameters) DeepCopy() *LevelFiveParameters { + if in == nil { + return nil + } + out := new(LevelFiveParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LevelFourInitParameters) DeepCopyInto(out *LevelFourInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelFourInitParameters. +func (in *LevelFourInitParameters) DeepCopy() *LevelFourInitParameters { + if in == nil { + return nil + } + out := new(LevelFourInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LevelFourObservation) DeepCopyInto(out *LevelFourObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelFourObservation. +func (in *LevelFourObservation) DeepCopy() *LevelFourObservation { + if in == nil { + return nil + } + out := new(LevelFourObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LevelFourParameters) DeepCopyInto(out *LevelFourParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelFourParameters. +func (in *LevelFourParameters) DeepCopy() *LevelFourParameters { + if in == nil { + return nil + } + out := new(LevelFourParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LevelOneInitParameters) DeepCopyInto(out *LevelOneInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelOneInitParameters. +func (in *LevelOneInitParameters) DeepCopy() *LevelOneInitParameters { + if in == nil { + return nil + } + out := new(LevelOneInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LevelOneObservation) DeepCopyInto(out *LevelOneObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelOneObservation. +func (in *LevelOneObservation) DeepCopy() *LevelOneObservation { + if in == nil { + return nil + } + out := new(LevelOneObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LevelOneParameters) DeepCopyInto(out *LevelOneParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelOneParameters. +func (in *LevelOneParameters) DeepCopy() *LevelOneParameters { + if in == nil { + return nil + } + out := new(LevelOneParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LevelThreeInitParameters) DeepCopyInto(out *LevelThreeInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelThreeInitParameters. +func (in *LevelThreeInitParameters) DeepCopy() *LevelThreeInitParameters { + if in == nil { + return nil + } + out := new(LevelThreeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LevelThreeObservation) DeepCopyInto(out *LevelThreeObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelThreeObservation. +func (in *LevelThreeObservation) DeepCopy() *LevelThreeObservation { + if in == nil { + return nil + } + out := new(LevelThreeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LevelThreeParameters) DeepCopyInto(out *LevelThreeParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelThreeParameters. +func (in *LevelThreeParameters) DeepCopy() *LevelThreeParameters { + if in == nil { + return nil + } + out := new(LevelThreeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LevelTwoInitParameters) DeepCopyInto(out *LevelTwoInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelTwoInitParameters. +func (in *LevelTwoInitParameters) DeepCopy() *LevelTwoInitParameters { + if in == nil { + return nil + } + out := new(LevelTwoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LevelTwoObservation) DeepCopyInto(out *LevelTwoObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelTwoObservation. +func (in *LevelTwoObservation) DeepCopy() *LevelTwoObservation { + if in == nil { + return nil + } + out := new(LevelTwoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LevelTwoParameters) DeepCopyInto(out *LevelTwoParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LevelTwoParameters. +func (in *LevelTwoParameters) DeepCopy() *LevelTwoParameters { + if in == nil { + return nil + } + out := new(LevelTwoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LexBotInitParameters) DeepCopyInto(out *LexBotInitParameters) { + *out = *in + if in.LexRegion != nil { + in, out := &in.LexRegion, &out.LexRegion + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LexBotInitParameters. +func (in *LexBotInitParameters) DeepCopy() *LexBotInitParameters { + if in == nil { + return nil + } + out := new(LexBotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LexBotObservation) DeepCopyInto(out *LexBotObservation) { + *out = *in + if in.LexRegion != nil { + in, out := &in.LexRegion, &out.LexRegion + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LexBotObservation. 
+func (in *LexBotObservation) DeepCopy() *LexBotObservation { + if in == nil { + return nil + } + out := new(LexBotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LexBotParameters) DeepCopyInto(out *LexBotParameters) { + *out = *in + if in.LexRegion != nil { + in, out := &in.LexRegion, &out.LexRegion + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LexBotParameters. +func (in *LexBotParameters) DeepCopy() *LexBotParameters { + if in == nil { + return nil + } + out := new(LexBotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaConcurrenciesInitParameters) DeepCopyInto(out *MediaConcurrenciesInitParameters) { + *out = *in + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaConcurrenciesInitParameters. +func (in *MediaConcurrenciesInitParameters) DeepCopy() *MediaConcurrenciesInitParameters { + if in == nil { + return nil + } + out := new(MediaConcurrenciesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MediaConcurrenciesObservation) DeepCopyInto(out *MediaConcurrenciesObservation) { + *out = *in + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaConcurrenciesObservation. +func (in *MediaConcurrenciesObservation) DeepCopy() *MediaConcurrenciesObservation { + if in == nil { + return nil + } + out := new(MediaConcurrenciesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaConcurrenciesParameters) DeepCopyInto(out *MediaConcurrenciesParameters) { + *out = *in + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaConcurrenciesParameters. +func (in *MediaConcurrenciesParameters) DeepCopy() *MediaConcurrenciesParameters { + if in == nil { + return nil + } + out := new(MediaConcurrenciesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutboundCallerConfigInitParameters) DeepCopyInto(out *OutboundCallerConfigInitParameters) { + *out = *in + if in.OutboundCallerIDName != nil { + in, out := &in.OutboundCallerIDName, &out.OutboundCallerIDName + *out = new(string) + **out = **in + } + if in.OutboundCallerIDNumberID != nil { + in, out := &in.OutboundCallerIDNumberID, &out.OutboundCallerIDNumberID + *out = new(string) + **out = **in + } + if in.OutboundFlowID != nil { + in, out := &in.OutboundFlowID, &out.OutboundFlowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundCallerConfigInitParameters. +func (in *OutboundCallerConfigInitParameters) DeepCopy() *OutboundCallerConfigInitParameters { + if in == nil { + return nil + } + out := new(OutboundCallerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutboundCallerConfigObservation) DeepCopyInto(out *OutboundCallerConfigObservation) { + *out = *in + if in.OutboundCallerIDName != nil { + in, out := &in.OutboundCallerIDName, &out.OutboundCallerIDName + *out = new(string) + **out = **in + } + if in.OutboundCallerIDNumberID != nil { + in, out := &in.OutboundCallerIDNumberID, &out.OutboundCallerIDNumberID + *out = new(string) + **out = **in + } + if in.OutboundFlowID != nil { + in, out := &in.OutboundFlowID, &out.OutboundFlowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundCallerConfigObservation. +func (in *OutboundCallerConfigObservation) DeepCopy() *OutboundCallerConfigObservation { + if in == nil { + return nil + } + out := new(OutboundCallerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutboundCallerConfigParameters) DeepCopyInto(out *OutboundCallerConfigParameters) { + *out = *in + if in.OutboundCallerIDName != nil { + in, out := &in.OutboundCallerIDName, &out.OutboundCallerIDName + *out = new(string) + **out = **in + } + if in.OutboundCallerIDNumberID != nil { + in, out := &in.OutboundCallerIDNumberID, &out.OutboundCallerIDNumberID + *out = new(string) + **out = **in + } + if in.OutboundFlowID != nil { + in, out := &in.OutboundFlowID, &out.OutboundFlowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundCallerConfigParameters. +func (in *OutboundCallerConfigParameters) DeepCopy() *OutboundCallerConfigParameters { + if in == nil { + return nil + } + out := new(OutboundCallerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PhoneConfigInitParameters) DeepCopyInto(out *PhoneConfigInitParameters) { + *out = *in + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhoneConfigInitParameters. +func (in *PhoneConfigInitParameters) DeepCopy() *PhoneConfigInitParameters { + if in == nil { + return nil + } + out := new(PhoneConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PhoneConfigObservation) DeepCopyInto(out *PhoneConfigObservation) { + *out = *in + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhoneConfigObservation. 
+func (in *PhoneConfigObservation) DeepCopy() *PhoneConfigObservation { + if in == nil { + return nil + } + out := new(PhoneConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PhoneConfigParameters) DeepCopyInto(out *PhoneConfigParameters) { + *out = *in + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhoneConfigParameters. +func (in *PhoneConfigParameters) DeepCopy() *PhoneConfigParameters { + if in == nil { + return nil + } + out := new(PhoneConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Queue) DeepCopyInto(out *Queue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. +func (in *Queue) DeepCopy() *Queue { + if in == nil { + return nil + } + out := new(Queue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Queue) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueConfigInitParameters) DeepCopyInto(out *QueueConfigInitParameters) { + *out = *in + if in.ContactFlowID != nil { + in, out := &in.ContactFlowID, &out.ContactFlowID + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfigInitParameters. +func (in *QueueConfigInitParameters) DeepCopy() *QueueConfigInitParameters { + if in == nil { + return nil + } + out := new(QueueConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueConfigObservation) DeepCopyInto(out *QueueConfigObservation) { + *out = *in + if in.ContactFlowID != nil { + in, out := &in.ContactFlowID, &out.ContactFlowID + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfigObservation. +func (in *QueueConfigObservation) DeepCopy() *QueueConfigObservation { + if in == nil { + return nil + } + out := new(QueueConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueConfigParameters) DeepCopyInto(out *QueueConfigParameters) { + *out = *in + if in.ContactFlowID != nil { + in, out := &in.ContactFlowID, &out.ContactFlowID + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfigParameters. 
+func (in *QueueConfigParameters) DeepCopy() *QueueConfigParameters { + if in == nil { + return nil + } + out := new(QueueConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueConfigsInitParameters) DeepCopyInto(out *QueueConfigsInitParameters) { + *out = *in + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + if in.Delay != nil { + in, out := &in.Delay, &out.Delay + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfigsInitParameters. +func (in *QueueConfigsInitParameters) DeepCopy() *QueueConfigsInitParameters { + if in == nil { + return nil + } + out := new(QueueConfigsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueConfigsObservation) DeepCopyInto(out *QueueConfigsObservation) { + *out = *in + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + if in.Delay != nil { + in, out := &in.Delay, &out.Delay + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.QueueArn != nil { + in, out := &in.QueueArn, &out.QueueArn + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfigsObservation. +func (in *QueueConfigsObservation) DeepCopy() *QueueConfigsObservation { + if in == nil { + return nil + } + out := new(QueueConfigsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueConfigsParameters) DeepCopyInto(out *QueueConfigsParameters) { + *out = *in + if in.Channel != nil { + in, out := &in.Channel, &out.Channel + *out = new(string) + **out = **in + } + if in.Delay != nil { + in, out := &in.Delay, &out.Delay + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfigsParameters. 
+func (in *QueueConfigsParameters) DeepCopy() *QueueConfigsParameters { + if in == nil { + return nil + } + out := new(QueueConfigsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueInitParameters) DeepCopyInto(out *QueueInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.HoursOfOperationID != nil { + in, out := &in.HoursOfOperationID, &out.HoursOfOperationID + *out = new(string) + **out = **in + } + if in.HoursOfOperationIDRef != nil { + in, out := &in.HoursOfOperationIDRef, &out.HoursOfOperationIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HoursOfOperationIDSelector != nil { + in, out := &in.HoursOfOperationIDSelector, &out.HoursOfOperationIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MaxContacts != nil { + in, out := &in.MaxContacts, &out.MaxContacts + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutboundCallerConfig != nil { + in, out := &in.OutboundCallerConfig, &out.OutboundCallerConfig + *out = make([]OutboundCallerConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QuickConnectIds != nil { + in, out := &in.QuickConnectIds, &out.QuickConnectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil 
{ + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueInitParameters. +func (in *QueueInitParameters) DeepCopy() *QueueInitParameters { + if in == nil { + return nil + } + out := new(QueueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueList) DeepCopyInto(out *QueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Queue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. +func (in *QueueList) DeepCopy() *QueueList { + if in == nil { + return nil + } + out := new(QueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueObservation) DeepCopyInto(out *QueueObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.HoursOfOperationID != nil { + in, out := &in.HoursOfOperationID, &out.HoursOfOperationID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.MaxContacts != nil { + in, out := &in.MaxContacts, &out.MaxContacts + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutboundCallerConfig != nil { + in, out := &in.OutboundCallerConfig, &out.OutboundCallerConfig + *out = make([]OutboundCallerConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.QuickConnectIds != nil { + in, out := &in.QuickConnectIds, &out.QuickConnectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal 
*string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueObservation. +func (in *QueueObservation) DeepCopy() *QueueObservation { + if in == nil { + return nil + } + out := new(QueueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueParameters) DeepCopyInto(out *QueueParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.HoursOfOperationID != nil { + in, out := &in.HoursOfOperationID, &out.HoursOfOperationID + *out = new(string) + **out = **in + } + if in.HoursOfOperationIDRef != nil { + in, out := &in.HoursOfOperationIDRef, &out.HoursOfOperationIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HoursOfOperationIDSelector != nil { + in, out := &in.HoursOfOperationIDSelector, &out.HoursOfOperationIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MaxContacts != nil { + in, out := &in.MaxContacts, &out.MaxContacts + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutboundCallerConfig != nil { + in, out := &in.OutboundCallerConfig, &out.OutboundCallerConfig + *out = 
make([]OutboundCallerConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QuickConnectIds != nil { + in, out := &in.QuickConnectIds, &out.QuickConnectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueParameters. +func (in *QueueParameters) DeepCopy() *QueueParameters { + if in == nil { + return nil + } + out := new(QueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. +func (in *QueueSpec) DeepCopy() *QueueSpec { + if in == nil { + return nil + } + out := new(QueueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueStatus) DeepCopyInto(out *QueueStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus. +func (in *QueueStatus) DeepCopy() *QueueStatus { + if in == nil { + return nil + } + out := new(QueueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuickConnect) DeepCopyInto(out *QuickConnect) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnect. +func (in *QuickConnect) DeepCopy() *QuickConnect { + if in == nil { + return nil + } + out := new(QuickConnect) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QuickConnect) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuickConnectConfigInitParameters) DeepCopyInto(out *QuickConnectConfigInitParameters) { + *out = *in + if in.PhoneConfig != nil { + in, out := &in.PhoneConfig, &out.PhoneConfig + *out = make([]PhoneConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueueConfig != nil { + in, out := &in.QueueConfig, &out.QueueConfig + *out = make([]QueueConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QuickConnectType != nil { + in, out := &in.QuickConnectType, &out.QuickConnectType + *out = new(string) + **out = **in + } + if in.UserConfig != nil { + in, out := &in.UserConfig, &out.UserConfig + *out = make([]UserConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnectConfigInitParameters. +func (in *QuickConnectConfigInitParameters) DeepCopy() *QuickConnectConfigInitParameters { + if in == nil { + return nil + } + out := new(QuickConnectConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuickConnectConfigObservation) DeepCopyInto(out *QuickConnectConfigObservation) { + *out = *in + if in.PhoneConfig != nil { + in, out := &in.PhoneConfig, &out.PhoneConfig + *out = make([]PhoneConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueueConfig != nil { + in, out := &in.QueueConfig, &out.QueueConfig + *out = make([]QueueConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QuickConnectType != nil { + in, out := &in.QuickConnectType, &out.QuickConnectType + *out = new(string) + **out = **in + } + if in.UserConfig != nil { + in, out := &in.UserConfig, &out.UserConfig + *out = make([]UserConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnectConfigObservation. +func (in *QuickConnectConfigObservation) DeepCopy() *QuickConnectConfigObservation { + if in == nil { + return nil + } + out := new(QuickConnectConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuickConnectConfigParameters) DeepCopyInto(out *QuickConnectConfigParameters) { + *out = *in + if in.PhoneConfig != nil { + in, out := &in.PhoneConfig, &out.PhoneConfig + *out = make([]PhoneConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueueConfig != nil { + in, out := &in.QueueConfig, &out.QueueConfig + *out = make([]QueueConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QuickConnectType != nil { + in, out := &in.QuickConnectType, &out.QuickConnectType + *out = new(string) + **out = **in + } + if in.UserConfig != nil { + in, out := &in.UserConfig, &out.UserConfig + *out = make([]UserConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnectConfigParameters. +func (in *QuickConnectConfigParameters) DeepCopy() *QuickConnectConfigParameters { + if in == nil { + return nil + } + out := new(QuickConnectConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuickConnectInitParameters) DeepCopyInto(out *QuickConnectInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QuickConnectConfig != nil { + in, out := &in.QuickConnectConfig, &out.QuickConnectConfig + *out = new(QuickConnectConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnectInitParameters. +func (in *QuickConnectInitParameters) DeepCopy() *QuickConnectInitParameters { + if in == nil { + return nil + } + out := new(QuickConnectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
// NOTE(review): generated deepcopy code (deepcopy-gen); regenerate instead of hand-editing.
func (in *QuickConnectList) DeepCopyInto(out *QuickConnectList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]QuickConnect, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnectList.
func (in *QuickConnectList) DeepCopy() *QuickConnectList {
	if in == nil {
		return nil
	}
	out := new(QuickConnectList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *QuickConnectList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QuickConnectObservation) DeepCopyInto(out *QuickConnectObservation) {
	*out = *in
	if in.Arn != nil {
		in, out := &in.Arn, &out.Arn
		*out = new(string)
		**out = **in
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.InstanceID != nil {
		in, out := &in.InstanceID, &out.InstanceID
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.QuickConnectConfig != nil {
		in, out := &in.QuickConnectConfig, &out.QuickConnectConfig
		*out = new(QuickConnectConfigObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.QuickConnectID != nil {
		in, out := &in.QuickConnectID, &out.QuickConnectID
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TagsAll != nil {
		in, out := &in.TagsAll, &out.TagsAll
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnectObservation.
func (in *QuickConnectObservation) DeepCopy() *QuickConnectObservation {
	if in == nil {
		return nil
	}
	out := new(QuickConnectObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QuickConnectParameters) DeepCopyInto(out *QuickConnectParameters) {
	*out = *in
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.InstanceID != nil {
		in, out := &in.InstanceID, &out.InstanceID
		*out = new(string)
		**out = **in
	}
	if in.InstanceIDRef != nil {
		in, out := &in.InstanceIDRef, &out.InstanceIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.InstanceIDSelector != nil {
		in, out := &in.InstanceIDSelector, &out.InstanceIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.QuickConnectConfig != nil {
		in, out := &in.QuickConnectConfig, &out.QuickConnectConfig
		*out = new(QuickConnectConfigParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Region != nil {
		in, out := &in.Region, &out.Region
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnectParameters.
func (in *QuickConnectParameters) DeepCopy() *QuickConnectParameters {
	if in == nil {
		return nil
	}
	out := new(QuickConnectParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QuickConnectSpec) DeepCopyInto(out *QuickConnectSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnectSpec.
func (in *QuickConnectSpec) DeepCopy() *QuickConnectSpec {
	if in == nil {
		return nil
	}
	out := new(QuickConnectSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QuickConnectStatus) DeepCopyInto(out *QuickConnectStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickConnectStatus.
func (in *QuickConnectStatus) DeepCopy() *QuickConnectStatus {
	if in == nil {
		return nil
	}
	out := new(QuickConnectStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code (deepcopy-gen); regenerate instead of hand-editing.
func (in *RoutingProfile) DeepCopyInto(out *RoutingProfile) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfile.
func (in *RoutingProfile) DeepCopy() *RoutingProfile {
	if in == nil {
		return nil
	}
	out := new(RoutingProfile)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoutingProfile) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoutingProfileInitParameters) DeepCopyInto(out *RoutingProfileInitParameters) {
	*out = *in
	if in.DefaultOutboundQueueID != nil {
		in, out := &in.DefaultOutboundQueueID, &out.DefaultOutboundQueueID
		*out = new(string)
		**out = **in
	}
	if in.DefaultOutboundQueueIDRef != nil {
		in, out := &in.DefaultOutboundQueueIDRef, &out.DefaultOutboundQueueIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.DefaultOutboundQueueIDSelector != nil {
		in, out := &in.DefaultOutboundQueueIDSelector, &out.DefaultOutboundQueueIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.InstanceID != nil {
		in, out := &in.InstanceID, &out.InstanceID
		*out = new(string)
		**out = **in
	}
	if in.InstanceIDRef != nil {
		in, out := &in.InstanceIDRef, &out.InstanceIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.InstanceIDSelector != nil {
		in, out := &in.InstanceIDSelector, &out.InstanceIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.MediaConcurrencies != nil {
		in, out := &in.MediaConcurrencies, &out.MediaConcurrencies
		*out = make([]MediaConcurrenciesInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.QueueConfigs != nil {
		in, out := &in.QueueConfigs, &out.QueueConfigs
		*out = make([]QueueConfigsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileInitParameters.
func (in *RoutingProfileInitParameters) DeepCopy() *RoutingProfileInitParameters {
	if in == nil {
		return nil
	}
	out := new(RoutingProfileInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoutingProfileList) DeepCopyInto(out *RoutingProfileList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]RoutingProfile, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileList.
func (in *RoutingProfileList) DeepCopy() *RoutingProfileList {
	if in == nil {
		return nil
	}
	out := new(RoutingProfileList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// NOTE(review): generated deepcopy code (deepcopy-gen); regenerate instead of hand-editing.
func (in *RoutingProfileList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoutingProfileObservation) DeepCopyInto(out *RoutingProfileObservation) {
	*out = *in
	if in.Arn != nil {
		in, out := &in.Arn, &out.Arn
		*out = new(string)
		**out = **in
	}
	if in.DefaultOutboundQueueID != nil {
		in, out := &in.DefaultOutboundQueueID, &out.DefaultOutboundQueueID
		*out = new(string)
		**out = **in
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.InstanceID != nil {
		in, out := &in.InstanceID, &out.InstanceID
		*out = new(string)
		**out = **in
	}
	if in.MediaConcurrencies != nil {
		in, out := &in.MediaConcurrencies, &out.MediaConcurrencies
		*out = make([]MediaConcurrenciesObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.QueueConfigs != nil {
		in, out := &in.QueueConfigs, &out.QueueConfigs
		*out = make([]QueueConfigsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RoutingProfileID != nil {
		in, out := &in.RoutingProfileID, &out.RoutingProfileID
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TagsAll != nil {
		in, out := &in.TagsAll, &out.TagsAll
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileObservation.
func (in *RoutingProfileObservation) DeepCopy() *RoutingProfileObservation {
	if in == nil {
		return nil
	}
	out := new(RoutingProfileObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoutingProfileParameters) DeepCopyInto(out *RoutingProfileParameters) {
	*out = *in
	if in.DefaultOutboundQueueID != nil {
		in, out := &in.DefaultOutboundQueueID, &out.DefaultOutboundQueueID
		*out = new(string)
		**out = **in
	}
	if in.DefaultOutboundQueueIDRef != nil {
		in, out := &in.DefaultOutboundQueueIDRef, &out.DefaultOutboundQueueIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.DefaultOutboundQueueIDSelector != nil {
		in, out := &in.DefaultOutboundQueueIDSelector, &out.DefaultOutboundQueueIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.InstanceID != nil {
		in, out := &in.InstanceID, &out.InstanceID
		*out = new(string)
		**out = **in
	}
	if in.InstanceIDRef != nil {
		in, out := &in.InstanceIDRef, &out.InstanceIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.InstanceIDSelector != nil {
		in, out := &in.InstanceIDSelector, &out.InstanceIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.MediaConcurrencies != nil {
		in, out := &in.MediaConcurrencies, &out.MediaConcurrencies
		*out = make([]MediaConcurrenciesParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.QueueConfigs != nil {
		in, out := &in.QueueConfigs, &out.QueueConfigs
		*out = make([]QueueConfigsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Region != nil {
		in, out := &in.Region, &out.Region
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileParameters.
func (in *RoutingProfileParameters) DeepCopy() *RoutingProfileParameters {
	if in == nil {
		return nil
	}
	out := new(RoutingProfileParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoutingProfileSpec) DeepCopyInto(out *RoutingProfileSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileSpec.
func (in *RoutingProfileSpec) DeepCopy() *RoutingProfileSpec {
	if in == nil {
		return nil
	}
	out := new(RoutingProfileSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoutingProfileStatus) DeepCopyInto(out *RoutingProfileStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileStatus. +func (in *RoutingProfileStatus) DeepCopy() *RoutingProfileStatus { + if in == nil { + return nil + } + out := new(RoutingProfileStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigEncryptionConfigInitParameters) DeepCopyInto(out *S3ConfigEncryptionConfigInitParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigEncryptionConfigInitParameters. +func (in *S3ConfigEncryptionConfigInitParameters) DeepCopy() *S3ConfigEncryptionConfigInitParameters { + if in == nil { + return nil + } + out := new(S3ConfigEncryptionConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ConfigEncryptionConfigObservation) DeepCopyInto(out *S3ConfigEncryptionConfigObservation) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigEncryptionConfigObservation. +func (in *S3ConfigEncryptionConfigObservation) DeepCopy() *S3ConfigEncryptionConfigObservation { + if in == nil { + return nil + } + out := new(S3ConfigEncryptionConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigEncryptionConfigParameters) DeepCopyInto(out *S3ConfigEncryptionConfigParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigEncryptionConfigParameters. +func (in *S3ConfigEncryptionConfigParameters) DeepCopy() *S3ConfigEncryptionConfigParameters { + if in == nil { + return nil + } + out := new(S3ConfigEncryptionConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ConfigInitParameters) DeepCopyInto(out *S3ConfigInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(S3ConfigEncryptionConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigInitParameters. +func (in *S3ConfigInitParameters) DeepCopy() *S3ConfigInitParameters { + if in == nil { + return nil + } + out := new(S3ConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigObservation) DeepCopyInto(out *S3ConfigObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(S3ConfigEncryptionConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigObservation. 
+func (in *S3ConfigObservation) DeepCopy() *S3ConfigObservation { + if in == nil { + return nil + } + out := new(S3ConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigParameters) DeepCopyInto(out *S3ConfigParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(S3ConfigEncryptionConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigParameters. +func (in *S3ConfigParameters) DeepCopy() *S3ConfigParameters { + if in == nil { + return nil + } + out := new(S3ConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartTimeInitParameters) DeepCopyInto(out *StartTimeInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartTimeInitParameters. 
+func (in *StartTimeInitParameters) DeepCopy() *StartTimeInitParameters { + if in == nil { + return nil + } + out := new(StartTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartTimeObservation) DeepCopyInto(out *StartTimeObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartTimeObservation. +func (in *StartTimeObservation) DeepCopy() *StartTimeObservation { + if in == nil { + return nil + } + out := new(StartTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartTimeParameters) DeepCopyInto(out *StartTimeParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartTimeParameters. +func (in *StartTimeParameters) DeepCopy() *StartTimeParameters { + if in == nil { + return nil + } + out := new(StartTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageConfigInitParameters) DeepCopyInto(out *StorageConfigInitParameters) { + *out = *in + if in.KinesisFirehoseConfig != nil { + in, out := &in.KinesisFirehoseConfig, &out.KinesisFirehoseConfig + *out = new(KinesisFirehoseConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamConfig != nil { + in, out := &in.KinesisStreamConfig, &out.KinesisStreamConfig + *out = new(KinesisStreamConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisVideoStreamConfig != nil { + in, out := &in.KinesisVideoStreamConfig, &out.KinesisVideoStreamConfig + *out = new(KinesisVideoStreamConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Config != nil { + in, out := &in.S3Config, &out.S3Config + *out = new(S3ConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfigInitParameters. +func (in *StorageConfigInitParameters) DeepCopy() *StorageConfigInitParameters { + if in == nil { + return nil + } + out := new(StorageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageConfigObservation) DeepCopyInto(out *StorageConfigObservation) { + *out = *in + if in.KinesisFirehoseConfig != nil { + in, out := &in.KinesisFirehoseConfig, &out.KinesisFirehoseConfig + *out = new(KinesisFirehoseConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamConfig != nil { + in, out := &in.KinesisStreamConfig, &out.KinesisStreamConfig + *out = new(KinesisStreamConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.KinesisVideoStreamConfig != nil { + in, out := &in.KinesisVideoStreamConfig, &out.KinesisVideoStreamConfig + *out = new(KinesisVideoStreamConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.S3Config != nil { + in, out := &in.S3Config, &out.S3Config + *out = new(S3ConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfigObservation. +func (in *StorageConfigObservation) DeepCopy() *StorageConfigObservation { + if in == nil { + return nil + } + out := new(StorageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageConfigParameters) DeepCopyInto(out *StorageConfigParameters) { + *out = *in + if in.KinesisFirehoseConfig != nil { + in, out := &in.KinesisFirehoseConfig, &out.KinesisFirehoseConfig + *out = new(KinesisFirehoseConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamConfig != nil { + in, out := &in.KinesisStreamConfig, &out.KinesisStreamConfig + *out = new(KinesisStreamConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisVideoStreamConfig != nil { + in, out := &in.KinesisVideoStreamConfig, &out.KinesisVideoStreamConfig + *out = new(KinesisVideoStreamConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Config != nil { + in, out := &in.S3Config, &out.S3Config + *out = new(S3ConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfigParameters. +func (in *StorageConfigParameters) DeepCopy() *StorageConfigParameters { + if in == nil { + return nil + } + out := new(StorageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserConfigInitParameters) DeepCopyInto(out *UserConfigInitParameters) { + *out = *in + if in.ContactFlowID != nil { + in, out := &in.ContactFlowID, &out.ContactFlowID + *out = new(string) + **out = **in + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserConfigInitParameters. +func (in *UserConfigInitParameters) DeepCopy() *UserConfigInitParameters { + if in == nil { + return nil + } + out := new(UserConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserConfigObservation) DeepCopyInto(out *UserConfigObservation) { + *out = *in + if in.ContactFlowID != nil { + in, out := &in.ContactFlowID, &out.ContactFlowID + *out = new(string) + **out = **in + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserConfigObservation. +func (in *UserConfigObservation) DeepCopy() *UserConfigObservation { + if in == nil { + return nil + } + out := new(UserConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserConfigParameters) DeepCopyInto(out *UserConfigParameters) { + *out = *in + if in.ContactFlowID != nil { + in, out := &in.ContactFlowID, &out.ContactFlowID + *out = new(string) + **out = **in + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserConfigParameters. +func (in *UserConfigParameters) DeepCopy() *UserConfigParameters { + if in == nil { + return nil + } + out := new(UserConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserHierarchyStructure) DeepCopyInto(out *UserHierarchyStructure) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserHierarchyStructure. +func (in *UserHierarchyStructure) DeepCopy() *UserHierarchyStructure { + if in == nil { + return nil } - if in.HoursOfOperationIDSelector != nil { - in, out := &in.HoursOfOperationIDSelector, &out.HoursOfOperationIDSelector - *out = new(v1.Selector) + out := new(UserHierarchyStructure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserHierarchyStructure) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserHierarchyStructureInitParameters) DeepCopyInto(out *UserHierarchyStructureInitParameters) { + *out = *in + if in.HierarchyStructure != nil { + in, out := &in.HierarchyStructure, &out.HierarchyStructure + *out = new(HierarchyStructureInitParameters) (*in).DeepCopyInto(*out) } if in.InstanceID != nil { @@ -859,93 +4082,44 @@ func (in *QueueInitParameters) DeepCopyInto(out *QueueInitParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } - if in.MaxContacts != nil { - in, out := &in.MaxContacts, &out.MaxContacts - *out = new(float64) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } - if in.OutboundCallerConfig != nil { - in, out := &in.OutboundCallerConfig, &out.OutboundCallerConfig - *out = make([]OutboundCallerConfigInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.QuickConnectIds != nil { - in, out := &in.QuickConnectIds, &out.QuickConnectIds - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(string) - **out = **in - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueInitParameters. -func (in *QueueInitParameters) DeepCopy() *QueueInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserHierarchyStructureInitParameters. 
+func (in *UserHierarchyStructureInitParameters) DeepCopy() *UserHierarchyStructureInitParameters { if in == nil { return nil } - out := new(QueueInitParameters) + out := new(UserHierarchyStructureInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QueueList) DeepCopyInto(out *QueueList) { +func (in *UserHierarchyStructureList) DeepCopyInto(out *UserHierarchyStructureList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Queue, len(*in)) + *out = make([]UserHierarchyStructure, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. -func (in *QueueList) DeepCopy() *QueueList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserHierarchyStructureList. +func (in *UserHierarchyStructureList) DeepCopy() *UserHierarchyStructureList { if in == nil { return nil } - out := new(QueueList) + out := new(UserHierarchyStructureList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *QueueList) DeepCopyObject() runtime.Object { +func (in *UserHierarchyStructureList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -953,22 +4127,12 @@ func (in *QueueList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *QueueObservation) DeepCopyInto(out *QueueObservation) { +func (in *UserHierarchyStructureObservation) DeepCopyInto(out *UserHierarchyStructureObservation) { *out = *in - if in.Arn != nil { - in, out := &in.Arn, &out.Arn - *out = new(string) - **out = **in - } - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.HoursOfOperationID != nil { - in, out := &in.HoursOfOperationID, &out.HoursOfOperationID - *out = new(string) - **out = **in + if in.HierarchyStructure != nil { + in, out := &in.HierarchyStructure, &out.HierarchyStructure + *out = new(HierarchyStructureObservation) + (*in).DeepCopyInto(*out) } if in.ID != nil { in, out := &in.ID, &out.ID @@ -980,109 +4144,24 @@ func (in *QueueObservation) DeepCopyInto(out *QueueObservation) { *out = new(string) **out = **in } - if in.MaxContacts != nil { - in, out := &in.MaxContacts, &out.MaxContacts - *out = new(float64) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } - if in.OutboundCallerConfig != nil { - in, out := &in.OutboundCallerConfig, &out.OutboundCallerConfig - *out = make([]OutboundCallerConfigObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.QueueID != nil { - in, out := &in.QueueID, &out.QueueID - *out = new(string) - **out = **in - } - if in.QuickConnectIds != nil { - in, out := &in.QuickConnectIds, &out.QuickConnectIds - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(string) - **out = **in - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, 
&outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } - } - if in.TagsAll != nil { - in, out := &in.TagsAll, &out.TagsAll - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueObservation. -func (in *QueueObservation) DeepCopy() *QueueObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserHierarchyStructureObservation. +func (in *UserHierarchyStructureObservation) DeepCopy() *UserHierarchyStructureObservation { if in == nil { return nil } - out := new(QueueObservation) + out := new(UserHierarchyStructureObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *QueueParameters) DeepCopyInto(out *QueueParameters) { +func (in *UserHierarchyStructureParameters) DeepCopyInto(out *UserHierarchyStructureParameters) { *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.HoursOfOperationID != nil { - in, out := &in.HoursOfOperationID, &out.HoursOfOperationID - *out = new(string) - **out = **in - } - if in.HoursOfOperationIDRef != nil { - in, out := &in.HoursOfOperationIDRef, &out.HoursOfOperationIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.HoursOfOperationIDSelector != nil { - in, out := &in.HoursOfOperationIDSelector, &out.HoursOfOperationIDSelector - *out = new(v1.Selector) + if in.HierarchyStructure != nil { + in, out := &in.HierarchyStructure, &out.HierarchyStructure + *out = new(HierarchyStructureParameters) (*in).DeepCopyInto(*out) } if in.InstanceID != nil { @@ -1100,157 +4179,76 @@ func (in *QueueParameters) DeepCopyInto(out *QueueParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } - if in.MaxContacts != nil { - in, out := &in.MaxContacts, &out.MaxContacts - *out = new(float64) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } - if in.OutboundCallerConfig != nil { - in, out := &in.OutboundCallerConfig, &out.OutboundCallerConfig - *out = make([]OutboundCallerConfigParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.QuickConnectIds != nil { - in, out := &in.QuickConnectIds, &out.QuickConnectIds - *out = make([]*string, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } - } - } if in.Region != nil { in, out := &in.Region, &out.Region *out = new(string) **out = **in } - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(string) - **out = **in - } - if in.Tags != nil { - in, out := 
&in.Tags, &out.Tags - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal - *out = new(string) - **out = **in - } - (*out)[key] = outVal - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueParameters. -func (in *QueueParameters) DeepCopy() *QueueParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserHierarchyStructureParameters. +func (in *UserHierarchyStructureParameters) DeepCopy() *UserHierarchyStructureParameters { if in == nil { return nil } - out := new(QueueParameters) + out := new(UserHierarchyStructureParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { +func (in *UserHierarchyStructureSpec) DeepCopyInto(out *UserHierarchyStructureSpec) { *out = *in in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) - in.InitProvider.DeepCopyInto(&out.InitProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. -func (in *QueueSpec) DeepCopy() *QueueSpec { - if in == nil { - return nil - } - out := new(QueueSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QueueStatus) DeepCopyInto(out *QueueStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus. 
-func (in *QueueStatus) DeepCopy() *QueueStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserHierarchyStructureSpec. +func (in *UserHierarchyStructureSpec) DeepCopy() *UserHierarchyStructureSpec { if in == nil { return nil } - out := new(QueueStatus) + out := new(UserHierarchyStructureSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoutingProfile) DeepCopyInto(out *RoutingProfile) { +func (in *UserHierarchyStructureStatus) DeepCopyInto(out *UserHierarchyStructureStatus) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfile. -func (in *RoutingProfile) DeepCopy() *RoutingProfile { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserHierarchyStructureStatus. +func (in *UserHierarchyStructureStatus) DeepCopy() *UserHierarchyStructureStatus { if in == nil { return nil } - out := new(RoutingProfile) + out := new(UserHierarchyStructureStatus) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoutingProfile) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RoutingProfileInitParameters) DeepCopyInto(out *RoutingProfileInitParameters) { +func (in *UserInitParameters) DeepCopyInto(out *UserInitParameters) { *out = *in - if in.DefaultOutboundQueueID != nil { - in, out := &in.DefaultOutboundQueueID, &out.DefaultOutboundQueueID + if in.DirectoryUserID != nil { + in, out := &in.DirectoryUserID, &out.DirectoryUserID *out = new(string) **out = **in } - if in.DefaultOutboundQueueIDRef != nil { - in, out := &in.DefaultOutboundQueueIDRef, &out.DefaultOutboundQueueIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.DefaultOutboundQueueIDSelector != nil { - in, out := &in.DefaultOutboundQueueIDSelector, &out.DefaultOutboundQueueIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } - if in.Description != nil { - in, out := &in.Description, &out.Description + if in.HierarchyGroupID != nil { + in, out := &in.HierarchyGroupID, &out.HierarchyGroupID *out = new(string) **out = **in } + if in.IdentityInfo != nil { + in, out := &in.IdentityInfo, &out.IdentityInfo + *out = new(IdentityInfoInitParameters) + (*in).DeepCopyInto(*out) + } if in.InstanceID != nil { in, out := &in.InstanceID, &out.InstanceID *out = new(string) @@ -1266,23 +4264,45 @@ func (in *RoutingProfileInitParameters) DeepCopyInto(out *RoutingProfileInitPara *out = new(v1.Selector) (*in).DeepCopyInto(*out) } - if in.MediaConcurrencies != nil { - in, out := &in.MediaConcurrencies, &out.MediaConcurrencies - *out = make([]MediaConcurrenciesInitParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) **out = **in } - if in.QueueConfigs != nil { - in, out := &in.QueueConfigs, &out.QueueConfigs - *out = make([]QueueConfigsInitParameters, len(*in)) + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PhoneConfig != nil { + in, out := 
&in.PhoneConfig, &out.PhoneConfig + *out = new(UserPhoneConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoutingProfileID != nil { + in, out := &in.RoutingProfileID, &out.RoutingProfileID + *out = new(string) + **out = **in + } + if in.RoutingProfileIDRef != nil { + in, out := &in.RoutingProfileIDRef, &out.RoutingProfileIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoutingProfileIDSelector != nil { + in, out := &in.RoutingProfileIDSelector, &out.RoutingProfileIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfileIds != nil { + in, out := &in.SecurityProfileIds, &out.SecurityProfileIds + *out = make([]*string, len(*in)) for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } } } if in.Tags != nil { @@ -1303,42 +4323,42 @@ func (in *RoutingProfileInitParameters) DeepCopyInto(out *RoutingProfileInitPara } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileInitParameters. -func (in *RoutingProfileInitParameters) DeepCopy() *RoutingProfileInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInitParameters. +func (in *UserInitParameters) DeepCopy() *UserInitParameters { if in == nil { return nil } - out := new(RoutingProfileInitParameters) + out := new(UserInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RoutingProfileList) DeepCopyInto(out *RoutingProfileList) { +func (in *UserList) DeepCopyInto(out *UserList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]RoutingProfile, len(*in)) + *out = make([]User, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileList. -func (in *RoutingProfileList) DeepCopy() *RoutingProfileList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { if in == nil { return nil } - out := new(RoutingProfileList) + out := new(UserList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoutingProfileList) DeepCopyObject() runtime.Object { +func (in *UserList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -1346,20 +4366,20 @@ func (in *RoutingProfileList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RoutingProfileObservation) DeepCopyInto(out *RoutingProfileObservation) { +func (in *UserObservation) DeepCopyInto(out *UserObservation) { *out = *in if in.Arn != nil { in, out := &in.Arn, &out.Arn *out = new(string) **out = **in } - if in.DefaultOutboundQueueID != nil { - in, out := &in.DefaultOutboundQueueID, &out.DefaultOutboundQueueID + if in.DirectoryUserID != nil { + in, out := &in.DirectoryUserID, &out.DirectoryUserID *out = new(string) **out = **in } - if in.Description != nil { - in, out := &in.Description, &out.Description + if in.HierarchyGroupID != nil { + in, out := &in.HierarchyGroupID, &out.HierarchyGroupID *out = new(string) **out = **in } @@ -1368,35 +4388,42 @@ func (in *RoutingProfileObservation) DeepCopyInto(out *RoutingProfileObservation *out = new(string) **out = **in } + if in.IdentityInfo != nil { + in, out := &in.IdentityInfo, &out.IdentityInfo + *out = new(IdentityInfoObservation) + (*in).DeepCopyInto(*out) + } if in.InstanceID != nil { in, out := &in.InstanceID, &out.InstanceID *out = new(string) **out = **in } - if in.MediaConcurrencies != nil { - in, out := &in.MediaConcurrencies, &out.MediaConcurrencies - *out = make([]MediaConcurrenciesObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) **out = **in } - if in.QueueConfigs != nil { - in, out := &in.QueueConfigs, &out.QueueConfigs - *out = make([]QueueConfigsObservation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.PhoneConfig != nil { + in, out := &in.PhoneConfig, &out.PhoneConfig + *out = new(UserPhoneConfigObservation) + (*in).DeepCopyInto(*out) } if in.RoutingProfileID != nil { in, out := &in.RoutingProfileID, &out.RoutingProfileID *out = new(string) **out = **in } + if in.SecurityProfileIds != nil { + in, out := &in.SecurityProfileIds, &out.SecurityProfileIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] 
!= nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make(map[string]*string, len(*in)) @@ -1429,41 +4456,41 @@ func (in *RoutingProfileObservation) DeepCopyInto(out *RoutingProfileObservation (*out)[key] = outVal } } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileObservation. -func (in *RoutingProfileObservation) DeepCopy() *RoutingProfileObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. +func (in *UserObservation) DeepCopy() *UserObservation { if in == nil { return nil } - out := new(RoutingProfileObservation) + out := new(UserObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RoutingProfileParameters) DeepCopyInto(out *RoutingProfileParameters) { +func (in *UserParameters) DeepCopyInto(out *UserParameters) { *out = *in - if in.DefaultOutboundQueueID != nil { - in, out := &in.DefaultOutboundQueueID, &out.DefaultOutboundQueueID + if in.DirectoryUserID != nil { + in, out := &in.DirectoryUserID, &out.DirectoryUserID *out = new(string) **out = **in } - if in.DefaultOutboundQueueIDRef != nil { - in, out := &in.DefaultOutboundQueueIDRef, &out.DefaultOutboundQueueIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.DefaultOutboundQueueIDSelector != nil { - in, out := &in.DefaultOutboundQueueIDSelector, &out.DefaultOutboundQueueIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } - if in.Description != nil { - in, out := &in.Description, &out.Description + if in.HierarchyGroupID != nil { + in, out := &in.HierarchyGroupID, &out.HierarchyGroupID *out = new(string) **out = **in } + if in.IdentityInfo != nil { + in, out := &in.IdentityInfo, &out.IdentityInfo + *out = new(IdentityInfoParameters) + (*in).DeepCopyInto(*out) + } if in.InstanceID != nil { in, out := &in.InstanceID, &out.InstanceID *out = new(string) @@ -1479,30 +4506,52 @@ func (in *RoutingProfileParameters) DeepCopyInto(out *RoutingProfileParameters) *out = new(v1.Selector) (*in).DeepCopyInto(*out) } - if in.MediaConcurrencies != nil { - in, out := &in.MediaConcurrencies, &out.MediaConcurrencies - *out = make([]MediaConcurrenciesParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) **out = **in } - if in.QueueConfigs != nil { - in, out := &in.QueueConfigs, &out.QueueConfigs - *out = make([]QueueConfigsParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if 
in.PhoneConfig != nil { + in, out := &in.PhoneConfig, &out.PhoneConfig + *out = new(UserPhoneConfigParameters) + (*in).DeepCopyInto(*out) } if in.Region != nil { in, out := &in.Region, &out.Region *out = new(string) **out = **in } + if in.RoutingProfileID != nil { + in, out := &in.RoutingProfileID, &out.RoutingProfileID + *out = new(string) + **out = **in + } + if in.RoutingProfileIDRef != nil { + in, out := &in.RoutingProfileIDRef, &out.RoutingProfileIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoutingProfileIDSelector != nil { + in, out := &in.RoutingProfileIDSelector, &out.RoutingProfileIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfileIds != nil { + in, out := &in.SecurityProfileIds, &out.SecurityProfileIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make(map[string]*string, len(*in)) @@ -1521,122 +4570,152 @@ func (in *RoutingProfileParameters) DeepCopyInto(out *RoutingProfileParameters) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileParameters. -func (in *RoutingProfileParameters) DeepCopy() *RoutingProfileParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { if in == nil { return nil } - out := new(RoutingProfileParameters) + out := new(UserParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RoutingProfileSpec) DeepCopyInto(out *RoutingProfileSpec) { +func (in *UserPhoneConfigInitParameters) DeepCopyInto(out *UserPhoneConfigInitParameters) { *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) - in.InitProvider.DeepCopyInto(&out.InitProvider) + if in.AfterContactWorkTimeLimit != nil { + in, out := &in.AfterContactWorkTimeLimit, &out.AfterContactWorkTimeLimit + *out = new(float64) + **out = **in + } + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.DeskPhoneNumber != nil { + in, out := &in.DeskPhoneNumber, &out.DeskPhoneNumber + *out = new(string) + **out = **in + } + if in.PhoneType != nil { + in, out := &in.PhoneType, &out.PhoneType + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileSpec. -func (in *RoutingProfileSpec) DeepCopy() *RoutingProfileSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPhoneConfigInitParameters. +func (in *UserPhoneConfigInitParameters) DeepCopy() *UserPhoneConfigInitParameters { if in == nil { return nil } - out := new(RoutingProfileSpec) + out := new(UserPhoneConfigInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RoutingProfileStatus) DeepCopyInto(out *RoutingProfileStatus) { +func (in *UserPhoneConfigObservation) DeepCopyInto(out *UserPhoneConfigObservation) { *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) + if in.AfterContactWorkTimeLimit != nil { + in, out := &in.AfterContactWorkTimeLimit, &out.AfterContactWorkTimeLimit + *out = new(float64) + **out = **in + } + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.DeskPhoneNumber != nil { + in, out := &in.DeskPhoneNumber, &out.DeskPhoneNumber + *out = new(string) + **out = **in + } + if in.PhoneType != nil { + in, out := &in.PhoneType, &out.PhoneType + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingProfileStatus. -func (in *RoutingProfileStatus) DeepCopy() *RoutingProfileStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPhoneConfigObservation. +func (in *UserPhoneConfigObservation) DeepCopy() *UserPhoneConfigObservation { if in == nil { return nil } - out := new(RoutingProfileStatus) + out := new(UserPhoneConfigObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StartTimeInitParameters) DeepCopyInto(out *StartTimeInitParameters) { +func (in *UserPhoneConfigParameters) DeepCopyInto(out *UserPhoneConfigParameters) { *out = *in - if in.Hours != nil { - in, out := &in.Hours, &out.Hours + if in.AfterContactWorkTimeLimit != nil { + in, out := &in.AfterContactWorkTimeLimit, &out.AfterContactWorkTimeLimit *out = new(float64) **out = **in } - if in.Minutes != nil { - in, out := &in.Minutes, &out.Minutes - *out = new(float64) + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.DeskPhoneNumber != nil { + in, out := &in.DeskPhoneNumber, &out.DeskPhoneNumber + *out = new(string) + **out = **in + } + if in.PhoneType != nil { + in, out := &in.PhoneType, &out.PhoneType + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartTimeInitParameters. -func (in *StartTimeInitParameters) DeepCopy() *StartTimeInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPhoneConfigParameters. +func (in *UserPhoneConfigParameters) DeepCopy() *UserPhoneConfigParameters { if in == nil { return nil } - out := new(StartTimeInitParameters) + out := new(UserPhoneConfigParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StartTimeObservation) DeepCopyInto(out *StartTimeObservation) { +func (in *UserSpec) DeepCopyInto(out *UserSpec) { *out = *in - if in.Hours != nil { - in, out := &in.Hours, &out.Hours - *out = new(float64) - **out = **in - } - if in.Minutes != nil { - in, out := &in.Minutes, &out.Minutes - *out = new(float64) - **out = **in - } + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartTimeObservation. -func (in *StartTimeObservation) DeepCopy() *StartTimeObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. +func (in *UserSpec) DeepCopy() *UserSpec { if in == nil { return nil } - out := new(StartTimeObservation) + out := new(UserSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StartTimeParameters) DeepCopyInto(out *StartTimeParameters) { +func (in *UserStatus) DeepCopyInto(out *UserStatus) { *out = *in - if in.Hours != nil { - in, out := &in.Hours, &out.Hours - *out = new(float64) - **out = **in - } - if in.Minutes != nil { - in, out := &in.Minutes, &out.Minutes - *out = new(float64) - **out = **in - } + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartTimeParameters. -func (in *StartTimeParameters) DeepCopy() *StartTimeParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. 
+func (in *UserStatus) DeepCopy() *UserStatus { if in == nil { return nil } - out := new(StartTimeParameters) + out := new(UserStatus) in.DeepCopyInto(out) return out } diff --git a/apis/connect/v1beta2/zz_generated.managed.go b/apis/connect/v1beta2/zz_generated.managed.go index cfd9eaae8c..97c167663a 100644 --- a/apis/connect/v1beta2/zz_generated.managed.go +++ b/apis/connect/v1beta2/zz_generated.managed.go @@ -7,6 +7,66 @@ package v1beta2 import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +// GetCondition of this BotAssociation. +func (mg *BotAssociation) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BotAssociation. +func (mg *BotAssociation) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BotAssociation. +func (mg *BotAssociation) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BotAssociation. +func (mg *BotAssociation) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BotAssociation. +func (mg *BotAssociation) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BotAssociation. +func (mg *BotAssociation) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BotAssociation. +func (mg *BotAssociation) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BotAssociation. +func (mg *BotAssociation) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BotAssociation. 
+func (mg *BotAssociation) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BotAssociation. +func (mg *BotAssociation) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BotAssociation. +func (mg *BotAssociation) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BotAssociation. +func (mg *BotAssociation) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this HoursOfOperation. func (mg *HoursOfOperation) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -67,6 +127,66 @@ func (mg *HoursOfOperation) SetWriteConnectionSecretToReference(r *xpv1.SecretRe mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this InstanceStorageConfig. 
+func (mg *InstanceStorageConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this InstanceStorageConfig. +func (mg *InstanceStorageConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this Queue. func (mg *Queue) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -127,6 +247,66 @@ func (mg *Queue) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this QuickConnect. +func (mg *QuickConnect) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this QuickConnect. +func (mg *QuickConnect) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this QuickConnect. 
+func (mg *QuickConnect) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this QuickConnect. +func (mg *QuickConnect) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this QuickConnect. +func (mg *QuickConnect) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this QuickConnect. +func (mg *QuickConnect) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this QuickConnect. +func (mg *QuickConnect) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this QuickConnect. +func (mg *QuickConnect) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this QuickConnect. +func (mg *QuickConnect) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this QuickConnect. +func (mg *QuickConnect) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this QuickConnect. +func (mg *QuickConnect) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this QuickConnect. +func (mg *QuickConnect) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this RoutingProfile. 
func (mg *RoutingProfile) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -186,3 +366,123 @@ func (mg *RoutingProfile) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectio func (mg *RoutingProfile) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } + +// GetCondition of this User. +func (mg *User) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this User. +func (mg *User) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this User. +func (mg *User) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this User. +func (mg *User) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this User. +func (mg *User) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this User. +func (mg *User) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this User. +func (mg *User) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this User. +func (mg *User) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this User. +func (mg *User) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this User. +func (mg *User) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this User. 
+func (mg *User) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this User. +func (mg *User) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this UserHierarchyStructure. 
+func (mg *UserHierarchyStructure) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this UserHierarchyStructure. +func (mg *UserHierarchyStructure) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/connect/v1beta2/zz_generated.managedlist.go b/apis/connect/v1beta2/zz_generated.managedlist.go index ad2c9787cf..4829a21c34 100644 --- a/apis/connect/v1beta2/zz_generated.managedlist.go +++ b/apis/connect/v1beta2/zz_generated.managedlist.go @@ -7,6 +7,15 @@ package v1beta2 import resource "github.com/crossplane/crossplane-runtime/pkg/resource" +// GetItems of this BotAssociationList. +func (l *BotAssociationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this HoursOfOperationList. func (l *HoursOfOperationList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -16,6 +25,15 @@ func (l *HoursOfOperationList) GetItems() []resource.Managed { return items } +// GetItems of this InstanceStorageConfigList. +func (l *InstanceStorageConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this QueueList. 
func (l *QueueList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -25,6 +43,15 @@ func (l *QueueList) GetItems() []resource.Managed { return items } +// GetItems of this QuickConnectList. +func (l *QuickConnectList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this RoutingProfileList. func (l *RoutingProfileList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -33,3 +60,21 @@ func (l *RoutingProfileList) GetItems() []resource.Managed { } return items } + +// GetItems of this UserHierarchyStructureList. +func (l *UserHierarchyStructureList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this UserList. +func (l *UserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/connect/v1beta2/zz_generated.resolvers.go b/apis/connect/v1beta2/zz_generated.resolvers.go index dd9bde5eab..f50ccbfc51 100644 --- a/apis/connect/v1beta2/zz_generated.resolvers.go +++ b/apis/connect/v1beta2/zz_generated.resolvers.go @@ -17,12 +17,86 @@ import ( client "sigs.k8s.io/controller-runtime/pkg/client" ) -func (mg *HoursOfOperation) ResolveReferences( // ResolveReferences of this HoursOfOperation. +func (mg *BotAssociation) ResolveReferences( // ResolveReferences of this BotAssociation. 
ctx context.Context, c client.Reader) error { var m xpresource.Managed var l xpresource.ManagedList r := reference.NewAPIResolver(c, mg) + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.InstanceIDRef, + Selector: mg.Spec.ForProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceID") + } + mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.LexBot != nil { + { + m, l, err = apisresolver.GetManagedResource("lexmodels.aws.upbound.io", "v1beta2", "Bot", "BotList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LexBot.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LexBot.NameRef, + Selector: mg.Spec.ForProvider.LexBot.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LexBot.Name") + } + mg.Spec.ForProvider.LexBot.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LexBot.NameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LexBot != nil { + { + m, l, err = apisresolver.GetManagedResource("lexmodels.aws.upbound.io", "v1beta2", "Bot", "BotList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LexBot.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LexBot.NameRef, + Selector: mg.Spec.InitProvider.LexBot.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LexBot.Name") + } + mg.Spec.InitProvider.LexBot.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LexBot.NameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this HoursOfOperation. +func (mg *HoursOfOperation) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + var rsp reference.ResolutionResponse var err error { @@ -67,6 +141,295 @@ func (mg *HoursOfOperation) ResolveReferences( // ResolveReferences of this Hour return nil } +// ResolveReferences of this InstanceStorageConfig. 
+func (mg *InstanceStorageConfig) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.InstanceIDRef, + Selector: mg.Spec.ForProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceID") + } + mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.StorageConfig != nil { + if mg.Spec.ForProvider.StorageConfig.KinesisFirehoseConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.ForProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArnRef, + Selector: mg.Spec.ForProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArn") + } + 
mg.Spec.ForProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.StorageConfig != nil { + if mg.Spec.ForProvider.StorageConfig.KinesisStreamConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageConfig.KinesisStreamConfig.StreamArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.ForProvider.StorageConfig.KinesisStreamConfig.StreamArnRef, + Selector: mg.Spec.ForProvider.StorageConfig.KinesisStreamConfig.StreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageConfig.KinesisStreamConfig.StreamArn") + } + mg.Spec.ForProvider.StorageConfig.KinesisStreamConfig.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageConfig.KinesisStreamConfig.StreamArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.StorageConfig != nil { + if mg.Spec.ForProvider.StorageConfig.KinesisVideoStreamConfig != nil { + if mg.Spec.ForProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyID), + Extract: 
resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyIDRef, + Selector: mg.Spec.ForProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyID") + } + mg.Spec.ForProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyIDRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.StorageConfig != nil { + if mg.Spec.ForProvider.StorageConfig.S3Config != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageConfig.S3Config.BucketName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageConfig.S3Config.BucketNameRef, + Selector: mg.Spec.ForProvider.StorageConfig.S3Config.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageConfig.S3Config.BucketName") + } + mg.Spec.ForProvider.StorageConfig.S3Config.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageConfig.S3Config.BucketNameRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.StorageConfig != nil { + if mg.Spec.ForProvider.StorageConfig.S3Config != nil { + if mg.Spec.ForProvider.StorageConfig.S3Config.EncryptionConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if 
err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageConfig.S3Config.EncryptionConfig.KeyID), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.StorageConfig.S3Config.EncryptionConfig.KeyIDRef, + Selector: mg.Spec.ForProvider.StorageConfig.S3Config.EncryptionConfig.KeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageConfig.S3Config.EncryptionConfig.KeyID") + } + mg.Spec.ForProvider.StorageConfig.S3Config.EncryptionConfig.KeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageConfig.S3Config.EncryptionConfig.KeyIDRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.InstanceIDRef, + Selector: mg.Spec.InitProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceID") + } + mg.Spec.InitProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.StorageConfig != nil { + if mg.Spec.InitProvider.StorageConfig.KinesisFirehoseConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, 
"failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.InitProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArnRef, + Selector: mg.Spec.InitProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArn") + } + mg.Spec.InitProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageConfig.KinesisFirehoseConfig.FirehoseArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.StorageConfig != nil { + if mg.Spec.InitProvider.StorageConfig.KinesisStreamConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageConfig.KinesisStreamConfig.StreamArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.InitProvider.StorageConfig.KinesisStreamConfig.StreamArnRef, + Selector: mg.Spec.InitProvider.StorageConfig.KinesisStreamConfig.StreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageConfig.KinesisStreamConfig.StreamArn") + } + mg.Spec.InitProvider.StorageConfig.KinesisStreamConfig.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageConfig.KinesisStreamConfig.StreamArnRef = 
rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.StorageConfig != nil { + if mg.Spec.InitProvider.StorageConfig.KinesisVideoStreamConfig != nil { + if mg.Spec.InitProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyID), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyIDRef, + Selector: mg.Spec.InitProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyID") + } + mg.Spec.InitProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageConfig.KinesisVideoStreamConfig.EncryptionConfig.KeyIDRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.StorageConfig != nil { + if mg.Spec.InitProvider.StorageConfig.S3Config != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageConfig.S3Config.BucketName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageConfig.S3Config.BucketNameRef, + Selector: 
mg.Spec.InitProvider.StorageConfig.S3Config.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageConfig.S3Config.BucketName") + } + mg.Spec.InitProvider.StorageConfig.S3Config.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageConfig.S3Config.BucketNameRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.StorageConfig != nil { + if mg.Spec.InitProvider.StorageConfig.S3Config != nil { + if mg.Spec.InitProvider.StorageConfig.S3Config.EncryptionConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageConfig.S3Config.EncryptionConfig.KeyID), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.StorageConfig.S3Config.EncryptionConfig.KeyIDRef, + Selector: mg.Spec.InitProvider.StorageConfig.S3Config.EncryptionConfig.KeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageConfig.S3Config.EncryptionConfig.KeyID") + } + mg.Spec.InitProvider.StorageConfig.S3Config.EncryptionConfig.KeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageConfig.S3Config.EncryptionConfig.KeyIDRef = rsp.ResolvedReference + + } + } + } + + return nil +} + // ResolveReferences of this Queue. func (mg *Queue) ResolveReferences(ctx context.Context, c client.Reader) error { var m xpresource.Managed @@ -155,6 +518,56 @@ func (mg *Queue) ResolveReferences(ctx context.Context, c client.Reader) error { return nil } +// ResolveReferences of this QuickConnect. 
+func (mg *QuickConnect) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.InstanceIDRef, + Selector: mg.Spec.ForProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceID") + } + mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.InstanceIDRef, + Selector: mg.Spec.InitProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceID") + } + mg.Spec.InitProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceIDRef = rsp.ResolvedReference + + return nil +} + // ResolveReferences of this RoutingProfile. 
func (mg *RoutingProfile) ResolveReferences(ctx context.Context, c client.Reader) error { var m xpresource.Managed @@ -164,7 +577,7 @@ func (mg *RoutingProfile) ResolveReferences(ctx context.Context, c client.Reader var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta2", "Queue", "QueueList") + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta3", "Queue", "QueueList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -202,7 +615,7 @@ func (mg *RoutingProfile) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta2", "Queue", "QueueList") + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta3", "Queue", "QueueList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -242,3 +655,141 @@ func (mg *RoutingProfile) ResolveReferences(ctx context.Context, c client.Reader return nil } + +// ResolveReferences of this User. 
+func (mg *User) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.InstanceIDRef, + Selector: mg.Spec.ForProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceID") + } + mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta2", "RoutingProfile", "RoutingProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoutingProfileID), + Extract: resource.ExtractParamPath("routing_profile_id", true), + Reference: mg.Spec.ForProvider.RoutingProfileIDRef, + Selector: mg.Spec.ForProvider.RoutingProfileIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoutingProfileID") + } + mg.Spec.ForProvider.RoutingProfileID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoutingProfileIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", 
"InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.InstanceIDRef, + Selector: mg.Spec.InitProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceID") + } + mg.Spec.InitProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta2", "RoutingProfile", "RoutingProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoutingProfileID), + Extract: resource.ExtractParamPath("routing_profile_id", true), + Reference: mg.Spec.InitProvider.RoutingProfileIDRef, + Selector: mg.Spec.InitProvider.RoutingProfileIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoutingProfileID") + } + mg.Spec.InitProvider.RoutingProfileID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoutingProfileIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this UserHierarchyStructure. 
+func (mg *UserHierarchyStructure) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.InstanceIDRef, + Selector: mg.Spec.ForProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceID") + } + mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.InstanceIDRef, + Selector: mg.Spec.InitProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceID") + } + mg.Spec.InitProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/connect/v1beta2/zz_instancestorageconfig_terraformed.go 
b/apis/connect/v1beta2/zz_instancestorageconfig_terraformed.go new file mode 100755 index 0000000000..5cbd84b836 --- /dev/null +++ b/apis/connect/v1beta2/zz_instancestorageconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this InstanceStorageConfig +func (mg *InstanceStorageConfig) GetTerraformResourceType() string { + return "aws_connect_instance_storage_config" +} + +// GetConnectionDetailsMapping for this InstanceStorageConfig +func (tr *InstanceStorageConfig) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this InstanceStorageConfig +func (tr *InstanceStorageConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this InstanceStorageConfig +func (tr *InstanceStorageConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this InstanceStorageConfig +func (tr *InstanceStorageConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this InstanceStorageConfig +func (tr *InstanceStorageConfig) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, 
&base) +} + +// SetParameters for this InstanceStorageConfig +func (tr *InstanceStorageConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this InstanceStorageConfig +func (tr *InstanceStorageConfig) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this InstanceStorageConfig +func (tr *InstanceStorageConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this InstanceStorageConfig using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *InstanceStorageConfig) LateInitialize(attrs []byte) (bool, error) { + params := &InstanceStorageConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *InstanceStorageConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/connect/v1beta2/zz_instancestorageconfig_types.go b/apis/connect/v1beta2/zz_instancestorageconfig_types.go new file mode 100755 index 0000000000..66be78ddd8 --- /dev/null +++ b/apis/connect/v1beta2/zz_instancestorageconfig_types.go @@ -0,0 +1,479 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EncryptionConfigInitParameters struct { + + // The type of encryption. Valid Values: KMS. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // The full ARN of the encryption key. Be sure to provide the full ARN of the encryption key, not just the ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Reference to a Key in kms to populate keyId. 
+ // +kubebuilder:validation:Optional + KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate keyId. + // +kubebuilder:validation:Optional + KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"` +} + +type EncryptionConfigObservation struct { + + // The type of encryption. Valid Values: KMS. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // The full ARN of the encryption key. Be sure to provide the full ARN of the encryption key, not just the ID. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` +} + +type EncryptionConfigParameters struct { + + // The type of encryption. Valid Values: KMS. + // +kubebuilder:validation:Optional + EncryptionType *string `json:"encryptionType" tf:"encryption_type,omitempty"` + + // The full ARN of the encryption key. Be sure to provide the full ARN of the encryption key, not just the ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Reference to a Key in kms to populate keyId. + // +kubebuilder:validation:Optional + KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate keyId. + // +kubebuilder:validation:Optional + KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"` +} + +type InstanceStorageConfigInitParameters struct { + + // Specifies the identifier of the hosting Amazon Connect Instance. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // A valid resource type. Valid Values: AGENT_EVENTS | ATTACHMENTS | CALL_RECORDINGS | CHAT_TRANSCRIPTS | CONTACT_EVALUATIONS | CONTACT_TRACE_RECORDS | MEDIA_STREAMS | REAL_TIME_CONTACT_ANALYSIS_SEGMENTS | SCHEDULED_REPORTS | SCREEN_RECORDINGS. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // Specifies the storage configuration options for the Connect Instance. Documented below. + StorageConfig *StorageConfigInitParameters `json:"storageConfig,omitempty" tf:"storage_config,omitempty"` +} + +type InstanceStorageConfigObservation struct { + + // The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID. + AssociationID *string `json:"associationId,omitempty" tf:"association_id,omitempty"` + + // The identifier of the hosting Amazon Connect Instance, association_id, and resource_type separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // A valid resource type. 
Valid Values: AGENT_EVENTS | ATTACHMENTS | CALL_RECORDINGS | CHAT_TRANSCRIPTS | CONTACT_EVALUATIONS | CONTACT_TRACE_RECORDS | MEDIA_STREAMS | REAL_TIME_CONTACT_ANALYSIS_SEGMENTS | SCHEDULED_REPORTS | SCREEN_RECORDINGS. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // Specifies the storage configuration options for the Connect Instance. Documented below. + StorageConfig *StorageConfigObservation `json:"storageConfig,omitempty" tf:"storage_config,omitempty"` +} + +type InstanceStorageConfigParameters struct { + + // Specifies the identifier of the hosting Amazon Connect Instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A valid resource type. Valid Values: AGENT_EVENTS | ATTACHMENTS | CALL_RECORDINGS | CHAT_TRANSCRIPTS | CONTACT_EVALUATIONS | CONTACT_TRACE_RECORDS | MEDIA_STREAMS | REAL_TIME_CONTACT_ANALYSIS_SEGMENTS | SCHEDULED_REPORTS | SCREEN_RECORDINGS. + // +kubebuilder:validation:Optional + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // Specifies the storage configuration options for the Connect Instance. Documented below. 
+ // +kubebuilder:validation:Optional + StorageConfig *StorageConfigParameters `json:"storageConfig,omitempty" tf:"storage_config,omitempty"` +} + +type KinesisFirehoseConfigInitParameters struct { + + // The Amazon Resource Name (ARN) of the delivery stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + FirehoseArn *string `json:"firehoseArn,omitempty" tf:"firehose_arn,omitempty"` + + // Reference to a DeliveryStream in firehose to populate firehoseArn. + // +kubebuilder:validation:Optional + FirehoseArnRef *v1.Reference `json:"firehoseArnRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate firehoseArn. + // +kubebuilder:validation:Optional + FirehoseArnSelector *v1.Selector `json:"firehoseArnSelector,omitempty" tf:"-"` +} + +type KinesisFirehoseConfigObservation struct { + + // The Amazon Resource Name (ARN) of the delivery stream. + FirehoseArn *string `json:"firehoseArn,omitempty" tf:"firehose_arn,omitempty"` +} + +type KinesisFirehoseConfigParameters struct { + + // The Amazon Resource Name (ARN) of the delivery stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + // +kubebuilder:validation:Optional + FirehoseArn *string `json:"firehoseArn,omitempty" tf:"firehose_arn,omitempty"` + + // Reference to a DeliveryStream in firehose to populate firehoseArn. + // +kubebuilder:validation:Optional + FirehoseArnRef *v1.Reference `json:"firehoseArnRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate firehoseArn. 
+ // +kubebuilder:validation:Optional + FirehoseArnSelector *v1.Selector `json:"firehoseArnSelector,omitempty" tf:"-"` +} + +type KinesisStreamConfigInitParameters struct { + + // The Amazon Resource Name (ARN) of the data stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` + + // Reference to a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnRef *v1.Reference `json:"streamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnSelector *v1.Selector `json:"streamArnSelector,omitempty" tf:"-"` +} + +type KinesisStreamConfigObservation struct { + + // The Amazon Resource Name (ARN) of the data stream. + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` +} + +type KinesisStreamConfigParameters struct { + + // The Amazon Resource Name (ARN) of the data stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + // +kubebuilder:validation:Optional + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` + + // Reference to a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnRef *v1.Reference `json:"streamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnSelector *v1.Selector `json:"streamArnSelector,omitempty" tf:"-"` +} + +type KinesisVideoStreamConfigInitParameters struct { + + // The encryption configuration. Documented below. 
+ EncryptionConfig *EncryptionConfigInitParameters `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` + + // The prefix of the video stream. Minimum length of 1. Maximum length of 128. When read from the state, the value returned is -connect--contact- since the API appends additional details to the prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The number of hours data is retained in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream. Minimum value of 0. Maximum value of 87600. A value of 0, indicates that the stream does not persist data. + RetentionPeriodHours *float64 `json:"retentionPeriodHours,omitempty" tf:"retention_period_hours,omitempty"` +} + +type KinesisVideoStreamConfigObservation struct { + + // The encryption configuration. Documented below. + EncryptionConfig *EncryptionConfigObservation `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` + + // The prefix of the video stream. Minimum length of 1. Maximum length of 128. When read from the state, the value returned is -connect--contact- since the API appends additional details to the prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The number of hours data is retained in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream. Minimum value of 0. Maximum value of 87600. A value of 0, indicates that the stream does not persist data. + RetentionPeriodHours *float64 `json:"retentionPeriodHours,omitempty" tf:"retention_period_hours,omitempty"` +} + +type KinesisVideoStreamConfigParameters struct { + + // The encryption configuration. Documented below. + // +kubebuilder:validation:Optional + EncryptionConfig *EncryptionConfigParameters `json:"encryptionConfig" tf:"encryption_config,omitempty"` + + // The prefix of the video stream. Minimum length of 1. Maximum length of 128. 
When read from the state, the value returned is -connect--contact- since the API appends additional details to the prefix. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix" tf:"prefix,omitempty"` + + // The number of hours data is retained in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream. Minimum value of 0. Maximum value of 87600. A value of 0, indicates that the stream does not persist data. + // +kubebuilder:validation:Optional + RetentionPeriodHours *float64 `json:"retentionPeriodHours" tf:"retention_period_hours,omitempty"` +} + +type S3ConfigEncryptionConfigInitParameters struct { + + // The type of encryption. Valid Values: KMS. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // The full ARN of the encryption key. Be sure to provide the full ARN of the encryption key, not just the ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Reference to a Key in kms to populate keyId. + // +kubebuilder:validation:Optional + KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate keyId. + // +kubebuilder:validation:Optional + KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"` +} + +type S3ConfigEncryptionConfigObservation struct { + + // The type of encryption. Valid Values: KMS. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // The full ARN of the encryption key. Be sure to provide the full ARN of the encryption key, not just the ID. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` +} + +type S3ConfigEncryptionConfigParameters struct { + + // The type of encryption. Valid Values: KMS. 
+ // +kubebuilder:validation:Optional + EncryptionType *string `json:"encryptionType" tf:"encryption_type,omitempty"` + + // The full ARN of the encryption key. Be sure to provide the full ARN of the encryption key, not just the ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Reference to a Key in kms to populate keyId. + // +kubebuilder:validation:Optional + KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate keyId. + // +kubebuilder:validation:Optional + KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"` +} + +type S3ConfigInitParameters struct { + + // The S3 bucket name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // The S3 bucket prefix. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // The encryption configuration. Documented below. + EncryptionConfig *S3ConfigEncryptionConfigInitParameters `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` +} + +type S3ConfigObservation struct { + + // The S3 bucket name. 
+ BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // The S3 bucket prefix. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // The encryption configuration. Documented below. + EncryptionConfig *S3ConfigEncryptionConfigObservation `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` +} + +type S3ConfigParameters struct { + + // The S3 bucket name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // The S3 bucket prefix. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix" tf:"bucket_prefix,omitempty"` + + // The encryption configuration. Documented below. + // +kubebuilder:validation:Optional + EncryptionConfig *S3ConfigEncryptionConfigParameters `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` +} + +type StorageConfigInitParameters struct { + + // A block that specifies the configuration of the Kinesis Firehose delivery stream. Documented below. + KinesisFirehoseConfig *KinesisFirehoseConfigInitParameters `json:"kinesisFirehoseConfig,omitempty" tf:"kinesis_firehose_config,omitempty"` + + // A block that specifies the configuration of the Kinesis data stream. Documented below. 
+ KinesisStreamConfig *KinesisStreamConfigInitParameters `json:"kinesisStreamConfig,omitempty" tf:"kinesis_stream_config,omitempty"` + + // A block that specifies the configuration of the Kinesis video stream. Documented below. + KinesisVideoStreamConfig *KinesisVideoStreamConfigInitParameters `json:"kinesisVideoStreamConfig,omitempty" tf:"kinesis_video_stream_config,omitempty"` + + // A block that specifies the configuration of S3 Bucket. Documented below. + S3Config *S3ConfigInitParameters `json:"s3Config,omitempty" tf:"s3_config,omitempty"` + + // A valid storage type. Valid Values: S3 | KINESIS_VIDEO_STREAM | KINESIS_STREAM | KINESIS_FIREHOSE. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` +} + +type StorageConfigObservation struct { + + // A block that specifies the configuration of the Kinesis Firehose delivery stream. Documented below. + KinesisFirehoseConfig *KinesisFirehoseConfigObservation `json:"kinesisFirehoseConfig,omitempty" tf:"kinesis_firehose_config,omitempty"` + + // A block that specifies the configuration of the Kinesis data stream. Documented below. + KinesisStreamConfig *KinesisStreamConfigObservation `json:"kinesisStreamConfig,omitempty" tf:"kinesis_stream_config,omitempty"` + + // A block that specifies the configuration of the Kinesis video stream. Documented below. + KinesisVideoStreamConfig *KinesisVideoStreamConfigObservation `json:"kinesisVideoStreamConfig,omitempty" tf:"kinesis_video_stream_config,omitempty"` + + // A block that specifies the configuration of S3 Bucket. Documented below. + S3Config *S3ConfigObservation `json:"s3Config,omitempty" tf:"s3_config,omitempty"` + + // A valid storage type. Valid Values: S3 | KINESIS_VIDEO_STREAM | KINESIS_STREAM | KINESIS_FIREHOSE. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` +} + +type StorageConfigParameters struct { + + // A block that specifies the configuration of the Kinesis Firehose delivery stream. Documented below. 
+ // +kubebuilder:validation:Optional + KinesisFirehoseConfig *KinesisFirehoseConfigParameters `json:"kinesisFirehoseConfig,omitempty" tf:"kinesis_firehose_config,omitempty"` + + // A block that specifies the configuration of the Kinesis data stream. Documented below. + // +kubebuilder:validation:Optional + KinesisStreamConfig *KinesisStreamConfigParameters `json:"kinesisStreamConfig,omitempty" tf:"kinesis_stream_config,omitempty"` + + // A block that specifies the configuration of the Kinesis video stream. Documented below. + // +kubebuilder:validation:Optional + KinesisVideoStreamConfig *KinesisVideoStreamConfigParameters `json:"kinesisVideoStreamConfig,omitempty" tf:"kinesis_video_stream_config,omitempty"` + + // A block that specifies the configuration of S3 Bucket. Documented below. + // +kubebuilder:validation:Optional + S3Config *S3ConfigParameters `json:"s3Config,omitempty" tf:"s3_config,omitempty"` + + // A valid storage type. Valid Values: S3 | KINESIS_VIDEO_STREAM | KINESIS_STREAM | KINESIS_FIREHOSE. + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType" tf:"storage_type,omitempty"` +} + +// InstanceStorageConfigSpec defines the desired state of InstanceStorageConfig +type InstanceStorageConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InstanceStorageConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InstanceStorageConfigInitParameters `json:"initProvider,omitempty"` +} + +// InstanceStorageConfigStatus defines the observed state of InstanceStorageConfig. +type InstanceStorageConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InstanceStorageConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// InstanceStorageConfig is the Schema for the InstanceStorageConfigs API. Provides details about a specific Amazon Connect Instance Storage Config. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type InstanceStorageConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resourceType) || (has(self.initProvider) && has(self.initProvider.resourceType))",message="spec.forProvider.resourceType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageConfig) || (has(self.initProvider) && 
has(self.initProvider.storageConfig))",message="spec.forProvider.storageConfig is a required parameter" + Spec InstanceStorageConfigSpec `json:"spec"` + Status InstanceStorageConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceStorageConfigList contains a list of InstanceStorageConfigs +type InstanceStorageConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []InstanceStorageConfig `json:"items"` +} + +// Repository type metadata. +var ( + InstanceStorageConfig_Kind = "InstanceStorageConfig" + InstanceStorageConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: InstanceStorageConfig_Kind}.String() + InstanceStorageConfig_KindAPIVersion = InstanceStorageConfig_Kind + "." + CRDGroupVersion.String() + InstanceStorageConfig_GroupVersionKind = CRDGroupVersion.WithKind(InstanceStorageConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&InstanceStorageConfig{}, &InstanceStorageConfigList{}) +} diff --git a/apis/connect/v1beta2/zz_quickconnect_terraformed.go b/apis/connect/v1beta2/zz_quickconnect_terraformed.go new file mode 100755 index 0000000000..04409e4d44 --- /dev/null +++ b/apis/connect/v1beta2/zz_quickconnect_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this QuickConnect +func (mg *QuickConnect) GetTerraformResourceType() string { + return "aws_connect_quick_connect" +} + +// GetConnectionDetailsMapping for this QuickConnect +func (tr *QuickConnect) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this QuickConnect +func (tr *QuickConnect) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this QuickConnect +func (tr *QuickConnect) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this QuickConnect +func (tr *QuickConnect) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this QuickConnect +func (tr *QuickConnect) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this QuickConnect +func (tr *QuickConnect) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this QuickConnect +func (tr *QuickConnect) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + 
base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this QuickConnect +func (tr *QuickConnect) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this QuickConnect using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *QuickConnect) LateInitialize(attrs []byte) (bool, error) { + params := &QuickConnectParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *QuickConnect) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/connect/v1beta2/zz_quickconnect_types.go b/apis/connect/v1beta2/zz_quickconnect_types.go new file mode 100755 index 0000000000..cfd286d176 --- /dev/null +++ b/apis/connect/v1beta2/zz_quickconnect_types.go @@ -0,0 +1,302 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PhoneConfigInitParameters struct { + + // Specifies the phone number in in E.164 format. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type PhoneConfigObservation struct { + + // Specifies the phone number in in E.164 format. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type PhoneConfigParameters struct { + + // Specifies the phone number in in E.164 format. + // +kubebuilder:validation:Optional + PhoneNumber *string `json:"phoneNumber" tf:"phone_number,omitempty"` +} + +type QueueConfigInitParameters struct { + + // Specifies the identifier of the contact flow. + ContactFlowID *string `json:"contactFlowId,omitempty" tf:"contact_flow_id,omitempty"` + + // Specifies the identifier for the queue. + QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` +} + +type QueueConfigObservation struct { + + // Specifies the identifier of the contact flow. + ContactFlowID *string `json:"contactFlowId,omitempty" tf:"contact_flow_id,omitempty"` + + // Specifies the identifier for the queue. 
+ QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` +} + +type QueueConfigParameters struct { + + // Specifies the identifier of the contact flow. + // +kubebuilder:validation:Optional + ContactFlowID *string `json:"contactFlowId" tf:"contact_flow_id,omitempty"` + + // Specifies the identifier for the queue. + // +kubebuilder:validation:Optional + QueueID *string `json:"queueId" tf:"queue_id,omitempty"` +} + +type QuickConnectConfigInitParameters struct { + + // Specifies the phone configuration of the Quick Connect. This is required only if quick_connect_type is PHONE_NUMBER. The phone_config block is documented below. + PhoneConfig []PhoneConfigInitParameters `json:"phoneConfig,omitempty" tf:"phone_config,omitempty"` + + // Specifies the queue configuration of the Quick Connect. This is required only if quick_connect_type is QUEUE. The queue_config block is documented below. + QueueConfig []QueueConfigInitParameters `json:"queueConfig,omitempty" tf:"queue_config,omitempty"` + + // Specifies the configuration type of the quick connect. valid values are PHONE_NUMBER, QUEUE, USER. + QuickConnectType *string `json:"quickConnectType,omitempty" tf:"quick_connect_type,omitempty"` + + // Specifies the user configuration of the Quick Connect. This is required only if quick_connect_type is USER. The user_config block is documented below. + UserConfig []UserConfigInitParameters `json:"userConfig,omitempty" tf:"user_config,omitempty"` +} + +type QuickConnectConfigObservation struct { + + // Specifies the phone configuration of the Quick Connect. This is required only if quick_connect_type is PHONE_NUMBER. The phone_config block is documented below. + PhoneConfig []PhoneConfigObservation `json:"phoneConfig,omitempty" tf:"phone_config,omitempty"` + + // Specifies the queue configuration of the Quick Connect. This is required only if quick_connect_type is QUEUE. The queue_config block is documented below. 
+ QueueConfig []QueueConfigObservation `json:"queueConfig,omitempty" tf:"queue_config,omitempty"` + + // Specifies the configuration type of the quick connect. valid values are PHONE_NUMBER, QUEUE, USER. + QuickConnectType *string `json:"quickConnectType,omitempty" tf:"quick_connect_type,omitempty"` + + // Specifies the user configuration of the Quick Connect. This is required only if quick_connect_type is USER. The user_config block is documented below. + UserConfig []UserConfigObservation `json:"userConfig,omitempty" tf:"user_config,omitempty"` +} + +type QuickConnectConfigParameters struct { + + // Specifies the phone configuration of the Quick Connect. This is required only if quick_connect_type is PHONE_NUMBER. The phone_config block is documented below. + // +kubebuilder:validation:Optional + PhoneConfig []PhoneConfigParameters `json:"phoneConfig,omitempty" tf:"phone_config,omitempty"` + + // Specifies the queue configuration of the Quick Connect. This is required only if quick_connect_type is QUEUE. The queue_config block is documented below. + // +kubebuilder:validation:Optional + QueueConfig []QueueConfigParameters `json:"queueConfig,omitempty" tf:"queue_config,omitempty"` + + // Specifies the configuration type of the quick connect. valid values are PHONE_NUMBER, QUEUE, USER. + // +kubebuilder:validation:Optional + QuickConnectType *string `json:"quickConnectType" tf:"quick_connect_type,omitempty"` + + // Specifies the user configuration of the Quick Connect. This is required only if quick_connect_type is USER. The user_config block is documented below. + // +kubebuilder:validation:Optional + UserConfig []UserConfigParameters `json:"userConfig,omitempty" tf:"user_config,omitempty"` +} + +type QuickConnectInitParameters struct { + + // Specifies the description of the Quick Connect. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Quick Connect. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that defines the configuration information for the Quick Connect: quick_connect_type and one of phone_config, queue_config, user_config . The Quick Connect Config block is documented below. + QuickConnectConfig *QuickConnectConfigInitParameters `json:"quickConnectConfig,omitempty" tf:"quick_connect_config,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type QuickConnectObservation struct { + + // The Amazon Resource Name (ARN) of the Quick Connect. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specifies the description of the Quick Connect. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The identifier of the hosting Amazon Connect Instance and identifier of the Quick Connect separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Specifies the name of the Quick Connect. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that defines the configuration information for the Quick Connect: quick_connect_type and one of phone_config, queue_config, user_config . The Quick Connect Config block is documented below. + QuickConnectConfig *QuickConnectConfigObservation `json:"quickConnectConfig,omitempty" tf:"quick_connect_config,omitempty"` + + // The identifier for the Quick Connect. + QuickConnectID *string `json:"quickConnectId,omitempty" tf:"quick_connect_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type QuickConnectParameters struct { + + // Specifies the description of the Quick Connect. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Quick Connect. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that defines the configuration information for the Quick Connect: quick_connect_type and one of phone_config, queue_config, user_config . The Quick Connect Config block is documented below. + // +kubebuilder:validation:Optional + QuickConnectConfig *QuickConnectConfigParameters `json:"quickConnectConfig,omitempty" tf:"quick_connect_config,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type UserConfigInitParameters struct { + + // Specifies the identifier of the contact flow. + ContactFlowID *string `json:"contactFlowId,omitempty" tf:"contact_flow_id,omitempty"` + + // Specifies the identifier for the user. + UserID *string `json:"userId,omitempty" tf:"user_id,omitempty"` +} + +type UserConfigObservation struct { + + // Specifies the identifier of the contact flow. + ContactFlowID *string `json:"contactFlowId,omitempty" tf:"contact_flow_id,omitempty"` + + // Specifies the identifier for the user. + UserID *string `json:"userId,omitempty" tf:"user_id,omitempty"` +} + +type UserConfigParameters struct { + + // Specifies the identifier of the contact flow. + // +kubebuilder:validation:Optional + ContactFlowID *string `json:"contactFlowId" tf:"contact_flow_id,omitempty"` + + // Specifies the identifier for the user. + // +kubebuilder:validation:Optional + UserID *string `json:"userId" tf:"user_id,omitempty"` +} + +// QuickConnectSpec defines the desired state of QuickConnect +type QuickConnectSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider QuickConnectParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider QuickConnectInitParameters `json:"initProvider,omitempty"` +} + +// QuickConnectStatus defines the observed state of QuickConnect. +type QuickConnectStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider QuickConnectObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// QuickConnect is the Schema for the QuickConnects API. 
Provides details about a specific Amazon Quick Connect +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type QuickConnect struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.quickConnectConfig) || (has(self.initProvider) && has(self.initProvider.quickConnectConfig))",message="spec.forProvider.quickConnectConfig is a required parameter" + Spec QuickConnectSpec `json:"spec"` + Status QuickConnectStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// QuickConnectList contains a list of QuickConnects +type QuickConnectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []QuickConnect `json:"items"` +} + +// Repository type metadata. +var ( + QuickConnect_Kind = "QuickConnect" + QuickConnect_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: QuickConnect_Kind}.String() + QuickConnect_KindAPIVersion = QuickConnect_Kind + "." 
+ CRDGroupVersion.String() + QuickConnect_GroupVersionKind = CRDGroupVersion.WithKind(QuickConnect_Kind) +) + +func init() { + SchemeBuilder.Register(&QuickConnect{}, &QuickConnectList{}) +} diff --git a/apis/connect/v1beta2/zz_routingprofile_types.go b/apis/connect/v1beta2/zz_routingprofile_types.go index d532617c78..f3a350b266 100755 --- a/apis/connect/v1beta2/zz_routingprofile_types.go +++ b/apis/connect/v1beta2/zz_routingprofile_types.go @@ -100,7 +100,7 @@ type QueueConfigsParameters struct { type RoutingProfileInitParameters struct { // Specifies the default outbound queue for the Routing Profile. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta2.Queue + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta3.Queue // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("queue_id",true) DefaultOutboundQueueID *string `json:"defaultOutboundQueueId,omitempty" tf:"default_outbound_queue_id,omitempty"` @@ -183,7 +183,7 @@ type RoutingProfileObservation struct { type RoutingProfileParameters struct { // Specifies the default outbound queue for the Routing Profile. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta2.Queue + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta3.Queue // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("queue_id",true) // +kubebuilder:validation:Optional DefaultOutboundQueueID *string `json:"defaultOutboundQueueId,omitempty" tf:"default_outbound_queue_id,omitempty"` diff --git a/apis/connect/v1beta2/zz_user_terraformed.go b/apis/connect/v1beta2/zz_user_terraformed.go new file mode 100755 index 0000000000..bc5df10bbe --- /dev/null +++ b/apis/connect/v1beta2/zz_user_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this User +func (mg *User) GetTerraformResourceType() string { + return "aws_connect_user" +} + +// GetConnectionDetailsMapping for this User +func (tr *User) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "passwordSecretRef"} +} + +// GetObservation of this User +func (tr *User) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this User +func (tr *User) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this User +func (tr *User) GetID() string { + if tr.Status.AtProvider.ID == 
nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this User +func (tr *User) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this User +func (tr *User) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this User +func (tr *User) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this User +func (tr *User) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this User using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *User) LateInitialize(attrs []byte) (bool, error) { + params := &UserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *User) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/connect/v1beta2/zz_user_types.go b/apis/connect/v1beta2/zz_user_types.go new file mode 100755 index 0000000000..013068739c --- /dev/null +++ b/apis/connect/v1beta2/zz_user_types.go @@ -0,0 +1,336 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityInfoInitParameters struct { + + // The email address. If you are using SAML for identity management and include this parameter, an error is returned. Note that updates to the email is supported. From the UpdateUserIdentityInfo API documentation it is strongly recommended to limit who has the ability to invoke UpdateUserIdentityInfo. Someone with that ability can change the login credentials of other users by changing their email address. This poses a security risk to your organization. They can change the email address of a user to the attacker's email address, and then reset the password through email. For more information, see Best Practices for Security Profiles in the Amazon Connect Administrator Guide. 
+ Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The first name. This is required if you are using Amazon Connect or SAML for identity management. Minimum length of 1. Maximum length of 100. + FirstName *string `json:"firstName,omitempty" tf:"first_name,omitempty"` + + // The last name. This is required if you are using Amazon Connect or SAML for identity management. Minimum length of 1. Maximum length of 100. + LastName *string `json:"lastName,omitempty" tf:"last_name,omitempty"` +} + +type IdentityInfoObservation struct { + + // The email address. If you are using SAML for identity management and include this parameter, an error is returned. Note that updates to the email is supported. From the UpdateUserIdentityInfo API documentation it is strongly recommended to limit who has the ability to invoke UpdateUserIdentityInfo. Someone with that ability can change the login credentials of other users by changing their email address. This poses a security risk to your organization. They can change the email address of a user to the attacker's email address, and then reset the password through email. For more information, see Best Practices for Security Profiles in the Amazon Connect Administrator Guide. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The first name. This is required if you are using Amazon Connect or SAML for identity management. Minimum length of 1. Maximum length of 100. + FirstName *string `json:"firstName,omitempty" tf:"first_name,omitempty"` + + // The last name. This is required if you are using Amazon Connect or SAML for identity management. Minimum length of 1. Maximum length of 100. + LastName *string `json:"lastName,omitempty" tf:"last_name,omitempty"` +} + +type IdentityInfoParameters struct { + + // The email address. If you are using SAML for identity management and include this parameter, an error is returned. Note that updates to the email is supported. 
From the UpdateUserIdentityInfo API documentation it is strongly recommended to limit who has the ability to invoke UpdateUserIdentityInfo. Someone with that ability can change the login credentials of other users by changing their email address. This poses a security risk to your organization. They can change the email address of a user to the attacker's email address, and then reset the password through email. For more information, see Best Practices for Security Profiles in the Amazon Connect Administrator Guide. + // +kubebuilder:validation:Optional + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The first name. This is required if you are using Amazon Connect or SAML for identity management. Minimum length of 1. Maximum length of 100. + // +kubebuilder:validation:Optional + FirstName *string `json:"firstName,omitempty" tf:"first_name,omitempty"` + + // The last name. This is required if you are using Amazon Connect or SAML for identity management. Minimum length of 1. Maximum length of 100. + // +kubebuilder:validation:Optional + LastName *string `json:"lastName,omitempty" tf:"last_name,omitempty"` +} + +type UserInitParameters struct { + + // The identifier of the user account in the directory used for identity management. If Amazon Connect cannot access the directory, you can specify this identifier to authenticate users. If you include the identifier, we assume that Amazon Connect cannot access the directory. Otherwise, the identity information is used to authenticate users from your directory. This parameter is required if you are using an existing directory for identity management in Amazon Connect when Amazon Connect cannot access your directory to authenticate users. If you are using SAML for identity management and include this parameter, an error is returned. + DirectoryUserID *string `json:"directoryUserId,omitempty" tf:"directory_user_id,omitempty"` + + // The identifier of the hierarchy group for the user. 
+ HierarchyGroupID *string `json:"hierarchyGroupId,omitempty" tf:"hierarchy_group_id,omitempty"` + + // A block that contains information about the identity of the user. Documented below. + IdentityInfo *IdentityInfoInitParameters `json:"identityInfo,omitempty" tf:"identity_info,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // The user name for the account. For instances not using SAML for identity management, the user name can include up to 20 characters. If you are using SAML for identity management, the user name can include up to 64 characters from [a-zA-Z0-9_-.\@]+. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The password for the user account. A password is required if you are using Amazon Connect for identity management. Otherwise, it is an error to include a password. + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A block that contains information about the phone settings for the user. Documented below. + PhoneConfig *UserPhoneConfigInitParameters `json:"phoneConfig,omitempty" tf:"phone_config,omitempty"` + + // The identifier of the routing profile for the user. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta2.RoutingProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("routing_profile_id",true) + RoutingProfileID *string `json:"routingProfileId,omitempty" tf:"routing_profile_id,omitempty"` + + // Reference to a RoutingProfile in connect to populate routingProfileId. + // +kubebuilder:validation:Optional + RoutingProfileIDRef *v1.Reference `json:"routingProfileIdRef,omitempty" tf:"-"` + + // Selector for a RoutingProfile in connect to populate routingProfileId. + // +kubebuilder:validation:Optional + RoutingProfileIDSelector *v1.Selector `json:"routingProfileIdSelector,omitempty" tf:"-"` + + // A list of identifiers for the security profiles for the user. Specify a minimum of 1 and maximum of 10 security profile ids. For more information, see Best Practices for Security Profiles in the Amazon Connect Administrator Guide. + // +listType=set + SecurityProfileIds []*string `json:"securityProfileIds,omitempty" tf:"security_profile_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type UserObservation struct { + + // The Amazon Resource Name (ARN) of the user. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The identifier of the user account in the directory used for identity management. If Amazon Connect cannot access the directory, you can specify this identifier to authenticate users. If you include the identifier, we assume that Amazon Connect cannot access the directory. Otherwise, the identity information is used to authenticate users from your directory. This parameter is required if you are using an existing directory for identity management in Amazon Connect when Amazon Connect cannot access your directory to authenticate users. 
If you are using SAML for identity management and include this parameter, an error is returned. + DirectoryUserID *string `json:"directoryUserId,omitempty" tf:"directory_user_id,omitempty"` + + // The identifier of the hierarchy group for the user. + HierarchyGroupID *string `json:"hierarchyGroupId,omitempty" tf:"hierarchy_group_id,omitempty"` + + // The identifier of the hosting Amazon Connect Instance and identifier of the user + // separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A block that contains information about the identity of the user. Documented below. + IdentityInfo *IdentityInfoObservation `json:"identityInfo,omitempty" tf:"identity_info,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // The user name for the account. For instances not using SAML for identity management, the user name can include up to 20 characters. If you are using SAML for identity management, the user name can include up to 64 characters from [a-zA-Z0-9_-.\@]+. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that contains information about the phone settings for the user. Documented below. + PhoneConfig *UserPhoneConfigObservation `json:"phoneConfig,omitempty" tf:"phone_config,omitempty"` + + // The identifier of the routing profile for the user. + RoutingProfileID *string `json:"routingProfileId,omitempty" tf:"routing_profile_id,omitempty"` + + // A list of identifiers for the security profiles for the user. Specify a minimum of 1 and maximum of 10 security profile ids. For more information, see Best Practices for Security Profiles in the Amazon Connect Administrator Guide. + // +listType=set + SecurityProfileIds []*string `json:"securityProfileIds,omitempty" tf:"security_profile_ids,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The identifier for the user. + UserID *string `json:"userId,omitempty" tf:"user_id,omitempty"` +} + +type UserParameters struct { + + // The identifier of the user account in the directory used for identity management. If Amazon Connect cannot access the directory, you can specify this identifier to authenticate users. If you include the identifier, we assume that Amazon Connect cannot access the directory. Otherwise, the identity information is used to authenticate users from your directory. This parameter is required if you are using an existing directory for identity management in Amazon Connect when Amazon Connect cannot access your directory to authenticate users. If you are using SAML for identity management and include this parameter, an error is returned. + // +kubebuilder:validation:Optional + DirectoryUserID *string `json:"directoryUserId,omitempty" tf:"directory_user_id,omitempty"` + + // The identifier of the hierarchy group for the user. + // +kubebuilder:validation:Optional + HierarchyGroupID *string `json:"hierarchyGroupId,omitempty" tf:"hierarchy_group_id,omitempty"` + + // A block that contains information about the identity of the user. Documented below. + // +kubebuilder:validation:Optional + IdentityInfo *IdentityInfoParameters `json:"identityInfo,omitempty" tf:"identity_info,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // The user name for the account. For instances not using SAML for identity management, the user name can include up to 20 characters. If you are using SAML for identity management, the user name can include up to 64 characters from [a-zA-Z0-9_-.\@]+. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The password for the user account. A password is required if you are using Amazon Connect for identity management. Otherwise, it is an error to include a password. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A block that contains information about the phone settings for the user. Documented below. + // +kubebuilder:validation:Optional + PhoneConfig *UserPhoneConfigParameters `json:"phoneConfig,omitempty" tf:"phone_config,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The identifier of the routing profile for the user. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta2.RoutingProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("routing_profile_id",true) + // +kubebuilder:validation:Optional + RoutingProfileID *string `json:"routingProfileId,omitempty" tf:"routing_profile_id,omitempty"` + + // Reference to a RoutingProfile in connect to populate routingProfileId. + // +kubebuilder:validation:Optional + RoutingProfileIDRef *v1.Reference `json:"routingProfileIdRef,omitempty" tf:"-"` + + // Selector for a RoutingProfile in connect to populate routingProfileId. + // +kubebuilder:validation:Optional + RoutingProfileIDSelector *v1.Selector `json:"routingProfileIdSelector,omitempty" tf:"-"` + + // A list of identifiers for the security profiles for the user. Specify a minimum of 1 and maximum of 10 security profile ids. For more information, see Best Practices for Security Profiles in the Amazon Connect Administrator Guide. + // +kubebuilder:validation:Optional + // +listType=set + SecurityProfileIds []*string `json:"securityProfileIds,omitempty" tf:"security_profile_ids,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type UserPhoneConfigInitParameters struct { + + // The After Call Work (ACW) timeout setting, in seconds. Minimum value of 0. + AfterContactWorkTimeLimit *float64 `json:"afterContactWorkTimeLimit,omitempty" tf:"after_contact_work_time_limit,omitempty"` + + // When Auto-Accept Call is enabled for an available agent, the agent connects to contacts automatically. + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The phone number for the user's desk phone. Required if phone_type is set as DESK_PHONE. 
+ DeskPhoneNumber *string `json:"deskPhoneNumber,omitempty" tf:"desk_phone_number,omitempty"` + + // The phone type. Valid values are DESK_PHONE and SOFT_PHONE. + PhoneType *string `json:"phoneType,omitempty" tf:"phone_type,omitempty"` +} + +type UserPhoneConfigObservation struct { + + // The After Call Work (ACW) timeout setting, in seconds. Minimum value of 0. + AfterContactWorkTimeLimit *float64 `json:"afterContactWorkTimeLimit,omitempty" tf:"after_contact_work_time_limit,omitempty"` + + // When Auto-Accept Call is enabled for an available agent, the agent connects to contacts automatically. + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The phone number for the user's desk phone. Required if phone_type is set as DESK_PHONE. + DeskPhoneNumber *string `json:"deskPhoneNumber,omitempty" tf:"desk_phone_number,omitempty"` + + // The phone type. Valid values are DESK_PHONE and SOFT_PHONE. + PhoneType *string `json:"phoneType,omitempty" tf:"phone_type,omitempty"` +} + +type UserPhoneConfigParameters struct { + + // The After Call Work (ACW) timeout setting, in seconds. Minimum value of 0. + // +kubebuilder:validation:Optional + AfterContactWorkTimeLimit *float64 `json:"afterContactWorkTimeLimit,omitempty" tf:"after_contact_work_time_limit,omitempty"` + + // When Auto-Accept Call is enabled for an available agent, the agent connects to contacts automatically. + // +kubebuilder:validation:Optional + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The phone number for the user's desk phone. Required if phone_type is set as DESK_PHONE. + // +kubebuilder:validation:Optional + DeskPhoneNumber *string `json:"deskPhoneNumber,omitempty" tf:"desk_phone_number,omitempty"` + + // The phone type. Valid values are DESK_PHONE and SOFT_PHONE. 
+ // +kubebuilder:validation:Optional + PhoneType *string `json:"phoneType" tf:"phone_type,omitempty"` +} + +// UserSpec defines the desired state of User +type UserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UserInitParameters `json:"initProvider,omitempty"` +} + +// UserStatus defines the observed state of User. +type UserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// User is the Schema for the Users API. 
Provides details about a specific Amazon Connect User +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type User struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.phoneConfig) || (has(self.initProvider) && has(self.initProvider.phoneConfig))",message="spec.forProvider.phoneConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.securityProfileIds) || (has(self.initProvider) && has(self.initProvider.securityProfileIds))",message="spec.forProvider.securityProfileIds is a required parameter" + Spec UserSpec `json:"spec"` + Status UserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserList contains a list of Users +type UserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []User `json:"items"` +} + +// Repository type metadata. 
+var ( + User_Kind = "User" + User_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: User_Kind}.String() + User_KindAPIVersion = User_Kind + "." + CRDGroupVersion.String() + User_GroupVersionKind = CRDGroupVersion.WithKind(User_Kind) +) + +func init() { + SchemeBuilder.Register(&User{}, &UserList{}) +} diff --git a/apis/connect/v1beta2/zz_userhierarchystructure_terraformed.go b/apis/connect/v1beta2/zz_userhierarchystructure_terraformed.go new file mode 100755 index 0000000000..61c487e677 --- /dev/null +++ b/apis/connect/v1beta2/zz_userhierarchystructure_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this UserHierarchyStructure +func (mg *UserHierarchyStructure) GetTerraformResourceType() string { + return "aws_connect_user_hierarchy_structure" +} + +// GetConnectionDetailsMapping for this UserHierarchyStructure +func (tr *UserHierarchyStructure) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this UserHierarchyStructure +func (tr *UserHierarchyStructure) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this UserHierarchyStructure +func (tr *UserHierarchyStructure) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this UserHierarchyStructure +func (tr *UserHierarchyStructure) 
GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this UserHierarchyStructure +func (tr *UserHierarchyStructure) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this UserHierarchyStructure +func (tr *UserHierarchyStructure) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this UserHierarchyStructure +func (tr *UserHierarchyStructure) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this UserHierarchyStructure +func (tr *UserHierarchyStructure) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this UserHierarchyStructure using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *UserHierarchyStructure) LateInitialize(attrs []byte) (bool, error) { + params := &UserHierarchyStructureParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *UserHierarchyStructure) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/connect/v1beta2/zz_userhierarchystructure_types.go b/apis/connect/v1beta2/zz_userhierarchystructure_types.go new file mode 100755 index 0000000000..95f430ac0e --- /dev/null +++ b/apis/connect/v1beta2/zz_userhierarchystructure_types.go @@ -0,0 +1,316 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HierarchyStructureInitParameters struct { + + // A block that defines the details of level five. The level block is documented below. 
+ LevelFive *LevelFiveInitParameters `json:"levelFive,omitempty" tf:"level_five,omitempty"` + + // A block that defines the details of level four. The level block is documented below. + LevelFour *LevelFourInitParameters `json:"levelFour,omitempty" tf:"level_four,omitempty"` + + // A block that defines the details of level one. The level block is documented below. + LevelOne *LevelOneInitParameters `json:"levelOne,omitempty" tf:"level_one,omitempty"` + + // A block that defines the details of level three. The level block is documented below. + LevelThree *LevelThreeInitParameters `json:"levelThree,omitempty" tf:"level_three,omitempty"` + + // A block that defines the details of level two. The level block is documented below. + LevelTwo *LevelTwoInitParameters `json:"levelTwo,omitempty" tf:"level_two,omitempty"` +} + +type HierarchyStructureObservation struct { + + // A block that defines the details of level five. The level block is documented below. + LevelFive *LevelFiveObservation `json:"levelFive,omitempty" tf:"level_five,omitempty"` + + // A block that defines the details of level four. The level block is documented below. + LevelFour *LevelFourObservation `json:"levelFour,omitempty" tf:"level_four,omitempty"` + + // A block that defines the details of level one. The level block is documented below. + LevelOne *LevelOneObservation `json:"levelOne,omitempty" tf:"level_one,omitempty"` + + // A block that defines the details of level three. The level block is documented below. + LevelThree *LevelThreeObservation `json:"levelThree,omitempty" tf:"level_three,omitempty"` + + // A block that defines the details of level two. The level block is documented below. + LevelTwo *LevelTwoObservation `json:"levelTwo,omitempty" tf:"level_two,omitempty"` +} + +type HierarchyStructureParameters struct { + + // A block that defines the details of level five. The level block is documented below. 
+ // +kubebuilder:validation:Optional + LevelFive *LevelFiveParameters `json:"levelFive,omitempty" tf:"level_five,omitempty"` + + // A block that defines the details of level four. The level block is documented below. + // +kubebuilder:validation:Optional + LevelFour *LevelFourParameters `json:"levelFour,omitempty" tf:"level_four,omitempty"` + + // A block that defines the details of level one. The level block is documented below. + // +kubebuilder:validation:Optional + LevelOne *LevelOneParameters `json:"levelOne,omitempty" tf:"level_one,omitempty"` + + // A block that defines the details of level three. The level block is documented below. + // +kubebuilder:validation:Optional + LevelThree *LevelThreeParameters `json:"levelThree,omitempty" tf:"level_three,omitempty"` + + // A block that defines the details of level two. The level block is documented below. + // +kubebuilder:validation:Optional + LevelTwo *LevelTwoParameters `json:"levelTwo,omitempty" tf:"level_two,omitempty"` +} + +type LevelFiveInitParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelFiveObservation struct { + + // The Amazon Resource Name (ARN) of the hierarchy level. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The identifier of the hosting Amazon Connect Instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the user hierarchy level. Must not be more than 50 characters. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelFiveParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type LevelFourInitParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelFourObservation struct { + + // The Amazon Resource Name (ARN) of the hierarchy level. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The identifier of the hosting Amazon Connect Instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the user hierarchy level. Must not be more than 50 characters. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelFourParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type LevelOneInitParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelOneObservation struct { + + // The Amazon Resource Name (ARN) of the hierarchy level. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The identifier of the hosting Amazon Connect Instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the user hierarchy level. Must not be more than 50 characters. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelOneParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type LevelThreeInitParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelThreeObservation struct { + + // The Amazon Resource Name (ARN) of the hierarchy level. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The identifier of the hosting Amazon Connect Instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the user hierarchy level. 
Must not be more than 50 characters. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelThreeParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type LevelTwoInitParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelTwoObservation struct { + + // The Amazon Resource Name (ARN) of the hierarchy level. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The identifier of the hosting Amazon Connect Instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the user hierarchy level. Must not be more than 50 characters. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LevelTwoParameters struct { + + // The name of the user hierarchy level. Must not be more than 50 characters. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type UserHierarchyStructureInitParameters struct { + + // A block that defines the hierarchy structure's levels. The hierarchy_structure block is documented below. + HierarchyStructure *HierarchyStructureInitParameters `json:"hierarchyStructure,omitempty" tf:"hierarchy_structure,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. 
+ // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` +} + +type UserHierarchyStructureObservation struct { + + // A block that defines the hierarchy structure's levels. The hierarchy_structure block is documented below. + HierarchyStructure *HierarchyStructureObservation `json:"hierarchyStructure,omitempty" tf:"hierarchy_structure,omitempty"` + + // The identifier of the hosting Amazon Connect Instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` +} + +type UserHierarchyStructureParameters struct { + + // A block that defines the hierarchy structure's levels. The hierarchy_structure block is documented below. + // +kubebuilder:validation:Optional + HierarchyStructure *HierarchyStructureParameters `json:"hierarchyStructure,omitempty" tf:"hierarchy_structure,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. 
+ // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// UserHierarchyStructureSpec defines the desired state of UserHierarchyStructure +type UserHierarchyStructureSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserHierarchyStructureParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UserHierarchyStructureInitParameters `json:"initProvider,omitempty"` +} + +// UserHierarchyStructureStatus defines the observed state of UserHierarchyStructure. +type UserHierarchyStructureStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserHierarchyStructureObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// UserHierarchyStructure is the Schema for the UserHierarchyStructures API. 
Provides details about a specific Amazon Connect User Hierarchy Structure +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type UserHierarchyStructure struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.hierarchyStructure) || (has(self.initProvider) && has(self.initProvider.hierarchyStructure))",message="spec.forProvider.hierarchyStructure is a required parameter" + Spec UserHierarchyStructureSpec `json:"spec"` + Status UserHierarchyStructureStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserHierarchyStructureList contains a list of UserHierarchyStructures +type UserHierarchyStructureList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UserHierarchyStructure `json:"items"` +} + +// Repository type metadata. +var ( + UserHierarchyStructure_Kind = "UserHierarchyStructure" + UserHierarchyStructure_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: UserHierarchyStructure_Kind}.String() + UserHierarchyStructure_KindAPIVersion = UserHierarchyStructure_Kind + "." 
+ CRDGroupVersion.String() + UserHierarchyStructure_GroupVersionKind = CRDGroupVersion.WithKind(UserHierarchyStructure_Kind) +) + +func init() { + SchemeBuilder.Register(&UserHierarchyStructure{}, &UserHierarchyStructureList{}) +} diff --git a/apis/connect/v1beta3/zz_generated.conversion_hubs.go b/apis/connect/v1beta3/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..92e9b9eb4d --- /dev/null +++ b/apis/connect/v1beta3/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +// Hub marks this type as a conversion hub. +func (tr *HoursOfOperation) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Queue) Hub() {} diff --git a/apis/connect/v1beta3/zz_generated.deepcopy.go b/apis/connect/v1beta3/zz_generated.deepcopy.go new file mode 100644 index 0000000000..adc454fa4c --- /dev/null +++ b/apis/connect/v1beta3/zz_generated.deepcopy.go @@ -0,0 +1,1069 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta3 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigInitParameters) DeepCopyInto(out *ConfigInitParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(EndTimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(StartTimeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigInitParameters. +func (in *ConfigInitParameters) DeepCopy() *ConfigInitParameters { + if in == nil { + return nil + } + out := new(ConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigObservation) DeepCopyInto(out *ConfigObservation) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(EndTimeObservation) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(StartTimeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigObservation. +func (in *ConfigObservation) DeepCopy() *ConfigObservation { + if in == nil { + return nil + } + out := new(ConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigParameters) DeepCopyInto(out *ConfigParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(EndTimeParameters) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(StartTimeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigParameters. +func (in *ConfigParameters) DeepCopy() *ConfigParameters { + if in == nil { + return nil + } + out := new(ConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndTimeInitParameters) DeepCopyInto(out *EndTimeInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndTimeInitParameters. +func (in *EndTimeInitParameters) DeepCopy() *EndTimeInitParameters { + if in == nil { + return nil + } + out := new(EndTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndTimeObservation) DeepCopyInto(out *EndTimeObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndTimeObservation. 
+func (in *EndTimeObservation) DeepCopy() *EndTimeObservation { + if in == nil { + return nil + } + out := new(EndTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndTimeParameters) DeepCopyInto(out *EndTimeParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndTimeParameters. +func (in *EndTimeParameters) DeepCopy() *EndTimeParameters { + if in == nil { + return nil + } + out := new(EndTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HoursOfOperation) DeepCopyInto(out *HoursOfOperation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoursOfOperation. +func (in *HoursOfOperation) DeepCopy() *HoursOfOperation { + if in == nil { + return nil + } + out := new(HoursOfOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HoursOfOperation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HoursOfOperationInitParameters) DeepCopyInto(out *HoursOfOperationInitParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoursOfOperationInitParameters. +func (in *HoursOfOperationInitParameters) DeepCopy() *HoursOfOperationInitParameters { + if in == nil { + return nil + } + out := new(HoursOfOperationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HoursOfOperationList) DeepCopyInto(out *HoursOfOperationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HoursOfOperation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoursOfOperationList. +func (in *HoursOfOperationList) DeepCopy() *HoursOfOperationList { + if in == nil { + return nil + } + out := new(HoursOfOperationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HoursOfOperationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HoursOfOperationObservation) DeepCopyInto(out *HoursOfOperationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.HoursOfOperationID != nil { + in, out := &in.HoursOfOperationID, &out.HoursOfOperationID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, 
len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoursOfOperationObservation. +func (in *HoursOfOperationObservation) DeepCopy() *HoursOfOperationObservation { + if in == nil { + return nil + } + out := new(HoursOfOperationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HoursOfOperationParameters) DeepCopyInto(out *HoursOfOperationParameters) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoursOfOperationParameters. +func (in *HoursOfOperationParameters) DeepCopy() *HoursOfOperationParameters { + if in == nil { + return nil + } + out := new(HoursOfOperationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HoursOfOperationSpec) DeepCopyInto(out *HoursOfOperationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoursOfOperationSpec. +func (in *HoursOfOperationSpec) DeepCopy() *HoursOfOperationSpec { + if in == nil { + return nil + } + out := new(HoursOfOperationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HoursOfOperationStatus) DeepCopyInto(out *HoursOfOperationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HoursOfOperationStatus. +func (in *HoursOfOperationStatus) DeepCopy() *HoursOfOperationStatus { + if in == nil { + return nil + } + out := new(HoursOfOperationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutboundCallerConfigInitParameters) DeepCopyInto(out *OutboundCallerConfigInitParameters) { + *out = *in + if in.OutboundCallerIDName != nil { + in, out := &in.OutboundCallerIDName, &out.OutboundCallerIDName + *out = new(string) + **out = **in + } + if in.OutboundCallerIDNumberID != nil { + in, out := &in.OutboundCallerIDNumberID, &out.OutboundCallerIDNumberID + *out = new(string) + **out = **in + } + if in.OutboundFlowID != nil { + in, out := &in.OutboundFlowID, &out.OutboundFlowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundCallerConfigInitParameters. 
+func (in *OutboundCallerConfigInitParameters) DeepCopy() *OutboundCallerConfigInitParameters { + if in == nil { + return nil + } + out := new(OutboundCallerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutboundCallerConfigObservation) DeepCopyInto(out *OutboundCallerConfigObservation) { + *out = *in + if in.OutboundCallerIDName != nil { + in, out := &in.OutboundCallerIDName, &out.OutboundCallerIDName + *out = new(string) + **out = **in + } + if in.OutboundCallerIDNumberID != nil { + in, out := &in.OutboundCallerIDNumberID, &out.OutboundCallerIDNumberID + *out = new(string) + **out = **in + } + if in.OutboundFlowID != nil { + in, out := &in.OutboundFlowID, &out.OutboundFlowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundCallerConfigObservation. +func (in *OutboundCallerConfigObservation) DeepCopy() *OutboundCallerConfigObservation { + if in == nil { + return nil + } + out := new(OutboundCallerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutboundCallerConfigParameters) DeepCopyInto(out *OutboundCallerConfigParameters) { + *out = *in + if in.OutboundCallerIDName != nil { + in, out := &in.OutboundCallerIDName, &out.OutboundCallerIDName + *out = new(string) + **out = **in + } + if in.OutboundCallerIDNumberID != nil { + in, out := &in.OutboundCallerIDNumberID, &out.OutboundCallerIDNumberID + *out = new(string) + **out = **in + } + if in.OutboundFlowID != nil { + in, out := &in.OutboundFlowID, &out.OutboundFlowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutboundCallerConfigParameters. 
+func (in *OutboundCallerConfigParameters) DeepCopy() *OutboundCallerConfigParameters { + if in == nil { + return nil + } + out := new(OutboundCallerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Queue) DeepCopyInto(out *Queue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. +func (in *Queue) DeepCopy() *Queue { + if in == nil { + return nil + } + out := new(Queue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Queue) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueInitParameters) DeepCopyInto(out *QueueInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.HoursOfOperationID != nil { + in, out := &in.HoursOfOperationID, &out.HoursOfOperationID + *out = new(string) + **out = **in + } + if in.HoursOfOperationIDRef != nil { + in, out := &in.HoursOfOperationIDRef, &out.HoursOfOperationIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HoursOfOperationIDSelector != nil { + in, out := &in.HoursOfOperationIDSelector, &out.HoursOfOperationIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MaxContacts != nil { + in, out := &in.MaxContacts, &out.MaxContacts + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutboundCallerConfig != nil { + in, out := &in.OutboundCallerConfig, &out.OutboundCallerConfig + *out = new(OutboundCallerConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.QuickConnectIds != nil { + in, out := &in.QuickConnectIds, &out.QuickConnectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == 
nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueInitParameters. +func (in *QueueInitParameters) DeepCopy() *QueueInitParameters { + if in == nil { + return nil + } + out := new(QueueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueList) DeepCopyInto(out *QueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Queue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. +func (in *QueueList) DeepCopy() *QueueList { + if in == nil { + return nil + } + out := new(QueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueObservation) DeepCopyInto(out *QueueObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.HoursOfOperationID != nil { + in, out := &in.HoursOfOperationID, &out.HoursOfOperationID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.MaxContacts != nil { + in, out := &in.MaxContacts, &out.MaxContacts + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutboundCallerConfig != nil { + in, out := &in.OutboundCallerConfig, &out.OutboundCallerConfig + *out = new(OutboundCallerConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.QuickConnectIds != nil { + in, out := &in.QuickConnectIds, &out.QuickConnectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } 
else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueObservation. +func (in *QueueObservation) DeepCopy() *QueueObservation { + if in == nil { + return nil + } + out := new(QueueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueParameters) DeepCopyInto(out *QueueParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.HoursOfOperationID != nil { + in, out := &in.HoursOfOperationID, &out.HoursOfOperationID + *out = new(string) + **out = **in + } + if in.HoursOfOperationIDRef != nil { + in, out := &in.HoursOfOperationIDRef, &out.HoursOfOperationIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HoursOfOperationIDSelector != nil { + in, out := &in.HoursOfOperationIDSelector, &out.HoursOfOperationIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceIDRef != nil { + in, out := &in.InstanceIDRef, &out.InstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceIDSelector != nil { + in, out := &in.InstanceIDSelector, &out.InstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MaxContacts != nil { + in, out := &in.MaxContacts, &out.MaxContacts + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutboundCallerConfig != nil { + in, out := &in.OutboundCallerConfig, &out.OutboundCallerConfig + *out = new(OutboundCallerConfigParameters) + (*in).DeepCopyInto(*out) + } + if 
in.QuickConnectIds != nil { + in, out := &in.QuickConnectIds, &out.QuickConnectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueParameters. +func (in *QueueParameters) DeepCopy() *QueueParameters { + if in == nil { + return nil + } + out := new(QueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. +func (in *QueueSpec) DeepCopy() *QueueSpec { + if in == nil { + return nil + } + out := new(QueueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueStatus) DeepCopyInto(out *QueueStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus. +func (in *QueueStatus) DeepCopy() *QueueStatus { + if in == nil { + return nil + } + out := new(QueueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartTimeInitParameters) DeepCopyInto(out *StartTimeInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartTimeInitParameters. +func (in *StartTimeInitParameters) DeepCopy() *StartTimeInitParameters { + if in == nil { + return nil + } + out := new(StartTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartTimeObservation) DeepCopyInto(out *StartTimeObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartTimeObservation. +func (in *StartTimeObservation) DeepCopy() *StartTimeObservation { + if in == nil { + return nil + } + out := new(StartTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StartTimeParameters) DeepCopyInto(out *StartTimeParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartTimeParameters. +func (in *StartTimeParameters) DeepCopy() *StartTimeParameters { + if in == nil { + return nil + } + out := new(StartTimeParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/connect/v1beta3/zz_generated.managed.go b/apis/connect/v1beta3/zz_generated.managed.go new file mode 100644 index 0000000000..c93299f103 --- /dev/null +++ b/apis/connect/v1beta3/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta3 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this HoursOfOperation. +func (mg *HoursOfOperation) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HoursOfOperation. +func (mg *HoursOfOperation) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HoursOfOperation. +func (mg *HoursOfOperation) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HoursOfOperation. +func (mg *HoursOfOperation) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HoursOfOperation. +func (mg *HoursOfOperation) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HoursOfOperation. 
+func (mg *HoursOfOperation) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HoursOfOperation. +func (mg *HoursOfOperation) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HoursOfOperation. +func (mg *HoursOfOperation) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HoursOfOperation. +func (mg *HoursOfOperation) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HoursOfOperation. +func (mg *HoursOfOperation) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HoursOfOperation. +func (mg *HoursOfOperation) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HoursOfOperation. +func (mg *HoursOfOperation) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Queue. +func (mg *Queue) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Queue. +func (mg *Queue) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Queue. +func (mg *Queue) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Queue. +func (mg *Queue) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Queue. 
+func (mg *Queue) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Queue. +func (mg *Queue) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Queue. +func (mg *Queue) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Queue. +func (mg *Queue) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Queue. +func (mg *Queue) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Queue. +func (mg *Queue) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Queue. +func (mg *Queue) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Queue. +func (mg *Queue) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/connect/v1beta3/zz_generated.managedlist.go b/apis/connect/v1beta3/zz_generated.managedlist.go new file mode 100644 index 0000000000..dd43056c5d --- /dev/null +++ b/apis/connect/v1beta3/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta3 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this HoursOfOperationList. 
+func (l *HoursOfOperationList) GetItems() []resource.Managed {
+	items := make([]resource.Managed, len(l.Items))
+	for i := range l.Items {
+		items[i] = &l.Items[i]
+	}
+	return items
+}
+
+// GetItems of this QueueList.
+func (l *QueueList) GetItems() []resource.Managed {
+	items := make([]resource.Managed, len(l.Items))
+	for i := range l.Items {
+		items[i] = &l.Items[i]
+	}
+	return items
+}
diff --git a/apis/connect/v1beta3/zz_generated.resolvers.go b/apis/connect/v1beta3/zz_generated.resolvers.go
new file mode 100644
index 0000000000..ba13bf17ec
--- /dev/null
+++ b/apis/connect/v1beta3/zz_generated.resolvers.go
@@ -0,0 +1,156 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+// Code generated by angryjet. DO NOT EDIT.
+// Code transformed by upjet. DO NOT EDIT.
+
+package v1beta3
+
+import (
+	"context"
+
+	reference "github.com/crossplane/crossplane-runtime/pkg/reference"
+	resource "github.com/crossplane/upjet/pkg/resource"
+	errors "github.com/pkg/errors"
+
+	xpresource "github.com/crossplane/crossplane-runtime/pkg/resource"
+	apisresolver "github.com/upbound/provider-aws/internal/apis"
+	client "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ResolveReferences of this HoursOfOperation.
+//
+// Note(review): the generator had emitted this doc comment *inside* the
+// parameter list, detaching it from the declaration for godoc; it has been
+// moved above the func per Go doc-comment convention. Behavior is unchanged.
+func (mg *HoursOfOperation) ResolveReferences(ctx context.Context, c client.Reader) error {
+	var m xpresource.Managed
+	var l xpresource.ManagedList
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+	// Resolve spec.forProvider.instanceId against connect Instance resources.
+	{
+		m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceID),
+			Extract:      resource.ExtractResourceID(),
+			Reference:    mg.Spec.ForProvider.InstanceIDRef,
+			Selector:     mg.Spec.ForProvider.InstanceIDSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.InstanceID")
+	}
+	mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference
+	// Resolve spec.initProvider.instanceId the same way.
+	{
+		m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceID),
+			Extract:      resource.ExtractResourceID(),
+			Reference:    mg.Spec.InitProvider.InstanceIDRef,
+			Selector:     mg.Spec.InitProvider.InstanceIDSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.InstanceID")
+	}
+	mg.Spec.InitProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.InstanceIDRef = rsp.ResolvedReference
+
+	return nil
+}
+
+// ResolveReferences of this Queue.
+func (mg *Queue) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta3", "HoursOfOperation", "HoursOfOperationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HoursOfOperationID), + Extract: resource.ExtractParamPath("hours_of_operation_id", true), + Reference: mg.Spec.ForProvider.HoursOfOperationIDRef, + Selector: mg.Spec.ForProvider.HoursOfOperationIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.HoursOfOperationID") + } + mg.Spec.ForProvider.HoursOfOperationID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.HoursOfOperationIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.InstanceIDRef, + Selector: mg.Spec.ForProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceID") + } + mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", 
"v1beta3", "HoursOfOperation", "HoursOfOperationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.HoursOfOperationID), + Extract: resource.ExtractParamPath("hours_of_operation_id", true), + Reference: mg.Spec.InitProvider.HoursOfOperationIDRef, + Selector: mg.Spec.InitProvider.HoursOfOperationIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HoursOfOperationID") + } + mg.Spec.InitProvider.HoursOfOperationID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HoursOfOperationIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("connect.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.InstanceIDRef, + Selector: mg.Spec.InitProvider.InstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceID") + } + mg.Spec.InitProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/connect/v1beta3/zz_groupversion_info.go b/apis/connect/v1beta3/zz_groupversion_info.go new file mode 100755 index 0000000000..6b928795fb --- /dev/null +++ b/apis/connect/v1beta3/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by 
upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=connect.aws.upbound.io +// +versionName=v1beta3 +package v1beta3 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "connect.aws.upbound.io" + CRDVersion = "v1beta3" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/connect/v1beta3/zz_hoursofoperation_terraformed.go b/apis/connect/v1beta3/zz_hoursofoperation_terraformed.go new file mode 100755 index 0000000000..384754830a --- /dev/null +++ b/apis/connect/v1beta3/zz_hoursofoperation_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+
+package v1beta3
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this HoursOfOperation
+func (mg *HoursOfOperation) GetTerraformResourceType() string {
+	return "aws_connect_hours_of_operation"
+}
+
+// GetConnectionDetailsMapping for this HoursOfOperation
+func (tr *HoursOfOperation) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this HoursOfOperation
+func (tr *HoursOfOperation) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this HoursOfOperation
+func (tr *HoursOfOperation) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this HoursOfOperation
+func (tr *HoursOfOperation) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this HoursOfOperation
+func (tr *HoursOfOperation) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this HoursOfOperation
+func (tr *HoursOfOperation) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this HoursOfOperation
+func (tr *HoursOfOperation) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this HoursOfOperation. It returns the
+// spec.forProvider parameters, optionally merged with the
+// spec.initProvider parameters; on conflict the forProvider
+// values win (mergo overwrite is disabled below).
+func (tr *HoursOfOperation) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		// %q already wraps the name in quotes; do not add literal quotes around it.
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this HoursOfOperation using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *HoursOfOperation) LateInitialize(attrs []byte) (bool, error) {
+	params := &HoursOfOperationParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HoursOfOperation) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/connect/v1beta3/zz_hoursofoperation_types.go b/apis/connect/v1beta3/zz_hoursofoperation_types.go new file mode 100755 index 0000000000..bc811f9b42 --- /dev/null +++ b/apis/connect/v1beta3/zz_hoursofoperation_types.go @@ -0,0 +1,284 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigInitParameters struct { + + // Specifies the day that the hours of operation applies to. + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // A end time block specifies the time that your contact center closes. The end_time is documented below. + EndTime *EndTimeInitParameters `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // A start time block specifies the time that your contact center opens. The start_time is documented below. + StartTime *StartTimeInitParameters `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type ConfigObservation struct { + + // Specifies the day that the hours of operation applies to. + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // A end time block specifies the time that your contact center closes. The end_time is documented below. + EndTime *EndTimeObservation `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // A start time block specifies the time that your contact center opens. The start_time is documented below. 
+ StartTime *StartTimeObservation `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type ConfigParameters struct { + + // Specifies the day that the hours of operation applies to. + // +kubebuilder:validation:Optional + Day *string `json:"day" tf:"day,omitempty"` + + // A end time block specifies the time that your contact center closes. The end_time is documented below. + // +kubebuilder:validation:Optional + EndTime *EndTimeParameters `json:"endTime" tf:"end_time,omitempty"` + + // A start time block specifies the time that your contact center opens. The start_time is documented below. + // +kubebuilder:validation:Optional + StartTime *StartTimeParameters `json:"startTime" tf:"start_time,omitempty"` +} + +type EndTimeInitParameters struct { + + // Specifies the hour of closing. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Specifies the minute of closing. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type EndTimeObservation struct { + + // Specifies the hour of closing. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Specifies the minute of closing. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type EndTimeParameters struct { + + // Specifies the hour of closing. + // +kubebuilder:validation:Optional + Hours *float64 `json:"hours" tf:"hours,omitempty"` + + // Specifies the minute of closing. + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes" tf:"minutes,omitempty"` +} + +type HoursOfOperationInitParameters struct { + + // One or more config blocks which define the configuration information for the hours of operation: day, start time, and end time . Config blocks are documented below. + Config []ConfigInitParameters `json:"config,omitempty" tf:"config,omitempty"` + + // Specifies the description of the Hours of Operation. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Hours of Operation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the time zone of the Hours of Operation. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type HoursOfOperationObservation struct { + + // The Amazon Resource Name (ARN) of the Hours of Operation. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // One or more config blocks which define the configuration information for the hours of operation: day, start time, and end time . Config blocks are documented below. + Config []ConfigObservation `json:"config,omitempty" tf:"config,omitempty"` + + // Specifies the description of the Hours of Operation. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The identifier for the hours of operation. 
+ HoursOfOperationID *string `json:"hoursOfOperationId,omitempty" tf:"hours_of_operation_id,omitempty"` + + // The identifier of the hosting Amazon Connect Instance and identifier of the Hours of Operation separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Specifies the name of the Hours of Operation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Specifies the time zone of the Hours of Operation. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type HoursOfOperationParameters struct { + + // One or more config blocks which define the configuration information for the hours of operation: day, start time, and end time . Config blocks are documented below. + // +kubebuilder:validation:Optional + Config []ConfigParameters `json:"config,omitempty" tf:"config,omitempty"` + + // Specifies the description of the Hours of Operation. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Hours of Operation. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the time zone of the Hours of Operation. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type StartTimeInitParameters struct { + + // Specifies the hour of opening. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Specifies the minute of opening. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type StartTimeObservation struct { + + // Specifies the hour of opening. + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Specifies the minute of opening. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type StartTimeParameters struct { + + // Specifies the hour of opening. 
+ // +kubebuilder:validation:Optional + Hours *float64 `json:"hours" tf:"hours,omitempty"` + + // Specifies the minute of opening. + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes" tf:"minutes,omitempty"` +} + +// HoursOfOperationSpec defines the desired state of HoursOfOperation +type HoursOfOperationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HoursOfOperationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HoursOfOperationInitParameters `json:"initProvider,omitempty"` +} + +// HoursOfOperationStatus defines the observed state of HoursOfOperation. +type HoursOfOperationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HoursOfOperationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// HoursOfOperation is the Schema for the HoursOfOperations API. Provides details about a specific Amazon Connect Hours of Operation. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type HoursOfOperation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.config) || (has(self.initProvider) && has(self.initProvider.config))",message="spec.forProvider.config is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timeZone) || (has(self.initProvider) && has(self.initProvider.timeZone))",message="spec.forProvider.timeZone is a required parameter" + Spec HoursOfOperationSpec `json:"spec"` + Status HoursOfOperationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HoursOfOperationList contains a list of HoursOfOperations +type HoursOfOperationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HoursOfOperation `json:"items"` +} + +// Repository type metadata. 
+var ( + HoursOfOperation_Kind = "HoursOfOperation" + HoursOfOperation_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HoursOfOperation_Kind}.String() + HoursOfOperation_KindAPIVersion = HoursOfOperation_Kind + "." + CRDGroupVersion.String() + HoursOfOperation_GroupVersionKind = CRDGroupVersion.WithKind(HoursOfOperation_Kind) +) + +func init() { + SchemeBuilder.Register(&HoursOfOperation{}, &HoursOfOperationList{}) +} diff --git a/apis/connect/v1beta3/zz_queue_terraformed.go b/apis/connect/v1beta3/zz_queue_terraformed.go new file mode 100755 index 0000000000..2a7464c1da --- /dev/null +++ b/apis/connect/v1beta3/zz_queue_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Queue +func (mg *Queue) GetTerraformResourceType() string { + return "aws_connect_queue" +} + +// GetConnectionDetailsMapping for this Queue +func (tr *Queue) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Queue +func (tr *Queue) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Queue +func (tr *Queue) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Queue +func (tr *Queue) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this Queue +func (tr *Queue) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Queue +func (tr *Queue) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Queue +func (tr *Queue) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Queue +func (tr *Queue) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Queue using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Queue) LateInitialize(attrs []byte) (bool, error) { + params := &QueueParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Queue) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/connect/v1beta3/zz_queue_types.go b/apis/connect/v1beta3/zz_queue_types.go new file mode 100755 index 0000000000..b791d573d0 --- /dev/null +++ b/apis/connect/v1beta3/zz_queue_types.go @@ -0,0 +1,277 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OutboundCallerConfigInitParameters struct { + + // Specifies the caller ID name. + OutboundCallerIDName *string `json:"outboundCallerIdName,omitempty" tf:"outbound_caller_id_name,omitempty"` + + // Specifies the caller ID number. + OutboundCallerIDNumberID *string `json:"outboundCallerIdNumberId,omitempty" tf:"outbound_caller_id_number_id,omitempty"` + + // Specifies outbound whisper flow to be used during an outbound call. + OutboundFlowID *string `json:"outboundFlowId,omitempty" tf:"outbound_flow_id,omitempty"` +} + +type OutboundCallerConfigObservation struct { + + // Specifies the caller ID name. + OutboundCallerIDName *string `json:"outboundCallerIdName,omitempty" tf:"outbound_caller_id_name,omitempty"` + + // Specifies the caller ID number. 
+ OutboundCallerIDNumberID *string `json:"outboundCallerIdNumberId,omitempty" tf:"outbound_caller_id_number_id,omitempty"` + + // Specifies outbound whisper flow to be used during an outbound call. + OutboundFlowID *string `json:"outboundFlowId,omitempty" tf:"outbound_flow_id,omitempty"` +} + +type OutboundCallerConfigParameters struct { + + // Specifies the caller ID name. + // +kubebuilder:validation:Optional + OutboundCallerIDName *string `json:"outboundCallerIdName,omitempty" tf:"outbound_caller_id_name,omitempty"` + + // Specifies the caller ID number. + // +kubebuilder:validation:Optional + OutboundCallerIDNumberID *string `json:"outboundCallerIdNumberId,omitempty" tf:"outbound_caller_id_number_id,omitempty"` + + // Specifies outbound whisper flow to be used during an outbound call. + // +kubebuilder:validation:Optional + OutboundFlowID *string `json:"outboundFlowId,omitempty" tf:"outbound_flow_id,omitempty"` +} + +type QueueInitParameters struct { + + // Specifies the description of the Queue. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the identifier of the Hours of Operation. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta3.HoursOfOperation + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("hours_of_operation_id",true) + HoursOfOperationID *string `json:"hoursOfOperationId,omitempty" tf:"hours_of_operation_id,omitempty"` + + // Reference to a HoursOfOperation in connect to populate hoursOfOperationId. + // +kubebuilder:validation:Optional + HoursOfOperationIDRef *v1.Reference `json:"hoursOfOperationIdRef,omitempty" tf:"-"` + + // Selector for a HoursOfOperation in connect to populate hoursOfOperationId. + // +kubebuilder:validation:Optional + HoursOfOperationIDSelector *v1.Selector `json:"hoursOfOperationIdSelector,omitempty" tf:"-"` + + // Specifies the identifier of the hosting Amazon Connect Instance. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // Specifies the maximum number of contacts that can be in the queue before it is considered full. Minimum value of 0. + MaxContacts *float64 `json:"maxContacts,omitempty" tf:"max_contacts,omitempty"` + + // Specifies the name of the Queue. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that defines the outbound caller ID name, number, and outbound whisper flow. The Outbound Caller Config block is documented below. + OutboundCallerConfig *OutboundCallerConfigInitParameters `json:"outboundCallerConfig,omitempty" tf:"outbound_caller_config,omitempty"` + + // Specifies a list of quick connects ids that determine the quick connects available to agents who are working the queue. + // +listType=set + QuickConnectIds []*string `json:"quickConnectIds,omitempty" tf:"quick_connect_ids,omitempty"` + + // Specifies the description of the Queue. Valid values are ENABLED, DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type QueueObservation struct { + + // The Amazon Resource Name (ARN) of the Queue. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specifies the description of the Queue. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the identifier of the Hours of Operation. + HoursOfOperationID *string `json:"hoursOfOperationId,omitempty" tf:"hours_of_operation_id,omitempty"` + + // The identifier of the hosting Amazon Connect Instance and identifier of the Queue separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Specifies the maximum number of contacts that can be in the queue before it is considered full. Minimum value of 0. + MaxContacts *float64 `json:"maxContacts,omitempty" tf:"max_contacts,omitempty"` + + // Specifies the name of the Queue. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that defines the outbound caller ID name, number, and outbound whisper flow. The Outbound Caller Config block is documented below. + OutboundCallerConfig *OutboundCallerConfigObservation `json:"outboundCallerConfig,omitempty" tf:"outbound_caller_config,omitempty"` + + // The identifier for the Queue. + QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` + + // Specifies a list of quick connects ids that determine the quick connects available to agents who are working the queue. + // +listType=set + QuickConnectIds []*string `json:"quickConnectIds,omitempty" tf:"quick_connect_ids,omitempty"` + + // Specifies the description of the Queue. Valid values are ENABLED, DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type QueueParameters struct { + + // Specifies the description of the Queue. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the identifier of the Hours of Operation. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta3.HoursOfOperation + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("hours_of_operation_id",true) + // +kubebuilder:validation:Optional + HoursOfOperationID *string `json:"hoursOfOperationId,omitempty" tf:"hours_of_operation_id,omitempty"` + + // Reference to a HoursOfOperation in connect to populate hoursOfOperationId. + // +kubebuilder:validation:Optional + HoursOfOperationIDRef *v1.Reference `json:"hoursOfOperationIdRef,omitempty" tf:"-"` + + // Selector for a HoursOfOperation in connect to populate hoursOfOperationId. + // +kubebuilder:validation:Optional + HoursOfOperationIDSelector *v1.Selector `json:"hoursOfOperationIdSelector,omitempty" tf:"-"` + + // Specifies the identifier of the hosting Amazon Connect Instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/connect/v1beta1.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // Reference to a Instance in connect to populate instanceId. + // +kubebuilder:validation:Optional + InstanceIDRef *v1.Reference `json:"instanceIdRef,omitempty" tf:"-"` + + // Selector for a Instance in connect to populate instanceId. 
+ // +kubebuilder:validation:Optional + InstanceIDSelector *v1.Selector `json:"instanceIdSelector,omitempty" tf:"-"` + + // Specifies the maximum number of contacts that can be in the queue before it is considered full. Minimum value of 0. + // +kubebuilder:validation:Optional + MaxContacts *float64 `json:"maxContacts,omitempty" tf:"max_contacts,omitempty"` + + // Specifies the name of the Queue. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that defines the outbound caller ID name, number, and outbound whisper flow. The Outbound Caller Config block is documented below. + // +kubebuilder:validation:Optional + OutboundCallerConfig *OutboundCallerConfigParameters `json:"outboundCallerConfig,omitempty" tf:"outbound_caller_config,omitempty"` + + // Specifies a list of quick connects ids that determine the quick connects available to agents who are working the queue. + // +kubebuilder:validation:Optional + // +listType=set + QuickConnectIds []*string `json:"quickConnectIds,omitempty" tf:"quick_connect_ids,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies the description of the Queue. Valid values are ENABLED, DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// QueueSpec defines the desired state of Queue +type QueueSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider QueueParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider QueueInitParameters `json:"initProvider,omitempty"` +} + +// QueueStatus defines the observed state of Queue. +type QueueStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider QueueObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Queue is the Schema for the Queues API. Provides details about a specific Amazon Connect Queue +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Queue struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec QueueSpec `json:"spec"` + Status QueueStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// QueueList contains a list of Queues +type QueueList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Queue `json:"items"` +} + +// Repository type metadata. +var ( + Queue_Kind = "Queue" + Queue_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Queue_Kind}.String() + Queue_KindAPIVersion = Queue_Kind + "." + CRDGroupVersion.String() + Queue_GroupVersionKind = CRDGroupVersion.WithKind(Queue_Kind) +) + +func init() { + SchemeBuilder.Register(&Queue{}, &QueueList{}) +} diff --git a/apis/cur/v1beta1/zz_generated.resolvers.go b/apis/cur/v1beta1/zz_generated.resolvers.go index f0abaa70d6..8eaac7d709 100644 --- a/apis/cur/v1beta1/zz_generated.resolvers.go +++ b/apis/cur/v1beta1/zz_generated.resolvers.go @@ -25,7 +25,7 @@ func (mg *ReportDefinition) ResolveReferences( // ResolveReferences of this Repo var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -44,7 +44,7 @@ func (mg *ReportDefinition) ResolveReferences( // ResolveReferences of this Repo mg.Spec.ForProvider.S3Bucket = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.S3BucketRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cur/v1beta1/zz_reportdefinition_types.go b/apis/cur/v1beta1/zz_reportdefinition_types.go index c7d2895afb..50fd4334c2 100755 --- 
a/apis/cur/v1beta1/zz_reportdefinition_types.go +++ b/apis/cur/v1beta1/zz_reportdefinition_types.go @@ -36,7 +36,7 @@ type ReportDefinitionInitParameters struct { ReportVersioning *string `json:"reportVersioning,omitempty" tf:"report_versioning,omitempty"` // Name of the existing S3 bucket to hold generated reports. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` // Reference to a Bucket in s3 to populate s3Bucket. @@ -131,7 +131,7 @@ type ReportDefinitionParameters struct { ReportVersioning *string `json:"reportVersioning,omitempty" tf:"report_versioning,omitempty"` // Name of the existing S3 bucket to hold generated reports. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +kubebuilder:validation:Optional S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` diff --git a/apis/datasync/v1beta1/zz_generated.conversion_spokes.go b/apis/datasync/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..96983dcaa4 --- /dev/null +++ b/apis/datasync/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this LocationS3 to the hub type. 
+func (tr *LocationS3) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LocationS3 type. +func (tr *LocationS3) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Task to the hub type. +func (tr *Task) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Task type. 
+func (tr *Task) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/datasync/v1beta2/zz_generated.conversion_hubs.go b/apis/datasync/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..e4e2abedf9 --- /dev/null +++ b/apis/datasync/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *LocationS3) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Task) Hub() {} diff --git a/apis/datasync/v1beta2/zz_generated.deepcopy.go b/apis/datasync/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..89f087a51b --- /dev/null +++ b/apis/datasync/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1638 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExcludesInitParameters) DeepCopyInto(out *ExcludesInitParameters) { + *out = *in + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludesInitParameters. +func (in *ExcludesInitParameters) DeepCopy() *ExcludesInitParameters { + if in == nil { + return nil + } + out := new(ExcludesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludesObservation) DeepCopyInto(out *ExcludesObservation) { + *out = *in + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludesObservation. +func (in *ExcludesObservation) DeepCopy() *ExcludesObservation { + if in == nil { + return nil + } + out := new(ExcludesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludesParameters) DeepCopyInto(out *ExcludesParameters) { + *out = *in + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludesParameters. 
+func (in *ExcludesParameters) DeepCopy() *ExcludesParameters { + if in == nil { + return nil + } + out := new(ExcludesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludesInitParameters) DeepCopyInto(out *IncludesInitParameters) { + *out = *in + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludesInitParameters. +func (in *IncludesInitParameters) DeepCopy() *IncludesInitParameters { + if in == nil { + return nil + } + out := new(IncludesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludesObservation) DeepCopyInto(out *IncludesObservation) { + *out = *in + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludesObservation. +func (in *IncludesObservation) DeepCopy() *IncludesObservation { + if in == nil { + return nil + } + out := new(IncludesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IncludesParameters) DeepCopyInto(out *IncludesParameters) { + *out = *in + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludesParameters. +func (in *IncludesParameters) DeepCopy() *IncludesParameters { + if in == nil { + return nil + } + out := new(IncludesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationS3) DeepCopyInto(out *LocationS3) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3. +func (in *LocationS3) DeepCopy() *LocationS3 { + if in == nil { + return nil + } + out := new(LocationS3) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocationS3) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocationS3InitParameters) DeepCopyInto(out *LocationS3InitParameters) { + *out = *in + if in.AgentArns != nil { + in, out := &in.AgentArns, &out.AgentArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.S3BucketArn != nil { + in, out := &in.S3BucketArn, &out.S3BucketArn + *out = new(string) + **out = **in + } + if in.S3BucketArnRef != nil { + in, out := &in.S3BucketArnRef, &out.S3BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.S3BucketArnSelector != nil { + in, out := &in.S3BucketArnSelector, &out.S3BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3Config != nil { + in, out := &in.S3Config, &out.S3Config + *out = new(S3ConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3StorageClass != nil { + in, out := &in.S3StorageClass, &out.S3StorageClass + *out = new(string) + **out = **in + } + if in.Subdirectory != nil { + in, out := &in.Subdirectory, &out.Subdirectory + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3InitParameters. +func (in *LocationS3InitParameters) DeepCopy() *LocationS3InitParameters { + if in == nil { + return nil + } + out := new(LocationS3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocationS3List) DeepCopyInto(out *LocationS3List) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LocationS3, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3List. +func (in *LocationS3List) DeepCopy() *LocationS3List { + if in == nil { + return nil + } + out := new(LocationS3List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocationS3List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationS3Observation) DeepCopyInto(out *LocationS3Observation) { + *out = *in + if in.AgentArns != nil { + in, out := &in.AgentArns, &out.AgentArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.S3BucketArn != nil { + in, out := &in.S3BucketArn, &out.S3BucketArn + *out = new(string) + **out = **in + } + if in.S3Config != nil { + in, out := &in.S3Config, &out.S3Config + *out = new(S3ConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.S3StorageClass != nil { + in, out := &in.S3StorageClass, &out.S3StorageClass + *out = new(string) + **out = **in + } + if in.Subdirectory != nil { + in, out := &in.Subdirectory, &out.Subdirectory + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3Observation. +func (in *LocationS3Observation) DeepCopy() *LocationS3Observation { + if in == nil { + return nil + } + out := new(LocationS3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocationS3Parameters) DeepCopyInto(out *LocationS3Parameters) { + *out = *in + if in.AgentArns != nil { + in, out := &in.AgentArns, &out.AgentArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.S3BucketArn != nil { + in, out := &in.S3BucketArn, &out.S3BucketArn + *out = new(string) + **out = **in + } + if in.S3BucketArnRef != nil { + in, out := &in.S3BucketArnRef, &out.S3BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.S3BucketArnSelector != nil { + in, out := &in.S3BucketArnSelector, &out.S3BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3Config != nil { + in, out := &in.S3Config, &out.S3Config + *out = new(S3ConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.S3StorageClass != nil { + in, out := &in.S3StorageClass, &out.S3StorageClass + *out = new(string) + **out = **in + } + if in.Subdirectory != nil { + in, out := &in.Subdirectory, &out.Subdirectory + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3Parameters. +func (in *LocationS3Parameters) DeepCopy() *LocationS3Parameters { + if in == nil { + return nil + } + out := new(LocationS3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocationS3Spec) DeepCopyInto(out *LocationS3Spec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3Spec. +func (in *LocationS3Spec) DeepCopy() *LocationS3Spec { + if in == nil { + return nil + } + out := new(LocationS3Spec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationS3Status) DeepCopyInto(out *LocationS3Status) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3Status. +func (in *LocationS3Status) DeepCopy() *LocationS3Status { + if in == nil { + return nil + } + out := new(LocationS3Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OptionsInitParameters) DeepCopyInto(out *OptionsInitParameters) { + *out = *in + if in.Atime != nil { + in, out := &in.Atime, &out.Atime + *out = new(string) + **out = **in + } + if in.BytesPerSecond != nil { + in, out := &in.BytesPerSecond, &out.BytesPerSecond + *out = new(float64) + **out = **in + } + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(string) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.Mtime != nil { + in, out := &in.Mtime, &out.Mtime + *out = new(string) + **out = **in + } + if in.ObjectTags != nil { + in, out := &in.ObjectTags, &out.ObjectTags + *out = new(string) + **out = **in + } + if in.OverwriteMode != nil { + in, out := &in.OverwriteMode, &out.OverwriteMode + *out = new(string) + **out = **in + } + if in.PosixPermissions != nil { + in, out := &in.PosixPermissions, &out.PosixPermissions + *out = new(string) + **out = **in + } + if in.PreserveDeletedFiles != nil { + in, out := &in.PreserveDeletedFiles, &out.PreserveDeletedFiles + *out = new(string) + **out = **in + } + if in.PreserveDevices != nil { + in, out := &in.PreserveDevices, &out.PreserveDevices + *out = new(string) + **out = **in + } + if in.SecurityDescriptorCopyFlags != nil { + in, out := &in.SecurityDescriptorCopyFlags, &out.SecurityDescriptorCopyFlags + *out = new(string) + **out = **in + } + if in.TaskQueueing != nil { + in, out := &in.TaskQueueing, &out.TaskQueueing + *out = new(string) + **out = **in + } + if in.TransferMode != nil { + in, out := &in.TransferMode, &out.TransferMode + *out = new(string) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(string) + **out = **in + } + if in.VerifyMode != nil { + in, out := &in.VerifyMode, &out.VerifyMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsInitParameters. 
+func (in *OptionsInitParameters) DeepCopy() *OptionsInitParameters { + if in == nil { + return nil + } + out := new(OptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsObservation) DeepCopyInto(out *OptionsObservation) { + *out = *in + if in.Atime != nil { + in, out := &in.Atime, &out.Atime + *out = new(string) + **out = **in + } + if in.BytesPerSecond != nil { + in, out := &in.BytesPerSecond, &out.BytesPerSecond + *out = new(float64) + **out = **in + } + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(string) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.Mtime != nil { + in, out := &in.Mtime, &out.Mtime + *out = new(string) + **out = **in + } + if in.ObjectTags != nil { + in, out := &in.ObjectTags, &out.ObjectTags + *out = new(string) + **out = **in + } + if in.OverwriteMode != nil { + in, out := &in.OverwriteMode, &out.OverwriteMode + *out = new(string) + **out = **in + } + if in.PosixPermissions != nil { + in, out := &in.PosixPermissions, &out.PosixPermissions + *out = new(string) + **out = **in + } + if in.PreserveDeletedFiles != nil { + in, out := &in.PreserveDeletedFiles, &out.PreserveDeletedFiles + *out = new(string) + **out = **in + } + if in.PreserveDevices != nil { + in, out := &in.PreserveDevices, &out.PreserveDevices + *out = new(string) + **out = **in + } + if in.SecurityDescriptorCopyFlags != nil { + in, out := &in.SecurityDescriptorCopyFlags, &out.SecurityDescriptorCopyFlags + *out = new(string) + **out = **in + } + if in.TaskQueueing != nil { + in, out := &in.TaskQueueing, &out.TaskQueueing + *out = new(string) + **out = **in + } + if in.TransferMode != nil { + in, out := &in.TransferMode, &out.TransferMode + *out = new(string) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out 
= new(string) + **out = **in + } + if in.VerifyMode != nil { + in, out := &in.VerifyMode, &out.VerifyMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsObservation. +func (in *OptionsObservation) DeepCopy() *OptionsObservation { + if in == nil { + return nil + } + out := new(OptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsParameters) DeepCopyInto(out *OptionsParameters) { + *out = *in + if in.Atime != nil { + in, out := &in.Atime, &out.Atime + *out = new(string) + **out = **in + } + if in.BytesPerSecond != nil { + in, out := &in.BytesPerSecond, &out.BytesPerSecond + *out = new(float64) + **out = **in + } + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(string) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.Mtime != nil { + in, out := &in.Mtime, &out.Mtime + *out = new(string) + **out = **in + } + if in.ObjectTags != nil { + in, out := &in.ObjectTags, &out.ObjectTags + *out = new(string) + **out = **in + } + if in.OverwriteMode != nil { + in, out := &in.OverwriteMode, &out.OverwriteMode + *out = new(string) + **out = **in + } + if in.PosixPermissions != nil { + in, out := &in.PosixPermissions, &out.PosixPermissions + *out = new(string) + **out = **in + } + if in.PreserveDeletedFiles != nil { + in, out := &in.PreserveDeletedFiles, &out.PreserveDeletedFiles + *out = new(string) + **out = **in + } + if in.PreserveDevices != nil { + in, out := &in.PreserveDevices, &out.PreserveDevices + *out = new(string) + **out = **in + } + if in.SecurityDescriptorCopyFlags != nil { + in, out := &in.SecurityDescriptorCopyFlags, &out.SecurityDescriptorCopyFlags + *out = new(string) + **out = **in + } + if in.TaskQueueing != nil { + in, out := 
&in.TaskQueueing, &out.TaskQueueing + *out = new(string) + **out = **in + } + if in.TransferMode != nil { + in, out := &in.TransferMode, &out.TransferMode + *out = new(string) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(string) + **out = **in + } + if in.VerifyMode != nil { + in, out := &in.VerifyMode, &out.VerifyMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsParameters. +func (in *OptionsParameters) DeepCopy() *OptionsParameters { + if in == nil { + return nil + } + out := new(OptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportOverridesInitParameters) DeepCopyInto(out *ReportOverridesInitParameters) { + *out = *in + if in.DeletedOverride != nil { + in, out := &in.DeletedOverride, &out.DeletedOverride + *out = new(string) + **out = **in + } + if in.SkippedOverride != nil { + in, out := &in.SkippedOverride, &out.SkippedOverride + *out = new(string) + **out = **in + } + if in.TransferredOverride != nil { + in, out := &in.TransferredOverride, &out.TransferredOverride + *out = new(string) + **out = **in + } + if in.VerifiedOverride != nil { + in, out := &in.VerifiedOverride, &out.VerifiedOverride + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportOverridesInitParameters. +func (in *ReportOverridesInitParameters) DeepCopy() *ReportOverridesInitParameters { + if in == nil { + return nil + } + out := new(ReportOverridesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReportOverridesObservation) DeepCopyInto(out *ReportOverridesObservation) { + *out = *in + if in.DeletedOverride != nil { + in, out := &in.DeletedOverride, &out.DeletedOverride + *out = new(string) + **out = **in + } + if in.SkippedOverride != nil { + in, out := &in.SkippedOverride, &out.SkippedOverride + *out = new(string) + **out = **in + } + if in.TransferredOverride != nil { + in, out := &in.TransferredOverride, &out.TransferredOverride + *out = new(string) + **out = **in + } + if in.VerifiedOverride != nil { + in, out := &in.VerifiedOverride, &out.VerifiedOverride + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportOverridesObservation. +func (in *ReportOverridesObservation) DeepCopy() *ReportOverridesObservation { + if in == nil { + return nil + } + out := new(ReportOverridesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReportOverridesParameters) DeepCopyInto(out *ReportOverridesParameters) { + *out = *in + if in.DeletedOverride != nil { + in, out := &in.DeletedOverride, &out.DeletedOverride + *out = new(string) + **out = **in + } + if in.SkippedOverride != nil { + in, out := &in.SkippedOverride, &out.SkippedOverride + *out = new(string) + **out = **in + } + if in.TransferredOverride != nil { + in, out := &in.TransferredOverride, &out.TransferredOverride + *out = new(string) + **out = **in + } + if in.VerifiedOverride != nil { + in, out := &in.VerifiedOverride, &out.VerifiedOverride + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportOverridesParameters. 
+func (in *ReportOverridesParameters) DeepCopy() *ReportOverridesParameters { + if in == nil { + return nil + } + out := new(ReportOverridesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigInitParameters) DeepCopyInto(out *S3ConfigInitParameters) { + *out = *in + if in.BucketAccessRoleArn != nil { + in, out := &in.BucketAccessRoleArn, &out.BucketAccessRoleArn + *out = new(string) + **out = **in + } + if in.BucketAccessRoleArnRef != nil { + in, out := &in.BucketAccessRoleArnRef, &out.BucketAccessRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketAccessRoleArnSelector != nil { + in, out := &in.BucketAccessRoleArnSelector, &out.BucketAccessRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigInitParameters. +func (in *S3ConfigInitParameters) DeepCopy() *S3ConfigInitParameters { + if in == nil { + return nil + } + out := new(S3ConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigObservation) DeepCopyInto(out *S3ConfigObservation) { + *out = *in + if in.BucketAccessRoleArn != nil { + in, out := &in.BucketAccessRoleArn, &out.BucketAccessRoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigObservation. +func (in *S3ConfigObservation) DeepCopy() *S3ConfigObservation { + if in == nil { + return nil + } + out := new(S3ConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ConfigParameters) DeepCopyInto(out *S3ConfigParameters) { + *out = *in + if in.BucketAccessRoleArn != nil { + in, out := &in.BucketAccessRoleArn, &out.BucketAccessRoleArn + *out = new(string) + **out = **in + } + if in.BucketAccessRoleArnRef != nil { + in, out := &in.BucketAccessRoleArnRef, &out.BucketAccessRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketAccessRoleArnSelector != nil { + in, out := &in.BucketAccessRoleArnSelector, &out.BucketAccessRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigParameters. +func (in *S3ConfigParameters) DeepCopy() *S3ConfigParameters { + if in == nil { + return nil + } + out := new(S3ConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DestinationInitParameters) DeepCopyInto(out *S3DestinationInitParameters) { + *out = *in + if in.BucketAccessRoleArn != nil { + in, out := &in.BucketAccessRoleArn, &out.BucketAccessRoleArn + *out = new(string) + **out = **in + } + if in.S3BucketArn != nil { + in, out := &in.S3BucketArn, &out.S3BucketArn + *out = new(string) + **out = **in + } + if in.Subdirectory != nil { + in, out := &in.Subdirectory, &out.Subdirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationInitParameters. +func (in *S3DestinationInitParameters) DeepCopy() *S3DestinationInitParameters { + if in == nil { + return nil + } + out := new(S3DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3DestinationObservation) DeepCopyInto(out *S3DestinationObservation) { + *out = *in + if in.BucketAccessRoleArn != nil { + in, out := &in.BucketAccessRoleArn, &out.BucketAccessRoleArn + *out = new(string) + **out = **in + } + if in.S3BucketArn != nil { + in, out := &in.S3BucketArn, &out.S3BucketArn + *out = new(string) + **out = **in + } + if in.Subdirectory != nil { + in, out := &in.Subdirectory, &out.Subdirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationObservation. +func (in *S3DestinationObservation) DeepCopy() *S3DestinationObservation { + if in == nil { + return nil + } + out := new(S3DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DestinationParameters) DeepCopyInto(out *S3DestinationParameters) { + *out = *in + if in.BucketAccessRoleArn != nil { + in, out := &in.BucketAccessRoleArn, &out.BucketAccessRoleArn + *out = new(string) + **out = **in + } + if in.S3BucketArn != nil { + in, out := &in.S3BucketArn, &out.S3BucketArn + *out = new(string) + **out = **in + } + if in.Subdirectory != nil { + in, out := &in.Subdirectory, &out.Subdirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationParameters. +func (in *S3DestinationParameters) DeepCopy() *S3DestinationParameters { + if in == nil { + return nil + } + out := new(S3DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. +func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Task) DeepCopyInto(out *Task) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task. +func (in *Task) DeepCopy() *Task { + if in == nil { + return nil + } + out := new(Task) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Task) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskInitParameters) DeepCopyInto(out *TaskInitParameters) { + *out = *in + if in.CloudwatchLogGroupArn != nil { + in, out := &in.CloudwatchLogGroupArn, &out.CloudwatchLogGroupArn + *out = new(string) + **out = **in + } + if in.CloudwatchLogGroupArnRef != nil { + in, out := &in.CloudwatchLogGroupArnRef, &out.CloudwatchLogGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLogGroupArnSelector != nil { + in, out := &in.CloudwatchLogGroupArnSelector, &out.CloudwatchLogGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DestinationLocationArn != nil { + in, out := &in.DestinationLocationArn, &out.DestinationLocationArn + *out = new(string) + **out = **in + } + if in.DestinationLocationArnRef != nil { + in, out := &in.DestinationLocationArnRef, &out.DestinationLocationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DestinationLocationArnSelector != nil { + in, out := &in.DestinationLocationArnSelector, &out.DestinationLocationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = new(ExcludesInitParameters) + 
(*in).DeepCopyInto(*out) + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = new(IncludesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceLocationArn != nil { + in, out := &in.SourceLocationArn, &out.SourceLocationArn + *out = new(string) + **out = **in + } + if in.SourceLocationArnRef != nil { + in, out := &in.SourceLocationArnRef, &out.SourceLocationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceLocationArnSelector != nil { + in, out := &in.SourceLocationArnSelector, &out.SourceLocationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskReportConfig != nil { + in, out := &in.TaskReportConfig, &out.TaskReportConfig + *out = new(TaskReportConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskInitParameters. +func (in *TaskInitParameters) DeepCopy() *TaskInitParameters { + if in == nil { + return nil + } + out := new(TaskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskList) DeepCopyInto(out *TaskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Task, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList. +func (in *TaskList) DeepCopy() *TaskList { + if in == nil { + return nil + } + out := new(TaskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskObservation) DeepCopyInto(out *TaskObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CloudwatchLogGroupArn != nil { + in, out := &in.CloudwatchLogGroupArn, &out.CloudwatchLogGroupArn + *out = new(string) + **out = **in + } + if in.DestinationLocationArn != nil { + in, out := &in.DestinationLocationArn, &out.DestinationLocationArn + *out = new(string) + **out = **in + } + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = new(ExcludesObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = new(IncludesObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + 
*out = new(ScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.SourceLocationArn != nil { + in, out := &in.SourceLocationArn, &out.SourceLocationArn + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskReportConfig != nil { + in, out := &in.TaskReportConfig, &out.TaskReportConfig + *out = new(TaskReportConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskObservation. +func (in *TaskObservation) DeepCopy() *TaskObservation { + if in == nil { + return nil + } + out := new(TaskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskParameters) DeepCopyInto(out *TaskParameters) { + *out = *in + if in.CloudwatchLogGroupArn != nil { + in, out := &in.CloudwatchLogGroupArn, &out.CloudwatchLogGroupArn + *out = new(string) + **out = **in + } + if in.CloudwatchLogGroupArnRef != nil { + in, out := &in.CloudwatchLogGroupArnRef, &out.CloudwatchLogGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLogGroupArnSelector != nil { + in, out := &in.CloudwatchLogGroupArnSelector, &out.CloudwatchLogGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DestinationLocationArn != nil { + in, out := &in.DestinationLocationArn, &out.DestinationLocationArn + *out = new(string) + **out = **in + } + if in.DestinationLocationArnRef != nil { + in, out := &in.DestinationLocationArnRef, &out.DestinationLocationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DestinationLocationArnSelector != nil { + in, out := &in.DestinationLocationArnSelector, &out.DestinationLocationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = new(ExcludesParameters) + (*in).DeepCopyInto(*out) + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = new(IncludesParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceLocationArn != nil { + in, out := &in.SourceLocationArn, &out.SourceLocationArn + *out = new(string) + **out = **in + } + if in.SourceLocationArnRef != nil { + in, out := 
&in.SourceLocationArnRef, &out.SourceLocationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceLocationArnSelector != nil { + in, out := &in.SourceLocationArnSelector, &out.SourceLocationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskReportConfig != nil { + in, out := &in.TaskReportConfig, &out.TaskReportConfig + *out = new(TaskReportConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskParameters. +func (in *TaskParameters) DeepCopy() *TaskParameters { + if in == nil { + return nil + } + out := new(TaskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskReportConfigInitParameters) DeepCopyInto(out *TaskReportConfigInitParameters) { + *out = *in + if in.OutputType != nil { + in, out := &in.OutputType, &out.OutputType + *out = new(string) + **out = **in + } + if in.ReportLevel != nil { + in, out := &in.ReportLevel, &out.ReportLevel + *out = new(string) + **out = **in + } + if in.ReportOverrides != nil { + in, out := &in.ReportOverrides, &out.ReportOverrides + *out = new(ReportOverridesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Destination != nil { + in, out := &in.S3Destination, &out.S3Destination + *out = new(S3DestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3ObjectVersioning != nil { + in, out := &in.S3ObjectVersioning, &out.S3ObjectVersioning + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskReportConfigInitParameters. +func (in *TaskReportConfigInitParameters) DeepCopy() *TaskReportConfigInitParameters { + if in == nil { + return nil + } + out := new(TaskReportConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskReportConfigObservation) DeepCopyInto(out *TaskReportConfigObservation) { + *out = *in + if in.OutputType != nil { + in, out := &in.OutputType, &out.OutputType + *out = new(string) + **out = **in + } + if in.ReportLevel != nil { + in, out := &in.ReportLevel, &out.ReportLevel + *out = new(string) + **out = **in + } + if in.ReportOverrides != nil { + in, out := &in.ReportOverrides, &out.ReportOverrides + *out = new(ReportOverridesObservation) + (*in).DeepCopyInto(*out) + } + if in.S3Destination != nil { + in, out := &in.S3Destination, &out.S3Destination + *out = new(S3DestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.S3ObjectVersioning != nil { + in, out := &in.S3ObjectVersioning, &out.S3ObjectVersioning + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskReportConfigObservation. +func (in *TaskReportConfigObservation) DeepCopy() *TaskReportConfigObservation { + if in == nil { + return nil + } + out := new(TaskReportConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskReportConfigParameters) DeepCopyInto(out *TaskReportConfigParameters) { + *out = *in + if in.OutputType != nil { + in, out := &in.OutputType, &out.OutputType + *out = new(string) + **out = **in + } + if in.ReportLevel != nil { + in, out := &in.ReportLevel, &out.ReportLevel + *out = new(string) + **out = **in + } + if in.ReportOverrides != nil { + in, out := &in.ReportOverrides, &out.ReportOverrides + *out = new(ReportOverridesParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Destination != nil { + in, out := &in.S3Destination, &out.S3Destination + *out = new(S3DestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.S3ObjectVersioning != nil { + in, out := &in.S3ObjectVersioning, &out.S3ObjectVersioning + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskReportConfigParameters. +func (in *TaskReportConfigParameters) DeepCopy() *TaskReportConfigParameters { + if in == nil { + return nil + } + out := new(TaskReportConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec. +func (in *TaskSpec) DeepCopy() *TaskSpec { + if in == nil { + return nil + } + out := new(TaskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskStatus) DeepCopyInto(out *TaskStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskStatus. +func (in *TaskStatus) DeepCopy() *TaskStatus { + if in == nil { + return nil + } + out := new(TaskStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/datasync/v1beta2/zz_generated.managed.go b/apis/datasync/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..e0ab04a681 --- /dev/null +++ b/apis/datasync/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this LocationS3. +func (mg *LocationS3) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LocationS3. +func (mg *LocationS3) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LocationS3. +func (mg *LocationS3) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LocationS3. +func (mg *LocationS3) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LocationS3. +func (mg *LocationS3) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LocationS3. +func (mg *LocationS3) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LocationS3. 
+func (mg *LocationS3) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LocationS3. +func (mg *LocationS3) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LocationS3. +func (mg *LocationS3) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LocationS3. +func (mg *LocationS3) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LocationS3. +func (mg *LocationS3) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LocationS3. +func (mg *LocationS3) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Task. +func (mg *Task) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Task. +func (mg *Task) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Task. +func (mg *Task) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Task. +func (mg *Task) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Task. +func (mg *Task) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Task. +func (mg *Task) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Task. 
+func (mg *Task) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Task. +func (mg *Task) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Task. +func (mg *Task) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Task. +func (mg *Task) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Task. +func (mg *Task) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Task. +func (mg *Task) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/datasync/v1beta2/zz_generated.managedlist.go b/apis/datasync/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..5096ff89a1 --- /dev/null +++ b/apis/datasync/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this LocationS3List. +func (l *LocationS3List) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TaskList. 
+func (l *TaskList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/datasync/v1beta2/zz_generated.resolvers.go b/apis/datasync/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..f8d72bfdea --- /dev/null +++ b/apis/datasync/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,238 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *LocationS3) ResolveReferences( // ResolveReferences of this LocationS3. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.S3BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.S3BucketArnRef, + Selector: mg.Spec.ForProvider.S3BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.S3BucketArn") + } + mg.Spec.ForProvider.S3BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.S3BucketArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.S3Config != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.S3Config.BucketAccessRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.S3Config.BucketAccessRoleArnRef, + Selector: mg.Spec.ForProvider.S3Config.BucketAccessRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.S3Config.BucketAccessRoleArn") + } + mg.Spec.ForProvider.S3Config.BucketAccessRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.S3Config.BucketAccessRoleArnRef = rsp.ResolvedReference + + } + { + m, l, err = 
apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.S3BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.S3BucketArnRef, + Selector: mg.Spec.InitProvider.S3BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.S3BucketArn") + } + mg.Spec.InitProvider.S3BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.S3BucketArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.S3Config != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.S3Config.BucketAccessRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.S3Config.BucketAccessRoleArnRef, + Selector: mg.Spec.InitProvider.S3Config.BucketAccessRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.S3Config.BucketAccessRoleArn") + } + mg.Spec.InitProvider.S3Config.BucketAccessRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.S3Config.BucketAccessRoleArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Task. 
+func (mg *Task) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CloudwatchLogGroupArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.CloudwatchLogGroupArnRef, + Selector: mg.Spec.ForProvider.CloudwatchLogGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CloudwatchLogGroupArn") + } + mg.Spec.ForProvider.CloudwatchLogGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CloudwatchLogGroupArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datasync.aws.upbound.io", "v1beta2", "LocationS3", "LocationS3List") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DestinationLocationArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DestinationLocationArnRef, + Selector: mg.Spec.ForProvider.DestinationLocationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DestinationLocationArn") + } + mg.Spec.ForProvider.DestinationLocationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DestinationLocationArnRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("datasync.aws.upbound.io", "v1beta2", "LocationS3", "LocationS3List") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceLocationArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SourceLocationArnRef, + Selector: mg.Spec.ForProvider.SourceLocationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceLocationArn") + } + mg.Spec.ForProvider.SourceLocationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceLocationArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CloudwatchLogGroupArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.CloudwatchLogGroupArnRef, + Selector: mg.Spec.InitProvider.CloudwatchLogGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CloudwatchLogGroupArn") + } + mg.Spec.InitProvider.CloudwatchLogGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CloudwatchLogGroupArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datasync.aws.upbound.io", "v1beta2", "LocationS3", "LocationS3List") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DestinationLocationArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DestinationLocationArnRef, + Selector: mg.Spec.InitProvider.DestinationLocationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DestinationLocationArn") + } + mg.Spec.InitProvider.DestinationLocationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DestinationLocationArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datasync.aws.upbound.io", "v1beta2", "LocationS3", "LocationS3List") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceLocationArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SourceLocationArnRef, + Selector: mg.Spec.InitProvider.SourceLocationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceLocationArn") + } + mg.Spec.InitProvider.SourceLocationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceLocationArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/datasync/v1beta2/zz_groupversion_info.go b/apis/datasync/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..8b1addb93d --- /dev/null +++ b/apis/datasync/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=datasync.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "datasync.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/datasync/v1beta2/zz_locations3_terraformed.go b/apis/datasync/v1beta2/zz_locations3_terraformed.go new file mode 100755 index 0000000000..a206a8428c --- /dev/null +++ b/apis/datasync/v1beta2/zz_locations3_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LocationS3 +func (mg *LocationS3) GetTerraformResourceType() string { + return "aws_datasync_location_s3" +} + +// GetConnectionDetailsMapping for this LocationS3 +func (tr *LocationS3) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LocationS3 +func (tr *LocationS3) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LocationS3 +func (tr *LocationS3) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LocationS3 +func (tr *LocationS3) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LocationS3 +func (tr *LocationS3) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LocationS3 +func (tr *LocationS3) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LocationS3 +func (tr *LocationS3) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LocationS3 +func (tr *LocationS3) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LocationS3 using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LocationS3) LateInitialize(attrs []byte) (bool, error) { + params := &LocationS3Parameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LocationS3) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datasync/v1beta2/zz_locations3_types.go b/apis/datasync/v1beta2/zz_locations3_types.go new file mode 100755 index 0000000000..28410fb32c --- /dev/null +++ b/apis/datasync/v1beta2/zz_locations3_types.go @@ -0,0 +1,227 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LocationS3InitParameters struct { + + // A list of DataSync Agent ARNs with which this location will be associated. + // +listType=set + AgentArns []*string `json:"agentArns,omitempty" tf:"agent_arns,omitempty"` + + // Amazon Resource Name (ARN) of the S3 Bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + S3BucketArn *string `json:"s3BucketArn,omitempty" tf:"s3_bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate s3BucketArn. + // +kubebuilder:validation:Optional + S3BucketArnRef *v1.Reference `json:"s3BucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate s3BucketArn. + // +kubebuilder:validation:Optional + S3BucketArnSelector *v1.Selector `json:"s3BucketArnSelector,omitempty" tf:"-"` + + // Configuration block containing information for connecting to S3. + S3Config *S3ConfigInitParameters `json:"s3Config,omitempty" tf:"s3_config,omitempty"` + + // The Amazon S3 storage class that you want to store your files in when this location is used as a task destination. 
Valid values + S3StorageClass *string `json:"s3StorageClass,omitempty" tf:"s3_storage_class,omitempty"` + + // Prefix to perform actions as source or destination. + Subdirectory *string `json:"subdirectory,omitempty" tf:"subdirectory,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LocationS3Observation struct { + + // A list of DataSync Agent ARNs with which this location will be associated. + // +listType=set + AgentArns []*string `json:"agentArns,omitempty" tf:"agent_arns,omitempty"` + + // Amazon Resource Name (ARN) of the DataSync Location. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Amazon Resource Name (ARN) of the DataSync Location. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Amazon Resource Name (ARN) of the S3 Bucket. + S3BucketArn *string `json:"s3BucketArn,omitempty" tf:"s3_bucket_arn,omitempty"` + + // Configuration block containing information for connecting to S3. + S3Config *S3ConfigObservation `json:"s3Config,omitempty" tf:"s3_config,omitempty"` + + // The Amazon S3 storage class that you want to store your files in when this location is used as a task destination. Valid values + S3StorageClass *string `json:"s3StorageClass,omitempty" tf:"s3_storage_class,omitempty"` + + // Prefix to perform actions as source or destination. + Subdirectory *string `json:"subdirectory,omitempty" tf:"subdirectory,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type LocationS3Parameters struct { + + // A list of DataSync Agent ARNs with which this location will be associated. + // +kubebuilder:validation:Optional + // +listType=set + AgentArns []*string `json:"agentArns,omitempty" tf:"agent_arns,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Amazon Resource Name (ARN) of the S3 Bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + S3BucketArn *string `json:"s3BucketArn,omitempty" tf:"s3_bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate s3BucketArn. + // +kubebuilder:validation:Optional + S3BucketArnRef *v1.Reference `json:"s3BucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate s3BucketArn. + // +kubebuilder:validation:Optional + S3BucketArnSelector *v1.Selector `json:"s3BucketArnSelector,omitempty" tf:"-"` + + // Configuration block containing information for connecting to S3. + // +kubebuilder:validation:Optional + S3Config *S3ConfigParameters `json:"s3Config,omitempty" tf:"s3_config,omitempty"` + + // The Amazon S3 storage class that you want to store your files in when this location is used as a task destination. Valid values + // +kubebuilder:validation:Optional + S3StorageClass *string `json:"s3StorageClass,omitempty" tf:"s3_storage_class,omitempty"` + + // Prefix to perform actions as source or destination. 
+ // +kubebuilder:validation:Optional + Subdirectory *string `json:"subdirectory,omitempty" tf:"subdirectory,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type S3ConfigInitParameters struct { + + // ARN of the IAM Role used to connect to the S3 Bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketAccessRoleArn *string `json:"bucketAccessRoleArn,omitempty" tf:"bucket_access_role_arn,omitempty"` + + // Reference to a Role in iam to populate bucketAccessRoleArn. + // +kubebuilder:validation:Optional + BucketAccessRoleArnRef *v1.Reference `json:"bucketAccessRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate bucketAccessRoleArn. + // +kubebuilder:validation:Optional + BucketAccessRoleArnSelector *v1.Selector `json:"bucketAccessRoleArnSelector,omitempty" tf:"-"` +} + +type S3ConfigObservation struct { + + // ARN of the IAM Role used to connect to the S3 Bucket. + BucketAccessRoleArn *string `json:"bucketAccessRoleArn,omitempty" tf:"bucket_access_role_arn,omitempty"` +} + +type S3ConfigParameters struct { + + // ARN of the IAM Role used to connect to the S3 Bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketAccessRoleArn *string `json:"bucketAccessRoleArn,omitempty" tf:"bucket_access_role_arn,omitempty"` + + // Reference to a Role in iam to populate bucketAccessRoleArn. 
+ // +kubebuilder:validation:Optional + BucketAccessRoleArnRef *v1.Reference `json:"bucketAccessRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate bucketAccessRoleArn. + // +kubebuilder:validation:Optional + BucketAccessRoleArnSelector *v1.Selector `json:"bucketAccessRoleArnSelector,omitempty" tf:"-"` +} + +// LocationS3Spec defines the desired state of LocationS3 +type LocationS3Spec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LocationS3Parameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LocationS3InitParameters `json:"initProvider,omitempty"` +} + +// LocationS3Status defines the observed state of LocationS3. +type LocationS3Status struct { + v1.ResourceStatus `json:",inline"` + AtProvider LocationS3Observation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LocationS3 is the Schema for the LocationS3s API. 
Manages an AWS DataSync S3 Location +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LocationS3 struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.s3Config) || (has(self.initProvider) && has(self.initProvider.s3Config))",message="spec.forProvider.s3Config is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.subdirectory) || (has(self.initProvider) && has(self.initProvider.subdirectory))",message="spec.forProvider.subdirectory is a required parameter" + Spec LocationS3Spec `json:"spec"` + Status LocationS3Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LocationS3List contains a list of LocationS3s +type LocationS3List struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LocationS3 `json:"items"` +} + +// Repository type metadata. +var ( + LocationS3_Kind = "LocationS3" + LocationS3_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LocationS3_Kind}.String() + LocationS3_KindAPIVersion = LocationS3_Kind + "." 
+ CRDGroupVersion.String() + LocationS3_GroupVersionKind = CRDGroupVersion.WithKind(LocationS3_Kind) +) + +func init() { + SchemeBuilder.Register(&LocationS3{}, &LocationS3List{}) +} diff --git a/apis/datasync/v1beta2/zz_task_terraformed.go b/apis/datasync/v1beta2/zz_task_terraformed.go new file mode 100755 index 0000000000..7d9ce0a618 --- /dev/null +++ b/apis/datasync/v1beta2/zz_task_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Task +func (mg *Task) GetTerraformResourceType() string { + return "aws_datasync_task" +} + +// GetConnectionDetailsMapping for this Task +func (tr *Task) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Task +func (tr *Task) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Task +func (tr *Task) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Task +func (tr *Task) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Task +func (tr *Task) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + 
+// SetParameters for this Task +func (tr *Task) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Task +func (tr *Task) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Task +func (tr *Task) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Task using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Task) LateInitialize(attrs []byte) (bool, error) { + params := &TaskParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Task) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datasync/v1beta2/zz_task_types.go b/apis/datasync/v1beta2/zz_task_types.go new file mode 100755 index 0000000000..ae04213783 --- /dev/null +++ b/apis/datasync/v1beta2/zz_task_types.go @@ -0,0 +1,640 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ExcludesInitParameters struct { + + // The type of filter rule to apply. Valid values: SIMPLE_PATTERN. + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` + + // A single filter string that consists of the patterns to exclude. The patterns are delimited by "|" (that is, a pipe), for example: /folder1|/folder2 + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ExcludesObservation struct { + + // The type of filter rule to apply. Valid values: SIMPLE_PATTERN. + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` + + // A single filter string that consists of the patterns to exclude. 
The patterns are delimited by "|" (that is, a pipe), for example: /folder1|/folder2 + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ExcludesParameters struct { + + // The type of filter rule to apply. Valid values: SIMPLE_PATTERN. + // +kubebuilder:validation:Optional + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` + + // A single filter string that consists of the patterns to exclude. The patterns are delimited by "|" (that is, a pipe), for example: /folder1|/folder2 + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type IncludesInitParameters struct { + + // The type of filter rule to apply. Valid values: SIMPLE_PATTERN. + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` + + // A single filter string that consists of the patterns to exclude. The patterns are delimited by "|" (that is, a pipe), for example: /folder1|/folder2 + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type IncludesObservation struct { + + // The type of filter rule to apply. Valid values: SIMPLE_PATTERN. + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` + + // A single filter string that consists of the patterns to exclude. The patterns are delimited by "|" (that is, a pipe), for example: /folder1|/folder2 + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type IncludesParameters struct { + + // The type of filter rule to apply. Valid values: SIMPLE_PATTERN. + // +kubebuilder:validation:Optional + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` + + // A single filter string that consists of the patterns to exclude. 
The patterns are delimited by "|" (that is, a pipe), for example: /folder1|/folder2 + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type OptionsInitParameters struct { + + // A file metadata that shows the last time a file was accessed (that is when the file was read or written to). If set to BEST_EFFORT, the DataSync Task attempts to preserve the original (that is, the version before sync PREPARING phase) atime attribute on all source files. Valid values: BEST_EFFORT, NONE. Default: BEST_EFFORT. + Atime *string `json:"atime,omitempty" tf:"atime,omitempty"` + + // Limits the bandwidth utilized. For example, to set a maximum of 1 MB, set this value to 1048576. Value values: -1 or greater. Default: -1 (unlimited). + BytesPerSecond *float64 `json:"bytesPerSecond,omitempty" tf:"bytes_per_second,omitempty"` + + // Group identifier of the file's owners. Valid values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE (preserve integer value of the ID). + GID *string `json:"gid,omitempty" tf:"gid,omitempty"` + + // Determines the type of logs that DataSync publishes to a log stream in the Amazon CloudWatch log group that you provide. Valid values: OFF, BASIC, TRANSFER. Default: OFF. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + + // A file metadata that indicates the last time a file was modified (written to) before the sync PREPARING phase. Value values: NONE, PRESERVE. Default: PRESERVE. + Mtime *string `json:"mtime,omitempty" tf:"mtime,omitempty"` + + // Specifies whether object tags are maintained when transferring between object storage systems. If you want your DataSync task to ignore object tags, specify the NONE value. Valid values: PRESERVE, NONE. Default value: PRESERVE. + ObjectTags *string `json:"objectTags,omitempty" tf:"object_tags,omitempty"` + + // Determines whether files at the destination should be overwritten or preserved when copying files. Valid values: ALWAYS, NEVER. 
Default: ALWAYS. + OverwriteMode *string `json:"overwriteMode,omitempty" tf:"overwrite_mode,omitempty"` + + // Determines which users or groups can access a file for a specific purpose such as reading, writing, or execution of the file. Valid values: NONE, PRESERVE. Default: PRESERVE. + PosixPermissions *string `json:"posixPermissions,omitempty" tf:"posix_permissions,omitempty"` + + // Whether files deleted in the source should be removed or preserved in the destination file system. Valid values: PRESERVE, REMOVE. Default: PRESERVE. + PreserveDeletedFiles *string `json:"preserveDeletedFiles,omitempty" tf:"preserve_deleted_files,omitempty"` + + // Whether the DataSync Task should preserve the metadata of block and character devices in the source files system, and recreate the files with that device name and metadata on the destination. The DataSync Task can’t sync the actual contents of such devices, because many of the devices are non-terminal and don’t return an end of file (EOF) marker. Valid values: NONE, PRESERVE. Default: NONE (ignore special devices). + PreserveDevices *string `json:"preserveDevices,omitempty" tf:"preserve_devices,omitempty"` + + // Determines which components of the SMB security descriptor are copied from source to destination objects. This value is only used for transfers between SMB and Amazon FSx for Windows File Server locations, or between two Amazon FSx for Windows File Server locations. Valid values: NONE, OWNER_DACL, OWNER_DACL_SACL. Default: OWNER_DACL. + SecurityDescriptorCopyFlags *string `json:"securityDescriptorCopyFlags,omitempty" tf:"security_descriptor_copy_flags,omitempty"` + + // Determines whether tasks should be queued before executing the tasks. Valid values: ENABLED, DISABLED. Default ENABLED. 
+ TaskQueueing *string `json:"taskQueueing,omitempty" tf:"task_queueing,omitempty"` + + // Determines whether DataSync transfers only the data and metadata that differ between the source and the destination location, or whether DataSync transfers all the content from the source, without comparing to the destination location. Valid values: CHANGED, ALL. Default: CHANGED + TransferMode *string `json:"transferMode,omitempty" tf:"transfer_mode,omitempty"` + + // User identifier of the file's owners. Valid values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE (preserve integer value of the ID). + UID *string `json:"uid,omitempty" tf:"uid,omitempty"` + + // Whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred. Valid values: NONE, POINT_IN_TIME_CONSISTENT, ONLY_FILES_TRANSFERRED. Default: POINT_IN_TIME_CONSISTENT. + VerifyMode *string `json:"verifyMode,omitempty" tf:"verify_mode,omitempty"` +} + +type OptionsObservation struct { + + // A file metadata that shows the last time a file was accessed (that is when the file was read or written to). If set to BEST_EFFORT, the DataSync Task attempts to preserve the original (that is, the version before sync PREPARING phase) atime attribute on all source files. Valid values: BEST_EFFORT, NONE. Default: BEST_EFFORT. + Atime *string `json:"atime,omitempty" tf:"atime,omitempty"` + + // Limits the bandwidth utilized. For example, to set a maximum of 1 MB, set this value to 1048576. Value values: -1 or greater. Default: -1 (unlimited). + BytesPerSecond *float64 `json:"bytesPerSecond,omitempty" tf:"bytes_per_second,omitempty"` + + // Group identifier of the file's owners. Valid values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE (preserve integer value of the ID). + GID *string `json:"gid,omitempty" tf:"gid,omitempty"` + + // Determines the type of logs that DataSync publishes to a log stream in the Amazon CloudWatch log group that you provide. 
Valid values: OFF, BASIC, TRANSFER. Default: OFF. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + + // A file metadata that indicates the last time a file was modified (written to) before the sync PREPARING phase. Value values: NONE, PRESERVE. Default: PRESERVE. + Mtime *string `json:"mtime,omitempty" tf:"mtime,omitempty"` + + // Specifies whether object tags are maintained when transferring between object storage systems. If you want your DataSync task to ignore object tags, specify the NONE value. Valid values: PRESERVE, NONE. Default value: PRESERVE. + ObjectTags *string `json:"objectTags,omitempty" tf:"object_tags,omitempty"` + + // Determines whether files at the destination should be overwritten or preserved when copying files. Valid values: ALWAYS, NEVER. Default: ALWAYS. + OverwriteMode *string `json:"overwriteMode,omitempty" tf:"overwrite_mode,omitempty"` + + // Determines which users or groups can access a file for a specific purpose such as reading, writing, or execution of the file. Valid values: NONE, PRESERVE. Default: PRESERVE. + PosixPermissions *string `json:"posixPermissions,omitempty" tf:"posix_permissions,omitempty"` + + // Whether files deleted in the source should be removed or preserved in the destination file system. Valid values: PRESERVE, REMOVE. Default: PRESERVE. + PreserveDeletedFiles *string `json:"preserveDeletedFiles,omitempty" tf:"preserve_deleted_files,omitempty"` + + // Whether the DataSync Task should preserve the metadata of block and character devices in the source files system, and recreate the files with that device name and metadata on the destination. The DataSync Task can’t sync the actual contents of such devices, because many of the devices are non-terminal and don’t return an end of file (EOF) marker. Valid values: NONE, PRESERVE. Default: NONE (ignore special devices). 
+ PreserveDevices *string `json:"preserveDevices,omitempty" tf:"preserve_devices,omitempty"` + + // Determines which components of the SMB security descriptor are copied from source to destination objects. This value is only used for transfers between SMB and Amazon FSx for Windows File Server locations, or between two Amazon FSx for Windows File Server locations. Valid values: NONE, OWNER_DACL, OWNER_DACL_SACL. Default: OWNER_DACL. + SecurityDescriptorCopyFlags *string `json:"securityDescriptorCopyFlags,omitempty" tf:"security_descriptor_copy_flags,omitempty"` + + // Determines whether tasks should be queued before executing the tasks. Valid values: ENABLED, DISABLED. Default ENABLED. + TaskQueueing *string `json:"taskQueueing,omitempty" tf:"task_queueing,omitempty"` + + // Determines whether DataSync transfers only the data and metadata that differ between the source and the destination location, or whether DataSync transfers all the content from the source, without comparing to the destination location. Valid values: CHANGED, ALL. Default: CHANGED + TransferMode *string `json:"transferMode,omitempty" tf:"transfer_mode,omitempty"` + + // User identifier of the file's owners. Valid values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE (preserve integer value of the ID). + UID *string `json:"uid,omitempty" tf:"uid,omitempty"` + + // Whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred. Valid values: NONE, POINT_IN_TIME_CONSISTENT, ONLY_FILES_TRANSFERRED. Default: POINT_IN_TIME_CONSISTENT. + VerifyMode *string `json:"verifyMode,omitempty" tf:"verify_mode,omitempty"` +} + +type OptionsParameters struct { + + // A file metadata that shows the last time a file was accessed (that is when the file was read or written to). If set to BEST_EFFORT, the DataSync Task attempts to preserve the original (that is, the version before sync PREPARING phase) atime attribute on all source files. 
Valid values: BEST_EFFORT, NONE. Default: BEST_EFFORT. + // +kubebuilder:validation:Optional + Atime *string `json:"atime,omitempty" tf:"atime,omitempty"` + + // Limits the bandwidth utilized. For example, to set a maximum of 1 MB, set this value to 1048576. Value values: -1 or greater. Default: -1 (unlimited). + // +kubebuilder:validation:Optional + BytesPerSecond *float64 `json:"bytesPerSecond,omitempty" tf:"bytes_per_second,omitempty"` + + // Group identifier of the file's owners. Valid values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE (preserve integer value of the ID). + // +kubebuilder:validation:Optional + GID *string `json:"gid,omitempty" tf:"gid,omitempty"` + + // Determines the type of logs that DataSync publishes to a log stream in the Amazon CloudWatch log group that you provide. Valid values: OFF, BASIC, TRANSFER. Default: OFF. + // +kubebuilder:validation:Optional + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + + // A file metadata that indicates the last time a file was modified (written to) before the sync PREPARING phase. Value values: NONE, PRESERVE. Default: PRESERVE. + // +kubebuilder:validation:Optional + Mtime *string `json:"mtime,omitempty" tf:"mtime,omitempty"` + + // Specifies whether object tags are maintained when transferring between object storage systems. If you want your DataSync task to ignore object tags, specify the NONE value. Valid values: PRESERVE, NONE. Default value: PRESERVE. + // +kubebuilder:validation:Optional + ObjectTags *string `json:"objectTags,omitempty" tf:"object_tags,omitempty"` + + // Determines whether files at the destination should be overwritten or preserved when copying files. Valid values: ALWAYS, NEVER. Default: ALWAYS. + // +kubebuilder:validation:Optional + OverwriteMode *string `json:"overwriteMode,omitempty" tf:"overwrite_mode,omitempty"` + + // Determines which users or groups can access a file for a specific purpose such as reading, writing, or execution of the file. 
Valid values: NONE, PRESERVE. Default: PRESERVE. + // +kubebuilder:validation:Optional + PosixPermissions *string `json:"posixPermissions,omitempty" tf:"posix_permissions,omitempty"` + + // Whether files deleted in the source should be removed or preserved in the destination file system. Valid values: PRESERVE, REMOVE. Default: PRESERVE. + // +kubebuilder:validation:Optional + PreserveDeletedFiles *string `json:"preserveDeletedFiles,omitempty" tf:"preserve_deleted_files,omitempty"` + + // Whether the DataSync Task should preserve the metadata of block and character devices in the source files system, and recreate the files with that device name and metadata on the destination. The DataSync Task can’t sync the actual contents of such devices, because many of the devices are non-terminal and don’t return an end of file (EOF) marker. Valid values: NONE, PRESERVE. Default: NONE (ignore special devices). + // +kubebuilder:validation:Optional + PreserveDevices *string `json:"preserveDevices,omitempty" tf:"preserve_devices,omitempty"` + + // Determines which components of the SMB security descriptor are copied from source to destination objects. This value is only used for transfers between SMB and Amazon FSx for Windows File Server locations, or between two Amazon FSx for Windows File Server locations. Valid values: NONE, OWNER_DACL, OWNER_DACL_SACL. Default: OWNER_DACL. + // +kubebuilder:validation:Optional + SecurityDescriptorCopyFlags *string `json:"securityDescriptorCopyFlags,omitempty" tf:"security_descriptor_copy_flags,omitempty"` + + // Determines whether tasks should be queued before executing the tasks. Valid values: ENABLED, DISABLED. Default ENABLED. 
+ // +kubebuilder:validation:Optional + TaskQueueing *string `json:"taskQueueing,omitempty" tf:"task_queueing,omitempty"` + + // Determines whether DataSync transfers only the data and metadata that differ between the source and the destination location, or whether DataSync transfers all the content from the source, without comparing to the destination location. Valid values: CHANGED, ALL. Default: CHANGED + // +kubebuilder:validation:Optional + TransferMode *string `json:"transferMode,omitempty" tf:"transfer_mode,omitempty"` + + // User identifier of the file's owners. Valid values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE (preserve integer value of the ID). + // +kubebuilder:validation:Optional + UID *string `json:"uid,omitempty" tf:"uid,omitempty"` + + // Whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred. Valid values: NONE, POINT_IN_TIME_CONSISTENT, ONLY_FILES_TRANSFERRED. Default: POINT_IN_TIME_CONSISTENT. + // +kubebuilder:validation:Optional + VerifyMode *string `json:"verifyMode,omitempty" tf:"verify_mode,omitempty"` +} + +type ReportOverridesInitParameters struct { + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to delete in your destination location. This only applies if you configure your task to delete data in the destination that isn't in the source. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + DeletedOverride *string `json:"deletedOverride,omitempty" tf:"deleted_override,omitempty"` + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to skip during your transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + SkippedOverride *string `json:"skippedOverride,omitempty" tf:"skipped_override,omitempty"` + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to transfer. 
Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + TransferredOverride *string `json:"transferredOverride,omitempty" tf:"transferred_override,omitempty"` + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify at the end of your transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + VerifiedOverride *string `json:"verifiedOverride,omitempty" tf:"verified_override,omitempty"` +} + +type ReportOverridesObservation struct { + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to delete in your destination location. This only applies if you configure your task to delete data in the destination that isn't in the source. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + DeletedOverride *string `json:"deletedOverride,omitempty" tf:"deleted_override,omitempty"` + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to skip during your transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + SkippedOverride *string `json:"skippedOverride,omitempty" tf:"skipped_override,omitempty"` + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + TransferredOverride *string `json:"transferredOverride,omitempty" tf:"transferred_override,omitempty"` + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify at the end of your transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + VerifiedOverride *string `json:"verifiedOverride,omitempty" tf:"verified_override,omitempty"` +} + +type ReportOverridesParameters struct { + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to delete in your destination location. 
This only applies if you configure your task to delete data in the destination that isn't in the source. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + // +kubebuilder:validation:Optional + DeletedOverride *string `json:"deletedOverride,omitempty" tf:"deleted_override,omitempty"` + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to skip during your transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + // +kubebuilder:validation:Optional + SkippedOverride *string `json:"skippedOverride,omitempty" tf:"skipped_override,omitempty"` + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + // +kubebuilder:validation:Optional + TransferredOverride *string `json:"transferredOverride,omitempty" tf:"transferred_override,omitempty"` + + // Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify at the end of your transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + // +kubebuilder:validation:Optional + VerifiedOverride *string `json:"verifiedOverride,omitempty" tf:"verified_override,omitempty"` +} + +type S3DestinationInitParameters struct { + + // Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. + BucketAccessRoleArn *string `json:"bucketAccessRoleArn,omitempty" tf:"bucket_access_role_arn,omitempty"` + + // Specifies the ARN of the S3 bucket where DataSync uploads your report. + S3BucketArn *string `json:"s3BucketArn,omitempty" tf:"s3_bucket_arn,omitempty"` + + // Specifies a bucket prefix for your report. + Subdirectory *string `json:"subdirectory,omitempty" tf:"subdirectory,omitempty"` +} + +type S3DestinationObservation struct { + + // Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. 
+ BucketAccessRoleArn *string `json:"bucketAccessRoleArn,omitempty" tf:"bucket_access_role_arn,omitempty"` + + // Specifies the ARN of the S3 bucket where DataSync uploads your report. + S3BucketArn *string `json:"s3BucketArn,omitempty" tf:"s3_bucket_arn,omitempty"` + + // Specifies a bucket prefix for your report. + Subdirectory *string `json:"subdirectory,omitempty" tf:"subdirectory,omitempty"` +} + +type S3DestinationParameters struct { + + // Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. + // +kubebuilder:validation:Optional + BucketAccessRoleArn *string `json:"bucketAccessRoleArn" tf:"bucket_access_role_arn,omitempty"` + + // Specifies the ARN of the S3 bucket where DataSync uploads your report. + // +kubebuilder:validation:Optional + S3BucketArn *string `json:"s3BucketArn" tf:"s3_bucket_arn,omitempty"` + + // Specifies a bucket prefix for your report. + // +kubebuilder:validation:Optional + Subdirectory *string `json:"subdirectory,omitempty" tf:"subdirectory,omitempty"` +} + +type ScheduleInitParameters struct { + + // Specifies the schedule you want your task to use for repeated executions. For more information, see Schedule Expressions for Rules. + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` +} + +type ScheduleObservation struct { + + // Specifies the schedule you want your task to use for repeated executions. For more information, see Schedule Expressions for Rules. + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` +} + +type ScheduleParameters struct { + + // Specifies the schedule you want your task to use for repeated executions. For more information, see Schedule Expressions for Rules. 
+ // +kubebuilder:validation:Optional + ScheduleExpression *string `json:"scheduleExpression" tf:"schedule_expression,omitempty"` +} + +type TaskInitParameters struct { + + // Amazon Resource Name (ARN) of the CloudWatch Log Group that is used to monitor and log events in the sync task. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + CloudwatchLogGroupArn *string `json:"cloudwatchLogGroupArn,omitempty" tf:"cloudwatch_log_group_arn,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnRef *v1.Reference `json:"cloudwatchLogGroupArnRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnSelector *v1.Selector `json:"cloudwatchLogGroupArnSelector,omitempty" tf:"-"` + + // Amazon Resource Name (ARN) of destination DataSync Location. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/datasync/v1beta2.LocationS3 + DestinationLocationArn *string `json:"destinationLocationArn,omitempty" tf:"destination_location_arn,omitempty"` + + // Reference to a LocationS3 in datasync to populate destinationLocationArn. + // +kubebuilder:validation:Optional + DestinationLocationArnRef *v1.Reference `json:"destinationLocationArnRef,omitempty" tf:"-"` + + // Selector for a LocationS3 in datasync to populate destinationLocationArn. + // +kubebuilder:validation:Optional + DestinationLocationArnSelector *v1.Selector `json:"destinationLocationArnSelector,omitempty" tf:"-"` + + // Filter rules that determines which files to exclude from a task. + Excludes *ExcludesInitParameters `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // Filter rules that determines which files to include in a task. 
+ Includes *IncludesInitParameters `json:"includes,omitempty" tf:"includes,omitempty"` + + // Name of the DataSync Task. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration block containing option that controls the default behavior when you start an execution of this DataSync Task. For each individual task execution, you can override these options by specifying an overriding configuration in those executions. + Options *OptionsInitParameters `json:"options,omitempty" tf:"options,omitempty"` + + // Specifies a schedule used to periodically transfer files from a source to a destination location. + Schedule *ScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Amazon Resource Name (ARN) of source DataSync Location. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/datasync/v1beta2.LocationS3 + SourceLocationArn *string `json:"sourceLocationArn,omitempty" tf:"source_location_arn,omitempty"` + + // Reference to a LocationS3 in datasync to populate sourceLocationArn. + // +kubebuilder:validation:Optional + SourceLocationArnRef *v1.Reference `json:"sourceLocationArnRef,omitempty" tf:"-"` + + // Selector for a LocationS3 in datasync to populate sourceLocationArn. + // +kubebuilder:validation:Optional + SourceLocationArnSelector *v1.Selector `json:"sourceLocationArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block containing the configuration of a DataSync Task Report. See task_report_config below. + TaskReportConfig *TaskReportConfigInitParameters `json:"taskReportConfig,omitempty" tf:"task_report_config,omitempty"` +} + +type TaskObservation struct { + + // Amazon Resource Name (ARN) of the DataSync Task. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Amazon Resource Name (ARN) of the CloudWatch Log Group that is used to monitor and log events in the sync task. + CloudwatchLogGroupArn *string `json:"cloudwatchLogGroupArn,omitempty" tf:"cloudwatch_log_group_arn,omitempty"` + + // Amazon Resource Name (ARN) of destination DataSync Location. + DestinationLocationArn *string `json:"destinationLocationArn,omitempty" tf:"destination_location_arn,omitempty"` + + // Filter rules that determines which files to exclude from a task. + Excludes *ExcludesObservation `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // Amazon Resource Name (ARN) of the DataSync Task. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Filter rules that determines which files to include in a task. + Includes *IncludesObservation `json:"includes,omitempty" tf:"includes,omitempty"` + + // Name of the DataSync Task. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration block containing option that controls the default behavior when you start an execution of this DataSync Task. For each individual task execution, you can override these options by specifying an overriding configuration in those executions. + Options *OptionsObservation `json:"options,omitempty" tf:"options,omitempty"` + + // Specifies a schedule used to periodically transfer files from a source to a destination location. + Schedule *ScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Amazon Resource Name (ARN) of source DataSync Location. + SourceLocationArn *string `json:"sourceLocationArn,omitempty" tf:"source_location_arn,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block containing the configuration of a DataSync Task Report. See task_report_config below. + TaskReportConfig *TaskReportConfigObservation `json:"taskReportConfig,omitempty" tf:"task_report_config,omitempty"` +} + +type TaskParameters struct { + + // Amazon Resource Name (ARN) of the CloudWatch Log Group that is used to monitor and log events in the sync task. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + CloudwatchLogGroupArn *string `json:"cloudwatchLogGroupArn,omitempty" tf:"cloudwatch_log_group_arn,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnRef *v1.Reference `json:"cloudwatchLogGroupArnRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnSelector *v1.Selector `json:"cloudwatchLogGroupArnSelector,omitempty" tf:"-"` + + // Amazon Resource Name (ARN) of destination DataSync Location. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/datasync/v1beta2.LocationS3 + // +kubebuilder:validation:Optional + DestinationLocationArn *string `json:"destinationLocationArn,omitempty" tf:"destination_location_arn,omitempty"` + + // Reference to a LocationS3 in datasync to populate destinationLocationArn. + // +kubebuilder:validation:Optional + DestinationLocationArnRef *v1.Reference `json:"destinationLocationArnRef,omitempty" tf:"-"` + + // Selector for a LocationS3 in datasync to populate destinationLocationArn. 
+ // +kubebuilder:validation:Optional + DestinationLocationArnSelector *v1.Selector `json:"destinationLocationArnSelector,omitempty" tf:"-"` + + // Filter rules that determines which files to exclude from a task. + // +kubebuilder:validation:Optional + Excludes *ExcludesParameters `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // Filter rules that determines which files to include in a task. + // +kubebuilder:validation:Optional + Includes *IncludesParameters `json:"includes,omitempty" tf:"includes,omitempty"` + + // Name of the DataSync Task. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration block containing option that controls the default behavior when you start an execution of this DataSync Task. For each individual task execution, you can override these options by specifying an overriding configuration in those executions. + // +kubebuilder:validation:Optional + Options *OptionsParameters `json:"options,omitempty" tf:"options,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies a schedule used to periodically transfer files from a source to a destination location. + // +kubebuilder:validation:Optional + Schedule *ScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Amazon Resource Name (ARN) of source DataSync Location. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/datasync/v1beta2.LocationS3 + // +kubebuilder:validation:Optional + SourceLocationArn *string `json:"sourceLocationArn,omitempty" tf:"source_location_arn,omitempty"` + + // Reference to a LocationS3 in datasync to populate sourceLocationArn. 
+ // +kubebuilder:validation:Optional + SourceLocationArnRef *v1.Reference `json:"sourceLocationArnRef,omitempty" tf:"-"` + + // Selector for a LocationS3 in datasync to populate sourceLocationArn. + // +kubebuilder:validation:Optional + SourceLocationArnSelector *v1.Selector `json:"sourceLocationArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block containing the configuration of a DataSync Task Report. See task_report_config below. + // +kubebuilder:validation:Optional + TaskReportConfig *TaskReportConfigParameters `json:"taskReportConfig,omitempty" tf:"task_report_config,omitempty"` +} + +type TaskReportConfigInitParameters struct { + + // Specifies the type of task report you'd like. Valid values: SUMMARY_ONLY and STANDARD. + OutputType *string `json:"outputType,omitempty" tf:"output_type,omitempty"` + + // Specifies whether you want your task report to include only what went wrong with your transfer or a list of what succeeded and didn't. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + ReportLevel *string `json:"reportLevel,omitempty" tf:"report_level,omitempty"` + + // Configuration block containing the configuration of the reporting level for aspects of your task report. See report_overrides below. + ReportOverrides *ReportOverridesInitParameters `json:"reportOverrides,omitempty" tf:"report_overrides,omitempty"` + + // Configuration block containing the configuration for the Amazon S3 bucket where DataSync uploads your task report. See s3_destination below. + S3Destination *S3DestinationInitParameters `json:"s3Destination,omitempty" tf:"s3_destination,omitempty"` + + // Specifies whether your task report includes the new version of each object transferred into an S3 bucket. This only applies if you enable versioning on your bucket. 
Keep in mind that setting this to INCLUDE can increase the duration of your task execution. Valid values: INCLUDE and NONE. + S3ObjectVersioning *string `json:"s3ObjectVersioning,omitempty" tf:"s3_object_versioning,omitempty"` +} + +type TaskReportConfigObservation struct { + + // Specifies the type of task report you'd like. Valid values: SUMMARY_ONLY and STANDARD. + OutputType *string `json:"outputType,omitempty" tf:"output_type,omitempty"` + + // Specifies whether you want your task report to include only what went wrong with your transfer or a list of what succeeded and didn't. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + ReportLevel *string `json:"reportLevel,omitempty" tf:"report_level,omitempty"` + + // Configuration block containing the configuration of the reporting level for aspects of your task report. See report_overrides below. + ReportOverrides *ReportOverridesObservation `json:"reportOverrides,omitempty" tf:"report_overrides,omitempty"` + + // Configuration block containing the configuration for the Amazon S3 bucket where DataSync uploads your task report. See s3_destination below. + S3Destination *S3DestinationObservation `json:"s3Destination,omitempty" tf:"s3_destination,omitempty"` + + // Specifies whether your task report includes the new version of each object transferred into an S3 bucket. This only applies if you enable versioning on your bucket. Keep in mind that setting this to INCLUDE can increase the duration of your task execution. Valid values: INCLUDE and NONE. + S3ObjectVersioning *string `json:"s3ObjectVersioning,omitempty" tf:"s3_object_versioning,omitempty"` +} + +type TaskReportConfigParameters struct { + + // Specifies the type of task report you'd like. Valid values: SUMMARY_ONLY and STANDARD. 
+ // +kubebuilder:validation:Optional + OutputType *string `json:"outputType,omitempty" tf:"output_type,omitempty"` + + // Specifies whether you want your task report to include only what went wrong with your transfer or a list of what succeeded and didn't. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS. + // +kubebuilder:validation:Optional + ReportLevel *string `json:"reportLevel,omitempty" tf:"report_level,omitempty"` + + // Configuration block containing the configuration of the reporting level for aspects of your task report. See report_overrides below. + // +kubebuilder:validation:Optional + ReportOverrides *ReportOverridesParameters `json:"reportOverrides,omitempty" tf:"report_overrides,omitempty"` + + // Configuration block containing the configuration for the Amazon S3 bucket where DataSync uploads your task report. See s3_destination below. + // +kubebuilder:validation:Optional + S3Destination *S3DestinationParameters `json:"s3Destination" tf:"s3_destination,omitempty"` + + // Specifies whether your task report includes the new version of each object transferred into an S3 bucket. This only applies if you enable versioning on your bucket. Keep in mind that setting this to INCLUDE can increase the duration of your task execution. Valid values: INCLUDE and NONE. + // +kubebuilder:validation:Optional + S3ObjectVersioning *string `json:"s3ObjectVersioning,omitempty" tf:"s3_object_versioning,omitempty"` +} + +// TaskSpec defines the desired state of Task +type TaskSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TaskParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TaskInitParameters `json:"initProvider,omitempty"` +} + +// TaskStatus defines the observed state of Task. +type TaskStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TaskObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Task is the Schema for the Tasks API. Manages an AWS DataSync Task +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Task struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec TaskSpec `json:"spec"` + Status TaskStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TaskList contains a list of Tasks +type TaskList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Task `json:"items"` +} + +// Repository type metadata. +var ( + Task_Kind = "Task" + Task_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Task_Kind}.String() + Task_KindAPIVersion = Task_Kind + "." 
+ CRDGroupVersion.String() + Task_GroupVersionKind = CRDGroupVersion.WithKind(Task_Kind) +) + +func init() { + SchemeBuilder.Register(&Task{}, &TaskList{}) +} diff --git a/apis/dax/v1beta1/zz_generated.conversion_hubs.go b/apis/dax/v1beta1/zz_generated.conversion_hubs.go index 185600ddff..d9598eb4f4 100755 --- a/apis/dax/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/dax/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Cluster) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ParameterGroup) Hub() {} diff --git a/apis/dax/v1beta1/zz_generated.conversion_spokes.go b/apis/dax/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..49d5c675ce --- /dev/null +++ b/apis/dax/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Cluster to the hub type. +func (tr *Cluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Cluster type. 
+func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/dax/v1beta2/zz_cluster_terraformed.go b/apis/dax/v1beta2/zz_cluster_terraformed.go new file mode 100755 index 0000000000..cf9b20c362 --- /dev/null +++ b/apis/dax/v1beta2/zz_cluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "aws_dax_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dax/v1beta2/zz_cluster_types.go b/apis/dax/v1beta2/zz_cluster_types.go new file mode 100755 index 0000000000..a07d0fe423 --- /dev/null +++ b/apis/dax/v1beta2/zz_cluster_types.go @@ -0,0 +1,367 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClusterInitParameters struct { + + // List of Availability Zones in which the + // nodes will be created + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // – The type of encryption the + // cluster's endpoint should support. Valid values are: NONE and TLS. + // Default value is NONE. + ClusterEndpointEncryptionType *string `json:"clusterEndpointEncryptionType,omitempty" tf:"cluster_endpoint_encryption_type,omitempty"` + + // – Description for the cluster + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A valid Amazon Resource Name (ARN) that identifies + // an IAM role. 
At runtime, DAX will assume this role and use the role's + // permissions to access DynamoDB on your behalf + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` + + // ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: + // sun:05:00-sun:09:00 + MaintenanceWindow *string `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // – The compute and memory capacity of the nodes. See + // Nodes for supported node types + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // east-1:012345678999:my_sns_topic + NotificationTopicArn *string `json:"notificationTopicArn,omitempty" tf:"notification_topic_arn,omitempty"` + + // – Name of the parameter group to associate + // with this DAX cluster + ParameterGroupName *string `json:"parameterGroupName,omitempty" tf:"parameter_group_name,omitempty"` + + // node cluster, without any read + // replicas + ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. 
+ // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // – One or more VPC security groups associated + // with the cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Encrypt at rest options + ServerSideEncryption *ServerSideEncryptionInitParameters `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // – Name of the subnet group to be used for the + // cluster + SubnetGroupName *string `json:"subnetGroupName,omitempty" tf:"subnet_group_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ClusterObservation struct { + + // The ARN of the DAX cluster + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // List of Availability Zones in which the + // nodes will be created + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // The DNS name of the DAX cluster without the port appended + ClusterAddress *string `json:"clusterAddress,omitempty" tf:"cluster_address,omitempty"` + + // – The type of encryption the + // cluster's endpoint should support. Valid values are: NONE and TLS. + // Default value is NONE. 
+ ClusterEndpointEncryptionType *string `json:"clusterEndpointEncryptionType,omitempty" tf:"cluster_endpoint_encryption_type,omitempty"` + + // The configuration endpoint for this DAX cluster, + // consisting of a DNS name and a port number + ConfigurationEndpoint *string `json:"configurationEndpoint,omitempty" tf:"configuration_endpoint,omitempty"` + + // – Description for the cluster + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A valid Amazon Resource Name (ARN) that identifies + // an IAM role. At runtime, DAX will assume this role and use the role's + // permissions to access DynamoDB on your behalf + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: + // sun:05:00-sun:09:00 + MaintenanceWindow *string `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // – The compute and memory capacity of the nodes. See + // Nodes for supported node types + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // List of node objects including id, address, port and + // availability_zone. 
Referenceable e.g., as + // ${aws_dax_cluster.test.nodes.0.address} + Nodes []NodesObservation `json:"nodes,omitempty" tf:"nodes,omitempty"` + + // east-1:012345678999:my_sns_topic + NotificationTopicArn *string `json:"notificationTopicArn,omitempty" tf:"notification_topic_arn,omitempty"` + + // – Name of the parameter group to associate + // with this DAX cluster + ParameterGroupName *string `json:"parameterGroupName,omitempty" tf:"parameter_group_name,omitempty"` + + // The port used by the configuration endpoint + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // node cluster, without any read + // replicas + ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` + + // – One or more VPC security groups associated + // with the cluster + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Encrypt at rest options + ServerSideEncryption *ServerSideEncryptionObservation `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // – Name of the subnet group to be used for the + // cluster + SubnetGroupName *string `json:"subnetGroupName,omitempty" tf:"subnet_group_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ClusterParameters struct { + + // List of Availability Zones in which the + // nodes will be created + // +kubebuilder:validation:Optional + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // – The type of encryption the + // cluster's endpoint should support. Valid values are: NONE and TLS. 
+ // Default value is NONE. + // +kubebuilder:validation:Optional + ClusterEndpointEncryptionType *string `json:"clusterEndpointEncryptionType,omitempty" tf:"cluster_endpoint_encryption_type,omitempty"` + + // – Description for the cluster + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A valid Amazon Resource Name (ARN) that identifies + // an IAM role. At runtime, DAX will assume this role and use the role's + // permissions to access DynamoDB on your behalf + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` + + // ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: + // sun:05:00-sun:09:00 + // +kubebuilder:validation:Optional + MaintenanceWindow *string `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // – The compute and memory capacity of the nodes. 
See + // Nodes for supported node types + // +kubebuilder:validation:Optional + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // east-1:012345678999:my_sns_topic + // +kubebuilder:validation:Optional + NotificationTopicArn *string `json:"notificationTopicArn,omitempty" tf:"notification_topic_arn,omitempty"` + + // – Name of the parameter group to associate + // with this DAX cluster + // +kubebuilder:validation:Optional + ParameterGroupName *string `json:"parameterGroupName,omitempty" tf:"parameter_group_name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // node cluster, without any read + // replicas + // +kubebuilder:validation:Optional + ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. 
+ // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // – One or more VPC security groups associated + // with the cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Encrypt at rest options + // +kubebuilder:validation:Optional + ServerSideEncryption *ServerSideEncryptionParameters `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // – Name of the subnet group to be used for the + // cluster + // +kubebuilder:validation:Optional + SubnetGroupName *string `json:"subnetGroupName,omitempty" tf:"subnet_group_name,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type NodesInitParameters struct { +} + +type NodesObservation struct { + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The port used by the configuration endpoint + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type NodesParameters struct { +} + +type ServerSideEncryptionInitParameters struct { + + // Whether to enable encryption at rest. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ServerSideEncryptionObservation struct { + + // Whether to enable encryption at rest. Defaults to false. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ServerSideEncryptionParameters struct { + + // Whether to enable encryption at rest. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the Clusters API. Provides an DAX Cluster resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.nodeType) || (has(self.initProvider) && has(self.initProvider.nodeType))",message="spec.forProvider.nodeType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.replicationFactor) || (has(self.initProvider) && has(self.initProvider.replicationFactor))",message="spec.forProvider.replicationFactor is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." 
+ CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/dax/v1beta2/zz_generated.conversion_hubs.go b/apis/dax/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..5640ab69b4 --- /dev/null +++ b/apis/dax/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} diff --git a/apis/dax/v1beta2/zz_generated.deepcopy.go b/apis/dax/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6e45f58978 --- /dev/null +++ b/apis/dax/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,639 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterEndpointEncryptionType != nil { + in, out := &in.ClusterEndpointEncryptionType, &out.ClusterEndpointEncryptionType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.NotificationTopicArn != nil { + in, out := &in.NotificationTopicArn, &out.NotificationTopicArn + *out = new(string) + **out = **in + } + if in.ParameterGroupName != nil { + in, out := &in.ParameterGroupName, &out.ParameterGroupName + *out = new(string) + **out = **in + } + if in.ReplicationFactor != nil { + in, out := &in.ReplicationFactor, &out.ReplicationFactor + *out = new(float64) + **out = **in + } + if in.SecurityGroupIDRefs != nil { + in, out := 
&in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(ServerSideEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetGroupName != nil { + in, out := &in.SubnetGroupName, &out.SubnetGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterAddress != nil { + in, out := &in.ClusterAddress, &out.ClusterAddress + *out = new(string) + **out = **in + } + if in.ClusterEndpointEncryptionType != nil { + in, out := &in.ClusterEndpointEncryptionType, &out.ClusterEndpointEncryptionType + *out = new(string) + **out = **in + } + if in.ConfigurationEndpoint != nil { + in, out := &in.ConfigurationEndpoint, &out.ConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + 
**out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]NodesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotificationTopicArn != nil { + in, out := &in.NotificationTopicArn, &out.NotificationTopicArn + *out = new(string) + **out = **in + } + if in.ParameterGroupName != nil { + in, out := &in.ParameterGroupName, &out.ParameterGroupName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ReplicationFactor != nil { + in, out := &in.ReplicationFactor, &out.ReplicationFactor + *out = new(float64) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(ServerSideEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.SubnetGroupName != nil { + in, out := &in.SubnetGroupName, &out.SubnetGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterEndpointEncryptionType != nil { + in, out := &in.ClusterEndpointEncryptionType, &out.ClusterEndpointEncryptionType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.NotificationTopicArn != nil { + in, out := 
&in.NotificationTopicArn, &out.NotificationTopicArn + *out = new(string) + **out = **in + } + if in.ParameterGroupName != nil { + in, out := &in.ParameterGroupName, &out.ParameterGroupName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReplicationFactor != nil { + in, out := &in.ReplicationFactor, &out.ReplicationFactor + *out = new(float64) + **out = **in + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(ServerSideEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetGroupName != nil { + in, out := &in.SubnetGroupName, &out.SubnetGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. 
+func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodesInitParameters) DeepCopyInto(out *NodesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodesInitParameters. +func (in *NodesInitParameters) DeepCopy() *NodesInitParameters { + if in == nil { + return nil + } + out := new(NodesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodesObservation) DeepCopyInto(out *NodesObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodesObservation. +func (in *NodesObservation) DeepCopy() *NodesObservation { + if in == nil { + return nil + } + out := new(NodesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodesParameters) DeepCopyInto(out *NodesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodesParameters. +func (in *NodesParameters) DeepCopy() *NodesParameters { + if in == nil { + return nil + } + out := new(NodesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionInitParameters) DeepCopyInto(out *ServerSideEncryptionInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionInitParameters. 
+func (in *ServerSideEncryptionInitParameters) DeepCopy() *ServerSideEncryptionInitParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionObservation) DeepCopyInto(out *ServerSideEncryptionObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionObservation. +func (in *ServerSideEncryptionObservation) DeepCopy() *ServerSideEncryptionObservation { + if in == nil { + return nil + } + out := new(ServerSideEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionParameters) DeepCopyInto(out *ServerSideEncryptionParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionParameters. +func (in *ServerSideEncryptionParameters) DeepCopy() *ServerSideEncryptionParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dax/v1beta2/zz_generated.managed.go b/apis/dax/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..8a26829392 --- /dev/null +++ b/apis/dax/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. +func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. +func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. 
+func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dax/v1beta2/zz_generated.managedlist.go b/apis/dax/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..a6628c8e29 --- /dev/null +++ b/apis/dax/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ClusterList. +func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dax/v1beta2/zz_generated.resolvers.go b/apis/dax/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..a6df80fd44 --- /dev/null +++ b/apis/dax/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,108 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Cluster. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.IAMRoleArnRef, + Selector: mg.Spec.ForProvider.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMRoleArn") + } + mg.Spec.ForProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IAMRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = 
apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.IAMRoleArnRef, + Selector: mg.Spec.InitProvider.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMRoleArn") + } + mg.Spec.InitProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IAMRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/dax/v1beta2/zz_groupversion_info.go b/apis/dax/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..1437103863 --- /dev/null +++ b/apis/dax/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=dax.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "dax.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/deploy/v1beta1/zz_generated.conversion_hubs.go b/apis/deploy/v1beta1/zz_generated.conversion_hubs.go index 4c56f14335..6b6c3ee1ee 100755 --- a/apis/deploy/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/deploy/v1beta1/zz_generated.conversion_hubs.go @@ -8,9 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *App) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DeploymentConfig) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DeploymentGroup) Hub() {} diff --git a/apis/deploy/v1beta1/zz_generated.conversion_spokes.go b/apis/deploy/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ddd45439dd --- /dev/null +++ b/apis/deploy/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this DeploymentConfig to the hub type. 
+func (tr *DeploymentConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DeploymentConfig type. +func (tr *DeploymentConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DeploymentGroup to the hub type. +func (tr *DeploymentGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DeploymentGroup type. 
+func (tr *DeploymentGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/deploy/v1beta2/zz_deploymentconfig_terraformed.go b/apis/deploy/v1beta2/zz_deploymentconfig_terraformed.go new file mode 100755 index 0000000000..8cb11fbb1f --- /dev/null +++ b/apis/deploy/v1beta2/zz_deploymentconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DeploymentConfig +func (mg *DeploymentConfig) GetTerraformResourceType() string { + return "aws_codedeploy_deployment_config" +} + +// GetConnectionDetailsMapping for this DeploymentConfig +func (tr *DeploymentConfig) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DeploymentConfig +func (tr *DeploymentConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DeploymentConfig +func (tr *DeploymentConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DeploymentConfig 
+func (tr *DeploymentConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DeploymentConfig +func (tr *DeploymentConfig) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DeploymentConfig +func (tr *DeploymentConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DeploymentConfig +func (tr *DeploymentConfig) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DeploymentConfig +func (tr *DeploymentConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DeploymentConfig using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DeploymentConfig) LateInitialize(attrs []byte) (bool, error) { + params := &DeploymentConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DeploymentConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/deploy/v1beta2/zz_deploymentconfig_types.go b/apis/deploy/v1beta2/zz_deploymentconfig_types.go new file mode 100755 index 0000000000..e430e8cb73 --- /dev/null +++ b/apis/deploy/v1beta2/zz_deploymentconfig_types.go @@ -0,0 +1,262 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DeploymentConfigInitParameters struct { + + // The compute platform can be Server, Lambda, or ECS. Default is Server. + ComputePlatform *string `json:"computePlatform,omitempty" tf:"compute_platform,omitempty"` + + // A minimum_healthy_hosts block. Required for Server compute platform. 
Minimum Healthy Hosts are documented below. + MinimumHealthyHosts *MinimumHealthyHostsInitParameters `json:"minimumHealthyHosts,omitempty" tf:"minimum_healthy_hosts,omitempty"` + + // A traffic_routing_config block. Traffic Routing Config is documented below. + TrafficRoutingConfig *TrafficRoutingConfigInitParameters `json:"trafficRoutingConfig,omitempty" tf:"traffic_routing_config,omitempty"` +} + +type DeploymentConfigObservation struct { + + // The ARN of the deployment config. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The compute platform can be Server, Lambda, or ECS. Default is Server. + ComputePlatform *string `json:"computePlatform,omitempty" tf:"compute_platform,omitempty"` + + // The AWS Assigned deployment config id + DeploymentConfigID *string `json:"deploymentConfigId,omitempty" tf:"deployment_config_id,omitempty"` + + // The deployment group's config name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A minimum_healthy_hosts block. Required for Server compute platform. Minimum Healthy Hosts are documented below. + MinimumHealthyHosts *MinimumHealthyHostsObservation `json:"minimumHealthyHosts,omitempty" tf:"minimum_healthy_hosts,omitempty"` + + // A traffic_routing_config block. Traffic Routing Config is documented below. + TrafficRoutingConfig *TrafficRoutingConfigObservation `json:"trafficRoutingConfig,omitempty" tf:"traffic_routing_config,omitempty"` +} + +type DeploymentConfigParameters struct { + + // The compute platform can be Server, Lambda, or ECS. Default is Server. + // +kubebuilder:validation:Optional + ComputePlatform *string `json:"computePlatform,omitempty" tf:"compute_platform,omitempty"` + + // A minimum_healthy_hosts block. Required for Server compute platform. Minimum Healthy Hosts are documented below. 
+ // +kubebuilder:validation:Optional + MinimumHealthyHosts *MinimumHealthyHostsParameters `json:"minimumHealthyHosts,omitempty" tf:"minimum_healthy_hosts,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A traffic_routing_config block. Traffic Routing Config is documented below. + // +kubebuilder:validation:Optional + TrafficRoutingConfig *TrafficRoutingConfigParameters `json:"trafficRoutingConfig,omitempty" tf:"traffic_routing_config,omitempty"` +} + +type MinimumHealthyHostsInitParameters struct { + + // The type can either be FLEET_PERCENT or HOST_COUNT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value when the type is FLEET_PERCENT represents the minimum number of healthy instances as + // a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the + // deployment, AWS CodeDeploy converts the percentage to the equivalent number of instance and rounds up fractional instances. + // When the type is HOST_COUNT, the value represents the minimum number of healthy instances as an absolute value. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type MinimumHealthyHostsObservation struct { + + // The type can either be FLEET_PERCENT or HOST_COUNT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value when the type is FLEET_PERCENT represents the minimum number of healthy instances as + // a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the + // deployment, AWS CodeDeploy converts the percentage to the equivalent number of instance and rounds up fractional instances. + // When the type is HOST_COUNT, the value represents the minimum number of healthy instances as an absolute value. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type MinimumHealthyHostsParameters struct { + + // The type can either be FLEET_PERCENT or HOST_COUNT. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value when the type is FLEET_PERCENT represents the minimum number of healthy instances as + // a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the + // deployment, AWS CodeDeploy converts the percentage to the equivalent number of instance and rounds up fractional instances. + // When the type is HOST_COUNT, the value represents the minimum number of healthy instances as an absolute value. + // +kubebuilder:validation:Optional + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimeBasedCanaryInitParameters struct { + + // The number of minutes between the first and second traffic shifts of a TimeBasedCanary deployment. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The percentage of traffic to shift in the first increment of a TimeBasedCanary deployment. + Percentage *float64 `json:"percentage,omitempty" tf:"percentage,omitempty"` +} + +type TimeBasedCanaryObservation struct { + + // The number of minutes between the first and second traffic shifts of a TimeBasedCanary deployment. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The percentage of traffic to shift in the first increment of a TimeBasedCanary deployment. + Percentage *float64 `json:"percentage,omitempty" tf:"percentage,omitempty"` +} + +type TimeBasedCanaryParameters struct { + + // The number of minutes between the first and second traffic shifts of a TimeBasedCanary deployment. 
+ // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The percentage of traffic to shift in the first increment of a TimeBasedCanary deployment. + // +kubebuilder:validation:Optional + Percentage *float64 `json:"percentage,omitempty" tf:"percentage,omitempty"` +} + +type TimeBasedLinearInitParameters struct { + + // The number of minutes between the first and second traffic shifts of a TimeBasedCanary deployment. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The percentage of traffic to shift in the first increment of a TimeBasedCanary deployment. + Percentage *float64 `json:"percentage,omitempty" tf:"percentage,omitempty"` +} + +type TimeBasedLinearObservation struct { + + // The number of minutes between the first and second traffic shifts of a TimeBasedCanary deployment. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The percentage of traffic to shift in the first increment of a TimeBasedCanary deployment. + Percentage *float64 `json:"percentage,omitempty" tf:"percentage,omitempty"` +} + +type TimeBasedLinearParameters struct { + + // The number of minutes between the first and second traffic shifts of a TimeBasedCanary deployment. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The percentage of traffic to shift in the first increment of a TimeBasedCanary deployment. + // +kubebuilder:validation:Optional + Percentage *float64 `json:"percentage,omitempty" tf:"percentage,omitempty"` +} + +type TrafficRoutingConfigInitParameters struct { + + // The time based canary configuration information. If type is TimeBasedLinear, use time_based_linear instead. + TimeBasedCanary *TimeBasedCanaryInitParameters `json:"timeBasedCanary,omitempty" tf:"time_based_canary,omitempty"` + + // The time based linear configuration information. 
If type is TimeBasedCanary, use time_based_canary instead. + TimeBasedLinear *TimeBasedLinearInitParameters `json:"timeBasedLinear,omitempty" tf:"time_based_linear,omitempty"` + + // Type of traffic routing config. One of TimeBasedCanary, TimeBasedLinear, AllAtOnce. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TrafficRoutingConfigObservation struct { + + // The time based canary configuration information. If type is TimeBasedLinear, use time_based_linear instead. + TimeBasedCanary *TimeBasedCanaryObservation `json:"timeBasedCanary,omitempty" tf:"time_based_canary,omitempty"` + + // The time based linear configuration information. If type is TimeBasedCanary, use time_based_canary instead. + TimeBasedLinear *TimeBasedLinearObservation `json:"timeBasedLinear,omitempty" tf:"time_based_linear,omitempty"` + + // Type of traffic routing config. One of TimeBasedCanary, TimeBasedLinear, AllAtOnce. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TrafficRoutingConfigParameters struct { + + // The time based canary configuration information. If type is TimeBasedLinear, use time_based_linear instead. + // +kubebuilder:validation:Optional + TimeBasedCanary *TimeBasedCanaryParameters `json:"timeBasedCanary,omitempty" tf:"time_based_canary,omitempty"` + + // The time based linear configuration information. If type is TimeBasedCanary, use time_based_canary instead. + // +kubebuilder:validation:Optional + TimeBasedLinear *TimeBasedLinearParameters `json:"timeBasedLinear,omitempty" tf:"time_based_linear,omitempty"` + + // Type of traffic routing config. One of TimeBasedCanary, TimeBasedLinear, AllAtOnce. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// DeploymentConfigSpec defines the desired state of DeploymentConfig +type DeploymentConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DeploymentConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DeploymentConfigInitParameters `json:"initProvider,omitempty"` +} + +// DeploymentConfigStatus defines the observed state of DeploymentConfig. +type DeploymentConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DeploymentConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DeploymentConfig is the Schema for the DeploymentConfigs API. Provides a CodeDeploy deployment config. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DeploymentConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DeploymentConfigSpec `json:"spec"` + Status DeploymentConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeploymentConfigList contains a list of DeploymentConfigs +type DeploymentConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DeploymentConfig `json:"items"` +} + +// Repository type metadata. +var ( + DeploymentConfig_Kind = "DeploymentConfig" + DeploymentConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DeploymentConfig_Kind}.String() + DeploymentConfig_KindAPIVersion = DeploymentConfig_Kind + "." + CRDGroupVersion.String() + DeploymentConfig_GroupVersionKind = CRDGroupVersion.WithKind(DeploymentConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&DeploymentConfig{}, &DeploymentConfigList{}) +} diff --git a/apis/deploy/v1beta2/zz_deploymentgroup_terraformed.go b/apis/deploy/v1beta2/zz_deploymentgroup_terraformed.go new file mode 100755 index 0000000000..0b227b5e68 --- /dev/null +++ b/apis/deploy/v1beta2/zz_deploymentgroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DeploymentGroup +func (mg *DeploymentGroup) GetTerraformResourceType() string { + return "aws_codedeploy_deployment_group" +} + +// GetConnectionDetailsMapping for this DeploymentGroup +func (tr *DeploymentGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DeploymentGroup +func (tr *DeploymentGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DeploymentGroup +func (tr *DeploymentGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DeploymentGroup +func (tr *DeploymentGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DeploymentGroup +func (tr *DeploymentGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DeploymentGroup +func (tr *DeploymentGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DeploymentGroup +func (tr *DeploymentGroup) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DeploymentGroup +func (tr *DeploymentGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DeploymentGroup using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DeploymentGroup) LateInitialize(attrs []byte) (bool, error) { + params := &DeploymentGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DeploymentGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/deploy/v1beta2/zz_deploymentgroup_types.go b/apis/deploy/v1beta2/zz_deploymentgroup_types.go new file mode 100755 index 0000000000..9439313b5c --- /dev/null +++ b/apis/deploy/v1beta2/zz_deploymentgroup_types.go @@ -0,0 +1,995 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AlarmConfigurationInitParameters struct { + + // A list of alarms configured for the deployment group. A maximum of 10 alarms can be added to a deployment group. + // +listType=set + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from CloudWatch. The default value is false. + IgnorePollAlarmFailure *bool `json:"ignorePollAlarmFailure,omitempty" tf:"ignore_poll_alarm_failure,omitempty"` +} + +type AlarmConfigurationObservation struct { + + // A list of alarms configured for the deployment group. A maximum of 10 alarms can be added to a deployment group. + // +listType=set + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // Indicates whether the alarm configuration is enabled. 
This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from CloudWatch. The default value is false. + IgnorePollAlarmFailure *bool `json:"ignorePollAlarmFailure,omitempty" tf:"ignore_poll_alarm_failure,omitempty"` +} + +type AlarmConfigurationParameters struct { + + // A list of alarms configured for the deployment group. A maximum of 10 alarms can be added to a deployment group. + // +kubebuilder:validation:Optional + // +listType=set + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from CloudWatch. The default value is false. + // +kubebuilder:validation:Optional + IgnorePollAlarmFailure *bool `json:"ignorePollAlarmFailure,omitempty" tf:"ignore_poll_alarm_failure,omitempty"` +} + +type AutoRollbackConfigurationInitParameters struct { + + // Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The event type or types that trigger a rollback. Supported types are DEPLOYMENT_FAILURE, DEPLOYMENT_STOP_ON_ALARM and DEPLOYMENT_STOP_ON_REQUEST. 
+ // +listType=set + Events []*string `json:"events,omitempty" tf:"events,omitempty"` +} + +type AutoRollbackConfigurationObservation struct { + + // Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The event type or types that trigger a rollback. Supported types are DEPLOYMENT_FAILURE, DEPLOYMENT_STOP_ON_ALARM and DEPLOYMENT_STOP_ON_REQUEST. + // +listType=set + Events []*string `json:"events,omitempty" tf:"events,omitempty"` +} + +type AutoRollbackConfigurationParameters struct { + + // Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The event type or types that trigger a rollback. Supported types are DEPLOYMENT_FAILURE, DEPLOYMENT_STOP_ON_ALARM and DEPLOYMENT_STOP_ON_REQUEST. + // +kubebuilder:validation:Optional + // +listType=set + Events []*string `json:"events,omitempty" tf:"events,omitempty"` +} + +type BlueGreenDeploymentConfigInitParameters struct { + + // Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment (documented below). + DeploymentReadyOption *DeploymentReadyOptionInitParameters `json:"deploymentReadyOption,omitempty" tf:"deployment_ready_option,omitempty"` + + // Information about how instances are provisioned for a replacement environment in a blue/green deployment (documented below). 
+ GreenFleetProvisioningOption *GreenFleetProvisioningOptionInitParameters `json:"greenFleetProvisioningOption,omitempty" tf:"green_fleet_provisioning_option,omitempty"` + + // Information about whether to terminate instances in the original fleet during a blue/green deployment (documented below). + TerminateBlueInstancesOnDeploymentSuccess *TerminateBlueInstancesOnDeploymentSuccessInitParameters `json:"terminateBlueInstancesOnDeploymentSuccess,omitempty" tf:"terminate_blue_instances_on_deployment_success,omitempty"` +} + +type BlueGreenDeploymentConfigObservation struct { + + // Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment (documented below). + DeploymentReadyOption *DeploymentReadyOptionObservation `json:"deploymentReadyOption,omitempty" tf:"deployment_ready_option,omitempty"` + + // Information about how instances are provisioned for a replacement environment in a blue/green deployment (documented below). + GreenFleetProvisioningOption *GreenFleetProvisioningOptionObservation `json:"greenFleetProvisioningOption,omitempty" tf:"green_fleet_provisioning_option,omitempty"` + + // Information about whether to terminate instances in the original fleet during a blue/green deployment (documented below). + TerminateBlueInstancesOnDeploymentSuccess *TerminateBlueInstancesOnDeploymentSuccessObservation `json:"terminateBlueInstancesOnDeploymentSuccess,omitempty" tf:"terminate_blue_instances_on_deployment_success,omitempty"` +} + +type BlueGreenDeploymentConfigParameters struct { + + // Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment (documented below). 
+ // +kubebuilder:validation:Optional + DeploymentReadyOption *DeploymentReadyOptionParameters `json:"deploymentReadyOption,omitempty" tf:"deployment_ready_option,omitempty"` + + // Information about how instances are provisioned for a replacement environment in a blue/green deployment (documented below). + // +kubebuilder:validation:Optional + GreenFleetProvisioningOption *GreenFleetProvisioningOptionParameters `json:"greenFleetProvisioningOption,omitempty" tf:"green_fleet_provisioning_option,omitempty"` + + // Information about whether to terminate instances in the original fleet during a blue/green deployment (documented below). + // +kubebuilder:validation:Optional + TerminateBlueInstancesOnDeploymentSuccess *TerminateBlueInstancesOnDeploymentSuccessParameters `json:"terminateBlueInstancesOnDeploymentSuccess,omitempty" tf:"terminate_blue_instances_on_deployment_success,omitempty"` +} + +type DeploymentGroupInitParameters struct { + + // Configuration block of alarms associated with the deployment group (documented below). + AlarmConfiguration *AlarmConfigurationInitParameters `json:"alarmConfiguration,omitempty" tf:"alarm_configuration,omitempty"` + + // Configuration block of the automatic rollback configuration associated with the deployment group (documented below). + AutoRollbackConfiguration *AutoRollbackConfigurationInitParameters `json:"autoRollbackConfiguration,omitempty" tf:"auto_rollback_configuration,omitempty"` + + // Autoscaling groups associated with the deployment group. + // +listType=set + AutoscalingGroups []*string `json:"autoscalingGroups,omitempty" tf:"autoscaling_groups,omitempty"` + + // Configuration block of the blue/green deployment options for a deployment group (documented below). + BlueGreenDeploymentConfig *BlueGreenDeploymentConfigInitParameters `json:"blueGreenDeploymentConfig,omitempty" tf:"blue_green_deployment_config,omitempty"` + + // The name of the group's deployment config. The default is "CodeDeployDefault.OneAtATime". 
+ DeploymentConfigName *string `json:"deploymentConfigName,omitempty" tf:"deployment_config_name,omitempty"` + + // Configuration block of the type of deployment, either in-place or blue/green, you want to run and whether to route deployment traffic behind a load balancer (documented below). + DeploymentStyle *DeploymentStyleInitParameters `json:"deploymentStyle,omitempty" tf:"deployment_style,omitempty"` + + // Tag filters associated with the deployment group. See the AWS docs for details. + EC2TagFilter []EC2TagFilterInitParameters `json:"ec2TagFilter,omitempty" tf:"ec2_tag_filter,omitempty"` + + // Configuration block(s) of Tag filters associated with the deployment group, which are also referred to as tag groups (documented below). See the AWS docs for details. + EC2TagSet []EC2TagSetInitParameters `json:"ec2TagSet,omitempty" tf:"ec2_tag_set,omitempty"` + + // Configuration block(s) of the ECS services for a deployment group (documented below). + EcsService *EcsServiceInitParameters `json:"ecsService,omitempty" tf:"ecs_service,omitempty"` + + // Single configuration block of the load balancer to use in a blue/green deployment (documented below). + LoadBalancerInfo *LoadBalancerInfoInitParameters `json:"loadBalancerInfo,omitempty" tf:"load_balancer_info,omitempty"` + + // On premise tag filters associated with the group. See the AWS docs for details. + OnPremisesInstanceTagFilter []OnPremisesInstanceTagFilterInitParameters `json:"onPremisesInstanceTagFilter,omitempty" tf:"on_premises_instance_tag_filter,omitempty"` + + // Configuration block of Indicates what happens when new Amazon EC2 instances are launched mid-deployment and do not receive the deployed application revision. Valid values are UPDATE and IGNORE. Defaults to UPDATE. + OutdatedInstancesStrategy *string `json:"outdatedInstancesStrategy,omitempty" tf:"outdated_instances_strategy,omitempty"` + + // The service role ARN that allows deployments. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block(s) of the triggers for the deployment group (documented below). + TriggerConfiguration []TriggerConfigurationInitParameters `json:"triggerConfiguration,omitempty" tf:"trigger_configuration,omitempty"` +} + +type DeploymentGroupObservation struct { + + // Configuration block of alarms associated with the deployment group (documented below). + AlarmConfiguration *AlarmConfigurationObservation `json:"alarmConfiguration,omitempty" tf:"alarm_configuration,omitempty"` + + // The name of the application. + AppName *string `json:"appName,omitempty" tf:"app_name,omitempty"` + + // The ARN of the CodeDeploy deployment group. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration block of the automatic rollback configuration associated with the deployment group (documented below). + AutoRollbackConfiguration *AutoRollbackConfigurationObservation `json:"autoRollbackConfiguration,omitempty" tf:"auto_rollback_configuration,omitempty"` + + // Autoscaling groups associated with the deployment group. 
+ // +listType=set + AutoscalingGroups []*string `json:"autoscalingGroups,omitempty" tf:"autoscaling_groups,omitempty"` + + // Configuration block of the blue/green deployment options for a deployment group (documented below). + BlueGreenDeploymentConfig *BlueGreenDeploymentConfigObservation `json:"blueGreenDeploymentConfig,omitempty" tf:"blue_green_deployment_config,omitempty"` + + // The destination platform type for the deployment. + ComputePlatform *string `json:"computePlatform,omitempty" tf:"compute_platform,omitempty"` + + // The name of the group's deployment config. The default is "CodeDeployDefault.OneAtATime". + DeploymentConfigName *string `json:"deploymentConfigName,omitempty" tf:"deployment_config_name,omitempty"` + + // The ID of the CodeDeploy deployment group. + DeploymentGroupID *string `json:"deploymentGroupId,omitempty" tf:"deployment_group_id,omitempty"` + + // Configuration block of the type of deployment, either in-place or blue/green, you want to run and whether to route deployment traffic behind a load balancer (documented below). + DeploymentStyle *DeploymentStyleObservation `json:"deploymentStyle,omitempty" tf:"deployment_style,omitempty"` + + // Tag filters associated with the deployment group. See the AWS docs for details. + EC2TagFilter []EC2TagFilterObservation `json:"ec2TagFilter,omitempty" tf:"ec2_tag_filter,omitempty"` + + // Configuration block(s) of Tag filters associated with the deployment group, which are also referred to as tag groups (documented below). See the AWS docs for details. + EC2TagSet []EC2TagSetObservation `json:"ec2TagSet,omitempty" tf:"ec2_tag_set,omitempty"` + + // Configuration block(s) of the ECS services for a deployment group (documented below). + EcsService *EcsServiceObservation `json:"ecsService,omitempty" tf:"ecs_service,omitempty"` + + // Application name and deployment group name. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Single configuration block of the load balancer to use in a blue/green deployment (documented below). + LoadBalancerInfo *LoadBalancerInfoObservation `json:"loadBalancerInfo,omitempty" tf:"load_balancer_info,omitempty"` + + // On premise tag filters associated with the group. See the AWS docs for details. + OnPremisesInstanceTagFilter []OnPremisesInstanceTagFilterObservation `json:"onPremisesInstanceTagFilter,omitempty" tf:"on_premises_instance_tag_filter,omitempty"` + + // Configuration block of Indicates what happens when new Amazon EC2 instances are launched mid-deployment and do not receive the deployed application revision. Valid values are UPDATE and IGNORE. Defaults to UPDATE. + OutdatedInstancesStrategy *string `json:"outdatedInstancesStrategy,omitempty" tf:"outdated_instances_strategy,omitempty"` + + // The service role ARN that allows deployments. + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block(s) of the triggers for the deployment group (documented below). + TriggerConfiguration []TriggerConfigurationObservation `json:"triggerConfiguration,omitempty" tf:"trigger_configuration,omitempty"` +} + +type DeploymentGroupParameters struct { + + // Configuration block of alarms associated with the deployment group (documented below). + // +kubebuilder:validation:Optional + AlarmConfiguration *AlarmConfigurationParameters `json:"alarmConfiguration,omitempty" tf:"alarm_configuration,omitempty"` + + // The name of the application. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/deploy/v1beta1.App + // +kubebuilder:validation:Optional + AppName *string `json:"appName,omitempty" tf:"app_name,omitempty"` + + // Reference to a App in deploy to populate appName. + // +kubebuilder:validation:Optional + AppNameRef *v1.Reference `json:"appNameRef,omitempty" tf:"-"` + + // Selector for a App in deploy to populate appName. + // +kubebuilder:validation:Optional + AppNameSelector *v1.Selector `json:"appNameSelector,omitempty" tf:"-"` + + // Configuration block of the automatic rollback configuration associated with the deployment group (documented below). + // +kubebuilder:validation:Optional + AutoRollbackConfiguration *AutoRollbackConfigurationParameters `json:"autoRollbackConfiguration,omitempty" tf:"auto_rollback_configuration,omitempty"` + + // Autoscaling groups associated with the deployment group. + // +kubebuilder:validation:Optional + // +listType=set + AutoscalingGroups []*string `json:"autoscalingGroups,omitempty" tf:"autoscaling_groups,omitempty"` + + // Configuration block of the blue/green deployment options for a deployment group (documented below). + // +kubebuilder:validation:Optional + BlueGreenDeploymentConfig *BlueGreenDeploymentConfigParameters `json:"blueGreenDeploymentConfig,omitempty" tf:"blue_green_deployment_config,omitempty"` + + // The name of the group's deployment config. The default is "CodeDeployDefault.OneAtATime". + // +kubebuilder:validation:Optional + DeploymentConfigName *string `json:"deploymentConfigName,omitempty" tf:"deployment_config_name,omitempty"` + + // Configuration block of the type of deployment, either in-place or blue/green, you want to run and whether to route deployment traffic behind a load balancer (documented below). 
+ // +kubebuilder:validation:Optional + DeploymentStyle *DeploymentStyleParameters `json:"deploymentStyle,omitempty" tf:"deployment_style,omitempty"` + + // Tag filters associated with the deployment group. See the AWS docs for details. + // +kubebuilder:validation:Optional + EC2TagFilter []EC2TagFilterParameters `json:"ec2TagFilter,omitempty" tf:"ec2_tag_filter,omitempty"` + + // Configuration block(s) of Tag filters associated with the deployment group, which are also referred to as tag groups (documented below). See the AWS docs for details. + // +kubebuilder:validation:Optional + EC2TagSet []EC2TagSetParameters `json:"ec2TagSet,omitempty" tf:"ec2_tag_set,omitempty"` + + // Configuration block(s) of the ECS services for a deployment group (documented below). + // +kubebuilder:validation:Optional + EcsService *EcsServiceParameters `json:"ecsService,omitempty" tf:"ecs_service,omitempty"` + + // Single configuration block of the load balancer to use in a blue/green deployment (documented below). + // +kubebuilder:validation:Optional + LoadBalancerInfo *LoadBalancerInfoParameters `json:"loadBalancerInfo,omitempty" tf:"load_balancer_info,omitempty"` + + // On premise tag filters associated with the group. See the AWS docs for details. + // +kubebuilder:validation:Optional + OnPremisesInstanceTagFilter []OnPremisesInstanceTagFilterParameters `json:"onPremisesInstanceTagFilter,omitempty" tf:"on_premises_instance_tag_filter,omitempty"` + + // Configuration block of Indicates what happens when new Amazon EC2 instances are launched mid-deployment and do not receive the deployed application revision. Valid values are UPDATE and IGNORE. Defaults to UPDATE. + // +kubebuilder:validation:Optional + OutdatedInstancesStrategy *string `json:"outdatedInstancesStrategy,omitempty" tf:"outdated_instances_strategy,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The service role ARN that allows deployments. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block(s) of the triggers for the deployment group (documented below). + // +kubebuilder:validation:Optional + TriggerConfiguration []TriggerConfigurationParameters `json:"triggerConfiguration,omitempty" tf:"trigger_configuration,omitempty"` +} + +type DeploymentReadyOptionInitParameters struct { + + // When to reroute traffic from an original environment to a replacement environment in a blue/green deployment. + ActionOnTimeout *string `json:"actionOnTimeout,omitempty" tf:"action_on_timeout,omitempty"` + + // The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for action_on_timeout. 
+ WaitTimeInMinutes *float64 `json:"waitTimeInMinutes,omitempty" tf:"wait_time_in_minutes,omitempty"` +} + +type DeploymentReadyOptionObservation struct { + + // When to reroute traffic from an original environment to a replacement environment in a blue/green deployment. + ActionOnTimeout *string `json:"actionOnTimeout,omitempty" tf:"action_on_timeout,omitempty"` + + // The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for action_on_timeout. + WaitTimeInMinutes *float64 `json:"waitTimeInMinutes,omitempty" tf:"wait_time_in_minutes,omitempty"` +} + +type DeploymentReadyOptionParameters struct { + + // When to reroute traffic from an original environment to a replacement environment in a blue/green deployment. + // +kubebuilder:validation:Optional + ActionOnTimeout *string `json:"actionOnTimeout,omitempty" tf:"action_on_timeout,omitempty"` + + // The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for action_on_timeout. + // +kubebuilder:validation:Optional + WaitTimeInMinutes *float64 `json:"waitTimeInMinutes,omitempty" tf:"wait_time_in_minutes,omitempty"` +} + +type DeploymentStyleInitParameters struct { + + // Indicates whether to route deployment traffic behind a load balancer. Valid Values are WITH_TRAFFIC_CONTROL or WITHOUT_TRAFFIC_CONTROL. Default is WITHOUT_TRAFFIC_CONTROL. + DeploymentOption *string `json:"deploymentOption,omitempty" tf:"deployment_option,omitempty"` + + // Indicates whether to run an in-place deployment or a blue/green deployment. Valid Values are IN_PLACE or BLUE_GREEN. Default is IN_PLACE. 
+ DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` +} + +type DeploymentStyleObservation struct { + + // Indicates whether to route deployment traffic behind a load balancer. Valid Values are WITH_TRAFFIC_CONTROL or WITHOUT_TRAFFIC_CONTROL. Default is WITHOUT_TRAFFIC_CONTROL. + DeploymentOption *string `json:"deploymentOption,omitempty" tf:"deployment_option,omitempty"` + + // Indicates whether to run an in-place deployment or a blue/green deployment. Valid Values are IN_PLACE or BLUE_GREEN. Default is IN_PLACE. + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` +} + +type DeploymentStyleParameters struct { + + // Indicates whether to route deployment traffic behind a load balancer. Valid Values are WITH_TRAFFIC_CONTROL or WITHOUT_TRAFFIC_CONTROL. Default is WITHOUT_TRAFFIC_CONTROL. + // +kubebuilder:validation:Optional + DeploymentOption *string `json:"deploymentOption,omitempty" tf:"deployment_option,omitempty"` + + // Indicates whether to run an in-place deployment or a blue/green deployment. Valid Values are IN_PLACE or BLUE_GREEN. Default is IN_PLACE. + // +kubebuilder:validation:Optional + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` +} + +type EC2TagFilterInitParameters struct { + + // The key of the tag filter. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The type of the tag filter, either KEY_ONLY, VALUE_ONLY, or KEY_AND_VALUE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of the tag filter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EC2TagFilterObservation struct { + + // The key of the tag filter. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The type of the tag filter, either KEY_ONLY, VALUE_ONLY, or KEY_AND_VALUE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of the tag filter. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EC2TagFilterParameters struct { + + // The key of the tag filter. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The type of the tag filter, either KEY_ONLY, VALUE_ONLY, or KEY_AND_VALUE. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of the tag filter. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EC2TagSetEC2TagFilterInitParameters struct { + + // The key of the tag filter. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The type of the tag filter, either KEY_ONLY, VALUE_ONLY, or KEY_AND_VALUE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of the tag filter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EC2TagSetEC2TagFilterObservation struct { + + // The key of the tag filter. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The type of the tag filter, either KEY_ONLY, VALUE_ONLY, or KEY_AND_VALUE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of the tag filter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EC2TagSetEC2TagFilterParameters struct { + + // The key of the tag filter. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The type of the tag filter, either KEY_ONLY, VALUE_ONLY, or KEY_AND_VALUE. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of the tag filter. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EC2TagSetInitParameters struct { + + // Tag filters associated with the deployment group. See the AWS docs for details. 
+ EC2TagFilter []EC2TagSetEC2TagFilterInitParameters `json:"ec2TagFilter,omitempty" tf:"ec2_tag_filter,omitempty"` +} + +type EC2TagSetObservation struct { + + // Tag filters associated with the deployment group. See the AWS docs for details. + EC2TagFilter []EC2TagSetEC2TagFilterObservation `json:"ec2TagFilter,omitempty" tf:"ec2_tag_filter,omitempty"` +} + +type EC2TagSetParameters struct { + + // Tag filters associated with the deployment group. See the AWS docs for details. + // +kubebuilder:validation:Optional + EC2TagFilter []EC2TagSetEC2TagFilterParameters `json:"ec2TagFilter,omitempty" tf:"ec2_tag_filter,omitempty"` +} + +type ELBInfoInitParameters struct { + + // The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a ELB in elb to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a ELB in elb to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type ELBInfoObservation struct { + + // The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ELBInfoParameters struct { + + // The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a ELB in elb to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a ELB in elb to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type EcsServiceInitParameters struct { + + // The name of the ECS cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Cluster + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Reference to a Cluster in ecs to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameRef *v1.Reference `json:"clusterNameRef,omitempty" tf:"-"` + + // Selector for a Cluster in ecs to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameSelector *v1.Selector `json:"clusterNameSelector,omitempty" tf:"-"` + + // The name of the ECS service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Service + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Reference to a Service in ecs to populate serviceName. 
+ // +kubebuilder:validation:Optional + ServiceNameRef *v1.Reference `json:"serviceNameRef,omitempty" tf:"-"` + + // Selector for a Service in ecs to populate serviceName. + // +kubebuilder:validation:Optional + ServiceNameSelector *v1.Selector `json:"serviceNameSelector,omitempty" tf:"-"` +} + +type EcsServiceObservation struct { + + // The name of the ECS cluster. + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // The name of the ECS service. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type EcsServiceParameters struct { + + // The name of the ECS cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Cluster + // +kubebuilder:validation:Optional + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Reference to a Cluster in ecs to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameRef *v1.Reference `json:"clusterNameRef,omitempty" tf:"-"` + + // Selector for a Cluster in ecs to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameSelector *v1.Selector `json:"clusterNameSelector,omitempty" tf:"-"` + + // The name of the ECS service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Service + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Reference to a Service in ecs to populate serviceName. + // +kubebuilder:validation:Optional + ServiceNameRef *v1.Reference `json:"serviceNameRef,omitempty" tf:"-"` + + // Selector for a Service in ecs to populate serviceName. + // +kubebuilder:validation:Optional + ServiceNameSelector *v1.Selector `json:"serviceNameSelector,omitempty" tf:"-"` +} + +type GreenFleetProvisioningOptionInitParameters struct { + + // The method used to add instances to a replacement environment. 
+ Action *string `json:"action,omitempty" tf:"action,omitempty"` +} + +type GreenFleetProvisioningOptionObservation struct { + + // The method used to add instances to a replacement environment. + Action *string `json:"action,omitempty" tf:"action,omitempty"` +} + +type GreenFleetProvisioningOptionParameters struct { + + // The method used to add instances to a replacement environment. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` +} + +type LoadBalancerInfoInitParameters struct { + + // The Classic Elastic Load Balancer to use in a deployment. Conflicts with target_group_info and target_group_pair_info. + ELBInfo []ELBInfoInitParameters `json:"elbInfo,omitempty" tf:"elb_info,omitempty"` + + // The (Application/Network Load Balancer) target group to use in a deployment. Conflicts with elb_info and target_group_pair_info. + TargetGroupInfo []TargetGroupInfoInitParameters `json:"targetGroupInfo,omitempty" tf:"target_group_info,omitempty"` + + // The (Application/Network Load Balancer) target group pair to use in a deployment. Conflicts with elb_info and target_group_info. + TargetGroupPairInfo *TargetGroupPairInfoInitParameters `json:"targetGroupPairInfo,omitempty" tf:"target_group_pair_info,omitempty"` +} + +type LoadBalancerInfoObservation struct { + + // The Classic Elastic Load Balancer to use in a deployment. Conflicts with target_group_info and target_group_pair_info. + ELBInfo []ELBInfoObservation `json:"elbInfo,omitempty" tf:"elb_info,omitempty"` + + // The (Application/Network Load Balancer) target group to use in a deployment. Conflicts with elb_info and target_group_pair_info. + TargetGroupInfo []TargetGroupInfoObservation `json:"targetGroupInfo,omitempty" tf:"target_group_info,omitempty"` + + // The (Application/Network Load Balancer) target group pair to use in a deployment. Conflicts with elb_info and target_group_info. 
+ TargetGroupPairInfo *TargetGroupPairInfoObservation `json:"targetGroupPairInfo,omitempty" tf:"target_group_pair_info,omitempty"` +} + +type LoadBalancerInfoParameters struct { + + // The Classic Elastic Load Balancer to use in a deployment. Conflicts with target_group_info and target_group_pair_info. + // +kubebuilder:validation:Optional + ELBInfo []ELBInfoParameters `json:"elbInfo,omitempty" tf:"elb_info,omitempty"` + + // The (Application/Network Load Balancer) target group to use in a deployment. Conflicts with elb_info and target_group_pair_info. + // +kubebuilder:validation:Optional + TargetGroupInfo []TargetGroupInfoParameters `json:"targetGroupInfo,omitempty" tf:"target_group_info,omitempty"` + + // The (Application/Network Load Balancer) target group pair to use in a deployment. Conflicts with elb_info and target_group_info. + // +kubebuilder:validation:Optional + TargetGroupPairInfo *TargetGroupPairInfoParameters `json:"targetGroupPairInfo,omitempty" tf:"target_group_pair_info,omitempty"` +} + +type OnPremisesInstanceTagFilterInitParameters struct { + + // The key of the tag filter. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The type of the tag filter, either KEY_ONLY, VALUE_ONLY, or KEY_AND_VALUE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of the tag filter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type OnPremisesInstanceTagFilterObservation struct { + + // The key of the tag filter. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The type of the tag filter, either KEY_ONLY, VALUE_ONLY, or KEY_AND_VALUE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of the tag filter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type OnPremisesInstanceTagFilterParameters struct { + + // The key of the tag filter. 
+ // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The type of the tag filter, either KEY_ONLY, VALUE_ONLY, or KEY_AND_VALUE. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of the tag filter. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProdTrafficRouteInitParameters struct { + + // List of Amazon Resource Names (ARNs) of the load balancer listeners. + // +listType=set + ListenerArns []*string `json:"listenerArns,omitempty" tf:"listener_arns,omitempty"` +} + +type ProdTrafficRouteObservation struct { + + // List of Amazon Resource Names (ARNs) of the load balancer listeners. + // +listType=set + ListenerArns []*string `json:"listenerArns,omitempty" tf:"listener_arns,omitempty"` +} + +type ProdTrafficRouteParameters struct { + + // List of Amazon Resource Names (ARNs) of the load balancer listeners. + // +kubebuilder:validation:Optional + // +listType=set + ListenerArns []*string `json:"listenerArns" tf:"listener_arns,omitempty"` +} + +type TargetGroupInfoInitParameters struct { + + // The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TargetGroupInfoObservation struct { + + // The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. 
For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TargetGroupInfoParameters struct { + + // The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TargetGroupInitParameters struct { + + // The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate name. 
+ // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type TargetGroupObservation struct { + + // The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TargetGroupPairInfoInitParameters struct { + + // Configuration block for the production traffic route (documented below). + ProdTrafficRoute *ProdTrafficRouteInitParameters `json:"prodTrafficRoute,omitempty" tf:"prod_traffic_route,omitempty"` + + // Configuration blocks for a target group within a target group pair (documented below). + TargetGroup []TargetGroupInitParameters `json:"targetGroup,omitempty" tf:"target_group,omitempty"` + + // Configuration block for the test traffic route (documented below). + TestTrafficRoute *TestTrafficRouteInitParameters `json:"testTrafficRoute,omitempty" tf:"test_traffic_route,omitempty"` +} + +type TargetGroupPairInfoObservation struct { + + // Configuration block for the production traffic route (documented below). + ProdTrafficRoute *ProdTrafficRouteObservation `json:"prodTrafficRoute,omitempty" tf:"prod_traffic_route,omitempty"` + + // Configuration blocks for a target group within a target group pair (documented below). + TargetGroup []TargetGroupObservation `json:"targetGroup,omitempty" tf:"target_group,omitempty"` + + // Configuration block for the test traffic route (documented below). + TestTrafficRoute *TestTrafficRouteObservation `json:"testTrafficRoute,omitempty" tf:"test_traffic_route,omitempty"` +} + +type TargetGroupPairInfoParameters struct { + + // Configuration block for the production traffic route (documented below). 
+ // +kubebuilder:validation:Optional + ProdTrafficRoute *ProdTrafficRouteParameters `json:"prodTrafficRoute" tf:"prod_traffic_route,omitempty"` + + // Configuration blocks for a target group within a target group pair (documented below). + // +kubebuilder:validation:Optional + TargetGroup []TargetGroupParameters `json:"targetGroup" tf:"target_group,omitempty"` + + // Configuration block for the test traffic route (documented below). + // +kubebuilder:validation:Optional + TestTrafficRoute *TestTrafficRouteParameters `json:"testTrafficRoute,omitempty" tf:"test_traffic_route,omitempty"` +} + +type TargetGroupParameters struct { + + // The name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type TerminateBlueInstancesOnDeploymentSuccessInitParameters struct { + + // The method used to add instances to a replacement environment. 
+ Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment. + TerminationWaitTimeInMinutes *float64 `json:"terminationWaitTimeInMinutes,omitempty" tf:"termination_wait_time_in_minutes,omitempty"` +} + +type TerminateBlueInstancesOnDeploymentSuccessObservation struct { + + // The method used to add instances to a replacement environment. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment. + TerminationWaitTimeInMinutes *float64 `json:"terminationWaitTimeInMinutes,omitempty" tf:"termination_wait_time_in_minutes,omitempty"` +} + +type TerminateBlueInstancesOnDeploymentSuccessParameters struct { + + // The method used to add instances to a replacement environment. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment. + // +kubebuilder:validation:Optional + TerminationWaitTimeInMinutes *float64 `json:"terminationWaitTimeInMinutes,omitempty" tf:"termination_wait_time_in_minutes,omitempty"` +} + +type TestTrafficRouteInitParameters struct { + + // List of Amazon Resource Names (ARNs) of the load balancer listeners. + // +listType=set + ListenerArns []*string `json:"listenerArns,omitempty" tf:"listener_arns,omitempty"` +} + +type TestTrafficRouteObservation struct { + + // List of Amazon Resource Names (ARNs) of the load balancer listeners. + // +listType=set + ListenerArns []*string `json:"listenerArns,omitempty" tf:"listener_arns,omitempty"` +} + +type TestTrafficRouteParameters struct { + + // List of Amazon Resource Names (ARNs) of the load balancer listeners. 
+ // +kubebuilder:validation:Optional + // +listType=set + ListenerArns []*string `json:"listenerArns" tf:"listener_arns,omitempty"` +} + +type TriggerConfigurationInitParameters struct { + + // The event type or types for which notifications are triggered. Some values that are supported: DeploymentStart, DeploymentSuccess, DeploymentFailure, DeploymentStop, DeploymentRollback, InstanceStart, InstanceSuccess, InstanceFailure. See the CodeDeploy documentation for all possible values. + // +listType=set + TriggerEvents []*string `json:"triggerEvents,omitempty" tf:"trigger_events,omitempty"` + + // The name of the notification trigger. + TriggerName *string `json:"triggerName,omitempty" tf:"trigger_name,omitempty"` + + // The ARN of the SNS topic through which notifications are sent. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + TriggerTargetArn *string `json:"triggerTargetArn,omitempty" tf:"trigger_target_arn,omitempty"` + + // Reference to a Topic in sns to populate triggerTargetArn. + // +kubebuilder:validation:Optional + TriggerTargetArnRef *v1.Reference `json:"triggerTargetArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate triggerTargetArn. + // +kubebuilder:validation:Optional + TriggerTargetArnSelector *v1.Selector `json:"triggerTargetArnSelector,omitempty" tf:"-"` +} + +type TriggerConfigurationObservation struct { + + // The event type or types for which notifications are triggered. Some values that are supported: DeploymentStart, DeploymentSuccess, DeploymentFailure, DeploymentStop, DeploymentRollback, InstanceStart, InstanceSuccess, InstanceFailure. See the CodeDeploy documentation for all possible values. + // +listType=set + TriggerEvents []*string `json:"triggerEvents,omitempty" tf:"trigger_events,omitempty"` + + // The name of the notification trigger. 
+ TriggerName *string `json:"triggerName,omitempty" tf:"trigger_name,omitempty"` + + // The ARN of the SNS topic through which notifications are sent. + TriggerTargetArn *string `json:"triggerTargetArn,omitempty" tf:"trigger_target_arn,omitempty"` +} + +type TriggerConfigurationParameters struct { + + // The event type or types for which notifications are triggered. Some values that are supported: DeploymentStart, DeploymentSuccess, DeploymentFailure, DeploymentStop, DeploymentRollback, InstanceStart, InstanceSuccess, InstanceFailure. See the CodeDeploy documentation for all possible values. + // +kubebuilder:validation:Optional + // +listType=set + TriggerEvents []*string `json:"triggerEvents" tf:"trigger_events,omitempty"` + + // The name of the notification trigger. + // +kubebuilder:validation:Optional + TriggerName *string `json:"triggerName" tf:"trigger_name,omitempty"` + + // The ARN of the SNS topic through which notifications are sent. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + TriggerTargetArn *string `json:"triggerTargetArn,omitempty" tf:"trigger_target_arn,omitempty"` + + // Reference to a Topic in sns to populate triggerTargetArn. + // +kubebuilder:validation:Optional + TriggerTargetArnRef *v1.Reference `json:"triggerTargetArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate triggerTargetArn. + // +kubebuilder:validation:Optional + TriggerTargetArnSelector *v1.Selector `json:"triggerTargetArnSelector,omitempty" tf:"-"` +} + +// DeploymentGroupSpec defines the desired state of DeploymentGroup +type DeploymentGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DeploymentGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DeploymentGroupInitParameters `json:"initProvider,omitempty"` +} + +// DeploymentGroupStatus defines the observed state of DeploymentGroup. +type DeploymentGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DeploymentGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DeploymentGroup is the Schema for the DeploymentGroups API. Provides a CodeDeploy deployment group. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DeploymentGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DeploymentGroupSpec `json:"spec"` + Status DeploymentGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeploymentGroupList contains a list of DeploymentGroups +type DeploymentGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DeploymentGroup `json:"items"` +} + +// Repository type metadata. +var ( + DeploymentGroup_Kind = "DeploymentGroup" + DeploymentGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DeploymentGroup_Kind}.String() + DeploymentGroup_KindAPIVersion = DeploymentGroup_Kind + "." + CRDGroupVersion.String() + DeploymentGroup_GroupVersionKind = CRDGroupVersion.WithKind(DeploymentGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&DeploymentGroup{}, &DeploymentGroupList{}) +} diff --git a/apis/deploy/v1beta2/zz_generated.conversion_hubs.go b/apis/deploy/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..c7c211fdcb --- /dev/null +++ b/apis/deploy/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *DeploymentConfig) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *DeploymentGroup) Hub() {} diff --git a/apis/deploy/v1beta2/zz_generated.deepcopy.go b/apis/deploy/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6c3a798876 --- /dev/null +++ b/apis/deploy/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2782 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlarmConfigurationInitParameters) DeepCopyInto(out *AlarmConfigurationInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IgnorePollAlarmFailure != nil { + in, out := &in.IgnorePollAlarmFailure, &out.IgnorePollAlarmFailure + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmConfigurationInitParameters. +func (in *AlarmConfigurationInitParameters) DeepCopy() *AlarmConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AlarmConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlarmConfigurationObservation) DeepCopyInto(out *AlarmConfigurationObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IgnorePollAlarmFailure != nil { + in, out := &in.IgnorePollAlarmFailure, &out.IgnorePollAlarmFailure + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmConfigurationObservation. +func (in *AlarmConfigurationObservation) DeepCopy() *AlarmConfigurationObservation { + if in == nil { + return nil + } + out := new(AlarmConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlarmConfigurationParameters) DeepCopyInto(out *AlarmConfigurationParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IgnorePollAlarmFailure != nil { + in, out := &in.IgnorePollAlarmFailure, &out.IgnorePollAlarmFailure + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmConfigurationParameters. 
+func (in *AlarmConfigurationParameters) DeepCopy() *AlarmConfigurationParameters { + if in == nil { + return nil + } + out := new(AlarmConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoRollbackConfigurationInitParameters) DeepCopyInto(out *AutoRollbackConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoRollbackConfigurationInitParameters. +func (in *AutoRollbackConfigurationInitParameters) DeepCopy() *AutoRollbackConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AutoRollbackConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoRollbackConfigurationObservation) DeepCopyInto(out *AutoRollbackConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoRollbackConfigurationObservation. 
+func (in *AutoRollbackConfigurationObservation) DeepCopy() *AutoRollbackConfigurationObservation { + if in == nil { + return nil + } + out := new(AutoRollbackConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoRollbackConfigurationParameters) DeepCopyInto(out *AutoRollbackConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoRollbackConfigurationParameters. +func (in *AutoRollbackConfigurationParameters) DeepCopy() *AutoRollbackConfigurationParameters { + if in == nil { + return nil + } + out := new(AutoRollbackConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlueGreenDeploymentConfigInitParameters) DeepCopyInto(out *BlueGreenDeploymentConfigInitParameters) { + *out = *in + if in.DeploymentReadyOption != nil { + in, out := &in.DeploymentReadyOption, &out.DeploymentReadyOption + *out = new(DeploymentReadyOptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GreenFleetProvisioningOption != nil { + in, out := &in.GreenFleetProvisioningOption, &out.GreenFleetProvisioningOption + *out = new(GreenFleetProvisioningOptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TerminateBlueInstancesOnDeploymentSuccess != nil { + in, out := &in.TerminateBlueInstancesOnDeploymentSuccess, &out.TerminateBlueInstancesOnDeploymentSuccess + *out = new(TerminateBlueInstancesOnDeploymentSuccessInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenDeploymentConfigInitParameters. +func (in *BlueGreenDeploymentConfigInitParameters) DeepCopy() *BlueGreenDeploymentConfigInitParameters { + if in == nil { + return nil + } + out := new(BlueGreenDeploymentConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlueGreenDeploymentConfigObservation) DeepCopyInto(out *BlueGreenDeploymentConfigObservation) { + *out = *in + if in.DeploymentReadyOption != nil { + in, out := &in.DeploymentReadyOption, &out.DeploymentReadyOption + *out = new(DeploymentReadyOptionObservation) + (*in).DeepCopyInto(*out) + } + if in.GreenFleetProvisioningOption != nil { + in, out := &in.GreenFleetProvisioningOption, &out.GreenFleetProvisioningOption + *out = new(GreenFleetProvisioningOptionObservation) + (*in).DeepCopyInto(*out) + } + if in.TerminateBlueInstancesOnDeploymentSuccess != nil { + in, out := &in.TerminateBlueInstancesOnDeploymentSuccess, &out.TerminateBlueInstancesOnDeploymentSuccess + *out = new(TerminateBlueInstancesOnDeploymentSuccessObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenDeploymentConfigObservation. +func (in *BlueGreenDeploymentConfigObservation) DeepCopy() *BlueGreenDeploymentConfigObservation { + if in == nil { + return nil + } + out := new(BlueGreenDeploymentConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlueGreenDeploymentConfigParameters) DeepCopyInto(out *BlueGreenDeploymentConfigParameters) { + *out = *in + if in.DeploymentReadyOption != nil { + in, out := &in.DeploymentReadyOption, &out.DeploymentReadyOption + *out = new(DeploymentReadyOptionParameters) + (*in).DeepCopyInto(*out) + } + if in.GreenFleetProvisioningOption != nil { + in, out := &in.GreenFleetProvisioningOption, &out.GreenFleetProvisioningOption + *out = new(GreenFleetProvisioningOptionParameters) + (*in).DeepCopyInto(*out) + } + if in.TerminateBlueInstancesOnDeploymentSuccess != nil { + in, out := &in.TerminateBlueInstancesOnDeploymentSuccess, &out.TerminateBlueInstancesOnDeploymentSuccess + *out = new(TerminateBlueInstancesOnDeploymentSuccessParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenDeploymentConfigParameters. +func (in *BlueGreenDeploymentConfigParameters) DeepCopy() *BlueGreenDeploymentConfigParameters { + if in == nil { + return nil + } + out := new(BlueGreenDeploymentConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfig. +func (in *DeploymentConfig) DeepCopy() *DeploymentConfig { + if in == nil { + return nil + } + out := new(DeploymentConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DeploymentConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigInitParameters) DeepCopyInto(out *DeploymentConfigInitParameters) { + *out = *in + if in.ComputePlatform != nil { + in, out := &in.ComputePlatform, &out.ComputePlatform + *out = new(string) + **out = **in + } + if in.MinimumHealthyHosts != nil { + in, out := &in.MinimumHealthyHosts, &out.MinimumHealthyHosts + *out = new(MinimumHealthyHostsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TrafficRoutingConfig != nil { + in, out := &in.TrafficRoutingConfig, &out.TrafficRoutingConfig + *out = new(TrafficRoutingConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigInitParameters. +func (in *DeploymentConfigInitParameters) DeepCopy() *DeploymentConfigInitParameters { + if in == nil { + return nil + } + out := new(DeploymentConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigList) DeepCopyInto(out *DeploymentConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeploymentConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigList. 
+func (in *DeploymentConfigList) DeepCopy() *DeploymentConfigList { + if in == nil { + return nil + } + out := new(DeploymentConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigObservation) DeepCopyInto(out *DeploymentConfigObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ComputePlatform != nil { + in, out := &in.ComputePlatform, &out.ComputePlatform + *out = new(string) + **out = **in + } + if in.DeploymentConfigID != nil { + in, out := &in.DeploymentConfigID, &out.DeploymentConfigID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MinimumHealthyHosts != nil { + in, out := &in.MinimumHealthyHosts, &out.MinimumHealthyHosts + *out = new(MinimumHealthyHostsObservation) + (*in).DeepCopyInto(*out) + } + if in.TrafficRoutingConfig != nil { + in, out := &in.TrafficRoutingConfig, &out.TrafficRoutingConfig + *out = new(TrafficRoutingConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigObservation. +func (in *DeploymentConfigObservation) DeepCopy() *DeploymentConfigObservation { + if in == nil { + return nil + } + out := new(DeploymentConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentConfigParameters) DeepCopyInto(out *DeploymentConfigParameters) { + *out = *in + if in.ComputePlatform != nil { + in, out := &in.ComputePlatform, &out.ComputePlatform + *out = new(string) + **out = **in + } + if in.MinimumHealthyHosts != nil { + in, out := &in.MinimumHealthyHosts, &out.MinimumHealthyHosts + *out = new(MinimumHealthyHostsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.TrafficRoutingConfig != nil { + in, out := &in.TrafficRoutingConfig, &out.TrafficRoutingConfig + *out = new(TrafficRoutingConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigParameters. +func (in *DeploymentConfigParameters) DeepCopy() *DeploymentConfigParameters { + if in == nil { + return nil + } + out := new(DeploymentConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigSpec. +func (in *DeploymentConfigSpec) DeepCopy() *DeploymentConfigSpec { + if in == nil { + return nil + } + out := new(DeploymentConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentConfigStatus) DeepCopyInto(out *DeploymentConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigStatus. +func (in *DeploymentConfigStatus) DeepCopy() *DeploymentConfigStatus { + if in == nil { + return nil + } + out := new(DeploymentConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentGroup) DeepCopyInto(out *DeploymentGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentGroup. +func (in *DeploymentGroup) DeepCopy() *DeploymentGroup { + if in == nil { + return nil + } + out := new(DeploymentGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentGroupInitParameters) DeepCopyInto(out *DeploymentGroupInitParameters) { + *out = *in + if in.AlarmConfiguration != nil { + in, out := &in.AlarmConfiguration, &out.AlarmConfiguration + *out = new(AlarmConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoRollbackConfiguration != nil { + in, out := &in.AutoRollbackConfiguration, &out.AutoRollbackConfiguration + *out = new(AutoRollbackConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoscalingGroups != nil { + in, out := &in.AutoscalingGroups, &out.AutoscalingGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BlueGreenDeploymentConfig != nil { + in, out := &in.BlueGreenDeploymentConfig, &out.BlueGreenDeploymentConfig + *out = new(BlueGreenDeploymentConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeploymentConfigName != nil { + in, out := &in.DeploymentConfigName, &out.DeploymentConfigName + *out = new(string) + **out = **in + } + if in.DeploymentStyle != nil { + in, out := &in.DeploymentStyle, &out.DeploymentStyle + *out = new(DeploymentStyleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EC2TagFilter != nil { + in, out := &in.EC2TagFilter, &out.EC2TagFilter + *out = make([]EC2TagFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EC2TagSet != nil { + in, out := &in.EC2TagSet, &out.EC2TagSet + *out = make([]EC2TagSetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EcsService != nil { + in, out := &in.EcsService, &out.EcsService + *out = new(EcsServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerInfo != nil { + in, out := &in.LoadBalancerInfo, &out.LoadBalancerInfo + *out = new(LoadBalancerInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OnPremisesInstanceTagFilter != nil { + in, 
out := &in.OnPremisesInstanceTagFilter, &out.OnPremisesInstanceTagFilter + *out = make([]OnPremisesInstanceTagFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutdatedInstancesStrategy != nil { + in, out := &in.OutdatedInstancesStrategy, &out.OutdatedInstancesStrategy + *out = new(string) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TriggerConfiguration != nil { + in, out := &in.TriggerConfiguration, &out.TriggerConfiguration + *out = make([]TriggerConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentGroupInitParameters. +func (in *DeploymentGroupInitParameters) DeepCopy() *DeploymentGroupInitParameters { + if in == nil { + return nil + } + out := new(DeploymentGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentGroupList) DeepCopyInto(out *DeploymentGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeploymentGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentGroupList. +func (in *DeploymentGroupList) DeepCopy() *DeploymentGroupList { + if in == nil { + return nil + } + out := new(DeploymentGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentGroupObservation) DeepCopyInto(out *DeploymentGroupObservation) { + *out = *in + if in.AlarmConfiguration != nil { + in, out := &in.AlarmConfiguration, &out.AlarmConfiguration + *out = new(AlarmConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.AppName != nil { + in, out := &in.AppName, &out.AppName + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoRollbackConfiguration != nil { + in, out := &in.AutoRollbackConfiguration, &out.AutoRollbackConfiguration + *out = new(AutoRollbackConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoscalingGroups != nil { + in, out := &in.AutoscalingGroups, &out.AutoscalingGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BlueGreenDeploymentConfig != nil { + in, out := &in.BlueGreenDeploymentConfig, 
&out.BlueGreenDeploymentConfig + *out = new(BlueGreenDeploymentConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ComputePlatform != nil { + in, out := &in.ComputePlatform, &out.ComputePlatform + *out = new(string) + **out = **in + } + if in.DeploymentConfigName != nil { + in, out := &in.DeploymentConfigName, &out.DeploymentConfigName + *out = new(string) + **out = **in + } + if in.DeploymentGroupID != nil { + in, out := &in.DeploymentGroupID, &out.DeploymentGroupID + *out = new(string) + **out = **in + } + if in.DeploymentStyle != nil { + in, out := &in.DeploymentStyle, &out.DeploymentStyle + *out = new(DeploymentStyleObservation) + (*in).DeepCopyInto(*out) + } + if in.EC2TagFilter != nil { + in, out := &in.EC2TagFilter, &out.EC2TagFilter + *out = make([]EC2TagFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EC2TagSet != nil { + in, out := &in.EC2TagSet, &out.EC2TagSet + *out = make([]EC2TagSetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EcsService != nil { + in, out := &in.EcsService, &out.EcsService + *out = new(EcsServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoadBalancerInfo != nil { + in, out := &in.LoadBalancerInfo, &out.LoadBalancerInfo + *out = new(LoadBalancerInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.OnPremisesInstanceTagFilter != nil { + in, out := &in.OnPremisesInstanceTagFilter, &out.OnPremisesInstanceTagFilter + *out = make([]OnPremisesInstanceTagFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutdatedInstancesStrategy != nil { + in, out := &in.OutdatedInstancesStrategy, &out.OutdatedInstancesStrategy + *out = new(string) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if 
in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TriggerConfiguration != nil { + in, out := &in.TriggerConfiguration, &out.TriggerConfiguration + *out = make([]TriggerConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentGroupObservation. +func (in *DeploymentGroupObservation) DeepCopy() *DeploymentGroupObservation { + if in == nil { + return nil + } + out := new(DeploymentGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentGroupParameters) DeepCopyInto(out *DeploymentGroupParameters) { + *out = *in + if in.AlarmConfiguration != nil { + in, out := &in.AlarmConfiguration, &out.AlarmConfiguration + *out = new(AlarmConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.AppName != nil { + in, out := &in.AppName, &out.AppName + *out = new(string) + **out = **in + } + if in.AppNameRef != nil { + in, out := &in.AppNameRef, &out.AppNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppNameSelector != nil { + in, out := &in.AppNameSelector, &out.AppNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AutoRollbackConfiguration != nil { + in, out := &in.AutoRollbackConfiguration, &out.AutoRollbackConfiguration + *out = new(AutoRollbackConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoscalingGroups != nil { + in, out := &in.AutoscalingGroups, &out.AutoscalingGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BlueGreenDeploymentConfig != nil { + in, out := &in.BlueGreenDeploymentConfig, &out.BlueGreenDeploymentConfig + *out = new(BlueGreenDeploymentConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.DeploymentConfigName != nil { + in, out := &in.DeploymentConfigName, &out.DeploymentConfigName + *out = new(string) + **out = **in + } + if in.DeploymentStyle != nil { + in, out := &in.DeploymentStyle, &out.DeploymentStyle + *out = new(DeploymentStyleParameters) + (*in).DeepCopyInto(*out) + } + if in.EC2TagFilter != nil { + in, out := &in.EC2TagFilter, &out.EC2TagFilter + *out = make([]EC2TagFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EC2TagSet != nil { + in, out := &in.EC2TagSet, &out.EC2TagSet + *out = make([]EC2TagSetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.EcsService != nil { + in, out := &in.EcsService, &out.EcsService + *out = new(EcsServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerInfo != nil { + in, out := &in.LoadBalancerInfo, &out.LoadBalancerInfo + *out = new(LoadBalancerInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.OnPremisesInstanceTagFilter != nil { + in, out := &in.OnPremisesInstanceTagFilter, &out.OnPremisesInstanceTagFilter + *out = make([]OnPremisesInstanceTagFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutdatedInstancesStrategy != nil { + in, out := &in.OutdatedInstancesStrategy, &out.OutdatedInstancesStrategy + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TriggerConfiguration != nil { + in, out := &in.TriggerConfiguration, &out.TriggerConfiguration + *out = make([]TriggerConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentGroupParameters. 
+func (in *DeploymentGroupParameters) DeepCopy() *DeploymentGroupParameters { + if in == nil { + return nil + } + out := new(DeploymentGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentGroupSpec) DeepCopyInto(out *DeploymentGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentGroupSpec. +func (in *DeploymentGroupSpec) DeepCopy() *DeploymentGroupSpec { + if in == nil { + return nil + } + out := new(DeploymentGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentGroupStatus) DeepCopyInto(out *DeploymentGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentGroupStatus. +func (in *DeploymentGroupStatus) DeepCopy() *DeploymentGroupStatus { + if in == nil { + return nil + } + out := new(DeploymentGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentReadyOptionInitParameters) DeepCopyInto(out *DeploymentReadyOptionInitParameters) { + *out = *in + if in.ActionOnTimeout != nil { + in, out := &in.ActionOnTimeout, &out.ActionOnTimeout + *out = new(string) + **out = **in + } + if in.WaitTimeInMinutes != nil { + in, out := &in.WaitTimeInMinutes, &out.WaitTimeInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentReadyOptionInitParameters. +func (in *DeploymentReadyOptionInitParameters) DeepCopy() *DeploymentReadyOptionInitParameters { + if in == nil { + return nil + } + out := new(DeploymentReadyOptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentReadyOptionObservation) DeepCopyInto(out *DeploymentReadyOptionObservation) { + *out = *in + if in.ActionOnTimeout != nil { + in, out := &in.ActionOnTimeout, &out.ActionOnTimeout + *out = new(string) + **out = **in + } + if in.WaitTimeInMinutes != nil { + in, out := &in.WaitTimeInMinutes, &out.WaitTimeInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentReadyOptionObservation. +func (in *DeploymentReadyOptionObservation) DeepCopy() *DeploymentReadyOptionObservation { + if in == nil { + return nil + } + out := new(DeploymentReadyOptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentReadyOptionParameters) DeepCopyInto(out *DeploymentReadyOptionParameters) { + *out = *in + if in.ActionOnTimeout != nil { + in, out := &in.ActionOnTimeout, &out.ActionOnTimeout + *out = new(string) + **out = **in + } + if in.WaitTimeInMinutes != nil { + in, out := &in.WaitTimeInMinutes, &out.WaitTimeInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentReadyOptionParameters. +func (in *DeploymentReadyOptionParameters) DeepCopy() *DeploymentReadyOptionParameters { + if in == nil { + return nil + } + out := new(DeploymentReadyOptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStyleInitParameters) DeepCopyInto(out *DeploymentStyleInitParameters) { + *out = *in + if in.DeploymentOption != nil { + in, out := &in.DeploymentOption, &out.DeploymentOption + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStyleInitParameters. +func (in *DeploymentStyleInitParameters) DeepCopy() *DeploymentStyleInitParameters { + if in == nil { + return nil + } + out := new(DeploymentStyleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentStyleObservation) DeepCopyInto(out *DeploymentStyleObservation) { + *out = *in + if in.DeploymentOption != nil { + in, out := &in.DeploymentOption, &out.DeploymentOption + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStyleObservation. +func (in *DeploymentStyleObservation) DeepCopy() *DeploymentStyleObservation { + if in == nil { + return nil + } + out := new(DeploymentStyleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStyleParameters) DeepCopyInto(out *DeploymentStyleParameters) { + *out = *in + if in.DeploymentOption != nil { + in, out := &in.DeploymentOption, &out.DeploymentOption + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStyleParameters. +func (in *DeploymentStyleParameters) DeepCopy() *DeploymentStyleParameters { + if in == nil { + return nil + } + out := new(DeploymentStyleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EC2TagFilterInitParameters) DeepCopyInto(out *EC2TagFilterInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2TagFilterInitParameters. +func (in *EC2TagFilterInitParameters) DeepCopy() *EC2TagFilterInitParameters { + if in == nil { + return nil + } + out := new(EC2TagFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2TagFilterObservation) DeepCopyInto(out *EC2TagFilterObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2TagFilterObservation. +func (in *EC2TagFilterObservation) DeepCopy() *EC2TagFilterObservation { + if in == nil { + return nil + } + out := new(EC2TagFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EC2TagFilterParameters) DeepCopyInto(out *EC2TagFilterParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2TagFilterParameters. +func (in *EC2TagFilterParameters) DeepCopy() *EC2TagFilterParameters { + if in == nil { + return nil + } + out := new(EC2TagFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2TagSetEC2TagFilterInitParameters) DeepCopyInto(out *EC2TagSetEC2TagFilterInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2TagSetEC2TagFilterInitParameters. +func (in *EC2TagSetEC2TagFilterInitParameters) DeepCopy() *EC2TagSetEC2TagFilterInitParameters { + if in == nil { + return nil + } + out := new(EC2TagSetEC2TagFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EC2TagSetEC2TagFilterObservation) DeepCopyInto(out *EC2TagSetEC2TagFilterObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2TagSetEC2TagFilterObservation. +func (in *EC2TagSetEC2TagFilterObservation) DeepCopy() *EC2TagSetEC2TagFilterObservation { + if in == nil { + return nil + } + out := new(EC2TagSetEC2TagFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2TagSetEC2TagFilterParameters) DeepCopyInto(out *EC2TagSetEC2TagFilterParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2TagSetEC2TagFilterParameters. +func (in *EC2TagSetEC2TagFilterParameters) DeepCopy() *EC2TagSetEC2TagFilterParameters { + if in == nil { + return nil + } + out := new(EC2TagSetEC2TagFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EC2TagSetInitParameters) DeepCopyInto(out *EC2TagSetInitParameters) { + *out = *in + if in.EC2TagFilter != nil { + in, out := &in.EC2TagFilter, &out.EC2TagFilter + *out = make([]EC2TagSetEC2TagFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2TagSetInitParameters. +func (in *EC2TagSetInitParameters) DeepCopy() *EC2TagSetInitParameters { + if in == nil { + return nil + } + out := new(EC2TagSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2TagSetObservation) DeepCopyInto(out *EC2TagSetObservation) { + *out = *in + if in.EC2TagFilter != nil { + in, out := &in.EC2TagFilter, &out.EC2TagFilter + *out = make([]EC2TagSetEC2TagFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2TagSetObservation. +func (in *EC2TagSetObservation) DeepCopy() *EC2TagSetObservation { + if in == nil { + return nil + } + out := new(EC2TagSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2TagSetParameters) DeepCopyInto(out *EC2TagSetParameters) { + *out = *in + if in.EC2TagFilter != nil { + in, out := &in.EC2TagFilter, &out.EC2TagFilter + *out = make([]EC2TagSetEC2TagFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2TagSetParameters. 
+func (in *EC2TagSetParameters) DeepCopy() *EC2TagSetParameters { + if in == nil { + return nil + } + out := new(EC2TagSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ELBInfoInitParameters) DeepCopyInto(out *ELBInfoInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELBInfoInitParameters. +func (in *ELBInfoInitParameters) DeepCopy() *ELBInfoInitParameters { + if in == nil { + return nil + } + out := new(ELBInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ELBInfoObservation) DeepCopyInto(out *ELBInfoObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELBInfoObservation. +func (in *ELBInfoObservation) DeepCopy() *ELBInfoObservation { + if in == nil { + return nil + } + out := new(ELBInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ELBInfoParameters) DeepCopyInto(out *ELBInfoParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELBInfoParameters. +func (in *ELBInfoParameters) DeepCopy() *ELBInfoParameters { + if in == nil { + return nil + } + out := new(ELBInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsServiceInitParameters) DeepCopyInto(out *EcsServiceInitParameters) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterNameRef != nil { + in, out := &in.ClusterNameRef, &out.ClusterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterNameSelector != nil { + in, out := &in.ClusterNameSelector, &out.ClusterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceNameRef != nil { + in, out := &in.ServiceNameRef, &out.ServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceNameSelector != nil { + in, out := &in.ServiceNameSelector, &out.ServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsServiceInitParameters. 
+func (in *EcsServiceInitParameters) DeepCopy() *EcsServiceInitParameters { + if in == nil { + return nil + } + out := new(EcsServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsServiceObservation) DeepCopyInto(out *EcsServiceObservation) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsServiceObservation. +func (in *EcsServiceObservation) DeepCopy() *EcsServiceObservation { + if in == nil { + return nil + } + out := new(EcsServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsServiceParameters) DeepCopyInto(out *EcsServiceParameters) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterNameRef != nil { + in, out := &in.ClusterNameRef, &out.ClusterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterNameSelector != nil { + in, out := &in.ClusterNameSelector, &out.ClusterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceNameRef != nil { + in, out := &in.ServiceNameRef, &out.ServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceNameSelector != nil { + in, out := &in.ServiceNameSelector, &out.ServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsServiceParameters. +func (in *EcsServiceParameters) DeepCopy() *EcsServiceParameters { + if in == nil { + return nil + } + out := new(EcsServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenFleetProvisioningOptionInitParameters) DeepCopyInto(out *GreenFleetProvisioningOptionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenFleetProvisioningOptionInitParameters. 
+func (in *GreenFleetProvisioningOptionInitParameters) DeepCopy() *GreenFleetProvisioningOptionInitParameters { + if in == nil { + return nil + } + out := new(GreenFleetProvisioningOptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenFleetProvisioningOptionObservation) DeepCopyInto(out *GreenFleetProvisioningOptionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenFleetProvisioningOptionObservation. +func (in *GreenFleetProvisioningOptionObservation) DeepCopy() *GreenFleetProvisioningOptionObservation { + if in == nil { + return nil + } + out := new(GreenFleetProvisioningOptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GreenFleetProvisioningOptionParameters) DeepCopyInto(out *GreenFleetProvisioningOptionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GreenFleetProvisioningOptionParameters. +func (in *GreenFleetProvisioningOptionParameters) DeepCopy() *GreenFleetProvisioningOptionParameters { + if in == nil { + return nil + } + out := new(GreenFleetProvisioningOptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBalancerInfoInitParameters) DeepCopyInto(out *LoadBalancerInfoInitParameters) { + *out = *in + if in.ELBInfo != nil { + in, out := &in.ELBInfo, &out.ELBInfo + *out = make([]ELBInfoInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupInfo != nil { + in, out := &in.TargetGroupInfo, &out.TargetGroupInfo + *out = make([]TargetGroupInfoInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupPairInfo != nil { + in, out := &in.TargetGroupPairInfo, &out.TargetGroupPairInfo + *out = new(TargetGroupPairInfoInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerInfoInitParameters. +func (in *LoadBalancerInfoInitParameters) DeepCopy() *LoadBalancerInfoInitParameters { + if in == nil { + return nil + } + out := new(LoadBalancerInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerInfoObservation) DeepCopyInto(out *LoadBalancerInfoObservation) { + *out = *in + if in.ELBInfo != nil { + in, out := &in.ELBInfo, &out.ELBInfo + *out = make([]ELBInfoObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupInfo != nil { + in, out := &in.TargetGroupInfo, &out.TargetGroupInfo + *out = make([]TargetGroupInfoObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupPairInfo != nil { + in, out := &in.TargetGroupPairInfo, &out.TargetGroupPairInfo + *out = new(TargetGroupPairInfoObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerInfoObservation. 
+func (in *LoadBalancerInfoObservation) DeepCopy() *LoadBalancerInfoObservation { + if in == nil { + return nil + } + out := new(LoadBalancerInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerInfoParameters) DeepCopyInto(out *LoadBalancerInfoParameters) { + *out = *in + if in.ELBInfo != nil { + in, out := &in.ELBInfo, &out.ELBInfo + *out = make([]ELBInfoParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupInfo != nil { + in, out := &in.TargetGroupInfo, &out.TargetGroupInfo + *out = make([]TargetGroupInfoParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetGroupPairInfo != nil { + in, out := &in.TargetGroupPairInfo, &out.TargetGroupPairInfo + *out = new(TargetGroupPairInfoParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerInfoParameters. +func (in *LoadBalancerInfoParameters) DeepCopy() *LoadBalancerInfoParameters { + if in == nil { + return nil + } + out := new(LoadBalancerInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MinimumHealthyHostsInitParameters) DeepCopyInto(out *MinimumHealthyHostsInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MinimumHealthyHostsInitParameters. 
+func (in *MinimumHealthyHostsInitParameters) DeepCopy() *MinimumHealthyHostsInitParameters { + if in == nil { + return nil + } + out := new(MinimumHealthyHostsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MinimumHealthyHostsObservation) DeepCopyInto(out *MinimumHealthyHostsObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MinimumHealthyHostsObservation. +func (in *MinimumHealthyHostsObservation) DeepCopy() *MinimumHealthyHostsObservation { + if in == nil { + return nil + } + out := new(MinimumHealthyHostsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MinimumHealthyHostsParameters) DeepCopyInto(out *MinimumHealthyHostsParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MinimumHealthyHostsParameters. +func (in *MinimumHealthyHostsParameters) DeepCopy() *MinimumHealthyHostsParameters { + if in == nil { + return nil + } + out := new(MinimumHealthyHostsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OnPremisesInstanceTagFilterInitParameters) DeepCopyInto(out *OnPremisesInstanceTagFilterInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremisesInstanceTagFilterInitParameters. +func (in *OnPremisesInstanceTagFilterInitParameters) DeepCopy() *OnPremisesInstanceTagFilterInitParameters { + if in == nil { + return nil + } + out := new(OnPremisesInstanceTagFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnPremisesInstanceTagFilterObservation) DeepCopyInto(out *OnPremisesInstanceTagFilterObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremisesInstanceTagFilterObservation. +func (in *OnPremisesInstanceTagFilterObservation) DeepCopy() *OnPremisesInstanceTagFilterObservation { + if in == nil { + return nil + } + out := new(OnPremisesInstanceTagFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OnPremisesInstanceTagFilterParameters) DeepCopyInto(out *OnPremisesInstanceTagFilterParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremisesInstanceTagFilterParameters. +func (in *OnPremisesInstanceTagFilterParameters) DeepCopy() *OnPremisesInstanceTagFilterParameters { + if in == nil { + return nil + } + out := new(OnPremisesInstanceTagFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProdTrafficRouteInitParameters) DeepCopyInto(out *ProdTrafficRouteInitParameters) { + *out = *in + if in.ListenerArns != nil { + in, out := &in.ListenerArns, &out.ListenerArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProdTrafficRouteInitParameters. +func (in *ProdTrafficRouteInitParameters) DeepCopy() *ProdTrafficRouteInitParameters { + if in == nil { + return nil + } + out := new(ProdTrafficRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProdTrafficRouteObservation) DeepCopyInto(out *ProdTrafficRouteObservation) { + *out = *in + if in.ListenerArns != nil { + in, out := &in.ListenerArns, &out.ListenerArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProdTrafficRouteObservation. +func (in *ProdTrafficRouteObservation) DeepCopy() *ProdTrafficRouteObservation { + if in == nil { + return nil + } + out := new(ProdTrafficRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProdTrafficRouteParameters) DeepCopyInto(out *ProdTrafficRouteParameters) { + *out = *in + if in.ListenerArns != nil { + in, out := &in.ListenerArns, &out.ListenerArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProdTrafficRouteParameters. +func (in *ProdTrafficRouteParameters) DeepCopy() *ProdTrafficRouteParameters { + if in == nil { + return nil + } + out := new(ProdTrafficRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupInfoInitParameters) DeepCopyInto(out *TargetGroupInfoInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupInfoInitParameters. 
+func (in *TargetGroupInfoInitParameters) DeepCopy() *TargetGroupInfoInitParameters { + if in == nil { + return nil + } + out := new(TargetGroupInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupInfoObservation) DeepCopyInto(out *TargetGroupInfoObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupInfoObservation. +func (in *TargetGroupInfoObservation) DeepCopy() *TargetGroupInfoObservation { + if in == nil { + return nil + } + out := new(TargetGroupInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupInfoParameters) DeepCopyInto(out *TargetGroupInfoParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupInfoParameters. +func (in *TargetGroupInfoParameters) DeepCopy() *TargetGroupInfoParameters { + if in == nil { + return nil + } + out := new(TargetGroupInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupInitParameters) DeepCopyInto(out *TargetGroupInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupInitParameters. +func (in *TargetGroupInitParameters) DeepCopy() *TargetGroupInitParameters { + if in == nil { + return nil + } + out := new(TargetGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupObservation) DeepCopyInto(out *TargetGroupObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupObservation. +func (in *TargetGroupObservation) DeepCopy() *TargetGroupObservation { + if in == nil { + return nil + } + out := new(TargetGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupPairInfoInitParameters) DeepCopyInto(out *TargetGroupPairInfoInitParameters) { + *out = *in + if in.ProdTrafficRoute != nil { + in, out := &in.ProdTrafficRoute, &out.ProdTrafficRoute + *out = new(ProdTrafficRouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroup != nil { + in, out := &in.TargetGroup, &out.TargetGroup + *out = make([]TargetGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TestTrafficRoute != nil { + in, out := &in.TestTrafficRoute, &out.TestTrafficRoute + *out = new(TestTrafficRouteInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupPairInfoInitParameters. +func (in *TargetGroupPairInfoInitParameters) DeepCopy() *TargetGroupPairInfoInitParameters { + if in == nil { + return nil + } + out := new(TargetGroupPairInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupPairInfoObservation) DeepCopyInto(out *TargetGroupPairInfoObservation) { + *out = *in + if in.ProdTrafficRoute != nil { + in, out := &in.ProdTrafficRoute, &out.ProdTrafficRoute + *out = new(ProdTrafficRouteObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetGroup != nil { + in, out := &in.TargetGroup, &out.TargetGroup + *out = make([]TargetGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TestTrafficRoute != nil { + in, out := &in.TestTrafficRoute, &out.TestTrafficRoute + *out = new(TestTrafficRouteObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupPairInfoObservation. 
+func (in *TargetGroupPairInfoObservation) DeepCopy() *TargetGroupPairInfoObservation { + if in == nil { + return nil + } + out := new(TargetGroupPairInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupPairInfoParameters) DeepCopyInto(out *TargetGroupPairInfoParameters) { + *out = *in + if in.ProdTrafficRoute != nil { + in, out := &in.ProdTrafficRoute, &out.ProdTrafficRoute + *out = new(ProdTrafficRouteParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroup != nil { + in, out := &in.TargetGroup, &out.TargetGroup + *out = make([]TargetGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TestTrafficRoute != nil { + in, out := &in.TestTrafficRoute, &out.TestTrafficRoute + *out = new(TestTrafficRouteParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupPairInfoParameters. +func (in *TargetGroupPairInfoParameters) DeepCopy() *TargetGroupPairInfoParameters { + if in == nil { + return nil + } + out := new(TargetGroupPairInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupParameters) DeepCopyInto(out *TargetGroupParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupParameters. 
+func (in *TargetGroupParameters) DeepCopy() *TargetGroupParameters { + if in == nil { + return nil + } + out := new(TargetGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TerminateBlueInstancesOnDeploymentSuccessInitParameters) DeepCopyInto(out *TerminateBlueInstancesOnDeploymentSuccessInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.TerminationWaitTimeInMinutes != nil { + in, out := &in.TerminationWaitTimeInMinutes, &out.TerminationWaitTimeInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminateBlueInstancesOnDeploymentSuccessInitParameters. +func (in *TerminateBlueInstancesOnDeploymentSuccessInitParameters) DeepCopy() *TerminateBlueInstancesOnDeploymentSuccessInitParameters { + if in == nil { + return nil + } + out := new(TerminateBlueInstancesOnDeploymentSuccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TerminateBlueInstancesOnDeploymentSuccessObservation) DeepCopyInto(out *TerminateBlueInstancesOnDeploymentSuccessObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.TerminationWaitTimeInMinutes != nil { + in, out := &in.TerminationWaitTimeInMinutes, &out.TerminationWaitTimeInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminateBlueInstancesOnDeploymentSuccessObservation. 
+func (in *TerminateBlueInstancesOnDeploymentSuccessObservation) DeepCopy() *TerminateBlueInstancesOnDeploymentSuccessObservation { + if in == nil { + return nil + } + out := new(TerminateBlueInstancesOnDeploymentSuccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TerminateBlueInstancesOnDeploymentSuccessParameters) DeepCopyInto(out *TerminateBlueInstancesOnDeploymentSuccessParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.TerminationWaitTimeInMinutes != nil { + in, out := &in.TerminationWaitTimeInMinutes, &out.TerminationWaitTimeInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminateBlueInstancesOnDeploymentSuccessParameters. +func (in *TerminateBlueInstancesOnDeploymentSuccessParameters) DeepCopy() *TerminateBlueInstancesOnDeploymentSuccessParameters { + if in == nil { + return nil + } + out := new(TerminateBlueInstancesOnDeploymentSuccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestTrafficRouteInitParameters) DeepCopyInto(out *TestTrafficRouteInitParameters) { + *out = *in + if in.ListenerArns != nil { + in, out := &in.ListenerArns, &out.ListenerArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTrafficRouteInitParameters. 
+func (in *TestTrafficRouteInitParameters) DeepCopy() *TestTrafficRouteInitParameters { + if in == nil { + return nil + } + out := new(TestTrafficRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestTrafficRouteObservation) DeepCopyInto(out *TestTrafficRouteObservation) { + *out = *in + if in.ListenerArns != nil { + in, out := &in.ListenerArns, &out.ListenerArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTrafficRouteObservation. +func (in *TestTrafficRouteObservation) DeepCopy() *TestTrafficRouteObservation { + if in == nil { + return nil + } + out := new(TestTrafficRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestTrafficRouteParameters) DeepCopyInto(out *TestTrafficRouteParameters) { + *out = *in + if in.ListenerArns != nil { + in, out := &in.ListenerArns, &out.ListenerArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTrafficRouteParameters. +func (in *TestTrafficRouteParameters) DeepCopy() *TestTrafficRouteParameters { + if in == nil { + return nil + } + out := new(TestTrafficRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeBasedCanaryInitParameters) DeepCopyInto(out *TimeBasedCanaryInitParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeBasedCanaryInitParameters. +func (in *TimeBasedCanaryInitParameters) DeepCopy() *TimeBasedCanaryInitParameters { + if in == nil { + return nil + } + out := new(TimeBasedCanaryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeBasedCanaryObservation) DeepCopyInto(out *TimeBasedCanaryObservation) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeBasedCanaryObservation. +func (in *TimeBasedCanaryObservation) DeepCopy() *TimeBasedCanaryObservation { + if in == nil { + return nil + } + out := new(TimeBasedCanaryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeBasedCanaryParameters) DeepCopyInto(out *TimeBasedCanaryParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeBasedCanaryParameters. 
+func (in *TimeBasedCanaryParameters) DeepCopy() *TimeBasedCanaryParameters { + if in == nil { + return nil + } + out := new(TimeBasedCanaryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeBasedLinearInitParameters) DeepCopyInto(out *TimeBasedLinearInitParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeBasedLinearInitParameters. +func (in *TimeBasedLinearInitParameters) DeepCopy() *TimeBasedLinearInitParameters { + if in == nil { + return nil + } + out := new(TimeBasedLinearInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeBasedLinearObservation) DeepCopyInto(out *TimeBasedLinearObservation) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeBasedLinearObservation. +func (in *TimeBasedLinearObservation) DeepCopy() *TimeBasedLinearObservation { + if in == nil { + return nil + } + out := new(TimeBasedLinearObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeBasedLinearParameters) DeepCopyInto(out *TimeBasedLinearParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeBasedLinearParameters. +func (in *TimeBasedLinearParameters) DeepCopy() *TimeBasedLinearParameters { + if in == nil { + return nil + } + out := new(TimeBasedLinearParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficRoutingConfigInitParameters) DeepCopyInto(out *TrafficRoutingConfigInitParameters) { + *out = *in + if in.TimeBasedCanary != nil { + in, out := &in.TimeBasedCanary, &out.TimeBasedCanary + *out = new(TimeBasedCanaryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeBasedLinear != nil { + in, out := &in.TimeBasedLinear, &out.TimeBasedLinear + *out = new(TimeBasedLinearInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficRoutingConfigInitParameters. +func (in *TrafficRoutingConfigInitParameters) DeepCopy() *TrafficRoutingConfigInitParameters { + if in == nil { + return nil + } + out := new(TrafficRoutingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrafficRoutingConfigObservation) DeepCopyInto(out *TrafficRoutingConfigObservation) { + *out = *in + if in.TimeBasedCanary != nil { + in, out := &in.TimeBasedCanary, &out.TimeBasedCanary + *out = new(TimeBasedCanaryObservation) + (*in).DeepCopyInto(*out) + } + if in.TimeBasedLinear != nil { + in, out := &in.TimeBasedLinear, &out.TimeBasedLinear + *out = new(TimeBasedLinearObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficRoutingConfigObservation. +func (in *TrafficRoutingConfigObservation) DeepCopy() *TrafficRoutingConfigObservation { + if in == nil { + return nil + } + out := new(TrafficRoutingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficRoutingConfigParameters) DeepCopyInto(out *TrafficRoutingConfigParameters) { + *out = *in + if in.TimeBasedCanary != nil { + in, out := &in.TimeBasedCanary, &out.TimeBasedCanary + *out = new(TimeBasedCanaryParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeBasedLinear != nil { + in, out := &in.TimeBasedLinear, &out.TimeBasedLinear + *out = new(TimeBasedLinearParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficRoutingConfigParameters. +func (in *TrafficRoutingConfigParameters) DeepCopy() *TrafficRoutingConfigParameters { + if in == nil { + return nil + } + out := new(TrafficRoutingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerConfigurationInitParameters) DeepCopyInto(out *TriggerConfigurationInitParameters) { + *out = *in + if in.TriggerEvents != nil { + in, out := &in.TriggerEvents, &out.TriggerEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TriggerName != nil { + in, out := &in.TriggerName, &out.TriggerName + *out = new(string) + **out = **in + } + if in.TriggerTargetArn != nil { + in, out := &in.TriggerTargetArn, &out.TriggerTargetArn + *out = new(string) + **out = **in + } + if in.TriggerTargetArnRef != nil { + in, out := &in.TriggerTargetArnRef, &out.TriggerTargetArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TriggerTargetArnSelector != nil { + in, out := &in.TriggerTargetArnSelector, &out.TriggerTargetArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerConfigurationInitParameters. +func (in *TriggerConfigurationInitParameters) DeepCopy() *TriggerConfigurationInitParameters { + if in == nil { + return nil + } + out := new(TriggerConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerConfigurationObservation) DeepCopyInto(out *TriggerConfigurationObservation) { + *out = *in + if in.TriggerEvents != nil { + in, out := &in.TriggerEvents, &out.TriggerEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TriggerName != nil { + in, out := &in.TriggerName, &out.TriggerName + *out = new(string) + **out = **in + } + if in.TriggerTargetArn != nil { + in, out := &in.TriggerTargetArn, &out.TriggerTargetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerConfigurationObservation. +func (in *TriggerConfigurationObservation) DeepCopy() *TriggerConfigurationObservation { + if in == nil { + return nil + } + out := new(TriggerConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerConfigurationParameters) DeepCopyInto(out *TriggerConfigurationParameters) { + *out = *in + if in.TriggerEvents != nil { + in, out := &in.TriggerEvents, &out.TriggerEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TriggerName != nil { + in, out := &in.TriggerName, &out.TriggerName + *out = new(string) + **out = **in + } + if in.TriggerTargetArn != nil { + in, out := &in.TriggerTargetArn, &out.TriggerTargetArn + *out = new(string) + **out = **in + } + if in.TriggerTargetArnRef != nil { + in, out := &in.TriggerTargetArnRef, &out.TriggerTargetArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TriggerTargetArnSelector != nil { + in, out := &in.TriggerTargetArnSelector, &out.TriggerTargetArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerConfigurationParameters. +func (in *TriggerConfigurationParameters) DeepCopy() *TriggerConfigurationParameters { + if in == nil { + return nil + } + out := new(TriggerConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/deploy/v1beta2/zz_generated.managed.go b/apis/deploy/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..2272dd773a --- /dev/null +++ b/apis/deploy/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this DeploymentConfig. +func (mg *DeploymentConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DeploymentConfig. 
+func (mg *DeploymentConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DeploymentConfig. +func (mg *DeploymentConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DeploymentConfig. +func (mg *DeploymentConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DeploymentConfig. +func (mg *DeploymentConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DeploymentConfig. +func (mg *DeploymentConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DeploymentConfig. +func (mg *DeploymentConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DeploymentConfig. +func (mg *DeploymentConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DeploymentConfig. +func (mg *DeploymentConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DeploymentConfig. +func (mg *DeploymentConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DeploymentConfig. +func (mg *DeploymentConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DeploymentConfig. +func (mg *DeploymentConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DeploymentGroup. 
+func (mg *DeploymentGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DeploymentGroup. +func (mg *DeploymentGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DeploymentGroup. +func (mg *DeploymentGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DeploymentGroup. +func (mg *DeploymentGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DeploymentGroup. +func (mg *DeploymentGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DeploymentGroup. +func (mg *DeploymentGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DeploymentGroup. +func (mg *DeploymentGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DeploymentGroup. +func (mg *DeploymentGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DeploymentGroup. +func (mg *DeploymentGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DeploymentGroup. +func (mg *DeploymentGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DeploymentGroup. +func (mg *DeploymentGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DeploymentGroup. 
+func (mg *DeploymentGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/deploy/v1beta2/zz_generated.managedlist.go b/apis/deploy/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..59872eb405 --- /dev/null +++ b/apis/deploy/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DeploymentConfigList. +func (l *DeploymentConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DeploymentGroupList. +func (l *DeploymentGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/deploy/v1beta2/zz_generated.resolvers.go b/apis/deploy/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..dba7f2e03d --- /dev/null +++ b/apis/deploy/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,311 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *DeploymentGroup) ResolveReferences( // ResolveReferences of this DeploymentGroup. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("deploy.aws.upbound.io", "v1beta1", "App", "AppList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AppName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AppNameRef, + Selector: mg.Spec.ForProvider.AppNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AppName") + } + mg.Spec.ForProvider.AppName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AppNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.EcsService != nil { + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EcsService.ClusterName), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.ForProvider.EcsService.ClusterNameRef, + Selector: mg.Spec.ForProvider.EcsService.ClusterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EcsService.ClusterName") + } + mg.Spec.ForProvider.EcsService.ClusterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EcsService.ClusterNameRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.EcsService != nil { + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Service", "ServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EcsService.ServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EcsService.ServiceNameRef, + Selector: mg.Spec.ForProvider.EcsService.ServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EcsService.ServiceName") + } + mg.Spec.ForProvider.EcsService.ServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EcsService.ServiceNameRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LoadBalancerInfo != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.LoadBalancerInfo.ELBInfo); i4++ { + { + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoadBalancerInfo.ELBInfo[i4].Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LoadBalancerInfo.ELBInfo[i4].NameRef, + Selector: 
mg.Spec.ForProvider.LoadBalancerInfo.ELBInfo[i4].NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoadBalancerInfo.ELBInfo[i4].Name") + } + mg.Spec.ForProvider.LoadBalancerInfo.ELBInfo[i4].Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LoadBalancerInfo.ELBInfo[i4].NameRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.LoadBalancerInfo != nil { + if mg.Spec.ForProvider.LoadBalancerInfo.TargetGroupPairInfo != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup); i5++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].Name), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].NameRef, + Selector: mg.Spec.ForProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].Name") + } + mg.Spec.ForProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].NameRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = 
r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ServiceRoleArnRef, + Selector: mg.Spec.ForProvider.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceRoleArn") + } + mg.Spec.ForProvider.ServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceRoleArnRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.TriggerConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TriggerConfiguration[i3].TriggerTargetArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.TriggerConfiguration[i3].TriggerTargetArnRef, + Selector: mg.Spec.ForProvider.TriggerConfiguration[i3].TriggerTargetArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TriggerConfiguration[i3].TriggerTargetArn") + } + mg.Spec.ForProvider.TriggerConfiguration[i3].TriggerTargetArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TriggerConfiguration[i3].TriggerTargetArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.EcsService != nil { + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.EcsService.ClusterName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EcsService.ClusterNameRef, + Selector: mg.Spec.InitProvider.EcsService.ClusterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EcsService.ClusterName") + } + mg.Spec.InitProvider.EcsService.ClusterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EcsService.ClusterNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.EcsService != nil { + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Service", "ServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EcsService.ServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EcsService.ServiceNameRef, + Selector: mg.Spec.InitProvider.EcsService.ServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EcsService.ServiceName") + } + mg.Spec.InitProvider.EcsService.ServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EcsService.ServiceNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LoadBalancerInfo != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.LoadBalancerInfo.ELBInfo); i4++ { + { + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoadBalancerInfo.ELBInfo[i4].Name), + Extract: 
reference.ExternalName(), + Reference: mg.Spec.InitProvider.LoadBalancerInfo.ELBInfo[i4].NameRef, + Selector: mg.Spec.InitProvider.LoadBalancerInfo.ELBInfo[i4].NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LoadBalancerInfo.ELBInfo[i4].Name") + } + mg.Spec.InitProvider.LoadBalancerInfo.ELBInfo[i4].Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LoadBalancerInfo.ELBInfo[i4].NameRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.LoadBalancerInfo != nil { + if mg.Spec.InitProvider.LoadBalancerInfo.TargetGroupPairInfo != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup); i5++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].Name), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].NameRef, + Selector: mg.Spec.InitProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].Name") + } + mg.Spec.InitProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LoadBalancerInfo.TargetGroupPairInfo.TargetGroup[i5].NameRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ServiceRoleArnRef, + Selector: mg.Spec.InitProvider.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceRoleArn") + } + mg.Spec.InitProvider.ServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceRoleArnRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.TriggerConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TriggerConfiguration[i3].TriggerTargetArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.TriggerConfiguration[i3].TriggerTargetArnRef, + Selector: mg.Spec.InitProvider.TriggerConfiguration[i3].TriggerTargetArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TriggerConfiguration[i3].TriggerTargetArn") + } + mg.Spec.InitProvider.TriggerConfiguration[i3].TriggerTargetArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TriggerConfiguration[i3].TriggerTargetArnRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/deploy/v1beta2/zz_groupversion_info.go b/apis/deploy/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..a23eea6569 --- /dev/null +++ b/apis/deploy/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// 
SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=deploy.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "deploy.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/devicefarm/v1beta1/zz_generated.conversion_hubs.go b/apis/devicefarm/v1beta1/zz_generated.conversion_hubs.go index 7c26d34777..2268d5b79d 100755 --- a/apis/devicefarm/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/devicefarm/v1beta1/zz_generated.conversion_hubs.go @@ -18,8 +18,5 @@ func (tr *NetworkProfile) Hub() {} // Hub marks this type as a conversion hub. func (tr *Project) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *TestGridProject) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Upload) Hub() {} diff --git a/apis/devicefarm/v1beta1/zz_generated.conversion_spokes.go b/apis/devicefarm/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..499f09ef0e --- /dev/null +++ b/apis/devicefarm/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this TestGridProject to the hub type. +func (tr *TestGridProject) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the TestGridProject type. +func (tr *TestGridProject) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/devicefarm/v1beta2/zz_generated.conversion_hubs.go b/apis/devicefarm/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..e02d73735f --- /dev/null +++ b/apis/devicefarm/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *TestGridProject) Hub() {} diff --git a/apis/devicefarm/v1beta2/zz_generated.deepcopy.go b/apis/devicefarm/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..167de91c9f --- /dev/null +++ b/apis/devicefarm/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,471 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestGridProject) DeepCopyInto(out *TestGridProject) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGridProject. +func (in *TestGridProject) DeepCopy() *TestGridProject { + if in == nil { + return nil + } + out := new(TestGridProject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TestGridProject) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TestGridProjectInitParameters) DeepCopyInto(out *TestGridProjectInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGridProjectInitParameters. +func (in *TestGridProjectInitParameters) DeepCopy() *TestGridProjectInitParameters { + if in == nil { + return nil + } + out := new(TestGridProjectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestGridProjectList) DeepCopyInto(out *TestGridProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TestGridProject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGridProjectList. +func (in *TestGridProjectList) DeepCopy() *TestGridProjectList { + if in == nil { + return nil + } + out := new(TestGridProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TestGridProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestGridProjectObservation) DeepCopyInto(out *TestGridProjectObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGridProjectObservation. +func (in *TestGridProjectObservation) DeepCopy() *TestGridProjectObservation { + if in == nil { + return nil + } + out := new(TestGridProjectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TestGridProjectParameters) DeepCopyInto(out *TestGridProjectParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGridProjectParameters. +func (in *TestGridProjectParameters) DeepCopy() *TestGridProjectParameters { + if in == nil { + return nil + } + out := new(TestGridProjectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestGridProjectSpec) DeepCopyInto(out *TestGridProjectSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGridProjectSpec. +func (in *TestGridProjectSpec) DeepCopy() *TestGridProjectSpec { + if in == nil { + return nil + } + out := new(TestGridProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TestGridProjectStatus) DeepCopyInto(out *TestGridProjectStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGridProjectStatus. +func (in *TestGridProjectStatus) DeepCopy() *TestGridProjectStatus { + if in == nil { + return nil + } + out := new(TestGridProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigInitParameters) DeepCopyInto(out *VPCConfigInitParameters) { + *out = *in + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if 
in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigInitParameters. +func (in *VPCConfigInitParameters) DeepCopy() *VPCConfigInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigObservation) DeepCopyInto(out *VPCConfigObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigObservation. +func (in *VPCConfigObservation) DeepCopy() *VPCConfigObservation { + if in == nil { + return nil + } + out := new(VPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigParameters) DeepCopyInto(out *VPCConfigParameters) { + *out = *in + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigParameters. 
+func (in *VPCConfigParameters) DeepCopy() *VPCConfigParameters { + if in == nil { + return nil + } + out := new(VPCConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/devicefarm/v1beta2/zz_generated.managed.go b/apis/devicefarm/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..6b79d5917a --- /dev/null +++ b/apis/devicefarm/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this TestGridProject. +func (mg *TestGridProject) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TestGridProject. +func (mg *TestGridProject) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TestGridProject. +func (mg *TestGridProject) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TestGridProject. +func (mg *TestGridProject) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TestGridProject. +func (mg *TestGridProject) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TestGridProject. +func (mg *TestGridProject) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TestGridProject. +func (mg *TestGridProject) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TestGridProject. 
+func (mg *TestGridProject) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TestGridProject. +func (mg *TestGridProject) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TestGridProject. +func (mg *TestGridProject) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TestGridProject. +func (mg *TestGridProject) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TestGridProject. +func (mg *TestGridProject) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/devicefarm/v1beta2/zz_generated.managedlist.go b/apis/devicefarm/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..acae15ea4e --- /dev/null +++ b/apis/devicefarm/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this TestGridProjectList. +func (l *TestGridProjectList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/devicefarm/v1beta2/zz_generated.resolvers.go b/apis/devicefarm/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..a0ee422f92 --- /dev/null +++ b/apis/devicefarm/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,158 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *TestGridProject) ResolveReferences( // ResolveReferences of this TestGridProject. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig.SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig.SecurityGroupIds") + } + mg.Spec.ForProvider.VPCConfig.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfig.SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list 
for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig.SubnetIds") + } + mg.Spec.ForProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCConfig.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VPCConfig.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCConfig.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig.VPCID") + } + mg.Spec.ForProvider.VPCConfig.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCConfig.VPCIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig.SecurityGroupIds), + Extract: 
reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig.SecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig.SecurityGroupIds") + } + mg.Spec.InitProvider.VPCConfig.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig.SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig.SubnetIds") + } + mg.Spec.InitProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCConfig.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VPCConfig.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCConfig.VPCIDSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig.VPCID") + } + mg.Spec.InitProvider.VPCConfig.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCConfig.VPCIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/devicefarm/v1beta2/zz_groupversion_info.go b/apis/devicefarm/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..d45a6d4d07 --- /dev/null +++ b/apis/devicefarm/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=devicefarm.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "devicefarm.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/devicefarm/v1beta2/zz_testgridproject_terraformed.go b/apis/devicefarm/v1beta2/zz_testgridproject_terraformed.go new file mode 100755 index 0000000000..c142ddb5c8 --- /dev/null +++ b/apis/devicefarm/v1beta2/zz_testgridproject_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TestGridProject +func (mg *TestGridProject) GetTerraformResourceType() string { + return "aws_devicefarm_test_grid_project" +} + +// GetConnectionDetailsMapping for this TestGridProject +func (tr *TestGridProject) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TestGridProject +func (tr *TestGridProject) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TestGridProject +func (tr *TestGridProject) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TestGridProject +func (tr *TestGridProject) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TestGridProject +func (tr *TestGridProject) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TestGridProject +func (tr *TestGridProject) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TestGridProject +func (tr *TestGridProject) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this TestGridProject +func (tr *TestGridProject) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this TestGridProject using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *TestGridProject) LateInitialize(attrs []byte) (bool, error) { + params := &TestGridProjectParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TestGridProject) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/devicefarm/v1beta2/zz_testgridproject_types.go b/apis/devicefarm/v1beta2/zz_testgridproject_types.go new file mode 100755 index 0000000000..4b8af8ab77 --- /dev/null +++ b/apis/devicefarm/v1beta2/zz_testgridproject_types.go @@ -0,0 +1,250 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TestGridProjectInitParameters struct { + + // Human-readable description of the project. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the Selenium testing project. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The VPC security groups and subnets that are attached to a project. See VPC Config below. + VPCConfig *VPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type TestGridProjectObservation struct { + + // The Amazon Resource Name of this Test Grid Project. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Human-readable description of the project. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Selenium testing project. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The VPC security groups and subnets that are attached to a project. See VPC Config below. + VPCConfig *VPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type TestGridProjectParameters struct { + + // Human-readable description of the project. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the Selenium testing project. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The VPC security groups and subnets that are attached to a project. See VPC Config below. + // +kubebuilder:validation:Optional + VPCConfig *VPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type VPCConfigInitParameters struct { + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // A list of VPC security group IDs in your Amazon VPC. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of VPC subnet IDs in your Amazon VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The ID of the Amazon VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type VPCConfigObservation struct { + + // A list of VPC security group IDs in your Amazon VPC. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of VPC subnet IDs in your Amazon VPC. 
+ // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The ID of the Amazon VPC. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCConfigParameters struct { + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // A list of VPC security group IDs in your Amazon VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of VPC subnet IDs in your Amazon VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The ID of the Amazon VPC. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +// TestGridProjectSpec defines the desired state of TestGridProject +type TestGridProjectSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TestGridProjectParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TestGridProjectInitParameters `json:"initProvider,omitempty"` +} + +// TestGridProjectStatus defines the observed state of TestGridProject. +type TestGridProjectStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TestGridProjectObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TestGridProject is the Schema for the TestGridProjects API. 
Provides a Devicefarm test_grid_project +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type TestGridProject struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec TestGridProjectSpec `json:"spec"` + Status TestGridProjectStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TestGridProjectList contains a list of TestGridProjects +type TestGridProjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TestGridProject `json:"items"` +} + +// Repository type metadata. +var ( + TestGridProject_Kind = "TestGridProject" + TestGridProject_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TestGridProject_Kind}.String() + TestGridProject_KindAPIVersion = TestGridProject_Kind + "." 
+ CRDGroupVersion.String() + TestGridProject_GroupVersionKind = CRDGroupVersion.WithKind(TestGridProject_Kind) +) + +func init() { + SchemeBuilder.Register(&TestGridProject{}, &TestGridProjectList{}) +} diff --git a/apis/dlm/v1beta1/zz_generated.conversion_spokes.go b/apis/dlm/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..a36190c683 --- /dev/null +++ b/apis/dlm/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this LifecyclePolicy to the hub type. +func (tr *LifecyclePolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LifecyclePolicy type. 
+func (tr *LifecyclePolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/dlm/v1beta2/zz_generated.conversion_hubs.go b/apis/dlm/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..77bec7ac64 --- /dev/null +++ b/apis/dlm/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *LifecyclePolicy) Hub() {} diff --git a/apis/dlm/v1beta2/zz_generated.deepcopy.go b/apis/dlm/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..5906c87053 --- /dev/null +++ b/apis/dlm/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2252 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.CrossRegionCopy != nil { + in, out := &in.CrossRegionCopy, &out.CrossRegionCopy + *out = make([]CrossRegionCopyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.CrossRegionCopy != nil { + in, out := &in.CrossRegionCopy, &out.CrossRegionCopy + *out = make([]CrossRegionCopyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.CrossRegionCopy != nil { + in, out := &in.CrossRegionCopy, &out.CrossRegionCopy + *out = make([]CrossRegionCopyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. +func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreateRuleInitParameters) DeepCopyInto(out *CreateRuleInitParameters) { + *out = *in + if in.CronExpression != nil { + in, out := &in.CronExpression, &out.CronExpression + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Times != nil { + in, out := &in.Times, &out.Times + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateRuleInitParameters. +func (in *CreateRuleInitParameters) DeepCopy() *CreateRuleInitParameters { + if in == nil { + return nil + } + out := new(CreateRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CreateRuleObservation) DeepCopyInto(out *CreateRuleObservation) { + *out = *in + if in.CronExpression != nil { + in, out := &in.CronExpression, &out.CronExpression + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Times != nil { + in, out := &in.Times, &out.Times + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateRuleObservation. +func (in *CreateRuleObservation) DeepCopy() *CreateRuleObservation { + if in == nil { + return nil + } + out := new(CreateRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CreateRuleParameters) DeepCopyInto(out *CreateRuleParameters) { + *out = *in + if in.CronExpression != nil { + in, out := &in.CronExpression, &out.CronExpression + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Times != nil { + in, out := &in.Times, &out.Times + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateRuleParameters. +func (in *CreateRuleParameters) DeepCopy() *CreateRuleParameters { + if in == nil { + return nil + } + out := new(CreateRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossRegionCopyInitParameters) DeepCopyInto(out *CrossRegionCopyInitParameters) { + *out = *in + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetainRule != nil { + in, out := &in.RetainRule, &out.RetainRule + *out = new(RetainRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossRegionCopyInitParameters. 
+func (in *CrossRegionCopyInitParameters) DeepCopy() *CrossRegionCopyInitParameters { + if in == nil { + return nil + } + out := new(CrossRegionCopyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossRegionCopyObservation) DeepCopyInto(out *CrossRegionCopyObservation) { + *out = *in + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RetainRule != nil { + in, out := &in.RetainRule, &out.RetainRule + *out = new(RetainRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossRegionCopyObservation. +func (in *CrossRegionCopyObservation) DeepCopy() *CrossRegionCopyObservation { + if in == nil { + return nil + } + out := new(CrossRegionCopyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossRegionCopyParameters) DeepCopyInto(out *CrossRegionCopyParameters) { + *out = *in + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RetainRule != nil { + in, out := &in.RetainRule, &out.RetainRule + *out = new(RetainRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossRegionCopyParameters. 
+func (in *CrossRegionCopyParameters) DeepCopy() *CrossRegionCopyParameters { + if in == nil { + return nil + } + out := new(CrossRegionCopyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossRegionCopyRuleInitParameters) DeepCopyInto(out *CrossRegionCopyRuleInitParameters) { + *out = *in + if in.CmkArn != nil { + in, out := &in.CmkArn, &out.CmkArn + *out = new(string) + **out = **in + } + if in.CmkArnRef != nil { + in, out := &in.CmkArnRef, &out.CmkArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CmkArnSelector != nil { + in, out := &in.CmkArnSelector, &out.CmkArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CopyTags != nil { + in, out := &in.CopyTags, &out.CopyTags + *out = new(bool) + **out = **in + } + if in.DeprecateRule != nil { + in, out := &in.DeprecateRule, &out.DeprecateRule + *out = new(DeprecateRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.RetainRule != nil { + in, out := &in.RetainRule, &out.RetainRule + *out = new(CrossRegionCopyRuleRetainRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossRegionCopyRuleInitParameters. +func (in *CrossRegionCopyRuleInitParameters) DeepCopy() *CrossRegionCopyRuleInitParameters { + if in == nil { + return nil + } + out := new(CrossRegionCopyRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossRegionCopyRuleObservation) DeepCopyInto(out *CrossRegionCopyRuleObservation) { + *out = *in + if in.CmkArn != nil { + in, out := &in.CmkArn, &out.CmkArn + *out = new(string) + **out = **in + } + if in.CopyTags != nil { + in, out := &in.CopyTags, &out.CopyTags + *out = new(bool) + **out = **in + } + if in.DeprecateRule != nil { + in, out := &in.DeprecateRule, &out.DeprecateRule + *out = new(DeprecateRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.RetainRule != nil { + in, out := &in.RetainRule, &out.RetainRule + *out = new(CrossRegionCopyRuleRetainRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossRegionCopyRuleObservation. +func (in *CrossRegionCopyRuleObservation) DeepCopy() *CrossRegionCopyRuleObservation { + if in == nil { + return nil + } + out := new(CrossRegionCopyRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossRegionCopyRuleParameters) DeepCopyInto(out *CrossRegionCopyRuleParameters) { + *out = *in + if in.CmkArn != nil { + in, out := &in.CmkArn, &out.CmkArn + *out = new(string) + **out = **in + } + if in.CmkArnRef != nil { + in, out := &in.CmkArnRef, &out.CmkArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CmkArnSelector != nil { + in, out := &in.CmkArnSelector, &out.CmkArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CopyTags != nil { + in, out := &in.CopyTags, &out.CopyTags + *out = new(bool) + **out = **in + } + if in.DeprecateRule != nil { + in, out := &in.DeprecateRule, &out.DeprecateRule + *out = new(DeprecateRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.RetainRule != nil { + in, out := &in.RetainRule, &out.RetainRule + *out = new(CrossRegionCopyRuleRetainRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossRegionCopyRuleParameters. +func (in *CrossRegionCopyRuleParameters) DeepCopy() *CrossRegionCopyRuleParameters { + if in == nil { + return nil + } + out := new(CrossRegionCopyRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossRegionCopyRuleRetainRuleInitParameters) DeepCopyInto(out *CrossRegionCopyRuleRetainRuleInitParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossRegionCopyRuleRetainRuleInitParameters. +func (in *CrossRegionCopyRuleRetainRuleInitParameters) DeepCopy() *CrossRegionCopyRuleRetainRuleInitParameters { + if in == nil { + return nil + } + out := new(CrossRegionCopyRuleRetainRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossRegionCopyRuleRetainRuleObservation) DeepCopyInto(out *CrossRegionCopyRuleRetainRuleObservation) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossRegionCopyRuleRetainRuleObservation. +func (in *CrossRegionCopyRuleRetainRuleObservation) DeepCopy() *CrossRegionCopyRuleRetainRuleObservation { + if in == nil { + return nil + } + out := new(CrossRegionCopyRuleRetainRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossRegionCopyRuleRetainRuleParameters) DeepCopyInto(out *CrossRegionCopyRuleRetainRuleParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossRegionCopyRuleRetainRuleParameters. +func (in *CrossRegionCopyRuleRetainRuleParameters) DeepCopy() *CrossRegionCopyRuleRetainRuleParameters { + if in == nil { + return nil + } + out := new(CrossRegionCopyRuleRetainRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeprecateRuleInitParameters) DeepCopyInto(out *DeprecateRuleInitParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecateRuleInitParameters. +func (in *DeprecateRuleInitParameters) DeepCopy() *DeprecateRuleInitParameters { + if in == nil { + return nil + } + out := new(DeprecateRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeprecateRuleObservation) DeepCopyInto(out *DeprecateRuleObservation) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecateRuleObservation. +func (in *DeprecateRuleObservation) DeepCopy() *DeprecateRuleObservation { + if in == nil { + return nil + } + out := new(DeprecateRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeprecateRuleParameters) DeepCopyInto(out *DeprecateRuleParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecateRuleParameters. +func (in *DeprecateRuleParameters) DeepCopy() *DeprecateRuleParameters { + if in == nil { + return nil + } + out := new(DeprecateRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationInitParameters) DeepCopyInto(out *EncryptionConfigurationInitParameters) { + *out = *in + if in.CmkArn != nil { + in, out := &in.CmkArn, &out.CmkArn + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationInitParameters. 
+func (in *EncryptionConfigurationInitParameters) DeepCopy() *EncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationObservation) DeepCopyInto(out *EncryptionConfigurationObservation) { + *out = *in + if in.CmkArn != nil { + in, out := &in.CmkArn, &out.CmkArn + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationObservation. +func (in *EncryptionConfigurationObservation) DeepCopy() *EncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationParameters) DeepCopyInto(out *EncryptionConfigurationParameters) { + *out = *in + if in.CmkArn != nil { + in, out := &in.CmkArn, &out.CmkArn + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationParameters. +func (in *EncryptionConfigurationParameters) DeepCopy() *EncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventSourceInitParameters) DeepCopyInto(out *EventSourceInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(ParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceInitParameters. +func (in *EventSourceInitParameters) DeepCopy() *EventSourceInitParameters { + if in == nil { + return nil + } + out := new(EventSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSourceObservation) DeepCopyInto(out *EventSourceObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(ParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceObservation. +func (in *EventSourceObservation) DeepCopy() *EventSourceObservation { + if in == nil { + return nil + } + out := new(EventSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSourceParameters) DeepCopyInto(out *EventSourceParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(ParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceParameters. 
+func (in *EventSourceParameters) DeepCopy() *EventSourceParameters { + if in == nil { + return nil + } + out := new(EventSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FastRestoreRuleInitParameters) DeepCopyInto(out *FastRestoreRuleInitParameters) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FastRestoreRuleInitParameters. +func (in *FastRestoreRuleInitParameters) DeepCopy() *FastRestoreRuleInitParameters { + if in == nil { + return nil + } + out := new(FastRestoreRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FastRestoreRuleObservation) DeepCopyInto(out *FastRestoreRuleObservation) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FastRestoreRuleObservation. +func (in *FastRestoreRuleObservation) DeepCopy() *FastRestoreRuleObservation { + if in == nil { + return nil + } + out := new(FastRestoreRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FastRestoreRuleParameters) DeepCopyInto(out *FastRestoreRuleParameters) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FastRestoreRuleParameters. 
+func (in *FastRestoreRuleParameters) DeepCopy() *FastRestoreRuleParameters { + if in == nil { + return nil + } + out := new(FastRestoreRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecyclePolicy) DeepCopyInto(out *LifecyclePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicy. +func (in *LifecyclePolicy) DeepCopy() *LifecyclePolicy { + if in == nil { + return nil + } + out := new(LifecyclePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LifecyclePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LifecyclePolicyInitParameters) DeepCopyInto(out *LifecyclePolicyInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PolicyDetails != nil { + in, out := &in.PolicyDetails, &out.PolicyDetails + *out = new(PolicyDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicyInitParameters. +func (in *LifecyclePolicyInitParameters) DeepCopy() *LifecyclePolicyInitParameters { + if in == nil { + return nil + } + out := new(LifecyclePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LifecyclePolicyList) DeepCopyInto(out *LifecyclePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LifecyclePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicyList. +func (in *LifecyclePolicyList) DeepCopy() *LifecyclePolicyList { + if in == nil { + return nil + } + out := new(LifecyclePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LifecyclePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecyclePolicyObservation) DeepCopyInto(out *LifecyclePolicyObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PolicyDetails != nil { + in, out := &in.PolicyDetails, &out.PolicyDetails + *out = new(PolicyDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, 
&outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicyObservation. +func (in *LifecyclePolicyObservation) DeepCopy() *LifecyclePolicyObservation { + if in == nil { + return nil + } + out := new(LifecyclePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecyclePolicyParameters) DeepCopyInto(out *LifecyclePolicyParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PolicyDetails != nil { + in, out := &in.PolicyDetails, &out.PolicyDetails + *out = new(PolicyDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + 
var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicyParameters. +func (in *LifecyclePolicyParameters) DeepCopy() *LifecyclePolicyParameters { + if in == nil { + return nil + } + out := new(LifecyclePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecyclePolicySpec) DeepCopyInto(out *LifecyclePolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicySpec. +func (in *LifecyclePolicySpec) DeepCopy() *LifecyclePolicySpec { + if in == nil { + return nil + } + out := new(LifecyclePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecyclePolicyStatus) DeepCopyInto(out *LifecyclePolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicyStatus. +func (in *LifecyclePolicyStatus) DeepCopy() *LifecyclePolicyStatus { + if in == nil { + return nil + } + out := new(LifecyclePolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParametersInitParameters) DeepCopyInto(out *ParametersInitParameters) { + *out = *in + if in.DescriptionRegex != nil { + in, out := &in.DescriptionRegex, &out.DescriptionRegex + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.SnapshotOwner != nil { + in, out := &in.SnapshotOwner, &out.SnapshotOwner + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersInitParameters. +func (in *ParametersInitParameters) DeepCopy() *ParametersInitParameters { + if in == nil { + return nil + } + out := new(ParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersObservation) DeepCopyInto(out *ParametersObservation) { + *out = *in + if in.DescriptionRegex != nil { + in, out := &in.DescriptionRegex, &out.DescriptionRegex + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.SnapshotOwner != nil { + in, out := &in.SnapshotOwner, &out.SnapshotOwner + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersObservation. +func (in *ParametersObservation) DeepCopy() *ParametersObservation { + if in == nil { + return nil + } + out := new(ParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParametersParameters) DeepCopyInto(out *ParametersParameters) { + *out = *in + if in.DescriptionRegex != nil { + in, out := &in.DescriptionRegex, &out.DescriptionRegex + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.SnapshotOwner != nil { + in, out := &in.SnapshotOwner, &out.SnapshotOwner + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersParameters. +func (in *ParametersParameters) DeepCopy() *ParametersParameters { + if in == nil { + return nil + } + out := new(ParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyDetailsInitParameters) DeepCopyInto(out *PolicyDetailsInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventSource != nil { + in, out := &in.EventSource, &out.EventSource + *out = new(EventSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(PolicyDetailsParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.ResourceLocations != nil { + in, out := &in.ResourceLocations, &out.ResourceLocations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, 
len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]ScheduleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetTags != nil { + in, out := &in.TargetTags, &out.TargetTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyDetailsInitParameters. +func (in *PolicyDetailsInitParameters) DeepCopy() *PolicyDetailsInitParameters { + if in == nil { + return nil + } + out := new(PolicyDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyDetailsObservation) DeepCopyInto(out *PolicyDetailsObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionObservation) + (*in).DeepCopyInto(*out) + } + if in.EventSource != nil { + in, out := &in.EventSource, &out.EventSource + *out = new(EventSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(PolicyDetailsParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.ResourceLocations != nil { + in, out := &in.ResourceLocations, &out.ResourceLocations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]ScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetTags != nil { + in, out := &in.TargetTags, &out.TargetTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyDetailsObservation. 
+func (in *PolicyDetailsObservation) DeepCopy() *PolicyDetailsObservation { + if in == nil { + return nil + } + out := new(PolicyDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyDetailsParameters) DeepCopyInto(out *PolicyDetailsParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionParameters) + (*in).DeepCopyInto(*out) + } + if in.EventSource != nil { + in, out := &in.EventSource, &out.EventSource + *out = new(EventSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(PolicyDetailsParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.ResourceLocations != nil { + in, out := &in.ResourceLocations, &out.ResourceLocations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]ScheduleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetTags != nil { + in, out := &in.TargetTags, &out.TargetTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new PolicyDetailsParameters. +func (in *PolicyDetailsParameters) DeepCopy() *PolicyDetailsParameters { + if in == nil { + return nil + } + out := new(PolicyDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyDetailsParametersInitParameters) DeepCopyInto(out *PolicyDetailsParametersInitParameters) { + *out = *in + if in.ExcludeBootVolume != nil { + in, out := &in.ExcludeBootVolume, &out.ExcludeBootVolume + *out = new(bool) + **out = **in + } + if in.NoReboot != nil { + in, out := &in.NoReboot, &out.NoReboot + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyDetailsParametersInitParameters. +func (in *PolicyDetailsParametersInitParameters) DeepCopy() *PolicyDetailsParametersInitParameters { + if in == nil { + return nil + } + out := new(PolicyDetailsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyDetailsParametersObservation) DeepCopyInto(out *PolicyDetailsParametersObservation) { + *out = *in + if in.ExcludeBootVolume != nil { + in, out := &in.ExcludeBootVolume, &out.ExcludeBootVolume + *out = new(bool) + **out = **in + } + if in.NoReboot != nil { + in, out := &in.NoReboot, &out.NoReboot + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyDetailsParametersObservation. 
+func (in *PolicyDetailsParametersObservation) DeepCopy() *PolicyDetailsParametersObservation { + if in == nil { + return nil + } + out := new(PolicyDetailsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyDetailsParametersParameters) DeepCopyInto(out *PolicyDetailsParametersParameters) { + *out = *in + if in.ExcludeBootVolume != nil { + in, out := &in.ExcludeBootVolume, &out.ExcludeBootVolume + *out = new(bool) + **out = **in + } + if in.NoReboot != nil { + in, out := &in.NoReboot, &out.NoReboot + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyDetailsParametersParameters. +func (in *PolicyDetailsParametersParameters) DeepCopy() *PolicyDetailsParametersParameters { + if in == nil { + return nil + } + out := new(PolicyDetailsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetainRuleInitParameters) DeepCopyInto(out *RetainRuleInitParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetainRuleInitParameters. +func (in *RetainRuleInitParameters) DeepCopy() *RetainRuleInitParameters { + if in == nil { + return nil + } + out := new(RetainRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetainRuleObservation) DeepCopyInto(out *RetainRuleObservation) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetainRuleObservation. +func (in *RetainRuleObservation) DeepCopy() *RetainRuleObservation { + if in == nil { + return nil + } + out := new(RetainRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetainRuleParameters) DeepCopyInto(out *RetainRuleParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetainRuleParameters. +func (in *RetainRuleParameters) DeepCopy() *RetainRuleParameters { + if in == nil { + return nil + } + out := new(RetainRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleDeprecateRuleInitParameters) DeepCopyInto(out *ScheduleDeprecateRuleInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleDeprecateRuleInitParameters. +func (in *ScheduleDeprecateRuleInitParameters) DeepCopy() *ScheduleDeprecateRuleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleDeprecateRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleDeprecateRuleObservation) DeepCopyInto(out *ScheduleDeprecateRuleObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleDeprecateRuleObservation. +func (in *ScheduleDeprecateRuleObservation) DeepCopy() *ScheduleDeprecateRuleObservation { + if in == nil { + return nil + } + out := new(ScheduleDeprecateRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleDeprecateRuleParameters) DeepCopyInto(out *ScheduleDeprecateRuleParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleDeprecateRuleParameters. +func (in *ScheduleDeprecateRuleParameters) DeepCopy() *ScheduleDeprecateRuleParameters { + if in == nil { + return nil + } + out := new(ScheduleDeprecateRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.CopyTags != nil { + in, out := &in.CopyTags, &out.CopyTags + *out = new(bool) + **out = **in + } + if in.CreateRule != nil { + in, out := &in.CreateRule, &out.CreateRule + *out = new(CreateRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CrossRegionCopyRule != nil { + in, out := &in.CrossRegionCopyRule, &out.CrossRegionCopyRule + *out = make([]CrossRegionCopyRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeprecateRule != nil { + in, out := &in.DeprecateRule, &out.DeprecateRule + *out = new(ScheduleDeprecateRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FastRestoreRule != nil { + in, out := &in.FastRestoreRule, &out.FastRestoreRule + *out = new(FastRestoreRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetainRule != nil { + in, out := &in.RetainRule, &out.RetainRule + *out = new(ScheduleRetainRuleInitParameters) 
+ (*in).DeepCopyInto(*out) + } + if in.ShareRule != nil { + in, out := &in.ShareRule, &out.ShareRule + *out = new(ShareRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TagsToAdd != nil { + in, out := &in.TagsToAdd, &out.TagsToAdd + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VariableTags != nil { + in, out := &in.VariableTags, &out.VariableTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. +func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.CopyTags != nil { + in, out := &in.CopyTags, &out.CopyTags + *out = new(bool) + **out = **in + } + if in.CreateRule != nil { + in, out := &in.CreateRule, &out.CreateRule + *out = new(CreateRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.CrossRegionCopyRule != nil { + in, out := &in.CrossRegionCopyRule, &out.CrossRegionCopyRule + *out = make([]CrossRegionCopyRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeprecateRule != nil { + in, out := &in.DeprecateRule, &out.DeprecateRule + *out = new(ScheduleDeprecateRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.FastRestoreRule != nil { + in, out := &in.FastRestoreRule, &out.FastRestoreRule + *out = new(FastRestoreRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetainRule != nil { + in, out := &in.RetainRule, &out.RetainRule + *out = new(ScheduleRetainRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.ShareRule != nil { + in, out := &in.ShareRule, &out.ShareRule + *out = new(ShareRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.TagsToAdd != nil { + in, out := &in.TagsToAdd, &out.TagsToAdd + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VariableTags != nil { + in, out := &in.VariableTags, &out.VariableTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.CopyTags != nil { + in, out := &in.CopyTags, &out.CopyTags + *out = new(bool) + **out = **in + } + if in.CreateRule != nil { + in, out := &in.CreateRule, &out.CreateRule + *out = new(CreateRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.CrossRegionCopyRule != nil { + in, out := &in.CrossRegionCopyRule, &out.CrossRegionCopyRule + *out = make([]CrossRegionCopyRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeprecateRule != nil { + in, out := &in.DeprecateRule, &out.DeprecateRule + *out = new(ScheduleDeprecateRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.FastRestoreRule != nil { + in, out := &in.FastRestoreRule, &out.FastRestoreRule + *out = new(FastRestoreRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetainRule != nil { + in, out := &in.RetainRule, &out.RetainRule + *out = new(ScheduleRetainRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.ShareRule != nil { + in, out := &in.ShareRule, &out.ShareRule + *out = new(ShareRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.TagsToAdd != nil { + in, out := &in.TagsToAdd, &out.TagsToAdd + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VariableTags != nil { + in, out := &in.VariableTags, 
&out.VariableTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleRetainRuleInitParameters) DeepCopyInto(out *ScheduleRetainRuleInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleRetainRuleInitParameters. +func (in *ScheduleRetainRuleInitParameters) DeepCopy() *ScheduleRetainRuleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleRetainRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleRetainRuleObservation) DeepCopyInto(out *ScheduleRetainRuleObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleRetainRuleObservation. +func (in *ScheduleRetainRuleObservation) DeepCopy() *ScheduleRetainRuleObservation { + if in == nil { + return nil + } + out := new(ScheduleRetainRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleRetainRuleParameters) DeepCopyInto(out *ScheduleRetainRuleParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.IntervalUnit != nil { + in, out := &in.IntervalUnit, &out.IntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleRetainRuleParameters. +func (in *ScheduleRetainRuleParameters) DeepCopy() *ScheduleRetainRuleParameters { + if in == nil { + return nil + } + out := new(ScheduleRetainRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShareRuleInitParameters) DeepCopyInto(out *ShareRuleInitParameters) { + *out = *in + if in.TargetAccounts != nil { + in, out := &in.TargetAccounts, &out.TargetAccounts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UnshareInterval != nil { + in, out := &in.UnshareInterval, &out.UnshareInterval + *out = new(float64) + **out = **in + } + if in.UnshareIntervalUnit != nil { + in, out := &in.UnshareIntervalUnit, &out.UnshareIntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShareRuleInitParameters. +func (in *ShareRuleInitParameters) DeepCopy() *ShareRuleInitParameters { + if in == nil { + return nil + } + out := new(ShareRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShareRuleObservation) DeepCopyInto(out *ShareRuleObservation) { + *out = *in + if in.TargetAccounts != nil { + in, out := &in.TargetAccounts, &out.TargetAccounts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UnshareInterval != nil { + in, out := &in.UnshareInterval, &out.UnshareInterval + *out = new(float64) + **out = **in + } + if in.UnshareIntervalUnit != nil { + in, out := &in.UnshareIntervalUnit, &out.UnshareIntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShareRuleObservation. 
+func (in *ShareRuleObservation) DeepCopy() *ShareRuleObservation { + if in == nil { + return nil + } + out := new(ShareRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShareRuleParameters) DeepCopyInto(out *ShareRuleParameters) { + *out = *in + if in.TargetAccounts != nil { + in, out := &in.TargetAccounts, &out.TargetAccounts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UnshareInterval != nil { + in, out := &in.UnshareInterval, &out.UnshareInterval + *out = new(float64) + **out = **in + } + if in.UnshareIntervalUnit != nil { + in, out := &in.UnshareIntervalUnit, &out.UnshareIntervalUnit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShareRuleParameters. +func (in *ShareRuleParameters) DeepCopy() *ShareRuleParameters { + if in == nil { + return nil + } + out := new(ShareRuleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dlm/v1beta2/zz_generated.managed.go b/apis/dlm/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..041272d6f3 --- /dev/null +++ b/apis/dlm/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this LifecyclePolicy. +func (mg *LifecyclePolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LifecyclePolicy. 
+func (mg *LifecyclePolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LifecyclePolicy. +func (mg *LifecyclePolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LifecyclePolicy. +func (mg *LifecyclePolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LifecyclePolicy. +func (mg *LifecyclePolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LifecyclePolicy. +func (mg *LifecyclePolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LifecyclePolicy. +func (mg *LifecyclePolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LifecyclePolicy. +func (mg *LifecyclePolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LifecyclePolicy. +func (mg *LifecyclePolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LifecyclePolicy. +func (mg *LifecyclePolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LifecyclePolicy. +func (mg *LifecyclePolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LifecyclePolicy. 
+func (mg *LifecyclePolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dlm/v1beta2/zz_generated.managedlist.go b/apis/dlm/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..a403f8e9fd --- /dev/null +++ b/apis/dlm/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this LifecyclePolicyList. +func (l *LifecyclePolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dlm/v1beta2/zz_generated.resolvers.go b/apis/dlm/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..5009bd87d1 --- /dev/null +++ b/apis/dlm/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,120 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *LifecyclePolicy) ResolveReferences( // ResolveReferences of this LifecyclePolicy. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ExecutionRoleArnRef, + Selector: mg.Spec.ForProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExecutionRoleArn") + } + mg.Spec.ForProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExecutionRoleArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.PolicyDetails != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.PolicyDetails.Schedule); i4++ { + for i5 := 0; i5 < len(mg.Spec.ForProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule); i5++ { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArnRef, + Selector: mg.Spec.ForProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArn") + } + mg.Spec.ForProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArnRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ExecutionRoleArnRef, + Selector: mg.Spec.InitProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExecutionRoleArn") + } + mg.Spec.InitProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExecutionRoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.PolicyDetails != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.PolicyDetails.Schedule); i4++ { + for i5 := 0; i5 < len(mg.Spec.InitProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule); i5++ { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArnRef, + Selector: 
mg.Spec.InitProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArn") + } + mg.Spec.InitProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PolicyDetails.Schedule[i4].CrossRegionCopyRule[i5].CmkArnRef = rsp.ResolvedReference + + } + } + } + + return nil +} diff --git a/apis/dlm/v1beta2/zz_groupversion_info.go b/apis/dlm/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..0cd2f4bd0c --- /dev/null +++ b/apis/dlm/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=dlm.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "dlm.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/dlm/v1beta2/zz_lifecyclepolicy_terraformed.go b/apis/dlm/v1beta2/zz_lifecyclepolicy_terraformed.go new file mode 100755 index 0000000000..620af81f97 --- /dev/null +++ b/apis/dlm/v1beta2/zz_lifecyclepolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LifecyclePolicy +func (mg *LifecyclePolicy) GetTerraformResourceType() string { + return "aws_dlm_lifecycle_policy" +} + +// GetConnectionDetailsMapping for this LifecyclePolicy +func (tr *LifecyclePolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LifecyclePolicy +func (tr *LifecyclePolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LifecyclePolicy +func (tr *LifecyclePolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LifecyclePolicy +func (tr *LifecyclePolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LifecyclePolicy +func (tr *LifecyclePolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LifecyclePolicy +func (tr *LifecyclePolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LifecyclePolicy +func (tr *LifecyclePolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LifecyclePolicy +func (tr *LifecyclePolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LifecyclePolicy using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *LifecyclePolicy) LateInitialize(attrs []byte) (bool, error) { + params := &LifecyclePolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LifecyclePolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dlm/v1beta2/zz_lifecyclepolicy_types.go b/apis/dlm/v1beta2/zz_lifecyclepolicy_types.go new file mode 100755 index 0000000000..6e2b389dc2 --- /dev/null +++ b/apis/dlm/v1beta2/zz_lifecyclepolicy_types.go @@ -0,0 +1,984 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // The rule for copying shared snapshots across Regions. See the cross_region_copy configuration block. + CrossRegionCopy []CrossRegionCopyInitParameters `json:"crossRegionCopy,omitempty" tf:"cross_region_copy,omitempty"` + + // A descriptive name for the action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ActionObservation struct { + + // The rule for copying shared snapshots across Regions. See the cross_region_copy configuration block. + CrossRegionCopy []CrossRegionCopyObservation `json:"crossRegionCopy,omitempty" tf:"cross_region_copy,omitempty"` + + // A descriptive name for the action. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ActionParameters struct { + + // The rule for copying shared snapshots across Regions. See the cross_region_copy configuration block. + // +kubebuilder:validation:Optional + CrossRegionCopy []CrossRegionCopyParameters `json:"crossRegionCopy" tf:"cross_region_copy,omitempty"` + + // A descriptive name for the action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type CreateRuleInitParameters struct { + + // The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. Conflicts with interval, interval_unit, and times. + CronExpression *string `json:"cronExpression,omitempty" tf:"cron_expression,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` + + // Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify CLOUD. To create snapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL. If you omit this parameter, CLOUD is used by default. If the policy targets resources in an AWS Region, then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost. Valid values are CLOUD and OUTPOST_LOCAL. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of times in 24 hour clock format that sets when the lifecycle policy should be evaluated. Max of 1. Conflicts with cron_expression. Must be set if interval is set. + Times []*string `json:"times,omitempty" tf:"times,omitempty"` +} + +type CreateRuleObservation struct { + + // The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. Conflicts with interval, interval_unit, and times. + CronExpression *string `json:"cronExpression,omitempty" tf:"cron_expression,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` + + // Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify CLOUD. To create snapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL. If you omit this parameter, CLOUD is used by default. If the policy targets resources in an AWS Region, then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost. Valid values are CLOUD and OUTPOST_LOCAL. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of times in 24 hour clock format that sets when the lifecycle policy should be evaluated. Max of 1. Conflicts with cron_expression. Must be set if interval is set. 
+ Times []*string `json:"times,omitempty" tf:"times,omitempty"` +} + +type CreateRuleParameters struct { + + // The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. Conflicts with interval, interval_unit, and times. + // +kubebuilder:validation:Optional + CronExpression *string `json:"cronExpression,omitempty" tf:"cron_expression,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + // +kubebuilder:validation:Optional + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` + + // Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify CLOUD. To create snapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL. If you omit this parameter, CLOUD is used by default. If the policy targets resources in an AWS Region, then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost. Valid values are CLOUD and OUTPOST_LOCAL. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of times in 24 hour clock format that sets when the lifecycle policy should be evaluated. Max of 1. Conflicts with cron_expression. Must be set if interval is set. 
+ // +kubebuilder:validation:Optional + Times []*string `json:"times,omitempty" tf:"times,omitempty"` +} + +type CrossRegionCopyInitParameters struct { + + // The encryption settings for the copied snapshot. See the encryption_configuration block. Max of 1 per action. + EncryptionConfiguration *EncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Specifies the retention rule for cross-Region snapshot copies. See the retain_rule block. Max of 1 per action. + RetainRule *RetainRuleInitParameters `json:"retainRule,omitempty" tf:"retain_rule,omitempty"` + + // The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type CrossRegionCopyObservation struct { + + // The encryption settings for the copied snapshot. See the encryption_configuration block. Max of 1 per action. + EncryptionConfiguration *EncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Specifies the retention rule for cross-Region snapshot copies. See the retain_rule block. Max of 1 per action. + RetainRule *RetainRuleObservation `json:"retainRule,omitempty" tf:"retain_rule,omitempty"` + + // The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type CrossRegionCopyParameters struct { + + // The encryption settings for the copied snapshot. See the encryption_configuration block. Max of 1 per action. + // +kubebuilder:validation:Optional + EncryptionConfiguration *EncryptionConfigurationParameters `json:"encryptionConfiguration" tf:"encryption_configuration,omitempty"` + + // Specifies the retention rule for cross-Region snapshot copies. See the retain_rule block. Max of 1 per action. 
+ // +kubebuilder:validation:Optional + RetainRule *RetainRuleParameters `json:"retainRule,omitempty" tf:"retain_rule,omitempty"` + + // The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. + // +kubebuilder:validation:Optional + Target *string `json:"target" tf:"target,omitempty"` +} + +type CrossRegionCopyRuleInitParameters struct { + + // The Amazon Resource Name (ARN) of the AWS KMS key to use for EBS encryption. If this parameter is not specified, the default KMS key for the account is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + CmkArn *string `json:"cmkArn,omitempty" tf:"cmk_arn,omitempty"` + + // Reference to a Key in kms to populate cmkArn. + // +kubebuilder:validation:Optional + CmkArnRef *v1.Reference `json:"cmkArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate cmkArn. + // +kubebuilder:validation:Optional + CmkArnSelector *v1.Selector `json:"cmkArnSelector,omitempty" tf:"-"` + + // Copy all user-defined tags on a source volume to snapshots of the volume created by this policy. + CopyTags *bool `json:"copyTags,omitempty" tf:"copy_tags,omitempty"` + + // See the deprecate_rule block. Max of 1 per schedule. + DeprecateRule *DeprecateRuleInitParameters `json:"deprecateRule,omitempty" tf:"deprecate_rule,omitempty"` + + // To encrypt a copy of an unencrypted snapshot when encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or when encryption by default is not enabled. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Specifies the retention rule for cross-Region snapshot copies. See the retain_rule block. Max of 1 per action. 
+ RetainRule *CrossRegionCopyRuleRetainRuleInitParameters `json:"retainRule,omitempty" tf:"retain_rule,omitempty"` + + // The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type CrossRegionCopyRuleObservation struct { + + // The Amazon Resource Name (ARN) of the AWS KMS key to use for EBS encryption. If this parameter is not specified, the default KMS key for the account is used. + CmkArn *string `json:"cmkArn,omitempty" tf:"cmk_arn,omitempty"` + + // Copy all user-defined tags on a source volume to snapshots of the volume created by this policy. + CopyTags *bool `json:"copyTags,omitempty" tf:"copy_tags,omitempty"` + + // See the deprecate_rule block. Max of 1 per schedule. + DeprecateRule *DeprecateRuleObservation `json:"deprecateRule,omitempty" tf:"deprecate_rule,omitempty"` + + // To encrypt a copy of an unencrypted snapshot when encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or when encryption by default is not enabled. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Specifies the retention rule for cross-Region snapshot copies. See the retain_rule block. Max of 1 per action. + RetainRule *CrossRegionCopyRuleRetainRuleObservation `json:"retainRule,omitempty" tf:"retain_rule,omitempty"` + + // The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type CrossRegionCopyRuleParameters struct { + + // The Amazon Resource Name (ARN) of the AWS KMS key to use for EBS encryption. If this parameter is not specified, the default KMS key for the account is used. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + CmkArn *string `json:"cmkArn,omitempty" tf:"cmk_arn,omitempty"` + + // Reference to a Key in kms to populate cmkArn. + // +kubebuilder:validation:Optional + CmkArnRef *v1.Reference `json:"cmkArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate cmkArn. + // +kubebuilder:validation:Optional + CmkArnSelector *v1.Selector `json:"cmkArnSelector,omitempty" tf:"-"` + + // Copy all user-defined tags on a source volume to snapshots of the volume created by this policy. + // +kubebuilder:validation:Optional + CopyTags *bool `json:"copyTags,omitempty" tf:"copy_tags,omitempty"` + + // See the deprecate_rule block. Max of 1 per schedule. + // +kubebuilder:validation:Optional + DeprecateRule *DeprecateRuleParameters `json:"deprecateRule,omitempty" tf:"deprecate_rule,omitempty"` + + // To encrypt a copy of an unencrypted snapshot when encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or when encryption by default is not enabled. + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted" tf:"encrypted,omitempty"` + + // Specifies the retention rule for cross-Region snapshot copies. See the retain_rule block. Max of 1 per action. + // +kubebuilder:validation:Optional + RetainRule *CrossRegionCopyRuleRetainRuleParameters `json:"retainRule,omitempty" tf:"retain_rule,omitempty"` + + // The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. + // +kubebuilder:validation:Optional + Target *string `json:"target" tf:"target,omitempty"` +} + +type CrossRegionCopyRuleRetainRuleInitParameters struct { + + // How often this lifecycle policy should be evaluated. 
1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type CrossRegionCopyRuleRetainRuleObservation struct { + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type CrossRegionCopyRuleRetainRuleParameters struct { + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + // +kubebuilder:validation:Optional + IntervalUnit *string `json:"intervalUnit" tf:"interval_unit,omitempty"` +} + +type DeprecateRuleInitParameters struct { + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. 
+ Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type DeprecateRuleObservation struct { + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type DeprecateRuleParameters struct { + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + // +kubebuilder:validation:Optional + IntervalUnit *string `json:"intervalUnit" tf:"interval_unit,omitempty"` +} + +type EncryptionConfigurationInitParameters struct { + + // The Amazon Resource Name (ARN) of the AWS KMS key to use for EBS encryption. If this parameter is not specified, the default KMS key for the account is used. 
+ CmkArn *string `json:"cmkArn,omitempty" tf:"cmk_arn,omitempty"` + + // To encrypt a copy of an unencrypted snapshot when encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or when encryption by default is not enabled. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` +} + +type EncryptionConfigurationObservation struct { + + // The Amazon Resource Name (ARN) of the AWS KMS key to use for EBS encryption. If this parameter is not specified, the default KMS key for the account is used. + CmkArn *string `json:"cmkArn,omitempty" tf:"cmk_arn,omitempty"` + + // To encrypt a copy of an unencrypted snapshot when encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or when encryption by default is not enabled. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` +} + +type EncryptionConfigurationParameters struct { + + // The Amazon Resource Name (ARN) of the AWS KMS key to use for EBS encryption. If this parameter is not specified, the default KMS key for the account is used. + // +kubebuilder:validation:Optional + CmkArn *string `json:"cmkArn,omitempty" tf:"cmk_arn,omitempty"` + + // To encrypt a copy of an unencrypted snapshot when encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or when encryption by default is not enabled. + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` +} + +type EventSourceInitParameters struct { + + // A set of optional parameters for snapshot and AMI lifecycle policies. See the parameters configuration block. + Parameters *ParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The source of the event. 
Currently only managed CloudWatch Events rules are supported. Valid values are MANAGED_CWE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EventSourceObservation struct { + + // A set of optional parameters for snapshot and AMI lifecycle policies. See the parameters configuration block. + Parameters *ParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The source of the event. Currently only managed CloudWatch Events rules are supported. Valid values are MANAGED_CWE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EventSourceParameters struct { + + // A set of optional parameters for snapshot and AMI lifecycle policies. See the parameters configuration block. + // +kubebuilder:validation:Optional + Parameters *ParametersParameters `json:"parameters" tf:"parameters,omitempty"` + + // The source of the event. Currently only managed CloudWatch Events rules are supported. Valid values are MANAGED_CWE. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type FastRestoreRuleInitParameters struct { + + // The Availability Zones in which to enable fast snapshot restore. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Specifies the number of oldest AMIs to deprecate. Must be an integer between 1 and 1000. Conflicts with interval and interval_unit. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. 
+ IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type FastRestoreRuleObservation struct { + + // The Availability Zones in which to enable fast snapshot restore. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Specifies the number of oldest AMIs to deprecate. Must be an integer between 1 and 1000. Conflicts with interval and interval_unit. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type FastRestoreRuleParameters struct { + + // The Availability Zones in which to enable fast snapshot restore. + // +kubebuilder:validation:Optional + // +listType=set + AvailabilityZones []*string `json:"availabilityZones" tf:"availability_zones,omitempty"` + + // Specifies the number of oldest AMIs to deprecate. Must be an integer between 1 and 1000. Conflicts with interval and interval_unit. + // +kubebuilder:validation:Optional + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. 
HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + // +kubebuilder:validation:Optional + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type LifecyclePolicyInitParameters struct { + + // A description for the DLM lifecycle policy. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ARN of an IAM role that is able to be assumed by the DLM service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // See the policy_details configuration block. Max of 1. + PolicyDetails *PolicyDetailsInitParameters `json:"policyDetails,omitempty" tf:"policy_details,omitempty"` + + // Whether the lifecycle policy should be enabled or disabled. ENABLED or DISABLED are valid values. Defaults to ENABLED. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LifecyclePolicyObservation struct { + + // Amazon Resource Name (ARN) of the DLM Lifecycle Policy. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A description for the DLM lifecycle policy. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ARN of an IAM role that is able to be assumed by the DLM service. + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Identifier of the DLM Lifecycle Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // See the policy_details configuration block. Max of 1. + PolicyDetails *PolicyDetailsObservation `json:"policyDetails,omitempty" tf:"policy_details,omitempty"` + + // Whether the lifecycle policy should be enabled or disabled. ENABLED or DISABLED are valid values. Defaults to ENABLED. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type LifecyclePolicyParameters struct { + + // A description for the DLM lifecycle policy. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ARN of an IAM role that is able to be assumed by the DLM service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. 
+ // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // See the policy_details configuration block. Max of 1. + // +kubebuilder:validation:Optional + PolicyDetails *PolicyDetailsParameters `json:"policyDetails,omitempty" tf:"policy_details,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Whether the lifecycle policy should be enabled or disabled. ENABLED or DISABLED are valid values. Defaults to ENABLED. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ParametersInitParameters struct { + + // The snapshot description that can trigger the policy. The description pattern is specified using a regular expression. The policy runs only if a snapshot with a description that matches the specified pattern is shared with your account. + DescriptionRegex *string `json:"descriptionRegex,omitempty" tf:"description_regex,omitempty"` + + // The type of event. Currently, only shareSnapshot events are supported. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // The IDs of the AWS accounts that can trigger policy by sharing snapshots with your account. The policy only runs if one of the specified AWS accounts shares a snapshot with your account. + // +listType=set + SnapshotOwner []*string `json:"snapshotOwner,omitempty" tf:"snapshot_owner,omitempty"` +} + +type ParametersObservation struct { + + // The snapshot description that can trigger the policy. The description pattern is specified using a regular expression. 
The policy runs only if a snapshot with a description that matches the specified pattern is shared with your account. + DescriptionRegex *string `json:"descriptionRegex,omitempty" tf:"description_regex,omitempty"` + + // The type of event. Currently, only shareSnapshot events are supported. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // The IDs of the AWS accounts that can trigger policy by sharing snapshots with your account. The policy only runs if one of the specified AWS accounts shares a snapshot with your account. + // +listType=set + SnapshotOwner []*string `json:"snapshotOwner,omitempty" tf:"snapshot_owner,omitempty"` +} + +type ParametersParameters struct { + + // The snapshot description that can trigger the policy. The description pattern is specified using a regular expression. The policy runs only if a snapshot with a description that matches the specified pattern is shared with your account. + // +kubebuilder:validation:Optional + DescriptionRegex *string `json:"descriptionRegex" tf:"description_regex,omitempty"` + + // The type of event. Currently, only shareSnapshot events are supported. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType" tf:"event_type,omitempty"` + + // The IDs of the AWS accounts that can trigger policy by sharing snapshots with your account. The policy only runs if one of the specified AWS accounts shares a snapshot with your account. + // +kubebuilder:validation:Optional + // +listType=set + SnapshotOwner []*string `json:"snapshotOwner" tf:"snapshot_owner,omitempty"` +} + +type PolicyDetailsInitParameters struct { + + // The actions to be performed when the event-based policy is triggered. You can specify only one action per policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the action configuration block. 
+ Action *ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // The event that triggers the event-based policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the event_source configuration block. + EventSource *EventSourceInitParameters `json:"eventSource,omitempty" tf:"event_source,omitempty"` + + // A set of optional parameters for snapshot and AMI lifecycle policies. See the parameters configuration block. + Parameters *PolicyDetailsParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The valid target resource types and actions a policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY to create an event-based policy that performs specific actions when a defined event occurs in your AWS account. Default value is EBS_SNAPSHOT_MANAGEMENT. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // The location of the resources to backup. If the source resources are located in an AWS Region, specify CLOUD. If the source resources are located on an Outpost in your account, specify OUTPOST. If you specify OUTPOST, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account. Valid values are CLOUD and OUTPOST. + ResourceLocations []*string `json:"resourceLocations,omitempty" tf:"resource_locations,omitempty"` + + // A list of resource types that should be targeted by the lifecycle policy. Valid values are VOLUME and INSTANCE. + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + + // See the schedule configuration block. 
+ Schedule []ScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A map of tag keys and their values. Any resources that match the resource_types and are tagged with any of these tags will be targeted. + // +mapType=granular + TargetTags map[string]*string `json:"targetTags,omitempty" tf:"target_tags,omitempty"` +} + +type PolicyDetailsObservation struct { + + // The actions to be performed when the event-based policy is triggered. You can specify only one action per policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the action configuration block. + Action *ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // The event that triggers the event-based policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the event_source configuration block. + EventSource *EventSourceObservation `json:"eventSource,omitempty" tf:"event_source,omitempty"` + + // A set of optional parameters for snapshot and AMI lifecycle policies. See the parameters configuration block. + Parameters *PolicyDetailsParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The valid target resource types and actions a policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY to create an event-based policy that performs specific actions when a defined event occurs in your AWS account. Default value is EBS_SNAPSHOT_MANAGEMENT. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // The location of the resources to backup. If the source resources are located in an AWS Region, specify CLOUD. 
If the source resources are located on an Outpost in your account, specify OUTPOST. If you specify OUTPOST, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account. Valid values are CLOUD and OUTPOST. + ResourceLocations []*string `json:"resourceLocations,omitempty" tf:"resource_locations,omitempty"` + + // A list of resource types that should be targeted by the lifecycle policy. Valid values are VOLUME and INSTANCE. + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + + // See the schedule configuration block. + Schedule []ScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A map of tag keys and their values. Any resources that match the resource_types and are tagged with any of these tags will be targeted. + // +mapType=granular + TargetTags map[string]*string `json:"targetTags,omitempty" tf:"target_tags,omitempty"` +} + +type PolicyDetailsParameters struct { + + // The actions to be performed when the event-based policy is triggered. You can specify only one action per policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the action configuration block. + // +kubebuilder:validation:Optional + Action *ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // The event that triggers the event-based policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the event_source configuration block. + // +kubebuilder:validation:Optional + EventSource *EventSourceParameters `json:"eventSource,omitempty" tf:"event_source,omitempty"` + + // A set of optional parameters for snapshot and AMI lifecycle policies. See the parameters configuration block. 
+ // +kubebuilder:validation:Optional + Parameters *PolicyDetailsParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The valid target resource types and actions a policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY to create an event-based policy that performs specific actions when a defined event occurs in your AWS account. Default value is EBS_SNAPSHOT_MANAGEMENT. + // +kubebuilder:validation:Optional + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // The location of the resources to backup. If the source resources are located in an AWS Region, specify CLOUD. If the source resources are located on an Outpost in your account, specify OUTPOST. If you specify OUTPOST, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account. Valid values are CLOUD and OUTPOST. + // +kubebuilder:validation:Optional + ResourceLocations []*string `json:"resourceLocations,omitempty" tf:"resource_locations,omitempty"` + + // A list of resource types that should be targeted by the lifecycle policy. Valid values are VOLUME and INSTANCE. + // +kubebuilder:validation:Optional + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + + // See the schedule configuration block. + // +kubebuilder:validation:Optional + Schedule []ScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A map of tag keys and their values. Any resources that match the resource_types and are tagged with any of these tags will be targeted. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + TargetTags map[string]*string `json:"targetTags,omitempty" tf:"target_tags,omitempty"` +} + +type PolicyDetailsParametersInitParameters struct { + + // Indicates whether to exclude the root volume from snapshots created using CreateSnapshots. The default is false. + ExcludeBootVolume *bool `json:"excludeBootVolume,omitempty" tf:"exclude_boot_volume,omitempty"` + + // Applies to AMI lifecycle policies only. Indicates whether targeted instances are rebooted when the lifecycle policy runs. true indicates that targeted instances are not rebooted when the policy runs. false indicates that target instances are rebooted when the policy runs. The default is true (instances are not rebooted). + NoReboot *bool `json:"noReboot,omitempty" tf:"no_reboot,omitempty"` +} + +type PolicyDetailsParametersObservation struct { + + // Indicates whether to exclude the root volume from snapshots created using CreateSnapshots. The default is false. + ExcludeBootVolume *bool `json:"excludeBootVolume,omitempty" tf:"exclude_boot_volume,omitempty"` + + // Applies to AMI lifecycle policies only. Indicates whether targeted instances are rebooted when the lifecycle policy runs. true indicates that targeted instances are not rebooted when the policy runs. false indicates that target instances are rebooted when the policy runs. The default is true (instances are not rebooted). + NoReboot *bool `json:"noReboot,omitempty" tf:"no_reboot,omitempty"` +} + +type PolicyDetailsParametersParameters struct { + + // Indicates whether to exclude the root volume from snapshots created using CreateSnapshots. The default is false. + // +kubebuilder:validation:Optional + ExcludeBootVolume *bool `json:"excludeBootVolume,omitempty" tf:"exclude_boot_volume,omitempty"` + + // Applies to AMI lifecycle policies only. Indicates whether targeted instances are rebooted when the lifecycle policy runs. 
true indicates that targeted instances are not rebooted when the policy runs. false indicates that target instances are rebooted when the policy runs. The default is true (instances are not rebooted). + // +kubebuilder:validation:Optional + NoReboot *bool `json:"noReboot,omitempty" tf:"no_reboot,omitempty"` +} + +type RetainRuleInitParameters struct { + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type RetainRuleObservation struct { + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type RetainRuleParameters struct { + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. 
Must be set if interval is set. + // +kubebuilder:validation:Optional + IntervalUnit *string `json:"intervalUnit" tf:"interval_unit,omitempty"` +} + +type ScheduleDeprecateRuleInitParameters struct { + + // Specifies the number of oldest AMIs to deprecate. Must be an integer between 1 and 1000. Conflicts with interval and interval_unit. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type ScheduleDeprecateRuleObservation struct { + + // Specifies the number of oldest AMIs to deprecate. Must be an integer between 1 and 1000. Conflicts with interval and interval_unit. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type ScheduleDeprecateRuleParameters struct { + + // Specifies the number of oldest AMIs to deprecate. Must be an integer between 1 and 1000. Conflicts with interval and interval_unit. 
+ // +kubebuilder:validation:Optional + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + // +kubebuilder:validation:Optional + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type ScheduleInitParameters struct { + + // Copy all user-defined tags on a source volume to snapshots of the volume created by this policy. + CopyTags *bool `json:"copyTags,omitempty" tf:"copy_tags,omitempty"` + + // See the create_rule block. Max of 1 per schedule. + CreateRule *CreateRuleInitParameters `json:"createRule,omitempty" tf:"create_rule,omitempty"` + + // See the cross_region_copy_rule block. Max of 3 per schedule. + CrossRegionCopyRule []CrossRegionCopyRuleInitParameters `json:"crossRegionCopyRule,omitempty" tf:"cross_region_copy_rule,omitempty"` + + // See the deprecate_rule block. Max of 1 per schedule. + DeprecateRule *ScheduleDeprecateRuleInitParameters `json:"deprecateRule,omitempty" tf:"deprecate_rule,omitempty"` + + // See the fast_restore_rule block. Max of 1 per schedule. + FastRestoreRule *FastRestoreRuleInitParameters `json:"fastRestoreRule,omitempty" tf:"fast_restore_rule,omitempty"` + + // A descriptive name for the action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the retention rule for cross-Region snapshot copies. See the retain_rule block. Max of 1 per action. 
+ RetainRule *ScheduleRetainRuleInitParameters `json:"retainRule,omitempty" tf:"retain_rule,omitempty"` + + // See the share_rule block. Max of 1 per schedule. + ShareRule *ShareRuleInitParameters `json:"shareRule,omitempty" tf:"share_rule,omitempty"` + + // A map of tag keys and their values. DLM lifecycle policies will already tag the snapshot with the tags on the volume. This configuration adds extra tags on top of these. + // +mapType=granular + TagsToAdd map[string]*string `json:"tagsToAdd,omitempty" tf:"tags_to_add,omitempty"` + + // A map of tag keys and variable values, where the values are determined when the policy is executed. Only $(instance-id) or $(timestamp) are valid values. Can only be used when resource_types is INSTANCE. + // +mapType=granular + VariableTags map[string]*string `json:"variableTags,omitempty" tf:"variable_tags,omitempty"` +} + +type ScheduleObservation struct { + + // Copy all user-defined tags on a source volume to snapshots of the volume created by this policy. + CopyTags *bool `json:"copyTags,omitempty" tf:"copy_tags,omitempty"` + + // See the create_rule block. Max of 1 per schedule. + CreateRule *CreateRuleObservation `json:"createRule,omitempty" tf:"create_rule,omitempty"` + + // See the cross_region_copy_rule block. Max of 3 per schedule. + CrossRegionCopyRule []CrossRegionCopyRuleObservation `json:"crossRegionCopyRule,omitempty" tf:"cross_region_copy_rule,omitempty"` + + // See the deprecate_rule block. Max of 1 per schedule. + DeprecateRule *ScheduleDeprecateRuleObservation `json:"deprecateRule,omitempty" tf:"deprecate_rule,omitempty"` + + // See the fast_restore_rule block. Max of 1 per schedule. + FastRestoreRule *FastRestoreRuleObservation `json:"fastRestoreRule,omitempty" tf:"fast_restore_rule,omitempty"` + + // A descriptive name for the action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the retention rule for cross-Region snapshot copies. See the retain_rule block. 
Max of 1 per action. + RetainRule *ScheduleRetainRuleObservation `json:"retainRule,omitempty" tf:"retain_rule,omitempty"` + + // See the share_rule block. Max of 1 per schedule. + ShareRule *ShareRuleObservation `json:"shareRule,omitempty" tf:"share_rule,omitempty"` + + // A map of tag keys and their values. DLM lifecycle policies will already tag the snapshot with the tags on the volume. This configuration adds extra tags on top of these. + // +mapType=granular + TagsToAdd map[string]*string `json:"tagsToAdd,omitempty" tf:"tags_to_add,omitempty"` + + // A map of tag keys and variable values, where the values are determined when the policy is executed. Only $(instance-id) or $(timestamp) are valid values. Can only be used when resource_types is INSTANCE. + // +mapType=granular + VariableTags map[string]*string `json:"variableTags,omitempty" tf:"variable_tags,omitempty"` +} + +type ScheduleParameters struct { + + // Copy all user-defined tags on a source volume to snapshots of the volume created by this policy. + // +kubebuilder:validation:Optional + CopyTags *bool `json:"copyTags,omitempty" tf:"copy_tags,omitempty"` + + // See the create_rule block. Max of 1 per schedule. + // +kubebuilder:validation:Optional + CreateRule *CreateRuleParameters `json:"createRule" tf:"create_rule,omitempty"` + + // See the cross_region_copy_rule block. Max of 3 per schedule. + // +kubebuilder:validation:Optional + CrossRegionCopyRule []CrossRegionCopyRuleParameters `json:"crossRegionCopyRule,omitempty" tf:"cross_region_copy_rule,omitempty"` + + // See the deprecate_rule block. Max of 1 per schedule. + // +kubebuilder:validation:Optional + DeprecateRule *ScheduleDeprecateRuleParameters `json:"deprecateRule,omitempty" tf:"deprecate_rule,omitempty"` + + // See the fast_restore_rule block. Max of 1 per schedule. 
+ // +kubebuilder:validation:Optional + FastRestoreRule *FastRestoreRuleParameters `json:"fastRestoreRule,omitempty" tf:"fast_restore_rule,omitempty"` + + // A descriptive name for the action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the retention rule for cross-Region snapshot copies. See the retain_rule block. Max of 1 per action. + // +kubebuilder:validation:Optional + RetainRule *ScheduleRetainRuleParameters `json:"retainRule" tf:"retain_rule,omitempty"` + + // See the share_rule block. Max of 1 per schedule. + // +kubebuilder:validation:Optional + ShareRule *ShareRuleParameters `json:"shareRule,omitempty" tf:"share_rule,omitempty"` + + // A map of tag keys and their values. DLM lifecycle policies will already tag the snapshot with the tags on the volume. This configuration adds extra tags on top of these. + // +kubebuilder:validation:Optional + // +mapType=granular + TagsToAdd map[string]*string `json:"tagsToAdd,omitempty" tf:"tags_to_add,omitempty"` + + // A map of tag keys and variable values, where the values are determined when the policy is executed. Only $(instance-id) or $(timestamp) are valid values. Can only be used when resource_types is INSTANCE. + // +kubebuilder:validation:Optional + // +mapType=granular + VariableTags map[string]*string `json:"variableTags,omitempty" tf:"variable_tags,omitempty"` +} + +type ScheduleRetainRuleInitParameters struct { + + // Specifies the number of oldest AMIs to deprecate. Must be an integer between 1 and 1000. Conflicts with interval and interval_unit. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. 
HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type ScheduleRetainRuleObservation struct { + + // Specifies the number of oldest AMIs to deprecate. Must be an integer between 1 and 1000. Conflicts with interval and interval_unit. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type ScheduleRetainRuleParameters struct { + + // Specifies the number of oldest AMIs to deprecate. Must be an integer between 1 and 1000. Conflicts with interval and interval_unit. + // +kubebuilder:validation:Optional + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. 
+ // +kubebuilder:validation:Optional + IntervalUnit *string `json:"intervalUnit,omitempty" tf:"interval_unit,omitempty"` +} + +type ShareRuleInitParameters struct { + + // The IDs of the AWS accounts with which to share the snapshots. + // +listType=set + TargetAccounts []*string `json:"targetAccounts,omitempty" tf:"target_accounts,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + UnshareInterval *float64 `json:"unshareInterval,omitempty" tf:"unshare_interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + UnshareIntervalUnit *string `json:"unshareIntervalUnit,omitempty" tf:"unshare_interval_unit,omitempty"` +} + +type ShareRuleObservation struct { + + // The IDs of the AWS accounts with which to share the snapshots. + // +listType=set + TargetAccounts []*string `json:"targetAccounts,omitempty" tf:"target_accounts,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + UnshareInterval *float64 `json:"unshareInterval,omitempty" tf:"unshare_interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + UnshareIntervalUnit *string `json:"unshareIntervalUnit,omitempty" tf:"unshare_interval_unit,omitempty"` +} + +type ShareRuleParameters struct { + + // The IDs of the AWS accounts with which to share the snapshots. 
+ // +kubebuilder:validation:Optional + // +listType=set + TargetAccounts []*string `json:"targetAccounts" tf:"target_accounts,omitempty"` + + // How often this lifecycle policy should be evaluated. 1, 2,3,4,6,8,12 or 24 are valid values. Conflicts with cron_expression. If set, interval_unit and times must also be set. + // +kubebuilder:validation:Optional + UnshareInterval *float64 `json:"unshareInterval,omitempty" tf:"unshare_interval,omitempty"` + + // The unit for how often the lifecycle policy should be evaluated. HOURS is currently the only allowed value and also the default value. Conflicts with cron_expression. Must be set if interval is set. + // +kubebuilder:validation:Optional + UnshareIntervalUnit *string `json:"unshareIntervalUnit,omitempty" tf:"unshare_interval_unit,omitempty"` +} + +// LifecyclePolicySpec defines the desired state of LifecyclePolicy +type LifecyclePolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LifecyclePolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LifecyclePolicyInitParameters `json:"initProvider,omitempty"` +} + +// LifecyclePolicyStatus defines the observed state of LifecyclePolicy. 
+type LifecyclePolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LifecyclePolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LifecyclePolicy is the Schema for the LifecyclePolicies API. Provides a Data Lifecycle Manager (DLM) lifecycle policy for managing snapshots. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LifecyclePolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.description) || (has(self.initProvider) && has(self.initProvider.description))",message="spec.forProvider.description is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyDetails) || (has(self.initProvider) && has(self.initProvider.policyDetails))",message="spec.forProvider.policyDetails is a required parameter" + Spec LifecyclePolicySpec `json:"spec"` + Status LifecyclePolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LifecyclePolicyList contains a list of LifecyclePolicies +type LifecyclePolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]LifecyclePolicy `json:"items"` +} + +// Repository type metadata. +var ( + LifecyclePolicy_Kind = "LifecyclePolicy" + LifecyclePolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LifecyclePolicy_Kind}.String() + LifecyclePolicy_KindAPIVersion = LifecyclePolicy_Kind + "." + CRDGroupVersion.String() + LifecyclePolicy_GroupVersionKind = CRDGroupVersion.WithKind(LifecyclePolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&LifecyclePolicy{}, &LifecyclePolicyList{}) +} diff --git a/apis/dms/v1beta1/zz_generated.conversion_hubs.go b/apis/dms/v1beta1/zz_generated.conversion_hubs.go index c34164df90..5df54d03df 100755 --- a/apis/dms/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/dms/v1beta1/zz_generated.conversion_hubs.go @@ -9,9 +9,6 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Certificate) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Endpoint) Hub() {} - // Hub marks this type as a conversion hub. func (tr *EventSubscription) Hub() {} diff --git a/apis/dms/v1beta1/zz_generated.conversion_spokes.go b/apis/dms/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..6748de8f9e --- /dev/null +++ b/apis/dms/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Endpoint to the hub type. 
+func (tr *Endpoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Endpoint type. +func (tr *Endpoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/dms/v1beta1/zz_generated.resolvers.go b/apis/dms/v1beta1/zz_generated.resolvers.go index 5a52c7530a..7b596f1fd2 100644 --- a/apis/dms/v1beta1/zz_generated.resolvers.go +++ b/apis/dms/v1beta1/zz_generated.resolvers.go @@ -400,7 +400,7 @@ func (mg *ReplicationTask) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.ReplicationInstanceArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ReplicationInstanceArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dms.aws.upbound.io", "v1beta1", "Endpoint", "EndpointList") + m, l, err = apisresolver.GetManagedResource("dms.aws.upbound.io", "v1beta2", "Endpoint", "EndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -419,7 +419,7 @@ func (mg *ReplicationTask) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.SourceEndpointArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SourceEndpointArnRef = rsp.ResolvedReference { - m, l, 
err = apisresolver.GetManagedResource("dms.aws.upbound.io", "v1beta1", "Endpoint", "EndpointList") + m, l, err = apisresolver.GetManagedResource("dms.aws.upbound.io", "v1beta2", "Endpoint", "EndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -457,7 +457,7 @@ func (mg *ReplicationTask) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.InitProvider.ReplicationInstanceArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ReplicationInstanceArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dms.aws.upbound.io", "v1beta1", "Endpoint", "EndpointList") + m, l, err = apisresolver.GetManagedResource("dms.aws.upbound.io", "v1beta2", "Endpoint", "EndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -476,7 +476,7 @@ func (mg *ReplicationTask) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.InitProvider.SourceEndpointArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.SourceEndpointArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dms.aws.upbound.io", "v1beta1", "Endpoint", "EndpointList") + m, l, err = apisresolver.GetManagedResource("dms.aws.upbound.io", "v1beta2", "Endpoint", "EndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/dms/v1beta1/zz_replicationtask_types.go b/apis/dms/v1beta1/zz_replicationtask_types.go index b6228632fd..ee1fdf47bc 100755 --- a/apis/dms/v1beta1/zz_replicationtask_types.go +++ b/apis/dms/v1beta1/zz_replicationtask_types.go @@ -44,7 +44,7 @@ type ReplicationTaskInitParameters struct { ResourceIdentifier *string `json:"resourceIdentifier,omitempty" tf:"resource_identifier,omitempty"` // ARN that uniquely identifies the source 
endpoint. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dms/v1beta1.Endpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dms/v1beta2.Endpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("endpoint_arn",true) SourceEndpointArn *string `json:"sourceEndpointArn,omitempty" tf:"source_endpoint_arn,omitempty"` @@ -67,7 +67,7 @@ type ReplicationTaskInitParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // ARN that uniquely identifies the target endpoint. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dms/v1beta1.Endpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dms/v1beta2.Endpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("endpoint_arn",true) TargetEndpointArn *string `json:"targetEndpointArn,omitempty" tf:"target_endpoint_arn,omitempty"` @@ -171,7 +171,7 @@ type ReplicationTaskParameters struct { ResourceIdentifier *string `json:"resourceIdentifier,omitempty" tf:"resource_identifier,omitempty"` // ARN that uniquely identifies the source endpoint. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dms/v1beta1.Endpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dms/v1beta2.Endpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("endpoint_arn",true) // +kubebuilder:validation:Optional SourceEndpointArn *string `json:"sourceEndpointArn,omitempty" tf:"source_endpoint_arn,omitempty"` @@ -198,7 +198,7 @@ type ReplicationTaskParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // ARN that uniquely identifies the target endpoint. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dms/v1beta1.Endpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dms/v1beta2.Endpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("endpoint_arn",true) // +kubebuilder:validation:Optional TargetEndpointArn *string `json:"targetEndpointArn,omitempty" tf:"target_endpoint_arn,omitempty"` diff --git a/apis/dms/v1beta2/zz_endpoint_terraformed.go b/apis/dms/v1beta2/zz_endpoint_terraformed.go new file mode 100755 index 0000000000..77bdb2220a --- /dev/null +++ b/apis/dms/v1beta2/zz_endpoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Endpoint +func (mg *Endpoint) GetTerraformResourceType() string { + return "aws_dms_endpoint" +} + +// GetConnectionDetailsMapping for this Endpoint +func (tr *Endpoint) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"kafka_settings[*].sasl_password": "kafkaSettings[*].saslPasswordSecretRef", "kafka_settings[*].ssl_client_key_password": "kafkaSettings[*].sslClientKeyPasswordSecretRef", "password": "passwordSecretRef", "redis_settings[*].auth_password": "redisSettings[*].authPasswordSecretRef"} +} + +// GetObservation of this Endpoint +func (tr *Endpoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Endpoint +func (tr *Endpoint) SetObservation(obs map[string]any) error { + p, 
err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Endpoint +func (tr *Endpoint) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Endpoint +func (tr *Endpoint) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Endpoint +func (tr *Endpoint) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Endpoint +func (tr *Endpoint) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Endpoint +func (tr *Endpoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Endpoint using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Endpoint) LateInitialize(attrs []byte) (bool, error) { + params := &EndpointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Endpoint) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dms/v1beta2/zz_endpoint_types.go b/apis/dms/v1beta2/zz_endpoint_types.go new file mode 100755 index 0000000000..fa005a6602 --- /dev/null +++ b/apis/dms/v1beta2/zz_endpoint_types.go @@ -0,0 +1,1502 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ElasticsearchSettingsInitParameters struct { + + // Endpoint for the OpenSearch cluster. + EndpointURI *string `json:"endpointUri,omitempty" tf:"endpoint_uri,omitempty"` + + // Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300. 
+ ErrorRetryDuration *float64 `json:"errorRetryDuration,omitempty" tf:"error_retry_duration,omitempty"` + + // Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10. + FullLoadErrorPercentage *float64 `json:"fullLoadErrorPercentage,omitempty" tf:"full_load_error_percentage,omitempty"` + + // ARN of the IAM Role with permissions to write to the OpenSearch cluster. + ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"` + + // Enable to migrate documentation using the documentation type _doc. OpenSearch and an Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false. + UseNewMappingType *bool `json:"useNewMappingType,omitempty" tf:"use_new_mapping_type,omitempty"` +} + +type ElasticsearchSettingsObservation struct { + + // Endpoint for the OpenSearch cluster. + EndpointURI *string `json:"endpointUri,omitempty" tf:"endpoint_uri,omitempty"` + + // Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300. + ErrorRetryDuration *float64 `json:"errorRetryDuration,omitempty" tf:"error_retry_duration,omitempty"` + + // Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10. + FullLoadErrorPercentage *float64 `json:"fullLoadErrorPercentage,omitempty" tf:"full_load_error_percentage,omitempty"` + + // ARN of the IAM Role with permissions to write to the OpenSearch cluster. + ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"` + + // Enable to migrate documentation using the documentation type _doc. OpenSearch and an Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false. 
+ UseNewMappingType *bool `json:"useNewMappingType,omitempty" tf:"use_new_mapping_type,omitempty"` +} + +type ElasticsearchSettingsParameters struct { + + // Endpoint for the OpenSearch cluster. + // +kubebuilder:validation:Optional + EndpointURI *string `json:"endpointUri" tf:"endpoint_uri,omitempty"` + + // Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300. + // +kubebuilder:validation:Optional + ErrorRetryDuration *float64 `json:"errorRetryDuration,omitempty" tf:"error_retry_duration,omitempty"` + + // Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10. + // +kubebuilder:validation:Optional + FullLoadErrorPercentage *float64 `json:"fullLoadErrorPercentage,omitempty" tf:"full_load_error_percentage,omitempty"` + + // ARN of the IAM Role with permissions to write to the OpenSearch cluster. + // +kubebuilder:validation:Optional + ServiceAccessRoleArn *string `json:"serviceAccessRoleArn" tf:"service_access_role_arn,omitempty"` + + // Enable to migrate documentation using the documentation type _doc. OpenSearch and an Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false. + // +kubebuilder:validation:Optional + UseNewMappingType *bool `json:"useNewMappingType,omitempty" tf:"use_new_mapping_type,omitempty"` +} + +type EndpointInitParameters struct { + + // ARN for the certificate. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Name of the endpoint database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Configuration block for OpenSearch settings. See below. + ElasticsearchSettings *ElasticsearchSettingsInitParameters `json:"elasticsearchSettings,omitempty" tf:"elasticsearch_settings,omitempty"` + + // Type of endpoint. Valid values are source, target. 
+ EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of engine names are available only for target endpoint type (e.g. redshift). + EngineName *string `json:"engineName,omitempty" tf:"engine_name,omitempty"` + + // Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration. + ExtraConnectionAttributes *string `json:"extraConnectionAttributes,omitempty" tf:"extra_connection_attributes,omitempty"` + + // ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. 
+ // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // Configuration block for Kafka settings. See below. + KafkaSettings *KafkaSettingsInitParameters `json:"kafkaSettings,omitempty" tf:"kafka_settings,omitempty"` + + // Configuration block for Kinesis settings. See below. + KinesisSettings *KinesisSettingsInitParameters `json:"kinesisSettings,omitempty" tf:"kinesis_settings,omitempty"` + + // Configuration block for MongoDB settings. See below. + MongodbSettings *MongodbSettingsInitParameters `json:"mongodbSettings,omitempty" tf:"mongodb_settings,omitempty"` + + // Password to be used to login to the endpoint database. + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // Only tasks paused by the resource will be restarted after the modification completes. Default is false. + PauseReplicationTasks *bool `json:"pauseReplicationTasks,omitempty" tf:"pause_replication_tasks,omitempty"` + + // Port used by the endpoint database. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Configuration block for Postgres settings. See below. + PostgresSettings *PostgresSettingsInitParameters `json:"postgresSettings,omitempty" tf:"postgres_settings,omitempty"` + + RedisSettings *RedisSettingsInitParameters `json:"redisSettings,omitempty" tf:"redis_settings,omitempty"` + + // Configuration block for Redshift settings. See below. + RedshiftSettings *RedshiftSettingsInitParameters `json:"redshiftSettings,omitempty" tf:"redshift_settings,omitempty"` + + // (Deprecated, use the aws_dms_s3_endpoint resource instead) Configuration block for S3 settings. See below. + // This argument is deprecated and will be removed in a future version; use aws_dms_s3_endpoint instead + S3Settings *S3SettingsInitParameters `json:"s3Settings,omitempty" tf:"s3_settings,omitempty"` + + // SSL mode to use for the connection. 
Valid values are none, require, verify-ca, verify-full + SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"` + + // ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + SecretsManagerAccessRoleArn *string `json:"secretsManagerAccessRoleArn,omitempty" tf:"secrets_manager_access_role_arn,omitempty"` + + // Reference to a Role in iam to populate secretsManagerAccessRoleArn. + // +kubebuilder:validation:Optional + SecretsManagerAccessRoleArnRef *v1.Reference `json:"secretsManagerAccessRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate secretsManagerAccessRoleArn. + // +kubebuilder:validation:Optional + SecretsManagerAccessRoleArnSelector *v1.Selector `json:"secretsManagerAccessRoleArnSelector,omitempty" tf:"-"` + + // text values for username, password , server_name, and port. You can't specify both. + SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"` + + // Host name of the server. + ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` + + // ARN used by the service access IAM role for dynamodb endpoints. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ServiceAccessRole *string `json:"serviceAccessRole,omitempty" tf:"service_access_role,omitempty"` + + // Reference to a Role in iam to populate serviceAccessRole. 
+ // +kubebuilder:validation:Optional + ServiceAccessRoleRef *v1.Reference `json:"serviceAccessRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceAccessRole. + // +kubebuilder:validation:Optional + ServiceAccessRoleSelector *v1.Selector `json:"serviceAccessRoleSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // User name to be used to login to the endpoint database. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type EndpointObservation struct { + + // ARN for the certificate. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Name of the endpoint database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Configuration block for OpenSearch settings. See below. + ElasticsearchSettings *ElasticsearchSettingsObservation `json:"elasticsearchSettings,omitempty" tf:"elasticsearch_settings,omitempty"` + + // ARN for the endpoint. + EndpointArn *string `json:"endpointArn,omitempty" tf:"endpoint_arn,omitempty"` + + // Type of endpoint. Valid values are source, target. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of engine names are available only for target endpoint type (e.g. redshift). + EngineName *string `json:"engineName,omitempty" tf:"engine_name,omitempty"` + + // Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration. 
+ ExtraConnectionAttributes *string `json:"extraConnectionAttributes,omitempty" tf:"extra_connection_attributes,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Configuration block for Kafka settings. See below. + KafkaSettings *KafkaSettingsObservation `json:"kafkaSettings,omitempty" tf:"kafka_settings,omitempty"` + + // Configuration block for Kinesis settings. See below. + KinesisSettings *KinesisSettingsObservation `json:"kinesisSettings,omitempty" tf:"kinesis_settings,omitempty"` + + // Configuration block for MongoDB settings. See below. + MongodbSettings *MongodbSettingsObservation `json:"mongodbSettings,omitempty" tf:"mongodb_settings,omitempty"` + + // Only tasks paused by the resource will be restarted after the modification completes. Default is false. + PauseReplicationTasks *bool `json:"pauseReplicationTasks,omitempty" tf:"pause_replication_tasks,omitempty"` + + // Port used by the endpoint database. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Configuration block for Postgres settings. See below. 
+ PostgresSettings *PostgresSettingsObservation `json:"postgresSettings,omitempty" tf:"postgres_settings,omitempty"` + + RedisSettings *RedisSettingsObservation `json:"redisSettings,omitempty" tf:"redis_settings,omitempty"` + + // Configuration block for Redshift settings. See below. + RedshiftSettings *RedshiftSettingsObservation `json:"redshiftSettings,omitempty" tf:"redshift_settings,omitempty"` + + // (Deprecated, use the aws_dms_s3_endpoint resource instead) Configuration block for S3 settings. See below. + // This argument is deprecated and will be removed in a future version; use aws_dms_s3_endpoint instead + S3Settings *S3SettingsObservation `json:"s3Settings,omitempty" tf:"s3_settings,omitempty"` + + // SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full + SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"` + + // ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action. + SecretsManagerAccessRoleArn *string `json:"secretsManagerAccessRoleArn,omitempty" tf:"secrets_manager_access_role_arn,omitempty"` + + // text values for username, password , server_name, and port. You can't specify both. + SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"` + + // Host name of the server. + ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` + + // ARN used by the service access IAM role for dynamodb endpoints. + ServiceAccessRole *string `json:"serviceAccessRole,omitempty" tf:"service_access_role,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // User name to be used to login to the endpoint database. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type EndpointParameters struct { + + // ARN for the certificate. + // +kubebuilder:validation:Optional + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Name of the endpoint database. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Configuration block for OpenSearch settings. See below. + // +kubebuilder:validation:Optional + ElasticsearchSettings *ElasticsearchSettingsParameters `json:"elasticsearchSettings,omitempty" tf:"elasticsearch_settings,omitempty"` + + // Type of endpoint. Valid values are source, target. + // +kubebuilder:validation:Optional + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some of engine names are available only for target endpoint type (e.g. redshift). + // +kubebuilder:validation:Optional + EngineName *string `json:"engineName,omitempty" tf:"engine_name,omitempty"` + + // Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration. + // +kubebuilder:validation:Optional + ExtraConnectionAttributes *string `json:"extraConnectionAttributes,omitempty" tf:"extra_connection_attributes,omitempty"` + + // ARN for the KMS key that will be used to encrypt the connection parameters. 
If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // Configuration block for Kafka settings. See below. + // +kubebuilder:validation:Optional + KafkaSettings *KafkaSettingsParameters `json:"kafkaSettings,omitempty" tf:"kafka_settings,omitempty"` + + // Configuration block for Kinesis settings. See below. + // +kubebuilder:validation:Optional + KinesisSettings *KinesisSettingsParameters `json:"kinesisSettings,omitempty" tf:"kinesis_settings,omitempty"` + + // Configuration block for MongoDB settings. See below. + // +kubebuilder:validation:Optional + MongodbSettings *MongodbSettingsParameters `json:"mongodbSettings,omitempty" tf:"mongodb_settings,omitempty"` + + // Password to be used to login to the endpoint database. 
+ // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // Only tasks paused by the resource will be restarted after the modification completes. Default is false. + // +kubebuilder:validation:Optional + PauseReplicationTasks *bool `json:"pauseReplicationTasks,omitempty" tf:"pause_replication_tasks,omitempty"` + + // Port used by the endpoint database. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Configuration block for Postgres settings. See below. + // +kubebuilder:validation:Optional + PostgresSettings *PostgresSettingsParameters `json:"postgresSettings,omitempty" tf:"postgres_settings,omitempty"` + + // +kubebuilder:validation:Optional + RedisSettings *RedisSettingsParameters `json:"redisSettings,omitempty" tf:"redis_settings,omitempty"` + + // Configuration block for Redshift settings. See below. + // +kubebuilder:validation:Optional + RedshiftSettings *RedshiftSettingsParameters `json:"redshiftSettings,omitempty" tf:"redshift_settings,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // (Deprecated, use the aws_dms_s3_endpoint resource instead) Configuration block for S3 settings. See below. + // This argument is deprecated and will be removed in a future version; use aws_dms_s3_endpoint instead + // +kubebuilder:validation:Optional + S3Settings *S3SettingsParameters `json:"s3Settings,omitempty" tf:"s3_settings,omitempty"` + + // SSL mode to use for the connection. 
Valid values are none, require, verify-ca, verify-full + // +kubebuilder:validation:Optional + SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"` + + // ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + SecretsManagerAccessRoleArn *string `json:"secretsManagerAccessRoleArn,omitempty" tf:"secrets_manager_access_role_arn,omitempty"` + + // Reference to a Role in iam to populate secretsManagerAccessRoleArn. + // +kubebuilder:validation:Optional + SecretsManagerAccessRoleArnRef *v1.Reference `json:"secretsManagerAccessRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate secretsManagerAccessRoleArn. + // +kubebuilder:validation:Optional + SecretsManagerAccessRoleArnSelector *v1.Selector `json:"secretsManagerAccessRoleArnSelector,omitempty" tf:"-"` + + // text values for username, password , server_name, and port. You can't specify both. + // +kubebuilder:validation:Optional + SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"` + + // Host name of the server. + // +kubebuilder:validation:Optional + ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` + + // ARN used by the service access IAM role for dynamodb endpoints. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ServiceAccessRole *string `json:"serviceAccessRole,omitempty" tf:"service_access_role,omitempty"` + + // Reference to a Role in iam to populate serviceAccessRole. + // +kubebuilder:validation:Optional + ServiceAccessRoleRef *v1.Reference `json:"serviceAccessRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceAccessRole. + // +kubebuilder:validation:Optional + ServiceAccessRoleSelector *v1.Selector `json:"serviceAccessRoleSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // User name to be used to login to the endpoint database. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KafkaSettingsInitParameters struct { + + // Kafka broker location. Specify in the form broker-hostname-or-ip:port. + Broker *string `json:"broker,omitempty" tf:"broker,omitempty"` + + // Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false. + IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"` + + // Include NULL and empty columns for records migrated to the endpoint. Default is false. + IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"` + + // Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false. 
+ IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"` + + // Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false. + IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"` + + // Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false. + IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"` + + // Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab). + MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"` + + // Maximum size in bytes for records created on the endpoint Default is 1,000,000. + MessageMaxBytes *float64 `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"` + + // Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix. + NoHexPrefix *bool `json:"noHexPrefix,omitempty" tf:"no_hex_prefix,omitempty"` + + // Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. 
In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false. + PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"` + + // ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint. + SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"` + + // ARN of the client certificate used to securely connect to a Kafka target endpoint. + SSLClientCertificateArn *string `json:"sslClientCertificateArn,omitempty" tf:"ssl_client_certificate_arn,omitempty"` + + // ARN for the client private key used to securely connect to a Kafka target endpoint. + SSLClientKeyArn *string `json:"sslClientKeyArn,omitempty" tf:"ssl_client_key_arn,omitempty"` + + // Password for the client private key used to securely connect to a Kafka target endpoint. + SSLClientKeyPasswordSecretRef *v1.SecretKeySelector `json:"sslClientKeyPasswordSecretRef,omitempty" tf:"-"` + + // Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication. + SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"` + + // Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication. + SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"` + + // Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password. 
+ SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"` + + // Kafka topic for migration. Default is kafka-default-topic. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type KafkaSettingsObservation struct { + + // Kafka broker location. Specify in the form broker-hostname-or-ip:port. + Broker *string `json:"broker,omitempty" tf:"broker,omitempty"` + + // Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false. + IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"` + + // Include NULL and empty columns for records migrated to the endpoint. Default is false. + IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"` + + // Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false. + IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"` + + // Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false. + IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"` + + // Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false. + IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"` + + // Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab). 
+ MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"` + + // Maximum size in bytes for records created on the endpoint Default is 1,000,000. + MessageMaxBytes *float64 `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"` + + // Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix. + NoHexPrefix *bool `json:"noHexPrefix,omitempty" tf:"no_hex_prefix,omitempty"` + + // Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false. + PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"` + + // ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint. + SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"` + + // ARN of the client certificate used to securely connect to a Kafka target endpoint. + SSLClientCertificateArn *string `json:"sslClientCertificateArn,omitempty" tf:"ssl_client_certificate_arn,omitempty"` + + // ARN for the client private key used to securely connect to a Kafka target endpoint. 
+ SSLClientKeyArn *string `json:"sslClientKeyArn,omitempty" tf:"ssl_client_key_arn,omitempty"` + + // Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication. + SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"` + + // Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password. + SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"` + + // Kafka topic for migration. Default is kafka-default-topic. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type KafkaSettingsParameters struct { + + // Kafka broker location. Specify in the form broker-hostname-or-ip:port. + // +kubebuilder:validation:Optional + Broker *string `json:"broker" tf:"broker,omitempty"` + + // Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false. + // +kubebuilder:validation:Optional + IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"` + + // Include NULL and empty columns for records migrated to the endpoint. Default is false. + // +kubebuilder:validation:Optional + IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"` + + // Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false. 
+ // +kubebuilder:validation:Optional + IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"` + + // Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false. + // +kubebuilder:validation:Optional + IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"` + + // Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false. + // +kubebuilder:validation:Optional + IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"` + + // Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab). + // +kubebuilder:validation:Optional + MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"` + + // Maximum size in bytes for records created on the endpoint Default is 1,000,000. + // +kubebuilder:validation:Optional + MessageMaxBytes *float64 `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"` + + // Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix. 
+ // +kubebuilder:validation:Optional + NoHexPrefix *bool `json:"noHexPrefix,omitempty" tf:"no_hex_prefix,omitempty"` + + // Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false. + // +kubebuilder:validation:Optional + PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"` + + // ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint. + // +kubebuilder:validation:Optional + SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"` + + // ARN of the client certificate used to securely connect to a Kafka target endpoint. + // +kubebuilder:validation:Optional + SSLClientCertificateArn *string `json:"sslClientCertificateArn,omitempty" tf:"ssl_client_certificate_arn,omitempty"` + + // ARN for the client private key used to securely connect to a Kafka target endpoint. + // +kubebuilder:validation:Optional + SSLClientKeyArn *string `json:"sslClientKeyArn,omitempty" tf:"ssl_client_key_arn,omitempty"` + + // Password for the client private key used to securely connect to a Kafka target endpoint. + // +kubebuilder:validation:Optional + SSLClientKeyPasswordSecretRef *v1.SecretKeySelector `json:"sslClientKeyPasswordSecretRef,omitempty" tf:"-"` + + // Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication. 
+ // +kubebuilder:validation:Optional + SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"` + + // Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication. + // +kubebuilder:validation:Optional + SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"` + + // Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password. + // +kubebuilder:validation:Optional + SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"` + + // Kafka topic for migration. Default is kafka-default-topic. + // +kubebuilder:validation:Optional + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type KinesisSettingsInitParameters struct { + + // Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false. + IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"` + + // Include NULL and empty columns in the target. Default is false. + IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"` + + // Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false. + IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"` + + // Includes any data definition language (DDL) operations that change the table in the control data. Default is false. 
+	IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"`
+
+	// Provides detailed transaction information from the source database.
+	IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"`
+
+	// Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
+	MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"`
+
+	// Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
+	PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"`
+
+	// ARN of the IAM Role with permissions to write to the Kinesis data stream.
+	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
+
+	// ARN of the Kinesis data stream.
+	StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"`
+}
+
+type KinesisSettingsObservation struct {
+
+	// Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
+	IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"`
+
+	// Include NULL and empty columns in the target. Default is false.
+	IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"`
+
+	// Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
+	IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"`
+
+	// Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
+	IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"`
+
+	// Provides detailed transaction information from the source database.
+	IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"`
+
+	// Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
+	MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"`
+
+	// Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
+	PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"`
+
+	// ARN of the IAM Role with permissions to write to the Kinesis data stream.
+	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
+
+	// ARN of the Kinesis data stream.
+	StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"`
+}
+
+type KinesisSettingsParameters struct {
+
+	// Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
+	// +kubebuilder:validation:Optional
+	IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"`
+
+	// Include NULL and empty columns in the target. Default is false.
+	// +kubebuilder:validation:Optional
+	IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"`
+
+	// Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
+	// +kubebuilder:validation:Optional
+	IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"`
+
+	// Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
+	// +kubebuilder:validation:Optional
+	IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"`
+
+	// Provides detailed transaction information from the source database.
+	// +kubebuilder:validation:Optional
+	IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"`
+
+	// Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
+	// +kubebuilder:validation:Optional
+	MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"`
+
+	// Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
+	// +kubebuilder:validation:Optional
+	PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"`
+
+	// ARN of the IAM Role with permissions to write to the Kinesis data stream.
+	// +kubebuilder:validation:Optional
+	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
+
+	// ARN of the Kinesis data stream.
+	// +kubebuilder:validation:Optional
+	StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"`
+}
+
+type MongodbSettingsInitParameters struct {
+
+	// Authentication mechanism to access the MongoDB source endpoint. Default is default.
+	AuthMechanism *string `json:"authMechanism,omitempty" tf:"auth_mechanism,omitempty"`
+
+	// Authentication database name. Not used when auth_type is no. Default is admin.
+	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
+
+	// Authentication type to access the MongoDB source endpoint. Default is password.
+	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`
+
+	// Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
+	DocsToInvestigate *string `json:"docsToInvestigate,omitempty" tf:"docs_to_investigate,omitempty"`
+
+	// Document ID. Use this setting when nesting_level is set to none. Default is false. Note that this field is a string, not a bool.
+	ExtractDocID *string `json:"extractDocId,omitempty" tf:"extract_doc_id,omitempty"`
+
+	// Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
+	NestingLevel *string `json:"nestingLevel,omitempty" tf:"nesting_level,omitempty"`
+}
+
+type MongodbSettingsObservation struct {
+
+	// Authentication mechanism to access the MongoDB source endpoint. Default is default.
+	AuthMechanism *string `json:"authMechanism,omitempty" tf:"auth_mechanism,omitempty"`
+
+	// Authentication database name. Not used when auth_type is no. Default is admin.
+	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
+
+	// Authentication type to access the MongoDB source endpoint. Default is password.
+	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`
+
+	// Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
+	DocsToInvestigate *string `json:"docsToInvestigate,omitempty" tf:"docs_to_investigate,omitempty"`
+
+	// Document ID. Use this setting when nesting_level is set to none. Default is false. Note that this field is a string, not a bool.
+	ExtractDocID *string `json:"extractDocId,omitempty" tf:"extract_doc_id,omitempty"`
+
+	// Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
+	NestingLevel *string `json:"nestingLevel,omitempty" tf:"nesting_level,omitempty"`
+}
+
+type MongodbSettingsParameters struct {
+
+	// Authentication mechanism to access the MongoDB source endpoint. Default is default.
+	// +kubebuilder:validation:Optional
+	AuthMechanism *string `json:"authMechanism,omitempty" tf:"auth_mechanism,omitempty"`
+
+	// Authentication database name. Not used when auth_type is no. Default is admin.
+	// +kubebuilder:validation:Optional
+	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`
+
+	// Authentication type to access the MongoDB source endpoint. Default is password.
+	// +kubebuilder:validation:Optional
+	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`
+
+	// Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
+	// +kubebuilder:validation:Optional
+	DocsToInvestigate *string `json:"docsToInvestigate,omitempty" tf:"docs_to_investigate,omitempty"`
+
+	// Document ID. Use this setting when nesting_level is set to none. Default is false. Note that this field is a string, not a bool.
+	// +kubebuilder:validation:Optional
+	ExtractDocID *string `json:"extractDocId,omitempty" tf:"extract_doc_id,omitempty"`
+
+	// Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
+	// +kubebuilder:validation:Optional
+	NestingLevel *string `json:"nestingLevel,omitempty" tf:"nesting_level,omitempty"`
+}
+
+type PostgresSettingsInitParameters struct {
+
+	// For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
+	AfterConnectScript *string `json:"afterConnectScript,omitempty" tf:"after_connect_script,omitempty"`
+
+	// The Babelfish for Aurora PostgreSQL database name for the endpoint.
+	BabelfishDatabaseName *string `json:"babelfishDatabaseName,omitempty" tf:"babelfish_database_name,omitempty"`
+
+	// To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
+	CaptureDdls *bool `json:"captureDdls,omitempty" tf:"capture_ddls,omitempty"`
+
+	// Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
+	DatabaseMode *string `json:"databaseMode,omitempty" tf:"database_mode,omitempty"`
+
+	// Sets the schema in which the operational DDL database artifacts are created. Default is public.
+	DdlArtifactsSchema *string `json:"ddlArtifactsSchema,omitempty" tf:"ddl_artifacts_schema,omitempty"`
+
+	// Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
+	ExecuteTimeout *float64 `json:"executeTimeout,omitempty" tf:"execute_timeout,omitempty"`
+
+	// When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
+	FailTasksOnLobTruncation *bool `json:"failTasksOnLobTruncation,omitempty" tf:"fail_tasks_on_lob_truncation,omitempty"`
+
+	// The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
+	HeartbeatEnable *bool `json:"heartbeatEnable,omitempty" tf:"heartbeat_enable,omitempty"`
+
+	// Sets the WAL heartbeat frequency (in minutes). Default value is 5.
+	HeartbeatFrequency *float64 `json:"heartbeatFrequency,omitempty" tf:"heartbeat_frequency,omitempty"`
+
+	// Sets the schema in which the heartbeat artifacts are created. Default value is public.
+	HeartbeatSchema *string `json:"heartbeatSchema,omitempty" tf:"heartbeat_schema,omitempty"`
+
+	// You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
+	MapBooleanAsBoolean *bool `json:"mapBooleanAsBoolean,omitempty" tf:"map_boolean_as_boolean,omitempty"`
+
+	// Optional. When true, DMS migrates JSONB values as CLOB.
+	MapJsonbAsClob *bool `json:"mapJsonbAsClob,omitempty" tf:"map_jsonb_as_clob,omitempty"`
+
+	// Optional. When true, DMS migrates LONG values as VARCHAR.
+	MapLongVarcharAs *string `json:"mapLongVarcharAs,omitempty" tf:"map_long_varchar_as,omitempty"`
+
+	// Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
+	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`
+
+	// Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
+	PluginName *string `json:"pluginName,omitempty" tf:"plugin_name,omitempty"`
+
+	// Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
+	SlotName *string `json:"slotName,omitempty" tf:"slot_name,omitempty"`
+}
+
+type PostgresSettingsObservation struct {
+
+	// For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
+	AfterConnectScript *string `json:"afterConnectScript,omitempty" tf:"after_connect_script,omitempty"`
+
+	// The Babelfish for Aurora PostgreSQL database name for the endpoint.
+	BabelfishDatabaseName *string `json:"babelfishDatabaseName,omitempty" tf:"babelfish_database_name,omitempty"`
+
+	// To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
+	CaptureDdls *bool `json:"captureDdls,omitempty" tf:"capture_ddls,omitempty"`
+
+	// Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
+	DatabaseMode *string `json:"databaseMode,omitempty" tf:"database_mode,omitempty"`
+
+	// Sets the schema in which the operational DDL database artifacts are created. Default is public.
+	DdlArtifactsSchema *string `json:"ddlArtifactsSchema,omitempty" tf:"ddl_artifacts_schema,omitempty"`
+
+	// Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
+	ExecuteTimeout *float64 `json:"executeTimeout,omitempty" tf:"execute_timeout,omitempty"`
+
+	// When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
+	FailTasksOnLobTruncation *bool `json:"failTasksOnLobTruncation,omitempty" tf:"fail_tasks_on_lob_truncation,omitempty"`
+
+	// The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
+	HeartbeatEnable *bool `json:"heartbeatEnable,omitempty" tf:"heartbeat_enable,omitempty"`
+
+	// Sets the WAL heartbeat frequency (in minutes). Default value is 5.
+	HeartbeatFrequency *float64 `json:"heartbeatFrequency,omitempty" tf:"heartbeat_frequency,omitempty"`
+
+	// Sets the schema in which the heartbeat artifacts are created. Default value is public.
+	HeartbeatSchema *string `json:"heartbeatSchema,omitempty" tf:"heartbeat_schema,omitempty"`
+
+	// You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
+	MapBooleanAsBoolean *bool `json:"mapBooleanAsBoolean,omitempty" tf:"map_boolean_as_boolean,omitempty"`
+
+	// Optional. When true, DMS migrates JSONB values as CLOB.
+	MapJsonbAsClob *bool `json:"mapJsonbAsClob,omitempty" tf:"map_jsonb_as_clob,omitempty"`
+
+	// Optional. When true, DMS migrates LONG values as VARCHAR.
+	MapLongVarcharAs *string `json:"mapLongVarcharAs,omitempty" tf:"map_long_varchar_as,omitempty"`
+
+	// Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
+	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`
+
+	// Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
+	PluginName *string `json:"pluginName,omitempty" tf:"plugin_name,omitempty"`
+
+	// Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
+	SlotName *string `json:"slotName,omitempty" tf:"slot_name,omitempty"`
+}
+
+type PostgresSettingsParameters struct {
+
+	// For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
+	// +kubebuilder:validation:Optional
+	AfterConnectScript *string `json:"afterConnectScript,omitempty" tf:"after_connect_script,omitempty"`
+
+	// The Babelfish for Aurora PostgreSQL database name for the endpoint.
+	// +kubebuilder:validation:Optional
+	BabelfishDatabaseName *string `json:"babelfishDatabaseName,omitempty" tf:"babelfish_database_name,omitempty"`
+
+	// To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
+	// +kubebuilder:validation:Optional
+	CaptureDdls *bool `json:"captureDdls,omitempty" tf:"capture_ddls,omitempty"`
+
+	// Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
+	// +kubebuilder:validation:Optional
+	DatabaseMode *string `json:"databaseMode,omitempty" tf:"database_mode,omitempty"`
+
+	// Sets the schema in which the operational DDL database artifacts are created. Default is public.
+	// +kubebuilder:validation:Optional
+	DdlArtifactsSchema *string `json:"ddlArtifactsSchema,omitempty" tf:"ddl_artifacts_schema,omitempty"`
+
+	// Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
+	// +kubebuilder:validation:Optional
+	ExecuteTimeout *float64 `json:"executeTimeout,omitempty" tf:"execute_timeout,omitempty"`
+
+	// When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
+	// +kubebuilder:validation:Optional
+	FailTasksOnLobTruncation *bool `json:"failTasksOnLobTruncation,omitempty" tf:"fail_tasks_on_lob_truncation,omitempty"`
+
+	// The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
+	// +kubebuilder:validation:Optional
+	HeartbeatEnable *bool `json:"heartbeatEnable,omitempty" tf:"heartbeat_enable,omitempty"`
+
+	// Sets the WAL heartbeat frequency (in minutes). Default value is 5.
+	// +kubebuilder:validation:Optional
+	HeartbeatFrequency *float64 `json:"heartbeatFrequency,omitempty" tf:"heartbeat_frequency,omitempty"`
+
+	// Sets the schema in which the heartbeat artifacts are created. Default value is public.
+	// +kubebuilder:validation:Optional
+	HeartbeatSchema *string `json:"heartbeatSchema,omitempty" tf:"heartbeat_schema,omitempty"`
+
+	// You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
+	// +kubebuilder:validation:Optional
+	MapBooleanAsBoolean *bool `json:"mapBooleanAsBoolean,omitempty" tf:"map_boolean_as_boolean,omitempty"`
+
+	// Optional. When true, DMS migrates JSONB values as CLOB.
+	// +kubebuilder:validation:Optional
+	MapJsonbAsClob *bool `json:"mapJsonbAsClob,omitempty" tf:"map_jsonb_as_clob,omitempty"`
+
+	// Optional. When true, DMS migrates LONG values as VARCHAR.
+	// +kubebuilder:validation:Optional
+	MapLongVarcharAs *string `json:"mapLongVarcharAs,omitempty" tf:"map_long_varchar_as,omitempty"`
+
+	// Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
+	// +kubebuilder:validation:Optional
+	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`
+
+	// Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
+	// +kubebuilder:validation:Optional
+	PluginName *string `json:"pluginName,omitempty" tf:"plugin_name,omitempty"`
+
+	// Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
+	// +kubebuilder:validation:Optional
+	SlotName *string `json:"slotName,omitempty" tf:"slot_name,omitempty"`
+}
+
+type RedisSettingsInitParameters struct {
+
+	// The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
+	AuthPasswordSecretRef *v1.SecretKeySelector `json:"authPasswordSecretRef,omitempty" tf:"-"`
+
+	// Authentication type to access the Redis target endpoint. Options include auth-role and auth-token; see auth_password and auth_user_name.
+	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`
+
+	// The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
+	AuthUserName *string `json:"authUserName,omitempty" tf:"auth_user_name,omitempty"`
+
+	// Port used by the endpoint database.
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+
+	// The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
+	SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"`
+
+	// The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
+	SSLSecurityProtocol *string `json:"sslSecurityProtocol,omitempty" tf:"ssl_security_protocol,omitempty"`
+
+	// Host name of the server.
+	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
+}
+
+type RedisSettingsObservation struct {
+
+	// Authentication type to access the Redis target endpoint. Options include auth-role and auth-token; see auth_password and auth_user_name.
+	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`
+
+	// The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
+	AuthUserName *string `json:"authUserName,omitempty" tf:"auth_user_name,omitempty"`
+
+	// Port used by the endpoint database.
+	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`
+
+	// The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
+	SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"`
+
+	// The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
+	SSLSecurityProtocol *string `json:"sslSecurityProtocol,omitempty" tf:"ssl_security_protocol,omitempty"`
+
+	// Host name of the server.
+	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
+}
+
+type RedisSettingsParameters struct {
+
+	// The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
+	// +kubebuilder:validation:Optional
+	AuthPasswordSecretRef *v1.SecretKeySelector `json:"authPasswordSecretRef,omitempty" tf:"-"`
+
+	// Authentication type to access the Redis target endpoint. Options include auth-role and auth-token; see auth_password and auth_user_name.
+	// +kubebuilder:validation:Optional
+	AuthType *string `json:"authType" tf:"auth_type,omitempty"`
+
+	// The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
+	// +kubebuilder:validation:Optional
+	AuthUserName *string `json:"authUserName,omitempty" tf:"auth_user_name,omitempty"`
+
+	// Port used by the endpoint database.
+	// +kubebuilder:validation:Optional
+	Port *float64 `json:"port" tf:"port,omitempty"`
+
+	// The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
+	// +kubebuilder:validation:Optional
+	SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"`
+
+	// The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
+	// +kubebuilder:validation:Optional
+	SSLSecurityProtocol *string `json:"sslSecurityProtocol,omitempty" tf:"ssl_security_protocol,omitempty"`
+
+	// Host name of the server.
+	// +kubebuilder:validation:Optional
+	ServerName *string `json:"serverName" tf:"server_name,omitempty"`
+}
+
+type RedshiftSettingsInitParameters struct {
+
+	// Custom S3 Bucket Object prefix for intermediate storage.
+	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`
+
+	// Custom S3 Bucket name for intermediate storage.
+	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
+
+	// The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
+	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`
+
+	// ARN or ID of the KMS key to use when encryption_mode is SSE_KMS.
+	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`
+
+	// Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
+	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
+}
+
+type RedshiftSettingsObservation struct {
+
+	// Custom S3 Bucket Object prefix for intermediate storage.
+	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`
+
+	// Custom S3 Bucket name for intermediate storage.
+	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
+
+	// The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
+	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`
+
+	// ARN or ID of the KMS key to use when encryption_mode is SSE_KMS.
+	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`
+
+	// Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
+	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
+}
+
+type RedshiftSettingsParameters struct {
+
+	// Custom S3 Bucket Object prefix for intermediate storage.
+	// +kubebuilder:validation:Optional
+	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`
+
+	// Custom S3 Bucket name for intermediate storage.
+	// +kubebuilder:validation:Optional
+	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
+
+	// The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
+	// +kubebuilder:validation:Optional
+	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`
+
+	// ARN or ID of the KMS key to use when encryption_mode is SSE_KMS.
+	// +kubebuilder:validation:Optional
+	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`
+
+	// Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
+	// +kubebuilder:validation:Optional
+	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
+}
+
+type S3SettingsInitParameters struct {
+
+	// Whether to add column name information to the .csv output file. Default is false.
+	AddColumnName *bool `json:"addColumnName,omitempty" tf:"add_column_name,omitempty"`
+
+	// S3 object prefix.
+	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`
+
+	// S3 bucket name.
+	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`
+
+	// Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
+	CannedACLForObjects *string `json:"cannedAclForObjects,omitempty" tf:"canned_acl_for_objects,omitempty"`
+
+	// Whether to write insert and update operations to .csv or .parquet output files. Default is false.
+	CdcInsertsAndUpdates *bool `json:"cdcInsertsAndUpdates,omitempty" tf:"cdc_inserts_and_updates,omitempty"`
+
+	// Whether to write insert operations to .csv or .parquet output files. Default is false.
+	CdcInsertsOnly *bool `json:"cdcInsertsOnly,omitempty" tf:"cdc_inserts_only,omitempty"`
+
+	// Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
+	CdcMaxBatchInterval *float64 `json:"cdcMaxBatchInterval,omitempty" tf:"cdc_max_batch_interval,omitempty"`
+
+	// Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
+	CdcMinFileSize *float64 `json:"cdcMinFileSize,omitempty" tf:"cdc_min_file_size,omitempty"`
+
+	// Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
+	CdcPath *string `json:"cdcPath,omitempty" tf:"cdc_path,omitempty"`
+
+	// Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
+	CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`
+
+	// Delimiter used to separate columns in the source files. Default is ,.
+	CsvDelimiter *string `json:"csvDelimiter,omitempty" tf:"csv_delimiter,omitempty"`
+
+	// String to use for all columns not included in the supplemental log.
+	CsvNoSupValue *string `json:"csvNoSupValue,omitempty" tf:"csv_no_sup_value,omitempty"`
+
+	// String to use as null when writing to the target.
+	CsvNullValue *string `json:"csvNullValue,omitempty" tf:"csv_null_value,omitempty"`
+
+	// Delimiter used to separate rows in the source files. Default is \n.
+	CsvRowDelimiter *string `json:"csvRowDelimiter,omitempty" tf:"csv_row_delimiter,omitempty"`
+
+	// Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
+	DataFormat *string `json:"dataFormat,omitempty" tf:"data_format,omitempty"`
+
+	// Size of one data page in bytes. Default is 1048576 (1 MiB).
+	DataPageSize *float64 `json:"dataPageSize,omitempty" tf:"data_page_size,omitempty"`
+
+	// Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
+	DatePartitionDelimiter *string `json:"datePartitionDelimiter,omitempty" tf:"date_partition_delimiter,omitempty"`
+
+	// Partition S3 bucket folders based on transaction commit dates. Default is false.
+	DatePartitionEnabled *bool `json:"datePartitionEnabled,omitempty" tf:"date_partition_enabled,omitempty"`
+
+	// Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
+	DatePartitionSequence *string `json:"datePartitionSequence,omitempty" tf:"date_partition_sequence,omitempty"`
+
+	// Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
+	DictPageSizeLimit *float64 `json:"dictPageSizeLimit,omitempty" tf:"dict_page_size_limit,omitempty"`
+
+	// Whether to enable statistics for Parquet pages and row groups. Default is true.
+	EnableStatistics *bool `json:"enableStatistics,omitempty" tf:"enable_statistics,omitempty"`
+
+	// Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
+	EncodingType *string `json:"encodingType,omitempty" tf:"encoding_type,omitempty"`
+
+	// Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
+	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`
+
+	// JSON document that describes how AWS DMS should interpret the data.
+	ExternalTableDefinition *string `json:"externalTableDefinition,omitempty" tf:"external_table_definition,omitempty"`
+
+	// Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
+	GlueCatalogGeneration *bool `json:"glueCatalogGeneration,omitempty" tf:"glue_catalog_generation,omitempty"`
+
+	// When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
+	IgnoreHeaderRows *float64 `json:"ignoreHeaderRows,omitempty" tf:"ignore_header_rows,omitempty"`
+
+	// Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
+	IncludeOpForFullLoad *bool `json:"includeOpForFullLoad,omitempty" tf:"include_op_for_full_load,omitempty"`
+
+	// Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
+	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`
+
+	// - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
+	ParquetTimestampInMillisecond *bool `json:"parquetTimestampInMillisecond,omitempty" tf:"parquet_timestamp_in_millisecond,omitempty"`
+
+	// Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
+	ParquetVersion *string `json:"parquetVersion,omitempty" tf:"parquet_version,omitempty"`
+
+	// Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
+	PreserveTransactions *bool `json:"preserveTransactions,omitempty" tf:"preserve_transactions,omitempty"`
+
+	// For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
+	Rfc4180 *bool `json:"rfc4180,omitempty" tf:"rfc_4180,omitempty"`
+
+	// Number of rows in a row group. Default is 10000.
+	RowGroupLength *float64 `json:"rowGroupLength,omitempty" tf:"row_group_length,omitempty"`
+
+	// ARN or ID of the KMS key to use when encryption_mode is SSE_KMS.
+	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`
+
+	// ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
+	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
+
+	// Column to add with timestamp information to the endpoint data for an Amazon S3 target.
+	TimestampColumnName *string `json:"timestampColumnName,omitempty" tf:"timestamp_column_name,omitempty"`
+
+	// Whether to use csv_no_sup_value for columns not included in the supplemental log.
+	UseCsvNoSupValue *bool `json:"useCsvNoSupValue,omitempty" tf:"use_csv_no_sup_value,omitempty"`
+
+	// When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
+	UseTaskStartTimeForFullLoadTimestamp *bool `json:"useTaskStartTimeForFullLoadTimestamp,omitempty" tf:"use_task_start_time_for_full_load_timestamp,omitempty"`
+}
+
+type S3SettingsObservation struct {
+
+	// Whether to add column name information to the .csv output file. Default is false.
+ AddColumnName *bool `json:"addColumnName,omitempty" tf:"add_column_name,omitempty"` + + // S3 object prefix. + BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"` + + // S3 bucket name. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none. + CannedACLForObjects *string `json:"cannedAclForObjects,omitempty" tf:"canned_acl_for_objects,omitempty"` + + // Whether to write insert and update operations to .csv or .parquet output files. Default is false. + CdcInsertsAndUpdates *bool `json:"cdcInsertsAndUpdates,omitempty" tf:"cdc_inserts_and_updates,omitempty"` + + // Whether to write insert operations to .csv or .parquet output files. Default is false. + CdcInsertsOnly *bool `json:"cdcInsertsOnly,omitempty" tf:"cdc_inserts_only,omitempty"` + + // Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60. + CdcMaxBatchInterval *float64 `json:"cdcMaxBatchInterval,omitempty" tf:"cdc_max_batch_interval,omitempty"` + + // Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly. + CdcMinFileSize *float64 `json:"cdcMinFileSize,omitempty" tf:"cdc_min_file_size,omitempty"` + + // Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later. 
+ CdcPath *string `json:"cdcPath,omitempty" tf:"cdc_path,omitempty"` + + // Set to compress target files. Default is NONE. Valid values are GZIP and NONE. + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + + // Delimiter used to separate columns in the source files. Default is ,. + CsvDelimiter *string `json:"csvDelimiter,omitempty" tf:"csv_delimiter,omitempty"` + + // String to use for all columns not included in the supplemental log. + CsvNoSupValue *string `json:"csvNoSupValue,omitempty" tf:"csv_no_sup_value,omitempty"` + + // String to as null when writing to the target. + CsvNullValue *string `json:"csvNullValue,omitempty" tf:"csv_null_value,omitempty"` + + // Delimiter used to separate rows in the source files. Default is \n. + CsvRowDelimiter *string `json:"csvRowDelimiter,omitempty" tf:"csv_row_delimiter,omitempty"` + + // Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv. + DataFormat *string `json:"dataFormat,omitempty" tf:"data_format,omitempty"` + + // Size of one data page in bytes. Default is 1048576 (1 MiB). + DataPageSize *float64 `json:"dataPageSize,omitempty" tf:"data_page_size,omitempty"` + + // Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH. + DatePartitionDelimiter *string `json:"datePartitionDelimiter,omitempty" tf:"date_partition_delimiter,omitempty"` + + // Partition S3 bucket folders based on transaction commit dates. Default is false. + DatePartitionEnabled *bool `json:"datePartitionEnabled,omitempty" tf:"date_partition_enabled,omitempty"` + + // Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD. 
+ DatePartitionSequence *string `json:"datePartitionSequence,omitempty" tf:"date_partition_sequence,omitempty"` + + // Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB). + DictPageSizeLimit *float64 `json:"dictPageSizeLimit,omitempty" tf:"dict_page_size_limit,omitempty"` + + // Whether to enable statistics for Parquet pages and row groups. Default is true. + EnableStatistics *bool `json:"enableStatistics,omitempty" tf:"enable_statistics,omitempty"` + + // Type of encoding to use. Value values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary. + EncodingType *string `json:"encodingType,omitempty" tf:"encoding_type,omitempty"` + + // Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3. + EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"` + + // JSON document that describes how AWS DMS should interpret the data. + ExternalTableDefinition *string `json:"externalTableDefinition,omitempty" tf:"external_table_definition,omitempty"` + + // Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false. + GlueCatalogGeneration *bool `json:"glueCatalogGeneration,omitempty" tf:"glue_catalog_generation,omitempty"` + + // When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0. + IgnoreHeaderRows *float64 `json:"ignoreHeaderRows,omitempty" tf:"ignore_header_rows,omitempty"` + + // Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false. 
+ IncludeOpForFullLoad *bool `json:"includeOpForFullLoad,omitempty" tf:"include_op_for_full_load,omitempty"` + + // Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB). + MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"` + + // - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. + ParquetTimestampInMillisecond *bool `json:"parquetTimestampInMillisecond,omitempty" tf:"parquet_timestamp_in_millisecond,omitempty"` + + // Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0. + ParquetVersion *string `json:"parquetVersion,omitempty" tf:"parquet_version,omitempty"` + + // Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. + PreserveTransactions *bool `json:"preserveTransactions,omitempty" tf:"preserve_transactions,omitempty"` + + // For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true. + Rfc4180 *bool `json:"rfc4180,omitempty" tf:"rfc_4180,omitempty"` + + // Number of rows in a row group. Default is 10000. + RowGroupLength *float64 `json:"rowGroupLength,omitempty" tf:"row_group_length,omitempty"` + + // ARN or Id of KMS Key to use when encryption_mode is SSE_KMS. + ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"` + + // ARN of the IAM Role with permissions to read from or write to the S3 Bucket. + ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"` + + // Column to add with timestamp information to the endpoint data for an Amazon S3 target. 
+ TimestampColumnName *string `json:"timestampColumnName,omitempty" tf:"timestamp_column_name,omitempty"` + + // Whether to use csv_no_sup_value for columns not included in the supplemental log. + UseCsvNoSupValue *bool `json:"useCsvNoSupValue,omitempty" tf:"use_csv_no_sup_value,omitempty"` + + // When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false. + UseTaskStartTimeForFullLoadTimestamp *bool `json:"useTaskStartTimeForFullLoadTimestamp,omitempty" tf:"use_task_start_time_for_full_load_timestamp,omitempty"` +} + +type S3SettingsParameters struct { + + // Whether to add column name information to the .csv output file. Default is false. + // +kubebuilder:validation:Optional + AddColumnName *bool `json:"addColumnName,omitempty" tf:"add_column_name,omitempty"` + + // S3 object prefix. + // +kubebuilder:validation:Optional + BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"` + + // S3 bucket name. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none. + // +kubebuilder:validation:Optional + CannedACLForObjects *string `json:"cannedAclForObjects,omitempty" tf:"canned_acl_for_objects,omitempty"` + + // Whether to write insert and update operations to .csv or .parquet output files. Default is false. 
+ // +kubebuilder:validation:Optional + CdcInsertsAndUpdates *bool `json:"cdcInsertsAndUpdates,omitempty" tf:"cdc_inserts_and_updates,omitempty"` + + // Whether to write insert operations to .csv or .parquet output files. Default is false. + // +kubebuilder:validation:Optional + CdcInsertsOnly *bool `json:"cdcInsertsOnly,omitempty" tf:"cdc_inserts_only,omitempty"` + + // Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60. + // +kubebuilder:validation:Optional + CdcMaxBatchInterval *float64 `json:"cdcMaxBatchInterval,omitempty" tf:"cdc_max_batch_interval,omitempty"` + + // Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly. + // +kubebuilder:validation:Optional + CdcMinFileSize *float64 `json:"cdcMinFileSize,omitempty" tf:"cdc_min_file_size,omitempty"` + + // Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later. + // +kubebuilder:validation:Optional + CdcPath *string `json:"cdcPath,omitempty" tf:"cdc_path,omitempty"` + + // Set to compress target files. Default is NONE. Valid values are GZIP and NONE. + // +kubebuilder:validation:Optional + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + + // Delimiter used to separate columns in the source files. Default is ,. + // +kubebuilder:validation:Optional + CsvDelimiter *string `json:"csvDelimiter,omitempty" tf:"csv_delimiter,omitempty"` + + // String to use for all columns not included in the supplemental log. 
+ // +kubebuilder:validation:Optional + CsvNoSupValue *string `json:"csvNoSupValue,omitempty" tf:"csv_no_sup_value,omitempty"` + + // String to as null when writing to the target. + // +kubebuilder:validation:Optional + CsvNullValue *string `json:"csvNullValue,omitempty" tf:"csv_null_value,omitempty"` + + // Delimiter used to separate rows in the source files. Default is \n. + // +kubebuilder:validation:Optional + CsvRowDelimiter *string `json:"csvRowDelimiter,omitempty" tf:"csv_row_delimiter,omitempty"` + + // Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv. + // +kubebuilder:validation:Optional + DataFormat *string `json:"dataFormat,omitempty" tf:"data_format,omitempty"` + + // Size of one data page in bytes. Default is 1048576 (1 MiB). + // +kubebuilder:validation:Optional + DataPageSize *float64 `json:"dataPageSize,omitempty" tf:"data_page_size,omitempty"` + + // Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH. + // +kubebuilder:validation:Optional + DatePartitionDelimiter *string `json:"datePartitionDelimiter,omitempty" tf:"date_partition_delimiter,omitempty"` + + // Partition S3 bucket folders based on transaction commit dates. Default is false. + // +kubebuilder:validation:Optional + DatePartitionEnabled *bool `json:"datePartitionEnabled,omitempty" tf:"date_partition_enabled,omitempty"` + + // Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD. + // +kubebuilder:validation:Optional + DatePartitionSequence *string `json:"datePartitionSequence,omitempty" tf:"date_partition_sequence,omitempty"` + + // Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB). 
+ // +kubebuilder:validation:Optional + DictPageSizeLimit *float64 `json:"dictPageSizeLimit,omitempty" tf:"dict_page_size_limit,omitempty"` + + // Whether to enable statistics for Parquet pages and row groups. Default is true. + // +kubebuilder:validation:Optional + EnableStatistics *bool `json:"enableStatistics,omitempty" tf:"enable_statistics,omitempty"` + + // Type of encoding to use. Value values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary. + // +kubebuilder:validation:Optional + EncodingType *string `json:"encodingType,omitempty" tf:"encoding_type,omitempty"` + + // Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3. + // +kubebuilder:validation:Optional + EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"` + + // JSON document that describes how AWS DMS should interpret the data. + // +kubebuilder:validation:Optional + ExternalTableDefinition *string `json:"externalTableDefinition,omitempty" tf:"external_table_definition,omitempty"` + + // Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false. + // +kubebuilder:validation:Optional + GlueCatalogGeneration *bool `json:"glueCatalogGeneration,omitempty" tf:"glue_catalog_generation,omitempty"` + + // When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0. + // +kubebuilder:validation:Optional + IgnoreHeaderRows *float64 `json:"ignoreHeaderRows,omitempty" tf:"ignore_header_rows,omitempty"` + + // Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false. 
+ // +kubebuilder:validation:Optional + IncludeOpForFullLoad *bool `json:"includeOpForFullLoad,omitempty" tf:"include_op_for_full_load,omitempty"` + + // Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB). + // +kubebuilder:validation:Optional + MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"` + + // - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false. + // +kubebuilder:validation:Optional + ParquetTimestampInMillisecond *bool `json:"parquetTimestampInMillisecond,omitempty" tf:"parquet_timestamp_in_millisecond,omitempty"` + + // Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0. + // +kubebuilder:validation:Optional + ParquetVersion *string `json:"parquetVersion,omitempty" tf:"parquet_version,omitempty"` + + // Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false. + // +kubebuilder:validation:Optional + PreserveTransactions *bool `json:"preserveTransactions,omitempty" tf:"preserve_transactions,omitempty"` + + // For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true. + // +kubebuilder:validation:Optional + Rfc4180 *bool `json:"rfc4180,omitempty" tf:"rfc_4180,omitempty"` + + // Number of rows in a row group. Default is 10000. + // +kubebuilder:validation:Optional + RowGroupLength *float64 `json:"rowGroupLength,omitempty" tf:"row_group_length,omitempty"` + + // ARN or Id of KMS Key to use when encryption_mode is SSE_KMS. 
+ // +kubebuilder:validation:Optional + ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"` + + // ARN of the IAM Role with permissions to read from or write to the S3 Bucket. + // +kubebuilder:validation:Optional + ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"` + + // Column to add with timestamp information to the endpoint data for an Amazon S3 target. + // +kubebuilder:validation:Optional + TimestampColumnName *string `json:"timestampColumnName,omitempty" tf:"timestamp_column_name,omitempty"` + + // Whether to use csv_no_sup_value for columns not included in the supplemental log. + // +kubebuilder:validation:Optional + UseCsvNoSupValue *bool `json:"useCsvNoSupValue,omitempty" tf:"use_csv_no_sup_value,omitempty"` + + // When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false. + // +kubebuilder:validation:Optional + UseTaskStartTimeForFullLoadTimestamp *bool `json:"useTaskStartTimeForFullLoadTimestamp,omitempty" tf:"use_task_start_time_for_full_load_timestamp,omitempty"` +} + +// EndpointSpec defines the desired state of Endpoint +type EndpointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EndpointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EndpointInitParameters `json:"initProvider,omitempty"` +} + +// EndpointStatus defines the observed state of Endpoint. +type EndpointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EndpointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Endpoint is the Schema for the Endpoints API. Provides a DMS (Data Migration Service) endpoint resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Endpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.endpointType) || (has(self.initProvider) && has(self.initProvider.endpointType))",message="spec.forProvider.endpointType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.engineName) || (has(self.initProvider) && has(self.initProvider.engineName))",message="spec.forProvider.engineName is a required parameter" + Spec EndpointSpec `json:"spec"` + Status EndpointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EndpointList contains a list of Endpoints +type EndpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Endpoint `json:"items"` +} + +// Repository type metadata. +var ( + Endpoint_Kind = "Endpoint" + Endpoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Endpoint_Kind}.String() + Endpoint_KindAPIVersion = Endpoint_Kind + "." + CRDGroupVersion.String() + Endpoint_GroupVersionKind = CRDGroupVersion.WithKind(Endpoint_Kind) +) + +func init() { + SchemeBuilder.Register(&Endpoint{}, &EndpointList{}) +} diff --git a/apis/dms/v1beta2/zz_generated.conversion_hubs.go b/apis/dms/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..419d7b5b65 --- /dev/null +++ b/apis/dms/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Endpoint) Hub() {} diff --git a/apis/dms/v1beta2/zz_generated.deepcopy.go b/apis/dms/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..4429cd96a9 --- /dev/null +++ b/apis/dms/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2537 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ElasticsearchSettingsInitParameters) DeepCopyInto(out *ElasticsearchSettingsInitParameters) { + *out = *in + if in.EndpointURI != nil { + in, out := &in.EndpointURI, &out.EndpointURI + *out = new(string) + **out = **in + } + if in.ErrorRetryDuration != nil { + in, out := &in.ErrorRetryDuration, &out.ErrorRetryDuration + *out = new(float64) + **out = **in + } + if in.FullLoadErrorPercentage != nil { + in, out := &in.FullLoadErrorPercentage, &out.FullLoadErrorPercentage + *out = new(float64) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } + if in.UseNewMappingType != nil { + in, out := &in.UseNewMappingType, &out.UseNewMappingType + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSettingsInitParameters. +func (in *ElasticsearchSettingsInitParameters) DeepCopy() *ElasticsearchSettingsInitParameters { + if in == nil { + return nil + } + out := new(ElasticsearchSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchSettingsObservation) DeepCopyInto(out *ElasticsearchSettingsObservation) { + *out = *in + if in.EndpointURI != nil { + in, out := &in.EndpointURI, &out.EndpointURI + *out = new(string) + **out = **in + } + if in.ErrorRetryDuration != nil { + in, out := &in.ErrorRetryDuration, &out.ErrorRetryDuration + *out = new(float64) + **out = **in + } + if in.FullLoadErrorPercentage != nil { + in, out := &in.FullLoadErrorPercentage, &out.FullLoadErrorPercentage + *out = new(float64) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } + if in.UseNewMappingType != nil { + in, out := &in.UseNewMappingType, &out.UseNewMappingType + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSettingsObservation. +func (in *ElasticsearchSettingsObservation) DeepCopy() *ElasticsearchSettingsObservation { + if in == nil { + return nil + } + out := new(ElasticsearchSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchSettingsParameters) DeepCopyInto(out *ElasticsearchSettingsParameters) { + *out = *in + if in.EndpointURI != nil { + in, out := &in.EndpointURI, &out.EndpointURI + *out = new(string) + **out = **in + } + if in.ErrorRetryDuration != nil { + in, out := &in.ErrorRetryDuration, &out.ErrorRetryDuration + *out = new(float64) + **out = **in + } + if in.FullLoadErrorPercentage != nil { + in, out := &in.FullLoadErrorPercentage, &out.FullLoadErrorPercentage + *out = new(float64) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } + if in.UseNewMappingType != nil { + in, out := &in.UseNewMappingType, &out.UseNewMappingType + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSettingsParameters. +func (in *ElasticsearchSettingsParameters) DeepCopy() *ElasticsearchSettingsParameters { + if in == nil { + return nil + } + out := new(ElasticsearchSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Endpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.ElasticsearchSettings != nil { + in, out := &in.ElasticsearchSettings, &out.ElasticsearchSettings + *out = new(ElasticsearchSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.EngineName != nil { + in, out := &in.EngineName, &out.EngineName + *out = new(string) + **out = **in + } + if in.ExtraConnectionAttributes != nil { + in, out := &in.ExtraConnectionAttributes, &out.ExtraConnectionAttributes + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KafkaSettings != nil { + in, out := &in.KafkaSettings, &out.KafkaSettings + *out = new(KafkaSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisSettings != nil { + in, out := &in.KinesisSettings, &out.KinesisSettings + *out = new(KinesisSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MongodbSettings != nil { + in, out := &in.MongodbSettings, &out.MongodbSettings + *out = 
new(MongodbSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PauseReplicationTasks != nil { + in, out := &in.PauseReplicationTasks, &out.PauseReplicationTasks + *out = new(bool) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PostgresSettings != nil { + in, out := &in.PostgresSettings, &out.PostgresSettings + *out = new(PostgresSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RedisSettings != nil { + in, out := &in.RedisSettings, &out.RedisSettings + *out = new(RedisSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RedshiftSettings != nil { + in, out := &in.RedshiftSettings, &out.RedshiftSettings + *out = new(RedshiftSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Settings != nil { + in, out := &in.S3Settings, &out.S3Settings + *out = new(S3SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SSLMode != nil { + in, out := &in.SSLMode, &out.SSLMode + *out = new(string) + **out = **in + } + if in.SecretsManagerAccessRoleArn != nil { + in, out := &in.SecretsManagerAccessRoleArn, &out.SecretsManagerAccessRoleArn + *out = new(string) + **out = **in + } + if in.SecretsManagerAccessRoleArnRef != nil { + in, out := &in.SecretsManagerAccessRoleArnRef, &out.SecretsManagerAccessRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SecretsManagerAccessRoleArnSelector != nil { + in, out := &in.SecretsManagerAccessRoleArnSelector, &out.SecretsManagerAccessRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretsManagerArn != nil { + in, out := &in.SecretsManagerArn, &out.SecretsManagerArn + *out = new(string) + **out = **in + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } + 
if in.ServiceAccessRole != nil { + in, out := &in.ServiceAccessRole, &out.ServiceAccessRole + *out = new(string) + **out = **in + } + if in.ServiceAccessRoleRef != nil { + in, out := &in.ServiceAccessRoleRef, &out.ServiceAccessRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccessRoleSelector != nil { + in, out := &in.ServiceAccessRoleSelector, &out.ServiceAccessRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters. +func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters { + if in == nil { + return nil + } + out := new(EndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointList) DeepCopyInto(out *EndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointList. +func (in *EndpointList) DeepCopy() *EndpointList { + if in == nil { + return nil + } + out := new(EndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.ElasticsearchSettings != nil { + in, out := &in.ElasticsearchSettings, &out.ElasticsearchSettings + *out = new(ElasticsearchSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.EndpointArn != nil { + in, out := &in.EndpointArn, &out.EndpointArn + *out = new(string) + **out = **in + } + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.EngineName != nil { + in, out := &in.EngineName, &out.EngineName + *out = new(string) + **out = **in + } + if in.ExtraConnectionAttributes != nil { + in, out := &in.ExtraConnectionAttributes, &out.ExtraConnectionAttributes + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KafkaSettings != nil { + in, out := &in.KafkaSettings, &out.KafkaSettings + *out = new(KafkaSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.KinesisSettings != nil { + in, out := &in.KinesisSettings, &out.KinesisSettings + *out = new(KinesisSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.MongodbSettings != nil { + in, out := &in.MongodbSettings, &out.MongodbSettings + *out = new(MongodbSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.PauseReplicationTasks != nil { + in, out := 
&in.PauseReplicationTasks, &out.PauseReplicationTasks + *out = new(bool) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PostgresSettings != nil { + in, out := &in.PostgresSettings, &out.PostgresSettings + *out = new(PostgresSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.RedisSettings != nil { + in, out := &in.RedisSettings, &out.RedisSettings + *out = new(RedisSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.RedshiftSettings != nil { + in, out := &in.RedshiftSettings, &out.RedshiftSettings + *out = new(RedshiftSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.S3Settings != nil { + in, out := &in.S3Settings, &out.S3Settings + *out = new(S3SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SSLMode != nil { + in, out := &in.SSLMode, &out.SSLMode + *out = new(string) + **out = **in + } + if in.SecretsManagerAccessRoleArn != nil { + in, out := &in.SecretsManagerAccessRoleArn, &out.SecretsManagerAccessRoleArn + *out = new(string) + **out = **in + } + if in.SecretsManagerArn != nil { + in, out := &in.SecretsManagerArn, &out.SecretsManagerArn + *out = new(string) + **out = **in + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } + if in.ServiceAccessRole != nil { + in, out := &in.ServiceAccessRole, &out.ServiceAccessRole + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation. +func (in *EndpointObservation) DeepCopy() *EndpointObservation { + if in == nil { + return nil + } + out := new(EndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.ElasticsearchSettings != nil { + in, out := &in.ElasticsearchSettings, &out.ElasticsearchSettings + *out = new(ElasticsearchSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.EngineName != nil { + in, out := &in.EngineName, &out.EngineName + *out = new(string) + **out = **in + } + if in.ExtraConnectionAttributes != nil { + in, out := &in.ExtraConnectionAttributes, &out.ExtraConnectionAttributes + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KafkaSettings != nil { + in, out := &in.KafkaSettings, 
&out.KafkaSettings + *out = new(KafkaSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisSettings != nil { + in, out := &in.KinesisSettings, &out.KinesisSettings + *out = new(KinesisSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.MongodbSettings != nil { + in, out := &in.MongodbSettings, &out.MongodbSettings + *out = new(MongodbSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PauseReplicationTasks != nil { + in, out := &in.PauseReplicationTasks, &out.PauseReplicationTasks + *out = new(bool) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PostgresSettings != nil { + in, out := &in.PostgresSettings, &out.PostgresSettings + *out = new(PostgresSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.RedisSettings != nil { + in, out := &in.RedisSettings, &out.RedisSettings + *out = new(RedisSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.RedshiftSettings != nil { + in, out := &in.RedshiftSettings, &out.RedshiftSettings + *out = new(RedshiftSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.S3Settings != nil { + in, out := &in.S3Settings, &out.S3Settings + *out = new(S3SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SSLMode != nil { + in, out := &in.SSLMode, &out.SSLMode + *out = new(string) + **out = **in + } + if in.SecretsManagerAccessRoleArn != nil { + in, out := &in.SecretsManagerAccessRoleArn, &out.SecretsManagerAccessRoleArn + *out = new(string) + **out = **in + } + if in.SecretsManagerAccessRoleArnRef != nil { + in, out := &in.SecretsManagerAccessRoleArnRef, &out.SecretsManagerAccessRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.SecretsManagerAccessRoleArnSelector != nil { + in, out := &in.SecretsManagerAccessRoleArnSelector, &out.SecretsManagerAccessRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretsManagerArn != nil { + in, out := &in.SecretsManagerArn, &out.SecretsManagerArn + *out = new(string) + **out = **in + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } + if in.ServiceAccessRole != nil { + in, out := &in.ServiceAccessRole, &out.ServiceAccessRole + *out = new(string) + **out = **in + } + if in.ServiceAccessRoleRef != nil { + in, out := &in.ServiceAccessRoleRef, &out.ServiceAccessRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccessRoleSelector != nil { + in, out := &in.ServiceAccessRoleSelector, &out.ServiceAccessRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters. +func (in *EndpointParameters) DeepCopy() *EndpointParameters { + if in == nil { + return nil + } + out := new(EndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointSpec) DeepCopyInto(out *EndpointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSpec. +func (in *EndpointSpec) DeepCopy() *EndpointSpec { + if in == nil { + return nil + } + out := new(EndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus. +func (in *EndpointStatus) DeepCopy() *EndpointStatus { + if in == nil { + return nil + } + out := new(EndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaSettingsInitParameters) DeepCopyInto(out *KafkaSettingsInitParameters) { + *out = *in + if in.Broker != nil { + in, out := &in.Broker, &out.Broker + *out = new(string) + **out = **in + } + if in.IncludeControlDetails != nil { + in, out := &in.IncludeControlDetails, &out.IncludeControlDetails + *out = new(bool) + **out = **in + } + if in.IncludeNullAndEmpty != nil { + in, out := &in.IncludeNullAndEmpty, &out.IncludeNullAndEmpty + *out = new(bool) + **out = **in + } + if in.IncludePartitionValue != nil { + in, out := &in.IncludePartitionValue, &out.IncludePartitionValue + *out = new(bool) + **out = **in + } + if in.IncludeTableAlterOperations != nil { + in, out := &in.IncludeTableAlterOperations, &out.IncludeTableAlterOperations + *out = new(bool) + **out = **in + } + if in.IncludeTransactionDetails != nil { + in, out := &in.IncludeTransactionDetails, &out.IncludeTransactionDetails + *out = new(bool) + **out = **in + } + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.MessageMaxBytes != nil { + in, out := &in.MessageMaxBytes, &out.MessageMaxBytes + *out = new(float64) + **out = **in + } + if in.NoHexPrefix != nil { + in, out := &in.NoHexPrefix, &out.NoHexPrefix + *out = new(bool) + **out = **in + } + if in.PartitionIncludeSchemaTable != nil { + in, out := &in.PartitionIncludeSchemaTable, &out.PartitionIncludeSchemaTable + *out = new(bool) + **out = **in + } + if in.SSLCACertificateArn != nil { + in, out := &in.SSLCACertificateArn, &out.SSLCACertificateArn + *out = new(string) + **out = **in + } + if in.SSLClientCertificateArn != nil { + in, out := &in.SSLClientCertificateArn, &out.SSLClientCertificateArn + *out = new(string) + **out = **in + } + if in.SSLClientKeyArn != nil { + in, out := &in.SSLClientKeyArn, &out.SSLClientKeyArn + *out = new(string) + **out = **in + } + if in.SSLClientKeyPasswordSecretRef != nil { + in, out := &in.SSLClientKeyPasswordSecretRef, 
&out.SSLClientKeyPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslPasswordSecretRef != nil { + in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSettingsInitParameters. +func (in *KafkaSettingsInitParameters) DeepCopy() *KafkaSettingsInitParameters { + if in == nil { + return nil + } + out := new(KafkaSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaSettingsObservation) DeepCopyInto(out *KafkaSettingsObservation) { + *out = *in + if in.Broker != nil { + in, out := &in.Broker, &out.Broker + *out = new(string) + **out = **in + } + if in.IncludeControlDetails != nil { + in, out := &in.IncludeControlDetails, &out.IncludeControlDetails + *out = new(bool) + **out = **in + } + if in.IncludeNullAndEmpty != nil { + in, out := &in.IncludeNullAndEmpty, &out.IncludeNullAndEmpty + *out = new(bool) + **out = **in + } + if in.IncludePartitionValue != nil { + in, out := &in.IncludePartitionValue, &out.IncludePartitionValue + *out = new(bool) + **out = **in + } + if in.IncludeTableAlterOperations != nil { + in, out := &in.IncludeTableAlterOperations, &out.IncludeTableAlterOperations + *out = new(bool) + **out = **in + } + if in.IncludeTransactionDetails != nil { + in, out := &in.IncludeTransactionDetails, &out.IncludeTransactionDetails + *out = new(bool) + **out = **in + } + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.MessageMaxBytes != nil { + in, out := &in.MessageMaxBytes, &out.MessageMaxBytes + *out = new(float64) + **out = **in + } + if in.NoHexPrefix != nil { + in, out := &in.NoHexPrefix, &out.NoHexPrefix + *out = new(bool) + **out = **in + } + if in.PartitionIncludeSchemaTable != nil { + in, out := &in.PartitionIncludeSchemaTable, &out.PartitionIncludeSchemaTable + *out = new(bool) + **out = **in + } + if in.SSLCACertificateArn != nil { + in, out := &in.SSLCACertificateArn, &out.SSLCACertificateArn + *out = new(string) + **out = **in + } + if in.SSLClientCertificateArn != nil { + in, out := &in.SSLClientCertificateArn, &out.SSLClientCertificateArn + *out = new(string) + **out = **in + } + if in.SSLClientKeyArn != nil { + in, out := &in.SSLClientKeyArn, &out.SSLClientKeyArn + *out = new(string) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = 
**in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSettingsObservation. +func (in *KafkaSettingsObservation) DeepCopy() *KafkaSettingsObservation { + if in == nil { + return nil + } + out := new(KafkaSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaSettingsParameters) DeepCopyInto(out *KafkaSettingsParameters) { + *out = *in + if in.Broker != nil { + in, out := &in.Broker, &out.Broker + *out = new(string) + **out = **in + } + if in.IncludeControlDetails != nil { + in, out := &in.IncludeControlDetails, &out.IncludeControlDetails + *out = new(bool) + **out = **in + } + if in.IncludeNullAndEmpty != nil { + in, out := &in.IncludeNullAndEmpty, &out.IncludeNullAndEmpty + *out = new(bool) + **out = **in + } + if in.IncludePartitionValue != nil { + in, out := &in.IncludePartitionValue, &out.IncludePartitionValue + *out = new(bool) + **out = **in + } + if in.IncludeTableAlterOperations != nil { + in, out := &in.IncludeTableAlterOperations, &out.IncludeTableAlterOperations + *out = new(bool) + **out = **in + } + if in.IncludeTransactionDetails != nil { + in, out := &in.IncludeTransactionDetails, &out.IncludeTransactionDetails + *out = new(bool) + **out = **in + } + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.MessageMaxBytes != nil { + in, out := &in.MessageMaxBytes, &out.MessageMaxBytes + *out = new(float64) + **out = **in + } + if in.NoHexPrefix != nil { + in, out := &in.NoHexPrefix, &out.NoHexPrefix + *out = new(bool) + **out = **in + } + if 
in.PartitionIncludeSchemaTable != nil { + in, out := &in.PartitionIncludeSchemaTable, &out.PartitionIncludeSchemaTable + *out = new(bool) + **out = **in + } + if in.SSLCACertificateArn != nil { + in, out := &in.SSLCACertificateArn, &out.SSLCACertificateArn + *out = new(string) + **out = **in + } + if in.SSLClientCertificateArn != nil { + in, out := &in.SSLClientCertificateArn, &out.SSLClientCertificateArn + *out = new(string) + **out = **in + } + if in.SSLClientKeyArn != nil { + in, out := &in.SSLClientKeyArn, &out.SSLClientKeyArn + *out = new(string) + **out = **in + } + if in.SSLClientKeyPasswordSecretRef != nil { + in, out := &in.SSLClientKeyPasswordSecretRef, &out.SSLClientKeyPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslPasswordSecretRef != nil { + in, out := &in.SaslPasswordSecretRef, &out.SaslPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SaslUsername != nil { + in, out := &in.SaslUsername, &out.SaslUsername + *out = new(string) + **out = **in + } + if in.SecurityProtocol != nil { + in, out := &in.SecurityProtocol, &out.SecurityProtocol + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSettingsParameters. +func (in *KafkaSettingsParameters) DeepCopy() *KafkaSettingsParameters { + if in == nil { + return nil + } + out := new(KafkaSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisSettingsInitParameters) DeepCopyInto(out *KinesisSettingsInitParameters) { + *out = *in + if in.IncludeControlDetails != nil { + in, out := &in.IncludeControlDetails, &out.IncludeControlDetails + *out = new(bool) + **out = **in + } + if in.IncludeNullAndEmpty != nil { + in, out := &in.IncludeNullAndEmpty, &out.IncludeNullAndEmpty + *out = new(bool) + **out = **in + } + if in.IncludePartitionValue != nil { + in, out := &in.IncludePartitionValue, &out.IncludePartitionValue + *out = new(bool) + **out = **in + } + if in.IncludeTableAlterOperations != nil { + in, out := &in.IncludeTableAlterOperations, &out.IncludeTableAlterOperations + *out = new(bool) + **out = **in + } + if in.IncludeTransactionDetails != nil { + in, out := &in.IncludeTransactionDetails, &out.IncludeTransactionDetails + *out = new(bool) + **out = **in + } + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.PartitionIncludeSchemaTable != nil { + in, out := &in.PartitionIncludeSchemaTable, &out.PartitionIncludeSchemaTable + *out = new(bool) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisSettingsInitParameters. +func (in *KinesisSettingsInitParameters) DeepCopy() *KinesisSettingsInitParameters { + if in == nil { + return nil + } + out := new(KinesisSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisSettingsObservation) DeepCopyInto(out *KinesisSettingsObservation) { + *out = *in + if in.IncludeControlDetails != nil { + in, out := &in.IncludeControlDetails, &out.IncludeControlDetails + *out = new(bool) + **out = **in + } + if in.IncludeNullAndEmpty != nil { + in, out := &in.IncludeNullAndEmpty, &out.IncludeNullAndEmpty + *out = new(bool) + **out = **in + } + if in.IncludePartitionValue != nil { + in, out := &in.IncludePartitionValue, &out.IncludePartitionValue + *out = new(bool) + **out = **in + } + if in.IncludeTableAlterOperations != nil { + in, out := &in.IncludeTableAlterOperations, &out.IncludeTableAlterOperations + *out = new(bool) + **out = **in + } + if in.IncludeTransactionDetails != nil { + in, out := &in.IncludeTransactionDetails, &out.IncludeTransactionDetails + *out = new(bool) + **out = **in + } + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.PartitionIncludeSchemaTable != nil { + in, out := &in.PartitionIncludeSchemaTable, &out.PartitionIncludeSchemaTable + *out = new(bool) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisSettingsObservation. +func (in *KinesisSettingsObservation) DeepCopy() *KinesisSettingsObservation { + if in == nil { + return nil + } + out := new(KinesisSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisSettingsParameters) DeepCopyInto(out *KinesisSettingsParameters) { + *out = *in + if in.IncludeControlDetails != nil { + in, out := &in.IncludeControlDetails, &out.IncludeControlDetails + *out = new(bool) + **out = **in + } + if in.IncludeNullAndEmpty != nil { + in, out := &in.IncludeNullAndEmpty, &out.IncludeNullAndEmpty + *out = new(bool) + **out = **in + } + if in.IncludePartitionValue != nil { + in, out := &in.IncludePartitionValue, &out.IncludePartitionValue + *out = new(bool) + **out = **in + } + if in.IncludeTableAlterOperations != nil { + in, out := &in.IncludeTableAlterOperations, &out.IncludeTableAlterOperations + *out = new(bool) + **out = **in + } + if in.IncludeTransactionDetails != nil { + in, out := &in.IncludeTransactionDetails, &out.IncludeTransactionDetails + *out = new(bool) + **out = **in + } + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.PartitionIncludeSchemaTable != nil { + in, out := &in.PartitionIncludeSchemaTable, &out.PartitionIncludeSchemaTable + *out = new(bool) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisSettingsParameters. +func (in *KinesisSettingsParameters) DeepCopy() *KinesisSettingsParameters { + if in == nil { + return nil + } + out := new(KinesisSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongodbSettingsInitParameters) DeepCopyInto(out *MongodbSettingsInitParameters) { + *out = *in + if in.AuthMechanism != nil { + in, out := &in.AuthMechanism, &out.AuthMechanism + *out = new(string) + **out = **in + } + if in.AuthSource != nil { + in, out := &in.AuthSource, &out.AuthSource + *out = new(string) + **out = **in + } + if in.AuthType != nil { + in, out := &in.AuthType, &out.AuthType + *out = new(string) + **out = **in + } + if in.DocsToInvestigate != nil { + in, out := &in.DocsToInvestigate, &out.DocsToInvestigate + *out = new(string) + **out = **in + } + if in.ExtractDocID != nil { + in, out := &in.ExtractDocID, &out.ExtractDocID + *out = new(string) + **out = **in + } + if in.NestingLevel != nil { + in, out := &in.NestingLevel, &out.NestingLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbSettingsInitParameters. +func (in *MongodbSettingsInitParameters) DeepCopy() *MongodbSettingsInitParameters { + if in == nil { + return nil + } + out := new(MongodbSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongodbSettingsObservation) DeepCopyInto(out *MongodbSettingsObservation) { + *out = *in + if in.AuthMechanism != nil { + in, out := &in.AuthMechanism, &out.AuthMechanism + *out = new(string) + **out = **in + } + if in.AuthSource != nil { + in, out := &in.AuthSource, &out.AuthSource + *out = new(string) + **out = **in + } + if in.AuthType != nil { + in, out := &in.AuthType, &out.AuthType + *out = new(string) + **out = **in + } + if in.DocsToInvestigate != nil { + in, out := &in.DocsToInvestigate, &out.DocsToInvestigate + *out = new(string) + **out = **in + } + if in.ExtractDocID != nil { + in, out := &in.ExtractDocID, &out.ExtractDocID + *out = new(string) + **out = **in + } + if in.NestingLevel != nil { + in, out := &in.NestingLevel, &out.NestingLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbSettingsObservation. +func (in *MongodbSettingsObservation) DeepCopy() *MongodbSettingsObservation { + if in == nil { + return nil + } + out := new(MongodbSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongodbSettingsParameters) DeepCopyInto(out *MongodbSettingsParameters) { + *out = *in + if in.AuthMechanism != nil { + in, out := &in.AuthMechanism, &out.AuthMechanism + *out = new(string) + **out = **in + } + if in.AuthSource != nil { + in, out := &in.AuthSource, &out.AuthSource + *out = new(string) + **out = **in + } + if in.AuthType != nil { + in, out := &in.AuthType, &out.AuthType + *out = new(string) + **out = **in + } + if in.DocsToInvestigate != nil { + in, out := &in.DocsToInvestigate, &out.DocsToInvestigate + *out = new(string) + **out = **in + } + if in.ExtractDocID != nil { + in, out := &in.ExtractDocID, &out.ExtractDocID + *out = new(string) + **out = **in + } + if in.NestingLevel != nil { + in, out := &in.NestingLevel, &out.NestingLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbSettingsParameters. +func (in *MongodbSettingsParameters) DeepCopy() *MongodbSettingsParameters { + if in == nil { + return nil + } + out := new(MongodbSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresSettingsInitParameters) DeepCopyInto(out *PostgresSettingsInitParameters) { + *out = *in + if in.AfterConnectScript != nil { + in, out := &in.AfterConnectScript, &out.AfterConnectScript + *out = new(string) + **out = **in + } + if in.BabelfishDatabaseName != nil { + in, out := &in.BabelfishDatabaseName, &out.BabelfishDatabaseName + *out = new(string) + **out = **in + } + if in.CaptureDdls != nil { + in, out := &in.CaptureDdls, &out.CaptureDdls + *out = new(bool) + **out = **in + } + if in.DatabaseMode != nil { + in, out := &in.DatabaseMode, &out.DatabaseMode + *out = new(string) + **out = **in + } + if in.DdlArtifactsSchema != nil { + in, out := &in.DdlArtifactsSchema, &out.DdlArtifactsSchema + *out = new(string) + **out = **in + } + if in.ExecuteTimeout != nil { + in, out := &in.ExecuteTimeout, &out.ExecuteTimeout + *out = new(float64) + **out = **in + } + if in.FailTasksOnLobTruncation != nil { + in, out := &in.FailTasksOnLobTruncation, &out.FailTasksOnLobTruncation + *out = new(bool) + **out = **in + } + if in.HeartbeatEnable != nil { + in, out := &in.HeartbeatEnable, &out.HeartbeatEnable + *out = new(bool) + **out = **in + } + if in.HeartbeatFrequency != nil { + in, out := &in.HeartbeatFrequency, &out.HeartbeatFrequency + *out = new(float64) + **out = **in + } + if in.HeartbeatSchema != nil { + in, out := &in.HeartbeatSchema, &out.HeartbeatSchema + *out = new(string) + **out = **in + } + if in.MapBooleanAsBoolean != nil { + in, out := &in.MapBooleanAsBoolean, &out.MapBooleanAsBoolean + *out = new(bool) + **out = **in + } + if in.MapJsonbAsClob != nil { + in, out := &in.MapJsonbAsClob, &out.MapJsonbAsClob + *out = new(bool) + **out = **in + } + if in.MapLongVarcharAs != nil { + in, out := &in.MapLongVarcharAs, &out.MapLongVarcharAs + *out = new(string) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(float64) + **out = **in + } + if in.PluginName != nil { + in, out := &in.PluginName, 
&out.PluginName + *out = new(string) + **out = **in + } + if in.SlotName != nil { + in, out := &in.SlotName, &out.SlotName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSettingsInitParameters. +func (in *PostgresSettingsInitParameters) DeepCopy() *PostgresSettingsInitParameters { + if in == nil { + return nil + } + out := new(PostgresSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresSettingsObservation) DeepCopyInto(out *PostgresSettingsObservation) { + *out = *in + if in.AfterConnectScript != nil { + in, out := &in.AfterConnectScript, &out.AfterConnectScript + *out = new(string) + **out = **in + } + if in.BabelfishDatabaseName != nil { + in, out := &in.BabelfishDatabaseName, &out.BabelfishDatabaseName + *out = new(string) + **out = **in + } + if in.CaptureDdls != nil { + in, out := &in.CaptureDdls, &out.CaptureDdls + *out = new(bool) + **out = **in + } + if in.DatabaseMode != nil { + in, out := &in.DatabaseMode, &out.DatabaseMode + *out = new(string) + **out = **in + } + if in.DdlArtifactsSchema != nil { + in, out := &in.DdlArtifactsSchema, &out.DdlArtifactsSchema + *out = new(string) + **out = **in + } + if in.ExecuteTimeout != nil { + in, out := &in.ExecuteTimeout, &out.ExecuteTimeout + *out = new(float64) + **out = **in + } + if in.FailTasksOnLobTruncation != nil { + in, out := &in.FailTasksOnLobTruncation, &out.FailTasksOnLobTruncation + *out = new(bool) + **out = **in + } + if in.HeartbeatEnable != nil { + in, out := &in.HeartbeatEnable, &out.HeartbeatEnable + *out = new(bool) + **out = **in + } + if in.HeartbeatFrequency != nil { + in, out := &in.HeartbeatFrequency, &out.HeartbeatFrequency + *out = new(float64) + **out = **in + } + if in.HeartbeatSchema != nil { + in, out := &in.HeartbeatSchema, &out.HeartbeatSchema 
+ *out = new(string) + **out = **in + } + if in.MapBooleanAsBoolean != nil { + in, out := &in.MapBooleanAsBoolean, &out.MapBooleanAsBoolean + *out = new(bool) + **out = **in + } + if in.MapJsonbAsClob != nil { + in, out := &in.MapJsonbAsClob, &out.MapJsonbAsClob + *out = new(bool) + **out = **in + } + if in.MapLongVarcharAs != nil { + in, out := &in.MapLongVarcharAs, &out.MapLongVarcharAs + *out = new(string) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(float64) + **out = **in + } + if in.PluginName != nil { + in, out := &in.PluginName, &out.PluginName + *out = new(string) + **out = **in + } + if in.SlotName != nil { + in, out := &in.SlotName, &out.SlotName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSettingsObservation. +func (in *PostgresSettingsObservation) DeepCopy() *PostgresSettingsObservation { + if in == nil { + return nil + } + out := new(PostgresSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresSettingsParameters) DeepCopyInto(out *PostgresSettingsParameters) { + *out = *in + if in.AfterConnectScript != nil { + in, out := &in.AfterConnectScript, &out.AfterConnectScript + *out = new(string) + **out = **in + } + if in.BabelfishDatabaseName != nil { + in, out := &in.BabelfishDatabaseName, &out.BabelfishDatabaseName + *out = new(string) + **out = **in + } + if in.CaptureDdls != nil { + in, out := &in.CaptureDdls, &out.CaptureDdls + *out = new(bool) + **out = **in + } + if in.DatabaseMode != nil { + in, out := &in.DatabaseMode, &out.DatabaseMode + *out = new(string) + **out = **in + } + if in.DdlArtifactsSchema != nil { + in, out := &in.DdlArtifactsSchema, &out.DdlArtifactsSchema + *out = new(string) + **out = **in + } + if in.ExecuteTimeout != nil { + in, out := &in.ExecuteTimeout, &out.ExecuteTimeout + *out = new(float64) + **out = **in + } + if in.FailTasksOnLobTruncation != nil { + in, out := &in.FailTasksOnLobTruncation, &out.FailTasksOnLobTruncation + *out = new(bool) + **out = **in + } + if in.HeartbeatEnable != nil { + in, out := &in.HeartbeatEnable, &out.HeartbeatEnable + *out = new(bool) + **out = **in + } + if in.HeartbeatFrequency != nil { + in, out := &in.HeartbeatFrequency, &out.HeartbeatFrequency + *out = new(float64) + **out = **in + } + if in.HeartbeatSchema != nil { + in, out := &in.HeartbeatSchema, &out.HeartbeatSchema + *out = new(string) + **out = **in + } + if in.MapBooleanAsBoolean != nil { + in, out := &in.MapBooleanAsBoolean, &out.MapBooleanAsBoolean + *out = new(bool) + **out = **in + } + if in.MapJsonbAsClob != nil { + in, out := &in.MapJsonbAsClob, &out.MapJsonbAsClob + *out = new(bool) + **out = **in + } + if in.MapLongVarcharAs != nil { + in, out := &in.MapLongVarcharAs, &out.MapLongVarcharAs + *out = new(string) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(float64) + **out = **in + } + if in.PluginName != nil { + in, out := &in.PluginName, 
&out.PluginName + *out = new(string) + **out = **in + } + if in.SlotName != nil { + in, out := &in.SlotName, &out.SlotName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSettingsParameters. +func (in *PostgresSettingsParameters) DeepCopy() *PostgresSettingsParameters { + if in == nil { + return nil + } + out := new(PostgresSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisSettingsInitParameters) DeepCopyInto(out *RedisSettingsInitParameters) { + *out = *in + if in.AuthPasswordSecretRef != nil { + in, out := &in.AuthPasswordSecretRef, &out.AuthPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AuthType != nil { + in, out := &in.AuthType, &out.AuthType + *out = new(string) + **out = **in + } + if in.AuthUserName != nil { + in, out := &in.AuthUserName, &out.AuthUserName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.SSLCACertificateArn != nil { + in, out := &in.SSLCACertificateArn, &out.SSLCACertificateArn + *out = new(string) + **out = **in + } + if in.SSLSecurityProtocol != nil { + in, out := &in.SSLSecurityProtocol, &out.SSLSecurityProtocol + *out = new(string) + **out = **in + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisSettingsInitParameters. +func (in *RedisSettingsInitParameters) DeepCopy() *RedisSettingsInitParameters { + if in == nil { + return nil + } + out := new(RedisSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RedisSettingsObservation) DeepCopyInto(out *RedisSettingsObservation) { + *out = *in + if in.AuthType != nil { + in, out := &in.AuthType, &out.AuthType + *out = new(string) + **out = **in + } + if in.AuthUserName != nil { + in, out := &in.AuthUserName, &out.AuthUserName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.SSLCACertificateArn != nil { + in, out := &in.SSLCACertificateArn, &out.SSLCACertificateArn + *out = new(string) + **out = **in + } + if in.SSLSecurityProtocol != nil { + in, out := &in.SSLSecurityProtocol, &out.SSLSecurityProtocol + *out = new(string) + **out = **in + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisSettingsObservation. +func (in *RedisSettingsObservation) DeepCopy() *RedisSettingsObservation { + if in == nil { + return nil + } + out := new(RedisSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisSettingsParameters) DeepCopyInto(out *RedisSettingsParameters) { + *out = *in + if in.AuthPasswordSecretRef != nil { + in, out := &in.AuthPasswordSecretRef, &out.AuthPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AuthType != nil { + in, out := &in.AuthType, &out.AuthType + *out = new(string) + **out = **in + } + if in.AuthUserName != nil { + in, out := &in.AuthUserName, &out.AuthUserName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.SSLCACertificateArn != nil { + in, out := &in.SSLCACertificateArn, &out.SSLCACertificateArn + *out = new(string) + **out = **in + } + if in.SSLSecurityProtocol != nil { + in, out := &in.SSLSecurityProtocol, &out.SSLSecurityProtocol + *out = new(string) + **out = **in + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisSettingsParameters. +func (in *RedisSettingsParameters) DeepCopy() *RedisSettingsParameters { + if in == nil { + return nil + } + out := new(RedisSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedshiftSettingsInitParameters) DeepCopyInto(out *RedshiftSettingsInitParameters) { + *out = *in + if in.BucketFolder != nil { + in, out := &in.BucketFolder, &out.BucketFolder + *out = new(string) + **out = **in + } + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.EncryptionMode != nil { + in, out := &in.EncryptionMode, &out.EncryptionMode + *out = new(string) + **out = **in + } + if in.ServerSideEncryptionKMSKeyID != nil { + in, out := &in.ServerSideEncryptionKMSKeyID, &out.ServerSideEncryptionKMSKeyID + *out = new(string) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftSettingsInitParameters. +func (in *RedshiftSettingsInitParameters) DeepCopy() *RedshiftSettingsInitParameters { + if in == nil { + return nil + } + out := new(RedshiftSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedshiftSettingsObservation) DeepCopyInto(out *RedshiftSettingsObservation) { + *out = *in + if in.BucketFolder != nil { + in, out := &in.BucketFolder, &out.BucketFolder + *out = new(string) + **out = **in + } + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.EncryptionMode != nil { + in, out := &in.EncryptionMode, &out.EncryptionMode + *out = new(string) + **out = **in + } + if in.ServerSideEncryptionKMSKeyID != nil { + in, out := &in.ServerSideEncryptionKMSKeyID, &out.ServerSideEncryptionKMSKeyID + *out = new(string) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftSettingsObservation. +func (in *RedshiftSettingsObservation) DeepCopy() *RedshiftSettingsObservation { + if in == nil { + return nil + } + out := new(RedshiftSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedshiftSettingsParameters) DeepCopyInto(out *RedshiftSettingsParameters) { + *out = *in + if in.BucketFolder != nil { + in, out := &in.BucketFolder, &out.BucketFolder + *out = new(string) + **out = **in + } + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.EncryptionMode != nil { + in, out := &in.EncryptionMode, &out.EncryptionMode + *out = new(string) + **out = **in + } + if in.ServerSideEncryptionKMSKeyID != nil { + in, out := &in.ServerSideEncryptionKMSKeyID, &out.ServerSideEncryptionKMSKeyID + *out = new(string) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftSettingsParameters. +func (in *RedshiftSettingsParameters) DeepCopy() *RedshiftSettingsParameters { + if in == nil { + return nil + } + out := new(RedshiftSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3SettingsInitParameters) DeepCopyInto(out *S3SettingsInitParameters) { + *out = *in + if in.AddColumnName != nil { + in, out := &in.AddColumnName, &out.AddColumnName + *out = new(bool) + **out = **in + } + if in.BucketFolder != nil { + in, out := &in.BucketFolder, &out.BucketFolder + *out = new(string) + **out = **in + } + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.CannedACLForObjects != nil { + in, out := &in.CannedACLForObjects, &out.CannedACLForObjects + *out = new(string) + **out = **in + } + if in.CdcInsertsAndUpdates != nil { + in, out := &in.CdcInsertsAndUpdates, &out.CdcInsertsAndUpdates + *out = new(bool) + **out = **in + } + if in.CdcInsertsOnly != nil { + in, out := &in.CdcInsertsOnly, &out.CdcInsertsOnly + *out = new(bool) + **out = **in + } + if in.CdcMaxBatchInterval != nil { + in, out := &in.CdcMaxBatchInterval, &out.CdcMaxBatchInterval + *out = new(float64) + **out = **in + } + if in.CdcMinFileSize != nil { + in, out := &in.CdcMinFileSize, &out.CdcMinFileSize + *out = new(float64) + **out = **in + } + if in.CdcPath != nil { + in, out := &in.CdcPath, &out.CdcPath + *out = new(string) + **out = **in + } + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.CsvDelimiter != nil { + in, out := &in.CsvDelimiter, &out.CsvDelimiter + *out = new(string) + **out = **in + } + if in.CsvNoSupValue != nil { + in, out := &in.CsvNoSupValue, &out.CsvNoSupValue + *out = new(string) + **out = **in + } + if in.CsvNullValue != nil { + in, out := &in.CsvNullValue, &out.CsvNullValue + *out = new(string) + **out = **in + } + if in.CsvRowDelimiter != nil { + in, out := &in.CsvRowDelimiter, &out.CsvRowDelimiter + *out = new(string) + **out = **in + } + if in.DataFormat != nil { + in, out := &in.DataFormat, &out.DataFormat + *out = new(string) + **out = **in + } + if in.DataPageSize != nil { + in, out := 
&in.DataPageSize, &out.DataPageSize + *out = new(float64) + **out = **in + } + if in.DatePartitionDelimiter != nil { + in, out := &in.DatePartitionDelimiter, &out.DatePartitionDelimiter + *out = new(string) + **out = **in + } + if in.DatePartitionEnabled != nil { + in, out := &in.DatePartitionEnabled, &out.DatePartitionEnabled + *out = new(bool) + **out = **in + } + if in.DatePartitionSequence != nil { + in, out := &in.DatePartitionSequence, &out.DatePartitionSequence + *out = new(string) + **out = **in + } + if in.DictPageSizeLimit != nil { + in, out := &in.DictPageSizeLimit, &out.DictPageSizeLimit + *out = new(float64) + **out = **in + } + if in.EnableStatistics != nil { + in, out := &in.EnableStatistics, &out.EnableStatistics + *out = new(bool) + **out = **in + } + if in.EncodingType != nil { + in, out := &in.EncodingType, &out.EncodingType + *out = new(string) + **out = **in + } + if in.EncryptionMode != nil { + in, out := &in.EncryptionMode, &out.EncryptionMode + *out = new(string) + **out = **in + } + if in.ExternalTableDefinition != nil { + in, out := &in.ExternalTableDefinition, &out.ExternalTableDefinition + *out = new(string) + **out = **in + } + if in.GlueCatalogGeneration != nil { + in, out := &in.GlueCatalogGeneration, &out.GlueCatalogGeneration + *out = new(bool) + **out = **in + } + if in.IgnoreHeaderRows != nil { + in, out := &in.IgnoreHeaderRows, &out.IgnoreHeaderRows + *out = new(float64) + **out = **in + } + if in.IncludeOpForFullLoad != nil { + in, out := &in.IncludeOpForFullLoad, &out.IncludeOpForFullLoad + *out = new(bool) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(float64) + **out = **in + } + if in.ParquetTimestampInMillisecond != nil { + in, out := &in.ParquetTimestampInMillisecond, &out.ParquetTimestampInMillisecond + *out = new(bool) + **out = **in + } + if in.ParquetVersion != nil { + in, out := &in.ParquetVersion, &out.ParquetVersion + *out = new(string) + **out = **in + } 
+ if in.PreserveTransactions != nil { + in, out := &in.PreserveTransactions, &out.PreserveTransactions + *out = new(bool) + **out = **in + } + if in.Rfc4180 != nil { + in, out := &in.Rfc4180, &out.Rfc4180 + *out = new(bool) + **out = **in + } + if in.RowGroupLength != nil { + in, out := &in.RowGroupLength, &out.RowGroupLength + *out = new(float64) + **out = **in + } + if in.ServerSideEncryptionKMSKeyID != nil { + in, out := &in.ServerSideEncryptionKMSKeyID, &out.ServerSideEncryptionKMSKeyID + *out = new(string) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } + if in.TimestampColumnName != nil { + in, out := &in.TimestampColumnName, &out.TimestampColumnName + *out = new(string) + **out = **in + } + if in.UseCsvNoSupValue != nil { + in, out := &in.UseCsvNoSupValue, &out.UseCsvNoSupValue + *out = new(bool) + **out = **in + } + if in.UseTaskStartTimeForFullLoadTimestamp != nil { + in, out := &in.UseTaskStartTimeForFullLoadTimestamp, &out.UseTaskStartTimeForFullLoadTimestamp + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SettingsInitParameters. +func (in *S3SettingsInitParameters) DeepCopy() *S3SettingsInitParameters { + if in == nil { + return nil + } + out := new(S3SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3SettingsObservation) DeepCopyInto(out *S3SettingsObservation) { + *out = *in + if in.AddColumnName != nil { + in, out := &in.AddColumnName, &out.AddColumnName + *out = new(bool) + **out = **in + } + if in.BucketFolder != nil { + in, out := &in.BucketFolder, &out.BucketFolder + *out = new(string) + **out = **in + } + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.CannedACLForObjects != nil { + in, out := &in.CannedACLForObjects, &out.CannedACLForObjects + *out = new(string) + **out = **in + } + if in.CdcInsertsAndUpdates != nil { + in, out := &in.CdcInsertsAndUpdates, &out.CdcInsertsAndUpdates + *out = new(bool) + **out = **in + } + if in.CdcInsertsOnly != nil { + in, out := &in.CdcInsertsOnly, &out.CdcInsertsOnly + *out = new(bool) + **out = **in + } + if in.CdcMaxBatchInterval != nil { + in, out := &in.CdcMaxBatchInterval, &out.CdcMaxBatchInterval + *out = new(float64) + **out = **in + } + if in.CdcMinFileSize != nil { + in, out := &in.CdcMinFileSize, &out.CdcMinFileSize + *out = new(float64) + **out = **in + } + if in.CdcPath != nil { + in, out := &in.CdcPath, &out.CdcPath + *out = new(string) + **out = **in + } + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.CsvDelimiter != nil { + in, out := &in.CsvDelimiter, &out.CsvDelimiter + *out = new(string) + **out = **in + } + if in.CsvNoSupValue != nil { + in, out := &in.CsvNoSupValue, &out.CsvNoSupValue + *out = new(string) + **out = **in + } + if in.CsvNullValue != nil { + in, out := &in.CsvNullValue, &out.CsvNullValue + *out = new(string) + **out = **in + } + if in.CsvRowDelimiter != nil { + in, out := &in.CsvRowDelimiter, &out.CsvRowDelimiter + *out = new(string) + **out = **in + } + if in.DataFormat != nil { + in, out := &in.DataFormat, &out.DataFormat + *out = new(string) + **out = **in + } + if in.DataPageSize != nil { + in, out := 
&in.DataPageSize, &out.DataPageSize + *out = new(float64) + **out = **in + } + if in.DatePartitionDelimiter != nil { + in, out := &in.DatePartitionDelimiter, &out.DatePartitionDelimiter + *out = new(string) + **out = **in + } + if in.DatePartitionEnabled != nil { + in, out := &in.DatePartitionEnabled, &out.DatePartitionEnabled + *out = new(bool) + **out = **in + } + if in.DatePartitionSequence != nil { + in, out := &in.DatePartitionSequence, &out.DatePartitionSequence + *out = new(string) + **out = **in + } + if in.DictPageSizeLimit != nil { + in, out := &in.DictPageSizeLimit, &out.DictPageSizeLimit + *out = new(float64) + **out = **in + } + if in.EnableStatistics != nil { + in, out := &in.EnableStatistics, &out.EnableStatistics + *out = new(bool) + **out = **in + } + if in.EncodingType != nil { + in, out := &in.EncodingType, &out.EncodingType + *out = new(string) + **out = **in + } + if in.EncryptionMode != nil { + in, out := &in.EncryptionMode, &out.EncryptionMode + *out = new(string) + **out = **in + } + if in.ExternalTableDefinition != nil { + in, out := &in.ExternalTableDefinition, &out.ExternalTableDefinition + *out = new(string) + **out = **in + } + if in.GlueCatalogGeneration != nil { + in, out := &in.GlueCatalogGeneration, &out.GlueCatalogGeneration + *out = new(bool) + **out = **in + } + if in.IgnoreHeaderRows != nil { + in, out := &in.IgnoreHeaderRows, &out.IgnoreHeaderRows + *out = new(float64) + **out = **in + } + if in.IncludeOpForFullLoad != nil { + in, out := &in.IncludeOpForFullLoad, &out.IncludeOpForFullLoad + *out = new(bool) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(float64) + **out = **in + } + if in.ParquetTimestampInMillisecond != nil { + in, out := &in.ParquetTimestampInMillisecond, &out.ParquetTimestampInMillisecond + *out = new(bool) + **out = **in + } + if in.ParquetVersion != nil { + in, out := &in.ParquetVersion, &out.ParquetVersion + *out = new(string) + **out = **in + } 
+ if in.PreserveTransactions != nil { + in, out := &in.PreserveTransactions, &out.PreserveTransactions + *out = new(bool) + **out = **in + } + if in.Rfc4180 != nil { + in, out := &in.Rfc4180, &out.Rfc4180 + *out = new(bool) + **out = **in + } + if in.RowGroupLength != nil { + in, out := &in.RowGroupLength, &out.RowGroupLength + *out = new(float64) + **out = **in + } + if in.ServerSideEncryptionKMSKeyID != nil { + in, out := &in.ServerSideEncryptionKMSKeyID, &out.ServerSideEncryptionKMSKeyID + *out = new(string) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } + if in.TimestampColumnName != nil { + in, out := &in.TimestampColumnName, &out.TimestampColumnName + *out = new(string) + **out = **in + } + if in.UseCsvNoSupValue != nil { + in, out := &in.UseCsvNoSupValue, &out.UseCsvNoSupValue + *out = new(bool) + **out = **in + } + if in.UseTaskStartTimeForFullLoadTimestamp != nil { + in, out := &in.UseTaskStartTimeForFullLoadTimestamp, &out.UseTaskStartTimeForFullLoadTimestamp + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SettingsObservation. +func (in *S3SettingsObservation) DeepCopy() *S3SettingsObservation { + if in == nil { + return nil + } + out := new(S3SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3SettingsParameters) DeepCopyInto(out *S3SettingsParameters) { + *out = *in + if in.AddColumnName != nil { + in, out := &in.AddColumnName, &out.AddColumnName + *out = new(bool) + **out = **in + } + if in.BucketFolder != nil { + in, out := &in.BucketFolder, &out.BucketFolder + *out = new(string) + **out = **in + } + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.CannedACLForObjects != nil { + in, out := &in.CannedACLForObjects, &out.CannedACLForObjects + *out = new(string) + **out = **in + } + if in.CdcInsertsAndUpdates != nil { + in, out := &in.CdcInsertsAndUpdates, &out.CdcInsertsAndUpdates + *out = new(bool) + **out = **in + } + if in.CdcInsertsOnly != nil { + in, out := &in.CdcInsertsOnly, &out.CdcInsertsOnly + *out = new(bool) + **out = **in + } + if in.CdcMaxBatchInterval != nil { + in, out := &in.CdcMaxBatchInterval, &out.CdcMaxBatchInterval + *out = new(float64) + **out = **in + } + if in.CdcMinFileSize != nil { + in, out := &in.CdcMinFileSize, &out.CdcMinFileSize + *out = new(float64) + **out = **in + } + if in.CdcPath != nil { + in, out := &in.CdcPath, &out.CdcPath + *out = new(string) + **out = **in + } + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.CsvDelimiter != nil { + in, out := &in.CsvDelimiter, &out.CsvDelimiter + *out = new(string) + **out = **in + } + if in.CsvNoSupValue != nil { + in, out := &in.CsvNoSupValue, &out.CsvNoSupValue + *out = new(string) + **out = **in + } + if in.CsvNullValue != nil { + in, out := &in.CsvNullValue, &out.CsvNullValue + *out = new(string) + **out = **in + } + if in.CsvRowDelimiter != nil { + in, out := &in.CsvRowDelimiter, &out.CsvRowDelimiter + *out = new(string) + **out = **in + } + if in.DataFormat != nil { + in, out := &in.DataFormat, &out.DataFormat + *out = new(string) + **out = **in + } + if in.DataPageSize != nil { + in, out := 
&in.DataPageSize, &out.DataPageSize + *out = new(float64) + **out = **in + } + if in.DatePartitionDelimiter != nil { + in, out := &in.DatePartitionDelimiter, &out.DatePartitionDelimiter + *out = new(string) + **out = **in + } + if in.DatePartitionEnabled != nil { + in, out := &in.DatePartitionEnabled, &out.DatePartitionEnabled + *out = new(bool) + **out = **in + } + if in.DatePartitionSequence != nil { + in, out := &in.DatePartitionSequence, &out.DatePartitionSequence + *out = new(string) + **out = **in + } + if in.DictPageSizeLimit != nil { + in, out := &in.DictPageSizeLimit, &out.DictPageSizeLimit + *out = new(float64) + **out = **in + } + if in.EnableStatistics != nil { + in, out := &in.EnableStatistics, &out.EnableStatistics + *out = new(bool) + **out = **in + } + if in.EncodingType != nil { + in, out := &in.EncodingType, &out.EncodingType + *out = new(string) + **out = **in + } + if in.EncryptionMode != nil { + in, out := &in.EncryptionMode, &out.EncryptionMode + *out = new(string) + **out = **in + } + if in.ExternalTableDefinition != nil { + in, out := &in.ExternalTableDefinition, &out.ExternalTableDefinition + *out = new(string) + **out = **in + } + if in.GlueCatalogGeneration != nil { + in, out := &in.GlueCatalogGeneration, &out.GlueCatalogGeneration + *out = new(bool) + **out = **in + } + if in.IgnoreHeaderRows != nil { + in, out := &in.IgnoreHeaderRows, &out.IgnoreHeaderRows + *out = new(float64) + **out = **in + } + if in.IncludeOpForFullLoad != nil { + in, out := &in.IncludeOpForFullLoad, &out.IncludeOpForFullLoad + *out = new(bool) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(float64) + **out = **in + } + if in.ParquetTimestampInMillisecond != nil { + in, out := &in.ParquetTimestampInMillisecond, &out.ParquetTimestampInMillisecond + *out = new(bool) + **out = **in + } + if in.ParquetVersion != nil { + in, out := &in.ParquetVersion, &out.ParquetVersion + *out = new(string) + **out = **in + } 
+ if in.PreserveTransactions != nil { + in, out := &in.PreserveTransactions, &out.PreserveTransactions + *out = new(bool) + **out = **in + } + if in.Rfc4180 != nil { + in, out := &in.Rfc4180, &out.Rfc4180 + *out = new(bool) + **out = **in + } + if in.RowGroupLength != nil { + in, out := &in.RowGroupLength, &out.RowGroupLength + *out = new(float64) + **out = **in + } + if in.ServerSideEncryptionKMSKeyID != nil { + in, out := &in.ServerSideEncryptionKMSKeyID, &out.ServerSideEncryptionKMSKeyID + *out = new(string) + **out = **in + } + if in.ServiceAccessRoleArn != nil { + in, out := &in.ServiceAccessRoleArn, &out.ServiceAccessRoleArn + *out = new(string) + **out = **in + } + if in.TimestampColumnName != nil { + in, out := &in.TimestampColumnName, &out.TimestampColumnName + *out = new(string) + **out = **in + } + if in.UseCsvNoSupValue != nil { + in, out := &in.UseCsvNoSupValue, &out.UseCsvNoSupValue + *out = new(bool) + **out = **in + } + if in.UseTaskStartTimeForFullLoadTimestamp != nil { + in, out := &in.UseTaskStartTimeForFullLoadTimestamp, &out.UseTaskStartTimeForFullLoadTimestamp + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SettingsParameters. +func (in *S3SettingsParameters) DeepCopy() *S3SettingsParameters { + if in == nil { + return nil + } + out := new(S3SettingsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dms/v1beta2/zz_generated.managed.go b/apis/dms/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..f85b4d4805 --- /dev/null +++ b/apis/dms/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Endpoint. 
+func (mg *Endpoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Endpoint. +func (mg *Endpoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Endpoint. +func (mg *Endpoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Endpoint. +func (mg *Endpoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Endpoint. +func (mg *Endpoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Endpoint. +func (mg *Endpoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Endpoint. +func (mg *Endpoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Endpoint. +func (mg *Endpoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Endpoint. +func (mg *Endpoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Endpoint. +func (mg *Endpoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Endpoint. +func (mg *Endpoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Endpoint. 
+func (mg *Endpoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dms/v1beta2/zz_generated.managedlist.go b/apis/dms/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..b5ecf44ec2 --- /dev/null +++ b/apis/dms/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this EndpointList. +func (l *EndpointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dms/v1beta2/zz_generated.resolvers.go b/apis/dms/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..8ffb70e391 --- /dev/null +++ b/apis/dms/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,145 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Endpoint. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Endpoint) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyArn") + } + mg.Spec.ForProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SecretsManagerAccessRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.SecretsManagerAccessRoleArnRef, + Selector: mg.Spec.ForProvider.SecretsManagerAccessRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecretsManagerAccessRoleArn") + } + mg.Spec.ForProvider.SecretsManagerAccessRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SecretsManagerAccessRoleArnRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccessRole), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ServiceAccessRoleRef, + Selector: mg.Spec.ForProvider.ServiceAccessRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccessRole") + } + mg.Spec.ForProvider.ServiceAccessRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccessRoleRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyArn") + } + mg.Spec.InitProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SecretsManagerAccessRoleArn), + Extract: 
common.ARNExtractor(), + Reference: mg.Spec.InitProvider.SecretsManagerAccessRoleArnRef, + Selector: mg.Spec.InitProvider.SecretsManagerAccessRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecretsManagerAccessRoleArn") + } + mg.Spec.InitProvider.SecretsManagerAccessRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SecretsManagerAccessRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccessRole), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ServiceAccessRoleRef, + Selector: mg.Spec.InitProvider.ServiceAccessRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccessRole") + } + mg.Spec.InitProvider.ServiceAccessRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccessRoleRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/dms/v1beta2/zz_groupversion_info.go b/apis/dms/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..d9812529de --- /dev/null +++ b/apis/dms/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=dms.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "dms.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ds/v1beta1/zz_conditionalforwarder_types.go b/apis/ds/v1beta1/zz_conditionalforwarder_types.go index 5aaf3e17f9..96064d4ea2 100755 --- a/apis/ds/v1beta1/zz_conditionalforwarder_types.go +++ b/apis/ds/v1beta1/zz_conditionalforwarder_types.go @@ -40,7 +40,7 @@ type ConditionalForwarderParameters struct { DNSIps []*string `json:"dnsIps,omitempty" tf:"dns_ips,omitempty"` // ID of directory. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta1.Directory + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` diff --git a/apis/ds/v1beta1/zz_generated.conversion_hubs.go b/apis/ds/v1beta1/zz_generated.conversion_hubs.go index 13d86f6c3d..ef8ff7781e 100755 --- a/apis/ds/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/ds/v1beta1/zz_generated.conversion_hubs.go @@ -8,9 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *ConditionalForwarder) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Directory) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *SharedDirectory) Hub() {} diff --git a/apis/ds/v1beta1/zz_generated.conversion_spokes.go b/apis/ds/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..1f4426181e --- /dev/null +++ b/apis/ds/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Directory to the hub type. +func (tr *Directory) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Directory type. +func (tr *Directory) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SharedDirectory to the hub type. 
+func (tr *SharedDirectory) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SharedDirectory type. +func (tr *SharedDirectory) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/ds/v1beta1/zz_generated.resolvers.go b/apis/ds/v1beta1/zz_generated.resolvers.go index 0ce96689cf..f990294b6a 100644 --- a/apis/ds/v1beta1/zz_generated.resolvers.go +++ b/apis/ds/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *ConditionalForwarder) ResolveReferences( // ResolveReferences of this var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta1", "Directory", "DirectoryList") + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/ds/v1beta2/zz_directory_terraformed.go b/apis/ds/v1beta2/zz_directory_terraformed.go new file mode 100755 index 0000000000..0a65efeaac --- /dev/null +++ b/apis/ds/v1beta2/zz_directory_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code 
generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Directory +func (mg *Directory) GetTerraformResourceType() string { + return "aws_directory_service_directory" +} + +// GetConnectionDetailsMapping for this Directory +func (tr *Directory) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "passwordSecretRef"} +} + +// GetObservation of this Directory +func (tr *Directory) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Directory +func (tr *Directory) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Directory +func (tr *Directory) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Directory +func (tr *Directory) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Directory +func (tr *Directory) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Directory +func (tr *Directory) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if 
err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Directory
+func (tr *Directory) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Directory using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Directory) LateInitialize(attrs []byte) (bool, error) {
+	params := &DirectoryParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Directory) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ds/v1beta2/zz_directory_types.go b/apis/ds/v1beta2/zz_directory_types.go new file mode 100755 index 0000000000..b7ec80bc09 --- /dev/null +++ b/apis/ds/v1beta2/zz_directory_types.go @@ -0,0 +1,410 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConnectSettingsInitParameters struct { + + // The DNS IP addresses of the domain to connect to. + // +listType=set + CustomerDNSIps []*string `json:"customerDnsIps,omitempty" tf:"customer_dns_ips,omitempty"` + + // The username corresponding to the password provided. + CustomerUsername *string `json:"customerUsername,omitempty" tf:"customer_username,omitempty"` + + // The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"` + + // The identifier of the VPC that the directory is in. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type ConnectSettingsObservation struct { + + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // The IP addresses of the AD Connector servers. + // +listType=set + ConnectIps []*string `json:"connectIps,omitempty" tf:"connect_ips,omitempty"` + + // The DNS IP addresses of the domain to connect to. + // +listType=set + CustomerDNSIps []*string `json:"customerDnsIps,omitempty" tf:"customer_dns_ips,omitempty"` + + // The username corresponding to the password provided. + CustomerUsername *string `json:"customerUsername,omitempty" tf:"customer_username,omitempty"` + + // The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs). + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The identifier of the VPC that the directory is in. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type ConnectSettingsParameters struct { + + // The DNS IP addresses of the domain to connect to. + // +kubebuilder:validation:Optional + // +listType=set + CustomerDNSIps []*string `json:"customerDnsIps" tf:"customer_dns_ips,omitempty"` + + // The username corresponding to the password provided. 
+ // +kubebuilder:validation:Optional + CustomerUsername *string `json:"customerUsername" tf:"customer_username,omitempty"` + + // The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"` + + // The identifier of the VPC that the directory is in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type DirectoryInitParameters struct { + + // The alias for the directory (must be unique amongst all aliases in AWS). Required for enable_sso. + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // Connector related information about the directory. Fields documented below. + ConnectSettings *ConnectSettingsInitParameters `json:"connectSettings,omitempty" tf:"connect_settings,omitempty"` + + // A textual description for the directory. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The number of domain controllers desired in the directory. Minimum value of 2. Scaling of domain controllers is only supported for MicrosoftAD directories. + DesiredNumberOfDomainControllers *float64 `json:"desiredNumberOfDomainControllers,omitempty" tf:"desired_number_of_domain_controllers,omitempty"` + + // The MicrosoftAD edition (Standard or Enterprise). Defaults to Enterprise. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // Whether to enable single-sign on for the directory. Requires alias. Defaults to false. + EnableSso *bool `json:"enableSso,omitempty" tf:"enable_sso,omitempty"` + + // The fully qualified name for the directory, such as corp.example.com + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The password for the directory administrator or connector user. + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The short name of the directory, such as CORP. + ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` + + // (For SimpleAD and ADConnector types) The size of the directory (Small or Large are accepted values). Large by default. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The directory type (SimpleAD, ADConnector or MicrosoftAD are accepted values). Defaults to SimpleAD. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // VPC related information about the directory. Fields documented below. + VPCSettings *VPCSettingsInitParameters `json:"vpcSettings,omitempty" tf:"vpc_settings,omitempty"` +} + +type DirectoryObservation struct { + + // The access URL for the directory, such as http://alias.awsapps.com. 
+ AccessURL *string `json:"accessUrl,omitempty" tf:"access_url,omitempty"` + + // The alias for the directory (must be unique amongst all aliases in AWS). Required for enable_sso. + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // Connector related information about the directory. Fields documented below. + ConnectSettings *ConnectSettingsObservation `json:"connectSettings,omitempty" tf:"connect_settings,omitempty"` + + // A list of IP addresses of the DNS servers for the directory or connector. + // +listType=set + DNSIPAddresses []*string `json:"dnsIpAddresses,omitempty" tf:"dns_ip_addresses,omitempty"` + + // A textual description for the directory. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The number of domain controllers desired in the directory. Minimum value of 2. Scaling of domain controllers is only supported for MicrosoftAD directories. + DesiredNumberOfDomainControllers *float64 `json:"desiredNumberOfDomainControllers,omitempty" tf:"desired_number_of_domain_controllers,omitempty"` + + // The MicrosoftAD edition (Standard or Enterprise). Defaults to Enterprise. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // Whether to enable single-sign on for the directory. Requires alias. Defaults to false. + EnableSso *bool `json:"enableSso,omitempty" tf:"enable_sso,omitempty"` + + // The directory identifier. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The fully qualified name for the directory, such as corp.example.com + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the security group created by the directory. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // The short name of the directory, such as CORP. + ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` + + // (For SimpleAD and ADConnector types) The size of the directory (Small or Large are accepted values). 
Large by default. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The directory type (SimpleAD, ADConnector or MicrosoftAD are accepted values). Defaults to SimpleAD. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // VPC related information about the directory. Fields documented below. + VPCSettings *VPCSettingsObservation `json:"vpcSettings,omitempty" tf:"vpc_settings,omitempty"` +} + +type DirectoryParameters struct { + + // The alias for the directory (must be unique amongst all aliases in AWS). Required for enable_sso. + // +kubebuilder:validation:Optional + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // Connector related information about the directory. Fields documented below. + // +kubebuilder:validation:Optional + ConnectSettings *ConnectSettingsParameters `json:"connectSettings,omitempty" tf:"connect_settings,omitempty"` + + // A textual description for the directory. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The number of domain controllers desired in the directory. Minimum value of 2. Scaling of domain controllers is only supported for MicrosoftAD directories. + // +kubebuilder:validation:Optional + DesiredNumberOfDomainControllers *float64 `json:"desiredNumberOfDomainControllers,omitempty" tf:"desired_number_of_domain_controllers,omitempty"` + + // The MicrosoftAD edition (Standard or Enterprise). Defaults to Enterprise. 
+ // +kubebuilder:validation:Optional + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // Whether to enable single-sign on for the directory. Requires alias. Defaults to false. + // +kubebuilder:validation:Optional + EnableSso *bool `json:"enableSso,omitempty" tf:"enable_sso,omitempty"` + + // The fully qualified name for the directory, such as corp.example.com + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The password for the directory administrator or connector user. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The short name of the directory, such as CORP. + // +kubebuilder:validation:Optional + ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` + + // (For SimpleAD and ADConnector types) The size of the directory (Small or Large are accepted values). Large by default. + // +kubebuilder:validation:Optional + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The directory type (SimpleAD, ADConnector or MicrosoftAD are accepted values). Defaults to SimpleAD. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // VPC related information about the directory. Fields documented below. + // +kubebuilder:validation:Optional + VPCSettings *VPCSettingsParameters `json:"vpcSettings,omitempty" tf:"vpc_settings,omitempty"` +} + +type VPCSettingsInitParameters struct { + + // The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs). 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"` + + // The identifier of the VPC that the directory is in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type VPCSettingsObservation struct { + + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs). + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The identifier of the VPC that the directory is in. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCSettingsParameters struct { + + // The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs). 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsRefs []v1.Reference `json:"subnetIdsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIdsSelector *v1.Selector `json:"subnetIdsSelector,omitempty" tf:"-"` + + // The identifier of the VPC that the directory is in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +// DirectorySpec defines the desired state of Directory +type DirectorySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DirectoryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DirectoryInitParameters `json:"initProvider,omitempty"` +} + +// DirectoryStatus defines the observed state of Directory. +type DirectoryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DirectoryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Directory is the Schema for the Directorys API. Provides a directory in AWS Directory Service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Directory struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter" + Spec DirectorySpec `json:"spec"` + Status DirectoryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// 
DirectoryList contains a list of Directorys +type DirectoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Directory `json:"items"` +} + +// Repository type metadata. +var ( + Directory_Kind = "Directory" + Directory_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Directory_Kind}.String() + Directory_KindAPIVersion = Directory_Kind + "." + CRDGroupVersion.String() + Directory_GroupVersionKind = CRDGroupVersion.WithKind(Directory_Kind) +) + +func init() { + SchemeBuilder.Register(&Directory{}, &DirectoryList{}) +} diff --git a/apis/ds/v1beta2/zz_generated.conversion_hubs.go b/apis/ds/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..3ceef0e981 --- /dev/null +++ b/apis/ds/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Directory) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SharedDirectory) Hub() {} diff --git a/apis/ds/v1beta2/zz_generated.deepcopy.go b/apis/ds/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a223c8f549 --- /dev/null +++ b/apis/ds/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1074 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectSettingsInitParameters) DeepCopyInto(out *ConnectSettingsInitParameters) { + *out = *in + if in.CustomerDNSIps != nil { + in, out := &in.CustomerDNSIps, &out.CustomerDNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerUsername != nil { + in, out := &in.CustomerUsername, &out.CustomerUsername + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIdsRefs != nil { + in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIdsSelector != nil { + in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectSettingsInitParameters. +func (in *ConnectSettingsInitParameters) DeepCopy() *ConnectSettingsInitParameters { + if in == nil { + return nil + } + out := new(ConnectSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectSettingsObservation) DeepCopyInto(out *ConnectSettingsObservation) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectIps != nil { + in, out := &in.ConnectIps, &out.ConnectIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerDNSIps != nil { + in, out := &in.CustomerDNSIps, &out.CustomerDNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerUsername != nil { + in, out := &in.CustomerUsername, &out.CustomerUsername + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectSettingsObservation. +func (in *ConnectSettingsObservation) DeepCopy() *ConnectSettingsObservation { + if in == nil { + return nil + } + out := new(ConnectSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectSettingsParameters) DeepCopyInto(out *ConnectSettingsParameters) { + *out = *in + if in.CustomerDNSIps != nil { + in, out := &in.CustomerDNSIps, &out.CustomerDNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerUsername != nil { + in, out := &in.CustomerUsername, &out.CustomerUsername + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIdsRefs != nil { + in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIdsSelector != nil { + in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectSettingsParameters. +func (in *ConnectSettingsParameters) DeepCopy() *ConnectSettingsParameters { + if in == nil { + return nil + } + out := new(ConnectSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Directory) DeepCopyInto(out *Directory) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Directory. +func (in *Directory) DeepCopy() *Directory { + if in == nil { + return nil + } + out := new(Directory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Directory) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryInitParameters) DeepCopyInto(out *DirectoryInitParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.ConnectSettings != nil { + in, out := &in.ConnectSettings, &out.ConnectSettings + *out = new(ConnectSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DesiredNumberOfDomainControllers != nil { + in, out := &in.DesiredNumberOfDomainControllers, &out.DesiredNumberOfDomainControllers + *out = new(float64) + **out = **in + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.EnableSso != nil { + in, out := &in.EnableSso, &out.EnableSso + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.ShortName != nil { + in, out := &in.ShortName, &out.ShortName + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = 
new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.VPCSettings != nil { + in, out := &in.VPCSettings, &out.VPCSettings + *out = new(VPCSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryInitParameters. +func (in *DirectoryInitParameters) DeepCopy() *DirectoryInitParameters { + if in == nil { + return nil + } + out := new(DirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryList) DeepCopyInto(out *DirectoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Directory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryList. +func (in *DirectoryList) DeepCopy() *DirectoryList { + if in == nil { + return nil + } + out := new(DirectoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DirectoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryObservation) DeepCopyInto(out *DirectoryObservation) { + *out = *in + if in.AccessURL != nil { + in, out := &in.AccessURL, &out.AccessURL + *out = new(string) + **out = **in + } + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.ConnectSettings != nil { + in, out := &in.ConnectSettings, &out.ConnectSettings + *out = new(ConnectSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DNSIPAddresses != nil { + in, out := &in.DNSIPAddresses, &out.DNSIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DesiredNumberOfDomainControllers != nil { + in, out := &in.DesiredNumberOfDomainControllers, &out.DesiredNumberOfDomainControllers + *out = new(float64) + **out = **in + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.EnableSso != nil { + in, out := &in.EnableSso, &out.EnableSso + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.ShortName != nil { + in, out := &in.ShortName, &out.ShortName + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = 
new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.VPCSettings != nil { + in, out := &in.VPCSettings, &out.VPCSettings + *out = new(VPCSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryObservation. +func (in *DirectoryObservation) DeepCopy() *DirectoryObservation { + if in == nil { + return nil + } + out := new(DirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryParameters) DeepCopyInto(out *DirectoryParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.ConnectSettings != nil { + in, out := &in.ConnectSettings, &out.ConnectSettings + *out = new(ConnectSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DesiredNumberOfDomainControllers != nil { + in, out := &in.DesiredNumberOfDomainControllers, &out.DesiredNumberOfDomainControllers + *out = new(float64) + **out = **in + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.EnableSso != nil { + in, out := &in.EnableSso, &out.EnableSso + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ShortName != nil { + in, out := &in.ShortName, &out.ShortName + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.VPCSettings != nil { + in, out := &in.VPCSettings, &out.VPCSettings + *out = new(VPCSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryParameters. 
+func (in *DirectoryParameters) DeepCopy() *DirectoryParameters { + if in == nil { + return nil + } + out := new(DirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectorySpec) DeepCopyInto(out *DirectorySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectorySpec. +func (in *DirectorySpec) DeepCopy() *DirectorySpec { + if in == nil { + return nil + } + out := new(DirectorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryStatus) DeepCopyInto(out *DirectoryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryStatus. +func (in *DirectoryStatus) DeepCopy() *DirectoryStatus { + if in == nil { + return nil + } + out := new(DirectoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedDirectory) DeepCopyInto(out *SharedDirectory) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedDirectory. 
+func (in *SharedDirectory) DeepCopy() *SharedDirectory { + if in == nil { + return nil + } + out := new(SharedDirectory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedDirectory) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedDirectoryInitParameters) DeepCopyInto(out *SharedDirectoryInitParameters) { + *out = *in + if in.DirectoryID != nil { + in, out := &in.DirectoryID, &out.DirectoryID + *out = new(string) + **out = **in + } + if in.DirectoryIDRef != nil { + in, out := &in.DirectoryIDRef, &out.DirectoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DirectoryIDSelector != nil { + in, out := &in.DirectoryIDSelector, &out.DirectoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.NotesSecretRef != nil { + in, out := &in.NotesSecretRef, &out.NotesSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedDirectoryInitParameters. +func (in *SharedDirectoryInitParameters) DeepCopy() *SharedDirectoryInitParameters { + if in == nil { + return nil + } + out := new(SharedDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharedDirectoryList) DeepCopyInto(out *SharedDirectoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SharedDirectory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedDirectoryList. +func (in *SharedDirectoryList) DeepCopy() *SharedDirectoryList { + if in == nil { + return nil + } + out := new(SharedDirectoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedDirectoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedDirectoryObservation) DeepCopyInto(out *SharedDirectoryObservation) { + *out = *in + if in.DirectoryID != nil { + in, out := &in.DirectoryID, &out.DirectoryID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.SharedDirectoryID != nil { + in, out := &in.SharedDirectoryID, &out.SharedDirectoryID + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedDirectoryObservation. 
+func (in *SharedDirectoryObservation) DeepCopy() *SharedDirectoryObservation { + if in == nil { + return nil + } + out := new(SharedDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedDirectoryParameters) DeepCopyInto(out *SharedDirectoryParameters) { + *out = *in + if in.DirectoryID != nil { + in, out := &in.DirectoryID, &out.DirectoryID + *out = new(string) + **out = **in + } + if in.DirectoryIDRef != nil { + in, out := &in.DirectoryIDRef, &out.DirectoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DirectoryIDSelector != nil { + in, out := &in.DirectoryIDSelector, &out.DirectoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.NotesSecretRef != nil { + in, out := &in.NotesSecretRef, &out.NotesSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedDirectoryParameters. +func (in *SharedDirectoryParameters) DeepCopy() *SharedDirectoryParameters { + if in == nil { + return nil + } + out := new(SharedDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharedDirectorySpec) DeepCopyInto(out *SharedDirectorySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedDirectorySpec. +func (in *SharedDirectorySpec) DeepCopy() *SharedDirectorySpec { + if in == nil { + return nil + } + out := new(SharedDirectorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedDirectoryStatus) DeepCopyInto(out *SharedDirectoryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedDirectoryStatus. +func (in *SharedDirectoryStatus) DeepCopy() *SharedDirectoryStatus { + if in == nil { + return nil + } + out := new(SharedDirectoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetInitParameters) DeepCopyInto(out *TargetInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetInitParameters. +func (in *TargetInitParameters) DeepCopy() *TargetInitParameters { + if in == nil { + return nil + } + out := new(TargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetObservation) DeepCopyInto(out *TargetObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObservation. +func (in *TargetObservation) DeepCopy() *TargetObservation { + if in == nil { + return nil + } + out := new(TargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetParameters) DeepCopyInto(out *TargetParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetParameters. +func (in *TargetParameters) DeepCopy() *TargetParameters { + if in == nil { + return nil + } + out := new(TargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCSettingsInitParameters) DeepCopyInto(out *VPCSettingsInitParameters) { + *out = *in + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIdsRefs != nil { + in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIdsSelector != nil { + in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCSettingsInitParameters. +func (in *VPCSettingsInitParameters) DeepCopy() *VPCSettingsInitParameters { + if in == nil { + return nil + } + out := new(VPCSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCSettingsObservation) DeepCopyInto(out *VPCSettingsObservation) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCSettingsObservation. +func (in *VPCSettingsObservation) DeepCopy() *VPCSettingsObservation { + if in == nil { + return nil + } + out := new(VPCSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCSettingsParameters) DeepCopyInto(out *VPCSettingsParameters) { + *out = *in + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIdsRefs != nil { + in, out := &in.SubnetIdsRefs, &out.SubnetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIdsSelector != nil { + in, out := &in.SubnetIdsSelector, &out.SubnetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCSettingsParameters. +func (in *VPCSettingsParameters) DeepCopy() *VPCSettingsParameters { + if in == nil { + return nil + } + out := new(VPCSettingsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ds/v1beta2/zz_generated.managed.go b/apis/ds/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..511a67d744 --- /dev/null +++ b/apis/ds/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Directory. +func (mg *Directory) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Directory. 
+func (mg *Directory) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Directory. +func (mg *Directory) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Directory. +func (mg *Directory) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Directory. +func (mg *Directory) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Directory. +func (mg *Directory) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Directory. +func (mg *Directory) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Directory. +func (mg *Directory) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Directory. +func (mg *Directory) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Directory. +func (mg *Directory) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Directory. +func (mg *Directory) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Directory. +func (mg *Directory) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SharedDirectory. 
+func (mg *SharedDirectory) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SharedDirectory. +func (mg *SharedDirectory) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SharedDirectory. +func (mg *SharedDirectory) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SharedDirectory. +func (mg *SharedDirectory) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SharedDirectory. +func (mg *SharedDirectory) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SharedDirectory. +func (mg *SharedDirectory) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SharedDirectory. +func (mg *SharedDirectory) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SharedDirectory. +func (mg *SharedDirectory) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SharedDirectory. +func (mg *SharedDirectory) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SharedDirectory. +func (mg *SharedDirectory) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SharedDirectory. +func (mg *SharedDirectory) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SharedDirectory. 
+func (mg *SharedDirectory) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ds/v1beta2/zz_generated.managedlist.go b/apis/ds/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..2f33a68cf2 --- /dev/null +++ b/apis/ds/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DirectoryList. +func (l *DirectoryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SharedDirectoryList. +func (l *SharedDirectoryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ds/v1beta2/zz_generated.resolvers.go b/apis/ds/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..aad04dae6e --- /dev/null +++ b/apis/ds/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,250 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Directory) ResolveReferences( // ResolveReferences of this Directory. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.ConnectSettings != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.ConnectSettings.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.ConnectSettings.SubnetIdsRefs, + Selector: mg.Spec.ForProvider.ConnectSettings.SubnetIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ConnectSettings.SubnetIds") + } + mg.Spec.ForProvider.ConnectSettings.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ConnectSettings.SubnetIdsRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.ConnectSettings != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ConnectSettings.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ConnectSettings.VPCIDRef, + Selector: mg.Spec.ForProvider.ConnectSettings.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ConnectSettings.VPCID") + } + mg.Spec.ForProvider.ConnectSettings.VPCID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ConnectSettings.VPCIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.VPCSettings != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCSettings.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCSettings.SubnetIdsRefs, + Selector: mg.Spec.ForProvider.VPCSettings.SubnetIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCSettings.SubnetIds") + } + mg.Spec.ForProvider.VPCSettings.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCSettings.SubnetIdsRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.VPCSettings != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCSettings.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VPCSettings.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCSettings.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCSettings.VPCID") + } + mg.Spec.ForProvider.VPCSettings.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCSettings.VPCIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.ConnectSettings != nil { + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.ConnectSettings.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.ConnectSettings.SubnetIdsRefs, + Selector: mg.Spec.InitProvider.ConnectSettings.SubnetIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ConnectSettings.SubnetIds") + } + mg.Spec.InitProvider.ConnectSettings.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ConnectSettings.SubnetIdsRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.ConnectSettings != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ConnectSettings.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ConnectSettings.VPCIDRef, + Selector: mg.Spec.InitProvider.ConnectSettings.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ConnectSettings.VPCID") + } + mg.Spec.InitProvider.ConnectSettings.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ConnectSettings.VPCIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.VPCSettings != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, 
"failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCSettings.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCSettings.SubnetIdsRefs, + Selector: mg.Spec.InitProvider.VPCSettings.SubnetIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCSettings.SubnetIds") + } + mg.Spec.InitProvider.VPCSettings.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCSettings.SubnetIdsRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.VPCSettings != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCSettings.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VPCSettings.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCSettings.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCSettings.VPCID") + } + mg.Spec.InitProvider.VPCSettings.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCSettings.VPCIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this SharedDirectory. 
+func (mg *SharedDirectory) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DirectoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DirectoryIDRef, + Selector: mg.Spec.ForProvider.DirectoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DirectoryID") + } + mg.Spec.ForProvider.DirectoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DirectoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DirectoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DirectoryIDRef, + Selector: mg.Spec.InitProvider.DirectoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DirectoryID") + } + mg.Spec.InitProvider.DirectoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DirectoryIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/ds/v1beta2/zz_groupversion_info.go b/apis/ds/v1beta2/zz_groupversion_info.go new file mode 100755 index 
0000000000..36b915b6fc --- /dev/null +++ b/apis/ds/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ds.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "ds.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ds/v1beta2/zz_shareddirectory_terraformed.go b/apis/ds/v1beta2/zz_shareddirectory_terraformed.go new file mode 100755 index 0000000000..cd00ecfedb --- /dev/null +++ b/apis/ds/v1beta2/zz_shareddirectory_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SharedDirectory +func (mg *SharedDirectory) GetTerraformResourceType() string { + return "aws_directory_service_shared_directory" +} + +// GetConnectionDetailsMapping for this SharedDirectory +func (tr *SharedDirectory) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"notes": "notesSecretRef"} +} + +// GetObservation of this SharedDirectory +func (tr *SharedDirectory) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SharedDirectory +func (tr *SharedDirectory) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SharedDirectory +func (tr *SharedDirectory) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SharedDirectory +func (tr *SharedDirectory) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SharedDirectory +func (tr *SharedDirectory) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SharedDirectory +func (tr *SharedDirectory) GetInitParameters() (map[string]any, error) 
{ + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SharedDirectory +func (tr *SharedDirectory) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SharedDirectory using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SharedDirectory) LateInitialize(attrs []byte) (bool, error) { + params := &SharedDirectoryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SharedDirectory) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ds/v1beta2/zz_shareddirectory_types.go b/apis/ds/v1beta2/zz_shareddirectory_types.go new file mode 100755 index 0000000000..ec5ee21ee5 --- /dev/null +++ b/apis/ds/v1beta2/zz_shareddirectory_types.go @@ -0,0 +1,181 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SharedDirectoryInitParameters struct { + + // Identifier of the Managed Microsoft AD directory that you want to share with other accounts. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` + + // Reference to a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDRef *v1.Reference `json:"directoryIdRef,omitempty" tf:"-"` + + // Selector for a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDSelector *v1.Selector `json:"directoryIdSelector,omitempty" tf:"-"` + + // Method used when sharing a directory. Valid values are ORGANIZATIONS and HANDSHAKE. Default is HANDSHAKE. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Message sent by the directory owner to the directory consumer to help the directory consumer administrator determine whether to approve or reject the share invitation. 
+ NotesSecretRef *v1.SecretKeySelector `json:"notesSecretRef,omitempty" tf:"-"` + + // Identifier for the directory consumer account with whom the directory is to be shared. See below. + Target *TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type SharedDirectoryObservation struct { + + // Identifier of the Managed Microsoft AD directory that you want to share with other accounts. + DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` + + // Identifier of the shared directory. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Method used when sharing a directory. Valid values are ORGANIZATIONS and HANDSHAKE. Default is HANDSHAKE. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Identifier of the directory that is stored in the directory consumer account that corresponds to the shared directory in the owner account. + SharedDirectoryID *string `json:"sharedDirectoryId,omitempty" tf:"shared_directory_id,omitempty"` + + // Identifier for the directory consumer account with whom the directory is to be shared. See below. + Target *TargetObservation `json:"target,omitempty" tf:"target,omitempty"` +} + +type SharedDirectoryParameters struct { + + // Identifier of the Managed Microsoft AD directory that you want to share with other accounts. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` + + // Reference to a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDRef *v1.Reference `json:"directoryIdRef,omitempty" tf:"-"` + + // Selector for a Directory in ds to populate directoryId. 
+ // +kubebuilder:validation:Optional + DirectoryIDSelector *v1.Selector `json:"directoryIdSelector,omitempty" tf:"-"` + + // Method used when sharing a directory. Valid values are ORGANIZATIONS and HANDSHAKE. Default is HANDSHAKE. + // +kubebuilder:validation:Optional + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // Message sent by the directory owner to the directory consumer to help the directory consumer administrator determine whether to approve or reject the share invitation. + // +kubebuilder:validation:Optional + NotesSecretRef *v1.SecretKeySelector `json:"notesSecretRef,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Identifier for the directory consumer account with whom the directory is to be shared. See below. + // +kubebuilder:validation:Optional + Target *TargetParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type TargetInitParameters struct { + + // Identifier of the directory consumer account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of identifier to be used in the id field. Valid value is ACCOUNT. Default is ACCOUNT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TargetObservation struct { + + // Identifier of the directory consumer account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of identifier to be used in the id field. Valid value is ACCOUNT. Default is ACCOUNT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TargetParameters struct { + + // Identifier of the directory consumer account. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Type of identifier to be used in the id field. Valid value is ACCOUNT. Default is ACCOUNT. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// SharedDirectorySpec defines the desired state of SharedDirectory +type SharedDirectorySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SharedDirectoryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SharedDirectoryInitParameters `json:"initProvider,omitempty"` +} + +// SharedDirectoryStatus defines the observed state of SharedDirectory. +type SharedDirectoryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SharedDirectoryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SharedDirectory is the Schema for the SharedDirectorys API. Manages a directory in your account (directory owner) shared with another account (directory consumer). 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SharedDirectory struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.target) || (has(self.initProvider) && has(self.initProvider.target))",message="spec.forProvider.target is a required parameter" + Spec SharedDirectorySpec `json:"spec"` + Status SharedDirectoryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SharedDirectoryList contains a list of SharedDirectorys +type SharedDirectoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SharedDirectory `json:"items"` +} + +// Repository type metadata. +var ( + SharedDirectory_Kind = "SharedDirectory" + SharedDirectory_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SharedDirectory_Kind}.String() + SharedDirectory_KindAPIVersion = SharedDirectory_Kind + "." 
+ CRDGroupVersion.String() + SharedDirectory_GroupVersionKind = CRDGroupVersion.WithKind(SharedDirectory_Kind) +) + +func init() { + SchemeBuilder.Register(&SharedDirectory{}, &SharedDirectoryList{}) +} diff --git a/apis/dynamodb/v1beta1/zz_contributorinsights_types.go b/apis/dynamodb/v1beta1/zz_contributorinsights_types.go index f47cb1abe5..5e6692a1a3 100755 --- a/apis/dynamodb/v1beta1/zz_contributorinsights_types.go +++ b/apis/dynamodb/v1beta1/zz_contributorinsights_types.go @@ -19,7 +19,7 @@ type ContributorInsightsInitParameters struct { IndexName *string `json:"indexName,omitempty" tf:"index_name,omitempty"` // The name of the table to enable contributor insights - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta1.Table + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` // Reference to a Table in dynamodb to populate tableName. @@ -53,7 +53,7 @@ type ContributorInsightsParameters struct { Region *string `json:"region" tf:"-"` // The name of the table to enable contributor insights - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta1.Table + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table // +kubebuilder:validation:Optional TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` diff --git a/apis/dynamodb/v1beta1/zz_generated.conversion_hubs.go b/apis/dynamodb/v1beta1/zz_generated.conversion_hubs.go index 1de6a45f48..4cfa4ff537 100755 --- a/apis/dynamodb/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/dynamodb/v1beta1/zz_generated.conversion_hubs.go @@ -15,9 +15,6 @@ func (tr *GlobalTable) Hub() {} // Hub marks this type as a conversion hub. func (tr *KinesisStreamingDestination) Hub() {} -// Hub marks this type as a conversion hub. 
-func (tr *Table) Hub() {} - // Hub marks this type as a conversion hub. func (tr *TableItem) Hub() {} diff --git a/apis/dynamodb/v1beta1/zz_generated.conversion_spokes.go b/apis/dynamodb/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..5062339cc1 --- /dev/null +++ b/apis/dynamodb/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Table to the hub type. +func (tr *Table) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Table type. 
+func (tr *Table) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/dynamodb/v1beta1/zz_generated.resolvers.go b/apis/dynamodb/v1beta1/zz_generated.resolvers.go index da9bc9ba79..2a7de107bd 100644 --- a/apis/dynamodb/v1beta1/zz_generated.resolvers.go +++ b/apis/dynamodb/v1beta1/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *ContributorInsights) ResolveReferences( // ResolveReferences of this C var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta1", "Table", "TableList") + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", "Table", "TableList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -46,7 +46,7 @@ func (mg *ContributorInsights) ResolveReferences( // ResolveReferences of this C mg.Spec.ForProvider.TableName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TableNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta1", "Table", "TableList") + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", "Table", "TableList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -77,7 +77,7 @@ func (mg *KinesisStreamingDestination) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta1", "Stream", 
"StreamList") + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -96,7 +96,7 @@ func (mg *KinesisStreamingDestination) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StreamArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta1", "Table", "TableList") + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", "Table", "TableList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -115,7 +115,7 @@ func (mg *KinesisStreamingDestination) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.TableName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TableNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta1", "Stream", "StreamList") + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -134,7 +134,7 @@ func (mg *KinesisStreamingDestination) ResolveReferences(ctx context.Context, c mg.Spec.InitProvider.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.StreamArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta1", "Table", "TableList") + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", "Table", "TableList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ 
-165,7 +165,7 @@ func (mg *TableItem) ResolveReferences(ctx context.Context, c client.Reader) err var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta1", "Table", "TableList") + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", "Table", "TableList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -184,7 +184,7 @@ func (mg *TableItem) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.TableName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TableNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta1", "Table", "TableList") + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", "Table", "TableList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -215,7 +215,7 @@ func (mg *TableReplica) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta1", "Table", "TableList") + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", "Table", "TableList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -253,7 +253,7 @@ func (mg *TableReplica) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KMSKeyArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta1", "Table", "TableList") + m, l, err = apisresolver.GetManagedResource("dynamodb.aws.upbound.io", "v1beta2", 
"Table", "TableList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/dynamodb/v1beta1/zz_kinesisstreamingdestination_types.go b/apis/dynamodb/v1beta1/zz_kinesisstreamingdestination_types.go index 17d5e2a1c8..03430700d1 100755 --- a/apis/dynamodb/v1beta1/zz_kinesisstreamingdestination_types.go +++ b/apis/dynamodb/v1beta1/zz_kinesisstreamingdestination_types.go @@ -16,7 +16,7 @@ import ( type KinesisStreamingDestinationInitParameters struct { // The ARN for a Kinesis data stream. This must exist in the same account and region as the DynamoDB table. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta1.Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` @@ -30,7 +30,7 @@ type KinesisStreamingDestinationInitParameters struct { // The name of the DynamoDB table. There // can only be one Kinesis streaming destination for a given DynamoDB table. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta1.Table + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` // Reference to a Table in dynamodb to populate tableName. @@ -63,7 +63,7 @@ type KinesisStreamingDestinationParameters struct { Region *string `json:"region" tf:"-"` // The ARN for a Kinesis data stream. This must exist in the same account and region as the DynamoDB table. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta1.Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() // +kubebuilder:validation:Optional StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` @@ -78,7 +78,7 @@ type KinesisStreamingDestinationParameters struct { // The name of the DynamoDB table. There // can only be one Kinesis streaming destination for a given DynamoDB table. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta1.Table + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table // +kubebuilder:validation:Optional TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` diff --git a/apis/dynamodb/v1beta1/zz_tableitem_types.go b/apis/dynamodb/v1beta1/zz_tableitem_types.go index d8bf9f241e..b97a92d355 100755 --- a/apis/dynamodb/v1beta1/zz_tableitem_types.go +++ b/apis/dynamodb/v1beta1/zz_tableitem_types.go @@ -25,7 +25,7 @@ type TableItemInitParameters struct { RangeKey *string `json:"rangeKey,omitempty" tf:"range_key,omitempty"` // Name of the table to contain the item. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta1.Table + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` // Reference to a Table in dynamodb to populate tableName. @@ -74,7 +74,7 @@ type TableItemParameters struct { Region *string `json:"region" tf:"-"` // Name of the table to contain the item. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta1.Table + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table // +kubebuilder:validation:Optional TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` diff --git a/apis/dynamodb/v1beta1/zz_tablereplica_types.go b/apis/dynamodb/v1beta1/zz_tablereplica_types.go index 080584e92e..7e467b2f54 100755 --- a/apis/dynamodb/v1beta1/zz_tablereplica_types.go +++ b/apis/dynamodb/v1beta1/zz_tablereplica_types.go @@ -16,7 +16,7 @@ import ( type TableReplicaInitParameters_2 struct { // ARN of the main or global table which this resource will replicate. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta1.Table + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) GlobalTableArn *string `json:"globalTableArn,omitempty" tf:"global_table_arn,omitempty"` @@ -83,7 +83,7 @@ type TableReplicaObservation_2 struct { type TableReplicaParameters_2 struct { // ARN of the main or global table which this resource will replicate. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta1.Table + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/dynamodb/v1beta2.Table // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional GlobalTableArn *string `json:"globalTableArn,omitempty" tf:"global_table_arn,omitempty"` diff --git a/apis/dynamodb/v1beta2/zz_generated.conversion_hubs.go b/apis/dynamodb/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..0ed428e742 --- /dev/null +++ b/apis/dynamodb/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Table) Hub() {} diff --git a/apis/dynamodb/v1beta2/zz_generated.deepcopy.go b/apis/dynamodb/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..3873c07176 --- /dev/null +++ b/apis/dynamodb/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1609 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttributeInitParameters) DeepCopyInto(out *AttributeInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttributeInitParameters. 
+func (in *AttributeInitParameters) DeepCopy() *AttributeInitParameters { + if in == nil { + return nil + } + out := new(AttributeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttributeObservation) DeepCopyInto(out *AttributeObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttributeObservation. +func (in *AttributeObservation) DeepCopy() *AttributeObservation { + if in == nil { + return nil + } + out := new(AttributeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttributeParameters) DeepCopyInto(out *AttributeParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttributeParameters. +func (in *AttributeParameters) DeepCopy() *AttributeParameters { + if in == nil { + return nil + } + out := new(AttributeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CsvInitParameters) DeepCopyInto(out *CsvInitParameters) { + *out = *in + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } + if in.HeaderList != nil { + in, out := &in.HeaderList, &out.HeaderList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvInitParameters. +func (in *CsvInitParameters) DeepCopy() *CsvInitParameters { + if in == nil { + return nil + } + out := new(CsvInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CsvObservation) DeepCopyInto(out *CsvObservation) { + *out = *in + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } + if in.HeaderList != nil { + in, out := &in.HeaderList, &out.HeaderList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvObservation. +func (in *CsvObservation) DeepCopy() *CsvObservation { + if in == nil { + return nil + } + out := new(CsvObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CsvParameters) DeepCopyInto(out *CsvParameters) { + *out = *in + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } + if in.HeaderList != nil { + in, out := &in.HeaderList, &out.HeaderList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvParameters. +func (in *CsvParameters) DeepCopy() *CsvParameters { + if in == nil { + return nil + } + out := new(CsvParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalSecondaryIndexInitParameters) DeepCopyInto(out *GlobalSecondaryIndexInitParameters) { + *out = *in + if in.HashKey != nil { + in, out := &in.HashKey, &out.HashKey + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NonKeyAttributes != nil { + in, out := &in.NonKeyAttributes, &out.NonKeyAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProjectionType != nil { + in, out := &in.ProjectionType, &out.ProjectionType + *out = new(string) + **out = **in + } + if in.RangeKey != nil { + in, out := &in.RangeKey, &out.RangeKey + *out = new(string) + **out = **in + } + if in.ReadCapacity != nil { + in, out := &in.ReadCapacity, &out.ReadCapacity + *out = new(float64) + **out = **in + } + if in.WriteCapacity != nil { + in, out := &in.WriteCapacity, &out.WriteCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalSecondaryIndexInitParameters. 
+func (in *GlobalSecondaryIndexInitParameters) DeepCopy() *GlobalSecondaryIndexInitParameters { + if in == nil { + return nil + } + out := new(GlobalSecondaryIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalSecondaryIndexObservation) DeepCopyInto(out *GlobalSecondaryIndexObservation) { + *out = *in + if in.HashKey != nil { + in, out := &in.HashKey, &out.HashKey + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NonKeyAttributes != nil { + in, out := &in.NonKeyAttributes, &out.NonKeyAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProjectionType != nil { + in, out := &in.ProjectionType, &out.ProjectionType + *out = new(string) + **out = **in + } + if in.RangeKey != nil { + in, out := &in.RangeKey, &out.RangeKey + *out = new(string) + **out = **in + } + if in.ReadCapacity != nil { + in, out := &in.ReadCapacity, &out.ReadCapacity + *out = new(float64) + **out = **in + } + if in.WriteCapacity != nil { + in, out := &in.WriteCapacity, &out.WriteCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalSecondaryIndexObservation. +func (in *GlobalSecondaryIndexObservation) DeepCopy() *GlobalSecondaryIndexObservation { + if in == nil { + return nil + } + out := new(GlobalSecondaryIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalSecondaryIndexParameters) DeepCopyInto(out *GlobalSecondaryIndexParameters) { + *out = *in + if in.HashKey != nil { + in, out := &in.HashKey, &out.HashKey + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NonKeyAttributes != nil { + in, out := &in.NonKeyAttributes, &out.NonKeyAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProjectionType != nil { + in, out := &in.ProjectionType, &out.ProjectionType + *out = new(string) + **out = **in + } + if in.RangeKey != nil { + in, out := &in.RangeKey, &out.RangeKey + *out = new(string) + **out = **in + } + if in.ReadCapacity != nil { + in, out := &in.ReadCapacity, &out.ReadCapacity + *out = new(float64) + **out = **in + } + if in.WriteCapacity != nil { + in, out := &in.WriteCapacity, &out.WriteCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalSecondaryIndexParameters. +func (in *GlobalSecondaryIndexParameters) DeepCopy() *GlobalSecondaryIndexParameters { + if in == nil { + return nil + } + out := new(GlobalSecondaryIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImportTableInitParameters) DeepCopyInto(out *ImportTableInitParameters) { + *out = *in + if in.InputCompressionType != nil { + in, out := &in.InputCompressionType, &out.InputCompressionType + *out = new(string) + **out = **in + } + if in.InputFormat != nil { + in, out := &in.InputFormat, &out.InputFormat + *out = new(string) + **out = **in + } + if in.InputFormatOptions != nil { + in, out := &in.InputFormatOptions, &out.InputFormatOptions + *out = new(InputFormatOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3BucketSource != nil { + in, out := &in.S3BucketSource, &out.S3BucketSource + *out = new(S3BucketSourceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportTableInitParameters. +func (in *ImportTableInitParameters) DeepCopy() *ImportTableInitParameters { + if in == nil { + return nil + } + out := new(ImportTableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportTableObservation) DeepCopyInto(out *ImportTableObservation) { + *out = *in + if in.InputCompressionType != nil { + in, out := &in.InputCompressionType, &out.InputCompressionType + *out = new(string) + **out = **in + } + if in.InputFormat != nil { + in, out := &in.InputFormat, &out.InputFormat + *out = new(string) + **out = **in + } + if in.InputFormatOptions != nil { + in, out := &in.InputFormatOptions, &out.InputFormatOptions + *out = new(InputFormatOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.S3BucketSource != nil { + in, out := &in.S3BucketSource, &out.S3BucketSource + *out = new(S3BucketSourceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportTableObservation. 
+func (in *ImportTableObservation) DeepCopy() *ImportTableObservation { + if in == nil { + return nil + } + out := new(ImportTableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportTableParameters) DeepCopyInto(out *ImportTableParameters) { + *out = *in + if in.InputCompressionType != nil { + in, out := &in.InputCompressionType, &out.InputCompressionType + *out = new(string) + **out = **in + } + if in.InputFormat != nil { + in, out := &in.InputFormat, &out.InputFormat + *out = new(string) + **out = **in + } + if in.InputFormatOptions != nil { + in, out := &in.InputFormatOptions, &out.InputFormatOptions + *out = new(InputFormatOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.S3BucketSource != nil { + in, out := &in.S3BucketSource, &out.S3BucketSource + *out = new(S3BucketSourceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportTableParameters. +func (in *ImportTableParameters) DeepCopy() *ImportTableParameters { + if in == nil { + return nil + } + out := new(ImportTableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputFormatOptionsInitParameters) DeepCopyInto(out *InputFormatOptionsInitParameters) { + *out = *in + if in.Csv != nil { + in, out := &in.Csv, &out.Csv + *out = new(CsvInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputFormatOptionsInitParameters. 
+func (in *InputFormatOptionsInitParameters) DeepCopy() *InputFormatOptionsInitParameters { + if in == nil { + return nil + } + out := new(InputFormatOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputFormatOptionsObservation) DeepCopyInto(out *InputFormatOptionsObservation) { + *out = *in + if in.Csv != nil { + in, out := &in.Csv, &out.Csv + *out = new(CsvObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputFormatOptionsObservation. +func (in *InputFormatOptionsObservation) DeepCopy() *InputFormatOptionsObservation { + if in == nil { + return nil + } + out := new(InputFormatOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputFormatOptionsParameters) DeepCopyInto(out *InputFormatOptionsParameters) { + *out = *in + if in.Csv != nil { + in, out := &in.Csv, &out.Csv + *out = new(CsvParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputFormatOptionsParameters. +func (in *InputFormatOptionsParameters) DeepCopy() *InputFormatOptionsParameters { + if in == nil { + return nil + } + out := new(InputFormatOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalSecondaryIndexInitParameters) DeepCopyInto(out *LocalSecondaryIndexInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NonKeyAttributes != nil { + in, out := &in.NonKeyAttributes, &out.NonKeyAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProjectionType != nil { + in, out := &in.ProjectionType, &out.ProjectionType + *out = new(string) + **out = **in + } + if in.RangeKey != nil { + in, out := &in.RangeKey, &out.RangeKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSecondaryIndexInitParameters. +func (in *LocalSecondaryIndexInitParameters) DeepCopy() *LocalSecondaryIndexInitParameters { + if in == nil { + return nil + } + out := new(LocalSecondaryIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSecondaryIndexObservation) DeepCopyInto(out *LocalSecondaryIndexObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NonKeyAttributes != nil { + in, out := &in.NonKeyAttributes, &out.NonKeyAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProjectionType != nil { + in, out := &in.ProjectionType, &out.ProjectionType + *out = new(string) + **out = **in + } + if in.RangeKey != nil { + in, out := &in.RangeKey, &out.RangeKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSecondaryIndexObservation. 
+func (in *LocalSecondaryIndexObservation) DeepCopy() *LocalSecondaryIndexObservation { + if in == nil { + return nil + } + out := new(LocalSecondaryIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSecondaryIndexParameters) DeepCopyInto(out *LocalSecondaryIndexParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NonKeyAttributes != nil { + in, out := &in.NonKeyAttributes, &out.NonKeyAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProjectionType != nil { + in, out := &in.ProjectionType, &out.ProjectionType + *out = new(string) + **out = **in + } + if in.RangeKey != nil { + in, out := &in.RangeKey, &out.RangeKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSecondaryIndexParameters. +func (in *LocalSecondaryIndexParameters) DeepCopy() *LocalSecondaryIndexParameters { + if in == nil { + return nil + } + out := new(LocalSecondaryIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointInTimeRecoveryInitParameters) DeepCopyInto(out *PointInTimeRecoveryInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointInTimeRecoveryInitParameters. 
+func (in *PointInTimeRecoveryInitParameters) DeepCopy() *PointInTimeRecoveryInitParameters { + if in == nil { + return nil + } + out := new(PointInTimeRecoveryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointInTimeRecoveryObservation) DeepCopyInto(out *PointInTimeRecoveryObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointInTimeRecoveryObservation. +func (in *PointInTimeRecoveryObservation) DeepCopy() *PointInTimeRecoveryObservation { + if in == nil { + return nil + } + out := new(PointInTimeRecoveryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointInTimeRecoveryParameters) DeepCopyInto(out *PointInTimeRecoveryParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointInTimeRecoveryParameters. +func (in *PointInTimeRecoveryParameters) DeepCopy() *PointInTimeRecoveryParameters { + if in == nil { + return nil + } + out := new(PointInTimeRecoveryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaInitParameters) DeepCopyInto(out *ReplicaInitParameters) { + *out = *in + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.PointInTimeRecovery != nil { + in, out := &in.PointInTimeRecovery, &out.PointInTimeRecovery + *out = new(bool) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(bool) + **out = **in + } + if in.RegionName != nil { + in, out := &in.RegionName, &out.RegionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaInitParameters. +func (in *ReplicaInitParameters) DeepCopy() *ReplicaInitParameters { + if in == nil { + return nil + } + out := new(ReplicaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaObservation) DeepCopyInto(out *ReplicaObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.PointInTimeRecovery != nil { + in, out := &in.PointInTimeRecovery, &out.PointInTimeRecovery + *out = new(bool) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(bool) + **out = **in + } + if in.RegionName != nil { + in, out := &in.RegionName, &out.RegionName + *out = new(string) + **out = **in + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamLabel != nil { + in, out := &in.StreamLabel, &out.StreamLabel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaObservation. 
+func (in *ReplicaObservation) DeepCopy() *ReplicaObservation { + if in == nil { + return nil + } + out := new(ReplicaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaParameters) DeepCopyInto(out *ReplicaParameters) { + *out = *in + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.PointInTimeRecovery != nil { + in, out := &in.PointInTimeRecovery, &out.PointInTimeRecovery + *out = new(bool) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(bool) + **out = **in + } + if in.RegionName != nil { + in, out := &in.RegionName, &out.RegionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaParameters. +func (in *ReplicaParameters) DeepCopy() *ReplicaParameters { + if in == nil { + return nil + } + out := new(ReplicaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BucketSourceInitParameters) DeepCopyInto(out *S3BucketSourceInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketOwner != nil { + in, out := &in.BucketOwner, &out.BucketOwner + *out = new(string) + **out = **in + } + if in.KeyPrefix != nil { + in, out := &in.KeyPrefix, &out.KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BucketSourceInitParameters. 
+func (in *S3BucketSourceInitParameters) DeepCopy() *S3BucketSourceInitParameters { + if in == nil { + return nil + } + out := new(S3BucketSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BucketSourceObservation) DeepCopyInto(out *S3BucketSourceObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketOwner != nil { + in, out := &in.BucketOwner, &out.BucketOwner + *out = new(string) + **out = **in + } + if in.KeyPrefix != nil { + in, out := &in.KeyPrefix, &out.KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BucketSourceObservation. +func (in *S3BucketSourceObservation) DeepCopy() *S3BucketSourceObservation { + if in == nil { + return nil + } + out := new(S3BucketSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BucketSourceParameters) DeepCopyInto(out *S3BucketSourceParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketOwner != nil { + in, out := &in.BucketOwner, &out.BucketOwner + *out = new(string) + **out = **in + } + if in.KeyPrefix != nil { + in, out := &in.KeyPrefix, &out.KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BucketSourceParameters. 
+func (in *S3BucketSourceParameters) DeepCopy() *S3BucketSourceParameters { + if in == nil { + return nil + } + out := new(S3BucketSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionInitParameters) DeepCopyInto(out *ServerSideEncryptionInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionInitParameters. +func (in *ServerSideEncryptionInitParameters) DeepCopy() *ServerSideEncryptionInitParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionObservation) DeepCopyInto(out *ServerSideEncryptionObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionObservation. +func (in *ServerSideEncryptionObservation) DeepCopy() *ServerSideEncryptionObservation { + if in == nil { + return nil + } + out := new(ServerSideEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerSideEncryptionParameters) DeepCopyInto(out *ServerSideEncryptionParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionParameters. +func (in *ServerSideEncryptionParameters) DeepCopy() *ServerSideEncryptionParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLInitParameters) DeepCopyInto(out *TTLInitParameters) { + *out = *in + if in.AttributeName != nil { + in, out := &in.AttributeName, &out.AttributeName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLInitParameters. +func (in *TTLInitParameters) DeepCopy() *TTLInitParameters { + if in == nil { + return nil + } + out := new(TTLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLObservation) DeepCopyInto(out *TTLObservation) { + *out = *in + if in.AttributeName != nil { + in, out := &in.AttributeName, &out.AttributeName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLObservation. 
+func (in *TTLObservation) DeepCopy() *TTLObservation { + if in == nil { + return nil + } + out := new(TTLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLParameters) DeepCopyInto(out *TTLParameters) { + *out = *in + if in.AttributeName != nil { + in, out := &in.AttributeName, &out.AttributeName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLParameters. +func (in *TTLParameters) DeepCopy() *TTLParameters { + if in == nil { + return nil + } + out := new(TTLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { + *out = *in + if in.Attribute != nil { + in, out := &in.Attribute, &out.Attribute + *out = make([]AttributeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BillingMode != nil { + in, out := &in.BillingMode, &out.BillingMode + *out = new(string) + **out = **in + } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } + if in.GlobalSecondaryIndex != nil { + in, out := &in.GlobalSecondaryIndex, &out.GlobalSecondaryIndex + *out = make([]GlobalSecondaryIndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HashKey != nil { + in, out := &in.HashKey, &out.HashKey + *out = new(string) + **out = **in + } + if in.ImportTable != nil { + in, out := &in.ImportTable, &out.ImportTable + *out = new(ImportTableInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalSecondaryIndex != nil { + in, out := &in.LocalSecondaryIndex, &out.LocalSecondaryIndex + *out = make([]LocalSecondaryIndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PointInTimeRecovery != nil { + in, out := &in.PointInTimeRecovery, &out.PointInTimeRecovery + *out = new(PointInTimeRecoveryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RangeKey != nil { + in, out := &in.RangeKey, &out.RangeKey + *out = new(string) + **out = **in + } + if in.ReadCapacity != nil { + in, out := &in.ReadCapacity, &out.ReadCapacity + *out = new(float64) + **out = **in + } + if in.Replica != nil { + in, out := &in.Replica, &out.Replica + *out = make([]ReplicaInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RestoreDateTime != nil { + in, out := &in.RestoreDateTime, &out.RestoreDateTime + *out = new(string) + **out = **in + } + if in.RestoreSourceName != 
nil { + in, out := &in.RestoreSourceName, &out.RestoreSourceName + *out = new(string) + **out = **in + } + if in.RestoreToLatestTime != nil { + in, out := &in.RestoreToLatestTime, &out.RestoreToLatestTime + *out = new(bool) + **out = **in + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(ServerSideEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamEnabled != nil { + in, out := &in.StreamEnabled, &out.StreamEnabled + *out = new(bool) + **out = **in + } + if in.StreamViewType != nil { + in, out := &in.StreamViewType, &out.StreamViewType + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(TTLInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TableClass != nil { + in, out := &in.TableClass, &out.TableClass + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WriteCapacity != nil { + in, out := &in.WriteCapacity, &out.WriteCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableInitParameters. +func (in *TableInitParameters) DeepCopy() *TableInitParameters { + if in == nil { + return nil + } + out := new(TableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableList) DeepCopyInto(out *TableList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Table, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableList. +func (in *TableList) DeepCopy() *TableList { + if in == nil { + return nil + } + out := new(TableList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableObservation) DeepCopyInto(out *TableObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Attribute != nil { + in, out := &in.Attribute, &out.Attribute + *out = make([]AttributeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BillingMode != nil { + in, out := &in.BillingMode, &out.BillingMode + *out = new(string) + **out = **in + } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } + if in.GlobalSecondaryIndex != nil { + in, out := &in.GlobalSecondaryIndex, &out.GlobalSecondaryIndex + *out = make([]GlobalSecondaryIndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HashKey != nil { + in, out := &in.HashKey, &out.HashKey + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImportTable != nil { + in, out := 
&in.ImportTable, &out.ImportTable + *out = new(ImportTableObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalSecondaryIndex != nil { + in, out := &in.LocalSecondaryIndex, &out.LocalSecondaryIndex + *out = make([]LocalSecondaryIndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PointInTimeRecovery != nil { + in, out := &in.PointInTimeRecovery, &out.PointInTimeRecovery + *out = new(PointInTimeRecoveryObservation) + (*in).DeepCopyInto(*out) + } + if in.RangeKey != nil { + in, out := &in.RangeKey, &out.RangeKey + *out = new(string) + **out = **in + } + if in.ReadCapacity != nil { + in, out := &in.ReadCapacity, &out.ReadCapacity + *out = new(float64) + **out = **in + } + if in.Replica != nil { + in, out := &in.Replica, &out.Replica + *out = make([]ReplicaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RestoreDateTime != nil { + in, out := &in.RestoreDateTime, &out.RestoreDateTime + *out = new(string) + **out = **in + } + if in.RestoreSourceName != nil { + in, out := &in.RestoreSourceName, &out.RestoreSourceName + *out = new(string) + **out = **in + } + if in.RestoreToLatestTime != nil { + in, out := &in.RestoreToLatestTime, &out.RestoreToLatestTime + *out = new(bool) + **out = **in + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(ServerSideEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamEnabled != nil { + in, out := &in.StreamEnabled, &out.StreamEnabled + *out = new(bool) + **out = **in + } + if in.StreamLabel != nil { + in, out := &in.StreamLabel, &out.StreamLabel + *out = new(string) + **out = **in + } + if in.StreamViewType != nil { + in, out := &in.StreamViewType, &out.StreamViewType + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := 
&in.TTL, &out.TTL + *out = new(TTLObservation) + (*in).DeepCopyInto(*out) + } + if in.TableClass != nil { + in, out := &in.TableClass, &out.TableClass + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WriteCapacity != nil { + in, out := &in.WriteCapacity, &out.WriteCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableObservation. +func (in *TableObservation) DeepCopy() *TableObservation { + if in == nil { + return nil + } + out := new(TableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableParameters) DeepCopyInto(out *TableParameters) { + *out = *in + if in.Attribute != nil { + in, out := &in.Attribute, &out.Attribute + *out = make([]AttributeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BillingMode != nil { + in, out := &in.BillingMode, &out.BillingMode + *out = new(string) + **out = **in + } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } + if in.GlobalSecondaryIndex != nil { + in, out := &in.GlobalSecondaryIndex, &out.GlobalSecondaryIndex + *out = make([]GlobalSecondaryIndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HashKey != nil { + in, out := &in.HashKey, &out.HashKey + *out = new(string) + **out = **in + } + if in.ImportTable != nil { + in, out := &in.ImportTable, &out.ImportTable + *out = new(ImportTableParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalSecondaryIndex != nil { + in, out := &in.LocalSecondaryIndex, &out.LocalSecondaryIndex + *out = make([]LocalSecondaryIndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PointInTimeRecovery != nil { + in, out := &in.PointInTimeRecovery, &out.PointInTimeRecovery + *out = new(PointInTimeRecoveryParameters) + (*in).DeepCopyInto(*out) + } + if in.RangeKey != nil { + in, out := &in.RangeKey, &out.RangeKey + *out = new(string) + **out = **in + } + if in.ReadCapacity != nil { + in, out := &in.ReadCapacity, &out.ReadCapacity + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Replica != nil { + in, out := &in.Replica, &out.Replica + *out = make([]ReplicaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RestoreDateTime != nil { + in, out := &in.RestoreDateTime, &out.RestoreDateTime 
+ *out = new(string) + **out = **in + } + if in.RestoreSourceName != nil { + in, out := &in.RestoreSourceName, &out.RestoreSourceName + *out = new(string) + **out = **in + } + if in.RestoreToLatestTime != nil { + in, out := &in.RestoreToLatestTime, &out.RestoreToLatestTime + *out = new(bool) + **out = **in + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(ServerSideEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamEnabled != nil { + in, out := &in.StreamEnabled, &out.StreamEnabled + *out = new(bool) + **out = **in + } + if in.StreamViewType != nil { + in, out := &in.StreamViewType, &out.StreamViewType + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(TTLParameters) + (*in).DeepCopyInto(*out) + } + if in.TableClass != nil { + in, out := &in.TableClass, &out.TableClass + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WriteCapacity != nil { + in, out := &in.WriteCapacity, &out.WriteCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableParameters. +func (in *TableParameters) DeepCopy() *TableParameters { + if in == nil { + return nil + } + out := new(TableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableSpec) DeepCopyInto(out *TableSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSpec. +func (in *TableSpec) DeepCopy() *TableSpec { + if in == nil { + return nil + } + out := new(TableSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableStatus) DeepCopyInto(out *TableStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableStatus. +func (in *TableStatus) DeepCopy() *TableStatus { + if in == nil { + return nil + } + out := new(TableStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dynamodb/v1beta2/zz_generated.managed.go b/apis/dynamodb/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..25acdb1a58 --- /dev/null +++ b/apis/dynamodb/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Table. +func (mg *Table) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Table. +func (mg *Table) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Table. +func (mg *Table) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Table. 
+func (mg *Table) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Table. +func (mg *Table) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Table. +func (mg *Table) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Table. +func (mg *Table) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Table. +func (mg *Table) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Table. +func (mg *Table) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Table. +func (mg *Table) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Table. +func (mg *Table) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Table. +func (mg *Table) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dynamodb/v1beta2/zz_generated.managedlist.go b/apis/dynamodb/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..bfa82abb5e --- /dev/null +++ b/apis/dynamodb/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this TableList. 
+func (l *TableList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dynamodb/v1beta2/zz_groupversion_info.go b/apis/dynamodb/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..6c1293b3d1 --- /dev/null +++ b/apis/dynamodb/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=dynamodb.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "dynamodb.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/dynamodb/v1beta2/zz_table_terraformed.go b/apis/dynamodb/v1beta2/zz_table_terraformed.go new file mode 100755 index 0000000000..ff10860550 --- /dev/null +++ b/apis/dynamodb/v1beta2/zz_table_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+
+package v1beta2
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Table
+func (mg *Table) GetTerraformResourceType() string {
+	return "aws_dynamodb_table"
+}
+
+// GetConnectionDetailsMapping for this Table
+func (tr *Table) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Table
+func (tr *Table) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Table
+func (tr *Table) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Table
+func (tr *Table) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Table
+func (tr *Table) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Table
+func (tr *Table) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Table
+func (tr *Table) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Table, optionally merging spec.initProvider
+// into spec.forProvider. (The generator emitted a duplicated
+// "GetInitParameters" comment here; fixed to match the function below.
+// NOTE(review): this is generated code — the real fix belongs in the
+// upjet template so it survives regeneration.)
+func
(tr *Table) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		// %q already quotes its argument; the previous '%q' rendered as '"name"'.
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Table using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Table) LateInitialize(attrs []byte) (bool, error) {
+	params := &TableParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Table) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/dynamodb/v1beta2/zz_table_types.go b/apis/dynamodb/v1beta2/zz_table_types.go new file mode 100755 index 0000000000..c08b734c7b --- /dev/null +++ b/apis/dynamodb/v1beta2/zz_table_types.go @@ -0,0 +1,772 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AttributeInitParameters struct { + + // Name of the attribute + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Attribute type. Valid values are S (string), N (number), B (binary). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AttributeObservation struct { + + // Name of the attribute + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Attribute type. Valid values are S (string), N (number), B (binary). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AttributeParameters struct { + + // Name of the attribute + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Attribute type. Valid values are S (string), N (number), B (binary). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type CsvInitParameters struct { + + // The delimiter used for separating items in the CSV file being imported. + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` + + // List of the headers used to specify a common header for all source CSV files being imported. 
+ // +listType=set + HeaderList []*string `json:"headerList,omitempty" tf:"header_list,omitempty"` +} + +type CsvObservation struct { + + // The delimiter used for separating items in the CSV file being imported. + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` + + // List of the headers used to specify a common header for all source CSV files being imported. + // +listType=set + HeaderList []*string `json:"headerList,omitempty" tf:"header_list,omitempty"` +} + +type CsvParameters struct { + + // The delimiter used for separating items in the CSV file being imported. + // +kubebuilder:validation:Optional + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` + + // List of the headers used to specify a common header for all source CSV files being imported. + // +kubebuilder:validation:Optional + // +listType=set + HeaderList []*string `json:"headerList,omitempty" tf:"header_list,omitempty"` +} + +type GlobalSecondaryIndexInitParameters struct { + + // Name of the hash key in the index; must be defined as an attribute in the resource. + HashKey *string `json:"hashKey,omitempty" tf:"hash_key,omitempty"` + + // Name of the index. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Only required with INCLUDE as a projection type; a list of attributes to project into the index. These do not need to be defined as attributes on the table. + // +listType=set + NonKeyAttributes []*string `json:"nonKeyAttributes,omitempty" tf:"non_key_attributes,omitempty"` + + // One of ALL, INCLUDE or KEYS_ONLY where ALL projects every attribute into the index, KEYS_ONLY projects into the index only the table and index hash_key and sort_key attributes , INCLUDE projects into the index all of the attributes that are defined in non_key_attributes in addition to the attributes that thatKEYS_ONLY project. 
+ ProjectionType *string `json:"projectionType,omitempty" tf:"projection_type,omitempty"` + + // Name of the range key; must be defined + RangeKey *string `json:"rangeKey,omitempty" tf:"range_key,omitempty"` + + // Number of read units for this index. Must be set if billing_mode is set to PROVISIONED. + ReadCapacity *float64 `json:"readCapacity,omitempty" tf:"read_capacity,omitempty"` + + // Number of write units for this index. Must be set if billing_mode is set to PROVISIONED. + WriteCapacity *float64 `json:"writeCapacity,omitempty" tf:"write_capacity,omitempty"` +} + +type GlobalSecondaryIndexObservation struct { + + // Name of the hash key in the index; must be defined as an attribute in the resource. + HashKey *string `json:"hashKey,omitempty" tf:"hash_key,omitempty"` + + // Name of the index. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Only required with INCLUDE as a projection type; a list of attributes to project into the index. These do not need to be defined as attributes on the table. + // +listType=set + NonKeyAttributes []*string `json:"nonKeyAttributes,omitempty" tf:"non_key_attributes,omitempty"` + + // One of ALL, INCLUDE or KEYS_ONLY where ALL projects every attribute into the index, KEYS_ONLY projects into the index only the table and index hash_key and sort_key attributes , INCLUDE projects into the index all of the attributes that are defined in non_key_attributes in addition to the attributes that thatKEYS_ONLY project. + ProjectionType *string `json:"projectionType,omitempty" tf:"projection_type,omitempty"` + + // Name of the range key; must be defined + RangeKey *string `json:"rangeKey,omitempty" tf:"range_key,omitempty"` + + // Number of read units for this index. Must be set if billing_mode is set to PROVISIONED. + ReadCapacity *float64 `json:"readCapacity,omitempty" tf:"read_capacity,omitempty"` + + // Number of write units for this index. Must be set if billing_mode is set to PROVISIONED. 
+ WriteCapacity *float64 `json:"writeCapacity,omitempty" tf:"write_capacity,omitempty"` +} + +type GlobalSecondaryIndexParameters struct { + + // Name of the hash key in the index; must be defined as an attribute in the resource. + // +kubebuilder:validation:Optional + HashKey *string `json:"hashKey" tf:"hash_key,omitempty"` + + // Name of the index. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Only required with INCLUDE as a projection type; a list of attributes to project into the index. These do not need to be defined as attributes on the table. + // +kubebuilder:validation:Optional + // +listType=set + NonKeyAttributes []*string `json:"nonKeyAttributes,omitempty" tf:"non_key_attributes,omitempty"` + + // One of ALL, INCLUDE or KEYS_ONLY where ALL projects every attribute into the index, KEYS_ONLY projects into the index only the table and index hash_key and sort_key attributes , INCLUDE projects into the index all of the attributes that are defined in non_key_attributes in addition to the attributes that thatKEYS_ONLY project. + // +kubebuilder:validation:Optional + ProjectionType *string `json:"projectionType" tf:"projection_type,omitempty"` + + // Name of the range key; must be defined + // +kubebuilder:validation:Optional + RangeKey *string `json:"rangeKey,omitempty" tf:"range_key,omitempty"` + + // Number of read units for this index. Must be set if billing_mode is set to PROVISIONED. + // +kubebuilder:validation:Optional + ReadCapacity *float64 `json:"readCapacity,omitempty" tf:"read_capacity,omitempty"` + + // Number of write units for this index. Must be set if billing_mode is set to PROVISIONED. + // +kubebuilder:validation:Optional + WriteCapacity *float64 `json:"writeCapacity,omitempty" tf:"write_capacity,omitempty"` +} + +type ImportTableInitParameters struct { + + // Type of compression to be used on the input coming from the imported table. + // Valid values are GZIP, ZSTD and NONE. 
+ InputCompressionType *string `json:"inputCompressionType,omitempty" tf:"input_compression_type,omitempty"` + + // The format of the source data. + // Valid values are CSV, DYNAMODB_JSON, and ION. + InputFormat *string `json:"inputFormat,omitempty" tf:"input_format,omitempty"` + + // Describe the format options for the data that was imported into the target table. + // There is one value, csv. + // See below. + InputFormatOptions *InputFormatOptionsInitParameters `json:"inputFormatOptions,omitempty" tf:"input_format_options,omitempty"` + + // Values for the S3 bucket the source file is imported from. + // See below. + S3BucketSource *S3BucketSourceInitParameters `json:"s3BucketSource,omitempty" tf:"s3_bucket_source,omitempty"` +} + +type ImportTableObservation struct { + + // Type of compression to be used on the input coming from the imported table. + // Valid values are GZIP, ZSTD and NONE. + InputCompressionType *string `json:"inputCompressionType,omitempty" tf:"input_compression_type,omitempty"` + + // The format of the source data. + // Valid values are CSV, DYNAMODB_JSON, and ION. + InputFormat *string `json:"inputFormat,omitempty" tf:"input_format,omitempty"` + + // Describe the format options for the data that was imported into the target table. + // There is one value, csv. + // See below. + InputFormatOptions *InputFormatOptionsObservation `json:"inputFormatOptions,omitempty" tf:"input_format_options,omitempty"` + + // Values for the S3 bucket the source file is imported from. + // See below. + S3BucketSource *S3BucketSourceObservation `json:"s3BucketSource,omitempty" tf:"s3_bucket_source,omitempty"` +} + +type ImportTableParameters struct { + + // Type of compression to be used on the input coming from the imported table. + // Valid values are GZIP, ZSTD and NONE. + // +kubebuilder:validation:Optional + InputCompressionType *string `json:"inputCompressionType,omitempty" tf:"input_compression_type,omitempty"` + + // The format of the source data. 
+ // Valid values are CSV, DYNAMODB_JSON, and ION. + // +kubebuilder:validation:Optional + InputFormat *string `json:"inputFormat" tf:"input_format,omitempty"` + + // Describe the format options for the data that was imported into the target table. + // There is one value, csv. + // See below. + // +kubebuilder:validation:Optional + InputFormatOptions *InputFormatOptionsParameters `json:"inputFormatOptions,omitempty" tf:"input_format_options,omitempty"` + + // Values for the S3 bucket the source file is imported from. + // See below. + // +kubebuilder:validation:Optional + S3BucketSource *S3BucketSourceParameters `json:"s3BucketSource" tf:"s3_bucket_source,omitempty"` +} + +type InputFormatOptionsInitParameters struct { + + // This block contains the processing options for the CSV file being imported: + Csv *CsvInitParameters `json:"csv,omitempty" tf:"csv,omitempty"` +} + +type InputFormatOptionsObservation struct { + + // This block contains the processing options for the CSV file being imported: + Csv *CsvObservation `json:"csv,omitempty" tf:"csv,omitempty"` +} + +type InputFormatOptionsParameters struct { + + // This block contains the processing options for the CSV file being imported: + // +kubebuilder:validation:Optional + Csv *CsvParameters `json:"csv,omitempty" tf:"csv,omitempty"` +} + +type LocalSecondaryIndexInitParameters struct { + + // Name of the index + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Only required with INCLUDE as a projection type; a list of attributes to project into the index. These do not need to be defined as attributes on the table. 
+ NonKeyAttributes []*string `json:"nonKeyAttributes,omitempty" tf:"non_key_attributes,omitempty"` + + // One of ALL, INCLUDE or KEYS_ONLY where ALL projects every attribute into the index, KEYS_ONLY projects into the index only the table and index hash_key and sort_key attributes , INCLUDE projects into the index all of the attributes that are defined in non_key_attributes in addition to the attributes that thatKEYS_ONLY project. + ProjectionType *string `json:"projectionType,omitempty" tf:"projection_type,omitempty"` + + // Name of the range key. + RangeKey *string `json:"rangeKey,omitempty" tf:"range_key,omitempty"` +} + +type LocalSecondaryIndexObservation struct { + + // Name of the index + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Only required with INCLUDE as a projection type; a list of attributes to project into the index. These do not need to be defined as attributes on the table. + NonKeyAttributes []*string `json:"nonKeyAttributes,omitempty" tf:"non_key_attributes,omitempty"` + + // One of ALL, INCLUDE or KEYS_ONLY where ALL projects every attribute into the index, KEYS_ONLY projects into the index only the table and index hash_key and sort_key attributes , INCLUDE projects into the index all of the attributes that are defined in non_key_attributes in addition to the attributes that thatKEYS_ONLY project. + ProjectionType *string `json:"projectionType,omitempty" tf:"projection_type,omitempty"` + + // Name of the range key. + RangeKey *string `json:"rangeKey,omitempty" tf:"range_key,omitempty"` +} + +type LocalSecondaryIndexParameters struct { + + // Name of the index + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Only required with INCLUDE as a projection type; a list of attributes to project into the index. These do not need to be defined as attributes on the table. 
+ // +kubebuilder:validation:Optional + NonKeyAttributes []*string `json:"nonKeyAttributes,omitempty" tf:"non_key_attributes,omitempty"` + + // One of ALL, INCLUDE or KEYS_ONLY where ALL projects every attribute into the index, KEYS_ONLY projects into the index only the table and index hash_key and sort_key attributes , INCLUDE projects into the index all of the attributes that are defined in non_key_attributes in addition to the attributes that thatKEYS_ONLY project. + // +kubebuilder:validation:Optional + ProjectionType *string `json:"projectionType" tf:"projection_type,omitempty"` + + // Name of the range key. + // +kubebuilder:validation:Optional + RangeKey *string `json:"rangeKey" tf:"range_key,omitempty"` +} + +type PointInTimeRecoveryInitParameters struct { + + // Whether to enable point-in-time recovery. It can take 10 minutes to enable for new tables. If the point_in_time_recovery block is not provided, this defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type PointInTimeRecoveryObservation struct { + + // Whether to enable point-in-time recovery. It can take 10 minutes to enable for new tables. If the point_in_time_recovery block is not provided, this defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type PointInTimeRecoveryParameters struct { + + // Whether to enable point-in-time recovery. It can take 10 minutes to enable for new tables. If the point_in_time_recovery block is not provided, this defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type ReplicaInitParameters struct { + + // ARN of the CMK that should be used for the AWS KMS encryption. This argument should only be used if the key is different from the default KMS-managed DynamoDB key, alias/aws/dynamodb. Note: This attribute will not be populated with the ARN of default keys. 
+ KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Whether to enable Point In Time Recovery for the replica. Default is false. + PointInTimeRecovery *bool `json:"pointInTimeRecovery,omitempty" tf:"point_in_time_recovery,omitempty"` + + // Whether to propagate the global table's tags to a replica. Default is false. Changes to tags only move in one direction: from global (source) to replica. In other words, tag drift on a replica will not trigger an update. Tag or replica changes on the global table, whether from drift or configuration changes, are propagated to replicas. Changing from true to false on a subsequent apply means replica tags are left as they were, unmanaged, not deleted. + PropagateTags *bool `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // Region name of the replica. + RegionName *string `json:"regionName,omitempty" tf:"region_name,omitempty"` +} + +type ReplicaObservation struct { + + // ARN of the replica + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN of the CMK that should be used for the AWS KMS encryption. This argument should only be used if the key is different from the default KMS-managed DynamoDB key, alias/aws/dynamodb. Note: This attribute will not be populated with the ARN of default keys. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Whether to enable Point In Time Recovery for the replica. Default is false. + PointInTimeRecovery *bool `json:"pointInTimeRecovery,omitempty" tf:"point_in_time_recovery,omitempty"` + + // Whether to propagate the global table's tags to a replica. Default is false. Changes to tags only move in one direction: from global (source) to replica. In other words, tag drift on a replica will not trigger an update. Tag or replica changes on the global table, whether from drift or configuration changes, are propagated to replicas. 
Changing from true to false on a subsequent apply means replica tags are left as they were, unmanaged, not deleted. + PropagateTags *bool `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // Region name of the replica. + RegionName *string `json:"regionName,omitempty" tf:"region_name,omitempty"` + + // ARN of the Table Stream. Only available when stream_enabled = true + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` + + // Timestamp, in ISO 8601 format, for this stream. Note that this timestamp is not a unique identifier for the stream on its own. However, the combination of AWS customer ID, table name and this field is guaranteed to be unique. It can be used for creating CloudWatch Alarms. Only available when stream_enabled = true. + StreamLabel *string `json:"streamLabel,omitempty" tf:"stream_label,omitempty"` +} + +type ReplicaParameters struct { + + // ARN of the CMK that should be used for the AWS KMS encryption. This argument should only be used if the key is different from the default KMS-managed DynamoDB key, alias/aws/dynamodb. Note: This attribute will not be populated with the ARN of default keys. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Whether to enable Point In Time Recovery for the replica. Default is false. + // +kubebuilder:validation:Optional + PointInTimeRecovery *bool `json:"pointInTimeRecovery,omitempty" tf:"point_in_time_recovery,omitempty"` + + // Whether to propagate the global table's tags to a replica. Default is false. Changes to tags only move in one direction: from global (source) to replica. In other words, tag drift on a replica will not trigger an update. Tag or replica changes on the global table, whether from drift or configuration changes, are propagated to replicas. Changing from true to false on a subsequent apply means replica tags are left as they were, unmanaged, not deleted. 
+ // +kubebuilder:validation:Optional + PropagateTags *bool `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // Region name of the replica. + // +kubebuilder:validation:Optional + RegionName *string `json:"regionName" tf:"region_name,omitempty"` +} + +type S3BucketSourceInitParameters struct { + + // The S3 bucket that is being imported from. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The account number of the S3 bucket that is being imported from. + BucketOwner *string `json:"bucketOwner,omitempty" tf:"bucket_owner,omitempty"` + + // The key prefix shared by all S3 Objects that are being imported. + KeyPrefix *string `json:"keyPrefix,omitempty" tf:"key_prefix,omitempty"` +} + +type S3BucketSourceObservation struct { + + // The S3 bucket that is being imported from. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The account number of the S3 bucket that is being imported from. + BucketOwner *string `json:"bucketOwner,omitempty" tf:"bucket_owner,omitempty"` + + // The key prefix shared by all S3 Objects that are being imported. + KeyPrefix *string `json:"keyPrefix,omitempty" tf:"key_prefix,omitempty"` +} + +type S3BucketSourceParameters struct { + + // The S3 bucket that is being imported from. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // The account number of the S3 bucket that is being imported from. + // +kubebuilder:validation:Optional + BucketOwner *string `json:"bucketOwner,omitempty" tf:"bucket_owner,omitempty"` + + // The key prefix shared by all S3 Objects that are being imported. + // +kubebuilder:validation:Optional + KeyPrefix *string `json:"keyPrefix,omitempty" tf:"key_prefix,omitempty"` +} + +type ServerSideEncryptionInitParameters struct { + + // Whether or not to enable encryption at rest using an AWS managed KMS customer master key (CMK). 
If enabled is false then server-side encryption is set to AWS-owned key (shown as DEFAULT in the AWS console). Potentially confusingly, if enabled is true and no kms_key_arn is specified then server-side encryption is set to the default KMS-managed key (shown as KMS in the AWS console). The AWS KMS documentation explains the difference between AWS-owned and KMS-managed keys. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // ARN of the CMK that should be used for the AWS KMS encryption. This argument should only be used if the key is different from the default KMS-managed DynamoDB key, alias/aws/dynamodb. Note: This attribute will not be populated with the ARN of default keys. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` +} + +type ServerSideEncryptionObservation struct { + + // Whether or not to enable encryption at rest using an AWS managed KMS customer master key (CMK). If enabled is false then server-side encryption is set to AWS-owned key (shown as DEFAULT in the AWS console). Potentially confusingly, if enabled is true and no kms_key_arn is specified then server-side encryption is set to the default KMS-managed key (shown as KMS in the AWS console). The AWS KMS documentation explains the difference between AWS-owned and KMS-managed keys. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // ARN of the CMK that should be used for the AWS KMS encryption. This argument should only be used if the key is different from the default KMS-managed DynamoDB key, alias/aws/dynamodb. Note: This attribute will not be populated with the ARN of default keys. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` +} + +type ServerSideEncryptionParameters struct { + + // Whether or not to enable encryption at rest using an AWS managed KMS customer master key (CMK). If enabled is false then server-side encryption is set to AWS-owned key (shown as DEFAULT in the AWS console). 
Potentially confusingly, if enabled is true and no kms_key_arn is specified then server-side encryption is set to the default KMS-managed key (shown as KMS in the AWS console). The AWS KMS documentation explains the difference between AWS-owned and KMS-managed keys. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // ARN of the CMK that should be used for the AWS KMS encryption. This argument should only be used if the key is different from the default KMS-managed DynamoDB key, alias/aws/dynamodb. Note: This attribute will not be populated with the ARN of default keys. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` +} + +type TTLInitParameters struct { + + // Name of the table attribute to store the TTL timestamp in. + AttributeName *string `json:"attributeName,omitempty" tf:"attribute_name,omitempty"` + + // Whether TTL is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type TTLObservation struct { + + // Name of the table attribute to store the TTL timestamp in. + AttributeName *string `json:"attributeName,omitempty" tf:"attribute_name,omitempty"` + + // Whether TTL is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type TTLParameters struct { + + // Name of the table attribute to store the TTL timestamp in. + // +kubebuilder:validation:Optional + AttributeName *string `json:"attributeName" tf:"attribute_name,omitempty"` + + // Whether TTL is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type TableInitParameters struct { + + // Set of nested attribute definitions. Only required for hash_key and range_key attributes. See below. + Attribute []AttributeInitParameters `json:"attribute,omitempty" tf:"attribute,omitempty"` + + // Controls how you are charged for read and write throughput and how you manage capacity. 
The valid values are PROVISIONED and PAY_PER_REQUEST. Defaults to PROVISIONED. + BillingMode *string `json:"billingMode,omitempty" tf:"billing_mode,omitempty"` + + // Enables deletion protection for table. Defaults to false. + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty" tf:"deletion_protection_enabled,omitempty"` + + // Describe a GSI for the table; subject to the normal limits on the number of GSIs, projected attributes, etc. See below. + GlobalSecondaryIndex []GlobalSecondaryIndexInitParameters `json:"globalSecondaryIndex,omitempty" tf:"global_secondary_index,omitempty"` + + // Attribute to use as the hash (partition) key. Must also be defined as an attribute. See below. + HashKey *string `json:"hashKey,omitempty" tf:"hash_key,omitempty"` + + // Import Amazon S3 data into a new table. See below. + ImportTable *ImportTableInitParameters `json:"importTable,omitempty" tf:"import_table,omitempty"` + + // Describe an LSI on the table; these can only be allocated at creation so you cannot change this definition after you have created the resource. See below. + LocalSecondaryIndex []LocalSecondaryIndexInitParameters `json:"localSecondaryIndex,omitempty" tf:"local_secondary_index,omitempty"` + + // Enable point-in-time recovery options. See below. + PointInTimeRecovery *PointInTimeRecoveryInitParameters `json:"pointInTimeRecovery,omitempty" tf:"point_in_time_recovery,omitempty"` + + // Attribute to use as the range (sort) key. Must also be defined as an attribute, see below. + RangeKey *string `json:"rangeKey,omitempty" tf:"range_key,omitempty"` + + // Number of read units for this table. If the billing_mode is PROVISIONED, this field is required. + ReadCapacity *float64 `json:"readCapacity,omitempty" tf:"read_capacity,omitempty"` + + // Configuration block(s) with DynamoDB Global Tables V2 (version 2019.11.21) replication configurations. See below. 
+ Replica []ReplicaInitParameters `json:"replica,omitempty" tf:"replica,omitempty"` + + // Time of the point-in-time recovery point to restore. + RestoreDateTime *string `json:"restoreDateTime,omitempty" tf:"restore_date_time,omitempty"` + + // Name of the table to restore. Must match the name of an existing table. + RestoreSourceName *string `json:"restoreSourceName,omitempty" tf:"restore_source_name,omitempty"` + + // If set, restores table to the most recent point-in-time recovery point. + RestoreToLatestTime *bool `json:"restoreToLatestTime,omitempty" tf:"restore_to_latest_time,omitempty"` + + // Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS-owned Customer Master Key if this argument isn't specified. See below. + ServerSideEncryption *ServerSideEncryptionInitParameters `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // Whether Streams are enabled. + StreamEnabled *bool `json:"streamEnabled,omitempty" tf:"stream_enabled,omitempty"` + + // When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES. + StreamViewType *string `json:"streamViewType,omitempty" tf:"stream_view_type,omitempty"` + + // Configuration block for TTL. See below. + TTL *TTLInitParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // Storage class of the table. + // Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. + // Default value is STANDARD. + TableClass *string `json:"tableClass,omitempty" tf:"table_class,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Number of write units for this table. If the billing_mode is PROVISIONED, this field is required. 
+ WriteCapacity *float64 `json:"writeCapacity,omitempty" tf:"write_capacity,omitempty"` +} + +type TableObservation struct { + + // ARN of the table + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Set of nested attribute definitions. Only required for hash_key and range_key attributes. See below. + Attribute []AttributeObservation `json:"attribute,omitempty" tf:"attribute,omitempty"` + + // Controls how you are charged for read and write throughput and how you manage capacity. The valid values are PROVISIONED and PAY_PER_REQUEST. Defaults to PROVISIONED. + BillingMode *string `json:"billingMode,omitempty" tf:"billing_mode,omitempty"` + + // Enables deletion protection for table. Defaults to false. + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty" tf:"deletion_protection_enabled,omitempty"` + + // Describe a GSI for the table; subject to the normal limits on the number of GSIs, projected attributes, etc. See below. + GlobalSecondaryIndex []GlobalSecondaryIndexObservation `json:"globalSecondaryIndex,omitempty" tf:"global_secondary_index,omitempty"` + + // Attribute to use as the hash (partition) key. Must also be defined as an attribute. See below. + HashKey *string `json:"hashKey,omitempty" tf:"hash_key,omitempty"` + + // Name of the table + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Import Amazon S3 data into a new table. See below. + ImportTable *ImportTableObservation `json:"importTable,omitempty" tf:"import_table,omitempty"` + + // Describe an LSI on the table; these can only be allocated at creation so you cannot change this definition after you have created the resource. See below. + LocalSecondaryIndex []LocalSecondaryIndexObservation `json:"localSecondaryIndex,omitempty" tf:"local_secondary_index,omitempty"` + + // Enable point-in-time recovery options. See below. 
+ PointInTimeRecovery *PointInTimeRecoveryObservation `json:"pointInTimeRecovery,omitempty" tf:"point_in_time_recovery,omitempty"` + + // Attribute to use as the range (sort) key. Must also be defined as an attribute, see below. + RangeKey *string `json:"rangeKey,omitempty" tf:"range_key,omitempty"` + + // Number of read units for this table. If the billing_mode is PROVISIONED, this field is required. + ReadCapacity *float64 `json:"readCapacity,omitempty" tf:"read_capacity,omitempty"` + + // Configuration block(s) with DynamoDB Global Tables V2 (version 2019.11.21) replication configurations. See below. + Replica []ReplicaObservation `json:"replica,omitempty" tf:"replica,omitempty"` + + // Time of the point-in-time recovery point to restore. + RestoreDateTime *string `json:"restoreDateTime,omitempty" tf:"restore_date_time,omitempty"` + + // Name of the table to restore. Must match the name of an existing table. + RestoreSourceName *string `json:"restoreSourceName,omitempty" tf:"restore_source_name,omitempty"` + + // If set, restores table to the most recent point-in-time recovery point. + RestoreToLatestTime *bool `json:"restoreToLatestTime,omitempty" tf:"restore_to_latest_time,omitempty"` + + // Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS-owned Customer Master Key if this argument isn't specified. See below. + ServerSideEncryption *ServerSideEncryptionObservation `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // ARN of the Table Stream. Only available when stream_enabled = true + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` + + // Whether Streams are enabled. + StreamEnabled *bool `json:"streamEnabled,omitempty" tf:"stream_enabled,omitempty"` + + // Timestamp, in ISO 8601 format, for this stream. Note that this timestamp is not a unique identifier for the stream on its own. 
However, the combination of AWS customer ID, table name and this field is guaranteed to be unique. It can be used for creating CloudWatch Alarms. Only available when stream_enabled = true. + StreamLabel *string `json:"streamLabel,omitempty" tf:"stream_label,omitempty"` + + // When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES. + StreamViewType *string `json:"streamViewType,omitempty" tf:"stream_view_type,omitempty"` + + // Configuration block for TTL. See below. + TTL *TTLObservation `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // Storage class of the table. + // Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. + // Default value is STANDARD. + TableClass *string `json:"tableClass,omitempty" tf:"table_class,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Number of write units for this table. If the billing_mode is PROVISIONED, this field is required. + WriteCapacity *float64 `json:"writeCapacity,omitempty" tf:"write_capacity,omitempty"` +} + +type TableParameters struct { + + // Set of nested attribute definitions. Only required for hash_key and range_key attributes. See below. + // +kubebuilder:validation:Optional + Attribute []AttributeParameters `json:"attribute,omitempty" tf:"attribute,omitempty"` + + // Controls how you are charged for read and write throughput and how you manage capacity. The valid values are PROVISIONED and PAY_PER_REQUEST. Defaults to PROVISIONED. 
+ // +kubebuilder:validation:Optional + BillingMode *string `json:"billingMode,omitempty" tf:"billing_mode,omitempty"` + + // Enables deletion protection for table. Defaults to false. + // +kubebuilder:validation:Optional + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty" tf:"deletion_protection_enabled,omitempty"` + + // Describe a GSI for the table; subject to the normal limits on the number of GSIs, projected attributes, etc. See below. + // +kubebuilder:validation:Optional + GlobalSecondaryIndex []GlobalSecondaryIndexParameters `json:"globalSecondaryIndex,omitempty" tf:"global_secondary_index,omitempty"` + + // Attribute to use as the hash (partition) key. Must also be defined as an attribute. See below. + // +kubebuilder:validation:Optional + HashKey *string `json:"hashKey,omitempty" tf:"hash_key,omitempty"` + + // Import Amazon S3 data into a new table. See below. + // +kubebuilder:validation:Optional + ImportTable *ImportTableParameters `json:"importTable,omitempty" tf:"import_table,omitempty"` + + // Describe an LSI on the table; these can only be allocated at creation so you cannot change this definition after you have created the resource. See below. + // +kubebuilder:validation:Optional + LocalSecondaryIndex []LocalSecondaryIndexParameters `json:"localSecondaryIndex,omitempty" tf:"local_secondary_index,omitempty"` + + // Enable point-in-time recovery options. See below. + // +kubebuilder:validation:Optional + PointInTimeRecovery *PointInTimeRecoveryParameters `json:"pointInTimeRecovery,omitempty" tf:"point_in_time_recovery,omitempty"` + + // Attribute to use as the range (sort) key. Must also be defined as an attribute, see below. + // +kubebuilder:validation:Optional + RangeKey *string `json:"rangeKey,omitempty" tf:"range_key,omitempty"` + + // Number of read units for this table. If the billing_mode is PROVISIONED, this field is required. 
+ // +kubebuilder:validation:Optional + ReadCapacity *float64 `json:"readCapacity,omitempty" tf:"read_capacity,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block(s) with DynamoDB Global Tables V2 (version 2019.11.21) replication configurations. See below. + // +kubebuilder:validation:Optional + Replica []ReplicaParameters `json:"replica,omitempty" tf:"replica,omitempty"` + + // Time of the point-in-time recovery point to restore. + // +kubebuilder:validation:Optional + RestoreDateTime *string `json:"restoreDateTime,omitempty" tf:"restore_date_time,omitempty"` + + // Name of the table to restore. Must match the name of an existing table. + // +kubebuilder:validation:Optional + RestoreSourceName *string `json:"restoreSourceName,omitempty" tf:"restore_source_name,omitempty"` + + // If set, restores table to the most recent point-in-time recovery point. + // +kubebuilder:validation:Optional + RestoreToLatestTime *bool `json:"restoreToLatestTime,omitempty" tf:"restore_to_latest_time,omitempty"` + + // Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS-owned Customer Master Key if this argument isn't specified. See below. + // +kubebuilder:validation:Optional + ServerSideEncryption *ServerSideEncryptionParameters `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // Whether Streams are enabled. + // +kubebuilder:validation:Optional + StreamEnabled *bool `json:"streamEnabled,omitempty" tf:"stream_enabled,omitempty"` + + // When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES. 
+ // +kubebuilder:validation:Optional + StreamViewType *string `json:"streamViewType,omitempty" tf:"stream_view_type,omitempty"` + + // Configuration block for TTL. See below. + // +kubebuilder:validation:Optional + TTL *TTLParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // Storage class of the table. + // Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. + // Default value is STANDARD. + // +kubebuilder:validation:Optional + TableClass *string `json:"tableClass,omitempty" tf:"table_class,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Number of write units for this table. If the billing_mode is PROVISIONED, this field is required. + // +kubebuilder:validation:Optional + WriteCapacity *float64 `json:"writeCapacity,omitempty" tf:"write_capacity,omitempty"` +} + +// TableSpec defines the desired state of Table +type TableSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TableParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TableInitParameters `json:"initProvider,omitempty"` +} + +// TableStatus defines the observed state of Table. 
+type TableStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TableObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Table is the Schema for the Tables API. Provides a DynamoDB table resource +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Table struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec TableSpec `json:"spec"` + Status TableStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TableList contains a list of Tables +type TableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Table `json:"items"` +} + +// Repository type metadata. +var ( + Table_Kind = "Table" + Table_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Table_Kind}.String() + Table_KindAPIVersion = Table_Kind + "." + CRDGroupVersion.String() + Table_GroupVersionKind = CRDGroupVersion.WithKind(Table_Kind) +) + +func init() { + SchemeBuilder.Register(&Table{}, &TableList{}) +} diff --git a/apis/ec2/v1beta1/zz_eip_types.go b/apis/ec2/v1beta1/zz_eip_types.go index 106dd49253..dd22907ea8 100755 --- a/apis/ec2/v1beta1/zz_eip_types.go +++ b/apis/ec2/v1beta1/zz_eip_types.go @@ -28,7 +28,7 @@ type EIPInitParameters struct { Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` // EC2 instance ID. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance Instance *string `json:"instance,omitempty" tf:"instance,omitempty"` // Reference to a Instance in ec2 to populate instance. @@ -158,7 +158,7 @@ type EIPParameters struct { Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` // EC2 instance ID. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +kubebuilder:validation:Optional Instance *string `json:"instance,omitempty" tf:"instance,omitempty"` diff --git a/apis/ec2/v1beta1/zz_eipassociation_types.go b/apis/ec2/v1beta1/zz_eipassociation_types.go index 3882fd73ec..0af75d29de 100755 --- a/apis/ec2/v1beta1/zz_eipassociation_types.go +++ b/apis/ec2/v1beta1/zz_eipassociation_types.go @@ -36,7 +36,7 @@ type EIPAssociationInitParameters struct { // EC2-Classic. For EC2-VPC, you can specify either the instance ID or the // network interface ID, but not both. The operation fails if you specify an // instance ID unless exactly one network interface is attached. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` @@ -120,7 +120,7 @@ type EIPAssociationParameters struct { // EC2-Classic. For EC2-VPC, you can specify either the instance ID or the // network interface ID, but not both. The operation fails if you specify an // instance ID unless exactly one network interface is attached. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` diff --git a/apis/ec2/v1beta1/zz_generated.conversion_hubs.go b/apis/ec2/v1beta1/zz_generated.conversion_hubs.go index ef62e584e4..590ba08bd9 100755 --- a/apis/ec2/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/ec2/v1beta1/zz_generated.conversion_hubs.go @@ -57,9 +57,6 @@ func (tr *EBSSnapshot) Hub() {} // Hub marks this type as a conversion hub. func (tr *EBSSnapshotCopy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *EBSSnapshotImport) Hub() {} - // Hub marks this type as a conversion hub. func (tr *EBSVolume) Hub() {} @@ -72,15 +69,9 @@ func (tr *EIP) Hub() {} // Hub marks this type as a conversion hub. func (tr *EIPAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FlowLog) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Host) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Instance) Hub() {} - // Hub marks this type as a conversion hub. func (tr *InstanceState) Hub() {} @@ -90,9 +81,6 @@ func (tr *InternetGateway) Hub() {} // Hub marks this type as a conversion hub. func (tr *KeyPair) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *LaunchTemplate) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MainRouteTableAssociation) Hub() {} @@ -156,12 +144,6 @@ func (tr *SnapshotCreateVolumePermission) Hub() {} // Hub marks this type as a conversion hub. func (tr *SpotDatafeedSubscription) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SpotFleetRequest) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *SpotInstanceRequest) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Subnet) Hub() {} @@ -174,9 +156,6 @@ func (tr *Tag) Hub() {} // Hub marks this type as a conversion hub. func (tr *TrafficMirrorFilter) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *TrafficMirrorFilterRule) Hub() {} - // Hub marks this type as a conversion hub. func (tr *TransitGateway) Hub() {} @@ -240,9 +219,6 @@ func (tr *VPCDHCPOptions) Hub() {} // Hub marks this type as a conversion hub. func (tr *VPCDHCPOptionsAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *VPCEndpoint) Hub() {} - // Hub marks this type as a conversion hub. func (tr *VPCEndpointConnectionNotification) Hub() {} @@ -267,9 +243,6 @@ func (tr *VPCIpam) Hub() {} // Hub marks this type as a conversion hub. func (tr *VPCIpamPool) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *VPCIpamPoolCidr) Hub() {} - // Hub marks this type as a conversion hub. func (tr *VPCIpamPoolCidrAllocation) Hub() {} @@ -279,18 +252,6 @@ func (tr *VPCIpamScope) Hub() {} // Hub marks this type as a conversion hub. func (tr *VPCIPv4CidrBlockAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *VPCPeeringConnection) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VPCPeeringConnectionAccepter) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VPCPeeringConnectionOptions) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VPNConnection) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *VPNConnectionRoute) Hub() {} diff --git a/apis/ec2/v1beta1/zz_generated.conversion_spokes.go b/apis/ec2/v1beta1/zz_generated.conversion_spokes.go index 49072b29be..d3fe4ba39d 100755 --- a/apis/ec2/v1beta1/zz_generated.conversion_spokes.go +++ b/apis/ec2/v1beta1/zz_generated.conversion_spokes.go @@ -13,6 +13,86 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" ) +// ConvertTo converts this EBSSnapshotImport to the hub type. +func (tr *EBSSnapshotImport) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EBSSnapshotImport type. +func (tr *EBSSnapshotImport) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FlowLog to the hub type. +func (tr *FlowLog) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FlowLog type. 
+func (tr *FlowLog) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Instance to the hub type. +func (tr *Instance) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Instance type. +func (tr *Instance) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LaunchTemplate to the hub type. +func (tr *LaunchTemplate) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LaunchTemplate type. 
+func (tr *LaunchTemplate) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + // ConvertTo converts this Route to the hub type. func (tr *Route) ConvertTo(dstRaw conversion.Hub) error { spokeVersion := tr.GetObjectKind().GroupVersionKind().Version @@ -32,3 +112,183 @@ func (tr *Route) ConvertFrom(srcRaw conversion.Hub) error { } return nil } + +// ConvertTo converts this SpotFleetRequest to the hub type. +func (tr *SpotFleetRequest) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpotFleetRequest type. +func (tr *SpotFleetRequest) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpotInstanceRequest to the hub type. 
+func (tr *SpotInstanceRequest) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpotInstanceRequest type. +func (tr *SpotInstanceRequest) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this TrafficMirrorFilterRule to the hub type. +func (tr *TrafficMirrorFilterRule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the TrafficMirrorFilterRule type. 
+func (tr *TrafficMirrorFilterRule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPCEndpoint to the hub type. +func (tr *VPCEndpoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPCEndpoint type. +func (tr *VPCEndpoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPCIpamPoolCidr to the hub type. +func (tr *VPCIpamPoolCidr) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPCIpamPoolCidr type. 
+func (tr *VPCIpamPoolCidr) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPCPeeringConnection to the hub type. +func (tr *VPCPeeringConnection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPCPeeringConnection type. +func (tr *VPCPeeringConnection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPCPeeringConnectionAccepter to the hub type. 
+func (tr *VPCPeeringConnectionAccepter) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPCPeeringConnectionAccepter type. +func (tr *VPCPeeringConnectionAccepter) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPCPeeringConnectionOptions to the hub type. +func (tr *VPCPeeringConnectionOptions) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPCPeeringConnectionOptions type. 
+func (tr *VPCPeeringConnectionOptions) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPNConnection to the hub type. +func (tr *VPNConnection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPNConnection type. +func (tr *VPNConnection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/ec2/v1beta1/zz_generated.resolvers.go b/apis/ec2/v1beta1/zz_generated.resolvers.go index 75bd1b353d..5469cd5f45 100644 --- a/apis/ec2/v1beta1/zz_generated.resolvers.go +++ b/apis/ec2/v1beta1/zz_generated.resolvers.go @@ -875,7 +875,7 @@ func (mg *EIP) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", 
"InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -913,7 +913,7 @@ func (mg *EIP) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.NetworkInterface = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NetworkInterfaceRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -982,7 +982,7 @@ func (mg *EIPAssociation) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.AllocationID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AllocationIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1020,7 +1020,7 @@ func (mg *EIPAssociation) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.InitProvider.AllocationID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.AllocationIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1480,7 +1480,7 @@ func (mg *InstanceState) ResolveReferences(ctx 
context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1499,7 +1499,7 @@ func (mg *InstanceState) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.InstanceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.InstanceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2610,7 +2610,7 @@ func (mg *NetworkInterfaceAttachment) ResolveReferences(ctx context.Context, c c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2648,7 +2648,7 @@ func (mg *NetworkInterfaceAttachment) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.NetworkInterfaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NetworkInterfaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err 
!= nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2698,7 +2698,7 @@ func (mg *NetworkInterfaceSgAttachment) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2736,7 +2736,7 @@ func (mg *NetworkInterfaceSgAttachment) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.SecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SecurityGroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5847,7 +5847,7 @@ func (mg *VPCEndpointRouteTableAssociation) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.RouteTableID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RouteTableIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCEndpoint", "VPCEndpointList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCEndpoint", "VPCEndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5885,7 +5885,7 @@ func (mg *VPCEndpointRouteTableAssociation) ResolveReferences(ctx context.Contex mg.Spec.InitProvider.RouteTableID = 
reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.RouteTableIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCEndpoint", "VPCEndpointList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCEndpoint", "VPCEndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5935,7 +5935,7 @@ func (mg *VPCEndpointSecurityGroupAssociation) ResolveReferences(ctx context.Con mg.Spec.ForProvider.SecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SecurityGroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCEndpoint", "VPCEndpointList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCEndpoint", "VPCEndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5973,7 +5973,7 @@ func (mg *VPCEndpointSecurityGroupAssociation) ResolveReferences(ctx context.Con mg.Spec.InitProvider.SecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.SecurityGroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCEndpoint", "VPCEndpointList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCEndpoint", "VPCEndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -6073,7 +6073,7 @@ func (mg *VPCEndpointSubnetAssociation) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", 
"VPCEndpoint", "VPCEndpointList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCEndpoint", "VPCEndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -6111,7 +6111,7 @@ func (mg *VPCEndpointSubnetAssociation) ResolveReferences(ctx context.Context, c mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCEndpoint", "VPCEndpointList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCEndpoint", "VPCEndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -6782,7 +6782,7 @@ func (mg *VPNConnectionRoute) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPNConnection", "VPNConnectionList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPNConnection", "VPNConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -6801,7 +6801,7 @@ func (mg *VPNConnectionRoute) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.VPNConnectionID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VPNConnectionIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPNConnection", "VPNConnectionList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPNConnection", "VPNConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") } @@ -7058,7 +7058,7 @@ func (mg *VolumeAttachment) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -7096,7 +7096,7 @@ func (mg *VolumeAttachment) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.VolumeID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VolumeIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/ec2/v1beta1/zz_instancestate_types.go b/apis/ec2/v1beta1/zz_instancestate_types.go index c8ee371cbb..40e5d6f01a 100755 --- a/apis/ec2/v1beta1/zz_instancestate_types.go +++ b/apis/ec2/v1beta1/zz_instancestate_types.go @@ -19,7 +19,7 @@ type InstanceStateInitParameters struct { Force *bool `json:"force,omitempty" tf:"force,omitempty"` // ID of the instance. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` @@ -57,7 +57,7 @@ type InstanceStateParameters struct { Force *bool `json:"force,omitempty" tf:"force,omitempty"` // ID of the instance. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` diff --git a/apis/ec2/v1beta1/zz_networkinterfaceattachment_types.go b/apis/ec2/v1beta1/zz_networkinterfaceattachment_types.go index e24743fd96..952f3b072d 100755 --- a/apis/ec2/v1beta1/zz_networkinterfaceattachment_types.go +++ b/apis/ec2/v1beta1/zz_networkinterfaceattachment_types.go @@ -19,7 +19,7 @@ type NetworkInterfaceAttachmentInitParameters struct { DeviceIndex *float64 `json:"deviceIndex,omitempty" tf:"device_index,omitempty"` // Instance ID to attach. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` @@ -72,7 +72,7 @@ type NetworkInterfaceAttachmentParameters struct { DeviceIndex *float64 `json:"deviceIndex,omitempty" tf:"device_index,omitempty"` // Instance ID to attach. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` diff --git a/apis/ec2/v1beta1/zz_networkinterfacesgattachment_types.go b/apis/ec2/v1beta1/zz_networkinterfacesgattachment_types.go index 27ce42afef..5da8e85f6c 100755 --- a/apis/ec2/v1beta1/zz_networkinterfacesgattachment_types.go +++ b/apis/ec2/v1beta1/zz_networkinterfacesgattachment_types.go @@ -16,7 +16,7 @@ import ( type NetworkInterfaceSgAttachmentInitParameters struct { // The ID of the network interface to attach to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_network_interface_id",true) NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` @@ -54,7 +54,7 @@ type NetworkInterfaceSgAttachmentObservation struct { type NetworkInterfaceSgAttachmentParameters struct { // The ID of the network interface to attach to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_network_interface_id",true) // +kubebuilder:validation:Optional NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` diff --git a/apis/ec2/v1beta1/zz_volumeattachment_types.go b/apis/ec2/v1beta1/zz_volumeattachment_types.go index 414a846c5b..0415ab786b 100755 --- a/apis/ec2/v1beta1/zz_volumeattachment_types.go +++ b/apis/ec2/v1beta1/zz_volumeattachment_types.go @@ -26,7 +26,7 @@ type VolumeAttachmentInitParameters struct { ForceDetach *bool `json:"forceDetach,omitempty" tf:"force_detach,omitempty"` // ID of the Instance to attach to - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` @@ -106,7 +106,7 @@ type VolumeAttachmentParameters struct { ForceDetach *bool `json:"forceDetach,omitempty" tf:"force_detach,omitempty"` // ID of the Instance to attach to - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` diff --git a/apis/ec2/v1beta1/zz_vpcendpointroutetableassociation_types.go 
b/apis/ec2/v1beta1/zz_vpcendpointroutetableassociation_types.go index 50eee2bda8..b7d0736a97 100755 --- a/apis/ec2/v1beta1/zz_vpcendpointroutetableassociation_types.go +++ b/apis/ec2/v1beta1/zz_vpcendpointroutetableassociation_types.go @@ -29,7 +29,7 @@ type VPCEndpointRouteTableAssociationInitParameters struct { RouteTableIDSelector *v1.Selector `json:"routeTableIdSelector,omitempty" tf:"-"` // Identifier of the VPC Endpoint with which the EC2 Route Table will be associated. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCEndpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` @@ -76,7 +76,7 @@ type VPCEndpointRouteTableAssociationParameters struct { RouteTableIDSelector *v1.Selector `json:"routeTableIdSelector,omitempty" tf:"-"` // Identifier of the VPC Endpoint with which the EC2 Route Table will be associated. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCEndpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` diff --git a/apis/ec2/v1beta1/zz_vpcendpointsecuritygroupassociation_types.go b/apis/ec2/v1beta1/zz_vpcendpointsecuritygroupassociation_types.go index 5dedab2d68..425750130f 100755 --- a/apis/ec2/v1beta1/zz_vpcendpointsecuritygroupassociation_types.go +++ b/apis/ec2/v1beta1/zz_vpcendpointsecuritygroupassociation_types.go @@ -31,7 +31,7 @@ type VPCEndpointSecurityGroupAssociationInitParameters struct { SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` // The ID of the VPC endpoint with which the security group will be associated. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCEndpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` @@ -84,7 +84,7 @@ type VPCEndpointSecurityGroupAssociationParameters struct { SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` // The ID of the VPC endpoint with which the security group will be associated. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCEndpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` diff --git a/apis/ec2/v1beta1/zz_vpcendpointsubnetassociation_types.go b/apis/ec2/v1beta1/zz_vpcendpointsubnetassociation_types.go index e61280d636..5bb9ff6974 100755 --- a/apis/ec2/v1beta1/zz_vpcendpointsubnetassociation_types.go +++ b/apis/ec2/v1beta1/zz_vpcendpointsubnetassociation_types.go @@ -28,7 +28,7 @@ type VPCEndpointSubnetAssociationInitParameters struct { SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` // The ID of the VPC endpoint with which the subnet will be associated. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCEndpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` @@ -74,7 +74,7 @@ type VPCEndpointSubnetAssociationParameters struct { SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` // The ID of the VPC endpoint with which the subnet will be associated. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCEndpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` diff --git a/apis/ec2/v1beta1/zz_vpnconnectionroute_types.go b/apis/ec2/v1beta1/zz_vpnconnectionroute_types.go index 6107bfebd7..0ea3eb1d1c 100755 --- a/apis/ec2/v1beta1/zz_vpnconnectionroute_types.go +++ b/apis/ec2/v1beta1/zz_vpnconnectionroute_types.go @@ -19,7 +19,7 @@ type VPNConnectionRouteInitParameters struct { DestinationCidrBlock *string `json:"destinationCidrBlock,omitempty" tf:"destination_cidr_block,omitempty"` // The ID of the VPN connection. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPNConnection + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPNConnection // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() VPNConnectionID *string `json:"vpnConnectionId,omitempty" tf:"vpn_connection_id,omitempty"` @@ -55,7 +55,7 @@ type VPNConnectionRouteParameters struct { Region *string `json:"region" tf:"-"` // The ID of the VPN connection. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPNConnection + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPNConnection // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VPNConnectionID *string `json:"vpnConnectionId,omitempty" tf:"vpn_connection_id,omitempty"` diff --git a/apis/ec2/v1beta2/zz_ebssnapshotimport_terraformed.go b/apis/ec2/v1beta2/zz_ebssnapshotimport_terraformed.go new file mode 100755 index 0000000000..22cbe2a74f --- /dev/null +++ b/apis/ec2/v1beta2/zz_ebssnapshotimport_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EBSSnapshotImport +func (mg *EBSSnapshotImport) GetTerraformResourceType() string { + return "aws_ebs_snapshot_import" +} + +// GetConnectionDetailsMapping for this EBSSnapshotImport +func (tr *EBSSnapshotImport) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this EBSSnapshotImport +func (tr *EBSSnapshotImport) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EBSSnapshotImport +func (tr *EBSSnapshotImport) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this 
EBSSnapshotImport +func (tr *EBSSnapshotImport) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this EBSSnapshotImport +func (tr *EBSSnapshotImport) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EBSSnapshotImport +func (tr *EBSSnapshotImport) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EBSSnapshotImport +func (tr *EBSSnapshotImport) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this EBSSnapshotImport +func (tr *EBSSnapshotImport) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this EBSSnapshotImport using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *EBSSnapshotImport) LateInitialize(attrs []byte) (bool, error) { + params := &EBSSnapshotImportParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EBSSnapshotImport) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_ebssnapshotimport_types.go b/apis/ec2/v1beta2/zz_ebssnapshotimport_types.go new file mode 100755 index 0000000000..325e45bd9b --- /dev/null +++ b/apis/ec2/v1beta2/zz_ebssnapshotimport_types.go @@ -0,0 +1,365 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClientDataInitParameters struct { + + // A user-defined comment about the disk upload. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // The time that the disk upload ends. 
+ UploadEnd *string `json:"uploadEnd,omitempty" tf:"upload_end,omitempty"` + + // The size of the uploaded disk image, in GiB. + UploadSize *float64 `json:"uploadSize,omitempty" tf:"upload_size,omitempty"` + + // The time that the disk upload starts. + UploadStart *string `json:"uploadStart,omitempty" tf:"upload_start,omitempty"` +} + +type ClientDataObservation struct { + + // A user-defined comment about the disk upload. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // The time that the disk upload ends. + UploadEnd *string `json:"uploadEnd,omitempty" tf:"upload_end,omitempty"` + + // The size of the uploaded disk image, in GiB. + UploadSize *float64 `json:"uploadSize,omitempty" tf:"upload_size,omitempty"` + + // The time that the disk upload starts. + UploadStart *string `json:"uploadStart,omitempty" tf:"upload_start,omitempty"` +} + +type ClientDataParameters struct { + + // A user-defined comment about the disk upload. + // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // The time that the disk upload ends. + // +kubebuilder:validation:Optional + UploadEnd *string `json:"uploadEnd,omitempty" tf:"upload_end,omitempty"` + + // The size of the uploaded disk image, in GiB. + // +kubebuilder:validation:Optional + UploadSize *float64 `json:"uploadSize,omitempty" tf:"upload_size,omitempty"` + + // The time that the disk upload starts. + // +kubebuilder:validation:Optional + UploadStart *string `json:"uploadStart,omitempty" tf:"upload_start,omitempty"` +} + +type DiskContainerInitParameters struct { + + // The description of the disk image being imported. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The format of the disk image being imported. One of VHD or VMDK. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The URL to the Amazon S3-based disk image being imported. It can either be a https URL (https://..) 
or an Amazon S3 URL (s3://..). One of url or user_bucket must be set. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The Amazon S3 bucket for the disk image. One of url or user_bucket must be set. Detailed below. + UserBucket *UserBucketInitParameters `json:"userBucket,omitempty" tf:"user_bucket,omitempty"` +} + +type DiskContainerObservation struct { + + // The description of the disk image being imported. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The format of the disk image being imported. One of VHD or VMDK. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The URL to the Amazon S3-based disk image being imported. It can either be a https URL (https://..) or an Amazon S3 URL (s3://..). One of url or user_bucket must be set. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The Amazon S3 bucket for the disk image. One of url or user_bucket must be set. Detailed below. + UserBucket *UserBucketObservation `json:"userBucket,omitempty" tf:"user_bucket,omitempty"` +} + +type DiskContainerParameters struct { + + // The description of the disk image being imported. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The format of the disk image being imported. One of VHD or VMDK. + // +kubebuilder:validation:Optional + Format *string `json:"format" tf:"format,omitempty"` + + // The URL to the Amazon S3-based disk image being imported. It can either be a https URL (https://..) or an Amazon S3 URL (s3://..). One of url or user_bucket must be set. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The Amazon S3 bucket for the disk image. One of url or user_bucket must be set. Detailed below. 
+ // +kubebuilder:validation:Optional + UserBucket *UserBucketParameters `json:"userBucket,omitempty" tf:"user_bucket,omitempty"` +} + +type EBSSnapshotImportInitParameters struct { + + // The client-specific data. Detailed below. + ClientData *ClientDataInitParameters `json:"clientData,omitempty" tf:"client_data,omitempty"` + + // The description string for the import snapshot task. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Information about the disk container. Detailed below. + DiskContainer *DiskContainerInitParameters `json:"diskContainer,omitempty" tf:"disk_container,omitempty"` + + // Specifies whether the destination snapshot of the imported image should be encrypted. The default KMS key for EBS is used unless you specify a non-default KMS key using KmsKeyId. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // An identifier for the symmetric KMS key to use when creating the encrypted snapshot. This parameter is only required if you want to use a non-default KMS key; if this parameter is not specified, the default KMS key for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Indicates whether to permanently restore an archived snapshot. + PermanentRestore *bool `json:"permanentRestore,omitempty" tf:"permanent_restore,omitempty"` + + // The name of the IAM Role the VM Import/Export service will assume. This role needs certain permissions. 
See https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-role. Default: vmimport + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // The name of the storage tier. Valid values are archive and standard. Default value is standard. + StorageTier *string `json:"storageTier,omitempty" tf:"storage_tier,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the number of days for which to temporarily restore an archived snapshot. Required for temporary restores only. The snapshot will be automatically re-archived after this period. + TemporaryRestoreDays *float64 `json:"temporaryRestoreDays,omitempty" tf:"temporary_restore_days,omitempty"` +} + +type EBSSnapshotImportObservation struct { + + // Amazon Resource Name (ARN) of the EBS Snapshot. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The client-specific data. Detailed below. + ClientData *ClientDataObservation `json:"clientData,omitempty" tf:"client_data,omitempty"` + + // The data encryption key identifier for the snapshot. + DataEncryptionKeyID *string `json:"dataEncryptionKeyId,omitempty" tf:"data_encryption_key_id,omitempty"` + + // The description string for the import snapshot task. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Information about the disk container. Detailed below. + DiskContainer *DiskContainerObservation `json:"diskContainer,omitempty" tf:"disk_container,omitempty"` + + // Specifies whether the destination snapshot of the imported image should be encrypted. The default KMS key for EBS is used unless you specify a non-default KMS key using KmsKeyId. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The snapshot ID (e.g., snap-59fcb34e). 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identifier for the symmetric KMS key to use when creating the encrypted snapshot. This parameter is only required if you want to use a non-default KMS key; if this parameter is not specified, the default KMS key for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Amazon Resource Name (ARN) of the EBS Snapshot. + OutpostArn *string `json:"outpostArn,omitempty" tf:"outpost_arn,omitempty"` + + // Value from an Amazon-maintained list (amazon, aws-marketplace, microsoft) of snapshot owners. + OwnerAlias *string `json:"ownerAlias,omitempty" tf:"owner_alias,omitempty"` + + // The AWS account ID of the EBS snapshot owner. + OwnerID *string `json:"ownerId,omitempty" tf:"owner_id,omitempty"` + + // Indicates whether to permanently restore an archived snapshot. + PermanentRestore *bool `json:"permanentRestore,omitempty" tf:"permanent_restore,omitempty"` + + // The name of the IAM Role the VM Import/Export service will assume. This role needs certain permissions. See https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-role. Default: vmimport + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // The name of the storage tier. Valid values are archive and standard. Default value is standard. + StorageTier *string `json:"storageTier,omitempty" tf:"storage_tier,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Specifies the number of days for which to temporarily restore an archived snapshot. Required for temporary restores only. 
The snapshot will be automatically re-archived after this period. + TemporaryRestoreDays *float64 `json:"temporaryRestoreDays,omitempty" tf:"temporary_restore_days,omitempty"` + + // The snapshot ID (e.g., snap-59fcb34e). + VolumeID *string `json:"volumeId,omitempty" tf:"volume_id,omitempty"` + + // The size of the drive in GiBs. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` +} + +type EBSSnapshotImportParameters struct { + + // The client-specific data. Detailed below. + // +kubebuilder:validation:Optional + ClientData *ClientDataParameters `json:"clientData,omitempty" tf:"client_data,omitempty"` + + // The description string for the import snapshot task. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Information about the disk container. Detailed below. + // +kubebuilder:validation:Optional + DiskContainer *DiskContainerParameters `json:"diskContainer,omitempty" tf:"disk_container,omitempty"` + + // Specifies whether the destination snapshot of the imported image should be encrypted. The default KMS key for EBS is used unless you specify a non-default KMS key using KmsKeyId. + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // An identifier for the symmetric KMS key to use when creating the encrypted snapshot. This parameter is only required if you want to use a non-default KMS key; if this parameter is not specified, the default KMS key for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. 
+ // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Indicates whether to permanently restore an archived snapshot. + // +kubebuilder:validation:Optional + PermanentRestore *bool `json:"permanentRestore,omitempty" tf:"permanent_restore,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The name of the IAM Role the VM Import/Export service will assume. This role needs certain permissions. See https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-role. Default: vmimport + // +kubebuilder:validation:Optional + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // The name of the storage tier. Valid values are archive and standard. Default value is standard. + // +kubebuilder:validation:Optional + StorageTier *string `json:"storageTier,omitempty" tf:"storage_tier,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the number of days for which to temporarily restore an archived snapshot. Required for temporary restores only. The snapshot will be automatically re-archived after this period. + // +kubebuilder:validation:Optional + TemporaryRestoreDays *float64 `json:"temporaryRestoreDays,omitempty" tf:"temporary_restore_days,omitempty"` +} + +type UserBucketInitParameters struct { + + // The name of the Amazon S3 bucket where the disk image is located. + S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` + + // The file name of the disk image. 
+ S3Key *string `json:"s3Key,omitempty" tf:"s3_key,omitempty"` +} + +type UserBucketObservation struct { + + // The name of the Amazon S3 bucket where the disk image is located. + S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` + + // The file name of the disk image. + S3Key *string `json:"s3Key,omitempty" tf:"s3_key,omitempty"` +} + +type UserBucketParameters struct { + + // The name of the Amazon S3 bucket where the disk image is located. + // +kubebuilder:validation:Optional + S3Bucket *string `json:"s3Bucket" tf:"s3_bucket,omitempty"` + + // The file name of the disk image. + // +kubebuilder:validation:Optional + S3Key *string `json:"s3Key" tf:"s3_key,omitempty"` +} + +// EBSSnapshotImportSpec defines the desired state of EBSSnapshotImport +type EBSSnapshotImportSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EBSSnapshotImportParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EBSSnapshotImportInitParameters `json:"initProvider,omitempty"` +} + +// EBSSnapshotImportStatus defines the observed state of EBSSnapshotImport. 
+type EBSSnapshotImportStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EBSSnapshotImportObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EBSSnapshotImport is the Schema for the EBSSnapshotImports API. Provides an elastic block storage snapshot import resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type EBSSnapshotImport struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.diskContainer) || (has(self.initProvider) && has(self.initProvider.diskContainer))",message="spec.forProvider.diskContainer is a required parameter" + Spec EBSSnapshotImportSpec `json:"spec"` + Status EBSSnapshotImportStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EBSSnapshotImportList contains a list of EBSSnapshotImports +type EBSSnapshotImportList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EBSSnapshotImport `json:"items"` +} + +// Repository type metadata. +var ( + EBSSnapshotImport_Kind = "EBSSnapshotImport" + EBSSnapshotImport_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: EBSSnapshotImport_Kind}.String() + EBSSnapshotImport_KindAPIVersion = EBSSnapshotImport_Kind + "." 
+ CRDGroupVersion.String() + EBSSnapshotImport_GroupVersionKind = CRDGroupVersion.WithKind(EBSSnapshotImport_Kind) +) + +func init() { + SchemeBuilder.Register(&EBSSnapshotImport{}, &EBSSnapshotImportList{}) +} diff --git a/apis/ec2/v1beta2/zz_flowlog_terraformed.go b/apis/ec2/v1beta2/zz_flowlog_terraformed.go new file mode 100755 index 0000000000..e0bf10d169 --- /dev/null +++ b/apis/ec2/v1beta2/zz_flowlog_terraformed.go @@ -0,0 +1,132 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FlowLog +func (mg *FlowLog) GetTerraformResourceType() string { + return "aws_flow_log" +} + +// GetConnectionDetailsMapping for this FlowLog +func (tr *FlowLog) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FlowLog +func (tr *FlowLog) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FlowLog +func (tr *FlowLog) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FlowLog +func (tr *FlowLog) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FlowLog +func (tr *FlowLog) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + 
return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FlowLog +func (tr *FlowLog) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FlowLog +func (tr *FlowLog) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FlowLog +func (tr *FlowLog) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FlowLog using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *FlowLog) LateInitialize(attrs []byte) (bool, error) { + params := &FlowLogParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("LogDestination")) + opts = append(opts, resource.WithNameFilter("LogFormat")) + opts = append(opts, resource.WithNameFilter("LogGroupName")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FlowLog) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_flowlog_types.go b/apis/ec2/v1beta2/zz_flowlog_types.go new file mode 100755 index 0000000000..7eb5151887 --- /dev/null +++ b/apis/ec2/v1beta2/zz_flowlog_types.go @@ -0,0 +1,375 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DestinationOptionsInitParameters struct { + + // The format for the flow log. Default value: plain-text. Valid values: plain-text, parquet. + FileFormat *string `json:"fileFormat,omitempty" tf:"file_format,omitempty"` + + // Indicates whether to use Hive-compatible prefixes for flow logs stored in Amazon S3. Default value: false. + HiveCompatiblePartitions *bool `json:"hiveCompatiblePartitions,omitempty" tf:"hive_compatible_partitions,omitempty"` + + // Indicates whether to partition the flow log per hour. This reduces the cost and response time for queries. Default value: false. 
+ PerHourPartition *bool `json:"perHourPartition,omitempty" tf:"per_hour_partition,omitempty"` +} + +type DestinationOptionsObservation struct { + + // The format for the flow log. Default value: plain-text. Valid values: plain-text, parquet. + FileFormat *string `json:"fileFormat,omitempty" tf:"file_format,omitempty"` + + // Indicates whether to use Hive-compatible prefixes for flow logs stored in Amazon S3. Default value: false. + HiveCompatiblePartitions *bool `json:"hiveCompatiblePartitions,omitempty" tf:"hive_compatible_partitions,omitempty"` + + // Indicates whether to partition the flow log per hour. This reduces the cost and response time for queries. Default value: false. + PerHourPartition *bool `json:"perHourPartition,omitempty" tf:"per_hour_partition,omitempty"` +} + +type DestinationOptionsParameters struct { + + // The format for the flow log. Default value: plain-text. Valid values: plain-text, parquet. + // +kubebuilder:validation:Optional + FileFormat *string `json:"fileFormat,omitempty" tf:"file_format,omitempty"` + + // Indicates whether to use Hive-compatible prefixes for flow logs stored in Amazon S3. Default value: false. + // +kubebuilder:validation:Optional + HiveCompatiblePartitions *bool `json:"hiveCompatiblePartitions,omitempty" tf:"hive_compatible_partitions,omitempty"` + + // Indicates whether to partition the flow log per hour. This reduces the cost and response time for queries. Default value: false. + // +kubebuilder:validation:Optional + PerHourPartition *bool `json:"perHourPartition,omitempty" tf:"per_hour_partition,omitempty"` +} + +type FlowLogInitParameters struct { + + // ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. + DeliverCrossAccountRole *string `json:"deliverCrossAccountRole,omitempty" tf:"deliver_cross_account_role,omitempty"` + + // Describes the destination options for a flow log. More details below. 
+ DestinationOptions *DestinationOptionsInitParameters `json:"destinationOptions,omitempty" tf:"destination_options,omitempty"` + + // Elastic Network Interface ID to attach to + EniID *string `json:"eniId,omitempty" tf:"eni_id,omitempty"` + + // The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` + + // The ARN of the logging destination. Either log_destination or log_group_name must be set. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationRef *v1.Reference `json:"logDestinationRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationSelector *v1.Selector `json:"logDestinationSelector,omitempty" tf:"-"` + + // The type of the logging destination. Valid values: cloud-watch-logs, s3, kinesis-data-firehose. Default: cloud-watch-logs. 
+ LogDestinationType *string `json:"logDestinationType,omitempty" tf:"log_destination_type,omitempty"` + + // The fields to include in the flow log record. Accepted format example: "$${interface-id} $${srcaddr} $${dstaddr} $${srcport} $${dstport}". + LogFormat *string `json:"logFormat,omitempty" tf:"log_format,omitempty"` + + // Deprecated: Use log_destination instead. The name of the CloudWatch log group. Either log_group_name or log_destination must be set. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The maximum interval of time + // during which a flow of packets is captured and aggregated into a flow + // log record. Valid Values: 60 seconds (1 minute) or 600 seconds (10 + // minutes). Default: 600. When transit_gateway_id or transit_gateway_attachment_id is specified, max_aggregation_interval must be 60 seconds (1 minute). + MaxAggregationInterval *float64 `json:"maxAggregationInterval,omitempty" tf:"max_aggregation_interval,omitempty"` + + // Subnet ID to attach to + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of traffic to capture. Valid values: ACCEPT,REJECT, ALL. 
+ TrafficType *string `json:"trafficType,omitempty" tf:"traffic_type,omitempty"` + + // Transit Gateway Attachment ID to attach to + TransitGatewayAttachmentID *string `json:"transitGatewayAttachmentId,omitempty" tf:"transit_gateway_attachment_id,omitempty"` + + // Transit Gateway ID to attach to + TransitGatewayID *string `json:"transitGatewayId,omitempty" tf:"transit_gateway_id,omitempty"` + + // VPC ID to attach to + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type FlowLogObservation struct { + + // The ARN of the Flow Log. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. + DeliverCrossAccountRole *string `json:"deliverCrossAccountRole,omitempty" tf:"deliver_cross_account_role,omitempty"` + + // Describes the destination options for a flow log. More details below. + DestinationOptions *DestinationOptionsObservation `json:"destinationOptions,omitempty" tf:"destination_options,omitempty"` + + // Elastic Network Interface ID to attach to + EniID *string `json:"eniId,omitempty" tf:"eni_id,omitempty"` + + // The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // The Flow Log ID + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ARN of the logging destination. Either log_destination or log_group_name must be set. 
+ LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // The type of the logging destination. Valid values: cloud-watch-logs, s3, kinesis-data-firehose. Default: cloud-watch-logs. + LogDestinationType *string `json:"logDestinationType,omitempty" tf:"log_destination_type,omitempty"` + + // The fields to include in the flow log record. Accepted format example: "$${interface-id} $${srcaddr} $${dstaddr} $${srcport} $${dstport}". + LogFormat *string `json:"logFormat,omitempty" tf:"log_format,omitempty"` + + // Deprecated: Use log_destination instead. The name of the CloudWatch log group. Either log_group_name or log_destination must be set. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The maximum interval of time + // during which a flow of packets is captured and aggregated into a flow + // log record. Valid Values: 60 seconds (1 minute) or 600 seconds (10 + // minutes). Default: 600. When transit_gateway_id or transit_gateway_attachment_id is specified, max_aggregation_interval must be 60 seconds (1 minute). + MaxAggregationInterval *float64 `json:"maxAggregationInterval,omitempty" tf:"max_aggregation_interval,omitempty"` + + // Subnet ID to attach to + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The type of traffic to capture. Valid values: ACCEPT,REJECT, ALL. 
+ TrafficType *string `json:"trafficType,omitempty" tf:"traffic_type,omitempty"` + + // Transit Gateway Attachment ID to attach to + TransitGatewayAttachmentID *string `json:"transitGatewayAttachmentId,omitempty" tf:"transit_gateway_attachment_id,omitempty"` + + // Transit Gateway ID to attach to + TransitGatewayID *string `json:"transitGatewayId,omitempty" tf:"transit_gateway_id,omitempty"` + + // VPC ID to attach to + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type FlowLogParameters struct { + + // ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. + // +kubebuilder:validation:Optional + DeliverCrossAccountRole *string `json:"deliverCrossAccountRole,omitempty" tf:"deliver_cross_account_role,omitempty"` + + // Describes the destination options for a flow log. More details below. + // +kubebuilder:validation:Optional + DestinationOptions *DestinationOptionsParameters `json:"destinationOptions,omitempty" tf:"destination_options,omitempty"` + + // Elastic Network Interface ID to attach to + // +kubebuilder:validation:Optional + EniID *string `json:"eniId,omitempty" tf:"eni_id,omitempty"` + + // The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` + + // The ARN of the logging destination. 
Either log_destination or log_group_name must be set. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationRef *v1.Reference `json:"logDestinationRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationSelector *v1.Selector `json:"logDestinationSelector,omitempty" tf:"-"` + + // The type of the logging destination. Valid values: cloud-watch-logs, s3, kinesis-data-firehose. Default: cloud-watch-logs. + // +kubebuilder:validation:Optional + LogDestinationType *string `json:"logDestinationType,omitempty" tf:"log_destination_type,omitempty"` + + // The fields to include in the flow log record. Accepted format example: "$${interface-id} $${srcaddr} $${dstaddr} $${srcport} $${dstport}". + // +kubebuilder:validation:Optional + LogFormat *string `json:"logFormat,omitempty" tf:"log_format,omitempty"` + + // Deprecated: Use log_destination instead. The name of the CloudWatch log group. Either log_group_name or log_destination must be set. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The maximum interval of time + // during which a flow of packets is captured and aggregated into a flow + // log record. Valid Values: 60 seconds (1 minute) or 600 seconds (10 + // minutes). Default: 600. When transit_gateway_id or transit_gateway_attachment_id is specified, max_aggregation_interval must be 60 seconds (1 minute). 
+ // +kubebuilder:validation:Optional + MaxAggregationInterval *float64 `json:"maxAggregationInterval,omitempty" tf:"max_aggregation_interval,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Subnet ID to attach to + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of traffic to capture. Valid values: ACCEPT,REJECT, ALL. + // +kubebuilder:validation:Optional + TrafficType *string `json:"trafficType,omitempty" tf:"traffic_type,omitempty"` + + // Transit Gateway Attachment ID to attach to + // +kubebuilder:validation:Optional + TransitGatewayAttachmentID *string `json:"transitGatewayAttachmentId,omitempty" tf:"transit_gateway_attachment_id,omitempty"` + + // Transit Gateway ID to attach to + // +kubebuilder:validation:Optional + TransitGatewayID *string `json:"transitGatewayId,omitempty" tf:"transit_gateway_id,omitempty"` + + // VPC ID to attach to + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. 
+ // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +// FlowLogSpec defines the desired state of FlowLog +type FlowLogSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FlowLogParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FlowLogInitParameters `json:"initProvider,omitempty"` +} + +// FlowLogStatus defines the observed state of FlowLog. +type FlowLogStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FlowLogObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FlowLog is the Schema for the FlowLogs API. 
Provides a VPC/Subnet/ENI Flow Log +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type FlowLog struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FlowLogSpec `json:"spec"` + Status FlowLogStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FlowLogList contains a list of FlowLogs +type FlowLogList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FlowLog `json:"items"` +} + +// Repository type metadata. +var ( + FlowLog_Kind = "FlowLog" + FlowLog_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FlowLog_Kind}.String() + FlowLog_KindAPIVersion = FlowLog_Kind + "." + CRDGroupVersion.String() + FlowLog_GroupVersionKind = CRDGroupVersion.WithKind(FlowLog_Kind) +) + +func init() { + SchemeBuilder.Register(&FlowLog{}, &FlowLogList{}) +} diff --git a/apis/ec2/v1beta2/zz_generated.conversion_hubs.go b/apis/ec2/v1beta2/zz_generated.conversion_hubs.go index 7e0bed19f1..5c5007b495 100755 --- a/apis/ec2/v1beta2/zz_generated.conversion_hubs.go +++ b/apis/ec2/v1beta2/zz_generated.conversion_hubs.go @@ -6,5 +6,44 @@ package v1beta2 +// Hub marks this type as a conversion hub. +func (tr *EBSSnapshotImport) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FlowLog) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Instance) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LaunchTemplate) Hub() {} + // Hub marks this type as a conversion hub. 
func (tr *Route) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpotFleetRequest) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpotInstanceRequest) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *TrafficMirrorFilterRule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPCEndpoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPCIpamPoolCidr) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPCPeeringConnection) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPCPeeringConnectionAccepter) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPCPeeringConnectionOptions) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPNConnection) Hub() {} diff --git a/apis/ec2/v1beta2/zz_generated.deepcopy.go b/apis/ec2/v1beta2/zz_generated.deepcopy.go index a4305fffb9..d9da77b498 100644 --- a/apis/ec2/v1beta2/zz_generated.deepcopy.go +++ b/apis/ec2/v1beta2/zz_generated.deepcopy.go @@ -13,27 +13,18157 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorCountInitParameters) DeepCopyInto(out *AcceleratorCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorCountInitParameters. +func (in *AcceleratorCountInitParameters) DeepCopy() *AcceleratorCountInitParameters { + if in == nil { + return nil + } + out := new(AcceleratorCountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AcceleratorCountObservation) DeepCopyInto(out *AcceleratorCountObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorCountObservation. +func (in *AcceleratorCountObservation) DeepCopy() *AcceleratorCountObservation { + if in == nil { + return nil + } + out := new(AcceleratorCountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorCountParameters) DeepCopyInto(out *AcceleratorCountParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorCountParameters. +func (in *AcceleratorCountParameters) DeepCopy() *AcceleratorCountParameters { + if in == nil { + return nil + } + out := new(AcceleratorCountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorTotalMemoryMibInitParameters) DeepCopyInto(out *AcceleratorTotalMemoryMibInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorTotalMemoryMibInitParameters. 
+func (in *AcceleratorTotalMemoryMibInitParameters) DeepCopy() *AcceleratorTotalMemoryMibInitParameters { + if in == nil { + return nil + } + out := new(AcceleratorTotalMemoryMibInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorTotalMemoryMibObservation) DeepCopyInto(out *AcceleratorTotalMemoryMibObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorTotalMemoryMibObservation. +func (in *AcceleratorTotalMemoryMibObservation) DeepCopy() *AcceleratorTotalMemoryMibObservation { + if in == nil { + return nil + } + out := new(AcceleratorTotalMemoryMibObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorTotalMemoryMibParameters) DeepCopyInto(out *AcceleratorTotalMemoryMibParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorTotalMemoryMibParameters. +func (in *AcceleratorTotalMemoryMibParameters) DeepCopy() *AcceleratorTotalMemoryMibParameters { + if in == nil { + return nil + } + out := new(AcceleratorTotalMemoryMibParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccepterInitParameters) DeepCopyInto(out *AccepterInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccepterInitParameters. +func (in *AccepterInitParameters) DeepCopy() *AccepterInitParameters { + if in == nil { + return nil + } + out := new(AccepterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccepterObservation) DeepCopyInto(out *AccepterObservation) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccepterObservation. +func (in *AccepterObservation) DeepCopy() *AccepterObservation { + if in == nil { + return nil + } + out := new(AccepterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccepterParameters) DeepCopyInto(out *AccepterParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccepterParameters. +func (in *AccepterParameters) DeepCopy() *AccepterParameters { + if in == nil { + return nil + } + out := new(AccepterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BaselineEBSBandwidthMbpsInitParameters) DeepCopyInto(out *BaselineEBSBandwidthMbpsInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaselineEBSBandwidthMbpsInitParameters. +func (in *BaselineEBSBandwidthMbpsInitParameters) DeepCopy() *BaselineEBSBandwidthMbpsInitParameters { + if in == nil { + return nil + } + out := new(BaselineEBSBandwidthMbpsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaselineEBSBandwidthMbpsObservation) DeepCopyInto(out *BaselineEBSBandwidthMbpsObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaselineEBSBandwidthMbpsObservation. +func (in *BaselineEBSBandwidthMbpsObservation) DeepCopy() *BaselineEBSBandwidthMbpsObservation { + if in == nil { + return nil + } + out := new(BaselineEBSBandwidthMbpsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaselineEBSBandwidthMbpsParameters) DeepCopyInto(out *BaselineEBSBandwidthMbpsParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaselineEBSBandwidthMbpsParameters. 
+func (in *BaselineEBSBandwidthMbpsParameters) DeepCopy() *BaselineEBSBandwidthMbpsParameters { + if in == nil { + return nil + } + out := new(BaselineEBSBandwidthMbpsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockDeviceMappingsInitParameters) DeepCopyInto(out *BlockDeviceMappingsInitParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(EBSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(string) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingsInitParameters. +func (in *BlockDeviceMappingsInitParameters) DeepCopy() *BlockDeviceMappingsInitParameters { + if in == nil { + return nil + } + out := new(BlockDeviceMappingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockDeviceMappingsObservation) DeepCopyInto(out *BlockDeviceMappingsObservation) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(EBSObservation) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(string) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingsObservation. +func (in *BlockDeviceMappingsObservation) DeepCopy() *BlockDeviceMappingsObservation { + if in == nil { + return nil + } + out := new(BlockDeviceMappingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockDeviceMappingsParameters) DeepCopyInto(out *BlockDeviceMappingsParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(EBSParameters) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(string) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingsParameters. +func (in *BlockDeviceMappingsParameters) DeepCopy() *BlockDeviceMappingsParameters { + if in == nil { + return nil + } + out := new(BlockDeviceMappingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CPUOptionsInitParameters) DeepCopyInto(out *CPUOptionsInitParameters) { + *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } + if in.CoreCount != nil { + in, out := &in.CoreCount, &out.CoreCount + *out = new(float64) + **out = **in + } + if in.ThreadsPerCore != nil { + in, out := &in.ThreadsPerCore, &out.ThreadsPerCore + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUOptionsInitParameters. +func (in *CPUOptionsInitParameters) DeepCopy() *CPUOptionsInitParameters { + if in == nil { + return nil + } + out := new(CPUOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CPUOptionsObservation) DeepCopyInto(out *CPUOptionsObservation) { + *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } + if in.CoreCount != nil { + in, out := &in.CoreCount, &out.CoreCount + *out = new(float64) + **out = **in + } + if in.ThreadsPerCore != nil { + in, out := &in.ThreadsPerCore, &out.ThreadsPerCore + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUOptionsObservation. +func (in *CPUOptionsObservation) DeepCopy() *CPUOptionsObservation { + if in == nil { + return nil + } + out := new(CPUOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CPUOptionsParameters) DeepCopyInto(out *CPUOptionsParameters) { + *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } + if in.CoreCount != nil { + in, out := &in.CoreCount, &out.CoreCount + *out = new(float64) + **out = **in + } + if in.ThreadsPerCore != nil { + in, out := &in.ThreadsPerCore, &out.ThreadsPerCore + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUOptionsParameters. +func (in *CPUOptionsParameters) DeepCopy() *CPUOptionsParameters { + if in == nil { + return nil + } + out := new(CPUOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityRebalanceInitParameters) DeepCopyInto(out *CapacityRebalanceInitParameters) { + *out = *in + if in.ReplacementStrategy != nil { + in, out := &in.ReplacementStrategy, &out.ReplacementStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityRebalanceInitParameters. +func (in *CapacityRebalanceInitParameters) DeepCopy() *CapacityRebalanceInitParameters { + if in == nil { + return nil + } + out := new(CapacityRebalanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityRebalanceObservation) DeepCopyInto(out *CapacityRebalanceObservation) { + *out = *in + if in.ReplacementStrategy != nil { + in, out := &in.ReplacementStrategy, &out.ReplacementStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityRebalanceObservation. 
+func (in *CapacityRebalanceObservation) DeepCopy() *CapacityRebalanceObservation { + if in == nil { + return nil + } + out := new(CapacityRebalanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityRebalanceParameters) DeepCopyInto(out *CapacityRebalanceParameters) { + *out = *in + if in.ReplacementStrategy != nil { + in, out := &in.ReplacementStrategy, &out.ReplacementStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityRebalanceParameters. +func (in *CapacityRebalanceParameters) DeepCopy() *CapacityRebalanceParameters { + if in == nil { + return nil + } + out := new(CapacityRebalanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityReservationSpecificationCapacityReservationTargetInitParameters) DeepCopyInto(out *CapacityReservationSpecificationCapacityReservationTargetInitParameters) { + *out = *in + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } + if in.CapacityReservationResourceGroupArn != nil { + in, out := &in.CapacityReservationResourceGroupArn, &out.CapacityReservationResourceGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationSpecificationCapacityReservationTargetInitParameters. 
+func (in *CapacityReservationSpecificationCapacityReservationTargetInitParameters) DeepCopy() *CapacityReservationSpecificationCapacityReservationTargetInitParameters { + if in == nil { + return nil + } + out := new(CapacityReservationSpecificationCapacityReservationTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityReservationSpecificationCapacityReservationTargetObservation) DeepCopyInto(out *CapacityReservationSpecificationCapacityReservationTargetObservation) { + *out = *in + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } + if in.CapacityReservationResourceGroupArn != nil { + in, out := &in.CapacityReservationResourceGroupArn, &out.CapacityReservationResourceGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationSpecificationCapacityReservationTargetObservation. +func (in *CapacityReservationSpecificationCapacityReservationTargetObservation) DeepCopy() *CapacityReservationSpecificationCapacityReservationTargetObservation { + if in == nil { + return nil + } + out := new(CapacityReservationSpecificationCapacityReservationTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityReservationSpecificationCapacityReservationTargetParameters) DeepCopyInto(out *CapacityReservationSpecificationCapacityReservationTargetParameters) { + *out = *in + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } + if in.CapacityReservationResourceGroupArn != nil { + in, out := &in.CapacityReservationResourceGroupArn, &out.CapacityReservationResourceGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationSpecificationCapacityReservationTargetParameters. +func (in *CapacityReservationSpecificationCapacityReservationTargetParameters) DeepCopy() *CapacityReservationSpecificationCapacityReservationTargetParameters { + if in == nil { + return nil + } + out := new(CapacityReservationSpecificationCapacityReservationTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityReservationSpecificationInitParameters) DeepCopyInto(out *CapacityReservationSpecificationInitParameters) { + *out = *in + if in.CapacityReservationPreference != nil { + in, out := &in.CapacityReservationPreference, &out.CapacityReservationPreference + *out = new(string) + **out = **in + } + if in.CapacityReservationTarget != nil { + in, out := &in.CapacityReservationTarget, &out.CapacityReservationTarget + *out = new(CapacityReservationTargetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationSpecificationInitParameters. 
+func (in *CapacityReservationSpecificationInitParameters) DeepCopy() *CapacityReservationSpecificationInitParameters { + if in == nil { + return nil + } + out := new(CapacityReservationSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityReservationSpecificationObservation) DeepCopyInto(out *CapacityReservationSpecificationObservation) { + *out = *in + if in.CapacityReservationPreference != nil { + in, out := &in.CapacityReservationPreference, &out.CapacityReservationPreference + *out = new(string) + **out = **in + } + if in.CapacityReservationTarget != nil { + in, out := &in.CapacityReservationTarget, &out.CapacityReservationTarget + *out = new(CapacityReservationTargetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationSpecificationObservation. +func (in *CapacityReservationSpecificationObservation) DeepCopy() *CapacityReservationSpecificationObservation { + if in == nil { + return nil + } + out := new(CapacityReservationSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityReservationSpecificationParameters) DeepCopyInto(out *CapacityReservationSpecificationParameters) { + *out = *in + if in.CapacityReservationPreference != nil { + in, out := &in.CapacityReservationPreference, &out.CapacityReservationPreference + *out = new(string) + **out = **in + } + if in.CapacityReservationTarget != nil { + in, out := &in.CapacityReservationTarget, &out.CapacityReservationTarget + *out = new(CapacityReservationTargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationSpecificationParameters. +func (in *CapacityReservationSpecificationParameters) DeepCopy() *CapacityReservationSpecificationParameters { + if in == nil { + return nil + } + out := new(CapacityReservationSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityReservationTargetInitParameters) DeepCopyInto(out *CapacityReservationTargetInitParameters) { + *out = *in + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } + if in.CapacityReservationResourceGroupArn != nil { + in, out := &in.CapacityReservationResourceGroupArn, &out.CapacityReservationResourceGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationTargetInitParameters. +func (in *CapacityReservationTargetInitParameters) DeepCopy() *CapacityReservationTargetInitParameters { + if in == nil { + return nil + } + out := new(CapacityReservationTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityReservationTargetObservation) DeepCopyInto(out *CapacityReservationTargetObservation) { + *out = *in + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } + if in.CapacityReservationResourceGroupArn != nil { + in, out := &in.CapacityReservationResourceGroupArn, &out.CapacityReservationResourceGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationTargetObservation. +func (in *CapacityReservationTargetObservation) DeepCopy() *CapacityReservationTargetObservation { + if in == nil { + return nil + } + out := new(CapacityReservationTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityReservationTargetParameters) DeepCopyInto(out *CapacityReservationTargetParameters) { + *out = *in + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } + if in.CapacityReservationResourceGroupArn != nil { + in, out := &in.CapacityReservationResourceGroupArn, &out.CapacityReservationResourceGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationTargetParameters. +func (in *CapacityReservationTargetParameters) DeepCopy() *CapacityReservationTargetParameters { + if in == nil { + return nil + } + out := new(CapacityReservationTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CidrAuthorizationContextInitParameters) DeepCopyInto(out *CidrAuthorizationContextInitParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Signature != nil { + in, out := &in.Signature, &out.Signature + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CidrAuthorizationContextInitParameters. +func (in *CidrAuthorizationContextInitParameters) DeepCopy() *CidrAuthorizationContextInitParameters { + if in == nil { + return nil + } + out := new(CidrAuthorizationContextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CidrAuthorizationContextObservation) DeepCopyInto(out *CidrAuthorizationContextObservation) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Signature != nil { + in, out := &in.Signature, &out.Signature + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CidrAuthorizationContextObservation. +func (in *CidrAuthorizationContextObservation) DeepCopy() *CidrAuthorizationContextObservation { + if in == nil { + return nil + } + out := new(CidrAuthorizationContextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CidrAuthorizationContextParameters) DeepCopyInto(out *CidrAuthorizationContextParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Signature != nil { + in, out := &in.Signature, &out.Signature + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CidrAuthorizationContextParameters. +func (in *CidrAuthorizationContextParameters) DeepCopy() *CidrAuthorizationContextParameters { + if in == nil { + return nil + } + out := new(CidrAuthorizationContextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientDataInitParameters) DeepCopyInto(out *ClientDataInitParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.UploadEnd != nil { + in, out := &in.UploadEnd, &out.UploadEnd + *out = new(string) + **out = **in + } + if in.UploadSize != nil { + in, out := &in.UploadSize, &out.UploadSize + *out = new(float64) + **out = **in + } + if in.UploadStart != nil { + in, out := &in.UploadStart, &out.UploadStart + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientDataInitParameters. +func (in *ClientDataInitParameters) DeepCopy() *ClientDataInitParameters { + if in == nil { + return nil + } + out := new(ClientDataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientDataObservation) DeepCopyInto(out *ClientDataObservation) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.UploadEnd != nil { + in, out := &in.UploadEnd, &out.UploadEnd + *out = new(string) + **out = **in + } + if in.UploadSize != nil { + in, out := &in.UploadSize, &out.UploadSize + *out = new(float64) + **out = **in + } + if in.UploadStart != nil { + in, out := &in.UploadStart, &out.UploadStart + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientDataObservation. +func (in *ClientDataObservation) DeepCopy() *ClientDataObservation { + if in == nil { + return nil + } + out := new(ClientDataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientDataParameters) DeepCopyInto(out *ClientDataParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.UploadEnd != nil { + in, out := &in.UploadEnd, &out.UploadEnd + *out = new(string) + **out = **in + } + if in.UploadSize != nil { + in, out := &in.UploadSize, &out.UploadSize + *out = new(float64) + **out = **in + } + if in.UploadStart != nil { + in, out := &in.UploadStart, &out.UploadStart + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientDataParameters. +func (in *ClientDataParameters) DeepCopy() *ClientDataParameters { + if in == nil { + return nil + } + out := new(ClientDataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchLogOptionsInitParameters) DeepCopyInto(out *CloudwatchLogOptionsInitParameters) { + *out = *in + if in.LogEnabled != nil { + in, out := &in.LogEnabled, &out.LogEnabled + *out = new(bool) + **out = **in + } + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } + if in.LogOutputFormat != nil { + in, out := &in.LogOutputFormat, &out.LogOutputFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogOptionsInitParameters. +func (in *CloudwatchLogOptionsInitParameters) DeepCopy() *CloudwatchLogOptionsInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogOptionsObservation) DeepCopyInto(out *CloudwatchLogOptionsObservation) { + *out = *in + if in.LogEnabled != nil { + in, out := &in.LogEnabled, &out.LogEnabled + *out = new(bool) + **out = **in + } + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } + if in.LogOutputFormat != nil { + in, out := &in.LogOutputFormat, &out.LogOutputFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogOptionsObservation. +func (in *CloudwatchLogOptionsObservation) DeepCopy() *CloudwatchLogOptionsObservation { + if in == nil { + return nil + } + out := new(CloudwatchLogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchLogOptionsParameters) DeepCopyInto(out *CloudwatchLogOptionsParameters) { + *out = *in + if in.LogEnabled != nil { + in, out := &in.LogEnabled, &out.LogEnabled + *out = new(bool) + **out = **in + } + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } + if in.LogOutputFormat != nil { + in, out := &in.LogOutputFormat, &out.LogOutputFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogOptionsParameters. +func (in *CloudwatchLogOptionsParameters) DeepCopy() *CloudwatchLogOptionsParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreditSpecificationInitParameters) DeepCopyInto(out *CreditSpecificationInitParameters) { + *out = *in + if in.CPUCredits != nil { + in, out := &in.CPUCredits, &out.CPUCredits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreditSpecificationInitParameters. +func (in *CreditSpecificationInitParameters) DeepCopy() *CreditSpecificationInitParameters { + if in == nil { + return nil + } + out := new(CreditSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreditSpecificationObservation) DeepCopyInto(out *CreditSpecificationObservation) { + *out = *in + if in.CPUCredits != nil { + in, out := &in.CPUCredits, &out.CPUCredits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreditSpecificationObservation. 
+func (in *CreditSpecificationObservation) DeepCopy() *CreditSpecificationObservation { + if in == nil { + return nil + } + out := new(CreditSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreditSpecificationParameters) DeepCopyInto(out *CreditSpecificationParameters) { + *out = *in + if in.CPUCredits != nil { + in, out := &in.CPUCredits, &out.CPUCredits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreditSpecificationParameters. +func (in *CreditSpecificationParameters) DeepCopy() *CreditSpecificationParameters { + if in == nil { + return nil + } + out := new(CreditSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEntryInitParameters) DeepCopyInto(out *DNSEntryInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntryInitParameters. +func (in *DNSEntryInitParameters) DeepCopy() *DNSEntryInitParameters { + if in == nil { + return nil + } + out := new(DNSEntryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEntryObservation) DeepCopyInto(out *DNSEntryObservation) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntryObservation. 
+func (in *DNSEntryObservation) DeepCopy() *DNSEntryObservation { + if in == nil { + return nil + } + out := new(DNSEntryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEntryParameters) DeepCopyInto(out *DNSEntryParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntryParameters. +func (in *DNSEntryParameters) DeepCopy() *DNSEntryParameters { + if in == nil { + return nil + } + out := new(DNSEntryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOptionsInitParameters) DeepCopyInto(out *DNSOptionsInitParameters) { + *out = *in + if in.DNSRecordIPType != nil { + in, out := &in.DNSRecordIPType, &out.DNSRecordIPType + *out = new(string) + **out = **in + } + if in.PrivateDNSOnlyForInboundResolverEndpoint != nil { + in, out := &in.PrivateDNSOnlyForInboundResolverEndpoint, &out.PrivateDNSOnlyForInboundResolverEndpoint + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOptionsInitParameters. +func (in *DNSOptionsInitParameters) DeepCopy() *DNSOptionsInitParameters { + if in == nil { + return nil + } + out := new(DNSOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSOptionsObservation) DeepCopyInto(out *DNSOptionsObservation) { + *out = *in + if in.DNSRecordIPType != nil { + in, out := &in.DNSRecordIPType, &out.DNSRecordIPType + *out = new(string) + **out = **in + } + if in.PrivateDNSOnlyForInboundResolverEndpoint != nil { + in, out := &in.PrivateDNSOnlyForInboundResolverEndpoint, &out.PrivateDNSOnlyForInboundResolverEndpoint + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOptionsObservation. +func (in *DNSOptionsObservation) DeepCopy() *DNSOptionsObservation { + if in == nil { + return nil + } + out := new(DNSOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOptionsParameters) DeepCopyInto(out *DNSOptionsParameters) { + *out = *in + if in.DNSRecordIPType != nil { + in, out := &in.DNSRecordIPType, &out.DNSRecordIPType + *out = new(string) + **out = **in + } + if in.PrivateDNSOnlyForInboundResolverEndpoint != nil { + in, out := &in.PrivateDNSOnlyForInboundResolverEndpoint, &out.PrivateDNSOnlyForInboundResolverEndpoint + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOptionsParameters. +func (in *DNSOptionsParameters) DeepCopy() *DNSOptionsParameters { + if in == nil { + return nil + } + out := new(DNSOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationOptionsInitParameters) DeepCopyInto(out *DestinationOptionsInitParameters) { + *out = *in + if in.FileFormat != nil { + in, out := &in.FileFormat, &out.FileFormat + *out = new(string) + **out = **in + } + if in.HiveCompatiblePartitions != nil { + in, out := &in.HiveCompatiblePartitions, &out.HiveCompatiblePartitions + *out = new(bool) + **out = **in + } + if in.PerHourPartition != nil { + in, out := &in.PerHourPartition, &out.PerHourPartition + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationOptionsInitParameters. +func (in *DestinationOptionsInitParameters) DeepCopy() *DestinationOptionsInitParameters { + if in == nil { + return nil + } + out := new(DestinationOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationOptionsObservation) DeepCopyInto(out *DestinationOptionsObservation) { + *out = *in + if in.FileFormat != nil { + in, out := &in.FileFormat, &out.FileFormat + *out = new(string) + **out = **in + } + if in.HiveCompatiblePartitions != nil { + in, out := &in.HiveCompatiblePartitions, &out.HiveCompatiblePartitions + *out = new(bool) + **out = **in + } + if in.PerHourPartition != nil { + in, out := &in.PerHourPartition, &out.PerHourPartition + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationOptionsObservation. +func (in *DestinationOptionsObservation) DeepCopy() *DestinationOptionsObservation { + if in == nil { + return nil + } + out := new(DestinationOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationOptionsParameters) DeepCopyInto(out *DestinationOptionsParameters) { + *out = *in + if in.FileFormat != nil { + in, out := &in.FileFormat, &out.FileFormat + *out = new(string) + **out = **in + } + if in.HiveCompatiblePartitions != nil { + in, out := &in.HiveCompatiblePartitions, &out.HiveCompatiblePartitions + *out = new(bool) + **out = **in + } + if in.PerHourPartition != nil { + in, out := &in.PerHourPartition, &out.PerHourPartition + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationOptionsParameters. +func (in *DestinationOptionsParameters) DeepCopy() *DestinationOptionsParameters { + if in == nil { + return nil + } + out := new(DestinationOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationPortRangeInitParameters) DeepCopyInto(out *DestinationPortRangeInitParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationPortRangeInitParameters. +func (in *DestinationPortRangeInitParameters) DeepCopy() *DestinationPortRangeInitParameters { + if in == nil { + return nil + } + out := new(DestinationPortRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationPortRangeObservation) DeepCopyInto(out *DestinationPortRangeObservation) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationPortRangeObservation. +func (in *DestinationPortRangeObservation) DeepCopy() *DestinationPortRangeObservation { + if in == nil { + return nil + } + out := new(DestinationPortRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationPortRangeParameters) DeepCopyInto(out *DestinationPortRangeParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationPortRangeParameters. +func (in *DestinationPortRangeParameters) DeepCopy() *DestinationPortRangeParameters { + if in == nil { + return nil + } + out := new(DestinationPortRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskContainerInitParameters) DeepCopyInto(out *DiskContainerInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UserBucket != nil { + in, out := &in.UserBucket, &out.UserBucket + *out = new(UserBucketInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskContainerInitParameters. +func (in *DiskContainerInitParameters) DeepCopy() *DiskContainerInitParameters { + if in == nil { + return nil + } + out := new(DiskContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskContainerObservation) DeepCopyInto(out *DiskContainerObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UserBucket != nil { + in, out := &in.UserBucket, &out.UserBucket + *out = new(UserBucketObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskContainerObservation. +func (in *DiskContainerObservation) DeepCopy() *DiskContainerObservation { + if in == nil { + return nil + } + out := new(DiskContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskContainerParameters) DeepCopyInto(out *DiskContainerParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UserBucket != nil { + in, out := &in.UserBucket, &out.UserBucket + *out = new(UserBucketParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskContainerParameters. +func (in *DiskContainerParameters) DeepCopy() *DiskContainerParameters { + if in == nil { + return nil + } + out := new(DiskContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSBlockDeviceInitParameters) DeepCopyInto(out *EBSBlockDeviceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = 
new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceInitParameters. +func (in *EBSBlockDeviceInitParameters) DeepCopy() *EBSBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(EBSBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSBlockDeviceObservation) DeepCopyInto(out *EBSBlockDeviceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeID != nil { + in, out := &in.VolumeID, &out.VolumeID + *out = new(string) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceObservation. 
+func (in *EBSBlockDeviceObservation) DeepCopy() *EBSBlockDeviceObservation { + if in == nil { + return nil + } + out := new(EBSBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSBlockDeviceParameters) DeepCopyInto(out *EBSBlockDeviceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceParameters. +func (in *EBSBlockDeviceParameters) DeepCopy() *EBSBlockDeviceParameters { + if in == nil { + return nil + } + out := new(EBSBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSInitParameters) DeepCopyInto(out *EBSInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, 
&out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSInitParameters. +func (in *EBSInitParameters) DeepCopy() *EBSInitParameters { + if in == nil { + return nil + } + out := new(EBSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSObservation) DeepCopyInto(out *EBSObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSObservation. +func (in *EBSObservation) DeepCopy() *EBSObservation { + if in == nil { + return nil + } + out := new(EBSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSParameters) DeepCopyInto(out *EBSParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSParameters. +func (in *EBSParameters) DeepCopy() *EBSParameters { + if in == nil { + return nil + } + out := new(EBSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSSnapshotImport) DeepCopyInto(out *EBSSnapshotImport) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSSnapshotImport. 
+func (in *EBSSnapshotImport) DeepCopy() *EBSSnapshotImport { + if in == nil { + return nil + } + out := new(EBSSnapshotImport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EBSSnapshotImport) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSSnapshotImportInitParameters) DeepCopyInto(out *EBSSnapshotImportInitParameters) { + *out = *in + if in.ClientData != nil { + in, out := &in.ClientData, &out.ClientData + *out = new(ClientDataInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskContainer != nil { + in, out := &in.DiskContainer, &out.DiskContainer + *out = new(DiskContainerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PermanentRestore != nil { + in, out := &in.PermanentRestore, &out.PermanentRestore + *out = new(bool) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.StorageTier != nil { + in, out := &in.StorageTier, &out.StorageTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, 
len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemporaryRestoreDays != nil { + in, out := &in.TemporaryRestoreDays, &out.TemporaryRestoreDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSSnapshotImportInitParameters. +func (in *EBSSnapshotImportInitParameters) DeepCopy() *EBSSnapshotImportInitParameters { + if in == nil { + return nil + } + out := new(EBSSnapshotImportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSSnapshotImportList) DeepCopyInto(out *EBSSnapshotImportList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EBSSnapshotImport, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSSnapshotImportList. +func (in *EBSSnapshotImportList) DeepCopy() *EBSSnapshotImportList { + if in == nil { + return nil + } + out := new(EBSSnapshotImportList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EBSSnapshotImportList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSSnapshotImportObservation) DeepCopyInto(out *EBSSnapshotImportObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ClientData != nil { + in, out := &in.ClientData, &out.ClientData + *out = new(ClientDataObservation) + (*in).DeepCopyInto(*out) + } + if in.DataEncryptionKeyID != nil { + in, out := &in.DataEncryptionKeyID, &out.DataEncryptionKeyID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskContainer != nil { + in, out := &in.DiskContainer, &out.DiskContainer + *out = new(DiskContainerObservation) + (*in).DeepCopyInto(*out) + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.OutpostArn != nil { + in, out := &in.OutpostArn, &out.OutpostArn + *out = new(string) + **out = **in + } + if in.OwnerAlias != nil { + in, out := &in.OwnerAlias, &out.OwnerAlias + *out = new(string) + **out = **in + } + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID + *out = new(string) + **out = **in + } + if in.PermanentRestore != nil { + in, out := &in.PermanentRestore, &out.PermanentRestore + *out = new(bool) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.StorageTier != nil { + in, out := &in.StorageTier, &out.StorageTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = 
new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemporaryRestoreDays != nil { + in, out := &in.TemporaryRestoreDays, &out.TemporaryRestoreDays + *out = new(float64) + **out = **in + } + if in.VolumeID != nil { + in, out := &in.VolumeID, &out.VolumeID + *out = new(string) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSSnapshotImportObservation. +func (in *EBSSnapshotImportObservation) DeepCopy() *EBSSnapshotImportObservation { + if in == nil { + return nil + } + out := new(EBSSnapshotImportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSSnapshotImportParameters) DeepCopyInto(out *EBSSnapshotImportParameters) { + *out = *in + if in.ClientData != nil { + in, out := &in.ClientData, &out.ClientData + *out = new(ClientDataParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskContainer != nil { + in, out := &in.DiskContainer, &out.DiskContainer + *out = new(DiskContainerParameters) + (*in).DeepCopyInto(*out) + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PermanentRestore != nil { + in, out := &in.PermanentRestore, &out.PermanentRestore + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.StorageTier != nil { + in, out := &in.StorageTier, &out.StorageTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemporaryRestoreDays != nil { + in, out := &in.TemporaryRestoreDays, &out.TemporaryRestoreDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new EBSSnapshotImportParameters. +func (in *EBSSnapshotImportParameters) DeepCopy() *EBSSnapshotImportParameters { + if in == nil { + return nil + } + out := new(EBSSnapshotImportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSSnapshotImportSpec) DeepCopyInto(out *EBSSnapshotImportSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSSnapshotImportSpec. +func (in *EBSSnapshotImportSpec) DeepCopy() *EBSSnapshotImportSpec { + if in == nil { + return nil + } + out := new(EBSSnapshotImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSSnapshotImportStatus) DeepCopyInto(out *EBSSnapshotImportStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSSnapshotImportStatus. +func (in *EBSSnapshotImportStatus) DeepCopy() *EBSSnapshotImportStatus { + if in == nil { + return nil + } + out := new(EBSSnapshotImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticGpuSpecificationsInitParameters) DeepCopyInto(out *ElasticGpuSpecificationsInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticGpuSpecificationsInitParameters. 
+func (in *ElasticGpuSpecificationsInitParameters) DeepCopy() *ElasticGpuSpecificationsInitParameters { + if in == nil { + return nil + } + out := new(ElasticGpuSpecificationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticGpuSpecificationsObservation) DeepCopyInto(out *ElasticGpuSpecificationsObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticGpuSpecificationsObservation. +func (in *ElasticGpuSpecificationsObservation) DeepCopy() *ElasticGpuSpecificationsObservation { + if in == nil { + return nil + } + out := new(ElasticGpuSpecificationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticGpuSpecificationsParameters) DeepCopyInto(out *ElasticGpuSpecificationsParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticGpuSpecificationsParameters. +func (in *ElasticGpuSpecificationsParameters) DeepCopy() *ElasticGpuSpecificationsParameters { + if in == nil { + return nil + } + out := new(ElasticGpuSpecificationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticInferenceAcceleratorInitParameters) DeepCopyInto(out *ElasticInferenceAcceleratorInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticInferenceAcceleratorInitParameters. +func (in *ElasticInferenceAcceleratorInitParameters) DeepCopy() *ElasticInferenceAcceleratorInitParameters { + if in == nil { + return nil + } + out := new(ElasticInferenceAcceleratorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticInferenceAcceleratorObservation) DeepCopyInto(out *ElasticInferenceAcceleratorObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticInferenceAcceleratorObservation. +func (in *ElasticInferenceAcceleratorObservation) DeepCopy() *ElasticInferenceAcceleratorObservation { + if in == nil { + return nil + } + out := new(ElasticInferenceAcceleratorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticInferenceAcceleratorParameters) DeepCopyInto(out *ElasticInferenceAcceleratorParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticInferenceAcceleratorParameters. 
+func (in *ElasticInferenceAcceleratorParameters) DeepCopy() *ElasticInferenceAcceleratorParameters { + if in == nil { + return nil + } + out := new(ElasticInferenceAcceleratorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnclaveOptionsInitParameters) DeepCopyInto(out *EnclaveOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnclaveOptionsInitParameters. +func (in *EnclaveOptionsInitParameters) DeepCopy() *EnclaveOptionsInitParameters { + if in == nil { + return nil + } + out := new(EnclaveOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnclaveOptionsObservation) DeepCopyInto(out *EnclaveOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnclaveOptionsObservation. +func (in *EnclaveOptionsObservation) DeepCopy() *EnclaveOptionsObservation { + if in == nil { + return nil + } + out := new(EnclaveOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnclaveOptionsParameters) DeepCopyInto(out *EnclaveOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnclaveOptionsParameters. 
+func (in *EnclaveOptionsParameters) DeepCopy() *EnclaveOptionsParameters { + if in == nil { + return nil + } + out := new(EnclaveOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralBlockDeviceInitParameters) DeepCopyInto(out *EphemeralBlockDeviceInitParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralBlockDeviceInitParameters. +func (in *EphemeralBlockDeviceInitParameters) DeepCopy() *EphemeralBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(EphemeralBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralBlockDeviceObservation) DeepCopyInto(out *EphemeralBlockDeviceObservation) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralBlockDeviceObservation. 
+func (in *EphemeralBlockDeviceObservation) DeepCopy() *EphemeralBlockDeviceObservation { + if in == nil { + return nil + } + out := new(EphemeralBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralBlockDeviceParameters) DeepCopyInto(out *EphemeralBlockDeviceParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralBlockDeviceParameters. +func (in *EphemeralBlockDeviceParameters) DeepCopy() *EphemeralBlockDeviceParameters { + if in == nil { + return nil + } + out := new(EphemeralBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowLog) DeepCopyInto(out *FlowLog) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowLog. +func (in *FlowLog) DeepCopy() *FlowLog { + if in == nil { + return nil + } + out := new(FlowLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *FlowLogInitParameters) DeepCopyInto(out *FlowLogInitParameters) { + *out = *in + if in.DeliverCrossAccountRole != nil { + in, out := &in.DeliverCrossAccountRole, &out.DeliverCrossAccountRole + *out = new(string) + **out = **in + } + if in.DestinationOptions != nil { + in, out := &in.DestinationOptions, &out.DestinationOptions + *out = new(DestinationOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EniID != nil { + in, out := &in.EniID, &out.EniID + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.LogDestinationRef != nil { + in, out := &in.LogDestinationRef, &out.LogDestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogDestinationSelector != nil { + in, out := &in.LogDestinationSelector, &out.LogDestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LogDestinationType != nil { + in, out := &in.LogDestinationType, &out.LogDestinationType + *out = new(string) + **out = **in + } + if in.LogFormat != nil { + in, out := &in.LogFormat, &out.LogFormat + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MaxAggregationInterval != nil { + in, out := &in.MaxAggregationInterval, &out.MaxAggregationInterval + *out = new(float64) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = 
**in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrafficType != nil { + in, out := &in.TrafficType, &out.TrafficType + *out = new(string) + **out = **in + } + if in.TransitGatewayAttachmentID != nil { + in, out := &in.TransitGatewayAttachmentID, &out.TransitGatewayAttachmentID + *out = new(string) + **out = **in + } + if in.TransitGatewayID != nil { + in, out := &in.TransitGatewayID, &out.TransitGatewayID + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowLogInitParameters. +func (in *FlowLogInitParameters) DeepCopy() *FlowLogInitParameters { + if in == nil { + return nil + } + out := new(FlowLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowLogList) DeepCopyInto(out *FlowLogList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowLog, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowLogList. +func (in *FlowLogList) DeepCopy() *FlowLogList { + if in == nil { + return nil + } + out := new(FlowLogList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowLogList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowLogObservation) DeepCopyInto(out *FlowLogObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DeliverCrossAccountRole != nil { + in, out := &in.DeliverCrossAccountRole, &out.DeliverCrossAccountRole + *out = new(string) + **out = **in + } + if in.DestinationOptions != nil { + in, out := &in.DestinationOptions, &out.DestinationOptions + *out = new(DestinationOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.EniID != nil { + in, out := &in.EniID, &out.EniID + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.LogDestinationType != nil { + in, out := &in.LogDestinationType, &out.LogDestinationType + *out = new(string) + **out = **in + } + if in.LogFormat != nil 
{ + in, out := &in.LogFormat, &out.LogFormat + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MaxAggregationInterval != nil { + in, out := &in.MaxAggregationInterval, &out.MaxAggregationInterval + *out = new(float64) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrafficType != nil { + in, out := &in.TrafficType, &out.TrafficType + *out = new(string) + **out = **in + } + if in.TransitGatewayAttachmentID != nil { + in, out := &in.TransitGatewayAttachmentID, &out.TransitGatewayAttachmentID + *out = new(string) + **out = **in + } + if in.TransitGatewayID != nil { + in, out := &in.TransitGatewayID, &out.TransitGatewayID + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowLogObservation. +func (in *FlowLogObservation) DeepCopy() *FlowLogObservation { + if in == nil { + return nil + } + out := new(FlowLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *FlowLogParameters) DeepCopyInto(out *FlowLogParameters) { + *out = *in + if in.DeliverCrossAccountRole != nil { + in, out := &in.DeliverCrossAccountRole, &out.DeliverCrossAccountRole + *out = new(string) + **out = **in + } + if in.DestinationOptions != nil { + in, out := &in.DestinationOptions, &out.DestinationOptions + *out = new(DestinationOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.EniID != nil { + in, out := &in.EniID, &out.EniID + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.LogDestinationRef != nil { + in, out := &in.LogDestinationRef, &out.LogDestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogDestinationSelector != nil { + in, out := &in.LogDestinationSelector, &out.LogDestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LogDestinationType != nil { + in, out := &in.LogDestinationType, &out.LogDestinationType + *out = new(string) + **out = **in + } + if in.LogFormat != nil { + in, out := &in.LogFormat, &out.LogFormat + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MaxAggregationInterval != nil { + in, out := &in.MaxAggregationInterval, &out.MaxAggregationInterval + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if 
in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrafficType != nil { + in, out := &in.TrafficType, &out.TrafficType + *out = new(string) + **out = **in + } + if in.TransitGatewayAttachmentID != nil { + in, out := &in.TransitGatewayAttachmentID, &out.TransitGatewayAttachmentID + *out = new(string) + **out = **in + } + if in.TransitGatewayID != nil { + in, out := &in.TransitGatewayID, &out.TransitGatewayID + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowLogParameters. +func (in *FlowLogParameters) DeepCopy() *FlowLogParameters { + if in == nil { + return nil + } + out := new(FlowLogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowLogSpec) DeepCopyInto(out *FlowLogSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowLogSpec. +func (in *FlowLogSpec) DeepCopy() *FlowLogSpec { + if in == nil { + return nil + } + out := new(FlowLogSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowLogStatus) DeepCopyInto(out *FlowLogStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowLogStatus. +func (in *FlowLogStatus) DeepCopy() *FlowLogStatus { + if in == nil { + return nil + } + out := new(FlowLogStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HibernationOptionsInitParameters) DeepCopyInto(out *HibernationOptionsInitParameters) { + *out = *in + if in.Configured != nil { + in, out := &in.Configured, &out.Configured + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationOptionsInitParameters. +func (in *HibernationOptionsInitParameters) DeepCopy() *HibernationOptionsInitParameters { + if in == nil { + return nil + } + out := new(HibernationOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HibernationOptionsObservation) DeepCopyInto(out *HibernationOptionsObservation) { + *out = *in + if in.Configured != nil { + in, out := &in.Configured, &out.Configured + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationOptionsObservation. +func (in *HibernationOptionsObservation) DeepCopy() *HibernationOptionsObservation { + if in == nil { + return nil + } + out := new(HibernationOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HibernationOptionsParameters) DeepCopyInto(out *HibernationOptionsParameters) { + *out = *in + if in.Configured != nil { + in, out := &in.Configured, &out.Configured + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationOptionsParameters. +func (in *HibernationOptionsParameters) DeepCopy() *HibernationOptionsParameters { + if in == nil { + return nil + } + out := new(HibernationOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IAMInstanceProfileInitParameters) DeepCopyInto(out *IAMInstanceProfileInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMInstanceProfileInitParameters. +func (in *IAMInstanceProfileInitParameters) DeepCopy() *IAMInstanceProfileInitParameters { + if in == nil { + return nil + } + out := new(IAMInstanceProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMInstanceProfileObservation) DeepCopyInto(out *IAMInstanceProfileObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMInstanceProfileObservation. +func (in *IAMInstanceProfileObservation) DeepCopy() *IAMInstanceProfileObservation { + if in == nil { + return nil + } + out := new(IAMInstanceProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *IAMInstanceProfileParameters) DeepCopyInto(out *IAMInstanceProfileParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMInstanceProfileParameters. +func (in *IAMInstanceProfileParameters) DeepCopy() *IAMInstanceProfileParameters { + if in == nil { + return nil + } + out := new(IAMInstanceProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Instance) DeepCopyInto(out *Instance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. +func (in *Instance) DeepCopy() *Instance { + if in == nil { + return nil + } + out := new(Instance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Instance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { + *out = *in + if in.AMI != nil { + in, out := &in.AMI, &out.AMI + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.CPUCoreCount != nil { + in, out := &in.CPUCoreCount, &out.CPUCoreCount + *out = new(float64) + **out = **in + } + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(CPUOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CPUThreadsPerCore != nil { + in, out := &in.CPUThreadsPerCore, &out.CPUThreadsPerCore + *out = new(float64) + **out = **in + } + if in.CapacityReservationSpecification != nil { + in, out := &in.CapacityReservationSpecification, &out.CapacityReservationSpecification + *out = new(CapacityReservationSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CreditSpecification != nil { + in, out := &in.CreditSpecification, &out.CreditSpecification + *out = new(CreditSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableAPIStop != nil { + in, out := &in.DisableAPIStop, &out.DisableAPIStop + *out = new(bool) + **out = **in + } + if in.DisableAPITermination != nil { + in, out := &in.DisableAPITermination, &out.DisableAPITermination + *out = new(bool) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]EBSBlockDeviceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EnclaveOptions != nil { + in, out := &in.EnclaveOptions, &out.EnclaveOptions + *out = new(EnclaveOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]EphemeralBlockDeviceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GetPasswordData != nil { + in, out := &in.GetPasswordData, &out.GetPasswordData + *out = new(bool) + **out = **in + } + if in.Hibernation != nil { + in, out := &in.Hibernation, &out.Hibernation + *out = new(bool) + **out = **in + } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostResourceGroupArn != nil { + in, out := &in.HostResourceGroupArn, &out.HostResourceGroupArn + *out = new(string) + **out = **in + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.IPv6AddressCount != nil { + in, out := &in.IPv6AddressCount, &out.IPv6AddressCount + *out = new(float64) + **out = **in + } + if in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceInitiatedShutdownBehavior != nil { + in, out := &in.InstanceInitiatedShutdownBehavior, &out.InstanceInitiatedShutdownBehavior + *out = new(string) + **out = **in + } + if in.InstanceMarketOptions != nil { + in, out := &in.InstanceMarketOptions, &out.InstanceMarketOptions + *out = new(InstanceMarketOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if 
in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceOptions != nil { + in, out := &in.MaintenanceOptions, &out.MaintenanceOptions + *out = new(MaintenanceOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(MetadataOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(bool) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementPartitionNumber != nil { + in, out := &in.PlacementPartitionNumber, &out.PlacementPartitionNumber + *out = new(float64) + **out = **in + } + if in.PrivateDNSNameOptions != nil { + in, out := &in.PrivateDNSNameOptions, &out.PrivateDNSNameOptions + *out = new(PrivateDNSNameOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateIP != nil { + in, out := &in.PrivateIP, &out.PrivateIP + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = new(RootBlockDeviceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecondaryPrivateIps != nil { + in, out := &in.SecondaryPrivateIps, &out.SecondaryPrivateIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SourceDestCheck != nil { + in, out := &in.SourceDestCheck, 
&out.SourceDestCheck + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tenancy != nil { + in, out := &in.Tenancy, &out.Tenancy + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } + if in.UserDataReplaceOnChange != nil { + in, out := &in.UserDataReplaceOnChange, &out.UserDataReplaceOnChange + *out = new(bool) + **out = **in + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VolumeTags != nil { + in, out := &in.VolumeTags, &out.VolumeTags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceInitParameters. +func (in *InstanceInitParameters) DeepCopy() *InstanceInitParameters { + if in == nil { + return nil + } + out := new(InstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceList) DeepCopyInto(out *InstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Instance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceList. +func (in *InstanceList) DeepCopy() *InstanceList { + if in == nil { + return nil + } + out := new(InstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceMarketOptionsInitParameters) DeepCopyInto(out *InstanceMarketOptionsInitParameters) { + *out = *in + if in.MarketType != nil { + in, out := &in.MarketType, &out.MarketType + *out = new(string) + **out = **in + } + if in.SpotOptions != nil { + in, out := &in.SpotOptions, &out.SpotOptions + *out = new(SpotOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMarketOptionsInitParameters. +func (in *InstanceMarketOptionsInitParameters) DeepCopy() *InstanceMarketOptionsInitParameters { + if in == nil { + return nil + } + out := new(InstanceMarketOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMarketOptionsObservation) DeepCopyInto(out *InstanceMarketOptionsObservation) { + *out = *in + if in.MarketType != nil { + in, out := &in.MarketType, &out.MarketType + *out = new(string) + **out = **in + } + if in.SpotOptions != nil { + in, out := &in.SpotOptions, &out.SpotOptions + *out = new(SpotOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMarketOptionsObservation. +func (in *InstanceMarketOptionsObservation) DeepCopy() *InstanceMarketOptionsObservation { + if in == nil { + return nil + } + out := new(InstanceMarketOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceMarketOptionsParameters) DeepCopyInto(out *InstanceMarketOptionsParameters) { + *out = *in + if in.MarketType != nil { + in, out := &in.MarketType, &out.MarketType + *out = new(string) + **out = **in + } + if in.SpotOptions != nil { + in, out := &in.SpotOptions, &out.SpotOptions + *out = new(SpotOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMarketOptionsParameters. +func (in *InstanceMarketOptionsParameters) DeepCopy() *InstanceMarketOptionsParameters { + if in == nil { + return nil + } + out := new(InstanceMarketOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMarketOptionsSpotOptionsInitParameters) DeepCopyInto(out *InstanceMarketOptionsSpotOptionsInitParameters) { + *out = *in + if in.BlockDurationMinutes != nil { + in, out := &in.BlockDurationMinutes, &out.BlockDurationMinutes + *out = new(float64) + **out = **in + } + if in.InstanceInterruptionBehavior != nil { + in, out := &in.InstanceInterruptionBehavior, &out.InstanceInterruptionBehavior + *out = new(string) + **out = **in + } + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + *out = new(string) + **out = **in + } + if in.SpotInstanceType != nil { + in, out := &in.SpotInstanceType, &out.SpotInstanceType + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMarketOptionsSpotOptionsInitParameters. 
+func (in *InstanceMarketOptionsSpotOptionsInitParameters) DeepCopy() *InstanceMarketOptionsSpotOptionsInitParameters { + if in == nil { + return nil + } + out := new(InstanceMarketOptionsSpotOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMarketOptionsSpotOptionsObservation) DeepCopyInto(out *InstanceMarketOptionsSpotOptionsObservation) { + *out = *in + if in.BlockDurationMinutes != nil { + in, out := &in.BlockDurationMinutes, &out.BlockDurationMinutes + *out = new(float64) + **out = **in + } + if in.InstanceInterruptionBehavior != nil { + in, out := &in.InstanceInterruptionBehavior, &out.InstanceInterruptionBehavior + *out = new(string) + **out = **in + } + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + *out = new(string) + **out = **in + } + if in.SpotInstanceType != nil { + in, out := &in.SpotInstanceType, &out.SpotInstanceType + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMarketOptionsSpotOptionsObservation. +func (in *InstanceMarketOptionsSpotOptionsObservation) DeepCopy() *InstanceMarketOptionsSpotOptionsObservation { + if in == nil { + return nil + } + out := new(InstanceMarketOptionsSpotOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceMarketOptionsSpotOptionsParameters) DeepCopyInto(out *InstanceMarketOptionsSpotOptionsParameters) { + *out = *in + if in.BlockDurationMinutes != nil { + in, out := &in.BlockDurationMinutes, &out.BlockDurationMinutes + *out = new(float64) + **out = **in + } + if in.InstanceInterruptionBehavior != nil { + in, out := &in.InstanceInterruptionBehavior, &out.InstanceInterruptionBehavior + *out = new(string) + **out = **in + } + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + *out = new(string) + **out = **in + } + if in.SpotInstanceType != nil { + in, out := &in.SpotInstanceType, &out.SpotInstanceType + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMarketOptionsSpotOptionsParameters. +func (in *InstanceMarketOptionsSpotOptionsParameters) DeepCopy() *InstanceMarketOptionsSpotOptionsParameters { + if in == nil { + return nil + } + out := new(InstanceMarketOptionsSpotOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { + *out = *in + if in.AMI != nil { + in, out := &in.AMI, &out.AMI + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.CPUCoreCount != nil { + in, out := &in.CPUCoreCount, &out.CPUCoreCount + *out = new(float64) + **out = **in + } + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(CPUOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CPUThreadsPerCore != nil { + in, out := &in.CPUThreadsPerCore, &out.CPUThreadsPerCore + *out = new(float64) + **out = **in + } + if in.CapacityReservationSpecification != nil { + in, out := &in.CapacityReservationSpecification, &out.CapacityReservationSpecification + *out = new(CapacityReservationSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.CreditSpecification != nil { + in, out := &in.CreditSpecification, &out.CreditSpecification + *out = new(CreditSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.DisableAPIStop != nil { + in, out := &in.DisableAPIStop, &out.DisableAPIStop + *out = new(bool) + **out = **in + } + if in.DisableAPITermination != nil { + in, out := &in.DisableAPITermination, &out.DisableAPITermination + *out = new(bool) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]EBSBlockDeviceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EnclaveOptions != nil { + in, out := 
&in.EnclaveOptions, &out.EnclaveOptions + *out = new(EnclaveOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]EphemeralBlockDeviceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GetPasswordData != nil { + in, out := &in.GetPasswordData, &out.GetPasswordData + *out = new(bool) + **out = **in + } + if in.Hibernation != nil { + in, out := &in.Hibernation, &out.Hibernation + *out = new(bool) + **out = **in + } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostResourceGroupArn != nil { + in, out := &in.HostResourceGroupArn, &out.HostResourceGroupArn + *out = new(string) + **out = **in + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPv6AddressCount != nil { + in, out := &in.IPv6AddressCount, &out.IPv6AddressCount + *out = new(float64) + **out = **in + } + if in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceInitiatedShutdownBehavior != nil { + in, out := &in.InstanceInitiatedShutdownBehavior, &out.InstanceInitiatedShutdownBehavior + *out = new(string) + **out = **in + } + if in.InstanceLifecycle != nil { + in, out := &in.InstanceLifecycle, &out.InstanceLifecycle + *out = new(string) + **out = **in + } + if in.InstanceMarketOptions != nil { + in, out := &in.InstanceMarketOptions, &out.InstanceMarketOptions + *out = new(InstanceMarketOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.InstanceState != nil { + in, out := &in.InstanceState, 
&out.InstanceState + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateObservation) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceOptions != nil { + in, out := &in.MaintenanceOptions, &out.MaintenanceOptions + *out = new(MaintenanceOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(MetadataOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(bool) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutpostArn != nil { + in, out := &in.OutpostArn, &out.OutpostArn + *out = new(string) + **out = **in + } + if in.PasswordData != nil { + in, out := &in.PasswordData, &out.PasswordData + *out = new(string) + **out = **in + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementPartitionNumber != nil { + in, out := &in.PlacementPartitionNumber, &out.PlacementPartitionNumber + *out = new(float64) + **out = **in + } + if in.PrimaryNetworkInterfaceID != nil { + in, out := &in.PrimaryNetworkInterfaceID, &out.PrimaryNetworkInterfaceID + *out = new(string) + **out = **in + } + if in.PrivateDNS != nil { + in, out := &in.PrivateDNS, &out.PrivateDNS + *out = new(string) + **out = **in + } + if in.PrivateDNSNameOptions != nil { + in, out := &in.PrivateDNSNameOptions, &out.PrivateDNSNameOptions + *out = 
new(PrivateDNSNameOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.PrivateIP != nil { + in, out := &in.PrivateIP, &out.PrivateIP + *out = new(string) + **out = **in + } + if in.PublicDNS != nil { + in, out := &in.PublicDNS, &out.PublicDNS + *out = new(string) + **out = **in + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = new(RootBlockDeviceObservation) + (*in).DeepCopyInto(*out) + } + if in.SecondaryPrivateIps != nil { + in, out := &in.SecondaryPrivateIps, &out.SecondaryPrivateIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SourceDestCheck != nil { + in, out := &in.SourceDestCheck, &out.SourceDestCheck + *out = new(bool) + **out = **in + } + if in.SpotInstanceRequestID != nil { + in, out := &in.SpotInstanceRequestID, &out.SpotInstanceRequestID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] 
+ in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tenancy != nil { + in, out := &in.Tenancy, &out.Tenancy + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } + if in.UserDataReplaceOnChange != nil { + in, out := &in.UserDataReplaceOnChange, &out.UserDataReplaceOnChange + *out = new(bool) + **out = **in + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VolumeTags != nil { + in, out := &in.VolumeTags, &out.VolumeTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceObservation. +func (in *InstanceObservation) DeepCopy() *InstanceObservation { + if in == nil { + return nil + } + out := new(InstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { + *out = *in + if in.AMI != nil { + in, out := &in.AMI, &out.AMI + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.CPUCoreCount != nil { + in, out := &in.CPUCoreCount, &out.CPUCoreCount + *out = new(float64) + **out = **in + } + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(CPUOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CPUThreadsPerCore != nil { + in, out := &in.CPUThreadsPerCore, &out.CPUThreadsPerCore + *out = new(float64) + **out = **in + } + if in.CapacityReservationSpecification != nil { + in, out := &in.CapacityReservationSpecification, &out.CapacityReservationSpecification + *out = new(CapacityReservationSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.CreditSpecification != nil { + in, out := &in.CreditSpecification, &out.CreditSpecification + *out = new(CreditSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableAPIStop != nil { + in, out := &in.DisableAPIStop, &out.DisableAPIStop + *out = new(bool) + **out = **in + } + if in.DisableAPITermination != nil { + in, out := &in.DisableAPITermination, &out.DisableAPITermination + *out = new(bool) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]EBSBlockDeviceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EnclaveOptions != nil { + in, out := &in.EnclaveOptions, &out.EnclaveOptions + *out = new(EnclaveOptionsParameters) + (*in).DeepCopyInto(*out) + } 
+ if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]EphemeralBlockDeviceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GetPasswordData != nil { + in, out := &in.GetPasswordData, &out.GetPasswordData + *out = new(bool) + **out = **in + } + if in.Hibernation != nil { + in, out := &in.Hibernation, &out.Hibernation + *out = new(bool) + **out = **in + } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostResourceGroupArn != nil { + in, out := &in.HostResourceGroupArn, &out.HostResourceGroupArn + *out = new(string) + **out = **in + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.IPv6AddressCount != nil { + in, out := &in.IPv6AddressCount, &out.IPv6AddressCount + *out = new(float64) + **out = **in + } + if in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceInitiatedShutdownBehavior != nil { + in, out := &in.InstanceInitiatedShutdownBehavior, &out.InstanceInitiatedShutdownBehavior + *out = new(string) + **out = **in + } + if in.InstanceMarketOptions != nil { + in, out := &in.InstanceMarketOptions, &out.InstanceMarketOptions + *out = new(InstanceMarketOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceOptions != 
nil { + in, out := &in.MaintenanceOptions, &out.MaintenanceOptions + *out = new(MaintenanceOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(MetadataOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(bool) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementPartitionNumber != nil { + in, out := &in.PlacementPartitionNumber, &out.PlacementPartitionNumber + *out = new(float64) + **out = **in + } + if in.PrivateDNSNameOptions != nil { + in, out := &in.PrivateDNSNameOptions, &out.PrivateDNSNameOptions + *out = new(PrivateDNSNameOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateIP != nil { + in, out := &in.PrivateIP, &out.PrivateIP + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = new(RootBlockDeviceParameters) + (*in).DeepCopyInto(*out) + } + if in.SecondaryPrivateIps != nil { + in, out := &in.SecondaryPrivateIps, &out.SecondaryPrivateIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SourceDestCheck != nil { + in, out := &in.SourceDestCheck, &out.SourceDestCheck + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := 
&in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tenancy != nil { + in, out := &in.Tenancy, &out.Tenancy + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } + if in.UserDataReplaceOnChange != nil { + in, out := &in.UserDataReplaceOnChange, &out.UserDataReplaceOnChange + *out = new(bool) + **out = **in + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VolumeTags != nil { + in, out := &in.VolumeTags, &out.VolumeTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + 
**out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceParameters. +func (in *InstanceParameters) DeepCopy() *InstanceParameters { + if in == nil { + return nil + } + out := new(InstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsAcceleratorCountInitParameters) DeepCopyInto(out *InstanceRequirementsAcceleratorCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsAcceleratorCountInitParameters. +func (in *InstanceRequirementsAcceleratorCountInitParameters) DeepCopy() *InstanceRequirementsAcceleratorCountInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsAcceleratorCountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsAcceleratorCountObservation) DeepCopyInto(out *InstanceRequirementsAcceleratorCountObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsAcceleratorCountObservation. 
+func (in *InstanceRequirementsAcceleratorCountObservation) DeepCopy() *InstanceRequirementsAcceleratorCountObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsAcceleratorCountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsAcceleratorCountParameters) DeepCopyInto(out *InstanceRequirementsAcceleratorCountParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsAcceleratorCountParameters. +func (in *InstanceRequirementsAcceleratorCountParameters) DeepCopy() *InstanceRequirementsAcceleratorCountParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsAcceleratorCountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsAcceleratorTotalMemoryMibInitParameters) DeepCopyInto(out *InstanceRequirementsAcceleratorTotalMemoryMibInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsAcceleratorTotalMemoryMibInitParameters. 
+func (in *InstanceRequirementsAcceleratorTotalMemoryMibInitParameters) DeepCopy() *InstanceRequirementsAcceleratorTotalMemoryMibInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsAcceleratorTotalMemoryMibInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsAcceleratorTotalMemoryMibObservation) DeepCopyInto(out *InstanceRequirementsAcceleratorTotalMemoryMibObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsAcceleratorTotalMemoryMibObservation. +func (in *InstanceRequirementsAcceleratorTotalMemoryMibObservation) DeepCopy() *InstanceRequirementsAcceleratorTotalMemoryMibObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsAcceleratorTotalMemoryMibObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsAcceleratorTotalMemoryMibParameters) DeepCopyInto(out *InstanceRequirementsAcceleratorTotalMemoryMibParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsAcceleratorTotalMemoryMibParameters. 
+func (in *InstanceRequirementsAcceleratorTotalMemoryMibParameters) DeepCopy() *InstanceRequirementsAcceleratorTotalMemoryMibParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsAcceleratorTotalMemoryMibParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsBaselineEBSBandwidthMbpsInitParameters) DeepCopyInto(out *InstanceRequirementsBaselineEBSBandwidthMbpsInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsBaselineEBSBandwidthMbpsInitParameters. +func (in *InstanceRequirementsBaselineEBSBandwidthMbpsInitParameters) DeepCopy() *InstanceRequirementsBaselineEBSBandwidthMbpsInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsBaselineEBSBandwidthMbpsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsBaselineEBSBandwidthMbpsObservation) DeepCopyInto(out *InstanceRequirementsBaselineEBSBandwidthMbpsObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsBaselineEBSBandwidthMbpsObservation. 
+func (in *InstanceRequirementsBaselineEBSBandwidthMbpsObservation) DeepCopy() *InstanceRequirementsBaselineEBSBandwidthMbpsObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsBaselineEBSBandwidthMbpsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsBaselineEBSBandwidthMbpsParameters) DeepCopyInto(out *InstanceRequirementsBaselineEBSBandwidthMbpsParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsBaselineEBSBandwidthMbpsParameters. +func (in *InstanceRequirementsBaselineEBSBandwidthMbpsParameters) DeepCopy() *InstanceRequirementsBaselineEBSBandwidthMbpsParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsBaselineEBSBandwidthMbpsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsInitParameters) DeepCopyInto(out *InstanceRequirementsInitParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(AcceleratorCountInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = new(AcceleratorTotalMemoryMibInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = new(BaselineEBSBandwidthMbpsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if 
in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = new(MemoryGibPerVcpuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = new(MemoryMibInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = new(NetworkBandwidthGbpsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = new(NetworkInterfaceCountInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, 
&out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = new(TotalLocalStorageGbInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = new(VcpuCountInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsInitParameters. +func (in *InstanceRequirementsInitParameters) DeepCopy() *InstanceRequirementsInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsMemoryGibPerVcpuInitParameters) DeepCopyInto(out *InstanceRequirementsMemoryGibPerVcpuInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsMemoryGibPerVcpuInitParameters. 
+func (in *InstanceRequirementsMemoryGibPerVcpuInitParameters) DeepCopy() *InstanceRequirementsMemoryGibPerVcpuInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsMemoryGibPerVcpuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsMemoryGibPerVcpuObservation) DeepCopyInto(out *InstanceRequirementsMemoryGibPerVcpuObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsMemoryGibPerVcpuObservation. +func (in *InstanceRequirementsMemoryGibPerVcpuObservation) DeepCopy() *InstanceRequirementsMemoryGibPerVcpuObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsMemoryGibPerVcpuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsMemoryGibPerVcpuParameters) DeepCopyInto(out *InstanceRequirementsMemoryGibPerVcpuParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsMemoryGibPerVcpuParameters. 
+func (in *InstanceRequirementsMemoryGibPerVcpuParameters) DeepCopy() *InstanceRequirementsMemoryGibPerVcpuParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsMemoryGibPerVcpuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsMemoryMibInitParameters) DeepCopyInto(out *InstanceRequirementsMemoryMibInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsMemoryMibInitParameters. +func (in *InstanceRequirementsMemoryMibInitParameters) DeepCopy() *InstanceRequirementsMemoryMibInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsMemoryMibInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsMemoryMibObservation) DeepCopyInto(out *InstanceRequirementsMemoryMibObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsMemoryMibObservation. +func (in *InstanceRequirementsMemoryMibObservation) DeepCopy() *InstanceRequirementsMemoryMibObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsMemoryMibObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsMemoryMibParameters) DeepCopyInto(out *InstanceRequirementsMemoryMibParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsMemoryMibParameters. +func (in *InstanceRequirementsMemoryMibParameters) DeepCopy() *InstanceRequirementsMemoryMibParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsMemoryMibParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsNetworkBandwidthGbpsInitParameters) DeepCopyInto(out *InstanceRequirementsNetworkBandwidthGbpsInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsNetworkBandwidthGbpsInitParameters. +func (in *InstanceRequirementsNetworkBandwidthGbpsInitParameters) DeepCopy() *InstanceRequirementsNetworkBandwidthGbpsInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsNetworkBandwidthGbpsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsNetworkBandwidthGbpsObservation) DeepCopyInto(out *InstanceRequirementsNetworkBandwidthGbpsObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsNetworkBandwidthGbpsObservation. +func (in *InstanceRequirementsNetworkBandwidthGbpsObservation) DeepCopy() *InstanceRequirementsNetworkBandwidthGbpsObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsNetworkBandwidthGbpsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsNetworkBandwidthGbpsParameters) DeepCopyInto(out *InstanceRequirementsNetworkBandwidthGbpsParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsNetworkBandwidthGbpsParameters. +func (in *InstanceRequirementsNetworkBandwidthGbpsParameters) DeepCopy() *InstanceRequirementsNetworkBandwidthGbpsParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsNetworkBandwidthGbpsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsNetworkInterfaceCountInitParameters) DeepCopyInto(out *InstanceRequirementsNetworkInterfaceCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsNetworkInterfaceCountInitParameters. +func (in *InstanceRequirementsNetworkInterfaceCountInitParameters) DeepCopy() *InstanceRequirementsNetworkInterfaceCountInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsNetworkInterfaceCountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsNetworkInterfaceCountObservation) DeepCopyInto(out *InstanceRequirementsNetworkInterfaceCountObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsNetworkInterfaceCountObservation. +func (in *InstanceRequirementsNetworkInterfaceCountObservation) DeepCopy() *InstanceRequirementsNetworkInterfaceCountObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsNetworkInterfaceCountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsNetworkInterfaceCountParameters) DeepCopyInto(out *InstanceRequirementsNetworkInterfaceCountParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsNetworkInterfaceCountParameters. +func (in *InstanceRequirementsNetworkInterfaceCountParameters) DeepCopy() *InstanceRequirementsNetworkInterfaceCountParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsNetworkInterfaceCountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsObservation) DeepCopyInto(out *InstanceRequirementsObservation) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(AcceleratorCountObservation) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = new(AcceleratorTotalMemoryMibObservation) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, 
len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = new(BaselineEBSBandwidthMbpsObservation) + (*in).DeepCopyInto(*out) + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + 
**out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = new(MemoryGibPerVcpuObservation) + (*in).DeepCopyInto(*out) + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = new(MemoryMibObservation) + (*in).DeepCopyInto(*out) + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = new(NetworkBandwidthGbpsObservation) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = new(NetworkInterfaceCountObservation) + (*in).DeepCopyInto(*out) + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = new(TotalLocalStorageGbObservation) + (*in).DeepCopyInto(*out) + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = new(VcpuCountObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsObservation. +func (in *InstanceRequirementsObservation) DeepCopy() *InstanceRequirementsObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *InstanceRequirementsParameters) DeepCopyInto(out *InstanceRequirementsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(AcceleratorCountParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = new(AcceleratorTotalMemoryMibParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = new(BaselineEBSBandwidthMbpsParameters) + (*in).DeepCopyInto(*out) + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if 
in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = new(MemoryGibPerVcpuParameters) + (*in).DeepCopyInto(*out) + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = new(MemoryMibParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = new(NetworkBandwidthGbpsParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = new(NetworkInterfaceCountParameters) + (*in).DeepCopyInto(*out) + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = 
new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = new(TotalLocalStorageGbParameters) + (*in).DeepCopyInto(*out) + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = new(VcpuCountParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsParameters. +func (in *InstanceRequirementsParameters) DeepCopy() *InstanceRequirementsParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsTotalLocalStorageGbInitParameters) DeepCopyInto(out *InstanceRequirementsTotalLocalStorageGbInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsTotalLocalStorageGbInitParameters. +func (in *InstanceRequirementsTotalLocalStorageGbInitParameters) DeepCopy() *InstanceRequirementsTotalLocalStorageGbInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsTotalLocalStorageGbInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *InstanceRequirementsTotalLocalStorageGbObservation) DeepCopyInto(out *InstanceRequirementsTotalLocalStorageGbObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsTotalLocalStorageGbObservation. +func (in *InstanceRequirementsTotalLocalStorageGbObservation) DeepCopy() *InstanceRequirementsTotalLocalStorageGbObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsTotalLocalStorageGbObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsTotalLocalStorageGbParameters) DeepCopyInto(out *InstanceRequirementsTotalLocalStorageGbParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsTotalLocalStorageGbParameters. +func (in *InstanceRequirementsTotalLocalStorageGbParameters) DeepCopy() *InstanceRequirementsTotalLocalStorageGbParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsTotalLocalStorageGbParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsVcpuCountInitParameters) DeepCopyInto(out *InstanceRequirementsVcpuCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsVcpuCountInitParameters. +func (in *InstanceRequirementsVcpuCountInitParameters) DeepCopy() *InstanceRequirementsVcpuCountInitParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsVcpuCountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceRequirementsVcpuCountObservation) DeepCopyInto(out *InstanceRequirementsVcpuCountObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsVcpuCountObservation. +func (in *InstanceRequirementsVcpuCountObservation) DeepCopy() *InstanceRequirementsVcpuCountObservation { + if in == nil { + return nil + } + out := new(InstanceRequirementsVcpuCountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceRequirementsVcpuCountParameters) DeepCopyInto(out *InstanceRequirementsVcpuCountParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceRequirementsVcpuCountParameters. +func (in *InstanceRequirementsVcpuCountParameters) DeepCopy() *InstanceRequirementsVcpuCountParameters { + if in == nil { + return nil + } + out := new(InstanceRequirementsVcpuCountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSpec) DeepCopyInto(out *InstanceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSpec. +func (in *InstanceSpec) DeepCopy() *InstanceSpec { + if in == nil { + return nil + } + out := new(InstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceStatus) DeepCopyInto(out *InstanceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus. +func (in *InstanceStatus) DeepCopy() *InstanceStatus { + if in == nil { + return nil + } + out := new(InstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchSpecificationEBSBlockDeviceInitParameters) DeepCopyInto(out *LaunchSpecificationEBSBlockDeviceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationEBSBlockDeviceInitParameters. +func (in *LaunchSpecificationEBSBlockDeviceInitParameters) DeepCopy() *LaunchSpecificationEBSBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(LaunchSpecificationEBSBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchSpecificationEBSBlockDeviceObservation) DeepCopyInto(out *LaunchSpecificationEBSBlockDeviceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationEBSBlockDeviceObservation. +func (in *LaunchSpecificationEBSBlockDeviceObservation) DeepCopy() *LaunchSpecificationEBSBlockDeviceObservation { + if in == nil { + return nil + } + out := new(LaunchSpecificationEBSBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchSpecificationEBSBlockDeviceParameters) DeepCopyInto(out *LaunchSpecificationEBSBlockDeviceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationEBSBlockDeviceParameters. +func (in *LaunchSpecificationEBSBlockDeviceParameters) DeepCopy() *LaunchSpecificationEBSBlockDeviceParameters { + if in == nil { + return nil + } + out := new(LaunchSpecificationEBSBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchSpecificationEphemeralBlockDeviceInitParameters) DeepCopyInto(out *LaunchSpecificationEphemeralBlockDeviceInitParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationEphemeralBlockDeviceInitParameters. +func (in *LaunchSpecificationEphemeralBlockDeviceInitParameters) DeepCopy() *LaunchSpecificationEphemeralBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(LaunchSpecificationEphemeralBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchSpecificationEphemeralBlockDeviceObservation) DeepCopyInto(out *LaunchSpecificationEphemeralBlockDeviceObservation) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationEphemeralBlockDeviceObservation. +func (in *LaunchSpecificationEphemeralBlockDeviceObservation) DeepCopy() *LaunchSpecificationEphemeralBlockDeviceObservation { + if in == nil { + return nil + } + out := new(LaunchSpecificationEphemeralBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchSpecificationEphemeralBlockDeviceParameters) DeepCopyInto(out *LaunchSpecificationEphemeralBlockDeviceParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationEphemeralBlockDeviceParameters. +func (in *LaunchSpecificationEphemeralBlockDeviceParameters) DeepCopy() *LaunchSpecificationEphemeralBlockDeviceParameters { + if in == nil { + return nil + } + out := new(LaunchSpecificationEphemeralBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchSpecificationInitParameters) DeepCopyInto(out *LaunchSpecificationInitParameters) { + *out = *in + if in.AMI != nil { + in, out := &in.AMI, &out.AMI + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]LaunchSpecificationEBSBlockDeviceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]LaunchSpecificationEphemeralBlockDeviceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.IAMInstanceProfileArn != nil { + in, out := &in.IAMInstanceProfileArn, &out.IAMInstanceProfileArn + *out = new(string) + **out = **in + } + if in.IAMInstanceProfileArnRef != nil { + in, out := &in.IAMInstanceProfileArnRef, &out.IAMInstanceProfileArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMInstanceProfileArnSelector != nil { + in, out := &in.IAMInstanceProfileArnSelector, &out.IAMInstanceProfileArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(bool) + **out = **in + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementTenancy != nil { + in, out := &in.PlacementTenancy, &out.PlacementTenancy + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = make([]LaunchSpecificationRootBlockDeviceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + 
if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationInitParameters. +func (in *LaunchSpecificationInitParameters) DeepCopy() *LaunchSpecificationInitParameters { + if in == nil { + return nil + } + out := new(LaunchSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchSpecificationObservation) DeepCopyInto(out *LaunchSpecificationObservation) { + *out = *in + if in.AMI != nil { + in, out := &in.AMI, &out.AMI + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]LaunchSpecificationEBSBlockDeviceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]LaunchSpecificationEphemeralBlockDeviceObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.IAMInstanceProfileArn != nil { + in, out := &in.IAMInstanceProfileArn, &out.IAMInstanceProfileArn + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(bool) + **out = **in + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementTenancy != nil { + in, out := &in.PlacementTenancy, &out.PlacementTenancy + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = make([]LaunchSpecificationRootBlockDeviceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], 
&(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationObservation. +func (in *LaunchSpecificationObservation) DeepCopy() *LaunchSpecificationObservation { + if in == nil { + return nil + } + out := new(LaunchSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchSpecificationParameters) DeepCopyInto(out *LaunchSpecificationParameters) { + *out = *in + if in.AMI != nil { + in, out := &in.AMI, &out.AMI + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]LaunchSpecificationEBSBlockDeviceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]LaunchSpecificationEphemeralBlockDeviceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.IAMInstanceProfileArn != nil { + in, out := &in.IAMInstanceProfileArn, &out.IAMInstanceProfileArn + *out = new(string) + **out = **in + } + if 
in.IAMInstanceProfileArnRef != nil { + in, out := &in.IAMInstanceProfileArnRef, &out.IAMInstanceProfileArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMInstanceProfileArnSelector != nil { + in, out := &in.IAMInstanceProfileArnSelector, &out.IAMInstanceProfileArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(bool) + **out = **in + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementTenancy != nil { + in, out := &in.PlacementTenancy, &out.PlacementTenancy + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = make([]LaunchSpecificationRootBlockDeviceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + 
in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationParameters. +func (in *LaunchSpecificationParameters) DeepCopy() *LaunchSpecificationParameters { + if in == nil { + return nil + } + out := new(LaunchSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchSpecificationRootBlockDeviceInitParameters) DeepCopyInto(out *LaunchSpecificationRootBlockDeviceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationRootBlockDeviceInitParameters. 
+func (in *LaunchSpecificationRootBlockDeviceInitParameters) DeepCopy() *LaunchSpecificationRootBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(LaunchSpecificationRootBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchSpecificationRootBlockDeviceObservation) DeepCopyInto(out *LaunchSpecificationRootBlockDeviceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationRootBlockDeviceObservation. +func (in *LaunchSpecificationRootBlockDeviceObservation) DeepCopy() *LaunchSpecificationRootBlockDeviceObservation { + if in == nil { + return nil + } + out := new(LaunchSpecificationRootBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchSpecificationRootBlockDeviceParameters) DeepCopyInto(out *LaunchSpecificationRootBlockDeviceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSpecificationRootBlockDeviceParameters. +func (in *LaunchSpecificationRootBlockDeviceParameters) DeepCopy() *LaunchSpecificationRootBlockDeviceParameters { + if in == nil { + return nil + } + out := new(LaunchSpecificationRootBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplate) DeepCopyInto(out *LaunchTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplate. 
+func (in *LaunchTemplate) DeepCopy() *LaunchTemplate { + if in == nil { + return nil + } + out := new(LaunchTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LaunchTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateCPUOptionsInitParameters) DeepCopyInto(out *LaunchTemplateCPUOptionsInitParameters) { + *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } + if in.CoreCount != nil { + in, out := &in.CoreCount, &out.CoreCount + *out = new(float64) + **out = **in + } + if in.ThreadsPerCore != nil { + in, out := &in.ThreadsPerCore, &out.ThreadsPerCore + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateCPUOptionsInitParameters. +func (in *LaunchTemplateCPUOptionsInitParameters) DeepCopy() *LaunchTemplateCPUOptionsInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateCPUOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateCPUOptionsObservation) DeepCopyInto(out *LaunchTemplateCPUOptionsObservation) { + *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } + if in.CoreCount != nil { + in, out := &in.CoreCount, &out.CoreCount + *out = new(float64) + **out = **in + } + if in.ThreadsPerCore != nil { + in, out := &in.ThreadsPerCore, &out.ThreadsPerCore + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateCPUOptionsObservation. +func (in *LaunchTemplateCPUOptionsObservation) DeepCopy() *LaunchTemplateCPUOptionsObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateCPUOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateCPUOptionsParameters) DeepCopyInto(out *LaunchTemplateCPUOptionsParameters) { + *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } + if in.CoreCount != nil { + in, out := &in.CoreCount, &out.CoreCount + *out = new(float64) + **out = **in + } + if in.ThreadsPerCore != nil { + in, out := &in.ThreadsPerCore, &out.ThreadsPerCore + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateCPUOptionsParameters. +func (in *LaunchTemplateCPUOptionsParameters) DeepCopy() *LaunchTemplateCPUOptionsParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateCPUOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateCapacityReservationSpecificationInitParameters) DeepCopyInto(out *LaunchTemplateCapacityReservationSpecificationInitParameters) { + *out = *in + if in.CapacityReservationPreference != nil { + in, out := &in.CapacityReservationPreference, &out.CapacityReservationPreference + *out = new(string) + **out = **in + } + if in.CapacityReservationTarget != nil { + in, out := &in.CapacityReservationTarget, &out.CapacityReservationTarget + *out = new(CapacityReservationSpecificationCapacityReservationTargetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateCapacityReservationSpecificationInitParameters. +func (in *LaunchTemplateCapacityReservationSpecificationInitParameters) DeepCopy() *LaunchTemplateCapacityReservationSpecificationInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateCapacityReservationSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateCapacityReservationSpecificationObservation) DeepCopyInto(out *LaunchTemplateCapacityReservationSpecificationObservation) { + *out = *in + if in.CapacityReservationPreference != nil { + in, out := &in.CapacityReservationPreference, &out.CapacityReservationPreference + *out = new(string) + **out = **in + } + if in.CapacityReservationTarget != nil { + in, out := &in.CapacityReservationTarget, &out.CapacityReservationTarget + *out = new(CapacityReservationSpecificationCapacityReservationTargetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateCapacityReservationSpecificationObservation. 
+func (in *LaunchTemplateCapacityReservationSpecificationObservation) DeepCopy() *LaunchTemplateCapacityReservationSpecificationObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateCapacityReservationSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateCapacityReservationSpecificationParameters) DeepCopyInto(out *LaunchTemplateCapacityReservationSpecificationParameters) { + *out = *in + if in.CapacityReservationPreference != nil { + in, out := &in.CapacityReservationPreference, &out.CapacityReservationPreference + *out = new(string) + **out = **in + } + if in.CapacityReservationTarget != nil { + in, out := &in.CapacityReservationTarget, &out.CapacityReservationTarget + *out = new(CapacityReservationSpecificationCapacityReservationTargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateCapacityReservationSpecificationParameters. +func (in *LaunchTemplateCapacityReservationSpecificationParameters) DeepCopy() *LaunchTemplateCapacityReservationSpecificationParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateCapacityReservationSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateConfigInitParameters) DeepCopyInto(out *LaunchTemplateConfigInitParameters) { + *out = *in + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = new(LaunchTemplateSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]OverridesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateConfigInitParameters. +func (in *LaunchTemplateConfigInitParameters) DeepCopy() *LaunchTemplateConfigInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateConfigObservation) DeepCopyInto(out *LaunchTemplateConfigObservation) { + *out = *in + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = new(LaunchTemplateSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]OverridesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateConfigObservation. +func (in *LaunchTemplateConfigObservation) DeepCopy() *LaunchTemplateConfigObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateConfigParameters) DeepCopyInto(out *LaunchTemplateConfigParameters) { + *out = *in + if in.LaunchTemplateSpecification != nil { + in, out := &in.LaunchTemplateSpecification, &out.LaunchTemplateSpecification + *out = new(LaunchTemplateSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]OverridesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateConfigParameters. +func (in *LaunchTemplateConfigParameters) DeepCopy() *LaunchTemplateConfigParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateCreditSpecificationInitParameters) DeepCopyInto(out *LaunchTemplateCreditSpecificationInitParameters) { + *out = *in + if in.CPUCredits != nil { + in, out := &in.CPUCredits, &out.CPUCredits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateCreditSpecificationInitParameters. +func (in *LaunchTemplateCreditSpecificationInitParameters) DeepCopy() *LaunchTemplateCreditSpecificationInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateCreditSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateCreditSpecificationObservation) DeepCopyInto(out *LaunchTemplateCreditSpecificationObservation) { + *out = *in + if in.CPUCredits != nil { + in, out := &in.CPUCredits, &out.CPUCredits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateCreditSpecificationObservation. +func (in *LaunchTemplateCreditSpecificationObservation) DeepCopy() *LaunchTemplateCreditSpecificationObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateCreditSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateCreditSpecificationParameters) DeepCopyInto(out *LaunchTemplateCreditSpecificationParameters) { + *out = *in + if in.CPUCredits != nil { + in, out := &in.CPUCredits, &out.CPUCredits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateCreditSpecificationParameters. +func (in *LaunchTemplateCreditSpecificationParameters) DeepCopy() *LaunchTemplateCreditSpecificationParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateCreditSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateEnclaveOptionsInitParameters) DeepCopyInto(out *LaunchTemplateEnclaveOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateEnclaveOptionsInitParameters. 
+func (in *LaunchTemplateEnclaveOptionsInitParameters) DeepCopy() *LaunchTemplateEnclaveOptionsInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateEnclaveOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateEnclaveOptionsObservation) DeepCopyInto(out *LaunchTemplateEnclaveOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateEnclaveOptionsObservation. +func (in *LaunchTemplateEnclaveOptionsObservation) DeepCopy() *LaunchTemplateEnclaveOptionsObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateEnclaveOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateEnclaveOptionsParameters) DeepCopyInto(out *LaunchTemplateEnclaveOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateEnclaveOptionsParameters. +func (in *LaunchTemplateEnclaveOptionsParameters) DeepCopy() *LaunchTemplateEnclaveOptionsParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateEnclaveOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateInitParameters) DeepCopyInto(out *LaunchTemplateInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInitParameters. +func (in *LaunchTemplateInitParameters) DeepCopy() *LaunchTemplateInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateInitParameters_2) DeepCopyInto(out *LaunchTemplateInitParameters_2) { + *out = *in + if in.BlockDeviceMappings != nil { + in, out := &in.BlockDeviceMappings, &out.BlockDeviceMappings + *out = make([]BlockDeviceMappingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(LaunchTemplateCPUOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationSpecification != nil { + in, out := &in.CapacityReservationSpecification, &out.CapacityReservationSpecification + *out = new(LaunchTemplateCapacityReservationSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CreditSpecification != nil { + in, out := &in.CreditSpecification, &out.CreditSpecification + *out = new(LaunchTemplateCreditSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultVersion != nil { + in, out := &in.DefaultVersion, &out.DefaultVersion + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = 
**in + } + if in.DisableAPIStop != nil { + in, out := &in.DisableAPIStop, &out.DisableAPIStop + *out = new(bool) + **out = **in + } + if in.DisableAPITermination != nil { + in, out := &in.DisableAPITermination, &out.DisableAPITermination + *out = new(bool) + **out = **in + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(string) + **out = **in + } + if in.ElasticGpuSpecifications != nil { + in, out := &in.ElasticGpuSpecifications, &out.ElasticGpuSpecifications + *out = make([]ElasticGpuSpecificationsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticInferenceAccelerator != nil { + in, out := &in.ElasticInferenceAccelerator, &out.ElasticInferenceAccelerator + *out = new(ElasticInferenceAcceleratorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnclaveOptions != nil { + in, out := &in.EnclaveOptions, &out.EnclaveOptions + *out = new(LaunchTemplateEnclaveOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HibernationOptions != nil { + in, out := &in.HibernationOptions, &out.HibernationOptions + *out = new(HibernationOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(IAMInstanceProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.InstanceInitiatedShutdownBehavior != nil { + in, out := &in.InstanceInitiatedShutdownBehavior, &out.InstanceInitiatedShutdownBehavior + *out = new(string) + **out = **in + } + if in.InstanceMarketOptions != nil { + in, out := &in.InstanceMarketOptions, &out.InstanceMarketOptions + *out = new(LaunchTemplateInstanceMarketOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = 
new(InstanceRequirementsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KernelID != nil { + in, out := &in.KernelID, &out.KernelID + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.LicenseSpecification != nil { + in, out := &in.LicenseSpecification, &out.LicenseSpecification + *out = make([]LicenseSpecificationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaintenanceOptions != nil { + in, out := &in.MaintenanceOptions, &out.MaintenanceOptions + *out = new(LaunchTemplateMaintenanceOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(LaunchTemplateMetadataOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkInterfaces != nil { + in, out := &in.NetworkInterfaces, &out.NetworkInterfaces + *out = make([]NetworkInterfacesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(PlacementInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSNameOptions != nil { + in, out := &in.PrivateDNSNameOptions, &out.PrivateDNSNameOptions + *out = new(LaunchTemplatePrivateDNSNameOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RAMDiskID != nil { + in, out := &in.RAMDiskID, &out.RAMDiskID + *out = new(string) + **out = **in + } + if in.SecurityGroupNameRefs != nil { + in, out := &in.SecurityGroupNameRefs, 
&out.SecurityGroupNameRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupNameSelector != nil { + in, out := &in.SecurityGroupNameSelector, &out.SecurityGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupNames != nil { + in, out := &in.SecurityGroupNames, &out.SecurityGroupNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TagSpecifications != nil { + in, out := &in.TagSpecifications, &out.TagSpecifications + *out = make([]TagSpecificationsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpdateDefaultVersion != nil { + in, out := &in.UpdateDefaultVersion, &out.UpdateDefaultVersion + *out = new(bool) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = 
**in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInitParameters_2. +func (in *LaunchTemplateInitParameters_2) DeepCopy() *LaunchTemplateInitParameters_2 { + if in == nil { + return nil + } + out := new(LaunchTemplateInitParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateInstanceMarketOptionsInitParameters) DeepCopyInto(out *LaunchTemplateInstanceMarketOptionsInitParameters) { + *out = *in + if in.MarketType != nil { + in, out := &in.MarketType, &out.MarketType + *out = new(string) + **out = **in + } + if in.SpotOptions != nil { + in, out := &in.SpotOptions, &out.SpotOptions + *out = new(InstanceMarketOptionsSpotOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInstanceMarketOptionsInitParameters. +func (in *LaunchTemplateInstanceMarketOptionsInitParameters) DeepCopy() *LaunchTemplateInstanceMarketOptionsInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateInstanceMarketOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateInstanceMarketOptionsObservation) DeepCopyInto(out *LaunchTemplateInstanceMarketOptionsObservation) { + *out = *in + if in.MarketType != nil { + in, out := &in.MarketType, &out.MarketType + *out = new(string) + **out = **in + } + if in.SpotOptions != nil { + in, out := &in.SpotOptions, &out.SpotOptions + *out = new(InstanceMarketOptionsSpotOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInstanceMarketOptionsObservation. 
+func (in *LaunchTemplateInstanceMarketOptionsObservation) DeepCopy() *LaunchTemplateInstanceMarketOptionsObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateInstanceMarketOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateInstanceMarketOptionsParameters) DeepCopyInto(out *LaunchTemplateInstanceMarketOptionsParameters) { + *out = *in + if in.MarketType != nil { + in, out := &in.MarketType, &out.MarketType + *out = new(string) + **out = **in + } + if in.SpotOptions != nil { + in, out := &in.SpotOptions, &out.SpotOptions + *out = new(InstanceMarketOptionsSpotOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInstanceMarketOptionsParameters. +func (in *LaunchTemplateInstanceMarketOptionsParameters) DeepCopy() *LaunchTemplateInstanceMarketOptionsParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateInstanceMarketOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateList) DeepCopyInto(out *LaunchTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LaunchTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateList. 
+func (in *LaunchTemplateList) DeepCopy() *LaunchTemplateList { + if in == nil { + return nil + } + out := new(LaunchTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LaunchTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateMaintenanceOptionsInitParameters) DeepCopyInto(out *LaunchTemplateMaintenanceOptionsInitParameters) { + *out = *in + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateMaintenanceOptionsInitParameters. +func (in *LaunchTemplateMaintenanceOptionsInitParameters) DeepCopy() *LaunchTemplateMaintenanceOptionsInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateMaintenanceOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateMaintenanceOptionsObservation) DeepCopyInto(out *LaunchTemplateMaintenanceOptionsObservation) { + *out = *in + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateMaintenanceOptionsObservation. 
+func (in *LaunchTemplateMaintenanceOptionsObservation) DeepCopy() *LaunchTemplateMaintenanceOptionsObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateMaintenanceOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateMaintenanceOptionsParameters) DeepCopyInto(out *LaunchTemplateMaintenanceOptionsParameters) { + *out = *in + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateMaintenanceOptionsParameters. +func (in *LaunchTemplateMaintenanceOptionsParameters) DeepCopy() *LaunchTemplateMaintenanceOptionsParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateMaintenanceOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateMetadataOptionsInitParameters) DeepCopyInto(out *LaunchTemplateMetadataOptionsInitParameters) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPProtocolIPv6 != nil { + in, out := &in.HTTPProtocolIPv6, &out.HTTPProtocolIPv6 + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } + if in.InstanceMetadataTags != nil { + in, out := &in.InstanceMetadataTags, &out.InstanceMetadataTags + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateMetadataOptionsInitParameters. +func (in *LaunchTemplateMetadataOptionsInitParameters) DeepCopy() *LaunchTemplateMetadataOptionsInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateMetadataOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateMetadataOptionsObservation) DeepCopyInto(out *LaunchTemplateMetadataOptionsObservation) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPProtocolIPv6 != nil { + in, out := &in.HTTPProtocolIPv6, &out.HTTPProtocolIPv6 + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } + if in.InstanceMetadataTags != nil { + in, out := &in.InstanceMetadataTags, &out.InstanceMetadataTags + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateMetadataOptionsObservation. +func (in *LaunchTemplateMetadataOptionsObservation) DeepCopy() *LaunchTemplateMetadataOptionsObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateMetadataOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateMetadataOptionsParameters) DeepCopyInto(out *LaunchTemplateMetadataOptionsParameters) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPProtocolIPv6 != nil { + in, out := &in.HTTPProtocolIPv6, &out.HTTPProtocolIPv6 + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } + if in.InstanceMetadataTags != nil { + in, out := &in.InstanceMetadataTags, &out.InstanceMetadataTags + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateMetadataOptionsParameters. +func (in *LaunchTemplateMetadataOptionsParameters) DeepCopy() *LaunchTemplateMetadataOptionsParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateMetadataOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateObservation) DeepCopyInto(out *LaunchTemplateObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateObservation. 
+func (in *LaunchTemplateObservation) DeepCopy() *LaunchTemplateObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateObservation_2) DeepCopyInto(out *LaunchTemplateObservation_2) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BlockDeviceMappings != nil { + in, out := &in.BlockDeviceMappings, &out.BlockDeviceMappings + *out = make([]BlockDeviceMappingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(LaunchTemplateCPUOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationSpecification != nil { + in, out := &in.CapacityReservationSpecification, &out.CapacityReservationSpecification + *out = new(LaunchTemplateCapacityReservationSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.CreditSpecification != nil { + in, out := &in.CreditSpecification, &out.CreditSpecification + *out = new(LaunchTemplateCreditSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultVersion != nil { + in, out := &in.DefaultVersion, &out.DefaultVersion + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableAPIStop != nil { + in, out := &in.DisableAPIStop, &out.DisableAPIStop + *out = new(bool) + **out = **in + } + if in.DisableAPITermination != nil { + in, out := &in.DisableAPITermination, &out.DisableAPITermination + *out = new(bool) + **out = **in + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(string) + **out = **in + } + if in.ElasticGpuSpecifications != nil { + in, out := 
&in.ElasticGpuSpecifications, &out.ElasticGpuSpecifications + *out = make([]ElasticGpuSpecificationsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticInferenceAccelerator != nil { + in, out := &in.ElasticInferenceAccelerator, &out.ElasticInferenceAccelerator + *out = new(ElasticInferenceAcceleratorObservation) + (*in).DeepCopyInto(*out) + } + if in.EnclaveOptions != nil { + in, out := &in.EnclaveOptions, &out.EnclaveOptions + *out = new(LaunchTemplateEnclaveOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.HibernationOptions != nil { + in, out := &in.HibernationOptions, &out.HibernationOptions + *out = new(HibernationOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(IAMInstanceProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.InstanceInitiatedShutdownBehavior != nil { + in, out := &in.InstanceInitiatedShutdownBehavior, &out.InstanceInitiatedShutdownBehavior + *out = new(string) + **out = **in + } + if in.InstanceMarketOptions != nil { + in, out := &in.InstanceMarketOptions, &out.InstanceMarketOptions + *out = new(LaunchTemplateInstanceMarketOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = new(InstanceRequirementsObservation) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KernelID != nil { + in, out := &in.KernelID, &out.KernelID + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if 
in.LatestVersion != nil { + in, out := &in.LatestVersion, &out.LatestVersion + *out = new(float64) + **out = **in + } + if in.LicenseSpecification != nil { + in, out := &in.LicenseSpecification, &out.LicenseSpecification + *out = make([]LicenseSpecificationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaintenanceOptions != nil { + in, out := &in.MaintenanceOptions, &out.MaintenanceOptions + *out = new(LaunchTemplateMaintenanceOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(LaunchTemplateMetadataOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkInterfaces != nil { + in, out := &in.NetworkInterfaces, &out.NetworkInterfaces + *out = make([]NetworkInterfacesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(PlacementObservation) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSNameOptions != nil { + in, out := &in.PrivateDNSNameOptions, &out.PrivateDNSNameOptions + *out = new(LaunchTemplatePrivateDNSNameOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.RAMDiskID != nil { + in, out := &in.RAMDiskID, &out.RAMDiskID + *out = new(string) + **out = **in + } + if in.SecurityGroupNames != nil { + in, out := &in.SecurityGroupNames, &out.SecurityGroupNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TagSpecifications != nil { + in, out := &in.TagSpecifications, &out.TagSpecifications + *out = 
make([]TagSpecificationsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpdateDefaultVersion != nil { + in, out := &in.UpdateDefaultVersion, &out.UpdateDefaultVersion + *out = new(bool) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateObservation_2. +func (in *LaunchTemplateObservation_2) DeepCopy() *LaunchTemplateObservation_2 { + if in == nil { + return nil + } + out := new(LaunchTemplateObservation_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateParameters) DeepCopyInto(out *LaunchTemplateParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateParameters. +func (in *LaunchTemplateParameters) DeepCopy() *LaunchTemplateParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateParameters_2) DeepCopyInto(out *LaunchTemplateParameters_2) { + *out = *in + if in.BlockDeviceMappings != nil { + in, out := &in.BlockDeviceMappings, &out.BlockDeviceMappings + *out = make([]BlockDeviceMappingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(LaunchTemplateCPUOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationSpecification != nil { + in, out := &in.CapacityReservationSpecification, &out.CapacityReservationSpecification + *out = new(LaunchTemplateCapacityReservationSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.CreditSpecification != nil { + in, out := &in.CreditSpecification, &out.CreditSpecification + *out = new(LaunchTemplateCreditSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultVersion != nil { + in, out := &in.DefaultVersion, &out.DefaultVersion + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableAPIStop != nil { + in, 
out := &in.DisableAPIStop, &out.DisableAPIStop + *out = new(bool) + **out = **in + } + if in.DisableAPITermination != nil { + in, out := &in.DisableAPITermination, &out.DisableAPITermination + *out = new(bool) + **out = **in + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(string) + **out = **in + } + if in.ElasticGpuSpecifications != nil { + in, out := &in.ElasticGpuSpecifications, &out.ElasticGpuSpecifications + *out = make([]ElasticGpuSpecificationsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticInferenceAccelerator != nil { + in, out := &in.ElasticInferenceAccelerator, &out.ElasticInferenceAccelerator + *out = new(ElasticInferenceAcceleratorParameters) + (*in).DeepCopyInto(*out) + } + if in.EnclaveOptions != nil { + in, out := &in.EnclaveOptions, &out.EnclaveOptions + *out = new(LaunchTemplateEnclaveOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.HibernationOptions != nil { + in, out := &in.HibernationOptions, &out.HibernationOptions + *out = new(HibernationOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(IAMInstanceProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.InstanceInitiatedShutdownBehavior != nil { + in, out := &in.InstanceInitiatedShutdownBehavior, &out.InstanceInitiatedShutdownBehavior + *out = new(string) + **out = **in + } + if in.InstanceMarketOptions != nil { + in, out := &in.InstanceMarketOptions, &out.InstanceMarketOptions + *out = new(LaunchTemplateInstanceMarketOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = new(InstanceRequirementsParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil 
{ + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KernelID != nil { + in, out := &in.KernelID, &out.KernelID + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.LicenseSpecification != nil { + in, out := &in.LicenseSpecification, &out.LicenseSpecification + *out = make([]LicenseSpecificationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaintenanceOptions != nil { + in, out := &in.MaintenanceOptions, &out.MaintenanceOptions + *out = new(LaunchTemplateMaintenanceOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(LaunchTemplateMetadataOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkInterfaces != nil { + in, out := &in.NetworkInterfaces, &out.NetworkInterfaces + *out = make([]NetworkInterfacesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(PlacementParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSNameOptions != nil { + in, out := &in.PrivateDNSNameOptions, &out.PrivateDNSNameOptions + *out = new(LaunchTemplatePrivateDNSNameOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.RAMDiskID != nil { + in, out := &in.RAMDiskID, &out.RAMDiskID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SecurityGroupNameRefs != nil { + in, out := &in.SecurityGroupNameRefs, &out.SecurityGroupNameRefs + *out = 
make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupNameSelector != nil { + in, out := &in.SecurityGroupNameSelector, &out.SecurityGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupNames != nil { + in, out := &in.SecurityGroupNames, &out.SecurityGroupNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TagSpecifications != nil { + in, out := &in.TagSpecifications, &out.TagSpecifications + *out = make([]TagSpecificationsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpdateDefaultVersion != nil { + in, out := &in.UpdateDefaultVersion, &out.UpdateDefaultVersion + *out = new(bool) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is 
an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateParameters_2. +func (in *LaunchTemplateParameters_2) DeepCopy() *LaunchTemplateParameters_2 { + if in == nil { + return nil + } + out := new(LaunchTemplateParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplatePrivateDNSNameOptionsInitParameters) DeepCopyInto(out *LaunchTemplatePrivateDNSNameOptionsInitParameters) { + *out = *in + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSAaaaRecord != nil { + in, out := &in.EnableResourceNameDNSAaaaRecord, &out.EnableResourceNameDNSAaaaRecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplatePrivateDNSNameOptionsInitParameters. +func (in *LaunchTemplatePrivateDNSNameOptionsInitParameters) DeepCopy() *LaunchTemplatePrivateDNSNameOptionsInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplatePrivateDNSNameOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplatePrivateDNSNameOptionsObservation) DeepCopyInto(out *LaunchTemplatePrivateDNSNameOptionsObservation) { + *out = *in + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSAaaaRecord != nil { + in, out := &in.EnableResourceNameDNSAaaaRecord, &out.EnableResourceNameDNSAaaaRecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplatePrivateDNSNameOptionsObservation. +func (in *LaunchTemplatePrivateDNSNameOptionsObservation) DeepCopy() *LaunchTemplatePrivateDNSNameOptionsObservation { + if in == nil { + return nil + } + out := new(LaunchTemplatePrivateDNSNameOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplatePrivateDNSNameOptionsParameters) DeepCopyInto(out *LaunchTemplatePrivateDNSNameOptionsParameters) { + *out = *in + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSAaaaRecord != nil { + in, out := &in.EnableResourceNameDNSAaaaRecord, &out.EnableResourceNameDNSAaaaRecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplatePrivateDNSNameOptionsParameters. 
+func (in *LaunchTemplatePrivateDNSNameOptionsParameters) DeepCopy() *LaunchTemplatePrivateDNSNameOptionsParameters { + if in == nil { + return nil + } + out := new(LaunchTemplatePrivateDNSNameOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateSpec) DeepCopyInto(out *LaunchTemplateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpec. +func (in *LaunchTemplateSpec) DeepCopy() *LaunchTemplateSpec { + if in == nil { + return nil + } + out := new(LaunchTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateSpecificationInitParameters) DeepCopyInto(out *LaunchTemplateSpecificationInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionRef != nil { + in, out := &in.VersionRef, &out.VersionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VersionSelector != nil { + in, out := &in.VersionSelector, &out.VersionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationInitParameters. +func (in *LaunchTemplateSpecificationInitParameters) DeepCopy() *LaunchTemplateSpecificationInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateSpecificationObservation) DeepCopyInto(out *LaunchTemplateSpecificationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationObservation. 
+func (in *LaunchTemplateSpecificationObservation) DeepCopy() *LaunchTemplateSpecificationObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateSpecificationParameters) DeepCopyInto(out *LaunchTemplateSpecificationParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionRef != nil { + in, out := &in.VersionRef, &out.VersionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VersionSelector != nil { + in, out := &in.VersionSelector, &out.VersionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateSpecificationParameters. +func (in *LaunchTemplateSpecificationParameters) DeepCopy() *LaunchTemplateSpecificationParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateStatus) DeepCopyInto(out *LaunchTemplateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateStatus. +func (in *LaunchTemplateStatus) DeepCopy() *LaunchTemplateStatus { + if in == nil { + return nil + } + out := new(LaunchTemplateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LicenseSpecificationInitParameters) DeepCopyInto(out *LicenseSpecificationInitParameters) { + *out = *in + if in.LicenseConfigurationArn != nil { + in, out := &in.LicenseConfigurationArn, &out.LicenseConfigurationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseSpecificationInitParameters. +func (in *LicenseSpecificationInitParameters) DeepCopy() *LicenseSpecificationInitParameters { + if in == nil { + return nil + } + out := new(LicenseSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LicenseSpecificationObservation) DeepCopyInto(out *LicenseSpecificationObservation) { + *out = *in + if in.LicenseConfigurationArn != nil { + in, out := &in.LicenseConfigurationArn, &out.LicenseConfigurationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseSpecificationObservation. 
+func (in *LicenseSpecificationObservation) DeepCopy() *LicenseSpecificationObservation { + if in == nil { + return nil + } + out := new(LicenseSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LicenseSpecificationParameters) DeepCopyInto(out *LicenseSpecificationParameters) { + *out = *in + if in.LicenseConfigurationArn != nil { + in, out := &in.LicenseConfigurationArn, &out.LicenseConfigurationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseSpecificationParameters. +func (in *LicenseSpecificationParameters) DeepCopy() *LicenseSpecificationParameters { + if in == nil { + return nil + } + out := new(LicenseSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceOptionsInitParameters) DeepCopyInto(out *MaintenanceOptionsInitParameters) { + *out = *in + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceOptionsInitParameters. +func (in *MaintenanceOptionsInitParameters) DeepCopy() *MaintenanceOptionsInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceOptionsObservation) DeepCopyInto(out *MaintenanceOptionsObservation) { + *out = *in + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceOptionsObservation. +func (in *MaintenanceOptionsObservation) DeepCopy() *MaintenanceOptionsObservation { + if in == nil { + return nil + } + out := new(MaintenanceOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceOptionsParameters) DeepCopyInto(out *MaintenanceOptionsParameters) { + *out = *in + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceOptionsParameters. +func (in *MaintenanceOptionsParameters) DeepCopy() *MaintenanceOptionsParameters { + if in == nil { + return nil + } + out := new(MaintenanceOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryGibPerVcpuInitParameters) DeepCopyInto(out *MemoryGibPerVcpuInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuInitParameters. 
+func (in *MemoryGibPerVcpuInitParameters) DeepCopy() *MemoryGibPerVcpuInitParameters { + if in == nil { + return nil + } + out := new(MemoryGibPerVcpuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryGibPerVcpuObservation) DeepCopyInto(out *MemoryGibPerVcpuObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuObservation. +func (in *MemoryGibPerVcpuObservation) DeepCopy() *MemoryGibPerVcpuObservation { + if in == nil { + return nil + } + out := new(MemoryGibPerVcpuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryGibPerVcpuParameters) DeepCopyInto(out *MemoryGibPerVcpuParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryGibPerVcpuParameters. +func (in *MemoryGibPerVcpuParameters) DeepCopy() *MemoryGibPerVcpuParameters { + if in == nil { + return nil + } + out := new(MemoryGibPerVcpuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemoryMibInitParameters) DeepCopyInto(out *MemoryMibInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibInitParameters. +func (in *MemoryMibInitParameters) DeepCopy() *MemoryMibInitParameters { + if in == nil { + return nil + } + out := new(MemoryMibInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryMibObservation) DeepCopyInto(out *MemoryMibObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibObservation. +func (in *MemoryMibObservation) DeepCopy() *MemoryMibObservation { + if in == nil { + return nil + } + out := new(MemoryMibObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemoryMibParameters) DeepCopyInto(out *MemoryMibParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryMibParameters. 
+func (in *MemoryMibParameters) DeepCopy() *MemoryMibParameters { + if in == nil { + return nil + } + out := new(MemoryMibParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataOptionsInitParameters) DeepCopyInto(out *MetadataOptionsInitParameters) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPProtocolIPv6 != nil { + in, out := &in.HTTPProtocolIPv6, &out.HTTPProtocolIPv6 + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } + if in.InstanceMetadataTags != nil { + in, out := &in.InstanceMetadataTags, &out.InstanceMetadataTags + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataOptionsInitParameters. +func (in *MetadataOptionsInitParameters) DeepCopy() *MetadataOptionsInitParameters { + if in == nil { + return nil + } + out := new(MetadataOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataOptionsObservation) DeepCopyInto(out *MetadataOptionsObservation) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPProtocolIPv6 != nil { + in, out := &in.HTTPProtocolIPv6, &out.HTTPProtocolIPv6 + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } + if in.InstanceMetadataTags != nil { + in, out := &in.InstanceMetadataTags, &out.InstanceMetadataTags + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataOptionsObservation. +func (in *MetadataOptionsObservation) DeepCopy() *MetadataOptionsObservation { + if in == nil { + return nil + } + out := new(MetadataOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataOptionsParameters) DeepCopyInto(out *MetadataOptionsParameters) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPProtocolIPv6 != nil { + in, out := &in.HTTPProtocolIPv6, &out.HTTPProtocolIPv6 + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } + if in.InstanceMetadataTags != nil { + in, out := &in.InstanceMetadataTags, &out.InstanceMetadataTags + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataOptionsParameters. +func (in *MetadataOptionsParameters) DeepCopy() *MetadataOptionsParameters { + if in == nil { + return nil + } + out := new(MetadataOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringInitParameters) DeepCopyInto(out *MonitoringInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringInitParameters. +func (in *MonitoringInitParameters) DeepCopy() *MonitoringInitParameters { + if in == nil { + return nil + } + out := new(MonitoringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringObservation) DeepCopyInto(out *MonitoringObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringObservation. +func (in *MonitoringObservation) DeepCopy() *MonitoringObservation { + if in == nil { + return nil + } + out := new(MonitoringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringParameters) DeepCopyInto(out *MonitoringParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringParameters. +func (in *MonitoringParameters) DeepCopy() *MonitoringParameters { + if in == nil { + return nil + } + out := new(MonitoringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkBandwidthGbpsInitParameters) DeepCopyInto(out *NetworkBandwidthGbpsInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsInitParameters. +func (in *NetworkBandwidthGbpsInitParameters) DeepCopy() *NetworkBandwidthGbpsInitParameters { + if in == nil { + return nil + } + out := new(NetworkBandwidthGbpsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkBandwidthGbpsObservation) DeepCopyInto(out *NetworkBandwidthGbpsObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsObservation. +func (in *NetworkBandwidthGbpsObservation) DeepCopy() *NetworkBandwidthGbpsObservation { + if in == nil { + return nil + } + out := new(NetworkBandwidthGbpsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkBandwidthGbpsParameters) DeepCopyInto(out *NetworkBandwidthGbpsParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkBandwidthGbpsParameters. +func (in *NetworkBandwidthGbpsParameters) DeepCopy() *NetworkBandwidthGbpsParameters { + if in == nil { + return nil + } + out := new(NetworkBandwidthGbpsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceCountInitParameters) DeepCopyInto(out *NetworkInterfaceCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountInitParameters. 
+func (in *NetworkInterfaceCountInitParameters) DeepCopy() *NetworkInterfaceCountInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceCountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceCountObservation) DeepCopyInto(out *NetworkInterfaceCountObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountObservation. +func (in *NetworkInterfaceCountObservation) DeepCopy() *NetworkInterfaceCountObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceCountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceCountParameters) DeepCopyInto(out *NetworkInterfaceCountParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceCountParameters. +func (in *NetworkInterfaceCountParameters) DeepCopy() *NetworkInterfaceCountParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceCountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceInitParameters) DeepCopyInto(out *NetworkInterfaceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceIndex != nil { + in, out := &in.DeviceIndex, &out.DeviceIndex + *out = new(float64) + **out = **in + } + if in.NetworkCardIndex != nil { + in, out := &in.NetworkCardIndex, &out.NetworkCardIndex + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIDRef != nil { + in, out := &in.NetworkInterfaceIDRef, &out.NetworkInterfaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceIDSelector != nil { + in, out := &in.NetworkInterfaceIDSelector, &out.NetworkInterfaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceInitParameters. +func (in *NetworkInterfaceInitParameters) DeepCopy() *NetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceObservation) DeepCopyInto(out *NetworkInterfaceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceIndex != nil { + in, out := &in.DeviceIndex, &out.DeviceIndex + *out = new(float64) + **out = **in + } + if in.NetworkCardIndex != nil { + in, out := &in.NetworkCardIndex, &out.NetworkCardIndex + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceObservation. +func (in *NetworkInterfaceObservation) DeepCopy() *NetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceParameters) DeepCopyInto(out *NetworkInterfaceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceIndex != nil { + in, out := &in.DeviceIndex, &out.DeviceIndex + *out = new(float64) + **out = **in + } + if in.NetworkCardIndex != nil { + in, out := &in.NetworkCardIndex, &out.NetworkCardIndex + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIDRef != nil { + in, out := &in.NetworkInterfaceIDRef, &out.NetworkInterfaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceIDSelector != nil { + in, out := &in.NetworkInterfaceIDSelector, &out.NetworkInterfaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceParameters. +func (in *NetworkInterfaceParameters) DeepCopy() *NetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfacesInitParameters) DeepCopyInto(out *NetworkInterfacesInitParameters) { + *out = *in + if in.AssociateCarrierIPAddress != nil { + in, out := &in.AssociateCarrierIPAddress, &out.AssociateCarrierIPAddress + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(string) + **out = **in + } + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DeviceIndex != nil { + in, out := &in.DeviceIndex, &out.DeviceIndex + *out = new(float64) + **out = **in + } + if in.IPv4AddressCount != nil { + in, out := &in.IPv4AddressCount, &out.IPv4AddressCount + *out = new(float64) + **out = **in + } + if in.IPv4Addresses != nil { + in, out := &in.IPv4Addresses, &out.IPv4Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPv4PrefixCount != nil { + in, out := &in.IPv4PrefixCount, &out.IPv4PrefixCount + *out = new(float64) + **out = **in + } + if in.IPv4Prefixes != nil { + in, out := &in.IPv4Prefixes, &out.IPv4Prefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPv6AddressCount != nil { + in, out := &in.IPv6AddressCount, &out.IPv6AddressCount + *out = new(float64) + **out = **in + } + if in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPv6PrefixCount != nil { + in, out := &in.IPv6PrefixCount, 
&out.IPv6PrefixCount + *out = new(float64) + **out = **in + } + if in.IPv6Prefixes != nil { + in, out := &in.IPv6Prefixes, &out.IPv6Prefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InterfaceType != nil { + in, out := &in.InterfaceType, &out.InterfaceType + *out = new(string) + **out = **in + } + if in.NetworkCardIndex != nil { + in, out := &in.NetworkCardIndex, &out.NetworkCardIndex + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIDRef != nil { + in, out := &in.NetworkInterfaceIDRef, &out.NetworkInterfaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceIDSelector != nil { + in, out := &in.NetworkInterfaceIDSelector, &out.NetworkInterfaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + 
(*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfacesInitParameters. +func (in *NetworkInterfacesInitParameters) DeepCopy() *NetworkInterfacesInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfacesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfacesObservation) DeepCopyInto(out *NetworkInterfacesObservation) { + *out = *in + if in.AssociateCarrierIPAddress != nil { + in, out := &in.AssociateCarrierIPAddress, &out.AssociateCarrierIPAddress + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(string) + **out = **in + } + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DeviceIndex != nil { + in, out := &in.DeviceIndex, &out.DeviceIndex + *out = new(float64) + **out = **in + } + if in.IPv4AddressCount != nil { + in, out := &in.IPv4AddressCount, &out.IPv4AddressCount + *out = new(float64) + **out = **in + } + if in.IPv4Addresses != nil { + in, out := &in.IPv4Addresses, &out.IPv4Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPv4PrefixCount != nil { + in, out := &in.IPv4PrefixCount, &out.IPv4PrefixCount + *out = new(float64) + **out = **in + } + if in.IPv4Prefixes != nil { + in, out := &in.IPv4Prefixes, 
&out.IPv4Prefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPv6AddressCount != nil { + in, out := &in.IPv6AddressCount, &out.IPv6AddressCount + *out = new(float64) + **out = **in + } + if in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPv6PrefixCount != nil { + in, out := &in.IPv6PrefixCount, &out.IPv6PrefixCount + *out = new(float64) + **out = **in + } + if in.IPv6Prefixes != nil { + in, out := &in.IPv6Prefixes, &out.IPv6Prefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InterfaceType != nil { + in, out := &in.InterfaceType, &out.InterfaceType + *out = new(string) + **out = **in + } + if in.NetworkCardIndex != nil { + in, out := &in.NetworkCardIndex, &out.NetworkCardIndex + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfacesObservation. 
+func (in *NetworkInterfacesObservation) DeepCopy() *NetworkInterfacesObservation { + if in == nil { + return nil + } + out := new(NetworkInterfacesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfacesParameters) DeepCopyInto(out *NetworkInterfacesParameters) { + *out = *in + if in.AssociateCarrierIPAddress != nil { + in, out := &in.AssociateCarrierIPAddress, &out.AssociateCarrierIPAddress + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(string) + **out = **in + } + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DeviceIndex != nil { + in, out := &in.DeviceIndex, &out.DeviceIndex + *out = new(float64) + **out = **in + } + if in.IPv4AddressCount != nil { + in, out := &in.IPv4AddressCount, &out.IPv4AddressCount + *out = new(float64) + **out = **in + } + if in.IPv4Addresses != nil { + in, out := &in.IPv4Addresses, &out.IPv4Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPv4PrefixCount != nil { + in, out := &in.IPv4PrefixCount, &out.IPv4PrefixCount + *out = new(float64) + **out = **in + } + if in.IPv4Prefixes != nil { + in, out := &in.IPv4Prefixes, &out.IPv4Prefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPv6AddressCount != nil { + in, out := &in.IPv6AddressCount, &out.IPv6AddressCount + *out = new(float64) + **out = **in + } + if 
in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPv6PrefixCount != nil { + in, out := &in.IPv6PrefixCount, &out.IPv6PrefixCount + *out = new(float64) + **out = **in + } + if in.IPv6Prefixes != nil { + in, out := &in.IPv6Prefixes, &out.IPv6Prefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InterfaceType != nil { + in, out := &in.InterfaceType, &out.InterfaceType + *out = new(string) + **out = **in + } + if in.NetworkCardIndex != nil { + in, out := &in.NetworkCardIndex, &out.NetworkCardIndex + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIDRef != nil { + in, out := &in.NetworkInterfaceIDRef, &out.NetworkInterfaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceIDSelector != nil { + in, out := &in.NetworkInterfaceIDSelector, &out.NetworkInterfaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] 
!= nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfacesParameters. +func (in *NetworkInterfacesParameters) DeepCopy() *NetworkInterfacesParameters { + if in == nil { + return nil + } + out := new(NetworkInterfacesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridesInitParameters) DeepCopyInto(out *OverridesInitParameters) { + *out = *in + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = new(OverridesInstanceRequirementsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating 
a new OverridesInitParameters. +func (in *OverridesInitParameters) DeepCopy() *OverridesInitParameters { + if in == nil { + return nil + } + out := new(OverridesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridesInstanceRequirementsInitParameters) DeepCopyInto(out *OverridesInstanceRequirementsInitParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(InstanceRequirementsAcceleratorCountInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = new(InstanceRequirementsAcceleratorTotalMemoryMibInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, 
&out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = new(InstanceRequirementsBaselineEBSBandwidthMbpsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, &out.BurstablePerformance + *out = new(string) + **out = **in + } + if in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = new(InstanceRequirementsMemoryGibPerVcpuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = new(InstanceRequirementsMemoryMibInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkBandwidthGbps != nil { + in, out := 
&in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = new(InstanceRequirementsNetworkBandwidthGbpsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = new(InstanceRequirementsNetworkInterfaceCountInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = new(InstanceRequirementsTotalLocalStorageGbInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = new(InstanceRequirementsVcpuCountInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesInstanceRequirementsInitParameters. +func (in *OverridesInstanceRequirementsInitParameters) DeepCopy() *OverridesInstanceRequirementsInitParameters { + if in == nil { + return nil + } + out := new(OverridesInstanceRequirementsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverridesInstanceRequirementsObservation) DeepCopyInto(out *OverridesInstanceRequirementsObservation) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(InstanceRequirementsAcceleratorCountObservation) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = new(InstanceRequirementsAcceleratorTotalMemoryMibObservation) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = new(InstanceRequirementsBaselineEBSBandwidthMbpsObservation) + (*in).DeepCopyInto(*out) + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, 
&out.BurstablePerformance + *out = new(string) + **out = **in + } + if in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = new(InstanceRequirementsMemoryGibPerVcpuObservation) + (*in).DeepCopyInto(*out) + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = new(InstanceRequirementsMemoryMibObservation) + (*in).DeepCopyInto(*out) + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = new(InstanceRequirementsNetworkBandwidthGbpsObservation) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = new(InstanceRequirementsNetworkInterfaceCountObservation) + (*in).DeepCopyInto(*out) + } + if 
in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = new(InstanceRequirementsTotalLocalStorageGbObservation) + (*in).DeepCopyInto(*out) + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = new(InstanceRequirementsVcpuCountObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesInstanceRequirementsObservation. +func (in *OverridesInstanceRequirementsObservation) DeepCopy() *OverridesInstanceRequirementsObservation { + if in == nil { + return nil + } + out := new(OverridesInstanceRequirementsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverridesInstanceRequirementsParameters) DeepCopyInto(out *OverridesInstanceRequirementsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(InstanceRequirementsAcceleratorCountParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorManufacturers != nil { + in, out := &in.AcceleratorManufacturers, &out.AcceleratorManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorNames != nil { + in, out := &in.AcceleratorNames, &out.AcceleratorNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorTotalMemoryMib != nil { + in, out := &in.AcceleratorTotalMemoryMib, &out.AcceleratorTotalMemoryMib + *out = new(InstanceRequirementsAcceleratorTotalMemoryMibParameters) + (*in).DeepCopyInto(*out) + } + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedInstanceTypes != nil { + in, out := &in.AllowedInstanceTypes, &out.AllowedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(string) + **out = **in + } + if in.BaselineEBSBandwidthMbps != nil { + in, out := &in.BaselineEBSBandwidthMbps, &out.BaselineEBSBandwidthMbps + *out = new(InstanceRequirementsBaselineEBSBandwidthMbpsParameters) + (*in).DeepCopyInto(*out) + } + if in.BurstablePerformance != nil { + in, out := &in.BurstablePerformance, 
&out.BurstablePerformance + *out = new(string) + **out = **in + } + if in.CPUManufacturers != nil { + in, out := &in.CPUManufacturers, &out.CPUManufacturers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludedInstanceTypes != nil { + in, out := &in.ExcludedInstanceTypes, &out.ExcludedInstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceGenerations != nil { + in, out := &in.InstanceGenerations, &out.InstanceGenerations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(string) + **out = **in + } + if in.LocalStorageTypes != nil { + in, out := &in.LocalStorageTypes, &out.LocalStorageTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MemoryGibPerVcpu != nil { + in, out := &in.MemoryGibPerVcpu, &out.MemoryGibPerVcpu + *out = new(InstanceRequirementsMemoryGibPerVcpuParameters) + (*in).DeepCopyInto(*out) + } + if in.MemoryMib != nil { + in, out := &in.MemoryMib, &out.MemoryMib + *out = new(InstanceRequirementsMemoryMibParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkBandwidthGbps != nil { + in, out := &in.NetworkBandwidthGbps, &out.NetworkBandwidthGbps + *out = new(InstanceRequirementsNetworkBandwidthGbpsParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceCount != nil { + in, out := &in.NetworkInterfaceCount, &out.NetworkInterfaceCount + *out = new(InstanceRequirementsNetworkInterfaceCountParameters) + (*in).DeepCopyInto(*out) + } + if 
in.OnDemandMaxPricePercentageOverLowestPrice != nil { + in, out := &in.OnDemandMaxPricePercentageOverLowestPrice, &out.OnDemandMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.RequireHibernateSupport != nil { + in, out := &in.RequireHibernateSupport, &out.RequireHibernateSupport + *out = new(bool) + **out = **in + } + if in.SpotMaxPricePercentageOverLowestPrice != nil { + in, out := &in.SpotMaxPricePercentageOverLowestPrice, &out.SpotMaxPricePercentageOverLowestPrice + *out = new(float64) + **out = **in + } + if in.TotalLocalStorageGb != nil { + in, out := &in.TotalLocalStorageGb, &out.TotalLocalStorageGb + *out = new(InstanceRequirementsTotalLocalStorageGbParameters) + (*in).DeepCopyInto(*out) + } + if in.VcpuCount != nil { + in, out := &in.VcpuCount, &out.VcpuCount + *out = new(InstanceRequirementsVcpuCountParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesInstanceRequirementsParameters. +func (in *OverridesInstanceRequirementsParameters) DeepCopy() *OverridesInstanceRequirementsParameters { + if in == nil { + return nil + } + out := new(OverridesInstanceRequirementsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverridesObservation) DeepCopyInto(out *OverridesObservation) { + *out = *in + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = new(OverridesInstanceRequirementsObservation) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesObservation. +func (in *OverridesObservation) DeepCopy() *OverridesObservation { + if in == nil { + return nil + } + out := new(OverridesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverridesParameters) DeepCopyInto(out *OverridesParameters) { + *out = *in + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.InstanceRequirements != nil { + in, out := &in.InstanceRequirements, &out.InstanceRequirements + *out = new(OverridesInstanceRequirementsParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.WeightedCapacity != nil { + in, out := &in.WeightedCapacity, &out.WeightedCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesParameters. +func (in *OverridesParameters) DeepCopy() *OverridesParameters { + if in == nil { + return nil + } + out := new(OverridesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementInitParameters) DeepCopyInto(out *PlacementInitParameters) { + *out = *in + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(string) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostResourceGroupArn != nil { + in, out := &in.HostResourceGroupArn, &out.HostResourceGroupArn + *out = new(string) + **out = **in + } + if in.PartitionNumber != nil { + in, out := &in.PartitionNumber, &out.PartitionNumber + *out = new(float64) + **out = **in + } + if in.SpreadDomain != nil { + in, out := &in.SpreadDomain, &out.SpreadDomain + *out = new(string) + **out = **in + } + if in.Tenancy != nil { + in, out := &in.Tenancy, &out.Tenancy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementInitParameters. +func (in *PlacementInitParameters) DeepCopy() *PlacementInitParameters { + if in == nil { + return nil + } + out := new(PlacementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementObservation) DeepCopyInto(out *PlacementObservation) { + *out = *in + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(string) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostResourceGroupArn != nil { + in, out := &in.HostResourceGroupArn, &out.HostResourceGroupArn + *out = new(string) + **out = **in + } + if in.PartitionNumber != nil { + in, out := &in.PartitionNumber, &out.PartitionNumber + *out = new(float64) + **out = **in + } + if in.SpreadDomain != nil { + in, out := &in.SpreadDomain, &out.SpreadDomain + *out = new(string) + **out = **in + } + if in.Tenancy != nil { + in, out := &in.Tenancy, &out.Tenancy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementObservation. +func (in *PlacementObservation) DeepCopy() *PlacementObservation { + if in == nil { + return nil + } + out := new(PlacementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementParameters) DeepCopyInto(out *PlacementParameters) { + *out = *in + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(string) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostResourceGroupArn != nil { + in, out := &in.HostResourceGroupArn, &out.HostResourceGroupArn + *out = new(string) + **out = **in + } + if in.PartitionNumber != nil { + in, out := &in.PartitionNumber, &out.PartitionNumber + *out = new(float64) + **out = **in + } + if in.SpreadDomain != nil { + in, out := &in.SpreadDomain, &out.SpreadDomain + *out = new(string) + **out = **in + } + if in.Tenancy != nil { + in, out := &in.Tenancy, &out.Tenancy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementParameters. +func (in *PlacementParameters) DeepCopy() *PlacementParameters { + if in == nil { + return nil + } + out := new(PlacementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateDNSNameOptionsInitParameters) DeepCopyInto(out *PrivateDNSNameOptionsInitParameters) { + *out = *in + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSAaaaRecord != nil { + in, out := &in.EnableResourceNameDNSAaaaRecord, &out.EnableResourceNameDNSAaaaRecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSNameOptionsInitParameters. +func (in *PrivateDNSNameOptionsInitParameters) DeepCopy() *PrivateDNSNameOptionsInitParameters { + if in == nil { + return nil + } + out := new(PrivateDNSNameOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSNameOptionsObservation) DeepCopyInto(out *PrivateDNSNameOptionsObservation) { + *out = *in + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSAaaaRecord != nil { + in, out := &in.EnableResourceNameDNSAaaaRecord, &out.EnableResourceNameDNSAaaaRecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSNameOptionsObservation. 
+func (in *PrivateDNSNameOptionsObservation) DeepCopy() *PrivateDNSNameOptionsObservation { + if in == nil { + return nil + } + out := new(PrivateDNSNameOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSNameOptionsParameters) DeepCopyInto(out *PrivateDNSNameOptionsParameters) { + *out = *in + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSAaaaRecord != nil { + in, out := &in.EnableResourceNameDNSAaaaRecord, &out.EnableResourceNameDNSAaaaRecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSNameOptionsParameters. +func (in *PrivateDNSNameOptionsParameters) DeepCopy() *PrivateDNSNameOptionsParameters { + if in == nil { + return nil + } + out := new(PrivateDNSNameOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequesterInitParameters) DeepCopyInto(out *RequesterInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequesterInitParameters. +func (in *RequesterInitParameters) DeepCopy() *RequesterInitParameters { + if in == nil { + return nil + } + out := new(RequesterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequesterObservation) DeepCopyInto(out *RequesterObservation) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequesterObservation. +func (in *RequesterObservation) DeepCopy() *RequesterObservation { + if in == nil { + return nil + } + out := new(RequesterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequesterParameters) DeepCopyInto(out *RequesterParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequesterParameters. +func (in *RequesterParameters) DeepCopy() *RequesterParameters { + if in == nil { + return nil + } + out := new(RequesterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RootBlockDeviceInitParameters) DeepCopyInto(out *RootBlockDeviceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootBlockDeviceInitParameters. 
+func (in *RootBlockDeviceInitParameters) DeepCopy() *RootBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(RootBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootBlockDeviceObservation) DeepCopyInto(out *RootBlockDeviceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeID != nil { + in, out := &in.VolumeID, &out.VolumeID + *out = new(string) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + 
in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootBlockDeviceObservation. +func (in *RootBlockDeviceObservation) DeepCopy() *RootBlockDeviceObservation { + if in == nil { + return nil + } + out := new(RootBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootBlockDeviceParameters) DeepCopyInto(out *RootBlockDeviceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + 
if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootBlockDeviceParameters. +func (in *RootBlockDeviceParameters) DeepCopy() *RootBlockDeviceParameters { + if in == nil { + return nil + } + out := new(RootBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Route) DeepCopyInto(out *Route) { *out = *in out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. +func (in *Route) DeepCopy() *Route { + if in == nil { + return nil + } + out := new(Route) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Route) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteInitParameters) DeepCopyInto(out *RouteInitParameters) { + *out = *in + if in.CarrierGatewayID != nil { + in, out := &in.CarrierGatewayID, &out.CarrierGatewayID + *out = new(string) + **out = **in + } + if in.CoreNetworkArn != nil { + in, out := &in.CoreNetworkArn, &out.CoreNetworkArn + *out = new(string) + **out = **in + } + if in.DestinationCidrBlock != nil { + in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock + *out = new(string) + **out = **in + } + if in.DestinationIPv6CidrBlock != nil { + in, out := &in.DestinationIPv6CidrBlock, &out.DestinationIPv6CidrBlock + *out = new(string) + **out = **in + } + if in.DestinationPrefixListID != nil { + in, out := &in.DestinationPrefixListID, &out.DestinationPrefixListID + *out = new(string) + **out = **in + } + if in.DestinationPrefixListIDRef != nil { + in, out := &in.DestinationPrefixListIDRef, &out.DestinationPrefixListIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DestinationPrefixListIDSelector != nil { + in, out := &in.DestinationPrefixListIDSelector, &out.DestinationPrefixListIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EgressOnlyGatewayID != nil { + in, out := &in.EgressOnlyGatewayID, &out.EgressOnlyGatewayID + *out = new(string) + **out = **in + } + if in.EgressOnlyGatewayIDRef != nil { + in, out := &in.EgressOnlyGatewayIDRef, &out.EgressOnlyGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EgressOnlyGatewayIDSelector != nil { + in, out := &in.EgressOnlyGatewayIDSelector, &out.EgressOnlyGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GatewayID != nil { + in, out := &in.GatewayID, &out.GatewayID + *out = new(string) + **out = **in + } + if in.GatewayIDRef != nil { + in, out := &in.GatewayIDRef, &out.GatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GatewayIDSelector != nil { + in, out := &in.GatewayIDSelector, &out.GatewayIDSelector + *out 
= new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LocalGatewayID != nil { + in, out := &in.LocalGatewayID, &out.LocalGatewayID + *out = new(string) + **out = **in + } + if in.NATGatewayID != nil { + in, out := &in.NATGatewayID, &out.NATGatewayID + *out = new(string) + **out = **in + } + if in.NATGatewayIDRef != nil { + in, out := &in.NATGatewayIDRef, &out.NATGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NATGatewayIDSelector != nil { + in, out := &in.NATGatewayIDSelector, &out.NATGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIDRef != nil { + in, out := &in.NetworkInterfaceIDRef, &out.NetworkInterfaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceIDSelector != nil { + in, out := &in.NetworkInterfaceIDSelector, &out.NetworkInterfaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RouteTableID != nil { + in, out := &in.RouteTableID, &out.RouteTableID + *out = new(string) + **out = **in + } + if in.RouteTableIDRef != nil { + in, out := &in.RouteTableIDRef, &out.RouteTableIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RouteTableIDSelector != nil { + in, out := &in.RouteTableIDSelector, &out.RouteTableIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TransitGatewayID != nil { + in, out := &in.TransitGatewayID, &out.TransitGatewayID + *out = new(string) + **out = **in + } + if in.TransitGatewayIDRef != nil { + in, out := &in.TransitGatewayIDRef, &out.TransitGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TransitGatewayIDSelector != nil { + in, out := &in.TransitGatewayIDSelector, &out.TransitGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCEndpointID != nil { + 
in, out := &in.VPCEndpointID, &out.VPCEndpointID + *out = new(string) + **out = **in + } + if in.VPCEndpointIDRef != nil { + in, out := &in.VPCEndpointIDRef, &out.VPCEndpointIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCEndpointIDSelector != nil { + in, out := &in.VPCEndpointIDSelector, &out.VPCEndpointIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionID != nil { + in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + *out = new(string) + **out = **in + } + if in.VPCPeeringConnectionIDRef != nil { + in, out := &in.VPCPeeringConnectionIDRef, &out.VPCPeeringConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionIDSelector != nil { + in, out := &in.VPCPeeringConnectionIDSelector, &out.VPCPeeringConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteInitParameters. +func (in *RouteInitParameters) DeepCopy() *RouteInitParameters { + if in == nil { + return nil + } + out := new(RouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteList) DeepCopyInto(out *RouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Route, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteList. +func (in *RouteList) DeepCopy() *RouteList { + if in == nil { + return nil + } + out := new(RouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteObservation) DeepCopyInto(out *RouteObservation) { + *out = *in + if in.CarrierGatewayID != nil { + in, out := &in.CarrierGatewayID, &out.CarrierGatewayID + *out = new(string) + **out = **in + } + if in.CoreNetworkArn != nil { + in, out := &in.CoreNetworkArn, &out.CoreNetworkArn + *out = new(string) + **out = **in + } + if in.DestinationCidrBlock != nil { + in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock + *out = new(string) + **out = **in + } + if in.DestinationIPv6CidrBlock != nil { + in, out := &in.DestinationIPv6CidrBlock, &out.DestinationIPv6CidrBlock + *out = new(string) + **out = **in + } + if in.DestinationPrefixListID != nil { + in, out := &in.DestinationPrefixListID, &out.DestinationPrefixListID + *out = new(string) + **out = **in + } + if in.EgressOnlyGatewayID != nil { + in, out := &in.EgressOnlyGatewayID, &out.EgressOnlyGatewayID + *out = new(string) + **out = **in + } + if in.GatewayID != nil { + in, out := &in.GatewayID, &out.GatewayID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceOwnerID != nil { + in, out := &in.InstanceOwnerID, &out.InstanceOwnerID + *out = new(string) + **out = **in + } + if in.LocalGatewayID != nil { + in, out := &in.LocalGatewayID, &out.LocalGatewayID + *out = new(string) + **out = **in + } + if in.NATGatewayID != nil { + in, out := &in.NATGatewayID, &out.NATGatewayID + *out = new(string) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } + if 
in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = new(string) + **out = **in + } + if in.RouteTableID != nil { + in, out := &in.RouteTableID, &out.RouteTableID + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.TransitGatewayID != nil { + in, out := &in.TransitGatewayID, &out.TransitGatewayID + *out = new(string) + **out = **in + } + if in.VPCEndpointID != nil { + in, out := &in.VPCEndpointID, &out.VPCEndpointID + *out = new(string) + **out = **in + } + if in.VPCPeeringConnectionID != nil { + in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteObservation. +func (in *RouteObservation) DeepCopy() *RouteObservation { + if in == nil { + return nil + } + out := new(RouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteParameters) DeepCopyInto(out *RouteParameters) { + *out = *in + if in.CarrierGatewayID != nil { + in, out := &in.CarrierGatewayID, &out.CarrierGatewayID + *out = new(string) + **out = **in + } + if in.CoreNetworkArn != nil { + in, out := &in.CoreNetworkArn, &out.CoreNetworkArn + *out = new(string) + **out = **in + } + if in.DestinationCidrBlock != nil { + in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock + *out = new(string) + **out = **in + } + if in.DestinationIPv6CidrBlock != nil { + in, out := &in.DestinationIPv6CidrBlock, &out.DestinationIPv6CidrBlock + *out = new(string) + **out = **in + } + if in.DestinationPrefixListID != nil { + in, out := &in.DestinationPrefixListID, &out.DestinationPrefixListID + *out = new(string) + **out = **in + } + if in.DestinationPrefixListIDRef != nil { + in, out := &in.DestinationPrefixListIDRef, &out.DestinationPrefixListIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DestinationPrefixListIDSelector != nil { + in, out := &in.DestinationPrefixListIDSelector, &out.DestinationPrefixListIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EgressOnlyGatewayID != nil { + in, out := &in.EgressOnlyGatewayID, &out.EgressOnlyGatewayID + *out = new(string) + **out = **in + } + if in.EgressOnlyGatewayIDRef != nil { + in, out := &in.EgressOnlyGatewayIDRef, &out.EgressOnlyGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EgressOnlyGatewayIDSelector != nil { + in, out := &in.EgressOnlyGatewayIDSelector, &out.EgressOnlyGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GatewayID != nil { + in, out := &in.GatewayID, &out.GatewayID + *out = new(string) + **out = **in + } + if in.GatewayIDRef != nil { + in, out := &in.GatewayIDRef, &out.GatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GatewayIDSelector != nil { + in, out := &in.GatewayIDSelector, &out.GatewayIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LocalGatewayID != nil { + in, out := &in.LocalGatewayID, &out.LocalGatewayID + *out = new(string) + **out = **in + } + if in.NATGatewayID != nil { + in, out := &in.NATGatewayID, &out.NATGatewayID + *out = new(string) + **out = **in + } + if in.NATGatewayIDRef != nil { + in, out := &in.NATGatewayIDRef, &out.NATGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NATGatewayIDSelector != nil { + in, out := &in.NATGatewayIDSelector, &out.NATGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIDRef != nil { + in, out := &in.NetworkInterfaceIDRef, &out.NetworkInterfaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkInterfaceIDSelector != nil { + in, out := &in.NetworkInterfaceIDSelector, &out.NetworkInterfaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RouteTableID != nil { + in, out := &in.RouteTableID, &out.RouteTableID + *out = new(string) + **out = **in + } + if in.RouteTableIDRef != nil { + in, out := &in.RouteTableIDRef, &out.RouteTableIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RouteTableIDSelector != nil { + in, out := &in.RouteTableIDSelector, &out.RouteTableIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TransitGatewayID != nil { + in, out := &in.TransitGatewayID, &out.TransitGatewayID + *out = new(string) + **out = **in + } + if in.TransitGatewayIDRef != nil { + in, out := &in.TransitGatewayIDRef, &out.TransitGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TransitGatewayIDSelector != nil { + in, out := &in.TransitGatewayIDSelector, 
&out.TransitGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCEndpointID != nil { + in, out := &in.VPCEndpointID, &out.VPCEndpointID + *out = new(string) + **out = **in + } + if in.VPCEndpointIDRef != nil { + in, out := &in.VPCEndpointIDRef, &out.VPCEndpointIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCEndpointIDSelector != nil { + in, out := &in.VPCEndpointIDSelector, &out.VPCEndpointIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionID != nil { + in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + *out = new(string) + **out = **in + } + if in.VPCPeeringConnectionIDRef != nil { + in, out := &in.VPCPeeringConnectionIDRef, &out.VPCPeeringConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionIDSelector != nil { + in, out := &in.VPCPeeringConnectionIDSelector, &out.VPCPeeringConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteParameters. +func (in *RouteParameters) DeepCopy() *RouteParameters { + if in == nil { + return nil + } + out := new(RouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSpec) DeepCopyInto(out *RouteSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpec. +func (in *RouteSpec) DeepCopy() *RouteSpec { + if in == nil { + return nil + } + out := new(RouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. +func (in *RouteStatus) DeepCopy() *RouteStatus { + if in == nil { + return nil + } + out := new(RouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutesInitParameters) DeepCopyInto(out *RoutesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutesInitParameters. +func (in *RoutesInitParameters) DeepCopy() *RoutesInitParameters { + if in == nil { + return nil + } + out := new(RoutesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutesObservation) DeepCopyInto(out *RoutesObservation) { + *out = *in + if in.DestinationCidrBlock != nil { + in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutesObservation. +func (in *RoutesObservation) DeepCopy() *RoutesObservation { + if in == nil { + return nil + } + out := new(RoutesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutesParameters) DeepCopyInto(out *RoutesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutesParameters. +func (in *RoutesParameters) DeepCopy() *RoutesParameters { + if in == nil { + return nil + } + out := new(RoutesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourcePortRangeInitParameters) DeepCopyInto(out *SourcePortRangeInitParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourcePortRangeInitParameters. +func (in *SourcePortRangeInitParameters) DeepCopy() *SourcePortRangeInitParameters { + if in == nil { + return nil + } + out := new(SourcePortRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourcePortRangeObservation) DeepCopyInto(out *SourcePortRangeObservation) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourcePortRangeObservation. +func (in *SourcePortRangeObservation) DeepCopy() *SourcePortRangeObservation { + if in == nil { + return nil + } + out := new(SourcePortRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourcePortRangeParameters) DeepCopyInto(out *SourcePortRangeParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourcePortRangeParameters. +func (in *SourcePortRangeParameters) DeepCopy() *SourcePortRangeParameters { + if in == nil { + return nil + } + out := new(SourcePortRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotFleetRequest) DeepCopyInto(out *SpotFleetRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotFleetRequest. +func (in *SpotFleetRequest) DeepCopy() *SpotFleetRequest { + if in == nil { + return nil + } + out := new(SpotFleetRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpotFleetRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotFleetRequestInitParameters) DeepCopyInto(out *SpotFleetRequestInitParameters) { + *out = *in + if in.AllocationStrategy != nil { + in, out := &in.AllocationStrategy, &out.AllocationStrategy + *out = new(string) + **out = **in + } + if in.Context != nil { + in, out := &in.Context, &out.Context + *out = new(string) + **out = **in + } + if in.ExcessCapacityTerminationPolicy != nil { + in, out := &in.ExcessCapacityTerminationPolicy, &out.ExcessCapacityTerminationPolicy + *out = new(string) + **out = **in + } + if in.FleetType != nil { + in, out := &in.FleetType, &out.FleetType + *out = new(string) + **out = **in + } + if in.IAMFleetRole != nil { + in, out := &in.IAMFleetRole, &out.IAMFleetRole + *out = new(string) + **out = **in + } + if in.InstanceInterruptionBehaviour != nil { + in, out := &in.InstanceInterruptionBehaviour, &out.InstanceInterruptionBehaviour + *out = new(string) + **out = **in + } + if in.InstancePoolsToUseCount != nil { + in, out := &in.InstancePoolsToUseCount, &out.InstancePoolsToUseCount + *out = new(float64) + **out = **in + } + if in.LaunchSpecification != nil { + in, out := &in.LaunchSpecification, &out.LaunchSpecification + *out = make([]LaunchSpecificationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LaunchTemplateConfig != nil { + in, out := &in.LaunchTemplateConfig, &out.LaunchTemplateConfig + *out = make([]LaunchTemplateConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancers != nil { + in, out := &in.LoadBalancers, &out.LoadBalancers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OnDemandAllocationStrategy != nil { + in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy + *out = new(string) + **out = **in + } + if in.OnDemandMaxTotalPrice != nil { + in, out := 
&in.OnDemandMaxTotalPrice, &out.OnDemandMaxTotalPrice + *out = new(string) + **out = **in + } + if in.OnDemandTargetCapacity != nil { + in, out := &in.OnDemandTargetCapacity, &out.OnDemandTargetCapacity + *out = new(float64) + **out = **in + } + if in.ReplaceUnhealthyInstances != nil { + in, out := &in.ReplaceUnhealthyInstances, &out.ReplaceUnhealthyInstances + *out = new(bool) + **out = **in + } + if in.SpotMaintenanceStrategies != nil { + in, out := &in.SpotMaintenanceStrategies, &out.SpotMaintenanceStrategies + *out = new(SpotMaintenanceStrategiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetCapacity != nil { + in, out := &in.TargetCapacity, &out.TargetCapacity + *out = new(float64) + **out = **in + } + if in.TargetCapacityUnitType != nil { + in, out := &in.TargetCapacityUnitType, &out.TargetCapacityUnitType + *out = new(string) + **out = **in + } + if in.TargetGroupArns != nil { + in, out := &in.TargetGroupArns, &out.TargetGroupArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TerminateInstancesOnDelete != nil { + in, out := &in.TerminateInstancesOnDelete, &out.TerminateInstancesOnDelete + *out = new(string) + **out = **in + } + if in.TerminateInstancesWithExpiration != nil { + in, out := &in.TerminateInstancesWithExpiration, &out.TerminateInstancesWithExpiration + *out = new(bool) + **out = **in + } + if in.ValidFrom != nil { + in, out := &in.ValidFrom, &out.ValidFrom + *out = new(string) + **out = 
**in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } + if in.WaitForFulfillment != nil { + in, out := &in.WaitForFulfillment, &out.WaitForFulfillment + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotFleetRequestInitParameters. +func (in *SpotFleetRequestInitParameters) DeepCopy() *SpotFleetRequestInitParameters { + if in == nil { + return nil + } + out := new(SpotFleetRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotFleetRequestList) DeepCopyInto(out *SpotFleetRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpotFleetRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotFleetRequestList. +func (in *SpotFleetRequestList) DeepCopy() *SpotFleetRequestList { + if in == nil { + return nil + } + out := new(SpotFleetRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpotFleetRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotFleetRequestObservation) DeepCopyInto(out *SpotFleetRequestObservation) { + *out = *in + if in.AllocationStrategy != nil { + in, out := &in.AllocationStrategy, &out.AllocationStrategy + *out = new(string) + **out = **in + } + if in.ClientToken != nil { + in, out := &in.ClientToken, &out.ClientToken + *out = new(string) + **out = **in + } + if in.Context != nil { + in, out := &in.Context, &out.Context + *out = new(string) + **out = **in + } + if in.ExcessCapacityTerminationPolicy != nil { + in, out := &in.ExcessCapacityTerminationPolicy, &out.ExcessCapacityTerminationPolicy + *out = new(string) + **out = **in + } + if in.FleetType != nil { + in, out := &in.FleetType, &out.FleetType + *out = new(string) + **out = **in + } + if in.IAMFleetRole != nil { + in, out := &in.IAMFleetRole, &out.IAMFleetRole + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceInterruptionBehaviour != nil { + in, out := &in.InstanceInterruptionBehaviour, &out.InstanceInterruptionBehaviour + *out = new(string) + **out = **in + } + if in.InstancePoolsToUseCount != nil { + in, out := &in.InstancePoolsToUseCount, &out.InstancePoolsToUseCount + *out = new(float64) + **out = **in + } + if in.LaunchSpecification != nil { + in, out := &in.LaunchSpecification, &out.LaunchSpecification + *out = make([]LaunchSpecificationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LaunchTemplateConfig != nil { + in, out := &in.LaunchTemplateConfig, &out.LaunchTemplateConfig + *out = make([]LaunchTemplateConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancers != nil { + in, out := &in.LoadBalancers, &out.LoadBalancers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if 
in.OnDemandAllocationStrategy != nil { + in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy + *out = new(string) + **out = **in + } + if in.OnDemandMaxTotalPrice != nil { + in, out := &in.OnDemandMaxTotalPrice, &out.OnDemandMaxTotalPrice + *out = new(string) + **out = **in + } + if in.OnDemandTargetCapacity != nil { + in, out := &in.OnDemandTargetCapacity, &out.OnDemandTargetCapacity + *out = new(float64) + **out = **in + } + if in.ReplaceUnhealthyInstances != nil { + in, out := &in.ReplaceUnhealthyInstances, &out.ReplaceUnhealthyInstances + *out = new(bool) + **out = **in + } + if in.SpotMaintenanceStrategies != nil { + in, out := &in.SpotMaintenanceStrategies, &out.SpotMaintenanceStrategies + *out = new(SpotMaintenanceStrategiesObservation) + (*in).DeepCopyInto(*out) + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SpotRequestState != nil { + in, out := &in.SpotRequestState, &out.SpotRequestState + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetCapacity != nil { + in, out := &in.TargetCapacity, &out.TargetCapacity + *out = new(float64) + **out = **in + } + if in.TargetCapacityUnitType != nil { + in, out := &in.TargetCapacityUnitType, &out.TargetCapacityUnitType + *out = new(string) + **out = **in + } + if in.TargetGroupArns 
!= nil { + in, out := &in.TargetGroupArns, &out.TargetGroupArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TerminateInstancesOnDelete != nil { + in, out := &in.TerminateInstancesOnDelete, &out.TerminateInstancesOnDelete + *out = new(string) + **out = **in + } + if in.TerminateInstancesWithExpiration != nil { + in, out := &in.TerminateInstancesWithExpiration, &out.TerminateInstancesWithExpiration + *out = new(bool) + **out = **in + } + if in.ValidFrom != nil { + in, out := &in.ValidFrom, &out.ValidFrom + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } + if in.WaitForFulfillment != nil { + in, out := &in.WaitForFulfillment, &out.WaitForFulfillment + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotFleetRequestObservation. +func (in *SpotFleetRequestObservation) DeepCopy() *SpotFleetRequestObservation { + if in == nil { + return nil + } + out := new(SpotFleetRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotFleetRequestParameters) DeepCopyInto(out *SpotFleetRequestParameters) { + *out = *in + if in.AllocationStrategy != nil { + in, out := &in.AllocationStrategy, &out.AllocationStrategy + *out = new(string) + **out = **in + } + if in.Context != nil { + in, out := &in.Context, &out.Context + *out = new(string) + **out = **in + } + if in.ExcessCapacityTerminationPolicy != nil { + in, out := &in.ExcessCapacityTerminationPolicy, &out.ExcessCapacityTerminationPolicy + *out = new(string) + **out = **in + } + if in.FleetType != nil { + in, out := &in.FleetType, &out.FleetType + *out = new(string) + **out = **in + } + if in.IAMFleetRole != nil { + in, out := &in.IAMFleetRole, &out.IAMFleetRole + *out = new(string) + **out = **in + } + if in.InstanceInterruptionBehaviour != nil { + in, out := &in.InstanceInterruptionBehaviour, &out.InstanceInterruptionBehaviour + *out = new(string) + **out = **in + } + if in.InstancePoolsToUseCount != nil { + in, out := &in.InstancePoolsToUseCount, &out.InstancePoolsToUseCount + *out = new(float64) + **out = **in + } + if in.LaunchSpecification != nil { + in, out := &in.LaunchSpecification, &out.LaunchSpecification + *out = make([]LaunchSpecificationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LaunchTemplateConfig != nil { + in, out := &in.LaunchTemplateConfig, &out.LaunchTemplateConfig + *out = make([]LaunchTemplateConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancers != nil { + in, out := &in.LoadBalancers, &out.LoadBalancers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OnDemandAllocationStrategy != nil { + in, out := &in.OnDemandAllocationStrategy, &out.OnDemandAllocationStrategy + *out = new(string) + **out = **in + } + if in.OnDemandMaxTotalPrice != nil { + in, out := 
&in.OnDemandMaxTotalPrice, &out.OnDemandMaxTotalPrice + *out = new(string) + **out = **in + } + if in.OnDemandTargetCapacity != nil { + in, out := &in.OnDemandTargetCapacity, &out.OnDemandTargetCapacity + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReplaceUnhealthyInstances != nil { + in, out := &in.ReplaceUnhealthyInstances, &out.ReplaceUnhealthyInstances + *out = new(bool) + **out = **in + } + if in.SpotMaintenanceStrategies != nil { + in, out := &in.SpotMaintenanceStrategies, &out.SpotMaintenanceStrategies + *out = new(SpotMaintenanceStrategiesParameters) + (*in).DeepCopyInto(*out) + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetCapacity != nil { + in, out := &in.TargetCapacity, &out.TargetCapacity + *out = new(float64) + **out = **in + } + if in.TargetCapacityUnitType != nil { + in, out := &in.TargetCapacityUnitType, &out.TargetCapacityUnitType + *out = new(string) + **out = **in + } + if in.TargetGroupArns != nil { + in, out := &in.TargetGroupArns, &out.TargetGroupArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TerminateInstancesOnDelete != nil { + in, out := &in.TerminateInstancesOnDelete, &out.TerminateInstancesOnDelete + *out = new(string) + **out = **in + } + if in.TerminateInstancesWithExpiration != nil { + in, out := &in.TerminateInstancesWithExpiration, &out.TerminateInstancesWithExpiration + *out = new(bool) + **out = **in + } + 
if in.ValidFrom != nil { + in, out := &in.ValidFrom, &out.ValidFrom + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } + if in.WaitForFulfillment != nil { + in, out := &in.WaitForFulfillment, &out.WaitForFulfillment + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotFleetRequestParameters. +func (in *SpotFleetRequestParameters) DeepCopy() *SpotFleetRequestParameters { + if in == nil { + return nil + } + out := new(SpotFleetRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotFleetRequestSpec) DeepCopyInto(out *SpotFleetRequestSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotFleetRequestSpec. +func (in *SpotFleetRequestSpec) DeepCopy() *SpotFleetRequestSpec { + if in == nil { + return nil + } + out := new(SpotFleetRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotFleetRequestStatus) DeepCopyInto(out *SpotFleetRequestStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotFleetRequestStatus. 
+func (in *SpotFleetRequestStatus) DeepCopy() *SpotFleetRequestStatus { + if in == nil { + return nil + } + out := new(SpotFleetRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequest) DeepCopyInto(out *SpotInstanceRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequest. +func (in *SpotInstanceRequest) DeepCopy() *SpotInstanceRequest { + if in == nil { + return nil + } + out := new(SpotInstanceRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpotInstanceRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestCPUOptionsInitParameters) DeepCopyInto(out *SpotInstanceRequestCPUOptionsInitParameters) { + *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } + if in.CoreCount != nil { + in, out := &in.CoreCount, &out.CoreCount + *out = new(float64) + **out = **in + } + if in.ThreadsPerCore != nil { + in, out := &in.ThreadsPerCore, &out.ThreadsPerCore + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCPUOptionsInitParameters. 
+func (in *SpotInstanceRequestCPUOptionsInitParameters) DeepCopy() *SpotInstanceRequestCPUOptionsInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCPUOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestCPUOptionsObservation) DeepCopyInto(out *SpotInstanceRequestCPUOptionsObservation) { + *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } + if in.CoreCount != nil { + in, out := &in.CoreCount, &out.CoreCount + *out = new(float64) + **out = **in + } + if in.ThreadsPerCore != nil { + in, out := &in.ThreadsPerCore, &out.ThreadsPerCore + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCPUOptionsObservation. +func (in *SpotInstanceRequestCPUOptionsObservation) DeepCopy() *SpotInstanceRequestCPUOptionsObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCPUOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestCPUOptionsParameters) DeepCopyInto(out *SpotInstanceRequestCPUOptionsParameters) { + *out = *in + if in.AmdSevSnp != nil { + in, out := &in.AmdSevSnp, &out.AmdSevSnp + *out = new(string) + **out = **in + } + if in.CoreCount != nil { + in, out := &in.CoreCount, &out.CoreCount + *out = new(float64) + **out = **in + } + if in.ThreadsPerCore != nil { + in, out := &in.ThreadsPerCore, &out.ThreadsPerCore + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCPUOptionsParameters. 
+func (in *SpotInstanceRequestCPUOptionsParameters) DeepCopy() *SpotInstanceRequestCPUOptionsParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCPUOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetInitParameters) DeepCopyInto(out *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetInitParameters) { + *out = *in + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } + if in.CapacityReservationResourceGroupArn != nil { + in, out := &in.CapacityReservationResourceGroupArn, &out.CapacityReservationResourceGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetInitParameters. +func (in *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetInitParameters) DeepCopy() *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetObservation) DeepCopyInto(out *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetObservation) { + *out = *in + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } + if in.CapacityReservationResourceGroupArn != nil { + in, out := &in.CapacityReservationResourceGroupArn, &out.CapacityReservationResourceGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetObservation. +func (in *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetObservation) DeepCopy() *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetParameters) DeepCopyInto(out *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetParameters) { + *out = *in + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } + if in.CapacityReservationResourceGroupArn != nil { + in, out := &in.CapacityReservationResourceGroupArn, &out.CapacityReservationResourceGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetParameters. 
+func (in *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetParameters) DeepCopy() *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestCapacityReservationSpecificationInitParameters) DeepCopyInto(out *SpotInstanceRequestCapacityReservationSpecificationInitParameters) { + *out = *in + if in.CapacityReservationPreference != nil { + in, out := &in.CapacityReservationPreference, &out.CapacityReservationPreference + *out = new(string) + **out = **in + } + if in.CapacityReservationTarget != nil { + in, out := &in.CapacityReservationTarget, &out.CapacityReservationTarget + *out = new(SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCapacityReservationSpecificationInitParameters. +func (in *SpotInstanceRequestCapacityReservationSpecificationInitParameters) DeepCopy() *SpotInstanceRequestCapacityReservationSpecificationInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCapacityReservationSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestCapacityReservationSpecificationObservation) DeepCopyInto(out *SpotInstanceRequestCapacityReservationSpecificationObservation) { + *out = *in + if in.CapacityReservationPreference != nil { + in, out := &in.CapacityReservationPreference, &out.CapacityReservationPreference + *out = new(string) + **out = **in + } + if in.CapacityReservationTarget != nil { + in, out := &in.CapacityReservationTarget, &out.CapacityReservationTarget + *out = new(SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCapacityReservationSpecificationObservation. +func (in *SpotInstanceRequestCapacityReservationSpecificationObservation) DeepCopy() *SpotInstanceRequestCapacityReservationSpecificationObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCapacityReservationSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestCapacityReservationSpecificationParameters) DeepCopyInto(out *SpotInstanceRequestCapacityReservationSpecificationParameters) { + *out = *in + if in.CapacityReservationPreference != nil { + in, out := &in.CapacityReservationPreference, &out.CapacityReservationPreference + *out = new(string) + **out = **in + } + if in.CapacityReservationTarget != nil { + in, out := &in.CapacityReservationTarget, &out.CapacityReservationTarget + *out = new(SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCapacityReservationSpecificationParameters. 
+func (in *SpotInstanceRequestCapacityReservationSpecificationParameters) DeepCopy() *SpotInstanceRequestCapacityReservationSpecificationParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCapacityReservationSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestCreditSpecificationInitParameters) DeepCopyInto(out *SpotInstanceRequestCreditSpecificationInitParameters) { + *out = *in + if in.CPUCredits != nil { + in, out := &in.CPUCredits, &out.CPUCredits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCreditSpecificationInitParameters. +func (in *SpotInstanceRequestCreditSpecificationInitParameters) DeepCopy() *SpotInstanceRequestCreditSpecificationInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCreditSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestCreditSpecificationObservation) DeepCopyInto(out *SpotInstanceRequestCreditSpecificationObservation) { + *out = *in + if in.CPUCredits != nil { + in, out := &in.CPUCredits, &out.CPUCredits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCreditSpecificationObservation. +func (in *SpotInstanceRequestCreditSpecificationObservation) DeepCopy() *SpotInstanceRequestCreditSpecificationObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCreditSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SpotInstanceRequestCreditSpecificationParameters) DeepCopyInto(out *SpotInstanceRequestCreditSpecificationParameters) { + *out = *in + if in.CPUCredits != nil { + in, out := &in.CPUCredits, &out.CPUCredits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestCreditSpecificationParameters. +func (in *SpotInstanceRequestCreditSpecificationParameters) DeepCopy() *SpotInstanceRequestCreditSpecificationParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestCreditSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestEBSBlockDeviceInitParameters) DeepCopyInto(out *SpotInstanceRequestEBSBlockDeviceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, 
&out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestEBSBlockDeviceInitParameters. +func (in *SpotInstanceRequestEBSBlockDeviceInitParameters) DeepCopy() *SpotInstanceRequestEBSBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestEBSBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestEBSBlockDeviceObservation) DeepCopyInto(out *SpotInstanceRequestEBSBlockDeviceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeID != nil { + in, out := &in.VolumeID, &out.VolumeID + *out = new(string) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new SpotInstanceRequestEBSBlockDeviceObservation. +func (in *SpotInstanceRequestEBSBlockDeviceObservation) DeepCopy() *SpotInstanceRequestEBSBlockDeviceObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestEBSBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestEBSBlockDeviceParameters) DeepCopyInto(out *SpotInstanceRequestEBSBlockDeviceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if 
in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestEBSBlockDeviceParameters. +func (in *SpotInstanceRequestEBSBlockDeviceParameters) DeepCopy() *SpotInstanceRequestEBSBlockDeviceParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestEBSBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestEnclaveOptionsInitParameters) DeepCopyInto(out *SpotInstanceRequestEnclaveOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestEnclaveOptionsInitParameters. +func (in *SpotInstanceRequestEnclaveOptionsInitParameters) DeepCopy() *SpotInstanceRequestEnclaveOptionsInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestEnclaveOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestEnclaveOptionsObservation) DeepCopyInto(out *SpotInstanceRequestEnclaveOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestEnclaveOptionsObservation. 
+func (in *SpotInstanceRequestEnclaveOptionsObservation) DeepCopy() *SpotInstanceRequestEnclaveOptionsObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestEnclaveOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestEnclaveOptionsParameters) DeepCopyInto(out *SpotInstanceRequestEnclaveOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestEnclaveOptionsParameters. +func (in *SpotInstanceRequestEnclaveOptionsParameters) DeepCopy() *SpotInstanceRequestEnclaveOptionsParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestEnclaveOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestEphemeralBlockDeviceInitParameters) DeepCopyInto(out *SpotInstanceRequestEphemeralBlockDeviceInitParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestEphemeralBlockDeviceInitParameters. 
+func (in *SpotInstanceRequestEphemeralBlockDeviceInitParameters) DeepCopy() *SpotInstanceRequestEphemeralBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestEphemeralBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestEphemeralBlockDeviceObservation) DeepCopyInto(out *SpotInstanceRequestEphemeralBlockDeviceObservation) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestEphemeralBlockDeviceObservation. +func (in *SpotInstanceRequestEphemeralBlockDeviceObservation) DeepCopy() *SpotInstanceRequestEphemeralBlockDeviceObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestEphemeralBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestEphemeralBlockDeviceParameters) DeepCopyInto(out *SpotInstanceRequestEphemeralBlockDeviceParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestEphemeralBlockDeviceParameters. +func (in *SpotInstanceRequestEphemeralBlockDeviceParameters) DeepCopy() *SpotInstanceRequestEphemeralBlockDeviceParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestEphemeralBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestInitParameters) DeepCopyInto(out *SpotInstanceRequestInitParameters) { + *out = *in + if in.AMI != nil { + in, out := &in.AMI, &out.AMI + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.BlockDurationMinutes != nil { + in, out := &in.BlockDurationMinutes, &out.BlockDurationMinutes + *out = new(float64) + **out = **in + } + if in.CPUCoreCount != nil { + in, out := &in.CPUCoreCount, &out.CPUCoreCount + *out = new(float64) + **out = **in + } + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(SpotInstanceRequestCPUOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CPUThreadsPerCore != nil { + in, out := &in.CPUThreadsPerCore, &out.CPUThreadsPerCore + *out = new(float64) + **out = **in + } + if in.CapacityReservationSpecification != nil { + in, out := &in.CapacityReservationSpecification, &out.CapacityReservationSpecification + *out = new(SpotInstanceRequestCapacityReservationSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CreditSpecification != nil { + in, out := &in.CreditSpecification, &out.CreditSpecification + *out = new(SpotInstanceRequestCreditSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableAPIStop != nil { + in, out := &in.DisableAPIStop, &out.DisableAPIStop + *out = new(bool) + **out = **in + } + if in.DisableAPITermination != nil { + in, out := &in.DisableAPITermination, &out.DisableAPITermination + *out = new(bool) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]SpotInstanceRequestEBSBlockDeviceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EnclaveOptions != nil { + in, out := &in.EnclaveOptions, &out.EnclaveOptions + *out = new(SpotInstanceRequestEnclaveOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]SpotInstanceRequestEphemeralBlockDeviceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GetPasswordData != nil { + in, out := &in.GetPasswordData, &out.GetPasswordData + *out = new(bool) + **out = **in + } + if in.Hibernation != nil { + in, out := &in.Hibernation, &out.Hibernation + *out = new(bool) + **out = **in + } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostResourceGroupArn != nil { + in, out := &in.HostResourceGroupArn, &out.HostResourceGroupArn + *out = new(string) + **out = **in + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.IPv6AddressCount != nil { + in, out := &in.IPv6AddressCount, &out.IPv6AddressCount + *out = new(float64) + **out = **in + } + if in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceInitiatedShutdownBehavior != nil { + in, out := &in.InstanceInitiatedShutdownBehavior, &out.InstanceInitiatedShutdownBehavior + *out = new(string) + **out = **in + } + if in.InstanceInterruptionBehavior != nil { + in, out := &in.InstanceInterruptionBehavior, &out.InstanceInterruptionBehavior + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = 
**in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.LaunchGroup != nil { + in, out := &in.LaunchGroup, &out.LaunchGroup + *out = new(string) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(SpotInstanceRequestLaunchTemplateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceOptions != nil { + in, out := &in.MaintenanceOptions, &out.MaintenanceOptions + *out = new(SpotInstanceRequestMaintenanceOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(SpotInstanceRequestMetadataOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(bool) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]SpotInstanceRequestNetworkInterfaceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementPartitionNumber != nil { + in, out := &in.PlacementPartitionNumber, &out.PlacementPartitionNumber + *out = new(float64) + **out = **in + } + if in.PrivateDNSNameOptions != nil { + in, out := &in.PrivateDNSNameOptions, &out.PrivateDNSNameOptions + *out = new(SpotInstanceRequestPrivateDNSNameOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateIP != nil { + in, out := &in.PrivateIP, &out.PrivateIP + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = new(SpotInstanceRequestRootBlockDeviceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecondaryPrivateIps != nil { + in, out := &in.SecondaryPrivateIps, 
&out.SecondaryPrivateIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SourceDestCheck != nil { + in, out := &in.SourceDestCheck, &out.SourceDestCheck + *out = new(bool) + **out = **in + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SpotType != nil { + in, out := &in.SpotType, &out.SpotType + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tenancy != nil { + in, out := &in.Tenancy, &out.Tenancy + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } + if in.UserDataReplaceOnChange != nil { + in, out := &in.UserDataReplaceOnChange, &out.UserDataReplaceOnChange + *out = new(bool) + **out = **in + } + if 
in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ValidFrom != nil { + in, out := &in.ValidFrom, &out.ValidFrom + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } + if in.VolumeTags != nil { + in, out := &in.VolumeTags, &out.VolumeTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WaitForFulfillment != nil { + in, out := &in.WaitForFulfillment, &out.WaitForFulfillment + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestInitParameters. +func (in *SpotInstanceRequestInitParameters) DeepCopy() *SpotInstanceRequestInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestLaunchTemplateInitParameters) DeepCopyInto(out *SpotInstanceRequestLaunchTemplateInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestLaunchTemplateInitParameters. +func (in *SpotInstanceRequestLaunchTemplateInitParameters) DeepCopy() *SpotInstanceRequestLaunchTemplateInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestLaunchTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestLaunchTemplateObservation) DeepCopyInto(out *SpotInstanceRequestLaunchTemplateObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestLaunchTemplateObservation. +func (in *SpotInstanceRequestLaunchTemplateObservation) DeepCopy() *SpotInstanceRequestLaunchTemplateObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestLaunchTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestLaunchTemplateParameters) DeepCopyInto(out *SpotInstanceRequestLaunchTemplateParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestLaunchTemplateParameters. +func (in *SpotInstanceRequestLaunchTemplateParameters) DeepCopy() *SpotInstanceRequestLaunchTemplateParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestLaunchTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestList) DeepCopyInto(out *SpotInstanceRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpotInstanceRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestList. +func (in *SpotInstanceRequestList) DeepCopy() *SpotInstanceRequestList { + if in == nil { + return nil + } + out := new(SpotInstanceRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpotInstanceRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestMaintenanceOptionsInitParameters) DeepCopyInto(out *SpotInstanceRequestMaintenanceOptionsInitParameters) { + *out = *in + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestMaintenanceOptionsInitParameters. +func (in *SpotInstanceRequestMaintenanceOptionsInitParameters) DeepCopy() *SpotInstanceRequestMaintenanceOptionsInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestMaintenanceOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestMaintenanceOptionsObservation) DeepCopyInto(out *SpotInstanceRequestMaintenanceOptionsObservation) { + *out = *in + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestMaintenanceOptionsObservation. +func (in *SpotInstanceRequestMaintenanceOptionsObservation) DeepCopy() *SpotInstanceRequestMaintenanceOptionsObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestMaintenanceOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestMaintenanceOptionsParameters) DeepCopyInto(out *SpotInstanceRequestMaintenanceOptionsParameters) { + *out = *in + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestMaintenanceOptionsParameters. 
+func (in *SpotInstanceRequestMaintenanceOptionsParameters) DeepCopy() *SpotInstanceRequestMaintenanceOptionsParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestMaintenanceOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestMetadataOptionsInitParameters) DeepCopyInto(out *SpotInstanceRequestMetadataOptionsInitParameters) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPProtocolIPv6 != nil { + in, out := &in.HTTPProtocolIPv6, &out.HTTPProtocolIPv6 + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } + if in.InstanceMetadataTags != nil { + in, out := &in.InstanceMetadataTags, &out.InstanceMetadataTags + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestMetadataOptionsInitParameters. +func (in *SpotInstanceRequestMetadataOptionsInitParameters) DeepCopy() *SpotInstanceRequestMetadataOptionsInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestMetadataOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestMetadataOptionsObservation) DeepCopyInto(out *SpotInstanceRequestMetadataOptionsObservation) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPProtocolIPv6 != nil { + in, out := &in.HTTPProtocolIPv6, &out.HTTPProtocolIPv6 + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } + if in.InstanceMetadataTags != nil { + in, out := &in.InstanceMetadataTags, &out.InstanceMetadataTags + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestMetadataOptionsObservation. +func (in *SpotInstanceRequestMetadataOptionsObservation) DeepCopy() *SpotInstanceRequestMetadataOptionsObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestMetadataOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestMetadataOptionsParameters) DeepCopyInto(out *SpotInstanceRequestMetadataOptionsParameters) { + *out = *in + if in.HTTPEndpoint != nil { + in, out := &in.HTTPEndpoint, &out.HTTPEndpoint + *out = new(string) + **out = **in + } + if in.HTTPProtocolIPv6 != nil { + in, out := &in.HTTPProtocolIPv6, &out.HTTPProtocolIPv6 + *out = new(string) + **out = **in + } + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } + if in.InstanceMetadataTags != nil { + in, out := &in.InstanceMetadataTags, &out.InstanceMetadataTags + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestMetadataOptionsParameters. +func (in *SpotInstanceRequestMetadataOptionsParameters) DeepCopy() *SpotInstanceRequestMetadataOptionsParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestMetadataOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestNetworkInterfaceInitParameters) DeepCopyInto(out *SpotInstanceRequestNetworkInterfaceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceIndex != nil { + in, out := &in.DeviceIndex, &out.DeviceIndex + *out = new(float64) + **out = **in + } + if in.NetworkCardIndex != nil { + in, out := &in.NetworkCardIndex, &out.NetworkCardIndex + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestNetworkInterfaceInitParameters. +func (in *SpotInstanceRequestNetworkInterfaceInitParameters) DeepCopy() *SpotInstanceRequestNetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestNetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestNetworkInterfaceObservation) DeepCopyInto(out *SpotInstanceRequestNetworkInterfaceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceIndex != nil { + in, out := &in.DeviceIndex, &out.DeviceIndex + *out = new(float64) + **out = **in + } + if in.NetworkCardIndex != nil { + in, out := &in.NetworkCardIndex, &out.NetworkCardIndex + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestNetworkInterfaceObservation. +func (in *SpotInstanceRequestNetworkInterfaceObservation) DeepCopy() *SpotInstanceRequestNetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestNetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestNetworkInterfaceParameters) DeepCopyInto(out *SpotInstanceRequestNetworkInterfaceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceIndex != nil { + in, out := &in.DeviceIndex, &out.DeviceIndex + *out = new(float64) + **out = **in + } + if in.NetworkCardIndex != nil { + in, out := &in.NetworkCardIndex, &out.NetworkCardIndex + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestNetworkInterfaceParameters. 
+func (in *SpotInstanceRequestNetworkInterfaceParameters) DeepCopy() *SpotInstanceRequestNetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestNetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestObservation) DeepCopyInto(out *SpotInstanceRequestObservation) { + *out = *in + if in.AMI != nil { + in, out := &in.AMI, &out.AMI + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.BlockDurationMinutes != nil { + in, out := &in.BlockDurationMinutes, &out.BlockDurationMinutes + *out = new(float64) + **out = **in + } + if in.CPUCoreCount != nil { + in, out := &in.CPUCoreCount, &out.CPUCoreCount + *out = new(float64) + **out = **in + } + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(SpotInstanceRequestCPUOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CPUThreadsPerCore != nil { + in, out := &in.CPUThreadsPerCore, &out.CPUThreadsPerCore + *out = new(float64) + **out = **in + } + if in.CapacityReservationSpecification != nil { + in, out := &in.CapacityReservationSpecification, &out.CapacityReservationSpecification + *out = new(SpotInstanceRequestCapacityReservationSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.CreditSpecification != nil { + in, out := &in.CreditSpecification, &out.CreditSpecification + *out = new(SpotInstanceRequestCreditSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.DisableAPIStop != nil { + in, out := 
&in.DisableAPIStop, &out.DisableAPIStop + *out = new(bool) + **out = **in + } + if in.DisableAPITermination != nil { + in, out := &in.DisableAPITermination, &out.DisableAPITermination + *out = new(bool) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]SpotInstanceRequestEBSBlockDeviceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EnclaveOptions != nil { + in, out := &in.EnclaveOptions, &out.EnclaveOptions + *out = new(SpotInstanceRequestEnclaveOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]SpotInstanceRequestEphemeralBlockDeviceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GetPasswordData != nil { + in, out := &in.GetPasswordData, &out.GetPasswordData + *out = new(bool) + **out = **in + } + if in.Hibernation != nil { + in, out := &in.Hibernation, &out.Hibernation + *out = new(bool) + **out = **in + } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostResourceGroupArn != nil { + in, out := &in.HostResourceGroupArn, &out.HostResourceGroupArn + *out = new(string) + **out = **in + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPv6AddressCount != nil { + in, out := &in.IPv6AddressCount, &out.IPv6AddressCount + *out = new(float64) + **out = **in + } + if in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, 
out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceInitiatedShutdownBehavior != nil { + in, out := &in.InstanceInitiatedShutdownBehavior, &out.InstanceInitiatedShutdownBehavior + *out = new(string) + **out = **in + } + if in.InstanceInterruptionBehavior != nil { + in, out := &in.InstanceInterruptionBehavior, &out.InstanceInterruptionBehavior + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.LaunchGroup != nil { + in, out := &in.LaunchGroup, &out.LaunchGroup + *out = new(string) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(SpotInstanceRequestLaunchTemplateObservation) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceOptions != nil { + in, out := &in.MaintenanceOptions, &out.MaintenanceOptions + *out = new(SpotInstanceRequestMaintenanceOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(SpotInstanceRequestMetadataOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(bool) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]SpotInstanceRequestNetworkInterfaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutpostArn != nil { + in, out := &in.OutpostArn, &out.OutpostArn + *out = new(string) + **out = **in + } + if in.PasswordData != nil { + in, out := &in.PasswordData, &out.PasswordData + *out = new(string) + **out = 
**in + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementPartitionNumber != nil { + in, out := &in.PlacementPartitionNumber, &out.PlacementPartitionNumber + *out = new(float64) + **out = **in + } + if in.PrimaryNetworkInterfaceID != nil { + in, out := &in.PrimaryNetworkInterfaceID, &out.PrimaryNetworkInterfaceID + *out = new(string) + **out = **in + } + if in.PrivateDNS != nil { + in, out := &in.PrivateDNS, &out.PrivateDNS + *out = new(string) + **out = **in + } + if in.PrivateDNSNameOptions != nil { + in, out := &in.PrivateDNSNameOptions, &out.PrivateDNSNameOptions + *out = new(SpotInstanceRequestPrivateDNSNameOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.PrivateIP != nil { + in, out := &in.PrivateIP, &out.PrivateIP + *out = new(string) + **out = **in + } + if in.PublicDNS != nil { + in, out := &in.PublicDNS, &out.PublicDNS + *out = new(string) + **out = **in + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = new(SpotInstanceRequestRootBlockDeviceObservation) + (*in).DeepCopyInto(*out) + } + if in.SecondaryPrivateIps != nil { + in, out := &in.SecondaryPrivateIps, &out.SecondaryPrivateIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SourceDestCheck != nil { + in, out := &in.SourceDestCheck, &out.SourceDestCheck + *out = new(bool) + **out = **in + } + if in.SpotBidStatus != nil { + in, out := &in.SpotBidStatus, &out.SpotBidStatus + *out = 
new(string) + **out = **in + } + if in.SpotInstanceID != nil { + in, out := &in.SpotInstanceID, &out.SpotInstanceID + *out = new(string) + **out = **in + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SpotRequestState != nil { + in, out := &in.SpotRequestState, &out.SpotRequestState + *out = new(string) + **out = **in + } + if in.SpotType != nil { + in, out := &in.SpotType, &out.SpotType + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tenancy != nil { + in, out := &in.Tenancy, &out.Tenancy + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } + if in.UserDataReplaceOnChange != nil { + in, out := &in.UserDataReplaceOnChange, &out.UserDataReplaceOnChange + *out = new(bool) + **out = **in + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in 
+ } + } + } + if in.ValidFrom != nil { + in, out := &in.ValidFrom, &out.ValidFrom + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } + if in.VolumeTags != nil { + in, out := &in.VolumeTags, &out.VolumeTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WaitForFulfillment != nil { + in, out := &in.WaitForFulfillment, &out.WaitForFulfillment + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestObservation. +func (in *SpotInstanceRequestObservation) DeepCopy() *SpotInstanceRequestObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestParameters) DeepCopyInto(out *SpotInstanceRequestParameters) { + *out = *in + if in.AMI != nil { + in, out := &in.AMI, &out.AMI + *out = new(string) + **out = **in + } + if in.AssociatePublicIPAddress != nil { + in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.BlockDurationMinutes != nil { + in, out := &in.BlockDurationMinutes, &out.BlockDurationMinutes + *out = new(float64) + **out = **in + } + if in.CPUCoreCount != nil { + in, out := &in.CPUCoreCount, &out.CPUCoreCount + *out = new(float64) + **out = **in + } + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(SpotInstanceRequestCPUOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CPUThreadsPerCore != nil { + in, out := &in.CPUThreadsPerCore, &out.CPUThreadsPerCore + *out = new(float64) + **out = **in + } + if in.CapacityReservationSpecification != nil { + in, out := &in.CapacityReservationSpecification, &out.CapacityReservationSpecification + *out = new(SpotInstanceRequestCapacityReservationSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.CreditSpecification != nil { + in, out := &in.CreditSpecification, &out.CreditSpecification + *out = new(SpotInstanceRequestCreditSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableAPIStop != nil { + in, out := &in.DisableAPIStop, &out.DisableAPIStop + *out = new(bool) + **out = **in + } + if in.DisableAPITermination != nil { + in, out := &in.DisableAPITermination, &out.DisableAPITermination + *out = new(bool) + **out = **in + } + if in.EBSBlockDevice != nil { + in, out := &in.EBSBlockDevice, &out.EBSBlockDevice + *out = make([]SpotInstanceRequestEBSBlockDeviceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EBSOptimized != nil { 
+ in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.EnclaveOptions != nil { + in, out := &in.EnclaveOptions, &out.EnclaveOptions + *out = new(SpotInstanceRequestEnclaveOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.EphemeralBlockDevice != nil { + in, out := &in.EphemeralBlockDevice, &out.EphemeralBlockDevice + *out = make([]SpotInstanceRequestEphemeralBlockDeviceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GetPasswordData != nil { + in, out := &in.GetPasswordData, &out.GetPasswordData + *out = new(bool) + **out = **in + } + if in.Hibernation != nil { + in, out := &in.Hibernation, &out.Hibernation + *out = new(bool) + **out = **in + } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostResourceGroupArn != nil { + in, out := &in.HostResourceGroupArn, &out.HostResourceGroupArn + *out = new(string) + **out = **in + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(string) + **out = **in + } + if in.IPv6AddressCount != nil { + in, out := &in.IPv6AddressCount, &out.IPv6AddressCount + *out = new(float64) + **out = **in + } + if in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstanceInitiatedShutdownBehavior != nil { + in, out := &in.InstanceInitiatedShutdownBehavior, &out.InstanceInitiatedShutdownBehavior + *out = new(string) + **out = **in + } + if in.InstanceInterruptionBehavior != nil { + in, out := &in.InstanceInterruptionBehavior, &out.InstanceInterruptionBehavior + *out = new(string) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KeyName != nil { 
+ in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.LaunchGroup != nil { + in, out := &in.LaunchGroup, &out.LaunchGroup + *out = new(string) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(SpotInstanceRequestLaunchTemplateParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceOptions != nil { + in, out := &in.MaintenanceOptions, &out.MaintenanceOptions + *out = new(SpotInstanceRequestMaintenanceOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.MetadataOptions != nil { + in, out := &in.MetadataOptions, &out.MetadataOptions + *out = new(SpotInstanceRequestMetadataOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(bool) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]SpotInstanceRequestNetworkInterfaceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementGroup != nil { + in, out := &in.PlacementGroup, &out.PlacementGroup + *out = new(string) + **out = **in + } + if in.PlacementPartitionNumber != nil { + in, out := &in.PlacementPartitionNumber, &out.PlacementPartitionNumber + *out = new(float64) + **out = **in + } + if in.PrivateDNSNameOptions != nil { + in, out := &in.PrivateDNSNameOptions, &out.PrivateDNSNameOptions + *out = new(SpotInstanceRequestPrivateDNSNameOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateIP != nil { + in, out := &in.PrivateIP, &out.PrivateIP + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RootBlockDevice != nil { + in, out := &in.RootBlockDevice, &out.RootBlockDevice + *out = new(SpotInstanceRequestRootBlockDeviceParameters) + (*in).DeepCopyInto(*out) + } + if in.SecondaryPrivateIps != nil { + in, out 
:= &in.SecondaryPrivateIps, &out.SecondaryPrivateIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SourceDestCheck != nil { + in, out := &in.SourceDestCheck, &out.SourceDestCheck + *out = new(bool) + **out = **in + } + if in.SpotPrice != nil { + in, out := &in.SpotPrice, &out.SpotPrice + *out = new(string) + **out = **in + } + if in.SpotType != nil { + in, out := &in.SpotType, &out.SpotType + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tenancy != nil { + in, out := &in.Tenancy, &out.Tenancy + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } + if in.UserDataReplaceOnChange != nil { + in, out := &in.UserDataReplaceOnChange, &out.UserDataReplaceOnChange + *out = new(bool) + **out = 
**in + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ValidFrom != nil { + in, out := &in.ValidFrom, &out.ValidFrom + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } + if in.VolumeTags != nil { + in, out := &in.VolumeTags, &out.VolumeTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WaitForFulfillment != nil { + in, out := &in.WaitForFulfillment, &out.WaitForFulfillment + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestParameters. +func (in *SpotInstanceRequestParameters) DeepCopy() *SpotInstanceRequestParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestPrivateDNSNameOptionsInitParameters) DeepCopyInto(out *SpotInstanceRequestPrivateDNSNameOptionsInitParameters) { + *out = *in + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSAaaaRecord != nil { + in, out := &in.EnableResourceNameDNSAaaaRecord, &out.EnableResourceNameDNSAaaaRecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestPrivateDNSNameOptionsInitParameters. +func (in *SpotInstanceRequestPrivateDNSNameOptionsInitParameters) DeepCopy() *SpotInstanceRequestPrivateDNSNameOptionsInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestPrivateDNSNameOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestPrivateDNSNameOptionsObservation) DeepCopyInto(out *SpotInstanceRequestPrivateDNSNameOptionsObservation) { + *out = *in + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSAaaaRecord != nil { + in, out := &in.EnableResourceNameDNSAaaaRecord, &out.EnableResourceNameDNSAaaaRecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestPrivateDNSNameOptionsObservation. 
+func (in *SpotInstanceRequestPrivateDNSNameOptionsObservation) DeepCopy() *SpotInstanceRequestPrivateDNSNameOptionsObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestPrivateDNSNameOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestPrivateDNSNameOptionsParameters) DeepCopyInto(out *SpotInstanceRequestPrivateDNSNameOptionsParameters) { + *out = *in + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSAaaaRecord != nil { + in, out := &in.EnableResourceNameDNSAaaaRecord, &out.EnableResourceNameDNSAaaaRecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestPrivateDNSNameOptionsParameters. +func (in *SpotInstanceRequestPrivateDNSNameOptionsParameters) DeepCopy() *SpotInstanceRequestPrivateDNSNameOptionsParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestPrivateDNSNameOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotInstanceRequestRootBlockDeviceInitParameters) DeepCopyInto(out *SpotInstanceRequestRootBlockDeviceInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestRootBlockDeviceInitParameters. 
+func (in *SpotInstanceRequestRootBlockDeviceInitParameters) DeepCopy() *SpotInstanceRequestRootBlockDeviceInitParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestRootBlockDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestRootBlockDeviceObservation) DeepCopyInto(out *SpotInstanceRequestRootBlockDeviceObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeID != nil { + in, out := &in.VolumeID, &out.VolumeID + *out = new(string) + **out = **in + } + if in.VolumeSize != nil { + in, out := 
&in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestRootBlockDeviceObservation. +func (in *SpotInstanceRequestRootBlockDeviceObservation) DeepCopy() *SpotInstanceRequestRootBlockDeviceObservation { + if in == nil { + return nil + } + out := new(SpotInstanceRequestRootBlockDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestRootBlockDeviceParameters) DeepCopyInto(out *SpotInstanceRequestRootBlockDeviceParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throughput != nil { + in, out := &in.Throughput, 
&out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestRootBlockDeviceParameters. +func (in *SpotInstanceRequestRootBlockDeviceParameters) DeepCopy() *SpotInstanceRequestRootBlockDeviceParameters { + if in == nil { + return nil + } + out := new(SpotInstanceRequestRootBlockDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestSpec) DeepCopyInto(out *SpotInstanceRequestSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestSpec. +func (in *SpotInstanceRequestSpec) DeepCopy() *SpotInstanceRequestSpec { + if in == nil { + return nil + } + out := new(SpotInstanceRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotInstanceRequestStatus) DeepCopyInto(out *SpotInstanceRequestStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotInstanceRequestStatus. 
+func (in *SpotInstanceRequestStatus) DeepCopy() *SpotInstanceRequestStatus { + if in == nil { + return nil + } + out := new(SpotInstanceRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotMaintenanceStrategiesInitParameters) DeepCopyInto(out *SpotMaintenanceStrategiesInitParameters) { + *out = *in + if in.CapacityRebalance != nil { + in, out := &in.CapacityRebalance, &out.CapacityRebalance + *out = new(CapacityRebalanceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMaintenanceStrategiesInitParameters. +func (in *SpotMaintenanceStrategiesInitParameters) DeepCopy() *SpotMaintenanceStrategiesInitParameters { + if in == nil { + return nil + } + out := new(SpotMaintenanceStrategiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotMaintenanceStrategiesObservation) DeepCopyInto(out *SpotMaintenanceStrategiesObservation) { + *out = *in + if in.CapacityRebalance != nil { + in, out := &in.CapacityRebalance, &out.CapacityRebalance + *out = new(CapacityRebalanceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMaintenanceStrategiesObservation. +func (in *SpotMaintenanceStrategiesObservation) DeepCopy() *SpotMaintenanceStrategiesObservation { + if in == nil { + return nil + } + out := new(SpotMaintenanceStrategiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotMaintenanceStrategiesParameters) DeepCopyInto(out *SpotMaintenanceStrategiesParameters) { + *out = *in + if in.CapacityRebalance != nil { + in, out := &in.CapacityRebalance, &out.CapacityRebalance + *out = new(CapacityRebalanceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMaintenanceStrategiesParameters. +func (in *SpotMaintenanceStrategiesParameters) DeepCopy() *SpotMaintenanceStrategiesParameters { + if in == nil { + return nil + } + out := new(SpotMaintenanceStrategiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotOptionsInitParameters) DeepCopyInto(out *SpotOptionsInitParameters) { + *out = *in + if in.InstanceInterruptionBehavior != nil { + in, out := &in.InstanceInterruptionBehavior, &out.InstanceInterruptionBehavior + *out = new(string) + **out = **in + } + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + *out = new(string) + **out = **in + } + if in.SpotInstanceType != nil { + in, out := &in.SpotInstanceType, &out.SpotInstanceType + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotOptionsInitParameters. +func (in *SpotOptionsInitParameters) DeepCopy() *SpotOptionsInitParameters { + if in == nil { + return nil + } + out := new(SpotOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotOptionsObservation) DeepCopyInto(out *SpotOptionsObservation) { + *out = *in + if in.InstanceInterruptionBehavior != nil { + in, out := &in.InstanceInterruptionBehavior, &out.InstanceInterruptionBehavior + *out = new(string) + **out = **in + } + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + *out = new(string) + **out = **in + } + if in.SpotInstanceType != nil { + in, out := &in.SpotInstanceType, &out.SpotInstanceType + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotOptionsObservation. +func (in *SpotOptionsObservation) DeepCopy() *SpotOptionsObservation { + if in == nil { + return nil + } + out := new(SpotOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotOptionsParameters) DeepCopyInto(out *SpotOptionsParameters) { + *out = *in + if in.InstanceInterruptionBehavior != nil { + in, out := &in.InstanceInterruptionBehavior, &out.InstanceInterruptionBehavior + *out = new(string) + **out = **in + } + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + *out = new(string) + **out = **in + } + if in.SpotInstanceType != nil { + in, out := &in.SpotInstanceType, &out.SpotInstanceType + *out = new(string) + **out = **in + } + if in.ValidUntil != nil { + in, out := &in.ValidUntil, &out.ValidUntil + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotOptionsParameters. 
+func (in *SpotOptionsParameters) DeepCopy() *SpotOptionsParameters { + if in == nil { + return nil + } + out := new(SpotOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagSpecificationsInitParameters) DeepCopyInto(out *TagSpecificationsInitParameters) { + *out = *in + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagSpecificationsInitParameters. +func (in *TagSpecificationsInitParameters) DeepCopy() *TagSpecificationsInitParameters { + if in == nil { + return nil + } + out := new(TagSpecificationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagSpecificationsObservation) DeepCopyInto(out *TagSpecificationsObservation) { + *out = *in + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagSpecificationsObservation. 
+func (in *TagSpecificationsObservation) DeepCopy() *TagSpecificationsObservation { + if in == nil { + return nil + } + out := new(TagSpecificationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagSpecificationsParameters) DeepCopyInto(out *TagSpecificationsParameters) { + *out = *in + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagSpecificationsParameters. +func (in *TagSpecificationsParameters) DeepCopy() *TagSpecificationsParameters { + if in == nil { + return nil + } + out := new(TagSpecificationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TotalLocalStorageGbInitParameters) DeepCopyInto(out *TotalLocalStorageGbInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotalLocalStorageGbInitParameters. 
+func (in *TotalLocalStorageGbInitParameters) DeepCopy() *TotalLocalStorageGbInitParameters { + if in == nil { + return nil + } + out := new(TotalLocalStorageGbInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TotalLocalStorageGbObservation) DeepCopyInto(out *TotalLocalStorageGbObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotalLocalStorageGbObservation. +func (in *TotalLocalStorageGbObservation) DeepCopy() *TotalLocalStorageGbObservation { + if in == nil { + return nil + } + out := new(TotalLocalStorageGbObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TotalLocalStorageGbParameters) DeepCopyInto(out *TotalLocalStorageGbParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotalLocalStorageGbParameters. +func (in *TotalLocalStorageGbParameters) DeepCopy() *TotalLocalStorageGbParameters { + if in == nil { + return nil + } + out := new(TotalLocalStorageGbParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrafficMirrorFilterRule) DeepCopyInto(out *TrafficMirrorFilterRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficMirrorFilterRule. +func (in *TrafficMirrorFilterRule) DeepCopy() *TrafficMirrorFilterRule { + if in == nil { + return nil + } + out := new(TrafficMirrorFilterRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TrafficMirrorFilterRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficMirrorFilterRuleInitParameters) DeepCopyInto(out *TrafficMirrorFilterRuleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DestinationCidrBlock != nil { + in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock + *out = new(string) + **out = **in + } + if in.DestinationPortRange != nil { + in, out := &in.DestinationPortRange, &out.DestinationPortRange + *out = new(DestinationPortRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(float64) + **out = **in + } + if in.RuleAction != nil { + in, out := &in.RuleAction, &out.RuleAction + *out = new(string) + **out = **in + } + if in.RuleNumber != nil { + in, out := &in.RuleNumber, &out.RuleNumber + *out = new(float64) + **out = **in + } + if in.SourceCidrBlock != nil { + in, out := &in.SourceCidrBlock, &out.SourceCidrBlock + *out = new(string) + **out = **in + } + if in.SourcePortRange != nil { + in, out := 
&in.SourcePortRange, &out.SourcePortRange + *out = new(SourcePortRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TrafficDirection != nil { + in, out := &in.TrafficDirection, &out.TrafficDirection + *out = new(string) + **out = **in + } + if in.TrafficMirrorFilterID != nil { + in, out := &in.TrafficMirrorFilterID, &out.TrafficMirrorFilterID + *out = new(string) + **out = **in + } + if in.TrafficMirrorFilterIDRef != nil { + in, out := &in.TrafficMirrorFilterIDRef, &out.TrafficMirrorFilterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TrafficMirrorFilterIDSelector != nil { + in, out := &in.TrafficMirrorFilterIDSelector, &out.TrafficMirrorFilterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficMirrorFilterRuleInitParameters. +func (in *TrafficMirrorFilterRuleInitParameters) DeepCopy() *TrafficMirrorFilterRuleInitParameters { + if in == nil { + return nil + } + out := new(TrafficMirrorFilterRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficMirrorFilterRuleList) DeepCopyInto(out *TrafficMirrorFilterRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TrafficMirrorFilterRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficMirrorFilterRuleList. 
+func (in *TrafficMirrorFilterRuleList) DeepCopy() *TrafficMirrorFilterRuleList { + if in == nil { + return nil + } + out := new(TrafficMirrorFilterRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TrafficMirrorFilterRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficMirrorFilterRuleObservation) DeepCopyInto(out *TrafficMirrorFilterRuleObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DestinationCidrBlock != nil { + in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock + *out = new(string) + **out = **in + } + if in.DestinationPortRange != nil { + in, out := &in.DestinationPortRange, &out.DestinationPortRange + *out = new(DestinationPortRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(float64) + **out = **in + } + if in.RuleAction != nil { + in, out := &in.RuleAction, &out.RuleAction + *out = new(string) + **out = **in + } + if in.RuleNumber != nil { + in, out := &in.RuleNumber, &out.RuleNumber + *out = new(float64) + **out = **in + } + if in.SourceCidrBlock != nil { + in, out := &in.SourceCidrBlock, &out.SourceCidrBlock + *out = new(string) + **out = **in + } + if in.SourcePortRange != nil { + in, out := &in.SourcePortRange, &out.SourcePortRange + *out = new(SourcePortRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.TrafficDirection != nil { + in, out := &in.TrafficDirection, 
&out.TrafficDirection + *out = new(string) + **out = **in + } + if in.TrafficMirrorFilterID != nil { + in, out := &in.TrafficMirrorFilterID, &out.TrafficMirrorFilterID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficMirrorFilterRuleObservation. +func (in *TrafficMirrorFilterRuleObservation) DeepCopy() *TrafficMirrorFilterRuleObservation { + if in == nil { + return nil + } + out := new(TrafficMirrorFilterRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficMirrorFilterRuleParameters) DeepCopyInto(out *TrafficMirrorFilterRuleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DestinationCidrBlock != nil { + in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock + *out = new(string) + **out = **in + } + if in.DestinationPortRange != nil { + in, out := &in.DestinationPortRange, &out.DestinationPortRange + *out = new(DestinationPortRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RuleAction != nil { + in, out := &in.RuleAction, &out.RuleAction + *out = new(string) + **out = **in + } + if in.RuleNumber != nil { + in, out := &in.RuleNumber, &out.RuleNumber + *out = new(float64) + **out = **in + } + if in.SourceCidrBlock != nil { + in, out := &in.SourceCidrBlock, &out.SourceCidrBlock + *out = new(string) + **out = **in + } + if in.SourcePortRange != nil { + in, out := &in.SourcePortRange, &out.SourcePortRange + *out = new(SourcePortRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.TrafficDirection != nil { + in, out := 
&in.TrafficDirection, &out.TrafficDirection + *out = new(string) + **out = **in + } + if in.TrafficMirrorFilterID != nil { + in, out := &in.TrafficMirrorFilterID, &out.TrafficMirrorFilterID + *out = new(string) + **out = **in + } + if in.TrafficMirrorFilterIDRef != nil { + in, out := &in.TrafficMirrorFilterIDRef, &out.TrafficMirrorFilterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TrafficMirrorFilterIDSelector != nil { + in, out := &in.TrafficMirrorFilterIDSelector, &out.TrafficMirrorFilterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficMirrorFilterRuleParameters. +func (in *TrafficMirrorFilterRuleParameters) DeepCopy() *TrafficMirrorFilterRuleParameters { + if in == nil { + return nil + } + out := new(TrafficMirrorFilterRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficMirrorFilterRuleSpec) DeepCopyInto(out *TrafficMirrorFilterRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficMirrorFilterRuleSpec. +func (in *TrafficMirrorFilterRuleSpec) DeepCopy() *TrafficMirrorFilterRuleSpec { + if in == nil { + return nil + } + out := new(TrafficMirrorFilterRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrafficMirrorFilterRuleStatus) DeepCopyInto(out *TrafficMirrorFilterRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficMirrorFilterRuleStatus. +func (in *TrafficMirrorFilterRuleStatus) DeepCopy() *TrafficMirrorFilterRuleStatus { + if in == nil { + return nil + } + out := new(TrafficMirrorFilterRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tunnel1LogOptionsInitParameters) DeepCopyInto(out *Tunnel1LogOptionsInitParameters) { + *out = *in + if in.CloudwatchLogOptions != nil { + in, out := &in.CloudwatchLogOptions, &out.CloudwatchLogOptions + *out = new(CloudwatchLogOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel1LogOptionsInitParameters. +func (in *Tunnel1LogOptionsInitParameters) DeepCopy() *Tunnel1LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(Tunnel1LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tunnel1LogOptionsObservation) DeepCopyInto(out *Tunnel1LogOptionsObservation) { + *out = *in + if in.CloudwatchLogOptions != nil { + in, out := &in.CloudwatchLogOptions, &out.CloudwatchLogOptions + *out = new(CloudwatchLogOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel1LogOptionsObservation. 
+func (in *Tunnel1LogOptionsObservation) DeepCopy() *Tunnel1LogOptionsObservation { + if in == nil { + return nil + } + out := new(Tunnel1LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tunnel1LogOptionsParameters) DeepCopyInto(out *Tunnel1LogOptionsParameters) { + *out = *in + if in.CloudwatchLogOptions != nil { + in, out := &in.CloudwatchLogOptions, &out.CloudwatchLogOptions + *out = new(CloudwatchLogOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel1LogOptionsParameters. +func (in *Tunnel1LogOptionsParameters) DeepCopy() *Tunnel1LogOptionsParameters { + if in == nil { + return nil + } + out := new(Tunnel1LogOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tunnel2LogOptionsCloudwatchLogOptionsInitParameters) DeepCopyInto(out *Tunnel2LogOptionsCloudwatchLogOptionsInitParameters) { + *out = *in + if in.LogEnabled != nil { + in, out := &in.LogEnabled, &out.LogEnabled + *out = new(bool) + **out = **in + } + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } + if in.LogOutputFormat != nil { + in, out := &in.LogOutputFormat, &out.LogOutputFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel2LogOptionsCloudwatchLogOptionsInitParameters. 
+func (in *Tunnel2LogOptionsCloudwatchLogOptionsInitParameters) DeepCopy() *Tunnel2LogOptionsCloudwatchLogOptionsInitParameters { + if in == nil { + return nil + } + out := new(Tunnel2LogOptionsCloudwatchLogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tunnel2LogOptionsCloudwatchLogOptionsObservation) DeepCopyInto(out *Tunnel2LogOptionsCloudwatchLogOptionsObservation) { + *out = *in + if in.LogEnabled != nil { + in, out := &in.LogEnabled, &out.LogEnabled + *out = new(bool) + **out = **in + } + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } + if in.LogOutputFormat != nil { + in, out := &in.LogOutputFormat, &out.LogOutputFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel2LogOptionsCloudwatchLogOptionsObservation. +func (in *Tunnel2LogOptionsCloudwatchLogOptionsObservation) DeepCopy() *Tunnel2LogOptionsCloudwatchLogOptionsObservation { + if in == nil { + return nil + } + out := new(Tunnel2LogOptionsCloudwatchLogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Tunnel2LogOptionsCloudwatchLogOptionsParameters) DeepCopyInto(out *Tunnel2LogOptionsCloudwatchLogOptionsParameters) { + *out = *in + if in.LogEnabled != nil { + in, out := &in.LogEnabled, &out.LogEnabled + *out = new(bool) + **out = **in + } + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } + if in.LogOutputFormat != nil { + in, out := &in.LogOutputFormat, &out.LogOutputFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel2LogOptionsCloudwatchLogOptionsParameters. +func (in *Tunnel2LogOptionsCloudwatchLogOptionsParameters) DeepCopy() *Tunnel2LogOptionsCloudwatchLogOptionsParameters { + if in == nil { + return nil + } + out := new(Tunnel2LogOptionsCloudwatchLogOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tunnel2LogOptionsInitParameters) DeepCopyInto(out *Tunnel2LogOptionsInitParameters) { + *out = *in + if in.CloudwatchLogOptions != nil { + in, out := &in.CloudwatchLogOptions, &out.CloudwatchLogOptions + *out = new(Tunnel2LogOptionsCloudwatchLogOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel2LogOptionsInitParameters. +func (in *Tunnel2LogOptionsInitParameters) DeepCopy() *Tunnel2LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(Tunnel2LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Tunnel2LogOptionsObservation) DeepCopyInto(out *Tunnel2LogOptionsObservation) { + *out = *in + if in.CloudwatchLogOptions != nil { + in, out := &in.CloudwatchLogOptions, &out.CloudwatchLogOptions + *out = new(Tunnel2LogOptionsCloudwatchLogOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel2LogOptionsObservation. +func (in *Tunnel2LogOptionsObservation) DeepCopy() *Tunnel2LogOptionsObservation { + if in == nil { + return nil + } + out := new(Tunnel2LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tunnel2LogOptionsParameters) DeepCopyInto(out *Tunnel2LogOptionsParameters) { + *out = *in + if in.CloudwatchLogOptions != nil { + in, out := &in.CloudwatchLogOptions, &out.CloudwatchLogOptions + *out = new(Tunnel2LogOptionsCloudwatchLogOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel2LogOptionsParameters. +func (in *Tunnel2LogOptionsParameters) DeepCopy() *Tunnel2LogOptionsParameters { + if in == nil { + return nil + } + out := new(Tunnel2LogOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserBucketInitParameters) DeepCopyInto(out *UserBucketInitParameters) { + *out = *in + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } + if in.S3Key != nil { + in, out := &in.S3Key, &out.S3Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserBucketInitParameters. 
+func (in *UserBucketInitParameters) DeepCopy() *UserBucketInitParameters { + if in == nil { + return nil + } + out := new(UserBucketInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserBucketObservation) DeepCopyInto(out *UserBucketObservation) { + *out = *in + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } + if in.S3Key != nil { + in, out := &in.S3Key, &out.S3Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserBucketObservation. +func (in *UserBucketObservation) DeepCopy() *UserBucketObservation { + if in == nil { + return nil + } + out := new(UserBucketObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserBucketParameters) DeepCopyInto(out *UserBucketParameters) { + *out = *in + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } + if in.S3Key != nil { + in, out := &in.S3Key, &out.S3Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserBucketParameters. +func (in *UserBucketParameters) DeepCopy() *UserBucketParameters { + if in == nil { + return nil + } + out := new(UserBucketParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCEndpoint) DeepCopyInto(out *VPCEndpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCEndpoint. +func (in *VPCEndpoint) DeepCopy() *VPCEndpoint { + if in == nil { + return nil + } + out := new(VPCEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCEndpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCEndpointInitParameters) DeepCopyInto(out *VPCEndpointInitParameters) { + *out = *in + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.DNSOptions != nil { + in, out := &in.DNSOptions, &out.DNSOptions + *out = new(DNSOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PrivateDNSEnabled != nil { + in, out := &in.PrivateDNSEnabled, &out.PrivateDNSEnabled + *out = new(bool) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceNameRef != nil { + in, out := &in.ServiceNameRef, &out.ServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceNameSelector != nil { + in, out := &in.ServiceNameSelector, &out.ServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, 
&out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCEndpointType != nil { + in, out := &in.VPCEndpointType, &out.VPCEndpointType + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCEndpointInitParameters. +func (in *VPCEndpointInitParameters) DeepCopy() *VPCEndpointInitParameters { + if in == nil { + return nil + } + out := new(VPCEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCEndpointList) DeepCopyInto(out *VPCEndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPCEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCEndpointList. +func (in *VPCEndpointList) DeepCopy() *VPCEndpointList { + if in == nil { + return nil + } + out := new(VPCEndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VPCEndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCEndpointObservation) DeepCopyInto(out *VPCEndpointObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.CidrBlocks != nil { + in, out := &in.CidrBlocks, &out.CidrBlocks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DNSEntry != nil { + in, out := &in.DNSEntry, &out.DNSEntry + *out = make([]DNSEntryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSOptions != nil { + in, out := &in.DNSOptions, &out.DNSOptions + *out = new(DNSOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PrefixListID != nil { + in, out := &in.PrefixListID, &out.PrefixListID + *out = new(string) + **out = **in + } + if in.PrivateDNSEnabled != nil { + in, out := &in.PrivateDNSEnabled, &out.PrivateDNSEnabled + *out 
= new(bool) + **out = **in + } + if in.RequesterManaged != nil { + in, out := &in.RequesterManaged, &out.RequesterManaged + *out = new(bool) + **out = **in + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCEndpointType != nil { + in, out := &in.VPCEndpointType, &out.VPCEndpointType + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCEndpointObservation. +func (in *VPCEndpointObservation) DeepCopy() *VPCEndpointObservation { + if in == nil { + return nil + } + out := new(VPCEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCEndpointParameters) DeepCopyInto(out *VPCEndpointParameters) { + *out = *in + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.DNSOptions != nil { + in, out := &in.DNSOptions, &out.DNSOptions + *out = new(DNSOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PrivateDNSEnabled != nil { + in, out := &in.PrivateDNSEnabled, &out.PrivateDNSEnabled + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceNameRef != nil { + in, out := &in.ServiceNameRef, &out.ServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceNameSelector != nil { + in, out := &in.ServiceNameSelector, &out.ServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCEndpointType != nil { + in, out := 
&in.VPCEndpointType, &out.VPCEndpointType + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCEndpointParameters. +func (in *VPCEndpointParameters) DeepCopy() *VPCEndpointParameters { + if in == nil { + return nil + } + out := new(VPCEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCEndpointSpec) DeepCopyInto(out *VPCEndpointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCEndpointSpec. +func (in *VPCEndpointSpec) DeepCopy() *VPCEndpointSpec { + if in == nil { + return nil + } + out := new(VPCEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCEndpointStatus) DeepCopyInto(out *VPCEndpointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCEndpointStatus. 
+func (in *VPCEndpointStatus) DeepCopy() *VPCEndpointStatus { + if in == nil { + return nil + } + out := new(VPCEndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCIpamPoolCidr) DeepCopyInto(out *VPCIpamPoolCidr) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCIpamPoolCidr. +func (in *VPCIpamPoolCidr) DeepCopy() *VPCIpamPoolCidr { + if in == nil { + return nil + } + out := new(VPCIpamPoolCidr) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCIpamPoolCidr) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCIpamPoolCidrInitParameters) DeepCopyInto(out *VPCIpamPoolCidrInitParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } + if in.CidrAuthorizationContext != nil { + in, out := &in.CidrAuthorizationContext, &out.CidrAuthorizationContext + *out = new(CidrAuthorizationContextInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IpamPoolID != nil { + in, out := &in.IpamPoolID, &out.IpamPoolID + *out = new(string) + **out = **in + } + if in.IpamPoolIDRef != nil { + in, out := &in.IpamPoolIDRef, &out.IpamPoolIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IpamPoolIDSelector != nil { + in, out := &in.IpamPoolIDSelector, &out.IpamPoolIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NetmaskLength != nil { + in, out := &in.NetmaskLength, &out.NetmaskLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCIpamPoolCidrInitParameters. +func (in *VPCIpamPoolCidrInitParameters) DeepCopy() *VPCIpamPoolCidrInitParameters { + if in == nil { + return nil + } + out := new(VPCIpamPoolCidrInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCIpamPoolCidrList) DeepCopyInto(out *VPCIpamPoolCidrList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPCIpamPoolCidr, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCIpamPoolCidrList. 
+func (in *VPCIpamPoolCidrList) DeepCopy() *VPCIpamPoolCidrList { + if in == nil { + return nil + } + out := new(VPCIpamPoolCidrList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCIpamPoolCidrList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCIpamPoolCidrObservation) DeepCopyInto(out *VPCIpamPoolCidrObservation) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } + if in.CidrAuthorizationContext != nil { + in, out := &in.CidrAuthorizationContext, &out.CidrAuthorizationContext + *out = new(CidrAuthorizationContextObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IpamPoolCidrID != nil { + in, out := &in.IpamPoolCidrID, &out.IpamPoolCidrID + *out = new(string) + **out = **in + } + if in.IpamPoolID != nil { + in, out := &in.IpamPoolID, &out.IpamPoolID + *out = new(string) + **out = **in + } + if in.NetmaskLength != nil { + in, out := &in.NetmaskLength, &out.NetmaskLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCIpamPoolCidrObservation. +func (in *VPCIpamPoolCidrObservation) DeepCopy() *VPCIpamPoolCidrObservation { + if in == nil { + return nil + } + out := new(VPCIpamPoolCidrObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCIpamPoolCidrParameters) DeepCopyInto(out *VPCIpamPoolCidrParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } + if in.CidrAuthorizationContext != nil { + in, out := &in.CidrAuthorizationContext, &out.CidrAuthorizationContext + *out = new(CidrAuthorizationContextParameters) + (*in).DeepCopyInto(*out) + } + if in.IpamPoolID != nil { + in, out := &in.IpamPoolID, &out.IpamPoolID + *out = new(string) + **out = **in + } + if in.IpamPoolIDRef != nil { + in, out := &in.IpamPoolIDRef, &out.IpamPoolIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IpamPoolIDSelector != nil { + in, out := &in.IpamPoolIDSelector, &out.IpamPoolIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NetmaskLength != nil { + in, out := &in.NetmaskLength, &out.NetmaskLength + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCIpamPoolCidrParameters. +func (in *VPCIpamPoolCidrParameters) DeepCopy() *VPCIpamPoolCidrParameters { + if in == nil { + return nil + } + out := new(VPCIpamPoolCidrParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCIpamPoolCidrSpec) DeepCopyInto(out *VPCIpamPoolCidrSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCIpamPoolCidrSpec. 
+func (in *VPCIpamPoolCidrSpec) DeepCopy() *VPCIpamPoolCidrSpec { + if in == nil { + return nil + } + out := new(VPCIpamPoolCidrSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCIpamPoolCidrStatus) DeepCopyInto(out *VPCIpamPoolCidrStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCIpamPoolCidrStatus. +func (in *VPCIpamPoolCidrStatus) DeepCopy() *VPCIpamPoolCidrStatus { + if in == nil { + return nil + } + out := new(VPCIpamPoolCidrStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnection) DeepCopyInto(out *VPCPeeringConnection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnection. +func (in *VPCPeeringConnection) DeepCopy() *VPCPeeringConnection { + if in == nil { + return nil + } + out := new(VPCPeeringConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCPeeringConnection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCPeeringConnectionAccepter) DeepCopyInto(out *VPCPeeringConnectionAccepter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepter. +func (in *VPCPeeringConnectionAccepter) DeepCopy() *VPCPeeringConnectionAccepter { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCPeeringConnectionAccepter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionAccepterAccepterInitParameters) DeepCopyInto(out *VPCPeeringConnectionAccepterAccepterInitParameters) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterAccepterInitParameters. +func (in *VPCPeeringConnectionAccepterAccepterInitParameters) DeepCopy() *VPCPeeringConnectionAccepterAccepterInitParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterAccepterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCPeeringConnectionAccepterAccepterObservation) DeepCopyInto(out *VPCPeeringConnectionAccepterAccepterObservation) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterAccepterObservation. +func (in *VPCPeeringConnectionAccepterAccepterObservation) DeepCopy() *VPCPeeringConnectionAccepterAccepterObservation { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterAccepterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionAccepterAccepterParameters) DeepCopyInto(out *VPCPeeringConnectionAccepterAccepterParameters) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterAccepterParameters. +func (in *VPCPeeringConnectionAccepterAccepterParameters) DeepCopy() *VPCPeeringConnectionAccepterAccepterParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterAccepterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCPeeringConnectionAccepterInitParameters) DeepCopyInto(out *VPCPeeringConnectionAccepterInitParameters) { + *out = *in + if in.Accepter != nil { + in, out := &in.Accepter, &out.Accepter + *out = new(VPCPeeringConnectionAccepterAccepterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(VPCPeeringConnectionAccepterRequesterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCPeeringConnectionID != nil { + in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + *out = new(string) + **out = **in + } + if in.VPCPeeringConnectionIDRef != nil { + in, out := &in.VPCPeeringConnectionIDRef, &out.VPCPeeringConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionIDSelector != nil { + in, out := &in.VPCPeeringConnectionIDSelector, &out.VPCPeeringConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterInitParameters. +func (in *VPCPeeringConnectionAccepterInitParameters) DeepCopy() *VPCPeeringConnectionAccepterInitParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCPeeringConnectionAccepterList) DeepCopyInto(out *VPCPeeringConnectionAccepterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPCPeeringConnectionAccepter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterList. +func (in *VPCPeeringConnectionAccepterList) DeepCopy() *VPCPeeringConnectionAccepterList { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCPeeringConnectionAccepterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCPeeringConnectionAccepterObservation) DeepCopyInto(out *VPCPeeringConnectionAccepterObservation) { + *out = *in + if in.AcceptStatus != nil { + in, out := &in.AcceptStatus, &out.AcceptStatus + *out = new(string) + **out = **in + } + if in.Accepter != nil { + in, out := &in.Accepter, &out.Accepter + *out = new(VPCPeeringConnectionAccepterAccepterObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PeerOwnerID != nil { + in, out := &in.PeerOwnerID, &out.PeerOwnerID + *out = new(string) + **out = **in + } + if in.PeerRegion != nil { + in, out := &in.PeerRegion, &out.PeerRegion + *out = new(string) + **out = **in + } + if in.PeerVPCID != nil { + in, out := &in.PeerVPCID, &out.PeerVPCID + *out = new(string) + **out = **in + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(VPCPeeringConnectionAccepterRequesterObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCPeeringConnectionID != nil { + in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + *out = new(string) + **out = 
**in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterObservation. +func (in *VPCPeeringConnectionAccepterObservation) DeepCopy() *VPCPeeringConnectionAccepterObservation { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionAccepterParameters) DeepCopyInto(out *VPCPeeringConnectionAccepterParameters) { + *out = *in + if in.Accepter != nil { + in, out := &in.Accepter, &out.Accepter + *out = new(VPCPeeringConnectionAccepterAccepterParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(VPCPeeringConnectionAccepterRequesterParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCPeeringConnectionID != nil { + in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + *out = new(string) + **out = **in + } + if in.VPCPeeringConnectionIDRef != nil { + in, out := &in.VPCPeeringConnectionIDRef, &out.VPCPeeringConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionIDSelector != nil { + in, out := &in.VPCPeeringConnectionIDSelector, &out.VPCPeeringConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterParameters. +func (in *VPCPeeringConnectionAccepterParameters) DeepCopy() *VPCPeeringConnectionAccepterParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionAccepterRequesterInitParameters) DeepCopyInto(out *VPCPeeringConnectionAccepterRequesterInitParameters) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterRequesterInitParameters. +func (in *VPCPeeringConnectionAccepterRequesterInitParameters) DeepCopy() *VPCPeeringConnectionAccepterRequesterInitParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterRequesterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionAccepterRequesterObservation) DeepCopyInto(out *VPCPeeringConnectionAccepterRequesterObservation) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterRequesterObservation. 
+func (in *VPCPeeringConnectionAccepterRequesterObservation) DeepCopy() *VPCPeeringConnectionAccepterRequesterObservation { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterRequesterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionAccepterRequesterParameters) DeepCopyInto(out *VPCPeeringConnectionAccepterRequesterParameters) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterRequesterParameters. +func (in *VPCPeeringConnectionAccepterRequesterParameters) DeepCopy() *VPCPeeringConnectionAccepterRequesterParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterRequesterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionAccepterSpec) DeepCopyInto(out *VPCPeeringConnectionAccepterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterSpec. +func (in *VPCPeeringConnectionAccepterSpec) DeepCopy() *VPCPeeringConnectionAccepterSpec { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCPeeringConnectionAccepterStatus) DeepCopyInto(out *VPCPeeringConnectionAccepterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionAccepterStatus. +func (in *VPCPeeringConnectionAccepterStatus) DeepCopy() *VPCPeeringConnectionAccepterStatus { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionAccepterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionInitParameters) DeepCopyInto(out *VPCPeeringConnectionInitParameters) { + *out = *in + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.PeerOwnerID != nil { + in, out := &in.PeerOwnerID, &out.PeerOwnerID + *out = new(string) + **out = **in + } + if in.PeerRegion != nil { + in, out := &in.PeerRegion, &out.PeerRegion + *out = new(string) + **out = **in + } + if in.PeerVPCID != nil { + in, out := &in.PeerVPCID, &out.PeerVPCID + *out = new(string) + **out = **in + } + if in.PeerVPCIDRef != nil { + in, out := &in.PeerVPCIDRef, &out.PeerVPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PeerVPCIDSelector != nil { + in, out := &in.PeerVPCIDSelector, &out.PeerVPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := 
&in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionInitParameters. +func (in *VPCPeeringConnectionInitParameters) DeepCopy() *VPCPeeringConnectionInitParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionList) DeepCopyInto(out *VPCPeeringConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPCPeeringConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionList. +func (in *VPCPeeringConnectionList) DeepCopy() *VPCPeeringConnectionList { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCPeeringConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCPeeringConnectionObservation) DeepCopyInto(out *VPCPeeringConnectionObservation) { + *out = *in + if in.AcceptStatus != nil { + in, out := &in.AcceptStatus, &out.AcceptStatus + *out = new(string) + **out = **in + } + if in.Accepter != nil { + in, out := &in.Accepter, &out.Accepter + *out = new(AccepterObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PeerOwnerID != nil { + in, out := &in.PeerOwnerID, &out.PeerOwnerID + *out = new(string) + **out = **in + } + if in.PeerRegion != nil { + in, out := &in.PeerRegion, &out.PeerRegion + *out = new(string) + **out = **in + } + if in.PeerVPCID != nil { + in, out := &in.PeerVPCID, &out.PeerVPCID + *out = new(string) + **out = **in + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(RequesterObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionObservation. 
+func (in *VPCPeeringConnectionObservation) DeepCopy() *VPCPeeringConnectionObservation { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptions) DeepCopyInto(out *VPCPeeringConnectionOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptions. +func (in *VPCPeeringConnectionOptions) DeepCopy() *VPCPeeringConnectionOptions { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCPeeringConnectionOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsAccepterInitParameters) DeepCopyInto(out *VPCPeeringConnectionOptionsAccepterInitParameters) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsAccepterInitParameters. 
+func (in *VPCPeeringConnectionOptionsAccepterInitParameters) DeepCopy() *VPCPeeringConnectionOptionsAccepterInitParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsAccepterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsAccepterObservation) DeepCopyInto(out *VPCPeeringConnectionOptionsAccepterObservation) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsAccepterObservation. +func (in *VPCPeeringConnectionOptionsAccepterObservation) DeepCopy() *VPCPeeringConnectionOptionsAccepterObservation { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsAccepterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsAccepterParameters) DeepCopyInto(out *VPCPeeringConnectionOptionsAccepterParameters) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsAccepterParameters. +func (in *VPCPeeringConnectionOptionsAccepterParameters) DeepCopy() *VPCPeeringConnectionOptionsAccepterParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsAccepterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *VPCPeeringConnectionOptionsInitParameters) DeepCopyInto(out *VPCPeeringConnectionOptionsInitParameters) { + *out = *in + if in.Accepter != nil { + in, out := &in.Accepter, &out.Accepter + *out = new(VPCPeeringConnectionOptionsAccepterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(VPCPeeringConnectionOptionsRequesterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionID != nil { + in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + *out = new(string) + **out = **in + } + if in.VPCPeeringConnectionIDRef != nil { + in, out := &in.VPCPeeringConnectionIDRef, &out.VPCPeeringConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionIDSelector != nil { + in, out := &in.VPCPeeringConnectionIDSelector, &out.VPCPeeringConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsInitParameters. +func (in *VPCPeeringConnectionOptionsInitParameters) DeepCopy() *VPCPeeringConnectionOptionsInitParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsList) DeepCopyInto(out *VPCPeeringConnectionOptionsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPCPeeringConnectionOptions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsList. 
+func (in *VPCPeeringConnectionOptionsList) DeepCopy() *VPCPeeringConnectionOptionsList { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCPeeringConnectionOptionsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsObservation) DeepCopyInto(out *VPCPeeringConnectionOptionsObservation) { + *out = *in + if in.Accepter != nil { + in, out := &in.Accepter, &out.Accepter + *out = new(VPCPeeringConnectionOptionsAccepterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(VPCPeeringConnectionOptionsRequesterObservation) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionID != nil { + in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsObservation. +func (in *VPCPeeringConnectionOptionsObservation) DeepCopy() *VPCPeeringConnectionOptionsObservation { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCPeeringConnectionOptionsParameters) DeepCopyInto(out *VPCPeeringConnectionOptionsParameters) { + *out = *in + if in.Accepter != nil { + in, out := &in.Accepter, &out.Accepter + *out = new(VPCPeeringConnectionOptionsAccepterParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(VPCPeeringConnectionOptionsRequesterParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionID != nil { + in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + *out = new(string) + **out = **in + } + if in.VPCPeeringConnectionIDRef != nil { + in, out := &in.VPCPeeringConnectionIDRef, &out.VPCPeeringConnectionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCPeeringConnectionIDSelector != nil { + in, out := &in.VPCPeeringConnectionIDSelector, &out.VPCPeeringConnectionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsParameters. +func (in *VPCPeeringConnectionOptionsParameters) DeepCopy() *VPCPeeringConnectionOptionsParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsRequesterInitParameters) DeepCopyInto(out *VPCPeeringConnectionOptionsRequesterInitParameters) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsRequesterInitParameters. 
+func (in *VPCPeeringConnectionOptionsRequesterInitParameters) DeepCopy() *VPCPeeringConnectionOptionsRequesterInitParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsRequesterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsRequesterObservation) DeepCopyInto(out *VPCPeeringConnectionOptionsRequesterObservation) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsRequesterObservation. +func (in *VPCPeeringConnectionOptionsRequesterObservation) DeepCopy() *VPCPeeringConnectionOptionsRequesterObservation { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsRequesterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsRequesterParameters) DeepCopyInto(out *VPCPeeringConnectionOptionsRequesterParameters) { + *out = *in + if in.AllowRemoteVPCDNSResolution != nil { + in, out := &in.AllowRemoteVPCDNSResolution, &out.AllowRemoteVPCDNSResolution + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsRequesterParameters. 
+func (in *VPCPeeringConnectionOptionsRequesterParameters) DeepCopy() *VPCPeeringConnectionOptionsRequesterParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsRequesterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsSpec) DeepCopyInto(out *VPCPeeringConnectionOptionsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsSpec. +func (in *VPCPeeringConnectionOptionsSpec) DeepCopy() *VPCPeeringConnectionOptionsSpec { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionOptionsStatus) DeepCopyInto(out *VPCPeeringConnectionOptionsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionOptionsStatus. +func (in *VPCPeeringConnectionOptionsStatus) DeepCopy() *VPCPeeringConnectionOptionsStatus { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionOptionsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCPeeringConnectionParameters) DeepCopyInto(out *VPCPeeringConnectionParameters) { + *out = *in + if in.AutoAccept != nil { + in, out := &in.AutoAccept, &out.AutoAccept + *out = new(bool) + **out = **in + } + if in.PeerOwnerID != nil { + in, out := &in.PeerOwnerID, &out.PeerOwnerID + *out = new(string) + **out = **in + } + if in.PeerRegion != nil { + in, out := &in.PeerRegion, &out.PeerRegion + *out = new(string) + **out = **in + } + if in.PeerVPCID != nil { + in, out := &in.PeerVPCID, &out.PeerVPCID + *out = new(string) + **out = **in + } + if in.PeerVPCIDRef != nil { + in, out := &in.PeerVPCIDRef, &out.PeerVPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PeerVPCIDSelector != nil { + in, out := &in.PeerVPCIDSelector, &out.PeerVPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionParameters. 
+func (in *VPCPeeringConnectionParameters) DeepCopy() *VPCPeeringConnectionParameters { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionSpec) DeepCopyInto(out *VPCPeeringConnectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionSpec. +func (in *VPCPeeringConnectionSpec) DeepCopy() *VPCPeeringConnectionSpec { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCPeeringConnectionStatus) DeepCopyInto(out *VPCPeeringConnectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCPeeringConnectionStatus. +func (in *VPCPeeringConnectionStatus) DeepCopy() *VPCPeeringConnectionStatus { + if in == nil { + return nil + } + out := new(VPCPeeringConnectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNConnection) DeepCopyInto(out *VPNConnection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNConnection. 
+func (in *VPNConnection) DeepCopy() *VPNConnection { + if in == nil { + return nil + } + out := new(VPNConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPNConnection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNConnectionInitParameters) DeepCopyInto(out *VPNConnectionInitParameters) { + *out = *in + if in.CustomerGatewayID != nil { + in, out := &in.CustomerGatewayID, &out.CustomerGatewayID + *out = new(string) + **out = **in + } + if in.CustomerGatewayIDRef != nil { + in, out := &in.CustomerGatewayIDRef, &out.CustomerGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CustomerGatewayIDSelector != nil { + in, out := &in.CustomerGatewayIDSelector, &out.CustomerGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EnableAcceleration != nil { + in, out := &in.EnableAcceleration, &out.EnableAcceleration + *out = new(bool) + **out = **in + } + if in.LocalIPv4NetworkCidr != nil { + in, out := &in.LocalIPv4NetworkCidr, &out.LocalIPv4NetworkCidr + *out = new(string) + **out = **in + } + if in.LocalIPv6NetworkCidr != nil { + in, out := &in.LocalIPv6NetworkCidr, &out.LocalIPv6NetworkCidr + *out = new(string) + **out = **in + } + if in.OutsideIPAddressType != nil { + in, out := &in.OutsideIPAddressType, &out.OutsideIPAddressType + *out = new(string) + **out = **in + } + if in.RemoteIPv4NetworkCidr != nil { + in, out := &in.RemoteIPv4NetworkCidr, &out.RemoteIPv4NetworkCidr + *out = new(string) + **out = **in + } + if in.RemoteIPv6NetworkCidr != nil { + in, out := &in.RemoteIPv6NetworkCidr, &out.RemoteIPv6NetworkCidr + *out = new(string) + **out = **in + } + if in.StaticRoutesOnly != nil { + in, out := 
&in.StaticRoutesOnly, &out.StaticRoutesOnly + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TransitGatewayID != nil { + in, out := &in.TransitGatewayID, &out.TransitGatewayID + *out = new(string) + **out = **in + } + if in.TransitGatewayIDRef != nil { + in, out := &in.TransitGatewayIDRef, &out.TransitGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TransitGatewayIDSelector != nil { + in, out := &in.TransitGatewayIDSelector, &out.TransitGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TransportTransitGatewayAttachmentID != nil { + in, out := &in.TransportTransitGatewayAttachmentID, &out.TransportTransitGatewayAttachmentID + *out = new(string) + **out = **in + } + if in.Tunnel1DpdTimeoutAction != nil { + in, out := &in.Tunnel1DpdTimeoutAction, &out.Tunnel1DpdTimeoutAction + *out = new(string) + **out = **in + } + if in.Tunnel1DpdTimeoutSeconds != nil { + in, out := &in.Tunnel1DpdTimeoutSeconds, &out.Tunnel1DpdTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel1EnableTunnelLifecycleControl != nil { + in, out := &in.Tunnel1EnableTunnelLifecycleControl, &out.Tunnel1EnableTunnelLifecycleControl + *out = new(bool) + **out = **in + } + if in.Tunnel1IkeVersions != nil { + in, out := &in.Tunnel1IkeVersions, &out.Tunnel1IkeVersions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1InsideCidr != nil { + in, out := &in.Tunnel1InsideCidr, &out.Tunnel1InsideCidr + *out = new(string) + **out = **in + } + if in.Tunnel1InsideIPv6Cidr != nil { + in, out := 
&in.Tunnel1InsideIPv6Cidr, &out.Tunnel1InsideIPv6Cidr + *out = new(string) + **out = **in + } + if in.Tunnel1LogOptions != nil { + in, out := &in.Tunnel1LogOptions, &out.Tunnel1LogOptions + *out = new(Tunnel1LogOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tunnel1Phase1DhGroupNumbers != nil { + in, out := &in.Tunnel1Phase1DhGroupNumbers, &out.Tunnel1Phase1DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Tunnel1Phase1EncryptionAlgorithms != nil { + in, out := &in.Tunnel1Phase1EncryptionAlgorithms, &out.Tunnel1Phase1EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase1IntegrityAlgorithms != nil { + in, out := &in.Tunnel1Phase1IntegrityAlgorithms, &out.Tunnel1Phase1IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase1LifetimeSeconds != nil { + in, out := &in.Tunnel1Phase1LifetimeSeconds, &out.Tunnel1Phase1LifetimeSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel1Phase2DhGroupNumbers != nil { + in, out := &in.Tunnel1Phase2DhGroupNumbers, &out.Tunnel1Phase2DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Tunnel1Phase2EncryptionAlgorithms != nil { + in, out := &in.Tunnel1Phase2EncryptionAlgorithms, &out.Tunnel1Phase2EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase2IntegrityAlgorithms != nil { + in, out := 
&in.Tunnel1Phase2IntegrityAlgorithms, &out.Tunnel1Phase2IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase2LifetimeSeconds != nil { + in, out := &in.Tunnel1Phase2LifetimeSeconds, &out.Tunnel1Phase2LifetimeSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel1PresharedKeySecretRef != nil { + in, out := &in.Tunnel1PresharedKeySecretRef, &out.Tunnel1PresharedKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Tunnel1RekeyFuzzPercentage != nil { + in, out := &in.Tunnel1RekeyFuzzPercentage, &out.Tunnel1RekeyFuzzPercentage + *out = new(float64) + **out = **in + } + if in.Tunnel1RekeyMarginTimeSeconds != nil { + in, out := &in.Tunnel1RekeyMarginTimeSeconds, &out.Tunnel1RekeyMarginTimeSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel1ReplayWindowSize != nil { + in, out := &in.Tunnel1ReplayWindowSize, &out.Tunnel1ReplayWindowSize + *out = new(float64) + **out = **in + } + if in.Tunnel1StartupAction != nil { + in, out := &in.Tunnel1StartupAction, &out.Tunnel1StartupAction + *out = new(string) + **out = **in + } + if in.Tunnel2DpdTimeoutAction != nil { + in, out := &in.Tunnel2DpdTimeoutAction, &out.Tunnel2DpdTimeoutAction + *out = new(string) + **out = **in + } + if in.Tunnel2DpdTimeoutSeconds != nil { + in, out := &in.Tunnel2DpdTimeoutSeconds, &out.Tunnel2DpdTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel2EnableTunnelLifecycleControl != nil { + in, out := &in.Tunnel2EnableTunnelLifecycleControl, &out.Tunnel2EnableTunnelLifecycleControl + *out = new(bool) + **out = **in + } + if in.Tunnel2IkeVersions != nil { + in, out := &in.Tunnel2IkeVersions, &out.Tunnel2IkeVersions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel2InsideCidr 
!= nil { + in, out := &in.Tunnel2InsideCidr, &out.Tunnel2InsideCidr + *out = new(string) + **out = **in + } + if in.Tunnel2InsideIPv6Cidr != nil { + in, out := &in.Tunnel2InsideIPv6Cidr, &out.Tunnel2InsideIPv6Cidr + *out = new(string) + **out = **in + } + if in.Tunnel2LogOptions != nil { + in, out := &in.Tunnel2LogOptions, &out.Tunnel2LogOptions + *out = new(Tunnel2LogOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tunnel2Phase1DhGroupNumbers != nil { + in, out := &in.Tunnel2Phase1DhGroupNumbers, &out.Tunnel2Phase1DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Tunnel2Phase1EncryptionAlgorithms != nil { + in, out := &in.Tunnel2Phase1EncryptionAlgorithms, &out.Tunnel2Phase1EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel2Phase1IntegrityAlgorithms != nil { + in, out := &in.Tunnel2Phase1IntegrityAlgorithms, &out.Tunnel2Phase1IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel2Phase1LifetimeSeconds != nil { + in, out := &in.Tunnel2Phase1LifetimeSeconds, &out.Tunnel2Phase1LifetimeSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel2Phase2DhGroupNumbers != nil { + in, out := &in.Tunnel2Phase2DhGroupNumbers, &out.Tunnel2Phase2DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Tunnel2Phase2EncryptionAlgorithms != nil { + in, out := &in.Tunnel2Phase2EncryptionAlgorithms, &out.Tunnel2Phase2EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != 
nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel2Phase2IntegrityAlgorithms != nil { + in, out := &in.Tunnel2Phase2IntegrityAlgorithms, &out.Tunnel2Phase2IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel2Phase2LifetimeSeconds != nil { + in, out := &in.Tunnel2Phase2LifetimeSeconds, &out.Tunnel2Phase2LifetimeSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel2PresharedKeySecretRef != nil { + in, out := &in.Tunnel2PresharedKeySecretRef, &out.Tunnel2PresharedKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Tunnel2RekeyFuzzPercentage != nil { + in, out := &in.Tunnel2RekeyFuzzPercentage, &out.Tunnel2RekeyFuzzPercentage + *out = new(float64) + **out = **in + } + if in.Tunnel2RekeyMarginTimeSeconds != nil { + in, out := &in.Tunnel2RekeyMarginTimeSeconds, &out.Tunnel2RekeyMarginTimeSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel2ReplayWindowSize != nil { + in, out := &in.Tunnel2ReplayWindowSize, &out.Tunnel2ReplayWindowSize + *out = new(float64) + **out = **in + } + if in.Tunnel2StartupAction != nil { + in, out := &in.Tunnel2StartupAction, &out.Tunnel2StartupAction + *out = new(string) + **out = **in + } + if in.TunnelInsideIPVersion != nil { + in, out := &in.TunnelInsideIPVersion, &out.TunnelInsideIPVersion + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeRef != nil { + in, out := &in.TypeRef, &out.TypeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TypeSelector != nil { + in, out := &in.TypeSelector, &out.TypeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPNGatewayID != nil { + in, out := &in.VPNGatewayID, &out.VPNGatewayID + *out = new(string) + **out = **in + } + if 
in.VPNGatewayIDRef != nil { + in, out := &in.VPNGatewayIDRef, &out.VPNGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPNGatewayIDSelector != nil { + in, out := &in.VPNGatewayIDSelector, &out.VPNGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNConnectionInitParameters. +func (in *VPNConnectionInitParameters) DeepCopy() *VPNConnectionInitParameters { + if in == nil { + return nil + } + out := new(VPNConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNConnectionList) DeepCopyInto(out *VPNConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPNConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. -func (in *Route) DeepCopy() *Route { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNConnectionList. +func (in *VPNConnectionList) DeepCopy() *VPNConnectionList { if in == nil { return nil } - out := new(Route) + out := new(VPNConnectionList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Route) DeepCopyObject() runtime.Object { +func (in *VPNConnectionList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -41,533 +18171,1086 @@ func (in *Route) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RouteInitParameters) DeepCopyInto(out *RouteInitParameters) { +func (in *VPNConnectionObservation) DeepCopyInto(out *VPNConnectionObservation) { *out = *in - if in.CarrierGatewayID != nil { - in, out := &in.CarrierGatewayID, &out.CarrierGatewayID + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CoreNetworkArn != nil { + in, out := &in.CoreNetworkArn, &out.CoreNetworkArn + *out = new(string) + **out = **in + } + if in.CoreNetworkAttachmentArn != nil { + in, out := &in.CoreNetworkAttachmentArn, &out.CoreNetworkAttachmentArn + *out = new(string) + **out = **in + } + if in.CustomerGatewayID != nil { + in, out := &in.CustomerGatewayID, &out.CustomerGatewayID + *out = new(string) + **out = **in + } + if in.EnableAcceleration != nil { + in, out := &in.EnableAcceleration, &out.EnableAcceleration + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LocalIPv4NetworkCidr != nil { + in, out := &in.LocalIPv4NetworkCidr, &out.LocalIPv4NetworkCidr + *out = new(string) + **out = **in + } + if in.LocalIPv6NetworkCidr != nil { + in, out := &in.LocalIPv6NetworkCidr, &out.LocalIPv6NetworkCidr + *out = new(string) + **out = **in + } + if in.OutsideIPAddressType != nil { + in, out := &in.OutsideIPAddressType, &out.OutsideIPAddressType + *out = new(string) + **out = **in + } + if in.RemoteIPv4NetworkCidr != nil { + in, out := &in.RemoteIPv4NetworkCidr, &out.RemoteIPv4NetworkCidr + *out = new(string) + **out = **in + } + if in.RemoteIPv6NetworkCidr != nil { + in, out := &in.RemoteIPv6NetworkCidr, &out.RemoteIPv6NetworkCidr + *out = new(string) + **out = **in + } + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]RoutesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StaticRoutesOnly != nil { + in, out := &in.StaticRoutesOnly, &out.StaticRoutesOnly + *out = new(bool) + **out 
= **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TransitGatewayAttachmentID != nil { + in, out := &in.TransitGatewayAttachmentID, &out.TransitGatewayAttachmentID + *out = new(string) + **out = **in + } + if in.TransitGatewayID != nil { + in, out := &in.TransitGatewayID, &out.TransitGatewayID + *out = new(string) + **out = **in + } + if in.TransportTransitGatewayAttachmentID != nil { + in, out := &in.TransportTransitGatewayAttachmentID, &out.TransportTransitGatewayAttachmentID + *out = new(string) + **out = **in + } + if in.Tunnel1Address != nil { + in, out := &in.Tunnel1Address, &out.Tunnel1Address + *out = new(string) + **out = **in + } + if in.Tunnel1BGPAsn != nil { + in, out := &in.Tunnel1BGPAsn, &out.Tunnel1BGPAsn + *out = new(string) + **out = **in + } + if in.Tunnel1BGPHoldtime != nil { + in, out := &in.Tunnel1BGPHoldtime, &out.Tunnel1BGPHoldtime + *out = new(float64) + **out = **in + } + if in.Tunnel1CgwInsideAddress != nil { + in, out := &in.Tunnel1CgwInsideAddress, &out.Tunnel1CgwInsideAddress + *out = new(string) + **out = **in + } + if in.Tunnel1DpdTimeoutAction != nil { + in, out := &in.Tunnel1DpdTimeoutAction, &out.Tunnel1DpdTimeoutAction + *out = new(string) + **out = **in + } + if in.Tunnel1DpdTimeoutSeconds != nil { + in, out := &in.Tunnel1DpdTimeoutSeconds, &out.Tunnel1DpdTimeoutSeconds + *out = new(float64) + **out = **in + } + if 
in.Tunnel1EnableTunnelLifecycleControl != nil { + in, out := &in.Tunnel1EnableTunnelLifecycleControl, &out.Tunnel1EnableTunnelLifecycleControl + *out = new(bool) + **out = **in + } + if in.Tunnel1IkeVersions != nil { + in, out := &in.Tunnel1IkeVersions, &out.Tunnel1IkeVersions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1InsideCidr != nil { + in, out := &in.Tunnel1InsideCidr, &out.Tunnel1InsideCidr + *out = new(string) + **out = **in + } + if in.Tunnel1InsideIPv6Cidr != nil { + in, out := &in.Tunnel1InsideIPv6Cidr, &out.Tunnel1InsideIPv6Cidr *out = new(string) **out = **in } - if in.CoreNetworkArn != nil { - in, out := &in.CoreNetworkArn, &out.CoreNetworkArn - *out = new(string) + if in.Tunnel1LogOptions != nil { + in, out := &in.Tunnel1LogOptions, &out.Tunnel1LogOptions + *out = new(Tunnel1LogOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tunnel1Phase1DhGroupNumbers != nil { + in, out := &in.Tunnel1Phase1DhGroupNumbers, &out.Tunnel1Phase1DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Tunnel1Phase1EncryptionAlgorithms != nil { + in, out := &in.Tunnel1Phase1EncryptionAlgorithms, &out.Tunnel1Phase1EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase1IntegrityAlgorithms != nil { + in, out := &in.Tunnel1Phase1IntegrityAlgorithms, &out.Tunnel1Phase1IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase1LifetimeSeconds != nil { + in, out := &in.Tunnel1Phase1LifetimeSeconds, 
&out.Tunnel1Phase1LifetimeSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel1Phase2DhGroupNumbers != nil { + in, out := &in.Tunnel1Phase2DhGroupNumbers, &out.Tunnel1Phase2DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Tunnel1Phase2EncryptionAlgorithms != nil { + in, out := &in.Tunnel1Phase2EncryptionAlgorithms, &out.Tunnel1Phase2EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase2IntegrityAlgorithms != nil { + in, out := &in.Tunnel1Phase2IntegrityAlgorithms, &out.Tunnel1Phase2IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase2LifetimeSeconds != nil { + in, out := &in.Tunnel1Phase2LifetimeSeconds, &out.Tunnel1Phase2LifetimeSeconds + *out = new(float64) + **out = **in + } + if in.Tunnel1RekeyFuzzPercentage != nil { + in, out := &in.Tunnel1RekeyFuzzPercentage, &out.Tunnel1RekeyFuzzPercentage + *out = new(float64) **out = **in } - if in.DestinationCidrBlock != nil { - in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock - *out = new(string) + if in.Tunnel1RekeyMarginTimeSeconds != nil { + in, out := &in.Tunnel1RekeyMarginTimeSeconds, &out.Tunnel1RekeyMarginTimeSeconds + *out = new(float64) **out = **in } - if in.DestinationIPv6CidrBlock != nil { - in, out := &in.DestinationIPv6CidrBlock, &out.DestinationIPv6CidrBlock - *out = new(string) + if in.Tunnel1ReplayWindowSize != nil { + in, out := &in.Tunnel1ReplayWindowSize, &out.Tunnel1ReplayWindowSize + *out = new(float64) **out = **in } - if in.DestinationPrefixListID != nil { - in, out := &in.DestinationPrefixListID, &out.DestinationPrefixListID + if 
in.Tunnel1StartupAction != nil { + in, out := &in.Tunnel1StartupAction, &out.Tunnel1StartupAction *out = new(string) **out = **in } - if in.DestinationPrefixListIDRef != nil { - in, out := &in.DestinationPrefixListIDRef, &out.DestinationPrefixListIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel1VgwInsideAddress != nil { + in, out := &in.Tunnel1VgwInsideAddress, &out.Tunnel1VgwInsideAddress + *out = new(string) + **out = **in } - if in.DestinationPrefixListIDSelector != nil { - in, out := &in.DestinationPrefixListIDSelector, &out.DestinationPrefixListIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2Address != nil { + in, out := &in.Tunnel2Address, &out.Tunnel2Address + *out = new(string) + **out = **in } - if in.EgressOnlyGatewayID != nil { - in, out := &in.EgressOnlyGatewayID, &out.EgressOnlyGatewayID + if in.Tunnel2BGPAsn != nil { + in, out := &in.Tunnel2BGPAsn, &out.Tunnel2BGPAsn *out = new(string) **out = **in } - if in.EgressOnlyGatewayIDRef != nil { - in, out := &in.EgressOnlyGatewayIDRef, &out.EgressOnlyGatewayIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2BGPHoldtime != nil { + in, out := &in.Tunnel2BGPHoldtime, &out.Tunnel2BGPHoldtime + *out = new(float64) + **out = **in } - if in.EgressOnlyGatewayIDSelector != nil { - in, out := &in.EgressOnlyGatewayIDSelector, &out.EgressOnlyGatewayIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2CgwInsideAddress != nil { + in, out := &in.Tunnel2CgwInsideAddress, &out.Tunnel2CgwInsideAddress + *out = new(string) + **out = **in } - if in.GatewayID != nil { - in, out := &in.GatewayID, &out.GatewayID + if in.Tunnel2DpdTimeoutAction != nil { + in, out := &in.Tunnel2DpdTimeoutAction, &out.Tunnel2DpdTimeoutAction *out = new(string) **out = **in } - if in.GatewayIDRef != nil { - in, out := &in.GatewayIDRef, &out.GatewayIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2DpdTimeoutSeconds != nil 
{ + in, out := &in.Tunnel2DpdTimeoutSeconds, &out.Tunnel2DpdTimeoutSeconds + *out = new(float64) + **out = **in } - if in.GatewayIDSelector != nil { - in, out := &in.GatewayIDSelector, &out.GatewayIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2EnableTunnelLifecycleControl != nil { + in, out := &in.Tunnel2EnableTunnelLifecycleControl, &out.Tunnel2EnableTunnelLifecycleControl + *out = new(bool) + **out = **in } - if in.LocalGatewayID != nil { - in, out := &in.LocalGatewayID, &out.LocalGatewayID + if in.Tunnel2IkeVersions != nil { + in, out := &in.Tunnel2IkeVersions, &out.Tunnel2IkeVersions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel2InsideCidr != nil { + in, out := &in.Tunnel2InsideCidr, &out.Tunnel2InsideCidr *out = new(string) **out = **in } - if in.NATGatewayID != nil { - in, out := &in.NATGatewayID, &out.NATGatewayID + if in.Tunnel2InsideIPv6Cidr != nil { + in, out := &in.Tunnel2InsideIPv6Cidr, &out.Tunnel2InsideIPv6Cidr *out = new(string) **out = **in } - if in.NATGatewayIDRef != nil { - in, out := &in.NATGatewayIDRef, &out.NATGatewayIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.NATGatewayIDSelector != nil { - in, out := &in.NATGatewayIDSelector, &out.NATGatewayIDSelector - *out = new(v1.Selector) + if in.Tunnel2LogOptions != nil { + in, out := &in.Tunnel2LogOptions, &out.Tunnel2LogOptions + *out = new(Tunnel2LogOptionsObservation) (*in).DeepCopyInto(*out) } - if in.NetworkInterfaceID != nil { - in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID - *out = new(string) - **out = **in + if in.Tunnel2Phase1DhGroupNumbers != nil { + in, out := &in.Tunnel2Phase1DhGroupNumbers, &out.Tunnel2Phase1DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } } - 
if in.NetworkInterfaceIDRef != nil { - in, out := &in.NetworkInterfaceIDRef, &out.NetworkInterfaceIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase1EncryptionAlgorithms != nil { + in, out := &in.Tunnel2Phase1EncryptionAlgorithms, &out.Tunnel2Phase1EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.NetworkInterfaceIDSelector != nil { - in, out := &in.NetworkInterfaceIDSelector, &out.NetworkInterfaceIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase1IntegrityAlgorithms != nil { + in, out := &in.Tunnel2Phase1IntegrityAlgorithms, &out.Tunnel2Phase1IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.RouteTableID != nil { - in, out := &in.RouteTableID, &out.RouteTableID - *out = new(string) + if in.Tunnel2Phase1LifetimeSeconds != nil { + in, out := &in.Tunnel2Phase1LifetimeSeconds, &out.Tunnel2Phase1LifetimeSeconds + *out = new(float64) **out = **in } - if in.RouteTableIDRef != nil { - in, out := &in.RouteTableIDRef, &out.RouteTableIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase2DhGroupNumbers != nil { + in, out := &in.Tunnel2Phase2DhGroupNumbers, &out.Tunnel2Phase2DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } } - if in.RouteTableIDSelector != nil { - in, out := &in.RouteTableIDSelector, &out.RouteTableIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase2EncryptionAlgorithms != nil { + in, out := &in.Tunnel2Phase2EncryptionAlgorithms, &out.Tunnel2Phase2EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if 
(*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.TransitGatewayID != nil { - in, out := &in.TransitGatewayID, &out.TransitGatewayID - *out = new(string) - **out = **in + if in.Tunnel2Phase2IntegrityAlgorithms != nil { + in, out := &in.Tunnel2Phase2IntegrityAlgorithms, &out.Tunnel2Phase2IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.TransitGatewayIDRef != nil { - in, out := &in.TransitGatewayIDRef, &out.TransitGatewayIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase2LifetimeSeconds != nil { + in, out := &in.Tunnel2Phase2LifetimeSeconds, &out.Tunnel2Phase2LifetimeSeconds + *out = new(float64) + **out = **in } - if in.TransitGatewayIDSelector != nil { - in, out := &in.TransitGatewayIDSelector, &out.TransitGatewayIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2RekeyFuzzPercentage != nil { + in, out := &in.Tunnel2RekeyFuzzPercentage, &out.Tunnel2RekeyFuzzPercentage + *out = new(float64) + **out = **in } - if in.VPCEndpointID != nil { - in, out := &in.VPCEndpointID, &out.VPCEndpointID - *out = new(string) + if in.Tunnel2RekeyMarginTimeSeconds != nil { + in, out := &in.Tunnel2RekeyMarginTimeSeconds, &out.Tunnel2RekeyMarginTimeSeconds + *out = new(float64) **out = **in } - if in.VPCEndpointIDRef != nil { - in, out := &in.VPCEndpointIDRef, &out.VPCEndpointIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2ReplayWindowSize != nil { + in, out := &in.Tunnel2ReplayWindowSize, &out.Tunnel2ReplayWindowSize + *out = new(float64) + **out = **in } - if in.VPCEndpointIDSelector != nil { - in, out := &in.VPCEndpointIDSelector, &out.VPCEndpointIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2StartupAction != nil { + in, out := &in.Tunnel2StartupAction, 
&out.Tunnel2StartupAction + *out = new(string) + **out = **in } - if in.VPCPeeringConnectionID != nil { - in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + if in.Tunnel2VgwInsideAddress != nil { + in, out := &in.Tunnel2VgwInsideAddress, &out.Tunnel2VgwInsideAddress *out = new(string) **out = **in } - if in.VPCPeeringConnectionIDRef != nil { - in, out := &in.VPCPeeringConnectionIDRef, &out.VPCPeeringConnectionIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.TunnelInsideIPVersion != nil { + in, out := &in.TunnelInsideIPVersion, &out.TunnelInsideIPVersion + *out = new(string) + **out = **in } - if in.VPCPeeringConnectionIDSelector != nil { - in, out := &in.VPCPeeringConnectionIDSelector, &out.VPCPeeringConnectionIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteInitParameters. -func (in *RouteInitParameters) DeepCopy() *RouteInitParameters { - if in == nil { - return nil + if in.VPNGatewayID != nil { + in, out := &in.VPNGatewayID, &out.VPNGatewayID + *out = new(string) + **out = **in } - out := new(RouteInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouteList) DeepCopyInto(out *RouteList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Route, len(*in)) + if in.VgwTelemetry != nil { + in, out := &in.VgwTelemetry, &out.VgwTelemetry + *out = make([]VgwTelemetryObservation, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteList. 
-func (in *RouteList) DeepCopy() *RouteList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNConnectionObservation. +func (in *VPNConnectionObservation) DeepCopy() *VPNConnectionObservation { if in == nil { return nil } - out := new(RouteList) + out := new(VPNConnectionObservation) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RouteList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouteObservation) DeepCopyInto(out *RouteObservation) { +func (in *VPNConnectionParameters) DeepCopyInto(out *VPNConnectionParameters) { *out = *in - if in.CarrierGatewayID != nil { - in, out := &in.CarrierGatewayID, &out.CarrierGatewayID + if in.CustomerGatewayID != nil { + in, out := &in.CustomerGatewayID, &out.CustomerGatewayID *out = new(string) **out = **in } - if in.CoreNetworkArn != nil { - in, out := &in.CoreNetworkArn, &out.CoreNetworkArn - *out = new(string) + if in.CustomerGatewayIDRef != nil { + in, out := &in.CustomerGatewayIDRef, &out.CustomerGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CustomerGatewayIDSelector != nil { + in, out := &in.CustomerGatewayIDSelector, &out.CustomerGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EnableAcceleration != nil { + in, out := &in.EnableAcceleration, &out.EnableAcceleration + *out = new(bool) **out = **in } - if in.DestinationCidrBlock != nil { - in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock + if in.LocalIPv4NetworkCidr != nil { + in, out := &in.LocalIPv4NetworkCidr, &out.LocalIPv4NetworkCidr *out = new(string) **out = **in } - if in.DestinationIPv6CidrBlock != nil { - in, out := &in.DestinationIPv6CidrBlock, 
&out.DestinationIPv6CidrBlock + if in.LocalIPv6NetworkCidr != nil { + in, out := &in.LocalIPv6NetworkCidr, &out.LocalIPv6NetworkCidr *out = new(string) **out = **in } - if in.DestinationPrefixListID != nil { - in, out := &in.DestinationPrefixListID, &out.DestinationPrefixListID + if in.OutsideIPAddressType != nil { + in, out := &in.OutsideIPAddressType, &out.OutsideIPAddressType *out = new(string) **out = **in } - if in.EgressOnlyGatewayID != nil { - in, out := &in.EgressOnlyGatewayID, &out.EgressOnlyGatewayID + if in.Region != nil { + in, out := &in.Region, &out.Region *out = new(string) **out = **in } - if in.GatewayID != nil { - in, out := &in.GatewayID, &out.GatewayID + if in.RemoteIPv4NetworkCidr != nil { + in, out := &in.RemoteIPv4NetworkCidr, &out.RemoteIPv4NetworkCidr *out = new(string) **out = **in } - if in.ID != nil { - in, out := &in.ID, &out.ID + if in.RemoteIPv6NetworkCidr != nil { + in, out := &in.RemoteIPv6NetworkCidr, &out.RemoteIPv6NetworkCidr *out = new(string) **out = **in } - if in.InstanceID != nil { - in, out := &in.InstanceID, &out.InstanceID - *out = new(string) + if in.StaticRoutesOnly != nil { + in, out := &in.StaticRoutesOnly, &out.StaticRoutesOnly + *out = new(bool) **out = **in } - if in.InstanceOwnerID != nil { - in, out := &in.InstanceOwnerID, &out.InstanceOwnerID + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TransitGatewayID != nil { + in, out := &in.TransitGatewayID, &out.TransitGatewayID *out = new(string) **out = **in } - if in.LocalGatewayID != nil { - in, out := &in.LocalGatewayID, &out.LocalGatewayID + if in.TransitGatewayIDRef != nil { + in, out := &in.TransitGatewayIDRef, &out.TransitGatewayIDRef + *out = new(v1.Reference) + 
(*in).DeepCopyInto(*out) + } + if in.TransitGatewayIDSelector != nil { + in, out := &in.TransitGatewayIDSelector, &out.TransitGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TransportTransitGatewayAttachmentID != nil { + in, out := &in.TransportTransitGatewayAttachmentID, &out.TransportTransitGatewayAttachmentID *out = new(string) **out = **in } - if in.NATGatewayID != nil { - in, out := &in.NATGatewayID, &out.NATGatewayID + if in.Tunnel1DpdTimeoutAction != nil { + in, out := &in.Tunnel1DpdTimeoutAction, &out.Tunnel1DpdTimeoutAction *out = new(string) **out = **in } - if in.NetworkInterfaceID != nil { - in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID - *out = new(string) + if in.Tunnel1DpdTimeoutSeconds != nil { + in, out := &in.Tunnel1DpdTimeoutSeconds, &out.Tunnel1DpdTimeoutSeconds + *out = new(float64) **out = **in } - if in.Origin != nil { - in, out := &in.Origin, &out.Origin - *out = new(string) + if in.Tunnel1EnableTunnelLifecycleControl != nil { + in, out := &in.Tunnel1EnableTunnelLifecycleControl, &out.Tunnel1EnableTunnelLifecycleControl + *out = new(bool) **out = **in } - if in.RouteTableID != nil { - in, out := &in.RouteTableID, &out.RouteTableID + if in.Tunnel1IkeVersions != nil { + in, out := &in.Tunnel1IkeVersions, &out.Tunnel1IkeVersions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1InsideCidr != nil { + in, out := &in.Tunnel1InsideCidr, &out.Tunnel1InsideCidr *out = new(string) **out = **in } - if in.State != nil { - in, out := &in.State, &out.State + if in.Tunnel1InsideIPv6Cidr != nil { + in, out := &in.Tunnel1InsideIPv6Cidr, &out.Tunnel1InsideIPv6Cidr *out = new(string) **out = **in } - if in.TransitGatewayID != nil { - in, out := &in.TransitGatewayID, &out.TransitGatewayID - *out = new(string) + if in.Tunnel1LogOptions != nil { + in, out := &in.Tunnel1LogOptions, 
&out.Tunnel1LogOptions + *out = new(Tunnel1LogOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tunnel1Phase1DhGroupNumbers != nil { + in, out := &in.Tunnel1Phase1DhGroupNumbers, &out.Tunnel1Phase1DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Tunnel1Phase1EncryptionAlgorithms != nil { + in, out := &in.Tunnel1Phase1EncryptionAlgorithms, &out.Tunnel1Phase1EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase1IntegrityAlgorithms != nil { + in, out := &in.Tunnel1Phase1IntegrityAlgorithms, &out.Tunnel1Phase1IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase1LifetimeSeconds != nil { + in, out := &in.Tunnel1Phase1LifetimeSeconds, &out.Tunnel1Phase1LifetimeSeconds + *out = new(float64) **out = **in } - if in.VPCEndpointID != nil { - in, out := &in.VPCEndpointID, &out.VPCEndpointID - *out = new(string) + if in.Tunnel1Phase2DhGroupNumbers != nil { + in, out := &in.Tunnel1Phase2DhGroupNumbers, &out.Tunnel1Phase2DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Tunnel1Phase2EncryptionAlgorithms != nil { + in, out := &in.Tunnel1Phase2EncryptionAlgorithms, &out.Tunnel1Phase2EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase2IntegrityAlgorithms != nil { + in, out := &in.Tunnel1Phase2IntegrityAlgorithms, &out.Tunnel1Phase2IntegrityAlgorithms + 
*out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tunnel1Phase2LifetimeSeconds != nil { + in, out := &in.Tunnel1Phase2LifetimeSeconds, &out.Tunnel1Phase2LifetimeSeconds + *out = new(float64) **out = **in } - if in.VPCPeeringConnectionID != nil { - in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID - *out = new(string) + if in.Tunnel1PresharedKeySecretRef != nil { + in, out := &in.Tunnel1PresharedKeySecretRef, &out.Tunnel1PresharedKeySecretRef + *out = new(v1.SecretKeySelector) **out = **in } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteObservation. -func (in *RouteObservation) DeepCopy() *RouteObservation { - if in == nil { - return nil + if in.Tunnel1RekeyFuzzPercentage != nil { + in, out := &in.Tunnel1RekeyFuzzPercentage, &out.Tunnel1RekeyFuzzPercentage + *out = new(float64) + **out = **in } - out := new(RouteObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RouteParameters) DeepCopyInto(out *RouteParameters) { - *out = *in - if in.CarrierGatewayID != nil { - in, out := &in.CarrierGatewayID, &out.CarrierGatewayID - *out = new(string) + if in.Tunnel1RekeyMarginTimeSeconds != nil { + in, out := &in.Tunnel1RekeyMarginTimeSeconds, &out.Tunnel1RekeyMarginTimeSeconds + *out = new(float64) **out = **in } - if in.CoreNetworkArn != nil { - in, out := &in.CoreNetworkArn, &out.CoreNetworkArn - *out = new(string) + if in.Tunnel1ReplayWindowSize != nil { + in, out := &in.Tunnel1ReplayWindowSize, &out.Tunnel1ReplayWindowSize + *out = new(float64) **out = **in } - if in.DestinationCidrBlock != nil { - in, out := &in.DestinationCidrBlock, &out.DestinationCidrBlock + if in.Tunnel1StartupAction != nil { + in, out := &in.Tunnel1StartupAction, &out.Tunnel1StartupAction *out = new(string) **out = **in } - if in.DestinationIPv6CidrBlock != nil { - in, out := &in.DestinationIPv6CidrBlock, &out.DestinationIPv6CidrBlock + if in.Tunnel2DpdTimeoutAction != nil { + in, out := &in.Tunnel2DpdTimeoutAction, &out.Tunnel2DpdTimeoutAction *out = new(string) **out = **in } - if in.DestinationPrefixListID != nil { - in, out := &in.DestinationPrefixListID, &out.DestinationPrefixListID - *out = new(string) + if in.Tunnel2DpdTimeoutSeconds != nil { + in, out := &in.Tunnel2DpdTimeoutSeconds, &out.Tunnel2DpdTimeoutSeconds + *out = new(float64) **out = **in } - if in.DestinationPrefixListIDRef != nil { - in, out := &in.DestinationPrefixListIDRef, &out.DestinationPrefixListIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2EnableTunnelLifecycleControl != nil { + in, out := &in.Tunnel2EnableTunnelLifecycleControl, &out.Tunnel2EnableTunnelLifecycleControl + *out = new(bool) + **out = **in } - if in.DestinationPrefixListIDSelector != nil { - in, out := &in.DestinationPrefixListIDSelector, &out.DestinationPrefixListIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2IkeVersions != nil { + in, out := 
&in.Tunnel2IkeVersions, &out.Tunnel2IkeVersions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.EgressOnlyGatewayID != nil { - in, out := &in.EgressOnlyGatewayID, &out.EgressOnlyGatewayID + if in.Tunnel2InsideCidr != nil { + in, out := &in.Tunnel2InsideCidr, &out.Tunnel2InsideCidr *out = new(string) **out = **in } - if in.EgressOnlyGatewayIDRef != nil { - in, out := &in.EgressOnlyGatewayIDRef, &out.EgressOnlyGatewayIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.EgressOnlyGatewayIDSelector != nil { - in, out := &in.EgressOnlyGatewayIDSelector, &out.EgressOnlyGatewayIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } - if in.GatewayID != nil { - in, out := &in.GatewayID, &out.GatewayID + if in.Tunnel2InsideIPv6Cidr != nil { + in, out := &in.Tunnel2InsideIPv6Cidr, &out.Tunnel2InsideIPv6Cidr *out = new(string) **out = **in } - if in.GatewayIDRef != nil { - in, out := &in.GatewayIDRef, &out.GatewayIDRef - *out = new(v1.Reference) + if in.Tunnel2LogOptions != nil { + in, out := &in.Tunnel2LogOptions, &out.Tunnel2LogOptions + *out = new(Tunnel2LogOptionsParameters) (*in).DeepCopyInto(*out) } - if in.GatewayIDSelector != nil { - in, out := &in.GatewayIDSelector, &out.GatewayIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase1DhGroupNumbers != nil { + in, out := &in.Tunnel2Phase1DhGroupNumbers, &out.Tunnel2Phase1DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } } - if in.LocalGatewayID != nil { - in, out := &in.LocalGatewayID, &out.LocalGatewayID - *out = new(string) - **out = **in + if in.Tunnel2Phase1EncryptionAlgorithms != nil { + in, out := &in.Tunnel2Phase1EncryptionAlgorithms, &out.Tunnel2Phase1EncryptionAlgorithms + *out = make([]*string, len(*in)) + 
for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.NATGatewayID != nil { - in, out := &in.NATGatewayID, &out.NATGatewayID - *out = new(string) - **out = **in + if in.Tunnel2Phase1IntegrityAlgorithms != nil { + in, out := &in.Tunnel2Phase1IntegrityAlgorithms, &out.Tunnel2Phase1IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.NATGatewayIDRef != nil { - in, out := &in.NATGatewayIDRef, &out.NATGatewayIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase1LifetimeSeconds != nil { + in, out := &in.Tunnel2Phase1LifetimeSeconds, &out.Tunnel2Phase1LifetimeSeconds + *out = new(float64) + **out = **in } - if in.NATGatewayIDSelector != nil { - in, out := &in.NATGatewayIDSelector, &out.NATGatewayIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase2DhGroupNumbers != nil { + in, out := &in.Tunnel2Phase2DhGroupNumbers, &out.Tunnel2Phase2DhGroupNumbers + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } } - if in.NetworkInterfaceID != nil { - in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID - *out = new(string) - **out = **in + if in.Tunnel2Phase2EncryptionAlgorithms != nil { + in, out := &in.Tunnel2Phase2EncryptionAlgorithms, &out.Tunnel2Phase2EncryptionAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.NetworkInterfaceIDRef != nil { - in, out := &in.NetworkInterfaceIDRef, &out.NetworkInterfaceIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase2IntegrityAlgorithms != nil { + in, out := &in.Tunnel2Phase2IntegrityAlgorithms, 
&out.Tunnel2Phase2IntegrityAlgorithms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.NetworkInterfaceIDSelector != nil { - in, out := &in.NetworkInterfaceIDSelector, &out.NetworkInterfaceIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2Phase2LifetimeSeconds != nil { + in, out := &in.Tunnel2Phase2LifetimeSeconds, &out.Tunnel2Phase2LifetimeSeconds + *out = new(float64) + **out = **in } - if in.Region != nil { - in, out := &in.Region, &out.Region - *out = new(string) + if in.Tunnel2PresharedKeySecretRef != nil { + in, out := &in.Tunnel2PresharedKeySecretRef, &out.Tunnel2PresharedKeySecretRef + *out = new(v1.SecretKeySelector) **out = **in } - if in.RouteTableID != nil { - in, out := &in.RouteTableID, &out.RouteTableID - *out = new(string) + if in.Tunnel2RekeyFuzzPercentage != nil { + in, out := &in.Tunnel2RekeyFuzzPercentage, &out.Tunnel2RekeyFuzzPercentage + *out = new(float64) **out = **in } - if in.RouteTableIDRef != nil { - in, out := &in.RouteTableIDRef, &out.RouteTableIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) + if in.Tunnel2RekeyMarginTimeSeconds != nil { + in, out := &in.Tunnel2RekeyMarginTimeSeconds, &out.Tunnel2RekeyMarginTimeSeconds + *out = new(float64) + **out = **in } - if in.RouteTableIDSelector != nil { - in, out := &in.RouteTableIDSelector, &out.RouteTableIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.Tunnel2ReplayWindowSize != nil { + in, out := &in.Tunnel2ReplayWindowSize, &out.Tunnel2ReplayWindowSize + *out = new(float64) + **out = **in } - if in.TransitGatewayID != nil { - in, out := &in.TransitGatewayID, &out.TransitGatewayID + if in.Tunnel2StartupAction != nil { + in, out := &in.Tunnel2StartupAction, &out.Tunnel2StartupAction *out = new(string) **out = **in } - if in.TransitGatewayIDRef != nil { - in, out := &in.TransitGatewayIDRef, 
&out.TransitGatewayIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.TransitGatewayIDSelector != nil { - in, out := &in.TransitGatewayIDSelector, &out.TransitGatewayIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + if in.TunnelInsideIPVersion != nil { + in, out := &in.TunnelInsideIPVersion, &out.TunnelInsideIPVersion + *out = new(string) + **out = **in } - if in.VPCEndpointID != nil { - in, out := &in.VPCEndpointID, &out.VPCEndpointID + if in.Type != nil { + in, out := &in.Type, &out.Type *out = new(string) **out = **in } - if in.VPCEndpointIDRef != nil { - in, out := &in.VPCEndpointIDRef, &out.VPCEndpointIDRef + if in.TypeRef != nil { + in, out := &in.TypeRef, &out.TypeRef *out = new(v1.Reference) (*in).DeepCopyInto(*out) } - if in.VPCEndpointIDSelector != nil { - in, out := &in.VPCEndpointIDSelector, &out.VPCEndpointIDSelector + if in.TypeSelector != nil { + in, out := &in.TypeSelector, &out.TypeSelector *out = new(v1.Selector) (*in).DeepCopyInto(*out) } - if in.VPCPeeringConnectionID != nil { - in, out := &in.VPCPeeringConnectionID, &out.VPCPeeringConnectionID + if in.VPNGatewayID != nil { + in, out := &in.VPNGatewayID, &out.VPNGatewayID *out = new(string) **out = **in } - if in.VPCPeeringConnectionIDRef != nil { - in, out := &in.VPCPeeringConnectionIDRef, &out.VPCPeeringConnectionIDRef + if in.VPNGatewayIDRef != nil { + in, out := &in.VPNGatewayIDRef, &out.VPNGatewayIDRef *out = new(v1.Reference) (*in).DeepCopyInto(*out) } - if in.VPCPeeringConnectionIDSelector != nil { - in, out := &in.VPCPeeringConnectionIDSelector, &out.VPCPeeringConnectionIDSelector + if in.VPNGatewayIDSelector != nil { + in, out := &in.VPNGatewayIDSelector, &out.VPNGatewayIDSelector *out = new(v1.Selector) (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteParameters. 
-func (in *RouteParameters) DeepCopy() *RouteParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNConnectionParameters. +func (in *VPNConnectionParameters) DeepCopy() *VPNConnectionParameters { if in == nil { return nil } - out := new(RouteParameters) + out := new(VPNConnectionParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouteSpec) DeepCopyInto(out *RouteSpec) { +func (in *VPNConnectionSpec) DeepCopyInto(out *VPNConnectionSpec) { *out = *in in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) in.ForProvider.DeepCopyInto(&out.ForProvider) in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpec. -func (in *RouteSpec) DeepCopy() *RouteSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNConnectionSpec. +func (in *VPNConnectionSpec) DeepCopy() *VPNConnectionSpec { if in == nil { return nil } - out := new(RouteSpec) + out := new(VPNConnectionSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { +func (in *VPNConnectionStatus) DeepCopyInto(out *VPNConnectionStatus) { *out = *in in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) in.AtProvider.DeepCopyInto(&out.AtProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. -func (in *RouteStatus) DeepCopy() *RouteStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNConnectionStatus. 
+func (in *VPNConnectionStatus) DeepCopy() *VPNConnectionStatus { if in == nil { return nil } - out := new(RouteStatus) + out := new(VPNConnectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VcpuCountInitParameters) DeepCopyInto(out *VcpuCountInitParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VcpuCountInitParameters. +func (in *VcpuCountInitParameters) DeepCopy() *VcpuCountInitParameters { + if in == nil { + return nil + } + out := new(VcpuCountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VcpuCountObservation) DeepCopyInto(out *VcpuCountObservation) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VcpuCountObservation. +func (in *VcpuCountObservation) DeepCopy() *VcpuCountObservation { + if in == nil { + return nil + } + out := new(VcpuCountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VcpuCountParameters) DeepCopyInto(out *VcpuCountParameters) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VcpuCountParameters. +func (in *VcpuCountParameters) DeepCopy() *VcpuCountParameters { + if in == nil { + return nil + } + out := new(VcpuCountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VgwTelemetryInitParameters) DeepCopyInto(out *VgwTelemetryInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VgwTelemetryInitParameters. +func (in *VgwTelemetryInitParameters) DeepCopy() *VgwTelemetryInitParameters { + if in == nil { + return nil + } + out := new(VgwTelemetryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VgwTelemetryObservation) DeepCopyInto(out *VgwTelemetryObservation) { + *out = *in + if in.AcceptedRouteCount != nil { + in, out := &in.AcceptedRouteCount, &out.AcceptedRouteCount + *out = new(float64) + **out = **in + } + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.LastStatusChange != nil { + in, out := &in.LastStatusChange, &out.LastStatusChange + *out = new(string) + **out = **in + } + if in.OutsideIPAddress != nil { + in, out := &in.OutsideIPAddress, &out.OutsideIPAddress + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.StatusMessage != nil { + in, out := &in.StatusMessage, &out.StatusMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VgwTelemetryObservation. +func (in *VgwTelemetryObservation) DeepCopy() *VgwTelemetryObservation { + if in == nil { + return nil + } + out := new(VgwTelemetryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VgwTelemetryParameters) DeepCopyInto(out *VgwTelemetryParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VgwTelemetryParameters. 
+func (in *VgwTelemetryParameters) DeepCopy() *VgwTelemetryParameters { + if in == nil { + return nil + } + out := new(VgwTelemetryParameters) in.DeepCopyInto(out) return out } diff --git a/apis/ec2/v1beta2/zz_generated.managed.go b/apis/ec2/v1beta2/zz_generated.managed.go index 5f6fd51d7b..a9b43d7cd9 100644 --- a/apis/ec2/v1beta2/zz_generated.managed.go +++ b/apis/ec2/v1beta2/zz_generated.managed.go @@ -7,6 +7,246 @@ package v1beta2 import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +// GetCondition of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EBSSnapshotImport. 
+func (mg *EBSSnapshotImport) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EBSSnapshotImport. +func (mg *EBSSnapshotImport) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FlowLog. +func (mg *FlowLog) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FlowLog. +func (mg *FlowLog) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FlowLog. +func (mg *FlowLog) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FlowLog. +func (mg *FlowLog) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FlowLog. +func (mg *FlowLog) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FlowLog. +func (mg *FlowLog) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FlowLog. +func (mg *FlowLog) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FlowLog. 
+func (mg *FlowLog) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FlowLog. +func (mg *FlowLog) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FlowLog. +func (mg *FlowLog) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FlowLog. +func (mg *FlowLog) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FlowLog. +func (mg *FlowLog) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Instance. +func (mg *Instance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Instance. +func (mg *Instance) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Instance. +func (mg *Instance) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Instance. +func (mg *Instance) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Instance. +func (mg *Instance) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Instance. +func (mg *Instance) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Instance. +func (mg *Instance) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Instance. 
+func (mg *Instance) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Instance. +func (mg *Instance) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Instance. +func (mg *Instance) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Instance. +func (mg *Instance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Instance. +func (mg *Instance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LaunchTemplate. +func (mg *LaunchTemplate) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LaunchTemplate. +func (mg *LaunchTemplate) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LaunchTemplate. +func (mg *LaunchTemplate) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LaunchTemplate. +func (mg *LaunchTemplate) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LaunchTemplate. +func (mg *LaunchTemplate) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LaunchTemplate. +func (mg *LaunchTemplate) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LaunchTemplate. +func (mg *LaunchTemplate) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this LaunchTemplate. +func (mg *LaunchTemplate) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LaunchTemplate. +func (mg *LaunchTemplate) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LaunchTemplate. +func (mg *LaunchTemplate) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LaunchTemplate. +func (mg *LaunchTemplate) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LaunchTemplate. +func (mg *LaunchTemplate) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this Route. func (mg *Route) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -66,3 +306,543 @@ func (mg *Route) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsT func (mg *Route) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } + +// GetCondition of this SpotFleetRequest. +func (mg *SpotFleetRequest) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SpotFleetRequest. +func (mg *SpotFleetRequest) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SpotFleetRequest. +func (mg *SpotFleetRequest) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SpotFleetRequest. +func (mg *SpotFleetRequest) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SpotFleetRequest. 
+func (mg *SpotFleetRequest) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SpotFleetRequest. +func (mg *SpotFleetRequest) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SpotFleetRequest. +func (mg *SpotFleetRequest) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SpotFleetRequest. +func (mg *SpotFleetRequest) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SpotFleetRequest. +func (mg *SpotFleetRequest) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SpotFleetRequest. +func (mg *SpotFleetRequest) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SpotFleetRequest. +func (mg *SpotFleetRequest) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SpotFleetRequest. +func (mg *SpotFleetRequest) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SpotInstanceRequest. 
+func (mg *SpotInstanceRequest) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SpotInstanceRequest. +func (mg *SpotInstanceRequest) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TrafficMirrorFilterRule. 
+func (mg *TrafficMirrorFilterRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TrafficMirrorFilterRule. +func (mg *TrafficMirrorFilterRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TrafficMirrorFilterRule. 
+func (mg *TrafficMirrorFilterRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPCEndpoint. +func (mg *VPCEndpoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPCEndpoint. +func (mg *VPCEndpoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPCEndpoint. +func (mg *VPCEndpoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPCEndpoint. +func (mg *VPCEndpoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPCEndpoint. +func (mg *VPCEndpoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPCEndpoint. +func (mg *VPCEndpoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPCEndpoint. +func (mg *VPCEndpoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPCEndpoint. +func (mg *VPCEndpoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPCEndpoint. +func (mg *VPCEndpoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPCEndpoint. +func (mg *VPCEndpoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPCEndpoint. 
+func (mg *VPCEndpoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPCEndpoint. +func (mg *VPCEndpoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPCIpamPoolCidr. 
+func (mg *VPCIpamPoolCidr) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPCIpamPoolCidr. +func (mg *VPCIpamPoolCidr) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPCPeeringConnection. 
+func (mg *VPCPeeringConnection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPCPeeringConnectionAccepter. 
+func (mg *VPCPeeringConnectionAccepter) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPCPeeringConnectionAccepter. +func (mg *VPCPeeringConnectionAccepter) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPCPeeringConnectionOptions. 
+func (mg *VPCPeeringConnectionOptions) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPNConnection. +func (mg *VPNConnection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPNConnection. 
+func (mg *VPNConnection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPNConnection. +func (mg *VPNConnection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPNConnection. +func (mg *VPNConnection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPNConnection. +func (mg *VPNConnection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPNConnection. +func (mg *VPNConnection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPNConnection. +func (mg *VPNConnection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPNConnection. +func (mg *VPNConnection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPNConnection. +func (mg *VPNConnection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPNConnection. +func (mg *VPNConnection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPNConnection. +func (mg *VPNConnection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPNConnection. 
+func (mg *VPNConnection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ec2/v1beta2/zz_generated.managedlist.go b/apis/ec2/v1beta2/zz_generated.managedlist.go index e0c3e97f50..f16489c51f 100644 --- a/apis/ec2/v1beta2/zz_generated.managedlist.go +++ b/apis/ec2/v1beta2/zz_generated.managedlist.go @@ -7,6 +7,42 @@ package v1beta2 import resource "github.com/crossplane/crossplane-runtime/pkg/resource" +// GetItems of this EBSSnapshotImportList. +func (l *EBSSnapshotImportList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FlowLogList. +func (l *FlowLogList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this InstanceList. +func (l *InstanceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LaunchTemplateList. +func (l *LaunchTemplateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this RouteList. func (l *RouteList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -15,3 +51,84 @@ func (l *RouteList) GetItems() []resource.Managed { } return items } + +// GetItems of this SpotFleetRequestList. +func (l *SpotFleetRequestList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpotInstanceRequestList. 
+func (l *SpotInstanceRequestList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TrafficMirrorFilterRuleList. +func (l *TrafficMirrorFilterRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPCEndpointList. +func (l *VPCEndpointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPCIpamPoolCidrList. +func (l *VPCIpamPoolCidrList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPCPeeringConnectionAccepterList. +func (l *VPCPeeringConnectionAccepterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPCPeeringConnectionList. +func (l *VPCPeeringConnectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPCPeeringConnectionOptionsList. +func (l *VPCPeeringConnectionOptionsList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPNConnectionList. 
+func (l *VPNConnectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ec2/v1beta2/zz_generated.resolvers.go b/apis/ec2/v1beta2/zz_generated.resolvers.go index 91b95e20a8..99a7d5fe28 100644 --- a/apis/ec2/v1beta2/zz_generated.resolvers.go +++ b/apis/ec2/v1beta2/zz_generated.resolvers.go @@ -13,13 +13,1427 @@ import ( errors "github.com/pkg/errors" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *EBSSnapshotImport) ResolveReferences( // ResolveReferences of this EBSSnapshotImport. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list 
for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FlowLog. +func (mg *FlowLog) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.IAMRoleArnRef, + Selector: mg.Spec.ForProvider.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMRoleArn") + } + mg.Spec.ForProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IAMRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.LogDestination), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.LogDestinationRef, + Selector: mg.Spec.ForProvider.LogDestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogDestination") + } + mg.Spec.ForProvider.LogDestination = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogDestinationRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCID") + } + mg.Spec.ForProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCIDRef = 
rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.IAMRoleArnRef, + Selector: mg.Spec.InitProvider.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMRoleArn") + } + mg.Spec.InitProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IAMRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LogDestination), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.LogDestinationRef, + Selector: mg.Spec.InitProvider.LogDestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LogDestination") + } + mg.Spec.InitProvider.LogDestination = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LogDestinationRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCID") + } + mg.Spec.InitProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Instance. 
+func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.EBSBlockDevice); i3++ { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EBSBlockDevice[i3].KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EBSBlockDevice[i3].KMSKeyIDRef, + Selector: mg.Spec.ForProvider.EBSBlockDevice[i3].KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EBSBlockDevice[i3].KMSKeyID") + } + mg.Spec.ForProvider.EBSBlockDevice[i3].KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EBSBlockDevice[i3].KMSKeyIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkInterface); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkInterface[i3].NetworkInterfaceID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkInterface[i3].NetworkInterfaceIDRef, + Selector: mg.Spec.ForProvider.NetworkInterface[i3].NetworkInterfaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + 
return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterface[i3].NetworkInterfaceID") + } + mg.Spec.ForProvider.NetworkInterface[i3].NetworkInterfaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkInterface[i3].NetworkInterfaceIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.RootBlockDevice != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RootBlockDevice.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RootBlockDevice.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.RootBlockDevice.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RootBlockDevice.KMSKeyID") + } + mg.Spec.ForProvider.RootBlockDevice.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RootBlockDevice.KMSKeyIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCSecurityGroupIds") + } + mg.Spec.ForProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + + for i3 := 0; i3 < len(mg.Spec.InitProvider.EBSBlockDevice); i3++ { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EBSBlockDevice[i3].KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EBSBlockDevice[i3].KMSKeyIDRef, + Selector: mg.Spec.InitProvider.EBSBlockDevice[i3].KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EBSBlockDevice[i3].KMSKeyID") + } + mg.Spec.InitProvider.EBSBlockDevice[i3].KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EBSBlockDevice[i3].KMSKeyIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkInterface); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NetworkInterface", 
"NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkInterface[i3].NetworkInterfaceID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkInterface[i3].NetworkInterfaceIDRef, + Selector: mg.Spec.InitProvider.NetworkInterface[i3].NetworkInterfaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterface[i3].NetworkInterfaceID") + } + mg.Spec.InitProvider.NetworkInterface[i3].NetworkInterfaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkInterface[i3].NetworkInterfaceIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.RootBlockDevice != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RootBlockDevice.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.RootBlockDevice.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.RootBlockDevice.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RootBlockDevice.KMSKeyID") + } + mg.Spec.InitProvider.RootBlockDevice.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RootBlockDevice.KMSKeyIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCSecurityGroupIds") + } + mg.Spec.InitProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this LaunchTemplate. 
+func (mg *LaunchTemplate) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.BlockDeviceMappings); i3++ { + if mg.Spec.ForProvider.BlockDeviceMappings[i3].EBS != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.BlockDeviceMappings[i3].EBS.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.BlockDeviceMappings[i3].EBS.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.BlockDeviceMappings[i3].EBS.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.BlockDeviceMappings[i3].EBS.KMSKeyID") + } + mg.Spec.ForProvider.BlockDeviceMappings[i3].EBS.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BlockDeviceMappings[i3].EBS.KMSKeyIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.IAMInstanceProfile != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IAMInstanceProfile.Arn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.IAMInstanceProfile.ArnRef, + Selector: mg.Spec.ForProvider.IAMInstanceProfile.ArnSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMInstanceProfile.Arn") + } + mg.Spec.ForProvider.IAMInstanceProfile.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IAMInstanceProfile.ArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.IAMInstanceProfile != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IAMInstanceProfile.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.IAMInstanceProfile.NameRef, + Selector: mg.Spec.ForProvider.IAMInstanceProfile.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMInstanceProfile.Name") + } + mg.Spec.ForProvider.IAMInstanceProfile.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IAMInstanceProfile.NameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkInterfaces); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkInterfaces[i3].NetworkInterfaceID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkInterfaces[i3].NetworkInterfaceIDRef, + Selector: mg.Spec.ForProvider.NetworkInterfaces[i3].NetworkInterfaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil 
{ + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterfaces[i3].NetworkInterfaceID") + } + mg.Spec.ForProvider.NetworkInterfaces[i3].NetworkInterfaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkInterfaces[i3].NetworkInterfaceIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkInterfaces); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.NetworkInterfaces[i3].SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.NetworkInterfaces[i3].SecurityGroupRefs, + Selector: mg.Spec.ForProvider.NetworkInterfaces[i3].SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterfaces[i3].SecurityGroups") + } + mg.Spec.ForProvider.NetworkInterfaces[i3].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.NetworkInterfaces[i3].SecurityGroupRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkInterfaces); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkInterfaces[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkInterfaces[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.NetworkInterfaces[i3].SubnetIDSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterfaces[i3].SubnetID") + } + mg.Spec.ForProvider.NetworkInterfaces[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkInterfaces[i3].SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupNames), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupNameRefs, + Selector: mg.Spec.ForProvider.SecurityGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupNames") + } + mg.Spec.ForProvider.SecurityGroupNames = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupNameRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCSecurityGroupIds") + } + mg.Spec.ForProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + 
mg.Spec.ForProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + + for i3 := 0; i3 < len(mg.Spec.InitProvider.BlockDeviceMappings); i3++ { + if mg.Spec.InitProvider.BlockDeviceMappings[i3].EBS != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.BlockDeviceMappings[i3].EBS.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.BlockDeviceMappings[i3].EBS.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.BlockDeviceMappings[i3].EBS.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.BlockDeviceMappings[i3].EBS.KMSKeyID") + } + mg.Spec.InitProvider.BlockDeviceMappings[i3].EBS.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BlockDeviceMappings[i3].EBS.KMSKeyIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.IAMInstanceProfile != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IAMInstanceProfile.Arn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.IAMInstanceProfile.ArnRef, + Selector: mg.Spec.InitProvider.IAMInstanceProfile.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMInstanceProfile.Arn") + } + mg.Spec.InitProvider.IAMInstanceProfile.Arn = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IAMInstanceProfile.ArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.IAMInstanceProfile != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IAMInstanceProfile.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.IAMInstanceProfile.NameRef, + Selector: mg.Spec.InitProvider.IAMInstanceProfile.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMInstanceProfile.Name") + } + mg.Spec.InitProvider.IAMInstanceProfile.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IAMInstanceProfile.NameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkInterfaces); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkInterfaces[i3].NetworkInterfaceID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkInterfaces[i3].NetworkInterfaceIDRef, + Selector: mg.Spec.InitProvider.NetworkInterfaces[i3].NetworkInterfaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterfaces[i3].NetworkInterfaceID") + } + mg.Spec.InitProvider.NetworkInterfaces[i3].NetworkInterfaceID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkInterfaces[i3].NetworkInterfaceIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkInterfaces); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.NetworkInterfaces[i3].SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.NetworkInterfaces[i3].SecurityGroupRefs, + Selector: mg.Spec.InitProvider.NetworkInterfaces[i3].SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterfaces[i3].SecurityGroups") + } + mg.Spec.InitProvider.NetworkInterfaces[i3].SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.NetworkInterfaces[i3].SecurityGroupRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkInterfaces); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkInterfaces[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkInterfaces[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.NetworkInterfaces[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterfaces[i3].SubnetID") + } + 
mg.Spec.InitProvider.NetworkInterfaces[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkInterfaces[i3].SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupNames), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupNameRefs, + Selector: mg.Spec.InitProvider.SecurityGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupNames") + } + mg.Spec.InitProvider.SecurityGroupNames = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupNameRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCSecurityGroupIds") + } + mg.Spec.InitProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this Route. 
+func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "ManagedPrefixList", "ManagedPrefixListList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DestinationPrefixListID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DestinationPrefixListIDRef, + Selector: mg.Spec.ForProvider.DestinationPrefixListIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DestinationPrefixListID") + } + mg.Spec.ForProvider.DestinationPrefixListID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DestinationPrefixListIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "EgressOnlyInternetGateway", "EgressOnlyInternetGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EgressOnlyGatewayID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.EgressOnlyGatewayIDRef, + Selector: mg.Spec.ForProvider.EgressOnlyGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EgressOnlyGatewayID") + } + mg.Spec.ForProvider.EgressOnlyGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EgressOnlyGatewayIDRef = rsp.ResolvedReference + 
{ + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "InternetGateway", "InternetGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GatewayID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.GatewayIDRef, + Selector: mg.Spec.ForProvider.GatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.GatewayID") + } + mg.Spec.ForProvider.GatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.GatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NATGateway", "NATGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NATGatewayID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NATGatewayIDRef, + Selector: mg.Spec.ForProvider.NATGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NATGatewayID") + } + mg.Spec.ForProvider.NATGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NATGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.NetworkInterfaceID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkInterfaceIDRef, + Selector: mg.Spec.ForProvider.NetworkInterfaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterfaceID") + } + mg.Spec.ForProvider.NetworkInterfaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkInterfaceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "RouteTable", "RouteTableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RouteTableID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RouteTableIDRef, + Selector: mg.Spec.ForProvider.RouteTableIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RouteTableID") + } + mg.Spec.ForProvider.RouteTableID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RouteTableIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "TransitGateway", "TransitGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TransitGatewayID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.TransitGatewayIDRef, + Selector: mg.Spec.ForProvider.TransitGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TransitGatewayID") + } 
+ mg.Spec.ForProvider.TransitGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TransitGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCEndpoint", "VPCEndpointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCEndpointID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VPCEndpointIDRef, + Selector: mg.Spec.ForProvider.VPCEndpointIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCEndpointID") + } + mg.Spec.ForProvider.VPCEndpointID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCEndpointIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCPeeringConnection", "VPCPeeringConnectionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCPeeringConnectionID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VPCPeeringConnectionIDRef, + Selector: mg.Spec.ForProvider.VPCPeeringConnectionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCPeeringConnectionID") + } + mg.Spec.ForProvider.VPCPeeringConnectionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCPeeringConnectionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "ManagedPrefixList", "ManagedPrefixListList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DestinationPrefixListID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DestinationPrefixListIDRef, + Selector: mg.Spec.InitProvider.DestinationPrefixListIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DestinationPrefixListID") + } + mg.Spec.InitProvider.DestinationPrefixListID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DestinationPrefixListIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "EgressOnlyInternetGateway", "EgressOnlyInternetGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EgressOnlyGatewayID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.EgressOnlyGatewayIDRef, + Selector: mg.Spec.InitProvider.EgressOnlyGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EgressOnlyGatewayID") + } + mg.Spec.InitProvider.EgressOnlyGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EgressOnlyGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "InternetGateway", "InternetGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.GatewayID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.GatewayIDRef, + Selector: mg.Spec.InitProvider.GatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.GatewayID") + } + mg.Spec.InitProvider.GatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.GatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NATGateway", "NATGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NATGatewayID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NATGatewayIDRef, + Selector: mg.Spec.InitProvider.NATGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NATGatewayID") + } + mg.Spec.InitProvider.NATGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NATGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkInterfaceID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NetworkInterfaceIDRef, + Selector: mg.Spec.InitProvider.NetworkInterfaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterfaceID") + } + 
mg.Spec.InitProvider.NetworkInterfaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkInterfaceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "RouteTable", "RouteTableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RouteTableID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.RouteTableIDRef, + Selector: mg.Spec.InitProvider.RouteTableIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RouteTableID") + } + mg.Spec.InitProvider.RouteTableID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RouteTableIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "TransitGateway", "TransitGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TransitGatewayID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.TransitGatewayIDRef, + Selector: mg.Spec.InitProvider.TransitGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TransitGatewayID") + } + mg.Spec.InitProvider.TransitGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TransitGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCEndpoint", "VPCEndpointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCEndpointID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.VPCEndpointIDRef, + Selector: mg.Spec.InitProvider.VPCEndpointIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCEndpointID") + } + mg.Spec.InitProvider.VPCEndpointID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCEndpointIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCPeeringConnection", "VPCPeeringConnectionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCPeeringConnectionID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.VPCPeeringConnectionIDRef, + Selector: mg.Spec.InitProvider.VPCPeeringConnectionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCPeeringConnectionID") + } + mg.Spec.InitProvider.VPCPeeringConnectionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCPeeringConnectionIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpotFleetRequest. 
+func (mg *SpotFleetRequest) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.LaunchSpecification); i3++ { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LaunchSpecification[i3].IAMInstanceProfileArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.LaunchSpecification[i3].IAMInstanceProfileArnRef, + Selector: mg.Spec.ForProvider.LaunchSpecification[i3].IAMInstanceProfileArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LaunchSpecification[i3].IAMInstanceProfileArn") + } + mg.Spec.ForProvider.LaunchSpecification[i3].IAMInstanceProfileArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LaunchSpecification[i3].IAMInstanceProfileArnRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.LaunchTemplateConfig); i3++ { + if mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.ID), + Extract: resource.ExtractResourceID(), + 
Reference: mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.IDRef, + Selector: mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.ID") + } + mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.IDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.LaunchTemplateConfig); i3++ { + if mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.Version), + Extract: resource.ExtractParamPath("latest_version", true), + Reference: mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.VersionRef, + Selector: mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.VersionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.Version") + } + mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.Version = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.VersionRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.LaunchSpecification); i3++ { + { + m, l, err = 
apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LaunchSpecification[i3].IAMInstanceProfileArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.LaunchSpecification[i3].IAMInstanceProfileArnRef, + Selector: mg.Spec.InitProvider.LaunchSpecification[i3].IAMInstanceProfileArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LaunchSpecification[i3].IAMInstanceProfileArn") + } + mg.Spec.InitProvider.LaunchSpecification[i3].IAMInstanceProfileArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LaunchSpecification[i3].IAMInstanceProfileArnRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.LaunchTemplateConfig); i3++ { + if mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.IDRef, + Selector: mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.ID") + } + mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.IDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.LaunchTemplateConfig); i3++ { + if mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "LaunchTemplate", "LaunchTemplateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.Version), + Extract: resource.ExtractParamPath("latest_version", true), + Reference: mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.VersionRef, + Selector: mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.VersionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.Version") + } + mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.Version = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LaunchTemplateConfig[i3].LaunchTemplateSpecification.VersionRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this SpotInstanceRequest. 
+func (mg *SpotInstanceRequest) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCSecurityGroupIds") + } + mg.Spec.ForProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCSecurityGroupIds") + } + mg.Spec.InitProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this TrafficMirrorFilterRule. 
+func (mg *TrafficMirrorFilterRule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "TrafficMirrorFilter", "TrafficMirrorFilterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TrafficMirrorFilterID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TrafficMirrorFilterIDRef, + Selector: mg.Spec.ForProvider.TrafficMirrorFilterIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TrafficMirrorFilterID") + } + mg.Spec.ForProvider.TrafficMirrorFilterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TrafficMirrorFilterIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "TrafficMirrorFilter", "TrafficMirrorFilterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TrafficMirrorFilterID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TrafficMirrorFilterIDRef, + Selector: mg.Spec.InitProvider.TrafficMirrorFilterIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TrafficMirrorFilterID") + } + mg.Spec.InitProvider.TrafficMirrorFilterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TrafficMirrorFilterIDRef = 
rsp.ResolvedReference - // ResolveReferences of this Route. - apisresolver "github.com/upbound/provider-aws/internal/apis" -) + return nil +} -func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { +// ResolveReferences of this VPCEndpoint. +func (mg *VPCEndpoint) ResolveReferences(ctx context.Context, c client.Reader) error { var m xpresource.Managed var l xpresource.ManagedList r := reference.NewAPIResolver(c, mg) @@ -27,166 +1441,240 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "ManagedPrefixList", "ManagedPrefixListList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCEndpointService", "VPCEndpointServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DestinationPrefixListID), + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceName), + Extract: resource.ExtractParamPath("service_name", true), + Reference: mg.Spec.ForProvider.ServiceNameRef, + Selector: mg.Spec.ForProvider.ServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceName") + } + mg.Spec.ForProvider.ServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.VPCID), Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.DestinationPrefixListIDRef, - Selector: mg.Spec.ForProvider.DestinationPrefixListIDSelector, + Reference: mg.Spec.ForProvider.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.DestinationPrefixListID") + return errors.Wrap(err, "mg.Spec.ForProvider.VPCID") } - mg.Spec.ForProvider.DestinationPrefixListID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.DestinationPrefixListIDRef = rsp.ResolvedReference + mg.Spec.ForProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "EgressOnlyInternetGateway", "EgressOnlyInternetGatewayList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCEndpointService", "VPCEndpointServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EgressOnlyGatewayID), - Extract: resource.ExtractResourceID(), - Reference: mg.Spec.ForProvider.EgressOnlyGatewayIDRef, - Selector: mg.Spec.ForProvider.EgressOnlyGatewayIDSelector, + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceName), + Extract: resource.ExtractParamPath("service_name", true), + Reference: mg.Spec.InitProvider.ServiceNameRef, + Selector: mg.Spec.InitProvider.ServiceNameSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.EgressOnlyGatewayID") + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceName") } - mg.Spec.ForProvider.EgressOnlyGatewayID = reference.ToPtrValue(rsp.ResolvedValue) - 
mg.Spec.ForProvider.EgressOnlyGatewayIDRef = rsp.ResolvedReference + mg.Spec.InitProvider.ServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "InternetGateway", "InternetGatewayList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GatewayID), + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCID), Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.GatewayIDRef, - Selector: mg.Spec.ForProvider.GatewayIDSelector, + Reference: mg.Spec.InitProvider.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.GatewayID") + return errors.Wrap(err, "mg.Spec.InitProvider.VPCID") } - mg.Spec.ForProvider.GatewayID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.GatewayIDRef = rsp.ResolvedReference + mg.Spec.InitProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VPCIpamPoolCidr. 
+func (mg *VPCIpamPoolCidr) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NATGateway", "NATGatewayList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCIpamPool", "VPCIpamPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NATGatewayID), - Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.NATGatewayIDRef, - Selector: mg.Spec.ForProvider.NATGatewayIDSelector, + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IpamPoolID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IpamPoolIDRef, + Selector: mg.Spec.ForProvider.IpamPoolIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.NATGatewayID") + return errors.Wrap(err, "mg.Spec.ForProvider.IpamPoolID") } - mg.Spec.ForProvider.NATGatewayID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.NATGatewayIDRef = rsp.ResolvedReference + mg.Spec.ForProvider.IpamPoolID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IpamPoolIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCIpamPool", "VPCIpamPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.NetworkInterfaceID), + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IpamPoolID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.IpamPoolIDRef, + Selector: mg.Spec.InitProvider.IpamPoolIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IpamPoolID") + } + mg.Spec.InitProvider.IpamPoolID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IpamPoolIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VPCPeeringConnection. +func (mg *VPCPeeringConnection) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PeerVPCID), Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.NetworkInterfaceIDRef, - Selector: mg.Spec.ForProvider.NetworkInterfaceIDSelector, + Reference: mg.Spec.ForProvider.PeerVPCIDRef, + Selector: mg.Spec.ForProvider.PeerVPCIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterfaceID") + return errors.Wrap(err, "mg.Spec.ForProvider.PeerVPCID") } - mg.Spec.ForProvider.NetworkInterfaceID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.NetworkInterfaceIDRef = rsp.ResolvedReference + mg.Spec.ForProvider.PeerVPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PeerVPCIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "RouteTable", "RouteTableList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RouteTableID), + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCID), Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.RouteTableIDRef, - Selector: mg.Spec.ForProvider.RouteTableIDSelector, + Reference: mg.Spec.ForProvider.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.RouteTableID") + return errors.Wrap(err, "mg.Spec.ForProvider.VPCID") } - mg.Spec.ForProvider.RouteTableID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.RouteTableIDRef = rsp.ResolvedReference + mg.Spec.ForProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "TransitGateway", "TransitGatewayList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TransitGatewayID), + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PeerVPCID), Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.TransitGatewayIDRef, - Selector: mg.Spec.ForProvider.TransitGatewayIDSelector, + Reference: mg.Spec.InitProvider.PeerVPCIDRef, + Selector: 
mg.Spec.InitProvider.PeerVPCIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.TransitGatewayID") + return errors.Wrap(err, "mg.Spec.InitProvider.PeerVPCID") } - mg.Spec.ForProvider.TransitGatewayID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.TransitGatewayIDRef = rsp.ResolvedReference + mg.Spec.InitProvider.PeerVPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PeerVPCIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCEndpoint", "VPCEndpointList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCEndpointID), + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCID), Extract: reference.ExternalName(), - Reference: mg.Spec.ForProvider.VPCEndpointIDRef, - Selector: mg.Spec.ForProvider.VPCEndpointIDSelector, + Reference: mg.Spec.InitProvider.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.ForProvider.VPCEndpointID") + return errors.Wrap(err, "mg.Spec.InitProvider.VPCID") } - mg.Spec.ForProvider.VPCEndpointID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.ForProvider.VPCEndpointIDRef = rsp.ResolvedReference + mg.Spec.InitProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VPCPeeringConnectionAccepter. 
+func (mg *VPCPeeringConnectionAccepter) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCPeeringConnection", "VPCPeeringConnectionList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCPeeringConnection", "VPCPeeringConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCPeeringConnectionID), - Extract: reference.ExternalName(), + Extract: resource.ExtractResourceID(), Reference: mg.Spec.ForProvider.VPCPeeringConnectionIDRef, Selector: mg.Spec.ForProvider.VPCPeeringConnectionIDSelector, To: reference.To{List: l, Managed: m}, @@ -198,119 +1686,181 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.VPCPeeringConnectionID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VPCPeeringConnectionIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "ManagedPrefixList", "ManagedPrefixListList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCPeeringConnection", "VPCPeeringConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DestinationPrefixListID), - Extract: reference.ExternalName(), - Reference: mg.Spec.InitProvider.DestinationPrefixListIDRef, - Selector: mg.Spec.InitProvider.DestinationPrefixListIDSelector, + 
CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCPeeringConnectionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VPCPeeringConnectionIDRef, + Selector: mg.Spec.InitProvider.VPCPeeringConnectionIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.DestinationPrefixListID") + return errors.Wrap(err, "mg.Spec.InitProvider.VPCPeeringConnectionID") } - mg.Spec.InitProvider.DestinationPrefixListID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.DestinationPrefixListIDRef = rsp.ResolvedReference + mg.Spec.InitProvider.VPCPeeringConnectionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCPeeringConnectionIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VPCPeeringConnectionOptions. +func (mg *VPCPeeringConnectionOptions) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "EgressOnlyInternetGateway", "EgressOnlyInternetGatewayList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCPeeringConnection", "VPCPeeringConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EgressOnlyGatewayID), + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCPeeringConnectionID), Extract: resource.ExtractResourceID(), - Reference: mg.Spec.InitProvider.EgressOnlyGatewayIDRef, - Selector: mg.Spec.InitProvider.EgressOnlyGatewayIDSelector, + Reference: mg.Spec.ForProvider.VPCPeeringConnectionIDRef, + Selector: 
mg.Spec.ForProvider.VPCPeeringConnectionIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.EgressOnlyGatewayID") + return errors.Wrap(err, "mg.Spec.ForProvider.VPCPeeringConnectionID") } - mg.Spec.InitProvider.EgressOnlyGatewayID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.EgressOnlyGatewayIDRef = rsp.ResolvedReference + mg.Spec.ForProvider.VPCPeeringConnectionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCPeeringConnectionIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "InternetGateway", "InternetGatewayList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "VPCPeeringConnection", "VPCPeeringConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.GatewayID), - Extract: reference.ExternalName(), - Reference: mg.Spec.InitProvider.GatewayIDRef, - Selector: mg.Spec.InitProvider.GatewayIDSelector, + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCPeeringConnectionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VPCPeeringConnectionIDRef, + Selector: mg.Spec.InitProvider.VPCPeeringConnectionIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.GatewayID") + return errors.Wrap(err, "mg.Spec.InitProvider.VPCPeeringConnectionID") } - mg.Spec.InitProvider.GatewayID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.GatewayIDRef = rsp.ResolvedReference + mg.Spec.InitProvider.VPCPeeringConnectionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCPeeringConnectionIDRef = rsp.ResolvedReference + + return nil +} + +// 
ResolveReferences of this VPNConnection. +func (mg *VPNConnection) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NATGateway", "NATGatewayList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "CustomerGateway", "CustomerGatewayList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NATGatewayID), - Extract: reference.ExternalName(), - Reference: mg.Spec.InitProvider.NATGatewayIDRef, - Selector: mg.Spec.InitProvider.NATGatewayIDSelector, + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomerGatewayID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CustomerGatewayIDRef, + Selector: mg.Spec.ForProvider.CustomerGatewayIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.NATGatewayID") + return errors.Wrap(err, "mg.Spec.ForProvider.CustomerGatewayID") } - mg.Spec.InitProvider.NATGatewayID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.NATGatewayIDRef = rsp.ResolvedReference + mg.Spec.ForProvider.CustomerGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CustomerGatewayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "TransitGateway", "TransitGatewayList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkInterfaceID), - Extract: reference.ExternalName(), - Reference: mg.Spec.InitProvider.NetworkInterfaceIDRef, - Selector: mg.Spec.InitProvider.NetworkInterfaceIDSelector, + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TransitGatewayID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TransitGatewayIDRef, + Selector: mg.Spec.ForProvider.TransitGatewayIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterfaceID") + return errors.Wrap(err, "mg.Spec.ForProvider.TransitGatewayID") } - mg.Spec.InitProvider.NetworkInterfaceID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.NetworkInterfaceIDRef = rsp.ResolvedReference + mg.Spec.ForProvider.TransitGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TransitGatewayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "RouteTable", "RouteTableList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "CustomerGateway", "CustomerGatewayList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RouteTableID), + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Type), + Extract: resource.ExtractParamPath("type", false), + Reference: mg.Spec.ForProvider.TypeRef, + Selector: mg.Spec.ForProvider.TypeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Type") + } + mg.Spec.ForProvider.Type = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TypeRef = rsp.ResolvedReference 
+ { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPNGateway", "VPNGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPNGatewayID), Extract: reference.ExternalName(), - Reference: mg.Spec.InitProvider.RouteTableIDRef, - Selector: mg.Spec.InitProvider.RouteTableIDSelector, + Reference: mg.Spec.ForProvider.VPNGatewayIDRef, + Selector: mg.Spec.ForProvider.VPNGatewayIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.RouteTableID") + return errors.Wrap(err, "mg.Spec.ForProvider.VPNGatewayID") } - mg.Spec.InitProvider.RouteTableID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.RouteTableIDRef = rsp.ResolvedReference + mg.Spec.ForProvider.VPNGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPNGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "CustomerGateway", "CustomerGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CustomerGatewayID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.CustomerGatewayIDRef, + Selector: mg.Spec.InitProvider.CustomerGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomerGatewayID") + } + mg.Spec.InitProvider.CustomerGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CustomerGatewayIDRef = rsp.ResolvedReference { m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "TransitGateway", "TransitGatewayList") if err != nil { @@ -319,7 +1869,7 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TransitGatewayID), - Extract: reference.ExternalName(), + Extract: resource.ExtractResourceID(), Reference: mg.Spec.InitProvider.TransitGatewayIDRef, Selector: mg.Spec.InitProvider.TransitGatewayIDSelector, To: reference.To{List: l, Managed: m}, @@ -331,43 +1881,43 @@ func (mg *Route) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.InitProvider.TransitGatewayID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.TransitGatewayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCEndpoint", "VPCEndpointList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "CustomerGateway", "CustomerGatewayList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCEndpointID), - Extract: reference.ExternalName(), - Reference: mg.Spec.InitProvider.VPCEndpointIDRef, - Selector: mg.Spec.InitProvider.VPCEndpointIDSelector, + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Type), + Extract: resource.ExtractParamPath("type", false), + Reference: mg.Spec.InitProvider.TypeRef, + Selector: mg.Spec.InitProvider.TypeSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.VPCEndpointID") + return errors.Wrap(err, "mg.Spec.InitProvider.Type") } - mg.Spec.InitProvider.VPCEndpointID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.VPCEndpointIDRef = 
rsp.ResolvedReference + mg.Spec.InitProvider.Type = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TypeRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPCPeeringConnection", "VPCPeeringConnectionList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPNGateway", "VPNGatewayList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ - CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCPeeringConnectionID), + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPNGatewayID), Extract: reference.ExternalName(), - Reference: mg.Spec.InitProvider.VPCPeeringConnectionIDRef, - Selector: mg.Spec.InitProvider.VPCPeeringConnectionIDSelector, + Reference: mg.Spec.InitProvider.VPNGatewayIDRef, + Selector: mg.Spec.InitProvider.VPNGatewayIDSelector, To: reference.To{List: l, Managed: m}, }) } if err != nil { - return errors.Wrap(err, "mg.Spec.InitProvider.VPCPeeringConnectionID") + return errors.Wrap(err, "mg.Spec.InitProvider.VPNGatewayID") } - mg.Spec.InitProvider.VPCPeeringConnectionID = reference.ToPtrValue(rsp.ResolvedValue) - mg.Spec.InitProvider.VPCPeeringConnectionIDRef = rsp.ResolvedReference + mg.Spec.InitProvider.VPNGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPNGatewayIDRef = rsp.ResolvedReference return nil } diff --git a/apis/ec2/v1beta2/zz_instance_terraformed.go b/apis/ec2/v1beta2/zz_instance_terraformed.go new file mode 100755 index 0000000000..3551810095 --- /dev/null +++ b/apis/ec2/v1beta2/zz_instance_terraformed.go @@ -0,0 +1,139 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Instance +func (mg *Instance) GetTerraformResourceType() string { + return "aws_instance" +} + +// GetConnectionDetailsMapping for this Instance +func (tr *Instance) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Instance +func (tr *Instance) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Instance +func (tr *Instance) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Instance +func (tr *Instance) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Instance +func (tr *Instance) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Instance +func (tr *Instance) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Instance +func (tr *Instance) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + 
+// GetInitParameters of this Instance +func (tr *Instance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Instance using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Instance) LateInitialize(attrs []byte) (bool, error) { + params := &InstanceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("AssociatePublicIPAddress")) + opts = append(opts, resource.WithNameFilter("CPUCoreCount")) + opts = append(opts, resource.WithNameFilter("CPUThreadsPerCore")) + opts = append(opts, resource.WithNameFilter("IPv6AddressCount")) + opts = append(opts, resource.WithNameFilter("IPv6Addresses")) + opts = append(opts, resource.WithNameFilter("NetworkInterface")) + opts = append(opts, resource.WithNameFilter("PrivateIP")) + opts = append(opts, resource.WithNameFilter("SourceDestCheck")) + opts = append(opts, resource.WithNameFilter("SubnetID")) + opts = append(opts, resource.WithNameFilter("VPCSecurityGroupIds")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Instance) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/ec2/v1beta2/zz_instance_types.go b/apis/ec2/v1beta2/zz_instance_types.go new file mode 100755 index 0000000000..5a4ae71c6d --- /dev/null +++ b/apis/ec2/v1beta2/zz_instance_types.go @@ -0,0 +1,1381 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CPUOptionsInitParameters struct { + + // Indicates whether to enable the instance for AMD SEV-SNP. 
AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only. Valid values are enabled and disabled. + AmdSevSnp *string `json:"amdSevSnp,omitempty" tf:"amd_sev_snp,omitempty"` + + // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options CPU Cores and Threads Per CPU Core Per Instance Type - specifying this option for unsupported instance types will return an error from the EC2 API. + CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` + + // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See Optimizing CPU Options for more information. + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` +} + +type CPUOptionsObservation struct { + + // Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only. Valid values are enabled and disabled. + AmdSevSnp *string `json:"amdSevSnp,omitempty" tf:"amd_sev_snp,omitempty"` + + // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options CPU Cores and Threads Per CPU Core Per Instance Type - specifying this option for unsupported instance types will return an error from the EC2 API. + CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` + + // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See Optimizing CPU Options for more information. + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` +} + +type CPUOptionsParameters struct { + + // Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only. Valid values are enabled and disabled. 
+ // +kubebuilder:validation:Optional + AmdSevSnp *string `json:"amdSevSnp,omitempty" tf:"amd_sev_snp,omitempty"` + + // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options CPU Cores and Threads Per CPU Core Per Instance Type - specifying this option for unsupported instance types will return an error from the EC2 API. + // +kubebuilder:validation:Optional + CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` + + // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See Optimizing CPU Options for more information. + // +kubebuilder:validation:Optional + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` +} + +type CapacityReservationSpecificationInitParameters struct { + + // Indicates the instance's Capacity Reservation preferences. Can be "open" or "none". (Default: "open"). + CapacityReservationPreference *string `json:"capacityReservationPreference,omitempty" tf:"capacity_reservation_preference,omitempty"` + + // Information about the target Capacity Reservation. See Capacity Reservation Target below for more details. + CapacityReservationTarget *CapacityReservationTargetInitParameters `json:"capacityReservationTarget,omitempty" tf:"capacity_reservation_target,omitempty"` +} + +type CapacityReservationSpecificationObservation struct { + + // Indicates the instance's Capacity Reservation preferences. Can be "open" or "none". (Default: "open"). + CapacityReservationPreference *string `json:"capacityReservationPreference,omitempty" tf:"capacity_reservation_preference,omitempty"` + + // Information about the target Capacity Reservation. See Capacity Reservation Target below for more details. 
+ CapacityReservationTarget *CapacityReservationTargetObservation `json:"capacityReservationTarget,omitempty" tf:"capacity_reservation_target,omitempty"` +} + +type CapacityReservationSpecificationParameters struct { + + // Indicates the instance's Capacity Reservation preferences. Can be "open" or "none". (Default: "open"). + // +kubebuilder:validation:Optional + CapacityReservationPreference *string `json:"capacityReservationPreference,omitempty" tf:"capacity_reservation_preference,omitempty"` + + // Information about the target Capacity Reservation. See Capacity Reservation Target below for more details. + // +kubebuilder:validation:Optional + CapacityReservationTarget *CapacityReservationTargetParameters `json:"capacityReservationTarget,omitempty" tf:"capacity_reservation_target,omitempty"` +} + +type CapacityReservationTargetInitParameters struct { + + // ID of the Capacity Reservation in which to run the instance. + CapacityReservationID *string `json:"capacityReservationId,omitempty" tf:"capacity_reservation_id,omitempty"` + + // ARN of the Capacity Reservation resource group in which to run the instance. + CapacityReservationResourceGroupArn *string `json:"capacityReservationResourceGroupArn,omitempty" tf:"capacity_reservation_resource_group_arn,omitempty"` +} + +type CapacityReservationTargetObservation struct { + + // ID of the Capacity Reservation in which to run the instance. + CapacityReservationID *string `json:"capacityReservationId,omitempty" tf:"capacity_reservation_id,omitempty"` + + // ARN of the Capacity Reservation resource group in which to run the instance. + CapacityReservationResourceGroupArn *string `json:"capacityReservationResourceGroupArn,omitempty" tf:"capacity_reservation_resource_group_arn,omitempty"` +} + +type CapacityReservationTargetParameters struct { + + // ID of the Capacity Reservation in which to run the instance. 
+ // +kubebuilder:validation:Optional + CapacityReservationID *string `json:"capacityReservationId,omitempty" tf:"capacity_reservation_id,omitempty"` + + // ARN of the Capacity Reservation resource group in which to run the instance. + // +kubebuilder:validation:Optional + CapacityReservationResourceGroupArn *string `json:"capacityReservationResourceGroupArn,omitempty" tf:"capacity_reservation_resource_group_arn,omitempty"` +} + +type CreditSpecificationInitParameters struct { + + // Credit option for CPU usage. Valid values include standard or unlimited. T3 instances are launched as unlimited by default. T2 instances are launched as standard by default. + CPUCredits *string `json:"cpuCredits,omitempty" tf:"cpu_credits,omitempty"` +} + +type CreditSpecificationObservation struct { + + // Credit option for CPU usage. Valid values include standard or unlimited. T3 instances are launched as unlimited by default. T2 instances are launched as standard by default. + CPUCredits *string `json:"cpuCredits,omitempty" tf:"cpu_credits,omitempty"` +} + +type CreditSpecificationParameters struct { + + // Credit option for CPU usage. Valid values include standard or unlimited. T3 instances are launched as unlimited by default. T2 instances are launched as standard by default. + // +kubebuilder:validation:Optional + CPUCredits *string `json:"cpuCredits,omitempty" tf:"cpu_credits,omitempty"` +} + +type EBSBlockDeviceInitParameters struct { + + // Whether the volume should be destroyed on instance termination. Defaults to true. + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Name of the device to mount. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Enables EBS encryption on the volume. Defaults to false. Cannot be used with snapshot_id. Must be configured to perform drift detection. 
+ Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Amount of provisioned IOPS. Only valid for volume_type of io1, io2 or gp3. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Snapshot ID to mount. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Map of tags to assign to the device. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for volume_type of gp3. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume in gibibytes (GiB). + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of volume. Valid values include standard, gp2, gp3, io1, io2, sc1, or st1. Defaults to gp2. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSBlockDeviceObservation struct { + + // Whether the volume should be destroyed on instance termination. Defaults to true. 
+ DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Name of the device to mount. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Enables EBS encryption on the volume. Defaults to false. Cannot be used with snapshot_id. Must be configured to perform drift detection. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Amount of provisioned IOPS. Only valid for volume_type of io1, io2 or gp3. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Snapshot ID to mount. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Map of tags to assign to the device. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for volume_type of gp3. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // ID of the volume. For example, the ID can be accessed like this, aws_instance.web.ebs_block_device.2.volume_id. + VolumeID *string `json:"volumeId,omitempty" tf:"volume_id,omitempty"` + + // Size of the volume in gibibytes (GiB). + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of volume. Valid values include standard, gp2, gp3, io1, io2, sc1, or st1. Defaults to gp2. 
+ VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSBlockDeviceParameters struct { + + // Whether the volume should be destroyed on instance termination. Defaults to true. + // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Name of the device to mount. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // Enables EBS encryption on the volume. Defaults to false. Cannot be used with snapshot_id. Must be configured to perform drift detection. + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Amount of provisioned IOPS. Only valid for volume_type of io1, io2 or gp3. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Snapshot ID to mount. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Map of tags to assign to the device. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +kubebuilder:validation:Optional + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for volume_type of gp3. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume in gibibytes (GiB). + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of volume. Valid values include standard, gp2, gp3, io1, io2, sc1, or st1. Defaults to gp2. + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EnclaveOptionsInitParameters struct { + + // Whether Nitro Enclaves will be enabled on the instance. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type EnclaveOptionsObservation struct { + + // Whether Nitro Enclaves will be enabled on the instance. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type EnclaveOptionsParameters struct { + + // Whether Nitro Enclaves will be enabled on the instance. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type EphemeralBlockDeviceInitParameters struct { + + // Name of the block device to mount on the instance. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Suppresses the specified device included in the AMI's block device mapping. 
+ NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // Instance Store Device Name (e.g., ephemeral0). + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type EphemeralBlockDeviceObservation struct { + + // Name of the block device to mount on the instance. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Suppresses the specified device included in the AMI's block device mapping. + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // Instance Store Device Name (e.g., ephemeral0). + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type EphemeralBlockDeviceParameters struct { + + // Name of the block device to mount on the instance. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // Suppresses the specified device included in the AMI's block device mapping. + // +kubebuilder:validation:Optional + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // Instance Store Device Name (e.g., ephemeral0). + // +kubebuilder:validation:Optional + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type InstanceInitParameters struct { + + // AMI to use for the instance. Required unless launch_template is specified and the Launch Template specifes an AMI. If an AMI is specified in the Launch Template, setting ami will override the AMI specified in the Launch Template. + AMI *string `json:"ami,omitempty" tf:"ami,omitempty"` + + // Whether to associate a public IP address with an instance in a VPC. + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // AZ to start the instance in. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // Sets the number of CPU cores for an instance. 
This option is only supported on creation of instance type that support CPU Options CPU Cores and Threads Per CPU Core Per Instance Type - specifying this option for unsupported instance types will return an error from the EC2 API. + CPUCoreCount *float64 `json:"cpuCoreCount,omitempty" tf:"cpu_core_count,omitempty"` + + // The CPU options for the instance. See CPU Options below for more details. + CPUOptions *CPUOptionsInitParameters `json:"cpuOptions,omitempty" tf:"cpu_options,omitempty"` + + // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See Optimizing CPU Options for more information. + CPUThreadsPerCore *float64 `json:"cpuThreadsPerCore,omitempty" tf:"cpu_threads_per_core,omitempty"` + + // Describes an instance's Capacity Reservation targeting option. See Capacity Reservation Specification below for more details. + CapacityReservationSpecification *CapacityReservationSpecificationInitParameters `json:"capacityReservationSpecification,omitempty" tf:"capacity_reservation_specification,omitempty"` + + // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. + CreditSpecification *CreditSpecificationInitParameters `json:"creditSpecification,omitempty" tf:"credit_specification,omitempty"` + + // If true, enables EC2 Instance Stop Protection. + DisableAPIStop *bool `json:"disableApiStop,omitempty" tf:"disable_api_stop,omitempty"` + + // If true, enables EC2 Instance Termination Protection. + DisableAPITermination *bool `json:"disableApiTermination,omitempty" tf:"disable_api_termination,omitempty"` + + // One or more configuration blocks with additional EBS block devices to attach to the instance. Block device configurations only apply on resource creation. 
See Block Devices below for details on attributes and drift detection. When accessing this as an attribute reference, it is a set of objects. + EBSBlockDevice []EBSBlockDeviceInitParameters `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + // If true, the launched EC2 instance will be EBS-optimized. Note that if this is not set on an instance type that is optimized by default then this will show as disabled but if the instance type is optimized by default then there is no need to set this and there is no effect to disabling it. See the EBS Optimized section of the AWS User Guide for more information. + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. + EnclaveOptions *EnclaveOptionsInitParameters `json:"enclaveOptions,omitempty" tf:"enclave_options,omitempty"` + + // One or more configuration blocks to customize Ephemeral (also known as "Instance Store") volumes on the instance. See Block Devices below for details. When accessing this as an attribute reference, it is a set of objects. + EphemeralBlockDevice []EphemeralBlockDeviceInitParameters `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + // If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the password_data attribute. See GetPasswordData for more information. + GetPasswordData *bool `json:"getPasswordData,omitempty" tf:"get_password_data,omitempty"` + + // If true, the launched EC2 instance will support hibernation. + Hibernation *bool `json:"hibernation,omitempty" tf:"hibernation,omitempty"` + + // ID of a dedicated host that the instance will be assigned to. Use when an instance is to be launched on a specific dedicated host. 
+ HostID *string `json:"hostId,omitempty" tf:"host_id,omitempty"` + + // ARN of the host resource group in which to launch the instances. If you specify an ARN, omit the tenancy parameter or set it to host. + HostResourceGroupArn *string `json:"hostResourceGroupArn,omitempty" tf:"host_resource_group_arn,omitempty"` + + // IAM Instance Profile to launch the instance with. Specified as the name of the Instance Profile. Ensure your credentials have the correct permission to assign the instance profile according to the EC2 documentation, notably iam:PassRole. + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // Number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. + IPv6AddressCount *float64 `json:"ipv6AddressCount,omitempty" tf:"ipv6_address_count,omitempty"` + + // Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + // Shutdown behavior for the instance. Amazon defaults this to stop for EBS-backed instances and terminate for instance-store instances. Cannot be set on instance-store instances. See Shutdown Behavior for more information. + InstanceInitiatedShutdownBehavior *string `json:"instanceInitiatedShutdownBehavior,omitempty" tf:"instance_initiated_shutdown_behavior,omitempty"` + + // Describes the market (purchasing) option for the instances. See Market Options below for details on attributes. + InstanceMarketOptions *InstanceMarketOptionsInitParameters `json:"instanceMarketOptions,omitempty" tf:"instance_market_options,omitempty"` + + // Instance type to use for the instance. Required unless launch_template is specified and the Launch Template specifies an instance type. 
If an instance type is specified in the Launch Template, setting instance_type will override the instance type specified in the Launch Template. Updates to this field will trigger a stop/start of the EC2 instance. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Key name of the Key Pair to use for the instance; which can be managed using the . + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // Specifies a Launch Template to configure the instance. Parameters configured on this resource will override the corresponding parameters in the Launch Template. See Launch Template Specification below for more details. + LaunchTemplate *LaunchTemplateInitParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // Maintenance and recovery options for the instance. See Maintenance Options below for more details. + MaintenanceOptions *MaintenanceOptionsInitParameters `json:"maintenanceOptions,omitempty" tf:"maintenance_options,omitempty"` + + // Customize the metadata options of the instance. See Metadata Options below for more details. + MetadataOptions *MetadataOptionsInitParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // If true, the launched EC2 instance will have detailed monitoring enabled. (Available since v0.6.0) + Monitoring *bool `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // Customize network interfaces to be attached at instance boot time. See Network Interfaces below for more details. + NetworkInterface []NetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // Placement Group to start the instance in. + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + // Number of the partition the instance is in. Valid only if the strategy argument is set to "partition". 
+ PlacementPartitionNumber *float64 `json:"placementPartitionNumber,omitempty" tf:"placement_partition_number,omitempty"` + + // Options for the instance hostname. The default values are inherited from the subnet. See Private DNS Name Options below for more details. + PrivateDNSNameOptions *PrivateDNSNameOptionsInitParameters `json:"privateDnsNameOptions,omitempty" tf:"private_dns_name_options,omitempty"` + + // Private IP address to associate with the instance in a VPC. + PrivateIP *string `json:"privateIp,omitempty" tf:"private_ip,omitempty"` + + // Configuration block to customize details about the root block device of the instance. See Block Devices below for details. When accessing this as an attribute reference, it is a list containing one object. + RootBlockDevice *RootBlockDeviceInitParameters `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // List of secondary private IPv4 addresses to assign to the instance's primary network interface (eth0) in a VPC. Can only be assigned to the primary network interface (eth0) attached at instance creation, not a pre-existing network interface i.e., referenced in a network_interface block. Refer to the Elastic network interfaces documentation to see the maximum number of private IP addresses allowed per instance type. + // +listType=set + SecondaryPrivateIps []*string `json:"secondaryPrivateIps,omitempty" tf:"secondary_private_ips,omitempty"` + + // Controls if traffic is routed to the instance when the destination address does not match the instance. Used for NAT or VPNs. Defaults true. + SourceDestCheck *bool `json:"sourceDestCheck,omitempty" tf:"source_dest_check,omitempty"` + + // VPC Subnet ID to launch in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the import-instance command. Valid values are default, dedicated, and host. + Tenancy *string `json:"tenancy,omitempty" tf:"tenancy,omitempty"` + + // User data to provide when launching the instance. Do not pass gzip-compressed data via this argument; see user_data_base64 instead. Updates to this field will trigger a stop/start of the EC2 instance by default. If the user_data_replace_on_change is set then updates to this field will trigger a destroy and recreate. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Can be used instead of user_data to pass base64-encoded binary data directly. Use this instead of user_data whenever the value is not a valid UTF-8 string. For example, gzip-encoded user data must be base64-encoded and passed via this argument to avoid corruption. Updates to this field will trigger a stop/start of the EC2 instance by default. If the user_data_replace_on_change is set then updates to this field will trigger a destroy and recreate. + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` + + // When used in combination with user_data or user_data_base64 will trigger a destroy and recreate when set to true. Defaults to false if not set. 
+ UserDataReplaceOnChange *bool `json:"userDataReplaceOnChange,omitempty" tf:"user_data_replace_on_change,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of security group IDs to associate with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` + + // Map of tags to assign, at instance-creation time, to root and EBS volumes. + // +mapType=granular + VolumeTags map[string]*string `json:"volumeTags,omitempty" tf:"volume_tags,omitempty"` +} + +type InstanceMarketOptionsInitParameters struct { + + // Type of market for the instance. Valid value is spot. Defaults to spot. Required if spot_options is specified. + MarketType *string `json:"marketType,omitempty" tf:"market_type,omitempty"` + + // Block to configure the options for Spot Instances. See Spot Options below for details on attributes. + SpotOptions *SpotOptionsInitParameters `json:"spotOptions,omitempty" tf:"spot_options,omitempty"` +} + +type InstanceMarketOptionsObservation struct { + + // Type of market for the instance. Valid value is spot. Defaults to spot. Required if spot_options is specified. + MarketType *string `json:"marketType,omitempty" tf:"market_type,omitempty"` + + // Block to configure the options for Spot Instances. See Spot Options below for details on attributes. 
+ SpotOptions *SpotOptionsObservation `json:"spotOptions,omitempty" tf:"spot_options,omitempty"` +} + +type InstanceMarketOptionsParameters struct { + + // Type of market for the instance. Valid value is spot. Defaults to spot. Required if spot_options is specified. + // +kubebuilder:validation:Optional + MarketType *string `json:"marketType,omitempty" tf:"market_type,omitempty"` + + // Block to configure the options for Spot Instances. See Spot Options below for details on attributes. + // +kubebuilder:validation:Optional + SpotOptions *SpotOptionsParameters `json:"spotOptions,omitempty" tf:"spot_options,omitempty"` +} + +type InstanceObservation struct { + + // AMI to use for the instance. Required unless launch_template is specified and the Launch Template specifes an AMI. If an AMI is specified in the Launch Template, setting ami will override the AMI specified in the Launch Template. + AMI *string `json:"ami,omitempty" tf:"ami,omitempty"` + + // ARN of the instance. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to associate a public IP address with an instance in a VPC. + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // AZ to start the instance in. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options CPU Cores and Threads Per CPU Core Per Instance Type - specifying this option for unsupported instance types will return an error from the EC2 API. + CPUCoreCount *float64 `json:"cpuCoreCount,omitempty" tf:"cpu_core_count,omitempty"` + + // The CPU options for the instance. See CPU Options below for more details. + CPUOptions *CPUOptionsObservation `json:"cpuOptions,omitempty" tf:"cpu_options,omitempty"` + + // If set to 1, hyperthreading is disabled on the launched instance. 
Defaults to 2 if not set. See Optimizing CPU Options for more information. + CPUThreadsPerCore *float64 `json:"cpuThreadsPerCore,omitempty" tf:"cpu_threads_per_core,omitempty"` + + // Describes an instance's Capacity Reservation targeting option. See Capacity Reservation Specification below for more details. + CapacityReservationSpecification *CapacityReservationSpecificationObservation `json:"capacityReservationSpecification,omitempty" tf:"capacity_reservation_specification,omitempty"` + + // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. + CreditSpecification *CreditSpecificationObservation `json:"creditSpecification,omitempty" tf:"credit_specification,omitempty"` + + // If true, enables EC2 Instance Stop Protection. + DisableAPIStop *bool `json:"disableApiStop,omitempty" tf:"disable_api_stop,omitempty"` + + // If true, enables EC2 Instance Termination Protection. + DisableAPITermination *bool `json:"disableApiTermination,omitempty" tf:"disable_api_termination,omitempty"` + + // One or more configuration blocks with additional EBS block devices to attach to the instance. Block device configurations only apply on resource creation. See Block Devices below for details on attributes and drift detection. When accessing this as an attribute reference, it is a set of objects. + EBSBlockDevice []EBSBlockDeviceObservation `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + // If true, the launched EC2 instance will be EBS-optimized. Note that if this is not set on an instance type that is optimized by default then this will show as disabled but if the instance type is optimized by default then there is no need to set this and there is no effect to disabling it. 
See the EBS Optimized section of the AWS User Guide for more information. + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. + EnclaveOptions *EnclaveOptionsObservation `json:"enclaveOptions,omitempty" tf:"enclave_options,omitempty"` + + // One or more configuration blocks to customize Ephemeral (also known as "Instance Store") volumes on the instance. See Block Devices below for details. When accessing this as an attribute reference, it is a set of objects. + EphemeralBlockDevice []EphemeralBlockDeviceObservation `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + // If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the password_data attribute. See GetPasswordData for more information. + GetPasswordData *bool `json:"getPasswordData,omitempty" tf:"get_password_data,omitempty"` + + // If true, the launched EC2 instance will support hibernation. + Hibernation *bool `json:"hibernation,omitempty" tf:"hibernation,omitempty"` + + // ID of a dedicated host that the instance will be assigned to. Use when an instance is to be launched on a specific dedicated host. + HostID *string `json:"hostId,omitempty" tf:"host_id,omitempty"` + + // ARN of the host resource group in which to launch the instances. If you specify an ARN, omit the tenancy parameter or set it to host. + HostResourceGroupArn *string `json:"hostResourceGroupArn,omitempty" tf:"host_resource_group_arn,omitempty"` + + // IAM Instance Profile to launch the instance with. Specified as the name of the Instance Profile. Ensure your credentials have the correct permission to assign the instance profile according to the EC2 documentation, notably iam:PassRole. 
+ IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // ID of the instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. + IPv6AddressCount *float64 `json:"ipv6AddressCount,omitempty" tf:"ipv6_address_count,omitempty"` + + // Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + // Shutdown behavior for the instance. Amazon defaults this to stop for EBS-backed instances and terminate for instance-store instances. Cannot be set on instance-store instances. See Shutdown Behavior for more information. + InstanceInitiatedShutdownBehavior *string `json:"instanceInitiatedShutdownBehavior,omitempty" tf:"instance_initiated_shutdown_behavior,omitempty"` + + // Indicates whether this is a Spot Instance or a Scheduled Instance. + InstanceLifecycle *string `json:"instanceLifecycle,omitempty" tf:"instance_lifecycle,omitempty"` + + // Describes the market (purchasing) option for the instances. See Market Options below for details on attributes. + InstanceMarketOptions *InstanceMarketOptionsObservation `json:"instanceMarketOptions,omitempty" tf:"instance_market_options,omitempty"` + + // State of the instance. One of: pending, running, shutting-down, terminated, stopping, stopped. See Instance Lifecycle for more information. + InstanceState *string `json:"instanceState,omitempty" tf:"instance_state,omitempty"` + + // Instance type to use for the instance. Required unless launch_template is specified and the Launch Template specifies an instance type. If an instance type is specified in the Launch Template, setting instance_type will override the instance type specified in the Launch Template. 
Updates to this field will trigger a stop/start of the EC2 instance. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Key name of the Key Pair to use for the instance; which can be managed using the . + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // Specifies a Launch Template to configure the instance. Parameters configured on this resource will override the corresponding parameters in the Launch Template. See Launch Template Specification below for more details. + LaunchTemplate *LaunchTemplateObservation `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // Maintenance and recovery options for the instance. See Maintenance Options below for more details. + MaintenanceOptions *MaintenanceOptionsObservation `json:"maintenanceOptions,omitempty" tf:"maintenance_options,omitempty"` + + // Customize the metadata options of the instance. See Metadata Options below for more details. + MetadataOptions *MetadataOptionsObservation `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // If true, the launched EC2 instance will have detailed monitoring enabled. (Available since v0.6.0) + Monitoring *bool `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // Customize network interfaces to be attached at instance boot time. See Network Interfaces below for more details. + NetworkInterface []NetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // ARN of the Outpost the instance is assigned to. + OutpostArn *string `json:"outpostArn,omitempty" tf:"outpost_arn,omitempty"` + + // Base-64 encoded encrypted password data for the instance. Useful for getting the administrator password for instances running Microsoft Windows. This attribute is only exported if get_password_data is true. Note that this encrypted value will be stored in the state file, as with all exported attributes. See GetPasswordData for more information. 
+ PasswordData *string `json:"passwordData,omitempty" tf:"password_data,omitempty"` + + // Placement Group to start the instance in. + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + // Number of the partition the instance is in. Valid only if the strategy argument is set to "partition". + PlacementPartitionNumber *float64 `json:"placementPartitionNumber,omitempty" tf:"placement_partition_number,omitempty"` + + // ID of the instance's primary network interface. + PrimaryNetworkInterfaceID *string `json:"primaryNetworkInterfaceId,omitempty" tf:"primary_network_interface_id,omitempty"` + + // Private DNS name assigned to the instance. Can only be used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC. + PrivateDNS *string `json:"privateDns,omitempty" tf:"private_dns,omitempty"` + + // Options for the instance hostname. The default values are inherited from the subnet. See Private DNS Name Options below for more details. + PrivateDNSNameOptions *PrivateDNSNameOptionsObservation `json:"privateDnsNameOptions,omitempty" tf:"private_dns_name_options,omitempty"` + + // Private IP address to associate with the instance in a VPC. + PrivateIP *string `json:"privateIp,omitempty" tf:"private_ip,omitempty"` + + // Public DNS name assigned to the instance. For EC2-VPC, this is only available if you've enabled DNS hostnames for your VPC. + PublicDNS *string `json:"publicDns,omitempty" tf:"public_dns,omitempty"` + + // Public IP address assigned to the instance, if applicable. NOTE: If you are using an aws_eip with your instance, you should refer to the EIP's address directly and not use public_ip as this field will change after the EIP is attached. + PublicIP *string `json:"publicIp,omitempty" tf:"public_ip,omitempty"` + + // Configuration block to customize details about the root block device of the instance. See Block Devices below for details. 
When accessing this as an attribute reference, it is a list containing one object. + RootBlockDevice *RootBlockDeviceObservation `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // List of secondary private IPv4 addresses to assign to the instance's primary network interface (eth0) in a VPC. Can only be assigned to the primary network interface (eth0) attached at instance creation, not a pre-existing network interface i.e., referenced in a network_interface block. Refer to the Elastic network interfaces documentation to see the maximum number of private IP addresses allowed per instance type. + // +listType=set + SecondaryPrivateIps []*string `json:"secondaryPrivateIps,omitempty" tf:"secondary_private_ips,omitempty"` + + // List of security group names to associate with. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Controls if traffic is routed to the instance when the destination address does not match the instance. Used for NAT or VPNs. Defaults true. + SourceDestCheck *bool `json:"sourceDestCheck,omitempty" tf:"source_dest_check,omitempty"` + + // If the request is a Spot Instance request, the ID of the request. + SpotInstanceRequestID *string `json:"spotInstanceRequestId,omitempty" tf:"spot_instance_request_id,omitempty"` + + // VPC Subnet ID to launch in. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. 
The host tenancy is not supported for the import-instance command. Valid values are default, dedicated, and host. + Tenancy *string `json:"tenancy,omitempty" tf:"tenancy,omitempty"` + + // User data to provide when launching the instance. Do not pass gzip-compressed data via this argument; see user_data_base64 instead. Updates to this field will trigger a stop/start of the EC2 instance by default. If the user_data_replace_on_change is set then updates to this field will trigger a destroy and recreate. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Can be used instead of user_data to pass base64-encoded binary data directly. Use this instead of user_data whenever the value is not a valid UTF-8 string. For example, gzip-encoded user data must be base64-encoded and passed via this argument to avoid corruption. Updates to this field will trigger a stop/start of the EC2 instance by default. If the user_data_replace_on_change is set then updates to this field will trigger a destroy and recreate. + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` + + // When used in combination with user_data or user_data_base64 will trigger a destroy and recreate when set to true. Defaults to false if not set. + UserDataReplaceOnChange *bool `json:"userDataReplaceOnChange,omitempty" tf:"user_data_replace_on_change,omitempty"` + + // List of security group IDs to associate with. + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` + + // Map of tags to assign, at instance-creation time, to root and EBS volumes. + // +mapType=granular + VolumeTags map[string]*string `json:"volumeTags,omitempty" tf:"volume_tags,omitempty"` +} + +type InstanceParameters struct { + + // AMI to use for the instance. Required unless launch_template is specified and the Launch Template specifes an AMI. 
If an AMI is specified in the Launch Template, setting ami will override the AMI specified in the Launch Template. + // +kubebuilder:validation:Optional + AMI *string `json:"ami,omitempty" tf:"ami,omitempty"` + + // Whether to associate a public IP address with an instance in a VPC. + // +kubebuilder:validation:Optional + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // AZ to start the instance in. + // +kubebuilder:validation:Optional + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options CPU Cores and Threads Per CPU Core Per Instance Type - specifying this option for unsupported instance types will return an error from the EC2 API. + // +kubebuilder:validation:Optional + CPUCoreCount *float64 `json:"cpuCoreCount,omitempty" tf:"cpu_core_count,omitempty"` + + // The CPU options for the instance. See CPU Options below for more details. + // +kubebuilder:validation:Optional + CPUOptions *CPUOptionsParameters `json:"cpuOptions,omitempty" tf:"cpu_options,omitempty"` + + // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See Optimizing CPU Options for more information. + // +kubebuilder:validation:Optional + CPUThreadsPerCore *float64 `json:"cpuThreadsPerCore,omitempty" tf:"cpu_threads_per_core,omitempty"` + + // Describes an instance's Capacity Reservation targeting option. See Capacity Reservation Specification below for more details. + // +kubebuilder:validation:Optional + CapacityReservationSpecification *CapacityReservationSpecificationParameters `json:"capacityReservationSpecification,omitempty" tf:"capacity_reservation_specification,omitempty"` + + // Configuration block for customizing the credit specification of the instance. 
See Credit Specification below for more details. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. + // +kubebuilder:validation:Optional + CreditSpecification *CreditSpecificationParameters `json:"creditSpecification,omitempty" tf:"credit_specification,omitempty"` + + // If true, enables EC2 Instance Stop Protection. + // +kubebuilder:validation:Optional + DisableAPIStop *bool `json:"disableApiStop,omitempty" tf:"disable_api_stop,omitempty"` + + // If true, enables EC2 Instance Termination Protection. + // +kubebuilder:validation:Optional + DisableAPITermination *bool `json:"disableApiTermination,omitempty" tf:"disable_api_termination,omitempty"` + + // One or more configuration blocks with additional EBS block devices to attach to the instance. Block device configurations only apply on resource creation. See Block Devices below for details on attributes and drift detection. When accessing this as an attribute reference, it is a set of objects. + // +kubebuilder:validation:Optional + EBSBlockDevice []EBSBlockDeviceParameters `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + // If true, the launched EC2 instance will be EBS-optimized. Note that if this is not set on an instance type that is optimized by default then this will show as disabled but if the instance type is optimized by default then there is no need to set this and there is no effect to disabling it. See the EBS Optimized section of the AWS User Guide for more information. + // +kubebuilder:validation:Optional + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. 
+ // +kubebuilder:validation:Optional + EnclaveOptions *EnclaveOptionsParameters `json:"enclaveOptions,omitempty" tf:"enclave_options,omitempty"` + + // One or more configuration blocks to customize Ephemeral (also known as "Instance Store") volumes on the instance. See Block Devices below for details. When accessing this as an attribute reference, it is a set of objects. + // +kubebuilder:validation:Optional + EphemeralBlockDevice []EphemeralBlockDeviceParameters `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + // If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the password_data attribute. See GetPasswordData for more information. + // +kubebuilder:validation:Optional + GetPasswordData *bool `json:"getPasswordData,omitempty" tf:"get_password_data,omitempty"` + + // If true, the launched EC2 instance will support hibernation. + // +kubebuilder:validation:Optional + Hibernation *bool `json:"hibernation,omitempty" tf:"hibernation,omitempty"` + + // ID of a dedicated host that the instance will be assigned to. Use when an instance is to be launched on a specific dedicated host. + // +kubebuilder:validation:Optional + HostID *string `json:"hostId,omitempty" tf:"host_id,omitempty"` + + // ARN of the host resource group in which to launch the instances. If you specify an ARN, omit the tenancy parameter or set it to host. + // +kubebuilder:validation:Optional + HostResourceGroupArn *string `json:"hostResourceGroupArn,omitempty" tf:"host_resource_group_arn,omitempty"` + + // IAM Instance Profile to launch the instance with. Specified as the name of the Instance Profile. Ensure your credentials have the correct permission to assign the instance profile according to the EC2 documentation, notably iam:PassRole. 
+ // +kubebuilder:validation:Optional + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // Number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. + // +kubebuilder:validation:Optional + IPv6AddressCount *float64 `json:"ipv6AddressCount,omitempty" tf:"ipv6_address_count,omitempty"` + + // Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface + // +kubebuilder:validation:Optional + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + // Shutdown behavior for the instance. Amazon defaults this to stop for EBS-backed instances and terminate for instance-store instances. Cannot be set on instance-store instances. See Shutdown Behavior for more information. + // +kubebuilder:validation:Optional + InstanceInitiatedShutdownBehavior *string `json:"instanceInitiatedShutdownBehavior,omitempty" tf:"instance_initiated_shutdown_behavior,omitempty"` + + // Describes the market (purchasing) option for the instances. See Market Options below for details on attributes. + // +kubebuilder:validation:Optional + InstanceMarketOptions *InstanceMarketOptionsParameters `json:"instanceMarketOptions,omitempty" tf:"instance_market_options,omitempty"` + + // Instance type to use for the instance. Required unless launch_template is specified and the Launch Template specifies an instance type. If an instance type is specified in the Launch Template, setting instance_type will override the instance type specified in the Launch Template. Updates to this field will trigger a stop/start of the EC2 instance. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Key name of the Key Pair to use for the instance; which can be managed using the . 
+ // +kubebuilder:validation:Optional + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // Specifies a Launch Template to configure the instance. Parameters configured on this resource will override the corresponding parameters in the Launch Template. See Launch Template Specification below for more details. + // +kubebuilder:validation:Optional + LaunchTemplate *LaunchTemplateParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // Maintenance and recovery options for the instance. See Maintenance Options below for more details. + // +kubebuilder:validation:Optional + MaintenanceOptions *MaintenanceOptionsParameters `json:"maintenanceOptions,omitempty" tf:"maintenance_options,omitempty"` + + // Customize the metadata options of the instance. See Metadata Options below for more details. + // +kubebuilder:validation:Optional + MetadataOptions *MetadataOptionsParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // If true, the launched EC2 instance will have detailed monitoring enabled. (Available since v0.6.0) + // +kubebuilder:validation:Optional + Monitoring *bool `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // Customize network interfaces to be attached at instance boot time. See Network Interfaces below for more details. + // +kubebuilder:validation:Optional + NetworkInterface []NetworkInterfaceParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // Placement Group to start the instance in. + // +kubebuilder:validation:Optional + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + // Number of the partition the instance is in. Valid only if the strategy argument is set to "partition". + // +kubebuilder:validation:Optional + PlacementPartitionNumber *float64 `json:"placementPartitionNumber,omitempty" tf:"placement_partition_number,omitempty"` + + // Options for the instance hostname. 
The default values are inherited from the subnet. See Private DNS Name Options below for more details. + // +kubebuilder:validation:Optional + PrivateDNSNameOptions *PrivateDNSNameOptionsParameters `json:"privateDnsNameOptions,omitempty" tf:"private_dns_name_options,omitempty"` + + // Private IP address to associate with the instance in a VPC. + // +kubebuilder:validation:Optional + PrivateIP *string `json:"privateIp,omitempty" tf:"private_ip,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block to customize details about the root block device of the instance. See Block Devices below for details. When accessing this as an attribute reference, it is a list containing one object. + // +kubebuilder:validation:Optional + RootBlockDevice *RootBlockDeviceParameters `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // List of secondary private IPv4 addresses to assign to the instance's primary network interface (eth0) in a VPC. Can only be assigned to the primary network interface (eth0) attached at instance creation, not a pre-existing network interface i.e., referenced in a network_interface block. Refer to the Elastic network interfaces documentation to see the maximum number of private IP addresses allowed per instance type. + // +kubebuilder:validation:Optional + // +listType=set + SecondaryPrivateIps []*string `json:"secondaryPrivateIps,omitempty" tf:"secondary_private_ips,omitempty"` + + // Controls if traffic is routed to the instance when the destination address does not match the instance. Used for NAT or VPNs. Defaults true. + // +kubebuilder:validation:Optional + SourceDestCheck *bool `json:"sourceDestCheck,omitempty" tf:"source_dest_check,omitempty"` + + // VPC Subnet ID to launch in. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the import-instance command. Valid values are default, dedicated, and host. + // +kubebuilder:validation:Optional + Tenancy *string `json:"tenancy,omitempty" tf:"tenancy,omitempty"` + + // User data to provide when launching the instance. Do not pass gzip-compressed data via this argument; see user_data_base64 instead. Updates to this field will trigger a stop/start of the EC2 instance by default. If the user_data_replace_on_change is set then updates to this field will trigger a destroy and recreate. + // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Can be used instead of user_data to pass base64-encoded binary data directly. Use this instead of user_data whenever the value is not a valid UTF-8 string. For example, gzip-encoded user data must be base64-encoded and passed via this argument to avoid corruption. Updates to this field will trigger a stop/start of the EC2 instance by default. If the user_data_replace_on_change is set then updates to this field will trigger a destroy and recreate. 
+ // +kubebuilder:validation:Optional + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` + + // When used in combination with user_data or user_data_base64 will trigger a destroy and recreate when set to true. Defaults to false if not set. + // +kubebuilder:validation:Optional + UserDataReplaceOnChange *bool `json:"userDataReplaceOnChange,omitempty" tf:"user_data_replace_on_change,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of security group IDs to associate with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` + + // Map of tags to assign, at instance-creation time, to root and EBS volumes. + // +kubebuilder:validation:Optional + // +mapType=granular + VolumeTags map[string]*string `json:"volumeTags,omitempty" tf:"volume_tags,omitempty"` +} + +type LaunchTemplateInitParameters struct { + + // ID of the launch template. Conflicts with name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the launch template. Conflicts with id. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template version. Can be a specific version number, $Latest or $Default. The default value is $Default. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateObservation struct { + + // ID of the launch template. Conflicts with name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the launch template. Conflicts with id. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template version. Can be a specific version number, $Latest or $Default. The default value is $Default. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateParameters struct { + + // ID of the launch template. Conflicts with name. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the launch template. Conflicts with id. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template version. Can be a specific version number, $Latest or $Default. The default value is $Default. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type MaintenanceOptionsInitParameters struct { + + // Automatic recovery behavior of the Instance. Can be "default" or "disabled". See Recover your instance for more details. + AutoRecovery *string `json:"autoRecovery,omitempty" tf:"auto_recovery,omitempty"` +} + +type MaintenanceOptionsObservation struct { + + // Automatic recovery behavior of the Instance. Can be "default" or "disabled". See Recover your instance for more details. + AutoRecovery *string `json:"autoRecovery,omitempty" tf:"auto_recovery,omitempty"` +} + +type MaintenanceOptionsParameters struct { + + // Automatic recovery behavior of the Instance. Can be "default" or "disabled". See Recover your instance for more details. 
+ // +kubebuilder:validation:Optional + AutoRecovery *string `json:"autoRecovery,omitempty" tf:"auto_recovery,omitempty"` +} + +type MetadataOptionsInitParameters struct { + + // Whether the metadata service is available. Valid values include enabled or disabled. Defaults to enabled. + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // Whether the IPv6 endpoint for the instance metadata service is enabled. Defaults to disabled. + HTTPProtocolIPv6 *string `json:"httpProtocolIpv6,omitempty" tf:"http_protocol_ipv6,omitempty"` + + // Desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Valid values are integer from 1 to 64. Defaults to 1. + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // Whether or not the metadata service requires session tokens, also referred to as Instance Metadata Service Version 2 (IMDSv2). Valid values include optional or required. Defaults to optional. + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` + + // Enables or disables access to instance tags from the instance metadata service. Valid values include enabled or disabled. Defaults to disabled. + InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty" tf:"instance_metadata_tags,omitempty"` +} + +type MetadataOptionsObservation struct { + + // Whether the metadata service is available. Valid values include enabled or disabled. Defaults to enabled. + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // Whether the IPv6 endpoint for the instance metadata service is enabled. Defaults to disabled. + HTTPProtocolIPv6 *string `json:"httpProtocolIpv6,omitempty" tf:"http_protocol_ipv6,omitempty"` + + // Desired HTTP PUT response hop limit for instance metadata requests. 
The larger the number, the further instance metadata requests can travel. Valid values are integer from 1 to 64. Defaults to 1. + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // Whether or not the metadata service requires session tokens, also referred to as Instance Metadata Service Version 2 (IMDSv2). Valid values include optional or required. Defaults to optional. + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` + + // Enables or disables access to instance tags from the instance metadata service. Valid values include enabled or disabled. Defaults to disabled. + InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty" tf:"instance_metadata_tags,omitempty"` +} + +type MetadataOptionsParameters struct { + + // Whether the metadata service is available. Valid values include enabled or disabled. Defaults to enabled. + // +kubebuilder:validation:Optional + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // Whether the IPv6 endpoint for the instance metadata service is enabled. Defaults to disabled. + // +kubebuilder:validation:Optional + HTTPProtocolIPv6 *string `json:"httpProtocolIpv6,omitempty" tf:"http_protocol_ipv6,omitempty"` + + // Desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Valid values are integer from 1 to 64. Defaults to 1. + // +kubebuilder:validation:Optional + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // Whether or not the metadata service requires session tokens, also referred to as Instance Metadata Service Version 2 (IMDSv2). Valid values include optional or required. Defaults to optional. 
+ // +kubebuilder:validation:Optional + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` + + // Enables or disables access to instance tags from the instance metadata service. Valid values include enabled or disabled. Defaults to disabled. + // +kubebuilder:validation:Optional + InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty" tf:"instance_metadata_tags,omitempty"` +} + +type NetworkInterfaceInitParameters struct { + + // Whether or not to delete the network interface on instance termination. Defaults to false. Currently, the only valid value is false, as this is only supported when creating new network interfaces when launching an instance. + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Integer index of the network interface attachment. Limited by instance type. + DeviceIndex *float64 `json:"deviceIndex,omitempty" tf:"device_index,omitempty"` + + // Integer index of the network card. Limited by instance type. The default index is 0. + NetworkCardIndex *float64 `json:"networkCardIndex,omitempty" tf:"network_card_index,omitempty"` + + // ID of the network interface to attach. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.NetworkInterface + NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` + + // Reference to a NetworkInterface in ec2 to populate networkInterfaceId. + // +kubebuilder:validation:Optional + NetworkInterfaceIDRef *v1.Reference `json:"networkInterfaceIdRef,omitempty" tf:"-"` + + // Selector for a NetworkInterface in ec2 to populate networkInterfaceId. + // +kubebuilder:validation:Optional + NetworkInterfaceIDSelector *v1.Selector `json:"networkInterfaceIdSelector,omitempty" tf:"-"` +} + +type NetworkInterfaceObservation struct { + + // Whether or not to delete the network interface on instance termination. Defaults to false. 
Currently, the only valid value is false, as this is only supported when creating new network interfaces when launching an instance. + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Integer index of the network interface attachment. Limited by instance type. + DeviceIndex *float64 `json:"deviceIndex,omitempty" tf:"device_index,omitempty"` + + // Integer index of the network card. Limited by instance type. The default index is 0. + NetworkCardIndex *float64 `json:"networkCardIndex,omitempty" tf:"network_card_index,omitempty"` + + // ID of the network interface to attach. + NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` +} + +type NetworkInterfaceParameters struct { + + // Whether or not to delete the network interface on instance termination. Defaults to false. Currently, the only valid value is false, as this is only supported when creating new network interfaces when launching an instance. + // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Integer index of the network interface attachment. Limited by instance type. + // +kubebuilder:validation:Optional + DeviceIndex *float64 `json:"deviceIndex" tf:"device_index,omitempty"` + + // Integer index of the network card. Limited by instance type. The default index is 0. + // +kubebuilder:validation:Optional + NetworkCardIndex *float64 `json:"networkCardIndex,omitempty" tf:"network_card_index,omitempty"` + + // ID of the network interface to attach. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.NetworkInterface + // +kubebuilder:validation:Optional + NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` + + // Reference to a NetworkInterface in ec2 to populate networkInterfaceId. 
+ // +kubebuilder:validation:Optional + NetworkInterfaceIDRef *v1.Reference `json:"networkInterfaceIdRef,omitempty" tf:"-"` + + // Selector for a NetworkInterface in ec2 to populate networkInterfaceId. + // +kubebuilder:validation:Optional + NetworkInterfaceIDSelector *v1.Selector `json:"networkInterfaceIdSelector,omitempty" tf:"-"` +} + +type PrivateDNSNameOptionsInitParameters struct { + + // Indicates whether to respond to DNS queries for instance hostnames with DNS A records. + EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty" tf:"enable_resource_name_dns_a_record,omitempty"` + + // Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. + EnableResourceNameDNSAaaaRecord *bool `json:"enableResourceNameDnsAaaaRecord,omitempty" tf:"enable_resource_name_dns_aaaa_record,omitempty"` + + // Type of hostname for Amazon EC2 instances. For IPv4 only subnets, an instance DNS name must be based on the instance IPv4 address. For IPv6 native subnets, an instance DNS name must be based on the instance ID. For dual-stack subnets, you can specify whether DNS names use the instance IPv4 address or the instance ID. Valid values: ip-name and resource-name. + HostnameType *string `json:"hostnameType,omitempty" tf:"hostname_type,omitempty"` +} + +type PrivateDNSNameOptionsObservation struct { + + // Indicates whether to respond to DNS queries for instance hostnames with DNS A records. + EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty" tf:"enable_resource_name_dns_a_record,omitempty"` + + // Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. + EnableResourceNameDNSAaaaRecord *bool `json:"enableResourceNameDnsAaaaRecord,omitempty" tf:"enable_resource_name_dns_aaaa_record,omitempty"` + + // Type of hostname for Amazon EC2 instances. For IPv4 only subnets, an instance DNS name must be based on the instance IPv4 address. 
For IPv6 native subnets, an instance DNS name must be based on the instance ID. For dual-stack subnets, you can specify whether DNS names use the instance IPv4 address or the instance ID. Valid values: ip-name and resource-name. + HostnameType *string `json:"hostnameType,omitempty" tf:"hostname_type,omitempty"` +} + +type PrivateDNSNameOptionsParameters struct { + + // Indicates whether to respond to DNS queries for instance hostnames with DNS A records. + // +kubebuilder:validation:Optional + EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty" tf:"enable_resource_name_dns_a_record,omitempty"` + + // Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. + // +kubebuilder:validation:Optional + EnableResourceNameDNSAaaaRecord *bool `json:"enableResourceNameDnsAaaaRecord,omitempty" tf:"enable_resource_name_dns_aaaa_record,omitempty"` + + // Type of hostname for Amazon EC2 instances. For IPv4 only subnets, an instance DNS name must be based on the instance IPv4 address. For IPv6 native subnets, an instance DNS name must be based on the instance ID. For dual-stack subnets, you can specify whether DNS names use the instance IPv4 address or the instance ID. Valid values: ip-name and resource-name. + // +kubebuilder:validation:Optional + HostnameType *string `json:"hostnameType,omitempty" tf:"hostname_type,omitempty"` +} + +type RootBlockDeviceInitParameters struct { + + // Whether the volume should be destroyed on instance termination. Defaults to true. + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether to enable volume encryption. Defaults to false. Must be configured to perform drift detection. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Amount of provisioned IOPS. Only valid for volume_type of io1, io2 or gp3. 
+ Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Map of tags to assign to the device. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for volume_type of gp3. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume in gibibytes (GiB). + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of volume. Valid values include standard, gp2, gp3, io1, io2, sc1, or st1. Defaults to the volume type that the AMI uses. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type RootBlockDeviceObservation struct { + + // Whether the volume should be destroyed on instance termination. Defaults to true. + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Device name, e.g., /dev/sdh or xvdh. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Whether to enable volume encryption. 
Defaults to false. Must be configured to perform drift detection. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Amount of provisioned IOPS. Only valid for volume_type of io1, io2 or gp3. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Map of tags to assign to the device. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for volume_type of gp3. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // ID of the volume. For example, the ID can be accessed like this, aws_instance.web.root_block_device.0.volume_id. + VolumeID *string `json:"volumeId,omitempty" tf:"volume_id,omitempty"` + + // Size of the volume in gibibytes (GiB). + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of volume. Valid values include standard, gp2, gp3, io1, io2, sc1, or st1. Defaults to the volume type that the AMI uses. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type RootBlockDeviceParameters struct { + + // Whether the volume should be destroyed on instance termination. Defaults to true. + // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether to enable volume encryption. Defaults to false. Must be configured to perform drift detection. 
+ // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Amount of provisioned IOPS. Only valid for volume_type of io1, io2 or gp3. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Map of tags to assign to the device. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +kubebuilder:validation:Optional + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for volume_type of gp3. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume in gibibytes (GiB). + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of volume. Valid values include standard, gp2, gp3, io1, io2, sc1, or st1. Defaults to the volume type that the AMI uses. 
+ // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type SpotOptionsInitParameters struct { + + // The behavior when a Spot Instance is interrupted. Valid values include hibernate, stop, terminate . The default is terminate. + InstanceInterruptionBehavior *string `json:"instanceInterruptionBehavior,omitempty" tf:"instance_interruption_behavior,omitempty"` + + // The maximum hourly price that you're willing to pay for a Spot Instance. + MaxPrice *string `json:"maxPrice,omitempty" tf:"max_price,omitempty"` + + // The Spot Instance request type. Valid values include one-time, persistent. Persistent Spot Instance requests are only supported when the instance interruption behavior is either hibernate or stop. The default is one-time. + SpotInstanceType *string `json:"spotInstanceType,omitempty" tf:"spot_instance_type,omitempty"` + + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). Supported only for persistent requests. + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` +} + +type SpotOptionsObservation struct { + + // The behavior when a Spot Instance is interrupted. Valid values include hibernate, stop, terminate . The default is terminate. + InstanceInterruptionBehavior *string `json:"instanceInterruptionBehavior,omitempty" tf:"instance_interruption_behavior,omitempty"` + + // The maximum hourly price that you're willing to pay for a Spot Instance. + MaxPrice *string `json:"maxPrice,omitempty" tf:"max_price,omitempty"` + + // The Spot Instance request type. Valid values include one-time, persistent. Persistent Spot Instance requests are only supported when the instance interruption behavior is either hibernate or stop. The default is one-time. + SpotInstanceType *string `json:"spotInstanceType,omitempty" tf:"spot_instance_type,omitempty"` + + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). Supported only for persistent requests. 
+ ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` +} + +type SpotOptionsParameters struct { + + // The behavior when a Spot Instance is interrupted. Valid values include hibernate, stop, terminate . The default is terminate. + // +kubebuilder:validation:Optional + InstanceInterruptionBehavior *string `json:"instanceInterruptionBehavior,omitempty" tf:"instance_interruption_behavior,omitempty"` + + // The maximum hourly price that you're willing to pay for a Spot Instance. + // +kubebuilder:validation:Optional + MaxPrice *string `json:"maxPrice,omitempty" tf:"max_price,omitempty"` + + // The Spot Instance request type. Valid values include one-time, persistent. Persistent Spot Instance requests are only supported when the instance interruption behavior is either hibernate or stop. The default is one-time. + // +kubebuilder:validation:Optional + SpotInstanceType *string `json:"spotInstanceType,omitempty" tf:"spot_instance_type,omitempty"` + + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). Supported only for persistent requests. + // +kubebuilder:validation:Optional + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` +} + +// InstanceSpec defines the desired state of Instance +type InstanceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InstanceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InstanceInitParameters `json:"initProvider,omitempty"` +} + +// InstanceStatus defines the observed state of Instance. +type InstanceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InstanceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Instance is the Schema for the Instances API. Provides an EC2 instance resource. This allows instances to be created, updated, and deleted. Instances also support provisioning. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Instance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec InstanceSpec `json:"spec"` + Status InstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceList contains a list of Instances +type InstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Instance `json:"items"` +} + +// Repository type metadata. +var ( + Instance_Kind = "Instance" + Instance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Instance_Kind}.String() + Instance_KindAPIVersion = Instance_Kind + "." 
+ CRDGroupVersion.String() + Instance_GroupVersionKind = CRDGroupVersion.WithKind(Instance_Kind) +) + +func init() { + SchemeBuilder.Register(&Instance{}, &InstanceList{}) +} diff --git a/apis/ec2/v1beta2/zz_launchtemplate_terraformed.go b/apis/ec2/v1beta2/zz_launchtemplate_terraformed.go new file mode 100755 index 0000000000..c8dd851638 --- /dev/null +++ b/apis/ec2/v1beta2/zz_launchtemplate_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LaunchTemplate +func (mg *LaunchTemplate) GetTerraformResourceType() string { + return "aws_launch_template" +} + +// GetConnectionDetailsMapping for this LaunchTemplate +func (tr *LaunchTemplate) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LaunchTemplate +func (tr *LaunchTemplate) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LaunchTemplate +func (tr *LaunchTemplate) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LaunchTemplate +func (tr *LaunchTemplate) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LaunchTemplate +func (tr *LaunchTemplate) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LaunchTemplate +func (tr *LaunchTemplate) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LaunchTemplate +func (tr *LaunchTemplate) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LaunchTemplate +func (tr *LaunchTemplate) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LaunchTemplate using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *LaunchTemplate) LateInitialize(attrs []byte) (bool, error) { + params := &LaunchTemplateParameters_2{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LaunchTemplate) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_launchtemplate_types.go b/apis/ec2/v1beta2/zz_launchtemplate_types.go new file mode 100755 index 0000000000..c2ba4b0cf3 --- /dev/null +++ b/apis/ec2/v1beta2/zz_launchtemplate_types.go @@ -0,0 +1,2166 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AcceleratorCountInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorCountObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorCountParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorTotalMemoryMibInitParameters struct { + + // Maximum. 
+ Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorTotalMemoryMibObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type AcceleratorTotalMemoryMibParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type BaselineEBSBandwidthMbpsInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type BaselineEBSBandwidthMbpsObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type BaselineEBSBandwidthMbpsParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type BlockDeviceMappingsInitParameters struct { + + // The name of the device to mount. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Configure EBS volume properties. + EBS *EBSInitParameters `json:"ebs,omitempty" tf:"ebs,omitempty"` + + // Suppresses the specified device included in the AMI's block device mapping. + NoDevice *string `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // The Instance Store Device + // Name + // (e.g., "ephemeral0"). + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type BlockDeviceMappingsObservation struct { + + // The name of the device to mount. 
+ DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Configure EBS volume properties. + EBS *EBSObservation `json:"ebs,omitempty" tf:"ebs,omitempty"` + + // Suppresses the specified device included in the AMI's block device mapping. + NoDevice *string `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // The Instance Store Device + // Name + // (e.g., "ephemeral0"). + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type BlockDeviceMappingsParameters struct { + + // The name of the device to mount. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Configure EBS volume properties. + // +kubebuilder:validation:Optional + EBS *EBSParameters `json:"ebs,omitempty" tf:"ebs,omitempty"` + + // Suppresses the specified device included in the AMI's block device mapping. + // +kubebuilder:validation:Optional + NoDevice *string `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // The Instance Store Device + // Name + // (e.g., "ephemeral0"). + // +kubebuilder:validation:Optional + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type CapacityReservationSpecificationCapacityReservationTargetInitParameters struct { + + // The ID of the Capacity Reservation in which to run the instance. + CapacityReservationID *string `json:"capacityReservationId,omitempty" tf:"capacity_reservation_id,omitempty"` + + // The ARN of the Capacity Reservation resource group in which to run the instance. + CapacityReservationResourceGroupArn *string `json:"capacityReservationResourceGroupArn,omitempty" tf:"capacity_reservation_resource_group_arn,omitempty"` +} + +type CapacityReservationSpecificationCapacityReservationTargetObservation struct { + + // The ID of the Capacity Reservation in which to run the instance. 
+ CapacityReservationID *string `json:"capacityReservationId,omitempty" tf:"capacity_reservation_id,omitempty"` + + // The ARN of the Capacity Reservation resource group in which to run the instance. + CapacityReservationResourceGroupArn *string `json:"capacityReservationResourceGroupArn,omitempty" tf:"capacity_reservation_resource_group_arn,omitempty"` +} + +type CapacityReservationSpecificationCapacityReservationTargetParameters struct { + + // The ID of the Capacity Reservation in which to run the instance. + // +kubebuilder:validation:Optional + CapacityReservationID *string `json:"capacityReservationId,omitempty" tf:"capacity_reservation_id,omitempty"` + + // The ARN of the Capacity Reservation resource group in which to run the instance. + // +kubebuilder:validation:Optional + CapacityReservationResourceGroupArn *string `json:"capacityReservationResourceGroupArn,omitempty" tf:"capacity_reservation_resource_group_arn,omitempty"` +} + +type EBSInitParameters struct { + + // Whether the volume should be destroyed on instance termination. + // See Preserving Amazon EBS Volumes on Instance Termination for more information. + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Enables EBS encryption on the volume. + // Cannot be used with snapshot_id. + Encrypted *string `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The amount of provisioned IOPS. + // This must be set with a volume_type of "io1/io2/gp3". + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. + // encrypted must be set to true when this is set. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The Snapshot ID to mount. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The throughput to provision for a gp3 volume in MiB/s (specified as an integer, e.g., 500), with a maximum of 1,000 MiB/s. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The size of the volume in gigabytes. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // The volume type. + // Can be one of standard, gp2, gp3, io1, io2, sc1 or st1. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSObservation struct { + + // Whether the volume should be destroyed on instance termination. + // See Preserving Amazon EBS Volumes on Instance Termination for more information. + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Enables EBS encryption on the volume. + // Cannot be used with snapshot_id. + Encrypted *string `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The amount of provisioned IOPS. + // This must be set with a volume_type of "io1/io2/gp3". + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. + // encrypted must be set to true when this is set. 
+ KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The Snapshot ID to mount. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The throughput to provision for a gp3 volume in MiB/s (specified as an integer, e.g., 500), with a maximum of 1,000 MiB/s. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The size of the volume in gigabytes. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // The volume type. + // Can be one of standard, gp2, gp3, io1, io2, sc1 or st1. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSParameters struct { + + // Whether the volume should be destroyed on instance termination. + // See Preserving Amazon EBS Volumes on Instance Termination for more information. + // +kubebuilder:validation:Optional + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Enables EBS encryption on the volume. + // Cannot be used with snapshot_id. + // +kubebuilder:validation:Optional + Encrypted *string `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The amount of provisioned IOPS. + // This must be set with a volume_type of "io1/io2/gp3". + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. + // encrypted must be set to true when this is set. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. 
+ // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The Snapshot ID to mount. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The throughput to provision for a gp3 volume in MiB/s (specified as an integer, e.g., 500), with a maximum of 1,000 MiB/s. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // The volume type. + // Can be one of standard, gp2, gp3, io1, io2, sc1 or st1. + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type ElasticGpuSpecificationsInitParameters struct { + + // The Elastic GPU Type + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ElasticGpuSpecificationsObservation struct { + + // The Elastic GPU Type + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ElasticGpuSpecificationsParameters struct { + + // The Elastic GPU Type + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ElasticInferenceAcceleratorInitParameters struct { + + // Accelerator type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ElasticInferenceAcceleratorObservation struct { + + // Accelerator type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ElasticInferenceAcceleratorParameters struct { + + // Accelerator type. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type HibernationOptionsInitParameters struct { + + // If set to true, the launched EC2 instance will hibernation enabled. + Configured *bool `json:"configured,omitempty" tf:"configured,omitempty"` +} + +type HibernationOptionsObservation struct { + + // If set to true, the launched EC2 instance will hibernation enabled. + Configured *bool `json:"configured,omitempty" tf:"configured,omitempty"` +} + +type HibernationOptionsParameters struct { + + // If set to true, the launched EC2 instance will hibernation enabled. + // +kubebuilder:validation:Optional + Configured *bool `json:"configured" tf:"configured,omitempty"` +} + +type IAMInstanceProfileInitParameters struct { + + // The Amazon Resource Name (ARN) of the instance profile. Conflicts with name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a InstanceProfile in iam to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // The name of the instance profile. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a InstanceProfile in iam to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate name. 
+ // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type IAMInstanceProfileObservation struct { + + // The Amazon Resource Name (ARN) of the instance profile. Conflicts with name. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The name of the instance profile. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IAMInstanceProfileParameters struct { + + // The Amazon Resource Name (ARN) of the instance profile. Conflicts with name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a InstanceProfile in iam to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // The name of the instance profile. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a InstanceProfile in iam to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type InstanceMarketOptionsSpotOptionsInitParameters struct { + + // The required duration in minutes. This value must be a multiple of 60. 
+ BlockDurationMinutes *float64 `json:"blockDurationMinutes,omitempty" tf:"block_duration_minutes,omitempty"` + + // The behavior when a Spot Instance is interrupted. Can be hibernate, + // stop, or terminate. (Default: terminate). + InstanceInterruptionBehavior *string `json:"instanceInterruptionBehavior,omitempty" tf:"instance_interruption_behavior,omitempty"` + + // The maximum hourly price you're willing to pay for the Spot Instances. + MaxPrice *string `json:"maxPrice,omitempty" tf:"max_price,omitempty"` + + // The Spot Instance request type. Can be one-time, or persistent. + SpotInstanceType *string `json:"spotInstanceType,omitempty" tf:"spot_instance_type,omitempty"` + + // The end date of the request. + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` +} + +type InstanceMarketOptionsSpotOptionsObservation struct { + + // The required duration in minutes. This value must be a multiple of 60. + BlockDurationMinutes *float64 `json:"blockDurationMinutes,omitempty" tf:"block_duration_minutes,omitempty"` + + // The behavior when a Spot Instance is interrupted. Can be hibernate, + // stop, or terminate. (Default: terminate). + InstanceInterruptionBehavior *string `json:"instanceInterruptionBehavior,omitempty" tf:"instance_interruption_behavior,omitempty"` + + // The maximum hourly price you're willing to pay for the Spot Instances. + MaxPrice *string `json:"maxPrice,omitempty" tf:"max_price,omitempty"` + + // The Spot Instance request type. Can be one-time, or persistent. + SpotInstanceType *string `json:"spotInstanceType,omitempty" tf:"spot_instance_type,omitempty"` + + // The end date of the request. + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` +} + +type InstanceMarketOptionsSpotOptionsParameters struct { + + // The required duration in minutes. This value must be a multiple of 60. 
+ // +kubebuilder:validation:Optional + BlockDurationMinutes *float64 `json:"blockDurationMinutes,omitempty" tf:"block_duration_minutes,omitempty"` + + // The behavior when a Spot Instance is interrupted. Can be hibernate, + // stop, or terminate. (Default: terminate). + // +kubebuilder:validation:Optional + InstanceInterruptionBehavior *string `json:"instanceInterruptionBehavior,omitempty" tf:"instance_interruption_behavior,omitempty"` + + // The maximum hourly price you're willing to pay for the Spot Instances. + // +kubebuilder:validation:Optional + MaxPrice *string `json:"maxPrice,omitempty" tf:"max_price,omitempty"` + + // The Spot Instance request type. Can be one-time, or persistent. + // +kubebuilder:validation:Optional + SpotInstanceType *string `json:"spotInstanceType,omitempty" tf:"spot_instance_type,omitempty"` + + // The end date of the request. + // +kubebuilder:validation:Optional + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` +} + +type InstanceRequirementsInitParameters struct { + + // Block describing the minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips). Default is no minimum or maximum. + AcceleratorCount *AcceleratorCountInitParameters `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // List of accelerator manufacturer names. Default is any manufacturer. + // +listType=set + AcceleratorManufacturers []*string `json:"acceleratorManufacturers,omitempty" tf:"accelerator_manufacturers,omitempty"` + + // List of accelerator names. Default is any acclerator. + // +listType=set + AcceleratorNames []*string `json:"acceleratorNames,omitempty" tf:"accelerator_names,omitempty"` + + // Block describing the minimum and maximum total memory of the accelerators. Default is no minimum or maximum. 
+ AcceleratorTotalMemoryMib *AcceleratorTotalMemoryMibInitParameters `json:"acceleratorTotalMemoryMib,omitempty" tf:"accelerator_total_memory_mib,omitempty"` + + // List of accelerator types. Default is any accelerator type. + // +listType=set + AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"` + + // List of instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are allowing the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are allowing all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is all instance types. + // +listType=set + AllowedInstanceTypes []*string `json:"allowedInstanceTypes,omitempty" tf:"allowed_instance_types,omitempty"` + + // Indicate whether bare metal instace types should be included, excluded, or required. Default is excluded. + BareMetal *string `json:"bareMetal,omitempty" tf:"bare_metal,omitempty"` + + // Block describing the minimum and maximum baseline EBS bandwidth, in Mbps. Default is no minimum or maximum. + BaselineEBSBandwidthMbps *BaselineEBSBandwidthMbpsInitParameters `json:"baselineEbsBandwidthMbps,omitempty" tf:"baseline_ebs_bandwidth_mbps,omitempty"` + + // Indicate whether burstable performance instance types should be included, excluded, or required. Default is excluded. + BurstablePerformance *string `json:"burstablePerformance,omitempty" tf:"burstable_performance,omitempty"` + + // List of CPU manufacturer names. Default is any manufacturer. 
+ // +listType=set + CPUManufacturers []*string `json:"cpuManufacturers,omitempty" tf:"cpu_manufacturers,omitempty"` + + // List of instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are excluding all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is no excluded instance types. + // +listType=set + ExcludedInstanceTypes []*string `json:"excludedInstanceTypes,omitempty" tf:"excluded_instance_types,omitempty"` + + // List of instance generation names. Default is any generation. + // +listType=set + InstanceGenerations []*string `json:"instanceGenerations,omitempty" tf:"instance_generations,omitempty"` + + // Indicate whether instance types with local storage volumes are included, excluded, or required. Default is included. + LocalStorage *string `json:"localStorage,omitempty" tf:"local_storage,omitempty"` + + // List of local storage type names. Default any storage type. + // +listType=set + LocalStorageTypes []*string `json:"localStorageTypes,omitempty" tf:"local_storage_types,omitempty"` + + // Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. + MemoryGibPerVcpu *MemoryGibPerVcpuInitParameters `json:"memoryGibPerVcpu,omitempty" tf:"memory_gib_per_vcpu,omitempty"` + + // Block describing the minimum and maximum amount of memory (MiB). Default is no maximum. + MemoryMib *MemoryMibInitParameters `json:"memoryMib,omitempty" tf:"memory_mib,omitempty"` + + // Block describing the minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default is no minimum or maximum. 
+ NetworkBandwidthGbps *NetworkBandwidthGbpsInitParameters `json:"networkBandwidthGbps,omitempty" tf:"network_bandwidth_gbps,omitempty"` + + // Block describing the minimum and maximum number of network interfaces. Default is no minimum or maximum. + NetworkInterfaceCount *NetworkInterfaceCountInitParameters `json:"networkInterfaceCount,omitempty" tf:"network_interface_count,omitempty"` + + // The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 20. + OnDemandMaxPricePercentageOverLowestPrice *float64 `json:"onDemandMaxPricePercentageOverLowestPrice,omitempty" tf:"on_demand_max_price_percentage_over_lowest_price,omitempty"` + + // Indicate whether instance types must support On-Demand Instance Hibernation, either true or false. Default is false. + RequireHibernateSupport *bool `json:"requireHibernateSupport,omitempty" tf:"require_hibernate_support,omitempty"` + + // The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. 
+ SpotMaxPricePercentageOverLowestPrice *float64 `json:"spotMaxPricePercentageOverLowestPrice,omitempty" tf:"spot_max_price_percentage_over_lowest_price,omitempty"` + + // Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. + TotalLocalStorageGb *TotalLocalStorageGbInitParameters `json:"totalLocalStorageGb,omitempty" tf:"total_local_storage_gb,omitempty"` + + // Block describing the minimum and maximum number of vCPUs. Default is no maximum. + VcpuCount *VcpuCountInitParameters `json:"vcpuCount,omitempty" tf:"vcpu_count,omitempty"` +} + +type InstanceRequirementsObservation struct { + + // Block describing the minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips). Default is no minimum or maximum. + AcceleratorCount *AcceleratorCountObservation `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // List of accelerator manufacturer names. Default is any manufacturer. + // +listType=set + AcceleratorManufacturers []*string `json:"acceleratorManufacturers,omitempty" tf:"accelerator_manufacturers,omitempty"` + + // List of accelerator names. Default is any acclerator. + // +listType=set + AcceleratorNames []*string `json:"acceleratorNames,omitempty" tf:"accelerator_names,omitempty"` + + // Block describing the minimum and maximum total memory of the accelerators. Default is no minimum or maximum. + AcceleratorTotalMemoryMib *AcceleratorTotalMemoryMibObservation `json:"acceleratorTotalMemoryMib,omitempty" tf:"accelerator_total_memory_mib,omitempty"` + + // List of accelerator types. Default is any accelerator type. + // +listType=set + AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"` + + // List of instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. 
You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are allowing the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are allowing all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is all instance types. + // +listType=set + AllowedInstanceTypes []*string `json:"allowedInstanceTypes,omitempty" tf:"allowed_instance_types,omitempty"` + + // Indicate whether bare metal instace types should be included, excluded, or required. Default is excluded. + BareMetal *string `json:"bareMetal,omitempty" tf:"bare_metal,omitempty"` + + // Block describing the minimum and maximum baseline EBS bandwidth, in Mbps. Default is no minimum or maximum. + BaselineEBSBandwidthMbps *BaselineEBSBandwidthMbpsObservation `json:"baselineEbsBandwidthMbps,omitempty" tf:"baseline_ebs_bandwidth_mbps,omitempty"` + + // Indicate whether burstable performance instance types should be included, excluded, or required. Default is excluded. + BurstablePerformance *string `json:"burstablePerformance,omitempty" tf:"burstable_performance,omitempty"` + + // List of CPU manufacturer names. Default is any manufacturer. + // +listType=set + CPUManufacturers []*string `json:"cpuManufacturers,omitempty" tf:"cpu_manufacturers,omitempty"` + + // List of instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are excluding all the M5a instance types, but not the M5n instance types. 
Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is no excluded instance types. + // +listType=set + ExcludedInstanceTypes []*string `json:"excludedInstanceTypes,omitempty" tf:"excluded_instance_types,omitempty"` + + // List of instance generation names. Default is any generation. + // +listType=set + InstanceGenerations []*string `json:"instanceGenerations,omitempty" tf:"instance_generations,omitempty"` + + // Indicate whether instance types with local storage volumes are included, excluded, or required. Default is included. + LocalStorage *string `json:"localStorage,omitempty" tf:"local_storage,omitempty"` + + // List of local storage type names. Default any storage type. + // +listType=set + LocalStorageTypes []*string `json:"localStorageTypes,omitempty" tf:"local_storage_types,omitempty"` + + // Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. + MemoryGibPerVcpu *MemoryGibPerVcpuObservation `json:"memoryGibPerVcpu,omitempty" tf:"memory_gib_per_vcpu,omitempty"` + + // Block describing the minimum and maximum amount of memory (MiB). Default is no maximum. + MemoryMib *MemoryMibObservation `json:"memoryMib,omitempty" tf:"memory_mib,omitempty"` + + // Block describing the minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default is no minimum or maximum. + NetworkBandwidthGbps *NetworkBandwidthGbpsObservation `json:"networkBandwidthGbps,omitempty" tf:"network_bandwidth_gbps,omitempty"` + + // Block describing the minimum and maximum number of network interfaces. Default is no minimum or maximum. + NetworkInterfaceCount *NetworkInterfaceCountObservation `json:"networkInterfaceCount,omitempty" tf:"network_interface_count,omitempty"` + + // The price protection threshold for On-Demand Instances. 
This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 20. + OnDemandMaxPricePercentageOverLowestPrice *float64 `json:"onDemandMaxPricePercentageOverLowestPrice,omitempty" tf:"on_demand_max_price_percentage_over_lowest_price,omitempty"` + + // Indicate whether instance types must support On-Demand Instance Hibernation, either true or false. Default is false. + RequireHibernateSupport *bool `json:"requireHibernateSupport,omitempty" tf:"require_hibernate_support,omitempty"` + + // The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. + SpotMaxPricePercentageOverLowestPrice *float64 `json:"spotMaxPricePercentageOverLowestPrice,omitempty" tf:"spot_max_price_percentage_over_lowest_price,omitempty"` + + // Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. + TotalLocalStorageGb *TotalLocalStorageGbObservation `json:"totalLocalStorageGb,omitempty" tf:"total_local_storage_gb,omitempty"` + + // Block describing the minimum and maximum number of vCPUs. Default is no maximum. 
+ VcpuCount *VcpuCountObservation `json:"vcpuCount,omitempty" tf:"vcpu_count,omitempty"` +} + +type InstanceRequirementsParameters struct { + + // Block describing the minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips). Default is no minimum or maximum. + // +kubebuilder:validation:Optional + AcceleratorCount *AcceleratorCountParameters `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // List of accelerator manufacturer names. Default is any manufacturer. + // +kubebuilder:validation:Optional + // +listType=set + AcceleratorManufacturers []*string `json:"acceleratorManufacturers,omitempty" tf:"accelerator_manufacturers,omitempty"` + + // List of accelerator names. Default is any acclerator. + // +kubebuilder:validation:Optional + // +listType=set + AcceleratorNames []*string `json:"acceleratorNames,omitempty" tf:"accelerator_names,omitempty"` + + // Block describing the minimum and maximum total memory of the accelerators. Default is no minimum or maximum. + // +kubebuilder:validation:Optional + AcceleratorTotalMemoryMib *AcceleratorTotalMemoryMibParameters `json:"acceleratorTotalMemoryMib,omitempty" tf:"accelerator_total_memory_mib,omitempty"` + + // List of accelerator types. Default is any accelerator type. + // +kubebuilder:validation:Optional + // +listType=set + AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"` + + // List of instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are allowing the entire C5 instance family, which includes all C5a and C5n instance types. 
If you specify m5a.*, you are allowing all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is all instance types. + // +kubebuilder:validation:Optional + // +listType=set + AllowedInstanceTypes []*string `json:"allowedInstanceTypes,omitempty" tf:"allowed_instance_types,omitempty"` + + // Indicate whether bare metal instace types should be included, excluded, or required. Default is excluded. + // +kubebuilder:validation:Optional + BareMetal *string `json:"bareMetal,omitempty" tf:"bare_metal,omitempty"` + + // Block describing the minimum and maximum baseline EBS bandwidth, in Mbps. Default is no minimum or maximum. + // +kubebuilder:validation:Optional + BaselineEBSBandwidthMbps *BaselineEBSBandwidthMbpsParameters `json:"baselineEbsBandwidthMbps,omitempty" tf:"baseline_ebs_bandwidth_mbps,omitempty"` + + // Indicate whether burstable performance instance types should be included, excluded, or required. Default is excluded. + // +kubebuilder:validation:Optional + BurstablePerformance *string `json:"burstablePerformance,omitempty" tf:"burstable_performance,omitempty"` + + // List of CPU manufacturer names. Default is any manufacturer. + // +kubebuilder:validation:Optional + // +listType=set + CPUManufacturers []*string `json:"cpuManufacturers,omitempty" tf:"cpu_manufacturers,omitempty"` + + // List of instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are excluding all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is no excluded instance types. 
+ // +kubebuilder:validation:Optional + // +listType=set + ExcludedInstanceTypes []*string `json:"excludedInstanceTypes,omitempty" tf:"excluded_instance_types,omitempty"` + + // List of instance generation names. Default is any generation. + // +kubebuilder:validation:Optional + // +listType=set + InstanceGenerations []*string `json:"instanceGenerations,omitempty" tf:"instance_generations,omitempty"` + + // Indicate whether instance types with local storage volumes are included, excluded, or required. Default is included. + // +kubebuilder:validation:Optional + LocalStorage *string `json:"localStorage,omitempty" tf:"local_storage,omitempty"` + + // List of local storage type names. Default any storage type. + // +kubebuilder:validation:Optional + // +listType=set + LocalStorageTypes []*string `json:"localStorageTypes,omitempty" tf:"local_storage_types,omitempty"` + + // Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. + // +kubebuilder:validation:Optional + MemoryGibPerVcpu *MemoryGibPerVcpuParameters `json:"memoryGibPerVcpu,omitempty" tf:"memory_gib_per_vcpu,omitempty"` + + // Block describing the minimum and maximum amount of memory (MiB). Default is no maximum. + // +kubebuilder:validation:Optional + MemoryMib *MemoryMibParameters `json:"memoryMib" tf:"memory_mib,omitempty"` + + // Block describing the minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default is no minimum or maximum. + // +kubebuilder:validation:Optional + NetworkBandwidthGbps *NetworkBandwidthGbpsParameters `json:"networkBandwidthGbps,omitempty" tf:"network_bandwidth_gbps,omitempty"` + + // Block describing the minimum and maximum number of network interfaces. Default is no minimum or maximum. 
+ // +kubebuilder:validation:Optional + NetworkInterfaceCount *NetworkInterfaceCountParameters `json:"networkInterfaceCount,omitempty" tf:"network_interface_count,omitempty"` + + // The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 20. + // +kubebuilder:validation:Optional + OnDemandMaxPricePercentageOverLowestPrice *float64 `json:"onDemandMaxPricePercentageOverLowestPrice,omitempty" tf:"on_demand_max_price_percentage_over_lowest_price,omitempty"` + + // Indicate whether instance types must support On-Demand Instance Hibernation, either true or false. Default is false. + // +kubebuilder:validation:Optional + RequireHibernateSupport *bool `json:"requireHibernateSupport,omitempty" tf:"require_hibernate_support,omitempty"` + + // The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. 
+ // +kubebuilder:validation:Optional + SpotMaxPricePercentageOverLowestPrice *float64 `json:"spotMaxPricePercentageOverLowestPrice,omitempty" tf:"spot_max_price_percentage_over_lowest_price,omitempty"` + + // Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. + // +kubebuilder:validation:Optional + TotalLocalStorageGb *TotalLocalStorageGbParameters `json:"totalLocalStorageGb,omitempty" tf:"total_local_storage_gb,omitempty"` + + // Block describing the minimum and maximum number of vCPUs. Default is no maximum. + // +kubebuilder:validation:Optional + VcpuCount *VcpuCountParameters `json:"vcpuCount" tf:"vcpu_count,omitempty"` +} + +type LaunchTemplateCPUOptionsInitParameters struct { + + // Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only. Valid values are enabled and disabled. + AmdSevSnp *string `json:"amdSevSnp,omitempty" tf:"amd_sev_snp,omitempty"` + + // The number of CPU cores for the instance. + CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` + + // The number of threads per CPU core. + // To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. + // Otherwise, specify the default value of 2. + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` +} + +type LaunchTemplateCPUOptionsObservation struct { + + // Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only. Valid values are enabled and disabled. + AmdSevSnp *string `json:"amdSevSnp,omitempty" tf:"amd_sev_snp,omitempty"` + + // The number of CPU cores for the instance. + CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` + + // The number of threads per CPU core. + // To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. + // Otherwise, specify the default value of 2. 
+ ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` +} + +type LaunchTemplateCPUOptionsParameters struct { + + // Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only. Valid values are enabled and disabled. + // +kubebuilder:validation:Optional + AmdSevSnp *string `json:"amdSevSnp,omitempty" tf:"amd_sev_snp,omitempty"` + + // The number of CPU cores for the instance. + // +kubebuilder:validation:Optional + CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` + + // The number of threads per CPU core. + // To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. + // Otherwise, specify the default value of 2. + // +kubebuilder:validation:Optional + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` +} + +type LaunchTemplateCapacityReservationSpecificationInitParameters struct { + + // Indicates the instance's Capacity Reservation preferences. Can be open or none. (Default none). + CapacityReservationPreference *string `json:"capacityReservationPreference,omitempty" tf:"capacity_reservation_preference,omitempty"` + + // Used to target a specific Capacity Reservation: + CapacityReservationTarget *CapacityReservationSpecificationCapacityReservationTargetInitParameters `json:"capacityReservationTarget,omitempty" tf:"capacity_reservation_target,omitempty"` +} + +type LaunchTemplateCapacityReservationSpecificationObservation struct { + + // Indicates the instance's Capacity Reservation preferences. Can be open or none. (Default none). 
+ CapacityReservationPreference *string `json:"capacityReservationPreference,omitempty" tf:"capacity_reservation_preference,omitempty"` + + // Used to target a specific Capacity Reservation: + CapacityReservationTarget *CapacityReservationSpecificationCapacityReservationTargetObservation `json:"capacityReservationTarget,omitempty" tf:"capacity_reservation_target,omitempty"` +} + +type LaunchTemplateCapacityReservationSpecificationParameters struct { + + // Indicates the instance's Capacity Reservation preferences. Can be open or none. (Default none). + // +kubebuilder:validation:Optional + CapacityReservationPreference *string `json:"capacityReservationPreference,omitempty" tf:"capacity_reservation_preference,omitempty"` + + // Used to target a specific Capacity Reservation: + // +kubebuilder:validation:Optional + CapacityReservationTarget *CapacityReservationSpecificationCapacityReservationTargetParameters `json:"capacityReservationTarget,omitempty" tf:"capacity_reservation_target,omitempty"` +} + +type LaunchTemplateCreditSpecificationInitParameters struct { + + // The credit option for CPU usage. + // Can be standard or unlimited. + // T3 instances are launched as unlimited by default. + // T2 instances are launched as standard by default. + CPUCredits *string `json:"cpuCredits,omitempty" tf:"cpu_credits,omitempty"` +} + +type LaunchTemplateCreditSpecificationObservation struct { + + // The credit option for CPU usage. + // Can be standard or unlimited. + // T3 instances are launched as unlimited by default. + // T2 instances are launched as standard by default. + CPUCredits *string `json:"cpuCredits,omitempty" tf:"cpu_credits,omitempty"` +} + +type LaunchTemplateCreditSpecificationParameters struct { + + // The credit option for CPU usage. + // Can be standard or unlimited. + // T3 instances are launched as unlimited by default. + // T2 instances are launched as standard by default. 
+ // +kubebuilder:validation:Optional + CPUCredits *string `json:"cpuCredits,omitempty" tf:"cpu_credits,omitempty"` +} + +type LaunchTemplateEnclaveOptionsInitParameters struct { + + // If set to true, Nitro Enclaves will be enabled on the instance. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type LaunchTemplateEnclaveOptionsObservation struct { + + // If set to true, Nitro Enclaves will be enabled on the instance. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type LaunchTemplateEnclaveOptionsParameters struct { + + // If set to true, Nitro Enclaves will be enabled on the instance. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type LaunchTemplateInitParameters_2 struct { + + // Specify volumes to attach to the instance besides the volumes specified by the AMI. + // See Block Devices below for details. + BlockDeviceMappings []BlockDeviceMappingsInitParameters `json:"blockDeviceMappings,omitempty" tf:"block_device_mappings,omitempty"` + + // The CPU options for the instance. See CPU Options below for more details. + CPUOptions *LaunchTemplateCPUOptionsInitParameters `json:"cpuOptions,omitempty" tf:"cpu_options,omitempty"` + + // Targeting for EC2 capacity reservations. See Capacity Reservation Specification below for more details. + CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationInitParameters `json:"capacityReservationSpecification,omitempty" tf:"capacity_reservation_specification,omitempty"` + + // Customize the credit specification of the instance. See Credit + // Specification below for more details. + CreditSpecification *LaunchTemplateCreditSpecificationInitParameters `json:"creditSpecification,omitempty" tf:"credit_specification,omitempty"` + + // Default Version of the launch template. 
+ DefaultVersion *float64 `json:"defaultVersion,omitempty" tf:"default_version,omitempty"` + + // Description of the launch template. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // If true, enables EC2 Instance Stop Protection. + DisableAPIStop *bool `json:"disableApiStop,omitempty" tf:"disable_api_stop,omitempty"` + + // If true, enables EC2 Instance + // Termination Protection + DisableAPITermination *bool `json:"disableApiTermination,omitempty" tf:"disable_api_termination,omitempty"` + + // If true, the launched EC2 instance will be EBS-optimized. + EBSOptimized *string `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // The elastic GPU to attach to the instance. See Elastic GPU + // below for more details. + ElasticGpuSpecifications []ElasticGpuSpecificationsInitParameters `json:"elasticGpuSpecifications,omitempty" tf:"elastic_gpu_specifications,omitempty"` + + // Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + ElasticInferenceAccelerator *ElasticInferenceAcceleratorInitParameters `json:"elasticInferenceAccelerator,omitempty" tf:"elastic_inference_accelerator,omitempty"` + + // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. + EnclaveOptions *LaunchTemplateEnclaveOptionsInitParameters `json:"enclaveOptions,omitempty" tf:"enclave_options,omitempty"` + + // The hibernation options for the instance. See Hibernation Options below for more details. + HibernationOptions *HibernationOptionsInitParameters `json:"hibernationOptions,omitempty" tf:"hibernation_options,omitempty"` + + // The IAM Instance Profile to launch the instance with. See Instance Profile + // below for more details. + IAMInstanceProfile *IAMInstanceProfileInitParameters `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // The AMI from which to launch the instance. 
+ ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // Shutdown behavior for the instance. Can be stop or terminate. + // (Default: stop). + InstanceInitiatedShutdownBehavior *string `json:"instanceInitiatedShutdownBehavior,omitempty" tf:"instance_initiated_shutdown_behavior,omitempty"` + + // The market (purchasing) option for the instance. See Market Options + // below for details. + InstanceMarketOptions *LaunchTemplateInstanceMarketOptionsInitParameters `json:"instanceMarketOptions,omitempty" tf:"instance_market_options,omitempty"` + + // The attribute requirements for the type of instance. If present then instance_type cannot be present. + InstanceRequirements *InstanceRequirementsInitParameters `json:"instanceRequirements,omitempty" tf:"instance_requirements,omitempty"` + + // The type of the instance. If present then instance_requirements cannot be present. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The kernel ID. + KernelID *string `json:"kernelId,omitempty" tf:"kernel_id,omitempty"` + + // The key name to use for the instance. + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // A list of license specifications to associate with. See License Specification below for more details. + LicenseSpecification []LicenseSpecificationInitParameters `json:"licenseSpecification,omitempty" tf:"license_specification,omitempty"` + + // The maintenance options for the instance. See Maintenance Options below for more details. + MaintenanceOptions *LaunchTemplateMaintenanceOptionsInitParameters `json:"maintenanceOptions,omitempty" tf:"maintenance_options,omitempty"` + + // Customize the metadata options for the instance. See Metadata Options below for more details. + MetadataOptions *LaunchTemplateMetadataOptionsInitParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // The monitoring option for the instance. See Monitoring below for more details. 
+ Monitoring *MonitoringInitParameters `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // The name of the launch template. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Customize network interfaces to be attached at instance boot time. See Network + // Interfaces below for more details. + NetworkInterfaces []NetworkInterfacesInitParameters `json:"networkInterfaces,omitempty" tf:"network_interfaces,omitempty"` + + // The placement of the instance. See Placement below for more details. + Placement *PlacementInitParameters `json:"placement,omitempty" tf:"placement,omitempty"` + + // The options for the instance hostname. The default values are inherited from the subnet. See Private DNS Name Options below for more details. + PrivateDNSNameOptions *LaunchTemplatePrivateDNSNameOptionsInitParameters `json:"privateDnsNameOptions,omitempty" tf:"private_dns_name_options,omitempty"` + + // The ID of the RAM disk. + RAMDiskID *string `json:"ramDiskId,omitempty" tf:"ram_disk_id,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupNames. + // +kubebuilder:validation:Optional + SecurityGroupNameRefs []v1.Reference `json:"securityGroupNameRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupNames. + // +kubebuilder:validation:Optional + SecurityGroupNameSelector *v1.Selector `json:"securityGroupNameSelector,omitempty" tf:"-"` + + // A list of security group names to associate with. If you are creating Instances in a VPC, use + // vpc_security_group_ids instead. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupNameRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupNameSelector + // +listType=set + SecurityGroupNames []*string `json:"securityGroupNames,omitempty" tf:"security_group_names,omitempty"` + + // The tags to apply to the resources during launch. See Tag Specifications below for more details. Default tags are currently not propagated to ASG created resources so you may wish to inject your default tags into this variable against the relevant child resource types created. + TagSpecifications []TagSpecificationsInitParameters `json:"tagSpecifications,omitempty" tf:"tag_specifications,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to update Default Version each update. Conflicts with default_version. + UpdateDefaultVersion *bool `json:"updateDefaultVersion,omitempty" tf:"update_default_version,omitempty"` + + // The base64-encoded user data to provide when launching the instance. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // A list of security group IDs to associate with. 
Conflicts with network_interfaces.security_groups + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type LaunchTemplateInstanceMarketOptionsInitParameters struct { + + // The market type. Can be spot. + MarketType *string `json:"marketType,omitempty" tf:"market_type,omitempty"` + + // The options for Spot Instance + SpotOptions *InstanceMarketOptionsSpotOptionsInitParameters `json:"spotOptions,omitempty" tf:"spot_options,omitempty"` +} + +type LaunchTemplateInstanceMarketOptionsObservation struct { + + // The market type. Can be spot. + MarketType *string `json:"marketType,omitempty" tf:"market_type,omitempty"` + + // The options for Spot Instance + SpotOptions *InstanceMarketOptionsSpotOptionsObservation `json:"spotOptions,omitempty" tf:"spot_options,omitempty"` +} + +type LaunchTemplateInstanceMarketOptionsParameters struct { + + // The market type. Can be spot. + // +kubebuilder:validation:Optional + MarketType *string `json:"marketType,omitempty" tf:"market_type,omitempty"` + + // The options for Spot Instance + // +kubebuilder:validation:Optional + SpotOptions *InstanceMarketOptionsSpotOptionsParameters `json:"spotOptions,omitempty" tf:"spot_options,omitempty"` +} + +type LaunchTemplateMaintenanceOptionsInitParameters struct { + + // Disables the automatic recovery behavior of your instance or sets it to default. Can be "default" or "disabled". See Recover your instance for more details. + AutoRecovery *string `json:"autoRecovery,omitempty" tf:"auto_recovery,omitempty"` +} + +type LaunchTemplateMaintenanceOptionsObservation struct { + + // Disables the automatic recovery behavior of your instance or sets it to default. 
Can be "default" or "disabled". See Recover your instance for more details. + AutoRecovery *string `json:"autoRecovery,omitempty" tf:"auto_recovery,omitempty"` +} + +type LaunchTemplateMaintenanceOptionsParameters struct { + + // Disables the automatic recovery behavior of your instance or sets it to default. Can be "default" or "disabled". See Recover your instance for more details. + // +kubebuilder:validation:Optional + AutoRecovery *string `json:"autoRecovery,omitempty" tf:"auto_recovery,omitempty"` +} + +type LaunchTemplateMetadataOptionsInitParameters struct { + + // Whether the metadata service is available. Can be "enabled" or "disabled". (Default: "enabled"). + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // Enables or disables the IPv6 endpoint for the instance metadata service. Can be "enabled" or "disabled". + HTTPProtocolIPv6 *string `json:"httpProtocolIpv6,omitempty" tf:"http_protocol_ipv6,omitempty"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Can be an integer from 1 to 64. (Default: 1). + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // Whether or not the metadata service requires session tokens, also referred to as Instance Metadata Service Version 2 (IMDSv2). Can be "optional" or "required". (Default: "optional"). + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` + + // Enables or disables access to instance tags from the instance metadata service. Can be "enabled" or "disabled". + InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty" tf:"instance_metadata_tags,omitempty"` +} + +type LaunchTemplateMetadataOptionsObservation struct { + + // Whether the metadata service is available. Can be "enabled" or "disabled". (Default: "enabled"). 
+ HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // Enables or disables the IPv6 endpoint for the instance metadata service. Can be "enabled" or "disabled". + HTTPProtocolIPv6 *string `json:"httpProtocolIpv6,omitempty" tf:"http_protocol_ipv6,omitempty"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Can be an integer from 1 to 64. (Default: 1). + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // Whether or not the metadata service requires session tokens, also referred to as Instance Metadata Service Version 2 (IMDSv2). Can be "optional" or "required". (Default: "optional"). + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` + + // Enables or disables access to instance tags from the instance metadata service. Can be "enabled" or "disabled". + InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty" tf:"instance_metadata_tags,omitempty"` +} + +type LaunchTemplateMetadataOptionsParameters struct { + + // Whether the metadata service is available. Can be "enabled" or "disabled". (Default: "enabled"). + // +kubebuilder:validation:Optional + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // Enables or disables the IPv6 endpoint for the instance metadata service. Can be "enabled" or "disabled". + // +kubebuilder:validation:Optional + HTTPProtocolIPv6 *string `json:"httpProtocolIpv6,omitempty" tf:"http_protocol_ipv6,omitempty"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Can be an integer from 1 to 64. (Default: 1). 
+ // +kubebuilder:validation:Optional + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // Whether or not the metadata service requires session tokens, also referred to as Instance Metadata Service Version 2 (IMDSv2). Can be "optional" or "required". (Default: "optional"). + // +kubebuilder:validation:Optional + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` + + // Enables or disables access to instance tags from the instance metadata service. Can be "enabled" or "disabled". + // +kubebuilder:validation:Optional + InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty" tf:"instance_metadata_tags,omitempty"` +} + +type LaunchTemplateObservation_2 struct { + + // Amazon Resource Name (ARN) of the launch template. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specify volumes to attach to the instance besides the volumes specified by the AMI. + // See Block Devices below for details. + BlockDeviceMappings []BlockDeviceMappingsObservation `json:"blockDeviceMappings,omitempty" tf:"block_device_mappings,omitempty"` + + // The CPU options for the instance. See CPU Options below for more details. + CPUOptions *LaunchTemplateCPUOptionsObservation `json:"cpuOptions,omitempty" tf:"cpu_options,omitempty"` + + // Targeting for EC2 capacity reservations. See Capacity Reservation Specification below for more details. + CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationObservation `json:"capacityReservationSpecification,omitempty" tf:"capacity_reservation_specification,omitempty"` + + // Customize the credit specification of the instance. See Credit + // Specification below for more details. + CreditSpecification *LaunchTemplateCreditSpecificationObservation `json:"creditSpecification,omitempty" tf:"credit_specification,omitempty"` + + // Default Version of the launch template. 
+ DefaultVersion *float64 `json:"defaultVersion,omitempty" tf:"default_version,omitempty"` + + // Description of the launch template. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // If true, enables EC2 Instance Stop Protection. + DisableAPIStop *bool `json:"disableApiStop,omitempty" tf:"disable_api_stop,omitempty"` + + // If true, enables EC2 Instance + // Termination Protection + DisableAPITermination *bool `json:"disableApiTermination,omitempty" tf:"disable_api_termination,omitempty"` + + // If true, the launched EC2 instance will be EBS-optimized. + EBSOptimized *string `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // The elastic GPU to attach to the instance. See Elastic GPU + // below for more details. + ElasticGpuSpecifications []ElasticGpuSpecificationsObservation `json:"elasticGpuSpecifications,omitempty" tf:"elastic_gpu_specifications,omitempty"` + + // Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + ElasticInferenceAccelerator *ElasticInferenceAcceleratorObservation `json:"elasticInferenceAccelerator,omitempty" tf:"elastic_inference_accelerator,omitempty"` + + // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. + EnclaveOptions *LaunchTemplateEnclaveOptionsObservation `json:"enclaveOptions,omitempty" tf:"enclave_options,omitempty"` + + // The hibernation options for the instance. See Hibernation Options below for more details. + HibernationOptions *HibernationOptionsObservation `json:"hibernationOptions,omitempty" tf:"hibernation_options,omitempty"` + + // The IAM Instance Profile to launch the instance with. See Instance Profile + // below for more details. + IAMInstanceProfile *IAMInstanceProfileObservation `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // The ID of the launch template. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The AMI from which to launch the instance. + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // Shutdown behavior for the instance. Can be stop or terminate. + // (Default: stop). + InstanceInitiatedShutdownBehavior *string `json:"instanceInitiatedShutdownBehavior,omitempty" tf:"instance_initiated_shutdown_behavior,omitempty"` + + // The market (purchasing) option for the instance. See Market Options + // below for details. + InstanceMarketOptions *LaunchTemplateInstanceMarketOptionsObservation `json:"instanceMarketOptions,omitempty" tf:"instance_market_options,omitempty"` + + // The attribute requirements for the type of instance. If present then instance_type cannot be present. + InstanceRequirements *InstanceRequirementsObservation `json:"instanceRequirements,omitempty" tf:"instance_requirements,omitempty"` + + // The type of the instance. If present then instance_requirements cannot be present. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The kernel ID. + KernelID *string `json:"kernelId,omitempty" tf:"kernel_id,omitempty"` + + // The key name to use for the instance. + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // The latest version of the launch template. + LatestVersion *float64 `json:"latestVersion,omitempty" tf:"latest_version,omitempty"` + + // A list of license specifications to associate with. See License Specification below for more details. + LicenseSpecification []LicenseSpecificationObservation `json:"licenseSpecification,omitempty" tf:"license_specification,omitempty"` + + // The maintenance options for the instance. See Maintenance Options below for more details. + MaintenanceOptions *LaunchTemplateMaintenanceOptionsObservation `json:"maintenanceOptions,omitempty" tf:"maintenance_options,omitempty"` + + // Customize the metadata options for the instance. 
See Metadata Options below for more details. + MetadataOptions *LaunchTemplateMetadataOptionsObservation `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // The monitoring option for the instance. See Monitoring below for more details. + Monitoring *MonitoringObservation `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // The name of the launch template. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Customize network interfaces to be attached at instance boot time. See Network + // Interfaces below for more details. + NetworkInterfaces []NetworkInterfacesObservation `json:"networkInterfaces,omitempty" tf:"network_interfaces,omitempty"` + + // The placement of the instance. See Placement below for more details. + Placement *PlacementObservation `json:"placement,omitempty" tf:"placement,omitempty"` + + // The options for the instance hostname. The default values are inherited from the subnet. See Private DNS Name Options below for more details. + PrivateDNSNameOptions *LaunchTemplatePrivateDNSNameOptionsObservation `json:"privateDnsNameOptions,omitempty" tf:"private_dns_name_options,omitempty"` + + // The ID of the RAM disk. + RAMDiskID *string `json:"ramDiskId,omitempty" tf:"ram_disk_id,omitempty"` + + // A list of security group names to associate with. If you are creating Instances in a VPC, use + // vpc_security_group_ids instead. + // +listType=set + SecurityGroupNames []*string `json:"securityGroupNames,omitempty" tf:"security_group_names,omitempty"` + + // The tags to apply to the resources during launch. See Tag Specifications below for more details. Default tags are currently not propagated to ASG created resources so you may wish to inject your default tags into this variable against the relevant child resource types created. + TagSpecifications []TagSpecificationsObservation `json:"tagSpecifications,omitempty" tf:"tag_specifications,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to update Default Version each update. Conflicts with default_version. + UpdateDefaultVersion *bool `json:"updateDefaultVersion,omitempty" tf:"update_default_version,omitempty"` + + // The base64-encoded user data to provide when launching the instance. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // A list of security group IDs to associate with. Conflicts with network_interfaces.security_groups + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type LaunchTemplateParameters_2 struct { + + // Specify volumes to attach to the instance besides the volumes specified by the AMI. + // See Block Devices below for details. + // +kubebuilder:validation:Optional + BlockDeviceMappings []BlockDeviceMappingsParameters `json:"blockDeviceMappings,omitempty" tf:"block_device_mappings,omitempty"` + + // The CPU options for the instance. See CPU Options below for more details. + // +kubebuilder:validation:Optional + CPUOptions *LaunchTemplateCPUOptionsParameters `json:"cpuOptions,omitempty" tf:"cpu_options,omitempty"` + + // Targeting for EC2 capacity reservations. See Capacity Reservation Specification below for more details. + // +kubebuilder:validation:Optional + CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationParameters `json:"capacityReservationSpecification,omitempty" tf:"capacity_reservation_specification,omitempty"` + + // Customize the credit specification of the instance. See Credit + // Specification below for more details. 
+ // +kubebuilder:validation:Optional + CreditSpecification *LaunchTemplateCreditSpecificationParameters `json:"creditSpecification,omitempty" tf:"credit_specification,omitempty"` + + // Default Version of the launch template. + // +kubebuilder:validation:Optional + DefaultVersion *float64 `json:"defaultVersion,omitempty" tf:"default_version,omitempty"` + + // Description of the launch template. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // If true, enables EC2 Instance Stop Protection. + // +kubebuilder:validation:Optional + DisableAPIStop *bool `json:"disableApiStop,omitempty" tf:"disable_api_stop,omitempty"` + + // If true, enables EC2 Instance + // Termination Protection + // +kubebuilder:validation:Optional + DisableAPITermination *bool `json:"disableApiTermination,omitempty" tf:"disable_api_termination,omitempty"` + + // If true, the launched EC2 instance will be EBS-optimized. + // +kubebuilder:validation:Optional + EBSOptimized *string `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // The elastic GPU to attach to the instance. See Elastic GPU + // below for more details. + // +kubebuilder:validation:Optional + ElasticGpuSpecifications []ElasticGpuSpecificationsParameters `json:"elasticGpuSpecifications,omitempty" tf:"elastic_gpu_specifications,omitempty"` + + // Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // +kubebuilder:validation:Optional + ElasticInferenceAccelerator *ElasticInferenceAcceleratorParameters `json:"elasticInferenceAccelerator,omitempty" tf:"elastic_inference_accelerator,omitempty"` + + // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. 
+ // +kubebuilder:validation:Optional + EnclaveOptions *LaunchTemplateEnclaveOptionsParameters `json:"enclaveOptions,omitempty" tf:"enclave_options,omitempty"` + + // The hibernation options for the instance. See Hibernation Options below for more details. + // +kubebuilder:validation:Optional + HibernationOptions *HibernationOptionsParameters `json:"hibernationOptions,omitempty" tf:"hibernation_options,omitempty"` + + // The IAM Instance Profile to launch the instance with. See Instance Profile + // below for more details. + // +kubebuilder:validation:Optional + IAMInstanceProfile *IAMInstanceProfileParameters `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // The AMI from which to launch the instance. + // +kubebuilder:validation:Optional + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // Shutdown behavior for the instance. Can be stop or terminate. + // (Default: stop). + // +kubebuilder:validation:Optional + InstanceInitiatedShutdownBehavior *string `json:"instanceInitiatedShutdownBehavior,omitempty" tf:"instance_initiated_shutdown_behavior,omitempty"` + + // The market (purchasing) option for the instance. See Market Options + // below for details. + // +kubebuilder:validation:Optional + InstanceMarketOptions *LaunchTemplateInstanceMarketOptionsParameters `json:"instanceMarketOptions,omitempty" tf:"instance_market_options,omitempty"` + + // The attribute requirements for the type of instance. If present then instance_type cannot be present. + // +kubebuilder:validation:Optional + InstanceRequirements *InstanceRequirementsParameters `json:"instanceRequirements,omitempty" tf:"instance_requirements,omitempty"` + + // The type of the instance. If present then instance_requirements cannot be present. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The kernel ID. 
+ // +kubebuilder:validation:Optional + KernelID *string `json:"kernelId,omitempty" tf:"kernel_id,omitempty"` + + // The key name to use for the instance. + // +kubebuilder:validation:Optional + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // A list of license specifications to associate with. See License Specification below for more details. + // +kubebuilder:validation:Optional + LicenseSpecification []LicenseSpecificationParameters `json:"licenseSpecification,omitempty" tf:"license_specification,omitempty"` + + // The maintenance options for the instance. See Maintenance Options below for more details. + // +kubebuilder:validation:Optional + MaintenanceOptions *LaunchTemplateMaintenanceOptionsParameters `json:"maintenanceOptions,omitempty" tf:"maintenance_options,omitempty"` + + // Customize the metadata options for the instance. See Metadata Options below for more details. + // +kubebuilder:validation:Optional + MetadataOptions *LaunchTemplateMetadataOptionsParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // The monitoring option for the instance. See Monitoring below for more details. + // +kubebuilder:validation:Optional + Monitoring *MonitoringParameters `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // The name of the launch template. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Customize network interfaces to be attached at instance boot time. See Network + // Interfaces below for more details. + // +kubebuilder:validation:Optional + NetworkInterfaces []NetworkInterfacesParameters `json:"networkInterfaces,omitempty" tf:"network_interfaces,omitempty"` + + // The placement of the instance. See Placement below for more details. + // +kubebuilder:validation:Optional + Placement *PlacementParameters `json:"placement,omitempty" tf:"placement,omitempty"` + + // The options for the instance hostname. 
The default values are inherited from the subnet. See Private DNS Name Options below for more details. + // +kubebuilder:validation:Optional + PrivateDNSNameOptions *LaunchTemplatePrivateDNSNameOptionsParameters `json:"privateDnsNameOptions,omitempty" tf:"private_dns_name_options,omitempty"` + + // The ID of the RAM disk. + // +kubebuilder:validation:Optional + RAMDiskID *string `json:"ramDiskId,omitempty" tf:"ram_disk_id,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // References to SecurityGroup in ec2 to populate securityGroupNames. + // +kubebuilder:validation:Optional + SecurityGroupNameRefs []v1.Reference `json:"securityGroupNameRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupNames. + // +kubebuilder:validation:Optional + SecurityGroupNameSelector *v1.Selector `json:"securityGroupNameSelector,omitempty" tf:"-"` + + // A list of security group names to associate with. If you are creating Instances in a VPC, use + // vpc_security_group_ids instead. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupNameRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupNameSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupNames []*string `json:"securityGroupNames,omitempty" tf:"security_group_names,omitempty"` + + // The tags to apply to the resources during launch. See Tag Specifications below for more details. Default tags are currently not propagated to ASG created resources so you may wish to inject your default tags into this variable against the relevant child resource types created. 
+ // +kubebuilder:validation:Optional + TagSpecifications []TagSpecificationsParameters `json:"tagSpecifications,omitempty" tf:"tag_specifications,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to update Default Version each update. Conflicts with default_version. + // +kubebuilder:validation:Optional + UpdateDefaultVersion *bool `json:"updateDefaultVersion,omitempty" tf:"update_default_version,omitempty"` + + // The base64-encoded user data to provide when launching the instance. + // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // A list of security group IDs to associate with. Conflicts with network_interfaces.security_groups + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type LaunchTemplatePrivateDNSNameOptionsInitParameters struct { + + // Indicates whether to respond to DNS queries for instance hostnames with DNS A records. 
+ EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty" tf:"enable_resource_name_dns_a_record,omitempty"` + + // Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. + EnableResourceNameDNSAaaaRecord *bool `json:"enableResourceNameDnsAaaaRecord,omitempty" tf:"enable_resource_name_dns_aaaa_record,omitempty"` + + // The type of hostname for Amazon EC2 instances. For IPv4 only subnets, an instance DNS name must be based on the instance IPv4 address. For IPv6 native subnets, an instance DNS name must be based on the instance ID. For dual-stack subnets, you can specify whether DNS names use the instance IPv4 address or the instance ID. Valid values: ip-name and resource-name. + HostnameType *string `json:"hostnameType,omitempty" tf:"hostname_type,omitempty"` +} + +type LaunchTemplatePrivateDNSNameOptionsObservation struct { + + // Indicates whether to respond to DNS queries for instance hostnames with DNS A records. + EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty" tf:"enable_resource_name_dns_a_record,omitempty"` + + // Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. + EnableResourceNameDNSAaaaRecord *bool `json:"enableResourceNameDnsAaaaRecord,omitempty" tf:"enable_resource_name_dns_aaaa_record,omitempty"` + + // The type of hostname for Amazon EC2 instances. For IPv4 only subnets, an instance DNS name must be based on the instance IPv4 address. For IPv6 native subnets, an instance DNS name must be based on the instance ID. For dual-stack subnets, you can specify whether DNS names use the instance IPv4 address or the instance ID. Valid values: ip-name and resource-name. + HostnameType *string `json:"hostnameType,omitempty" tf:"hostname_type,omitempty"` +} + +type LaunchTemplatePrivateDNSNameOptionsParameters struct { + + // Indicates whether to respond to DNS queries for instance hostnames with DNS A records. 
+ // +kubebuilder:validation:Optional + EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty" tf:"enable_resource_name_dns_a_record,omitempty"` + + // Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. + // +kubebuilder:validation:Optional + EnableResourceNameDNSAaaaRecord *bool `json:"enableResourceNameDnsAaaaRecord,omitempty" tf:"enable_resource_name_dns_aaaa_record,omitempty"` + + // The type of hostname for Amazon EC2 instances. For IPv4 only subnets, an instance DNS name must be based on the instance IPv4 address. For IPv6 native subnets, an instance DNS name must be based on the instance ID. For dual-stack subnets, you can specify whether DNS names use the instance IPv4 address or the instance ID. Valid values: ip-name and resource-name. + // +kubebuilder:validation:Optional + HostnameType *string `json:"hostnameType,omitempty" tf:"hostname_type,omitempty"` +} + +type LicenseSpecificationInitParameters struct { + + // ARN of the license configuration. + LicenseConfigurationArn *string `json:"licenseConfigurationArn,omitempty" tf:"license_configuration_arn,omitempty"` +} + +type LicenseSpecificationObservation struct { + + // ARN of the license configuration. + LicenseConfigurationArn *string `json:"licenseConfigurationArn,omitempty" tf:"license_configuration_arn,omitempty"` +} + +type LicenseSpecificationParameters struct { + + // ARN of the license configuration. + // +kubebuilder:validation:Optional + LicenseConfigurationArn *string `json:"licenseConfigurationArn" tf:"license_configuration_arn,omitempty"` +} + +type MemoryGibPerVcpuInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryGibPerVcpuObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. 
+ Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryGibPerVcpuParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryMibInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryMibObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type MemoryMibParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min" tf:"min,omitempty"` +} + +type MonitoringInitParameters struct { + + // If true, the launched EC2 instance will have detailed monitoring enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type MonitoringObservation struct { + + // If true, the launched EC2 instance will have detailed monitoring enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type MonitoringParameters struct { + + // If true, the launched EC2 instance will have detailed monitoring enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type NetworkBandwidthGbpsInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkBandwidthGbpsObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. 
+ Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkBandwidthGbpsParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkInterfaceCountInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkInterfaceCountObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkInterfaceCountParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type NetworkInterfacesInitParameters struct { + + // Associate a Carrier IP address with eth0 for a new network interface. + // Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. + // Boolean value, can be left unset. + AssociateCarrierIPAddress *string `json:"associateCarrierIpAddress,omitempty" tf:"associate_carrier_ip_address,omitempty"` + + // Associate a public ip address with the network interface. + // Boolean value, can be left unset. + AssociatePublicIPAddress *string `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // Whether the network interface should be destroyed on instance termination. + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Description of the network interface. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integer index of the network interface attachment. + DeviceIndex *float64 `json:"deviceIndex,omitempty" tf:"device_index,omitempty"` + + // The number of secondary private IPv4 addresses to assign to a network interface. Conflicts with ipv4_addresses + IPv4AddressCount *float64 `json:"ipv4AddressCount,omitempty" tf:"ipv4_address_count,omitempty"` + + // One or more private IPv4 addresses to associate. Conflicts with ipv4_address_count + // +listType=set + IPv4Addresses []*string `json:"ipv4Addresses,omitempty" tf:"ipv4_addresses,omitempty"` + + // The number of IPv4 prefixes to be automatically assigned to the network interface. Conflicts with ipv4_prefixes + IPv4PrefixCount *float64 `json:"ipv4PrefixCount,omitempty" tf:"ipv4_prefix_count,omitempty"` + + // One or more IPv4 prefixes to be assigned to the network interface. Conflicts with ipv4_prefix_count + // +listType=set + IPv4Prefixes []*string `json:"ipv4Prefixes,omitempty" tf:"ipv4_prefixes,omitempty"` + + // The number of IPv6 addresses to assign to a network interface. Conflicts with ipv6_addresses + IPv6AddressCount *float64 `json:"ipv6AddressCount,omitempty" tf:"ipv6_address_count,omitempty"` + + // One or more specific IPv6 addresses from the IPv6 CIDR block range of your subnet. Conflicts with ipv6_address_count + // +listType=set + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + // The number of IPv6 prefixes to be automatically assigned to the network interface. Conflicts with ipv6_prefixes + IPv6PrefixCount *float64 `json:"ipv6PrefixCount,omitempty" tf:"ipv6_prefix_count,omitempty"` + + // One or more IPv6 prefixes to be assigned to the network interface. Conflicts with ipv6_prefix_count + // +listType=set + IPv6Prefixes []*string `json:"ipv6Prefixes,omitempty" tf:"ipv6_prefixes,omitempty"` + + // The type of network interface. 
To create an Elastic Fabric Adapter (EFA), specify efa. + InterfaceType *string `json:"interfaceType,omitempty" tf:"interface_type,omitempty"` + + // The index of the network card. Some instance types support multiple network cards. The primary network interface must be assigned to network card index 0. The default is network card index 0. + NetworkCardIndex *float64 `json:"networkCardIndex,omitempty" tf:"network_card_index,omitempty"` + + // The ID of the network interface to attach. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.NetworkInterface + NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` + + // Reference to a NetworkInterface in ec2 to populate networkInterfaceId. + // +kubebuilder:validation:Optional + NetworkInterfaceIDRef *v1.Reference `json:"networkInterfaceIdRef,omitempty" tf:"-"` + + // Selector for a NetworkInterface in ec2 to populate networkInterfaceId. + // +kubebuilder:validation:Optional + NetworkInterfaceIDSelector *v1.Selector `json:"networkInterfaceIdSelector,omitempty" tf:"-"` + + // The primary private IPv4 address. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // A list of security group IDs to associate. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The VPC Subnet ID to associate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type NetworkInterfacesObservation struct { + + // Associate a Carrier IP address with eth0 for a new network interface. + // Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. + // Boolean value, can be left unset. + AssociateCarrierIPAddress *string `json:"associateCarrierIpAddress,omitempty" tf:"associate_carrier_ip_address,omitempty"` + + // Associate a public ip address with the network interface. + // Boolean value, can be left unset. + AssociatePublicIPAddress *string `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // Whether the network interface should be destroyed on instance termination. + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Description of the network interface. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integer index of the network interface attachment. 
+ DeviceIndex *float64 `json:"deviceIndex,omitempty" tf:"device_index,omitempty"` + + // The number of secondary private IPv4 addresses to assign to a network interface. Conflicts with ipv4_addresses + IPv4AddressCount *float64 `json:"ipv4AddressCount,omitempty" tf:"ipv4_address_count,omitempty"` + + // One or more private IPv4 addresses to associate. Conflicts with ipv4_address_count + // +listType=set + IPv4Addresses []*string `json:"ipv4Addresses,omitempty" tf:"ipv4_addresses,omitempty"` + + // The number of IPv4 prefixes to be automatically assigned to the network interface. Conflicts with ipv4_prefixes + IPv4PrefixCount *float64 `json:"ipv4PrefixCount,omitempty" tf:"ipv4_prefix_count,omitempty"` + + // One or more IPv4 prefixes to be assigned to the network interface. Conflicts with ipv4_prefix_count + // +listType=set + IPv4Prefixes []*string `json:"ipv4Prefixes,omitempty" tf:"ipv4_prefixes,omitempty"` + + // The number of IPv6 addresses to assign to a network interface. Conflicts with ipv6_addresses + IPv6AddressCount *float64 `json:"ipv6AddressCount,omitempty" tf:"ipv6_address_count,omitempty"` + + // One or more specific IPv6 addresses from the IPv6 CIDR block range of your subnet. Conflicts with ipv6_address_count + // +listType=set + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + // The number of IPv6 prefixes to be automatically assigned to the network interface. Conflicts with ipv6_prefixes + IPv6PrefixCount *float64 `json:"ipv6PrefixCount,omitempty" tf:"ipv6_prefix_count,omitempty"` + + // One or more IPv6 prefixes to be assigned to the network interface. Conflicts with ipv6_prefix_count + // +listType=set + IPv6Prefixes []*string `json:"ipv6Prefixes,omitempty" tf:"ipv6_prefixes,omitempty"` + + // The type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. + InterfaceType *string `json:"interfaceType,omitempty" tf:"interface_type,omitempty"` + + // The index of the network card. 
Some instance types support multiple network cards. The primary network interface must be assigned to network card index 0. The default is network card index 0. + NetworkCardIndex *float64 `json:"networkCardIndex,omitempty" tf:"network_card_index,omitempty"` + + // The ID of the network interface to attach. + NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` + + // The primary private IPv4 address. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // A list of security group IDs to associate. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The VPC Subnet ID to associate. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type NetworkInterfacesParameters struct { + + // Associate a Carrier IP address with eth0 for a new network interface. + // Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. + // Boolean value, can be left unset. + // +kubebuilder:validation:Optional + AssociateCarrierIPAddress *string `json:"associateCarrierIpAddress,omitempty" tf:"associate_carrier_ip_address,omitempty"` + + // Associate a public ip address with the network interface. + // Boolean value, can be left unset. + // +kubebuilder:validation:Optional + AssociatePublicIPAddress *string `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // Whether the network interface should be destroyed on instance termination. + // +kubebuilder:validation:Optional + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Description of the network interface. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integer index of the network interface attachment. + // +kubebuilder:validation:Optional + DeviceIndex *float64 `json:"deviceIndex,omitempty" tf:"device_index,omitempty"` + + // The number of secondary private IPv4 addresses to assign to a network interface. Conflicts with ipv4_addresses + // +kubebuilder:validation:Optional + IPv4AddressCount *float64 `json:"ipv4AddressCount,omitempty" tf:"ipv4_address_count,omitempty"` + + // One or more private IPv4 addresses to associate. Conflicts with ipv4_address_count + // +kubebuilder:validation:Optional + // +listType=set + IPv4Addresses []*string `json:"ipv4Addresses,omitempty" tf:"ipv4_addresses,omitempty"` + + // The number of IPv4 prefixes to be automatically assigned to the network interface. Conflicts with ipv4_prefixes + // +kubebuilder:validation:Optional + IPv4PrefixCount *float64 `json:"ipv4PrefixCount,omitempty" tf:"ipv4_prefix_count,omitempty"` + + // One or more IPv4 prefixes to be assigned to the network interface. Conflicts with ipv4_prefix_count + // +kubebuilder:validation:Optional + // +listType=set + IPv4Prefixes []*string `json:"ipv4Prefixes,omitempty" tf:"ipv4_prefixes,omitempty"` + + // The number of IPv6 addresses to assign to a network interface. Conflicts with ipv6_addresses + // +kubebuilder:validation:Optional + IPv6AddressCount *float64 `json:"ipv6AddressCount,omitempty" tf:"ipv6_address_count,omitempty"` + + // One or more specific IPv6 addresses from the IPv6 CIDR block range of your subnet. Conflicts with ipv6_address_count + // +kubebuilder:validation:Optional + // +listType=set + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + // The number of IPv6 prefixes to be automatically assigned to the network interface. 
Conflicts with ipv6_prefixes + // +kubebuilder:validation:Optional + IPv6PrefixCount *float64 `json:"ipv6PrefixCount,omitempty" tf:"ipv6_prefix_count,omitempty"` + + // One or more IPv6 prefixes to be assigned to the network interface. Conflicts with ipv6_prefix_count + // +kubebuilder:validation:Optional + // +listType=set + IPv6Prefixes []*string `json:"ipv6Prefixes,omitempty" tf:"ipv6_prefixes,omitempty"` + + // The type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. + // +kubebuilder:validation:Optional + InterfaceType *string `json:"interfaceType,omitempty" tf:"interface_type,omitempty"` + + // The index of the network card. Some instance types support multiple network cards. The primary network interface must be assigned to network card index 0. The default is network card index 0. + // +kubebuilder:validation:Optional + NetworkCardIndex *float64 `json:"networkCardIndex,omitempty" tf:"network_card_index,omitempty"` + + // The ID of the network interface to attach. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.NetworkInterface + // +kubebuilder:validation:Optional + NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` + + // Reference to a NetworkInterface in ec2 to populate networkInterfaceId. + // +kubebuilder:validation:Optional + NetworkInterfaceIDRef *v1.Reference `json:"networkInterfaceIdRef,omitempty" tf:"-"` + + // Selector for a NetworkInterface in ec2 to populate networkInterfaceId. + // +kubebuilder:validation:Optional + NetworkInterfaceIDSelector *v1.Selector `json:"networkInterfaceIdSelector,omitempty" tf:"-"` + + // The primary private IPv4 address. + // +kubebuilder:validation:Optional + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroups. 
+ // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // A list of security group IDs to associate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The VPC Subnet ID to associate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type PlacementInitParameters struct { + + // The affinity setting for an instance on a Dedicated Host. + Affinity *string `json:"affinity,omitempty" tf:"affinity,omitempty"` + + // The Availability Zone for the instance. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The name of the placement group for the instance. + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` + + // The ID of the Dedicated Host for the instance. + HostID *string `json:"hostId,omitempty" tf:"host_id,omitempty"` + + // The ARN of the Host Resource Group in which to launch instances. 
+ HostResourceGroupArn *string `json:"hostResourceGroupArn,omitempty" tf:"host_resource_group_arn,omitempty"` + + // The number of the partition the instance should launch in. Valid only if the placement group strategy is set to partition. + PartitionNumber *float64 `json:"partitionNumber,omitempty" tf:"partition_number,omitempty"` + + // Reserved for future use. + SpreadDomain *string `json:"spreadDomain,omitempty" tf:"spread_domain,omitempty"` + + // The tenancy of the instance (if the instance is running in a VPC). Can be default, dedicated, or host. + Tenancy *string `json:"tenancy,omitempty" tf:"tenancy,omitempty"` +} + +type PlacementObservation struct { + + // The affinity setting for an instance on a Dedicated Host. + Affinity *string `json:"affinity,omitempty" tf:"affinity,omitempty"` + + // The Availability Zone for the instance. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The name of the placement group for the instance. + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` + + // The ID of the Dedicated Host for the instance. + HostID *string `json:"hostId,omitempty" tf:"host_id,omitempty"` + + // The ARN of the Host Resource Group in which to launch instances. + HostResourceGroupArn *string `json:"hostResourceGroupArn,omitempty" tf:"host_resource_group_arn,omitempty"` + + // The number of the partition the instance should launch in. Valid only if the placement group strategy is set to partition. + PartitionNumber *float64 `json:"partitionNumber,omitempty" tf:"partition_number,omitempty"` + + // Reserved for future use. + SpreadDomain *string `json:"spreadDomain,omitempty" tf:"spread_domain,omitempty"` + + // The tenancy of the instance (if the instance is running in a VPC). Can be default, dedicated, or host. 
+ Tenancy *string `json:"tenancy,omitempty" tf:"tenancy,omitempty"` +} + +type PlacementParameters struct { + + // The affinity setting for an instance on a Dedicated Host. + // +kubebuilder:validation:Optional + Affinity *string `json:"affinity,omitempty" tf:"affinity,omitempty"` + + // The Availability Zone for the instance. + // +kubebuilder:validation:Optional + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The name of the placement group for the instance. + // +kubebuilder:validation:Optional + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` + + // The ID of the Dedicated Host for the instance. + // +kubebuilder:validation:Optional + HostID *string `json:"hostId,omitempty" tf:"host_id,omitempty"` + + // The ARN of the Host Resource Group in which to launch instances. + // +kubebuilder:validation:Optional + HostResourceGroupArn *string `json:"hostResourceGroupArn,omitempty" tf:"host_resource_group_arn,omitempty"` + + // The number of the partition the instance should launch in. Valid only if the placement group strategy is set to partition. + // +kubebuilder:validation:Optional + PartitionNumber *float64 `json:"partitionNumber,omitempty" tf:"partition_number,omitempty"` + + // Reserved for future use. + // +kubebuilder:validation:Optional + SpreadDomain *string `json:"spreadDomain,omitempty" tf:"spread_domain,omitempty"` + + // The tenancy of the instance (if the instance is running in a VPC). Can be default, dedicated, or host. + // +kubebuilder:validation:Optional + Tenancy *string `json:"tenancy,omitempty" tf:"tenancy,omitempty"` +} + +type TagSpecificationsInitParameters struct { + + // The type of resource to tag. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // A map of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TagSpecificationsObservation struct { + + // The type of resource to tag. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // A map of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TagSpecificationsParameters struct { + + // The type of resource to tag. + // +kubebuilder:validation:Optional + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // A map of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TotalLocalStorageGbInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type TotalLocalStorageGbObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type TotalLocalStorageGbParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type VcpuCountInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type VcpuCountObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type VcpuCountParameters struct { + + // Maximum. 
+ // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min" tf:"min,omitempty"` +} + +// LaunchTemplateSpec defines the desired state of LaunchTemplate +type LaunchTemplateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LaunchTemplateParameters_2 `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LaunchTemplateInitParameters_2 `json:"initProvider,omitempty"` +} + +// LaunchTemplateStatus defines the observed state of LaunchTemplate. +type LaunchTemplateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LaunchTemplateObservation_2 `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LaunchTemplate is the Schema for the LaunchTemplates API. Provides an EC2 launch template resource. Can be used to create instances or auto scaling groups. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LaunchTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec LaunchTemplateSpec `json:"spec"` + Status LaunchTemplateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LaunchTemplateList contains a list of LaunchTemplates +type LaunchTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LaunchTemplate `json:"items"` +} + +// Repository type metadata. +var ( + LaunchTemplate_Kind = "LaunchTemplate" + LaunchTemplate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LaunchTemplate_Kind}.String() + LaunchTemplate_KindAPIVersion = LaunchTemplate_Kind + "." + CRDGroupVersion.String() + LaunchTemplate_GroupVersionKind = CRDGroupVersion.WithKind(LaunchTemplate_Kind) +) + +func init() { + SchemeBuilder.Register(&LaunchTemplate{}, &LaunchTemplateList{}) +} diff --git a/apis/ec2/v1beta2/zz_route_types.go b/apis/ec2/v1beta2/zz_route_types.go index aa814a0fba..ec173f0e0b 100755 --- a/apis/ec2/v1beta2/zz_route_types.go +++ b/apis/ec2/v1beta2/zz_route_types.go @@ -116,7 +116,7 @@ type RouteInitParameters struct { TransitGatewayIDSelector *v1.Selector `json:"transitGatewayIdSelector,omitempty" tf:"-"` // Identifier of a VPC Endpoint. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCEndpoint VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` // Reference to a VPCEndpoint in ec2 to populate vpcEndpointId. @@ -128,7 +128,7 @@ type RouteInitParameters struct { VPCEndpointIDSelector *v1.Selector `json:"vpcEndpointIdSelector,omitempty" tf:"-"` // Identifier of a VPC peering connection. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCPeeringConnection + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCPeeringConnection VPCPeeringConnectionID *string `json:"vpcPeeringConnectionId,omitempty" tf:"vpc_peering_connection_id,omitempty"` // Reference to a VPCPeeringConnection in ec2 to populate vpcPeeringConnectionId. @@ -320,7 +320,7 @@ type RouteParameters struct { TransitGatewayIDSelector *v1.Selector `json:"transitGatewayIdSelector,omitempty" tf:"-"` // Identifier of a VPC Endpoint. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCEndpoint // +kubebuilder:validation:Optional VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` @@ -333,7 +333,7 @@ type RouteParameters struct { VPCEndpointIDSelector *v1.Selector `json:"vpcEndpointIdSelector,omitempty" tf:"-"` // Identifier of a VPC peering connection. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCPeeringConnection + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCPeeringConnection // +kubebuilder:validation:Optional VPCPeeringConnectionID *string `json:"vpcPeeringConnectionId,omitempty" tf:"vpc_peering_connection_id,omitempty"` diff --git a/apis/ec2/v1beta2/zz_spotfleetrequest_terraformed.go b/apis/ec2/v1beta2/zz_spotfleetrequest_terraformed.go new file mode 100755 index 0000000000..d4cb29f986 --- /dev/null +++ b/apis/ec2/v1beta2/zz_spotfleetrequest_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpotFleetRequest +func (mg *SpotFleetRequest) GetTerraformResourceType() string { + return "aws_spot_fleet_request" +} + +// GetConnectionDetailsMapping for this SpotFleetRequest +func (tr *SpotFleetRequest) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpotFleetRequest +func (tr *SpotFleetRequest) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpotFleetRequest +func (tr *SpotFleetRequest) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpotFleetRequest +func (tr *SpotFleetRequest) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpotFleetRequest +func (tr *SpotFleetRequest) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpotFleetRequest +func (tr *SpotFleetRequest) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpotFleetRequest +func (tr *SpotFleetRequest) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpotFleetRequest +func (tr *SpotFleetRequest) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SpotFleetRequest using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *SpotFleetRequest) LateInitialize(attrs []byte) (bool, error) {
+	params := &SpotFleetRequestParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *SpotFleetRequest) GetTerraformSchemaVersion() int {
+	return 1
+}
diff --git a/apis/ec2/v1beta2/zz_spotfleetrequest_types.go b/apis/ec2/v1beta2/zz_spotfleetrequest_types.go
new file mode 100755
index 0000000000..fd7c67713b
--- /dev/null
+++ b/apis/ec2/v1beta2/zz_spotfleetrequest_types.go
@@ -0,0 +1,1531 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type CapacityRebalanceInitParameters struct {
+
+	// The replacement strategy to use. Only available for spot fleets with fleet_type set to maintain. Valid values: launch.
+ ReplacementStrategy *string `json:"replacementStrategy,omitempty" tf:"replacement_strategy,omitempty"` +} + +type CapacityRebalanceObservation struct { + + // The replacement strategy to use. Only available for spot fleets with fleet_type set to maintain. Valid values: launch. + ReplacementStrategy *string `json:"replacementStrategy,omitempty" tf:"replacement_strategy,omitempty"` +} + +type CapacityRebalanceParameters struct { + + // The replacement strategy to use. Only available for spot fleets with fleet_type set to maintain. Valid values: launch. + // +kubebuilder:validation:Optional + ReplacementStrategy *string `json:"replacementStrategy,omitempty" tf:"replacement_strategy,omitempty"` +} + +type InstanceRequirementsAcceleratorCountInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsAcceleratorCountObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsAcceleratorCountParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsAcceleratorTotalMemoryMibInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsAcceleratorTotalMemoryMibObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsAcceleratorTotalMemoryMibParameters struct { + + // Maximum. 
+ // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsBaselineEBSBandwidthMbpsInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsBaselineEBSBandwidthMbpsObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsBaselineEBSBandwidthMbpsParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsMemoryGibPerVcpuInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsMemoryGibPerVcpuObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsMemoryGibPerVcpuParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsMemoryMibInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsMemoryMibObservation struct { + + // Maximum. 
+ Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsMemoryMibParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsNetworkBandwidthGbpsInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsNetworkBandwidthGbpsObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsNetworkBandwidthGbpsParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsNetworkInterfaceCountInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsNetworkInterfaceCountObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsNetworkInterfaceCountParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsTotalLocalStorageGbInitParameters struct { + + // Maximum. 
+ Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsTotalLocalStorageGbObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsTotalLocalStorageGbParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsVcpuCountInitParameters struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsVcpuCountObservation struct { + + // Maximum. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type InstanceRequirementsVcpuCountParameters struct { + + // Maximum. + // +kubebuilder:validation:Optional + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // Minimum. + // +kubebuilder:validation:Optional + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` +} + +type LaunchSpecificationEBSBlockDeviceInitParameters struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // The name of the launch template. Conflicts with id. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ID of the launch template. Conflicts with name. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The ID of the launch template. Conflicts with name. 
+ SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type LaunchSpecificationEBSBlockDeviceObservation struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // The name of the launch template. Conflicts with id. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ID of the launch template. Conflicts with name. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The ID of the launch template. Conflicts with name. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type LaunchSpecificationEBSBlockDeviceParameters struct { + + // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // The name of the launch template. Conflicts with id. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ID of the launch template. Conflicts with name. 
+ // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The ID of the launch template. Conflicts with name. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type LaunchSpecificationEphemeralBlockDeviceInitParameters struct { + + // The name of the launch template. Conflicts with id. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // The name of the launch template. Conflicts with id. + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type LaunchSpecificationEphemeralBlockDeviceObservation struct { + + // The name of the launch template. Conflicts with id. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // The name of the launch template. Conflicts with id. + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type LaunchSpecificationEphemeralBlockDeviceParameters struct { + + // The name of the launch template. Conflicts with id. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // The name of the launch template. Conflicts with id. 
+ // +kubebuilder:validation:Optional + VirtualName *string `json:"virtualName" tf:"virtual_name,omitempty"` +} + +type LaunchSpecificationInitParameters struct { + AMI *string `json:"ami,omitempty" tf:"ami,omitempty"` + + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // The availability zone in which to place the request. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + EBSBlockDevice []LaunchSpecificationEBSBlockDeviceInitParameters `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + EphemeralBlockDevice []LaunchSpecificationEphemeralBlockDeviceInitParameters `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // takes aws_iam_instance_profile attribute arn as input. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + IAMInstanceProfileArn *string `json:"iamInstanceProfileArn,omitempty" tf:"iam_instance_profile_arn,omitempty"` + + // Reference to a InstanceProfile in iam to populate iamInstanceProfileArn. + // +kubebuilder:validation:Optional + IAMInstanceProfileArnRef *v1.Reference `json:"iamInstanceProfileArnRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate iamInstanceProfileArn. + // +kubebuilder:validation:Optional + IAMInstanceProfileArnSelector *v1.Selector `json:"iamInstanceProfileArnSelector,omitempty" tf:"-"` + + // The type of instance to request. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The name of the launch template. Conflicts with id. 
+ KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + Monitoring *bool `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + PlacementTenancy *string `json:"placementTenancy,omitempty" tf:"placement_tenancy,omitempty"` + + RootBlockDevice []LaunchSpecificationRootBlockDeviceInitParameters `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // The maximum bid price per unit hour. + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The subnet in which to launch the requested instance. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` + + // The capacity added to the fleet by a fulfilled request. + WeightedCapacity *string `json:"weightedCapacity,omitempty" tf:"weighted_capacity,omitempty"` +} + +type LaunchSpecificationObservation struct { + AMI *string `json:"ami,omitempty" tf:"ami,omitempty"` + + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // The availability zone in which to place the request. 
+ AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + EBSBlockDevice []LaunchSpecificationEBSBlockDeviceObservation `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + EphemeralBlockDevice []LaunchSpecificationEphemeralBlockDeviceObservation `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // takes aws_iam_instance_profile attribute arn as input. + IAMInstanceProfileArn *string `json:"iamInstanceProfileArn,omitempty" tf:"iam_instance_profile_arn,omitempty"` + + // The type of instance to request. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The name of the launch template. Conflicts with id. + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + Monitoring *bool `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + PlacementTenancy *string `json:"placementTenancy,omitempty" tf:"placement_tenancy,omitempty"` + + RootBlockDevice []LaunchSpecificationRootBlockDeviceObservation `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // The maximum bid price per unit hour. + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The subnet in which to launch the requested instance. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` + + // The capacity added to the fleet by a fulfilled request. + WeightedCapacity *string `json:"weightedCapacity,omitempty" tf:"weighted_capacity,omitempty"` +} + +type LaunchSpecificationParameters struct { + + // +kubebuilder:validation:Optional + AMI *string `json:"ami" tf:"ami,omitempty"` + + // +kubebuilder:validation:Optional + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // The availability zone in which to place the request. + // +kubebuilder:validation:Optional + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // +kubebuilder:validation:Optional + EBSBlockDevice []LaunchSpecificationEBSBlockDeviceParameters `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + // +kubebuilder:validation:Optional + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // +kubebuilder:validation:Optional + EphemeralBlockDevice []LaunchSpecificationEphemeralBlockDeviceParameters `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + // +kubebuilder:validation:Optional + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // takes aws_iam_instance_profile attribute arn as input. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + IAMInstanceProfileArn *string `json:"iamInstanceProfileArn,omitempty" tf:"iam_instance_profile_arn,omitempty"` + + // Reference to a InstanceProfile in iam to populate iamInstanceProfileArn. + // +kubebuilder:validation:Optional + IAMInstanceProfileArnRef *v1.Reference `json:"iamInstanceProfileArnRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate iamInstanceProfileArn. + // +kubebuilder:validation:Optional + IAMInstanceProfileArnSelector *v1.Selector `json:"iamInstanceProfileArnSelector,omitempty" tf:"-"` + + // The type of instance to request. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType" tf:"instance_type,omitempty"` + + // The name of the launch template. Conflicts with id. + // +kubebuilder:validation:Optional + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // +kubebuilder:validation:Optional + Monitoring *bool `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // +kubebuilder:validation:Optional + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + // +kubebuilder:validation:Optional + PlacementTenancy *string `json:"placementTenancy,omitempty" tf:"placement_tenancy,omitempty"` + + // +kubebuilder:validation:Optional + RootBlockDevice []LaunchSpecificationRootBlockDeviceParameters `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // The maximum bid price per unit hour. + // +kubebuilder:validation:Optional + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The subnet in which to launch the requested instance. 
+ // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` + + // The capacity added to the fleet by a fulfilled request. + // +kubebuilder:validation:Optional + WeightedCapacity *string `json:"weightedCapacity,omitempty" tf:"weighted_capacity,omitempty"` +} + +type LaunchSpecificationRootBlockDeviceInitParameters struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ID of the launch template. Conflicts with name. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type LaunchSpecificationRootBlockDeviceObservation struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ID of the launch template. Conflicts with name. 
+ KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type LaunchSpecificationRootBlockDeviceParameters struct { + + // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ID of the launch template. Conflicts with name. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type LaunchTemplateConfigInitParameters struct { + + // Launch template specification. See Launch Template Specification below for more details. + LaunchTemplateSpecification *LaunchTemplateSpecificationInitParameters `json:"launchTemplateSpecification,omitempty" tf:"launch_template_specification,omitempty"` + + // One or more override configurations. See Overrides below for more details. + Overrides []OverridesInitParameters `json:"overrides,omitempty" tf:"overrides,omitempty"` +} + +type LaunchTemplateConfigObservation struct { + + // Launch template specification. See Launch Template Specification below for more details. 
+ LaunchTemplateSpecification *LaunchTemplateSpecificationObservation `json:"launchTemplateSpecification,omitempty" tf:"launch_template_specification,omitempty"` + + // One or more override configurations. See Overrides below for more details. + Overrides []OverridesObservation `json:"overrides,omitempty" tf:"overrides,omitempty"` +} + +type LaunchTemplateConfigParameters struct { + + // Launch template specification. See Launch Template Specification below for more details. + // +kubebuilder:validation:Optional + LaunchTemplateSpecification *LaunchTemplateSpecificationParameters `json:"launchTemplateSpecification" tf:"launch_template_specification,omitempty"` + + // One or more override configurations. See Overrides below for more details. + // +kubebuilder:validation:Optional + Overrides []OverridesParameters `json:"overrides,omitempty" tf:"overrides,omitempty"` +} + +type LaunchTemplateSpecificationInitParameters struct { + + // The ID of the launch template. Conflicts with name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + + // The name of the launch template. Conflicts with id. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template version. Unlike the autoscaling equivalent, does not support $Latest or $Default, so use the launch_template resource's attribute, e.g., "${aws_launch_template.foo.latest_version}". It will use the default version if omitted. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("latest_version",true) + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate version. + // +kubebuilder:validation:Optional + VersionRef *v1.Reference `json:"versionRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate version. + // +kubebuilder:validation:Optional + VersionSelector *v1.Selector `json:"versionSelector,omitempty" tf:"-"` +} + +type LaunchTemplateSpecificationObservation struct { + + // The ID of the launch template. Conflicts with name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the launch template. Conflicts with id. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template version. Unlike the autoscaling equivalent, does not support $Latest or $Default, so use the launch_template resource's attribute, e.g., "${aws_launch_template.foo.latest_version}". It will use the default version if omitted. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateSpecificationParameters struct { + + // The ID of the launch template. Conflicts with name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate id. 
+ // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + + // The name of the launch template. Conflicts with id. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template version. Unlike the autoscaling equivalent, does not support $Latest or $Default, so use the launch_template resource's attribute, e.g., "${aws_launch_template.foo.latest_version}". It will use the default version if omitted. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.LaunchTemplate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("latest_version",true) + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Reference to a LaunchTemplate in ec2 to populate version. + // +kubebuilder:validation:Optional + VersionRef *v1.Reference `json:"versionRef,omitempty" tf:"-"` + + // Selector for a LaunchTemplate in ec2 to populate version. + // +kubebuilder:validation:Optional + VersionSelector *v1.Selector `json:"versionSelector,omitempty" tf:"-"` +} + +type OverridesInitParameters struct { + + // The availability zone in which to place the request. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The instance requirements. See below. + InstanceRequirements *OverridesInstanceRequirementsInitParameters `json:"instanceRequirements,omitempty" tf:"instance_requirements,omitempty"` + + // The type of instance to request. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The priority for the launch template override. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The maximum bid price per unit hour. 
+ SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"`
+
+ // The subnet in which to launch the requested instance.
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"`
+
+ // The capacity added to the fleet by a fulfilled request.
+ WeightedCapacity *float64 `json:"weightedCapacity,omitempty" tf:"weighted_capacity,omitempty"`
+}
+
+type OverridesInstanceRequirementsInitParameters struct {
+
+ // Block describing the minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips). Default is no minimum or maximum.
+ AcceleratorCount *InstanceRequirementsAcceleratorCountInitParameters `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"`
+
+ // List of accelerator manufacturer names. Default is any manufacturer.
+ // +listType=set
+ AcceleratorManufacturers []*string `json:"acceleratorManufacturers,omitempty" tf:"accelerator_manufacturers,omitempty"`
+
+ // List of accelerator names. Default is any accelerator.
+ // +listType=set
+ AcceleratorNames []*string `json:"acceleratorNames,omitempty" tf:"accelerator_names,omitempty"`
+
+ // Block describing the minimum and maximum total memory of the accelerators. Default is no minimum or maximum.
+ AcceleratorTotalMemoryMib *InstanceRequirementsAcceleratorTotalMemoryMibInitParameters `json:"acceleratorTotalMemoryMib,omitempty" tf:"accelerator_total_memory_mib,omitempty"`
+
+ // List of accelerator types. Default is any accelerator type.
+ // +listType=set
+ AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"`
+
+ // List of instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. 
For example, if you specify c5*, you are allowing the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are allowing all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is all instance types.
+ // +listType=set
+ AllowedInstanceTypes []*string `json:"allowedInstanceTypes,omitempty" tf:"allowed_instance_types,omitempty"`
+
+ // Indicate whether bare metal instance types should be included, excluded, or required. Default is excluded.
+ BareMetal *string `json:"bareMetal,omitempty" tf:"bare_metal,omitempty"`
+
+ // Block describing the minimum and maximum baseline EBS bandwidth, in Mbps. Default is no minimum or maximum.
+ BaselineEBSBandwidthMbps *InstanceRequirementsBaselineEBSBandwidthMbpsInitParameters `json:"baselineEbsBandwidthMbps,omitempty" tf:"baseline_ebs_bandwidth_mbps,omitempty"`
+
+ // Indicate whether burstable performance instance types should be included, excluded, or required. Default is excluded.
+ BurstablePerformance *string `json:"burstablePerformance,omitempty" tf:"burstable_performance,omitempty"`
+
+ // List of CPU manufacturer names. Default is any manufacturer.
+ // +listType=set
+ CPUManufacturers []*string `json:"cpuManufacturers,omitempty" tf:"cpu_manufacturers,omitempty"`
+
+ // List of instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are excluding all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is no excluded instance types. 
+ // +listType=set + ExcludedInstanceTypes []*string `json:"excludedInstanceTypes,omitempty" tf:"excluded_instance_types,omitempty"` + + // List of instance generation names. Default is any generation. + // +listType=set + InstanceGenerations []*string `json:"instanceGenerations,omitempty" tf:"instance_generations,omitempty"` + + // Indicate whether instance types with local storage volumes are included, excluded, or required. Default is included. + LocalStorage *string `json:"localStorage,omitempty" tf:"local_storage,omitempty"` + + // List of local storage type names. Default any storage type. + // +listType=set + LocalStorageTypes []*string `json:"localStorageTypes,omitempty" tf:"local_storage_types,omitempty"` + + // Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. + MemoryGibPerVcpu *InstanceRequirementsMemoryGibPerVcpuInitParameters `json:"memoryGibPerVcpu,omitempty" tf:"memory_gib_per_vcpu,omitempty"` + + // Block describing the minimum and maximum amount of memory (MiB). Default is no maximum. + MemoryMib *InstanceRequirementsMemoryMibInitParameters `json:"memoryMib,omitempty" tf:"memory_mib,omitempty"` + + // Block describing the minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default is no minimum or maximum. + NetworkBandwidthGbps *InstanceRequirementsNetworkBandwidthGbpsInitParameters `json:"networkBandwidthGbps,omitempty" tf:"network_bandwidth_gbps,omitempty"` + + // Block describing the minimum and maximum number of network interfaces. Default is no minimum or maximum. + NetworkInterfaceCount *InstanceRequirementsNetworkInterfaceCountInitParameters `json:"networkInterfaceCount,omitempty" tf:"network_interface_count,omitempty"` + + // The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. 
When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 20. + OnDemandMaxPricePercentageOverLowestPrice *float64 `json:"onDemandMaxPricePercentageOverLowestPrice,omitempty" tf:"on_demand_max_price_percentage_over_lowest_price,omitempty"` + + // Indicate whether instance types must support On-Demand Instance Hibernation, either true or false. Default is false. + RequireHibernateSupport *bool `json:"requireHibernateSupport,omitempty" tf:"require_hibernate_support,omitempty"` + + // The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. + SpotMaxPricePercentageOverLowestPrice *float64 `json:"spotMaxPricePercentageOverLowestPrice,omitempty" tf:"spot_max_price_percentage_over_lowest_price,omitempty"` + + // Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. + TotalLocalStorageGb *InstanceRequirementsTotalLocalStorageGbInitParameters `json:"totalLocalStorageGb,omitempty" tf:"total_local_storage_gb,omitempty"` + + // Block describing the minimum and maximum number of vCPUs. Default is no maximum. 
+ VcpuCount *InstanceRequirementsVcpuCountInitParameters `json:"vcpuCount,omitempty" tf:"vcpu_count,omitempty"`
+}
+
+type OverridesInstanceRequirementsObservation struct {
+
+ // Block describing the minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips). Default is no minimum or maximum.
+ AcceleratorCount *InstanceRequirementsAcceleratorCountObservation `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"`
+
+ // List of accelerator manufacturer names. Default is any manufacturer.
+ // +listType=set
+ AcceleratorManufacturers []*string `json:"acceleratorManufacturers,omitempty" tf:"accelerator_manufacturers,omitempty"`
+
+ // List of accelerator names. Default is any accelerator.
+ // +listType=set
+ AcceleratorNames []*string `json:"acceleratorNames,omitempty" tf:"accelerator_names,omitempty"`
+
+ // Block describing the minimum and maximum total memory of the accelerators. Default is no minimum or maximum.
+ AcceleratorTotalMemoryMib *InstanceRequirementsAcceleratorTotalMemoryMibObservation `json:"acceleratorTotalMemoryMib,omitempty" tf:"accelerator_total_memory_mib,omitempty"`
+
+ // List of accelerator types. Default is any accelerator type.
+ // +listType=set
+ AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"`
+
+ // List of instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are allowing the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are allowing all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. 
Default is all instance types.
+ // +listType=set
+ AllowedInstanceTypes []*string `json:"allowedInstanceTypes,omitempty" tf:"allowed_instance_types,omitempty"`
+
+ // Indicate whether bare metal instance types should be included, excluded, or required. Default is excluded.
+ BareMetal *string `json:"bareMetal,omitempty" tf:"bare_metal,omitempty"`
+
+ // Block describing the minimum and maximum baseline EBS bandwidth, in Mbps. Default is no minimum or maximum.
+ BaselineEBSBandwidthMbps *InstanceRequirementsBaselineEBSBandwidthMbpsObservation `json:"baselineEbsBandwidthMbps,omitempty" tf:"baseline_ebs_bandwidth_mbps,omitempty"`
+
+ // Indicate whether burstable performance instance types should be included, excluded, or required. Default is excluded.
+ BurstablePerformance *string `json:"burstablePerformance,omitempty" tf:"burstable_performance,omitempty"`
+
+ // List of CPU manufacturer names. Default is any manufacturer.
+ // +listType=set
+ CPUManufacturers []*string `json:"cpuManufacturers,omitempty" tf:"cpu_manufacturers,omitempty"`
+
+ // List of instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are excluding all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is no excluded instance types.
+ // +listType=set
+ ExcludedInstanceTypes []*string `json:"excludedInstanceTypes,omitempty" tf:"excluded_instance_types,omitempty"`
+
+ // List of instance generation names. Default is any generation. 
+ // +listType=set + InstanceGenerations []*string `json:"instanceGenerations,omitempty" tf:"instance_generations,omitempty"` + + // Indicate whether instance types with local storage volumes are included, excluded, or required. Default is included. + LocalStorage *string `json:"localStorage,omitempty" tf:"local_storage,omitempty"` + + // List of local storage type names. Default any storage type. + // +listType=set + LocalStorageTypes []*string `json:"localStorageTypes,omitempty" tf:"local_storage_types,omitempty"` + + // Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. + MemoryGibPerVcpu *InstanceRequirementsMemoryGibPerVcpuObservation `json:"memoryGibPerVcpu,omitempty" tf:"memory_gib_per_vcpu,omitempty"` + + // Block describing the minimum and maximum amount of memory (MiB). Default is no maximum. + MemoryMib *InstanceRequirementsMemoryMibObservation `json:"memoryMib,omitempty" tf:"memory_mib,omitempty"` + + // Block describing the minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default is no minimum or maximum. + NetworkBandwidthGbps *InstanceRequirementsNetworkBandwidthGbpsObservation `json:"networkBandwidthGbps,omitempty" tf:"network_bandwidth_gbps,omitempty"` + + // Block describing the minimum and maximum number of network interfaces. Default is no minimum or maximum. + NetworkInterfaceCount *InstanceRequirementsNetworkInterfaceCountObservation `json:"networkInterfaceCount,omitempty" tf:"network_interface_count,omitempty"` + + // The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. 
The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 20. + OnDemandMaxPricePercentageOverLowestPrice *float64 `json:"onDemandMaxPricePercentageOverLowestPrice,omitempty" tf:"on_demand_max_price_percentage_over_lowest_price,omitempty"` + + // Indicate whether instance types must support On-Demand Instance Hibernation, either true or false. Default is false. + RequireHibernateSupport *bool `json:"requireHibernateSupport,omitempty" tf:"require_hibernate_support,omitempty"` + + // The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. + SpotMaxPricePercentageOverLowestPrice *float64 `json:"spotMaxPricePercentageOverLowestPrice,omitempty" tf:"spot_max_price_percentage_over_lowest_price,omitempty"` + + // Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. + TotalLocalStorageGb *InstanceRequirementsTotalLocalStorageGbObservation `json:"totalLocalStorageGb,omitempty" tf:"total_local_storage_gb,omitempty"` + + // Block describing the minimum and maximum number of vCPUs. Default is no maximum. + VcpuCount *InstanceRequirementsVcpuCountObservation `json:"vcpuCount,omitempty" tf:"vcpu_count,omitempty"` +} + +type OverridesInstanceRequirementsParameters struct { + + // Block describing the minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips). Default is no minimum or maximum. 
+ // +kubebuilder:validation:Optional
+ AcceleratorCount *InstanceRequirementsAcceleratorCountParameters `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"`
+
+ // List of accelerator manufacturer names. Default is any manufacturer.
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ AcceleratorManufacturers []*string `json:"acceleratorManufacturers,omitempty" tf:"accelerator_manufacturers,omitempty"`
+
+ // List of accelerator names. Default is any accelerator.
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ AcceleratorNames []*string `json:"acceleratorNames,omitempty" tf:"accelerator_names,omitempty"`
+
+ // Block describing the minimum and maximum total memory of the accelerators. Default is no minimum or maximum.
+ // +kubebuilder:validation:Optional
+ AcceleratorTotalMemoryMib *InstanceRequirementsAcceleratorTotalMemoryMibParameters `json:"acceleratorTotalMemoryMib,omitempty" tf:"accelerator_total_memory_mib,omitempty"`
+
+ // List of accelerator types. Default is any accelerator type.
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"`
+
+ // List of instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are allowing the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are allowing all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is all instance types. 
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ AllowedInstanceTypes []*string `json:"allowedInstanceTypes,omitempty" tf:"allowed_instance_types,omitempty"`
+
+ // Indicate whether bare metal instance types should be included, excluded, or required. Default is excluded.
+ // +kubebuilder:validation:Optional
+ BareMetal *string `json:"bareMetal,omitempty" tf:"bare_metal,omitempty"`
+
+ // Block describing the minimum and maximum baseline EBS bandwidth, in Mbps. Default is no minimum or maximum.
+ // +kubebuilder:validation:Optional
+ BaselineEBSBandwidthMbps *InstanceRequirementsBaselineEBSBandwidthMbpsParameters `json:"baselineEbsBandwidthMbps,omitempty" tf:"baseline_ebs_bandwidth_mbps,omitempty"`
+
+ // Indicate whether burstable performance instance types should be included, excluded, or required. Default is excluded.
+ // +kubebuilder:validation:Optional
+ BurstablePerformance *string `json:"burstablePerformance,omitempty" tf:"burstable_performance,omitempty"`
+
+ // List of CPU manufacturer names. Default is any manufacturer.
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ CPUManufacturers []*string `json:"cpuManufacturers,omitempty" tf:"cpu_manufacturers,omitempty"`
+
+ // List of instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*, you are excluding the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, you are excluding all the M5a instance types, but not the M5n instance types. Maximum of 400 entries in the list; each entry is limited to 30 characters. Default is no excluded instance types. 
+ // +kubebuilder:validation:Optional + // +listType=set + ExcludedInstanceTypes []*string `json:"excludedInstanceTypes,omitempty" tf:"excluded_instance_types,omitempty"` + + // List of instance generation names. Default is any generation. + // +kubebuilder:validation:Optional + // +listType=set + InstanceGenerations []*string `json:"instanceGenerations,omitempty" tf:"instance_generations,omitempty"` + + // Indicate whether instance types with local storage volumes are included, excluded, or required. Default is included. + // +kubebuilder:validation:Optional + LocalStorage *string `json:"localStorage,omitempty" tf:"local_storage,omitempty"` + + // List of local storage type names. Default any storage type. + // +kubebuilder:validation:Optional + // +listType=set + LocalStorageTypes []*string `json:"localStorageTypes,omitempty" tf:"local_storage_types,omitempty"` + + // Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. + // +kubebuilder:validation:Optional + MemoryGibPerVcpu *InstanceRequirementsMemoryGibPerVcpuParameters `json:"memoryGibPerVcpu,omitempty" tf:"memory_gib_per_vcpu,omitempty"` + + // Block describing the minimum and maximum amount of memory (MiB). Default is no maximum. + // +kubebuilder:validation:Optional + MemoryMib *InstanceRequirementsMemoryMibParameters `json:"memoryMib,omitempty" tf:"memory_mib,omitempty"` + + // Block describing the minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default is no minimum or maximum. + // +kubebuilder:validation:Optional + NetworkBandwidthGbps *InstanceRequirementsNetworkBandwidthGbpsParameters `json:"networkBandwidthGbps,omitempty" tf:"network_bandwidth_gbps,omitempty"` + + // Block describing the minimum and maximum number of network interfaces. Default is no minimum or maximum. 
+ // +kubebuilder:validation:Optional + NetworkInterfaceCount *InstanceRequirementsNetworkInterfaceCountParameters `json:"networkInterfaceCount,omitempty" tf:"network_interface_count,omitempty"` + + // The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 20. + // +kubebuilder:validation:Optional + OnDemandMaxPricePercentageOverLowestPrice *float64 `json:"onDemandMaxPricePercentageOverLowestPrice,omitempty" tf:"on_demand_max_price_percentage_over_lowest_price,omitempty"` + + // Indicate whether instance types must support On-Demand Instance Hibernation, either true or false. Default is false. + // +kubebuilder:validation:Optional + RequireHibernateSupport *bool `json:"requireHibernateSupport,omitempty" tf:"require_hibernate_support,omitempty"` + + // The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. 
+ // +kubebuilder:validation:Optional + SpotMaxPricePercentageOverLowestPrice *float64 `json:"spotMaxPricePercentageOverLowestPrice,omitempty" tf:"spot_max_price_percentage_over_lowest_price,omitempty"` + + // Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. + // +kubebuilder:validation:Optional + TotalLocalStorageGb *InstanceRequirementsTotalLocalStorageGbParameters `json:"totalLocalStorageGb,omitempty" tf:"total_local_storage_gb,omitempty"` + + // Block describing the minimum and maximum number of vCPUs. Default is no maximum. + // +kubebuilder:validation:Optional + VcpuCount *InstanceRequirementsVcpuCountParameters `json:"vcpuCount,omitempty" tf:"vcpu_count,omitempty"` +} + +type OverridesObservation struct { + + // The availability zone in which to place the request. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The instance requirements. See below. + InstanceRequirements *OverridesInstanceRequirementsObservation `json:"instanceRequirements,omitempty" tf:"instance_requirements,omitempty"` + + // The type of instance to request. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The priority for the launch template override. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The maximum bid price per unit hour. + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The subnet in which to launch the requested instance. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The capacity added to the fleet by a fulfilled request. 
+ WeightedCapacity *float64 `json:"weightedCapacity,omitempty" tf:"weighted_capacity,omitempty"` +} + +type OverridesParameters struct { + + // The availability zone in which to place the request. + // +kubebuilder:validation:Optional + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The instance requirements. See below. + // +kubebuilder:validation:Optional + InstanceRequirements *OverridesInstanceRequirementsParameters `json:"instanceRequirements,omitempty" tf:"instance_requirements,omitempty"` + + // The type of instance to request. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The priority for the launch template override. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The maximum bid price per unit hour. + // +kubebuilder:validation:Optional + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The subnet in which to launch the requested instance. + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The capacity added to the fleet by a fulfilled request. + // +kubebuilder:validation:Optional + WeightedCapacity *float64 `json:"weightedCapacity,omitempty" tf:"weighted_capacity,omitempty"` +} + +type SpotFleetRequestInitParameters struct { + + // Indicates how to allocate the target capacity across + // the Spot pools specified by the Spot fleet request. Valid values: lowestPrice, diversified, capacityOptimized, capacityOptimizedPrioritized, and priceCapacityOptimized. The default is + // lowestPrice. + AllocationStrategy *string `json:"allocationStrategy,omitempty" tf:"allocation_strategy,omitempty"` + + // Reserved. 
+ Context *string `json:"context,omitempty" tf:"context,omitempty"` + + // Indicates whether running Spot + // instances should be terminated if the target capacity of the Spot fleet + // request is decreased below the current size of the Spot fleet. + ExcessCapacityTerminationPolicy *string `json:"excessCapacityTerminationPolicy,omitempty" tf:"excess_capacity_termination_policy,omitempty"` + + // The type of fleet request. Indicates whether the Spot Fleet only requests the target + // capacity or also attempts to maintain it. Default is maintain. + FleetType *string `json:"fleetType,omitempty" tf:"fleet_type,omitempty"` + + // Grants the Spot fleet permission to terminate + // Spot instances on your behalf when you cancel its Spot fleet request using + // CancelSpotFleetRequests or when the Spot fleet request expires, if you set + // terminateInstancesWithExpiration. + IAMFleetRole *string `json:"iamFleetRole,omitempty" tf:"iam_fleet_role,omitempty"` + + // Indicates whether a Spot + // instance stops or terminates when it is interrupted. Default is + // terminate. + InstanceInterruptionBehaviour *string `json:"instanceInterruptionBehaviour,omitempty" tf:"instance_interruption_behaviour,omitempty"` + + // The number of Spot pools across which to allocate your target Spot capacity. + // Valid only when allocation_strategy is set to lowestPrice. Spot Fleet selects + // the cheapest Spot pools and evenly allocates your target Spot capacity across + // the number of Spot pools that you specify. + InstancePoolsToUseCount *float64 `json:"instancePoolsToUseCount,omitempty" tf:"instance_pools_to_use_count,omitempty"` + + // Used to define the launch configuration of the + // spot-fleet request. Can be specified multiple times to define different bids + // across different markets and instance types. Conflicts with launch_template_config. At least one of launch_specification or launch_template_config is required. 
+ LaunchSpecification []LaunchSpecificationInitParameters `json:"launchSpecification,omitempty" tf:"launch_specification,omitempty"` + + // Launch template configuration block. See Launch Template Configs below for more details. Conflicts with launch_specification. At least one of launch_specification or launch_template_config is required. + LaunchTemplateConfig []LaunchTemplateConfigInitParameters `json:"launchTemplateConfig,omitempty" tf:"launch_template_config,omitempty"` + + // A list of elastic load balancer names to add to the Spot fleet. + // +listType=set + LoadBalancers []*string `json:"loadBalancers,omitempty" tf:"load_balancers,omitempty"` + + // The order of the launch template overrides to use in fulfilling On-Demand capacity. the possible values are: lowestPrice and prioritized. the default is lowestPrice. + OnDemandAllocationStrategy *string `json:"onDemandAllocationStrategy,omitempty" tf:"on_demand_allocation_strategy,omitempty"` + + // The maximum amount per hour for On-Demand Instances that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. + OnDemandMaxTotalPrice *string `json:"onDemandMaxTotalPrice,omitempty" tf:"on_demand_max_total_price,omitempty"` + + // The number of On-Demand units to request. If the request type is maintain, you can specify a target capacity of 0 and add capacity later. + OnDemandTargetCapacity *float64 `json:"onDemandTargetCapacity,omitempty" tf:"on_demand_target_capacity,omitempty"` + + // Indicates whether Spot fleet should replace unhealthy instances. Default false. + ReplaceUnhealthyInstances *bool `json:"replaceUnhealthyInstances,omitempty" tf:"replace_unhealthy_instances,omitempty"` + + // Nested argument containing maintenance strategies for managing your Spot Instances that are at an elevated risk of being interrupted. Defined below. 
+ SpotMaintenanceStrategies *SpotMaintenanceStrategiesInitParameters `json:"spotMaintenanceStrategies,omitempty" tf:"spot_maintenance_strategies,omitempty"` + + // The maximum bid price per unit hour. + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The number of units to request. You can choose to set the + // target capacity in terms of instances or a performance characteristic that is + // important to your application workload, such as vCPUs, memory, or I/O. + TargetCapacity *float64 `json:"targetCapacity,omitempty" tf:"target_capacity,omitempty"` + + // The unit for the target capacity. This can only be done with instance_requirements defined + TargetCapacityUnitType *string `json:"targetCapacityUnitType,omitempty" tf:"target_capacity_unit_type,omitempty"` + + // A list of aws_alb_target_group ARNs, for use with Application Load Balancing. + // +listType=set + TargetGroupArns []*string `json:"targetGroupArns,omitempty" tf:"target_group_arns,omitempty"` + + // Indicates whether running Spot + // instances should be terminated when the resource is deleted (and the Spot fleet request cancelled). + // If no value is specified, the value of the terminate_instances_with_expiration argument is used. + TerminateInstancesOnDelete *string `json:"terminateInstancesOnDelete,omitempty" tf:"terminate_instances_on_delete,omitempty"` + + // Indicates whether running Spot + // instances should be terminated when the Spot fleet request expires. + TerminateInstancesWithExpiration *bool `json:"terminateInstancesWithExpiration,omitempty" tf:"terminate_instances_with_expiration,omitempty"` + + // The start date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately. 
+ ValidFrom *string `json:"validFrom,omitempty" tf:"valid_from,omitempty"` + + // The end date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` + + WaitForFulfillment *bool `json:"waitForFulfillment,omitempty" tf:"wait_for_fulfillment,omitempty"` +} + +type SpotFleetRequestObservation struct { + + // Indicates how to allocate the target capacity across + // the Spot pools specified by the Spot fleet request. Valid values: lowestPrice, diversified, capacityOptimized, capacityOptimizedPrioritized, and priceCapacityOptimized. The default is + // lowestPrice. + AllocationStrategy *string `json:"allocationStrategy,omitempty" tf:"allocation_strategy,omitempty"` + + ClientToken *string `json:"clientToken,omitempty" tf:"client_token,omitempty"` + + // Reserved. + Context *string `json:"context,omitempty" tf:"context,omitempty"` + + // Indicates whether running Spot + // instances should be terminated if the target capacity of the Spot fleet + // request is decreased below the current size of the Spot fleet. + ExcessCapacityTerminationPolicy *string `json:"excessCapacityTerminationPolicy,omitempty" tf:"excess_capacity_termination_policy,omitempty"` + + // The type of fleet request. Indicates whether the Spot Fleet only requests the target + // capacity or also attempts to maintain it. Default is maintain. + FleetType *string `json:"fleetType,omitempty" tf:"fleet_type,omitempty"` + + // Grants the Spot fleet permission to terminate + // Spot instances on your behalf when you cancel its Spot fleet request using + // CancelSpotFleetRequests or when the Spot fleet request expires, if you set + // terminateInstancesWithExpiration. + IAMFleetRole *string `json:"iamFleetRole,omitempty" tf:"iam_fleet_role,omitempty"` + + // The ID of the launch template. Conflicts with name. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Indicates whether a Spot + // instance stops or terminates when it is interrupted. Default is + // terminate. + InstanceInterruptionBehaviour *string `json:"instanceInterruptionBehaviour,omitempty" tf:"instance_interruption_behaviour,omitempty"` + + // The number of Spot pools across which to allocate your target Spot capacity. + // Valid only when allocation_strategy is set to lowestPrice. Spot Fleet selects + // the cheapest Spot pools and evenly allocates your target Spot capacity across + // the number of Spot pools that you specify. + InstancePoolsToUseCount *float64 `json:"instancePoolsToUseCount,omitempty" tf:"instance_pools_to_use_count,omitempty"` + + // Used to define the launch configuration of the + // spot-fleet request. Can be specified multiple times to define different bids + // across different markets and instance types. Conflicts with launch_template_config. At least one of launch_specification or launch_template_config is required. + LaunchSpecification []LaunchSpecificationObservation `json:"launchSpecification,omitempty" tf:"launch_specification,omitempty"` + + // Launch template configuration block. See Launch Template Configs below for more details. Conflicts with launch_specification. At least one of launch_specification or launch_template_config is required. + LaunchTemplateConfig []LaunchTemplateConfigObservation `json:"launchTemplateConfig,omitempty" tf:"launch_template_config,omitempty"` + + // A list of elastic load balancer names to add to the Spot fleet. + // +listType=set + LoadBalancers []*string `json:"loadBalancers,omitempty" tf:"load_balancers,omitempty"` + + // The order of the launch template overrides to use in fulfilling On-Demand capacity. the possible values are: lowestPrice and prioritized. the default is lowestPrice. 
+ OnDemandAllocationStrategy *string `json:"onDemandAllocationStrategy,omitempty" tf:"on_demand_allocation_strategy,omitempty"` + + // The maximum amount per hour for On-Demand Instances that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. + OnDemandMaxTotalPrice *string `json:"onDemandMaxTotalPrice,omitempty" tf:"on_demand_max_total_price,omitempty"` + + // The number of On-Demand units to request. If the request type is maintain, you can specify a target capacity of 0 and add capacity later. + OnDemandTargetCapacity *float64 `json:"onDemandTargetCapacity,omitempty" tf:"on_demand_target_capacity,omitempty"` + + // Indicates whether Spot fleet should replace unhealthy instances. Default false. + ReplaceUnhealthyInstances *bool `json:"replaceUnhealthyInstances,omitempty" tf:"replace_unhealthy_instances,omitempty"` + + // Nested argument containing maintenance strategies for managing your Spot Instances that are at an elevated risk of being interrupted. Defined below. + SpotMaintenanceStrategies *SpotMaintenanceStrategiesObservation `json:"spotMaintenanceStrategies,omitempty" tf:"spot_maintenance_strategies,omitempty"` + + // The maximum bid price per unit hour. + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The state of the Spot fleet request. + SpotRequestState *string `json:"spotRequestState,omitempty" tf:"spot_request_state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The number of units to request. 
You can choose to set the + // target capacity in terms of instances or a performance characteristic that is + // important to your application workload, such as vCPUs, memory, or I/O. + TargetCapacity *float64 `json:"targetCapacity,omitempty" tf:"target_capacity,omitempty"` + + // The unit for the target capacity. This can only be done with instance_requirements defined + TargetCapacityUnitType *string `json:"targetCapacityUnitType,omitempty" tf:"target_capacity_unit_type,omitempty"` + + // A list of aws_alb_target_group ARNs, for use with Application Load Balancing. + // +listType=set + TargetGroupArns []*string `json:"targetGroupArns,omitempty" tf:"target_group_arns,omitempty"` + + // Indicates whether running Spot + // instances should be terminated when the resource is deleted (and the Spot fleet request cancelled). + // If no value is specified, the value of the terminate_instances_with_expiration argument is used. + TerminateInstancesOnDelete *string `json:"terminateInstancesOnDelete,omitempty" tf:"terminate_instances_on_delete,omitempty"` + + // Indicates whether running Spot + // instances should be terminated when the Spot fleet request expires. + TerminateInstancesWithExpiration *bool `json:"terminateInstancesWithExpiration,omitempty" tf:"terminate_instances_with_expiration,omitempty"` + + // The start date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately. + ValidFrom *string `json:"validFrom,omitempty" tf:"valid_from,omitempty"` + + // The end date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. 
+ ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` + + WaitForFulfillment *bool `json:"waitForFulfillment,omitempty" tf:"wait_for_fulfillment,omitempty"` +} + +type SpotFleetRequestParameters struct { + + // Indicates how to allocate the target capacity across + // the Spot pools specified by the Spot fleet request. Valid values: lowestPrice, diversified, capacityOptimized, capacityOptimizedPrioritized, and priceCapacityOptimized. The default is + // lowestPrice. + // +kubebuilder:validation:Optional + AllocationStrategy *string `json:"allocationStrategy,omitempty" tf:"allocation_strategy,omitempty"` + + // Reserved. + // +kubebuilder:validation:Optional + Context *string `json:"context,omitempty" tf:"context,omitempty"` + + // Indicates whether running Spot + // instances should be terminated if the target capacity of the Spot fleet + // request is decreased below the current size of the Spot fleet. + // +kubebuilder:validation:Optional + ExcessCapacityTerminationPolicy *string `json:"excessCapacityTerminationPolicy,omitempty" tf:"excess_capacity_termination_policy,omitempty"` + + // The type of fleet request. Indicates whether the Spot Fleet only requests the target + // capacity or also attempts to maintain it. Default is maintain. + // +kubebuilder:validation:Optional + FleetType *string `json:"fleetType,omitempty" tf:"fleet_type,omitempty"` + + // Grants the Spot fleet permission to terminate + // Spot instances on your behalf when you cancel its Spot fleet request using + // CancelSpotFleetRequests or when the Spot fleet request expires, if you set + // terminateInstancesWithExpiration. + // +kubebuilder:validation:Optional + IAMFleetRole *string `json:"iamFleetRole,omitempty" tf:"iam_fleet_role,omitempty"` + + // Indicates whether a Spot + // instance stops or terminates when it is interrupted. Default is + // terminate. 
+ // +kubebuilder:validation:Optional + InstanceInterruptionBehaviour *string `json:"instanceInterruptionBehaviour,omitempty" tf:"instance_interruption_behaviour,omitempty"` + + // The number of Spot pools across which to allocate your target Spot capacity. + // Valid only when allocation_strategy is set to lowestPrice. Spot Fleet selects + // the cheapest Spot pools and evenly allocates your target Spot capacity across + // the number of Spot pools that you specify. + // +kubebuilder:validation:Optional + InstancePoolsToUseCount *float64 `json:"instancePoolsToUseCount,omitempty" tf:"instance_pools_to_use_count,omitempty"` + + // Used to define the launch configuration of the + // spot-fleet request. Can be specified multiple times to define different bids + // across different markets and instance types. Conflicts with launch_template_config. At least one of launch_specification or launch_template_config is required. + // +kubebuilder:validation:Optional + LaunchSpecification []LaunchSpecificationParameters `json:"launchSpecification,omitempty" tf:"launch_specification,omitempty"` + + // Launch template configuration block. See Launch Template Configs below for more details. Conflicts with launch_specification. At least one of launch_specification or launch_template_config is required. + // +kubebuilder:validation:Optional + LaunchTemplateConfig []LaunchTemplateConfigParameters `json:"launchTemplateConfig,omitempty" tf:"launch_template_config,omitempty"` + + // A list of elastic load balancer names to add to the Spot fleet. + // +kubebuilder:validation:Optional + // +listType=set + LoadBalancers []*string `json:"loadBalancers,omitempty" tf:"load_balancers,omitempty"` + + // The order of the launch template overrides to use in fulfilling On-Demand capacity. the possible values are: lowestPrice and prioritized. the default is lowestPrice. 
+ // +kubebuilder:validation:Optional + OnDemandAllocationStrategy *string `json:"onDemandAllocationStrategy,omitempty" tf:"on_demand_allocation_strategy,omitempty"` + + // The maximum amount per hour for On-Demand Instances that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. + // +kubebuilder:validation:Optional + OnDemandMaxTotalPrice *string `json:"onDemandMaxTotalPrice,omitempty" tf:"on_demand_max_total_price,omitempty"` + + // The number of On-Demand units to request. If the request type is maintain, you can specify a target capacity of 0 and add capacity later. + // +kubebuilder:validation:Optional + OnDemandTargetCapacity *float64 `json:"onDemandTargetCapacity,omitempty" tf:"on_demand_target_capacity,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Indicates whether Spot fleet should replace unhealthy instances. Default false. + // +kubebuilder:validation:Optional + ReplaceUnhealthyInstances *bool `json:"replaceUnhealthyInstances,omitempty" tf:"replace_unhealthy_instances,omitempty"` + + // Nested argument containing maintenance strategies for managing your Spot Instances that are at an elevated risk of being interrupted. Defined below. + // +kubebuilder:validation:Optional + SpotMaintenanceStrategies *SpotMaintenanceStrategiesParameters `json:"spotMaintenanceStrategies,omitempty" tf:"spot_maintenance_strategies,omitempty"` + + // The maximum bid price per unit hour. + // +kubebuilder:validation:Optional + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The number of units to request. 
You can choose to set the + // target capacity in terms of instances or a performance characteristic that is + // important to your application workload, such as vCPUs, memory, or I/O. + // +kubebuilder:validation:Optional + TargetCapacity *float64 `json:"targetCapacity,omitempty" tf:"target_capacity,omitempty"` + + // The unit for the target capacity. This can only be done with instance_requirements defined + // +kubebuilder:validation:Optional + TargetCapacityUnitType *string `json:"targetCapacityUnitType,omitempty" tf:"target_capacity_unit_type,omitempty"` + + // A list of aws_alb_target_group ARNs, for use with Application Load Balancing. + // +kubebuilder:validation:Optional + // +listType=set + TargetGroupArns []*string `json:"targetGroupArns,omitempty" tf:"target_group_arns,omitempty"` + + // Indicates whether running Spot + // instances should be terminated when the resource is deleted (and the Spot fleet request cancelled). + // If no value is specified, the value of the terminate_instances_with_expiration argument is used. + // +kubebuilder:validation:Optional + TerminateInstancesOnDelete *string `json:"terminateInstancesOnDelete,omitempty" tf:"terminate_instances_on_delete,omitempty"` + + // Indicates whether running Spot + // instances should be terminated when the Spot fleet request expires. + // +kubebuilder:validation:Optional + TerminateInstancesWithExpiration *bool `json:"terminateInstancesWithExpiration,omitempty" tf:"terminate_instances_with_expiration,omitempty"` + + // The start date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately. + // +kubebuilder:validation:Optional + ValidFrom *string `json:"validFrom,omitempty" tf:"valid_from,omitempty"` + + // The end date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. 
+ // +kubebuilder:validation:Optional + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` + + // +kubebuilder:validation:Optional + WaitForFulfillment *bool `json:"waitForFulfillment,omitempty" tf:"wait_for_fulfillment,omitempty"` +} + +type SpotMaintenanceStrategiesInitParameters struct { + + // Nested argument containing the capacity rebalance for your fleet request. Defined below. + CapacityRebalance *CapacityRebalanceInitParameters `json:"capacityRebalance,omitempty" tf:"capacity_rebalance,omitempty"` +} + +type SpotMaintenanceStrategiesObservation struct { + + // Nested argument containing the capacity rebalance for your fleet request. Defined below. + CapacityRebalance *CapacityRebalanceObservation `json:"capacityRebalance,omitempty" tf:"capacity_rebalance,omitempty"` +} + +type SpotMaintenanceStrategiesParameters struct { + + // Nested argument containing the capacity rebalance for your fleet request. Defined below. + // +kubebuilder:validation:Optional + CapacityRebalance *CapacityRebalanceParameters `json:"capacityRebalance,omitempty" tf:"capacity_rebalance,omitempty"` +} + +// SpotFleetRequestSpec defines the desired state of SpotFleetRequest +type SpotFleetRequestSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpotFleetRequestParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider SpotFleetRequestInitParameters `json:"initProvider,omitempty"` +} + +// SpotFleetRequestStatus defines the observed state of SpotFleetRequest. +type SpotFleetRequestStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpotFleetRequestObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpotFleetRequest is the Schema for the SpotFleetRequests API. Provides a Spot Fleet Request resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SpotFleetRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.iamFleetRole) || (has(self.initProvider) && has(self.initProvider.iamFleetRole))",message="spec.forProvider.iamFleetRole is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetCapacity) || (has(self.initProvider) && has(self.initProvider.targetCapacity))",message="spec.forProvider.targetCapacity is a required parameter" + Spec SpotFleetRequestSpec `json:"spec"` + Status SpotFleetRequestStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpotFleetRequestList contains a list of SpotFleetRequests +type 
SpotFleetRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpotFleetRequest `json:"items"` +} + +// Repository type metadata. +var ( + SpotFleetRequest_Kind = "SpotFleetRequest" + SpotFleetRequest_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpotFleetRequest_Kind}.String() + SpotFleetRequest_KindAPIVersion = SpotFleetRequest_Kind + "." + CRDGroupVersion.String() + SpotFleetRequest_GroupVersionKind = CRDGroupVersion.WithKind(SpotFleetRequest_Kind) +) + +func init() { + SchemeBuilder.Register(&SpotFleetRequest{}, &SpotFleetRequestList{}) +} diff --git a/apis/ec2/v1beta2/zz_spotinstancerequest_terraformed.go b/apis/ec2/v1beta2/zz_spotinstancerequest_terraformed.go new file mode 100755 index 0000000000..a42eb8566f --- /dev/null +++ b/apis/ec2/v1beta2/zz_spotinstancerequest_terraformed.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpotInstanceRequest +func (mg *SpotInstanceRequest) GetTerraformResourceType() string { + return "aws_spot_instance_request" +} + +// GetConnectionDetailsMapping for this SpotInstanceRequest +func (tr *SpotInstanceRequest) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpotInstanceRequest +func (tr *SpotInstanceRequest) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpotInstanceRequest +func (tr *SpotInstanceRequest) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpotInstanceRequest +func (tr *SpotInstanceRequest) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpotInstanceRequest +func (tr *SpotInstanceRequest) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpotInstanceRequest +func (tr *SpotInstanceRequest) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpotInstanceRequest +func (tr *SpotInstanceRequest) GetInitParameters() 
(map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SpotInstanceRequest
+func (tr *SpotInstanceRequest) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SpotInstanceRequest using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *SpotInstanceRequest) LateInitialize(attrs []byte) (bool, error) { + params := &SpotInstanceRequestParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("InstanceInterruptionBehavior")) + opts = append(opts, resource.WithNameFilter("SourceDestCheck")) + opts = append(opts, resource.WithNameFilter("SpotType")) + opts = append(opts, resource.WithNameFilter("ValidFrom")) + opts = append(opts, resource.WithNameFilter("ValidUntil")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpotInstanceRequest) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_spotinstancerequest_types.go b/apis/ec2/v1beta2/zz_spotinstancerequest_types.go new file mode 100755 index 0000000000..3dde3e4bce --- /dev/null +++ b/apis/ec2/v1beta2/zz_spotinstancerequest_types.go @@ -0,0 +1,1088 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SpotInstanceRequestCPUOptionsInitParameters struct { + AmdSevSnp *string `json:"amdSevSnp,omitempty" tf:"amd_sev_snp,omitempty"` + + CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` + + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` +} + +type SpotInstanceRequestCPUOptionsObservation struct { + AmdSevSnp *string `json:"amdSevSnp,omitempty" tf:"amd_sev_snp,omitempty"` + + CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` + + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` +} + +type SpotInstanceRequestCPUOptionsParameters struct { + + // +kubebuilder:validation:Optional + AmdSevSnp *string `json:"amdSevSnp,omitempty" tf:"amd_sev_snp,omitempty"` + + // +kubebuilder:validation:Optional + CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` + + // +kubebuilder:validation:Optional + ThreadsPerCore *float64 `json:"threadsPerCore,omitempty" tf:"threads_per_core,omitempty"` +} + +type SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetInitParameters struct { + + // The Spot Instance Request ID. + CapacityReservationID *string `json:"capacityReservationId,omitempty" tf:"capacity_reservation_id,omitempty"` + + CapacityReservationResourceGroupArn *string `json:"capacityReservationResourceGroupArn,omitempty" tf:"capacity_reservation_resource_group_arn,omitempty"` +} + +type SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetObservation struct { + + // The Spot Instance Request ID. 
+ CapacityReservationID *string `json:"capacityReservationId,omitempty" tf:"capacity_reservation_id,omitempty"` + + CapacityReservationResourceGroupArn *string `json:"capacityReservationResourceGroupArn,omitempty" tf:"capacity_reservation_resource_group_arn,omitempty"` +} + +type SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetParameters struct { + + // The Spot Instance Request ID. + // +kubebuilder:validation:Optional + CapacityReservationID *string `json:"capacityReservationId,omitempty" tf:"capacity_reservation_id,omitempty"` + + // +kubebuilder:validation:Optional + CapacityReservationResourceGroupArn *string `json:"capacityReservationResourceGroupArn,omitempty" tf:"capacity_reservation_resource_group_arn,omitempty"` +} + +type SpotInstanceRequestCapacityReservationSpecificationInitParameters struct { + CapacityReservationPreference *string `json:"capacityReservationPreference,omitempty" tf:"capacity_reservation_preference,omitempty"` + + CapacityReservationTarget *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetInitParameters `json:"capacityReservationTarget,omitempty" tf:"capacity_reservation_target,omitempty"` +} + +type SpotInstanceRequestCapacityReservationSpecificationObservation struct { + CapacityReservationPreference *string `json:"capacityReservationPreference,omitempty" tf:"capacity_reservation_preference,omitempty"` + + CapacityReservationTarget *SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetObservation `json:"capacityReservationTarget,omitempty" tf:"capacity_reservation_target,omitempty"` +} + +type SpotInstanceRequestCapacityReservationSpecificationParameters struct { + + // +kubebuilder:validation:Optional + CapacityReservationPreference *string `json:"capacityReservationPreference,omitempty" tf:"capacity_reservation_preference,omitempty"` + + // +kubebuilder:validation:Optional + CapacityReservationTarget 
*SpotInstanceRequestCapacityReservationSpecificationCapacityReservationTargetParameters `json:"capacityReservationTarget,omitempty" tf:"capacity_reservation_target,omitempty"` +} + +type SpotInstanceRequestCreditSpecificationInitParameters struct { + CPUCredits *string `json:"cpuCredits,omitempty" tf:"cpu_credits,omitempty"` +} + +type SpotInstanceRequestCreditSpecificationObservation struct { + CPUCredits *string `json:"cpuCredits,omitempty" tf:"cpu_credits,omitempty"` +} + +type SpotInstanceRequestCreditSpecificationParameters struct { + + // +kubebuilder:validation:Optional + CPUCredits *string `json:"cpuCredits,omitempty" tf:"cpu_credits,omitempty"` +} + +type SpotInstanceRequestEBSBlockDeviceInitParameters struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The Spot Instance Request ID. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The Spot Instance Request ID. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type SpotInstanceRequestEBSBlockDeviceObservation struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The Spot Instance Request ID. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The Spot Instance Request ID. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The Spot Instance Request ID. 
+ VolumeID *string `json:"volumeId,omitempty" tf:"volume_id,omitempty"` + + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type SpotInstanceRequestEBSBlockDeviceParameters struct { + + // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The Spot Instance Request ID. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The Spot Instance Request ID. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type SpotInstanceRequestEnclaveOptionsInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SpotInstanceRequestEnclaveOptionsObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SpotInstanceRequestEnclaveOptionsParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SpotInstanceRequestEphemeralBlockDeviceInitParameters struct { + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type SpotInstanceRequestEphemeralBlockDeviceObservation struct { + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type SpotInstanceRequestEphemeralBlockDeviceParameters struct { + + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // +kubebuilder:validation:Optional + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // +kubebuilder:validation:Optional + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type SpotInstanceRequestInitParameters 
struct { + AMI *string `json:"ami,omitempty" tf:"ami,omitempty"` + + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). + // The duration period starts as soon as your Spot instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates. + // Note that you can't specify an Availability Zone group or a launch group if you specify a duration. + BlockDurationMinutes *float64 `json:"blockDurationMinutes,omitempty" tf:"block_duration_minutes,omitempty"` + + CPUCoreCount *float64 `json:"cpuCoreCount,omitempty" tf:"cpu_core_count,omitempty"` + + CPUOptions *SpotInstanceRequestCPUOptionsInitParameters `json:"cpuOptions,omitempty" tf:"cpu_options,omitempty"` + + CPUThreadsPerCore *float64 `json:"cpuThreadsPerCore,omitempty" tf:"cpu_threads_per_core,omitempty"` + + CapacityReservationSpecification *SpotInstanceRequestCapacityReservationSpecificationInitParameters `json:"capacityReservationSpecification,omitempty" tf:"capacity_reservation_specification,omitempty"` + + CreditSpecification *SpotInstanceRequestCreditSpecificationInitParameters `json:"creditSpecification,omitempty" tf:"credit_specification,omitempty"` + + DisableAPIStop *bool `json:"disableApiStop,omitempty" tf:"disable_api_stop,omitempty"` + + DisableAPITermination *bool `json:"disableApiTermination,omitempty" tf:"disable_api_termination,omitempty"` + + EBSBlockDevice []SpotInstanceRequestEBSBlockDeviceInitParameters `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + EBSOptimized *bool `json:"ebsOptimized,omitempty" 
tf:"ebs_optimized,omitempty"` + + EnclaveOptions *SpotInstanceRequestEnclaveOptionsInitParameters `json:"enclaveOptions,omitempty" tf:"enclave_options,omitempty"` + + EphemeralBlockDevice []SpotInstanceRequestEphemeralBlockDeviceInitParameters `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + GetPasswordData *bool `json:"getPasswordData,omitempty" tf:"get_password_data,omitempty"` + + Hibernation *bool `json:"hibernation,omitempty" tf:"hibernation,omitempty"` + + // The Spot Instance Request ID. + HostID *string `json:"hostId,omitempty" tf:"host_id,omitempty"` + + HostResourceGroupArn *string `json:"hostResourceGroupArn,omitempty" tf:"host_resource_group_arn,omitempty"` + + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + IPv6AddressCount *float64 `json:"ipv6AddressCount,omitempty" tf:"ipv6_address_count,omitempty"` + + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + InstanceInitiatedShutdownBehavior *string `json:"instanceInitiatedShutdownBehavior,omitempty" tf:"instance_initiated_shutdown_behavior,omitempty"` + + // Indicates Spot instance behavior when it is interrupted. Valid values are terminate, stop, or hibernate. Default value is terminate. + InstanceInterruptionBehavior *string `json:"instanceInterruptionBehavior,omitempty" tf:"instance_interruption_behavior,omitempty"` + + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // A launch group is a group of spot instances that launch together and terminate together. + // If left empty instances are launched and terminated individually. 
+ LaunchGroup *string `json:"launchGroup,omitempty" tf:"launch_group,omitempty"` + + LaunchTemplate *SpotInstanceRequestLaunchTemplateInitParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + MaintenanceOptions *SpotInstanceRequestMaintenanceOptionsInitParameters `json:"maintenanceOptions,omitempty" tf:"maintenance_options,omitempty"` + + MetadataOptions *SpotInstanceRequestMetadataOptionsInitParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + Monitoring *bool `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + NetworkInterface []SpotInstanceRequestNetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + PlacementPartitionNumber *float64 `json:"placementPartitionNumber,omitempty" tf:"placement_partition_number,omitempty"` + + PrivateDNSNameOptions *SpotInstanceRequestPrivateDNSNameOptionsInitParameters `json:"privateDnsNameOptions,omitempty" tf:"private_dns_name_options,omitempty"` + + // The private IP address assigned to the instance + PrivateIP *string `json:"privateIp,omitempty" tf:"private_ip,omitempty"` + + RootBlockDevice *SpotInstanceRequestRootBlockDeviceInitParameters `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // +listType=set + SecondaryPrivateIps []*string `json:"secondaryPrivateIps,omitempty" tf:"secondary_private_ips,omitempty"` + + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + SourceDestCheck *bool `json:"sourceDestCheck,omitempty" tf:"source_dest_check,omitempty"` + + // The maximum price to request on the spot market. + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // If set to one-time, after + // the instance is terminated, the spot request will be closed. 
+ SpotType *string `json:"spotType,omitempty" tf:"spot_type,omitempty"` + + // The Spot Instance Request ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + Tenancy *string `json:"tenancy,omitempty" tf:"tenancy,omitempty"` + + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` + + UserDataReplaceOnChange *bool `json:"userDataReplaceOnChange,omitempty" tf:"user_data_replace_on_change,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` + + // The start date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately. + ValidFrom *string `json:"validFrom,omitempty" tf:"valid_from,omitempty"` + + // The end date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. The default end date is 7 days from the current date. + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + VolumeTags map[string]*string `json:"volumeTags,omitempty" tf:"volume_tags,omitempty"` + + WaitForFulfillment *bool `json:"waitForFulfillment,omitempty" tf:"wait_for_fulfillment,omitempty"` +} + +type SpotInstanceRequestLaunchTemplateInitParameters struct { + + // The Spot Instance Request ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type SpotInstanceRequestLaunchTemplateObservation struct { + + // The Spot Instance Request ID. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type SpotInstanceRequestLaunchTemplateParameters struct { + + // The Spot Instance Request ID. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type SpotInstanceRequestMaintenanceOptionsInitParameters struct { + AutoRecovery *string `json:"autoRecovery,omitempty" tf:"auto_recovery,omitempty"` +} + +type SpotInstanceRequestMaintenanceOptionsObservation struct { + AutoRecovery *string `json:"autoRecovery,omitempty" tf:"auto_recovery,omitempty"` +} + +type SpotInstanceRequestMaintenanceOptionsParameters struct { + + // +kubebuilder:validation:Optional + AutoRecovery *string `json:"autoRecovery,omitempty" tf:"auto_recovery,omitempty"` +} + +type SpotInstanceRequestMetadataOptionsInitParameters struct { + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + HTTPProtocolIPv6 *string `json:"httpProtocolIpv6,omitempty" tf:"http_protocol_ipv6,omitempty"` + + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` + + // Key-value map of resource tags. 
+ InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty" tf:"instance_metadata_tags,omitempty"` +} + +type SpotInstanceRequestMetadataOptionsObservation struct { + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + HTTPProtocolIPv6 *string `json:"httpProtocolIpv6,omitempty" tf:"http_protocol_ipv6,omitempty"` + + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` + + // Key-value map of resource tags. + InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty" tf:"instance_metadata_tags,omitempty"` +} + +type SpotInstanceRequestMetadataOptionsParameters struct { + + // +kubebuilder:validation:Optional + HTTPEndpoint *string `json:"httpEndpoint,omitempty" tf:"http_endpoint,omitempty"` + + // +kubebuilder:validation:Optional + HTTPProtocolIPv6 *string `json:"httpProtocolIpv6,omitempty" tf:"http_protocol_ipv6,omitempty"` + + // +kubebuilder:validation:Optional + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // +kubebuilder:validation:Optional + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty" tf:"instance_metadata_tags,omitempty"` +} + +type SpotInstanceRequestNetworkInterfaceInitParameters struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + DeviceIndex *float64 `json:"deviceIndex,omitempty" tf:"device_index,omitempty"` + + NetworkCardIndex *float64 `json:"networkCardIndex,omitempty" tf:"network_card_index,omitempty"` + + // The Spot Instance Request ID. 
+ NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` +} + +type SpotInstanceRequestNetworkInterfaceObservation struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + DeviceIndex *float64 `json:"deviceIndex,omitempty" tf:"device_index,omitempty"` + + NetworkCardIndex *float64 `json:"networkCardIndex,omitempty" tf:"network_card_index,omitempty"` + + // The Spot Instance Request ID. + NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` +} + +type SpotInstanceRequestNetworkInterfaceParameters struct { + + // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // +kubebuilder:validation:Optional + DeviceIndex *float64 `json:"deviceIndex" tf:"device_index,omitempty"` + + // +kubebuilder:validation:Optional + NetworkCardIndex *float64 `json:"networkCardIndex,omitempty" tf:"network_card_index,omitempty"` + + // The Spot Instance Request ID. + // +kubebuilder:validation:Optional + NetworkInterfaceID *string `json:"networkInterfaceId" tf:"network_interface_id,omitempty"` +} + +type SpotInstanceRequestObservation struct { + AMI *string `json:"ami,omitempty" tf:"ami,omitempty"` + + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). + // The duration period starts as soon as your Spot instance receives its instance ID. 
At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates. + // Note that you can't specify an Availability Zone group or a launch group if you specify a duration. + BlockDurationMinutes *float64 `json:"blockDurationMinutes,omitempty" tf:"block_duration_minutes,omitempty"` + + CPUCoreCount *float64 `json:"cpuCoreCount,omitempty" tf:"cpu_core_count,omitempty"` + + CPUOptions *SpotInstanceRequestCPUOptionsObservation `json:"cpuOptions,omitempty" tf:"cpu_options,omitempty"` + + CPUThreadsPerCore *float64 `json:"cpuThreadsPerCore,omitempty" tf:"cpu_threads_per_core,omitempty"` + + CapacityReservationSpecification *SpotInstanceRequestCapacityReservationSpecificationObservation `json:"capacityReservationSpecification,omitempty" tf:"capacity_reservation_specification,omitempty"` + + CreditSpecification *SpotInstanceRequestCreditSpecificationObservation `json:"creditSpecification,omitempty" tf:"credit_specification,omitempty"` + + DisableAPIStop *bool `json:"disableApiStop,omitempty" tf:"disable_api_stop,omitempty"` + + DisableAPITermination *bool `json:"disableApiTermination,omitempty" tf:"disable_api_termination,omitempty"` + + EBSBlockDevice []SpotInstanceRequestEBSBlockDeviceObservation `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + EnclaveOptions *SpotInstanceRequestEnclaveOptionsObservation `json:"enclaveOptions,omitempty" tf:"enclave_options,omitempty"` + + EphemeralBlockDevice []SpotInstanceRequestEphemeralBlockDeviceObservation `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + GetPasswordData *bool `json:"getPasswordData,omitempty" tf:"get_password_data,omitempty"` + + Hibernation *bool `json:"hibernation,omitempty" tf:"hibernation,omitempty"` + + // The Spot Instance Request ID. 
+ HostID *string `json:"hostId,omitempty" tf:"host_id,omitempty"` + + HostResourceGroupArn *string `json:"hostResourceGroupArn,omitempty" tf:"host_resource_group_arn,omitempty"` + + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // The Spot Instance Request ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + IPv6AddressCount *float64 `json:"ipv6AddressCount,omitempty" tf:"ipv6_address_count,omitempty"` + + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + InstanceInitiatedShutdownBehavior *string `json:"instanceInitiatedShutdownBehavior,omitempty" tf:"instance_initiated_shutdown_behavior,omitempty"` + + // Indicates Spot instance behavior when it is interrupted. Valid values are terminate, stop, or hibernate. Default value is terminate. + InstanceInterruptionBehavior *string `json:"instanceInterruptionBehavior,omitempty" tf:"instance_interruption_behavior,omitempty"` + + InstanceState *string `json:"instanceState,omitempty" tf:"instance_state,omitempty"` + + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // A launch group is a group of spot instances that launch together and terminate together. + // If left empty instances are launched and terminated individually. 
+ LaunchGroup *string `json:"launchGroup,omitempty" tf:"launch_group,omitempty"` + + LaunchTemplate *SpotInstanceRequestLaunchTemplateObservation `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + MaintenanceOptions *SpotInstanceRequestMaintenanceOptionsObservation `json:"maintenanceOptions,omitempty" tf:"maintenance_options,omitempty"` + + MetadataOptions *SpotInstanceRequestMetadataOptionsObservation `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + Monitoring *bool `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + NetworkInterface []SpotInstanceRequestNetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + OutpostArn *string `json:"outpostArn,omitempty" tf:"outpost_arn,omitempty"` + + PasswordData *string `json:"passwordData,omitempty" tf:"password_data,omitempty"` + + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + PlacementPartitionNumber *float64 `json:"placementPartitionNumber,omitempty" tf:"placement_partition_number,omitempty"` + + // The Spot Instance Request ID. + PrimaryNetworkInterfaceID *string `json:"primaryNetworkInterfaceId,omitempty" tf:"primary_network_interface_id,omitempty"` + + // The private DNS name assigned to the instance. Can only be + // used inside the Amazon EC2, and only available if you've enabled DNS hostnames + // for your VPC + PrivateDNS *string `json:"privateDns,omitempty" tf:"private_dns,omitempty"` + + PrivateDNSNameOptions *SpotInstanceRequestPrivateDNSNameOptionsObservation `json:"privateDnsNameOptions,omitempty" tf:"private_dns_name_options,omitempty"` + + // The private IP address assigned to the instance + PrivateIP *string `json:"privateIp,omitempty" tf:"private_ip,omitempty"` + + // The public DNS name assigned to the instance. 
For EC2-VPC, this + // is only available if you've enabled DNS hostnames for your VPC + PublicDNS *string `json:"publicDns,omitempty" tf:"public_dns,omitempty"` + + // The public IP address assigned to the instance, if applicable. + PublicIP *string `json:"publicIp,omitempty" tf:"public_ip,omitempty"` + + RootBlockDevice *SpotInstanceRequestRootBlockDeviceObservation `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // +listType=set + SecondaryPrivateIps []*string `json:"secondaryPrivateIps,omitempty" tf:"secondary_private_ips,omitempty"` + + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + SourceDestCheck *bool `json:"sourceDestCheck,omitempty" tf:"source_dest_check,omitempty"` + + // The current bid + // status + // of the Spot Instance Request. + SpotBidStatus *string `json:"spotBidStatus,omitempty" tf:"spot_bid_status,omitempty"` + + // The Instance ID (if any) that is currently fulfilling + // the Spot Instance request. + SpotInstanceID *string `json:"spotInstanceId,omitempty" tf:"spot_instance_id,omitempty"` + + // The maximum price to request on the spot market. + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // The current request + // state + // of the Spot Instance Request. + SpotRequestState *string `json:"spotRequestState,omitempty" tf:"spot_request_state,omitempty"` + + // If set to one-time, after + // the instance is terminated, the spot request will be closed. + SpotType *string `json:"spotType,omitempty" tf:"spot_type,omitempty"` + + // The Spot Instance Request ID. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + Tenancy *string `json:"tenancy,omitempty" tf:"tenancy,omitempty"` + + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` + + UserDataReplaceOnChange *bool `json:"userDataReplaceOnChange,omitempty" tf:"user_data_replace_on_change,omitempty"` + + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` + + // The start date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately. + ValidFrom *string `json:"validFrom,omitempty" tf:"valid_from,omitempty"` + + // The end date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. The default end date is 7 days from the current date. + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + VolumeTags map[string]*string `json:"volumeTags,omitempty" tf:"volume_tags,omitempty"` + + WaitForFulfillment *bool `json:"waitForFulfillment,omitempty" tf:"wait_for_fulfillment,omitempty"` +} + +type SpotInstanceRequestParameters struct { + + // +kubebuilder:validation:Optional + AMI *string `json:"ami,omitempty" tf:"ami,omitempty"` + + // +kubebuilder:validation:Optional + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty" tf:"associate_public_ip_address,omitempty"` + + // +kubebuilder:validation:Optional + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). 
+ // The duration period starts as soon as your Spot instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates. + // Note that you can't specify an Availability Zone group or a launch group if you specify a duration. + // +kubebuilder:validation:Optional + BlockDurationMinutes *float64 `json:"blockDurationMinutes,omitempty" tf:"block_duration_minutes,omitempty"` + + // +kubebuilder:validation:Optional + CPUCoreCount *float64 `json:"cpuCoreCount,omitempty" tf:"cpu_core_count,omitempty"` + + // +kubebuilder:validation:Optional + CPUOptions *SpotInstanceRequestCPUOptionsParameters `json:"cpuOptions,omitempty" tf:"cpu_options,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreadsPerCore *float64 `json:"cpuThreadsPerCore,omitempty" tf:"cpu_threads_per_core,omitempty"` + + // +kubebuilder:validation:Optional + CapacityReservationSpecification *SpotInstanceRequestCapacityReservationSpecificationParameters `json:"capacityReservationSpecification,omitempty" tf:"capacity_reservation_specification,omitempty"` + + // +kubebuilder:validation:Optional + CreditSpecification *SpotInstanceRequestCreditSpecificationParameters `json:"creditSpecification,omitempty" tf:"credit_specification,omitempty"` + + // +kubebuilder:validation:Optional + DisableAPIStop *bool `json:"disableApiStop,omitempty" tf:"disable_api_stop,omitempty"` + + // +kubebuilder:validation:Optional + DisableAPITermination *bool `json:"disableApiTermination,omitempty" tf:"disable_api_termination,omitempty"` + + // +kubebuilder:validation:Optional + EBSBlockDevice []SpotInstanceRequestEBSBlockDeviceParameters `json:"ebsBlockDevice,omitempty" tf:"ebs_block_device,omitempty"` + + // +kubebuilder:validation:Optional + EBSOptimized *bool `json:"ebsOptimized,omitempty" tf:"ebs_optimized,omitempty"` + + // +kubebuilder:validation:Optional 
+ EnclaveOptions *SpotInstanceRequestEnclaveOptionsParameters `json:"enclaveOptions,omitempty" tf:"enclave_options,omitempty"` + + // +kubebuilder:validation:Optional + EphemeralBlockDevice []SpotInstanceRequestEphemeralBlockDeviceParameters `json:"ephemeralBlockDevice,omitempty" tf:"ephemeral_block_device,omitempty"` + + // +kubebuilder:validation:Optional + GetPasswordData *bool `json:"getPasswordData,omitempty" tf:"get_password_data,omitempty"` + + // +kubebuilder:validation:Optional + Hibernation *bool `json:"hibernation,omitempty" tf:"hibernation,omitempty"` + + // The Spot Instance Request ID. + // +kubebuilder:validation:Optional + HostID *string `json:"hostId,omitempty" tf:"host_id,omitempty"` + + // +kubebuilder:validation:Optional + HostResourceGroupArn *string `json:"hostResourceGroupArn,omitempty" tf:"host_resource_group_arn,omitempty"` + + // +kubebuilder:validation:Optional + IAMInstanceProfile *string `json:"iamInstanceProfile,omitempty" tf:"iam_instance_profile,omitempty"` + + // +kubebuilder:validation:Optional + IPv6AddressCount *float64 `json:"ipv6AddressCount,omitempty" tf:"ipv6_address_count,omitempty"` + + // +kubebuilder:validation:Optional + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + // +kubebuilder:validation:Optional + InstanceInitiatedShutdownBehavior *string `json:"instanceInitiatedShutdownBehavior,omitempty" tf:"instance_initiated_shutdown_behavior,omitempty"` + + // Indicates Spot instance behavior when it is interrupted. Valid values are terminate, stop, or hibernate. Default value is terminate. 
+ // +kubebuilder:validation:Optional + InstanceInterruptionBehavior *string `json:"instanceInterruptionBehavior,omitempty" tf:"instance_interruption_behavior,omitempty"` + + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // +kubebuilder:validation:Optional + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // A launch group is a group of spot instances that launch together and terminate together. + // If left empty instances are launched and terminated individually. + // +kubebuilder:validation:Optional + LaunchGroup *string `json:"launchGroup,omitempty" tf:"launch_group,omitempty"` + + // +kubebuilder:validation:Optional + LaunchTemplate *SpotInstanceRequestLaunchTemplateParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // +kubebuilder:validation:Optional + MaintenanceOptions *SpotInstanceRequestMaintenanceOptionsParameters `json:"maintenanceOptions,omitempty" tf:"maintenance_options,omitempty"` + + // +kubebuilder:validation:Optional + MetadataOptions *SpotInstanceRequestMetadataOptionsParameters `json:"metadataOptions,omitempty" tf:"metadata_options,omitempty"` + + // +kubebuilder:validation:Optional + Monitoring *bool `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // +kubebuilder:validation:Optional + NetworkInterface []SpotInstanceRequestNetworkInterfaceParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // +kubebuilder:validation:Optional + PlacementGroup *string `json:"placementGroup,omitempty" tf:"placement_group,omitempty"` + + // +kubebuilder:validation:Optional + PlacementPartitionNumber *float64 `json:"placementPartitionNumber,omitempty" tf:"placement_partition_number,omitempty"` + + // +kubebuilder:validation:Optional + PrivateDNSNameOptions *SpotInstanceRequestPrivateDNSNameOptionsParameters `json:"privateDnsNameOptions,omitempty" tf:"private_dns_name_options,omitempty"` + + // 
The private IP address assigned to the instance + // +kubebuilder:validation:Optional + PrivateIP *string `json:"privateIp,omitempty" tf:"private_ip,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // +kubebuilder:validation:Optional + RootBlockDevice *SpotInstanceRequestRootBlockDeviceParameters `json:"rootBlockDevice,omitempty" tf:"root_block_device,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=set + SecondaryPrivateIps []*string `json:"secondaryPrivateIps,omitempty" tf:"secondary_private_ips,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // +kubebuilder:validation:Optional + SourceDestCheck *bool `json:"sourceDestCheck,omitempty" tf:"source_dest_check,omitempty"` + + // The maximum price to request on the spot market. + // +kubebuilder:validation:Optional + SpotPrice *string `json:"spotPrice,omitempty" tf:"spot_price,omitempty"` + + // If set to one-time, after + // the instance is terminated, the spot request will be closed. + // +kubebuilder:validation:Optional + SpotType *string `json:"spotType,omitempty" tf:"spot_type,omitempty"` + + // The Spot Instance Request ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +kubebuilder:validation:Optional + Tenancy *string `json:"tenancy,omitempty" tf:"tenancy,omitempty"` + + // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // +kubebuilder:validation:Optional + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` + + // +kubebuilder:validation:Optional + UserDataReplaceOnChange *bool `json:"userDataReplaceOnChange,omitempty" tf:"user_data_replace_on_change,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` + + // The start date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately. + // +kubebuilder:validation:Optional + ValidFrom *string `json:"validFrom,omitempty" tf:"valid_from,omitempty"` + + // The end date and time of the request, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. 
The default end date is 7 days from the current date. + // +kubebuilder:validation:Optional + ValidUntil *string `json:"validUntil,omitempty" tf:"valid_until,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + VolumeTags map[string]*string `json:"volumeTags,omitempty" tf:"volume_tags,omitempty"` + + // +kubebuilder:validation:Optional + WaitForFulfillment *bool `json:"waitForFulfillment,omitempty" tf:"wait_for_fulfillment,omitempty"` +} + +type SpotInstanceRequestPrivateDNSNameOptionsInitParameters struct { + EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty" tf:"enable_resource_name_dns_a_record,omitempty"` + + EnableResourceNameDNSAaaaRecord *bool `json:"enableResourceNameDnsAaaaRecord,omitempty" tf:"enable_resource_name_dns_aaaa_record,omitempty"` + + HostnameType *string `json:"hostnameType,omitempty" tf:"hostname_type,omitempty"` +} + +type SpotInstanceRequestPrivateDNSNameOptionsObservation struct { + EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty" tf:"enable_resource_name_dns_a_record,omitempty"` + + EnableResourceNameDNSAaaaRecord *bool `json:"enableResourceNameDnsAaaaRecord,omitempty" tf:"enable_resource_name_dns_aaaa_record,omitempty"` + + HostnameType *string `json:"hostnameType,omitempty" tf:"hostname_type,omitempty"` +} + +type SpotInstanceRequestPrivateDNSNameOptionsParameters struct { + + // +kubebuilder:validation:Optional + EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty" tf:"enable_resource_name_dns_a_record,omitempty"` + + // +kubebuilder:validation:Optional + EnableResourceNameDNSAaaaRecord *bool `json:"enableResourceNameDnsAaaaRecord,omitempty" tf:"enable_resource_name_dns_aaaa_record,omitempty"` + + // +kubebuilder:validation:Optional + HostnameType *string `json:"hostnameType,omitempty" tf:"hostname_type,omitempty"` +} + +type SpotInstanceRequestRootBlockDeviceInitParameters struct { + 
DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The Spot Instance Request ID. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type SpotInstanceRequestRootBlockDeviceObservation struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The Spot Instance Request ID. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // The Spot Instance Request ID. 
+ VolumeID *string `json:"volumeId,omitempty" tf:"volume_id,omitempty"` + + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type SpotInstanceRequestRootBlockDeviceParameters struct { + + // +kubebuilder:validation:Optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The Spot Instance Request ID. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +kubebuilder:validation:Optional + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +// SpotInstanceRequestSpec defines the desired state of SpotInstanceRequest +type SpotInstanceRequestSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpotInstanceRequestParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpotInstanceRequestInitParameters `json:"initProvider,omitempty"` +} + +// SpotInstanceRequestStatus defines the observed state of SpotInstanceRequest. +type SpotInstanceRequestStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpotInstanceRequestObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpotInstanceRequest is the Schema for the SpotInstanceRequests API. Provides a Spot Instance Request resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SpotInstanceRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SpotInstanceRequestSpec `json:"spec"` + Status SpotInstanceRequestStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpotInstanceRequestList contains a list of SpotInstanceRequests +type SpotInstanceRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpotInstanceRequest `json:"items"` +} + +// Repository type metadata. +var ( + SpotInstanceRequest_Kind = "SpotInstanceRequest" + SpotInstanceRequest_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpotInstanceRequest_Kind}.String() + SpotInstanceRequest_KindAPIVersion = SpotInstanceRequest_Kind + "." + CRDGroupVersion.String() + SpotInstanceRequest_GroupVersionKind = CRDGroupVersion.WithKind(SpotInstanceRequest_Kind) +) + +func init() { + SchemeBuilder.Register(&SpotInstanceRequest{}, &SpotInstanceRequestList{}) +} diff --git a/apis/ec2/v1beta2/zz_trafficmirrorfilterrule_terraformed.go b/apis/ec2/v1beta2/zz_trafficmirrorfilterrule_terraformed.go new file mode 100755 index 0000000000..8145899d12 --- /dev/null +++ b/apis/ec2/v1beta2/zz_trafficmirrorfilterrule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TrafficMirrorFilterRule +func (mg *TrafficMirrorFilterRule) GetTerraformResourceType() string { + return "aws_ec2_traffic_mirror_filter_rule" +} + +// GetConnectionDetailsMapping for this TrafficMirrorFilterRule +func (tr *TrafficMirrorFilterRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TrafficMirrorFilterRule +func (tr *TrafficMirrorFilterRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TrafficMirrorFilterRule +func (tr *TrafficMirrorFilterRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TrafficMirrorFilterRule +func (tr *TrafficMirrorFilterRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TrafficMirrorFilterRule +func (tr *TrafficMirrorFilterRule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TrafficMirrorFilterRule +func (tr *TrafficMirrorFilterRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
TrafficMirrorFilterRule +func (tr *TrafficMirrorFilterRule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this TrafficMirrorFilterRule +func (tr *TrafficMirrorFilterRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this TrafficMirrorFilterRule using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *TrafficMirrorFilterRule) LateInitialize(attrs []byte) (bool, error) { + params := &TrafficMirrorFilterRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TrafficMirrorFilterRule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_trafficmirrorfilterrule_types.go b/apis/ec2/v1beta2/zz_trafficmirrorfilterrule_types.go new file mode 100755 index 0000000000..61fd134edf --- /dev/null +++ b/apis/ec2/v1beta2/zz_trafficmirrorfilterrule_types.go @@ -0,0 +1,277 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DestinationPortRangeInitParameters struct { + + // Starting port of the range + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Ending port of the range + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type DestinationPortRangeObservation struct { + + // Starting port of the range + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Ending port of the range + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type DestinationPortRangeParameters struct { + + // Starting port of the range + // +kubebuilder:validation:Optional + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Ending port of the range + // +kubebuilder:validation:Optional + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type SourcePortRangeInitParameters struct { + + // Starting port of the range + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Ending port of the range + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type SourcePortRangeObservation struct { + + // Starting port of the range + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Ending port of the range + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type SourcePortRangeParameters struct { + + // Starting port of the range + // +kubebuilder:validation:Optional + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Ending port of the range + // +kubebuilder:validation:Optional + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type TrafficMirrorFilterRuleInitParameters struct { + + // Description of the traffic mirror filter rule. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Destination CIDR block to assign to the Traffic Mirror rule. + DestinationCidrBlock *string `json:"destinationCidrBlock,omitempty" tf:"destination_cidr_block,omitempty"` + + // Destination port range. Supported only when the protocol is set to TCP(6) or UDP(17). See Traffic mirror port range documented below + DestinationPortRange *DestinationPortRangeInitParameters `json:"destinationPortRange,omitempty" tf:"destination_port_range,omitempty"` + + // Protocol number, for example 17 (UDP), to assign to the Traffic Mirror rule. For information about the protocol value, see Protocol Numbers on the Internet Assigned Numbers Authority (IANA) website. + Protocol *float64 `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Action to take (accept | reject) on the filtered traffic. Valid values are accept and reject + RuleAction *string `json:"ruleAction,omitempty" tf:"rule_action,omitempty"` + + // Number of the Traffic Mirror rule. This number must be unique for each Traffic Mirror rule in a given direction. The rules are processed in ascending order by rule number. + RuleNumber *float64 `json:"ruleNumber,omitempty" tf:"rule_number,omitempty"` + + // Source CIDR block to assign to the Traffic Mirror rule. + SourceCidrBlock *string `json:"sourceCidrBlock,omitempty" tf:"source_cidr_block,omitempty"` + + // Source port range. Supported only when the protocol is set to TCP(6) or UDP(17). See Traffic mirror port range documented below + SourcePortRange *SourcePortRangeInitParameters `json:"sourcePortRange,omitempty" tf:"source_port_range,omitempty"` + + // Direction of traffic to be captured. 
Valid values are ingress and egress + TrafficDirection *string `json:"trafficDirection,omitempty" tf:"traffic_direction,omitempty"` + + // ID of the traffic mirror filter to which this rule should be added + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.TrafficMirrorFilter + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TrafficMirrorFilterID *string `json:"trafficMirrorFilterId,omitempty" tf:"traffic_mirror_filter_id,omitempty"` + + // Reference to a TrafficMirrorFilter in ec2 to populate trafficMirrorFilterId. + // +kubebuilder:validation:Optional + TrafficMirrorFilterIDRef *v1.Reference `json:"trafficMirrorFilterIdRef,omitempty" tf:"-"` + + // Selector for a TrafficMirrorFilter in ec2 to populate trafficMirrorFilterId. + // +kubebuilder:validation:Optional + TrafficMirrorFilterIDSelector *v1.Selector `json:"trafficMirrorFilterIdSelector,omitempty" tf:"-"` +} + +type TrafficMirrorFilterRuleObservation struct { + + // ARN of the traffic mirror filter rule. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description of the traffic mirror filter rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Destination CIDR block to assign to the Traffic Mirror rule. + DestinationCidrBlock *string `json:"destinationCidrBlock,omitempty" tf:"destination_cidr_block,omitempty"` + + // Destination port range. Supported only when the protocol is set to TCP(6) or UDP(17). See Traffic mirror port range documented below + DestinationPortRange *DestinationPortRangeObservation `json:"destinationPortRange,omitempty" tf:"destination_port_range,omitempty"` + + // Name of the traffic mirror filter rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Protocol number, for example 17 (UDP), to assign to the Traffic Mirror rule. 
For information about the protocol value, see Protocol Numbers on the Internet Assigned Numbers Authority (IANA) website. + Protocol *float64 `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Action to take (accept | reject) on the filtered traffic. Valid values are accept and reject + RuleAction *string `json:"ruleAction,omitempty" tf:"rule_action,omitempty"` + + // Number of the Traffic Mirror rule. This number must be unique for each Traffic Mirror rule in a given direction. The rules are processed in ascending order by rule number. + RuleNumber *float64 `json:"ruleNumber,omitempty" tf:"rule_number,omitempty"` + + // Source CIDR block to assign to the Traffic Mirror rule. + SourceCidrBlock *string `json:"sourceCidrBlock,omitempty" tf:"source_cidr_block,omitempty"` + + // Source port range. Supported only when the protocol is set to TCP(6) or UDP(17). See Traffic mirror port range documented below + SourcePortRange *SourcePortRangeObservation `json:"sourcePortRange,omitempty" tf:"source_port_range,omitempty"` + + // Direction of traffic to be captured. Valid values are ingress and egress + TrafficDirection *string `json:"trafficDirection,omitempty" tf:"traffic_direction,omitempty"` + + // ID of the traffic mirror filter to which this rule should be added + TrafficMirrorFilterID *string `json:"trafficMirrorFilterId,omitempty" tf:"traffic_mirror_filter_id,omitempty"` +} + +type TrafficMirrorFilterRuleParameters struct { + + // Description of the traffic mirror filter rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Destination CIDR block to assign to the Traffic Mirror rule. + // +kubebuilder:validation:Optional + DestinationCidrBlock *string `json:"destinationCidrBlock,omitempty" tf:"destination_cidr_block,omitempty"` + + // Destination port range. Supported only when the protocol is set to TCP(6) or UDP(17). 
See Traffic mirror port range documented below + // +kubebuilder:validation:Optional + DestinationPortRange *DestinationPortRangeParameters `json:"destinationPortRange,omitempty" tf:"destination_port_range,omitempty"` + + // Protocol number, for example 17 (UDP), to assign to the Traffic Mirror rule. For information about the protocol value, see Protocol Numbers on the Internet Assigned Numbers Authority (IANA) website. + // +kubebuilder:validation:Optional + Protocol *float64 `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Action to take (accept | reject) on the filtered traffic. Valid values are accept and reject + // +kubebuilder:validation:Optional + RuleAction *string `json:"ruleAction,omitempty" tf:"rule_action,omitempty"` + + // Number of the Traffic Mirror rule. This number must be unique for each Traffic Mirror rule in a given direction. The rules are processed in ascending order by rule number. + // +kubebuilder:validation:Optional + RuleNumber *float64 `json:"ruleNumber,omitempty" tf:"rule_number,omitempty"` + + // Source CIDR block to assign to the Traffic Mirror rule. + // +kubebuilder:validation:Optional + SourceCidrBlock *string `json:"sourceCidrBlock,omitempty" tf:"source_cidr_block,omitempty"` + + // Source port range. Supported only when the protocol is set to TCP(6) or UDP(17). See Traffic mirror port range documented below + // +kubebuilder:validation:Optional + SourcePortRange *SourcePortRangeParameters `json:"sourcePortRange,omitempty" tf:"source_port_range,omitempty"` + + // Direction of traffic to be captured. 
Valid values are ingress and egress + // +kubebuilder:validation:Optional + TrafficDirection *string `json:"trafficDirection,omitempty" tf:"traffic_direction,omitempty"` + + // ID of the traffic mirror filter to which this rule should be added + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.TrafficMirrorFilter + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TrafficMirrorFilterID *string `json:"trafficMirrorFilterId,omitempty" tf:"traffic_mirror_filter_id,omitempty"` + + // Reference to a TrafficMirrorFilter in ec2 to populate trafficMirrorFilterId. + // +kubebuilder:validation:Optional + TrafficMirrorFilterIDRef *v1.Reference `json:"trafficMirrorFilterIdRef,omitempty" tf:"-"` + + // Selector for a TrafficMirrorFilter in ec2 to populate trafficMirrorFilterId. + // +kubebuilder:validation:Optional + TrafficMirrorFilterIDSelector *v1.Selector `json:"trafficMirrorFilterIdSelector,omitempty" tf:"-"` +} + +// TrafficMirrorFilterRuleSpec defines the desired state of TrafficMirrorFilterRule +type TrafficMirrorFilterRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TrafficMirrorFilterRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider TrafficMirrorFilterRuleInitParameters `json:"initProvider,omitempty"` +} + +// TrafficMirrorFilterRuleStatus defines the observed state of TrafficMirrorFilterRule. +type TrafficMirrorFilterRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TrafficMirrorFilterRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TrafficMirrorFilterRule is the Schema for the TrafficMirrorFilterRules API. Provides an Traffic mirror filter rule +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type TrafficMirrorFilterRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destinationCidrBlock) || (has(self.initProvider) && has(self.initProvider.destinationCidrBlock))",message="spec.forProvider.destinationCidrBlock is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ruleAction) || (has(self.initProvider) && has(self.initProvider.ruleAction))",message="spec.forProvider.ruleAction is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.ruleNumber) || (has(self.initProvider) && has(self.initProvider.ruleNumber))",message="spec.forProvider.ruleNumber is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sourceCidrBlock) || (has(self.initProvider) && has(self.initProvider.sourceCidrBlock))",message="spec.forProvider.sourceCidrBlock is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.trafficDirection) || (has(self.initProvider) && has(self.initProvider.trafficDirection))",message="spec.forProvider.trafficDirection is a required parameter" + Spec TrafficMirrorFilterRuleSpec `json:"spec"` + Status TrafficMirrorFilterRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TrafficMirrorFilterRuleList contains a list of TrafficMirrorFilterRules +type TrafficMirrorFilterRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TrafficMirrorFilterRule `json:"items"` +} + +// Repository type metadata. +var ( + TrafficMirrorFilterRule_Kind = "TrafficMirrorFilterRule" + TrafficMirrorFilterRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TrafficMirrorFilterRule_Kind}.String() + TrafficMirrorFilterRule_KindAPIVersion = TrafficMirrorFilterRule_Kind + "." 
+ CRDGroupVersion.String() + TrafficMirrorFilterRule_GroupVersionKind = CRDGroupVersion.WithKind(TrafficMirrorFilterRule_Kind) +) + +func init() { + SchemeBuilder.Register(&TrafficMirrorFilterRule{}, &TrafficMirrorFilterRuleList{}) +} diff --git a/apis/ec2/v1beta2/zz_vpcendpoint_terraformed.go b/apis/ec2/v1beta2/zz_vpcendpoint_terraformed.go new file mode 100755 index 0000000000..90091e0619 --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcendpoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPCEndpoint +func (mg *VPCEndpoint) GetTerraformResourceType() string { + return "aws_vpc_endpoint" +} + +// GetConnectionDetailsMapping for this VPCEndpoint +func (tr *VPCEndpoint) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VPCEndpoint +func (tr *VPCEndpoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPCEndpoint +func (tr *VPCEndpoint) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPCEndpoint +func (tr *VPCEndpoint) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPCEndpoint +func (tr *VPCEndpoint) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPCEndpoint +func (tr *VPCEndpoint) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPCEndpoint +func (tr *VPCEndpoint) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPCEndpoint +func (tr *VPCEndpoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPCEndpoint using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *VPCEndpoint) LateInitialize(attrs []byte) (bool, error) { + params := &VPCEndpointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPCEndpoint) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_vpcendpoint_types.go b/apis/ec2/v1beta2/zz_vpcendpoint_types.go new file mode 100755 index 0000000000..36231236d7 --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcendpoint_types.go @@ -0,0 +1,311 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DNSEntryInitParameters struct { +} + +type DNSEntryObservation struct { + + // The DNS name. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // The ID of the private hosted zone. + HostedZoneID *string `json:"hostedZoneId,omitempty" tf:"hosted_zone_id,omitempty"` +} + +type DNSEntryParameters struct { +} + +type DNSOptionsInitParameters struct { + + // The DNS records created for the endpoint. Valid values are ipv4, dualstack, service-defined, and ipv6. + DNSRecordIPType *string `json:"dnsRecordIpType,omitempty" tf:"dns_record_ip_type,omitempty"` + + // Indicates whether to enable private DNS only for inbound endpoints. This option is available only for services that support both gateway and interface endpoints. 
It routes traffic that originates from the VPC to the gateway endpoint and traffic that originates from on-premises to the interface endpoint. Default is false. Can only be specified if private_dns_enabled is true. + PrivateDNSOnlyForInboundResolverEndpoint *bool `json:"privateDnsOnlyForInboundResolverEndpoint,omitempty" tf:"private_dns_only_for_inbound_resolver_endpoint,omitempty"` +} + +type DNSOptionsObservation struct { + + // The DNS records created for the endpoint. Valid values are ipv4, dualstack, service-defined, and ipv6. + DNSRecordIPType *string `json:"dnsRecordIpType,omitempty" tf:"dns_record_ip_type,omitempty"` + + // Indicates whether to enable private DNS only for inbound endpoints. This option is available only for services that support both gateway and interface endpoints. It routes traffic that originates from the VPC to the gateway endpoint and traffic that originates from on-premises to the interface endpoint. Default is false. Can only be specified if private_dns_enabled is true. + PrivateDNSOnlyForInboundResolverEndpoint *bool `json:"privateDnsOnlyForInboundResolverEndpoint,omitempty" tf:"private_dns_only_for_inbound_resolver_endpoint,omitempty"` +} + +type DNSOptionsParameters struct { + + // The DNS records created for the endpoint. Valid values are ipv4, dualstack, service-defined, and ipv6. + // +kubebuilder:validation:Optional + DNSRecordIPType *string `json:"dnsRecordIpType,omitempty" tf:"dns_record_ip_type,omitempty"` + + // Indicates whether to enable private DNS only for inbound endpoints. This option is available only for services that support both gateway and interface endpoints. It routes traffic that originates from the VPC to the gateway endpoint and traffic that originates from on-premises to the interface endpoint. Default is false. Can only be specified if private_dns_enabled is true. 
+ // +kubebuilder:validation:Optional + PrivateDNSOnlyForInboundResolverEndpoint *bool `json:"privateDnsOnlyForInboundResolverEndpoint,omitempty" tf:"private_dns_only_for_inbound_resolver_endpoint,omitempty"` +} + +type VPCEndpointInitParameters struct { + + // Accept the VPC endpoint (the VPC endpoint and service need to be in the same AWS account). + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The DNS options for the endpoint. See dns_options below. + DNSOptions *DNSOptionsInitParameters `json:"dnsOptions,omitempty" tf:"dns_options,omitempty"` + + // The IP address type for the endpoint. Valid values are ipv4, dualstack, and ipv6. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // A policy to attach to the endpoint that controls access to the service. This is a JSON formatted string. Defaults to full access. All Gateway and some Interface endpoints support policies - see the relevant AWS documentation for more details. + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // Whether or not to associate a private hosted zone with the specified VPC. Applicable for endpoints of type Interface. Most users will want this enabled to allow services within the VPC to automatically use the endpoint. + // Defaults to false. + PrivateDNSEnabled *bool `json:"privateDnsEnabled,omitempty" tf:"private_dns_enabled,omitempty"` + + // The service name. For AWS services the service name is usually in the form com.amazonaws.. (the SageMaker Notebook service is an exception to this rule, the service name is in the form aws.sagemaker..notebook). 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpointService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("service_name",true) + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Reference to a VPCEndpointService in ec2 to populate serviceName. + // +kubebuilder:validation:Optional + ServiceNameRef *v1.Reference `json:"serviceNameRef,omitempty" tf:"-"` + + // Selector for a VPCEndpointService in ec2 to populate serviceName. + // +kubebuilder:validation:Optional + ServiceNameSelector *v1.Selector `json:"serviceNameSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The VPC endpoint type, Gateway, GatewayLoadBalancer, or Interface. Defaults to Gateway. + VPCEndpointType *string `json:"vpcEndpointType,omitempty" tf:"vpc_endpoint_type,omitempty"` + + // The ID of the VPC in which the endpoint will be used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type VPCEndpointObservation struct { + + // The Amazon Resource Name (ARN) of the VPC endpoint. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Accept the VPC endpoint (the VPC endpoint and service need to be in the same AWS account). + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The list of CIDR blocks for the exposed AWS service. Applicable for endpoints of type Gateway. 
+ CidrBlocks []*string `json:"cidrBlocks,omitempty" tf:"cidr_blocks,omitempty"` + + // The DNS entries for the VPC Endpoint. Applicable for endpoints of type Interface. DNS blocks are documented below. + DNSEntry []DNSEntryObservation `json:"dnsEntry,omitempty" tf:"dns_entry,omitempty"` + + // The DNS options for the endpoint. See dns_options below. + DNSOptions *DNSOptionsObservation `json:"dnsOptions,omitempty" tf:"dns_options,omitempty"` + + // The ID of the VPC endpoint. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The IP address type for the endpoint. Valid values are ipv4, dualstack, and ipv6. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // One or more network interfaces for the VPC Endpoint. Applicable for endpoints of type Interface. + // +listType=set + NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // The ID of the AWS account that owns the VPC endpoint. + OwnerID *string `json:"ownerId,omitempty" tf:"owner_id,omitempty"` + + // A policy to attach to the endpoint that controls access to the service. This is a JSON formatted string. Defaults to full access. All Gateway and some Interface endpoints support policies - see the relevant AWS documentation for more details. + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // The prefix list ID of the exposed AWS service. Applicable for endpoints of type Gateway. + PrefixListID *string `json:"prefixListId,omitempty" tf:"prefix_list_id,omitempty"` + + // Whether or not to associate a private hosted zone with the specified VPC. Applicable for endpoints of type Interface. Most users will want this enabled to allow services within the VPC to automatically use the endpoint. + // Defaults to false. 
+ PrivateDNSEnabled *bool `json:"privateDnsEnabled,omitempty" tf:"private_dns_enabled,omitempty"` + + // Whether or not the VPC Endpoint is being managed by its service - true or false. + RequesterManaged *bool `json:"requesterManaged,omitempty" tf:"requester_managed,omitempty"` + + // One or more route table IDs. Applicable for endpoints of type Gateway. + // +listType=set + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` + + // The ID of one or more security groups to associate with the network interface. Applicable for endpoints of type Interface. + // If no security groups are specified, the VPC's default security group is associated with the endpoint. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The service name. For AWS services the service name is usually in the form com.amazonaws.. (the SageMaker Notebook service is an exception to this rule, the service name is in the form aws.sagemaker..notebook). + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // The state of the VPC endpoint. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // The ID of one or more subnets in which to create a network interface for the endpoint. Applicable for endpoints of type GatewayLoadBalancer and Interface. Interface type endpoints cannot function without being assigned to a subnet. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The VPC endpoint type, Gateway, GatewayLoadBalancer, or Interface. 
Defaults to Gateway. + VPCEndpointType *string `json:"vpcEndpointType,omitempty" tf:"vpc_endpoint_type,omitempty"` + + // The ID of the VPC in which the endpoint will be used. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCEndpointParameters struct { + + // Accept the VPC endpoint (the VPC endpoint and service need to be in the same AWS account). + // +kubebuilder:validation:Optional + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The DNS options for the endpoint. See dns_options below. + // +kubebuilder:validation:Optional + DNSOptions *DNSOptionsParameters `json:"dnsOptions,omitempty" tf:"dns_options,omitempty"` + + // The IP address type for the endpoint. Valid values are ipv4, dualstack, and ipv6. + // +kubebuilder:validation:Optional + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // A policy to attach to the endpoint that controls access to the service. This is a JSON formatted string. Defaults to full access. All Gateway and some Interface endpoints support policies - see the relevant AWS documentation for more details. + // +kubebuilder:validation:Optional + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // Whether or not to associate a private hosted zone with the specified VPC. Applicable for endpoints of type Interface. Most users will want this enabled to allow services within the VPC to automatically use the endpoint. + // Defaults to false. + // +kubebuilder:validation:Optional + PrivateDNSEnabled *bool `json:"privateDnsEnabled,omitempty" tf:"private_dns_enabled,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The service name. For AWS services the service name is usually in the form com.amazonaws.. 
(the SageMaker Notebook service is an exception to this rule, the service name is in the form aws.sagemaker..notebook). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCEndpointService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("service_name",true) + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Reference to a VPCEndpointService in ec2 to populate serviceName. + // +kubebuilder:validation:Optional + ServiceNameRef *v1.Reference `json:"serviceNameRef,omitempty" tf:"-"` + + // Selector for a VPCEndpointService in ec2 to populate serviceName. + // +kubebuilder:validation:Optional + ServiceNameSelector *v1.Selector `json:"serviceNameSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The VPC endpoint type, Gateway, GatewayLoadBalancer, or Interface. Defaults to Gateway. + // +kubebuilder:validation:Optional + VPCEndpointType *string `json:"vpcEndpointType,omitempty" tf:"vpc_endpoint_type,omitempty"` + + // The ID of the VPC in which the endpoint will be used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. 
+ // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +// VPCEndpointSpec defines the desired state of VPCEndpoint +type VPCEndpointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPCEndpointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPCEndpointInitParameters `json:"initProvider,omitempty"` +} + +// VPCEndpointStatus defines the observed state of VPCEndpoint. +type VPCEndpointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPCEndpointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPCEndpoint is the Schema for the VPCEndpoints API. Provides a VPC Endpoint resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VPCEndpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec VPCEndpointSpec `json:"spec"` + Status VPCEndpointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPCEndpointList contains a list of VPCEndpoints +type VPCEndpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPCEndpoint `json:"items"` +} + +// Repository type metadata. +var ( + VPCEndpoint_Kind = "VPCEndpoint" + VPCEndpoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPCEndpoint_Kind}.String() + VPCEndpoint_KindAPIVersion = VPCEndpoint_Kind + "." + CRDGroupVersion.String() + VPCEndpoint_GroupVersionKind = CRDGroupVersion.WithKind(VPCEndpoint_Kind) +) + +func init() { + SchemeBuilder.Register(&VPCEndpoint{}, &VPCEndpointList{}) +} diff --git a/apis/ec2/v1beta2/zz_vpcipampoolcidr_terraformed.go b/apis/ec2/v1beta2/zz_vpcipampoolcidr_terraformed.go new file mode 100755 index 0000000000..b22baba406 --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcipampoolcidr_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPCIpamPoolCidr +func (mg *VPCIpamPoolCidr) GetTerraformResourceType() string { + return "aws_vpc_ipam_pool_cidr" +} + +// GetConnectionDetailsMapping for this VPCIpamPoolCidr +func (tr *VPCIpamPoolCidr) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VPCIpamPoolCidr +func (tr *VPCIpamPoolCidr) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPCIpamPoolCidr +func (tr *VPCIpamPoolCidr) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPCIpamPoolCidr +func (tr *VPCIpamPoolCidr) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPCIpamPoolCidr +func (tr *VPCIpamPoolCidr) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPCIpamPoolCidr +func (tr *VPCIpamPoolCidr) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPCIpamPoolCidr +func (tr *VPCIpamPoolCidr) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPCIpamPoolCidr +func (tr *VPCIpamPoolCidr) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPCIpamPoolCidr using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VPCIpamPoolCidr) LateInitialize(attrs []byte) (bool, error) { + params := &VPCIpamPoolCidrParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPCIpamPoolCidr) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_vpcipampoolcidr_types.go b/apis/ec2/v1beta2/zz_vpcipampoolcidr_types.go new file mode 100755 index 0000000000..58c58a17dd --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcipampoolcidr_types.go @@ -0,0 +1,183 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CidrAuthorizationContextInitParameters struct { + + // The plain-text authorization message for the prefix and account. + Message *string `json:"message,omitempty" tf:"message,omitempty"` + + // The signed authorization message for the prefix and account. + Signature *string `json:"signature,omitempty" tf:"signature,omitempty"` +} + +type CidrAuthorizationContextObservation struct { + + // The plain-text authorization message for the prefix and account. + Message *string `json:"message,omitempty" tf:"message,omitempty"` + + // The signed authorization message for the prefix and account. + Signature *string `json:"signature,omitempty" tf:"signature,omitempty"` +} + +type CidrAuthorizationContextParameters struct { + + // The plain-text authorization message for the prefix and account. + // +kubebuilder:validation:Optional + Message *string `json:"message,omitempty" tf:"message,omitempty"` + + // The signed authorization message for the prefix and account. + // +kubebuilder:validation:Optional + Signature *string `json:"signature,omitempty" tf:"signature,omitempty"` +} + +type VPCIpamPoolCidrInitParameters struct { + + // The CIDR you want to assign to the pool. 
Conflicts with netmask_length. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` + + // A signed document that proves that you are authorized to bring the specified IP address range to Amazon using BYOIP. This is not stored in the state file. See cidr_authorization_context for more information. + CidrAuthorizationContext *CidrAuthorizationContextInitParameters `json:"cidrAuthorizationContext,omitempty" tf:"cidr_authorization_context,omitempty"` + + // The ID of the pool to which you want to assign a CIDR. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCIpamPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + IpamPoolID *string `json:"ipamPoolId,omitempty" tf:"ipam_pool_id,omitempty"` + + // Reference to a VPCIpamPool in ec2 to populate ipamPoolId. + // +kubebuilder:validation:Optional + IpamPoolIDRef *v1.Reference `json:"ipamPoolIdRef,omitempty" tf:"-"` + + // Selector for a VPCIpamPool in ec2 to populate ipamPoolId. + // +kubebuilder:validation:Optional + IpamPoolIDSelector *v1.Selector `json:"ipamPoolIdSelector,omitempty" tf:"-"` + + // If provided, the cidr provisioned into the specified pool will be the next available cidr given this declared netmask length. Conflicts with cidr. + NetmaskLength *float64 `json:"netmaskLength,omitempty" tf:"netmask_length,omitempty"` +} + +type VPCIpamPoolCidrObservation struct { + + // The CIDR you want to assign to the pool. Conflicts with netmask_length. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` + + // A signed document that proves that you are authorized to bring the specified IP address range to Amazon using BYOIP. This is not stored in the state file. See cidr_authorization_context for more information. 
+ CidrAuthorizationContext *CidrAuthorizationContextObservation `json:"cidrAuthorizationContext,omitempty" tf:"cidr_authorization_context,omitempty"` + + // The ID of the IPAM Pool Cidr concatenated with the IPAM Pool ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The unique ID generated by AWS for the pool cidr. + IpamPoolCidrID *string `json:"ipamPoolCidrId,omitempty" tf:"ipam_pool_cidr_id,omitempty"` + + // The ID of the pool to which you want to assign a CIDR. + IpamPoolID *string `json:"ipamPoolId,omitempty" tf:"ipam_pool_id,omitempty"` + + // If provided, the cidr provisioned into the specified pool will be the next available cidr given this declared netmask length. Conflicts with cidr. + NetmaskLength *float64 `json:"netmaskLength,omitempty" tf:"netmask_length,omitempty"` +} + +type VPCIpamPoolCidrParameters struct { + + // The CIDR you want to assign to the pool. Conflicts with netmask_length. + // +kubebuilder:validation:Optional + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` + + // A signed document that proves that you are authorized to bring the specified IP address range to Amazon using BYOIP. This is not stored in the state file. See cidr_authorization_context for more information. + // +kubebuilder:validation:Optional + CidrAuthorizationContext *CidrAuthorizationContextParameters `json:"cidrAuthorizationContext,omitempty" tf:"cidr_authorization_context,omitempty"` + + // The ID of the pool to which you want to assign a CIDR. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPCIpamPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + IpamPoolID *string `json:"ipamPoolId,omitempty" tf:"ipam_pool_id,omitempty"` + + // Reference to a VPCIpamPool in ec2 to populate ipamPoolId. 
+ // +kubebuilder:validation:Optional + IpamPoolIDRef *v1.Reference `json:"ipamPoolIdRef,omitempty" tf:"-"` + + // Selector for a VPCIpamPool in ec2 to populate ipamPoolId. + // +kubebuilder:validation:Optional + IpamPoolIDSelector *v1.Selector `json:"ipamPoolIdSelector,omitempty" tf:"-"` + + // If provided, the cidr provisioned into the specified pool will be the next available cidr given this declared netmask length. Conflicts with cidr. + // +kubebuilder:validation:Optional + NetmaskLength *float64 `json:"netmaskLength,omitempty" tf:"netmask_length,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// VPCIpamPoolCidrSpec defines the desired state of VPCIpamPoolCidr +type VPCIpamPoolCidrSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPCIpamPoolCidrParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPCIpamPoolCidrInitParameters `json:"initProvider,omitempty"` +} + +// VPCIpamPoolCidrStatus defines the observed state of VPCIpamPoolCidr. 
+type VPCIpamPoolCidrStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPCIpamPoolCidrObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPCIpamPoolCidr is the Schema for the VPCIpamPoolCidrs API. Provisions a CIDR from an IPAM address pool. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VPCIpamPoolCidr struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec VPCIpamPoolCidrSpec `json:"spec"` + Status VPCIpamPoolCidrStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPCIpamPoolCidrList contains a list of VPCIpamPoolCidrs +type VPCIpamPoolCidrList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPCIpamPoolCidr `json:"items"` +} + +// Repository type metadata. +var ( + VPCIpamPoolCidr_Kind = "VPCIpamPoolCidr" + VPCIpamPoolCidr_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPCIpamPoolCidr_Kind}.String() + VPCIpamPoolCidr_KindAPIVersion = VPCIpamPoolCidr_Kind + "." 
+ CRDGroupVersion.String() + VPCIpamPoolCidr_GroupVersionKind = CRDGroupVersion.WithKind(VPCIpamPoolCidr_Kind) +) + +func init() { + SchemeBuilder.Register(&VPCIpamPoolCidr{}, &VPCIpamPoolCidrList{}) +} diff --git a/apis/ec2/v1beta2/zz_vpcpeeringconnection_terraformed.go b/apis/ec2/v1beta2/zz_vpcpeeringconnection_terraformed.go new file mode 100755 index 0000000000..8d3a853461 --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcpeeringconnection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPCPeeringConnection +func (mg *VPCPeeringConnection) GetTerraformResourceType() string { + return "aws_vpc_peering_connection" +} + +// GetConnectionDetailsMapping for this VPCPeeringConnection +func (tr *VPCPeeringConnection) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VPCPeeringConnection +func (tr *VPCPeeringConnection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPCPeeringConnection +func (tr *VPCPeeringConnection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPCPeeringConnection +func (tr *VPCPeeringConnection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPCPeeringConnection 
+func (tr *VPCPeeringConnection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPCPeeringConnection +func (tr *VPCPeeringConnection) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPCPeeringConnection +func (tr *VPCPeeringConnection) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPCPeeringConnection +func (tr *VPCPeeringConnection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPCPeeringConnection using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *VPCPeeringConnection) LateInitialize(attrs []byte) (bool, error) { + params := &VPCPeeringConnectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPCPeeringConnection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_vpcpeeringconnection_types.go b/apis/ec2/v1beta2/zz_vpcpeeringconnection_types.go new file mode 100755 index 0000000000..10c7262db6 --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcpeeringconnection_types.go @@ -0,0 +1,237 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccepterInitParameters struct { +} + +type AccepterObservation struct { + + // Allow a local VPC to resolve public DNS hostnames to + // private IP addresses when queried from instances in the peer VPC. + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type AccepterParameters struct { +} + +type RequesterInitParameters struct { +} + +type RequesterObservation struct { + + // Allow a local VPC to resolve public DNS hostnames to + // private IP addresses when queried from instances in the peer VPC. 
+ AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type RequesterParameters struct { +} + +type VPCPeeringConnectionInitParameters struct { + + // Accept the peering (both VPCs need to be in the same AWS account and region). + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The AWS account ID of the target peer VPC. + // Defaults to the account ID the AWS provider is currently connected to, so must be managed if connecting cross-account. + PeerOwnerID *string `json:"peerOwnerId,omitempty" tf:"peer_owner_id,omitempty"` + + // The region of the accepter VPC of the VPC Peering Connection. auto_accept must be false, + // and use the aws_vpc_peering_connection_accepter to manage the accepter side. + PeerRegion *string `json:"peerRegion,omitempty" tf:"peer_region,omitempty"` + + // The ID of the target VPC with which you are creating the VPC Peering Connection. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + PeerVPCID *string `json:"peerVpcId,omitempty" tf:"peer_vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate peerVpcId. + // +kubebuilder:validation:Optional + PeerVPCIDRef *v1.Reference `json:"peerVpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate peerVpcId. + // +kubebuilder:validation:Optional + PeerVPCIDSelector *v1.Selector `json:"peerVpcIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the requester VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. 
+ // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type VPCPeeringConnectionObservation struct { + + // The status of the VPC Peering Connection request. + AcceptStatus *string `json:"acceptStatus,omitempty" tf:"accept_status,omitempty"` + + // An optional configuration block that allows for VPC Peering Connection options to be set for the VPC that accepts + // the peering connection (a maximum of one). + Accepter *AccepterObservation `json:"accepter,omitempty" tf:"accepter,omitempty"` + + // Accept the peering (both VPCs need to be in the same AWS account and region). + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The ID of the VPC Peering Connection. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The AWS account ID of the target peer VPC. + // Defaults to the account ID the AWS provider is currently connected to, so must be managed if connecting cross-account. + PeerOwnerID *string `json:"peerOwnerId,omitempty" tf:"peer_owner_id,omitempty"` + + // The region of the accepter VPC of the VPC Peering Connection. auto_accept must be false, + // and use the aws_vpc_peering_connection_accepter to manage the accepter side. + PeerRegion *string `json:"peerRegion,omitempty" tf:"peer_region,omitempty"` + + // The ID of the target VPC with which you are creating the VPC Peering Connection. + PeerVPCID *string `json:"peerVpcId,omitempty" tf:"peer_vpc_id,omitempty"` + + // A optional configuration block that allows for VPC Peering Connection options to be set for the VPC that requests + // the peering connection (a maximum of one). + Requester *RequesterObservation `json:"requester,omitempty" tf:"requester,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The ID of the requester VPC. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCPeeringConnectionParameters struct { + + // Accept the peering (both VPCs need to be in the same AWS account and region). + // +kubebuilder:validation:Optional + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The AWS account ID of the target peer VPC. + // Defaults to the account ID the AWS provider is currently connected to, so must be managed if connecting cross-account. + // +kubebuilder:validation:Optional + PeerOwnerID *string `json:"peerOwnerId,omitempty" tf:"peer_owner_id,omitempty"` + + // The region of the accepter VPC of the VPC Peering Connection. auto_accept must be false, + // and use the aws_vpc_peering_connection_accepter to manage the accepter side. + // +kubebuilder:validation:Optional + PeerRegion *string `json:"peerRegion,omitempty" tf:"peer_region,omitempty"` + + // The ID of the target VPC with which you are creating the VPC Peering Connection. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +kubebuilder:validation:Optional + PeerVPCID *string `json:"peerVpcId,omitempty" tf:"peer_vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate peerVpcId. + // +kubebuilder:validation:Optional + PeerVPCIDRef *v1.Reference `json:"peerVpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate peerVpcId. + // +kubebuilder:validation:Optional + PeerVPCIDSelector *v1.Selector `json:"peerVpcIdSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the requester VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +// VPCPeeringConnectionSpec defines the desired state of VPCPeeringConnection +type VPCPeeringConnectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPCPeeringConnectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPCPeeringConnectionInitParameters `json:"initProvider,omitempty"` +} + +// VPCPeeringConnectionStatus defines the observed state of VPCPeeringConnection. 
+type VPCPeeringConnectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPCPeeringConnectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPCPeeringConnection is the Schema for the VPCPeeringConnections API. Provides a resource to manage a VPC peering connection. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VPCPeeringConnection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec VPCPeeringConnectionSpec `json:"spec"` + Status VPCPeeringConnectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPCPeeringConnectionList contains a list of VPCPeeringConnections +type VPCPeeringConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPCPeeringConnection `json:"items"` +} + +// Repository type metadata. +var ( + VPCPeeringConnection_Kind = "VPCPeeringConnection" + VPCPeeringConnection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPCPeeringConnection_Kind}.String() + VPCPeeringConnection_KindAPIVersion = VPCPeeringConnection_Kind + "." 
+ CRDGroupVersion.String() + VPCPeeringConnection_GroupVersionKind = CRDGroupVersion.WithKind(VPCPeeringConnection_Kind) +) + +func init() { + SchemeBuilder.Register(&VPCPeeringConnection{}, &VPCPeeringConnectionList{}) +} diff --git a/apis/ec2/v1beta2/zz_vpcpeeringconnectionaccepter_terraformed.go b/apis/ec2/v1beta2/zz_vpcpeeringconnectionaccepter_terraformed.go new file mode 100755 index 0000000000..ddc83beadf --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcpeeringconnectionaccepter_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPCPeeringConnectionAccepter +func (mg *VPCPeeringConnectionAccepter) GetTerraformResourceType() string { + return "aws_vpc_peering_connection_accepter" +} + +// GetConnectionDetailsMapping for this VPCPeeringConnectionAccepter +func (tr *VPCPeeringConnectionAccepter) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VPCPeeringConnectionAccepter +func (tr *VPCPeeringConnectionAccepter) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPCPeeringConnectionAccepter +func (tr *VPCPeeringConnectionAccepter) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPCPeeringConnectionAccepter +func (tr *VPCPeeringConnectionAccepter) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPCPeeringConnectionAccepter +func (tr *VPCPeeringConnectionAccepter) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPCPeeringConnectionAccepter +func (tr *VPCPeeringConnectionAccepter) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPCPeeringConnectionAccepter +func (tr *VPCPeeringConnectionAccepter) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPCPeeringConnectionAccepter +func (tr *VPCPeeringConnectionAccepter) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPCPeeringConnectionAccepter using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VPCPeeringConnectionAccepter) LateInitialize(attrs []byte) (bool, error) { + params := &VPCPeeringConnectionAccepterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPCPeeringConnectionAccepter) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_vpcpeeringconnectionaccepter_types.go b/apis/ec2/v1beta2/zz_vpcpeeringconnectionaccepter_types.go new file mode 100755 index 0000000000..805cec9753 --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcpeeringconnectionaccepter_types.go @@ -0,0 +1,233 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type VPCPeeringConnectionAccepterAccepterInitParameters struct { + + // Indicates whether a local VPC can resolve public DNS hostnames to + // private IP addresses when queried from instances in a peer VPC. 
+ AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionAccepterAccepterObservation struct { + + // Indicates whether a local VPC can resolve public DNS hostnames to + // private IP addresses when queried from instances in a peer VPC. + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionAccepterAccepterParameters struct { + + // Indicates whether a local VPC can resolve public DNS hostnames to + // private IP addresses when queried from instances in a peer VPC. + // +kubebuilder:validation:Optional + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionAccepterInitParameters struct { + + // A configuration block that describes [VPC Peering Connection] + // (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC. + Accepter *VPCPeeringConnectionAccepterAccepterInitParameters `json:"accepter,omitempty" tf:"accepter,omitempty"` + + // Whether or not to accept the peering request. Defaults to false. + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // A configuration block that describes [VPC Peering Connection] + // (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. + Requester *VPCPeeringConnectionAccepterRequesterInitParameters `json:"requester,omitempty" tf:"requester,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The VPC Peering Connection ID to manage. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCPeeringConnection + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VPCPeeringConnectionID *string `json:"vpcPeeringConnectionId,omitempty" tf:"vpc_peering_connection_id,omitempty"` + + // Reference to a VPCPeeringConnection in ec2 to populate vpcPeeringConnectionId. + // +kubebuilder:validation:Optional + VPCPeeringConnectionIDRef *v1.Reference `json:"vpcPeeringConnectionIdRef,omitempty" tf:"-"` + + // Selector for a VPCPeeringConnection in ec2 to populate vpcPeeringConnectionId. + // +kubebuilder:validation:Optional + VPCPeeringConnectionIDSelector *v1.Selector `json:"vpcPeeringConnectionIdSelector,omitempty" tf:"-"` +} + +type VPCPeeringConnectionAccepterObservation struct { + + // The status of the VPC Peering Connection request. + AcceptStatus *string `json:"acceptStatus,omitempty" tf:"accept_status,omitempty"` + + // A configuration block that describes [VPC Peering Connection] + // (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC. + Accepter *VPCPeeringConnectionAccepterAccepterObservation `json:"accepter,omitempty" tf:"accepter,omitempty"` + + // Whether or not to accept the peering request. Defaults to false. + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // The ID of the VPC Peering Connection. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The AWS account ID of the owner of the requester VPC. + PeerOwnerID *string `json:"peerOwnerId,omitempty" tf:"peer_owner_id,omitempty"` + + // The region of the accepter VPC. + PeerRegion *string `json:"peerRegion,omitempty" tf:"peer_region,omitempty"` + + // The ID of the requester VPC. 
+ PeerVPCID *string `json:"peerVpcId,omitempty" tf:"peer_vpc_id,omitempty"` + + // A configuration block that describes [VPC Peering Connection] + // (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. + Requester *VPCPeeringConnectionAccepterRequesterObservation `json:"requester,omitempty" tf:"requester,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The ID of the accepter VPC. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // The VPC Peering Connection ID to manage. + VPCPeeringConnectionID *string `json:"vpcPeeringConnectionId,omitempty" tf:"vpc_peering_connection_id,omitempty"` +} + +type VPCPeeringConnectionAccepterParameters struct { + + // A configuration block that describes [VPC Peering Connection] + // (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC. + // +kubebuilder:validation:Optional + Accepter *VPCPeeringConnectionAccepterAccepterParameters `json:"accepter,omitempty" tf:"accepter,omitempty"` + + // Whether or not to accept the peering request. Defaults to false. + // +kubebuilder:validation:Optional + AutoAccept *bool `json:"autoAccept,omitempty" tf:"auto_accept,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A configuration block that describes [VPC Peering Connection] + // (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. 
+ // +kubebuilder:validation:Optional + Requester *VPCPeeringConnectionAccepterRequesterParameters `json:"requester,omitempty" tf:"requester,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The VPC Peering Connection ID to manage. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCPeeringConnection + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VPCPeeringConnectionID *string `json:"vpcPeeringConnectionId,omitempty" tf:"vpc_peering_connection_id,omitempty"` + + // Reference to a VPCPeeringConnection in ec2 to populate vpcPeeringConnectionId. + // +kubebuilder:validation:Optional + VPCPeeringConnectionIDRef *v1.Reference `json:"vpcPeeringConnectionIdRef,omitempty" tf:"-"` + + // Selector for a VPCPeeringConnection in ec2 to populate vpcPeeringConnectionId. + // +kubebuilder:validation:Optional + VPCPeeringConnectionIDSelector *v1.Selector `json:"vpcPeeringConnectionIdSelector,omitempty" tf:"-"` +} + +type VPCPeeringConnectionAccepterRequesterInitParameters struct { + + // Indicates whether a local VPC can resolve public DNS hostnames to + // private IP addresses when queried from instances in a peer VPC. + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionAccepterRequesterObservation struct { + + // Indicates whether a local VPC can resolve public DNS hostnames to + // private IP addresses when queried from instances in a peer VPC. 
+ AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionAccepterRequesterParameters struct { + + // Indicates whether a local VPC can resolve public DNS hostnames to + // private IP addresses when queried from instances in a peer VPC. + // +kubebuilder:validation:Optional + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +// VPCPeeringConnectionAccepterSpec defines the desired state of VPCPeeringConnectionAccepter +type VPCPeeringConnectionAccepterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPCPeeringConnectionAccepterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPCPeeringConnectionAccepterInitParameters `json:"initProvider,omitempty"` +} + +// VPCPeeringConnectionAccepterStatus defines the observed state of VPCPeeringConnectionAccepter. +type VPCPeeringConnectionAccepterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPCPeeringConnectionAccepterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPCPeeringConnectionAccepter is the Schema for the VPCPeeringConnectionAccepters API. 
Manage the accepter's side of a VPC Peering Connection. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VPCPeeringConnectionAccepter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec VPCPeeringConnectionAccepterSpec `json:"spec"` + Status VPCPeeringConnectionAccepterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPCPeeringConnectionAccepterList contains a list of VPCPeeringConnectionAccepters +type VPCPeeringConnectionAccepterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPCPeeringConnectionAccepter `json:"items"` +} + +// Repository type metadata. +var ( + VPCPeeringConnectionAccepter_Kind = "VPCPeeringConnectionAccepter" + VPCPeeringConnectionAccepter_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPCPeeringConnectionAccepter_Kind}.String() + VPCPeeringConnectionAccepter_KindAPIVersion = VPCPeeringConnectionAccepter_Kind + "." 
+ CRDGroupVersion.String() + VPCPeeringConnectionAccepter_GroupVersionKind = CRDGroupVersion.WithKind(VPCPeeringConnectionAccepter_Kind) +) + +func init() { + SchemeBuilder.Register(&VPCPeeringConnectionAccepter{}, &VPCPeeringConnectionAccepterList{}) +} diff --git a/apis/ec2/v1beta2/zz_vpcpeeringconnectionoptions_terraformed.go b/apis/ec2/v1beta2/zz_vpcpeeringconnectionoptions_terraformed.go new file mode 100755 index 0000000000..29f981bc44 --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcpeeringconnectionoptions_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPCPeeringConnectionOptions +func (mg *VPCPeeringConnectionOptions) GetTerraformResourceType() string { + return "aws_vpc_peering_connection_options" +} + +// GetConnectionDetailsMapping for this VPCPeeringConnectionOptions +func (tr *VPCPeeringConnectionOptions) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VPCPeeringConnectionOptions +func (tr *VPCPeeringConnectionOptions) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPCPeeringConnectionOptions +func (tr *VPCPeeringConnectionOptions) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPCPeeringConnectionOptions +func (tr *VPCPeeringConnectionOptions) 
GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPCPeeringConnectionOptions +func (tr *VPCPeeringConnectionOptions) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPCPeeringConnectionOptions +func (tr *VPCPeeringConnectionOptions) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPCPeeringConnectionOptions +func (tr *VPCPeeringConnectionOptions) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPCPeeringConnectionOptions +func (tr *VPCPeeringConnectionOptions) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPCPeeringConnectionOptions using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VPCPeeringConnectionOptions) LateInitialize(attrs []byte) (bool, error) { + params := &VPCPeeringConnectionOptionsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPCPeeringConnectionOptions) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_vpcpeeringconnectionoptions_types.go b/apis/ec2/v1beta2/zz_vpcpeeringconnectionoptions_types.go new file mode 100755 index 0000000000..0b5060c8c3 --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpcpeeringconnectionoptions_types.go @@ -0,0 +1,179 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type VPCPeeringConnectionOptionsAccepterInitParameters struct { + + // Allow a local VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the peer VPC. 
+ AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionOptionsAccepterObservation struct { + + // Allow a local VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the peer VPC. + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionOptionsAccepterParameters struct { + + // Allow a local VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the peer VPC. + // +kubebuilder:validation:Optional + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionOptionsInitParameters struct { + + // An optional configuration block that allows for VPC Peering Connection options to be set for the VPC that acceptsthe peering connection (a maximum of one). + Accepter *VPCPeeringConnectionOptionsAccepterInitParameters `json:"accepter,omitempty" tf:"accepter,omitempty"` + + // A optional configuration block that allows for VPC Peering Connection options to be set for the VPC that requeststhe peering connection (a maximum of one). + Requester *VPCPeeringConnectionOptionsRequesterInitParameters `json:"requester,omitempty" tf:"requester,omitempty"` + + // The ID of the requester VPC peering connection. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCPeeringConnection + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VPCPeeringConnectionID *string `json:"vpcPeeringConnectionId,omitempty" tf:"vpc_peering_connection_id,omitempty"` + + // Reference to a VPCPeeringConnection in ec2 to populate vpcPeeringConnectionId. 
+ // +kubebuilder:validation:Optional + VPCPeeringConnectionIDRef *v1.Reference `json:"vpcPeeringConnectionIdRef,omitempty" tf:"-"` + + // Selector for a VPCPeeringConnection in ec2 to populate vpcPeeringConnectionId. + // +kubebuilder:validation:Optional + VPCPeeringConnectionIDSelector *v1.Selector `json:"vpcPeeringConnectionIdSelector,omitempty" tf:"-"` +} + +type VPCPeeringConnectionOptionsObservation struct { + + // An optional configuration block that allows for VPC Peering Connection options to be set for the VPC that acceptsthe peering connection (a maximum of one). + Accepter *VPCPeeringConnectionOptionsAccepterObservation `json:"accepter,omitempty" tf:"accepter,omitempty"` + + // The ID of the VPC Peering Connection Options. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A optional configuration block that allows for VPC Peering Connection options to be set for the VPC that requeststhe peering connection (a maximum of one). + Requester *VPCPeeringConnectionOptionsRequesterObservation `json:"requester,omitempty" tf:"requester,omitempty"` + + // The ID of the requester VPC peering connection. + VPCPeeringConnectionID *string `json:"vpcPeeringConnectionId,omitempty" tf:"vpc_peering_connection_id,omitempty"` +} + +type VPCPeeringConnectionOptionsParameters struct { + + // An optional configuration block that allows for VPC Peering Connection options to be set for the VPC that acceptsthe peering connection (a maximum of one). + // +kubebuilder:validation:Optional + Accepter *VPCPeeringConnectionOptionsAccepterParameters `json:"accepter,omitempty" tf:"accepter,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A optional configuration block that allows for VPC Peering Connection options to be set for the VPC that requeststhe peering connection (a maximum of one). 
+ // +kubebuilder:validation:Optional + Requester *VPCPeeringConnectionOptionsRequesterParameters `json:"requester,omitempty" tf:"requester,omitempty"` + + // The ID of the requester VPC peering connection. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.VPCPeeringConnection + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VPCPeeringConnectionID *string `json:"vpcPeeringConnectionId,omitempty" tf:"vpc_peering_connection_id,omitempty"` + + // Reference to a VPCPeeringConnection in ec2 to populate vpcPeeringConnectionId. + // +kubebuilder:validation:Optional + VPCPeeringConnectionIDRef *v1.Reference `json:"vpcPeeringConnectionIdRef,omitempty" tf:"-"` + + // Selector for a VPCPeeringConnection in ec2 to populate vpcPeeringConnectionId. + // +kubebuilder:validation:Optional + VPCPeeringConnectionIDSelector *v1.Selector `json:"vpcPeeringConnectionIdSelector,omitempty" tf:"-"` +} + +type VPCPeeringConnectionOptionsRequesterInitParameters struct { + + // Allow a local VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the peer VPC. + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionOptionsRequesterObservation struct { + + // Allow a local VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the peer VPC. + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +type VPCPeeringConnectionOptionsRequesterParameters struct { + + // Allow a local VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the peer VPC. 
+ // +kubebuilder:validation:Optional + AllowRemoteVPCDNSResolution *bool `json:"allowRemoteVpcDnsResolution,omitempty" tf:"allow_remote_vpc_dns_resolution,omitempty"` +} + +// VPCPeeringConnectionOptionsSpec defines the desired state of VPCPeeringConnectionOptions +type VPCPeeringConnectionOptionsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPCPeeringConnectionOptionsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPCPeeringConnectionOptionsInitParameters `json:"initProvider,omitempty"` +} + +// VPCPeeringConnectionOptionsStatus defines the observed state of VPCPeeringConnectionOptions. +type VPCPeeringConnectionOptionsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPCPeeringConnectionOptionsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPCPeeringConnectionOptions is the Schema for the VPCPeeringConnectionOptionss API. Provides a resource to manage VPC peering connection options. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VPCPeeringConnectionOptions struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec VPCPeeringConnectionOptionsSpec `json:"spec"` + Status VPCPeeringConnectionOptionsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPCPeeringConnectionOptionsList contains a list of VPCPeeringConnectionOptionss +type VPCPeeringConnectionOptionsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPCPeeringConnectionOptions `json:"items"` +} + +// Repository type metadata. +var ( + VPCPeeringConnectionOptions_Kind = "VPCPeeringConnectionOptions" + VPCPeeringConnectionOptions_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPCPeeringConnectionOptions_Kind}.String() + VPCPeeringConnectionOptions_KindAPIVersion = VPCPeeringConnectionOptions_Kind + "." 
+ CRDGroupVersion.String() + VPCPeeringConnectionOptions_GroupVersionKind = CRDGroupVersion.WithKind(VPCPeeringConnectionOptions_Kind) +) + +func init() { + SchemeBuilder.Register(&VPCPeeringConnectionOptions{}, &VPCPeeringConnectionOptionsList{}) +} diff --git a/apis/ec2/v1beta2/zz_vpnconnection_terraformed.go b/apis/ec2/v1beta2/zz_vpnconnection_terraformed.go new file mode 100755 index 0000000000..666c49ae24 --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpnconnection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPNConnection +func (mg *VPNConnection) GetTerraformResourceType() string { + return "aws_vpn_connection" +} + +// GetConnectionDetailsMapping for this VPNConnection +func (tr *VPNConnection) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"customer_gateway_configuration": "status.atProvider.customerGatewayConfiguration", "tunnel1_preshared_key": "tunnel1PresharedKeySecretRef", "tunnel2_preshared_key": "tunnel2PresharedKeySecretRef"} +} + +// GetObservation of this VPNConnection +func (tr *VPNConnection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPNConnection +func (tr *VPNConnection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPNConnection +func (tr 
*VPNConnection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPNConnection +func (tr *VPNConnection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPNConnection +func (tr *VPNConnection) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPNConnection +func (tr *VPNConnection) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPNConnection +func (tr *VPNConnection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPNConnection using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VPNConnection) LateInitialize(attrs []byte) (bool, error) { + params := &VPNConnectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPNConnection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ec2/v1beta2/zz_vpnconnection_types.go b/apis/ec2/v1beta2/zz_vpnconnection_types.go new file mode 100755 index 0000000000..dab5885f4e --- /dev/null +++ b/apis/ec2/v1beta2/zz_vpnconnection_types.go @@ -0,0 +1,961 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudwatchLogOptionsInitParameters struct { + + // Enable or disable VPN tunnel logging feature. The default is false. + LogEnabled *bool `json:"logEnabled,omitempty" tf:"log_enabled,omitempty"` + + // The Amazon Resource Name (ARN) of the CloudWatch log group to send logs to. 
+ LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` + + // Set log format. Default format is json. Possible values are: json and text. The default is json. + LogOutputFormat *string `json:"logOutputFormat,omitempty" tf:"log_output_format,omitempty"` +} + +type CloudwatchLogOptionsObservation struct { + + // Enable or disable VPN tunnel logging feature. The default is false. + LogEnabled *bool `json:"logEnabled,omitempty" tf:"log_enabled,omitempty"` + + // The Amazon Resource Name (ARN) of the CloudWatch log group to send logs to. + LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` + + // Set log format. Default format is json. Possible values are: json and text. The default is json. + LogOutputFormat *string `json:"logOutputFormat,omitempty" tf:"log_output_format,omitempty"` +} + +type CloudwatchLogOptionsParameters struct { + + // Enable or disable VPN tunnel logging feature. The default is false. + // +kubebuilder:validation:Optional + LogEnabled *bool `json:"logEnabled,omitempty" tf:"log_enabled,omitempty"` + + // The Amazon Resource Name (ARN) of the CloudWatch log group to send logs to. + // +kubebuilder:validation:Optional + LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` + + // Set log format. Default format is json. Possible values are: json and text. The default is json. + // +kubebuilder:validation:Optional + LogOutputFormat *string `json:"logOutputFormat,omitempty" tf:"log_output_format,omitempty"` +} + +type RoutesInitParameters struct { +} + +type RoutesObservation struct { + + // The CIDR block associated with the local subnet of the customer data center. + DestinationCidrBlock *string `json:"destinationCidrBlock,omitempty" tf:"destination_cidr_block,omitempty"` + + // Indicates how the routes were provided. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // The current state of the static route. 
+ State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type RoutesParameters struct { +} + +type Tunnel1LogOptionsInitParameters struct { + + // Options for sending VPN tunnel logs to CloudWatch. See CloudWatch Log Options below for more details. + CloudwatchLogOptions *CloudwatchLogOptionsInitParameters `json:"cloudwatchLogOptions,omitempty" tf:"cloudwatch_log_options,omitempty"` +} + +type Tunnel1LogOptionsObservation struct { + + // Options for sending VPN tunnel logs to CloudWatch. See CloudWatch Log Options below for more details. + CloudwatchLogOptions *CloudwatchLogOptionsObservation `json:"cloudwatchLogOptions,omitempty" tf:"cloudwatch_log_options,omitempty"` +} + +type Tunnel1LogOptionsParameters struct { + + // Options for sending VPN tunnel logs to CloudWatch. See CloudWatch Log Options below for more details. + // +kubebuilder:validation:Optional + CloudwatchLogOptions *CloudwatchLogOptionsParameters `json:"cloudwatchLogOptions,omitempty" tf:"cloudwatch_log_options,omitempty"` +} + +type Tunnel2LogOptionsCloudwatchLogOptionsInitParameters struct { + + // Enable or disable VPN tunnel logging feature. The default is false. + LogEnabled *bool `json:"logEnabled,omitempty" tf:"log_enabled,omitempty"` + + // The Amazon Resource Name (ARN) of the CloudWatch log group to send logs to. + LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` + + // Set log format. Default format is json. Possible values are: json and text. The default is json. + LogOutputFormat *string `json:"logOutputFormat,omitempty" tf:"log_output_format,omitempty"` +} + +type Tunnel2LogOptionsCloudwatchLogOptionsObservation struct { + + // Enable or disable VPN tunnel logging feature. The default is false. + LogEnabled *bool `json:"logEnabled,omitempty" tf:"log_enabled,omitempty"` + + // The Amazon Resource Name (ARN) of the CloudWatch log group to send logs to. 
+ LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` + + // Set log format. Default format is json. Possible values are: json and text. The default is json. + LogOutputFormat *string `json:"logOutputFormat,omitempty" tf:"log_output_format,omitempty"` +} + +type Tunnel2LogOptionsCloudwatchLogOptionsParameters struct { + + // Enable or disable VPN tunnel logging feature. The default is false. + // +kubebuilder:validation:Optional + LogEnabled *bool `json:"logEnabled,omitempty" tf:"log_enabled,omitempty"` + + // The Amazon Resource Name (ARN) of the CloudWatch log group to send logs to. + // +kubebuilder:validation:Optional + LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` + + // Set log format. Default format is json. Possible values are: json and text. The default is json. + // +kubebuilder:validation:Optional + LogOutputFormat *string `json:"logOutputFormat,omitempty" tf:"log_output_format,omitempty"` +} + +type Tunnel2LogOptionsInitParameters struct { + + // Options for sending VPN tunnel logs to CloudWatch. See CloudWatch Log Options below for more details. + CloudwatchLogOptions *Tunnel2LogOptionsCloudwatchLogOptionsInitParameters `json:"cloudwatchLogOptions,omitempty" tf:"cloudwatch_log_options,omitempty"` +} + +type Tunnel2LogOptionsObservation struct { + + // Options for sending VPN tunnel logs to CloudWatch. See CloudWatch Log Options below for more details. + CloudwatchLogOptions *Tunnel2LogOptionsCloudwatchLogOptionsObservation `json:"cloudwatchLogOptions,omitempty" tf:"cloudwatch_log_options,omitempty"` +} + +type Tunnel2LogOptionsParameters struct { + + // Options for sending VPN tunnel logs to CloudWatch. See CloudWatch Log Options below for more details. 
+ // +kubebuilder:validation:Optional + CloudwatchLogOptions *Tunnel2LogOptionsCloudwatchLogOptionsParameters `json:"cloudwatchLogOptions,omitempty" tf:"cloudwatch_log_options,omitempty"` +} + +type VPNConnectionInitParameters struct { + + // The ID of the customer gateway. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.CustomerGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + CustomerGatewayID *string `json:"customerGatewayId,omitempty" tf:"customer_gateway_id,omitempty"` + + // Reference to a CustomerGateway in ec2 to populate customerGatewayId. + // +kubebuilder:validation:Optional + CustomerGatewayIDRef *v1.Reference `json:"customerGatewayIdRef,omitempty" tf:"-"` + + // Selector for a CustomerGateway in ec2 to populate customerGatewayId. + // +kubebuilder:validation:Optional + CustomerGatewayIDSelector *v1.Selector `json:"customerGatewayIdSelector,omitempty" tf:"-"` + + // Indicate whether to enable acceleration for the VPN connection. Supports only EC2 Transit Gateway. + EnableAcceleration *bool `json:"enableAcceleration,omitempty" tf:"enable_acceleration,omitempty"` + + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. + LocalIPv4NetworkCidr *string `json:"localIpv4NetworkCidr,omitempty" tf:"local_ipv4_network_cidr,omitempty"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + LocalIPv6NetworkCidr *string `json:"localIpv6NetworkCidr,omitempty" tf:"local_ipv6_network_cidr,omitempty"` + + // Indicates if a Public S2S VPN or Private S2S VPN over AWS Direct Connect. Valid values are PublicIpv4 | PrivateIpv4 + OutsideIPAddressType *string `json:"outsideIpAddressType,omitempty" tf:"outside_ip_address_type,omitempty"` + + // The IPv4 CIDR on the AWS side of the VPN connection. 
+ RemoteIPv4NetworkCidr *string `json:"remoteIpv4NetworkCidr,omitempty" tf:"remote_ipv4_network_cidr,omitempty"` + + // The IPv6 CIDR on the AWS side of the VPN connection. + RemoteIPv6NetworkCidr *string `json:"remoteIpv6NetworkCidr,omitempty" tf:"remote_ipv6_network_cidr,omitempty"` + + // Whether the VPN connection uses static routes exclusively. Static routes must be used for devices that don't support BGP. + StaticRoutesOnly *bool `json:"staticRoutesOnly,omitempty" tf:"static_routes_only,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the EC2 Transit Gateway. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.TransitGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TransitGatewayID *string `json:"transitGatewayId,omitempty" tf:"transit_gateway_id,omitempty"` + + // Reference to a TransitGateway in ec2 to populate transitGatewayId. + // +kubebuilder:validation:Optional + TransitGatewayIDRef *v1.Reference `json:"transitGatewayIdRef,omitempty" tf:"-"` + + // Selector for a TransitGateway in ec2 to populate transitGatewayId. + // +kubebuilder:validation:Optional + TransitGatewayIDSelector *v1.Selector `json:"transitGatewayIdSelector,omitempty" tf:"-"` + + // . The attachment ID of the Transit Gateway attachment to Direct Connect Gateway. The ID is obtained through a data source only. + TransportTransitGatewayAttachmentID *string `json:"transportTransitGatewayAttachmentId,omitempty" tf:"transport_transit_gateway_attachment_id,omitempty"` + + // The action to take after DPD timeout occurs for the first VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are clear | none | restart. 
+ Tunnel1DpdTimeoutAction *string `json:"tunnel1DpdTimeoutAction,omitempty" tf:"tunnel1_dpd_timeout_action,omitempty"` + + // The number of seconds after which a DPD timeout occurs for the first VPN tunnel. Valid value is equal or higher than 30. + Tunnel1DpdTimeoutSeconds *float64 `json:"tunnel1DpdTimeoutSeconds,omitempty" tf:"tunnel1_dpd_timeout_seconds,omitempty"` + + // Turn on or off tunnel endpoint lifecycle control feature for the first VPN tunnel. Valid values are true | false. + Tunnel1EnableTunnelLifecycleControl *bool `json:"tunnel1EnableTunnelLifecycleControl,omitempty" tf:"tunnel1_enable_tunnel_lifecycle_control,omitempty"` + + // The IKE versions that are permitted for the first VPN tunnel. Valid values are ikev1 | ikev2. + // +listType=set + Tunnel1IkeVersions []*string `json:"tunnel1IkeVersions,omitempty" tf:"tunnel1_ike_versions,omitempty"` + + // The CIDR block of the inside IP addresses for the first VPN tunnel. Valid value is a size /30 CIDR block from the 169.254.0.0/16 range. + Tunnel1InsideCidr *string `json:"tunnel1InsideCidr,omitempty" tf:"tunnel1_inside_cidr,omitempty"` + + // The range of inside IPv6 addresses for the first VPN tunnel. Supports only EC2 Transit Gateway. Valid value is a size /126 CIDR block from the local fd00::/8 range. + Tunnel1InsideIPv6Cidr *string `json:"tunnel1InsideIpv6Cidr,omitempty" tf:"tunnel1_inside_ipv6_cidr,omitempty"` + + // Options for logging VPN tunnel activity. See Log Options below for more details. + Tunnel1LogOptions *Tunnel1LogOptionsInitParameters `json:"tunnel1LogOptions,omitempty" tf:"tunnel1_log_options,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. 
+ // +listType=set + Tunnel1Phase1DhGroupNumbers []*float64 `json:"tunnel1Phase1DhGroupNumbers,omitempty" tf:"tunnel1_phase1_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +listType=set + Tunnel1Phase1EncryptionAlgorithms []*string `json:"tunnel1Phase1EncryptionAlgorithms,omitempty" tf:"tunnel1_phase1_encryption_algorithms,omitempty"` + + // One or more integrity algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +listType=set + Tunnel1Phase1IntegrityAlgorithms []*string `json:"tunnel1Phase1IntegrityAlgorithms,omitempty" tf:"tunnel1_phase1_integrity_algorithms,omitempty"` + + // The lifetime for phase 1 of the IKE negotiation for the first VPN tunnel, in seconds. Valid value is between 900 and 28800. + Tunnel1Phase1LifetimeSeconds *float64 `json:"tunnel1Phase1LifetimeSeconds,omitempty" tf:"tunnel1_phase1_lifetime_seconds,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +listType=set + Tunnel1Phase2DhGroupNumbers []*float64 `json:"tunnel1Phase2DhGroupNumbers,omitempty" tf:"tunnel1_phase2_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +listType=set + Tunnel1Phase2EncryptionAlgorithms []*string `json:"tunnel1Phase2EncryptionAlgorithms,omitempty" tf:"tunnel1_phase2_encryption_algorithms,omitempty"` + + // List of one or more integrity algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. 
Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +listType=set + Tunnel1Phase2IntegrityAlgorithms []*string `json:"tunnel1Phase2IntegrityAlgorithms,omitempty" tf:"tunnel1_phase2_integrity_algorithms,omitempty"` + + // The lifetime for phase 2 of the IKE negotiation for the first VPN tunnel, in seconds. Valid value is between 900 and 3600. + Tunnel1Phase2LifetimeSeconds *float64 `json:"tunnel1Phase2LifetimeSeconds,omitempty" tf:"tunnel1_phase2_lifetime_seconds,omitempty"` + + // The preshared key of the first VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero(0). Allowed characters are alphanumeric characters, periods(.) and underscores(_). + Tunnel1PresharedKeySecretRef *v1.SecretKeySelector `json:"tunnel1PresharedKeySecretRef,omitempty" tf:"-"` + + // The percentage of the rekey window for the first VPN tunnel (determined by tunnel1_rekey_margin_time_seconds) during which the rekey time is randomly selected. Valid value is between 0 and 100. + Tunnel1RekeyFuzzPercentage *float64 `json:"tunnel1RekeyFuzzPercentage,omitempty" tf:"tunnel1_rekey_fuzz_percentage,omitempty"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the first VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for tunnel1_rekey_fuzz_percentage. Valid value is between 60 and half of tunnel1_phase2_lifetime_seconds. + Tunnel1RekeyMarginTimeSeconds *float64 `json:"tunnel1RekeyMarginTimeSeconds,omitempty" tf:"tunnel1_rekey_margin_time_seconds,omitempty"` + + // The number of packets in an IKE replay window for the first VPN tunnel. Valid value is between 64 and 2048. + Tunnel1ReplayWindowSize *float64 `json:"tunnel1ReplayWindowSize,omitempty" tf:"tunnel1_replay_window_size,omitempty"` + + // The action to take when the establishing the tunnel for the first VPN connection. 
By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. Valid values are add | start. + Tunnel1StartupAction *string `json:"tunnel1StartupAction,omitempty" tf:"tunnel1_startup_action,omitempty"` + + // The action to take after DPD timeout occurs for the second VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are clear | none | restart. + Tunnel2DpdTimeoutAction *string `json:"tunnel2DpdTimeoutAction,omitempty" tf:"tunnel2_dpd_timeout_action,omitempty"` + + // The number of seconds after which a DPD timeout occurs for the second VPN tunnel. Valid value is equal or higher than 30. + Tunnel2DpdTimeoutSeconds *float64 `json:"tunnel2DpdTimeoutSeconds,omitempty" tf:"tunnel2_dpd_timeout_seconds,omitempty"` + + // Turn on or off tunnel endpoint lifecycle control feature for the second VPN tunnel. Valid values are true | false. + Tunnel2EnableTunnelLifecycleControl *bool `json:"tunnel2EnableTunnelLifecycleControl,omitempty" tf:"tunnel2_enable_tunnel_lifecycle_control,omitempty"` + + // The IKE versions that are permitted for the second VPN tunnel. Valid values are ikev1 | ikev2. + // +listType=set + Tunnel2IkeVersions []*string `json:"tunnel2IkeVersions,omitempty" tf:"tunnel2_ike_versions,omitempty"` + + // The CIDR block of the inside IP addresses for the second VPN tunnel. Valid value is a size /30 CIDR block from the 169.254.0.0/16 range. + Tunnel2InsideCidr *string `json:"tunnel2InsideCidr,omitempty" tf:"tunnel2_inside_cidr,omitempty"` + + // The range of inside IPv6 addresses for the second VPN tunnel. Supports only EC2 Transit Gateway. Valid value is a size /126 CIDR block from the local fd00::/8 range. + Tunnel2InsideIPv6Cidr *string `json:"tunnel2InsideIpv6Cidr,omitempty" tf:"tunnel2_inside_ipv6_cidr,omitempty"` + + // Options for logging VPN tunnel activity. See Log Options below for more details. 
+ Tunnel2LogOptions *Tunnel2LogOptionsInitParameters `json:"tunnel2LogOptions,omitempty" tf:"tunnel2_log_options,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +listType=set + Tunnel2Phase1DhGroupNumbers []*float64 `json:"tunnel2Phase1DhGroupNumbers,omitempty" tf:"tunnel2_phase1_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +listType=set + Tunnel2Phase1EncryptionAlgorithms []*string `json:"tunnel2Phase1EncryptionAlgorithms,omitempty" tf:"tunnel2_phase1_encryption_algorithms,omitempty"` + + // One or more integrity algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +listType=set + Tunnel2Phase1IntegrityAlgorithms []*string `json:"tunnel2Phase1IntegrityAlgorithms,omitempty" tf:"tunnel2_phase1_integrity_algorithms,omitempty"` + + // The lifetime for phase 1 of the IKE negotiation for the second VPN tunnel, in seconds. Valid value is between 900 and 28800. + Tunnel2Phase1LifetimeSeconds *float64 `json:"tunnel2Phase1LifetimeSeconds,omitempty" tf:"tunnel2_phase1_lifetime_seconds,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +listType=set + Tunnel2Phase2DhGroupNumbers []*float64 `json:"tunnel2Phase2DhGroupNumbers,omitempty" tf:"tunnel2_phase2_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. 
Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +listType=set + Tunnel2Phase2EncryptionAlgorithms []*string `json:"tunnel2Phase2EncryptionAlgorithms,omitempty" tf:"tunnel2_phase2_encryption_algorithms,omitempty"` + + // List of one or more integrity algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +listType=set + Tunnel2Phase2IntegrityAlgorithms []*string `json:"tunnel2Phase2IntegrityAlgorithms,omitempty" tf:"tunnel2_phase2_integrity_algorithms,omitempty"` + + // The lifetime for phase 2 of the IKE negotiation for the second VPN tunnel, in seconds. Valid value is between 900 and 3600. + Tunnel2Phase2LifetimeSeconds *float64 `json:"tunnel2Phase2LifetimeSeconds,omitempty" tf:"tunnel2_phase2_lifetime_seconds,omitempty"` + + // The preshared key of the second VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero(0). Allowed characters are alphanumeric characters, periods(.) and underscores(_). + Tunnel2PresharedKeySecretRef *v1.SecretKeySelector `json:"tunnel2PresharedKeySecretRef,omitempty" tf:"-"` + + // The percentage of the rekey window for the second VPN tunnel (determined by tunnel2_rekey_margin_time_seconds) during which the rekey time is randomly selected. Valid value is between 0 and 100. + Tunnel2RekeyFuzzPercentage *float64 `json:"tunnel2RekeyFuzzPercentage,omitempty" tf:"tunnel2_rekey_fuzz_percentage,omitempty"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the second VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for tunnel2_rekey_fuzz_percentage. Valid value is between 60 and half of tunnel2_phase2_lifetime_seconds. 
+ Tunnel2RekeyMarginTimeSeconds *float64 `json:"tunnel2RekeyMarginTimeSeconds,omitempty" tf:"tunnel2_rekey_margin_time_seconds,omitempty"` + + // The number of packets in an IKE replay window for the second VPN tunnel. Valid value is between 64 and 2048. + Tunnel2ReplayWindowSize *float64 `json:"tunnel2ReplayWindowSize,omitempty" tf:"tunnel2_replay_window_size,omitempty"` + + // The action to take when the establishing the tunnel for the second VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. Valid values are add | start. + Tunnel2StartupAction *string `json:"tunnel2StartupAction,omitempty" tf:"tunnel2_startup_action,omitempty"` + + // Indicate whether the VPN tunnels process IPv4 or IPv6 traffic. Valid values are ipv4 | ipv6. ipv6 Supports only EC2 Transit Gateway. + TunnelInsideIPVersion *string `json:"tunnelInsideIpVersion,omitempty" tf:"tunnel_inside_ip_version,omitempty"` + + // The type of VPN connection. The only type AWS supports at this time is "ipsec.1". + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.CustomerGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("type",false) + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Reference to a CustomerGateway in ec2 to populate type. + // +kubebuilder:validation:Optional + TypeRef *v1.Reference `json:"typeRef,omitempty" tf:"-"` + + // Selector for a CustomerGateway in ec2 to populate type. + // +kubebuilder:validation:Optional + TypeSelector *v1.Selector `json:"typeSelector,omitempty" tf:"-"` + + // The ID of the Virtual Private Gateway. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPNGateway + VPNGatewayID *string `json:"vpnGatewayId,omitempty" tf:"vpn_gateway_id,omitempty"` + + // Reference to a VPNGateway in ec2 to populate vpnGatewayId. + // +kubebuilder:validation:Optional + VPNGatewayIDRef *v1.Reference `json:"vpnGatewayIdRef,omitempty" tf:"-"` + + // Selector for a VPNGateway in ec2 to populate vpnGatewayId. + // +kubebuilder:validation:Optional + VPNGatewayIDSelector *v1.Selector `json:"vpnGatewayIdSelector,omitempty" tf:"-"` +} + +type VPNConnectionObservation struct { + + // Amazon Resource Name (ARN) of the VPN Connection. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ARN of the core network. + CoreNetworkArn *string `json:"coreNetworkArn,omitempty" tf:"core_network_arn,omitempty"` + + // The ARN of the core network attachment. + CoreNetworkAttachmentArn *string `json:"coreNetworkAttachmentArn,omitempty" tf:"core_network_attachment_arn,omitempty"` + + // The ID of the customer gateway. + CustomerGatewayID *string `json:"customerGatewayId,omitempty" tf:"customer_gateway_id,omitempty"` + + // Indicate whether to enable acceleration for the VPN connection. Supports only EC2 Transit Gateway. + EnableAcceleration *bool `json:"enableAcceleration,omitempty" tf:"enable_acceleration,omitempty"` + + // The amazon-assigned ID of the VPN connection. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. + LocalIPv4NetworkCidr *string `json:"localIpv4NetworkCidr,omitempty" tf:"local_ipv4_network_cidr,omitempty"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + LocalIPv6NetworkCidr *string `json:"localIpv6NetworkCidr,omitempty" tf:"local_ipv6_network_cidr,omitempty"` + + // Indicates if a Public S2S VPN or Private S2S VPN over AWS Direct Connect. 
Valid values are PublicIpv4 | PrivateIpv4 + OutsideIPAddressType *string `json:"outsideIpAddressType,omitempty" tf:"outside_ip_address_type,omitempty"` + + // The IPv4 CIDR on the AWS side of the VPN connection. + RemoteIPv4NetworkCidr *string `json:"remoteIpv4NetworkCidr,omitempty" tf:"remote_ipv4_network_cidr,omitempty"` + + // The IPv6 CIDR on the AWS side of the VPN connection. + RemoteIPv6NetworkCidr *string `json:"remoteIpv6NetworkCidr,omitempty" tf:"remote_ipv6_network_cidr,omitempty"` + + // The static routes associated with the VPN connection. Detailed below. + Routes []RoutesObservation `json:"routes,omitempty" tf:"routes,omitempty"` + + // Whether the VPN connection uses static routes exclusively. Static routes must be used for devices that don't support BGP. + StaticRoutesOnly *bool `json:"staticRoutesOnly,omitempty" tf:"static_routes_only,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // When associated with an EC2 Transit Gateway (transit_gateway_id argument), the attachment ID. See also the aws_ec2_tag resource for tagging the EC2 Transit Gateway VPN Attachment. + TransitGatewayAttachmentID *string `json:"transitGatewayAttachmentId,omitempty" tf:"transit_gateway_attachment_id,omitempty"` + + // The ID of the EC2 Transit Gateway. + TransitGatewayID *string `json:"transitGatewayId,omitempty" tf:"transit_gateway_id,omitempty"` + + // . The attachment ID of the Transit Gateway attachment to Direct Connect Gateway. The ID is obtained through a data source only. 
+ TransportTransitGatewayAttachmentID *string `json:"transportTransitGatewayAttachmentId,omitempty" tf:"transport_transit_gateway_attachment_id,omitempty"` + + // The public IP address of the first VPN tunnel. + Tunnel1Address *string `json:"tunnel1Address,omitempty" tf:"tunnel1_address,omitempty"` + + // The bgp asn number of the first VPN tunnel. + Tunnel1BGPAsn *string `json:"tunnel1BgpAsn,omitempty" tf:"tunnel1_bgp_asn,omitempty"` + + // The bgp holdtime of the first VPN tunnel. + Tunnel1BGPHoldtime *float64 `json:"tunnel1BgpHoldtime,omitempty" tf:"tunnel1_bgp_holdtime,omitempty"` + + // The RFC 6890 link-local address of the first VPN tunnel (Customer Gateway Side). + Tunnel1CgwInsideAddress *string `json:"tunnel1CgwInsideAddress,omitempty" tf:"tunnel1_cgw_inside_address,omitempty"` + + // The action to take after DPD timeout occurs for the first VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are clear | none | restart. + Tunnel1DpdTimeoutAction *string `json:"tunnel1DpdTimeoutAction,omitempty" tf:"tunnel1_dpd_timeout_action,omitempty"` + + // The number of seconds after which a DPD timeout occurs for the first VPN tunnel. Valid value is equal or higher than 30. + Tunnel1DpdTimeoutSeconds *float64 `json:"tunnel1DpdTimeoutSeconds,omitempty" tf:"tunnel1_dpd_timeout_seconds,omitempty"` + + // Turn on or off tunnel endpoint lifecycle control feature for the first VPN tunnel. Valid values are true | false. + Tunnel1EnableTunnelLifecycleControl *bool `json:"tunnel1EnableTunnelLifecycleControl,omitempty" tf:"tunnel1_enable_tunnel_lifecycle_control,omitempty"` + + // The IKE versions that are permitted for the first VPN tunnel. Valid values are ikev1 | ikev2. + // +listType=set + Tunnel1IkeVersions []*string `json:"tunnel1IkeVersions,omitempty" tf:"tunnel1_ike_versions,omitempty"` + + // The CIDR block of the inside IP addresses for the first VPN tunnel. 
Valid value is a size /30 CIDR block from the 169.254.0.0/16 range. + Tunnel1InsideCidr *string `json:"tunnel1InsideCidr,omitempty" tf:"tunnel1_inside_cidr,omitempty"` + + // The range of inside IPv6 addresses for the first VPN tunnel. Supports only EC2 Transit Gateway. Valid value is a size /126 CIDR block from the local fd00::/8 range. + Tunnel1InsideIPv6Cidr *string `json:"tunnel1InsideIpv6Cidr,omitempty" tf:"tunnel1_inside_ipv6_cidr,omitempty"` + + // Options for logging VPN tunnel activity. See Log Options below for more details. + Tunnel1LogOptions *Tunnel1LogOptionsObservation `json:"tunnel1LogOptions,omitempty" tf:"tunnel1_log_options,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +listType=set + Tunnel1Phase1DhGroupNumbers []*float64 `json:"tunnel1Phase1DhGroupNumbers,omitempty" tf:"tunnel1_phase1_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +listType=set + Tunnel1Phase1EncryptionAlgorithms []*string `json:"tunnel1Phase1EncryptionAlgorithms,omitempty" tf:"tunnel1_phase1_encryption_algorithms,omitempty"` + + // One or more integrity algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +listType=set + Tunnel1Phase1IntegrityAlgorithms []*string `json:"tunnel1Phase1IntegrityAlgorithms,omitempty" tf:"tunnel1_phase1_integrity_algorithms,omitempty"` + + // The lifetime for phase 1 of the IKE negotiation for the first VPN tunnel, in seconds. Valid value is between 900 and 28800. 
+ Tunnel1Phase1LifetimeSeconds *float64 `json:"tunnel1Phase1LifetimeSeconds,omitempty" tf:"tunnel1_phase1_lifetime_seconds,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +listType=set + Tunnel1Phase2DhGroupNumbers []*float64 `json:"tunnel1Phase2DhGroupNumbers,omitempty" tf:"tunnel1_phase2_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +listType=set + Tunnel1Phase2EncryptionAlgorithms []*string `json:"tunnel1Phase2EncryptionAlgorithms,omitempty" tf:"tunnel1_phase2_encryption_algorithms,omitempty"` + + // List of one or more integrity algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +listType=set + Tunnel1Phase2IntegrityAlgorithms []*string `json:"tunnel1Phase2IntegrityAlgorithms,omitempty" tf:"tunnel1_phase2_integrity_algorithms,omitempty"` + + // The lifetime for phase 2 of the IKE negotiation for the first VPN tunnel, in seconds. Valid value is between 900 and 3600. + Tunnel1Phase2LifetimeSeconds *float64 `json:"tunnel1Phase2LifetimeSeconds,omitempty" tf:"tunnel1_phase2_lifetime_seconds,omitempty"` + + // The percentage of the rekey window for the first VPN tunnel (determined by tunnel1_rekey_margin_time_seconds) during which the rekey time is randomly selected. Valid value is between 0 and 100. + Tunnel1RekeyFuzzPercentage *float64 `json:"tunnel1RekeyFuzzPercentage,omitempty" tf:"tunnel1_rekey_fuzz_percentage,omitempty"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the first VPN connection performs an IKE rekey. 
The exact time of the rekey is randomly selected based on the value for tunnel1_rekey_fuzz_percentage. Valid value is between 60 and half of tunnel1_phase2_lifetime_seconds. + Tunnel1RekeyMarginTimeSeconds *float64 `json:"tunnel1RekeyMarginTimeSeconds,omitempty" tf:"tunnel1_rekey_margin_time_seconds,omitempty"` + + // The number of packets in an IKE replay window for the first VPN tunnel. Valid value is between 64 and 2048. + Tunnel1ReplayWindowSize *float64 `json:"tunnel1ReplayWindowSize,omitempty" tf:"tunnel1_replay_window_size,omitempty"` + + // The action to take when the establishing the tunnel for the first VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. Valid values are add | start. + Tunnel1StartupAction *string `json:"tunnel1StartupAction,omitempty" tf:"tunnel1_startup_action,omitempty"` + + // The RFC 6890 link-local address of the first VPN tunnel (VPN Gateway Side). + Tunnel1VgwInsideAddress *string `json:"tunnel1VgwInsideAddress,omitempty" tf:"tunnel1_vgw_inside_address,omitempty"` + + // The public IP address of the second VPN tunnel. + Tunnel2Address *string `json:"tunnel2Address,omitempty" tf:"tunnel2_address,omitempty"` + + // The bgp asn number of the second VPN tunnel. + Tunnel2BGPAsn *string `json:"tunnel2BgpAsn,omitempty" tf:"tunnel2_bgp_asn,omitempty"` + + // The bgp holdtime of the second VPN tunnel. + Tunnel2BGPHoldtime *float64 `json:"tunnel2BgpHoldtime,omitempty" tf:"tunnel2_bgp_holdtime,omitempty"` + + // The RFC 6890 link-local address of the second VPN tunnel (Customer Gateway Side). + Tunnel2CgwInsideAddress *string `json:"tunnel2CgwInsideAddress,omitempty" tf:"tunnel2_cgw_inside_address,omitempty"` + + // The action to take after DPD timeout occurs for the second VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are clear | none | restart. 
+ Tunnel2DpdTimeoutAction *string `json:"tunnel2DpdTimeoutAction,omitempty" tf:"tunnel2_dpd_timeout_action,omitempty"` + + // The number of seconds after which a DPD timeout occurs for the second VPN tunnel. Valid value is equal or higher than 30. + Tunnel2DpdTimeoutSeconds *float64 `json:"tunnel2DpdTimeoutSeconds,omitempty" tf:"tunnel2_dpd_timeout_seconds,omitempty"` + + // Turn on or off tunnel endpoint lifecycle control feature for the second VPN tunnel. Valid values are true | false. + Tunnel2EnableTunnelLifecycleControl *bool `json:"tunnel2EnableTunnelLifecycleControl,omitempty" tf:"tunnel2_enable_tunnel_lifecycle_control,omitempty"` + + // The IKE versions that are permitted for the second VPN tunnel. Valid values are ikev1 | ikev2. + // +listType=set + Tunnel2IkeVersions []*string `json:"tunnel2IkeVersions,omitempty" tf:"tunnel2_ike_versions,omitempty"` + + // The CIDR block of the inside IP addresses for the second VPN tunnel. Valid value is a size /30 CIDR block from the 169.254.0.0/16 range. + Tunnel2InsideCidr *string `json:"tunnel2InsideCidr,omitempty" tf:"tunnel2_inside_cidr,omitempty"` + + // The range of inside IPv6 addresses for the second VPN tunnel. Supports only EC2 Transit Gateway. Valid value is a size /126 CIDR block from the local fd00::/8 range. + Tunnel2InsideIPv6Cidr *string `json:"tunnel2InsideIpv6Cidr,omitempty" tf:"tunnel2_inside_ipv6_cidr,omitempty"` + + // Options for logging VPN tunnel activity. See Log Options below for more details. + Tunnel2LogOptions *Tunnel2LogOptionsObservation `json:"tunnel2LogOptions,omitempty" tf:"tunnel2_log_options,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. 
+ // +listType=set + Tunnel2Phase1DhGroupNumbers []*float64 `json:"tunnel2Phase1DhGroupNumbers,omitempty" tf:"tunnel2_phase1_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +listType=set + Tunnel2Phase1EncryptionAlgorithms []*string `json:"tunnel2Phase1EncryptionAlgorithms,omitempty" tf:"tunnel2_phase1_encryption_algorithms,omitempty"` + + // One or more integrity algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +listType=set + Tunnel2Phase1IntegrityAlgorithms []*string `json:"tunnel2Phase1IntegrityAlgorithms,omitempty" tf:"tunnel2_phase1_integrity_algorithms,omitempty"` + + // The lifetime for phase 1 of the IKE negotiation for the second VPN tunnel, in seconds. Valid value is between 900 and 28800. + Tunnel2Phase1LifetimeSeconds *float64 `json:"tunnel2Phase1LifetimeSeconds,omitempty" tf:"tunnel2_phase1_lifetime_seconds,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +listType=set + Tunnel2Phase2DhGroupNumbers []*float64 `json:"tunnel2Phase2DhGroupNumbers,omitempty" tf:"tunnel2_phase2_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +listType=set + Tunnel2Phase2EncryptionAlgorithms []*string `json:"tunnel2Phase2EncryptionAlgorithms,omitempty" tf:"tunnel2_phase2_encryption_algorithms,omitempty"` + + // List of one or more integrity algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. 
Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +listType=set + Tunnel2Phase2IntegrityAlgorithms []*string `json:"tunnel2Phase2IntegrityAlgorithms,omitempty" tf:"tunnel2_phase2_integrity_algorithms,omitempty"` + + // The lifetime for phase 2 of the IKE negotiation for the second VPN tunnel, in seconds. Valid value is between 900 and 3600. + Tunnel2Phase2LifetimeSeconds *float64 `json:"tunnel2Phase2LifetimeSeconds,omitempty" tf:"tunnel2_phase2_lifetime_seconds,omitempty"` + + // The percentage of the rekey window for the second VPN tunnel (determined by tunnel2_rekey_margin_time_seconds) during which the rekey time is randomly selected. Valid value is between 0 and 100. + Tunnel2RekeyFuzzPercentage *float64 `json:"tunnel2RekeyFuzzPercentage,omitempty" tf:"tunnel2_rekey_fuzz_percentage,omitempty"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the second VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for tunnel2_rekey_fuzz_percentage. Valid value is between 60 and half of tunnel2_phase2_lifetime_seconds. + Tunnel2RekeyMarginTimeSeconds *float64 `json:"tunnel2RekeyMarginTimeSeconds,omitempty" tf:"tunnel2_rekey_margin_time_seconds,omitempty"` + + // The number of packets in an IKE replay window for the second VPN tunnel. Valid value is between 64 and 2048. + Tunnel2ReplayWindowSize *float64 `json:"tunnel2ReplayWindowSize,omitempty" tf:"tunnel2_replay_window_size,omitempty"` + + // The action to take when the establishing the tunnel for the second VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. Valid values are add | start. + Tunnel2StartupAction *string `json:"tunnel2StartupAction,omitempty" tf:"tunnel2_startup_action,omitempty"` + + // The RFC 6890 link-local address of the second VPN tunnel (VPN Gateway Side). 
+ Tunnel2VgwInsideAddress *string `json:"tunnel2VgwInsideAddress,omitempty" tf:"tunnel2_vgw_inside_address,omitempty"` + + // Indicate whether the VPN tunnels process IPv4 or IPv6 traffic. Valid values are ipv4 | ipv6. ipv6 Supports only EC2 Transit Gateway. + TunnelInsideIPVersion *string `json:"tunnelInsideIpVersion,omitempty" tf:"tunnel_inside_ip_version,omitempty"` + + // The type of VPN connection. The only type AWS supports at this time is "ipsec.1". + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The ID of the Virtual Private Gateway. + VPNGatewayID *string `json:"vpnGatewayId,omitempty" tf:"vpn_gateway_id,omitempty"` + + // Telemetry for the VPN tunnels. Detailed below. + VgwTelemetry []VgwTelemetryObservation `json:"vgwTelemetry,omitempty" tf:"vgw_telemetry,omitempty"` +} + +type VPNConnectionParameters struct { + + // The ID of the customer gateway. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.CustomerGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CustomerGatewayID *string `json:"customerGatewayId,omitempty" tf:"customer_gateway_id,omitempty"` + + // Reference to a CustomerGateway in ec2 to populate customerGatewayId. + // +kubebuilder:validation:Optional + CustomerGatewayIDRef *v1.Reference `json:"customerGatewayIdRef,omitempty" tf:"-"` + + // Selector for a CustomerGateway in ec2 to populate customerGatewayId. + // +kubebuilder:validation:Optional + CustomerGatewayIDSelector *v1.Selector `json:"customerGatewayIdSelector,omitempty" tf:"-"` + + // Indicate whether to enable acceleration for the VPN connection. Supports only EC2 Transit Gateway. + // +kubebuilder:validation:Optional + EnableAcceleration *bool `json:"enableAcceleration,omitempty" tf:"enable_acceleration,omitempty"` + + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. 
+ // +kubebuilder:validation:Optional + LocalIPv4NetworkCidr *string `json:"localIpv4NetworkCidr,omitempty" tf:"local_ipv4_network_cidr,omitempty"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + // +kubebuilder:validation:Optional + LocalIPv6NetworkCidr *string `json:"localIpv6NetworkCidr,omitempty" tf:"local_ipv6_network_cidr,omitempty"` + + // Indicates if a Public S2S VPN or Private S2S VPN over AWS Direct Connect. Valid values are PublicIpv4 | PrivateIpv4 + // +kubebuilder:validation:Optional + OutsideIPAddressType *string `json:"outsideIpAddressType,omitempty" tf:"outside_ip_address_type,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The IPv4 CIDR on the AWS side of the VPN connection. + // +kubebuilder:validation:Optional + RemoteIPv4NetworkCidr *string `json:"remoteIpv4NetworkCidr,omitempty" tf:"remote_ipv4_network_cidr,omitempty"` + + // The IPv6 CIDR on the AWS side of the VPN connection. + // +kubebuilder:validation:Optional + RemoteIPv6NetworkCidr *string `json:"remoteIpv6NetworkCidr,omitempty" tf:"remote_ipv6_network_cidr,omitempty"` + + // Whether the VPN connection uses static routes exclusively. Static routes must be used for devices that don't support BGP. + // +kubebuilder:validation:Optional + StaticRoutesOnly *bool `json:"staticRoutesOnly,omitempty" tf:"static_routes_only,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the EC2 Transit Gateway. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.TransitGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TransitGatewayID *string `json:"transitGatewayId,omitempty" tf:"transit_gateway_id,omitempty"` + + // Reference to a TransitGateway in ec2 to populate transitGatewayId. + // +kubebuilder:validation:Optional + TransitGatewayIDRef *v1.Reference `json:"transitGatewayIdRef,omitempty" tf:"-"` + + // Selector for a TransitGateway in ec2 to populate transitGatewayId. + // +kubebuilder:validation:Optional + TransitGatewayIDSelector *v1.Selector `json:"transitGatewayIdSelector,omitempty" tf:"-"` + + // . The attachment ID of the Transit Gateway attachment to Direct Connect Gateway. The ID is obtained through a data source only. + // +kubebuilder:validation:Optional + TransportTransitGatewayAttachmentID *string `json:"transportTransitGatewayAttachmentId,omitempty" tf:"transport_transit_gateway_attachment_id,omitempty"` + + // The action to take after DPD timeout occurs for the first VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are clear | none | restart. + // +kubebuilder:validation:Optional + Tunnel1DpdTimeoutAction *string `json:"tunnel1DpdTimeoutAction,omitempty" tf:"tunnel1_dpd_timeout_action,omitempty"` + + // The number of seconds after which a DPD timeout occurs for the first VPN tunnel. Valid value is equal or higher than 30. + // +kubebuilder:validation:Optional + Tunnel1DpdTimeoutSeconds *float64 `json:"tunnel1DpdTimeoutSeconds,omitempty" tf:"tunnel1_dpd_timeout_seconds,omitempty"` + + // Turn on or off tunnel endpoint lifecycle control feature for the first VPN tunnel. Valid values are true | false. 
+ // +kubebuilder:validation:Optional + Tunnel1EnableTunnelLifecycleControl *bool `json:"tunnel1EnableTunnelLifecycleControl,omitempty" tf:"tunnel1_enable_tunnel_lifecycle_control,omitempty"` + + // The IKE versions that are permitted for the first VPN tunnel. Valid values are ikev1 | ikev2. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel1IkeVersions []*string `json:"tunnel1IkeVersions,omitempty" tf:"tunnel1_ike_versions,omitempty"` + + // The CIDR block of the inside IP addresses for the first VPN tunnel. Valid value is a size /30 CIDR block from the 169.254.0.0/16 range. + // +kubebuilder:validation:Optional + Tunnel1InsideCidr *string `json:"tunnel1InsideCidr,omitempty" tf:"tunnel1_inside_cidr,omitempty"` + + // The range of inside IPv6 addresses for the first VPN tunnel. Supports only EC2 Transit Gateway. Valid value is a size /126 CIDR block from the local fd00::/8 range. + // +kubebuilder:validation:Optional + Tunnel1InsideIPv6Cidr *string `json:"tunnel1InsideIpv6Cidr,omitempty" tf:"tunnel1_inside_ipv6_cidr,omitempty"` + + // Options for logging VPN tunnel activity. See Log Options below for more details. + // +kubebuilder:validation:Optional + Tunnel1LogOptions *Tunnel1LogOptionsParameters `json:"tunnel1LogOptions,omitempty" tf:"tunnel1_log_options,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel1Phase1DhGroupNumbers []*float64 `json:"tunnel1Phase1DhGroupNumbers,omitempty" tf:"tunnel1_phase1_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. 
+ // +kubebuilder:validation:Optional + // +listType=set + Tunnel1Phase1EncryptionAlgorithms []*string `json:"tunnel1Phase1EncryptionAlgorithms,omitempty" tf:"tunnel1_phase1_encryption_algorithms,omitempty"` + + // One or more integrity algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel1Phase1IntegrityAlgorithms []*string `json:"tunnel1Phase1IntegrityAlgorithms,omitempty" tf:"tunnel1_phase1_integrity_algorithms,omitempty"` + + // The lifetime for phase 1 of the IKE negotiation for the first VPN tunnel, in seconds. Valid value is between 900 and 28800. + // +kubebuilder:validation:Optional + Tunnel1Phase1LifetimeSeconds *float64 `json:"tunnel1Phase1LifetimeSeconds,omitempty" tf:"tunnel1_phase1_lifetime_seconds,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel1Phase2DhGroupNumbers []*float64 `json:"tunnel1Phase2DhGroupNumbers,omitempty" tf:"tunnel1_phase2_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel1Phase2EncryptionAlgorithms []*string `json:"tunnel1Phase2EncryptionAlgorithms,omitempty" tf:"tunnel1_phase2_encryption_algorithms,omitempty"` + + // List of one or more integrity algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. 
+ // +kubebuilder:validation:Optional + // +listType=set + Tunnel1Phase2IntegrityAlgorithms []*string `json:"tunnel1Phase2IntegrityAlgorithms,omitempty" tf:"tunnel1_phase2_integrity_algorithms,omitempty"` + + // The lifetime for phase 2 of the IKE negotiation for the first VPN tunnel, in seconds. Valid value is between 900 and 3600. + // +kubebuilder:validation:Optional + Tunnel1Phase2LifetimeSeconds *float64 `json:"tunnel1Phase2LifetimeSeconds,omitempty" tf:"tunnel1_phase2_lifetime_seconds,omitempty"` + + // The preshared key of the first VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero(0). Allowed characters are alphanumeric characters, periods(.) and underscores(_). + // +kubebuilder:validation:Optional + Tunnel1PresharedKeySecretRef *v1.SecretKeySelector `json:"tunnel1PresharedKeySecretRef,omitempty" tf:"-"` + + // The percentage of the rekey window for the first VPN tunnel (determined by tunnel1_rekey_margin_time_seconds) during which the rekey time is randomly selected. Valid value is between 0 and 100. + // +kubebuilder:validation:Optional + Tunnel1RekeyFuzzPercentage *float64 `json:"tunnel1RekeyFuzzPercentage,omitempty" tf:"tunnel1_rekey_fuzz_percentage,omitempty"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the first VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for tunnel1_rekey_fuzz_percentage. Valid value is between 60 and half of tunnel1_phase2_lifetime_seconds. + // +kubebuilder:validation:Optional + Tunnel1RekeyMarginTimeSeconds *float64 `json:"tunnel1RekeyMarginTimeSeconds,omitempty" tf:"tunnel1_rekey_margin_time_seconds,omitempty"` + + // The number of packets in an IKE replay window for the first VPN tunnel. Valid value is between 64 and 2048. 
+ // +kubebuilder:validation:Optional + Tunnel1ReplayWindowSize *float64 `json:"tunnel1ReplayWindowSize,omitempty" tf:"tunnel1_replay_window_size,omitempty"` + + // The action to take when the establishing the tunnel for the first VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. Valid values are add | start. + // +kubebuilder:validation:Optional + Tunnel1StartupAction *string `json:"tunnel1StartupAction,omitempty" tf:"tunnel1_startup_action,omitempty"` + + // The action to take after DPD timeout occurs for the second VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are clear | none | restart. + // +kubebuilder:validation:Optional + Tunnel2DpdTimeoutAction *string `json:"tunnel2DpdTimeoutAction,omitempty" tf:"tunnel2_dpd_timeout_action,omitempty"` + + // The number of seconds after which a DPD timeout occurs for the second VPN tunnel. Valid value is equal or higher than 30. + // +kubebuilder:validation:Optional + Tunnel2DpdTimeoutSeconds *float64 `json:"tunnel2DpdTimeoutSeconds,omitempty" tf:"tunnel2_dpd_timeout_seconds,omitempty"` + + // Turn on or off tunnel endpoint lifecycle control feature for the second VPN tunnel. Valid values are true | false. + // +kubebuilder:validation:Optional + Tunnel2EnableTunnelLifecycleControl *bool `json:"tunnel2EnableTunnelLifecycleControl,omitempty" tf:"tunnel2_enable_tunnel_lifecycle_control,omitempty"` + + // The IKE versions that are permitted for the second VPN tunnel. Valid values are ikev1 | ikev2. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel2IkeVersions []*string `json:"tunnel2IkeVersions,omitempty" tf:"tunnel2_ike_versions,omitempty"` + + // The CIDR block of the inside IP addresses for the second VPN tunnel. Valid value is a size /30 CIDR block from the 169.254.0.0/16 range. 
+ // +kubebuilder:validation:Optional + Tunnel2InsideCidr *string `json:"tunnel2InsideCidr,omitempty" tf:"tunnel2_inside_cidr,omitempty"` + + // The range of inside IPv6 addresses for the second VPN tunnel. Supports only EC2 Transit Gateway. Valid value is a size /126 CIDR block from the local fd00::/8 range. + // +kubebuilder:validation:Optional + Tunnel2InsideIPv6Cidr *string `json:"tunnel2InsideIpv6Cidr,omitempty" tf:"tunnel2_inside_ipv6_cidr,omitempty"` + + // Options for logging VPN tunnel activity. See Log Options below for more details. + // +kubebuilder:validation:Optional + Tunnel2LogOptions *Tunnel2LogOptionsParameters `json:"tunnel2LogOptions,omitempty" tf:"tunnel2_log_options,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel2Phase1DhGroupNumbers []*float64 `json:"tunnel2Phase1DhGroupNumbers,omitempty" tf:"tunnel2_phase1_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel2Phase1EncryptionAlgorithms []*string `json:"tunnel2Phase1EncryptionAlgorithms,omitempty" tf:"tunnel2_phase1_encryption_algorithms,omitempty"` + + // One or more integrity algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel2Phase1IntegrityAlgorithms []*string `json:"tunnel2Phase1IntegrityAlgorithms,omitempty" tf:"tunnel2_phase1_integrity_algorithms,omitempty"` + + // The lifetime for phase 1 of the IKE negotiation for the second VPN tunnel, in seconds. 
Valid value is between 900 and 28800. + // +kubebuilder:validation:Optional + Tunnel2Phase1LifetimeSeconds *float64 `json:"tunnel2Phase1LifetimeSeconds,omitempty" tf:"tunnel2_phase1_lifetime_seconds,omitempty"` + + // List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel2Phase2DhGroupNumbers []*float64 `json:"tunnel2Phase2DhGroupNumbers,omitempty" tf:"tunnel2_phase2_dh_group_numbers,omitempty"` + + // List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel2Phase2EncryptionAlgorithms []*string `json:"tunnel2Phase2EncryptionAlgorithms,omitempty" tf:"tunnel2_phase2_encryption_algorithms,omitempty"` + + // List of one or more integrity algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + // +kubebuilder:validation:Optional + // +listType=set + Tunnel2Phase2IntegrityAlgorithms []*string `json:"tunnel2Phase2IntegrityAlgorithms,omitempty" tf:"tunnel2_phase2_integrity_algorithms,omitempty"` + + // The lifetime for phase 2 of the IKE negotiation for the second VPN tunnel, in seconds. Valid value is between 900 and 3600. + // +kubebuilder:validation:Optional + Tunnel2Phase2LifetimeSeconds *float64 `json:"tunnel2Phase2LifetimeSeconds,omitempty" tf:"tunnel2_phase2_lifetime_seconds,omitempty"` + + // The preshared key of the second VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero(0). Allowed characters are alphanumeric characters, periods(.) and underscores(_). 
+ // +kubebuilder:validation:Optional + Tunnel2PresharedKeySecretRef *v1.SecretKeySelector `json:"tunnel2PresharedKeySecretRef,omitempty" tf:"-"` + + // The percentage of the rekey window for the second VPN tunnel (determined by tunnel2_rekey_margin_time_seconds) during which the rekey time is randomly selected. Valid value is between 0 and 100. + // +kubebuilder:validation:Optional + Tunnel2RekeyFuzzPercentage *float64 `json:"tunnel2RekeyFuzzPercentage,omitempty" tf:"tunnel2_rekey_fuzz_percentage,omitempty"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the second VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for tunnel2_rekey_fuzz_percentage. Valid value is between 60 and half of tunnel2_phase2_lifetime_seconds. + // +kubebuilder:validation:Optional + Tunnel2RekeyMarginTimeSeconds *float64 `json:"tunnel2RekeyMarginTimeSeconds,omitempty" tf:"tunnel2_rekey_margin_time_seconds,omitempty"` + + // The number of packets in an IKE replay window for the second VPN tunnel. Valid value is between 64 and 2048. + // +kubebuilder:validation:Optional + Tunnel2ReplayWindowSize *float64 `json:"tunnel2ReplayWindowSize,omitempty" tf:"tunnel2_replay_window_size,omitempty"` + + // The action to take when the establishing the tunnel for the second VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. Valid values are add | start. + // +kubebuilder:validation:Optional + Tunnel2StartupAction *string `json:"tunnel2StartupAction,omitempty" tf:"tunnel2_startup_action,omitempty"` + + // Indicate whether the VPN tunnels process IPv4 or IPv6 traffic. Valid values are ipv4 | ipv6. ipv6 Supports only EC2 Transit Gateway. 
+ // +kubebuilder:validation:Optional + TunnelInsideIPVersion *string `json:"tunnelInsideIpVersion,omitempty" tf:"tunnel_inside_ip_version,omitempty"` + + // The type of VPN connection. The only type AWS supports at this time is "ipsec.1". + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.CustomerGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("type",false) + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Reference to a CustomerGateway in ec2 to populate type. + // +kubebuilder:validation:Optional + TypeRef *v1.Reference `json:"typeRef,omitempty" tf:"-"` + + // Selector for a CustomerGateway in ec2 to populate type. + // +kubebuilder:validation:Optional + TypeSelector *v1.Selector `json:"typeSelector,omitempty" tf:"-"` + + // The ID of the Virtual Private Gateway. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPNGateway + // +kubebuilder:validation:Optional + VPNGatewayID *string `json:"vpnGatewayId,omitempty" tf:"vpn_gateway_id,omitempty"` + + // Reference to a VPNGateway in ec2 to populate vpnGatewayId. + // +kubebuilder:validation:Optional + VPNGatewayIDRef *v1.Reference `json:"vpnGatewayIdRef,omitempty" tf:"-"` + + // Selector for a VPNGateway in ec2 to populate vpnGatewayId. + // +kubebuilder:validation:Optional + VPNGatewayIDSelector *v1.Selector `json:"vpnGatewayIdSelector,omitempty" tf:"-"` +} + +type VgwTelemetryInitParameters struct { +} + +type VgwTelemetryObservation struct { + + // The number of accepted routes. + AcceptedRouteCount *float64 `json:"acceptedRouteCount,omitempty" tf:"accepted_route_count,omitempty"` + + // The Amazon Resource Name (ARN) of the VPN tunnel endpoint certificate. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // The date and time of the last change in status. 
+ LastStatusChange *string `json:"lastStatusChange,omitempty" tf:"last_status_change,omitempty"` + + // The Internet-routable IP address of the virtual private gateway's outside interface. + OutsideIPAddress *string `json:"outsideIpAddress,omitempty" tf:"outside_ip_address,omitempty"` + + // The status of the VPN tunnel. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // If an error occurs, a description of the error. + StatusMessage *string `json:"statusMessage,omitempty" tf:"status_message,omitempty"` +} + +type VgwTelemetryParameters struct { +} + +// VPNConnectionSpec defines the desired state of VPNConnection +type VPNConnectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPNConnectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPNConnectionInitParameters `json:"initProvider,omitempty"` +} + +// VPNConnectionStatus defines the observed state of VPNConnection. +type VPNConnectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPNConnectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPNConnection is the Schema for the VPNConnections API. Manages a Site-to-Site VPN connection. 
A Site-to-Site VPN connection is an Internet Protocol security (IPsec) VPN connection between a VPC and an on-premises network. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VPNConnection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec VPNConnectionSpec `json:"spec"` + Status VPNConnectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPNConnectionList contains a list of VPNConnections +type VPNConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPNConnection `json:"items"` +} + +// Repository type metadata. +var ( + VPNConnection_Kind = "VPNConnection" + VPNConnection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPNConnection_Kind}.String() + VPNConnection_KindAPIVersion = VPNConnection_Kind + "." + CRDGroupVersion.String() + VPNConnection_GroupVersionKind = CRDGroupVersion.WithKind(VPNConnection_Kind) +) + +func init() { + SchemeBuilder.Register(&VPNConnection{}, &VPNConnectionList{}) +} diff --git a/apis/ecr/v1beta1/zz_generated.conversion_hubs.go b/apis/ecr/v1beta1/zz_generated.conversion_hubs.go index 4cc0a47dfb..ce7f68bc93 100755 --- a/apis/ecr/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/ecr/v1beta1/zz_generated.conversion_hubs.go @@ -18,11 +18,5 @@ func (tr *RegistryPolicy) Hub() {} // Hub marks this type as a conversion hub. func (tr *RegistryScanningConfiguration) Hub() {} -// Hub marks this type as a conversion hub. 
-func (tr *ReplicationConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Repository) Hub() {} - // Hub marks this type as a conversion hub. func (tr *RepositoryPolicy) Hub() {} diff --git a/apis/ecr/v1beta1/zz_generated.conversion_spokes.go b/apis/ecr/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..7cd6bb0afd --- /dev/null +++ b/apis/ecr/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ReplicationConfiguration to the hub type. +func (tr *ReplicationConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ReplicationConfiguration type. +func (tr *ReplicationConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Repository to the hub type. 
+func (tr *Repository) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Repository type. +func (tr *Repository) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/ecr/v1beta1/zz_generated.resolvers.go b/apis/ecr/v1beta1/zz_generated.resolvers.go index da837b4735..613d4511df 100644 --- a/apis/ecr/v1beta1/zz_generated.resolvers.go +++ b/apis/ecr/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *LifecyclePolicy) ResolveReferences( // ResolveReferences of this Lifec var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta1", "Repository", "RepositoryList") + m, l, err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta2", "Repository", "RepositoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -45,7 +45,7 @@ func (mg *LifecyclePolicy) ResolveReferences( // ResolveReferences of this Lifec mg.Spec.ForProvider.Repository = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RepositoryRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta1", "Repository", "RepositoryList") + m, l, 
err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta2", "Repository", "RepositoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -131,7 +131,7 @@ func (mg *RepositoryPolicy) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta1", "Repository", "RepositoryList") + m, l, err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta2", "Repository", "RepositoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -150,7 +150,7 @@ func (mg *RepositoryPolicy) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.Repository = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RepositoryRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta1", "Repository", "RepositoryList") + m, l, err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta2", "Repository", "RepositoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/ecr/v1beta1/zz_lifecyclepolicy_types.go b/apis/ecr/v1beta1/zz_lifecyclepolicy_types.go index 0cbfd17b14..6b9f514563 100755 --- a/apis/ecr/v1beta1/zz_lifecyclepolicy_types.go +++ b/apis/ecr/v1beta1/zz_lifecyclepolicy_types.go @@ -19,7 +19,7 @@ type LifecyclePolicyInitParameters struct { Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` // Name of the repository to apply the policy. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta1.Repository + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta2.Repository Repository *string `json:"repository,omitempty" tf:"repository,omitempty"` // Reference to a Repository in ecr to populate repository. @@ -56,7 +56,7 @@ type LifecyclePolicyParameters struct { Region *string `json:"region" tf:"-"` // Name of the repository to apply the policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta1.Repository + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta2.Repository // +kubebuilder:validation:Optional Repository *string `json:"repository,omitempty" tf:"repository,omitempty"` diff --git a/apis/ecr/v1beta1/zz_repositorypolicy_types.go b/apis/ecr/v1beta1/zz_repositorypolicy_types.go index 1c42dfbd48..8db6da4b46 100755 --- a/apis/ecr/v1beta1/zz_repositorypolicy_types.go +++ b/apis/ecr/v1beta1/zz_repositorypolicy_types.go @@ -19,7 +19,7 @@ type RepositoryPolicyInitParameters struct { Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` // Name of the repository to apply the policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta1.Repository + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta2.Repository Repository *string `json:"repository,omitempty" tf:"repository,omitempty"` // Reference to a Repository in ecr to populate repository. @@ -56,7 +56,7 @@ type RepositoryPolicyParameters struct { Region *string `json:"region" tf:"-"` // Name of the repository to apply the policy. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta1.Repository + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta2.Repository // +kubebuilder:validation:Optional Repository *string `json:"repository,omitempty" tf:"repository,omitempty"` diff --git a/apis/ecr/v1beta2/zz_generated.conversion_hubs.go b/apis/ecr/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..558b3a5e3d --- /dev/null +++ b/apis/ecr/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ReplicationConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Repository) Hub() {} diff --git a/apis/ecr/v1beta2/zz_generated.deepcopy.go b/apis/ecr/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0e005ac845 --- /dev/null +++ b/apis/ecr/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,930 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationInitParameters) DeepCopyInto(out *DestinationInitParameters) { + *out = *in + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationInitParameters. 
+func (in *DestinationInitParameters) DeepCopy() *DestinationInitParameters { + if in == nil { + return nil + } + out := new(DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationObservation) DeepCopyInto(out *DestinationObservation) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationObservation. +func (in *DestinationObservation) DeepCopy() *DestinationObservation { + if in == nil { + return nil + } + out := new(DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationParameters) DeepCopyInto(out *DestinationParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationParameters. +func (in *DestinationParameters) DeepCopy() *DestinationParameters { + if in == nil { + return nil + } + out := new(DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfigurationInitParameters) DeepCopyInto(out *EncryptionConfigurationInitParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } + if in.KMSKeyRef != nil { + in, out := &in.KMSKeyRef, &out.KMSKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeySelector != nil { + in, out := &in.KMSKeySelector, &out.KMSKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationInitParameters. +func (in *EncryptionConfigurationInitParameters) DeepCopy() *EncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationObservation) DeepCopyInto(out *EncryptionConfigurationObservation) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationObservation. +func (in *EncryptionConfigurationObservation) DeepCopy() *EncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfigurationParameters) DeepCopyInto(out *EncryptionConfigurationParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } + if in.KMSKeyRef != nil { + in, out := &in.KMSKeyRef, &out.KMSKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeySelector != nil { + in, out := &in.KMSKeySelector, &out.KMSKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationParameters. +func (in *EncryptionConfigurationParameters) DeepCopy() *EncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageScanningConfigurationInitParameters) DeepCopyInto(out *ImageScanningConfigurationInitParameters) { + *out = *in + if in.ScanOnPush != nil { + in, out := &in.ScanOnPush, &out.ScanOnPush + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageScanningConfigurationInitParameters. +func (in *ImageScanningConfigurationInitParameters) DeepCopy() *ImageScanningConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ImageScanningConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageScanningConfigurationObservation) DeepCopyInto(out *ImageScanningConfigurationObservation) { + *out = *in + if in.ScanOnPush != nil { + in, out := &in.ScanOnPush, &out.ScanOnPush + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageScanningConfigurationObservation. +func (in *ImageScanningConfigurationObservation) DeepCopy() *ImageScanningConfigurationObservation { + if in == nil { + return nil + } + out := new(ImageScanningConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageScanningConfigurationParameters) DeepCopyInto(out *ImageScanningConfigurationParameters) { + *out = *in + if in.ScanOnPush != nil { + in, out := &in.ScanOnPush, &out.ScanOnPush + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageScanningConfigurationParameters. +func (in *ImageScanningConfigurationParameters) DeepCopy() *ImageScanningConfigurationParameters { + if in == nil { + return nil + } + out := new(ImageScanningConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfiguration) DeepCopyInto(out *ReplicationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfiguration. 
+func (in *ReplicationConfiguration) DeepCopy() *ReplicationConfiguration { + if in == nil { + return nil + } + out := new(ReplicationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationInitParameters) DeepCopyInto(out *ReplicationConfigurationInitParameters) { + *out = *in + if in.ReplicationConfiguration != nil { + in, out := &in.ReplicationConfiguration, &out.ReplicationConfiguration + *out = new(ReplicationConfigurationReplicationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationInitParameters. +func (in *ReplicationConfigurationInitParameters) DeepCopy() *ReplicationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ReplicationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationList) DeepCopyInto(out *ReplicationConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationList. 
+func (in *ReplicationConfigurationList) DeepCopy() *ReplicationConfigurationList { + if in == nil { + return nil + } + out := new(ReplicationConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationObservation) DeepCopyInto(out *ReplicationConfigurationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.ReplicationConfiguration != nil { + in, out := &in.ReplicationConfiguration, &out.ReplicationConfiguration + *out = new(ReplicationConfigurationReplicationConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationObservation. +func (in *ReplicationConfigurationObservation) DeepCopy() *ReplicationConfigurationObservation { + if in == nil { + return nil + } + out := new(ReplicationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationConfigurationParameters) DeepCopyInto(out *ReplicationConfigurationParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReplicationConfiguration != nil { + in, out := &in.ReplicationConfiguration, &out.ReplicationConfiguration + *out = new(ReplicationConfigurationReplicationConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationParameters. +func (in *ReplicationConfigurationParameters) DeepCopy() *ReplicationConfigurationParameters { + if in == nil { + return nil + } + out := new(ReplicationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationReplicationConfigurationInitParameters) DeepCopyInto(out *ReplicationConfigurationReplicationConfigurationInitParameters) { + *out = *in + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationReplicationConfigurationInitParameters. +func (in *ReplicationConfigurationReplicationConfigurationInitParameters) DeepCopy() *ReplicationConfigurationReplicationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ReplicationConfigurationReplicationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationConfigurationReplicationConfigurationObservation) DeepCopyInto(out *ReplicationConfigurationReplicationConfigurationObservation) { + *out = *in + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationReplicationConfigurationObservation. +func (in *ReplicationConfigurationReplicationConfigurationObservation) DeepCopy() *ReplicationConfigurationReplicationConfigurationObservation { + if in == nil { + return nil + } + out := new(ReplicationConfigurationReplicationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationReplicationConfigurationParameters) DeepCopyInto(out *ReplicationConfigurationReplicationConfigurationParameters) { + *out = *in + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationReplicationConfigurationParameters. +func (in *ReplicationConfigurationReplicationConfigurationParameters) DeepCopy() *ReplicationConfigurationReplicationConfigurationParameters { + if in == nil { + return nil + } + out := new(ReplicationConfigurationReplicationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationConfigurationSpec) DeepCopyInto(out *ReplicationConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationSpec. +func (in *ReplicationConfigurationSpec) DeepCopy() *ReplicationConfigurationSpec { + if in == nil { + return nil + } + out := new(ReplicationConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationStatus) DeepCopyInto(out *ReplicationConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationStatus. +func (in *ReplicationConfigurationStatus) DeepCopy() *ReplicationConfigurationStatus { + if in == nil { + return nil + } + out := new(ReplicationConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Repository) DeepCopyInto(out *Repository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Repository. +func (in *Repository) DeepCopy() *Repository { + if in == nil { + return nil + } + out := new(Repository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Repository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryFilterInitParameters) DeepCopyInto(out *RepositoryFilterInitParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryFilterInitParameters. +func (in *RepositoryFilterInitParameters) DeepCopy() *RepositoryFilterInitParameters { + if in == nil { + return nil + } + out := new(RepositoryFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryFilterObservation) DeepCopyInto(out *RepositoryFilterObservation) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryFilterObservation. +func (in *RepositoryFilterObservation) DeepCopy() *RepositoryFilterObservation { + if in == nil { + return nil + } + out := new(RepositoryFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryFilterParameters) DeepCopyInto(out *RepositoryFilterParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryFilterParameters. +func (in *RepositoryFilterParameters) DeepCopy() *RepositoryFilterParameters { + if in == nil { + return nil + } + out := new(RepositoryFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryInitParameters) DeepCopyInto(out *RepositoryInitParameters) { + *out = *in + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = make([]EncryptionConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceDelete != nil { + in, out := &in.ForceDelete, &out.ForceDelete + *out = new(bool) + **out = **in + } + if in.ImageScanningConfiguration != nil { + in, out := &in.ImageScanningConfiguration, &out.ImageScanningConfiguration + *out = new(ImageScanningConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageTagMutability != nil { + in, out := &in.ImageTagMutability, &out.ImageTagMutability + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryInitParameters. 
+func (in *RepositoryInitParameters) DeepCopy() *RepositoryInitParameters { + if in == nil { + return nil + } + out := new(RepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryList) DeepCopyInto(out *RepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Repository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryList. +func (in *RepositoryList) DeepCopy() *RepositoryList { + if in == nil { + return nil + } + out := new(RepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryObservation) DeepCopyInto(out *RepositoryObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = make([]EncryptionConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceDelete != nil { + in, out := &in.ForceDelete, &out.ForceDelete + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageScanningConfiguration != nil { + in, out := &in.ImageScanningConfiguration, &out.ImageScanningConfiguration + *out = new(ImageScanningConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageTagMutability != nil { + in, out := &in.ImageTagMutability, &out.ImageTagMutability + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryObservation. 
+func (in *RepositoryObservation) DeepCopy() *RepositoryObservation { + if in == nil { + return nil + } + out := new(RepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryParameters) DeepCopyInto(out *RepositoryParameters) { + *out = *in + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = make([]EncryptionConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceDelete != nil { + in, out := &in.ForceDelete, &out.ForceDelete + *out = new(bool) + **out = **in + } + if in.ImageScanningConfiguration != nil { + in, out := &in.ImageScanningConfiguration, &out.ImageScanningConfiguration + *out = new(ImageScanningConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageTagMutability != nil { + in, out := &in.ImageTagMutability, &out.ImageTagMutability + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryParameters. +func (in *RepositoryParameters) DeepCopy() *RepositoryParameters { + if in == nil { + return nil + } + out := new(RepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositorySpec) DeepCopyInto(out *RepositorySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositorySpec. +func (in *RepositorySpec) DeepCopy() *RepositorySpec { + if in == nil { + return nil + } + out := new(RepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryStatus) DeepCopyInto(out *RepositoryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryStatus. +func (in *RepositoryStatus) DeepCopy() *RepositoryStatus { + if in == nil { + return nil + } + out := new(RepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = make([]DestinationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RepositoryFilter != nil { + in, out := &in.RepositoryFilter, &out.RepositoryFilter + *out = make([]RepositoryFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleInitParameters. 
+func (in *RuleInitParameters) DeepCopy() *RuleInitParameters { + if in == nil { + return nil + } + out := new(RuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleObservation) DeepCopyInto(out *RuleObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = make([]DestinationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RepositoryFilter != nil { + in, out := &in.RepositoryFilter, &out.RepositoryFilter + *out = make([]RepositoryFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleObservation. +func (in *RuleObservation) DeepCopy() *RuleObservation { + if in == nil { + return nil + } + out := new(RuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleParameters) DeepCopyInto(out *RuleParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = make([]DestinationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RepositoryFilter != nil { + in, out := &in.RepositoryFilter, &out.RepositoryFilter + *out = make([]RepositoryFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleParameters. 
+func (in *RuleParameters) DeepCopy() *RuleParameters { + if in == nil { + return nil + } + out := new(RuleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ecr/v1beta2/zz_generated.managed.go b/apis/ecr/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..15126c3161 --- /dev/null +++ b/apis/ecr/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Repository. +func (mg *Repository) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Repository. +func (mg *Repository) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Repository. +func (mg *Repository) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Repository. +func (mg *Repository) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Repository. +func (mg *Repository) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Repository. 
+func (mg *Repository) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Repository. +func (mg *Repository) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Repository. +func (mg *Repository) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Repository. +func (mg *Repository) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Repository. +func (mg *Repository) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Repository. +func (mg *Repository) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Repository. +func (mg *Repository) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ecr/v1beta2/zz_generated.managedlist.go b/apis/ecr/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..b61ad5cfda --- /dev/null +++ b/apis/ecr/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ReplicationConfigurationList. +func (l *ReplicationConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RepositoryList. 
+func (l *RepositoryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ecr/v1beta2/zz_generated.resolvers.go b/apis/ecr/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..c2ceea5328 --- /dev/null +++ b/apis/ecr/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,73 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Repository) ResolveReferences( // ResolveReferences of this Repository. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.EncryptionConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EncryptionConfiguration[i3].KMSKey), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.EncryptionConfiguration[i3].KMSKeyRef, + Selector: mg.Spec.ForProvider.EncryptionConfiguration[i3].KMSKeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EncryptionConfiguration[i3].KMSKey") + } + mg.Spec.ForProvider.EncryptionConfiguration[i3].KMSKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EncryptionConfiguration[i3].KMSKeyRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.EncryptionConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EncryptionConfiguration[i3].KMSKey), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.EncryptionConfiguration[i3].KMSKeyRef, + Selector: mg.Spec.InitProvider.EncryptionConfiguration[i3].KMSKeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.EncryptionConfiguration[i3].KMSKey") + } + mg.Spec.InitProvider.EncryptionConfiguration[i3].KMSKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EncryptionConfiguration[i3].KMSKeyRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/ecr/v1beta2/zz_groupversion_info.go b/apis/ecr/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..bc00a4b6f7 --- /dev/null +++ b/apis/ecr/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ecr.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "ecr.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ecr/v1beta2/zz_replicationconfiguration_terraformed.go b/apis/ecr/v1beta2/zz_replicationconfiguration_terraformed.go new file mode 100755 index 0000000000..aae4f5c309 --- /dev/null +++ b/apis/ecr/v1beta2/zz_replicationconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ReplicationConfiguration +func (mg *ReplicationConfiguration) GetTerraformResourceType() string { + return "aws_ecr_replication_configuration" +} + +// GetConnectionDetailsMapping for this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ReplicationConfiguration +func (tr *ReplicationConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ReplicationConfiguration +func (tr *ReplicationConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
ReplicationConfiguration +func (tr *ReplicationConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ReplicationConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ReplicationConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &ReplicationConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ReplicationConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ecr/v1beta2/zz_replicationconfiguration_types.go b/apis/ecr/v1beta2/zz_replicationconfiguration_types.go new file mode 100755 index 0000000000..bd33336dab --- /dev/null +++ b/apis/ecr/v1beta2/zz_replicationconfiguration_types.go @@ -0,0 +1,206 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DestinationInitParameters struct { + + // The account ID of the destination registry to replicate to. + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` +} + +type DestinationObservation struct { + + // A Region to replicate to. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The account ID of the destination registry to replicate to. + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` +} + +type DestinationParameters struct { + + // A Region to replicate to. 
+ // +kubebuilder:validation:Required + Region *string `json:"region" tf:"region,omitempty"` + + // The account ID of the destination registry to replicate to. + // +kubebuilder:validation:Optional + RegistryID *string `json:"registryId" tf:"registry_id,omitempty"` +} + +type ReplicationConfigurationInitParameters struct { + + // Replication configuration for a registry. See Replication Configuration. + ReplicationConfiguration *ReplicationConfigurationReplicationConfigurationInitParameters `json:"replicationConfiguration,omitempty" tf:"replication_configuration,omitempty"` +} + +type ReplicationConfigurationObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The account ID of the destination registry to replicate to. + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + + // Replication configuration for a registry. See Replication Configuration. + ReplicationConfiguration *ReplicationConfigurationReplicationConfigurationObservation `json:"replicationConfiguration,omitempty" tf:"replication_configuration,omitempty"` +} + +type ReplicationConfigurationParameters struct { + + // A Region to replicate to. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Replication configuration for a registry. See Replication Configuration. + // +kubebuilder:validation:Optional + ReplicationConfiguration *ReplicationConfigurationReplicationConfigurationParameters `json:"replicationConfiguration,omitempty" tf:"replication_configuration,omitempty"` +} + +type ReplicationConfigurationReplicationConfigurationInitParameters struct { + + // The replication rules for a replication configuration. A maximum of 10 are allowed per replication_configuration. 
See Rule + Rule []RuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ReplicationConfigurationReplicationConfigurationObservation struct { + + // The replication rules for a replication configuration. A maximum of 10 are allowed per replication_configuration. See Rule + Rule []RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ReplicationConfigurationReplicationConfigurationParameters struct { + + // The replication rules for a replication configuration. A maximum of 10 are allowed per replication_configuration. See Rule + // +kubebuilder:validation:Optional + Rule []RuleParameters `json:"rule" tf:"rule,omitempty"` +} + +type RepositoryFilterInitParameters struct { + + // The repository filter details. + Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` + + // The repository filter type. The only supported value is PREFIX_MATCH, which is a repository name prefix specified with the filter parameter. + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` +} + +type RepositoryFilterObservation struct { + + // The repository filter details. + Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` + + // The repository filter type. The only supported value is PREFIX_MATCH, which is a repository name prefix specified with the filter parameter. + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` +} + +type RepositoryFilterParameters struct { + + // The repository filter details. + // +kubebuilder:validation:Optional + Filter *string `json:"filter" tf:"filter,omitempty"` + + // The repository filter type. The only supported value is PREFIX_MATCH, which is a repository name prefix specified with the filter parameter. + // +kubebuilder:validation:Optional + FilterType *string `json:"filterType" tf:"filter_type,omitempty"` +} + +type RuleInitParameters struct { + + // the details of a replication destination. A maximum of 25 are allowed per rule. See Destination. 
+ Destination []DestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // filters for a replication rule. See Repository Filter. + RepositoryFilter []RepositoryFilterInitParameters `json:"repositoryFilter,omitempty" tf:"repository_filter,omitempty"` +} + +type RuleObservation struct { + + // the details of a replication destination. A maximum of 25 are allowed per rule. See Destination. + Destination []DestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // filters for a replication rule. See Repository Filter. + RepositoryFilter []RepositoryFilterObservation `json:"repositoryFilter,omitempty" tf:"repository_filter,omitempty"` +} + +type RuleParameters struct { + + // the details of a replication destination. A maximum of 25 are allowed per rule. See Destination. + // +kubebuilder:validation:Optional + Destination []DestinationParameters `json:"destination" tf:"destination,omitempty"` + + // filters for a replication rule. See Repository Filter. + // +kubebuilder:validation:Optional + RepositoryFilter []RepositoryFilterParameters `json:"repositoryFilter,omitempty" tf:"repository_filter,omitempty"` +} + +// ReplicationConfigurationSpec defines the desired state of ReplicationConfiguration +type ReplicationConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ReplicationConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ReplicationConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// ReplicationConfigurationStatus defines the observed state of ReplicationConfiguration. +type ReplicationConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ReplicationConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ReplicationConfiguration is the Schema for the ReplicationConfigurations API. Provides an Elastic Container Registry Replication Configuration. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ReplicationConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ReplicationConfigurationSpec `json:"spec"` + Status ReplicationConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ReplicationConfigurationList contains a list of ReplicationConfigurations +type ReplicationConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ReplicationConfiguration `json:"items"` +} + +// ReplicationConfiguration type metadata. 
+var ( + ReplicationConfiguration_Kind = "ReplicationConfiguration" + ReplicationConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ReplicationConfiguration_Kind}.String() + ReplicationConfiguration_KindAPIVersion = ReplicationConfiguration_Kind + "." + CRDGroupVersion.String() + ReplicationConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(ReplicationConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&ReplicationConfiguration{}, &ReplicationConfigurationList{}) +} diff --git a/apis/ecr/v1beta2/zz_repository_terraformed.go b/apis/ecr/v1beta2/zz_repository_terraformed.go new file mode 100755 index 0000000000..267116ade8 --- /dev/null +++ b/apis/ecr/v1beta2/zz_repository_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Repository +func (mg *Repository) GetTerraformResourceType() string { + return "aws_ecr_repository" +} + +// GetConnectionDetailsMapping for this Repository +func (tr *Repository) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Repository +func (tr *Repository) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Repository +func (tr *Repository) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Repository +func (tr 
*Repository) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Repository +func (tr *Repository) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Repository +func (tr *Repository) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Repository +func (tr *Repository) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Repository +func (tr *Repository) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Repository using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Repository) LateInitialize(attrs []byte) (bool, error) { + params := &RepositoryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Repository) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ecr/v1beta2/zz_repository_types.go b/apis/ecr/v1beta2/zz_repository_types.go new file mode 100755 index 0000000000..53dc31ed47 --- /dev/null +++ b/apis/ecr/v1beta2/zz_repository_types.go @@ -0,0 +1,227 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EncryptionConfigurationInitParameters struct { + + // The encryption type to use for the repository. Valid values are AES256 or KMS. Defaults to AES256. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // The ARN of the KMS key to use when encryption_type is KMS. 
If not specified, uses the default AWS managed key for ECR. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` + + // Reference to a Key in kms to populate kmsKey. + // +kubebuilder:validation:Optional + KMSKeyRef *v1.Reference `json:"kmsKeyRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKey. + // +kubebuilder:validation:Optional + KMSKeySelector *v1.Selector `json:"kmsKeySelector,omitempty" tf:"-"` +} + +type EncryptionConfigurationObservation struct { + + // The encryption type to use for the repository. Valid values are AES256 or KMS. Defaults to AES256. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // The ARN of the KMS key to use when encryption_type is KMS. If not specified, uses the default AWS managed key for ECR. + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` +} + +type EncryptionConfigurationParameters struct { + + // The encryption type to use for the repository. Valid values are AES256 or KMS. Defaults to AES256. + // +kubebuilder:validation:Optional + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // The ARN of the KMS key to use when encryption_type is KMS. If not specified, uses the default AWS managed key for ECR. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` + + // Reference to a Key in kms to populate kmsKey. 
+ // +kubebuilder:validation:Optional + KMSKeyRef *v1.Reference `json:"kmsKeyRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKey. + // +kubebuilder:validation:Optional + KMSKeySelector *v1.Selector `json:"kmsKeySelector,omitempty" tf:"-"` +} + +type ImageScanningConfigurationInitParameters struct { + + // Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false). + ScanOnPush *bool `json:"scanOnPush,omitempty" tf:"scan_on_push,omitempty"` +} + +type ImageScanningConfigurationObservation struct { + + // Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false). + ScanOnPush *bool `json:"scanOnPush,omitempty" tf:"scan_on_push,omitempty"` +} + +type ImageScanningConfigurationParameters struct { + + // Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false). + // +kubebuilder:validation:Optional + ScanOnPush *bool `json:"scanOnPush" tf:"scan_on_push,omitempty"` +} + +type RepositoryInitParameters struct { + + // Encryption configuration for the repository. See below for schema. + EncryptionConfiguration []EncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // If true, will delete the repository even if it contains images. + // Defaults to false. + ForceDelete *bool `json:"forceDelete,omitempty" tf:"force_delete,omitempty"` + + // Configuration block that defines image scanning configuration for the repository. By default, image scanning must be manually triggered. See the ECR User Guide for more information about image scanning. + ImageScanningConfiguration *ImageScanningConfigurationInitParameters `json:"imageScanningConfiguration,omitempty" tf:"image_scanning_configuration,omitempty"` + + // The tag mutability setting for the repository. Must be one of: MUTABLE or IMMUTABLE. Defaults to MUTABLE. 
+ ImageTagMutability *string `json:"imageTagMutability,omitempty" tf:"image_tag_mutability,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type RepositoryObservation struct { + + // Full ARN of the repository. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Encryption configuration for the repository. See below for schema. + EncryptionConfiguration []EncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // If true, will delete the repository even if it contains images. + // Defaults to false. + ForceDelete *bool `json:"forceDelete,omitempty" tf:"force_delete,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block that defines image scanning configuration for the repository. By default, image scanning must be manually triggered. See the ECR User Guide for more information about image scanning. + ImageScanningConfiguration *ImageScanningConfigurationObservation `json:"imageScanningConfiguration,omitempty" tf:"image_scanning_configuration,omitempty"` + + // The tag mutability setting for the repository. Must be one of: MUTABLE or IMMUTABLE. Defaults to MUTABLE. + ImageTagMutability *string `json:"imageTagMutability,omitempty" tf:"image_tag_mutability,omitempty"` + + // The registry ID where the repository was created. + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + + // The URL of the repository (in the form aws_account_id.dkr.ecr.region.amazonaws.com/repositoryName). + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type RepositoryParameters struct { + + // Encryption configuration for the repository. See below for schema. + // +kubebuilder:validation:Optional + EncryptionConfiguration []EncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // If true, will delete the repository even if it contains images. + // Defaults to false. + // +kubebuilder:validation:Optional + ForceDelete *bool `json:"forceDelete,omitempty" tf:"force_delete,omitempty"` + + // Configuration block that defines image scanning configuration for the repository. By default, image scanning must be manually triggered. See the ECR User Guide for more information about image scanning. + // +kubebuilder:validation:Optional + ImageScanningConfiguration *ImageScanningConfigurationParameters `json:"imageScanningConfiguration,omitempty" tf:"image_scanning_configuration,omitempty"` + + // The tag mutability setting for the repository. Must be one of: MUTABLE or IMMUTABLE. Defaults to MUTABLE. + // +kubebuilder:validation:Optional + ImageTagMutability *string `json:"imageTagMutability,omitempty" tf:"image_tag_mutability,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// RepositorySpec defines the desired state of Repository +type RepositorySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RepositoryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RepositoryInitParameters `json:"initProvider,omitempty"` +} + +// RepositoryStatus defines the observed state of Repository. +type RepositoryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RepositoryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Repository is the Schema for the Repositorys API. Provides an Elastic Container Registry Repository. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Repository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec RepositorySpec `json:"spec"` + Status RepositoryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RepositoryList contains a list of Repositorys +type RepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Repository `json:"items"` +} + +// Repository type metadata. +var ( + Repository_Kind = "Repository" + Repository_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Repository_Kind}.String() + Repository_KindAPIVersion = Repository_Kind + "." + CRDGroupVersion.String() + Repository_GroupVersionKind = CRDGroupVersion.WithKind(Repository_Kind) +) + +func init() { + SchemeBuilder.Register(&Repository{}, &RepositoryList{}) +} diff --git a/apis/ecrpublic/v1beta1/zz_generated.conversion_hubs.go b/apis/ecrpublic/v1beta1/zz_generated.conversion_hubs.go index a4bc3e0e40..d6731df0ff 100755 --- a/apis/ecrpublic/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/ecrpublic/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Repository) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *RepositoryPolicy) Hub() {} diff --git a/apis/ecrpublic/v1beta1/zz_generated.conversion_spokes.go b/apis/ecrpublic/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..77c27745aa --- /dev/null +++ b/apis/ecrpublic/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Repository to the hub type. +func (tr *Repository) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Repository type. 
+func (tr *Repository) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/ecrpublic/v1beta1/zz_generated.resolvers.go b/apis/ecrpublic/v1beta1/zz_generated.resolvers.go index 35e87fd360..5d43b86f42 100644 --- a/apis/ecrpublic/v1beta1/zz_generated.resolvers.go +++ b/apis/ecrpublic/v1beta1/zz_generated.resolvers.go @@ -9,8 +9,9 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" apisresolver "github.com/upbound/provider-aws/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -24,7 +25,7 @@ func (mg *RepositoryPolicy) ResolveReferences( // ResolveReferences of this Repo var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ecrpublic.aws.upbound.io", "v1beta1", "Repository", "RepositoryList") + m, l, err = apisresolver.GetManagedResource("ecrpublic.aws.upbound.io", "v1beta2", "Repository", "RepositoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -43,7 +44,7 @@ func (mg *RepositoryPolicy) ResolveReferences( // ResolveReferences of this Repo mg.Spec.ForProvider.RepositoryName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RepositoryNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ecrpublic.aws.upbound.io", "v1beta1", "Repository", "RepositoryList") + m, l, err = 
apisresolver.GetManagedResource("ecrpublic.aws.upbound.io", "v1beta2", "Repository", "RepositoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/ecrpublic/v1beta1/zz_repositorypolicy_types.go b/apis/ecrpublic/v1beta1/zz_repositorypolicy_types.go index a03e4dd0ce..58b2d4ea48 100755 --- a/apis/ecrpublic/v1beta1/zz_repositorypolicy_types.go +++ b/apis/ecrpublic/v1beta1/zz_repositorypolicy_types.go @@ -19,7 +19,7 @@ type RepositoryPolicyInitParameters struct { Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` // Name of the repository to apply the policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecrpublic/v1beta1.Repository + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecrpublic/v1beta2.Repository RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` // Reference to a Repository in ecrpublic to populate repositoryName. @@ -56,7 +56,7 @@ type RepositoryPolicyParameters struct { Region *string `json:"region" tf:"-"` // Name of the repository to apply the policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecrpublic/v1beta1.Repository + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecrpublic/v1beta2.Repository // +kubebuilder:validation:Optional RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` diff --git a/apis/ecrpublic/v1beta2/zz_generated.conversion_hubs.go b/apis/ecrpublic/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9c57cde7cb --- /dev/null +++ b/apis/ecrpublic/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Repository) Hub() {} diff --git a/apis/ecrpublic/v1beta2/zz_generated.deepcopy.go b/apis/ecrpublic/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f6e72d4bd8 --- /dev/null +++ b/apis/ecrpublic/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,442 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogDataInitParameters) DeepCopyInto(out *CatalogDataInitParameters) { + *out = *in + if in.AboutText != nil { + in, out := &in.AboutText, &out.AboutText + *out = new(string) + **out = **in + } + if in.Architectures != nil { + in, out := &in.Architectures, &out.Architectures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LogoImageBlob != nil { + in, out := &in.LogoImageBlob, &out.LogoImageBlob + *out = new(string) + **out = **in + } + if in.OperatingSystems != nil { + in, out := &in.OperatingSystems, &out.OperatingSystems + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UsageText != nil { + in, out := &in.UsageText, &out.UsageText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDataInitParameters. 
+func (in *CatalogDataInitParameters) DeepCopy() *CatalogDataInitParameters { + if in == nil { + return nil + } + out := new(CatalogDataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogDataObservation) DeepCopyInto(out *CatalogDataObservation) { + *out = *in + if in.AboutText != nil { + in, out := &in.AboutText, &out.AboutText + *out = new(string) + **out = **in + } + if in.Architectures != nil { + in, out := &in.Architectures, &out.Architectures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LogoImageBlob != nil { + in, out := &in.LogoImageBlob, &out.LogoImageBlob + *out = new(string) + **out = **in + } + if in.OperatingSystems != nil { + in, out := &in.OperatingSystems, &out.OperatingSystems + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UsageText != nil { + in, out := &in.UsageText, &out.UsageText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDataObservation. +func (in *CatalogDataObservation) DeepCopy() *CatalogDataObservation { + if in == nil { + return nil + } + out := new(CatalogDataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogDataParameters) DeepCopyInto(out *CatalogDataParameters) { + *out = *in + if in.AboutText != nil { + in, out := &in.AboutText, &out.AboutText + *out = new(string) + **out = **in + } + if in.Architectures != nil { + in, out := &in.Architectures, &out.Architectures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LogoImageBlob != nil { + in, out := &in.LogoImageBlob, &out.LogoImageBlob + *out = new(string) + **out = **in + } + if in.OperatingSystems != nil { + in, out := &in.OperatingSystems, &out.OperatingSystems + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UsageText != nil { + in, out := &in.UsageText, &out.UsageText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDataParameters. +func (in *CatalogDataParameters) DeepCopy() *CatalogDataParameters { + if in == nil { + return nil + } + out := new(CatalogDataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Repository) DeepCopyInto(out *Repository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Repository. 
+func (in *Repository) DeepCopy() *Repository { + if in == nil { + return nil + } + out := new(Repository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Repository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryInitParameters) DeepCopyInto(out *RepositoryInitParameters) { + *out = *in + if in.CatalogData != nil { + in, out := &in.CatalogData, &out.CatalogData + *out = new(CatalogDataInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryInitParameters. +func (in *RepositoryInitParameters) DeepCopy() *RepositoryInitParameters { + if in == nil { + return nil + } + out := new(RepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryList) DeepCopyInto(out *RepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Repository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryList. +func (in *RepositoryList) DeepCopy() *RepositoryList { + if in == nil { + return nil + } + out := new(RepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryObservation) DeepCopyInto(out *RepositoryObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CatalogData != nil { + in, out := &in.CatalogData, &out.CatalogData + *out = new(CatalogDataObservation) + (*in).DeepCopyInto(*out) + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.RepositoryURI != nil { + in, out := &in.RepositoryURI, &out.RepositoryURI + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryObservation. +func (in *RepositoryObservation) DeepCopy() *RepositoryObservation { + if in == nil { + return nil + } + out := new(RepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryParameters) DeepCopyInto(out *RepositoryParameters) { + *out = *in + if in.CatalogData != nil { + in, out := &in.CatalogData, &out.CatalogData + *out = new(CatalogDataParameters) + (*in).DeepCopyInto(*out) + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryParameters. +func (in *RepositoryParameters) DeepCopy() *RepositoryParameters { + if in == nil { + return nil + } + out := new(RepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositorySpec) DeepCopyInto(out *RepositorySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositorySpec. +func (in *RepositorySpec) DeepCopy() *RepositorySpec { + if in == nil { + return nil + } + out := new(RepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryStatus) DeepCopyInto(out *RepositoryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryStatus. +func (in *RepositoryStatus) DeepCopy() *RepositoryStatus { + if in == nil { + return nil + } + out := new(RepositoryStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ecrpublic/v1beta2/zz_generated.managed.go b/apis/ecrpublic/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..663f8a4ca7 --- /dev/null +++ b/apis/ecrpublic/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Repository. +func (mg *Repository) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Repository. +func (mg *Repository) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Repository. 
+func (mg *Repository) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Repository. +func (mg *Repository) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Repository. +func (mg *Repository) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Repository. +func (mg *Repository) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Repository. +func (mg *Repository) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Repository. +func (mg *Repository) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Repository. +func (mg *Repository) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Repository. +func (mg *Repository) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Repository. +func (mg *Repository) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Repository. 
+func (mg *Repository) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ecrpublic/v1beta2/zz_generated.managedlist.go b/apis/ecrpublic/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..3e36de4066 --- /dev/null +++ b/apis/ecrpublic/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this RepositoryList. +func (l *RepositoryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ecrpublic/v1beta2/zz_groupversion_info.go b/apis/ecrpublic/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..449900207f --- /dev/null +++ b/apis/ecrpublic/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ecrpublic.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "ecrpublic.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ecrpublic/v1beta2/zz_repository_terraformed.go b/apis/ecrpublic/v1beta2/zz_repository_terraformed.go new file mode 100755 index 0000000000..f985aefe0c --- /dev/null +++ b/apis/ecrpublic/v1beta2/zz_repository_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Repository +func (mg *Repository) GetTerraformResourceType() string { + return "aws_ecrpublic_repository" +} + +// GetConnectionDetailsMapping for this Repository +func (tr *Repository) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Repository +func (tr *Repository) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Repository +func (tr *Repository) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Repository +func (tr *Repository) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Repository +func (tr *Repository) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Repository 
+func (tr *Repository) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Repository +func (tr *Repository) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Repository +func (tr *Repository) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Repository using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Repository) LateInitialize(attrs []byte) (bool, error) { + params := &RepositoryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Repository) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ecrpublic/v1beta2/zz_repository_types.go b/apis/ecrpublic/v1beta2/zz_repository_types.go new file mode 100755 index 0000000000..6eb1ee5bb0 --- /dev/null +++ b/apis/ecrpublic/v1beta2/zz_repository_types.go @@ -0,0 +1,209 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CatalogDataInitParameters struct { + + // A detailed description of the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The text must be in markdown format. + AboutText *string `json:"aboutText,omitempty" tf:"about_text,omitempty"` + + // The system architecture that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported architectures will appear as badges on the repository and are used as search filters: ARM, ARM 64, x86, x86-64 + // +listType=set + Architectures []*string `json:"architectures,omitempty" tf:"architectures,omitempty"` + + // A short description of the contents of the repository. 
This text appears in both the image details and also when searching for repositories on the Amazon ECR Public Gallery. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The base64-encoded repository logo payload. (Only visible for verified accounts) Note that drift detection is disabled for this attribute. + LogoImageBlob *string `json:"logoImageBlob,omitempty" tf:"logo_image_blob,omitempty"` + + // The operating systems that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported operating systems will appear as badges on the repository and are used as search filters: Linux, Windows + // +listType=set + OperatingSystems []*string `json:"operatingSystems,omitempty" tf:"operating_systems,omitempty"` + + // Detailed information on how to use the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The usage text provides context, support information, and additional usage details for users of the repository. The text must be in markdown format. + UsageText *string `json:"usageText,omitempty" tf:"usage_text,omitempty"` +} + +type CatalogDataObservation struct { + + // A detailed description of the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The text must be in markdown format. + AboutText *string `json:"aboutText,omitempty" tf:"about_text,omitempty"` + + // The system architecture that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported architectures will appear as badges on the repository and are used as search filters: ARM, ARM 64, x86, x86-64 + // +listType=set + Architectures []*string `json:"architectures,omitempty" tf:"architectures,omitempty"` + + // A short description of the contents of the repository. This text appears in both the image details and also when searching for repositories on the Amazon ECR Public Gallery. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The base64-encoded repository logo payload. (Only visible for verified accounts) Note that drift detection is disabled for this attribute. + LogoImageBlob *string `json:"logoImageBlob,omitempty" tf:"logo_image_blob,omitempty"` + + // The operating systems that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported operating systems will appear as badges on the repository and are used as search filters: Linux, Windows + // +listType=set + OperatingSystems []*string `json:"operatingSystems,omitempty" tf:"operating_systems,omitempty"` + + // Detailed information on how to use the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The usage text provides context, support information, and additional usage details for users of the repository. The text must be in markdown format. + UsageText *string `json:"usageText,omitempty" tf:"usage_text,omitempty"` +} + +type CatalogDataParameters struct { + + // A detailed description of the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The text must be in markdown format. + // +kubebuilder:validation:Optional + AboutText *string `json:"aboutText,omitempty" tf:"about_text,omitempty"` + + // The system architecture that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported architectures will appear as badges on the repository and are used as search filters: ARM, ARM 64, x86, x86-64 + // +kubebuilder:validation:Optional + // +listType=set + Architectures []*string `json:"architectures,omitempty" tf:"architectures,omitempty"` + + // A short description of the contents of the repository. This text appears in both the image details and also when searching for repositories on the Amazon ECR Public Gallery. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The base64-encoded repository logo payload. (Only visible for verified accounts) Note that drift detection is disabled for this attribute. + // +kubebuilder:validation:Optional + LogoImageBlob *string `json:"logoImageBlob,omitempty" tf:"logo_image_blob,omitempty"` + + // The operating systems that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported operating systems will appear as badges on the repository and are used as search filters: Linux, Windows + // +kubebuilder:validation:Optional + // +listType=set + OperatingSystems []*string `json:"operatingSystems,omitempty" tf:"operating_systems,omitempty"` + + // Detailed information on how to use the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The usage text provides context, support information, and additional usage details for users of the repository. The text must be in markdown format. + // +kubebuilder:validation:Optional + UsageText *string `json:"usageText,omitempty" tf:"usage_text,omitempty"` +} + +type RepositoryInitParameters struct { + + // Catalog data configuration for the repository. See below for schema. + CatalogData *CatalogDataInitParameters `json:"catalogData,omitempty" tf:"catalog_data,omitempty"` + + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type RepositoryObservation struct { + + // Full ARN of the repository. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Catalog data configuration for the repository. See below for schema. 
+ CatalogData *CatalogDataObservation `json:"catalogData,omitempty" tf:"catalog_data,omitempty"` + + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // The repository name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The registry ID where the repository was created. + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + + // The URI of the repository. + RepositoryURI *string `json:"repositoryUri,omitempty" tf:"repository_uri,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type RepositoryParameters struct { + + // Catalog data configuration for the repository. See below for schema. + // +kubebuilder:validation:Optional + CatalogData *CatalogDataParameters `json:"catalogData,omitempty" tf:"catalog_data,omitempty"` + + // +kubebuilder:validation:Optional + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// RepositorySpec defines the desired state of Repository +type RepositorySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RepositoryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RepositoryInitParameters `json:"initProvider,omitempty"` +} + +// RepositoryStatus defines the observed state of Repository. +type RepositoryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RepositoryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Repository is the Schema for the Repositorys API. Provides a Public Elastic Container Registry Repository. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Repository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec RepositorySpec `json:"spec"` + Status RepositoryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RepositoryList contains a list of Repositorys +type RepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Repository `json:"items"` +} + +// Repository type metadata. +var ( + Repository_Kind = "Repository" + Repository_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Repository_Kind}.String() + Repository_KindAPIVersion = Repository_Kind + "." + CRDGroupVersion.String() + Repository_GroupVersionKind = CRDGroupVersion.WithKind(Repository_Kind) +) + +func init() { + SchemeBuilder.Register(&Repository{}, &RepositoryList{}) +} diff --git a/apis/ecs/v1beta1/zz_clustercapacityproviders_types.go b/apis/ecs/v1beta1/zz_clustercapacityproviders_types.go index 0cb8a6fbbe..7f2da95b44 100755 --- a/apis/ecs/v1beta1/zz_clustercapacityproviders_types.go +++ b/apis/ecs/v1beta1/zz_clustercapacityproviders_types.go @@ -20,7 +20,7 @@ type ClusterCapacityProvidersInitParameters struct { CapacityProviders []*string `json:"capacityProviders,omitempty" tf:"capacity_providers,omitempty"` // Name of the ECS cluster to manage capacity providers for. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Cluster ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` // Reference to a Cluster in ecs to populate clusterName. @@ -59,7 +59,7 @@ type ClusterCapacityProvidersParameters struct { CapacityProviders []*string `json:"capacityProviders,omitempty" tf:"capacity_providers,omitempty"` // Name of the ECS cluster to manage capacity providers for. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` diff --git a/apis/ecs/v1beta1/zz_generated.conversion_hubs.go b/apis/ecs/v1beta1/zz_generated.conversion_hubs.go index b13e0d443b..a443b008d3 100755 --- a/apis/ecs/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/ecs/v1beta1/zz_generated.conversion_hubs.go @@ -9,17 +9,5 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *AccountSettingDefault) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *CapacityProvider) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Cluster) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ClusterCapacityProviders) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Service) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *TaskDefinition) Hub() {} diff --git a/apis/ecs/v1beta1/zz_generated.conversion_spokes.go b/apis/ecs/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..e7066d273b --- /dev/null +++ b/apis/ecs/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this CapacityProvider to the hub type. +func (tr *CapacityProvider) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CapacityProvider type. +func (tr *CapacityProvider) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Cluster to the hub type. 
+func (tr *Cluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Cluster type. +func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Service to the hub type. +func (tr *Service) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Service type. +func (tr *Service) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this TaskDefinition to the hub type. 
+func (tr *TaskDefinition) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the TaskDefinition type. +func (tr *TaskDefinition) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/ecs/v1beta1/zz_generated.resolvers.go b/apis/ecs/v1beta1/zz_generated.resolvers.go index 74713de984..7ca0c3b11f 100644 --- a/apis/ecs/v1beta1/zz_generated.resolvers.go +++ b/apis/ecs/v1beta1/zz_generated.resolvers.go @@ -81,7 +81,7 @@ func (mg *ClusterCapacityProviders) ResolveReferences(ctx context.Context, c cli var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -100,7 +100,7 @@ func (mg *ClusterCapacityProviders) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.ClusterName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ClusterNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, 
err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/ecs/v1beta2/zz_capacityprovider_terraformed.go b/apis/ecs/v1beta2/zz_capacityprovider_terraformed.go new file mode 100755 index 0000000000..5438bf48ce --- /dev/null +++ b/apis/ecs/v1beta2/zz_capacityprovider_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CapacityProvider +func (mg *CapacityProvider) GetTerraformResourceType() string { + return "aws_ecs_capacity_provider" +} + +// GetConnectionDetailsMapping for this CapacityProvider +func (tr *CapacityProvider) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CapacityProvider +func (tr *CapacityProvider) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CapacityProvider +func (tr *CapacityProvider) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CapacityProvider +func (tr *CapacityProvider) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CapacityProvider +func (tr 
*CapacityProvider) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this CapacityProvider
+func (tr *CapacityProvider) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this CapacityProvider
+func (tr *CapacityProvider) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetInitParameters of this CapacityProvider
+func (tr *CapacityProvider) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this CapacityProvider using its observed tfState.
+// returns True if there are any spec changes for the resource. +func (tr *CapacityProvider) LateInitialize(attrs []byte) (bool, error) { + params := &CapacityProviderParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CapacityProvider) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ecs/v1beta2/zz_capacityprovider_types.go b/apis/ecs/v1beta2/zz_capacityprovider_types.go new file mode 100755 index 0000000000..3b4ecb64fe --- /dev/null +++ b/apis/ecs/v1beta2/zz_capacityprovider_types.go @@ -0,0 +1,250 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoScalingGroupProviderInitParameters struct { + + // - ARN of the associated auto scaling group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta3.AutoscalingGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + AutoScalingGroupArn *string `json:"autoScalingGroupArn,omitempty" tf:"auto_scaling_group_arn,omitempty"` + + // Reference to a AutoscalingGroup in autoscaling to populate autoScalingGroupArn. 
+ // +kubebuilder:validation:Optional + AutoScalingGroupArnRef *v1.Reference `json:"autoScalingGroupArnRef,omitempty" tf:"-"` + + // Selector for a AutoscalingGroup in autoscaling to populate autoScalingGroupArn. + // +kubebuilder:validation:Optional + AutoScalingGroupArnSelector *v1.Selector `json:"autoScalingGroupArnSelector,omitempty" tf:"-"` + + // - Enables or disables a graceful shutdown of instances without disturbing workloads. Valid values are ENABLED and DISABLED. The default value is ENABLED when a capacity provider is created. + ManagedDraining *string `json:"managedDraining,omitempty" tf:"managed_draining,omitempty"` + + // - Configuration block defining the parameters of the auto scaling. Detailed below. + ManagedScaling *ManagedScalingInitParameters `json:"managedScaling,omitempty" tf:"managed_scaling,omitempty"` + + // - Enables or disables container-aware termination of instances in the auto scaling group when scale-in happens. Valid values are ENABLED and DISABLED. + ManagedTerminationProtection *string `json:"managedTerminationProtection,omitempty" tf:"managed_termination_protection,omitempty"` +} + +type AutoScalingGroupProviderObservation struct { + + // - ARN of the associated auto scaling group. + AutoScalingGroupArn *string `json:"autoScalingGroupArn,omitempty" tf:"auto_scaling_group_arn,omitempty"` + + // - Enables or disables a graceful shutdown of instances without disturbing workloads. Valid values are ENABLED and DISABLED. The default value is ENABLED when a capacity provider is created. + ManagedDraining *string `json:"managedDraining,omitempty" tf:"managed_draining,omitempty"` + + // - Configuration block defining the parameters of the auto scaling. Detailed below. + ManagedScaling *ManagedScalingObservation `json:"managedScaling,omitempty" tf:"managed_scaling,omitempty"` + + // - Enables or disables container-aware termination of instances in the auto scaling group when scale-in happens. Valid values are ENABLED and DISABLED. 
+ ManagedTerminationProtection *string `json:"managedTerminationProtection,omitempty" tf:"managed_termination_protection,omitempty"` +} + +type AutoScalingGroupProviderParameters struct { + + // - ARN of the associated auto scaling group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/autoscaling/v1beta3.AutoscalingGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + AutoScalingGroupArn *string `json:"autoScalingGroupArn,omitempty" tf:"auto_scaling_group_arn,omitempty"` + + // Reference to a AutoscalingGroup in autoscaling to populate autoScalingGroupArn. + // +kubebuilder:validation:Optional + AutoScalingGroupArnRef *v1.Reference `json:"autoScalingGroupArnRef,omitempty" tf:"-"` + + // Selector for a AutoscalingGroup in autoscaling to populate autoScalingGroupArn. + // +kubebuilder:validation:Optional + AutoScalingGroupArnSelector *v1.Selector `json:"autoScalingGroupArnSelector,omitempty" tf:"-"` + + // - Enables or disables a graceful shutdown of instances without disturbing workloads. Valid values are ENABLED and DISABLED. The default value is ENABLED when a capacity provider is created. + // +kubebuilder:validation:Optional + ManagedDraining *string `json:"managedDraining,omitempty" tf:"managed_draining,omitempty"` + + // - Configuration block defining the parameters of the auto scaling. Detailed below. + // +kubebuilder:validation:Optional + ManagedScaling *ManagedScalingParameters `json:"managedScaling,omitempty" tf:"managed_scaling,omitempty"` + + // - Enables or disables container-aware termination of instances in the auto scaling group when scale-in happens. Valid values are ENABLED and DISABLED. 
+ // +kubebuilder:validation:Optional + ManagedTerminationProtection *string `json:"managedTerminationProtection,omitempty" tf:"managed_termination_protection,omitempty"` +} + +type CapacityProviderInitParameters struct { + + // Configuration block for the provider for the ECS auto scaling group. Detailed below. + AutoScalingGroupProvider *AutoScalingGroupProviderInitParameters `json:"autoScalingGroupProvider,omitempty" tf:"auto_scaling_group_provider,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CapacityProviderObservation struct { + + // ARN that identifies the capacity provider. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration block for the provider for the ECS auto scaling group. Detailed below. + AutoScalingGroupProvider *AutoScalingGroupProviderObservation `json:"autoScalingGroupProvider,omitempty" tf:"auto_scaling_group_provider,omitempty"` + + // ARN that identifies the capacity provider. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type CapacityProviderParameters struct { + + // Configuration block for the provider for the ECS auto scaling group. Detailed below. + // +kubebuilder:validation:Optional + AutoScalingGroupProvider *AutoScalingGroupProviderParameters `json:"autoScalingGroupProvider,omitempty" tf:"auto_scaling_group_provider,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ManagedScalingInitParameters struct { + + // Period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value of 300 seconds is used. + InstanceWarmupPeriod *float64 `json:"instanceWarmupPeriod,omitempty" tf:"instance_warmup_period,omitempty"` + + // Maximum step adjustment size. A number between 1 and 10,000. + MaximumScalingStepSize *float64 `json:"maximumScalingStepSize,omitempty" tf:"maximum_scaling_step_size,omitempty"` + + // Minimum step adjustment size. A number between 1 and 10,000. + MinimumScalingStepSize *float64 `json:"minimumScalingStepSize,omitempty" tf:"minimum_scaling_step_size,omitempty"` + + // Whether auto scaling is managed by ECS. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Target utilization for the capacity provider. A number between 1 and 100. + TargetCapacity *float64 `json:"targetCapacity,omitempty" tf:"target_capacity,omitempty"` +} + +type ManagedScalingObservation struct { + + // Period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value of 300 seconds is used. + InstanceWarmupPeriod *float64 `json:"instanceWarmupPeriod,omitempty" tf:"instance_warmup_period,omitempty"` + + // Maximum step adjustment size. A number between 1 and 10,000. + MaximumScalingStepSize *float64 `json:"maximumScalingStepSize,omitempty" tf:"maximum_scaling_step_size,omitempty"` + + // Minimum step adjustment size. A number between 1 and 10,000. 
+ MinimumScalingStepSize *float64 `json:"minimumScalingStepSize,omitempty" tf:"minimum_scaling_step_size,omitempty"` + + // Whether auto scaling is managed by ECS. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Target utilization for the capacity provider. A number between 1 and 100. + TargetCapacity *float64 `json:"targetCapacity,omitempty" tf:"target_capacity,omitempty"` +} + +type ManagedScalingParameters struct { + + // Period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value of 300 seconds is used. + // +kubebuilder:validation:Optional + InstanceWarmupPeriod *float64 `json:"instanceWarmupPeriod,omitempty" tf:"instance_warmup_period,omitempty"` + + // Maximum step adjustment size. A number between 1 and 10,000. + // +kubebuilder:validation:Optional + MaximumScalingStepSize *float64 `json:"maximumScalingStepSize,omitempty" tf:"maximum_scaling_step_size,omitempty"` + + // Minimum step adjustment size. A number between 1 and 10,000. + // +kubebuilder:validation:Optional + MinimumScalingStepSize *float64 `json:"minimumScalingStepSize,omitempty" tf:"minimum_scaling_step_size,omitempty"` + + // Whether auto scaling is managed by ECS. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Target utilization for the capacity provider. A number between 1 and 100. + // +kubebuilder:validation:Optional + TargetCapacity *float64 `json:"targetCapacity,omitempty" tf:"target_capacity,omitempty"` +} + +// CapacityProviderSpec defines the desired state of CapacityProvider +type CapacityProviderSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CapacityProviderParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CapacityProviderInitParameters `json:"initProvider,omitempty"` +} + +// CapacityProviderStatus defines the observed state of CapacityProvider. +type CapacityProviderStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CapacityProviderObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CapacityProvider is the Schema for the CapacityProviders API. Provides an ECS cluster capacity provider. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CapacityProvider struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.autoScalingGroupProvider) || (has(self.initProvider) && has(self.initProvider.autoScalingGroupProvider))",message="spec.forProvider.autoScalingGroupProvider is a required parameter" + Spec CapacityProviderSpec `json:"spec"` + Status CapacityProviderStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CapacityProviderList contains a list of CapacityProviders +type CapacityProviderList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CapacityProvider `json:"items"` +} + +// Repository type metadata. +var ( + CapacityProvider_Kind = "CapacityProvider" + CapacityProvider_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CapacityProvider_Kind}.String() + CapacityProvider_KindAPIVersion = CapacityProvider_Kind + "." 
+ CRDGroupVersion.String() + CapacityProvider_GroupVersionKind = CRDGroupVersion.WithKind(CapacityProvider_Kind) +) + +func init() { + SchemeBuilder.Register(&CapacityProvider{}, &CapacityProviderList{}) +} diff --git a/apis/ecs/v1beta2/zz_cluster_terraformed.go b/apis/ecs/v1beta2/zz_cluster_terraformed.go new file mode 100755 index 0000000000..6dfca669bd --- /dev/null +++ b/apis/ecs/v1beta2/zz_cluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "aws_ecs_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + 
return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ecs/v1beta2/zz_cluster_types.go b/apis/ecs/v1beta2/zz_cluster_types.go new file mode 100755 index 0000000000..d94aa439b2 --- /dev/null +++ b/apis/ecs/v1beta2/zz_cluster_types.go @@ -0,0 +1,306 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClusterInitParameters struct { + + // The execute command configuration for the cluster. Detailed below. + Configuration *ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Configures a default Service Connect namespace. Detailed below. + ServiceConnectDefaults *ServiceConnectDefaultsInitParameters `json:"serviceConnectDefaults,omitempty" tf:"service_connect_defaults,omitempty"` + + // Configuration block(s) with cluster settings. For example, this can be used to enable CloudWatch Container Insights for a cluster. Detailed below. + Setting []SettingInitParameters `json:"setting,omitempty" tf:"setting,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ClusterObservation struct { + + // ARN that identifies the cluster. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The execute command configuration for the cluster. Detailed below. + Configuration *ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // ARN that identifies the cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configures a default Service Connect namespace. Detailed below. + ServiceConnectDefaults *ServiceConnectDefaultsObservation `json:"serviceConnectDefaults,omitempty" tf:"service_connect_defaults,omitempty"` + + // Configuration block(s) with cluster settings. For example, this can be used to enable CloudWatch Container Insights for a cluster. Detailed below. + Setting []SettingObservation `json:"setting,omitempty" tf:"setting,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ClusterParameters struct { + + // The execute command configuration for the cluster. Detailed below. + // +kubebuilder:validation:Optional + Configuration *ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configures a default Service Connect namespace. Detailed below. 
+ // +kubebuilder:validation:Optional + ServiceConnectDefaults *ServiceConnectDefaultsParameters `json:"serviceConnectDefaults,omitempty" tf:"service_connect_defaults,omitempty"` + + // Configuration block(s) with cluster settings. For example, this can be used to enable CloudWatch Container Insights for a cluster. Detailed below. + // +kubebuilder:validation:Optional + Setting []SettingParameters `json:"setting,omitempty" tf:"setting,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ConfigurationInitParameters struct { + + // The details of the execute command configuration. Detailed below. + ExecuteCommandConfiguration *ExecuteCommandConfigurationInitParameters `json:"executeCommandConfiguration,omitempty" tf:"execute_command_configuration,omitempty"` +} + +type ConfigurationObservation struct { + + // The details of the execute command configuration. Detailed below. + ExecuteCommandConfiguration *ExecuteCommandConfigurationObservation `json:"executeCommandConfiguration,omitempty" tf:"execute_command_configuration,omitempty"` +} + +type ConfigurationParameters struct { + + // The details of the execute command configuration. Detailed below. + // +kubebuilder:validation:Optional + ExecuteCommandConfiguration *ExecuteCommandConfigurationParameters `json:"executeCommandConfiguration,omitempty" tf:"execute_command_configuration,omitempty"` +} + +type ExecuteCommandConfigurationInitParameters struct { + + // The AWS Key Management Service key ID to encrypt the data between the local client and the container. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The log configuration for the results of the execute command actions Required when logging is OVERRIDE. Detailed below. 
+ LogConfiguration *LogConfigurationInitParameters `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // The log setting to use for redirecting logs for your execute command results. Valid values are NONE, DEFAULT, and OVERRIDE. + Logging *string `json:"logging,omitempty" tf:"logging,omitempty"` +} + +type ExecuteCommandConfigurationObservation struct { + + // The AWS Key Management Service key ID to encrypt the data between the local client and the container. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The log configuration for the results of the execute command actions Required when logging is OVERRIDE. Detailed below. + LogConfiguration *LogConfigurationObservation `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // The log setting to use for redirecting logs for your execute command results. Valid values are NONE, DEFAULT, and OVERRIDE. + Logging *string `json:"logging,omitempty" tf:"logging,omitempty"` +} + +type ExecuteCommandConfigurationParameters struct { + + // The AWS Key Management Service key ID to encrypt the data between the local client and the container. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The log configuration for the results of the execute command actions Required when logging is OVERRIDE. Detailed below. + // +kubebuilder:validation:Optional + LogConfiguration *LogConfigurationParameters `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // The log setting to use for redirecting logs for your execute command results. Valid values are NONE, DEFAULT, and OVERRIDE. + // +kubebuilder:validation:Optional + Logging *string `json:"logging,omitempty" tf:"logging,omitempty"` +} + +type LogConfigurationInitParameters struct { + + // Whether or not to enable encryption on the CloudWatch logs. If not specified, encryption will be disabled. 
+ CloudWatchEncryptionEnabled *bool `json:"cloudWatchEncryptionEnabled,omitempty" tf:"cloud_watch_encryption_enabled,omitempty"` + + // The name of the CloudWatch log group to send logs to. + CloudWatchLogGroupName *string `json:"cloudWatchLogGroupName,omitempty" tf:"cloud_watch_log_group_name,omitempty"` + + // Whether or not to enable encryption on the logs sent to S3. If not specified, encryption will be disabled. + S3BucketEncryptionEnabled *bool `json:"s3BucketEncryptionEnabled,omitempty" tf:"s3_bucket_encryption_enabled,omitempty"` + + // The name of the S3 bucket to send logs to. + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // An optional folder in the S3 bucket to place logs in. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type LogConfigurationObservation struct { + + // Whether or not to enable encryption on the CloudWatch logs. If not specified, encryption will be disabled. + CloudWatchEncryptionEnabled *bool `json:"cloudWatchEncryptionEnabled,omitempty" tf:"cloud_watch_encryption_enabled,omitempty"` + + // The name of the CloudWatch log group to send logs to. + CloudWatchLogGroupName *string `json:"cloudWatchLogGroupName,omitempty" tf:"cloud_watch_log_group_name,omitempty"` + + // Whether or not to enable encryption on the logs sent to S3. If not specified, encryption will be disabled. + S3BucketEncryptionEnabled *bool `json:"s3BucketEncryptionEnabled,omitempty" tf:"s3_bucket_encryption_enabled,omitempty"` + + // The name of the S3 bucket to send logs to. + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // An optional folder in the S3 bucket to place logs in. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type LogConfigurationParameters struct { + + // Whether or not to enable encryption on the CloudWatch logs. If not specified, encryption will be disabled. 
+ // +kubebuilder:validation:Optional + CloudWatchEncryptionEnabled *bool `json:"cloudWatchEncryptionEnabled,omitempty" tf:"cloud_watch_encryption_enabled,omitempty"` + + // The name of the CloudWatch log group to send logs to. + // +kubebuilder:validation:Optional + CloudWatchLogGroupName *string `json:"cloudWatchLogGroupName,omitempty" tf:"cloud_watch_log_group_name,omitempty"` + + // Whether or not to enable encryption on the logs sent to S3. If not specified, encryption will be disabled. + // +kubebuilder:validation:Optional + S3BucketEncryptionEnabled *bool `json:"s3BucketEncryptionEnabled,omitempty" tf:"s3_bucket_encryption_enabled,omitempty"` + + // The name of the S3 bucket to send logs to. + // +kubebuilder:validation:Optional + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // An optional folder in the S3 bucket to place logs in. + // +kubebuilder:validation:Optional + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type ServiceConnectDefaultsInitParameters struct { + + // The ARN of the aws_service_discovery_http_namespace that's used when you create a service and don't specify a Service Connect configuration. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type ServiceConnectDefaultsObservation struct { + + // The ARN of the aws_service_discovery_http_namespace that's used when you create a service and don't specify a Service Connect configuration. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` +} + +type ServiceConnectDefaultsParameters struct { + + // The ARN of the aws_service_discovery_http_namespace that's used when you create a service and don't specify a Service Connect configuration. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace" tf:"namespace,omitempty"` +} + +type SettingInitParameters struct { + + // Name of the setting to manage. Valid values: containerInsights. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value to assign to the setting. Valid values are enabled and disabled. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SettingObservation struct { + + // Name of the setting to manage. Valid values: containerInsights. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value to assign to the setting. Valid values are enabled and disabled. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SettingParameters struct { + + // Name of the setting to manage. Valid values: containerInsights. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value to assign to the setting. Valid values are enabled and disabled. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. 
+type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the Clusters API. Provides an ECS cluster. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." + CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/ecs/v1beta2/zz_generated.conversion_hubs.go b/apis/ecs/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..e3dfe6adde --- /dev/null +++ b/apis/ecs/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *CapacityProvider) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Service) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *TaskDefinition) Hub() {} diff --git a/apis/ecs/v1beta2/zz_generated.deepcopy.go b/apis/ecs/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..ed20615f25 --- /dev/null +++ b/apis/ecs/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,5151 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlarmsInitParameters) DeepCopyInto(out *AlarmsInitParameters) { + *out = *in + if in.AlarmNames != nil { + in, out := &in.AlarmNames, &out.AlarmNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Rollback != nil { + in, out := &in.Rollback, &out.Rollback + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmsInitParameters. +func (in *AlarmsInitParameters) DeepCopy() *AlarmsInitParameters { + if in == nil { + return nil + } + out := new(AlarmsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlarmsObservation) DeepCopyInto(out *AlarmsObservation) { + *out = *in + if in.AlarmNames != nil { + in, out := &in.AlarmNames, &out.AlarmNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Rollback != nil { + in, out := &in.Rollback, &out.Rollback + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmsObservation. +func (in *AlarmsObservation) DeepCopy() *AlarmsObservation { + if in == nil { + return nil + } + out := new(AlarmsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlarmsParameters) DeepCopyInto(out *AlarmsParameters) { + *out = *in + if in.AlarmNames != nil { + in, out := &in.AlarmNames, &out.AlarmNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Rollback != nil { + in, out := &in.Rollback, &out.Rollback + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmsParameters. +func (in *AlarmsParameters) DeepCopy() *AlarmsParameters { + if in == nil { + return nil + } + out := new(AlarmsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthorizationConfigInitParameters) DeepCopyInto(out *AuthorizationConfigInitParameters) { + *out = *in + if in.AccessPointID != nil { + in, out := &in.AccessPointID, &out.AccessPointID + *out = new(string) + **out = **in + } + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfigInitParameters. +func (in *AuthorizationConfigInitParameters) DeepCopy() *AuthorizationConfigInitParameters { + if in == nil { + return nil + } + out := new(AuthorizationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfigObservation) DeepCopyInto(out *AuthorizationConfigObservation) { + *out = *in + if in.AccessPointID != nil { + in, out := &in.AccessPointID, &out.AccessPointID + *out = new(string) + **out = **in + } + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfigObservation. +func (in *AuthorizationConfigObservation) DeepCopy() *AuthorizationConfigObservation { + if in == nil { + return nil + } + out := new(AuthorizationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthorizationConfigParameters) DeepCopyInto(out *AuthorizationConfigParameters) { + *out = *in + if in.AccessPointID != nil { + in, out := &in.AccessPointID, &out.AccessPointID + *out = new(string) + **out = **in + } + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfigParameters. +func (in *AuthorizationConfigParameters) DeepCopy() *AuthorizationConfigParameters { + if in == nil { + return nil + } + out := new(AuthorizationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScalingGroupProviderInitParameters) DeepCopyInto(out *AutoScalingGroupProviderInitParameters) { + *out = *in + if in.AutoScalingGroupArn != nil { + in, out := &in.AutoScalingGroupArn, &out.AutoScalingGroupArn + *out = new(string) + **out = **in + } + if in.AutoScalingGroupArnRef != nil { + in, out := &in.AutoScalingGroupArnRef, &out.AutoScalingGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AutoScalingGroupArnSelector != nil { + in, out := &in.AutoScalingGroupArnSelector, &out.AutoScalingGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ManagedDraining != nil { + in, out := &in.ManagedDraining, &out.ManagedDraining + *out = new(string) + **out = **in + } + if in.ManagedScaling != nil { + in, out := &in.ManagedScaling, &out.ManagedScaling + *out = new(ManagedScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagedTerminationProtection != nil { + in, out := &in.ManagedTerminationProtection, &out.ManagedTerminationProtection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScalingGroupProviderInitParameters. 
+func (in *AutoScalingGroupProviderInitParameters) DeepCopy() *AutoScalingGroupProviderInitParameters { + if in == nil { + return nil + } + out := new(AutoScalingGroupProviderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScalingGroupProviderObservation) DeepCopyInto(out *AutoScalingGroupProviderObservation) { + *out = *in + if in.AutoScalingGroupArn != nil { + in, out := &in.AutoScalingGroupArn, &out.AutoScalingGroupArn + *out = new(string) + **out = **in + } + if in.ManagedDraining != nil { + in, out := &in.ManagedDraining, &out.ManagedDraining + *out = new(string) + **out = **in + } + if in.ManagedScaling != nil { + in, out := &in.ManagedScaling, &out.ManagedScaling + *out = new(ManagedScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.ManagedTerminationProtection != nil { + in, out := &in.ManagedTerminationProtection, &out.ManagedTerminationProtection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScalingGroupProviderObservation. +func (in *AutoScalingGroupProviderObservation) DeepCopy() *AutoScalingGroupProviderObservation { + if in == nil { + return nil + } + out := new(AutoScalingGroupProviderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoScalingGroupProviderParameters) DeepCopyInto(out *AutoScalingGroupProviderParameters) { + *out = *in + if in.AutoScalingGroupArn != nil { + in, out := &in.AutoScalingGroupArn, &out.AutoScalingGroupArn + *out = new(string) + **out = **in + } + if in.AutoScalingGroupArnRef != nil { + in, out := &in.AutoScalingGroupArnRef, &out.AutoScalingGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AutoScalingGroupArnSelector != nil { + in, out := &in.AutoScalingGroupArnSelector, &out.AutoScalingGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ManagedDraining != nil { + in, out := &in.ManagedDraining, &out.ManagedDraining + *out = new(string) + **out = **in + } + if in.ManagedScaling != nil { + in, out := &in.ManagedScaling, &out.ManagedScaling + *out = new(ManagedScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagedTerminationProtection != nil { + in, out := &in.ManagedTerminationProtection, &out.ManagedTerminationProtection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScalingGroupProviderParameters. +func (in *AutoScalingGroupProviderParameters) DeepCopy() *AutoScalingGroupProviderParameters { + if in == nil { + return nil + } + out := new(AutoScalingGroupProviderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProvider) DeepCopyInto(out *CapacityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProvider. 
+func (in *CapacityProvider) DeepCopy() *CapacityProvider { + if in == nil { + return nil + } + out := new(CapacityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CapacityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderInitParameters) DeepCopyInto(out *CapacityProviderInitParameters) { + *out = *in + if in.AutoScalingGroupProvider != nil { + in, out := &in.AutoScalingGroupProvider, &out.AutoScalingGroupProvider + *out = new(AutoScalingGroupProviderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderInitParameters. +func (in *CapacityProviderInitParameters) DeepCopy() *CapacityProviderInitParameters { + if in == nil { + return nil + } + out := new(CapacityProviderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityProviderList) DeepCopyInto(out *CapacityProviderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CapacityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderList. +func (in *CapacityProviderList) DeepCopy() *CapacityProviderList { + if in == nil { + return nil + } + out := new(CapacityProviderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CapacityProviderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderObservation) DeepCopyInto(out *CapacityProviderObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoScalingGroupProvider != nil { + in, out := &in.AutoScalingGroupProvider, &out.AutoScalingGroupProvider + *out = new(AutoScalingGroupProviderObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderObservation. +func (in *CapacityProviderObservation) DeepCopy() *CapacityProviderObservation { + if in == nil { + return nil + } + out := new(CapacityProviderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderParameters) DeepCopyInto(out *CapacityProviderParameters) { + *out = *in + if in.AutoScalingGroupProvider != nil { + in, out := &in.AutoScalingGroupProvider, &out.AutoScalingGroupProvider + *out = new(AutoScalingGroupProviderParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderParameters. +func (in *CapacityProviderParameters) DeepCopy() *CapacityProviderParameters { + if in == nil { + return nil + } + out := new(CapacityProviderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityProviderSpec) DeepCopyInto(out *CapacityProviderSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderSpec. +func (in *CapacityProviderSpec) DeepCopy() *CapacityProviderSpec { + if in == nil { + return nil + } + out := new(CapacityProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderStatus) DeepCopyInto(out *CapacityProviderStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStatus. +func (in *CapacityProviderStatus) DeepCopy() *CapacityProviderStatus { + if in == nil { + return nil + } + out := new(CapacityProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderStrategyInitParameters) DeepCopyInto(out *CapacityProviderStrategyInitParameters) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(float64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyInitParameters. 
+func (in *CapacityProviderStrategyInitParameters) DeepCopy() *CapacityProviderStrategyInitParameters { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderStrategyObservation) DeepCopyInto(out *CapacityProviderStrategyObservation) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(float64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyObservation. +func (in *CapacityProviderStrategyObservation) DeepCopy() *CapacityProviderStrategyObservation { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderStrategyParameters) DeepCopyInto(out *CapacityProviderStrategyParameters) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(float64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyParameters. 
+func (in *CapacityProviderStrategyParameters) DeepCopy() *CapacityProviderStrategyParameters { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAliasInitParameters) DeepCopyInto(out *ClientAliasInitParameters) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAliasInitParameters. +func (in *ClientAliasInitParameters) DeepCopy() *ClientAliasInitParameters { + if in == nil { + return nil + } + out := new(ClientAliasInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAliasObservation) DeepCopyInto(out *ClientAliasObservation) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAliasObservation. +func (in *ClientAliasObservation) DeepCopy() *ClientAliasObservation { + if in == nil { + return nil + } + out := new(ClientAliasObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientAliasParameters) DeepCopyInto(out *ClientAliasParameters) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAliasParameters. +func (in *ClientAliasParameters) DeepCopy() *ClientAliasParameters { + if in == nil { + return nil + } + out := new(ClientAliasParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceConnectDefaults != nil { + in, out := &in.ServiceConnectDefaults, &out.ServiceConnectDefaults + *out = new(ServiceConnectDefaultsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = make([]SettingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. 
+func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ServiceConnectDefaults != nil { + in, out := &in.ServiceConnectDefaults, &out.ServiceConnectDefaults + *out = new(ServiceConnectDefaultsObservation) + (*in).DeepCopyInto(*out) + } + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = make([]SettingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceConnectDefaults != nil { + in, out := &in.ServiceConnectDefaults, &out.ServiceConnectDefaults + *out = new(ServiceConnectDefaultsParameters) + (*in).DeepCopyInto(*out) + } + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = make([]SettingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.ExecuteCommandConfiguration != nil { + in, out := &in.ExecuteCommandConfiguration, &out.ExecuteCommandConfiguration + *out = new(ExecuteCommandConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.ExecuteCommandConfiguration != nil { + in, out := &in.ExecuteCommandConfiguration, &out.ExecuteCommandConfiguration + *out = new(ExecuteCommandConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.ExecuteCommandConfiguration != nil { + in, out := &in.ExecuteCommandConfiguration, &out.ExecuteCommandConfiguration + *out = new(ExecuteCommandConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCircuitBreakerInitParameters) DeepCopyInto(out *DeploymentCircuitBreakerInitParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Rollback != nil { + in, out := &in.Rollback, &out.Rollback + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCircuitBreakerInitParameters. 
+func (in *DeploymentCircuitBreakerInitParameters) DeepCopy() *DeploymentCircuitBreakerInitParameters { + if in == nil { + return nil + } + out := new(DeploymentCircuitBreakerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCircuitBreakerObservation) DeepCopyInto(out *DeploymentCircuitBreakerObservation) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Rollback != nil { + in, out := &in.Rollback, &out.Rollback + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCircuitBreakerObservation. +func (in *DeploymentCircuitBreakerObservation) DeepCopy() *DeploymentCircuitBreakerObservation { + if in == nil { + return nil + } + out := new(DeploymentCircuitBreakerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCircuitBreakerParameters) DeepCopyInto(out *DeploymentCircuitBreakerParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Rollback != nil { + in, out := &in.Rollback, &out.Rollback + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCircuitBreakerParameters. +func (in *DeploymentCircuitBreakerParameters) DeepCopy() *DeploymentCircuitBreakerParameters { + if in == nil { + return nil + } + out := new(DeploymentCircuitBreakerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentControllerInitParameters) DeepCopyInto(out *DeploymentControllerInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentControllerInitParameters. +func (in *DeploymentControllerInitParameters) DeepCopy() *DeploymentControllerInitParameters { + if in == nil { + return nil + } + out := new(DeploymentControllerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentControllerObservation) DeepCopyInto(out *DeploymentControllerObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentControllerObservation. +func (in *DeploymentControllerObservation) DeepCopy() *DeploymentControllerObservation { + if in == nil { + return nil + } + out := new(DeploymentControllerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentControllerParameters) DeepCopyInto(out *DeploymentControllerParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentControllerParameters. +func (in *DeploymentControllerParameters) DeepCopy() *DeploymentControllerParameters { + if in == nil { + return nil + } + out := new(DeploymentControllerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerVolumeConfigurationInitParameters) DeepCopyInto(out *DockerVolumeConfigurationInitParameters) { + *out = *in + if in.Autoprovision != nil { + in, out := &in.Autoprovision, &out.Autoprovision + *out = new(bool) + **out = **in + } + if in.Driver != nil { + in, out := &in.Driver, &out.Driver + *out = new(string) + **out = **in + } + if in.DriverOpts != nil { + in, out := &in.DriverOpts, &out.DriverOpts + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerVolumeConfigurationInitParameters. +func (in *DockerVolumeConfigurationInitParameters) DeepCopy() *DockerVolumeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(DockerVolumeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerVolumeConfigurationObservation) DeepCopyInto(out *DockerVolumeConfigurationObservation) { + *out = *in + if in.Autoprovision != nil { + in, out := &in.Autoprovision, &out.Autoprovision + *out = new(bool) + **out = **in + } + if in.Driver != nil { + in, out := &in.Driver, &out.Driver + *out = new(string) + **out = **in + } + if in.DriverOpts != nil { + in, out := &in.DriverOpts, &out.DriverOpts + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerVolumeConfigurationObservation. +func (in *DockerVolumeConfigurationObservation) DeepCopy() *DockerVolumeConfigurationObservation { + if in == nil { + return nil + } + out := new(DockerVolumeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerVolumeConfigurationParameters) DeepCopyInto(out *DockerVolumeConfigurationParameters) { + *out = *in + if in.Autoprovision != nil { + in, out := &in.Autoprovision, &out.Autoprovision + *out = new(bool) + **out = **in + } + if in.Driver != nil { + in, out := &in.Driver, &out.Driver + *out = new(string) + **out = **in + } + if in.DriverOpts != nil { + in, out := &in.DriverOpts, &out.DriverOpts + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerVolumeConfigurationParameters. +func (in *DockerVolumeConfigurationParameters) DeepCopy() *DockerVolumeConfigurationParameters { + if in == nil { + return nil + } + out := new(DockerVolumeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EFSVolumeConfigurationInitParameters) DeepCopyInto(out *EFSVolumeConfigurationInitParameters) { + *out = *in + if in.AuthorizationConfig != nil { + in, out := &in.AuthorizationConfig, &out.AuthorizationConfig + *out = new(AuthorizationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.RootDirectory != nil { + in, out := &in.RootDirectory, &out.RootDirectory + *out = new(string) + **out = **in + } + if in.TransitEncryption != nil { + in, out := &in.TransitEncryption, &out.TransitEncryption + *out = new(string) + **out = **in + } + if in.TransitEncryptionPort != nil { + in, out := &in.TransitEncryptionPort, &out.TransitEncryptionPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSVolumeConfigurationInitParameters. +func (in *EFSVolumeConfigurationInitParameters) DeepCopy() *EFSVolumeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EFSVolumeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EFSVolumeConfigurationObservation) DeepCopyInto(out *EFSVolumeConfigurationObservation) { + *out = *in + if in.AuthorizationConfig != nil { + in, out := &in.AuthorizationConfig, &out.AuthorizationConfig + *out = new(AuthorizationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.RootDirectory != nil { + in, out := &in.RootDirectory, &out.RootDirectory + *out = new(string) + **out = **in + } + if in.TransitEncryption != nil { + in, out := &in.TransitEncryption, &out.TransitEncryption + *out = new(string) + **out = **in + } + if in.TransitEncryptionPort != nil { + in, out := &in.TransitEncryptionPort, &out.TransitEncryptionPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSVolumeConfigurationObservation. +func (in *EFSVolumeConfigurationObservation) DeepCopy() *EFSVolumeConfigurationObservation { + if in == nil { + return nil + } + out := new(EFSVolumeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EFSVolumeConfigurationParameters) DeepCopyInto(out *EFSVolumeConfigurationParameters) { + *out = *in + if in.AuthorizationConfig != nil { + in, out := &in.AuthorizationConfig, &out.AuthorizationConfig + *out = new(AuthorizationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.RootDirectory != nil { + in, out := &in.RootDirectory, &out.RootDirectory + *out = new(string) + **out = **in + } + if in.TransitEncryption != nil { + in, out := &in.TransitEncryption, &out.TransitEncryption + *out = new(string) + **out = **in + } + if in.TransitEncryptionPort != nil { + in, out := &in.TransitEncryptionPort, &out.TransitEncryptionPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSVolumeConfigurationParameters. +func (in *EFSVolumeConfigurationParameters) DeepCopy() *EFSVolumeConfigurationParameters { + if in == nil { + return nil + } + out := new(EFSVolumeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralStorageInitParameters) DeepCopyInto(out *EphemeralStorageInitParameters) { + *out = *in + if in.SizeInGib != nil { + in, out := &in.SizeInGib, &out.SizeInGib + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralStorageInitParameters. +func (in *EphemeralStorageInitParameters) DeepCopy() *EphemeralStorageInitParameters { + if in == nil { + return nil + } + out := new(EphemeralStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralStorageObservation) DeepCopyInto(out *EphemeralStorageObservation) { + *out = *in + if in.SizeInGib != nil { + in, out := &in.SizeInGib, &out.SizeInGib + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralStorageObservation. +func (in *EphemeralStorageObservation) DeepCopy() *EphemeralStorageObservation { + if in == nil { + return nil + } + out := new(EphemeralStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralStorageParameters) DeepCopyInto(out *EphemeralStorageParameters) { + *out = *in + if in.SizeInGib != nil { + in, out := &in.SizeInGib, &out.SizeInGib + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralStorageParameters. +func (in *EphemeralStorageParameters) DeepCopy() *EphemeralStorageParameters { + if in == nil { + return nil + } + out := new(EphemeralStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecuteCommandConfigurationInitParameters) DeepCopyInto(out *ExecuteCommandConfigurationInitParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(LogConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecuteCommandConfigurationInitParameters. 
+func (in *ExecuteCommandConfigurationInitParameters) DeepCopy() *ExecuteCommandConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ExecuteCommandConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecuteCommandConfigurationObservation) DeepCopyInto(out *ExecuteCommandConfigurationObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(LogConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecuteCommandConfigurationObservation. +func (in *ExecuteCommandConfigurationObservation) DeepCopy() *ExecuteCommandConfigurationObservation { + if in == nil { + return nil + } + out := new(ExecuteCommandConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecuteCommandConfigurationParameters) DeepCopyInto(out *ExecuteCommandConfigurationParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(LogConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecuteCommandConfigurationParameters. 
+func (in *ExecuteCommandConfigurationParameters) DeepCopy() *ExecuteCommandConfigurationParameters { + if in == nil { + return nil + } + out := new(ExecuteCommandConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigInitParameters) DeepCopyInto(out *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigInitParameters) { + *out = *in + if in.CredentialsParameter != nil { + in, out := &in.CredentialsParameter, &out.CredentialsParameter + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSXWindowsFileServerVolumeConfigurationAuthorizationConfigInitParameters. +func (in *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigInitParameters) DeepCopy() *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigInitParameters { + if in == nil { + return nil + } + out := new(FSXWindowsFileServerVolumeConfigurationAuthorizationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigObservation) DeepCopyInto(out *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigObservation) { + *out = *in + if in.CredentialsParameter != nil { + in, out := &in.CredentialsParameter, &out.CredentialsParameter + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSXWindowsFileServerVolumeConfigurationAuthorizationConfigObservation. 
+func (in *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigObservation) DeepCopy() *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigObservation { + if in == nil { + return nil + } + out := new(FSXWindowsFileServerVolumeConfigurationAuthorizationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigParameters) DeepCopyInto(out *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigParameters) { + *out = *in + if in.CredentialsParameter != nil { + in, out := &in.CredentialsParameter, &out.CredentialsParameter + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSXWindowsFileServerVolumeConfigurationAuthorizationConfigParameters. +func (in *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigParameters) DeepCopy() *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigParameters { + if in == nil { + return nil + } + out := new(FSXWindowsFileServerVolumeConfigurationAuthorizationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FSXWindowsFileServerVolumeConfigurationInitParameters) DeepCopyInto(out *FSXWindowsFileServerVolumeConfigurationInitParameters) { + *out = *in + if in.AuthorizationConfig != nil { + in, out := &in.AuthorizationConfig, &out.AuthorizationConfig + *out = new(FSXWindowsFileServerVolumeConfigurationAuthorizationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.RootDirectory != nil { + in, out := &in.RootDirectory, &out.RootDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSXWindowsFileServerVolumeConfigurationInitParameters. +func (in *FSXWindowsFileServerVolumeConfigurationInitParameters) DeepCopy() *FSXWindowsFileServerVolumeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(FSXWindowsFileServerVolumeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSXWindowsFileServerVolumeConfigurationObservation) DeepCopyInto(out *FSXWindowsFileServerVolumeConfigurationObservation) { + *out = *in + if in.AuthorizationConfig != nil { + in, out := &in.AuthorizationConfig, &out.AuthorizationConfig + *out = new(FSXWindowsFileServerVolumeConfigurationAuthorizationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.RootDirectory != nil { + in, out := &in.RootDirectory, &out.RootDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSXWindowsFileServerVolumeConfigurationObservation. 
+func (in *FSXWindowsFileServerVolumeConfigurationObservation) DeepCopy() *FSXWindowsFileServerVolumeConfigurationObservation { + if in == nil { + return nil + } + out := new(FSXWindowsFileServerVolumeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSXWindowsFileServerVolumeConfigurationParameters) DeepCopyInto(out *FSXWindowsFileServerVolumeConfigurationParameters) { + *out = *in + if in.AuthorizationConfig != nil { + in, out := &in.AuthorizationConfig, &out.AuthorizationConfig + *out = new(FSXWindowsFileServerVolumeConfigurationAuthorizationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.RootDirectory != nil { + in, out := &in.RootDirectory, &out.RootDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSXWindowsFileServerVolumeConfigurationParameters. +func (in *FSXWindowsFileServerVolumeConfigurationParameters) DeepCopy() *FSXWindowsFileServerVolumeConfigurationParameters { + if in == nil { + return nil + } + out := new(FSXWindowsFileServerVolumeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InferenceAcceleratorInitParameters) DeepCopyInto(out *InferenceAcceleratorInitParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DeviceType != nil { + in, out := &in.DeviceType, &out.DeviceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceAcceleratorInitParameters. 
+func (in *InferenceAcceleratorInitParameters) DeepCopy() *InferenceAcceleratorInitParameters { + if in == nil { + return nil + } + out := new(InferenceAcceleratorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InferenceAcceleratorObservation) DeepCopyInto(out *InferenceAcceleratorObservation) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DeviceType != nil { + in, out := &in.DeviceType, &out.DeviceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceAcceleratorObservation. +func (in *InferenceAcceleratorObservation) DeepCopy() *InferenceAcceleratorObservation { + if in == nil { + return nil + } + out := new(InferenceAcceleratorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InferenceAcceleratorParameters) DeepCopyInto(out *InferenceAcceleratorParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DeviceType != nil { + in, out := &in.DeviceType, &out.DeviceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceAcceleratorParameters. +func (in *InferenceAcceleratorParameters) DeepCopy() *InferenceAcceleratorParameters { + if in == nil { + return nil + } + out := new(InferenceAcceleratorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IssuerCertAuthorityInitParameters) DeepCopyInto(out *IssuerCertAuthorityInitParameters) { + *out = *in + if in.AwsPcaAuthorityArn != nil { + in, out := &in.AwsPcaAuthorityArn, &out.AwsPcaAuthorityArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerCertAuthorityInitParameters. +func (in *IssuerCertAuthorityInitParameters) DeepCopy() *IssuerCertAuthorityInitParameters { + if in == nil { + return nil + } + out := new(IssuerCertAuthorityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerCertAuthorityObservation) DeepCopyInto(out *IssuerCertAuthorityObservation) { + *out = *in + if in.AwsPcaAuthorityArn != nil { + in, out := &in.AwsPcaAuthorityArn, &out.AwsPcaAuthorityArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerCertAuthorityObservation. +func (in *IssuerCertAuthorityObservation) DeepCopy() *IssuerCertAuthorityObservation { + if in == nil { + return nil + } + out := new(IssuerCertAuthorityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerCertAuthorityParameters) DeepCopyInto(out *IssuerCertAuthorityParameters) { + *out = *in + if in.AwsPcaAuthorityArn != nil { + in, out := &in.AwsPcaAuthorityArn, &out.AwsPcaAuthorityArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerCertAuthorityParameters. 
+func (in *IssuerCertAuthorityParameters) DeepCopy() *IssuerCertAuthorityParameters { + if in == nil { + return nil + } + out := new(IssuerCertAuthorityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerInitParameters) DeepCopyInto(out *LoadBalancerInitParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerPort != nil { + in, out := &in.ContainerPort, &out.ContainerPort + *out = new(float64) + **out = **in + } + if in.ELBName != nil { + in, out := &in.ELBName, &out.ELBName + *out = new(string) + **out = **in + } + if in.TargetGroupArn != nil { + in, out := &in.TargetGroupArn, &out.TargetGroupArn + *out = new(string) + **out = **in + } + if in.TargetGroupArnRef != nil { + in, out := &in.TargetGroupArnRef, &out.TargetGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArnSelector != nil { + in, out := &in.TargetGroupArnSelector, &out.TargetGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerInitParameters. +func (in *LoadBalancerInitParameters) DeepCopy() *LoadBalancerInitParameters { + if in == nil { + return nil + } + out := new(LoadBalancerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBalancerObservation) DeepCopyInto(out *LoadBalancerObservation) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerPort != nil { + in, out := &in.ContainerPort, &out.ContainerPort + *out = new(float64) + **out = **in + } + if in.ELBName != nil { + in, out := &in.ELBName, &out.ELBName + *out = new(string) + **out = **in + } + if in.TargetGroupArn != nil { + in, out := &in.TargetGroupArn, &out.TargetGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerObservation. +func (in *LoadBalancerObservation) DeepCopy() *LoadBalancerObservation { + if in == nil { + return nil + } + out := new(LoadBalancerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerParameters) DeepCopyInto(out *LoadBalancerParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerPort != nil { + in, out := &in.ContainerPort, &out.ContainerPort + *out = new(float64) + **out = **in + } + if in.ELBName != nil { + in, out := &in.ELBName, &out.ELBName + *out = new(string) + **out = **in + } + if in.TargetGroupArn != nil { + in, out := &in.TargetGroupArn, &out.TargetGroupArn + *out = new(string) + **out = **in + } + if in.TargetGroupArnRef != nil { + in, out := &in.TargetGroupArnRef, &out.TargetGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArnSelector != nil { + in, out := &in.TargetGroupArnSelector, &out.TargetGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerParameters. 
+func (in *LoadBalancerParameters) DeepCopy() *LoadBalancerParameters { + if in == nil { + return nil + } + out := new(LoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogConfigurationInitParameters) DeepCopyInto(out *LogConfigurationInitParameters) { + *out = *in + if in.CloudWatchEncryptionEnabled != nil { + in, out := &in.CloudWatchEncryptionEnabled, &out.CloudWatchEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.CloudWatchLogGroupName != nil { + in, out := &in.CloudWatchLogGroupName, &out.CloudWatchLogGroupName + *out = new(string) + **out = **in + } + if in.S3BucketEncryptionEnabled != nil { + in, out := &in.S3BucketEncryptionEnabled, &out.S3BucketEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigurationInitParameters. +func (in *LogConfigurationInitParameters) DeepCopy() *LogConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LogConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogConfigurationObservation) DeepCopyInto(out *LogConfigurationObservation) { + *out = *in + if in.CloudWatchEncryptionEnabled != nil { + in, out := &in.CloudWatchEncryptionEnabled, &out.CloudWatchEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.CloudWatchLogGroupName != nil { + in, out := &in.CloudWatchLogGroupName, &out.CloudWatchLogGroupName + *out = new(string) + **out = **in + } + if in.S3BucketEncryptionEnabled != nil { + in, out := &in.S3BucketEncryptionEnabled, &out.S3BucketEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigurationObservation. +func (in *LogConfigurationObservation) DeepCopy() *LogConfigurationObservation { + if in == nil { + return nil + } + out := new(LogConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogConfigurationParameters) DeepCopyInto(out *LogConfigurationParameters) { + *out = *in + if in.CloudWatchEncryptionEnabled != nil { + in, out := &in.CloudWatchEncryptionEnabled, &out.CloudWatchEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.CloudWatchLogGroupName != nil { + in, out := &in.CloudWatchLogGroupName, &out.CloudWatchLogGroupName + *out = new(string) + **out = **in + } + if in.S3BucketEncryptionEnabled != nil { + in, out := &in.S3BucketEncryptionEnabled, &out.S3BucketEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigurationParameters. +func (in *LogConfigurationParameters) DeepCopy() *LogConfigurationParameters { + if in == nil { + return nil + } + out := new(LogConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedScalingInitParameters) DeepCopyInto(out *ManagedScalingInitParameters) { + *out = *in + if in.InstanceWarmupPeriod != nil { + in, out := &in.InstanceWarmupPeriod, &out.InstanceWarmupPeriod + *out = new(float64) + **out = **in + } + if in.MaximumScalingStepSize != nil { + in, out := &in.MaximumScalingStepSize, &out.MaximumScalingStepSize + *out = new(float64) + **out = **in + } + if in.MinimumScalingStepSize != nil { + in, out := &in.MinimumScalingStepSize, &out.MinimumScalingStepSize + *out = new(float64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TargetCapacity != nil { + in, out := &in.TargetCapacity, &out.TargetCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedScalingInitParameters. +func (in *ManagedScalingInitParameters) DeepCopy() *ManagedScalingInitParameters { + if in == nil { + return nil + } + out := new(ManagedScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedScalingObservation) DeepCopyInto(out *ManagedScalingObservation) { + *out = *in + if in.InstanceWarmupPeriod != nil { + in, out := &in.InstanceWarmupPeriod, &out.InstanceWarmupPeriod + *out = new(float64) + **out = **in + } + if in.MaximumScalingStepSize != nil { + in, out := &in.MaximumScalingStepSize, &out.MaximumScalingStepSize + *out = new(float64) + **out = **in + } + if in.MinimumScalingStepSize != nil { + in, out := &in.MinimumScalingStepSize, &out.MinimumScalingStepSize + *out = new(float64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TargetCapacity != nil { + in, out := &in.TargetCapacity, &out.TargetCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedScalingObservation. +func (in *ManagedScalingObservation) DeepCopy() *ManagedScalingObservation { + if in == nil { + return nil + } + out := new(ManagedScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedScalingParameters) DeepCopyInto(out *ManagedScalingParameters) { + *out = *in + if in.InstanceWarmupPeriod != nil { + in, out := &in.InstanceWarmupPeriod, &out.InstanceWarmupPeriod + *out = new(float64) + **out = **in + } + if in.MaximumScalingStepSize != nil { + in, out := &in.MaximumScalingStepSize, &out.MaximumScalingStepSize + *out = new(float64) + **out = **in + } + if in.MinimumScalingStepSize != nil { + in, out := &in.MinimumScalingStepSize, &out.MinimumScalingStepSize + *out = new(float64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TargetCapacity != nil { + in, out := &in.TargetCapacity, &out.TargetCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedScalingParameters. +func (in *ManagedScalingParameters) DeepCopy() *ManagedScalingParameters { + if in == nil { + return nil + } + out := new(ManagedScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationInitParameters) DeepCopyInto(out *NetworkConfigurationInitParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetRefs != nil { + in, out := &in.SubnetRefs, &out.SubnetRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetSelector != nil { + in, out := &in.SubnetSelector, &out.SubnetSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationInitParameters. +func (in *NetworkConfigurationInitParameters) DeepCopy() *NetworkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationObservation) DeepCopyInto(out *NetworkConfigurationObservation) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationObservation. +func (in *NetworkConfigurationObservation) DeepCopy() *NetworkConfigurationObservation { + if in == nil { + return nil + } + out := new(NetworkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationParameters) DeepCopyInto(out *NetworkConfigurationParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetRefs != nil { + in, out := &in.SubnetRefs, &out.SubnetRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetSelector != nil { + in, out := &in.SubnetSelector, &out.SubnetSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationParameters. +func (in *NetworkConfigurationParameters) DeepCopy() *NetworkConfigurationParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedPlacementStrategyInitParameters) DeepCopyInto(out *OrderedPlacementStrategyInitParameters) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedPlacementStrategyInitParameters. +func (in *OrderedPlacementStrategyInitParameters) DeepCopy() *OrderedPlacementStrategyInitParameters { + if in == nil { + return nil + } + out := new(OrderedPlacementStrategyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderedPlacementStrategyObservation) DeepCopyInto(out *OrderedPlacementStrategyObservation) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedPlacementStrategyObservation. +func (in *OrderedPlacementStrategyObservation) DeepCopy() *OrderedPlacementStrategyObservation { + if in == nil { + return nil + } + out := new(OrderedPlacementStrategyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrderedPlacementStrategyParameters) DeepCopyInto(out *OrderedPlacementStrategyParameters) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderedPlacementStrategyParameters. +func (in *OrderedPlacementStrategyParameters) DeepCopy() *OrderedPlacementStrategyParameters { + if in == nil { + return nil + } + out := new(OrderedPlacementStrategyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementConstraintsInitParameters) DeepCopyInto(out *PlacementConstraintsInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraintsInitParameters. +func (in *PlacementConstraintsInitParameters) DeepCopy() *PlacementConstraintsInitParameters { + if in == nil { + return nil + } + out := new(PlacementConstraintsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementConstraintsObservation) DeepCopyInto(out *PlacementConstraintsObservation) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraintsObservation. +func (in *PlacementConstraintsObservation) DeepCopy() *PlacementConstraintsObservation { + if in == nil { + return nil + } + out := new(PlacementConstraintsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementConstraintsParameters) DeepCopyInto(out *PlacementConstraintsParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraintsParameters. +func (in *PlacementConstraintsParameters) DeepCopy() *PlacementConstraintsParameters { + if in == nil { + return nil + } + out := new(PlacementConstraintsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxyConfigurationInitParameters) DeepCopyInto(out *ProxyConfigurationInitParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigurationInitParameters. +func (in *ProxyConfigurationInitParameters) DeepCopy() *ProxyConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ProxyConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfigurationObservation) DeepCopyInto(out *ProxyConfigurationObservation) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigurationObservation. 
+func (in *ProxyConfigurationObservation) DeepCopy() *ProxyConfigurationObservation { + if in == nil { + return nil + } + out := new(ProxyConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfigurationParameters) DeepCopyInto(out *ProxyConfigurationParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigurationParameters. +func (in *ProxyConfigurationParameters) DeepCopy() *ProxyConfigurationParameters { + if in == nil { + return nil + } + out := new(ProxyConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimePlatformInitParameters) DeepCopyInto(out *RuntimePlatformInitParameters) { + *out = *in + if in.CPUArchitecture != nil { + in, out := &in.CPUArchitecture, &out.CPUArchitecture + *out = new(string) + **out = **in + } + if in.OperatingSystemFamily != nil { + in, out := &in.OperatingSystemFamily, &out.OperatingSystemFamily + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimePlatformInitParameters. 
+func (in *RuntimePlatformInitParameters) DeepCopy() *RuntimePlatformInitParameters { + if in == nil { + return nil + } + out := new(RuntimePlatformInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimePlatformObservation) DeepCopyInto(out *RuntimePlatformObservation) { + *out = *in + if in.CPUArchitecture != nil { + in, out := &in.CPUArchitecture, &out.CPUArchitecture + *out = new(string) + **out = **in + } + if in.OperatingSystemFamily != nil { + in, out := &in.OperatingSystemFamily, &out.OperatingSystemFamily + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimePlatformObservation. +func (in *RuntimePlatformObservation) DeepCopy() *RuntimePlatformObservation { + if in == nil { + return nil + } + out := new(RuntimePlatformObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimePlatformParameters) DeepCopyInto(out *RuntimePlatformParameters) { + *out = *in + if in.CPUArchitecture != nil { + in, out := &in.CPUArchitecture, &out.CPUArchitecture + *out = new(string) + **out = **in + } + if in.OperatingSystemFamily != nil { + in, out := &in.OperatingSystemFamily, &out.OperatingSystemFamily + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimePlatformParameters. +func (in *RuntimePlatformParameters) DeepCopy() *RuntimePlatformParameters { + if in == nil { + return nil + } + out := new(RuntimePlatformParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretOptionInitParameters) DeepCopyInto(out *SecretOptionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOptionInitParameters. +func (in *SecretOptionInitParameters) DeepCopy() *SecretOptionInitParameters { + if in == nil { + return nil + } + out := new(SecretOptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretOptionObservation) DeepCopyInto(out *SecretOptionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOptionObservation. +func (in *SecretOptionObservation) DeepCopy() *SecretOptionObservation { + if in == nil { + return nil + } + out := new(SecretOptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretOptionParameters) DeepCopyInto(out *SecretOptionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOptionParameters. 
+func (in *SecretOptionParameters) DeepCopy() *SecretOptionParameters { + if in == nil { + return nil + } + out := new(SecretOptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceConnectConfigurationInitParameters) DeepCopyInto(out *ServiceConnectConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(ServiceConnectConfigurationLogConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = make([]ServiceConnectConfigurationServiceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectConfigurationInitParameters. 
+func (in *ServiceConnectConfigurationInitParameters) DeepCopy() *ServiceConnectConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ServiceConnectConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceConnectConfigurationLogConfigurationInitParameters) DeepCopyInto(out *ServiceConnectConfigurationLogConfigurationInitParameters) { + *out = *in + if in.LogDriver != nil { + in, out := &in.LogDriver, &out.LogDriver + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecretOption != nil { + in, out := &in.SecretOption, &out.SecretOption + *out = make([]SecretOptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectConfigurationLogConfigurationInitParameters. +func (in *ServiceConnectConfigurationLogConfigurationInitParameters) DeepCopy() *ServiceConnectConfigurationLogConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ServiceConnectConfigurationLogConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceConnectConfigurationLogConfigurationObservation) DeepCopyInto(out *ServiceConnectConfigurationLogConfigurationObservation) { + *out = *in + if in.LogDriver != nil { + in, out := &in.LogDriver, &out.LogDriver + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecretOption != nil { + in, out := &in.SecretOption, &out.SecretOption + *out = make([]SecretOptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectConfigurationLogConfigurationObservation. +func (in *ServiceConnectConfigurationLogConfigurationObservation) DeepCopy() *ServiceConnectConfigurationLogConfigurationObservation { + if in == nil { + return nil + } + out := new(ServiceConnectConfigurationLogConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceConnectConfigurationLogConfigurationParameters) DeepCopyInto(out *ServiceConnectConfigurationLogConfigurationParameters) { + *out = *in + if in.LogDriver != nil { + in, out := &in.LogDriver, &out.LogDriver + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecretOption != nil { + in, out := &in.SecretOption, &out.SecretOption + *out = make([]SecretOptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectConfigurationLogConfigurationParameters. +func (in *ServiceConnectConfigurationLogConfigurationParameters) DeepCopy() *ServiceConnectConfigurationLogConfigurationParameters { + if in == nil { + return nil + } + out := new(ServiceConnectConfigurationLogConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceConnectConfigurationObservation) DeepCopyInto(out *ServiceConnectConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(ServiceConnectConfigurationLogConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = make([]ServiceConnectConfigurationServiceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectConfigurationObservation. +func (in *ServiceConnectConfigurationObservation) DeepCopy() *ServiceConnectConfigurationObservation { + if in == nil { + return nil + } + out := new(ServiceConnectConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceConnectConfigurationParameters) DeepCopyInto(out *ServiceConnectConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(ServiceConnectConfigurationLogConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = make([]ServiceConnectConfigurationServiceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectConfigurationParameters. +func (in *ServiceConnectConfigurationParameters) DeepCopy() *ServiceConnectConfigurationParameters { + if in == nil { + return nil + } + out := new(ServiceConnectConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceConnectConfigurationServiceInitParameters) DeepCopyInto(out *ServiceConnectConfigurationServiceInitParameters) { + *out = *in + if in.ClientAlias != nil { + in, out := &in.ClientAlias, &out.ClientAlias + *out = new(ClientAliasInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiscoveryName != nil { + in, out := &in.DiscoveryName, &out.DiscoveryName + *out = new(string) + **out = **in + } + if in.IngressPortOverride != nil { + in, out := &in.IngressPortOverride, &out.IngressPortOverride + *out = new(float64) + **out = **in + } + if in.PortName != nil { + in, out := &in.PortName, &out.PortName + *out = new(string) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TimeoutInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectConfigurationServiceInitParameters. +func (in *ServiceConnectConfigurationServiceInitParameters) DeepCopy() *ServiceConnectConfigurationServiceInitParameters { + if in == nil { + return nil + } + out := new(ServiceConnectConfigurationServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceConnectConfigurationServiceObservation) DeepCopyInto(out *ServiceConnectConfigurationServiceObservation) { + *out = *in + if in.ClientAlias != nil { + in, out := &in.ClientAlias, &out.ClientAlias + *out = new(ClientAliasObservation) + (*in).DeepCopyInto(*out) + } + if in.DiscoveryName != nil { + in, out := &in.DiscoveryName, &out.DiscoveryName + *out = new(string) + **out = **in + } + if in.IngressPortOverride != nil { + in, out := &in.IngressPortOverride, &out.IngressPortOverride + *out = new(float64) + **out = **in + } + if in.PortName != nil { + in, out := &in.PortName, &out.PortName + *out = new(string) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSObservation) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TimeoutObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectConfigurationServiceObservation. +func (in *ServiceConnectConfigurationServiceObservation) DeepCopy() *ServiceConnectConfigurationServiceObservation { + if in == nil { + return nil + } + out := new(ServiceConnectConfigurationServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceConnectConfigurationServiceParameters) DeepCopyInto(out *ServiceConnectConfigurationServiceParameters) { + *out = *in + if in.ClientAlias != nil { + in, out := &in.ClientAlias, &out.ClientAlias + *out = new(ClientAliasParameters) + (*in).DeepCopyInto(*out) + } + if in.DiscoveryName != nil { + in, out := &in.DiscoveryName, &out.DiscoveryName + *out = new(string) + **out = **in + } + if in.IngressPortOverride != nil { + in, out := &in.IngressPortOverride, &out.IngressPortOverride + *out = new(float64) + **out = **in + } + if in.PortName != nil { + in, out := &in.PortName, &out.PortName + *out = new(string) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSParameters) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(TimeoutParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectConfigurationServiceParameters. +func (in *ServiceConnectConfigurationServiceParameters) DeepCopy() *ServiceConnectConfigurationServiceParameters { + if in == nil { + return nil + } + out := new(ServiceConnectConfigurationServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceConnectDefaultsInitParameters) DeepCopyInto(out *ServiceConnectDefaultsInitParameters) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectDefaultsInitParameters. 
+func (in *ServiceConnectDefaultsInitParameters) DeepCopy() *ServiceConnectDefaultsInitParameters { + if in == nil { + return nil + } + out := new(ServiceConnectDefaultsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceConnectDefaultsObservation) DeepCopyInto(out *ServiceConnectDefaultsObservation) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectDefaultsObservation. +func (in *ServiceConnectDefaultsObservation) DeepCopy() *ServiceConnectDefaultsObservation { + if in == nil { + return nil + } + out := new(ServiceConnectDefaultsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceConnectDefaultsParameters) DeepCopyInto(out *ServiceConnectDefaultsParameters) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConnectDefaultsParameters. +func (in *ServiceConnectDefaultsParameters) DeepCopy() *ServiceConnectDefaultsParameters { + if in == nil { + return nil + } + out := new(ServiceConnectDefaultsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceInitParameters) DeepCopyInto(out *ServiceInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = new(AlarmsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]CapacityProviderStrategyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(string) + **out = **in + } + if in.ClusterRef != nil { + in, out := &in.ClusterRef, &out.ClusterRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterSelector != nil { + in, out := &in.ClusterSelector, &out.ClusterSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeploymentCircuitBreaker != nil { + in, out := &in.DeploymentCircuitBreaker, &out.DeploymentCircuitBreaker + *out = new(DeploymentCircuitBreakerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeploymentController != nil { + in, out := &in.DeploymentController, &out.DeploymentController + *out = new(DeploymentControllerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeploymentMaximumPercent != nil { + in, out := &in.DeploymentMaximumPercent, &out.DeploymentMaximumPercent + *out = new(float64) + **out = **in + } + if in.DeploymentMinimumHealthyPercent != nil { + in, out := &in.DeploymentMinimumHealthyPercent, &out.DeploymentMinimumHealthyPercent + *out = new(float64) + **out = **in + } + if in.DesiredCount != nil { + in, out := &in.DesiredCount, &out.DesiredCount + *out = new(float64) + **out = **in + } + if in.EnableEcsManagedTags != nil { + in, out := &in.EnableEcsManagedTags, &out.EnableEcsManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.ForceNewDeployment != nil { + 
in, out := &in.ForceNewDeployment, &out.ForceNewDeployment + *out = new(bool) + **out = **in + } + if in.HealthCheckGracePeriodSeconds != nil { + in, out := &in.HealthCheckGracePeriodSeconds, &out.HealthCheckGracePeriodSeconds + *out = new(float64) + **out = **in + } + if in.IAMRole != nil { + in, out := &in.IAMRole, &out.IAMRole + *out = new(string) + **out = **in + } + if in.IAMRoleRef != nil { + in, out := &in.IAMRoleRef, &out.IAMRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleSelector != nil { + in, out := &in.IAMRoleSelector, &out.IAMRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = make([]LoadBalancerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OrderedPlacementStrategy != nil { + in, out := &in.OrderedPlacementStrategy, &out.OrderedPlacementStrategy + *out = make([]OrderedPlacementStrategyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]PlacementConstraintsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.SchedulingStrategy != nil { + in, out := &in.SchedulingStrategy, &out.SchedulingStrategy + *out = new(string) + **out 
= **in + } + if in.ServiceConnectConfiguration != nil { + in, out := &in.ServiceConnectConfiguration, &out.ServiceConnectConfiguration + *out = new(ServiceConnectConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceRegistries != nil { + in, out := &in.ServiceRegistries, &out.ServiceRegistries + *out = new(ServiceRegistriesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskDefinition != nil { + in, out := &in.TaskDefinition, &out.TaskDefinition + *out = new(string) + **out = **in + } + if in.TaskDefinitionRef != nil { + in, out := &in.TaskDefinitionRef, &out.TaskDefinitionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TaskDefinitionSelector != nil { + in, out := &in.TaskDefinitionSelector, &out.TaskDefinitionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WaitForSteadyState != nil { + in, out := &in.WaitForSteadyState, &out.WaitForSteadyState + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceInitParameters. 
+func (in *ServiceInitParameters) DeepCopy() *ServiceInitParameters { + if in == nil { + return nil + } + out := new(ServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceObservation) DeepCopyInto(out *ServiceObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = new(AlarmsObservation) + (*in).DeepCopyInto(*out) + } + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]CapacityProviderStrategyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(string) + **out = **in + } + if in.DeploymentCircuitBreaker != nil { + in, out := &in.DeploymentCircuitBreaker, &out.DeploymentCircuitBreaker + *out = new(DeploymentCircuitBreakerObservation) + (*in).DeepCopyInto(*out) + } + if in.DeploymentController != nil { + in, out := &in.DeploymentController, &out.DeploymentController + *out = new(DeploymentControllerObservation) + (*in).DeepCopyInto(*out) + } + if in.DeploymentMaximumPercent != nil { + in, out := &in.DeploymentMaximumPercent, &out.DeploymentMaximumPercent + *out = new(float64) + **out = **in + } + if in.DeploymentMinimumHealthyPercent != nil { + in, out := &in.DeploymentMinimumHealthyPercent, &out.DeploymentMinimumHealthyPercent + *out = new(float64) + **out = **in + } + if in.DesiredCount != nil { + in, out := &in.DesiredCount, &out.DesiredCount + *out = new(float64) + **out = **in + } + if in.EnableEcsManagedTags != nil { + in, out := &in.EnableEcsManagedTags, &out.EnableEcsManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.ForceNewDeployment != nil { + in, out := &in.ForceNewDeployment, &out.ForceNewDeployment + *out = new(bool) + **out = **in + } + if in.HealthCheckGracePeriodSeconds != nil { + in, out := &in.HealthCheckGracePeriodSeconds, &out.HealthCheckGracePeriodSeconds + *out = new(float64) + **out = **in + } + if in.IAMRole != nil { 
+ in, out := &in.IAMRole, &out.IAMRole + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = make([]LoadBalancerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.OrderedPlacementStrategy != nil { + in, out := &in.OrderedPlacementStrategy, &out.OrderedPlacementStrategy + *out = make([]OrderedPlacementStrategyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]PlacementConstraintsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.SchedulingStrategy != nil { + in, out := &in.SchedulingStrategy, &out.SchedulingStrategy + *out = new(string) + **out = **in + } + if in.ServiceConnectConfiguration != nil { + in, out := &in.ServiceConnectConfiguration, &out.ServiceConnectConfiguration + *out = new(ServiceConnectConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceRegistries != nil { + in, out := &in.ServiceRegistries, &out.ServiceRegistries + *out = new(ServiceRegistriesObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for 
key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskDefinition != nil { + in, out := &in.TaskDefinition, &out.TaskDefinition + *out = new(string) + **out = **in + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WaitForSteadyState != nil { + in, out := &in.WaitForSteadyState, &out.WaitForSteadyState + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceObservation. +func (in *ServiceObservation) DeepCopy() *ServiceObservation { + if in == nil { + return nil + } + out := new(ServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceParameters) DeepCopyInto(out *ServiceParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = new(AlarmsParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]CapacityProviderStrategyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(string) + **out = **in + } + if in.ClusterRef != nil { + in, out := &in.ClusterRef, &out.ClusterRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterSelector != nil { + in, out := &in.ClusterSelector, &out.ClusterSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeploymentCircuitBreaker != nil { + in, out := &in.DeploymentCircuitBreaker, &out.DeploymentCircuitBreaker + *out = new(DeploymentCircuitBreakerParameters) + (*in).DeepCopyInto(*out) + } + if in.DeploymentController != nil { + in, out := &in.DeploymentController, &out.DeploymentController + *out = new(DeploymentControllerParameters) + (*in).DeepCopyInto(*out) + } + if in.DeploymentMaximumPercent != nil { + in, out := &in.DeploymentMaximumPercent, &out.DeploymentMaximumPercent + *out = new(float64) + **out = **in + } + if in.DeploymentMinimumHealthyPercent != nil { + in, out := &in.DeploymentMinimumHealthyPercent, &out.DeploymentMinimumHealthyPercent + *out = new(float64) + **out = **in + } + if in.DesiredCount != nil { + in, out := &in.DesiredCount, &out.DesiredCount + *out = new(float64) + **out = **in + } + if in.EnableEcsManagedTags != nil { + in, out := &in.EnableEcsManagedTags, &out.EnableEcsManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.ForceNewDeployment != nil { + in, out := 
&in.ForceNewDeployment, &out.ForceNewDeployment + *out = new(bool) + **out = **in + } + if in.HealthCheckGracePeriodSeconds != nil { + in, out := &in.HealthCheckGracePeriodSeconds, &out.HealthCheckGracePeriodSeconds + *out = new(float64) + **out = **in + } + if in.IAMRole != nil { + in, out := &in.IAMRole, &out.IAMRole + *out = new(string) + **out = **in + } + if in.IAMRoleRef != nil { + in, out := &in.IAMRoleRef, &out.IAMRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleSelector != nil { + in, out := &in.IAMRoleSelector, &out.IAMRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = make([]LoadBalancerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.OrderedPlacementStrategy != nil { + in, out := &in.OrderedPlacementStrategy, &out.OrderedPlacementStrategy + *out = make([]OrderedPlacementStrategyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]PlacementConstraintsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SchedulingStrategy != nil { + in, out := 
&in.SchedulingStrategy, &out.SchedulingStrategy + *out = new(string) + **out = **in + } + if in.ServiceConnectConfiguration != nil { + in, out := &in.ServiceConnectConfiguration, &out.ServiceConnectConfiguration + *out = new(ServiceConnectConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceRegistries != nil { + in, out := &in.ServiceRegistries, &out.ServiceRegistries + *out = new(ServiceRegistriesParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskDefinition != nil { + in, out := &in.TaskDefinition, &out.TaskDefinition + *out = new(string) + **out = **in + } + if in.TaskDefinitionRef != nil { + in, out := &in.TaskDefinitionRef, &out.TaskDefinitionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TaskDefinitionSelector != nil { + in, out := &in.TaskDefinitionSelector, &out.TaskDefinitionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WaitForSteadyState != nil { + in, out := &in.WaitForSteadyState, &out.WaitForSteadyState + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceParameters. 
+func (in *ServiceParameters) DeepCopy() *ServiceParameters { + if in == nil { + return nil + } + out := new(ServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceRegistriesInitParameters) DeepCopyInto(out *ServiceRegistriesInitParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerPort != nil { + in, out := &in.ContainerPort, &out.ContainerPort + *out = new(float64) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.RegistryArn != nil { + in, out := &in.RegistryArn, &out.RegistryArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceRegistriesInitParameters. +func (in *ServiceRegistriesInitParameters) DeepCopy() *ServiceRegistriesInitParameters { + if in == nil { + return nil + } + out := new(ServiceRegistriesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceRegistriesObservation) DeepCopyInto(out *ServiceRegistriesObservation) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerPort != nil { + in, out := &in.ContainerPort, &out.ContainerPort + *out = new(float64) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.RegistryArn != nil { + in, out := &in.RegistryArn, &out.RegistryArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceRegistriesObservation. 
+func (in *ServiceRegistriesObservation) DeepCopy() *ServiceRegistriesObservation { + if in == nil { + return nil + } + out := new(ServiceRegistriesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceRegistriesParameters) DeepCopyInto(out *ServiceRegistriesParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerPort != nil { + in, out := &in.ContainerPort, &out.ContainerPort + *out = new(float64) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.RegistryArn != nil { + in, out := &in.RegistryArn, &out.RegistryArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceRegistriesParameters. +func (in *ServiceRegistriesParameters) DeepCopy() *ServiceRegistriesParameters { + if in == nil { + return nil + } + out := new(ServiceRegistriesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingInitParameters) DeepCopyInto(out *SettingInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingInitParameters. +func (in *SettingInitParameters) DeepCopy() *SettingInitParameters { + if in == nil { + return nil + } + out := new(SettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingObservation) DeepCopyInto(out *SettingObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingObservation. +func (in *SettingObservation) DeepCopy() *SettingObservation { + if in == nil { + return nil + } + out := new(SettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingParameters) DeepCopyInto(out *SettingParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingParameters. +func (in *SettingParameters) DeepCopy() *SettingParameters { + if in == nil { + return nil + } + out := new(SettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSInitParameters) DeepCopyInto(out *TLSInitParameters) { + *out = *in + if in.IssuerCertAuthority != nil { + in, out := &in.IssuerCertAuthority, &out.IssuerCertAuthority + *out = new(IssuerCertAuthorityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSInitParameters. +func (in *TLSInitParameters) DeepCopy() *TLSInitParameters { + if in == nil { + return nil + } + out := new(TLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSObservation) DeepCopyInto(out *TLSObservation) { + *out = *in + if in.IssuerCertAuthority != nil { + in, out := &in.IssuerCertAuthority, &out.IssuerCertAuthority + *out = new(IssuerCertAuthorityObservation) + (*in).DeepCopyInto(*out) + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSObservation. +func (in *TLSObservation) DeepCopy() *TLSObservation { + if in == nil { + return nil + } + out := new(TLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSParameters) DeepCopyInto(out *TLSParameters) { + *out = *in + if in.IssuerCertAuthority != nil { + in, out := &in.IssuerCertAuthority, &out.IssuerCertAuthority + *out = new(IssuerCertAuthorityParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSParameters. +func (in *TLSParameters) DeepCopy() *TLSParameters { + if in == nil { + return nil + } + out := new(TLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskDefinition) DeepCopyInto(out *TaskDefinition) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinition. +func (in *TaskDefinition) DeepCopy() *TaskDefinition { + if in == nil { + return nil + } + out := new(TaskDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskDefinition) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionInitParameters) DeepCopyInto(out *TaskDefinitionInitParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.ContainerDefinitions != nil { + in, out := &in.ContainerDefinitions, &out.ContainerDefinitions + *out = new(string) + **out = **in + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = new(EphemeralStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.InferenceAccelerator != nil { + in, out := &in.InferenceAccelerator, 
&out.InferenceAccelerator + *out = make([]InferenceAcceleratorInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IpcMode != nil { + in, out := &in.IpcMode, &out.IpcMode + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.NetworkMode != nil { + in, out := &in.NetworkMode, &out.NetworkMode + *out = new(string) + **out = **in + } + if in.PidMode != nil { + in, out := &in.PidMode, &out.PidMode + *out = new(string) + **out = **in + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]TaskDefinitionPlacementConstraintsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProxyConfiguration != nil { + in, out := &in.ProxyConfiguration, &out.ProxyConfiguration + *out = new(ProxyConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequiresCompatibilities != nil { + in, out := &in.RequiresCompatibilities, &out.RequiresCompatibilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RuntimePlatform != nil { + in, out := &in.RuntimePlatform, &out.RuntimePlatform + *out = new(RuntimePlatformInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipDestroy != nil { + in, out := &in.SkipDestroy, &out.SkipDestroy + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskRoleArn != nil { + in, out := &in.TaskRoleArn, &out.TaskRoleArn + *out = new(string) + **out = **in + } + if 
in.TrackLatest != nil { + in, out := &in.TrackLatest, &out.TrackLatest + *out = new(bool) + **out = **in + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = make([]VolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionInitParameters. +func (in *TaskDefinitionInitParameters) DeepCopy() *TaskDefinitionInitParameters { + if in == nil { + return nil + } + out := new(TaskDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionList) DeepCopyInto(out *TaskDefinitionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TaskDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionList. +func (in *TaskDefinitionList) DeepCopy() *TaskDefinitionList { + if in == nil { + return nil + } + out := new(TaskDefinitionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskDefinitionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskDefinitionObservation) DeepCopyInto(out *TaskDefinitionObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnWithoutRevision != nil { + in, out := &in.ArnWithoutRevision, &out.ArnWithoutRevision + *out = new(string) + **out = **in + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.ContainerDefinitions != nil { + in, out := &in.ContainerDefinitions, &out.ContainerDefinitions + *out = new(string) + **out = **in + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = new(EphemeralStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InferenceAccelerator != nil { + in, out := &in.InferenceAccelerator, &out.InferenceAccelerator + *out = make([]InferenceAcceleratorObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IpcMode != nil { + in, out := &in.IpcMode, &out.IpcMode + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.NetworkMode != nil { + in, out := &in.NetworkMode, &out.NetworkMode + *out = new(string) + **out = **in + } + if in.PidMode != nil { + in, out := &in.PidMode, &out.PidMode + *out = new(string) + **out = **in + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]TaskDefinitionPlacementConstraintsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProxyConfiguration != nil { + in, out := 
&in.ProxyConfiguration, &out.ProxyConfiguration + *out = new(ProxyConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RequiresCompatibilities != nil { + in, out := &in.RequiresCompatibilities, &out.RequiresCompatibilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } + if in.RuntimePlatform != nil { + in, out := &in.RuntimePlatform, &out.RuntimePlatform + *out = new(RuntimePlatformObservation) + (*in).DeepCopyInto(*out) + } + if in.SkipDestroy != nil { + in, out := &in.SkipDestroy, &out.SkipDestroy + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskRoleArn != nil { + in, out := &in.TaskRoleArn, &out.TaskRoleArn + *out = new(string) + **out = **in + } + if in.TrackLatest != nil { + in, out := &in.TrackLatest, &out.TrackLatest + *out = new(bool) + **out = **in + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = make([]VolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionObservation. 
+func (in *TaskDefinitionObservation) DeepCopy() *TaskDefinitionObservation { + if in == nil { + return nil + } + out := new(TaskDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionParameters) DeepCopyInto(out *TaskDefinitionParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.ContainerDefinitions != nil { + in, out := &in.ContainerDefinitions, &out.ContainerDefinitions + *out = new(string) + **out = **in + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = new(EphemeralStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.InferenceAccelerator != nil { + in, out := &in.InferenceAccelerator, &out.InferenceAccelerator + *out = make([]InferenceAcceleratorParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IpcMode != nil { + in, out := &in.IpcMode, &out.IpcMode + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.NetworkMode != nil { + in, out := &in.NetworkMode, &out.NetworkMode + *out = new(string) + **out = **in + } + if in.PidMode != nil { + in, out := &in.PidMode, &out.PidMode + *out = new(string) 
+ **out = **in + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]TaskDefinitionPlacementConstraintsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProxyConfiguration != nil { + in, out := &in.ProxyConfiguration, &out.ProxyConfiguration + *out = new(ProxyConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RequiresCompatibilities != nil { + in, out := &in.RequiresCompatibilities, &out.RequiresCompatibilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RuntimePlatform != nil { + in, out := &in.RuntimePlatform, &out.RuntimePlatform + *out = new(RuntimePlatformParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipDestroy != nil { + in, out := &in.SkipDestroy, &out.SkipDestroy + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskRoleArn != nil { + in, out := &in.TaskRoleArn, &out.TaskRoleArn + *out = new(string) + **out = **in + } + if in.TrackLatest != nil { + in, out := &in.TrackLatest, &out.TrackLatest + *out = new(bool) + **out = **in + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = make([]VolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionParameters. 
+func (in *TaskDefinitionParameters) DeepCopy() *TaskDefinitionParameters { + if in == nil { + return nil + } + out := new(TaskDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionPlacementConstraintsInitParameters) DeepCopyInto(out *TaskDefinitionPlacementConstraintsInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionPlacementConstraintsInitParameters. +func (in *TaskDefinitionPlacementConstraintsInitParameters) DeepCopy() *TaskDefinitionPlacementConstraintsInitParameters { + if in == nil { + return nil + } + out := new(TaskDefinitionPlacementConstraintsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionPlacementConstraintsObservation) DeepCopyInto(out *TaskDefinitionPlacementConstraintsObservation) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionPlacementConstraintsObservation. 
+func (in *TaskDefinitionPlacementConstraintsObservation) DeepCopy() *TaskDefinitionPlacementConstraintsObservation { + if in == nil { + return nil + } + out := new(TaskDefinitionPlacementConstraintsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionPlacementConstraintsParameters) DeepCopyInto(out *TaskDefinitionPlacementConstraintsParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionPlacementConstraintsParameters. +func (in *TaskDefinitionPlacementConstraintsParameters) DeepCopy() *TaskDefinitionPlacementConstraintsParameters { + if in == nil { + return nil + } + out := new(TaskDefinitionPlacementConstraintsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionSpec) DeepCopyInto(out *TaskDefinitionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionSpec. +func (in *TaskDefinitionSpec) DeepCopy() *TaskDefinitionSpec { + if in == nil { + return nil + } + out := new(TaskDefinitionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskDefinitionStatus) DeepCopyInto(out *TaskDefinitionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionStatus. +func (in *TaskDefinitionStatus) DeepCopy() *TaskDefinitionStatus { + if in == nil { + return nil + } + out := new(TaskDefinitionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutInitParameters) DeepCopyInto(out *TimeoutInitParameters) { + *out = *in + if in.IdleTimeoutSeconds != nil { + in, out := &in.IdleTimeoutSeconds, &out.IdleTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.PerRequestTimeoutSeconds != nil { + in, out := &in.PerRequestTimeoutSeconds, &out.PerRequestTimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutInitParameters. +func (in *TimeoutInitParameters) DeepCopy() *TimeoutInitParameters { + if in == nil { + return nil + } + out := new(TimeoutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutObservation) DeepCopyInto(out *TimeoutObservation) { + *out = *in + if in.IdleTimeoutSeconds != nil { + in, out := &in.IdleTimeoutSeconds, &out.IdleTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.PerRequestTimeoutSeconds != nil { + in, out := &in.PerRequestTimeoutSeconds, &out.PerRequestTimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutObservation. 
+func (in *TimeoutObservation) DeepCopy() *TimeoutObservation { + if in == nil { + return nil + } + out := new(TimeoutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutParameters) DeepCopyInto(out *TimeoutParameters) { + *out = *in + if in.IdleTimeoutSeconds != nil { + in, out := &in.IdleTimeoutSeconds, &out.IdleTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.PerRequestTimeoutSeconds != nil { + in, out := &in.PerRequestTimeoutSeconds, &out.PerRequestTimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutParameters. +func (in *TimeoutParameters) DeepCopy() *TimeoutParameters { + if in == nil { + return nil + } + out := new(TimeoutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeInitParameters) DeepCopyInto(out *VolumeInitParameters) { + *out = *in + if in.DockerVolumeConfiguration != nil { + in, out := &in.DockerVolumeConfiguration, &out.DockerVolumeConfiguration + *out = new(DockerVolumeConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EFSVolumeConfiguration != nil { + in, out := &in.EFSVolumeConfiguration, &out.EFSVolumeConfiguration + *out = new(EFSVolumeConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FSXWindowsFileServerVolumeConfiguration != nil { + in, out := &in.FSXWindowsFileServerVolumeConfiguration, &out.FSXWindowsFileServerVolumeConfiguration + *out = new(FSXWindowsFileServerVolumeConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeInitParameters. +func (in *VolumeInitParameters) DeepCopy() *VolumeInitParameters { + if in == nil { + return nil + } + out := new(VolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeObservation) DeepCopyInto(out *VolumeObservation) { + *out = *in + if in.DockerVolumeConfiguration != nil { + in, out := &in.DockerVolumeConfiguration, &out.DockerVolumeConfiguration + *out = new(DockerVolumeConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.EFSVolumeConfiguration != nil { + in, out := &in.EFSVolumeConfiguration, &out.EFSVolumeConfiguration + *out = new(EFSVolumeConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.FSXWindowsFileServerVolumeConfiguration != nil { + in, out := &in.FSXWindowsFileServerVolumeConfiguration, &out.FSXWindowsFileServerVolumeConfiguration + *out = new(FSXWindowsFileServerVolumeConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeObservation. +func (in *VolumeObservation) DeepCopy() *VolumeObservation { + if in == nil { + return nil + } + out := new(VolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeParameters) DeepCopyInto(out *VolumeParameters) { + *out = *in + if in.DockerVolumeConfiguration != nil { + in, out := &in.DockerVolumeConfiguration, &out.DockerVolumeConfiguration + *out = new(DockerVolumeConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.EFSVolumeConfiguration != nil { + in, out := &in.EFSVolumeConfiguration, &out.EFSVolumeConfiguration + *out = new(EFSVolumeConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.FSXWindowsFileServerVolumeConfiguration != nil { + in, out := &in.FSXWindowsFileServerVolumeConfiguration, &out.FSXWindowsFileServerVolumeConfiguration + *out = new(FSXWindowsFileServerVolumeConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeParameters. +func (in *VolumeParameters) DeepCopy() *VolumeParameters { + if in == nil { + return nil + } + out := new(VolumeParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ecs/v1beta2/zz_generated.managed.go b/apis/ecs/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..b2c3fae767 --- /dev/null +++ b/apis/ecs/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CapacityProvider. +func (mg *CapacityProvider) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CapacityProvider. 
+func (mg *CapacityProvider) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CapacityProvider. +func (mg *CapacityProvider) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CapacityProvider. +func (mg *CapacityProvider) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CapacityProvider. +func (mg *CapacityProvider) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CapacityProvider. +func (mg *CapacityProvider) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CapacityProvider. +func (mg *CapacityProvider) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CapacityProvider. +func (mg *CapacityProvider) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CapacityProvider. +func (mg *CapacityProvider) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CapacityProvider. +func (mg *CapacityProvider) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CapacityProvider. +func (mg *CapacityProvider) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CapacityProvider. +func (mg *CapacityProvider) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Cluster. 
+func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. +func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. +func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Service. 
+func (mg *Service) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Service. +func (mg *Service) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Service. +func (mg *Service) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Service. +func (mg *Service) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Service. +func (mg *Service) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Service. +func (mg *Service) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Service. +func (mg *Service) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Service. +func (mg *Service) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Service. +func (mg *Service) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Service. +func (mg *Service) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Service. +func (mg *Service) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Service. +func (mg *Service) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TaskDefinition. 
+func (mg *TaskDefinition) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TaskDefinition. +func (mg *TaskDefinition) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TaskDefinition. +func (mg *TaskDefinition) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TaskDefinition. +func (mg *TaskDefinition) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TaskDefinition. +func (mg *TaskDefinition) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TaskDefinition. +func (mg *TaskDefinition) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TaskDefinition. +func (mg *TaskDefinition) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TaskDefinition. +func (mg *TaskDefinition) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TaskDefinition. +func (mg *TaskDefinition) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TaskDefinition. +func (mg *TaskDefinition) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TaskDefinition. +func (mg *TaskDefinition) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TaskDefinition. 
+func (mg *TaskDefinition) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ecs/v1beta2/zz_generated.managedlist.go b/apis/ecs/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..1ad27ae916 --- /dev/null +++ b/apis/ecs/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CapacityProviderList. +func (l *CapacityProviderList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ClusterList. +func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ServiceList. +func (l *ServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TaskDefinitionList. +func (l *TaskDefinitionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ecs/v1beta2/zz_generated.resolvers.go b/apis/ecs/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..639b2e4994 --- /dev/null +++ b/apis/ecs/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,376 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *CapacityProvider) ResolveReferences( // ResolveReferences of this CapacityProvider. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.AutoScalingGroupProvider != nil { + { + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta3", "AutoscalingGroup", "AutoscalingGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AutoScalingGroupProvider.AutoScalingGroupArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.AutoScalingGroupProvider.AutoScalingGroupArnRef, + Selector: mg.Spec.ForProvider.AutoScalingGroupProvider.AutoScalingGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AutoScalingGroupProvider.AutoScalingGroupArn") + } + mg.Spec.ForProvider.AutoScalingGroupProvider.AutoScalingGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AutoScalingGroupProvider.AutoScalingGroupArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.AutoScalingGroupProvider != nil { + { + m, l, err = apisresolver.GetManagedResource("autoscaling.aws.upbound.io", "v1beta3", "AutoscalingGroup", "AutoscalingGroupList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AutoScalingGroupProvider.AutoScalingGroupArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.AutoScalingGroupProvider.AutoScalingGroupArnRef, + Selector: mg.Spec.InitProvider.AutoScalingGroupProvider.AutoScalingGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AutoScalingGroupProvider.AutoScalingGroupArn") + } + mg.Spec.InitProvider.AutoScalingGroupProvider.AutoScalingGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AutoScalingGroupProvider.AutoScalingGroupArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Service. +func (mg *Service) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Cluster), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterRef, + Selector: mg.Spec.ForProvider.ClusterSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Cluster") + } + mg.Spec.ForProvider.Cluster = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IAMRole), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.IAMRoleRef, + Selector: mg.Spec.ForProvider.IAMRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMRole") + } + mg.Spec.ForProvider.IAMRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IAMRoleRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.LoadBalancer); i3++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoadBalancer[i3].TargetGroupArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LoadBalancer[i3].TargetGroupArnRef, + Selector: mg.Spec.ForProvider.LoadBalancer[i3].TargetGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoadBalancer[i3].TargetGroupArn") + } + mg.Spec.ForProvider.LoadBalancer[i3].TargetGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LoadBalancer[i3].TargetGroupArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.NetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.NetworkConfiguration.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.NetworkConfiguration.SecurityGroupRefs, + Selector: mg.Spec.ForProvider.NetworkConfiguration.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkConfiguration.SecurityGroups") + } + mg.Spec.ForProvider.NetworkConfiguration.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.NetworkConfiguration.SecurityGroupRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.NetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.NetworkConfiguration.Subnets), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.NetworkConfiguration.SubnetRefs, + Selector: mg.Spec.ForProvider.NetworkConfiguration.SubnetSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkConfiguration.Subnets") + } + mg.Spec.ForProvider.NetworkConfiguration.Subnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.NetworkConfiguration.SubnetRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "TaskDefinition", "TaskDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TaskDefinition), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.TaskDefinitionRef, + Selector: mg.Spec.ForProvider.TaskDefinitionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TaskDefinition") + } + mg.Spec.ForProvider.TaskDefinition = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TaskDefinitionRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Cluster), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterRef, + Selector: mg.Spec.InitProvider.ClusterSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Cluster") + } + mg.Spec.InitProvider.Cluster = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IAMRole), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.IAMRoleRef, + Selector: mg.Spec.InitProvider.IAMRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMRole") + } + 
mg.Spec.InitProvider.IAMRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IAMRoleRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.LoadBalancer); i3++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoadBalancer[i3].TargetGroupArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LoadBalancer[i3].TargetGroupArnRef, + Selector: mg.Spec.InitProvider.LoadBalancer[i3].TargetGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LoadBalancer[i3].TargetGroupArn") + } + mg.Spec.InitProvider.LoadBalancer[i3].TargetGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LoadBalancer[i3].TargetGroupArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.NetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.NetworkConfiguration.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.NetworkConfiguration.SecurityGroupRefs, + Selector: mg.Spec.InitProvider.NetworkConfiguration.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkConfiguration.SecurityGroups") + } + 
mg.Spec.InitProvider.NetworkConfiguration.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.NetworkConfiguration.SecurityGroupRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.NetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.NetworkConfiguration.Subnets), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.NetworkConfiguration.SubnetRefs, + Selector: mg.Spec.InitProvider.NetworkConfiguration.SubnetSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkConfiguration.Subnets") + } + mg.Spec.InitProvider.NetworkConfiguration.Subnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.NetworkConfiguration.SubnetRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "TaskDefinition", "TaskDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TaskDefinition), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.TaskDefinitionRef, + Selector: mg.Spec.InitProvider.TaskDefinitionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TaskDefinition") + } + mg.Spec.InitProvider.TaskDefinition = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TaskDefinitionRef = 
rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this TaskDefinition. +func (mg *TaskDefinition) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ExecutionRoleArnRef, + Selector: mg.Spec.ForProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExecutionRoleArn") + } + mg.Spec.ForProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExecutionRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ExecutionRoleArnRef, + Selector: mg.Spec.InitProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExecutionRoleArn") + } + mg.Spec.InitProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExecutionRoleArnRef = rsp.ResolvedReference + + return nil +} diff --git 
a/apis/ecs/v1beta2/zz_groupversion_info.go b/apis/ecs/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..71003ae3a5 --- /dev/null +++ b/apis/ecs/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ecs.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "ecs.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ecs/v1beta2/zz_service_terraformed.go b/apis/ecs/v1beta2/zz_service_terraformed.go new file mode 100755 index 0000000000..272d59328d --- /dev/null +++ b/apis/ecs/v1beta2/zz_service_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Service +func (mg *Service) GetTerraformResourceType() string { + return "aws_ecs_service" +} + +// GetConnectionDetailsMapping for this Service +func (tr *Service) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Service +func (tr *Service) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Service +func (tr *Service) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Service +func (tr *Service) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Service +func (tr *Service) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Service +func (tr *Service) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Service +func (tr *Service) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetMergedParameters of this Service +func (tr *Service) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Service using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Service) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Service) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ecs/v1beta2/zz_service_types.go b/apis/ecs/v1beta2/zz_service_types.go new file mode 100755 index 0000000000..553e5e1fab --- /dev/null +++ b/apis/ecs/v1beta2/zz_service_types.go @@ -0,0 +1,1133 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AlarmsInitParameters struct { + + // One or more CloudWatch alarm names. + // +listType=set + AlarmNames []*string `json:"alarmNames,omitempty" tf:"alarm_names,omitempty"` + + // Determines whether to use the CloudWatch alarm option in the service deployment process. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is used, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. + Rollback *bool `json:"rollback,omitempty" tf:"rollback,omitempty"` +} + +type AlarmsObservation struct { + + // One or more CloudWatch alarm names. + // +listType=set + AlarmNames []*string `json:"alarmNames,omitempty" tf:"alarm_names,omitempty"` + + // Determines whether to use the CloudWatch alarm option in the service deployment process. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is used, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. 
+ Rollback *bool `json:"rollback,omitempty" tf:"rollback,omitempty"` +} + +type AlarmsParameters struct { + + // One or more CloudWatch alarm names. + // +kubebuilder:validation:Optional + // +listType=set + AlarmNames []*string `json:"alarmNames" tf:"alarm_names,omitempty"` + + // Determines whether to use the CloudWatch alarm option in the service deployment process. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable" tf:"enable,omitempty"` + + // Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is used, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. + // +kubebuilder:validation:Optional + Rollback *bool `json:"rollback" tf:"rollback,omitempty"` +} + +type CapacityProviderStrategyInitParameters struct { + + // Number of tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. + Base *float64 `json:"base,omitempty" tf:"base,omitempty"` + + // Short name of the capacity provider. + CapacityProvider *string `json:"capacityProvider,omitempty" tf:"capacity_provider,omitempty"` + + // Relative percentage of the total number of launched tasks that should use the specified capacity provider. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type CapacityProviderStrategyObservation struct { + + // Number of tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. + Base *float64 `json:"base,omitempty" tf:"base,omitempty"` + + // Short name of the capacity provider. + CapacityProvider *string `json:"capacityProvider,omitempty" tf:"capacity_provider,omitempty"` + + // Relative percentage of the total number of launched tasks that should use the specified capacity provider. 
+ Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type CapacityProviderStrategyParameters struct { + + // Number of tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. + // +kubebuilder:validation:Optional + Base *float64 `json:"base,omitempty" tf:"base,omitempty"` + + // Short name of the capacity provider. + // +kubebuilder:validation:Optional + CapacityProvider *string `json:"capacityProvider" tf:"capacity_provider,omitempty"` + + // Relative percentage of the total number of launched tasks that should use the specified capacity provider. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type ClientAliasInitParameters struct { + + // The name that you use in the applications of client tasks to connect to this service. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // The listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type ClientAliasObservation struct { + + // The name that you use in the applications of client tasks to connect to this service. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // The listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type ClientAliasParameters struct { + + // The name that you use in the applications of client tasks to connect to this service. + // +kubebuilder:validation:Optional + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // The listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace. 
+ // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` +} + +type DeploymentCircuitBreakerInitParameters struct { + + // Whether to enable the deployment circuit breaker logic for the service. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // Whether to enable Amazon ECS to roll back the service if a service deployment fails. If rollback is enabled, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. + Rollback *bool `json:"rollback,omitempty" tf:"rollback,omitempty"` +} + +type DeploymentCircuitBreakerObservation struct { + + // Whether to enable the deployment circuit breaker logic for the service. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // Whether to enable Amazon ECS to roll back the service if a service deployment fails. If rollback is enabled, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. + Rollback *bool `json:"rollback,omitempty" tf:"rollback,omitempty"` +} + +type DeploymentCircuitBreakerParameters struct { + + // Whether to enable the deployment circuit breaker logic for the service. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable" tf:"enable,omitempty"` + + // Whether to enable Amazon ECS to roll back the service if a service deployment fails. If rollback is enabled, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. + // +kubebuilder:validation:Optional + Rollback *bool `json:"rollback" tf:"rollback,omitempty"` +} + +type DeploymentControllerInitParameters struct { + + // Type of deployment controller. Valid values: CODE_DEPLOY, ECS, EXTERNAL. Default: ECS. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DeploymentControllerObservation struct { + + // Type of deployment controller. Valid values: CODE_DEPLOY, ECS, EXTERNAL. Default: ECS. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DeploymentControllerParameters struct { + + // Type of deployment controller. Valid values: CODE_DEPLOY, ECS, EXTERNAL. Default: ECS. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IssuerCertAuthorityInitParameters struct { + + // The ARN of the aws_acmpca_certificate_authority used to create the TLS Certificates. + AwsPcaAuthorityArn *string `json:"awsPcaAuthorityArn,omitempty" tf:"aws_pca_authority_arn,omitempty"` +} + +type IssuerCertAuthorityObservation struct { + + // The ARN of the aws_acmpca_certificate_authority used to create the TLS Certificates. + AwsPcaAuthorityArn *string `json:"awsPcaAuthorityArn,omitempty" tf:"aws_pca_authority_arn,omitempty"` +} + +type IssuerCertAuthorityParameters struct { + + // The ARN of the aws_acmpca_certificate_authority used to create the TLS Certificates. + // +kubebuilder:validation:Optional + AwsPcaAuthorityArn *string `json:"awsPcaAuthorityArn" tf:"aws_pca_authority_arn,omitempty"` +} + +type LoadBalancerInitParameters struct { + + // Name of the container to associate with the load balancer (as it appears in a container definition). + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Port on the container to associate with the load balancer. + ContainerPort *float64 `json:"containerPort,omitempty" tf:"container_port,omitempty"` + + // Name of the ELB (Classic) to associate with the service. + ELBName *string `json:"elbName,omitempty" tf:"elb_name,omitempty"` + + // ARN of the Load Balancer target group to associate with the service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate targetGroupArn. 
+ // +kubebuilder:validation:Optional + TargetGroupArnRef *v1.Reference `json:"targetGroupArnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnSelector *v1.Selector `json:"targetGroupArnSelector,omitempty" tf:"-"` +} + +type LoadBalancerObservation struct { + + // Name of the container to associate with the load balancer (as it appears in a container definition). + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Port on the container to associate with the load balancer. + ContainerPort *float64 `json:"containerPort,omitempty" tf:"container_port,omitempty"` + + // Name of the ELB (Classic) to associate with the service. + ELBName *string `json:"elbName,omitempty" tf:"elb_name,omitempty"` + + // ARN of the Load Balancer target group to associate with the service. + TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` +} + +type LoadBalancerParameters struct { + + // Name of the container to associate with the load balancer (as it appears in a container definition). + // +kubebuilder:validation:Optional + ContainerName *string `json:"containerName" tf:"container_name,omitempty"` + + // Port on the container to associate with the load balancer. + // +kubebuilder:validation:Optional + ContainerPort *float64 `json:"containerPort" tf:"container_port,omitempty"` + + // Name of the ELB (Classic) to associate with the service. + // +kubebuilder:validation:Optional + ELBName *string `json:"elbName,omitempty" tf:"elb_name,omitempty"` + + // ARN of the Load Balancer target group to associate with the service. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + // +kubebuilder:validation:Optional + TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnRef *v1.Reference `json:"targetGroupArnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnSelector *v1.Selector `json:"targetGroupArnSelector,omitempty" tf:"-"` +} + +type NetworkConfigurationInitParameters struct { + + // Assign a public IP address to the ENI (Fargate launch type only). Valid values are true or false. Default false. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // Security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // References to Subnet in ec2 to populate subnets. 
+ // +kubebuilder:validation:Optional + SubnetRefs []v1.Reference `json:"subnetRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetSelector *v1.Selector `json:"subnetSelector,omitempty" tf:"-"` + + // Subnets associated with the task or service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetRefs + // +crossplane:generate:reference:selectorFieldName=SubnetSelector + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type NetworkConfigurationObservation struct { + + // Assign a public IP address to the ENI (Fargate launch type only). Valid values are true or false. Default false. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Subnets associated with the task or service. + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type NetworkConfigurationParameters struct { + + // Assign a public IP address to the ENI (Fargate launch type only). Valid values are true or false. Default false. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. 
+ // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // Security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // References to Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetRefs []v1.Reference `json:"subnetRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetSelector *v1.Selector `json:"subnetSelector,omitempty" tf:"-"` + + // Subnets associated with the task or service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetRefs + // +crossplane:generate:reference:selectorFieldName=SubnetSelector + // +kubebuilder:validation:Optional + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type OrderedPlacementStrategyInitParameters struct { + + // For the spread placement strategy, valid values are instanceId (or host, + // which has the same effect), or any platform or custom attribute that is applied to a container instance. + // For the binpack type, valid values are memory and cpu. For the random type, this attribute is not + // needed. For more information, see Placement Strategy. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // Type of placement strategy. 
Must be one of: binpack, random, or spread + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OrderedPlacementStrategyObservation struct { + + // For the spread placement strategy, valid values are instanceId (or host, + // which has the same effect), or any platform or custom attribute that is applied to a container instance. + // For the binpack type, valid values are memory and cpu. For the random type, this attribute is not + // needed. For more information, see Placement Strategy. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // Type of placement strategy. Must be one of: binpack, random, or spread + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OrderedPlacementStrategyParameters struct { + + // For the spread placement strategy, valid values are instanceId (or host, + // which has the same effect), or any platform or custom attribute that is applied to a container instance. + // For the binpack type, valid values are memory and cpu. For the random type, this attribute is not + // needed. For more information, see Placement Strategy. + // +kubebuilder:validation:Optional + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // Type of placement strategy. Must be one of: binpack, random, or spread + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type PlacementConstraintsInitParameters struct { + + // Cluster Query Language expression to apply to the constraint. Does not need to be specified for the distinctInstance type. For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Type of constraint. The only valid values at this time are memberOf and distinctInstance. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PlacementConstraintsObservation struct { + + // Cluster Query Language expression to apply to the constraint. Does not need to be specified for the distinctInstance type. For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Type of constraint. The only valid values at this time are memberOf and distinctInstance. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PlacementConstraintsParameters struct { + + // Cluster Query Language expression to apply to the constraint. Does not need to be specified for the distinctInstance type. For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Type of constraint. The only valid values at this time are memberOf and distinctInstance. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SecretOptionInitParameters struct { + + // The name of the secret. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store. + ValueFrom *string `json:"valueFrom,omitempty" tf:"value_from,omitempty"` +} + +type SecretOptionObservation struct { + + // The name of the secret. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store. 
+ ValueFrom *string `json:"valueFrom,omitempty" tf:"value_from,omitempty"` +} + +type SecretOptionParameters struct { + + // The name of the secret. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store. + // +kubebuilder:validation:Optional + ValueFrom *string `json:"valueFrom" tf:"value_from,omitempty"` +} + +type ServiceConnectConfigurationInitParameters struct { + + // Specifies whether to use Service Connect with this service. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The log configuration for the container. See below. + LogConfiguration *ServiceConnectConfigurationLogConfigurationInitParameters `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // The namespace name or ARN of the aws_service_discovery_http_namespace for use with Service Connect. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // The list of Service Connect service objects. See below. + Service []ServiceConnectConfigurationServiceInitParameters `json:"service,omitempty" tf:"service,omitempty"` +} + +type ServiceConnectConfigurationLogConfigurationInitParameters struct { + + // The log driver to use for the container. + LogDriver *string `json:"logDriver,omitempty" tf:"log_driver,omitempty"` + + // The configuration options to send to the log driver. + // +mapType=granular + Options map[string]*string `json:"options,omitempty" tf:"options,omitempty"` + + // The secrets to pass to the log configuration. See below. + SecretOption []SecretOptionInitParameters `json:"secretOption,omitempty" tf:"secret_option,omitempty"` +} + +type ServiceConnectConfigurationLogConfigurationObservation struct { + + // The log driver to use for the container. 
+ LogDriver *string `json:"logDriver,omitempty" tf:"log_driver,omitempty"` + + // The configuration options to send to the log driver. + // +mapType=granular + Options map[string]*string `json:"options,omitempty" tf:"options,omitempty"` + + // The secrets to pass to the log configuration. See below. + SecretOption []SecretOptionObservation `json:"secretOption,omitempty" tf:"secret_option,omitempty"` +} + +type ServiceConnectConfigurationLogConfigurationParameters struct { + + // The log driver to use for the container. + // +kubebuilder:validation:Optional + LogDriver *string `json:"logDriver" tf:"log_driver,omitempty"` + + // The configuration options to send to the log driver. + // +kubebuilder:validation:Optional + // +mapType=granular + Options map[string]*string `json:"options,omitempty" tf:"options,omitempty"` + + // The secrets to pass to the log configuration. See below. + // +kubebuilder:validation:Optional + SecretOption []SecretOptionParameters `json:"secretOption,omitempty" tf:"secret_option,omitempty"` +} + +type ServiceConnectConfigurationObservation struct { + + // Specifies whether to use Service Connect with this service. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The log configuration for the container. See below. + LogConfiguration *ServiceConnectConfigurationLogConfigurationObservation `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // The namespace name or ARN of the aws_service_discovery_http_namespace for use with Service Connect. + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // The list of Service Connect service objects. See below. + Service []ServiceConnectConfigurationServiceObservation `json:"service,omitempty" tf:"service,omitempty"` +} + +type ServiceConnectConfigurationParameters struct { + + // Specifies whether to use Service Connect with this service. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // The log configuration for the container. See below. + // +kubebuilder:validation:Optional + LogConfiguration *ServiceConnectConfigurationLogConfigurationParameters `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // The namespace name or ARN of the aws_service_discovery_http_namespace for use with Service Connect. + // +kubebuilder:validation:Optional + Namespace *string `json:"namespace,omitempty" tf:"namespace,omitempty"` + + // The list of Service Connect service objects. See below. + // +kubebuilder:validation:Optional + Service []ServiceConnectConfigurationServiceParameters `json:"service,omitempty" tf:"service,omitempty"` +} + +type ServiceConnectConfigurationServiceInitParameters struct { + + // The list of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. The maximum number of client aliases that you can have in this list is 1. See below. + ClientAlias *ClientAliasInitParameters `json:"clientAlias,omitempty" tf:"client_alias,omitempty"` + + // The name of the new AWS Cloud Map service that Amazon ECS creates for this Amazon ECS service. + DiscoveryName *string `json:"discoveryName,omitempty" tf:"discovery_name,omitempty"` + + // The port number for the Service Connect proxy to listen on. + IngressPortOverride *float64 `json:"ingressPortOverride,omitempty" tf:"ingress_port_override,omitempty"` + + // The name of one of the portMappings from all the containers in the task definition of this Amazon ECS service. 
+ PortName *string `json:"portName,omitempty" tf:"port_name,omitempty"` + + // The configuration for enabling Transport Layer Security (TLS) + TLS *TLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` + + // Configuration timeouts for Service Connect + Timeout *TimeoutInitParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type ServiceConnectConfigurationServiceObservation struct { + + // The list of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. The maximum number of client aliases that you can have in this list is 1. See below. + ClientAlias *ClientAliasObservation `json:"clientAlias,omitempty" tf:"client_alias,omitempty"` + + // The name of the new AWS Cloud Map service that Amazon ECS creates for this Amazon ECS service. + DiscoveryName *string `json:"discoveryName,omitempty" tf:"discovery_name,omitempty"` + + // The port number for the Service Connect proxy to listen on. + IngressPortOverride *float64 `json:"ingressPortOverride,omitempty" tf:"ingress_port_override,omitempty"` + + // The name of one of the portMappings from all the containers in the task definition of this Amazon ECS service. + PortName *string `json:"portName,omitempty" tf:"port_name,omitempty"` + + // The configuration for enabling Transport Layer Security (TLS) + TLS *TLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` + + // Configuration timeouts for Service Connect + Timeout *TimeoutObservation `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type ServiceConnectConfigurationServiceParameters struct { + + // The list of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. The maximum number of client aliases that you can have in this list is 1. See below. 
+ // +kubebuilder:validation:Optional + ClientAlias *ClientAliasParameters `json:"clientAlias,omitempty" tf:"client_alias,omitempty"` + + // The name of the new AWS Cloud Map service that Amazon ECS creates for this Amazon ECS service. + // +kubebuilder:validation:Optional + DiscoveryName *string `json:"discoveryName,omitempty" tf:"discovery_name,omitempty"` + + // The port number for the Service Connect proxy to listen on. + // +kubebuilder:validation:Optional + IngressPortOverride *float64 `json:"ingressPortOverride,omitempty" tf:"ingress_port_override,omitempty"` + + // The name of one of the portMappings from all the containers in the task definition of this Amazon ECS service. + // +kubebuilder:validation:Optional + PortName *string `json:"portName" tf:"port_name,omitempty"` + + // The configuration for enabling Transport Layer Security (TLS) + // +kubebuilder:validation:Optional + TLS *TLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` + + // Configuration timeouts for Service Connect + // +kubebuilder:validation:Optional + Timeout *TimeoutParameters `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type ServiceInitParameters struct { + + // Information about the CloudWatch alarms. See below. + Alarms *AlarmsInitParameters `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // Capacity provider strategies to use for the service. Can be one or more. These can be updated without destroying and recreating the service only if force_new_deployment = true and not changing from 0 capacity_provider_strategy blocks to greater than 0, or vice versa. See below. Conflicts with launch_type. + CapacityProviderStrategy []CapacityProviderStrategyInitParameters `json:"capacityProviderStrategy,omitempty" tf:"capacity_provider_strategy,omitempty"` + + // Name of an ECS cluster. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Cluster + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` + + // Reference to a Cluster in ecs to populate cluster. + // +kubebuilder:validation:Optional + ClusterRef *v1.Reference `json:"clusterRef,omitempty" tf:"-"` + + // Selector for a Cluster in ecs to populate cluster. + // +kubebuilder:validation:Optional + ClusterSelector *v1.Selector `json:"clusterSelector,omitempty" tf:"-"` + + // Configuration block for deployment circuit breaker. See below. + DeploymentCircuitBreaker *DeploymentCircuitBreakerInitParameters `json:"deploymentCircuitBreaker,omitempty" tf:"deployment_circuit_breaker,omitempty"` + + // Configuration block for deployment controller configuration. See below. + DeploymentController *DeploymentControllerInitParameters `json:"deploymentController,omitempty" tf:"deployment_controller,omitempty"` + + // Upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the DAEMON scheduling strategy. + DeploymentMaximumPercent *float64 `json:"deploymentMaximumPercent,omitempty" tf:"deployment_maximum_percent,omitempty"` + + // Lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. + DeploymentMinimumHealthyPercent *float64 `json:"deploymentMinimumHealthyPercent,omitempty" tf:"deployment_minimum_healthy_percent,omitempty"` + + // Number of instances of the task definition to place and keep running. Defaults to 0. Do not specify if using the DAEMON scheduling strategy. + DesiredCount *float64 `json:"desiredCount,omitempty" tf:"desired_count,omitempty"` + + // Specifies whether to enable Amazon ECS managed tags for the tasks within the service. 
+ EnableEcsManagedTags *bool `json:"enableEcsManagedTags,omitempty" tf:"enable_ecs_managed_tags,omitempty"` + + // Specifies whether to enable Amazon ECS Exec for the tasks within the service. + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty" tf:"enable_execute_command,omitempty"` + + // Enable to force a new task deployment of the service. This can be used to update tasks to use a newer Docker image with same image/tag combination (e.g., myimage:latest), roll Fargate tasks onto a newer platform version, or immediately deploy ordered_placement_strategy and placement_constraints updates. + ForceNewDeployment *bool `json:"forceNewDeployment,omitempty" tf:"force_new_deployment,omitempty"` + + // Seconds to ignore failing load balancer health checks on newly instantiated tasks to prevent premature shutdown, up to 2147483647. Only valid for services configured to use load balancers. + HealthCheckGracePeriodSeconds *float64 `json:"healthCheckGracePeriodSeconds,omitempty" tf:"health_check_grace_period_seconds,omitempty"` + + // ARN of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is required if you are using a load balancer with your service, but only if your task definition does not use the awsvpc network mode. If using awsvpc network mode, do not specify this role. If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + IAMRole *string `json:"iamRole,omitempty" tf:"iam_role,omitempty"` + + // Reference to a Role in iam to populate iamRole. + // +kubebuilder:validation:Optional + IAMRoleRef *v1.Reference `json:"iamRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRole. 
+ // +kubebuilder:validation:Optional + IAMRoleSelector *v1.Selector `json:"iamRoleSelector,omitempty" tf:"-"` + + // Launch type on which to run your service. The valid values are EC2, FARGATE, and EXTERNAL. Defaults to EC2. Conflicts with capacity_provider_strategy. + LaunchType *string `json:"launchType,omitempty" tf:"launch_type,omitempty"` + + // Configuration block for load balancers. See below. + LoadBalancer []LoadBalancerInitParameters `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` + + // Network configuration for the service. This parameter is required for task definitions that use the awsvpc network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. See below. + NetworkConfiguration *NetworkConfigurationInitParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // Service level strategy rules that are taken into consideration during task placement. List from top to bottom in order of precedence. Updates to this configuration will take effect next task deployment unless force_new_deployment is enabled. The maximum number of ordered_placement_strategy blocks is 5. See below. + OrderedPlacementStrategy []OrderedPlacementStrategyInitParameters `json:"orderedPlacementStrategy,omitempty" tf:"ordered_placement_strategy,omitempty"` + + // Rules that are taken into consideration during task placement. Updates to this configuration will take effect next task deployment unless force_new_deployment is enabled. Maximum number of placement_constraints is 10. See below. + PlacementConstraints []PlacementConstraintsInitParameters `json:"placementConstraints,omitempty" tf:"placement_constraints,omitempty"` + + // Platform version on which to run your service. Only applicable for launch_type set to FARGATE. Defaults to LATEST. More information about Fargate platform versions can be found in the AWS ECS User Guide. 
+ PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // Specifies whether to propagate the tags from the task definition or the service to the tasks. The valid values are SERVICE and TASK_DEFINITION. + PropagateTags *string `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // Scheduling strategy to use for the service. The valid values are REPLICA and DAEMON. Defaults to REPLICA. Note that Tasks using the Fargate launch type or the . + SchedulingStrategy *string `json:"schedulingStrategy,omitempty" tf:"scheduling_strategy,omitempty"` + + // The ECS Service Connect configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace. See below. + ServiceConnectConfiguration *ServiceConnectConfigurationInitParameters `json:"serviceConnectConfiguration,omitempty" tf:"service_connect_configuration,omitempty"` + + // Service discovery registries for the service. The maximum number of service_registries blocks is 1. See below. + ServiceRegistries *ServiceRegistriesInitParameters `json:"serviceRegistries,omitempty" tf:"service_registries,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Family and revision (family:revision) or full ARN of the task definition that you want to run in your service. Required unless using the EXTERNAL deployment controller. If a revision is not specified, the latest ACTIVE revision is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.TaskDefinition + TaskDefinition *string `json:"taskDefinition,omitempty" tf:"task_definition,omitempty"` + + // Reference to a TaskDefinition in ecs to populate taskDefinition. 
+ // +kubebuilder:validation:Optional + TaskDefinitionRef *v1.Reference `json:"taskDefinitionRef,omitempty" tf:"-"` + + // Selector for a TaskDefinition in ecs to populate taskDefinition. + // +kubebuilder:validation:Optional + TaskDefinitionSelector *v1.Selector `json:"taskDefinitionSelector,omitempty" tf:"-"` + + // Map of arbitrary keys and values that, when changed, will trigger an in-place update (redeployment). Useful with plantimestamp(). See example above. + // +mapType=granular + Triggers map[string]*string `json:"triggers,omitempty" tf:"triggers,omitempty"` + + // Default false. + WaitForSteadyState *bool `json:"waitForSteadyState,omitempty" tf:"wait_for_steady_state,omitempty"` +} + +type ServiceObservation struct { + + // Information about the CloudWatch alarms. See below. + Alarms *AlarmsObservation `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // Capacity provider strategies to use for the service. Can be one or more. These can be updated without destroying and recreating the service only if force_new_deployment = true and not changing from 0 capacity_provider_strategy blocks to greater than 0, or vice versa. See below. Conflicts with launch_type. + CapacityProviderStrategy []CapacityProviderStrategyObservation `json:"capacityProviderStrategy,omitempty" tf:"capacity_provider_strategy,omitempty"` + + // Name of an ECS cluster. + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` + + // Configuration block for deployment circuit breaker. See below. + DeploymentCircuitBreaker *DeploymentCircuitBreakerObservation `json:"deploymentCircuitBreaker,omitempty" tf:"deployment_circuit_breaker,omitempty"` + + // Configuration block for deployment controller configuration. See below. 
+ DeploymentController *DeploymentControllerObservation `json:"deploymentController,omitempty" tf:"deployment_controller,omitempty"` + + // Upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the DAEMON scheduling strategy. + DeploymentMaximumPercent *float64 `json:"deploymentMaximumPercent,omitempty" tf:"deployment_maximum_percent,omitempty"` + + // Lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. + DeploymentMinimumHealthyPercent *float64 `json:"deploymentMinimumHealthyPercent,omitempty" tf:"deployment_minimum_healthy_percent,omitempty"` + + // Number of instances of the task definition to place and keep running. Defaults to 0. Do not specify if using the DAEMON scheduling strategy. + DesiredCount *float64 `json:"desiredCount,omitempty" tf:"desired_count,omitempty"` + + // Specifies whether to enable Amazon ECS managed tags for the tasks within the service. + EnableEcsManagedTags *bool `json:"enableEcsManagedTags,omitempty" tf:"enable_ecs_managed_tags,omitempty"` + + // Specifies whether to enable Amazon ECS Exec for the tasks within the service. + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty" tf:"enable_execute_command,omitempty"` + + // Enable to force a new task deployment of the service. This can be used to update tasks to use a newer Docker image with same image/tag combination (e.g., myimage:latest), roll Fargate tasks onto a newer platform version, or immediately deploy ordered_placement_strategy and placement_constraints updates. + ForceNewDeployment *bool `json:"forceNewDeployment,omitempty" tf:"force_new_deployment,omitempty"` + + // Seconds to ignore failing load balancer health checks on newly instantiated tasks to prevent premature shutdown, up to 2147483647. 
Only valid for services configured to use load balancers. + HealthCheckGracePeriodSeconds *float64 `json:"healthCheckGracePeriodSeconds,omitempty" tf:"health_check_grace_period_seconds,omitempty"` + + // ARN of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is required if you are using a load balancer with your service, but only if your task definition does not use the awsvpc network mode. If using awsvpc network mode, do not specify this role. If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. + IAMRole *string `json:"iamRole,omitempty" tf:"iam_role,omitempty"` + + // ARN that identifies the service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Launch type on which to run your service. The valid values are EC2, FARGATE, and EXTERNAL. Defaults to EC2. Conflicts with capacity_provider_strategy. + LaunchType *string `json:"launchType,omitempty" tf:"launch_type,omitempty"` + + // Configuration block for load balancers. See below. + LoadBalancer []LoadBalancerObservation `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` + + // Network configuration for the service. This parameter is required for task definitions that use the awsvpc network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. See below. + NetworkConfiguration *NetworkConfigurationObservation `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // Service level strategy rules that are taken into consideration during task placement. List from top to bottom in order of precedence. Updates to this configuration will take effect next task deployment unless force_new_deployment is enabled. The maximum number of ordered_placement_strategy blocks is 5. See below. 
+ OrderedPlacementStrategy []OrderedPlacementStrategyObservation `json:"orderedPlacementStrategy,omitempty" tf:"ordered_placement_strategy,omitempty"` + + // Rules that are taken into consideration during task placement. Updates to this configuration will take effect next task deployment unless force_new_deployment is enabled. Maximum number of placement_constraints is 10. See below. + PlacementConstraints []PlacementConstraintsObservation `json:"placementConstraints,omitempty" tf:"placement_constraints,omitempty"` + + // Platform version on which to run your service. Only applicable for launch_type set to FARGATE. Defaults to LATEST. More information about Fargate platform versions can be found in the AWS ECS User Guide. + PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // Specifies whether to propagate the tags from the task definition or the service to the tasks. The valid values are SERVICE and TASK_DEFINITION. + PropagateTags *string `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // Scheduling strategy to use for the service. The valid values are REPLICA and DAEMON. Defaults to REPLICA. Note that Tasks using the Fargate launch type or the . + SchedulingStrategy *string `json:"schedulingStrategy,omitempty" tf:"scheduling_strategy,omitempty"` + + // The ECS Service Connect configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace. See below. + ServiceConnectConfiguration *ServiceConnectConfigurationObservation `json:"serviceConnectConfiguration,omitempty" tf:"service_connect_configuration,omitempty"` + + // Service discovery registries for the service. The maximum number of service_registries blocks is 1. See below. + ServiceRegistries *ServiceRegistriesObservation `json:"serviceRegistries,omitempty" tf:"service_registries,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Family and revision (family:revision) or full ARN of the task definition that you want to run in your service. Required unless using the EXTERNAL deployment controller. If a revision is not specified, the latest ACTIVE revision is used. + TaskDefinition *string `json:"taskDefinition,omitempty" tf:"task_definition,omitempty"` + + // Map of arbitrary keys and values that, when changed, will trigger an in-place update (redeployment). Useful with plantimestamp(). See example above. + // +mapType=granular + Triggers map[string]*string `json:"triggers,omitempty" tf:"triggers,omitempty"` + + // Default false. + WaitForSteadyState *bool `json:"waitForSteadyState,omitempty" tf:"wait_for_steady_state,omitempty"` +} + +type ServiceParameters struct { + + // Information about the CloudWatch alarms. See below. + // +kubebuilder:validation:Optional + Alarms *AlarmsParameters `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // Capacity provider strategies to use for the service. Can be one or more. These can be updated without destroying and recreating the service only if force_new_deployment = true and not changing from 0 capacity_provider_strategy blocks to greater than 0, or vice versa. See below. Conflicts with launch_type. + // +kubebuilder:validation:Optional + CapacityProviderStrategy []CapacityProviderStrategyParameters `json:"capacityProviderStrategy,omitempty" tf:"capacity_provider_strategy,omitempty"` + + // Name of an ECS cluster. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Cluster + // +kubebuilder:validation:Optional + Cluster *string `json:"cluster,omitempty" tf:"cluster,omitempty"` + + // Reference to a Cluster in ecs to populate cluster. + // +kubebuilder:validation:Optional + ClusterRef *v1.Reference `json:"clusterRef,omitempty" tf:"-"` + + // Selector for a Cluster in ecs to populate cluster. + // +kubebuilder:validation:Optional + ClusterSelector *v1.Selector `json:"clusterSelector,omitempty" tf:"-"` + + // Configuration block for deployment circuit breaker. See below. + // +kubebuilder:validation:Optional + DeploymentCircuitBreaker *DeploymentCircuitBreakerParameters `json:"deploymentCircuitBreaker,omitempty" tf:"deployment_circuit_breaker,omitempty"` + + // Configuration block for deployment controller configuration. See below. + // +kubebuilder:validation:Optional + DeploymentController *DeploymentControllerParameters `json:"deploymentController,omitempty" tf:"deployment_controller,omitempty"` + + // Upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the DAEMON scheduling strategy. + // +kubebuilder:validation:Optional + DeploymentMaximumPercent *float64 `json:"deploymentMaximumPercent,omitempty" tf:"deployment_maximum_percent,omitempty"` + + // Lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. + // +kubebuilder:validation:Optional + DeploymentMinimumHealthyPercent *float64 `json:"deploymentMinimumHealthyPercent,omitempty" tf:"deployment_minimum_healthy_percent,omitempty"` + + // Number of instances of the task definition to place and keep running. Defaults to 0. Do not specify if using the DAEMON scheduling strategy. 
+ // +kubebuilder:validation:Optional + DesiredCount *float64 `json:"desiredCount,omitempty" tf:"desired_count,omitempty"` + + // Specifies whether to enable Amazon ECS managed tags for the tasks within the service. + // +kubebuilder:validation:Optional + EnableEcsManagedTags *bool `json:"enableEcsManagedTags,omitempty" tf:"enable_ecs_managed_tags,omitempty"` + + // Specifies whether to enable Amazon ECS Exec for the tasks within the service. + // +kubebuilder:validation:Optional + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty" tf:"enable_execute_command,omitempty"` + + // Enable to force a new task deployment of the service. This can be used to update tasks to use a newer Docker image with same image/tag combination (e.g., myimage:latest), roll Fargate tasks onto a newer platform version, or immediately deploy ordered_placement_strategy and placement_constraints updates. + // +kubebuilder:validation:Optional + ForceNewDeployment *bool `json:"forceNewDeployment,omitempty" tf:"force_new_deployment,omitempty"` + + // Seconds to ignore failing load balancer health checks on newly instantiated tasks to prevent premature shutdown, up to 2147483647. Only valid for services configured to use load balancers. + // +kubebuilder:validation:Optional + HealthCheckGracePeriodSeconds *float64 `json:"healthCheckGracePeriodSeconds,omitempty" tf:"health_check_grace_period_seconds,omitempty"` + + // ARN of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is required if you are using a load balancer with your service, but only if your task definition does not use the awsvpc network mode. If using awsvpc network mode, do not specify this role. If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + IAMRole *string `json:"iamRole,omitempty" tf:"iam_role,omitempty"` + + // Reference to a Role in iam to populate iamRole. + // +kubebuilder:validation:Optional + IAMRoleRef *v1.Reference `json:"iamRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRole. + // +kubebuilder:validation:Optional + IAMRoleSelector *v1.Selector `json:"iamRoleSelector,omitempty" tf:"-"` + + // Launch type on which to run your service. The valid values are EC2, FARGATE, and EXTERNAL. Defaults to EC2. Conflicts with capacity_provider_strategy. + // +kubebuilder:validation:Optional + LaunchType *string `json:"launchType,omitempty" tf:"launch_type,omitempty"` + + // Configuration block for load balancers. See below. + // +kubebuilder:validation:Optional + LoadBalancer []LoadBalancerParameters `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` + + // Network configuration for the service. This parameter is required for task definitions that use the awsvpc network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. See below. + // +kubebuilder:validation:Optional + NetworkConfiguration *NetworkConfigurationParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // Service level strategy rules that are taken into consideration during task placement. List from top to bottom in order of precedence. Updates to this configuration will take effect next task deployment unless force_new_deployment is enabled. The maximum number of ordered_placement_strategy blocks is 5. See below. 
+ // +kubebuilder:validation:Optional + OrderedPlacementStrategy []OrderedPlacementStrategyParameters `json:"orderedPlacementStrategy,omitempty" tf:"ordered_placement_strategy,omitempty"` + + // Rules that are taken into consideration during task placement. Updates to this configuration will take effect next task deployment unless force_new_deployment is enabled. Maximum number of placement_constraints is 10. See below. + // +kubebuilder:validation:Optional + PlacementConstraints []PlacementConstraintsParameters `json:"placementConstraints,omitempty" tf:"placement_constraints,omitempty"` + + // Platform version on which to run your service. Only applicable for launch_type set to FARGATE. Defaults to LATEST. More information about Fargate platform versions can be found in the AWS ECS User Guide. + // +kubebuilder:validation:Optional + PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // Specifies whether to propagate the tags from the task definition or the service to the tasks. The valid values are SERVICE and TASK_DEFINITION. + // +kubebuilder:validation:Optional + PropagateTags *string `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Scheduling strategy to use for the service. The valid values are REPLICA and DAEMON. Defaults to REPLICA. Note that Tasks using the Fargate launch type or the . + // +kubebuilder:validation:Optional + SchedulingStrategy *string `json:"schedulingStrategy,omitempty" tf:"scheduling_strategy,omitempty"` + + // The ECS Service Connect configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace. See below. 
+ // +kubebuilder:validation:Optional + ServiceConnectConfiguration *ServiceConnectConfigurationParameters `json:"serviceConnectConfiguration,omitempty" tf:"service_connect_configuration,omitempty"` + + // Service discovery registries for the service. The maximum number of service_registries blocks is 1. See below. + // +kubebuilder:validation:Optional + ServiceRegistries *ServiceRegistriesParameters `json:"serviceRegistries,omitempty" tf:"service_registries,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Family and revision (family:revision) or full ARN of the task definition that you want to run in your service. Required unless using the EXTERNAL deployment controller. If a revision is not specified, the latest ACTIVE revision is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.TaskDefinition + // +kubebuilder:validation:Optional + TaskDefinition *string `json:"taskDefinition,omitempty" tf:"task_definition,omitempty"` + + // Reference to a TaskDefinition in ecs to populate taskDefinition. + // +kubebuilder:validation:Optional + TaskDefinitionRef *v1.Reference `json:"taskDefinitionRef,omitempty" tf:"-"` + + // Selector for a TaskDefinition in ecs to populate taskDefinition. + // +kubebuilder:validation:Optional + TaskDefinitionSelector *v1.Selector `json:"taskDefinitionSelector,omitempty" tf:"-"` + + // Map of arbitrary keys and values that, when changed, will trigger an in-place update (redeployment). Useful with plantimestamp(). See example above. + // +kubebuilder:validation:Optional + // +mapType=granular + Triggers map[string]*string `json:"triggers,omitempty" tf:"triggers,omitempty"` + + // Default false. 
+ // +kubebuilder:validation:Optional + WaitForSteadyState *bool `json:"waitForSteadyState,omitempty" tf:"wait_for_steady_state,omitempty"` +} + +type ServiceRegistriesInitParameters struct { + + // Container name value, already specified in the task definition, to be used for your service discovery service. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Port value, already specified in the task definition, to be used for your service discovery service. + ContainerPort *float64 `json:"containerPort,omitempty" tf:"container_port,omitempty"` + + // Port value used if your Service Discovery service specified an SRV record. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // ARN of the Service Registry. The currently supported service registry is Amazon Route 53 Auto Naming Service(aws_service_discovery_service). For more information, see Service + RegistryArn *string `json:"registryArn,omitempty" tf:"registry_arn,omitempty"` +} + +type ServiceRegistriesObservation struct { + + // Container name value, already specified in the task definition, to be used for your service discovery service. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Port value, already specified in the task definition, to be used for your service discovery service. + ContainerPort *float64 `json:"containerPort,omitempty" tf:"container_port,omitempty"` + + // Port value used if your Service Discovery service specified an SRV record. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // ARN of the Service Registry. The currently supported service registry is Amazon Route 53 Auto Naming Service(aws_service_discovery_service). 
For more information, see Service + RegistryArn *string `json:"registryArn,omitempty" tf:"registry_arn,omitempty"` +} + +type ServiceRegistriesParameters struct { + + // Container name value, already specified in the task definition, to be used for your service discovery service. + // +kubebuilder:validation:Optional + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Port value, already specified in the task definition, to be used for your service discovery service. + // +kubebuilder:validation:Optional + ContainerPort *float64 `json:"containerPort,omitempty" tf:"container_port,omitempty"` + + // Port value used if your Service Discovery service specified an SRV record. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // ARN of the Service Registry. The currently supported service registry is Amazon Route 53 Auto Naming Service(aws_service_discovery_service). For more information, see Service + // +kubebuilder:validation:Optional + RegistryArn *string `json:"registryArn" tf:"registry_arn,omitempty"` +} + +type TLSInitParameters struct { + + // The details of the certificate authority which will issue the certificate. + IssuerCertAuthority *IssuerCertAuthorityInitParameters `json:"issuerCertAuthority,omitempty" tf:"issuer_cert_authority,omitempty"` + + // The KMS key used to encrypt the private key in Secrets Manager. + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` + + // The ARN of the IAM Role that's associated with the Service Connect TLS. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type TLSObservation struct { + + // The details of the certificate authority which will issue the certificate. + IssuerCertAuthority *IssuerCertAuthorityObservation `json:"issuerCertAuthority,omitempty" tf:"issuer_cert_authority,omitempty"` + + // The KMS key used to encrypt the private key in Secrets Manager. 
+ KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` + + // The ARN of the IAM Role that's associated with the Service Connect TLS. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type TLSParameters struct { + + // The details of the certificate authority which will issue the certificate. + // +kubebuilder:validation:Optional + IssuerCertAuthority *IssuerCertAuthorityParameters `json:"issuerCertAuthority" tf:"issuer_cert_authority,omitempty"` + + // The KMS key used to encrypt the private key in Secrets Manager. + // +kubebuilder:validation:Optional + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` + + // The ARN of the IAM Role that's associated with the Service Connect TLS. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type TimeoutInitParameters struct { + + // The amount of time in seconds a connection will stay active while idle. A value of 0 can be set to disable idleTimeout. + IdleTimeoutSeconds *float64 `json:"idleTimeoutSeconds,omitempty" tf:"idle_timeout_seconds,omitempty"` + + // The amount of time in seconds for the upstream to respond with a complete response per request. A value of 0 can be set to disable perRequestTimeout. Can only be set when appProtocol isn't TCP. + PerRequestTimeoutSeconds *float64 `json:"perRequestTimeoutSeconds,omitempty" tf:"per_request_timeout_seconds,omitempty"` +} + +type TimeoutObservation struct { + + // The amount of time in seconds a connection will stay active while idle. A value of 0 can be set to disable idleTimeout. + IdleTimeoutSeconds *float64 `json:"idleTimeoutSeconds,omitempty" tf:"idle_timeout_seconds,omitempty"` + + // The amount of time in seconds for the upstream to respond with a complete response per request. A value of 0 can be set to disable perRequestTimeout. Can only be set when appProtocol isn't TCP. 
+ PerRequestTimeoutSeconds *float64 `json:"perRequestTimeoutSeconds,omitempty" tf:"per_request_timeout_seconds,omitempty"` +} + +type TimeoutParameters struct { + + // The amount of time in seconds a connection will stay active while idle. A value of 0 can be set to disable idleTimeout. + // +kubebuilder:validation:Optional + IdleTimeoutSeconds *float64 `json:"idleTimeoutSeconds,omitempty" tf:"idle_timeout_seconds,omitempty"` + + // The amount of time in seconds for the upstream to respond with a complete response per request. A value of 0 can be set to disable perRequestTimeout. Can only be set when appProtocol isn't TCP. + // +kubebuilder:validation:Optional + PerRequestTimeoutSeconds *float64 `json:"perRequestTimeoutSeconds,omitempty" tf:"per_request_timeout_seconds,omitempty"` +} + +// ServiceSpec defines the desired state of Service +type ServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceInitParameters `json:"initProvider,omitempty"` +} + +// ServiceStatus defines the observed state of Service. 
+type ServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Service is the Schema for the Services API. Provides an ECS service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Service struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ServiceSpec `json:"spec"` + Status ServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceList contains a list of Services +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Service `json:"items"` +} + +// Repository type metadata. +var ( + Service_Kind = "Service" + Service_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Service_Kind}.String() + Service_KindAPIVersion = Service_Kind + "." + CRDGroupVersion.String() + Service_GroupVersionKind = CRDGroupVersion.WithKind(Service_Kind) +) + +func init() { + SchemeBuilder.Register(&Service{}, &ServiceList{}) +} diff --git a/apis/ecs/v1beta2/zz_taskdefinition_terraformed.go b/apis/ecs/v1beta2/zz_taskdefinition_terraformed.go new file mode 100755 index 0000000000..3bd9096c70 --- /dev/null +++ b/apis/ecs/v1beta2/zz_taskdefinition_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TaskDefinition +func (mg *TaskDefinition) GetTerraformResourceType() string { + return "aws_ecs_task_definition" +} + +// GetConnectionDetailsMapping for this TaskDefinition +func (tr *TaskDefinition) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TaskDefinition +func (tr *TaskDefinition) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TaskDefinition +func (tr *TaskDefinition) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TaskDefinition +func (tr *TaskDefinition) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TaskDefinition +func (tr *TaskDefinition) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TaskDefinition +func (tr *TaskDefinition) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TaskDefinition +func (tr *TaskDefinition) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this TaskDefinition +func (tr *TaskDefinition) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this TaskDefinition using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *TaskDefinition) LateInitialize(attrs []byte) (bool, error) { + params := &TaskDefinitionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TaskDefinition) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/ecs/v1beta2/zz_taskdefinition_types.go b/apis/ecs/v1beta2/zz_taskdefinition_types.go new file mode 100755 index 0000000000..a359ab9aee --- /dev/null +++ b/apis/ecs/v1beta2/zz_taskdefinition_types.go @@ -0,0 +1,752 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthorizationConfigInitParameters struct { + + // Access point ID to use. If an access point is specified, the root directory value will be relative to the directory set for the access point. If specified, transit encryption must be enabled in the EFSVolumeConfiguration. + AccessPointID *string `json:"accessPointId,omitempty" tf:"access_point_id,omitempty"` + + // Whether or not to use the Amazon ECS task IAM role defined in a task definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. Valid values: ENABLED, DISABLED. If this parameter is omitted, the default value of DISABLED is used. + IAM *string `json:"iam,omitempty" tf:"iam,omitempty"` +} + +type AuthorizationConfigObservation struct { + + // Access point ID to use. If an access point is specified, the root directory value will be relative to the directory set for the access point. If specified, transit encryption must be enabled in the EFSVolumeConfiguration. 
+ AccessPointID *string `json:"accessPointId,omitempty" tf:"access_point_id,omitempty"` + + // Whether or not to use the Amazon ECS task IAM role defined in a task definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. Valid values: ENABLED, DISABLED. If this parameter is omitted, the default value of DISABLED is used. + IAM *string `json:"iam,omitempty" tf:"iam,omitempty"` +} + +type AuthorizationConfigParameters struct { + + // Access point ID to use. If an access point is specified, the root directory value will be relative to the directory set for the access point. If specified, transit encryption must be enabled in the EFSVolumeConfiguration. + // +kubebuilder:validation:Optional + AccessPointID *string `json:"accessPointId,omitempty" tf:"access_point_id,omitempty"` + + // Whether or not to use the Amazon ECS task IAM role defined in a task definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. Valid values: ENABLED, DISABLED. If this parameter is omitted, the default value of DISABLED is used. + // +kubebuilder:validation:Optional + IAM *string `json:"iam,omitempty" tf:"iam,omitempty"` +} + +type DockerVolumeConfigurationInitParameters struct { + + // If this value is true, the Docker volume is created if it does not already exist. Note: This field is only used if the scope is shared. + Autoprovision *bool `json:"autoprovision,omitempty" tf:"autoprovision,omitempty"` + + // Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. + Driver *string `json:"driver,omitempty" tf:"driver,omitempty"` + + // Map of Docker driver specific options. + // +mapType=granular + DriverOpts map[string]*string `json:"driverOpts,omitempty" tf:"driver_opts,omitempty"` + + // Map of custom metadata to add to your Docker volume. 
+ // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Scope for the Docker volume, which determines its lifecycle, either task or shared. Docker volumes that are scoped to a task are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as shared persist after the task stops. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type DockerVolumeConfigurationObservation struct { + + // If this value is true, the Docker volume is created if it does not already exist. Note: This field is only used if the scope is shared. + Autoprovision *bool `json:"autoprovision,omitempty" tf:"autoprovision,omitempty"` + + // Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. + Driver *string `json:"driver,omitempty" tf:"driver,omitempty"` + + // Map of Docker driver specific options. + // +mapType=granular + DriverOpts map[string]*string `json:"driverOpts,omitempty" tf:"driver_opts,omitempty"` + + // Map of custom metadata to add to your Docker volume. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Scope for the Docker volume, which determines its lifecycle, either task or shared. Docker volumes that are scoped to a task are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as shared persist after the task stops. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type DockerVolumeConfigurationParameters struct { + + // If this value is true, the Docker volume is created if it does not already exist. Note: This field is only used if the scope is shared. + // +kubebuilder:validation:Optional + Autoprovision *bool `json:"autoprovision,omitempty" tf:"autoprovision,omitempty"` + + // Docker volume driver to use. 
The driver value must match the driver name provided by Docker because it is used for task placement. + // +kubebuilder:validation:Optional + Driver *string `json:"driver,omitempty" tf:"driver,omitempty"` + + // Map of Docker driver specific options. + // +kubebuilder:validation:Optional + // +mapType=granular + DriverOpts map[string]*string `json:"driverOpts,omitempty" tf:"driver_opts,omitempty"` + + // Map of custom metadata to add to your Docker volume. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Scope for the Docker volume, which determines its lifecycle, either task or shared. Docker volumes that are scoped to a task are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as shared persist after the task stops. + // +kubebuilder:validation:Optional + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type EFSVolumeConfigurationInitParameters struct { + + // Configuration block for authorization for the Amazon EFS file system. Detailed below. + AuthorizationConfig *AuthorizationConfigInitParameters `json:"authorizationConfig,omitempty" tf:"authorization_config,omitempty"` + + // ID of the EFS File System. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have the same effect as omitting this parameter. This argument is ignored when using authorization_config. + RootDirectory *string `json:"rootDirectory,omitempty" tf:"root_directory,omitempty"` + + // Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. 
Valid values: ENABLED, DISABLED. If this parameter is omitted, the default value of DISABLED is used. + TransitEncryption *string `json:"transitEncryption,omitempty" tf:"transit_encryption,omitempty"` + + // Port to use for transit encryption. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. + TransitEncryptionPort *float64 `json:"transitEncryptionPort,omitempty" tf:"transit_encryption_port,omitempty"` +} + +type EFSVolumeConfigurationObservation struct { + + // Configuration block for authorization for the Amazon EFS file system. Detailed below. + AuthorizationConfig *AuthorizationConfigObservation `json:"authorizationConfig,omitempty" tf:"authorization_config,omitempty"` + + // ID of the EFS File System. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have the same effect as omitting this parameter. This argument is ignored when using authorization_config. + RootDirectory *string `json:"rootDirectory,omitempty" tf:"root_directory,omitempty"` + + // Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. Valid values: ENABLED, DISABLED. If this parameter is omitted, the default value of DISABLED is used. + TransitEncryption *string `json:"transitEncryption,omitempty" tf:"transit_encryption,omitempty"` + + // Port to use for transit encryption. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. 
+ TransitEncryptionPort *float64 `json:"transitEncryptionPort,omitempty" tf:"transit_encryption_port,omitempty"` +} + +type EFSVolumeConfigurationParameters struct { + + // Configuration block for authorization for the Amazon EFS file system. Detailed below. + // +kubebuilder:validation:Optional + AuthorizationConfig *AuthorizationConfigParameters `json:"authorizationConfig,omitempty" tf:"authorization_config,omitempty"` + + // ID of the EFS File System. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId" tf:"file_system_id,omitempty"` + + // Directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have the same effect as omitting this parameter. This argument is ignored when using authorization_config. + // +kubebuilder:validation:Optional + RootDirectory *string `json:"rootDirectory,omitempty" tf:"root_directory,omitempty"` + + // Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. Valid values: ENABLED, DISABLED. If this parameter is omitted, the default value of DISABLED is used. + // +kubebuilder:validation:Optional + TransitEncryption *string `json:"transitEncryption,omitempty" tf:"transit_encryption,omitempty"` + + // Port to use for transit encryption. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. + // +kubebuilder:validation:Optional + TransitEncryptionPort *float64 `json:"transitEncryptionPort,omitempty" tf:"transit_encryption_port,omitempty"` +} + +type EphemeralStorageInitParameters struct { + + // The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21 GiB and the maximum supported value is 200 GiB. 
+ SizeInGib *float64 `json:"sizeInGib,omitempty" tf:"size_in_gib,omitempty"` +} + +type EphemeralStorageObservation struct { + + // The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21 GiB and the maximum supported value is 200 GiB. + SizeInGib *float64 `json:"sizeInGib,omitempty" tf:"size_in_gib,omitempty"` +} + +type EphemeralStorageParameters struct { + + // The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21 GiB and the maximum supported value is 200 GiB. + // +kubebuilder:validation:Optional + SizeInGib *float64 `json:"sizeInGib" tf:"size_in_gib,omitempty"` +} + +type FSXWindowsFileServerVolumeConfigurationAuthorizationConfigInitParameters struct { + + // The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or AWS Systems Manager Parameter Store parameter. The ARNs refer to the stored credentials. + CredentialsParameter *string `json:"credentialsParameter,omitempty" tf:"credentials_parameter,omitempty"` + + // A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` +} + +type FSXWindowsFileServerVolumeConfigurationAuthorizationConfigObservation struct { + + // The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or AWS Systems Manager Parameter Store parameter. The ARNs refer to the stored credentials. + CredentialsParameter *string `json:"credentialsParameter,omitempty" tf:"credentials_parameter,omitempty"` + + // A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2. 
+ Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` +} + +type FSXWindowsFileServerVolumeConfigurationAuthorizationConfigParameters struct { + + // The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or AWS Systems Manager Parameter Store parameter. The ARNs refer to the stored credentials. + // +kubebuilder:validation:Optional + CredentialsParameter *string `json:"credentialsParameter" tf:"credentials_parameter,omitempty"` + + // A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2. + // +kubebuilder:validation:Optional + Domain *string `json:"domain" tf:"domain,omitempty"` +} + +type FSXWindowsFileServerVolumeConfigurationInitParameters struct { + + // Configuration block for authorization for the Amazon FSx for Windows File Server file system detailed below. + AuthorizationConfig *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigInitParameters `json:"authorizationConfig,omitempty" tf:"authorization_config,omitempty"` + + // The Amazon FSx for Windows File Server file system ID to use. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host. + RootDirectory *string `json:"rootDirectory,omitempty" tf:"root_directory,omitempty"` +} + +type FSXWindowsFileServerVolumeConfigurationObservation struct { + + // Configuration block for authorization for the Amazon FSx for Windows File Server file system detailed below. + AuthorizationConfig *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigObservation `json:"authorizationConfig,omitempty" tf:"authorization_config,omitempty"` + + // The Amazon FSx for Windows File Server file system ID to use. 
+ FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host. + RootDirectory *string `json:"rootDirectory,omitempty" tf:"root_directory,omitempty"` +} + +type FSXWindowsFileServerVolumeConfigurationParameters struct { + + // Configuration block for authorization for the Amazon FSx for Windows File Server file system detailed below. + // +kubebuilder:validation:Optional + AuthorizationConfig *FSXWindowsFileServerVolumeConfigurationAuthorizationConfigParameters `json:"authorizationConfig" tf:"authorization_config,omitempty"` + + // The Amazon FSx for Windows File Server file system ID to use. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId" tf:"file_system_id,omitempty"` + + // The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host. + // +kubebuilder:validation:Optional + RootDirectory *string `json:"rootDirectory" tf:"root_directory,omitempty"` +} + +type InferenceAcceleratorInitParameters struct { + + // Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Elastic Inference accelerator type to use. + DeviceType *string `json:"deviceType,omitempty" tf:"device_type,omitempty"` +} + +type InferenceAcceleratorObservation struct { + + // Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Elastic Inference accelerator type to use. 
+ DeviceType *string `json:"deviceType,omitempty" tf:"device_type,omitempty"` +} + +type InferenceAcceleratorParameters struct { + + // Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // Elastic Inference accelerator type to use. + // +kubebuilder:validation:Optional + DeviceType *string `json:"deviceType" tf:"device_type,omitempty"` +} + +type ProxyConfigurationInitParameters struct { + + // Name of the container that will serve as the App Mesh proxy. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Set of network configuration parameters to provide the Container Network Interface (CNI) plugin, specified a key-value mapping. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Proxy type. The default value is APPMESH. The only supported value is APPMESH. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProxyConfigurationObservation struct { + + // Name of the container that will serve as the App Mesh proxy. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Set of network configuration parameters to provide the Container Network Interface (CNI) plugin, specified a key-value mapping. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Proxy type. The default value is APPMESH. The only supported value is APPMESH. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProxyConfigurationParameters struct { + + // Name of the container that will serve as the App Mesh proxy. 
+ // +kubebuilder:validation:Optional + ContainerName *string `json:"containerName" tf:"container_name,omitempty"` + + // Set of network configuration parameters to provide the Container Network Interface (CNI) plugin, specified a key-value mapping. + // +kubebuilder:validation:Optional + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Proxy type. The default value is APPMESH. The only supported value is APPMESH. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RuntimePlatformInitParameters struct { + + // Must be set to either X86_64 or ARM64; see cpu architecture + CPUArchitecture *string `json:"cpuArchitecture,omitempty" tf:"cpu_architecture,omitempty"` + + // If the requires_compatibilities is FARGATE this field is required; must be set to a valid option from the operating system family in the runtime platform setting + OperatingSystemFamily *string `json:"operatingSystemFamily,omitempty" tf:"operating_system_family,omitempty"` +} + +type RuntimePlatformObservation struct { + + // Must be set to either X86_64 or ARM64; see cpu architecture + CPUArchitecture *string `json:"cpuArchitecture,omitempty" tf:"cpu_architecture,omitempty"` + + // If the requires_compatibilities is FARGATE this field is required; must be set to a valid option from the operating system family in the runtime platform setting + OperatingSystemFamily *string `json:"operatingSystemFamily,omitempty" tf:"operating_system_family,omitempty"` +} + +type RuntimePlatformParameters struct { + + // Must be set to either X86_64 or ARM64; see cpu architecture + // +kubebuilder:validation:Optional + CPUArchitecture *string `json:"cpuArchitecture,omitempty" tf:"cpu_architecture,omitempty"` + + // If the requires_compatibilities is FARGATE this field is required; must be set to a valid option from the operating system family in the runtime platform setting + // 
+kubebuilder:validation:Optional + OperatingSystemFamily *string `json:"operatingSystemFamily,omitempty" tf:"operating_system_family,omitempty"` +} + +type TaskDefinitionInitParameters struct { + + // Number of cpu units used by the task. If the requires_compatibilities is FARGATE this field is required. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // A list of valid container definitions provided as a single valid JSON document. Please note that you should only provide values that are part of the container definition document. For a detailed description of what parameters are available, see the Task Definition Parameters section from the official Developer Guide. + ContainerDefinitions *string `json:"containerDefinitions,omitempty" tf:"container_definitions,omitempty"` + + // The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. See Ephemeral Storage. + EphemeralStorage *EphemeralStorageInitParameters `json:"ephemeralStorage,omitempty" tf:"ephemeral_storage,omitempty"` + + // ARN of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // A unique name for your task definition. 
+ Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // Configuration block(s) with Inference Accelerators settings. Detailed below. + InferenceAccelerator []InferenceAcceleratorInitParameters `json:"inferenceAccelerator,omitempty" tf:"inference_accelerator,omitempty"` + + // IPC resource namespace to be used for the containers in the task The valid values are host, task, and none. + IpcMode *string `json:"ipcMode,omitempty" tf:"ipc_mode,omitempty"` + + // Amount (in MiB) of memory used by the task. If the requires_compatibilities is FARGATE this field is required. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` + + // Docker networking mode to use for the containers in the task. Valid values are none, bridge, awsvpc, and host. + NetworkMode *string `json:"networkMode,omitempty" tf:"network_mode,omitempty"` + + // Process namespace to use for the containers in the task. The valid values are host and task. + PidMode *string `json:"pidMode,omitempty" tf:"pid_mode,omitempty"` + + // Configuration block for rules that are taken into consideration during task placement. Maximum number of placement_constraints is 10. Detailed below. + PlacementConstraints []TaskDefinitionPlacementConstraintsInitParameters `json:"placementConstraints,omitempty" tf:"placement_constraints,omitempty"` + + // Configuration block for the App Mesh proxy. Detailed below. + ProxyConfiguration *ProxyConfigurationInitParameters `json:"proxyConfiguration,omitempty" tf:"proxy_configuration,omitempty"` + + // Set of launch types required by the task. The valid values are EC2 and FARGATE. + // +listType=set + RequiresCompatibilities []*string `json:"requiresCompatibilities,omitempty" tf:"requires_compatibilities,omitempty"` + + // Configuration block for runtime_platform that containers in your task may use. 
+ RuntimePlatform *RuntimePlatformInitParameters `json:"runtimePlatform,omitempty" tf:"runtime_platform,omitempty"` + + // Whether to retain the old revision when the resource is destroyed or replacement is necessary. Default is false. + SkipDestroy *bool `json:"skipDestroy,omitempty" tf:"skip_destroy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // ARN of IAM role that allows your Amazon ECS container task to make calls to other AWS services. + TaskRoleArn *string `json:"taskRoleArn,omitempty" tf:"task_role_arn,omitempty"` + + // Whether should track latest task definition or the one created with the resource. Default is false. + TrackLatest *bool `json:"trackLatest,omitempty" tf:"track_latest,omitempty"` + + // Configuration block for volumes that containers in your task may use. Detailed below. + Volume []VolumeInitParameters `json:"volume,omitempty" tf:"volume,omitempty"` +} + +type TaskDefinitionObservation struct { + + // Full ARN of the Task Definition (including both family and revision). + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN of the Task Definition with the trailing revision removed. This may be useful for situations where the latest task definition is always desired. If a revision isn't specified, the latest ACTIVE revision is used. See the AWS documentation for details. + ArnWithoutRevision *string `json:"arnWithoutRevision,omitempty" tf:"arn_without_revision,omitempty"` + + // Number of cpu units used by the task. If the requires_compatibilities is FARGATE this field is required. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // A list of valid container definitions provided as a single valid JSON document. Please note that you should only provide values that are part of the container definition document. 
For a detailed description of what parameters are available, see the Task Definition Parameters section from the official Developer Guide. + ContainerDefinitions *string `json:"containerDefinitions,omitempty" tf:"container_definitions,omitempty"` + + // The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. See Ephemeral Storage. + EphemeralStorage *EphemeralStorageObservation `json:"ephemeralStorage,omitempty" tf:"ephemeral_storage,omitempty"` + + // ARN of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // A unique name for your task definition. + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block(s) with Inference Accelerators settings. Detailed below. + InferenceAccelerator []InferenceAcceleratorObservation `json:"inferenceAccelerator,omitempty" tf:"inference_accelerator,omitempty"` + + // IPC resource namespace to be used for the containers in the task The valid values are host, task, and none. + IpcMode *string `json:"ipcMode,omitempty" tf:"ipc_mode,omitempty"` + + // Amount (in MiB) of memory used by the task. If the requires_compatibilities is FARGATE this field is required. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` + + // Docker networking mode to use for the containers in the task. Valid values are none, bridge, awsvpc, and host. + NetworkMode *string `json:"networkMode,omitempty" tf:"network_mode,omitempty"` + + // Process namespace to use for the containers in the task. The valid values are host and task. 
+ PidMode *string `json:"pidMode,omitempty" tf:"pid_mode,omitempty"` + + // Configuration block for rules that are taken into consideration during task placement. Maximum number of placement_constraints is 10. Detailed below. + PlacementConstraints []TaskDefinitionPlacementConstraintsObservation `json:"placementConstraints,omitempty" tf:"placement_constraints,omitempty"` + + // Configuration block for the App Mesh proxy. Detailed below. + ProxyConfiguration *ProxyConfigurationObservation `json:"proxyConfiguration,omitempty" tf:"proxy_configuration,omitempty"` + + // Set of launch types required by the task. The valid values are EC2 and FARGATE. + // +listType=set + RequiresCompatibilities []*string `json:"requiresCompatibilities,omitempty" tf:"requires_compatibilities,omitempty"` + + // Revision of the task in a particular family. + Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` + + // Configuration block for runtime_platform that containers in your task may use. + RuntimePlatform *RuntimePlatformObservation `json:"runtimePlatform,omitempty" tf:"runtime_platform,omitempty"` + + // Whether to retain the old revision when the resource is destroyed or replacement is necessary. Default is false. + SkipDestroy *bool `json:"skipDestroy,omitempty" tf:"skip_destroy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // ARN of IAM role that allows your Amazon ECS container task to make calls to other AWS services. + TaskRoleArn *string `json:"taskRoleArn,omitempty" tf:"task_role_arn,omitempty"` + + // Whether should track latest task definition or the one created with the resource. Default is false. 
+ TrackLatest *bool `json:"trackLatest,omitempty" tf:"track_latest,omitempty"` + + // Configuration block for volumes that containers in your task may use. Detailed below. + Volume []VolumeObservation `json:"volume,omitempty" tf:"volume,omitempty"` +} + +type TaskDefinitionParameters struct { + + // Number of cpu units used by the task. If the requires_compatibilities is FARGATE this field is required. + // +kubebuilder:validation:Optional + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // A list of valid container definitions provided as a single valid JSON document. Please note that you should only provide values that are part of the container definition document. For a detailed description of what parameters are available, see the Task Definition Parameters section from the official Developer Guide. + // +kubebuilder:validation:Optional + ContainerDefinitions *string `json:"containerDefinitions,omitempty" tf:"container_definitions,omitempty"` + + // The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. See Ephemeral Storage. + // +kubebuilder:validation:Optional + EphemeralStorage *EphemeralStorageParameters `json:"ephemeralStorage,omitempty" tf:"ephemeral_storage,omitempty"` + + // ARN of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. 
+ // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // A unique name for your task definition. + // +kubebuilder:validation:Optional + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // Configuration block(s) with Inference Accelerators settings. Detailed below. + // +kubebuilder:validation:Optional + InferenceAccelerator []InferenceAcceleratorParameters `json:"inferenceAccelerator,omitempty" tf:"inference_accelerator,omitempty"` + + // IPC resource namespace to be used for the containers in the task The valid values are host, task, and none. + // +kubebuilder:validation:Optional + IpcMode *string `json:"ipcMode,omitempty" tf:"ipc_mode,omitempty"` + + // Amount (in MiB) of memory used by the task. If the requires_compatibilities is FARGATE this field is required. + // +kubebuilder:validation:Optional + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` + + // Docker networking mode to use for the containers in the task. Valid values are none, bridge, awsvpc, and host. + // +kubebuilder:validation:Optional + NetworkMode *string `json:"networkMode,omitempty" tf:"network_mode,omitempty"` + + // Process namespace to use for the containers in the task. The valid values are host and task. + // +kubebuilder:validation:Optional + PidMode *string `json:"pidMode,omitempty" tf:"pid_mode,omitempty"` + + // Configuration block for rules that are taken into consideration during task placement. Maximum number of placement_constraints is 10. Detailed below. + // +kubebuilder:validation:Optional + PlacementConstraints []TaskDefinitionPlacementConstraintsParameters `json:"placementConstraints,omitempty" tf:"placement_constraints,omitempty"` + + // Configuration block for the App Mesh proxy. 
Detailed below. + // +kubebuilder:validation:Optional + ProxyConfiguration *ProxyConfigurationParameters `json:"proxyConfiguration,omitempty" tf:"proxy_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Set of launch types required by the task. The valid values are EC2 and FARGATE. + // +kubebuilder:validation:Optional + // +listType=set + RequiresCompatibilities []*string `json:"requiresCompatibilities,omitempty" tf:"requires_compatibilities,omitempty"` + + // Configuration block for runtime_platform that containers in your task may use. + // +kubebuilder:validation:Optional + RuntimePlatform *RuntimePlatformParameters `json:"runtimePlatform,omitempty" tf:"runtime_platform,omitempty"` + + // Whether to retain the old revision when the resource is destroyed or replacement is necessary. Default is false. + // +kubebuilder:validation:Optional + SkipDestroy *bool `json:"skipDestroy,omitempty" tf:"skip_destroy,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // ARN of IAM role that allows your Amazon ECS container task to make calls to other AWS services. + // +kubebuilder:validation:Optional + TaskRoleArn *string `json:"taskRoleArn,omitempty" tf:"task_role_arn,omitempty"` + + // Whether should track latest task definition or the one created with the resource. Default is false. + // +kubebuilder:validation:Optional + TrackLatest *bool `json:"trackLatest,omitempty" tf:"track_latest,omitempty"` + + // Configuration block for volumes that containers in your task may use. Detailed below. 
+ // +kubebuilder:validation:Optional + Volume []VolumeParameters `json:"volume,omitempty" tf:"volume,omitempty"` +} + +type TaskDefinitionPlacementConstraintsInitParameters struct { + + // Cluster Query Language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Type of constraint. Use memberOf to restrict selection to a group of valid candidates. Note that distinctInstance is not supported in task definitions. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TaskDefinitionPlacementConstraintsObservation struct { + + // Cluster Query Language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Type of constraint. Use memberOf to restrict selection to a group of valid candidates. Note that distinctInstance is not supported in task definitions. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TaskDefinitionPlacementConstraintsParameters struct { + + // Cluster Query Language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // Type of constraint. Use memberOf to restrict selection to a group of valid candidates. Note that distinctInstance is not supported in task definitions. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type VolumeInitParameters struct { + + // Configuration block to configure a docker volume. Detailed below. 
+ DockerVolumeConfiguration *DockerVolumeConfigurationInitParameters `json:"dockerVolumeConfiguration,omitempty" tf:"docker_volume_configuration,omitempty"` + + // Configuration block for an EFS volume. Detailed below. + EFSVolumeConfiguration *EFSVolumeConfigurationInitParameters `json:"efsVolumeConfiguration,omitempty" tf:"efs_volume_configuration,omitempty"` + + // Configuration block for an FSX Windows File Server volume. Detailed below. + FSXWindowsFileServerVolumeConfiguration *FSXWindowsFileServerVolumeConfigurationInitParameters `json:"fsxWindowsFileServerVolumeConfiguration,omitempty" tf:"fsx_windows_file_server_volume_configuration,omitempty"` + + // Path on the host container instance that is presented to the container. If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished. + HostPath *string `json:"hostPath,omitempty" tf:"host_path,omitempty"` + + // Name of the volume. This name is referenced in the sourceVolume + // parameter of container definition in the mountPoints section. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type VolumeObservation struct { + + // Configuration block to configure a docker volume. Detailed below. + DockerVolumeConfiguration *DockerVolumeConfigurationObservation `json:"dockerVolumeConfiguration,omitempty" tf:"docker_volume_configuration,omitempty"` + + // Configuration block for an EFS volume. Detailed below. + EFSVolumeConfiguration *EFSVolumeConfigurationObservation `json:"efsVolumeConfiguration,omitempty" tf:"efs_volume_configuration,omitempty"` + + // Configuration block for an FSX Windows File Server volume. Detailed below. + FSXWindowsFileServerVolumeConfiguration *FSXWindowsFileServerVolumeConfigurationObservation `json:"fsxWindowsFileServerVolumeConfiguration,omitempty" tf:"fsx_windows_file_server_volume_configuration,omitempty"` + + // Path on the host container instance that is presented to the container. 
If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished. + HostPath *string `json:"hostPath,omitempty" tf:"host_path,omitempty"` + + // Name of the volume. This name is referenced in the sourceVolume + // parameter of container definition in the mountPoints section. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type VolumeParameters struct { + + // Configuration block to configure a docker volume. Detailed below. + // +kubebuilder:validation:Optional + DockerVolumeConfiguration *DockerVolumeConfigurationParameters `json:"dockerVolumeConfiguration,omitempty" tf:"docker_volume_configuration,omitempty"` + + // Configuration block for an EFS volume. Detailed below. + // +kubebuilder:validation:Optional + EFSVolumeConfiguration *EFSVolumeConfigurationParameters `json:"efsVolumeConfiguration,omitempty" tf:"efs_volume_configuration,omitempty"` + + // Configuration block for an FSX Windows File Server volume. Detailed below. + // +kubebuilder:validation:Optional + FSXWindowsFileServerVolumeConfiguration *FSXWindowsFileServerVolumeConfigurationParameters `json:"fsxWindowsFileServerVolumeConfiguration,omitempty" tf:"fsx_windows_file_server_volume_configuration,omitempty"` + + // Path on the host container instance that is presented to the container. If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished. + // +kubebuilder:validation:Optional + HostPath *string `json:"hostPath,omitempty" tf:"host_path,omitempty"` + + // Name of the volume. This name is referenced in the sourceVolume + // parameter of container definition in the mountPoints section. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +// TaskDefinitionSpec defines the desired state of TaskDefinition +type TaskDefinitionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TaskDefinitionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TaskDefinitionInitParameters `json:"initProvider,omitempty"` +} + +// TaskDefinitionStatus defines the observed state of TaskDefinition. +type TaskDefinitionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TaskDefinitionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TaskDefinition is the Schema for the TaskDefinitions API. Manages a revision of an ECS task definition. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type TaskDefinition struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.containerDefinitions) || (has(self.initProvider) && has(self.initProvider.containerDefinitions))",message="spec.forProvider.containerDefinitions is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.family) || (has(self.initProvider) && has(self.initProvider.family))",message="spec.forProvider.family is a required parameter" + Spec TaskDefinitionSpec `json:"spec"` + Status TaskDefinitionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TaskDefinitionList contains a list of TaskDefinitions +type TaskDefinitionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TaskDefinition `json:"items"` +} + +// Repository type metadata. +var ( + TaskDefinition_Kind = "TaskDefinition" + TaskDefinition_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TaskDefinition_Kind}.String() + TaskDefinition_KindAPIVersion = TaskDefinition_Kind + "." 
+ CRDGroupVersion.String() + TaskDefinition_GroupVersionKind = CRDGroupVersion.WithKind(TaskDefinition_Kind) +) + +func init() { + SchemeBuilder.Register(&TaskDefinition{}, &TaskDefinitionList{}) +} diff --git a/apis/efs/v1beta1/zz_filesystempolicy_types.go b/apis/efs/v1beta1/zz_filesystempolicy_types.go index 206e87366a..9d1e3eaf9d 100755 --- a/apis/efs/v1beta1/zz_filesystempolicy_types.go +++ b/apis/efs/v1beta1/zz_filesystempolicy_types.go @@ -19,7 +19,7 @@ type FileSystemPolicyInitParameters struct { BypassPolicyLockoutSafetyCheck *bool `json:"bypassPolicyLockoutSafetyCheck,omitempty" tf:"bypass_policy_lockout_safety_check,omitempty"` // The ID of the EFS file system. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta1.FileSystem + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` // Reference to a FileSystem in efs to populate fileSystemId. @@ -56,7 +56,7 @@ type FileSystemPolicyParameters struct { BypassPolicyLockoutSafetyCheck *bool `json:"bypassPolicyLockoutSafetyCheck,omitempty" tf:"bypass_policy_lockout_safety_check,omitempty"` // The ID of the EFS file system. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta1.FileSystem + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem // +kubebuilder:validation:Optional FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` diff --git a/apis/efs/v1beta1/zz_generated.conversion_hubs.go b/apis/efs/v1beta1/zz_generated.conversion_hubs.go index 6751e1b488..ddd4afe02a 100755 --- a/apis/efs/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/efs/v1beta1/zz_generated.conversion_hubs.go @@ -6,20 +6,8 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *AccessPoint) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *BackupPolicy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FileSystem) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FileSystemPolicy) Hub() {} // Hub marks this type as a conversion hub. func (tr *MountTarget) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ReplicationConfiguration) Hub() {} diff --git a/apis/efs/v1beta1/zz_generated.conversion_spokes.go b/apis/efs/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..24bc80f9ff --- /dev/null +++ b/apis/efs/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AccessPoint to the hub type. +func (tr *AccessPoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AccessPoint type. 
+func (tr *AccessPoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BackupPolicy to the hub type. +func (tr *BackupPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BackupPolicy type. +func (tr *BackupPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FileSystem to the hub type. +func (tr *FileSystem) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FileSystem type. 
+func (tr *FileSystem) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ReplicationConfiguration to the hub type. +func (tr *ReplicationConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ReplicationConfiguration type. 
+func (tr *ReplicationConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/efs/v1beta1/zz_generated.resolvers.go b/apis/efs/v1beta1/zz_generated.resolvers.go index 25208634f8..9c32d8362e 100644 --- a/apis/efs/v1beta1/zz_generated.resolvers.go +++ b/apis/efs/v1beta1/zz_generated.resolvers.go @@ -177,7 +177,7 @@ func (mg *FileSystemPolicy) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta1", "FileSystem", "FileSystemList") + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -196,7 +196,7 @@ func (mg *FileSystemPolicy) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.FileSystemIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta1", "FileSystem", "FileSystemList") + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -228,7 +228,7 @@ func (mg *MountTarget) ResolveReferences(ctx context.Context, c client.Reader) e var mrsp reference.MultiResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta1", "FileSystem", "FileSystemList") + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -285,7 +285,7 @@ func (mg *MountTarget) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta1", "FileSystem", "FileSystemList") + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/efs/v1beta1/zz_mounttarget_types.go b/apis/efs/v1beta1/zz_mounttarget_types.go index 5bd19af162..aa7375500e 100755 --- a/apis/efs/v1beta1/zz_mounttarget_types.go +++ b/apis/efs/v1beta1/zz_mounttarget_types.go @@ -16,7 +16,7 @@ import ( type MountTargetInitParameters struct { // The ID of the file system for which the mount target is intended. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta1.FileSystem + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` // Reference to a FileSystem in efs to populate fileSystemId. @@ -103,7 +103,7 @@ type MountTargetObservation struct { type MountTargetParameters struct { // The ID of the file system for which the mount target is intended. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta1.FileSystem + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem // +kubebuilder:validation:Optional FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` diff --git a/apis/efs/v1beta2/zz_accesspoint_terraformed.go b/apis/efs/v1beta2/zz_accesspoint_terraformed.go new file mode 100755 index 0000000000..f7e9a89c30 --- /dev/null +++ b/apis/efs/v1beta2/zz_accesspoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AccessPoint +func (mg *AccessPoint) GetTerraformResourceType() string { + return "aws_efs_access_point" +} + +// GetConnectionDetailsMapping for this AccessPoint +func (tr *AccessPoint) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AccessPoint +func (tr *AccessPoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AccessPoint +func (tr *AccessPoint) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AccessPoint +func (tr *AccessPoint) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AccessPoint +func (tr 
*AccessPoint) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this AccessPoint
+func (tr *AccessPoint) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this AccessPoint
+func (tr *AccessPoint) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this AccessPoint
+func (tr *AccessPoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this AccessPoint using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *AccessPoint) LateInitialize(attrs []byte) (bool, error) { + params := &AccessPointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AccessPoint) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/efs/v1beta2/zz_accesspoint_types.go b/apis/efs/v1beta2/zz_accesspoint_types.go new file mode 100755 index 0000000000..60e7946939 --- /dev/null +++ b/apis/efs/v1beta2/zz_accesspoint_types.go @@ -0,0 +1,275 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessPointInitParameters struct { + + // ID of the file system for which the access point is intended. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Reference to a FileSystem in efs to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDRef *v1.Reference `json:"fileSystemIdRef,omitempty" tf:"-"` + + // Selector for a FileSystem in efs to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDSelector *v1.Selector `json:"fileSystemIdSelector,omitempty" tf:"-"` + + // Operating system user and group applied to all file system requests made using the access point. 
Detailed below. + PosixUser *PosixUserInitParameters `json:"posixUser,omitempty" tf:"posix_user,omitempty"` + + // Directory on the Amazon EFS file system that the access point provides access to. Detailed below. + RootDirectory *RootDirectoryInitParameters `json:"rootDirectory,omitempty" tf:"root_directory,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccessPointObservation struct { + + // ARN of the access point. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN of the file system. + FileSystemArn *string `json:"fileSystemArn,omitempty" tf:"file_system_arn,omitempty"` + + // ID of the file system for which the access point is intended. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // ID of the access point. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ID of the access point. + OwnerID *string `json:"ownerId,omitempty" tf:"owner_id,omitempty"` + + // Operating system user and group applied to all file system requests made using the access point. Detailed below. + PosixUser *PosixUserObservation `json:"posixUser,omitempty" tf:"posix_user,omitempty"` + + // Directory on the Amazon EFS file system that the access point provides access to. Detailed below. + RootDirectory *RootDirectoryObservation `json:"rootDirectory,omitempty" tf:"root_directory,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type AccessPointParameters struct { + + // ID of the file system for which the access point is intended. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Reference to a FileSystem in efs to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDRef *v1.Reference `json:"fileSystemIdRef,omitempty" tf:"-"` + + // Selector for a FileSystem in efs to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDSelector *v1.Selector `json:"fileSystemIdSelector,omitempty" tf:"-"` + + // Operating system user and group applied to all file system requests made using the access point. Detailed below. + // +kubebuilder:validation:Optional + PosixUser *PosixUserParameters `json:"posixUser,omitempty" tf:"posix_user,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Directory on the Amazon EFS file system that the access point provides access to. Detailed below. + // +kubebuilder:validation:Optional + RootDirectory *RootDirectoryParameters `json:"rootDirectory,omitempty" tf:"root_directory,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CreationInfoInitParameters struct { + + // POSIX group ID to apply to the root_directory. + OwnerGID *float64 `json:"ownerGid,omitempty" tf:"owner_gid,omitempty"` + + // POSIX user ID to apply to the root_directory. + OwnerUID *float64 `json:"ownerUid,omitempty" tf:"owner_uid,omitempty"` + + // POSIX permissions to apply to the RootDirectory, in the format of an octal number representing the file's mode bits. 
+ Permissions *string `json:"permissions,omitempty" tf:"permissions,omitempty"` +} + +type CreationInfoObservation struct { + + // POSIX group ID to apply to the root_directory. + OwnerGID *float64 `json:"ownerGid,omitempty" tf:"owner_gid,omitempty"` + + // POSIX user ID to apply to the root_directory. + OwnerUID *float64 `json:"ownerUid,omitempty" tf:"owner_uid,omitempty"` + + // POSIX permissions to apply to the RootDirectory, in the format of an octal number representing the file's mode bits. + Permissions *string `json:"permissions,omitempty" tf:"permissions,omitempty"` +} + +type CreationInfoParameters struct { + + // POSIX group ID to apply to the root_directory. + // +kubebuilder:validation:Optional + OwnerGID *float64 `json:"ownerGid" tf:"owner_gid,omitempty"` + + // POSIX user ID to apply to the root_directory. + // +kubebuilder:validation:Optional + OwnerUID *float64 `json:"ownerUid" tf:"owner_uid,omitempty"` + + // POSIX permissions to apply to the RootDirectory, in the format of an octal number representing the file's mode bits. + // +kubebuilder:validation:Optional + Permissions *string `json:"permissions" tf:"permissions,omitempty"` +} + +type PosixUserInitParameters struct { + + // POSIX group ID used for all file system operations using this access point. + GID *float64 `json:"gid,omitempty" tf:"gid,omitempty"` + + // Secondary POSIX group IDs used for all file system operations using this access point. + // +listType=set + SecondaryGids []*float64 `json:"secondaryGids,omitempty" tf:"secondary_gids,omitempty"` + + // POSIX user ID used for all file system operations using this access point. + UID *float64 `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type PosixUserObservation struct { + + // POSIX group ID used for all file system operations using this access point. + GID *float64 `json:"gid,omitempty" tf:"gid,omitempty"` + + // Secondary POSIX group IDs used for all file system operations using this access point. 
+ // +listType=set + SecondaryGids []*float64 `json:"secondaryGids,omitempty" tf:"secondary_gids,omitempty"` + + // POSIX user ID used for all file system operations using this access point. + UID *float64 `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type PosixUserParameters struct { + + // POSIX group ID used for all file system operations using this access point. + // +kubebuilder:validation:Optional + GID *float64 `json:"gid" tf:"gid,omitempty"` + + // Secondary POSIX group IDs used for all file system operations using this access point. + // +kubebuilder:validation:Optional + // +listType=set + SecondaryGids []*float64 `json:"secondaryGids,omitempty" tf:"secondary_gids,omitempty"` + + // POSIX user ID used for all file system operations using this access point. + // +kubebuilder:validation:Optional + UID *float64 `json:"uid" tf:"uid,omitempty"` +} + +type RootDirectoryInitParameters struct { + + // POSIX IDs and permissions to apply to the access point's Root Directory. See Creation Info below. + CreationInfo *CreationInfoInitParameters `json:"creationInfo,omitempty" tf:"creation_info,omitempty"` + + // Path on the EFS file system to expose as the root directory to NFS clients using the access point to access the EFS file system. A path can have up to four subdirectories. If the specified path does not exist, you are required to provide creation_info. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type RootDirectoryObservation struct { + + // POSIX IDs and permissions to apply to the access point's Root Directory. See Creation Info below. + CreationInfo *CreationInfoObservation `json:"creationInfo,omitempty" tf:"creation_info,omitempty"` + + // Path on the EFS file system to expose as the root directory to NFS clients using the access point to access the EFS file system. A path can have up to four subdirectories. If the specified path does not exist, you are required to provide creation_info. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type RootDirectoryParameters struct { + + // POSIX IDs and permissions to apply to the access point's Root Directory. See Creation Info below. + // +kubebuilder:validation:Optional + CreationInfo *CreationInfoParameters `json:"creationInfo,omitempty" tf:"creation_info,omitempty"` + + // Path on the EFS file system to expose as the root directory to NFS clients using the access point to access the EFS file system. A path can have up to four subdirectories. If the specified path does not exist, you are required to provide creation_info. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +// AccessPointSpec defines the desired state of AccessPoint +type AccessPointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccessPointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccessPointInitParameters `json:"initProvider,omitempty"` +} + +// AccessPointStatus defines the observed state of AccessPoint. +type AccessPointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccessPointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AccessPoint is the Schema for the AccessPoints API. 
Provides an Elastic File System (EFS) access point. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type AccessPoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AccessPointSpec `json:"spec"` + Status AccessPointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccessPointList contains a list of AccessPoints +type AccessPointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AccessPoint `json:"items"` +} + +// Repository type metadata. +var ( + AccessPoint_Kind = "AccessPoint" + AccessPoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AccessPoint_Kind}.String() + AccessPoint_KindAPIVersion = AccessPoint_Kind + "." + CRDGroupVersion.String() + AccessPoint_GroupVersionKind = CRDGroupVersion.WithKind(AccessPoint_Kind) +) + +func init() { + SchemeBuilder.Register(&AccessPoint{}, &AccessPointList{}) +} diff --git a/apis/efs/v1beta2/zz_backuppolicy_terraformed.go b/apis/efs/v1beta2/zz_backuppolicy_terraformed.go new file mode 100755 index 0000000000..12a87148ae --- /dev/null +++ b/apis/efs/v1beta2/zz_backuppolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BackupPolicy +func (mg *BackupPolicy) GetTerraformResourceType() string { + return "aws_efs_backup_policy" +} + +// GetConnectionDetailsMapping for this BackupPolicy +func (tr *BackupPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BackupPolicy +func (tr *BackupPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BackupPolicy +func (tr *BackupPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BackupPolicy +func (tr *BackupPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BackupPolicy +func (tr *BackupPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BackupPolicy +func (tr *BackupPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BackupPolicy +func (tr *BackupPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base 
:= map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this BackupPolicy
+func (tr *BackupPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this BackupPolicy using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *BackupPolicy) LateInitialize(attrs []byte) (bool, error) {
+	params := &BackupPolicyParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BackupPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/efs/v1beta2/zz_backuppolicy_types.go b/apis/efs/v1beta2/zz_backuppolicy_types.go new file mode 100755 index 0000000000..88c5d0fad0 --- /dev/null +++ b/apis/efs/v1beta2/zz_backuppolicy_types.go @@ -0,0 +1,149 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BackupPolicyBackupPolicyInitParameters struct { + + // A status of the backup policy. Valid values: ENABLED, DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type BackupPolicyBackupPolicyObservation struct { + + // A status of the backup policy. Valid values: ENABLED, DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type BackupPolicyBackupPolicyParameters struct { + + // A status of the backup policy. Valid values: ENABLED, DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +type BackupPolicyInitParameters struct { + + // A backup_policy object (documented below). + BackupPolicy *BackupPolicyBackupPolicyInitParameters `json:"backupPolicy,omitempty" tf:"backup_policy,omitempty"` + + // The ID of the EFS file system. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Reference to a FileSystem in efs to populate fileSystemId. 
+ // +kubebuilder:validation:Optional + FileSystemIDRef *v1.Reference `json:"fileSystemIdRef,omitempty" tf:"-"` + + // Selector for a FileSystem in efs to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDSelector *v1.Selector `json:"fileSystemIdSelector,omitempty" tf:"-"` +} + +type BackupPolicyObservation struct { + + // A backup_policy object (documented below). + BackupPolicy *BackupPolicyBackupPolicyObservation `json:"backupPolicy,omitempty" tf:"backup_policy,omitempty"` + + // The ID of the EFS file system. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The ID that identifies the file system (e.g., fs-ccfc0d65). + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type BackupPolicyParameters struct { + + // A backup_policy object (documented below). + // +kubebuilder:validation:Optional + BackupPolicy *BackupPolicyBackupPolicyParameters `json:"backupPolicy,omitempty" tf:"backup_policy,omitempty"` + + // The ID of the EFS file system. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Reference to a FileSystem in efs to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDRef *v1.Reference `json:"fileSystemIdRef,omitempty" tf:"-"` + + // Selector for a FileSystem in efs to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDSelector *v1.Selector `json:"fileSystemIdSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// BackupPolicySpec defines the desired state of BackupPolicy +type BackupPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BackupPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BackupPolicyInitParameters `json:"initProvider,omitempty"` +} + +// BackupPolicyStatus defines the observed state of BackupPolicy. +type BackupPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BackupPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BackupPolicy is the Schema for the BackupPolicys API. Provides an Elastic File System (EFS) Backup Policy resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BackupPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.backupPolicy) || (has(self.initProvider) && has(self.initProvider.backupPolicy))",message="spec.forProvider.backupPolicy is a required parameter" + Spec BackupPolicySpec `json:"spec"` + Status BackupPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackupPolicyList contains a list of BackupPolicys +type BackupPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupPolicy `json:"items"` +} + +// Repository type metadata. +var ( + BackupPolicy_Kind = "BackupPolicy" + BackupPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BackupPolicy_Kind}.String() + BackupPolicy_KindAPIVersion = BackupPolicy_Kind + "." 
+ CRDGroupVersion.String() + BackupPolicy_GroupVersionKind = CRDGroupVersion.WithKind(BackupPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&BackupPolicy{}, &BackupPolicyList{}) +} diff --git a/apis/efs/v1beta2/zz_filesystem_terraformed.go b/apis/efs/v1beta2/zz_filesystem_terraformed.go new file mode 100755 index 0000000000..f26a4028dd --- /dev/null +++ b/apis/efs/v1beta2/zz_filesystem_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FileSystem +func (mg *FileSystem) GetTerraformResourceType() string { + return "aws_efs_file_system" +} + +// GetConnectionDetailsMapping for this FileSystem +func (tr *FileSystem) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FileSystem +func (tr *FileSystem) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FileSystem +func (tr *FileSystem) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FileSystem +func (tr *FileSystem) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FileSystem +func (tr *FileSystem) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + 
} + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FileSystem +func (tr *FileSystem) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FileSystem +func (tr *FileSystem) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this FileSystem +func (tr *FileSystem) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FileSystem using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *FileSystem) LateInitialize(attrs []byte) (bool, error) { + params := &FileSystemParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FileSystem) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/efs/v1beta2/zz_filesystem_types.go b/apis/efs/v1beta2/zz_filesystem_types.go new file mode 100755 index 0000000000..93c83425da --- /dev/null +++ b/apis/efs/v1beta2/zz_filesystem_types.go @@ -0,0 +1,324 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FileSystemInitParameters struct { + + // the AWS Availability Zone in which to create the file system. Used to create a file system that uses One Zone storage classes. See user guide for more information. + AvailabilityZoneName *string `json:"availabilityZoneName,omitempty" tf:"availability_zone_name,omitempty"` + + // A unique name (a maximum of 64 characters are allowed) + // used as reference when creating the Elastic File System to ensure idempotent file + // system creation. See Elastic File System + // user guide for more information. + CreationToken *string `json:"creationToken,omitempty" tf:"creation_token,omitempty"` + + // If true, the disk will be encrypted. 
+ Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The ARN for the KMS encryption key. When specifying kms_key_id, encrypted needs to be set to true. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // A file system lifecycle policy object. See lifecycle_policy block below for details. + LifecyclePolicy []LifecyclePolicyInitParameters `json:"lifecyclePolicy,omitempty" tf:"lifecycle_policy,omitempty"` + + // The file system performance mode. Can be either "generalPurpose" or "maxIO" (Default: "generalPurpose"). + PerformanceMode *string `json:"performanceMode,omitempty" tf:"performance_mode,omitempty"` + + // A file system protection object. See protection block below for details. + Protection *ProtectionInitParameters `json:"protection,omitempty" tf:"protection,omitempty"` + + // The throughput, measured in MiB/s, that you want to provision for the file system. Only applicable with throughput_mode set to provisioned. + ProvisionedThroughputInMibps *float64 `json:"provisionedThroughputInMibps,omitempty" tf:"provisioned_throughput_in_mibps,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Throughput mode for the file system. Defaults to bursting. Valid values: bursting, provisioned, or elastic. When using provisioned, also set provisioned_throughput_in_mibps. 
+ ThroughputMode *string `json:"throughputMode,omitempty" tf:"throughput_mode,omitempty"` +} + +type FileSystemObservation struct { + + // Amazon Resource Name of the file system. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The identifier of the Availability Zone in which the file system's One Zone storage classes exist. + AvailabilityZoneID *string `json:"availabilityZoneId,omitempty" tf:"availability_zone_id,omitempty"` + + // the AWS Availability Zone in which to create the file system. Used to create a file system that uses One Zone storage classes. See user guide for more information. + AvailabilityZoneName *string `json:"availabilityZoneName,omitempty" tf:"availability_zone_name,omitempty"` + + // A unique name (a maximum of 64 characters are allowed) + // used as reference when creating the Elastic File System to ensure idempotent file + // system creation. See Elastic File System + // user guide for more information. + CreationToken *string `json:"creationToken,omitempty" tf:"creation_token,omitempty"` + + // The DNS name for the filesystem per documented convention. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // If true, the disk will be encrypted. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The ID that identifies the file system (e.g., fs-ccfc0d65). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ARN for the KMS encryption key. When specifying kms_key_id, encrypted needs to be set to true. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // A file system lifecycle policy object. See lifecycle_policy block below for details. + LifecyclePolicy []LifecyclePolicyObservation `json:"lifecyclePolicy,omitempty" tf:"lifecycle_policy,omitempty"` + + // The value of the file system's Name tag. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The current number of mount targets that the file system has. 
+ NumberOfMountTargets *float64 `json:"numberOfMountTargets,omitempty" tf:"number_of_mount_targets,omitempty"` + + // The AWS account that created the file system. If the file system was created by an IAM user, the parent account to which the user belongs is the owner. + OwnerID *string `json:"ownerId,omitempty" tf:"owner_id,omitempty"` + + // The file system performance mode. Can be either "generalPurpose" or "maxIO" (Default: "generalPurpose"). + PerformanceMode *string `json:"performanceMode,omitempty" tf:"performance_mode,omitempty"` + + // A file system protection object. See protection block below for details. + Protection *ProtectionObservation `json:"protection,omitempty" tf:"protection,omitempty"` + + // The throughput, measured in MiB/s, that you want to provision for the file system. Only applicable with throughput_mode set to provisioned. + ProvisionedThroughputInMibps *float64 `json:"provisionedThroughputInMibps,omitempty" tf:"provisioned_throughput_in_mibps,omitempty"` + + // The latest known metered size (in bytes) of data stored in the file system, the value is not the exact size that the file system was at any point in time. See Size In Bytes. + SizeInBytes []SizeInBytesObservation `json:"sizeInBytes,omitempty" tf:"size_in_bytes,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Throughput mode for the file system. Defaults to bursting. Valid values: bursting, provisioned, or elastic. When using provisioned, also set provisioned_throughput_in_mibps. 
+ ThroughputMode *string `json:"throughputMode,omitempty" tf:"throughput_mode,omitempty"` +} + +type FileSystemParameters struct { + + // the AWS Availability Zone in which to create the file system. Used to create a file system that uses One Zone storage classes. See user guide for more information. + // +kubebuilder:validation:Optional + AvailabilityZoneName *string `json:"availabilityZoneName,omitempty" tf:"availability_zone_name,omitempty"` + + // A unique name (a maximum of 64 characters are allowed) + // used as reference when creating the Elastic File System to ensure idempotent file + // system creation. See Elastic File System + // user guide for more information. + // +kubebuilder:validation:Optional + CreationToken *string `json:"creationToken,omitempty" tf:"creation_token,omitempty"` + + // If true, the disk will be encrypted. + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The ARN for the KMS encryption key. When specifying kms_key_id, encrypted needs to be set to true. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // A file system lifecycle policy object. See lifecycle_policy block below for details. + // +kubebuilder:validation:Optional + LifecyclePolicy []LifecyclePolicyParameters `json:"lifecyclePolicy,omitempty" tf:"lifecycle_policy,omitempty"` + + // The file system performance mode. 
Can be either "generalPurpose" or "maxIO" (Default: "generalPurpose"). + // +kubebuilder:validation:Optional + PerformanceMode *string `json:"performanceMode,omitempty" tf:"performance_mode,omitempty"` + + // A file system protection object. See protection block below for details. + // +kubebuilder:validation:Optional + Protection *ProtectionParameters `json:"protection,omitempty" tf:"protection,omitempty"` + + // The throughput, measured in MiB/s, that you want to provision for the file system. Only applicable with throughput_mode set to provisioned. + // +kubebuilder:validation:Optional + ProvisionedThroughputInMibps *float64 `json:"provisionedThroughputInMibps,omitempty" tf:"provisioned_throughput_in_mibps,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Throughput mode for the file system. Defaults to bursting. Valid values: bursting, provisioned, or elastic. When using provisioned, also set provisioned_throughput_in_mibps. + // +kubebuilder:validation:Optional + ThroughputMode *string `json:"throughputMode,omitempty" tf:"throughput_mode,omitempty"` +} + +type LifecyclePolicyInitParameters struct { + + // Indicates how long it takes to transition files to the archive storage class. Requires transition_to_ia, Elastic Throughput and General Purpose performance mode. Valid values: AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, or AFTER_365_DAYS. + TransitionToArchive *string `json:"transitionToArchive,omitempty" tf:"transition_to_archive,omitempty"` + + // Indicates how long it takes to transition files to the IA storage class. 
Valid values: AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, or AFTER_365_DAYS. + TransitionToIa *string `json:"transitionToIa,omitempty" tf:"transition_to_ia,omitempty"` + + // Describes the policy used to transition a file from infrequent access storage to primary storage. Valid values: AFTER_1_ACCESS. + TransitionToPrimaryStorageClass *string `json:"transitionToPrimaryStorageClass,omitempty" tf:"transition_to_primary_storage_class,omitempty"` +} + +type LifecyclePolicyObservation struct { + + // Indicates how long it takes to transition files to the archive storage class. Requires transition_to_ia, Elastic Throughput and General Purpose performance mode. Valid values: AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, or AFTER_365_DAYS. + TransitionToArchive *string `json:"transitionToArchive,omitempty" tf:"transition_to_archive,omitempty"` + + // Indicates how long it takes to transition files to the IA storage class. Valid values: AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, or AFTER_365_DAYS. + TransitionToIa *string `json:"transitionToIa,omitempty" tf:"transition_to_ia,omitempty"` + + // Describes the policy used to transition a file from infrequent access storage to primary storage. Valid values: AFTER_1_ACCESS. + TransitionToPrimaryStorageClass *string `json:"transitionToPrimaryStorageClass,omitempty" tf:"transition_to_primary_storage_class,omitempty"` +} + +type LifecyclePolicyParameters struct { + + // Indicates how long it takes to transition files to the archive storage class. Requires transition_to_ia, Elastic Throughput and General Purpose performance mode. Valid values: AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, or AFTER_365_DAYS. 
+ // +kubebuilder:validation:Optional + TransitionToArchive *string `json:"transitionToArchive,omitempty" tf:"transition_to_archive,omitempty"` + + // Indicates how long it takes to transition files to the IA storage class. Valid values: AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, or AFTER_365_DAYS. + // +kubebuilder:validation:Optional + TransitionToIa *string `json:"transitionToIa,omitempty" tf:"transition_to_ia,omitempty"` + + // Describes the policy used to transition a file from infequent access storage to primary storage. Valid values: AFTER_1_ACCESS. + // +kubebuilder:validation:Optional + TransitionToPrimaryStorageClass *string `json:"transitionToPrimaryStorageClass,omitempty" tf:"transition_to_primary_storage_class,omitempty"` +} + +type ProtectionInitParameters struct { + + // Indicates whether replication overwrite protection is enabled. Valid values: ENABLED or DISABLED. + ReplicationOverwrite *string `json:"replicationOverwrite,omitempty" tf:"replication_overwrite,omitempty"` +} + +type ProtectionObservation struct { + + // Indicates whether replication overwrite protection is enabled. Valid values: ENABLED or DISABLED. + ReplicationOverwrite *string `json:"replicationOverwrite,omitempty" tf:"replication_overwrite,omitempty"` +} + +type ProtectionParameters struct { + + // Indicates whether replication overwrite protection is enabled. Valid values: ENABLED or DISABLED. + // +kubebuilder:validation:Optional + ReplicationOverwrite *string `json:"replicationOverwrite,omitempty" tf:"replication_overwrite,omitempty"` +} + +type SizeInBytesInitParameters struct { +} + +type SizeInBytesObservation struct { + + // The latest known metered size (in bytes) of data stored in the file system. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` + + // The latest known metered size (in bytes) of data stored in the Infrequent Access storage class. 
+ ValueInIa *float64 `json:"valueInIa,omitempty" tf:"value_in_ia,omitempty"` + + // The latest known metered size (in bytes) of data stored in the Standard storage class. + ValueInStandard *float64 `json:"valueInStandard,omitempty" tf:"value_in_standard,omitempty"` +} + +type SizeInBytesParameters struct { +} + +// FileSystemSpec defines the desired state of FileSystem +type FileSystemSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FileSystemParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FileSystemInitParameters `json:"initProvider,omitempty"` +} + +// FileSystemStatus defines the observed state of FileSystem. +type FileSystemStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FileSystemObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FileSystem is the Schema for the FileSystems API. Provides an Elastic File System (EFS) File System resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type FileSystem struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FileSystemSpec `json:"spec"` + Status FileSystemStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FileSystemList contains a list of FileSystems +type FileSystemList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FileSystem `json:"items"` +} + +// Repository type metadata. +var ( + FileSystem_Kind = "FileSystem" + FileSystem_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FileSystem_Kind}.String() + FileSystem_KindAPIVersion = FileSystem_Kind + "." + CRDGroupVersion.String() + FileSystem_GroupVersionKind = CRDGroupVersion.WithKind(FileSystem_Kind) +) + +func init() { + SchemeBuilder.Register(&FileSystem{}, &FileSystemList{}) +} diff --git a/apis/efs/v1beta2/zz_generated.conversion_hubs.go b/apis/efs/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9d17ee0c93 --- /dev/null +++ b/apis/efs/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *AccessPoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BackupPolicy) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *FileSystem) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ReplicationConfiguration) Hub() {} diff --git a/apis/efs/v1beta2/zz_generated.deepcopy.go b/apis/efs/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..5674126e9b --- /dev/null +++ b/apis/efs/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1784 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPoint) DeepCopyInto(out *AccessPoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPoint. +func (in *AccessPoint) DeepCopy() *AccessPoint { + if in == nil { + return nil + } + out := new(AccessPoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccessPoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessPointInitParameters) DeepCopyInto(out *AccessPointInitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemIDRef != nil { + in, out := &in.FileSystemIDRef, &out.FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileSystemIDSelector != nil { + in, out := &in.FileSystemIDSelector, &out.FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PosixUser != nil { + in, out := &in.PosixUser, &out.PosixUser + *out = new(PosixUserInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RootDirectory != nil { + in, out := &in.RootDirectory, &out.RootDirectory + *out = new(RootDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointInitParameters. +func (in *AccessPointInitParameters) DeepCopy() *AccessPointInitParameters { + if in == nil { + return nil + } + out := new(AccessPointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointList) DeepCopyInto(out *AccessPointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AccessPoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointList. 
+func (in *AccessPointList) DeepCopy() *AccessPointList { + if in == nil { + return nil + } + out := new(AccessPointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccessPointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointObservation) DeepCopyInto(out *AccessPointObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.FileSystemArn != nil { + in, out := &in.FileSystemArn, &out.FileSystemArn + *out = new(string) + **out = **in + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID + *out = new(string) + **out = **in + } + if in.PosixUser != nil { + in, out := &in.PosixUser, &out.PosixUser + *out = new(PosixUserObservation) + (*in).DeepCopyInto(*out) + } + if in.RootDirectory != nil { + in, out := &in.RootDirectory, &out.RootDirectory + *out = new(RootDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, 
&outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointObservation. +func (in *AccessPointObservation) DeepCopy() *AccessPointObservation { + if in == nil { + return nil + } + out := new(AccessPointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointParameters) DeepCopyInto(out *AccessPointParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemIDRef != nil { + in, out := &in.FileSystemIDRef, &out.FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileSystemIDSelector != nil { + in, out := &in.FileSystemIDSelector, &out.FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PosixUser != nil { + in, out := &in.PosixUser, &out.PosixUser + *out = new(PosixUserParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RootDirectory != nil { + in, out := &in.RootDirectory, &out.RootDirectory + *out = new(RootDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointParameters. 
+func (in *AccessPointParameters) DeepCopy() *AccessPointParameters { + if in == nil { + return nil + } + out := new(AccessPointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointSpec) DeepCopyInto(out *AccessPointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointSpec. +func (in *AccessPointSpec) DeepCopy() *AccessPointSpec { + if in == nil { + return nil + } + out := new(AccessPointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointStatus) DeepCopyInto(out *AccessPointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointStatus. +func (in *AccessPointStatus) DeepCopy() *AccessPointStatus { + if in == nil { + return nil + } + out := new(AccessPointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicy) DeepCopyInto(out *BackupPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicy. 
+func (in *BackupPolicy) DeepCopy() *BackupPolicy { + if in == nil { + return nil + } + out := new(BackupPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyBackupPolicyInitParameters) DeepCopyInto(out *BackupPolicyBackupPolicyInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyBackupPolicyInitParameters. +func (in *BackupPolicyBackupPolicyInitParameters) DeepCopy() *BackupPolicyBackupPolicyInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyBackupPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyBackupPolicyObservation) DeepCopyInto(out *BackupPolicyBackupPolicyObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyBackupPolicyObservation. +func (in *BackupPolicyBackupPolicyObservation) DeepCopy() *BackupPolicyBackupPolicyObservation { + if in == nil { + return nil + } + out := new(BackupPolicyBackupPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyBackupPolicyParameters) DeepCopyInto(out *BackupPolicyBackupPolicyParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyBackupPolicyParameters. +func (in *BackupPolicyBackupPolicyParameters) DeepCopy() *BackupPolicyBackupPolicyParameters { + if in == nil { + return nil + } + out := new(BackupPolicyBackupPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyInitParameters) DeepCopyInto(out *BackupPolicyInitParameters) { + *out = *in + if in.BackupPolicy != nil { + in, out := &in.BackupPolicy, &out.BackupPolicy + *out = new(BackupPolicyBackupPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemIDRef != nil { + in, out := &in.FileSystemIDRef, &out.FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileSystemIDSelector != nil { + in, out := &in.FileSystemIDSelector, &out.FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyInitParameters. +func (in *BackupPolicyInitParameters) DeepCopy() *BackupPolicyInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyList) DeepCopyInto(out *BackupPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyList. +func (in *BackupPolicyList) DeepCopy() *BackupPolicyList { + if in == nil { + return nil + } + out := new(BackupPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyObservation) DeepCopyInto(out *BackupPolicyObservation) { + *out = *in + if in.BackupPolicy != nil { + in, out := &in.BackupPolicy, &out.BackupPolicy + *out = new(BackupPolicyBackupPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyObservation. +func (in *BackupPolicyObservation) DeepCopy() *BackupPolicyObservation { + if in == nil { + return nil + } + out := new(BackupPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyParameters) DeepCopyInto(out *BackupPolicyParameters) { + *out = *in + if in.BackupPolicy != nil { + in, out := &in.BackupPolicy, &out.BackupPolicy + *out = new(BackupPolicyBackupPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemIDRef != nil { + in, out := &in.FileSystemIDRef, &out.FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileSystemIDSelector != nil { + in, out := &in.FileSystemIDSelector, &out.FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyParameters. +func (in *BackupPolicyParameters) DeepCopy() *BackupPolicyParameters { + if in == nil { + return nil + } + out := new(BackupPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicySpec) DeepCopyInto(out *BackupPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicySpec. +func (in *BackupPolicySpec) DeepCopy() *BackupPolicySpec { + if in == nil { + return nil + } + out := new(BackupPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyStatus) DeepCopyInto(out *BackupPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyStatus. +func (in *BackupPolicyStatus) DeepCopy() *BackupPolicyStatus { + if in == nil { + return nil + } + out := new(BackupPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreationInfoInitParameters) DeepCopyInto(out *CreationInfoInitParameters) { + *out = *in + if in.OwnerGID != nil { + in, out := &in.OwnerGID, &out.OwnerGID + *out = new(float64) + **out = **in + } + if in.OwnerUID != nil { + in, out := &in.OwnerUID, &out.OwnerUID + *out = new(float64) + **out = **in + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreationInfoInitParameters. +func (in *CreationInfoInitParameters) DeepCopy() *CreationInfoInitParameters { + if in == nil { + return nil + } + out := new(CreationInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreationInfoObservation) DeepCopyInto(out *CreationInfoObservation) { + *out = *in + if in.OwnerGID != nil { + in, out := &in.OwnerGID, &out.OwnerGID + *out = new(float64) + **out = **in + } + if in.OwnerUID != nil { + in, out := &in.OwnerUID, &out.OwnerUID + *out = new(float64) + **out = **in + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreationInfoObservation. 
+func (in *CreationInfoObservation) DeepCopy() *CreationInfoObservation { + if in == nil { + return nil + } + out := new(CreationInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreationInfoParameters) DeepCopyInto(out *CreationInfoParameters) { + *out = *in + if in.OwnerGID != nil { + in, out := &in.OwnerGID, &out.OwnerGID + *out = new(float64) + **out = **in + } + if in.OwnerUID != nil { + in, out := &in.OwnerUID, &out.OwnerUID + *out = new(float64) + **out = **in + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreationInfoParameters. +func (in *CreationInfoParameters) DeepCopy() *CreationInfoParameters { + if in == nil { + return nil + } + out := new(CreationInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationInitParameters) DeepCopyInto(out *DestinationInitParameters) { + *out = *in + if in.AvailabilityZoneName != nil { + in, out := &in.AvailabilityZoneName, &out.AvailabilityZoneName + *out = new(string) + **out = **in + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationInitParameters. 
+func (in *DestinationInitParameters) DeepCopy() *DestinationInitParameters { + if in == nil { + return nil + } + out := new(DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationObservation) DeepCopyInto(out *DestinationObservation) { + *out = *in + if in.AvailabilityZoneName != nil { + in, out := &in.AvailabilityZoneName, &out.AvailabilityZoneName + *out = new(string) + **out = **in + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationObservation. +func (in *DestinationObservation) DeepCopy() *DestinationObservation { + if in == nil { + return nil + } + out := new(DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationParameters) DeepCopyInto(out *DestinationParameters) { + *out = *in + if in.AvailabilityZoneName != nil { + in, out := &in.AvailabilityZoneName, &out.AvailabilityZoneName + *out = new(string) + **out = **in + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationParameters. +func (in *DestinationParameters) DeepCopy() *DestinationParameters { + if in == nil { + return nil + } + out := new(DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystem) DeepCopyInto(out *FileSystem) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystem. +func (in *FileSystem) DeepCopy() *FileSystem { + if in == nil { + return nil + } + out := new(FileSystem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FileSystem) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileSystemInitParameters) DeepCopyInto(out *FileSystemInitParameters) { + *out = *in + if in.AvailabilityZoneName != nil { + in, out := &in.AvailabilityZoneName, &out.AvailabilityZoneName + *out = new(string) + **out = **in + } + if in.CreationToken != nil { + in, out := &in.CreationToken, &out.CreationToken + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LifecyclePolicy != nil { + in, out := &in.LifecyclePolicy, &out.LifecyclePolicy + *out = make([]LifecyclePolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PerformanceMode != nil { + in, out := &in.PerformanceMode, &out.PerformanceMode + *out = new(string) + **out = **in + } + if in.Protection != nil { + in, out := &in.Protection, &out.Protection + *out = new(ProtectionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProvisionedThroughputInMibps != nil { + in, out := &in.ProvisionedThroughputInMibps, &out.ProvisionedThroughputInMibps + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputMode != nil { + in, out := &in.ThroughputMode, &out.ThroughputMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new FileSystemInitParameters. +func (in *FileSystemInitParameters) DeepCopy() *FileSystemInitParameters { + if in == nil { + return nil + } + out := new(FileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemList) DeepCopyInto(out *FileSystemList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FileSystem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemList. +func (in *FileSystemList) DeepCopy() *FileSystemList { + if in == nil { + return nil + } + out := new(FileSystemList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FileSystemList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileSystemObservation) DeepCopyInto(out *FileSystemObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AvailabilityZoneID != nil { + in, out := &in.AvailabilityZoneID, &out.AvailabilityZoneID + *out = new(string) + **out = **in + } + if in.AvailabilityZoneName != nil { + in, out := &in.AvailabilityZoneName, &out.AvailabilityZoneName + *out = new(string) + **out = **in + } + if in.CreationToken != nil { + in, out := &in.CreationToken, &out.CreationToken + *out = new(string) + **out = **in + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LifecyclePolicy != nil { + in, out := &in.LifecyclePolicy, &out.LifecyclePolicy + *out = make([]LifecyclePolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NumberOfMountTargets != nil { + in, out := &in.NumberOfMountTargets, &out.NumberOfMountTargets + *out = new(float64) + **out = **in + } + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID + *out = new(string) + **out = **in + } + if in.PerformanceMode != nil { + in, out := &in.PerformanceMode, &out.PerformanceMode + *out = new(string) + **out = **in + } + if in.Protection != nil { + in, out := &in.Protection, &out.Protection + *out = new(ProtectionObservation) + (*in).DeepCopyInto(*out) + } + if in.ProvisionedThroughputInMibps != nil { + in, out := &in.ProvisionedThroughputInMibps, &out.ProvisionedThroughputInMibps + *out = new(float64) + **out = **in + } + if in.SizeInBytes 
!= nil { + in, out := &in.SizeInBytes, &out.SizeInBytes + *out = make([]SizeInBytesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputMode != nil { + in, out := &in.ThroughputMode, &out.ThroughputMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemObservation. +func (in *FileSystemObservation) DeepCopy() *FileSystemObservation { + if in == nil { + return nil + } + out := new(FileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileSystemParameters) DeepCopyInto(out *FileSystemParameters) { + *out = *in + if in.AvailabilityZoneName != nil { + in, out := &in.AvailabilityZoneName, &out.AvailabilityZoneName + *out = new(string) + **out = **in + } + if in.CreationToken != nil { + in, out := &in.CreationToken, &out.CreationToken + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LifecyclePolicy != nil { + in, out := &in.LifecyclePolicy, &out.LifecyclePolicy + *out = make([]LifecyclePolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PerformanceMode != nil { + in, out := &in.PerformanceMode, &out.PerformanceMode + *out = new(string) + **out = **in + } + if in.Protection != nil { + in, out := &in.Protection, &out.Protection + *out = new(ProtectionParameters) + (*in).DeepCopyInto(*out) + } + if in.ProvisionedThroughputInMibps != nil { + in, out := &in.ProvisionedThroughputInMibps, &out.ProvisionedThroughputInMibps + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputMode != nil { + in, out := &in.ThroughputMode, &out.ThroughputMode 
+ *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemParameters. +func (in *FileSystemParameters) DeepCopy() *FileSystemParameters { + if in == nil { + return nil + } + out := new(FileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemSpec) DeepCopyInto(out *FileSystemSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemSpec. +func (in *FileSystemSpec) DeepCopy() *FileSystemSpec { + if in == nil { + return nil + } + out := new(FileSystemSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemStatus) DeepCopyInto(out *FileSystemStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemStatus. +func (in *FileSystemStatus) DeepCopy() *FileSystemStatus { + if in == nil { + return nil + } + out := new(FileSystemStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LifecyclePolicyInitParameters) DeepCopyInto(out *LifecyclePolicyInitParameters) { + *out = *in + if in.TransitionToArchive != nil { + in, out := &in.TransitionToArchive, &out.TransitionToArchive + *out = new(string) + **out = **in + } + if in.TransitionToIa != nil { + in, out := &in.TransitionToIa, &out.TransitionToIa + *out = new(string) + **out = **in + } + if in.TransitionToPrimaryStorageClass != nil { + in, out := &in.TransitionToPrimaryStorageClass, &out.TransitionToPrimaryStorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicyInitParameters. +func (in *LifecyclePolicyInitParameters) DeepCopy() *LifecyclePolicyInitParameters { + if in == nil { + return nil + } + out := new(LifecyclePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecyclePolicyObservation) DeepCopyInto(out *LifecyclePolicyObservation) { + *out = *in + if in.TransitionToArchive != nil { + in, out := &in.TransitionToArchive, &out.TransitionToArchive + *out = new(string) + **out = **in + } + if in.TransitionToIa != nil { + in, out := &in.TransitionToIa, &out.TransitionToIa + *out = new(string) + **out = **in + } + if in.TransitionToPrimaryStorageClass != nil { + in, out := &in.TransitionToPrimaryStorageClass, &out.TransitionToPrimaryStorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicyObservation. +func (in *LifecyclePolicyObservation) DeepCopy() *LifecyclePolicyObservation { + if in == nil { + return nil + } + out := new(LifecyclePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LifecyclePolicyParameters) DeepCopyInto(out *LifecyclePolicyParameters) { + *out = *in + if in.TransitionToArchive != nil { + in, out := &in.TransitionToArchive, &out.TransitionToArchive + *out = new(string) + **out = **in + } + if in.TransitionToIa != nil { + in, out := &in.TransitionToIa, &out.TransitionToIa + *out = new(string) + **out = **in + } + if in.TransitionToPrimaryStorageClass != nil { + in, out := &in.TransitionToPrimaryStorageClass, &out.TransitionToPrimaryStorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecyclePolicyParameters. +func (in *LifecyclePolicyParameters) DeepCopy() *LifecyclePolicyParameters { + if in == nil { + return nil + } + out := new(LifecyclePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PosixUserInitParameters) DeepCopyInto(out *PosixUserInitParameters) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.SecondaryGids != nil { + in, out := &in.SecondaryGids, &out.SecondaryGids + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PosixUserInitParameters. +func (in *PosixUserInitParameters) DeepCopy() *PosixUserInitParameters { + if in == nil { + return nil + } + out := new(PosixUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PosixUserObservation) DeepCopyInto(out *PosixUserObservation) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.SecondaryGids != nil { + in, out := &in.SecondaryGids, &out.SecondaryGids + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PosixUserObservation. +func (in *PosixUserObservation) DeepCopy() *PosixUserObservation { + if in == nil { + return nil + } + out := new(PosixUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PosixUserParameters) DeepCopyInto(out *PosixUserParameters) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.SecondaryGids != nil { + in, out := &in.SecondaryGids, &out.SecondaryGids + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PosixUserParameters. +func (in *PosixUserParameters) DeepCopy() *PosixUserParameters { + if in == nil { + return nil + } + out := new(PosixUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionInitParameters) DeepCopyInto(out *ProtectionInitParameters) { + *out = *in + if in.ReplicationOverwrite != nil { + in, out := &in.ReplicationOverwrite, &out.ReplicationOverwrite + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionInitParameters. +func (in *ProtectionInitParameters) DeepCopy() *ProtectionInitParameters { + if in == nil { + return nil + } + out := new(ProtectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionObservation) DeepCopyInto(out *ProtectionObservation) { + *out = *in + if in.ReplicationOverwrite != nil { + in, out := &in.ReplicationOverwrite, &out.ReplicationOverwrite + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionObservation. +func (in *ProtectionObservation) DeepCopy() *ProtectionObservation { + if in == nil { + return nil + } + out := new(ProtectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionParameters) DeepCopyInto(out *ProtectionParameters) { + *out = *in + if in.ReplicationOverwrite != nil { + in, out := &in.ReplicationOverwrite, &out.ReplicationOverwrite + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionParameters. +func (in *ProtectionParameters) DeepCopy() *ProtectionParameters { + if in == nil { + return nil + } + out := new(ProtectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationConfiguration) DeepCopyInto(out *ReplicationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfiguration. +func (in *ReplicationConfiguration) DeepCopy() *ReplicationConfiguration { + if in == nil { + return nil + } + out := new(ReplicationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationInitParameters) DeepCopyInto(out *ReplicationConfigurationInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceFileSystemID != nil { + in, out := &in.SourceFileSystemID, &out.SourceFileSystemID + *out = new(string) + **out = **in + } + if in.SourceFileSystemIDRef != nil { + in, out := &in.SourceFileSystemIDRef, &out.SourceFileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceFileSystemIDSelector != nil { + in, out := &in.SourceFileSystemIDSelector, &out.SourceFileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationInitParameters. 
+func (in *ReplicationConfigurationInitParameters) DeepCopy() *ReplicationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ReplicationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationList) DeepCopyInto(out *ReplicationConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationList. +func (in *ReplicationConfigurationList) DeepCopy() *ReplicationConfigurationList { + if in == nil { + return nil + } + out := new(ReplicationConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationConfigurationObservation) DeepCopyInto(out *ReplicationConfigurationObservation) { + *out = *in + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = new(string) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OriginalSourceFileSystemArn != nil { + in, out := &in.OriginalSourceFileSystemArn, &out.OriginalSourceFileSystemArn + *out = new(string) + **out = **in + } + if in.SourceFileSystemArn != nil { + in, out := &in.SourceFileSystemArn, &out.SourceFileSystemArn + *out = new(string) + **out = **in + } + if in.SourceFileSystemID != nil { + in, out := &in.SourceFileSystemID, &out.SourceFileSystemID + *out = new(string) + **out = **in + } + if in.SourceFileSystemRegion != nil { + in, out := &in.SourceFileSystemRegion, &out.SourceFileSystemRegion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationObservation. +func (in *ReplicationConfigurationObservation) DeepCopy() *ReplicationConfigurationObservation { + if in == nil { + return nil + } + out := new(ReplicationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationConfigurationParameters) DeepCopyInto(out *ReplicationConfigurationParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SourceFileSystemID != nil { + in, out := &in.SourceFileSystemID, &out.SourceFileSystemID + *out = new(string) + **out = **in + } + if in.SourceFileSystemIDRef != nil { + in, out := &in.SourceFileSystemIDRef, &out.SourceFileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceFileSystemIDSelector != nil { + in, out := &in.SourceFileSystemIDSelector, &out.SourceFileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationParameters. +func (in *ReplicationConfigurationParameters) DeepCopy() *ReplicationConfigurationParameters { + if in == nil { + return nil + } + out := new(ReplicationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationSpec) DeepCopyInto(out *ReplicationConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationSpec. +func (in *ReplicationConfigurationSpec) DeepCopy() *ReplicationConfigurationSpec { + if in == nil { + return nil + } + out := new(ReplicationConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationConfigurationStatus) DeepCopyInto(out *ReplicationConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationStatus. +func (in *ReplicationConfigurationStatus) DeepCopy() *ReplicationConfigurationStatus { + if in == nil { + return nil + } + out := new(ReplicationConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootDirectoryInitParameters) DeepCopyInto(out *RootDirectoryInitParameters) { + *out = *in + if in.CreationInfo != nil { + in, out := &in.CreationInfo, &out.CreationInfo + *out = new(CreationInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootDirectoryInitParameters. +func (in *RootDirectoryInitParameters) DeepCopy() *RootDirectoryInitParameters { + if in == nil { + return nil + } + out := new(RootDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootDirectoryObservation) DeepCopyInto(out *RootDirectoryObservation) { + *out = *in + if in.CreationInfo != nil { + in, out := &in.CreationInfo, &out.CreationInfo + *out = new(CreationInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootDirectoryObservation. 
+func (in *RootDirectoryObservation) DeepCopy() *RootDirectoryObservation { + if in == nil { + return nil + } + out := new(RootDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootDirectoryParameters) DeepCopyInto(out *RootDirectoryParameters) { + *out = *in + if in.CreationInfo != nil { + in, out := &in.CreationInfo, &out.CreationInfo + *out = new(CreationInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootDirectoryParameters. +func (in *RootDirectoryParameters) DeepCopy() *RootDirectoryParameters { + if in == nil { + return nil + } + out := new(RootDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeInBytesInitParameters) DeepCopyInto(out *SizeInBytesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeInBytesInitParameters. +func (in *SizeInBytesInitParameters) DeepCopy() *SizeInBytesInitParameters { + if in == nil { + return nil + } + out := new(SizeInBytesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SizeInBytesObservation) DeepCopyInto(out *SizeInBytesObservation) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } + if in.ValueInIa != nil { + in, out := &in.ValueInIa, &out.ValueInIa + *out = new(float64) + **out = **in + } + if in.ValueInStandard != nil { + in, out := &in.ValueInStandard, &out.ValueInStandard + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeInBytesObservation. +func (in *SizeInBytesObservation) DeepCopy() *SizeInBytesObservation { + if in == nil { + return nil + } + out := new(SizeInBytesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeInBytesParameters) DeepCopyInto(out *SizeInBytesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeInBytesParameters. +func (in *SizeInBytesParameters) DeepCopy() *SizeInBytesParameters { + if in == nil { + return nil + } + out := new(SizeInBytesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/efs/v1beta2/zz_generated.managed.go b/apis/efs/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..008bea78ff --- /dev/null +++ b/apis/efs/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AccessPoint. +func (mg *AccessPoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AccessPoint. 
+func (mg *AccessPoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AccessPoint. +func (mg *AccessPoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AccessPoint. +func (mg *AccessPoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AccessPoint. +func (mg *AccessPoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AccessPoint. +func (mg *AccessPoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AccessPoint. +func (mg *AccessPoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AccessPoint. +func (mg *AccessPoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AccessPoint. +func (mg *AccessPoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AccessPoint. +func (mg *AccessPoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AccessPoint. +func (mg *AccessPoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AccessPoint. +func (mg *AccessPoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BackupPolicy. 
+func (mg *BackupPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BackupPolicy. +func (mg *BackupPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BackupPolicy. +func (mg *BackupPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BackupPolicy. +func (mg *BackupPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BackupPolicy. +func (mg *BackupPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BackupPolicy. +func (mg *BackupPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BackupPolicy. +func (mg *BackupPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BackupPolicy. +func (mg *BackupPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BackupPolicy. +func (mg *BackupPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BackupPolicy. +func (mg *BackupPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BackupPolicy. +func (mg *BackupPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BackupPolicy. 
+func (mg *BackupPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FileSystem. +func (mg *FileSystem) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FileSystem. +func (mg *FileSystem) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FileSystem. +func (mg *FileSystem) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FileSystem. +func (mg *FileSystem) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FileSystem. +func (mg *FileSystem) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FileSystem. +func (mg *FileSystem) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FileSystem. +func (mg *FileSystem) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FileSystem. +func (mg *FileSystem) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FileSystem. +func (mg *FileSystem) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FileSystem. +func (mg *FileSystem) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FileSystem. 
+func (mg *FileSystem) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FileSystem. +func (mg *FileSystem) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ReplicationConfiguration. 
+func (mg *ReplicationConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ReplicationConfiguration. +func (mg *ReplicationConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/efs/v1beta2/zz_generated.managedlist.go b/apis/efs/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..31beb2ed65 --- /dev/null +++ b/apis/efs/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccessPointList. +func (l *AccessPointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BackupPolicyList. +func (l *BackupPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FileSystemList. +func (l *FileSystemList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ReplicationConfigurationList. 
+func (l *ReplicationConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/efs/v1beta2/zz_generated.resolvers.go b/apis/efs/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..36e13c5a91 --- /dev/null +++ b/apis/efs/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,219 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *AccessPoint) ResolveReferences( // ResolveReferences of this AccessPoint. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FileSystemID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FileSystemIDRef, + Selector: mg.Spec.ForProvider.FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FileSystemID") + } + mg.Spec.ForProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FileSystemIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FileSystemID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FileSystemIDRef, + Selector: mg.Spec.InitProvider.FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FileSystemID") + } + mg.Spec.InitProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FileSystemIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BackupPolicy. 
+func (mg *BackupPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FileSystemID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FileSystemIDRef, + Selector: mg.Spec.ForProvider.FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FileSystemID") + } + mg.Spec.ForProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FileSystemIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FileSystemID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FileSystemIDRef, + Selector: mg.Spec.InitProvider.FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FileSystemID") + } + mg.Spec.InitProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FileSystemIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FileSystem. 
+func (mg *FileSystem) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ReplicationConfiguration. 
+func (mg *ReplicationConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceFileSystemID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SourceFileSystemIDRef, + Selector: mg.Spec.ForProvider.SourceFileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceFileSystemID") + } + mg.Spec.ForProvider.SourceFileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceFileSystemIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "FileSystem", "FileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceFileSystemID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SourceFileSystemIDRef, + Selector: mg.Spec.InitProvider.SourceFileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceFileSystemID") + } + mg.Spec.InitProvider.SourceFileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceFileSystemIDRef = rsp.ResolvedReference + + return nil +} diff --git 
a/apis/efs/v1beta2/zz_groupversion_info.go b/apis/efs/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..ec7af8e2bf --- /dev/null +++ b/apis/efs/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=efs.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "efs.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/efs/v1beta2/zz_replicationconfiguration_terraformed.go b/apis/efs/v1beta2/zz_replicationconfiguration_terraformed.go new file mode 100755 index 0000000000..d3785a6175 --- /dev/null +++ b/apis/efs/v1beta2/zz_replicationconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ReplicationConfiguration +func (mg *ReplicationConfiguration) GetTerraformResourceType() string { + return "aws_efs_replication_configuration" +} + +// GetConnectionDetailsMapping for this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ReplicationConfiguration +func (tr *ReplicationConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ReplicationConfiguration +func (tr *ReplicationConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
ReplicationConfiguration +func (tr *ReplicationConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ReplicationConfiguration +func (tr *ReplicationConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ReplicationConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ReplicationConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &ReplicationConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ReplicationConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/efs/v1beta2/zz_replicationconfiguration_types.go b/apis/efs/v1beta2/zz_replicationconfiguration_types.go new file mode 100755 index 0000000000..1b5b3b5009 --- /dev/null +++ b/apis/efs/v1beta2/zz_replicationconfiguration_types.go @@ -0,0 +1,193 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DestinationInitParameters struct { + + // The availability zone in which the replica should be created. If specified, the replica will be created with One Zone storage. If omitted, regional storage will be used. + AvailabilityZoneName *string `json:"availabilityZoneName,omitempty" tf:"availability_zone_name,omitempty"` + + // The ID of the destination file system for the replication. If no ID is provided, then EFS creates a new file system with the default settings. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The Key ID, ARN, alias, or alias ARN of the KMS key that should be used to encrypt the replica file system. 
If omitted, the default KMS key for EFS /aws/elasticfilesystem will be used. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type DestinationObservation struct { + + // The availability zone in which the replica should be created. If specified, the replica will be created with One Zone storage. If omitted, regional storage will be used. + AvailabilityZoneName *string `json:"availabilityZoneName,omitempty" tf:"availability_zone_name,omitempty"` + + // The ID of the destination file system for the replication. If no ID is provided, then EFS creates a new file system with the default settings. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The Key ID, ARN, alias, or alias ARN of the KMS key that should be used to encrypt the replica file system. If omitted, the default KMS key for EFS /aws/elasticfilesystem will be used. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The region in which the replica should be created. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The status of the replication. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type DestinationParameters struct { + + // The availability zone in which the replica should be created. If specified, the replica will be created with One Zone storage. If omitted, regional storage will be used. + // +kubebuilder:validation:Optional + AvailabilityZoneName *string `json:"availabilityZoneName,omitempty" tf:"availability_zone_name,omitempty"` + + // The ID of the destination file system for the replication. If no ID is provided, then EFS creates a new file system with the default settings. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The Key ID, ARN, alias, or alias ARN of the KMS key that should be used to encrypt the replica file system. 
If omitted, the default KMS key for EFS /aws/elasticfilesystem will be used. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The region in which the replica should be created. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type ReplicationConfigurationInitParameters struct { + + // A destination configuration block (documented below). + Destination *DestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // The ID of the file system that is to be replicated. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SourceFileSystemID *string `json:"sourceFileSystemId,omitempty" tf:"source_file_system_id,omitempty"` + + // Reference to a FileSystem in efs to populate sourceFileSystemId. + // +kubebuilder:validation:Optional + SourceFileSystemIDRef *v1.Reference `json:"sourceFileSystemIdRef,omitempty" tf:"-"` + + // Selector for a FileSystem in efs to populate sourceFileSystemId. + // +kubebuilder:validation:Optional + SourceFileSystemIDSelector *v1.Selector `json:"sourceFileSystemIdSelector,omitempty" tf:"-"` +} + +type ReplicationConfigurationObservation struct { + + // When the replication configuration was created. + CreationTime *string `json:"creationTime,omitempty" tf:"creation_time,omitempty"` + + // A destination configuration block (documented below). + Destination *DestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Amazon Resource Name (ARN) of the original source Amazon EFS file system in the replication configuration. 
+ OriginalSourceFileSystemArn *string `json:"originalSourceFileSystemArn,omitempty" tf:"original_source_file_system_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the current source file system in the replication configuration. + SourceFileSystemArn *string `json:"sourceFileSystemArn,omitempty" tf:"source_file_system_arn,omitempty"` + + // The ID of the file system that is to be replicated. + SourceFileSystemID *string `json:"sourceFileSystemId,omitempty" tf:"source_file_system_id,omitempty"` + + // The AWS Region in which the source Amazon EFS file system is located. + SourceFileSystemRegion *string `json:"sourceFileSystemRegion,omitempty" tf:"source_file_system_region,omitempty"` +} + +type ReplicationConfigurationParameters struct { + + // A destination configuration block (documented below). + // +kubebuilder:validation:Optional + Destination *DestinationParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // The region in which the replica should be created. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The ID of the file system that is to be replicated. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.FileSystem + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SourceFileSystemID *string `json:"sourceFileSystemId,omitempty" tf:"source_file_system_id,omitempty"` + + // Reference to a FileSystem in efs to populate sourceFileSystemId. + // +kubebuilder:validation:Optional + SourceFileSystemIDRef *v1.Reference `json:"sourceFileSystemIdRef,omitempty" tf:"-"` + + // Selector for a FileSystem in efs to populate sourceFileSystemId. 
+ // +kubebuilder:validation:Optional + SourceFileSystemIDSelector *v1.Selector `json:"sourceFileSystemIdSelector,omitempty" tf:"-"` +} + +// ReplicationConfigurationSpec defines the desired state of ReplicationConfiguration +type ReplicationConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ReplicationConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ReplicationConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// ReplicationConfigurationStatus defines the observed state of ReplicationConfiguration. +type ReplicationConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ReplicationConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ReplicationConfiguration is the Schema for the ReplicationConfigurations API. Provides an Elastic File System (EFS) Replication Configuration. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ReplicationConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destination) || (has(self.initProvider) && has(self.initProvider.destination))",message="spec.forProvider.destination is a required parameter" + Spec ReplicationConfigurationSpec `json:"spec"` + Status ReplicationConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ReplicationConfigurationList contains a list of ReplicationConfigurations +type ReplicationConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ReplicationConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + ReplicationConfiguration_Kind = "ReplicationConfiguration" + ReplicationConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ReplicationConfiguration_Kind}.String() + ReplicationConfiguration_KindAPIVersion = ReplicationConfiguration_Kind + "." 
+ CRDGroupVersion.String() + ReplicationConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(ReplicationConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&ReplicationConfiguration{}, &ReplicationConfigurationList{}) +} diff --git a/apis/eks/v1beta1/zz_addon_types.go b/apis/eks/v1beta1/zz_addon_types.go index 8e1b6335b0..8e7a743a44 100755 --- a/apis/eks/v1beta1/zz_addon_types.go +++ b/apis/eks/v1beta1/zz_addon_types.go @@ -24,7 +24,7 @@ type AddonInitParameters struct { AddonVersion *string `json:"addonVersion,omitempty" tf:"addon_version,omitempty"` // – Name of the EKS Cluster. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` // Reference to a Cluster in eks to populate clusterName. @@ -143,7 +143,7 @@ type AddonParameters struct { AddonVersion *string `json:"addonVersion,omitempty" tf:"addon_version,omitempty"` // – Name of the EKS Cluster. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` diff --git a/apis/eks/v1beta1/zz_fargateprofile_types.go b/apis/eks/v1beta1/zz_fargateprofile_types.go index d6489866b9..4498b1eb83 100755 --- a/apis/eks/v1beta1/zz_fargateprofile_types.go +++ b/apis/eks/v1beta1/zz_fargateprofile_types.go @@ -16,7 +16,7 @@ import ( type FargateProfileInitParameters struct { // – Name of the EKS Cluster. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` // Reference to a Cluster in eks to populate clusterName. @@ -99,7 +99,7 @@ type FargateProfileObservation struct { type FargateProfileParameters struct { // – Name of the EKS Cluster. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` diff --git a/apis/eks/v1beta1/zz_generated.conversion_hubs.go b/apis/eks/v1beta1/zz_generated.conversion_hubs.go index 579ddaf389..d039942038 100755 --- a/apis/eks/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/eks/v1beta1/zz_generated.conversion_hubs.go @@ -9,17 +9,8 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Addon) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Cluster) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FargateProfile) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *IdentityProviderConfig) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *NodeGroup) Hub() {} - // Hub marks this type as a conversion hub. func (tr *PodIdentityAssociation) Hub() {} diff --git a/apis/eks/v1beta1/zz_generated.conversion_spokes.go b/apis/eks/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..194ed90ead --- /dev/null +++ b/apis/eks/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Cluster to the hub type. +func (tr *Cluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Cluster type. +func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this IdentityProviderConfig to the hub type. +func (tr *IdentityProviderConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IdentityProviderConfig type. 
+func (tr *IdentityProviderConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this NodeGroup to the hub type. +func (tr *NodeGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the NodeGroup type. +func (tr *NodeGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/eks/v1beta1/zz_generated.resolvers.go b/apis/eks/v1beta1/zz_generated.resolvers.go index 72dc8b70e3..913afc5ee2 100644 --- a/apis/eks/v1beta1/zz_generated.resolvers.go +++ b/apis/eks/v1beta1/zz_generated.resolvers.go @@ -28,7 +28,7 @@ func (mg *Addon) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != 
nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -66,7 +66,7 @@ func (mg *Addon) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.ServiceAccountRoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServiceAccountRoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -284,7 +284,7 @@ func (mg *FargateProfile) ResolveReferences(ctx context.Context, c client.Reader var mrsp reference.MultiResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -341,7 +341,7 @@ func (mg *FargateProfile) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.SubnetIDRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -648,7 +648,7 @@ func (mg *PodIdentityAssociation) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -686,7 +686,7 @@ func (mg *PodIdentityAssociation) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/eks/v1beta1/zz_podidentityassociation_types.go b/apis/eks/v1beta1/zz_podidentityassociation_types.go index f4baeb905e..20bce945ae 100755 --- a/apis/eks/v1beta1/zz_podidentityassociation_types.go +++ b/apis/eks/v1beta1/zz_podidentityassociation_types.go @@ -16,7 +16,7 @@ import ( type PodIdentityAssociationInitParameters struct { // The name of the cluster to create the association in. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` // Reference to a Cluster in eks to populate clusterName. @@ -85,7 +85,7 @@ type PodIdentityAssociationObservation struct { type PodIdentityAssociationParameters struct { // The name of the cluster to create the association in. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` diff --git a/apis/eks/v1beta2/zz_cluster_terraformed.go b/apis/eks/v1beta2/zz_cluster_terraformed.go new file mode 100755 index 0000000000..d9ddaf1fee --- /dev/null +++ b/apis/eks/v1beta2/zz_cluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "aws_eks_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/eks/v1beta2/zz_cluster_types.go b/apis/eks/v1beta2/zz_cluster_types.go new file mode 100755 index 0000000000..7cf24b6c1e --- /dev/null +++ b/apis/eks/v1beta2/zz_cluster_types.go @@ -0,0 +1,566 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessConfigInitParameters struct { + + // The authentication mode for the cluster. Valid values are CONFIG_MAP, API or API_AND_CONFIG_MAP + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // Whether or not to bootstrap the access config values to the cluster. Default is true. + BootstrapClusterCreatorAdminPermissions *bool `json:"bootstrapClusterCreatorAdminPermissions,omitempty" tf:"bootstrap_cluster_creator_admin_permissions,omitempty"` +} + +type AccessConfigObservation struct { + + // The authentication mode for the cluster. 
Valid values are CONFIG_MAP, API or API_AND_CONFIG_MAP + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // Whether or not to bootstrap the access config values to the cluster. Default is true. + BootstrapClusterCreatorAdminPermissions *bool `json:"bootstrapClusterCreatorAdminPermissions,omitempty" tf:"bootstrap_cluster_creator_admin_permissions,omitempty"` +} + +type AccessConfigParameters struct { + + // The authentication mode for the cluster. Valid values are CONFIG_MAP, API or API_AND_CONFIG_MAP + // +kubebuilder:validation:Optional + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // Whether or not to bootstrap the access config values to the cluster. Default is true. + // +kubebuilder:validation:Optional + BootstrapClusterCreatorAdminPermissions *bool `json:"bootstrapClusterCreatorAdminPermissions,omitempty" tf:"bootstrap_cluster_creator_admin_permissions,omitempty"` +} + +type CertificateAuthorityInitParameters struct { +} + +type CertificateAuthorityObservation struct { + + // Base64 encoded certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster. + Data *string `json:"data,omitempty" tf:"data,omitempty"` +} + +type CertificateAuthorityParameters struct { +} + +type ClusterInitParameters struct { + + // Configuration block for the access config associated with your cluster, see Amazon EKS Access Entries. + AccessConfig *AccessConfigInitParameters `json:"accessConfig,omitempty" tf:"access_config,omitempty"` + + // List of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging. + // +listType=set + EnabledClusterLogTypes []*string `json:"enabledClusterLogTypes,omitempty" tf:"enabled_cluster_log_types,omitempty"` + + // Configuration block with encryption configuration for the cluster. 
Only available on Kubernetes 1.13 and above clusters created after March 6, 2020. Detailed below. + EncryptionConfig *EncryptionConfigInitParameters `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` + + // Configuration block with kubernetes network configuration for the cluster. Detailed below. + KubernetesNetworkConfig *KubernetesNetworkConfigInitParameters `json:"kubernetesNetworkConfig,omitempty" tf:"kubernetes_network_config,omitempty"` + + // Configuration block representing the configuration of your local Amazon EKS cluster on an AWS Outpost. This block isn't available for creating Amazon EKS clusters on the AWS cloud. + OutpostConfig *OutpostConfigInitParameters `json:"outpostConfig,omitempty" tf:"outpost_config,omitempty"` + + // ARN of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. Ensure the resource configuration includes explicit dependencies on the IAM Role permissions by adding depends_on if using the aws_iam_role_policy resource or aws_iam_role_policy_attachment resource, otherwise EKS cannot delete EKS managed EC2 infrastructure such as Security Groups on EKS Cluster deletion. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for the VPC associated with your cluster. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide. Detailed below. Also contains attributes detailed in the Attributes section. + VPCConfig *VPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` + + // – Desired Kubernetes master version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except those automatically triggered by EKS. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by EKS. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ClusterObservation struct { + + // Configuration block for the access config associated with your cluster, see Amazon EKS Access Entries. + AccessConfig *AccessConfigObservation `json:"accessConfig,omitempty" tf:"access_config,omitempty"` + + // ARN of the cluster. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Attribute block containing certificate-authority-data for your cluster. Detailed below. + CertificateAuthority []CertificateAuthorityObservation `json:"certificateAuthority,omitempty" tf:"certificate_authority,omitempty"` + + // The ID of your local Amazon EKS cluster on the AWS Outpost. This attribute isn't available for an AWS EKS cluster on AWS cloud. + ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` + + // Unix epoch timestamp in seconds for when the cluster was created. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // List of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging. 
+ // +listType=set + EnabledClusterLogTypes []*string `json:"enabledClusterLogTypes,omitempty" tf:"enabled_cluster_log_types,omitempty"` + + // Configuration block with encryption configuration for the cluster. Only available on Kubernetes 1.13 and above clusters created after March 6, 2020. Detailed below. + EncryptionConfig *EncryptionConfigObservation `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` + + // Endpoint for your Kubernetes API server. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // Name of the cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Attribute block containing identity provider information for your cluster. Only available on Kubernetes version 1.13 and 1.14 clusters created or upgraded on or after September 3, 2019. Detailed below. + Identity []IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Configuration block with kubernetes network configuration for the cluster. Detailed below. + KubernetesNetworkConfig *KubernetesNetworkConfigObservation `json:"kubernetesNetworkConfig,omitempty" tf:"kubernetes_network_config,omitempty"` + + // Configuration block representing the configuration of your local Amazon EKS cluster on an AWS Outpost. This block isn't available for creating Amazon EKS clusters on the AWS cloud. + OutpostConfig *OutpostConfigObservation `json:"outpostConfig,omitempty" tf:"outpost_config,omitempty"` + + // Platform version for the cluster. + PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // ARN of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. 
Ensure the resource configuration includes explicit dependencies on the IAM Role permissions by adding depends_on if using the aws_iam_role_policy resource or aws_iam_role_policy_attachment resource, otherwise EKS cannot delete EKS managed EC2 infrastructure such as Security Groups on EKS Cluster deletion. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Status of the EKS cluster. One of CREATING, ACTIVE, DELETING, FAILED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block for the VPC associated with your cluster. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide. Detailed below. Also contains attributes detailed in the Attributes section. + VPCConfig *VPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` + + // – Desired Kubernetes master version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except those automatically triggered by EKS. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by EKS. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ClusterParameters struct { + + // Configuration block for the access config associated with your cluster, see Amazon EKS Access Entries. 
+ // +kubebuilder:validation:Optional + AccessConfig *AccessConfigParameters `json:"accessConfig,omitempty" tf:"access_config,omitempty"` + + // List of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging. + // +kubebuilder:validation:Optional + // +listType=set + EnabledClusterLogTypes []*string `json:"enabledClusterLogTypes,omitempty" tf:"enabled_cluster_log_types,omitempty"` + + // Configuration block with encryption configuration for the cluster. Only available on Kubernetes 1.13 and above clusters created after March 6, 2020. Detailed below. + // +kubebuilder:validation:Optional + EncryptionConfig *EncryptionConfigParameters `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` + + // Configuration block with kubernetes network configuration for the cluster. Detailed below. + // +kubebuilder:validation:Optional + KubernetesNetworkConfig *KubernetesNetworkConfigParameters `json:"kubernetesNetworkConfig,omitempty" tf:"kubernetes_network_config,omitempty"` + + // Configuration block representing the configuration of your local Amazon EKS cluster on an AWS Outpost. This block isn't available for creating Amazon EKS clusters on the AWS cloud. + // +kubebuilder:validation:Optional + OutpostConfig *OutpostConfigParameters `json:"outpostConfig,omitempty" tf:"outpost_config,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ARN of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. Ensure the resource configuration includes explicit dependencies on the IAM Role permissions by adding depends_on if using the aws_iam_role_policy resource or aws_iam_role_policy_attachment resource, otherwise EKS cannot delete EKS managed EC2 infrastructure such as Security Groups on EKS Cluster deletion. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for the VPC associated with your cluster. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide. Detailed below. Also contains attributes detailed in the Attributes section. + // +kubebuilder:validation:Optional + VPCConfig *VPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` + + // – Desired Kubernetes master version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except those automatically triggered by EKS. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by EKS. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ControlPlanePlacementInitParameters struct { + + // The name of the placement group for the Kubernetes control plane instances. This setting can't be changed after cluster creation. 
+ GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` +} + +type ControlPlanePlacementObservation struct { + + // The name of the placement group for the Kubernetes control plane instances. This setting can't be changed after cluster creation. + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` +} + +type ControlPlanePlacementParameters struct { + + // The name of the placement group for the Kubernetes control plane instances. This setting can't be changed after cluster creation. + // +kubebuilder:validation:Optional + GroupName *string `json:"groupName" tf:"group_name,omitempty"` +} + +type EncryptionConfigInitParameters struct { + + // Configuration block with provider for encryption. Detailed below. + Provider *ProviderInitParameters `json:"provider,omitempty" tf:"provider,omitempty"` + + // List of strings with resources to be encrypted. Valid values: secrets. + // +listType=set + Resources []*string `json:"resources,omitempty" tf:"resources,omitempty"` +} + +type EncryptionConfigObservation struct { + + // Configuration block with provider for encryption. Detailed below. + Provider *ProviderObservation `json:"provider,omitempty" tf:"provider,omitempty"` + + // List of strings with resources to be encrypted. Valid values: secrets. + // +listType=set + Resources []*string `json:"resources,omitempty" tf:"resources,omitempty"` +} + +type EncryptionConfigParameters struct { + + // Configuration block with provider for encryption. Detailed below. + // +kubebuilder:validation:Optional + Provider *ProviderParameters `json:"provider" tf:"provider,omitempty"` + + // List of strings with resources to be encrypted. Valid values: secrets. 
+ // +kubebuilder:validation:Optional + // +listType=set + Resources []*string `json:"resources" tf:"resources,omitempty"` +} + +type IdentityInitParameters struct { +} + +type IdentityObservation struct { + + // Nested block containing OpenID Connect identity provider information for the cluster. Detailed below. + Oidc []OidcObservation `json:"oidc,omitempty" tf:"oidc,omitempty"` +} + +type IdentityParameters struct { +} + +type KubernetesNetworkConfigInitParameters struct { + + // The IP family used to assign Kubernetes pod and service addresses. Valid values are ipv4 (default) and ipv6. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created. + IPFamily *string `json:"ipFamily,omitempty" tf:"ip_family,omitempty"` + + // The CIDR block to assign Kubernetes pod and service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC. You can only specify a custom CIDR block when you create a cluster, changing this value will force a new cluster to be created. The block must meet the following requirements: + ServiceIPv4Cidr *string `json:"serviceIpv4Cidr,omitempty" tf:"service_ipv4_cidr,omitempty"` +} + +type KubernetesNetworkConfigObservation struct { + + // The IP family used to assign Kubernetes pod and service addresses. Valid values are ipv4 (default) and ipv6. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created. + IPFamily *string `json:"ipFamily,omitempty" tf:"ip_family,omitempty"` + + // The CIDR block to assign Kubernetes pod and service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. 
We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC. You can only specify a custom CIDR block when you create a cluster, changing this value will force a new cluster to be created. The block must meet the following requirements: + ServiceIPv4Cidr *string `json:"serviceIpv4Cidr,omitempty" tf:"service_ipv4_cidr,omitempty"` + + // The CIDR block that Kubernetes pod and service IP addresses are assigned from if you specified ipv6 for ipFamily when you created the cluster. Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster. + ServiceIPv6Cidr *string `json:"serviceIpv6Cidr,omitempty" tf:"service_ipv6_cidr,omitempty"` +} + +type KubernetesNetworkConfigParameters struct { + + // The IP family used to assign Kubernetes pod and service addresses. Valid values are ipv4 (default) and ipv6. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created. + // +kubebuilder:validation:Optional + IPFamily *string `json:"ipFamily,omitempty" tf:"ip_family,omitempty"` + + // The CIDR block to assign Kubernetes pod and service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC. You can only specify a custom CIDR block when you create a cluster, changing this value will force a new cluster to be created. The block must meet the following requirements: + // +kubebuilder:validation:Optional + ServiceIPv4Cidr *string `json:"serviceIpv4Cidr,omitempty" tf:"service_ipv4_cidr,omitempty"` +} + +type OidcInitParameters struct { +} + +type OidcObservation struct { + + // Issuer URL for the OpenID Connect identity provider. 
+ Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` +} + +type OidcParameters struct { +} + +type OutpostConfigInitParameters struct { + + // The Amazon EC2 instance type that you want to use for your local Amazon EKS cluster on Outposts. The instance type that you specify is used for all Kubernetes control plane instances. The instance type can't be changed after cluster creation. Choose an instance type based on the number of nodes that your cluster will have. If your cluster will have: + ControlPlaneInstanceType *string `json:"controlPlaneInstanceType,omitempty" tf:"control_plane_instance_type,omitempty"` + + // An object representing the placement configuration for all the control plane instances of your local Amazon EKS cluster on AWS Outpost. + // The control_plane_placement configuration block supports the following arguments: + ControlPlanePlacement *ControlPlanePlacementInitParameters `json:"controlPlanePlacement,omitempty" tf:"control_plane_placement,omitempty"` + + // The ARN of the Outpost that you want to use for your local Amazon EKS cluster on Outposts. This argument is a list of arns, but only a single Outpost ARN is supported currently. + // +listType=set + OutpostArns []*string `json:"outpostArns,omitempty" tf:"outpost_arns,omitempty"` +} + +type OutpostConfigObservation struct { + + // The Amazon EC2 instance type that you want to use for your local Amazon EKS cluster on Outposts. The instance type that you specify is used for all Kubernetes control plane instances. The instance type can't be changed after cluster creation. Choose an instance type based on the number of nodes that your cluster will have. If your cluster will have: + ControlPlaneInstanceType *string `json:"controlPlaneInstanceType,omitempty" tf:"control_plane_instance_type,omitempty"` + + // An object representing the placement configuration for all the control plane instances of your local Amazon EKS cluster on AWS Outpost. 
+ // The control_plane_placement configuration block supports the following arguments: + ControlPlanePlacement *ControlPlanePlacementObservation `json:"controlPlanePlacement,omitempty" tf:"control_plane_placement,omitempty"` + + // The ARN of the Outpost that you want to use for your local Amazon EKS cluster on Outposts. This argument is a list of arns, but only a single Outpost ARN is supported currently. + // +listType=set + OutpostArns []*string `json:"outpostArns,omitempty" tf:"outpost_arns,omitempty"` +} + +type OutpostConfigParameters struct { + + // The Amazon EC2 instance type that you want to use for your local Amazon EKS cluster on Outposts. The instance type that you specify is used for all Kubernetes control plane instances. The instance type can't be changed after cluster creation. Choose an instance type based on the number of nodes that your cluster will have. If your cluster will have: + // +kubebuilder:validation:Optional + ControlPlaneInstanceType *string `json:"controlPlaneInstanceType" tf:"control_plane_instance_type,omitempty"` + + // An object representing the placement configuration for all the control plane instances of your local Amazon EKS cluster on AWS Outpost. + // The control_plane_placement configuration block supports the following arguments: + // +kubebuilder:validation:Optional + ControlPlanePlacement *ControlPlanePlacementParameters `json:"controlPlanePlacement,omitempty" tf:"control_plane_placement,omitempty"` + + // The ARN of the Outpost that you want to use for your local Amazon EKS cluster on Outposts. This argument is a list of arns, but only a single Outpost ARN is supported currently. + // +kubebuilder:validation:Optional + // +listType=set + OutpostArns []*string `json:"outpostArns" tf:"outpost_arns,omitempty"` +} + +type ProviderInitParameters struct { + + // ARN of the Key Management Service (KMS) customer master key (CMK). 
The CMK must be symmetric, created in the same region as the cluster, and if the CMK was created in a different account, the user must have access to the CMK. For more information, see Allowing Users in Other Accounts to Use a CMK in the AWS Key Management Service Developer Guide. + KeyArn *string `json:"keyArn,omitempty" tf:"key_arn,omitempty"` +} + +type ProviderObservation struct { + + // ARN of the Key Management Service (KMS) customer master key (CMK). The CMK must be symmetric, created in the same region as the cluster, and if the CMK was created in a different account, the user must have access to the CMK. For more information, see Allowing Users in Other Accounts to Use a CMK in the AWS Key Management Service Developer Guide. + KeyArn *string `json:"keyArn,omitempty" tf:"key_arn,omitempty"` +} + +type ProviderParameters struct { + + // ARN of the Key Management Service (KMS) customer master key (CMK). The CMK must be symmetric, created in the same region as the cluster, and if the CMK was created in a different account, the user must have access to the CMK. For more information, see Allowing Users in Other Accounts to Use a CMK in the AWS Key Management Service Developer Guide. + // +kubebuilder:validation:Optional + KeyArn *string `json:"keyArn" tf:"key_arn,omitempty"` +} + +type VPCConfigInitParameters struct { + + // Whether the Amazon EKS private API server endpoint is enabled. Default is false. + EndpointPrivateAccess *bool `json:"endpointPrivateAccess,omitempty" tf:"endpoint_private_access,omitempty"` + + // Whether the Amazon EKS public API server endpoint is enabled. Default is true. + EndpointPublicAccess *bool `json:"endpointPublicAccess,omitempty" tf:"endpoint_public_access,omitempty"` + + // List of CIDR blocks. Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0. 
+ // +listType=set + PublicAccessCidrs []*string `json:"publicAccessCidrs,omitempty" tf:"public_access_cidrs,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigObservation struct { + + // Cluster security group that was created by Amazon EKS for the cluster. 
Managed node groups use this security group for control-plane-to-data-plane communication. + ClusterSecurityGroupID *string `json:"clusterSecurityGroupId,omitempty" tf:"cluster_security_group_id,omitempty"` + + // Whether the Amazon EKS private API server endpoint is enabled. Default is false. + EndpointPrivateAccess *bool `json:"endpointPrivateAccess,omitempty" tf:"endpoint_private_access,omitempty"` + + // Whether the Amazon EKS public API server endpoint is enabled. Default is true. + EndpointPublicAccess *bool `json:"endpointPublicAccess,omitempty" tf:"endpoint_public_access,omitempty"` + + // List of CIDR blocks. Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0. + // +listType=set + PublicAccessCidrs []*string `json:"publicAccessCidrs,omitempty" tf:"public_access_cidrs,omitempty"` + + // account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // ID of the VPC associated with your cluster. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCConfigParameters struct { + + // Whether the Amazon EKS private API server endpoint is enabled. Default is false. + // +kubebuilder:validation:Optional + EndpointPrivateAccess *bool `json:"endpointPrivateAccess,omitempty" tf:"endpoint_private_access,omitempty"` + + // Whether the Amazon EKS public API server endpoint is enabled. Default is true. 
+ // +kubebuilder:validation:Optional + EndpointPublicAccess *bool `json:"endpointPublicAccess,omitempty" tf:"endpoint_public_access,omitempty"` + + // List of CIDR blocks. Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0. + // +kubebuilder:validation:Optional + // +listType=set + PublicAccessCidrs []*string `json:"publicAccessCidrs,omitempty" tf:"public_access_cidrs,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the Clusters API. 
Manages an EKS Cluster +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vpcConfig) || (has(self.initProvider) && has(self.initProvider.vpcConfig))",message="spec.forProvider.vpcConfig is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." 
+ CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/eks/v1beta2/zz_generated.conversion_hubs.go b/apis/eks/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..07686d82a3 --- /dev/null +++ b/apis/eks/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *IdentityProviderConfig) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *NodeGroup) Hub() {} diff --git a/apis/eks/v1beta2/zz_generated.deepcopy.go b/apis/eks/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b7d452b9c4 --- /dev/null +++ b/apis/eks/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2928 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessConfigInitParameters) DeepCopyInto(out *AccessConfigInitParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.BootstrapClusterCreatorAdminPermissions != nil { + in, out := &in.BootstrapClusterCreatorAdminPermissions, &out.BootstrapClusterCreatorAdminPermissions + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConfigInitParameters. +func (in *AccessConfigInitParameters) DeepCopy() *AccessConfigInitParameters { + if in == nil { + return nil + } + out := new(AccessConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConfigObservation) DeepCopyInto(out *AccessConfigObservation) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.BootstrapClusterCreatorAdminPermissions != nil { + in, out := &in.BootstrapClusterCreatorAdminPermissions, &out.BootstrapClusterCreatorAdminPermissions + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConfigObservation. +func (in *AccessConfigObservation) DeepCopy() *AccessConfigObservation { + if in == nil { + return nil + } + out := new(AccessConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessConfigParameters) DeepCopyInto(out *AccessConfigParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.BootstrapClusterCreatorAdminPermissions != nil { + in, out := &in.BootstrapClusterCreatorAdminPermissions, &out.BootstrapClusterCreatorAdminPermissions + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConfigParameters. +func (in *AccessConfigParameters) DeepCopy() *AccessConfigParameters { + if in == nil { + return nil + } + out := new(AccessConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingGroupsInitParameters) DeepCopyInto(out *AutoscalingGroupsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroupsInitParameters. +func (in *AutoscalingGroupsInitParameters) DeepCopy() *AutoscalingGroupsInitParameters { + if in == nil { + return nil + } + out := new(AutoscalingGroupsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingGroupsObservation) DeepCopyInto(out *AutoscalingGroupsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroupsObservation. 
+func (in *AutoscalingGroupsObservation) DeepCopy() *AutoscalingGroupsObservation { + if in == nil { + return nil + } + out := new(AutoscalingGroupsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingGroupsParameters) DeepCopyInto(out *AutoscalingGroupsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingGroupsParameters. +func (in *AutoscalingGroupsParameters) DeepCopy() *AutoscalingGroupsParameters { + if in == nil { + return nil + } + out := new(AutoscalingGroupsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthorityInitParameters) DeepCopyInto(out *CertificateAuthorityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityInitParameters. +func (in *CertificateAuthorityInitParameters) DeepCopy() *CertificateAuthorityInitParameters { + if in == nil { + return nil + } + out := new(CertificateAuthorityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthorityObservation) DeepCopyInto(out *CertificateAuthorityObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityObservation. 
+func (in *CertificateAuthorityObservation) DeepCopy() *CertificateAuthorityObservation { + if in == nil { + return nil + } + out := new(CertificateAuthorityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthorityParameters) DeepCopyInto(out *CertificateAuthorityParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthorityParameters. +func (in *CertificateAuthorityParameters) DeepCopy() *CertificateAuthorityParameters { + if in == nil { + return nil + } + out := new(CertificateAuthorityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.AccessConfig != nil { + in, out := &in.AccessConfig, &out.AccessConfig + *out = new(AccessConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnabledClusterLogTypes != nil { + in, out := &in.EnabledClusterLogTypes, &out.EnabledClusterLogTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(EncryptionConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KubernetesNetworkConfig != nil { + in, out := &in.KubernetesNetworkConfig, &out.KubernetesNetworkConfig + *out = new(KubernetesNetworkConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutpostConfig != nil { + in, out := &in.OutpostConfig, &out.OutpostConfig + *out = new(OutpostConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.AccessConfig != nil { + in, out := &in.AccessConfig, &out.AccessConfig + *out = new(AccessConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CertificateAuthority != nil { + in, out := &in.CertificateAuthority, &out.CertificateAuthority + *out = make([]CertificateAuthorityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterID != nil { + in, out := &in.ClusterID, &out.ClusterID + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.EnabledClusterLogTypes != nil { + in, out := &in.EnabledClusterLogTypes, &out.EnabledClusterLogTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(EncryptionConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = make([]IdentityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubernetesNetworkConfig != nil { + in, out := &in.KubernetesNetworkConfig, &out.KubernetesNetworkConfig + *out = new(KubernetesNetworkConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.OutpostConfig != nil { + in, out := &in.OutpostConfig, &out.OutpostConfig + *out = new(OutpostConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, 
&out.PlatformVersion + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.AccessConfig != nil { + in, out := &in.AccessConfig, &out.AccessConfig + *out = new(AccessConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.EnabledClusterLogTypes != nil { + in, out := &in.EnabledClusterLogTypes, &out.EnabledClusterLogTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = new(EncryptionConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.KubernetesNetworkConfig != nil { + in, out := &in.KubernetesNetworkConfig, &out.KubernetesNetworkConfig + *out = new(KubernetesNetworkConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.OutpostConfig != nil { + in, out := &in.OutpostConfig, &out.OutpostConfig + *out = new(OutpostConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, 
&out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlanePlacementInitParameters) DeepCopyInto(out *ControlPlanePlacementInitParameters) { + *out = *in + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlanePlacementInitParameters. 
+func (in *ControlPlanePlacementInitParameters) DeepCopy() *ControlPlanePlacementInitParameters { + if in == nil { + return nil + } + out := new(ControlPlanePlacementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlanePlacementObservation) DeepCopyInto(out *ControlPlanePlacementObservation) { + *out = *in + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlanePlacementObservation. +func (in *ControlPlanePlacementObservation) DeepCopy() *ControlPlanePlacementObservation { + if in == nil { + return nil + } + out := new(ControlPlanePlacementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlanePlacementParameters) DeepCopyInto(out *ControlPlanePlacementParameters) { + *out = *in + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlanePlacementParameters. +func (in *ControlPlanePlacementParameters) DeepCopy() *ControlPlanePlacementParameters { + if in == nil { + return nil + } + out := new(ControlPlanePlacementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfigInitParameters) DeepCopyInto(out *EncryptionConfigInitParameters) { + *out = *in + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(ProviderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigInitParameters. +func (in *EncryptionConfigInitParameters) DeepCopy() *EncryptionConfigInitParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigObservation) DeepCopyInto(out *EncryptionConfigObservation) { + *out = *in + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(ProviderObservation) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigObservation. +func (in *EncryptionConfigObservation) DeepCopy() *EncryptionConfigObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfigParameters) DeepCopyInto(out *EncryptionConfigParameters) { + *out = *in + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(ProviderParameters) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigParameters. +func (in *EncryptionConfigParameters) DeepCopy() *EncryptionConfigParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.Oidc != nil { + in, out := &in.Oidc, &out.Oidc + *out = make([]OidcObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. 
+func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig. +func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig { + if in == nil { + return nil + } + out := new(IdentityProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IdentityProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityProviderConfigInitParameters) DeepCopyInto(out *IdentityProviderConfigInitParameters) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterNameRef != nil { + in, out := &in.ClusterNameRef, &out.ClusterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterNameSelector != nil { + in, out := &in.ClusterNameSelector, &out.ClusterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Oidc != nil { + in, out := &in.Oidc, &out.Oidc + *out = new(IdentityProviderConfigOidcInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfigInitParameters. +func (in *IdentityProviderConfigInitParameters) DeepCopy() *IdentityProviderConfigInitParameters { + if in == nil { + return nil + } + out := new(IdentityProviderConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderConfigList) DeepCopyInto(out *IdentityProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IdentityProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfigList. 
+func (in *IdentityProviderConfigList) DeepCopy() *IdentityProviderConfigList { + if in == nil { + return nil + } + out := new(IdentityProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IdentityProviderConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderConfigObservation) DeepCopyInto(out *IdentityProviderConfigObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Oidc != nil { + in, out := &in.Oidc, &out.Oidc + *out = new(IdentityProviderConfigOidcObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfigObservation. 
+func (in *IdentityProviderConfigObservation) DeepCopy() *IdentityProviderConfigObservation { + if in == nil { + return nil + } + out := new(IdentityProviderConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderConfigOidcInitParameters) DeepCopyInto(out *IdentityProviderConfigOidcInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.GroupsClaim != nil { + in, out := &in.GroupsClaim, &out.GroupsClaim + *out = new(string) + **out = **in + } + if in.GroupsPrefix != nil { + in, out := &in.GroupsPrefix, &out.GroupsPrefix + *out = new(string) + **out = **in + } + if in.IssuerURL != nil { + in, out := &in.IssuerURL, &out.IssuerURL + *out = new(string) + **out = **in + } + if in.RequiredClaims != nil { + in, out := &in.RequiredClaims, &out.RequiredClaims + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UsernameClaim != nil { + in, out := &in.UsernameClaim, &out.UsernameClaim + *out = new(string) + **out = **in + } + if in.UsernamePrefix != nil { + in, out := &in.UsernamePrefix, &out.UsernamePrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfigOidcInitParameters. +func (in *IdentityProviderConfigOidcInitParameters) DeepCopy() *IdentityProviderConfigOidcInitParameters { + if in == nil { + return nil + } + out := new(IdentityProviderConfigOidcInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *IdentityProviderConfigOidcObservation) DeepCopyInto(out *IdentityProviderConfigOidcObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.GroupsClaim != nil { + in, out := &in.GroupsClaim, &out.GroupsClaim + *out = new(string) + **out = **in + } + if in.GroupsPrefix != nil { + in, out := &in.GroupsPrefix, &out.GroupsPrefix + *out = new(string) + **out = **in + } + if in.IssuerURL != nil { + in, out := &in.IssuerURL, &out.IssuerURL + *out = new(string) + **out = **in + } + if in.RequiredClaims != nil { + in, out := &in.RequiredClaims, &out.RequiredClaims + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UsernameClaim != nil { + in, out := &in.UsernameClaim, &out.UsernameClaim + *out = new(string) + **out = **in + } + if in.UsernamePrefix != nil { + in, out := &in.UsernamePrefix, &out.UsernamePrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfigOidcObservation. +func (in *IdentityProviderConfigOidcObservation) DeepCopy() *IdentityProviderConfigOidcObservation { + if in == nil { + return nil + } + out := new(IdentityProviderConfigOidcObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityProviderConfigOidcParameters) DeepCopyInto(out *IdentityProviderConfigOidcParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.GroupsClaim != nil { + in, out := &in.GroupsClaim, &out.GroupsClaim + *out = new(string) + **out = **in + } + if in.GroupsPrefix != nil { + in, out := &in.GroupsPrefix, &out.GroupsPrefix + *out = new(string) + **out = **in + } + if in.IssuerURL != nil { + in, out := &in.IssuerURL, &out.IssuerURL + *out = new(string) + **out = **in + } + if in.RequiredClaims != nil { + in, out := &in.RequiredClaims, &out.RequiredClaims + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UsernameClaim != nil { + in, out := &in.UsernameClaim, &out.UsernameClaim + *out = new(string) + **out = **in + } + if in.UsernamePrefix != nil { + in, out := &in.UsernamePrefix, &out.UsernamePrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfigOidcParameters. +func (in *IdentityProviderConfigOidcParameters) DeepCopy() *IdentityProviderConfigOidcParameters { + if in == nil { + return nil + } + out := new(IdentityProviderConfigOidcParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityProviderConfigParameters) DeepCopyInto(out *IdentityProviderConfigParameters) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterNameRef != nil { + in, out := &in.ClusterNameRef, &out.ClusterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterNameSelector != nil { + in, out := &in.ClusterNameSelector, &out.ClusterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Oidc != nil { + in, out := &in.Oidc, &out.Oidc + *out = new(IdentityProviderConfigOidcParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfigParameters. +func (in *IdentityProviderConfigParameters) DeepCopy() *IdentityProviderConfigParameters { + if in == nil { + return nil + } + out := new(IdentityProviderConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderConfigSpec) DeepCopyInto(out *IdentityProviderConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfigSpec. 
+func (in *IdentityProviderConfigSpec) DeepCopy() *IdentityProviderConfigSpec { + if in == nil { + return nil + } + out := new(IdentityProviderConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderConfigStatus) DeepCopyInto(out *IdentityProviderConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfigStatus. +func (in *IdentityProviderConfigStatus) DeepCopy() *IdentityProviderConfigStatus { + if in == nil { + return nil + } + out := new(IdentityProviderConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesNetworkConfigInitParameters) DeepCopyInto(out *KubernetesNetworkConfigInitParameters) { + *out = *in + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(string) + **out = **in + } + if in.ServiceIPv4Cidr != nil { + in, out := &in.ServiceIPv4Cidr, &out.ServiceIPv4Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesNetworkConfigInitParameters. +func (in *KubernetesNetworkConfigInitParameters) DeepCopy() *KubernetesNetworkConfigInitParameters { + if in == nil { + return nil + } + out := new(KubernetesNetworkConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesNetworkConfigObservation) DeepCopyInto(out *KubernetesNetworkConfigObservation) { + *out = *in + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(string) + **out = **in + } + if in.ServiceIPv4Cidr != nil { + in, out := &in.ServiceIPv4Cidr, &out.ServiceIPv4Cidr + *out = new(string) + **out = **in + } + if in.ServiceIPv6Cidr != nil { + in, out := &in.ServiceIPv6Cidr, &out.ServiceIPv6Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesNetworkConfigObservation. +func (in *KubernetesNetworkConfigObservation) DeepCopy() *KubernetesNetworkConfigObservation { + if in == nil { + return nil + } + out := new(KubernetesNetworkConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesNetworkConfigParameters) DeepCopyInto(out *KubernetesNetworkConfigParameters) { + *out = *in + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(string) + **out = **in + } + if in.ServiceIPv4Cidr != nil { + in, out := &in.ServiceIPv4Cidr, &out.ServiceIPv4Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesNetworkConfigParameters. +func (in *KubernetesNetworkConfigParameters) DeepCopy() *KubernetesNetworkConfigParameters { + if in == nil { + return nil + } + out := new(KubernetesNetworkConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateInitParameters) DeepCopyInto(out *LaunchTemplateInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInitParameters. +func (in *LaunchTemplateInitParameters) DeepCopy() *LaunchTemplateInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateObservation) DeepCopyInto(out *LaunchTemplateObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateObservation. +func (in *LaunchTemplateObservation) DeepCopy() *LaunchTemplateObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateParameters) DeepCopyInto(out *LaunchTemplateParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateParameters. +func (in *LaunchTemplateParameters) DeepCopy() *LaunchTemplateParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroup) DeepCopyInto(out *NodeGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroup. +func (in *NodeGroup) DeepCopy() *NodeGroup { + if in == nil { + return nil + } + out := new(NodeGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeGroupInitParameters) DeepCopyInto(out *NodeGroupInitParameters) { + *out = *in + if in.AMIType != nil { + in, out := &in.AMIType, &out.AMIType + *out = new(string) + **out = **in + } + if in.CapacityType != nil { + in, out := &in.CapacityType, &out.CapacityType + *out = new(string) + **out = **in + } + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.ForceUpdateVersion != nil { + in, out := &in.ForceUpdateVersion, &out.ForceUpdateVersion + *out = new(bool) + **out = **in + } + if in.InstanceTypes != nil { + in, out := &in.InstanceTypes, &out.InstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NodeRoleArn != nil { + in, out := &in.NodeRoleArn, &out.NodeRoleArn + *out = new(string) + **out = **in + } + if in.NodeRoleArnRef != nil { + in, out := &in.NodeRoleArnRef, &out.NodeRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NodeRoleArnSelector != nil { + in, out := &in.NodeRoleArnSelector, &out.NodeRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReleaseVersion != nil { + in, out := &in.ReleaseVersion, &out.ReleaseVersion + *out = new(string) + **out = **in + } + if in.RemoteAccess != nil { + in, out := &in.RemoteAccess, &out.RemoteAccess + *out = new(RemoteAccessInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.ScalingConfig != nil { + in, out := &in.ScalingConfig, &out.ScalingConfig + *out = new(ScalingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Taint != nil { + in, out := &in.Taint, &out.Taint + *out = make([]TaintInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UpdateConfig != nil { + in, out := &in.UpdateConfig, &out.UpdateConfig + *out = new(UpdateConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionRef != nil { + in, out := &in.VersionRef, &out.VersionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VersionSelector != nil { + in, out := &in.VersionSelector, &out.VersionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupInitParameters. 
+func (in *NodeGroupInitParameters) DeepCopy() *NodeGroupInitParameters { + if in == nil { + return nil + } + out := new(NodeGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupList) DeepCopyInto(out *NodeGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupList. +func (in *NodeGroupList) DeepCopy() *NodeGroupList { + if in == nil { + return nil + } + out := new(NodeGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeGroupObservation) DeepCopyInto(out *NodeGroupObservation) { + *out = *in + if in.AMIType != nil { + in, out := &in.AMIType, &out.AMIType + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CapacityType != nil { + in, out := &in.CapacityType, &out.CapacityType + *out = new(string) + **out = **in + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.ForceUpdateVersion != nil { + in, out := &in.ForceUpdateVersion, &out.ForceUpdateVersion + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceTypes != nil { + in, out := &in.InstanceTypes, &out.InstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateObservation) + (*in).DeepCopyInto(*out) + } + if in.NodeRoleArn != nil { + in, out := &in.NodeRoleArn, &out.NodeRoleArn + *out = new(string) + **out = **in + } + if in.ReleaseVersion != nil { + in, out := &in.ReleaseVersion, &out.ReleaseVersion + *out = new(string) + **out = **in + } + if in.RemoteAccess != nil { + in, out := &in.RemoteAccess, &out.RemoteAccess + *out = new(RemoteAccessObservation) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + 
in, out := &in.Resources, &out.Resources + *out = make([]ResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScalingConfig != nil { + in, out := &in.ScalingConfig, &out.ScalingConfig + *out = new(ScalingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Taint != nil { + in, out := &in.Taint, &out.Taint + *out = make([]TaintObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UpdateConfig != nil { + in, out := &in.UpdateConfig, &out.UpdateConfig + *out = new(UpdateConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupObservation. 
+func (in *NodeGroupObservation) DeepCopy() *NodeGroupObservation { + if in == nil { + return nil + } + out := new(NodeGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupParameters) DeepCopyInto(out *NodeGroupParameters) { + *out = *in + if in.AMIType != nil { + in, out := &in.AMIType, &out.AMIType + *out = new(string) + **out = **in + } + if in.CapacityType != nil { + in, out := &in.CapacityType, &out.CapacityType + *out = new(string) + **out = **in + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterNameRef != nil { + in, out := &in.ClusterNameRef, &out.ClusterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterNameSelector != nil { + in, out := &in.ClusterNameSelector, &out.ClusterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.ForceUpdateVersion != nil { + in, out := &in.ForceUpdateVersion, &out.ForceUpdateVersion + *out = new(bool) + **out = **in + } + if in.InstanceTypes != nil { + in, out := &in.InstanceTypes, &out.InstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateParameters) + (*in).DeepCopyInto(*out) + } + if 
in.NodeRoleArn != nil { + in, out := &in.NodeRoleArn, &out.NodeRoleArn + *out = new(string) + **out = **in + } + if in.NodeRoleArnRef != nil { + in, out := &in.NodeRoleArnRef, &out.NodeRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NodeRoleArnSelector != nil { + in, out := &in.NodeRoleArnSelector, &out.NodeRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReleaseVersion != nil { + in, out := &in.ReleaseVersion, &out.ReleaseVersion + *out = new(string) + **out = **in + } + if in.RemoteAccess != nil { + in, out := &in.RemoteAccess, &out.RemoteAccess + *out = new(RemoteAccessParameters) + (*in).DeepCopyInto(*out) + } + if in.ScalingConfig != nil { + in, out := &in.ScalingConfig, &out.ScalingConfig + *out = new(ScalingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Taint != nil { + in, out := &in.Taint, &out.Taint + *out = make([]TaintParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.UpdateConfig != nil { + in, out := &in.UpdateConfig, &out.UpdateConfig + *out = new(UpdateConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionRef != nil { + in, out := &in.VersionRef, &out.VersionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VersionSelector != nil { + in, out := &in.VersionSelector, &out.VersionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupParameters. +func (in *NodeGroupParameters) DeepCopy() *NodeGroupParameters { + if in == nil { + return nil + } + out := new(NodeGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupSpec) DeepCopyInto(out *NodeGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupSpec. +func (in *NodeGroupSpec) DeepCopy() *NodeGroupSpec { + if in == nil { + return nil + } + out := new(NodeGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupStatus) DeepCopyInto(out *NodeGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupStatus. 
+func (in *NodeGroupStatus) DeepCopy() *NodeGroupStatus { + if in == nil { + return nil + } + out := new(NodeGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcInitParameters) DeepCopyInto(out *OidcInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcInitParameters. +func (in *OidcInitParameters) DeepCopy() *OidcInitParameters { + if in == nil { + return nil + } + out := new(OidcInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcObservation) DeepCopyInto(out *OidcObservation) { + *out = *in + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcObservation. +func (in *OidcObservation) DeepCopy() *OidcObservation { + if in == nil { + return nil + } + out := new(OidcObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcParameters) DeepCopyInto(out *OidcParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcParameters. +func (in *OidcParameters) DeepCopy() *OidcParameters { + if in == nil { + return nil + } + out := new(OidcParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutpostConfigInitParameters) DeepCopyInto(out *OutpostConfigInitParameters) { + *out = *in + if in.ControlPlaneInstanceType != nil { + in, out := &in.ControlPlaneInstanceType, &out.ControlPlaneInstanceType + *out = new(string) + **out = **in + } + if in.ControlPlanePlacement != nil { + in, out := &in.ControlPlanePlacement, &out.ControlPlanePlacement + *out = new(ControlPlanePlacementInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutpostArns != nil { + in, out := &in.OutpostArns, &out.OutpostArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutpostConfigInitParameters. +func (in *OutpostConfigInitParameters) DeepCopy() *OutpostConfigInitParameters { + if in == nil { + return nil + } + out := new(OutpostConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutpostConfigObservation) DeepCopyInto(out *OutpostConfigObservation) { + *out = *in + if in.ControlPlaneInstanceType != nil { + in, out := &in.ControlPlaneInstanceType, &out.ControlPlaneInstanceType + *out = new(string) + **out = **in + } + if in.ControlPlanePlacement != nil { + in, out := &in.ControlPlanePlacement, &out.ControlPlanePlacement + *out = new(ControlPlanePlacementObservation) + (*in).DeepCopyInto(*out) + } + if in.OutpostArns != nil { + in, out := &in.OutpostArns, &out.OutpostArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutpostConfigObservation. 
+func (in *OutpostConfigObservation) DeepCopy() *OutpostConfigObservation { + if in == nil { + return nil + } + out := new(OutpostConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutpostConfigParameters) DeepCopyInto(out *OutpostConfigParameters) { + *out = *in + if in.ControlPlaneInstanceType != nil { + in, out := &in.ControlPlaneInstanceType, &out.ControlPlaneInstanceType + *out = new(string) + **out = **in + } + if in.ControlPlanePlacement != nil { + in, out := &in.ControlPlanePlacement, &out.ControlPlanePlacement + *out = new(ControlPlanePlacementParameters) + (*in).DeepCopyInto(*out) + } + if in.OutpostArns != nil { + in, out := &in.OutpostArns, &out.OutpostArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutpostConfigParameters. +func (in *OutpostConfigParameters) DeepCopy() *OutpostConfigParameters { + if in == nil { + return nil + } + out := new(OutpostConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderInitParameters) DeepCopyInto(out *ProviderInitParameters) { + *out = *in + if in.KeyArn != nil { + in, out := &in.KeyArn, &out.KeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderInitParameters. +func (in *ProviderInitParameters) DeepCopy() *ProviderInitParameters { + if in == nil { + return nil + } + out := new(ProviderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ProviderObservation) DeepCopyInto(out *ProviderObservation) { + *out = *in + if in.KeyArn != nil { + in, out := &in.KeyArn, &out.KeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderObservation. +func (in *ProviderObservation) DeepCopy() *ProviderObservation { + if in == nil { + return nil + } + out := new(ProviderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderParameters) DeepCopyInto(out *ProviderParameters) { + *out = *in + if in.KeyArn != nil { + in, out := &in.KeyArn, &out.KeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderParameters. +func (in *ProviderParameters) DeepCopy() *ProviderParameters { + if in == nil { + return nil + } + out := new(ProviderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemoteAccessInitParameters) DeepCopyInto(out *RemoteAccessInitParameters) { + *out = *in + if in.EC2SSHKey != nil { + in, out := &in.EC2SSHKey, &out.EC2SSHKey + *out = new(string) + **out = **in + } + if in.SourceSecurityGroupIDRefs != nil { + in, out := &in.SourceSecurityGroupIDRefs, &out.SourceSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceSecurityGroupIDSelector != nil { + in, out := &in.SourceSecurityGroupIDSelector, &out.SourceSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceSecurityGroupIds != nil { + in, out := &in.SourceSecurityGroupIds, &out.SourceSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteAccessInitParameters. +func (in *RemoteAccessInitParameters) DeepCopy() *RemoteAccessInitParameters { + if in == nil { + return nil + } + out := new(RemoteAccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteAccessObservation) DeepCopyInto(out *RemoteAccessObservation) { + *out = *in + if in.EC2SSHKey != nil { + in, out := &in.EC2SSHKey, &out.EC2SSHKey + *out = new(string) + **out = **in + } + if in.SourceSecurityGroupIds != nil { + in, out := &in.SourceSecurityGroupIds, &out.SourceSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteAccessObservation. 
+func (in *RemoteAccessObservation) DeepCopy() *RemoteAccessObservation { + if in == nil { + return nil + } + out := new(RemoteAccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteAccessParameters) DeepCopyInto(out *RemoteAccessParameters) { + *out = *in + if in.EC2SSHKey != nil { + in, out := &in.EC2SSHKey, &out.EC2SSHKey + *out = new(string) + **out = **in + } + if in.SourceSecurityGroupIDRefs != nil { + in, out := &in.SourceSecurityGroupIDRefs, &out.SourceSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceSecurityGroupIDSelector != nil { + in, out := &in.SourceSecurityGroupIDSelector, &out.SourceSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceSecurityGroupIds != nil { + in, out := &in.SourceSecurityGroupIds, &out.SourceSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteAccessParameters. +func (in *RemoteAccessParameters) DeepCopy() *RemoteAccessParameters { + if in == nil { + return nil + } + out := new(RemoteAccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesInitParameters) DeepCopyInto(out *ResourcesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesInitParameters. 
+func (in *ResourcesInitParameters) DeepCopy() *ResourcesInitParameters { + if in == nil { + return nil + } + out := new(ResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesObservation) DeepCopyInto(out *ResourcesObservation) { + *out = *in + if in.AutoscalingGroups != nil { + in, out := &in.AutoscalingGroups, &out.AutoscalingGroups + *out = make([]AutoscalingGroupsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemoteAccessSecurityGroupID != nil { + in, out := &in.RemoteAccessSecurityGroupID, &out.RemoteAccessSecurityGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesObservation. +func (in *ResourcesObservation) DeepCopy() *ResourcesObservation { + if in == nil { + return nil + } + out := new(ResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesParameters) DeepCopyInto(out *ResourcesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesParameters. +func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { + if in == nil { + return nil + } + out := new(ResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingConfigInitParameters) DeepCopyInto(out *ScalingConfigInitParameters) { + *out = *in + if in.DesiredSize != nil { + in, out := &in.DesiredSize, &out.DesiredSize + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfigInitParameters. +func (in *ScalingConfigInitParameters) DeepCopy() *ScalingConfigInitParameters { + if in == nil { + return nil + } + out := new(ScalingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingConfigObservation) DeepCopyInto(out *ScalingConfigObservation) { + *out = *in + if in.DesiredSize != nil { + in, out := &in.DesiredSize, &out.DesiredSize + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfigObservation. +func (in *ScalingConfigObservation) DeepCopy() *ScalingConfigObservation { + if in == nil { + return nil + } + out := new(ScalingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingConfigParameters) DeepCopyInto(out *ScalingConfigParameters) { + *out = *in + if in.DesiredSize != nil { + in, out := &in.DesiredSize, &out.DesiredSize + *out = new(float64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(float64) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfigParameters. +func (in *ScalingConfigParameters) DeepCopy() *ScalingConfigParameters { + if in == nil { + return nil + } + out := new(ScalingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaintInitParameters) DeepCopyInto(out *TaintInitParameters) { + *out = *in + if in.Effect != nil { + in, out := &in.Effect, &out.Effect + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaintInitParameters. +func (in *TaintInitParameters) DeepCopy() *TaintInitParameters { + if in == nil { + return nil + } + out := new(TaintInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaintObservation) DeepCopyInto(out *TaintObservation) { + *out = *in + if in.Effect != nil { + in, out := &in.Effect, &out.Effect + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaintObservation. +func (in *TaintObservation) DeepCopy() *TaintObservation { + if in == nil { + return nil + } + out := new(TaintObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaintParameters) DeepCopyInto(out *TaintParameters) { + *out = *in + if in.Effect != nil { + in, out := &in.Effect, &out.Effect + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaintParameters. +func (in *TaintParameters) DeepCopy() *TaintParameters { + if in == nil { + return nil + } + out := new(TaintParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdateConfigInitParameters) DeepCopyInto(out *UpdateConfigInitParameters) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(float64) + **out = **in + } + if in.MaxUnavailablePercentage != nil { + in, out := &in.MaxUnavailablePercentage, &out.MaxUnavailablePercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateConfigInitParameters. +func (in *UpdateConfigInitParameters) DeepCopy() *UpdateConfigInitParameters { + if in == nil { + return nil + } + out := new(UpdateConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateConfigObservation) DeepCopyInto(out *UpdateConfigObservation) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(float64) + **out = **in + } + if in.MaxUnavailablePercentage != nil { + in, out := &in.MaxUnavailablePercentage, &out.MaxUnavailablePercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateConfigObservation. +func (in *UpdateConfigObservation) DeepCopy() *UpdateConfigObservation { + if in == nil { + return nil + } + out := new(UpdateConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdateConfigParameters) DeepCopyInto(out *UpdateConfigParameters) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(float64) + **out = **in + } + if in.MaxUnavailablePercentage != nil { + in, out := &in.MaxUnavailablePercentage, &out.MaxUnavailablePercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateConfigParameters. +func (in *UpdateConfigParameters) DeepCopy() *UpdateConfigParameters { + if in == nil { + return nil + } + out := new(UpdateConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigInitParameters) DeepCopyInto(out *VPCConfigInitParameters) { + *out = *in + if in.EndpointPrivateAccess != nil { + in, out := &in.EndpointPrivateAccess, &out.EndpointPrivateAccess + *out = new(bool) + **out = **in + } + if in.EndpointPublicAccess != nil { + in, out := &in.EndpointPublicAccess, &out.EndpointPublicAccess + *out = new(bool) + **out = **in + } + if in.PublicAccessCidrs != nil { + in, out := &in.PublicAccessCidrs, &out.PublicAccessCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := 
&(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigInitParameters. +func (in *VPCConfigInitParameters) DeepCopy() *VPCConfigInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigObservation) DeepCopyInto(out *VPCConfigObservation) { + *out = *in + if in.ClusterSecurityGroupID != nil { + in, out := &in.ClusterSecurityGroupID, &out.ClusterSecurityGroupID + *out = new(string) + **out = **in + } + if in.EndpointPrivateAccess != nil { + in, out := &in.EndpointPrivateAccess, &out.EndpointPrivateAccess + *out = new(bool) + **out = **in + } + if in.EndpointPublicAccess != nil { + in, out := &in.EndpointPublicAccess, &out.EndpointPublicAccess + *out = new(bool) + **out = **in + } + if in.PublicAccessCidrs != nil { + in, out := &in.PublicAccessCidrs, &out.PublicAccessCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigObservation. +func (in *VPCConfigObservation) DeepCopy() *VPCConfigObservation { + if in == nil { + return nil + } + out := new(VPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigParameters) DeepCopyInto(out *VPCConfigParameters) { + *out = *in + if in.EndpointPrivateAccess != nil { + in, out := &in.EndpointPrivateAccess, &out.EndpointPrivateAccess + *out = new(bool) + **out = **in + } + if in.EndpointPublicAccess != nil { + in, out := &in.EndpointPublicAccess, &out.EndpointPublicAccess + *out = new(bool) + **out = **in + } + if in.PublicAccessCidrs != nil { + in, out := &in.PublicAccessCidrs, &out.PublicAccessCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigParameters. 
+func (in *VPCConfigParameters) DeepCopy() *VPCConfigParameters { + if in == nil { + return nil + } + out := new(VPCConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/eks/v1beta2/zz_generated.managed.go b/apis/eks/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..6fe3c093ef --- /dev/null +++ b/apis/eks/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. +func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. 
+func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IdentityProviderConfig. +func (mg *IdentityProviderConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this NodeGroup. +func (mg *NodeGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this NodeGroup. +func (mg *NodeGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this NodeGroup. +func (mg *NodeGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this NodeGroup. +func (mg *NodeGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this NodeGroup. +func (mg *NodeGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this NodeGroup. 
+func (mg *NodeGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this NodeGroup. +func (mg *NodeGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this NodeGroup. +func (mg *NodeGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this NodeGroup. +func (mg *NodeGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this NodeGroup. +func (mg *NodeGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this NodeGroup. +func (mg *NodeGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this NodeGroup. +func (mg *NodeGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/eks/v1beta2/zz_generated.managedlist.go b/apis/eks/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..dfde99e5d3 --- /dev/null +++ b/apis/eks/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ClusterList. +func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IdentityProviderConfigList. 
+func (l *IdentityProviderConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this NodeGroupList. +func (l *NodeGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/eks/v1beta2/zz_generated.resolvers.go b/apis/eks/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..ca52f8e60a --- /dev/null +++ b/apis/eks/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,394 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Cluster. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig.SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig.SecurityGroupIds") + } + mg.Spec.ForProvider.VPCConfig.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + 
mg.Spec.ForProvider.VPCConfig.SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig.SubnetIds") + } + mg.Spec.ForProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig.SecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig.SecurityGroupIds") + } + mg.Spec.InitProvider.VPCConfig.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig.SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig.SubnetIds") + } + mg.Spec.InitProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + + return nil +} + +// ResolveReferences of this IdentityProviderConfig. 
+func (mg *IdentityProviderConfig) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterNameRef, + Selector: mg.Spec.ForProvider.ClusterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterName") + } + mg.Spec.ForProvider.ClusterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterNameRef, + Selector: mg.Spec.InitProvider.ClusterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterName") + } + mg.Spec.InitProvider.ClusterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this NodeGroup. 
+func (mg *NodeGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterName), + Extract: ExternalNameIfClusterActive(), + Reference: mg.Spec.ForProvider.ClusterNameRef, + Selector: mg.Spec.ForProvider.ClusterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterName") + } + mg.Spec.ForProvider.ClusterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NodeRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.NodeRoleArnRef, + Selector: mg.Spec.ForProvider.NodeRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NodeRoleArn") + } + mg.Spec.ForProvider.NodeRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NodeRoleArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.RemoteAccess != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", 
"SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.RemoteAccess.SourceSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.RemoteAccess.SourceSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.RemoteAccess.SourceSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RemoteAccess.SourceSecurityGroupIds") + } + mg.Spec.ForProvider.RemoteAccess.SourceSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.RemoteAccess.SourceSecurityGroupIDRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetIDRefs, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetIds") + } + mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + 
CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Version), + Extract: resource.ExtractParamPath("version", false), + Reference: mg.Spec.ForProvider.VersionRef, + Selector: mg.Spec.ForProvider.VersionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Version") + } + mg.Spec.ForProvider.Version = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VersionRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NodeRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.NodeRoleArnRef, + Selector: mg.Spec.InitProvider.NodeRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NodeRoleArn") + } + mg.Spec.InitProvider.NodeRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NodeRoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.RemoteAccess != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.RemoteAccess.SourceSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.RemoteAccess.SourceSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.RemoteAccess.SourceSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != 
nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RemoteAccess.SourceSecurityGroupIds") + } + mg.Spec.InitProvider.RemoteAccess.SourceSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.RemoteAccess.SourceSecurityGroupIDRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetIDRefs, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetIds") + } + mg.Spec.InitProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("eks.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Version), + Extract: resource.ExtractParamPath("version", false), + Reference: mg.Spec.InitProvider.VersionRef, + Selector: mg.Spec.InitProvider.VersionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Version") + } + mg.Spec.InitProvider.Version = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VersionRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/eks/v1beta2/zz_groupversion_info.go 
b/apis/eks/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..a9aa5a7159 --- /dev/null +++ b/apis/eks/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=eks.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "eks.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/eks/v1beta2/zz_identityproviderconfig_terraformed.go b/apis/eks/v1beta2/zz_identityproviderconfig_terraformed.go new file mode 100755 index 0000000000..7280a77d95 --- /dev/null +++ b/apis/eks/v1beta2/zz_identityproviderconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IdentityProviderConfig +func (mg *IdentityProviderConfig) GetTerraformResourceType() string { + return "aws_eks_identity_provider_config" +} + +// GetConnectionDetailsMapping for this IdentityProviderConfig +func (tr *IdentityProviderConfig) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this IdentityProviderConfig +func (tr *IdentityProviderConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IdentityProviderConfig +func (tr *IdentityProviderConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IdentityProviderConfig +func (tr *IdentityProviderConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IdentityProviderConfig +func (tr *IdentityProviderConfig) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IdentityProviderConfig +func (tr *IdentityProviderConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IdentityProviderConfig +func (tr 
*IdentityProviderConfig) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IdentityProviderConfig +func (tr *IdentityProviderConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IdentityProviderConfig using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *IdentityProviderConfig) LateInitialize(attrs []byte) (bool, error) { + params := &IdentityProviderConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IdentityProviderConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/eks/v1beta2/zz_identityproviderconfig_types.go b/apis/eks/v1beta2/zz_identityproviderconfig_types.go new file mode 100755 index 0000000000..4ada35102b --- /dev/null +++ b/apis/eks/v1beta2/zz_identityproviderconfig_types.go @@ -0,0 +1,235 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityProviderConfigInitParameters struct { + + // – Name of the EKS Cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Reference to a Cluster in eks to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameRef *v1.Reference `json:"clusterNameRef,omitempty" tf:"-"` + + // Selector for a Cluster in eks to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameSelector *v1.Selector `json:"clusterNameSelector,omitempty" tf:"-"` + + // Nested attribute containing OpenID Connect identity provider information for the cluster. Detailed below. + Oidc *IdentityProviderConfigOidcInitParameters `json:"oidc,omitempty" tf:"oidc,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityProviderConfigObservation struct { + + // Amazon Resource Name (ARN) of the EKS Identity Provider Configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // – Name of the EKS Cluster. 
+ ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // EKS Cluster name and EKS Identity Provider Configuration name separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Nested attribute containing OpenID Connect identity provider information for the cluster. Detailed below. + Oidc *IdentityProviderConfigOidcObservation `json:"oidc,omitempty" tf:"oidc,omitempty"` + + // Status of the EKS Identity Provider Configuration. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type IdentityProviderConfigOidcInitParameters struct { + + // – Client ID for the OpenID Connect identity provider. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The JWT claim that the provider will use to return groups. + GroupsClaim *string `json:"groupsClaim,omitempty" tf:"groups_claim,omitempty"` + + // A prefix that is prepended to group claims e.g., oidc:. + GroupsPrefix *string `json:"groupsPrefix,omitempty" tf:"groups_prefix,omitempty"` + + // Issuer URL for the OpenID Connect identity provider. + IssuerURL *string `json:"issuerUrl,omitempty" tf:"issuer_url,omitempty"` + + // The key value pairs that describe required claims in the identity token. + // +mapType=granular + RequiredClaims map[string]*string `json:"requiredClaims,omitempty" tf:"required_claims,omitempty"` + + // The JWT claim that the provider will use as the username. + UsernameClaim *string `json:"usernameClaim,omitempty" tf:"username_claim,omitempty"` + + // A prefix that is prepended to username claims. 
+ UsernamePrefix *string `json:"usernamePrefix,omitempty" tf:"username_prefix,omitempty"` +} + +type IdentityProviderConfigOidcObservation struct { + + // – Client ID for the OpenID Connect identity provider. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The JWT claim that the provider will use to return groups. + GroupsClaim *string `json:"groupsClaim,omitempty" tf:"groups_claim,omitempty"` + + // A prefix that is prepended to group claims e.g., oidc:. + GroupsPrefix *string `json:"groupsPrefix,omitempty" tf:"groups_prefix,omitempty"` + + // Issuer URL for the OpenID Connect identity provider. + IssuerURL *string `json:"issuerUrl,omitempty" tf:"issuer_url,omitempty"` + + // The key value pairs that describe required claims in the identity token. + // +mapType=granular + RequiredClaims map[string]*string `json:"requiredClaims,omitempty" tf:"required_claims,omitempty"` + + // The JWT claim that the provider will use as the username. + UsernameClaim *string `json:"usernameClaim,omitempty" tf:"username_claim,omitempty"` + + // A prefix that is prepended to username claims. + UsernamePrefix *string `json:"usernamePrefix,omitempty" tf:"username_prefix,omitempty"` +} + +type IdentityProviderConfigOidcParameters struct { + + // – Client ID for the OpenID Connect identity provider. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The JWT claim that the provider will use to return groups. + // +kubebuilder:validation:Optional + GroupsClaim *string `json:"groupsClaim,omitempty" tf:"groups_claim,omitempty"` + + // A prefix that is prepended to group claims e.g., oidc:. + // +kubebuilder:validation:Optional + GroupsPrefix *string `json:"groupsPrefix,omitempty" tf:"groups_prefix,omitempty"` + + // Issuer URL for the OpenID Connect identity provider. 
+ // +kubebuilder:validation:Optional + IssuerURL *string `json:"issuerUrl" tf:"issuer_url,omitempty"` + + // The key value pairs that describe required claims in the identity token. + // +kubebuilder:validation:Optional + // +mapType=granular + RequiredClaims map[string]*string `json:"requiredClaims,omitempty" tf:"required_claims,omitempty"` + + // The JWT claim that the provider will use as the username. + // +kubebuilder:validation:Optional + UsernameClaim *string `json:"usernameClaim,omitempty" tf:"username_claim,omitempty"` + + // A prefix that is prepended to username claims. + // +kubebuilder:validation:Optional + UsernamePrefix *string `json:"usernamePrefix,omitempty" tf:"username_prefix,omitempty"` +} + +type IdentityProviderConfigParameters struct { + + // – Name of the EKS Cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster + // +kubebuilder:validation:Optional + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Reference to a Cluster in eks to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameRef *v1.Reference `json:"clusterNameRef,omitempty" tf:"-"` + + // Selector for a Cluster in eks to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameSelector *v1.Selector `json:"clusterNameSelector,omitempty" tf:"-"` + + // Nested attribute containing OpenID Connect identity provider information for the cluster. Detailed below. + // +kubebuilder:validation:Optional + Oidc *IdentityProviderConfigOidcParameters `json:"oidc,omitempty" tf:"oidc,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// IdentityProviderConfigSpec defines the desired state of IdentityProviderConfig +type IdentityProviderConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IdentityProviderConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IdentityProviderConfigInitParameters `json:"initProvider,omitempty"` +} + +// IdentityProviderConfigStatus defines the observed state of IdentityProviderConfig. +type IdentityProviderConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IdentityProviderConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IdentityProviderConfig is the Schema for the IdentityProviderConfigs API. Manages an EKS Identity Provider Configuration. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type IdentityProviderConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.oidc) || (has(self.initProvider) && has(self.initProvider.oidc))",message="spec.forProvider.oidc is a required parameter" + Spec IdentityProviderConfigSpec `json:"spec"` + Status IdentityProviderConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IdentityProviderConfigList contains a list of IdentityProviderConfigs +type IdentityProviderConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IdentityProviderConfig `json:"items"` +} + +// Repository type metadata. +var ( + IdentityProviderConfig_Kind = "IdentityProviderConfig" + IdentityProviderConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IdentityProviderConfig_Kind}.String() + IdentityProviderConfig_KindAPIVersion = IdentityProviderConfig_Kind + "." 
+ CRDGroupVersion.String() + IdentityProviderConfig_GroupVersionKind = CRDGroupVersion.WithKind(IdentityProviderConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&IdentityProviderConfig{}, &IdentityProviderConfigList{}) +} diff --git a/apis/eks/v1beta2/zz_nodegroup_terraformed.go b/apis/eks/v1beta2/zz_nodegroup_terraformed.go new file mode 100755 index 0000000000..d4da2553e9 --- /dev/null +++ b/apis/eks/v1beta2/zz_nodegroup_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this NodeGroup +func (mg *NodeGroup) GetTerraformResourceType() string { + return "aws_eks_node_group" +} + +// GetConnectionDetailsMapping for this NodeGroup +func (tr *NodeGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this NodeGroup +func (tr *NodeGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this NodeGroup +func (tr *NodeGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this NodeGroup +func (tr *NodeGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this NodeGroup +func (tr *NodeGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this NodeGroup +func (tr *NodeGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this NodeGroup +func (tr *NodeGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this NodeGroup +func (tr *NodeGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this NodeGroup using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *NodeGroup) LateInitialize(attrs []byte) (bool, error) { + params := &NodeGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("ReleaseVersion")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *NodeGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/eks/v1beta2/zz_nodegroup_types.go b/apis/eks/v1beta2/zz_nodegroup_types.go new file mode 100755 index 0000000000..a6c54b0359 --- /dev/null +++ b/apis/eks/v1beta2/zz_nodegroup_types.go @@ -0,0 +1,580 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoscalingGroupsInitParameters struct { +} + +type AutoscalingGroupsObservation struct { + + // Name of the AutoScaling Group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AutoscalingGroupsParameters struct { +} + +type LaunchTemplateInitParameters struct { + + // Identifier of the EC2 Launch Template. Conflicts with name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the EC2 Launch Template. Conflicts with id. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // EC2 Launch Template version number. While the API accepts values like $Default and $Latest, the API will convert the value to the associated version number (e.g., 1). 
Using the default_version or latest_version attribute of the aws_launch_template resource or data source is recommended for this argument. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateObservation struct { + + // Identifier of the EC2 Launch Template. Conflicts with name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the EC2 Launch Template. Conflicts with id. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // EC2 Launch Template version number. While the API accepts values like $Default and $Latest, the API will convert the value to the associated version number (e.g., 1). Using the default_version or latest_version attribute of the aws_launch_template resource or data source is recommended for this argument. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LaunchTemplateParameters struct { + + // Identifier of the EC2 Launch Template. Conflicts with name. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the EC2 Launch Template. Conflicts with id. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // EC2 Launch Template version number. While the API accepts values like $Default and $Latest, the API will convert the value to the associated version number (e.g., 1). Using the default_version or latest_version attribute of the aws_launch_template resource or data source is recommended for this argument. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type NodeGroupInitParameters struct { + + // Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the AWS documentation for valid values. + AMIType *string `json:"amiType,omitempty" tf:"ami_type,omitempty"` + + // Type of capacity associated with the EKS Node Group. Valid values: ON_DEMAND, SPOT. 
+ CapacityType *string `json:"capacityType,omitempty" tf:"capacity_type,omitempty"` + + // Disk size in GiB for worker nodes. Defaults to 50 for Windows, 20 all other node groups. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Force version update if existing pods are unable to be drained due to a pod disruption budget issue. + ForceUpdateVersion *bool `json:"forceUpdateVersion,omitempty" tf:"force_update_version,omitempty"` + + // List of instance types associated with the EKS Node Group. Defaults to ["t3.medium"]. + InstanceTypes []*string `json:"instanceTypes,omitempty" tf:"instance_types,omitempty"` + + // Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Configuration block with Launch Template settings. See launch_template below for details. Conflicts with remote_access. + LaunchTemplate *LaunchTemplateInitParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // – Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + NodeRoleArn *string `json:"nodeRoleArn,omitempty" tf:"node_role_arn,omitempty"` + + // Reference to a Role in iam to populate nodeRoleArn. + // +kubebuilder:validation:Optional + NodeRoleArnRef *v1.Reference `json:"nodeRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate nodeRoleArn. + // +kubebuilder:validation:Optional + NodeRoleArnSelector *v1.Selector `json:"nodeRoleArnSelector,omitempty" tf:"-"` + + // – AMI version of the EKS Node Group. 
Defaults to latest version for Kubernetes version. + ReleaseVersion *string `json:"releaseVersion,omitempty" tf:"release_version,omitempty"` + + // Configuration block with remote access settings. See remote_access below for details. Conflicts with launch_template. + RemoteAccess *RemoteAccessInitParameters `json:"remoteAccess,omitempty" tf:"remote_access,omitempty"` + + // Configuration block with scaling settings. See scaling_config below for details. + ScalingConfig *ScalingConfigInitParameters `json:"scalingConfig,omitempty" tf:"scaling_config,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Identifiers of EC2 Subnets to associate with the EKS Node Group. Amazon EKS managed node groups can be launched in both public and private subnets. If you plan to deploy load balancers to a subnet, the private subnet must have tag kubernetes.io/role/internal-elb, the public subnet must have tag kubernetes.io/role/elb. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group. See taint below for details. + Taint []TaintInitParameters `json:"taint,omitempty" tf:"taint,omitempty"` + + // Configuration block with update settings. See update_config below for details. 
+ UpdateConfig *UpdateConfigInitParameters `json:"updateConfig,omitempty" tf:"update_config,omitempty"` + + // – Kubernetes version. Defaults to EKS Cluster Kubernetes version. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("version",false) + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Reference to a Cluster in eks to populate version. + // +kubebuilder:validation:Optional + VersionRef *v1.Reference `json:"versionRef,omitempty" tf:"-"` + + // Selector for a Cluster in eks to populate version. + // +kubebuilder:validation:Optional + VersionSelector *v1.Selector `json:"versionSelector,omitempty" tf:"-"` +} + +type NodeGroupObservation struct { + + // Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the AWS documentation for valid values. + AMIType *string `json:"amiType,omitempty" tf:"ami_type,omitempty"` + + // Amazon Resource Name (ARN) of the EKS Node Group. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Type of capacity associated with the EKS Node Group. Valid values: ON_DEMAND, SPOT. + CapacityType *string `json:"capacityType,omitempty" tf:"capacity_type,omitempty"` + + // – Name of the EKS Cluster. + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Disk size in GiB for worker nodes. Defaults to 50 for Windows, 20 all other node groups. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Force version update if existing pods are unable to be drained due to a pod disruption budget issue. + ForceUpdateVersion *bool `json:"forceUpdateVersion,omitempty" tf:"force_update_version,omitempty"` + + // EKS Cluster name and EKS Node Group name separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of instance types associated with the EKS Node Group. 
Defaults to ["t3.medium"]. + InstanceTypes []*string `json:"instanceTypes,omitempty" tf:"instance_types,omitempty"` + + // Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Configuration block with Launch Template settings. See launch_template below for details. Conflicts with remote_access. + LaunchTemplate *LaunchTemplateObservation `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // – Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group. + NodeRoleArn *string `json:"nodeRoleArn,omitempty" tf:"node_role_arn,omitempty"` + + // – AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version. + ReleaseVersion *string `json:"releaseVersion,omitempty" tf:"release_version,omitempty"` + + // Configuration block with remote access settings. See remote_access below for details. Conflicts with launch_template. + RemoteAccess *RemoteAccessObservation `json:"remoteAccess,omitempty" tf:"remote_access,omitempty"` + + // List of objects containing information about underlying resources. + Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + + // Configuration block with scaling settings. See scaling_config below for details. + ScalingConfig *ScalingConfigObservation `json:"scalingConfig,omitempty" tf:"scaling_config,omitempty"` + + // Status of the EKS Node Group. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Identifiers of EC2 Subnets to associate with the EKS Node Group. Amazon EKS managed node groups can be launched in both public and private subnets. 
If you plan to deploy load balancers to a subnet, the private subnet must have tag kubernetes.io/role/internal-elb, the public subnet must have tag kubernetes.io/role/elb. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group. See taint below for details. + Taint []TaintObservation `json:"taint,omitempty" tf:"taint,omitempty"` + + // Configuration block with update settings. See update_config below for details. + UpdateConfig *UpdateConfigObservation `json:"updateConfig,omitempty" tf:"update_config,omitempty"` + + // – Kubernetes version. Defaults to EKS Cluster Kubernetes version. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type NodeGroupParameters struct { + + // Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the AWS documentation for valid values. + // +kubebuilder:validation:Optional + AMIType *string `json:"amiType,omitempty" tf:"ami_type,omitempty"` + + // Type of capacity associated with the EKS Node Group. Valid values: ON_DEMAND, SPOT. + // +kubebuilder:validation:Optional + CapacityType *string `json:"capacityType,omitempty" tf:"capacity_type,omitempty"` + + // – Name of the EKS Cluster. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster + // +crossplane:generate:reference:extractor=ExternalNameIfClusterActive() + // +kubebuilder:validation:Optional + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Reference to a Cluster in eks to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameRef *v1.Reference `json:"clusterNameRef,omitempty" tf:"-"` + + // Selector for a Cluster in eks to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameSelector *v1.Selector `json:"clusterNameSelector,omitempty" tf:"-"` + + // Disk size in GiB for worker nodes. Defaults to 50 for Windows, 20 all other node groups. + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Force version update if existing pods are unable to be drained due to a pod disruption budget issue. + // +kubebuilder:validation:Optional + ForceUpdateVersion *bool `json:"forceUpdateVersion,omitempty" tf:"force_update_version,omitempty"` + + // List of instance types associated with the EKS Node Group. Defaults to ["t3.medium"]. + // +kubebuilder:validation:Optional + InstanceTypes []*string `json:"instanceTypes,omitempty" tf:"instance_types,omitempty"` + + // Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Configuration block with Launch Template settings. See launch_template below for details. Conflicts with remote_access. 
+ // +kubebuilder:validation:Optional + LaunchTemplate *LaunchTemplateParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // – Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + NodeRoleArn *string `json:"nodeRoleArn,omitempty" tf:"node_role_arn,omitempty"` + + // Reference to a Role in iam to populate nodeRoleArn. + // +kubebuilder:validation:Optional + NodeRoleArnRef *v1.Reference `json:"nodeRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate nodeRoleArn. + // +kubebuilder:validation:Optional + NodeRoleArnSelector *v1.Selector `json:"nodeRoleArnSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // – AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version. + // +kubebuilder:validation:Optional + ReleaseVersion *string `json:"releaseVersion,omitempty" tf:"release_version,omitempty"` + + // Configuration block with remote access settings. See remote_access below for details. Conflicts with launch_template. + // +kubebuilder:validation:Optional + RemoteAccess *RemoteAccessParameters `json:"remoteAccess,omitempty" tf:"remote_access,omitempty"` + + // Configuration block with scaling settings. See scaling_config below for details. + // +kubebuilder:validation:Optional + ScalingConfig *ScalingConfigParameters `json:"scalingConfig,omitempty" tf:"scaling_config,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. 
+ // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Identifiers of EC2 Subnets to associate with the EKS Node Group. Amazon EKS managed node groups can be launched in both public and private subnets. If you plan to deploy load balancers to a subnet, the private subnet must have tag kubernetes.io/role/internal-elb, the public subnet must have tag kubernetes.io/role/elb. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group. See taint below for details. + // +kubebuilder:validation:Optional + Taint []TaintParameters `json:"taint,omitempty" tf:"taint,omitempty"` + + // Configuration block with update settings. See update_config below for details. + // +kubebuilder:validation:Optional + UpdateConfig *UpdateConfigParameters `json:"updateConfig,omitempty" tf:"update_config,omitempty"` + + // – Kubernetes version. Defaults to EKS Cluster Kubernetes version. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/eks/v1beta2.Cluster + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("version",false) + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Reference to a Cluster in eks to populate version. + // +kubebuilder:validation:Optional + VersionRef *v1.Reference `json:"versionRef,omitempty" tf:"-"` + + // Selector for a Cluster in eks to populate version. + // +kubebuilder:validation:Optional + VersionSelector *v1.Selector `json:"versionSelector,omitempty" tf:"-"` +} + +type RemoteAccessInitParameters struct { + + // EC2 Key Pair name that provides access for remote communication with the worker nodes in the EKS Node Group. If you specify this configuration, but do not specify source_security_group_ids when you create an EKS Node Group, either port 3389 for Windows, or port 22 for all other operating systems is opened on the worker nodes to the Internet (0.0.0.0/0). For Windows nodes, this will allow you to use RDP, for all others this allows you to SSH into the worker nodes. + EC2SSHKey *string `json:"ec2SshKey,omitempty" tf:"ec2_ssh_key,omitempty"` + + // References to SecurityGroup in ec2 to populate sourceSecurityGroupIds. + // +kubebuilder:validation:Optional + SourceSecurityGroupIDRefs []v1.Reference `json:"sourceSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate sourceSecurityGroupIds. + // +kubebuilder:validation:Optional + SourceSecurityGroupIDSelector *v1.Selector `json:"sourceSecurityGroupIdSelector,omitempty" tf:"-"` + + // Set of EC2 Security Group IDs to allow SSH access (port 22) from on the worker nodes. If you specify ec2_ssh_key, but do not specify this configuration when you create an EKS Node Group, port 22 on the worker nodes is opened to the Internet (0.0.0.0/0). 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SourceSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SourceSecurityGroupIDSelector + // +listType=set + SourceSecurityGroupIds []*string `json:"sourceSecurityGroupIds,omitempty" tf:"source_security_group_ids,omitempty"` +} + +type RemoteAccessObservation struct { + + // EC2 Key Pair name that provides access for remote communication with the worker nodes in the EKS Node Group. If you specify this configuration, but do not specify source_security_group_ids when you create an EKS Node Group, either port 3389 for Windows, or port 22 for all other operating systems is opened on the worker nodes to the Internet (0.0.0.0/0). For Windows nodes, this will allow you to use RDP, for all others this allows you to SSH into the worker nodes. + EC2SSHKey *string `json:"ec2SshKey,omitempty" tf:"ec2_ssh_key,omitempty"` + + // Set of EC2 Security Group IDs to allow SSH access (port 22) from on the worker nodes. If you specify ec2_ssh_key, but do not specify this configuration when you create an EKS Node Group, port 22 on the worker nodes is opened to the Internet (0.0.0.0/0). + // +listType=set + SourceSecurityGroupIds []*string `json:"sourceSecurityGroupIds,omitempty" tf:"source_security_group_ids,omitempty"` +} + +type RemoteAccessParameters struct { + + // EC2 Key Pair name that provides access for remote communication with the worker nodes in the EKS Node Group. If you specify this configuration, but do not specify source_security_group_ids when you create an EKS Node Group, either port 3389 for Windows, or port 22 for all other operating systems is opened on the worker nodes to the Internet (0.0.0.0/0). For Windows nodes, this will allow you to use RDP, for all others this allows you to SSH into the worker nodes. 
+ // +kubebuilder:validation:Optional + EC2SSHKey *string `json:"ec2SshKey,omitempty" tf:"ec2_ssh_key,omitempty"` + + // References to SecurityGroup in ec2 to populate sourceSecurityGroupIds. + // +kubebuilder:validation:Optional + SourceSecurityGroupIDRefs []v1.Reference `json:"sourceSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate sourceSecurityGroupIds. + // +kubebuilder:validation:Optional + SourceSecurityGroupIDSelector *v1.Selector `json:"sourceSecurityGroupIdSelector,omitempty" tf:"-"` + + // Set of EC2 Security Group IDs to allow SSH access (port 22) from on the worker nodes. If you specify ec2_ssh_key, but do not specify this configuration when you create an EKS Node Group, port 22 on the worker nodes is opened to the Internet (0.0.0.0/0). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SourceSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SourceSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SourceSecurityGroupIds []*string `json:"sourceSecurityGroupIds,omitempty" tf:"source_security_group_ids,omitempty"` +} + +type ResourcesInitParameters struct { +} + +type ResourcesObservation struct { + + // List of objects containing information about AutoScaling Groups. + AutoscalingGroups []AutoscalingGroupsObservation `json:"autoscalingGroups,omitempty" tf:"autoscaling_groups,omitempty"` + + // Identifier of the remote access EC2 Security Group. + RemoteAccessSecurityGroupID *string `json:"remoteAccessSecurityGroupId,omitempty" tf:"remote_access_security_group_id,omitempty"` +} + +type ResourcesParameters struct { +} + +type ScalingConfigInitParameters struct { + + // Desired number of worker nodes. + DesiredSize *float64 `json:"desiredSize,omitempty" tf:"desired_size,omitempty"` + + // Maximum number of worker nodes. 
+ MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"` + + // Minimum number of worker nodes. + MinSize *float64 `json:"minSize,omitempty" tf:"min_size,omitempty"` +} + +type ScalingConfigObservation struct { + + // Desired number of worker nodes. + DesiredSize *float64 `json:"desiredSize,omitempty" tf:"desired_size,omitempty"` + + // Maximum number of worker nodes. + MaxSize *float64 `json:"maxSize,omitempty" tf:"max_size,omitempty"` + + // Minimum number of worker nodes. + MinSize *float64 `json:"minSize,omitempty" tf:"min_size,omitempty"` +} + +type ScalingConfigParameters struct { + + // Desired number of worker nodes. + // +kubebuilder:validation:Optional + DesiredSize *float64 `json:"desiredSize" tf:"desired_size,omitempty"` + + // Maximum number of worker nodes. + // +kubebuilder:validation:Optional + MaxSize *float64 `json:"maxSize" tf:"max_size,omitempty"` + + // Minimum number of worker nodes. + // +kubebuilder:validation:Optional + MinSize *float64 `json:"minSize" tf:"min_size,omitempty"` +} + +type TaintInitParameters struct { + + // The effect of the taint. Valid values: NO_SCHEDULE, NO_EXECUTE, PREFER_NO_SCHEDULE. + Effect *string `json:"effect,omitempty" tf:"effect,omitempty"` + + // The key of the taint. Maximum length of 63. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the taint. Maximum length of 63. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TaintObservation struct { + + // The effect of the taint. Valid values: NO_SCHEDULE, NO_EXECUTE, PREFER_NO_SCHEDULE. + Effect *string `json:"effect,omitempty" tf:"effect,omitempty"` + + // The key of the taint. Maximum length of 63. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the taint. Maximum length of 63. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TaintParameters struct { + + // The effect of the taint. Valid values: NO_SCHEDULE, NO_EXECUTE, PREFER_NO_SCHEDULE. 
+ // +kubebuilder:validation:Optional + Effect *string `json:"effect" tf:"effect,omitempty"` + + // The key of the taint. Maximum length of 63. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The value of the taint. Maximum length of 63. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type UpdateConfigInitParameters struct { + + // Desired max number of unavailable worker nodes during node group update. + MaxUnavailable *float64 `json:"maxUnavailable,omitempty" tf:"max_unavailable,omitempty"` + + // Desired max percentage of unavailable worker nodes during node group update. + MaxUnavailablePercentage *float64 `json:"maxUnavailablePercentage,omitempty" tf:"max_unavailable_percentage,omitempty"` +} + +type UpdateConfigObservation struct { + + // Desired max number of unavailable worker nodes during node group update. + MaxUnavailable *float64 `json:"maxUnavailable,omitempty" tf:"max_unavailable,omitempty"` + + // Desired max percentage of unavailable worker nodes during node group update. + MaxUnavailablePercentage *float64 `json:"maxUnavailablePercentage,omitempty" tf:"max_unavailable_percentage,omitempty"` +} + +type UpdateConfigParameters struct { + + // Desired max number of unavailable worker nodes during node group update. + // +kubebuilder:validation:Optional + MaxUnavailable *float64 `json:"maxUnavailable,omitempty" tf:"max_unavailable,omitempty"` + + // Desired max percentage of unavailable worker nodes during node group update. + // +kubebuilder:validation:Optional + MaxUnavailablePercentage *float64 `json:"maxUnavailablePercentage,omitempty" tf:"max_unavailable_percentage,omitempty"` +} + +// NodeGroupSpec defines the desired state of NodeGroup +type NodeGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider NodeGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider NodeGroupInitParameters `json:"initProvider,omitempty"` +} + +// NodeGroupStatus defines the observed state of NodeGroup. +type NodeGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider NodeGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NodeGroup is the Schema for the NodeGroups API. 
Manages an EKS Node Group +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type NodeGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scalingConfig) || (has(self.initProvider) && has(self.initProvider.scalingConfig))",message="spec.forProvider.scalingConfig is a required parameter" + Spec NodeGroupSpec `json:"spec"` + Status NodeGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NodeGroupList contains a list of NodeGroups +type NodeGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NodeGroup `json:"items"` +} + +// Repository type metadata. +var ( + NodeGroup_Kind = "NodeGroup" + NodeGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: NodeGroup_Kind}.String() + NodeGroup_KindAPIVersion = NodeGroup_Kind + "." 
+ CRDGroupVersion.String() + NodeGroup_GroupVersionKind = CRDGroupVersion.WithKind(NodeGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&NodeGroup{}, &NodeGroupList{}) +} diff --git a/apis/elasticache/v1beta1/zz_generated.conversion_hubs.go b/apis/elasticache/v1beta1/zz_generated.conversion_hubs.go index 1ab6fa94d1..f43b7f4367 100755 --- a/apis/elasticache/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/elasticache/v1beta1/zz_generated.conversion_hubs.go @@ -15,8 +15,5 @@ func (tr *ParameterGroup) Hub() {} // Hub marks this type as a conversion hub. func (tr *SubnetGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *User) Hub() {} - // Hub marks this type as a conversion hub. func (tr *UserGroup) Hub() {} diff --git a/apis/elasticache/v1beta1/zz_generated.conversion_spokes.go b/apis/elasticache/v1beta1/zz_generated.conversion_spokes.go index 4464e8e65a..879b3b0673 100755 --- a/apis/elasticache/v1beta1/zz_generated.conversion_spokes.go +++ b/apis/elasticache/v1beta1/zz_generated.conversion_spokes.go @@ -32,3 +32,23 @@ func (tr *ReplicationGroup) ConvertFrom(srcRaw conversion.Hub) error { } return nil } + +// ConvertTo converts this User to the hub type. +func (tr *User) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the User type. 
+func (tr *User) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/elasticache/v1beta1/zz_generated.resolvers.go b/apis/elasticache/v1beta1/zz_generated.resolvers.go index 42bbcdf76f..b39e302305 100644 --- a/apis/elasticache/v1beta1/zz_generated.resolvers.go +++ b/apis/elasticache/v1beta1/zz_generated.resolvers.go @@ -369,7 +369,7 @@ func (mg *UserGroup) ResolveReferences(ctx context.Context, c client.Reader) err var mrsp reference.MultiResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elasticache.aws.upbound.io", "v1beta1", "User", "UserList") + m, l, err = apisresolver.GetManagedResource("elasticache.aws.upbound.io", "v1beta2", "User", "UserList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -388,7 +388,7 @@ func (mg *UserGroup) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.UserIds = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.UserIDRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("elasticache.aws.upbound.io", "v1beta1", "User", "UserList") + m, l, err = apisresolver.GetManagedResource("elasticache.aws.upbound.io", "v1beta2", "User", "UserList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/elasticache/v1beta1/zz_usergroup_types.go b/apis/elasticache/v1beta1/zz_usergroup_types.go index c86e1f9a9d..31f47e1f22 100755 --- a/apis/elasticache/v1beta1/zz_usergroup_types.go +++ 
b/apis/elasticache/v1beta1/zz_usergroup_types.go @@ -31,7 +31,7 @@ type UserGroupInitParameters struct { UserIDSelector *v1.Selector `json:"userIdSelector,omitempty" tf:"-"` // The list of user IDs that belong to the user group. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticache/v1beta1.User + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticache/v1beta2.User // +crossplane:generate:reference:refFieldName=UserIDRefs // +crossplane:generate:reference:selectorFieldName=UserIDSelector // +listType=set @@ -87,7 +87,7 @@ type UserGroupParameters struct { UserIDSelector *v1.Selector `json:"userIdSelector,omitempty" tf:"-"` // The list of user IDs that belong to the user group. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticache/v1beta1.User + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticache/v1beta2.User // +crossplane:generate:reference:refFieldName=UserIDRefs // +crossplane:generate:reference:selectorFieldName=UserIDSelector // +kubebuilder:validation:Optional diff --git a/apis/elasticache/v1beta2/zz_generated.conversion_hubs.go b/apis/elasticache/v1beta2/zz_generated.conversion_hubs.go index 1b86e0f019..809be8552d 100755 --- a/apis/elasticache/v1beta2/zz_generated.conversion_hubs.go +++ b/apis/elasticache/v1beta2/zz_generated.conversion_hubs.go @@ -8,3 +8,6 @@ package v1beta2 // Hub marks this type as a conversion hub. func (tr *ReplicationGroup) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *User) Hub() {} diff --git a/apis/elasticache/v1beta2/zz_generated.deepcopy.go b/apis/elasticache/v1beta2/zz_generated.deepcopy.go index 9fe791bda5..47da5f5ca5 100644 --- a/apis/elasticache/v1beta2/zz_generated.deepcopy.go +++ b/apis/elasticache/v1beta2/zz_generated.deepcopy.go @@ -13,6 +13,91 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationModeInitParameters) DeepCopyInto(out *AuthenticationModeInitParameters) { + *out = *in + if in.Passwords != nil { + in, out := &in.Passwords, &out.Passwords + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationModeInitParameters. +func (in *AuthenticationModeInitParameters) DeepCopy() *AuthenticationModeInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationModeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationModeObservation) DeepCopyInto(out *AuthenticationModeObservation) { + *out = *in + if in.PasswordCount != nil { + in, out := &in.PasswordCount, &out.PasswordCount + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationModeObservation. 
+func (in *AuthenticationModeObservation) DeepCopy() *AuthenticationModeObservation { + if in == nil { + return nil + } + out := new(AuthenticationModeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationModeParameters) DeepCopyInto(out *AuthenticationModeParameters) { + *out = *in + if in.PasswordsSecretRef != nil { + in, out := &in.PasswordsSecretRef, &out.PasswordsSecretRef + *out = new([]v1.SecretKeySelector) + if **in != nil { + in, out := *in, *out + *out = make([]v1.SecretKeySelector, len(*in)) + copy(*out, *in) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationModeParameters. +func (in *AuthenticationModeParameters) DeepCopy() *AuthenticationModeParameters { + if in == nil { + return nil + } + out := new(AuthenticationModeParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LogDeliveryConfigurationInitParameters) DeepCopyInto(out *LogDeliveryConfigurationInitParameters) { *out = *in @@ -1071,3 +1156,316 @@ func (in *ReplicationGroupStatus) DeepCopy() *ReplicationGroupStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. 
+func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserInitParameters) DeepCopyInto(out *UserInitParameters) { + *out = *in + if in.AccessString != nil { + in, out := &in.AccessString, &out.AccessString + *out = new(string) + **out = **in + } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(AuthenticationModeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.NoPasswordRequired != nil { + in, out := &in.NoPasswordRequired, &out.NoPasswordRequired + *out = new(bool) + **out = **in + } + if in.Passwords != nil { + in, out := &in.Passwords, &out.Passwords + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInitParameters. 
+func (in *UserInitParameters) DeepCopy() *UserInitParameters { + if in == nil { + return nil + } + out := new(UserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserObservation) DeepCopyInto(out *UserObservation) { + *out = *in + if in.AccessString != nil { + in, out := &in.AccessString, &out.AccessString + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(AuthenticationModeObservation) + (*in).DeepCopyInto(*out) + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.NoPasswordRequired != nil { + in, out := &in.NoPasswordRequired, &out.NoPasswordRequired + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. +func (in *UserObservation) DeepCopy() *UserObservation { + if in == nil { + return nil + } + out := new(UserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserParameters) DeepCopyInto(out *UserParameters) { + *out = *in + if in.AccessString != nil { + in, out := &in.AccessString, &out.AccessString + *out = new(string) + **out = **in + } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(AuthenticationModeParameters) + (*in).DeepCopyInto(*out) + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.NoPasswordRequired != nil { + in, out := &in.NoPasswordRequired, &out.NoPasswordRequired + *out = new(bool) + **out = **in + } + if in.PasswordsSecretRef != nil { + in, out := &in.PasswordsSecretRef, &out.PasswordsSecretRef + *out = new([]v1.SecretKeySelector) + if **in != nil { + in, out := *in, *out + *out = make([]v1.SecretKeySelector, len(*in)) + copy(*out, *in) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { + if in == nil { + return nil + } + out := new(UserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSpec) DeepCopyInto(out *UserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. +func (in *UserSpec) DeepCopy() *UserSpec { + if in == nil { + return nil + } + out := new(UserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserStatus) DeepCopyInto(out *UserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. +func (in *UserStatus) DeepCopy() *UserStatus { + if in == nil { + return nil + } + out := new(UserStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/elasticache/v1beta2/zz_generated.managed.go b/apis/elasticache/v1beta2/zz_generated.managed.go index bdce06442e..df24b09f87 100644 --- a/apis/elasticache/v1beta2/zz_generated.managed.go +++ b/apis/elasticache/v1beta2/zz_generated.managed.go @@ -66,3 +66,63 @@ func (mg *ReplicationGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnect func (mg *ReplicationGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } + +// GetCondition of this User. +func (mg *User) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this User. +func (mg *User) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this User. +func (mg *User) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this User. 
+func (mg *User) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this User. +func (mg *User) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this User. +func (mg *User) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this User. +func (mg *User) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this User. +func (mg *User) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this User. +func (mg *User) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this User. +func (mg *User) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this User. +func (mg *User) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this User. +func (mg *User) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/elasticache/v1beta2/zz_generated.managedlist.go b/apis/elasticache/v1beta2/zz_generated.managedlist.go index d6437cfe9b..7e7c54a0dc 100644 --- a/apis/elasticache/v1beta2/zz_generated.managedlist.go +++ b/apis/elasticache/v1beta2/zz_generated.managedlist.go @@ -15,3 +15,12 @@ func (l *ReplicationGroupList) GetItems() []resource.Managed { } return items } + +// GetItems of this UserList. 
+func (l *UserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/elasticache/v1beta2/zz_user_terraformed.go b/apis/elasticache/v1beta2/zz_user_terraformed.go new file mode 100755 index 0000000000..995906c37a --- /dev/null +++ b/apis/elasticache/v1beta2/zz_user_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this User +func (mg *User) GetTerraformResourceType() string { + return "aws_elasticache_user" +} + +// GetConnectionDetailsMapping for this User +func (tr *User) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"authentication_mode[*].passwords[*]": "authenticationMode[*].passwordsSecretRef[*]", "passwords[*]": "passwordsSecretRef[*]"} +} + +// GetObservation of this User +func (tr *User) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this User +func (tr *User) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this User +func (tr *User) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this User +func (tr *User) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this User +func (tr *User) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this User +func (tr *User) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this User +func (tr *User) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this User using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *User) LateInitialize(attrs []byte) (bool, error) { + params := &UserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *User) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elasticache/v1beta2/zz_user_types.go b/apis/elasticache/v1beta2/zz_user_types.go new file mode 100755 index 0000000000..129e1b2a2a --- /dev/null +++ b/apis/elasticache/v1beta2/zz_user_types.go @@ -0,0 +1,193 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthenticationModeInitParameters struct { + Passwords []*string `json:"passwordsSecretRef,omitempty" tf:"-"` + + // Specifies the authentication type. Possible options are: password, no-password-required or iam. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AuthenticationModeObservation struct { + PasswordCount *float64 `json:"passwordCount,omitempty" tf:"password_count,omitempty"` + + // Specifies the authentication type. Possible options are: password, no-password-required or iam. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AuthenticationModeParameters struct { + + // Specifies the passwords to use for authentication if type is set to password. 
+ // +kubebuilder:validation:Optional + PasswordsSecretRef *[]v1.SecretKeySelector `json:"passwordsSecretRef,omitempty" tf:"-"` + + // Specifies the authentication type. Possible options are: password, no-password-required or iam. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type UserInitParameters struct { + + // Access permissions string used for this user. See Specifying Permissions Using an Access String for more details. + AccessString *string `json:"accessString,omitempty" tf:"access_string,omitempty"` + + // Denotes the user's authentication properties. Detailed below. + AuthenticationMode *AuthenticationModeInitParameters `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The current supported value is REDIS. + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Indicates a password is not required for this user. + NoPasswordRequired *bool `json:"noPasswordRequired,omitempty" tf:"no_password_required,omitempty"` + + Passwords []*string `json:"passwordsSecretRef,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The username of the user. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` +} + +type UserObservation struct { + + // Access permissions string used for this user. See Specifying Permissions Using an Access String for more details. + AccessString *string `json:"accessString,omitempty" tf:"access_string,omitempty"` + + // The ARN of the created ElastiCache User. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Denotes the user's authentication properties. Detailed below. + AuthenticationMode *AuthenticationModeObservation `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The current supported value is REDIS. 
+ Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Indicates a password is not required for this user. + NoPasswordRequired *bool `json:"noPasswordRequired,omitempty" tf:"no_password_required,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The username of the user. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` +} + +type UserParameters struct { + + // Access permissions string used for this user. See Specifying Permissions Using an Access String for more details. + // +kubebuilder:validation:Optional + AccessString *string `json:"accessString,omitempty" tf:"access_string,omitempty"` + + // Denotes the user's authentication properties. Detailed below. + // +kubebuilder:validation:Optional + AuthenticationMode *AuthenticationModeParameters `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The current supported value is REDIS. + // +kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Indicates a password is not required for this user. + // +kubebuilder:validation:Optional + NoPasswordRequired *bool `json:"noPasswordRequired,omitempty" tf:"no_password_required,omitempty"` + + // Passwords used for this user. You can create up to two passwords for each user. + // +kubebuilder:validation:Optional + PasswordsSecretRef *[]v1.SecretKeySelector `json:"passwordsSecretRef,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The username of the user. + // +kubebuilder:validation:Optional + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` +} + +// UserSpec defines the desired state of User +type UserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UserInitParameters `json:"initProvider,omitempty"` +} + +// UserStatus defines the observed state of User. +type UserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// User is the Schema for the Users API. Provides an ElastiCache user. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type User struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.accessString) || (has(self.initProvider) && has(self.initProvider.accessString))",message="spec.forProvider.accessString is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engine) || (has(self.initProvider) && has(self.initProvider.engine))",message="spec.forProvider.engine is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.userName) || (has(self.initProvider) && has(self.initProvider.userName))",message="spec.forProvider.userName is a required parameter" + Spec UserSpec `json:"spec"` + Status UserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserList contains a list of Users +type UserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []User `json:"items"` +} + +// Repository type metadata. 
+var ( + User_Kind = "User" + User_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: User_Kind}.String() + User_KindAPIVersion = User_Kind + "." + CRDGroupVersion.String() + User_GroupVersionKind = CRDGroupVersion.WithKind(User_Kind) +) + +func init() { + SchemeBuilder.Register(&User{}, &UserList{}) +} diff --git a/apis/elasticbeanstalk/v1beta1/zz_applicationversion_types.go b/apis/elasticbeanstalk/v1beta1/zz_applicationversion_types.go index 4d4b12b8b0..ca6e71e29e 100755 --- a/apis/elasticbeanstalk/v1beta1/zz_applicationversion_types.go +++ b/apis/elasticbeanstalk/v1beta1/zz_applicationversion_types.go @@ -19,7 +19,7 @@ type ApplicationVersionInitParameters struct { Application *string `json:"application,omitempty" tf:"application,omitempty"` // S3 bucket that contains the Application Version source bundle. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` @@ -38,7 +38,7 @@ type ApplicationVersionInitParameters struct { ForceDelete *bool `json:"forceDelete,omitempty" tf:"force_delete,omitempty"` // S3 object that is the Application Version source bundle. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Object + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Key *string `json:"key,omitempty" tf:"key,omitempty"` @@ -93,7 +93,7 @@ type ApplicationVersionParameters struct { Application *string `json:"application,omitempty" tf:"application,omitempty"` // S3 bucket that contains the Application Version source bundle. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` @@ -115,7 +115,7 @@ type ApplicationVersionParameters struct { ForceDelete *bool `json:"forceDelete,omitempty" tf:"force_delete,omitempty"` // S3 object that is the Application Version source bundle. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Object + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Key *string `json:"key,omitempty" tf:"key,omitempty"` diff --git a/apis/elasticbeanstalk/v1beta1/zz_configurationtemplate_types.go b/apis/elasticbeanstalk/v1beta1/zz_configurationtemplate_types.go index 12dd671416..78870d492d 100755 --- a/apis/elasticbeanstalk/v1beta1/zz_configurationtemplate_types.go +++ b/apis/elasticbeanstalk/v1beta1/zz_configurationtemplate_types.go @@ -16,7 +16,7 @@ import ( type ConfigurationTemplateInitParameters struct { // – name of the application to associate with this configuration template - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticbeanstalk/v1beta1.Application + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticbeanstalk/v1beta2.Application Application *string `json:"application,omitempty" tf:"application,omitempty"` // Reference to a Application in elasticbeanstalk to populate application. 
@@ -69,7 +69,7 @@ type ConfigurationTemplateObservation struct { type ConfigurationTemplateParameters struct { // – name of the application to associate with this configuration template - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticbeanstalk/v1beta1.Application + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticbeanstalk/v1beta2.Application // +kubebuilder:validation:Optional Application *string `json:"application,omitempty" tf:"application,omitempty"` diff --git a/apis/elasticbeanstalk/v1beta1/zz_generated.conversion_hubs.go b/apis/elasticbeanstalk/v1beta1/zz_generated.conversion_hubs.go index ecfc9faa22..15677fd77d 100755 --- a/apis/elasticbeanstalk/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/elasticbeanstalk/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Application) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ApplicationVersion) Hub() {} diff --git a/apis/elasticbeanstalk/v1beta1/zz_generated.conversion_spokes.go b/apis/elasticbeanstalk/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ada25ff080 --- /dev/null +++ b/apis/elasticbeanstalk/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Application to the hub type. 
+func (tr *Application) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Application type. +func (tr *Application) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/elasticbeanstalk/v1beta1/zz_generated.resolvers.go b/apis/elasticbeanstalk/v1beta1/zz_generated.resolvers.go index f39768b725..71e2845664 100644 --- a/apis/elasticbeanstalk/v1beta1/zz_generated.resolvers.go +++ b/apis/elasticbeanstalk/v1beta1/zz_generated.resolvers.go @@ -81,7 +81,7 @@ func (mg *ApplicationVersion) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -100,7 +100,7 @@ func (mg *ApplicationVersion) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Object", 
"ObjectList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -119,7 +119,7 @@ func (mg *ApplicationVersion) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.Key = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -138,7 +138,7 @@ func (mg *ApplicationVersion) ResolveReferences(ctx context.Context, c client.Re mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Object", "ObjectList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -169,7 +169,7 @@ func (mg *ConfigurationTemplate) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elasticbeanstalk.aws.upbound.io", "v1beta1", "Application", "ApplicationList") + m, l, err = apisresolver.GetManagedResource("elasticbeanstalk.aws.upbound.io", "v1beta2", "Application", "ApplicationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -188,7 +188,7 @@ func (mg *ConfigurationTemplate) ResolveReferences(ctx 
context.Context, c client mg.Spec.ForProvider.Application = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ApplicationRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elasticbeanstalk.aws.upbound.io", "v1beta1", "Application", "ApplicationList") + m, l, err = apisresolver.GetManagedResource("elasticbeanstalk.aws.upbound.io", "v1beta2", "Application", "ApplicationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/elasticbeanstalk/v1beta2/zz_application_terraformed.go b/apis/elasticbeanstalk/v1beta2/zz_application_terraformed.go new file mode 100755 index 0000000000..2fc6cc696b --- /dev/null +++ b/apis/elasticbeanstalk/v1beta2/zz_application_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Application +func (mg *Application) GetTerraformResourceType() string { + return "aws_elastic_beanstalk_application" +} + +// GetConnectionDetailsMapping for this Application +func (tr *Application) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Application +func (tr *Application) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Application +func (tr *Application) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Application +func (tr *Application) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Application +func (tr *Application) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Application +func (tr *Application) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Application +func (tr *Application) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Application +func (tr *Application) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Application using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Application) LateInitialize(attrs []byte) (bool, error) { + params := &ApplicationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Application) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elasticbeanstalk/v1beta2/zz_application_types.go b/apis/elasticbeanstalk/v1beta2/zz_application_types.go new file mode 100755 index 0000000000..d0deb9f39d --- /dev/null +++ b/apis/elasticbeanstalk/v1beta2/zz_application_types.go @@ -0,0 +1,194 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationInitParameters struct { + AppversionLifecycle *AppversionLifecycleInitParameters `json:"appversionLifecycle,omitempty" tf:"appversion_lifecycle,omitempty"` + + // Short description of the application + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ApplicationObservation struct { + AppversionLifecycle *AppversionLifecycleObservation `json:"appversionLifecycle,omitempty" tf:"appversion_lifecycle,omitempty"` + + // The ARN assigned by AWS for this Elastic Beanstalk Application. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Short description of the application + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ApplicationParameters struct { + + // +kubebuilder:validation:Optional + AppversionLifecycle *AppversionLifecycleParameters `json:"appversionLifecycle,omitempty" tf:"appversion_lifecycle,omitempty"` + + // Short description of the application + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AppversionLifecycleInitParameters struct { + + // Set to true to delete a version's source bundle from S3 when the application version is deleted. + DeleteSourceFromS3 *bool `json:"deleteSourceFromS3,omitempty" tf:"delete_source_from_s3,omitempty"` + + // The number of days to retain an application version ('max_age_in_days' and 'max_count' cannot be enabled simultaneously.). + MaxAgeInDays *float64 `json:"maxAgeInDays,omitempty" tf:"max_age_in_days,omitempty"` + + // The maximum number of application versions to retain ('max_age_in_days' and 'max_count' cannot be enabled simultaneously.). + MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"` + + // The ARN of an IAM service role under which the application version is deleted. Elastic Beanstalk must have permission to assume this role. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ServiceRole *string `json:"serviceRole,omitempty" tf:"service_role,omitempty"` + + // Reference to a Role in iam to populate serviceRole. + // +kubebuilder:validation:Optional + ServiceRoleRef *v1.Reference `json:"serviceRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRole. + // +kubebuilder:validation:Optional + ServiceRoleSelector *v1.Selector `json:"serviceRoleSelector,omitempty" tf:"-"` +} + +type AppversionLifecycleObservation struct { + + // Set to true to delete a version's source bundle from S3 when the application version is deleted. 
+ DeleteSourceFromS3 *bool `json:"deleteSourceFromS3,omitempty" tf:"delete_source_from_s3,omitempty"` + + // The number of days to retain an application version ('max_age_in_days' and 'max_count' cannot be enabled simultaneously.). + MaxAgeInDays *float64 `json:"maxAgeInDays,omitempty" tf:"max_age_in_days,omitempty"` + + // The maximum number of application versions to retain ('max_age_in_days' and 'max_count' cannot be enabled simultaneously.). + MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"` + + // The ARN of an IAM service role under which the application version is deleted. Elastic Beanstalk must have permission to assume this role. + ServiceRole *string `json:"serviceRole,omitempty" tf:"service_role,omitempty"` +} + +type AppversionLifecycleParameters struct { + + // Set to true to delete a version's source bundle from S3 when the application version is deleted. + // +kubebuilder:validation:Optional + DeleteSourceFromS3 *bool `json:"deleteSourceFromS3,omitempty" tf:"delete_source_from_s3,omitempty"` + + // The number of days to retain an application version ('max_age_in_days' and 'max_count' cannot be enabled simultaneously.). + // +kubebuilder:validation:Optional + MaxAgeInDays *float64 `json:"maxAgeInDays,omitempty" tf:"max_age_in_days,omitempty"` + + // The maximum number of application versions to retain ('max_age_in_days' and 'max_count' cannot be enabled simultaneously.). + // +kubebuilder:validation:Optional + MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"` + + // The ARN of an IAM service role under which the application version is deleted. Elastic Beanstalk must have permission to assume this role. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ServiceRole *string `json:"serviceRole,omitempty" tf:"service_role,omitempty"` + + // Reference to a Role in iam to populate serviceRole. + // +kubebuilder:validation:Optional + ServiceRoleRef *v1.Reference `json:"serviceRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRole. + // +kubebuilder:validation:Optional + ServiceRoleSelector *v1.Selector `json:"serviceRoleSelector,omitempty" tf:"-"` +} + +// ApplicationSpec defines the desired state of Application +type ApplicationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ApplicationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ApplicationInitParameters `json:"initProvider,omitempty"` +} + +// ApplicationStatus defines the observed state of Application. +type ApplicationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ApplicationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Application is the Schema for the Applications API. 
Provides an Elastic Beanstalk Application Resource +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Application struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ApplicationSpec `json:"spec"` + Status ApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ApplicationList contains a list of Applications +type ApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Application `json:"items"` +} + +// Repository type metadata. +var ( + Application_Kind = "Application" + Application_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Application_Kind}.String() + Application_KindAPIVersion = Application_Kind + "." + CRDGroupVersion.String() + Application_GroupVersionKind = CRDGroupVersion.WithKind(Application_Kind) +) + +func init() { + SchemeBuilder.Register(&Application{}, &ApplicationList{}) +} diff --git a/apis/elasticbeanstalk/v1beta2/zz_generated.conversion_hubs.go b/apis/elasticbeanstalk/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2d48655531 --- /dev/null +++ b/apis/elasticbeanstalk/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Application) Hub() {} diff --git a/apis/elasticbeanstalk/v1beta2/zz_generated.deepcopy.go b/apis/elasticbeanstalk/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..e6251c4601 --- /dev/null +++ b/apis/elasticbeanstalk/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,387 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Application) DeepCopyInto(out *Application) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Application. +func (in *Application) DeepCopy() *Application { + if in == nil { + return nil + } + out := new(Application) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Application) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInitParameters) DeepCopyInto(out *ApplicationInitParameters) { + *out = *in + if in.AppversionLifecycle != nil { + in, out := &in.AppversionLifecycle, &out.AppversionLifecycle + *out = new(AppversionLifecycleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInitParameters. +func (in *ApplicationInitParameters) DeepCopy() *ApplicationInitParameters { + if in == nil { + return nil + } + out := new(ApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationList) DeepCopyInto(out *ApplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Application, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationList. +func (in *ApplicationList) DeepCopy() *ApplicationList { + if in == nil { + return nil + } + out := new(ApplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ApplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationObservation) DeepCopyInto(out *ApplicationObservation) { + *out = *in + if in.AppversionLifecycle != nil { + in, out := &in.AppversionLifecycle, &out.AppversionLifecycle + *out = new(AppversionLifecycleObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationObservation. +func (in *ApplicationObservation) DeepCopy() *ApplicationObservation { + if in == nil { + return nil + } + out := new(ApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationParameters) DeepCopyInto(out *ApplicationParameters) { + *out = *in + if in.AppversionLifecycle != nil { + in, out := &in.AppversionLifecycle, &out.AppversionLifecycle + *out = new(AppversionLifecycleParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationParameters. +func (in *ApplicationParameters) DeepCopy() *ApplicationParameters { + if in == nil { + return nil + } + out := new(ApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSpec. +func (in *ApplicationSpec) DeepCopy() *ApplicationSpec { + if in == nil { + return nil + } + out := new(ApplicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStatus. +func (in *ApplicationStatus) DeepCopy() *ApplicationStatus { + if in == nil { + return nil + } + out := new(ApplicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppversionLifecycleInitParameters) DeepCopyInto(out *AppversionLifecycleInitParameters) { + *out = *in + if in.DeleteSourceFromS3 != nil { + in, out := &in.DeleteSourceFromS3, &out.DeleteSourceFromS3 + *out = new(bool) + **out = **in + } + if in.MaxAgeInDays != nil { + in, out := &in.MaxAgeInDays, &out.MaxAgeInDays + *out = new(float64) + **out = **in + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.ServiceRole != nil { + in, out := &in.ServiceRole, &out.ServiceRole + *out = new(string) + **out = **in + } + if in.ServiceRoleRef != nil { + in, out := &in.ServiceRoleRef, &out.ServiceRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleSelector != nil { + in, out := &in.ServiceRoleSelector, &out.ServiceRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppversionLifecycleInitParameters. +func (in *AppversionLifecycleInitParameters) DeepCopy() *AppversionLifecycleInitParameters { + if in == nil { + return nil + } + out := new(AppversionLifecycleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppversionLifecycleObservation) DeepCopyInto(out *AppversionLifecycleObservation) { + *out = *in + if in.DeleteSourceFromS3 != nil { + in, out := &in.DeleteSourceFromS3, &out.DeleteSourceFromS3 + *out = new(bool) + **out = **in + } + if in.MaxAgeInDays != nil { + in, out := &in.MaxAgeInDays, &out.MaxAgeInDays + *out = new(float64) + **out = **in + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.ServiceRole != nil { + in, out := &in.ServiceRole, &out.ServiceRole + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppversionLifecycleObservation. +func (in *AppversionLifecycleObservation) DeepCopy() *AppversionLifecycleObservation { + if in == nil { + return nil + } + out := new(AppversionLifecycleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppversionLifecycleParameters) DeepCopyInto(out *AppversionLifecycleParameters) { + *out = *in + if in.DeleteSourceFromS3 != nil { + in, out := &in.DeleteSourceFromS3, &out.DeleteSourceFromS3 + *out = new(bool) + **out = **in + } + if in.MaxAgeInDays != nil { + in, out := &in.MaxAgeInDays, &out.MaxAgeInDays + *out = new(float64) + **out = **in + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.ServiceRole != nil { + in, out := &in.ServiceRole, &out.ServiceRole + *out = new(string) + **out = **in + } + if in.ServiceRoleRef != nil { + in, out := &in.ServiceRoleRef, &out.ServiceRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleSelector != nil { + in, out := &in.ServiceRoleSelector, &out.ServiceRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppversionLifecycleParameters. +func (in *AppversionLifecycleParameters) DeepCopy() *AppversionLifecycleParameters { + if in == nil { + return nil + } + out := new(AppversionLifecycleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/elasticbeanstalk/v1beta2/zz_generated.managed.go b/apis/elasticbeanstalk/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..d1481109bb --- /dev/null +++ b/apis/elasticbeanstalk/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Application. +func (mg *Application) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Application. 
+func (mg *Application) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Application. +func (mg *Application) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Application. +func (mg *Application) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Application. +func (mg *Application) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Application. +func (mg *Application) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Application. +func (mg *Application) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Application. +func (mg *Application) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Application. +func (mg *Application) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Application. +func (mg *Application) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Application. +func (mg *Application) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Application. 
+func (mg *Application) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/elasticbeanstalk/v1beta2/zz_generated.managedlist.go b/apis/elasticbeanstalk/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..9c9817b1e1 --- /dev/null +++ b/apis/elasticbeanstalk/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ApplicationList. +func (l *ApplicationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/elasticbeanstalk/v1beta2/zz_generated.resolvers.go b/apis/elasticbeanstalk/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..40677854e5 --- /dev/null +++ b/apis/elasticbeanstalk/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,73 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Application) ResolveReferences( // ResolveReferences of this Application. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.AppversionLifecycle != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AppversionLifecycle.ServiceRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.AppversionLifecycle.ServiceRoleRef, + Selector: mg.Spec.ForProvider.AppversionLifecycle.ServiceRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AppversionLifecycle.ServiceRole") + } + mg.Spec.ForProvider.AppversionLifecycle.ServiceRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AppversionLifecycle.ServiceRoleRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.AppversionLifecycle != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AppversionLifecycle.ServiceRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.AppversionLifecycle.ServiceRoleRef, + Selector: mg.Spec.InitProvider.AppversionLifecycle.ServiceRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AppversionLifecycle.ServiceRole") + } + 
mg.Spec.InitProvider.AppversionLifecycle.ServiceRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AppversionLifecycle.ServiceRoleRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/elasticbeanstalk/v1beta2/zz_groupversion_info.go b/apis/elasticbeanstalk/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..cd1de18552 --- /dev/null +++ b/apis/elasticbeanstalk/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=elasticbeanstalk.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "elasticbeanstalk.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/elasticsearch/v1beta1/zz_domainpolicy_types.go b/apis/elasticsearch/v1beta1/zz_domainpolicy_types.go index 6a28d5e951..6d0d4a7b98 100755 --- a/apis/elasticsearch/v1beta1/zz_domainpolicy_types.go +++ b/apis/elasticsearch/v1beta1/zz_domainpolicy_types.go @@ -19,7 +19,7 @@ type DomainPolicyInitParameters struct { AccessPolicies *string `json:"accessPolicies,omitempty" tf:"access_policies,omitempty"` // Name of the domain. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticsearch/v1beta1.Domain + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticsearch/v1beta2.Domain DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` // Reference to a Domain in elasticsearch to populate domainName. @@ -49,7 +49,7 @@ type DomainPolicyParameters struct { AccessPolicies *string `json:"accessPolicies,omitempty" tf:"access_policies,omitempty"` // Name of the domain. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticsearch/v1beta1.Domain + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticsearch/v1beta2.Domain // +kubebuilder:validation:Optional DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` diff --git a/apis/elasticsearch/v1beta1/zz_generated.conversion_hubs.go b/apis/elasticsearch/v1beta1/zz_generated.conversion_hubs.go index e1c18871a9..69c4e36efe 100755 --- a/apis/elasticsearch/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/elasticsearch/v1beta1/zz_generated.conversion_hubs.go @@ -6,11 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Domain) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DomainPolicy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DomainSAMLOptions) Hub() {} diff --git a/apis/elasticsearch/v1beta1/zz_generated.conversion_spokes.go b/apis/elasticsearch/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..9128ba7d2b --- /dev/null +++ b/apis/elasticsearch/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Domain to the hub type. +func (tr *Domain) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Domain type. +func (tr *Domain) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DomainSAMLOptions to the hub type. +func (tr *DomainSAMLOptions) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DomainSAMLOptions type. 
+func (tr *DomainSAMLOptions) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/elasticsearch/v1beta1/zz_generated.resolvers.go b/apis/elasticsearch/v1beta1/zz_generated.resolvers.go index 8d9469a699..c3f70cbeec 100644 --- a/apis/elasticsearch/v1beta1/zz_generated.resolvers.go +++ b/apis/elasticsearch/v1beta1/zz_generated.resolvers.go @@ -82,7 +82,7 @@ func (mg *DomainPolicy) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elasticsearch.aws.upbound.io", "v1beta1", "Domain", "DomainList") + m, l, err = apisresolver.GetManagedResource("elasticsearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -101,7 +101,7 @@ func (mg *DomainPolicy) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.DomainName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DomainNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elasticsearch.aws.upbound.io", "v1beta1", "Domain", "DomainList") + m, l, err = apisresolver.GetManagedResource("elasticsearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/elasticsearch/v1beta2/zz_domain_terraformed.go b/apis/elasticsearch/v1beta2/zz_domain_terraformed.go new file mode 100755 index 0000000000..6b88c301f4 --- /dev/null +++ 
b/apis/elasticsearch/v1beta2/zz_domain_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Domain +func (mg *Domain) GetTerraformResourceType() string { + return "aws_elasticsearch_domain" +} + +// GetConnectionDetailsMapping for this Domain +func (tr *Domain) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"advanced_security_options[*].master_user_options[*].master_user_password": "advancedSecurityOptions[*].masterUserOptions[*].masterUserPasswordSecretRef"} +} + +// GetObservation of this Domain +func (tr *Domain) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Domain +func (tr *Domain) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Domain +func (tr *Domain) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Domain +func (tr *Domain) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Domain +func (tr *Domain) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != 
nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Domain +func (tr *Domain) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Domain +func (tr *Domain) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Domain using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Domain) LateInitialize(attrs []byte) (bool, error) { + params := &DomainParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Domain) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elasticsearch/v1beta2/zz_domain_types.go b/apis/elasticsearch/v1beta2/zz_domain_types.go new file mode 100755 index 0000000000..5ec9951a8e --- /dev/null +++ b/apis/elasticsearch/v1beta2/zz_domain_types.go @@ -0,0 +1,936 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdvancedSecurityOptionsInitParameters struct { + + // Whether advanced security is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the internal user database is enabled. If not set, defaults to false by the AWS API. + InternalUserDatabaseEnabled *bool `json:"internalUserDatabaseEnabled,omitempty" tf:"internal_user_database_enabled,omitempty"` + + // Configuration block for the main user. Detailed below. + MasterUserOptions *MasterUserOptionsInitParameters `json:"masterUserOptions,omitempty" tf:"master_user_options,omitempty"` +} + +type AdvancedSecurityOptionsObservation struct { + + // Whether advanced security is enabled. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the internal user database is enabled. If not set, defaults to false by the AWS API. + InternalUserDatabaseEnabled *bool `json:"internalUserDatabaseEnabled,omitempty" tf:"internal_user_database_enabled,omitempty"` + + // Configuration block for the main user. Detailed below. + MasterUserOptions *MasterUserOptionsObservation `json:"masterUserOptions,omitempty" tf:"master_user_options,omitempty"` +} + +type AdvancedSecurityOptionsParameters struct { + + // Whether advanced security is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Whether the internal user database is enabled. If not set, defaults to false by the AWS API. + // +kubebuilder:validation:Optional + InternalUserDatabaseEnabled *bool `json:"internalUserDatabaseEnabled,omitempty" tf:"internal_user_database_enabled,omitempty"` + + // Configuration block for the main user. Detailed below. + // +kubebuilder:validation:Optional + MasterUserOptions *MasterUserOptionsParameters `json:"masterUserOptions,omitempty" tf:"master_user_options,omitempty"` +} + +type AutoTuneOptionsInitParameters struct { + + // The Auto-Tune desired state for the domain. Valid values: ENABLED or DISABLED. + DesiredState *string `json:"desiredState,omitempty" tf:"desired_state,omitempty"` + + // Configuration block for Auto-Tune maintenance windows. Can be specified multiple times for each maintenance window. Detailed below. + MaintenanceSchedule []MaintenanceScheduleInitParameters `json:"maintenanceSchedule,omitempty" tf:"maintenance_schedule,omitempty"` + + // Whether to roll back to default Auto-Tune settings when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK or NO_ROLLBACK. + RollbackOnDisable *string `json:"rollbackOnDisable,omitempty" tf:"rollback_on_disable,omitempty"` +} + +type AutoTuneOptionsObservation struct { + + // The Auto-Tune desired state for the domain. 
Valid values: ENABLED or DISABLED. + DesiredState *string `json:"desiredState,omitempty" tf:"desired_state,omitempty"` + + // Configuration block for Auto-Tune maintenance windows. Can be specified multiple times for each maintenance window. Detailed below. + MaintenanceSchedule []MaintenanceScheduleObservation `json:"maintenanceSchedule,omitempty" tf:"maintenance_schedule,omitempty"` + + // Whether to roll back to default Auto-Tune settings when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK or NO_ROLLBACK. + RollbackOnDisable *string `json:"rollbackOnDisable,omitempty" tf:"rollback_on_disable,omitempty"` +} + +type AutoTuneOptionsParameters struct { + + // The Auto-Tune desired state for the domain. Valid values: ENABLED or DISABLED. + // +kubebuilder:validation:Optional + DesiredState *string `json:"desiredState" tf:"desired_state,omitempty"` + + // Configuration block for Auto-Tune maintenance windows. Can be specified multiple times for each maintenance window. Detailed below. + // +kubebuilder:validation:Optional + MaintenanceSchedule []MaintenanceScheduleParameters `json:"maintenanceSchedule,omitempty" tf:"maintenance_schedule,omitempty"` + + // Whether to roll back to default Auto-Tune settings when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK or NO_ROLLBACK. + // +kubebuilder:validation:Optional + RollbackOnDisable *string `json:"rollbackOnDisable,omitempty" tf:"rollback_on_disable,omitempty"` +} + +type ClusterConfigInitParameters struct { + + // Configuration block containing cold storage configuration. Detailed below. + ColdStorageOptions *ColdStorageOptionsInitParameters `json:"coldStorageOptions,omitempty" tf:"cold_storage_options,omitempty"` + + // Number of dedicated main nodes in the cluster. + DedicatedMasterCount *float64 `json:"dedicatedMasterCount,omitempty" tf:"dedicated_master_count,omitempty"` + + // Whether dedicated main nodes are enabled for the cluster. 
+ DedicatedMasterEnabled *bool `json:"dedicatedMasterEnabled,omitempty" tf:"dedicated_master_enabled,omitempty"` + + // Instance type of the dedicated main nodes in the cluster. + DedicatedMasterType *string `json:"dedicatedMasterType,omitempty" tf:"dedicated_master_type,omitempty"` + + // Number of instances in the cluster. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Instance type of data nodes in the cluster. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Number of warm nodes in the cluster. Valid values are between 2 and 150. warm_count can be only and must be set when warm_enabled is set to true. + WarmCount *float64 `json:"warmCount,omitempty" tf:"warm_count,omitempty"` + + // Whether to enable warm storage. + WarmEnabled *bool `json:"warmEnabled,omitempty" tf:"warm_enabled,omitempty"` + + // Instance type for the Elasticsearch cluster's warm nodes. Valid values are ultrawarm1.medium.elasticsearch, ultrawarm1.large.elasticsearch and ultrawarm1.xlarge.elasticsearch. warm_type can be only and must be set when warm_enabled is set to true. + WarmType *string `json:"warmType,omitempty" tf:"warm_type,omitempty"` + + // Configuration block containing zone awareness settings. Detailed below. + ZoneAwarenessConfig *ZoneAwarenessConfigInitParameters `json:"zoneAwarenessConfig,omitempty" tf:"zone_awareness_config,omitempty"` + + // Whether zone awareness is enabled, set to true for multi-az deployment. To enable awareness with three Availability Zones, the availability_zone_count within the zone_awareness_config must be set to 3. + ZoneAwarenessEnabled *bool `json:"zoneAwarenessEnabled,omitempty" tf:"zone_awareness_enabled,omitempty"` +} + +type ClusterConfigObservation struct { + + // Configuration block containing cold storage configuration. Detailed below. 
+ ColdStorageOptions *ColdStorageOptionsObservation `json:"coldStorageOptions,omitempty" tf:"cold_storage_options,omitempty"` + + // Number of dedicated main nodes in the cluster. + DedicatedMasterCount *float64 `json:"dedicatedMasterCount,omitempty" tf:"dedicated_master_count,omitempty"` + + // Whether dedicated main nodes are enabled for the cluster. + DedicatedMasterEnabled *bool `json:"dedicatedMasterEnabled,omitempty" tf:"dedicated_master_enabled,omitempty"` + + // Instance type of the dedicated main nodes in the cluster. + DedicatedMasterType *string `json:"dedicatedMasterType,omitempty" tf:"dedicated_master_type,omitempty"` + + // Number of instances in the cluster. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Instance type of data nodes in the cluster. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Number of warm nodes in the cluster. Valid values are between 2 and 150. warm_count can be only and must be set when warm_enabled is set to true. + WarmCount *float64 `json:"warmCount,omitempty" tf:"warm_count,omitempty"` + + // Whether to enable warm storage. + WarmEnabled *bool `json:"warmEnabled,omitempty" tf:"warm_enabled,omitempty"` + + // Instance type for the Elasticsearch cluster's warm nodes. Valid values are ultrawarm1.medium.elasticsearch, ultrawarm1.large.elasticsearch and ultrawarm1.xlarge.elasticsearch. warm_type can be only and must be set when warm_enabled is set to true. + WarmType *string `json:"warmType,omitempty" tf:"warm_type,omitempty"` + + // Configuration block containing zone awareness settings. Detailed below. + ZoneAwarenessConfig *ZoneAwarenessConfigObservation `json:"zoneAwarenessConfig,omitempty" tf:"zone_awareness_config,omitempty"` + + // Whether zone awareness is enabled, set to true for multi-az deployment. To enable awareness with three Availability Zones, the availability_zone_count within the zone_awareness_config must be set to 3. 
+ ZoneAwarenessEnabled *bool `json:"zoneAwarenessEnabled,omitempty" tf:"zone_awareness_enabled,omitempty"` +} + +type ClusterConfigParameters struct { + + // Configuration block containing cold storage configuration. Detailed below. + // +kubebuilder:validation:Optional + ColdStorageOptions *ColdStorageOptionsParameters `json:"coldStorageOptions,omitempty" tf:"cold_storage_options,omitempty"` + + // Number of dedicated main nodes in the cluster. + // +kubebuilder:validation:Optional + DedicatedMasterCount *float64 `json:"dedicatedMasterCount,omitempty" tf:"dedicated_master_count,omitempty"` + + // Whether dedicated main nodes are enabled for the cluster. + // +kubebuilder:validation:Optional + DedicatedMasterEnabled *bool `json:"dedicatedMasterEnabled,omitempty" tf:"dedicated_master_enabled,omitempty"` + + // Instance type of the dedicated main nodes in the cluster. + // +kubebuilder:validation:Optional + DedicatedMasterType *string `json:"dedicatedMasterType,omitempty" tf:"dedicated_master_type,omitempty"` + + // Number of instances in the cluster. + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Instance type of data nodes in the cluster. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Number of warm nodes in the cluster. Valid values are between 2 and 150. warm_count can be only and must be set when warm_enabled is set to true. + // +kubebuilder:validation:Optional + WarmCount *float64 `json:"warmCount,omitempty" tf:"warm_count,omitempty"` + + // Whether to enable warm storage. + // +kubebuilder:validation:Optional + WarmEnabled *bool `json:"warmEnabled,omitempty" tf:"warm_enabled,omitempty"` + + // Instance type for the Elasticsearch cluster's warm nodes. Valid values are ultrawarm1.medium.elasticsearch, ultrawarm1.large.elasticsearch and ultrawarm1.xlarge.elasticsearch. 
warm_type can be only and must be set when warm_enabled is set to true. + // +kubebuilder:validation:Optional + WarmType *string `json:"warmType,omitempty" tf:"warm_type,omitempty"` + + // Configuration block containing zone awareness settings. Detailed below. + // +kubebuilder:validation:Optional + ZoneAwarenessConfig *ZoneAwarenessConfigParameters `json:"zoneAwarenessConfig,omitempty" tf:"zone_awareness_config,omitempty"` + + // Whether zone awareness is enabled, set to true for multi-az deployment. To enable awareness with three Availability Zones, the availability_zone_count within the zone_awareness_config must be set to 3. + // +kubebuilder:validation:Optional + ZoneAwarenessEnabled *bool `json:"zoneAwarenessEnabled,omitempty" tf:"zone_awareness_enabled,omitempty"` +} + +type CognitoOptionsInitParameters struct { + + // Whether Amazon Cognito authentication with Kibana is enabled or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // ID of the Cognito Identity Pool to use. + IdentityPoolID *string `json:"identityPoolId,omitempty" tf:"identity_pool_id,omitempty"` + + // ARN of the IAM role that has the AmazonESCognitoAccess policy attached. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // ID of the Cognito User Pool to use. + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` +} + +type CognitoOptionsObservation struct { + + // Whether Amazon Cognito authentication with Kibana is enabled or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // ID of the Cognito Identity Pool to use. + IdentityPoolID *string `json:"identityPoolId,omitempty" tf:"identity_pool_id,omitempty"` + + // ARN of the IAM role that has the AmazonESCognitoAccess policy attached. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // ID of the Cognito User Pool to use. 
+ UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` +} + +type CognitoOptionsParameters struct { + + // Whether Amazon Cognito authentication with Kibana is enabled or not. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // ID of the Cognito Identity Pool to use. + // +kubebuilder:validation:Optional + IdentityPoolID *string `json:"identityPoolId" tf:"identity_pool_id,omitempty"` + + // ARN of the IAM role that has the AmazonESCognitoAccess policy attached. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // ID of the Cognito User Pool to use. + // +kubebuilder:validation:Optional + UserPoolID *string `json:"userPoolId" tf:"user_pool_id,omitempty"` +} + +type ColdStorageOptionsInitParameters struct { + + // Boolean to enable cold storage for an Elasticsearch domain. Defaults to false. Master and ultrawarm nodes must be enabled for cold storage. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ColdStorageOptionsObservation struct { + + // Boolean to enable cold storage for an Elasticsearch domain. Defaults to false. Master and ultrawarm nodes must be enabled for cold storage. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ColdStorageOptionsParameters struct { + + // Boolean to enable cold storage for an Elasticsearch domain. Defaults to false. Master and ultrawarm nodes must be enabled for cold storage. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type DomainEndpointOptionsInitParameters struct { + + // Fully qualified domain for your custom endpoint. + CustomEndpoint *string `json:"customEndpoint,omitempty" tf:"custom_endpoint,omitempty"` + + // ACM certificate ARN for your custom endpoint. 
+ CustomEndpointCertificateArn *string `json:"customEndpointCertificateArn,omitempty" tf:"custom_endpoint_certificate_arn,omitempty"` + + // Whether to enable custom endpoint for the Elasticsearch domain. + CustomEndpointEnabled *bool `json:"customEndpointEnabled,omitempty" tf:"custom_endpoint_enabled,omitempty"` + + // Whether or not to require HTTPS. Defaults to true. + EnforceHTTPS *bool `json:"enforceHttps,omitempty" tf:"enforce_https,omitempty"` + + // Name of the TLS security policy that needs to be applied to the HTTPS endpoint. Valid values: Policy-Min-TLS-1-0-2019-07 and Policy-Min-TLS-1-2-2019-07. + TLSSecurityPolicy *string `json:"tlsSecurityPolicy,omitempty" tf:"tls_security_policy,omitempty"` +} + +type DomainEndpointOptionsObservation struct { + + // Fully qualified domain for your custom endpoint. + CustomEndpoint *string `json:"customEndpoint,omitempty" tf:"custom_endpoint,omitempty"` + + // ACM certificate ARN for your custom endpoint. + CustomEndpointCertificateArn *string `json:"customEndpointCertificateArn,omitempty" tf:"custom_endpoint_certificate_arn,omitempty"` + + // Whether to enable custom endpoint for the Elasticsearch domain. + CustomEndpointEnabled *bool `json:"customEndpointEnabled,omitempty" tf:"custom_endpoint_enabled,omitempty"` + + // Whether or not to require HTTPS. Defaults to true. + EnforceHTTPS *bool `json:"enforceHttps,omitempty" tf:"enforce_https,omitempty"` + + // Name of the TLS security policy that needs to be applied to the HTTPS endpoint. Valid values: Policy-Min-TLS-1-0-2019-07 and Policy-Min-TLS-1-2-2019-07. + TLSSecurityPolicy *string `json:"tlsSecurityPolicy,omitempty" tf:"tls_security_policy,omitempty"` +} + +type DomainEndpointOptionsParameters struct { + + // Fully qualified domain for your custom endpoint. + // +kubebuilder:validation:Optional + CustomEndpoint *string `json:"customEndpoint,omitempty" tf:"custom_endpoint,omitempty"` + + // ACM certificate ARN for your custom endpoint. 
+ // +kubebuilder:validation:Optional + CustomEndpointCertificateArn *string `json:"customEndpointCertificateArn,omitempty" tf:"custom_endpoint_certificate_arn,omitempty"` + + // Whether to enable custom endpoint for the Elasticsearch domain. + // +kubebuilder:validation:Optional + CustomEndpointEnabled *bool `json:"customEndpointEnabled,omitempty" tf:"custom_endpoint_enabled,omitempty"` + + // Whether or not to require HTTPS. Defaults to true. + // +kubebuilder:validation:Optional + EnforceHTTPS *bool `json:"enforceHttps,omitempty" tf:"enforce_https,omitempty"` + + // Name of the TLS security policy that needs to be applied to the HTTPS endpoint. Valid values: Policy-Min-TLS-1-0-2019-07 and Policy-Min-TLS-1-2-2019-07. + // +kubebuilder:validation:Optional + TLSSecurityPolicy *string `json:"tlsSecurityPolicy,omitempty" tf:"tls_security_policy,omitempty"` +} + +type DomainInitParameters struct { + + // IAM policy document specifying the access policies for the domain. + AccessPolicies *string `json:"accessPolicies,omitempty" tf:"access_policies,omitempty"` + + // Key-value string pairs to specify advanced configuration options. + // +mapType=granular + AdvancedOptions map[string]*string `json:"advancedOptions,omitempty" tf:"advanced_options,omitempty"` + + // Configuration block for fine-grained access control. Detailed below. + AdvancedSecurityOptions *AdvancedSecurityOptionsInitParameters `json:"advancedSecurityOptions,omitempty" tf:"advanced_security_options,omitempty"` + + // Configuration block for the Auto-Tune options of the domain. Detailed below. + AutoTuneOptions *AutoTuneOptionsInitParameters `json:"autoTuneOptions,omitempty" tf:"auto_tune_options,omitempty"` + + // Configuration block for the cluster of the domain. Detailed below. + ClusterConfig *ClusterConfigInitParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Configuration block for authenticating Kibana with Cognito. Detailed below. 
+ CognitoOptions *CognitoOptionsInitParameters `json:"cognitoOptions,omitempty" tf:"cognito_options,omitempty"` + + // Configuration block for domain endpoint HTTP(S) related options. Detailed below. + DomainEndpointOptions *DomainEndpointOptionsInitParameters `json:"domainEndpointOptions,omitempty" tf:"domain_endpoint_options,omitempty"` + + // Configuration block for EBS related options, may be required based on chosen instance size. Detailed below. + EBSOptions *EBSOptionsInitParameters `json:"ebsOptions,omitempty" tf:"ebs_options,omitempty"` + + // Version of Elasticsearch to deploy. Defaults to 1.5. + ElasticsearchVersion *string `json:"elasticsearchVersion,omitempty" tf:"elasticsearch_version,omitempty"` + + // Configuration block for encrypt at rest options. Only available for certain instance types. Detailed below. + EncryptAtRest *EncryptAtRestInitParameters `json:"encryptAtRest,omitempty" tf:"encrypt_at_rest,omitempty"` + + // Configuration block for publishing slow and application logs to CloudWatch Logs. This block can be declared multiple times, for each log_type, within the same resource. Detailed below. + LogPublishingOptions []LogPublishingOptionsInitParameters `json:"logPublishingOptions,omitempty" tf:"log_publishing_options,omitempty"` + + // Configuration block for node-to-node encryption options. Detailed below. + NodeToNodeEncryption *NodeToNodeEncryptionInitParameters `json:"nodeToNodeEncryption,omitempty" tf:"node_to_node_encryption,omitempty"` + + // Configuration block for snapshot related options. Detailed below. DEPRECATED. For domains running Elasticsearch 5.3 and later, Amazon ES takes hourly automated snapshots, making this setting irrelevant. For domains running earlier versions of Elasticsearch, Amazon ES takes daily automated snapshots. + SnapshotOptions *SnapshotOptionsInitParameters `json:"snapshotOptions,omitempty" tf:"snapshot_options,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for VPC related options. Adding or removing this configuration forces a new resource (documentation). Detailed below. + VPCOptions *VPCOptionsInitParameters `json:"vpcOptions,omitempty" tf:"vpc_options,omitempty"` +} + +type DomainObservation struct { + + // IAM policy document specifying the access policies for the domain. + AccessPolicies *string `json:"accessPolicies,omitempty" tf:"access_policies,omitempty"` + + // Key-value string pairs to specify advanced configuration options. + // +mapType=granular + AdvancedOptions map[string]*string `json:"advancedOptions,omitempty" tf:"advanced_options,omitempty"` + + // Configuration block for fine-grained access control. Detailed below. + AdvancedSecurityOptions *AdvancedSecurityOptionsObservation `json:"advancedSecurityOptions,omitempty" tf:"advanced_security_options,omitempty"` + + // ARN of the domain. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration block for the Auto-Tune options of the domain. Detailed below. + AutoTuneOptions *AutoTuneOptionsObservation `json:"autoTuneOptions,omitempty" tf:"auto_tune_options,omitempty"` + + // Configuration block for the cluster of the domain. Detailed below. + ClusterConfig *ClusterConfigObservation `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Configuration block for authenticating Kibana with Cognito. Detailed below. + CognitoOptions *CognitoOptionsObservation `json:"cognitoOptions,omitempty" tf:"cognito_options,omitempty"` + + // Configuration block for domain endpoint HTTP(S) related options. Detailed below. + DomainEndpointOptions *DomainEndpointOptionsObservation `json:"domainEndpointOptions,omitempty" tf:"domain_endpoint_options,omitempty"` + + // Unique identifier for the domain. 
+ DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // Configuration block for EBS related options, may be required based on chosen instance size. Detailed below. + EBSOptions *EBSOptionsObservation `json:"ebsOptions,omitempty" tf:"ebs_options,omitempty"` + + // Version of Elasticsearch to deploy. Defaults to 1.5. + ElasticsearchVersion *string `json:"elasticsearchVersion,omitempty" tf:"elasticsearch_version,omitempty"` + + // Configuration block for encrypt at rest options. Only available for certain instance types. Detailed below. + EncryptAtRest *EncryptAtRestObservation `json:"encryptAtRest,omitempty" tf:"encrypt_at_rest,omitempty"` + + // Domain-specific endpoint used to submit index, search, and data upload requests. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Domain-specific endpoint for kibana without https scheme. + KibanaEndpoint *string `json:"kibanaEndpoint,omitempty" tf:"kibana_endpoint,omitempty"` + + // Configuration block for publishing slow and application logs to CloudWatch Logs. This block can be declared multiple times, for each log_type, within the same resource. Detailed below. + LogPublishingOptions []LogPublishingOptionsObservation `json:"logPublishingOptions,omitempty" tf:"log_publishing_options,omitempty"` + + // Configuration block for node-to-node encryption options. Detailed below. + NodeToNodeEncryption *NodeToNodeEncryptionObservation `json:"nodeToNodeEncryption,omitempty" tf:"node_to_node_encryption,omitempty"` + + // Configuration block for snapshot related options. Detailed below. DEPRECATED. For domains running Elasticsearch 5.3 and later, Amazon ES takes hourly automated snapshots, making this setting irrelevant. For domains running earlier versions of Elasticsearch, Amazon ES takes daily automated snapshots. 
+ SnapshotOptions *SnapshotOptionsObservation `json:"snapshotOptions,omitempty" tf:"snapshot_options,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block for VPC related options. Adding or removing this configuration forces a new resource (documentation). Detailed below. + VPCOptions *VPCOptionsObservation `json:"vpcOptions,omitempty" tf:"vpc_options,omitempty"` +} + +type DomainParameters struct { + + // IAM policy document specifying the access policies for the domain. + // +kubebuilder:validation:Optional + AccessPolicies *string `json:"accessPolicies,omitempty" tf:"access_policies,omitempty"` + + // Key-value string pairs to specify advanced configuration options. + // +kubebuilder:validation:Optional + // +mapType=granular + AdvancedOptions map[string]*string `json:"advancedOptions,omitempty" tf:"advanced_options,omitempty"` + + // Configuration block for fine-grained access control. Detailed below. + // +kubebuilder:validation:Optional + AdvancedSecurityOptions *AdvancedSecurityOptionsParameters `json:"advancedSecurityOptions,omitempty" tf:"advanced_security_options,omitempty"` + + // Configuration block for the Auto-Tune options of the domain. Detailed below. + // +kubebuilder:validation:Optional + AutoTuneOptions *AutoTuneOptionsParameters `json:"autoTuneOptions,omitempty" tf:"auto_tune_options,omitempty"` + + // Configuration block for the cluster of the domain. Detailed below. + // +kubebuilder:validation:Optional + ClusterConfig *ClusterConfigParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Configuration block for authenticating Kibana with Cognito. Detailed below. 
+ // +kubebuilder:validation:Optional + CognitoOptions *CognitoOptionsParameters `json:"cognitoOptions,omitempty" tf:"cognito_options,omitempty"` + + // Configuration block for domain endpoint HTTP(S) related options. Detailed below. + // +kubebuilder:validation:Optional + DomainEndpointOptions *DomainEndpointOptionsParameters `json:"domainEndpointOptions,omitempty" tf:"domain_endpoint_options,omitempty"` + + // Configuration block for EBS related options, may be required based on chosen instance size. Detailed below. + // +kubebuilder:validation:Optional + EBSOptions *EBSOptionsParameters `json:"ebsOptions,omitempty" tf:"ebs_options,omitempty"` + + // Version of Elasticsearch to deploy. Defaults to 1.5. + // +kubebuilder:validation:Optional + ElasticsearchVersion *string `json:"elasticsearchVersion,omitempty" tf:"elasticsearch_version,omitempty"` + + // Configuration block for encrypt at rest options. Only available for certain instance types. Detailed below. + // +kubebuilder:validation:Optional + EncryptAtRest *EncryptAtRestParameters `json:"encryptAtRest,omitempty" tf:"encrypt_at_rest,omitempty"` + + // Configuration block for publishing slow and application logs to CloudWatch Logs. This block can be declared multiple times, for each log_type, within the same resource. Detailed below. + // +kubebuilder:validation:Optional + LogPublishingOptions []LogPublishingOptionsParameters `json:"logPublishingOptions,omitempty" tf:"log_publishing_options,omitempty"` + + // Configuration block for node-to-node encryption options. Detailed below. + // +kubebuilder:validation:Optional + NodeToNodeEncryption *NodeToNodeEncryptionParameters `json:"nodeToNodeEncryption,omitempty" tf:"node_to_node_encryption,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for snapshot related options. Detailed below. DEPRECATED. 
For domains running Elasticsearch 5.3 and later, Amazon ES takes hourly automated snapshots, making this setting irrelevant. For domains running earlier versions of Elasticsearch, Amazon ES takes daily automated snapshots. + // +kubebuilder:validation:Optional + SnapshotOptions *SnapshotOptionsParameters `json:"snapshotOptions,omitempty" tf:"snapshot_options,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for VPC related options. Adding or removing this configuration forces a new resource (documentation). Detailed below. + // +kubebuilder:validation:Optional + VPCOptions *VPCOptionsParameters `json:"vpcOptions,omitempty" tf:"vpc_options,omitempty"` +} + +type DurationInitParameters struct { + + // The unit of time specifying the duration of an Auto-Tune maintenance window. Valid values: HOURS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // An integer specifying the value of the duration of an Auto-Tune maintenance window. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type DurationObservation struct { + + // The unit of time specifying the duration of an Auto-Tune maintenance window. Valid values: HOURS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // An integer specifying the value of the duration of an Auto-Tune maintenance window. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type DurationParameters struct { + + // The unit of time specifying the duration of an Auto-Tune maintenance window. Valid values: HOURS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // An integer specifying the value of the duration of an Auto-Tune maintenance window. 
+ // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type EBSOptionsInitParameters struct { + + // Whether EBS volumes are attached to data nodes in the domain. + EBSEnabled *bool `json:"ebsEnabled,omitempty" tf:"ebs_enabled,omitempty"` + + // Baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the GP3 and Provisioned IOPS EBS volume types. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of EBS volumes attached to data nodes (in GiB). + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of EBS volumes attached to data nodes. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSOptionsObservation struct { + + // Whether EBS volumes are attached to data nodes in the domain. + EBSEnabled *bool `json:"ebsEnabled,omitempty" tf:"ebs_enabled,omitempty"` + + // Baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the GP3 and Provisioned IOPS EBS volume types. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of EBS volumes attached to data nodes (in GiB). + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of EBS volumes attached to data nodes. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSOptionsParameters struct { + + // Whether EBS volumes are attached to data nodes in the domain. 
+ // +kubebuilder:validation:Optional + EBSEnabled *bool `json:"ebsEnabled" tf:"ebs_enabled,omitempty"` + + // Baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the GP3 and Provisioned IOPS EBS volume types. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of EBS volumes attached to data nodes (in GiB). + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of EBS volumes attached to data nodes. + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EncryptAtRestInitParameters struct { + + // Whether to enable encryption at rest. If the encrypt_at_rest block is not provided then this defaults to false. Enabling encryption on new domains requires elasticsearch_version 5.1 or greater. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // KMS key ARN to encrypt the Elasticsearch domain with. If not specified then it defaults to using the aws/es service KMS key. Note that KMS will accept a KMS key ID but will return the key ARN. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type EncryptAtRestObservation struct { + + // Whether to enable encryption at rest. If the encrypt_at_rest block is not provided then this defaults to false. Enabling encryption on new domains requires elasticsearch_version 5.1 or greater. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // KMS key ARN to encrypt the Elasticsearch domain with. If not specified then it defaults to using the aws/es service KMS key. 
Note that KMS will accept a KMS key ID but will return the key ARN. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type EncryptAtRestParameters struct { + + // Whether to enable encryption at rest. If the encrypt_at_rest block is not provided then this defaults to false. Enabling encryption on new domains requires elasticsearch_version 5.1 or greater. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // KMS key ARN to encrypt the Elasticsearch domain with. If not specified then it defaults to using the aws/es service KMS key. Note that KMS will accept a KMS key ID but will return the key ARN. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type LogPublishingOptionsInitParameters struct { + + // ARN of the Cloudwatch log group to which log needs to be published. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + CloudwatchLogGroupArn *string `json:"cloudwatchLogGroupArn,omitempty" tf:"cloudwatch_log_group_arn,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnRef *v1.Reference `json:"cloudwatchLogGroupArnRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnSelector *v1.Selector `json:"cloudwatchLogGroupArnSelector,omitempty" tf:"-"` + + // Whether given log publishing option is enabled or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Type of Elasticsearch log. Valid values: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS. 
+ LogType *string `json:"logType,omitempty" tf:"log_type,omitempty"` +} + +type LogPublishingOptionsObservation struct { + + // ARN of the Cloudwatch log group to which log needs to be published. + CloudwatchLogGroupArn *string `json:"cloudwatchLogGroupArn,omitempty" tf:"cloudwatch_log_group_arn,omitempty"` + + // Whether given log publishing option is enabled or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Type of Elasticsearch log. Valid values: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS. + LogType *string `json:"logType,omitempty" tf:"log_type,omitempty"` +} + +type LogPublishingOptionsParameters struct { + + // ARN of the Cloudwatch log group to which log needs to be published. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + CloudwatchLogGroupArn *string `json:"cloudwatchLogGroupArn,omitempty" tf:"cloudwatch_log_group_arn,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnRef *v1.Reference `json:"cloudwatchLogGroupArnRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnSelector *v1.Selector `json:"cloudwatchLogGroupArnSelector,omitempty" tf:"-"` + + // Whether given log publishing option is enabled or not. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Type of Elasticsearch log. Valid values: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS. 
+ // +kubebuilder:validation:Optional + LogType *string `json:"logType" tf:"log_type,omitempty"` +} + +type MaintenanceScheduleInitParameters struct { + + // A cron expression specifying the recurrence pattern for an Auto-Tune maintenance schedule. + CronExpressionForRecurrence *string `json:"cronExpressionForRecurrence,omitempty" tf:"cron_expression_for_recurrence,omitempty"` + + // Configuration block for the duration of the Auto-Tune maintenance window. Detailed below. + Duration *DurationInitParameters `json:"duration,omitempty" tf:"duration,omitempty"` + + // Date and time at which to start the Auto-Tune maintenance schedule in RFC3339 format. + StartAt *string `json:"startAt,omitempty" tf:"start_at,omitempty"` +} + +type MaintenanceScheduleObservation struct { + + // A cron expression specifying the recurrence pattern for an Auto-Tune maintenance schedule. + CronExpressionForRecurrence *string `json:"cronExpressionForRecurrence,omitempty" tf:"cron_expression_for_recurrence,omitempty"` + + // Configuration block for the duration of the Auto-Tune maintenance window. Detailed below. + Duration *DurationObservation `json:"duration,omitempty" tf:"duration,omitempty"` + + // Date and time at which to start the Auto-Tune maintenance schedule in RFC3339 format. + StartAt *string `json:"startAt,omitempty" tf:"start_at,omitempty"` +} + +type MaintenanceScheduleParameters struct { + + // A cron expression specifying the recurrence pattern for an Auto-Tune maintenance schedule. + // +kubebuilder:validation:Optional + CronExpressionForRecurrence *string `json:"cronExpressionForRecurrence" tf:"cron_expression_for_recurrence,omitempty"` + + // Configuration block for the duration of the Auto-Tune maintenance window. Detailed below. + // +kubebuilder:validation:Optional + Duration *DurationParameters `json:"duration" tf:"duration,omitempty"` + + // Date and time at which to start the Auto-Tune maintenance schedule in RFC3339 format. 
+ // +kubebuilder:validation:Optional + StartAt *string `json:"startAt" tf:"start_at,omitempty"` +} + +type MasterUserOptionsInitParameters struct { + + // ARN for the main user. Only specify if internal_user_database_enabled is not set or set to false. + MasterUserArn *string `json:"masterUserArn,omitempty" tf:"master_user_arn,omitempty"` + + // Main user's username, which is stored in the Amazon Elasticsearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. + MasterUserName *string `json:"masterUserName,omitempty" tf:"master_user_name,omitempty"` + + // Main user's password, which is stored in the Amazon Elasticsearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. + MasterUserPasswordSecretRef *v1.SecretKeySelector `json:"masterUserPasswordSecretRef,omitempty" tf:"-"` +} + +type MasterUserOptionsObservation struct { + + // ARN for the main user. Only specify if internal_user_database_enabled is not set or set to false. + MasterUserArn *string `json:"masterUserArn,omitempty" tf:"master_user_arn,omitempty"` + + // Main user's username, which is stored in the Amazon Elasticsearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. + MasterUserName *string `json:"masterUserName,omitempty" tf:"master_user_name,omitempty"` +} + +type MasterUserOptionsParameters struct { + + // ARN for the main user. Only specify if internal_user_database_enabled is not set or set to false. + // +kubebuilder:validation:Optional + MasterUserArn *string `json:"masterUserArn,omitempty" tf:"master_user_arn,omitempty"` + + // Main user's username, which is stored in the Amazon Elasticsearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. 
+ // +kubebuilder:validation:Optional + MasterUserName *string `json:"masterUserName,omitempty" tf:"master_user_name,omitempty"` + + // Main user's password, which is stored in the Amazon Elasticsearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. + // +kubebuilder:validation:Optional + MasterUserPasswordSecretRef *v1.SecretKeySelector `json:"masterUserPasswordSecretRef,omitempty" tf:"-"` +} + +type NodeToNodeEncryptionInitParameters struct { + + // Whether to enable node-to-node encryption. If the node_to_node_encryption block is not provided then this defaults to false. Enabling node-to-node encryption of a new domain requires an elasticsearch_version of 6.0 or greater. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type NodeToNodeEncryptionObservation struct { + + // Whether to enable node-to-node encryption. If the node_to_node_encryption block is not provided then this defaults to false. Enabling node-to-node encryption of a new domain requires an elasticsearch_version of 6.0 or greater. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type NodeToNodeEncryptionParameters struct { + + // Whether to enable node-to-node encryption. If the node_to_node_encryption block is not provided then this defaults to false. Enabling node-to-node encryption of a new domain requires an elasticsearch_version of 6.0 or greater. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type SnapshotOptionsInitParameters struct { + + // Hour during which the service takes an automated daily snapshot of the indices in the domain. + AutomatedSnapshotStartHour *float64 `json:"automatedSnapshotStartHour,omitempty" tf:"automated_snapshot_start_hour,omitempty"` +} + +type SnapshotOptionsObservation struct { + + // Hour during which the service takes an automated daily snapshot of the indices in the domain. 
+ AutomatedSnapshotStartHour *float64 `json:"automatedSnapshotStartHour,omitempty" tf:"automated_snapshot_start_hour,omitempty"` +} + +type SnapshotOptionsParameters struct { + + // Hour during which the service takes an automated daily snapshot of the indices in the domain. + // +kubebuilder:validation:Optional + AutomatedSnapshotStartHour *float64 `json:"automatedSnapshotStartHour" tf:"automated_snapshot_start_hour,omitempty"` +} + +type VPCOptionsInitParameters struct { + + // List of VPC Security Group IDs to be applied to the Elasticsearch domain endpoints. If omitted, the default Security Group for the VPC will be used. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created in. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCOptionsObservation struct { + + // If the domain was created inside a VPC, the names of the availability zones the configured subnet_ids were created inside. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // List of VPC Security Group IDs to be applied to the Elasticsearch domain endpoints. If omitted, the default Security Group for the VPC will be used. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created in. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // If the domain was created inside a VPC, the ID of the VPC. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCOptionsParameters struct { + + // List of VPC Security Group IDs to be applied to the Elasticsearch domain endpoints. 
If omitted, the default Security Group for the VPC will be used. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created in. + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type ZoneAwarenessConfigInitParameters struct { + + // Number of Availability Zones for the domain to use with zone_awareness_enabled. Defaults to 2. Valid values: 2 or 3. + AvailabilityZoneCount *float64 `json:"availabilityZoneCount,omitempty" tf:"availability_zone_count,omitempty"` +} + +type ZoneAwarenessConfigObservation struct { + + // Number of Availability Zones for the domain to use with zone_awareness_enabled. Defaults to 2. Valid values: 2 or 3. + AvailabilityZoneCount *float64 `json:"availabilityZoneCount,omitempty" tf:"availability_zone_count,omitempty"` +} + +type ZoneAwarenessConfigParameters struct { + + // Number of Availability Zones for the domain to use with zone_awareness_enabled. Defaults to 2. Valid values: 2 or 3. + // +kubebuilder:validation:Optional + AvailabilityZoneCount *float64 `json:"availabilityZoneCount,omitempty" tf:"availability_zone_count,omitempty"` +} + +// DomainSpec defines the desired state of Domain +type DomainSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DomainParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DomainInitParameters `json:"initProvider,omitempty"` +} + +// DomainStatus defines the observed state of Domain. +type DomainStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DomainObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Domain is the Schema for the Domains API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Domain struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DomainSpec `json:"spec"` + Status DomainStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DomainList contains a list of Domains +type DomainList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Domain `json:"items"` +} + +// Repository type metadata. +var ( + Domain_Kind = "Domain" + Domain_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Domain_Kind}.String() + Domain_KindAPIVersion = Domain_Kind + "." 
+ CRDGroupVersion.String() + Domain_GroupVersionKind = CRDGroupVersion.WithKind(Domain_Kind) +) + +func init() { + SchemeBuilder.Register(&Domain{}, &DomainList{}) +} diff --git a/apis/elasticsearch/v1beta2/zz_domainsamloptions_terraformed.go b/apis/elasticsearch/v1beta2/zz_domainsamloptions_terraformed.go new file mode 100755 index 0000000000..6bf7eed224 --- /dev/null +++ b/apis/elasticsearch/v1beta2/zz_domainsamloptions_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DomainSAMLOptions +func (mg *DomainSAMLOptions) GetTerraformResourceType() string { + return "aws_elasticsearch_domain_saml_options" +} + +// GetConnectionDetailsMapping for this DomainSAMLOptions +func (tr *DomainSAMLOptions) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"saml_options[*].master_user_name": "samlOptions[*].masterUserNameSecretRef"} +} + +// GetObservation of this DomainSAMLOptions +func (tr *DomainSAMLOptions) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DomainSAMLOptions +func (tr *DomainSAMLOptions) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DomainSAMLOptions +func (tr *DomainSAMLOptions) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this DomainSAMLOptions +func (tr *DomainSAMLOptions) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DomainSAMLOptions +func (tr *DomainSAMLOptions) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DomainSAMLOptions +func (tr *DomainSAMLOptions) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DomainSAMLOptions +func (tr *DomainSAMLOptions) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DomainSAMLOptions using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DomainSAMLOptions) LateInitialize(attrs []byte) (bool, error) { + params := &DomainSAMLOptionsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DomainSAMLOptions) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elasticsearch/v1beta2/zz_domainsamloptions_types.go b/apis/elasticsearch/v1beta2/zz_domainsamloptions_types.go new file mode 100755 index 0000000000..a0049716a0 --- /dev/null +++ b/apis/elasticsearch/v1beta2/zz_domainsamloptions_types.go @@ -0,0 +1,206 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DomainSAMLOptionsInitParameters struct { + + // The SAML authentication options for an AWS Elasticsearch Domain.
+ SAMLOptions *SAMLOptionsInitParameters `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` +} + +type DomainSAMLOptionsObservation struct { + + // The name of the domain the SAML options are associated with. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The SAML authentication options for an AWS Elasticsearch Domain. + SAMLOptions *SAMLOptionsObservation `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` +} + +type DomainSAMLOptionsParameters struct { + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The SAML authentication options for an AWS Elasticsearch Domain. + // +kubebuilder:validation:Optional + SAMLOptions *SAMLOptionsParameters `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` +} + +type IdpInitParameters struct { + + // The unique Entity ID of the application in SAML Identity Provider. + EntityID *string `json:"entityId,omitempty" tf:"entity_id,omitempty"` + + // The Metadata of the SAML application in xml format. + MetadataContent *string `json:"metadataContent,omitempty" tf:"metadata_content,omitempty"` +} + +type IdpObservation struct { + + // The unique Entity ID of the application in SAML Identity Provider. + EntityID *string `json:"entityId,omitempty" tf:"entity_id,omitempty"` + + // The Metadata of the SAML application in xml format. + MetadataContent *string `json:"metadataContent,omitempty" tf:"metadata_content,omitempty"` +} + +type IdpParameters struct { + + // The unique Entity ID of the application in SAML Identity Provider. + // +kubebuilder:validation:Optional + EntityID *string `json:"entityId" tf:"entity_id,omitempty"` + + // The Metadata of the SAML application in xml format. 
+ // +kubebuilder:validation:Optional + MetadataContent *string `json:"metadataContent" tf:"metadata_content,omitempty"` +} + +type SAMLOptionsInitParameters struct { + + // Whether SAML authentication is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Information from your identity provider. + Idp *IdpInitParameters `json:"idp,omitempty" tf:"idp,omitempty"` + + // This backend role from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. + MasterBackendRole *string `json:"masterBackendRole,omitempty" tf:"master_backend_role,omitempty"` + + // This username from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. + MasterUserNameSecretRef *v1.SecretKeySelector `json:"masterUserNameSecretRef,omitempty" tf:"-"` + + // Element of the SAML assertion to use for backend roles. Default is roles. + RolesKey *string `json:"rolesKey,omitempty" tf:"roles_key,omitempty"` + + // Duration of a session in minutes after a user logs in. Default is 60. Maximum value is 1,440. + SessionTimeoutMinutes *float64 `json:"sessionTimeoutMinutes,omitempty" tf:"session_timeout_minutes,omitempty"` + + // Custom SAML attribute to use for user names. Default is an empty string - "". This will cause Elasticsearch to use the NameID element of the Subject, which is the default location for name identifiers in the SAML specification. + SubjectKey *string `json:"subjectKey,omitempty" tf:"subject_key,omitempty"` +} + +type SAMLOptionsObservation struct { + + // Whether SAML authentication is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Information from your identity provider. + Idp *IdpObservation `json:"idp,omitempty" tf:"idp,omitempty"` + + // This backend role from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. 
+ MasterBackendRole *string `json:"masterBackendRole,omitempty" tf:"master_backend_role,omitempty"` + + // Element of the SAML assertion to use for backend roles. Default is roles. + RolesKey *string `json:"rolesKey,omitempty" tf:"roles_key,omitempty"` + + // Duration of a session in minutes after a user logs in. Default is 60. Maximum value is 1,440. + SessionTimeoutMinutes *float64 `json:"sessionTimeoutMinutes,omitempty" tf:"session_timeout_minutes,omitempty"` + + // Custom SAML attribute to use for user names. Default is an empty string - "". This will cause Elasticsearch to use the NameID element of the Subject, which is the default location for name identifiers in the SAML specification. + SubjectKey *string `json:"subjectKey,omitempty" tf:"subject_key,omitempty"` +} + +type SAMLOptionsParameters struct { + + // Whether SAML authentication is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Information from your identity provider. + // +kubebuilder:validation:Optional + Idp *IdpParameters `json:"idp,omitempty" tf:"idp,omitempty"` + + // This backend role from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. + // +kubebuilder:validation:Optional + MasterBackendRole *string `json:"masterBackendRole,omitempty" tf:"master_backend_role,omitempty"` + + // This username from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. + // +kubebuilder:validation:Optional + MasterUserNameSecretRef *v1.SecretKeySelector `json:"masterUserNameSecretRef,omitempty" tf:"-"` + + // Element of the SAML assertion to use for backend roles. Default is roles. + // +kubebuilder:validation:Optional + RolesKey *string `json:"rolesKey,omitempty" tf:"roles_key,omitempty"` + + // Duration of a session in minutes after a user logs in. Default is 60. Maximum value is 1,440. 
+ // +kubebuilder:validation:Optional + SessionTimeoutMinutes *float64 `json:"sessionTimeoutMinutes,omitempty" tf:"session_timeout_minutes,omitempty"` + + // Custom SAML attribute to use for user names. Default is an empty string - "". This will cause Elasticsearch to use the NameID element of the Subject, which is the default location for name identifiers in the SAML specification. + // +kubebuilder:validation:Optional + SubjectKey *string `json:"subjectKey,omitempty" tf:"subject_key,omitempty"` +} + +// DomainSAMLOptionsSpec defines the desired state of DomainSAMLOptions +type DomainSAMLOptionsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DomainSAMLOptionsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DomainSAMLOptionsInitParameters `json:"initProvider,omitempty"` +} + +// DomainSAMLOptionsStatus defines the observed state of DomainSAMLOptions. +type DomainSAMLOptionsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DomainSAMLOptionsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DomainSAMLOptions is the Schema for the DomainSAMLOptionss API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DomainSAMLOptions struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DomainSAMLOptionsSpec `json:"spec"` + Status DomainSAMLOptionsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DomainSAMLOptionsList contains a list of DomainSAMLOptionss +type DomainSAMLOptionsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DomainSAMLOptions `json:"items"` +} + +// Repository type metadata. +var ( + DomainSAMLOptions_Kind = "DomainSAMLOptions" + DomainSAMLOptions_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DomainSAMLOptions_Kind}.String() + DomainSAMLOptions_KindAPIVersion = DomainSAMLOptions_Kind + "." + CRDGroupVersion.String() + DomainSAMLOptions_GroupVersionKind = CRDGroupVersion.WithKind(DomainSAMLOptions_Kind) +) + +func init() { + SchemeBuilder.Register(&DomainSAMLOptions{}, &DomainSAMLOptionsList{}) +} diff --git a/apis/elasticsearch/v1beta2/zz_generated.conversion_hubs.go b/apis/elasticsearch/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..7f35a0518a --- /dev/null +++ b/apis/elasticsearch/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Domain) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DomainSAMLOptions) Hub() {} diff --git a/apis/elasticsearch/v1beta2/zz_generated.deepcopy.go b/apis/elasticsearch/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..649af51ea1 --- /dev/null +++ b/apis/elasticsearch/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2423 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedSecurityOptionsInitParameters) DeepCopyInto(out *AdvancedSecurityOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.InternalUserDatabaseEnabled != nil { + in, out := &in.InternalUserDatabaseEnabled, &out.InternalUserDatabaseEnabled + *out = new(bool) + **out = **in + } + if in.MasterUserOptions != nil { + in, out := &in.MasterUserOptions, &out.MasterUserOptions + *out = new(MasterUserOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSecurityOptionsInitParameters. +func (in *AdvancedSecurityOptionsInitParameters) DeepCopy() *AdvancedSecurityOptionsInitParameters { + if in == nil { + return nil + } + out := new(AdvancedSecurityOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedSecurityOptionsObservation) DeepCopyInto(out *AdvancedSecurityOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.InternalUserDatabaseEnabled != nil { + in, out := &in.InternalUserDatabaseEnabled, &out.InternalUserDatabaseEnabled + *out = new(bool) + **out = **in + } + if in.MasterUserOptions != nil { + in, out := &in.MasterUserOptions, &out.MasterUserOptions + *out = new(MasterUserOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSecurityOptionsObservation. +func (in *AdvancedSecurityOptionsObservation) DeepCopy() *AdvancedSecurityOptionsObservation { + if in == nil { + return nil + } + out := new(AdvancedSecurityOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedSecurityOptionsParameters) DeepCopyInto(out *AdvancedSecurityOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.InternalUserDatabaseEnabled != nil { + in, out := &in.InternalUserDatabaseEnabled, &out.InternalUserDatabaseEnabled + *out = new(bool) + **out = **in + } + if in.MasterUserOptions != nil { + in, out := &in.MasterUserOptions, &out.MasterUserOptions + *out = new(MasterUserOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSecurityOptionsParameters. 
+func (in *AdvancedSecurityOptionsParameters) DeepCopy() *AdvancedSecurityOptionsParameters { + if in == nil { + return nil + } + out := new(AdvancedSecurityOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoTuneOptionsInitParameters) DeepCopyInto(out *AutoTuneOptionsInitParameters) { + *out = *in + if in.DesiredState != nil { + in, out := &in.DesiredState, &out.DesiredState + *out = new(string) + **out = **in + } + if in.MaintenanceSchedule != nil { + in, out := &in.MaintenanceSchedule, &out.MaintenanceSchedule + *out = make([]MaintenanceScheduleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RollbackOnDisable != nil { + in, out := &in.RollbackOnDisable, &out.RollbackOnDisable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTuneOptionsInitParameters. +func (in *AutoTuneOptionsInitParameters) DeepCopy() *AutoTuneOptionsInitParameters { + if in == nil { + return nil + } + out := new(AutoTuneOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoTuneOptionsObservation) DeepCopyInto(out *AutoTuneOptionsObservation) { + *out = *in + if in.DesiredState != nil { + in, out := &in.DesiredState, &out.DesiredState + *out = new(string) + **out = **in + } + if in.MaintenanceSchedule != nil { + in, out := &in.MaintenanceSchedule, &out.MaintenanceSchedule + *out = make([]MaintenanceScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RollbackOnDisable != nil { + in, out := &in.RollbackOnDisable, &out.RollbackOnDisable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTuneOptionsObservation. +func (in *AutoTuneOptionsObservation) DeepCopy() *AutoTuneOptionsObservation { + if in == nil { + return nil + } + out := new(AutoTuneOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoTuneOptionsParameters) DeepCopyInto(out *AutoTuneOptionsParameters) { + *out = *in + if in.DesiredState != nil { + in, out := &in.DesiredState, &out.DesiredState + *out = new(string) + **out = **in + } + if in.MaintenanceSchedule != nil { + in, out := &in.MaintenanceSchedule, &out.MaintenanceSchedule + *out = make([]MaintenanceScheduleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RollbackOnDisable != nil { + in, out := &in.RollbackOnDisable, &out.RollbackOnDisable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTuneOptionsParameters. +func (in *AutoTuneOptionsParameters) DeepCopy() *AutoTuneOptionsParameters { + if in == nil { + return nil + } + out := new(AutoTuneOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ClusterConfigInitParameters) DeepCopyInto(out *ClusterConfigInitParameters) { + *out = *in + if in.ColdStorageOptions != nil { + in, out := &in.ColdStorageOptions, &out.ColdStorageOptions + *out = new(ColdStorageOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DedicatedMasterCount != nil { + in, out := &in.DedicatedMasterCount, &out.DedicatedMasterCount + *out = new(float64) + **out = **in + } + if in.DedicatedMasterEnabled != nil { + in, out := &in.DedicatedMasterEnabled, &out.DedicatedMasterEnabled + *out = new(bool) + **out = **in + } + if in.DedicatedMasterType != nil { + in, out := &in.DedicatedMasterType, &out.DedicatedMasterType + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.WarmCount != nil { + in, out := &in.WarmCount, &out.WarmCount + *out = new(float64) + **out = **in + } + if in.WarmEnabled != nil { + in, out := &in.WarmEnabled, &out.WarmEnabled + *out = new(bool) + **out = **in + } + if in.WarmType != nil { + in, out := &in.WarmType, &out.WarmType + *out = new(string) + **out = **in + } + if in.ZoneAwarenessConfig != nil { + in, out := &in.ZoneAwarenessConfig, &out.ZoneAwarenessConfig + *out = new(ZoneAwarenessConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ZoneAwarenessEnabled != nil { + in, out := &in.ZoneAwarenessEnabled, &out.ZoneAwarenessEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigInitParameters. 
+func (in *ClusterConfigInitParameters) DeepCopy() *ClusterConfigInitParameters { + if in == nil { + return nil + } + out := new(ClusterConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigObservation) DeepCopyInto(out *ClusterConfigObservation) { + *out = *in + if in.ColdStorageOptions != nil { + in, out := &in.ColdStorageOptions, &out.ColdStorageOptions + *out = new(ColdStorageOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.DedicatedMasterCount != nil { + in, out := &in.DedicatedMasterCount, &out.DedicatedMasterCount + *out = new(float64) + **out = **in + } + if in.DedicatedMasterEnabled != nil { + in, out := &in.DedicatedMasterEnabled, &out.DedicatedMasterEnabled + *out = new(bool) + **out = **in + } + if in.DedicatedMasterType != nil { + in, out := &in.DedicatedMasterType, &out.DedicatedMasterType + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.WarmCount != nil { + in, out := &in.WarmCount, &out.WarmCount + *out = new(float64) + **out = **in + } + if in.WarmEnabled != nil { + in, out := &in.WarmEnabled, &out.WarmEnabled + *out = new(bool) + **out = **in + } + if in.WarmType != nil { + in, out := &in.WarmType, &out.WarmType + *out = new(string) + **out = **in + } + if in.ZoneAwarenessConfig != nil { + in, out := &in.ZoneAwarenessConfig, &out.ZoneAwarenessConfig + *out = new(ZoneAwarenessConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ZoneAwarenessEnabled != nil { + in, out := &in.ZoneAwarenessEnabled, &out.ZoneAwarenessEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
ClusterConfigObservation. +func (in *ClusterConfigObservation) DeepCopy() *ClusterConfigObservation { + if in == nil { + return nil + } + out := new(ClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigParameters) DeepCopyInto(out *ClusterConfigParameters) { + *out = *in + if in.ColdStorageOptions != nil { + in, out := &in.ColdStorageOptions, &out.ColdStorageOptions + *out = new(ColdStorageOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.DedicatedMasterCount != nil { + in, out := &in.DedicatedMasterCount, &out.DedicatedMasterCount + *out = new(float64) + **out = **in + } + if in.DedicatedMasterEnabled != nil { + in, out := &in.DedicatedMasterEnabled, &out.DedicatedMasterEnabled + *out = new(bool) + **out = **in + } + if in.DedicatedMasterType != nil { + in, out := &in.DedicatedMasterType, &out.DedicatedMasterType + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.WarmCount != nil { + in, out := &in.WarmCount, &out.WarmCount + *out = new(float64) + **out = **in + } + if in.WarmEnabled != nil { + in, out := &in.WarmEnabled, &out.WarmEnabled + *out = new(bool) + **out = **in + } + if in.WarmType != nil { + in, out := &in.WarmType, &out.WarmType + *out = new(string) + **out = **in + } + if in.ZoneAwarenessConfig != nil { + in, out := &in.ZoneAwarenessConfig, &out.ZoneAwarenessConfig + *out = new(ZoneAwarenessConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.ZoneAwarenessEnabled != nil { + in, out := &in.ZoneAwarenessEnabled, &out.ZoneAwarenessEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new ClusterConfigParameters. +func (in *ClusterConfigParameters) DeepCopy() *ClusterConfigParameters { + if in == nil { + return nil + } + out := new(ClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CognitoOptionsInitParameters) DeepCopyInto(out *CognitoOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdentityPoolID != nil { + in, out := &in.IdentityPoolID, &out.IdentityPoolID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoOptionsInitParameters. +func (in *CognitoOptionsInitParameters) DeepCopy() *CognitoOptionsInitParameters { + if in == nil { + return nil + } + out := new(CognitoOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CognitoOptionsObservation) DeepCopyInto(out *CognitoOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdentityPoolID != nil { + in, out := &in.IdentityPoolID, &out.IdentityPoolID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoOptionsObservation. 
+func (in *CognitoOptionsObservation) DeepCopy() *CognitoOptionsObservation { + if in == nil { + return nil + } + out := new(CognitoOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CognitoOptionsParameters) DeepCopyInto(out *CognitoOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdentityPoolID != nil { + in, out := &in.IdentityPoolID, &out.IdentityPoolID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoOptionsParameters. +func (in *CognitoOptionsParameters) DeepCopy() *CognitoOptionsParameters { + if in == nil { + return nil + } + out := new(CognitoOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColdStorageOptionsInitParameters) DeepCopyInto(out *ColdStorageOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColdStorageOptionsInitParameters. +func (in *ColdStorageOptionsInitParameters) DeepCopy() *ColdStorageOptionsInitParameters { + if in == nil { + return nil + } + out := new(ColdStorageOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColdStorageOptionsObservation) DeepCopyInto(out *ColdStorageOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColdStorageOptionsObservation. +func (in *ColdStorageOptionsObservation) DeepCopy() *ColdStorageOptionsObservation { + if in == nil { + return nil + } + out := new(ColdStorageOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColdStorageOptionsParameters) DeepCopyInto(out *ColdStorageOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColdStorageOptionsParameters. +func (in *ColdStorageOptionsParameters) DeepCopy() *ColdStorageOptionsParameters { + if in == nil { + return nil + } + out := new(ColdStorageOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Domain) DeepCopyInto(out *Domain) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Domain. +func (in *Domain) DeepCopy() *Domain { + if in == nil { + return nil + } + out := new(Domain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Domain) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainEndpointOptionsInitParameters) DeepCopyInto(out *DomainEndpointOptionsInitParameters) { + *out = *in + if in.CustomEndpoint != nil { + in, out := &in.CustomEndpoint, &out.CustomEndpoint + *out = new(string) + **out = **in + } + if in.CustomEndpointCertificateArn != nil { + in, out := &in.CustomEndpointCertificateArn, &out.CustomEndpointCertificateArn + *out = new(string) + **out = **in + } + if in.CustomEndpointEnabled != nil { + in, out := &in.CustomEndpointEnabled, &out.CustomEndpointEnabled + *out = new(bool) + **out = **in + } + if in.EnforceHTTPS != nil { + in, out := &in.EnforceHTTPS, &out.EnforceHTTPS + *out = new(bool) + **out = **in + } + if in.TLSSecurityPolicy != nil { + in, out := &in.TLSSecurityPolicy, &out.TLSSecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainEndpointOptionsInitParameters. +func (in *DomainEndpointOptionsInitParameters) DeepCopy() *DomainEndpointOptionsInitParameters { + if in == nil { + return nil + } + out := new(DomainEndpointOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainEndpointOptionsObservation) DeepCopyInto(out *DomainEndpointOptionsObservation) { + *out = *in + if in.CustomEndpoint != nil { + in, out := &in.CustomEndpoint, &out.CustomEndpoint + *out = new(string) + **out = **in + } + if in.CustomEndpointCertificateArn != nil { + in, out := &in.CustomEndpointCertificateArn, &out.CustomEndpointCertificateArn + *out = new(string) + **out = **in + } + if in.CustomEndpointEnabled != nil { + in, out := &in.CustomEndpointEnabled, &out.CustomEndpointEnabled + *out = new(bool) + **out = **in + } + if in.EnforceHTTPS != nil { + in, out := &in.EnforceHTTPS, &out.EnforceHTTPS + *out = new(bool) + **out = **in + } + if in.TLSSecurityPolicy != nil { + in, out := &in.TLSSecurityPolicy, &out.TLSSecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainEndpointOptionsObservation. +func (in *DomainEndpointOptionsObservation) DeepCopy() *DomainEndpointOptionsObservation { + if in == nil { + return nil + } + out := new(DomainEndpointOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainEndpointOptionsParameters) DeepCopyInto(out *DomainEndpointOptionsParameters) { + *out = *in + if in.CustomEndpoint != nil { + in, out := &in.CustomEndpoint, &out.CustomEndpoint + *out = new(string) + **out = **in + } + if in.CustomEndpointCertificateArn != nil { + in, out := &in.CustomEndpointCertificateArn, &out.CustomEndpointCertificateArn + *out = new(string) + **out = **in + } + if in.CustomEndpointEnabled != nil { + in, out := &in.CustomEndpointEnabled, &out.CustomEndpointEnabled + *out = new(bool) + **out = **in + } + if in.EnforceHTTPS != nil { + in, out := &in.EnforceHTTPS, &out.EnforceHTTPS + *out = new(bool) + **out = **in + } + if in.TLSSecurityPolicy != nil { + in, out := &in.TLSSecurityPolicy, &out.TLSSecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainEndpointOptionsParameters. +func (in *DomainEndpointOptionsParameters) DeepCopy() *DomainEndpointOptionsParameters { + if in == nil { + return nil + } + out := new(DomainEndpointOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainInitParameters) DeepCopyInto(out *DomainInitParameters) { + *out = *in + if in.AccessPolicies != nil { + in, out := &in.AccessPolicies, &out.AccessPolicies + *out = new(string) + **out = **in + } + if in.AdvancedOptions != nil { + in, out := &in.AdvancedOptions, &out.AdvancedOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AdvancedSecurityOptions != nil { + in, out := &in.AdvancedSecurityOptions, &out.AdvancedSecurityOptions + *out = new(AdvancedSecurityOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoTuneOptions != nil { + in, out := &in.AutoTuneOptions, &out.AutoTuneOptions + *out = new(AutoTuneOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = new(ClusterConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CognitoOptions != nil { + in, out := &in.CognitoOptions, &out.CognitoOptions + *out = new(CognitoOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainEndpointOptions != nil { + in, out := &in.DomainEndpointOptions, &out.DomainEndpointOptions + *out = new(DomainEndpointOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EBSOptions != nil { + in, out := &in.EBSOptions, &out.EBSOptions + *out = new(EBSOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ElasticsearchVersion != nil { + in, out := &in.ElasticsearchVersion, &out.ElasticsearchVersion + *out = new(string) + **out = **in + } + if in.EncryptAtRest != nil { + in, out := &in.EncryptAtRest, &out.EncryptAtRest + *out = new(EncryptAtRestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LogPublishingOptions != nil { + in, out := &in.LogPublishingOptions, &out.LogPublishingOptions + *out = 
make([]LogPublishingOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeToNodeEncryption != nil { + in, out := &in.NodeToNodeEncryption, &out.NodeToNodeEncryption + *out = new(NodeToNodeEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SnapshotOptions != nil { + in, out := &in.SnapshotOptions, &out.SnapshotOptions + *out = new(SnapshotOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCOptions != nil { + in, out := &in.VPCOptions, &out.VPCOptions + *out = new(VPCOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainInitParameters. +func (in *DomainInitParameters) DeepCopy() *DomainInitParameters { + if in == nil { + return nil + } + out := new(DomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainList) DeepCopyInto(out *DomainList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Domain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainList. 
+func (in *DomainList) DeepCopy() *DomainList { + if in == nil { + return nil + } + out := new(DomainList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainObservation) DeepCopyInto(out *DomainObservation) { + *out = *in + if in.AccessPolicies != nil { + in, out := &in.AccessPolicies, &out.AccessPolicies + *out = new(string) + **out = **in + } + if in.AdvancedOptions != nil { + in, out := &in.AdvancedOptions, &out.AdvancedOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AdvancedSecurityOptions != nil { + in, out := &in.AdvancedSecurityOptions, &out.AdvancedSecurityOptions + *out = new(AdvancedSecurityOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoTuneOptions != nil { + in, out := &in.AutoTuneOptions, &out.AutoTuneOptions + *out = new(AutoTuneOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = new(ClusterConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.CognitoOptions != nil { + in, out := &in.CognitoOptions, &out.CognitoOptions + *out = new(CognitoOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.DomainEndpointOptions != nil { + in, out := &in.DomainEndpointOptions, &out.DomainEndpointOptions + *out = new(DomainEndpointOptionsObservation) + (*in).DeepCopyInto(*out) + } + 
if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.EBSOptions != nil { + in, out := &in.EBSOptions, &out.EBSOptions + *out = new(EBSOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ElasticsearchVersion != nil { + in, out := &in.ElasticsearchVersion, &out.ElasticsearchVersion + *out = new(string) + **out = **in + } + if in.EncryptAtRest != nil { + in, out := &in.EncryptAtRest, &out.EncryptAtRest + *out = new(EncryptAtRestObservation) + (*in).DeepCopyInto(*out) + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KibanaEndpoint != nil { + in, out := &in.KibanaEndpoint, &out.KibanaEndpoint + *out = new(string) + **out = **in + } + if in.LogPublishingOptions != nil { + in, out := &in.LogPublishingOptions, &out.LogPublishingOptions + *out = make([]LogPublishingOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeToNodeEncryption != nil { + in, out := &in.NodeToNodeEncryption, &out.NodeToNodeEncryption + *out = new(NodeToNodeEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.SnapshotOptions != nil { + in, out := &in.SnapshotOptions, &out.SnapshotOptions + *out = new(SnapshotOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := 
&inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCOptions != nil { + in, out := &in.VPCOptions, &out.VPCOptions + *out = new(VPCOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainObservation. +func (in *DomainObservation) DeepCopy() *DomainObservation { + if in == nil { + return nil + } + out := new(DomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainParameters) DeepCopyInto(out *DomainParameters) { + *out = *in + if in.AccessPolicies != nil { + in, out := &in.AccessPolicies, &out.AccessPolicies + *out = new(string) + **out = **in + } + if in.AdvancedOptions != nil { + in, out := &in.AdvancedOptions, &out.AdvancedOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AdvancedSecurityOptions != nil { + in, out := &in.AdvancedSecurityOptions, &out.AdvancedSecurityOptions + *out = new(AdvancedSecurityOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoTuneOptions != nil { + in, out := &in.AutoTuneOptions, &out.AutoTuneOptions + *out = new(AutoTuneOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = new(ClusterConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.CognitoOptions != nil { + in, out := &in.CognitoOptions, &out.CognitoOptions + *out = new(CognitoOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainEndpointOptions != nil { + in, out := &in.DomainEndpointOptions, &out.DomainEndpointOptions + *out = new(DomainEndpointOptionsParameters) + 
(*in).DeepCopyInto(*out) + } + if in.EBSOptions != nil { + in, out := &in.EBSOptions, &out.EBSOptions + *out = new(EBSOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.ElasticsearchVersion != nil { + in, out := &in.ElasticsearchVersion, &out.ElasticsearchVersion + *out = new(string) + **out = **in + } + if in.EncryptAtRest != nil { + in, out := &in.EncryptAtRest, &out.EncryptAtRest + *out = new(EncryptAtRestParameters) + (*in).DeepCopyInto(*out) + } + if in.LogPublishingOptions != nil { + in, out := &in.LogPublishingOptions, &out.LogPublishingOptions + *out = make([]LogPublishingOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeToNodeEncryption != nil { + in, out := &in.NodeToNodeEncryption, &out.NodeToNodeEncryption + *out = new(NodeToNodeEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SnapshotOptions != nil { + in, out := &in.SnapshotOptions, &out.SnapshotOptions + *out = new(SnapshotOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCOptions != nil { + in, out := &in.VPCOptions, &out.VPCOptions + *out = new(VPCOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainParameters. +func (in *DomainParameters) DeepCopy() *DomainParameters { + if in == nil { + return nil + } + out := new(DomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSAMLOptions) DeepCopyInto(out *DomainSAMLOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptions. +func (in *DomainSAMLOptions) DeepCopy() *DomainSAMLOptions { + if in == nil { + return nil + } + out := new(DomainSAMLOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainSAMLOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSAMLOptionsInitParameters) DeepCopyInto(out *DomainSAMLOptionsInitParameters) { + *out = *in + if in.SAMLOptions != nil { + in, out := &in.SAMLOptions, &out.SAMLOptions + *out = new(SAMLOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsInitParameters. +func (in *DomainSAMLOptionsInitParameters) DeepCopy() *DomainSAMLOptionsInitParameters { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSAMLOptionsList) DeepCopyInto(out *DomainSAMLOptionsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DomainSAMLOptions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsList. +func (in *DomainSAMLOptionsList) DeepCopy() *DomainSAMLOptionsList { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainSAMLOptionsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSAMLOptionsObservation) DeepCopyInto(out *DomainSAMLOptionsObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.SAMLOptions != nil { + in, out := &in.SAMLOptions, &out.SAMLOptions + *out = new(SAMLOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsObservation. +func (in *DomainSAMLOptionsObservation) DeepCopy() *DomainSAMLOptionsObservation { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSAMLOptionsParameters) DeepCopyInto(out *DomainSAMLOptionsParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SAMLOptions != nil { + in, out := &in.SAMLOptions, &out.SAMLOptions + *out = new(SAMLOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsParameters. +func (in *DomainSAMLOptionsParameters) DeepCopy() *DomainSAMLOptionsParameters { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSAMLOptionsSpec) DeepCopyInto(out *DomainSAMLOptionsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsSpec. +func (in *DomainSAMLOptionsSpec) DeepCopy() *DomainSAMLOptionsSpec { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSAMLOptionsStatus) DeepCopyInto(out *DomainSAMLOptionsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsStatus. 
+func (in *DomainSAMLOptionsStatus) DeepCopy() *DomainSAMLOptionsStatus { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSpec) DeepCopyInto(out *DomainSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSpec. +func (in *DomainSpec) DeepCopy() *DomainSpec { + if in == nil { + return nil + } + out := new(DomainSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainStatus) DeepCopyInto(out *DomainStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainStatus. +func (in *DomainStatus) DeepCopy() *DomainStatus { + if in == nil { + return nil + } + out := new(DomainStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DurationInitParameters) DeepCopyInto(out *DurationInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DurationInitParameters. 
+func (in *DurationInitParameters) DeepCopy() *DurationInitParameters { + if in == nil { + return nil + } + out := new(DurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DurationObservation) DeepCopyInto(out *DurationObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DurationObservation. +func (in *DurationObservation) DeepCopy() *DurationObservation { + if in == nil { + return nil + } + out := new(DurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DurationParameters) DeepCopyInto(out *DurationParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DurationParameters. +func (in *DurationParameters) DeepCopy() *DurationParameters { + if in == nil { + return nil + } + out := new(DurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSOptionsInitParameters) DeepCopyInto(out *EBSOptionsInitParameters) { + *out = *in + if in.EBSEnabled != nil { + in, out := &in.EBSEnabled, &out.EBSEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSOptionsInitParameters. +func (in *EBSOptionsInitParameters) DeepCopy() *EBSOptionsInitParameters { + if in == nil { + return nil + } + out := new(EBSOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSOptionsObservation) DeepCopyInto(out *EBSOptionsObservation) { + *out = *in + if in.EBSEnabled != nil { + in, out := &in.EBSEnabled, &out.EBSEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSOptionsObservation. 
+func (in *EBSOptionsObservation) DeepCopy() *EBSOptionsObservation { + if in == nil { + return nil + } + out := new(EBSOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSOptionsParameters) DeepCopyInto(out *EBSOptionsParameters) { + *out = *in + if in.EBSEnabled != nil { + in, out := &in.EBSEnabled, &out.EBSEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSOptionsParameters. +func (in *EBSOptionsParameters) DeepCopy() *EBSOptionsParameters { + if in == nil { + return nil + } + out := new(EBSOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptAtRestInitParameters) DeepCopyInto(out *EncryptAtRestInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptAtRestInitParameters. 
+func (in *EncryptAtRestInitParameters) DeepCopy() *EncryptAtRestInitParameters { + if in == nil { + return nil + } + out := new(EncryptAtRestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptAtRestObservation) DeepCopyInto(out *EncryptAtRestObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptAtRestObservation. +func (in *EncryptAtRestObservation) DeepCopy() *EncryptAtRestObservation { + if in == nil { + return nil + } + out := new(EncryptAtRestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptAtRestParameters) DeepCopyInto(out *EncryptAtRestParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptAtRestParameters. +func (in *EncryptAtRestParameters) DeepCopy() *EncryptAtRestParameters { + if in == nil { + return nil + } + out := new(EncryptAtRestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdpInitParameters) DeepCopyInto(out *IdpInitParameters) { + *out = *in + if in.EntityID != nil { + in, out := &in.EntityID, &out.EntityID + *out = new(string) + **out = **in + } + if in.MetadataContent != nil { + in, out := &in.MetadataContent, &out.MetadataContent + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdpInitParameters. +func (in *IdpInitParameters) DeepCopy() *IdpInitParameters { + if in == nil { + return nil + } + out := new(IdpInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdpObservation) DeepCopyInto(out *IdpObservation) { + *out = *in + if in.EntityID != nil { + in, out := &in.EntityID, &out.EntityID + *out = new(string) + **out = **in + } + if in.MetadataContent != nil { + in, out := &in.MetadataContent, &out.MetadataContent + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdpObservation. +func (in *IdpObservation) DeepCopy() *IdpObservation { + if in == nil { + return nil + } + out := new(IdpObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdpParameters) DeepCopyInto(out *IdpParameters) { + *out = *in + if in.EntityID != nil { + in, out := &in.EntityID, &out.EntityID + *out = new(string) + **out = **in + } + if in.MetadataContent != nil { + in, out := &in.MetadataContent, &out.MetadataContent + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdpParameters. 
+func (in *IdpParameters) DeepCopy() *IdpParameters { + if in == nil { + return nil + } + out := new(IdpParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogPublishingOptionsInitParameters) DeepCopyInto(out *LogPublishingOptionsInitParameters) { + *out = *in + if in.CloudwatchLogGroupArn != nil { + in, out := &in.CloudwatchLogGroupArn, &out.CloudwatchLogGroupArn + *out = new(string) + **out = **in + } + if in.CloudwatchLogGroupArnRef != nil { + in, out := &in.CloudwatchLogGroupArnRef, &out.CloudwatchLogGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLogGroupArnSelector != nil { + in, out := &in.CloudwatchLogGroupArnSelector, &out.CloudwatchLogGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogPublishingOptionsInitParameters. +func (in *LogPublishingOptionsInitParameters) DeepCopy() *LogPublishingOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogPublishingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogPublishingOptionsObservation) DeepCopyInto(out *LogPublishingOptionsObservation) { + *out = *in + if in.CloudwatchLogGroupArn != nil { + in, out := &in.CloudwatchLogGroupArn, &out.CloudwatchLogGroupArn + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogPublishingOptionsObservation. +func (in *LogPublishingOptionsObservation) DeepCopy() *LogPublishingOptionsObservation { + if in == nil { + return nil + } + out := new(LogPublishingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogPublishingOptionsParameters) DeepCopyInto(out *LogPublishingOptionsParameters) { + *out = *in + if in.CloudwatchLogGroupArn != nil { + in, out := &in.CloudwatchLogGroupArn, &out.CloudwatchLogGroupArn + *out = new(string) + **out = **in + } + if in.CloudwatchLogGroupArnRef != nil { + in, out := &in.CloudwatchLogGroupArnRef, &out.CloudwatchLogGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLogGroupArnSelector != nil { + in, out := &in.CloudwatchLogGroupArnSelector, &out.CloudwatchLogGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogPublishingOptionsParameters. 
+func (in *LogPublishingOptionsParameters) DeepCopy() *LogPublishingOptionsParameters { + if in == nil { + return nil + } + out := new(LogPublishingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceScheduleInitParameters) DeepCopyInto(out *MaintenanceScheduleInitParameters) { + *out = *in + if in.CronExpressionForRecurrence != nil { + in, out := &in.CronExpressionForRecurrence, &out.CronExpressionForRecurrence + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(DurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StartAt != nil { + in, out := &in.StartAt, &out.StartAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleInitParameters. +func (in *MaintenanceScheduleInitParameters) DeepCopy() *MaintenanceScheduleInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceScheduleObservation) DeepCopyInto(out *MaintenanceScheduleObservation) { + *out = *in + if in.CronExpressionForRecurrence != nil { + in, out := &in.CronExpressionForRecurrence, &out.CronExpressionForRecurrence + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(DurationObservation) + (*in).DeepCopyInto(*out) + } + if in.StartAt != nil { + in, out := &in.StartAt, &out.StartAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleObservation. 
+func (in *MaintenanceScheduleObservation) DeepCopy() *MaintenanceScheduleObservation { + if in == nil { + return nil + } + out := new(MaintenanceScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceScheduleParameters) DeepCopyInto(out *MaintenanceScheduleParameters) { + *out = *in + if in.CronExpressionForRecurrence != nil { + in, out := &in.CronExpressionForRecurrence, &out.CronExpressionForRecurrence + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(DurationParameters) + (*in).DeepCopyInto(*out) + } + if in.StartAt != nil { + in, out := &in.StartAt, &out.StartAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleParameters. +func (in *MaintenanceScheduleParameters) DeepCopy() *MaintenanceScheduleParameters { + if in == nil { + return nil + } + out := new(MaintenanceScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserOptionsInitParameters) DeepCopyInto(out *MasterUserOptionsInitParameters) { + *out = *in + if in.MasterUserArn != nil { + in, out := &in.MasterUserArn, &out.MasterUserArn + *out = new(string) + **out = **in + } + if in.MasterUserName != nil { + in, out := &in.MasterUserName, &out.MasterUserName + *out = new(string) + **out = **in + } + if in.MasterUserPasswordSecretRef != nil { + in, out := &in.MasterUserPasswordSecretRef, &out.MasterUserPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserOptionsInitParameters. 
+func (in *MasterUserOptionsInitParameters) DeepCopy() *MasterUserOptionsInitParameters { + if in == nil { + return nil + } + out := new(MasterUserOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserOptionsObservation) DeepCopyInto(out *MasterUserOptionsObservation) { + *out = *in + if in.MasterUserArn != nil { + in, out := &in.MasterUserArn, &out.MasterUserArn + *out = new(string) + **out = **in + } + if in.MasterUserName != nil { + in, out := &in.MasterUserName, &out.MasterUserName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserOptionsObservation. +func (in *MasterUserOptionsObservation) DeepCopy() *MasterUserOptionsObservation { + if in == nil { + return nil + } + out := new(MasterUserOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserOptionsParameters) DeepCopyInto(out *MasterUserOptionsParameters) { + *out = *in + if in.MasterUserArn != nil { + in, out := &in.MasterUserArn, &out.MasterUserArn + *out = new(string) + **out = **in + } + if in.MasterUserName != nil { + in, out := &in.MasterUserName, &out.MasterUserName + *out = new(string) + **out = **in + } + if in.MasterUserPasswordSecretRef != nil { + in, out := &in.MasterUserPasswordSecretRef, &out.MasterUserPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserOptionsParameters. 
+func (in *MasterUserOptionsParameters) DeepCopy() *MasterUserOptionsParameters { + if in == nil { + return nil + } + out := new(MasterUserOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeToNodeEncryptionInitParameters) DeepCopyInto(out *NodeToNodeEncryptionInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeToNodeEncryptionInitParameters. +func (in *NodeToNodeEncryptionInitParameters) DeepCopy() *NodeToNodeEncryptionInitParameters { + if in == nil { + return nil + } + out := new(NodeToNodeEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeToNodeEncryptionObservation) DeepCopyInto(out *NodeToNodeEncryptionObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeToNodeEncryptionObservation. +func (in *NodeToNodeEncryptionObservation) DeepCopy() *NodeToNodeEncryptionObservation { + if in == nil { + return nil + } + out := new(NodeToNodeEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeToNodeEncryptionParameters) DeepCopyInto(out *NodeToNodeEncryptionParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeToNodeEncryptionParameters. +func (in *NodeToNodeEncryptionParameters) DeepCopy() *NodeToNodeEncryptionParameters { + if in == nil { + return nil + } + out := new(NodeToNodeEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SAMLOptionsInitParameters) DeepCopyInto(out *SAMLOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Idp != nil { + in, out := &in.Idp, &out.Idp + *out = new(IdpInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MasterBackendRole != nil { + in, out := &in.MasterBackendRole, &out.MasterBackendRole + *out = new(string) + **out = **in + } + if in.MasterUserNameSecretRef != nil { + in, out := &in.MasterUserNameSecretRef, &out.MasterUserNameSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RolesKey != nil { + in, out := &in.RolesKey, &out.RolesKey + *out = new(string) + **out = **in + } + if in.SessionTimeoutMinutes != nil { + in, out := &in.SessionTimeoutMinutes, &out.SessionTimeoutMinutes + *out = new(float64) + **out = **in + } + if in.SubjectKey != nil { + in, out := &in.SubjectKey, &out.SubjectKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsInitParameters. 
+func (in *SAMLOptionsInitParameters) DeepCopy() *SAMLOptionsInitParameters { + if in == nil { + return nil + } + out := new(SAMLOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SAMLOptionsObservation) DeepCopyInto(out *SAMLOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Idp != nil { + in, out := &in.Idp, &out.Idp + *out = new(IdpObservation) + (*in).DeepCopyInto(*out) + } + if in.MasterBackendRole != nil { + in, out := &in.MasterBackendRole, &out.MasterBackendRole + *out = new(string) + **out = **in + } + if in.RolesKey != nil { + in, out := &in.RolesKey, &out.RolesKey + *out = new(string) + **out = **in + } + if in.SessionTimeoutMinutes != nil { + in, out := &in.SessionTimeoutMinutes, &out.SessionTimeoutMinutes + *out = new(float64) + **out = **in + } + if in.SubjectKey != nil { + in, out := &in.SubjectKey, &out.SubjectKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsObservation. +func (in *SAMLOptionsObservation) DeepCopy() *SAMLOptionsObservation { + if in == nil { + return nil + } + out := new(SAMLOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SAMLOptionsParameters) DeepCopyInto(out *SAMLOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Idp != nil { + in, out := &in.Idp, &out.Idp + *out = new(IdpParameters) + (*in).DeepCopyInto(*out) + } + if in.MasterBackendRole != nil { + in, out := &in.MasterBackendRole, &out.MasterBackendRole + *out = new(string) + **out = **in + } + if in.MasterUserNameSecretRef != nil { + in, out := &in.MasterUserNameSecretRef, &out.MasterUserNameSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RolesKey != nil { + in, out := &in.RolesKey, &out.RolesKey + *out = new(string) + **out = **in + } + if in.SessionTimeoutMinutes != nil { + in, out := &in.SessionTimeoutMinutes, &out.SessionTimeoutMinutes + *out = new(float64) + **out = **in + } + if in.SubjectKey != nil { + in, out := &in.SubjectKey, &out.SubjectKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsParameters. +func (in *SAMLOptionsParameters) DeepCopy() *SAMLOptionsParameters { + if in == nil { + return nil + } + out := new(SAMLOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotOptionsInitParameters) DeepCopyInto(out *SnapshotOptionsInitParameters) { + *out = *in + if in.AutomatedSnapshotStartHour != nil { + in, out := &in.AutomatedSnapshotStartHour, &out.AutomatedSnapshotStartHour + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotOptionsInitParameters. 
+func (in *SnapshotOptionsInitParameters) DeepCopy() *SnapshotOptionsInitParameters { + if in == nil { + return nil + } + out := new(SnapshotOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotOptionsObservation) DeepCopyInto(out *SnapshotOptionsObservation) { + *out = *in + if in.AutomatedSnapshotStartHour != nil { + in, out := &in.AutomatedSnapshotStartHour, &out.AutomatedSnapshotStartHour + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotOptionsObservation. +func (in *SnapshotOptionsObservation) DeepCopy() *SnapshotOptionsObservation { + if in == nil { + return nil + } + out := new(SnapshotOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotOptionsParameters) DeepCopyInto(out *SnapshotOptionsParameters) { + *out = *in + if in.AutomatedSnapshotStartHour != nil { + in, out := &in.AutomatedSnapshotStartHour, &out.AutomatedSnapshotStartHour + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotOptionsParameters. +func (in *SnapshotOptionsParameters) DeepCopy() *SnapshotOptionsParameters { + if in == nil { + return nil + } + out := new(SnapshotOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCOptionsInitParameters) DeepCopyInto(out *VPCOptionsInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCOptionsInitParameters. +func (in *VPCOptionsInitParameters) DeepCopy() *VPCOptionsInitParameters { + if in == nil { + return nil + } + out := new(VPCOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCOptionsObservation) DeepCopyInto(out *VPCOptionsObservation) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCOptionsObservation. +func (in *VPCOptionsObservation) DeepCopy() *VPCOptionsObservation { + if in == nil { + return nil + } + out := new(VPCOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCOptionsParameters) DeepCopyInto(out *VPCOptionsParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCOptionsParameters. +func (in *VPCOptionsParameters) DeepCopy() *VPCOptionsParameters { + if in == nil { + return nil + } + out := new(VPCOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneAwarenessConfigInitParameters) DeepCopyInto(out *ZoneAwarenessConfigInitParameters) { + *out = *in + if in.AvailabilityZoneCount != nil { + in, out := &in.AvailabilityZoneCount, &out.AvailabilityZoneCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneAwarenessConfigInitParameters. +func (in *ZoneAwarenessConfigInitParameters) DeepCopy() *ZoneAwarenessConfigInitParameters { + if in == nil { + return nil + } + out := new(ZoneAwarenessConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZoneAwarenessConfigObservation) DeepCopyInto(out *ZoneAwarenessConfigObservation) { + *out = *in + if in.AvailabilityZoneCount != nil { + in, out := &in.AvailabilityZoneCount, &out.AvailabilityZoneCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneAwarenessConfigObservation. +func (in *ZoneAwarenessConfigObservation) DeepCopy() *ZoneAwarenessConfigObservation { + if in == nil { + return nil + } + out := new(ZoneAwarenessConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneAwarenessConfigParameters) DeepCopyInto(out *ZoneAwarenessConfigParameters) { + *out = *in + if in.AvailabilityZoneCount != nil { + in, out := &in.AvailabilityZoneCount, &out.AvailabilityZoneCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneAwarenessConfigParameters. +func (in *ZoneAwarenessConfigParameters) DeepCopy() *ZoneAwarenessConfigParameters { + if in == nil { + return nil + } + out := new(ZoneAwarenessConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/elasticsearch/v1beta2/zz_generated.managed.go b/apis/elasticsearch/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..3ba7ff25a9 --- /dev/null +++ b/apis/elasticsearch/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Domain. +func (mg *Domain) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Domain. 
+func (mg *Domain) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Domain. +func (mg *Domain) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Domain. +func (mg *Domain) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Domain. +func (mg *Domain) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Domain. +func (mg *Domain) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Domain. +func (mg *Domain) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Domain. +func (mg *Domain) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Domain. +func (mg *Domain) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Domain. +func (mg *Domain) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Domain. +func (mg *Domain) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Domain. +func (mg *Domain) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DomainSAMLOptions. 
+func (mg *DomainSAMLOptions) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DomainSAMLOptions. 
+func (mg *DomainSAMLOptions) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/elasticsearch/v1beta2/zz_generated.managedlist.go b/apis/elasticsearch/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..6e3b0b4953 --- /dev/null +++ b/apis/elasticsearch/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DomainList. +func (l *DomainList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DomainSAMLOptionsList. +func (l *DomainSAMLOptionsList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/elasticsearch/v1beta2/zz_generated.resolvers.go b/apis/elasticsearch/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..a180e97d8b --- /dev/null +++ b/apis/elasticsearch/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Domain. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Domain) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.LogPublishingOptions); i3++ { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef, + Selector: mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn") + } + mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.LogPublishingOptions); i3++ { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: 
mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef, + Selector: mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn") + } + mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/elasticsearch/v1beta2/zz_groupversion_info.go b/apis/elasticsearch/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..30cb5098e7 --- /dev/null +++ b/apis/elasticsearch/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=elasticsearch.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "elasticsearch.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/elastictranscoder/v1beta1/zz_generated.conversion_spokes.go b/apis/elastictranscoder/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..be45679902 --- /dev/null +++ b/apis/elastictranscoder/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Pipeline to the hub type. +func (tr *Pipeline) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Pipeline type. +func (tr *Pipeline) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Preset to the hub type. 
+func (tr *Preset) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Preset type. +func (tr *Preset) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/elastictranscoder/v1beta2/zz_generated.conversion_hubs.go b/apis/elastictranscoder/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..8dc87ace38 --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Pipeline) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Preset) Hub() {} diff --git a/apis/elastictranscoder/v1beta2/zz_generated.deepcopy.go b/apis/elastictranscoder/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..3db135ec98 --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2049 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioCodecOptionsInitParameters) DeepCopyInto(out *AudioCodecOptionsInitParameters) { + *out = *in + if in.BitDepth != nil { + in, out := &in.BitDepth, &out.BitDepth + *out = new(string) + **out = **in + } + if in.BitOrder != nil { + in, out := &in.BitOrder, &out.BitOrder + *out = new(string) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.Signed != nil { + in, out := &in.Signed, &out.Signed + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioCodecOptionsInitParameters. +func (in *AudioCodecOptionsInitParameters) DeepCopy() *AudioCodecOptionsInitParameters { + if in == nil { + return nil + } + out := new(AudioCodecOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioCodecOptionsObservation) DeepCopyInto(out *AudioCodecOptionsObservation) { + *out = *in + if in.BitDepth != nil { + in, out := &in.BitDepth, &out.BitDepth + *out = new(string) + **out = **in + } + if in.BitOrder != nil { + in, out := &in.BitOrder, &out.BitOrder + *out = new(string) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.Signed != nil { + in, out := &in.Signed, &out.Signed + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioCodecOptionsObservation. 
+func (in *AudioCodecOptionsObservation) DeepCopy() *AudioCodecOptionsObservation { + if in == nil { + return nil + } + out := new(AudioCodecOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioCodecOptionsParameters) DeepCopyInto(out *AudioCodecOptionsParameters) { + *out = *in + if in.BitDepth != nil { + in, out := &in.BitDepth, &out.BitDepth + *out = new(string) + **out = **in + } + if in.BitOrder != nil { + in, out := &in.BitOrder, &out.BitOrder + *out = new(string) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.Signed != nil { + in, out := &in.Signed, &out.Signed + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioCodecOptionsParameters. +func (in *AudioCodecOptionsParameters) DeepCopy() *AudioCodecOptionsParameters { + if in == nil { + return nil + } + out := new(AudioCodecOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioInitParameters) DeepCopyInto(out *AudioInitParameters) { + *out = *in + if in.AudioPackingMode != nil { + in, out := &in.AudioPackingMode, &out.AudioPackingMode + *out = new(string) + **out = **in + } + if in.BitRate != nil { + in, out := &in.BitRate, &out.BitRate + *out = new(string) + **out = **in + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = new(string) + **out = **in + } + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioInitParameters. +func (in *AudioInitParameters) DeepCopy() *AudioInitParameters { + if in == nil { + return nil + } + out := new(AudioInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioObservation) DeepCopyInto(out *AudioObservation) { + *out = *in + if in.AudioPackingMode != nil { + in, out := &in.AudioPackingMode, &out.AudioPackingMode + *out = new(string) + **out = **in + } + if in.BitRate != nil { + in, out := &in.BitRate, &out.BitRate + *out = new(string) + **out = **in + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = new(string) + **out = **in + } + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioObservation. 
+func (in *AudioObservation) DeepCopy() *AudioObservation { + if in == nil { + return nil + } + out := new(AudioObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioParameters) DeepCopyInto(out *AudioParameters) { + *out = *in + if in.AudioPackingMode != nil { + in, out := &in.AudioPackingMode, &out.AudioPackingMode + *out = new(string) + **out = **in + } + if in.BitRate != nil { + in, out := &in.BitRate, &out.BitRate + *out = new(string) + **out = **in + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = new(string) + **out = **in + } + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioParameters. +func (in *AudioParameters) DeepCopy() *AudioParameters { + if in == nil { + return nil + } + out := new(AudioParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentConfigInitParameters) DeepCopyInto(out *ContentConfigInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentConfigInitParameters. +func (in *ContentConfigInitParameters) DeepCopy() *ContentConfigInitParameters { + if in == nil { + return nil + } + out := new(ContentConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentConfigObservation) DeepCopyInto(out *ContentConfigObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentConfigObservation. +func (in *ContentConfigObservation) DeepCopy() *ContentConfigObservation { + if in == nil { + return nil + } + out := new(ContentConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentConfigParameters) DeepCopyInto(out *ContentConfigParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentConfigParameters. +func (in *ContentConfigParameters) DeepCopy() *ContentConfigParameters { + if in == nil { + return nil + } + out := new(ContentConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentConfigPermissionsInitParameters) DeepCopyInto(out *ContentConfigPermissionsInitParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(string) + **out = **in + } + if in.GranteeType != nil { + in, out := &in.GranteeType, &out.GranteeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentConfigPermissionsInitParameters. 
+func (in *ContentConfigPermissionsInitParameters) DeepCopy() *ContentConfigPermissionsInitParameters { + if in == nil { + return nil + } + out := new(ContentConfigPermissionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentConfigPermissionsObservation) DeepCopyInto(out *ContentConfigPermissionsObservation) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(string) + **out = **in + } + if in.GranteeType != nil { + in, out := &in.GranteeType, &out.GranteeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentConfigPermissionsObservation. +func (in *ContentConfigPermissionsObservation) DeepCopy() *ContentConfigPermissionsObservation { + if in == nil { + return nil + } + out := new(ContentConfigPermissionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentConfigPermissionsParameters) DeepCopyInto(out *ContentConfigPermissionsParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(string) + **out = **in + } + if in.GranteeType != nil { + in, out := &in.GranteeType, &out.GranteeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentConfigPermissionsParameters. +func (in *ContentConfigPermissionsParameters) DeepCopy() *ContentConfigPermissionsParameters { + if in == nil { + return nil + } + out := new(ContentConfigPermissionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationsInitParameters) DeepCopyInto(out *NotificationsInitParameters) { + *out = *in + if in.Completed != nil { + in, out := &in.Completed, &out.Completed + *out = new(string) + **out = **in + } + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + if in.Progressing != nil { + in, out := &in.Progressing, &out.Progressing + *out = new(string) + **out = **in + } + if in.Warning != nil { + in, out := &in.Warning, &out.Warning + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationsInitParameters. +func (in *NotificationsInitParameters) DeepCopy() *NotificationsInitParameters { + if in == nil { + return nil + } + out := new(NotificationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationsObservation) DeepCopyInto(out *NotificationsObservation) { + *out = *in + if in.Completed != nil { + in, out := &in.Completed, &out.Completed + *out = new(string) + **out = **in + } + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + if in.Progressing != nil { + in, out := &in.Progressing, &out.Progressing + *out = new(string) + **out = **in + } + if in.Warning != nil { + in, out := &in.Warning, &out.Warning + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationsObservation. +func (in *NotificationsObservation) DeepCopy() *NotificationsObservation { + if in == nil { + return nil + } + out := new(NotificationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationsParameters) DeepCopyInto(out *NotificationsParameters) { + *out = *in + if in.Completed != nil { + in, out := &in.Completed, &out.Completed + *out = new(string) + **out = **in + } + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + if in.Progressing != nil { + in, out := &in.Progressing, &out.Progressing + *out = new(string) + **out = **in + } + if in.Warning != nil { + in, out := &in.Warning, &out.Warning + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationsParameters. +func (in *NotificationsParameters) DeepCopy() *NotificationsParameters { + if in == nil { + return nil + } + out := new(NotificationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Pipeline) DeepCopyInto(out *Pipeline) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline. +func (in *Pipeline) DeepCopy() *Pipeline { + if in == nil { + return nil + } + out := new(Pipeline) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Pipeline) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineInitParameters) DeepCopyInto(out *PipelineInitParameters) { + *out = *in + if in.AwsKMSKeyArn != nil { + in, out := &in.AwsKMSKeyArn, &out.AwsKMSKeyArn + *out = new(string) + **out = **in + } + if in.ContentConfig != nil { + in, out := &in.ContentConfig, &out.ContentConfig + *out = new(ContentConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ContentConfigPermissions != nil { + in, out := &in.ContentConfigPermissions, &out.ContentConfigPermissions + *out = make([]ContentConfigPermissionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputBucket != nil { + in, out := &in.InputBucket, &out.InputBucket + *out = new(string) + **out = **in + } + if in.InputBucketRef != nil { + in, out := &in.InputBucketRef, &out.InputBucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InputBucketSelector != nil { + in, out := &in.InputBucketSelector, &out.InputBucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notifications != nil { + in, out := &in.Notifications, 
&out.Notifications + *out = new(NotificationsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputBucket != nil { + in, out := &in.OutputBucket, &out.OutputBucket + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.RoleRef != nil { + in, out := &in.RoleRef, &out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ThumbnailConfig != nil { + in, out := &in.ThumbnailConfig, &out.ThumbnailConfig + *out = new(ThumbnailConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ThumbnailConfigPermissions != nil { + in, out := &in.ThumbnailConfigPermissions, &out.ThumbnailConfigPermissions + *out = make([]ThumbnailConfigPermissionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineInitParameters. +func (in *PipelineInitParameters) DeepCopy() *PipelineInitParameters { + if in == nil { + return nil + } + out := new(PipelineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineList) DeepCopyInto(out *PipelineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pipeline, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList. 
+func (in *PipelineList) DeepCopy() *PipelineList { + if in == nil { + return nil + } + out := new(PipelineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PipelineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineObservation) DeepCopyInto(out *PipelineObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AwsKMSKeyArn != nil { + in, out := &in.AwsKMSKeyArn, &out.AwsKMSKeyArn + *out = new(string) + **out = **in + } + if in.ContentConfig != nil { + in, out := &in.ContentConfig, &out.ContentConfig + *out = new(ContentConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ContentConfigPermissions != nil { + in, out := &in.ContentConfigPermissions, &out.ContentConfigPermissions + *out = make([]ContentConfigPermissionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InputBucket != nil { + in, out := &in.InputBucket, &out.InputBucket + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notifications != nil { + in, out := &in.Notifications, &out.Notifications + *out = new(NotificationsObservation) + (*in).DeepCopyInto(*out) + } + if in.OutputBucket != nil { + in, out := &in.OutputBucket, &out.OutputBucket + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ThumbnailConfig != nil { + in, out := &in.ThumbnailConfig, &out.ThumbnailConfig + *out = new(ThumbnailConfigObservation) + 
(*in).DeepCopyInto(*out) + } + if in.ThumbnailConfigPermissions != nil { + in, out := &in.ThumbnailConfigPermissions, &out.ThumbnailConfigPermissions + *out = make([]ThumbnailConfigPermissionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineObservation. +func (in *PipelineObservation) DeepCopy() *PipelineObservation { + if in == nil { + return nil + } + out := new(PipelineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineParameters) DeepCopyInto(out *PipelineParameters) { + *out = *in + if in.AwsKMSKeyArn != nil { + in, out := &in.AwsKMSKeyArn, &out.AwsKMSKeyArn + *out = new(string) + **out = **in + } + if in.ContentConfig != nil { + in, out := &in.ContentConfig, &out.ContentConfig + *out = new(ContentConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.ContentConfigPermissions != nil { + in, out := &in.ContentConfigPermissions, &out.ContentConfigPermissions + *out = make([]ContentConfigPermissionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputBucket != nil { + in, out := &in.InputBucket, &out.InputBucket + *out = new(string) + **out = **in + } + if in.InputBucketRef != nil { + in, out := &in.InputBucketRef, &out.InputBucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InputBucketSelector != nil { + in, out := &in.InputBucketSelector, &out.InputBucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notifications != nil { + in, out := &in.Notifications, &out.Notifications + *out = new(NotificationsParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputBucket != nil { + in, out := 
&in.OutputBucket, &out.OutputBucket + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.RoleRef != nil { + in, out := &in.RoleRef, &out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ThumbnailConfig != nil { + in, out := &in.ThumbnailConfig, &out.ThumbnailConfig + *out = new(ThumbnailConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.ThumbnailConfigPermissions != nil { + in, out := &in.ThumbnailConfigPermissions, &out.ThumbnailConfigPermissions + *out = make([]ThumbnailConfigPermissionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameters. +func (in *PipelineParameters) DeepCopy() *PipelineParameters { + if in == nil { + return nil + } + out := new(PipelineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec. +func (in *PipelineSpec) DeepCopy() *PipelineSpec { + if in == nil { + return nil + } + out := new(PipelineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus. +func (in *PipelineStatus) DeepCopy() *PipelineStatus { + if in == nil { + return nil + } + out := new(PipelineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preset) DeepCopyInto(out *Preset) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preset. +func (in *Preset) DeepCopy() *Preset { + if in == nil { + return nil + } + out := new(Preset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Preset) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PresetInitParameters) DeepCopyInto(out *PresetInitParameters) { + *out = *in + if in.Audio != nil { + in, out := &in.Audio, &out.Audio + *out = new(AudioInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AudioCodecOptions != nil { + in, out := &in.AudioCodecOptions, &out.AudioCodecOptions + *out = new(AudioCodecOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Thumbnails != nil { + in, out := &in.Thumbnails, &out.Thumbnails + *out = new(ThumbnailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Video != nil { + in, out := &in.Video, &out.Video + *out = new(VideoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VideoCodecOptions != nil { + in, out := &in.VideoCodecOptions, &out.VideoCodecOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VideoWatermarks != nil { + in, out := &in.VideoWatermarks, &out.VideoWatermarks + *out = make([]VideoWatermarksInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetInitParameters. 
+func (in *PresetInitParameters) DeepCopy() *PresetInitParameters { + if in == nil { + return nil + } + out := new(PresetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PresetList) DeepCopyInto(out *PresetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Preset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetList. +func (in *PresetList) DeepCopy() *PresetList { + if in == nil { + return nil + } + out := new(PresetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PresetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PresetObservation) DeepCopyInto(out *PresetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Audio != nil { + in, out := &in.Audio, &out.Audio + *out = new(AudioObservation) + (*in).DeepCopyInto(*out) + } + if in.AudioCodecOptions != nil { + in, out := &in.AudioCodecOptions, &out.AudioCodecOptions + *out = new(AudioCodecOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Thumbnails != nil { + in, out := &in.Thumbnails, &out.Thumbnails + *out = new(ThumbnailsObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Video != nil { + in, out := &in.Video, &out.Video + *out = new(VideoObservation) + (*in).DeepCopyInto(*out) + } + if in.VideoCodecOptions != nil { + in, out := &in.VideoCodecOptions, &out.VideoCodecOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VideoWatermarks != nil { + in, out := &in.VideoWatermarks, &out.VideoWatermarks + *out = make([]VideoWatermarksObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetObservation. 
+func (in *PresetObservation) DeepCopy() *PresetObservation { + if in == nil { + return nil + } + out := new(PresetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PresetParameters) DeepCopyInto(out *PresetParameters) { + *out = *in + if in.Audio != nil { + in, out := &in.Audio, &out.Audio + *out = new(AudioParameters) + (*in).DeepCopyInto(*out) + } + if in.AudioCodecOptions != nil { + in, out := &in.AudioCodecOptions, &out.AudioCodecOptions + *out = new(AudioCodecOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Thumbnails != nil { + in, out := &in.Thumbnails, &out.Thumbnails + *out = new(ThumbnailsParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Video != nil { + in, out := &in.Video, &out.Video + *out = new(VideoParameters) + (*in).DeepCopyInto(*out) + } + if in.VideoCodecOptions != nil { + in, out := &in.VideoCodecOptions, &out.VideoCodecOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VideoWatermarks != nil { + in, out := &in.VideoWatermarks, &out.VideoWatermarks + *out = make([]VideoWatermarksParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + 
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetParameters. +func (in *PresetParameters) DeepCopy() *PresetParameters { + if in == nil { + return nil + } + out := new(PresetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PresetSpec) DeepCopyInto(out *PresetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetSpec. +func (in *PresetSpec) DeepCopy() *PresetSpec { + if in == nil { + return nil + } + out := new(PresetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PresetStatus) DeepCopyInto(out *PresetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetStatus. +func (in *PresetStatus) DeepCopy() *PresetStatus { + if in == nil { + return nil + } + out := new(PresetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThumbnailConfigInitParameters) DeepCopyInto(out *ThumbnailConfigInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailConfigInitParameters. +func (in *ThumbnailConfigInitParameters) DeepCopy() *ThumbnailConfigInitParameters { + if in == nil { + return nil + } + out := new(ThumbnailConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThumbnailConfigObservation) DeepCopyInto(out *ThumbnailConfigObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailConfigObservation. +func (in *ThumbnailConfigObservation) DeepCopy() *ThumbnailConfigObservation { + if in == nil { + return nil + } + out := new(ThumbnailConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThumbnailConfigParameters) DeepCopyInto(out *ThumbnailConfigParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailConfigParameters. +func (in *ThumbnailConfigParameters) DeepCopy() *ThumbnailConfigParameters { + if in == nil { + return nil + } + out := new(ThumbnailConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThumbnailConfigPermissionsInitParameters) DeepCopyInto(out *ThumbnailConfigPermissionsInitParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(string) + **out = **in + } + if in.GranteeType != nil { + in, out := &in.GranteeType, &out.GranteeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailConfigPermissionsInitParameters. 
+func (in *ThumbnailConfigPermissionsInitParameters) DeepCopy() *ThumbnailConfigPermissionsInitParameters { + if in == nil { + return nil + } + out := new(ThumbnailConfigPermissionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThumbnailConfigPermissionsObservation) DeepCopyInto(out *ThumbnailConfigPermissionsObservation) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(string) + **out = **in + } + if in.GranteeType != nil { + in, out := &in.GranteeType, &out.GranteeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailConfigPermissionsObservation. +func (in *ThumbnailConfigPermissionsObservation) DeepCopy() *ThumbnailConfigPermissionsObservation { + if in == nil { + return nil + } + out := new(ThumbnailConfigPermissionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThumbnailConfigPermissionsParameters) DeepCopyInto(out *ThumbnailConfigPermissionsParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(string) + **out = **in + } + if in.GranteeType != nil { + in, out := &in.GranteeType, &out.GranteeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailConfigPermissionsParameters. +func (in *ThumbnailConfigPermissionsParameters) DeepCopy() *ThumbnailConfigPermissionsParameters { + if in == nil { + return nil + } + out := new(ThumbnailConfigPermissionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThumbnailsInitParameters) DeepCopyInto(out *ThumbnailsInitParameters) { + *out = *in + if in.AspectRatio != nil { + in, out := &in.AspectRatio, &out.AspectRatio + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(string) + **out = **in + } + if in.MaxWidth != nil { + in, out := &in.MaxWidth, &out.MaxWidth + *out = new(string) + **out = **in + } + if in.PaddingPolicy != nil { + in, out := &in.PaddingPolicy, &out.PaddingPolicy + *out = new(string) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } + if in.SizingPolicy != nil { + in, out := &in.SizingPolicy, &out.SizingPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailsInitParameters. +func (in *ThumbnailsInitParameters) DeepCopy() *ThumbnailsInitParameters { + if in == nil { + return nil + } + out := new(ThumbnailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThumbnailsObservation) DeepCopyInto(out *ThumbnailsObservation) { + *out = *in + if in.AspectRatio != nil { + in, out := &in.AspectRatio, &out.AspectRatio + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(string) + **out = **in + } + if in.MaxWidth != nil { + in, out := &in.MaxWidth, &out.MaxWidth + *out = new(string) + **out = **in + } + if in.PaddingPolicy != nil { + in, out := &in.PaddingPolicy, &out.PaddingPolicy + *out = new(string) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } + if in.SizingPolicy != nil { + in, out := &in.SizingPolicy, &out.SizingPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailsObservation. +func (in *ThumbnailsObservation) DeepCopy() *ThumbnailsObservation { + if in == nil { + return nil + } + out := new(ThumbnailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThumbnailsParameters) DeepCopyInto(out *ThumbnailsParameters) { + *out = *in + if in.AspectRatio != nil { + in, out := &in.AspectRatio, &out.AspectRatio + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(string) + **out = **in + } + if in.MaxWidth != nil { + in, out := &in.MaxWidth, &out.MaxWidth + *out = new(string) + **out = **in + } + if in.PaddingPolicy != nil { + in, out := &in.PaddingPolicy, &out.PaddingPolicy + *out = new(string) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } + if in.SizingPolicy != nil { + in, out := &in.SizingPolicy, &out.SizingPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailsParameters. +func (in *ThumbnailsParameters) DeepCopy() *ThumbnailsParameters { + if in == nil { + return nil + } + out := new(ThumbnailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoInitParameters) DeepCopyInto(out *VideoInitParameters) { + *out = *in + if in.AspectRatio != nil { + in, out := &in.AspectRatio, &out.AspectRatio + *out = new(string) + **out = **in + } + if in.BitRate != nil { + in, out := &in.BitRate, &out.BitRate + *out = new(string) + **out = **in + } + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.DisplayAspectRatio != nil { + in, out := &in.DisplayAspectRatio, &out.DisplayAspectRatio + *out = new(string) + **out = **in + } + if in.FixedGop != nil { + in, out := &in.FixedGop, &out.FixedGop + *out = new(string) + **out = **in + } + if in.FrameRate != nil { + in, out := &in.FrameRate, &out.FrameRate + *out = new(string) + **out = **in + } + if in.KeyframesMaxDist != nil { + in, out := &in.KeyframesMaxDist, &out.KeyframesMaxDist + *out = new(string) + **out = **in + } + if in.MaxFrameRate != nil { + in, out := &in.MaxFrameRate, &out.MaxFrameRate + *out = new(string) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(string) + **out = **in + } + if in.MaxWidth != nil { + in, out := &in.MaxWidth, &out.MaxWidth + *out = new(string) + **out = **in + } + if in.PaddingPolicy != nil { + in, out := &in.PaddingPolicy, &out.PaddingPolicy + *out = new(string) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } + if in.SizingPolicy != nil { + in, out := &in.SizingPolicy, &out.SizingPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoInitParameters. +func (in *VideoInitParameters) DeepCopy() *VideoInitParameters { + if in == nil { + return nil + } + out := new(VideoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoObservation) DeepCopyInto(out *VideoObservation) { + *out = *in + if in.AspectRatio != nil { + in, out := &in.AspectRatio, &out.AspectRatio + *out = new(string) + **out = **in + } + if in.BitRate != nil { + in, out := &in.BitRate, &out.BitRate + *out = new(string) + **out = **in + } + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.DisplayAspectRatio != nil { + in, out := &in.DisplayAspectRatio, &out.DisplayAspectRatio + *out = new(string) + **out = **in + } + if in.FixedGop != nil { + in, out := &in.FixedGop, &out.FixedGop + *out = new(string) + **out = **in + } + if in.FrameRate != nil { + in, out := &in.FrameRate, &out.FrameRate + *out = new(string) + **out = **in + } + if in.KeyframesMaxDist != nil { + in, out := &in.KeyframesMaxDist, &out.KeyframesMaxDist + *out = new(string) + **out = **in + } + if in.MaxFrameRate != nil { + in, out := &in.MaxFrameRate, &out.MaxFrameRate + *out = new(string) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(string) + **out = **in + } + if in.MaxWidth != nil { + in, out := &in.MaxWidth, &out.MaxWidth + *out = new(string) + **out = **in + } + if in.PaddingPolicy != nil { + in, out := &in.PaddingPolicy, &out.PaddingPolicy + *out = new(string) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } + if in.SizingPolicy != nil { + in, out := &in.SizingPolicy, &out.SizingPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoObservation. +func (in *VideoObservation) DeepCopy() *VideoObservation { + if in == nil { + return nil + } + out := new(VideoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoParameters) DeepCopyInto(out *VideoParameters) { + *out = *in + if in.AspectRatio != nil { + in, out := &in.AspectRatio, &out.AspectRatio + *out = new(string) + **out = **in + } + if in.BitRate != nil { + in, out := &in.BitRate, &out.BitRate + *out = new(string) + **out = **in + } + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.DisplayAspectRatio != nil { + in, out := &in.DisplayAspectRatio, &out.DisplayAspectRatio + *out = new(string) + **out = **in + } + if in.FixedGop != nil { + in, out := &in.FixedGop, &out.FixedGop + *out = new(string) + **out = **in + } + if in.FrameRate != nil { + in, out := &in.FrameRate, &out.FrameRate + *out = new(string) + **out = **in + } + if in.KeyframesMaxDist != nil { + in, out := &in.KeyframesMaxDist, &out.KeyframesMaxDist + *out = new(string) + **out = **in + } + if in.MaxFrameRate != nil { + in, out := &in.MaxFrameRate, &out.MaxFrameRate + *out = new(string) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(string) + **out = **in + } + if in.MaxWidth != nil { + in, out := &in.MaxWidth, &out.MaxWidth + *out = new(string) + **out = **in + } + if in.PaddingPolicy != nil { + in, out := &in.PaddingPolicy, &out.PaddingPolicy + *out = new(string) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } + if in.SizingPolicy != nil { + in, out := &in.SizingPolicy, &out.SizingPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoParameters. +func (in *VideoParameters) DeepCopy() *VideoParameters { + if in == nil { + return nil + } + out := new(VideoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoWatermarksInitParameters) DeepCopyInto(out *VideoWatermarksInitParameters) { + *out = *in + if in.HorizontalAlign != nil { + in, out := &in.HorizontalAlign, &out.HorizontalAlign + *out = new(string) + **out = **in + } + if in.HorizontalOffset != nil { + in, out := &in.HorizontalOffset, &out.HorizontalOffset + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(string) + **out = **in + } + if in.MaxWidth != nil { + in, out := &in.MaxWidth, &out.MaxWidth + *out = new(string) + **out = **in + } + if in.Opacity != nil { + in, out := &in.Opacity, &out.Opacity + *out = new(string) + **out = **in + } + if in.SizingPolicy != nil { + in, out := &in.SizingPolicy, &out.SizingPolicy + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.VerticalAlign != nil { + in, out := &in.VerticalAlign, &out.VerticalAlign + *out = new(string) + **out = **in + } + if in.VerticalOffset != nil { + in, out := &in.VerticalOffset, &out.VerticalOffset + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoWatermarksInitParameters. +func (in *VideoWatermarksInitParameters) DeepCopy() *VideoWatermarksInitParameters { + if in == nil { + return nil + } + out := new(VideoWatermarksInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoWatermarksObservation) DeepCopyInto(out *VideoWatermarksObservation) { + *out = *in + if in.HorizontalAlign != nil { + in, out := &in.HorizontalAlign, &out.HorizontalAlign + *out = new(string) + **out = **in + } + if in.HorizontalOffset != nil { + in, out := &in.HorizontalOffset, &out.HorizontalOffset + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(string) + **out = **in + } + if in.MaxWidth != nil { + in, out := &in.MaxWidth, &out.MaxWidth + *out = new(string) + **out = **in + } + if in.Opacity != nil { + in, out := &in.Opacity, &out.Opacity + *out = new(string) + **out = **in + } + if in.SizingPolicy != nil { + in, out := &in.SizingPolicy, &out.SizingPolicy + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.VerticalAlign != nil { + in, out := &in.VerticalAlign, &out.VerticalAlign + *out = new(string) + **out = **in + } + if in.VerticalOffset != nil { + in, out := &in.VerticalOffset, &out.VerticalOffset + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoWatermarksObservation. +func (in *VideoWatermarksObservation) DeepCopy() *VideoWatermarksObservation { + if in == nil { + return nil + } + out := new(VideoWatermarksObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoWatermarksParameters) DeepCopyInto(out *VideoWatermarksParameters) { + *out = *in + if in.HorizontalAlign != nil { + in, out := &in.HorizontalAlign, &out.HorizontalAlign + *out = new(string) + **out = **in + } + if in.HorizontalOffset != nil { + in, out := &in.HorizontalOffset, &out.HorizontalOffset + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(string) + **out = **in + } + if in.MaxWidth != nil { + in, out := &in.MaxWidth, &out.MaxWidth + *out = new(string) + **out = **in + } + if in.Opacity != nil { + in, out := &in.Opacity, &out.Opacity + *out = new(string) + **out = **in + } + if in.SizingPolicy != nil { + in, out := &in.SizingPolicy, &out.SizingPolicy + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.VerticalAlign != nil { + in, out := &in.VerticalAlign, &out.VerticalAlign + *out = new(string) + **out = **in + } + if in.VerticalOffset != nil { + in, out := &in.VerticalOffset, &out.VerticalOffset + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoWatermarksParameters. +func (in *VideoWatermarksParameters) DeepCopy() *VideoWatermarksParameters { + if in == nil { + return nil + } + out := new(VideoWatermarksParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/elastictranscoder/v1beta2/zz_generated.managed.go b/apis/elastictranscoder/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..470d672953 --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Pipeline. +func (mg *Pipeline) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Pipeline. +func (mg *Pipeline) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Pipeline. +func (mg *Pipeline) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Pipeline. +func (mg *Pipeline) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Pipeline. +func (mg *Pipeline) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Pipeline. +func (mg *Pipeline) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Pipeline. +func (mg *Pipeline) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Pipeline. +func (mg *Pipeline) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Pipeline. +func (mg *Pipeline) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Pipeline. +func (mg *Pipeline) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Pipeline. +func (mg *Pipeline) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Pipeline. 
+func (mg *Pipeline) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Preset. +func (mg *Preset) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Preset. +func (mg *Preset) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Preset. +func (mg *Preset) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Preset. +func (mg *Preset) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Preset. +func (mg *Preset) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Preset. +func (mg *Preset) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Preset. +func (mg *Preset) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Preset. +func (mg *Preset) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Preset. +func (mg *Preset) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Preset. +func (mg *Preset) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Preset. +func (mg *Preset) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Preset. 
+func (mg *Preset) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/elastictranscoder/v1beta2/zz_generated.managedlist.go b/apis/elastictranscoder/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..77eee4bf8c --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PipelineList. +func (l *PipelineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PresetList. +func (l *PresetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/elastictranscoder/v1beta2/zz_generated.resolvers.go b/apis/elastictranscoder/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..632eea85cf --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,192 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Pipeline. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Pipeline) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.ContentConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ContentConfig.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ContentConfig.BucketRef, + Selector: mg.Spec.ForProvider.ContentConfig.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ContentConfig.Bucket") + } + mg.Spec.ForProvider.ContentConfig.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ContentConfig.BucketRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InputBucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.InputBucketRef, + Selector: mg.Spec.ForProvider.InputBucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InputBucket") + } + mg.Spec.ForProvider.InputBucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InputBucketRef = rsp.ResolvedReference + 
{ + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Role), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.RoleRef, + Selector: mg.Spec.ForProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Role") + } + mg.Spec.ForProvider.Role = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.ThumbnailConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ThumbnailConfig.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ThumbnailConfig.BucketRef, + Selector: mg.Spec.ForProvider.ThumbnailConfig.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ThumbnailConfig.Bucket") + } + mg.Spec.ForProvider.ThumbnailConfig.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ThumbnailConfig.BucketRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.ContentConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ContentConfig.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ContentConfig.BucketRef, + Selector: mg.Spec.InitProvider.ContentConfig.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ContentConfig.Bucket") + } + mg.Spec.InitProvider.ContentConfig.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ContentConfig.BucketRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InputBucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.InputBucketRef, + Selector: mg.Spec.InitProvider.InputBucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InputBucket") + } + mg.Spec.InitProvider.InputBucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InputBucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Role), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.RoleRef, + Selector: mg.Spec.InitProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.Role") + } + mg.Spec.InitProvider.Role = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.ThumbnailConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ThumbnailConfig.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ThumbnailConfig.BucketRef, + Selector: mg.Spec.InitProvider.ThumbnailConfig.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ThumbnailConfig.Bucket") + } + mg.Spec.InitProvider.ThumbnailConfig.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ThumbnailConfig.BucketRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/elastictranscoder/v1beta2/zz_groupversion_info.go b/apis/elastictranscoder/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..613d914900 --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=elastictranscoder.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "elastictranscoder.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/elastictranscoder/v1beta2/zz_pipeline_terraformed.go b/apis/elastictranscoder/v1beta2/zz_pipeline_terraformed.go new file mode 100755 index 0000000000..0b04cc470e --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_pipeline_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Pipeline +func (mg *Pipeline) GetTerraformResourceType() string { + return "aws_elastictranscoder_pipeline" +} + +// GetConnectionDetailsMapping for this Pipeline +func (tr *Pipeline) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Pipeline +func (tr *Pipeline) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Pipeline +func (tr *Pipeline) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform 
resource of this Pipeline +func (tr *Pipeline) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Pipeline +func (tr *Pipeline) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Pipeline +func (tr *Pipeline) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Pipeline +func (tr *Pipeline) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Pipeline +func (tr *Pipeline) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Pipeline using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Pipeline) LateInitialize(attrs []byte) (bool, error) { + params := &PipelineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Pipeline) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elastictranscoder/v1beta2/zz_pipeline_types.go b/apis/elastictranscoder/v1beta2/zz_pipeline_types.go new file mode 100755 index 0000000000..d60f2259ec --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_pipeline_types.go @@ -0,0 +1,459 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContentConfigInitParameters struct { + + // The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type ContentConfigObservation struct { + + // The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type ContentConfigParameters struct { + + // The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. 
+ // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket. + // +kubebuilder:validation:Optional + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type ContentConfigPermissionsInitParameters struct { + + // The permission that you want to give to the AWS user that you specified in content_config_permissions.grantee. Valid values are Read, ReadAcp, WriteAcp or FullControl. + Access []*string `json:"access,omitempty" tf:"access,omitempty"` + + // The AWS user or group that you want to have access to transcoded files and playlists. + Grantee *string `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Specify the type of value that appears in the content_config_permissions.grantee object. Valid values are Canonical, Email or Group. + GranteeType *string `json:"granteeType,omitempty" tf:"grantee_type,omitempty"` +} + +type ContentConfigPermissionsObservation struct { + + // The permission that you want to give to the AWS user that you specified in content_config_permissions.grantee. Valid values are Read, ReadAcp, WriteAcp or FullControl. + Access []*string `json:"access,omitempty" tf:"access,omitempty"` + + // The AWS user or group that you want to have access to transcoded files and playlists. + Grantee *string `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Specify the type of value that appears in the content_config_permissions.grantee object. Valid values are Canonical, Email or Group. + GranteeType *string `json:"granteeType,omitempty" tf:"grantee_type,omitempty"` +} + +type ContentConfigPermissionsParameters struct { + + // The permission that you want to give to the AWS user that you specified in content_config_permissions.grantee. 
Valid values are Read, ReadAcp, WriteAcp or FullControl. + // +kubebuilder:validation:Optional + Access []*string `json:"access,omitempty" tf:"access,omitempty"` + + // The AWS user or group that you want to have access to transcoded files and playlists. + // +kubebuilder:validation:Optional + Grantee *string `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Specify the type of value that appears in the content_config_permissions.grantee object. Valid values are Canonical, Email or Group. + // +kubebuilder:validation:Optional + GranteeType *string `json:"granteeType,omitempty" tf:"grantee_type,omitempty"` +} + +type NotificationsInitParameters struct { + + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline. + Completed *string `json:"completed,omitempty" tf:"completed,omitempty"` + + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline. + Error *string `json:"error,omitempty" tf:"error,omitempty"` + + // The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline. + Progressing *string `json:"progressing,omitempty" tf:"progressing,omitempty"` + + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline. + Warning *string `json:"warning,omitempty" tf:"warning,omitempty"` +} + +type NotificationsObservation struct { + + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline. 
+ Completed *string `json:"completed,omitempty" tf:"completed,omitempty"` + + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline. + Error *string `json:"error,omitempty" tf:"error,omitempty"` + + // The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline. + Progressing *string `json:"progressing,omitempty" tf:"progressing,omitempty"` + + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline. + Warning *string `json:"warning,omitempty" tf:"warning,omitempty"` +} + +type NotificationsParameters struct { + + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline. + // +kubebuilder:validation:Optional + Completed *string `json:"completed,omitempty" tf:"completed,omitempty"` + + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline. + // +kubebuilder:validation:Optional + Error *string `json:"error,omitempty" tf:"error,omitempty"` + + // The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline. + // +kubebuilder:validation:Optional + Progressing *string `json:"progressing,omitempty" tf:"progressing,omitempty"` + + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline. 
+ // +kubebuilder:validation:Optional + Warning *string `json:"warning,omitempty" tf:"warning,omitempty"` +} + +type PipelineInitParameters struct { + + // The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. + AwsKMSKeyArn *string `json:"awsKmsKeyArn,omitempty" tf:"aws_kms_key_arn,omitempty"` + + // The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below) + ContentConfig *ContentConfigInitParameters `json:"contentConfig,omitempty" tf:"content_config,omitempty"` + + // The permissions for the content_config object. (documented below) + ContentConfigPermissions []ContentConfigPermissionsInitParameters `json:"contentConfigPermissions,omitempty" tf:"content_config_permissions,omitempty"` + + // The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + InputBucket *string `json:"inputBucket,omitempty" tf:"input_bucket,omitempty"` + + // Reference to a Bucket in s3 to populate inputBucket. + // +kubebuilder:validation:Optional + InputBucketRef *v1.Reference `json:"inputBucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate inputBucket. + // +kubebuilder:validation:Optional + InputBucketSelector *v1.Selector `json:"inputBucketSelector,omitempty" tf:"-"` + + // The name of the pipeline. Maximum 40 characters + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status. 
(documented below) + Notifications *NotificationsInitParameters `json:"notifications,omitempty" tf:"notifications,omitempty"` + + // The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. + OutputBucket *string `json:"outputBucket,omitempty" tf:"output_bucket,omitempty"` + + // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // The ThumbnailConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. (documented below) + ThumbnailConfig *ThumbnailConfigInitParameters `json:"thumbnailConfig,omitempty" tf:"thumbnail_config,omitempty"` + + // The permissions for the thumbnail_config object. (documented below) + ThumbnailConfigPermissions []ThumbnailConfigPermissionsInitParameters `json:"thumbnailConfigPermissions,omitempty" tf:"thumbnail_config_permissions,omitempty"` +} + +type PipelineObservation struct { + + // The ARN of the Elastictranscoder pipeline. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. 
+ AwsKMSKeyArn *string `json:"awsKmsKeyArn,omitempty" tf:"aws_kms_key_arn,omitempty"` + + // The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below) + ContentConfig *ContentConfigObservation `json:"contentConfig,omitempty" tf:"content_config,omitempty"` + + // The permissions for the content_config object. (documented below) + ContentConfigPermissions []ContentConfigPermissionsObservation `json:"contentConfigPermissions,omitempty" tf:"content_config_permissions,omitempty"` + + // The ID of the Elastictranscoder pipeline. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks. + InputBucket *string `json:"inputBucket,omitempty" tf:"input_bucket,omitempty"` + + // The name of the pipeline. Maximum 40 characters + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status. (documented below) + Notifications *NotificationsObservation `json:"notifications,omitempty" tf:"notifications,omitempty"` + + // The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. + OutputBucket *string `json:"outputBucket,omitempty" tf:"output_bucket,omitempty"` + + // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The ThumbnailConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. (documented below) + ThumbnailConfig *ThumbnailConfigObservation `json:"thumbnailConfig,omitempty" tf:"thumbnail_config,omitempty"` + + // The permissions for the thumbnail_config object. 
(documented below) + ThumbnailConfigPermissions []ThumbnailConfigPermissionsObservation `json:"thumbnailConfigPermissions,omitempty" tf:"thumbnail_config_permissions,omitempty"` +} + +type PipelineParameters struct { + + // The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. + // +kubebuilder:validation:Optional + AwsKMSKeyArn *string `json:"awsKmsKeyArn,omitempty" tf:"aws_kms_key_arn,omitempty"` + + // The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below) + // +kubebuilder:validation:Optional + ContentConfig *ContentConfigParameters `json:"contentConfig,omitempty" tf:"content_config,omitempty"` + + // The permissions for the content_config object. (documented below) + // +kubebuilder:validation:Optional + ContentConfigPermissions []ContentConfigPermissionsParameters `json:"contentConfigPermissions,omitempty" tf:"content_config_permissions,omitempty"` + + // The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InputBucket *string `json:"inputBucket,omitempty" tf:"input_bucket,omitempty"` + + // Reference to a Bucket in s3 to populate inputBucket. + // +kubebuilder:validation:Optional + InputBucketRef *v1.Reference `json:"inputBucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate inputBucket. + // +kubebuilder:validation:Optional + InputBucketSelector *v1.Selector `json:"inputBucketSelector,omitempty" tf:"-"` + + // The name of the pipeline. 
Maximum 40 characters + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status. (documented below) + // +kubebuilder:validation:Optional + Notifications *NotificationsParameters `json:"notifications,omitempty" tf:"notifications,omitempty"` + + // The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. + // +kubebuilder:validation:Optional + OutputBucket *string `json:"outputBucket,omitempty" tf:"output_bucket,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // The ThumbnailConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. (documented below) + // +kubebuilder:validation:Optional + ThumbnailConfig *ThumbnailConfigParameters `json:"thumbnailConfig,omitempty" tf:"thumbnail_config,omitempty"` + + // The permissions for the thumbnail_config object. 
(documented below) + // +kubebuilder:validation:Optional + ThumbnailConfigPermissions []ThumbnailConfigPermissionsParameters `json:"thumbnailConfigPermissions,omitempty" tf:"thumbnail_config_permissions,omitempty"` +} + +type ThumbnailConfigInitParameters struct { + + // The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type ThumbnailConfigObservation struct { + + // The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type ThumbnailConfigParameters struct { + + // The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket. + // +kubebuilder:validation:Optional + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type ThumbnailConfigPermissionsInitParameters struct { + + // The permission that you want to give to the AWS user that you specified in content_config_permissions.grantee. Valid values are Read, ReadAcp, WriteAcp or FullControl. + Access []*string `json:"access,omitempty" tf:"access,omitempty"` + + // The AWS user or group that you want to have access to transcoded files and playlists. + Grantee *string `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Specify the type of value that appears in the content_config_permissions.grantee object. Valid values are Canonical, Email or Group. + GranteeType *string `json:"granteeType,omitempty" tf:"grantee_type,omitempty"` +} + +type ThumbnailConfigPermissionsObservation struct { + + // The permission that you want to give to the AWS user that you specified in content_config_permissions.grantee. Valid values are Read, ReadAcp, WriteAcp or FullControl. 
+ Access []*string `json:"access,omitempty" tf:"access,omitempty"` + + // The AWS user or group that you want to have access to transcoded files and playlists. + Grantee *string `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Specify the type of value that appears in the content_config_permissions.grantee object. Valid values are Canonical, Email or Group. + GranteeType *string `json:"granteeType,omitempty" tf:"grantee_type,omitempty"` +} + +type ThumbnailConfigPermissionsParameters struct { + + // The permission that you want to give to the AWS user that you specified in content_config_permissions.grantee. Valid values are Read, ReadAcp, WriteAcp or FullControl. + // +kubebuilder:validation:Optional + Access []*string `json:"access,omitempty" tf:"access,omitempty"` + + // The AWS user or group that you want to have access to transcoded files and playlists. + // +kubebuilder:validation:Optional + Grantee *string `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Specify the type of value that appears in the content_config_permissions.grantee object. Valid values are Canonical, Email or Group. + // +kubebuilder:validation:Optional + GranteeType *string `json:"granteeType,omitempty" tf:"grantee_type,omitempty"` +} + +// PipelineSpec defines the desired state of Pipeline +type PipelineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PipelineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PipelineInitParameters `json:"initProvider,omitempty"` +} + +// PipelineStatus defines the observed state of Pipeline. +type PipelineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PipelineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Pipeline is the Schema for the Pipelines API. Provides an Elastic Transcoder pipeline resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Pipeline struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec PipelineSpec `json:"spec"` + Status PipelineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PipelineList contains a list of Pipelines +type PipelineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Pipeline `json:"items"` +} + +// Repository type metadata. +var ( + Pipeline_Kind = "Pipeline" + Pipeline_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Pipeline_Kind}.String() + Pipeline_KindAPIVersion = Pipeline_Kind + "." 
+ CRDGroupVersion.String() + Pipeline_GroupVersionKind = CRDGroupVersion.WithKind(Pipeline_Kind) +) + +func init() { + SchemeBuilder.Register(&Pipeline{}, &PipelineList{}) +} diff --git a/apis/elastictranscoder/v1beta2/zz_preset_terraformed.go b/apis/elastictranscoder/v1beta2/zz_preset_terraformed.go new file mode 100755 index 0000000000..4ea1a8ac99 --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_preset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Preset +func (mg *Preset) GetTerraformResourceType() string { + return "aws_elastictranscoder_preset" +} + +// GetConnectionDetailsMapping for this Preset +func (tr *Preset) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Preset +func (tr *Preset) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Preset +func (tr *Preset) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Preset +func (tr *Preset) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Preset +func (tr *Preset) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Preset
+func (tr *Preset) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Preset
+func (tr *Preset) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Preset, merging spec.initProvider into spec.forProvider when shouldMergeInitProvider is true.
+func (tr *Preset) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Preset using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Preset) LateInitialize(attrs []byte) (bool, error) { + params := &PresetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Preset) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elastictranscoder/v1beta2/zz_preset_types.go b/apis/elastictranscoder/v1beta2/zz_preset_types.go new file mode 100755 index 0000000000..bc5295219d --- /dev/null +++ b/apis/elastictranscoder/v1beta2/zz_preset_types.go @@ -0,0 +1,640 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AudioCodecOptionsInitParameters struct { + + // The bit depth of a sample is how many bits of information are included in the audio samples. Valid values are 16 and 24. (FLAC/PCM Only) + BitDepth *string `json:"bitDepth,omitempty" tf:"bit_depth,omitempty"` + + // The order the bits of a PCM sample are stored in. The supported value is LittleEndian. (PCM Only) + BitOrder *string `json:"bitOrder,omitempty" tf:"bit_order,omitempty"` + + // If you specified AAC for Audio:Codec, choose the AAC profile for the output file. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // Whether audio samples are represented with negative and positive numbers (signed) or only positive numbers (unsigned). The supported value is Signed. 
(PCM Only) + Signed *string `json:"signed,omitempty" tf:"signed,omitempty"` +} + +type AudioCodecOptionsObservation struct { + + // The bit depth of a sample is how many bits of information are included in the audio samples. Valid values are 16 and 24. (FLAC/PCM Only) + BitDepth *string `json:"bitDepth,omitempty" tf:"bit_depth,omitempty"` + + // The order the bits of a PCM sample are stored in. The supported value is LittleEndian. (PCM Only) + BitOrder *string `json:"bitOrder,omitempty" tf:"bit_order,omitempty"` + + // If you specified AAC for Audio:Codec, choose the AAC profile for the output file. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // Whether audio samples are represented with negative and positive numbers (signed) or only positive numbers (unsigned). The supported value is Signed. (PCM Only) + Signed *string `json:"signed,omitempty" tf:"signed,omitempty"` +} + +type AudioCodecOptionsParameters struct { + + // The bit depth of a sample is how many bits of information are included in the audio samples. Valid values are 16 and 24. (FLAC/PCM Only) + // +kubebuilder:validation:Optional + BitDepth *string `json:"bitDepth,omitempty" tf:"bit_depth,omitempty"` + + // The order the bits of a PCM sample are stored in. The supported value is LittleEndian. (PCM Only) + // +kubebuilder:validation:Optional + BitOrder *string `json:"bitOrder,omitempty" tf:"bit_order,omitempty"` + + // If you specified AAC for Audio:Codec, choose the AAC profile for the output file. + // +kubebuilder:validation:Optional + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // Whether audio samples are represented with negative and positive numbers (signed) or only positive numbers (unsigned). The supported value is Signed. (PCM Only) + // +kubebuilder:validation:Optional + Signed *string `json:"signed,omitempty" tf:"signed,omitempty"` +} + +type AudioInitParameters struct { + + // The method of organizing audio channels and tracks. 
Use Audio:Channels to specify the number of channels in your output, and Audio:AudioPackingMode to specify the number of tracks and their relation to the channels. If you do not specify an Audio:AudioPackingMode, Elastic Transcoder uses SingleTrack. + AudioPackingMode *string `json:"audioPackingMode,omitempty" tf:"audio_packing_mode,omitempty"` + + // The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive. + BitRate *string `json:"bitRate,omitempty" tf:"bit_rate,omitempty"` + + // The number of audio channels in the output file + Channels *string `json:"channels,omitempty" tf:"channels,omitempty"` + + // The audio codec for the output file. Valid values are AAC, flac, mp2, mp3, pcm, and vorbis. + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // The sample rate of the audio stream in the output file, in hertz. Valid values are: auto, 22050, 32000, 44100, 48000, 96000 + SampleRate *string `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type AudioObservation struct { + + // The method of organizing audio channels and tracks. Use Audio:Channels to specify the number of channels in your output, and Audio:AudioPackingMode to specify the number of tracks and their relation to the channels. If you do not specify an Audio:AudioPackingMode, Elastic Transcoder uses SingleTrack. + AudioPackingMode *string `json:"audioPackingMode,omitempty" tf:"audio_packing_mode,omitempty"` + + // The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive. + BitRate *string `json:"bitRate,omitempty" tf:"bit_rate,omitempty"` + + // The number of audio channels in the output file + Channels *string `json:"channels,omitempty" tf:"channels,omitempty"` + + // The audio codec for the output file. Valid values are AAC, flac, mp2, mp3, pcm, and vorbis. 
+ Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // The sample rate of the audio stream in the output file, in hertz. Valid values are: auto, 22050, 32000, 44100, 48000, 96000 + SampleRate *string `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type AudioParameters struct { + + // The method of organizing audio channels and tracks. Use Audio:Channels to specify the number of channels in your output, and Audio:AudioPackingMode to specify the number of tracks and their relation to the channels. If you do not specify an Audio:AudioPackingMode, Elastic Transcoder uses SingleTrack. + // +kubebuilder:validation:Optional + AudioPackingMode *string `json:"audioPackingMode,omitempty" tf:"audio_packing_mode,omitempty"` + + // The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive. + // +kubebuilder:validation:Optional + BitRate *string `json:"bitRate,omitempty" tf:"bit_rate,omitempty"` + + // The number of audio channels in the output file + // +kubebuilder:validation:Optional + Channels *string `json:"channels,omitempty" tf:"channels,omitempty"` + + // The audio codec for the output file. Valid values are AAC, flac, mp2, mp3, pcm, and vorbis. + // +kubebuilder:validation:Optional + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // The sample rate of the audio stream in the output file, in hertz. Valid values are: auto, 22050, 32000, 44100, 48000, 96000 + // +kubebuilder:validation:Optional + SampleRate *string `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type PresetInitParameters struct { + + // Audio parameters object (documented below). + Audio *AudioInitParameters `json:"audio,omitempty" tf:"audio,omitempty"` + + // Codec options for the audio parameters (documented below) + AudioCodecOptions *AudioCodecOptionsInitParameters `json:"audioCodecOptions,omitempty" tf:"audio_codec_options,omitempty"` + + // The container type for the output file. 
Valid values are flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // A description of the preset (maximum 255 characters) + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the preset. (maximum 40 characters) + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Thumbnail parameters object (documented below) + Thumbnails *ThumbnailsInitParameters `json:"thumbnails,omitempty" tf:"thumbnails,omitempty"` + + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Video parameters object (documented below) + Video *VideoInitParameters `json:"video,omitempty" tf:"video,omitempty"` + + // Codec options for the video parameters + // +mapType=granular + VideoCodecOptions map[string]*string `json:"videoCodecOptions,omitempty" tf:"video_codec_options,omitempty"` + + // Watermark parameters for the video parameters (documented below) + VideoWatermarks []VideoWatermarksInitParameters `json:"videoWatermarks,omitempty" tf:"video_watermarks,omitempty"` +} + +type PresetObservation struct { + + // Amazon Resource Name (ARN) of the Elastic Transcoder Preset. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Audio parameters object (documented below). + Audio *AudioObservation `json:"audio,omitempty" tf:"audio,omitempty"` + + // Codec options for the audio parameters (documented below) + AudioCodecOptions *AudioCodecOptionsObservation `json:"audioCodecOptions,omitempty" tf:"audio_codec_options,omitempty"` + + // The container type for the output file. Valid values are flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // A description of the preset (maximum 255 characters) + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A unique identifier for the settings for one watermark. 
The value of Id can be up to 40 characters long. You can specify settings for up to four watermarks. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the preset. (maximum 40 characters) + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Thumbnail parameters object (documented below) + Thumbnails *ThumbnailsObservation `json:"thumbnails,omitempty" tf:"thumbnails,omitempty"` + + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Video parameters object (documented below) + Video *VideoObservation `json:"video,omitempty" tf:"video,omitempty"` + + // Codec options for the video parameters + // +mapType=granular + VideoCodecOptions map[string]*string `json:"videoCodecOptions,omitempty" tf:"video_codec_options,omitempty"` + + // Watermark parameters for the video parameters (documented below) + VideoWatermarks []VideoWatermarksObservation `json:"videoWatermarks,omitempty" tf:"video_watermarks,omitempty"` +} + +type PresetParameters struct { + + // Audio parameters object (documented below). + // +kubebuilder:validation:Optional + Audio *AudioParameters `json:"audio,omitempty" tf:"audio,omitempty"` + + // Codec options for the audio parameters (documented below) + // +kubebuilder:validation:Optional + AudioCodecOptions *AudioCodecOptionsParameters `json:"audioCodecOptions,omitempty" tf:"audio_codec_options,omitempty"` + + // The container type for the output file. Valid values are flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm. + // +kubebuilder:validation:Optional + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // A description of the preset (maximum 255 characters) + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the preset. 
(maximum 40 characters) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Thumbnail parameters object (documented below) + // +kubebuilder:validation:Optional + Thumbnails *ThumbnailsParameters `json:"thumbnails,omitempty" tf:"thumbnails,omitempty"` + + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Video parameters object (documented below) + // +kubebuilder:validation:Optional + Video *VideoParameters `json:"video,omitempty" tf:"video,omitempty"` + + // Codec options for the video parameters + // +kubebuilder:validation:Optional + // +mapType=granular + VideoCodecOptions map[string]*string `json:"videoCodecOptions,omitempty" tf:"video_codec_options,omitempty"` + + // Watermark parameters for the video parameters (documented below) + // +kubebuilder:validation:Optional + VideoWatermarks []VideoWatermarksParameters `json:"videoWatermarks,omitempty" tf:"video_watermarks,omitempty"` +} + +type ThumbnailsInitParameters struct { + + // The aspect ratio of thumbnails. The following values are valid: auto, 1:1, 4:3, 3:2, 16:9 + AspectRatio *string `json:"aspectRatio,omitempty" tf:"aspect_ratio,omitempty"` + + // The format of thumbnails, if any. Valid formats are jpg and png. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The approximate number of seconds between thumbnails. The value must be an integer. The actual interval can vary by several seconds from one thumbnail to the next. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The maximum height of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. 
If you specify a numeric value, enter an even integer between 32 and 3072, inclusive. + MaxHeight *string `json:"maxHeight,omitempty" tf:"max_height,omitempty"` + + // The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive. + MaxWidth *string `json:"maxWidth,omitempty" tf:"max_width,omitempty"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder might add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings. + PaddingPolicy *string `json:"paddingPolicy,omitempty" tf:"padding_policy,omitempty"` + + // The width and height of thumbnail files in pixels, in the format WidthxHeight, where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object. (To better control resolution and aspect ratio of thumbnails, we recommend that you use the thumbnail values max_width, max_height, sizing_policy, and padding_policy instead of resolution and aspect_ratio. The two groups of settings are mutually exclusive. Do not use them together) + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` + + // A value that controls scaling of thumbnails. Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, and ShrinkToFill. + SizingPolicy *string `json:"sizingPolicy,omitempty" tf:"sizing_policy,omitempty"` +} + +type ThumbnailsObservation struct { + + // The aspect ratio of thumbnails. The following values are valid: auto, 1:1, 4:3, 3:2, 16:9 + AspectRatio *string `json:"aspectRatio,omitempty" tf:"aspect_ratio,omitempty"` + + // The format of thumbnails, if any. Valid formats are jpg and png. 
+ Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The approximate number of seconds between thumbnails. The value must be an integer. The actual interval can vary by several seconds from one thumbnail to the next. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The maximum height of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive. + MaxHeight *string `json:"maxHeight,omitempty" tf:"max_height,omitempty"` + + // The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive. + MaxWidth *string `json:"maxWidth,omitempty" tf:"max_width,omitempty"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder might add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings. + PaddingPolicy *string `json:"paddingPolicy,omitempty" tf:"padding_policy,omitempty"` + + // The width and height of thumbnail files in pixels, in the format WidthxHeight, where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object. (To better control resolution and aspect ratio of thumbnails, we recommend that you use the thumbnail values max_width, max_height, sizing_policy, and padding_policy instead of resolution and aspect_ratio. The two groups of settings are mutually exclusive. Do not use them together) + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` + + // A value that controls scaling of thumbnails. Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, and ShrinkToFill. 
+ SizingPolicy *string `json:"sizingPolicy,omitempty" tf:"sizing_policy,omitempty"` +} + +type ThumbnailsParameters struct { + + // The aspect ratio of thumbnails. The following values are valid: auto, 1:1, 4:3, 3:2, 16:9 + // +kubebuilder:validation:Optional + AspectRatio *string `json:"aspectRatio,omitempty" tf:"aspect_ratio,omitempty"` + + // The format of thumbnails, if any. Valid formats are jpg and png. + // +kubebuilder:validation:Optional + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The approximate number of seconds between thumbnails. The value must be an integer. The actual interval can vary by several seconds from one thumbnail to the next. + // +kubebuilder:validation:Optional + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The maximum height of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive. + // +kubebuilder:validation:Optional + MaxHeight *string `json:"maxHeight,omitempty" tf:"max_height,omitempty"` + + // The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive. + // +kubebuilder:validation:Optional + MaxWidth *string `json:"maxWidth,omitempty" tf:"max_width,omitempty"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder might add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings. + // +kubebuilder:validation:Optional + PaddingPolicy *string `json:"paddingPolicy,omitempty" tf:"padding_policy,omitempty"` + + // The width and height of thumbnail files in pixels, in the format WidthxHeight, where both values are even integers. 
The values cannot exceed the width and height that you specified in the Video:Resolution object. (To better control resolution and aspect ratio of thumbnails, we recommend that you use the thumbnail values max_width, max_height, sizing_policy, and padding_policy instead of resolution and aspect_ratio. The two groups of settings are mutually exclusive. Do not use them together) + // +kubebuilder:validation:Optional + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` + + // A value that controls scaling of thumbnails. Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, and ShrinkToFill. + // +kubebuilder:validation:Optional + SizingPolicy *string `json:"sizingPolicy,omitempty" tf:"sizing_policy,omitempty"` +} + +type VideoInitParameters struct { + + // The aspect ratio of thumbnails. The following values are valid: auto, 1:1, 4:3, 3:2, 16:9 + AspectRatio *string `json:"aspectRatio,omitempty" tf:"aspect_ratio,omitempty"` + + // The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive. + BitRate *string `json:"bitRate,omitempty" tf:"bit_rate,omitempty"` + + // The audio codec for the output file. Valid values are AAC, flac, mp2, mp3, pcm, and vorbis. + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // The value that Elastic Transcoder adds to the metadata in the output file. If you set DisplayAspectRatio to auto, Elastic Transcoder chooses an aspect ratio that ensures square pixels. If you specify another option, Elastic Transcoder sets that value in the output file. + DisplayAspectRatio *string `json:"displayAspectRatio,omitempty" tf:"display_aspect_ratio,omitempty"` + + // Whether to use a fixed value for Video:FixedGOP. Not applicable for containers of type gif. Valid values are true and false. Also known as, Fixed Number of Frames Between Keyframes. 
+ FixedGop *string `json:"fixedGop,omitempty" tf:"fixed_gop,omitempty"` + + // The frames per second for the video stream in the output file. The following values are valid: auto, 10, 15, 23.97, 24, 25, 29.97, 30, 50, 60. + FrameRate *string `json:"frameRate,omitempty" tf:"frame_rate,omitempty"` + + // The maximum number of frames between key frames. Not applicable for containers of type gif. + KeyframesMaxDist *string `json:"keyframesMaxDist,omitempty" tf:"keyframes_max_dist,omitempty"` + + // If you specify auto for FrameRate, Elastic Transcoder uses the frame rate of the input video for the frame rate of the output video, up to the maximum frame rate. If you do not specify a MaxFrameRate, Elastic Transcoder will use a default of 30. + MaxFrameRate *string `json:"maxFrameRate,omitempty" tf:"max_frame_rate,omitempty"` + + // The maximum height of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive. + MaxHeight *string `json:"maxHeight,omitempty" tf:"max_height,omitempty"` + + // The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive. + MaxWidth *string `json:"maxWidth,omitempty" tf:"max_width,omitempty"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder might add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings. + PaddingPolicy *string `json:"paddingPolicy,omitempty" tf:"padding_policy,omitempty"` + + // The width and height of thumbnail files in pixels, in the format WidthxHeight, where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object. 
(To better control resolution and aspect ratio of thumbnails, we recommend that you use the thumbnail values max_width, max_height, sizing_policy, and padding_policy instead of resolution and aspect_ratio. The two groups of settings are mutually exclusive. Do not use them together) + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` + + // A value that controls scaling of thumbnails. Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, and ShrinkToFill. + SizingPolicy *string `json:"sizingPolicy,omitempty" tf:"sizing_policy,omitempty"` +} + +type VideoObservation struct { + + // The aspect ratio of thumbnails. The following values are valid: auto, 1:1, 4:3, 3:2, 16:9 + AspectRatio *string `json:"aspectRatio,omitempty" tf:"aspect_ratio,omitempty"` + + // The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive. + BitRate *string `json:"bitRate,omitempty" tf:"bit_rate,omitempty"` + + // The audio codec for the output file. Valid values are AAC, flac, mp2, mp3, pcm, and vorbis. + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // The value that Elastic Transcoder adds to the metadata in the output file. If you set DisplayAspectRatio to auto, Elastic Transcoder chooses an aspect ratio that ensures square pixels. If you specify another option, Elastic Transcoder sets that value in the output file. + DisplayAspectRatio *string `json:"displayAspectRatio,omitempty" tf:"display_aspect_ratio,omitempty"` + + // Whether to use a fixed value for Video:FixedGOP. Not applicable for containers of type gif. Valid values are true and false. Also known as, Fixed Number of Frames Between Keyframes. + FixedGop *string `json:"fixedGop,omitempty" tf:"fixed_gop,omitempty"` + + // The frames per second for the video stream in the output file. The following values are valid: auto, 10, 15, 23.97, 24, 25, 29.97, 30, 50, 60. 
+ FrameRate *string `json:"frameRate,omitempty" tf:"frame_rate,omitempty"` + + // The maximum number of frames between key frames. Not applicable for containers of type gif. + KeyframesMaxDist *string `json:"keyframesMaxDist,omitempty" tf:"keyframes_max_dist,omitempty"` + + // If you specify auto for FrameRate, Elastic Transcoder uses the frame rate of the input video for the frame rate of the output video, up to the maximum frame rate. If you do not specify a MaxFrameRate, Elastic Transcoder will use a default of 30. + MaxFrameRate *string `json:"maxFrameRate,omitempty" tf:"max_frame_rate,omitempty"` + + // The maximum height of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive. + MaxHeight *string `json:"maxHeight,omitempty" tf:"max_height,omitempty"` + + // The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive. + MaxWidth *string `json:"maxWidth,omitempty" tf:"max_width,omitempty"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder might add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings. + PaddingPolicy *string `json:"paddingPolicy,omitempty" tf:"padding_policy,omitempty"` + + // The width and height of thumbnail files in pixels, in the format WidthxHeight, where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object. (To better control resolution and aspect ratio of thumbnails, we recommend that you use the thumbnail values max_width, max_height, sizing_policy, and padding_policy instead of resolution and aspect_ratio. 
The two groups of settings are mutually exclusive. Do not use them together) + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` + + // A value that controls scaling of thumbnails. Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, and ShrinkToFill. + SizingPolicy *string `json:"sizingPolicy,omitempty" tf:"sizing_policy,omitempty"` +} + +type VideoParameters struct { + + // The aspect ratio of thumbnails. The following values are valid: auto, 1:1, 4:3, 3:2, 16:9 + // +kubebuilder:validation:Optional + AspectRatio *string `json:"aspectRatio,omitempty" tf:"aspect_ratio,omitempty"` + + // The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive. + // +kubebuilder:validation:Optional + BitRate *string `json:"bitRate,omitempty" tf:"bit_rate,omitempty"` + + // The audio codec for the output file. Valid values are AAC, flac, mp2, mp3, pcm, and vorbis. + // +kubebuilder:validation:Optional + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // The value that Elastic Transcoder adds to the metadata in the output file. If you set DisplayAspectRatio to auto, Elastic Transcoder chooses an aspect ratio that ensures square pixels. If you specify another option, Elastic Transcoder sets that value in the output file. + // +kubebuilder:validation:Optional + DisplayAspectRatio *string `json:"displayAspectRatio,omitempty" tf:"display_aspect_ratio,omitempty"` + + // Whether to use a fixed value for Video:FixedGOP. Not applicable for containers of type gif. Valid values are true and false. Also known as, Fixed Number of Frames Between Keyframes. + // +kubebuilder:validation:Optional + FixedGop *string `json:"fixedGop,omitempty" tf:"fixed_gop,omitempty"` + + // The frames per second for the video stream in the output file. The following values are valid: auto, 10, 15, 23.97, 24, 25, 29.97, 30, 50, 60. 
+ // +kubebuilder:validation:Optional + FrameRate *string `json:"frameRate,omitempty" tf:"frame_rate,omitempty"` + + // The maximum number of frames between key frames. Not applicable for containers of type gif. + // +kubebuilder:validation:Optional + KeyframesMaxDist *string `json:"keyframesMaxDist,omitempty" tf:"keyframes_max_dist,omitempty"` + + // If you specify auto for FrameRate, Elastic Transcoder uses the frame rate of the input video for the frame rate of the output video, up to the maximum frame rate. If you do not specify a MaxFrameRate, Elastic Transcoder will use a default of 30. + // +kubebuilder:validation:Optional + MaxFrameRate *string `json:"maxFrameRate,omitempty" tf:"max_frame_rate,omitempty"` + + // The maximum height of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive. + // +kubebuilder:validation:Optional + MaxHeight *string `json:"maxHeight,omitempty" tf:"max_height,omitempty"` + + // The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive. + // +kubebuilder:validation:Optional + MaxWidth *string `json:"maxWidth,omitempty" tf:"max_width,omitempty"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder might add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings. + // +kubebuilder:validation:Optional + PaddingPolicy *string `json:"paddingPolicy,omitempty" tf:"padding_policy,omitempty"` + + // The width and height of thumbnail files in pixels, in the format WidthxHeight, where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object. 
(To better control resolution and aspect ratio of thumbnails, we recommend that you use the thumbnail values max_width, max_height, sizing_policy, and padding_policy instead of resolution and aspect_ratio. The two groups of settings are mutually exclusive. Do not use them together) + // +kubebuilder:validation:Optional + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` + + // A value that controls scaling of thumbnails. Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, and ShrinkToFill. + // +kubebuilder:validation:Optional + SizingPolicy *string `json:"sizingPolicy,omitempty" tf:"sizing_policy,omitempty"` +} + +type VideoWatermarksInitParameters struct { + + // The horizontal position of the watermark unless you specify a nonzero value for horzontal_offset. + HorizontalAlign *string `json:"horizontalAlign,omitempty" tf:"horizontal_align,omitempty"` + + // The amount by which you want the horizontal position of the watermark to be offset from the position specified by horizontal_align. + HorizontalOffset *string `json:"horizontalOffset,omitempty" tf:"horizontal_offset,omitempty"` + + // A unique identifier for the settings for one watermark. The value of Id can be up to 40 characters long. You can specify settings for up to four watermarks. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The maximum height of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive. + MaxHeight *string `json:"maxHeight,omitempty" tf:"max_height,omitempty"` + + // The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive. 
+ MaxWidth *string `json:"maxWidth,omitempty" tf:"max_width,omitempty"` + + // A percentage that indicates how much you want a watermark to obscure the video in the location where it appears. + Opacity *string `json:"opacity,omitempty" tf:"opacity,omitempty"` + + // A value that controls scaling of thumbnails. Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, and ShrinkToFill. + SizingPolicy *string `json:"sizingPolicy,omitempty" tf:"sizing_policy,omitempty"` + + // A value that determines how Elastic Transcoder interprets values that you specified for video_watermarks.horizontal_offset, video_watermarks.vertical_offset, video_watermarks.max_width, and video_watermarks.max_height. Valid values are Content and Frame. + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // The vertical position of the watermark unless you specify a nonzero value for vertical_align. Valid values are Top, Bottom, Center. + VerticalAlign *string `json:"verticalAlign,omitempty" tf:"vertical_align,omitempty"` + + // The amount by which you want the vertical position of the watermark to be offset from the position specified by vertical_align + VerticalOffset *string `json:"verticalOffset,omitempty" tf:"vertical_offset,omitempty"` +} + +type VideoWatermarksObservation struct { + + // The horizontal position of the watermark unless you specify a nonzero value for horzontal_offset. + HorizontalAlign *string `json:"horizontalAlign,omitempty" tf:"horizontal_align,omitempty"` + + // The amount by which you want the horizontal position of the watermark to be offset from the position specified by horizontal_align. + HorizontalOffset *string `json:"horizontalOffset,omitempty" tf:"horizontal_offset,omitempty"` + + // A unique identifier for the settings for one watermark. The value of Id can be up to 40 characters long. You can specify settings for up to four watermarks. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The maximum height of thumbnails, in pixels. 
If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive. + MaxHeight *string `json:"maxHeight,omitempty" tf:"max_height,omitempty"` + + // The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive. + MaxWidth *string `json:"maxWidth,omitempty" tf:"max_width,omitempty"` + + // A percentage that indicates how much you want a watermark to obscure the video in the location where it appears. + Opacity *string `json:"opacity,omitempty" tf:"opacity,omitempty"` + + // A value that controls scaling of thumbnails. Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, and ShrinkToFill. + SizingPolicy *string `json:"sizingPolicy,omitempty" tf:"sizing_policy,omitempty"` + + // A value that determines how Elastic Transcoder interprets values that you specified for video_watermarks.horizontal_offset, video_watermarks.vertical_offset, video_watermarks.max_width, and video_watermarks.max_height. Valid values are Content and Frame. + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // The vertical position of the watermark unless you specify a nonzero value for vertical_align. Valid values are Top, Bottom, Center. + VerticalAlign *string `json:"verticalAlign,omitempty" tf:"vertical_align,omitempty"` + + // The amount by which you want the vertical position of the watermark to be offset from the position specified by vertical_align + VerticalOffset *string `json:"verticalOffset,omitempty" tf:"vertical_offset,omitempty"` +} + +type VideoWatermarksParameters struct { + + // The horizontal position of the watermark unless you specify a nonzero value for horzontal_offset. 
+ // +kubebuilder:validation:Optional + HorizontalAlign *string `json:"horizontalAlign,omitempty" tf:"horizontal_align,omitempty"` + + // The amount by which you want the horizontal position of the watermark to be offset from the position specified by horizontal_align. + // +kubebuilder:validation:Optional + HorizontalOffset *string `json:"horizontalOffset,omitempty" tf:"horizontal_offset,omitempty"` + + // A unique identifier for the settings for one watermark. The value of Id can be up to 40 characters long. You can specify settings for up to four watermarks. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The maximum height of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive. + // +kubebuilder:validation:Optional + MaxHeight *string `json:"maxHeight,omitempty" tf:"max_height,omitempty"` + + // The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive. + // +kubebuilder:validation:Optional + MaxWidth *string `json:"maxWidth,omitempty" tf:"max_width,omitempty"` + + // A percentage that indicates how much you want a watermark to obscure the video in the location where it appears. + // +kubebuilder:validation:Optional + Opacity *string `json:"opacity,omitempty" tf:"opacity,omitempty"` + + // A value that controls scaling of thumbnails. Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, and ShrinkToFill. 
+ // +kubebuilder:validation:Optional + SizingPolicy *string `json:"sizingPolicy,omitempty" tf:"sizing_policy,omitempty"` + + // A value that determines how Elastic Transcoder interprets values that you specified for video_watermarks.horizontal_offset, video_watermarks.vertical_offset, video_watermarks.max_width, and video_watermarks.max_height. Valid values are Content and Frame. + // +kubebuilder:validation:Optional + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // The vertical position of the watermark unless you specify a nonzero value for vertical_align. Valid values are Top, Bottom, Center. + // +kubebuilder:validation:Optional + VerticalAlign *string `json:"verticalAlign,omitempty" tf:"vertical_align,omitempty"` + + // The amount by which you want the vertical position of the watermark to be offset from the position specified by vertical_align + // +kubebuilder:validation:Optional + VerticalOffset *string `json:"verticalOffset,omitempty" tf:"vertical_offset,omitempty"` +} + +// PresetSpec defines the desired state of Preset +type PresetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PresetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PresetInitParameters `json:"initProvider,omitempty"` +} + +// PresetStatus defines the observed state of Preset. 
+type PresetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PresetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Preset is the Schema for the Presets API. Provides an Elastic Transcoder preset resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Preset struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.container) || (has(self.initProvider) && has(self.initProvider.container))",message="spec.forProvider.container is a required parameter" + Spec PresetSpec `json:"spec"` + Status PresetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PresetList contains a list of Presets +type PresetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Preset `json:"items"` +} + +// Repository type metadata. +var ( + Preset_Kind = "Preset" + Preset_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Preset_Kind}.String() + Preset_KindAPIVersion = Preset_Kind + "." 
+ CRDGroupVersion.String() + Preset_GroupVersionKind = CRDGroupVersion.WithKind(Preset_Kind) +) + +func init() { + SchemeBuilder.Register(&Preset{}, &PresetList{}) +} diff --git a/apis/elb/v1beta1/zz_appcookiestickinesspolicy_types.go b/apis/elb/v1beta1/zz_appcookiestickinesspolicy_types.go index 005e7a8dbf..93dcc09657 100755 --- a/apis/elb/v1beta1/zz_appcookiestickinesspolicy_types.go +++ b/apis/elb/v1beta1/zz_appcookiestickinesspolicy_types.go @@ -51,7 +51,7 @@ type AppCookieStickinessPolicyParameters struct { // Name of load balancer to which the policy // should be attached. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +kubebuilder:validation:Optional LoadBalancer *string `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` diff --git a/apis/elb/v1beta1/zz_attachment_types.go b/apis/elb/v1beta1/zz_attachment_types.go index d85d660fe8..fe3992759d 100755 --- a/apis/elb/v1beta1/zz_attachment_types.go +++ b/apis/elb/v1beta1/zz_attachment_types.go @@ -16,7 +16,7 @@ import ( type AttachmentInitParameters struct { // The name of the ELB. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB ELB *string `json:"elb,omitempty" tf:"elb,omitempty"` // Reference to a ELB in elb to populate elb. @@ -28,7 +28,7 @@ type AttachmentInitParameters struct { ELBSelector *v1.Selector `json:"elbSelector,omitempty" tf:"-"` // Instance ID to place in the ELB pool. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance Instance *string `json:"instance,omitempty" tf:"instance,omitempty"` // Reference to a Instance in ec2 to populate instance. 
@@ -54,7 +54,7 @@ type AttachmentObservation struct { type AttachmentParameters struct { // The name of the ELB. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +kubebuilder:validation:Optional ELB *string `json:"elb,omitempty" tf:"elb,omitempty"` @@ -67,7 +67,7 @@ type AttachmentParameters struct { ELBSelector *v1.Selector `json:"elbSelector,omitempty" tf:"-"` // Instance ID to place in the ELB pool. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +kubebuilder:validation:Optional Instance *string `json:"instance,omitempty" tf:"instance,omitempty"` diff --git a/apis/elb/v1beta1/zz_backendserverpolicy_types.go b/apis/elb/v1beta1/zz_backendserverpolicy_types.go index b7b152f6fa..0f47783885 100755 --- a/apis/elb/v1beta1/zz_backendserverpolicy_types.go +++ b/apis/elb/v1beta1/zz_backendserverpolicy_types.go @@ -19,7 +19,7 @@ type BackendServerPolicyInitParameters struct { InstancePort *float64 `json:"instancePort,omitempty" tf:"instance_port,omitempty"` // The load balancer to attach the policy to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB LoadBalancerName *string `json:"loadBalancerName,omitempty" tf:"load_balancer_name,omitempty"` // Reference to a ELB in elb to populate loadBalancerName. @@ -58,7 +58,7 @@ type BackendServerPolicyParameters struct { InstancePort *float64 `json:"instancePort,omitempty" tf:"instance_port,omitempty"` // The load balancer to attach the policy to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +kubebuilder:validation:Optional LoadBalancerName *string `json:"loadBalancerName,omitempty" tf:"load_balancer_name,omitempty"` diff --git a/apis/elb/v1beta1/zz_generated.conversion_hubs.go b/apis/elb/v1beta1/zz_generated.conversion_hubs.go index 32182b01ca..7b03a13955 100755 --- a/apis/elb/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/elb/v1beta1/zz_generated.conversion_hubs.go @@ -15,9 +15,6 @@ func (tr *Attachment) Hub() {} // Hub marks this type as a conversion hub. func (tr *BackendServerPolicy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ELB) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LBCookieStickinessPolicy) Hub() {} diff --git a/apis/elb/v1beta1/zz_generated.conversion_spokes.go b/apis/elb/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..e4185d101f --- /dev/null +++ b/apis/elb/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ELB to the hub type. 
+func (tr *ELB) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ELB type. +func (tr *ELB) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/elb/v1beta1/zz_generated.resolvers.go b/apis/elb/v1beta1/zz_generated.resolvers.go index 8939637249..53a2721562 100644 --- a/apis/elb/v1beta1/zz_generated.resolvers.go +++ b/apis/elb/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *AppCookieStickinessPolicy) ResolveReferences( // ResolveReferences of var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -57,7 +57,7 @@ func (mg *Attachment) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get 
the reference target managed resource and its list for reference resolution") } @@ -76,7 +76,7 @@ func (mg *Attachment) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.ELB = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ELBRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -95,7 +95,7 @@ func (mg *Attachment) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.Instance = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.InstanceRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -114,7 +114,7 @@ func (mg *Attachment) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.InitProvider.ELB = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ELBRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -145,7 +145,7 @@ func (mg *BackendServerPolicy) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", 
"v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -164,7 +164,7 @@ func (mg *BackendServerPolicy) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.LoadBalancerName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LoadBalancerNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -283,7 +283,7 @@ func (mg *LBCookieStickinessPolicy) ResolveReferences(ctx context.Context, c cli var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -302,7 +302,7 @@ func (mg *LBCookieStickinessPolicy) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.LoadBalancer = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LoadBalancerRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -333,7 +333,7 @@ func (mg *LBSSLNegotiationPolicy) ResolveReferences(ctx context.Context, c clien var rsp 
reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -352,7 +352,7 @@ func (mg *LBSSLNegotiationPolicy) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.LoadBalancer = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LoadBalancerRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -383,7 +383,7 @@ func (mg *ListenerPolicy) ResolveReferences(ctx context.Context, c client.Reader var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -402,7 +402,7 @@ func (mg *ListenerPolicy) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.LoadBalancerName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LoadBalancerNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } 
@@ -433,7 +433,7 @@ func (mg *Policy) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -474,7 +474,7 @@ func (mg *Policy) ResolveReferences(ctx context.Context, c client.Reader) error } { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -526,7 +526,7 @@ func (mg *ProxyProtocolPolicy) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -545,7 +545,7 @@ func (mg *ProxyProtocolPolicy) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.LoadBalancer = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LoadBalancerRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta1", "ELB", "ELBList") + m, l, err = apisresolver.GetManagedResource("elb.aws.upbound.io", "v1beta2", "ELB", "ELBList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git 
a/apis/elb/v1beta1/zz_lbcookiestickinesspolicy_types.go b/apis/elb/v1beta1/zz_lbcookiestickinesspolicy_types.go index 1efe3a5100..ffe1038f45 100755 --- a/apis/elb/v1beta1/zz_lbcookiestickinesspolicy_types.go +++ b/apis/elb/v1beta1/zz_lbcookiestickinesspolicy_types.go @@ -26,7 +26,7 @@ type LBCookieStickinessPolicyInitParameters struct { // The load balancer to which the policy // should be attached. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() LoadBalancer *string `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` @@ -79,7 +79,7 @@ type LBCookieStickinessPolicyParameters struct { // The load balancer to which the policy // should be attached. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LoadBalancer *string `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` diff --git a/apis/elb/v1beta1/zz_lbsslnegotiationpolicy_types.go b/apis/elb/v1beta1/zz_lbsslnegotiationpolicy_types.go index 5beb8100d4..16a83ed949 100755 --- a/apis/elb/v1beta1/zz_lbsslnegotiationpolicy_types.go +++ b/apis/elb/v1beta1/zz_lbsslnegotiationpolicy_types.go @@ -54,7 +54,7 @@ type LBSSLNegotiationPolicyInitParameters struct { // The load balancer to which the policy // should be attached. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() LoadBalancer *string `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` @@ -113,7 +113,7 @@ type LBSSLNegotiationPolicyParameters struct { // The load balancer to which the policy // should be attached. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LoadBalancer *string `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` diff --git a/apis/elb/v1beta1/zz_listenerpolicy_types.go b/apis/elb/v1beta1/zz_listenerpolicy_types.go index be65ba1a28..442797d9af 100755 --- a/apis/elb/v1beta1/zz_listenerpolicy_types.go +++ b/apis/elb/v1beta1/zz_listenerpolicy_types.go @@ -16,7 +16,7 @@ import ( type ListenerPolicyInitParameters struct { // The load balancer to attach the policy to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB LoadBalancerName *string `json:"loadBalancerName,omitempty" tf:"load_balancer_name,omitempty"` // Reference to a ELB in elb to populate loadBalancerName. @@ -62,7 +62,7 @@ type ListenerPolicyObservation struct { type ListenerPolicyParameters struct { // The load balancer to attach the policy to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +kubebuilder:validation:Optional LoadBalancerName *string `json:"loadBalancerName,omitempty" tf:"load_balancer_name,omitempty"` diff --git a/apis/elb/v1beta1/zz_policy_types.go b/apis/elb/v1beta1/zz_policy_types.go index 19806b40df..d13c161ee6 100755 --- a/apis/elb/v1beta1/zz_policy_types.go +++ b/apis/elb/v1beta1/zz_policy_types.go @@ -57,7 +57,7 @@ type PolicyAttributeParameters struct { type PolicyInitParameters struct { // The load balancer on which the policy is defined. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB LoadBalancerName *string `json:"loadBalancerName,omitempty" tf:"load_balancer_name,omitempty"` // Reference to a ELB in elb to populate loadBalancerName. @@ -99,7 +99,7 @@ type PolicyObservation struct { type PolicyParameters struct { // The load balancer on which the policy is defined. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +kubebuilder:validation:Optional LoadBalancerName *string `json:"loadBalancerName,omitempty" tf:"load_balancer_name,omitempty"` diff --git a/apis/elb/v1beta1/zz_proxyprotocolpolicy_types.go b/apis/elb/v1beta1/zz_proxyprotocolpolicy_types.go index 06b93e2d78..b06256d63f 100755 --- a/apis/elb/v1beta1/zz_proxyprotocolpolicy_types.go +++ b/apis/elb/v1beta1/zz_proxyprotocolpolicy_types.go @@ -22,7 +22,7 @@ type ProxyProtocolPolicyInitParameters struct { // The load balancer to which the policy // should be attached. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB LoadBalancer *string `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` // Reference to a ELB in elb to populate loadBalancer. @@ -59,7 +59,7 @@ type ProxyProtocolPolicyParameters struct { // The load balancer to which the policy // should be attached. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta1.ELB + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elb/v1beta2.ELB // +kubebuilder:validation:Optional LoadBalancer *string `json:"loadBalancer,omitempty" tf:"load_balancer,omitempty"` diff --git a/apis/elb/v1beta2/zz_elb_terraformed.go b/apis/elb/v1beta2/zz_elb_terraformed.go new file mode 100755 index 0000000000..388e865605 --- /dev/null +++ b/apis/elb/v1beta2/zz_elb_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ELB +func (mg *ELB) GetTerraformResourceType() string { + return "aws_elb" +} + +// GetConnectionDetailsMapping for this ELB +func (tr *ELB) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ELB +func (tr *ELB) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ELB +func (tr *ELB) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ELB +func (tr *ELB) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ELB +func (tr *ELB) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ELB +func (tr *ELB) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ELB +func (tr *ELB) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ELB +func (tr *ELB) 
GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ELB using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ELB) LateInitialize(attrs []byte) (bool, error) { + params := &ELBParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("AccessLogs")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ELB) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elb/v1beta2/zz_elb_types.go b/apis/elb/v1beta2/zz_elb_types.go new file mode 100755 index 0000000000..4ac03ca6f1 --- /dev/null +++ b/apis/elb/v1beta2/zz_elb_types.go @@ -0,0 +1,499 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessLogsInitParameters struct { + + // The S3 bucket name to store the logs in. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The S3 bucket prefix. Logs are stored in the root if not configured. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Boolean to enable / disable access_logs. Default is true + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The publishing interval in minutes. Valid values: 5 and 60. Default: 60 + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type AccessLogsObservation struct { + + // The S3 bucket name to store the logs in. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The S3 bucket prefix. Logs are stored in the root if not configured. + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Boolean to enable / disable access_logs. Default is true + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The publishing interval in minutes. Valid values: 5 and 60. 
Default: 60 + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type AccessLogsParameters struct { + + // The S3 bucket name to store the logs in. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // The S3 bucket prefix. Logs are stored in the root if not configured. + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Boolean to enable / disable access_logs. Default is true + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The publishing interval in minutes. Valid values: 5 and 60. Default: 60 + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type ELBInitParameters struct { + + // An Access Logs block. Access Logs documented below. + AccessLogs *AccessLogsInitParameters `json:"accessLogs,omitempty" tf:"access_logs,omitempty"` + + // The AZ's to serve traffic in. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Boolean to enable connection draining. Default: false + ConnectionDraining *bool `json:"connectionDraining,omitempty" tf:"connection_draining,omitempty"` + + // The time in seconds to allow for connections to drain. Default: 300 + ConnectionDrainingTimeout *float64 `json:"connectionDrainingTimeout,omitempty" tf:"connection_draining_timeout,omitempty"` + + // Enable cross-zone load balancing. Default: true + CrossZoneLoadBalancing *bool `json:"crossZoneLoadBalancing,omitempty" tf:"cross_zone_load_balancing,omitempty"` + + // Determines how the load balancer handles requests that might pose a security risk to an application due to HTTP desync. Valid values are monitor, defensive (default), strictest. 
+ DesyncMitigationMode *string `json:"desyncMitigationMode,omitempty" tf:"desync_mitigation_mode,omitempty"` + + // A health_check block. Health Check documented below. + HealthCheck *HealthCheckInitParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // The time in seconds that the connection is allowed to be idle. Default: 60 + IdleTimeout *float64 `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` + + // A list of instance ids to place in the ELB pool. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance + // +listType=set + Instances []*string `json:"instances,omitempty" tf:"instances,omitempty"` + + // References to Instance in ec2 to populate instances. + // +kubebuilder:validation:Optional + InstancesRefs []v1.Reference `json:"instancesRefs,omitempty" tf:"-"` + + // Selector for a list of Instance in ec2 to populate instances. + // +kubebuilder:validation:Optional + InstancesSelector *v1.Selector `json:"instancesSelector,omitempty" tf:"-"` + + // If true, ELB will be an internal ELB. + Internal *bool `json:"internal,omitempty" tf:"internal,omitempty"` + + // A list of listener blocks. Listeners documented below. + Listener []ListenerInitParameters `json:"listener,omitempty" tf:"listener,omitempty"` + + // A list of security group IDs to assign to the ELB. + // Only valid if creating an ELB within a VPC + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The name of the security group that you can use as + // part of your inbound rules for your load balancer's back-end application + // instances. Use this for Classic or Default VPC only. + SourceSecurityGroup *string `json:"sourceSecurityGroup,omitempty" tf:"source_security_group,omitempty"` + + // A list of subnet IDs to attach to the ELB. When an update to subnets will remove all current subnets, this will force a new resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` + + // References to Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetsRefs []v1.Reference `json:"subnetsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetsSelector *v1.Selector `json:"subnetsSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ELBObservation struct { + + // An Access Logs block. Access Logs documented below. + AccessLogs *AccessLogsObservation `json:"accessLogs,omitempty" tf:"access_logs,omitempty"` + + // The ARN of the ELB + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The AZ's to serve traffic in. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Boolean to enable connection draining. Default: false + ConnectionDraining *bool `json:"connectionDraining,omitempty" tf:"connection_draining,omitempty"` + + // The time in seconds to allow for connections to drain. Default: 300 + ConnectionDrainingTimeout *float64 `json:"connectionDrainingTimeout,omitempty" tf:"connection_draining_timeout,omitempty"` + + // Enable cross-zone load balancing. Default: true + CrossZoneLoadBalancing *bool `json:"crossZoneLoadBalancing,omitempty" tf:"cross_zone_load_balancing,omitempty"` + + // The DNS name of the ELB + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // Determines how the load balancer handles requests that might pose a security risk to an application due to HTTP desync. Valid values are monitor, defensive (default), strictest. 
+ DesyncMitigationMode *string `json:"desyncMitigationMode,omitempty" tf:"desync_mitigation_mode,omitempty"` + + // A health_check block. Health Check documented below. + HealthCheck *HealthCheckObservation `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // The name of the ELB + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The time in seconds that the connection is allowed to be idle. Default: 60 + IdleTimeout *float64 `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` + + // A list of instance ids to place in the ELB pool. + // +listType=set + Instances []*string `json:"instances,omitempty" tf:"instances,omitempty"` + + // If true, ELB will be an internal ELB. + Internal *bool `json:"internal,omitempty" tf:"internal,omitempty"` + + // A list of listener blocks. Listeners documented below. + Listener []ListenerObservation `json:"listener,omitempty" tf:"listener,omitempty"` + + // A list of security group IDs to assign to the ELB. + // Only valid if creating an ELB within a VPC + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The name of the security group that you can use as + // part of your inbound rules for your load balancer's back-end application + // instances. Use this for Classic or Default VPC only. + SourceSecurityGroup *string `json:"sourceSecurityGroup,omitempty" tf:"source_security_group,omitempty"` + + // The ID of the security group that you can use as + // part of your inbound rules for your load balancer's back-end application + // instances. Only available on ELBs launched in a VPC. + SourceSecurityGroupID *string `json:"sourceSecurityGroupId,omitempty" tf:"source_security_group_id,omitempty"` + + // A list of subnet IDs to attach to the ELB. When an update to subnets will remove all current subnets, this will force a new resource. 
+ // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The canonical hosted zone ID of the ELB (to be used in a Route 53 Alias record) + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` +} + +type ELBParameters struct { + + // An Access Logs block. Access Logs documented below. + // +kubebuilder:validation:Optional + AccessLogs *AccessLogsParameters `json:"accessLogs,omitempty" tf:"access_logs,omitempty"` + + // The AZ's to serve traffic in. + // +kubebuilder:validation:Optional + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Boolean to enable connection draining. Default: false + // +kubebuilder:validation:Optional + ConnectionDraining *bool `json:"connectionDraining,omitempty" tf:"connection_draining,omitempty"` + + // The time in seconds to allow for connections to drain. Default: 300 + // +kubebuilder:validation:Optional + ConnectionDrainingTimeout *float64 `json:"connectionDrainingTimeout,omitempty" tf:"connection_draining_timeout,omitempty"` + + // Enable cross-zone load balancing. Default: true + // +kubebuilder:validation:Optional + CrossZoneLoadBalancing *bool `json:"crossZoneLoadBalancing,omitempty" tf:"cross_zone_load_balancing,omitempty"` + + // Determines how the load balancer handles requests that might pose a security risk to an application due to HTTP desync. Valid values are monitor, defensive (default), strictest. 
+ // +kubebuilder:validation:Optional + DesyncMitigationMode *string `json:"desyncMitigationMode,omitempty" tf:"desync_mitigation_mode,omitempty"` + + // A health_check block. Health Check documented below. + // +kubebuilder:validation:Optional + HealthCheck *HealthCheckParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // The time in seconds that the connection is allowed to be idle. Default: 60 + // +kubebuilder:validation:Optional + IdleTimeout *float64 `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` + + // A list of instance ids to place in the ELB pool. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance + // +kubebuilder:validation:Optional + // +listType=set + Instances []*string `json:"instances,omitempty" tf:"instances,omitempty"` + + // References to Instance in ec2 to populate instances. + // +kubebuilder:validation:Optional + InstancesRefs []v1.Reference `json:"instancesRefs,omitempty" tf:"-"` + + // Selector for a list of Instance in ec2 to populate instances. + // +kubebuilder:validation:Optional + InstancesSelector *v1.Selector `json:"instancesSelector,omitempty" tf:"-"` + + // If true, ELB will be an internal ELB. + // +kubebuilder:validation:Optional + Internal *bool `json:"internal,omitempty" tf:"internal,omitempty"` + + // A list of listener blocks. Listeners documented below. + // +kubebuilder:validation:Optional + Listener []ListenerParameters `json:"listener,omitempty" tf:"listener,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A list of security group IDs to assign to the ELB. 
+ // Only valid if creating an ELB within a VPC + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The name of the security group that you can use as + // part of your inbound rules for your load balancer's back-end application + // instances. Use this for Classic or Default VPC only. + // +kubebuilder:validation:Optional + SourceSecurityGroup *string `json:"sourceSecurityGroup,omitempty" tf:"source_security_group,omitempty"` + + // A list of subnet IDs to attach to the ELB. When an update to subnets will remove all current subnets, this will force a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` + + // References to Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetsRefs []v1.Reference `json:"subnetsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetsSelector *v1.Selector `json:"subnetsSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type HealthCheckInitParameters struct { + + // The number of checks before the instance is declared healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // The publishing interval in minutes. Valid values: 5 and 60. Default: 60 + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL + // values are: + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // The length of time before the check times out. 
+ Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // The number of checks before the instance is declared unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type HealthCheckObservation struct { + + // The number of checks before the instance is declared healthy. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // The publishing interval in minutes. Valid values: 5 and 60. Default: 60 + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL + // values are: + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // The length of time before the check times out. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // The number of checks before the instance is declared unhealthy. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type HealthCheckParameters struct { + + // The number of checks before the instance is declared healthy. + // +kubebuilder:validation:Optional + HealthyThreshold *float64 `json:"healthyThreshold" tf:"healthy_threshold,omitempty"` + + // The publishing interval in minutes. Valid values: 5 and 60. Default: 60 + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval" tf:"interval,omitempty"` + + // The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL + // values are: + // +kubebuilder:validation:Optional + Target *string `json:"target" tf:"target,omitempty"` + + // The length of time before the check times out. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout" tf:"timeout,omitempty"` + + // The number of checks before the instance is declared unhealthy. 
+ // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold" tf:"unhealthy_threshold,omitempty"` +} + +type ListenerInitParameters struct { + + // The port on the instance to route to + InstancePort *float64 `json:"instancePort,omitempty" tf:"instance_port,omitempty"` + + // The protocol to use to the instance. Valid + // values are HTTP, HTTPS, TCP, or SSL + InstanceProtocol *string `json:"instanceProtocol,omitempty" tf:"instance_protocol,omitempty"` + + // The port to listen on for the load balancer + LBPort *float64 `json:"lbPort,omitempty" tf:"lb_port,omitempty"` + + // The protocol to listen on. Valid values are HTTP, + // HTTPS, TCP, or SSL + LBProtocol *string `json:"lbProtocol,omitempty" tf:"lb_protocol,omitempty"` + + // The ARN of an SSL certificate you have + // uploaded to AWS IAM. Note ECDSA-specific restrictions below. Only valid when + SSLCertificateID *string `json:"sslCertificateId,omitempty" tf:"ssl_certificate_id,omitempty"` +} + +type ListenerObservation struct { + + // The port on the instance to route to + InstancePort *float64 `json:"instancePort,omitempty" tf:"instance_port,omitempty"` + + // The protocol to use to the instance. Valid + // values are HTTP, HTTPS, TCP, or SSL + InstanceProtocol *string `json:"instanceProtocol,omitempty" tf:"instance_protocol,omitempty"` + + // The port to listen on for the load balancer + LBPort *float64 `json:"lbPort,omitempty" tf:"lb_port,omitempty"` + + // The protocol to listen on. Valid values are HTTP, + // HTTPS, TCP, or SSL + LBProtocol *string `json:"lbProtocol,omitempty" tf:"lb_protocol,omitempty"` + + // The ARN of an SSL certificate you have + // uploaded to AWS IAM. Note ECDSA-specific restrictions below. 
Only valid when + SSLCertificateID *string `json:"sslCertificateId,omitempty" tf:"ssl_certificate_id,omitempty"` +} + +type ListenerParameters struct { + + // The port on the instance to route to + // +kubebuilder:validation:Optional + InstancePort *float64 `json:"instancePort" tf:"instance_port,omitempty"` + + // The protocol to use to the instance. Valid + // values are HTTP, HTTPS, TCP, or SSL + // +kubebuilder:validation:Optional + InstanceProtocol *string `json:"instanceProtocol" tf:"instance_protocol,omitempty"` + + // The port to listen on for the load balancer + // +kubebuilder:validation:Optional + LBPort *float64 `json:"lbPort" tf:"lb_port,omitempty"` + + // The protocol to listen on. Valid values are HTTP, + // HTTPS, TCP, or SSL + // +kubebuilder:validation:Optional + LBProtocol *string `json:"lbProtocol" tf:"lb_protocol,omitempty"` + + // The ARN of an SSL certificate you have + // uploaded to AWS IAM. Note ECDSA-specific restrictions below. Only valid when + // +kubebuilder:validation:Optional + SSLCertificateID *string `json:"sslCertificateId,omitempty" tf:"ssl_certificate_id,omitempty"` +} + +// ELBSpec defines the desired state of ELB +type ELBSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ELBParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ELBInitParameters `json:"initProvider,omitempty"` +} + +// ELBStatus defines the observed state of ELB. +type ELBStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ELBObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ELB is the Schema for the ELBs API. Provides an Elastic Load Balancer resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ELB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.listener) || (has(self.initProvider) && has(self.initProvider.listener))",message="spec.forProvider.listener is a required parameter" + Spec ELBSpec `json:"spec"` + Status ELBStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ELBList contains a list of ELBs +type ELBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ELB `json:"items"` +} + +// Repository type metadata. +var ( + ELB_Kind = "ELB" + ELB_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ELB_Kind}.String() + ELB_KindAPIVersion = ELB_Kind + "." 
+ CRDGroupVersion.String() + ELB_GroupVersionKind = CRDGroupVersion.WithKind(ELB_Kind) +) + +func init() { + SchemeBuilder.Register(&ELB{}, &ELBList{}) +} diff --git a/apis/elb/v1beta2/zz_generated.conversion_hubs.go b/apis/elb/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..56e55f5fe7 --- /dev/null +++ b/apis/elb/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ELB) Hub() {} diff --git a/apis/elb/v1beta2/zz_generated.deepcopy.go b/apis/elb/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b34494035a --- /dev/null +++ b/apis/elb/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,928 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogsInitParameters) DeepCopyInto(out *AccessLogsInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogsInitParameters. 
+func (in *AccessLogsInitParameters) DeepCopy() *AccessLogsInitParameters { + if in == nil { + return nil + } + out := new(AccessLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogsObservation) DeepCopyInto(out *AccessLogsObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogsObservation. +func (in *AccessLogsObservation) DeepCopy() *AccessLogsObservation { + if in == nil { + return nil + } + out := new(AccessLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogsParameters) DeepCopyInto(out *AccessLogsParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogsParameters. 
+func (in *AccessLogsParameters) DeepCopy() *AccessLogsParameters { + if in == nil { + return nil + } + out := new(AccessLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ELB) DeepCopyInto(out *ELB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELB. +func (in *ELB) DeepCopy() *ELB { + if in == nil { + return nil + } + out := new(ELB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ELB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ELBInitParameters) DeepCopyInto(out *ELBInitParameters) { + *out = *in + if in.AccessLogs != nil { + in, out := &in.AccessLogs, &out.AccessLogs + *out = new(AccessLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionDraining != nil { + in, out := &in.ConnectionDraining, &out.ConnectionDraining + *out = new(bool) + **out = **in + } + if in.ConnectionDrainingTimeout != nil { + in, out := &in.ConnectionDrainingTimeout, &out.ConnectionDrainingTimeout + *out = new(float64) + **out = **in + } + if in.CrossZoneLoadBalancing != nil { + in, out := &in.CrossZoneLoadBalancing, &out.CrossZoneLoadBalancing + *out = new(bool) + **out = **in + } + if in.DesyncMitigationMode != nil { + in, out := &in.DesyncMitigationMode, &out.DesyncMitigationMode + *out = new(string) + **out = **in + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(HealthCheckInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(float64) + **out = **in + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstancesRefs != nil { + in, out := &in.InstancesRefs, &out.InstancesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstancesSelector != nil { + in, out := &in.InstancesSelector, &out.InstancesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Internal != nil { + in, out := &in.Internal, &out.Internal + *out = new(bool) + **out 
= **in + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]ListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SourceSecurityGroup != nil { + in, out := &in.SourceSecurityGroup, &out.SourceSecurityGroup + *out = new(string) + **out = **in + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetsRefs != nil { + in, out := &in.SubnetsRefs, &out.SubnetsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetsSelector != nil { + in, out := &in.SubnetsSelector, &out.SubnetsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELBInitParameters. +func (in *ELBInitParameters) DeepCopy() *ELBInitParameters { + if in == nil { + return nil + } + out := new(ELBInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ELBList) DeepCopyInto(out *ELBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ELB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELBList. +func (in *ELBList) DeepCopy() *ELBList { + if in == nil { + return nil + } + out := new(ELBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ELBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ELBObservation) DeepCopyInto(out *ELBObservation) { + *out = *in + if in.AccessLogs != nil { + in, out := &in.AccessLogs, &out.AccessLogs + *out = new(AccessLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionDraining != nil { + in, out := &in.ConnectionDraining, &out.ConnectionDraining + *out = new(bool) + **out = **in + } + if in.ConnectionDrainingTimeout != nil { + in, out := &in.ConnectionDrainingTimeout, &out.ConnectionDrainingTimeout + *out = new(float64) + **out = **in + } + if in.CrossZoneLoadBalancing != nil { + in, out := &in.CrossZoneLoadBalancing, &out.CrossZoneLoadBalancing + *out = new(bool) + **out = **in + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if 
in.DesyncMitigationMode != nil { + in, out := &in.DesyncMitigationMode, &out.DesyncMitigationMode + *out = new(string) + **out = **in + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(HealthCheckObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(float64) + **out = **in + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Internal != nil { + in, out := &in.Internal, &out.Internal + *out = new(bool) + **out = **in + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]ListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SourceSecurityGroup != nil { + in, out := &in.SourceSecurityGroup, &out.SourceSecurityGroup + *out = new(string) + **out = **in + } + if in.SourceSecurityGroupID != nil { + in, out := &in.SourceSecurityGroupID, &out.SourceSecurityGroupID + *out = new(string) + **out = **in + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, 
out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELBObservation. +func (in *ELBObservation) DeepCopy() *ELBObservation { + if in == nil { + return nil + } + out := new(ELBObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ELBParameters) DeepCopyInto(out *ELBParameters) { + *out = *in + if in.AccessLogs != nil { + in, out := &in.AccessLogs, &out.AccessLogs + *out = new(AccessLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionDraining != nil { + in, out := &in.ConnectionDraining, &out.ConnectionDraining + *out = new(bool) + **out = **in + } + if in.ConnectionDrainingTimeout != nil { + in, out := &in.ConnectionDrainingTimeout, &out.ConnectionDrainingTimeout + *out = new(float64) + **out = **in + } + if in.CrossZoneLoadBalancing != nil { + in, out := &in.CrossZoneLoadBalancing, &out.CrossZoneLoadBalancing + *out = new(bool) + **out = **in + } + if in.DesyncMitigationMode != nil { + in, out := &in.DesyncMitigationMode, &out.DesyncMitigationMode + *out = new(string) + **out = **in + } + if in.HealthCheck != nil { + 
in, out := &in.HealthCheck, &out.HealthCheck + *out = new(HealthCheckParameters) + (*in).DeepCopyInto(*out) + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(float64) + **out = **in + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InstancesRefs != nil { + in, out := &in.InstancesRefs, &out.InstancesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstancesSelector != nil { + in, out := &in.InstancesSelector, &out.InstancesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Internal != nil { + in, out := &in.Internal, &out.Internal + *out = new(bool) + **out = **in + } + if in.Listener != nil { + in, out := &in.Listener, &out.Listener + *out = make([]ListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SourceSecurityGroup != nil { + in, out := &in.SourceSecurityGroup, &out.SourceSecurityGroup + *out = new(string) + **out = **in + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetsRefs != nil { + in, out := &in.SubnetsRefs, &out.SubnetsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.SubnetsSelector != nil { + in, out := &in.SubnetsSelector, &out.SubnetsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELBParameters. +func (in *ELBParameters) DeepCopy() *ELBParameters { + if in == nil { + return nil + } + out := new(ELBParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ELBSpec) DeepCopyInto(out *ELBSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELBSpec. +func (in *ELBSpec) DeepCopy() *ELBSpec { + if in == nil { + return nil + } + out := new(ELBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ELBStatus) DeepCopyInto(out *ELBStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ELBStatus. +func (in *ELBStatus) DeepCopy() *ELBStatus { + if in == nil { + return nil + } + out := new(ELBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckInitParameters) DeepCopyInto(out *HealthCheckInitParameters) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckInitParameters. +func (in *HealthCheckInitParameters) DeepCopy() *HealthCheckInitParameters { + if in == nil { + return nil + } + out := new(HealthCheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheckObservation) DeepCopyInto(out *HealthCheckObservation) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckObservation. 
+func (in *HealthCheckObservation) DeepCopy() *HealthCheckObservation { + if in == nil { + return nil + } + out := new(HealthCheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheckParameters) DeepCopyInto(out *HealthCheckParameters) { + *out = *in + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckParameters. +func (in *HealthCheckParameters) DeepCopy() *HealthCheckParameters { + if in == nil { + return nil + } + out := new(HealthCheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerInitParameters) DeepCopyInto(out *ListenerInitParameters) { + *out = *in + if in.InstancePort != nil { + in, out := &in.InstancePort, &out.InstancePort + *out = new(float64) + **out = **in + } + if in.InstanceProtocol != nil { + in, out := &in.InstanceProtocol, &out.InstanceProtocol + *out = new(string) + **out = **in + } + if in.LBPort != nil { + in, out := &in.LBPort, &out.LBPort + *out = new(float64) + **out = **in + } + if in.LBProtocol != nil { + in, out := &in.LBProtocol, &out.LBProtocol + *out = new(string) + **out = **in + } + if in.SSLCertificateID != nil { + in, out := &in.SSLCertificateID, &out.SSLCertificateID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerInitParameters. +func (in *ListenerInitParameters) DeepCopy() *ListenerInitParameters { + if in == nil { + return nil + } + out := new(ListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerObservation) DeepCopyInto(out *ListenerObservation) { + *out = *in + if in.InstancePort != nil { + in, out := &in.InstancePort, &out.InstancePort + *out = new(float64) + **out = **in + } + if in.InstanceProtocol != nil { + in, out := &in.InstanceProtocol, &out.InstanceProtocol + *out = new(string) + **out = **in + } + if in.LBPort != nil { + in, out := &in.LBPort, &out.LBPort + *out = new(float64) + **out = **in + } + if in.LBProtocol != nil { + in, out := &in.LBProtocol, &out.LBProtocol + *out = new(string) + **out = **in + } + if in.SSLCertificateID != nil { + in, out := &in.SSLCertificateID, &out.SSLCertificateID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerObservation. 
+func (in *ListenerObservation) DeepCopy() *ListenerObservation { + if in == nil { + return nil + } + out := new(ListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerParameters) DeepCopyInto(out *ListenerParameters) { + *out = *in + if in.InstancePort != nil { + in, out := &in.InstancePort, &out.InstancePort + *out = new(float64) + **out = **in + } + if in.InstanceProtocol != nil { + in, out := &in.InstanceProtocol, &out.InstanceProtocol + *out = new(string) + **out = **in + } + if in.LBPort != nil { + in, out := &in.LBPort, &out.LBPort + *out = new(float64) + **out = **in + } + if in.LBProtocol != nil { + in, out := &in.LBProtocol, &out.LBProtocol + *out = new(string) + **out = **in + } + if in.SSLCertificateID != nil { + in, out := &in.SSLCertificateID, &out.SSLCertificateID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerParameters. +func (in *ListenerParameters) DeepCopy() *ListenerParameters { + if in == nil { + return nil + } + out := new(ListenerParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/elb/v1beta2/zz_generated.managed.go b/apis/elb/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..d176451921 --- /dev/null +++ b/apis/elb/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ELB. +func (mg *ELB) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ELB. 
+func (mg *ELB) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ELB. +func (mg *ELB) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ELB. +func (mg *ELB) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ELB. +func (mg *ELB) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ELB. +func (mg *ELB) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ELB. +func (mg *ELB) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ELB. +func (mg *ELB) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ELB. +func (mg *ELB) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ELB. +func (mg *ELB) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ELB. +func (mg *ELB) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ELB. 
+func (mg *ELB) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/elb/v1beta2/zz_generated.managedlist.go b/apis/elb/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..4ea60faaae --- /dev/null +++ b/apis/elb/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ELBList. +func (l *ELBList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/elb/v1beta2/zz_generated.resolvers.go b/apis/elb/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..0e159432b6 --- /dev/null +++ b/apis/elb/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this ELB. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *ELB) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Instances), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.InstancesRefs, + Selector: mg.Spec.ForProvider.InstancesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Instances") + } + mg.Spec.ForProvider.Instances = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.InstancesRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Subnets), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetsRefs, + Selector: mg.Spec.ForProvider.SubnetsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Subnets") + } + mg.Spec.ForProvider.Subnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", 
"InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Instances), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.InstancesRefs, + Selector: mg.Spec.InitProvider.InstancesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Instances") + } + mg.Spec.InitProvider.Instances = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.InstancesRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Subnets), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetsRefs, + Selector: mg.Spec.InitProvider.SubnetsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Subnets") + } + mg.Spec.InitProvider.Subnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetsRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/elb/v1beta2/zz_groupversion_info.go b/apis/elb/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..5a392651db --- /dev/null +++ b/apis/elb/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=elb.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "elb.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/elbv2/v1beta1/zz_generated.conversion_hubs.go b/apis/elbv2/v1beta1/zz_generated.conversion_hubs.go index abff24b574..ec94ba711b 100755 --- a/apis/elbv2/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/elbv2/v1beta1/zz_generated.conversion_hubs.go @@ -6,20 +6,8 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *LB) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LBListener) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LBListenerCertificate) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *LBListenerRule) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LBTargetGroup) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LBTargetGroupAttachment) Hub() {} diff --git a/apis/elbv2/v1beta1/zz_generated.conversion_spokes.go b/apis/elbv2/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..12225e8934 --- /dev/null +++ b/apis/elbv2/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this LB to the hub type. +func (tr *LB) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LB type. +func (tr *LB) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LBListener to the hub type. +func (tr *LBListener) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LBListener type. 
+func (tr *LBListener) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LBListenerRule to the hub type. +func (tr *LBListenerRule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LBListenerRule type. +func (tr *LBListenerRule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LBTargetGroup to the hub type. +func (tr *LBTargetGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LBTargetGroup type. 
+func (tr *LBTargetGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/elbv2/v1beta1/zz_generated.resolvers.go b/apis/elbv2/v1beta1/zz_generated.resolvers.go index 75321dbff1..f230922ff7 100644 --- a/apis/elbv2/v1beta1/zz_generated.resolvers.go +++ b/apis/elbv2/v1beta1/zz_generated.resolvers.go @@ -342,7 +342,7 @@ func (mg *LBListenerCertificate) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -361,7 +361,7 @@ func (mg *LBListenerCertificate) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CertificateArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta1", "LBListener", "LBListenerList") + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBListener", "LBListenerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -380,7 +380,7 @@ func (mg *LBListenerCertificate) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.ListenerArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ListenerArnRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -399,7 +399,7 @@ func (mg *LBListenerCertificate) ResolveReferences(ctx context.Context, c client mg.Spec.InitProvider.CertificateArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.CertificateArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta1", "LBListener", "LBListenerList") + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBListener", "LBListenerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -760,7 +760,7 @@ func (mg *LBTargetGroupAttachment) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta1", "LBTargetGroup", "LBTargetGroupList") + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -779,7 +779,7 @@ func (mg *LBTargetGroupAttachment) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.TargetGroupArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetGroupArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta1", "LBTargetGroup", "LBTargetGroupList") + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") if 
err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/elbv2/v1beta1/zz_lblistenercertificate_types.go b/apis/elbv2/v1beta1/zz_lblistenercertificate_types.go index 1d2445ae47..82b580d6ee 100755 --- a/apis/elbv2/v1beta1/zz_lblistenercertificate_types.go +++ b/apis/elbv2/v1beta1/zz_lblistenercertificate_types.go @@ -16,7 +16,7 @@ import ( type LBListenerCertificateInitParameters struct { // The ARN of the certificate to attach to the listener. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` @@ -29,7 +29,7 @@ type LBListenerCertificateInitParameters struct { CertificateArnSelector *v1.Selector `json:"certificateArnSelector,omitempty" tf:"-"` // The ARN of the listener to which to attach the certificate. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta1.LBListener + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBListener // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) ListenerArn *string `json:"listenerArn,omitempty" tf:"listener_arn,omitempty"` @@ -57,7 +57,7 @@ type LBListenerCertificateObservation struct { type LBListenerCertificateParameters struct { // The ARN of the certificate to attach to the listener. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` @@ -71,7 +71,7 @@ type LBListenerCertificateParameters struct { CertificateArnSelector *v1.Selector `json:"certificateArnSelector,omitempty" tf:"-"` // The ARN of the listener to which to attach the certificate. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta1.LBListener + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBListener // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional ListenerArn *string `json:"listenerArn,omitempty" tf:"listener_arn,omitempty"` diff --git a/apis/elbv2/v1beta1/zz_lbtargetgroupattachment_types.go b/apis/elbv2/v1beta1/zz_lbtargetgroupattachment_types.go index 3e0f630943..455abb0d90 100755 --- a/apis/elbv2/v1beta1/zz_lbtargetgroupattachment_types.go +++ b/apis/elbv2/v1beta1/zz_lbtargetgroupattachment_types.go @@ -22,7 +22,7 @@ type LBTargetGroupAttachmentInitParameters struct { Port *float64 `json:"port,omitempty" tf:"port,omitempty"` // The ARN of the target group with which to register targets. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta1.LBTargetGroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` // Reference to a LBTargetGroup in elbv2 to populate targetGroupArn. 
@@ -71,7 +71,7 @@ type LBTargetGroupAttachmentParameters struct { Region *string `json:"region" tf:"-"` // The ARN of the target group with which to register targets. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta1.LBTargetGroup + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup // +kubebuilder:validation:Optional TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` diff --git a/apis/elbv2/v1beta2/zz_generated.conversion_hubs.go b/apis/elbv2/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..8b57b6c96a --- /dev/null +++ b/apis/elbv2/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *LB) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LBListener) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LBListenerRule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LBTargetGroup) Hub() {} diff --git a/apis/elbv2/v1beta2/zz_generated.deepcopy.go b/apis/elbv2/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..d690a773d0 --- /dev/null +++ b/apis/elbv2/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,5641 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessLogsInitParameters) DeepCopyInto(out *AccessLogsInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogsInitParameters. +func (in *AccessLogsInitParameters) DeepCopy() *AccessLogsInitParameters { + if in == nil { + return nil + } + out := new(AccessLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogsObservation) DeepCopyInto(out *AccessLogsObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogsObservation. +func (in *AccessLogsObservation) DeepCopy() *AccessLogsObservation { + if in == nil { + return nil + } + out := new(AccessLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessLogsParameters) DeepCopyInto(out *AccessLogsParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogsParameters. +func (in *AccessLogsParameters) DeepCopy() *AccessLogsParameters { + if in == nil { + return nil + } + out := new(AccessLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionAuthenticateCognitoInitParameters) DeepCopyInto(out *ActionAuthenticateCognitoInitParameters) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.UserPoolArn != nil { + in, out := &in.UserPoolArn, &out.UserPoolArn + *out = new(string) + **out = **in + } + if in.UserPoolArnRef != nil { + in, out := &in.UserPoolArnRef, &out.UserPoolArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolArnSelector != nil { + in, out := &in.UserPoolArnSelector, &out.UserPoolArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserPoolClientID != nil { + in, out := &in.UserPoolClientID, &out.UserPoolClientID + *out = new(string) + **out = **in + } + if in.UserPoolClientIDRef != nil { + in, out := &in.UserPoolClientIDRef, &out.UserPoolClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolClientIDSelector != nil { + in, out := &in.UserPoolClientIDSelector, &out.UserPoolClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserPoolDomain != nil { + in, out := &in.UserPoolDomain, 
&out.UserPoolDomain + *out = new(string) + **out = **in + } + if in.UserPoolDomainRef != nil { + in, out := &in.UserPoolDomainRef, &out.UserPoolDomainRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolDomainSelector != nil { + in, out := &in.UserPoolDomainSelector, &out.UserPoolDomainSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionAuthenticateCognitoInitParameters. +func (in *ActionAuthenticateCognitoInitParameters) DeepCopy() *ActionAuthenticateCognitoInitParameters { + if in == nil { + return nil + } + out := new(ActionAuthenticateCognitoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionAuthenticateCognitoObservation) DeepCopyInto(out *ActionAuthenticateCognitoObservation) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.UserPoolArn != nil { + in, out := &in.UserPoolArn, &out.UserPoolArn + *out = new(string) 
+ **out = **in + } + if in.UserPoolClientID != nil { + in, out := &in.UserPoolClientID, &out.UserPoolClientID + *out = new(string) + **out = **in + } + if in.UserPoolDomain != nil { + in, out := &in.UserPoolDomain, &out.UserPoolDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionAuthenticateCognitoObservation. +func (in *ActionAuthenticateCognitoObservation) DeepCopy() *ActionAuthenticateCognitoObservation { + if in == nil { + return nil + } + out := new(ActionAuthenticateCognitoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionAuthenticateCognitoParameters) DeepCopyInto(out *ActionAuthenticateCognitoParameters) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.UserPoolArn != nil { + in, out := &in.UserPoolArn, &out.UserPoolArn + *out = new(string) + **out = **in + } + if in.UserPoolArnRef != nil { + in, out := &in.UserPoolArnRef, &out.UserPoolArnRef + *out = 
new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolArnSelector != nil { + in, out := &in.UserPoolArnSelector, &out.UserPoolArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserPoolClientID != nil { + in, out := &in.UserPoolClientID, &out.UserPoolClientID + *out = new(string) + **out = **in + } + if in.UserPoolClientIDRef != nil { + in, out := &in.UserPoolClientIDRef, &out.UserPoolClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolClientIDSelector != nil { + in, out := &in.UserPoolClientIDSelector, &out.UserPoolClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserPoolDomain != nil { + in, out := &in.UserPoolDomain, &out.UserPoolDomain + *out = new(string) + **out = **in + } + if in.UserPoolDomainRef != nil { + in, out := &in.UserPoolDomainRef, &out.UserPoolDomainRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolDomainSelector != nil { + in, out := &in.UserPoolDomainSelector, &out.UserPoolDomainSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionAuthenticateCognitoParameters. +func (in *ActionAuthenticateCognitoParameters) DeepCopy() *ActionAuthenticateCognitoParameters { + if in == nil { + return nil + } + out := new(ActionAuthenticateCognitoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionAuthenticateOidcInitParameters) DeepCopyInto(out *ActionAuthenticateOidcInitParameters) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } + if in.UserInfoEndpoint != nil { + in, out := &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionAuthenticateOidcInitParameters. 
+func (in *ActionAuthenticateOidcInitParameters) DeepCopy() *ActionAuthenticateOidcInitParameters { + if in == nil { + return nil + } + out := new(ActionAuthenticateOidcInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionAuthenticateOidcObservation) DeepCopyInto(out *ActionAuthenticateOidcObservation) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } + if in.UserInfoEndpoint != nil { + in, out := &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy 
is an autogenerated deepcopy function, copying the receiver, creating a new ActionAuthenticateOidcObservation. +func (in *ActionAuthenticateOidcObservation) DeepCopy() *ActionAuthenticateOidcObservation { + if in == nil { + return nil + } + out := new(ActionAuthenticateOidcObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionAuthenticateOidcParameters) DeepCopyInto(out *ActionAuthenticateOidcParameters) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = 
**in + } + if in.UserInfoEndpoint != nil { + in, out := &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionAuthenticateOidcParameters. +func (in *ActionAuthenticateOidcParameters) DeepCopy() *ActionAuthenticateOidcParameters { + if in == nil { + return nil + } + out := new(ActionAuthenticateOidcParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionFixedResponseInitParameters) DeepCopyInto(out *ActionFixedResponseInitParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.MessageBody != nil { + in, out := &in.MessageBody, &out.MessageBody + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionFixedResponseInitParameters. +func (in *ActionFixedResponseInitParameters) DeepCopy() *ActionFixedResponseInitParameters { + if in == nil { + return nil + } + out := new(ActionFixedResponseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionFixedResponseObservation) DeepCopyInto(out *ActionFixedResponseObservation) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.MessageBody != nil { + in, out := &in.MessageBody, &out.MessageBody + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionFixedResponseObservation. +func (in *ActionFixedResponseObservation) DeepCopy() *ActionFixedResponseObservation { + if in == nil { + return nil + } + out := new(ActionFixedResponseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionFixedResponseParameters) DeepCopyInto(out *ActionFixedResponseParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.MessageBody != nil { + in, out := &in.MessageBody, &out.MessageBody + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionFixedResponseParameters. +func (in *ActionFixedResponseParameters) DeepCopy() *ActionFixedResponseParameters { + if in == nil { + return nil + } + out := new(ActionFixedResponseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionForwardInitParameters) DeepCopyInto(out *ActionForwardInitParameters) { + *out = *in + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(ForwardStickinessInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroup != nil { + in, out := &in.TargetGroup, &out.TargetGroup + *out = make([]ForwardTargetGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionForwardInitParameters. +func (in *ActionForwardInitParameters) DeepCopy() *ActionForwardInitParameters { + if in == nil { + return nil + } + out := new(ActionForwardInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionForwardObservation) DeepCopyInto(out *ActionForwardObservation) { + *out = *in + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(ForwardStickinessObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetGroup != nil { + in, out := &in.TargetGroup, &out.TargetGroup + *out = make([]ForwardTargetGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionForwardObservation. +func (in *ActionForwardObservation) DeepCopy() *ActionForwardObservation { + if in == nil { + return nil + } + out := new(ActionForwardObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionForwardParameters) DeepCopyInto(out *ActionForwardParameters) { + *out = *in + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(ForwardStickinessParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroup != nil { + in, out := &in.TargetGroup, &out.TargetGroup + *out = make([]ForwardTargetGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionForwardParameters. +func (in *ActionForwardParameters) DeepCopy() *ActionForwardParameters { + if in == nil { + return nil + } + out := new(ActionForwardParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.AuthenticateCognito != nil { + in, out := &in.AuthenticateCognito, &out.AuthenticateCognito + *out = new(ActionAuthenticateCognitoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthenticateOidc != nil { + in, out := &in.AuthenticateOidc, &out.AuthenticateOidc + *out = new(ActionAuthenticateOidcInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedResponse != nil { + in, out := &in.FixedResponse, &out.FixedResponse + *out = new(ActionFixedResponseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(ActionForwardInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(ActionRedirectInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArn != nil { + in, out := &in.TargetGroupArn, &out.TargetGroupArn + *out = new(string) + **out = **in + } + if in.TargetGroupArnRef != 
nil { + in, out := &in.TargetGroupArnRef, &out.TargetGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArnSelector != nil { + in, out := &in.TargetGroupArnSelector, &out.TargetGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.AuthenticateCognito != nil { + in, out := &in.AuthenticateCognito, &out.AuthenticateCognito + *out = new(ActionAuthenticateCognitoObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthenticateOidc != nil { + in, out := &in.AuthenticateOidc, &out.AuthenticateOidc + *out = new(ActionAuthenticateOidcObservation) + (*in).DeepCopyInto(*out) + } + if in.FixedResponse != nil { + in, out := &in.FixedResponse, &out.FixedResponse + *out = new(ActionFixedResponseObservation) + (*in).DeepCopyInto(*out) + } + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(ActionForwardObservation) + (*in).DeepCopyInto(*out) + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(ActionRedirectObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArn != nil { + in, out := &in.TargetGroupArn, &out.TargetGroupArn + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is 
an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.AuthenticateCognito != nil { + in, out := &in.AuthenticateCognito, &out.AuthenticateCognito + *out = new(ActionAuthenticateCognitoParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthenticateOidc != nil { + in, out := &in.AuthenticateOidc, &out.AuthenticateOidc + *out = new(ActionAuthenticateOidcParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedResponse != nil { + in, out := &in.FixedResponse, &out.FixedResponse + *out = new(ActionFixedResponseParameters) + (*in).DeepCopyInto(*out) + } + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(ActionForwardParameters) + (*in).DeepCopyInto(*out) + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(ActionRedirectParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArn != nil { + in, out := &in.TargetGroupArn, &out.TargetGroupArn + *out = new(string) + **out = **in + } + if in.TargetGroupArnRef != nil { + in, out := &in.TargetGroupArnRef, &out.TargetGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArnSelector != nil { + in, out := &in.TargetGroupArnSelector, &out.TargetGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. 
+func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionRedirectInitParameters) DeepCopyInto(out *ActionRedirectInitParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionRedirectInitParameters. +func (in *ActionRedirectInitParameters) DeepCopy() *ActionRedirectInitParameters { + if in == nil { + return nil + } + out := new(ActionRedirectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionRedirectObservation) DeepCopyInto(out *ActionRedirectObservation) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionRedirectObservation. +func (in *ActionRedirectObservation) DeepCopy() *ActionRedirectObservation { + if in == nil { + return nil + } + out := new(ActionRedirectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionRedirectParameters) DeepCopyInto(out *ActionRedirectParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionRedirectParameters. 
+func (in *ActionRedirectParameters) DeepCopy() *ActionRedirectParameters { + if in == nil { + return nil + } + out := new(ActionRedirectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticateCognitoInitParameters) DeepCopyInto(out *AuthenticateCognitoInitParameters) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.UserPoolArn != nil { + in, out := &in.UserPoolArn, &out.UserPoolArn + *out = new(string) + **out = **in + } + if in.UserPoolClientID != nil { + in, out := &in.UserPoolClientID, &out.UserPoolClientID + *out = new(string) + **out = **in + } + if in.UserPoolDomain != nil { + in, out := &in.UserPoolDomain, &out.UserPoolDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticateCognitoInitParameters. 
+func (in *AuthenticateCognitoInitParameters) DeepCopy() *AuthenticateCognitoInitParameters { + if in == nil { + return nil + } + out := new(AuthenticateCognitoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticateCognitoObservation) DeepCopyInto(out *AuthenticateCognitoObservation) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.UserPoolArn != nil { + in, out := &in.UserPoolArn, &out.UserPoolArn + *out = new(string) + **out = **in + } + if in.UserPoolClientID != nil { + in, out := &in.UserPoolClientID, &out.UserPoolClientID + *out = new(string) + **out = **in + } + if in.UserPoolDomain != nil { + in, out := &in.UserPoolDomain, &out.UserPoolDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticateCognitoObservation. 
+func (in *AuthenticateCognitoObservation) DeepCopy() *AuthenticateCognitoObservation { + if in == nil { + return nil + } + out := new(AuthenticateCognitoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticateCognitoParameters) DeepCopyInto(out *AuthenticateCognitoParameters) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.UserPoolArn != nil { + in, out := &in.UserPoolArn, &out.UserPoolArn + *out = new(string) + **out = **in + } + if in.UserPoolClientID != nil { + in, out := &in.UserPoolClientID, &out.UserPoolClientID + *out = new(string) + **out = **in + } + if in.UserPoolDomain != nil { + in, out := &in.UserPoolDomain, &out.UserPoolDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticateCognitoParameters. 
+func (in *AuthenticateCognitoParameters) DeepCopy() *AuthenticateCognitoParameters { + if in == nil { + return nil + } + out := new(AuthenticateCognitoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticateOidcInitParameters) DeepCopyInto(out *AuthenticateOidcInitParameters) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } + if in.UserInfoEndpoint != nil { + in, out := &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = 
**in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticateOidcInitParameters. +func (in *AuthenticateOidcInitParameters) DeepCopy() *AuthenticateOidcInitParameters { + if in == nil { + return nil + } + out := new(AuthenticateOidcInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticateOidcObservation) DeepCopyInto(out *AuthenticateOidcObservation) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } + if in.UserInfoEndpoint != nil { + in, out 
:= &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticateOidcObservation. +func (in *AuthenticateOidcObservation) DeepCopy() *AuthenticateOidcObservation { + if in == nil { + return nil + } + out := new(AuthenticateOidcObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticateOidcParameters) DeepCopyInto(out *AuthenticateOidcParameters) { + *out = *in + if in.AuthenticationRequestExtraParams != nil { + in, out := &in.AuthenticationRequestExtraParams, &out.AuthenticationRequestExtraParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.OnUnauthenticatedRequest != nil { + in, out := &in.OnUnauthenticatedRequest, &out.OnUnauthenticatedRequest + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SessionCookieName != nil { + in, out := &in.SessionCookieName, &out.SessionCookieName + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.TokenEndpoint != nil { + in, out := 
&in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } + if in.UserInfoEndpoint != nil { + in, out := &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticateOidcParameters. +func (in *AuthenticateOidcParameters) DeepCopy() *AuthenticateOidcParameters { + if in == nil { + return nil + } + out := new(AuthenticateOidcParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { + *out = *in + if in.HTTPHeader != nil { + in, out := &in.HTTPHeader, &out.HTTPHeader + *out = new(HTTPHeaderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRequestMethod != nil { + in, out := &in.HTTPRequestMethod, &out.HTTPRequestMethod + *out = new(HTTPRequestMethodInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HostHeader != nil { + in, out := &in.HostHeader, &out.HostHeader + *out = new(HostHeaderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(PathPatternInitParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = make([]QueryStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = new(SourceIPInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters. 
+func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) { + *out = *in + if in.HTTPHeader != nil { + in, out := &in.HTTPHeader, &out.HTTPHeader + *out = new(HTTPHeaderObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRequestMethod != nil { + in, out := &in.HTTPRequestMethod, &out.HTTPRequestMethod + *out = new(HTTPRequestMethodObservation) + (*in).DeepCopyInto(*out) + } + if in.HostHeader != nil { + in, out := &in.HostHeader, &out.HostHeader + *out = new(HostHeaderObservation) + (*in).DeepCopyInto(*out) + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(PathPatternObservation) + (*in).DeepCopyInto(*out) + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = make([]QueryStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = new(SourceIPObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation. +func (in *ConditionObservation) DeepCopy() *ConditionObservation { + if in == nil { + return nil + } + out := new(ConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) { + *out = *in + if in.HTTPHeader != nil { + in, out := &in.HTTPHeader, &out.HTTPHeader + *out = new(HTTPHeaderParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRequestMethod != nil { + in, out := &in.HTTPRequestMethod, &out.HTTPRequestMethod + *out = new(HTTPRequestMethodParameters) + (*in).DeepCopyInto(*out) + } + if in.HostHeader != nil { + in, out := &in.HostHeader, &out.HostHeader + *out = new(HostHeaderParameters) + (*in).DeepCopyInto(*out) + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(PathPatternParameters) + (*in).DeepCopyInto(*out) + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = make([]QueryStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceIP != nil { + in, out := &in.SourceIP, &out.SourceIP + *out = new(SourceIPParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters. +func (in *ConditionParameters) DeepCopy() *ConditionParameters { + if in == nil { + return nil + } + out := new(ConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionLogsInitParameters) DeepCopyInto(out *ConnectionLogsInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionLogsInitParameters. 
+func (in *ConnectionLogsInitParameters) DeepCopy() *ConnectionLogsInitParameters { + if in == nil { + return nil + } + out := new(ConnectionLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionLogsObservation) DeepCopyInto(out *ConnectionLogsObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionLogsObservation. +func (in *ConnectionLogsObservation) DeepCopy() *ConnectionLogsObservation { + if in == nil { + return nil + } + out := new(ConnectionLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionLogsParameters) DeepCopyInto(out *ConnectionLogsParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionLogsParameters. +func (in *ConnectionLogsParameters) DeepCopy() *ConnectionLogsParameters { + if in == nil { + return nil + } + out := new(ConnectionLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultActionInitParameters) DeepCopyInto(out *DefaultActionInitParameters) { + *out = *in + if in.AuthenticateCognito != nil { + in, out := &in.AuthenticateCognito, &out.AuthenticateCognito + *out = new(AuthenticateCognitoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthenticateOidc != nil { + in, out := &in.AuthenticateOidc, &out.AuthenticateOidc + *out = new(AuthenticateOidcInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedResponse != nil { + in, out := &in.FixedResponse, &out.FixedResponse + *out = new(FixedResponseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(ForwardInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(RedirectInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArn != nil { + in, out := &in.TargetGroupArn, &out.TargetGroupArn + *out = new(string) + **out = **in + } + if in.TargetGroupArnRef != nil { + in, out := &in.TargetGroupArnRef, &out.TargetGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArnSelector != nil { + in, out := &in.TargetGroupArnSelector, &out.TargetGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultActionInitParameters. +func (in *DefaultActionInitParameters) DeepCopy() *DefaultActionInitParameters { + if in == nil { + return nil + } + out := new(DefaultActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultActionObservation) DeepCopyInto(out *DefaultActionObservation) { + *out = *in + if in.AuthenticateCognito != nil { + in, out := &in.AuthenticateCognito, &out.AuthenticateCognito + *out = new(AuthenticateCognitoObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthenticateOidc != nil { + in, out := &in.AuthenticateOidc, &out.AuthenticateOidc + *out = new(AuthenticateOidcObservation) + (*in).DeepCopyInto(*out) + } + if in.FixedResponse != nil { + in, out := &in.FixedResponse, &out.FixedResponse + *out = new(FixedResponseObservation) + (*in).DeepCopyInto(*out) + } + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(ForwardObservation) + (*in).DeepCopyInto(*out) + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(RedirectObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArn != nil { + in, out := &in.TargetGroupArn, &out.TargetGroupArn + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultActionObservation. +func (in *DefaultActionObservation) DeepCopy() *DefaultActionObservation { + if in == nil { + return nil + } + out := new(DefaultActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultActionParameters) DeepCopyInto(out *DefaultActionParameters) { + *out = *in + if in.AuthenticateCognito != nil { + in, out := &in.AuthenticateCognito, &out.AuthenticateCognito + *out = new(AuthenticateCognitoParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthenticateOidc != nil { + in, out := &in.AuthenticateOidc, &out.AuthenticateOidc + *out = new(AuthenticateOidcParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedResponse != nil { + in, out := &in.FixedResponse, &out.FixedResponse + *out = new(FixedResponseParameters) + (*in).DeepCopyInto(*out) + } + if in.Forward != nil { + in, out := &in.Forward, &out.Forward + *out = new(ForwardParameters) + (*in).DeepCopyInto(*out) + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(RedirectParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArn != nil { + in, out := &in.TargetGroupArn, &out.TargetGroupArn + *out = new(string) + **out = **in + } + if in.TargetGroupArnRef != nil { + in, out := &in.TargetGroupArnRef, &out.TargetGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetGroupArnSelector != nil { + in, out := &in.TargetGroupArnSelector, &out.TargetGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultActionParameters. +func (in *DefaultActionParameters) DeepCopy() *DefaultActionParameters { + if in == nil { + return nil + } + out := new(DefaultActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FixedResponseInitParameters) DeepCopyInto(out *FixedResponseInitParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.MessageBody != nil { + in, out := &in.MessageBody, &out.MessageBody + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedResponseInitParameters. +func (in *FixedResponseInitParameters) DeepCopy() *FixedResponseInitParameters { + if in == nil { + return nil + } + out := new(FixedResponseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedResponseObservation) DeepCopyInto(out *FixedResponseObservation) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.MessageBody != nil { + in, out := &in.MessageBody, &out.MessageBody + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedResponseObservation. +func (in *FixedResponseObservation) DeepCopy() *FixedResponseObservation { + if in == nil { + return nil + } + out := new(FixedResponseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FixedResponseParameters) DeepCopyInto(out *FixedResponseParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.MessageBody != nil { + in, out := &in.MessageBody, &out.MessageBody + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedResponseParameters. +func (in *FixedResponseParameters) DeepCopy() *FixedResponseParameters { + if in == nil { + return nil + } + out := new(FixedResponseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardInitParameters) DeepCopyInto(out *ForwardInitParameters) { + *out = *in + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(StickinessInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroup != nil { + in, out := &in.TargetGroup, &out.TargetGroup + *out = make([]TargetGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardInitParameters. +func (in *ForwardInitParameters) DeepCopy() *ForwardInitParameters { + if in == nil { + return nil + } + out := new(ForwardInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForwardObservation) DeepCopyInto(out *ForwardObservation) { + *out = *in + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(StickinessObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetGroup != nil { + in, out := &in.TargetGroup, &out.TargetGroup + *out = make([]TargetGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardObservation. +func (in *ForwardObservation) DeepCopy() *ForwardObservation { + if in == nil { + return nil + } + out := new(ForwardObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardParameters) DeepCopyInto(out *ForwardParameters) { + *out = *in + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(StickinessParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetGroup != nil { + in, out := &in.TargetGroup, &out.TargetGroup + *out = make([]TargetGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardParameters. +func (in *ForwardParameters) DeepCopy() *ForwardParameters { + if in == nil { + return nil + } + out := new(ForwardParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForwardStickinessInitParameters) DeepCopyInto(out *ForwardStickinessInitParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardStickinessInitParameters. +func (in *ForwardStickinessInitParameters) DeepCopy() *ForwardStickinessInitParameters { + if in == nil { + return nil + } + out := new(ForwardStickinessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardStickinessObservation) DeepCopyInto(out *ForwardStickinessObservation) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardStickinessObservation. +func (in *ForwardStickinessObservation) DeepCopy() *ForwardStickinessObservation { + if in == nil { + return nil + } + out := new(ForwardStickinessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardStickinessParameters) DeepCopyInto(out *ForwardStickinessParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardStickinessParameters. 
+func (in *ForwardStickinessParameters) DeepCopy() *ForwardStickinessParameters { + if in == nil { + return nil + } + out := new(ForwardStickinessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardTargetGroupInitParameters) DeepCopyInto(out *ForwardTargetGroupInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardTargetGroupInitParameters. +func (in *ForwardTargetGroupInitParameters) DeepCopy() *ForwardTargetGroupInitParameters { + if in == nil { + return nil + } + out := new(ForwardTargetGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardTargetGroupObservation) DeepCopyInto(out *ForwardTargetGroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardTargetGroupObservation. 
+func (in *ForwardTargetGroupObservation) DeepCopy() *ForwardTargetGroupObservation { + if in == nil { + return nil + } + out := new(ForwardTargetGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardTargetGroupParameters) DeepCopyInto(out *ForwardTargetGroupParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardTargetGroupParameters. +func (in *ForwardTargetGroupParameters) DeepCopy() *ForwardTargetGroupParameters { + if in == nil { + return nil + } + out := new(ForwardTargetGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeaderInitParameters) DeepCopyInto(out *HTTPHeaderInitParameters) { + *out = *in + if in.HTTPHeaderName != nil { + in, out := &in.HTTPHeaderName, &out.HTTPHeaderName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderInitParameters. 
+func (in *HTTPHeaderInitParameters) DeepCopy() *HTTPHeaderInitParameters { + if in == nil { + return nil + } + out := new(HTTPHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeaderObservation) DeepCopyInto(out *HTTPHeaderObservation) { + *out = *in + if in.HTTPHeaderName != nil { + in, out := &in.HTTPHeaderName, &out.HTTPHeaderName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderObservation. +func (in *HTTPHeaderObservation) DeepCopy() *HTTPHeaderObservation { + if in == nil { + return nil + } + out := new(HTTPHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeaderParameters) DeepCopyInto(out *HTTPHeaderParameters) { + *out = *in + if in.HTTPHeaderName != nil { + in, out := &in.HTTPHeaderName, &out.HTTPHeaderName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderParameters. +func (in *HTTPHeaderParameters) DeepCopy() *HTTPHeaderParameters { + if in == nil { + return nil + } + out := new(HTTPHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *HTTPRequestMethodInitParameters) DeepCopyInto(out *HTTPRequestMethodInitParameters) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRequestMethodInitParameters. +func (in *HTTPRequestMethodInitParameters) DeepCopy() *HTTPRequestMethodInitParameters { + if in == nil { + return nil + } + out := new(HTTPRequestMethodInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRequestMethodObservation) DeepCopyInto(out *HTTPRequestMethodObservation) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRequestMethodObservation. +func (in *HTTPRequestMethodObservation) DeepCopy() *HTTPRequestMethodObservation { + if in == nil { + return nil + } + out := new(HTTPRequestMethodObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRequestMethodParameters) DeepCopyInto(out *HTTPRequestMethodParameters) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRequestMethodParameters. +func (in *HTTPRequestMethodParameters) DeepCopy() *HTTPRequestMethodParameters { + if in == nil { + return nil + } + out := new(HTTPRequestMethodParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheckInitParameters) DeepCopyInto(out *HealthCheckInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Matcher != nil { + in, out := &in.Matcher, &out.Matcher + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckInitParameters. 
+func (in *HealthCheckInitParameters) DeepCopy() *HealthCheckInitParameters { + if in == nil { + return nil + } + out := new(HealthCheckInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheckObservation) DeepCopyInto(out *HealthCheckObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Matcher != nil { + in, out := &in.Matcher, &out.Matcher + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckObservation. +func (in *HealthCheckObservation) DeepCopy() *HealthCheckObservation { + if in == nil { + return nil + } + out := new(HealthCheckObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckParameters) DeepCopyInto(out *HealthCheckParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HealthyThreshold != nil { + in, out := &in.HealthyThreshold, &out.HealthyThreshold + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Matcher != nil { + in, out := &in.Matcher, &out.Matcher + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckParameters. +func (in *HealthCheckParameters) DeepCopy() *HealthCheckParameters { + if in == nil { + return nil + } + out := new(HealthCheckParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostHeaderInitParameters) DeepCopyInto(out *HostHeaderInitParameters) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostHeaderInitParameters. 
+func (in *HostHeaderInitParameters) DeepCopy() *HostHeaderInitParameters { + if in == nil { + return nil + } + out := new(HostHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostHeaderObservation) DeepCopyInto(out *HostHeaderObservation) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostHeaderObservation. +func (in *HostHeaderObservation) DeepCopy() *HostHeaderObservation { + if in == nil { + return nil + } + out := new(HostHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostHeaderParameters) DeepCopyInto(out *HostHeaderParameters) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostHeaderParameters. +func (in *HostHeaderParameters) DeepCopy() *HostHeaderParameters { + if in == nil { + return nil + } + out := new(HostHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LB) DeepCopyInto(out *LB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LB. +func (in *LB) DeepCopy() *LB { + if in == nil { + return nil + } + out := new(LB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBInitParameters) DeepCopyInto(out *LBInitParameters) { + *out = *in + if in.AccessLogs != nil { + in, out := &in.AccessLogs, &out.AccessLogs + *out = new(AccessLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientKeepAlive != nil { + in, out := &in.ClientKeepAlive, &out.ClientKeepAlive + *out = new(float64) + **out = **in + } + if in.ConnectionLogs != nil { + in, out := &in.ConnectionLogs, &out.ConnectionLogs + *out = new(ConnectionLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomerOwnedIPv4Pool != nil { + in, out := &in.CustomerOwnedIPv4Pool, &out.CustomerOwnedIPv4Pool + *out = new(string) + **out = **in + } + if in.DNSRecordClientRoutingPolicy != nil { + in, out := &in.DNSRecordClientRoutingPolicy, &out.DNSRecordClientRoutingPolicy + *out = new(string) + **out = **in + } + if in.DesyncMitigationMode != nil { + in, out := &in.DesyncMitigationMode, &out.DesyncMitigationMode + *out = new(string) + **out = **in + } + if in.DropInvalidHeaderFields != nil { + in, out := &in.DropInvalidHeaderFields, &out.DropInvalidHeaderFields + *out = new(bool) + **out = **in + } + if in.EnableCrossZoneLoadBalancing != nil { + in, out := &in.EnableCrossZoneLoadBalancing, 
&out.EnableCrossZoneLoadBalancing + *out = new(bool) + **out = **in + } + if in.EnableDeletionProtection != nil { + in, out := &in.EnableDeletionProtection, &out.EnableDeletionProtection + *out = new(bool) + **out = **in + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.EnableTLSVersionAndCipherSuiteHeaders != nil { + in, out := &in.EnableTLSVersionAndCipherSuiteHeaders, &out.EnableTLSVersionAndCipherSuiteHeaders + *out = new(bool) + **out = **in + } + if in.EnableWafFailOpen != nil { + in, out := &in.EnableWafFailOpen, &out.EnableWafFailOpen + *out = new(bool) + **out = **in + } + if in.EnableXffClientPort != nil { + in, out := &in.EnableXffClientPort, &out.EnableXffClientPort + *out = new(bool) + **out = **in + } + if in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic != nil { + in, out := &in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic, &out.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic + *out = new(string) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(float64) + **out = **in + } + if in.Internal != nil { + in, out := &in.Internal, &out.Internal + *out = new(bool) + **out = **in + } + if in.LoadBalancerType != nil { + in, out := &in.LoadBalancerType, &out.LoadBalancerType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PreserveHostHeader != nil { + in, out := &in.PreserveHostHeader, &out.PreserveHostHeader + *out = new(bool) + **out = **in + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := 
&in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetMapping != nil { + in, out := &in.SubnetMapping, &out.SubnetMapping + *out = make([]SubnetMappingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetRefs != nil { + in, out := &in.SubnetRefs, &out.SubnetRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetSelector != nil { + in, out := &in.SubnetSelector, &out.SubnetSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.XffHeaderProcessingMode != nil { + in, out := &in.XffHeaderProcessingMode, &out.XffHeaderProcessingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBInitParameters. +func (in *LBInitParameters) DeepCopy() *LBInitParameters { + if in == nil { + return nil + } + out := new(LBInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBList) DeepCopyInto(out *LBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBList. +func (in *LBList) DeepCopy() *LBList { + if in == nil { + return nil + } + out := new(LBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBListener) DeepCopyInto(out *LBListener) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListener. +func (in *LBListener) DeepCopy() *LBListener { + if in == nil { + return nil + } + out := new(LBListener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LBListener) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBListenerInitParameters) DeepCopyInto(out *LBListenerInitParameters) { + *out = *in + if in.AlpnPolicy != nil { + in, out := &in.AlpnPolicy, &out.AlpnPolicy + *out = new(string) + **out = **in + } + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = make([]DefaultActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancerArn != nil { + in, out := &in.LoadBalancerArn, &out.LoadBalancerArn + *out = new(string) + **out = **in + } + if in.LoadBalancerArnRef != nil { + in, out := &in.LoadBalancerArnRef, &out.LoadBalancerArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerArnSelector != nil { + in, out := &in.LoadBalancerArnSelector, &out.LoadBalancerArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MutualAuthentication != nil { + in, out := &in.MutualAuthentication, &out.MutualAuthentication + *out = new(MutualAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.SSLPolicy != nil { + in, out := &in.SSLPolicy, &out.SSLPolicy + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerInitParameters. 
+func (in *LBListenerInitParameters) DeepCopy() *LBListenerInitParameters { + if in == nil { + return nil + } + out := new(LBListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBListenerList) DeepCopyInto(out *LBListenerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LBListener, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerList. +func (in *LBListenerList) DeepCopy() *LBListenerList { + if in == nil { + return nil + } + out := new(LBListenerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LBListenerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBListenerObservation) DeepCopyInto(out *LBListenerObservation) { + *out = *in + if in.AlpnPolicy != nil { + in, out := &in.AlpnPolicy, &out.AlpnPolicy + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = make([]DefaultActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoadBalancerArn != nil { + in, out := &in.LoadBalancerArn, &out.LoadBalancerArn + *out = new(string) + **out = **in + } + if in.MutualAuthentication != nil { + in, out := &in.MutualAuthentication, &out.MutualAuthentication + *out = new(MutualAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.SSLPolicy != nil { + in, out := &in.SSLPolicy, &out.SSLPolicy + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy 
is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerObservation. +func (in *LBListenerObservation) DeepCopy() *LBListenerObservation { + if in == nil { + return nil + } + out := new(LBListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBListenerParameters) DeepCopyInto(out *LBListenerParameters) { + *out = *in + if in.AlpnPolicy != nil { + in, out := &in.AlpnPolicy, &out.AlpnPolicy + *out = new(string) + **out = **in + } + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = make([]DefaultActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancerArn != nil { + in, out := &in.LoadBalancerArn, &out.LoadBalancerArn + *out = new(string) + **out = **in + } + if in.LoadBalancerArnRef != nil { + in, out := &in.LoadBalancerArnRef, &out.LoadBalancerArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerArnSelector != nil { + in, out := &in.LoadBalancerArnSelector, &out.LoadBalancerArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MutualAuthentication != nil { + in, out := &in.MutualAuthentication, &out.MutualAuthentication + *out = new(MutualAuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SSLPolicy != nil { + in, out := &in.SSLPolicy, &out.SSLPolicy + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out 
:= &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerParameters. +func (in *LBListenerParameters) DeepCopy() *LBListenerParameters { + if in == nil { + return nil + } + out := new(LBListenerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBListenerRule) DeepCopyInto(out *LBListenerRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerRule. +func (in *LBListenerRule) DeepCopy() *LBListenerRule { + if in == nil { + return nil + } + out := new(LBListenerRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LBListenerRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBListenerRuleInitParameters) DeepCopyInto(out *LBListenerRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ListenerArn != nil { + in, out := &in.ListenerArn, &out.ListenerArn + *out = new(string) + **out = **in + } + if in.ListenerArnRef != nil { + in, out := &in.ListenerArnRef, &out.ListenerArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ListenerArnSelector != nil { + in, out := &in.ListenerArnSelector, &out.ListenerArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerRuleInitParameters. +func (in *LBListenerRuleInitParameters) DeepCopy() *LBListenerRuleInitParameters { + if in == nil { + return nil + } + out := new(LBListenerRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBListenerRuleList) DeepCopyInto(out *LBListenerRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LBListenerRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerRuleList. +func (in *LBListenerRuleList) DeepCopy() *LBListenerRuleList { + if in == nil { + return nil + } + out := new(LBListenerRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LBListenerRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBListenerRuleObservation) DeepCopyInto(out *LBListenerRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ListenerArn != nil { + in, out := &in.ListenerArn, &out.ListenerArn + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerRuleObservation. +func (in *LBListenerRuleObservation) DeepCopy() *LBListenerRuleObservation { + if in == nil { + return nil + } + out := new(LBListenerRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBListenerRuleParameters) DeepCopyInto(out *LBListenerRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ListenerArn != nil { + in, out := &in.ListenerArn, &out.ListenerArn + *out = new(string) + **out = **in + } + if in.ListenerArnRef != nil { + in, out := &in.ListenerArnRef, &out.ListenerArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ListenerArnSelector != nil { + in, out := &in.ListenerArnSelector, &out.ListenerArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if 
in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerRuleParameters. +func (in *LBListenerRuleParameters) DeepCopy() *LBListenerRuleParameters { + if in == nil { + return nil + } + out := new(LBListenerRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBListenerRuleSpec) DeepCopyInto(out *LBListenerRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerRuleSpec. +func (in *LBListenerRuleSpec) DeepCopy() *LBListenerRuleSpec { + if in == nil { + return nil + } + out := new(LBListenerRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBListenerRuleStatus) DeepCopyInto(out *LBListenerRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerRuleStatus. +func (in *LBListenerRuleStatus) DeepCopy() *LBListenerRuleStatus { + if in == nil { + return nil + } + out := new(LBListenerRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBListenerSpec) DeepCopyInto(out *LBListenerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerSpec. +func (in *LBListenerSpec) DeepCopy() *LBListenerSpec { + if in == nil { + return nil + } + out := new(LBListenerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBListenerStatus) DeepCopyInto(out *LBListenerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBListenerStatus. +func (in *LBListenerStatus) DeepCopy() *LBListenerStatus { + if in == nil { + return nil + } + out := new(LBListenerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBObservation) DeepCopyInto(out *LBObservation) { + *out = *in + if in.AccessLogs != nil { + in, out := &in.AccessLogs, &out.AccessLogs + *out = new(AccessLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnSuffix != nil { + in, out := &in.ArnSuffix, &out.ArnSuffix + *out = new(string) + **out = **in + } + if in.ClientKeepAlive != nil { + in, out := &in.ClientKeepAlive, &out.ClientKeepAlive + *out = new(float64) + **out = **in + } + if in.ConnectionLogs != nil { + in, out := &in.ConnectionLogs, &out.ConnectionLogs + *out = new(ConnectionLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomerOwnedIPv4Pool != nil { + in, out := &in.CustomerOwnedIPv4Pool, &out.CustomerOwnedIPv4Pool + *out = new(string) + **out = **in + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DNSRecordClientRoutingPolicy != nil { + in, out := &in.DNSRecordClientRoutingPolicy, &out.DNSRecordClientRoutingPolicy + *out = new(string) + **out = **in + } + if in.DesyncMitigationMode != nil { + in, out := &in.DesyncMitigationMode, &out.DesyncMitigationMode + *out = new(string) + **out = **in + } + if in.DropInvalidHeaderFields != nil { + in, out := &in.DropInvalidHeaderFields, &out.DropInvalidHeaderFields + *out = new(bool) + **out = **in + } + if in.EnableCrossZoneLoadBalancing != nil { + in, out := &in.EnableCrossZoneLoadBalancing, &out.EnableCrossZoneLoadBalancing + *out = new(bool) + **out = **in + } + if in.EnableDeletionProtection != nil { + in, out := &in.EnableDeletionProtection, &out.EnableDeletionProtection + *out = new(bool) + **out = **in + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.EnableTLSVersionAndCipherSuiteHeaders != nil { + in, out := &in.EnableTLSVersionAndCipherSuiteHeaders, &out.EnableTLSVersionAndCipherSuiteHeaders + 
*out = new(bool) + **out = **in + } + if in.EnableWafFailOpen != nil { + in, out := &in.EnableWafFailOpen, &out.EnableWafFailOpen + *out = new(bool) + **out = **in + } + if in.EnableXffClientPort != nil { + in, out := &in.EnableXffClientPort, &out.EnableXffClientPort + *out = new(bool) + **out = **in + } + if in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic != nil { + in, out := &in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic, &out.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(float64) + **out = **in + } + if in.Internal != nil { + in, out := &in.Internal, &out.Internal + *out = new(bool) + **out = **in + } + if in.LoadBalancerType != nil { + in, out := &in.LoadBalancerType, &out.LoadBalancerType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PreserveHostHeader != nil { + in, out := &in.PreserveHostHeader, &out.PreserveHostHeader + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetMapping != nil { + in, out := &in.SubnetMapping, &out.SubnetMapping + *out = make([]SubnetMappingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = 
**in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.XffHeaderProcessingMode != nil { + in, out := &in.XffHeaderProcessingMode, &out.XffHeaderProcessingMode + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBObservation. +func (in *LBObservation) DeepCopy() *LBObservation { + if in == nil { + return nil + } + out := new(LBObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBParameters) DeepCopyInto(out *LBParameters) { + *out = *in + if in.AccessLogs != nil { + in, out := &in.AccessLogs, &out.AccessLogs + *out = new(AccessLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientKeepAlive != nil { + in, out := &in.ClientKeepAlive, &out.ClientKeepAlive + *out = new(float64) + **out = **in + } + if in.ConnectionLogs != nil { + in, out := &in.ConnectionLogs, &out.ConnectionLogs + *out = new(ConnectionLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomerOwnedIPv4Pool != nil { + in, out := &in.CustomerOwnedIPv4Pool, &out.CustomerOwnedIPv4Pool + *out = new(string) + **out = **in + } + if in.DNSRecordClientRoutingPolicy != nil { + in, out := &in.DNSRecordClientRoutingPolicy, &out.DNSRecordClientRoutingPolicy + *out = new(string) + **out = **in + } + if in.DesyncMitigationMode != nil { + in, out := &in.DesyncMitigationMode, &out.DesyncMitigationMode + *out = new(string) + **out = **in + } + if in.DropInvalidHeaderFields != nil { + in, out := &in.DropInvalidHeaderFields, &out.DropInvalidHeaderFields + *out = new(bool) + **out = **in + } + if in.EnableCrossZoneLoadBalancing != nil { + in, out := &in.EnableCrossZoneLoadBalancing, &out.EnableCrossZoneLoadBalancing + *out = new(bool) + **out = **in + } + if in.EnableDeletionProtection != nil { + in, out := &in.EnableDeletionProtection, &out.EnableDeletionProtection + *out = new(bool) + **out = **in + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.EnableTLSVersionAndCipherSuiteHeaders != nil { + in, out := &in.EnableTLSVersionAndCipherSuiteHeaders, &out.EnableTLSVersionAndCipherSuiteHeaders + *out = new(bool) + **out = **in + } + if in.EnableWafFailOpen != nil { + in, out := &in.EnableWafFailOpen, &out.EnableWafFailOpen + *out = new(bool) + **out = **in + } + if in.EnableXffClientPort != nil { + in, out := &in.EnableXffClientPort, &out.EnableXffClientPort + *out = new(bool) + **out = **in + } + if 
in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic != nil { + in, out := &in.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic, &out.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic + *out = new(string) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.IdleTimeout != nil { + in, out := &in.IdleTimeout, &out.IdleTimeout + *out = new(float64) + **out = **in + } + if in.Internal != nil { + in, out := &in.Internal, &out.Internal + *out = new(bool) + **out = **in + } + if in.LoadBalancerType != nil { + in, out := &in.LoadBalancerType, &out.LoadBalancerType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PreserveHostHeader != nil { + in, out := &in.PreserveHostHeader, &out.PreserveHostHeader + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetMapping != nil { + in, out := &in.SubnetMapping, &out.SubnetMapping + *out = make([]SubnetMappingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetRefs != nil { + in, out := &in.SubnetRefs, &out.SubnetRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetSelector != nil { + in, out := &in.SubnetSelector, &out.SubnetSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.XffHeaderProcessingMode != nil { + in, out := &in.XffHeaderProcessingMode, &out.XffHeaderProcessingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBParameters. +func (in *LBParameters) DeepCopy() *LBParameters { + if in == nil { + return nil + } + out := new(LBParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBSpec) DeepCopyInto(out *LBSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBSpec. +func (in *LBSpec) DeepCopy() *LBSpec { + if in == nil { + return nil + } + out := new(LBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBStatus) DeepCopyInto(out *LBStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBStatus. +func (in *LBStatus) DeepCopy() *LBStatus { + if in == nil { + return nil + } + out := new(LBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBTargetGroup) DeepCopyInto(out *LBTargetGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroup. +func (in *LBTargetGroup) DeepCopy() *LBTargetGroup { + if in == nil { + return nil + } + out := new(LBTargetGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LBTargetGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBTargetGroupInitParameters) DeepCopyInto(out *LBTargetGroupInitParameters) { + *out = *in + if in.ConnectionTermination != nil { + in, out := &in.ConnectionTermination, &out.ConnectionTermination + *out = new(bool) + **out = **in + } + if in.DeregistrationDelay != nil { + in, out := &in.DeregistrationDelay, &out.DeregistrationDelay + *out = new(string) + **out = **in + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(HealthCheckInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.LambdaMultiValueHeadersEnabled != nil { + in, out := &in.LambdaMultiValueHeadersEnabled, &out.LambdaMultiValueHeadersEnabled + *out = new(bool) + **out = **in + } + if in.LoadBalancingAlgorithmType != nil { + in, out := &in.LoadBalancingAlgorithmType, &out.LoadBalancingAlgorithmType + *out = new(string) + **out = **in + } + if in.LoadBalancingAnomalyMitigation != nil { + in, out := &in.LoadBalancingAnomalyMitigation, &out.LoadBalancingAnomalyMitigation + *out = new(string) + **out = **in + } + if in.LoadBalancingCrossZoneEnabled != nil { + in, out := &in.LoadBalancingCrossZoneEnabled, &out.LoadBalancingCrossZoneEnabled + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreserveClientIP != nil { + in, out := &in.PreserveClientIP, &out.PreserveClientIP + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.ProtocolVersion != nil { + in, out := &in.ProtocolVersion, &out.ProtocolVersion + *out = new(string) + **out = **in + } + if in.ProxyProtocolV2 != nil { + in, out := &in.ProxyProtocolV2, &out.ProxyProtocolV2 + *out = new(bool) + 
**out = **in + } + if in.SlowStart != nil { + in, out := &in.SlowStart, &out.SlowStart + *out = new(float64) + **out = **in + } + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(LBTargetGroupStickinessInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetFailover != nil { + in, out := &in.TargetFailover, &out.TargetFailover + *out = make([]TargetFailoverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetHealthState != nil { + in, out := &in.TargetHealthState, &out.TargetHealthState + *out = make([]TargetHealthStateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroupInitParameters. +func (in *LBTargetGroupInitParameters) DeepCopy() *LBTargetGroupInitParameters { + if in == nil { + return nil + } + out := new(LBTargetGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBTargetGroupList) DeepCopyInto(out *LBTargetGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LBTargetGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroupList. +func (in *LBTargetGroupList) DeepCopy() *LBTargetGroupList { + if in == nil { + return nil + } + out := new(LBTargetGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LBTargetGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBTargetGroupObservation) DeepCopyInto(out *LBTargetGroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnSuffix != nil { + in, out := &in.ArnSuffix, &out.ArnSuffix + *out = new(string) + **out = **in + } + if in.ConnectionTermination != nil { + in, out := &in.ConnectionTermination, &out.ConnectionTermination + *out = new(bool) + **out = **in + } + if in.DeregistrationDelay != nil { + in, out := &in.DeregistrationDelay, &out.DeregistrationDelay + *out = new(string) + **out = **in + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(HealthCheckObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.LambdaMultiValueHeadersEnabled != nil { + in, out := &in.LambdaMultiValueHeadersEnabled, 
&out.LambdaMultiValueHeadersEnabled + *out = new(bool) + **out = **in + } + if in.LoadBalancerArns != nil { + in, out := &in.LoadBalancerArns, &out.LoadBalancerArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancingAlgorithmType != nil { + in, out := &in.LoadBalancingAlgorithmType, &out.LoadBalancingAlgorithmType + *out = new(string) + **out = **in + } + if in.LoadBalancingAnomalyMitigation != nil { + in, out := &in.LoadBalancingAnomalyMitigation, &out.LoadBalancingAnomalyMitigation + *out = new(string) + **out = **in + } + if in.LoadBalancingCrossZoneEnabled != nil { + in, out := &in.LoadBalancingCrossZoneEnabled, &out.LoadBalancingCrossZoneEnabled + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreserveClientIP != nil { + in, out := &in.PreserveClientIP, &out.PreserveClientIP + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.ProtocolVersion != nil { + in, out := &in.ProtocolVersion, &out.ProtocolVersion + *out = new(string) + **out = **in + } + if in.ProxyProtocolV2 != nil { + in, out := &in.ProxyProtocolV2, &out.ProxyProtocolV2 + *out = new(bool) + **out = **in + } + if in.SlowStart != nil { + in, out := &in.SlowStart, &out.SlowStart + *out = new(float64) + **out = **in + } + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(LBTargetGroupStickinessObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetFailover != nil { + in, out := &in.TargetFailover, &out.TargetFailover + *out = make([]TargetFailoverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetHealthState != nil { + in, out := &in.TargetHealthState, &out.TargetHealthState + *out = make([]TargetHealthStateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroupObservation. +func (in *LBTargetGroupObservation) DeepCopy() *LBTargetGroupObservation { + if in == nil { + return nil + } + out := new(LBTargetGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBTargetGroupParameters) DeepCopyInto(out *LBTargetGroupParameters) { + *out = *in + if in.ConnectionTermination != nil { + in, out := &in.ConnectionTermination, &out.ConnectionTermination + *out = new(bool) + **out = **in + } + if in.DeregistrationDelay != nil { + in, out := &in.DeregistrationDelay, &out.DeregistrationDelay + *out = new(string) + **out = **in + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(HealthCheckParameters) + (*in).DeepCopyInto(*out) + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.LambdaMultiValueHeadersEnabled != nil { + in, out := &in.LambdaMultiValueHeadersEnabled, &out.LambdaMultiValueHeadersEnabled + *out = new(bool) + **out = **in + } + if in.LoadBalancingAlgorithmType != nil { + in, out := &in.LoadBalancingAlgorithmType, &out.LoadBalancingAlgorithmType + *out = new(string) + **out = **in + } + if in.LoadBalancingAnomalyMitigation != nil { + in, out := &in.LoadBalancingAnomalyMitigation, &out.LoadBalancingAnomalyMitigation + *out = new(string) + **out = **in + } + if in.LoadBalancingCrossZoneEnabled != nil { + in, out := &in.LoadBalancingCrossZoneEnabled, &out.LoadBalancingCrossZoneEnabled + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreserveClientIP != nil { + in, out := &in.PreserveClientIP, &out.PreserveClientIP + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.ProtocolVersion != nil { + in, out := &in.ProtocolVersion, &out.ProtocolVersion + *out = new(string) + **out = **in + } + if in.ProxyProtocolV2 != nil { + in, out := &in.ProxyProtocolV2, &out.ProxyProtocolV2 + *out = new(bool) + **out = **in + 
} + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SlowStart != nil { + in, out := &in.SlowStart, &out.SlowStart + *out = new(float64) + **out = **in + } + if in.Stickiness != nil { + in, out := &in.Stickiness, &out.Stickiness + *out = new(LBTargetGroupStickinessParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetFailover != nil { + in, out := &in.TargetFailover, &out.TargetFailover + *out = make([]TargetFailoverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetHealthState != nil { + in, out := &in.TargetHealthState, &out.TargetHealthState + *out = make([]TargetHealthStateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroupParameters. +func (in *LBTargetGroupParameters) DeepCopy() *LBTargetGroupParameters { + if in == nil { + return nil + } + out := new(LBTargetGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *LBTargetGroupSpec) DeepCopyInto(out *LBTargetGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroupSpec. +func (in *LBTargetGroupSpec) DeepCopy() *LBTargetGroupSpec { + if in == nil { + return nil + } + out := new(LBTargetGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBTargetGroupStatus) DeepCopyInto(out *LBTargetGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroupStatus. +func (in *LBTargetGroupStatus) DeepCopy() *LBTargetGroupStatus { + if in == nil { + return nil + } + out := new(LBTargetGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBTargetGroupStickinessInitParameters) DeepCopyInto(out *LBTargetGroupStickinessInitParameters) { + *out = *in + if in.CookieDuration != nil { + in, out := &in.CookieDuration, &out.CookieDuration + *out = new(float64) + **out = **in + } + if in.CookieName != nil { + in, out := &in.CookieName, &out.CookieName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroupStickinessInitParameters. 
+func (in *LBTargetGroupStickinessInitParameters) DeepCopy() *LBTargetGroupStickinessInitParameters { + if in == nil { + return nil + } + out := new(LBTargetGroupStickinessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBTargetGroupStickinessObservation) DeepCopyInto(out *LBTargetGroupStickinessObservation) { + *out = *in + if in.CookieDuration != nil { + in, out := &in.CookieDuration, &out.CookieDuration + *out = new(float64) + **out = **in + } + if in.CookieName != nil { + in, out := &in.CookieName, &out.CookieName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroupStickinessObservation. +func (in *LBTargetGroupStickinessObservation) DeepCopy() *LBTargetGroupStickinessObservation { + if in == nil { + return nil + } + out := new(LBTargetGroupStickinessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBTargetGroupStickinessParameters) DeepCopyInto(out *LBTargetGroupStickinessParameters) { + *out = *in + if in.CookieDuration != nil { + in, out := &in.CookieDuration, &out.CookieDuration + *out = new(float64) + **out = **in + } + if in.CookieName != nil { + in, out := &in.CookieName, &out.CookieName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBTargetGroupStickinessParameters. +func (in *LBTargetGroupStickinessParameters) DeepCopy() *LBTargetGroupStickinessParameters { + if in == nil { + return nil + } + out := new(LBTargetGroupStickinessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutualAuthenticationInitParameters) DeepCopyInto(out *MutualAuthenticationInitParameters) { + *out = *in + if in.IgnoreClientCertificateExpiry != nil { + in, out := &in.IgnoreClientCertificateExpiry, &out.IgnoreClientCertificateExpiry + *out = new(bool) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.TrustStoreArn != nil { + in, out := &in.TrustStoreArn, &out.TrustStoreArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualAuthenticationInitParameters. +func (in *MutualAuthenticationInitParameters) DeepCopy() *MutualAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(MutualAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MutualAuthenticationObservation) DeepCopyInto(out *MutualAuthenticationObservation) { + *out = *in + if in.IgnoreClientCertificateExpiry != nil { + in, out := &in.IgnoreClientCertificateExpiry, &out.IgnoreClientCertificateExpiry + *out = new(bool) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.TrustStoreArn != nil { + in, out := &in.TrustStoreArn, &out.TrustStoreArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualAuthenticationObservation. +func (in *MutualAuthenticationObservation) DeepCopy() *MutualAuthenticationObservation { + if in == nil { + return nil + } + out := new(MutualAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutualAuthenticationParameters) DeepCopyInto(out *MutualAuthenticationParameters) { + *out = *in + if in.IgnoreClientCertificateExpiry != nil { + in, out := &in.IgnoreClientCertificateExpiry, &out.IgnoreClientCertificateExpiry + *out = new(bool) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.TrustStoreArn != nil { + in, out := &in.TrustStoreArn, &out.TrustStoreArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualAuthenticationParameters. +func (in *MutualAuthenticationParameters) DeepCopy() *MutualAuthenticationParameters { + if in == nil { + return nil + } + out := new(MutualAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PathPatternInitParameters) DeepCopyInto(out *PathPatternInitParameters) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathPatternInitParameters. +func (in *PathPatternInitParameters) DeepCopy() *PathPatternInitParameters { + if in == nil { + return nil + } + out := new(PathPatternInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathPatternObservation) DeepCopyInto(out *PathPatternObservation) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathPatternObservation. +func (in *PathPatternObservation) DeepCopy() *PathPatternObservation { + if in == nil { + return nil + } + out := new(PathPatternObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathPatternParameters) DeepCopyInto(out *PathPatternParameters) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathPatternParameters. 
+func (in *PathPatternParameters) DeepCopy() *PathPatternParameters { + if in == nil { + return nil + } + out := new(PathPatternParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryStringInitParameters) DeepCopyInto(out *QueryStringInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringInitParameters. +func (in *QueryStringInitParameters) DeepCopy() *QueryStringInitParameters { + if in == nil { + return nil + } + out := new(QueryStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryStringObservation) DeepCopyInto(out *QueryStringObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringObservation. +func (in *QueryStringObservation) DeepCopy() *QueryStringObservation { + if in == nil { + return nil + } + out := new(QueryStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryStringParameters) DeepCopyInto(out *QueryStringParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringParameters. +func (in *QueryStringParameters) DeepCopy() *QueryStringParameters { + if in == nil { + return nil + } + out := new(QueryStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectInitParameters) DeepCopyInto(out *RedirectInitParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectInitParameters. +func (in *RedirectInitParameters) DeepCopy() *RedirectInitParameters { + if in == nil { + return nil + } + out := new(RedirectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedirectObservation) DeepCopyInto(out *RedirectObservation) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectObservation. +func (in *RedirectObservation) DeepCopy() *RedirectObservation { + if in == nil { + return nil + } + out := new(RedirectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectParameters) DeepCopyInto(out *RedirectParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectParameters. 
+func (in *RedirectParameters) DeepCopy() *RedirectParameters { + if in == nil { + return nil + } + out := new(RedirectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceIPInitParameters) DeepCopyInto(out *SourceIPInitParameters) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPInitParameters. +func (in *SourceIPInitParameters) DeepCopy() *SourceIPInitParameters { + if in == nil { + return nil + } + out := new(SourceIPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceIPObservation) DeepCopyInto(out *SourceIPObservation) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPObservation. +func (in *SourceIPObservation) DeepCopy() *SourceIPObservation { + if in == nil { + return nil + } + out := new(SourceIPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceIPParameters) DeepCopyInto(out *SourceIPParameters) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPParameters. +func (in *SourceIPParameters) DeepCopy() *SourceIPParameters { + if in == nil { + return nil + } + out := new(SourceIPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StickinessInitParameters) DeepCopyInto(out *StickinessInitParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StickinessInitParameters. +func (in *StickinessInitParameters) DeepCopy() *StickinessInitParameters { + if in == nil { + return nil + } + out := new(StickinessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StickinessObservation) DeepCopyInto(out *StickinessObservation) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StickinessObservation. 
+func (in *StickinessObservation) DeepCopy() *StickinessObservation { + if in == nil { + return nil + } + out := new(StickinessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StickinessParameters) DeepCopyInto(out *StickinessParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StickinessParameters. +func (in *StickinessParameters) DeepCopy() *StickinessParameters { + if in == nil { + return nil + } + out := new(StickinessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetMappingInitParameters) DeepCopyInto(out *SubnetMappingInitParameters) { + *out = *in + if in.AllocationID != nil { + in, out := &in.AllocationID, &out.AllocationID + *out = new(string) + **out = **in + } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } + if in.PrivateIPv4Address != nil { + in, out := &in.PrivateIPv4Address, &out.PrivateIPv4Address + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetMappingInitParameters. 
+func (in *SubnetMappingInitParameters) DeepCopy() *SubnetMappingInitParameters { + if in == nil { + return nil + } + out := new(SubnetMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetMappingObservation) DeepCopyInto(out *SubnetMappingObservation) { + *out = *in + if in.AllocationID != nil { + in, out := &in.AllocationID, &out.AllocationID + *out = new(string) + **out = **in + } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } + if in.OutpostID != nil { + in, out := &in.OutpostID, &out.OutpostID + *out = new(string) + **out = **in + } + if in.PrivateIPv4Address != nil { + in, out := &in.PrivateIPv4Address, &out.PrivateIPv4Address + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetMappingObservation. +func (in *SubnetMappingObservation) DeepCopy() *SubnetMappingObservation { + if in == nil { + return nil + } + out := new(SubnetMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubnetMappingParameters) DeepCopyInto(out *SubnetMappingParameters) { + *out = *in + if in.AllocationID != nil { + in, out := &in.AllocationID, &out.AllocationID + *out = new(string) + **out = **in + } + if in.IPv6Address != nil { + in, out := &in.IPv6Address, &out.IPv6Address + *out = new(string) + **out = **in + } + if in.PrivateIPv4Address != nil { + in, out := &in.PrivateIPv4Address, &out.PrivateIPv4Address + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetMappingParameters. +func (in *SubnetMappingParameters) DeepCopy() *SubnetMappingParameters { + if in == nil { + return nil + } + out := new(SubnetMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetFailoverInitParameters) DeepCopyInto(out *TargetFailoverInitParameters) { + *out = *in + if in.OnDeregistration != nil { + in, out := &in.OnDeregistration, &out.OnDeregistration + *out = new(string) + **out = **in + } + if in.OnUnhealthy != nil { + in, out := &in.OnUnhealthy, &out.OnUnhealthy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetFailoverInitParameters. 
+func (in *TargetFailoverInitParameters) DeepCopy() *TargetFailoverInitParameters { + if in == nil { + return nil + } + out := new(TargetFailoverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetFailoverObservation) DeepCopyInto(out *TargetFailoverObservation) { + *out = *in + if in.OnDeregistration != nil { + in, out := &in.OnDeregistration, &out.OnDeregistration + *out = new(string) + **out = **in + } + if in.OnUnhealthy != nil { + in, out := &in.OnUnhealthy, &out.OnUnhealthy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetFailoverObservation. +func (in *TargetFailoverObservation) DeepCopy() *TargetFailoverObservation { + if in == nil { + return nil + } + out := new(TargetFailoverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetFailoverParameters) DeepCopyInto(out *TargetFailoverParameters) { + *out = *in + if in.OnDeregistration != nil { + in, out := &in.OnDeregistration, &out.OnDeregistration + *out = new(string) + **out = **in + } + if in.OnUnhealthy != nil { + in, out := &in.OnUnhealthy, &out.OnUnhealthy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetFailoverParameters. +func (in *TargetFailoverParameters) DeepCopy() *TargetFailoverParameters { + if in == nil { + return nil + } + out := new(TargetFailoverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupInitParameters) DeepCopyInto(out *TargetGroupInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupInitParameters. +func (in *TargetGroupInitParameters) DeepCopy() *TargetGroupInitParameters { + if in == nil { + return nil + } + out := new(TargetGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupObservation) DeepCopyInto(out *TargetGroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupObservation. +func (in *TargetGroupObservation) DeepCopy() *TargetGroupObservation { + if in == nil { + return nil + } + out := new(TargetGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupParameters) DeepCopyInto(out *TargetGroupParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupParameters. +func (in *TargetGroupParameters) DeepCopy() *TargetGroupParameters { + if in == nil { + return nil + } + out := new(TargetGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetHealthStateInitParameters) DeepCopyInto(out *TargetHealthStateInitParameters) { + *out = *in + if in.EnableUnhealthyConnectionTermination != nil { + in, out := &in.EnableUnhealthyConnectionTermination, &out.EnableUnhealthyConnectionTermination + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetHealthStateInitParameters. +func (in *TargetHealthStateInitParameters) DeepCopy() *TargetHealthStateInitParameters { + if in == nil { + return nil + } + out := new(TargetHealthStateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetHealthStateObservation) DeepCopyInto(out *TargetHealthStateObservation) { + *out = *in + if in.EnableUnhealthyConnectionTermination != nil { + in, out := &in.EnableUnhealthyConnectionTermination, &out.EnableUnhealthyConnectionTermination + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetHealthStateObservation. +func (in *TargetHealthStateObservation) DeepCopy() *TargetHealthStateObservation { + if in == nil { + return nil + } + out := new(TargetHealthStateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetHealthStateParameters) DeepCopyInto(out *TargetHealthStateParameters) { + *out = *in + if in.EnableUnhealthyConnectionTermination != nil { + in, out := &in.EnableUnhealthyConnectionTermination, &out.EnableUnhealthyConnectionTermination + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetHealthStateParameters. +func (in *TargetHealthStateParameters) DeepCopy() *TargetHealthStateParameters { + if in == nil { + return nil + } + out := new(TargetHealthStateParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/elbv2/v1beta2/zz_generated.managed.go b/apis/elbv2/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..98565d86f5 --- /dev/null +++ b/apis/elbv2/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this LB. +func (mg *LB) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LB. 
+func (mg *LB) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LB. +func (mg *LB) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LB. +func (mg *LB) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LB. +func (mg *LB) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LB. +func (mg *LB) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LB. +func (mg *LB) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LB. +func (mg *LB) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LB. +func (mg *LB) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LB. +func (mg *LB) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LB. +func (mg *LB) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LB. +func (mg *LB) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LBListener. +func (mg *LBListener) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LBListener. +func (mg *LBListener) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LBListener. 
+func (mg *LBListener) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LBListener. +func (mg *LBListener) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LBListener. +func (mg *LBListener) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LBListener. +func (mg *LBListener) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LBListener. +func (mg *LBListener) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LBListener. +func (mg *LBListener) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LBListener. +func (mg *LBListener) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LBListener. +func (mg *LBListener) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LBListener. +func (mg *LBListener) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LBListener. +func (mg *LBListener) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LBListenerRule. +func (mg *LBListenerRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LBListenerRule. 
+func (mg *LBListenerRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LBListenerRule. +func (mg *LBListenerRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LBListenerRule. +func (mg *LBListenerRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LBListenerRule. +func (mg *LBListenerRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LBListenerRule. +func (mg *LBListenerRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LBListenerRule. +func (mg *LBListenerRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LBListenerRule. +func (mg *LBListenerRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LBListenerRule. +func (mg *LBListenerRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LBListenerRule. +func (mg *LBListenerRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LBListenerRule. +func (mg *LBListenerRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LBListenerRule. +func (mg *LBListenerRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LBTargetGroup. 
+func (mg *LBTargetGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LBTargetGroup. +func (mg *LBTargetGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LBTargetGroup. +func (mg *LBTargetGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LBTargetGroup. +func (mg *LBTargetGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LBTargetGroup. +func (mg *LBTargetGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LBTargetGroup. +func (mg *LBTargetGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LBTargetGroup. +func (mg *LBTargetGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LBTargetGroup. +func (mg *LBTargetGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LBTargetGroup. +func (mg *LBTargetGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LBTargetGroup. +func (mg *LBTargetGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LBTargetGroup. +func (mg *LBTargetGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LBTargetGroup. 
+func (mg *LBTargetGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/elbv2/v1beta2/zz_generated.managedlist.go b/apis/elbv2/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..725b4da41f --- /dev/null +++ b/apis/elbv2/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this LBList. +func (l *LBList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LBListenerList. +func (l *LBListenerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LBListenerRuleList. +func (l *LBListenerRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LBTargetGroupList. +func (l *LBTargetGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/elbv2/v1beta2/zz_generated.resolvers.go b/apis/elbv2/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..7374888e91 --- /dev/null +++ b/apis/elbv2/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,664 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this LB. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *LB) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.AccessLogs != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccessLogs.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccessLogs.BucketRef, + Selector: mg.Spec.ForProvider.AccessLogs.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccessLogs.Bucket") + } + mg.Spec.ForProvider.AccessLogs.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccessLogs.BucketRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroups), + 
Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupRefs, + Selector: mg.Spec.ForProvider.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroups") + } + mg.Spec.ForProvider.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupRefs = mrsp.ResolvedReferences + + for i3 := 0; i3 < len(mg.Spec.ForProvider.SubnetMapping); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetMapping[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SubnetMapping[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetMapping[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetMapping[i3].SubnetID") + } + mg.Spec.ForProvider.SubnetMapping[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetMapping[i3].SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Subnets), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetRefs, + Selector: mg.Spec.ForProvider.SubnetSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.Subnets") + } + mg.Spec.ForProvider.Subnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetRefs = mrsp.ResolvedReferences + + if mg.Spec.InitProvider.AccessLogs != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AccessLogs.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.AccessLogs.BucketRef, + Selector: mg.Spec.InitProvider.AccessLogs.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AccessLogs.Bucket") + } + mg.Spec.InitProvider.AccessLogs.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AccessLogs.BucketRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupRefs, + Selector: mg.Spec.InitProvider.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroups") + } + mg.Spec.InitProvider.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupRefs = mrsp.ResolvedReferences + + for i3 := 0; i3 < len(mg.Spec.InitProvider.SubnetMapping); i3++ { + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetMapping[i3].SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SubnetMapping[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetMapping[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetMapping[i3].SubnetID") + } + mg.Spec.InitProvider.SubnetMapping[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetMapping[i3].SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Subnets), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetRefs, + Selector: mg.Spec.InitProvider.SubnetSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Subnets") + } + mg.Spec.InitProvider.Subnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this LBListener. 
+func (mg *LBListener) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.DefaultAction); i3++ { + if mg.Spec.ForProvider.DefaultAction[i3].Forward != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.DefaultAction[i3].Forward.TargetGroup); i5++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultAction[i3].Forward.TargetGroup[i5].Arn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DefaultAction[i3].Forward.TargetGroup[i5].ArnRef, + Selector: mg.Spec.ForProvider.DefaultAction[i3].Forward.TargetGroup[i5].ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultAction[i3].Forward.TargetGroup[i5].Arn") + } + mg.Spec.ForProvider.DefaultAction[i3].Forward.TargetGroup[i5].Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultAction[i3].Forward.TargetGroup[i5].ArnRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.DefaultAction); i3++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultAction[i3].TargetGroupArn), + Extract: 
reference.ExternalName(), + Reference: mg.Spec.ForProvider.DefaultAction[i3].TargetGroupArnRef, + Selector: mg.Spec.ForProvider.DefaultAction[i3].TargetGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultAction[i3].TargetGroupArn") + } + mg.Spec.ForProvider.DefaultAction[i3].TargetGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultAction[i3].TargetGroupArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LB", "LBList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoadBalancerArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LoadBalancerArnRef, + Selector: mg.Spec.ForProvider.LoadBalancerArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoadBalancerArn") + } + mg.Spec.ForProvider.LoadBalancerArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LoadBalancerArnRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.DefaultAction); i3++ { + if mg.Spec.InitProvider.DefaultAction[i3].Forward != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.DefaultAction[i3].Forward.TargetGroup); i5++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultAction[i3].Forward.TargetGroup[i5].Arn), + Extract: 
reference.ExternalName(), + Reference: mg.Spec.InitProvider.DefaultAction[i3].Forward.TargetGroup[i5].ArnRef, + Selector: mg.Spec.InitProvider.DefaultAction[i3].Forward.TargetGroup[i5].ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultAction[i3].Forward.TargetGroup[i5].Arn") + } + mg.Spec.InitProvider.DefaultAction[i3].Forward.TargetGroup[i5].Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultAction[i3].Forward.TargetGroup[i5].ArnRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.DefaultAction); i3++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultAction[i3].TargetGroupArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DefaultAction[i3].TargetGroupArnRef, + Selector: mg.Spec.InitProvider.DefaultAction[i3].TargetGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultAction[i3].TargetGroupArn") + } + mg.Spec.InitProvider.DefaultAction[i3].TargetGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultAction[i3].TargetGroupArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LB", "LBList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoadBalancerArn), + Extract: 
reference.ExternalName(), + Reference: mg.Spec.InitProvider.LoadBalancerArnRef, + Selector: mg.Spec.InitProvider.LoadBalancerArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LoadBalancerArn") + } + mg.Spec.InitProvider.LoadBalancerArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LoadBalancerArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LBListenerRule. +func (mg *LBListenerRule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Action); i3++ { + if mg.Spec.ForProvider.Action[i3].AuthenticateCognito != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolArnRef, + Selector: mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolArn") + } + mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Action); i3++ { + if mg.Spec.ForProvider.Action[i3].AuthenticateCognito != nil { + { + 
m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolClient", "UserPoolClientList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolClientID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolClientIDRef, + Selector: mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolClientID") + } + mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolClientIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Action); i3++ { + if mg.Spec.ForProvider.Action[i3].AuthenticateCognito != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolDomain", "UserPoolDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolDomain), + Extract: resource.ExtractParamPath("domain", false), + Reference: mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolDomainRef, + Selector: mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolDomainSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolDomain") + } + 
mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolDomain = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Action[i3].AuthenticateCognito.UserPoolDomainRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Action); i3++ { + if mg.Spec.ForProvider.Action[i3].Forward != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.Action[i3].Forward.TargetGroup); i5++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Action[i3].Forward.TargetGroup[i5].Arn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Action[i3].Forward.TargetGroup[i5].ArnRef, + Selector: mg.Spec.ForProvider.Action[i3].Forward.TargetGroup[i5].ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Action[i3].Forward.TargetGroup[i5].Arn") + } + mg.Spec.ForProvider.Action[i3].Forward.TargetGroup[i5].Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Action[i3].Forward.TargetGroup[i5].ArnRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Action); i3++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Action[i3].TargetGroupArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Action[i3].TargetGroupArnRef, + Selector: 
mg.Spec.ForProvider.Action[i3].TargetGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Action[i3].TargetGroupArn") + } + mg.Spec.ForProvider.Action[i3].TargetGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Action[i3].TargetGroupArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBListener", "LBListenerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ListenerArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ListenerArnRef, + Selector: mg.Spec.ForProvider.ListenerArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ListenerArn") + } + mg.Spec.ForProvider.ListenerArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ListenerArnRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Action); i3++ { + if mg.Spec.InitProvider.Action[i3].AuthenticateCognito != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta2", "UserPool", "UserPoolList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolArnRef, + Selector: mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolArnSelector, + To: reference.To{List: l, Managed: m}, + }) + 
} + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolArn") + } + mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Action); i3++ { + if mg.Spec.InitProvider.Action[i3].AuthenticateCognito != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolClient", "UserPoolClientList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolClientID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolClientIDRef, + Selector: mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolClientID") + } + mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolClientIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Action); i3++ { + if mg.Spec.InitProvider.Action[i3].AuthenticateCognito != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolDomain", "UserPoolDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolDomain), + Extract: resource.ExtractParamPath("domain", false), + Reference: mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolDomainRef, + Selector: mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolDomainSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolDomain") + } + mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolDomain = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Action[i3].AuthenticateCognito.UserPoolDomainRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Action); i3++ { + if mg.Spec.InitProvider.Action[i3].Forward != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.Action[i3].Forward.TargetGroup); i5++ { + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Action[i3].Forward.TargetGroup[i5].Arn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Action[i3].Forward.TargetGroup[i5].ArnRef, + Selector: mg.Spec.InitProvider.Action[i3].Forward.TargetGroup[i5].ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Action[i3].Forward.TargetGroup[i5].Arn") + } + mg.Spec.InitProvider.Action[i3].Forward.TargetGroup[i5].Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Action[i3].Forward.TargetGroup[i5].ArnRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Action); i3++ { + { + m, l, err = 
apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBTargetGroup", "LBTargetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Action[i3].TargetGroupArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Action[i3].TargetGroupArnRef, + Selector: mg.Spec.InitProvider.Action[i3].TargetGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Action[i3].TargetGroupArn") + } + mg.Spec.InitProvider.Action[i3].TargetGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Action[i3].TargetGroupArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("elbv2.aws.upbound.io", "v1beta2", "LBListener", "LBListenerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ListenerArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ListenerArnRef, + Selector: mg.Spec.InitProvider.ListenerArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ListenerArn") + } + mg.Spec.InitProvider.ListenerArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ListenerArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LBTargetGroup. 
+func (mg *LBTargetGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCID") + } + mg.Spec.ForProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCID") + } + mg.Spec.InitProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/elbv2/v1beta2/zz_groupversion_info.go b/apis/elbv2/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..ba785428e5 --- /dev/null +++ b/apis/elbv2/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// 
SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=elbv2.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "elbv2.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/elbv2/v1beta2/zz_lb_terraformed.go b/apis/elbv2/v1beta2/zz_lb_terraformed.go new file mode 100755 index 0000000000..f492a05c06 --- /dev/null +++ b/apis/elbv2/v1beta2/zz_lb_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LB +func (mg *LB) GetTerraformResourceType() string { + return "aws_lb" +} + +// GetConnectionDetailsMapping for this LB +func (tr *LB) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LB +func (tr *LB) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LB +func (tr *LB) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LB +func (tr *LB) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LB +func (tr *LB) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LB +func (tr *LB) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LB +func (tr *LB) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LB +func (tr *LB) GetMergedParameters(shouldMergeInitProvider bool) 
(map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LB using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LB) LateInitialize(attrs []byte) (bool, error) { + params := &LBParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("AccessLogs")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LB) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elbv2/v1beta2/zz_lb_types.go b/apis/elbv2/v1beta2/zz_lb_types.go new file mode 100755 index 0000000000..274bf08743 --- /dev/null +++ b/apis/elbv2/v1beta2/zz_lb_types.go @@ -0,0 +1,579 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessLogsInitParameters struct { + + // S3 bucket name to store the logs in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Boolean to enable / disable access_logs. Defaults to false, even when bucket is specified. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // S3 bucket prefix. Logs are stored in the root if not configured. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type AccessLogsObservation struct { + + // S3 bucket name to store the logs in. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Boolean to enable / disable access_logs. Defaults to false, even when bucket is specified. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // S3 bucket prefix. Logs are stored in the root if not configured. 
+ Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type AccessLogsParameters struct { + + // S3 bucket name to store the logs in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Boolean to enable / disable access_logs. Defaults to false, even when bucket is specified. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // S3 bucket prefix. Logs are stored in the root if not configured. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type ConnectionLogsInitParameters struct { + + // S3 bucket name to store the logs in. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Boolean to enable / disable connection_logs. Defaults to false, even when bucket is specified. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // S3 bucket prefix. Logs are stored in the root if not configured. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type ConnectionLogsObservation struct { + + // S3 bucket name to store the logs in. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Boolean to enable / disable connection_logs. Defaults to false, even when bucket is specified. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // S3 bucket prefix. Logs are stored in the root if not configured. 
+ Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type ConnectionLogsParameters struct { + + // S3 bucket name to store the logs in. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // Boolean to enable / disable connection_logs. Defaults to false, even when bucket is specified. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // S3 bucket prefix. Logs are stored in the root if not configured. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type LBInitParameters struct { + + // Access Logs block. See below. + AccessLogs *AccessLogsInitParameters `json:"accessLogs,omitempty" tf:"access_logs,omitempty"` + + // Client keep alive value in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds. + ClientKeepAlive *float64 `json:"clientKeepAlive,omitempty" tf:"client_keep_alive,omitempty"` + + // Connection Logs block. See below. Only valid for Load Balancers of type application. + ConnectionLogs *ConnectionLogsInitParameters `json:"connectionLogs,omitempty" tf:"connection_logs,omitempty"` + + // ID of the customer owned ipv4 pool to use for this load balancer. + CustomerOwnedIPv4Pool *string `json:"customerOwnedIpv4Pool,omitempty" tf:"customer_owned_ipv4_pool,omitempty"` + + // How traffic is distributed among the load balancer Availability Zones. Possible values are any_availability_zone (default), availability_zone_affinity, or partial_availability_zone_affinity. See Availability Zone DNS affinity for additional details. Only valid for network type load balancers. + DNSRecordClientRoutingPolicy *string `json:"dnsRecordClientRoutingPolicy,omitempty" tf:"dns_record_client_routing_policy,omitempty"` + + // How the load balancer handles requests that might pose a security risk to an application due to HTTP desync. 
Valid values are monitor, defensive (default), strictest. + DesyncMitigationMode *string `json:"desyncMitigationMode,omitempty" tf:"desync_mitigation_mode,omitempty"` + + // Whether HTTP headers with header fields that are not valid are removed by the load balancer (true) or routed to targets (false). The default is false. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens. Only valid for Load Balancers of type application. + DropInvalidHeaderFields *bool `json:"dropInvalidHeaderFields,omitempty" tf:"drop_invalid_header_fields,omitempty"` + + // If true, cross-zone load balancing of the load balancer will be enabled. For network and gateway type load balancers, this feature is disabled by default (false). For application load balancer this feature is always enabled (true) and cannot be disabled. Defaults to false. + EnableCrossZoneLoadBalancing *bool `json:"enableCrossZoneLoadBalancing,omitempty" tf:"enable_cross_zone_load_balancing,omitempty"` + + // If true, deletion of the load balancer will be disabled via the AWS API. Defaults to false. + EnableDeletionProtection *bool `json:"enableDeletionProtection,omitempty" tf:"enable_deletion_protection,omitempty"` + + // Whether HTTP/2 is enabled in application load balancers. Defaults to true. + EnableHttp2 *bool `json:"enableHttp2,omitempty" tf:"enable_http2,omitempty"` + + // Whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. Only valid for Load Balancers of type application. Defaults to false + EnableTLSVersionAndCipherSuiteHeaders *bool `json:"enableTlsVersionAndCipherSuiteHeaders,omitempty" tf:"enable_tls_version_and_cipher_suite_headers,omitempty"` + + // Whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. Defaults to false. 
+ EnableWafFailOpen *bool `json:"enableWafFailOpen,omitempty" tf:"enable_waf_fail_open,omitempty"` + + // Whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer in application load balancers. Defaults to false. + EnableXffClientPort *bool `json:"enableXffClientPort,omitempty" tf:"enable_xff_client_port,omitempty"` + + // Whether inbound security group rules are enforced for traffic originating from a PrivateLink. Only valid for Load Balancers of type network. The possible values are on and off. + EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic *string `json:"enforceSecurityGroupInboundRulesOnPrivateLinkTraffic,omitempty" tf:"enforce_security_group_inbound_rules_on_private_link_traffic,omitempty"` + + // Type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 and dualstack. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // Time in seconds that the connection is allowed to be idle. Only valid for Load Balancers of type application. Default: 60. + IdleTimeout *float64 `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` + + // If true, the LB will be internal. Defaults to false. + Internal *bool `json:"internal,omitempty" tf:"internal,omitempty"` + + // Type of load balancer to create. Possible values are application, gateway, or network. The default value is application. + LoadBalancerType *string `json:"loadBalancerType,omitempty" tf:"load_balancer_type,omitempty"` + + // Name of the LB. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. Defaults to false. 
+ PreserveHostHeader *bool `json:"preserveHostHeader,omitempty" tf:"preserve_host_header,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // List of security group IDs to assign to the LB. Only valid for Load Balancers of type application or network. For load balancers of type network security groups cannot be added if none are currently present, and cannot all be removed once added. If either of these conditions are met, this will force a recreation of the resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Subnet mapping block. See below. For Load Balancers of type network subnet mappings can only be added. + SubnetMapping []SubnetMappingInitParameters `json:"subnetMapping,omitempty" tf:"subnet_mapping,omitempty"` + + // References to Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetRefs []v1.Reference `json:"subnetRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetSelector *v1.Selector `json:"subnetSelector,omitempty" tf:"-"` + + // List of subnet IDs to attach to the LB. For Load Balancers of type network subnets can only be added (see Availability Zones), deleting a subnet for load balancers of type network will force a recreation of the resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetRefs + // +crossplane:generate:reference:selectorFieldName=SubnetSelector + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Determines how the load balancer modifies the X-Forwarded-For header in the HTTP request before sending the request to the target. The possible values are append, preserve, and remove. Only valid for Load Balancers of type application. The default is append. + XffHeaderProcessingMode *string `json:"xffHeaderProcessingMode,omitempty" tf:"xff_header_processing_mode,omitempty"` +} + +type LBObservation struct { + + // Access Logs block. See below. + AccessLogs *AccessLogsObservation `json:"accessLogs,omitempty" tf:"access_logs,omitempty"` + + // ARN of the load balancer (matches id). + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN suffix for use with CloudWatch Metrics. + ArnSuffix *string `json:"arnSuffix,omitempty" tf:"arn_suffix,omitempty"` + + // Client keep alive value in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds. + ClientKeepAlive *float64 `json:"clientKeepAlive,omitempty" tf:"client_keep_alive,omitempty"` + + // Connection Logs block. See below. Only valid for Load Balancers of type application. + ConnectionLogs *ConnectionLogsObservation `json:"connectionLogs,omitempty" tf:"connection_logs,omitempty"` + + // ID of the customer owned ipv4 pool to use for this load balancer. + CustomerOwnedIPv4Pool *string `json:"customerOwnedIpv4Pool,omitempty" tf:"customer_owned_ipv4_pool,omitempty"` + + // DNS name of the load balancer. 
+ DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // How traffic is distributed among the load balancer Availability Zones. Possible values are any_availability_zone (default), availability_zone_affinity, or partial_availability_zone_affinity. See Availability Zone DNS affinity for additional details. Only valid for network type load balancers. + DNSRecordClientRoutingPolicy *string `json:"dnsRecordClientRoutingPolicy,omitempty" tf:"dns_record_client_routing_policy,omitempty"` + + // How the load balancer handles requests that might pose a security risk to an application due to HTTP desync. Valid values are monitor, defensive (default), strictest. + DesyncMitigationMode *string `json:"desyncMitigationMode,omitempty" tf:"desync_mitigation_mode,omitempty"` + + // Whether HTTP headers with header fields that are not valid are removed by the load balancer (true) or routed to targets (false). The default is false. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens. Only valid for Load Balancers of type application. + DropInvalidHeaderFields *bool `json:"dropInvalidHeaderFields,omitempty" tf:"drop_invalid_header_fields,omitempty"` + + // If true, cross-zone load balancing of the load balancer will be enabled. For network and gateway type load balancers, this feature is disabled by default (false). For application load balancer this feature is always enabled (true) and cannot be disabled. Defaults to false. + EnableCrossZoneLoadBalancing *bool `json:"enableCrossZoneLoadBalancing,omitempty" tf:"enable_cross_zone_load_balancing,omitempty"` + + // If true, deletion of the load balancer will be disabled via the AWS API. Defaults to false. + EnableDeletionProtection *bool `json:"enableDeletionProtection,omitempty" tf:"enable_deletion_protection,omitempty"` + + // Whether HTTP/2 is enabled in application load balancers. Defaults to true. 
+ EnableHttp2 *bool `json:"enableHttp2,omitempty" tf:"enable_http2,omitempty"` + + // Whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. Only valid for Load Balancers of type application. Defaults to false + EnableTLSVersionAndCipherSuiteHeaders *bool `json:"enableTlsVersionAndCipherSuiteHeaders,omitempty" tf:"enable_tls_version_and_cipher_suite_headers,omitempty"` + + // Whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. Defaults to false. + EnableWafFailOpen *bool `json:"enableWafFailOpen,omitempty" tf:"enable_waf_fail_open,omitempty"` + + // Whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer in application load balancers. Defaults to false. + EnableXffClientPort *bool `json:"enableXffClientPort,omitempty" tf:"enable_xff_client_port,omitempty"` + + // Whether inbound security group rules are enforced for traffic originating from a PrivateLink. Only valid for Load Balancers of type network. The possible values are on and off. + EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic *string `json:"enforceSecurityGroupInboundRulesOnPrivateLinkTraffic,omitempty" tf:"enforce_security_group_inbound_rules_on_private_link_traffic,omitempty"` + + // ARN of the load balancer (matches arn). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 and dualstack. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // Time in seconds that the connection is allowed to be idle. Only valid for Load Balancers of type application. Default: 60. 
+ IdleTimeout *float64 `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` + + // If true, the LB will be internal. Defaults to false. + Internal *bool `json:"internal,omitempty" tf:"internal,omitempty"` + + // Type of load balancer to create. Possible values are application, gateway, or network. The default value is application. + LoadBalancerType *string `json:"loadBalancerType,omitempty" tf:"load_balancer_type,omitempty"` + + // Name of the LB. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. Defaults to false. + PreserveHostHeader *bool `json:"preserveHostHeader,omitempty" tf:"preserve_host_header,omitempty"` + + // List of security group IDs to assign to the LB. Only valid for Load Balancers of type application or network. For load balancers of type network security groups cannot be added if none are currently present, and cannot all be removed once added. If either of these conditions are met, this will force a recreation of the resource. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Subnet mapping block. See below. For Load Balancers of type network subnet mappings can only be added. + SubnetMapping []SubnetMappingObservation `json:"subnetMapping,omitempty" tf:"subnet_mapping,omitempty"` + + // List of subnet IDs to attach to the LB. For Load Balancers of type network subnets can only be added (see Availability Zones), deleting a subnet for load balancers of type network will force a recreation of the resource. + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // ARN of the load balancer (matches arn). + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Determines how the load balancer modifies the X-Forwarded-For header in the HTTP request before sending the request to the target. The possible values are append, preserve, and remove. Only valid for Load Balancers of type application. The default is append. + XffHeaderProcessingMode *string `json:"xffHeaderProcessingMode,omitempty" tf:"xff_header_processing_mode,omitempty"` + + // Canonical hosted zone ID of the load balancer (to be used in a Route 53 Alias record). + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` +} + +type LBParameters struct { + + // Access Logs block. See below. + // +kubebuilder:validation:Optional + AccessLogs *AccessLogsParameters `json:"accessLogs,omitempty" tf:"access_logs,omitempty"` + + // Client keep alive value in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds. + // +kubebuilder:validation:Optional + ClientKeepAlive *float64 `json:"clientKeepAlive,omitempty" tf:"client_keep_alive,omitempty"` + + // Connection Logs block. See below. Only valid for Load Balancers of type application. + // +kubebuilder:validation:Optional + ConnectionLogs *ConnectionLogsParameters `json:"connectionLogs,omitempty" tf:"connection_logs,omitempty"` + + // ID of the customer owned ipv4 pool to use for this load balancer. + // +kubebuilder:validation:Optional + CustomerOwnedIPv4Pool *string `json:"customerOwnedIpv4Pool,omitempty" tf:"customer_owned_ipv4_pool,omitempty"` + + // How traffic is distributed among the load balancer Availability Zones. 
Possible values are any_availability_zone (default), availability_zone_affinity, or partial_availability_zone_affinity. See Availability Zone DNS affinity for additional details. Only valid for network type load balancers. + // +kubebuilder:validation:Optional + DNSRecordClientRoutingPolicy *string `json:"dnsRecordClientRoutingPolicy,omitempty" tf:"dns_record_client_routing_policy,omitempty"` + + // How the load balancer handles requests that might pose a security risk to an application due to HTTP desync. Valid values are monitor, defensive (default), strictest. + // +kubebuilder:validation:Optional + DesyncMitigationMode *string `json:"desyncMitigationMode,omitempty" tf:"desync_mitigation_mode,omitempty"` + + // Whether HTTP headers with header fields that are not valid are removed by the load balancer (true) or routed to targets (false). The default is false. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens. Only valid for Load Balancers of type application. + // +kubebuilder:validation:Optional + DropInvalidHeaderFields *bool `json:"dropInvalidHeaderFields,omitempty" tf:"drop_invalid_header_fields,omitempty"` + + // If true, cross-zone load balancing of the load balancer will be enabled. For network and gateway type load balancers, this feature is disabled by default (false). For application load balancer this feature is always enabled (true) and cannot be disabled. Defaults to false. + // +kubebuilder:validation:Optional + EnableCrossZoneLoadBalancing *bool `json:"enableCrossZoneLoadBalancing,omitempty" tf:"enable_cross_zone_load_balancing,omitempty"` + + // If true, deletion of the load balancer will be disabled via the AWS API. Defaults to false. + // +kubebuilder:validation:Optional + EnableDeletionProtection *bool `json:"enableDeletionProtection,omitempty" tf:"enable_deletion_protection,omitempty"` + + // Whether HTTP/2 is enabled in application load balancers. Defaults to true. 
+ // +kubebuilder:validation:Optional + EnableHttp2 *bool `json:"enableHttp2,omitempty" tf:"enable_http2,omitempty"` + + // Whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. Only valid for Load Balancers of type application. Defaults to false + // +kubebuilder:validation:Optional + EnableTLSVersionAndCipherSuiteHeaders *bool `json:"enableTlsVersionAndCipherSuiteHeaders,omitempty" tf:"enable_tls_version_and_cipher_suite_headers,omitempty"` + + // Whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. Defaults to false. + // +kubebuilder:validation:Optional + EnableWafFailOpen *bool `json:"enableWafFailOpen,omitempty" tf:"enable_waf_fail_open,omitempty"` + + // Whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer in application load balancers. Defaults to false. + // +kubebuilder:validation:Optional + EnableXffClientPort *bool `json:"enableXffClientPort,omitempty" tf:"enable_xff_client_port,omitempty"` + + // Whether inbound security group rules are enforced for traffic originating from a PrivateLink. Only valid for Load Balancers of type network. The possible values are on and off. + // +kubebuilder:validation:Optional + EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic *string `json:"enforceSecurityGroupInboundRulesOnPrivateLinkTraffic,omitempty" tf:"enforce_security_group_inbound_rules_on_private_link_traffic,omitempty"` + + // Type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 and dualstack. + // +kubebuilder:validation:Optional + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // Time in seconds that the connection is allowed to be idle. 
Only valid for Load Balancers of type application. Default: 60. + // +kubebuilder:validation:Optional + IdleTimeout *float64 `json:"idleTimeout,omitempty" tf:"idle_timeout,omitempty"` + + // If true, the LB will be internal. Defaults to false. + // +kubebuilder:validation:Optional + Internal *bool `json:"internal,omitempty" tf:"internal,omitempty"` + + // Type of load balancer to create. Possible values are application, gateway, or network. The default value is application. + // +kubebuilder:validation:Optional + LoadBalancerType *string `json:"loadBalancerType,omitempty" tf:"load_balancer_type,omitempty"` + + // Name of the LB. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. Defaults to false. + // +kubebuilder:validation:Optional + PreserveHostHeader *bool `json:"preserveHostHeader,omitempty" tf:"preserve_host_header,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // List of security group IDs to assign to the LB. Only valid for Load Balancers of type application or network. 
For load balancers of type network security groups cannot be added if none are currently present, and cannot all be removed once added. If either of these conditions are met, this will force a recreation of the resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Subnet mapping block. See below. For Load Balancers of type network subnet mappings can only be added. + // +kubebuilder:validation:Optional + SubnetMapping []SubnetMappingParameters `json:"subnetMapping,omitempty" tf:"subnet_mapping,omitempty"` + + // References to Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetRefs []v1.Reference `json:"subnetRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetSelector *v1.Selector `json:"subnetSelector,omitempty" tf:"-"` + + // List of subnet IDs to attach to the LB. For Load Balancers of type network subnets can only be added (see Availability Zones), deleting a subnet for load balancers of type network will force a recreation of the resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetRefs + // +crossplane:generate:reference:selectorFieldName=SubnetSelector + // +kubebuilder:validation:Optional + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Determines how the load balancer modifies the X-Forwarded-For header in the HTTP request before sending the request to the target. The possible values are append, preserve, and remove. Only valid for Load Balancers of type application. The default is append. + // +kubebuilder:validation:Optional + XffHeaderProcessingMode *string `json:"xffHeaderProcessingMode,omitempty" tf:"xff_header_processing_mode,omitempty"` +} + +type SubnetMappingInitParameters struct { + + // Allocation ID of the Elastic IP address for an internet-facing load balancer. + AllocationID *string `json:"allocationId,omitempty" tf:"allocation_id,omitempty"` + + // IPv6 address. You associate IPv6 CIDR blocks with your VPC and choose the subnets where you launch both internet-facing and internal Application Load Balancers or Network Load Balancers. + IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` + + // Private IPv4 address for an internal load balancer. + PrivateIPv4Address *string `json:"privateIpv4Address,omitempty" tf:"private_ipv4_address,omitempty"` + + // ID of the subnet of which to attach to the load balancer. You can specify only one subnet per Availability Zone. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type SubnetMappingObservation struct { + + // Allocation ID of the Elastic IP address for an internet-facing load balancer. 
+ AllocationID *string `json:"allocationId,omitempty" tf:"allocation_id,omitempty"` + + // IPv6 address. You associate IPv6 CIDR blocks with your VPC and choose the subnets where you launch both internet-facing and internal Application Load Balancers or Network Load Balancers. + IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` + + // ID of the Outpost containing the load balancer. + OutpostID *string `json:"outpostId,omitempty" tf:"outpost_id,omitempty"` + + // Private IPv4 address for an internal load balancer. + PrivateIPv4Address *string `json:"privateIpv4Address,omitempty" tf:"private_ipv4_address,omitempty"` + + // ID of the subnet of which to attach to the load balancer. You can specify only one subnet per Availability Zone. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type SubnetMappingParameters struct { + + // Allocation ID of the Elastic IP address for an internet-facing load balancer. + // +kubebuilder:validation:Optional + AllocationID *string `json:"allocationId,omitempty" tf:"allocation_id,omitempty"` + + // IPv6 address. You associate IPv6 CIDR blocks with your VPC and choose the subnets where you launch both internet-facing and internal Application Load Balancers or Network Load Balancers. + // +kubebuilder:validation:Optional + IPv6Address *string `json:"ipv6Address,omitempty" tf:"ipv6_address,omitempty"` + + // Private IPv4 address for an internal load balancer. + // +kubebuilder:validation:Optional + PrivateIPv4Address *string `json:"privateIpv4Address,omitempty" tf:"private_ipv4_address,omitempty"` + + // ID of the subnet of which to attach to the load balancer. You can specify only one subnet per Availability Zone. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +// LBSpec defines the desired state of LB +type LBSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LBParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LBInitParameters `json:"initProvider,omitempty"` +} + +// LBStatus defines the observed state of LB. +type LBStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LBObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LB is the Schema for the LBs API. Provides a Load Balancer resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec LBSpec `json:"spec"` + Status LBStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LBList contains a list of LBs +type LBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LB `json:"items"` +} + +// Repository type metadata. +var ( + LB_Kind = "LB" + LB_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LB_Kind}.String() + LB_KindAPIVersion = LB_Kind + "." + CRDGroupVersion.String() + LB_GroupVersionKind = CRDGroupVersion.WithKind(LB_Kind) +) + +func init() { + SchemeBuilder.Register(&LB{}, &LBList{}) +} diff --git a/apis/elbv2/v1beta2/zz_lblistener_terraformed.go b/apis/elbv2/v1beta2/zz_lblistener_terraformed.go new file mode 100755 index 0000000000..757999e4cd --- /dev/null +++ b/apis/elbv2/v1beta2/zz_lblistener_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LBListener +func (mg *LBListener) GetTerraformResourceType() string { + return "aws_lb_listener" +} + +// GetConnectionDetailsMapping for this LBListener +func (tr *LBListener) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"default_action[*].authenticate_oidc[*].client_secret": "defaultAction[*].authenticateOidc[*].clientSecretSecretRef"} +} + +// GetObservation of this LBListener +func (tr *LBListener) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LBListener +func (tr *LBListener) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LBListener +func (tr *LBListener) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LBListener +func (tr *LBListener) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LBListener +func (tr *LBListener) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LBListener +func (tr *LBListener) GetInitParameters() (map[string]any, error) { + p, err 
:= json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this LBListener
+func (tr *LBListener) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this LBListener using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *LBListener) LateInitialize(attrs []byte) (bool, error) {
+	params := &LBListenerParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LBListener) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elbv2/v1beta2/zz_lblistener_types.go b/apis/elbv2/v1beta2/zz_lblistener_types.go new file mode 100755 index 0000000000..19a33218ed --- /dev/null +++ b/apis/elbv2/v1beta2/zz_lblistener_types.go @@ -0,0 +1,807 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthenticateCognitoInitParameters struct { + + // Query parameters to include in the redirect request to the authorization endpoint. Max: 10. Detailed below. + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // Behavior if the user is not authenticated. Valid values are deny, allow and authenticate. + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // Set of user claims to be requested from the IdP. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Name of the cookie used to maintain session information. + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // Maximum duration of the authentication session, in seconds. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // ARN of the Cognito user pool. + UserPoolArn *string `json:"userPoolArn,omitempty" tf:"user_pool_arn,omitempty"` + + // ID of the Cognito user pool client. 
+ UserPoolClientID *string `json:"userPoolClientId,omitempty" tf:"user_pool_client_id,omitempty"` + + // Domain prefix or fully-qualified domain name of the Cognito user pool. + UserPoolDomain *string `json:"userPoolDomain,omitempty" tf:"user_pool_domain,omitempty"` +} + +type AuthenticateCognitoObservation struct { + + // Query parameters to include in the redirect request to the authorization endpoint. Max: 10. Detailed below. + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // Behavior if the user is not authenticated. Valid values are deny, allow and authenticate. + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // Set of user claims to be requested from the IdP. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Name of the cookie used to maintain session information. + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // Maximum duration of the authentication session, in seconds. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // ARN of the Cognito user pool. + UserPoolArn *string `json:"userPoolArn,omitempty" tf:"user_pool_arn,omitempty"` + + // ID of the Cognito user pool client. + UserPoolClientID *string `json:"userPoolClientId,omitempty" tf:"user_pool_client_id,omitempty"` + + // Domain prefix or fully-qualified domain name of the Cognito user pool. + UserPoolDomain *string `json:"userPoolDomain,omitempty" tf:"user_pool_domain,omitempty"` +} + +type AuthenticateCognitoParameters struct { + + // Query parameters to include in the redirect request to the authorization endpoint. Max: 10. Detailed below. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // Behavior if the user is not authenticated. Valid values are deny, allow and authenticate. + // +kubebuilder:validation:Optional + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // Set of user claims to be requested from the IdP. + // +kubebuilder:validation:Optional + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Name of the cookie used to maintain session information. + // +kubebuilder:validation:Optional + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // Maximum duration of the authentication session, in seconds. + // +kubebuilder:validation:Optional + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // ARN of the Cognito user pool. + // +kubebuilder:validation:Optional + UserPoolArn *string `json:"userPoolArn" tf:"user_pool_arn,omitempty"` + + // ID of the Cognito user pool client. + // +kubebuilder:validation:Optional + UserPoolClientID *string `json:"userPoolClientId" tf:"user_pool_client_id,omitempty"` + + // Domain prefix or fully-qualified domain name of the Cognito user pool. + // +kubebuilder:validation:Optional + UserPoolDomain *string `json:"userPoolDomain" tf:"user_pool_domain,omitempty"` +} + +type AuthenticateOidcInitParameters struct { + + // Query parameters to include in the redirect request to the authorization endpoint. Max: 10. + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // Authorization endpoint of the IdP. 
+ AuthorizationEndpoint *string `json:"authorizationEndpoint,omitempty" tf:"authorization_endpoint,omitempty"` + + // OAuth 2.0 client identifier. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // OAuth 2.0 client secret. + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // OIDC issuer identifier of the IdP. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // Behavior if the user is not authenticated. Valid values: deny, allow and authenticate + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // Set of user claims to be requested from the IdP. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Name of the cookie used to maintain session information. + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // Maximum duration of the authentication session, in seconds. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // Token endpoint of the IdP. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` + + // User info endpoint of the IdP. + UserInfoEndpoint *string `json:"userInfoEndpoint,omitempty" tf:"user_info_endpoint,omitempty"` +} + +type AuthenticateOidcObservation struct { + + // Query parameters to include in the redirect request to the authorization endpoint. Max: 10. + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // Authorization endpoint of the IdP. + AuthorizationEndpoint *string `json:"authorizationEndpoint,omitempty" tf:"authorization_endpoint,omitempty"` + + // OAuth 2.0 client identifier. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // OIDC issuer identifier of the IdP. 
+ Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // Behavior if the user is not authenticated. Valid values: deny, allow and authenticate + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // Set of user claims to be requested from the IdP. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Name of the cookie used to maintain session information. + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // Maximum duration of the authentication session, in seconds. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // Token endpoint of the IdP. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` + + // User info endpoint of the IdP. + UserInfoEndpoint *string `json:"userInfoEndpoint,omitempty" tf:"user_info_endpoint,omitempty"` +} + +type AuthenticateOidcParameters struct { + + // Query parameters to include in the redirect request to the authorization endpoint. Max: 10. + // +kubebuilder:validation:Optional + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // Authorization endpoint of the IdP. + // +kubebuilder:validation:Optional + AuthorizationEndpoint *string `json:"authorizationEndpoint" tf:"authorization_endpoint,omitempty"` + + // OAuth 2.0 client identifier. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // OAuth 2.0 client secret. + // +kubebuilder:validation:Optional + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // OIDC issuer identifier of the IdP. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer" tf:"issuer,omitempty"` + + // Behavior if the user is not authenticated. 
Valid values: deny, allow and authenticate + // +kubebuilder:validation:Optional + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // Set of user claims to be requested from the IdP. + // +kubebuilder:validation:Optional + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Name of the cookie used to maintain session information. + // +kubebuilder:validation:Optional + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // Maximum duration of the authentication session, in seconds. + // +kubebuilder:validation:Optional + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // Token endpoint of the IdP. + // +kubebuilder:validation:Optional + TokenEndpoint *string `json:"tokenEndpoint" tf:"token_endpoint,omitempty"` + + // User info endpoint of the IdP. + // +kubebuilder:validation:Optional + UserInfoEndpoint *string `json:"userInfoEndpoint" tf:"user_info_endpoint,omitempty"` +} + +type DefaultActionInitParameters struct { + + // Configuration block for using Amazon Cognito to authenticate users. Specify only when type is authenticate-cognito. Detailed below. + AuthenticateCognito *AuthenticateCognitoInitParameters `json:"authenticateCognito,omitempty" tf:"authenticate_cognito,omitempty"` + + // Configuration block for an identity provider that is compliant with OpenID Connect (OIDC). Specify only when type is authenticate-oidc. Detailed below. + AuthenticateOidc *AuthenticateOidcInitParameters `json:"authenticateOidc,omitempty" tf:"authenticate_oidc,omitempty"` + + // Information for creating an action that returns a custom HTTP response. Required if type is fixed-response. + FixedResponse *FixedResponseInitParameters `json:"fixedResponse,omitempty" tf:"fixed_response,omitempty"` + + // Configuration block for creating an action that distributes requests among one or more target groups. 
+ // Specify only if type is forward. + // Cannot be specified with target_group_arn. + // Detailed below. + Forward *ForwardInitParameters `json:"forward,omitempty" tf:"forward,omitempty"` + + // Order for the action. + // The action with the lowest value for order is performed first. + // Valid values are between 1 and 50000. + // Defaults to the position in the list of actions. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Configuration block for creating a redirect action. Required if type is redirect. Detailed below. + Redirect *RedirectInitParameters `json:"redirect,omitempty" tf:"redirect,omitempty"` + + // ARN of the Target Group to which to route traffic. + // Specify only if type is forward and you want to route to a single target group. + // To route to one or more target groups, use a forward block instead. + // Cannot be specified with forward. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnRef *v1.Reference `json:"targetGroupArnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnSelector *v1.Selector `json:"targetGroupArnSelector,omitempty" tf:"-"` + + // Type of routing action. Valid values are forward, redirect, fixed-response, authenticate-cognito and authenticate-oidc. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DefaultActionObservation struct { + + // Configuration block for using Amazon Cognito to authenticate users. Specify only when type is authenticate-cognito. Detailed below. 
+ AuthenticateCognito *AuthenticateCognitoObservation `json:"authenticateCognito,omitempty" tf:"authenticate_cognito,omitempty"` + + // Configuration block for an identity provider that is compliant with OpenID Connect (OIDC). Specify only when type is authenticate-oidc. Detailed below. + AuthenticateOidc *AuthenticateOidcObservation `json:"authenticateOidc,omitempty" tf:"authenticate_oidc,omitempty"` + + // Information for creating an action that returns a custom HTTP response. Required if type is fixed-response. + FixedResponse *FixedResponseObservation `json:"fixedResponse,omitempty" tf:"fixed_response,omitempty"` + + // Configuration block for creating an action that distributes requests among one or more target groups. + // Specify only if type is forward. + // Cannot be specified with target_group_arn. + // Detailed below. + Forward *ForwardObservation `json:"forward,omitempty" tf:"forward,omitempty"` + + // Order for the action. + // The action with the lowest value for order is performed first. + // Valid values are between 1 and 50000. + // Defaults to the position in the list of actions. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Configuration block for creating a redirect action. Required if type is redirect. Detailed below. + Redirect *RedirectObservation `json:"redirect,omitempty" tf:"redirect,omitempty"` + + // ARN of the Target Group to which to route traffic. + // Specify only if type is forward and you want to route to a single target group. + // To route to one or more target groups, use a forward block instead. + // Cannot be specified with forward. + TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` + + // Type of routing action. Valid values are forward, redirect, fixed-response, authenticate-cognito and authenticate-oidc. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DefaultActionParameters struct { + + // Configuration block for using Amazon Cognito to authenticate users. Specify only when type is authenticate-cognito. Detailed below. + // +kubebuilder:validation:Optional + AuthenticateCognito *AuthenticateCognitoParameters `json:"authenticateCognito,omitempty" tf:"authenticate_cognito,omitempty"` + + // Configuration block for an identity provider that is compliant with OpenID Connect (OIDC). Specify only when type is authenticate-oidc. Detailed below. + // +kubebuilder:validation:Optional + AuthenticateOidc *AuthenticateOidcParameters `json:"authenticateOidc,omitempty" tf:"authenticate_oidc,omitempty"` + + // Information for creating an action that returns a custom HTTP response. Required if type is fixed-response. + // +kubebuilder:validation:Optional + FixedResponse *FixedResponseParameters `json:"fixedResponse,omitempty" tf:"fixed_response,omitempty"` + + // Configuration block for creating an action that distributes requests among one or more target groups. + // Specify only if type is forward. + // Cannot be specified with target_group_arn. + // Detailed below. + // +kubebuilder:validation:Optional + Forward *ForwardParameters `json:"forward,omitempty" tf:"forward,omitempty"` + + // Order for the action. + // The action with the lowest value for order is performed first. + // Valid values are between 1 and 50000. + // Defaults to the position in the list of actions. + // +kubebuilder:validation:Optional + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Configuration block for creating a redirect action. Required if type is redirect. Detailed below. + // +kubebuilder:validation:Optional + Redirect *RedirectParameters `json:"redirect,omitempty" tf:"redirect,omitempty"` + + // ARN of the Target Group to which to route traffic. + // Specify only if type is forward and you want to route to a single target group. 
+ // To route to one or more target groups, use a forward block instead. + // Cannot be specified with forward. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + // +kubebuilder:validation:Optional + TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnRef *v1.Reference `json:"targetGroupArnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnSelector *v1.Selector `json:"targetGroupArnSelector,omitempty" tf:"-"` + + // Type of routing action. Valid values are forward, redirect, fixed-response, authenticate-cognito and authenticate-oidc. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type FixedResponseInitParameters struct { + + // Content type. Valid values are text/plain, text/css, text/html, application/javascript and application/json. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Message body. + MessageBody *string `json:"messageBody,omitempty" tf:"message_body,omitempty"` + + // HTTP response code. Valid values are 2XX, 4XX, or 5XX. + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type FixedResponseObservation struct { + + // Content type. Valid values are text/plain, text/css, text/html, application/javascript and application/json. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Message body. + MessageBody *string `json:"messageBody,omitempty" tf:"message_body,omitempty"` + + // HTTP response code. Valid values are 2XX, 4XX, or 5XX. + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type FixedResponseParameters struct { + + // Content type. 
Valid values are text/plain, text/css, text/html, application/javascript and application/json. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // Message body. + // +kubebuilder:validation:Optional + MessageBody *string `json:"messageBody,omitempty" tf:"message_body,omitempty"` + + // HTTP response code. Valid values are 2XX, 4XX, or 5XX. + // +kubebuilder:validation:Optional + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ForwardInitParameters struct { + + // Configuration block for target group stickiness for the rule. Detailed below. + Stickiness *StickinessInitParameters `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // Set of 1-5 target group blocks. Detailed below. + TargetGroup []TargetGroupInitParameters `json:"targetGroup,omitempty" tf:"target_group,omitempty"` +} + +type ForwardObservation struct { + + // Configuration block for target group stickiness for the rule. Detailed below. + Stickiness *StickinessObservation `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // Set of 1-5 target group blocks. Detailed below. + TargetGroup []TargetGroupObservation `json:"targetGroup,omitempty" tf:"target_group,omitempty"` +} + +type ForwardParameters struct { + + // Configuration block for target group stickiness for the rule. Detailed below. + // +kubebuilder:validation:Optional + Stickiness *StickinessParameters `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // Set of 1-5 target group blocks. Detailed below. + // +kubebuilder:validation:Optional + TargetGroup []TargetGroupParameters `json:"targetGroup" tf:"target_group,omitempty"` +} + +type LBListenerInitParameters struct { + + // Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if protocol is TLS. Valid values are HTTP1Only, HTTP2Only, HTTP2Optional, HTTP2Preferred, and None. 
+ AlpnPolicy *string `json:"alpnPolicy,omitempty" tf:"alpn_policy,omitempty"` + + // ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the aws_lb_listener_certificate resource. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Configuration block for default actions. Detailed below. + DefaultAction []DefaultActionInitParameters `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // ARN of the load balancer. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LB + LoadBalancerArn *string `json:"loadBalancerArn,omitempty" tf:"load_balancer_arn,omitempty"` + + // Reference to a LB in elbv2 to populate loadBalancerArn. + // +kubebuilder:validation:Optional + LoadBalancerArnRef *v1.Reference `json:"loadBalancerArnRef,omitempty" tf:"-"` + + // Selector for a LB in elbv2 to populate loadBalancerArn. + // +kubebuilder:validation:Optional + LoadBalancerArnSelector *v1.Selector `json:"loadBalancerArnSelector,omitempty" tf:"-"` + + // The mutual authentication configuration information. Detailed below. + MutualAuthentication *MutualAuthenticationInitParameters `json:"mutualAuthentication,omitempty" tf:"mutual_authentication,omitempty"` + + // Port on which the load balancer is listening. Not valid for Gateway Load Balancers. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol for connections from clients to the load balancer. For Application Load Balancers, valid values are HTTP and HTTPS, with a default of HTTP. For Network Load Balancers, valid values are TCP, TLS, UDP, and TCP_UDP. Not valid to use UDP or TCP_UDP if dual-stack mode is enabled. Not valid for Gateway Load Balancers. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Name of the SSL Policy for the listener. Required if protocol is HTTPS or TLS. 
+ SSLPolicy *string `json:"sslPolicy,omitempty" tf:"ssl_policy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LBListenerObservation struct { + + // Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if protocol is TLS. Valid values are HTTP1Only, HTTP2Only, HTTP2Optional, HTTP2Preferred, and None. + AlpnPolicy *string `json:"alpnPolicy,omitempty" tf:"alpn_policy,omitempty"` + + // ARN of the listener (matches id). + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the aws_lb_listener_certificate resource. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Configuration block for default actions. Detailed below. + DefaultAction []DefaultActionObservation `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // ARN of the listener (matches arn). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ARN of the load balancer. + LoadBalancerArn *string `json:"loadBalancerArn,omitempty" tf:"load_balancer_arn,omitempty"` + + // The mutual authentication configuration information. Detailed below. + MutualAuthentication *MutualAuthenticationObservation `json:"mutualAuthentication,omitempty" tf:"mutual_authentication,omitempty"` + + // Port on which the load balancer is listening. Not valid for Gateway Load Balancers. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol for connections from clients to the load balancer. For Application Load Balancers, valid values are HTTP and HTTPS, with a default of HTTP. For Network Load Balancers, valid values are TCP, TLS, UDP, and TCP_UDP. Not valid to use UDP or TCP_UDP if dual-stack mode is enabled. Not valid for Gateway Load Balancers. 
+ Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Name of the SSL Policy for the listener. Required if protocol is HTTPS or TLS. + SSLPolicy *string `json:"sslPolicy,omitempty" tf:"ssl_policy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type LBListenerParameters struct { + + // Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if protocol is TLS. Valid values are HTTP1Only, HTTP2Only, HTTP2Optional, HTTP2Preferred, and None. + // +kubebuilder:validation:Optional + AlpnPolicy *string `json:"alpnPolicy,omitempty" tf:"alpn_policy,omitempty"` + + // ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the aws_lb_listener_certificate resource. + // +kubebuilder:validation:Optional + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` + + // Configuration block for default actions. Detailed below. + // +kubebuilder:validation:Optional + DefaultAction []DefaultActionParameters `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // ARN of the load balancer. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LB + // +kubebuilder:validation:Optional + LoadBalancerArn *string `json:"loadBalancerArn,omitempty" tf:"load_balancer_arn,omitempty"` + + // Reference to a LB in elbv2 to populate loadBalancerArn. + // +kubebuilder:validation:Optional + LoadBalancerArnRef *v1.Reference `json:"loadBalancerArnRef,omitempty" tf:"-"` + + // Selector for a LB in elbv2 to populate loadBalancerArn. 
+ // +kubebuilder:validation:Optional + LoadBalancerArnSelector *v1.Selector `json:"loadBalancerArnSelector,omitempty" tf:"-"` + + // The mutual authentication configuration information. Detailed below. + // +kubebuilder:validation:Optional + MutualAuthentication *MutualAuthenticationParameters `json:"mutualAuthentication,omitempty" tf:"mutual_authentication,omitempty"` + + // Port on which the load balancer is listening. Not valid for Gateway Load Balancers. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol for connections from clients to the load balancer. For Application Load Balancers, valid values are HTTP and HTTPS, with a default of HTTP. For Network Load Balancers, valid values are TCP, TLS, UDP, and TCP_UDP. Not valid to use UDP or TCP_UDP if dual-stack mode is enabled. Not valid for Gateway Load Balancers. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Name of the SSL Policy for the listener. Required if protocol is HTTPS or TLS. + // +kubebuilder:validation:Optional + SSLPolicy *string `json:"sslPolicy,omitempty" tf:"ssl_policy,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MutualAuthenticationInitParameters struct { + + // Whether client certificate expiry is ignored. Default is false. + IgnoreClientCertificateExpiry *bool `json:"ignoreClientCertificateExpiry,omitempty" tf:"ignore_client_certificate_expiry,omitempty"` + + // Valid values are off, verify and passthrough. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // ARN of the elbv2 Trust Store. 
+ TrustStoreArn *string `json:"trustStoreArn,omitempty" tf:"trust_store_arn,omitempty"` +} + +type MutualAuthenticationObservation struct { + + // Whether client certificate expiry is ignored. Default is false. + IgnoreClientCertificateExpiry *bool `json:"ignoreClientCertificateExpiry,omitempty" tf:"ignore_client_certificate_expiry,omitempty"` + + // Valid values are off, verify and passthrough. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // ARN of the elbv2 Trust Store. + TrustStoreArn *string `json:"trustStoreArn,omitempty" tf:"trust_store_arn,omitempty"` +} + +type MutualAuthenticationParameters struct { + + // Whether client certificate expiry is ignored. Default is false. + // +kubebuilder:validation:Optional + IgnoreClientCertificateExpiry *bool `json:"ignoreClientCertificateExpiry,omitempty" tf:"ignore_client_certificate_expiry,omitempty"` + + // Valid values are off, verify and passthrough. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // ARN of the elbv2 Trust Store. + // +kubebuilder:validation:Optional + TrustStoreArn *string `json:"trustStoreArn,omitempty" tf:"trust_store_arn,omitempty"` +} + +type RedirectInitParameters struct { + + // Hostname. This component is not percent-encoded. The hostname can contain #{host}. Defaults to #{host}. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // Absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to /#{path}. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Port. Specify a value from 1 to 65535 or #{port}. Defaults to #{port}. + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol. Valid values are HTTP, HTTPS, or #{protocol}. Defaults to #{protocol}. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Query parameters, URL-encoded when necessary, but not percent-encoded. 
Do not include the leading "?". Defaults to #{query}. + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // HTTP redirect code. The redirect is either permanent (HTTP_301) or temporary (HTTP_302). + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type RedirectObservation struct { + + // Hostname. This component is not percent-encoded. The hostname can contain #{host}. Defaults to #{host}. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // Absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to /#{path}. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Port. Specify a value from 1 to 65535 or #{port}. Defaults to #{port}. + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol. Valid values are HTTP, HTTPS, or #{protocol}. Defaults to #{protocol}. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?". Defaults to #{query}. + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // HTTP redirect code. The redirect is either permanent (HTTP_301) or temporary (HTTP_302). + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type RedirectParameters struct { + + // Hostname. This component is not percent-encoded. The hostname can contain #{host}. Defaults to #{host}. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // Absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to /#{path}. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Port. Specify a value from 1 to 65535 or #{port}. Defaults to #{port}. 
+ // +kubebuilder:validation:Optional + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol. Valid values are HTTP, HTTPS, or #{protocol}. Defaults to #{protocol}. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?". Defaults to #{query}. + // +kubebuilder:validation:Optional + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // HTTP redirect code. The redirect is either permanent (HTTP_301) or temporary (HTTP_302). + // +kubebuilder:validation:Optional + StatusCode *string `json:"statusCode" tf:"status_code,omitempty"` +} + +type StickinessInitParameters struct { + + // Time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days). + Duration *float64 `json:"duration,omitempty" tf:"duration,omitempty"` + + // Whether target group stickiness is enabled. Default is false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type StickinessObservation struct { + + // Time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days). + Duration *float64 `json:"duration,omitempty" tf:"duration,omitempty"` + + // Whether target group stickiness is enabled. Default is false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type StickinessParameters struct { + + // Time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days). + // +kubebuilder:validation:Optional + Duration *float64 `json:"duration" tf:"duration,omitempty"` + + // Whether target group stickiness is enabled. Default is false. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type TargetGroupInitParameters struct { + + // ARN of the target group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Weight. The range is 0 to 999. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type TargetGroupObservation struct { + + // ARN of the target group. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Weight. The range is 0 to 999. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type TargetGroupParameters struct { + + // ARN of the target group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Weight. The range is 0 to 999. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +// LBListenerSpec defines the desired state of LBListener +type LBListenerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LBListenerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LBListenerInitParameters `json:"initProvider,omitempty"` +} + +// LBListenerStatus defines the observed state of LBListener. +type LBListenerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LBListenerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LBListener is the Schema for the LBListeners API. Provides a Load Balancer Listener resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LBListener struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultAction) || (has(self.initProvider) && has(self.initProvider.defaultAction))",message="spec.forProvider.defaultAction is a required parameter" + Spec LBListenerSpec `json:"spec"` + Status LBListenerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LBListenerList contains a list of LBListeners +type LBListenerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LBListener `json:"items"` +} + +// Repository type metadata. +var ( + LBListener_Kind = "LBListener" + LBListener_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LBListener_Kind}.String() + LBListener_KindAPIVersion = LBListener_Kind + "." 
+ CRDGroupVersion.String() + LBListener_GroupVersionKind = CRDGroupVersion.WithKind(LBListener_Kind) +) + +func init() { + SchemeBuilder.Register(&LBListener{}, &LBListenerList{}) +} diff --git a/apis/elbv2/v1beta2/zz_lblistenerrule_terraformed.go b/apis/elbv2/v1beta2/zz_lblistenerrule_terraformed.go new file mode 100755 index 0000000000..d9ba20515e --- /dev/null +++ b/apis/elbv2/v1beta2/zz_lblistenerrule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LBListenerRule +func (mg *LBListenerRule) GetTerraformResourceType() string { + return "aws_lb_listener_rule" +} + +// GetConnectionDetailsMapping for this LBListenerRule +func (tr *LBListenerRule) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"action[*].authenticate_oidc[*].client_secret": "action[*].authenticateOidc[*].clientSecretSecretRef"} +} + +// GetObservation of this LBListenerRule +func (tr *LBListenerRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LBListenerRule +func (tr *LBListenerRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LBListenerRule +func (tr *LBListenerRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this 
LBListenerRule
+func (tr *LBListenerRule) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this LBListenerRule
+func (tr *LBListenerRule) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this LBListenerRule
+func (tr *LBListenerRule) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this LBListenerRule
+func (tr *LBListenerRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this LBListenerRule using its observed tfState.
+// returns True if there are any spec changes for the resource. +func (tr *LBListenerRule) LateInitialize(attrs []byte) (bool, error) { + params := &LBListenerRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LBListenerRule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elbv2/v1beta2/zz_lblistenerrule_types.go b/apis/elbv2/v1beta2/zz_lblistenerrule_types.go new file mode 100755 index 0000000000..60689fa5b6 --- /dev/null +++ b/apis/elbv2/v1beta2/zz_lblistenerrule_types.go @@ -0,0 +1,1010 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionAuthenticateCognitoInitParameters struct { + + // The query parameters to include in the redirect request to the authorization endpoint. Max: 10. + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // The behavior if the user is not authenticated. Valid values: deny, allow and authenticate + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // The set of user claims to be requested from the IdP. 
+ Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // The name of the cookie used to maintain session information. + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // The maximum duration of the authentication session, in seconds. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // The ARN of the Cognito user pool. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + UserPoolArn *string `json:"userPoolArn,omitempty" tf:"user_pool_arn,omitempty"` + + // Reference to a UserPool in cognitoidp to populate userPoolArn. + // +kubebuilder:validation:Optional + UserPoolArnRef *v1.Reference `json:"userPoolArnRef,omitempty" tf:"-"` + + // Selector for a UserPool in cognitoidp to populate userPoolArn. + // +kubebuilder:validation:Optional + UserPoolArnSelector *v1.Selector `json:"userPoolArnSelector,omitempty" tf:"-"` + + // The ID of the Cognito user pool client. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolClient + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + UserPoolClientID *string `json:"userPoolClientId,omitempty" tf:"user_pool_client_id,omitempty"` + + // Reference to a UserPoolClient in cognitoidp to populate userPoolClientId. + // +kubebuilder:validation:Optional + UserPoolClientIDRef *v1.Reference `json:"userPoolClientIdRef,omitempty" tf:"-"` + + // Selector for a UserPoolClient in cognitoidp to populate userPoolClientId. + // +kubebuilder:validation:Optional + UserPoolClientIDSelector *v1.Selector `json:"userPoolClientIdSelector,omitempty" tf:"-"` + + // The domain prefix or fully-qualified domain name of the Cognito user pool. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("domain",false) + UserPoolDomain *string `json:"userPoolDomain,omitempty" tf:"user_pool_domain,omitempty"` + + // Reference to a UserPoolDomain in cognitoidp to populate userPoolDomain. + // +kubebuilder:validation:Optional + UserPoolDomainRef *v1.Reference `json:"userPoolDomainRef,omitempty" tf:"-"` + + // Selector for a UserPoolDomain in cognitoidp to populate userPoolDomain. + // +kubebuilder:validation:Optional + UserPoolDomainSelector *v1.Selector `json:"userPoolDomainSelector,omitempty" tf:"-"` +} + +type ActionAuthenticateCognitoObservation struct { + + // The query parameters to include in the redirect request to the authorization endpoint. Max: 10. + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // The behavior if the user is not authenticated. Valid values: deny, allow and authenticate + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // The set of user claims to be requested from the IdP. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // The name of the cookie used to maintain session information. + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // The maximum duration of the authentication session, in seconds. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // The ARN of the Cognito user pool. + UserPoolArn *string `json:"userPoolArn,omitempty" tf:"user_pool_arn,omitempty"` + + // The ID of the Cognito user pool client. 
+ UserPoolClientID *string `json:"userPoolClientId,omitempty" tf:"user_pool_client_id,omitempty"` + + // The domain prefix or fully-qualified domain name of the Cognito user pool. + UserPoolDomain *string `json:"userPoolDomain,omitempty" tf:"user_pool_domain,omitempty"` +} + +type ActionAuthenticateCognitoParameters struct { + + // The query parameters to include in the redirect request to the authorization endpoint. Max: 10. + // +kubebuilder:validation:Optional + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // The behavior if the user is not authenticated. Valid values: deny, allow and authenticate + // +kubebuilder:validation:Optional + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // The set of user claims to be requested from the IdP. + // +kubebuilder:validation:Optional + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // The name of the cookie used to maintain session information. + // +kubebuilder:validation:Optional + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // The maximum duration of the authentication session, in seconds. + // +kubebuilder:validation:Optional + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // The ARN of the Cognito user pool. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta2.UserPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + UserPoolArn *string `json:"userPoolArn,omitempty" tf:"user_pool_arn,omitempty"` + + // Reference to a UserPool in cognitoidp to populate userPoolArn. 
+ // +kubebuilder:validation:Optional + UserPoolArnRef *v1.Reference `json:"userPoolArnRef,omitempty" tf:"-"` + + // Selector for a UserPool in cognitoidp to populate userPoolArn. + // +kubebuilder:validation:Optional + UserPoolArnSelector *v1.Selector `json:"userPoolArnSelector,omitempty" tf:"-"` + + // The ID of the Cognito user pool client. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolClient + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + UserPoolClientID *string `json:"userPoolClientId,omitempty" tf:"user_pool_client_id,omitempty"` + + // Reference to a UserPoolClient in cognitoidp to populate userPoolClientId. + // +kubebuilder:validation:Optional + UserPoolClientIDRef *v1.Reference `json:"userPoolClientIdRef,omitempty" tf:"-"` + + // Selector for a UserPoolClient in cognitoidp to populate userPoolClientId. + // +kubebuilder:validation:Optional + UserPoolClientIDSelector *v1.Selector `json:"userPoolClientIdSelector,omitempty" tf:"-"` + + // The domain prefix or fully-qualified domain name of the Cognito user pool. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("domain",false) + // +kubebuilder:validation:Optional + UserPoolDomain *string `json:"userPoolDomain,omitempty" tf:"user_pool_domain,omitempty"` + + // Reference to a UserPoolDomain in cognitoidp to populate userPoolDomain. + // +kubebuilder:validation:Optional + UserPoolDomainRef *v1.Reference `json:"userPoolDomainRef,omitempty" tf:"-"` + + // Selector for a UserPoolDomain in cognitoidp to populate userPoolDomain. 
+ // +kubebuilder:validation:Optional + UserPoolDomainSelector *v1.Selector `json:"userPoolDomainSelector,omitempty" tf:"-"` +} + +type ActionAuthenticateOidcInitParameters struct { + + // The query parameters to include in the redirect request to the authorization endpoint. Max: 10. + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // The authorization endpoint of the IdP. + AuthorizationEndpoint *string `json:"authorizationEndpoint,omitempty" tf:"authorization_endpoint,omitempty"` + + // The OAuth 2.0 client identifier. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret. + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // The OIDC issuer identifier of the IdP. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The behavior if the user is not authenticated. Valid values: deny, allow and authenticate + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // The set of user claims to be requested from the IdP. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // The name of the cookie used to maintain session information. + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // The maximum duration of the authentication session, in seconds. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // The token endpoint of the IdP. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` + + // The user info endpoint of the IdP. 
+ UserInfoEndpoint *string `json:"userInfoEndpoint,omitempty" tf:"user_info_endpoint,omitempty"` +} + +type ActionAuthenticateOidcObservation struct { + + // The query parameters to include in the redirect request to the authorization endpoint. Max: 10. + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // The authorization endpoint of the IdP. + AuthorizationEndpoint *string `json:"authorizationEndpoint,omitempty" tf:"authorization_endpoint,omitempty"` + + // The OAuth 2.0 client identifier. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OIDC issuer identifier of the IdP. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The behavior if the user is not authenticated. Valid values: deny, allow and authenticate + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // The set of user claims to be requested from the IdP. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // The name of the cookie used to maintain session information. + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // The maximum duration of the authentication session, in seconds. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // The token endpoint of the IdP. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` + + // The user info endpoint of the IdP. + UserInfoEndpoint *string `json:"userInfoEndpoint,omitempty" tf:"user_info_endpoint,omitempty"` +} + +type ActionAuthenticateOidcParameters struct { + + // The query parameters to include in the redirect request to the authorization endpoint. Max: 10. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + AuthenticationRequestExtraParams map[string]*string `json:"authenticationRequestExtraParams,omitempty" tf:"authentication_request_extra_params,omitempty"` + + // The authorization endpoint of the IdP. + // +kubebuilder:validation:Optional + AuthorizationEndpoint *string `json:"authorizationEndpoint" tf:"authorization_endpoint,omitempty"` + + // The OAuth 2.0 client identifier. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret. + // +kubebuilder:validation:Optional + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // The OIDC issuer identifier of the IdP. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer" tf:"issuer,omitempty"` + + // The behavior if the user is not authenticated. Valid values: deny, allow and authenticate + // +kubebuilder:validation:Optional + OnUnauthenticatedRequest *string `json:"onUnauthenticatedRequest,omitempty" tf:"on_unauthenticated_request,omitempty"` + + // The set of user claims to be requested from the IdP. + // +kubebuilder:validation:Optional + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // The name of the cookie used to maintain session information. + // +kubebuilder:validation:Optional + SessionCookieName *string `json:"sessionCookieName,omitempty" tf:"session_cookie_name,omitempty"` + + // The maximum duration of the authentication session, in seconds. + // +kubebuilder:validation:Optional + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // The token endpoint of the IdP. + // +kubebuilder:validation:Optional + TokenEndpoint *string `json:"tokenEndpoint" tf:"token_endpoint,omitempty"` + + // The user info endpoint of the IdP. 
+ // +kubebuilder:validation:Optional + UserInfoEndpoint *string `json:"userInfoEndpoint" tf:"user_info_endpoint,omitempty"` +} + +type ActionFixedResponseInitParameters struct { + + // The content type. Valid values are text/plain, text/css, text/html, application/javascript and application/json. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // The message body. + MessageBody *string `json:"messageBody,omitempty" tf:"message_body,omitempty"` + + // The HTTP redirect code. The redirect is either permanent (HTTP_301) or temporary (HTTP_302). + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ActionFixedResponseObservation struct { + + // The content type. Valid values are text/plain, text/css, text/html, application/javascript and application/json. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // The message body. + MessageBody *string `json:"messageBody,omitempty" tf:"message_body,omitempty"` + + // The HTTP redirect code. The redirect is either permanent (HTTP_301) or temporary (HTTP_302). + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ActionFixedResponseParameters struct { + + // The content type. Valid values are text/plain, text/css, text/html, application/javascript and application/json. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // The message body. + // +kubebuilder:validation:Optional + MessageBody *string `json:"messageBody,omitempty" tf:"message_body,omitempty"` + + // The HTTP redirect code. The redirect is either permanent (HTTP_301) or temporary (HTTP_302). + // +kubebuilder:validation:Optional + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ActionForwardInitParameters struct { + + // The target group stickiness for the rule. 
+ Stickiness *ForwardStickinessInitParameters `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // One or more target groups block. + TargetGroup []ForwardTargetGroupInitParameters `json:"targetGroup,omitempty" tf:"target_group,omitempty"` +} + +type ActionForwardObservation struct { + + // The target group stickiness for the rule. + Stickiness *ForwardStickinessObservation `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // One or more target groups block. + TargetGroup []ForwardTargetGroupObservation `json:"targetGroup,omitempty" tf:"target_group,omitempty"` +} + +type ActionForwardParameters struct { + + // The target group stickiness for the rule. + // +kubebuilder:validation:Optional + Stickiness *ForwardStickinessParameters `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // One or more target groups block. + // +kubebuilder:validation:Optional + TargetGroup []ForwardTargetGroupParameters `json:"targetGroup" tf:"target_group,omitempty"` +} + +type ActionInitParameters struct { + + // Information for creating an authenticate action using Cognito. Required if type is authenticate-cognito. + AuthenticateCognito *ActionAuthenticateCognitoInitParameters `json:"authenticateCognito,omitempty" tf:"authenticate_cognito,omitempty"` + + // Information for creating an authenticate action using OIDC. Required if type is authenticate-oidc. + AuthenticateOidc *ActionAuthenticateOidcInitParameters `json:"authenticateOidc,omitempty" tf:"authenticate_oidc,omitempty"` + + // Information for creating an action that returns a custom HTTP response. Required if type is fixed-response. + FixedResponse *ActionFixedResponseInitParameters `json:"fixedResponse,omitempty" tf:"fixed_response,omitempty"` + + // Configuration block for creating an action that distributes requests among one or more target groups. + // Specify only if type is forward. + // Cannot be specified with target_group_arn. 
+ Forward *ActionForwardInitParameters `json:"forward,omitempty" tf:"forward,omitempty"` + + // Order for the action. + // The action with the lowest value for order is performed first. + // Valid values are between 1 and 50000. + // Defaults to the position in the list of actions. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Information for creating a redirect action. Required if type is redirect. + Redirect *ActionRedirectInitParameters `json:"redirect,omitempty" tf:"redirect,omitempty"` + + // ARN of the Target Group to which to route traffic. + // Specify only if type is forward and you want to route to a single target group. + // To route to one or more target groups, use a forward block instead. + // Cannot be specified with forward. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnRef *v1.Reference `json:"targetGroupArnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnSelector *v1.Selector `json:"targetGroupArnSelector,omitempty" tf:"-"` + + // The type of routing action. Valid values are forward, redirect, fixed-response, authenticate-cognito and authenticate-oidc. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ActionObservation struct { + + // Information for creating an authenticate action using Cognito. Required if type is authenticate-cognito. 
+ AuthenticateCognito *ActionAuthenticateCognitoObservation `json:"authenticateCognito,omitempty" tf:"authenticate_cognito,omitempty"` + + // Information for creating an authenticate action using OIDC. Required if type is authenticate-oidc. + AuthenticateOidc *ActionAuthenticateOidcObservation `json:"authenticateOidc,omitempty" tf:"authenticate_oidc,omitempty"` + + // Information for creating an action that returns a custom HTTP response. Required if type is fixed-response. + FixedResponse *ActionFixedResponseObservation `json:"fixedResponse,omitempty" tf:"fixed_response,omitempty"` + + // Configuration block for creating an action that distributes requests among one or more target groups. + // Specify only if type is forward. + // Cannot be specified with target_group_arn. + Forward *ActionForwardObservation `json:"forward,omitempty" tf:"forward,omitempty"` + + // Order for the action. + // The action with the lowest value for order is performed first. + // Valid values are between 1 and 50000. + // Defaults to the position in the list of actions. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Information for creating a redirect action. Required if type is redirect. + Redirect *ActionRedirectObservation `json:"redirect,omitempty" tf:"redirect,omitempty"` + + // ARN of the Target Group to which to route traffic. + // Specify only if type is forward and you want to route to a single target group. + // To route to one or more target groups, use a forward block instead. + // Cannot be specified with forward. + TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` + + // The type of routing action. Valid values are forward, redirect, fixed-response, authenticate-cognito and authenticate-oidc. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ActionParameters struct { + + // Information for creating an authenticate action using Cognito. Required if type is authenticate-cognito. 
+ // +kubebuilder:validation:Optional + AuthenticateCognito *ActionAuthenticateCognitoParameters `json:"authenticateCognito,omitempty" tf:"authenticate_cognito,omitempty"` + + // Information for creating an authenticate action using OIDC. Required if type is authenticate-oidc. + // +kubebuilder:validation:Optional + AuthenticateOidc *ActionAuthenticateOidcParameters `json:"authenticateOidc,omitempty" tf:"authenticate_oidc,omitempty"` + + // Information for creating an action that returns a custom HTTP response. Required if type is fixed-response. + // +kubebuilder:validation:Optional + FixedResponse *ActionFixedResponseParameters `json:"fixedResponse,omitempty" tf:"fixed_response,omitempty"` + + // Configuration block for creating an action that distributes requests among one or more target groups. + // Specify only if type is forward. + // Cannot be specified with target_group_arn. + // +kubebuilder:validation:Optional + Forward *ActionForwardParameters `json:"forward,omitempty" tf:"forward,omitempty"` + + // Order for the action. + // The action with the lowest value for order is performed first. + // Valid values are between 1 and 50000. + // Defaults to the position in the list of actions. + // +kubebuilder:validation:Optional + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Information for creating a redirect action. Required if type is redirect. + // +kubebuilder:validation:Optional + Redirect *ActionRedirectParameters `json:"redirect,omitempty" tf:"redirect,omitempty"` + + // ARN of the Target Group to which to route traffic. + // Specify only if type is forward and you want to route to a single target group. + // To route to one or more target groups, use a forward block instead. + // Cannot be specified with forward. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + TargetGroupArn *string `json:"targetGroupArn,omitempty" tf:"target_group_arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnRef *v1.Reference `json:"targetGroupArnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate targetGroupArn. + // +kubebuilder:validation:Optional + TargetGroupArnSelector *v1.Selector `json:"targetGroupArnSelector,omitempty" tf:"-"` + + // The type of routing action. Valid values are forward, redirect, fixed-response, authenticate-cognito and authenticate-oidc. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ActionRedirectInitParameters struct { + + // The hostname. This component is not percent-encoded. The hostname can contain #{host}. Defaults to #{host}. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to /#{path}. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port. Specify a value from 1 to 65535 or #{port}. Defaults to #{port}. + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // The protocol. Valid values are HTTP, HTTPS, or #{protocol}. Defaults to #{protocol}. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?". Defaults to #{query}. + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // The HTTP redirect code. 
The redirect is either permanent (HTTP_301) or temporary (HTTP_302). + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ActionRedirectObservation struct { + + // The hostname. This component is not percent-encoded. The hostname can contain #{host}. Defaults to #{host}. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to /#{path}. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port. Specify a value from 1 to 65535 or #{port}. Defaults to #{port}. + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // The protocol. Valid values are HTTP, HTTPS, or #{protocol}. Defaults to #{protocol}. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?". Defaults to #{query}. + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // The HTTP redirect code. The redirect is either permanent (HTTP_301) or temporary (HTTP_302). + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ActionRedirectParameters struct { + + // The hostname. This component is not percent-encoded. The hostname can contain #{host}. Defaults to #{host}. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to /#{path}. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port. Specify a value from 1 to 65535 or #{port}. Defaults to #{port}. + // +kubebuilder:validation:Optional + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // The protocol. 
Valid values are HTTP, HTTPS, or #{protocol}. Defaults to #{protocol}. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?". Defaults to #{query}. + // +kubebuilder:validation:Optional + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // The HTTP redirect code. The redirect is either permanent (HTTP_301) or temporary (HTTP_302). + // +kubebuilder:validation:Optional + StatusCode *string `json:"statusCode" tf:"status_code,omitempty"` +} + +type ConditionInitParameters struct { + + // HTTP headers to match. HTTP Header block fields documented below. + HTTPHeader *HTTPHeaderInitParameters `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // Contains a single values item which is a list of HTTP request methods or verbs to match. Maximum size is 40 characters. Only allowed characters are A-Z, hyphen (-) and underscore (_). Comparison is case sensitive. Wildcards are not supported. Only one needs to match for the condition to be satisfied. AWS recommends that GET and HEAD requests are routed in the same way because the response to a HEAD request may be cached. + HTTPRequestMethod *HTTPRequestMethodInitParameters `json:"httpRequestMethod,omitempty" tf:"http_request_method,omitempty"` + + // Contains a single values item which is a list of host header patterns to match. The maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. + HostHeader *HostHeaderInitParameters `json:"hostHeader,omitempty" tf:"host_header,omitempty"` + + // Contains a single values item which is a list of path patterns to match against the request URL. Maximum size of each pattern is 128 characters. 
Comparison is case sensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. Path pattern is compared only to the path of the URL, not to its query string. To compare against the query string, use a query_string condition. + PathPattern *PathPatternInitParameters `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // Query strings to match. Query String block fields documented below. + QueryString []QueryStringInitParameters `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Contains a single values item which is a list of source IP CIDR notations to match. You can use both IPv4 and IPv6 addresses. Wildcards are not supported. Condition is satisfied if the source IP address of the request matches one of the CIDR blocks. Condition is not satisfied by the addresses in the X-Forwarded-For header, use http_header condition instead. + SourceIP *SourceIPInitParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` +} + +type ConditionObservation struct { + + // HTTP headers to match. HTTP Header block fields documented below. + HTTPHeader *HTTPHeaderObservation `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // Contains a single values item which is a list of HTTP request methods or verbs to match. Maximum size is 40 characters. Only allowed characters are A-Z, hyphen (-) and underscore (_). Comparison is case sensitive. Wildcards are not supported. Only one needs to match for the condition to be satisfied. AWS recommends that GET and HEAD requests are routed in the same way because the response to a HEAD request may be cached. + HTTPRequestMethod *HTTPRequestMethodObservation `json:"httpRequestMethod,omitempty" tf:"http_request_method,omitempty"` + + // Contains a single values item which is a list of host header patterns to match. The maximum size of each pattern is 128 characters. 
Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. + HostHeader *HostHeaderObservation `json:"hostHeader,omitempty" tf:"host_header,omitempty"` + + // Contains a single values item which is a list of path patterns to match against the request URL. Maximum size of each pattern is 128 characters. Comparison is case sensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. Path pattern is compared only to the path of the URL, not to its query string. To compare against the query string, use a query_string condition. + PathPattern *PathPatternObservation `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // Query strings to match. Query String block fields documented below. + QueryString []QueryStringObservation `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Contains a single values item which is a list of source IP CIDR notations to match. You can use both IPv4 and IPv6 addresses. Wildcards are not supported. Condition is satisfied if the source IP address of the request matches one of the CIDR blocks. Condition is not satisfied by the addresses in the X-Forwarded-For header, use http_header condition instead. + SourceIP *SourceIPObservation `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` +} + +type ConditionParameters struct { + + // HTTP headers to match. HTTP Header block fields documented below. + // +kubebuilder:validation:Optional + HTTPHeader *HTTPHeaderParameters `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // Contains a single values item which is a list of HTTP request methods or verbs to match. Maximum size is 40 characters. Only allowed characters are A-Z, hyphen (-) and underscore (_). Comparison is case sensitive. 
Wildcards are not supported. Only one needs to match for the condition to be satisfied. AWS recommends that GET and HEAD requests are routed in the same way because the response to a HEAD request may be cached. + // +kubebuilder:validation:Optional + HTTPRequestMethod *HTTPRequestMethodParameters `json:"httpRequestMethod,omitempty" tf:"http_request_method,omitempty"` + + // Contains a single values item which is a list of host header patterns to match. The maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. + // +kubebuilder:validation:Optional + HostHeader *HostHeaderParameters `json:"hostHeader,omitempty" tf:"host_header,omitempty"` + + // Contains a single values item which is a list of path patterns to match against the request URL. Maximum size of each pattern is 128 characters. Comparison is case sensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. Path pattern is compared only to the path of the URL, not to its query string. To compare against the query string, use a query_string condition. + // +kubebuilder:validation:Optional + PathPattern *PathPatternParameters `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // Query strings to match. Query String block fields documented below. + // +kubebuilder:validation:Optional + QueryString []QueryStringParameters `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Contains a single values item which is a list of source IP CIDR notations to match. You can use both IPv4 and IPv6 addresses. Wildcards are not supported. Condition is satisfied if the source IP address of the request matches one of the CIDR blocks. 
Condition is not satisfied by the addresses in the X-Forwarded-For header, use http_header condition instead. + // +kubebuilder:validation:Optional + SourceIP *SourceIPParameters `json:"sourceIp,omitempty" tf:"source_ip,omitempty"` +} + +type ForwardStickinessInitParameters struct { + + // The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days). + Duration *float64 `json:"duration,omitempty" tf:"duration,omitempty"` + + // Indicates whether target group stickiness is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ForwardStickinessObservation struct { + + // The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days). + Duration *float64 `json:"duration,omitempty" tf:"duration,omitempty"` + + // Indicates whether target group stickiness is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ForwardStickinessParameters struct { + + // The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days). + // +kubebuilder:validation:Optional + Duration *float64 `json:"duration" tf:"duration,omitempty"` + + // Indicates whether target group stickiness is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ForwardTargetGroupInitParameters struct { + + // The Amazon Resource Name (ARN) of the target group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate arn. 
+ // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // The weight. The range is 0 to 999. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type ForwardTargetGroupObservation struct { + + // The Amazon Resource Name (ARN) of the target group. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The weight. The range is 0 to 999. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type ForwardTargetGroupParameters struct { + + // The Amazon Resource Name (ARN) of the target group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBTargetGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a LBTargetGroup in elbv2 to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a LBTargetGroup in elbv2 to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // The weight. The range is 0 to 999. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type HTTPHeaderInitParameters struct { + + // Name of HTTP header to search. The maximum size is 40 characters. Comparison is case insensitive. Only RFC7240 characters are supported. Wildcards are not supported. You cannot use HTTP header condition to specify the host header, use a host-header condition instead. + HTTPHeaderName *string `json:"httpHeaderName,omitempty" tf:"http_header_name,omitempty"` + + // List of header value patterns to match. 
Maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). If the same header appears multiple times in the request they will be searched in order until a match is found. Only one pattern needs to match for the condition to be satisfied. To require that all of the strings are a match, create one condition block per string. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HTTPHeaderObservation struct { + + // Name of HTTP header to search. The maximum size is 40 characters. Comparison is case insensitive. Only RFC7240 characters are supported. Wildcards are not supported. You cannot use HTTP header condition to specify the host header, use a host-header condition instead. + HTTPHeaderName *string `json:"httpHeaderName,omitempty" tf:"http_header_name,omitempty"` + + // List of header value patterns to match. Maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). If the same header appears multiple times in the request they will be searched in order until a match is found. Only one pattern needs to match for the condition to be satisfied. To require that all of the strings are a match, create one condition block per string. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HTTPHeaderParameters struct { + + // Name of HTTP header to search. The maximum size is 40 characters. Comparison is case insensitive. Only RFC7240 characters are supported. Wildcards are not supported. You cannot use HTTP header condition to specify the host header, use a host-header condition instead. 
+ // +kubebuilder:validation:Optional + HTTPHeaderName *string `json:"httpHeaderName" tf:"http_header_name,omitempty"` + + // List of header value patterns to match. Maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). If the same header appears multiple times in the request they will be searched in order until a match is found. Only one pattern needs to match for the condition to be satisfied. To require that all of the strings are a match, create one condition block per string. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type HTTPRequestMethodInitParameters struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HTTPRequestMethodObservation struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. 
+ // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HTTPRequestMethodParameters struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type HostHeaderInitParameters struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HostHeaderObservation struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. 
+ // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HostHeaderParameters struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type LBListenerRuleInitParameters struct { + + // An Action block. Action blocks are documented below. + Action []ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A Condition block. Multiple condition blocks of different types can be set and all must be satisfied for the rule to match. Condition blocks are documented below. + Condition []ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // The ARN of the listener to which to attach the rule. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBListener + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ListenerArn *string `json:"listenerArn,omitempty" tf:"listener_arn,omitempty"` + + // Reference to a LBListener in elbv2 to populate listenerArn. + // +kubebuilder:validation:Optional + ListenerArnRef *v1.Reference `json:"listenerArnRef,omitempty" tf:"-"` + + // Selector for a LBListener in elbv2 to populate listenerArn. + // +kubebuilder:validation:Optional + ListenerArnSelector *v1.Selector `json:"listenerArnSelector,omitempty" tf:"-"` + + // The priority for the rule between 1 and 50000. 
Leaving it unset will automatically set the rule with next available priority after currently existing highest rule. A listener can't have multiple rules with the same priority. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LBListenerRuleObservation struct { + + // An Action block. Action blocks are documented below. + Action []ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // The ARN of the rule (matches id) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A Condition block. Multiple condition blocks of different types can be set and all must be satisfied for the rule to match. Condition blocks are documented below. + Condition []ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + // The ARN of the rule (matches arn) + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ARN of the listener to which to attach the rule. + ListenerArn *string `json:"listenerArn,omitempty" tf:"listener_arn,omitempty"` + + // The priority for the rule between 1 and 50000. Leaving it unset will automatically set the rule with next available priority after currently existing highest rule. A listener can't have multiple rules with the same priority. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type LBListenerRuleParameters struct { + + // An Action block. Action blocks are documented below. 
+ // +kubebuilder:validation:Optional + Action []ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A Condition block. Multiple condition blocks of different types can be set and all must be satisfied for the rule to match. Condition blocks are documented below. + // +kubebuilder:validation:Optional + Condition []ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // The ARN of the listener to which to attach the rule. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elbv2/v1beta2.LBListener + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ListenerArn *string `json:"listenerArn,omitempty" tf:"listener_arn,omitempty"` + + // Reference to a LBListener in elbv2 to populate listenerArn. + // +kubebuilder:validation:Optional + ListenerArnRef *v1.Reference `json:"listenerArnRef,omitempty" tf:"-"` + + // Selector for a LBListener in elbv2 to populate listenerArn. + // +kubebuilder:validation:Optional + ListenerArnSelector *v1.Selector `json:"listenerArnSelector,omitempty" tf:"-"` + + // The priority for the rule between 1 and 50000. Leaving it unset will automatically set the rule with next available priority after currently existing highest rule. A listener can't have multiple rules with the same priority. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PathPatternInitParameters struct { + + // Query string pairs or values to match. Query String Value blocks documented below. 
Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type PathPatternObservation struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type PathPatternParameters struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type QueryStringInitParameters struct { + + // Query string key pattern to match. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Query string value pattern to match. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type QueryStringObservation struct { + + // Query string key pattern to match. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Query string value pattern to match. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type QueryStringParameters struct { + + // Query string key pattern to match. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Query string value pattern to match. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type SourceIPInitParameters struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SourceIPObservation struct { + + // Query string pairs or values to match. Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SourceIPParameters struct { + + // Query string pairs or values to match. 
Query String Value blocks documented below. Multiple values blocks can be specified, see example above. Maximum size of each string is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). To search for a literal '*' or '?' character in a query string, escape the character with a backslash (\). Only one pair needs to match for the condition to be satisfied. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +// LBListenerRuleSpec defines the desired state of LBListenerRule +type LBListenerRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LBListenerRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LBListenerRuleInitParameters `json:"initProvider,omitempty"` +} + +// LBListenerRuleStatus defines the observed state of LBListenerRule. +type LBListenerRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LBListenerRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LBListenerRule is the Schema for the LBListenerRules API. Provides a Load Balancer Listener Rule resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LBListenerRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.action) || (has(self.initProvider) && has(self.initProvider.action))",message="spec.forProvider.action is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.condition) || (has(self.initProvider) && has(self.initProvider.condition))",message="spec.forProvider.condition is a required parameter" + Spec LBListenerRuleSpec `json:"spec"` + Status LBListenerRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LBListenerRuleList contains a list of LBListenerRules +type LBListenerRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LBListenerRule `json:"items"` +} + +// Repository type metadata. +var ( + LBListenerRule_Kind = "LBListenerRule" + LBListenerRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LBListenerRule_Kind}.String() + LBListenerRule_KindAPIVersion = LBListenerRule_Kind + "." 
+ CRDGroupVersion.String() + LBListenerRule_GroupVersionKind = CRDGroupVersion.WithKind(LBListenerRule_Kind) +) + +func init() { + SchemeBuilder.Register(&LBListenerRule{}, &LBListenerRuleList{}) +} diff --git a/apis/elbv2/v1beta2/zz_lbtargetgroup_terraformed.go b/apis/elbv2/v1beta2/zz_lbtargetgroup_terraformed.go new file mode 100755 index 0000000000..f2ab04b753 --- /dev/null +++ b/apis/elbv2/v1beta2/zz_lbtargetgroup_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LBTargetGroup +func (mg *LBTargetGroup) GetTerraformResourceType() string { + return "aws_lb_target_group" +} + +// GetConnectionDetailsMapping for this LBTargetGroup +func (tr *LBTargetGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LBTargetGroup +func (tr *LBTargetGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LBTargetGroup +func (tr *LBTargetGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LBTargetGroup +func (tr *LBTargetGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LBTargetGroup +func (tr *LBTargetGroup) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this LBTargetGroup
+func (tr *LBTargetGroup) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this LBTargetGroup
+func (tr *LBTargetGroup) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this LBTargetGroup, merging spec.initProvider into spec.forProvider (without overwriting) when shouldMergeInitProvider is set
+func (tr *LBTargetGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this LBTargetGroup using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *LBTargetGroup) LateInitialize(attrs []byte) (bool, error) { + params := &LBTargetGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("TargetFailover")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LBTargetGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elbv2/v1beta2/zz_lbtargetgroup_types.go b/apis/elbv2/v1beta2/zz_lbtargetgroup_types.go new file mode 100755 index 0000000000..540b0b5d84 --- /dev/null +++ b/apis/elbv2/v1beta2/zz_lbtargetgroup_types.go @@ -0,0 +1,566 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HealthCheckInitParameters struct { + + // Whether health checks are enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Number of consecutive health check successes required before considering a target healthy. The range is 2-10. Defaults to 3. + HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"` + + // Approximate amount of time, in seconds, between health checks of an individual target. The range is 5-300. For lambda target groups, it needs to be greater than the timeout of the underlying lambda. Defaults to 30. 
+	Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"`
+
+	// Comma-separated individual values (e.g., "200,202") or a range of values (e.g., "200-299").
+	Matcher *string `json:"matcher,omitempty" tf:"matcher,omitempty"`
+
+	// (May be required) Destination for the health check request. Required for HTTP/HTTPS ALB and HTTP NLB. Only applies to HTTP/HTTPS.
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
+
+	// The port the load balancer uses when performing health checks on targets.
+	// Valid values are either traffic-port, to use the same port as the target group, or a valid port number between 1 and 65536.
+	// Default is traffic-port.
+	Port *string `json:"port,omitempty" tf:"port,omitempty"`
+
+	// Protocol the load balancer uses when performing health checks on targets.
+	// Must be one of TCP, HTTP, or HTTPS.
+	// The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS.
+	// Default is HTTP.
+	// Cannot be specified when the target_type is lambda.
+	Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"`
+
+	// Amount of time, in seconds, during which no response from a target means a failed health check. The range is 2–120 seconds. For target groups with a protocol of HTTP, the default is 6 seconds. For target groups with a protocol of TCP, TLS or HTTPS, the default is 10 seconds. For target groups with a protocol of GENEVE, the default is 5 seconds. If the target type is lambda, the default is 30 seconds.
+	Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"`
+
+	// Number of consecutive health check failures required before considering a target unhealthy. The range is 2-10. Defaults to 3.
+	UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"`
+}
+
+type HealthCheckObservation struct {
+
+	// Whether health checks are enabled. Defaults to true.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Number of consecutive health check successes required before considering a target healthy. The range is 2-10. Defaults to 3.
+	HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"`
+
+	// Approximate amount of time, in seconds, between health checks of an individual target. The range is 5-300. For lambda target groups, it needs to be greater than the timeout of the underlying lambda. Defaults to 30.
+	Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"`
+
+	// Comma-separated individual values (e.g., "200,202") or a range of values (e.g., "200-299").
+	Matcher *string `json:"matcher,omitempty" tf:"matcher,omitempty"`
+
+	// (May be required) Destination for the health check request. Required for HTTP/HTTPS ALB and HTTP NLB. Only applies to HTTP/HTTPS.
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
+
+	// The port the load balancer uses when performing health checks on targets.
+	// Valid values are either traffic-port, to use the same port as the target group, or a valid port number between 1 and 65536.
+	// Default is traffic-port.
+	Port *string `json:"port,omitempty" tf:"port,omitempty"`
+
+	// Protocol the load balancer uses when performing health checks on targets.
+	// Must be one of TCP, HTTP, or HTTPS.
+	// The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS.
+	// Default is HTTP.
+	// Cannot be specified when the target_type is lambda.
+	Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"`
+
+	// Amount of time, in seconds, during which no response from a target means a failed health check. The range is 2–120 seconds. For target groups with a protocol of HTTP, the default is 6 seconds. For target groups with a protocol of TCP, TLS or HTTPS, the default is 10 seconds. For target groups with a protocol of GENEVE, the default is 5 seconds.
If the target type is lambda, the default is 30 seconds.
+	Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"`
+
+	// Number of consecutive health check failures required before considering a target unhealthy. The range is 2-10. Defaults to 3.
+	UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"`
+}
+
+type HealthCheckParameters struct {
+
+	// Whether health checks are enabled. Defaults to true.
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Number of consecutive health check successes required before considering a target healthy. The range is 2-10. Defaults to 3.
+	// +kubebuilder:validation:Optional
+	HealthyThreshold *float64 `json:"healthyThreshold,omitempty" tf:"healthy_threshold,omitempty"`
+
+	// Approximate amount of time, in seconds, between health checks of an individual target. The range is 5-300. For lambda target groups, it needs to be greater than the timeout of the underlying lambda. Defaults to 30.
+	// +kubebuilder:validation:Optional
+	Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"`
+
+	// Comma-separated individual values (e.g., "200,202") or a range of values (e.g., "200-299").
+	// +kubebuilder:validation:Optional
+	Matcher *string `json:"matcher,omitempty" tf:"matcher,omitempty"`
+
+	// (May be required) Destination for the health check request. Required for HTTP/HTTPS ALB and HTTP NLB. Only applies to HTTP/HTTPS.
+	// +kubebuilder:validation:Optional
+	Path *string `json:"path,omitempty" tf:"path,omitempty"`
+
+	// The port the load balancer uses when performing health checks on targets.
+	// Valid values are either traffic-port, to use the same port as the target group, or a valid port number between 1 and 65536.
+	// Default is traffic-port.
+ // +kubebuilder:validation:Optional + Port *string `json:"port,omitempty" tf:"port,omitempty"` + + // Protocol the load balancer uses when performing health checks on targets. + // Must be one of TCP, HTTP, or HTTPS. + // The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. + // Default is HTTP. + // Cannot be specified when the target_type is lambda. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Amount of time, in seconds, during which no response from a target means a failed health check. The range is 2–120 seconds. For target groups with a protocol of HTTP, the default is 6 seconds. For target groups with a protocol of TCP, TLS or HTTPS, the default is 10 seconds. For target groups with a protocol of GENEVE, the default is 5 seconds. If the target type is lambda, the default is 30 seconds. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Number of consecutive health check failures required before considering a target unhealthy. The range is 2-10. Defaults to 3. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type LBTargetGroupInitParameters struct { + + // Whether to terminate connections at the end of the deregistration timeout on Network Load Balancers. See doc for more information. Default is false. + ConnectionTermination *bool `json:"connectionTermination,omitempty" tf:"connection_termination,omitempty"` + + // Amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. + DeregistrationDelay *string `json:"deregistrationDelay,omitempty" tf:"deregistration_delay,omitempty"` + + // Health Check configuration block. Detailed below. 
+ HealthCheck *HealthCheckInitParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // The type of IP addresses used by the target group, only supported when target type is set to ip. Possible values are ipv4 or ipv6. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // Whether the request and response headers exchanged between the load balancer and the Lambda function include arrays of values or strings. Only applies when target_type is lambda. Default is false. + LambdaMultiValueHeadersEnabled *bool `json:"lambdaMultiValueHeadersEnabled,omitempty" tf:"lambda_multi_value_headers_enabled,omitempty"` + + // Determines how the load balancer selects targets when routing requests. Only applicable for Application Load Balancer Target Groups. The value is round_robin, least_outstanding_requests, or weighted_random. The default is round_robin. + LoadBalancingAlgorithmType *string `json:"loadBalancingAlgorithmType,omitempty" tf:"load_balancing_algorithm_type,omitempty"` + + // Determines whether to enable target anomaly mitigation. Target anomaly mitigation is only supported by the weighted_random load balancing algorithm type. See doc for more information. The value is "on" or "off". The default is "off". + LoadBalancingAnomalyMitigation *string `json:"loadBalancingAnomalyMitigation,omitempty" tf:"load_balancing_anomaly_mitigation,omitempty"` + + // Indicates whether cross zone load balancing is enabled. The value is "true", "false" or "use_load_balancer_configuration". The default is "use_load_balancer_configuration". + LoadBalancingCrossZoneEnabled *string `json:"loadBalancingCrossZoneEnabled,omitempty" tf:"load_balancing_cross_zone_enabled,omitempty"` + + // Name of the target group. This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (May be required, Forces new resource) Port on which targets receive traffic, unless overridden when registering a specific target. Required when target_type is instance, ip or alb. Does not apply when target_type is lambda. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Whether client IP preservation is enabled. See doc for more information. + PreserveClientIP *string `json:"preserveClientIp,omitempty" tf:"preserve_client_ip,omitempty"` + + // (May be required, Forces new resource) Protocol to use for routing traffic to the targets. + // Should be one of GENEVE, HTTP, HTTPS, TCP, TCP_UDP, TLS, or UDP. + // Required when target_type is instance, ip, or alb. + // Does not apply when target_type is lambda. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Only applicable when protocol is HTTP or HTTPS. The protocol version. Specify GRPC to send requests to targets using gRPC. Specify HTTP2 to send requests to targets using HTTP/2. The default is HTTP1, which sends requests to targets using HTTP/1.1 + ProtocolVersion *string `json:"protocolVersion,omitempty" tf:"protocol_version,omitempty"` + + // Whether to enable support for proxy protocol v2 on Network Load Balancers. See doc for more information. Default is false. + ProxyProtocolV2 *bool `json:"proxyProtocolV2,omitempty" tf:"proxy_protocol_v2,omitempty"` + + // Amount time for targets to warm up before the load balancer sends them a full share of requests. The range is 30-900 seconds or 0 to disable. The default value is 0 seconds. + SlowStart *float64 `json:"slowStart,omitempty" tf:"slow_start,omitempty"` + + // Stickiness configuration block. Detailed below. + Stickiness *LBTargetGroupStickinessInitParameters `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Target failover block. Only applicable for Gateway Load Balancer target groups. See target_failover for more information. + TargetFailover []TargetFailoverInitParameters `json:"targetFailover,omitempty" tf:"target_failover,omitempty"` + + // Target health state block. Only applicable for Network Load Balancer target groups when protocol is TCP or TLS. See target_health_state for more information. + TargetHealthState []TargetHealthStateInitParameters `json:"targetHealthState,omitempty" tf:"target_health_state,omitempty"` + + // Type of target that you must specify when registering targets with this target group. + // See doc for supported values. + // The default is instance. + TargetType *string `json:"targetType,omitempty" tf:"target_type,omitempty"` + + // Identifier of the VPC in which to create the target group. Required when target_type is instance, ip or alb. Does not apply when target_type is lambda. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type LBTargetGroupObservation struct { + + // ARN of the Target Group (matches id). + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ARN suffix for use with CloudWatch Metrics. + ArnSuffix *string `json:"arnSuffix,omitempty" tf:"arn_suffix,omitempty"` + + // Whether to terminate connections at the end of the deregistration timeout on Network Load Balancers. See doc for more information. Default is false. 
+ ConnectionTermination *bool `json:"connectionTermination,omitempty" tf:"connection_termination,omitempty"` + + // Amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. + DeregistrationDelay *string `json:"deregistrationDelay,omitempty" tf:"deregistration_delay,omitempty"` + + // Health Check configuration block. Detailed below. + HealthCheck *HealthCheckObservation `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // ARN of the Target Group (matches arn). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The type of IP addresses used by the target group, only supported when target type is set to ip. Possible values are ipv4 or ipv6. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // Whether the request and response headers exchanged between the load balancer and the Lambda function include arrays of values or strings. Only applies when target_type is lambda. Default is false. + LambdaMultiValueHeadersEnabled *bool `json:"lambdaMultiValueHeadersEnabled,omitempty" tf:"lambda_multi_value_headers_enabled,omitempty"` + + // ARNs of the Load Balancers associated with the Target Group. + // +listType=set + LoadBalancerArns []*string `json:"loadBalancerArns,omitempty" tf:"load_balancer_arns,omitempty"` + + // Determines how the load balancer selects targets when routing requests. Only applicable for Application Load Balancer Target Groups. The value is round_robin, least_outstanding_requests, or weighted_random. The default is round_robin. + LoadBalancingAlgorithmType *string `json:"loadBalancingAlgorithmType,omitempty" tf:"load_balancing_algorithm_type,omitempty"` + + // Determines whether to enable target anomaly mitigation. Target anomaly mitigation is only supported by the weighted_random load balancing algorithm type. See doc for more information. 
The value is "on" or "off". The default is "off". + LoadBalancingAnomalyMitigation *string `json:"loadBalancingAnomalyMitigation,omitempty" tf:"load_balancing_anomaly_mitigation,omitempty"` + + // Indicates whether cross zone load balancing is enabled. The value is "true", "false" or "use_load_balancer_configuration". The default is "use_load_balancer_configuration". + LoadBalancingCrossZoneEnabled *string `json:"loadBalancingCrossZoneEnabled,omitempty" tf:"load_balancing_cross_zone_enabled,omitempty"` + + // Name of the target group. This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (May be required, Forces new resource) Port on which targets receive traffic, unless overridden when registering a specific target. Required when target_type is instance, ip or alb. Does not apply when target_type is lambda. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Whether client IP preservation is enabled. See doc for more information. + PreserveClientIP *string `json:"preserveClientIp,omitempty" tf:"preserve_client_ip,omitempty"` + + // (May be required, Forces new resource) Protocol to use for routing traffic to the targets. + // Should be one of GENEVE, HTTP, HTTPS, TCP, TCP_UDP, TLS, or UDP. + // Required when target_type is instance, ip, or alb. + // Does not apply when target_type is lambda. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Only applicable when protocol is HTTP or HTTPS. The protocol version. Specify GRPC to send requests to targets using gRPC. Specify HTTP2 to send requests to targets using HTTP/2. 
The default is HTTP1, which sends requests to targets using HTTP/1.1 + ProtocolVersion *string `json:"protocolVersion,omitempty" tf:"protocol_version,omitempty"` + + // Whether to enable support for proxy protocol v2 on Network Load Balancers. See doc for more information. Default is false. + ProxyProtocolV2 *bool `json:"proxyProtocolV2,omitempty" tf:"proxy_protocol_v2,omitempty"` + + // Amount time for targets to warm up before the load balancer sends them a full share of requests. The range is 30-900 seconds or 0 to disable. The default value is 0 seconds. + SlowStart *float64 `json:"slowStart,omitempty" tf:"slow_start,omitempty"` + + // Stickiness configuration block. Detailed below. + Stickiness *LBTargetGroupStickinessObservation `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Target failover block. Only applicable for Gateway Load Balancer target groups. See target_failover for more information. + TargetFailover []TargetFailoverObservation `json:"targetFailover,omitempty" tf:"target_failover,omitempty"` + + // Target health state block. Only applicable for Network Load Balancer target groups when protocol is TCP or TLS. See target_health_state for more information. + TargetHealthState []TargetHealthStateObservation `json:"targetHealthState,omitempty" tf:"target_health_state,omitempty"` + + // Type of target that you must specify when registering targets with this target group. + // See doc for supported values. + // The default is instance. + TargetType *string `json:"targetType,omitempty" tf:"target_type,omitempty"` + + // Identifier of the VPC in which to create the target group. 
Required when target_type is instance, ip or alb. Does not apply when target_type is lambda. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type LBTargetGroupParameters struct { + + // Whether to terminate connections at the end of the deregistration timeout on Network Load Balancers. See doc for more information. Default is false. + // +kubebuilder:validation:Optional + ConnectionTermination *bool `json:"connectionTermination,omitempty" tf:"connection_termination,omitempty"` + + // Amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. + // +kubebuilder:validation:Optional + DeregistrationDelay *string `json:"deregistrationDelay,omitempty" tf:"deregistration_delay,omitempty"` + + // Health Check configuration block. Detailed below. + // +kubebuilder:validation:Optional + HealthCheck *HealthCheckParameters `json:"healthCheck,omitempty" tf:"health_check,omitempty"` + + // The type of IP addresses used by the target group, only supported when target type is set to ip. Possible values are ipv4 or ipv6. + // +kubebuilder:validation:Optional + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // Whether the request and response headers exchanged between the load balancer and the Lambda function include arrays of values or strings. Only applies when target_type is lambda. Default is false. + // +kubebuilder:validation:Optional + LambdaMultiValueHeadersEnabled *bool `json:"lambdaMultiValueHeadersEnabled,omitempty" tf:"lambda_multi_value_headers_enabled,omitempty"` + + // Determines how the load balancer selects targets when routing requests. Only applicable for Application Load Balancer Target Groups. The value is round_robin, least_outstanding_requests, or weighted_random. The default is round_robin. 
+ // +kubebuilder:validation:Optional + LoadBalancingAlgorithmType *string `json:"loadBalancingAlgorithmType,omitempty" tf:"load_balancing_algorithm_type,omitempty"` + + // Determines whether to enable target anomaly mitigation. Target anomaly mitigation is only supported by the weighted_random load balancing algorithm type. See doc for more information. The value is "on" or "off". The default is "off". + // +kubebuilder:validation:Optional + LoadBalancingAnomalyMitigation *string `json:"loadBalancingAnomalyMitigation,omitempty" tf:"load_balancing_anomaly_mitigation,omitempty"` + + // Indicates whether cross zone load balancing is enabled. The value is "true", "false" or "use_load_balancer_configuration". The default is "use_load_balancer_configuration". + // +kubebuilder:validation:Optional + LoadBalancingCrossZoneEnabled *string `json:"loadBalancingCrossZoneEnabled,omitempty" tf:"load_balancing_cross_zone_enabled,omitempty"` + + // Name of the target group. This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // (May be required, Forces new resource) Port on which targets receive traffic, unless overridden when registering a specific target. Required when target_type is instance, ip or alb. Does not apply when target_type is lambda. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Whether client IP preservation is enabled. See doc for more information. + // +kubebuilder:validation:Optional + PreserveClientIP *string `json:"preserveClientIp,omitempty" tf:"preserve_client_ip,omitempty"` + + // (May be required, Forces new resource) Protocol to use for routing traffic to the targets. + // Should be one of GENEVE, HTTP, HTTPS, TCP, TCP_UDP, TLS, or UDP. 
+ // Required when target_type is instance, ip, or alb. + // Does not apply when target_type is lambda. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Only applicable when protocol is HTTP or HTTPS. The protocol version. Specify GRPC to send requests to targets using gRPC. Specify HTTP2 to send requests to targets using HTTP/2. The default is HTTP1, which sends requests to targets using HTTP/1.1 + // +kubebuilder:validation:Optional + ProtocolVersion *string `json:"protocolVersion,omitempty" tf:"protocol_version,omitempty"` + + // Whether to enable support for proxy protocol v2 on Network Load Balancers. See doc for more information. Default is false. + // +kubebuilder:validation:Optional + ProxyProtocolV2 *bool `json:"proxyProtocolV2,omitempty" tf:"proxy_protocol_v2,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Amount time for targets to warm up before the load balancer sends them a full share of requests. The range is 30-900 seconds or 0 to disable. The default value is 0 seconds. + // +kubebuilder:validation:Optional + SlowStart *float64 `json:"slowStart,omitempty" tf:"slow_start,omitempty"` + + // Stickiness configuration block. Detailed below. + // +kubebuilder:validation:Optional + Stickiness *LBTargetGroupStickinessParameters `json:"stickiness,omitempty" tf:"stickiness,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Target failover block. Only applicable for Gateway Load Balancer target groups. See target_failover for more information. 
+ // +kubebuilder:validation:Optional + TargetFailover []TargetFailoverParameters `json:"targetFailover,omitempty" tf:"target_failover,omitempty"` + + // Target health state block. Only applicable for Network Load Balancer target groups when protocol is TCP or TLS. See target_health_state for more information. + // +kubebuilder:validation:Optional + TargetHealthState []TargetHealthStateParameters `json:"targetHealthState,omitempty" tf:"target_health_state,omitempty"` + + // Type of target that you must specify when registering targets with this target group. + // See doc for supported values. + // The default is instance. + // +kubebuilder:validation:Optional + TargetType *string `json:"targetType,omitempty" tf:"target_type,omitempty"` + + // Identifier of the VPC in which to create the target group. Required when target_type is instance, ip or alb. Does not apply when target_type is lambda. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type LBTargetGroupStickinessInitParameters struct { + + // Only used when the type is lb_cookie. The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds). + CookieDuration *float64 `json:"cookieDuration,omitempty" tf:"cookie_duration,omitempty"` + + // Name of the application based cookie. 
AWSALB, AWSALBAPP, and AWSALBTG prefixes are reserved and cannot be used. Only needed when type is app_cookie. + CookieName *string `json:"cookieName,omitempty" tf:"cookie_name,omitempty"` + + // Whether health checks are enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The type of sticky sessions. The only current possible values are lb_cookie, app_cookie for ALBs, source_ip for NLBs, and source_ip_dest_ip, source_ip_dest_ip_proto for GWLBs. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LBTargetGroupStickinessObservation struct { + + // Only used when the type is lb_cookie. The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds). + CookieDuration *float64 `json:"cookieDuration,omitempty" tf:"cookie_duration,omitempty"` + + // Name of the application based cookie. AWSALB, AWSALBAPP, and AWSALBTG prefixes are reserved and cannot be used. Only needed when type is app_cookie. + CookieName *string `json:"cookieName,omitempty" tf:"cookie_name,omitempty"` + + // Whether health checks are enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The type of sticky sessions. The only current possible values are lb_cookie, app_cookie for ALBs, source_ip for NLBs, and source_ip_dest_ip, source_ip_dest_ip_proto for GWLBs. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LBTargetGroupStickinessParameters struct { + + // Only used when the type is lb_cookie. The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). 
The default value is 1 day (86400 seconds). + // +kubebuilder:validation:Optional + CookieDuration *float64 `json:"cookieDuration,omitempty" tf:"cookie_duration,omitempty"` + + // Name of the application based cookie. AWSALB, AWSALBAPP, and AWSALBTG prefixes are reserved and cannot be used. Only needed when type is app_cookie. + // +kubebuilder:validation:Optional + CookieName *string `json:"cookieName,omitempty" tf:"cookie_name,omitempty"` + + // Whether health checks are enabled. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The type of sticky sessions. The only current possible values are lb_cookie, app_cookie for ALBs, source_ip for NLBs, and source_ip_dest_ip, source_ip_dest_ip_proto for GWLBs. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type TargetFailoverInitParameters struct { + + // Indicates how the GWLB handles existing flows when a target is deregistered. Possible values are rebalance and no_rebalance. Must match the attribute value set for on_unhealthy. Default: no_rebalance. + OnDeregistration *string `json:"onDeregistration,omitempty" tf:"on_deregistration,omitempty"` + + // Indicates how the GWLB handles existing flows when a target is unhealthy. Possible values are rebalance and no_rebalance. Must match the attribute value set for on_deregistration. Default: no_rebalance. + OnUnhealthy *string `json:"onUnhealthy,omitempty" tf:"on_unhealthy,omitempty"` +} + +type TargetFailoverObservation struct { + + // Indicates how the GWLB handles existing flows when a target is deregistered. Possible values are rebalance and no_rebalance. Must match the attribute value set for on_unhealthy. Default: no_rebalance. + OnDeregistration *string `json:"onDeregistration,omitempty" tf:"on_deregistration,omitempty"` + + // Indicates how the GWLB handles existing flows when a target is unhealthy. 
Possible values are rebalance and no_rebalance. Must match the attribute value set for on_deregistration. Default: no_rebalance. + OnUnhealthy *string `json:"onUnhealthy,omitempty" tf:"on_unhealthy,omitempty"` +} + +type TargetFailoverParameters struct { + + // Indicates how the GWLB handles existing flows when a target is deregistered. Possible values are rebalance and no_rebalance. Must match the attribute value set for on_unhealthy. Default: no_rebalance. + // +kubebuilder:validation:Optional + OnDeregistration *string `json:"onDeregistration" tf:"on_deregistration,omitempty"` + + // Indicates how the GWLB handles existing flows when a target is unhealthy. Possible values are rebalance and no_rebalance. Must match the attribute value set for on_deregistration. Default: no_rebalance. + // +kubebuilder:validation:Optional + OnUnhealthy *string `json:"onUnhealthy" tf:"on_unhealthy,omitempty"` +} + +type TargetHealthStateInitParameters struct { + + // Indicates whether the load balancer terminates connections to unhealthy targets. Possible values are true or false. Default: true. + EnableUnhealthyConnectionTermination *bool `json:"enableUnhealthyConnectionTermination,omitempty" tf:"enable_unhealthy_connection_termination,omitempty"` +} + +type TargetHealthStateObservation struct { + + // Indicates whether the load balancer terminates connections to unhealthy targets. Possible values are true or false. Default: true. + EnableUnhealthyConnectionTermination *bool `json:"enableUnhealthyConnectionTermination,omitempty" tf:"enable_unhealthy_connection_termination,omitempty"` +} + +type TargetHealthStateParameters struct { + + // Indicates whether the load balancer terminates connections to unhealthy targets. Possible values are true or false. Default: true. 
+ // +kubebuilder:validation:Optional + EnableUnhealthyConnectionTermination *bool `json:"enableUnhealthyConnectionTermination" tf:"enable_unhealthy_connection_termination,omitempty"` +} + +// LBTargetGroupSpec defines the desired state of LBTargetGroup +type LBTargetGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LBTargetGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LBTargetGroupInitParameters `json:"initProvider,omitempty"` +} + +// LBTargetGroupStatus defines the observed state of LBTargetGroup. +type LBTargetGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LBTargetGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LBTargetGroup is the Schema for the LBTargetGroups API. Provides a Target Group resource for use with Load Balancers. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LBTargetGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec LBTargetGroupSpec `json:"spec"` + Status LBTargetGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LBTargetGroupList contains a list of LBTargetGroups +type LBTargetGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LBTargetGroup `json:"items"` +} + +// Repository type metadata. +var ( + LBTargetGroup_Kind = "LBTargetGroup" + LBTargetGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LBTargetGroup_Kind}.String() + LBTargetGroup_KindAPIVersion = LBTargetGroup_Kind + "." 
+ CRDGroupVersion.String() + LBTargetGroup_GroupVersionKind = CRDGroupVersion.WithKind(LBTargetGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&LBTargetGroup{}, &LBTargetGroupList{}) +} diff --git a/apis/emrserverless/v1beta1/zz_generated.conversion_spokes.go b/apis/emrserverless/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ada25ff080 --- /dev/null +++ b/apis/emrserverless/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Application to the hub type. +func (tr *Application) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Application type. 
+func (tr *Application) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/emrserverless/v1beta2/zz_application_terraformed.go b/apis/emrserverless/v1beta2/zz_application_terraformed.go new file mode 100755 index 0000000000..0a1fc16ad9 --- /dev/null +++ b/apis/emrserverless/v1beta2/zz_application_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Application +func (mg *Application) GetTerraformResourceType() string { + return "aws_emrserverless_application" +} + +// GetConnectionDetailsMapping for this Application +func (tr *Application) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Application +func (tr *Application) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Application +func (tr *Application) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Application +func (tr *Application) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Application +func (tr *Application) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Application +func (tr *Application) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Application +func (tr *Application) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Application +func (tr *Application) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Application using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Application) LateInitialize(attrs []byte) (bool, error) { + params := &ApplicationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Application) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/emrserverless/v1beta2/zz_application_types.go b/apis/emrserverless/v1beta2/zz_application_types.go new file mode 100755 index 0000000000..58b4df8838 --- /dev/null +++ b/apis/emrserverless/v1beta2/zz_application_types.go @@ -0,0 +1,452 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationInitParameters struct { + + // – The CPU architecture of an application. Valid values are ARM64 or X86_64. Default value is X86_64. + Architecture *string `json:"architecture,omitempty" tf:"architecture,omitempty"` + + // – The configuration for an application to automatically start on job submission. 
+ AutoStartConfiguration *AutoStartConfigurationInitParameters `json:"autoStartConfiguration,omitempty" tf:"auto_start_configuration,omitempty"` + + // – The configuration for an application to automatically stop after a certain amount of time being idle. + AutoStopConfiguration *AutoStopConfigurationInitParameters `json:"autoStopConfiguration,omitempty" tf:"auto_stop_configuration,omitempty"` + + // – The image configuration applied to all worker types. + ImageConfiguration *ImageConfigurationInitParameters `json:"imageConfiguration,omitempty" tf:"image_configuration,omitempty"` + + // – The capacity to initialize when the application is created. + InitialCapacity []InitialCapacityInitParameters `json:"initialCapacity,omitempty" tf:"initial_capacity,omitempty"` + + // – The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. + MaximumCapacity *MaximumCapacityInitParameters `json:"maximumCapacity,omitempty" tf:"maximum_capacity,omitempty"` + + // – The name of the application. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // – The network configuration for customer VPC connectivity. + NetworkConfiguration *NetworkConfigurationInitParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // – The EMR release version associated with the application. + ReleaseLabel *string `json:"releaseLabel,omitempty" tf:"release_label,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // – The type of application you want to start, such as spark or hive. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ApplicationObservation struct { + + // – The CPU architecture of an application. Valid values are ARM64 or X86_64. 
Default value is X86_64. + Architecture *string `json:"architecture,omitempty" tf:"architecture,omitempty"` + + // ARN of the cluster. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // – The configuration for an application to automatically start on job submission. + AutoStartConfiguration *AutoStartConfigurationObservation `json:"autoStartConfiguration,omitempty" tf:"auto_start_configuration,omitempty"` + + // – The configuration for an application to automatically stop after a certain amount of time being idle. + AutoStopConfiguration *AutoStopConfigurationObservation `json:"autoStopConfiguration,omitempty" tf:"auto_stop_configuration,omitempty"` + + // The ID of the cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // – The image configuration applied to all worker types. + ImageConfiguration *ImageConfigurationObservation `json:"imageConfiguration,omitempty" tf:"image_configuration,omitempty"` + + // – The capacity to initialize when the application is created. + InitialCapacity []InitialCapacityObservation `json:"initialCapacity,omitempty" tf:"initial_capacity,omitempty"` + + // – The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. + MaximumCapacity *MaximumCapacityObservation `json:"maximumCapacity,omitempty" tf:"maximum_capacity,omitempty"` + + // – The name of the application. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // – The network configuration for customer VPC connectivity. + NetworkConfiguration *NetworkConfigurationObservation `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // – The EMR release version associated with the application. + ReleaseLabel *string `json:"releaseLabel,omitempty" tf:"release_label,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // – The type of application you want to start, such as spark or hive. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ApplicationParameters struct { + + // – The CPU architecture of an application. Valid values are ARM64 or X86_64. Default value is X86_64. + // +kubebuilder:validation:Optional + Architecture *string `json:"architecture,omitempty" tf:"architecture,omitempty"` + + // – The configuration for an application to automatically start on job submission. + // +kubebuilder:validation:Optional + AutoStartConfiguration *AutoStartConfigurationParameters `json:"autoStartConfiguration,omitempty" tf:"auto_start_configuration,omitempty"` + + // – The configuration for an application to automatically stop after a certain amount of time being idle. + // +kubebuilder:validation:Optional + AutoStopConfiguration *AutoStopConfigurationParameters `json:"autoStopConfiguration,omitempty" tf:"auto_stop_configuration,omitempty"` + + // – The image configuration applied to all worker types. + // +kubebuilder:validation:Optional + ImageConfiguration *ImageConfigurationParameters `json:"imageConfiguration,omitempty" tf:"image_configuration,omitempty"` + + // – The capacity to initialize when the application is created. + // +kubebuilder:validation:Optional + InitialCapacity []InitialCapacityParameters `json:"initialCapacity,omitempty" tf:"initial_capacity,omitempty"` + + // – The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. 
+ // +kubebuilder:validation:Optional + MaximumCapacity *MaximumCapacityParameters `json:"maximumCapacity,omitempty" tf:"maximum_capacity,omitempty"` + + // – The name of the application. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // – The network configuration for customer VPC connectivity. + // +kubebuilder:validation:Optional + NetworkConfiguration *NetworkConfigurationParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // – The EMR release version associated with the application. + // +kubebuilder:validation:Optional + ReleaseLabel *string `json:"releaseLabel,omitempty" tf:"release_label,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // – The type of application you want to start, such as spark or hive. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AutoStartConfigurationInitParameters struct { + + // Enables the application to automatically start on job submission. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AutoStartConfigurationObservation struct { + + // Enables the application to automatically start on job submission. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AutoStartConfigurationParameters struct { + + // Enables the application to automatically start on job submission. Defaults to true. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AutoStopConfigurationInitParameters struct { + + // Enables the application to automatically start on job submission. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The amount of idle time in minutes after which your application will automatically stop. Defaults to 15 minutes. + IdleTimeoutMinutes *float64 `json:"idleTimeoutMinutes,omitempty" tf:"idle_timeout_minutes,omitempty"` +} + +type AutoStopConfigurationObservation struct { + + // Enables the application to automatically start on job submission. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The amount of idle time in minutes after which your application will automatically stop. Defaults to 15 minutes. + IdleTimeoutMinutes *float64 `json:"idleTimeoutMinutes,omitempty" tf:"idle_timeout_minutes,omitempty"` +} + +type AutoStopConfigurationParameters struct { + + // Enables the application to automatically start on job submission. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The amount of idle time in minutes after which your application will automatically stop. Defaults to 15 minutes. + // +kubebuilder:validation:Optional + IdleTimeoutMinutes *float64 `json:"idleTimeoutMinutes,omitempty" tf:"idle_timeout_minutes,omitempty"` +} + +type ImageConfigurationInitParameters struct { + + // The image URI. + ImageURI *string `json:"imageUri,omitempty" tf:"image_uri,omitempty"` +} + +type ImageConfigurationObservation struct { + + // The image URI. + ImageURI *string `json:"imageUri,omitempty" tf:"image_uri,omitempty"` +} + +type ImageConfigurationParameters struct { + + // The image URI. 
+ // +kubebuilder:validation:Optional + ImageURI *string `json:"imageUri" tf:"image_uri,omitempty"` +} + +type InitialCapacityConfigInitParameters struct { + + // The resource configuration of the initial capacity configuration. + WorkerConfiguration *WorkerConfigurationInitParameters `json:"workerConfiguration,omitempty" tf:"worker_configuration,omitempty"` + + // The number of workers in the initial capacity configuration. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type InitialCapacityConfigObservation struct { + + // The resource configuration of the initial capacity configuration. + WorkerConfiguration *WorkerConfigurationObservation `json:"workerConfiguration,omitempty" tf:"worker_configuration,omitempty"` + + // The number of workers in the initial capacity configuration. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type InitialCapacityConfigParameters struct { + + // The resource configuration of the initial capacity configuration. + // +kubebuilder:validation:Optional + WorkerConfiguration *WorkerConfigurationParameters `json:"workerConfiguration,omitempty" tf:"worker_configuration,omitempty"` + + // The number of workers in the initial capacity configuration. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount" tf:"worker_count,omitempty"` +} + +type InitialCapacityInitParameters struct { + + // The initial capacity configuration per worker. + InitialCapacityConfig *InitialCapacityConfigInitParameters `json:"initialCapacityConfig,omitempty" tf:"initial_capacity_config,omitempty"` + + // The worker type for an analytics framework. For Spark applications, the key can either be set to Driver or Executor. For Hive applications, it can be set to HiveDriver or TezTask. 
+ InitialCapacityType *string `json:"initialCapacityType,omitempty" tf:"initial_capacity_type,omitempty"` +} + +type InitialCapacityObservation struct { + + // The initial capacity configuration per worker. + InitialCapacityConfig *InitialCapacityConfigObservation `json:"initialCapacityConfig,omitempty" tf:"initial_capacity_config,omitempty"` + + // The worker type for an analytics framework. For Spark applications, the key can either be set to Driver or Executor. For Hive applications, it can be set to HiveDriver or TezTask. + InitialCapacityType *string `json:"initialCapacityType,omitempty" tf:"initial_capacity_type,omitempty"` +} + +type InitialCapacityParameters struct { + + // The initial capacity configuration per worker. + // +kubebuilder:validation:Optional + InitialCapacityConfig *InitialCapacityConfigParameters `json:"initialCapacityConfig,omitempty" tf:"initial_capacity_config,omitempty"` + + // The worker type for an analytics framework. For Spark applications, the key can either be set to Driver or Executor. For Hive applications, it can be set to HiveDriver or TezTask. + // +kubebuilder:validation:Optional + InitialCapacityType *string `json:"initialCapacityType" tf:"initial_capacity_type,omitempty"` +} + +type MaximumCapacityInitParameters struct { + + // The maximum allowed CPU for an application. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // The maximum allowed disk for an application. + Disk *string `json:"disk,omitempty" tf:"disk,omitempty"` + + // The maximum allowed resources for an application. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type MaximumCapacityObservation struct { + + // The maximum allowed CPU for an application. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // The maximum allowed disk for an application. + Disk *string `json:"disk,omitempty" tf:"disk,omitempty"` + + // The maximum allowed resources for an application. 
+ Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type MaximumCapacityParameters struct { + + // The maximum allowed CPU for an application. + // +kubebuilder:validation:Optional + CPU *string `json:"cpu" tf:"cpu,omitempty"` + + // The maximum allowed disk for an application. + // +kubebuilder:validation:Optional + Disk *string `json:"disk,omitempty" tf:"disk,omitempty"` + + // The maximum allowed resources for an application. + // +kubebuilder:validation:Optional + Memory *string `json:"memory" tf:"memory,omitempty"` +} + +type NetworkConfigurationInitParameters struct { + + // The array of security group Ids for customer VPC connectivity. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The array of subnet Ids for customer VPC connectivity. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type NetworkConfigurationObservation struct { + + // The array of security group Ids for customer VPC connectivity. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The array of subnet Ids for customer VPC connectivity. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type NetworkConfigurationParameters struct { + + // The array of security group Ids for customer VPC connectivity. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The array of subnet Ids for customer VPC connectivity. + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type WorkerConfigurationInitParameters struct { + + // The maximum allowed CPU for an application. 
+ CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // The maximum allowed disk for an application. + Disk *string `json:"disk,omitempty" tf:"disk,omitempty"` + + // The maximum allowed resources for an application. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type WorkerConfigurationObservation struct { + + // The maximum allowed CPU for an application. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // The maximum allowed disk for an application. + Disk *string `json:"disk,omitempty" tf:"disk,omitempty"` + + // The maximum allowed resources for an application. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type WorkerConfigurationParameters struct { + + // The maximum allowed CPU for an application. + // +kubebuilder:validation:Optional + CPU *string `json:"cpu" tf:"cpu,omitempty"` + + // The maximum allowed disk for an application. + // +kubebuilder:validation:Optional + Disk *string `json:"disk,omitempty" tf:"disk,omitempty"` + + // The maximum allowed resources for an application. + // +kubebuilder:validation:Optional + Memory *string `json:"memory" tf:"memory,omitempty"` +} + +// ApplicationSpec defines the desired state of Application +type ApplicationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ApplicationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ApplicationInitParameters `json:"initProvider,omitempty"` +} + +// ApplicationStatus defines the observed state of Application. +type ApplicationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ApplicationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Application is the Schema for the Applications API. Manages an EMR Serverless Application +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Application struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.releaseLabel) || (has(self.initProvider) && has(self.initProvider.releaseLabel))",message="spec.forProvider.releaseLabel is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec ApplicationSpec `json:"spec"` + Status ApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ApplicationList contains a list of Applications +type ApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Application `json:"items"` +} + +// Repository type metadata. +var ( + Application_Kind = "Application" + Application_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Application_Kind}.String() + Application_KindAPIVersion = Application_Kind + "." + CRDGroupVersion.String() + Application_GroupVersionKind = CRDGroupVersion.WithKind(Application_Kind) +) + +func init() { + SchemeBuilder.Register(&Application{}, &ApplicationList{}) +} diff --git a/apis/emrserverless/v1beta2/zz_generated.conversion_hubs.go b/apis/emrserverless/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2d48655531 --- /dev/null +++ b/apis/emrserverless/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Application) Hub() {} diff --git a/apis/emrserverless/v1beta2/zz_generated.deepcopy.go b/apis/emrserverless/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..7272dbcfc2 --- /dev/null +++ b/apis/emrserverless/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1023 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Application) DeepCopyInto(out *Application) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Application. +func (in *Application) DeepCopy() *Application { + if in == nil { + return nil + } + out := new(Application) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Application) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInitParameters) DeepCopyInto(out *ApplicationInitParameters) { + *out = *in + if in.Architecture != nil { + in, out := &in.Architecture, &out.Architecture + *out = new(string) + **out = **in + } + if in.AutoStartConfiguration != nil { + in, out := &in.AutoStartConfiguration, &out.AutoStartConfiguration + *out = new(AutoStartConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoStopConfiguration != nil { + in, out := &in.AutoStopConfiguration, &out.AutoStopConfiguration + *out = new(AutoStopConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageConfiguration != nil { + in, out := &in.ImageConfiguration, &out.ImageConfiguration + *out = new(ImageConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InitialCapacity != nil { + in, out := &in.InitialCapacity, &out.InitialCapacity + *out = make([]InitialCapacityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaximumCapacity != nil { + in, out := &in.MaximumCapacity, &out.MaximumCapacity + *out = new(MaximumCapacityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReleaseLabel != nil { + in, out := &in.ReleaseLabel, &out.ReleaseLabel + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new ApplicationInitParameters. +func (in *ApplicationInitParameters) DeepCopy() *ApplicationInitParameters { + if in == nil { + return nil + } + out := new(ApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationList) DeepCopyInto(out *ApplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Application, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationList. +func (in *ApplicationList) DeepCopy() *ApplicationList { + if in == nil { + return nil + } + out := new(ApplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationObservation) DeepCopyInto(out *ApplicationObservation) { + *out = *in + if in.Architecture != nil { + in, out := &in.Architecture, &out.Architecture + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoStartConfiguration != nil { + in, out := &in.AutoStartConfiguration, &out.AutoStartConfiguration + *out = new(AutoStartConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoStopConfiguration != nil { + in, out := &in.AutoStopConfiguration, &out.AutoStopConfiguration + *out = new(AutoStopConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageConfiguration != nil { + in, out := &in.ImageConfiguration, &out.ImageConfiguration + *out = new(ImageConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.InitialCapacity != nil { + in, out := &in.InitialCapacity, &out.InitialCapacity + *out = make([]InitialCapacityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaximumCapacity != nil { + in, out := &in.MaximumCapacity, &out.MaximumCapacity + *out = new(MaximumCapacityObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ReleaseLabel != nil { + in, out := &in.ReleaseLabel, &out.ReleaseLabel + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationObservation. +func (in *ApplicationObservation) DeepCopy() *ApplicationObservation { + if in == nil { + return nil + } + out := new(ApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationParameters) DeepCopyInto(out *ApplicationParameters) { + *out = *in + if in.Architecture != nil { + in, out := &in.Architecture, &out.Architecture + *out = new(string) + **out = **in + } + if in.AutoStartConfiguration != nil { + in, out := &in.AutoStartConfiguration, &out.AutoStartConfiguration + *out = new(AutoStartConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoStopConfiguration != nil { + in, out := &in.AutoStopConfiguration, &out.AutoStopConfiguration + *out = new(AutoStopConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageConfiguration != nil { + in, out := &in.ImageConfiguration, &out.ImageConfiguration + *out = new(ImageConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.InitialCapacity != nil { + in, out := &in.InitialCapacity, &out.InitialCapacity + *out = make([]InitialCapacityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaximumCapacity != nil { + in, out := &in.MaximumCapacity, &out.MaximumCapacity + *out = new(MaximumCapacityParameters) + (*in).DeepCopyInto(*out) + } + if 
in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReleaseLabel != nil { + in, out := &in.ReleaseLabel, &out.ReleaseLabel + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationParameters. +func (in *ApplicationParameters) DeepCopy() *ApplicationParameters { + if in == nil { + return nil + } + out := new(ApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSpec. +func (in *ApplicationSpec) DeepCopy() *ApplicationSpec { + if in == nil { + return nil + } + out := new(ApplicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStatus. +func (in *ApplicationStatus) DeepCopy() *ApplicationStatus { + if in == nil { + return nil + } + out := new(ApplicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoStartConfigurationInitParameters) DeepCopyInto(out *AutoStartConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoStartConfigurationInitParameters. +func (in *AutoStartConfigurationInitParameters) DeepCopy() *AutoStartConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AutoStartConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoStartConfigurationObservation) DeepCopyInto(out *AutoStartConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoStartConfigurationObservation. +func (in *AutoStartConfigurationObservation) DeepCopy() *AutoStartConfigurationObservation { + if in == nil { + return nil + } + out := new(AutoStartConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoStartConfigurationParameters) DeepCopyInto(out *AutoStartConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoStartConfigurationParameters. +func (in *AutoStartConfigurationParameters) DeepCopy() *AutoStartConfigurationParameters { + if in == nil { + return nil + } + out := new(AutoStartConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoStopConfigurationInitParameters) DeepCopyInto(out *AutoStopConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdleTimeoutMinutes != nil { + in, out := &in.IdleTimeoutMinutes, &out.IdleTimeoutMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoStopConfigurationInitParameters. +func (in *AutoStopConfigurationInitParameters) DeepCopy() *AutoStopConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AutoStopConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoStopConfigurationObservation) DeepCopyInto(out *AutoStopConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdleTimeoutMinutes != nil { + in, out := &in.IdleTimeoutMinutes, &out.IdleTimeoutMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoStopConfigurationObservation. 
+func (in *AutoStopConfigurationObservation) DeepCopy() *AutoStopConfigurationObservation { + if in == nil { + return nil + } + out := new(AutoStopConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoStopConfigurationParameters) DeepCopyInto(out *AutoStopConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdleTimeoutMinutes != nil { + in, out := &in.IdleTimeoutMinutes, &out.IdleTimeoutMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoStopConfigurationParameters. +func (in *AutoStopConfigurationParameters) DeepCopy() *AutoStopConfigurationParameters { + if in == nil { + return nil + } + out := new(AutoStopConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfigurationInitParameters) DeepCopyInto(out *ImageConfigurationInitParameters) { + *out = *in + if in.ImageURI != nil { + in, out := &in.ImageURI, &out.ImageURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigurationInitParameters. +func (in *ImageConfigurationInitParameters) DeepCopy() *ImageConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ImageConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageConfigurationObservation) DeepCopyInto(out *ImageConfigurationObservation) { + *out = *in + if in.ImageURI != nil { + in, out := &in.ImageURI, &out.ImageURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigurationObservation. +func (in *ImageConfigurationObservation) DeepCopy() *ImageConfigurationObservation { + if in == nil { + return nil + } + out := new(ImageConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfigurationParameters) DeepCopyInto(out *ImageConfigurationParameters) { + *out = *in + if in.ImageURI != nil { + in, out := &in.ImageURI, &out.ImageURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigurationParameters. +func (in *ImageConfigurationParameters) DeepCopy() *ImageConfigurationParameters { + if in == nil { + return nil + } + out := new(ImageConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitialCapacityConfigInitParameters) DeepCopyInto(out *InitialCapacityConfigInitParameters) { + *out = *in + if in.WorkerConfiguration != nil { + in, out := &in.WorkerConfiguration, &out.WorkerConfiguration + *out = new(WorkerConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialCapacityConfigInitParameters. 
+func (in *InitialCapacityConfigInitParameters) DeepCopy() *InitialCapacityConfigInitParameters { + if in == nil { + return nil + } + out := new(InitialCapacityConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitialCapacityConfigObservation) DeepCopyInto(out *InitialCapacityConfigObservation) { + *out = *in + if in.WorkerConfiguration != nil { + in, out := &in.WorkerConfiguration, &out.WorkerConfiguration + *out = new(WorkerConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialCapacityConfigObservation. +func (in *InitialCapacityConfigObservation) DeepCopy() *InitialCapacityConfigObservation { + if in == nil { + return nil + } + out := new(InitialCapacityConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitialCapacityConfigParameters) DeepCopyInto(out *InitialCapacityConfigParameters) { + *out = *in + if in.WorkerConfiguration != nil { + in, out := &in.WorkerConfiguration, &out.WorkerConfiguration + *out = new(WorkerConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialCapacityConfigParameters. 
+func (in *InitialCapacityConfigParameters) DeepCopy() *InitialCapacityConfigParameters { + if in == nil { + return nil + } + out := new(InitialCapacityConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitialCapacityInitParameters) DeepCopyInto(out *InitialCapacityInitParameters) { + *out = *in + if in.InitialCapacityConfig != nil { + in, out := &in.InitialCapacityConfig, &out.InitialCapacityConfig + *out = new(InitialCapacityConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InitialCapacityType != nil { + in, out := &in.InitialCapacityType, &out.InitialCapacityType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialCapacityInitParameters. +func (in *InitialCapacityInitParameters) DeepCopy() *InitialCapacityInitParameters { + if in == nil { + return nil + } + out := new(InitialCapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitialCapacityObservation) DeepCopyInto(out *InitialCapacityObservation) { + *out = *in + if in.InitialCapacityConfig != nil { + in, out := &in.InitialCapacityConfig, &out.InitialCapacityConfig + *out = new(InitialCapacityConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.InitialCapacityType != nil { + in, out := &in.InitialCapacityType, &out.InitialCapacityType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialCapacityObservation. 
+func (in *InitialCapacityObservation) DeepCopy() *InitialCapacityObservation { + if in == nil { + return nil + } + out := new(InitialCapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitialCapacityParameters) DeepCopyInto(out *InitialCapacityParameters) { + *out = *in + if in.InitialCapacityConfig != nil { + in, out := &in.InitialCapacityConfig, &out.InitialCapacityConfig + *out = new(InitialCapacityConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.InitialCapacityType != nil { + in, out := &in.InitialCapacityType, &out.InitialCapacityType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitialCapacityParameters. +func (in *InitialCapacityParameters) DeepCopy() *InitialCapacityParameters { + if in == nil { + return nil + } + out := new(InitialCapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaximumCapacityInitParameters) DeepCopyInto(out *MaximumCapacityInitParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Disk != nil { + in, out := &in.Disk, &out.Disk + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaximumCapacityInitParameters. +func (in *MaximumCapacityInitParameters) DeepCopy() *MaximumCapacityInitParameters { + if in == nil { + return nil + } + out := new(MaximumCapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MaximumCapacityObservation) DeepCopyInto(out *MaximumCapacityObservation) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Disk != nil { + in, out := &in.Disk, &out.Disk + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaximumCapacityObservation. +func (in *MaximumCapacityObservation) DeepCopy() *MaximumCapacityObservation { + if in == nil { + return nil + } + out := new(MaximumCapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaximumCapacityParameters) DeepCopyInto(out *MaximumCapacityParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Disk != nil { + in, out := &in.Disk, &out.Disk + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaximumCapacityParameters. +func (in *MaximumCapacityParameters) DeepCopy() *MaximumCapacityParameters { + if in == nil { + return nil + } + out := new(MaximumCapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationInitParameters) DeepCopyInto(out *NetworkConfigurationInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationInitParameters. +func (in *NetworkConfigurationInitParameters) DeepCopy() *NetworkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfigurationObservation) DeepCopyInto(out *NetworkConfigurationObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationObservation. 
+func (in *NetworkConfigurationObservation) DeepCopy() *NetworkConfigurationObservation { + if in == nil { + return nil + } + out := new(NetworkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfigurationParameters) DeepCopyInto(out *NetworkConfigurationParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationParameters. +func (in *NetworkConfigurationParameters) DeepCopy() *NetworkConfigurationParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigurationInitParameters) DeepCopyInto(out *WorkerConfigurationInitParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Disk != nil { + in, out := &in.Disk, &out.Disk + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigurationInitParameters. 
+func (in *WorkerConfigurationInitParameters) DeepCopy() *WorkerConfigurationInitParameters { + if in == nil { + return nil + } + out := new(WorkerConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigurationObservation) DeepCopyInto(out *WorkerConfigurationObservation) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Disk != nil { + in, out := &in.Disk, &out.Disk + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigurationObservation. +func (in *WorkerConfigurationObservation) DeepCopy() *WorkerConfigurationObservation { + if in == nil { + return nil + } + out := new(WorkerConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigurationParameters) DeepCopyInto(out *WorkerConfigurationParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Disk != nil { + in, out := &in.Disk, &out.Disk + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigurationParameters. 
+func (in *WorkerConfigurationParameters) DeepCopy() *WorkerConfigurationParameters { + if in == nil { + return nil + } + out := new(WorkerConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/emrserverless/v1beta2/zz_generated.managed.go b/apis/emrserverless/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..d1481109bb --- /dev/null +++ b/apis/emrserverless/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Application. +func (mg *Application) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Application. +func (mg *Application) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Application. +func (mg *Application) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Application. +func (mg *Application) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Application. +func (mg *Application) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Application. +func (mg *Application) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Application. +func (mg *Application) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Application. 
+func (mg *Application) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Application. +func (mg *Application) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Application. +func (mg *Application) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Application. +func (mg *Application) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Application. +func (mg *Application) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/emrserverless/v1beta2/zz_generated.managedlist.go b/apis/emrserverless/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..9c9817b1e1 --- /dev/null +++ b/apis/emrserverless/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ApplicationList. +func (l *ApplicationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/emrserverless/v1beta2/zz_groupversion_info.go b/apis/emrserverless/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..7855a75789 --- /dev/null +++ b/apis/emrserverless/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=emrserverless.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "emrserverless.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/evidently/v1beta1/zz_generated.conversion_hubs.go b/apis/evidently/v1beta1/zz_generated.conversion_hubs.go index a17e1bd653..7179811971 100755 --- a/apis/evidently/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/evidently/v1beta1/zz_generated.conversion_hubs.go @@ -6,11 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Feature) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Project) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Segment) Hub() {} diff --git a/apis/evidently/v1beta1/zz_generated.conversion_spokes.go b/apis/evidently/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..265652b6df --- /dev/null +++ b/apis/evidently/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Feature to the hub type. +func (tr *Feature) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Feature type. +func (tr *Feature) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Project to the hub type. +func (tr *Project) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Project type. 
+func (tr *Project) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/evidently/v1beta2/zz_feature_terraformed.go b/apis/evidently/v1beta2/zz_feature_terraformed.go new file mode 100755 index 0000000000..e7aa5b5614 --- /dev/null +++ b/apis/evidently/v1beta2/zz_feature_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Feature +func (mg *Feature) GetTerraformResourceType() string { + return "aws_evidently_feature" +} + +// GetConnectionDetailsMapping for this Feature +func (tr *Feature) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Feature +func (tr *Feature) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Feature +func (tr *Feature) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Feature +func (tr *Feature) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this Feature +func (tr *Feature) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Feature +func (tr *Feature) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Feature +func (tr *Feature) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Feature +func (tr *Feature) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Feature using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Feature) LateInitialize(attrs []byte) (bool, error) { + params := &FeatureParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Feature) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/evidently/v1beta2/zz_feature_types.go b/apis/evidently/v1beta2/zz_feature_types.go new file mode 100755 index 0000000000..0e08b8542a --- /dev/null +++ b/apis/evidently/v1beta2/zz_feature_types.go @@ -0,0 +1,290 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EvaluationRulesInitParameters struct { +} + +type EvaluationRulesObservation struct { + + // The name of the experiment or launch. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // This value is aws.evidently.splits if this is an evaluation rule for a launch, and it is aws.evidently.onlineab if this is an evaluation rule for an experiment. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EvaluationRulesParameters struct { +} + +type FeatureInitParameters struct { + + // The name of the variation to use as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature. 
This variation must also be listed in the variations structure. If you omit default_variation, the first variation listed in the variations structure is used as the default variation. + DefaultVariation *string `json:"defaultVariation,omitempty" tf:"default_variation,omitempty"` + + // Specifies the description of the feature. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specify users that should always be served a specific variation of a feature. Each user is specified by a key-value pair . For each key, specify a user by entering their user ID, account ID, or some other identifier. For the value, specify the name of the variation that they are to be served. + // +mapType=granular + EntityOverrides map[string]*string `json:"entityOverrides,omitempty" tf:"entity_overrides,omitempty"` + + // Specify ALL_RULES to activate the traffic allocation specified by any ongoing launches or experiments. Specify DEFAULT_VARIATION to serve the default variation to all users instead. + EvaluationStrategy *string `json:"evaluationStrategy,omitempty" tf:"evaluation_strategy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more blocks that contain the configuration of the feature's different variations. Detailed below + Variations []VariationsInitParameters `json:"variations,omitempty" tf:"variations,omitempty"` +} + +type FeatureObservation struct { + + // The ARN of the feature. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The date and time that the feature is created. + CreatedTime *string `json:"createdTime,omitempty" tf:"created_time,omitempty"` + + // The name of the variation to use as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature. This variation must also be listed in the variations structure. 
If you omit default_variation, the first variation listed in the variations structure is used as the default variation. + DefaultVariation *string `json:"defaultVariation,omitempty" tf:"default_variation,omitempty"` + + // Specifies the description of the feature. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specify users that should always be served a specific variation of a feature. Each user is specified by a key-value pair . For each key, specify a user by entering their user ID, account ID, or some other identifier. For the value, specify the name of the variation that they are to be served. + // +mapType=granular + EntityOverrides map[string]*string `json:"entityOverrides,omitempty" tf:"entity_overrides,omitempty"` + + // One or more blocks that define the evaluation rules for the feature. Detailed below + EvaluationRules []EvaluationRulesObservation `json:"evaluationRules,omitempty" tf:"evaluation_rules,omitempty"` + + // Specify ALL_RULES to activate the traffic allocation specified by any ongoing launches or experiments. Specify DEFAULT_VARIATION to serve the default variation to all users instead. + EvaluationStrategy *string `json:"evaluationStrategy,omitempty" tf:"evaluation_strategy,omitempty"` + + // The feature name and the project name or arn separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The date and time that the feature was most recently updated. + LastUpdatedTime *string `json:"lastUpdatedTime,omitempty" tf:"last_updated_time,omitempty"` + + // The name or ARN of the project that is to contain the new feature. + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // The current state of the feature. Valid values are AVAILABLE and UPDATING. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Defines the type of value used to define the different feature variations. Valid Values: STRING, LONG, DOUBLE, BOOLEAN. + ValueType *string `json:"valueType,omitempty" tf:"value_type,omitempty"` + + // One or more blocks that contain the configuration of the feature's different variations. Detailed below + Variations []VariationsObservation `json:"variations,omitempty" tf:"variations,omitempty"` +} + +type FeatureParameters struct { + + // The name of the variation to use as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature. This variation must also be listed in the variations structure. If you omit default_variation, the first variation listed in the variations structure is used as the default variation. + // +kubebuilder:validation:Optional + DefaultVariation *string `json:"defaultVariation,omitempty" tf:"default_variation,omitempty"` + + // Specifies the description of the feature. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specify users that should always be served a specific variation of a feature. Each user is specified by a key-value pair . For each key, specify a user by entering their user ID, account ID, or some other identifier. For the value, specify the name of the variation that they are to be served. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + EntityOverrides map[string]*string `json:"entityOverrides,omitempty" tf:"entity_overrides,omitempty"` + + // Specify ALL_RULES to activate the traffic allocation specified by any ongoing launches or experiments. Specify DEFAULT_VARIATION to serve the default variation to all users instead. + // +kubebuilder:validation:Optional + EvaluationStrategy *string `json:"evaluationStrategy,omitempty" tf:"evaluation_strategy,omitempty"` + + // The name or ARN of the project that is to contain the new feature. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/evidently/v1beta2.Project + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Reference to a Project in evidently to populate project. + // +kubebuilder:validation:Optional + ProjectRef *v1.Reference `json:"projectRef,omitempty" tf:"-"` + + // Selector for a Project in evidently to populate project. + // +kubebuilder:validation:Optional + ProjectSelector *v1.Selector `json:"projectSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more blocks that contain the configuration of the feature's different variations. Detailed below + // +kubebuilder:validation:Optional + Variations []VariationsParameters `json:"variations,omitempty" tf:"variations,omitempty"` +} + +type ValueInitParameters struct { + + // If this feature uses the Boolean variation type, this field contains the Boolean value of this variation. 
+ BoolValue *string `json:"boolValue,omitempty" tf:"bool_value,omitempty"` + + // If this feature uses the double integer variation type, this field contains the double integer value of this variation. + DoubleValue *string `json:"doubleValue,omitempty" tf:"double_value,omitempty"` + + // If this feature uses the long variation type, this field contains the long value of this variation. Minimum value of -9007199254740991. Maximum value of 9007199254740991. + LongValue *string `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // If this feature uses the string variation type, this field contains the string value of this variation. Minimum length of 0. Maximum length of 512. + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type ValueObservation struct { + + // If this feature uses the Boolean variation type, this field contains the Boolean value of this variation. + BoolValue *string `json:"boolValue,omitempty" tf:"bool_value,omitempty"` + + // If this feature uses the double integer variation type, this field contains the double integer value of this variation. + DoubleValue *string `json:"doubleValue,omitempty" tf:"double_value,omitempty"` + + // If this feature uses the long variation type, this field contains the long value of this variation. Minimum value of -9007199254740991. Maximum value of 9007199254740991. + LongValue *string `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // If this feature uses the string variation type, this field contains the string value of this variation. Minimum length of 0. Maximum length of 512. + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type ValueParameters struct { + + // If this feature uses the Boolean variation type, this field contains the Boolean value of this variation. 
+ // +kubebuilder:validation:Optional + BoolValue *string `json:"boolValue,omitempty" tf:"bool_value,omitempty"` + + // If this feature uses the double integer variation type, this field contains the double integer value of this variation. + // +kubebuilder:validation:Optional + DoubleValue *string `json:"doubleValue,omitempty" tf:"double_value,omitempty"` + + // If this feature uses the long variation type, this field contains the long value of this variation. Minimum value of -9007199254740991. Maximum value of 9007199254740991. + // +kubebuilder:validation:Optional + LongValue *string `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // If this feature uses the string variation type, this field contains the string value of this variation. Minimum length of 0. Maximum length of 512. + // +kubebuilder:validation:Optional + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type VariationsInitParameters struct { + + // The name of the variation. Minimum length of 1. Maximum length of 127. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that specifies the value assigned to this variation. Detailed below + Value *ValueInitParameters `json:"value,omitempty" tf:"value,omitempty"` +} + +type VariationsObservation struct { + + // The name of the variation. Minimum length of 1. Maximum length of 127. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that specifies the value assigned to this variation. Detailed below + Value *ValueObservation `json:"value,omitempty" tf:"value,omitempty"` +} + +type VariationsParameters struct { + + // The name of the variation. Minimum length of 1. Maximum length of 127. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A block that specifies the value assigned to this variation. 
Detailed below + // +kubebuilder:validation:Optional + Value *ValueParameters `json:"value" tf:"value,omitempty"` +} + +// FeatureSpec defines the desired state of Feature +type FeatureSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FeatureParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FeatureInitParameters `json:"initProvider,omitempty"` +} + +// FeatureStatus defines the observed state of Feature. +type FeatureStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FeatureObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Feature is the Schema for the Features API. Provides a CloudWatch Evidently Feature resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Feature struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.variations) || (has(self.initProvider) && has(self.initProvider.variations))",message="spec.forProvider.variations is a required parameter" + Spec FeatureSpec `json:"spec"` + Status FeatureStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FeatureList contains a list of Features +type FeatureList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Feature `json:"items"` +} + +// Repository type metadata. +var ( + Feature_Kind = "Feature" + Feature_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Feature_Kind}.String() + Feature_KindAPIVersion = Feature_Kind + "." 
+ CRDGroupVersion.String() + Feature_GroupVersionKind = CRDGroupVersion.WithKind(Feature_Kind) +) + +func init() { + SchemeBuilder.Register(&Feature{}, &FeatureList{}) +} diff --git a/apis/evidently/v1beta2/zz_generated.conversion_hubs.go b/apis/evidently/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9b9c00f660 --- /dev/null +++ b/apis/evidently/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Feature) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Project) Hub() {} diff --git a/apis/evidently/v1beta2/zz_generated.deepcopy.go b/apis/evidently/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b680177269 --- /dev/null +++ b/apis/evidently/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1141 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsInitParameters) DeepCopyInto(out *CloudwatchLogsInitParameters) { + *out = *in + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsInitParameters. 
+func (in *CloudwatchLogsInitParameters) DeepCopy() *CloudwatchLogsInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsObservation) DeepCopyInto(out *CloudwatchLogsObservation) { + *out = *in + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsObservation. +func (in *CloudwatchLogsObservation) DeepCopy() *CloudwatchLogsObservation { + if in == nil { + return nil + } + out := new(CloudwatchLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsParameters) DeepCopyInto(out *CloudwatchLogsParameters) { + *out = *in + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsParameters. +func (in *CloudwatchLogsParameters) DeepCopy() *CloudwatchLogsParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataDeliveryInitParameters) DeepCopyInto(out *DataDeliveryInitParameters) { + *out = *in + if in.CloudwatchLogs != nil { + in, out := &in.CloudwatchLogs, &out.CloudwatchLogs + *out = new(CloudwatchLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Destination != nil { + in, out := &in.S3Destination, &out.S3Destination + *out = new(S3DestinationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDeliveryInitParameters. +func (in *DataDeliveryInitParameters) DeepCopy() *DataDeliveryInitParameters { + if in == nil { + return nil + } + out := new(DataDeliveryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDeliveryObservation) DeepCopyInto(out *DataDeliveryObservation) { + *out = *in + if in.CloudwatchLogs != nil { + in, out := &in.CloudwatchLogs, &out.CloudwatchLogs + *out = new(CloudwatchLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.S3Destination != nil { + in, out := &in.S3Destination, &out.S3Destination + *out = new(S3DestinationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDeliveryObservation. +func (in *DataDeliveryObservation) DeepCopy() *DataDeliveryObservation { + if in == nil { + return nil + } + out := new(DataDeliveryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataDeliveryParameters) DeepCopyInto(out *DataDeliveryParameters) { + *out = *in + if in.CloudwatchLogs != nil { + in, out := &in.CloudwatchLogs, &out.CloudwatchLogs + *out = new(CloudwatchLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Destination != nil { + in, out := &in.S3Destination, &out.S3Destination + *out = new(S3DestinationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDeliveryParameters. +func (in *DataDeliveryParameters) DeepCopy() *DataDeliveryParameters { + if in == nil { + return nil + } + out := new(DataDeliveryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EvaluationRulesInitParameters) DeepCopyInto(out *EvaluationRulesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluationRulesInitParameters. +func (in *EvaluationRulesInitParameters) DeepCopy() *EvaluationRulesInitParameters { + if in == nil { + return nil + } + out := new(EvaluationRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EvaluationRulesObservation) DeepCopyInto(out *EvaluationRulesObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluationRulesObservation. 
+func (in *EvaluationRulesObservation) DeepCopy() *EvaluationRulesObservation { + if in == nil { + return nil + } + out := new(EvaluationRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EvaluationRulesParameters) DeepCopyInto(out *EvaluationRulesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluationRulesParameters. +func (in *EvaluationRulesParameters) DeepCopy() *EvaluationRulesParameters { + if in == nil { + return nil + } + out := new(EvaluationRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Feature) DeepCopyInto(out *Feature) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Feature. +func (in *Feature) DeepCopy() *Feature { + if in == nil { + return nil + } + out := new(Feature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Feature) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureInitParameters) DeepCopyInto(out *FeatureInitParameters) { + *out = *in + if in.DefaultVariation != nil { + in, out := &in.DefaultVariation, &out.DefaultVariation + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EntityOverrides != nil { + in, out := &in.EntityOverrides, &out.EntityOverrides + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EvaluationStrategy != nil { + in, out := &in.EvaluationStrategy, &out.EvaluationStrategy + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Variations != nil { + in, out := &in.Variations, &out.Variations + *out = make([]VariationsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureInitParameters. +func (in *FeatureInitParameters) DeepCopy() *FeatureInitParameters { + if in == nil { + return nil + } + out := new(FeatureInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureList) DeepCopyInto(out *FeatureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Feature, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureList. +func (in *FeatureList) DeepCopy() *FeatureList { + if in == nil { + return nil + } + out := new(FeatureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureObservation) DeepCopyInto(out *FeatureObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedTime != nil { + in, out := &in.CreatedTime, &out.CreatedTime + *out = new(string) + **out = **in + } + if in.DefaultVariation != nil { + in, out := &in.DefaultVariation, &out.DefaultVariation + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EntityOverrides != nil { + in, out := &in.EntityOverrides, &out.EntityOverrides + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EvaluationRules != nil { + in, out := &in.EvaluationRules, &out.EvaluationRules + *out = make([]EvaluationRulesObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EvaluationStrategy != nil { + in, out := &in.EvaluationStrategy, &out.EvaluationStrategy + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedTime != nil { + in, out := &in.LastUpdatedTime, &out.LastUpdatedTime + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ValueType != nil { + in, out := &in.ValueType, &out.ValueType + *out = new(string) + **out = **in + } + if in.Variations != nil { + in, out := &in.Variations, &out.Variations + *out = make([]VariationsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureObservation. +func (in *FeatureObservation) DeepCopy() *FeatureObservation { + if in == nil { + return nil + } + out := new(FeatureObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureParameters) DeepCopyInto(out *FeatureParameters) { + *out = *in + if in.DefaultVariation != nil { + in, out := &in.DefaultVariation, &out.DefaultVariation + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EntityOverrides != nil { + in, out := &in.EntityOverrides, &out.EntityOverrides + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.EvaluationStrategy != nil { + in, out := &in.EvaluationStrategy, &out.EvaluationStrategy + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.ProjectRef != nil { + in, out := &in.ProjectRef, &out.ProjectRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProjectSelector != nil { + in, out := &in.ProjectSelector, &out.ProjectSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Variations != nil { + in, out := &in.Variations, &out.Variations + *out = make([]VariationsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureParameters. 
+func (in *FeatureParameters) DeepCopy() *FeatureParameters { + if in == nil { + return nil + } + out := new(FeatureParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureSpec) DeepCopyInto(out *FeatureSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureSpec. +func (in *FeatureSpec) DeepCopy() *FeatureSpec { + if in == nil { + return nil + } + out := new(FeatureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureStatus) DeepCopyInto(out *FeatureStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureStatus. +func (in *FeatureStatus) DeepCopy() *FeatureStatus { + if in == nil { + return nil + } + out := new(FeatureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectInitParameters) DeepCopyInto(out *ProjectInitParameters) { + *out = *in + if in.DataDelivery != nil { + in, out := &in.DataDelivery, &out.DataDelivery + *out = new(DataDeliveryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectInitParameters. +func (in *ProjectInitParameters) DeepCopy() *ProjectInitParameters { + if in == nil { + return nil + } + out := new(ProjectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. 
+func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectObservation) DeepCopyInto(out *ProjectObservation) { + *out = *in + if in.ActiveExperimentCount != nil { + in, out := &in.ActiveExperimentCount, &out.ActiveExperimentCount + *out = new(float64) + **out = **in + } + if in.ActiveLaunchCount != nil { + in, out := &in.ActiveLaunchCount, &out.ActiveLaunchCount + *out = new(float64) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedTime != nil { + in, out := &in.CreatedTime, &out.CreatedTime + *out = new(string) + **out = **in + } + if in.DataDelivery != nil { + in, out := &in.DataDelivery, &out.DataDelivery + *out = new(DataDeliveryObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExperimentCount != nil { + in, out := &in.ExperimentCount, &out.ExperimentCount + *out = new(float64) + **out = **in + } + if in.FeatureCount != nil { + in, out := &in.FeatureCount, &out.FeatureCount + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedTime != nil { + in, out := &in.LastUpdatedTime, &out.LastUpdatedTime + *out = new(string) + **out = **in + } + if in.LaunchCount != nil { + in, out := &in.LaunchCount, &out.LaunchCount + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = 
new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectObservation. +func (in *ProjectObservation) DeepCopy() *ProjectObservation { + if in == nil { + return nil + } + out := new(ProjectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectParameters) DeepCopyInto(out *ProjectParameters) { + *out = *in + if in.DataDelivery != nil { + in, out := &in.DataDelivery, &out.DataDelivery + *out = new(DataDeliveryParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectParameters. +func (in *ProjectParameters) DeepCopy() *ProjectParameters { + if in == nil { + return nil + } + out := new(ProjectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DestinationInitParameters) DeepCopyInto(out *S3DestinationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationInitParameters. +func (in *S3DestinationInitParameters) DeepCopy() *S3DestinationInitParameters { + if in == nil { + return nil + } + out := new(S3DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DestinationObservation) DeepCopyInto(out *S3DestinationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationObservation. 
+func (in *S3DestinationObservation) DeepCopy() *S3DestinationObservation { + if in == nil { + return nil + } + out := new(S3DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DestinationParameters) DeepCopyInto(out *S3DestinationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationParameters. +func (in *S3DestinationParameters) DeepCopy() *S3DestinationParameters { + if in == nil { + return nil + } + out := new(S3DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueInitParameters) DeepCopyInto(out *ValueInitParameters) { + *out = *in + if in.BoolValue != nil { + in, out := &in.BoolValue, &out.BoolValue + *out = new(string) + **out = **in + } + if in.DoubleValue != nil { + in, out := &in.DoubleValue, &out.DoubleValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(string) + **out = **in + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueInitParameters. +func (in *ValueInitParameters) DeepCopy() *ValueInitParameters { + if in == nil { + return nil + } + out := new(ValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValueObservation) DeepCopyInto(out *ValueObservation) { + *out = *in + if in.BoolValue != nil { + in, out := &in.BoolValue, &out.BoolValue + *out = new(string) + **out = **in + } + if in.DoubleValue != nil { + in, out := &in.DoubleValue, &out.DoubleValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(string) + **out = **in + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueObservation. +func (in *ValueObservation) DeepCopy() *ValueObservation { + if in == nil { + return nil + } + out := new(ValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueParameters) DeepCopyInto(out *ValueParameters) { + *out = *in + if in.BoolValue != nil { + in, out := &in.BoolValue, &out.BoolValue + *out = new(string) + **out = **in + } + if in.DoubleValue != nil { + in, out := &in.DoubleValue, &out.DoubleValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(string) + **out = **in + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueParameters. +func (in *ValueParameters) DeepCopy() *ValueParameters { + if in == nil { + return nil + } + out := new(ValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VariationsInitParameters) DeepCopyInto(out *VariationsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(ValueInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VariationsInitParameters. +func (in *VariationsInitParameters) DeepCopy() *VariationsInitParameters { + if in == nil { + return nil + } + out := new(VariationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VariationsObservation) DeepCopyInto(out *VariationsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(ValueObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VariationsObservation. +func (in *VariationsObservation) DeepCopy() *VariationsObservation { + if in == nil { + return nil + } + out := new(VariationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VariationsParameters) DeepCopyInto(out *VariationsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(ValueParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VariationsParameters. 
+func (in *VariationsParameters) DeepCopy() *VariationsParameters { + if in == nil { + return nil + } + out := new(VariationsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/evidently/v1beta2/zz_generated.managed.go b/apis/evidently/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..563ed2a99c --- /dev/null +++ b/apis/evidently/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Feature. +func (mg *Feature) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Feature. +func (mg *Feature) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Feature. +func (mg *Feature) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Feature. +func (mg *Feature) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Feature. +func (mg *Feature) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Feature. +func (mg *Feature) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Feature. +func (mg *Feature) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Feature. +func (mg *Feature) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Feature. 
+func (mg *Feature) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Feature. +func (mg *Feature) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Feature. +func (mg *Feature) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Feature. +func (mg *Feature) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Project. +func (mg *Project) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Project. +func (mg *Project) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Project. +func (mg *Project) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Project. +func (mg *Project) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Project. +func (mg *Project) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Project. +func (mg *Project) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Project. +func (mg *Project) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Project. +func (mg *Project) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Project. 
+func (mg *Project) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Project. +func (mg *Project) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Project. +func (mg *Project) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Project. +func (mg *Project) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/evidently/v1beta2/zz_generated.managedlist.go b/apis/evidently/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..02c8cf2f4d --- /dev/null +++ b/apis/evidently/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this FeatureList. +func (l *FeatureList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProjectList. +func (l *ProjectList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/evidently/v1beta2/zz_generated.resolvers.go b/apis/evidently/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..ed0b9a70ad --- /dev/null +++ b/apis/evidently/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. 
DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Feature. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Feature) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("evidently.aws.upbound.io", "v1beta2", "Project", "ProjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Project), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.ProjectRef, + Selector: mg.Spec.ForProvider.ProjectSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Project") + } + mg.Spec.ForProvider.Project = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ProjectRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/evidently/v1beta2/zz_groupversion_info.go b/apis/evidently/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..98805bbfe0 --- /dev/null +++ b/apis/evidently/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=evidently.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "evidently.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/evidently/v1beta2/zz_project_terraformed.go b/apis/evidently/v1beta2/zz_project_terraformed.go new file mode 100755 index 0000000000..2c393c190b --- /dev/null +++ b/apis/evidently/v1beta2/zz_project_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Project +func (mg *Project) GetTerraformResourceType() string { + return "aws_evidently_project" +} + +// GetConnectionDetailsMapping for this Project +func (tr *Project) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Project +func (tr *Project) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Project +func (tr *Project) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Project +func (tr *Project) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Project +func (tr *Project) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Project +func (tr *Project) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Project +func (tr *Project) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetMergedParameters of this Project
+func (tr *Project) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Project using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Project) LateInitialize(attrs []byte) (bool, error) {
+	params := &ProjectParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Project) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/evidently/v1beta2/zz_project_types.go b/apis/evidently/v1beta2/zz_project_types.go new file mode 100755 index 0000000000..a2f7633ee8 --- /dev/null +++ b/apis/evidently/v1beta2/zz_project_types.go @@ -0,0 +1,243 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudwatchLogsInitParameters struct { + + // The name of the log group where the project stores evaluation events. + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` +} + +type CloudwatchLogsObservation struct { + + // The name of the log group where the project stores evaluation events. + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` +} + +type CloudwatchLogsParameters struct { + + // The name of the log group where the project stores evaluation events. + // +kubebuilder:validation:Optional + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` +} + +type DataDeliveryInitParameters struct { + + // A block that defines the CloudWatch Log Group that stores the evaluation events. See below. + CloudwatchLogs *CloudwatchLogsInitParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + // A block that defines the S3 bucket and prefix that stores the evaluation events. See below. + S3Destination *S3DestinationInitParameters `json:"s3Destination,omitempty" tf:"s3_destination,omitempty"` +} + +type DataDeliveryObservation struct { + + // A block that defines the CloudWatch Log Group that stores the evaluation events. 
See below. + CloudwatchLogs *CloudwatchLogsObservation `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + // A block that defines the S3 bucket and prefix that stores the evaluation events. See below. + S3Destination *S3DestinationObservation `json:"s3Destination,omitempty" tf:"s3_destination,omitempty"` +} + +type DataDeliveryParameters struct { + + // A block that defines the CloudWatch Log Group that stores the evaluation events. See below. + // +kubebuilder:validation:Optional + CloudwatchLogs *CloudwatchLogsParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + // A block that defines the S3 bucket and prefix that stores the evaluation events. See below. + // +kubebuilder:validation:Optional + S3Destination *S3DestinationParameters `json:"s3Destination,omitempty" tf:"s3_destination,omitempty"` +} + +type ProjectInitParameters struct { + + // A block that contains information about where Evidently is to store evaluation events for longer term storage, if you choose to do so. If you choose not to store these events, Evidently deletes them after using them to produce metrics and other experiment results that you can view. See below. + DataDelivery *DataDeliveryInitParameters `json:"dataDelivery,omitempty" tf:"data_delivery,omitempty"` + + // Specifies the description of the project. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A name for the project. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ProjectObservation struct { + + // The number of ongoing experiments currently in the project. + ActiveExperimentCount *float64 `json:"activeExperimentCount,omitempty" tf:"active_experiment_count,omitempty"` + + // The number of ongoing launches currently in the project. 
+ ActiveLaunchCount *float64 `json:"activeLaunchCount,omitempty" tf:"active_launch_count,omitempty"` + + // The ARN of the project. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The date and time that the project is created. + CreatedTime *string `json:"createdTime,omitempty" tf:"created_time,omitempty"` + + // A block that contains information about where Evidently is to store evaluation events for longer term storage, if you choose to do so. If you choose not to store these events, Evidently deletes them after using them to produce metrics and other experiment results that you can view. See below. + DataDelivery *DataDeliveryObservation `json:"dataDelivery,omitempty" tf:"data_delivery,omitempty"` + + // Specifies the description of the project. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The number of experiments currently in the project. This includes all experiments that have been created and not deleted, whether they are ongoing or not. + ExperimentCount *float64 `json:"experimentCount,omitempty" tf:"experiment_count,omitempty"` + + // The number of features currently in the project. + FeatureCount *float64 `json:"featureCount,omitempty" tf:"feature_count,omitempty"` + + // The ID has the same value as the arn of the project. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The date and time that the project was most recently updated. + LastUpdatedTime *string `json:"lastUpdatedTime,omitempty" tf:"last_updated_time,omitempty"` + + // The number of launches currently in the project. This includes all launches that have been created and not deleted, whether they are ongoing or not. + LaunchCount *float64 `json:"launchCount,omitempty" tf:"launch_count,omitempty"` + + // A name for the project. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The current state of the project. Valid values are AVAILABLE and UPDATING. 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ProjectParameters struct { + + // A block that contains information about where Evidently is to store evaluation events for longer term storage, if you choose to do so. If you choose not to store these events, Evidently deletes them after using them to produce metrics and other experiment results that you can view. See below. + // +kubebuilder:validation:Optional + DataDelivery *DataDeliveryParameters `json:"dataDelivery,omitempty" tf:"data_delivery,omitempty"` + + // Specifies the description of the project. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A name for the project. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type S3DestinationInitParameters struct { + + // The name of the bucket in which Evidently stores evaluation events. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The bucket prefix in which Evidently stores evaluation events. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3DestinationObservation struct { + + // The name of the bucket in which Evidently stores evaluation events. 
+ Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The bucket prefix in which Evidently stores evaluation events. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3DestinationParameters struct { + + // The name of the bucket in which Evidently stores evaluation events. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The bucket prefix in which Evidently stores evaluation events. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +// ProjectSpec defines the desired state of Project +type ProjectSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProjectParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProjectInitParameters `json:"initProvider,omitempty"` +} + +// ProjectStatus defines the observed state of Project. +type ProjectStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProjectObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Project is the Schema for the Projects API. Provides a CloudWatch Evidently Project resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Project struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ProjectSpec `json:"spec"` + Status ProjectStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProjectList contains a list of Projects +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Project `json:"items"` +} + +// Repository type metadata. +var ( + Project_Kind = "Project" + Project_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Project_Kind}.String() + Project_KindAPIVersion = Project_Kind + "." 
+ CRDGroupVersion.String() + Project_GroupVersionKind = CRDGroupVersion.WithKind(Project_Kind) +) + +func init() { + SchemeBuilder.Register(&Project{}, &ProjectList{}) +} diff --git a/apis/firehose/v1beta1/zz_generated.conversion_spokes.go b/apis/firehose/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..e08d823a38 --- /dev/null +++ b/apis/firehose/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this DeliveryStream to the hub type. +func (tr *DeliveryStream) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DeliveryStream type. 
+func (tr *DeliveryStream) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/firehose/v1beta2/zz_deliverystream_terraformed.go b/apis/firehose/v1beta2/zz_deliverystream_terraformed.go new file mode 100755 index 0000000000..850acf931a --- /dev/null +++ b/apis/firehose/v1beta2/zz_deliverystream_terraformed.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DeliveryStream +func (mg *DeliveryStream) GetTerraformResourceType() string { + return "aws_kinesis_firehose_delivery_stream" +} + +// GetConnectionDetailsMapping for this DeliveryStream +func (tr *DeliveryStream) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"http_endpoint_configuration[*].access_key": "httpEndpointConfiguration[*].accessKeySecretRef", "redshift_configuration[*].password": "redshiftConfiguration[*].passwordSecretRef", "snowflake_configuration[*].key_passphrase": "snowflakeConfiguration[*].keyPassphraseSecretRef", "snowflake_configuration[*].private_key": "snowflakeConfiguration[*].privateKeySecretRef", "splunk_configuration[*].hec_token": "splunkConfiguration[*].hecTokenSecretRef"} +} + +// GetObservation of this DeliveryStream +func (tr *DeliveryStream) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + 
if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this DeliveryStream
+func (tr *DeliveryStream) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this DeliveryStream
+func (tr *DeliveryStream) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this DeliveryStream
+func (tr *DeliveryStream) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this DeliveryStream
+func (tr *DeliveryStream) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this DeliveryStream
+func (tr *DeliveryStream) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this DeliveryStream
+func (tr *DeliveryStream) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge
the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this DeliveryStream using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *DeliveryStream) LateInitialize(attrs []byte) (bool, error) {
+	params := &DeliveryStreamParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+	opts = append(opts, resource.WithNameFilter("ServerSideEncryption"))
+	opts = append(opts, resource.WithNameFilter("VersionID"))
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *DeliveryStream) GetTerraformSchemaVersion() int {
+	return 1
+}
diff --git a/apis/firehose/v1beta2/zz_deliverystream_types.go b/apis/firehose/v1beta2/zz_deliverystream_types.go
new file mode 100755
index 0000000000..0ea4a7efd3
--- /dev/null
+++ b/apis/firehose/v1beta2/zz_deliverystream_types.go
@@ -0,0 +1,5196 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthenticationConfigurationInitParameters struct { + + // The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE. + Connectivity *string `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // The ARN of the role used to access the Amazon MSK cluster. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type AuthenticationConfigurationObservation struct { + + // The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE. + Connectivity *string `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // The ARN of the role used to access the Amazon MSK cluster. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type AuthenticationConfigurationParameters struct { + + // The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE. + // +kubebuilder:validation:Optional + Connectivity *string `json:"connectivity" tf:"connectivity,omitempty"` + + // The ARN of the role used to access the Amazon MSK cluster. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type CloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type CloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type CloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type CommonAttributesInitParameters struct { + + // The name of the HTTP endpoint common attribute. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the HTTP endpoint common attribute. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CommonAttributesObservation struct { + + // The name of the HTTP endpoint common attribute. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the HTTP endpoint common attribute. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CommonAttributesParameters struct { + + // The name of the HTTP endpoint common attribute. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the HTTP endpoint common attribute. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DataFormatConversionConfigurationInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. See input_format_configuration block below for details. + InputFormatConfiguration *InputFormatConfigurationInitParameters `json:"inputFormatConfiguration,omitempty" tf:"input_format_configuration,omitempty"` + + // Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. See output_format_configuration block below for details. + OutputFormatConfiguration *OutputFormatConfigurationInitParameters `json:"outputFormatConfiguration,omitempty" tf:"output_format_configuration,omitempty"` + + // Specifies the AWS Glue Data Catalog table that contains the column information. See schema_configuration block below for details. + SchemaConfiguration *SchemaConfigurationInitParameters `json:"schemaConfiguration,omitempty" tf:"schema_configuration,omitempty"` +} + +type DataFormatConversionConfigurationObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. See input_format_configuration block below for details. 
+ InputFormatConfiguration *InputFormatConfigurationObservation `json:"inputFormatConfiguration,omitempty" tf:"input_format_configuration,omitempty"` + + // Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. See output_format_configuration block below for details. + OutputFormatConfiguration *OutputFormatConfigurationObservation `json:"outputFormatConfiguration,omitempty" tf:"output_format_configuration,omitempty"` + + // Specifies the AWS Glue Data Catalog table that contains the column information. See schema_configuration block below for details. + SchemaConfiguration *SchemaConfigurationObservation `json:"schemaConfiguration,omitempty" tf:"schema_configuration,omitempty"` +} + +type DataFormatConversionConfigurationParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. See input_format_configuration block below for details. + // +kubebuilder:validation:Optional + InputFormatConfiguration *InputFormatConfigurationParameters `json:"inputFormatConfiguration" tf:"input_format_configuration,omitempty"` + + // Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. See output_format_configuration block below for details. + // +kubebuilder:validation:Optional + OutputFormatConfiguration *OutputFormatConfigurationParameters `json:"outputFormatConfiguration" tf:"output_format_configuration,omitempty"` + + // Specifies the AWS Glue Data Catalog table that contains the column information. See schema_configuration block below for details. 
+ // +kubebuilder:validation:Optional + SchemaConfiguration *SchemaConfigurationParameters `json:"schemaConfiguration" tf:"schema_configuration,omitempty"` +} + +type DeliveryStreamInitParameters struct { + + // – This is the destination to where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, opensearch, opensearchserverless and snowflake. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + DestinationID *string `json:"destinationId,omitempty" tf:"destination_id,omitempty"` + + // Configuration options when destination is elasticsearch. See elasticsearch_configuration block below for details. + ElasticsearchConfiguration *ElasticsearchConfigurationInitParameters `json:"elasticsearchConfiguration,omitempty" tf:"elasticsearch_configuration,omitempty"` + + // Enhanced configuration options for the s3 destination. See extended_s3_configuration block below for details. + ExtendedS3Configuration *ExtendedS3ConfigurationInitParameters `json:"extendedS3Configuration,omitempty" tf:"extended_s3_configuration,omitempty"` + + // Configuration options when destination is http_endpoint. Requires the user to also specify an s3_configuration block. See http_endpoint_configuration block below for details. + HTTPEndpointConfiguration *HTTPEndpointConfigurationInitParameters `json:"httpEndpointConfiguration,omitempty" tf:"http_endpoint_configuration,omitempty"` + + // The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See kinesis_source_configuration block below for details. + KinesisSourceConfiguration *KinesisSourceConfigurationInitParameters `json:"kinesisSourceConfiguration,omitempty" tf:"kinesis_source_configuration,omitempty"` + + // The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See msk_source_configuration block below for details. 
+ MskSourceConfiguration *MskSourceConfigurationInitParameters `json:"mskSourceConfiguration,omitempty" tf:"msk_source_configuration,omitempty"` + + // A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration options when destination is opensearch. See opensearch_configuration block below for details. + OpensearchConfiguration *OpensearchConfigurationInitParameters `json:"opensearchConfiguration,omitempty" tf:"opensearch_configuration,omitempty"` + + // Configuration options when destination is opensearchserverless. See opensearchserverless_configuration block below for details. + OpensearchserverlessConfiguration *OpensearchserverlessConfigurationInitParameters `json:"opensearchserverlessConfiguration,omitempty" tf:"opensearchserverless_configuration,omitempty"` + + // Configuration options when destination is redshift. Requires the user to also specify an s3_configuration block. See redshift_configuration block below for details. + RedshiftConfiguration *RedshiftConfigurationInitParameters `json:"redshiftConfiguration,omitempty" tf:"redshift_configuration,omitempty"` + + // Encrypt at rest options. See server_side_encryption block below for details. + ServerSideEncryption *ServerSideEncryptionInitParameters `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // Configuration options when destination is snowflake. See snowflake_configuration block below for details. + SnowflakeConfiguration *SnowflakeConfigurationInitParameters `json:"snowflakeConfiguration,omitempty" tf:"snowflake_configuration,omitempty"` + + // Configuration options when destination is splunk. See splunk_configuration block below for details. 
+ SplunkConfiguration *SplunkConfigurationInitParameters `json:"splunkConfiguration,omitempty" tf:"splunk_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the table version for the output data schema. Defaults to LATEST. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type DeliveryStreamObservation struct { + + // The Amazon Resource Name (ARN) specifying the Stream + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // – This is the destination to where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, opensearch, opensearchserverless and snowflake. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + DestinationID *string `json:"destinationId,omitempty" tf:"destination_id,omitempty"` + + // Configuration options when destination is elasticsearch. See elasticsearch_configuration block below for details. + ElasticsearchConfiguration *ElasticsearchConfigurationObservation `json:"elasticsearchConfiguration,omitempty" tf:"elasticsearch_configuration,omitempty"` + + // Enhanced configuration options for the s3 destination. See extended_s3_configuration block below for details. + ExtendedS3Configuration *ExtendedS3ConfigurationObservation `json:"extendedS3Configuration,omitempty" tf:"extended_s3_configuration,omitempty"` + + // Configuration options when destination is http_endpoint. Requires the user to also specify an s3_configuration block. See http_endpoint_configuration block below for details. 
+ HTTPEndpointConfiguration *HTTPEndpointConfigurationObservation `json:"httpEndpointConfiguration,omitempty" tf:"http_endpoint_configuration,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See kinesis_source_configuration block below for details. + KinesisSourceConfiguration *KinesisSourceConfigurationObservation `json:"kinesisSourceConfiguration,omitempty" tf:"kinesis_source_configuration,omitempty"` + + // The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See msk_source_configuration block below for details. + MskSourceConfiguration *MskSourceConfigurationObservation `json:"mskSourceConfiguration,omitempty" tf:"msk_source_configuration,omitempty"` + + // A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration options when destination is opensearch. See opensearch_configuration block below for details. + OpensearchConfiguration *OpensearchConfigurationObservation `json:"opensearchConfiguration,omitempty" tf:"opensearch_configuration,omitempty"` + + // Configuration options when destination is opensearchserverless. See opensearchserverless_configuration block below for details. + OpensearchserverlessConfiguration *OpensearchserverlessConfigurationObservation `json:"opensearchserverlessConfiguration,omitempty" tf:"opensearchserverless_configuration,omitempty"` + + // Configuration options when destination is redshift. Requires the user to also specify an s3_configuration block. See redshift_configuration block below for details. 
+ RedshiftConfiguration *RedshiftConfigurationObservation `json:"redshiftConfiguration,omitempty" tf:"redshift_configuration,omitempty"` + + // Encrypt at rest options. See server_side_encryption block below for details. + ServerSideEncryption *ServerSideEncryptionObservation `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // Configuration options when destination is snowflake. See snowflake_configuration block below for details. + SnowflakeConfiguration *SnowflakeConfigurationObservation `json:"snowflakeConfiguration,omitempty" tf:"snowflake_configuration,omitempty"` + + // Configuration options when destination is splunk. See splunk_configuration block below for details. + SplunkConfiguration *SplunkConfigurationObservation `json:"splunkConfiguration,omitempty" tf:"splunk_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Specifies the table version for the output data schema. Defaults to LATEST. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type DeliveryStreamParameters struct { + + // – This is the destination to where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, opensearch, opensearchserverless and snowflake. + // +kubebuilder:validation:Optional + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // +kubebuilder:validation:Optional + DestinationID *string `json:"destinationId,omitempty" tf:"destination_id,omitempty"` + + // Configuration options when destination is elasticsearch. 
See elasticsearch_configuration block below for details. + // +kubebuilder:validation:Optional + ElasticsearchConfiguration *ElasticsearchConfigurationParameters `json:"elasticsearchConfiguration,omitempty" tf:"elasticsearch_configuration,omitempty"` + + // Enhanced configuration options for the s3 destination. See extended_s3_configuration block below for details. + // +kubebuilder:validation:Optional + ExtendedS3Configuration *ExtendedS3ConfigurationParameters `json:"extendedS3Configuration,omitempty" tf:"extended_s3_configuration,omitempty"` + + // Configuration options when destination is http_endpoint. Requires the user to also specify an s3_configuration block. See http_endpoint_configuration block below for details. + // +kubebuilder:validation:Optional + HTTPEndpointConfiguration *HTTPEndpointConfigurationParameters `json:"httpEndpointConfiguration,omitempty" tf:"http_endpoint_configuration,omitempty"` + + // The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See kinesis_source_configuration block below for details. + // +kubebuilder:validation:Optional + KinesisSourceConfiguration *KinesisSourceConfigurationParameters `json:"kinesisSourceConfiguration,omitempty" tf:"kinesis_source_configuration,omitempty"` + + // The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See msk_source_configuration block below for details. + // +kubebuilder:validation:Optional + MskSourceConfiguration *MskSourceConfigurationParameters `json:"mskSourceConfiguration,omitempty" tf:"msk_source_configuration,omitempty"` + + // A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration options when destination is opensearch. See opensearch_configuration block below for details. + // +kubebuilder:validation:Optional + OpensearchConfiguration *OpensearchConfigurationParameters `json:"opensearchConfiguration,omitempty" tf:"opensearch_configuration,omitempty"` + + // Configuration options when destination is opensearchserverless. See opensearchserverless_configuration block below for details. + // +kubebuilder:validation:Optional + OpensearchserverlessConfiguration *OpensearchserverlessConfigurationParameters `json:"opensearchserverlessConfiguration,omitempty" tf:"opensearchserverless_configuration,omitempty"` + + // Configuration options when destination is redshift. Requires the user to also specify an s3_configuration block. See redshift_configuration block below for details. + // +kubebuilder:validation:Optional + RedshiftConfiguration *RedshiftConfigurationParameters `json:"redshiftConfiguration,omitempty" tf:"redshift_configuration,omitempty"` + + // If you don't specify an AWS Region, the default is the current region. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Encrypt at rest options. See server_side_encryption block below for details. + // +kubebuilder:validation:Optional + ServerSideEncryption *ServerSideEncryptionParameters `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // Configuration options when destination is snowflake. See snowflake_configuration block below for details. + // +kubebuilder:validation:Optional + SnowflakeConfiguration *SnowflakeConfigurationParameters `json:"snowflakeConfiguration,omitempty" tf:"snowflake_configuration,omitempty"` + + // Configuration options when destination is splunk. 
See splunk_configuration block below for details. + // +kubebuilder:validation:Optional + SplunkConfiguration *SplunkConfigurationParameters `json:"splunkConfiguration,omitempty" tf:"splunk_configuration,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the table version for the output data schema. Defaults to LATEST. + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type DeserializerInitParameters struct { + + // Specifies the native Hive / HCatalog JsonSerDe. More details below. See hive_json_ser_de block below for details. + HiveJSONSerDe *HiveJSONSerDeInitParameters `json:"hiveJsonSerDe,omitempty" tf:"hive_json_ser_de,omitempty"` + + // Specifies the OpenX SerDe. See open_x_json_ser_de block below for details. + OpenXJSONSerDe *OpenXJSONSerDeInitParameters `json:"openXJsonSerDe,omitempty" tf:"open_x_json_ser_de,omitempty"` +} + +type DeserializerObservation struct { + + // Specifies the native Hive / HCatalog JsonSerDe. More details below. See hive_json_ser_de block below for details. + HiveJSONSerDe *HiveJSONSerDeObservation `json:"hiveJsonSerDe,omitempty" tf:"hive_json_ser_de,omitempty"` + + // Specifies the OpenX SerDe. See open_x_json_ser_de block below for details. + OpenXJSONSerDe *OpenXJSONSerDeObservation `json:"openXJsonSerDe,omitempty" tf:"open_x_json_ser_de,omitempty"` +} + +type DeserializerParameters struct { + + // Specifies the native Hive / HCatalog JsonSerDe. More details below. See hive_json_ser_de block below for details. + // +kubebuilder:validation:Optional + HiveJSONSerDe *HiveJSONSerDeParameters `json:"hiveJsonSerDe,omitempty" tf:"hive_json_ser_de,omitempty"` + + // Specifies the OpenX SerDe. See open_x_json_ser_de block below for details. 
+ // +kubebuilder:validation:Optional + OpenXJSONSerDe *OpenXJSONSerDeParameters `json:"openXJsonSerDe,omitempty" tf:"open_x_json_ser_de,omitempty"` +} + +type DocumentIDOptionsInitParameters struct { + + // The method for setting up document ID. Valid values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID. + DefaultDocumentIDFormat *string `json:"defaultDocumentIdFormat,omitempty" tf:"default_document_id_format,omitempty"` +} + +type DocumentIDOptionsObservation struct { + + // The method for setting up document ID. Valid values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID. + DefaultDocumentIDFormat *string `json:"defaultDocumentIdFormat,omitempty" tf:"default_document_id_format,omitempty"` +} + +type DocumentIDOptionsParameters struct { + + // The method for setting up document ID. Valid values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID. + // +kubebuilder:validation:Optional + DefaultDocumentIDFormat *string `json:"defaultDocumentIdFormat" tf:"default_document_id_format,omitempty"` +} + +type DynamicPartitioningConfigurationInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` +} + +type DynamicPartitioningConfigurationObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). 
After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` +} + +type DynamicPartitioningConfigurationParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + // +kubebuilder:validation:Optional + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` +} + +type ElasticsearchConfigurationInitParameters struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *CloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The endpoint to use when communicating with the cluster. Conflicts with domain_arn. + ClusterEndpoint *string `json:"clusterEndpoint,omitempty" tf:"cluster_endpoint,omitempty"` + + // The ARN of the Amazon ES domain. The pattern needs to be arn:.*. 
Conflicts with cluster_endpoint. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticsearch/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + DomainArn *string `json:"domainArn,omitempty" tf:"domain_arn,omitempty"` + + // Reference to a Domain in elasticsearch to populate domainArn. + // +kubebuilder:validation:Optional + DomainArnRef *v1.Reference `json:"domainArnRef,omitempty" tf:"-"` + + // Selector for a Domain in elasticsearch to populate domainArn. + // +kubebuilder:validation:Optional + DomainArnSelector *v1.Selector `json:"domainArnSelector,omitempty" tf:"-"` + + // The Elasticsearch index name. + IndexName *string `json:"indexName,omitempty" tf:"index_name,omitempty"` + + // The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay. + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty" tf:"index_rotation_period,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *ProcessingConfigurationInitParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. 
The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + S3Configuration *S3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The Elasticsearch type name with maximum length of 100 characters. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // The VPC configuration for the delivery stream to connect to Elastic Search associated with the VPC. See vpc_config block below for details. + VPCConfig *VPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ElasticsearchConfigurationObservation struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s. 
+ BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *CloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The endpoint to use when communicating with the cluster. Conflicts with domain_arn. + ClusterEndpoint *string `json:"clusterEndpoint,omitempty" tf:"cluster_endpoint,omitempty"` + + // The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint. + DomainArn *string `json:"domainArn,omitempty" tf:"domain_arn,omitempty"` + + // The Elasticsearch index name. + IndexName *string `json:"indexName,omitempty" tf:"index_name,omitempty"` + + // The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay. + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty" tf:"index_rotation_period,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *ProcessingConfigurationObservation `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. 
There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + S3Configuration *S3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The Elasticsearch type name with maximum length of 100 characters. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // The VPC configuration for the delivery stream to connect to Elastic Search associated with the VPC. See vpc_config block below for details. + VPCConfig *VPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ElasticsearchConfigurationParameters struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. 
See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *CloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The endpoint to use when communicating with the cluster. Conflicts with domain_arn. + // +kubebuilder:validation:Optional + ClusterEndpoint *string `json:"clusterEndpoint,omitempty" tf:"cluster_endpoint,omitempty"` + + // The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/elasticsearch/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + DomainArn *string `json:"domainArn,omitempty" tf:"domain_arn,omitempty"` + + // Reference to a Domain in elasticsearch to populate domainArn. + // +kubebuilder:validation:Optional + DomainArnRef *v1.Reference `json:"domainArnRef,omitempty" tf:"-"` + + // Selector for a Domain in elasticsearch to populate domainArn. + // +kubebuilder:validation:Optional + DomainArnSelector *v1.Selector `json:"domainArnSelector,omitempty" tf:"-"` + + // The Elasticsearch index name. + // +kubebuilder:validation:Optional + IndexName *string `json:"indexName" tf:"index_name,omitempty"` + + // The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay. + // +kubebuilder:validation:Optional + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty" tf:"index_rotation_period,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. 
+ // +kubebuilder:validation:Optional + ProcessingConfiguration *ProcessingConfigurationParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + // +kubebuilder:validation:Optional + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly. + // +kubebuilder:validation:Optional + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. 
+ // +kubebuilder:validation:Optional + S3Configuration *S3ConfigurationParameters `json:"s3Configuration" tf:"s3_configuration,omitempty"` + + // The Elasticsearch type name with maximum length of 100 characters. + // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // The VPC configuration for the delivery stream to connect to Elastic Search associated with the VPC. See vpc_config block below for details. + // +kubebuilder:validation:Optional + VPCConfig *VPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ExtendedS3ConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type ExtendedS3ConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type ExtendedS3ConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type ExtendedS3ConfigurationInitParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. 
+ CloudwatchLoggingOptions *ExtendedS3ConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // The time zone you prefer. Valid values are UTC or a non-3-letter IANA time zones (for example, America/Los_Angeles). Default value is UTC. + CustomTimeZone *string `json:"customTimeZone,omitempty" tf:"custom_time_zone,omitempty"` + + // Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. See data_format_conversion_configuration block below for details. + DataFormatConversionConfiguration *DataFormatConversionConfigurationInitParameters `json:"dataFormatConversionConfiguration,omitempty" tf:"data_format_conversion_configuration,omitempty"` + + // The configuration for dynamic partitioning. Required when using dynamic partitioning. See dynamic_partitioning_configuration block below for details. + DynamicPartitioningConfiguration *DynamicPartitioningConfigurationInitParameters `json:"dynamicPartitioningConfiguration,omitempty" tf:"dynamic_partitioning_configuration,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // The file extension to override the default file extension (for example, .json). 
+ FileExtension *string `json:"fileExtension,omitempty" tf:"file_extension,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *ExtendedS3ConfigurationProcessingConfigurationInitParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as s3_configuration object. + S3BackupConfiguration *S3BackupConfigurationInitParameters `json:"s3BackupConfiguration,omitempty" tf:"s3_backup_configuration,omitempty"` + + // The Amazon S3 backup mode. Valid values are Disabled and Enabled. 
Default value is Disabled. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` +} + +type ExtendedS3ConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *ExtendedS3ConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // The time zone you prefer. Valid values are UTC or a non-3-letter IANA time zones (for example, America/Los_Angeles). Default value is UTC. + CustomTimeZone *string `json:"customTimeZone,omitempty" tf:"custom_time_zone,omitempty"` + + // Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. See data_format_conversion_configuration block below for details. 
+ DataFormatConversionConfiguration *DataFormatConversionConfigurationObservation `json:"dataFormatConversionConfiguration,omitempty" tf:"data_format_conversion_configuration,omitempty"` + + // The configuration for dynamic partitioning. Required when using dynamic partitioning. See dynamic_partitioning_configuration block below for details. + DynamicPartitioningConfiguration *DynamicPartitioningConfigurationObservation `json:"dynamicPartitioningConfiguration,omitempty" tf:"dynamic_partitioning_configuration,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // The file extension to override the default file extension (for example, .json). + FileExtension *string `json:"fileExtension,omitempty" tf:"file_extension,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *ExtendedS3ConfigurationProcessingConfigurationObservation `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. 
Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as s3_configuration object. + S3BackupConfiguration *S3BackupConfigurationObservation `json:"s3BackupConfiguration,omitempty" tf:"s3_backup_configuration,omitempty"` + + // The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` +} + +type ExtendedS3ConfigurationParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. 
+ // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *ExtendedS3ConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // The time zone you prefer. Valid values are UTC or a non-3-letter IANA time zones (for example, America/Los_Angeles). Default value is UTC. + // +kubebuilder:validation:Optional + CustomTimeZone *string `json:"customTimeZone,omitempty" tf:"custom_time_zone,omitempty"` + + // Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. See data_format_conversion_configuration block below for details. + // +kubebuilder:validation:Optional + DataFormatConversionConfiguration *DataFormatConversionConfigurationParameters `json:"dataFormatConversionConfiguration,omitempty" tf:"data_format_conversion_configuration,omitempty"` + + // The configuration for dynamic partitioning. Required when using dynamic partitioning. See dynamic_partitioning_configuration block below for details. + // +kubebuilder:validation:Optional + DynamicPartitioningConfiguration *DynamicPartitioningConfigurationParameters `json:"dynamicPartitioningConfiguration,omitempty" tf:"dynamic_partitioning_configuration,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. 
+ // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // The file extension to override the default file extension (for example, .json). + // +kubebuilder:validation:Optional + FileExtension *string `json:"fileExtension,omitempty" tf:"file_extension,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + // +kubebuilder:validation:Optional + ProcessingConfiguration *ExtendedS3ConfigurationProcessingConfigurationParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+	// +kubebuilder:validation:Optional
+	RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"`
+
+	// The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as s3_configuration object.
+	// +kubebuilder:validation:Optional
+	S3BackupConfiguration *S3BackupConfigurationParameters `json:"s3BackupConfiguration,omitempty" tf:"s3_backup_configuration,omitempty"`
+
+	// The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.
+	// +kubebuilder:validation:Optional
+	S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"`
+}
+
+type ExtendedS3ConfigurationProcessingConfigurationInitParameters struct {
+
+	// Enables or disables data processing. Defaults to false.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Specifies the data processors as multiple blocks. See processors block below for details.
+	Processors []ProcessingConfigurationProcessorsInitParameters `json:"processors,omitempty" tf:"processors,omitempty"`
+}
+
+type ExtendedS3ConfigurationProcessingConfigurationObservation struct {
+
+	// Enables or disables data processing. Defaults to false.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Specifies the data processors as multiple blocks. See processors block below for details.
+	Processors []ProcessingConfigurationProcessorsObservation `json:"processors,omitempty" tf:"processors,omitempty"`
+}
+
+type ExtendedS3ConfigurationProcessingConfigurationParameters struct {
+
+	// Enables or disables data processing. Defaults to false.
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Specifies the data processors as multiple blocks. See processors block below for details.
+ // +kubebuilder:validation:Optional + Processors []ProcessingConfigurationProcessorsParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type HTTPEndpointConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type HTTPEndpointConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type HTTPEndpointConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type HTTPEndpointConfigurationInitParameters struct { + + // The access key required for Kinesis Firehose to authenticate with the HTTP endpoint selected as the destination. + AccessKeySecretRef *v1.SecretKeySelector `json:"accessKeySecretRef,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes). + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *HTTPEndpointConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The HTTP endpoint name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *HTTPEndpointConfigurationProcessingConfigurationInitParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The request configuration. See request_configuration block below for details. + RequestConfiguration *RequestConfigurationInitParameters `json:"requestConfiguration,omitempty" tf:"request_configuration,omitempty"` + + // Total amount of seconds Firehose spends on retries. This duration starts after the initial attempt fails, It does not include the time periods during which Firehose waits for acknowledgment from the specified destination after each attempt. 
Valid values between 0 and 7200. Default is 300. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs. The pattern needs to be arn:.*. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDataOnly and AllData. Default value is FailedDataOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + S3Configuration *HTTPEndpointConfigurationS3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The HTTP endpoint URL to which Kinesis Firehose sends your data. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type HTTPEndpointConfigurationObservation struct { + + // Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes). + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. 
+ BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *HTTPEndpointConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The HTTP endpoint name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *HTTPEndpointConfigurationProcessingConfigurationObservation `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The request configuration. See request_configuration block below for details. + RequestConfiguration *RequestConfigurationObservation `json:"requestConfiguration,omitempty" tf:"request_configuration,omitempty"` + + // Total amount of seconds Firehose spends on retries. This duration starts after the initial attempt fails, It does not include the time periods during which Firehose waits for acknowledgment from the specified destination after each attempt. Valid values between 0 and 7200. Default is 300. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs. The pattern needs to be arn:.*. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDataOnly and AllData. Default value is FailedDataOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. 
+ S3Configuration *HTTPEndpointConfigurationS3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The HTTP endpoint URL to which Kinesis Firehose sends your data. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type HTTPEndpointConfigurationParameters struct { + + // The access key required for Kinesis Firehose to authenticate with the HTTP endpoint selected as the destination. + // +kubebuilder:validation:Optional + AccessKeySecretRef *v1.SecretKeySelector `json:"accessKeySecretRef,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes). + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *HTTPEndpointConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The HTTP endpoint name. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + // +kubebuilder:validation:Optional + ProcessingConfiguration *HTTPEndpointConfigurationProcessingConfigurationParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The request configuration. See request_configuration block below for details. 
+ // +kubebuilder:validation:Optional + RequestConfiguration *RequestConfigurationParameters `json:"requestConfiguration,omitempty" tf:"request_configuration,omitempty"` + + // Total amount of seconds Firehose spends on retries. This duration starts after the initial attempt fails, It does not include the time periods during which Firehose waits for acknowledgment from the specified destination after each attempt. Valid values between 0 and 7200. Default is 300. + // +kubebuilder:validation:Optional + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs. The pattern needs to be arn:.*. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDataOnly and AllData. Default value is FailedDataOnly. + // +kubebuilder:validation:Optional + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + // +kubebuilder:validation:Optional + S3Configuration *HTTPEndpointConfigurationS3ConfigurationParameters `json:"s3Configuration" tf:"s3_configuration,omitempty"` + + // The HTTP endpoint URL to which Kinesis Firehose sends your data. 
+	// +kubebuilder:validation:Optional
+	URL *string `json:"url" tf:"url,omitempty"`
+}
+
+type HTTPEndpointConfigurationProcessingConfigurationInitParameters struct {
+
+	// Enables or disables data processing. Defaults to false.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Specifies the data processors as multiple blocks. See processors block below for details.
+	Processors []HTTPEndpointConfigurationProcessingConfigurationProcessorsInitParameters `json:"processors,omitempty" tf:"processors,omitempty"`
+}
+
+type HTTPEndpointConfigurationProcessingConfigurationObservation struct {
+
+	// Enables or disables data processing. Defaults to false.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Specifies the data processors as multiple blocks. See processors block below for details.
+	Processors []HTTPEndpointConfigurationProcessingConfigurationProcessorsObservation `json:"processors,omitempty" tf:"processors,omitempty"`
+}
+
+type HTTPEndpointConfigurationProcessingConfigurationParameters struct {
+
+	// Enables or disables data processing. Defaults to false.
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Specifies the data processors as multiple blocks. See processors block below for details.
+	// +kubebuilder:validation:Optional
+	Processors []HTTPEndpointConfigurationProcessingConfigurationProcessorsParameters `json:"processors,omitempty" tf:"processors,omitempty"`
+}
+
+type HTTPEndpointConfigurationProcessingConfigurationProcessorsInitParameters struct {
+
+	// Specifies the processor parameters as multiple blocks. See parameters block below for details.
+	Parameters []ProcessingConfigurationProcessorsParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"`
+
+	// The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type HTTPEndpointConfigurationProcessingConfigurationProcessorsObservation struct {
+
+	// Specifies the processor parameters as multiple blocks. See parameters block below for details.
+	Parameters []ProcessingConfigurationProcessorsParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"`
+
+	// The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type HTTPEndpointConfigurationProcessingConfigurationProcessorsParameters struct {
+
+	// Specifies the processor parameters as multiple blocks. See parameters block below for details.
+	// +kubebuilder:validation:Optional
+	Parameters []ProcessingConfigurationProcessorsParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"`
+
+	// The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
+}
+
+type HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters struct {
+
+	// Enables or disables the logging. Defaults to false.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// The CloudWatch group name for logging. This value is required if enabled is true.
+	LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"`
+
+	// The CloudWatch log stream name for logging. This value is required if enabled is true.
+ LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type HTTPEndpointConfigurationS3ConfigurationInitParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. 
+ // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. 
Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type HTTPEndpointConfigurationS3ConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. 
Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type HTTPEndpointConfigurationS3ConfigurationParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. 
+ // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. 
+ // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type HiveJSONSerDeInitParameters struct { + + // A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default. 
+ TimestampFormats []*string `json:"timestampFormats,omitempty" tf:"timestamp_formats,omitempty"` +} + +type HiveJSONSerDeObservation struct { + + // A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default. + TimestampFormats []*string `json:"timestampFormats,omitempty" tf:"timestamp_formats,omitempty"` +} + +type HiveJSONSerDeParameters struct { + + // A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default. + // +kubebuilder:validation:Optional + TimestampFormats []*string `json:"timestampFormats,omitempty" tf:"timestamp_formats,omitempty"` +} + +type InputFormatConfigurationInitParameters struct { + + // Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. See deserializer block below for details. + Deserializer *DeserializerInitParameters `json:"deserializer,omitempty" tf:"deserializer,omitempty"` +} + +type InputFormatConfigurationObservation struct { + + // Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. See deserializer block below for details. 
+ Deserializer *DeserializerObservation `json:"deserializer,omitempty" tf:"deserializer,omitempty"` +} + +type InputFormatConfigurationParameters struct { + + // Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. See deserializer block below for details. + // +kubebuilder:validation:Optional + Deserializer *DeserializerParameters `json:"deserializer" tf:"deserializer,omitempty"` +} + +type KinesisSourceConfigurationInitParameters struct { + + // The kinesis stream used as the source of the firehose delivery stream. + KinesisStreamArn *string `json:"kinesisStreamArn,omitempty" tf:"kinesis_stream_arn,omitempty"` + + // The ARN of the role that provides access to the source Kinesis stream. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type KinesisSourceConfigurationObservation struct { + + // The kinesis stream used as the source of the firehose delivery stream. + KinesisStreamArn *string `json:"kinesisStreamArn,omitempty" tf:"kinesis_stream_arn,omitempty"` + + // The ARN of the role that provides access to the source Kinesis stream. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type KinesisSourceConfigurationParameters struct { + + // The kinesis stream used as the source of the firehose delivery stream. + // +kubebuilder:validation:Optional + KinesisStreamArn *string `json:"kinesisStreamArn" tf:"kinesis_stream_arn,omitempty"` + + // The ARN of the role that provides access to the source Kinesis stream. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type MskSourceConfigurationInitParameters struct { + + // The authentication configuration of the Amazon MSK cluster. See authentication_configuration block below for details. 
+ AuthenticationConfiguration *AuthenticationConfigurationInitParameters `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // The ARN of the Amazon MSK cluster. + MskClusterArn *string `json:"mskClusterArn,omitempty" tf:"msk_cluster_arn,omitempty"` + + // The topic name within the Amazon MSK cluster. + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` +} + +type MskSourceConfigurationObservation struct { + + // The authentication configuration of the Amazon MSK cluster. See authentication_configuration block below for details. + AuthenticationConfiguration *AuthenticationConfigurationObservation `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // The ARN of the Amazon MSK cluster. + MskClusterArn *string `json:"mskClusterArn,omitempty" tf:"msk_cluster_arn,omitempty"` + + // The topic name within the Amazon MSK cluster. + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` +} + +type MskSourceConfigurationParameters struct { + + // The authentication configuration of the Amazon MSK cluster. See authentication_configuration block below for details. + // +kubebuilder:validation:Optional + AuthenticationConfiguration *AuthenticationConfigurationParameters `json:"authenticationConfiguration" tf:"authentication_configuration,omitempty"` + + // The ARN of the Amazon MSK cluster. + // +kubebuilder:validation:Optional + MskClusterArn *string `json:"mskClusterArn" tf:"msk_cluster_arn,omitempty"` + + // The topic name within the Amazon MSK cluster. + // +kubebuilder:validation:Optional + TopicName *string `json:"topicName" tf:"topic_name,omitempty"` +} + +type OpenXJSONSerDeInitParameters struct { + + // When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them. 
+ CaseInsensitive *bool `json:"caseInsensitive,omitempty" tf:"case_insensitive,omitempty"` + + // A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts. + // +mapType=granular + ColumnToJSONKeyMappings map[string]*string `json:"columnToJsonKeyMappings,omitempty" tf:"column_to_json_key_mappings,omitempty"` + + // When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false. + ConvertDotsInJSONKeysToUnderscores *bool `json:"convertDotsInJsonKeysToUnderscores,omitempty" tf:"convert_dots_in_json_keys_to_underscores,omitempty"` +} + +type OpenXJSONSerDeObservation struct { + + // When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them. + CaseInsensitive *bool `json:"caseInsensitive,omitempty" tf:"case_insensitive,omitempty"` + + // A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts. + // +mapType=granular + ColumnToJSONKeyMappings map[string]*string `json:"columnToJsonKeyMappings,omitempty" tf:"column_to_json_key_mappings,omitempty"` + + // When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. 
This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false. + ConvertDotsInJSONKeysToUnderscores *bool `json:"convertDotsInJsonKeysToUnderscores,omitempty" tf:"convert_dots_in_json_keys_to_underscores,omitempty"` +} + +type OpenXJSONSerDeParameters struct { + + // When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them. + // +kubebuilder:validation:Optional + CaseInsensitive *bool `json:"caseInsensitive,omitempty" tf:"case_insensitive,omitempty"` + + // A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts. + // +kubebuilder:validation:Optional + // +mapType=granular + ColumnToJSONKeyMappings map[string]*string `json:"columnToJsonKeyMappings,omitempty" tf:"column_to_json_key_mappings,omitempty"` + + // When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false. + // +kubebuilder:validation:Optional + ConvertDotsInJSONKeysToUnderscores *bool `json:"convertDotsInJsonKeysToUnderscores,omitempty" tf:"convert_dots_in_json_keys_to_underscores,omitempty"` +} + +type OpensearchConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. 
This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchConfigurationInitParameters struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. 
The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *OpensearchConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The endpoint to use when communicating with the cluster. Conflicts with domain_arn. + ClusterEndpoint *string `json:"clusterEndpoint,omitempty" tf:"cluster_endpoint,omitempty"` + + // The method for setting up document ID. See [document_id_options block] below for details. + DocumentIDOptions *DocumentIDOptionsInitParameters `json:"documentIdOptions,omitempty" tf:"document_id_options,omitempty"` + + // The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearch/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + DomainArn *string `json:"domainArn,omitempty" tf:"domain_arn,omitempty"` + + // Reference to a Domain in opensearch to populate domainArn. + // +kubebuilder:validation:Optional + DomainArnRef *v1.Reference `json:"domainArnRef,omitempty" tf:"-"` + + // Selector for a Domain in opensearch to populate domainArn. + // +kubebuilder:validation:Optional + DomainArnSelector *v1.Selector `json:"domainArnSelector,omitempty" tf:"-"` + + // The OpenSearch index name. + IndexName *string `json:"indexName,omitempty" tf:"index_name,omitempty"` + + // The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay. 
+ IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty" tf:"index_rotation_period,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *OpensearchConfigurationProcessingConfigurationInitParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeDomain, DescribeDomains, and DescribeDomainConfig. The pattern needs to be arn:.*. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. 
+ S3Configuration *OpensearchConfigurationS3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. See vpc_config block below for details. + VPCConfig *OpensearchConfigurationVPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type OpensearchConfigurationObservation struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *OpensearchConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The endpoint to use when communicating with the cluster. Conflicts with domain_arn. + ClusterEndpoint *string `json:"clusterEndpoint,omitempty" tf:"cluster_endpoint,omitempty"` + + // The method for setting up document ID. See [document_id_options block] below for details. + DocumentIDOptions *DocumentIDOptionsObservation `json:"documentIdOptions,omitempty" tf:"document_id_options,omitempty"` + + // The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint. 
+ DomainArn *string `json:"domainArn,omitempty" tf:"domain_arn,omitempty"` + + // The OpenSearch index name. + IndexName *string `json:"indexName,omitempty" tf:"index_name,omitempty"` + + // The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay. + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty" tf:"index_rotation_period,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *OpensearchConfigurationProcessingConfigurationObservation `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeDomain, DescribeDomains, and DescribeDomainConfig. The pattern needs to be arn:.*. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. 
+ S3Configuration *OpensearchConfigurationS3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. See vpc_config block below for details. + VPCConfig *OpensearchConfigurationVPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type OpensearchConfigurationParameters struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *OpensearchConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The endpoint to use when communicating with the cluster. Conflicts with domain_arn. + // +kubebuilder:validation:Optional + ClusterEndpoint *string `json:"clusterEndpoint,omitempty" tf:"cluster_endpoint,omitempty"` + + // The method for setting up document ID. See [document_id_options block] below for details. 
+ // +kubebuilder:validation:Optional + DocumentIDOptions *DocumentIDOptionsParameters `json:"documentIdOptions,omitempty" tf:"document_id_options,omitempty"` + + // The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearch/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + DomainArn *string `json:"domainArn,omitempty" tf:"domain_arn,omitempty"` + + // Reference to a Domain in opensearch to populate domainArn. + // +kubebuilder:validation:Optional + DomainArnRef *v1.Reference `json:"domainArnRef,omitempty" tf:"-"` + + // Selector for a Domain in opensearch to populate domainArn. + // +kubebuilder:validation:Optional + DomainArnSelector *v1.Selector `json:"domainArnSelector,omitempty" tf:"-"` + + // The OpenSearch index name. + // +kubebuilder:validation:Optional + IndexName *string `json:"indexName" tf:"index_name,omitempty"` + + // The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay. + // +kubebuilder:validation:Optional + IndexRotationPeriod *string `json:"indexRotationPeriod,omitempty" tf:"index_rotation_period,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + // +kubebuilder:validation:Optional + ProcessingConfiguration *OpensearchConfigurationProcessingConfigurationParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). 
After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + // +kubebuilder:validation:Optional + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeDomain, DescribeDomains, and DescribeDomainConfig. The pattern needs to be arn:.*. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly. + // +kubebuilder:validation:Optional + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + // +kubebuilder:validation:Optional + S3Configuration *OpensearchConfigurationS3ConfigurationParameters `json:"s3Configuration" tf:"s3_configuration,omitempty"` + + // The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty. 
+ // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. See vpc_config block below for details. + // +kubebuilder:validation:Optional + VPCConfig *OpensearchConfigurationVPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type OpensearchConfigurationProcessingConfigurationInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + Processors []OpensearchConfigurationProcessingConfigurationProcessorsInitParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type OpensearchConfigurationProcessingConfigurationObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + Processors []OpensearchConfigurationProcessingConfigurationProcessorsObservation `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type OpensearchConfigurationProcessingConfigurationParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + // +kubebuilder:validation:Optional + Processors []OpensearchConfigurationProcessingConfigurationProcessorsParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type OpensearchConfigurationProcessingConfigurationProcessorsInitParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. 
+ Parameters []OpensearchConfigurationProcessingConfigurationProcessorsParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OpensearchConfigurationProcessingConfigurationProcessorsObservation struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []OpensearchConfigurationProcessingConfigurationProcessorsParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OpensearchConfigurationProcessingConfigurationProcessorsParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + // +kubebuilder:validation:Optional + Parameters []OpensearchConfigurationProcessingConfigurationProcessorsParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type OpensearchConfigurationProcessingConfigurationProcessorsParametersInitParameters struct { + + // Parameter name. 
Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type OpensearchConfigurationProcessingConfigurationProcessorsParametersObservation struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type OpensearchConfigurationProcessingConfigurationProcessorsParametersParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + ParameterName *string `json:"parameterName" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. 
+ // +kubebuilder:validation:Optional + ParameterValue *string `json:"parameterValue" tf:"parameter_value,omitempty"` +} + +type OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchConfigurationS3ConfigurationInitParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. 
Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type OpensearchConfigurationS3ConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. 
+ BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. 
Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type OpensearchConfigurationS3ConfigurationParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. 
+ // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type OpensearchConfigurationVPCConfigInitParameters struct { + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // A list of security group IDs to associate with Kinesis Firehose. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs to associate with Kinesis Firehose. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type OpensearchConfigurationVPCConfigObservation struct { + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // A list of security group IDs to associate with Kinesis Firehose. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs to associate with Kinesis Firehose. 
+ // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type OpensearchConfigurationVPCConfigParameters struct { + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // A list of security group IDs to associate with Kinesis Firehose. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs to associate with Kinesis Firehose. + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"` +} + +type OpensearchserverlessConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchserverlessConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchserverlessConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchserverlessConfigurationInitParameters struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. 
See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *OpensearchserverlessConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearchserverless/v1beta1.Collection + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("collection_endpoint",true) + CollectionEndpoint *string `json:"collectionEndpoint,omitempty" tf:"collection_endpoint,omitempty"` + + // Reference to a Collection in opensearchserverless to populate collectionEndpoint. + // +kubebuilder:validation:Optional + CollectionEndpointRef *v1.Reference `json:"collectionEndpointRef,omitempty" tf:"-"` + + // Selector for a Collection in opensearchserverless to populate collectionEndpoint. + // +kubebuilder:validation:Optional + CollectionEndpointSelector *v1.Selector `json:"collectionEndpointSelector,omitempty" tf:"-"` + + // The Serverless offering for Amazon OpenSearch Service index name. + IndexName *string `json:"indexName,omitempty" tf:"index_name,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *OpensearchserverlessConfigurationProcessingConfigurationInitParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time, in seconds between 0 to 7200, during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. 
+ RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents. The pattern needs to be arn:.*. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + S3Configuration *OpensearchserverlessConfigurationS3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The VPC configuration for the delivery stream to connect to OpenSearch Serverless associated with the VPC. See vpc_config block below for details. + VPCConfig *OpensearchserverlessConfigurationVPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type OpensearchserverlessConfigurationObservation struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s. 
+ BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *OpensearchserverlessConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service. + CollectionEndpoint *string `json:"collectionEndpoint,omitempty" tf:"collection_endpoint,omitempty"` + + // The Serverless offering for Amazon OpenSearch Service index name. + IndexName *string `json:"indexName,omitempty" tf:"index_name,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *OpensearchserverlessConfigurationProcessingConfigurationObservation `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time, in seconds between 0 to 7200, during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents. The pattern needs to be arn:.*. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + S3Configuration *OpensearchserverlessConfigurationS3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The VPC configuration for the delivery stream to connect to OpenSearch Serverless associated with the VPC. See vpc_config block below for details. + VPCConfig *OpensearchserverlessConfigurationVPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type OpensearchserverlessConfigurationParameters struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *OpensearchserverlessConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearchserverless/v1beta1.Collection + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("collection_endpoint",true) + // +kubebuilder:validation:Optional + CollectionEndpoint *string `json:"collectionEndpoint,omitempty" tf:"collection_endpoint,omitempty"` + + // Reference to a Collection in opensearchserverless to populate collectionEndpoint. + // +kubebuilder:validation:Optional + CollectionEndpointRef *v1.Reference `json:"collectionEndpointRef,omitempty" tf:"-"` + + // Selector for a Collection in opensearchserverless to populate collectionEndpoint. + // +kubebuilder:validation:Optional + CollectionEndpointSelector *v1.Selector `json:"collectionEndpointSelector,omitempty" tf:"-"` + + // The Serverless offering for Amazon OpenSearch Service index name. + // +kubebuilder:validation:Optional + IndexName *string `json:"indexName" tf:"index_name,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + // +kubebuilder:validation:Optional + ProcessingConfiguration *OpensearchserverlessConfigurationProcessingConfigurationParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time, in seconds between 0 to 7200, during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. 
+ // +kubebuilder:validation:Optional + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents. The pattern needs to be arn:.*. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly. + // +kubebuilder:validation:Optional + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + // +kubebuilder:validation:Optional + S3Configuration *OpensearchserverlessConfigurationS3ConfigurationParameters `json:"s3Configuration" tf:"s3_configuration,omitempty"` + + // The VPC configuration for the delivery stream to connect to OpenSearch Serverless associated with the VPC. See vpc_config block below for details. + // +kubebuilder:validation:Optional + VPCConfig *OpensearchserverlessConfigurationVPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type OpensearchserverlessConfigurationProcessingConfigurationInitParameters struct { + + // Enables or disables the logging. Defaults to false. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + Processors []OpensearchserverlessConfigurationProcessingConfigurationProcessorsInitParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type OpensearchserverlessConfigurationProcessingConfigurationObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + Processors []OpensearchserverlessConfigurationProcessingConfigurationProcessorsObservation `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type OpensearchserverlessConfigurationProcessingConfigurationParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + // +kubebuilder:validation:Optional + Processors []OpensearchserverlessConfigurationProcessingConfigurationProcessorsParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type OpensearchserverlessConfigurationProcessingConfigurationProcessorsInitParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OpensearchserverlessConfigurationProcessingConfigurationProcessorsObservation struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OpensearchserverlessConfigurationProcessingConfigurationProcessorsParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + // +kubebuilder:validation:Optional + Parameters []OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersInitParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). 
When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersObservation struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + ParameterName *string `json:"parameterName" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + // +kubebuilder:validation:Optional + ParameterValue *string `json:"parameterValue" tf:"parameter_value,omitempty"` +} + +type OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type OpensearchserverlessConfigurationS3ConfigurationInitParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. 
Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type OpensearchserverlessConfigurationS3ConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. 
+ BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. 
Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type OpensearchserverlessConfigurationS3ConfigurationParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. 
+ // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type OpensearchserverlessConfigurationVPCConfigInitParameters struct { + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // A list of security group IDs to associate with Kinesis Firehose. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs to associate with Kinesis Firehose. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type OpensearchserverlessConfigurationVPCConfigObservation struct { + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // A list of security group IDs to associate with Kinesis Firehose. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs to associate with Kinesis Firehose. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type OpensearchserverlessConfigurationVPCConfigParameters struct { + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // A list of security group IDs to associate with Kinesis Firehose. 
+ // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs to associate with Kinesis Firehose. + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"` +} + +type OrcSerDeInitParameters struct { + + // The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations. + BlockSizeBytes *float64 `json:"blockSizeBytes,omitempty" tf:"block_size_bytes,omitempty"` + + // A list of column names for which you want Kinesis Data Firehose to create bloom filters. + BloomFilterColumns []*string `json:"bloomFilterColumns,omitempty" tf:"bloom_filter_columns,omitempty"` + + // The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1. + BloomFilterFalsePositiveProbability *float64 `json:"bloomFilterFalsePositiveProbability,omitempty" tf:"bloom_filter_false_positive_probability,omitempty"` + + // The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed. + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1. 
+ DictionaryKeyThreshold *float64 `json:"dictionaryKeyThreshold,omitempty" tf:"dictionary_key_threshold,omitempty"` + + // Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false. + EnablePadding *bool `json:"enablePadding,omitempty" tf:"enable_padding,omitempty"` + + // The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12. + FormatVersion *string `json:"formatVersion,omitempty" tf:"format_version,omitempty"` + + // A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false. + PaddingTolerance *float64 `json:"paddingTolerance,omitempty" tf:"padding_tolerance,omitempty"` + + // The number of rows between index entries. The default is 10000 and the minimum is 1000. + RowIndexStride *float64 `json:"rowIndexStride,omitempty" tf:"row_index_stride,omitempty"` + + // The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB. + StripeSizeBytes *float64 `json:"stripeSizeBytes,omitempty" tf:"stripe_size_bytes,omitempty"` +} + +type OrcSerDeObservation struct { + + // The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. 
Kinesis Data Firehose uses this value for padding calculations. + BlockSizeBytes *float64 `json:"blockSizeBytes,omitempty" tf:"block_size_bytes,omitempty"` + + // A list of column names for which you want Kinesis Data Firehose to create bloom filters. + BloomFilterColumns []*string `json:"bloomFilterColumns,omitempty" tf:"bloom_filter_columns,omitempty"` + + // The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1. + BloomFilterFalsePositiveProbability *float64 `json:"bloomFilterFalsePositiveProbability,omitempty" tf:"bloom_filter_false_positive_probability,omitempty"` + + // The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed. + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1. + DictionaryKeyThreshold *float64 `json:"dictionaryKeyThreshold,omitempty" tf:"dictionary_key_threshold,omitempty"` + + // Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false. + EnablePadding *bool `json:"enablePadding,omitempty" tf:"enable_padding,omitempty"` + + // The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12. + FormatVersion *string `json:"formatVersion,omitempty" tf:"format_version,omitempty"` + + // A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. 
The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false. + PaddingTolerance *float64 `json:"paddingTolerance,omitempty" tf:"padding_tolerance,omitempty"` + + // The number of rows between index entries. The default is 10000 and the minimum is 1000. + RowIndexStride *float64 `json:"rowIndexStride,omitempty" tf:"row_index_stride,omitempty"` + + // The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB. + StripeSizeBytes *float64 `json:"stripeSizeBytes,omitempty" tf:"stripe_size_bytes,omitempty"` +} + +type OrcSerDeParameters struct { + + // The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations. + // +kubebuilder:validation:Optional + BlockSizeBytes *float64 `json:"blockSizeBytes,omitempty" tf:"block_size_bytes,omitempty"` + + // A list of column names for which you want Kinesis Data Firehose to create bloom filters. + // +kubebuilder:validation:Optional + BloomFilterColumns []*string `json:"bloomFilterColumns,omitempty" tf:"bloom_filter_columns,omitempty"` + + // The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1. 
+ // +kubebuilder:validation:Optional + BloomFilterFalsePositiveProbability *float64 `json:"bloomFilterFalsePositiveProbability,omitempty" tf:"bloom_filter_false_positive_probability,omitempty"` + + // The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed. + // +kubebuilder:validation:Optional + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1. + // +kubebuilder:validation:Optional + DictionaryKeyThreshold *float64 `json:"dictionaryKeyThreshold,omitempty" tf:"dictionary_key_threshold,omitempty"` + + // Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false. + // +kubebuilder:validation:Optional + EnablePadding *bool `json:"enablePadding,omitempty" tf:"enable_padding,omitempty"` + + // The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12. + // +kubebuilder:validation:Optional + FormatVersion *string `json:"formatVersion,omitempty" tf:"format_version,omitempty"` + + // A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. 
In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter when enable_padding is false. + // +kubebuilder:validation:Optional + PaddingTolerance *float64 `json:"paddingTolerance,omitempty" tf:"padding_tolerance,omitempty"` + + // The number of rows between index entries. The default is 10000 and the minimum is 1000. + // +kubebuilder:validation:Optional + RowIndexStride *float64 `json:"rowIndexStride,omitempty" tf:"row_index_stride,omitempty"` + + // The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB. + // +kubebuilder:validation:Optional + StripeSizeBytes *float64 `json:"stripeSizeBytes,omitempty" tf:"stripe_size_bytes,omitempty"` +} + +type OutputFormatConfigurationInitParameters struct { + + // Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. See serializer block below for details. + Serializer *SerializerInitParameters `json:"serializer,omitempty" tf:"serializer,omitempty"` +} + +type OutputFormatConfigurationObservation struct { + + // Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. See serializer block below for details. + Serializer *SerializerObservation `json:"serializer,omitempty" tf:"serializer,omitempty"` +} + +type OutputFormatConfigurationParameters struct { + + // Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. See serializer block below for details. + // +kubebuilder:validation:Optional + Serializer *SerializerParameters `json:"serializer" tf:"serializer,omitempty"` +} + +type ParametersInitParameters struct { + + // Parameter name. 
Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type ParametersObservation struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type ParametersParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + ParameterName *string `json:"parameterName" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. 
+ // +kubebuilder:validation:Optional + ParameterValue *string `json:"parameterValue" tf:"parameter_value,omitempty"` +} + +type ParquetSerDeInitParameters struct { + + // The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations. + BlockSizeBytes *float64 `json:"blockSizeBytes,omitempty" tf:"block_size_bytes,omitempty"` + + // The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed. + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // Indicates whether to enable dictionary compression. + EnableDictionaryCompression *bool `json:"enableDictionaryCompression,omitempty" tf:"enable_dictionary_compression,omitempty"` + + // The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0. + MaxPaddingBytes *float64 `json:"maxPaddingBytes,omitempty" tf:"max_padding_bytes,omitempty"` + + // The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB. + PageSizeBytes *float64 `json:"pageSizeBytes,omitempty" tf:"page_size_bytes,omitempty"` + + // Indicates the version of row format to output. The possible values are V1 and V2. The default is V1. + WriterVersion *string `json:"writerVersion,omitempty" tf:"writer_version,omitempty"` +} + +type ParquetSerDeObservation struct { + + // The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. 
The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations. + BlockSizeBytes *float64 `json:"blockSizeBytes,omitempty" tf:"block_size_bytes,omitempty"` + + // The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed. + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // Indicates whether to enable dictionary compression. + EnableDictionaryCompression *bool `json:"enableDictionaryCompression,omitempty" tf:"enable_dictionary_compression,omitempty"` + + // The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0. + MaxPaddingBytes *float64 `json:"maxPaddingBytes,omitempty" tf:"max_padding_bytes,omitempty"` + + // The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB. + PageSizeBytes *float64 `json:"pageSizeBytes,omitempty" tf:"page_size_bytes,omitempty"` + + // Indicates the version of row format to output. The possible values are V1 and V2. The default is V1. + WriterVersion *string `json:"writerVersion,omitempty" tf:"writer_version,omitempty"` +} + +type ParquetSerDeParameters struct { + + // The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations. + // +kubebuilder:validation:Optional + BlockSizeBytes *float64 `json:"blockSizeBytes,omitempty" tf:"block_size_bytes,omitempty"` + + // The compression code to use over data blocks. 
The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed. + // +kubebuilder:validation:Optional + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // Indicates whether to enable dictionary compression. + // +kubebuilder:validation:Optional + EnableDictionaryCompression *bool `json:"enableDictionaryCompression,omitempty" tf:"enable_dictionary_compression,omitempty"` + + // The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0. + // +kubebuilder:validation:Optional + MaxPaddingBytes *float64 `json:"maxPaddingBytes,omitempty" tf:"max_padding_bytes,omitempty"` + + // The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB. + // +kubebuilder:validation:Optional + PageSizeBytes *float64 `json:"pageSizeBytes,omitempty" tf:"page_size_bytes,omitempty"` + + // Indicates the version of row format to output. The possible values are V1 and V2. The default is V1. + // +kubebuilder:validation:Optional + WriterVersion *string `json:"writerVersion,omitempty" tf:"writer_version,omitempty"` +} + +type ProcessingConfigurationInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + Processors []ProcessorsInitParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type ProcessingConfigurationObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. 
See processors block below for details. + Processors []ProcessorsObservation `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type ProcessingConfigurationParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + // +kubebuilder:validation:Optional + Processors []ProcessorsParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type ProcessingConfigurationProcessorsInitParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []ProcessorsParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProcessingConfigurationProcessorsObservation struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []ProcessorsParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProcessingConfigurationProcessorsParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. 
+ // +kubebuilder:validation:Optional + Parameters []ProcessorsParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ProcessingConfigurationProcessorsParametersInitParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type ProcessingConfigurationProcessorsParametersObservation struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type ProcessingConfigurationProcessorsParametersParameters struct { + + // Parameter name. 
Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + ParameterName *string `json:"parameterName" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + // +kubebuilder:validation:Optional + ParameterValue *string `json:"parameterValue" tf:"parameter_value,omitempty"` +} + +type ProcessorsInitParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []ParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProcessorsObservation struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []ParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProcessorsParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + // +kubebuilder:validation:Optional + Parameters []ParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. 
Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ProcessorsParametersInitParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type ProcessorsParametersObservation struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type ProcessorsParametersParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. 
+ // +kubebuilder:validation:Optional + ParameterName *string `json:"parameterName" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + // +kubebuilder:validation:Optional + ParameterValue *string `json:"parameterValue" tf:"parameter_value,omitempty"` +} + +type RedshiftConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type RedshiftConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type RedshiftConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type RedshiftConfigurationInitParameters struct { + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *RedshiftConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The jdbcurl of the redshift cluster. + ClusterJdbcurl *string `json:"clusterJdbcurl,omitempty" tf:"cluster_jdbcurl,omitempty"` + + // Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. For valid values, see the AWS documentation + CopyOptions *string `json:"copyOptions,omitempty" tf:"copy_options,omitempty"` + + // The data table columns that will be targeted by the copy command. + DataTableColumns *string `json:"dataTableColumns,omitempty" tf:"data_table_columns,omitempty"` + + // The name of the table in the redshift cluster that the s3 bucket will copy to. + DataTableName *string `json:"dataTableName,omitempty" tf:"data_table_name,omitempty"` + + // The password for the username above. + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *RedshiftConfigurationProcessingConfigurationInitParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. 
The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The arn of the role the stream assumes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as s3_configuration object. + S3BackupConfiguration *RedshiftConfigurationS3BackupConfigurationInitParameters `json:"s3BackupConfiguration,omitempty" tf:"s3_backup_configuration,omitempty"` + + // The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration below for details. + S3Configuration *RedshiftConfigurationS3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type RedshiftConfigurationObservation struct { + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *RedshiftConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The jdbcurl of the redshift cluster. + ClusterJdbcurl *string `json:"clusterJdbcurl,omitempty" tf:"cluster_jdbcurl,omitempty"` + + // Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. For valid values, see the AWS documentation + CopyOptions *string `json:"copyOptions,omitempty" tf:"copy_options,omitempty"` + + // The data table columns that will be targeted by the copy command. + DataTableColumns *string `json:"dataTableColumns,omitempty" tf:"data_table_columns,omitempty"` + + // The name of the table in the redshift cluster that the s3 bucket will copy to. + DataTableName *string `json:"dataTableName,omitempty" tf:"data_table_name,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *RedshiftConfigurationProcessingConfigurationObservation `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The arn of the role the stream assumes. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as s3_configuration object. + S3BackupConfiguration *RedshiftConfigurationS3BackupConfigurationObservation `json:"s3BackupConfiguration,omitempty" tf:"s3_backup_configuration,omitempty"` + + // The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration below for details. + S3Configuration *RedshiftConfigurationS3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type RedshiftConfigurationParameters struct { + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *RedshiftConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The jdbcurl of the redshift cluster. + // +kubebuilder:validation:Optional + ClusterJdbcurl *string `json:"clusterJdbcurl" tf:"cluster_jdbcurl,omitempty"` + + // Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. 
For valid values, see the AWS documentation + // +kubebuilder:validation:Optional + CopyOptions *string `json:"copyOptions,omitempty" tf:"copy_options,omitempty"` + + // The data table columns that will be targeted by the copy command. + // +kubebuilder:validation:Optional + DataTableColumns *string `json:"dataTableColumns,omitempty" tf:"data_table_columns,omitempty"` + + // The name of the table in the redshift cluster that the s3 bucket will copy to. + // +kubebuilder:validation:Optional + DataTableName *string `json:"dataTableName" tf:"data_table_name,omitempty"` + + // The password for the username above. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The data processing configuration. See processing_configuration block below for details. + // +kubebuilder:validation:Optional + ProcessingConfiguration *RedshiftConfigurationProcessingConfigurationParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value. + // +kubebuilder:validation:Optional + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The arn of the role the stream assumes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as s3_configuration object. + // +kubebuilder:validation:Optional + S3BackupConfiguration *RedshiftConfigurationS3BackupConfigurationParameters `json:"s3BackupConfiguration,omitempty" tf:"s3_backup_configuration,omitempty"` + + // The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled. + // +kubebuilder:validation:Optional + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration below for details. + // +kubebuilder:validation:Optional + S3Configuration *RedshiftConfigurationS3ConfigurationParameters `json:"s3Configuration" tf:"s3_configuration,omitempty"` + + // The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type RedshiftConfigurationProcessingConfigurationInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. 
+ Processors []RedshiftConfigurationProcessingConfigurationProcessorsInitParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type RedshiftConfigurationProcessingConfigurationObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + Processors []RedshiftConfigurationProcessingConfigurationProcessorsObservation `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type RedshiftConfigurationProcessingConfigurationParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + // +kubebuilder:validation:Optional + Processors []RedshiftConfigurationProcessingConfigurationProcessorsParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type RedshiftConfigurationProcessingConfigurationProcessorsInitParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []RedshiftConfigurationProcessingConfigurationProcessorsParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RedshiftConfigurationProcessingConfigurationProcessorsObservation struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. 
+ Parameters []RedshiftConfigurationProcessingConfigurationProcessorsParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RedshiftConfigurationProcessingConfigurationProcessorsParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + // +kubebuilder:validation:Optional + Parameters []RedshiftConfigurationProcessingConfigurationProcessorsParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RedshiftConfigurationProcessingConfigurationProcessorsParametersInitParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type RedshiftConfigurationProcessingConfigurationProcessorsParametersObservation struct { + + // Parameter name. 
Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type RedshiftConfigurationProcessingConfigurationProcessorsParametersParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + ParameterName *string `json:"parameterName" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + // +kubebuilder:validation:Optional + ParameterValue *string `json:"parameterValue" tf:"parameter_value,omitempty"` +} + +type RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type RedshiftConfigurationS3BackupConfigurationInitParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. 
+ // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. 
Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type RedshiftConfigurationS3BackupConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. 
Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type RedshiftConfigurationS3BackupConfigurationParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. 
+ // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. 
+ // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type RedshiftConfigurationS3ConfigurationInitParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. 
+ // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. 
Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type RedshiftConfigurationS3ConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. 
Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type RedshiftConfigurationS3ConfigurationParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. 
+ // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. 
+ // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type RequestConfigurationInitParameters struct { + + // Describes the metadata sent to the HTTP endpoint destination. See common_attributes block below for details. + CommonAttributes []CommonAttributesInitParameters `json:"commonAttributes,omitempty" tf:"common_attributes,omitempty"` + + // Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. Valid values are NONE and GZIP. Default value is NONE. + ContentEncoding *string `json:"contentEncoding,omitempty" tf:"content_encoding,omitempty"` +} + +type RequestConfigurationObservation struct { + + // Describes the metadata sent to the HTTP endpoint destination. 
See common_attributes block below for details. + CommonAttributes []CommonAttributesObservation `json:"commonAttributes,omitempty" tf:"common_attributes,omitempty"` + + // Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. Valid values are NONE and GZIP. Default value is NONE. + ContentEncoding *string `json:"contentEncoding,omitempty" tf:"content_encoding,omitempty"` +} + +type RequestConfigurationParameters struct { + + // Describes the metadata sent to the HTTP endpoint destination. See common_attributes block below for details. + // +kubebuilder:validation:Optional + CommonAttributes []CommonAttributesParameters `json:"commonAttributes,omitempty" tf:"common_attributes,omitempty"` + + // Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. Valid values are NONE and GZIP. Default value is NONE. + // +kubebuilder:validation:Optional + ContentEncoding *string `json:"contentEncoding,omitempty" tf:"content_encoding,omitempty"` +} + +type S3BackupConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type S3BackupConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type S3BackupConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type S3BackupConfigurationInitParameters struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *S3BackupConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. 
Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type S3BackupConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. 
+ BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *S3BackupConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. 
Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type S3BackupConfigurationParameters struct { + + // The ARN of the S3 bucket + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *S3BackupConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. 
+ // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type S3ConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type S3ConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type S3ConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type S3ConfigurationInitParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. 
+ CloudwatchLoggingOptions *S3ConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type S3ConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *S3ConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. 
You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type S3ConfigurationParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. 
+ // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *S3ConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. 
Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type SchemaConfigurationInitParameters struct { + + // The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Specifies the name of the AWS Glue database that contains the schema for the output data. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Specifies the AWS Glue table that contains the column information that constitutes your data schema. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogTable + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Reference to a CatalogTable in glue to populate tableName. + // +kubebuilder:validation:Optional + TableNameRef *v1.Reference `json:"tableNameRef,omitempty" tf:"-"` + + // Selector for a CatalogTable in glue to populate tableName. + // +kubebuilder:validation:Optional + TableNameSelector *v1.Selector `json:"tableNameSelector,omitempty" tf:"-"` + + // Specifies the table version for the output data schema. Defaults to LATEST. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type SchemaConfigurationObservation struct { + + // The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Specifies the name of the AWS Glue database that contains the schema for the output data. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // If you don't specify an AWS Region, the default is the current region. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Specifies the AWS Glue table that contains the column information that constitutes your data schema. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Specifies the table version for the output data schema. Defaults to LATEST. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type SchemaConfigurationParameters struct { + + // The ID of the AWS Glue Data Catalog. 
If you don't supply this, the AWS account ID is used by default. + // +kubebuilder:validation:Optional + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Specifies the name of the AWS Glue database that contains the schema for the output data. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // If you don't specify an AWS Region, the default is the current region. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Specifies the AWS Glue table that contains the column information that constitutes your data schema. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogTable + // +kubebuilder:validation:Optional + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Reference to a CatalogTable in glue to populate tableName. + // +kubebuilder:validation:Optional + TableNameRef *v1.Reference `json:"tableNameRef,omitempty" tf:"-"` + + // Selector for a CatalogTable in glue to populate tableName. 
+ // +kubebuilder:validation:Optional + TableNameSelector *v1.Selector `json:"tableNameSelector,omitempty" tf:"-"` + + // Specifies the table version for the output data schema. Defaults to LATEST. + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type SerializerInitParameters struct { + + // Specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. See orc_ser_de block below for details. + OrcSerDe *OrcSerDeInitParameters `json:"orcSerDe,omitempty" tf:"orc_ser_de,omitempty"` + + // Specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below. + ParquetSerDe *ParquetSerDeInitParameters `json:"parquetSerDe,omitempty" tf:"parquet_ser_de,omitempty"` +} + +type SerializerObservation struct { + + // Specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. See orc_ser_de block below for details. + OrcSerDe *OrcSerDeObservation `json:"orcSerDe,omitempty" tf:"orc_ser_de,omitempty"` + + // Specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below. + ParquetSerDe *ParquetSerDeObservation `json:"parquetSerDe,omitempty" tf:"parquet_ser_de,omitempty"` +} + +type SerializerParameters struct { + + // Specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. See orc_ser_de block below for details. + // +kubebuilder:validation:Optional + OrcSerDe *OrcSerDeParameters `json:"orcSerDe,omitempty" tf:"orc_ser_de,omitempty"` + + // Specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below. 
+ // +kubebuilder:validation:Optional + ParquetSerDe *ParquetSerDeParameters `json:"parquetSerDe,omitempty" tf:"parquet_ser_de,omitempty"` +} + +type ServerSideEncryptionInitParameters struct { + + // Whether to enable encryption at rest. Default is false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Amazon Resource Name (ARN) of the encryption key. Required when key_type is CUSTOMER_MANAGED_CMK. + KeyArn *string `json:"keyArn,omitempty" tf:"key_arn,omitempty"` + + // Type of encryption key. Default is AWS_OWNED_CMK. Valid values are AWS_OWNED_CMK and CUSTOMER_MANAGED_CMK + KeyType *string `json:"keyType,omitempty" tf:"key_type,omitempty"` +} + +type ServerSideEncryptionObservation struct { + + // Whether to enable encryption at rest. Default is false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Amazon Resource Name (ARN) of the encryption key. Required when key_type is CUSTOMER_MANAGED_CMK. + KeyArn *string `json:"keyArn,omitempty" tf:"key_arn,omitempty"` + + // Type of encryption key. Default is AWS_OWNED_CMK. Valid values are AWS_OWNED_CMK and CUSTOMER_MANAGED_CMK + KeyType *string `json:"keyType,omitempty" tf:"key_type,omitempty"` +} + +type ServerSideEncryptionParameters struct { + + // Whether to enable encryption at rest. Default is false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Amazon Resource Name (ARN) of the encryption key. Required when key_type is CUSTOMER_MANAGED_CMK. + // +kubebuilder:validation:Optional + KeyArn *string `json:"keyArn,omitempty" tf:"key_arn,omitempty"` + + // Type of encryption key. Default is AWS_OWNED_CMK. Valid values are AWS_OWNED_CMK and CUSTOMER_MANAGED_CMK + // +kubebuilder:validation:Optional + KeyType *string `json:"keyType,omitempty" tf:"key_type,omitempty"` +} + +type SnowflakeConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SnowflakeConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SnowflakeConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SnowflakeConfigurationInitParameters struct { + + // The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com. + AccountURL *string `json:"accountUrl,omitempty" tf:"account_url,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. 
See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *SnowflakeConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The name of the content column. + ContentColumnName *string `json:"contentColumnName,omitempty" tf:"content_column_name,omitempty"` + + // The data loading option. + DataLoadingOption *string `json:"dataLoadingOption,omitempty" tf:"data_loading_option,omitempty"` + + // The Snowflake database name. + Database *string `json:"database,omitempty" tf:"database,omitempty"` + + // The passphrase for the private key. + KeyPassphraseSecretRef *v1.SecretKeySelector `json:"keyPassphraseSecretRef,omitempty" tf:"-"` + + // The name of the metadata column. + MetadataColumnName *string `json:"metadataColumnName,omitempty" tf:"metadata_column_name,omitempty"` + + // The private key for authentication. + PrivateKeySecretRef v1.SecretKeySelector `json:"privateKeySecretRef" tf:"-"` + + // The processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *SnowflakeConfigurationProcessingConfigurationInitParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Snowflake, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 60s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The ARN of the IAM role. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The S3 backup mode. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 configuration. See s3_configuration block below for details. + S3Configuration *SnowflakeConfigurationS3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The Snowflake schema name. + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` + + // The configuration for Snowflake role. + SnowflakeRoleConfiguration *SnowflakeRoleConfigurationInitParameters `json:"snowflakeRoleConfiguration,omitempty" tf:"snowflake_role_configuration,omitempty"` + + // The VPC configuration for Snowflake. + SnowflakeVPCConfiguration *SnowflakeVPCConfigurationInitParameters `json:"snowflakeVpcConfiguration,omitempty" tf:"snowflake_vpc_configuration,omitempty"` + + // The Snowflake table name. + Table *string `json:"table,omitempty" tf:"table,omitempty"` + + // The user for authentication. + User *string `json:"user,omitempty" tf:"user,omitempty"` +} + +type SnowflakeConfigurationObservation struct { + + // The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com. + AccountURL *string `json:"accountUrl,omitempty" tf:"account_url,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. 
+ CloudwatchLoggingOptions *SnowflakeConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The name of the content column. + ContentColumnName *string `json:"contentColumnName,omitempty" tf:"content_column_name,omitempty"` + + // The data loading option. + DataLoadingOption *string `json:"dataLoadingOption,omitempty" tf:"data_loading_option,omitempty"` + + // The Snowflake database name. + Database *string `json:"database,omitempty" tf:"database,omitempty"` + + // The name of the metadata column. + MetadataColumnName *string `json:"metadataColumnName,omitempty" tf:"metadata_column_name,omitempty"` + + // The processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *SnowflakeConfigurationProcessingConfigurationObservation `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Snowflake, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 60s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The ARN of the IAM role. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The S3 backup mode. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 configuration. See s3_configuration block below for details. + S3Configuration *SnowflakeConfigurationS3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // The Snowflake schema name. + Schema *string `json:"schema,omitempty" tf:"schema,omitempty"` + + // The configuration for Snowflake role. 
+ SnowflakeRoleConfiguration *SnowflakeRoleConfigurationObservation `json:"snowflakeRoleConfiguration,omitempty" tf:"snowflake_role_configuration,omitempty"` + + // The VPC configuration for Snowflake. + SnowflakeVPCConfiguration *SnowflakeVPCConfigurationObservation `json:"snowflakeVpcConfiguration,omitempty" tf:"snowflake_vpc_configuration,omitempty"` + + // The Snowflake table name. + Table *string `json:"table,omitempty" tf:"table,omitempty"` + + // The user for authentication. + User *string `json:"user,omitempty" tf:"user,omitempty"` +} + +type SnowflakeConfigurationParameters struct { + + // The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com. + // +kubebuilder:validation:Optional + AccountURL *string `json:"accountUrl" tf:"account_url,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *SnowflakeConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The name of the content column. + // +kubebuilder:validation:Optional + ContentColumnName *string `json:"contentColumnName,omitempty" tf:"content_column_name,omitempty"` + + // The data loading option. + // +kubebuilder:validation:Optional + DataLoadingOption *string `json:"dataLoadingOption,omitempty" tf:"data_loading_option,omitempty"` + + // The Snowflake database name. + // +kubebuilder:validation:Optional + Database *string `json:"database" tf:"database,omitempty"` + + // The passphrase for the private key. + // +kubebuilder:validation:Optional + KeyPassphraseSecretRef *v1.SecretKeySelector `json:"keyPassphraseSecretRef,omitempty" tf:"-"` + + // The name of the metadata column. 
+ // +kubebuilder:validation:Optional + MetadataColumnName *string `json:"metadataColumnName,omitempty" tf:"metadata_column_name,omitempty"` + + // The private key for authentication. + // +kubebuilder:validation:Optional + PrivateKeySecretRef v1.SecretKeySelector `json:"privateKeySecretRef" tf:"-"` + + // The processing configuration. See processing_configuration block below for details. + // +kubebuilder:validation:Optional + ProcessingConfiguration *SnowflakeConfigurationProcessingConfigurationParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Snowflake, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 60s. There will be no retry if the value is 0. + // +kubebuilder:validation:Optional + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // The ARN of the IAM role. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The S3 backup mode. + // +kubebuilder:validation:Optional + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 configuration. See s3_configuration block below for details. 
+ // +kubebuilder:validation:Optional + S3Configuration *SnowflakeConfigurationS3ConfigurationParameters `json:"s3Configuration" tf:"s3_configuration,omitempty"` + + // The Snowflake schema name. + // +kubebuilder:validation:Optional + Schema *string `json:"schema" tf:"schema,omitempty"` + + // The configuration for Snowflake role. + // +kubebuilder:validation:Optional + SnowflakeRoleConfiguration *SnowflakeRoleConfigurationParameters `json:"snowflakeRoleConfiguration,omitempty" tf:"snowflake_role_configuration,omitempty"` + + // The VPC configuration for Snowflake. + // +kubebuilder:validation:Optional + SnowflakeVPCConfiguration *SnowflakeVPCConfigurationParameters `json:"snowflakeVpcConfiguration,omitempty" tf:"snowflake_vpc_configuration,omitempty"` + + // The Snowflake table name. + // +kubebuilder:validation:Optional + Table *string `json:"table" tf:"table,omitempty"` + + // The user for authentication. + // +kubebuilder:validation:Optional + User *string `json:"user" tf:"user,omitempty"` +} + +type SnowflakeConfigurationProcessingConfigurationInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + Processors []SnowflakeConfigurationProcessingConfigurationProcessorsInitParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type SnowflakeConfigurationProcessingConfigurationObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. 
+ Processors []SnowflakeConfigurationProcessingConfigurationProcessorsObservation `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type SnowflakeConfigurationProcessingConfigurationParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + // +kubebuilder:validation:Optional + Processors []SnowflakeConfigurationProcessingConfigurationProcessorsParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type SnowflakeConfigurationProcessingConfigurationProcessorsInitParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []SnowflakeConfigurationProcessingConfigurationProcessorsParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SnowflakeConfigurationProcessingConfigurationProcessorsObservation struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []SnowflakeConfigurationProcessingConfigurationProcessorsParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SnowflakeConfigurationProcessingConfigurationProcessorsParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + // +kubebuilder:validation:Optional + Parameters []SnowflakeConfigurationProcessingConfigurationProcessorsParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SnowflakeConfigurationProcessingConfigurationProcessorsParametersInitParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type SnowflakeConfigurationProcessingConfigurationProcessorsParametersObservation struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). 
When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type SnowflakeConfigurationProcessingConfigurationProcessorsParametersParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + ParameterName *string `json:"parameterName" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + // +kubebuilder:validation:Optional + ParameterValue *string `json:"parameterValue" tf:"parameter_value,omitempty"` +} + +type SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SnowflakeConfigurationS3ConfigurationInitParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. 
+ BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. 
Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type SnowflakeConfigurationS3ConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. 
This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type SnowflakeConfigurationS3ConfigurationParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. 
+ // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. 
Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type SnowflakeRoleConfigurationInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The Snowflake role. + SnowflakeRole *string `json:"snowflakeRole,omitempty" tf:"snowflake_role,omitempty"` +} + +type SnowflakeRoleConfigurationObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The Snowflake role. + SnowflakeRole *string `json:"snowflakeRole,omitempty" tf:"snowflake_role,omitempty"` +} + +type SnowflakeRoleConfigurationParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The Snowflake role. 
+ // +kubebuilder:validation:Optional + SnowflakeRole *string `json:"snowflakeRole,omitempty" tf:"snowflake_role,omitempty"` +} + +type SnowflakeVPCConfigurationInitParameters struct { + + // The VPCE ID for Firehose to privately connect with Snowflake. + PrivateLinkVpceID *string `json:"privateLinkVpceId,omitempty" tf:"private_link_vpce_id,omitempty"` +} + +type SnowflakeVPCConfigurationObservation struct { + + // The VPCE ID for Firehose to privately connect with Snowflake. + PrivateLinkVpceID *string `json:"privateLinkVpceId,omitempty" tf:"private_link_vpce_id,omitempty"` +} + +type SnowflakeVPCConfigurationParameters struct { + + // The VPCE ID for Firehose to privately connect with Snowflake. + // +kubebuilder:validation:Optional + PrivateLinkVpceID *string `json:"privateLinkVpceId" tf:"private_link_vpce_id,omitempty"` +} + +type SplunkConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SplunkConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SplunkConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SplunkConfigurationInitParameters struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *SplunkConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends it data. + HecAcknowledgmentTimeout *float64 `json:"hecAcknowledgmentTimeout,omitempty" tf:"hec_acknowledgment_timeout,omitempty"` + + // The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data. 
+ HecEndpoint *string `json:"hecEndpoint,omitempty" tf:"hec_endpoint,omitempty"` + + // The HEC endpoint type. Valid values are Raw or Event. The default value is Raw. + HecEndpointType *string `json:"hecEndpointType,omitempty" tf:"hec_endpoint_type,omitempty"` + + // The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint. + HecTokenSecretRef v1.SecretKeySelector `json:"hecTokenSecretRef" tf:"-"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *SplunkConfigurationProcessingConfigurationInitParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default value is FailedEventsOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + S3Configuration *SplunkConfigurationS3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` +} + +type SplunkConfigurationObservation struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. 
The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *SplunkConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends it data. + HecAcknowledgmentTimeout *float64 `json:"hecAcknowledgmentTimeout,omitempty" tf:"hec_acknowledgment_timeout,omitempty"` + + // The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data. + HecEndpoint *string `json:"hecEndpoint,omitempty" tf:"hec_endpoint,omitempty"` + + // The HEC endpoint type. Valid values are Raw or Event. The default value is Raw. + HecEndpointType *string `json:"hecEndpointType,omitempty" tf:"hec_endpoint_type,omitempty"` + + // The data processing configuration. See processing_configuration block below for details. + ProcessingConfiguration *SplunkConfigurationProcessingConfigurationObservation `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default value is FailedEventsOnly. + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. 
See s3_configuration block below for details. + S3Configuration *SplunkConfigurationS3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` +} + +type SplunkConfigurationParameters struct { + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *SplunkConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends it data. + // +kubebuilder:validation:Optional + HecAcknowledgmentTimeout *float64 `json:"hecAcknowledgmentTimeout,omitempty" tf:"hec_acknowledgment_timeout,omitempty"` + + // The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data. + // +kubebuilder:validation:Optional + HecEndpoint *string `json:"hecEndpoint" tf:"hec_endpoint,omitempty"` + + // The HEC endpoint type. Valid values are Raw or Event. The default value is Raw. + // +kubebuilder:validation:Optional + HecEndpointType *string `json:"hecEndpointType,omitempty" tf:"hec_endpoint_type,omitempty"` + + // The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint. 
+ // +kubebuilder:validation:Optional + HecTokenSecretRef v1.SecretKeySelector `json:"hecTokenSecretRef" tf:"-"` + + // The data processing configuration. See processing_configuration block below for details. + // +kubebuilder:validation:Optional + ProcessingConfiguration *SplunkConfigurationProcessingConfigurationParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. + // +kubebuilder:validation:Optional + RetryDuration *float64 `json:"retryDuration,omitempty" tf:"retry_duration,omitempty"` + + // Defines how documents should be delivered to Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default value is FailedEventsOnly. + // +kubebuilder:validation:Optional + S3BackupMode *string `json:"s3BackupMode,omitempty" tf:"s3_backup_mode,omitempty"` + + // The S3 Configuration. See s3_configuration block below for details. + // +kubebuilder:validation:Optional + S3Configuration *SplunkConfigurationS3ConfigurationParameters `json:"s3Configuration" tf:"s3_configuration,omitempty"` +} + +type SplunkConfigurationProcessingConfigurationInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + Processors []SplunkConfigurationProcessingConfigurationProcessorsInitParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type SplunkConfigurationProcessingConfigurationObservation struct { + + // Enables or disables the logging. Defaults to false. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + Processors []SplunkConfigurationProcessingConfigurationProcessorsObservation `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type SplunkConfigurationProcessingConfigurationParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the data processors as multiple blocks. See processors block below for details. + // +kubebuilder:validation:Optional + Processors []SplunkConfigurationProcessingConfigurationProcessorsParameters `json:"processors,omitempty" tf:"processors,omitempty"` +} + +type SplunkConfigurationProcessingConfigurationProcessorsInitParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []SplunkConfigurationProcessingConfigurationProcessorsParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SplunkConfigurationProcessingConfigurationProcessorsObservation struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + Parameters []SplunkConfigurationProcessingConfigurationProcessorsParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SplunkConfigurationProcessingConfigurationProcessorsParameters struct { + + // Specifies the processor parameters as multiple blocks. See parameters block below for details. + // +kubebuilder:validation:Optional + Parameters []SplunkConfigurationProcessingConfigurationProcessorsParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SplunkConfigurationProcessingConfigurationProcessorsParametersInitParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type SplunkConfigurationProcessingConfigurationProcessorsParametersObservation struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + ParameterName *string `json:"parameterName,omitempty" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). 
When providing a Lambda ARN, you should specify the resource version as well. + ParameterValue *string `json:"parameterValue,omitempty" tf:"parameter_value,omitempty"` +} + +type SplunkConfigurationProcessingConfigurationProcessorsParametersParameters struct { + + // Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter. Validation is done against AWS SDK constants; so that values not explicitly listed may also work. + // +kubebuilder:validation:Optional + ParameterName *string `json:"parameterName" tf:"parameter_name,omitempty"` + + // Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well. + // +kubebuilder:validation:Optional + ParameterValue *string `json:"parameterValue" tf:"parameter_value,omitempty"` +} + +type SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation struct { + + // Enables or disables the logging. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. 
+ LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters struct { + + // Enables or disables the logging. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The CloudWatch group name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The CloudWatch log stream name for logging. This value is required if enabled is true. + // +kubebuilder:validation:Optional + LogStreamName *string `json:"logStreamName,omitempty" tf:"log_stream_name,omitempty"` +} + +type SplunkConfigurationS3ConfigurationInitParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. 
+ BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. 
Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type SplunkConfigurationS3ConfigurationObservation struct { + + // The ARN of the S3 bucket + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + CloudwatchLoggingOptions *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. 
This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type SplunkConfigurationS3ConfigurationParameters struct { + + // The ARN of the S3 bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Buffer incoming data for the specified period of time, in seconds between 0 to 60, before delivering it to the destination. The default value is 60s. 
+ // +kubebuilder:validation:Optional + BufferingInterval *float64 `json:"bufferingInterval,omitempty" tf:"buffering_interval,omitempty"` + + // Buffer incoming data to the specified size, in MBs between 1 to 5, before delivering it to the destination. The default value is 5MB. + // +kubebuilder:validation:Optional + BufferingSize *float64 `json:"bufferingSize,omitempty" tf:"buffering_size,omitempty"` + + // The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + // +kubebuilder:validation:Optional + CompressionFormat *string `json:"compressionFormat,omitempty" tf:"compression_format,omitempty"` + + // Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. + // +kubebuilder:validation:Optional + ErrorOutputPrefix *string `json:"errorOutputPrefix,omitempty" tf:"error_output_prefix,omitempty"` + + // Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + // be used. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. 
Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type VPCConfigInitParameters struct { + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // A list of security group IDs to associate with Kinesis Firehose. 
+ // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs to associate with Kinesis Firehose. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigObservation struct { + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // A list of security group IDs to associate with Kinesis Firehose. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs to associate with Kinesis Firehose. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCConfigParameters struct { + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure role has necessary IAM permissions + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // A list of security group IDs to associate with Kinesis Firehose. 
+ // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs to associate with Kinesis Firehose. + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"` +} + +// DeliveryStreamSpec defines the desired state of DeliveryStream +type DeliveryStreamSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DeliveryStreamParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DeliveryStreamInitParameters `json:"initProvider,omitempty"` +} + +// DeliveryStreamStatus defines the observed state of DeliveryStream. +type DeliveryStreamStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DeliveryStreamObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DeliveryStream is the Schema for the DeliveryStreams API. 
Provides a AWS Kinesis Firehose Delivery Stream +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DeliveryStream struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destination) || (has(self.initProvider) && has(self.initProvider.destination))",message="spec.forProvider.destination is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec DeliveryStreamSpec `json:"spec"` + Status DeliveryStreamStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeliveryStreamList contains a list of DeliveryStreams +type DeliveryStreamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DeliveryStream `json:"items"` +} + +// Repository type metadata. +var ( + DeliveryStream_Kind = "DeliveryStream" + DeliveryStream_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DeliveryStream_Kind}.String() + DeliveryStream_KindAPIVersion = DeliveryStream_Kind + "." 
+ CRDGroupVersion.String() + DeliveryStream_GroupVersionKind = CRDGroupVersion.WithKind(DeliveryStream_Kind) +) + +func init() { + SchemeBuilder.Register(&DeliveryStream{}, &DeliveryStreamList{}) +} diff --git a/apis/firehose/v1beta2/zz_generated.conversion_hubs.go b/apis/firehose/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..d6e75af266 --- /dev/null +++ b/apis/firehose/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *DeliveryStream) Hub() {} diff --git a/apis/firehose/v1beta2/zz_generated.deepcopy.go b/apis/firehose/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..da9880a007 --- /dev/null +++ b/apis/firehose/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,10287 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationInitParameters) DeepCopyInto(out *AuthenticationConfigurationInitParameters) { + *out = *in + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationInitParameters. 
// NOTE(review): autogenerated deepcopy boilerplate (the "autogenerated
// deepcopy function" markers match deepcopy-gen / controller-gen output).
// Do not hand-edit; regenerate from the API types — TODO confirm generator target.
func (in *AuthenticationConfigurationInitParameters) DeepCopy() *AuthenticationConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(AuthenticationConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthenticationConfigurationObservation) DeepCopyInto(out *AuthenticationConfigurationObservation) {
	*out = *in
	if in.Connectivity != nil {
		in, out := &in.Connectivity, &out.Connectivity
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationObservation.
func (in *AuthenticationConfigurationObservation) DeepCopy() *AuthenticationConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(AuthenticationConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthenticationConfigurationParameters) DeepCopyInto(out *AuthenticationConfigurationParameters) {
	*out = *in
	if in.Connectivity != nil {
		in, out := &in.Connectivity, &out.Connectivity
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationParameters.
func (in *AuthenticationConfigurationParameters) DeepCopy() *AuthenticationConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(AuthenticationConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *CloudwatchLoggingOptionsInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLoggingOptionsInitParameters.
func (in *CloudwatchLoggingOptionsInitParameters) DeepCopy() *CloudwatchLoggingOptionsInitParameters {
	if in == nil {
		return nil
	}
	out := new(CloudwatchLoggingOptionsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudwatchLoggingOptionsObservation) DeepCopyInto(out *CloudwatchLoggingOptionsObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLoggingOptionsObservation.
func (in *CloudwatchLoggingOptionsObservation) DeepCopy() *CloudwatchLoggingOptionsObservation {
	if in == nil {
		return nil
	}
	out := new(CloudwatchLoggingOptionsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudwatchLoggingOptionsParameters) DeepCopyInto(out *CloudwatchLoggingOptionsParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLoggingOptionsParameters.
func (in *CloudwatchLoggingOptionsParameters) DeepCopy() *CloudwatchLoggingOptionsParameters {
	if in == nil {
		return nil
	}
	out := new(CloudwatchLoggingOptionsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonAttributesInitParameters) DeepCopyInto(out *CommonAttributesInitParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonAttributesInitParameters.
func (in *CommonAttributesInitParameters) DeepCopy() *CommonAttributesInitParameters {
	if in == nil {
		return nil
	}
	out := new(CommonAttributesInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonAttributesObservation) DeepCopyInto(out *CommonAttributesObservation) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonAttributesObservation.
func (in *CommonAttributesObservation) DeepCopy() *CommonAttributesObservation {
	if in == nil {
		return nil
	}
	out := new(CommonAttributesObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonAttributesParameters) DeepCopyInto(out *CommonAttributesParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonAttributesParameters.
func (in *CommonAttributesParameters) DeepCopy() *CommonAttributesParameters {
	if in == nil {
		return nil
	}
	out := new(CommonAttributesParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataFormatConversionConfigurationInitParameters) DeepCopyInto(out *DataFormatConversionConfigurationInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.InputFormatConfiguration != nil {
		in, out := &in.InputFormatConfiguration, &out.InputFormatConfiguration
		*out = new(InputFormatConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.OutputFormatConfiguration != nil {
		in, out := &in.OutputFormatConfiguration, &out.OutputFormatConfiguration
		*out = new(OutputFormatConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.SchemaConfiguration != nil {
		in, out := &in.SchemaConfiguration, &out.SchemaConfiguration
		*out = new(SchemaConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFormatConversionConfigurationInitParameters.
func (in *DataFormatConversionConfigurationInitParameters) DeepCopy() *DataFormatConversionConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(DataFormatConversionConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataFormatConversionConfigurationObservation) DeepCopyInto(out *DataFormatConversionConfigurationObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.InputFormatConfiguration != nil {
		in, out := &in.InputFormatConfiguration, &out.InputFormatConfiguration
		*out = new(InputFormatConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.OutputFormatConfiguration != nil {
		in, out := &in.OutputFormatConfiguration, &out.OutputFormatConfiguration
		*out = new(OutputFormatConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.SchemaConfiguration != nil {
		in, out := &in.SchemaConfiguration, &out.SchemaConfiguration
		*out = new(SchemaConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFormatConversionConfigurationObservation.
func (in *DataFormatConversionConfigurationObservation) DeepCopy() *DataFormatConversionConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(DataFormatConversionConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataFormatConversionConfigurationParameters) DeepCopyInto(out *DataFormatConversionConfigurationParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.InputFormatConfiguration != nil {
		in, out := &in.InputFormatConfiguration, &out.InputFormatConfiguration
		*out = new(InputFormatConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.OutputFormatConfiguration != nil {
		in, out := &in.OutputFormatConfiguration, &out.OutputFormatConfiguration
		*out = new(OutputFormatConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.SchemaConfiguration != nil {
		in, out := &in.SchemaConfiguration, &out.SchemaConfiguration
		*out = new(SchemaConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFormatConversionConfigurationParameters.
func (in *DataFormatConversionConfigurationParameters) DeepCopy() *DataFormatConversionConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(DataFormatConversionConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryStream) DeepCopyInto(out *DeliveryStream) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStream.
func (in *DeliveryStream) DeepCopy() *DeliveryStream {
	if in == nil {
		return nil
	}
	out := new(DeliveryStream)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// NOTE(review): autogenerated deepcopy boilerplate for the DeliveryStream
// kind and its spec/status wrappers — do not hand-edit; regenerate instead.
func (in *DeliveryStream) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryStreamInitParameters) DeepCopyInto(out *DeliveryStreamInitParameters) {
	*out = *in
	if in.Destination != nil {
		in, out := &in.Destination, &out.Destination
		*out = new(string)
		**out = **in
	}
	if in.DestinationID != nil {
		in, out := &in.DestinationID, &out.DestinationID
		*out = new(string)
		**out = **in
	}
	if in.ElasticsearchConfiguration != nil {
		in, out := &in.ElasticsearchConfiguration, &out.ElasticsearchConfiguration
		*out = new(ElasticsearchConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ExtendedS3Configuration != nil {
		in, out := &in.ExtendedS3Configuration, &out.ExtendedS3Configuration
		*out = new(ExtendedS3ConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.HTTPEndpointConfiguration != nil {
		in, out := &in.HTTPEndpointConfiguration, &out.HTTPEndpointConfiguration
		*out = new(HTTPEndpointConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.KinesisSourceConfiguration != nil {
		in, out := &in.KinesisSourceConfiguration, &out.KinesisSourceConfiguration
		*out = new(KinesisSourceConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.MskSourceConfiguration != nil {
		in, out := &in.MskSourceConfiguration, &out.MskSourceConfiguration
		*out = new(MskSourceConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.OpensearchConfiguration != nil {
		in, out := &in.OpensearchConfiguration, &out.OpensearchConfiguration
		*out = new(OpensearchConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.OpensearchserverlessConfiguration != nil {
		in, out := &in.OpensearchserverlessConfiguration, &out.OpensearchserverlessConfiguration
		*out = new(OpensearchserverlessConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.RedshiftConfiguration != nil {
		in, out := &in.RedshiftConfiguration, &out.RedshiftConfiguration
		*out = new(RedshiftConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ServerSideEncryption != nil {
		in, out := &in.ServerSideEncryption, &out.ServerSideEncryption
		*out = new(ServerSideEncryptionInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.SnowflakeConfiguration != nil {
		in, out := &in.SnowflakeConfiguration, &out.SnowflakeConfiguration
		*out = new(SnowflakeConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.SplunkConfiguration != nil {
		in, out := &in.SplunkConfiguration, &out.SplunkConfiguration
		*out = new(SplunkConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.VersionID != nil {
		in, out := &in.VersionID, &out.VersionID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamInitParameters.
func (in *DeliveryStreamInitParameters) DeepCopy() *DeliveryStreamInitParameters {
	if in == nil {
		return nil
	}
	out := new(DeliveryStreamInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryStreamList) DeepCopyInto(out *DeliveryStreamList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DeliveryStream, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamList.
func (in *DeliveryStreamList) DeepCopy() *DeliveryStreamList {
	if in == nil {
		return nil
	}
	out := new(DeliveryStreamList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeliveryStreamList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryStreamObservation) DeepCopyInto(out *DeliveryStreamObservation) {
	*out = *in
	if in.Arn != nil {
		in, out := &in.Arn, &out.Arn
		*out = new(string)
		**out = **in
	}
	if in.Destination != nil {
		in, out := &in.Destination, &out.Destination
		*out = new(string)
		**out = **in
	}
	if in.DestinationID != nil {
		in, out := &in.DestinationID, &out.DestinationID
		*out = new(string)
		**out = **in
	}
	if in.ElasticsearchConfiguration != nil {
		in, out := &in.ElasticsearchConfiguration, &out.ElasticsearchConfiguration
		*out = new(ElasticsearchConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ExtendedS3Configuration != nil {
		in, out := &in.ExtendedS3Configuration, &out.ExtendedS3Configuration
		*out = new(ExtendedS3ConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.HTTPEndpointConfiguration != nil {
		in, out := &in.HTTPEndpointConfiguration, &out.HTTPEndpointConfiguration
		*out = new(HTTPEndpointConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.KinesisSourceConfiguration != nil {
		in, out := &in.KinesisSourceConfiguration, &out.KinesisSourceConfiguration
		*out = new(KinesisSourceConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.MskSourceConfiguration != nil {
		in, out := &in.MskSourceConfiguration, &out.MskSourceConfiguration
		*out = new(MskSourceConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.OpensearchConfiguration != nil {
		in, out := &in.OpensearchConfiguration, &out.OpensearchConfiguration
		*out = new(OpensearchConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.OpensearchserverlessConfiguration != nil {
		in, out := &in.OpensearchserverlessConfiguration, &out.OpensearchserverlessConfiguration
		*out = new(OpensearchserverlessConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.RedshiftConfiguration != nil {
		in, out := &in.RedshiftConfiguration, &out.RedshiftConfiguration
		*out = new(RedshiftConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ServerSideEncryption != nil {
		in, out := &in.ServerSideEncryption, &out.ServerSideEncryption
		*out = new(ServerSideEncryptionObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.SnowflakeConfiguration != nil {
		in, out := &in.SnowflakeConfiguration, &out.SnowflakeConfiguration
		*out = new(SnowflakeConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.SplunkConfiguration != nil {
		in, out := &in.SplunkConfiguration, &out.SplunkConfiguration
		*out = new(SplunkConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TagsAll != nil {
		in, out := &in.TagsAll, &out.TagsAll
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.VersionID != nil {
		in, out := &in.VersionID, &out.VersionID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamObservation.
func (in *DeliveryStreamObservation) DeepCopy() *DeliveryStreamObservation {
	if in == nil {
		return nil
	}
	out := new(DeliveryStreamObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryStreamParameters) DeepCopyInto(out *DeliveryStreamParameters) {
	*out = *in
	if in.Destination != nil {
		in, out := &in.Destination, &out.Destination
		*out = new(string)
		**out = **in
	}
	if in.DestinationID != nil {
		in, out := &in.DestinationID, &out.DestinationID
		*out = new(string)
		**out = **in
	}
	if in.ElasticsearchConfiguration != nil {
		in, out := &in.ElasticsearchConfiguration, &out.ElasticsearchConfiguration
		*out = new(ElasticsearchConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ExtendedS3Configuration != nil {
		in, out := &in.ExtendedS3Configuration, &out.ExtendedS3Configuration
		*out = new(ExtendedS3ConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.HTTPEndpointConfiguration != nil {
		in, out := &in.HTTPEndpointConfiguration, &out.HTTPEndpointConfiguration
		*out = new(HTTPEndpointConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.KinesisSourceConfiguration != nil {
		in, out := &in.KinesisSourceConfiguration, &out.KinesisSourceConfiguration
		*out = new(KinesisSourceConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.MskSourceConfiguration != nil {
		in, out := &in.MskSourceConfiguration, &out.MskSourceConfiguration
		*out = new(MskSourceConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.OpensearchConfiguration != nil {
		in, out := &in.OpensearchConfiguration, &out.OpensearchConfiguration
		*out = new(OpensearchConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.OpensearchserverlessConfiguration != nil {
		in, out := &in.OpensearchserverlessConfiguration, &out.OpensearchserverlessConfiguration
		*out = new(OpensearchserverlessConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.RedshiftConfiguration != nil {
		in, out := &in.RedshiftConfiguration, &out.RedshiftConfiguration
		*out = new(RedshiftConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Region != nil {
		in, out := &in.Region, &out.Region
		*out = new(string)
		**out = **in
	}
	if in.ServerSideEncryption != nil {
		in, out := &in.ServerSideEncryption, &out.ServerSideEncryption
		*out = new(ServerSideEncryptionParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.SnowflakeConfiguration != nil {
		in, out := &in.SnowflakeConfiguration, &out.SnowflakeConfiguration
		*out = new(SnowflakeConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.SplunkConfiguration != nil {
		in, out := &in.SplunkConfiguration, &out.SplunkConfiguration
		*out = new(SplunkConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.VersionID != nil {
		in, out := &in.VersionID, &out.VersionID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamParameters.
func (in *DeliveryStreamParameters) DeepCopy() *DeliveryStreamParameters {
	if in == nil {
		return nil
	}
	out := new(DeliveryStreamParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryStreamSpec) DeepCopyInto(out *DeliveryStreamSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamSpec.
func (in *DeliveryStreamSpec) DeepCopy() *DeliveryStreamSpec {
	if in == nil {
		return nil
	}
	out := new(DeliveryStreamSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryStreamStatus) DeepCopyInto(out *DeliveryStreamStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryStreamStatus.
func (in *DeliveryStreamStatus) DeepCopy() *DeliveryStreamStatus {
	if in == nil {
		return nil
	}
	out := new(DeliveryStreamStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate — do not hand-edit; regenerate instead.
func (in *DeserializerInitParameters) DeepCopyInto(out *DeserializerInitParameters) {
	*out = *in
	if in.HiveJSONSerDe != nil {
		in, out := &in.HiveJSONSerDe, &out.HiveJSONSerDe
		*out = new(HiveJSONSerDeInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.OpenXJSONSerDe != nil {
		in, out := &in.OpenXJSONSerDe, &out.OpenXJSONSerDe
		*out = new(OpenXJSONSerDeInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeserializerInitParameters.
func (in *DeserializerInitParameters) DeepCopy() *DeserializerInitParameters {
	if in == nil {
		return nil
	}
	out := new(DeserializerInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeserializerObservation) DeepCopyInto(out *DeserializerObservation) {
	*out = *in
	if in.HiveJSONSerDe != nil {
		in, out := &in.HiveJSONSerDe, &out.HiveJSONSerDe
		*out = new(HiveJSONSerDeObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.OpenXJSONSerDe != nil {
		in, out := &in.OpenXJSONSerDe, &out.OpenXJSONSerDe
		*out = new(OpenXJSONSerDeObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeserializerObservation.
func (in *DeserializerObservation) DeepCopy() *DeserializerObservation {
	if in == nil {
		return nil
	}
	out := new(DeserializerObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeserializerParameters) DeepCopyInto(out *DeserializerParameters) {
	*out = *in
	if in.HiveJSONSerDe != nil {
		in, out := &in.HiveJSONSerDe, &out.HiveJSONSerDe
		*out = new(HiveJSONSerDeParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.OpenXJSONSerDe != nil {
		in, out := &in.OpenXJSONSerDe, &out.OpenXJSONSerDe
		*out = new(OpenXJSONSerDeParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeserializerParameters.
func (in *DeserializerParameters) DeepCopy() *DeserializerParameters {
	if in == nil {
		return nil
	}
	out := new(DeserializerParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DocumentIDOptionsInitParameters) DeepCopyInto(out *DocumentIDOptionsInitParameters) {
	*out = *in
	if in.DefaultDocumentIDFormat != nil {
		in, out := &in.DefaultDocumentIDFormat, &out.DefaultDocumentIDFormat
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentIDOptionsInitParameters.
func (in *DocumentIDOptionsInitParameters) DeepCopy() *DocumentIDOptionsInitParameters {
	if in == nil {
		return nil
	}
	out := new(DocumentIDOptionsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DocumentIDOptionsObservation) DeepCopyInto(out *DocumentIDOptionsObservation) {
	*out = *in
	if in.DefaultDocumentIDFormat != nil {
		in, out := &in.DefaultDocumentIDFormat, &out.DefaultDocumentIDFormat
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentIDOptionsObservation.
func (in *DocumentIDOptionsObservation) DeepCopy() *DocumentIDOptionsObservation {
	if in == nil {
		return nil
	}
	out := new(DocumentIDOptionsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DocumentIDOptionsParameters) DeepCopyInto(out *DocumentIDOptionsParameters) {
	*out = *in
	if in.DefaultDocumentIDFormat != nil {
		in, out := &in.DefaultDocumentIDFormat, &out.DefaultDocumentIDFormat
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentIDOptionsParameters.
func (in *DocumentIDOptionsParameters) DeepCopy() *DocumentIDOptionsParameters {
	if in == nil {
		return nil
	}
	out := new(DocumentIDOptionsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DynamicPartitioningConfigurationInitParameters) DeepCopyInto(out *DynamicPartitioningConfigurationInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.RetryDuration != nil {
		in, out := &in.RetryDuration, &out.RetryDuration
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicPartitioningConfigurationInitParameters.
func (in *DynamicPartitioningConfigurationInitParameters) DeepCopy() *DynamicPartitioningConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(DynamicPartitioningConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DynamicPartitioningConfigurationObservation) DeepCopyInto(out *DynamicPartitioningConfigurationObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.RetryDuration != nil {
		in, out := &in.RetryDuration, &out.RetryDuration
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicPartitioningConfigurationObservation.
func (in *DynamicPartitioningConfigurationObservation) DeepCopy() *DynamicPartitioningConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(DynamicPartitioningConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DynamicPartitioningConfigurationParameters) DeepCopyInto(out *DynamicPartitioningConfigurationParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.RetryDuration != nil {
		in, out := &in.RetryDuration, &out.RetryDuration
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicPartitioningConfigurationParameters.
func (in *DynamicPartitioningConfigurationParameters) DeepCopy() *DynamicPartitioningConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(DynamicPartitioningConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate — do not hand-edit; regenerate instead.
func (in *ElasticsearchConfigurationInitParameters) DeepCopyInto(out *ElasticsearchConfigurationInitParameters) {
	*out = *in
	if in.BufferingInterval != nil {
		in, out := &in.BufferingInterval, &out.BufferingInterval
		*out = new(float64)
		**out = **in
	}
	if in.BufferingSize != nil {
		in, out := &in.BufferingSize, &out.BufferingSize
		*out = new(float64)
		**out = **in
	}
	if in.CloudwatchLoggingOptions != nil {
		in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions
		*out = new(CloudwatchLoggingOptionsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ClusterEndpoint != nil {
		in, out := &in.ClusterEndpoint, &out.ClusterEndpoint
		*out = new(string)
		**out = **in
	}
	if in.DomainArn != nil {
		in, out := &in.DomainArn, &out.DomainArn
		*out = new(string)
		**out = **in
	}
	if in.DomainArnRef != nil {
		in, out := &in.DomainArnRef, &out.DomainArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.DomainArnSelector != nil {
		in, out := &in.DomainArnSelector, &out.DomainArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.IndexName != nil {
		in, out := &in.IndexName, &out.IndexName
		*out = new(string)
		**out = **in
	}
	if in.IndexRotationPeriod != nil {
		in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod
		*out = new(string)
		**out = **in
	}
	if in.ProcessingConfiguration != nil {
		in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration
		*out = new(ProcessingConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.RetryDuration != nil {
		in, out := &in.RetryDuration, &out.RetryDuration
		*out = new(float64)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.RoleArnRef != nil {
		in, out := &in.RoleArnRef, &out.RoleArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.RoleArnSelector != nil {
		in, out := &in.RoleArnSelector, &out.RoleArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.S3BackupMode != nil {
		in, out := &in.S3BackupMode, &out.S3BackupMode
		*out = new(string)
		**out = **in
	}
	if in.S3Configuration != nil {
		in, out := &in.S3Configuration, &out.S3Configuration
		*out = new(S3ConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TypeName != nil {
		in, out := &in.TypeName, &out.TypeName
		*out = new(string)
		**out = **in
	}
	if in.VPCConfig != nil {
		in, out := &in.VPCConfig, &out.VPCConfig
		*out = new(VPCConfigInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchConfigurationInitParameters.
func (in *ElasticsearchConfigurationInitParameters) DeepCopy() *ElasticsearchConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(ElasticsearchConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ElasticsearchConfigurationObservation) DeepCopyInto(out *ElasticsearchConfigurationObservation) {
	*out = *in
	if in.BufferingInterval != nil {
		in, out := &in.BufferingInterval, &out.BufferingInterval
		*out = new(float64)
		**out = **in
	}
	if in.BufferingSize != nil {
		in, out := &in.BufferingSize, &out.BufferingSize
		*out = new(float64)
		**out = **in
	}
	if in.CloudwatchLoggingOptions != nil {
		in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions
		*out = new(CloudwatchLoggingOptionsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ClusterEndpoint != nil {
		in, out := &in.ClusterEndpoint, &out.ClusterEndpoint
		*out = new(string)
		**out = **in
	}
	if in.DomainArn != nil {
		in, out := &in.DomainArn, &out.DomainArn
		*out = new(string)
		**out = **in
	}
	if in.IndexName != nil {
		in, out := &in.IndexName, &out.IndexName
		*out = new(string)
		**out = **in
	}
	if in.IndexRotationPeriod != nil {
		in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod
		*out = new(string)
		**out = **in
	}
	if in.ProcessingConfiguration != nil {
		in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration
		*out = new(ProcessingConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.RetryDuration != nil {
		in, out := &in.RetryDuration, &out.RetryDuration
		*out = new(float64)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.S3BackupMode != nil {
		in, out := &in.S3BackupMode, &out.S3BackupMode
		*out = new(string)
		**out = **in
	}
	if in.S3Configuration != nil {
		in, out := &in.S3Configuration, &out.S3Configuration
		*out = new(S3ConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.TypeName != nil {
		in, out := &in.TypeName, &out.TypeName
		*out = new(string)
		**out = **in
	}
	if in.VPCConfig != nil {
		in, out := &in.VPCConfig, &out.VPCConfig
		*out = new(VPCConfigObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchConfigurationObservation.
func (in *ElasticsearchConfigurationObservation) DeepCopy() *ElasticsearchConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(ElasticsearchConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ElasticsearchConfigurationParameters) DeepCopyInto(out *ElasticsearchConfigurationParameters) {
	*out = *in
	if in.BufferingInterval != nil {
		in, out := &in.BufferingInterval, &out.BufferingInterval
		*out = new(float64)
		**out = **in
	}
	if in.BufferingSize != nil {
		in, out := &in.BufferingSize, &out.BufferingSize
		*out = new(float64)
		**out = **in
	}
	if in.CloudwatchLoggingOptions != nil {
		in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions
		*out = new(CloudwatchLoggingOptionsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ClusterEndpoint != nil {
		in, out := &in.ClusterEndpoint, &out.ClusterEndpoint
		*out = new(string)
		**out = **in
	}
	if in.DomainArn != nil {
		in, out := &in.DomainArn, &out.DomainArn
		*out = new(string)
		**out = **in
	}
	if in.DomainArnRef != nil {
		in, out := &in.DomainArnRef, &out.DomainArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.DomainArnSelector != nil {
		in, out := &in.DomainArnSelector, &out.DomainArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.IndexName != nil {
		in, out := &in.IndexName, &out.IndexName
		*out = new(string)
		**out = **in
	}
	if in.IndexRotationPeriod != nil {
		in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod
		*out = new(string)
		**out = **in
	}
	if in.ProcessingConfiguration != nil {
		in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration
		*out = new(ProcessingConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.RetryDuration != nil {
		in, out := &in.RetryDuration, &out.RetryDuration
		*out = new(float64)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.RoleArnRef != nil {
		in, out := &in.RoleArnRef, &out.RoleArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.RoleArnSelector != nil {
		in, out := &in.RoleArnSelector, &out.RoleArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.S3BackupMode != nil {
		in, out := &in.S3BackupMode, &out.S3BackupMode
		*out = new(string)
		**out = **in
	}
	if in.S3Configuration != nil {
		in, out := &in.S3Configuration, &out.S3Configuration
		*out = new(S3ConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TypeName != nil {
		in, out := &in.TypeName, &out.TypeName
		*out = new(string)
		**out = **in
	}
	if in.VPCConfig != nil {
		in, out := &in.VPCConfig, &out.VPCConfig
		*out = new(VPCConfigParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchConfigurationParameters.
func (in *ElasticsearchConfigurationParameters) DeepCopy() *ElasticsearchConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(ElasticsearchConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtendedS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *ExtendedS3ConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3ConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *ExtendedS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *ExtendedS3ConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(ExtendedS3ConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtendedS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *ExtendedS3ConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3ConfigurationCloudwatchLoggingOptionsObservation. 
+func (in *ExtendedS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *ExtendedS3ConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(ExtendedS3ConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtendedS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *ExtendedS3ConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3ConfigurationCloudwatchLoggingOptionsParameters. +func (in *ExtendedS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *ExtendedS3ConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(ExtendedS3ConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtendedS3ConfigurationInitParameters) DeepCopyInto(out *ExtendedS3ConfigurationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(ExtendedS3ConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.CustomTimeZone != nil { + in, out := &in.CustomTimeZone, &out.CustomTimeZone + *out = new(string) + **out = **in + } + if in.DataFormatConversionConfiguration != nil { + in, out := &in.DataFormatConversionConfiguration, &out.DataFormatConversionConfiguration + *out = new(DataFormatConversionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DynamicPartitioningConfiguration != nil { + in, out := &in.DynamicPartitioningConfiguration, &out.DynamicPartitioningConfiguration + *out = new(DynamicPartitioningConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.FileExtension != nil { + in, out := &in.FileExtension, &out.FileExtension + *out = new(string) + **out = **in + } + if in.KMSKeyArn 
!= nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ExtendedS3ConfigurationProcessingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupConfiguration != nil { + in, out := &in.S3BackupConfiguration, &out.S3BackupConfiguration + *out = new(S3BackupConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3ConfigurationInitParameters. +func (in *ExtendedS3ConfigurationInitParameters) DeepCopy() *ExtendedS3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ExtendedS3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtendedS3ConfigurationObservation) DeepCopyInto(out *ExtendedS3ConfigurationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(ExtendedS3ConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.CustomTimeZone != nil { + in, out := &in.CustomTimeZone, &out.CustomTimeZone + *out = new(string) + **out = **in + } + if in.DataFormatConversionConfiguration != nil { + in, out := &in.DataFormatConversionConfiguration, &out.DataFormatConversionConfiguration + *out = new(DataFormatConversionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.DynamicPartitioningConfiguration != nil { + in, out := &in.DynamicPartitioningConfiguration, &out.DynamicPartitioningConfiguration + *out = new(DynamicPartitioningConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.FileExtension != nil { + in, out := &in.FileExtension, &out.FileExtension + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, 
&out.ProcessingConfiguration + *out = new(ExtendedS3ConfigurationProcessingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.S3BackupConfiguration != nil { + in, out := &in.S3BackupConfiguration, &out.S3BackupConfiguration + *out = new(S3BackupConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3ConfigurationObservation. +func (in *ExtendedS3ConfigurationObservation) DeepCopy() *ExtendedS3ConfigurationObservation { + if in == nil { + return nil + } + out := new(ExtendedS3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtendedS3ConfigurationParameters) DeepCopyInto(out *ExtendedS3ConfigurationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(ExtendedS3ConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.CustomTimeZone != nil { + in, out := &in.CustomTimeZone, &out.CustomTimeZone + *out = new(string) + **out = **in + } + if in.DataFormatConversionConfiguration != nil { + in, out := &in.DataFormatConversionConfiguration, &out.DataFormatConversionConfiguration + *out = new(DataFormatConversionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.DynamicPartitioningConfiguration != nil { + in, out := &in.DynamicPartitioningConfiguration, &out.DynamicPartitioningConfiguration + *out = new(DynamicPartitioningConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.FileExtension != nil { + in, out := &in.FileExtension, &out.FileExtension + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out 
:= &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ExtendedS3ConfigurationProcessingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupConfiguration != nil { + in, out := &in.S3BackupConfiguration, &out.S3BackupConfiguration + *out = new(S3BackupConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3ConfigurationParameters. +func (in *ExtendedS3ConfigurationParameters) DeepCopy() *ExtendedS3ConfigurationParameters { + if in == nil { + return nil + } + out := new(ExtendedS3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtendedS3ConfigurationProcessingConfigurationInitParameters) DeepCopyInto(out *ExtendedS3ConfigurationProcessingConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]ProcessingConfigurationProcessorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3ConfigurationProcessingConfigurationInitParameters. +func (in *ExtendedS3ConfigurationProcessingConfigurationInitParameters) DeepCopy() *ExtendedS3ConfigurationProcessingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ExtendedS3ConfigurationProcessingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtendedS3ConfigurationProcessingConfigurationObservation) DeepCopyInto(out *ExtendedS3ConfigurationProcessingConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]ProcessingConfigurationProcessorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3ConfigurationProcessingConfigurationObservation. 
+func (in *ExtendedS3ConfigurationProcessingConfigurationObservation) DeepCopy() *ExtendedS3ConfigurationProcessingConfigurationObservation { + if in == nil { + return nil + } + out := new(ExtendedS3ConfigurationProcessingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtendedS3ConfigurationProcessingConfigurationParameters) DeepCopyInto(out *ExtendedS3ConfigurationProcessingConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]ProcessingConfigurationProcessorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedS3ConfigurationProcessingConfigurationParameters. +func (in *ExtendedS3ConfigurationProcessingConfigurationParameters) DeepCopy() *ExtendedS3ConfigurationProcessingConfigurationParameters { + if in == nil { + return nil + } + out := new(ExtendedS3ConfigurationProcessingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPEndpointConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *HTTPEndpointConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *HTTPEndpointConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *HTTPEndpointConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *HTTPEndpointConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationCloudwatchLoggingOptionsObservation. 
+func (in *HTTPEndpointConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *HTTPEndpointConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *HTTPEndpointConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationCloudwatchLoggingOptionsParameters. +func (in *HTTPEndpointConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *HTTPEndpointConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPEndpointConfigurationInitParameters) DeepCopyInto(out *HTTPEndpointConfigurationInitParameters) { + *out = *in + if in.AccessKeySecretRef != nil { + in, out := &in.AccessKeySecretRef, &out.AccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(HTTPEndpointConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(HTTPEndpointConfigurationProcessingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequestConfiguration != nil { + in, out := &in.RequestConfiguration, &out.RequestConfiguration + *out = new(RequestConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = 
new(HTTPEndpointConfigurationS3ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationInitParameters. +func (in *HTTPEndpointConfigurationInitParameters) DeepCopy() *HTTPEndpointConfigurationInitParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationObservation) DeepCopyInto(out *HTTPEndpointConfigurationObservation) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(HTTPEndpointConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(HTTPEndpointConfigurationProcessingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RequestConfiguration != nil { + in, out := &in.RequestConfiguration, &out.RequestConfiguration + *out = new(RequestConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if 
in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(HTTPEndpointConfigurationS3ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationObservation. +func (in *HTTPEndpointConfigurationObservation) DeepCopy() *HTTPEndpointConfigurationObservation { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationParameters) DeepCopyInto(out *HTTPEndpointConfigurationParameters) { + *out = *in + if in.AccessKeySecretRef != nil { + in, out := &in.AccessKeySecretRef, &out.AccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(HTTPEndpointConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(HTTPEndpointConfigurationProcessingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RequestConfiguration != nil { + in, out := &in.RequestConfiguration, 
&out.RequestConfiguration + *out = new(RequestConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(HTTPEndpointConfigurationS3ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationParameters. +func (in *HTTPEndpointConfigurationParameters) DeepCopy() *HTTPEndpointConfigurationParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPEndpointConfigurationProcessingConfigurationInitParameters) DeepCopyInto(out *HTTPEndpointConfigurationProcessingConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]HTTPEndpointConfigurationProcessingConfigurationProcessorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationProcessingConfigurationInitParameters. +func (in *HTTPEndpointConfigurationProcessingConfigurationInitParameters) DeepCopy() *HTTPEndpointConfigurationProcessingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationProcessingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationProcessingConfigurationObservation) DeepCopyInto(out *HTTPEndpointConfigurationProcessingConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]HTTPEndpointConfigurationProcessingConfigurationProcessorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationProcessingConfigurationObservation. 
+func (in *HTTPEndpointConfigurationProcessingConfigurationObservation) DeepCopy() *HTTPEndpointConfigurationProcessingConfigurationObservation { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationProcessingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationProcessingConfigurationParameters) DeepCopyInto(out *HTTPEndpointConfigurationProcessingConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]HTTPEndpointConfigurationProcessingConfigurationProcessorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationProcessingConfigurationParameters. +func (in *HTTPEndpointConfigurationProcessingConfigurationParameters) DeepCopy() *HTTPEndpointConfigurationProcessingConfigurationParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationProcessingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPEndpointConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopyInto(out *HTTPEndpointConfigurationProcessingConfigurationProcessorsInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ProcessingConfigurationProcessorsParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationProcessingConfigurationProcessorsInitParameters. +func (in *HTTPEndpointConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopy() *HTTPEndpointConfigurationProcessingConfigurationProcessorsInitParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationProcessingConfigurationProcessorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationProcessingConfigurationProcessorsObservation) DeepCopyInto(out *HTTPEndpointConfigurationProcessingConfigurationProcessorsObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ProcessingConfigurationProcessorsParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationProcessingConfigurationProcessorsObservation. 
+func (in *HTTPEndpointConfigurationProcessingConfigurationProcessorsObservation) DeepCopy() *HTTPEndpointConfigurationProcessingConfigurationProcessorsObservation { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationProcessingConfigurationProcessorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationProcessingConfigurationProcessorsParameters) DeepCopyInto(out *HTTPEndpointConfigurationProcessingConfigurationProcessorsParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ProcessingConfigurationProcessorsParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationProcessingConfigurationProcessorsParameters. +func (in *HTTPEndpointConfigurationProcessingConfigurationProcessorsParameters) DeepCopy() *HTTPEndpointConfigurationProcessingConfigurationProcessorsParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationProcessingConfigurationProcessorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation. 
+func (in *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters. +func (in *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPEndpointConfigurationS3ConfigurationInitParameters) DeepCopyInto(out *HTTPEndpointConfigurationS3ConfigurationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new HTTPEndpointConfigurationS3ConfigurationInitParameters. +func (in *HTTPEndpointConfigurationS3ConfigurationInitParameters) DeepCopy() *HTTPEndpointConfigurationS3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationS3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationS3ConfigurationObservation) DeepCopyInto(out *HTTPEndpointConfigurationS3ConfigurationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationS3ConfigurationObservation. 
+func (in *HTTPEndpointConfigurationS3ConfigurationObservation) DeepCopy() *HTTPEndpointConfigurationS3ConfigurationObservation { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationS3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPEndpointConfigurationS3ConfigurationParameters) DeepCopyInto(out *HTTPEndpointConfigurationS3ConfigurationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(HTTPEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = 
**in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointConfigurationS3ConfigurationParameters. +func (in *HTTPEndpointConfigurationS3ConfigurationParameters) DeepCopy() *HTTPEndpointConfigurationS3ConfigurationParameters { + if in == nil { + return nil + } + out := new(HTTPEndpointConfigurationS3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveJSONSerDeInitParameters) DeepCopyInto(out *HiveJSONSerDeInitParameters) { + *out = *in + if in.TimestampFormats != nil { + in, out := &in.TimestampFormats, &out.TimestampFormats + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveJSONSerDeInitParameters. +func (in *HiveJSONSerDeInitParameters) DeepCopy() *HiveJSONSerDeInitParameters { + if in == nil { + return nil + } + out := new(HiveJSONSerDeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HiveJSONSerDeObservation) DeepCopyInto(out *HiveJSONSerDeObservation) { + *out = *in + if in.TimestampFormats != nil { + in, out := &in.TimestampFormats, &out.TimestampFormats + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveJSONSerDeObservation. +func (in *HiveJSONSerDeObservation) DeepCopy() *HiveJSONSerDeObservation { + if in == nil { + return nil + } + out := new(HiveJSONSerDeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveJSONSerDeParameters) DeepCopyInto(out *HiveJSONSerDeParameters) { + *out = *in + if in.TimestampFormats != nil { + in, out := &in.TimestampFormats, &out.TimestampFormats + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveJSONSerDeParameters. +func (in *HiveJSONSerDeParameters) DeepCopy() *HiveJSONSerDeParameters { + if in == nil { + return nil + } + out := new(HiveJSONSerDeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputFormatConfigurationInitParameters) DeepCopyInto(out *InputFormatConfigurationInitParameters) { + *out = *in + if in.Deserializer != nil { + in, out := &in.Deserializer, &out.Deserializer + *out = new(DeserializerInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputFormatConfigurationInitParameters. 
+func (in *InputFormatConfigurationInitParameters) DeepCopy() *InputFormatConfigurationInitParameters { + if in == nil { + return nil + } + out := new(InputFormatConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputFormatConfigurationObservation) DeepCopyInto(out *InputFormatConfigurationObservation) { + *out = *in + if in.Deserializer != nil { + in, out := &in.Deserializer, &out.Deserializer + *out = new(DeserializerObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputFormatConfigurationObservation. +func (in *InputFormatConfigurationObservation) DeepCopy() *InputFormatConfigurationObservation { + if in == nil { + return nil + } + out := new(InputFormatConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputFormatConfigurationParameters) DeepCopyInto(out *InputFormatConfigurationParameters) { + *out = *in + if in.Deserializer != nil { + in, out := &in.Deserializer, &out.Deserializer + *out = new(DeserializerParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputFormatConfigurationParameters. +func (in *InputFormatConfigurationParameters) DeepCopy() *InputFormatConfigurationParameters { + if in == nil { + return nil + } + out := new(InputFormatConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisSourceConfigurationInitParameters) DeepCopyInto(out *KinesisSourceConfigurationInitParameters) { + *out = *in + if in.KinesisStreamArn != nil { + in, out := &in.KinesisStreamArn, &out.KinesisStreamArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisSourceConfigurationInitParameters. +func (in *KinesisSourceConfigurationInitParameters) DeepCopy() *KinesisSourceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(KinesisSourceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisSourceConfigurationObservation) DeepCopyInto(out *KinesisSourceConfigurationObservation) { + *out = *in + if in.KinesisStreamArn != nil { + in, out := &in.KinesisStreamArn, &out.KinesisStreamArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisSourceConfigurationObservation. +func (in *KinesisSourceConfigurationObservation) DeepCopy() *KinesisSourceConfigurationObservation { + if in == nil { + return nil + } + out := new(KinesisSourceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisSourceConfigurationParameters) DeepCopyInto(out *KinesisSourceConfigurationParameters) { + *out = *in + if in.KinesisStreamArn != nil { + in, out := &in.KinesisStreamArn, &out.KinesisStreamArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisSourceConfigurationParameters. +func (in *KinesisSourceConfigurationParameters) DeepCopy() *KinesisSourceConfigurationParameters { + if in == nil { + return nil + } + out := new(KinesisSourceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MskSourceConfigurationInitParameters) DeepCopyInto(out *MskSourceConfigurationInitParameters) { + *out = *in + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MskClusterArn != nil { + in, out := &in.MskClusterArn, &out.MskClusterArn + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MskSourceConfigurationInitParameters. +func (in *MskSourceConfigurationInitParameters) DeepCopy() *MskSourceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(MskSourceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MskSourceConfigurationObservation) DeepCopyInto(out *MskSourceConfigurationObservation) { + *out = *in + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.MskClusterArn != nil { + in, out := &in.MskClusterArn, &out.MskClusterArn + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MskSourceConfigurationObservation. +func (in *MskSourceConfigurationObservation) DeepCopy() *MskSourceConfigurationObservation { + if in == nil { + return nil + } + out := new(MskSourceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MskSourceConfigurationParameters) DeepCopyInto(out *MskSourceConfigurationParameters) { + *out = *in + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.MskClusterArn != nil { + in, out := &in.MskClusterArn, &out.MskClusterArn + *out = new(string) + **out = **in + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MskSourceConfigurationParameters. 
+func (in *MskSourceConfigurationParameters) DeepCopy() *MskSourceConfigurationParameters { + if in == nil { + return nil + } + out := new(MskSourceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenXJSONSerDeInitParameters) DeepCopyInto(out *OpenXJSONSerDeInitParameters) { + *out = *in + if in.CaseInsensitive != nil { + in, out := &in.CaseInsensitive, &out.CaseInsensitive + *out = new(bool) + **out = **in + } + if in.ColumnToJSONKeyMappings != nil { + in, out := &in.ColumnToJSONKeyMappings, &out.ColumnToJSONKeyMappings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ConvertDotsInJSONKeysToUnderscores != nil { + in, out := &in.ConvertDotsInJSONKeysToUnderscores, &out.ConvertDotsInJSONKeysToUnderscores + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenXJSONSerDeInitParameters. +func (in *OpenXJSONSerDeInitParameters) DeepCopy() *OpenXJSONSerDeInitParameters { + if in == nil { + return nil + } + out := new(OpenXJSONSerDeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenXJSONSerDeObservation) DeepCopyInto(out *OpenXJSONSerDeObservation) { + *out = *in + if in.CaseInsensitive != nil { + in, out := &in.CaseInsensitive, &out.CaseInsensitive + *out = new(bool) + **out = **in + } + if in.ColumnToJSONKeyMappings != nil { + in, out := &in.ColumnToJSONKeyMappings, &out.ColumnToJSONKeyMappings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ConvertDotsInJSONKeysToUnderscores != nil { + in, out := &in.ConvertDotsInJSONKeysToUnderscores, &out.ConvertDotsInJSONKeysToUnderscores + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenXJSONSerDeObservation. +func (in *OpenXJSONSerDeObservation) DeepCopy() *OpenXJSONSerDeObservation { + if in == nil { + return nil + } + out := new(OpenXJSONSerDeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenXJSONSerDeParameters) DeepCopyInto(out *OpenXJSONSerDeParameters) { + *out = *in + if in.CaseInsensitive != nil { + in, out := &in.CaseInsensitive, &out.CaseInsensitive + *out = new(bool) + **out = **in + } + if in.ColumnToJSONKeyMappings != nil { + in, out := &in.ColumnToJSONKeyMappings, &out.ColumnToJSONKeyMappings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ConvertDotsInJSONKeysToUnderscores != nil { + in, out := &in.ConvertDotsInJSONKeysToUnderscores, &out.ConvertDotsInJSONKeysToUnderscores + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenXJSONSerDeParameters. +func (in *OpenXJSONSerDeParameters) DeepCopy() *OpenXJSONSerDeParameters { + if in == nil { + return nil + } + out := new(OpenXJSONSerDeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *OpensearchConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationCloudwatchLoggingOptionsInitParameters. 
+func (in *OpensearchConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *OpensearchConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *OpensearchConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationCloudwatchLoggingOptionsObservation. +func (in *OpensearchConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *OpensearchConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(OpensearchConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *OpensearchConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationCloudwatchLoggingOptionsParameters. +func (in *OpensearchConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *OpensearchConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationInitParameters) DeepCopyInto(out *OpensearchConfigurationInitParameters) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DocumentIDOptions != nil { + in, out := &in.DocumentIDOptions, &out.DocumentIDOptions + *out = new(DocumentIDOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainArn != nil { + in, out := &in.DomainArn, &out.DomainArn + *out = new(string) + **out = **in + } + if in.DomainArnRef != nil { + in, out := &in.DomainArnRef, &out.DomainArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainArnSelector != nil { + in, out := &in.DomainArnSelector, &out.DomainArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexRotationPeriod != nil { + in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(OpensearchConfigurationProcessingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if 
in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(OpensearchConfigurationS3ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(OpensearchConfigurationVPCConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationInitParameters. +func (in *OpensearchConfigurationInitParameters) DeepCopy() *OpensearchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationObservation) DeepCopyInto(out *OpensearchConfigurationObservation) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DocumentIDOptions != nil { + in, out := &in.DocumentIDOptions, &out.DocumentIDOptions + *out = new(DocumentIDOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.DomainArn != nil { + in, out := &in.DomainArn, &out.DomainArn + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexRotationPeriod != nil { + in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(OpensearchConfigurationProcessingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(OpensearchConfigurationS3ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if 
in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(OpensearchConfigurationVPCConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationObservation. +func (in *OpensearchConfigurationObservation) DeepCopy() *OpensearchConfigurationObservation { + if in == nil { + return nil + } + out := new(OpensearchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationParameters) DeepCopyInto(out *OpensearchConfigurationParameters) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DocumentIDOptions != nil { + in, out := &in.DocumentIDOptions, &out.DocumentIDOptions + *out = new(DocumentIDOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainArn != nil { + in, out := &in.DomainArn, &out.DomainArn + *out = new(string) + **out = **in + } + if in.DomainArnRef != nil { + in, out := &in.DomainArnRef, &out.DomainArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainArnSelector != nil { + in, out := &in.DomainArnSelector, &out.DomainArnSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexRotationPeriod != nil { + in, out := &in.IndexRotationPeriod, &out.IndexRotationPeriod + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(OpensearchConfigurationProcessingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(OpensearchConfigurationS3ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(OpensearchConfigurationVPCConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationParameters. 
+func (in *OpensearchConfigurationParameters) DeepCopy() *OpensearchConfigurationParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationProcessingConfigurationInitParameters) DeepCopyInto(out *OpensearchConfigurationProcessingConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]OpensearchConfigurationProcessingConfigurationProcessorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationProcessingConfigurationInitParameters. +func (in *OpensearchConfigurationProcessingConfigurationInitParameters) DeepCopy() *OpensearchConfigurationProcessingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationProcessingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationProcessingConfigurationObservation) DeepCopyInto(out *OpensearchConfigurationProcessingConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]OpensearchConfigurationProcessingConfigurationProcessorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationProcessingConfigurationObservation. +func (in *OpensearchConfigurationProcessingConfigurationObservation) DeepCopy() *OpensearchConfigurationProcessingConfigurationObservation { + if in == nil { + return nil + } + out := new(OpensearchConfigurationProcessingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationProcessingConfigurationParameters) DeepCopyInto(out *OpensearchConfigurationProcessingConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]OpensearchConfigurationProcessingConfigurationProcessorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationProcessingConfigurationParameters. 
+func (in *OpensearchConfigurationProcessingConfigurationParameters) DeepCopy() *OpensearchConfigurationProcessingConfigurationParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationProcessingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopyInto(out *OpensearchConfigurationProcessingConfigurationProcessorsInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]OpensearchConfigurationProcessingConfigurationProcessorsParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationProcessingConfigurationProcessorsInitParameters. +func (in *OpensearchConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopy() *OpensearchConfigurationProcessingConfigurationProcessorsInitParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationProcessingConfigurationProcessorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationProcessingConfigurationProcessorsObservation) DeepCopyInto(out *OpensearchConfigurationProcessingConfigurationProcessorsObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]OpensearchConfigurationProcessingConfigurationProcessorsParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationProcessingConfigurationProcessorsObservation. +func (in *OpensearchConfigurationProcessingConfigurationProcessorsObservation) DeepCopy() *OpensearchConfigurationProcessingConfigurationProcessorsObservation { + if in == nil { + return nil + } + out := new(OpensearchConfigurationProcessingConfigurationProcessorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationProcessingConfigurationProcessorsParameters) DeepCopyInto(out *OpensearchConfigurationProcessingConfigurationProcessorsParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]OpensearchConfigurationProcessingConfigurationProcessorsParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationProcessingConfigurationProcessorsParameters. 
+func (in *OpensearchConfigurationProcessingConfigurationProcessorsParameters) DeepCopy() *OpensearchConfigurationProcessingConfigurationProcessorsParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationProcessingConfigurationProcessorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopyInto(out *OpensearchConfigurationProcessingConfigurationProcessorsParametersInitParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationProcessingConfigurationProcessorsParametersInitParameters. +func (in *OpensearchConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopy() *OpensearchConfigurationProcessingConfigurationProcessorsParametersInitParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationProcessingConfigurationProcessorsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopyInto(out *OpensearchConfigurationProcessingConfigurationProcessorsParametersObservation) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationProcessingConfigurationProcessorsParametersObservation. +func (in *OpensearchConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopy() *OpensearchConfigurationProcessingConfigurationProcessorsParametersObservation { + if in == nil { + return nil + } + out := new(OpensearchConfigurationProcessingConfigurationProcessorsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopyInto(out *OpensearchConfigurationProcessingConfigurationProcessorsParametersParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationProcessingConfigurationProcessorsParametersParameters. 
+func (in *OpensearchConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopy() *OpensearchConfigurationProcessingConfigurationProcessorsParametersParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationProcessingConfigurationProcessorsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation. +func (in *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters. 
+func (in *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationS3ConfigurationInitParameters) DeepCopyInto(out *OpensearchConfigurationS3ConfigurationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { 
+ in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationS3ConfigurationInitParameters. +func (in *OpensearchConfigurationS3ConfigurationInitParameters) DeepCopy() *OpensearchConfigurationS3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationS3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationS3ConfigurationObservation) DeepCopyInto(out *OpensearchConfigurationS3ConfigurationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) 
+ **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationS3ConfigurationObservation. +func (in *OpensearchConfigurationS3ConfigurationObservation) DeepCopy() *OpensearchConfigurationS3ConfigurationObservation { + if in == nil { + return nil + } + out := new(OpensearchConfigurationS3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchConfigurationS3ConfigurationParameters) DeepCopyInto(out *OpensearchConfigurationS3ConfigurationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, 
&out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationS3ConfigurationParameters. +func (in *OpensearchConfigurationS3ConfigurationParameters) DeepCopy() *OpensearchConfigurationS3ConfigurationParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationS3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationVPCConfigInitParameters) DeepCopyInto(out *OpensearchConfigurationVPCConfigInitParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationVPCConfigInitParameters. +func (in *OpensearchConfigurationVPCConfigInitParameters) DeepCopy() *OpensearchConfigurationVPCConfigInitParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationVPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationVPCConfigObservation) DeepCopyInto(out *OpensearchConfigurationVPCConfigObservation) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationVPCConfigObservation. +func (in *OpensearchConfigurationVPCConfigObservation) DeepCopy() *OpensearchConfigurationVPCConfigObservation { + if in == nil { + return nil + } + out := new(OpensearchConfigurationVPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchConfigurationVPCConfigParameters) DeepCopyInto(out *OpensearchConfigurationVPCConfigParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchConfigurationVPCConfigParameters. +func (in *OpensearchConfigurationVPCConfigParameters) DeepCopy() *OpensearchConfigurationVPCConfigParameters { + if in == nil { + return nil + } + out := new(OpensearchConfigurationVPCConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserverlessConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *OpensearchserverlessConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *OpensearchserverlessConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *OpensearchserverlessConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *OpensearchserverlessConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationCloudwatchLoggingOptionsObservation. 
+func (in *OpensearchserverlessConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *OpensearchserverlessConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *OpensearchserverlessConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationCloudwatchLoggingOptionsParameters. +func (in *OpensearchserverlessConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *OpensearchserverlessConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserverlessConfigurationInitParameters) DeepCopyInto(out *OpensearchserverlessConfigurationInitParameters) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchserverlessConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CollectionEndpoint != nil { + in, out := &in.CollectionEndpoint, &out.CollectionEndpoint + *out = new(string) + **out = **in + } + if in.CollectionEndpointRef != nil { + in, out := &in.CollectionEndpointRef, &out.CollectionEndpointRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CollectionEndpointSelector != nil { + in, out := &in.CollectionEndpointSelector, &out.CollectionEndpointSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(OpensearchserverlessConfigurationProcessingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := 
&in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(OpensearchserverlessConfigurationS3ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(OpensearchserverlessConfigurationVPCConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationInitParameters. +func (in *OpensearchserverlessConfigurationInitParameters) DeepCopy() *OpensearchserverlessConfigurationInitParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationObservation) DeepCopyInto(out *OpensearchserverlessConfigurationObservation) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchserverlessConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CollectionEndpoint != nil { + in, out := &in.CollectionEndpoint, &out.CollectionEndpoint + *out = new(string) + **out = **in + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = 
new(OpensearchserverlessConfigurationProcessingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(OpensearchserverlessConfigurationS3ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(OpensearchserverlessConfigurationVPCConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationObservation. +func (in *OpensearchserverlessConfigurationObservation) DeepCopy() *OpensearchserverlessConfigurationObservation { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserverlessConfigurationParameters) DeepCopyInto(out *OpensearchserverlessConfigurationParameters) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchserverlessConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CollectionEndpoint != nil { + in, out := &in.CollectionEndpoint, &out.CollectionEndpoint + *out = new(string) + **out = **in + } + if in.CollectionEndpointRef != nil { + in, out := &in.CollectionEndpointRef, &out.CollectionEndpointRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CollectionEndpointSelector != nil { + in, out := &in.CollectionEndpointSelector, &out.CollectionEndpointSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(OpensearchserverlessConfigurationProcessingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, 
&out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(OpensearchserverlessConfigurationS3ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(OpensearchserverlessConfigurationVPCConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationParameters. +func (in *OpensearchserverlessConfigurationParameters) DeepCopy() *OpensearchserverlessConfigurationParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationProcessingConfigurationInitParameters) DeepCopyInto(out *OpensearchserverlessConfigurationProcessingConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]OpensearchserverlessConfigurationProcessingConfigurationProcessorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationProcessingConfigurationInitParameters. 
+func (in *OpensearchserverlessConfigurationProcessingConfigurationInitParameters) DeepCopy() *OpensearchserverlessConfigurationProcessingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationProcessingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationProcessingConfigurationObservation) DeepCopyInto(out *OpensearchserverlessConfigurationProcessingConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]OpensearchserverlessConfigurationProcessingConfigurationProcessorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationProcessingConfigurationObservation. +func (in *OpensearchserverlessConfigurationProcessingConfigurationObservation) DeepCopy() *OpensearchserverlessConfigurationProcessingConfigurationObservation { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationProcessingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserverlessConfigurationProcessingConfigurationParameters) DeepCopyInto(out *OpensearchserverlessConfigurationProcessingConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]OpensearchserverlessConfigurationProcessingConfigurationProcessorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationProcessingConfigurationParameters. +func (in *OpensearchserverlessConfigurationProcessingConfigurationParameters) DeepCopy() *OpensearchserverlessConfigurationProcessingConfigurationParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationProcessingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopyInto(out *OpensearchserverlessConfigurationProcessingConfigurationProcessorsInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationProcessingConfigurationProcessorsInitParameters. 
+func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopy() *OpensearchserverlessConfigurationProcessingConfigurationProcessorsInitParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationProcessingConfigurationProcessorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsObservation) DeepCopyInto(out *OpensearchserverlessConfigurationProcessingConfigurationProcessorsObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationProcessingConfigurationProcessorsObservation. +func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsObservation) DeepCopy() *OpensearchserverlessConfigurationProcessingConfigurationProcessorsObservation { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationProcessingConfigurationProcessorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParameters) DeepCopyInto(out *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationProcessingConfigurationProcessorsParameters. +func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParameters) DeepCopy() *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationProcessingConfigurationProcessorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopyInto(out *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersInitParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersInitParameters. 
+func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopy() *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersInitParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopyInto(out *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersObservation) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersObservation. +func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopy() *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersObservation { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopyInto(out *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersParameters. +func (in *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopy() *OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationProcessingConfigurationProcessorsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters. 
+func (in *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation. +func (in *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters. +func (in *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserverlessConfigurationS3ConfigurationInitParameters) DeepCopyInto(out *OpensearchserverlessConfigurationS3ConfigurationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying 
the receiver, creating a new OpensearchserverlessConfigurationS3ConfigurationInitParameters. +func (in *OpensearchserverlessConfigurationS3ConfigurationInitParameters) DeepCopy() *OpensearchserverlessConfigurationS3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationS3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationS3ConfigurationObservation) DeepCopyInto(out *OpensearchserverlessConfigurationS3ConfigurationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
OpensearchserverlessConfigurationS3ConfigurationObservation. +func (in *OpensearchserverlessConfigurationS3ConfigurationObservation) DeepCopy() *OpensearchserverlessConfigurationS3ConfigurationObservation { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationS3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationS3ConfigurationParameters) DeepCopyInto(out *OpensearchserverlessConfigurationS3ConfigurationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(OpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + 
**out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationS3ConfigurationParameters. +func (in *OpensearchserverlessConfigurationS3ConfigurationParameters) DeepCopy() *OpensearchserverlessConfigurationS3ConfigurationParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationS3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationVPCConfigInitParameters) DeepCopyInto(out *OpensearchserverlessConfigurationVPCConfigInitParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationVPCConfigInitParameters. 
+func (in *OpensearchserverlessConfigurationVPCConfigInitParameters) DeepCopy() *OpensearchserverlessConfigurationVPCConfigInitParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationVPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpensearchserverlessConfigurationVPCConfigObservation) DeepCopyInto(out *OpensearchserverlessConfigurationVPCConfigObservation) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationVPCConfigObservation. +func (in *OpensearchserverlessConfigurationVPCConfigObservation) DeepCopy() *OpensearchserverlessConfigurationVPCConfigObservation { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationVPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpensearchserverlessConfigurationVPCConfigParameters) DeepCopyInto(out *OpensearchserverlessConfigurationVPCConfigParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpensearchserverlessConfigurationVPCConfigParameters. +func (in *OpensearchserverlessConfigurationVPCConfigParameters) DeepCopy() *OpensearchserverlessConfigurationVPCConfigParameters { + if in == nil { + return nil + } + out := new(OpensearchserverlessConfigurationVPCConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrcSerDeInitParameters) DeepCopyInto(out *OrcSerDeInitParameters) { + *out = *in + if in.BlockSizeBytes != nil { + in, out := &in.BlockSizeBytes, &out.BlockSizeBytes + *out = new(float64) + **out = **in + } + if in.BloomFilterColumns != nil { + in, out := &in.BloomFilterColumns, &out.BloomFilterColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BloomFilterFalsePositiveProbability != nil { + in, out := &in.BloomFilterFalsePositiveProbability, &out.BloomFilterFalsePositiveProbability + *out = new(float64) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.DictionaryKeyThreshold != nil { + in, out := &in.DictionaryKeyThreshold, &out.DictionaryKeyThreshold + *out = new(float64) + **out = **in + } + if in.EnablePadding != nil { + in, out := &in.EnablePadding, &out.EnablePadding + *out = new(bool) + **out = **in + } + if in.FormatVersion != nil { + in, out := &in.FormatVersion, &out.FormatVersion + *out = new(string) + **out = **in + } + if in.PaddingTolerance != nil { + in, out := &in.PaddingTolerance, &out.PaddingTolerance + *out = new(float64) + **out = **in + } + if in.RowIndexStride != nil { + in, out := &in.RowIndexStride, &out.RowIndexStride + *out = new(float64) + **out = **in + } + if in.StripeSizeBytes != nil { + in, out := &in.StripeSizeBytes, &out.StripeSizeBytes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrcSerDeInitParameters. +func (in *OrcSerDeInitParameters) DeepCopy() *OrcSerDeInitParameters { + if in == nil { + return nil + } + out := new(OrcSerDeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrcSerDeObservation) DeepCopyInto(out *OrcSerDeObservation) { + *out = *in + if in.BlockSizeBytes != nil { + in, out := &in.BlockSizeBytes, &out.BlockSizeBytes + *out = new(float64) + **out = **in + } + if in.BloomFilterColumns != nil { + in, out := &in.BloomFilterColumns, &out.BloomFilterColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BloomFilterFalsePositiveProbability != nil { + in, out := &in.BloomFilterFalsePositiveProbability, &out.BloomFilterFalsePositiveProbability + *out = new(float64) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.DictionaryKeyThreshold != nil { + in, out := &in.DictionaryKeyThreshold, &out.DictionaryKeyThreshold + *out = new(float64) + **out = **in + } + if in.EnablePadding != nil { + in, out := &in.EnablePadding, &out.EnablePadding + *out = new(bool) + **out = **in + } + if in.FormatVersion != nil { + in, out := &in.FormatVersion, &out.FormatVersion + *out = new(string) + **out = **in + } + if in.PaddingTolerance != nil { + in, out := &in.PaddingTolerance, &out.PaddingTolerance + *out = new(float64) + **out = **in + } + if in.RowIndexStride != nil { + in, out := &in.RowIndexStride, &out.RowIndexStride + *out = new(float64) + **out = **in + } + if in.StripeSizeBytes != nil { + in, out := &in.StripeSizeBytes, &out.StripeSizeBytes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrcSerDeObservation. +func (in *OrcSerDeObservation) DeepCopy() *OrcSerDeObservation { + if in == nil { + return nil + } + out := new(OrcSerDeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrcSerDeParameters) DeepCopyInto(out *OrcSerDeParameters) { + *out = *in + if in.BlockSizeBytes != nil { + in, out := &in.BlockSizeBytes, &out.BlockSizeBytes + *out = new(float64) + **out = **in + } + if in.BloomFilterColumns != nil { + in, out := &in.BloomFilterColumns, &out.BloomFilterColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BloomFilterFalsePositiveProbability != nil { + in, out := &in.BloomFilterFalsePositiveProbability, &out.BloomFilterFalsePositiveProbability + *out = new(float64) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.DictionaryKeyThreshold != nil { + in, out := &in.DictionaryKeyThreshold, &out.DictionaryKeyThreshold + *out = new(float64) + **out = **in + } + if in.EnablePadding != nil { + in, out := &in.EnablePadding, &out.EnablePadding + *out = new(bool) + **out = **in + } + if in.FormatVersion != nil { + in, out := &in.FormatVersion, &out.FormatVersion + *out = new(string) + **out = **in + } + if in.PaddingTolerance != nil { + in, out := &in.PaddingTolerance, &out.PaddingTolerance + *out = new(float64) + **out = **in + } + if in.RowIndexStride != nil { + in, out := &in.RowIndexStride, &out.RowIndexStride + *out = new(float64) + **out = **in + } + if in.StripeSizeBytes != nil { + in, out := &in.StripeSizeBytes, &out.StripeSizeBytes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrcSerDeParameters. +func (in *OrcSerDeParameters) DeepCopy() *OrcSerDeParameters { + if in == nil { + return nil + } + out := new(OrcSerDeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputFormatConfigurationInitParameters) DeepCopyInto(out *OutputFormatConfigurationInitParameters) { + *out = *in + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = new(SerializerInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputFormatConfigurationInitParameters. +func (in *OutputFormatConfigurationInitParameters) DeepCopy() *OutputFormatConfigurationInitParameters { + if in == nil { + return nil + } + out := new(OutputFormatConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputFormatConfigurationObservation) DeepCopyInto(out *OutputFormatConfigurationObservation) { + *out = *in + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = new(SerializerObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputFormatConfigurationObservation. +func (in *OutputFormatConfigurationObservation) DeepCopy() *OutputFormatConfigurationObservation { + if in == nil { + return nil + } + out := new(OutputFormatConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputFormatConfigurationParameters) DeepCopyInto(out *OutputFormatConfigurationParameters) { + *out = *in + if in.Serializer != nil { + in, out := &in.Serializer, &out.Serializer + *out = new(SerializerParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputFormatConfigurationParameters. 
+func (in *OutputFormatConfigurationParameters) DeepCopy() *OutputFormatConfigurationParameters { + if in == nil { + return nil + } + out := new(OutputFormatConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersInitParameters) DeepCopyInto(out *ParametersInitParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersInitParameters. +func (in *ParametersInitParameters) DeepCopy() *ParametersInitParameters { + if in == nil { + return nil + } + out := new(ParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersObservation) DeepCopyInto(out *ParametersObservation) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersObservation. +func (in *ParametersObservation) DeepCopy() *ParametersObservation { + if in == nil { + return nil + } + out := new(ParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParametersParameters) DeepCopyInto(out *ParametersParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersParameters. +func (in *ParametersParameters) DeepCopy() *ParametersParameters { + if in == nil { + return nil + } + out := new(ParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParquetSerDeInitParameters) DeepCopyInto(out *ParquetSerDeInitParameters) { + *out = *in + if in.BlockSizeBytes != nil { + in, out := &in.BlockSizeBytes, &out.BlockSizeBytes + *out = new(float64) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.EnableDictionaryCompression != nil { + in, out := &in.EnableDictionaryCompression, &out.EnableDictionaryCompression + *out = new(bool) + **out = **in + } + if in.MaxPaddingBytes != nil { + in, out := &in.MaxPaddingBytes, &out.MaxPaddingBytes + *out = new(float64) + **out = **in + } + if in.PageSizeBytes != nil { + in, out := &in.PageSizeBytes, &out.PageSizeBytes + *out = new(float64) + **out = **in + } + if in.WriterVersion != nil { + in, out := &in.WriterVersion, &out.WriterVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParquetSerDeInitParameters. 
+func (in *ParquetSerDeInitParameters) DeepCopy() *ParquetSerDeInitParameters { + if in == nil { + return nil + } + out := new(ParquetSerDeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParquetSerDeObservation) DeepCopyInto(out *ParquetSerDeObservation) { + *out = *in + if in.BlockSizeBytes != nil { + in, out := &in.BlockSizeBytes, &out.BlockSizeBytes + *out = new(float64) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.EnableDictionaryCompression != nil { + in, out := &in.EnableDictionaryCompression, &out.EnableDictionaryCompression + *out = new(bool) + **out = **in + } + if in.MaxPaddingBytes != nil { + in, out := &in.MaxPaddingBytes, &out.MaxPaddingBytes + *out = new(float64) + **out = **in + } + if in.PageSizeBytes != nil { + in, out := &in.PageSizeBytes, &out.PageSizeBytes + *out = new(float64) + **out = **in + } + if in.WriterVersion != nil { + in, out := &in.WriterVersion, &out.WriterVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParquetSerDeObservation. +func (in *ParquetSerDeObservation) DeepCopy() *ParquetSerDeObservation { + if in == nil { + return nil + } + out := new(ParquetSerDeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParquetSerDeParameters) DeepCopyInto(out *ParquetSerDeParameters) { + *out = *in + if in.BlockSizeBytes != nil { + in, out := &in.BlockSizeBytes, &out.BlockSizeBytes + *out = new(float64) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.EnableDictionaryCompression != nil { + in, out := &in.EnableDictionaryCompression, &out.EnableDictionaryCompression + *out = new(bool) + **out = **in + } + if in.MaxPaddingBytes != nil { + in, out := &in.MaxPaddingBytes, &out.MaxPaddingBytes + *out = new(float64) + **out = **in + } + if in.PageSizeBytes != nil { + in, out := &in.PageSizeBytes, &out.PageSizeBytes + *out = new(float64) + **out = **in + } + if in.WriterVersion != nil { + in, out := &in.WriterVersion, &out.WriterVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParquetSerDeParameters. +func (in *ParquetSerDeParameters) DeepCopy() *ParquetSerDeParameters { + if in == nil { + return nil + } + out := new(ParquetSerDeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationInitParameters) DeepCopyInto(out *ProcessingConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]ProcessorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationInitParameters. 
+func (in *ProcessingConfigurationInitParameters) DeepCopy() *ProcessingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ProcessingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationObservation) DeepCopyInto(out *ProcessingConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]ProcessorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationObservation. +func (in *ProcessingConfigurationObservation) DeepCopy() *ProcessingConfigurationObservation { + if in == nil { + return nil + } + out := new(ProcessingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationParameters) DeepCopyInto(out *ProcessingConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]ProcessorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationParameters. 
+func (in *ProcessingConfigurationParameters) DeepCopy() *ProcessingConfigurationParameters { + if in == nil { + return nil + } + out := new(ProcessingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationProcessorsInitParameters) DeepCopyInto(out *ProcessingConfigurationProcessorsInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ProcessorsParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationProcessorsInitParameters. +func (in *ProcessingConfigurationProcessorsInitParameters) DeepCopy() *ProcessingConfigurationProcessorsInitParameters { + if in == nil { + return nil + } + out := new(ProcessingConfigurationProcessorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationProcessorsObservation) DeepCopyInto(out *ProcessingConfigurationProcessorsObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ProcessorsParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationProcessorsObservation. 
+func (in *ProcessingConfigurationProcessorsObservation) DeepCopy() *ProcessingConfigurationProcessorsObservation { + if in == nil { + return nil + } + out := new(ProcessingConfigurationProcessorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationProcessorsParameters) DeepCopyInto(out *ProcessingConfigurationProcessorsParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ProcessorsParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationProcessorsParameters. +func (in *ProcessingConfigurationProcessorsParameters) DeepCopy() *ProcessingConfigurationProcessorsParameters { + if in == nil { + return nil + } + out := new(ProcessingConfigurationProcessorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationProcessorsParametersInitParameters) DeepCopyInto(out *ProcessingConfigurationProcessorsParametersInitParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationProcessorsParametersInitParameters. 
+func (in *ProcessingConfigurationProcessorsParametersInitParameters) DeepCopy() *ProcessingConfigurationProcessorsParametersInitParameters { + if in == nil { + return nil + } + out := new(ProcessingConfigurationProcessorsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationProcessorsParametersObservation) DeepCopyInto(out *ProcessingConfigurationProcessorsParametersObservation) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationProcessorsParametersObservation. +func (in *ProcessingConfigurationProcessorsParametersObservation) DeepCopy() *ProcessingConfigurationProcessorsParametersObservation { + if in == nil { + return nil + } + out := new(ProcessingConfigurationProcessorsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationProcessorsParametersParameters) DeepCopyInto(out *ProcessingConfigurationProcessorsParametersParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationProcessorsParametersParameters. 
+func (in *ProcessingConfigurationProcessorsParametersParameters) DeepCopy() *ProcessingConfigurationProcessorsParametersParameters { + if in == nil { + return nil + } + out := new(ProcessingConfigurationProcessorsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessorsInitParameters) DeepCopyInto(out *ProcessorsInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessorsInitParameters. +func (in *ProcessorsInitParameters) DeepCopy() *ProcessorsInitParameters { + if in == nil { + return nil + } + out := new(ProcessorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessorsObservation) DeepCopyInto(out *ProcessorsObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessorsObservation. +func (in *ProcessorsObservation) DeepCopy() *ProcessorsObservation { + if in == nil { + return nil + } + out := new(ProcessorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessorsParameters) DeepCopyInto(out *ProcessorsParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessorsParameters. +func (in *ProcessorsParameters) DeepCopy() *ProcessorsParameters { + if in == nil { + return nil + } + out := new(ProcessorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessorsParametersInitParameters) DeepCopyInto(out *ProcessorsParametersInitParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessorsParametersInitParameters. +func (in *ProcessorsParametersInitParameters) DeepCopy() *ProcessorsParametersInitParameters { + if in == nil { + return nil + } + out := new(ProcessorsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessorsParametersObservation) DeepCopyInto(out *ProcessorsParametersObservation) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessorsParametersObservation. +func (in *ProcessorsParametersObservation) DeepCopy() *ProcessorsParametersObservation { + if in == nil { + return nil + } + out := new(ProcessorsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessorsParametersParameters) DeepCopyInto(out *ProcessorsParametersParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessorsParametersParameters. +func (in *ProcessorsParametersParameters) DeepCopy() *ProcessorsParametersParameters { + if in == nil { + return nil + } + out := new(ProcessorsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RedshiftConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *RedshiftConfigurationCloudwatchLoggingOptionsInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// NOTE(review): autogenerated deepcopy helpers (per the generator markers
// below); regenerate from the API types instead of hand-editing.

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationCloudwatchLoggingOptionsInitParameters.
func (in *RedshiftConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *RedshiftConfigurationCloudwatchLoggingOptionsInitParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationCloudwatchLoggingOptionsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *RedshiftConfigurationCloudwatchLoggingOptionsObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationCloudwatchLoggingOptionsObservation.
func (in *RedshiftConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *RedshiftConfigurationCloudwatchLoggingOptionsObservation {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationCloudwatchLoggingOptionsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *RedshiftConfigurationCloudwatchLoggingOptionsParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationCloudwatchLoggingOptionsParameters.
func (in *RedshiftConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *RedshiftConfigurationCloudwatchLoggingOptionsParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationCloudwatchLoggingOptionsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationInitParameters) DeepCopyInto(out *RedshiftConfigurationInitParameters) {
	*out = *in
	if in.CloudwatchLoggingOptions != nil {
		in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions
		*out = new(RedshiftConfigurationCloudwatchLoggingOptionsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ClusterJdbcurl != nil {
		in, out := &in.ClusterJdbcurl, &out.ClusterJdbcurl
		*out = new(string)
		**out = **in
	}
	if in.CopyOptions != nil {
		in, out := &in.CopyOptions, &out.CopyOptions
		*out = new(string)
		**out = **in
	}
	if in.DataTableColumns != nil {
		in, out := &in.DataTableColumns, &out.DataTableColumns
		*out = new(string)
		**out = **in
	}
	if in.DataTableName != nil {
		in, out := &in.DataTableName, &out.DataTableName
		*out = new(string)
		**out = **in
	}
	// PasswordSecretRef is copied by plain assignment (no pointer fields to follow here).
	out.PasswordSecretRef = in.PasswordSecretRef
	if in.ProcessingConfiguration != nil {
		in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration
		*out = new(RedshiftConfigurationProcessingConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.RetryDuration != nil {
		in, out := &in.RetryDuration, &out.RetryDuration
		*out = new(float64)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.RoleArnRef != nil {
		in, out := &in.RoleArnRef, &out.RoleArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.RoleArnSelector != nil {
		in, out := &in.RoleArnSelector, &out.RoleArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.S3BackupConfiguration != nil {
		in, out := &in.S3BackupConfiguration, &out.S3BackupConfiguration
		*out = new(RedshiftConfigurationS3BackupConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.S3BackupMode != nil {
		in, out := &in.S3BackupMode, &out.S3BackupMode
		*out = new(string)
		**out = **in
	}
	if in.S3Configuration != nil {
		in, out := &in.S3Configuration, &out.S3Configuration
		*out = new(RedshiftConfigurationS3ConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// NOTE(review): autogenerated deepcopy helpers (per the generator markers
// below); regenerate from the API types instead of hand-editing.

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationInitParameters.
func (in *RedshiftConfigurationInitParameters) DeepCopy() *RedshiftConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationObservation) DeepCopyInto(out *RedshiftConfigurationObservation) {
	*out = *in
	if in.CloudwatchLoggingOptions != nil {
		in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions
		*out = new(RedshiftConfigurationCloudwatchLoggingOptionsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ClusterJdbcurl != nil {
		in, out := &in.ClusterJdbcurl, &out.ClusterJdbcurl
		*out = new(string)
		**out = **in
	}
	if in.CopyOptions != nil {
		in, out := &in.CopyOptions, &out.CopyOptions
		*out = new(string)
		**out = **in
	}
	if in.DataTableColumns != nil {
		in, out := &in.DataTableColumns, &out.DataTableColumns
		*out = new(string)
		**out = **in
	}
	if in.DataTableName != nil {
		in, out := &in.DataTableName, &out.DataTableName
		*out = new(string)
		**out = **in
	}
	if in.ProcessingConfiguration != nil {
		in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration
		*out = new(RedshiftConfigurationProcessingConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.RetryDuration != nil {
		in, out := &in.RetryDuration, &out.RetryDuration
		*out = new(float64)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.S3BackupConfiguration != nil {
		in, out := &in.S3BackupConfiguration, &out.S3BackupConfiguration
		*out = new(RedshiftConfigurationS3BackupConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.S3BackupMode != nil {
		in, out := &in.S3BackupMode, &out.S3BackupMode
		*out = new(string)
		**out = **in
	}
	if in.S3Configuration != nil {
		in, out := &in.S3Configuration, &out.S3Configuration
		*out = new(RedshiftConfigurationS3ConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationObservation.
func (in *RedshiftConfigurationObservation) DeepCopy() *RedshiftConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationParameters) DeepCopyInto(out *RedshiftConfigurationParameters) {
	*out = *in
	if in.CloudwatchLoggingOptions != nil {
		in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions
		*out = new(RedshiftConfigurationCloudwatchLoggingOptionsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ClusterJdbcurl != nil {
		in, out := &in.ClusterJdbcurl, &out.ClusterJdbcurl
		*out = new(string)
		**out = **in
	}
	if in.CopyOptions != nil {
		in, out := &in.CopyOptions, &out.CopyOptions
		*out = new(string)
		**out = **in
	}
	if in.DataTableColumns != nil {
		in, out := &in.DataTableColumns, &out.DataTableColumns
		*out = new(string)
		**out = **in
	}
	if in.DataTableName != nil {
		in, out := &in.DataTableName, &out.DataTableName
		*out = new(string)
		**out = **in
	}
	// PasswordSecretRef is copied by plain assignment (no pointer fields to follow here).
	out.PasswordSecretRef = in.PasswordSecretRef
	if in.ProcessingConfiguration != nil {
		in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration
		*out = new(RedshiftConfigurationProcessingConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.RetryDuration != nil {
		in, out := &in.RetryDuration, &out.RetryDuration
		*out = new(float64)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.RoleArnRef != nil {
		in, out := &in.RoleArnRef, &out.RoleArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.RoleArnSelector != nil {
		in, out := &in.RoleArnSelector, &out.RoleArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.S3BackupConfiguration != nil {
		in, out := &in.S3BackupConfiguration, &out.S3BackupConfiguration
		*out = new(RedshiftConfigurationS3BackupConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.S3BackupMode != nil {
		in, out := &in.S3BackupMode, &out.S3BackupMode
		*out = new(string)
		**out = **in
	}
	if in.S3Configuration != nil {
		in, out := &in.S3Configuration, &out.S3Configuration
		*out = new(RedshiftConfigurationS3ConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// NOTE(review): autogenerated deepcopy helpers (per the generator markers
// below); regenerate from the API types instead of hand-editing.

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationParameters.
func (in *RedshiftConfigurationParameters) DeepCopy() *RedshiftConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationProcessingConfigurationInitParameters) DeepCopyInto(out *RedshiftConfigurationProcessingConfigurationInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Processors != nil {
		in, out := &in.Processors, &out.Processors
		*out = make([]RedshiftConfigurationProcessingConfigurationProcessorsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationProcessingConfigurationInitParameters.
func (in *RedshiftConfigurationProcessingConfigurationInitParameters) DeepCopy() *RedshiftConfigurationProcessingConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationProcessingConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationProcessingConfigurationObservation) DeepCopyInto(out *RedshiftConfigurationProcessingConfigurationObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Processors != nil {
		in, out := &in.Processors, &out.Processors
		*out = make([]RedshiftConfigurationProcessingConfigurationProcessorsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// NOTE(review): autogenerated deepcopy helpers (per the generator markers
// below); regenerate from the API types instead of hand-editing.

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationProcessingConfigurationObservation.
func (in *RedshiftConfigurationProcessingConfigurationObservation) DeepCopy() *RedshiftConfigurationProcessingConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationProcessingConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationProcessingConfigurationParameters) DeepCopyInto(out *RedshiftConfigurationProcessingConfigurationParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Processors != nil {
		in, out := &in.Processors, &out.Processors
		*out = make([]RedshiftConfigurationProcessingConfigurationProcessorsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationProcessingConfigurationParameters.
func (in *RedshiftConfigurationProcessingConfigurationParameters) DeepCopy() *RedshiftConfigurationProcessingConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationProcessingConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopyInto(out *RedshiftConfigurationProcessingConfigurationProcessorsInitParameters) {
	*out = *in
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make([]RedshiftConfigurationProcessingConfigurationProcessorsParametersInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationProcessingConfigurationProcessorsInitParameters.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopy() *RedshiftConfigurationProcessingConfigurationProcessorsInitParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationProcessingConfigurationProcessorsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsObservation) DeepCopyInto(out *RedshiftConfigurationProcessingConfigurationProcessorsObservation) {
	*out = *in
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make([]RedshiftConfigurationProcessingConfigurationProcessorsParametersObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// NOTE(review): autogenerated deepcopy helpers (per the generator markers
// below); regenerate from the API types instead of hand-editing.

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationProcessingConfigurationProcessorsObservation.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsObservation) DeepCopy() *RedshiftConfigurationProcessingConfigurationProcessorsObservation {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationProcessingConfigurationProcessorsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsParameters) DeepCopyInto(out *RedshiftConfigurationProcessingConfigurationProcessorsParameters) {
	*out = *in
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make([]RedshiftConfigurationProcessingConfigurationProcessorsParametersParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationProcessingConfigurationProcessorsParameters.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsParameters) DeepCopy() *RedshiftConfigurationProcessingConfigurationProcessorsParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationProcessingConfigurationProcessorsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopyInto(out *RedshiftConfigurationProcessingConfigurationProcessorsParametersInitParameters) {
	*out = *in
	if in.ParameterName != nil {
		in, out := &in.ParameterName, &out.ParameterName
		*out = new(string)
		**out = **in
	}
	if in.ParameterValue != nil {
		in, out := &in.ParameterValue, &out.ParameterValue
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationProcessingConfigurationProcessorsParametersInitParameters.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopy() *RedshiftConfigurationProcessingConfigurationProcessorsParametersInitParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationProcessingConfigurationProcessorsParametersInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopyInto(out *RedshiftConfigurationProcessingConfigurationProcessorsParametersObservation) {
	*out = *in
	if in.ParameterName != nil {
		in, out := &in.ParameterName, &out.ParameterName
		*out = new(string)
		**out = **in
	}
	if in.ParameterValue != nil {
		in, out := &in.ParameterValue, &out.ParameterValue
		*out = new(string)
		**out = **in
	}
}

// NOTE(review): autogenerated deepcopy helpers (per the generator markers
// below); regenerate from the API types instead of hand-editing.

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationProcessingConfigurationProcessorsParametersObservation.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopy() *RedshiftConfigurationProcessingConfigurationProcessorsParametersObservation {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationProcessingConfigurationProcessorsParametersObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopyInto(out *RedshiftConfigurationProcessingConfigurationProcessorsParametersParameters) {
	*out = *in
	if in.ParameterName != nil {
		in, out := &in.ParameterName, &out.ParameterName
		*out = new(string)
		**out = **in
	}
	if in.ParameterValue != nil {
		in, out := &in.ParameterValue, &out.ParameterValue
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationProcessingConfigurationProcessorsParametersParameters.
func (in *RedshiftConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopy() *RedshiftConfigurationProcessingConfigurationProcessorsParametersParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationProcessingConfigurationProcessorsParametersParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsInitParameters.
func (in *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsInitParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// NOTE(review): autogenerated deepcopy helpers (per the generator markers
// below); regenerate from the API types instead of hand-editing.

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsObservation.
func (in *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsObservation {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsParameters.
func (in *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationS3BackupConfigurationInitParameters) DeepCopyInto(out *RedshiftConfigurationS3BackupConfigurationInitParameters) {
	*out = *in
	if in.BucketArn != nil {
		in, out := &in.BucketArn, &out.BucketArn
		*out = new(string)
		**out = **in
	}
	if in.BucketArnRef != nil {
		in, out := &in.BucketArnRef, &out.BucketArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.BucketArnSelector != nil {
		in, out := &in.BucketArnSelector, &out.BucketArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.BufferingInterval != nil {
		in, out := &in.BufferingInterval, &out.BufferingInterval
		*out = new(float64)
		**out = **in
	}
	if in.BufferingSize != nil {
		in, out := &in.BufferingSize, &out.BufferingSize
		*out = new(float64)
		**out = **in
	}
	if in.CloudwatchLoggingOptions != nil {
		in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions
		*out = new(RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.CompressionFormat != nil {
		in, out := &in.CompressionFormat, &out.CompressionFormat
		*out = new(string)
		**out = **in
	}
	if in.ErrorOutputPrefix != nil {
		in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix
		*out = new(string)
		**out = **in
	}
	if in.KMSKeyArn != nil {
		in, out := &in.KMSKeyArn, &out.KMSKeyArn
		*out = new(string)
		**out = **in
	}
	if in.Prefix != nil {
		in, out := &in.Prefix, &out.Prefix
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.RoleArnRef != nil {
		in, out := &in.RoleArnRef, &out.RoleArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.RoleArnSelector != nil {
		in, out := &in.RoleArnSelector, &out.RoleArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3BackupConfigurationInitParameters.
func (in *RedshiftConfigurationS3BackupConfigurationInitParameters) DeepCopy() *RedshiftConfigurationS3BackupConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationS3BackupConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationS3BackupConfigurationObservation) DeepCopyInto(out *RedshiftConfigurationS3BackupConfigurationObservation) {
	*out = *in
	if in.BucketArn != nil {
		in, out := &in.BucketArn, &out.BucketArn
		*out = new(string)
		**out = **in
	}
	if in.BufferingInterval != nil {
		in, out := &in.BufferingInterval, &out.BufferingInterval
		*out = new(float64)
		**out = **in
	}
	if in.BufferingSize != nil {
		in, out := &in.BufferingSize, &out.BufferingSize
		*out = new(float64)
		**out = **in
	}
	if in.CloudwatchLoggingOptions != nil {
		in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions
		*out = new(RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.CompressionFormat != nil {
		in, out := &in.CompressionFormat, &out.CompressionFormat
		*out = new(string)
		**out = **in
	}
	if in.ErrorOutputPrefix != nil {
		in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix
		*out = new(string)
		**out = **in
	}
	if in.KMSKeyArn != nil {
		in, out := &in.KMSKeyArn, &out.KMSKeyArn
		*out = new(string)
		**out = **in
	}
	if in.Prefix != nil {
		in, out := &in.Prefix, &out.Prefix
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3BackupConfigurationObservation.
func (in *RedshiftConfigurationS3BackupConfigurationObservation) DeepCopy() *RedshiftConfigurationS3BackupConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationS3BackupConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationS3BackupConfigurationParameters) DeepCopyInto(out *RedshiftConfigurationS3BackupConfigurationParameters) {
	*out = *in
	if in.BucketArn != nil {
		in, out := &in.BucketArn, &out.BucketArn
		*out = new(string)
		**out = **in
	}
	if in.BucketArnRef != nil {
		in, out := &in.BucketArnRef, &out.BucketArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.BucketArnSelector != nil {
		in, out := &in.BucketArnSelector, &out.BucketArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.BufferingInterval != nil {
		in, out := &in.BufferingInterval, &out.BufferingInterval
		*out = new(float64)
		**out = **in
	}
	if in.BufferingSize != nil {
		in, out := &in.BufferingSize, &out.BufferingSize
		*out = new(float64)
		**out = **in
	}
	if in.CloudwatchLoggingOptions != nil {
		in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions
		*out = new(RedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.CompressionFormat != nil {
		in, out := &in.CompressionFormat, &out.CompressionFormat
		*out = new(string)
		**out = **in
	}
	if in.ErrorOutputPrefix != nil {
		in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix
		*out = new(string)
		**out = **in
	}
	if in.KMSKeyArn != nil {
		in, out := &in.KMSKeyArn, &out.KMSKeyArn
		*out = new(string)
		**out = **in
	}
	if in.Prefix != nil {
		in, out := &in.Prefix, &out.Prefix
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.RoleArnRef != nil {
		in, out := &in.RoleArnRef, &out.RoleArnRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.RoleArnSelector != nil {
		in, out := &in.RoleArnSelector, &out.RoleArnSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3BackupConfigurationParameters.
func (in *RedshiftConfigurationS3BackupConfigurationParameters) DeepCopy() *RedshiftConfigurationS3BackupConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationS3BackupConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters.
func (in *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// NOTE(review): autogenerated deepcopy helpers (per the generator markers
// below); regenerate from the API types instead of hand-editing.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation.
func (in *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.LogStreamName != nil {
		in, out := &in.LogStreamName, &out.LogStreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters.
func (in *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters {
	if in == nil {
		return nil
	}
	out := new(RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedshiftConfigurationS3ConfigurationInitParameters) DeepCopyInto(out *RedshiftConfigurationS3ConfigurationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
RedshiftConfigurationS3ConfigurationInitParameters. +func (in *RedshiftConfigurationS3ConfigurationInitParameters) DeepCopy() *RedshiftConfigurationS3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RedshiftConfigurationS3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedshiftConfigurationS3ConfigurationObservation) DeepCopyInto(out *RedshiftConfigurationS3ConfigurationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3ConfigurationObservation. 
+func (in *RedshiftConfigurationS3ConfigurationObservation) DeepCopy() *RedshiftConfigurationS3ConfigurationObservation { + if in == nil { + return nil + } + out := new(RedshiftConfigurationS3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedshiftConfigurationS3ConfigurationParameters) DeepCopyInto(out *RedshiftConfigurationS3ConfigurationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(RedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if 
in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftConfigurationS3ConfigurationParameters. +func (in *RedshiftConfigurationS3ConfigurationParameters) DeepCopy() *RedshiftConfigurationS3ConfigurationParameters { + if in == nil { + return nil + } + out := new(RedshiftConfigurationS3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestConfigurationInitParameters) DeepCopyInto(out *RequestConfigurationInitParameters) { + *out = *in + if in.CommonAttributes != nil { + in, out := &in.CommonAttributes, &out.CommonAttributes + *out = make([]CommonAttributesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentEncoding != nil { + in, out := &in.ContentEncoding, &out.ContentEncoding + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestConfigurationInitParameters. +func (in *RequestConfigurationInitParameters) DeepCopy() *RequestConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RequestConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestConfigurationObservation) DeepCopyInto(out *RequestConfigurationObservation) { + *out = *in + if in.CommonAttributes != nil { + in, out := &in.CommonAttributes, &out.CommonAttributes + *out = make([]CommonAttributesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentEncoding != nil { + in, out := &in.ContentEncoding, &out.ContentEncoding + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestConfigurationObservation. +func (in *RequestConfigurationObservation) DeepCopy() *RequestConfigurationObservation { + if in == nil { + return nil + } + out := new(RequestConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestConfigurationParameters) DeepCopyInto(out *RequestConfigurationParameters) { + *out = *in + if in.CommonAttributes != nil { + in, out := &in.CommonAttributes, &out.CommonAttributes + *out = make([]CommonAttributesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentEncoding != nil { + in, out := &in.ContentEncoding, &out.ContentEncoding + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestConfigurationParameters. +func (in *RequestConfigurationParameters) DeepCopy() *RequestConfigurationParameters { + if in == nil { + return nil + } + out := new(RequestConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3BackupConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *S3BackupConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackupConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *S3BackupConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *S3BackupConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(S3BackupConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BackupConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *S3BackupConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackupConfigurationCloudwatchLoggingOptionsObservation. 
+func (in *S3BackupConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *S3BackupConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(S3BackupConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BackupConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *S3BackupConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackupConfigurationCloudwatchLoggingOptionsParameters. +func (in *S3BackupConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *S3BackupConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(S3BackupConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3BackupConfigurationInitParameters) DeepCopyInto(out *S3BackupConfigurationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(S3BackupConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackupConfigurationInitParameters. +func (in *S3BackupConfigurationInitParameters) DeepCopy() *S3BackupConfigurationInitParameters { + if in == nil { + return nil + } + out := new(S3BackupConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3BackupConfigurationObservation) DeepCopyInto(out *S3BackupConfigurationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(S3BackupConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackupConfigurationObservation. +func (in *S3BackupConfigurationObservation) DeepCopy() *S3BackupConfigurationObservation { + if in == nil { + return nil + } + out := new(S3BackupConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3BackupConfigurationParameters) DeepCopyInto(out *S3BackupConfigurationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(S3BackupConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackupConfigurationParameters. +func (in *S3BackupConfigurationParameters) DeepCopy() *S3BackupConfigurationParameters { + if in == nil { + return nil + } + out := new(S3BackupConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *S3ConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *S3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *S3ConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *S3ConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationCloudwatchLoggingOptionsObservation. 
+func (in *S3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *S3ConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(S3ConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *S3ConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationCloudwatchLoggingOptionsParameters. +func (in *S3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *S3ConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ConfigurationInitParameters) DeepCopyInto(out *S3ConfigurationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(S3ConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationInitParameters. 
+func (in *S3ConfigurationInitParameters) DeepCopy() *S3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigurationObservation) DeepCopyInto(out *S3ConfigurationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(S3ConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationObservation. +func (in *S3ConfigurationObservation) DeepCopy() *S3ConfigurationObservation { + if in == nil { + return nil + } + out := new(S3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *S3ConfigurationParameters) DeepCopyInto(out *S3ConfigurationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(S3ConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationParameters. 
+func (in *S3ConfigurationParameters) DeepCopy() *S3ConfigurationParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaConfigurationInitParameters) DeepCopyInto(out *SchemaConfigurationInitParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.TableNameRef != nil { + in, out := &in.TableNameRef, &out.TableNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TableNameSelector != nil { + in, out := &in.TableNameSelector, &out.TableNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaConfigurationInitParameters. +func (in *SchemaConfigurationInitParameters) DeepCopy() *SchemaConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SchemaConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SchemaConfigurationObservation) DeepCopyInto(out *SchemaConfigurationObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaConfigurationObservation. +func (in *SchemaConfigurationObservation) DeepCopy() *SchemaConfigurationObservation { + if in == nil { + return nil + } + out := new(SchemaConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaConfigurationParameters) DeepCopyInto(out *SchemaConfigurationParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.TableNameRef != nil { + in, out := &in.TableNameRef, &out.TableNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TableNameSelector != nil { + in, out := &in.TableNameSelector, &out.TableNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaConfigurationParameters. +func (in *SchemaConfigurationParameters) DeepCopy() *SchemaConfigurationParameters { + if in == nil { + return nil + } + out := new(SchemaConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SerializerInitParameters) DeepCopyInto(out *SerializerInitParameters) { + *out = *in + if in.OrcSerDe != nil { + in, out := &in.OrcSerDe, &out.OrcSerDe + *out = new(OrcSerDeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ParquetSerDe != nil { + in, out := &in.ParquetSerDe, &out.ParquetSerDe + *out = new(ParquetSerDeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerInitParameters. +func (in *SerializerInitParameters) DeepCopy() *SerializerInitParameters { + if in == nil { + return nil + } + out := new(SerializerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializerObservation) DeepCopyInto(out *SerializerObservation) { + *out = *in + if in.OrcSerDe != nil { + in, out := &in.OrcSerDe, &out.OrcSerDe + *out = new(OrcSerDeObservation) + (*in).DeepCopyInto(*out) + } + if in.ParquetSerDe != nil { + in, out := &in.ParquetSerDe, &out.ParquetSerDe + *out = new(ParquetSerDeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerObservation. +func (in *SerializerObservation) DeepCopy() *SerializerObservation { + if in == nil { + return nil + } + out := new(SerializerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SerializerParameters) DeepCopyInto(out *SerializerParameters) { + *out = *in + if in.OrcSerDe != nil { + in, out := &in.OrcSerDe, &out.OrcSerDe + *out = new(OrcSerDeParameters) + (*in).DeepCopyInto(*out) + } + if in.ParquetSerDe != nil { + in, out := &in.ParquetSerDe, &out.ParquetSerDe + *out = new(ParquetSerDeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializerParameters. +func (in *SerializerParameters) DeepCopy() *SerializerParameters { + if in == nil { + return nil + } + out := new(SerializerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionInitParameters) DeepCopyInto(out *ServerSideEncryptionInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyArn != nil { + in, out := &in.KeyArn, &out.KeyArn + *out = new(string) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionInitParameters. +func (in *ServerSideEncryptionInitParameters) DeepCopy() *ServerSideEncryptionInitParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerSideEncryptionObservation) DeepCopyInto(out *ServerSideEncryptionObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyArn != nil { + in, out := &in.KeyArn, &out.KeyArn + *out = new(string) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionObservation. +func (in *ServerSideEncryptionObservation) DeepCopy() *ServerSideEncryptionObservation { + if in == nil { + return nil + } + out := new(ServerSideEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionParameters) DeepCopyInto(out *ServerSideEncryptionParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyArn != nil { + in, out := &in.KeyArn, &out.KeyArn + *out = new(string) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionParameters. +func (in *ServerSideEncryptionParameters) DeepCopy() *ServerSideEncryptionParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *SnowflakeConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *SnowflakeConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *SnowflakeConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *SnowflakeConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationCloudwatchLoggingOptionsObservation. 
+func (in *SnowflakeConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *SnowflakeConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *SnowflakeConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationCloudwatchLoggingOptionsParameters. +func (in *SnowflakeConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *SnowflakeConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeConfigurationInitParameters) DeepCopyInto(out *SnowflakeConfigurationInitParameters) { + *out = *in + if in.AccountURL != nil { + in, out := &in.AccountURL, &out.AccountURL + *out = new(string) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SnowflakeConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ContentColumnName != nil { + in, out := &in.ContentColumnName, &out.ContentColumnName + *out = new(string) + **out = **in + } + if in.DataLoadingOption != nil { + in, out := &in.DataLoadingOption, &out.DataLoadingOption + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.KeyPassphraseSecretRef != nil { + in, out := &in.KeyPassphraseSecretRef, &out.KeyPassphraseSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.MetadataColumnName != nil { + in, out := &in.MetadataColumnName, &out.MetadataColumnName + *out = new(string) + **out = **in + } + out.PrivateKeySecretRef = in.PrivateKeySecretRef + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(SnowflakeConfigurationProcessingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) 
+ **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(SnowflakeConfigurationS3ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.SnowflakeRoleConfiguration != nil { + in, out := &in.SnowflakeRoleConfiguration, &out.SnowflakeRoleConfiguration + *out = new(SnowflakeRoleConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SnowflakeVPCConfiguration != nil { + in, out := &in.SnowflakeVPCConfiguration, &out.SnowflakeVPCConfiguration + *out = new(SnowflakeVPCConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationInitParameters. +func (in *SnowflakeConfigurationInitParameters) DeepCopy() *SnowflakeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeConfigurationObservation) DeepCopyInto(out *SnowflakeConfigurationObservation) { + *out = *in + if in.AccountURL != nil { + in, out := &in.AccountURL, &out.AccountURL + *out = new(string) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SnowflakeConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ContentColumnName != nil { + in, out := &in.ContentColumnName, &out.ContentColumnName + *out = new(string) + **out = **in + } + if in.DataLoadingOption != nil { + in, out := &in.DataLoadingOption, &out.DataLoadingOption + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.MetadataColumnName != nil { + in, out := &in.MetadataColumnName, &out.MetadataColumnName + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(SnowflakeConfigurationProcessingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(SnowflakeConfigurationS3ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.SnowflakeRoleConfiguration != nil { + in, out := &in.SnowflakeRoleConfiguration, &out.SnowflakeRoleConfiguration + *out = new(SnowflakeRoleConfigurationObservation) + (*in).DeepCopyInto(*out) 
+ } + if in.SnowflakeVPCConfiguration != nil { + in, out := &in.SnowflakeVPCConfiguration, &out.SnowflakeVPCConfiguration + *out = new(SnowflakeVPCConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationObservation. +func (in *SnowflakeConfigurationObservation) DeepCopy() *SnowflakeConfigurationObservation { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationParameters) DeepCopyInto(out *SnowflakeConfigurationParameters) { + *out = *in + if in.AccountURL != nil { + in, out := &in.AccountURL, &out.AccountURL + *out = new(string) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SnowflakeConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.ContentColumnName != nil { + in, out := &in.ContentColumnName, &out.ContentColumnName + *out = new(string) + **out = **in + } + if in.DataLoadingOption != nil { + in, out := &in.DataLoadingOption, &out.DataLoadingOption + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.KeyPassphraseSecretRef != nil { + in, out := &in.KeyPassphraseSecretRef, &out.KeyPassphraseSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.MetadataColumnName != nil { + in, out := &in.MetadataColumnName, &out.MetadataColumnName + *out = new(string) + **out = **in + } + 
out.PrivateKeySecretRef = in.PrivateKeySecretRef + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(SnowflakeConfigurationProcessingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(SnowflakeConfigurationS3ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(string) + **out = **in + } + if in.SnowflakeRoleConfiguration != nil { + in, out := &in.SnowflakeRoleConfiguration, &out.SnowflakeRoleConfiguration + *out = new(SnowflakeRoleConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.SnowflakeVPCConfiguration != nil { + in, out := &in.SnowflakeVPCConfiguration, &out.SnowflakeVPCConfiguration + *out = new(SnowflakeVPCConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(string) + **out = **in + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationParameters. 
+func (in *SnowflakeConfigurationParameters) DeepCopy() *SnowflakeConfigurationParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationProcessingConfigurationInitParameters) DeepCopyInto(out *SnowflakeConfigurationProcessingConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]SnowflakeConfigurationProcessingConfigurationProcessorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationProcessingConfigurationInitParameters. +func (in *SnowflakeConfigurationProcessingConfigurationInitParameters) DeepCopy() *SnowflakeConfigurationProcessingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationProcessingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeConfigurationProcessingConfigurationObservation) DeepCopyInto(out *SnowflakeConfigurationProcessingConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]SnowflakeConfigurationProcessingConfigurationProcessorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationProcessingConfigurationObservation. +func (in *SnowflakeConfigurationProcessingConfigurationObservation) DeepCopy() *SnowflakeConfigurationProcessingConfigurationObservation { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationProcessingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationProcessingConfigurationParameters) DeepCopyInto(out *SnowflakeConfigurationProcessingConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]SnowflakeConfigurationProcessingConfigurationProcessorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationProcessingConfigurationParameters. 
+func (in *SnowflakeConfigurationProcessingConfigurationParameters) DeepCopy() *SnowflakeConfigurationProcessingConfigurationParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationProcessingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopyInto(out *SnowflakeConfigurationProcessingConfigurationProcessorsInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]SnowflakeConfigurationProcessingConfigurationProcessorsParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationProcessingConfigurationProcessorsInitParameters. +func (in *SnowflakeConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopy() *SnowflakeConfigurationProcessingConfigurationProcessorsInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationProcessingConfigurationProcessorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeConfigurationProcessingConfigurationProcessorsObservation) DeepCopyInto(out *SnowflakeConfigurationProcessingConfigurationProcessorsObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]SnowflakeConfigurationProcessingConfigurationProcessorsParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationProcessingConfigurationProcessorsObservation. +func (in *SnowflakeConfigurationProcessingConfigurationProcessorsObservation) DeepCopy() *SnowflakeConfigurationProcessingConfigurationProcessorsObservation { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationProcessingConfigurationProcessorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationProcessingConfigurationProcessorsParameters) DeepCopyInto(out *SnowflakeConfigurationProcessingConfigurationProcessorsParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]SnowflakeConfigurationProcessingConfigurationProcessorsParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationProcessingConfigurationProcessorsParameters. 
+func (in *SnowflakeConfigurationProcessingConfigurationProcessorsParameters) DeepCopy() *SnowflakeConfigurationProcessingConfigurationProcessorsParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationProcessingConfigurationProcessorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopyInto(out *SnowflakeConfigurationProcessingConfigurationProcessorsParametersInitParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationProcessingConfigurationProcessorsParametersInitParameters. +func (in *SnowflakeConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopy() *SnowflakeConfigurationProcessingConfigurationProcessorsParametersInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationProcessingConfigurationProcessorsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopyInto(out *SnowflakeConfigurationProcessingConfigurationProcessorsParametersObservation) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationProcessingConfigurationProcessorsParametersObservation. +func (in *SnowflakeConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopy() *SnowflakeConfigurationProcessingConfigurationProcessorsParametersObservation { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationProcessingConfigurationProcessorsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopyInto(out *SnowflakeConfigurationProcessingConfigurationProcessorsParametersParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationProcessingConfigurationProcessorsParametersParameters. 
+func (in *SnowflakeConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopy() *SnowflakeConfigurationProcessingConfigurationProcessorsParametersParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationProcessingConfigurationProcessorsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation. +func (in *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters. 
+func (in *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationS3ConfigurationInitParameters) DeepCopyInto(out *SnowflakeConfigurationS3ConfigurationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, 
out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationS3ConfigurationInitParameters. +func (in *SnowflakeConfigurationS3ConfigurationInitParameters) DeepCopy() *SnowflakeConfigurationS3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationS3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationS3ConfigurationObservation) DeepCopyInto(out *SnowflakeConfigurationS3ConfigurationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = 
**in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationS3ConfigurationObservation. +func (in *SnowflakeConfigurationS3ConfigurationObservation) DeepCopy() *SnowflakeConfigurationS3ConfigurationObservation { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationS3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeConfigurationS3ConfigurationParameters) DeepCopyInto(out *SnowflakeConfigurationS3ConfigurationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + 
*out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeConfigurationS3ConfigurationParameters. +func (in *SnowflakeConfigurationS3ConfigurationParameters) DeepCopy() *SnowflakeConfigurationS3ConfigurationParameters { + if in == nil { + return nil + } + out := new(SnowflakeConfigurationS3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeRoleConfigurationInitParameters) DeepCopyInto(out *SnowflakeRoleConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SnowflakeRole != nil { + in, out := &in.SnowflakeRole, &out.SnowflakeRole + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeRoleConfigurationInitParameters. +func (in *SnowflakeRoleConfigurationInitParameters) DeepCopy() *SnowflakeRoleConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeRoleConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeRoleConfigurationObservation) DeepCopyInto(out *SnowflakeRoleConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SnowflakeRole != nil { + in, out := &in.SnowflakeRole, &out.SnowflakeRole + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeRoleConfigurationObservation. +func (in *SnowflakeRoleConfigurationObservation) DeepCopy() *SnowflakeRoleConfigurationObservation { + if in == nil { + return nil + } + out := new(SnowflakeRoleConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeRoleConfigurationParameters) DeepCopyInto(out *SnowflakeRoleConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SnowflakeRole != nil { + in, out := &in.SnowflakeRole, &out.SnowflakeRole + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeRoleConfigurationParameters. +func (in *SnowflakeRoleConfigurationParameters) DeepCopy() *SnowflakeRoleConfigurationParameters { + if in == nil { + return nil + } + out := new(SnowflakeRoleConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnowflakeVPCConfigurationInitParameters) DeepCopyInto(out *SnowflakeVPCConfigurationInitParameters) { + *out = *in + if in.PrivateLinkVpceID != nil { + in, out := &in.PrivateLinkVpceID, &out.PrivateLinkVpceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeVPCConfigurationInitParameters. +func (in *SnowflakeVPCConfigurationInitParameters) DeepCopy() *SnowflakeVPCConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SnowflakeVPCConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeVPCConfigurationObservation) DeepCopyInto(out *SnowflakeVPCConfigurationObservation) { + *out = *in + if in.PrivateLinkVpceID != nil { + in, out := &in.PrivateLinkVpceID, &out.PrivateLinkVpceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeVPCConfigurationObservation. +func (in *SnowflakeVPCConfigurationObservation) DeepCopy() *SnowflakeVPCConfigurationObservation { + if in == nil { + return nil + } + out := new(SnowflakeVPCConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnowflakeVPCConfigurationParameters) DeepCopyInto(out *SnowflakeVPCConfigurationParameters) { + *out = *in + if in.PrivateLinkVpceID != nil { + in, out := &in.PrivateLinkVpceID, &out.PrivateLinkVpceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowflakeVPCConfigurationParameters. 
+func (in *SnowflakeVPCConfigurationParameters) DeepCopy() *SnowflakeVPCConfigurationParameters { + if in == nil { + return nil + } + out := new(SnowflakeVPCConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *SplunkConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *SplunkConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *SplunkConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SplunkConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *SplunkConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationCloudwatchLoggingOptionsObservation. +func (in *SplunkConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *SplunkConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(SplunkConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *SplunkConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationCloudwatchLoggingOptionsParameters. 
+func (in *SplunkConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *SplunkConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationInitParameters) DeepCopyInto(out *SplunkConfigurationInitParameters) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SplunkConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HecAcknowledgmentTimeout != nil { + in, out := &in.HecAcknowledgmentTimeout, &out.HecAcknowledgmentTimeout + *out = new(float64) + **out = **in + } + if in.HecEndpoint != nil { + in, out := &in.HecEndpoint, &out.HecEndpoint + *out = new(string) + **out = **in + } + if in.HecEndpointType != nil { + in, out := &in.HecEndpointType, &out.HecEndpointType + *out = new(string) + **out = **in + } + out.HecTokenSecretRef = in.HecTokenSecretRef + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(SplunkConfigurationProcessingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = 
new(SplunkConfigurationS3ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationInitParameters. +func (in *SplunkConfigurationInitParameters) DeepCopy() *SplunkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationObservation) DeepCopyInto(out *SplunkConfigurationObservation) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SplunkConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.HecAcknowledgmentTimeout != nil { + in, out := &in.HecAcknowledgmentTimeout, &out.HecAcknowledgmentTimeout + *out = new(float64) + **out = **in + } + if in.HecEndpoint != nil { + in, out := &in.HecEndpoint, &out.HecEndpoint + *out = new(string) + **out = **in + } + if in.HecEndpointType != nil { + in, out := &in.HecEndpointType, &out.HecEndpointType + *out = new(string) + **out = **in + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(SplunkConfigurationProcessingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if 
in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(SplunkConfigurationS3ConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationObservation. +func (in *SplunkConfigurationObservation) DeepCopy() *SplunkConfigurationObservation { + if in == nil { + return nil + } + out := new(SplunkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationParameters) DeepCopyInto(out *SplunkConfigurationParameters) { + *out = *in + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SplunkConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.HecAcknowledgmentTimeout != nil { + in, out := &in.HecAcknowledgmentTimeout, &out.HecAcknowledgmentTimeout + *out = new(float64) + **out = **in + } + if in.HecEndpoint != nil { + in, out := &in.HecEndpoint, &out.HecEndpoint + *out = new(string) + **out = **in + } + if in.HecEndpointType != nil { + in, out := &in.HecEndpointType, &out.HecEndpointType + *out = new(string) + **out = **in + } + out.HecTokenSecretRef = in.HecTokenSecretRef + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(SplunkConfigurationProcessingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(float64) + **out = **in + } + if 
in.S3BackupMode != nil { + in, out := &in.S3BackupMode, &out.S3BackupMode + *out = new(string) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(SplunkConfigurationS3ConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationParameters. +func (in *SplunkConfigurationParameters) DeepCopy() *SplunkConfigurationParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationProcessingConfigurationInitParameters) DeepCopyInto(out *SplunkConfigurationProcessingConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]SplunkConfigurationProcessingConfigurationProcessorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationProcessingConfigurationInitParameters. +func (in *SplunkConfigurationProcessingConfigurationInitParameters) DeepCopy() *SplunkConfigurationProcessingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationProcessingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SplunkConfigurationProcessingConfigurationObservation) DeepCopyInto(out *SplunkConfigurationProcessingConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]SplunkConfigurationProcessingConfigurationProcessorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationProcessingConfigurationObservation. +func (in *SplunkConfigurationProcessingConfigurationObservation) DeepCopy() *SplunkConfigurationProcessingConfigurationObservation { + if in == nil { + return nil + } + out := new(SplunkConfigurationProcessingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationProcessingConfigurationParameters) DeepCopyInto(out *SplunkConfigurationProcessingConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Processors != nil { + in, out := &in.Processors, &out.Processors + *out = make([]SplunkConfigurationProcessingConfigurationProcessorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationProcessingConfigurationParameters. 
+func (in *SplunkConfigurationProcessingConfigurationParameters) DeepCopy() *SplunkConfigurationProcessingConfigurationParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationProcessingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopyInto(out *SplunkConfigurationProcessingConfigurationProcessorsInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]SplunkConfigurationProcessingConfigurationProcessorsParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationProcessingConfigurationProcessorsInitParameters. +func (in *SplunkConfigurationProcessingConfigurationProcessorsInitParameters) DeepCopy() *SplunkConfigurationProcessingConfigurationProcessorsInitParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationProcessingConfigurationProcessorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SplunkConfigurationProcessingConfigurationProcessorsObservation) DeepCopyInto(out *SplunkConfigurationProcessingConfigurationProcessorsObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]SplunkConfigurationProcessingConfigurationProcessorsParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationProcessingConfigurationProcessorsObservation. +func (in *SplunkConfigurationProcessingConfigurationProcessorsObservation) DeepCopy() *SplunkConfigurationProcessingConfigurationProcessorsObservation { + if in == nil { + return nil + } + out := new(SplunkConfigurationProcessingConfigurationProcessorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationProcessingConfigurationProcessorsParameters) DeepCopyInto(out *SplunkConfigurationProcessingConfigurationProcessorsParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]SplunkConfigurationProcessingConfigurationProcessorsParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationProcessingConfigurationProcessorsParameters. 
+func (in *SplunkConfigurationProcessingConfigurationProcessorsParameters) DeepCopy() *SplunkConfigurationProcessingConfigurationProcessorsParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationProcessingConfigurationProcessorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopyInto(out *SplunkConfigurationProcessingConfigurationProcessorsParametersInitParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationProcessingConfigurationProcessorsParametersInitParameters. +func (in *SplunkConfigurationProcessingConfigurationProcessorsParametersInitParameters) DeepCopy() *SplunkConfigurationProcessingConfigurationProcessorsParametersInitParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationProcessingConfigurationProcessorsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SplunkConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopyInto(out *SplunkConfigurationProcessingConfigurationProcessorsParametersObservation) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationProcessingConfigurationProcessorsParametersObservation. +func (in *SplunkConfigurationProcessingConfigurationProcessorsParametersObservation) DeepCopy() *SplunkConfigurationProcessingConfigurationProcessorsParametersObservation { + if in == nil { + return nil + } + out := new(SplunkConfigurationProcessingConfigurationProcessorsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopyInto(out *SplunkConfigurationProcessingConfigurationProcessorsParametersParameters) { + *out = *in + if in.ParameterName != nil { + in, out := &in.ParameterName, &out.ParameterName + *out = new(string) + **out = **in + } + if in.ParameterValue != nil { + in, out := &in.ParameterValue, &out.ParameterValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationProcessingConfigurationProcessorsParametersParameters. 
+func (in *SplunkConfigurationProcessingConfigurationProcessorsParametersParameters) DeepCopy() *SplunkConfigurationProcessingConfigurationProcessorsParametersParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationProcessingConfigurationProcessorsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters. +func (in *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) DeepCopy() *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopyInto(out *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation. +func (in *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) DeepCopy() *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopyInto(out *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.LogStreamName != nil { + in, out := &in.LogStreamName, &out.LogStreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters. 
+func (in *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) DeepCopy() *SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationS3ConfigurationInitParameters) DeepCopyInto(out *SplunkConfigurationS3ConfigurationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := 
&in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationS3ConfigurationInitParameters. +func (in *SplunkConfigurationS3ConfigurationInitParameters) DeepCopy() *SplunkConfigurationS3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationS3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationS3ConfigurationObservation) DeepCopyInto(out *SplunkConfigurationS3ConfigurationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil 
{ + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationS3ConfigurationObservation. +func (in *SplunkConfigurationS3ConfigurationObservation) DeepCopy() *SplunkConfigurationS3ConfigurationObservation { + if in == nil { + return nil + } + out := new(SplunkConfigurationS3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SplunkConfigurationS3ConfigurationParameters) DeepCopyInto(out *SplunkConfigurationS3ConfigurationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BufferingInterval != nil { + in, out := &in.BufferingInterval, &out.BufferingInterval + *out = new(float64) + **out = **in + } + if in.BufferingSize != nil { + in, out := &in.BufferingSize, &out.BufferingSize + *out = new(float64) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(SplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionFormat != nil { + in, out := &in.CompressionFormat, &out.CompressionFormat + *out = new(string) + **out = **in + } + if in.ErrorOutputPrefix != nil { + in, out := &in.ErrorOutputPrefix, &out.ErrorOutputPrefix + *out = new(string) + **out = **in + } + if in.KMSKeyArn 
!= nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkConfigurationS3ConfigurationParameters. +func (in *SplunkConfigurationS3ConfigurationParameters) DeepCopy() *SplunkConfigurationS3ConfigurationParameters { + if in == nil { + return nil + } + out := new(SplunkConfigurationS3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigInitParameters) DeepCopyInto(out *VPCConfigInitParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigInitParameters. +func (in *VPCConfigInitParameters) DeepCopy() *VPCConfigInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigObservation) DeepCopyInto(out *VPCConfigObservation) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigObservation. +func (in *VPCConfigObservation) DeepCopy() *VPCConfigObservation { + if in == nil { + return nil + } + out := new(VPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigParameters) DeepCopyInto(out *VPCConfigParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigParameters. +func (in *VPCConfigParameters) DeepCopy() *VPCConfigParameters { + if in == nil { + return nil + } + out := new(VPCConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/firehose/v1beta2/zz_generated.managed.go b/apis/firehose/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..f9ce0ec304 --- /dev/null +++ b/apis/firehose/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this DeliveryStream. +func (mg *DeliveryStream) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DeliveryStream. 
+func (mg *DeliveryStream) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DeliveryStream. +func (mg *DeliveryStream) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DeliveryStream. +func (mg *DeliveryStream) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DeliveryStream. +func (mg *DeliveryStream) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DeliveryStream. +func (mg *DeliveryStream) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DeliveryStream. +func (mg *DeliveryStream) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DeliveryStream. +func (mg *DeliveryStream) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DeliveryStream. +func (mg *DeliveryStream) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DeliveryStream. +func (mg *DeliveryStream) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DeliveryStream. +func (mg *DeliveryStream) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DeliveryStream. 
+func (mg *DeliveryStream) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/firehose/v1beta2/zz_generated.managedlist.go b/apis/firehose/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..cf0c40f8ac --- /dev/null +++ b/apis/firehose/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DeliveryStreamList. +func (l *DeliveryStreamList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/firehose/v1beta2/zz_generated.resolvers.go b/apis/firehose/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..f8902aef06 --- /dev/null +++ b/apis/firehose/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,1422 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *DeliveryStream) ResolveReferences( // ResolveReferences of this DeliveryStream. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.ElasticsearchConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("elasticsearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ElasticsearchConfiguration.DomainArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ElasticsearchConfiguration.DomainArnRef, + Selector: mg.Spec.ForProvider.ElasticsearchConfiguration.DomainArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ElasticsearchConfiguration.DomainArn") + } + mg.Spec.ForProvider.ElasticsearchConfiguration.DomainArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ElasticsearchConfiguration.DomainArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.ElasticsearchConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ElasticsearchConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ElasticsearchConfiguration.RoleArnRef, + Selector: mg.Spec.ForProvider.ElasticsearchConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.ElasticsearchConfiguration.RoleArn") + } + mg.Spec.ForProvider.ElasticsearchConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ElasticsearchConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.ElasticsearchConfiguration != nil { + if mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.BucketArn") + } + mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.ElasticsearchConfiguration != nil { + if mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.RoleArn), + 
Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.RoleArn") + } + mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ElasticsearchConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.ElasticsearchConfiguration != nil { + if mg.Spec.ForProvider.ElasticsearchConfiguration.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ElasticsearchConfiguration.VPCConfig.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ElasticsearchConfiguration.VPCConfig.RoleArnRef, + Selector: mg.Spec.ForProvider.ElasticsearchConfiguration.VPCConfig.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ElasticsearchConfiguration.VPCConfig.RoleArn") + } + mg.Spec.ForProvider.ElasticsearchConfiguration.VPCConfig.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ElasticsearchConfiguration.VPCConfig.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.ExtendedS3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference 
target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExtendedS3Configuration.BucketArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ExtendedS3Configuration.BucketArnRef, + Selector: mg.Spec.ForProvider.ExtendedS3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExtendedS3Configuration.BucketArn") + } + mg.Spec.ForProvider.ExtendedS3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExtendedS3Configuration.BucketArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.ExtendedS3Configuration != nil { + if mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration != nil { + if mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArnRef, + Selector: mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArn") + } + 
mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArnRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.ExtendedS3Configuration != nil { + if mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration != nil { + if mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogTable", "CatalogTableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableNameRef, + Selector: mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableName") + } + mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableNameRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.ExtendedS3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err 
!= nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExtendedS3Configuration.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ExtendedS3Configuration.RoleArnRef, + Selector: mg.Spec.ForProvider.ExtendedS3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExtendedS3Configuration.RoleArn") + } + mg.Spec.ForProvider.ExtendedS3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExtendedS3Configuration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.HTTPEndpointConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HTTPEndpointConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.HTTPEndpointConfiguration.RoleArnRef, + Selector: mg.Spec.ForProvider.HTTPEndpointConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.HTTPEndpointConfiguration.RoleArn") + } + mg.Spec.ForProvider.HTTPEndpointConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.HTTPEndpointConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.HTTPEndpointConfiguration != nil { + if mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", 
"v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.BucketArn") + } + mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.HTTPEndpointConfiguration != nil { + if mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.RoleArn") + } + mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.RoleArn 
= reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.HTTPEndpointConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.OpensearchConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("opensearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OpensearchConfiguration.DomainArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.OpensearchConfiguration.DomainArnRef, + Selector: mg.Spec.ForProvider.OpensearchConfiguration.DomainArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OpensearchConfiguration.DomainArn") + } + mg.Spec.ForProvider.OpensearchConfiguration.DomainArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OpensearchConfiguration.DomainArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.OpensearchConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OpensearchConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.OpensearchConfiguration.RoleArnRef, + Selector: mg.Spec.ForProvider.OpensearchConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OpensearchConfiguration.RoleArn") + } + 
mg.Spec.ForProvider.OpensearchConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OpensearchConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.OpensearchConfiguration != nil { + if mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.BucketArn") + } + mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.OpensearchConfiguration != nil { + if mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: 
mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.RoleArn") + } + mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OpensearchConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.OpensearchConfiguration != nil { + if mg.Spec.ForProvider.OpensearchConfiguration.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OpensearchConfiguration.VPCConfig.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.OpensearchConfiguration.VPCConfig.RoleArnRef, + Selector: mg.Spec.ForProvider.OpensearchConfiguration.VPCConfig.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OpensearchConfiguration.VPCConfig.RoleArn") + } + mg.Spec.ForProvider.OpensearchConfiguration.VPCConfig.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OpensearchConfiguration.VPCConfig.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.OpensearchserverlessConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("opensearchserverless.aws.upbound.io", "v1beta1", "Collection", "CollectionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") 
+ } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OpensearchserverlessConfiguration.CollectionEndpoint), + Extract: resource.ExtractParamPath("collection_endpoint", true), + Reference: mg.Spec.ForProvider.OpensearchserverlessConfiguration.CollectionEndpointRef, + Selector: mg.Spec.ForProvider.OpensearchserverlessConfiguration.CollectionEndpointSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OpensearchserverlessConfiguration.CollectionEndpoint") + } + mg.Spec.ForProvider.OpensearchserverlessConfiguration.CollectionEndpoint = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OpensearchserverlessConfiguration.CollectionEndpointRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.OpensearchserverlessConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OpensearchserverlessConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.OpensearchserverlessConfiguration.RoleArnRef, + Selector: mg.Spec.ForProvider.OpensearchserverlessConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OpensearchserverlessConfiguration.RoleArn") + } + mg.Spec.ForProvider.OpensearchserverlessConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OpensearchserverlessConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.OpensearchserverlessConfiguration != nil { + if 
mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArn") + } + mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.OpensearchserverlessConfiguration != nil { + if mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArnSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArn") + } + mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.RedshiftConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RedshiftConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.RedshiftConfiguration.RoleArnRef, + Selector: mg.Spec.ForProvider.RedshiftConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RedshiftConfiguration.RoleArn") + } + mg.Spec.ForProvider.RedshiftConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RedshiftConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.RedshiftConfiguration != nil { + if mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArn), + Extract: common.ARNExtractor(), + Reference: 
mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArnRef, + Selector: mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArn") + } + mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.RedshiftConfiguration != nil { + if mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArnRef, + Selector: mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArn") + } + mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.RedshiftConfiguration != nil { + if mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if 
err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.BucketArn") + } + mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.RedshiftConfiguration != nil { + if mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.RoleArn") + } + mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.RedshiftConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SnowflakeConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SnowflakeConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.SnowflakeConfiguration.RoleArnRef, + Selector: mg.Spec.ForProvider.SnowflakeConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SnowflakeConfiguration.RoleArn") + } + mg.Spec.ForProvider.SnowflakeConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SnowflakeConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.SnowflakeConfiguration != nil { + if mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.BucketArn") + } + mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SnowflakeConfiguration != nil { + if mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.RoleArn") + } + mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SnowflakeConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SplunkConfiguration != nil { + if mg.Spec.ForProvider.SplunkConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.BucketArn), + Extract: 
resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.BucketArn") + } + mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SplunkConfiguration != nil { + if mg.Spec.ForProvider.SplunkConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.RoleArn") + } + mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SplunkConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.ElasticsearchConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("elasticsearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ElasticsearchConfiguration.DomainArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ElasticsearchConfiguration.DomainArnRef, + Selector: mg.Spec.InitProvider.ElasticsearchConfiguration.DomainArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ElasticsearchConfiguration.DomainArn") + } + mg.Spec.InitProvider.ElasticsearchConfiguration.DomainArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ElasticsearchConfiguration.DomainArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.ElasticsearchConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ElasticsearchConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ElasticsearchConfiguration.RoleArnRef, + Selector: mg.Spec.InitProvider.ElasticsearchConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ElasticsearchConfiguration.RoleArn") + } + mg.Spec.InitProvider.ElasticsearchConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ElasticsearchConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.ElasticsearchConfiguration != nil { + if mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", 
"BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.BucketArn") + } + mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.ElasticsearchConfiguration != nil { + if mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.RoleArn") + } + 
mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ElasticsearchConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.ElasticsearchConfiguration != nil { + if mg.Spec.InitProvider.ElasticsearchConfiguration.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ElasticsearchConfiguration.VPCConfig.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ElasticsearchConfiguration.VPCConfig.RoleArnRef, + Selector: mg.Spec.InitProvider.ElasticsearchConfiguration.VPCConfig.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ElasticsearchConfiguration.VPCConfig.RoleArn") + } + mg.Spec.InitProvider.ElasticsearchConfiguration.VPCConfig.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ElasticsearchConfiguration.VPCConfig.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.ExtendedS3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExtendedS3Configuration.BucketArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ExtendedS3Configuration.BucketArnRef, + Selector: 
mg.Spec.InitProvider.ExtendedS3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExtendedS3Configuration.BucketArn") + } + mg.Spec.InitProvider.ExtendedS3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExtendedS3Configuration.BucketArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.ExtendedS3Configuration != nil { + if mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration != nil { + if mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArnRef, + Selector: mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArn") + } + mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.RoleArnRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.ExtendedS3Configuration != nil { + if 
mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration != nil { + if mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogTable", "CatalogTableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableNameRef, + Selector: mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableName") + } + mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExtendedS3Configuration.DataFormatConversionConfiguration.SchemaConfiguration.TableNameRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.ExtendedS3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExtendedS3Configuration.RoleArn), + Extract: common.ARNExtractor(), + Reference: 
mg.Spec.InitProvider.ExtendedS3Configuration.RoleArnRef, + Selector: mg.Spec.InitProvider.ExtendedS3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExtendedS3Configuration.RoleArn") + } + mg.Spec.InitProvider.ExtendedS3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExtendedS3Configuration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.HTTPEndpointConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.HTTPEndpointConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.HTTPEndpointConfiguration.RoleArnRef, + Selector: mg.Spec.InitProvider.HTTPEndpointConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HTTPEndpointConfiguration.RoleArn") + } + mg.Spec.InitProvider.HTTPEndpointConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HTTPEndpointConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.HTTPEndpointConfiguration != nil { + if mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.BucketArn") + } + mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.HTTPEndpointConfiguration != nil { + if mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.RoleArn") + } + mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HTTPEndpointConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.OpensearchConfiguration != nil { + { + m, l, err = 
apisresolver.GetManagedResource("opensearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OpensearchConfiguration.DomainArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.OpensearchConfiguration.DomainArnRef, + Selector: mg.Spec.InitProvider.OpensearchConfiguration.DomainArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OpensearchConfiguration.DomainArn") + } + mg.Spec.InitProvider.OpensearchConfiguration.DomainArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OpensearchConfiguration.DomainArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.OpensearchConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OpensearchConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.OpensearchConfiguration.RoleArnRef, + Selector: mg.Spec.InitProvider.OpensearchConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OpensearchConfiguration.RoleArn") + } + mg.Spec.InitProvider.OpensearchConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OpensearchConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.OpensearchConfiguration != nil { + if 
mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.BucketArn") + } + mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.OpensearchConfiguration != nil { + if mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.RoleArn") + } + mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OpensearchConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.OpensearchConfiguration != nil { + if mg.Spec.InitProvider.OpensearchConfiguration.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OpensearchConfiguration.VPCConfig.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.OpensearchConfiguration.VPCConfig.RoleArnRef, + Selector: mg.Spec.InitProvider.OpensearchConfiguration.VPCConfig.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OpensearchConfiguration.VPCConfig.RoleArn") + } + mg.Spec.InitProvider.OpensearchConfiguration.VPCConfig.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OpensearchConfiguration.VPCConfig.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.OpensearchserverlessConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("opensearchserverless.aws.upbound.io", "v1beta1", "Collection", "CollectionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OpensearchserverlessConfiguration.CollectionEndpoint), + Extract: resource.ExtractParamPath("collection_endpoint", true), 
+ Reference: mg.Spec.InitProvider.OpensearchserverlessConfiguration.CollectionEndpointRef, + Selector: mg.Spec.InitProvider.OpensearchserverlessConfiguration.CollectionEndpointSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OpensearchserverlessConfiguration.CollectionEndpoint") + } + mg.Spec.InitProvider.OpensearchserverlessConfiguration.CollectionEndpoint = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OpensearchserverlessConfiguration.CollectionEndpointRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.OpensearchserverlessConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OpensearchserverlessConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.OpensearchserverlessConfiguration.RoleArnRef, + Selector: mg.Spec.InitProvider.OpensearchserverlessConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OpensearchserverlessConfiguration.RoleArn") + } + mg.Spec.InitProvider.OpensearchserverlessConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OpensearchserverlessConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.OpensearchserverlessConfiguration != nil { + if mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed 
resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArn") + } + mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.OpensearchserverlessConfiguration != nil { + if mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArn") + } + mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArn = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OpensearchserverlessConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.RedshiftConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RedshiftConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.RedshiftConfiguration.RoleArnRef, + Selector: mg.Spec.InitProvider.RedshiftConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RedshiftConfiguration.RoleArn") + } + mg.Spec.InitProvider.RedshiftConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RedshiftConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.RedshiftConfiguration != nil { + if mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArnRef, + Selector: mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArn") + } + mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.RedshiftConfiguration != nil { + if mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArnRef, + Selector: mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArn") + } + mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RedshiftConfiguration.S3BackupConfiguration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.RedshiftConfiguration != nil { + if mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.BucketArn") + } + mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.RedshiftConfiguration != nil { + if mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.RoleArn") + } + mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RedshiftConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SnowflakeConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", 
"Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SnowflakeConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.SnowflakeConfiguration.RoleArnRef, + Selector: mg.Spec.InitProvider.SnowflakeConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SnowflakeConfiguration.RoleArn") + } + mg.Spec.InitProvider.SnowflakeConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SnowflakeConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.SnowflakeConfiguration != nil { + if mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.BucketArn") + } + mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference 
+ + } + } + if mg.Spec.InitProvider.SnowflakeConfiguration != nil { + if mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.RoleArn") + } + mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SnowflakeConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SplunkConfiguration != nil { + if mg.Spec.InitProvider.SplunkConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.BucketArnRef, + Selector: mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + 
return errors.Wrap(err, "mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.BucketArn") + } + mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SplunkConfiguration != nil { + if mg.Spec.InitProvider.SplunkConfiguration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.RoleArnRef, + Selector: mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.RoleArn") + } + mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SplunkConfiguration.S3Configuration.RoleArnRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/firehose/v1beta2/zz_groupversion_info.go b/apis/firehose/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..8af1fe142f --- /dev/null +++ b/apis/firehose/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=firehose.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "firehose.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/fis/v1beta1/zz_generated.conversion_spokes.go b/apis/fis/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..3ca6c0afef --- /dev/null +++ b/apis/fis/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ExperimentTemplate to the hub type. +func (tr *ExperimentTemplate) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ExperimentTemplate type. 
+func (tr *ExperimentTemplate) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/fis/v1beta2/zz_experimenttemplate_terraformed.go b/apis/fis/v1beta2/zz_experimenttemplate_terraformed.go new file mode 100755 index 0000000000..98f48d8bb1 --- /dev/null +++ b/apis/fis/v1beta2/zz_experimenttemplate_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ExperimentTemplate +func (mg *ExperimentTemplate) GetTerraformResourceType() string { + return "aws_fis_experiment_template" +} + +// GetConnectionDetailsMapping for this ExperimentTemplate +func (tr *ExperimentTemplate) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ExperimentTemplate +func (tr *ExperimentTemplate) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ExperimentTemplate +func (tr *ExperimentTemplate) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this 
ExperimentTemplate +func (tr *ExperimentTemplate) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ExperimentTemplate +func (tr *ExperimentTemplate) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ExperimentTemplate +func (tr *ExperimentTemplate) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ExperimentTemplate +func (tr *ExperimentTemplate) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ExperimentTemplate +func (tr *ExperimentTemplate) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ExperimentTemplate using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ExperimentTemplate) LateInitialize(attrs []byte) (bool, error) { + params := &ExperimentTemplateParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ExperimentTemplate) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/fis/v1beta2/zz_experimenttemplate_types.go b/apis/fis/v1beta2/zz_experimenttemplate_types.go new file mode 100755 index 0000000000..2678b80604 --- /dev/null +++ b/apis/fis/v1beta2/zz_experimenttemplate_types.go @@ -0,0 +1,582 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // ID of the action. To find out what actions are supported see AWS FIS actions reference. + ActionID *string `json:"actionId,omitempty" tf:"action_id,omitempty"` + + // Description of the action. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Friendly name of the action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Parameter(s) for the action, if applicable. See below. + Parameter []ParameterInitParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Set of action names that must complete before this action can be executed. + // +listType=set + StartAfter []*string `json:"startAfter,omitempty" tf:"start_after,omitempty"` + + // Action's target, if applicable. See below. + Target *TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type ActionObservation struct { + + // ID of the action. To find out what actions are supported see AWS FIS actions reference. + ActionID *string `json:"actionId,omitempty" tf:"action_id,omitempty"` + + // Description of the action. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Friendly name of the action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Parameter(s) for the action, if applicable. See below. + Parameter []ParameterObservation `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Set of action names that must complete before this action can be executed. + // +listType=set + StartAfter []*string `json:"startAfter,omitempty" tf:"start_after,omitempty"` + + // Action's target, if applicable. See below. + Target *TargetObservation `json:"target,omitempty" tf:"target,omitempty"` +} + +type ActionParameters struct { + + // ID of the action. To find out what actions are supported see AWS FIS actions reference. + // +kubebuilder:validation:Optional + ActionID *string `json:"actionId" tf:"action_id,omitempty"` + + // Description of the action. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Friendly name of the action. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Parameter(s) for the action, if applicable. See below. + // +kubebuilder:validation:Optional + Parameter []ParameterParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Set of action names that must complete before this action can be executed. + // +kubebuilder:validation:Optional + // +listType=set + StartAfter []*string `json:"startAfter,omitempty" tf:"start_after,omitempty"` + + // Action's target, if applicable. See below. + // +kubebuilder:validation:Optional + Target *TargetParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type CloudwatchLogsConfigurationInitParameters struct { + + // The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group. + LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` +} + +type CloudwatchLogsConfigurationObservation struct { + + // The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group. + LogGroupArn *string `json:"logGroupArn,omitempty" tf:"log_group_arn,omitempty"` +} + +type CloudwatchLogsConfigurationParameters struct { + + // The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group. + // +kubebuilder:validation:Optional + LogGroupArn *string `json:"logGroupArn" tf:"log_group_arn,omitempty"` +} + +type ExperimentTemplateInitParameters struct { + + // Action to be performed during an experiment. See below. + Action []ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Description for the experiment template. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The configuration for experiment logging. See below. + LogConfiguration *LogConfigurationInitParameters `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // ARN of an IAM role that grants the AWS FIS service permission to perform service actions on your behalf. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // When an ongoing experiment should be stopped. See below. + StopCondition []StopConditionInitParameters `json:"stopCondition,omitempty" tf:"stop_condition,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Target of an action. See below. + Target []ExperimentTemplateTargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type ExperimentTemplateObservation struct { + + // Action to be performed during an experiment. See below. + Action []ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Description for the experiment template. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Experiment Template ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The configuration for experiment logging. See below. + LogConfiguration *LogConfigurationObservation `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // ARN of an IAM role that grants the AWS FIS service permission to perform service actions on your behalf. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // When an ongoing experiment should be stopped. See below. 
+ StopCondition []StopConditionObservation `json:"stopCondition,omitempty" tf:"stop_condition,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Target of an action. See below. + Target []ExperimentTemplateTargetObservation `json:"target,omitempty" tf:"target,omitempty"` +} + +type ExperimentTemplateParameters struct { + + // Action to be performed during an experiment. See below. + // +kubebuilder:validation:Optional + Action []ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Description for the experiment template. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The configuration for experiment logging. See below. + // +kubebuilder:validation:Optional + LogConfiguration *LogConfigurationParameters `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ARN of an IAM role that grants the AWS FIS service permission to perform service actions on your behalf. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // When an ongoing experiment should be stopped. See below. + // +kubebuilder:validation:Optional + StopCondition []StopConditionParameters `json:"stopCondition,omitempty" tf:"stop_condition,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Target of an action. See below. + // +kubebuilder:validation:Optional + Target []ExperimentTemplateTargetParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type ExperimentTemplateTargetInitParameters struct { + + // Filter(s) for the target. Filters can be used to select resources based on specific attributes returned by the respective describe action of the resource type. For more information, see Targets for AWS FIS. See below. + Filter []FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Friendly name given to the target. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource type parameters. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Set of ARNs of the resources to target with an action. Conflicts with resource_tag. + // +listType=set + ResourceArns []*string `json:"resourceArns,omitempty" tf:"resource_arns,omitempty"` + + // Tag(s) the resources need to have to be considered a valid target for an action. Conflicts with resource_arns. See below. + ResourceTag []ResourceTagInitParameters `json:"resourceTag,omitempty" tf:"resource_tag,omitempty"` + + // AWS resource type. The resource type must be supported for the specified action. To find out what resource types are supported, see Targets for AWS FIS. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // Scopes the identified resources. 
Valid values are ALL (all identified resources), COUNT(n) (randomly select n of the identified resources), PERCENT(n) (randomly select n percent of the identified resources). + SelectionMode *string `json:"selectionMode,omitempty" tf:"selection_mode,omitempty"` +} + +type ExperimentTemplateTargetObservation struct { + + // Filter(s) for the target. Filters can be used to select resources based on specific attributes returned by the respective describe action of the resource type. For more information, see Targets for AWS FIS. See below. + Filter []FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // Friendly name given to the target. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource type parameters. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Set of ARNs of the resources to target with an action. Conflicts with resource_tag. + // +listType=set + ResourceArns []*string `json:"resourceArns,omitempty" tf:"resource_arns,omitempty"` + + // Tag(s) the resources need to have to be considered a valid target for an action. Conflicts with resource_arns. See below. + ResourceTag []ResourceTagObservation `json:"resourceTag,omitempty" tf:"resource_tag,omitempty"` + + // AWS resource type. The resource type must be supported for the specified action. To find out what resource types are supported, see Targets for AWS FIS. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // Scopes the identified resources. Valid values are ALL (all identified resources), COUNT(n) (randomly select n of the identified resources), PERCENT(n) (randomly select n percent of the identified resources). + SelectionMode *string `json:"selectionMode,omitempty" tf:"selection_mode,omitempty"` +} + +type ExperimentTemplateTargetParameters struct { + + // Filter(s) for the target. 
Filters can be used to select resources based on specific attributes returned by the respective describe action of the resource type. For more information, see Targets for AWS FIS. See below. + // +kubebuilder:validation:Optional + Filter []FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Friendly name given to the target. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The resource type parameters. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Set of ARNs of the resources to target with an action. Conflicts with resource_tag. + // +kubebuilder:validation:Optional + // +listType=set + ResourceArns []*string `json:"resourceArns,omitempty" tf:"resource_arns,omitempty"` + + // Tag(s) the resources need to have to be considered a valid target for an action. Conflicts with resource_arns. See below. + // +kubebuilder:validation:Optional + ResourceTag []ResourceTagParameters `json:"resourceTag,omitempty" tf:"resource_tag,omitempty"` + + // AWS resource type. The resource type must be supported for the specified action. To find out what resource types are supported, see Targets for AWS FIS. + // +kubebuilder:validation:Optional + ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"` + + // Scopes the identified resources. Valid values are ALL (all identified resources), COUNT(n) (randomly select n of the identified resources), PERCENT(n) (randomly select n percent of the identified resources). + // +kubebuilder:validation:Optional + SelectionMode *string `json:"selectionMode" tf:"selection_mode,omitempty"` +} + +type FilterInitParameters struct { + + // Attribute path for the filter. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Set of attribute values for the filter. 
+ // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterObservation struct { + + // Attribute path for the filter. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Set of attribute values for the filter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterParameters struct { + + // Attribute path for the filter. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // Set of attribute values for the filter. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type LogConfigurationInitParameters struct { + + // The configuration for experiment logging to Amazon CloudWatch Logs. See below. + CloudwatchLogsConfiguration *CloudwatchLogsConfigurationInitParameters `json:"cloudwatchLogsConfiguration,omitempty" tf:"cloudwatch_logs_configuration,omitempty"` + + // The schema version. See documentation for the list of schema versions. + LogSchemaVersion *float64 `json:"logSchemaVersion,omitempty" tf:"log_schema_version,omitempty"` + + // The configuration for experiment logging to Amazon S3. See below. + S3Configuration *S3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` +} + +type LogConfigurationObservation struct { + + // The configuration for experiment logging to Amazon CloudWatch Logs. See below. + CloudwatchLogsConfiguration *CloudwatchLogsConfigurationObservation `json:"cloudwatchLogsConfiguration,omitempty" tf:"cloudwatch_logs_configuration,omitempty"` + + // The schema version. See documentation for the list of schema versions. + LogSchemaVersion *float64 `json:"logSchemaVersion,omitempty" tf:"log_schema_version,omitempty"` + + // The configuration for experiment logging to Amazon S3. See below. 
+ S3Configuration *S3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` +} + +type LogConfigurationParameters struct { + + // The configuration for experiment logging to Amazon CloudWatch Logs. See below. + // +kubebuilder:validation:Optional + CloudwatchLogsConfiguration *CloudwatchLogsConfigurationParameters `json:"cloudwatchLogsConfiguration,omitempty" tf:"cloudwatch_logs_configuration,omitempty"` + + // The schema version. See documentation for the list of schema versions. + // +kubebuilder:validation:Optional + LogSchemaVersion *float64 `json:"logSchemaVersion" tf:"log_schema_version,omitempty"` + + // The configuration for experiment logging to Amazon S3. See below. + // +kubebuilder:validation:Optional + S3Configuration *S3ConfigurationParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` +} + +type ParameterInitParameters struct { + + // Parameter name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Parameter value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ParameterObservation struct { + + // Parameter name. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Parameter value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ParameterParameters struct { + + // Parameter name. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Parameter value. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceTagInitParameters struct { + + // Tag key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceTagObservation struct { + + // Tag key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceTagParameters struct { + + // Tag key. 
+ // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Tag value. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type S3ConfigurationInitParameters struct { + + // The name of the destination bucket. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // The bucket prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3ConfigurationObservation struct { + + // The name of the destination bucket. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // The bucket prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3ConfigurationParameters struct { + + // The name of the destination bucket. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"` + + // The bucket prefix. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type StopConditionInitParameters struct { + + // Source of the condition. One of none, aws:cloudwatch:alarm. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // ARN of the CloudWatch alarm. Required if the source is a CloudWatch alarm. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type StopConditionObservation struct { + + // Source of the condition. One of none, aws:cloudwatch:alarm. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // ARN of the CloudWatch alarm. Required if the source is a CloudWatch alarm. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type StopConditionParameters struct { + + // Source of the condition. One of none, aws:cloudwatch:alarm. + // +kubebuilder:validation:Optional + Source *string `json:"source" tf:"source,omitempty"` + + // ARN of the CloudWatch alarm. Required if the source is a CloudWatch alarm. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TargetInitParameters struct { + + // Tag key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Target name, referencing a corresponding target. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TargetObservation struct { + + // Tag key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Target name, referencing a corresponding target. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TargetParameters struct { + + // Tag key. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Target name, referencing a corresponding target. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +// ExperimentTemplateSpec defines the desired state of ExperimentTemplate +type ExperimentTemplateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ExperimentTemplateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ExperimentTemplateInitParameters `json:"initProvider,omitempty"` +} + +// ExperimentTemplateStatus defines the observed state of ExperimentTemplate. 
+type ExperimentTemplateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ExperimentTemplateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ExperimentTemplate is the Schema for the ExperimentTemplates API. Provides an FIS Experiment Template. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ExperimentTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.action) || (has(self.initProvider) && has(self.initProvider.action))",message="spec.forProvider.action is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.description) || (has(self.initProvider) && has(self.initProvider.description))",message="spec.forProvider.description is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.stopCondition) || (has(self.initProvider) && has(self.initProvider.stopCondition))",message="spec.forProvider.stopCondition is a required parameter" + Spec ExperimentTemplateSpec `json:"spec"` + Status 
ExperimentTemplateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExperimentTemplateList contains a list of ExperimentTemplates +type ExperimentTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExperimentTemplate `json:"items"` +} + +// Repository type metadata. +var ( + ExperimentTemplate_Kind = "ExperimentTemplate" + ExperimentTemplate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ExperimentTemplate_Kind}.String() + ExperimentTemplate_KindAPIVersion = ExperimentTemplate_Kind + "." + CRDGroupVersion.String() + ExperimentTemplate_GroupVersionKind = CRDGroupVersion.WithKind(ExperimentTemplate_Kind) +) + +func init() { + SchemeBuilder.Register(&ExperimentTemplate{}, &ExperimentTemplateList{}) +} diff --git a/apis/fis/v1beta2/zz_generated.conversion_hubs.go b/apis/fis/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..c1204ec511 --- /dev/null +++ b/apis/fis/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ExperimentTemplate) Hub() {} diff --git a/apis/fis/v1beta2/zz_generated.deepcopy.go b/apis/fis/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..11ce1f4834 --- /dev/null +++ b/apis/fis/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1345 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.ActionID != nil { + in, out := &in.ActionID, &out.ActionID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartAfter != nil { + in, out := &in.StartAfter, &out.StartAfter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.ActionID != nil { + in, out := &in.ActionID, &out.ActionID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartAfter != nil { + in, out := &in.StartAfter, &out.StartAfter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.ActionID != nil { + in, out := &in.ActionID, &out.ActionID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartAfter != nil { + in, out := &in.StartAfter, &out.StartAfter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. +func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsConfigurationInitParameters) DeepCopyInto(out *CloudwatchLogsConfigurationInitParameters) { + *out = *in + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsConfigurationInitParameters. 
+func (in *CloudwatchLogsConfigurationInitParameters) DeepCopy() *CloudwatchLogsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsConfigurationObservation) DeepCopyInto(out *CloudwatchLogsConfigurationObservation) { + *out = *in + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsConfigurationObservation. +func (in *CloudwatchLogsConfigurationObservation) DeepCopy() *CloudwatchLogsConfigurationObservation { + if in == nil { + return nil + } + out := new(CloudwatchLogsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsConfigurationParameters) DeepCopyInto(out *CloudwatchLogsConfigurationParameters) { + *out = *in + if in.LogGroupArn != nil { + in, out := &in.LogGroupArn, &out.LogGroupArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsConfigurationParameters. +func (in *CloudwatchLogsConfigurationParameters) DeepCopy() *CloudwatchLogsConfigurationParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExperimentTemplate) DeepCopyInto(out *ExperimentTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplate. +func (in *ExperimentTemplate) DeepCopy() *ExperimentTemplate { + if in == nil { + return nil + } + out := new(ExperimentTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExperimentTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperimentTemplateInitParameters) DeepCopyInto(out *ExperimentTemplateInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(LogConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StopCondition != nil { + in, out := &in.StopCondition, &out.StopCondition + *out = make([]StopConditionInitParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make([]ExperimentTemplateTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplateInitParameters. +func (in *ExperimentTemplateInitParameters) DeepCopy() *ExperimentTemplateInitParameters { + if in == nil { + return nil + } + out := new(ExperimentTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperimentTemplateList) DeepCopyInto(out *ExperimentTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExperimentTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplateList. +func (in *ExperimentTemplateList) DeepCopy() *ExperimentTemplateList { + if in == nil { + return nil + } + out := new(ExperimentTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExperimentTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ExperimentTemplateObservation) DeepCopyInto(out *ExperimentTemplateObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(LogConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StopCondition != nil { + in, out := &in.StopCondition, &out.StopCondition + *out = make([]StopConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make([]ExperimentTemplateTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplateObservation. 
+func (in *ExperimentTemplateObservation) DeepCopy() *ExperimentTemplateObservation { + if in == nil { + return nil + } + out := new(ExperimentTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperimentTemplateParameters) DeepCopyInto(out *ExperimentTemplateParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(LogConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StopCondition != nil { + in, out := &in.StopCondition, &out.StopCondition + *out = make([]StopConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make([]ExperimentTemplateTargetParameters, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplateParameters. +func (in *ExperimentTemplateParameters) DeepCopy() *ExperimentTemplateParameters { + if in == nil { + return nil + } + out := new(ExperimentTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperimentTemplateSpec) DeepCopyInto(out *ExperimentTemplateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplateSpec. +func (in *ExperimentTemplateSpec) DeepCopy() *ExperimentTemplateSpec { + if in == nil { + return nil + } + out := new(ExperimentTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperimentTemplateStatus) DeepCopyInto(out *ExperimentTemplateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplateStatus. +func (in *ExperimentTemplateStatus) DeepCopy() *ExperimentTemplateStatus { + if in == nil { + return nil + } + out := new(ExperimentTemplateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExperimentTemplateTargetInitParameters) DeepCopyInto(out *ExperimentTemplateTargetInitParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]FilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceArns != nil { + in, out := &in.ResourceArns, &out.ResourceArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTag != nil { + in, out := &in.ResourceTag, &out.ResourceTag + *out = make([]ResourceTagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.SelectionMode != nil { + in, out := &in.SelectionMode, &out.SelectionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplateTargetInitParameters. +func (in *ExperimentTemplateTargetInitParameters) DeepCopy() *ExperimentTemplateTargetInitParameters { + if in == nil { + return nil + } + out := new(ExperimentTemplateTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExperimentTemplateTargetObservation) DeepCopyInto(out *ExperimentTemplateTargetObservation) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]FilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceArns != nil { + in, out := &in.ResourceArns, &out.ResourceArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTag != nil { + in, out := &in.ResourceTag, &out.ResourceTag + *out = make([]ResourceTagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.SelectionMode != nil { + in, out := &in.SelectionMode, &out.SelectionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplateTargetObservation. +func (in *ExperimentTemplateTargetObservation) DeepCopy() *ExperimentTemplateTargetObservation { + if in == nil { + return nil + } + out := new(ExperimentTemplateTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExperimentTemplateTargetParameters) DeepCopyInto(out *ExperimentTemplateTargetParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]FilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceArns != nil { + in, out := &in.ResourceArns, &out.ResourceArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTag != nil { + in, out := &in.ResourceTag, &out.ResourceTag + *out = make([]ResourceTagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.SelectionMode != nil { + in, out := &in.SelectionMode, &out.SelectionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentTemplateTargetParameters. +func (in *ExperimentTemplateTargetParameters) DeepCopy() *ExperimentTemplateTargetParameters { + if in == nil { + return nil + } + out := new(ExperimentTemplateTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. +func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. +func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogConfigurationInitParameters) DeepCopyInto(out *LogConfigurationInitParameters) { + *out = *in + if in.CloudwatchLogsConfiguration != nil { + in, out := &in.CloudwatchLogsConfiguration, &out.CloudwatchLogsConfiguration + *out = new(CloudwatchLogsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LogSchemaVersion != nil { + in, out := &in.LogSchemaVersion, &out.LogSchemaVersion + *out = new(float64) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigurationInitParameters. +func (in *LogConfigurationInitParameters) DeepCopy() *LogConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LogConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogConfigurationObservation) DeepCopyInto(out *LogConfigurationObservation) { + *out = *in + if in.CloudwatchLogsConfiguration != nil { + in, out := &in.CloudwatchLogsConfiguration, &out.CloudwatchLogsConfiguration + *out = new(CloudwatchLogsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.LogSchemaVersion != nil { + in, out := &in.LogSchemaVersion, &out.LogSchemaVersion + *out = new(float64) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3ConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigurationObservation. +func (in *LogConfigurationObservation) DeepCopy() *LogConfigurationObservation { + if in == nil { + return nil + } + out := new(LogConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogConfigurationParameters) DeepCopyInto(out *LogConfigurationParameters) { + *out = *in + if in.CloudwatchLogsConfiguration != nil { + in, out := &in.CloudwatchLogsConfiguration, &out.CloudwatchLogsConfiguration + *out = new(CloudwatchLogsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.LogSchemaVersion != nil { + in, out := &in.LogSchemaVersion, &out.LogSchemaVersion + *out = new(float64) + **out = **in + } + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3ConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigurationParameters. 
+func (in *LogConfigurationParameters) DeepCopy() *LogConfigurationParameters { + if in == nil { + return nil + } + out := new(LogConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterInitParameters) DeepCopyInto(out *ParameterInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterInitParameters. +func (in *ParameterInitParameters) DeepCopy() *ParameterInitParameters { + if in == nil { + return nil + } + out := new(ParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterObservation) DeepCopyInto(out *ParameterObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterObservation. +func (in *ParameterObservation) DeepCopy() *ParameterObservation { + if in == nil { + return nil + } + out := new(ParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParameterParameters) DeepCopyInto(out *ParameterParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterParameters. +func (in *ParameterParameters) DeepCopy() *ParameterParameters { + if in == nil { + return nil + } + out := new(ParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTagInitParameters) DeepCopyInto(out *ResourceTagInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTagInitParameters. +func (in *ResourceTagInitParameters) DeepCopy() *ResourceTagInitParameters { + if in == nil { + return nil + } + out := new(ResourceTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTagObservation) DeepCopyInto(out *ResourceTagObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTagObservation. 
+func (in *ResourceTagObservation) DeepCopy() *ResourceTagObservation { + if in == nil { + return nil + } + out := new(ResourceTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTagParameters) DeepCopyInto(out *ResourceTagParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTagParameters. +func (in *ResourceTagParameters) DeepCopy() *ResourceTagParameters { + if in == nil { + return nil + } + out := new(ResourceTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigurationInitParameters) DeepCopyInto(out *S3ConfigurationInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationInitParameters. +func (in *S3ConfigurationInitParameters) DeepCopy() *S3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ConfigurationObservation) DeepCopyInto(out *S3ConfigurationObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationObservation. +func (in *S3ConfigurationObservation) DeepCopy() *S3ConfigurationObservation { + if in == nil { + return nil + } + out := new(S3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigurationParameters) DeepCopyInto(out *S3ConfigurationParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationParameters. +func (in *S3ConfigurationParameters) DeepCopy() *S3ConfigurationParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StopConditionInitParameters) DeepCopyInto(out *StopConditionInitParameters) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StopConditionInitParameters. 
+func (in *StopConditionInitParameters) DeepCopy() *StopConditionInitParameters { + if in == nil { + return nil + } + out := new(StopConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StopConditionObservation) DeepCopyInto(out *StopConditionObservation) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StopConditionObservation. +func (in *StopConditionObservation) DeepCopy() *StopConditionObservation { + if in == nil { + return nil + } + out := new(StopConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StopConditionParameters) DeepCopyInto(out *StopConditionParameters) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StopConditionParameters. +func (in *StopConditionParameters) DeepCopy() *StopConditionParameters { + if in == nil { + return nil + } + out := new(StopConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetInitParameters) DeepCopyInto(out *TargetInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetInitParameters. +func (in *TargetInitParameters) DeepCopy() *TargetInitParameters { + if in == nil { + return nil + } + out := new(TargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetObservation) DeepCopyInto(out *TargetObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObservation. +func (in *TargetObservation) DeepCopy() *TargetObservation { + if in == nil { + return nil + } + out := new(TargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetParameters) DeepCopyInto(out *TargetParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetParameters. 
+func (in *TargetParameters) DeepCopy() *TargetParameters { + if in == nil { + return nil + } + out := new(TargetParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/fis/v1beta2/zz_generated.managed.go b/apis/fis/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..166321c1f6 --- /dev/null +++ b/apis/fis/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ExperimentTemplate. +func (mg *ExperimentTemplate) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ExperimentTemplate. +func (mg *ExperimentTemplate) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ExperimentTemplate. +func (mg *ExperimentTemplate) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ExperimentTemplate. +func (mg *ExperimentTemplate) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ExperimentTemplate. +func (mg *ExperimentTemplate) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ExperimentTemplate. +func (mg *ExperimentTemplate) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ExperimentTemplate. +func (mg *ExperimentTemplate) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ExperimentTemplate. 
+func (mg *ExperimentTemplate) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ExperimentTemplate. +func (mg *ExperimentTemplate) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ExperimentTemplate. +func (mg *ExperimentTemplate) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ExperimentTemplate. +func (mg *ExperimentTemplate) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ExperimentTemplate. +func (mg *ExperimentTemplate) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/fis/v1beta2/zz_generated.managedlist.go b/apis/fis/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..57aa37ecff --- /dev/null +++ b/apis/fis/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ExperimentTemplateList. +func (l *ExperimentTemplateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/fis/v1beta2/zz_generated.resolvers.go b/apis/fis/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..3770ebba8f --- /dev/null +++ b/apis/fis/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ExperimentTemplate) ResolveReferences( // ResolveReferences of this ExperimentTemplate. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, 
+ To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/fis/v1beta2/zz_groupversion_info.go b/apis/fis/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..6c1e9c930f --- /dev/null +++ b/apis/fis/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=fis.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "fis.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/fsx/v1beta1/zz_backup_types.go b/apis/fsx/v1beta1/zz_backup_types.go index 97ccec3830..e7bbd4c1ce 100755 --- a/apis/fsx/v1beta1/zz_backup_types.go +++ b/apis/fsx/v1beta1/zz_backup_types.go @@ -16,7 +16,7 @@ import ( type BackupInitParameters struct { // The ID of the file system to back up. Required if backing up Lustre or Windows file systems. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/fsx/v1beta1.LustreFileSystem + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/fsx/v1beta2.LustreFileSystem // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` @@ -71,7 +71,7 @@ type BackupObservation struct { type BackupParameters struct { // The ID of the file system to back up. Required if backing up Lustre or Windows file systems. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/fsx/v1beta1.LustreFileSystem + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/fsx/v1beta2.LustreFileSystem // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` diff --git a/apis/fsx/v1beta1/zz_generated.conversion_hubs.go b/apis/fsx/v1beta1/zz_generated.conversion_hubs.go index ae7be559f3..41681908d2 100755 --- a/apis/fsx/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/fsx/v1beta1/zz_generated.conversion_hubs.go @@ -8,18 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Backup) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DataRepositoryAssociation) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LustreFileSystem) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *OntapFileSystem) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *OntapStorageVirtualMachine) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *WindowsFileSystem) Hub() {} diff --git a/apis/fsx/v1beta1/zz_generated.conversion_spokes.go b/apis/fsx/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..4a8b5173e2 --- /dev/null +++ b/apis/fsx/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this DataRepositoryAssociation to the hub type. +func (tr *DataRepositoryAssociation) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataRepositoryAssociation type. +func (tr *DataRepositoryAssociation) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LustreFileSystem to the hub type. 
+func (tr *LustreFileSystem) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LustreFileSystem type. +func (tr *LustreFileSystem) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this OntapFileSystem to the hub type. +func (tr *OntapFileSystem) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the OntapFileSystem type. +func (tr *OntapFileSystem) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this OntapStorageVirtualMachine to the hub type. 
+func (tr *OntapStorageVirtualMachine) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the OntapStorageVirtualMachine type. +func (tr *OntapStorageVirtualMachine) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WindowsFileSystem to the hub type. +func (tr *WindowsFileSystem) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WindowsFileSystem type. 
+func (tr *WindowsFileSystem) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/fsx/v1beta1/zz_generated.resolvers.go b/apis/fsx/v1beta1/zz_generated.resolvers.go index bd9f37ac66..d00d7696ab 100644 --- a/apis/fsx/v1beta1/zz_generated.resolvers.go +++ b/apis/fsx/v1beta1/zz_generated.resolvers.go @@ -28,7 +28,7 @@ func (mg *Backup) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("fsx.aws.upbound.io", "v1beta1", "LustreFileSystem", "LustreFileSystemList") + m, l, err = apisresolver.GetManagedResource("fsx.aws.upbound.io", "v1beta2", "LustreFileSystem", "LustreFileSystemList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -47,7 +47,7 @@ func (mg *Backup) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.FileSystemIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("fsx.aws.upbound.io", "v1beta1", "LustreFileSystem", "LustreFileSystemList") + m, l, err = apisresolver.GetManagedResource("fsx.aws.upbound.io", "v1beta2", "LustreFileSystem", "LustreFileSystemList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/fsx/v1beta2/zz_datarepositoryassociation_terraformed.go b/apis/fsx/v1beta2/zz_datarepositoryassociation_terraformed.go new file mode 100755 index 0000000000..8fb36173df --- 
/dev/null +++ b/apis/fsx/v1beta2/zz_datarepositoryassociation_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataRepositoryAssociation +func (mg *DataRepositoryAssociation) GetTerraformResourceType() string { + return "aws_fsx_data_repository_association" +} + +// GetConnectionDetailsMapping for this DataRepositoryAssociation +func (tr *DataRepositoryAssociation) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataRepositoryAssociation +func (tr *DataRepositoryAssociation) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataRepositoryAssociation +func (tr *DataRepositoryAssociation) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataRepositoryAssociation +func (tr *DataRepositoryAssociation) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DataRepositoryAssociation +func (tr *DataRepositoryAssociation) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataRepositoryAssociation +func 
(tr *DataRepositoryAssociation) SetParameters(params map[string]any) error { +	p, err := json.TFParser.Marshal(params) +	if err != nil { +		return err +	} +	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataRepositoryAssociation +func (tr *DataRepositoryAssociation) GetInitParameters() (map[string]any, error) { +	p, err := json.TFParser.Marshal(tr.Spec.InitProvider) +	if err != nil { +		return nil, err +	} +	base := map[string]any{} +	return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this DataRepositoryAssociation +func (tr *DataRepositoryAssociation) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { +	params, err := tr.GetParameters() +	if err != nil { +		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) +	} +	if !shouldMergeInitProvider { +		return params, nil +	} + +	initParams, err := tr.GetInitParameters() +	if err != nil { +		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) +	} + +	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the +	// slices from the initProvider to forProvider. As it also sets +	// overwrite to true, we need to set it back to false, we don't +	// want to overwrite the forProvider fields with the initProvider +	// fields. +	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { +		c.Overwrite = false +	}) +	if err != nil { +		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) +	} + +	return params, nil +} + +// LateInitialize this DataRepositoryAssociation using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *DataRepositoryAssociation) LateInitialize(attrs []byte) (bool, error) { + params := &DataRepositoryAssociationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataRepositoryAssociation) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/fsx/v1beta2/zz_datarepositoryassociation_types.go b/apis/fsx/v1beta2/zz_datarepositoryassociation_types.go new file mode 100755 index 0000000000..102598ef41 --- /dev/null +++ b/apis/fsx/v1beta2/zz_datarepositoryassociation_types.go @@ -0,0 +1,276 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoExportPolicyInitParameters struct { + + // A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are NEW, CHANGED, DELETED. Max of 3. + Events []*string `json:"events,omitempty" tf:"events,omitempty"` +} + +type AutoExportPolicyObservation struct { + + // A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are NEW, CHANGED, DELETED. Max of 3. 
+ Events []*string `json:"events,omitempty" tf:"events,omitempty"` +} + +type AutoExportPolicyParameters struct { + + // A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are NEW, CHANGED, DELETED. Max of 3. + // +kubebuilder:validation:Optional + Events []*string `json:"events,omitempty" tf:"events,omitempty"` +} + +type AutoImportPolicyInitParameters struct { + + // A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are NEW, CHANGED, DELETED. Max of 3. + Events []*string `json:"events,omitempty" tf:"events,omitempty"` +} + +type AutoImportPolicyObservation struct { + + // A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are NEW, CHANGED, DELETED. Max of 3. + Events []*string `json:"events,omitempty" tf:"events,omitempty"` +} + +type AutoImportPolicyParameters struct { + + // A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are NEW, CHANGED, DELETED. Max of 3. + // +kubebuilder:validation:Optional + Events []*string `json:"events,omitempty" tf:"events,omitempty"` +} + +type DataRepositoryAssociationInitParameters struct { + + // Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to false. + BatchImportMetaDataOnCreate *bool `json:"batchImportMetaDataOnCreate,omitempty" tf:"batch_import_meta_data_on_create,omitempty"` + + // The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system. 
+ DataRepositoryPath *string `json:"dataRepositoryPath,omitempty" tf:"data_repository_path,omitempty"` + + // Set to true to delete files from the file system upon deleting this data repository association. Defaults to false. + DeleteDataInFilesystem *bool `json:"deleteDataInFilesystem,omitempty" tf:"delete_data_in_filesystem,omitempty"` + + // The ID of the Amazon FSx file system to on which to create a data repository association. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/fsx/v1beta2.LustreFileSystem + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Reference to a LustreFileSystem in fsx to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDRef *v1.Reference `json:"fileSystemIdRef,omitempty" tf:"-"` + + // Selector for a LustreFileSystem in fsx to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDSelector *v1.Selector `json:"fileSystemIdSelector,omitempty" tf:"-"` + + // A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with data_repository_path. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. 
+ FileSystemPath *string `json:"fileSystemPath,omitempty" tf:"file_system_path,omitempty"` + + // For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. + ImportedFileChunkSize *float64 `json:"importedFileChunkSize,omitempty" tf:"imported_file_chunk_size,omitempty"` + + // See the s3 configuration block. Max of 1. + // The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. + S3 *S3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DataRepositoryAssociationObservation struct { + + // Amazon Resource Name of the file system. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Identifier of the data repository association, e.g., dra-12345678 + AssociationID *string `json:"associationId,omitempty" tf:"association_id,omitempty"` + + // Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to false. + BatchImportMetaDataOnCreate *bool `json:"batchImportMetaDataOnCreate,omitempty" tf:"batch_import_meta_data_on_create,omitempty"` + + // The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. 
This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system. + DataRepositoryPath *string `json:"dataRepositoryPath,omitempty" tf:"data_repository_path,omitempty"` + + // Set to true to delete files from the file system upon deleting this data repository association. Defaults to false. + DeleteDataInFilesystem *bool `json:"deleteDataInFilesystem,omitempty" tf:"delete_data_in_filesystem,omitempty"` + + // The ID of the Amazon FSx file system to on which to create a data repository association. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with data_repository_path. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. + FileSystemPath *string `json:"fileSystemPath,omitempty" tf:"file_system_path,omitempty"` + + // Identifier of the data repository association, e.g., dra-12345678 + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. 
+ ImportedFileChunkSize *float64 `json:"importedFileChunkSize,omitempty" tf:"imported_file_chunk_size,omitempty"` + + // See the s3 configuration block. Max of 1. + // The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. + S3 *S3Observation `json:"s3,omitempty" tf:"s3,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type DataRepositoryAssociationParameters struct { + + // Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to false. + // +kubebuilder:validation:Optional + BatchImportMetaDataOnCreate *bool `json:"batchImportMetaDataOnCreate,omitempty" tf:"batch_import_meta_data_on_create,omitempty"` + + // The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system. + // +kubebuilder:validation:Optional + DataRepositoryPath *string `json:"dataRepositoryPath,omitempty" tf:"data_repository_path,omitempty"` + + // Set to true to delete files from the file system upon deleting this data repository association. Defaults to false. 
+ // +kubebuilder:validation:Optional + DeleteDataInFilesystem *bool `json:"deleteDataInFilesystem,omitempty" tf:"delete_data_in_filesystem,omitempty"` + + // The ID of the Amazon FSx file system to on which to create a data repository association. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/fsx/v1beta2.LustreFileSystem + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Reference to a LustreFileSystem in fsx to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDRef *v1.Reference `json:"fileSystemIdRef,omitempty" tf:"-"` + + // Selector for a LustreFileSystem in fsx to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDSelector *v1.Selector `json:"fileSystemIdSelector,omitempty" tf:"-"` + + // A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with data_repository_path. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. + // +kubebuilder:validation:Optional + FileSystemPath *string `json:"fileSystemPath,omitempty" tf:"file_system_path,omitempty"` + + // For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. 
The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. + // +kubebuilder:validation:Optional + ImportedFileChunkSize *float64 `json:"importedFileChunkSize,omitempty" tf:"imported_file_chunk_size,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // See the s3 configuration block. Max of 1. + // The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. + // +kubebuilder:validation:Optional + S3 *S3Parameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type S3InitParameters struct { + + // Specifies the type of updated objects that will be automatically exported from your file system to the linked S3 bucket. See the events configuration block. + AutoExportPolicy *AutoExportPolicyInitParameters `json:"autoExportPolicy,omitempty" tf:"auto_export_policy,omitempty"` + + // Specifies the type of updated objects that will be automatically imported from the linked S3 bucket to your file system. See the events configuration block. + AutoImportPolicy *AutoImportPolicyInitParameters `json:"autoImportPolicy,omitempty" tf:"auto_import_policy,omitempty"` +} + +type S3Observation struct { + + // Specifies the type of updated objects that will be automatically exported from your file system to the linked S3 bucket. See the events configuration block. 
+ AutoExportPolicy *AutoExportPolicyObservation `json:"autoExportPolicy,omitempty" tf:"auto_export_policy,omitempty"` + + // Specifies the type of updated objects that will be automatically imported from the linked S3 bucket to your file system. See the events configuration block. + AutoImportPolicy *AutoImportPolicyObservation `json:"autoImportPolicy,omitempty" tf:"auto_import_policy,omitempty"` +} + +type S3Parameters struct { + + // Specifies the type of updated objects that will be automatically exported from your file system to the linked S3 bucket. See the events configuration block. + // +kubebuilder:validation:Optional + AutoExportPolicy *AutoExportPolicyParameters `json:"autoExportPolicy,omitempty" tf:"auto_export_policy,omitempty"` + + // Specifies the type of updated objects that will be automatically imported from the linked S3 bucket to your file system. See the events configuration block. + // +kubebuilder:validation:Optional + AutoImportPolicy *AutoImportPolicyParameters `json:"autoImportPolicy,omitempty" tf:"auto_import_policy,omitempty"` +} + +// DataRepositoryAssociationSpec defines the desired state of DataRepositoryAssociation +type DataRepositoryAssociationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataRepositoryAssociationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider DataRepositoryAssociationInitParameters `json:"initProvider,omitempty"` +} + +// DataRepositoryAssociationStatus defines the observed state of DataRepositoryAssociation. +type DataRepositoryAssociationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataRepositoryAssociationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataRepositoryAssociation is the Schema for the DataRepositoryAssociations API. Manages a FSx for Lustre Data Repository Association. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DataRepositoryAssociation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dataRepositoryPath) || (has(self.initProvider) && has(self.initProvider.dataRepositoryPath))",message="spec.forProvider.dataRepositoryPath is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.fileSystemPath) || (has(self.initProvider) && has(self.initProvider.fileSystemPath))",message="spec.forProvider.fileSystemPath is a required parameter" + Spec DataRepositoryAssociationSpec `json:"spec"` + Status DataRepositoryAssociationStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataRepositoryAssociationList contains a list of DataRepositoryAssociations +type DataRepositoryAssociationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataRepositoryAssociation `json:"items"` +} + +// Repository type metadata. +var ( + DataRepositoryAssociation_Kind = "DataRepositoryAssociation" + DataRepositoryAssociation_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataRepositoryAssociation_Kind}.String() + DataRepositoryAssociation_KindAPIVersion = DataRepositoryAssociation_Kind + "." + CRDGroupVersion.String() + DataRepositoryAssociation_GroupVersionKind = CRDGroupVersion.WithKind(DataRepositoryAssociation_Kind) +) + +func init() { + SchemeBuilder.Register(&DataRepositoryAssociation{}, &DataRepositoryAssociationList{}) +} diff --git a/apis/fsx/v1beta2/zz_generated.conversion_hubs.go b/apis/fsx/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..7f7d8a4f40 --- /dev/null +++ b/apis/fsx/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *DataRepositoryAssociation) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LustreFileSystem) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OntapFileSystem) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OntapStorageVirtualMachine) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *WindowsFileSystem) Hub() {} diff --git a/apis/fsx/v1beta2/zz_generated.deepcopy.go b/apis/fsx/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..4f4e0b9f24 --- /dev/null +++ b/apis/fsx/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,4171 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActiveDirectoryConfigurationInitParameters) DeepCopyInto(out *ActiveDirectoryConfigurationInitParameters) { + *out = *in + if in.NetbiosName != nil { + in, out := &in.NetbiosName, &out.NetbiosName + *out = new(string) + **out = **in + } + if in.SelfManagedActiveDirectoryConfiguration != nil { + in, out := &in.SelfManagedActiveDirectoryConfiguration, &out.SelfManagedActiveDirectoryConfiguration + *out = new(SelfManagedActiveDirectoryConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryConfigurationInitParameters. +func (in *ActiveDirectoryConfigurationInitParameters) DeepCopy() *ActiveDirectoryConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryConfigurationObservation) DeepCopyInto(out *ActiveDirectoryConfigurationObservation) { + *out = *in + if in.NetbiosName != nil { + in, out := &in.NetbiosName, &out.NetbiosName + *out = new(string) + **out = **in + } + if in.SelfManagedActiveDirectoryConfiguration != nil { + in, out := &in.SelfManagedActiveDirectoryConfiguration, &out.SelfManagedActiveDirectoryConfiguration + *out = new(SelfManagedActiveDirectoryConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryConfigurationObservation. +func (in *ActiveDirectoryConfigurationObservation) DeepCopy() *ActiveDirectoryConfigurationObservation { + if in == nil { + return nil + } + out := new(ActiveDirectoryConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActiveDirectoryConfigurationParameters) DeepCopyInto(out *ActiveDirectoryConfigurationParameters) { + *out = *in + if in.NetbiosName != nil { + in, out := &in.NetbiosName, &out.NetbiosName + *out = new(string) + **out = **in + } + if in.SelfManagedActiveDirectoryConfiguration != nil { + in, out := &in.SelfManagedActiveDirectoryConfiguration, &out.SelfManagedActiveDirectoryConfiguration + *out = new(SelfManagedActiveDirectoryConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryConfigurationParameters. +func (in *ActiveDirectoryConfigurationParameters) DeepCopy() *ActiveDirectoryConfigurationParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuditLogConfigurationInitParameters) DeepCopyInto(out *AuditLogConfigurationInitParameters) { + *out = *in + if in.AuditLogDestination != nil { + in, out := &in.AuditLogDestination, &out.AuditLogDestination + *out = new(string) + **out = **in + } + if in.FileAccessAuditLogLevel != nil { + in, out := &in.FileAccessAuditLogLevel, &out.FileAccessAuditLogLevel + *out = new(string) + **out = **in + } + if in.FileShareAccessAuditLogLevel != nil { + in, out := &in.FileShareAccessAuditLogLevel, &out.FileShareAccessAuditLogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditLogConfigurationInitParameters. +func (in *AuditLogConfigurationInitParameters) DeepCopy() *AuditLogConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AuditLogConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditLogConfigurationObservation) DeepCopyInto(out *AuditLogConfigurationObservation) { + *out = *in + if in.AuditLogDestination != nil { + in, out := &in.AuditLogDestination, &out.AuditLogDestination + *out = new(string) + **out = **in + } + if in.FileAccessAuditLogLevel != nil { + in, out := &in.FileAccessAuditLogLevel, &out.FileAccessAuditLogLevel + *out = new(string) + **out = **in + } + if in.FileShareAccessAuditLogLevel != nil { + in, out := &in.FileShareAccessAuditLogLevel, &out.FileShareAccessAuditLogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditLogConfigurationObservation. 
+func (in *AuditLogConfigurationObservation) DeepCopy() *AuditLogConfigurationObservation { + if in == nil { + return nil + } + out := new(AuditLogConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditLogConfigurationParameters) DeepCopyInto(out *AuditLogConfigurationParameters) { + *out = *in + if in.AuditLogDestination != nil { + in, out := &in.AuditLogDestination, &out.AuditLogDestination + *out = new(string) + **out = **in + } + if in.FileAccessAuditLogLevel != nil { + in, out := &in.FileAccessAuditLogLevel, &out.FileAccessAuditLogLevel + *out = new(string) + **out = **in + } + if in.FileShareAccessAuditLogLevel != nil { + in, out := &in.FileShareAccessAuditLogLevel, &out.FileShareAccessAuditLogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditLogConfigurationParameters. +func (in *AuditLogConfigurationParameters) DeepCopy() *AuditLogConfigurationParameters { + if in == nil { + return nil + } + out := new(AuditLogConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoExportPolicyInitParameters) DeepCopyInto(out *AutoExportPolicyInitParameters) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoExportPolicyInitParameters. 
+func (in *AutoExportPolicyInitParameters) DeepCopy() *AutoExportPolicyInitParameters { + if in == nil { + return nil + } + out := new(AutoExportPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoExportPolicyObservation) DeepCopyInto(out *AutoExportPolicyObservation) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoExportPolicyObservation. +func (in *AutoExportPolicyObservation) DeepCopy() *AutoExportPolicyObservation { + if in == nil { + return nil + } + out := new(AutoExportPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoExportPolicyParameters) DeepCopyInto(out *AutoExportPolicyParameters) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoExportPolicyParameters. +func (in *AutoExportPolicyParameters) DeepCopy() *AutoExportPolicyParameters { + if in == nil { + return nil + } + out := new(AutoExportPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoImportPolicyInitParameters) DeepCopyInto(out *AutoImportPolicyInitParameters) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoImportPolicyInitParameters. +func (in *AutoImportPolicyInitParameters) DeepCopy() *AutoImportPolicyInitParameters { + if in == nil { + return nil + } + out := new(AutoImportPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoImportPolicyObservation) DeepCopyInto(out *AutoImportPolicyObservation) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoImportPolicyObservation. +func (in *AutoImportPolicyObservation) DeepCopy() *AutoImportPolicyObservation { + if in == nil { + return nil + } + out := new(AutoImportPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoImportPolicyParameters) DeepCopyInto(out *AutoImportPolicyParameters) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoImportPolicyParameters. 
+func (in *AutoImportPolicyParameters) DeepCopy() *AutoImportPolicyParameters { + if in == nil { + return nil + } + out := new(AutoImportPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataRepositoryAssociation) DeepCopyInto(out *DataRepositoryAssociation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataRepositoryAssociation. +func (in *DataRepositoryAssociation) DeepCopy() *DataRepositoryAssociation { + if in == nil { + return nil + } + out := new(DataRepositoryAssociation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataRepositoryAssociation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataRepositoryAssociationInitParameters) DeepCopyInto(out *DataRepositoryAssociationInitParameters) { + *out = *in + if in.BatchImportMetaDataOnCreate != nil { + in, out := &in.BatchImportMetaDataOnCreate, &out.BatchImportMetaDataOnCreate + *out = new(bool) + **out = **in + } + if in.DataRepositoryPath != nil { + in, out := &in.DataRepositoryPath, &out.DataRepositoryPath + *out = new(string) + **out = **in + } + if in.DeleteDataInFilesystem != nil { + in, out := &in.DeleteDataInFilesystem, &out.DeleteDataInFilesystem + *out = new(bool) + **out = **in + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemIDRef != nil { + in, out := &in.FileSystemIDRef, &out.FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileSystemIDSelector != nil { + in, out := &in.FileSystemIDSelector, &out.FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FileSystemPath != nil { + in, out := &in.FileSystemPath, &out.FileSystemPath + *out = new(string) + **out = **in + } + if in.ImportedFileChunkSize != nil { + in, out := &in.ImportedFileChunkSize, &out.ImportedFileChunkSize + *out = new(float64) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataRepositoryAssociationInitParameters. 
+func (in *DataRepositoryAssociationInitParameters) DeepCopy() *DataRepositoryAssociationInitParameters { + if in == nil { + return nil + } + out := new(DataRepositoryAssociationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataRepositoryAssociationList) DeepCopyInto(out *DataRepositoryAssociationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataRepositoryAssociation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataRepositoryAssociationList. +func (in *DataRepositoryAssociationList) DeepCopy() *DataRepositoryAssociationList { + if in == nil { + return nil + } + out := new(DataRepositoryAssociationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataRepositoryAssociationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataRepositoryAssociationObservation) DeepCopyInto(out *DataRepositoryAssociationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AssociationID != nil { + in, out := &in.AssociationID, &out.AssociationID + *out = new(string) + **out = **in + } + if in.BatchImportMetaDataOnCreate != nil { + in, out := &in.BatchImportMetaDataOnCreate, &out.BatchImportMetaDataOnCreate + *out = new(bool) + **out = **in + } + if in.DataRepositoryPath != nil { + in, out := &in.DataRepositoryPath, &out.DataRepositoryPath + *out = new(string) + **out = **in + } + if in.DeleteDataInFilesystem != nil { + in, out := &in.DeleteDataInFilesystem, &out.DeleteDataInFilesystem + *out = new(bool) + **out = **in + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemPath != nil { + in, out := &in.FileSystemPath, &out.FileSystemPath + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImportedFileChunkSize != nil { + in, out := &in.ImportedFileChunkSize, &out.ImportedFileChunkSize + *out = new(float64) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Observation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataRepositoryAssociationObservation. +func (in *DataRepositoryAssociationObservation) DeepCopy() *DataRepositoryAssociationObservation { + if in == nil { + return nil + } + out := new(DataRepositoryAssociationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataRepositoryAssociationParameters) DeepCopyInto(out *DataRepositoryAssociationParameters) { + *out = *in + if in.BatchImportMetaDataOnCreate != nil { + in, out := &in.BatchImportMetaDataOnCreate, &out.BatchImportMetaDataOnCreate + *out = new(bool) + **out = **in + } + if in.DataRepositoryPath != nil { + in, out := &in.DataRepositoryPath, &out.DataRepositoryPath + *out = new(string) + **out = **in + } + if in.DeleteDataInFilesystem != nil { + in, out := &in.DeleteDataInFilesystem, &out.DeleteDataInFilesystem + *out = new(bool) + **out = **in + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemIDRef != nil { + in, out := &in.FileSystemIDRef, &out.FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileSystemIDSelector != nil { + in, out := &in.FileSystemIDSelector, &out.FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FileSystemPath != nil { + in, out := &in.FileSystemPath, &out.FileSystemPath + *out = new(string) + **out = **in + } + if in.ImportedFileChunkSize != nil { + in, out := &in.ImportedFileChunkSize, &out.ImportedFileChunkSize + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Parameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, 
out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataRepositoryAssociationParameters. +func (in *DataRepositoryAssociationParameters) DeepCopy() *DataRepositoryAssociationParameters { + if in == nil { + return nil + } + out := new(DataRepositoryAssociationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataRepositoryAssociationSpec) DeepCopyInto(out *DataRepositoryAssociationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataRepositoryAssociationSpec. +func (in *DataRepositoryAssociationSpec) DeepCopy() *DataRepositoryAssociationSpec { + if in == nil { + return nil + } + out := new(DataRepositoryAssociationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataRepositoryAssociationStatus) DeepCopyInto(out *DataRepositoryAssociationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataRepositoryAssociationStatus. 
+func (in *DataRepositoryAssociationStatus) DeepCopy() *DataRepositoryAssociationStatus { + if in == nil { + return nil + } + out := new(DataRepositoryAssociationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskIopsConfigurationInitParameters) DeepCopyInto(out *DiskIopsConfigurationInitParameters) { + *out = *in + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIopsConfigurationInitParameters. +func (in *DiskIopsConfigurationInitParameters) DeepCopy() *DiskIopsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(DiskIopsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskIopsConfigurationObservation) DeepCopyInto(out *DiskIopsConfigurationObservation) { + *out = *in + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIopsConfigurationObservation. +func (in *DiskIopsConfigurationObservation) DeepCopy() *DiskIopsConfigurationObservation { + if in == nil { + return nil + } + out := new(DiskIopsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskIopsConfigurationParameters) DeepCopyInto(out *DiskIopsConfigurationParameters) { + *out = *in + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIopsConfigurationParameters. +func (in *DiskIopsConfigurationParameters) DeepCopy() *DiskIopsConfigurationParameters { + if in == nil { + return nil + } + out := new(DiskIopsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsInitParameters) DeepCopyInto(out *EndpointsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsInitParameters. +func (in *EndpointsInitParameters) DeepCopy() *EndpointsInitParameters { + if in == nil { + return nil + } + out := new(EndpointsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsManagementInitParameters) DeepCopyInto(out *EndpointsManagementInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsManagementInitParameters. +func (in *EndpointsManagementInitParameters) DeepCopy() *EndpointsManagementInitParameters { + if in == nil { + return nil + } + out := new(EndpointsManagementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointsManagementObservation) DeepCopyInto(out *EndpointsManagementObservation) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsManagementObservation. +func (in *EndpointsManagementObservation) DeepCopy() *EndpointsManagementObservation { + if in == nil { + return nil + } + out := new(EndpointsManagementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsManagementParameters) DeepCopyInto(out *EndpointsManagementParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsManagementParameters. +func (in *EndpointsManagementParameters) DeepCopy() *EndpointsManagementParameters { + if in == nil { + return nil + } + out := new(EndpointsManagementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointsObservation) DeepCopyInto(out *EndpointsObservation) { + *out = *in + if in.Intercluster != nil { + in, out := &in.Intercluster, &out.Intercluster + *out = make([]InterclusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Management != nil { + in, out := &in.Management, &out.Management + *out = make([]ManagementObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsObservation. +func (in *EndpointsObservation) DeepCopy() *EndpointsObservation { + if in == nil { + return nil + } + out := new(EndpointsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsParameters) DeepCopyInto(out *EndpointsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsParameters. +func (in *EndpointsParameters) DeepCopy() *EndpointsParameters { + if in == nil { + return nil + } + out := new(EndpointsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIInitParameters) DeepCopyInto(out *ISCSIInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIInitParameters. +func (in *ISCSIInitParameters) DeepCopy() *ISCSIInitParameters { + if in == nil { + return nil + } + out := new(ISCSIInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ISCSIObservation) DeepCopyInto(out *ISCSIObservation) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIObservation. +func (in *ISCSIObservation) DeepCopy() *ISCSIObservation { + if in == nil { + return nil + } + out := new(ISCSIObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIParameters) DeepCopyInto(out *ISCSIParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIParameters. +func (in *ISCSIParameters) DeepCopy() *ISCSIParameters { + if in == nil { + return nil + } + out := new(ISCSIParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InterclusterInitParameters) DeepCopyInto(out *InterclusterInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterclusterInitParameters. +func (in *InterclusterInitParameters) DeepCopy() *InterclusterInitParameters { + if in == nil { + return nil + } + out := new(InterclusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InterclusterObservation) DeepCopyInto(out *InterclusterObservation) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterclusterObservation. +func (in *InterclusterObservation) DeepCopy() *InterclusterObservation { + if in == nil { + return nil + } + out := new(InterclusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InterclusterParameters) DeepCopyInto(out *InterclusterParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterclusterParameters. +func (in *InterclusterParameters) DeepCopy() *InterclusterParameters { + if in == nil { + return nil + } + out := new(InterclusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogConfigurationInitParameters) DeepCopyInto(out *LogConfigurationInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigurationInitParameters. 
+func (in *LogConfigurationInitParameters) DeepCopy() *LogConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LogConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogConfigurationObservation) DeepCopyInto(out *LogConfigurationObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigurationObservation. +func (in *LogConfigurationObservation) DeepCopy() *LogConfigurationObservation { + if in == nil { + return nil + } + out := new(LogConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogConfigurationParameters) DeepCopyInto(out *LogConfigurationParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfigurationParameters. +func (in *LogConfigurationParameters) DeepCopy() *LogConfigurationParameters { + if in == nil { + return nil + } + out := new(LogConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LustreFileSystem) DeepCopyInto(out *LustreFileSystem) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreFileSystem. +func (in *LustreFileSystem) DeepCopy() *LustreFileSystem { + if in == nil { + return nil + } + out := new(LustreFileSystem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LustreFileSystem) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LustreFileSystemInitParameters) DeepCopyInto(out *LustreFileSystemInitParameters) { + *out = *in + if in.AutoImportPolicy != nil { + in, out := &in.AutoImportPolicy, &out.AutoImportPolicy + *out = new(string) + **out = **in + } + if in.AutomaticBackupRetentionDays != nil { + in, out := &in.AutomaticBackupRetentionDays, &out.AutomaticBackupRetentionDays + *out = new(float64) + **out = **in + } + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.CopyTagsToBackups != nil { + in, out := &in.CopyTagsToBackups, &out.CopyTagsToBackups + *out = new(bool) + **out = **in + } + if in.DailyAutomaticBackupStartTime != nil { + in, out := &in.DailyAutomaticBackupStartTime, &out.DailyAutomaticBackupStartTime + *out = new(string) + **out = **in + } + if in.DataCompressionType != nil { + in, out := &in.DataCompressionType, &out.DataCompressionType + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } + if in.DriveCacheType != nil { + in, out := 
&in.DriveCacheType, &out.DriveCacheType + *out = new(string) + **out = **in + } + if in.ExportPath != nil { + in, out := &in.ExportPath, &out.ExportPath + *out = new(string) + **out = **in + } + if in.FileSystemTypeVersion != nil { + in, out := &in.FileSystemTypeVersion, &out.FileSystemTypeVersion + *out = new(string) + **out = **in + } + if in.ImportPath != nil { + in, out := &in.ImportPath, &out.ImportPath + *out = new(string) + **out = **in + } + if in.ImportedFileChunkSize != nil { + in, out := &in.ImportedFileChunkSize, &out.ImportedFileChunkSize + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(LogConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PerUnitStorageThroughput != nil { + in, out := &in.PerUnitStorageThroughput, &out.PerUnitStorageThroughput + *out = new(float64) + **out = **in + } + if in.RootSquashConfiguration != nil { + in, out := &in.RootSquashConfiguration, &out.RootSquashConfiguration + *out = new(RootSquashConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, 
len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageCapacity != nil { + in, out := &in.StorageCapacity, &out.StorageCapacity + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WeeklyMaintenanceStartTime != nil { + in, out := &in.WeeklyMaintenanceStartTime, &out.WeeklyMaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreFileSystemInitParameters. +func (in *LustreFileSystemInitParameters) DeepCopy() *LustreFileSystemInitParameters { + if in == nil { + return nil + } + out := new(LustreFileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LustreFileSystemList) DeepCopyInto(out *LustreFileSystemList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LustreFileSystem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreFileSystemList. +func (in *LustreFileSystemList) DeepCopy() *LustreFileSystemList { + if in == nil { + return nil + } + out := new(LustreFileSystemList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LustreFileSystemList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LustreFileSystemObservation) DeepCopyInto(out *LustreFileSystemObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoImportPolicy != nil { + in, out := &in.AutoImportPolicy, &out.AutoImportPolicy + *out = new(string) + **out = **in + } + if in.AutomaticBackupRetentionDays != nil { + in, out := &in.AutomaticBackupRetentionDays, &out.AutomaticBackupRetentionDays + *out = new(float64) + **out = **in + } + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.CopyTagsToBackups != nil { + in, out := &in.CopyTagsToBackups, &out.CopyTagsToBackups + *out = new(bool) + **out = **in + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DailyAutomaticBackupStartTime != nil { + in, out := &in.DailyAutomaticBackupStartTime, &out.DailyAutomaticBackupStartTime + *out = new(string) + **out = **in + } + if 
in.DataCompressionType != nil { + in, out := &in.DataCompressionType, &out.DataCompressionType + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } + if in.DriveCacheType != nil { + in, out := &in.DriveCacheType, &out.DriveCacheType + *out = new(string) + **out = **in + } + if in.ExportPath != nil { + in, out := &in.ExportPath, &out.ExportPath + *out = new(string) + **out = **in + } + if in.FileSystemTypeVersion != nil { + in, out := &in.FileSystemTypeVersion, &out.FileSystemTypeVersion + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImportPath != nil { + in, out := &in.ImportPath, &out.ImportPath + *out = new(string) + **out = **in + } + if in.ImportedFileChunkSize != nil { + in, out := &in.ImportedFileChunkSize, &out.ImportedFileChunkSize + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(LogConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.MountName != nil { + in, out := &in.MountName, &out.MountName + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID + *out = new(string) + **out = **in + } + if in.PerUnitStorageThroughput != nil { + in, out := &in.PerUnitStorageThroughput, &out.PerUnitStorageThroughput + *out = new(float64) + **out = **in + } + if in.RootSquashConfiguration != nil { + in, out := &in.RootSquashConfiguration, 
&out.RootSquashConfiguration + *out = new(RootSquashConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageCapacity != nil { + in, out := &in.StorageCapacity, &out.StorageCapacity + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.WeeklyMaintenanceStartTime != nil { + in, out := &in.WeeklyMaintenanceStartTime, &out.WeeklyMaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreFileSystemObservation. 
+func (in *LustreFileSystemObservation) DeepCopy() *LustreFileSystemObservation { + if in == nil { + return nil + } + out := new(LustreFileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LustreFileSystemParameters) DeepCopyInto(out *LustreFileSystemParameters) { + *out = *in + if in.AutoImportPolicy != nil { + in, out := &in.AutoImportPolicy, &out.AutoImportPolicy + *out = new(string) + **out = **in + } + if in.AutomaticBackupRetentionDays != nil { + in, out := &in.AutomaticBackupRetentionDays, &out.AutomaticBackupRetentionDays + *out = new(float64) + **out = **in + } + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.CopyTagsToBackups != nil { + in, out := &in.CopyTagsToBackups, &out.CopyTagsToBackups + *out = new(bool) + **out = **in + } + if in.DailyAutomaticBackupStartTime != nil { + in, out := &in.DailyAutomaticBackupStartTime, &out.DailyAutomaticBackupStartTime + *out = new(string) + **out = **in + } + if in.DataCompressionType != nil { + in, out := &in.DataCompressionType, &out.DataCompressionType + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } + if in.DriveCacheType != nil { + in, out := &in.DriveCacheType, &out.DriveCacheType + *out = new(string) + **out = **in + } + if in.ExportPath != nil { + in, out := &in.ExportPath, &out.ExportPath + *out = new(string) + **out = **in + } + if in.FileSystemTypeVersion != nil { + in, out := &in.FileSystemTypeVersion, &out.FileSystemTypeVersion + *out = new(string) + **out = **in + } + if in.ImportPath != nil { + in, out := &in.ImportPath, &out.ImportPath + *out = new(string) + **out = **in + } + if in.ImportedFileChunkSize != nil { + in, out := &in.ImportedFileChunkSize, &out.ImportedFileChunkSize + *out = 
new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LogConfiguration != nil { + in, out := &in.LogConfiguration, &out.LogConfiguration + *out = new(LogConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.PerUnitStorageThroughput != nil { + in, out := &in.PerUnitStorageThroughput, &out.PerUnitStorageThroughput + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RootSquashConfiguration != nil { + in, out := &in.RootSquashConfiguration, &out.RootSquashConfiguration + *out = new(RootSquashConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageCapacity != nil { + in, out := &in.StorageCapacity, &out.StorageCapacity + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = 
make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WeeklyMaintenanceStartTime != nil { + in, out := &in.WeeklyMaintenanceStartTime, &out.WeeklyMaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreFileSystemParameters. +func (in *LustreFileSystemParameters) DeepCopy() *LustreFileSystemParameters { + if in == nil { + return nil + } + out := new(LustreFileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LustreFileSystemSpec) DeepCopyInto(out *LustreFileSystemSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreFileSystemSpec. 
+func (in *LustreFileSystemSpec) DeepCopy() *LustreFileSystemSpec { + if in == nil { + return nil + } + out := new(LustreFileSystemSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LustreFileSystemStatus) DeepCopyInto(out *LustreFileSystemStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreFileSystemStatus. +func (in *LustreFileSystemStatus) DeepCopy() *LustreFileSystemStatus { + if in == nil { + return nil + } + out := new(LustreFileSystemStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementInitParameters) DeepCopyInto(out *ManagementInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementInitParameters. +func (in *ManagementInitParameters) DeepCopy() *ManagementInitParameters { + if in == nil { + return nil + } + out := new(ManagementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementObservation) DeepCopyInto(out *ManagementObservation) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementObservation. 
+func (in *ManagementObservation) DeepCopy() *ManagementObservation { + if in == nil { + return nil + } + out := new(ManagementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementParameters) DeepCopyInto(out *ManagementParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementParameters. +func (in *ManagementParameters) DeepCopy() *ManagementParameters { + if in == nil { + return nil + } + out := new(ManagementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSInitParameters) DeepCopyInto(out *NFSInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSInitParameters. +func (in *NFSInitParameters) DeepCopy() *NFSInitParameters { + if in == nil { + return nil + } + out := new(NFSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSObservation) DeepCopyInto(out *NFSObservation) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSObservation. 
+func (in *NFSObservation) DeepCopy() *NFSObservation { + if in == nil { + return nil + } + out := new(NFSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSParameters) DeepCopyInto(out *NFSParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSParameters. +func (in *NFSParameters) DeepCopy() *NFSParameters { + if in == nil { + return nil + } + out := new(NFSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapFileSystem) DeepCopyInto(out *OntapFileSystem) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapFileSystem. +func (in *OntapFileSystem) DeepCopy() *OntapFileSystem { + if in == nil { + return nil + } + out := new(OntapFileSystem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OntapFileSystem) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OntapFileSystemInitParameters) DeepCopyInto(out *OntapFileSystemInitParameters) { + *out = *in + if in.AutomaticBackupRetentionDays != nil { + in, out := &in.AutomaticBackupRetentionDays, &out.AutomaticBackupRetentionDays + *out = new(float64) + **out = **in + } + if in.DailyAutomaticBackupStartTime != nil { + in, out := &in.DailyAutomaticBackupStartTime, &out.DailyAutomaticBackupStartTime + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } + if in.DiskIopsConfiguration != nil { + in, out := &in.DiskIopsConfiguration, &out.DiskIopsConfiguration + *out = new(DiskIopsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EndpointIPAddressRange != nil { + in, out := &in.EndpointIPAddressRange, &out.EndpointIPAddressRange + *out = new(string) + **out = **in + } + if in.FSXAdminPasswordSecretRef != nil { + in, out := &in.FSXAdminPasswordSecretRef, &out.FSXAdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.HaPairs != nil { + in, out := &in.HaPairs, &out.HaPairs + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreferredSubnetID != nil { + in, out := &in.PreferredSubnetID, &out.PreferredSubnetID + *out = new(string) + **out = **in + } + if in.PreferredSubnetIDRef != nil { + in, out := &in.PreferredSubnetIDRef, &out.PreferredSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PreferredSubnetIDSelector != nil { + in, out := &in.PreferredSubnetIDSelector, &out.PreferredSubnetIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageCapacity != nil { + in, out := &in.StorageCapacity, &out.StorageCapacity + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = 
new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputCapacity != nil { + in, out := &in.ThroughputCapacity, &out.ThroughputCapacity + *out = new(float64) + **out = **in + } + if in.ThroughputCapacityPerHaPair != nil { + in, out := &in.ThroughputCapacityPerHaPair, &out.ThroughputCapacityPerHaPair + *out = new(float64) + **out = **in + } + if in.WeeklyMaintenanceStartTime != nil { + in, out := &in.WeeklyMaintenanceStartTime, &out.WeeklyMaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapFileSystemInitParameters. +func (in *OntapFileSystemInitParameters) DeepCopy() *OntapFileSystemInitParameters { + if in == nil { + return nil + } + out := new(OntapFileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapFileSystemList) DeepCopyInto(out *OntapFileSystemList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OntapFileSystem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapFileSystemList. +func (in *OntapFileSystemList) DeepCopy() *OntapFileSystemList { + if in == nil { + return nil + } + out := new(OntapFileSystemList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OntapFileSystemList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OntapFileSystemObservation) DeepCopyInto(out *OntapFileSystemObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutomaticBackupRetentionDays != nil { + in, out := &in.AutomaticBackupRetentionDays, &out.AutomaticBackupRetentionDays + *out = new(float64) + **out = **in + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DailyAutomaticBackupStartTime != nil { + in, out := &in.DailyAutomaticBackupStartTime, &out.DailyAutomaticBackupStartTime + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } + if in.DiskIopsConfiguration != nil { + in, out := &in.DiskIopsConfiguration, &out.DiskIopsConfiguration + *out = new(DiskIopsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.EndpointIPAddressRange != nil { + in, out := &in.EndpointIPAddressRange, &out.EndpointIPAddressRange + *out = new(string) + **out = **in + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]EndpointsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HaPairs != nil { + in, out := &in.HaPairs, &out.HaPairs + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID + *out = new(string) + **out = **in + } + if in.PreferredSubnetID != nil { + in, out := 
&in.PreferredSubnetID, &out.PreferredSubnetID + *out = new(string) + **out = **in + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageCapacity != nil { + in, out := &in.StorageCapacity, &out.StorageCapacity + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputCapacity != nil { + in, out := &in.ThroughputCapacity, &out.ThroughputCapacity + *out = new(float64) + **out = **in + } + if in.ThroughputCapacityPerHaPair != nil { + in, out := &in.ThroughputCapacityPerHaPair, &out.ThroughputCapacityPerHaPair + *out = 
new(float64) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.WeeklyMaintenanceStartTime != nil { + in, out := &in.WeeklyMaintenanceStartTime, &out.WeeklyMaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapFileSystemObservation. +func (in *OntapFileSystemObservation) DeepCopy() *OntapFileSystemObservation { + if in == nil { + return nil + } + out := new(OntapFileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapFileSystemParameters) DeepCopyInto(out *OntapFileSystemParameters) { + *out = *in + if in.AutomaticBackupRetentionDays != nil { + in, out := &in.AutomaticBackupRetentionDays, &out.AutomaticBackupRetentionDays + *out = new(float64) + **out = **in + } + if in.DailyAutomaticBackupStartTime != nil { + in, out := &in.DailyAutomaticBackupStartTime, &out.DailyAutomaticBackupStartTime + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } + if in.DiskIopsConfiguration != nil { + in, out := &in.DiskIopsConfiguration, &out.DiskIopsConfiguration + *out = new(DiskIopsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.EndpointIPAddressRange != nil { + in, out := &in.EndpointIPAddressRange, &out.EndpointIPAddressRange + *out = new(string) + **out = **in + } + if in.FSXAdminPasswordSecretRef != nil { + in, out := &in.FSXAdminPasswordSecretRef, &out.FSXAdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.HaPairs != nil { + in, out := &in.HaPairs, &out.HaPairs + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + 
} + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreferredSubnetID != nil { + in, out := &in.PreferredSubnetID, &out.PreferredSubnetID + *out = new(string) + **out = **in + } + if in.PreferredSubnetIDRef != nil { + in, out := &in.PreferredSubnetIDRef, &out.PreferredSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PreferredSubnetIDSelector != nil { + in, out := &in.PreferredSubnetIDSelector, &out.PreferredSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageCapacity != nil { + in, out := &in.StorageCapacity, &out.StorageCapacity + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIDRefs 
!= nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputCapacity != nil { + in, out := &in.ThroughputCapacity, &out.ThroughputCapacity + *out = new(float64) + **out = **in + } + if in.ThroughputCapacityPerHaPair != nil { + in, out := &in.ThroughputCapacityPerHaPair, &out.ThroughputCapacityPerHaPair + *out = new(float64) + **out = **in + } + if in.WeeklyMaintenanceStartTime != nil { + in, out := &in.WeeklyMaintenanceStartTime, &out.WeeklyMaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapFileSystemParameters. +func (in *OntapFileSystemParameters) DeepCopy() *OntapFileSystemParameters { + if in == nil { + return nil + } + out := new(OntapFileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OntapFileSystemSpec) DeepCopyInto(out *OntapFileSystemSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapFileSystemSpec. +func (in *OntapFileSystemSpec) DeepCopy() *OntapFileSystemSpec { + if in == nil { + return nil + } + out := new(OntapFileSystemSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapFileSystemStatus) DeepCopyInto(out *OntapFileSystemStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapFileSystemStatus. +func (in *OntapFileSystemStatus) DeepCopy() *OntapFileSystemStatus { + if in == nil { + return nil + } + out := new(OntapFileSystemStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapStorageVirtualMachine) DeepCopyInto(out *OntapStorageVirtualMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapStorageVirtualMachine. +func (in *OntapStorageVirtualMachine) DeepCopy() *OntapStorageVirtualMachine { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OntapStorageVirtualMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapStorageVirtualMachineEndpointsInitParameters) DeepCopyInto(out *OntapStorageVirtualMachineEndpointsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapStorageVirtualMachineEndpointsInitParameters. +func (in *OntapStorageVirtualMachineEndpointsInitParameters) DeepCopy() *OntapStorageVirtualMachineEndpointsInitParameters { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachineEndpointsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapStorageVirtualMachineEndpointsObservation) DeepCopyInto(out *OntapStorageVirtualMachineEndpointsObservation) { + *out = *in + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = make([]ISCSIObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Management != nil { + in, out := &in.Management, &out.Management + *out = make([]EndpointsManagementObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = make([]NFSObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SMB != nil { + in, out := &in.SMB, &out.SMB + *out = make([]SMBObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapStorageVirtualMachineEndpointsObservation. 
+func (in *OntapStorageVirtualMachineEndpointsObservation) DeepCopy() *OntapStorageVirtualMachineEndpointsObservation { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachineEndpointsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapStorageVirtualMachineEndpointsParameters) DeepCopyInto(out *OntapStorageVirtualMachineEndpointsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapStorageVirtualMachineEndpointsParameters. +func (in *OntapStorageVirtualMachineEndpointsParameters) DeepCopy() *OntapStorageVirtualMachineEndpointsParameters { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachineEndpointsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OntapStorageVirtualMachineInitParameters) DeepCopyInto(out *OntapStorageVirtualMachineInitParameters) { + *out = *in + if in.ActiveDirectoryConfiguration != nil { + in, out := &in.ActiveDirectoryConfiguration, &out.ActiveDirectoryConfiguration + *out = new(ActiveDirectoryConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemIDRef != nil { + in, out := &in.FileSystemIDRef, &out.FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileSystemIDSelector != nil { + in, out := &in.FileSystemIDSelector, &out.FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RootVolumeSecurityStyle != nil { + in, out := &in.RootVolumeSecurityStyle, &out.RootVolumeSecurityStyle + *out = new(string) + **out = **in + } + if in.SvmAdminPasswordSecretRef != nil { + in, out := &in.SvmAdminPasswordSecretRef, &out.SvmAdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapStorageVirtualMachineInitParameters. +func (in *OntapStorageVirtualMachineInitParameters) DeepCopy() *OntapStorageVirtualMachineInitParameters { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OntapStorageVirtualMachineList) DeepCopyInto(out *OntapStorageVirtualMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OntapStorageVirtualMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapStorageVirtualMachineList. +func (in *OntapStorageVirtualMachineList) DeepCopy() *OntapStorageVirtualMachineList { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OntapStorageVirtualMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OntapStorageVirtualMachineObservation) DeepCopyInto(out *OntapStorageVirtualMachineObservation) { + *out = *in + if in.ActiveDirectoryConfiguration != nil { + in, out := &in.ActiveDirectoryConfiguration, &out.ActiveDirectoryConfiguration + *out = new(ActiveDirectoryConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]OntapStorageVirtualMachineEndpointsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RootVolumeSecurityStyle != nil { + in, out := &in.RootVolumeSecurityStyle, &out.RootVolumeSecurityStyle + *out = new(string) + **out = **in + } + if in.Subtype != nil { + in, out := &in.Subtype, &out.Subtype + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UUID != nil { + in, out := &in.UUID, &out.UUID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new OntapStorageVirtualMachineObservation. +func (in *OntapStorageVirtualMachineObservation) DeepCopy() *OntapStorageVirtualMachineObservation { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapStorageVirtualMachineParameters) DeepCopyInto(out *OntapStorageVirtualMachineParameters) { + *out = *in + if in.ActiveDirectoryConfiguration != nil { + in, out := &in.ActiveDirectoryConfiguration, &out.ActiveDirectoryConfiguration + *out = new(ActiveDirectoryConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemIDRef != nil { + in, out := &in.FileSystemIDRef, &out.FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileSystemIDSelector != nil { + in, out := &in.FileSystemIDSelector, &out.FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RootVolumeSecurityStyle != nil { + in, out := &in.RootVolumeSecurityStyle, &out.RootVolumeSecurityStyle + *out = new(string) + **out = **in + } + if in.SvmAdminPasswordSecretRef != nil { + in, out := &in.SvmAdminPasswordSecretRef, &out.SvmAdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapStorageVirtualMachineParameters. +func (in *OntapStorageVirtualMachineParameters) DeepCopy() *OntapStorageVirtualMachineParameters { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapStorageVirtualMachineSpec) DeepCopyInto(out *OntapStorageVirtualMachineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapStorageVirtualMachineSpec. +func (in *OntapStorageVirtualMachineSpec) DeepCopy() *OntapStorageVirtualMachineSpec { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OntapStorageVirtualMachineStatus) DeepCopyInto(out *OntapStorageVirtualMachineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OntapStorageVirtualMachineStatus. +func (in *OntapStorageVirtualMachineStatus) DeepCopy() *OntapStorageVirtualMachineStatus { + if in == nil { + return nil + } + out := new(OntapStorageVirtualMachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RootSquashConfigurationInitParameters) DeepCopyInto(out *RootSquashConfigurationInitParameters) { + *out = *in + if in.NoSquashNids != nil { + in, out := &in.NoSquashNids, &out.NoSquashNids + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RootSquash != nil { + in, out := &in.RootSquash, &out.RootSquash + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootSquashConfigurationInitParameters. +func (in *RootSquashConfigurationInitParameters) DeepCopy() *RootSquashConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RootSquashConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootSquashConfigurationObservation) DeepCopyInto(out *RootSquashConfigurationObservation) { + *out = *in + if in.NoSquashNids != nil { + in, out := &in.NoSquashNids, &out.NoSquashNids + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RootSquash != nil { + in, out := &in.RootSquash, &out.RootSquash + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootSquashConfigurationObservation. +func (in *RootSquashConfigurationObservation) DeepCopy() *RootSquashConfigurationObservation { + if in == nil { + return nil + } + out := new(RootSquashConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RootSquashConfigurationParameters) DeepCopyInto(out *RootSquashConfigurationParameters) { + *out = *in + if in.NoSquashNids != nil { + in, out := &in.NoSquashNids, &out.NoSquashNids + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RootSquash != nil { + in, out := &in.RootSquash, &out.RootSquash + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootSquashConfigurationParameters. +func (in *RootSquashConfigurationParameters) DeepCopy() *RootSquashConfigurationParameters { + if in == nil { + return nil + } + out := new(RootSquashConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InitParameters) DeepCopyInto(out *S3InitParameters) { + *out = *in + if in.AutoExportPolicy != nil { + in, out := &in.AutoExportPolicy, &out.AutoExportPolicy + *out = new(AutoExportPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoImportPolicy != nil { + in, out := &in.AutoImportPolicy, &out.AutoImportPolicy + *out = new(AutoImportPolicyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InitParameters. +func (in *S3InitParameters) DeepCopy() *S3InitParameters { + if in == nil { + return nil + } + out := new(S3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Observation) DeepCopyInto(out *S3Observation) { + *out = *in + if in.AutoExportPolicy != nil { + in, out := &in.AutoExportPolicy, &out.AutoExportPolicy + *out = new(AutoExportPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoImportPolicy != nil { + in, out := &in.AutoImportPolicy, &out.AutoImportPolicy + *out = new(AutoImportPolicyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Observation. +func (in *S3Observation) DeepCopy() *S3Observation { + if in == nil { + return nil + } + out := new(S3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Parameters) DeepCopyInto(out *S3Parameters) { + *out = *in + if in.AutoExportPolicy != nil { + in, out := &in.AutoExportPolicy, &out.AutoExportPolicy + *out = new(AutoExportPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoImportPolicy != nil { + in, out := &in.AutoImportPolicy, &out.AutoImportPolicy + *out = new(AutoImportPolicyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Parameters. +func (in *S3Parameters) DeepCopy() *S3Parameters { + if in == nil { + return nil + } + out := new(S3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMBInitParameters) DeepCopyInto(out *SMBInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMBInitParameters. 
+func (in *SMBInitParameters) DeepCopy() *SMBInitParameters { + if in == nil { + return nil + } + out := new(SMBInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMBObservation) DeepCopyInto(out *SMBObservation) { + *out = *in + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMBObservation. +func (in *SMBObservation) DeepCopy() *SMBObservation { + if in == nil { + return nil + } + out := new(SMBObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMBParameters) DeepCopyInto(out *SMBParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMBParameters. +func (in *SMBParameters) DeepCopy() *SMBParameters { + if in == nil { + return nil + } + out := new(SMBParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedActiveDirectoryConfigurationInitParameters) DeepCopyInto(out *SelfManagedActiveDirectoryConfigurationInitParameters) { + *out = *in + if in.DNSIps != nil { + in, out := &in.DNSIps, &out.DNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.FileSystemAdministratorsGroup != nil { + in, out := &in.FileSystemAdministratorsGroup, &out.FileSystemAdministratorsGroup + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedActiveDirectoryConfigurationInitParameters. +func (in *SelfManagedActiveDirectoryConfigurationInitParameters) DeepCopy() *SelfManagedActiveDirectoryConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SelfManagedActiveDirectoryConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedActiveDirectoryConfigurationObservation) DeepCopyInto(out *SelfManagedActiveDirectoryConfigurationObservation) { + *out = *in + if in.DNSIps != nil { + in, out := &in.DNSIps, &out.DNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.FileSystemAdministratorsGroup != nil { + in, out := &in.FileSystemAdministratorsGroup, &out.FileSystemAdministratorsGroup + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedActiveDirectoryConfigurationObservation. +func (in *SelfManagedActiveDirectoryConfigurationObservation) DeepCopy() *SelfManagedActiveDirectoryConfigurationObservation { + if in == nil { + return nil + } + out := new(SelfManagedActiveDirectoryConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedActiveDirectoryConfigurationParameters) DeepCopyInto(out *SelfManagedActiveDirectoryConfigurationParameters) { + *out = *in + if in.DNSIps != nil { + in, out := &in.DNSIps, &out.DNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.FileSystemAdministratorsGroup != nil { + in, out := &in.FileSystemAdministratorsGroup, &out.FileSystemAdministratorsGroup + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedActiveDirectoryConfigurationParameters. +func (in *SelfManagedActiveDirectoryConfigurationParameters) DeepCopy() *SelfManagedActiveDirectoryConfigurationParameters { + if in == nil { + return nil + } + out := new(SelfManagedActiveDirectoryConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedActiveDirectoryInitParameters) DeepCopyInto(out *SelfManagedActiveDirectoryInitParameters) { + *out = *in + if in.DNSIps != nil { + in, out := &in.DNSIps, &out.DNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.FileSystemAdministratorsGroup != nil { + in, out := &in.FileSystemAdministratorsGroup, &out.FileSystemAdministratorsGroup + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedActiveDirectoryInitParameters. +func (in *SelfManagedActiveDirectoryInitParameters) DeepCopy() *SelfManagedActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(SelfManagedActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedActiveDirectoryObservation) DeepCopyInto(out *SelfManagedActiveDirectoryObservation) { + *out = *in + if in.DNSIps != nil { + in, out := &in.DNSIps, &out.DNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.FileSystemAdministratorsGroup != nil { + in, out := &in.FileSystemAdministratorsGroup, &out.FileSystemAdministratorsGroup + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedActiveDirectoryObservation. +func (in *SelfManagedActiveDirectoryObservation) DeepCopy() *SelfManagedActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(SelfManagedActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedActiveDirectoryParameters) DeepCopyInto(out *SelfManagedActiveDirectoryParameters) { + *out = *in + if in.DNSIps != nil { + in, out := &in.DNSIps, &out.DNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.FileSystemAdministratorsGroup != nil { + in, out := &in.FileSystemAdministratorsGroup, &out.FileSystemAdministratorsGroup + *out = new(string) + **out = **in + } + if in.OrganizationalUnitDistinguishedName != nil { + in, out := &in.OrganizationalUnitDistinguishedName, &out.OrganizationalUnitDistinguishedName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedActiveDirectoryParameters. +func (in *SelfManagedActiveDirectoryParameters) DeepCopy() *SelfManagedActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(SelfManagedActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFileSystem) DeepCopyInto(out *WindowsFileSystem) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystem. 
+func (in *WindowsFileSystem) DeepCopy() *WindowsFileSystem { + if in == nil { + return nil + } + out := new(WindowsFileSystem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsFileSystem) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFileSystemDiskIopsConfigurationInitParameters) DeepCopyInto(out *WindowsFileSystemDiskIopsConfigurationInitParameters) { + *out = *in + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystemDiskIopsConfigurationInitParameters. +func (in *WindowsFileSystemDiskIopsConfigurationInitParameters) DeepCopy() *WindowsFileSystemDiskIopsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(WindowsFileSystemDiskIopsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFileSystemDiskIopsConfigurationObservation) DeepCopyInto(out *WindowsFileSystemDiskIopsConfigurationObservation) { + *out = *in + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystemDiskIopsConfigurationObservation. 
+func (in *WindowsFileSystemDiskIopsConfigurationObservation) DeepCopy() *WindowsFileSystemDiskIopsConfigurationObservation { + if in == nil { + return nil + } + out := new(WindowsFileSystemDiskIopsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFileSystemDiskIopsConfigurationParameters) DeepCopyInto(out *WindowsFileSystemDiskIopsConfigurationParameters) { + *out = *in + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystemDiskIopsConfigurationParameters. +func (in *WindowsFileSystemDiskIopsConfigurationParameters) DeepCopy() *WindowsFileSystemDiskIopsConfigurationParameters { + if in == nil { + return nil + } + out := new(WindowsFileSystemDiskIopsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFileSystemInitParameters) DeepCopyInto(out *WindowsFileSystemInitParameters) { + *out = *in + if in.ActiveDirectoryID != nil { + in, out := &in.ActiveDirectoryID, &out.ActiveDirectoryID + *out = new(string) + **out = **in + } + if in.ActiveDirectoryIDRef != nil { + in, out := &in.ActiveDirectoryIDRef, &out.ActiveDirectoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ActiveDirectoryIDSelector != nil { + in, out := &in.ActiveDirectoryIDSelector, &out.ActiveDirectoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AuditLogConfiguration != nil { + in, out := &in.AuditLogConfiguration, &out.AuditLogConfiguration + *out = new(AuditLogConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutomaticBackupRetentionDays != nil { + in, out := &in.AutomaticBackupRetentionDays, &out.AutomaticBackupRetentionDays + *out = new(float64) + **out = **in + } + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.CopyTagsToBackups != nil { + in, out := &in.CopyTagsToBackups, &out.CopyTagsToBackups + *out = new(bool) + **out = **in + } + if in.DailyAutomaticBackupStartTime != nil { + in, out := &in.DailyAutomaticBackupStartTime, &out.DailyAutomaticBackupStartTime + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } + if in.DiskIopsConfiguration != nil { + in, out := &in.DiskIopsConfiguration, &out.DiskIopsConfiguration + *out = new(WindowsFileSystemDiskIopsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = 
**in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreferredSubnetID != nil { + in, out := &in.PreferredSubnetID, &out.PreferredSubnetID + *out = new(string) + **out = **in + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SelfManagedActiveDirectory != nil { + in, out := &in.SelfManagedActiveDirectory, &out.SelfManagedActiveDirectory + *out = new(SelfManagedActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalBackup != nil { + in, out := &in.SkipFinalBackup, &out.SkipFinalBackup + *out = new(bool) + **out = **in + } + if in.StorageCapacity != nil { + in, out := &in.StorageCapacity, &out.StorageCapacity + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + 
in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputCapacity != nil { + in, out := &in.ThroughputCapacity, &out.ThroughputCapacity + *out = new(float64) + **out = **in + } + if in.WeeklyMaintenanceStartTime != nil { + in, out := &in.WeeklyMaintenanceStartTime, &out.WeeklyMaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystemInitParameters. +func (in *WindowsFileSystemInitParameters) DeepCopy() *WindowsFileSystemInitParameters { + if in == nil { + return nil + } + out := new(WindowsFileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFileSystemList) DeepCopyInto(out *WindowsFileSystemList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WindowsFileSystem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystemList. 
+func (in *WindowsFileSystemList) DeepCopy() *WindowsFileSystemList { + if in == nil { + return nil + } + out := new(WindowsFileSystemList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsFileSystemList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFileSystemObservation) DeepCopyInto(out *WindowsFileSystemObservation) { + *out = *in + if in.ActiveDirectoryID != nil { + in, out := &in.ActiveDirectoryID, &out.ActiveDirectoryID + *out = new(string) + **out = **in + } + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AuditLogConfiguration != nil { + in, out := &in.AuditLogConfiguration, &out.AuditLogConfiguration + *out = new(AuditLogConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.AutomaticBackupRetentionDays != nil { + in, out := &in.AutomaticBackupRetentionDays, &out.AutomaticBackupRetentionDays + *out = new(float64) + **out = **in + } + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.CopyTagsToBackups != nil { + in, out := &in.CopyTagsToBackups, &out.CopyTagsToBackups + *out = new(bool) + **out = **in + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DailyAutomaticBackupStartTime != nil { + in, out := &in.DailyAutomaticBackupStartTime, &out.DailyAutomaticBackupStartTime + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { 
+ in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } + if in.DiskIopsConfiguration != nil { + in, out := &in.DiskIopsConfiguration, &out.DiskIopsConfiguration + *out = new(WindowsFileSystemDiskIopsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OwnerID != nil { + in, out := &in.OwnerID, &out.OwnerID + *out = new(string) + **out = **in + } + if in.PreferredFileServerIP != nil { + in, out := &in.PreferredFileServerIP, &out.PreferredFileServerIP + *out = new(string) + **out = **in + } + if in.PreferredSubnetID != nil { + in, out := &in.PreferredSubnetID, &out.PreferredSubnetID + *out = new(string) + **out = **in + } + if in.RemoteAdministrationEndpoint != nil { + in, out := &in.RemoteAdministrationEndpoint, &out.RemoteAdministrationEndpoint + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SelfManagedActiveDirectory != nil { + in, out := &in.SelfManagedActiveDirectory, &out.SelfManagedActiveDirectory + *out = new(SelfManagedActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalBackup != nil { + in, out := &in.SkipFinalBackup, &out.SkipFinalBackup + *out = new(bool) + **out = **in + } + if in.StorageCapacity != nil { + in, out := &in.StorageCapacity, &out.StorageCapacity + *out = new(float64) 
+ **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputCapacity != nil { + in, out := &in.ThroughputCapacity, &out.ThroughputCapacity + *out = new(float64) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.WeeklyMaintenanceStartTime != nil { + in, out := &in.WeeklyMaintenanceStartTime, &out.WeeklyMaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystemObservation. +func (in *WindowsFileSystemObservation) DeepCopy() *WindowsFileSystemObservation { + if in == nil { + return nil + } + out := new(WindowsFileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFileSystemParameters) DeepCopyInto(out *WindowsFileSystemParameters) { + *out = *in + if in.ActiveDirectoryID != nil { + in, out := &in.ActiveDirectoryID, &out.ActiveDirectoryID + *out = new(string) + **out = **in + } + if in.ActiveDirectoryIDRef != nil { + in, out := &in.ActiveDirectoryIDRef, &out.ActiveDirectoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ActiveDirectoryIDSelector != nil { + in, out := &in.ActiveDirectoryIDSelector, &out.ActiveDirectoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Aliases != nil { + in, out := &in.Aliases, &out.Aliases + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AuditLogConfiguration != nil { + in, out := &in.AuditLogConfiguration, &out.AuditLogConfiguration + *out = new(AuditLogConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.AutomaticBackupRetentionDays != nil { + in, out := &in.AutomaticBackupRetentionDays, &out.AutomaticBackupRetentionDays + *out = new(float64) + **out = **in + } + if in.BackupID != nil { + in, out := &in.BackupID, &out.BackupID + *out = new(string) + **out = **in + } + if in.CopyTagsToBackups != nil { + in, out := &in.CopyTagsToBackups, &out.CopyTagsToBackups + *out = new(bool) + **out = **in + } + if in.DailyAutomaticBackupStartTime != nil { + in, out := &in.DailyAutomaticBackupStartTime, &out.DailyAutomaticBackupStartTime + *out = new(string) + **out = **in + } + if in.DeploymentType != nil { + in, out := &in.DeploymentType, &out.DeploymentType + *out = new(string) + **out = **in + } + if in.DiskIopsConfiguration != nil { + in, out := &in.DiskIopsConfiguration, &out.DiskIopsConfiguration + *out = new(WindowsFileSystemDiskIopsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if 
in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PreferredSubnetID != nil { + in, out := &in.PreferredSubnetID, &out.PreferredSubnetID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SelfManagedActiveDirectory != nil { + in, out := &in.SelfManagedActiveDirectory, &out.SelfManagedActiveDirectory + *out = new(SelfManagedActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalBackup != nil { + in, out := &in.SkipFinalBackup, &out.SkipFinalBackup + *out = new(bool) + **out = **in + } + if in.StorageCapacity != nil { + in, out := &in.StorageCapacity, &out.StorageCapacity + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + 
*out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputCapacity != nil { + in, out := &in.ThroughputCapacity, &out.ThroughputCapacity + *out = new(float64) + **out = **in + } + if in.WeeklyMaintenanceStartTime != nil { + in, out := &in.WeeklyMaintenanceStartTime, &out.WeeklyMaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystemParameters. +func (in *WindowsFileSystemParameters) DeepCopy() *WindowsFileSystemParameters { + if in == nil { + return nil + } + out := new(WindowsFileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFileSystemSpec) DeepCopyInto(out *WindowsFileSystemSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystemSpec. +func (in *WindowsFileSystemSpec) DeepCopy() *WindowsFileSystemSpec { + if in == nil { + return nil + } + out := new(WindowsFileSystemSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFileSystemStatus) DeepCopyInto(out *WindowsFileSystemStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFileSystemStatus. +func (in *WindowsFileSystemStatus) DeepCopy() *WindowsFileSystemStatus { + if in == nil { + return nil + } + out := new(WindowsFileSystemStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/fsx/v1beta2/zz_generated.managed.go b/apis/fsx/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..cdb27ad84c --- /dev/null +++ b/apis/fsx/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataRepositoryAssociation. 
+func (mg *DataRepositoryAssociation) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataRepositoryAssociation. +func (mg *DataRepositoryAssociation) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LustreFileSystem. +func (mg *LustreFileSystem) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LustreFileSystem. +func (mg *LustreFileSystem) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LustreFileSystem. 
+func (mg *LustreFileSystem) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LustreFileSystem. +func (mg *LustreFileSystem) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LustreFileSystem. +func (mg *LustreFileSystem) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LustreFileSystem. +func (mg *LustreFileSystem) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LustreFileSystem. +func (mg *LustreFileSystem) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LustreFileSystem. +func (mg *LustreFileSystem) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LustreFileSystem. +func (mg *LustreFileSystem) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LustreFileSystem. +func (mg *LustreFileSystem) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LustreFileSystem. +func (mg *LustreFileSystem) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LustreFileSystem. +func (mg *LustreFileSystem) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this OntapFileSystem. +func (mg *OntapFileSystem) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this OntapFileSystem. 
+func (mg *OntapFileSystem) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this OntapFileSystem. +func (mg *OntapFileSystem) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this OntapFileSystem. +func (mg *OntapFileSystem) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this OntapFileSystem. +func (mg *OntapFileSystem) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this OntapFileSystem. +func (mg *OntapFileSystem) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this OntapFileSystem. +func (mg *OntapFileSystem) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this OntapFileSystem. +func (mg *OntapFileSystem) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this OntapFileSystem. +func (mg *OntapFileSystem) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this OntapFileSystem. +func (mg *OntapFileSystem) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this OntapFileSystem. +func (mg *OntapFileSystem) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this OntapFileSystem. +func (mg *OntapFileSystem) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this OntapStorageVirtualMachine. 
+func (mg *OntapStorageVirtualMachine) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this OntapStorageVirtualMachine. 
+func (mg *OntapStorageVirtualMachine) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WindowsFileSystem. +func (mg *WindowsFileSystem) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WindowsFileSystem. +func (mg *WindowsFileSystem) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WindowsFileSystem. +func (mg *WindowsFileSystem) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WindowsFileSystem. +func (mg *WindowsFileSystem) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WindowsFileSystem. +func (mg *WindowsFileSystem) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WindowsFileSystem. +func (mg *WindowsFileSystem) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WindowsFileSystem. +func (mg *WindowsFileSystem) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WindowsFileSystem. +func (mg *WindowsFileSystem) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WindowsFileSystem. 
+func (mg *WindowsFileSystem) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WindowsFileSystem. +func (mg *WindowsFileSystem) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WindowsFileSystem. +func (mg *WindowsFileSystem) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WindowsFileSystem. +func (mg *WindowsFileSystem) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/fsx/v1beta2/zz_generated.managedlist.go b/apis/fsx/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..3d6096d75a --- /dev/null +++ b/apis/fsx/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DataRepositoryAssociationList. +func (l *DataRepositoryAssociationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LustreFileSystemList. +func (l *LustreFileSystemList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this OntapFileSystemList. +func (l *OntapFileSystemList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this OntapStorageVirtualMachineList. 
+func (l *OntapStorageVirtualMachineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WindowsFileSystemList. +func (l *WindowsFileSystemList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/fsx/v1beta2/zz_generated.resolvers.go b/apis/fsx/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..debf735ab6 --- /dev/null +++ b/apis/fsx/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,576 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *DataRepositoryAssociation) ResolveReferences( // ResolveReferences of this DataRepositoryAssociation. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("fsx.aws.upbound.io", "v1beta2", "LustreFileSystem", "LustreFileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FileSystemID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.FileSystemIDRef, + Selector: mg.Spec.ForProvider.FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FileSystemID") + } + mg.Spec.ForProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FileSystemIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("fsx.aws.upbound.io", "v1beta2", "LustreFileSystem", "LustreFileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FileSystemID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.FileSystemIDRef, + Selector: mg.Spec.InitProvider.FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FileSystemID") + } + mg.Spec.InitProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FileSystemIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LustreFileSystem. 
+func (mg *LustreFileSystem) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", 
"Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetIDRefs, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetIds") + } + mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIDRefs, + 
Selector: mg.Spec.InitProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetIDRefs, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetIds") + } + mg.Spec.InitProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetIDRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this OntapFileSystem. 
+func (mg *OntapFileSystem) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PreferredSubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PreferredSubnetIDRef, + Selector: mg.Spec.ForProvider.PreferredSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PreferredSubnetID") + } + mg.Spec.ForProvider.PreferredSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PreferredSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", 
"SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetIDRefs, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetIds") + } + mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PreferredSubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PreferredSubnetIDRef, + Selector: mg.Spec.InitProvider.PreferredSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PreferredSubnetID") + } + mg.Spec.InitProvider.PreferredSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PreferredSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + 
mg.Spec.InitProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetIDRefs, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetIds") + } + mg.Spec.InitProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetIDRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this OntapStorageVirtualMachine. +func (mg *OntapStorageVirtualMachine) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("fsx.aws.upbound.io", "v1beta2", "OntapFileSystem", "OntapFileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FileSystemID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.FileSystemIDRef, + Selector: mg.Spec.ForProvider.FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FileSystemID") + } + mg.Spec.ForProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FileSystemIDRef 
= rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("fsx.aws.upbound.io", "v1beta2", "OntapFileSystem", "OntapFileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FileSystemID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.FileSystemIDRef, + Selector: mg.Spec.InitProvider.FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FileSystemID") + } + mg.Spec.InitProvider.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FileSystemIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this WindowsFileSystem. +func (mg *WindowsFileSystem) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ActiveDirectoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ActiveDirectoryIDRef, + Selector: mg.Spec.ForProvider.ActiveDirectoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ActiveDirectoryID") + } + mg.Spec.ForProvider.ActiveDirectoryID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ActiveDirectoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, 
reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetIDRefs, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetIds") + } + mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ActiveDirectoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ActiveDirectoryIDRef, + Selector: mg.Spec.InitProvider.ActiveDirectoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ActiveDirectoryID") + } + mg.Spec.InitProvider.ActiveDirectoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ActiveDirectoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + 
mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetIDRefs, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetIds") + } + mg.Spec.InitProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetIDRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/fsx/v1beta2/zz_groupversion_info.go b/apis/fsx/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..31e962e680 --- /dev/null +++ 
b/apis/fsx/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=fsx.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "fsx.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/fsx/v1beta2/zz_lustrefilesystem_terraformed.go b/apis/fsx/v1beta2/zz_lustrefilesystem_terraformed.go new file mode 100755 index 0000000000..5c785a4574 --- /dev/null +++ b/apis/fsx/v1beta2/zz_lustrefilesystem_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LustreFileSystem +func (mg *LustreFileSystem) GetTerraformResourceType() string { + return "aws_fsx_lustre_file_system" +} + +// GetConnectionDetailsMapping for this LustreFileSystem +func (tr *LustreFileSystem) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LustreFileSystem +func (tr *LustreFileSystem) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LustreFileSystem +func (tr *LustreFileSystem) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LustreFileSystem +func (tr *LustreFileSystem) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LustreFileSystem +func (tr *LustreFileSystem) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LustreFileSystem +func (tr *LustreFileSystem) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LustreFileSystem +func (tr *LustreFileSystem) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LustreFileSystem +func (tr *LustreFileSystem) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LustreFileSystem using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LustreFileSystem) LateInitialize(attrs []byte) (bool, error) { + params := &LustreFileSystemParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LustreFileSystem) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/fsx/v1beta2/zz_lustrefilesystem_types.go b/apis/fsx/v1beta2/zz_lustrefilesystem_types.go new file mode 100755 index 0000000000..043144ba56 --- /dev/null +++ b/apis/fsx/v1beta2/zz_lustrefilesystem_types.go @@ -0,0 +1,462 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LogConfigurationInitParameters struct { + + // The Amazon Resource Name (ARN) that specifies the destination of the logs. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. If you do not provide a destination, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/lustre log group. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Sets which data repository events are logged by Amazon FSx. Valid values are WARN_ONLY, FAILURE_ONLY, ERROR_ONLY, WARN_ERROR and DISABLED. Default value is DISABLED. + Level *string `json:"level,omitempty" tf:"level,omitempty"` +} + +type LogConfigurationObservation struct { + + // The Amazon Resource Name (ARN) that specifies the destination of the logs. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. If you do not provide a destination, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/lustre log group. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Sets which data repository events are logged by Amazon FSx. 
Valid values are WARN_ONLY, FAILURE_ONLY, ERROR_ONLY, WARN_ERROR and DISABLED. Default value is DISABLED. + Level *string `json:"level,omitempty" tf:"level,omitempty"` +} + +type LogConfigurationParameters struct { + + // The Amazon Resource Name (ARN) that specifies the destination of the logs. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. If you do not provide a destination, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/lustre log group. + // +kubebuilder:validation:Optional + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Sets which data repository events are logged by Amazon FSx. Valid values are WARN_ONLY, FAILURE_ONLY, ERROR_ONLY, WARN_ERROR and DISABLED. Default value is DISABLED. + // +kubebuilder:validation:Optional + Level *string `json:"level,omitempty" tf:"level,omitempty"` +} + +type LustreFileSystemInitParameters struct { + + // How Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. see Auto Import Data Repo for more details. Only supported on PERSISTENT_1 deployment types. + AutoImportPolicy *string `json:"autoImportPolicy,omitempty" tf:"auto_import_policy,omitempty"` + + // The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. only valid for PERSISTENT_1 and PERSISTENT_2 deployment_type. + AutomaticBackupRetentionDays *float64 `json:"automaticBackupRetentionDays,omitempty" tf:"automatic_backup_retention_days,omitempty"` + + // The ID of the source backup to create the filesystem from. + BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"` + + // A boolean flag indicating whether tags for the file system should be copied to backups. Applicable for PERSISTENT_1 and PERSISTENT_2 deployment_type. The default value is false. 
+ CopyTagsToBackups *bool `json:"copyTagsToBackups,omitempty" tf:"copy_tags_to_backups,omitempty"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. only valid for PERSISTENT_1 and PERSISTENT_2 deployment_type. Requires automatic_backup_retention_days to be set. + DailyAutomaticBackupStartTime *string `json:"dailyAutomaticBackupStartTime,omitempty" tf:"daily_automatic_backup_start_time,omitempty"` + + // Sets the data compression configuration for the file system. Valid values are LZ4 and NONE. Default value is NONE. Unsetting this value reverts the compression type back to NONE. + DataCompressionType *string `json:"dataCompressionType,omitempty" tf:"data_compression_type,omitempty"` + + // - The filesystem deployment type. One of: SCRATCH_1, SCRATCH_2, PERSISTENT_1, PERSISTENT_2. + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` + + // - The type of drive cache used by PERSISTENT_1 filesystems that are provisioned with HDD storage_type. Required for HDD storage_type, set to either READ or NONE. + DriveCacheType *string `json:"driveCacheType,omitempty" tf:"drive_cache_type,omitempty"` + + // S3 URI (with optional prefix) where the root of your Amazon FSx file system is exported. Can only be specified with import_path argument and the path must use the same Amazon S3 bucket as specified in import_path. Set equal to import_path to overwrite files on export. Defaults to s3://{IMPORT BUCKET}/FSxLustre{CREATION TIMESTAMP}. Only supported on PERSISTENT_1 deployment types. + ExportPath *string `json:"exportPath,omitempty" tf:"export_path,omitempty"` + + // Sets the Lustre version for the file system that you're creating. Valid values are 2.10 for SCRATCH_1, SCRATCH_2 and PERSISTENT_1 deployment types. Valid values for 2.12 include all deployment types. 
+ FileSystemTypeVersion *string `json:"fileSystemTypeVersion,omitempty" tf:"file_system_type_version,omitempty"` + + // S3 URI (with optional prefix) that you're using as the data repository for your FSx for Lustre file system. For example, s3://example-bucket/optional-prefix/. Only supported on PERSISTENT_1 deployment types. + ImportPath *string `json:"importPath,omitempty" tf:"import_path,omitempty"` + + // For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. Can only be specified with import_path argument. Defaults to 1024. Minimum of 1 and maximum of 512000. Only supported on PERSISTENT_1 deployment types. + ImportedFileChunkSize *float64 `json:"importedFileChunkSize,omitempty" tf:"imported_file_chunk_size,omitempty"` + + // ARN for the KMS Key to encrypt the file system at rest, applicable for PERSISTENT_1 and PERSISTENT_2 deployment_type. Defaults to an AWS managed KMS Key. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs. 
+ LogConfiguration *LogConfigurationInitParameters `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // - Describes the amount of read and write throughput for each 1 tebibyte of storage, in MB/s/TiB, required for the PERSISTENT_1 and PERSISTENT_2 deployment_type. Valid values for PERSISTENT_1 deployment_type and SSD storage_type are 50, 100, 200. Valid values for PERSISTENT_1 deployment_type and HDD storage_type are 12, 40. Valid values for PERSISTENT_2 deployment_type and SSD storage_type are 125, 250, 500, 1000. + PerUnitStorageThroughput *float64 `json:"perUnitStorageThroughput,omitempty" tf:"per_unit_storage_throughput,omitempty"` + + // The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user. + RootSquashConfiguration *RootSquashConfigurationInitParameters `json:"rootSquashConfiguration,omitempty" tf:"root_squash_configuration,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The storage capacity (GiB) of the file system. Minimum of 1200. See more details at Allowed values for Fsx storage capacity. Update is allowed only for SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 deployment types, See more details at Fsx Storage Capacity Update. Required when not creating filesystem for a backup. + StorageCapacity *float64 `json:"storageCapacity,omitempty" tf:"storage_capacity,omitempty"` + + // - The filesystem storage type. Either SSD or HDD, defaults to SSD. HDD is only supported on PERSISTENT_1 deployment types. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of IDs for the subnets that the file system will be accessible from. File systems currently support only one subnet. The file server is also launched in that subnet's Availability Zone. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The preferred start time (in d:HH:MM format) to perform weekly maintenance, in the UTC time zone. + WeeklyMaintenanceStartTime *string `json:"weeklyMaintenanceStartTime,omitempty" tf:"weekly_maintenance_start_time,omitempty"` +} + +type LustreFileSystemObservation struct { + + // Amazon Resource Name of the file system. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // How Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. see Auto Import Data Repo for more details. Only supported on PERSISTENT_1 deployment types. + AutoImportPolicy *string `json:"autoImportPolicy,omitempty" tf:"auto_import_policy,omitempty"` + + // The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. only valid for PERSISTENT_1 and PERSISTENT_2 deployment_type. + AutomaticBackupRetentionDays *float64 `json:"automaticBackupRetentionDays,omitempty" tf:"automatic_backup_retention_days,omitempty"` + + // The ID of the source backup to create the filesystem from. + BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"` + + // A boolean flag indicating whether tags for the file system should be copied to backups. Applicable for PERSISTENT_1 and PERSISTENT_2 deployment_type. The default value is false. + CopyTagsToBackups *bool `json:"copyTagsToBackups,omitempty" tf:"copy_tags_to_backups,omitempty"` + + // DNS name for the file system, e.g., fs-12345678.fsx.us-west-2.amazonaws.com + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. only valid for PERSISTENT_1 and PERSISTENT_2 deployment_type. 
Requires automatic_backup_retention_days to be set. + DailyAutomaticBackupStartTime *string `json:"dailyAutomaticBackupStartTime,omitempty" tf:"daily_automatic_backup_start_time,omitempty"` + + // Sets the data compression configuration for the file system. Valid values are LZ4 and NONE. Default value is NONE. Unsetting this value reverts the compression type back to NONE. + DataCompressionType *string `json:"dataCompressionType,omitempty" tf:"data_compression_type,omitempty"` + + // - The filesystem deployment type. One of: SCRATCH_1, SCRATCH_2, PERSISTENT_1, PERSISTENT_2. + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` + + // - The type of drive cache used by PERSISTENT_1 filesystems that are provisioned with HDD storage_type. Required for HDD storage_type, set to either READ or NONE. + DriveCacheType *string `json:"driveCacheType,omitempty" tf:"drive_cache_type,omitempty"` + + // S3 URI (with optional prefix) where the root of your Amazon FSx file system is exported. Can only be specified with import_path argument and the path must use the same Amazon S3 bucket as specified in import_path. Set equal to import_path to overwrite files on export. Defaults to s3://{IMPORT BUCKET}/FSxLustre{CREATION TIMESTAMP}. Only supported on PERSISTENT_1 deployment types. + ExportPath *string `json:"exportPath,omitempty" tf:"export_path,omitempty"` + + // Sets the Lustre version for the file system that you're creating. Valid values are 2.10 for SCRATCH_1, SCRATCH_2 and PERSISTENT_1 deployment types. Valid values for 2.12 include all deployment types. + FileSystemTypeVersion *string `json:"fileSystemTypeVersion,omitempty" tf:"file_system_type_version,omitempty"` + + // Identifier of the file system, e.g., fs-12345678 + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // S3 URI (with optional prefix) that you're using as the data repository for your FSx for Lustre file system. For example, s3://example-bucket/optional-prefix/. 
Only supported on PERSISTENT_1 deployment types. + ImportPath *string `json:"importPath,omitempty" tf:"import_path,omitempty"` + + // For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. Can only be specified with import_path argument. Defaults to 1024. Minimum of 1 and maximum of 512000. Only supported on PERSISTENT_1 deployment types. + ImportedFileChunkSize *float64 `json:"importedFileChunkSize,omitempty" tf:"imported_file_chunk_size,omitempty"` + + // ARN for the KMS Key to encrypt the file system at rest, applicable for PERSISTENT_1 and PERSISTENT_2 deployment_type. Defaults to an AWS managed KMS Key. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs. + LogConfiguration *LogConfigurationObservation `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // The value to be used when mounting the filesystem. + MountName *string `json:"mountName,omitempty" tf:"mount_name,omitempty"` + + // Set of Elastic Network Interface identifiers from which the file system is accessible. As explained in the documentation, the first network interface returned is the primary network interface. + NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // AWS account identifier that created the file system. + OwnerID *string `json:"ownerId,omitempty" tf:"owner_id,omitempty"` + + // - Describes the amount of read and write throughput for each 1 tebibyte of storage, in MB/s/TiB, required for the PERSISTENT_1 and PERSISTENT_2 deployment_type. Valid values for PERSISTENT_1 deployment_type and SSD storage_type are 50, 100, 200. 
Valid values for PERSISTENT_1 deployment_type and HDD storage_type are 12, 40. Valid values for PERSISTENT_2 deployment_type and SSD storage_type are 125, 250, 500, 1000. + PerUnitStorageThroughput *float64 `json:"perUnitStorageThroughput,omitempty" tf:"per_unit_storage_throughput,omitempty"` + + // The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user. + RootSquashConfiguration *RootSquashConfigurationObservation `json:"rootSquashConfiguration,omitempty" tf:"root_squash_configuration,omitempty"` + + // A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The storage capacity (GiB) of the file system. Minimum of 1200. See more details at Allowed values for Fsx storage capacity. Update is allowed only for SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 deployment types, See more details at Fsx Storage Capacity Update. Required when not creating filesystem for a backup. + StorageCapacity *float64 `json:"storageCapacity,omitempty" tf:"storage_capacity,omitempty"` + + // - The filesystem storage type. Either SSD or HDD, defaults to SSD. HDD is only supported on PERSISTENT_1 deployment types. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // A list of IDs for the subnets that the file system will be accessible from. File systems currently support only one subnet. The file server is also launched in that subnet's Availability Zone. + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Identifier of the Virtual Private Cloud for the file system. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // The preferred start time (in d:HH:MM format) to perform weekly maintenance, in the UTC time zone. + WeeklyMaintenanceStartTime *string `json:"weeklyMaintenanceStartTime,omitempty" tf:"weekly_maintenance_start_time,omitempty"` +} + +type LustreFileSystemParameters struct { + + // How Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. see Auto Import Data Repo for more details. Only supported on PERSISTENT_1 deployment types. + // +kubebuilder:validation:Optional + AutoImportPolicy *string `json:"autoImportPolicy,omitempty" tf:"auto_import_policy,omitempty"` + + // The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. only valid for PERSISTENT_1 and PERSISTENT_2 deployment_type. + // +kubebuilder:validation:Optional + AutomaticBackupRetentionDays *float64 `json:"automaticBackupRetentionDays,omitempty" tf:"automatic_backup_retention_days,omitempty"` + + // The ID of the source backup to create the filesystem from. + // +kubebuilder:validation:Optional + BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"` + + // A boolean flag indicating whether tags for the file system should be copied to backups. Applicable for PERSISTENT_1 and PERSISTENT_2 deployment_type. The default value is false. 
+ // +kubebuilder:validation:Optional + CopyTagsToBackups *bool `json:"copyTagsToBackups,omitempty" tf:"copy_tags_to_backups,omitempty"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. only valid for PERSISTENT_1 and PERSISTENT_2 deployment_type. Requires automatic_backup_retention_days to be set. + // +kubebuilder:validation:Optional + DailyAutomaticBackupStartTime *string `json:"dailyAutomaticBackupStartTime,omitempty" tf:"daily_automatic_backup_start_time,omitempty"` + + // Sets the data compression configuration for the file system. Valid values are LZ4 and NONE. Default value is NONE. Unsetting this value reverts the compression type back to NONE. + // +kubebuilder:validation:Optional + DataCompressionType *string `json:"dataCompressionType,omitempty" tf:"data_compression_type,omitempty"` + + // - The filesystem deployment type. One of: SCRATCH_1, SCRATCH_2, PERSISTENT_1, PERSISTENT_2. + // +kubebuilder:validation:Optional + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` + + // - The type of drive cache used by PERSISTENT_1 filesystems that are provisioned with HDD storage_type. Required for HDD storage_type, set to either READ or NONE. + // +kubebuilder:validation:Optional + DriveCacheType *string `json:"driveCacheType,omitempty" tf:"drive_cache_type,omitempty"` + + // S3 URI (with optional prefix) where the root of your Amazon FSx file system is exported. Can only be specified with import_path argument and the path must use the same Amazon S3 bucket as specified in import_path. Set equal to import_path to overwrite files on export. Defaults to s3://{IMPORT BUCKET}/FSxLustre{CREATION TIMESTAMP}. Only supported on PERSISTENT_1 deployment types. 
+ // +kubebuilder:validation:Optional + ExportPath *string `json:"exportPath,omitempty" tf:"export_path,omitempty"` + + // Sets the Lustre version for the file system that you're creating. Valid values are 2.10 for SCRATCH_1, SCRATCH_2 and PERSISTENT_1 deployment types. Valid values for 2.12 include all deployment types. + // +kubebuilder:validation:Optional + FileSystemTypeVersion *string `json:"fileSystemTypeVersion,omitempty" tf:"file_system_type_version,omitempty"` + + // S3 URI (with optional prefix) that you're using as the data repository for your FSx for Lustre file system. For example, s3://example-bucket/optional-prefix/. Only supported on PERSISTENT_1 deployment types. + // +kubebuilder:validation:Optional + ImportPath *string `json:"importPath,omitempty" tf:"import_path,omitempty"` + + // For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. Can only be specified with import_path argument. Defaults to 1024. Minimum of 1 and maximum of 512000. Only supported on PERSISTENT_1 deployment types. + // +kubebuilder:validation:Optional + ImportedFileChunkSize *float64 `json:"importedFileChunkSize,omitempty" tf:"imported_file_chunk_size,omitempty"` + + // ARN for the KMS Key to encrypt the file system at rest, applicable for PERSISTENT_1 and PERSISTENT_2 deployment_type. Defaults to an AWS managed KMS Key. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. 
+ // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs. + // +kubebuilder:validation:Optional + LogConfiguration *LogConfigurationParameters `json:"logConfiguration,omitempty" tf:"log_configuration,omitempty"` + + // - Describes the amount of read and write throughput for each 1 tebibyte of storage, in MB/s/TiB, required for the PERSISTENT_1 and PERSISTENT_2 deployment_type. Valid values for PERSISTENT_1 deployment_type and SSD storage_type are 50, 100, 200. Valid values for PERSISTENT_1 deployment_type and HDD storage_type are 12, 40. Valid values for PERSISTENT_2 deployment_type and SSD storage_type are 125, 250, 500, 1000. + // +kubebuilder:validation:Optional + PerUnitStorageThroughput *float64 `json:"perUnitStorageThroughput,omitempty" tf:"per_unit_storage_throughput,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user. + // +kubebuilder:validation:Optional + RootSquashConfiguration *RootSquashConfigurationParameters `json:"rootSquashConfiguration,omitempty" tf:"root_squash_configuration,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. 
+ // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The storage capacity (GiB) of the file system. Minimum of 1200. See more details at Allowed values for Fsx storage capacity. Update is allowed only for SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 deployment types, See more details at Fsx Storage Capacity Update. Required when not creating filesystem for a backup. + // +kubebuilder:validation:Optional + StorageCapacity *float64 `json:"storageCapacity,omitempty" tf:"storage_capacity,omitempty"` + + // - The filesystem storage type. Either SSD or HDD, defaults to SSD. HDD is only supported on PERSISTENT_1 deployment types. + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of IDs for the subnets that the file system will be accessible from. File systems currently support only one subnet. The file server is also launched in that subnet's Availability Zone. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The preferred start time (in d:HH:MM format) to perform weekly maintenance, in the UTC time zone. + // +kubebuilder:validation:Optional + WeeklyMaintenanceStartTime *string `json:"weeklyMaintenanceStartTime,omitempty" tf:"weekly_maintenance_start_time,omitempty"` +} + +type RootSquashConfigurationInitParameters struct { + + // When root squash is enabled, you can optionally specify an array of NIDs of clients for which root squash does not apply. A client NID is a Lustre Network Identifier used to uniquely identify a client. You can specify the NID as either a single address or a range of addresses: 1. A single address is described in standard Lustre NID format by specifying the client’s IP address followed by the Lustre network ID (for example, 10.0.1.6@tcp). 2. An address range is described using a dash to separate the range (for example, 10.0.[2-10].[1-255]@tcp). + // +listType=set + NoSquashNids []*string `json:"noSquashNids,omitempty" tf:"no_squash_nids,omitempty"` + + // You enable root squash by setting a user ID (UID) and group ID (GID) for the file system in the format UID:GID (for example, 365534:65534). The UID and GID values can range from 0 to 4294967294. + RootSquash *string `json:"rootSquash,omitempty" tf:"root_squash,omitempty"` +} + +type RootSquashConfigurationObservation struct { + + // When root squash is enabled, you can optionally specify an array of NIDs of clients for which root squash does not apply. 
A client NID is a Lustre Network Identifier used to uniquely identify a client. You can specify the NID as either a single address or a range of addresses: 1. A single address is described in standard Lustre NID format by specifying the client’s IP address followed by the Lustre network ID (for example, 10.0.1.6@tcp). 2. An address range is described using a dash to separate the range (for example, 10.0.[2-10].[1-255]@tcp). + // +listType=set + NoSquashNids []*string `json:"noSquashNids,omitempty" tf:"no_squash_nids,omitempty"` + + // You enable root squash by setting a user ID (UID) and group ID (GID) for the file system in the format UID:GID (for example, 365534:65534). The UID and GID values can range from 0 to 4294967294. + RootSquash *string `json:"rootSquash,omitempty" tf:"root_squash,omitempty"` +} + +type RootSquashConfigurationParameters struct { + + // When root squash is enabled, you can optionally specify an array of NIDs of clients for which root squash does not apply. A client NID is a Lustre Network Identifier used to uniquely identify a client. You can specify the NID as either a single address or a range of addresses: 1. A single address is described in standard Lustre NID format by specifying the client’s IP address followed by the Lustre network ID (for example, 10.0.1.6@tcp). 2. An address range is described using a dash to separate the range (for example, 10.0.[2-10].[1-255]@tcp). + // +kubebuilder:validation:Optional + // +listType=set + NoSquashNids []*string `json:"noSquashNids,omitempty" tf:"no_squash_nids,omitempty"` + + // You enable root squash by setting a user ID (UID) and group ID (GID) for the file system in the format UID:GID (for example, 365534:65534). The UID and GID values can range from 0 to 4294967294. 
+ // +kubebuilder:validation:Optional + RootSquash *string `json:"rootSquash,omitempty" tf:"root_squash,omitempty"` +} + +// LustreFileSystemSpec defines the desired state of LustreFileSystem +type LustreFileSystemSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LustreFileSystemParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LustreFileSystemInitParameters `json:"initProvider,omitempty"` +} + +// LustreFileSystemStatus defines the observed state of LustreFileSystem. +type LustreFileSystemStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LustreFileSystemObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LustreFileSystem is the Schema for the LustreFileSystems API. Manages a FSx Lustre File System. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LustreFileSystem struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec LustreFileSystemSpec `json:"spec"` + Status LustreFileSystemStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LustreFileSystemList contains a list of LustreFileSystems +type LustreFileSystemList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LustreFileSystem `json:"items"` +} + +// Repository type metadata. +var ( + LustreFileSystem_Kind = "LustreFileSystem" + LustreFileSystem_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LustreFileSystem_Kind}.String() + LustreFileSystem_KindAPIVersion = LustreFileSystem_Kind + "." + CRDGroupVersion.String() + LustreFileSystem_GroupVersionKind = CRDGroupVersion.WithKind(LustreFileSystem_Kind) +) + +func init() { + SchemeBuilder.Register(&LustreFileSystem{}, &LustreFileSystemList{}) +} diff --git a/apis/fsx/v1beta2/zz_ontapfilesystem_terraformed.go b/apis/fsx/v1beta2/zz_ontapfilesystem_terraformed.go new file mode 100755 index 0000000000..171b3fc853 --- /dev/null +++ b/apis/fsx/v1beta2/zz_ontapfilesystem_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OntapFileSystem +func (mg *OntapFileSystem) GetTerraformResourceType() string { + return "aws_fsx_ontap_file_system" +} + +// GetConnectionDetailsMapping for this OntapFileSystem +func (tr *OntapFileSystem) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"fsx_admin_password": "fsxAdminPasswordSecretRef"} +} + +// GetObservation of this OntapFileSystem +func (tr *OntapFileSystem) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OntapFileSystem +func (tr *OntapFileSystem) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OntapFileSystem +func (tr *OntapFileSystem) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OntapFileSystem +func (tr *OntapFileSystem) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OntapFileSystem +func (tr *OntapFileSystem) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OntapFileSystem +func (tr *OntapFileSystem) GetInitParameters() 
(map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this OntapFileSystem
+func (tr *OntapFileSystem) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this OntapFileSystem using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *OntapFileSystem) LateInitialize(attrs []byte) (bool, error) {
+	params := &OntapFileSystemParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OntapFileSystem) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/fsx/v1beta2/zz_ontapfilesystem_types.go b/apis/fsx/v1beta2/zz_ontapfilesystem_types.go new file mode 100755 index 0000000000..9357486b8f --- /dev/null +++ b/apis/fsx/v1beta2/zz_ontapfilesystem_types.go @@ -0,0 +1,459 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiskIopsConfigurationInitParameters struct { + + // - The total number of SSD IOPS provisioned for the file system. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // - Specifies whether the number of IOPS for the file system is using the system. Valid values are AUTOMATIC and USER_PROVISIONED. Default value is AUTOMATIC. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type DiskIopsConfigurationObservation struct { + + // - The total number of SSD IOPS provisioned for the file system. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // - Specifies whether the number of IOPS for the file system is using the system. Valid values are AUTOMATIC and USER_PROVISIONED. Default value is AUTOMATIC. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type DiskIopsConfigurationParameters struct { + + // - The total number of SSD IOPS provisioned for the file system. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // - Specifies whether the number of IOPS for the file system is using the system. Valid values are AUTOMATIC and USER_PROVISIONED. Default value is AUTOMATIC. 
+ // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type EndpointsInitParameters struct { +} + +type EndpointsObservation struct { + + // An endpoint for managing your file system by setting up NetApp SnapMirror with other ONTAP systems. See Endpoint. + Intercluster []InterclusterObservation `json:"intercluster,omitempty" tf:"intercluster,omitempty"` + + // An endpoint for managing your file system using the NetApp ONTAP CLI and NetApp ONTAP API. See Endpoint. + Management []ManagementObservation `json:"management,omitempty" tf:"management,omitempty"` +} + +type EndpointsParameters struct { +} + +type InterclusterInitParameters struct { +} + +type InterclusterObservation struct { + + // DNS name for the file system, e.g., fs-12345678.fsx.us-west-2.amazonaws.com + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // IP addresses of the file system endpoint. + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type InterclusterParameters struct { +} + +type ManagementInitParameters struct { +} + +type ManagementObservation struct { + + // DNS name for the file system, e.g., fs-12345678.fsx.us-west-2.amazonaws.com + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // IP addresses of the file system endpoint. + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type ManagementParameters struct { +} + +type OntapFileSystemInitParameters struct { + + // The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. + AutomaticBackupRetentionDays *float64 `json:"automaticBackupRetentionDays,omitempty" tf:"automatic_backup_retention_days,omitempty"` + + // A recurring daily time, in the format HH:MM. 
HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires automatic_backup_retention_days to be set. + DailyAutomaticBackupStartTime *string `json:"dailyAutomaticBackupStartTime,omitempty" tf:"daily_automatic_backup_start_time,omitempty"` + + // - The filesystem deployment type. Supports MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2. + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` + + // The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. See Disk Iops Configuration below. + DiskIopsConfiguration *DiskIopsConfigurationInitParameters `json:"diskIopsConfiguration,omitempty" tf:"disk_iops_configuration,omitempty"` + + // Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range. + EndpointIPAddressRange *string `json:"endpointIpAddressRange,omitempty" tf:"endpoint_ip_address_range,omitempty"` + + // The ONTAP administrative password for the fsxadmin user that you can use to administer your file system using the ONTAP CLI and REST API. + FSXAdminPasswordSecretRef *v1.SecretKeySelector `json:"fsxAdminPasswordSecretRef,omitempty" tf:"-"` + + // - The number of ha_pairs to deploy for the file system. Valid values are 1 through 12. Value of 2 or greater required for SINGLE_AZ_2. Only value of 1 is supported with SINGLE_AZ_1 or MULTI_AZ_1 but not required. + HaPairs *float64 `json:"haPairs,omitempty" tf:"ha_pairs,omitempty"` + + // ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. 
+ // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PreferredSubnetID *string `json:"preferredSubnetId,omitempty" tf:"preferred_subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate preferredSubnetId. + // +kubebuilder:validation:Optional + PreferredSubnetIDRef *v1.Reference `json:"preferredSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate preferredSubnetId. + // +kubebuilder:validation:Optional + PreferredSubnetIDSelector *v1.Selector `json:"preferredSubnetIdSelector,omitempty" tf:"-"` + + // Specifies the VPC route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. + // +listType=set + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // A list of IDs for the security groups that apply to the specified network interfaces created for file system access. 
These security groups will apply to all network interfaces. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The storage capacity (GiB) of the file system. Valid values between 1024 and 196608 for file systems with deployment_type SINGLE_AZ_1 and MULTI_AZ_1. Valid values between 2048 (1024 per ha pair) and 1048576 for file systems with deployment_type SINGLE_AZ_2. + StorageCapacity *float64 `json:"storageCapacity,omitempty" tf:"storage_capacity,omitempty"` + + // - The filesystem storage type. defaults to SSD. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of IDs for the subnets that the file system will be accessible from. Up to 2 subnets can be provided. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Sets the throughput capacity (in MBps) for the file system that you're creating. Valid values are 128, 256, 512, 1024, 2048, and 4096. 
This parameter is only supported when not using the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. + ThroughputCapacity *float64 `json:"throughputCapacity,omitempty" tf:"throughput_capacity,omitempty"` + + // Sets the throughput capacity (in MBps) for the file system that you're creating. Valid value when using 1 ha_pair are 128, 256, 512, 1024, 2048, and 4096. Valid values when using 2 or more ha_pairs are 3072,6144. This parameter is only supported when specifying the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. + ThroughputCapacityPerHaPair *float64 `json:"throughputCapacityPerHaPair,omitempty" tf:"throughput_capacity_per_ha_pair,omitempty"` + + // The preferred start time (in d:HH:MM format) to perform weekly maintenance, in the UTC time zone. + WeeklyMaintenanceStartTime *string `json:"weeklyMaintenanceStartTime,omitempty" tf:"weekly_maintenance_start_time,omitempty"` +} + +type OntapFileSystemObservation struct { + + // Amazon Resource Name of the file system. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. + AutomaticBackupRetentionDays *float64 `json:"automaticBackupRetentionDays,omitempty" tf:"automatic_backup_retention_days,omitempty"` + + // DNS name for the file system, e.g., fs-12345678.fsx.us-west-2.amazonaws.com + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires automatic_backup_retention_days to be set. 
+ DailyAutomaticBackupStartTime *string `json:"dailyAutomaticBackupStartTime,omitempty" tf:"daily_automatic_backup_start_time,omitempty"` + + // - The filesystem deployment type. Supports MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2. + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` + + // The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. See Disk Iops Configuration below. + DiskIopsConfiguration *DiskIopsConfigurationObservation `json:"diskIopsConfiguration,omitempty" tf:"disk_iops_configuration,omitempty"` + + // Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range. + EndpointIPAddressRange *string `json:"endpointIpAddressRange,omitempty" tf:"endpoint_ip_address_range,omitempty"` + + // The endpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror. See Endpoints below. + Endpoints []EndpointsObservation `json:"endpoints,omitempty" tf:"endpoints,omitempty"` + + // - The number of ha_pairs to deploy for the file system. Valid values are 1 through 12. Value of 2 or greater required for SINGLE_AZ_2. Only value of 1 is supported with SINGLE_AZ_1 or MULTI_AZ_1 but not required. + HaPairs *float64 `json:"haPairs,omitempty" tf:"ha_pairs,omitempty"` + + // Identifier of the file system, e.g., fs-12345678 + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Set of Elastic Network Interface identifiers from which the file system is accessible The first network interface returned is the primary network interface. 
+ NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // AWS account identifier that created the file system. + OwnerID *string `json:"ownerId,omitempty" tf:"owner_id,omitempty"` + + // The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). + PreferredSubnetID *string `json:"preferredSubnetId,omitempty" tf:"preferred_subnet_id,omitempty"` + + // Specifies the VPC route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. + // +listType=set + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` + + // A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The storage capacity (GiB) of the file system. Valid values between 1024 and 196608 for file systems with deployment_type SINGLE_AZ_1 and MULTI_AZ_1. Valid values between 2048 (1024 per ha pair) and 1048576 for file systems with deployment_type SINGLE_AZ_2. + StorageCapacity *float64 `json:"storageCapacity,omitempty" tf:"storage_capacity,omitempty"` + + // - The filesystem storage type. defaults to SSD. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // A list of IDs for the subnets that the file system will be accessible from. Up to 2 subnets can be provided. + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Sets the throughput capacity (in MBps) for the file system that you're creating. Valid values are 128, 256, 512, 1024, 2048, and 4096. This parameter is only supported when not using the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. + ThroughputCapacity *float64 `json:"throughputCapacity,omitempty" tf:"throughput_capacity,omitempty"` + + // Sets the throughput capacity (in MBps) for the file system that you're creating. Valid value when using 1 ha_pair are 128, 256, 512, 1024, 2048, and 4096. Valid values when using 2 or more ha_pairs are 3072,6144. This parameter is only supported when specifying the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. + ThroughputCapacityPerHaPair *float64 `json:"throughputCapacityPerHaPair,omitempty" tf:"throughput_capacity_per_ha_pair,omitempty"` + + // Identifier of the Virtual Private Cloud for the file system. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // The preferred start time (in d:HH:MM format) to perform weekly maintenance, in the UTC time zone. + WeeklyMaintenanceStartTime *string `json:"weeklyMaintenanceStartTime,omitempty" tf:"weekly_maintenance_start_time,omitempty"` +} + +type OntapFileSystemParameters struct { + + // The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. 
+ // +kubebuilder:validation:Optional + AutomaticBackupRetentionDays *float64 `json:"automaticBackupRetentionDays,omitempty" tf:"automatic_backup_retention_days,omitempty"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires automatic_backup_retention_days to be set. + // +kubebuilder:validation:Optional + DailyAutomaticBackupStartTime *string `json:"dailyAutomaticBackupStartTime,omitempty" tf:"daily_automatic_backup_start_time,omitempty"` + + // - The filesystem deployment type. Supports MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2. + // +kubebuilder:validation:Optional + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` + + // The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. See Disk Iops Configuration below. + // +kubebuilder:validation:Optional + DiskIopsConfiguration *DiskIopsConfigurationParameters `json:"diskIopsConfiguration,omitempty" tf:"disk_iops_configuration,omitempty"` + + // Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range. + // +kubebuilder:validation:Optional + EndpointIPAddressRange *string `json:"endpointIpAddressRange,omitempty" tf:"endpoint_ip_address_range,omitempty"` + + // The ONTAP administrative password for the fsxadmin user that you can use to administer your file system using the ONTAP CLI and REST API. + // +kubebuilder:validation:Optional + FSXAdminPasswordSecretRef *v1.SecretKeySelector `json:"fsxAdminPasswordSecretRef,omitempty" tf:"-"` + + // - The number of ha_pairs to deploy for the file system. Valid values are 1 through 12. Value of 2 or greater required for SINGLE_AZ_2. Only value of 1 is supported with SINGLE_AZ_1 or MULTI_AZ_1 but not required. 
+ // +kubebuilder:validation:Optional + HaPairs *float64 `json:"haPairs,omitempty" tf:"ha_pairs,omitempty"` + + // ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PreferredSubnetID *string `json:"preferredSubnetId,omitempty" tf:"preferred_subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate preferredSubnetId. + // +kubebuilder:validation:Optional + PreferredSubnetIDRef *v1.Reference `json:"preferredSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate preferredSubnetId. + // +kubebuilder:validation:Optional + PreferredSubnetIDSelector *v1.Selector `json:"preferredSubnetIdSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies the VPC route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. 
By default, Amazon FSx selects your VPC's default route table. + // +kubebuilder:validation:Optional + // +listType=set + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The storage capacity (GiB) of the file system. Valid values between 1024 and 196608 for file systems with deployment_type SINGLE_AZ_1 and MULTI_AZ_1. Valid values between 2048 (1024 per ha pair) and 1048576 for file systems with deployment_type SINGLE_AZ_2. + // +kubebuilder:validation:Optional + StorageCapacity *float64 `json:"storageCapacity,omitempty" tf:"storage_capacity,omitempty"` + + // - The filesystem storage type. defaults to SSD. + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. 
+ // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of IDs for the subnets that the file system will be accessible from. Up to 2 subnets can be provided. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Sets the throughput capacity (in MBps) for the file system that you're creating. Valid values are 128, 256, 512, 1024, 2048, and 4096. This parameter is only supported when not using the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. + // +kubebuilder:validation:Optional + ThroughputCapacity *float64 `json:"throughputCapacity,omitempty" tf:"throughput_capacity,omitempty"` + + // Sets the throughput capacity (in MBps) for the file system that you're creating. Valid value when using 1 ha_pair are 128, 256, 512, 1024, 2048, and 4096. Valid values when using 2 or more ha_pairs are 3072,6144. This parameter is only supported when specifying the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. 
+ // +kubebuilder:validation:Optional + ThroughputCapacityPerHaPair *float64 `json:"throughputCapacityPerHaPair,omitempty" tf:"throughput_capacity_per_ha_pair,omitempty"` + + // The preferred start time (in d:HH:MM format) to perform weekly maintenance, in the UTC time zone. + // +kubebuilder:validation:Optional + WeeklyMaintenanceStartTime *string `json:"weeklyMaintenanceStartTime,omitempty" tf:"weekly_maintenance_start_time,omitempty"` +} + +// OntapFileSystemSpec defines the desired state of OntapFileSystem +type OntapFileSystemSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OntapFileSystemParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider OntapFileSystemInitParameters `json:"initProvider,omitempty"` +} + +// OntapFileSystemStatus defines the observed state of OntapFileSystem. +type OntapFileSystemStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OntapFileSystemObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// OntapFileSystem is the Schema for the OntapFileSystems API. Manages an Amazon FSx for NetApp ONTAP file system. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type OntapFileSystem struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.deploymentType) || (has(self.initProvider) && has(self.initProvider.deploymentType))",message="spec.forProvider.deploymentType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageCapacity) || (has(self.initProvider) && has(self.initProvider.storageCapacity))",message="spec.forProvider.storageCapacity is a required parameter" + Spec OntapFileSystemSpec `json:"spec"` + Status OntapFileSystemStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OntapFileSystemList contains a list of OntapFileSystems +type OntapFileSystemList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OntapFileSystem `json:"items"` +} + +// Repository type metadata. +var ( + OntapFileSystem_Kind = "OntapFileSystem" + OntapFileSystem_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OntapFileSystem_Kind}.String() + OntapFileSystem_KindAPIVersion = OntapFileSystem_Kind + "." 
+ CRDGroupVersion.String() + OntapFileSystem_GroupVersionKind = CRDGroupVersion.WithKind(OntapFileSystem_Kind) +) + +func init() { + SchemeBuilder.Register(&OntapFileSystem{}, &OntapFileSystemList{}) +} diff --git a/apis/fsx/v1beta2/zz_ontapstoragevirtualmachine_terraformed.go b/apis/fsx/v1beta2/zz_ontapstoragevirtualmachine_terraformed.go new file mode 100755 index 0000000000..3dd4cc1b2c --- /dev/null +++ b/apis/fsx/v1beta2/zz_ontapstoragevirtualmachine_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OntapStorageVirtualMachine +func (mg *OntapStorageVirtualMachine) GetTerraformResourceType() string { + return "aws_fsx_ontap_storage_virtual_machine" +} + +// GetConnectionDetailsMapping for this OntapStorageVirtualMachine +func (tr *OntapStorageVirtualMachine) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"active_directory_configuration[*].self_managed_active_directory_configuration[*].password": "activeDirectoryConfiguration[*].selfManagedActiveDirectoryConfiguration[*].passwordSecretRef", "svm_admin_password": "svmAdminPasswordSecretRef"} +} + +// GetObservation of this OntapStorageVirtualMachine +func (tr *OntapStorageVirtualMachine) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OntapStorageVirtualMachine +func (tr *OntapStorageVirtualMachine) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + 
return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OntapStorageVirtualMachine +func (tr *OntapStorageVirtualMachine) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OntapStorageVirtualMachine +func (tr *OntapStorageVirtualMachine) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OntapStorageVirtualMachine +func (tr *OntapStorageVirtualMachine) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OntapStorageVirtualMachine +func (tr *OntapStorageVirtualMachine) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this OntapStorageVirtualMachine +func (tr *OntapStorageVirtualMachine) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this OntapStorageVirtualMachine using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *OntapStorageVirtualMachine) LateInitialize(attrs []byte) (bool, error) { + params := &OntapStorageVirtualMachineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OntapStorageVirtualMachine) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/fsx/v1beta2/zz_ontapstoragevirtualmachine_types.go b/apis/fsx/v1beta2/zz_ontapstoragevirtualmachine_types.go new file mode 100755 index 0000000000..4ffa89645a --- /dev/null +++ b/apis/fsx/v1beta2/zz_ontapstoragevirtualmachine_types.go @@ -0,0 +1,369 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActiveDirectoryConfigurationInitParameters struct { + + // The NetBIOS name of the Active Directory computer object that will be created for your SVM. This is often the same as the SVM name but can be different. AWS limits to 15 characters because of standard NetBIOS naming limits. + NetbiosName *string `json:"netbiosName,omitempty" tf:"netbios_name,omitempty"` + + // Configuration block that Amazon FSx uses to join the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft Active Directory (AD) directory. Detailed below. + SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfigurationInitParameters `json:"selfManagedActiveDirectoryConfiguration,omitempty" tf:"self_managed_active_directory_configuration,omitempty"` +} + +type ActiveDirectoryConfigurationObservation struct { + + // The NetBIOS name of the Active Directory computer object that will be created for your SVM. This is often the same as the SVM name but can be different. AWS limits to 15 characters because of standard NetBIOS naming limits. + NetbiosName *string `json:"netbiosName,omitempty" tf:"netbios_name,omitempty"` + + // Configuration block that Amazon FSx uses to join the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft Active Directory (AD) directory. Detailed below. + SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfigurationObservation `json:"selfManagedActiveDirectoryConfiguration,omitempty" tf:"self_managed_active_directory_configuration,omitempty"` +} + +type ActiveDirectoryConfigurationParameters struct { + + // The NetBIOS name of the Active Directory computer object that will be created for your SVM. This is often the same as the SVM name but can be different. AWS limits to 15 characters because of standard NetBIOS naming limits. 
+ // +kubebuilder:validation:Optional + NetbiosName *string `json:"netbiosName,omitempty" tf:"netbios_name,omitempty"` + + // Configuration block that Amazon FSx uses to join the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft Active Directory (AD) directory. Detailed below. + // +kubebuilder:validation:Optional + SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfigurationParameters `json:"selfManagedActiveDirectoryConfiguration,omitempty" tf:"self_managed_active_directory_configuration,omitempty"` +} + +type EndpointsManagementInitParameters struct { +} + +type EndpointsManagementObservation struct { + + // The Domain Name Service (DNS) name for the storage virtual machine. You can mount your storage virtual machine using its DNS name. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // IP addresses of the storage virtual machine endpoint. + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type EndpointsManagementParameters struct { +} + +type ISCSIInitParameters struct { +} + +type ISCSIObservation struct { + + // The Domain Name Service (DNS) name for the storage virtual machine. You can mount your storage virtual machine using its DNS name. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // IP addresses of the storage virtual machine endpoint. + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type ISCSIParameters struct { +} + +type NFSInitParameters struct { +} + +type NFSObservation struct { + + // The Domain Name Service (DNS) name for the storage virtual machine. You can mount your storage virtual machine using its DNS name. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // IP addresses of the storage virtual machine endpoint. 
+ // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type NFSParameters struct { +} + +type OntapStorageVirtualMachineEndpointsInitParameters struct { +} + +type OntapStorageVirtualMachineEndpointsObservation struct { + + // An endpoint for accessing data on your storage virtual machine via iSCSI protocol. See Endpoint. + ISCSI []ISCSIObservation `json:"iscsi,omitempty" tf:"iscsi,omitempty"` + + // An endpoint for managing your file system using the NetApp ONTAP CLI and NetApp ONTAP API. See Endpoint. + Management []EndpointsManagementObservation `json:"management,omitempty" tf:"management,omitempty"` + + // An endpoint for accessing data on your storage virtual machine via NFS protocol. See Endpoint. + NFS []NFSObservation `json:"nfs,omitempty" tf:"nfs,omitempty"` + + // An endpoint for accessing data on your storage virtual machine via SMB protocol. This is only set if an active_directory_configuration has been set. See Endpoint. + SMB []SMBObservation `json:"smb,omitempty" tf:"smb,omitempty"` +} + +type OntapStorageVirtualMachineEndpointsParameters struct { +} + +type OntapStorageVirtualMachineInitParameters struct { + + // Configuration block that Amazon FSx uses to join the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft Active Directory (AD) directory. Detailed below. + ActiveDirectoryConfiguration *ActiveDirectoryConfigurationInitParameters `json:"activeDirectoryConfiguration,omitempty" tf:"active_directory_configuration,omitempty"` + + // The ID of the Amazon FSx ONTAP File System that this SVM will be created on. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/fsx/v1beta2.OntapFileSystem + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Reference to a OntapFileSystem in fsx to populate fileSystemId. 
+ // +kubebuilder:validation:Optional + FileSystemIDRef *v1.Reference `json:"fileSystemIdRef,omitempty" tf:"-"` + + // Selector for a OntapFileSystem in fsx to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDSelector *v1.Selector `json:"fileSystemIdSelector,omitempty" tf:"-"` + + // The name of the SVM. You can use a maximum of 47 alphanumeric characters, plus the underscore (_) special character. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the root volume security style, Valid values are UNIX, NTFS, and MIXED. All volumes created under this SVM will inherit the root security style unless the security style is specified on the volume. Default value is UNIX. + RootVolumeSecurityStyle *string `json:"rootVolumeSecurityStyle,omitempty" tf:"root_volume_security_style,omitempty"` + + SvmAdminPasswordSecretRef *v1.SecretKeySelector `json:"svmAdminPasswordSecretRef,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type OntapStorageVirtualMachineObservation struct { + + // Configuration block that Amazon FSx uses to join the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft Active Directory (AD) directory. Detailed below. + ActiveDirectoryConfiguration *ActiveDirectoryConfigurationObservation `json:"activeDirectoryConfiguration,omitempty" tf:"active_directory_configuration,omitempty"` + + // Amazon Resource Name of the storage virtual machine. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The endpoints that are used to access data or to manage the storage virtual machine using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror. See Endpoints below. + Endpoints []OntapStorageVirtualMachineEndpointsObservation `json:"endpoints,omitempty" tf:"endpoints,omitempty"` + + // The ID of the Amazon FSx ONTAP File System that this SVM will be created on. 
+ FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Identifier of the storage virtual machine, e.g., svm-12345678 + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the SVM. You can use a maximum of 47 alphanumeric characters, plus the underscore (_) special character. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the root volume security style, Valid values are UNIX, NTFS, and MIXED. All volumes created under this SVM will inherit the root security style unless the security style is specified on the volume. Default value is UNIX. + RootVolumeSecurityStyle *string `json:"rootVolumeSecurityStyle,omitempty" tf:"root_volume_security_style,omitempty"` + + // Describes the SVM's subtype, e.g. DEFAULT + Subtype *string `json:"subtype,omitempty" tf:"subtype,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The SVM's UUID (universally unique identifier). + UUID *string `json:"uuid,omitempty" tf:"uuid,omitempty"` +} + +type OntapStorageVirtualMachineParameters struct { + + // Configuration block that Amazon FSx uses to join the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft Active Directory (AD) directory. Detailed below. + // +kubebuilder:validation:Optional + ActiveDirectoryConfiguration *ActiveDirectoryConfigurationParameters `json:"activeDirectoryConfiguration,omitempty" tf:"active_directory_configuration,omitempty"` + + // The ID of the Amazon FSx ONTAP File System that this SVM will be created on. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/fsx/v1beta2.OntapFileSystem + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // Reference to a OntapFileSystem in fsx to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDRef *v1.Reference `json:"fileSystemIdRef,omitempty" tf:"-"` + + // Selector for a OntapFileSystem in fsx to populate fileSystemId. + // +kubebuilder:validation:Optional + FileSystemIDSelector *v1.Selector `json:"fileSystemIdSelector,omitempty" tf:"-"` + + // The name of the SVM. You can use a maximum of 47 alphanumeric characters, plus the underscore (_) special character. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies the root volume security style, Valid values are UNIX, NTFS, and MIXED. All volumes created under this SVM will inherit the root security style unless the security style is specified on the volume. Default value is UNIX. + // +kubebuilder:validation:Optional + RootVolumeSecurityStyle *string `json:"rootVolumeSecurityStyle,omitempty" tf:"root_volume_security_style,omitempty"` + + // +kubebuilder:validation:Optional + SvmAdminPasswordSecretRef *v1.SecretKeySelector `json:"svmAdminPasswordSecretRef,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SMBInitParameters struct { +} + +type SMBObservation struct { + + // The Domain Name Service (DNS) name for the storage virtual machine. 
You can mount your storage virtual machine using its DNS name. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // IP addresses of the storage virtual machine endpoint. + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type SMBParameters struct { +} + +type SelfManagedActiveDirectoryConfigurationInitParameters struct { + + // A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory. + // +listType=set + DNSIps []*string `json:"dnsIps,omitempty" tf:"dns_ips,omitempty"` + + // The fully qualified domain name of the self-managed AD directory. For example, corp.example.com. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The name of the domain group whose members are granted administrative privileges for the SVM. The group that you specify must already exist in your domain. Defaults to Domain Admins. + FileSystemAdministratorsGroup *string `json:"fileSystemAdministratorsGroup,omitempty" tf:"file_system_administrators_group,omitempty"` + + // The fully qualified distinguished name of the organizational unit within your self-managed AD directory that the Windows File Server instance will join. For example, OU=FSx,DC=yourdomain,DC=corp,DC=com. Only accepts OU as the direct parent of the SVM. If none is provided, the SVM is created in the default location of your self-managed AD directory. To learn more, see RFC 2253. + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` + + // The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SelfManagedActiveDirectoryConfigurationObservation struct { + + // A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory. + // +listType=set + DNSIps []*string `json:"dnsIps,omitempty" tf:"dns_ips,omitempty"` + + // The fully qualified domain name of the self-managed AD directory. For example, corp.example.com. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The name of the domain group whose members are granted administrative privileges for the SVM. The group that you specify must already exist in your domain. Defaults to Domain Admins. + FileSystemAdministratorsGroup *string `json:"fileSystemAdministratorsGroup,omitempty" tf:"file_system_administrators_group,omitempty"` + + // The fully qualified distinguished name of the organizational unit within your self-managed AD directory that the Windows File Server instance will join. For example, OU=FSx,DC=yourdomain,DC=corp,DC=com. Only accepts OU as the direct parent of the SVM. If none is provided, the SVM is created in the default location of your self-managed AD directory. To learn more, see RFC 2253. + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` + + // The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SelfManagedActiveDirectoryConfigurationParameters struct { + + // A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory. + // +kubebuilder:validation:Optional + // +listType=set + DNSIps []*string `json:"dnsIps" tf:"dns_ips,omitempty"` + + // The fully qualified domain name of the self-managed AD directory. For example, corp.example.com. 
+ // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // The name of the domain group whose members are granted administrative privileges for the SVM. The group that you specify must already exist in your domain. Defaults to Domain Admins. + // +kubebuilder:validation:Optional + FileSystemAdministratorsGroup *string `json:"fileSystemAdministratorsGroup,omitempty" tf:"file_system_administrators_group,omitempty"` + + // The fully qualified distinguished name of the organizational unit within your self-managed AD directory that the Windows File Server instance will join. For example, OU=FSx,DC=yourdomain,DC=corp,DC=com. Only accepts OU as the direct parent of the SVM. If none is provided, the SVM is created in the default location of your self-managed AD directory. To learn more, see RFC 2253. + // +kubebuilder:validation:Optional + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` + + // The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +// OntapStorageVirtualMachineSpec defines the desired state of OntapStorageVirtualMachine +type OntapStorageVirtualMachineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OntapStorageVirtualMachineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider OntapStorageVirtualMachineInitParameters `json:"initProvider,omitempty"` +} + +// OntapStorageVirtualMachineStatus defines the observed state of OntapStorageVirtualMachine. +type OntapStorageVirtualMachineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OntapStorageVirtualMachineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// OntapStorageVirtualMachine is the Schema for the OntapStorageVirtualMachines API. Manages a FSx Storage Virtual Machine. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type OntapStorageVirtualMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec OntapStorageVirtualMachineSpec `json:"spec"` + Status OntapStorageVirtualMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OntapStorageVirtualMachineList contains a list of OntapStorageVirtualMachines +type OntapStorageVirtualMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OntapStorageVirtualMachine `json:"items"` +} + +// Repository type metadata. +var ( + OntapStorageVirtualMachine_Kind = "OntapStorageVirtualMachine" + OntapStorageVirtualMachine_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OntapStorageVirtualMachine_Kind}.String() + OntapStorageVirtualMachine_KindAPIVersion = OntapStorageVirtualMachine_Kind + "." 
+ CRDGroupVersion.String() + OntapStorageVirtualMachine_GroupVersionKind = CRDGroupVersion.WithKind(OntapStorageVirtualMachine_Kind) +) + +func init() { + SchemeBuilder.Register(&OntapStorageVirtualMachine{}, &OntapStorageVirtualMachineList{}) +} diff --git a/apis/fsx/v1beta2/zz_windowsfilesystem_terraformed.go b/apis/fsx/v1beta2/zz_windowsfilesystem_terraformed.go new file mode 100755 index 0000000000..f2276295de --- /dev/null +++ b/apis/fsx/v1beta2/zz_windowsfilesystem_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WindowsFileSystem +func (mg *WindowsFileSystem) GetTerraformResourceType() string { + return "aws_fsx_windows_file_system" +} + +// GetConnectionDetailsMapping for this WindowsFileSystem +func (tr *WindowsFileSystem) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"self_managed_active_directory[*].password": "selfManagedActiveDirectory[*].passwordSecretRef"} +} + +// GetObservation of this WindowsFileSystem +func (tr *WindowsFileSystem) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WindowsFileSystem +func (tr *WindowsFileSystem) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WindowsFileSystem +func (tr *WindowsFileSystem) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WindowsFileSystem +func (tr *WindowsFileSystem) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WindowsFileSystem +func (tr *WindowsFileSystem) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WindowsFileSystem +func (tr *WindowsFileSystem) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WindowsFileSystem +func (tr *WindowsFileSystem) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WindowsFileSystem using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WindowsFileSystem) LateInitialize(attrs []byte) (bool, error) { + params := &WindowsFileSystemParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WindowsFileSystem) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/fsx/v1beta2/zz_windowsfilesystem_types.go b/apis/fsx/v1beta2/zz_windowsfilesystem_types.go new file mode 100755 index 0000000000..65701d23b2 --- /dev/null +++ b/apis/fsx/v1beta2/zz_windowsfilesystem_types.go @@ -0,0 +1,548 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuditLogConfigurationInitParameters struct { + + // The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN. 
Can be specified when file_access_audit_log_level and file_share_access_audit_log_level are not set to DISABLED. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehouse delivery stream must begin with the aws-fsx prefix. If you do not provide a destination in audit_log_destionation, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group. + AuditLogDestination *string `json:"auditLogDestination,omitempty" tf:"audit_log_destination,omitempty"` + + // Sets which attempt type is logged by Amazon FSx for file and folder accesses. Valid values are SUCCESS_ONLY, FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default value is DISABLED. + FileAccessAuditLogLevel *string `json:"fileAccessAuditLogLevel,omitempty" tf:"file_access_audit_log_level,omitempty"` + + // Sets which attempt type is logged by Amazon FSx for file share accesses. Valid values are SUCCESS_ONLY, FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default value is DISABLED. + FileShareAccessAuditLogLevel *string `json:"fileShareAccessAuditLogLevel,omitempty" tf:"file_share_access_audit_log_level,omitempty"` +} + +type AuditLogConfigurationObservation struct { + + // The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN. Can be specified when file_access_audit_log_level and file_share_access_audit_log_level are not set to DISABLED. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehouse delivery stream must begin with the aws-fsx prefix. If you do not provide a destination in audit_log_destionation, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group. 
+ AuditLogDestination *string `json:"auditLogDestination,omitempty" tf:"audit_log_destination,omitempty"` + + // Sets which attempt type is logged by Amazon FSx for file and folder accesses. Valid values are SUCCESS_ONLY, FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default value is DISABLED. + FileAccessAuditLogLevel *string `json:"fileAccessAuditLogLevel,omitempty" tf:"file_access_audit_log_level,omitempty"` + + // Sets which attempt type is logged by Amazon FSx for file share accesses. Valid values are SUCCESS_ONLY, FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default value is DISABLED. + FileShareAccessAuditLogLevel *string `json:"fileShareAccessAuditLogLevel,omitempty" tf:"file_share_access_audit_log_level,omitempty"` +} + +type AuditLogConfigurationParameters struct { + + // The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN. Can be specified when file_access_audit_log_level and file_share_access_audit_log_level are not set to DISABLED. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehouse delivery stream must begin with the aws-fsx prefix. If you do not provide a destination in audit_log_destionation, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group. + // +kubebuilder:validation:Optional + AuditLogDestination *string `json:"auditLogDestination,omitempty" tf:"audit_log_destination,omitempty"` + + // Sets which attempt type is logged by Amazon FSx for file and folder accesses. Valid values are SUCCESS_ONLY, FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default value is DISABLED. 
+ // +kubebuilder:validation:Optional + FileAccessAuditLogLevel *string `json:"fileAccessAuditLogLevel,omitempty" tf:"file_access_audit_log_level,omitempty"` + + // Sets which attempt type is logged by Amazon FSx for file share accesses. Valid values are SUCCESS_ONLY, FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default value is DISABLED. + // +kubebuilder:validation:Optional + FileShareAccessAuditLogLevel *string `json:"fileShareAccessAuditLogLevel,omitempty" tf:"file_share_access_audit_log_level,omitempty"` +} + +type SelfManagedActiveDirectoryInitParameters struct { + + // A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory. The IP addresses need to be either in the same VPC CIDR range as the file system or in the private IP version 4 (IPv4) address ranges as specified in RFC 1918. + // +listType=set + DNSIps []*string `json:"dnsIps,omitempty" tf:"dns_ips,omitempty"` + + // The fully qualified domain name of the self-managed AD directory. For example, corp.example.com. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, and setting audit controls (audit ACLs) on files and folders. The group that you specify must already exist in your domain. Defaults to Domain Admins. + FileSystemAdministratorsGroup *string `json:"fileSystemAdministratorsGroup,omitempty" tf:"file_system_administrators_group,omitempty"` + + // The fully qualified distinguished name of the organizational unit within your self-managed AD directory that the Windows File Server instance will join. For example, OU=FSx,DC=yourdomain,DC=corp,DC=com. Only accepts OU as the direct parent of the file system. If none is provided, the FSx file system is created in the default location of your self-managed AD directory. To learn more, see RFC 2253. 
+ OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` + + // The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SelfManagedActiveDirectoryObservation struct { + + // A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory. The IP addresses need to be either in the same VPC CIDR range as the file system or in the private IP version 4 (IPv4) address ranges as specified in RFC 1918. + // +listType=set + DNSIps []*string `json:"dnsIps,omitempty" tf:"dns_ips,omitempty"` + + // The fully qualified domain name of the self-managed AD directory. For example, corp.example.com. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, and setting audit controls (audit ACLs) on files and folders. The group that you specify must already exist in your domain. Defaults to Domain Admins. + FileSystemAdministratorsGroup *string `json:"fileSystemAdministratorsGroup,omitempty" tf:"file_system_administrators_group,omitempty"` + + // The fully qualified distinguished name of the organizational unit within your self-managed AD directory that the Windows File Server instance will join. For example, OU=FSx,DC=yourdomain,DC=corp,DC=com. Only accepts OU as the direct parent of the file system. 
If none is provided, the FSx file system is created in the default location of your self-managed AD directory. To learn more, see RFC 2253. + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` + + // The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SelfManagedActiveDirectoryParameters struct { + + // A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory. The IP addresses need to be either in the same VPC CIDR range as the file system or in the private IP version 4 (IPv4) address ranges as specified in RFC 1918. + // +kubebuilder:validation:Optional + // +listType=set + DNSIps []*string `json:"dnsIps" tf:"dns_ips,omitempty"` + + // The fully qualified domain name of the self-managed AD directory. For example, corp.example.com. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, and setting audit controls (audit ACLs) on files and folders. The group that you specify must already exist in your domain. Defaults to Domain Admins. + // +kubebuilder:validation:Optional + FileSystemAdministratorsGroup *string `json:"fileSystemAdministratorsGroup,omitempty" tf:"file_system_administrators_group,omitempty"` + + // The fully qualified distinguished name of the organizational unit within your self-managed AD directory that the Windows File Server instance will join. For example, OU=FSx,DC=yourdomain,DC=corp,DC=com. Only accepts OU as the direct parent of the file system. 
If none is provided, the FSx file system is created in the default location of your self-managed AD directory. To learn more, see RFC 2253. + // +kubebuilder:validation:Optional + OrganizationalUnitDistinguishedName *string `json:"organizationalUnitDistinguishedName,omitempty" tf:"organizational_unit_distinguished_name,omitempty"` + + // The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type WindowsFileSystemDiskIopsConfigurationInitParameters struct { + + // - The total number of SSD IOPS provisioned for the file system. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // - Specifies whether the number of IOPS for the file system is using the system. Valid values are AUTOMATIC and USER_PROVISIONED. Default value is AUTOMATIC. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type WindowsFileSystemDiskIopsConfigurationObservation struct { + + // - The total number of SSD IOPS provisioned for the file system. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // - Specifies whether the number of IOPS for the file system is using the system. Valid values are AUTOMATIC and USER_PROVISIONED. Default value is AUTOMATIC. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type WindowsFileSystemDiskIopsConfigurationParameters struct { + + // - The total number of SSD IOPS provisioned for the file system. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // - Specifies whether the number of IOPS for the file system is using the system. 
Valid values are AUTOMATIC and USER_PROVISIONED. Default value is AUTOMATIC. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type WindowsFileSystemInitParameters struct { + + // The ID for an existing Microsoft Active Directory instance that the file system should join when it's created. Cannot be specified with self_managed_active_directory. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ActiveDirectoryID *string `json:"activeDirectoryId,omitempty" tf:"active_directory_id,omitempty"` + + // Reference to a Directory in ds to populate activeDirectoryId. + // +kubebuilder:validation:Optional + ActiveDirectoryIDRef *v1.Reference `json:"activeDirectoryIdRef,omitempty" tf:"-"` + + // Selector for a Directory in ds to populate activeDirectoryId. + // +kubebuilder:validation:Optional + ActiveDirectoryIDSelector *v1.Selector `json:"activeDirectoryIdSelector,omitempty" tf:"-"` + + // An array DNS alias names that you want to associate with the Amazon FSx file system. For more information, see Working with DNS Aliases + // +listType=set + Aliases []*string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system. See Audit Log Configuration below. + AuditLogConfiguration *AuditLogConfigurationInitParameters `json:"auditLogConfiguration,omitempty" tf:"audit_log_configuration,omitempty"` + + // The number of days to retain automatic backups. Minimum of 0 and maximum of 90. Defaults to 7. Set to 0 to disable. 
+ AutomaticBackupRetentionDays *float64 `json:"automaticBackupRetentionDays,omitempty" tf:"automatic_backup_retention_days,omitempty"` + + // The ID of the source backup to create the filesystem from. + BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"` + + // A boolean flag indicating whether tags on the file system should be copied to backups. Defaults to false. + CopyTagsToBackups *bool `json:"copyTagsToBackups,omitempty" tf:"copy_tags_to_backups,omitempty"` + + // The preferred time (in HH:MM format) to take daily automatic backups, in the UTC time zone. + DailyAutomaticBackupStartTime *string `json:"dailyAutomaticBackupStartTime,omitempty" tf:"daily_automatic_backup_start_time,omitempty"` + + // Specifies the file system deployment type, valid values are MULTI_AZ_1, SINGLE_AZ_1 and SINGLE_AZ_2. Default value is SINGLE_AZ_1. + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` + + // The SSD IOPS configuration for the Amazon FSx for Windows File Server file system. See Disk Iops Configuration below. + DiskIopsConfiguration *WindowsFileSystemDiskIopsConfigurationInitParameters `json:"diskIopsConfiguration,omitempty" tf:"disk_iops_configuration,omitempty"` + + // ARN for the KMS Key to encrypt the file system at rest. Defaults to an AWS managed KMS Key. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. 
+ // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Specifies the subnet in which you want the preferred file server to be located. Required for when deployment type is MULTI_AZ_1. + PreferredSubnetID *string `json:"preferredSubnetId,omitempty" tf:"preferred_subnet_id,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Configuration block that Amazon FSx uses to join the Windows File Server instance to your self-managed (including on-premises) Microsoft Active Directory (AD) directory. Cannot be specified with active_directory_id. See Self-Managed Active Directory below. + SelfManagedActiveDirectory *SelfManagedActiveDirectoryInitParameters `json:"selfManagedActiveDirectory,omitempty" tf:"self_managed_active_directory,omitempty"` + + // When enabled, will skip the default final backup taken when the file system is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to false. 
+ SkipFinalBackup *bool `json:"skipFinalBackup,omitempty" tf:"skip_final_backup,omitempty"` + + // Storage capacity (GiB) of the file system. Minimum of 32 and maximum of 65536. If the storage type is set to HDD the minimum value is 2000. Required when not creating filesystem for a backup. + StorageCapacity *float64 `json:"storageCapacity,omitempty" tf:"storage_capacity,omitempty"` + + // Specifies the storage type, Valid values are SSD and HDD. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types. Default value is SSD. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of IDs for the subnets that the file system will be accessible from. To specify more than a single subnet set deployment_type to MULTI_AZ_1. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Throughput (megabytes per second) of the file system in power of 2 increments. Minimum of 8 and maximum of 2048. + ThroughputCapacity *float64 `json:"throughputCapacity,omitempty" tf:"throughput_capacity,omitempty"` + + // The preferred start time (in d:HH:MM format) to perform weekly maintenance, in the UTC time zone. 
+ WeeklyMaintenanceStartTime *string `json:"weeklyMaintenanceStartTime,omitempty" tf:"weekly_maintenance_start_time,omitempty"` +} + +type WindowsFileSystemObservation struct { + + // The ID for an existing Microsoft Active Directory instance that the file system should join when it's created. Cannot be specified with self_managed_active_directory. + ActiveDirectoryID *string `json:"activeDirectoryId,omitempty" tf:"active_directory_id,omitempty"` + + // An array DNS alias names that you want to associate with the Amazon FSx file system. For more information, see Working with DNS Aliases + // +listType=set + Aliases []*string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // Amazon Resource Name of the file system. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system. See Audit Log Configuration below. + AuditLogConfiguration *AuditLogConfigurationObservation `json:"auditLogConfiguration,omitempty" tf:"audit_log_configuration,omitempty"` + + // The number of days to retain automatic backups. Minimum of 0 and maximum of 90. Defaults to 7. Set to 0 to disable. + AutomaticBackupRetentionDays *float64 `json:"automaticBackupRetentionDays,omitempty" tf:"automatic_backup_retention_days,omitempty"` + + // The ID of the source backup to create the filesystem from. + BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"` + + // A boolean flag indicating whether tags on the file system should be copied to backups. Defaults to false. 
+ CopyTagsToBackups *bool `json:"copyTagsToBackups,omitempty" tf:"copy_tags_to_backups,omitempty"` + + // DNS name for the file system, e.g., fs-12345678.corp.example.com (domain name matching the Active Directory domain name) + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // The preferred time (in HH:MM format) to take daily automatic backups, in the UTC time zone. + DailyAutomaticBackupStartTime *string `json:"dailyAutomaticBackupStartTime,omitempty" tf:"daily_automatic_backup_start_time,omitempty"` + + // Specifies the file system deployment type, valid values are MULTI_AZ_1, SINGLE_AZ_1 and SINGLE_AZ_2. Default value is SINGLE_AZ_1. + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` + + // The SSD IOPS configuration for the Amazon FSx for Windows File Server file system. See Disk Iops Configuration below. + DiskIopsConfiguration *WindowsFileSystemDiskIopsConfigurationObservation `json:"diskIopsConfiguration,omitempty" tf:"disk_iops_configuration,omitempty"` + + // Identifier of the file system (e.g. fs-12345678). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ARN for the KMS Key to encrypt the file system at rest. Defaults to an AWS managed KMS Key. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Set of Elastic Network Interface identifiers from which the file system is accessible. + // +listType=set + NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // AWS account identifier that created the file system. + OwnerID *string `json:"ownerId,omitempty" tf:"owner_id,omitempty"` + + // The IP address of the primary, or preferred, file server. + PreferredFileServerIP *string `json:"preferredFileServerIp,omitempty" tf:"preferred_file_server_ip,omitempty"` + + // Specifies the subnet in which you want the preferred file server to be located. Required for when deployment type is MULTI_AZ_1. 
+ PreferredSubnetID *string `json:"preferredSubnetId,omitempty" tf:"preferred_subnet_id,omitempty"` + + // For MULTI_AZ_1 deployment types, use this endpoint when performing administrative tasks on the file system using Amazon FSx Remote PowerShell. For SINGLE_AZ_1 deployment types, this is the DNS name of the file system. + RemoteAdministrationEndpoint *string `json:"remoteAdministrationEndpoint,omitempty" tf:"remote_administration_endpoint,omitempty"` + + // A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Configuration block that Amazon FSx uses to join the Windows File Server instance to your self-managed (including on-premises) Microsoft Active Directory (AD) directory. Cannot be specified with active_directory_id. See Self-Managed Active Directory below. + SelfManagedActiveDirectory *SelfManagedActiveDirectoryObservation `json:"selfManagedActiveDirectory,omitempty" tf:"self_managed_active_directory,omitempty"` + + // When enabled, will skip the default final backup taken when the file system is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to false. + SkipFinalBackup *bool `json:"skipFinalBackup,omitempty" tf:"skip_final_backup,omitempty"` + + // Storage capacity (GiB) of the file system. Minimum of 32 and maximum of 65536. If the storage type is set to HDD the minimum value is 2000. Required when not creating filesystem for a backup. + StorageCapacity *float64 `json:"storageCapacity,omitempty" tf:"storage_capacity,omitempty"` + + // Specifies the storage type, Valid values are SSD and HDD. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types. Default value is SSD. 
+ StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // A list of IDs for the subnets that the file system will be accessible from. To specify more than a single subnet set deployment_type to MULTI_AZ_1. + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Throughput (megabytes per second) of the file system in power of 2 increments. Minimum of 8 and maximum of 2048. + ThroughputCapacity *float64 `json:"throughputCapacity,omitempty" tf:"throughput_capacity,omitempty"` + + // Identifier of the Virtual Private Cloud for the file system. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // The preferred start time (in d:HH:MM format) to perform weekly maintenance, in the UTC time zone. + WeeklyMaintenanceStartTime *string `json:"weeklyMaintenanceStartTime,omitempty" tf:"weekly_maintenance_start_time,omitempty"` +} + +type WindowsFileSystemParameters struct { + + // The ID for an existing Microsoft Active Directory instance that the file system should join when it's created. Cannot be specified with self_managed_active_directory. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ActiveDirectoryID *string `json:"activeDirectoryId,omitempty" tf:"active_directory_id,omitempty"` + + // Reference to a Directory in ds to populate activeDirectoryId. 
+ // +kubebuilder:validation:Optional + ActiveDirectoryIDRef *v1.Reference `json:"activeDirectoryIdRef,omitempty" tf:"-"` + + // Selector for a Directory in ds to populate activeDirectoryId. + // +kubebuilder:validation:Optional + ActiveDirectoryIDSelector *v1.Selector `json:"activeDirectoryIdSelector,omitempty" tf:"-"` + + // An array DNS alias names that you want to associate with the Amazon FSx file system. For more information, see Working with DNS Aliases + // +kubebuilder:validation:Optional + // +listType=set + Aliases []*string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system. See Audit Log Configuration below. + // +kubebuilder:validation:Optional + AuditLogConfiguration *AuditLogConfigurationParameters `json:"auditLogConfiguration,omitempty" tf:"audit_log_configuration,omitempty"` + + // The number of days to retain automatic backups. Minimum of 0 and maximum of 90. Defaults to 7. Set to 0 to disable. + // +kubebuilder:validation:Optional + AutomaticBackupRetentionDays *float64 `json:"automaticBackupRetentionDays,omitempty" tf:"automatic_backup_retention_days,omitempty"` + + // The ID of the source backup to create the filesystem from. + // +kubebuilder:validation:Optional + BackupID *string `json:"backupId,omitempty" tf:"backup_id,omitempty"` + + // A boolean flag indicating whether tags on the file system should be copied to backups. Defaults to false. + // +kubebuilder:validation:Optional + CopyTagsToBackups *bool `json:"copyTagsToBackups,omitempty" tf:"copy_tags_to_backups,omitempty"` + + // The preferred time (in HH:MM format) to take daily automatic backups, in the UTC time zone. 
+ // +kubebuilder:validation:Optional + DailyAutomaticBackupStartTime *string `json:"dailyAutomaticBackupStartTime,omitempty" tf:"daily_automatic_backup_start_time,omitempty"` + + // Specifies the file system deployment type, valid values are MULTI_AZ_1, SINGLE_AZ_1 and SINGLE_AZ_2. Default value is SINGLE_AZ_1. + // +kubebuilder:validation:Optional + DeploymentType *string `json:"deploymentType,omitempty" tf:"deployment_type,omitempty"` + + // The SSD IOPS configuration for the Amazon FSx for Windows File Server file system. See Disk Iops Configuration below. + // +kubebuilder:validation:Optional + DiskIopsConfiguration *WindowsFileSystemDiskIopsConfigurationParameters `json:"diskIopsConfiguration,omitempty" tf:"disk_iops_configuration,omitempty"` + + // ARN for the KMS Key to encrypt the file system at rest. Defaults to an AWS managed KMS Key. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Specifies the subnet in which you want the preferred file server to be located. Required for when deployment type is MULTI_AZ_1. + // +kubebuilder:validation:Optional + PreferredSubnetID *string `json:"preferredSubnetId,omitempty" tf:"preferred_subnet_id,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Configuration block that Amazon FSx uses to join the Windows File Server instance to your self-managed (including on-premises) Microsoft Active Directory (AD) directory. Cannot be specified with active_directory_id. See Self-Managed Active Directory below. + // +kubebuilder:validation:Optional + SelfManagedActiveDirectory *SelfManagedActiveDirectoryParameters `json:"selfManagedActiveDirectory,omitempty" tf:"self_managed_active_directory,omitempty"` + + // When enabled, will skip the default final backup taken when the file system is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to false. + // +kubebuilder:validation:Optional + SkipFinalBackup *bool `json:"skipFinalBackup,omitempty" tf:"skip_final_backup,omitempty"` + + // Storage capacity (GiB) of the file system. 
Minimum of 32 and maximum of 65536. If the storage type is set to HDD the minimum value is 2000. Required when not creating filesystem for a backup. + // +kubebuilder:validation:Optional + StorageCapacity *float64 `json:"storageCapacity,omitempty" tf:"storage_capacity,omitempty"` + + // Specifies the storage type, Valid values are SSD and HDD. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types. Default value is SSD. + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of IDs for the subnets that the file system will be accessible from. To specify more than a single subnet set deployment_type to MULTI_AZ_1. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Throughput (megabytes per second) of the file system in power of 2 increments. Minimum of 8 and maximum of 2048. + // +kubebuilder:validation:Optional + ThroughputCapacity *float64 `json:"throughputCapacity,omitempty" tf:"throughput_capacity,omitempty"` + + // The preferred start time (in d:HH:MM format) to perform weekly maintenance, in the UTC time zone. 
+ // +kubebuilder:validation:Optional + WeeklyMaintenanceStartTime *string `json:"weeklyMaintenanceStartTime,omitempty" tf:"weekly_maintenance_start_time,omitempty"` +} + +// WindowsFileSystemSpec defines the desired state of WindowsFileSystem +type WindowsFileSystemSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WindowsFileSystemParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WindowsFileSystemInitParameters `json:"initProvider,omitempty"` +} + +// WindowsFileSystemStatus defines the observed state of WindowsFileSystem. +type WindowsFileSystemStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WindowsFileSystemObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WindowsFileSystem is the Schema for the WindowsFileSystems API. Manages a FSx Windows File System. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type WindowsFileSystem struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.throughputCapacity) || (has(self.initProvider) && has(self.initProvider.throughputCapacity))",message="spec.forProvider.throughputCapacity is a required parameter" + Spec WindowsFileSystemSpec `json:"spec"` + Status WindowsFileSystemStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WindowsFileSystemList contains a list of WindowsFileSystems +type WindowsFileSystemList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WindowsFileSystem `json:"items"` +} + +// Repository type metadata. +var ( + WindowsFileSystem_Kind = "WindowsFileSystem" + WindowsFileSystem_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WindowsFileSystem_Kind}.String() + WindowsFileSystem_KindAPIVersion = WindowsFileSystem_Kind + "." 
+ CRDGroupVersion.String() + WindowsFileSystem_GroupVersionKind = CRDGroupVersion.WithKind(WindowsFileSystem_Kind) +) + +func init() { + SchemeBuilder.Register(&WindowsFileSystem{}, &WindowsFileSystemList{}) +} diff --git a/apis/gamelift/v1beta1/zz_generated.conversion_hubs.go b/apis/gamelift/v1beta1/zz_generated.conversion_hubs.go index 73e9d0f45c..a58a6dda32 100755 --- a/apis/gamelift/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/gamelift/v1beta1/zz_generated.conversion_hubs.go @@ -6,17 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Alias) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Build) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Fleet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *GameSessionQueue) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Script) Hub() {} diff --git a/apis/gamelift/v1beta1/zz_generated.conversion_spokes.go b/apis/gamelift/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..b6718d88cd --- /dev/null +++ b/apis/gamelift/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Alias to the hub type. 
+func (tr *Alias) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Alias type. +func (tr *Alias) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Build to the hub type. +func (tr *Build) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Build type. +func (tr *Build) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Fleet to the hub type. 
+func (tr *Fleet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Fleet type. +func (tr *Fleet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Script to the hub type. +func (tr *Script) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Script type. 
+func (tr *Script) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/gamelift/v1beta2/zz_alias_terraformed.go b/apis/gamelift/v1beta2/zz_alias_terraformed.go new file mode 100755 index 0000000000..b3d7b6663f --- /dev/null +++ b/apis/gamelift/v1beta2/zz_alias_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Alias +func (mg *Alias) GetTerraformResourceType() string { + return "aws_gamelift_alias" +} + +// GetConnectionDetailsMapping for this Alias +func (tr *Alias) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Alias +func (tr *Alias) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Alias +func (tr *Alias) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Alias +func (tr *Alias) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of 
this Alias +func (tr *Alias) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Alias +func (tr *Alias) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Alias +func (tr *Alias) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Alias +func (tr *Alias) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Alias using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Alias) LateInitialize(attrs []byte) (bool, error) { + params := &AliasParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Alias) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/gamelift/v1beta2/zz_alias_types.go b/apis/gamelift/v1beta2/zz_alias_types.go new file mode 100755 index 0000000000..60daab45e0 --- /dev/null +++ b/apis/gamelift/v1beta2/zz_alias_types.go @@ -0,0 +1,182 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AliasInitParameters struct { + + // Description of the alias. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the alias. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the fleet and/or routing type to use for the alias. + RoutingStrategy *RoutingStrategyInitParameters `json:"routingStrategy,omitempty" tf:"routing_strategy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AliasObservation struct { + + // Alias ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description of the alias. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Alias ID. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the alias. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the fleet and/or routing type to use for the alias. + RoutingStrategy *RoutingStrategyObservation `json:"routingStrategy,omitempty" tf:"routing_strategy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type AliasParameters struct { + + // Description of the alias. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the alias. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies the fleet and/or routing type to use for the alias. + // +kubebuilder:validation:Optional + RoutingStrategy *RoutingStrategyParameters `json:"routingStrategy,omitempty" tf:"routing_strategy,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type RoutingStrategyInitParameters struct { + + // ID of the GameLift Fleet to point the alias to. + FleetID *string `json:"fleetId,omitempty" tf:"fleet_id,omitempty"` + + // Message text to be used with the TERMINAL routing strategy. 
+ Message *string `json:"message,omitempty" tf:"message,omitempty"` + + // Type of routing strategyE.g., SIMPLE or TERMINAL + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RoutingStrategyObservation struct { + + // ID of the GameLift Fleet to point the alias to. + FleetID *string `json:"fleetId,omitempty" tf:"fleet_id,omitempty"` + + // Message text to be used with the TERMINAL routing strategy. + Message *string `json:"message,omitempty" tf:"message,omitempty"` + + // Type of routing strategyE.g., SIMPLE or TERMINAL + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RoutingStrategyParameters struct { + + // ID of the GameLift Fleet to point the alias to. + // +kubebuilder:validation:Optional + FleetID *string `json:"fleetId,omitempty" tf:"fleet_id,omitempty"` + + // Message text to be used with the TERMINAL routing strategy. + // +kubebuilder:validation:Optional + Message *string `json:"message,omitempty" tf:"message,omitempty"` + + // Type of routing strategyE.g., SIMPLE or TERMINAL + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// AliasSpec defines the desired state of Alias +type AliasSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AliasParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider AliasInitParameters `json:"initProvider,omitempty"` +} + +// AliasStatus defines the observed state of Alias. +type AliasStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AliasObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Alias is the Schema for the Aliass API. Provides a GameLift Alias resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Alias struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.routingStrategy) || (has(self.initProvider) && has(self.initProvider.routingStrategy))",message="spec.forProvider.routingStrategy is a required parameter" + Spec AliasSpec `json:"spec"` + Status AliasStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AliasList contains a list of Aliass +type AliasList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Alias `json:"items"` +} + +// Repository type 
metadata. +var ( + Alias_Kind = "Alias" + Alias_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Alias_Kind}.String() + Alias_KindAPIVersion = Alias_Kind + "." + CRDGroupVersion.String() + Alias_GroupVersionKind = CRDGroupVersion.WithKind(Alias_Kind) +) + +func init() { + SchemeBuilder.Register(&Alias{}, &AliasList{}) +} diff --git a/apis/gamelift/v1beta2/zz_build_terraformed.go b/apis/gamelift/v1beta2/zz_build_terraformed.go new file mode 100755 index 0000000000..c6079cf780 --- /dev/null +++ b/apis/gamelift/v1beta2/zz_build_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Build +func (mg *Build) GetTerraformResourceType() string { + return "aws_gamelift_build" +} + +// GetConnectionDetailsMapping for this Build +func (tr *Build) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Build +func (tr *Build) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Build +func (tr *Build) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Build +func (tr *Build) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Build +func (tr *Build) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Build +func (tr *Build) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Build +func (tr *Build) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Build +func (tr *Build) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Build using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Build) LateInitialize(attrs []byte) (bool, error) { + params := &BuildParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Build) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/gamelift/v1beta2/zz_build_types.go b/apis/gamelift/v1beta2/zz_build_types.go new file mode 100755 index 0000000000..46c3831b4d --- /dev/null +++ b/apis/gamelift/v1beta2/zz_build_types.go @@ -0,0 +1,261 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BuildInitParameters struct { + + // Name of the build + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operating system that the game server binaries are built to run on. Valid values: WINDOWS_2012, AMAZON_LINUX, AMAZON_LINUX_2, WINDOWS_2016, AMAZON_LINUX_2023. + OperatingSystem *string `json:"operatingSystem,omitempty" tf:"operating_system,omitempty"` + + // Information indicating where your game build files are stored. See below. + StorageLocation *StorageLocationInitParameters `json:"storageLocation,omitempty" tf:"storage_location,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Version that is associated with this build. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type BuildObservation struct { + + // GameLift Build ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // GameLift Build ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the build + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operating system that the game server binaries are built to run on. Valid values: WINDOWS_2012, AMAZON_LINUX, AMAZON_LINUX_2, WINDOWS_2016, AMAZON_LINUX_2023. + OperatingSystem *string `json:"operatingSystem,omitempty" tf:"operating_system,omitempty"` + + // Information indicating where your game build files are stored. See below. + StorageLocation *StorageLocationObservation `json:"storageLocation,omitempty" tf:"storage_location,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Version that is associated with this build. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type BuildParameters struct { + + // Name of the build + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operating system that the game server binaries are built to run on. Valid values: WINDOWS_2012, AMAZON_LINUX, AMAZON_LINUX_2, WINDOWS_2016, AMAZON_LINUX_2023. + // +kubebuilder:validation:Optional + OperatingSystem *string `json:"operatingSystem,omitempty" tf:"operating_system,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Information indicating where your game build files are stored. See below. 
+ // +kubebuilder:validation:Optional + StorageLocation *StorageLocationParameters `json:"storageLocation,omitempty" tf:"storage_location,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Version that is associated with this build. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type StorageLocationInitParameters struct { + + // Name of your S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name of the zip file containing your build files. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Reference to a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeyRef *v1.Reference `json:"keyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeySelector *v1.Selector `json:"keySelector,omitempty" tf:"-"` + + // A specific version of the file. If not set, the latest version of the file is retrieved. + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` + + // ARN of the access role that allows Amazon GameLift to access your S3 bucket. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type StorageLocationObservation struct { + + // Name of your S3 bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Name of the zip file containing your build files. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A specific version of the file. If not set, the latest version of the file is retrieved. + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` + + // ARN of the access role that allows Amazon GameLift to access your S3 bucket. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type StorageLocationParameters struct { + + // Name of your S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name of the zip file containing your build files. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Reference to a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeyRef *v1.Reference `json:"keyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeySelector *v1.Selector `json:"keySelector,omitempty" tf:"-"` + + // A specific version of the file. If not set, the latest version of the file is retrieved. + // +kubebuilder:validation:Optional + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` + + // ARN of the access role that allows Amazon GameLift to access your S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +// BuildSpec defines the desired state of Build +type BuildSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BuildParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BuildInitParameters `json:"initProvider,omitempty"` +} + +// BuildStatus defines the observed state of Build. +type BuildStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BuildObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Build is the Schema for the Builds API. Provides a GameLift Build resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Build struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.operatingSystem) || (has(self.initProvider) && 
has(self.initProvider.operatingSystem))",message="spec.forProvider.operatingSystem is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageLocation) || (has(self.initProvider) && has(self.initProvider.storageLocation))",message="spec.forProvider.storageLocation is a required parameter" + Spec BuildSpec `json:"spec"` + Status BuildStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BuildList contains a list of Builds +type BuildList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Build `json:"items"` +} + +// Repository type metadata. +var ( + Build_Kind = "Build" + Build_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Build_Kind}.String() + Build_KindAPIVersion = Build_Kind + "." + CRDGroupVersion.String() + Build_GroupVersionKind = CRDGroupVersion.WithKind(Build_Kind) +) + +func init() { + SchemeBuilder.Register(&Build{}, &BuildList{}) +} diff --git a/apis/gamelift/v1beta2/zz_fleet_terraformed.go b/apis/gamelift/v1beta2/zz_fleet_terraformed.go new file mode 100755 index 0000000000..c0398408e7 --- /dev/null +++ b/apis/gamelift/v1beta2/zz_fleet_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Fleet +func (mg *Fleet) GetTerraformResourceType() string { + return "aws_gamelift_fleet" +} + +// GetConnectionDetailsMapping for this Fleet +func (tr *Fleet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Fleet +func (tr *Fleet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Fleet +func (tr *Fleet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Fleet +func (tr *Fleet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Fleet +func (tr *Fleet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Fleet +func (tr *Fleet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Fleet +func (tr *Fleet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Fleet +func 
(tr *Fleet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Fleet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Fleet) LateInitialize(attrs []byte) (bool, error) { + params := &FleetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Fleet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/gamelift/v1beta2/zz_fleet_types.go b/apis/gamelift/v1beta2/zz_fleet_types.go new file mode 100755 index 0000000000..b5cb4c0b37 --- /dev/null +++ b/apis/gamelift/v1beta2/zz_fleet_types.go @@ -0,0 +1,467 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CertificateConfigurationInitParameters struct { + + // Indicates whether a TLS/SSL certificate is generated for a fleet. Valid values are DISABLED and GENERATED. Default value is DISABLED. + CertificateType *string `json:"certificateType,omitempty" tf:"certificate_type,omitempty"` +} + +type CertificateConfigurationObservation struct { + + // Indicates whether a TLS/SSL certificate is generated for a fleet. Valid values are DISABLED and GENERATED. Default value is DISABLED. + CertificateType *string `json:"certificateType,omitempty" tf:"certificate_type,omitempty"` +} + +type CertificateConfigurationParameters struct { + + // Indicates whether a TLS/SSL certificate is generated for a fleet. Valid values are DISABLED and GENERATED. Default value is DISABLED. + // +kubebuilder:validation:Optional + CertificateType *string `json:"certificateType,omitempty" tf:"certificate_type,omitempty"` +} + +type EC2InboundPermissionInitParameters struct { + + // Starting value for a range of allowed port numbers. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Range of allowed IP addresses expressed in CIDR notationE.g., 000.000.000.000/[subnet mask] or 0.0.0.0/[subnet mask]. 
+ IPRange *string `json:"ipRange,omitempty" tf:"ip_range,omitempty"` + + // Network communication protocol used by the fleetE.g., TCP or UDP + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Ending value for a range of allowed port numbers. Port numbers are end-inclusive. This value must be higher than from_port. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type EC2InboundPermissionObservation struct { + + // Starting value for a range of allowed port numbers. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // Range of allowed IP addresses expressed in CIDR notationE.g., 000.000.000.000/[subnet mask] or 0.0.0.0/[subnet mask]. + IPRange *string `json:"ipRange,omitempty" tf:"ip_range,omitempty"` + + // Network communication protocol used by the fleetE.g., TCP or UDP + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Ending value for a range of allowed port numbers. Port numbers are end-inclusive. This value must be higher than from_port. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type EC2InboundPermissionParameters struct { + + // Starting value for a range of allowed port numbers. + // +kubebuilder:validation:Optional + FromPort *float64 `json:"fromPort" tf:"from_port,omitempty"` + + // Range of allowed IP addresses expressed in CIDR notationE.g., 000.000.000.000/[subnet mask] or 0.0.0.0/[subnet mask]. + // +kubebuilder:validation:Optional + IPRange *string `json:"ipRange" tf:"ip_range,omitempty"` + + // Network communication protocol used by the fleetE.g., TCP or UDP + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Ending value for a range of allowed port numbers. Port numbers are end-inclusive. This value must be higher than from_port. 
+ // +kubebuilder:validation:Optional + ToPort *float64 `json:"toPort" tf:"to_port,omitempty"` +} + +type FleetInitParameters struct { + + // ID of the GameLift Build to be deployed on the fleet. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/gamelift/v1beta2.Build + BuildID *string `json:"buildId,omitempty" tf:"build_id,omitempty"` + + // Reference to a Build in gamelift to populate buildId. + // +kubebuilder:validation:Optional + BuildIDRef *v1.Reference `json:"buildIdRef,omitempty" tf:"-"` + + // Selector for a Build in gamelift to populate buildId. + // +kubebuilder:validation:Optional + BuildIDSelector *v1.Selector `json:"buildIdSelector,omitempty" tf:"-"` + + // Prompts GameLift to generate a TLS/SSL certificate for the fleet. See certificate_configuration. + CertificateConfiguration *CertificateConfigurationInitParameters `json:"certificateConfiguration,omitempty" tf:"certificate_configuration,omitempty"` + + // Human-readable description of the fleet. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Range of IP addresses and port settings that permit inbound traffic to access server processes running on the fleet. See below. + EC2InboundPermission []EC2InboundPermissionInitParameters `json:"ec2InboundPermission,omitempty" tf:"ec2_inbound_permission,omitempty"` + + // Name of an EC2 instance typeE.g., t2.micro + EC2InstanceType *string `json:"ec2InstanceType,omitempty" tf:"ec2_instance_type,omitempty"` + + // Type of fleet. This value must be ON_DEMAND or SPOT. Defaults to ON_DEMAND. + FleetType *string `json:"fleetType,omitempty" tf:"fleet_type,omitempty"` + + // ARN of an IAM role that instances in the fleet can assume. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + InstanceRoleArn *string `json:"instanceRoleArn,omitempty" tf:"instance_role_arn,omitempty"` + + // Reference to a Role in iam to populate instanceRoleArn. + // +kubebuilder:validation:Optional + InstanceRoleArnRef *v1.Reference `json:"instanceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate instanceRoleArn. + // +kubebuilder:validation:Optional + InstanceRoleArnSelector *v1.Selector `json:"instanceRoleArnSelector,omitempty" tf:"-"` + + // List of names of metric groups to add this fleet to. A metric group tracks metrics across all fleets in the group. Defaults to default. + MetricGroups []*string `json:"metricGroups,omitempty" tf:"metric_groups,omitempty"` + + // The name of the fleet. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Game session protection policy to apply to all instances in this fleetE.g., FullProtection. Defaults to NoProtection. + NewGameSessionProtectionPolicy *string `json:"newGameSessionProtectionPolicy,omitempty" tf:"new_game_session_protection_policy,omitempty"` + + // Policy that limits the number of game sessions an individual player can create over a span of time for this fleet. See below. + ResourceCreationLimitPolicy *ResourceCreationLimitPolicyInitParameters `json:"resourceCreationLimitPolicy,omitempty" tf:"resource_creation_limit_policy,omitempty"` + + // Instructions for launching server processes on each instance in the fleet. See below. + RuntimeConfiguration *RuntimeConfigurationInitParameters `json:"runtimeConfiguration,omitempty" tf:"runtime_configuration,omitempty"` + + // ID of the GameLift Script to be deployed on the fleet. + ScriptID *string `json:"scriptId,omitempty" tf:"script_id,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FleetObservation struct { + + // Fleet ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Build ARN. + BuildArn *string `json:"buildArn,omitempty" tf:"build_arn,omitempty"` + + // ID of the GameLift Build to be deployed on the fleet. + BuildID *string `json:"buildId,omitempty" tf:"build_id,omitempty"` + + // Prompts GameLift to generate a TLS/SSL certificate for the fleet. See certificate_configuration. + CertificateConfiguration *CertificateConfigurationObservation `json:"certificateConfiguration,omitempty" tf:"certificate_configuration,omitempty"` + + // Human-readable description of the fleet. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Range of IP addresses and port settings that permit inbound traffic to access server processes running on the fleet. See below. + EC2InboundPermission []EC2InboundPermissionObservation `json:"ec2InboundPermission,omitempty" tf:"ec2_inbound_permission,omitempty"` + + // Name of an EC2 instance typeE.g., t2.micro + EC2InstanceType *string `json:"ec2InstanceType,omitempty" tf:"ec2_instance_type,omitempty"` + + // Type of fleet. This value must be ON_DEMAND or SPOT. Defaults to ON_DEMAND. + FleetType *string `json:"fleetType,omitempty" tf:"fleet_type,omitempty"` + + // Fleet ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ARN of an IAM role that instances in the fleet can assume. + InstanceRoleArn *string `json:"instanceRoleArn,omitempty" tf:"instance_role_arn,omitempty"` + + LogPaths []*string `json:"logPaths,omitempty" tf:"log_paths,omitempty"` + + // List of names of metric groups to add this fleet to. A metric group tracks metrics across all fleets in the group. Defaults to default. + MetricGroups []*string `json:"metricGroups,omitempty" tf:"metric_groups,omitempty"` + + // The name of the fleet. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Game session protection policy to apply to all instances in this fleetE.g., FullProtection. Defaults to NoProtection. + NewGameSessionProtectionPolicy *string `json:"newGameSessionProtectionPolicy,omitempty" tf:"new_game_session_protection_policy,omitempty"` + + // Operating system of the fleet's computing resources. + OperatingSystem *string `json:"operatingSystem,omitempty" tf:"operating_system,omitempty"` + + // Policy that limits the number of game sessions an individual player can create over a span of time for this fleet. See below. + ResourceCreationLimitPolicy *ResourceCreationLimitPolicyObservation `json:"resourceCreationLimitPolicy,omitempty" tf:"resource_creation_limit_policy,omitempty"` + + // Instructions for launching server processes on each instance in the fleet. See below. + RuntimeConfiguration *RuntimeConfigurationObservation `json:"runtimeConfiguration,omitempty" tf:"runtime_configuration,omitempty"` + + // Script ARN. + ScriptArn *string `json:"scriptArn,omitempty" tf:"script_arn,omitempty"` + + // ID of the GameLift Script to be deployed on the fleet. + ScriptID *string `json:"scriptId,omitempty" tf:"script_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type FleetParameters struct { + + // ID of the GameLift Build to be deployed on the fleet. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/gamelift/v1beta2.Build + // +kubebuilder:validation:Optional + BuildID *string `json:"buildId,omitempty" tf:"build_id,omitempty"` + + // Reference to a Build in gamelift to populate buildId. 
+ // +kubebuilder:validation:Optional + BuildIDRef *v1.Reference `json:"buildIdRef,omitempty" tf:"-"` + + // Selector for a Build in gamelift to populate buildId. + // +kubebuilder:validation:Optional + BuildIDSelector *v1.Selector `json:"buildIdSelector,omitempty" tf:"-"` + + // Prompts GameLift to generate a TLS/SSL certificate for the fleet. See certificate_configuration. + // +kubebuilder:validation:Optional + CertificateConfiguration *CertificateConfigurationParameters `json:"certificateConfiguration,omitempty" tf:"certificate_configuration,omitempty"` + + // Human-readable description of the fleet. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Range of IP addresses and port settings that permit inbound traffic to access server processes running on the fleet. See below. + // +kubebuilder:validation:Optional + EC2InboundPermission []EC2InboundPermissionParameters `json:"ec2InboundPermission,omitempty" tf:"ec2_inbound_permission,omitempty"` + + // Name of an EC2 instance typeE.g., t2.micro + // +kubebuilder:validation:Optional + EC2InstanceType *string `json:"ec2InstanceType,omitempty" tf:"ec2_instance_type,omitempty"` + + // Type of fleet. This value must be ON_DEMAND or SPOT. Defaults to ON_DEMAND. + // +kubebuilder:validation:Optional + FleetType *string `json:"fleetType,omitempty" tf:"fleet_type,omitempty"` + + // ARN of an IAM role that instances in the fleet can assume. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + InstanceRoleArn *string `json:"instanceRoleArn,omitempty" tf:"instance_role_arn,omitempty"` + + // Reference to a Role in iam to populate instanceRoleArn. 
+ // +kubebuilder:validation:Optional + InstanceRoleArnRef *v1.Reference `json:"instanceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate instanceRoleArn. + // +kubebuilder:validation:Optional + InstanceRoleArnSelector *v1.Selector `json:"instanceRoleArnSelector,omitempty" tf:"-"` + + // List of names of metric groups to add this fleet to. A metric group tracks metrics across all fleets in the group. Defaults to default. + // +kubebuilder:validation:Optional + MetricGroups []*string `json:"metricGroups,omitempty" tf:"metric_groups,omitempty"` + + // The name of the fleet. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Game session protection policy to apply to all instances in this fleetE.g., FullProtection. Defaults to NoProtection. + // +kubebuilder:validation:Optional + NewGameSessionProtectionPolicy *string `json:"newGameSessionProtectionPolicy,omitempty" tf:"new_game_session_protection_policy,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Policy that limits the number of game sessions an individual player can create over a span of time for this fleet. See below. + // +kubebuilder:validation:Optional + ResourceCreationLimitPolicy *ResourceCreationLimitPolicyParameters `json:"resourceCreationLimitPolicy,omitempty" tf:"resource_creation_limit_policy,omitempty"` + + // Instructions for launching server processes on each instance in the fleet. See below. + // +kubebuilder:validation:Optional + RuntimeConfiguration *RuntimeConfigurationParameters `json:"runtimeConfiguration,omitempty" tf:"runtime_configuration,omitempty"` + + // ID of the GameLift Script to be deployed on the fleet. + // +kubebuilder:validation:Optional + ScriptID *string `json:"scriptId,omitempty" tf:"script_id,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ResourceCreationLimitPolicyInitParameters struct { + + // Maximum number of game sessions that an individual can create during the policy period. + NewGameSessionsPerCreator *float64 `json:"newGameSessionsPerCreator,omitempty" tf:"new_game_sessions_per_creator,omitempty"` + + // Time span used in evaluating the resource creation limit policy. + PolicyPeriodInMinutes *float64 `json:"policyPeriodInMinutes,omitempty" tf:"policy_period_in_minutes,omitempty"` +} + +type ResourceCreationLimitPolicyObservation struct { + + // Maximum number of game sessions that an individual can create during the policy period. + NewGameSessionsPerCreator *float64 `json:"newGameSessionsPerCreator,omitempty" tf:"new_game_sessions_per_creator,omitempty"` + + // Time span used in evaluating the resource creation limit policy. + PolicyPeriodInMinutes *float64 `json:"policyPeriodInMinutes,omitempty" tf:"policy_period_in_minutes,omitempty"` +} + +type ResourceCreationLimitPolicyParameters struct { + + // Maximum number of game sessions that an individual can create during the policy period. + // +kubebuilder:validation:Optional + NewGameSessionsPerCreator *float64 `json:"newGameSessionsPerCreator,omitempty" tf:"new_game_sessions_per_creator,omitempty"` + + // Time span used in evaluating the resource creation limit policy. + // +kubebuilder:validation:Optional + PolicyPeriodInMinutes *float64 `json:"policyPeriodInMinutes,omitempty" tf:"policy_period_in_minutes,omitempty"` +} + +type RuntimeConfigurationInitParameters struct { + + // Maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. 
+ GameSessionActivationTimeoutSeconds *float64 `json:"gameSessionActivationTimeoutSeconds,omitempty" tf:"game_session_activation_timeout_seconds,omitempty"` + + // Maximum number of game sessions with status ACTIVATING to allow on an instance simultaneously. + MaxConcurrentGameSessionActivations *float64 `json:"maxConcurrentGameSessionActivations,omitempty" tf:"max_concurrent_game_session_activations,omitempty"` + + // Collection of server process configurations that describe which server processes to run on each instance in a fleet. See below. + ServerProcess []ServerProcessInitParameters `json:"serverProcess,omitempty" tf:"server_process,omitempty"` +} + +type RuntimeConfigurationObservation struct { + + // Maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. + GameSessionActivationTimeoutSeconds *float64 `json:"gameSessionActivationTimeoutSeconds,omitempty" tf:"game_session_activation_timeout_seconds,omitempty"` + + // Maximum number of game sessions with status ACTIVATING to allow on an instance simultaneously. + MaxConcurrentGameSessionActivations *float64 `json:"maxConcurrentGameSessionActivations,omitempty" tf:"max_concurrent_game_session_activations,omitempty"` + + // Collection of server process configurations that describe which server processes to run on each instance in a fleet. See below. + ServerProcess []ServerProcessObservation `json:"serverProcess,omitempty" tf:"server_process,omitempty"` +} + +type RuntimeConfigurationParameters struct { + + // Maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. + // +kubebuilder:validation:Optional + GameSessionActivationTimeoutSeconds *float64 `json:"gameSessionActivationTimeoutSeconds,omitempty" tf:"game_session_activation_timeout_seconds,omitempty"` + + // Maximum number of game sessions with status ACTIVATING to allow on an instance simultaneously. 
+ // +kubebuilder:validation:Optional + MaxConcurrentGameSessionActivations *float64 `json:"maxConcurrentGameSessionActivations,omitempty" tf:"max_concurrent_game_session_activations,omitempty"` + + // Collection of server process configurations that describe which server processes to run on each instance in a fleet. See below. + // +kubebuilder:validation:Optional + ServerProcess []ServerProcessParameters `json:"serverProcess,omitempty" tf:"server_process,omitempty"` +} + +type ServerProcessInitParameters struct { + + // Number of server processes using this configuration to run concurrently on an instance. + ConcurrentExecutions *float64 `json:"concurrentExecutions,omitempty" tf:"concurrent_executions,omitempty"` + + // Location of the server executable in a game build. All game builds are installed on instances at the root : for Windows instances C:\game, and for Linux instances /local/game. + LaunchPath *string `json:"launchPath,omitempty" tf:"launch_path,omitempty"` + + // Optional list of parameters to pass to the server executable on launch. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type ServerProcessObservation struct { + + // Number of server processes using this configuration to run concurrently on an instance. + ConcurrentExecutions *float64 `json:"concurrentExecutions,omitempty" tf:"concurrent_executions,omitempty"` + + // Location of the server executable in a game build. All game builds are installed on instances at the root : for Windows instances C:\game, and for Linux instances /local/game. + LaunchPath *string `json:"launchPath,omitempty" tf:"launch_path,omitempty"` + + // Optional list of parameters to pass to the server executable on launch. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type ServerProcessParameters struct { + + // Number of server processes using this configuration to run concurrently on an instance. 
+ // +kubebuilder:validation:Optional + ConcurrentExecutions *float64 `json:"concurrentExecutions" tf:"concurrent_executions,omitempty"` + + // Location of the server executable in a game build. All game builds are installed on instances at the root : for Windows instances C:\game, and for Linux instances /local/game. + // +kubebuilder:validation:Optional + LaunchPath *string `json:"launchPath" tf:"launch_path,omitempty"` + + // Optional list of parameters to pass to the server executable on launch. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +// FleetSpec defines the desired state of Fleet +type FleetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FleetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FleetInitParameters `json:"initProvider,omitempty"` +} + +// FleetStatus defines the observed state of Fleet. +type FleetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FleetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Fleet is the Schema for the Fleets API. Provides a GameLift Fleet resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws},path=fleet +type Fleet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ec2InstanceType) || (has(self.initProvider) && has(self.initProvider.ec2InstanceType))",message="spec.forProvider.ec2InstanceType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec FleetSpec `json:"spec"` + Status FleetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FleetList contains a list of Fleets +type FleetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Fleet `json:"items"` +} + +// Repository type metadata. +var ( + Fleet_Kind = "Fleet" + Fleet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Fleet_Kind}.String() + Fleet_KindAPIVersion = Fleet_Kind + "." 
+ CRDGroupVersion.String() + Fleet_GroupVersionKind = CRDGroupVersion.WithKind(Fleet_Kind) +) + +func init() { + SchemeBuilder.Register(&Fleet{}, &FleetList{}) +} diff --git a/apis/gamelift/v1beta2/zz_generated.conversion_hubs.go b/apis/gamelift/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2beffdb53d --- /dev/null +++ b/apis/gamelift/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Alias) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Build) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Fleet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Script) Hub() {} diff --git a/apis/gamelift/v1beta2/zz_generated.deepcopy.go b/apis/gamelift/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..946ef903c1 --- /dev/null +++ b/apis/gamelift/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2182 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Alias) DeepCopyInto(out *Alias) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alias. 
+func (in *Alias) DeepCopy() *Alias { + if in == nil { + return nil + } + out := new(Alias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Alias) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasInitParameters) DeepCopyInto(out *AliasInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoutingStrategy != nil { + in, out := &in.RoutingStrategy, &out.RoutingStrategy + *out = new(RoutingStrategyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasInitParameters. +func (in *AliasInitParameters) DeepCopy() *AliasInitParameters { + if in == nil { + return nil + } + out := new(AliasInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AliasList) DeepCopyInto(out *AliasList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Alias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasList. +func (in *AliasList) DeepCopy() *AliasList { + if in == nil { + return nil + } + out := new(AliasList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AliasList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasObservation) DeepCopyInto(out *AliasObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoutingStrategy != nil { + in, out := &in.RoutingStrategy, &out.RoutingStrategy + *out = new(RoutingStrategyObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var 
outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasObservation. +func (in *AliasObservation) DeepCopy() *AliasObservation { + if in == nil { + return nil + } + out := new(AliasObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasParameters) DeepCopyInto(out *AliasParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoutingStrategy != nil { + in, out := &in.RoutingStrategy, &out.RoutingStrategy + *out = new(RoutingStrategyParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasParameters. +func (in *AliasParameters) DeepCopy() *AliasParameters { + if in == nil { + return nil + } + out := new(AliasParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AliasSpec) DeepCopyInto(out *AliasSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasSpec. +func (in *AliasSpec) DeepCopy() *AliasSpec { + if in == nil { + return nil + } + out := new(AliasSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasStatus) DeepCopyInto(out *AliasStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasStatus. +func (in *AliasStatus) DeepCopy() *AliasStatus { + if in == nil { + return nil + } + out := new(AliasStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Build) DeepCopyInto(out *Build) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. +func (in *Build) DeepCopy() *Build { + if in == nil { + return nil + } + out := new(Build) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Build) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildInitParameters) DeepCopyInto(out *BuildInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OperatingSystem != nil { + in, out := &in.OperatingSystem, &out.OperatingSystem + *out = new(string) + **out = **in + } + if in.StorageLocation != nil { + in, out := &in.StorageLocation, &out.StorageLocation + *out = new(StorageLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildInitParameters. +func (in *BuildInitParameters) DeepCopy() *BuildInitParameters { + if in == nil { + return nil + } + out := new(BuildInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildList) DeepCopyInto(out *BuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. +func (in *BuildList) DeepCopy() *BuildList { + if in == nil { + return nil + } + out := new(BuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildObservation) DeepCopyInto(out *BuildObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OperatingSystem != nil { + in, out := &in.OperatingSystem, &out.OperatingSystem + *out = new(string) + **out = **in + } + if in.StorageLocation != nil { + in, out := &in.StorageLocation, &out.StorageLocation + *out = new(StorageLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildObservation. 
+func (in *BuildObservation) DeepCopy() *BuildObservation { + if in == nil { + return nil + } + out := new(BuildObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildParameters) DeepCopyInto(out *BuildParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OperatingSystem != nil { + in, out := &in.OperatingSystem, &out.OperatingSystem + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StorageLocation != nil { + in, out := &in.StorageLocation, &out.StorageLocation + *out = new(StorageLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildParameters. +func (in *BuildParameters) DeepCopy() *BuildParameters { + if in == nil { + return nil + } + out := new(BuildParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. 
+func (in *BuildSpec) DeepCopy() *BuildSpec { + if in == nil { + return nil + } + out := new(BuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatus) DeepCopyInto(out *BuildStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatus. +func (in *BuildStatus) DeepCopy() *BuildStatus { + if in == nil { + return nil + } + out := new(BuildStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateConfigurationInitParameters) DeepCopyInto(out *CertificateConfigurationInitParameters) { + *out = *in + if in.CertificateType != nil { + in, out := &in.CertificateType, &out.CertificateType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateConfigurationInitParameters. +func (in *CertificateConfigurationInitParameters) DeepCopy() *CertificateConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CertificateConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateConfigurationObservation) DeepCopyInto(out *CertificateConfigurationObservation) { + *out = *in + if in.CertificateType != nil { + in, out := &in.CertificateType, &out.CertificateType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateConfigurationObservation. 
+func (in *CertificateConfigurationObservation) DeepCopy() *CertificateConfigurationObservation { + if in == nil { + return nil + } + out := new(CertificateConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateConfigurationParameters) DeepCopyInto(out *CertificateConfigurationParameters) { + *out = *in + if in.CertificateType != nil { + in, out := &in.CertificateType, &out.CertificateType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateConfigurationParameters. +func (in *CertificateConfigurationParameters) DeepCopy() *CertificateConfigurationParameters { + if in == nil { + return nil + } + out := new(CertificateConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2InboundPermissionInitParameters) DeepCopyInto(out *EC2InboundPermissionInitParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.IPRange != nil { + in, out := &in.IPRange, &out.IPRange + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2InboundPermissionInitParameters. 
+func (in *EC2InboundPermissionInitParameters) DeepCopy() *EC2InboundPermissionInitParameters { + if in == nil { + return nil + } + out := new(EC2InboundPermissionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2InboundPermissionObservation) DeepCopyInto(out *EC2InboundPermissionObservation) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.IPRange != nil { + in, out := &in.IPRange, &out.IPRange + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2InboundPermissionObservation. +func (in *EC2InboundPermissionObservation) DeepCopy() *EC2InboundPermissionObservation { + if in == nil { + return nil + } + out := new(EC2InboundPermissionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2InboundPermissionParameters) DeepCopyInto(out *EC2InboundPermissionParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.IPRange != nil { + in, out := &in.IPRange, &out.IPRange + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2InboundPermissionParameters. 
+func (in *EC2InboundPermissionParameters) DeepCopy() *EC2InboundPermissionParameters { + if in == nil { + return nil + } + out := new(EC2InboundPermissionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Fleet) DeepCopyInto(out *Fleet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fleet. +func (in *Fleet) DeepCopy() *Fleet { + if in == nil { + return nil + } + out := new(Fleet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Fleet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FleetInitParameters) DeepCopyInto(out *FleetInitParameters) { + *out = *in + if in.BuildID != nil { + in, out := &in.BuildID, &out.BuildID + *out = new(string) + **out = **in + } + if in.BuildIDRef != nil { + in, out := &in.BuildIDRef, &out.BuildIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BuildIDSelector != nil { + in, out := &in.BuildIDSelector, &out.BuildIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CertificateConfiguration != nil { + in, out := &in.CertificateConfiguration, &out.CertificateConfiguration + *out = new(CertificateConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EC2InboundPermission != nil { + in, out := &in.EC2InboundPermission, &out.EC2InboundPermission + *out = make([]EC2InboundPermissionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EC2InstanceType != nil { + in, out := &in.EC2InstanceType, &out.EC2InstanceType + *out = new(string) + **out = **in + } + if in.FleetType != nil { + in, out := &in.FleetType, &out.FleetType + *out = new(string) + **out = **in + } + if in.InstanceRoleArn != nil { + in, out := &in.InstanceRoleArn, &out.InstanceRoleArn + *out = new(string) + **out = **in + } + if in.InstanceRoleArnRef != nil { + in, out := &in.InstanceRoleArnRef, &out.InstanceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceRoleArnSelector != nil { + in, out := &in.InstanceRoleArnSelector, &out.InstanceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MetricGroups != nil { + in, out := &in.MetricGroups, &out.MetricGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, 
&out.Name + *out = new(string) + **out = **in + } + if in.NewGameSessionProtectionPolicy != nil { + in, out := &in.NewGameSessionProtectionPolicy, &out.NewGameSessionProtectionPolicy + *out = new(string) + **out = **in + } + if in.ResourceCreationLimitPolicy != nil { + in, out := &in.ResourceCreationLimitPolicy, &out.ResourceCreationLimitPolicy + *out = new(ResourceCreationLimitPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeConfiguration != nil { + in, out := &in.RuntimeConfiguration, &out.RuntimeConfiguration + *out = new(RuntimeConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScriptID != nil { + in, out := &in.ScriptID, &out.ScriptID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetInitParameters. +func (in *FleetInitParameters) DeepCopy() *FleetInitParameters { + if in == nil { + return nil + } + out := new(FleetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FleetList) DeepCopyInto(out *FleetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Fleet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetList. 
+func (in *FleetList) DeepCopy() *FleetList { + if in == nil { + return nil + } + out := new(FleetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FleetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FleetObservation) DeepCopyInto(out *FleetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BuildArn != nil { + in, out := &in.BuildArn, &out.BuildArn + *out = new(string) + **out = **in + } + if in.BuildID != nil { + in, out := &in.BuildID, &out.BuildID + *out = new(string) + **out = **in + } + if in.CertificateConfiguration != nil { + in, out := &in.CertificateConfiguration, &out.CertificateConfiguration + *out = new(CertificateConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EC2InboundPermission != nil { + in, out := &in.EC2InboundPermission, &out.EC2InboundPermission + *out = make([]EC2InboundPermissionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EC2InstanceType != nil { + in, out := &in.EC2InstanceType, &out.EC2InstanceType + *out = new(string) + **out = **in + } + if in.FleetType != nil { + in, out := &in.FleetType, &out.FleetType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceRoleArn != nil { + in, out := &in.InstanceRoleArn, &out.InstanceRoleArn + *out = new(string) + **out = **in + } + if in.LogPaths != nil { + in, out := &in.LogPaths, &out.LogPaths + *out = make([]*string, len(*in)) + for i := range *in 
{ + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MetricGroups != nil { + in, out := &in.MetricGroups, &out.MetricGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NewGameSessionProtectionPolicy != nil { + in, out := &in.NewGameSessionProtectionPolicy, &out.NewGameSessionProtectionPolicy + *out = new(string) + **out = **in + } + if in.OperatingSystem != nil { + in, out := &in.OperatingSystem, &out.OperatingSystem + *out = new(string) + **out = **in + } + if in.ResourceCreationLimitPolicy != nil { + in, out := &in.ResourceCreationLimitPolicy, &out.ResourceCreationLimitPolicy + *out = new(ResourceCreationLimitPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeConfiguration != nil { + in, out := &in.RuntimeConfiguration, &out.RuntimeConfiguration + *out = new(RuntimeConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ScriptArn != nil { + in, out := &in.ScriptArn, &out.ScriptArn + *out = new(string) + **out = **in + } + if in.ScriptID != nil { + in, out := &in.ScriptID, &out.ScriptID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } 
+ } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetObservation. +func (in *FleetObservation) DeepCopy() *FleetObservation { + if in == nil { + return nil + } + out := new(FleetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FleetParameters) DeepCopyInto(out *FleetParameters) { + *out = *in + if in.BuildID != nil { + in, out := &in.BuildID, &out.BuildID + *out = new(string) + **out = **in + } + if in.BuildIDRef != nil { + in, out := &in.BuildIDRef, &out.BuildIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BuildIDSelector != nil { + in, out := &in.BuildIDSelector, &out.BuildIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CertificateConfiguration != nil { + in, out := &in.CertificateConfiguration, &out.CertificateConfiguration + *out = new(CertificateConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EC2InboundPermission != nil { + in, out := &in.EC2InboundPermission, &out.EC2InboundPermission + *out = make([]EC2InboundPermissionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EC2InstanceType != nil { + in, out := &in.EC2InstanceType, &out.EC2InstanceType + *out = new(string) + **out = **in + } + if in.FleetType != nil { + in, out := &in.FleetType, &out.FleetType + *out = new(string) + **out = **in + } + if in.InstanceRoleArn != nil { + in, out := &in.InstanceRoleArn, &out.InstanceRoleArn + *out = new(string) + **out = **in + } + if in.InstanceRoleArnRef != nil { + in, out := &in.InstanceRoleArnRef, &out.InstanceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceRoleArnSelector != nil { + in, out := &in.InstanceRoleArnSelector, 
&out.InstanceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MetricGroups != nil { + in, out := &in.MetricGroups, &out.MetricGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NewGameSessionProtectionPolicy != nil { + in, out := &in.NewGameSessionProtectionPolicy, &out.NewGameSessionProtectionPolicy + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ResourceCreationLimitPolicy != nil { + in, out := &in.ResourceCreationLimitPolicy, &out.ResourceCreationLimitPolicy + *out = new(ResourceCreationLimitPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeConfiguration != nil { + in, out := &in.RuntimeConfiguration, &out.RuntimeConfiguration + *out = new(RuntimeConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ScriptID != nil { + in, out := &in.ScriptID, &out.ScriptID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetParameters. +func (in *FleetParameters) DeepCopy() *FleetParameters { + if in == nil { + return nil + } + out := new(FleetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FleetSpec) DeepCopyInto(out *FleetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetSpec. +func (in *FleetSpec) DeepCopy() *FleetSpec { + if in == nil { + return nil + } + out := new(FleetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FleetStatus) DeepCopyInto(out *FleetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FleetStatus. +func (in *FleetStatus) DeepCopy() *FleetStatus { + if in == nil { + return nil + } + out := new(FleetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceCreationLimitPolicyInitParameters) DeepCopyInto(out *ResourceCreationLimitPolicyInitParameters) { + *out = *in + if in.NewGameSessionsPerCreator != nil { + in, out := &in.NewGameSessionsPerCreator, &out.NewGameSessionsPerCreator + *out = new(float64) + **out = **in + } + if in.PolicyPeriodInMinutes != nil { + in, out := &in.PolicyPeriodInMinutes, &out.PolicyPeriodInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceCreationLimitPolicyInitParameters. 
+func (in *ResourceCreationLimitPolicyInitParameters) DeepCopy() *ResourceCreationLimitPolicyInitParameters { + if in == nil { + return nil + } + out := new(ResourceCreationLimitPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceCreationLimitPolicyObservation) DeepCopyInto(out *ResourceCreationLimitPolicyObservation) { + *out = *in + if in.NewGameSessionsPerCreator != nil { + in, out := &in.NewGameSessionsPerCreator, &out.NewGameSessionsPerCreator + *out = new(float64) + **out = **in + } + if in.PolicyPeriodInMinutes != nil { + in, out := &in.PolicyPeriodInMinutes, &out.PolicyPeriodInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceCreationLimitPolicyObservation. +func (in *ResourceCreationLimitPolicyObservation) DeepCopy() *ResourceCreationLimitPolicyObservation { + if in == nil { + return nil + } + out := new(ResourceCreationLimitPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceCreationLimitPolicyParameters) DeepCopyInto(out *ResourceCreationLimitPolicyParameters) { + *out = *in + if in.NewGameSessionsPerCreator != nil { + in, out := &in.NewGameSessionsPerCreator, &out.NewGameSessionsPerCreator + *out = new(float64) + **out = **in + } + if in.PolicyPeriodInMinutes != nil { + in, out := &in.PolicyPeriodInMinutes, &out.PolicyPeriodInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceCreationLimitPolicyParameters. 
+func (in *ResourceCreationLimitPolicyParameters) DeepCopy() *ResourceCreationLimitPolicyParameters { + if in == nil { + return nil + } + out := new(ResourceCreationLimitPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingStrategyInitParameters) DeepCopyInto(out *RoutingStrategyInitParameters) { + *out = *in + if in.FleetID != nil { + in, out := &in.FleetID, &out.FleetID + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingStrategyInitParameters. +func (in *RoutingStrategyInitParameters) DeepCopy() *RoutingStrategyInitParameters { + if in == nil { + return nil + } + out := new(RoutingStrategyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingStrategyObservation) DeepCopyInto(out *RoutingStrategyObservation) { + *out = *in + if in.FleetID != nil { + in, out := &in.FleetID, &out.FleetID + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingStrategyObservation. 
+func (in *RoutingStrategyObservation) DeepCopy() *RoutingStrategyObservation { + if in == nil { + return nil + } + out := new(RoutingStrategyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingStrategyParameters) DeepCopyInto(out *RoutingStrategyParameters) { + *out = *in + if in.FleetID != nil { + in, out := &in.FleetID, &out.FleetID + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingStrategyParameters. +func (in *RoutingStrategyParameters) DeepCopy() *RoutingStrategyParameters { + if in == nil { + return nil + } + out := new(RoutingStrategyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeConfigurationInitParameters) DeepCopyInto(out *RuntimeConfigurationInitParameters) { + *out = *in + if in.GameSessionActivationTimeoutSeconds != nil { + in, out := &in.GameSessionActivationTimeoutSeconds, &out.GameSessionActivationTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.MaxConcurrentGameSessionActivations != nil { + in, out := &in.MaxConcurrentGameSessionActivations, &out.MaxConcurrentGameSessionActivations + *out = new(float64) + **out = **in + } + if in.ServerProcess != nil { + in, out := &in.ServerProcess, &out.ServerProcess + *out = make([]ServerProcessInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeConfigurationInitParameters. 
+func (in *RuntimeConfigurationInitParameters) DeepCopy() *RuntimeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RuntimeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeConfigurationObservation) DeepCopyInto(out *RuntimeConfigurationObservation) { + *out = *in + if in.GameSessionActivationTimeoutSeconds != nil { + in, out := &in.GameSessionActivationTimeoutSeconds, &out.GameSessionActivationTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.MaxConcurrentGameSessionActivations != nil { + in, out := &in.MaxConcurrentGameSessionActivations, &out.MaxConcurrentGameSessionActivations + *out = new(float64) + **out = **in + } + if in.ServerProcess != nil { + in, out := &in.ServerProcess, &out.ServerProcess + *out = make([]ServerProcessObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeConfigurationObservation. +func (in *RuntimeConfigurationObservation) DeepCopy() *RuntimeConfigurationObservation { + if in == nil { + return nil + } + out := new(RuntimeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuntimeConfigurationParameters) DeepCopyInto(out *RuntimeConfigurationParameters) { + *out = *in + if in.GameSessionActivationTimeoutSeconds != nil { + in, out := &in.GameSessionActivationTimeoutSeconds, &out.GameSessionActivationTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.MaxConcurrentGameSessionActivations != nil { + in, out := &in.MaxConcurrentGameSessionActivations, &out.MaxConcurrentGameSessionActivations + *out = new(float64) + **out = **in + } + if in.ServerProcess != nil { + in, out := &in.ServerProcess, &out.ServerProcess + *out = make([]ServerProcessParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeConfigurationParameters. +func (in *RuntimeConfigurationParameters) DeepCopy() *RuntimeConfigurationParameters { + if in == nil { + return nil + } + out := new(RuntimeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Script) DeepCopyInto(out *Script) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Script. +func (in *Script) DeepCopy() *Script { + if in == nil { + return nil + } + out := new(Script) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Script) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScriptInitParameters) DeepCopyInto(out *ScriptInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageLocation != nil { + in, out := &in.StorageLocation, &out.StorageLocation + *out = new(ScriptStorageLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.ZipFile != nil { + in, out := &in.ZipFile, &out.ZipFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptInitParameters. +func (in *ScriptInitParameters) DeepCopy() *ScriptInitParameters { + if in == nil { + return nil + } + out := new(ScriptInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptList) DeepCopyInto(out *ScriptList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Script, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptList. +func (in *ScriptList) DeepCopy() *ScriptList { + if in == nil { + return nil + } + out := new(ScriptList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ScriptList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptObservation) DeepCopyInto(out *ScriptObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageLocation != nil { + in, out := &in.StorageLocation, &out.StorageLocation + *out = new(ScriptStorageLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.ZipFile != nil { + in, out := &in.ZipFile, &out.ZipFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptObservation. 
+func (in *ScriptObservation) DeepCopy() *ScriptObservation { + if in == nil { + return nil + } + out := new(ScriptObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptParameters) DeepCopyInto(out *ScriptParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StorageLocation != nil { + in, out := &in.StorageLocation, &out.StorageLocation + *out = new(ScriptStorageLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.ZipFile != nil { + in, out := &in.ZipFile, &out.ZipFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptParameters. +func (in *ScriptParameters) DeepCopy() *ScriptParameters { + if in == nil { + return nil + } + out := new(ScriptParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptSpec) DeepCopyInto(out *ScriptSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptSpec. 
+func (in *ScriptSpec) DeepCopy() *ScriptSpec { + if in == nil { + return nil + } + out := new(ScriptSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptStatus) DeepCopyInto(out *ScriptStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptStatus. +func (in *ScriptStatus) DeepCopy() *ScriptStatus { + if in == nil { + return nil + } + out := new(ScriptStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptStorageLocationInitParameters) DeepCopyInto(out *ScriptStorageLocationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.KeyRef != nil { + in, out := &in.KeyRef, &out.KeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeySelector != nil { + in, out := &in.KeySelector, &out.KeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) 
+ } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptStorageLocationInitParameters. +func (in *ScriptStorageLocationInitParameters) DeepCopy() *ScriptStorageLocationInitParameters { + if in == nil { + return nil + } + out := new(ScriptStorageLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptStorageLocationObservation) DeepCopyInto(out *ScriptStorageLocationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptStorageLocationObservation. +func (in *ScriptStorageLocationObservation) DeepCopy() *ScriptStorageLocationObservation { + if in == nil { + return nil + } + out := new(ScriptStorageLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScriptStorageLocationParameters) DeepCopyInto(out *ScriptStorageLocationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.KeyRef != nil { + in, out := &in.KeyRef, &out.KeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeySelector != nil { + in, out := &in.KeySelector, &out.KeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptStorageLocationParameters. +func (in *ScriptStorageLocationParameters) DeepCopy() *ScriptStorageLocationParameters { + if in == nil { + return nil + } + out := new(ScriptStorageLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerProcessInitParameters) DeepCopyInto(out *ServerProcessInitParameters) { + *out = *in + if in.ConcurrentExecutions != nil { + in, out := &in.ConcurrentExecutions, &out.ConcurrentExecutions + *out = new(float64) + **out = **in + } + if in.LaunchPath != nil { + in, out := &in.LaunchPath, &out.LaunchPath + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerProcessInitParameters. +func (in *ServerProcessInitParameters) DeepCopy() *ServerProcessInitParameters { + if in == nil { + return nil + } + out := new(ServerProcessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerProcessObservation) DeepCopyInto(out *ServerProcessObservation) { + *out = *in + if in.ConcurrentExecutions != nil { + in, out := &in.ConcurrentExecutions, &out.ConcurrentExecutions + *out = new(float64) + **out = **in + } + if in.LaunchPath != nil { + in, out := &in.LaunchPath, &out.LaunchPath + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerProcessObservation. +func (in *ServerProcessObservation) DeepCopy() *ServerProcessObservation { + if in == nil { + return nil + } + out := new(ServerProcessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerProcessParameters) DeepCopyInto(out *ServerProcessParameters) { + *out = *in + if in.ConcurrentExecutions != nil { + in, out := &in.ConcurrentExecutions, &out.ConcurrentExecutions + *out = new(float64) + **out = **in + } + if in.LaunchPath != nil { + in, out := &in.LaunchPath, &out.LaunchPath + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerProcessParameters. +func (in *ServerProcessParameters) DeepCopy() *ServerProcessParameters { + if in == nil { + return nil + } + out := new(ServerProcessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageLocationInitParameters) DeepCopyInto(out *StorageLocationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.KeyRef != nil { + in, out := &in.KeyRef, &out.KeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeySelector != nil { + in, out := &in.KeySelector, &out.KeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, 
&out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLocationInitParameters. +func (in *StorageLocationInitParameters) DeepCopy() *StorageLocationInitParameters { + if in == nil { + return nil + } + out := new(StorageLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageLocationObservation) DeepCopyInto(out *StorageLocationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLocationObservation. +func (in *StorageLocationObservation) DeepCopy() *StorageLocationObservation { + if in == nil { + return nil + } + out := new(StorageLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageLocationParameters) DeepCopyInto(out *StorageLocationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.KeyRef != nil { + in, out := &in.KeyRef, &out.KeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeySelector != nil { + in, out := &in.KeySelector, &out.KeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLocationParameters. +func (in *StorageLocationParameters) DeepCopy() *StorageLocationParameters { + if in == nil { + return nil + } + out := new(StorageLocationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/gamelift/v1beta2/zz_generated.managed.go b/apis/gamelift/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..1319c17bc9 --- /dev/null +++ b/apis/gamelift/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. 
DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Alias. +func (mg *Alias) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Alias. +func (mg *Alias) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Alias. +func (mg *Alias) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Alias. +func (mg *Alias) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Alias. +func (mg *Alias) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Alias. +func (mg *Alias) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Alias. +func (mg *Alias) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Alias. +func (mg *Alias) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Alias. +func (mg *Alias) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Alias. +func (mg *Alias) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Alias. +func (mg *Alias) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Alias. 
+func (mg *Alias) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Build. +func (mg *Build) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Build. +func (mg *Build) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Build. +func (mg *Build) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Build. +func (mg *Build) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Build. +func (mg *Build) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Build. +func (mg *Build) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Build. +func (mg *Build) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Build. +func (mg *Build) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Build. +func (mg *Build) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Build. +func (mg *Build) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Build. +func (mg *Build) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Build. 
+func (mg *Build) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Fleet. +func (mg *Fleet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Fleet. +func (mg *Fleet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Fleet. +func (mg *Fleet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Fleet. +func (mg *Fleet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Fleet. +func (mg *Fleet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Fleet. +func (mg *Fleet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Fleet. +func (mg *Fleet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Fleet. +func (mg *Fleet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Fleet. +func (mg *Fleet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Fleet. +func (mg *Fleet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Fleet. +func (mg *Fleet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Fleet. 
+func (mg *Fleet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Script. +func (mg *Script) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Script. +func (mg *Script) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Script. +func (mg *Script) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Script. +func (mg *Script) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Script. +func (mg *Script) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Script. +func (mg *Script) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Script. +func (mg *Script) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Script. +func (mg *Script) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Script. +func (mg *Script) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Script. +func (mg *Script) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Script. +func (mg *Script) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Script. 
+func (mg *Script) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/gamelift/v1beta2/zz_generated.managedlist.go b/apis/gamelift/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..6cc02cfe3b --- /dev/null +++ b/apis/gamelift/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AliasList. +func (l *AliasList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BuildList. +func (l *BuildList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FleetList. +func (l *FleetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ScriptList. +func (l *ScriptList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/gamelift/v1beta2/zz_generated.resolvers.go b/apis/gamelift/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..a8962eed00 --- /dev/null +++ b/apis/gamelift/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,386 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Build. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Build) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageLocation.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageLocation.BucketRef, + Selector: mg.Spec.ForProvider.StorageLocation.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageLocation.Bucket") + } + mg.Spec.ForProvider.StorageLocation.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageLocation.BucketRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + 
CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageLocation.Key), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.ForProvider.StorageLocation.KeyRef, + Selector: mg.Spec.ForProvider.StorageLocation.KeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageLocation.Key") + } + mg.Spec.ForProvider.StorageLocation.Key = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageLocation.KeyRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageLocation.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.StorageLocation.RoleArnRef, + Selector: mg.Spec.ForProvider.StorageLocation.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageLocation.RoleArn") + } + mg.Spec.ForProvider.StorageLocation.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageLocation.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageLocation.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageLocation.BucketRef, + 
Selector: mg.Spec.InitProvider.StorageLocation.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageLocation.Bucket") + } + mg.Spec.InitProvider.StorageLocation.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageLocation.BucketRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageLocation.Key), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.InitProvider.StorageLocation.KeyRef, + Selector: mg.Spec.InitProvider.StorageLocation.KeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageLocation.Key") + } + mg.Spec.InitProvider.StorageLocation.Key = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageLocation.KeyRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageLocation.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.StorageLocation.RoleArnRef, + Selector: mg.Spec.InitProvider.StorageLocation.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.StorageLocation.RoleArn") + } + mg.Spec.InitProvider.StorageLocation.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageLocation.RoleArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Fleet. +func (mg *Fleet) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("gamelift.aws.upbound.io", "v1beta2", "Build", "BuildList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.BuildID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.BuildIDRef, + Selector: mg.Spec.ForProvider.BuildIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.BuildID") + } + mg.Spec.ForProvider.BuildID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BuildIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.InstanceRoleArnRef, + Selector: mg.Spec.ForProvider.InstanceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceRoleArn") + } + mg.Spec.ForProvider.InstanceRoleArn = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("gamelift.aws.upbound.io", "v1beta2", "Build", "BuildList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.BuildID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.BuildIDRef, + Selector: mg.Spec.InitProvider.BuildIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.BuildID") + } + mg.Spec.InitProvider.BuildID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BuildIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.InstanceRoleArnRef, + Selector: mg.Spec.InitProvider.InstanceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceRoleArn") + } + mg.Spec.InitProvider.InstanceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceRoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Script. 
+func (mg *Script) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageLocation.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageLocation.BucketRef, + Selector: mg.Spec.ForProvider.StorageLocation.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageLocation.Bucket") + } + mg.Spec.ForProvider.StorageLocation.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageLocation.BucketRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageLocation.Key), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.ForProvider.StorageLocation.KeyRef, + Selector: mg.Spec.ForProvider.StorageLocation.KeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageLocation.Key") + } + mg.Spec.ForProvider.StorageLocation.Key = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.StorageLocation.KeyRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageLocation.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.StorageLocation.RoleArnRef, + Selector: mg.Spec.ForProvider.StorageLocation.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageLocation.RoleArn") + } + mg.Spec.ForProvider.StorageLocation.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageLocation.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageLocation.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageLocation.BucketRef, + Selector: mg.Spec.InitProvider.StorageLocation.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageLocation.Bucket") + } + mg.Spec.InitProvider.StorageLocation.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageLocation.BucketRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.StorageLocation != nil { + { + m, l, err = 
apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageLocation.Key), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.InitProvider.StorageLocation.KeyRef, + Selector: mg.Spec.InitProvider.StorageLocation.KeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageLocation.Key") + } + mg.Spec.InitProvider.StorageLocation.Key = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageLocation.KeyRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.StorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageLocation.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.StorageLocation.RoleArnRef, + Selector: mg.Spec.InitProvider.StorageLocation.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageLocation.RoleArn") + } + mg.Spec.InitProvider.StorageLocation.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageLocation.RoleArnRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/gamelift/v1beta2/zz_groupversion_info.go b/apis/gamelift/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..0993a03c70 --- /dev/null +++ b/apis/gamelift/v1beta2/zz_groupversion_info.go 
@@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=gamelift.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "gamelift.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/gamelift/v1beta2/zz_script_terraformed.go b/apis/gamelift/v1beta2/zz_script_terraformed.go new file mode 100755 index 0000000000..322486adfe --- /dev/null +++ b/apis/gamelift/v1beta2/zz_script_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Script +func (mg *Script) GetTerraformResourceType() string { + return "aws_gamelift_script" +} + +// GetConnectionDetailsMapping for this Script +func (tr *Script) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Script +func (tr *Script) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Script +func (tr *Script) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Script +func (tr *Script) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Script +func (tr *Script) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Script +func (tr *Script) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Script +func (tr *Script) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of 
this Script +func (tr *Script) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Script using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Script) LateInitialize(attrs []byte) (bool, error) { + params := &ScriptParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Script) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/gamelift/v1beta2/zz_script_types.go b/apis/gamelift/v1beta2/zz_script_types.go new file mode 100755 index 0000000000..92f6deddd4 --- /dev/null +++ b/apis/gamelift/v1beta2/zz_script_types.go @@ -0,0 +1,259 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ScriptInitParameters struct { + + // Name of the script + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Information indicating where your game script files are stored. See below. + StorageLocation *ScriptStorageLocationInitParameters `json:"storageLocation,omitempty" tf:"storage_location,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Version that is associated with this script. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // A data object containing your Realtime scripts and dependencies as a zip file. The zip file can have one or multiple files. Maximum size of a zip file is 5 MB. + ZipFile *string `json:"zipFile,omitempty" tf:"zip_file,omitempty"` +} + +type ScriptObservation struct { + + // GameLift Script ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // GameLift Script ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the script + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Information indicating where your game script files are stored. See below. 
+ StorageLocation *ScriptStorageLocationObservation `json:"storageLocation,omitempty" tf:"storage_location,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Version that is associated with this script. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // A data object containing your Realtime scripts and dependencies as a zip file. The zip file can have one or multiple files. Maximum size of a zip file is 5 MB. + ZipFile *string `json:"zipFile,omitempty" tf:"zip_file,omitempty"` +} + +type ScriptParameters struct { + + // Name of the script + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Information indicating where your game script files are stored. See below. + // +kubebuilder:validation:Optional + StorageLocation *ScriptStorageLocationParameters `json:"storageLocation,omitempty" tf:"storage_location,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Version that is associated with this script. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // A data object containing your Realtime scripts and dependencies as a zip file. The zip file can have one or multiple files. Maximum size of a zip file is 5 MB. 
+ // +kubebuilder:validation:Optional + ZipFile *string `json:"zipFile,omitempty" tf:"zip_file,omitempty"` +} + +type ScriptStorageLocationInitParameters struct { + + // Name of your S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name of the zip file containing your script files. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Reference to a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeyRef *v1.Reference `json:"keyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeySelector *v1.Selector `json:"keySelector,omitempty" tf:"-"` + + // A specific version of the file. If not set, the latest version of the file is retrieved. + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` + + // ARN of the access role that allows Amazon GameLift to access your S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type ScriptStorageLocationObservation struct { + + // Name of your S3 bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Name of the zip file containing your script files. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A specific version of the file. If not set, the latest version of the file is retrieved. + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` + + // ARN of the access role that allows Amazon GameLift to access your S3 bucket. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type ScriptStorageLocationParameters struct { + + // Name of your S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name of the zip file containing your script files. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Reference to a Object in s3 to populate key. 
+ // +kubebuilder:validation:Optional + KeyRef *v1.Reference `json:"keyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeySelector *v1.Selector `json:"keySelector,omitempty" tf:"-"` + + // A specific version of the file. If not set, the latest version of the file is retrieved. + // +kubebuilder:validation:Optional + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` + + // ARN of the access role that allows Amazon GameLift to access your S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +// ScriptSpec defines the desired state of Script +type ScriptSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScriptParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScriptInitParameters `json:"initProvider,omitempty"` +} + +// ScriptStatus defines the observed state of Script. +type ScriptStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScriptObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Script is the Schema for the Scripts API. Provides a GameLift Script resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Script struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ScriptSpec `json:"spec"` + Status ScriptStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScriptList contains a list of Scripts +type ScriptList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Script `json:"items"` +} + +// Repository type metadata. 
+var ( + Script_Kind = "Script" + Script_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Script_Kind}.String() + Script_KindAPIVersion = Script_Kind + "." + CRDGroupVersion.String() + Script_GroupVersionKind = CRDGroupVersion.WithKind(Script_Kind) +) + +func init() { + SchemeBuilder.Register(&Script{}, &ScriptList{}) +} diff --git a/apis/glacier/v1beta1/zz_generated.conversion_hubs.go b/apis/glacier/v1beta1/zz_generated.conversion_hubs.go index 6ff1ea467e..5caeaeaa62 100755 --- a/apis/glacier/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/glacier/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Vault) Hub() {} - // Hub marks this type as a conversion hub. func (tr *VaultLock) Hub() {} diff --git a/apis/glacier/v1beta1/zz_generated.conversion_spokes.go b/apis/glacier/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..17418f9f6f --- /dev/null +++ b/apis/glacier/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Vault to the hub type. +func (tr *Vault) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Vault type. 
+func (tr *Vault) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/glacier/v1beta1/zz_generated.resolvers.go b/apis/glacier/v1beta1/zz_generated.resolvers.go index 958294222f..e85fa62a59 100644 --- a/apis/glacier/v1beta1/zz_generated.resolvers.go +++ b/apis/glacier/v1beta1/zz_generated.resolvers.go @@ -82,7 +82,7 @@ func (mg *VaultLock) ResolveReferences(ctx context.Context, c client.Reader) err var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("glacier.aws.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("glacier.aws.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -101,7 +101,7 @@ func (mg *VaultLock) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.VaultName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VaultNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("glacier.aws.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("glacier.aws.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/glacier/v1beta1/zz_vaultlock_types.go b/apis/glacier/v1beta1/zz_vaultlock_types.go index 721a42d150..f1cb6a5c80 100755 --- a/apis/glacier/v1beta1/zz_vaultlock_types.go +++ b/apis/glacier/v1beta1/zz_vaultlock_types.go @@ -25,7 +25,7 @@ type 
VaultLockInitParameters struct { Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` // The name of the Glacier Vault. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glacier/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glacier/v1beta2.Vault VaultName *string `json:"vaultName,omitempty" tf:"vault_name,omitempty"` // Reference to a Vault in glacier to populate vaultName. @@ -75,7 +75,7 @@ type VaultLockParameters struct { Region *string `json:"region" tf:"-"` // The name of the Glacier Vault. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glacier/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glacier/v1beta2.Vault // +kubebuilder:validation:Optional VaultName *string `json:"vaultName,omitempty" tf:"vault_name,omitempty"` diff --git a/apis/glacier/v1beta2/zz_generated.conversion_hubs.go b/apis/glacier/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..5ebdffb032 --- /dev/null +++ b/apis/glacier/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Vault) Hub() {} diff --git a/apis/glacier/v1beta2/zz_generated.deepcopy.go b/apis/glacier/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6b273b11e8 --- /dev/null +++ b/apis/glacier/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,380 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationInitParameters) DeepCopyInto(out *NotificationInitParameters) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnsTopic != nil { + in, out := &in.SnsTopic, &out.SnsTopic + *out = new(string) + **out = **in + } + if in.SnsTopicRef != nil { + in, out := &in.SnsTopicRef, &out.SnsTopicRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnsTopicSelector != nil { + in, out := &in.SnsTopicSelector, &out.SnsTopicSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationInitParameters. +func (in *NotificationInitParameters) DeepCopy() *NotificationInitParameters { + if in == nil { + return nil + } + out := new(NotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationObservation) DeepCopyInto(out *NotificationObservation) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnsTopic != nil { + in, out := &in.SnsTopic, &out.SnsTopic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationObservation. 
+func (in *NotificationObservation) DeepCopy() *NotificationObservation { + if in == nil { + return nil + } + out := new(NotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationParameters) DeepCopyInto(out *NotificationParameters) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnsTopic != nil { + in, out := &in.SnsTopic, &out.SnsTopic + *out = new(string) + **out = **in + } + if in.SnsTopicRef != nil { + in, out := &in.SnsTopicRef, &out.SnsTopicRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnsTopicSelector != nil { + in, out := &in.SnsTopicSelector, &out.SnsTopicSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationParameters. +func (in *NotificationParameters) DeepCopy() *NotificationParameters { + if in == nil { + return nil + } + out := new(NotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Vault) DeepCopyInto(out *Vault) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Vault. +func (in *Vault) DeepCopy() *Vault { + if in == nil { + return nil + } + out := new(Vault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Vault) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultInitParameters) DeepCopyInto(out *VaultInitParameters) { + *out = *in + if in.AccessPolicy != nil { + in, out := &in.AccessPolicy, &out.AccessPolicy + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = new(NotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultInitParameters. +func (in *VaultInitParameters) DeepCopy() *VaultInitParameters { + if in == nil { + return nil + } + out := new(VaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultList) DeepCopyInto(out *VaultList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Vault, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultList. +func (in *VaultList) DeepCopy() *VaultList { + if in == nil { + return nil + } + out := new(VaultList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VaultList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultObservation) DeepCopyInto(out *VaultObservation) { + *out = *in + if in.AccessPolicy != nil { + in, out := &in.AccessPolicy, &out.AccessPolicy + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = new(NotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultObservation. +func (in *VaultObservation) DeepCopy() *VaultObservation { + if in == nil { + return nil + } + out := new(VaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VaultParameters) DeepCopyInto(out *VaultParameters) { + *out = *in + if in.AccessPolicy != nil { + in, out := &in.AccessPolicy, &out.AccessPolicy + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = new(NotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultParameters. +func (in *VaultParameters) DeepCopy() *VaultParameters { + if in == nil { + return nil + } + out := new(VaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultSpec) DeepCopyInto(out *VaultSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultSpec. +func (in *VaultSpec) DeepCopy() *VaultSpec { + if in == nil { + return nil + } + out := new(VaultSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VaultStatus) DeepCopyInto(out *VaultStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultStatus. +func (in *VaultStatus) DeepCopy() *VaultStatus { + if in == nil { + return nil + } + out := new(VaultStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/glacier/v1beta2/zz_generated.managed.go b/apis/glacier/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..707c595fcc --- /dev/null +++ b/apis/glacier/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Vault. +func (mg *Vault) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Vault. +func (mg *Vault) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Vault. +func (mg *Vault) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Vault. +func (mg *Vault) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Vault. +func (mg *Vault) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Vault. +func (mg *Vault) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Vault. +func (mg *Vault) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this Vault. +func (mg *Vault) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Vault. +func (mg *Vault) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Vault. +func (mg *Vault) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Vault. +func (mg *Vault) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Vault. +func (mg *Vault) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/glacier/v1beta2/zz_generated.managedlist.go b/apis/glacier/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..558b2fb86b --- /dev/null +++ b/apis/glacier/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this VaultList. +func (l *VaultList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/glacier/v1beta2/zz_generated.resolvers.go b/apis/glacier/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..68118ea8a1 --- /dev/null +++ b/apis/glacier/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Vault. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Vault) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Notification != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Notification.SnsTopic), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Notification.SnsTopicRef, + Selector: mg.Spec.ForProvider.Notification.SnsTopicSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Notification.SnsTopic") + } + mg.Spec.ForProvider.Notification.SnsTopic = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Notification.SnsTopicRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Notification != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.Notification.SnsTopic), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Notification.SnsTopicRef, + Selector: mg.Spec.InitProvider.Notification.SnsTopicSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Notification.SnsTopic") + } + mg.Spec.InitProvider.Notification.SnsTopic = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Notification.SnsTopicRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/glacier/v1beta2/zz_groupversion_info.go b/apis/glacier/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..fae956569d --- /dev/null +++ b/apis/glacier/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=glacier.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "glacier.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/glacier/v1beta2/zz_vault_terraformed.go b/apis/glacier/v1beta2/zz_vault_terraformed.go new file mode 100755 index 0000000000..31f24a95be --- /dev/null +++ b/apis/glacier/v1beta2/zz_vault_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Vault +func (mg *Vault) GetTerraformResourceType() string { + return "aws_glacier_vault" +} + +// GetConnectionDetailsMapping for this Vault +func (tr *Vault) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Vault +func (tr *Vault) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Vault +func (tr *Vault) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Vault +func (tr *Vault) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Vault +func (tr *Vault) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Vault +func (tr *Vault) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Vault +func (tr *Vault) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Vault +func (tr *Vault) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Vault using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Vault) LateInitialize(attrs []byte) (bool, error) { + params := &VaultParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Vault) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glacier/v1beta2/zz_vault_types.go b/apis/glacier/v1beta2/zz_vault_types.go new file mode 100755 index 0000000000..ac85c0674d --- /dev/null +++ b/apis/glacier/v1beta2/zz_vault_types.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type NotificationInitParameters struct { + + // You can configure a vault to publish a notification for ArchiveRetrievalCompleted and InventoryRetrievalCompleted events. + // +listType=set + Events []*string `json:"events,omitempty" tf:"events,omitempty"` + + // The SNS Topic ARN. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + SnsTopic *string `json:"snsTopic,omitempty" tf:"sns_topic,omitempty"` + + // Reference to a Topic in sns to populate snsTopic. + // +kubebuilder:validation:Optional + SnsTopicRef *v1.Reference `json:"snsTopicRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate snsTopic. 
+ // +kubebuilder:validation:Optional + SnsTopicSelector *v1.Selector `json:"snsTopicSelector,omitempty" tf:"-"` +} + +type NotificationObservation struct { + + // You can configure a vault to publish a notification for ArchiveRetrievalCompleted and InventoryRetrievalCompleted events. + // +listType=set + Events []*string `json:"events,omitempty" tf:"events,omitempty"` + + // The SNS Topic ARN. + SnsTopic *string `json:"snsTopic,omitempty" tf:"sns_topic,omitempty"` +} + +type NotificationParameters struct { + + // You can configure a vault to publish a notification for ArchiveRetrievalCompleted and InventoryRetrievalCompleted events. + // +kubebuilder:validation:Optional + // +listType=set + Events []*string `json:"events" tf:"events,omitempty"` + + // The SNS Topic ARN. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + SnsTopic *string `json:"snsTopic,omitempty" tf:"sns_topic,omitempty"` + + // Reference to a Topic in sns to populate snsTopic. + // +kubebuilder:validation:Optional + SnsTopicRef *v1.Reference `json:"snsTopicRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate snsTopic. + // +kubebuilder:validation:Optional + SnsTopicSelector *v1.Selector `json:"snsTopicSelector,omitempty" tf:"-"` +} + +type VaultInitParameters struct { + + // The policy document. This is a JSON formatted string. + // The heredoc syntax or file function is helpful here. Use the Glacier Developer Guide for more information on Glacier Vault Policy + AccessPolicy *string `json:"accessPolicy,omitempty" tf:"access_policy,omitempty"` + + // The notifications for the Vault. Fields documented below. + Notification *NotificationInitParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VaultObservation struct { + + // The policy document. This is a JSON formatted string. + // The heredoc syntax or file function is helpful here. Use the Glacier Developer Guide for more information on Glacier Vault Policy + AccessPolicy *string `json:"accessPolicy,omitempty" tf:"access_policy,omitempty"` + + // The ARN of the vault. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The URI of the vault that was created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The notifications for the Vault. Fields documented below. + Notification *NotificationObservation `json:"notification,omitempty" tf:"notification,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type VaultParameters struct { + + // The policy document. This is a JSON formatted string. + // The heredoc syntax or file function is helpful here. Use the Glacier Developer Guide for more information on Glacier Vault Policy + // +kubebuilder:validation:Optional + AccessPolicy *string `json:"accessPolicy,omitempty" tf:"access_policy,omitempty"` + + // The notifications for the Vault. Fields documented below. + // +kubebuilder:validation:Optional + Notification *NotificationParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// VaultSpec defines the desired state of Vault +type VaultSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VaultParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VaultInitParameters `json:"initProvider,omitempty"` +} + +// VaultStatus defines the observed state of Vault. +type VaultStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VaultObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Vault is the Schema for the Vaults API. Provides a Glacier Vault. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Vault struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec VaultSpec `json:"spec"` + Status VaultStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VaultList contains a list of Vaults +type VaultList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Vault `json:"items"` +} + +// Repository type metadata. +var ( + Vault_Kind = "Vault" + Vault_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Vault_Kind}.String() + Vault_KindAPIVersion = Vault_Kind + "." + CRDGroupVersion.String() + Vault_GroupVersionKind = CRDGroupVersion.WithKind(Vault_Kind) +) + +func init() { + SchemeBuilder.Register(&Vault{}, &VaultList{}) +} diff --git a/apis/globalaccelerator/v1beta1/zz_generated.conversion_hubs.go b/apis/globalaccelerator/v1beta1/zz_generated.conversion_hubs.go index 7b6c8c486e..e6dcffb9cb 100755 --- a/apis/globalaccelerator/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/globalaccelerator/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Accelerator) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *EndpointGroup) Hub() {} diff --git a/apis/globalaccelerator/v1beta1/zz_generated.conversion_spokes.go b/apis/globalaccelerator/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ba306beeda --- /dev/null +++ b/apis/globalaccelerator/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Accelerator to the hub type. +func (tr *Accelerator) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Accelerator type. 
+func (tr *Accelerator) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/globalaccelerator/v1beta1/zz_generated.resolvers.go b/apis/globalaccelerator/v1beta1/zz_generated.resolvers.go index 870c9e2d8c..80effd2fc7 100644 --- a/apis/globalaccelerator/v1beta1/zz_generated.resolvers.go +++ b/apis/globalaccelerator/v1beta1/zz_generated.resolvers.go @@ -9,8 +9,9 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" apisresolver "github.com/upbound/provider-aws/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -74,7 +75,7 @@ func (mg *Listener) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("globalaccelerator.aws.upbound.io", "v1beta1", "Accelerator", "AcceleratorList") + m, l, err = apisresolver.GetManagedResource("globalaccelerator.aws.upbound.io", "v1beta2", "Accelerator", "AcceleratorList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -93,7 +94,7 @@ func (mg *Listener) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.AcceleratorArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AcceleratorArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("globalaccelerator.aws.upbound.io", "v1beta1", 
"Accelerator", "AcceleratorList") + m, l, err = apisresolver.GetManagedResource("globalaccelerator.aws.upbound.io", "v1beta2", "Accelerator", "AcceleratorList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/globalaccelerator/v1beta1/zz_listener_types.go b/apis/globalaccelerator/v1beta1/zz_listener_types.go index ab7d52e7c2..6cc66e9d5b 100755 --- a/apis/globalaccelerator/v1beta1/zz_listener_types.go +++ b/apis/globalaccelerator/v1beta1/zz_listener_types.go @@ -16,7 +16,7 @@ import ( type ListenerInitParameters struct { // The Amazon Resource Name (ARN) of your accelerator. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/globalaccelerator/v1beta1.Accelerator + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/globalaccelerator/v1beta2.Accelerator AcceleratorArn *string `json:"acceleratorArn,omitempty" tf:"accelerator_arn,omitempty"` // Reference to a Accelerator in globalaccelerator to populate acceleratorArn. @@ -58,7 +58,7 @@ type ListenerObservation struct { type ListenerParameters struct { // The Amazon Resource Name (ARN) of your accelerator. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/globalaccelerator/v1beta1.Accelerator + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/globalaccelerator/v1beta2.Accelerator // +kubebuilder:validation:Optional AcceleratorArn *string `json:"acceleratorArn,omitempty" tf:"accelerator_arn,omitempty"` diff --git a/apis/globalaccelerator/v1beta2/zz_accelerator_terraformed.go b/apis/globalaccelerator/v1beta2/zz_accelerator_terraformed.go new file mode 100755 index 0000000000..5493de0521 --- /dev/null +++ b/apis/globalaccelerator/v1beta2/zz_accelerator_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Accelerator +func (mg *Accelerator) GetTerraformResourceType() string { + return "aws_globalaccelerator_accelerator" +} + +// GetConnectionDetailsMapping for this Accelerator +func (tr *Accelerator) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Accelerator +func (tr *Accelerator) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Accelerator +func (tr *Accelerator) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Accelerator +func (tr *Accelerator) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Accelerator +func (tr *Accelerator) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Accelerator +func (tr *Accelerator) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Accelerator +func (tr *Accelerator) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + 
} + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Accelerator +func (tr *Accelerator) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Accelerator using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Accelerator) LateInitialize(attrs []byte) (bool, error) { + params := &AcceleratorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Accelerator) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/globalaccelerator/v1beta2/zz_accelerator_types.go b/apis/globalaccelerator/v1beta2/zz_accelerator_types.go new file mode 100755 index 0000000000..751b706c48 --- /dev/null +++ b/apis/globalaccelerator/v1beta2/zz_accelerator_types.go @@ -0,0 +1,227 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AcceleratorInitParameters struct { + + // The attributes of the accelerator. Fields documented below. + Attributes *AttributesInitParameters `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // Indicates whether the accelerator is enabled. Defaults to true. Valid values: true, false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The value for the address type. Defaults to IPV4. Valid values: IPV4, DUAL_STACK. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // The IP addresses to use for BYOIP accelerators. If not specified, the service assigns IP addresses. Valid values: 1 or 2 IPv4 addresses. + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` + + // The name of the accelerator. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AcceleratorObservation struct { + + // The attributes of the accelerator. Fields documented below. 
+ Attributes *AttributesObservation `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // The DNS name of the accelerator. For example, a5d53ff5ee6bca4ce.awsglobalaccelerator.com. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // The Domain Name System (DNS) name that Global Accelerator creates that points to a dual-stack accelerator's four static IP addresses: two IPv4 addresses and two IPv6 addresses. For example, a1234567890abcdef.dualstack.awsglobalaccelerator.com. + DualStackDNSName *string `json:"dualStackDnsName,omitempty" tf:"dual_stack_dns_name,omitempty"` + + // Indicates whether the accelerator is enabled. Defaults to true. Valid values: true, false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // - The Global Accelerator Route 53 zone ID that can be used to + // route an Alias Resource Record Set to the Global Accelerator. This attribute + // is simply an alias for the zone ID Z2BJ6XQ5FK7U4H. + HostedZoneID *string `json:"hostedZoneId,omitempty" tf:"hosted_zone_id,omitempty"` + + // The Amazon Resource Name (ARN) of the accelerator. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The value for the address type. Defaults to IPV4. Valid values: IPV4, DUAL_STACK. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // The IP addresses to use for BYOIP accelerators. If not specified, the service assigns IP addresses. Valid values: 1 or 2 IPv4 addresses. + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` + + // IP address set associated with the accelerator. + IPSets []IPSetsObservation `json:"ipSets,omitempty" tf:"ip_sets,omitempty"` + + // The name of the accelerator. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type AcceleratorParameters struct { + + // The attributes of the accelerator. Fields documented below. + // +kubebuilder:validation:Optional + Attributes *AttributesParameters `json:"attributes,omitempty" tf:"attributes,omitempty"` + + // Indicates whether the accelerator is enabled. Defaults to true. Valid values: true, false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The value for the address type. Defaults to IPV4. Valid values: IPV4, DUAL_STACK. + // +kubebuilder:validation:Optional + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // The IP addresses to use for BYOIP accelerators. If not specified, the service assigns IP addresses. Valid values: 1 or 2 IPv4 addresses. + // +kubebuilder:validation:Optional + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` + + // The name of the accelerator. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AttributesInitParameters struct { + + // Indicates whether flow logs are enabled. Defaults to false. Valid values: true, false. 
+ FlowLogsEnabled *bool `json:"flowLogsEnabled,omitempty" tf:"flow_logs_enabled,omitempty"` + + // The name of the Amazon S3 bucket for the flow logs. Required if flow_logs_enabled is true. + FlowLogsS3Bucket *string `json:"flowLogsS3Bucket,omitempty" tf:"flow_logs_s3_bucket,omitempty"` + + // The prefix for the location in the Amazon S3 bucket for the flow logs. Required if flow_logs_enabled is true. + FlowLogsS3Prefix *string `json:"flowLogsS3Prefix,omitempty" tf:"flow_logs_s3_prefix,omitempty"` +} + +type AttributesObservation struct { + + // Indicates whether flow logs are enabled. Defaults to false. Valid values: true, false. + FlowLogsEnabled *bool `json:"flowLogsEnabled,omitempty" tf:"flow_logs_enabled,omitempty"` + + // The name of the Amazon S3 bucket for the flow logs. Required if flow_logs_enabled is true. + FlowLogsS3Bucket *string `json:"flowLogsS3Bucket,omitempty" tf:"flow_logs_s3_bucket,omitempty"` + + // The prefix for the location in the Amazon S3 bucket for the flow logs. Required if flow_logs_enabled is true. + FlowLogsS3Prefix *string `json:"flowLogsS3Prefix,omitempty" tf:"flow_logs_s3_prefix,omitempty"` +} + +type AttributesParameters struct { + + // Indicates whether flow logs are enabled. Defaults to false. Valid values: true, false. + // +kubebuilder:validation:Optional + FlowLogsEnabled *bool `json:"flowLogsEnabled,omitempty" tf:"flow_logs_enabled,omitempty"` + + // The name of the Amazon S3 bucket for the flow logs. Required if flow_logs_enabled is true. + // +kubebuilder:validation:Optional + FlowLogsS3Bucket *string `json:"flowLogsS3Bucket,omitempty" tf:"flow_logs_s3_bucket,omitempty"` + + // The prefix for the location in the Amazon S3 bucket for the flow logs. Required if flow_logs_enabled is true. 
+ // +kubebuilder:validation:Optional + FlowLogsS3Prefix *string `json:"flowLogsS3Prefix,omitempty" tf:"flow_logs_s3_prefix,omitempty"` +} + +type IPSetsInitParameters struct { +} + +type IPSetsObservation struct { + + // The IP addresses to use for BYOIP accelerators. If not specified, the service assigns IP addresses. Valid values: 1 or 2 IPv4 addresses. + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` + + // The type of IP addresses included in this IP set. + IPFamily *string `json:"ipFamily,omitempty" tf:"ip_family,omitempty"` +} + +type IPSetsParameters struct { +} + +// AcceleratorSpec defines the desired state of Accelerator +type AcceleratorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AcceleratorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AcceleratorInitParameters `json:"initProvider,omitempty"` +} + +// AcceleratorStatus defines the observed state of Accelerator. +type AcceleratorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AcceleratorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Accelerator is the Schema for the Accelerators API. Provides a Global Accelerator accelerator. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Accelerator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec AcceleratorSpec `json:"spec"` + Status AcceleratorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AcceleratorList contains a list of Accelerators +type AcceleratorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Accelerator `json:"items"` +} + +// Repository type metadata. +var ( + Accelerator_Kind = "Accelerator" + Accelerator_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Accelerator_Kind}.String() + Accelerator_KindAPIVersion = Accelerator_Kind + "." 
+ CRDGroupVersion.String() + Accelerator_GroupVersionKind = CRDGroupVersion.WithKind(Accelerator_Kind) +) + +func init() { + SchemeBuilder.Register(&Accelerator{}, &AcceleratorList{}) +} diff --git a/apis/globalaccelerator/v1beta2/zz_generated.conversion_hubs.go b/apis/globalaccelerator/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..8df8193acd --- /dev/null +++ b/apis/globalaccelerator/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Accelerator) Hub() {} diff --git a/apis/globalaccelerator/v1beta2/zz_generated.deepcopy.go b/apis/globalaccelerator/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f98ad4d5d7 --- /dev/null +++ b/apis/globalaccelerator/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,492 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Accelerator) DeepCopyInto(out *Accelerator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Accelerator. +func (in *Accelerator) DeepCopy() *Accelerator { + if in == nil { + return nil + } + out := new(Accelerator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Accelerator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorInitParameters) DeepCopyInto(out *AcceleratorInitParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = new(AttributesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorInitParameters. +func (in *AcceleratorInitParameters) DeepCopy() *AcceleratorInitParameters { + if in == nil { + return nil + } + out := new(AcceleratorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AcceleratorList) DeepCopyInto(out *AcceleratorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Accelerator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorList. +func (in *AcceleratorList) DeepCopy() *AcceleratorList { + if in == nil { + return nil + } + out := new(AcceleratorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AcceleratorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorObservation) DeepCopyInto(out *AcceleratorObservation) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = new(AttributesObservation) + (*in).DeepCopyInto(*out) + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DualStackDNSName != nil { + in, out := &in.DualStackDNSName, &out.DualStackDNSName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil 
{ + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPSets != nil { + in, out := &in.IPSets, &out.IPSets + *out = make([]IPSetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorObservation. +func (in *AcceleratorObservation) DeepCopy() *AcceleratorObservation { + if in == nil { + return nil + } + out := new(AcceleratorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AcceleratorParameters) DeepCopyInto(out *AcceleratorParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = new(AttributesParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorParameters. +func (in *AcceleratorParameters) DeepCopy() *AcceleratorParameters { + if in == nil { + return nil + } + out := new(AcceleratorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorSpec) DeepCopyInto(out *AcceleratorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorSpec. 
+func (in *AcceleratorSpec) DeepCopy() *AcceleratorSpec { + if in == nil { + return nil + } + out := new(AcceleratorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorStatus) DeepCopyInto(out *AcceleratorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorStatus. +func (in *AcceleratorStatus) DeepCopy() *AcceleratorStatus { + if in == nil { + return nil + } + out := new(AcceleratorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttributesInitParameters) DeepCopyInto(out *AttributesInitParameters) { + *out = *in + if in.FlowLogsEnabled != nil { + in, out := &in.FlowLogsEnabled, &out.FlowLogsEnabled + *out = new(bool) + **out = **in + } + if in.FlowLogsS3Bucket != nil { + in, out := &in.FlowLogsS3Bucket, &out.FlowLogsS3Bucket + *out = new(string) + **out = **in + } + if in.FlowLogsS3Prefix != nil { + in, out := &in.FlowLogsS3Prefix, &out.FlowLogsS3Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttributesInitParameters. +func (in *AttributesInitParameters) DeepCopy() *AttributesInitParameters { + if in == nil { + return nil + } + out := new(AttributesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AttributesObservation) DeepCopyInto(out *AttributesObservation) { + *out = *in + if in.FlowLogsEnabled != nil { + in, out := &in.FlowLogsEnabled, &out.FlowLogsEnabled + *out = new(bool) + **out = **in + } + if in.FlowLogsS3Bucket != nil { + in, out := &in.FlowLogsS3Bucket, &out.FlowLogsS3Bucket + *out = new(string) + **out = **in + } + if in.FlowLogsS3Prefix != nil { + in, out := &in.FlowLogsS3Prefix, &out.FlowLogsS3Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttributesObservation. +func (in *AttributesObservation) DeepCopy() *AttributesObservation { + if in == nil { + return nil + } + out := new(AttributesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttributesParameters) DeepCopyInto(out *AttributesParameters) { + *out = *in + if in.FlowLogsEnabled != nil { + in, out := &in.FlowLogsEnabled, &out.FlowLogsEnabled + *out = new(bool) + **out = **in + } + if in.FlowLogsS3Bucket != nil { + in, out := &in.FlowLogsS3Bucket, &out.FlowLogsS3Bucket + *out = new(string) + **out = **in + } + if in.FlowLogsS3Prefix != nil { + in, out := &in.FlowLogsS3Prefix, &out.FlowLogsS3Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttributesParameters. +func (in *AttributesParameters) DeepCopy() *AttributesParameters { + if in == nil { + return nil + } + out := new(AttributesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetsInitParameters) DeepCopyInto(out *IPSetsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetsInitParameters. 
+func (in *IPSetsInitParameters) DeepCopy() *IPSetsInitParameters { + if in == nil { + return nil + } + out := new(IPSetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetsObservation) DeepCopyInto(out *IPSetsObservation) { + *out = *in + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetsObservation. +func (in *IPSetsObservation) DeepCopy() *IPSetsObservation { + if in == nil { + return nil + } + out := new(IPSetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetsParameters) DeepCopyInto(out *IPSetsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetsParameters. +func (in *IPSetsParameters) DeepCopy() *IPSetsParameters { + if in == nil { + return nil + } + out := new(IPSetsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/globalaccelerator/v1beta2/zz_generated.managed.go b/apis/globalaccelerator/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..1bde6989ed --- /dev/null +++ b/apis/globalaccelerator/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Accelerator. +func (mg *Accelerator) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Accelerator. +func (mg *Accelerator) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Accelerator. +func (mg *Accelerator) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Accelerator. +func (mg *Accelerator) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Accelerator. +func (mg *Accelerator) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Accelerator. +func (mg *Accelerator) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Accelerator. +func (mg *Accelerator) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Accelerator. +func (mg *Accelerator) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Accelerator. +func (mg *Accelerator) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Accelerator. +func (mg *Accelerator) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Accelerator. +func (mg *Accelerator) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Accelerator. 
+func (mg *Accelerator) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/globalaccelerator/v1beta2/zz_generated.managedlist.go b/apis/globalaccelerator/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..6ba52f0bf6 --- /dev/null +++ b/apis/globalaccelerator/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AcceleratorList. +func (l *AcceleratorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/globalaccelerator/v1beta2/zz_groupversion_info.go b/apis/globalaccelerator/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..95bbcc8267 --- /dev/null +++ b/apis/globalaccelerator/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=globalaccelerator.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "globalaccelerator.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/glue/v1beta1/zz_generated.conversion_hubs.go b/apis/glue/v1beta1/zz_generated.conversion_hubs.go index de5ea719b1..c90629954b 100755 --- a/apis/glue/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/glue/v1beta1/zz_generated.conversion_hubs.go @@ -6,27 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *CatalogDatabase) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *CatalogTable) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Classifier) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Connection) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Crawler) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DataCatalogEncryptionSettings) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Job) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Registry) Hub() {} @@ -36,12 +15,6 @@ func (tr *ResourcePolicy) Hub() {} // Hub marks this type as a conversion hub. func (tr *Schema) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SecurityConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Trigger) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *UserDefinedFunction) Hub() {} diff --git a/apis/glue/v1beta1/zz_generated.conversion_spokes.go b/apis/glue/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..9e06dd72bf --- /dev/null +++ b/apis/glue/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,194 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this CatalogDatabase to the hub type. +func (tr *CatalogDatabase) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CatalogDatabase type. +func (tr *CatalogDatabase) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this CatalogTable to the hub type. 
+func (tr *CatalogTable) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CatalogTable type. +func (tr *CatalogTable) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Classifier to the hub type. +func (tr *Classifier) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Classifier type. +func (tr *Classifier) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Connection to the hub type. 
+func (tr *Connection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Connection type. +func (tr *Connection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Crawler to the hub type. +func (tr *Crawler) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Crawler type. +func (tr *Crawler) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DataCatalogEncryptionSettings to the hub type. 
+func (tr *DataCatalogEncryptionSettings) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataCatalogEncryptionSettings type. +func (tr *DataCatalogEncryptionSettings) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Job to the hub type. +func (tr *Job) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Job type. +func (tr *Job) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SecurityConfiguration to the hub type. 
+func (tr *SecurityConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SecurityConfiguration type. +func (tr *SecurityConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Trigger to the hub type. +func (tr *Trigger) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Trigger type. 
+func (tr *Trigger) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/glue/v1beta1/zz_generated.resolvers.go b/apis/glue/v1beta1/zz_generated.resolvers.go index 98cabe652d..63c67a4d71 100644 --- a/apis/glue/v1beta1/zz_generated.resolvers.go +++ b/apis/glue/v1beta1/zz_generated.resolvers.go @@ -914,7 +914,7 @@ func (mg *UserDefinedFunction) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta1", "CatalogDatabase", "CatalogDatabaseList") + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogDatabase", "CatalogDatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/glue/v1beta1/zz_userdefinedfunction_types.go b/apis/glue/v1beta1/zz_userdefinedfunction_types.go index c37e10cb85..dd048f85cb 100755 --- a/apis/glue/v1beta1/zz_userdefinedfunction_types.go +++ b/apis/glue/v1beta1/zz_userdefinedfunction_types.go @@ -98,7 +98,7 @@ type UserDefinedFunctionParameters struct { ClassName *string `json:"className,omitempty" tf:"class_name,omitempty"` // The name of the Database to create the Function. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta1.CatalogDatabase + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogDatabase // +kubebuilder:validation:Optional DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` diff --git a/apis/glue/v1beta2/zz_catalogdatabase_terraformed.go b/apis/glue/v1beta2/zz_catalogdatabase_terraformed.go new file mode 100755 index 0000000000..2eacd48099 --- /dev/null +++ b/apis/glue/v1beta2/zz_catalogdatabase_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CatalogDatabase +func (mg *CatalogDatabase) GetTerraformResourceType() string { + return "aws_glue_catalog_database" +} + +// GetConnectionDetailsMapping for this CatalogDatabase +func (tr *CatalogDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CatalogDatabase +func (tr *CatalogDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CatalogDatabase +func (tr *CatalogDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CatalogDatabase +func (tr *CatalogDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this CatalogDatabase +func (tr *CatalogDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CatalogDatabase +func (tr *CatalogDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CatalogDatabase +func (tr *CatalogDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CatalogDatabase +func (tr *CatalogDatabase) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CatalogDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CatalogDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &CatalogDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CatalogDatabase) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glue/v1beta2/zz_catalogdatabase_types.go b/apis/glue/v1beta2/zz_catalogdatabase_types.go new file mode 100755 index 0000000000..e22640f826 --- /dev/null +++ b/apis/glue/v1beta2/zz_catalogdatabase_types.go @@ -0,0 +1,295 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CatalogDatabaseInitParameters struct { + + // Creates a set of default permissions on the table for principals. See create_table_default_permission below. 
+ CreateTableDefaultPermission []CreateTableDefaultPermissionInitParameters `json:"createTableDefaultPermission,omitempty" tf:"create_table_default_permission,omitempty"` + + // Description of the database. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block that references an entity outside the AWS Glue Data Catalog. See federated_database below. + FederatedDatabase *FederatedDatabaseInitParameters `json:"federatedDatabase,omitempty" tf:"federated_database,omitempty"` + + // Location of the database (for example, an HDFS path). + LocationURI *string `json:"locationUri,omitempty" tf:"location_uri,omitempty"` + + // List of key-value pairs that define parameters and properties of the database. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for a target database for resource linking. See target_database below. + TargetDatabase *TargetDatabaseInitParameters `json:"targetDatabase,omitempty" tf:"target_database,omitempty"` +} + +type CatalogDatabaseObservation struct { + + // ARN of the Glue Catalog Database. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ID of the Glue Catalog to create the database in. If omitted, this defaults to the AWS Account ID. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Creates a set of default permissions on the table for principals. See create_table_default_permission below. + CreateTableDefaultPermission []CreateTableDefaultPermissionObservation `json:"createTableDefaultPermission,omitempty" tf:"create_table_default_permission,omitempty"` + + // Description of the database. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block that references an entity outside the AWS Glue Data Catalog. See federated_database below. + FederatedDatabase *FederatedDatabaseObservation `json:"federatedDatabase,omitempty" tf:"federated_database,omitempty"` + + // Catalog ID and name of the database. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Location of the database (for example, an HDFS path). + LocationURI *string `json:"locationUri,omitempty" tf:"location_uri,omitempty"` + + // List of key-value pairs that define parameters and properties of the database. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block for a target database for resource linking. See target_database below. + TargetDatabase *TargetDatabaseObservation `json:"targetDatabase,omitempty" tf:"target_database,omitempty"` +} + +type CatalogDatabaseParameters struct { + + // ID of the Glue Catalog to create the database in. If omitted, this defaults to the AWS Account ID. + // +kubebuilder:validation:Required + CatalogID *string `json:"catalogId" tf:"catalog_id,omitempty"` + + // Creates a set of default permissions on the table for principals. See create_table_default_permission below. + // +kubebuilder:validation:Optional + CreateTableDefaultPermission []CreateTableDefaultPermissionParameters `json:"createTableDefaultPermission,omitempty" tf:"create_table_default_permission,omitempty"` + + // Description of the database. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block that references an entity outside the AWS Glue Data Catalog. See federated_database below. + // +kubebuilder:validation:Optional + FederatedDatabase *FederatedDatabaseParameters `json:"federatedDatabase,omitempty" tf:"federated_database,omitempty"` + + // Location of the database (for example, an HDFS path). + // +kubebuilder:validation:Optional + LocationURI *string `json:"locationUri,omitempty" tf:"location_uri,omitempty"` + + // List of key-value pairs that define parameters and properties of the database. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Region of the target database. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for a target database for resource linking. See target_database below. + // +kubebuilder:validation:Optional + TargetDatabase *TargetDatabaseParameters `json:"targetDatabase,omitempty" tf:"target_database,omitempty"` +} + +type CreateTableDefaultPermissionInitParameters struct { + + // The permissions that are granted to the principal. + // +listType=set + Permissions []*string `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // The principal who is granted permissions.. See principal below. + Principal *PrincipalInitParameters `json:"principal,omitempty" tf:"principal,omitempty"` +} + +type CreateTableDefaultPermissionObservation struct { + + // The permissions that are granted to the principal. 
+ // +listType=set + Permissions []*string `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // The principal who is granted permissions.. See principal below. + Principal *PrincipalObservation `json:"principal,omitempty" tf:"principal,omitempty"` +} + +type CreateTableDefaultPermissionParameters struct { + + // The permissions that are granted to the principal. + // +kubebuilder:validation:Optional + // +listType=set + Permissions []*string `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // The principal who is granted permissions.. See principal below. + // +kubebuilder:validation:Optional + Principal *PrincipalParameters `json:"principal,omitempty" tf:"principal,omitempty"` +} + +type FederatedDatabaseInitParameters struct { + + // Name of the connection to the external metastore. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Unique identifier for the federated database. + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` +} + +type FederatedDatabaseObservation struct { + + // Name of the connection to the external metastore. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Unique identifier for the federated database. + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` +} + +type FederatedDatabaseParameters struct { + + // Name of the connection to the external metastore. + // +kubebuilder:validation:Optional + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Unique identifier for the federated database. + // +kubebuilder:validation:Optional + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` +} + +type PrincipalInitParameters struct { + + // An identifier for the Lake Formation principal. 
+ DataLakePrincipalIdentifier *string `json:"dataLakePrincipalIdentifier,omitempty" tf:"data_lake_principal_identifier,omitempty"` +} + +type PrincipalObservation struct { + + // An identifier for the Lake Formation principal. + DataLakePrincipalIdentifier *string `json:"dataLakePrincipalIdentifier,omitempty" tf:"data_lake_principal_identifier,omitempty"` +} + +type PrincipalParameters struct { + + // An identifier for the Lake Formation principal. + // +kubebuilder:validation:Optional + DataLakePrincipalIdentifier *string `json:"dataLakePrincipalIdentifier,omitempty" tf:"data_lake_principal_identifier,omitempty"` +} + +type TargetDatabaseInitParameters struct { + + // Name of the catalog database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` +} + +type TargetDatabaseObservation struct { + + // ID of the Data Catalog in which the database resides. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Name of the catalog database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Region of the target database. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type TargetDatabaseParameters struct { + + // ID of the Data Catalog in which the database resides. + // +kubebuilder:validation:Required + CatalogID *string `json:"catalogId" tf:"catalog_id,omitempty"` + + // Name of the catalog database. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // Region of the target database. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +// CatalogDatabaseSpec defines the desired state of CatalogDatabase +type CatalogDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CatalogDatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CatalogDatabaseInitParameters `json:"initProvider,omitempty"` +} + +// CatalogDatabaseStatus defines the observed state of CatalogDatabase. +type CatalogDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CatalogDatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CatalogDatabase is the Schema for the CatalogDatabases API. Provides a Glue Catalog Database. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CatalogDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CatalogDatabaseSpec `json:"spec"` + Status CatalogDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CatalogDatabaseList contains a list of CatalogDatabases +type CatalogDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CatalogDatabase `json:"items"` +} + +// Repository type metadata. +var ( + CatalogDatabase_Kind = "CatalogDatabase" + CatalogDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CatalogDatabase_Kind}.String() + CatalogDatabase_KindAPIVersion = CatalogDatabase_Kind + "." + CRDGroupVersion.String() + CatalogDatabase_GroupVersionKind = CRDGroupVersion.WithKind(CatalogDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&CatalogDatabase{}, &CatalogDatabaseList{}) +} diff --git a/apis/glue/v1beta2/zz_catalogtable_terraformed.go b/apis/glue/v1beta2/zz_catalogtable_terraformed.go new file mode 100755 index 0000000000..1a737ee5cd --- /dev/null +++ b/apis/glue/v1beta2/zz_catalogtable_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CatalogTable +func (mg *CatalogTable) GetTerraformResourceType() string { + return "aws_glue_catalog_table" +} + +// GetConnectionDetailsMapping for this CatalogTable +func (tr *CatalogTable) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CatalogTable +func (tr *CatalogTable) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CatalogTable +func (tr *CatalogTable) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CatalogTable +func (tr *CatalogTable) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CatalogTable +func (tr *CatalogTable) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CatalogTable +func (tr *CatalogTable) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CatalogTable +func (tr *CatalogTable) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base 
:= map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CatalogTable +func (tr *CatalogTable) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CatalogTable using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CatalogTable) LateInitialize(attrs []byte) (bool, error) { + params := &CatalogTableParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CatalogTable) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glue/v1beta2/zz_catalogtable_types.go b/apis/glue/v1beta2/zz_catalogtable_types.go new file mode 100755 index 0000000000..4f0cbc6354 --- /dev/null +++ b/apis/glue/v1beta2/zz_catalogtable_types.go @@ -0,0 +1,784 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CatalogTableInitParameters struct { + + // Description of the table. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block for open table formats. See open_table_format_input below. + OpenTableFormatInput *OpenTableFormatInputInitParameters `json:"openTableFormatInput,omitempty" tf:"open_table_format_input,omitempty"` + + // Owner of the table. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Properties associated with this table, as a list of key-value pairs. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Configuration block for a maximum of 3 partition indexes. See partition_index below. + PartitionIndex []PartitionIndexInitParameters `json:"partitionIndex,omitempty" tf:"partition_index,omitempty"` + + // Configuration block of columns by which the table is partitioned. Only primitive types are supported as partition keys. See partition_keys below. + PartitionKeys []PartitionKeysInitParameters `json:"partitionKeys,omitempty" tf:"partition_keys,omitempty"` + + // Retention time for this table. 
+ Retention *float64 `json:"retention,omitempty" tf:"retention,omitempty"` + + // Configuration block for information about the physical storage of this table. For more information, refer to the Glue Developer Guide. See storage_descriptor below. + StorageDescriptor *StorageDescriptorInitParameters `json:"storageDescriptor,omitempty" tf:"storage_descriptor,omitempty"` + + // Type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). While optional, some Athena DDL queries such as ALTER TABLE and SHOW CREATE TABLE will fail if this argument is empty. + TableType *string `json:"tableType,omitempty" tf:"table_type,omitempty"` + + // Configuration block of a target table for resource linking. See target_table below. + TargetTable *TargetTableInitParameters `json:"targetTable,omitempty" tf:"target_table,omitempty"` + + // If the table is a view, the expanded text of the view; otherwise null. + ViewExpandedText *string `json:"viewExpandedText,omitempty" tf:"view_expanded_text,omitempty"` + + // If the table is a view, the original text of the view; otherwise null. + ViewOriginalText *string `json:"viewOriginalText,omitempty" tf:"view_original_text,omitempty"` +} + +type CatalogTableObservation struct { + + // The ARN of the Glue Table. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Description of the table. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Catalog ID, Database name and of the name table. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block for open table formats. 
See open_table_format_input below. + OpenTableFormatInput *OpenTableFormatInputObservation `json:"openTableFormatInput,omitempty" tf:"open_table_format_input,omitempty"` + + // Owner of the table. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Properties associated with this table, as a list of key-value pairs. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Configuration block for a maximum of 3 partition indexes. See partition_index below. + PartitionIndex []PartitionIndexObservation `json:"partitionIndex,omitempty" tf:"partition_index,omitempty"` + + // Configuration block of columns by which the table is partitioned. Only primitive types are supported as partition keys. See partition_keys below. + PartitionKeys []PartitionKeysObservation `json:"partitionKeys,omitempty" tf:"partition_keys,omitempty"` + + // Retention time for this table. + Retention *float64 `json:"retention,omitempty" tf:"retention,omitempty"` + + // Configuration block for information about the physical storage of this table. For more information, refer to the Glue Developer Guide. See storage_descriptor below. + StorageDescriptor *StorageDescriptorObservation `json:"storageDescriptor,omitempty" tf:"storage_descriptor,omitempty"` + + // Type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). While optional, some Athena DDL queries such as ALTER TABLE and SHOW CREATE TABLE will fail if this argument is empty. + TableType *string `json:"tableType,omitempty" tf:"table_type,omitempty"` + + // Configuration block of a target table for resource linking. See target_table below. + TargetTable *TargetTableObservation `json:"targetTable,omitempty" tf:"target_table,omitempty"` + + // If the table is a view, the expanded text of the view; otherwise null. 
+ ViewExpandedText *string `json:"viewExpandedText,omitempty" tf:"view_expanded_text,omitempty"` + + // If the table is a view, the original text of the view; otherwise null. + ViewOriginalText *string `json:"viewOriginalText,omitempty" tf:"view_original_text,omitempty"` +} + +type CatalogTableParameters struct { + + // ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name. + // +kubebuilder:validation:Required + CatalogID *string `json:"catalogId" tf:"catalog_id,omitempty"` + + // Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogDatabase + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a CatalogDatabase in glue to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a CatalogDatabase in glue to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // Description of the table. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block for open table formats. See open_table_format_input below. + // +kubebuilder:validation:Optional + OpenTableFormatInput *OpenTableFormatInputParameters `json:"openTableFormatInput,omitempty" tf:"open_table_format_input,omitempty"` + + // Owner of the table. + // +kubebuilder:validation:Optional + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Properties associated with this table, as a list of key-value pairs. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Configuration block for a maximum of 3 partition indexes. See partition_index below. + // +kubebuilder:validation:Optional + PartitionIndex []PartitionIndexParameters `json:"partitionIndex,omitempty" tf:"partition_index,omitempty"` + + // Configuration block of columns by which the table is partitioned. Only primitive types are supported as partition keys. See partition_keys below. + // +kubebuilder:validation:Optional + PartitionKeys []PartitionKeysParameters `json:"partitionKeys,omitempty" tf:"partition_keys,omitempty"` + + // Region of the target table. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Retention time for this table. + // +kubebuilder:validation:Optional + Retention *float64 `json:"retention,omitempty" tf:"retention,omitempty"` + + // Configuration block for information about the physical storage of this table. For more information, refer to the Glue Developer Guide. See storage_descriptor below. + // +kubebuilder:validation:Optional + StorageDescriptor *StorageDescriptorParameters `json:"storageDescriptor,omitempty" tf:"storage_descriptor,omitempty"` + + // Type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). While optional, some Athena DDL queries such as ALTER TABLE and SHOW CREATE TABLE will fail if this argument is empty. + // +kubebuilder:validation:Optional + TableType *string `json:"tableType,omitempty" tf:"table_type,omitempty"` + + // Configuration block of a target table for resource linking. See target_table below. + // +kubebuilder:validation:Optional + TargetTable *TargetTableParameters `json:"targetTable,omitempty" tf:"target_table,omitempty"` + + // If the table is a view, the expanded text of the view; otherwise null. 
+ // +kubebuilder:validation:Optional + ViewExpandedText *string `json:"viewExpandedText,omitempty" tf:"view_expanded_text,omitempty"` + + // If the table is a view, the original text of the view; otherwise null. + // +kubebuilder:validation:Optional + ViewOriginalText *string `json:"viewOriginalText,omitempty" tf:"view_original_text,omitempty"` +} + +type ColumnsInitParameters struct { + + // Free-form text comment. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Name of the Column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value pairs defining properties associated with the column. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Datatype of data in the Column. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnsObservation struct { + + // Free-form text comment. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Name of the Column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value pairs defining properties associated with the column. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Datatype of data in the Column. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnsParameters struct { + + // Free-form text comment. + // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Name of the Column. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Key-value pairs defining properties associated with the column. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Datatype of data in the Column. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IcebergInputInitParameters struct { + + // A required metadata operation. Can only be set to CREATE. + MetadataOperation *string `json:"metadataOperation,omitempty" tf:"metadata_operation,omitempty"` + + // The table version for the Iceberg table. Defaults to 2. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type IcebergInputObservation struct { + + // A required metadata operation. Can only be set to CREATE. + MetadataOperation *string `json:"metadataOperation,omitempty" tf:"metadata_operation,omitempty"` + + // The table version for the Iceberg table. Defaults to 2. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type IcebergInputParameters struct { + + // A required metadata operation. Can only be set to CREATE. + // +kubebuilder:validation:Optional + MetadataOperation *string `json:"metadataOperation" tf:"metadata_operation,omitempty"` + + // The table version for the Iceberg table. Defaults to 2. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type OpenTableFormatInputInitParameters struct { + + // Configuration block for iceberg table config. See iceberg_input below. + IcebergInput *IcebergInputInitParameters `json:"icebergInput,omitempty" tf:"iceberg_input,omitempty"` +} + +type OpenTableFormatInputObservation struct { + + // Configuration block for iceberg table config. See iceberg_input below. + IcebergInput *IcebergInputObservation `json:"icebergInput,omitempty" tf:"iceberg_input,omitempty"` +} + +type OpenTableFormatInputParameters struct { + + // Configuration block for iceberg table config. See iceberg_input below. + // +kubebuilder:validation:Optional + IcebergInput *IcebergInputParameters `json:"icebergInput" tf:"iceberg_input,omitempty"` +} + +type PartitionIndexInitParameters struct { + + // Name of the partition index. 
+ IndexName *string `json:"indexName,omitempty" tf:"index_name,omitempty"` + + // Keys for the partition index. + Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` +} + +type PartitionIndexObservation struct { + + // Name of the partition index. + IndexName *string `json:"indexName,omitempty" tf:"index_name,omitempty"` + + IndexStatus *string `json:"indexStatus,omitempty" tf:"index_status,omitempty"` + + // Keys for the partition index. + Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` +} + +type PartitionIndexParameters struct { + + // Name of the partition index. + // +kubebuilder:validation:Optional + IndexName *string `json:"indexName" tf:"index_name,omitempty"` + + // Keys for the partition index. + // +kubebuilder:validation:Optional + Keys []*string `json:"keys" tf:"keys,omitempty"` +} + +type PartitionKeysInitParameters struct { + + // Free-form text comment. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Name of the Partition Key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Datatype of data in the Partition Key. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PartitionKeysObservation struct { + + // Free-form text comment. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Name of the Partition Key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Datatype of data in the Partition Key. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PartitionKeysParameters struct { + + // Free-form text comment. + // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // Name of the Partition Key. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Datatype of data in the Partition Key. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SchemaIDInitParameters struct { + + // Name of the schema registry that contains the schema. Must be provided when schema_name is specified and conflicts with schema_arn. + RegistryName *string `json:"registryName,omitempty" tf:"registry_name,omitempty"` + + // ARN of the schema. One of schema_arn or schema_name has to be provided. + SchemaArn *string `json:"schemaArn,omitempty" tf:"schema_arn,omitempty"` + + // Name of the schema. One of schema_arn or schema_name has to be provided. + SchemaName *string `json:"schemaName,omitempty" tf:"schema_name,omitempty"` +} + +type SchemaIDObservation struct { + + // Name of the schema registry that contains the schema. Must be provided when schema_name is specified and conflicts with schema_arn. + RegistryName *string `json:"registryName,omitempty" tf:"registry_name,omitempty"` + + // ARN of the schema. One of schema_arn or schema_name has to be provided. + SchemaArn *string `json:"schemaArn,omitempty" tf:"schema_arn,omitempty"` + + // Name of the schema. One of schema_arn or schema_name has to be provided. + SchemaName *string `json:"schemaName,omitempty" tf:"schema_name,omitempty"` +} + +type SchemaIDParameters struct { + + // Name of the schema registry that contains the schema. Must be provided when schema_name is specified and conflicts with schema_arn. + // +kubebuilder:validation:Optional + RegistryName *string `json:"registryName,omitempty" tf:"registry_name,omitempty"` + + // ARN of the schema. One of schema_arn or schema_name has to be provided. + // +kubebuilder:validation:Optional + SchemaArn *string `json:"schemaArn,omitempty" tf:"schema_arn,omitempty"` + + // Name of the schema. One of schema_arn or schema_name has to be provided. 
+ // +kubebuilder:validation:Optional + SchemaName *string `json:"schemaName,omitempty" tf:"schema_name,omitempty"` +} + +type SchemaReferenceInitParameters struct { + + // Configuration block that contains schema identity fields. Either this or the schema_version_id has to be provided. See schema_id below. + SchemaID *SchemaIDInitParameters `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // Unique ID assigned to a version of the schema. Either this or the schema_id has to be provided. + SchemaVersionID *string `json:"schemaVersionId,omitempty" tf:"schema_version_id,omitempty"` + + // Version number of the schema. + SchemaVersionNumber *float64 `json:"schemaVersionNumber,omitempty" tf:"schema_version_number,omitempty"` +} + +type SchemaReferenceObservation struct { + + // Configuration block that contains schema identity fields. Either this or the schema_version_id has to be provided. See schema_id below. + SchemaID *SchemaIDObservation `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // Unique ID assigned to a version of the schema. Either this or the schema_id has to be provided. + SchemaVersionID *string `json:"schemaVersionId,omitempty" tf:"schema_version_id,omitempty"` + + // Version number of the schema. + SchemaVersionNumber *float64 `json:"schemaVersionNumber,omitempty" tf:"schema_version_number,omitempty"` +} + +type SchemaReferenceParameters struct { + + // Configuration block that contains schema identity fields. Either this or the schema_version_id has to be provided. See schema_id below. + // +kubebuilder:validation:Optional + SchemaID *SchemaIDParameters `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // Unique ID assigned to a version of the schema. Either this or the schema_id has to be provided. + // +kubebuilder:validation:Optional + SchemaVersionID *string `json:"schemaVersionId,omitempty" tf:"schema_version_id,omitempty"` + + // Version number of the schema. 
+ // +kubebuilder:validation:Optional + SchemaVersionNumber *float64 `json:"schemaVersionNumber" tf:"schema_version_number,omitempty"` +} + +type SerDeInfoInitParameters struct { + + // Name of the SerDe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Map of initialization parameters for the SerDe, in key-value form. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. + SerializationLibrary *string `json:"serializationLibrary,omitempty" tf:"serialization_library,omitempty"` +} + +type SerDeInfoObservation struct { + + // Name of the SerDe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Map of initialization parameters for the SerDe, in key-value form. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. + SerializationLibrary *string `json:"serializationLibrary,omitempty" tf:"serialization_library,omitempty"` +} + +type SerDeInfoParameters struct { + + // Name of the SerDe. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Map of initialization parameters for the SerDe, in key-value form. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. + // +kubebuilder:validation:Optional + SerializationLibrary *string `json:"serializationLibrary,omitempty" tf:"serialization_library,omitempty"` +} + +type SkewedInfoInitParameters struct { + + // List of names of columns that contain skewed values. 
+ SkewedColumnNames []*string `json:"skewedColumnNames,omitempty" tf:"skewed_column_names,omitempty"` + + // List of values that appear so frequently as to be considered skewed. + // +mapType=granular + SkewedColumnValueLocationMaps map[string]*string `json:"skewedColumnValueLocationMaps,omitempty" tf:"skewed_column_value_location_maps,omitempty"` + + // Map of skewed values to the columns that contain them. + SkewedColumnValues []*string `json:"skewedColumnValues,omitempty" tf:"skewed_column_values,omitempty"` +} + +type SkewedInfoObservation struct { + + // List of names of columns that contain skewed values. + SkewedColumnNames []*string `json:"skewedColumnNames,omitempty" tf:"skewed_column_names,omitempty"` + + // List of values that appear so frequently as to be considered skewed. + // +mapType=granular + SkewedColumnValueLocationMaps map[string]*string `json:"skewedColumnValueLocationMaps,omitempty" tf:"skewed_column_value_location_maps,omitempty"` + + // Map of skewed values to the columns that contain them. + SkewedColumnValues []*string `json:"skewedColumnValues,omitempty" tf:"skewed_column_values,omitempty"` +} + +type SkewedInfoParameters struct { + + // List of names of columns that contain skewed values. + // +kubebuilder:validation:Optional + SkewedColumnNames []*string `json:"skewedColumnNames,omitempty" tf:"skewed_column_names,omitempty"` + + // List of values that appear so frequently as to be considered skewed. + // +kubebuilder:validation:Optional + // +mapType=granular + SkewedColumnValueLocationMaps map[string]*string `json:"skewedColumnValueLocationMaps,omitempty" tf:"skewed_column_value_location_maps,omitempty"` + + // Map of skewed values to the columns that contain them. + // +kubebuilder:validation:Optional + SkewedColumnValues []*string `json:"skewedColumnValues,omitempty" tf:"skewed_column_values,omitempty"` +} + +type SortColumnsInitParameters struct { + + // Name of the column. 
+ Column *string `json:"column,omitempty" tf:"column,omitempty"` + + // Whether the column is sorted in ascending (1) or descending order (0). + SortOrder *float64 `json:"sortOrder,omitempty" tf:"sort_order,omitempty"` +} + +type SortColumnsObservation struct { + + // Name of the column. + Column *string `json:"column,omitempty" tf:"column,omitempty"` + + // Whether the column is sorted in ascending (1) or descending order (0). + SortOrder *float64 `json:"sortOrder,omitempty" tf:"sort_order,omitempty"` +} + +type SortColumnsParameters struct { + + // Name of the column. + // +kubebuilder:validation:Optional + Column *string `json:"column" tf:"column,omitempty"` + + // Whether the column is sorted in ascending (1) or descending order (0). + // +kubebuilder:validation:Optional + SortOrder *float64 `json:"sortOrder" tf:"sort_order,omitempty"` +} + +type StorageDescriptorInitParameters struct { + + // List of reducer grouping columns, clustering columns, and bucketing columns in the table. + BucketColumns []*string `json:"bucketColumns,omitempty" tf:"bucket_columns,omitempty"` + + // Configuration block for columns in the table. See columns below. + Columns []ColumnsInitParameters `json:"columns,omitempty" tf:"columns,omitempty"` + + // Whether the data in the table is compressed. + Compressed *bool `json:"compressed,omitempty" tf:"compressed,omitempty"` + + // Input format: SequenceFileInputFormat (binary), or TextInputFormat, or a custom format. + InputFormat *string `json:"inputFormat,omitempty" tf:"input_format,omitempty"` + + // Physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Must be specified if the table contains any dimension columns. 
+ NumberOfBuckets *float64 `json:"numberOfBuckets,omitempty" tf:"number_of_buckets,omitempty"` + + // Output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, or a custom format. + OutputFormat *string `json:"outputFormat,omitempty" tf:"output_format,omitempty"` + + // User-supplied properties in key-value form. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Object that references a schema stored in the AWS Glue Schema Registry. When creating a table, you can pass an empty list of columns for the schema, and instead use a schema reference. See Schema Reference below. + SchemaReference *SchemaReferenceInitParameters `json:"schemaReference,omitempty" tf:"schema_reference,omitempty"` + + // Configuration block for serialization and deserialization ("SerDe") information. See ser_de_info below. + SerDeInfo *SerDeInfoInitParameters `json:"serDeInfo,omitempty" tf:"ser_de_info,omitempty"` + + // Configuration block with information about values that appear very frequently in a column (skewed values). See skewed_info below. + SkewedInfo *SkewedInfoInitParameters `json:"skewedInfo,omitempty" tf:"skewed_info,omitempty"` + + // Configuration block for the sort order of each bucket in the table. See sort_columns below. + SortColumns []SortColumnsInitParameters `json:"sortColumns,omitempty" tf:"sort_columns,omitempty"` + + // Whether the table data is stored in subdirectories. + StoredAsSubDirectories *bool `json:"storedAsSubDirectories,omitempty" tf:"stored_as_sub_directories,omitempty"` +} + +type StorageDescriptorObservation struct { + + // List of reducer grouping columns, clustering columns, and bucketing columns in the table. + BucketColumns []*string `json:"bucketColumns,omitempty" tf:"bucket_columns,omitempty"` + + // Configuration block for columns in the table. See columns below. 
+ Columns []ColumnsObservation `json:"columns,omitempty" tf:"columns,omitempty"` + + // Whether the data in the table is compressed. + Compressed *bool `json:"compressed,omitempty" tf:"compressed,omitempty"` + + // Input format: SequenceFileInputFormat (binary), or TextInputFormat, or a custom format. + InputFormat *string `json:"inputFormat,omitempty" tf:"input_format,omitempty"` + + // Physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Must be specified if the table contains any dimension columns. + NumberOfBuckets *float64 `json:"numberOfBuckets,omitempty" tf:"number_of_buckets,omitempty"` + + // Output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, or a custom format. + OutputFormat *string `json:"outputFormat,omitempty" tf:"output_format,omitempty"` + + // User-supplied properties in key-value form. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Object that references a schema stored in the AWS Glue Schema Registry. When creating a table, you can pass an empty list of columns for the schema, and instead use a schema reference. See Schema Reference below. + SchemaReference *SchemaReferenceObservation `json:"schemaReference,omitempty" tf:"schema_reference,omitempty"` + + // Configuration block for serialization and deserialization ("SerDe") information. See ser_de_info below. + SerDeInfo *SerDeInfoObservation `json:"serDeInfo,omitempty" tf:"ser_de_info,omitempty"` + + // Configuration block with information about values that appear very frequently in a column (skewed values). See skewed_info below. + SkewedInfo *SkewedInfoObservation `json:"skewedInfo,omitempty" tf:"skewed_info,omitempty"` + + // Configuration block for the sort order of each bucket in the table. 
See sort_columns below. + SortColumns []SortColumnsObservation `json:"sortColumns,omitempty" tf:"sort_columns,omitempty"` + + // Whether the table data is stored in subdirectories. + StoredAsSubDirectories *bool `json:"storedAsSubDirectories,omitempty" tf:"stored_as_sub_directories,omitempty"` +} + +type StorageDescriptorParameters struct { + + // List of reducer grouping columns, clustering columns, and bucketing columns in the table. + // +kubebuilder:validation:Optional + BucketColumns []*string `json:"bucketColumns,omitempty" tf:"bucket_columns,omitempty"` + + // Configuration block for columns in the table. See columns below. + // +kubebuilder:validation:Optional + Columns []ColumnsParameters `json:"columns,omitempty" tf:"columns,omitempty"` + + // Whether the data in the table is compressed. + // +kubebuilder:validation:Optional + Compressed *bool `json:"compressed,omitempty" tf:"compressed,omitempty"` + + // Input format: SequenceFileInputFormat (binary), or TextInputFormat, or a custom format. + // +kubebuilder:validation:Optional + InputFormat *string `json:"inputFormat,omitempty" tf:"input_format,omitempty"` + + // Physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Must be specified if the table contains any dimension columns. + // +kubebuilder:validation:Optional + NumberOfBuckets *float64 `json:"numberOfBuckets,omitempty" tf:"number_of_buckets,omitempty"` + + // Output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, or a custom format. + // +kubebuilder:validation:Optional + OutputFormat *string `json:"outputFormat,omitempty" tf:"output_format,omitempty"` + + // User-supplied properties in key-value form. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Object that references a schema stored in the AWS Glue Schema Registry. When creating a table, you can pass an empty list of columns for the schema, and instead use a schema reference. See Schema Reference below. + // +kubebuilder:validation:Optional + SchemaReference *SchemaReferenceParameters `json:"schemaReference,omitempty" tf:"schema_reference,omitempty"` + + // Configuration block for serialization and deserialization ("SerDe") information. See ser_de_info below. + // +kubebuilder:validation:Optional + SerDeInfo *SerDeInfoParameters `json:"serDeInfo,omitempty" tf:"ser_de_info,omitempty"` + + // Configuration block with information about values that appear very frequently in a column (skewed values). See skewed_info below. + // +kubebuilder:validation:Optional + SkewedInfo *SkewedInfoParameters `json:"skewedInfo,omitempty" tf:"skewed_info,omitempty"` + + // Configuration block for the sort order of each bucket in the table. See sort_columns below. + // +kubebuilder:validation:Optional + SortColumns []SortColumnsParameters `json:"sortColumns,omitempty" tf:"sort_columns,omitempty"` + + // Whether the table data is stored in subdirectories. + // +kubebuilder:validation:Optional + StoredAsSubDirectories *bool `json:"storedAsSubDirectories,omitempty" tf:"stored_as_sub_directories,omitempty"` +} + +type TargetTableInitParameters struct { + + // Name of the target table. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TargetTableObservation struct { + + // ID of the Data Catalog in which the table resides. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Name of the catalog database that contains the target table. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Name of the target table. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region of the target table. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type TargetTableParameters struct { + + // ID of the Data Catalog in which the table resides. + // +kubebuilder:validation:Required + CatalogID *string `json:"catalogId" tf:"catalog_id,omitempty"` + + // Name of the catalog database that contains the target table. + // +kubebuilder:validation:Required + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // Name of the target table. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Region of the target table. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +// CatalogTableSpec defines the desired state of CatalogTable +type CatalogTableSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CatalogTableParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CatalogTableInitParameters `json:"initProvider,omitempty"` +} + +// CatalogTableStatus defines the observed state of CatalogTable. 
+type CatalogTableStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CatalogTableObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CatalogTable is the Schema for the CatalogTables API. Provides a Glue Catalog Table. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CatalogTable struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CatalogTableSpec `json:"spec"` + Status CatalogTableStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CatalogTableList contains a list of CatalogTables +type CatalogTableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CatalogTable `json:"items"` +} + +// Repository type metadata. +var ( + CatalogTable_Kind = "CatalogTable" + CatalogTable_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CatalogTable_Kind}.String() + CatalogTable_KindAPIVersion = CatalogTable_Kind + "." 
+ CRDGroupVersion.String() + CatalogTable_GroupVersionKind = CRDGroupVersion.WithKind(CatalogTable_Kind) +) + +func init() { + SchemeBuilder.Register(&CatalogTable{}, &CatalogTableList{}) +} diff --git a/apis/glue/v1beta2/zz_classifier_terraformed.go b/apis/glue/v1beta2/zz_classifier_terraformed.go new file mode 100755 index 0000000000..19da1dd2fb --- /dev/null +++ b/apis/glue/v1beta2/zz_classifier_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Classifier +func (mg *Classifier) GetTerraformResourceType() string { + return "aws_glue_classifier" +} + +// GetConnectionDetailsMapping for this Classifier +func (tr *Classifier) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Classifier +func (tr *Classifier) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Classifier +func (tr *Classifier) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Classifier +func (tr *Classifier) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Classifier +func (tr *Classifier) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err 
+ } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Classifier +func (tr *Classifier) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Classifier +func (tr *Classifier) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Classifier +func (tr *Classifier) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Classifier using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Classifier) LateInitialize(attrs []byte) (bool, error) { + params := &ClassifierParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Classifier) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glue/v1beta2/zz_classifier_types.go b/apis/glue/v1beta2/zz_classifier_types.go new file mode 100755 index 0000000000..c86390aa36 --- /dev/null +++ b/apis/glue/v1beta2/zz_classifier_types.go @@ -0,0 +1,314 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClassifierInitParameters struct { + + // A classifier for Csv content. Defined below. + CsvClassifier *CsvClassifierInitParameters `json:"csvClassifier,omitempty" tf:"csv_classifier,omitempty"` + + // – A classifier that uses grok patterns. Defined below. + GrokClassifier *GrokClassifierInitParameters `json:"grokClassifier,omitempty" tf:"grok_classifier,omitempty"` + + // – A classifier for JSON content. Defined below. + JSONClassifier *JSONClassifierInitParameters `json:"jsonClassifier,omitempty" tf:"json_classifier,omitempty"` + + // – A classifier for XML content. Defined below. 
+ XMLClassifier *XMLClassifierInitParameters `json:"xmlClassifier,omitempty" tf:"xml_classifier,omitempty"` +} + +type ClassifierObservation struct { + + // A classifier for Csv content. Defined below. + CsvClassifier *CsvClassifierObservation `json:"csvClassifier,omitempty" tf:"csv_classifier,omitempty"` + + // – A classifier that uses grok patterns. Defined below. + GrokClassifier *GrokClassifierObservation `json:"grokClassifier,omitempty" tf:"grok_classifier,omitempty"` + + // Name of the classifier + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // – A classifier for JSON content. Defined below. + JSONClassifier *JSONClassifierObservation `json:"jsonClassifier,omitempty" tf:"json_classifier,omitempty"` + + // – A classifier for XML content. Defined below. + XMLClassifier *XMLClassifierObservation `json:"xmlClassifier,omitempty" tf:"xml_classifier,omitempty"` +} + +type ClassifierParameters struct { + + // A classifier for Csv content. Defined below. + // +kubebuilder:validation:Optional + CsvClassifier *CsvClassifierParameters `json:"csvClassifier,omitempty" tf:"csv_classifier,omitempty"` + + // – A classifier that uses grok patterns. Defined below. + // +kubebuilder:validation:Optional + GrokClassifier *GrokClassifierParameters `json:"grokClassifier,omitempty" tf:"grok_classifier,omitempty"` + + // – A classifier for JSON content. Defined below. + // +kubebuilder:validation:Optional + JSONClassifier *JSONClassifierParameters `json:"jsonClassifier,omitempty" tf:"json_classifier,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // – A classifier for XML content. Defined below. 
+ // +kubebuilder:validation:Optional + XMLClassifier *XMLClassifierParameters `json:"xmlClassifier,omitempty" tf:"xml_classifier,omitempty"` +} + +type CsvClassifierInitParameters struct { + + // Enables the processing of files that contain only one column. + AllowSingleColumn *bool `json:"allowSingleColumn,omitempty" tf:"allow_single_column,omitempty"` + + // Indicates whether the CSV file contains a header. This can be one of "ABSENT", "PRESENT", or "UNKNOWN". + ContainsHeader *string `json:"containsHeader,omitempty" tf:"contains_header,omitempty"` + + // Enables the custom datatype to be configured. + CustomDatatypeConfigured *bool `json:"customDatatypeConfigured,omitempty" tf:"custom_datatype_configured,omitempty"` + + // A list of supported custom datatypes. Valid values are BINARY, BOOLEAN, DATE, DECIMAL, DOUBLE, FLOAT, INT, LONG, SHORT, STRING, TIMESTAMP. + CustomDatatypes []*string `json:"customDatatypes,omitempty" tf:"custom_datatypes,omitempty"` + + // The delimiter used in the Csv to separate columns. + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` + + // Specifies whether to trim column values. + DisableValueTrimming *bool `json:"disableValueTrimming,omitempty" tf:"disable_value_trimming,omitempty"` + + // A list of strings representing column names. + Header []*string `json:"header,omitempty" tf:"header,omitempty"` + + // A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter. + QuoteSymbol *string `json:"quoteSymbol,omitempty" tf:"quote_symbol,omitempty"` + + Serde *string `json:"serde,omitempty" tf:"serde,omitempty"` +} + +type CsvClassifierObservation struct { + + // Enables the processing of files that contain only one column. + AllowSingleColumn *bool `json:"allowSingleColumn,omitempty" tf:"allow_single_column,omitempty"` + + // Indicates whether the CSV file contains a header. This can be one of "ABSENT", "PRESENT", or "UNKNOWN". 
+ ContainsHeader *string `json:"containsHeader,omitempty" tf:"contains_header,omitempty"` + + // Enables the custom datatype to be configured. + CustomDatatypeConfigured *bool `json:"customDatatypeConfigured,omitempty" tf:"custom_datatype_configured,omitempty"` + + // A list of supported custom datatypes. Valid values are BINARY, BOOLEAN, DATE, DECIMAL, DOUBLE, FLOAT, INT, LONG, SHORT, STRING, TIMESTAMP. + CustomDatatypes []*string `json:"customDatatypes,omitempty" tf:"custom_datatypes,omitempty"` + + // The delimiter used in the Csv to separate columns. + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` + + // Specifies whether to trim column values. + DisableValueTrimming *bool `json:"disableValueTrimming,omitempty" tf:"disable_value_trimming,omitempty"` + + // A list of strings representing column names. + Header []*string `json:"header,omitempty" tf:"header,omitempty"` + + // A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter. + QuoteSymbol *string `json:"quoteSymbol,omitempty" tf:"quote_symbol,omitempty"` + + Serde *string `json:"serde,omitempty" tf:"serde,omitempty"` +} + +type CsvClassifierParameters struct { + + // Enables the processing of files that contain only one column. + // +kubebuilder:validation:Optional + AllowSingleColumn *bool `json:"allowSingleColumn,omitempty" tf:"allow_single_column,omitempty"` + + // Indicates whether the CSV file contains a header. This can be one of "ABSENT", "PRESENT", or "UNKNOWN". + // +kubebuilder:validation:Optional + ContainsHeader *string `json:"containsHeader,omitempty" tf:"contains_header,omitempty"` + + // Enables the custom datatype to be configured. + // +kubebuilder:validation:Optional + CustomDatatypeConfigured *bool `json:"customDatatypeConfigured,omitempty" tf:"custom_datatype_configured,omitempty"` + + // A list of supported custom datatypes. 
Valid values are BINARY, BOOLEAN, DATE, DECIMAL, DOUBLE, FLOAT, INT, LONG, SHORT, STRING, TIMESTAMP. + // +kubebuilder:validation:Optional + CustomDatatypes []*string `json:"customDatatypes,omitempty" tf:"custom_datatypes,omitempty"` + + // The delimiter used in the Csv to separate columns. + // +kubebuilder:validation:Optional + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` + + // Specifies whether to trim column values. + // +kubebuilder:validation:Optional + DisableValueTrimming *bool `json:"disableValueTrimming,omitempty" tf:"disable_value_trimming,omitempty"` + + // A list of strings representing column names. + // +kubebuilder:validation:Optional + Header []*string `json:"header,omitempty" tf:"header,omitempty"` + + // A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter. + // +kubebuilder:validation:Optional + QuoteSymbol *string `json:"quoteSymbol,omitempty" tf:"quote_symbol,omitempty"` + + // +kubebuilder:validation:Optional + Serde *string `json:"serde,omitempty" tf:"serde,omitempty"` +} + +type GrokClassifierInitParameters struct { + + // An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on. + Classification *string `json:"classification,omitempty" tf:"classification,omitempty"` + + // Custom grok patterns used by this classifier. + CustomPatterns *string `json:"customPatterns,omitempty" tf:"custom_patterns,omitempty"` + + // The grok pattern used by this classifier. + GrokPattern *string `json:"grokPattern,omitempty" tf:"grok_pattern,omitempty"` +} + +type GrokClassifierObservation struct { + + // An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on. + Classification *string `json:"classification,omitempty" tf:"classification,omitempty"` + + // Custom grok patterns used by this classifier. 
+ CustomPatterns *string `json:"customPatterns,omitempty" tf:"custom_patterns,omitempty"` + + // The grok pattern used by this classifier. + GrokPattern *string `json:"grokPattern,omitempty" tf:"grok_pattern,omitempty"` +} + +type GrokClassifierParameters struct { + + // An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on. + // +kubebuilder:validation:Optional + Classification *string `json:"classification" tf:"classification,omitempty"` + + // Custom grok patterns used by this classifier. + // +kubebuilder:validation:Optional + CustomPatterns *string `json:"customPatterns,omitempty" tf:"custom_patterns,omitempty"` + + // The grok pattern used by this classifier. + // +kubebuilder:validation:Optional + GrokPattern *string `json:"grokPattern" tf:"grok_pattern,omitempty"` +} + +type JSONClassifierInitParameters struct { + + // A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers. + JSONPath *string `json:"jsonPath,omitempty" tf:"json_path,omitempty"` +} + +type JSONClassifierObservation struct { + + // A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers. + JSONPath *string `json:"jsonPath,omitempty" tf:"json_path,omitempty"` +} + +type JSONClassifierParameters struct { + + // A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers. + // +kubebuilder:validation:Optional + JSONPath *string `json:"jsonPath" tf:"json_path,omitempty"` +} + +type XMLClassifierInitParameters struct { + + // An identifier of the data format that the classifier matches. 
+ Classification *string `json:"classification,omitempty" tf:"classification,omitempty"` + + // The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot identify a self-closing element (closed by />). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, is okay, but is not). + RowTag *string `json:"rowTag,omitempty" tf:"row_tag,omitempty"` +} + +type XMLClassifierObservation struct { + + // An identifier of the data format that the classifier matches. + Classification *string `json:"classification,omitempty" tf:"classification,omitempty"` + + // The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot identify a self-closing element (closed by />). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, is okay, but is not). + RowTag *string `json:"rowTag,omitempty" tf:"row_tag,omitempty"` +} + +type XMLClassifierParameters struct { + + // An identifier of the data format that the classifier matches. + // +kubebuilder:validation:Optional + Classification *string `json:"classification" tf:"classification,omitempty"` + + // The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot identify a self-closing element (closed by />). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, is okay, but is not). + // +kubebuilder:validation:Optional + RowTag *string `json:"rowTag" tf:"row_tag,omitempty"` +} + +// ClassifierSpec defines the desired state of Classifier +type ClassifierSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClassifierParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClassifierInitParameters `json:"initProvider,omitempty"` +} + +// ClassifierStatus defines the observed state of Classifier. +type ClassifierStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClassifierObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Classifier is the Schema for the Classifiers API. Provides an Glue Classifier resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Classifier struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ClassifierSpec `json:"spec"` + Status ClassifierStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClassifierList contains a list of Classifiers +type ClassifierList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Classifier `json:"items"` +} + +// Repository type metadata. 
+var ( + Classifier_Kind = "Classifier" + Classifier_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Classifier_Kind}.String() + Classifier_KindAPIVersion = Classifier_Kind + "." + CRDGroupVersion.String() + Classifier_GroupVersionKind = CRDGroupVersion.WithKind(Classifier_Kind) +) + +func init() { + SchemeBuilder.Register(&Classifier{}, &ClassifierList{}) +} diff --git a/apis/glue/v1beta2/zz_connection_terraformed.go b/apis/glue/v1beta2/zz_connection_terraformed.go new file mode 100755 index 0000000000..434fb8020d --- /dev/null +++ b/apis/glue/v1beta2/zz_connection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Connection +func (mg *Connection) GetTerraformResourceType() string { + return "aws_glue_connection" +} + +// GetConnectionDetailsMapping for this Connection +func (tr *Connection) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"connection_properties": "connectionPropertiesSecretRef"} +} + +// GetObservation of this Connection +func (tr *Connection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Connection +func (tr *Connection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Connection +func (tr *Connection) GetID() string { + if tr.Status.AtProvider.ID == nil { 
+ return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Connection +func (tr *Connection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Connection +func (tr *Connection) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Connection +func (tr *Connection) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Connection +func (tr *Connection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Connection using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Connection) LateInitialize(attrs []byte) (bool, error) { + params := &ConnectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Connection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glue/v1beta2/zz_connection_types.go b/apis/glue/v1beta2/zz_connection_types.go new file mode 100755 index 0000000000..ab25d80dff --- /dev/null +++ b/apis/glue/v1beta2/zz_connection_types.go @@ -0,0 +1,245 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConnectionInitParameters struct { + ConnectionProperties map[string]*string `json:"connectionPropertiesSecretRef,omitempty" tf:"-"` + + // – The type of the connection. Supported are: CUSTOM, JDBC, KAFKA, MARKETPLACE, MONGODB, and NETWORK. Defaults to JDBC. 
+ ConnectionType *string `json:"connectionType,omitempty" tf:"connection_type,omitempty"` + + // – Description of the connection. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // – A list of criteria that can be used in selecting this connection. + MatchCriteria []*string `json:"matchCriteria,omitempty" tf:"match_criteria,omitempty"` + + // A map of physical connection requirements, such as VPC and SecurityGroup. Defined below. + PhysicalConnectionRequirements *PhysicalConnectionRequirementsInitParameters `json:"physicalConnectionRequirements,omitempty" tf:"physical_connection_requirements,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ConnectionObservation struct { + + // The ARN of the Glue Connection. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // – The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – The type of the connection. Supported are: CUSTOM, JDBC, KAFKA, MARKETPLACE, MONGODB, and NETWORK. Defaults to JDBC. + ConnectionType *string `json:"connectionType,omitempty" tf:"connection_type,omitempty"` + + // – Description of the connection. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Catalog ID and name of the connection + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // – A list of criteria that can be used in selecting this connection. + MatchCriteria []*string `json:"matchCriteria,omitempty" tf:"match_criteria,omitempty"` + + // A map of physical connection requirements, such as VPC and SecurityGroup. Defined below. 
+ PhysicalConnectionRequirements *PhysicalConnectionRequirementsObservation `json:"physicalConnectionRequirements,omitempty" tf:"physical_connection_requirements,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ConnectionParameters struct { + + // – The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default. + // +kubebuilder:validation:Required + CatalogID *string `json:"catalogId" tf:"catalog_id,omitempty"` + + // value pairs used as parameters for this connection. + // +kubebuilder:validation:Optional + ConnectionPropertiesSecretRef *v1.SecretReference `json:"connectionPropertiesSecretRef,omitempty" tf:"-"` + + // – The type of the connection. Supported are: CUSTOM, JDBC, KAFKA, MARKETPLACE, MONGODB, and NETWORK. Defaults to JDBC. + // +kubebuilder:validation:Optional + ConnectionType *string `json:"connectionType,omitempty" tf:"connection_type,omitempty"` + + // – Description of the connection. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // – A list of criteria that can be used in selecting this connection. + // +kubebuilder:validation:Optional + MatchCriteria []*string `json:"matchCriteria,omitempty" tf:"match_criteria,omitempty"` + + // A map of physical connection requirements, such as VPC and SecurityGroup. Defined below. 
+ // +kubebuilder:validation:Optional + PhysicalConnectionRequirements *PhysicalConnectionRequirementsParameters `json:"physicalConnectionRequirements,omitempty" tf:"physical_connection_requirements,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PhysicalConnectionRequirementsInitParameters struct { + + // The availability zone of the connection. This field is redundant and implied by subnet_id, but is currently an api requirement. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("availability_zone",false) + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // Reference to a Subnet in ec2 to populate availabilityZone. + // +kubebuilder:validation:Optional + AvailabilityZoneRef *v1.Reference `json:"availabilityZoneRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate availabilityZone. + // +kubebuilder:validation:Optional + AvailabilityZoneSelector *v1.Selector `json:"availabilityZoneSelector,omitempty" tf:"-"` + + // The security group ID list used by the connection. + // +listType=set + SecurityGroupIDList []*string `json:"securityGroupIdList,omitempty" tf:"security_group_id_list,omitempty"` + + // The subnet ID used by the connection. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type PhysicalConnectionRequirementsObservation struct { + + // The availability zone of the connection. This field is redundant and implied by subnet_id, but is currently an api requirement. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The security group ID list used by the connection. + // +listType=set + SecurityGroupIDList []*string `json:"securityGroupIdList,omitempty" tf:"security_group_id_list,omitempty"` + + // The subnet ID used by the connection. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type PhysicalConnectionRequirementsParameters struct { + + // The availability zone of the connection. This field is redundant and implied by subnet_id, but is currently an api requirement. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("availability_zone",false) + // +kubebuilder:validation:Optional + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // Reference to a Subnet in ec2 to populate availabilityZone. 
+ // +kubebuilder:validation:Optional + AvailabilityZoneRef *v1.Reference `json:"availabilityZoneRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate availabilityZone. + // +kubebuilder:validation:Optional + AvailabilityZoneSelector *v1.Selector `json:"availabilityZoneSelector,omitempty" tf:"-"` + + // The security group ID list used by the connection. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIDList []*string `json:"securityGroupIdList,omitempty" tf:"security_group_id_list,omitempty"` + + // The subnet ID used by the connection. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +// ConnectionSpec defines the desired state of Connection +type ConnectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConnectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConnectionInitParameters `json:"initProvider,omitempty"` +} + +// ConnectionStatus defines the observed state of Connection. +type ConnectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConnectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Connection is the Schema for the Connections API. Provides an Glue Connection resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Connection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ConnectionSpec `json:"spec"` + Status ConnectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConnectionList contains a list of Connections +type ConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Connection `json:"items"` +} + +// Repository type metadata. +var ( + Connection_Kind = "Connection" + Connection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Connection_Kind}.String() + Connection_KindAPIVersion = Connection_Kind + "." 
+ CRDGroupVersion.String() + Connection_GroupVersionKind = CRDGroupVersion.WithKind(Connection_Kind) +) + +func init() { + SchemeBuilder.Register(&Connection{}, &ConnectionList{}) +} diff --git a/apis/glue/v1beta2/zz_crawler_terraformed.go b/apis/glue/v1beta2/zz_crawler_terraformed.go new file mode 100755 index 0000000000..c4a0c0b749 --- /dev/null +++ b/apis/glue/v1beta2/zz_crawler_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Crawler +func (mg *Crawler) GetTerraformResourceType() string { + return "aws_glue_crawler" +} + +// GetConnectionDetailsMapping for this Crawler +func (tr *Crawler) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Crawler +func (tr *Crawler) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Crawler +func (tr *Crawler) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Crawler +func (tr *Crawler) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Crawler +func (tr *Crawler) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Crawler +func (tr *Crawler) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Crawler +func (tr *Crawler) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Crawler +func (tr *Crawler) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Crawler using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Crawler) LateInitialize(attrs []byte) (bool, error) { + params := &CrawlerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Crawler) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glue/v1beta2/zz_crawler_types.go b/apis/glue/v1beta2/zz_crawler_types.go new file mode 100755 index 0000000000..4f2734fb12 --- /dev/null +++ b/apis/glue/v1beta2/zz_crawler_types.go @@ -0,0 +1,910 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CatalogTargetInitParameters struct { + + // The name of the connection to use to connect to the JDBC target. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Glue database where results are written. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogDatabase + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a CatalogDatabase in glue to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a CatalogDatabase in glue to populate databaseName. 
+ // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // The ARN of the dead-letter SQS queue. + DlqEventQueueArn *string `json:"dlqEventQueueArn,omitempty" tf:"dlq_event_queue_arn,omitempty"` + + // The ARN of the SQS queue to receive S3 notifications from. + EventQueueArn *string `json:"eventQueueArn,omitempty" tf:"event_queue_arn,omitempty"` + + // A list of catalog tables to be synchronized. + Tables []*string `json:"tables,omitempty" tf:"tables,omitempty"` +} + +type CatalogTargetObservation struct { + + // The name of the connection to use to connect to the JDBC target. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Glue database where results are written. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The ARN of the dead-letter SQS queue. + DlqEventQueueArn *string `json:"dlqEventQueueArn,omitempty" tf:"dlq_event_queue_arn,omitempty"` + + // The ARN of the SQS queue to receive S3 notifications from. + EventQueueArn *string `json:"eventQueueArn,omitempty" tf:"event_queue_arn,omitempty"` + + // A list of catalog tables to be synchronized. + Tables []*string `json:"tables,omitempty" tf:"tables,omitempty"` +} + +type CatalogTargetParameters struct { + + // The name of the connection to use to connect to the JDBC target. + // +kubebuilder:validation:Optional + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Glue database where results are written. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogDatabase + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a CatalogDatabase in glue to populate databaseName. 
+ // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a CatalogDatabase in glue to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // The ARN of the dead-letter SQS queue. + // +kubebuilder:validation:Optional + DlqEventQueueArn *string `json:"dlqEventQueueArn,omitempty" tf:"dlq_event_queue_arn,omitempty"` + + // The ARN of the SQS queue to receive S3 notifications from. + // +kubebuilder:validation:Optional + EventQueueArn *string `json:"eventQueueArn,omitempty" tf:"event_queue_arn,omitempty"` + + // A list of catalog tables to be synchronized. + // +kubebuilder:validation:Optional + Tables []*string `json:"tables" tf:"tables,omitempty"` +} + +type CrawlerInitParameters struct { + + // List of nested AWS Glue Data Catalog target arguments. See Catalog Target below. + CatalogTarget []CatalogTargetInitParameters `json:"catalogTarget,omitempty" tf:"catalog_target,omitempty"` + + // List of custom classifiers. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification. + Classifiers []*string `json:"classifiers,omitempty" tf:"classifiers,omitempty"` + + // JSON string of configuration information. For more details see Setting Crawler Configuration Options. + Configuration *string `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Glue database where results are written. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogDatabase + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a CatalogDatabase in glue to populate databaseName. 
+ // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a CatalogDatabase in glue to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // List of nested Delta Lake target arguments. See Delta Target below. + DeltaTarget []DeltaTargetInitParameters `json:"deltaTarget,omitempty" tf:"delta_target,omitempty"` + + // Description of the crawler. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // List of nested DynamoDB target arguments. See Dynamodb Target below. + DynamodbTarget []DynamodbTargetInitParameters `json:"dynamodbTarget,omitempty" tf:"dynamodb_target,omitempty"` + + // List of nested Hudi target arguments. See Iceberg Target below. + HudiTarget []HudiTargetInitParameters `json:"hudiTarget,omitempty" tf:"hudi_target,omitempty"` + + // List of nested Iceberg target arguments. See Iceberg Target below. + IcebergTarget []IcebergTargetInitParameters `json:"icebergTarget,omitempty" tf:"iceberg_target,omitempty"` + + // List of nested JDBC target arguments. See JDBC Target below. + JdbcTarget []JdbcTargetInitParameters `json:"jdbcTarget,omitempty" tf:"jdbc_target,omitempty"` + + // Specifies Lake Formation configuration settings for the crawler. See Lake Formation Configuration below. + LakeFormationConfiguration *LakeFormationConfigurationInitParameters `json:"lakeFormationConfiguration,omitempty" tf:"lake_formation_configuration,omitempty"` + + // Specifies data lineage configuration settings for the crawler. See Lineage Configuration below. + LineageConfiguration *LineageConfigurationInitParameters `json:"lineageConfiguration,omitempty" tf:"lineage_configuration,omitempty"` + + // List of nested MongoDB target arguments. See MongoDB Target below. 
+ MongodbTarget []MongodbTargetInitParameters `json:"mongodbTarget,omitempty" tf:"mongodb_target,omitempty"` + + // A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.. See Recrawl Policy below. + RecrawlPolicy *RecrawlPolicyInitParameters `json:"recrawlPolicy,omitempty" tf:"recrawl_policy,omitempty"` + + // The IAM role friendly name (including path without leading slash), or ARN of an IAM role, used by the crawler to access other resources. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // List of nested Amazon S3 target arguments. See S3 Target below. + S3Target []S3TargetInitParameters `json:"s3Target,omitempty" tf:"s3_target,omitempty"` + + // Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *). + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Policy for the crawler's update and deletion behavior. See Schema Change Policy below. + SchemaChangePolicy *SchemaChangePolicyInitParameters `json:"schemaChangePolicy,omitempty" tf:"schema_change_policy,omitempty"` + + // The name of Security Configuration to be used by the crawler + SecurityConfiguration *string `json:"securityConfiguration,omitempty" tf:"security_configuration,omitempty"` + + // The table prefix used for catalog tables that are created. 
+ TablePrefix *string `json:"tablePrefix,omitempty" tf:"table_prefix,omitempty"`
+
+ // Key-value map of resource tags.
+ // +mapType=granular
+ Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+}
+
+type CrawlerObservation struct {
+
+ // The ARN of the crawler
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`
+
+ // List of nested AWS Glue Data Catalog target arguments. See Catalog Target below.
+ CatalogTarget []CatalogTargetObservation `json:"catalogTarget,omitempty" tf:"catalog_target,omitempty"`
+
+ // List of custom classifiers. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.
+ Classifiers []*string `json:"classifiers,omitempty" tf:"classifiers,omitempty"`
+
+ // JSON string of configuration information. For more details see Setting Crawler Configuration Options.
+ Configuration *string `json:"configuration,omitempty" tf:"configuration,omitempty"`
+
+ // Glue database where results are written.
+ DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+
+ // List of nested Delta Lake target arguments. See Delta Target below.
+ DeltaTarget []DeltaTargetObservation `json:"deltaTarget,omitempty" tf:"delta_target,omitempty"`
+
+ // Description of the crawler.
+ Description *string `json:"description,omitempty" tf:"description,omitempty"`
+
+ // List of nested DynamoDB target arguments. See Dynamodb Target below.
+ DynamodbTarget []DynamodbTargetObservation `json:"dynamodbTarget,omitempty" tf:"dynamodb_target,omitempty"`
+
+ // List of nested Hudi target arguments. See Hudi Target below.
+ HudiTarget []HudiTargetObservation `json:"hudiTarget,omitempty" tf:"hudi_target,omitempty"`
+
+ // Crawler name
+ ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+ // List of nested Iceberg target arguments. 
+ IcebergTarget []IcebergTargetObservation `json:"icebergTarget,omitempty" tf:"iceberg_target,omitempty"` + + // List of nested JDBC target arguments. See JDBC Target below. + JdbcTarget []JdbcTargetObservation `json:"jdbcTarget,omitempty" tf:"jdbc_target,omitempty"` + + // Specifies Lake Formation configuration settings for the crawler. See Lake Formation Configuration below. + LakeFormationConfiguration *LakeFormationConfigurationObservation `json:"lakeFormationConfiguration,omitempty" tf:"lake_formation_configuration,omitempty"` + + // Specifies data lineage configuration settings for the crawler. See Lineage Configuration below. + LineageConfiguration *LineageConfigurationObservation `json:"lineageConfiguration,omitempty" tf:"lineage_configuration,omitempty"` + + // List of nested MongoDB target arguments. See MongoDB Target below. + MongodbTarget []MongodbTargetObservation `json:"mongodbTarget,omitempty" tf:"mongodb_target,omitempty"` + + // A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.. See Recrawl Policy below. + RecrawlPolicy *RecrawlPolicyObservation `json:"recrawlPolicy,omitempty" tf:"recrawl_policy,omitempty"` + + // The IAM role friendly name (including path without leading slash), or ARN of an IAM role, used by the crawler to access other resources. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // List of nested Amazon S3 target arguments. See S3 Target below. + S3Target []S3TargetObservation `json:"s3Target,omitempty" tf:"s3_target,omitempty"` + + // Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *). + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Policy for the crawler's update and deletion behavior. See Schema Change Policy below. 
+ SchemaChangePolicy *SchemaChangePolicyObservation `json:"schemaChangePolicy,omitempty" tf:"schema_change_policy,omitempty"` + + // The name of Security Configuration to be used by the crawler + SecurityConfiguration *string `json:"securityConfiguration,omitempty" tf:"security_configuration,omitempty"` + + // The table prefix used for catalog tables that are created. + TablePrefix *string `json:"tablePrefix,omitempty" tf:"table_prefix,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type CrawlerParameters struct { + + // List of nested AWS Glue Data Catalog target arguments. See Catalog Target below. + // +kubebuilder:validation:Optional + CatalogTarget []CatalogTargetParameters `json:"catalogTarget,omitempty" tf:"catalog_target,omitempty"` + + // List of custom classifiers. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification. + // +kubebuilder:validation:Optional + Classifiers []*string `json:"classifiers,omitempty" tf:"classifiers,omitempty"` + + // JSON string of configuration information. For more details see Setting Crawler Configuration Options. + // +kubebuilder:validation:Optional + Configuration *string `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Glue database where results are written. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogDatabase + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a CatalogDatabase in glue to populate databaseName. 
+ // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a CatalogDatabase in glue to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // List of nested Delta Lake target arguments. See Delta Target below. + // +kubebuilder:validation:Optional + DeltaTarget []DeltaTargetParameters `json:"deltaTarget,omitempty" tf:"delta_target,omitempty"` + + // Description of the crawler. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // List of nested DynamoDB target arguments. See Dynamodb Target below. + // +kubebuilder:validation:Optional + DynamodbTarget []DynamodbTargetParameters `json:"dynamodbTarget,omitempty" tf:"dynamodb_target,omitempty"` + + // List of nested Hudi target arguments. See Iceberg Target below. + // +kubebuilder:validation:Optional + HudiTarget []HudiTargetParameters `json:"hudiTarget,omitempty" tf:"hudi_target,omitempty"` + + // List of nested Iceberg target arguments. See Iceberg Target below. + // +kubebuilder:validation:Optional + IcebergTarget []IcebergTargetParameters `json:"icebergTarget,omitempty" tf:"iceberg_target,omitempty"` + + // List of nested JDBC target arguments. See JDBC Target below. + // +kubebuilder:validation:Optional + JdbcTarget []JdbcTargetParameters `json:"jdbcTarget,omitempty" tf:"jdbc_target,omitempty"` + + // Specifies Lake Formation configuration settings for the crawler. See Lake Formation Configuration below. + // +kubebuilder:validation:Optional + LakeFormationConfiguration *LakeFormationConfigurationParameters `json:"lakeFormationConfiguration,omitempty" tf:"lake_formation_configuration,omitempty"` + + // Specifies data lineage configuration settings for the crawler. See Lineage Configuration below. 
+ // +kubebuilder:validation:Optional + LineageConfiguration *LineageConfigurationParameters `json:"lineageConfiguration,omitempty" tf:"lineage_configuration,omitempty"` + + // List of nested MongoDB target arguments. See MongoDB Target below. + // +kubebuilder:validation:Optional + MongodbTarget []MongodbTargetParameters `json:"mongodbTarget,omitempty" tf:"mongodb_target,omitempty"` + + // A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.. See Recrawl Policy below. + // +kubebuilder:validation:Optional + RecrawlPolicy *RecrawlPolicyParameters `json:"recrawlPolicy,omitempty" tf:"recrawl_policy,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The IAM role friendly name (including path without leading slash), or ARN of an IAM role, used by the crawler to access other resources. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // List of nested Amazon S3 target arguments. See S3 Target below. + // +kubebuilder:validation:Optional + S3Target []S3TargetParameters `json:"s3Target,omitempty" tf:"s3_target,omitempty"` + + // Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *). 
+ // +kubebuilder:validation:Optional + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Policy for the crawler's update and deletion behavior. See Schema Change Policy below. + // +kubebuilder:validation:Optional + SchemaChangePolicy *SchemaChangePolicyParameters `json:"schemaChangePolicy,omitempty" tf:"schema_change_policy,omitempty"` + + // The name of Security Configuration to be used by the crawler + // +kubebuilder:validation:Optional + SecurityConfiguration *string `json:"securityConfiguration,omitempty" tf:"security_configuration,omitempty"` + + // The table prefix used for catalog tables that are created. + // +kubebuilder:validation:Optional + TablePrefix *string `json:"tablePrefix,omitempty" tf:"table_prefix,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DeltaTargetInitParameters struct { + + // The name of the connection to use to connect to the JDBC target. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Specifies whether the crawler will create native tables, to allow integration with query engines that support querying of the Delta transaction log directly. + CreateNativeDeltaTable *bool `json:"createNativeDeltaTable,omitempty" tf:"create_native_delta_table,omitempty"` + + // A list of the Amazon S3 paths to the Delta tables. + // +listType=set + DeltaTables []*string `json:"deltaTables,omitempty" tf:"delta_tables,omitempty"` + + // Specifies whether to write the manifest files to the Delta table path. + WriteManifest *bool `json:"writeManifest,omitempty" tf:"write_manifest,omitempty"` +} + +type DeltaTargetObservation struct { + + // The name of the connection to use to connect to the JDBC target. 
+ ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Specifies whether the crawler will create native tables, to allow integration with query engines that support querying of the Delta transaction log directly. + CreateNativeDeltaTable *bool `json:"createNativeDeltaTable,omitempty" tf:"create_native_delta_table,omitempty"` + + // A list of the Amazon S3 paths to the Delta tables. + // +listType=set + DeltaTables []*string `json:"deltaTables,omitempty" tf:"delta_tables,omitempty"` + + // Specifies whether to write the manifest files to the Delta table path. + WriteManifest *bool `json:"writeManifest,omitempty" tf:"write_manifest,omitempty"` +} + +type DeltaTargetParameters struct { + + // The name of the connection to use to connect to the JDBC target. + // +kubebuilder:validation:Optional + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Specifies whether the crawler will create native tables, to allow integration with query engines that support querying of the Delta transaction log directly. + // +kubebuilder:validation:Optional + CreateNativeDeltaTable *bool `json:"createNativeDeltaTable,omitempty" tf:"create_native_delta_table,omitempty"` + + // A list of the Amazon S3 paths to the Delta tables. + // +kubebuilder:validation:Optional + // +listType=set + DeltaTables []*string `json:"deltaTables" tf:"delta_tables,omitempty"` + + // Specifies whether to write the manifest files to the Delta table path. + // +kubebuilder:validation:Optional + WriteManifest *bool `json:"writeManifest" tf:"write_manifest,omitempty"` +} + +type DynamodbTargetInitParameters struct { + + // The name of the DynamoDB table to crawl. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. defaults to true. 
+ ScanAll *bool `json:"scanAll,omitempty" tf:"scan_all,omitempty"` + + // The percentage of the configured read capacity units to use by the AWS Glue crawler. The valid values are null or a value between 0.1 to 1.5. + ScanRate *float64 `json:"scanRate,omitempty" tf:"scan_rate,omitempty"` +} + +type DynamodbTargetObservation struct { + + // The name of the DynamoDB table to crawl. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. defaults to true. + ScanAll *bool `json:"scanAll,omitempty" tf:"scan_all,omitempty"` + + // The percentage of the configured read capacity units to use by the AWS Glue crawler. The valid values are null or a value between 0.1 to 1.5. + ScanRate *float64 `json:"scanRate,omitempty" tf:"scan_rate,omitempty"` +} + +type DynamodbTargetParameters struct { + + // The name of the DynamoDB table to crawl. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. defaults to true. + // +kubebuilder:validation:Optional + ScanAll *bool `json:"scanAll,omitempty" tf:"scan_all,omitempty"` + + // The percentage of the configured read capacity units to use by the AWS Glue crawler. The valid values are null or a value between 0.1 to 1.5. + // +kubebuilder:validation:Optional + ScanRate *float64 `json:"scanRate,omitempty" tf:"scan_rate,omitempty"` +} + +type HudiTargetInitParameters struct { + + // The name of the connection to use to connect to the JDBC target. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // A list of glob patterns used to exclude from the crawl. 
+ Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time. Valid values are between 1 and 20. + MaximumTraversalDepth *float64 `json:"maximumTraversalDepth,omitempty" tf:"maximum_traversal_depth,omitempty"` + + // One or more Amazon S3 paths that contains Hudi metadata folders as s3://bucket/prefix. + // +listType=set + Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"` +} + +type HudiTargetObservation struct { + + // The name of the connection to use to connect to the JDBC target. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // A list of glob patterns used to exclude from the crawl. + Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time. Valid values are between 1 and 20. + MaximumTraversalDepth *float64 `json:"maximumTraversalDepth,omitempty" tf:"maximum_traversal_depth,omitempty"` + + // One or more Amazon S3 paths that contains Hudi metadata folders as s3://bucket/prefix. + // +listType=set + Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"` +} + +type HudiTargetParameters struct { + + // The name of the connection to use to connect to the JDBC target. + // +kubebuilder:validation:Optional + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // A list of glob patterns used to exclude from the crawl. + // +kubebuilder:validation:Optional + Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. 
Used to limit the crawler run time. Valid values are between 1 and 20.
+ // +kubebuilder:validation:Optional
+ MaximumTraversalDepth *float64 `json:"maximumTraversalDepth" tf:"maximum_traversal_depth,omitempty"`
+
+ // One or more Amazon S3 paths that contains Hudi metadata folders as s3://bucket/prefix.
+ // +kubebuilder:validation:Optional
+ // +listType=set
+ Paths []*string `json:"paths" tf:"paths,omitempty"`
+}
+
+type IcebergTargetInitParameters struct {
+
+ // The name of the connection to use to connect to the JDBC target.
+ ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"`
+
+ // A list of glob patterns used to exclude from the crawl.
+ Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"`
+
+ // The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Iceberg metadata folder in your Amazon S3 path. Used to limit the crawler run time. Valid values are between 1 and 20.
+ MaximumTraversalDepth *float64 `json:"maximumTraversalDepth,omitempty" tf:"maximum_traversal_depth,omitempty"`
+
+ // One or more Amazon S3 paths that contains Iceberg metadata folders as s3://bucket/prefix.
+ // +listType=set
+ Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"`
+}
+
+type IcebergTargetObservation struct {
+
+ // The name of the connection to use to connect to the JDBC target.
+ ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"`
+
+ // A list of glob patterns used to exclude from the crawl.
+ Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"`
+
+ // The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Iceberg metadata folder in your Amazon S3 path. Used to limit the crawler run time. Valid values are between 1 and 20. 
+ MaximumTraversalDepth *float64 `json:"maximumTraversalDepth,omitempty" tf:"maximum_traversal_depth,omitempty"` + + // One or more Amazon S3 paths that contains Hudi metadata folders as s3://bucket/prefix. + // +listType=set + Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"` +} + +type IcebergTargetParameters struct { + + // The name of the connection to use to connect to the JDBC target. + // +kubebuilder:validation:Optional + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // A list of glob patterns used to exclude from the crawl. + // +kubebuilder:validation:Optional + Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time. Valid values are between 1 and 20. + // +kubebuilder:validation:Optional + MaximumTraversalDepth *float64 `json:"maximumTraversalDepth" tf:"maximum_traversal_depth,omitempty"` + + // One or more Amazon S3 paths that contains Hudi metadata folders as s3://bucket/prefix. + // +kubebuilder:validation:Optional + // +listType=set + Paths []*string `json:"paths" tf:"paths,omitempty"` +} + +type JdbcTargetInitParameters struct { + + // The name of the connection to use to connect to the JDBC target. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Connection + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Reference to a Connection in glue to populate connectionName. + // +kubebuilder:validation:Optional + ConnectionNameRef *v1.Reference `json:"connectionNameRef,omitempty" tf:"-"` + + // Selector for a Connection in glue to populate connectionName. 
+ // +kubebuilder:validation:Optional + ConnectionNameSelector *v1.Selector `json:"connectionNameSelector,omitempty" tf:"-"` + + // Specify a value of RAWTYPES or COMMENTS to enable additional metadata intable responses. RAWTYPES provides the native-level datatype. COMMENTS provides comments associated with a column or table in the database. + EnableAdditionalMetadata []*string `json:"enableAdditionalMetadata,omitempty" tf:"enable_additional_metadata,omitempty"` + + // A list of glob patterns used to exclude from the crawl. + Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The name of the DynamoDB table to crawl. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type JdbcTargetObservation struct { + + // The name of the connection to use to connect to the JDBC target. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Specify a value of RAWTYPES or COMMENTS to enable additional metadata intable responses. RAWTYPES provides the native-level datatype. COMMENTS provides comments associated with a column or table in the database. + EnableAdditionalMetadata []*string `json:"enableAdditionalMetadata,omitempty" tf:"enable_additional_metadata,omitempty"` + + // A list of glob patterns used to exclude from the crawl. + Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The name of the DynamoDB table to crawl. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type JdbcTargetParameters struct { + + // The name of the connection to use to connect to the JDBC target. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Connection + // +kubebuilder:validation:Optional + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Reference to a Connection in glue to populate connectionName. 
+ // +kubebuilder:validation:Optional + ConnectionNameRef *v1.Reference `json:"connectionNameRef,omitempty" tf:"-"` + + // Selector for a Connection in glue to populate connectionName. + // +kubebuilder:validation:Optional + ConnectionNameSelector *v1.Selector `json:"connectionNameSelector,omitempty" tf:"-"` + + // Specify a value of RAWTYPES or COMMENTS to enable additional metadata intable responses. RAWTYPES provides the native-level datatype. COMMENTS provides comments associated with a column or table in the database. + // +kubebuilder:validation:Optional + EnableAdditionalMetadata []*string `json:"enableAdditionalMetadata,omitempty" tf:"enable_additional_metadata,omitempty"` + + // A list of glob patterns used to exclude from the crawl. + // +kubebuilder:validation:Optional + Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The name of the DynamoDB table to crawl. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type LakeFormationConfigurationInitParameters struct { + + // Required for cross account crawls. For same account crawls as the target data, this can omitted. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Specifies whether to use Lake Formation credentials for the crawler instead of the IAM role credentials. + UseLakeFormationCredentials *bool `json:"useLakeFormationCredentials,omitempty" tf:"use_lake_formation_credentials,omitempty"` +} + +type LakeFormationConfigurationObservation struct { + + // Required for cross account crawls. For same account crawls as the target data, this can omitted. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Specifies whether to use Lake Formation credentials for the crawler instead of the IAM role credentials. 
+ UseLakeFormationCredentials *bool `json:"useLakeFormationCredentials,omitempty" tf:"use_lake_formation_credentials,omitempty"`
+}
+
+type LakeFormationConfigurationParameters struct {
+
+ // Required for cross account crawls. For same account crawls as the target data, this can be omitted.
+ // +kubebuilder:validation:Optional
+ AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"`
+
+ // Specifies whether to use Lake Formation credentials for the crawler instead of the IAM role credentials.
+ // +kubebuilder:validation:Optional
+ UseLakeFormationCredentials *bool `json:"useLakeFormationCredentials,omitempty" tf:"use_lake_formation_credentials,omitempty"`
+}
+
+type LineageConfigurationInitParameters struct {
+
+ // Specifies whether data lineage is enabled for the crawler. Valid values are: ENABLE and DISABLE. Default value is DISABLE.
+ CrawlerLineageSettings *string `json:"crawlerLineageSettings,omitempty" tf:"crawler_lineage_settings,omitempty"`
+}
+
+type LineageConfigurationObservation struct {
+
+ // Specifies whether data lineage is enabled for the crawler. Valid values are: ENABLE and DISABLE. Default value is DISABLE.
+ CrawlerLineageSettings *string `json:"crawlerLineageSettings,omitempty" tf:"crawler_lineage_settings,omitempty"`
+}
+
+type LineageConfigurationParameters struct {
+
+ // Specifies whether data lineage is enabled for the crawler. Valid values are: ENABLE and DISABLE. Default value is DISABLE.
+ // +kubebuilder:validation:Optional
+ CrawlerLineageSettings *string `json:"crawlerLineageSettings,omitempty" tf:"crawler_lineage_settings,omitempty"`
+}
+
+type MongodbTargetInitParameters struct {
+
+ // The name of the connection to use to connect to the JDBC target.
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Connection
+ ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"`
+
+ // Reference to a Connection in glue to populate connectionName. 
+ // +kubebuilder:validation:Optional + ConnectionNameRef *v1.Reference `json:"connectionNameRef,omitempty" tf:"-"` + + // Selector for a Connection in glue to populate connectionName. + // +kubebuilder:validation:Optional + ConnectionNameSelector *v1.Selector `json:"connectionNameSelector,omitempty" tf:"-"` + + // The name of the DynamoDB table to crawl. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. defaults to true. + ScanAll *bool `json:"scanAll,omitempty" tf:"scan_all,omitempty"` +} + +type MongodbTargetObservation struct { + + // The name of the connection to use to connect to the JDBC target. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // The name of the DynamoDB table to crawl. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. defaults to true. + ScanAll *bool `json:"scanAll,omitempty" tf:"scan_all,omitempty"` +} + +type MongodbTargetParameters struct { + + // The name of the connection to use to connect to the JDBC target. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Connection + // +kubebuilder:validation:Optional + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // Reference to a Connection in glue to populate connectionName. + // +kubebuilder:validation:Optional + ConnectionNameRef *v1.Reference `json:"connectionNameRef,omitempty" tf:"-"` + + // Selector for a Connection in glue to populate connectionName. 
+ // +kubebuilder:validation:Optional + ConnectionNameSelector *v1.Selector `json:"connectionNameSelector,omitempty" tf:"-"` + + // The name of the DynamoDB table to crawl. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. defaults to true. + // +kubebuilder:validation:Optional + ScanAll *bool `json:"scanAll,omitempty" tf:"scan_all,omitempty"` +} + +type RecrawlPolicyInitParameters struct { + + // Specifies whether to crawl the entire dataset again, crawl only folders that were added since the last crawler run, or crawl what S3 notifies the crawler of via SQS. Valid Values are: CRAWL_EVENT_MODE, CRAWL_EVERYTHING and CRAWL_NEW_FOLDERS_ONLY. Default value is CRAWL_EVERYTHING. + RecrawlBehavior *string `json:"recrawlBehavior,omitempty" tf:"recrawl_behavior,omitempty"` +} + +type RecrawlPolicyObservation struct { + + // Specifies whether to crawl the entire dataset again, crawl only folders that were added since the last crawler run, or crawl what S3 notifies the crawler of via SQS. Valid Values are: CRAWL_EVENT_MODE, CRAWL_EVERYTHING and CRAWL_NEW_FOLDERS_ONLY. Default value is CRAWL_EVERYTHING. + RecrawlBehavior *string `json:"recrawlBehavior,omitempty" tf:"recrawl_behavior,omitempty"` +} + +type RecrawlPolicyParameters struct { + + // Specifies whether to crawl the entire dataset again, crawl only folders that were added since the last crawler run, or crawl what S3 notifies the crawler of via SQS. Valid Values are: CRAWL_EVENT_MODE, CRAWL_EVERYTHING and CRAWL_NEW_FOLDERS_ONLY. Default value is CRAWL_EVERYTHING. + // +kubebuilder:validation:Optional + RecrawlBehavior *string `json:"recrawlBehavior,omitempty" tf:"recrawl_behavior,omitempty"` +} + +type S3TargetInitParameters struct { + + // The name of the connection to use to connect to the JDBC target. 
+ ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // The ARN of the dead-letter SQS queue. + DlqEventQueueArn *string `json:"dlqEventQueueArn,omitempty" tf:"dlq_event_queue_arn,omitempty"` + + // The ARN of the SQS queue to receive S3 notifications from. + EventQueueArn *string `json:"eventQueueArn,omitempty" tf:"event_queue_arn,omitempty"` + + // A list of glob patterns used to exclude from the crawl. + Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The name of the DynamoDB table to crawl. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Sets the number of files in each leaf folder to be crawled when crawling sample files in a dataset. If not set, all the files are crawled. A valid value is an integer between 1 and 249. + SampleSize *float64 `json:"sampleSize,omitempty" tf:"sample_size,omitempty"` +} + +type S3TargetObservation struct { + + // The name of the connection to use to connect to the JDBC target. + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // The ARN of the dead-letter SQS queue. + DlqEventQueueArn *string `json:"dlqEventQueueArn,omitempty" tf:"dlq_event_queue_arn,omitempty"` + + // The ARN of the SQS queue to receive S3 notifications from. + EventQueueArn *string `json:"eventQueueArn,omitempty" tf:"event_queue_arn,omitempty"` + + // A list of glob patterns used to exclude from the crawl. + Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The name of the DynamoDB table to crawl. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Sets the number of files in each leaf folder to be crawled when crawling sample files in a dataset. If not set, all the files are crawled. A valid value is an integer between 1 and 249. 
+ SampleSize *float64 `json:"sampleSize,omitempty" tf:"sample_size,omitempty"` +} + +type S3TargetParameters struct { + + // The name of the connection to use to connect to the JDBC target. + // +kubebuilder:validation:Optional + ConnectionName *string `json:"connectionName,omitempty" tf:"connection_name,omitempty"` + + // The ARN of the dead-letter SQS queue. + // +kubebuilder:validation:Optional + DlqEventQueueArn *string `json:"dlqEventQueueArn,omitempty" tf:"dlq_event_queue_arn,omitempty"` + + // The ARN of the SQS queue to receive S3 notifications from. + // +kubebuilder:validation:Optional + EventQueueArn *string `json:"eventQueueArn,omitempty" tf:"event_queue_arn,omitempty"` + + // A list of glob patterns used to exclude from the crawl. + // +kubebuilder:validation:Optional + Exclusions []*string `json:"exclusions,omitempty" tf:"exclusions,omitempty"` + + // The name of the DynamoDB table to crawl. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // Sets the number of files in each leaf folder to be crawled when crawling sample files in a dataset. If not set, all the files are crawled. A valid value is an integer between 1 and 249. + // +kubebuilder:validation:Optional + SampleSize *float64 `json:"sampleSize,omitempty" tf:"sample_size,omitempty"` +} + +type SchemaChangePolicyInitParameters struct { + + // The deletion behavior when the crawler finds a deleted object. Valid values: LOG, DELETE_FROM_DATABASE, or DEPRECATE_IN_DATABASE. Defaults to DEPRECATE_IN_DATABASE. + DeleteBehavior *string `json:"deleteBehavior,omitempty" tf:"delete_behavior,omitempty"` + + // The update behavior when the crawler finds a changed schema. Valid values: LOG or UPDATE_IN_DATABASE. Defaults to UPDATE_IN_DATABASE. + UpdateBehavior *string `json:"updateBehavior,omitempty" tf:"update_behavior,omitempty"` +} + +type SchemaChangePolicyObservation struct { + + // The deletion behavior when the crawler finds a deleted object. 
Valid values: LOG, DELETE_FROM_DATABASE, or DEPRECATE_IN_DATABASE. Defaults to DEPRECATE_IN_DATABASE. + DeleteBehavior *string `json:"deleteBehavior,omitempty" tf:"delete_behavior,omitempty"` + + // The update behavior when the crawler finds a changed schema. Valid values: LOG or UPDATE_IN_DATABASE. Defaults to UPDATE_IN_DATABASE. + UpdateBehavior *string `json:"updateBehavior,omitempty" tf:"update_behavior,omitempty"` +} + +type SchemaChangePolicyParameters struct { + + // The deletion behavior when the crawler finds a deleted object. Valid values: LOG, DELETE_FROM_DATABASE, or DEPRECATE_IN_DATABASE. Defaults to DEPRECATE_IN_DATABASE. + // +kubebuilder:validation:Optional + DeleteBehavior *string `json:"deleteBehavior,omitempty" tf:"delete_behavior,omitempty"` + + // The update behavior when the crawler finds a changed schema. Valid values: LOG or UPDATE_IN_DATABASE. Defaults to UPDATE_IN_DATABASE. + // +kubebuilder:validation:Optional + UpdateBehavior *string `json:"updateBehavior,omitempty" tf:"update_behavior,omitempty"` +} + +// CrawlerSpec defines the desired state of Crawler +type CrawlerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CrawlerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider CrawlerInitParameters `json:"initProvider,omitempty"` +} + +// CrawlerStatus defines the observed state of Crawler. +type CrawlerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CrawlerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Crawler is the Schema for the Crawlers API. Manages a Glue Crawler +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Crawler struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CrawlerSpec `json:"spec"` + Status CrawlerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CrawlerList contains a list of Crawlers +type CrawlerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Crawler `json:"items"` +} + +// Repository type metadata. +var ( + Crawler_Kind = "Crawler" + Crawler_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Crawler_Kind}.String() + Crawler_KindAPIVersion = Crawler_Kind + "." 
+ CRDGroupVersion.String() + Crawler_GroupVersionKind = CRDGroupVersion.WithKind(Crawler_Kind) +) + +func init() { + SchemeBuilder.Register(&Crawler{}, &CrawlerList{}) +} diff --git a/apis/glue/v1beta2/zz_datacatalogencryptionsettings_terraformed.go b/apis/glue/v1beta2/zz_datacatalogencryptionsettings_terraformed.go new file mode 100755 index 0000000000..e05a2fde24 --- /dev/null +++ b/apis/glue/v1beta2/zz_datacatalogencryptionsettings_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataCatalogEncryptionSettings +func (mg *DataCatalogEncryptionSettings) GetTerraformResourceType() string { + return "aws_glue_data_catalog_encryption_settings" +} + +// GetConnectionDetailsMapping for this DataCatalogEncryptionSettings +func (tr *DataCatalogEncryptionSettings) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataCatalogEncryptionSettings +func (tr *DataCatalogEncryptionSettings) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataCatalogEncryptionSettings +func (tr *DataCatalogEncryptionSettings) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataCatalogEncryptionSettings +func (tr *DataCatalogEncryptionSettings) GetID() string { + if tr.Status.AtProvider.ID == nil 
{ + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DataCatalogEncryptionSettings +func (tr *DataCatalogEncryptionSettings) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataCatalogEncryptionSettings +func (tr *DataCatalogEncryptionSettings) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataCatalogEncryptionSettings +func (tr *DataCatalogEncryptionSettings) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DataCatalogEncryptionSettings +func (tr *DataCatalogEncryptionSettings) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DataCatalogEncryptionSettings using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DataCatalogEncryptionSettings) LateInitialize(attrs []byte) (bool, error) { + params := &DataCatalogEncryptionSettingsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataCatalogEncryptionSettings) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glue/v1beta2/zz_datacatalogencryptionsettings_types.go b/apis/glue/v1beta2/zz_datacatalogencryptionsettings_types.go new file mode 100755 index 0000000000..cc3191006e --- /dev/null +++ b/apis/glue/v1beta2/zz_datacatalogencryptionsettings_types.go @@ -0,0 +1,249 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConnectionPasswordEncryptionInitParameters struct { + + // A KMS key ARN that is used to encrypt the connection password. 
If connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + AwsKMSKeyID *string `json:"awsKmsKeyId,omitempty" tf:"aws_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate awsKmsKeyId. + // +kubebuilder:validation:Optional + AwsKMSKeyIDRef *v1.Reference `json:"awsKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate awsKmsKeyId. + // +kubebuilder:validation:Optional + AwsKMSKeyIDSelector *v1.Selector `json:"awsKmsKeyIdSelector,omitempty" tf:"-"` + + // When set to true, passwords remain encrypted in the responses of GetConnection and GetConnections. This encryption takes effect independently of the catalog encryption. + ReturnConnectionPasswordEncrypted *bool `json:"returnConnectionPasswordEncrypted,omitempty" tf:"return_connection_password_encrypted,omitempty"` +} + +type ConnectionPasswordEncryptionObservation struct { + + // A KMS key ARN that is used to encrypt the connection password. If connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog. + AwsKMSKeyID *string `json:"awsKmsKeyId,omitempty" tf:"aws_kms_key_id,omitempty"` + + // When set to true, passwords remain encrypted in the responses of GetConnection and GetConnections. This encryption takes effect independently of the catalog encryption. 
+ ReturnConnectionPasswordEncrypted *bool `json:"returnConnectionPasswordEncrypted,omitempty" tf:"return_connection_password_encrypted,omitempty"` +} + +type ConnectionPasswordEncryptionParameters struct { + + // A KMS key ARN that is used to encrypt the connection password. If connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + AwsKMSKeyID *string `json:"awsKmsKeyId,omitempty" tf:"aws_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate awsKmsKeyId. + // +kubebuilder:validation:Optional + AwsKMSKeyIDRef *v1.Reference `json:"awsKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate awsKmsKeyId. + // +kubebuilder:validation:Optional + AwsKMSKeyIDSelector *v1.Selector `json:"awsKmsKeyIdSelector,omitempty" tf:"-"` + + // When set to true, passwords remain encrypted in the responses of GetConnection and GetConnections. This encryption takes effect independently of the catalog encryption. + // +kubebuilder:validation:Optional + ReturnConnectionPasswordEncrypted *bool `json:"returnConnectionPasswordEncrypted" tf:"return_connection_password_encrypted,omitempty"` +} + +type DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsInitParameters struct { + + // When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption. see Connection Password Encryption. 
+ ConnectionPasswordEncryption *ConnectionPasswordEncryptionInitParameters `json:"connectionPasswordEncryption,omitempty" tf:"connection_password_encryption,omitempty"` + + // Specifies the encryption-at-rest configuration for the Data Catalog. see Encryption At Rest. + EncryptionAtRest *EncryptionAtRestInitParameters `json:"encryptionAtRest,omitempty" tf:"encryption_at_rest,omitempty"` +} + +type DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsObservation struct { + + // When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption. see Connection Password Encryption. + ConnectionPasswordEncryption *ConnectionPasswordEncryptionObservation `json:"connectionPasswordEncryption,omitempty" tf:"connection_password_encryption,omitempty"` + + // Specifies the encryption-at-rest configuration for the Data Catalog. see Encryption At Rest. + EncryptionAtRest *EncryptionAtRestObservation `json:"encryptionAtRest,omitempty" tf:"encryption_at_rest,omitempty"` +} + +type DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsParameters struct { + + // When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption. see Connection Password Encryption. + // +kubebuilder:validation:Optional + ConnectionPasswordEncryption *ConnectionPasswordEncryptionParameters `json:"connectionPasswordEncryption" tf:"connection_password_encryption,omitempty"` + + // Specifies the encryption-at-rest configuration for the Data Catalog. see Encryption At Rest. 
+ // +kubebuilder:validation:Optional + EncryptionAtRest *EncryptionAtRestParameters `json:"encryptionAtRest" tf:"encryption_at_rest,omitempty"` +} + +type DataCatalogEncryptionSettingsInitParameters struct { + + // – The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – The security configuration to set. see Data Catalog Encryption Settings. + DataCatalogEncryptionSettings *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsInitParameters `json:"dataCatalogEncryptionSettings,omitempty" tf:"data_catalog_encryption_settings,omitempty"` +} + +type DataCatalogEncryptionSettingsObservation struct { + + // – The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – The security configuration to set. see Data Catalog Encryption Settings. + DataCatalogEncryptionSettings *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsObservation `json:"dataCatalogEncryptionSettings,omitempty" tf:"data_catalog_encryption_settings,omitempty"` + + // The ID of the Data Catalog to set the security configuration for. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type DataCatalogEncryptionSettingsParameters struct { + + // – The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default. + // +kubebuilder:validation:Optional + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – The security configuration to set. see Data Catalog Encryption Settings. 
+ // +kubebuilder:validation:Optional + DataCatalogEncryptionSettings *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsParameters `json:"dataCatalogEncryptionSettings,omitempty" tf:"data_catalog_encryption_settings,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type EncryptionAtRestInitParameters struct { + + // The encryption-at-rest mode for encrypting Data Catalog data. Valid values: DISABLED, SSE-KMS, SSE-KMS-WITH-SERVICE-ROLE. + CatalogEncryptionMode *string `json:"catalogEncryptionMode,omitempty" tf:"catalog_encryption_mode,omitempty"` + + // The ARN of the AWS IAM role used for accessing encrypted Data Catalog data. + CatalogEncryptionServiceRole *string `json:"catalogEncryptionServiceRole,omitempty" tf:"catalog_encryption_service_role,omitempty"` + + // The ARN of the AWS KMS key to use for encryption at rest. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + SseAwsKMSKeyID *string `json:"sseAwsKmsKeyId,omitempty" tf:"sse_aws_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate sseAwsKmsKeyId. + // +kubebuilder:validation:Optional + SseAwsKMSKeyIDRef *v1.Reference `json:"sseAwsKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate sseAwsKmsKeyId. + // +kubebuilder:validation:Optional + SseAwsKMSKeyIDSelector *v1.Selector `json:"sseAwsKmsKeyIdSelector,omitempty" tf:"-"` +} + +type EncryptionAtRestObservation struct { + + // The encryption-at-rest mode for encrypting Data Catalog data. Valid values: DISABLED, SSE-KMS, SSE-KMS-WITH-SERVICE-ROLE. 
+ CatalogEncryptionMode *string `json:"catalogEncryptionMode,omitempty" tf:"catalog_encryption_mode,omitempty"` + + // The ARN of the AWS IAM role used for accessing encrypted Data Catalog data. + CatalogEncryptionServiceRole *string `json:"catalogEncryptionServiceRole,omitempty" tf:"catalog_encryption_service_role,omitempty"` + + // The ARN of the AWS KMS key to use for encryption at rest. + SseAwsKMSKeyID *string `json:"sseAwsKmsKeyId,omitempty" tf:"sse_aws_kms_key_id,omitempty"` +} + +type EncryptionAtRestParameters struct { + + // The encryption-at-rest mode for encrypting Data Catalog data. Valid values: DISABLED, SSE-KMS, SSE-KMS-WITH-SERVICE-ROLE. + // +kubebuilder:validation:Optional + CatalogEncryptionMode *string `json:"catalogEncryptionMode" tf:"catalog_encryption_mode,omitempty"` + + // The ARN of the AWS IAM role used for accessing encrypted Data Catalog data. + // +kubebuilder:validation:Optional + CatalogEncryptionServiceRole *string `json:"catalogEncryptionServiceRole,omitempty" tf:"catalog_encryption_service_role,omitempty"` + + // The ARN of the AWS KMS key to use for encryption at rest. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + SseAwsKMSKeyID *string `json:"sseAwsKmsKeyId,omitempty" tf:"sse_aws_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate sseAwsKmsKeyId. + // +kubebuilder:validation:Optional + SseAwsKMSKeyIDRef *v1.Reference `json:"sseAwsKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate sseAwsKmsKeyId. 
+ // +kubebuilder:validation:Optional + SseAwsKMSKeyIDSelector *v1.Selector `json:"sseAwsKmsKeyIdSelector,omitempty" tf:"-"` +} + +// DataCatalogEncryptionSettingsSpec defines the desired state of DataCatalogEncryptionSettings +type DataCatalogEncryptionSettingsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataCatalogEncryptionSettingsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DataCatalogEncryptionSettingsInitParameters `json:"initProvider,omitempty"` +} + +// DataCatalogEncryptionSettingsStatus defines the observed state of DataCatalogEncryptionSettings. +type DataCatalogEncryptionSettingsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataCatalogEncryptionSettingsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataCatalogEncryptionSettings is the Schema for the DataCatalogEncryptionSettingss API. Provides a Glue Data Catalog Encryption Settings resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DataCatalogEncryptionSettings struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dataCatalogEncryptionSettings) || (has(self.initProvider) && has(self.initProvider.dataCatalogEncryptionSettings))",message="spec.forProvider.dataCatalogEncryptionSettings is a required parameter" + Spec DataCatalogEncryptionSettingsSpec `json:"spec"` + Status DataCatalogEncryptionSettingsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataCatalogEncryptionSettingsList contains a list of DataCatalogEncryptionSettingss +type DataCatalogEncryptionSettingsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataCatalogEncryptionSettings `json:"items"` +} + +// Repository type metadata. +var ( + DataCatalogEncryptionSettings_Kind = "DataCatalogEncryptionSettings" + DataCatalogEncryptionSettings_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataCatalogEncryptionSettings_Kind}.String() + DataCatalogEncryptionSettings_KindAPIVersion = DataCatalogEncryptionSettings_Kind + "." 
+ CRDGroupVersion.String() + DataCatalogEncryptionSettings_GroupVersionKind = CRDGroupVersion.WithKind(DataCatalogEncryptionSettings_Kind) +) + +func init() { + SchemeBuilder.Register(&DataCatalogEncryptionSettings{}, &DataCatalogEncryptionSettingsList{}) +} diff --git a/apis/glue/v1beta2/zz_generated.conversion_hubs.go b/apis/glue/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..3976727361 --- /dev/null +++ b/apis/glue/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *CatalogDatabase) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *CatalogTable) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Classifier) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Connection) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Crawler) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DataCatalogEncryptionSettings) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Job) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SecurityConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Trigger) Hub() {} diff --git a/apis/glue/v1beta2/zz_generated.deepcopy.go b/apis/glue/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0ccfa43309 --- /dev/null +++ b/apis/glue/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,8314 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsInitParameters) DeepCopyInto(out *ActionsInitParameters) { + *out = *in + if in.Arguments != nil { + in, out := &in.Arguments, &out.Arguments + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.CrawlerName != nil { + in, out := &in.CrawlerName, &out.CrawlerName + *out = new(string) + **out = **in + } + if in.CrawlerNameRef != nil { + in, out := &in.CrawlerNameRef, &out.CrawlerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CrawlerNameSelector != nil { + in, out := &in.CrawlerNameSelector, &out.CrawlerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } + if in.JobNameRef != nil { + in, out := &in.JobNameRef, &out.JobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.JobNameSelector != nil { + in, out := &in.JobNameSelector, &out.JobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NotificationProperty != nil { + in, out := &in.NotificationProperty, &out.NotificationProperty + *out = new(ActionsNotificationPropertyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityConfiguration != nil { + in, out := &in.SecurityConfiguration, &out.SecurityConfiguration + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new ActionsInitParameters. +func (in *ActionsInitParameters) DeepCopy() *ActionsInitParameters { + if in == nil { + return nil + } + out := new(ActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsNotificationPropertyInitParameters) DeepCopyInto(out *ActionsNotificationPropertyInitParameters) { + *out = *in + if in.NotifyDelayAfter != nil { + in, out := &in.NotifyDelayAfter, &out.NotifyDelayAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsNotificationPropertyInitParameters. +func (in *ActionsNotificationPropertyInitParameters) DeepCopy() *ActionsNotificationPropertyInitParameters { + if in == nil { + return nil + } + out := new(ActionsNotificationPropertyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsNotificationPropertyObservation) DeepCopyInto(out *ActionsNotificationPropertyObservation) { + *out = *in + if in.NotifyDelayAfter != nil { + in, out := &in.NotifyDelayAfter, &out.NotifyDelayAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsNotificationPropertyObservation. +func (in *ActionsNotificationPropertyObservation) DeepCopy() *ActionsNotificationPropertyObservation { + if in == nil { + return nil + } + out := new(ActionsNotificationPropertyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsNotificationPropertyParameters) DeepCopyInto(out *ActionsNotificationPropertyParameters) { + *out = *in + if in.NotifyDelayAfter != nil { + in, out := &in.NotifyDelayAfter, &out.NotifyDelayAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsNotificationPropertyParameters. +func (in *ActionsNotificationPropertyParameters) DeepCopy() *ActionsNotificationPropertyParameters { + if in == nil { + return nil + } + out := new(ActionsNotificationPropertyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsObservation) DeepCopyInto(out *ActionsObservation) { + *out = *in + if in.Arguments != nil { + in, out := &in.Arguments, &out.Arguments + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.CrawlerName != nil { + in, out := &in.CrawlerName, &out.CrawlerName + *out = new(string) + **out = **in + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } + if in.NotificationProperty != nil { + in, out := &in.NotificationProperty, &out.NotificationProperty + *out = new(ActionsNotificationPropertyObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityConfiguration != nil { + in, out := &in.SecurityConfiguration, &out.SecurityConfiguration + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsObservation. 
+func (in *ActionsObservation) DeepCopy() *ActionsObservation { + if in == nil { + return nil + } + out := new(ActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsParameters) DeepCopyInto(out *ActionsParameters) { + *out = *in + if in.Arguments != nil { + in, out := &in.Arguments, &out.Arguments + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.CrawlerName != nil { + in, out := &in.CrawlerName, &out.CrawlerName + *out = new(string) + **out = **in + } + if in.CrawlerNameRef != nil { + in, out := &in.CrawlerNameRef, &out.CrawlerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CrawlerNameSelector != nil { + in, out := &in.CrawlerNameSelector, &out.CrawlerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } + if in.JobNameRef != nil { + in, out := &in.JobNameRef, &out.JobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.JobNameSelector != nil { + in, out := &in.JobNameSelector, &out.JobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NotificationProperty != nil { + in, out := &in.NotificationProperty, &out.NotificationProperty + *out = new(ActionsNotificationPropertyParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityConfiguration != nil { + in, out := &in.SecurityConfiguration, &out.SecurityConfiguration + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new ActionsParameters. +func (in *ActionsParameters) DeepCopy() *ActionsParameters { + if in == nil { + return nil + } + out := new(ActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogDatabase) DeepCopyInto(out *CatalogDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDatabase. +func (in *CatalogDatabase) DeepCopy() *CatalogDatabase { + if in == nil { + return nil + } + out := new(CatalogDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CatalogDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogDatabaseInitParameters) DeepCopyInto(out *CatalogDatabaseInitParameters) { + *out = *in + if in.CreateTableDefaultPermission != nil { + in, out := &in.CreateTableDefaultPermission, &out.CreateTableDefaultPermission + *out = make([]CreateTableDefaultPermissionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FederatedDatabase != nil { + in, out := &in.FederatedDatabase, &out.FederatedDatabase + *out = new(FederatedDatabaseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocationURI != nil { + in, out := &in.LocationURI, &out.LocationURI + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetDatabase != nil { + in, out := &in.TargetDatabase, &out.TargetDatabase + *out = new(TargetDatabaseInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDatabaseInitParameters. 
+func (in *CatalogDatabaseInitParameters) DeepCopy() *CatalogDatabaseInitParameters { + if in == nil { + return nil + } + out := new(CatalogDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogDatabaseList) DeepCopyInto(out *CatalogDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CatalogDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDatabaseList. +func (in *CatalogDatabaseList) DeepCopy() *CatalogDatabaseList { + if in == nil { + return nil + } + out := new(CatalogDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CatalogDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogDatabaseObservation) DeepCopyInto(out *CatalogDatabaseObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.CreateTableDefaultPermission != nil { + in, out := &in.CreateTableDefaultPermission, &out.CreateTableDefaultPermission + *out = make([]CreateTableDefaultPermissionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FederatedDatabase != nil { + in, out := &in.FederatedDatabase, &out.FederatedDatabase + *out = new(FederatedDatabaseObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LocationURI != nil { + in, out := &in.LocationURI, &out.LocationURI + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out 
= **in + } + (*out)[key] = outVal + } + } + if in.TargetDatabase != nil { + in, out := &in.TargetDatabase, &out.TargetDatabase + *out = new(TargetDatabaseObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDatabaseObservation. +func (in *CatalogDatabaseObservation) DeepCopy() *CatalogDatabaseObservation { + if in == nil { + return nil + } + out := new(CatalogDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogDatabaseParameters) DeepCopyInto(out *CatalogDatabaseParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.CreateTableDefaultPermission != nil { + in, out := &in.CreateTableDefaultPermission, &out.CreateTableDefaultPermission + *out = make([]CreateTableDefaultPermissionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FederatedDatabase != nil { + in, out := &in.FederatedDatabase, &out.FederatedDatabase + *out = new(FederatedDatabaseParameters) + (*in).DeepCopyInto(*out) + } + if in.LocationURI != nil { + in, out := &in.LocationURI, &out.LocationURI + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + 
*out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetDatabase != nil { + in, out := &in.TargetDatabase, &out.TargetDatabase + *out = new(TargetDatabaseParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDatabaseParameters. +func (in *CatalogDatabaseParameters) DeepCopy() *CatalogDatabaseParameters { + if in == nil { + return nil + } + out := new(CatalogDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogDatabaseSpec) DeepCopyInto(out *CatalogDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDatabaseSpec. +func (in *CatalogDatabaseSpec) DeepCopy() *CatalogDatabaseSpec { + if in == nil { + return nil + } + out := new(CatalogDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogDatabaseStatus) DeepCopyInto(out *CatalogDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogDatabaseStatus. 
+func (in *CatalogDatabaseStatus) DeepCopy() *CatalogDatabaseStatus { + if in == nil { + return nil + } + out := new(CatalogDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogTable) DeepCopyInto(out *CatalogTable) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTable. +func (in *CatalogTable) DeepCopy() *CatalogTable { + if in == nil { + return nil + } + out := new(CatalogTable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CatalogTable) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogTableInitParameters) DeepCopyInto(out *CatalogTableInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.OpenTableFormatInput != nil { + in, out := &in.OpenTableFormatInput, &out.OpenTableFormatInput + *out = new(OpenTableFormatInputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PartitionIndex != nil { + in, out := &in.PartitionIndex, &out.PartitionIndex + *out = make([]PartitionIndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionKeys != nil { + in, out := &in.PartitionKeys, &out.PartitionKeys + *out = make([]PartitionKeysInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = new(float64) + **out = **in + } + if in.StorageDescriptor != nil { + in, out := &in.StorageDescriptor, &out.StorageDescriptor + *out = new(StorageDescriptorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TableType != nil { + in, out := &in.TableType, &out.TableType + *out = new(string) + **out = **in + } + if in.TargetTable != nil { + in, out := &in.TargetTable, &out.TargetTable + *out = new(TargetTableInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ViewExpandedText != nil { + in, out := &in.ViewExpandedText, &out.ViewExpandedText + *out = new(string) + **out = **in + } + if in.ViewOriginalText != nil { + in, out := &in.ViewOriginalText, 
&out.ViewOriginalText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTableInitParameters. +func (in *CatalogTableInitParameters) DeepCopy() *CatalogTableInitParameters { + if in == nil { + return nil + } + out := new(CatalogTableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogTableList) DeepCopyInto(out *CatalogTableList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CatalogTable, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTableList. +func (in *CatalogTableList) DeepCopy() *CatalogTableList { + if in == nil { + return nil + } + out := new(CatalogTableList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CatalogTableList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogTableObservation) DeepCopyInto(out *CatalogTableObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OpenTableFormatInput != nil { + in, out := &in.OpenTableFormatInput, &out.OpenTableFormatInput + *out = new(OpenTableFormatInputObservation) + (*in).DeepCopyInto(*out) + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PartitionIndex != nil { + in, out := &in.PartitionIndex, &out.PartitionIndex + *out = make([]PartitionIndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionKeys != nil { + in, out := &in.PartitionKeys, &out.PartitionKeys + *out = make([]PartitionKeysObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = new(float64) + **out = **in + } + if in.StorageDescriptor != nil { + in, out := &in.StorageDescriptor, &out.StorageDescriptor + *out = new(StorageDescriptorObservation) + (*in).DeepCopyInto(*out) + } + if in.TableType != nil { + in, out := &in.TableType, &out.TableType + *out 
= new(string) + **out = **in + } + if in.TargetTable != nil { + in, out := &in.TargetTable, &out.TargetTable + *out = new(TargetTableObservation) + (*in).DeepCopyInto(*out) + } + if in.ViewExpandedText != nil { + in, out := &in.ViewExpandedText, &out.ViewExpandedText + *out = new(string) + **out = **in + } + if in.ViewOriginalText != nil { + in, out := &in.ViewOriginalText, &out.ViewOriginalText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTableObservation. +func (in *CatalogTableObservation) DeepCopy() *CatalogTableObservation { + if in == nil { + return nil + } + out := new(CatalogTableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogTableParameters) DeepCopyInto(out *CatalogTableParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.OpenTableFormatInput != nil { + in, out := &in.OpenTableFormatInput, &out.OpenTableFormatInput + *out = new(OpenTableFormatInputParameters) + (*in).DeepCopyInto(*out) + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + 
for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PartitionIndex != nil { + in, out := &in.PartitionIndex, &out.PartitionIndex + *out = make([]PartitionIndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionKeys != nil { + in, out := &in.PartitionKeys, &out.PartitionKeys + *out = make([]PartitionKeysParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = new(float64) + **out = **in + } + if in.StorageDescriptor != nil { + in, out := &in.StorageDescriptor, &out.StorageDescriptor + *out = new(StorageDescriptorParameters) + (*in).DeepCopyInto(*out) + } + if in.TableType != nil { + in, out := &in.TableType, &out.TableType + *out = new(string) + **out = **in + } + if in.TargetTable != nil { + in, out := &in.TargetTable, &out.TargetTable + *out = new(TargetTableParameters) + (*in).DeepCopyInto(*out) + } + if in.ViewExpandedText != nil { + in, out := &in.ViewExpandedText, &out.ViewExpandedText + *out = new(string) + **out = **in + } + if in.ViewOriginalText != nil { + in, out := &in.ViewOriginalText, &out.ViewOriginalText + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTableParameters. +func (in *CatalogTableParameters) DeepCopy() *CatalogTableParameters { + if in == nil { + return nil + } + out := new(CatalogTableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogTableSpec) DeepCopyInto(out *CatalogTableSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTableSpec. +func (in *CatalogTableSpec) DeepCopy() *CatalogTableSpec { + if in == nil { + return nil + } + out := new(CatalogTableSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogTableStatus) DeepCopyInto(out *CatalogTableStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTableStatus. +func (in *CatalogTableStatus) DeepCopy() *CatalogTableStatus { + if in == nil { + return nil + } + out := new(CatalogTableStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogTargetInitParameters) DeepCopyInto(out *CatalogTargetInitParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DlqEventQueueArn != nil { + in, out := &in.DlqEventQueueArn, &out.DlqEventQueueArn + *out = new(string) + **out = **in + } + if in.EventQueueArn != nil { + in, out := &in.EventQueueArn, &out.EventQueueArn + *out = new(string) + **out = **in + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTargetInitParameters. +func (in *CatalogTargetInitParameters) DeepCopy() *CatalogTargetInitParameters { + if in == nil { + return nil + } + out := new(CatalogTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogTargetObservation) DeepCopyInto(out *CatalogTargetObservation) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DlqEventQueueArn != nil { + in, out := &in.DlqEventQueueArn, &out.DlqEventQueueArn + *out = new(string) + **out = **in + } + if in.EventQueueArn != nil { + in, out := &in.EventQueueArn, &out.EventQueueArn + *out = new(string) + **out = **in + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTargetObservation. +func (in *CatalogTargetObservation) DeepCopy() *CatalogTargetObservation { + if in == nil { + return nil + } + out := new(CatalogTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogTargetParameters) DeepCopyInto(out *CatalogTargetParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DlqEventQueueArn != nil { + in, out := &in.DlqEventQueueArn, &out.DlqEventQueueArn + *out = new(string) + **out = **in + } + if in.EventQueueArn != nil { + in, out := &in.EventQueueArn, &out.EventQueueArn + *out = new(string) + **out = **in + } + if in.Tables != nil { + in, out := &in.Tables, &out.Tables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogTargetParameters. +func (in *CatalogTargetParameters) DeepCopy() *CatalogTargetParameters { + if in == nil { + return nil + } + out := new(CatalogTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Classifier) DeepCopyInto(out *Classifier) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Classifier. 
+func (in *Classifier) DeepCopy() *Classifier { + if in == nil { + return nil + } + out := new(Classifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Classifier) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassifierInitParameters) DeepCopyInto(out *ClassifierInitParameters) { + *out = *in + if in.CsvClassifier != nil { + in, out := &in.CsvClassifier, &out.CsvClassifier + *out = new(CsvClassifierInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GrokClassifier != nil { + in, out := &in.GrokClassifier, &out.GrokClassifier + *out = new(GrokClassifierInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JSONClassifier != nil { + in, out := &in.JSONClassifier, &out.JSONClassifier + *out = new(JSONClassifierInitParameters) + (*in).DeepCopyInto(*out) + } + if in.XMLClassifier != nil { + in, out := &in.XMLClassifier, &out.XMLClassifier + *out = new(XMLClassifierInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassifierInitParameters. +func (in *ClassifierInitParameters) DeepCopy() *ClassifierInitParameters { + if in == nil { + return nil + } + out := new(ClassifierInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClassifierList) DeepCopyInto(out *ClassifierList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Classifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassifierList. +func (in *ClassifierList) DeepCopy() *ClassifierList { + if in == nil { + return nil + } + out := new(ClassifierList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClassifierList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassifierObservation) DeepCopyInto(out *ClassifierObservation) { + *out = *in + if in.CsvClassifier != nil { + in, out := &in.CsvClassifier, &out.CsvClassifier + *out = new(CsvClassifierObservation) + (*in).DeepCopyInto(*out) + } + if in.GrokClassifier != nil { + in, out := &in.GrokClassifier, &out.GrokClassifier + *out = new(GrokClassifierObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.JSONClassifier != nil { + in, out := &in.JSONClassifier, &out.JSONClassifier + *out = new(JSONClassifierObservation) + (*in).DeepCopyInto(*out) + } + if in.XMLClassifier != nil { + in, out := &in.XMLClassifier, &out.XMLClassifier + *out = new(XMLClassifierObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassifierObservation. 
+func (in *ClassifierObservation) DeepCopy() *ClassifierObservation { + if in == nil { + return nil + } + out := new(ClassifierObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassifierParameters) DeepCopyInto(out *ClassifierParameters) { + *out = *in + if in.CsvClassifier != nil { + in, out := &in.CsvClassifier, &out.CsvClassifier + *out = new(CsvClassifierParameters) + (*in).DeepCopyInto(*out) + } + if in.GrokClassifier != nil { + in, out := &in.GrokClassifier, &out.GrokClassifier + *out = new(GrokClassifierParameters) + (*in).DeepCopyInto(*out) + } + if in.JSONClassifier != nil { + in, out := &in.JSONClassifier, &out.JSONClassifier + *out = new(JSONClassifierParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.XMLClassifier != nil { + in, out := &in.XMLClassifier, &out.XMLClassifier + *out = new(XMLClassifierParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassifierParameters. +func (in *ClassifierParameters) DeepCopy() *ClassifierParameters { + if in == nil { + return nil + } + out := new(ClassifierParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassifierSpec) DeepCopyInto(out *ClassifierSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassifierSpec. 
+func (in *ClassifierSpec) DeepCopy() *ClassifierSpec { + if in == nil { + return nil + } + out := new(ClassifierSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassifierStatus) DeepCopyInto(out *ClassifierStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassifierStatus. +func (in *ClassifierStatus) DeepCopy() *ClassifierStatus { + if in == nil { + return nil + } + out := new(ClassifierStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchEncryptionInitParameters) DeepCopyInto(out *CloudwatchEncryptionInitParameters) { + *out = *in + if in.CloudwatchEncryptionMode != nil { + in, out := &in.CloudwatchEncryptionMode, &out.CloudwatchEncryptionMode + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchEncryptionInitParameters. +func (in *CloudwatchEncryptionInitParameters) DeepCopy() *CloudwatchEncryptionInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchEncryptionObservation) DeepCopyInto(out *CloudwatchEncryptionObservation) { + *out = *in + if in.CloudwatchEncryptionMode != nil { + in, out := &in.CloudwatchEncryptionMode, &out.CloudwatchEncryptionMode + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchEncryptionObservation. +func (in *CloudwatchEncryptionObservation) DeepCopy() *CloudwatchEncryptionObservation { + if in == nil { + return nil + } + out := new(CloudwatchEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchEncryptionParameters) DeepCopyInto(out *CloudwatchEncryptionParameters) { + *out = *in + if in.CloudwatchEncryptionMode != nil { + in, out := &in.CloudwatchEncryptionMode, &out.CloudwatchEncryptionMode + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchEncryptionParameters. +func (in *CloudwatchEncryptionParameters) DeepCopy() *CloudwatchEncryptionParameters { + if in == nil { + return nil + } + out := new(CloudwatchEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColumnsInitParameters) DeepCopyInto(out *ColumnsInitParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnsInitParameters. +func (in *ColumnsInitParameters) DeepCopy() *ColumnsInitParameters { + if in == nil { + return nil + } + out := new(ColumnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnsObservation) DeepCopyInto(out *ColumnsObservation) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnsObservation. 
+func (in *ColumnsObservation) DeepCopy() *ColumnsObservation { + if in == nil { + return nil + } + out := new(ColumnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnsParameters) DeepCopyInto(out *ColumnsParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnsParameters. +func (in *ColumnsParameters) DeepCopy() *ColumnsParameters { + if in == nil { + return nil + } + out := new(ColumnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommandInitParameters) DeepCopyInto(out *CommandInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.ScriptLocation != nil { + in, out := &in.ScriptLocation, &out.ScriptLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandInitParameters. +func (in *CommandInitParameters) DeepCopy() *CommandInitParameters { + if in == nil { + return nil + } + out := new(CommandInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandObservation) DeepCopyInto(out *CommandObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.ScriptLocation != nil { + in, out := &in.ScriptLocation, &out.ScriptLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandObservation. +func (in *CommandObservation) DeepCopy() *CommandObservation { + if in == nil { + return nil + } + out := new(CommandObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommandParameters) DeepCopyInto(out *CommandParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.ScriptLocation != nil { + in, out := &in.ScriptLocation, &out.ScriptLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandParameters. +func (in *CommandParameters) DeepCopy() *CommandParameters { + if in == nil { + return nil + } + out := new(CommandParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsInitParameters) DeepCopyInto(out *ConditionsInitParameters) { + *out = *in + if in.CrawlState != nil { + in, out := &in.CrawlState, &out.CrawlState + *out = new(string) + **out = **in + } + if in.CrawlerName != nil { + in, out := &in.CrawlerName, &out.CrawlerName + *out = new(string) + **out = **in + } + if in.CrawlerNameRef != nil { + in, out := &in.CrawlerNameRef, &out.CrawlerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CrawlerNameSelector != nil { + in, out := &in.CrawlerNameSelector, &out.CrawlerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } + if in.JobNameRef != nil { + in, out := &in.JobNameRef, &out.JobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.JobNameSelector != nil { + in, out := &in.JobNameSelector, &out.JobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LogicalOperator != nil { + in, out := &in.LogicalOperator, 
&out.LogicalOperator + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsInitParameters. +func (in *ConditionsInitParameters) DeepCopy() *ConditionsInitParameters { + if in == nil { + return nil + } + out := new(ConditionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsObservation) DeepCopyInto(out *ConditionsObservation) { + *out = *in + if in.CrawlState != nil { + in, out := &in.CrawlState, &out.CrawlState + *out = new(string) + **out = **in + } + if in.CrawlerName != nil { + in, out := &in.CrawlerName, &out.CrawlerName + *out = new(string) + **out = **in + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } + if in.LogicalOperator != nil { + in, out := &in.LogicalOperator, &out.LogicalOperator + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsObservation. +func (in *ConditionsObservation) DeepCopy() *ConditionsObservation { + if in == nil { + return nil + } + out := new(ConditionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsParameters) DeepCopyInto(out *ConditionsParameters) { + *out = *in + if in.CrawlState != nil { + in, out := &in.CrawlState, &out.CrawlState + *out = new(string) + **out = **in + } + if in.CrawlerName != nil { + in, out := &in.CrawlerName, &out.CrawlerName + *out = new(string) + **out = **in + } + if in.CrawlerNameRef != nil { + in, out := &in.CrawlerNameRef, &out.CrawlerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CrawlerNameSelector != nil { + in, out := &in.CrawlerNameSelector, &out.CrawlerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.JobName != nil { + in, out := &in.JobName, &out.JobName + *out = new(string) + **out = **in + } + if in.JobNameRef != nil { + in, out := &in.JobNameRef, &out.JobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.JobNameSelector != nil { + in, out := &in.JobNameSelector, &out.JobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LogicalOperator != nil { + in, out := &in.LogicalOperator, &out.LogicalOperator + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsParameters. +func (in *ConditionsParameters) DeepCopy() *ConditionsParameters { + if in == nil { + return nil + } + out := new(ConditionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Connection) DeepCopyInto(out *Connection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection. 
+func (in *Connection) DeepCopy() *Connection { + if in == nil { + return nil + } + out := new(Connection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Connection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionInitParameters) DeepCopyInto(out *ConnectionInitParameters) { + *out = *in + if in.ConnectionProperties != nil { + in, out := &in.ConnectionProperties, &out.ConnectionProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MatchCriteria != nil { + in, out := &in.MatchCriteria, &out.MatchCriteria + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PhysicalConnectionRequirements != nil { + in, out := &in.PhysicalConnectionRequirements, &out.PhysicalConnectionRequirements + *out = new(PhysicalConnectionRequirementsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} 
+ +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionInitParameters. +func (in *ConnectionInitParameters) DeepCopy() *ConnectionInitParameters { + if in == nil { + return nil + } + out := new(ConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionList) DeepCopyInto(out *ConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Connection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionList. +func (in *ConnectionList) DeepCopy() *ConnectionList { + if in == nil { + return nil + } + out := new(ConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionObservation) DeepCopyInto(out *ConnectionObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MatchCriteria != nil { + in, out := &in.MatchCriteria, &out.MatchCriteria + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PhysicalConnectionRequirements != nil { + in, out := &in.PhysicalConnectionRequirements, &out.PhysicalConnectionRequirements + *out = new(PhysicalConnectionRequirementsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionObservation. 
+func (in *ConnectionObservation) DeepCopy() *ConnectionObservation { + if in == nil { + return nil + } + out := new(ConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionParameters) DeepCopyInto(out *ConnectionParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.ConnectionPropertiesSecretRef != nil { + in, out := &in.ConnectionPropertiesSecretRef, &out.ConnectionPropertiesSecretRef + *out = new(v1.SecretReference) + **out = **in + } + if in.ConnectionType != nil { + in, out := &in.ConnectionType, &out.ConnectionType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MatchCriteria != nil { + in, out := &in.MatchCriteria, &out.MatchCriteria + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PhysicalConnectionRequirements != nil { + in, out := &in.PhysicalConnectionRequirements, &out.PhysicalConnectionRequirements + *out = new(PhysicalConnectionRequirementsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionParameters. 
+func (in *ConnectionParameters) DeepCopy() *ConnectionParameters { + if in == nil { + return nil + } + out := new(ConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPasswordEncryptionInitParameters) DeepCopyInto(out *ConnectionPasswordEncryptionInitParameters) { + *out = *in + if in.AwsKMSKeyID != nil { + in, out := &in.AwsKMSKeyID, &out.AwsKMSKeyID + *out = new(string) + **out = **in + } + if in.AwsKMSKeyIDRef != nil { + in, out := &in.AwsKMSKeyIDRef, &out.AwsKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AwsKMSKeyIDSelector != nil { + in, out := &in.AwsKMSKeyIDSelector, &out.AwsKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReturnConnectionPasswordEncrypted != nil { + in, out := &in.ReturnConnectionPasswordEncrypted, &out.ReturnConnectionPasswordEncrypted + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPasswordEncryptionInitParameters. +func (in *ConnectionPasswordEncryptionInitParameters) DeepCopy() *ConnectionPasswordEncryptionInitParameters { + if in == nil { + return nil + } + out := new(ConnectionPasswordEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionPasswordEncryptionObservation) DeepCopyInto(out *ConnectionPasswordEncryptionObservation) { + *out = *in + if in.AwsKMSKeyID != nil { + in, out := &in.AwsKMSKeyID, &out.AwsKMSKeyID + *out = new(string) + **out = **in + } + if in.ReturnConnectionPasswordEncrypted != nil { + in, out := &in.ReturnConnectionPasswordEncrypted, &out.ReturnConnectionPasswordEncrypted + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPasswordEncryptionObservation. +func (in *ConnectionPasswordEncryptionObservation) DeepCopy() *ConnectionPasswordEncryptionObservation { + if in == nil { + return nil + } + out := new(ConnectionPasswordEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPasswordEncryptionParameters) DeepCopyInto(out *ConnectionPasswordEncryptionParameters) { + *out = *in + if in.AwsKMSKeyID != nil { + in, out := &in.AwsKMSKeyID, &out.AwsKMSKeyID + *out = new(string) + **out = **in + } + if in.AwsKMSKeyIDRef != nil { + in, out := &in.AwsKMSKeyIDRef, &out.AwsKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AwsKMSKeyIDSelector != nil { + in, out := &in.AwsKMSKeyIDSelector, &out.AwsKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReturnConnectionPasswordEncrypted != nil { + in, out := &in.ReturnConnectionPasswordEncrypted, &out.ReturnConnectionPasswordEncrypted + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPasswordEncryptionParameters. 
+func (in *ConnectionPasswordEncryptionParameters) DeepCopy() *ConnectionPasswordEncryptionParameters { + if in == nil { + return nil + } + out := new(ConnectionPasswordEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionSpec) DeepCopyInto(out *ConnectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionSpec. +func (in *ConnectionSpec) DeepCopy() *ConnectionSpec { + if in == nil { + return nil + } + out := new(ConnectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionStatus) DeepCopyInto(out *ConnectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStatus. +func (in *ConnectionStatus) DeepCopy() *ConnectionStatus { + if in == nil { + return nil + } + out := new(ConnectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Crawler) DeepCopyInto(out *Crawler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Crawler. 
+func (in *Crawler) DeepCopy() *Crawler { + if in == nil { + return nil + } + out := new(Crawler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Crawler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrawlerInitParameters) DeepCopyInto(out *CrawlerInitParameters) { + *out = *in + if in.CatalogTarget != nil { + in, out := &in.CatalogTarget, &out.CatalogTarget + *out = make([]CatalogTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Classifiers != nil { + in, out := &in.Classifiers, &out.Classifiers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeltaTarget != nil { + in, out := &in.DeltaTarget, &out.DeltaTarget + *out = make([]DeltaTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DynamodbTarget != nil { + in, out := &in.DynamodbTarget, &out.DynamodbTarget + *out = 
make([]DynamodbTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HudiTarget != nil { + in, out := &in.HudiTarget, &out.HudiTarget + *out = make([]HudiTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IcebergTarget != nil { + in, out := &in.IcebergTarget, &out.IcebergTarget + *out = make([]IcebergTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JdbcTarget != nil { + in, out := &in.JdbcTarget, &out.JdbcTarget + *out = make([]JdbcTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LakeFormationConfiguration != nil { + in, out := &in.LakeFormationConfiguration, &out.LakeFormationConfiguration + *out = new(LakeFormationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LineageConfiguration != nil { + in, out := &in.LineageConfiguration, &out.LineageConfiguration + *out = new(LineageConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MongodbTarget != nil { + in, out := &in.MongodbTarget, &out.MongodbTarget + *out = make([]MongodbTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecrawlPolicy != nil { + in, out := &in.RecrawlPolicy, &out.RecrawlPolicy + *out = new(RecrawlPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.RoleRef != nil { + in, out := &in.RoleRef, &out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3Target != nil { + in, out := &in.S3Target, &out.S3Target + *out = make([]S3TargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.SchemaChangePolicy != nil { + in, out := &in.SchemaChangePolicy, &out.SchemaChangePolicy + *out = new(SchemaChangePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityConfiguration != nil { + in, out := &in.SecurityConfiguration, &out.SecurityConfiguration + *out = new(string) + **out = **in + } + if in.TablePrefix != nil { + in, out := &in.TablePrefix, &out.TablePrefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrawlerInitParameters. +func (in *CrawlerInitParameters) DeepCopy() *CrawlerInitParameters { + if in == nil { + return nil + } + out := new(CrawlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrawlerList) DeepCopyInto(out *CrawlerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Crawler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrawlerList. +func (in *CrawlerList) DeepCopy() *CrawlerList { + if in == nil { + return nil + } + out := new(CrawlerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CrawlerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrawlerObservation) DeepCopyInto(out *CrawlerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CatalogTarget != nil { + in, out := &in.CatalogTarget, &out.CatalogTarget + *out = make([]CatalogTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Classifiers != nil { + in, out := &in.Classifiers, &out.Classifiers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DeltaTarget != nil { + in, out := &in.DeltaTarget, &out.DeltaTarget + *out = make([]DeltaTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DynamodbTarget != nil { + in, out := &in.DynamodbTarget, &out.DynamodbTarget + *out = make([]DynamodbTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HudiTarget != nil { + in, out := &in.HudiTarget, &out.HudiTarget + *out = make([]HudiTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IcebergTarget != nil { + in, out := &in.IcebergTarget, &out.IcebergTarget + *out = 
make([]IcebergTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JdbcTarget != nil { + in, out := &in.JdbcTarget, &out.JdbcTarget + *out = make([]JdbcTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LakeFormationConfiguration != nil { + in, out := &in.LakeFormationConfiguration, &out.LakeFormationConfiguration + *out = new(LakeFormationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.LineageConfiguration != nil { + in, out := &in.LineageConfiguration, &out.LineageConfiguration + *out = new(LineageConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.MongodbTarget != nil { + in, out := &in.MongodbTarget, &out.MongodbTarget + *out = make([]MongodbTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecrawlPolicy != nil { + in, out := &in.RecrawlPolicy, &out.RecrawlPolicy + *out = new(RecrawlPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.S3Target != nil { + in, out := &in.S3Target, &out.S3Target + *out = make([]S3TargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.SchemaChangePolicy != nil { + in, out := &in.SchemaChangePolicy, &out.SchemaChangePolicy + *out = new(SchemaChangePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityConfiguration != nil { + in, out := &in.SecurityConfiguration, &out.SecurityConfiguration + *out = new(string) + **out = **in + } + if in.TablePrefix != nil { + in, out := &in.TablePrefix, &out.TablePrefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal 
*string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrawlerObservation. +func (in *CrawlerObservation) DeepCopy() *CrawlerObservation { + if in == nil { + return nil + } + out := new(CrawlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrawlerParameters) DeepCopyInto(out *CrawlerParameters) { + *out = *in + if in.CatalogTarget != nil { + in, out := &in.CatalogTarget, &out.CatalogTarget + *out = make([]CatalogTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Classifiers != nil { + in, out := &in.Classifiers, &out.Classifiers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeltaTarget 
!= nil { + in, out := &in.DeltaTarget, &out.DeltaTarget + *out = make([]DeltaTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DynamodbTarget != nil { + in, out := &in.DynamodbTarget, &out.DynamodbTarget + *out = make([]DynamodbTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HudiTarget != nil { + in, out := &in.HudiTarget, &out.HudiTarget + *out = make([]HudiTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IcebergTarget != nil { + in, out := &in.IcebergTarget, &out.IcebergTarget + *out = make([]IcebergTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JdbcTarget != nil { + in, out := &in.JdbcTarget, &out.JdbcTarget + *out = make([]JdbcTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LakeFormationConfiguration != nil { + in, out := &in.LakeFormationConfiguration, &out.LakeFormationConfiguration + *out = new(LakeFormationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.LineageConfiguration != nil { + in, out := &in.LineageConfiguration, &out.LineageConfiguration + *out = new(LineageConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.MongodbTarget != nil { + in, out := &in.MongodbTarget, &out.MongodbTarget + *out = make([]MongodbTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecrawlPolicy != nil { + in, out := &in.RecrawlPolicy, &out.RecrawlPolicy + *out = new(RecrawlPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + 
if in.RoleRef != nil { + in, out := &in.RoleRef, &out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3Target != nil { + in, out := &in.S3Target, &out.S3Target + *out = make([]S3TargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.SchemaChangePolicy != nil { + in, out := &in.SchemaChangePolicy, &out.SchemaChangePolicy + *out = new(SchemaChangePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityConfiguration != nil { + in, out := &in.SecurityConfiguration, &out.SecurityConfiguration + *out = new(string) + **out = **in + } + if in.TablePrefix != nil { + in, out := &in.TablePrefix, &out.TablePrefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrawlerParameters. +func (in *CrawlerParameters) DeepCopy() *CrawlerParameters { + if in == nil { + return nil + } + out := new(CrawlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrawlerSpec) DeepCopyInto(out *CrawlerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrawlerSpec. +func (in *CrawlerSpec) DeepCopy() *CrawlerSpec { + if in == nil { + return nil + } + out := new(CrawlerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrawlerStatus) DeepCopyInto(out *CrawlerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrawlerStatus. +func (in *CrawlerStatus) DeepCopy() *CrawlerStatus { + if in == nil { + return nil + } + out := new(CrawlerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreateTableDefaultPermissionInitParameters) DeepCopyInto(out *CreateTableDefaultPermissionInitParameters) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(PrincipalInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateTableDefaultPermissionInitParameters. 
+func (in *CreateTableDefaultPermissionInitParameters) DeepCopy() *CreateTableDefaultPermissionInitParameters { + if in == nil { + return nil + } + out := new(CreateTableDefaultPermissionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreateTableDefaultPermissionObservation) DeepCopyInto(out *CreateTableDefaultPermissionObservation) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(PrincipalObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateTableDefaultPermissionObservation. +func (in *CreateTableDefaultPermissionObservation) DeepCopy() *CreateTableDefaultPermissionObservation { + if in == nil { + return nil + } + out := new(CreateTableDefaultPermissionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreateTableDefaultPermissionParameters) DeepCopyInto(out *CreateTableDefaultPermissionParameters) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(PrincipalParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateTableDefaultPermissionParameters. 
+func (in *CreateTableDefaultPermissionParameters) DeepCopy() *CreateTableDefaultPermissionParameters { + if in == nil { + return nil + } + out := new(CreateTableDefaultPermissionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CsvClassifierInitParameters) DeepCopyInto(out *CsvClassifierInitParameters) { + *out = *in + if in.AllowSingleColumn != nil { + in, out := &in.AllowSingleColumn, &out.AllowSingleColumn + *out = new(bool) + **out = **in + } + if in.ContainsHeader != nil { + in, out := &in.ContainsHeader, &out.ContainsHeader + *out = new(string) + **out = **in + } + if in.CustomDatatypeConfigured != nil { + in, out := &in.CustomDatatypeConfigured, &out.CustomDatatypeConfigured + *out = new(bool) + **out = **in + } + if in.CustomDatatypes != nil { + in, out := &in.CustomDatatypes, &out.CustomDatatypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } + if in.DisableValueTrimming != nil { + in, out := &in.DisableValueTrimming, &out.DisableValueTrimming + *out = new(bool) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QuoteSymbol != nil { + in, out := &in.QuoteSymbol, &out.QuoteSymbol + *out = new(string) + **out = **in + } + if in.Serde != nil { + in, out := &in.Serde, &out.Serde + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvClassifierInitParameters. 
+func (in *CsvClassifierInitParameters) DeepCopy() *CsvClassifierInitParameters { + if in == nil { + return nil + } + out := new(CsvClassifierInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CsvClassifierObservation) DeepCopyInto(out *CsvClassifierObservation) { + *out = *in + if in.AllowSingleColumn != nil { + in, out := &in.AllowSingleColumn, &out.AllowSingleColumn + *out = new(bool) + **out = **in + } + if in.ContainsHeader != nil { + in, out := &in.ContainsHeader, &out.ContainsHeader + *out = new(string) + **out = **in + } + if in.CustomDatatypeConfigured != nil { + in, out := &in.CustomDatatypeConfigured, &out.CustomDatatypeConfigured + *out = new(bool) + **out = **in + } + if in.CustomDatatypes != nil { + in, out := &in.CustomDatatypes, &out.CustomDatatypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } + if in.DisableValueTrimming != nil { + in, out := &in.DisableValueTrimming, &out.DisableValueTrimming + *out = new(bool) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QuoteSymbol != nil { + in, out := &in.QuoteSymbol, &out.QuoteSymbol + *out = new(string) + **out = **in + } + if in.Serde != nil { + in, out := &in.Serde, &out.Serde + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvClassifierObservation. 
+func (in *CsvClassifierObservation) DeepCopy() *CsvClassifierObservation { + if in == nil { + return nil + } + out := new(CsvClassifierObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CsvClassifierParameters) DeepCopyInto(out *CsvClassifierParameters) { + *out = *in + if in.AllowSingleColumn != nil { + in, out := &in.AllowSingleColumn, &out.AllowSingleColumn + *out = new(bool) + **out = **in + } + if in.ContainsHeader != nil { + in, out := &in.ContainsHeader, &out.ContainsHeader + *out = new(string) + **out = **in + } + if in.CustomDatatypeConfigured != nil { + in, out := &in.CustomDatatypeConfigured, &out.CustomDatatypeConfigured + *out = new(bool) + **out = **in + } + if in.CustomDatatypes != nil { + in, out := &in.CustomDatatypes, &out.CustomDatatypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } + if in.DisableValueTrimming != nil { + in, out := &in.DisableValueTrimming, &out.DisableValueTrimming + *out = new(bool) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QuoteSymbol != nil { + in, out := &in.QuoteSymbol, &out.QuoteSymbol + *out = new(string) + **out = **in + } + if in.Serde != nil { + in, out := &in.Serde, &out.Serde + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvClassifierParameters. 
+func (in *CsvClassifierParameters) DeepCopy() *CsvClassifierParameters { + if in == nil { + return nil + } + out := new(CsvClassifierParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCatalogEncryptionSettings) DeepCopyInto(out *DataCatalogEncryptionSettings) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettings. +func (in *DataCatalogEncryptionSettings) DeepCopy() *DataCatalogEncryptionSettings { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataCatalogEncryptionSettings) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsInitParameters) DeepCopyInto(out *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsInitParameters) { + *out = *in + if in.ConnectionPasswordEncryption != nil { + in, out := &in.ConnectionPasswordEncryption, &out.ConnectionPasswordEncryption + *out = new(ConnectionPasswordEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptionAtRest != nil { + in, out := &in.EncryptionAtRest, &out.EncryptionAtRest + *out = new(EncryptionAtRestInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsInitParameters. 
+func (in *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsInitParameters) DeepCopy() *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsInitParameters { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsObservation) DeepCopyInto(out *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsObservation) { + *out = *in + if in.ConnectionPasswordEncryption != nil { + in, out := &in.ConnectionPasswordEncryption, &out.ConnectionPasswordEncryption + *out = new(ConnectionPasswordEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.EncryptionAtRest != nil { + in, out := &in.EncryptionAtRest, &out.EncryptionAtRest + *out = new(EncryptionAtRestObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsObservation. +func (in *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsObservation) DeepCopy() *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsObservation { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsParameters) DeepCopyInto(out *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsParameters) { + *out = *in + if in.ConnectionPasswordEncryption != nil { + in, out := &in.ConnectionPasswordEncryption, &out.ConnectionPasswordEncryption + *out = new(ConnectionPasswordEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptionAtRest != nil { + in, out := &in.EncryptionAtRest, &out.EncryptionAtRest + *out = new(EncryptionAtRestParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsParameters. +func (in *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsParameters) DeepCopy() *DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsParameters { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCatalogEncryptionSettingsInitParameters) DeepCopyInto(out *DataCatalogEncryptionSettingsInitParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DataCatalogEncryptionSettings != nil { + in, out := &in.DataCatalogEncryptionSettings, &out.DataCatalogEncryptionSettings + *out = new(DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettingsInitParameters. 
+func (in *DataCatalogEncryptionSettingsInitParameters) DeepCopy() *DataCatalogEncryptionSettingsInitParameters { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCatalogEncryptionSettingsList) DeepCopyInto(out *DataCatalogEncryptionSettingsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataCatalogEncryptionSettings, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettingsList. +func (in *DataCatalogEncryptionSettingsList) DeepCopy() *DataCatalogEncryptionSettingsList { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettingsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataCatalogEncryptionSettingsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataCatalogEncryptionSettingsObservation) DeepCopyInto(out *DataCatalogEncryptionSettingsObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DataCatalogEncryptionSettings != nil { + in, out := &in.DataCatalogEncryptionSettings, &out.DataCatalogEncryptionSettings + *out = new(DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettingsObservation. +func (in *DataCatalogEncryptionSettingsObservation) DeepCopy() *DataCatalogEncryptionSettingsObservation { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCatalogEncryptionSettingsParameters) DeepCopyInto(out *DataCatalogEncryptionSettingsParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DataCatalogEncryptionSettings != nil { + in, out := &in.DataCatalogEncryptionSettings, &out.DataCatalogEncryptionSettings + *out = new(DataCatalogEncryptionSettingsDataCatalogEncryptionSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettingsParameters. 
+func (in *DataCatalogEncryptionSettingsParameters) DeepCopy() *DataCatalogEncryptionSettingsParameters { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCatalogEncryptionSettingsSpec) DeepCopyInto(out *DataCatalogEncryptionSettingsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettingsSpec. +func (in *DataCatalogEncryptionSettingsSpec) DeepCopy() *DataCatalogEncryptionSettingsSpec { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettingsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCatalogEncryptionSettingsStatus) DeepCopyInto(out *DataCatalogEncryptionSettingsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogEncryptionSettingsStatus. +func (in *DataCatalogEncryptionSettingsStatus) DeepCopy() *DataCatalogEncryptionSettingsStatus { + if in == nil { + return nil + } + out := new(DataCatalogEncryptionSettingsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeltaTargetInitParameters) DeepCopyInto(out *DeltaTargetInitParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.CreateNativeDeltaTable != nil { + in, out := &in.CreateNativeDeltaTable, &out.CreateNativeDeltaTable + *out = new(bool) + **out = **in + } + if in.DeltaTables != nil { + in, out := &in.DeltaTables, &out.DeltaTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WriteManifest != nil { + in, out := &in.WriteManifest, &out.WriteManifest + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeltaTargetInitParameters. +func (in *DeltaTargetInitParameters) DeepCopy() *DeltaTargetInitParameters { + if in == nil { + return nil + } + out := new(DeltaTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeltaTargetObservation) DeepCopyInto(out *DeltaTargetObservation) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.CreateNativeDeltaTable != nil { + in, out := &in.CreateNativeDeltaTable, &out.CreateNativeDeltaTable + *out = new(bool) + **out = **in + } + if in.DeltaTables != nil { + in, out := &in.DeltaTables, &out.DeltaTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WriteManifest != nil { + in, out := &in.WriteManifest, &out.WriteManifest + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeltaTargetObservation. +func (in *DeltaTargetObservation) DeepCopy() *DeltaTargetObservation { + if in == nil { + return nil + } + out := new(DeltaTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeltaTargetParameters) DeepCopyInto(out *DeltaTargetParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.CreateNativeDeltaTable != nil { + in, out := &in.CreateNativeDeltaTable, &out.CreateNativeDeltaTable + *out = new(bool) + **out = **in + } + if in.DeltaTables != nil { + in, out := &in.DeltaTables, &out.DeltaTables + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WriteManifest != nil { + in, out := &in.WriteManifest, &out.WriteManifest + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeltaTargetParameters. 
+func (in *DeltaTargetParameters) DeepCopy() *DeltaTargetParameters { + if in == nil { + return nil + } + out := new(DeltaTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamodbTargetInitParameters) DeepCopyInto(out *DynamodbTargetInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ScanAll != nil { + in, out := &in.ScanAll, &out.ScanAll + *out = new(bool) + **out = **in + } + if in.ScanRate != nil { + in, out := &in.ScanRate, &out.ScanRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamodbTargetInitParameters. +func (in *DynamodbTargetInitParameters) DeepCopy() *DynamodbTargetInitParameters { + if in == nil { + return nil + } + out := new(DynamodbTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamodbTargetObservation) DeepCopyInto(out *DynamodbTargetObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ScanAll != nil { + in, out := &in.ScanAll, &out.ScanAll + *out = new(bool) + **out = **in + } + if in.ScanRate != nil { + in, out := &in.ScanRate, &out.ScanRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamodbTargetObservation. +func (in *DynamodbTargetObservation) DeepCopy() *DynamodbTargetObservation { + if in == nil { + return nil + } + out := new(DynamodbTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamodbTargetParameters) DeepCopyInto(out *DynamodbTargetParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ScanAll != nil { + in, out := &in.ScanAll, &out.ScanAll + *out = new(bool) + **out = **in + } + if in.ScanRate != nil { + in, out := &in.ScanRate, &out.ScanRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamodbTargetParameters. +func (in *DynamodbTargetParameters) DeepCopy() *DynamodbTargetParameters { + if in == nil { + return nil + } + out := new(DynamodbTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionAtRestInitParameters) DeepCopyInto(out *EncryptionAtRestInitParameters) { + *out = *in + if in.CatalogEncryptionMode != nil { + in, out := &in.CatalogEncryptionMode, &out.CatalogEncryptionMode + *out = new(string) + **out = **in + } + if in.CatalogEncryptionServiceRole != nil { + in, out := &in.CatalogEncryptionServiceRole, &out.CatalogEncryptionServiceRole + *out = new(string) + **out = **in + } + if in.SseAwsKMSKeyID != nil { + in, out := &in.SseAwsKMSKeyID, &out.SseAwsKMSKeyID + *out = new(string) + **out = **in + } + if in.SseAwsKMSKeyIDRef != nil { + in, out := &in.SseAwsKMSKeyIDRef, &out.SseAwsKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SseAwsKMSKeyIDSelector != nil { + in, out := &in.SseAwsKMSKeyIDSelector, &out.SseAwsKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionAtRestInitParameters. 
+func (in *EncryptionAtRestInitParameters) DeepCopy() *EncryptionAtRestInitParameters { + if in == nil { + return nil + } + out := new(EncryptionAtRestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionAtRestObservation) DeepCopyInto(out *EncryptionAtRestObservation) { + *out = *in + if in.CatalogEncryptionMode != nil { + in, out := &in.CatalogEncryptionMode, &out.CatalogEncryptionMode + *out = new(string) + **out = **in + } + if in.CatalogEncryptionServiceRole != nil { + in, out := &in.CatalogEncryptionServiceRole, &out.CatalogEncryptionServiceRole + *out = new(string) + **out = **in + } + if in.SseAwsKMSKeyID != nil { + in, out := &in.SseAwsKMSKeyID, &out.SseAwsKMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionAtRestObservation. +func (in *EncryptionAtRestObservation) DeepCopy() *EncryptionAtRestObservation { + if in == nil { + return nil + } + out := new(EncryptionAtRestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionAtRestParameters) DeepCopyInto(out *EncryptionAtRestParameters) { + *out = *in + if in.CatalogEncryptionMode != nil { + in, out := &in.CatalogEncryptionMode, &out.CatalogEncryptionMode + *out = new(string) + **out = **in + } + if in.CatalogEncryptionServiceRole != nil { + in, out := &in.CatalogEncryptionServiceRole, &out.CatalogEncryptionServiceRole + *out = new(string) + **out = **in + } + if in.SseAwsKMSKeyID != nil { + in, out := &in.SseAwsKMSKeyID, &out.SseAwsKMSKeyID + *out = new(string) + **out = **in + } + if in.SseAwsKMSKeyIDRef != nil { + in, out := &in.SseAwsKMSKeyIDRef, &out.SseAwsKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SseAwsKMSKeyIDSelector != nil { + in, out := &in.SseAwsKMSKeyIDSelector, &out.SseAwsKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionAtRestParameters. +func (in *EncryptionAtRestParameters) DeepCopy() *EncryptionAtRestParameters { + if in == nil { + return nil + } + out := new(EncryptionAtRestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfigurationInitParameters) DeepCopyInto(out *EncryptionConfigurationInitParameters) { + *out = *in + if in.CloudwatchEncryption != nil { + in, out := &in.CloudwatchEncryption, &out.CloudwatchEncryption + *out = new(CloudwatchEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JobBookmarksEncryption != nil { + in, out := &in.JobBookmarksEncryption, &out.JobBookmarksEncryption + *out = new(JobBookmarksEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Encryption != nil { + in, out := &in.S3Encryption, &out.S3Encryption + *out = new(S3EncryptionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationInitParameters. +func (in *EncryptionConfigurationInitParameters) DeepCopy() *EncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationObservation) DeepCopyInto(out *EncryptionConfigurationObservation) { + *out = *in + if in.CloudwatchEncryption != nil { + in, out := &in.CloudwatchEncryption, &out.CloudwatchEncryption + *out = new(CloudwatchEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.JobBookmarksEncryption != nil { + in, out := &in.JobBookmarksEncryption, &out.JobBookmarksEncryption + *out = new(JobBookmarksEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.S3Encryption != nil { + in, out := &in.S3Encryption, &out.S3Encryption + *out = new(S3EncryptionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationObservation. 
+func (in *EncryptionConfigurationObservation) DeepCopy() *EncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationParameters) DeepCopyInto(out *EncryptionConfigurationParameters) { + *out = *in + if in.CloudwatchEncryption != nil { + in, out := &in.CloudwatchEncryption, &out.CloudwatchEncryption + *out = new(CloudwatchEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.JobBookmarksEncryption != nil { + in, out := &in.JobBookmarksEncryption, &out.JobBookmarksEncryption + *out = new(JobBookmarksEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Encryption != nil { + in, out := &in.S3Encryption, &out.S3Encryption + *out = new(S3EncryptionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationParameters. +func (in *EncryptionConfigurationParameters) DeepCopy() *EncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventBatchingConditionInitParameters) DeepCopyInto(out *EventBatchingConditionInitParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BatchWindow != nil { + in, out := &in.BatchWindow, &out.BatchWindow + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBatchingConditionInitParameters. 
+func (in *EventBatchingConditionInitParameters) DeepCopy() *EventBatchingConditionInitParameters { + if in == nil { + return nil + } + out := new(EventBatchingConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventBatchingConditionObservation) DeepCopyInto(out *EventBatchingConditionObservation) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BatchWindow != nil { + in, out := &in.BatchWindow, &out.BatchWindow + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBatchingConditionObservation. +func (in *EventBatchingConditionObservation) DeepCopy() *EventBatchingConditionObservation { + if in == nil { + return nil + } + out := new(EventBatchingConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventBatchingConditionParameters) DeepCopyInto(out *EventBatchingConditionParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BatchWindow != nil { + in, out := &in.BatchWindow, &out.BatchWindow + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventBatchingConditionParameters. +func (in *EventBatchingConditionParameters) DeepCopy() *EventBatchingConditionParameters { + if in == nil { + return nil + } + out := new(EventBatchingConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExecutionPropertyInitParameters) DeepCopyInto(out *ExecutionPropertyInitParameters) { + *out = *in + if in.MaxConcurrentRuns != nil { + in, out := &in.MaxConcurrentRuns, &out.MaxConcurrentRuns + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionPropertyInitParameters. +func (in *ExecutionPropertyInitParameters) DeepCopy() *ExecutionPropertyInitParameters { + if in == nil { + return nil + } + out := new(ExecutionPropertyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecutionPropertyObservation) DeepCopyInto(out *ExecutionPropertyObservation) { + *out = *in + if in.MaxConcurrentRuns != nil { + in, out := &in.MaxConcurrentRuns, &out.MaxConcurrentRuns + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionPropertyObservation. +func (in *ExecutionPropertyObservation) DeepCopy() *ExecutionPropertyObservation { + if in == nil { + return nil + } + out := new(ExecutionPropertyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecutionPropertyParameters) DeepCopyInto(out *ExecutionPropertyParameters) { + *out = *in + if in.MaxConcurrentRuns != nil { + in, out := &in.MaxConcurrentRuns, &out.MaxConcurrentRuns + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionPropertyParameters. 
+func (in *ExecutionPropertyParameters) DeepCopy() *ExecutionPropertyParameters { + if in == nil { + return nil + } + out := new(ExecutionPropertyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedDatabaseInitParameters) DeepCopyInto(out *FederatedDatabaseInitParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedDatabaseInitParameters. +func (in *FederatedDatabaseInitParameters) DeepCopy() *FederatedDatabaseInitParameters { + if in == nil { + return nil + } + out := new(FederatedDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedDatabaseObservation) DeepCopyInto(out *FederatedDatabaseObservation) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedDatabaseObservation. +func (in *FederatedDatabaseObservation) DeepCopy() *FederatedDatabaseObservation { + if in == nil { + return nil + } + out := new(FederatedDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FederatedDatabaseParameters) DeepCopyInto(out *FederatedDatabaseParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedDatabaseParameters. +func (in *FederatedDatabaseParameters) DeepCopy() *FederatedDatabaseParameters { + if in == nil { + return nil + } + out := new(FederatedDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrokClassifierInitParameters) DeepCopyInto(out *GrokClassifierInitParameters) { + *out = *in + if in.Classification != nil { + in, out := &in.Classification, &out.Classification + *out = new(string) + **out = **in + } + if in.CustomPatterns != nil { + in, out := &in.CustomPatterns, &out.CustomPatterns + *out = new(string) + **out = **in + } + if in.GrokPattern != nil { + in, out := &in.GrokPattern, &out.GrokPattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrokClassifierInitParameters. +func (in *GrokClassifierInitParameters) DeepCopy() *GrokClassifierInitParameters { + if in == nil { + return nil + } + out := new(GrokClassifierInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GrokClassifierObservation) DeepCopyInto(out *GrokClassifierObservation) { + *out = *in + if in.Classification != nil { + in, out := &in.Classification, &out.Classification + *out = new(string) + **out = **in + } + if in.CustomPatterns != nil { + in, out := &in.CustomPatterns, &out.CustomPatterns + *out = new(string) + **out = **in + } + if in.GrokPattern != nil { + in, out := &in.GrokPattern, &out.GrokPattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrokClassifierObservation. +func (in *GrokClassifierObservation) DeepCopy() *GrokClassifierObservation { + if in == nil { + return nil + } + out := new(GrokClassifierObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrokClassifierParameters) DeepCopyInto(out *GrokClassifierParameters) { + *out = *in + if in.Classification != nil { + in, out := &in.Classification, &out.Classification + *out = new(string) + **out = **in + } + if in.CustomPatterns != nil { + in, out := &in.CustomPatterns, &out.CustomPatterns + *out = new(string) + **out = **in + } + if in.GrokPattern != nil { + in, out := &in.GrokPattern, &out.GrokPattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrokClassifierParameters. +func (in *GrokClassifierParameters) DeepCopy() *GrokClassifierParameters { + if in == nil { + return nil + } + out := new(GrokClassifierParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HudiTargetInitParameters) DeepCopyInto(out *HudiTargetInitParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumTraversalDepth != nil { + in, out := &in.MaximumTraversalDepth, &out.MaximumTraversalDepth + *out = new(float64) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HudiTargetInitParameters. +func (in *HudiTargetInitParameters) DeepCopy() *HudiTargetInitParameters { + if in == nil { + return nil + } + out := new(HudiTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HudiTargetObservation) DeepCopyInto(out *HudiTargetObservation) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumTraversalDepth != nil { + in, out := &in.MaximumTraversalDepth, &out.MaximumTraversalDepth + *out = new(float64) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HudiTargetObservation. +func (in *HudiTargetObservation) DeepCopy() *HudiTargetObservation { + if in == nil { + return nil + } + out := new(HudiTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HudiTargetParameters) DeepCopyInto(out *HudiTargetParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumTraversalDepth != nil { + in, out := &in.MaximumTraversalDepth, &out.MaximumTraversalDepth + *out = new(float64) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HudiTargetParameters. +func (in *HudiTargetParameters) DeepCopy() *HudiTargetParameters { + if in == nil { + return nil + } + out := new(HudiTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IcebergInputInitParameters) DeepCopyInto(out *IcebergInputInitParameters) { + *out = *in + if in.MetadataOperation != nil { + in, out := &in.MetadataOperation, &out.MetadataOperation + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcebergInputInitParameters. +func (in *IcebergInputInitParameters) DeepCopy() *IcebergInputInitParameters { + if in == nil { + return nil + } + out := new(IcebergInputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *IcebergInputObservation) DeepCopyInto(out *IcebergInputObservation) { + *out = *in + if in.MetadataOperation != nil { + in, out := &in.MetadataOperation, &out.MetadataOperation + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcebergInputObservation. +func (in *IcebergInputObservation) DeepCopy() *IcebergInputObservation { + if in == nil { + return nil + } + out := new(IcebergInputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IcebergInputParameters) DeepCopyInto(out *IcebergInputParameters) { + *out = *in + if in.MetadataOperation != nil { + in, out := &in.MetadataOperation, &out.MetadataOperation + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcebergInputParameters. +func (in *IcebergInputParameters) DeepCopy() *IcebergInputParameters { + if in == nil { + return nil + } + out := new(IcebergInputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IcebergTargetInitParameters) DeepCopyInto(out *IcebergTargetInitParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumTraversalDepth != nil { + in, out := &in.MaximumTraversalDepth, &out.MaximumTraversalDepth + *out = new(float64) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcebergTargetInitParameters. +func (in *IcebergTargetInitParameters) DeepCopy() *IcebergTargetInitParameters { + if in == nil { + return nil + } + out := new(IcebergTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IcebergTargetObservation) DeepCopyInto(out *IcebergTargetObservation) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumTraversalDepth != nil { + in, out := &in.MaximumTraversalDepth, &out.MaximumTraversalDepth + *out = new(float64) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcebergTargetObservation. +func (in *IcebergTargetObservation) DeepCopy() *IcebergTargetObservation { + if in == nil { + return nil + } + out := new(IcebergTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IcebergTargetParameters) DeepCopyInto(out *IcebergTargetParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumTraversalDepth != nil { + in, out := &in.MaximumTraversalDepth, &out.MaximumTraversalDepth + *out = new(float64) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcebergTargetParameters. +func (in *IcebergTargetParameters) DeepCopy() *IcebergTargetParameters { + if in == nil { + return nil + } + out := new(IcebergTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONClassifierInitParameters) DeepCopyInto(out *JSONClassifierInitParameters) { + *out = *in + if in.JSONPath != nil { + in, out := &in.JSONPath, &out.JSONPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONClassifierInitParameters. +func (in *JSONClassifierInitParameters) DeepCopy() *JSONClassifierInitParameters { + if in == nil { + return nil + } + out := new(JSONClassifierInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONClassifierObservation) DeepCopyInto(out *JSONClassifierObservation) { + *out = *in + if in.JSONPath != nil { + in, out := &in.JSONPath, &out.JSONPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONClassifierObservation. +func (in *JSONClassifierObservation) DeepCopy() *JSONClassifierObservation { + if in == nil { + return nil + } + out := new(JSONClassifierObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONClassifierParameters) DeepCopyInto(out *JSONClassifierParameters) { + *out = *in + if in.JSONPath != nil { + in, out := &in.JSONPath, &out.JSONPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONClassifierParameters. +func (in *JSONClassifierParameters) DeepCopy() *JSONClassifierParameters { + if in == nil { + return nil + } + out := new(JSONClassifierParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JdbcTargetInitParameters) DeepCopyInto(out *JdbcTargetInitParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.ConnectionNameRef != nil { + in, out := &in.ConnectionNameRef, &out.ConnectionNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionNameSelector != nil { + in, out := &in.ConnectionNameSelector, &out.ConnectionNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EnableAdditionalMetadata != nil { + in, out := &in.EnableAdditionalMetadata, &out.EnableAdditionalMetadata + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JdbcTargetInitParameters. +func (in *JdbcTargetInitParameters) DeepCopy() *JdbcTargetInitParameters { + if in == nil { + return nil + } + out := new(JdbcTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JdbcTargetObservation) DeepCopyInto(out *JdbcTargetObservation) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.EnableAdditionalMetadata != nil { + in, out := &in.EnableAdditionalMetadata, &out.EnableAdditionalMetadata + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JdbcTargetObservation. +func (in *JdbcTargetObservation) DeepCopy() *JdbcTargetObservation { + if in == nil { + return nil + } + out := new(JdbcTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JdbcTargetParameters) DeepCopyInto(out *JdbcTargetParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.ConnectionNameRef != nil { + in, out := &in.ConnectionNameRef, &out.ConnectionNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionNameSelector != nil { + in, out := &in.ConnectionNameSelector, &out.ConnectionNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EnableAdditionalMetadata != nil { + in, out := &in.EnableAdditionalMetadata, &out.EnableAdditionalMetadata + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JdbcTargetParameters. +func (in *JdbcTargetParameters) DeepCopy() *JdbcTargetParameters { + if in == nil { + return nil + } + out := new(JdbcTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Job) DeepCopyInto(out *Job) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job. 
+func (in *Job) DeepCopy() *Job { + if in == nil { + return nil + } + out := new(Job) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Job) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobBookmarksEncryptionInitParameters) DeepCopyInto(out *JobBookmarksEncryptionInitParameters) { + *out = *in + if in.JobBookmarksEncryptionMode != nil { + in, out := &in.JobBookmarksEncryptionMode, &out.JobBookmarksEncryptionMode + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobBookmarksEncryptionInitParameters. +func (in *JobBookmarksEncryptionInitParameters) DeepCopy() *JobBookmarksEncryptionInitParameters { + if in == nil { + return nil + } + out := new(JobBookmarksEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobBookmarksEncryptionObservation) DeepCopyInto(out *JobBookmarksEncryptionObservation) { + *out = *in + if in.JobBookmarksEncryptionMode != nil { + in, out := &in.JobBookmarksEncryptionMode, &out.JobBookmarksEncryptionMode + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobBookmarksEncryptionObservation. +func (in *JobBookmarksEncryptionObservation) DeepCopy() *JobBookmarksEncryptionObservation { + if in == nil { + return nil + } + out := new(JobBookmarksEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobBookmarksEncryptionParameters) DeepCopyInto(out *JobBookmarksEncryptionParameters) { + *out = *in + if in.JobBookmarksEncryptionMode != nil { + in, out := &in.JobBookmarksEncryptionMode, &out.JobBookmarksEncryptionMode + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobBookmarksEncryptionParameters. +func (in *JobBookmarksEncryptionParameters) DeepCopy() *JobBookmarksEncryptionParameters { + if in == nil { + return nil + } + out := new(JobBookmarksEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobInitParameters) DeepCopyInto(out *JobInitParameters) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = new(CommandInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Connections != nil { + in, out := &in.Connections, &out.Connections + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultArguments != nil { + in, out := &in.DefaultArguments, &out.DefaultArguments + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionClass != nil { + in, out := &in.ExecutionClass, &out.ExecutionClass + *out = new(string) + **out = **in + } + if in.ExecutionProperty != nil { + in, out := &in.ExecutionProperty, &out.ExecutionProperty + *out = new(ExecutionPropertyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GlueVersion != nil { + in, out := &in.GlueVersion, &out.GlueVersion + *out = new(string) + **out = **in + } + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(float64) + **out = **in + } + if in.NonOverridableArguments != nil { + in, out := &in.NonOverridableArguments, &out.NonOverridableArguments + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NotificationProperty != nil { + in, out := 
&in.NotificationProperty, &out.NotificationProperty + *out = new(NotificationPropertyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NumberOfWorkers != nil { + in, out := &in.NumberOfWorkers, &out.NumberOfWorkers + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityConfiguration != nil { + in, out := &in.SecurityConfiguration, &out.SecurityConfiguration + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.WorkerType != nil { + in, out := &in.WorkerType, &out.WorkerType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobInitParameters. +func (in *JobInitParameters) DeepCopy() *JobInitParameters { + if in == nil { + return nil + } + out := new(JobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobList) DeepCopyInto(out *JobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList. +func (in *JobList) DeepCopy() *JobList { + if in == nil { + return nil + } + out := new(JobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobObservation) DeepCopyInto(out *JobObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = new(CommandObservation) + (*in).DeepCopyInto(*out) + } + if in.Connections != nil { + in, out := &in.Connections, &out.Connections + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultArguments != nil { + in, out := &in.DefaultArguments, &out.DefaultArguments + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionClass != nil { + in, out := &in.ExecutionClass, &out.ExecutionClass + *out = new(string) + 
**out = **in + } + if in.ExecutionProperty != nil { + in, out := &in.ExecutionProperty, &out.ExecutionProperty + *out = new(ExecutionPropertyObservation) + (*in).DeepCopyInto(*out) + } + if in.GlueVersion != nil { + in, out := &in.GlueVersion, &out.GlueVersion + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(float64) + **out = **in + } + if in.NonOverridableArguments != nil { + in, out := &in.NonOverridableArguments, &out.NonOverridableArguments + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NotificationProperty != nil { + in, out := &in.NotificationProperty, &out.NotificationProperty + *out = new(NotificationPropertyObservation) + (*in).DeepCopyInto(*out) + } + if in.NumberOfWorkers != nil { + in, out := &in.NumberOfWorkers, &out.NumberOfWorkers + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SecurityConfiguration != nil { + in, out := &in.SecurityConfiguration, &out.SecurityConfiguration + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, 
val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.WorkerType != nil { + in, out := &in.WorkerType, &out.WorkerType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobObservation. +func (in *JobObservation) DeepCopy() *JobObservation { + if in == nil { + return nil + } + out := new(JobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobParameters) DeepCopyInto(out *JobParameters) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = new(CommandParameters) + (*in).DeepCopyInto(*out) + } + if in.Connections != nil { + in, out := &in.Connections, &out.Connections + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultArguments != nil { + in, out := &in.DefaultArguments, &out.DefaultArguments + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionClass != nil { + in, out := &in.ExecutionClass, &out.ExecutionClass + *out = new(string) + **out = **in + } + if in.ExecutionProperty != nil { + in, out := &in.ExecutionProperty, &out.ExecutionProperty + *out = new(ExecutionPropertyParameters) + 
(*in).DeepCopyInto(*out) + } + if in.GlueVersion != nil { + in, out := &in.GlueVersion, &out.GlueVersion + *out = new(string) + **out = **in + } + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MaxRetries != nil { + in, out := &in.MaxRetries, &out.MaxRetries + *out = new(float64) + **out = **in + } + if in.NonOverridableArguments != nil { + in, out := &in.NonOverridableArguments, &out.NonOverridableArguments + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NotificationProperty != nil { + in, out := &in.NotificationProperty, &out.NotificationProperty + *out = new(NotificationPropertyParameters) + (*in).DeepCopyInto(*out) + } + if in.NumberOfWorkers != nil { + in, out := &in.NumberOfWorkers, &out.NumberOfWorkers + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityConfiguration != nil { + in, out := &in.SecurityConfiguration, &out.SecurityConfiguration + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.WorkerType != nil { + in, out := &in.WorkerType, &out.WorkerType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobParameters. +func (in *JobParameters) DeepCopy() *JobParameters { + if in == nil { + return nil + } + out := new(JobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobSpec) DeepCopyInto(out *JobSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec. +func (in *JobSpec) DeepCopy() *JobSpec { + if in == nil { + return nil + } + out := new(JobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobStatus) DeepCopyInto(out *JobStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus. +func (in *JobStatus) DeepCopy() *JobStatus { + if in == nil { + return nil + } + out := new(JobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LakeFormationConfigurationInitParameters) DeepCopyInto(out *LakeFormationConfigurationInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.UseLakeFormationCredentials != nil { + in, out := &in.UseLakeFormationCredentials, &out.UseLakeFormationCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeFormationConfigurationInitParameters. +func (in *LakeFormationConfigurationInitParameters) DeepCopy() *LakeFormationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LakeFormationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeFormationConfigurationObservation) DeepCopyInto(out *LakeFormationConfigurationObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.UseLakeFormationCredentials != nil { + in, out := &in.UseLakeFormationCredentials, &out.UseLakeFormationCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeFormationConfigurationObservation. +func (in *LakeFormationConfigurationObservation) DeepCopy() *LakeFormationConfigurationObservation { + if in == nil { + return nil + } + out := new(LakeFormationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LakeFormationConfigurationParameters) DeepCopyInto(out *LakeFormationConfigurationParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.UseLakeFormationCredentials != nil { + in, out := &in.UseLakeFormationCredentials, &out.UseLakeFormationCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeFormationConfigurationParameters. +func (in *LakeFormationConfigurationParameters) DeepCopy() *LakeFormationConfigurationParameters { + if in == nil { + return nil + } + out := new(LakeFormationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LineageConfigurationInitParameters) DeepCopyInto(out *LineageConfigurationInitParameters) { + *out = *in + if in.CrawlerLineageSettings != nil { + in, out := &in.CrawlerLineageSettings, &out.CrawlerLineageSettings + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LineageConfigurationInitParameters. +func (in *LineageConfigurationInitParameters) DeepCopy() *LineageConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LineageConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LineageConfigurationObservation) DeepCopyInto(out *LineageConfigurationObservation) { + *out = *in + if in.CrawlerLineageSettings != nil { + in, out := &in.CrawlerLineageSettings, &out.CrawlerLineageSettings + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LineageConfigurationObservation. 
+func (in *LineageConfigurationObservation) DeepCopy() *LineageConfigurationObservation { + if in == nil { + return nil + } + out := new(LineageConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LineageConfigurationParameters) DeepCopyInto(out *LineageConfigurationParameters) { + *out = *in + if in.CrawlerLineageSettings != nil { + in, out := &in.CrawlerLineageSettings, &out.CrawlerLineageSettings + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LineageConfigurationParameters. +func (in *LineageConfigurationParameters) DeepCopy() *LineageConfigurationParameters { + if in == nil { + return nil + } + out := new(LineageConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongodbTargetInitParameters) DeepCopyInto(out *MongodbTargetInitParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.ConnectionNameRef != nil { + in, out := &in.ConnectionNameRef, &out.ConnectionNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionNameSelector != nil { + in, out := &in.ConnectionNameSelector, &out.ConnectionNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ScanAll != nil { + in, out := &in.ScanAll, &out.ScanAll + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbTargetInitParameters. 
+func (in *MongodbTargetInitParameters) DeepCopy() *MongodbTargetInitParameters { + if in == nil { + return nil + } + out := new(MongodbTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongodbTargetObservation) DeepCopyInto(out *MongodbTargetObservation) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ScanAll != nil { + in, out := &in.ScanAll, &out.ScanAll + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbTargetObservation. +func (in *MongodbTargetObservation) DeepCopy() *MongodbTargetObservation { + if in == nil { + return nil + } + out := new(MongodbTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongodbTargetParameters) DeepCopyInto(out *MongodbTargetParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.ConnectionNameRef != nil { + in, out := &in.ConnectionNameRef, &out.ConnectionNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConnectionNameSelector != nil { + in, out := &in.ConnectionNameSelector, &out.ConnectionNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ScanAll != nil { + in, out := &in.ScanAll, &out.ScanAll + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbTargetParameters. +func (in *MongodbTargetParameters) DeepCopy() *MongodbTargetParameters { + if in == nil { + return nil + } + out := new(MongodbTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationPropertyInitParameters) DeepCopyInto(out *NotificationPropertyInitParameters) { + *out = *in + if in.NotifyDelayAfter != nil { + in, out := &in.NotifyDelayAfter, &out.NotifyDelayAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationPropertyInitParameters. +func (in *NotificationPropertyInitParameters) DeepCopy() *NotificationPropertyInitParameters { + if in == nil { + return nil + } + out := new(NotificationPropertyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationPropertyObservation) DeepCopyInto(out *NotificationPropertyObservation) { + *out = *in + if in.NotifyDelayAfter != nil { + in, out := &in.NotifyDelayAfter, &out.NotifyDelayAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationPropertyObservation. +func (in *NotificationPropertyObservation) DeepCopy() *NotificationPropertyObservation { + if in == nil { + return nil + } + out := new(NotificationPropertyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationPropertyParameters) DeepCopyInto(out *NotificationPropertyParameters) { + *out = *in + if in.NotifyDelayAfter != nil { + in, out := &in.NotifyDelayAfter, &out.NotifyDelayAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationPropertyParameters. +func (in *NotificationPropertyParameters) DeepCopy() *NotificationPropertyParameters { + if in == nil { + return nil + } + out := new(NotificationPropertyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenTableFormatInputInitParameters) DeepCopyInto(out *OpenTableFormatInputInitParameters) { + *out = *in + if in.IcebergInput != nil { + in, out := &in.IcebergInput, &out.IcebergInput + *out = new(IcebergInputInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenTableFormatInputInitParameters. 
+func (in *OpenTableFormatInputInitParameters) DeepCopy() *OpenTableFormatInputInitParameters { + if in == nil { + return nil + } + out := new(OpenTableFormatInputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenTableFormatInputObservation) DeepCopyInto(out *OpenTableFormatInputObservation) { + *out = *in + if in.IcebergInput != nil { + in, out := &in.IcebergInput, &out.IcebergInput + *out = new(IcebergInputObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenTableFormatInputObservation. +func (in *OpenTableFormatInputObservation) DeepCopy() *OpenTableFormatInputObservation { + if in == nil { + return nil + } + out := new(OpenTableFormatInputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenTableFormatInputParameters) DeepCopyInto(out *OpenTableFormatInputParameters) { + *out = *in + if in.IcebergInput != nil { + in, out := &in.IcebergInput, &out.IcebergInput + *out = new(IcebergInputParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenTableFormatInputParameters. +func (in *OpenTableFormatInputParameters) DeepCopy() *OpenTableFormatInputParameters { + if in == nil { + return nil + } + out := new(OpenTableFormatInputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PartitionIndexInitParameters) DeepCopyInto(out *PartitionIndexInitParameters) { + *out = *in + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionIndexInitParameters. +func (in *PartitionIndexInitParameters) DeepCopy() *PartitionIndexInitParameters { + if in == nil { + return nil + } + out := new(PartitionIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionIndexObservation) DeepCopyInto(out *PartitionIndexObservation) { + *out = *in + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.IndexStatus != nil { + in, out := &in.IndexStatus, &out.IndexStatus + *out = new(string) + **out = **in + } + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionIndexObservation. +func (in *PartitionIndexObservation) DeepCopy() *PartitionIndexObservation { + if in == nil { + return nil + } + out := new(PartitionIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PartitionIndexParameters) DeepCopyInto(out *PartitionIndexParameters) { + *out = *in + if in.IndexName != nil { + in, out := &in.IndexName, &out.IndexName + *out = new(string) + **out = **in + } + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionIndexParameters. +func (in *PartitionIndexParameters) DeepCopy() *PartitionIndexParameters { + if in == nil { + return nil + } + out := new(PartitionIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionKeysInitParameters) DeepCopyInto(out *PartitionKeysInitParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionKeysInitParameters. +func (in *PartitionKeysInitParameters) DeepCopy() *PartitionKeysInitParameters { + if in == nil { + return nil + } + out := new(PartitionKeysInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PartitionKeysObservation) DeepCopyInto(out *PartitionKeysObservation) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionKeysObservation. +func (in *PartitionKeysObservation) DeepCopy() *PartitionKeysObservation { + if in == nil { + return nil + } + out := new(PartitionKeysObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionKeysParameters) DeepCopyInto(out *PartitionKeysParameters) { + *out = *in + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionKeysParameters. +func (in *PartitionKeysParameters) DeepCopy() *PartitionKeysParameters { + if in == nil { + return nil + } + out := new(PartitionKeysParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PhysicalConnectionRequirementsInitParameters) DeepCopyInto(out *PhysicalConnectionRequirementsInitParameters) { + *out = *in + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.AvailabilityZoneRef != nil { + in, out := &in.AvailabilityZoneRef, &out.AvailabilityZoneRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AvailabilityZoneSelector != nil { + in, out := &in.AvailabilityZoneSelector, &out.AvailabilityZoneSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIDList != nil { + in, out := &in.SecurityGroupIDList, &out.SecurityGroupIDList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhysicalConnectionRequirementsInitParameters. +func (in *PhysicalConnectionRequirementsInitParameters) DeepCopy() *PhysicalConnectionRequirementsInitParameters { + if in == nil { + return nil + } + out := new(PhysicalConnectionRequirementsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PhysicalConnectionRequirementsObservation) DeepCopyInto(out *PhysicalConnectionRequirementsObservation) { + *out = *in + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.SecurityGroupIDList != nil { + in, out := &in.SecurityGroupIDList, &out.SecurityGroupIDList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhysicalConnectionRequirementsObservation. +func (in *PhysicalConnectionRequirementsObservation) DeepCopy() *PhysicalConnectionRequirementsObservation { + if in == nil { + return nil + } + out := new(PhysicalConnectionRequirementsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PhysicalConnectionRequirementsParameters) DeepCopyInto(out *PhysicalConnectionRequirementsParameters) { + *out = *in + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.AvailabilityZoneRef != nil { + in, out := &in.AvailabilityZoneRef, &out.AvailabilityZoneRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AvailabilityZoneSelector != nil { + in, out := &in.AvailabilityZoneSelector, &out.AvailabilityZoneSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIDList != nil { + in, out := &in.SecurityGroupIDList, &out.SecurityGroupIDList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhysicalConnectionRequirementsParameters. +func (in *PhysicalConnectionRequirementsParameters) DeepCopy() *PhysicalConnectionRequirementsParameters { + if in == nil { + return nil + } + out := new(PhysicalConnectionRequirementsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PredicateInitParameters) DeepCopyInto(out *PredicateInitParameters) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ConditionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logical != nil { + in, out := &in.Logical, &out.Logical + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateInitParameters. +func (in *PredicateInitParameters) DeepCopy() *PredicateInitParameters { + if in == nil { + return nil + } + out := new(PredicateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredicateObservation) DeepCopyInto(out *PredicateObservation) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ConditionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logical != nil { + in, out := &in.Logical, &out.Logical + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateObservation. +func (in *PredicateObservation) DeepCopy() *PredicateObservation { + if in == nil { + return nil + } + out := new(PredicateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PredicateParameters) DeepCopyInto(out *PredicateParameters) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ConditionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logical != nil { + in, out := &in.Logical, &out.Logical + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateParameters. +func (in *PredicateParameters) DeepCopy() *PredicateParameters { + if in == nil { + return nil + } + out := new(PredicateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrincipalInitParameters) DeepCopyInto(out *PrincipalInitParameters) { + *out = *in + if in.DataLakePrincipalIdentifier != nil { + in, out := &in.DataLakePrincipalIdentifier, &out.DataLakePrincipalIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalInitParameters. +func (in *PrincipalInitParameters) DeepCopy() *PrincipalInitParameters { + if in == nil { + return nil + } + out := new(PrincipalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrincipalObservation) DeepCopyInto(out *PrincipalObservation) { + *out = *in + if in.DataLakePrincipalIdentifier != nil { + in, out := &in.DataLakePrincipalIdentifier, &out.DataLakePrincipalIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalObservation. 
+func (in *PrincipalObservation) DeepCopy() *PrincipalObservation { + if in == nil { + return nil + } + out := new(PrincipalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrincipalParameters) DeepCopyInto(out *PrincipalParameters) { + *out = *in + if in.DataLakePrincipalIdentifier != nil { + in, out := &in.DataLakePrincipalIdentifier, &out.DataLakePrincipalIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalParameters. +func (in *PrincipalParameters) DeepCopy() *PrincipalParameters { + if in == nil { + return nil + } + out := new(PrincipalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecrawlPolicyInitParameters) DeepCopyInto(out *RecrawlPolicyInitParameters) { + *out = *in + if in.RecrawlBehavior != nil { + in, out := &in.RecrawlBehavior, &out.RecrawlBehavior + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecrawlPolicyInitParameters. +func (in *RecrawlPolicyInitParameters) DeepCopy() *RecrawlPolicyInitParameters { + if in == nil { + return nil + } + out := new(RecrawlPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecrawlPolicyObservation) DeepCopyInto(out *RecrawlPolicyObservation) { + *out = *in + if in.RecrawlBehavior != nil { + in, out := &in.RecrawlBehavior, &out.RecrawlBehavior + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecrawlPolicyObservation. 
+func (in *RecrawlPolicyObservation) DeepCopy() *RecrawlPolicyObservation { + if in == nil { + return nil + } + out := new(RecrawlPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecrawlPolicyParameters) DeepCopyInto(out *RecrawlPolicyParameters) { + *out = *in + if in.RecrawlBehavior != nil { + in, out := &in.RecrawlBehavior, &out.RecrawlBehavior + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecrawlPolicyParameters. +func (in *RecrawlPolicyParameters) DeepCopy() *RecrawlPolicyParameters { + if in == nil { + return nil + } + out := new(RecrawlPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3EncryptionInitParameters) DeepCopyInto(out *S3EncryptionInitParameters) { + *out = *in + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3EncryptionMode != nil { + in, out := &in.S3EncryptionMode, &out.S3EncryptionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3EncryptionInitParameters. +func (in *S3EncryptionInitParameters) DeepCopy() *S3EncryptionInitParameters { + if in == nil { + return nil + } + out := new(S3EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *S3EncryptionObservation) DeepCopyInto(out *S3EncryptionObservation) { + *out = *in + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.S3EncryptionMode != nil { + in, out := &in.S3EncryptionMode, &out.S3EncryptionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3EncryptionObservation. +func (in *S3EncryptionObservation) DeepCopy() *S3EncryptionObservation { + if in == nil { + return nil + } + out := new(S3EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3EncryptionParameters) DeepCopyInto(out *S3EncryptionParameters) { + *out = *in + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3EncryptionMode != nil { + in, out := &in.S3EncryptionMode, &out.S3EncryptionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3EncryptionParameters. +func (in *S3EncryptionParameters) DeepCopy() *S3EncryptionParameters { + if in == nil { + return nil + } + out := new(S3EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3TargetInitParameters) DeepCopyInto(out *S3TargetInitParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.DlqEventQueueArn != nil { + in, out := &in.DlqEventQueueArn, &out.DlqEventQueueArn + *out = new(string) + **out = **in + } + if in.EventQueueArn != nil { + in, out := &in.EventQueueArn, &out.EventQueueArn + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SampleSize != nil { + in, out := &in.SampleSize, &out.SampleSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3TargetInitParameters. +func (in *S3TargetInitParameters) DeepCopy() *S3TargetInitParameters { + if in == nil { + return nil + } + out := new(S3TargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3TargetObservation) DeepCopyInto(out *S3TargetObservation) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.DlqEventQueueArn != nil { + in, out := &in.DlqEventQueueArn, &out.DlqEventQueueArn + *out = new(string) + **out = **in + } + if in.EventQueueArn != nil { + in, out := &in.EventQueueArn, &out.EventQueueArn + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SampleSize != nil { + in, out := &in.SampleSize, &out.SampleSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3TargetObservation. +func (in *S3TargetObservation) DeepCopy() *S3TargetObservation { + if in == nil { + return nil + } + out := new(S3TargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3TargetParameters) DeepCopyInto(out *S3TargetParameters) { + *out = *in + if in.ConnectionName != nil { + in, out := &in.ConnectionName, &out.ConnectionName + *out = new(string) + **out = **in + } + if in.DlqEventQueueArn != nil { + in, out := &in.DlqEventQueueArn, &out.DlqEventQueueArn + *out = new(string) + **out = **in + } + if in.EventQueueArn != nil { + in, out := &in.EventQueueArn, &out.EventQueueArn + *out = new(string) + **out = **in + } + if in.Exclusions != nil { + in, out := &in.Exclusions, &out.Exclusions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SampleSize != nil { + in, out := &in.SampleSize, &out.SampleSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3TargetParameters. +func (in *S3TargetParameters) DeepCopy() *S3TargetParameters { + if in == nil { + return nil + } + out := new(S3TargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaChangePolicyInitParameters) DeepCopyInto(out *SchemaChangePolicyInitParameters) { + *out = *in + if in.DeleteBehavior != nil { + in, out := &in.DeleteBehavior, &out.DeleteBehavior + *out = new(string) + **out = **in + } + if in.UpdateBehavior != nil { + in, out := &in.UpdateBehavior, &out.UpdateBehavior + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaChangePolicyInitParameters. 
+func (in *SchemaChangePolicyInitParameters) DeepCopy() *SchemaChangePolicyInitParameters { + if in == nil { + return nil + } + out := new(SchemaChangePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaChangePolicyObservation) DeepCopyInto(out *SchemaChangePolicyObservation) { + *out = *in + if in.DeleteBehavior != nil { + in, out := &in.DeleteBehavior, &out.DeleteBehavior + *out = new(string) + **out = **in + } + if in.UpdateBehavior != nil { + in, out := &in.UpdateBehavior, &out.UpdateBehavior + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaChangePolicyObservation. +func (in *SchemaChangePolicyObservation) DeepCopy() *SchemaChangePolicyObservation { + if in == nil { + return nil + } + out := new(SchemaChangePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaChangePolicyParameters) DeepCopyInto(out *SchemaChangePolicyParameters) { + *out = *in + if in.DeleteBehavior != nil { + in, out := &in.DeleteBehavior, &out.DeleteBehavior + *out = new(string) + **out = **in + } + if in.UpdateBehavior != nil { + in, out := &in.UpdateBehavior, &out.UpdateBehavior + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaChangePolicyParameters. +func (in *SchemaChangePolicyParameters) DeepCopy() *SchemaChangePolicyParameters { + if in == nil { + return nil + } + out := new(SchemaChangePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaIDInitParameters) DeepCopyInto(out *SchemaIDInitParameters) { + *out = *in + if in.RegistryName != nil { + in, out := &in.RegistryName, &out.RegistryName + *out = new(string) + **out = **in + } + if in.SchemaArn != nil { + in, out := &in.SchemaArn, &out.SchemaArn + *out = new(string) + **out = **in + } + if in.SchemaName != nil { + in, out := &in.SchemaName, &out.SchemaName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaIDInitParameters. +func (in *SchemaIDInitParameters) DeepCopy() *SchemaIDInitParameters { + if in == nil { + return nil + } + out := new(SchemaIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaIDObservation) DeepCopyInto(out *SchemaIDObservation) { + *out = *in + if in.RegistryName != nil { + in, out := &in.RegistryName, &out.RegistryName + *out = new(string) + **out = **in + } + if in.SchemaArn != nil { + in, out := &in.SchemaArn, &out.SchemaArn + *out = new(string) + **out = **in + } + if in.SchemaName != nil { + in, out := &in.SchemaName, &out.SchemaName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaIDObservation. +func (in *SchemaIDObservation) DeepCopy() *SchemaIDObservation { + if in == nil { + return nil + } + out := new(SchemaIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaIDParameters) DeepCopyInto(out *SchemaIDParameters) { + *out = *in + if in.RegistryName != nil { + in, out := &in.RegistryName, &out.RegistryName + *out = new(string) + **out = **in + } + if in.SchemaArn != nil { + in, out := &in.SchemaArn, &out.SchemaArn + *out = new(string) + **out = **in + } + if in.SchemaName != nil { + in, out := &in.SchemaName, &out.SchemaName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaIDParameters. +func (in *SchemaIDParameters) DeepCopy() *SchemaIDParameters { + if in == nil { + return nil + } + out := new(SchemaIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaReferenceInitParameters) DeepCopyInto(out *SchemaReferenceInitParameters) { + *out = *in + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(SchemaIDInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SchemaVersionID != nil { + in, out := &in.SchemaVersionID, &out.SchemaVersionID + *out = new(string) + **out = **in + } + if in.SchemaVersionNumber != nil { + in, out := &in.SchemaVersionNumber, &out.SchemaVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaReferenceInitParameters. +func (in *SchemaReferenceInitParameters) DeepCopy() *SchemaReferenceInitParameters { + if in == nil { + return nil + } + out := new(SchemaReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaReferenceObservation) DeepCopyInto(out *SchemaReferenceObservation) { + *out = *in + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(SchemaIDObservation) + (*in).DeepCopyInto(*out) + } + if in.SchemaVersionID != nil { + in, out := &in.SchemaVersionID, &out.SchemaVersionID + *out = new(string) + **out = **in + } + if in.SchemaVersionNumber != nil { + in, out := &in.SchemaVersionNumber, &out.SchemaVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaReferenceObservation. +func (in *SchemaReferenceObservation) DeepCopy() *SchemaReferenceObservation { + if in == nil { + return nil + } + out := new(SchemaReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaReferenceParameters) DeepCopyInto(out *SchemaReferenceParameters) { + *out = *in + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(SchemaIDParameters) + (*in).DeepCopyInto(*out) + } + if in.SchemaVersionID != nil { + in, out := &in.SchemaVersionID, &out.SchemaVersionID + *out = new(string) + **out = **in + } + if in.SchemaVersionNumber != nil { + in, out := &in.SchemaVersionNumber, &out.SchemaVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaReferenceParameters. +func (in *SchemaReferenceParameters) DeepCopy() *SchemaReferenceParameters { + if in == nil { + return nil + } + out := new(SchemaReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityConfiguration) DeepCopyInto(out *SecurityConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfiguration. +func (in *SecurityConfiguration) DeepCopy() *SecurityConfiguration { + if in == nil { + return nil + } + out := new(SecurityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigurationInitParameters) DeepCopyInto(out *SecurityConfigurationInitParameters) { + *out = *in + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigurationInitParameters. +func (in *SecurityConfigurationInitParameters) DeepCopy() *SecurityConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SecurityConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityConfigurationList) DeepCopyInto(out *SecurityConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecurityConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigurationList. +func (in *SecurityConfigurationList) DeepCopy() *SecurityConfigurationList { + if in == nil { + return nil + } + out := new(SecurityConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigurationObservation) DeepCopyInto(out *SecurityConfigurationObservation) { + *out = *in + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigurationObservation. +func (in *SecurityConfigurationObservation) DeepCopy() *SecurityConfigurationObservation { + if in == nil { + return nil + } + out := new(SecurityConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityConfigurationParameters) DeepCopyInto(out *SecurityConfigurationParameters) { + *out = *in + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigurationParameters. +func (in *SecurityConfigurationParameters) DeepCopy() *SecurityConfigurationParameters { + if in == nil { + return nil + } + out := new(SecurityConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigurationSpec) DeepCopyInto(out *SecurityConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigurationSpec. +func (in *SecurityConfigurationSpec) DeepCopy() *SecurityConfigurationSpec { + if in == nil { + return nil + } + out := new(SecurityConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigurationStatus) DeepCopyInto(out *SecurityConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigurationStatus. 
+func (in *SecurityConfigurationStatus) DeepCopy() *SecurityConfigurationStatus { + if in == nil { + return nil + } + out := new(SecurityConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerDeInfoInitParameters) DeepCopyInto(out *SerDeInfoInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SerializationLibrary != nil { + in, out := &in.SerializationLibrary, &out.SerializationLibrary + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerDeInfoInitParameters. +func (in *SerDeInfoInitParameters) DeepCopy() *SerDeInfoInitParameters { + if in == nil { + return nil + } + out := new(SerDeInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SerDeInfoObservation) DeepCopyInto(out *SerDeInfoObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SerializationLibrary != nil { + in, out := &in.SerializationLibrary, &out.SerializationLibrary + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerDeInfoObservation. +func (in *SerDeInfoObservation) DeepCopy() *SerDeInfoObservation { + if in == nil { + return nil + } + out := new(SerDeInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerDeInfoParameters) DeepCopyInto(out *SerDeInfoParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SerializationLibrary != nil { + in, out := &in.SerializationLibrary, &out.SerializationLibrary + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerDeInfoParameters. 
+func (in *SerDeInfoParameters) DeepCopy() *SerDeInfoParameters { + if in == nil { + return nil + } + out := new(SerDeInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkewedInfoInitParameters) DeepCopyInto(out *SkewedInfoInitParameters) { + *out = *in + if in.SkewedColumnNames != nil { + in, out := &in.SkewedColumnNames, &out.SkewedColumnNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SkewedColumnValueLocationMaps != nil { + in, out := &in.SkewedColumnValueLocationMaps, &out.SkewedColumnValueLocationMaps + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SkewedColumnValues != nil { + in, out := &in.SkewedColumnValues, &out.SkewedColumnValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkewedInfoInitParameters. +func (in *SkewedInfoInitParameters) DeepCopy() *SkewedInfoInitParameters { + if in == nil { + return nil + } + out := new(SkewedInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SkewedInfoObservation) DeepCopyInto(out *SkewedInfoObservation) { + *out = *in + if in.SkewedColumnNames != nil { + in, out := &in.SkewedColumnNames, &out.SkewedColumnNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SkewedColumnValueLocationMaps != nil { + in, out := &in.SkewedColumnValueLocationMaps, &out.SkewedColumnValueLocationMaps + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SkewedColumnValues != nil { + in, out := &in.SkewedColumnValues, &out.SkewedColumnValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkewedInfoObservation. +func (in *SkewedInfoObservation) DeepCopy() *SkewedInfoObservation { + if in == nil { + return nil + } + out := new(SkewedInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SkewedInfoParameters) DeepCopyInto(out *SkewedInfoParameters) { + *out = *in + if in.SkewedColumnNames != nil { + in, out := &in.SkewedColumnNames, &out.SkewedColumnNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SkewedColumnValueLocationMaps != nil { + in, out := &in.SkewedColumnValueLocationMaps, &out.SkewedColumnValueLocationMaps + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SkewedColumnValues != nil { + in, out := &in.SkewedColumnValues, &out.SkewedColumnValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkewedInfoParameters. +func (in *SkewedInfoParameters) DeepCopy() *SkewedInfoParameters { + if in == nil { + return nil + } + out := new(SkewedInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SortColumnsInitParameters) DeepCopyInto(out *SortColumnsInitParameters) { + *out = *in + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = new(string) + **out = **in + } + if in.SortOrder != nil { + in, out := &in.SortOrder, &out.SortOrder + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SortColumnsInitParameters. 
+func (in *SortColumnsInitParameters) DeepCopy() *SortColumnsInitParameters { + if in == nil { + return nil + } + out := new(SortColumnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SortColumnsObservation) DeepCopyInto(out *SortColumnsObservation) { + *out = *in + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = new(string) + **out = **in + } + if in.SortOrder != nil { + in, out := &in.SortOrder, &out.SortOrder + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SortColumnsObservation. +func (in *SortColumnsObservation) DeepCopy() *SortColumnsObservation { + if in == nil { + return nil + } + out := new(SortColumnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SortColumnsParameters) DeepCopyInto(out *SortColumnsParameters) { + *out = *in + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = new(string) + **out = **in + } + if in.SortOrder != nil { + in, out := &in.SortOrder, &out.SortOrder + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SortColumnsParameters. +func (in *SortColumnsParameters) DeepCopy() *SortColumnsParameters { + if in == nil { + return nil + } + out := new(SortColumnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageDescriptorInitParameters) DeepCopyInto(out *StorageDescriptorInitParameters) { + *out = *in + if in.BucketColumns != nil { + in, out := &in.BucketColumns, &out.BucketColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]ColumnsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Compressed != nil { + in, out := &in.Compressed, &out.Compressed + *out = new(bool) + **out = **in + } + if in.InputFormat != nil { + in, out := &in.InputFormat, &out.InputFormat + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NumberOfBuckets != nil { + in, out := &in.NumberOfBuckets, &out.NumberOfBuckets + *out = new(float64) + **out = **in + } + if in.OutputFormat != nil { + in, out := &in.OutputFormat, &out.OutputFormat + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaReference != nil { + in, out := &in.SchemaReference, &out.SchemaReference + *out = new(SchemaReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SerDeInfo != nil { + in, out := &in.SerDeInfo, &out.SerDeInfo + *out = new(SerDeInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkewedInfo != nil { + in, out := &in.SkewedInfo, &out.SkewedInfo + *out = new(SkewedInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SortColumns != nil { + in, out := &in.SortColumns, &out.SortColumns + *out = 
make([]SortColumnsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StoredAsSubDirectories != nil { + in, out := &in.StoredAsSubDirectories, &out.StoredAsSubDirectories + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageDescriptorInitParameters. +func (in *StorageDescriptorInitParameters) DeepCopy() *StorageDescriptorInitParameters { + if in == nil { + return nil + } + out := new(StorageDescriptorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageDescriptorObservation) DeepCopyInto(out *StorageDescriptorObservation) { + *out = *in + if in.BucketColumns != nil { + in, out := &in.BucketColumns, &out.BucketColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]ColumnsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Compressed != nil { + in, out := &in.Compressed, &out.Compressed + *out = new(bool) + **out = **in + } + if in.InputFormat != nil { + in, out := &in.InputFormat, &out.InputFormat + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NumberOfBuckets != nil { + in, out := &in.NumberOfBuckets, &out.NumberOfBuckets + *out = new(float64) + **out = **in + } + if in.OutputFormat != nil { + in, out := &in.OutputFormat, &out.OutputFormat + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val 
== nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaReference != nil { + in, out := &in.SchemaReference, &out.SchemaReference + *out = new(SchemaReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.SerDeInfo != nil { + in, out := &in.SerDeInfo, &out.SerDeInfo + *out = new(SerDeInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.SkewedInfo != nil { + in, out := &in.SkewedInfo, &out.SkewedInfo + *out = new(SkewedInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.SortColumns != nil { + in, out := &in.SortColumns, &out.SortColumns + *out = make([]SortColumnsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StoredAsSubDirectories != nil { + in, out := &in.StoredAsSubDirectories, &out.StoredAsSubDirectories + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageDescriptorObservation. +func (in *StorageDescriptorObservation) DeepCopy() *StorageDescriptorObservation { + if in == nil { + return nil + } + out := new(StorageDescriptorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageDescriptorParameters) DeepCopyInto(out *StorageDescriptorParameters) { + *out = *in + if in.BucketColumns != nil { + in, out := &in.BucketColumns, &out.BucketColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]ColumnsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Compressed != nil { + in, out := &in.Compressed, &out.Compressed + *out = new(bool) + **out = **in + } + if in.InputFormat != nil { + in, out := &in.InputFormat, &out.InputFormat + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NumberOfBuckets != nil { + in, out := &in.NumberOfBuckets, &out.NumberOfBuckets + *out = new(float64) + **out = **in + } + if in.OutputFormat != nil { + in, out := &in.OutputFormat, &out.OutputFormat + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaReference != nil { + in, out := &in.SchemaReference, &out.SchemaReference + *out = new(SchemaReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.SerDeInfo != nil { + in, out := &in.SerDeInfo, &out.SerDeInfo + *out = new(SerDeInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.SkewedInfo != nil { + in, out := &in.SkewedInfo, &out.SkewedInfo + *out = new(SkewedInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.SortColumns != nil { + in, out := &in.SortColumns, &out.SortColumns + *out = make([]SortColumnsParameters, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StoredAsSubDirectories != nil { + in, out := &in.StoredAsSubDirectories, &out.StoredAsSubDirectories + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageDescriptorParameters. +func (in *StorageDescriptorParameters) DeepCopy() *StorageDescriptorParameters { + if in == nil { + return nil + } + out := new(StorageDescriptorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetDatabaseInitParameters) DeepCopyInto(out *TargetDatabaseInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetDatabaseInitParameters. +func (in *TargetDatabaseInitParameters) DeepCopy() *TargetDatabaseInitParameters { + if in == nil { + return nil + } + out := new(TargetDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetDatabaseObservation) DeepCopyInto(out *TargetDatabaseObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetDatabaseObservation. 
+func (in *TargetDatabaseObservation) DeepCopy() *TargetDatabaseObservation { + if in == nil { + return nil + } + out := new(TargetDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetDatabaseParameters) DeepCopyInto(out *TargetDatabaseParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetDatabaseParameters. +func (in *TargetDatabaseParameters) DeepCopy() *TargetDatabaseParameters { + if in == nil { + return nil + } + out := new(TargetDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetTableInitParameters) DeepCopyInto(out *TargetTableInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTableInitParameters. +func (in *TargetTableInitParameters) DeepCopy() *TargetTableInitParameters { + if in == nil { + return nil + } + out := new(TargetTableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetTableObservation) DeepCopyInto(out *TargetTableObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTableObservation. +func (in *TargetTableObservation) DeepCopy() *TargetTableObservation { + if in == nil { + return nil + } + out := new(TargetTableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetTableParameters) DeepCopyInto(out *TargetTableParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetTableParameters. +func (in *TargetTableParameters) DeepCopy() *TargetTableParameters { + if in == nil { + return nil + } + out := new(TargetTableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Trigger) DeepCopyInto(out *Trigger) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Trigger. +func (in *Trigger) DeepCopy() *Trigger { + if in == nil { + return nil + } + out := new(Trigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Trigger) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerInitParameters) DeepCopyInto(out *TriggerInitParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]ActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventBatchingCondition != nil { + in, out := &in.EventBatchingCondition, &out.EventBatchingCondition + *out = make([]EventBatchingConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Predicate != nil { + in, out := &in.Predicate, &out.Predicate + *out = new(PredicateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.StartOnCreation != nil { + in, out := &in.StartOnCreation, &out.StartOnCreation + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) 
+ for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WorkflowName != nil { + in, out := &in.WorkflowName, &out.WorkflowName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerInitParameters. +func (in *TriggerInitParameters) DeepCopy() *TriggerInitParameters { + if in == nil { + return nil + } + out := new(TriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerList) DeepCopyInto(out *TriggerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Trigger, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerList. +func (in *TriggerList) DeepCopy() *TriggerList { + if in == nil { + return nil + } + out := new(TriggerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TriggerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerObservation) DeepCopyInto(out *TriggerObservation) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]ActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventBatchingCondition != nil { + in, out := &in.EventBatchingCondition, &out.EventBatchingCondition + *out = make([]EventBatchingConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Predicate != nil { + in, out := &in.Predicate, &out.Predicate + *out = new(PredicateObservation) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.StartOnCreation != nil { + in, out := &in.StartOnCreation, &out.StartOnCreation + *out = new(bool) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WorkflowName != nil { + in, out := &in.WorkflowName, &out.WorkflowName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerObservation. +func (in *TriggerObservation) DeepCopy() *TriggerObservation { + if in == nil { + return nil + } + out := new(TriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerParameters) DeepCopyInto(out *TriggerParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]ActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventBatchingCondition != nil { + in, out := &in.EventBatchingCondition, &out.EventBatchingCondition + *out = make([]EventBatchingConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Predicate != nil { + in, out := &in.Predicate, &out.Predicate + *out = new(PredicateParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.StartOnCreation != nil { + in, out := &in.StartOnCreation, &out.StartOnCreation + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil 
+ } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WorkflowName != nil { + in, out := &in.WorkflowName, &out.WorkflowName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerParameters. +func (in *TriggerParameters) DeepCopy() *TriggerParameters { + if in == nil { + return nil + } + out := new(TriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerSpec) DeepCopyInto(out *TriggerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSpec. +func (in *TriggerSpec) DeepCopy() *TriggerSpec { + if in == nil { + return nil + } + out := new(TriggerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerStatus) DeepCopyInto(out *TriggerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerStatus. +func (in *TriggerStatus) DeepCopy() *TriggerStatus { + if in == nil { + return nil + } + out := new(TriggerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *XMLClassifierInitParameters) DeepCopyInto(out *XMLClassifierInitParameters) { + *out = *in + if in.Classification != nil { + in, out := &in.Classification, &out.Classification + *out = new(string) + **out = **in + } + if in.RowTag != nil { + in, out := &in.RowTag, &out.RowTag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XMLClassifierInitParameters. +func (in *XMLClassifierInitParameters) DeepCopy() *XMLClassifierInitParameters { + if in == nil { + return nil + } + out := new(XMLClassifierInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XMLClassifierObservation) DeepCopyInto(out *XMLClassifierObservation) { + *out = *in + if in.Classification != nil { + in, out := &in.Classification, &out.Classification + *out = new(string) + **out = **in + } + if in.RowTag != nil { + in, out := &in.RowTag, &out.RowTag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XMLClassifierObservation. +func (in *XMLClassifierObservation) DeepCopy() *XMLClassifierObservation { + if in == nil { + return nil + } + out := new(XMLClassifierObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XMLClassifierParameters) DeepCopyInto(out *XMLClassifierParameters) { + *out = *in + if in.Classification != nil { + in, out := &in.Classification, &out.Classification + *out = new(string) + **out = **in + } + if in.RowTag != nil { + in, out := &in.RowTag, &out.RowTag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XMLClassifierParameters. 
+func (in *XMLClassifierParameters) DeepCopy() *XMLClassifierParameters { + if in == nil { + return nil + } + out := new(XMLClassifierParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/glue/v1beta2/zz_generated.managed.go b/apis/glue/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..22e37bd651 --- /dev/null +++ b/apis/glue/v1beta2/zz_generated.managed.go @@ -0,0 +1,548 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CatalogDatabase. +func (mg *CatalogDatabase) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CatalogDatabase. +func (mg *CatalogDatabase) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CatalogDatabase. +func (mg *CatalogDatabase) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CatalogDatabase. +func (mg *CatalogDatabase) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CatalogDatabase. +func (mg *CatalogDatabase) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CatalogDatabase. +func (mg *CatalogDatabase) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CatalogDatabase. +func (mg *CatalogDatabase) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CatalogDatabase. 
+func (mg *CatalogDatabase) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CatalogDatabase. +func (mg *CatalogDatabase) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CatalogDatabase. +func (mg *CatalogDatabase) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CatalogDatabase. +func (mg *CatalogDatabase) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CatalogDatabase. +func (mg *CatalogDatabase) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CatalogTable. +func (mg *CatalogTable) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CatalogTable. +func (mg *CatalogTable) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CatalogTable. +func (mg *CatalogTable) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CatalogTable. +func (mg *CatalogTable) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CatalogTable. +func (mg *CatalogTable) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CatalogTable. +func (mg *CatalogTable) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CatalogTable. 
+func (mg *CatalogTable) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CatalogTable. +func (mg *CatalogTable) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CatalogTable. +func (mg *CatalogTable) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CatalogTable. +func (mg *CatalogTable) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CatalogTable. +func (mg *CatalogTable) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CatalogTable. +func (mg *CatalogTable) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Classifier. +func (mg *Classifier) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Classifier. +func (mg *Classifier) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Classifier. +func (mg *Classifier) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Classifier. +func (mg *Classifier) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Classifier. +func (mg *Classifier) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Classifier. 
+func (mg *Classifier) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Classifier. +func (mg *Classifier) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Classifier. +func (mg *Classifier) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Classifier. +func (mg *Classifier) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Classifier. +func (mg *Classifier) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Classifier. +func (mg *Classifier) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Classifier. +func (mg *Classifier) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Connection. +func (mg *Connection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Connection. +func (mg *Connection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Connection. +func (mg *Connection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Connection. +func (mg *Connection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Connection. 
+func (mg *Connection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Connection. +func (mg *Connection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Connection. +func (mg *Connection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Connection. +func (mg *Connection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Connection. +func (mg *Connection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Connection. +func (mg *Connection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Connection. +func (mg *Connection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Connection. +func (mg *Connection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Crawler. +func (mg *Crawler) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Crawler. +func (mg *Crawler) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Crawler. +func (mg *Crawler) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Crawler. +func (mg *Crawler) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Crawler. 
+func (mg *Crawler) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Crawler. +func (mg *Crawler) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Crawler. +func (mg *Crawler) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Crawler. +func (mg *Crawler) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Crawler. +func (mg *Crawler) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Crawler. +func (mg *Crawler) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Crawler. +func (mg *Crawler) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Crawler. +func (mg *Crawler) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataCatalogEncryptionSettings. 
+func (mg *DataCatalogEncryptionSettings) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Job. +func (mg *Job) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Job. 
+func (mg *Job) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Job. +func (mg *Job) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Job. +func (mg *Job) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Job. +func (mg *Job) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Job. +func (mg *Job) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Job. +func (mg *Job) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Job. +func (mg *Job) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Job. +func (mg *Job) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Job. +func (mg *Job) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Job. +func (mg *Job) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Job. +func (mg *Job) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SecurityConfiguration. +func (mg *SecurityConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SecurityConfiguration. 
+func (mg *SecurityConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SecurityConfiguration. +func (mg *SecurityConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SecurityConfiguration. +func (mg *SecurityConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SecurityConfiguration. +func (mg *SecurityConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SecurityConfiguration. +func (mg *SecurityConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SecurityConfiguration. +func (mg *SecurityConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SecurityConfiguration. +func (mg *SecurityConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SecurityConfiguration. +func (mg *SecurityConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SecurityConfiguration. +func (mg *SecurityConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SecurityConfiguration. +func (mg *SecurityConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SecurityConfiguration. 
+func (mg *SecurityConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Trigger. +func (mg *Trigger) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Trigger. +func (mg *Trigger) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Trigger. +func (mg *Trigger) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Trigger. +func (mg *Trigger) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Trigger. +func (mg *Trigger) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Trigger. +func (mg *Trigger) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Trigger. +func (mg *Trigger) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Trigger. +func (mg *Trigger) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Trigger. +func (mg *Trigger) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Trigger. +func (mg *Trigger) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Trigger. +func (mg *Trigger) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Trigger. 
+func (mg *Trigger) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/glue/v1beta2/zz_generated.managedlist.go b/apis/glue/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..84a70ee1b6 --- /dev/null +++ b/apis/glue/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,89 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CatalogDatabaseList. +func (l *CatalogDatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CatalogTableList. +func (l *CatalogTableList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ClassifierList. +func (l *ClassifierList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ConnectionList. +func (l *ConnectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CrawlerList. +func (l *CrawlerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DataCatalogEncryptionSettingsList. 
+func (l *DataCatalogEncryptionSettingsList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this JobList. +func (l *JobList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SecurityConfigurationList. +func (l *SecurityConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TriggerList. +func (l *TriggerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/glue/v1beta2/zz_generated.resolvers.go b/apis/glue/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..f575078228 --- /dev/null +++ b/apis/glue/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,856 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *CatalogTable) ResolveReferences( // ResolveReferences of this CatalogTable. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogDatabase", "CatalogDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DatabaseNameRef, + Selector: mg.Spec.ForProvider.DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DatabaseName") + } + mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Connection. 
+func (mg *Connection) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.PhysicalConnectionRequirements != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PhysicalConnectionRequirements.AvailabilityZone), + Extract: resource.ExtractParamPath("availability_zone", false), + Reference: mg.Spec.ForProvider.PhysicalConnectionRequirements.AvailabilityZoneRef, + Selector: mg.Spec.ForProvider.PhysicalConnectionRequirements.AvailabilityZoneSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PhysicalConnectionRequirements.AvailabilityZone") + } + mg.Spec.ForProvider.PhysicalConnectionRequirements.AvailabilityZone = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PhysicalConnectionRequirements.AvailabilityZoneRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.PhysicalConnectionRequirements != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PhysicalConnectionRequirements.SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PhysicalConnectionRequirements.SubnetIDRef, + Selector: 
mg.Spec.ForProvider.PhysicalConnectionRequirements.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PhysicalConnectionRequirements.SubnetID") + } + mg.Spec.ForProvider.PhysicalConnectionRequirements.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PhysicalConnectionRequirements.SubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.PhysicalConnectionRequirements != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PhysicalConnectionRequirements.AvailabilityZone), + Extract: resource.ExtractParamPath("availability_zone", false), + Reference: mg.Spec.InitProvider.PhysicalConnectionRequirements.AvailabilityZoneRef, + Selector: mg.Spec.InitProvider.PhysicalConnectionRequirements.AvailabilityZoneSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PhysicalConnectionRequirements.AvailabilityZone") + } + mg.Spec.InitProvider.PhysicalConnectionRequirements.AvailabilityZone = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PhysicalConnectionRequirements.AvailabilityZoneRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.PhysicalConnectionRequirements != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.PhysicalConnectionRequirements.SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PhysicalConnectionRequirements.SubnetIDRef, + Selector: mg.Spec.InitProvider.PhysicalConnectionRequirements.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PhysicalConnectionRequirements.SubnetID") + } + mg.Spec.InitProvider.PhysicalConnectionRequirements.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PhysicalConnectionRequirements.SubnetIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Crawler. +func (mg *Crawler) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.CatalogTarget); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogDatabase", "CatalogDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CatalogTarget[i3].DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CatalogTarget[i3].DatabaseNameRef, + Selector: mg.Spec.ForProvider.CatalogTarget[i3].DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CatalogTarget[i3].DatabaseName") + } + mg.Spec.ForProvider.CatalogTarget[i3].DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CatalogTarget[i3].DatabaseNameRef = rsp.ResolvedReference + + } + { + m, l, err = 
apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogDatabase", "CatalogDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DatabaseNameRef, + Selector: mg.Spec.ForProvider.DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DatabaseName") + } + mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.JdbcTarget); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Connection", "ConnectionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.JdbcTarget[i3].ConnectionName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.JdbcTarget[i3].ConnectionNameRef, + Selector: mg.Spec.ForProvider.JdbcTarget[i3].ConnectionNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.JdbcTarget[i3].ConnectionName") + } + mg.Spec.ForProvider.JdbcTarget[i3].ConnectionName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.JdbcTarget[i3].ConnectionNameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.MongodbTarget); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Connection", "ConnectionList") + if err != nil { + return errors.Wrap(err, 
"failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MongodbTarget[i3].ConnectionName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.MongodbTarget[i3].ConnectionNameRef, + Selector: mg.Spec.ForProvider.MongodbTarget[i3].ConnectionNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MongodbTarget[i3].ConnectionName") + } + mg.Spec.ForProvider.MongodbTarget[i3].ConnectionName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MongodbTarget[i3].ConnectionNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Role), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleRef, + Selector: mg.Spec.ForProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Role") + } + mg.Spec.ForProvider.Role = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.CatalogTarget); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogDatabase", "CatalogDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CatalogTarget[i3].DatabaseName), + 
Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CatalogTarget[i3].DatabaseNameRef, + Selector: mg.Spec.InitProvider.CatalogTarget[i3].DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CatalogTarget[i3].DatabaseName") + } + mg.Spec.InitProvider.CatalogTarget[i3].DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CatalogTarget[i3].DatabaseNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogDatabase", "CatalogDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DatabaseNameRef, + Selector: mg.Spec.InitProvider.DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DatabaseName") + } + mg.Spec.InitProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DatabaseNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.JdbcTarget); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Connection", "ConnectionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.JdbcTarget[i3].ConnectionName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.JdbcTarget[i3].ConnectionNameRef, + Selector: mg.Spec.InitProvider.JdbcTarget[i3].ConnectionNameSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.JdbcTarget[i3].ConnectionName") + } + mg.Spec.InitProvider.JdbcTarget[i3].ConnectionName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.JdbcTarget[i3].ConnectionNameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.MongodbTarget); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Connection", "ConnectionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MongodbTarget[i3].ConnectionName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.MongodbTarget[i3].ConnectionNameRef, + Selector: mg.Spec.InitProvider.MongodbTarget[i3].ConnectionNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MongodbTarget[i3].ConnectionName") + } + mg.Spec.InitProvider.MongodbTarget[i3].ConnectionName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MongodbTarget[i3].ConnectionNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Role), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleRef, + Selector: mg.Spec.InitProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Role") + } + mg.Spec.InitProvider.Role = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DataCatalogEncryptionSettings. +func (mg *DataCatalogEncryptionSettings) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.DataCatalogEncryptionSettings != nil { + if mg.Spec.ForProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyIDRef, + Selector: mg.Spec.ForProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyID") + } + mg.Spec.ForProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.DataCatalogEncryptionSettings != nil { + if mg.Spec.ForProvider.DataCatalogEncryptionSettings.EncryptionAtRest != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil 
{ + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyIDRef, + Selector: mg.Spec.ForProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyID") + } + mg.Spec.ForProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.DataCatalogEncryptionSettings != nil { + if mg.Spec.InitProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyIDRef, + Selector: mg.Spec.InitProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyID") + } + mg.Spec.InitProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataCatalogEncryptionSettings.ConnectionPasswordEncryption.AwsKMSKeyIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.DataCatalogEncryptionSettings != nil { + if mg.Spec.InitProvider.DataCatalogEncryptionSettings.EncryptionAtRest != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyIDRef, + Selector: mg.Spec.InitProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyID") + } + mg.Spec.InitProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKMSKeyIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this Job. 
+func (mg *Job) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SecurityConfiguration. 
+func (mg *SecurityConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.EncryptionConfiguration != nil { + if mg.Spec.ForProvider.EncryptionConfiguration.CloudwatchEncryption != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArn") + } + mg.Spec.ForProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.EncryptionConfiguration != nil { + if mg.Spec.ForProvider.EncryptionConfiguration.JobBookmarksEncryption != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArn), + 
Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArn") + } + mg.Spec.ForProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.EncryptionConfiguration != nil { + if mg.Spec.ForProvider.EncryptionConfiguration.S3Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EncryptionConfiguration.S3Encryption.KMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.EncryptionConfiguration.S3Encryption.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.EncryptionConfiguration.S3Encryption.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EncryptionConfiguration.S3Encryption.KMSKeyArn") + } + mg.Spec.ForProvider.EncryptionConfiguration.S3Encryption.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EncryptionConfiguration.S3Encryption.KMSKeyArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.EncryptionConfiguration != nil { + if mg.Spec.InitProvider.EncryptionConfiguration.CloudwatchEncryption != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", 
"KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArn") + } + mg.Spec.InitProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EncryptionConfiguration.CloudwatchEncryption.KMSKeyArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.EncryptionConfiguration != nil { + if mg.Spec.InitProvider.EncryptionConfiguration.JobBookmarksEncryption != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArn") + } + 
mg.Spec.InitProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EncryptionConfiguration.JobBookmarksEncryption.KMSKeyArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.EncryptionConfiguration != nil { + if mg.Spec.InitProvider.EncryptionConfiguration.S3Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EncryptionConfiguration.S3Encryption.KMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.EncryptionConfiguration.S3Encryption.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.EncryptionConfiguration.S3Encryption.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EncryptionConfiguration.S3Encryption.KMSKeyArn") + } + mg.Spec.InitProvider.EncryptionConfiguration.S3Encryption.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EncryptionConfiguration.S3Encryption.KMSKeyArnRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this Trigger. 
+func (mg *Trigger) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Actions); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Crawler", "CrawlerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Actions[i3].CrawlerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Actions[i3].CrawlerNameRef, + Selector: mg.Spec.ForProvider.Actions[i3].CrawlerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Actions[i3].CrawlerName") + } + mg.Spec.ForProvider.Actions[i3].CrawlerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Actions[i3].CrawlerNameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Actions); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Actions[i3].JobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Actions[i3].JobNameRef, + Selector: mg.Spec.ForProvider.Actions[i3].JobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Actions[i3].JobName") + } + mg.Spec.ForProvider.Actions[i3].JobName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Actions[i3].JobNameRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Predicate != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Predicate.Conditions); i4++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Crawler", "CrawlerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Predicate.Conditions[i4].CrawlerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Predicate.Conditions[i4].CrawlerNameRef, + Selector: mg.Spec.ForProvider.Predicate.Conditions[i4].CrawlerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Predicate.Conditions[i4].CrawlerName") + } + mg.Spec.ForProvider.Predicate.Conditions[i4].CrawlerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Predicate.Conditions[i4].CrawlerNameRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Predicate != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Predicate.Conditions); i4++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Predicate.Conditions[i4].JobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Predicate.Conditions[i4].JobNameRef, + Selector: mg.Spec.ForProvider.Predicate.Conditions[i4].JobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Predicate.Conditions[i4].JobName") + } + 
mg.Spec.ForProvider.Predicate.Conditions[i4].JobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Predicate.Conditions[i4].JobNameRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Actions); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Crawler", "CrawlerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Actions[i3].CrawlerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Actions[i3].CrawlerNameRef, + Selector: mg.Spec.InitProvider.Actions[i3].CrawlerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Actions[i3].CrawlerName") + } + mg.Spec.InitProvider.Actions[i3].CrawlerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Actions[i3].CrawlerNameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Actions); i3++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Actions[i3].JobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Actions[i3].JobNameRef, + Selector: mg.Spec.InitProvider.Actions[i3].JobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Actions[i3].JobName") + } + mg.Spec.InitProvider.Actions[i3].JobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Actions[i3].JobNameRef = 
rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Predicate != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Predicate.Conditions); i4++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Crawler", "CrawlerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Predicate.Conditions[i4].CrawlerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Predicate.Conditions[i4].CrawlerNameRef, + Selector: mg.Spec.InitProvider.Predicate.Conditions[i4].CrawlerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Predicate.Conditions[i4].CrawlerName") + } + mg.Spec.InitProvider.Predicate.Conditions[i4].CrawlerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Predicate.Conditions[i4].CrawlerNameRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Predicate != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Predicate.Conditions); i4++ { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Predicate.Conditions[i4].JobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Predicate.Conditions[i4].JobNameRef, + Selector: mg.Spec.InitProvider.Predicate.Conditions[i4].JobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Predicate.Conditions[i4].JobName") + } + 
mg.Spec.InitProvider.Predicate.Conditions[i4].JobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Predicate.Conditions[i4].JobNameRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/glue/v1beta2/zz_groupversion_info.go b/apis/glue/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..4098e35512 --- /dev/null +++ b/apis/glue/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=glue.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "glue.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/glue/v1beta2/zz_job_terraformed.go b/apis/glue/v1beta2/zz_job_terraformed.go new file mode 100755 index 0000000000..df58dff9da --- /dev/null +++ b/apis/glue/v1beta2/zz_job_terraformed.go @@ -0,0 +1,132 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Job +func (mg *Job) GetTerraformResourceType() string { + return "aws_glue_job" +} + +// GetConnectionDetailsMapping for this Job +func (tr *Job) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Job +func (tr *Job) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Job +func (tr *Job) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Job +func (tr *Job) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Job +func (tr *Job) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Job +func (tr *Job) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Job +func (tr *Job) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Job +func (tr *Job) 
GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Job using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Job) LateInitialize(attrs []byte) (bool, error) { + params := &JobParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("MaxCapacity")) + opts = append(opts, resource.WithNameFilter("NumberOfWorkers")) + opts = append(opts, resource.WithNameFilter("WorkerType")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Job) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glue/v1beta2/zz_job_types.go b/apis/glue/v1beta2/zz_job_types.go new file mode 100755 index 0000000000..ddce784dad --- /dev/null +++ b/apis/glue/v1beta2/zz_job_types.go @@ -0,0 +1,385 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CommandInitParameters struct { + + // – The name you assign to this job. It must be unique in your account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Python version being used to execute a Python shell job. Allowed values are 2, 3 or 3.9. Version 3 refers to Python 3.6. + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // In Ray jobs, runtime is used to specify the versions of Ray, Python and additional libraries available in your environment. This field is not used in other job types. For supported runtime environment values, see Working with Ray jobs in the Glue Developer Guide. + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Specifies the S3 path to a script that executes a job. + ScriptLocation *string `json:"scriptLocation,omitempty" tf:"script_location,omitempty"` +} + +type CommandObservation struct { + + // – The name you assign to this job. It must be unique in your account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Python version being used to execute a Python shell job. Allowed values are 2, 3 or 3.9. Version 3 refers to Python 3.6. 
+ PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // In Ray jobs, runtime is used to specify the versions of Ray, Python and additional libraries available in your environment. This field is not used in other job types. For supported runtime environment values, see Working with Ray jobs in the Glue Developer Guide. + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Specifies the S3 path to a script that executes a job. + ScriptLocation *string `json:"scriptLocation,omitempty" tf:"script_location,omitempty"` +} + +type CommandParameters struct { + + // – The name you assign to this job. It must be unique in your account. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Python version being used to execute a Python shell job. Allowed values are 2, 3 or 3.9. Version 3 refers to Python 3.6. + // +kubebuilder:validation:Optional + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // In Ray jobs, runtime is used to specify the versions of Ray, Python and additional libraries available in your environment. This field is not used in other job types. For supported runtime environment values, see Working with Ray jobs in the Glue Developer Guide. + // +kubebuilder:validation:Optional + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Specifies the S3 path to a script that executes a job. + // +kubebuilder:validation:Optional + ScriptLocation *string `json:"scriptLocation" tf:"script_location,omitempty"` +} + +type ExecutionPropertyInitParameters struct { + + // The maximum number of concurrent runs allowed for a job. The default is 1. + MaxConcurrentRuns *float64 `json:"maxConcurrentRuns,omitempty" tf:"max_concurrent_runs,omitempty"` +} + +type ExecutionPropertyObservation struct { + + // The maximum number of concurrent runs allowed for a job. The default is 1. 
+ MaxConcurrentRuns *float64 `json:"maxConcurrentRuns,omitempty" tf:"max_concurrent_runs,omitempty"` +} + +type ExecutionPropertyParameters struct { + + // The maximum number of concurrent runs allowed for a job. The default is 1. + // +kubebuilder:validation:Optional + MaxConcurrentRuns *float64 `json:"maxConcurrentRuns,omitempty" tf:"max_concurrent_runs,omitempty"` +} + +type JobInitParameters struct { + + // – The command of the job. Defined below. + Command *CommandInitParameters `json:"command,omitempty" tf:"command,omitempty"` + + // – The list of connections used for this job. + Connections []*string `json:"connections,omitempty" tf:"connections,omitempty"` + + // execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide. + // +mapType=granular + DefaultArguments map[string]*string `json:"defaultArguments,omitempty" tf:"default_arguments,omitempty"` + + // – Description of the job. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: FLEX, STANDARD. + ExecutionClass *string `json:"executionClass,omitempty" tf:"execution_class,omitempty"` + + // – Execution property of the job. Defined below. + ExecutionProperty *ExecutionPropertyInitParameters `json:"executionProperty,omitempty" tf:"execution_property,omitempty"` + + // The version of glue to use, for example "1.0". Ray jobs should set this to 4.0 or greater. For information about available versions, see the AWS Glue Release Notes. 
+ GlueVersion *string `json:"glueVersion,omitempty" tf:"glue_version,omitempty"` + + // – The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. Required when pythonshell is set, accept either 0.0625 or 1.0. Use number_of_workers and worker_type arguments instead with glue_version 2.0 and above. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // – The maximum number of times to retry this job if it fails. + MaxRetries *float64 `json:"maxRetries,omitempty" tf:"max_retries,omitempty"` + + // overridable arguments for this job, specified as name-value pairs. + // +mapType=granular + NonOverridableArguments map[string]*string `json:"nonOverridableArguments,omitempty" tf:"non_overridable_arguments,omitempty"` + + // Notification property of the job. Defined below. + NotificationProperty *NotificationPropertyInitParameters `json:"notificationProperty,omitempty" tf:"notification_property,omitempty"` + + // The number of workers of a defined workerType that are allocated when a job runs. + NumberOfWorkers *float64 `json:"numberOfWorkers,omitempty" tf:"number_of_workers,omitempty"` + + // – The ARN of the IAM role associated with this job. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The name of the Security Configuration to be associated with the job. 
+ SecurityConfiguration *string `json:"securityConfiguration,omitempty" tf:"security_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // – The job timeout in minutes. The default is 2880 minutes (48 hours) for glueetl and pythonshell jobs, and null (unlimited) for gluestreaming jobs. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. + WorkerType *string `json:"workerType,omitempty" tf:"worker_type,omitempty"` +} + +type JobObservation struct { + + // Amazon Resource Name (ARN) of Glue Job + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // – The command of the job. Defined below. + Command *CommandObservation `json:"command,omitempty" tf:"command,omitempty"` + + // – The list of connections used for this job. + Connections []*string `json:"connections,omitempty" tf:"connections,omitempty"` + + // execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide. + // +mapType=granular + DefaultArguments map[string]*string `json:"defaultArguments,omitempty" tf:"default_arguments,omitempty"` + + // – Description of the job. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: FLEX, STANDARD. 
+ ExecutionClass *string `json:"executionClass,omitempty" tf:"execution_class,omitempty"` + + // – Execution property of the job. Defined below. + ExecutionProperty *ExecutionPropertyObservation `json:"executionProperty,omitempty" tf:"execution_property,omitempty"` + + // The version of glue to use, for example "1.0". Ray jobs should set this to 4.0 or greater. For information about available versions, see the AWS Glue Release Notes. + GlueVersion *string `json:"glueVersion,omitempty" tf:"glue_version,omitempty"` + + // Job name + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // – The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. Required when pythonshell is set, accept either 0.0625 or 1.0. Use number_of_workers and worker_type arguments instead with glue_version 2.0 and above. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // – The maximum number of times to retry this job if it fails. + MaxRetries *float64 `json:"maxRetries,omitempty" tf:"max_retries,omitempty"` + + // overridable arguments for this job, specified as name-value pairs. + // +mapType=granular + NonOverridableArguments map[string]*string `json:"nonOverridableArguments,omitempty" tf:"non_overridable_arguments,omitempty"` + + // Notification property of the job. Defined below. + NotificationProperty *NotificationPropertyObservation `json:"notificationProperty,omitempty" tf:"notification_property,omitempty"` + + // The number of workers of a defined workerType that are allocated when a job runs. + NumberOfWorkers *float64 `json:"numberOfWorkers,omitempty" tf:"number_of_workers,omitempty"` + + // – The ARN of the IAM role associated with this job. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the Security Configuration to be associated with the job. 
+ SecurityConfiguration *string `json:"securityConfiguration,omitempty" tf:"security_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // – The job timeout in minutes. The default is 2880 minutes (48 hours) for glueetl and pythonshell jobs, and null (unlimited) for gluestreaming jobs. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. + WorkerType *string `json:"workerType,omitempty" tf:"worker_type,omitempty"` +} + +type JobParameters struct { + + // – The command of the job. Defined below. + // +kubebuilder:validation:Optional + Command *CommandParameters `json:"command,omitempty" tf:"command,omitempty"` + + // – The list of connections used for this job. + // +kubebuilder:validation:Optional + Connections []*string `json:"connections,omitempty" tf:"connections,omitempty"` + + // execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide. + // +kubebuilder:validation:Optional + // +mapType=granular + DefaultArguments map[string]*string `json:"defaultArguments,omitempty" tf:"default_arguments,omitempty"` + + // – Description of the job. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: FLEX, STANDARD. + // +kubebuilder:validation:Optional + ExecutionClass *string `json:"executionClass,omitempty" tf:"execution_class,omitempty"` + + // – Execution property of the job. Defined below. + // +kubebuilder:validation:Optional + ExecutionProperty *ExecutionPropertyParameters `json:"executionProperty,omitempty" tf:"execution_property,omitempty"` + + // The version of glue to use, for example "1.0". Ray jobs should set this to 4.0 or greater. For information about available versions, see the AWS Glue Release Notes. + // +kubebuilder:validation:Optional + GlueVersion *string `json:"glueVersion,omitempty" tf:"glue_version,omitempty"` + + // – The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. Required when pythonshell is set, accept either 0.0625 or 1.0. Use number_of_workers and worker_type arguments instead with glue_version 2.0 and above. + // +kubebuilder:validation:Optional + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // – The maximum number of times to retry this job if it fails. + // +kubebuilder:validation:Optional + MaxRetries *float64 `json:"maxRetries,omitempty" tf:"max_retries,omitempty"` + + // overridable arguments for this job, specified as name-value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + NonOverridableArguments map[string]*string `json:"nonOverridableArguments,omitempty" tf:"non_overridable_arguments,omitempty"` + + // Notification property of the job. Defined below. 
+ // +kubebuilder:validation:Optional + NotificationProperty *NotificationPropertyParameters `json:"notificationProperty,omitempty" tf:"notification_property,omitempty"` + + // The number of workers of a defined workerType that are allocated when a job runs. + // +kubebuilder:validation:Optional + NumberOfWorkers *float64 `json:"numberOfWorkers,omitempty" tf:"number_of_workers,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // – The ARN of the IAM role associated with this job. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The name of the Security Configuration to be associated with the job. + // +kubebuilder:validation:Optional + SecurityConfiguration *string `json:"securityConfiguration,omitempty" tf:"security_configuration,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // – The job timeout in minutes. The default is 2880 minutes (48 hours) for glueetl and pythonshell jobs, and null (unlimited) for gluestreaming jobs. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // The type of predefined worker that is allocated when a job runs. 
Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. + // +kubebuilder:validation:Optional + WorkerType *string `json:"workerType,omitempty" tf:"worker_type,omitempty"` +} + +type NotificationPropertyInitParameters struct { + + // After a job run starts, the number of minutes to wait before sending a job run delay notification. + NotifyDelayAfter *float64 `json:"notifyDelayAfter,omitempty" tf:"notify_delay_after,omitempty"` +} + +type NotificationPropertyObservation struct { + + // After a job run starts, the number of minutes to wait before sending a job run delay notification. + NotifyDelayAfter *float64 `json:"notifyDelayAfter,omitempty" tf:"notify_delay_after,omitempty"` +} + +type NotificationPropertyParameters struct { + + // After a job run starts, the number of minutes to wait before sending a job run delay notification. + // +kubebuilder:validation:Optional + NotifyDelayAfter *float64 `json:"notifyDelayAfter,omitempty" tf:"notify_delay_after,omitempty"` +} + +// JobSpec defines the desired state of Job +type JobSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider JobParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider JobInitParameters `json:"initProvider,omitempty"` +} + +// JobStatus defines the observed state of Job. 
+type JobStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider JobObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Job is the Schema for the Jobs API. Provides an Glue Job resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Job struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.command) || (has(self.initProvider) && has(self.initProvider.command))",message="spec.forProvider.command is a required parameter" + Spec JobSpec `json:"spec"` + Status JobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// JobList contains a list of Jobs +type JobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Job `json:"items"` +} + +// Repository type metadata. +var ( + Job_Kind = "Job" + Job_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Job_Kind}.String() + Job_KindAPIVersion = Job_Kind + "." 
+ CRDGroupVersion.String() + Job_GroupVersionKind = CRDGroupVersion.WithKind(Job_Kind) +) + +func init() { + SchemeBuilder.Register(&Job{}, &JobList{}) +} diff --git a/apis/glue/v1beta2/zz_securityconfiguration_terraformed.go b/apis/glue/v1beta2/zz_securityconfiguration_terraformed.go new file mode 100755 index 0000000000..26033e9336 --- /dev/null +++ b/apis/glue/v1beta2/zz_securityconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SecurityConfiguration +func (mg *SecurityConfiguration) GetTerraformResourceType() string { + return "aws_glue_security_configuration" +} + +// GetConnectionDetailsMapping for this SecurityConfiguration +func (tr *SecurityConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SecurityConfiguration +func (tr *SecurityConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SecurityConfiguration +func (tr *SecurityConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SecurityConfiguration +func (tr *SecurityConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SecurityConfiguration +func (tr 
*SecurityConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SecurityConfiguration +func (tr *SecurityConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SecurityConfiguration +func (tr *SecurityConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SecurityConfiguration +func (tr *SecurityConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SecurityConfiguration using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *SecurityConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &SecurityConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SecurityConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glue/v1beta2/zz_securityconfiguration_types.go b/apis/glue/v1beta2/zz_securityconfiguration_types.go new file mode 100755 index 0000000000..38e876303c --- /dev/null +++ b/apis/glue/v1beta2/zz_securityconfiguration_types.go @@ -0,0 +1,280 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudwatchEncryptionInitParameters struct { + + // Encryption mode to use for CloudWatch data. Valid values: DISABLED, SSE-KMS. Default value: DISABLED. + CloudwatchEncryptionMode *string `json:"cloudwatchEncryptionMode,omitempty" tf:"cloudwatch_encryption_mode,omitempty"` + + // Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` +} + +type CloudwatchEncryptionObservation struct { + + // Encryption mode to use for CloudWatch data. Valid values: DISABLED, SSE-KMS. Default value: DISABLED. + CloudwatchEncryptionMode *string `json:"cloudwatchEncryptionMode,omitempty" tf:"cloudwatch_encryption_mode,omitempty"` + + // Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` +} + +type CloudwatchEncryptionParameters struct { + + // Encryption mode to use for CloudWatch data. Valid values: DISABLED, SSE-KMS. Default value: DISABLED. + // +kubebuilder:validation:Optional + CloudwatchEncryptionMode *string `json:"cloudwatchEncryptionMode,omitempty" tf:"cloudwatch_encryption_mode,omitempty"` + + // Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. 
+ // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` +} + +type EncryptionConfigurationInitParameters struct { + CloudwatchEncryption *CloudwatchEncryptionInitParameters `json:"cloudwatchEncryption,omitempty" tf:"cloudwatch_encryption,omitempty"` + + JobBookmarksEncryption *JobBookmarksEncryptionInitParameters `json:"jobBookmarksEncryption,omitempty" tf:"job_bookmarks_encryption,omitempty"` + + // A s3_encryption block as described below, which contains encryption configuration for S3 data. + S3Encryption *S3EncryptionInitParameters `json:"s3Encryption,omitempty" tf:"s3_encryption,omitempty"` +} + +type EncryptionConfigurationObservation struct { + CloudwatchEncryption *CloudwatchEncryptionObservation `json:"cloudwatchEncryption,omitempty" tf:"cloudwatch_encryption,omitempty"` + + JobBookmarksEncryption *JobBookmarksEncryptionObservation `json:"jobBookmarksEncryption,omitempty" tf:"job_bookmarks_encryption,omitempty"` + + // A s3_encryption block as described below, which contains encryption configuration for S3 data. + S3Encryption *S3EncryptionObservation `json:"s3Encryption,omitempty" tf:"s3_encryption,omitempty"` +} + +type EncryptionConfigurationParameters struct { + + // +kubebuilder:validation:Optional + CloudwatchEncryption *CloudwatchEncryptionParameters `json:"cloudwatchEncryption" tf:"cloudwatch_encryption,omitempty"` + + // +kubebuilder:validation:Optional + JobBookmarksEncryption *JobBookmarksEncryptionParameters `json:"jobBookmarksEncryption" tf:"job_bookmarks_encryption,omitempty"` + + // A s3_encryption block as described below, which contains encryption configuration for S3 data. + // +kubebuilder:validation:Optional + S3Encryption *S3EncryptionParameters `json:"s3Encryption" tf:"s3_encryption,omitempty"` +} + +type JobBookmarksEncryptionInitParameters struct { + + // Encryption mode to use for job bookmarks data. Valid values: CSE-KMS, DISABLED. Default value: DISABLED. 
+ JobBookmarksEncryptionMode *string `json:"jobBookmarksEncryptionMode,omitempty" tf:"job_bookmarks_encryption_mode,omitempty"` + + // Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` +} + +type JobBookmarksEncryptionObservation struct { + + // Encryption mode to use for job bookmarks data. Valid values: CSE-KMS, DISABLED. Default value: DISABLED. + JobBookmarksEncryptionMode *string `json:"jobBookmarksEncryptionMode,omitempty" tf:"job_bookmarks_encryption_mode,omitempty"` + + // Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` +} + +type JobBookmarksEncryptionParameters struct { + + // Encryption mode to use for job bookmarks data. Valid values: CSE-KMS, DISABLED. Default value: DISABLED. + // +kubebuilder:validation:Optional + JobBookmarksEncryptionMode *string `json:"jobBookmarksEncryptionMode,omitempty" tf:"job_bookmarks_encryption_mode,omitempty"` + + // Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` +} + +type S3EncryptionInitParameters struct { + + // Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // Encryption mode to use for S3 data. Valid values: DISABLED, SSE-KMS, SSE-S3. Default value: DISABLED. + S3EncryptionMode *string `json:"s3EncryptionMode,omitempty" tf:"s3_encryption_mode,omitempty"` +} + +type S3EncryptionObservation struct { + + // Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Encryption mode to use for S3 data. Valid values: DISABLED, SSE-KMS, SSE-S3. Default value: DISABLED. 
+ S3EncryptionMode *string `json:"s3EncryptionMode,omitempty" tf:"s3_encryption_mode,omitempty"` +} + +type S3EncryptionParameters struct { + + // Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // Encryption mode to use for S3 data. Valid values: DISABLED, SSE-KMS, SSE-S3. Default value: DISABLED. + // +kubebuilder:validation:Optional + S3EncryptionMode *string `json:"s3EncryptionMode,omitempty" tf:"s3_encryption_mode,omitempty"` +} + +type SecurityConfigurationInitParameters struct { + + // – Configuration block containing encryption configuration. Detailed below. + EncryptionConfiguration *EncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` +} + +type SecurityConfigurationObservation struct { + + // – Configuration block containing encryption configuration. Detailed below. + EncryptionConfiguration *EncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Glue security configuration name + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type SecurityConfigurationParameters struct { + + // – Configuration block containing encryption configuration. Detailed below. 
+ // +kubebuilder:validation:Optional + EncryptionConfiguration *EncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// SecurityConfigurationSpec defines the desired state of SecurityConfiguration +type SecurityConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SecurityConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SecurityConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// SecurityConfigurationStatus defines the observed state of SecurityConfiguration. +type SecurityConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SecurityConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SecurityConfiguration is the Schema for the SecurityConfigurations API. 
Manages a Glue Security Configuration +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SecurityConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.encryptionConfiguration) || (has(self.initProvider) && has(self.initProvider.encryptionConfiguration))",message="spec.forProvider.encryptionConfiguration is a required parameter" + Spec SecurityConfigurationSpec `json:"spec"` + Status SecurityConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SecurityConfigurationList contains a list of SecurityConfigurations +type SecurityConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SecurityConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + SecurityConfiguration_Kind = "SecurityConfiguration" + SecurityConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SecurityConfiguration_Kind}.String() + SecurityConfiguration_KindAPIVersion = SecurityConfiguration_Kind + "." 
+ CRDGroupVersion.String() + SecurityConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(SecurityConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&SecurityConfiguration{}, &SecurityConfigurationList{}) +} diff --git a/apis/glue/v1beta2/zz_trigger_terraformed.go b/apis/glue/v1beta2/zz_trigger_terraformed.go new file mode 100755 index 0000000000..e71d5e473e --- /dev/null +++ b/apis/glue/v1beta2/zz_trigger_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Trigger +func (mg *Trigger) GetTerraformResourceType() string { + return "aws_glue_trigger" +} + +// GetConnectionDetailsMapping for this Trigger +func (tr *Trigger) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Trigger +func (tr *Trigger) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Trigger +func (tr *Trigger) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Trigger +func (tr *Trigger) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Trigger +func (tr *Trigger) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base 
:= map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Trigger +func (tr *Trigger) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Trigger +func (tr *Trigger) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Trigger +func (tr *Trigger) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Trigger using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Trigger) LateInitialize(attrs []byte) (bool, error) { + params := &TriggerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Trigger) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/glue/v1beta2/zz_trigger_types.go b/apis/glue/v1beta2/zz_trigger_types.go new file mode 100755 index 0000000000..1295003ccf --- /dev/null +++ b/apis/glue/v1beta2/zz_trigger_types.go @@ -0,0 +1,486 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionsInitParameters struct { + + // Arguments to be passed to the job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. + // +mapType=granular + Arguments map[string]*string `json:"arguments,omitempty" tf:"arguments,omitempty"` + + // The name of the crawler to be executed. Conflicts with job_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Crawler + CrawlerName *string `json:"crawlerName,omitempty" tf:"crawler_name,omitempty"` + + // Reference to a Crawler in glue to populate crawlerName. 
+ // +kubebuilder:validation:Optional + CrawlerNameRef *v1.Reference `json:"crawlerNameRef,omitempty" tf:"-"` + + // Selector for a Crawler in glue to populate crawlerName. + // +kubebuilder:validation:Optional + CrawlerNameSelector *v1.Selector `json:"crawlerNameSelector,omitempty" tf:"-"` + + // The name of a job to be executed. Conflicts with crawler_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Job + JobName *string `json:"jobName,omitempty" tf:"job_name,omitempty"` + + // Reference to a Job in glue to populate jobName. + // +kubebuilder:validation:Optional + JobNameRef *v1.Reference `json:"jobNameRef,omitempty" tf:"-"` + + // Selector for a Job in glue to populate jobName. + // +kubebuilder:validation:Optional + JobNameSelector *v1.Selector `json:"jobNameSelector,omitempty" tf:"-"` + + // Specifies configuration properties of a job run notification. See Notification Property details below. + NotificationProperty *ActionsNotificationPropertyInitParameters `json:"notificationProperty,omitempty" tf:"notification_property,omitempty"` + + // The name of the Security Configuration structure to be used with this action. + SecurityConfiguration *string `json:"securityConfiguration,omitempty" tf:"security_configuration,omitempty"` + + // The job run timeout in minutes. It overrides the timeout value of the job. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type ActionsNotificationPropertyInitParameters struct { + + // After a job run starts, the number of minutes to wait before sending a job run delay notification. + NotifyDelayAfter *float64 `json:"notifyDelayAfter,omitempty" tf:"notify_delay_after,omitempty"` +} + +type ActionsNotificationPropertyObservation struct { + + // After a job run starts, the number of minutes to wait before sending a job run delay notification. 
+ NotifyDelayAfter *float64 `json:"notifyDelayAfter,omitempty" tf:"notify_delay_after,omitempty"` +} + +type ActionsNotificationPropertyParameters struct { + + // After a job run starts, the number of minutes to wait before sending a job run delay notification. + // +kubebuilder:validation:Optional + NotifyDelayAfter *float64 `json:"notifyDelayAfter,omitempty" tf:"notify_delay_after,omitempty"` +} + +type ActionsObservation struct { + + // Arguments to be passed to the job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. + // +mapType=granular + Arguments map[string]*string `json:"arguments,omitempty" tf:"arguments,omitempty"` + + // The name of the crawler to be executed. Conflicts with job_name. + CrawlerName *string `json:"crawlerName,omitempty" tf:"crawler_name,omitempty"` + + // The name of a job to be executed. Conflicts with crawler_name. + JobName *string `json:"jobName,omitempty" tf:"job_name,omitempty"` + + // Specifies configuration properties of a job run notification. See Notification Property details below. + NotificationProperty *ActionsNotificationPropertyObservation `json:"notificationProperty,omitempty" tf:"notification_property,omitempty"` + + // The name of the Security Configuration structure to be used with this action. + SecurityConfiguration *string `json:"securityConfiguration,omitempty" tf:"security_configuration,omitempty"` + + // The job run timeout in minutes. It overrides the timeout value of the job. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type ActionsParameters struct { + + // Arguments to be passed to the job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Arguments map[string]*string `json:"arguments,omitempty" tf:"arguments,omitempty"` + + // The name of the crawler to be executed. Conflicts with job_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Crawler + // +kubebuilder:validation:Optional + CrawlerName *string `json:"crawlerName,omitempty" tf:"crawler_name,omitempty"` + + // Reference to a Crawler in glue to populate crawlerName. + // +kubebuilder:validation:Optional + CrawlerNameRef *v1.Reference `json:"crawlerNameRef,omitempty" tf:"-"` + + // Selector for a Crawler in glue to populate crawlerName. + // +kubebuilder:validation:Optional + CrawlerNameSelector *v1.Selector `json:"crawlerNameSelector,omitempty" tf:"-"` + + // The name of a job to be executed. Conflicts with crawler_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Job + // +kubebuilder:validation:Optional + JobName *string `json:"jobName,omitempty" tf:"job_name,omitempty"` + + // Reference to a Job in glue to populate jobName. + // +kubebuilder:validation:Optional + JobNameRef *v1.Reference `json:"jobNameRef,omitempty" tf:"-"` + + // Selector for a Job in glue to populate jobName. + // +kubebuilder:validation:Optional + JobNameSelector *v1.Selector `json:"jobNameSelector,omitempty" tf:"-"` + + // Specifies configuration properties of a job run notification. See Notification Property details below. + // +kubebuilder:validation:Optional + NotificationProperty *ActionsNotificationPropertyParameters `json:"notificationProperty,omitempty" tf:"notification_property,omitempty"` + + // The name of the Security Configuration structure to be used with this action. + // +kubebuilder:validation:Optional + SecurityConfiguration *string `json:"securityConfiguration,omitempty" tf:"security_configuration,omitempty"` + + // The job run timeout in minutes. It overrides the timeout value of the job. 
+ // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type ConditionsInitParameters struct { + + // The condition crawl state. Currently, the values supported are RUNNING, SUCCEEDED, CANCELLED, and FAILED. If this is specified, crawler_name must also be specified. Conflicts with state. + CrawlState *string `json:"crawlState,omitempty" tf:"crawl_state,omitempty"` + + // The name of the crawler to be executed. Conflicts with job_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Crawler + CrawlerName *string `json:"crawlerName,omitempty" tf:"crawler_name,omitempty"` + + // Reference to a Crawler in glue to populate crawlerName. + // +kubebuilder:validation:Optional + CrawlerNameRef *v1.Reference `json:"crawlerNameRef,omitempty" tf:"-"` + + // Selector for a Crawler in glue to populate crawlerName. + // +kubebuilder:validation:Optional + CrawlerNameSelector *v1.Selector `json:"crawlerNameSelector,omitempty" tf:"-"` + + // The name of a job to be executed. Conflicts with crawler_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Job + JobName *string `json:"jobName,omitempty" tf:"job_name,omitempty"` + + // Reference to a Job in glue to populate jobName. + // +kubebuilder:validation:Optional + JobNameRef *v1.Reference `json:"jobNameRef,omitempty" tf:"-"` + + // Selector for a Job in glue to populate jobName. + // +kubebuilder:validation:Optional + JobNameSelector *v1.Selector `json:"jobNameSelector,omitempty" tf:"-"` + + // A logical operator. Defaults to EQUALS. + LogicalOperator *string `json:"logicalOperator,omitempty" tf:"logical_operator,omitempty"` + + // The condition job state. Currently, the values supported are SUCCEEDED, STOPPED, TIMEOUT and FAILED. If this is specified, job_name must also be specified. Conflicts with crawler_state. 
+ State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type ConditionsObservation struct { + + // The condition crawl state. Currently, the values supported are RUNNING, SUCCEEDED, CANCELLED, and FAILED. If this is specified, crawler_name must also be specified. Conflicts with state. + CrawlState *string `json:"crawlState,omitempty" tf:"crawl_state,omitempty"` + + // The name of the crawler to be executed. Conflicts with job_name. + CrawlerName *string `json:"crawlerName,omitempty" tf:"crawler_name,omitempty"` + + // The name of a job to be executed. Conflicts with crawler_name. + JobName *string `json:"jobName,omitempty" tf:"job_name,omitempty"` + + // A logical operator. Defaults to EQUALS. + LogicalOperator *string `json:"logicalOperator,omitempty" tf:"logical_operator,omitempty"` + + // The condition job state. Currently, the values supported are SUCCEEDED, STOPPED, TIMEOUT and FAILED. If this is specified, job_name must also be specified. Conflicts with crawler_state. + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type ConditionsParameters struct { + + // The condition crawl state. Currently, the values supported are RUNNING, SUCCEEDED, CANCELLED, and FAILED. If this is specified, crawler_name must also be specified. Conflicts with state. + // +kubebuilder:validation:Optional + CrawlState *string `json:"crawlState,omitempty" tf:"crawl_state,omitempty"` + + // The name of the crawler to be executed. Conflicts with job_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Crawler + // +kubebuilder:validation:Optional + CrawlerName *string `json:"crawlerName,omitempty" tf:"crawler_name,omitempty"` + + // Reference to a Crawler in glue to populate crawlerName. + // +kubebuilder:validation:Optional + CrawlerNameRef *v1.Reference `json:"crawlerNameRef,omitempty" tf:"-"` + + // Selector for a Crawler in glue to populate crawlerName. 
+ // +kubebuilder:validation:Optional + CrawlerNameSelector *v1.Selector `json:"crawlerNameSelector,omitempty" tf:"-"` + + // The name of a job to be executed. Conflicts with crawler_name. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.Job + // +kubebuilder:validation:Optional + JobName *string `json:"jobName,omitempty" tf:"job_name,omitempty"` + + // Reference to a Job in glue to populate jobName. + // +kubebuilder:validation:Optional + JobNameRef *v1.Reference `json:"jobNameRef,omitempty" tf:"-"` + + // Selector for a Job in glue to populate jobName. + // +kubebuilder:validation:Optional + JobNameSelector *v1.Selector `json:"jobNameSelector,omitempty" tf:"-"` + + // A logical operator. Defaults to EQUALS. + // +kubebuilder:validation:Optional + LogicalOperator *string `json:"logicalOperator,omitempty" tf:"logical_operator,omitempty"` + + // The condition job state. Currently, the values supported are SUCCEEDED, STOPPED, TIMEOUT and FAILED. If this is specified, job_name must also be specified. Conflicts with crawler_state. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type EventBatchingConditionInitParameters struct { + + // Number of events that must be received from Amazon EventBridge before EventBridge event trigger fires. + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Window of time in seconds after which EventBridge event trigger fires. Window starts when first event is received. Default value is 900. + BatchWindow *float64 `json:"batchWindow,omitempty" tf:"batch_window,omitempty"` +} + +type EventBatchingConditionObservation struct { + + // Number of events that must be received from Amazon EventBridge before EventBridge event trigger fires. + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Window of time in seconds after which EventBridge event trigger fires. 
Window starts when first event is received. Default value is 900. + BatchWindow *float64 `json:"batchWindow,omitempty" tf:"batch_window,omitempty"` +} + +type EventBatchingConditionParameters struct { + + // Number of events that must be received from Amazon EventBridge before EventBridge event trigger fires. + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize" tf:"batch_size,omitempty"` + + // Window of time in seconds after which EventBridge event trigger fires. Window starts when first event is received. Default value is 900. + // +kubebuilder:validation:Optional + BatchWindow *float64 `json:"batchWindow,omitempty" tf:"batch_window,omitempty"` +} + +type PredicateInitParameters struct { + + // A list of the conditions that determine when the trigger will fire. See Conditions. + Conditions []ConditionsInitParameters `json:"conditions,omitempty" tf:"conditions,omitempty"` + + // How to handle multiple conditions. Defaults to AND. Valid values are AND or ANY. + Logical *string `json:"logical,omitempty" tf:"logical,omitempty"` +} + +type PredicateObservation struct { + + // A list of the conditions that determine when the trigger will fire. See Conditions. + Conditions []ConditionsObservation `json:"conditions,omitempty" tf:"conditions,omitempty"` + + // How to handle multiple conditions. Defaults to AND. Valid values are AND or ANY. + Logical *string `json:"logical,omitempty" tf:"logical,omitempty"` +} + +type PredicateParameters struct { + + // A list of the conditions that determine when the trigger will fire. See Conditions. + // +kubebuilder:validation:Optional + Conditions []ConditionsParameters `json:"conditions" tf:"conditions,omitempty"` + + // How to handle multiple conditions. Defaults to AND. Valid values are AND or ANY. + // +kubebuilder:validation:Optional + Logical *string `json:"logical,omitempty" tf:"logical,omitempty"` +} + +type TriggerInitParameters struct { + + // – List of actions initiated by this trigger when it fires. 
See Actions Below. + Actions []ActionsInitParameters `json:"actions,omitempty" tf:"actions,omitempty"` + + // – A description of the new trigger. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // – Start the trigger. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires. See Event Batching Condition. + EventBatchingCondition []EventBatchingConditionInitParameters `json:"eventBatchingCondition,omitempty" tf:"event_batching_condition,omitempty"` + + // – A predicate to specify when the new trigger should fire. Required when trigger type is CONDITIONAL. See Predicate Below. + Predicate *PredicateInitParameters `json:"predicate,omitempty" tf:"predicate,omitempty"` + + // Based Schedules for Jobs and Crawlers + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // – Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers. + StartOnCreation *bool `json:"startOnCreation,omitempty" tf:"start_on_creation,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // – The type of trigger. Valid values are CONDITIONAL, EVENT, ON_DEMAND, and SCHEDULED. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A workflow to which the trigger should be associated to. Every workflow graph (DAG) needs a starting trigger (ON_DEMAND or SCHEDULED type) and can contain multiple additional CONDITIONAL triggers. + WorkflowName *string `json:"workflowName,omitempty" tf:"workflow_name,omitempty"` +} + +type TriggerObservation struct { + + // – List of actions initiated by this trigger when it fires. See Actions Below. 
+ Actions []ActionsObservation `json:"actions,omitempty" tf:"actions,omitempty"` + + // Amazon Resource Name (ARN) of Glue Trigger + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // – A description of the new trigger. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // – Start the trigger. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires. See Event Batching Condition. + EventBatchingCondition []EventBatchingConditionObservation `json:"eventBatchingCondition,omitempty" tf:"event_batching_condition,omitempty"` + + // Trigger name + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // – A predicate to specify when the new trigger should fire. Required when trigger type is CONDITIONAL. See Predicate Below. + Predicate *PredicateObservation `json:"predicate,omitempty" tf:"predicate,omitempty"` + + // Based Schedules for Jobs and Crawlers + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // – Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers. + StartOnCreation *bool `json:"startOnCreation,omitempty" tf:"start_on_creation,omitempty"` + + // The condition job state. Currently, the values supported are SUCCEEDED, STOPPED, TIMEOUT and FAILED. If this is specified, job_name must also be specified. Conflicts with crawler_state. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // – The type of trigger. Valid values are CONDITIONAL, EVENT, ON_DEMAND, and SCHEDULED. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A workflow to which the trigger should be associated to. Every workflow graph (DAG) needs a starting trigger (ON_DEMAND or SCHEDULED type) and can contain multiple additional CONDITIONAL triggers. + WorkflowName *string `json:"workflowName,omitempty" tf:"workflow_name,omitempty"` +} + +type TriggerParameters struct { + + // – List of actions initiated by this trigger when it fires. See Actions Below. + // +kubebuilder:validation:Optional + Actions []ActionsParameters `json:"actions,omitempty" tf:"actions,omitempty"` + + // – A description of the new trigger. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // – Start the trigger. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires. See Event Batching Condition. + // +kubebuilder:validation:Optional + EventBatchingCondition []EventBatchingConditionParameters `json:"eventBatchingCondition,omitempty" tf:"event_batching_condition,omitempty"` + + // – A predicate to specify when the new trigger should fire. Required when trigger type is CONDITIONAL. See Predicate Below. + // +kubebuilder:validation:Optional + Predicate *PredicateParameters `json:"predicate,omitempty" tf:"predicate,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Based Schedules for Jobs and Crawlers + // +kubebuilder:validation:Optional + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // – Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers. + // +kubebuilder:validation:Optional + StartOnCreation *bool `json:"startOnCreation,omitempty" tf:"start_on_creation,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // – The type of trigger. Valid values are CONDITIONAL, EVENT, ON_DEMAND, and SCHEDULED. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A workflow to which the trigger should be associated to. Every workflow graph (DAG) needs a starting trigger (ON_DEMAND or SCHEDULED type) and can contain multiple additional CONDITIONAL triggers. + // +kubebuilder:validation:Optional + WorkflowName *string `json:"workflowName,omitempty" tf:"workflow_name,omitempty"` +} + +// TriggerSpec defines the desired state of Trigger +type TriggerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TriggerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TriggerInitParameters `json:"initProvider,omitempty"` +} + +// TriggerStatus defines the observed state of Trigger. +type TriggerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TriggerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Trigger is the Schema for the Triggers API. Manages a Glue Trigger resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Trigger struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.actions) || (has(self.initProvider) && has(self.initProvider.actions))",message="spec.forProvider.actions is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec TriggerSpec `json:"spec"` + Status TriggerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// 
TriggerList contains a list of Triggers +type TriggerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Trigger `json:"items"` +} + +// Repository type metadata. +var ( + Trigger_Kind = "Trigger" + Trigger_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Trigger_Kind}.String() + Trigger_KindAPIVersion = Trigger_Kind + "." + CRDGroupVersion.String() + Trigger_GroupVersionKind = CRDGroupVersion.WithKind(Trigger_Kind) +) + +func init() { + SchemeBuilder.Register(&Trigger{}, &TriggerList{}) +} diff --git a/apis/grafana/v1beta1/zz_generated.conversion_hubs.go b/apis/grafana/v1beta1/zz_generated.conversion_hubs.go index b99b4a674f..1c33a3188c 100755 --- a/apis/grafana/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/grafana/v1beta1/zz_generated.conversion_hubs.go @@ -12,9 +12,6 @@ func (tr *LicenseAssociation) Hub() {} // Hub marks this type as a conversion hub. func (tr *RoleAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Workspace) Hub() {} - // Hub marks this type as a conversion hub. func (tr *WorkspaceAPIKey) Hub() {} diff --git a/apis/grafana/v1beta1/zz_generated.conversion_spokes.go b/apis/grafana/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..508736b23d --- /dev/null +++ b/apis/grafana/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Workspace to the hub type. 
+func (tr *Workspace) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workspace type. +func (tr *Workspace) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/grafana/v1beta1/zz_generated.resolvers.go b/apis/grafana/v1beta1/zz_generated.resolvers.go index 41f3ea2567..fdf952ec6d 100644 --- a/apis/grafana/v1beta1/zz_generated.resolvers.go +++ b/apis/grafana/v1beta1/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *LicenseAssociation) ResolveReferences( // ResolveReferences of this Li var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -46,7 +46,7 @@ func (mg *LicenseAssociation) ResolveReferences( // ResolveReferences of this Li mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta1", "Workspace", 
"WorkspaceList") + m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -77,7 +77,7 @@ func (mg *RoleAssociation) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -96,7 +96,7 @@ func (mg *RoleAssociation) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -177,7 +177,7 @@ func (mg *WorkspaceAPIKey) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -196,7 +196,7 @@ func (mg *WorkspaceAPIKey) ResolveReferences(ctx context.Context, c client.Reade 
mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -227,7 +227,7 @@ func (mg *WorkspaceSAMLConfiguration) ResolveReferences(ctx context.Context, c c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -246,7 +246,7 @@ func (mg *WorkspaceSAMLConfiguration) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("grafana.aws.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/grafana/v1beta1/zz_licenseassociation_types.go b/apis/grafana/v1beta1/zz_licenseassociation_types.go index 96aa92c96c..767b6a2da7 100755 --- a/apis/grafana/v1beta1/zz_licenseassociation_types.go +++ b/apis/grafana/v1beta1/zz_licenseassociation_types.go @@ -19,7 +19,7 @@ type LicenseAssociationInitParameters struct { LicenseType *string `json:"licenseType,omitempty" 
tf:"license_type,omitempty"` // The workspace id. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` @@ -61,7 +61,7 @@ type LicenseAssociationParameters struct { Region *string `json:"region" tf:"-"` // The workspace id. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/grafana/v1beta1/zz_roleassociation_types.go b/apis/grafana/v1beta1/zz_roleassociation_types.go index 82d0b752bc..e1dccaf641 100755 --- a/apis/grafana/v1beta1/zz_roleassociation_types.go +++ b/apis/grafana/v1beta1/zz_roleassociation_types.go @@ -27,7 +27,7 @@ type RoleAssociationInitParameters struct { UserIds []*string `json:"userIds,omitempty" tf:"user_ids,omitempty"` // The workspace id. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta2.Workspace WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` // Reference to a Workspace in grafana to populate workspaceId. @@ -80,7 +80,7 @@ type RoleAssociationParameters struct { UserIds []*string `json:"userIds,omitempty" tf:"user_ids,omitempty"` // The workspace id. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta2.Workspace // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/grafana/v1beta1/zz_workspaceapikey_types.go b/apis/grafana/v1beta1/zz_workspaceapikey_types.go index 414e40d686..bc74686294 100755 --- a/apis/grafana/v1beta1/zz_workspaceapikey_types.go +++ b/apis/grafana/v1beta1/zz_workspaceapikey_types.go @@ -25,7 +25,7 @@ type WorkspaceAPIKeyInitParameters struct { SecondsToLive *float64 `json:"secondsToLive,omitempty" tf:"seconds_to_live,omitempty"` // The ID of the workspace that the API key is valid for. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` @@ -74,7 +74,7 @@ type WorkspaceAPIKeyParameters struct { SecondsToLive *float64 `json:"secondsToLive,omitempty" tf:"seconds_to_live,omitempty"` // The ID of the workspace that the API key is valid for. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/grafana/v1beta1/zz_workspacesamlconfiguration_types.go b/apis/grafana/v1beta1/zz_workspacesamlconfiguration_types.go index 7207f61df7..7631d50c24 100755 --- a/apis/grafana/v1beta1/zz_workspacesamlconfiguration_types.go +++ b/apis/grafana/v1beta1/zz_workspacesamlconfiguration_types.go @@ -52,7 +52,7 @@ type WorkspaceSAMLConfigurationInitParameters struct { RoleAssertion *string `json:"roleAssertion,omitempty" tf:"role_assertion,omitempty"` // The workspace id. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta2.Workspace WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` // Reference to a Workspace in grafana to populate workspaceId. @@ -167,7 +167,7 @@ type WorkspaceSAMLConfigurationParameters struct { RoleAssertion *string `json:"roleAssertion,omitempty" tf:"role_assertion,omitempty"` // The workspace id. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/grafana/v1beta2.Workspace // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/grafana/v1beta2/zz_generated.conversion_hubs.go b/apis/grafana/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..60d422318a --- /dev/null +++ b/apis/grafana/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Workspace) Hub() {} diff --git a/apis/grafana/v1beta2/zz_generated.deepcopy.go b/apis/grafana/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c242a8d5f8 --- /dev/null +++ b/apis/grafana/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,781 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkAccessControlInitParameters) DeepCopyInto(out *NetworkAccessControlInitParameters) { + *out = *in + if in.PrefixListIds != nil { + in, out := &in.PrefixListIds, &out.PrefixListIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VpceIds != nil { + in, out := &in.VpceIds, &out.VpceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAccessControlInitParameters. +func (in *NetworkAccessControlInitParameters) DeepCopy() *NetworkAccessControlInitParameters { + if in == nil { + return nil + } + out := new(NetworkAccessControlInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkAccessControlObservation) DeepCopyInto(out *NetworkAccessControlObservation) { + *out = *in + if in.PrefixListIds != nil { + in, out := &in.PrefixListIds, &out.PrefixListIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VpceIds != nil { + in, out := &in.VpceIds, &out.VpceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAccessControlObservation. 
+func (in *NetworkAccessControlObservation) DeepCopy() *NetworkAccessControlObservation { + if in == nil { + return nil + } + out := new(NetworkAccessControlObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkAccessControlParameters) DeepCopyInto(out *NetworkAccessControlParameters) { + *out = *in + if in.PrefixListIds != nil { + in, out := &in.PrefixListIds, &out.PrefixListIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VpceIds != nil { + in, out := &in.VpceIds, &out.VpceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAccessControlParameters. +func (in *NetworkAccessControlParameters) DeepCopy() *NetworkAccessControlParameters { + if in == nil { + return nil + } + out := new(NetworkAccessControlParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigurationInitParameters) DeepCopyInto(out *VPCConfigurationInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationInitParameters. +func (in *VPCConfigurationInitParameters) DeepCopy() *VPCConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationObservation) DeepCopyInto(out *VPCConfigurationObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationObservation. 
+func (in *VPCConfigurationObservation) DeepCopy() *VPCConfigurationObservation { + if in == nil { + return nil + } + out := new(VPCConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationParameters) DeepCopyInto(out *VPCConfigurationParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationParameters. +func (in *VPCConfigurationParameters) DeepCopy() *VPCConfigurationParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Workspace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceInitParameters) DeepCopyInto(out *WorkspaceInitParameters) { + *out = *in + if in.AccountAccessType != nil { + in, out := &in.AccountAccessType, &out.AccountAccessType + *out = new(string) + **out = **in + } + if in.AuthenticationProviders != nil { + in, out := &in.AuthenticationProviders, &out.AuthenticationProviders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(string) + **out = **in + } + if in.DataSources != nil { + in, out := &in.DataSources, &out.DataSources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GrafanaVersion != nil { + in, out := &in.GrafanaVersion, &out.GrafanaVersion + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkAccessControl != nil { + in, out := &in.NetworkAccessControl, &out.NetworkAccessControl + *out = new(NetworkAccessControlInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NotificationDestinations != nil { + in, out := &in.NotificationDestinations, &out.NotificationDestinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrganizationRoleName != nil { + in, out := &in.OrganizationRoleName, 
&out.OrganizationRoleName + *out = new(string) + **out = **in + } + if in.OrganizationalUnits != nil { + in, out := &in.OrganizationalUnits, &out.OrganizationalUnits + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PermissionType != nil { + in, out := &in.PermissionType, &out.PermissionType + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StackSetName != nil { + in, out := &in.StackSetName, &out.StackSetName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceInitParameters. +func (in *WorkspaceInitParameters) DeepCopy() *WorkspaceInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workspace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList. +func (in *WorkspaceList) DeepCopy() *WorkspaceList { + if in == nil { + return nil + } + out := new(WorkspaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkspaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceObservation) DeepCopyInto(out *WorkspaceObservation) { + *out = *in + if in.AccountAccessType != nil { + in, out := &in.AccountAccessType, &out.AccountAccessType + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AuthenticationProviders != nil { + in, out := &in.AuthenticationProviders, &out.AuthenticationProviders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(string) + **out = **in + } + if in.DataSources != nil { + in, out := &in.DataSources, &out.DataSources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + 
if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.GrafanaVersion != nil { + in, out := &in.GrafanaVersion, &out.GrafanaVersion + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkAccessControl != nil { + in, out := &in.NetworkAccessControl, &out.NetworkAccessControl + *out = new(NetworkAccessControlObservation) + (*in).DeepCopyInto(*out) + } + if in.NotificationDestinations != nil { + in, out := &in.NotificationDestinations, &out.NotificationDestinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrganizationRoleName != nil { + in, out := &in.OrganizationRoleName, &out.OrganizationRoleName + *out = new(string) + **out = **in + } + if in.OrganizationalUnits != nil { + in, out := &in.OrganizationalUnits, &out.OrganizationalUnits + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PermissionType != nil { + in, out := &in.PermissionType, &out.PermissionType + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SAMLConfigurationStatus != nil { + in, out := &in.SAMLConfigurationStatus, &out.SAMLConfigurationStatus + *out = new(string) + **out = **in + } + if in.StackSetName != nil { + in, out := &in.StackSetName, &out.StackSetName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceObservation. +func (in *WorkspaceObservation) DeepCopy() *WorkspaceObservation { + if in == nil { + return nil + } + out := new(WorkspaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceParameters) DeepCopyInto(out *WorkspaceParameters) { + *out = *in + if in.AccountAccessType != nil { + in, out := &in.AccountAccessType, &out.AccountAccessType + *out = new(string) + **out = **in + } + if in.AuthenticationProviders != nil { + in, out := &in.AuthenticationProviders, &out.AuthenticationProviders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(string) + **out = **in + } + if in.DataSources != nil { + in, out := &in.DataSources, &out.DataSources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GrafanaVersion != nil { + in, out := &in.GrafanaVersion, &out.GrafanaVersion + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkAccessControl != nil { + in, out := &in.NetworkAccessControl, &out.NetworkAccessControl + *out = new(NetworkAccessControlParameters) + (*in).DeepCopyInto(*out) + } + if in.NotificationDestinations != nil { + in, out := &in.NotificationDestinations, &out.NotificationDestinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrganizationRoleName != nil { + in, out := &in.OrganizationRoleName, &out.OrganizationRoleName + *out = new(string) + **out = **in + } + if in.OrganizationalUnits != nil { + in, out := &in.OrganizationalUnits, &out.OrganizationalUnits + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], 
&(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PermissionType != nil { + in, out := &in.PermissionType, &out.PermissionType + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StackSetName != nil { + in, out := &in.StackSetName, &out.StackSetName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceParameters. +func (in *WorkspaceParameters) DeepCopy() *WorkspaceParameters { + if in == nil { + return nil + } + out := new(WorkspaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec. +func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec { + if in == nil { + return nil + } + out := new(WorkspaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus. +func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus { + if in == nil { + return nil + } + out := new(WorkspaceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/grafana/v1beta2/zz_generated.managed.go b/apis/grafana/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..562d4553d0 --- /dev/null +++ b/apis/grafana/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Workspace. +func (mg *Workspace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workspace. +func (mg *Workspace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workspace. 
+func (mg *Workspace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workspace. +func (mg *Workspace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workspace. +func (mg *Workspace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workspace. +func (mg *Workspace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workspace. +func (mg *Workspace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workspace. +func (mg *Workspace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workspace. +func (mg *Workspace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workspace. 
+func (mg *Workspace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/grafana/v1beta2/zz_generated.managedlist.go b/apis/grafana/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..d32cca0e44 --- /dev/null +++ b/apis/grafana/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this WorkspaceList. +func (l *WorkspaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/grafana/v1beta2/zz_generated.resolvers.go b/apis/grafana/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..9e4c3cdea5 --- /dev/null +++ b/apis/grafana/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Workspace) ResolveReferences( // ResolveReferences of this Workspace. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/grafana/v1beta2/zz_groupversion_info.go b/apis/grafana/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..6208ef0d99 --- /dev/null +++ b/apis/grafana/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// 
SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=grafana.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "grafana.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/grafana/v1beta2/zz_workspace_terraformed.go b/apis/grafana/v1beta2/zz_workspace_terraformed.go new file mode 100755 index 0000000000..ab22313fd1 --- /dev/null +++ b/apis/grafana/v1beta2/zz_workspace_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workspace +func (mg *Workspace) GetTerraformResourceType() string { + return "aws_grafana_workspace" +} + +// GetConnectionDetailsMapping for this Workspace +func (tr *Workspace) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Workspace +func (tr *Workspace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workspace +func (tr *Workspace) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workspace +func (tr *Workspace) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workspace +func (tr *Workspace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Workspace +func (tr *Workspace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Workspace using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Workspace) LateInitialize(attrs []byte) (bool, error) { + params := &WorkspaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workspace) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/grafana/v1beta2/zz_workspace_types.go b/apis/grafana/v1beta2/zz_workspace_types.go new file mode 100755 index 0000000000..fda64a3f41 --- /dev/null +++ b/apis/grafana/v1beta2/zz_workspace_types.go @@ -0,0 +1,358 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type NetworkAccessControlInitParameters struct { + + // - An array of prefix list IDs. + // +listType=set + PrefixListIds []*string `json:"prefixListIds,omitempty" tf:"prefix_list_ids,omitempty"` + + // - An array of Amazon VPC endpoint IDs for the workspace. The only VPC endpoints that can be specified here are interface VPC endpoints for Grafana workspaces (using the com.amazonaws.[region].grafana-workspace service endpoint). Other VPC endpoints will be ignored. + // +listType=set + VpceIds []*string `json:"vpceIds,omitempty" tf:"vpce_ids,omitempty"` +} + +type NetworkAccessControlObservation struct { + + // - An array of prefix list IDs. + // +listType=set + PrefixListIds []*string `json:"prefixListIds,omitempty" tf:"prefix_list_ids,omitempty"` + + // - An array of Amazon VPC endpoint IDs for the workspace. The only VPC endpoints that can be specified here are interface VPC endpoints for Grafana workspaces (using the com.amazonaws.[region].grafana-workspace service endpoint). Other VPC endpoints will be ignored. + // +listType=set + VpceIds []*string `json:"vpceIds,omitempty" tf:"vpce_ids,omitempty"` +} + +type NetworkAccessControlParameters struct { + + // - An array of prefix list IDs. 
+ // +kubebuilder:validation:Optional + // +listType=set + PrefixListIds []*string `json:"prefixListIds" tf:"prefix_list_ids,omitempty"` + + // - An array of Amazon VPC endpoint IDs for the workspace. The only VPC endpoints that can be specified here are interface VPC endpoints for Grafana workspaces (using the com.amazonaws.[region].grafana-workspace service endpoint). Other VPC endpoints will be ignored. + // +kubebuilder:validation:Optional + // +listType=set + VpceIds []*string `json:"vpceIds" tf:"vpce_ids,omitempty"` +} + +type VPCConfigurationInitParameters struct { + + // - The list of Amazon EC2 security group IDs attached to the Amazon VPC for your Grafana workspace to connect. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // - The list of Amazon EC2 subnet IDs created in the Amazon VPC for your Grafana workspace to connect. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigurationObservation struct { + + // - The list of Amazon EC2 security group IDs attached to the Amazon VPC for your Grafana workspace to connect. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // - The list of Amazon EC2 subnet IDs created in the Amazon VPC for your Grafana workspace to connect. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigurationParameters struct { + + // - The list of Amazon EC2 security group IDs attached to the Amazon VPC for your Grafana workspace to connect. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds" tf:"security_group_ids,omitempty"` + + // - The list of Amazon EC2 subnet IDs created in the Amazon VPC for your Grafana workspace to connect. 
+ // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"` +} + +type WorkspaceInitParameters struct { + + // The type of account access for the workspace. Valid values are CURRENT_ACCOUNT and ORGANIZATION. If ORGANIZATION is specified, then organizational_units must also be present. + AccountAccessType *string `json:"accountAccessType,omitempty" tf:"account_access_type,omitempty"` + + // The authentication providers for the workspace. Valid values are AWS_SSO, SAML, or both. + AuthenticationProviders []*string `json:"authenticationProviders,omitempty" tf:"authentication_providers,omitempty"` + + // The configuration string for the workspace that you create. For more information about the format and configuration options available, see Working in your Grafana workspace. + Configuration *string `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The data sources for the workspace. Valid values are AMAZON_OPENSEARCH_SERVICE, ATHENA, CLOUDWATCH, PROMETHEUS, REDSHIFT, SITEWISE, TIMESTREAM, XRAY + DataSources []*string `json:"dataSources,omitempty" tf:"data_sources,omitempty"` + + // The workspace description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the version of Grafana to support in the new workspace. Supported values are 8.4, 9.4 and 10.4. If not specified, defaults to 9.4. + GrafanaVersion *string `json:"grafanaVersion,omitempty" tf:"grafana_version,omitempty"` + + // The Grafana workspace name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration for network access to your workspace.See Network Access Control below. + NetworkAccessControl *NetworkAccessControlInitParameters `json:"networkAccessControl,omitempty" tf:"network_access_control,omitempty"` + + // The notification destinations. 
If a data source is specified here, Amazon Managed Grafana will create IAM roles and permissions needed to use these destinations. Must be set to SNS. + NotificationDestinations []*string `json:"notificationDestinations,omitempty" tf:"notification_destinations,omitempty"` + + // The role name that the workspace uses to access resources through Amazon Organizations. + OrganizationRoleName *string `json:"organizationRoleName,omitempty" tf:"organization_role_name,omitempty"` + + // The Amazon Organizations organizational units that the workspace is authorized to use data sources from. + OrganizationalUnits []*string `json:"organizationalUnits,omitempty" tf:"organizational_units,omitempty"` + + // The permission type of the workspace. If SERVICE_MANAGED is specified, the IAM roles and IAM policy attachments are generated automatically. If CUSTOMER_MANAGED is specified, the IAM roles and IAM policy attachments will not be created. + PermissionType *string `json:"permissionType,omitempty" tf:"permission_type,omitempty"` + + // The IAM role ARN that the workspace assumes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The AWS CloudFormation stack set name that provisions IAM roles to be used by the workspace. + StackSetName *string `json:"stackSetName,omitempty" tf:"stack_set_name,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The configuration settings for an Amazon VPC that contains data sources for your Grafana workspace to connect to. See VPC Configuration below. + VPCConfiguration *VPCConfigurationInitParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type WorkspaceObservation struct { + + // The type of account access for the workspace. Valid values are CURRENT_ACCOUNT and ORGANIZATION. If ORGANIZATION is specified, then organizational_units must also be present. + AccountAccessType *string `json:"accountAccessType,omitempty" tf:"account_access_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Grafana workspace. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The authentication providers for the workspace. Valid values are AWS_SSO, SAML, or both. + AuthenticationProviders []*string `json:"authenticationProviders,omitempty" tf:"authentication_providers,omitempty"` + + // The configuration string for the workspace that you create. For more information about the format and configuration options available, see Working in your Grafana workspace. + Configuration *string `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The data sources for the workspace. Valid values are AMAZON_OPENSEARCH_SERVICE, ATHENA, CLOUDWATCH, PROMETHEUS, REDSHIFT, SITEWISE, TIMESTREAM, XRAY + DataSources []*string `json:"dataSources,omitempty" tf:"data_sources,omitempty"` + + // The workspace description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The endpoint of the Grafana workspace. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // Specifies the version of Grafana to support in the new workspace. Supported values are 8.4, 9.4 and 10.4. If not specified, defaults to 9.4. 
+ GrafanaVersion *string `json:"grafanaVersion,omitempty" tf:"grafana_version,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Grafana workspace name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration for network access to your workspace.See Network Access Control below. + NetworkAccessControl *NetworkAccessControlObservation `json:"networkAccessControl,omitempty" tf:"network_access_control,omitempty"` + + // The notification destinations. If a data source is specified here, Amazon Managed Grafana will create IAM roles and permissions needed to use these destinations. Must be set to SNS. + NotificationDestinations []*string `json:"notificationDestinations,omitempty" tf:"notification_destinations,omitempty"` + + // The role name that the workspace uses to access resources through Amazon Organizations. + OrganizationRoleName *string `json:"organizationRoleName,omitempty" tf:"organization_role_name,omitempty"` + + // The Amazon Organizations organizational units that the workspace is authorized to use data sources from. + OrganizationalUnits []*string `json:"organizationalUnits,omitempty" tf:"organizational_units,omitempty"` + + // The permission type of the workspace. If SERVICE_MANAGED is specified, the IAM roles and IAM policy attachments are generated automatically. If CUSTOMER_MANAGED is specified, the IAM roles and IAM policy attachments will not be created. + PermissionType *string `json:"permissionType,omitempty" tf:"permission_type,omitempty"` + + // The IAM role ARN that the workspace assumes. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + SAMLConfigurationStatus *string `json:"samlConfigurationStatus,omitempty" tf:"saml_configuration_status,omitempty"` + + // The AWS CloudFormation stack set name that provisions IAM roles to be used by the workspace. + StackSetName *string `json:"stackSetName,omitempty" tf:"stack_set_name,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The configuration settings for an Amazon VPC that contains data sources for your Grafana workspace to connect to. See VPC Configuration below. + VPCConfiguration *VPCConfigurationObservation `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type WorkspaceParameters struct { + + // The type of account access for the workspace. Valid values are CURRENT_ACCOUNT and ORGANIZATION. If ORGANIZATION is specified, then organizational_units must also be present. + // +kubebuilder:validation:Optional + AccountAccessType *string `json:"accountAccessType,omitempty" tf:"account_access_type,omitempty"` + + // The authentication providers for the workspace. Valid values are AWS_SSO, SAML, or both. + // +kubebuilder:validation:Optional + AuthenticationProviders []*string `json:"authenticationProviders,omitempty" tf:"authentication_providers,omitempty"` + + // The configuration string for the workspace that you create. For more information about the format and configuration options available, see Working in your Grafana workspace. + // +kubebuilder:validation:Optional + Configuration *string `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The data sources for the workspace. Valid values are AMAZON_OPENSEARCH_SERVICE, ATHENA, CLOUDWATCH, PROMETHEUS, REDSHIFT, SITEWISE, TIMESTREAM, XRAY + // +kubebuilder:validation:Optional + DataSources []*string `json:"dataSources,omitempty" tf:"data_sources,omitempty"` + + // The workspace description. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the version of Grafana to support in the new workspace. Supported values are 8.4, 9.4 and 10.4. If not specified, defaults to 9.4. + // +kubebuilder:validation:Optional + GrafanaVersion *string `json:"grafanaVersion,omitempty" tf:"grafana_version,omitempty"` + + // The Grafana workspace name. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration for network access to your workspace.See Network Access Control below. + // +kubebuilder:validation:Optional + NetworkAccessControl *NetworkAccessControlParameters `json:"networkAccessControl,omitempty" tf:"network_access_control,omitempty"` + + // The notification destinations. If a data source is specified here, Amazon Managed Grafana will create IAM roles and permissions needed to use these destinations. Must be set to SNS. + // +kubebuilder:validation:Optional + NotificationDestinations []*string `json:"notificationDestinations,omitempty" tf:"notification_destinations,omitempty"` + + // The role name that the workspace uses to access resources through Amazon Organizations. + // +kubebuilder:validation:Optional + OrganizationRoleName *string `json:"organizationRoleName,omitempty" tf:"organization_role_name,omitempty"` + + // The Amazon Organizations organizational units that the workspace is authorized to use data sources from. + // +kubebuilder:validation:Optional + OrganizationalUnits []*string `json:"organizationalUnits,omitempty" tf:"organizational_units,omitempty"` + + // The permission type of the workspace. If SERVICE_MANAGED is specified, the IAM roles and IAM policy attachments are generated automatically. If CUSTOMER_MANAGED is specified, the IAM roles and IAM policy attachments will not be created. 
+ // +kubebuilder:validation:Optional + PermissionType *string `json:"permissionType,omitempty" tf:"permission_type,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The IAM role ARN that the workspace assumes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The AWS CloudFormation stack set name that provisions IAM roles to be used by the workspace. + // +kubebuilder:validation:Optional + StackSetName *string `json:"stackSetName,omitempty" tf:"stack_set_name,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The configuration settings for an Amazon VPC that contains data sources for your Grafana workspace to connect to. See VPC Configuration below. + // +kubebuilder:validation:Optional + VPCConfiguration *VPCConfigurationParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +// WorkspaceSpec defines the desired state of Workspace +type WorkspaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkspaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WorkspaceInitParameters `json:"initProvider,omitempty"` +} + +// WorkspaceStatus defines the observed state of Workspace. +type WorkspaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkspaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workspace is the Schema for the Workspaces API. Provides an Amazon Managed Grafana workspace resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Workspace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.accountAccessType) || (has(self.initProvider) && has(self.initProvider.accountAccessType))",message="spec.forProvider.accountAccessType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authenticationProviders) || (has(self.initProvider) && has(self.initProvider.authenticationProviders))",message="spec.forProvider.authenticationProviders is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.permissionType) || (has(self.initProvider) && has(self.initProvider.permissionType))",message="spec.forProvider.permissionType is a required parameter" + Spec WorkspaceSpec `json:"spec"` + Status WorkspaceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkspaceList contains a list of Workspaces +type WorkspaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workspace `json:"items"` +} + +// 
Repository type metadata. +var ( + Workspace_Kind = "Workspace" + Workspace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workspace_Kind}.String() + Workspace_KindAPIVersion = Workspace_Kind + "." + CRDGroupVersion.String() + Workspace_GroupVersionKind = CRDGroupVersion.WithKind(Workspace_Kind) +) + +func init() { + SchemeBuilder.Register(&Workspace{}, &WorkspaceList{}) +} diff --git a/apis/guardduty/v1beta1/zz_generated.conversion_hubs.go b/apis/guardduty/v1beta1/zz_generated.conversion_hubs.go index 8c1d1989c3..ea5d237822 100755 --- a/apis/guardduty/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/guardduty/v1beta1/zz_generated.conversion_hubs.go @@ -6,11 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Detector) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Filter) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Member) Hub() {} diff --git a/apis/guardduty/v1beta1/zz_generated.conversion_spokes.go b/apis/guardduty/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..6a369e5d97 --- /dev/null +++ b/apis/guardduty/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Detector to the hub type. 
+func (tr *Detector) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Detector type. +func (tr *Detector) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Filter to the hub type. +func (tr *Filter) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Filter type. 
+func (tr *Filter) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/guardduty/v1beta1/zz_generated.resolvers.go b/apis/guardduty/v1beta1/zz_generated.resolvers.go index 4894251131..f4873e6af0 100644 --- a/apis/guardduty/v1beta1/zz_generated.resolvers.go +++ b/apis/guardduty/v1beta1/zz_generated.resolvers.go @@ -9,9 +9,10 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" client "sigs.k8s.io/controller-runtime/pkg/client" // ResolveReferences of this Filter. 
@@ -57,7 +58,7 @@ func (mg *Member) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("guardduty.aws.upbound.io", "v1beta1", "Detector", "DetectorList") + m, l, err = apisresolver.GetManagedResource("guardduty.aws.upbound.io", "v1beta2", "Detector", "DetectorList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -76,7 +77,7 @@ func (mg *Member) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.AccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("guardduty.aws.upbound.io", "v1beta1", "Detector", "DetectorList") + m, l, err = apisresolver.GetManagedResource("guardduty.aws.upbound.io", "v1beta2", "Detector", "DetectorList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -95,7 +96,7 @@ func (mg *Member) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.DetectorID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DetectorIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("guardduty.aws.upbound.io", "v1beta1", "Detector", "DetectorList") + m, l, err = apisresolver.GetManagedResource("guardduty.aws.upbound.io", "v1beta2", "Detector", "DetectorList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -114,7 +115,7 @@ func (mg *Member) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.InitProvider.AccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.AccountIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("guardduty.aws.upbound.io", "v1beta1", "Detector", "DetectorList") + m, l, err = apisresolver.GetManagedResource("guardduty.aws.upbound.io", "v1beta2", "Detector", "DetectorList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/guardduty/v1beta1/zz_member_types.go b/apis/guardduty/v1beta1/zz_member_types.go index 4bfe92da05..f1226c9c41 100755 --- a/apis/guardduty/v1beta1/zz_member_types.go +++ b/apis/guardduty/v1beta1/zz_member_types.go @@ -16,7 +16,7 @@ import ( type MemberInitParameters struct { // AWS account ID for member account. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/guardduty/v1beta1.Detector + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/guardduty/v1beta2.Detector // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("account_id",true) AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` @@ -29,7 +29,7 @@ type MemberInitParameters struct { AccountIDSelector *v1.Selector `json:"accountIdSelector,omitempty" tf:"-"` // The detector ID of the GuardDuty account where you want to create member accounts. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/guardduty/v1beta1.Detector + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/guardduty/v1beta2.Detector // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DetectorID *string `json:"detectorId,omitempty" tf:"detector_id,omitempty"` @@ -84,7 +84,7 @@ type MemberObservation struct { type MemberParameters struct { // AWS account ID for member account. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/guardduty/v1beta1.Detector + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/guardduty/v1beta2.Detector // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("account_id",true) // +kubebuilder:validation:Optional AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` @@ -98,7 +98,7 @@ type MemberParameters struct { AccountIDSelector *v1.Selector `json:"accountIdSelector,omitempty" tf:"-"` // The detector ID of the GuardDuty account where you want to create member accounts. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/guardduty/v1beta1.Detector + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/guardduty/v1beta2.Detector // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DetectorID *string `json:"detectorId,omitempty" tf:"detector_id,omitempty"` diff --git a/apis/guardduty/v1beta2/zz_detector_terraformed.go b/apis/guardduty/v1beta2/zz_detector_terraformed.go new file mode 100755 index 0000000000..ca23e277ce --- /dev/null +++ b/apis/guardduty/v1beta2/zz_detector_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Detector +func (mg *Detector) GetTerraformResourceType() string { + return "aws_guardduty_detector" +} + +// GetConnectionDetailsMapping for this Detector +func (tr *Detector) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Detector +func (tr *Detector) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Detector +func (tr *Detector) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Detector +func (tr *Detector) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Detector +func (tr *Detector) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Detector +func (tr *Detector) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Detector +func (tr *Detector) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, 
&base) +} + +// GetInitParameters of this Detector +func (tr *Detector) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Detector using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Detector) LateInitialize(attrs []byte) (bool, error) { + params := &DetectorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Detector) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/guardduty/v1beta2/zz_detector_types.go b/apis/guardduty/v1beta2/zz_detector_types.go new file mode 100755 index 0000000000..81fe08f94e --- /dev/null +++ b/apis/guardduty/v1beta2/zz_detector_types.go @@ -0,0 +1,324 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuditLogsInitParameters struct { + + // If true, enables Malware Protection as data source for the detector. + // Defaults to true. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` +} + +type AuditLogsObservation struct { + + // If true, enables Malware Protection as data source for the detector. + // Defaults to true. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` +} + +type AuditLogsParameters struct { + + // If true, enables Malware Protection as data source for the detector. + // Defaults to true. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable" tf:"enable,omitempty"` +} + +type DatasourcesInitParameters struct { + + // Configures Kubernetes protection. + // See Kubernetes and Kubernetes Audit Logs below for more details. + Kubernetes *KubernetesInitParameters `json:"kubernetes,omitempty" tf:"kubernetes,omitempty"` + + // Configures Malware Protection. + // See Malware Protection, Scan EC2 instance with findings and EBS volumes below for more details. + MalwareProtection *MalwareProtectionInitParameters `json:"malwareProtection,omitempty" tf:"malware_protection,omitempty"` + + // Configures S3 protection. 
+ // See S3 Logs below for more details. + S3Logs *S3LogsInitParameters `json:"s3Logs,omitempty" tf:"s3_logs,omitempty"` +} + +type DatasourcesObservation struct { + + // Configures Kubernetes protection. + // See Kubernetes and Kubernetes Audit Logs below for more details. + Kubernetes *KubernetesObservation `json:"kubernetes,omitempty" tf:"kubernetes,omitempty"` + + // Configures Malware Protection. + // See Malware Protection, Scan EC2 instance with findings and EBS volumes below for more details. + MalwareProtection *MalwareProtectionObservation `json:"malwareProtection,omitempty" tf:"malware_protection,omitempty"` + + // Configures S3 protection. + // See S3 Logs below for more details. + S3Logs *S3LogsObservation `json:"s3Logs,omitempty" tf:"s3_logs,omitempty"` +} + +type DatasourcesParameters struct { + + // Configures Kubernetes protection. + // See Kubernetes and Kubernetes Audit Logs below for more details. + // +kubebuilder:validation:Optional + Kubernetes *KubernetesParameters `json:"kubernetes,omitempty" tf:"kubernetes,omitempty"` + + // Configures Malware Protection. + // See Malware Protection, Scan EC2 instance with findings and EBS volumes below for more details. + // +kubebuilder:validation:Optional + MalwareProtection *MalwareProtectionParameters `json:"malwareProtection,omitempty" tf:"malware_protection,omitempty"` + + // Configures S3 protection. + // See S3 Logs below for more details. + // +kubebuilder:validation:Optional + S3Logs *S3LogsParameters `json:"s3Logs,omitempty" tf:"s3_logs,omitempty"` +} + +type DetectorInitParameters struct { + + // Describes which data sources will be enabled for the detector. See Data Sources below for more details. Deprecated in favor of aws_guardduty_detector_feature resources. + Datasources *DatasourcesInitParameters `json:"datasources,omitempty" tf:"datasources,omitempty"` + + // Enable monitoring and feedback reporting. Setting to false is equivalent to "suspending" GuardDuty. Defaults to true. 
+ Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // Specifies the frequency of notifications sent for subsequent finding occurrences. If the detector is a GuardDuty member account, the value is determined by the GuardDuty primary account and cannot be modified, otherwise defaults to SIX_HOURS. Valid values for standalone and primary accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS. See AWS Documentation for more information. + FindingPublishingFrequency *string `json:"findingPublishingFrequency,omitempty" tf:"finding_publishing_frequency,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DetectorObservation struct { + + // The AWS account ID of the GuardDuty detector + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Amazon Resource Name (ARN) of the GuardDuty detector + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Describes which data sources will be enabled for the detector. See Data Sources below for more details. Deprecated in favor of aws_guardduty_detector_feature resources. + Datasources *DatasourcesObservation `json:"datasources,omitempty" tf:"datasources,omitempty"` + + // Enable monitoring and feedback reporting. Setting to false is equivalent to "suspending" GuardDuty. Defaults to true. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // Specifies the frequency of notifications sent for subsequent finding occurrences. If the detector is a GuardDuty member account, the value is determined by the GuardDuty primary account and cannot be modified, otherwise defaults to SIX_HOURS. Valid values for standalone and primary accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS. See AWS Documentation for more information. 
+ FindingPublishingFrequency *string `json:"findingPublishingFrequency,omitempty" tf:"finding_publishing_frequency,omitempty"` + + // The ID of the GuardDuty detector + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type DetectorParameters struct { + + // Describes which data sources will be enabled for the detector. See Data Sources below for more details. Deprecated in favor of aws_guardduty_detector_feature resources. + // +kubebuilder:validation:Optional + Datasources *DatasourcesParameters `json:"datasources,omitempty" tf:"datasources,omitempty"` + + // Enable monitoring and feedback reporting. Setting to false is equivalent to "suspending" GuardDuty. Defaults to true. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // Specifies the frequency of notifications sent for subsequent finding occurrences. If the detector is a GuardDuty member account, the value is determined by the GuardDuty primary account and cannot be modified, otherwise defaults to SIX_HOURS. Valid values for standalone and primary accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS. See AWS Documentation for more information. + // +kubebuilder:validation:Optional + FindingPublishingFrequency *string `json:"findingPublishingFrequency,omitempty" tf:"finding_publishing_frequency,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EBSVolumesInitParameters struct { + + // If true, enables Malware Protection as data source for the detector. + // Defaults to true. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` +} + +type EBSVolumesObservation struct { + + // If true, enables Malware Protection as data source for the detector. + // Defaults to true. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` +} + +type EBSVolumesParameters struct { + + // If true, enables Malware Protection as data source for the detector. + // Defaults to true. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable" tf:"enable,omitempty"` +} + +type KubernetesInitParameters struct { + + // Configures Kubernetes audit logs as a data source for Kubernetes protection. + // See Kubernetes Audit Logs below for more details. + AuditLogs *AuditLogsInitParameters `json:"auditLogs,omitempty" tf:"audit_logs,omitempty"` +} + +type KubernetesObservation struct { + + // Configures Kubernetes audit logs as a data source for Kubernetes protection. + // See Kubernetes Audit Logs below for more details. + AuditLogs *AuditLogsObservation `json:"auditLogs,omitempty" tf:"audit_logs,omitempty"` +} + +type KubernetesParameters struct { + + // Configures Kubernetes audit logs as a data source for Kubernetes protection. + // See Kubernetes Audit Logs below for more details. + // +kubebuilder:validation:Optional + AuditLogs *AuditLogsParameters `json:"auditLogs" tf:"audit_logs,omitempty"` +} + +type MalwareProtectionInitParameters struct { + + // Configure whether Malware Protection is enabled as data source for EC2 instances with findings for the detector. + // See Scan EC2 instance with findings below for more details. 
+ ScanEC2InstanceWithFindings *ScanEC2InstanceWithFindingsInitParameters `json:"scanEc2InstanceWithFindings,omitempty" tf:"scan_ec2_instance_with_findings,omitempty"` +} + +type MalwareProtectionObservation struct { + + // Configure whether Malware Protection is enabled as data source for EC2 instances with findings for the detector. + // See Scan EC2 instance with findings below for more details. + ScanEC2InstanceWithFindings *ScanEC2InstanceWithFindingsObservation `json:"scanEc2InstanceWithFindings,omitempty" tf:"scan_ec2_instance_with_findings,omitempty"` +} + +type MalwareProtectionParameters struct { + + // Configure whether Malware Protection is enabled as data source for EC2 instances with findings for the detector. + // See Scan EC2 instance with findings below for more details. + // +kubebuilder:validation:Optional + ScanEC2InstanceWithFindings *ScanEC2InstanceWithFindingsParameters `json:"scanEc2InstanceWithFindings" tf:"scan_ec2_instance_with_findings,omitempty"` +} + +type S3LogsInitParameters struct { + + // If true, enables S3 protection. + // Defaults to true. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` +} + +type S3LogsObservation struct { + + // If true, enables S3 protection. + // Defaults to true. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` +} + +type S3LogsParameters struct { + + // If true, enables S3 protection. + // Defaults to true. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable" tf:"enable,omitempty"` +} + +type ScanEC2InstanceWithFindingsInitParameters struct { + + // Configure whether scanning EBS volumes is enabled as data source for the detector for instances with findings. + // See EBS volumes below for more details. 
+ EBSVolumes *EBSVolumesInitParameters `json:"ebsVolumes,omitempty" tf:"ebs_volumes,omitempty"` +} + +type ScanEC2InstanceWithFindingsObservation struct { + + // Configure whether scanning EBS volumes is enabled as data source for the detector for instances with findings. + // See EBS volumes below for more details. + EBSVolumes *EBSVolumesObservation `json:"ebsVolumes,omitempty" tf:"ebs_volumes,omitempty"` +} + +type ScanEC2InstanceWithFindingsParameters struct { + + // Configure whether scanning EBS volumes is enabled as data source for the detector for instances with findings. + // See EBS volumes below for more details. + // +kubebuilder:validation:Optional + EBSVolumes *EBSVolumesParameters `json:"ebsVolumes" tf:"ebs_volumes,omitempty"` +} + +// DetectorSpec defines the desired state of Detector +type DetectorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DetectorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DetectorInitParameters `json:"initProvider,omitempty"` +} + +// DetectorStatus defines the observed state of Detector. +type DetectorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DetectorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Detector is the Schema for the Detectors API. 
Provides a resource to manage an Amazon GuardDuty detector +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Detector struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DetectorSpec `json:"spec"` + Status DetectorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DetectorList contains a list of Detectors +type DetectorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Detector `json:"items"` +} + +// Repository type metadata. +var ( + Detector_Kind = "Detector" + Detector_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Detector_Kind}.String() + Detector_KindAPIVersion = Detector_Kind + "." + CRDGroupVersion.String() + Detector_GroupVersionKind = CRDGroupVersion.WithKind(Detector_Kind) +) + +func init() { + SchemeBuilder.Register(&Detector{}, &DetectorList{}) +} diff --git a/apis/guardduty/v1beta2/zz_filter_terraformed.go b/apis/guardduty/v1beta2/zz_filter_terraformed.go new file mode 100755 index 0000000000..ecd6e7bd9d --- /dev/null +++ b/apis/guardduty/v1beta2/zz_filter_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Filter +func (mg *Filter) GetTerraformResourceType() string { + return "aws_guardduty_filter" +} + +// GetConnectionDetailsMapping for this Filter +func (tr *Filter) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Filter +func (tr *Filter) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Filter +func (tr *Filter) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Filter +func (tr *Filter) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Filter +func (tr *Filter) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Filter +func (tr *Filter) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Filter +func (tr *Filter) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters 
of this Filter +func (tr *Filter) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Filter using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Filter) LateInitialize(attrs []byte) (bool, error) { + params := &FilterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Filter) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/guardduty/v1beta2/zz_filter_types.go b/apis/guardduty/v1beta2/zz_filter_types.go new file mode 100755 index 0000000000..f0903a32b7 --- /dev/null +++ b/apis/guardduty/v1beta2/zz_filter_types.go @@ -0,0 +1,264 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CriterionInitParameters struct { + + // List of string values to be evaluated. + Equals []*string `json:"equals,omitempty" tf:"equals,omitempty"` + + // The name of the field to be evaluated. The full list of field names can be found in AWS documentation. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + GreaterThan *string `json:"greaterThan,omitempty" tf:"greater_than,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + GreaterThanOrEqual *string `json:"greaterThanOrEqual,omitempty" tf:"greater_than_or_equal,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + LessThan *string `json:"lessThan,omitempty" tf:"less_than,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + LessThanOrEqual *string `json:"lessThanOrEqual,omitempty" tf:"less_than_or_equal,omitempty"` + + // List of string values to be evaluated. 
+ NotEquals []*string `json:"notEquals,omitempty" tf:"not_equals,omitempty"` +} + +type CriterionObservation struct { + + // List of string values to be evaluated. + Equals []*string `json:"equals,omitempty" tf:"equals,omitempty"` + + // The name of the field to be evaluated. The full list of field names can be found in AWS documentation. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + GreaterThan *string `json:"greaterThan,omitempty" tf:"greater_than,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + GreaterThanOrEqual *string `json:"greaterThanOrEqual,omitempty" tf:"greater_than_or_equal,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + LessThan *string `json:"lessThan,omitempty" tf:"less_than,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + LessThanOrEqual *string `json:"lessThanOrEqual,omitempty" tf:"less_than_or_equal,omitempty"` + + // List of string values to be evaluated. + NotEquals []*string `json:"notEquals,omitempty" tf:"not_equals,omitempty"` +} + +type CriterionParameters struct { + + // List of string values to be evaluated. + // +kubebuilder:validation:Optional + Equals []*string `json:"equals,omitempty" tf:"equals,omitempty"` + + // The name of the field to be evaluated. The full list of field names can be found in AWS documentation. + // +kubebuilder:validation:Optional + Field *string `json:"field" tf:"field,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + // +kubebuilder:validation:Optional + GreaterThan *string `json:"greaterThan,omitempty" tf:"greater_than,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. 
+ // +kubebuilder:validation:Optional + GreaterThanOrEqual *string `json:"greaterThanOrEqual,omitempty" tf:"greater_than_or_equal,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + // +kubebuilder:validation:Optional + LessThan *string `json:"lessThan,omitempty" tf:"less_than,omitempty"` + + // A value to be evaluated. Accepts either an integer or a date in RFC 3339 format. + // +kubebuilder:validation:Optional + LessThanOrEqual *string `json:"lessThanOrEqual,omitempty" tf:"less_than_or_equal,omitempty"` + + // List of string values to be evaluated. + // +kubebuilder:validation:Optional + NotEquals []*string `json:"notEquals,omitempty" tf:"not_equals,omitempty"` +} + +type FilterInitParameters struct { + + // Specifies the action that is to be applied to the findings that match the filter. Can be one of ARCHIVE or NOOP. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Description of the filter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Represents the criteria to be used in the filter for querying findings. Contains one or more criterion blocks, documented below. + FindingCriteria *FindingCriteriaInitParameters `json:"findingCriteria,omitempty" tf:"finding_criteria,omitempty"` + + // Specifies the position of the filter in the list of current filters. Also specifies the order in which this filter is applied to the findings. + Rank *float64 `json:"rank,omitempty" tf:"rank,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FilterObservation struct { + + // Specifies the action that is to be applied to the findings that match the filter. Can be one of ARCHIVE or NOOP. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The ARN of the GuardDuty filter. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description of the filter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of a GuardDuty detector, attached to your account. + DetectorID *string `json:"detectorId,omitempty" tf:"detector_id,omitempty"` + + // Represents the criteria to be used in the filter for querying findings. Contains one or more criterion blocks, documented below. + FindingCriteria *FindingCriteriaObservation `json:"findingCriteria,omitempty" tf:"finding_criteria,omitempty"` + + // A compound field, consisting of the ID of the GuardDuty detector and the name of the filter. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the position of the filter in the list of current filters. Also specifies the order in which this filter is applied to the findings. + Rank *float64 `json:"rank,omitempty" tf:"rank,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type FilterParameters struct { + + // Specifies the action that is to be applied to the findings that match the filter. Can be one of ARCHIVE or NOOP. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Description of the filter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of a GuardDuty detector, attached to your account. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/guardduty/v1beta2.Detector + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DetectorID *string `json:"detectorId,omitempty" tf:"detector_id,omitempty"` + + // Reference to a Detector in guardduty to populate detectorId. + // +kubebuilder:validation:Optional + DetectorIDRef *v1.Reference `json:"detectorIdRef,omitempty" tf:"-"` + + // Selector for a Detector in guardduty to populate detectorId. + // +kubebuilder:validation:Optional + DetectorIDSelector *v1.Selector `json:"detectorIdSelector,omitempty" tf:"-"` + + // Represents the criteria to be used in the filter for querying findings. Contains one or more criterion blocks, documented below. + // +kubebuilder:validation:Optional + FindingCriteria *FindingCriteriaParameters `json:"findingCriteria,omitempty" tf:"finding_criteria,omitempty"` + + // Specifies the position of the filter in the list of current filters. Also specifies the order in which this filter is applied to the findings. + // +kubebuilder:validation:Optional + Rank *float64 `json:"rank,omitempty" tf:"rank,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FindingCriteriaInitParameters struct { + Criterion []CriterionInitParameters `json:"criterion,omitempty" tf:"criterion,omitempty"` +} + +type FindingCriteriaObservation struct { + Criterion []CriterionObservation `json:"criterion,omitempty" tf:"criterion,omitempty"` +} + +type FindingCriteriaParameters struct { + + // +kubebuilder:validation:Optional + Criterion []CriterionParameters `json:"criterion" tf:"criterion,omitempty"` +} + +// FilterSpec defines the desired state of Filter +type FilterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FilterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FilterInitParameters `json:"initProvider,omitempty"` +} + +// FilterStatus defines the observed state of Filter. +type FilterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FilterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Filter is the Schema for the Filters API. 
Provides a resource to manage a GuardDuty filter +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Filter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.action) || (has(self.initProvider) && has(self.initProvider.action))",message="spec.forProvider.action is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.findingCriteria) || (has(self.initProvider) && has(self.initProvider.findingCriteria))",message="spec.forProvider.findingCriteria is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.rank) || (has(self.initProvider) && has(self.initProvider.rank))",message="spec.forProvider.rank is a required parameter" + Spec FilterSpec `json:"spec"` + Status FilterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FilterList contains a list of Filters +type FilterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Filter `json:"items"` +} + +// Repository type metadata. 
+var ( + Filter_Kind = "Filter" + Filter_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Filter_Kind}.String() + Filter_KindAPIVersion = Filter_Kind + "." + CRDGroupVersion.String() + Filter_GroupVersionKind = CRDGroupVersion.WithKind(Filter_Kind) +) + +func init() { + SchemeBuilder.Register(&Filter{}, &FilterList{}) +} diff --git a/apis/guardduty/v1beta2/zz_generated.conversion_hubs.go b/apis/guardduty/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..f7ef8f4dc3 --- /dev/null +++ b/apis/guardduty/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Detector) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Filter) Hub() {} diff --git a/apis/guardduty/v1beta2/zz_generated.deepcopy.go b/apis/guardduty/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..713b4592b9 --- /dev/null +++ b/apis/guardduty/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1282 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditLogsInitParameters) DeepCopyInto(out *AuditLogsInitParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditLogsInitParameters. 
+func (in *AuditLogsInitParameters) DeepCopy() *AuditLogsInitParameters { + if in == nil { + return nil + } + out := new(AuditLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditLogsObservation) DeepCopyInto(out *AuditLogsObservation) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditLogsObservation. +func (in *AuditLogsObservation) DeepCopy() *AuditLogsObservation { + if in == nil { + return nil + } + out := new(AuditLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditLogsParameters) DeepCopyInto(out *AuditLogsParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditLogsParameters. +func (in *AuditLogsParameters) DeepCopy() *AuditLogsParameters { + if in == nil { + return nil + } + out := new(AuditLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CriterionInitParameters) DeepCopyInto(out *CriterionInitParameters) { + *out = *in + if in.Equals != nil { + in, out := &in.Equals, &out.Equals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.GreaterThan != nil { + in, out := &in.GreaterThan, &out.GreaterThan + *out = new(string) + **out = **in + } + if in.GreaterThanOrEqual != nil { + in, out := &in.GreaterThanOrEqual, &out.GreaterThanOrEqual + *out = new(string) + **out = **in + } + if in.LessThan != nil { + in, out := &in.LessThan, &out.LessThan + *out = new(string) + **out = **in + } + if in.LessThanOrEqual != nil { + in, out := &in.LessThanOrEqual, &out.LessThanOrEqual + *out = new(string) + **out = **in + } + if in.NotEquals != nil { + in, out := &in.NotEquals, &out.NotEquals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriterionInitParameters. +func (in *CriterionInitParameters) DeepCopy() *CriterionInitParameters { + if in == nil { + return nil + } + out := new(CriterionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CriterionObservation) DeepCopyInto(out *CriterionObservation) { + *out = *in + if in.Equals != nil { + in, out := &in.Equals, &out.Equals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.GreaterThan != nil { + in, out := &in.GreaterThan, &out.GreaterThan + *out = new(string) + **out = **in + } + if in.GreaterThanOrEqual != nil { + in, out := &in.GreaterThanOrEqual, &out.GreaterThanOrEqual + *out = new(string) + **out = **in + } + if in.LessThan != nil { + in, out := &in.LessThan, &out.LessThan + *out = new(string) + **out = **in + } + if in.LessThanOrEqual != nil { + in, out := &in.LessThanOrEqual, &out.LessThanOrEqual + *out = new(string) + **out = **in + } + if in.NotEquals != nil { + in, out := &in.NotEquals, &out.NotEquals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriterionObservation. +func (in *CriterionObservation) DeepCopy() *CriterionObservation { + if in == nil { + return nil + } + out := new(CriterionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CriterionParameters) DeepCopyInto(out *CriterionParameters) { + *out = *in + if in.Equals != nil { + in, out := &in.Equals, &out.Equals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.GreaterThan != nil { + in, out := &in.GreaterThan, &out.GreaterThan + *out = new(string) + **out = **in + } + if in.GreaterThanOrEqual != nil { + in, out := &in.GreaterThanOrEqual, &out.GreaterThanOrEqual + *out = new(string) + **out = **in + } + if in.LessThan != nil { + in, out := &in.LessThan, &out.LessThan + *out = new(string) + **out = **in + } + if in.LessThanOrEqual != nil { + in, out := &in.LessThanOrEqual, &out.LessThanOrEqual + *out = new(string) + **out = **in + } + if in.NotEquals != nil { + in, out := &in.NotEquals, &out.NotEquals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriterionParameters. +func (in *CriterionParameters) DeepCopy() *CriterionParameters { + if in == nil { + return nil + } + out := new(CriterionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatasourcesInitParameters) DeepCopyInto(out *DatasourcesInitParameters) { + *out = *in + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(KubernetesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MalwareProtection != nil { + in, out := &in.MalwareProtection, &out.MalwareProtection + *out = new(MalwareProtectionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Logs != nil { + in, out := &in.S3Logs, &out.S3Logs + *out = new(S3LogsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasourcesInitParameters. +func (in *DatasourcesInitParameters) DeepCopy() *DatasourcesInitParameters { + if in == nil { + return nil + } + out := new(DatasourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatasourcesObservation) DeepCopyInto(out *DatasourcesObservation) { + *out = *in + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(KubernetesObservation) + (*in).DeepCopyInto(*out) + } + if in.MalwareProtection != nil { + in, out := &in.MalwareProtection, &out.MalwareProtection + *out = new(MalwareProtectionObservation) + (*in).DeepCopyInto(*out) + } + if in.S3Logs != nil { + in, out := &in.S3Logs, &out.S3Logs + *out = new(S3LogsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasourcesObservation. +func (in *DatasourcesObservation) DeepCopy() *DatasourcesObservation { + if in == nil { + return nil + } + out := new(DatasourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatasourcesParameters) DeepCopyInto(out *DatasourcesParameters) { + *out = *in + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(KubernetesParameters) + (*in).DeepCopyInto(*out) + } + if in.MalwareProtection != nil { + in, out := &in.MalwareProtection, &out.MalwareProtection + *out = new(MalwareProtectionParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Logs != nil { + in, out := &in.S3Logs, &out.S3Logs + *out = new(S3LogsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatasourcesParameters. +func (in *DatasourcesParameters) DeepCopy() *DatasourcesParameters { + if in == nil { + return nil + } + out := new(DatasourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Detector) DeepCopyInto(out *Detector) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Detector. +func (in *Detector) DeepCopy() *Detector { + if in == nil { + return nil + } + out := new(Detector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Detector) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DetectorInitParameters) DeepCopyInto(out *DetectorInitParameters) { + *out = *in + if in.Datasources != nil { + in, out := &in.Datasources, &out.Datasources + *out = new(DatasourcesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.FindingPublishingFrequency != nil { + in, out := &in.FindingPublishingFrequency, &out.FindingPublishingFrequency + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorInitParameters. +func (in *DetectorInitParameters) DeepCopy() *DetectorInitParameters { + if in == nil { + return nil + } + out := new(DetectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectorList) DeepCopyInto(out *DetectorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Detector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorList. +func (in *DetectorList) DeepCopy() *DetectorList { + if in == nil { + return nil + } + out := new(DetectorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DetectorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectorObservation) DeepCopyInto(out *DetectorObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Datasources != nil { + in, out := &in.Datasources, &out.Datasources + *out = new(DatasourcesObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.FindingPublishingFrequency != nil { + in, out := &in.FindingPublishingFrequency, &out.FindingPublishingFrequency + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorObservation. 
+func (in *DetectorObservation) DeepCopy() *DetectorObservation { + if in == nil { + return nil + } + out := new(DetectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectorParameters) DeepCopyInto(out *DetectorParameters) { + *out = *in + if in.Datasources != nil { + in, out := &in.Datasources, &out.Datasources + *out = new(DatasourcesParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.FindingPublishingFrequency != nil { + in, out := &in.FindingPublishingFrequency, &out.FindingPublishingFrequency + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorParameters. +func (in *DetectorParameters) DeepCopy() *DetectorParameters { + if in == nil { + return nil + } + out := new(DetectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectorSpec) DeepCopyInto(out *DetectorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorSpec. 
+func (in *DetectorSpec) DeepCopy() *DetectorSpec { + if in == nil { + return nil + } + out := new(DetectorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetectorStatus) DeepCopyInto(out *DetectorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectorStatus. +func (in *DetectorStatus) DeepCopy() *DetectorStatus { + if in == nil { + return nil + } + out := new(DetectorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSVolumesInitParameters) DeepCopyInto(out *EBSVolumesInitParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSVolumesInitParameters. +func (in *EBSVolumesInitParameters) DeepCopy() *EBSVolumesInitParameters { + if in == nil { + return nil + } + out := new(EBSVolumesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSVolumesObservation) DeepCopyInto(out *EBSVolumesObservation) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSVolumesObservation. 
+func (in *EBSVolumesObservation) DeepCopy() *EBSVolumesObservation { + if in == nil { + return nil + } + out := new(EBSVolumesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSVolumesParameters) DeepCopyInto(out *EBSVolumesParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSVolumesParameters. +func (in *EBSVolumesParameters) DeepCopy() *EBSVolumesParameters { + if in == nil { + return nil + } + out := new(EBSVolumesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Filter) DeepCopyInto(out *Filter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. +func (in *Filter) DeepCopy() *Filter { + if in == nil { + return nil + } + out := new(Filter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Filter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FindingCriteria != nil { + in, out := &in.FindingCriteria, &out.FindingCriteria + *out = new(FindingCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Rank != nil { + in, out := &in.Rank, &out.Rank + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterList) DeepCopyInto(out *FilterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Filter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterList. +func (in *FilterList) DeepCopy() *FilterList { + if in == nil { + return nil + } + out := new(FilterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FilterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DetectorID != nil { + in, out := &in.DetectorID, &out.DetectorID + *out = new(string) + **out = **in + } + if in.FindingCriteria != nil { + in, out := &in.FindingCriteria, &out.FindingCriteria + *out = new(FindingCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Rank != nil { + in, out := &in.Rank, &out.Rank + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. 
+func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DetectorID != nil { + in, out := &in.DetectorID, &out.DetectorID + *out = new(string) + **out = **in + } + if in.DetectorIDRef != nil { + in, out := &in.DetectorIDRef, &out.DetectorIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DetectorIDSelector != nil { + in, out := &in.DetectorIDSelector, &out.DetectorIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FindingCriteria != nil { + in, out := &in.FindingCriteria, &out.FindingCriteria + *out = new(FindingCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.Rank != nil { + in, out := &in.Rank, &out.Rank + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. 
+func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterSpec) DeepCopyInto(out *FilterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSpec. +func (in *FilterSpec) DeepCopy() *FilterSpec { + if in == nil { + return nil + } + out := new(FilterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterStatus) DeepCopyInto(out *FilterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterStatus. +func (in *FilterStatus) DeepCopy() *FilterStatus { + if in == nil { + return nil + } + out := new(FilterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingCriteriaInitParameters) DeepCopyInto(out *FindingCriteriaInitParameters) { + *out = *in + if in.Criterion != nil { + in, out := &in.Criterion, &out.Criterion + *out = make([]CriterionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingCriteriaInitParameters. 
+func (in *FindingCriteriaInitParameters) DeepCopy() *FindingCriteriaInitParameters { + if in == nil { + return nil + } + out := new(FindingCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingCriteriaObservation) DeepCopyInto(out *FindingCriteriaObservation) { + *out = *in + if in.Criterion != nil { + in, out := &in.Criterion, &out.Criterion + *out = make([]CriterionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingCriteriaObservation. +func (in *FindingCriteriaObservation) DeepCopy() *FindingCriteriaObservation { + if in == nil { + return nil + } + out := new(FindingCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingCriteriaParameters) DeepCopyInto(out *FindingCriteriaParameters) { + *out = *in + if in.Criterion != nil { + in, out := &in.Criterion, &out.Criterion + *out = make([]CriterionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingCriteriaParameters. +func (in *FindingCriteriaParameters) DeepCopy() *FindingCriteriaParameters { + if in == nil { + return nil + } + out := new(FindingCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesInitParameters) DeepCopyInto(out *KubernetesInitParameters) { + *out = *in + if in.AuditLogs != nil { + in, out := &in.AuditLogs, &out.AuditLogs + *out = new(AuditLogsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesInitParameters. +func (in *KubernetesInitParameters) DeepCopy() *KubernetesInitParameters { + if in == nil { + return nil + } + out := new(KubernetesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesObservation) DeepCopyInto(out *KubernetesObservation) { + *out = *in + if in.AuditLogs != nil { + in, out := &in.AuditLogs, &out.AuditLogs + *out = new(AuditLogsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesObservation. +func (in *KubernetesObservation) DeepCopy() *KubernetesObservation { + if in == nil { + return nil + } + out := new(KubernetesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesParameters) DeepCopyInto(out *KubernetesParameters) { + *out = *in + if in.AuditLogs != nil { + in, out := &in.AuditLogs, &out.AuditLogs + *out = new(AuditLogsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesParameters. +func (in *KubernetesParameters) DeepCopy() *KubernetesParameters { + if in == nil { + return nil + } + out := new(KubernetesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MalwareProtectionInitParameters) DeepCopyInto(out *MalwareProtectionInitParameters) { + *out = *in + if in.ScanEC2InstanceWithFindings != nil { + in, out := &in.ScanEC2InstanceWithFindings, &out.ScanEC2InstanceWithFindings + *out = new(ScanEC2InstanceWithFindingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareProtectionInitParameters. +func (in *MalwareProtectionInitParameters) DeepCopy() *MalwareProtectionInitParameters { + if in == nil { + return nil + } + out := new(MalwareProtectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwareProtectionObservation) DeepCopyInto(out *MalwareProtectionObservation) { + *out = *in + if in.ScanEC2InstanceWithFindings != nil { + in, out := &in.ScanEC2InstanceWithFindings, &out.ScanEC2InstanceWithFindings + *out = new(ScanEC2InstanceWithFindingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareProtectionObservation. +func (in *MalwareProtectionObservation) DeepCopy() *MalwareProtectionObservation { + if in == nil { + return nil + } + out := new(MalwareProtectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwareProtectionParameters) DeepCopyInto(out *MalwareProtectionParameters) { + *out = *in + if in.ScanEC2InstanceWithFindings != nil { + in, out := &in.ScanEC2InstanceWithFindings, &out.ScanEC2InstanceWithFindings + *out = new(ScanEC2InstanceWithFindingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareProtectionParameters. 
+func (in *MalwareProtectionParameters) DeepCopy() *MalwareProtectionParameters { + if in == nil { + return nil + } + out := new(MalwareProtectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3LogsInitParameters) DeepCopyInto(out *S3LogsInitParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3LogsInitParameters. +func (in *S3LogsInitParameters) DeepCopy() *S3LogsInitParameters { + if in == nil { + return nil + } + out := new(S3LogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3LogsObservation) DeepCopyInto(out *S3LogsObservation) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3LogsObservation. +func (in *S3LogsObservation) DeepCopy() *S3LogsObservation { + if in == nil { + return nil + } + out := new(S3LogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3LogsParameters) DeepCopyInto(out *S3LogsParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3LogsParameters. 
+func (in *S3LogsParameters) DeepCopy() *S3LogsParameters { + if in == nil { + return nil + } + out := new(S3LogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScanEC2InstanceWithFindingsInitParameters) DeepCopyInto(out *ScanEC2InstanceWithFindingsInitParameters) { + *out = *in + if in.EBSVolumes != nil { + in, out := &in.EBSVolumes, &out.EBSVolumes + *out = new(EBSVolumesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScanEC2InstanceWithFindingsInitParameters. +func (in *ScanEC2InstanceWithFindingsInitParameters) DeepCopy() *ScanEC2InstanceWithFindingsInitParameters { + if in == nil { + return nil + } + out := new(ScanEC2InstanceWithFindingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScanEC2InstanceWithFindingsObservation) DeepCopyInto(out *ScanEC2InstanceWithFindingsObservation) { + *out = *in + if in.EBSVolumes != nil { + in, out := &in.EBSVolumes, &out.EBSVolumes + *out = new(EBSVolumesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScanEC2InstanceWithFindingsObservation. +func (in *ScanEC2InstanceWithFindingsObservation) DeepCopy() *ScanEC2InstanceWithFindingsObservation { + if in == nil { + return nil + } + out := new(ScanEC2InstanceWithFindingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScanEC2InstanceWithFindingsParameters) DeepCopyInto(out *ScanEC2InstanceWithFindingsParameters) { + *out = *in + if in.EBSVolumes != nil { + in, out := &in.EBSVolumes, &out.EBSVolumes + *out = new(EBSVolumesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScanEC2InstanceWithFindingsParameters. +func (in *ScanEC2InstanceWithFindingsParameters) DeepCopy() *ScanEC2InstanceWithFindingsParameters { + if in == nil { + return nil + } + out := new(ScanEC2InstanceWithFindingsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/guardduty/v1beta2/zz_generated.managed.go b/apis/guardduty/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..e4c7872e21 --- /dev/null +++ b/apis/guardduty/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Detector. +func (mg *Detector) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Detector. +func (mg *Detector) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Detector. +func (mg *Detector) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Detector. +func (mg *Detector) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Detector. +func (mg *Detector) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Detector. 
+func (mg *Detector) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Detector. +func (mg *Detector) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Detector. +func (mg *Detector) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Detector. +func (mg *Detector) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Detector. +func (mg *Detector) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Detector. +func (mg *Detector) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Detector. +func (mg *Detector) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Filter. +func (mg *Filter) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Filter. +func (mg *Filter) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Filter. +func (mg *Filter) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Filter. +func (mg *Filter) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Filter. +func (mg *Filter) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Filter. 
+func (mg *Filter) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Filter. +func (mg *Filter) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Filter. +func (mg *Filter) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Filter. +func (mg *Filter) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Filter. +func (mg *Filter) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Filter. +func (mg *Filter) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Filter. +func (mg *Filter) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/guardduty/v1beta2/zz_generated.managedlist.go b/apis/guardduty/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..9fb43d068d --- /dev/null +++ b/apis/guardduty/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DetectorList. +func (l *DetectorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FilterList. 
+func (l *FilterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/guardduty/v1beta2/zz_generated.resolvers.go b/apis/guardduty/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..d2b75bf7bf --- /dev/null +++ b/apis/guardduty/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Filter. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Filter) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("guardduty.aws.upbound.io", "v1beta2", "Detector", "DetectorList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DetectorID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DetectorIDRef, + Selector: mg.Spec.ForProvider.DetectorIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DetectorID") + } + mg.Spec.ForProvider.DetectorID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DetectorIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/guardduty/v1beta2/zz_groupversion_info.go b/apis/guardduty/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..8efdfdccca --- /dev/null +++ b/apis/guardduty/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=guardduty.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "guardduty.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/identitystore/v1beta1/zz_generated.conversion_hubs.go b/apis/identitystore/v1beta1/zz_generated.conversion_hubs.go index 265d391dc5..f5cd950357 100755 --- a/apis/identitystore/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/identitystore/v1beta1/zz_generated.conversion_hubs.go @@ -11,6 +11,3 @@ func (tr *Group) Hub() {} // Hub marks this type as a conversion hub. func (tr *GroupMembership) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *User) Hub() {} diff --git a/apis/identitystore/v1beta1/zz_generated.conversion_spokes.go b/apis/identitystore/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..12f2214eea --- /dev/null +++ b/apis/identitystore/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this User to the hub type. +func (tr *User) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the User type. 
+func (tr *User) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/identitystore/v1beta1/zz_generated.resolvers.go b/apis/identitystore/v1beta1/zz_generated.resolvers.go index 13ba9b0209..d3d1e33c77 100644 --- a/apis/identitystore/v1beta1/zz_generated.resolvers.go +++ b/apis/identitystore/v1beta1/zz_generated.resolvers.go @@ -9,9 +9,10 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" apisresolver "github.com/upbound/provider-aws/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -44,7 +45,7 @@ func (mg *GroupMembership) ResolveReferences( // ResolveReferences of this Group mg.Spec.ForProvider.GroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.GroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("identitystore.aws.upbound.io", "v1beta1", "User", "UserList") + m, l, err = apisresolver.GetManagedResource("identitystore.aws.upbound.io", "v1beta2", "User", "UserList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -82,7 +83,7 @@ func (mg *GroupMembership) ResolveReferences( // ResolveReferences of this Group mg.Spec.InitProvider.GroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.GroupIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("identitystore.aws.upbound.io", "v1beta1", "User", "UserList") + m, l, err = apisresolver.GetManagedResource("identitystore.aws.upbound.io", "v1beta2", "User", "UserList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/identitystore/v1beta1/zz_groupmembership_types.go b/apis/identitystore/v1beta1/zz_groupmembership_types.go index a1eaef4d2f..26803b8c55 100755 --- a/apis/identitystore/v1beta1/zz_groupmembership_types.go +++ b/apis/identitystore/v1beta1/zz_groupmembership_types.go @@ -29,7 +29,7 @@ type GroupMembershipInitParameters struct { GroupIDSelector *v1.Selector `json:"groupIdSelector,omitempty" tf:"-"` // The identifier for a user in the Identity Store. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/identitystore/v1beta1.User + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/identitystore/v1beta2.User // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("user_id",true) MemberID *string `json:"memberId,omitempty" tf:"member_id,omitempty"` @@ -80,7 +80,7 @@ type GroupMembershipParameters struct { IdentityStoreID *string `json:"identityStoreId" tf:"identity_store_id,omitempty"` // The identifier for a user in the Identity Store. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/identitystore/v1beta1.User + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/identitystore/v1beta2.User // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("user_id",true) // +kubebuilder:validation:Optional MemberID *string `json:"memberId,omitempty" tf:"member_id,omitempty"` diff --git a/apis/identitystore/v1beta2/zz_generated.conversion_hubs.go b/apis/identitystore/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..89554cb8a5 --- /dev/null +++ b/apis/identitystore/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *User) Hub() {} diff --git a/apis/identitystore/v1beta2/zz_generated.deepcopy.go b/apis/identitystore/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f9a6bb3689 --- /dev/null +++ b/apis/identitystore/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,909 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AddressesInitParameters) DeepCopyInto(out *AddressesInitParameters) { + *out = *in + if in.Country != nil { + in, out := &in.Country, &out.Country + *out = new(string) + **out = **in + } + if in.Formatted != nil { + in, out := &in.Formatted, &out.Formatted + *out = new(string) + **out = **in + } + if in.Locality != nil { + in, out := &in.Locality, &out.Locality + *out = new(string) + **out = **in + } + if in.PostalCode != nil { + in, out := &in.PostalCode, &out.PostalCode + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.StreetAddress != nil { + in, out := &in.StreetAddress, &out.StreetAddress + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressesInitParameters. +func (in *AddressesInitParameters) DeepCopy() *AddressesInitParameters { + if in == nil { + return nil + } + out := new(AddressesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AddressesObservation) DeepCopyInto(out *AddressesObservation) { + *out = *in + if in.Country != nil { + in, out := &in.Country, &out.Country + *out = new(string) + **out = **in + } + if in.Formatted != nil { + in, out := &in.Formatted, &out.Formatted + *out = new(string) + **out = **in + } + if in.Locality != nil { + in, out := &in.Locality, &out.Locality + *out = new(string) + **out = **in + } + if in.PostalCode != nil { + in, out := &in.PostalCode, &out.PostalCode + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StreetAddress != nil { + in, out := &in.StreetAddress, &out.StreetAddress + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressesObservation. +func (in *AddressesObservation) DeepCopy() *AddressesObservation { + if in == nil { + return nil + } + out := new(AddressesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AddressesParameters) DeepCopyInto(out *AddressesParameters) { + *out = *in + if in.Country != nil { + in, out := &in.Country, &out.Country + *out = new(string) + **out = **in + } + if in.Formatted != nil { + in, out := &in.Formatted, &out.Formatted + *out = new(string) + **out = **in + } + if in.Locality != nil { + in, out := &in.Locality, &out.Locality + *out = new(string) + **out = **in + } + if in.PostalCode != nil { + in, out := &in.PostalCode, &out.PostalCode + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StreetAddress != nil { + in, out := &in.StreetAddress, &out.StreetAddress + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressesParameters. +func (in *AddressesParameters) DeepCopy() *AddressesParameters { + if in == nil { + return nil + } + out := new(AddressesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailsInitParameters) DeepCopyInto(out *EmailsInitParameters) { + *out = *in + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailsInitParameters. 
+func (in *EmailsInitParameters) DeepCopy() *EmailsInitParameters { + if in == nil { + return nil + } + out := new(EmailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailsObservation) DeepCopyInto(out *EmailsObservation) { + *out = *in + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailsObservation. +func (in *EmailsObservation) DeepCopy() *EmailsObservation { + if in == nil { + return nil + } + out := new(EmailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailsParameters) DeepCopyInto(out *EmailsParameters) { + *out = *in + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailsParameters. +func (in *EmailsParameters) DeepCopy() *EmailsParameters { + if in == nil { + return nil + } + out := new(EmailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalIdsInitParameters) DeepCopyInto(out *ExternalIdsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIdsInitParameters. +func (in *ExternalIdsInitParameters) DeepCopy() *ExternalIdsInitParameters { + if in == nil { + return nil + } + out := new(ExternalIdsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIdsObservation) DeepCopyInto(out *ExternalIdsObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIdsObservation. +func (in *ExternalIdsObservation) DeepCopy() *ExternalIdsObservation { + if in == nil { + return nil + } + out := new(ExternalIdsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIdsParameters) DeepCopyInto(out *ExternalIdsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIdsParameters. +func (in *ExternalIdsParameters) DeepCopy() *ExternalIdsParameters { + if in == nil { + return nil + } + out := new(ExternalIdsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NameInitParameters) DeepCopyInto(out *NameInitParameters) { + *out = *in + if in.FamilyName != nil { + in, out := &in.FamilyName, &out.FamilyName + *out = new(string) + **out = **in + } + if in.Formatted != nil { + in, out := &in.Formatted, &out.Formatted + *out = new(string) + **out = **in + } + if in.GivenName != nil { + in, out := &in.GivenName, &out.GivenName + *out = new(string) + **out = **in + } + if in.HonorificPrefix != nil { + in, out := &in.HonorificPrefix, &out.HonorificPrefix + *out = new(string) + **out = **in + } + if in.HonorificSuffix != nil { + in, out := &in.HonorificSuffix, &out.HonorificSuffix + *out = new(string) + **out = **in + } + if in.MiddleName != nil { + in, out := &in.MiddleName, &out.MiddleName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameInitParameters. +func (in *NameInitParameters) DeepCopy() *NameInitParameters { + if in == nil { + return nil + } + out := new(NameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NameObservation) DeepCopyInto(out *NameObservation) { + *out = *in + if in.FamilyName != nil { + in, out := &in.FamilyName, &out.FamilyName + *out = new(string) + **out = **in + } + if in.Formatted != nil { + in, out := &in.Formatted, &out.Formatted + *out = new(string) + **out = **in + } + if in.GivenName != nil { + in, out := &in.GivenName, &out.GivenName + *out = new(string) + **out = **in + } + if in.HonorificPrefix != nil { + in, out := &in.HonorificPrefix, &out.HonorificPrefix + *out = new(string) + **out = **in + } + if in.HonorificSuffix != nil { + in, out := &in.HonorificSuffix, &out.HonorificSuffix + *out = new(string) + **out = **in + } + if in.MiddleName != nil { + in, out := &in.MiddleName, &out.MiddleName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameObservation. +func (in *NameObservation) DeepCopy() *NameObservation { + if in == nil { + return nil + } + out := new(NameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NameParameters) DeepCopyInto(out *NameParameters) { + *out = *in + if in.FamilyName != nil { + in, out := &in.FamilyName, &out.FamilyName + *out = new(string) + **out = **in + } + if in.Formatted != nil { + in, out := &in.Formatted, &out.Formatted + *out = new(string) + **out = **in + } + if in.GivenName != nil { + in, out := &in.GivenName, &out.GivenName + *out = new(string) + **out = **in + } + if in.HonorificPrefix != nil { + in, out := &in.HonorificPrefix, &out.HonorificPrefix + *out = new(string) + **out = **in + } + if in.HonorificSuffix != nil { + in, out := &in.HonorificSuffix, &out.HonorificSuffix + *out = new(string) + **out = **in + } + if in.MiddleName != nil { + in, out := &in.MiddleName, &out.MiddleName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameParameters. +func (in *NameParameters) DeepCopy() *NameParameters { + if in == nil { + return nil + } + out := new(NameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PhoneNumbersInitParameters) DeepCopyInto(out *PhoneNumbersInitParameters) { + *out = *in + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhoneNumbersInitParameters. +func (in *PhoneNumbersInitParameters) DeepCopy() *PhoneNumbersInitParameters { + if in == nil { + return nil + } + out := new(PhoneNumbersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PhoneNumbersObservation) DeepCopyInto(out *PhoneNumbersObservation) { + *out = *in + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhoneNumbersObservation. +func (in *PhoneNumbersObservation) DeepCopy() *PhoneNumbersObservation { + if in == nil { + return nil + } + out := new(PhoneNumbersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PhoneNumbersParameters) DeepCopyInto(out *PhoneNumbersParameters) { + *out = *in + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhoneNumbersParameters. +func (in *PhoneNumbersParameters) DeepCopy() *PhoneNumbersParameters { + if in == nil { + return nil + } + out := new(PhoneNumbersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. 
+func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserInitParameters) DeepCopyInto(out *UserInitParameters) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = new(AddressesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = new(EmailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Locale != nil { + in, out := &in.Locale, &out.Locale + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(NameInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Nickname != nil { + in, out := &in.Nickname, &out.Nickname + *out = new(string) + **out = **in + } + if in.PhoneNumbers != nil { + in, out := &in.PhoneNumbers, &out.PhoneNumbers + *out = new(PhoneNumbersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PreferredLanguage != nil { + in, out := &in.PreferredLanguage, &out.PreferredLanguage + *out = new(string) + **out = **in + } + if in.ProfileURL != nil { + in, out := &in.ProfileURL, &out.ProfileURL + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } + if in.UserType != 
nil { + in, out := &in.UserType, &out.UserType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInitParameters. +func (in *UserInitParameters) DeepCopy() *UserInitParameters { + if in == nil { + return nil + } + out := new(UserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserObservation) DeepCopyInto(out *UserObservation) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = new(AddressesObservation) + (*in).DeepCopyInto(*out) + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = new(EmailsObservation) + (*in).DeepCopyInto(*out) + } + if in.ExternalIds != nil { + in, out := &in.ExternalIds, &out.ExternalIds + *out = make([]ExternalIdsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IdentityStoreID != nil { + in, out := &in.IdentityStoreID, &out.IdentityStoreID + *out = new(string) + **out = **in + } + if in.Locale != nil { + in, out := &in.Locale, &out.Locale + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(NameObservation) + (*in).DeepCopyInto(*out) + } + if in.Nickname != nil { + in, out := &in.Nickname, &out.Nickname + *out = new(string) + **out = **in + } + if in.PhoneNumbers != nil { + in, out := &in.PhoneNumbers, &out.PhoneNumbers + *out = new(PhoneNumbersObservation) + (*in).DeepCopyInto(*out) + } + if in.PreferredLanguage != nil { + in, out := &in.PreferredLanguage, &out.PreferredLanguage + *out = new(string) + **out = **in + } + if in.ProfileURL != nil { + in, out := &in.ProfileURL, &out.ProfileURL + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(string) + **out = **in + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } + 
if in.UserType != nil { + in, out := &in.UserType, &out.UserType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. +func (in *UserObservation) DeepCopy() *UserObservation { + if in == nil { + return nil + } + out := new(UserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserParameters) DeepCopyInto(out *UserParameters) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = new(AddressesParameters) + (*in).DeepCopyInto(*out) + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = new(EmailsParameters) + (*in).DeepCopyInto(*out) + } + if in.IdentityStoreID != nil { + in, out := &in.IdentityStoreID, &out.IdentityStoreID + *out = new(string) + **out = **in + } + if in.Locale != nil { + in, out := &in.Locale, &out.Locale + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(NameParameters) + (*in).DeepCopyInto(*out) + } + if in.Nickname != nil { + in, out := &in.Nickname, &out.Nickname + *out = new(string) + **out = **in + } + if in.PhoneNumbers != nil { + in, out := &in.PhoneNumbers, &out.PhoneNumbers + *out = new(PhoneNumbersParameters) + (*in).DeepCopyInto(*out) + } + if in.PreferredLanguage != nil { + in, out := &in.PreferredLanguage, &out.PreferredLanguage + *out = new(string) + **out = **in + } + if in.ProfileURL != nil { + in, out := &in.ProfileURL, &out.ProfileURL + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + 
if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } + if in.UserType != nil { + in, out := &in.UserType, &out.UserType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { + if in == nil { + return nil + } + out := new(UserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSpec) DeepCopyInto(out *UserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. +func (in *UserSpec) DeepCopy() *UserSpec { + if in == nil { + return nil + } + out := new(UserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserStatus) DeepCopyInto(out *UserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. 
+func (in *UserStatus) DeepCopy() *UserStatus { + if in == nil { + return nil + } + out := new(UserStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/identitystore/v1beta2/zz_generated.managed.go b/apis/identitystore/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..9c3f149b93 --- /dev/null +++ b/apis/identitystore/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this User. +func (mg *User) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this User. +func (mg *User) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this User. +func (mg *User) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this User. +func (mg *User) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this User. +func (mg *User) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this User. +func (mg *User) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this User. +func (mg *User) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this User. +func (mg *User) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this User. 
+func (mg *User) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this User. +func (mg *User) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this User. +func (mg *User) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this User. +func (mg *User) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/identitystore/v1beta2/zz_generated.managedlist.go b/apis/identitystore/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..06af949ffc --- /dev/null +++ b/apis/identitystore/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this UserList. +func (l *UserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/identitystore/v1beta2/zz_groupversion_info.go b/apis/identitystore/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..d4eeca493a --- /dev/null +++ b/apis/identitystore/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=identitystore.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "identitystore.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/identitystore/v1beta2/zz_user_terraformed.go b/apis/identitystore/v1beta2/zz_user_terraformed.go new file mode 100755 index 0000000000..3fc9a20122 --- /dev/null +++ b/apis/identitystore/v1beta2/zz_user_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this User +func (mg *User) GetTerraformResourceType() string { + return "aws_identitystore_user" +} + +// GetConnectionDetailsMapping for this User +func (tr *User) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this User +func (tr *User) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this User +func (tr *User) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this User +func (tr *User) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this User +func (tr *User) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this User +func (tr *User) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this User +func (tr *User) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this User +func (tr *User) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this User using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *User) LateInitialize(attrs []byte) (bool, error) { + params := &UserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *User) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/identitystore/v1beta2/zz_user_types.go b/apis/identitystore/v1beta2/zz_user_types.go new file mode 100755 index 0000000000..5b36cc3fd4 --- /dev/null +++ b/apis/identitystore/v1beta2/zz_user_types.go @@ -0,0 +1,486 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AddressesInitParameters struct { + + // The country that this address is in. + Country *string `json:"country,omitempty" tf:"country,omitempty"` + + // The name that is typically displayed when the address is shown for display. + Formatted *string `json:"formatted,omitempty" tf:"formatted,omitempty"` + + // The address locality. + Locality *string `json:"locality,omitempty" tf:"locality,omitempty"` + + // The postal code of the address. + PostalCode *string `json:"postalCode,omitempty" tf:"postal_code,omitempty"` + + // When true, this is the primary address associated with the user. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The street of the address. 
+ StreetAddress *string `json:"streetAddress,omitempty" tf:"street_address,omitempty"` + + // The type of address. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AddressesObservation struct { + + // The country that this address is in. + Country *string `json:"country,omitempty" tf:"country,omitempty"` + + // The name that is typically displayed when the address is shown for display. + Formatted *string `json:"formatted,omitempty" tf:"formatted,omitempty"` + + // The address locality. + Locality *string `json:"locality,omitempty" tf:"locality,omitempty"` + + // The postal code of the address. + PostalCode *string `json:"postalCode,omitempty" tf:"postal_code,omitempty"` + + // When true, this is the primary address associated with the user. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The region of the address. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The street of the address. + StreetAddress *string `json:"streetAddress,omitempty" tf:"street_address,omitempty"` + + // The type of address. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AddressesParameters struct { + + // The country that this address is in. + // +kubebuilder:validation:Optional + Country *string `json:"country,omitempty" tf:"country,omitempty"` + + // The name that is typically displayed when the address is shown for display. + // +kubebuilder:validation:Optional + Formatted *string `json:"formatted,omitempty" tf:"formatted,omitempty"` + + // The address locality. + // +kubebuilder:validation:Optional + Locality *string `json:"locality,omitempty" tf:"locality,omitempty"` + + // The postal code of the address. + // +kubebuilder:validation:Optional + PostalCode *string `json:"postalCode,omitempty" tf:"postal_code,omitempty"` + + // When true, this is the primary address associated with the user. 
+ // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The region of the address. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The street of the address. + // +kubebuilder:validation:Optional + StreetAddress *string `json:"streetAddress,omitempty" tf:"street_address,omitempty"` + + // The type of address. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EmailsInitParameters struct { + + // When true, this is the primary email associated with the user. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The type of email. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The email address. This value must be unique across the identity store. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EmailsObservation struct { + + // When true, this is the primary email associated with the user. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The type of email. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The email address. This value must be unique across the identity store. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EmailsParameters struct { + + // When true, this is the primary email associated with the user. + // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The type of email. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The email address. This value must be unique across the identity store. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ExternalIdsInitParameters struct { +} + +type ExternalIdsObservation struct { + + // The identifier issued to this resource by an external identity provider. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The issuer for an external identifier. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` +} + +type ExternalIdsParameters struct { +} + +type NameInitParameters struct { + + // The family name of the user. + FamilyName *string `json:"familyName,omitempty" tf:"family_name,omitempty"` + + // The name that is typically displayed when the name is shown for display. + Formatted *string `json:"formatted,omitempty" tf:"formatted,omitempty"` + + // The given name of the user. + GivenName *string `json:"givenName,omitempty" tf:"given_name,omitempty"` + + // The honorific prefix of the user. + HonorificPrefix *string `json:"honorificPrefix,omitempty" tf:"honorific_prefix,omitempty"` + + // The honorific suffix of the user. + HonorificSuffix *string `json:"honorificSuffix,omitempty" tf:"honorific_suffix,omitempty"` + + // The middle name of the user. + MiddleName *string `json:"middleName,omitempty" tf:"middle_name,omitempty"` +} + +type NameObservation struct { + + // The family name of the user. + FamilyName *string `json:"familyName,omitempty" tf:"family_name,omitempty"` + + // The name that is typically displayed when the name is shown for display. + Formatted *string `json:"formatted,omitempty" tf:"formatted,omitempty"` + + // The given name of the user. + GivenName *string `json:"givenName,omitempty" tf:"given_name,omitempty"` + + // The honorific prefix of the user. + HonorificPrefix *string `json:"honorificPrefix,omitempty" tf:"honorific_prefix,omitempty"` + + // The honorific suffix of the user. + HonorificSuffix *string `json:"honorificSuffix,omitempty" tf:"honorific_suffix,omitempty"` + + // The middle name of the user. 
+ MiddleName *string `json:"middleName,omitempty" tf:"middle_name,omitempty"` +} + +type NameParameters struct { + + // The family name of the user. + // +kubebuilder:validation:Optional + FamilyName *string `json:"familyName" tf:"family_name,omitempty"` + + // The name that is typically displayed when the name is shown for display. + // +kubebuilder:validation:Optional + Formatted *string `json:"formatted,omitempty" tf:"formatted,omitempty"` + + // The given name of the user. + // +kubebuilder:validation:Optional + GivenName *string `json:"givenName" tf:"given_name,omitempty"` + + // The honorific prefix of the user. + // +kubebuilder:validation:Optional + HonorificPrefix *string `json:"honorificPrefix,omitempty" tf:"honorific_prefix,omitempty"` + + // The honorific suffix of the user. + // +kubebuilder:validation:Optional + HonorificSuffix *string `json:"honorificSuffix,omitempty" tf:"honorific_suffix,omitempty"` + + // The middle name of the user. + // +kubebuilder:validation:Optional + MiddleName *string `json:"middleName,omitempty" tf:"middle_name,omitempty"` +} + +type PhoneNumbersInitParameters struct { + + // When true, this is the primary phone number associated with the user. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The type of phone number. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The user's phone number. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PhoneNumbersObservation struct { + + // When true, this is the primary phone number associated with the user. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The type of phone number. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The user's phone number. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PhoneNumbersParameters struct { + + // When true, this is the primary phone number associated with the user. 
+ // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The type of phone number. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The user's phone number. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type UserInitParameters struct { + + // Details about the user's address. At most 1 address is allowed. Detailed below. + Addresses *AddressesInitParameters `json:"addresses,omitempty" tf:"addresses,omitempty"` + + // The name that is typically displayed when the user is referenced. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Details about the user's email. At most 1 email is allowed. Detailed below. + Emails *EmailsInitParameters `json:"emails,omitempty" tf:"emails,omitempty"` + + // The user's geographical region or location. + Locale *string `json:"locale,omitempty" tf:"locale,omitempty"` + + // Details about the user's full name. Detailed below. + Name *NameInitParameters `json:"name,omitempty" tf:"name,omitempty"` + + // An alternate name for the user. + Nickname *string `json:"nickname,omitempty" tf:"nickname,omitempty"` + + // Details about the user's phone number. At most 1 phone number is allowed. Detailed below. + PhoneNumbers *PhoneNumbersInitParameters `json:"phoneNumbers,omitempty" tf:"phone_numbers,omitempty"` + + // The preferred language of the user. + PreferredLanguage *string `json:"preferredLanguage,omitempty" tf:"preferred_language,omitempty"` + + // An URL that may be associated with the user. + ProfileURL *string `json:"profileUrl,omitempty" tf:"profile_url,omitempty"` + + // The user's time zone. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // The user's title. + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // A unique string used to identify the user. 
This value can consist of letters, accented characters, symbols, numbers, and punctuation. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store. The limit is 128 characters. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` + + // The user type. + UserType *string `json:"userType,omitempty" tf:"user_type,omitempty"` +} + +type UserObservation struct { + + // Details about the user's address. At most 1 address is allowed. Detailed below. + Addresses *AddressesObservation `json:"addresses,omitempty" tf:"addresses,omitempty"` + + // The name that is typically displayed when the user is referenced. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Details about the user's email. At most 1 email is allowed. Detailed below. + Emails *EmailsObservation `json:"emails,omitempty" tf:"emails,omitempty"` + + // A list of identifiers issued to this resource by an external identity provider. + ExternalIds []ExternalIdsObservation `json:"externalIds,omitempty" tf:"external_ids,omitempty"` + + // The identifier issued to this resource by an external identity provider. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The globally unique identifier for the identity store that this user is in. + IdentityStoreID *string `json:"identityStoreId,omitempty" tf:"identity_store_id,omitempty"` + + // The user's geographical region or location. + Locale *string `json:"locale,omitempty" tf:"locale,omitempty"` + + // Details about the user's full name. Detailed below. + Name *NameObservation `json:"name,omitempty" tf:"name,omitempty"` + + // An alternate name for the user. + Nickname *string `json:"nickname,omitempty" tf:"nickname,omitempty"` + + // Details about the user's phone number. At most 1 phone number is allowed. Detailed below. 
+ PhoneNumbers *PhoneNumbersObservation `json:"phoneNumbers,omitempty" tf:"phone_numbers,omitempty"` + + // The preferred language of the user. + PreferredLanguage *string `json:"preferredLanguage,omitempty" tf:"preferred_language,omitempty"` + + // An URL that may be associated with the user. + ProfileURL *string `json:"profileUrl,omitempty" tf:"profile_url,omitempty"` + + // The user's time zone. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // The user's title. + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // The identifier for this user in the identity store. + UserID *string `json:"userId,omitempty" tf:"user_id,omitempty"` + + // A unique string used to identify the user. This value can consist of letters, accented characters, symbols, numbers, and punctuation. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store. The limit is 128 characters. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` + + // The user type. + UserType *string `json:"userType,omitempty" tf:"user_type,omitempty"` +} + +type UserParameters struct { + + // Details about the user's address. At most 1 address is allowed. Detailed below. + // +kubebuilder:validation:Optional + Addresses *AddressesParameters `json:"addresses,omitempty" tf:"addresses,omitempty"` + + // The name that is typically displayed when the user is referenced. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Details about the user's email. At most 1 email is allowed. Detailed below. + // +kubebuilder:validation:Optional + Emails *EmailsParameters `json:"emails,omitempty" tf:"emails,omitempty"` + + // The globally unique identifier for the identity store that this user is in. 
+ // +kubebuilder:validation:Required + IdentityStoreID *string `json:"identityStoreId" tf:"identity_store_id,omitempty"` + + // The user's geographical region or location. + // +kubebuilder:validation:Optional + Locale *string `json:"locale,omitempty" tf:"locale,omitempty"` + + // Details about the user's full name. Detailed below. + // +kubebuilder:validation:Optional + Name *NameParameters `json:"name,omitempty" tf:"name,omitempty"` + + // An alternate name for the user. + // +kubebuilder:validation:Optional + Nickname *string `json:"nickname,omitempty" tf:"nickname,omitempty"` + + // Details about the user's phone number. At most 1 phone number is allowed. Detailed below. + // +kubebuilder:validation:Optional + PhoneNumbers *PhoneNumbersParameters `json:"phoneNumbers,omitempty" tf:"phone_numbers,omitempty"` + + // The preferred language of the user. + // +kubebuilder:validation:Optional + PreferredLanguage *string `json:"preferredLanguage,omitempty" tf:"preferred_language,omitempty"` + + // An URL that may be associated with the user. + // +kubebuilder:validation:Optional + ProfileURL *string `json:"profileUrl,omitempty" tf:"profile_url,omitempty"` + + // The region of the address. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The user's time zone. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // The user's title. + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // A unique string used to identify the user. This value can consist of letters, accented characters, symbols, numbers, and punctuation. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store. The limit is 128 characters. 
+ // +kubebuilder:validation:Optional + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` + + // The user type. + // +kubebuilder:validation:Optional + UserType *string `json:"userType,omitempty" tf:"user_type,omitempty"` +} + +// UserSpec defines the desired state of User +type UserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UserInitParameters `json:"initProvider,omitempty"` +} + +// UserStatus defines the observed state of User. +type UserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// User is the Schema for the Users API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type User struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.displayName) || (has(self.initProvider) && has(self.initProvider.displayName))",message="spec.forProvider.displayName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.userName) || (has(self.initProvider) && has(self.initProvider.userName))",message="spec.forProvider.userName is a required parameter" + Spec UserSpec `json:"spec"` + Status UserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserList contains a list of Users +type UserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []User `json:"items"` +} + +// Repository type metadata. 
+var ( + User_Kind = "User" + User_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: User_Kind}.String() + User_KindAPIVersion = User_Kind + "." + CRDGroupVersion.String() + User_GroupVersionKind = CRDGroupVersion.WithKind(User_Kind) +) + +func init() { + SchemeBuilder.Register(&User{}, &UserList{}) +} diff --git a/apis/imagebuilder/v1beta1/zz_generated.conversion_hubs.go b/apis/imagebuilder/v1beta1/zz_generated.conversion_hubs.go index 85629d792a..f5586b3b2e 100755 --- a/apis/imagebuilder/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/imagebuilder/v1beta1/zz_generated.conversion_hubs.go @@ -8,21 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Component) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ContainerRecipe) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DistributionConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Image) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ImagePipeline) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ImageRecipe) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *InfrastructureConfiguration) Hub() {} diff --git a/apis/imagebuilder/v1beta1/zz_generated.conversion_spokes.go b/apis/imagebuilder/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..c857d08718 --- /dev/null +++ b/apis/imagebuilder/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ContainerRecipe to the hub type. 
+func (tr *ContainerRecipe) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ContainerRecipe type. +func (tr *ContainerRecipe) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DistributionConfiguration to the hub type. +func (tr *DistributionConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DistributionConfiguration type. +func (tr *DistributionConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Image to the hub type. 
+func (tr *Image) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Image type. +func (tr *Image) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ImagePipeline to the hub type. +func (tr *ImagePipeline) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ImagePipeline type. +func (tr *ImagePipeline) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ImageRecipe to the hub type. 
+func (tr *ImageRecipe) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ImageRecipe type. +func (tr *ImageRecipe) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this InfrastructureConfiguration to the hub type. +func (tr *InfrastructureConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the InfrastructureConfiguration type. 
+func (tr *InfrastructureConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/imagebuilder/v1beta2/zz_containerrecipe_terraformed.go b/apis/imagebuilder/v1beta2/zz_containerrecipe_terraformed.go new file mode 100755 index 0000000000..9f419721a9 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_containerrecipe_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ContainerRecipe +func (mg *ContainerRecipe) GetTerraformResourceType() string { + return "aws_imagebuilder_container_recipe" +} + +// GetConnectionDetailsMapping for this ContainerRecipe +func (tr *ContainerRecipe) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ContainerRecipe +func (tr *ContainerRecipe) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ContainerRecipe +func (tr *ContainerRecipe) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of 
this ContainerRecipe +func (tr *ContainerRecipe) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ContainerRecipe +func (tr *ContainerRecipe) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ContainerRecipe +func (tr *ContainerRecipe) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ContainerRecipe +func (tr *ContainerRecipe) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ContainerRecipe +func (tr *ContainerRecipe) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ContainerRecipe using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ContainerRecipe) LateInitialize(attrs []byte) (bool, error) { + params := &ContainerRecipeParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ContainerRecipe) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/imagebuilder/v1beta2/zz_containerrecipe_types.go b/apis/imagebuilder/v1beta2/zz_containerrecipe_types.go new file mode 100755 index 0000000000..fffcd8cbc7 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_containerrecipe_types.go @@ -0,0 +1,568 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BlockDeviceMappingInitParameters struct { + + // Name of the device. For example, /dev/sda or /dev/xvdb. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Configuration block with Elastic Block Storage (EBS) block device mapping settings. Detailed below. 
+ EBS *EBSInitParameters `json:"ebs,omitempty" tf:"ebs,omitempty"` + + // Set to true to remove a mapping from the parent image. + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // Virtual device name. For example, ephemeral0. Instance store volumes are numbered starting from 0. + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type BlockDeviceMappingObservation struct { + + // Name of the device. For example, /dev/sda or /dev/xvdb. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Configuration block with Elastic Block Storage (EBS) block device mapping settings. Detailed below. + EBS *EBSObservation `json:"ebs,omitempty" tf:"ebs,omitempty"` + + // Set to true to remove a mapping from the parent image. + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // Virtual device name. For example, ephemeral0. Instance store volumes are numbered starting from 0. + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type BlockDeviceMappingParameters struct { + + // Name of the device. For example, /dev/sda or /dev/xvdb. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Configuration block with Elastic Block Storage (EBS) block device mapping settings. Detailed below. + // +kubebuilder:validation:Optional + EBS *EBSParameters `json:"ebs,omitempty" tf:"ebs,omitempty"` + + // Set to true to remove a mapping from the parent image. + // +kubebuilder:validation:Optional + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // Virtual device name. For example, ephemeral0. Instance store volumes are numbered starting from 0. 
+ // +kubebuilder:validation:Optional + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type ComponentInitParameters struct { + + // Amazon Resource Name (ARN) of the Image Builder Component to associate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta1.Component + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ComponentArn *string `json:"componentArn,omitempty" tf:"component_arn,omitempty"` + + // Reference to a Component in imagebuilder to populate componentArn. + // +kubebuilder:validation:Optional + ComponentArnRef *v1.Reference `json:"componentArnRef,omitempty" tf:"-"` + + // Selector for a Component in imagebuilder to populate componentArn. + // +kubebuilder:validation:Optional + ComponentArnSelector *v1.Selector `json:"componentArnSelector,omitempty" tf:"-"` + + // Configuration block(s) for parameters to configure the component. Detailed below. + Parameter []ParameterInitParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` +} + +type ComponentObservation struct { + + // Amazon Resource Name (ARN) of the Image Builder Component to associate. + ComponentArn *string `json:"componentArn,omitempty" tf:"component_arn,omitempty"` + + // Configuration block(s) for parameters to configure the component. Detailed below. + Parameter []ParameterObservation `json:"parameter,omitempty" tf:"parameter,omitempty"` +} + +type ComponentParameters struct { + + // Amazon Resource Name (ARN) of the Image Builder Component to associate. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta1.Component + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ComponentArn *string `json:"componentArn,omitempty" tf:"component_arn,omitempty"` + + // Reference to a Component in imagebuilder to populate componentArn. + // +kubebuilder:validation:Optional + ComponentArnRef *v1.Reference `json:"componentArnRef,omitempty" tf:"-"` + + // Selector for a Component in imagebuilder to populate componentArn. + // +kubebuilder:validation:Optional + ComponentArnSelector *v1.Selector `json:"componentArnSelector,omitempty" tf:"-"` + + // Configuration block(s) for parameters to configure the component. Detailed below. + // +kubebuilder:validation:Optional + Parameter []ParameterParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` +} + +type ContainerRecipeInitParameters struct { + + // Ordered configuration block(s) with components for the container recipe. Detailed below. + Component []ComponentInitParameters `json:"component,omitempty" tf:"component,omitempty"` + + // The type of the container to create. Valid values: DOCKER. + ContainerType *string `json:"containerType,omitempty" tf:"container_type,omitempty"` + + // The description of the container recipe. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Dockerfile template used to build the image as an inline data blob. + DockerfileTemplateData *string `json:"dockerfileTemplateData,omitempty" tf:"dockerfile_template_data,omitempty"` + + // The Amazon S3 URI for the Dockerfile that will be used to build the container image. + DockerfileTemplateURI *string `json:"dockerfileTemplateUri,omitempty" tf:"dockerfile_template_uri,omitempty"` + + // Configuration block used to configure an instance for building and testing container images. Detailed below. 
+ InstanceConfiguration *InstanceConfigurationInitParameters `json:"instanceConfiguration,omitempty" tf:"instance_configuration,omitempty"` + + // The KMS key used to encrypt the container image. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The name of the container recipe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The base image for the container recipe. + ParentImage *string `json:"parentImage,omitempty" tf:"parent_image,omitempty"` + + // Specifies the operating system platform when you use a custom base image. + PlatformOverride *string `json:"platformOverride,omitempty" tf:"platform_override,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The destination repository for the container image. Detailed below. + TargetRepository *TargetRepositoryInitParameters `json:"targetRepository,omitempty" tf:"target_repository,omitempty"` + + // Version of the container recipe. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The working directory to be used during build and test workflows. + WorkingDirectory *string `json:"workingDirectory,omitempty" tf:"working_directory,omitempty"` +} + +type ContainerRecipeObservation struct { + + // Amazon Resource Name (ARN) of the container recipe. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Ordered configuration block(s) with components for the container recipe. Detailed below. 
+ Component []ComponentObservation `json:"component,omitempty" tf:"component,omitempty"` + + // The type of the container to create. Valid values: DOCKER. + ContainerType *string `json:"containerType,omitempty" tf:"container_type,omitempty"` + + // Date the container recipe was created. + DateCreated *string `json:"dateCreated,omitempty" tf:"date_created,omitempty"` + + // The description of the container recipe. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Dockerfile template used to build the image as an inline data blob. + DockerfileTemplateData *string `json:"dockerfileTemplateData,omitempty" tf:"dockerfile_template_data,omitempty"` + + // The Amazon S3 URI for the Dockerfile that will be used to build the container image. + DockerfileTemplateURI *string `json:"dockerfileTemplateUri,omitempty" tf:"dockerfile_template_uri,omitempty"` + + // A flag that indicates if the target container is encrypted. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block used to configure an instance for building and testing container images. Detailed below. + InstanceConfiguration *InstanceConfigurationObservation `json:"instanceConfiguration,omitempty" tf:"instance_configuration,omitempty"` + + // The KMS key used to encrypt the container image. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The name of the container recipe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Owner of the container recipe. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // The base image for the container recipe. + ParentImage *string `json:"parentImage,omitempty" tf:"parent_image,omitempty"` + + // Platform of the container recipe. + Platform *string `json:"platform,omitempty" tf:"platform,omitempty"` + + // Specifies the operating system platform when you use a custom base image. 
+ PlatformOverride *string `json:"platformOverride,omitempty" tf:"platform_override,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The destination repository for the container image. Detailed below. + TargetRepository *TargetRepositoryObservation `json:"targetRepository,omitempty" tf:"target_repository,omitempty"` + + // Version of the container recipe. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The working directory to be used during build and test workflows. + WorkingDirectory *string `json:"workingDirectory,omitempty" tf:"working_directory,omitempty"` +} + +type ContainerRecipeParameters struct { + + // Ordered configuration block(s) with components for the container recipe. Detailed below. + // +kubebuilder:validation:Optional + Component []ComponentParameters `json:"component,omitempty" tf:"component,omitempty"` + + // The type of the container to create. Valid values: DOCKER. + // +kubebuilder:validation:Optional + ContainerType *string `json:"containerType,omitempty" tf:"container_type,omitempty"` + + // The description of the container recipe. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Dockerfile template used to build the image as an inline data blob. + // +kubebuilder:validation:Optional + DockerfileTemplateData *string `json:"dockerfileTemplateData,omitempty" tf:"dockerfile_template_data,omitempty"` + + // The Amazon S3 URI for the Dockerfile that will be used to build the container image. 
+ // +kubebuilder:validation:Optional + DockerfileTemplateURI *string `json:"dockerfileTemplateUri,omitempty" tf:"dockerfile_template_uri,omitempty"` + + // Configuration block used to configure an instance for building and testing container images. Detailed below. + // +kubebuilder:validation:Optional + InstanceConfiguration *InstanceConfigurationParameters `json:"instanceConfiguration,omitempty" tf:"instance_configuration,omitempty"` + + // The KMS key used to encrypt the container image. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The name of the container recipe. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The base image for the container recipe. + // +kubebuilder:validation:Optional + ParentImage *string `json:"parentImage,omitempty" tf:"parent_image,omitempty"` + + // Specifies the operating system platform when you use a custom base image. + // +kubebuilder:validation:Optional + PlatformOverride *string `json:"platformOverride,omitempty" tf:"platform_override,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The destination repository for the container image. Detailed below. 
+ // +kubebuilder:validation:Optional + TargetRepository *TargetRepositoryParameters `json:"targetRepository,omitempty" tf:"target_repository,omitempty"` + + // Version of the container recipe. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The working directory to be used during build and test workflows. + // +kubebuilder:validation:Optional + WorkingDirectory *string `json:"workingDirectory,omitempty" tf:"working_directory,omitempty"` +} + +type EBSInitParameters struct { + + // Whether to delete the volume on termination. Defaults to unset, which is the value inherited from the parent image. + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether to encrypt the volume. Defaults to unset, which is the value inherited from the parent image. + Encrypted *string `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Number of Input/Output (I/O) operations per second to provision for an io1 or io2 volume. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the Key Management Service (KMS) Key for encryption. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Identifier of the EC2 Volume Snapshot. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // For GP3 volumes only. The throughput in MiB/s that the volume supports. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume, in GiB. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of the volume. For example, gp2 or io2. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSObservation struct { + + // Whether to delete the volume on termination. Defaults to unset, which is the value inherited from the parent image. 
+ DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether to encrypt the volume. Defaults to unset, which is the value inherited from the parent image. + Encrypted *string `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Number of Input/Output (I/O) operations per second to provision for an io1 or io2 volume. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the Key Management Service (KMS) Key for encryption. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Identifier of the EC2 Volume Snapshot. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // For GP3 volumes only. The throughput in MiB/s that the volume supports. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume, in GiB. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of the volume. For example, gp2 or io2. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSParameters struct { + + // Whether to delete the volume on termination. Defaults to unset, which is the value inherited from the parent image. + // +kubebuilder:validation:Optional + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether to encrypt the volume. Defaults to unset, which is the value inherited from the parent image. + // +kubebuilder:validation:Optional + Encrypted *string `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Number of Input/Output (I/O) operations per second to provision for an io1 or io2 volume. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the Key Management Service (KMS) Key for encryption. 
+ // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Identifier of the EC2 Volume Snapshot. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // For GP3 volumes only. The throughput in MiB/s that the volume supports. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume, in GiB. + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of the volume. For example, gp2 or io2. + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type InstanceConfigurationInitParameters struct { + + // Configuration block(s) with block device mappings for the container recipe. Detailed below. + BlockDeviceMapping []BlockDeviceMappingInitParameters `json:"blockDeviceMapping,omitempty" tf:"block_device_mapping,omitempty"` + + // The AMI ID to use as the base image for a container build and test instance. If not specified, Image Builder will use the appropriate ECS-optimized AMI as a base image. + Image *string `json:"image,omitempty" tf:"image,omitempty"` +} + +type InstanceConfigurationObservation struct { + + // Configuration block(s) with block device mappings for the container recipe. Detailed below. + BlockDeviceMapping []BlockDeviceMappingObservation `json:"blockDeviceMapping,omitempty" tf:"block_device_mapping,omitempty"` + + // The AMI ID to use as the base image for a container build and test instance. If not specified, Image Builder will use the appropriate ECS-optimized AMI as a base image. + Image *string `json:"image,omitempty" tf:"image,omitempty"` +} + +type InstanceConfigurationParameters struct { + + // Configuration block(s) with block device mappings for the container recipe. Detailed below. 
+ // +kubebuilder:validation:Optional + BlockDeviceMapping []BlockDeviceMappingParameters `json:"blockDeviceMapping,omitempty" tf:"block_device_mapping,omitempty"` + + // The AMI ID to use as the base image for a container build and test instance. If not specified, Image Builder will use the appropriate ECS-optimized AMI as a base image. + // +kubebuilder:validation:Optional + Image *string `json:"image,omitempty" tf:"image,omitempty"` +} + +type ParameterInitParameters struct { + + // The name of the component parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for the named component parameter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ParameterObservation struct { + + // The name of the component parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for the named component parameter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ParameterParameters struct { + + // The name of the component parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value for the named component parameter. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TargetRepositoryInitParameters struct { + + // The name of the container repository where the output container image is stored. This name is prefixed by the repository location. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta2.Repository + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Reference to a Repository in ecr to populate repositoryName. + // +kubebuilder:validation:Optional + RepositoryNameRef *v1.Reference `json:"repositoryNameRef,omitempty" tf:"-"` + + // Selector for a Repository in ecr to populate repositoryName. 
+ // +kubebuilder:validation:Optional + RepositoryNameSelector *v1.Selector `json:"repositoryNameSelector,omitempty" tf:"-"` + + // The service in which this image is registered. Valid values: ECR. + Service *string `json:"service,omitempty" tf:"service,omitempty"` +} + +type TargetRepositoryObservation struct { + + // The name of the container repository where the output container image is stored. This name is prefixed by the repository location. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // The service in which this image is registered. Valid values: ECR. + Service *string `json:"service,omitempty" tf:"service,omitempty"` +} + +type TargetRepositoryParameters struct { + + // The name of the container repository where the output container image is stored. This name is prefixed by the repository location. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecr/v1beta2.Repository + // +kubebuilder:validation:Optional + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Reference to a Repository in ecr to populate repositoryName. + // +kubebuilder:validation:Optional + RepositoryNameRef *v1.Reference `json:"repositoryNameRef,omitempty" tf:"-"` + + // Selector for a Repository in ecr to populate repositoryName. + // +kubebuilder:validation:Optional + RepositoryNameSelector *v1.Selector `json:"repositoryNameSelector,omitempty" tf:"-"` + + // The service in which this image is registered. Valid values: ECR. + // +kubebuilder:validation:Optional + Service *string `json:"service" tf:"service,omitempty"` +} + +// ContainerRecipeSpec defines the desired state of ContainerRecipe +type ContainerRecipeSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ContainerRecipeParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ContainerRecipeInitParameters `json:"initProvider,omitempty"` +} + +// ContainerRecipeStatus defines the observed state of ContainerRecipe. +type ContainerRecipeStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ContainerRecipeObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ContainerRecipe is the Schema for the ContainerRecipes API. Manage an Image Builder Container Recipe +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ContainerRecipe struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.component) || (has(self.initProvider) && has(self.initProvider.component))",message="spec.forProvider.component is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.containerType) || (has(self.initProvider) && has(self.initProvider.containerType))",message="spec.forProvider.containerType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.parentImage) || (has(self.initProvider) && has(self.initProvider.parentImage))",message="spec.forProvider.parentImage is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetRepository) || (has(self.initProvider) && has(self.initProvider.targetRepository))",message="spec.forProvider.targetRepository is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec ContainerRecipeSpec `json:"spec"` + Status ContainerRecipeStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ContainerRecipeList contains a list of ContainerRecipes +type ContainerRecipeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ContainerRecipe `json:"items"` +} + +// Repository type 
metadata. +var ( + ContainerRecipe_Kind = "ContainerRecipe" + ContainerRecipe_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ContainerRecipe_Kind}.String() + ContainerRecipe_KindAPIVersion = ContainerRecipe_Kind + "." + CRDGroupVersion.String() + ContainerRecipe_GroupVersionKind = CRDGroupVersion.WithKind(ContainerRecipe_Kind) +) + +func init() { + SchemeBuilder.Register(&ContainerRecipe{}, &ContainerRecipeList{}) +} diff --git a/apis/imagebuilder/v1beta2/zz_distributionconfiguration_terraformed.go b/apis/imagebuilder/v1beta2/zz_distributionconfiguration_terraformed.go new file mode 100755 index 0000000000..b08ad62a16 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_distributionconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DistributionConfiguration +func (mg *DistributionConfiguration) GetTerraformResourceType() string { + return "aws_imagebuilder_distribution_configuration" +} + +// GetConnectionDetailsMapping for this DistributionConfiguration +func (tr *DistributionConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DistributionConfiguration +func (tr *DistributionConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DistributionConfiguration +func (tr *DistributionConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DistributionConfiguration +func (tr *DistributionConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DistributionConfiguration +func (tr *DistributionConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DistributionConfiguration +func (tr *DistributionConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DistributionConfiguration +func (tr *DistributionConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this DistributionConfiguration +func (tr *DistributionConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DistributionConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DistributionConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &DistributionConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DistributionConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/imagebuilder/v1beta2/zz_distributionconfiguration_types.go b/apis/imagebuilder/v1beta2/zz_distributionconfiguration_types.go new file mode 100755 index 0000000000..dc0990cd3c --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_distributionconfiguration_types.go @@ -0,0 +1,581 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AMIDistributionConfigurationInitParameters struct { + + // Key-value map of tags to apply to the distributed AMI. + // +mapType=granular + AMITags map[string]*string `json:"amiTags,omitempty" tf:"ami_tags,omitempty"` + + // Description to apply to the distributed AMI. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amazon Resource Name (ARN) of the Key Management Service (KMS) Key to encrypt the distributed AMI. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Configuration block of EC2 launch permissions to apply to the distributed AMI. Detailed below. + LaunchPermission *LaunchPermissionInitParameters `json:"launchPermission,omitempty" tf:"launch_permission,omitempty"` + + // Name to apply to the distributed AMI. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Set of AWS Account identifiers to distribute the AMI. + // +listType=set + TargetAccountIds []*string `json:"targetAccountIds,omitempty" tf:"target_account_ids,omitempty"` +} + +type AMIDistributionConfigurationObservation struct { + + // Key-value map of tags to apply to the distributed AMI. + // +mapType=granular + AMITags map[string]*string `json:"amiTags,omitempty" tf:"ami_tags,omitempty"` + + // Description to apply to the distributed AMI. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amazon Resource Name (ARN) of the Key Management Service (KMS) Key to encrypt the distributed AMI. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Configuration block of EC2 launch permissions to apply to the distributed AMI. Detailed below. 
+ LaunchPermission *LaunchPermissionObservation `json:"launchPermission,omitempty" tf:"launch_permission,omitempty"` + + // Name to apply to the distributed AMI. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Set of AWS Account identifiers to distribute the AMI. + // +listType=set + TargetAccountIds []*string `json:"targetAccountIds,omitempty" tf:"target_account_ids,omitempty"` +} + +type AMIDistributionConfigurationParameters struct { + + // Key-value map of tags to apply to the distributed AMI. + // +kubebuilder:validation:Optional + // +mapType=granular + AMITags map[string]*string `json:"amiTags,omitempty" tf:"ami_tags,omitempty"` + + // Description to apply to the distributed AMI. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amazon Resource Name (ARN) of the Key Management Service (KMS) Key to encrypt the distributed AMI. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Configuration block of EC2 launch permissions to apply to the distributed AMI. Detailed below. + // +kubebuilder:validation:Optional + LaunchPermission *LaunchPermissionParameters `json:"launchPermission,omitempty" tf:"launch_permission,omitempty"` + + // Name to apply to the distributed AMI. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Set of AWS Account identifiers to distribute the AMI. + // +kubebuilder:validation:Optional + // +listType=set + TargetAccountIds []*string `json:"targetAccountIds,omitempty" tf:"target_account_ids,omitempty"` +} + +type ContainerDistributionConfigurationInitParameters struct { + + // Set of tags that are attached to the container distribution configuration. + // +listType=set + ContainerTags []*string `json:"containerTags,omitempty" tf:"container_tags,omitempty"` + + // Description of the container distribution configuration. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block with the destination repository for the container distribution configuration. + TargetRepository *ContainerDistributionConfigurationTargetRepositoryInitParameters `json:"targetRepository,omitempty" tf:"target_repository,omitempty"` +} + +type ContainerDistributionConfigurationObservation struct { + + // Set of tags that are attached to the container distribution configuration. + // +listType=set + ContainerTags []*string `json:"containerTags,omitempty" tf:"container_tags,omitempty"` + + // Description of the container distribution configuration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block with the destination repository for the container distribution configuration. + TargetRepository *ContainerDistributionConfigurationTargetRepositoryObservation `json:"targetRepository,omitempty" tf:"target_repository,omitempty"` +} + +type ContainerDistributionConfigurationParameters struct { + + // Set of tags that are attached to the container distribution configuration. + // +kubebuilder:validation:Optional + // +listType=set + ContainerTags []*string `json:"containerTags,omitempty" tf:"container_tags,omitempty"` + + // Description of the container distribution configuration. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block with the destination repository for the container distribution configuration. + // +kubebuilder:validation:Optional + TargetRepository *ContainerDistributionConfigurationTargetRepositoryParameters `json:"targetRepository" tf:"target_repository,omitempty"` +} + +type ContainerDistributionConfigurationTargetRepositoryInitParameters struct { + + // The name of the container repository where the output container image is stored. This name is prefixed by the repository location. 
+ RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // The service in which this image is registered. Valid values: ECR. + Service *string `json:"service,omitempty" tf:"service,omitempty"` +} + +type ContainerDistributionConfigurationTargetRepositoryObservation struct { + + // The name of the container repository where the output container image is stored. This name is prefixed by the repository location. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // The service in which this image is registered. Valid values: ECR. + Service *string `json:"service,omitempty" tf:"service,omitempty"` +} + +type ContainerDistributionConfigurationTargetRepositoryParameters struct { + + // The name of the container repository where the output container image is stored. This name is prefixed by the repository location. + // +kubebuilder:validation:Optional + RepositoryName *string `json:"repositoryName" tf:"repository_name,omitempty"` + + // The service in which this image is registered. Valid values: ECR. + // +kubebuilder:validation:Optional + Service *string `json:"service" tf:"service,omitempty"` +} + +type DistributionConfigurationInitParameters struct { + + // Description of the distribution configuration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more configuration blocks with distribution settings. Detailed below. + Distribution []DistributionInitParameters `json:"distribution,omitempty" tf:"distribution,omitempty"` + + // Name of the distribution configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DistributionConfigurationObservation struct { + + // Amazon Resource Name (ARN) of the distribution configuration. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Date the distribution configuration was created. + DateCreated *string `json:"dateCreated,omitempty" tf:"date_created,omitempty"` + + // Date the distribution configuration was updated. + DateUpdated *string `json:"dateUpdated,omitempty" tf:"date_updated,omitempty"` + + // Description of the distribution configuration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more configuration blocks with distribution settings. Detailed below. + Distribution []DistributionObservation `json:"distribution,omitempty" tf:"distribution,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the distribution configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type DistributionConfigurationParameters struct { + + // Description of the distribution configuration. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more configuration blocks with distribution settings. Detailed below. + // +kubebuilder:validation:Optional + Distribution []DistributionParameters `json:"distribution,omitempty" tf:"distribution,omitempty"` + + // Name of the distribution configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // AWS Region for the distribution. + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DistributionInitParameters struct { + + // Configuration block with Amazon Machine Image (AMI) distribution settings. Detailed below. + AMIDistributionConfiguration *AMIDistributionConfigurationInitParameters `json:"amiDistributionConfiguration,omitempty" tf:"ami_distribution_configuration,omitempty"` + + // Configuration block with container distribution settings. Detailed below. + ContainerDistributionConfiguration *ContainerDistributionConfigurationInitParameters `json:"containerDistributionConfiguration,omitempty" tf:"container_distribution_configuration,omitempty"` + + // Set of Windows faster-launching configurations to use for AMI distribution. Detailed below. + FastLaunchConfiguration []FastLaunchConfigurationInitParameters `json:"fastLaunchConfiguration,omitempty" tf:"fast_launch_configuration,omitempty"` + + // Set of launch template configuration settings that apply to image distribution. Detailed below. + LaunchTemplateConfiguration []LaunchTemplateConfigurationInitParameters `json:"launchTemplateConfiguration,omitempty" tf:"launch_template_configuration,omitempty"` + + // Set of Amazon Resource Names (ARNs) of License Manager License Configurations. + // +listType=set + LicenseConfigurationArns []*string `json:"licenseConfigurationArns,omitempty" tf:"license_configuration_arns,omitempty"` +} + +type DistributionObservation struct { + + // Configuration block with Amazon Machine Image (AMI) distribution settings. Detailed below. + AMIDistributionConfiguration *AMIDistributionConfigurationObservation `json:"amiDistributionConfiguration,omitempty" tf:"ami_distribution_configuration,omitempty"` + + // Configuration block with container distribution settings. 
Detailed below. + ContainerDistributionConfiguration *ContainerDistributionConfigurationObservation `json:"containerDistributionConfiguration,omitempty" tf:"container_distribution_configuration,omitempty"` + + // Set of Windows faster-launching configurations to use for AMI distribution. Detailed below. + FastLaunchConfiguration []FastLaunchConfigurationObservation `json:"fastLaunchConfiguration,omitempty" tf:"fast_launch_configuration,omitempty"` + + // Set of launch template configuration settings that apply to image distribution. Detailed below. + LaunchTemplateConfiguration []LaunchTemplateConfigurationObservation `json:"launchTemplateConfiguration,omitempty" tf:"launch_template_configuration,omitempty"` + + // Set of Amazon Resource Names (ARNs) of License Manager License Configurations. + // +listType=set + LicenseConfigurationArns []*string `json:"licenseConfigurationArns,omitempty" tf:"license_configuration_arns,omitempty"` + + // AWS Region for the distribution. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type DistributionParameters struct { + + // Configuration block with Amazon Machine Image (AMI) distribution settings. Detailed below. + // +kubebuilder:validation:Optional + AMIDistributionConfiguration *AMIDistributionConfigurationParameters `json:"amiDistributionConfiguration,omitempty" tf:"ami_distribution_configuration,omitempty"` + + // Configuration block with container distribution settings. Detailed below. + // +kubebuilder:validation:Optional + ContainerDistributionConfiguration *ContainerDistributionConfigurationParameters `json:"containerDistributionConfiguration,omitempty" tf:"container_distribution_configuration,omitempty"` + + // Set of Windows faster-launching configurations to use for AMI distribution. Detailed below. 
+ // +kubebuilder:validation:Optional + FastLaunchConfiguration []FastLaunchConfigurationParameters `json:"fastLaunchConfiguration,omitempty" tf:"fast_launch_configuration,omitempty"` + + // Set of launch template configuration settings that apply to image distribution. Detailed below. + // +kubebuilder:validation:Optional + LaunchTemplateConfiguration []LaunchTemplateConfigurationParameters `json:"launchTemplateConfiguration,omitempty" tf:"launch_template_configuration,omitempty"` + + // Set of Amazon Resource Names (ARNs) of License Manager License Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LicenseConfigurationArns []*string `json:"licenseConfigurationArns,omitempty" tf:"license_configuration_arns,omitempty"` + + // AWS Region for the distribution. + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"region,omitempty"` +} + +type FastLaunchConfigurationInitParameters struct { + + // The owner account ID for the fast-launch enabled Windows AMI. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // A Boolean that represents the current state of faster launching for the Windows AMI. Set to true to start using Windows faster launching, or false to stop using it. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Configuration block for the launch template that the fast-launch enabled Windows AMI uses when it launches Windows instances to create pre-provisioned snapshots. Detailed below. + LaunchTemplate *LaunchTemplateInitParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // The maximum number of parallel instances that are launched for creating resources. + MaxParallelLaunches *float64 `json:"maxParallelLaunches,omitempty" tf:"max_parallel_launches,omitempty"` + + // Configuration block for managing the number of snapshots that are created from pre-provisioned instances for the Windows AMI when faster launching is enabled. 
Detailed below. + SnapshotConfiguration *SnapshotConfigurationInitParameters `json:"snapshotConfiguration,omitempty" tf:"snapshot_configuration,omitempty"` +} + +type FastLaunchConfigurationObservation struct { + + // The owner account ID for the fast-launch enabled Windows AMI. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // A Boolean that represents the current state of faster launching for the Windows AMI. Set to true to start using Windows faster launching, or false to stop using it. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Configuration block for the launch template that the fast-launch enabled Windows AMI uses when it launches Windows instances to create pre-provisioned snapshots. Detailed below. + LaunchTemplate *LaunchTemplateObservation `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // The maximum number of parallel instances that are launched for creating resources. + MaxParallelLaunches *float64 `json:"maxParallelLaunches,omitempty" tf:"max_parallel_launches,omitempty"` + + // Configuration block for managing the number of snapshots that are created from pre-provisioned instances for the Windows AMI when faster launching is enabled. Detailed below. + SnapshotConfiguration *SnapshotConfigurationObservation `json:"snapshotConfiguration,omitempty" tf:"snapshot_configuration,omitempty"` +} + +type FastLaunchConfigurationParameters struct { + + // The owner account ID for the fast-launch enabled Windows AMI. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId" tf:"account_id,omitempty"` + + // A Boolean that represents the current state of faster launching for the Windows AMI. Set to true to start using Windows faster launching, or false to stop using it. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Configuration block for the launch template that the fast-launch enabled Windows AMI uses when it launches Windows instances to create pre-provisioned snapshots. Detailed below. + // +kubebuilder:validation:Optional + LaunchTemplate *LaunchTemplateParameters `json:"launchTemplate,omitempty" tf:"launch_template,omitempty"` + + // The maximum number of parallel instances that are launched for creating resources. + // +kubebuilder:validation:Optional + MaxParallelLaunches *float64 `json:"maxParallelLaunches,omitempty" tf:"max_parallel_launches,omitempty"` + + // Configuration block for managing the number of snapshots that are created from pre-provisioned instances for the Windows AMI when faster launching is enabled. Detailed below. + // +kubebuilder:validation:Optional + SnapshotConfiguration *SnapshotConfigurationParameters `json:"snapshotConfiguration,omitempty" tf:"snapshot_configuration,omitempty"` +} + +type LaunchPermissionInitParameters struct { + + // Set of AWS Organization ARNs to assign. + // +listType=set + OrganizationArns []*string `json:"organizationArns,omitempty" tf:"organization_arns,omitempty"` + + // Set of AWS Organizational Unit ARNs to assign. + // +listType=set + OrganizationalUnitArns []*string `json:"organizationalUnitArns,omitempty" tf:"organizational_unit_arns,omitempty"` + + // Set of EC2 launch permission user groups to assign. Use all to distribute a public AMI. + // +listType=set + UserGroups []*string `json:"userGroups,omitempty" tf:"user_groups,omitempty"` + + // Set of AWS Account identifiers to assign. + // +listType=set + UserIds []*string `json:"userIds,omitempty" tf:"user_ids,omitempty"` +} + +type LaunchPermissionObservation struct { + + // Set of AWS Organization ARNs to assign. 
+ // +listType=set + OrganizationArns []*string `json:"organizationArns,omitempty" tf:"organization_arns,omitempty"` + + // Set of AWS Organizational Unit ARNs to assign. + // +listType=set + OrganizationalUnitArns []*string `json:"organizationalUnitArns,omitempty" tf:"organizational_unit_arns,omitempty"` + + // Set of EC2 launch permission user groups to assign. Use all to distribute a public AMI. + // +listType=set + UserGroups []*string `json:"userGroups,omitempty" tf:"user_groups,omitempty"` + + // Set of AWS Account identifiers to assign. + // +listType=set + UserIds []*string `json:"userIds,omitempty" tf:"user_ids,omitempty"` +} + +type LaunchPermissionParameters struct { + + // Set of AWS Organization ARNs to assign. + // +kubebuilder:validation:Optional + // +listType=set + OrganizationArns []*string `json:"organizationArns,omitempty" tf:"organization_arns,omitempty"` + + // Set of AWS Organizational Unit ARNs to assign. + // +kubebuilder:validation:Optional + // +listType=set + OrganizationalUnitArns []*string `json:"organizationalUnitArns,omitempty" tf:"organizational_unit_arns,omitempty"` + + // Set of EC2 launch permission user groups to assign. Use all to distribute a public AMI. + // +kubebuilder:validation:Optional + // +listType=set + UserGroups []*string `json:"userGroups,omitempty" tf:"user_groups,omitempty"` + + // Set of AWS Account identifiers to assign. + // +kubebuilder:validation:Optional + // +listType=set + UserIds []*string `json:"userIds,omitempty" tf:"user_ids,omitempty"` +} + +type LaunchTemplateConfigurationInitParameters struct { + + // The account ID that this configuration applies to. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Indicates whether to set the specified Amazon EC2 launch template as the default launch template. Defaults to true. + Default *bool `json:"default,omitempty" tf:"default,omitempty"` + + // The ID of the Amazon EC2 launch template to use. 
+ LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` +} + +type LaunchTemplateConfigurationObservation struct { + + // The account ID that this configuration applies to. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Indicates whether to set the specified Amazon EC2 launch template as the default launch template. Defaults to true. + Default *bool `json:"default,omitempty" tf:"default,omitempty"` + + // The ID of the Amazon EC2 launch template to use. + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` +} + +type LaunchTemplateConfigurationParameters struct { + + // The account ID that this configuration applies to. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Indicates whether to set the specified Amazon EC2 launch template as the default launch template. Defaults to true. + // +kubebuilder:validation:Optional + Default *bool `json:"default,omitempty" tf:"default,omitempty"` + + // The ID of the Amazon EC2 launch template to use. + // +kubebuilder:validation:Optional + LaunchTemplateID *string `json:"launchTemplateId" tf:"launch_template_id,omitempty"` +} + +type LaunchTemplateInitParameters struct { + + // The ID of the launch template to use for faster launching for a Windows AMI. + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` + + // The name of the launch template to use for faster launching for a Windows AMI. + LaunchTemplateName *string `json:"launchTemplateName,omitempty" tf:"launch_template_name,omitempty"` + + // The version of the launch template to use for faster launching for a Windows AMI. 
+ LaunchTemplateVersion *string `json:"launchTemplateVersion,omitempty" tf:"launch_template_version,omitempty"` +} + +type LaunchTemplateObservation struct { + + // The ID of the launch template to use for faster launching for a Windows AMI. + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` + + // The name of the launch template to use for faster launching for a Windows AMI. + LaunchTemplateName *string `json:"launchTemplateName,omitempty" tf:"launch_template_name,omitempty"` + + // The version of the launch template to use for faster launching for a Windows AMI. + LaunchTemplateVersion *string `json:"launchTemplateVersion,omitempty" tf:"launch_template_version,omitempty"` +} + +type LaunchTemplateParameters struct { + + // The ID of the launch template to use for faster launching for a Windows AMI. + // +kubebuilder:validation:Optional + LaunchTemplateID *string `json:"launchTemplateId,omitempty" tf:"launch_template_id,omitempty"` + + // The name of the launch template to use for faster launching for a Windows AMI. + // +kubebuilder:validation:Optional + LaunchTemplateName *string `json:"launchTemplateName,omitempty" tf:"launch_template_name,omitempty"` + + // The version of the launch template to use for faster launching for a Windows AMI. + // +kubebuilder:validation:Optional + LaunchTemplateVersion *string `json:"launchTemplateVersion,omitempty" tf:"launch_template_version,omitempty"` +} + +type SnapshotConfigurationInitParameters struct { + + // The number of pre-provisioned snapshots to keep on hand for a fast-launch enabled Windows AMI. + TargetResourceCount *float64 `json:"targetResourceCount,omitempty" tf:"target_resource_count,omitempty"` +} + +type SnapshotConfigurationObservation struct { + + // The number of pre-provisioned snapshots to keep on hand for a fast-launch enabled Windows AMI. 
+ TargetResourceCount *float64 `json:"targetResourceCount,omitempty" tf:"target_resource_count,omitempty"` +} + +type SnapshotConfigurationParameters struct { + + // The number of pre-provisioned snapshots to keep on hand for a fast-launch enabled Windows AMI. + // +kubebuilder:validation:Optional + TargetResourceCount *float64 `json:"targetResourceCount,omitempty" tf:"target_resource_count,omitempty"` +} + +// DistributionConfigurationSpec defines the desired state of DistributionConfiguration +type DistributionConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DistributionConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DistributionConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// DistributionConfigurationStatus defines the observed state of DistributionConfiguration. +type DistributionConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DistributionConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DistributionConfiguration is the Schema for the DistributionConfigurations API. 
Manage an Image Builder Distribution Configuration +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DistributionConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.distribution) || (has(self.initProvider) && has(self.initProvider.distribution))",message="spec.forProvider.distribution is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec DistributionConfigurationSpec `json:"spec"` + Status DistributionConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DistributionConfigurationList contains a list of DistributionConfigurations +type DistributionConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DistributionConfiguration `json:"items"` +} + +// Repository type metadata. 
+var ( + DistributionConfiguration_Kind = "DistributionConfiguration" + DistributionConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DistributionConfiguration_Kind}.String() + DistributionConfiguration_KindAPIVersion = DistributionConfiguration_Kind + "." + CRDGroupVersion.String() + DistributionConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(DistributionConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&DistributionConfiguration{}, &DistributionConfigurationList{}) +} diff --git a/apis/imagebuilder/v1beta2/zz_generated.conversion_hubs.go b/apis/imagebuilder/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..76172925cd --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ContainerRecipe) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DistributionConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Image) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ImagePipeline) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ImageRecipe) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *InfrastructureConfiguration) Hub() {} diff --git a/apis/imagebuilder/v1beta2/zz_generated.deepcopy.go b/apis/imagebuilder/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..ddba5008c7 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,5988 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AMIDistributionConfigurationInitParameters) DeepCopyInto(out *AMIDistributionConfigurationInitParameters) { + *out = *in + if in.AMITags != nil { + in, out := &in.AMITags, &out.AMITags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LaunchPermission != nil { + in, out := &in.LaunchPermission, &out.LaunchPermission + *out = new(LaunchPermissionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TargetAccountIds != nil { + in, out := &in.TargetAccountIds, &out.TargetAccountIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AMIDistributionConfigurationInitParameters. +func (in *AMIDistributionConfigurationInitParameters) DeepCopy() *AMIDistributionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AMIDistributionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AMIDistributionConfigurationObservation) DeepCopyInto(out *AMIDistributionConfigurationObservation) { + *out = *in + if in.AMITags != nil { + in, out := &in.AMITags, &out.AMITags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LaunchPermission != nil { + in, out := &in.LaunchPermission, &out.LaunchPermission + *out = new(LaunchPermissionObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TargetAccountIds != nil { + in, out := &in.TargetAccountIds, &out.TargetAccountIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AMIDistributionConfigurationObservation. +func (in *AMIDistributionConfigurationObservation) DeepCopy() *AMIDistributionConfigurationObservation { + if in == nil { + return nil + } + out := new(AMIDistributionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AMIDistributionConfigurationParameters) DeepCopyInto(out *AMIDistributionConfigurationParameters) { + *out = *in + if in.AMITags != nil { + in, out := &in.AMITags, &out.AMITags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LaunchPermission != nil { + in, out := &in.LaunchPermission, &out.LaunchPermission + *out = new(LaunchPermissionParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TargetAccountIds != nil { + in, out := &in.TargetAccountIds, &out.TargetAccountIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AMIDistributionConfigurationParameters. +func (in *AMIDistributionConfigurationParameters) DeepCopy() *AMIDistributionConfigurationParameters { + if in == nil { + return nil + } + out := new(AMIDistributionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmisInitParameters) DeepCopyInto(out *AmisInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmisInitParameters. 
+func (in *AmisInitParameters) DeepCopy() *AmisInitParameters { + if in == nil { + return nil + } + out := new(AmisInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmisObservation) DeepCopyInto(out *AmisObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmisObservation. +func (in *AmisObservation) DeepCopy() *AmisObservation { + if in == nil { + return nil + } + out := new(AmisObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmisParameters) DeepCopyInto(out *AmisParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmisParameters. +func (in *AmisParameters) DeepCopy() *AmisParameters { + if in == nil { + return nil + } + out := new(AmisParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockDeviceMappingEBSInitParameters) DeepCopyInto(out *BlockDeviceMappingEBSInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingEBSInitParameters. +func (in *BlockDeviceMappingEBSInitParameters) DeepCopy() *BlockDeviceMappingEBSInitParameters { + if in == nil { + return nil + } + out := new(BlockDeviceMappingEBSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockDeviceMappingEBSObservation) DeepCopyInto(out *BlockDeviceMappingEBSObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingEBSObservation. +func (in *BlockDeviceMappingEBSObservation) DeepCopy() *BlockDeviceMappingEBSObservation { + if in == nil { + return nil + } + out := new(BlockDeviceMappingEBSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockDeviceMappingEBSParameters) DeepCopyInto(out *BlockDeviceMappingEBSParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingEBSParameters. +func (in *BlockDeviceMappingEBSParameters) DeepCopy() *BlockDeviceMappingEBSParameters { + if in == nil { + return nil + } + out := new(BlockDeviceMappingEBSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockDeviceMappingInitParameters) DeepCopyInto(out *BlockDeviceMappingInitParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(EBSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingInitParameters. +func (in *BlockDeviceMappingInitParameters) DeepCopy() *BlockDeviceMappingInitParameters { + if in == nil { + return nil + } + out := new(BlockDeviceMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockDeviceMappingObservation) DeepCopyInto(out *BlockDeviceMappingObservation) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(EBSObservation) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingObservation. +func (in *BlockDeviceMappingObservation) DeepCopy() *BlockDeviceMappingObservation { + if in == nil { + return nil + } + out := new(BlockDeviceMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BlockDeviceMappingParameters) DeepCopyInto(out *BlockDeviceMappingParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(EBSParameters) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingParameters. +func (in *BlockDeviceMappingParameters) DeepCopy() *BlockDeviceMappingParameters { + if in == nil { + return nil + } + out := new(BlockDeviceMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentInitParameters) DeepCopyInto(out *ComponentInitParameters) { + *out = *in + if in.ComponentArn != nil { + in, out := &in.ComponentArn, &out.ComponentArn + *out = new(string) + **out = **in + } + if in.ComponentArnRef != nil { + in, out := &in.ComponentArnRef, &out.ComponentArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ComponentArnSelector != nil { + in, out := &in.ComponentArnSelector, &out.ComponentArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentInitParameters. 
+func (in *ComponentInitParameters) DeepCopy() *ComponentInitParameters { + if in == nil { + return nil + } + out := new(ComponentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentObservation) DeepCopyInto(out *ComponentObservation) { + *out = *in + if in.ComponentArn != nil { + in, out := &in.ComponentArn, &out.ComponentArn + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentObservation. +func (in *ComponentObservation) DeepCopy() *ComponentObservation { + if in == nil { + return nil + } + out := new(ComponentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentParameterInitParameters) DeepCopyInto(out *ComponentParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentParameterInitParameters. +func (in *ComponentParameterInitParameters) DeepCopy() *ComponentParameterInitParameters { + if in == nil { + return nil + } + out := new(ComponentParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentParameterObservation) DeepCopyInto(out *ComponentParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentParameterObservation. +func (in *ComponentParameterObservation) DeepCopy() *ComponentParameterObservation { + if in == nil { + return nil + } + out := new(ComponentParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentParameterParameters) DeepCopyInto(out *ComponentParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentParameterParameters. +func (in *ComponentParameterParameters) DeepCopy() *ComponentParameterParameters { + if in == nil { + return nil + } + out := new(ComponentParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentParameters) DeepCopyInto(out *ComponentParameters) { + *out = *in + if in.ComponentArn != nil { + in, out := &in.ComponentArn, &out.ComponentArn + *out = new(string) + **out = **in + } + if in.ComponentArnRef != nil { + in, out := &in.ComponentArnRef, &out.ComponentArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ComponentArnSelector != nil { + in, out := &in.ComponentArnSelector, &out.ComponentArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentParameters. +func (in *ComponentParameters) DeepCopy() *ComponentParameters { + if in == nil { + return nil + } + out := new(ComponentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDistributionConfigurationInitParameters) DeepCopyInto(out *ContainerDistributionConfigurationInitParameters) { + *out = *in + if in.ContainerTags != nil { + in, out := &in.ContainerTags, &out.ContainerTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.TargetRepository != nil { + in, out := &in.TargetRepository, &out.TargetRepository + *out = new(ContainerDistributionConfigurationTargetRepositoryInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDistributionConfigurationInitParameters. 
+func (in *ContainerDistributionConfigurationInitParameters) DeepCopy() *ContainerDistributionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ContainerDistributionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDistributionConfigurationObservation) DeepCopyInto(out *ContainerDistributionConfigurationObservation) { + *out = *in + if in.ContainerTags != nil { + in, out := &in.ContainerTags, &out.ContainerTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.TargetRepository != nil { + in, out := &in.TargetRepository, &out.TargetRepository + *out = new(ContainerDistributionConfigurationTargetRepositoryObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDistributionConfigurationObservation. +func (in *ContainerDistributionConfigurationObservation) DeepCopy() *ContainerDistributionConfigurationObservation { + if in == nil { + return nil + } + out := new(ContainerDistributionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerDistributionConfigurationParameters) DeepCopyInto(out *ContainerDistributionConfigurationParameters) { + *out = *in + if in.ContainerTags != nil { + in, out := &in.ContainerTags, &out.ContainerTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.TargetRepository != nil { + in, out := &in.TargetRepository, &out.TargetRepository + *out = new(ContainerDistributionConfigurationTargetRepositoryParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDistributionConfigurationParameters. +func (in *ContainerDistributionConfigurationParameters) DeepCopy() *ContainerDistributionConfigurationParameters { + if in == nil { + return nil + } + out := new(ContainerDistributionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDistributionConfigurationTargetRepositoryInitParameters) DeepCopyInto(out *ContainerDistributionConfigurationTargetRepositoryInitParameters) { + *out = *in + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDistributionConfigurationTargetRepositoryInitParameters. 
+func (in *ContainerDistributionConfigurationTargetRepositoryInitParameters) DeepCopy() *ContainerDistributionConfigurationTargetRepositoryInitParameters { + if in == nil { + return nil + } + out := new(ContainerDistributionConfigurationTargetRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDistributionConfigurationTargetRepositoryObservation) DeepCopyInto(out *ContainerDistributionConfigurationTargetRepositoryObservation) { + *out = *in + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDistributionConfigurationTargetRepositoryObservation. +func (in *ContainerDistributionConfigurationTargetRepositoryObservation) DeepCopy() *ContainerDistributionConfigurationTargetRepositoryObservation { + if in == nil { + return nil + } + out := new(ContainerDistributionConfigurationTargetRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDistributionConfigurationTargetRepositoryParameters) DeepCopyInto(out *ContainerDistributionConfigurationTargetRepositoryParameters) { + *out = *in + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDistributionConfigurationTargetRepositoryParameters. 
+func (in *ContainerDistributionConfigurationTargetRepositoryParameters) DeepCopy() *ContainerDistributionConfigurationTargetRepositoryParameters { + if in == nil { + return nil + } + out := new(ContainerDistributionConfigurationTargetRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRecipe) DeepCopyInto(out *ContainerRecipe) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecipe. +func (in *ContainerRecipe) DeepCopy() *ContainerRecipe { + if in == nil { + return nil + } + out := new(ContainerRecipe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerRecipe) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerRecipeInitParameters) DeepCopyInto(out *ContainerRecipeInitParameters) { + *out = *in + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = make([]ComponentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerType != nil { + in, out := &in.ContainerType, &out.ContainerType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DockerfileTemplateData != nil { + in, out := &in.DockerfileTemplateData, &out.DockerfileTemplateData + *out = new(string) + **out = **in + } + if in.DockerfileTemplateURI != nil { + in, out := &in.DockerfileTemplateURI, &out.DockerfileTemplateURI + *out = new(string) + **out = **in + } + if in.InstanceConfiguration != nil { + in, out := &in.InstanceConfiguration, &out.InstanceConfiguration + *out = new(InstanceConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ParentImage != nil { + in, out := &in.ParentImage, &out.ParentImage + *out = new(string) + **out = **in + } + if in.PlatformOverride != nil { + in, out := &in.PlatformOverride, &out.PlatformOverride + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + 
*out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetRepository != nil { + in, out := &in.TargetRepository, &out.TargetRepository + *out = new(TargetRepositoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.WorkingDirectory != nil { + in, out := &in.WorkingDirectory, &out.WorkingDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecipeInitParameters. +func (in *ContainerRecipeInitParameters) DeepCopy() *ContainerRecipeInitParameters { + if in == nil { + return nil + } + out := new(ContainerRecipeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRecipeList) DeepCopyInto(out *ContainerRecipeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContainerRecipe, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecipeList. +func (in *ContainerRecipeList) DeepCopy() *ContainerRecipeList { + if in == nil { + return nil + } + out := new(ContainerRecipeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerRecipeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerRecipeObservation) DeepCopyInto(out *ContainerRecipeObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = make([]ComponentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerType != nil { + in, out := &in.ContainerType, &out.ContainerType + *out = new(string) + **out = **in + } + if in.DateCreated != nil { + in, out := &in.DateCreated, &out.DateCreated + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DockerfileTemplateData != nil { + in, out := &in.DockerfileTemplateData, &out.DockerfileTemplateData + *out = new(string) + **out = **in + } + if in.DockerfileTemplateURI != nil { + in, out := &in.DockerfileTemplateURI, &out.DockerfileTemplateURI + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceConfiguration != nil { + in, out := &in.InstanceConfiguration, &out.InstanceConfiguration + *out = new(InstanceConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.ParentImage != nil { + in, out := &in.ParentImage, &out.ParentImage + *out = new(string) + **out = **in + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(string) + **out = **in + } + if in.PlatformOverride != nil { + in, out := &in.PlatformOverride, 
&out.PlatformOverride + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetRepository != nil { + in, out := &in.TargetRepository, &out.TargetRepository + *out = new(TargetRepositoryObservation) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.WorkingDirectory != nil { + in, out := &in.WorkingDirectory, &out.WorkingDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecipeObservation. +func (in *ContainerRecipeObservation) DeepCopy() *ContainerRecipeObservation { + if in == nil { + return nil + } + out := new(ContainerRecipeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerRecipeParameters) DeepCopyInto(out *ContainerRecipeParameters) { + *out = *in + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = make([]ComponentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerType != nil { + in, out := &in.ContainerType, &out.ContainerType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DockerfileTemplateData != nil { + in, out := &in.DockerfileTemplateData, &out.DockerfileTemplateData + *out = new(string) + **out = **in + } + if in.DockerfileTemplateURI != nil { + in, out := &in.DockerfileTemplateURI, &out.DockerfileTemplateURI + *out = new(string) + **out = **in + } + if in.InstanceConfiguration != nil { + in, out := &in.InstanceConfiguration, &out.InstanceConfiguration + *out = new(InstanceConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ParentImage != nil { + in, out := &in.ParentImage, &out.ParentImage + *out = new(string) + **out = **in + } + if in.PlatformOverride != nil { + in, out := &in.PlatformOverride, &out.PlatformOverride + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetRepository != nil { + in, out := &in.TargetRepository, &out.TargetRepository + *out = new(TargetRepositoryParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.WorkingDirectory != nil { + in, out := &in.WorkingDirectory, &out.WorkingDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecipeParameters. +func (in *ContainerRecipeParameters) DeepCopy() *ContainerRecipeParameters { + if in == nil { + return nil + } + out := new(ContainerRecipeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRecipeSpec) DeepCopyInto(out *ContainerRecipeSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecipeSpec. +func (in *ContainerRecipeSpec) DeepCopy() *ContainerRecipeSpec { + if in == nil { + return nil + } + out := new(ContainerRecipeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRecipeStatus) DeepCopyInto(out *ContainerRecipeStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecipeStatus. 
+func (in *ContainerRecipeStatus) DeepCopy() *ContainerRecipeStatus { + if in == nil { + return nil + } + out := new(ContainerRecipeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainersInitParameters) DeepCopyInto(out *ContainersInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainersInitParameters. +func (in *ContainersInitParameters) DeepCopy() *ContainersInitParameters { + if in == nil { + return nil + } + out := new(ContainersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainersObservation) DeepCopyInto(out *ContainersObservation) { + *out = *in + if in.ImageUris != nil { + in, out := &in.ImageUris, &out.ImageUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainersObservation. +func (in *ContainersObservation) DeepCopy() *ContainersObservation { + if in == nil { + return nil + } + out := new(ContainersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainersParameters) DeepCopyInto(out *ContainersParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainersParameters. 
+func (in *ContainersParameters) DeepCopy() *ContainersParameters { + if in == nil { + return nil + } + out := new(ContainersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionConfiguration) DeepCopyInto(out *DistributionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionConfiguration. +func (in *DistributionConfiguration) DeepCopy() *DistributionConfiguration { + if in == nil { + return nil + } + out := new(DistributionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DistributionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DistributionConfigurationInitParameters) DeepCopyInto(out *DistributionConfigurationInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Distribution != nil { + in, out := &in.Distribution, &out.Distribution + *out = make([]DistributionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionConfigurationInitParameters. +func (in *DistributionConfigurationInitParameters) DeepCopy() *DistributionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(DistributionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionConfigurationList) DeepCopyInto(out *DistributionConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DistributionConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionConfigurationList. 
+func (in *DistributionConfigurationList) DeepCopy() *DistributionConfigurationList { + if in == nil { + return nil + } + out := new(DistributionConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DistributionConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionConfigurationObservation) DeepCopyInto(out *DistributionConfigurationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DateCreated != nil { + in, out := &in.DateCreated, &out.DateCreated + *out = new(string) + **out = **in + } + if in.DateUpdated != nil { + in, out := &in.DateUpdated, &out.DateUpdated + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Distribution != nil { + in, out := &in.Distribution, &out.Distribution + *out = make([]DistributionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + 
if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionConfigurationObservation. +func (in *DistributionConfigurationObservation) DeepCopy() *DistributionConfigurationObservation { + if in == nil { + return nil + } + out := new(DistributionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionConfigurationParameters) DeepCopyInto(out *DistributionConfigurationParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Distribution != nil { + in, out := &in.Distribution, &out.Distribution + *out = make([]DistributionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionConfigurationParameters. 
+func (in *DistributionConfigurationParameters) DeepCopy() *DistributionConfigurationParameters { + if in == nil { + return nil + } + out := new(DistributionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionConfigurationSpec) DeepCopyInto(out *DistributionConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionConfigurationSpec. +func (in *DistributionConfigurationSpec) DeepCopy() *DistributionConfigurationSpec { + if in == nil { + return nil + } + out := new(DistributionConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DistributionConfigurationStatus) DeepCopyInto(out *DistributionConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionConfigurationStatus. +func (in *DistributionConfigurationStatus) DeepCopy() *DistributionConfigurationStatus { + if in == nil { + return nil + } + out := new(DistributionConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DistributionInitParameters) DeepCopyInto(out *DistributionInitParameters) { + *out = *in + if in.AMIDistributionConfiguration != nil { + in, out := &in.AMIDistributionConfiguration, &out.AMIDistributionConfiguration + *out = new(AMIDistributionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ContainerDistributionConfiguration != nil { + in, out := &in.ContainerDistributionConfiguration, &out.ContainerDistributionConfiguration + *out = new(ContainerDistributionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FastLaunchConfiguration != nil { + in, out := &in.FastLaunchConfiguration, &out.FastLaunchConfiguration + *out = make([]FastLaunchConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LaunchTemplateConfiguration != nil { + in, out := &in.LaunchTemplateConfiguration, &out.LaunchTemplateConfiguration + *out = make([]LaunchTemplateConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LicenseConfigurationArns != nil { + in, out := &in.LicenseConfigurationArns, &out.LicenseConfigurationArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionInitParameters. +func (in *DistributionInitParameters) DeepCopy() *DistributionInitParameters { + if in == nil { + return nil + } + out := new(DistributionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DistributionObservation) DeepCopyInto(out *DistributionObservation) { + *out = *in + if in.AMIDistributionConfiguration != nil { + in, out := &in.AMIDistributionConfiguration, &out.AMIDistributionConfiguration + *out = new(AMIDistributionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ContainerDistributionConfiguration != nil { + in, out := &in.ContainerDistributionConfiguration, &out.ContainerDistributionConfiguration + *out = new(ContainerDistributionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.FastLaunchConfiguration != nil { + in, out := &in.FastLaunchConfiguration, &out.FastLaunchConfiguration + *out = make([]FastLaunchConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LaunchTemplateConfiguration != nil { + in, out := &in.LaunchTemplateConfiguration, &out.LaunchTemplateConfiguration + *out = make([]LaunchTemplateConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LicenseConfigurationArns != nil { + in, out := &in.LicenseConfigurationArns, &out.LicenseConfigurationArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionObservation. +func (in *DistributionObservation) DeepCopy() *DistributionObservation { + if in == nil { + return nil + } + out := new(DistributionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DistributionParameters) DeepCopyInto(out *DistributionParameters) { + *out = *in + if in.AMIDistributionConfiguration != nil { + in, out := &in.AMIDistributionConfiguration, &out.AMIDistributionConfiguration + *out = new(AMIDistributionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ContainerDistributionConfiguration != nil { + in, out := &in.ContainerDistributionConfiguration, &out.ContainerDistributionConfiguration + *out = new(ContainerDistributionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.FastLaunchConfiguration != nil { + in, out := &in.FastLaunchConfiguration, &out.FastLaunchConfiguration + *out = make([]FastLaunchConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LaunchTemplateConfiguration != nil { + in, out := &in.LaunchTemplateConfiguration, &out.LaunchTemplateConfiguration + *out = make([]LaunchTemplateConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LicenseConfigurationArns != nil { + in, out := &in.LicenseConfigurationArns, &out.LicenseConfigurationArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributionParameters. +func (in *DistributionParameters) DeepCopy() *DistributionParameters { + if in == nil { + return nil + } + out := new(DistributionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSInitParameters) DeepCopyInto(out *EBSInitParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSInitParameters. +func (in *EBSInitParameters) DeepCopy() *EBSInitParameters { + if in == nil { + return nil + } + out := new(EBSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSObservation) DeepCopyInto(out *EBSObservation) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSObservation. +func (in *EBSObservation) DeepCopy() *EBSObservation { + if in == nil { + return nil + } + out := new(EBSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSParameters) DeepCopyInto(out *EBSParameters) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSParameters. +func (in *EBSParameters) DeepCopy() *EBSParameters { + if in == nil { + return nil + } + out := new(EBSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcrConfigurationInitParameters) DeepCopyInto(out *EcrConfigurationInitParameters) { + *out = *in + if in.ContainerTags != nil { + in, out := &in.ContainerTags, &out.ContainerTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcrConfigurationInitParameters. 
+func (in *EcrConfigurationInitParameters) DeepCopy() *EcrConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EcrConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcrConfigurationObservation) DeepCopyInto(out *EcrConfigurationObservation) { + *out = *in + if in.ContainerTags != nil { + in, out := &in.ContainerTags, &out.ContainerTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcrConfigurationObservation. +func (in *EcrConfigurationObservation) DeepCopy() *EcrConfigurationObservation { + if in == nil { + return nil + } + out := new(EcrConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcrConfigurationParameters) DeepCopyInto(out *EcrConfigurationParameters) { + *out = *in + if in.ContainerTags != nil { + in, out := &in.ContainerTags, &out.ContainerTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcrConfigurationParameters. 
+func (in *EcrConfigurationParameters) DeepCopy() *EcrConfigurationParameters { + if in == nil { + return nil + } + out := new(EcrConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FastLaunchConfigurationInitParameters) DeepCopyInto(out *FastLaunchConfigurationInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxParallelLaunches != nil { + in, out := &in.MaxParallelLaunches, &out.MaxParallelLaunches + *out = new(float64) + **out = **in + } + if in.SnapshotConfiguration != nil { + in, out := &in.SnapshotConfiguration, &out.SnapshotConfiguration + *out = new(SnapshotConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FastLaunchConfigurationInitParameters. +func (in *FastLaunchConfigurationInitParameters) DeepCopy() *FastLaunchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(FastLaunchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FastLaunchConfigurationObservation) DeepCopyInto(out *FastLaunchConfigurationObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateObservation) + (*in).DeepCopyInto(*out) + } + if in.MaxParallelLaunches != nil { + in, out := &in.MaxParallelLaunches, &out.MaxParallelLaunches + *out = new(float64) + **out = **in + } + if in.SnapshotConfiguration != nil { + in, out := &in.SnapshotConfiguration, &out.SnapshotConfiguration + *out = new(SnapshotConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FastLaunchConfigurationObservation. +func (in *FastLaunchConfigurationObservation) DeepCopy() *FastLaunchConfigurationObservation { + if in == nil { + return nil + } + out := new(FastLaunchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FastLaunchConfigurationParameters) DeepCopyInto(out *FastLaunchConfigurationParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LaunchTemplate != nil { + in, out := &in.LaunchTemplate, &out.LaunchTemplate + *out = new(LaunchTemplateParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxParallelLaunches != nil { + in, out := &in.MaxParallelLaunches, &out.MaxParallelLaunches + *out = new(float64) + **out = **in + } + if in.SnapshotConfiguration != nil { + in, out := &in.SnapshotConfiguration, &out.SnapshotConfiguration + *out = new(SnapshotConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FastLaunchConfigurationParameters. +func (in *FastLaunchConfigurationParameters) DeepCopy() *FastLaunchConfigurationParameters { + if in == nil { + return nil + } + out := new(FastLaunchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageInitParameters) DeepCopyInto(out *ImageInitParameters) { + *out = *in + if in.ContainerRecipeArn != nil { + in, out := &in.ContainerRecipeArn, &out.ContainerRecipeArn + *out = new(string) + **out = **in + } + if in.DistributionConfigurationArn != nil { + in, out := &in.DistributionConfigurationArn, &out.DistributionConfigurationArn + *out = new(string) + **out = **in + } + if in.DistributionConfigurationArnRef != nil { + in, out := &in.DistributionConfigurationArnRef, &out.DistributionConfigurationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DistributionConfigurationArnSelector != nil { + in, out := &in.DistributionConfigurationArnSelector, &out.DistributionConfigurationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EnhancedImageMetadataEnabled != nil { + in, out := &in.EnhancedImageMetadataEnabled, &out.EnhancedImageMetadataEnabled + *out = new(bool) + **out = **in + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.ImageRecipeArn != nil { + in, out := &in.ImageRecipeArn, &out.ImageRecipeArn + *out = new(string) + **out = **in + } + if in.ImageRecipeArnRef != nil { + in, out := &in.ImageRecipeArnRef, &out.ImageRecipeArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ImageRecipeArnSelector != nil { + in, out := &in.ImageRecipeArnSelector, &out.ImageRecipeArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningConfiguration != nil { + in, out := &in.ImageScanningConfiguration, &out.ImageScanningConfiguration + *out = new(ImageScanningConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageTestsConfiguration 
!= nil { + in, out := &in.ImageTestsConfiguration, &out.ImageTestsConfiguration + *out = new(ImageTestsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArn != nil { + in, out := &in.InfrastructureConfigurationArn, &out.InfrastructureConfigurationArn + *out = new(string) + **out = **in + } + if in.InfrastructureConfigurationArnRef != nil { + in, out := &in.InfrastructureConfigurationArnRef, &out.InfrastructureConfigurationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArnSelector != nil { + in, out := &in.InfrastructureConfigurationArnSelector, &out.InfrastructureConfigurationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Workflow != nil { + in, out := &in.Workflow, &out.Workflow + *out = make([]WorkflowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageInitParameters. +func (in *ImageInitParameters) DeepCopy() *ImageInitParameters { + if in == nil { + return nil + } + out := new(ImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageObservation) DeepCopyInto(out *ImageObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ContainerRecipeArn != nil { + in, out := &in.ContainerRecipeArn, &out.ContainerRecipeArn + *out = new(string) + **out = **in + } + if in.DateCreated != nil { + in, out := &in.DateCreated, &out.DateCreated + *out = new(string) + **out = **in + } + if in.DistributionConfigurationArn != nil { + in, out := &in.DistributionConfigurationArn, &out.DistributionConfigurationArn + *out = new(string) + **out = **in + } + if in.EnhancedImageMetadataEnabled != nil { + in, out := &in.EnhancedImageMetadataEnabled, &out.EnhancedImageMetadataEnabled + *out = new(bool) + **out = **in + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageRecipeArn != nil { + in, out := &in.ImageRecipeArn, &out.ImageRecipeArn + *out = new(string) + **out = **in + } + if 
in.ImageScanningConfiguration != nil { + in, out := &in.ImageScanningConfiguration, &out.ImageScanningConfiguration + *out = new(ImageScanningConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageTestsConfiguration != nil { + in, out := &in.ImageTestsConfiguration, &out.ImageTestsConfiguration + *out = new(ImageTestsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArn != nil { + in, out := &in.InfrastructureConfigurationArn, &out.InfrastructureConfigurationArn + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OsVersion != nil { + in, out := &in.OsVersion, &out.OsVersion + *out = new(string) + **out = **in + } + if in.OutputResources != nil { + in, out := &in.OutputResources, &out.OutputResources + *out = make([]OutputResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Workflow != nil { + in, out := &in.Workflow, &out.Workflow + *out = make([]WorkflowObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageObservation. +func (in *ImageObservation) DeepCopy() *ImageObservation { + if in == nil { + return nil + } + out := new(ImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageParameters) DeepCopyInto(out *ImageParameters) { + *out = *in + if in.ContainerRecipeArn != nil { + in, out := &in.ContainerRecipeArn, &out.ContainerRecipeArn + *out = new(string) + **out = **in + } + if in.DistributionConfigurationArn != nil { + in, out := &in.DistributionConfigurationArn, &out.DistributionConfigurationArn + *out = new(string) + **out = **in + } + if in.DistributionConfigurationArnRef != nil { + in, out := &in.DistributionConfigurationArnRef, &out.DistributionConfigurationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DistributionConfigurationArnSelector != nil { + in, out := &in.DistributionConfigurationArnSelector, &out.DistributionConfigurationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EnhancedImageMetadataEnabled != nil { + in, out := &in.EnhancedImageMetadataEnabled, &out.EnhancedImageMetadataEnabled + *out = new(bool) + **out = **in + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.ImageRecipeArn != nil { + in, out := &in.ImageRecipeArn, &out.ImageRecipeArn + *out = new(string) + **out = **in + } + if in.ImageRecipeArnRef != nil { + in, out := &in.ImageRecipeArnRef, &out.ImageRecipeArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ImageRecipeArnSelector != nil { + in, out := &in.ImageRecipeArnSelector, &out.ImageRecipeArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningConfiguration != nil { + in, 
out := &in.ImageScanningConfiguration, &out.ImageScanningConfiguration + *out = new(ImageScanningConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageTestsConfiguration != nil { + in, out := &in.ImageTestsConfiguration, &out.ImageTestsConfiguration + *out = new(ImageTestsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArn != nil { + in, out := &in.InfrastructureConfigurationArn, &out.InfrastructureConfigurationArn + *out = new(string) + **out = **in + } + if in.InfrastructureConfigurationArnRef != nil { + in, out := &in.InfrastructureConfigurationArnRef, &out.InfrastructureConfigurationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArnSelector != nil { + in, out := &in.InfrastructureConfigurationArnSelector, &out.InfrastructureConfigurationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Workflow != nil { + in, out := &in.Workflow, &out.Workflow + *out = make([]WorkflowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageParameters. +func (in *ImageParameters) DeepCopy() *ImageParameters { + if in == nil { + return nil + } + out := new(ImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePipeline) DeepCopyInto(out *ImagePipeline) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipeline. +func (in *ImagePipeline) DeepCopy() *ImagePipeline { + if in == nil { + return nil + } + out := new(ImagePipeline) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePipeline) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePipelineImageScanningConfigurationInitParameters) DeepCopyInto(out *ImagePipelineImageScanningConfigurationInitParameters) { + *out = *in + if in.EcrConfiguration != nil { + in, out := &in.EcrConfiguration, &out.EcrConfiguration + *out = new(ImageScanningConfigurationEcrConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningEnabled != nil { + in, out := &in.ImageScanningEnabled, &out.ImageScanningEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineImageScanningConfigurationInitParameters. +func (in *ImagePipelineImageScanningConfigurationInitParameters) DeepCopy() *ImagePipelineImageScanningConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ImagePipelineImageScanningConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePipelineImageScanningConfigurationObservation) DeepCopyInto(out *ImagePipelineImageScanningConfigurationObservation) { + *out = *in + if in.EcrConfiguration != nil { + in, out := &in.EcrConfiguration, &out.EcrConfiguration + *out = new(ImageScanningConfigurationEcrConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningEnabled != nil { + in, out := &in.ImageScanningEnabled, &out.ImageScanningEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineImageScanningConfigurationObservation. +func (in *ImagePipelineImageScanningConfigurationObservation) DeepCopy() *ImagePipelineImageScanningConfigurationObservation { + if in == nil { + return nil + } + out := new(ImagePipelineImageScanningConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePipelineImageScanningConfigurationParameters) DeepCopyInto(out *ImagePipelineImageScanningConfigurationParameters) { + *out = *in + if in.EcrConfiguration != nil { + in, out := &in.EcrConfiguration, &out.EcrConfiguration + *out = new(ImageScanningConfigurationEcrConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningEnabled != nil { + in, out := &in.ImageScanningEnabled, &out.ImageScanningEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineImageScanningConfigurationParameters. +func (in *ImagePipelineImageScanningConfigurationParameters) DeepCopy() *ImagePipelineImageScanningConfigurationParameters { + if in == nil { + return nil + } + out := new(ImagePipelineImageScanningConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ImagePipelineImageTestsConfigurationInitParameters) DeepCopyInto(out *ImagePipelineImageTestsConfigurationInitParameters) { + *out = *in + if in.ImageTestsEnabled != nil { + in, out := &in.ImageTestsEnabled, &out.ImageTestsEnabled + *out = new(bool) + **out = **in + } + if in.TimeoutMinutes != nil { + in, out := &in.TimeoutMinutes, &out.TimeoutMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineImageTestsConfigurationInitParameters. +func (in *ImagePipelineImageTestsConfigurationInitParameters) DeepCopy() *ImagePipelineImageTestsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ImagePipelineImageTestsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePipelineImageTestsConfigurationObservation) DeepCopyInto(out *ImagePipelineImageTestsConfigurationObservation) { + *out = *in + if in.ImageTestsEnabled != nil { + in, out := &in.ImageTestsEnabled, &out.ImageTestsEnabled + *out = new(bool) + **out = **in + } + if in.TimeoutMinutes != nil { + in, out := &in.TimeoutMinutes, &out.TimeoutMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineImageTestsConfigurationObservation. +func (in *ImagePipelineImageTestsConfigurationObservation) DeepCopy() *ImagePipelineImageTestsConfigurationObservation { + if in == nil { + return nil + } + out := new(ImagePipelineImageTestsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePipelineImageTestsConfigurationParameters) DeepCopyInto(out *ImagePipelineImageTestsConfigurationParameters) { + *out = *in + if in.ImageTestsEnabled != nil { + in, out := &in.ImageTestsEnabled, &out.ImageTestsEnabled + *out = new(bool) + **out = **in + } + if in.TimeoutMinutes != nil { + in, out := &in.TimeoutMinutes, &out.TimeoutMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineImageTestsConfigurationParameters. +func (in *ImagePipelineImageTestsConfigurationParameters) DeepCopy() *ImagePipelineImageTestsConfigurationParameters { + if in == nil { + return nil + } + out := new(ImagePipelineImageTestsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePipelineInitParameters) DeepCopyInto(out *ImagePipelineInitParameters) { + *out = *in + if in.ContainerRecipeArn != nil { + in, out := &in.ContainerRecipeArn, &out.ContainerRecipeArn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DistributionConfigurationArn != nil { + in, out := &in.DistributionConfigurationArn, &out.DistributionConfigurationArn + *out = new(string) + **out = **in + } + if in.EnhancedImageMetadataEnabled != nil { + in, out := &in.EnhancedImageMetadataEnabled, &out.EnhancedImageMetadataEnabled + *out = new(bool) + **out = **in + } + if in.ImageRecipeArn != nil { + in, out := &in.ImageRecipeArn, &out.ImageRecipeArn + *out = new(string) + **out = **in + } + if in.ImageRecipeArnRef != nil { + in, out := &in.ImageRecipeArnRef, &out.ImageRecipeArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ImageRecipeArnSelector != nil { + in, out := &in.ImageRecipeArnSelector, &out.ImageRecipeArnSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningConfiguration != nil { + in, out := &in.ImageScanningConfiguration, &out.ImageScanningConfiguration + *out = new(ImagePipelineImageScanningConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageTestsConfiguration != nil { + in, out := &in.ImageTestsConfiguration, &out.ImageTestsConfiguration + *out = new(ImagePipelineImageTestsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArn != nil { + in, out := &in.InfrastructureConfigurationArn, &out.InfrastructureConfigurationArn + *out = new(string) + **out = **in + } + if in.InfrastructureConfigurationArnRef != nil { + in, out := &in.InfrastructureConfigurationArnRef, &out.InfrastructureConfigurationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArnSelector != nil { + in, out := &in.InfrastructureConfigurationArnSelector, &out.InfrastructureConfigurationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineInitParameters. 
+func (in *ImagePipelineInitParameters) DeepCopy() *ImagePipelineInitParameters { + if in == nil { + return nil + } + out := new(ImagePipelineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePipelineList) DeepCopyInto(out *ImagePipelineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImagePipeline, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineList. +func (in *ImagePipelineList) DeepCopy() *ImagePipelineList { + if in == nil { + return nil + } + out := new(ImagePipelineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePipelineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePipelineObservation) DeepCopyInto(out *ImagePipelineObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ContainerRecipeArn != nil { + in, out := &in.ContainerRecipeArn, &out.ContainerRecipeArn + *out = new(string) + **out = **in + } + if in.DateCreated != nil { + in, out := &in.DateCreated, &out.DateCreated + *out = new(string) + **out = **in + } + if in.DateLastRun != nil { + in, out := &in.DateLastRun, &out.DateLastRun + *out = new(string) + **out = **in + } + if in.DateNextRun != nil { + in, out := &in.DateNextRun, &out.DateNextRun + *out = new(string) + **out = **in + } + if in.DateUpdated != nil { + in, out := &in.DateUpdated, &out.DateUpdated + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DistributionConfigurationArn != nil { + in, out := &in.DistributionConfigurationArn, &out.DistributionConfigurationArn + *out = new(string) + **out = **in + } + if in.EnhancedImageMetadataEnabled != nil { + in, out := &in.EnhancedImageMetadataEnabled, &out.EnhancedImageMetadataEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageRecipeArn != nil { + in, out := &in.ImageRecipeArn, &out.ImageRecipeArn + *out = new(string) + **out = **in + } + if in.ImageScanningConfiguration != nil { + in, out := &in.ImageScanningConfiguration, &out.ImageScanningConfiguration + *out = new(ImagePipelineImageScanningConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageTestsConfiguration != nil { + in, out := &in.ImageTestsConfiguration, &out.ImageTestsConfiguration + *out = new(ImagePipelineImageTestsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArn != nil { + in, out := &in.InfrastructureConfigurationArn, 
&out.InfrastructureConfigurationArn + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineObservation. +func (in *ImagePipelineObservation) DeepCopy() *ImagePipelineObservation { + if in == nil { + return nil + } + out := new(ImagePipelineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePipelineParameters) DeepCopyInto(out *ImagePipelineParameters) { + *out = *in + if in.ContainerRecipeArn != nil { + in, out := &in.ContainerRecipeArn, &out.ContainerRecipeArn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DistributionConfigurationArn != nil { + in, out := &in.DistributionConfigurationArn, &out.DistributionConfigurationArn + *out = new(string) + **out = **in + } + if in.EnhancedImageMetadataEnabled != nil { + in, out := &in.EnhancedImageMetadataEnabled, &out.EnhancedImageMetadataEnabled + *out = new(bool) + **out = **in + } + if in.ImageRecipeArn != nil { + in, out := &in.ImageRecipeArn, &out.ImageRecipeArn + *out = new(string) + **out = **in + } + if in.ImageRecipeArnRef != nil { + in, out := &in.ImageRecipeArnRef, &out.ImageRecipeArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ImageRecipeArnSelector != nil { + in, out := &in.ImageRecipeArnSelector, &out.ImageRecipeArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningConfiguration != nil { + in, out := &in.ImageScanningConfiguration, &out.ImageScanningConfiguration + *out = new(ImagePipelineImageScanningConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageTestsConfiguration != nil { + in, out := &in.ImageTestsConfiguration, &out.ImageTestsConfiguration + *out = new(ImagePipelineImageTestsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArn != nil { + in, out := &in.InfrastructureConfigurationArn, &out.InfrastructureConfigurationArn + *out = new(string) + **out = **in + } + if in.InfrastructureConfigurationArnRef != nil { + in, out := &in.InfrastructureConfigurationArnRef, &out.InfrastructureConfigurationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfigurationArnSelector != nil { + in, out := 
&in.InfrastructureConfigurationArnSelector, &out.InfrastructureConfigurationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineParameters. +func (in *ImagePipelineParameters) DeepCopy() *ImagePipelineParameters { + if in == nil { + return nil + } + out := new(ImagePipelineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePipelineSpec) DeepCopyInto(out *ImagePipelineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineSpec. +func (in *ImagePipelineSpec) DeepCopy() *ImagePipelineSpec { + if in == nil { + return nil + } + out := new(ImagePipelineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePipelineStatus) DeepCopyInto(out *ImagePipelineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePipelineStatus. +func (in *ImagePipelineStatus) DeepCopy() *ImagePipelineStatus { + if in == nil { + return nil + } + out := new(ImagePipelineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRecipe) DeepCopyInto(out *ImageRecipe) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipe. +func (in *ImageRecipe) DeepCopy() *ImageRecipe { + if in == nil { + return nil + } + out := new(ImageRecipe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageRecipe) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRecipeBlockDeviceMappingInitParameters) DeepCopyInto(out *ImageRecipeBlockDeviceMappingInitParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(BlockDeviceMappingEBSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeBlockDeviceMappingInitParameters. +func (in *ImageRecipeBlockDeviceMappingInitParameters) DeepCopy() *ImageRecipeBlockDeviceMappingInitParameters { + if in == nil { + return nil + } + out := new(ImageRecipeBlockDeviceMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRecipeBlockDeviceMappingObservation) DeepCopyInto(out *ImageRecipeBlockDeviceMappingObservation) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(BlockDeviceMappingEBSObservation) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeBlockDeviceMappingObservation. 
+func (in *ImageRecipeBlockDeviceMappingObservation) DeepCopy() *ImageRecipeBlockDeviceMappingObservation { + if in == nil { + return nil + } + out := new(ImageRecipeBlockDeviceMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRecipeBlockDeviceMappingParameters) DeepCopyInto(out *ImageRecipeBlockDeviceMappingParameters) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(BlockDeviceMappingEBSParameters) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(bool) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeBlockDeviceMappingParameters. +func (in *ImageRecipeBlockDeviceMappingParameters) DeepCopy() *ImageRecipeBlockDeviceMappingParameters { + if in == nil { + return nil + } + out := new(ImageRecipeBlockDeviceMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRecipeComponentInitParameters) DeepCopyInto(out *ImageRecipeComponentInitParameters) { + *out = *in + if in.ComponentArn != nil { + in, out := &in.ComponentArn, &out.ComponentArn + *out = new(string) + **out = **in + } + if in.ComponentArnRef != nil { + in, out := &in.ComponentArnRef, &out.ComponentArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ComponentArnSelector != nil { + in, out := &in.ComponentArnSelector, &out.ComponentArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ComponentParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeComponentInitParameters. +func (in *ImageRecipeComponentInitParameters) DeepCopy() *ImageRecipeComponentInitParameters { + if in == nil { + return nil + } + out := new(ImageRecipeComponentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRecipeComponentObservation) DeepCopyInto(out *ImageRecipeComponentObservation) { + *out = *in + if in.ComponentArn != nil { + in, out := &in.ComponentArn, &out.ComponentArn + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ComponentParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeComponentObservation. 
+func (in *ImageRecipeComponentObservation) DeepCopy() *ImageRecipeComponentObservation { + if in == nil { + return nil + } + out := new(ImageRecipeComponentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRecipeComponentParameters) DeepCopyInto(out *ImageRecipeComponentParameters) { + *out = *in + if in.ComponentArn != nil { + in, out := &in.ComponentArn, &out.ComponentArn + *out = new(string) + **out = **in + } + if in.ComponentArnRef != nil { + in, out := &in.ComponentArnRef, &out.ComponentArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ComponentArnSelector != nil { + in, out := &in.ComponentArnSelector, &out.ComponentArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ComponentParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeComponentParameters. +func (in *ImageRecipeComponentParameters) DeepCopy() *ImageRecipeComponentParameters { + if in == nil { + return nil + } + out := new(ImageRecipeComponentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRecipeInitParameters) DeepCopyInto(out *ImageRecipeInitParameters) { + *out = *in + if in.BlockDeviceMapping != nil { + in, out := &in.BlockDeviceMapping, &out.BlockDeviceMapping + *out = make([]ImageRecipeBlockDeviceMappingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = make([]ImageRecipeComponentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ParentImage != nil { + in, out := &in.ParentImage, &out.ParentImage + *out = new(string) + **out = **in + } + if in.SystemsManagerAgent != nil { + in, out := &in.SystemsManagerAgent, &out.SystemsManagerAgent + *out = new(SystemsManagerAgentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.WorkingDirectory != nil { + in, out := &in.WorkingDirectory, &out.WorkingDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeInitParameters. 
+func (in *ImageRecipeInitParameters) DeepCopy() *ImageRecipeInitParameters { + if in == nil { + return nil + } + out := new(ImageRecipeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRecipeList) DeepCopyInto(out *ImageRecipeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageRecipe, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeList. +func (in *ImageRecipeList) DeepCopy() *ImageRecipeList { + if in == nil { + return nil + } + out := new(ImageRecipeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageRecipeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRecipeObservation) DeepCopyInto(out *ImageRecipeObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BlockDeviceMapping != nil { + in, out := &in.BlockDeviceMapping, &out.BlockDeviceMapping + *out = make([]ImageRecipeBlockDeviceMappingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = make([]ImageRecipeComponentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DateCreated != nil { + in, out := &in.DateCreated, &out.DateCreated + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.ParentImage != nil { + in, out := &in.ParentImage, &out.ParentImage + *out = new(string) + **out = **in + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(string) + **out = **in + } + if in.SystemsManagerAgent != nil { + in, out := &in.SystemsManagerAgent, &out.SystemsManagerAgent + *out = new(SystemsManagerAgentObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal 
*string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.WorkingDirectory != nil { + in, out := &in.WorkingDirectory, &out.WorkingDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeObservation. +func (in *ImageRecipeObservation) DeepCopy() *ImageRecipeObservation { + if in == nil { + return nil + } + out := new(ImageRecipeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRecipeParameters) DeepCopyInto(out *ImageRecipeParameters) { + *out = *in + if in.BlockDeviceMapping != nil { + in, out := &in.BlockDeviceMapping, &out.BlockDeviceMapping + *out = make([]ImageRecipeBlockDeviceMappingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = make([]ImageRecipeComponentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ParentImage != nil { + in, out := &in.ParentImage, &out.ParentImage + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SystemsManagerAgent != nil { + in, out := &in.SystemsManagerAgent, 
&out.SystemsManagerAgent + *out = new(SystemsManagerAgentParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserDataBase64 != nil { + in, out := &in.UserDataBase64, &out.UserDataBase64 + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.WorkingDirectory != nil { + in, out := &in.WorkingDirectory, &out.WorkingDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeParameters. +func (in *ImageRecipeParameters) DeepCopy() *ImageRecipeParameters { + if in == nil { + return nil + } + out := new(ImageRecipeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRecipeSpec) DeepCopyInto(out *ImageRecipeSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeSpec. +func (in *ImageRecipeSpec) DeepCopy() *ImageRecipeSpec { + if in == nil { + return nil + } + out := new(ImageRecipeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRecipeStatus) DeepCopyInto(out *ImageRecipeStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRecipeStatus. +func (in *ImageRecipeStatus) DeepCopy() *ImageRecipeStatus { + if in == nil { + return nil + } + out := new(ImageRecipeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageScanningConfigurationEcrConfigurationInitParameters) DeepCopyInto(out *ImageScanningConfigurationEcrConfigurationInitParameters) { + *out = *in + if in.ContainerTags != nil { + in, out := &in.ContainerTags, &out.ContainerTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageScanningConfigurationEcrConfigurationInitParameters. +func (in *ImageScanningConfigurationEcrConfigurationInitParameters) DeepCopy() *ImageScanningConfigurationEcrConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ImageScanningConfigurationEcrConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageScanningConfigurationEcrConfigurationObservation) DeepCopyInto(out *ImageScanningConfigurationEcrConfigurationObservation) { + *out = *in + if in.ContainerTags != nil { + in, out := &in.ContainerTags, &out.ContainerTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageScanningConfigurationEcrConfigurationObservation. +func (in *ImageScanningConfigurationEcrConfigurationObservation) DeepCopy() *ImageScanningConfigurationEcrConfigurationObservation { + if in == nil { + return nil + } + out := new(ImageScanningConfigurationEcrConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageScanningConfigurationEcrConfigurationParameters) DeepCopyInto(out *ImageScanningConfigurationEcrConfigurationParameters) { + *out = *in + if in.ContainerTags != nil { + in, out := &in.ContainerTags, &out.ContainerTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageScanningConfigurationEcrConfigurationParameters. 
+func (in *ImageScanningConfigurationEcrConfigurationParameters) DeepCopy() *ImageScanningConfigurationEcrConfigurationParameters { + if in == nil { + return nil + } + out := new(ImageScanningConfigurationEcrConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageScanningConfigurationInitParameters) DeepCopyInto(out *ImageScanningConfigurationInitParameters) { + *out = *in + if in.EcrConfiguration != nil { + in, out := &in.EcrConfiguration, &out.EcrConfiguration + *out = new(EcrConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningEnabled != nil { + in, out := &in.ImageScanningEnabled, &out.ImageScanningEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageScanningConfigurationInitParameters. +func (in *ImageScanningConfigurationInitParameters) DeepCopy() *ImageScanningConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ImageScanningConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageScanningConfigurationObservation) DeepCopyInto(out *ImageScanningConfigurationObservation) { + *out = *in + if in.EcrConfiguration != nil { + in, out := &in.EcrConfiguration, &out.EcrConfiguration + *out = new(EcrConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningEnabled != nil { + in, out := &in.ImageScanningEnabled, &out.ImageScanningEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageScanningConfigurationObservation. 
+func (in *ImageScanningConfigurationObservation) DeepCopy() *ImageScanningConfigurationObservation { + if in == nil { + return nil + } + out := new(ImageScanningConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageScanningConfigurationParameters) DeepCopyInto(out *ImageScanningConfigurationParameters) { + *out = *in + if in.EcrConfiguration != nil { + in, out := &in.EcrConfiguration, &out.EcrConfiguration + *out = new(EcrConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageScanningEnabled != nil { + in, out := &in.ImageScanningEnabled, &out.ImageScanningEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageScanningConfigurationParameters. +func (in *ImageScanningConfigurationParameters) DeepCopy() *ImageScanningConfigurationParameters { + if in == nil { + return nil + } + out := new(ImageScanningConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { + if in == nil { + return nil + } + out := new(ImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. +func (in *ImageStatus) DeepCopy() *ImageStatus { + if in == nil { + return nil + } + out := new(ImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTestsConfigurationInitParameters) DeepCopyInto(out *ImageTestsConfigurationInitParameters) { + *out = *in + if in.ImageTestsEnabled != nil { + in, out := &in.ImageTestsEnabled, &out.ImageTestsEnabled + *out = new(bool) + **out = **in + } + if in.TimeoutMinutes != nil { + in, out := &in.TimeoutMinutes, &out.TimeoutMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTestsConfigurationInitParameters. +func (in *ImageTestsConfigurationInitParameters) DeepCopy() *ImageTestsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ImageTestsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTestsConfigurationObservation) DeepCopyInto(out *ImageTestsConfigurationObservation) { + *out = *in + if in.ImageTestsEnabled != nil { + in, out := &in.ImageTestsEnabled, &out.ImageTestsEnabled + *out = new(bool) + **out = **in + } + if in.TimeoutMinutes != nil { + in, out := &in.TimeoutMinutes, &out.TimeoutMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTestsConfigurationObservation. 
+func (in *ImageTestsConfigurationObservation) DeepCopy() *ImageTestsConfigurationObservation { + if in == nil { + return nil + } + out := new(ImageTestsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTestsConfigurationParameters) DeepCopyInto(out *ImageTestsConfigurationParameters) { + *out = *in + if in.ImageTestsEnabled != nil { + in, out := &in.ImageTestsEnabled, &out.ImageTestsEnabled + *out = new(bool) + **out = **in + } + if in.TimeoutMinutes != nil { + in, out := &in.TimeoutMinutes, &out.TimeoutMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTestsConfigurationParameters. +func (in *ImageTestsConfigurationParameters) DeepCopy() *ImageTestsConfigurationParameters { + if in == nil { + return nil + } + out := new(ImageTestsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureConfiguration) DeepCopyInto(out *InfrastructureConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureConfiguration. +func (in *InfrastructureConfiguration) DeepCopy() *InfrastructureConfiguration { + if in == nil { + return nil + } + out := new(InfrastructureConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *InfrastructureConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureConfigurationInitParameters) DeepCopyInto(out *InfrastructureConfigurationInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InstanceMetadataOptions != nil { + in, out := &in.InstanceMetadataOptions, &out.InstanceMetadataOptions + *out = new(InstanceMetadataOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceProfileName != nil { + in, out := &in.InstanceProfileName, &out.InstanceProfileName + *out = new(string) + **out = **in + } + if in.InstanceProfileNameRef != nil { + in, out := &in.InstanceProfileNameRef, &out.InstanceProfileNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceProfileNameSelector != nil { + in, out := &in.InstanceProfileNameSelector, &out.InstanceProfileNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InstanceTypes != nil { + in, out := &in.InstanceTypes, &out.InstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeyPair != nil { + in, out := &in.KeyPair, &out.KeyPair + *out = new(string) + **out = **in + } + if in.KeyPairRef != nil { + in, out := &in.KeyPairRef, &out.KeyPairRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyPairSelector != nil { + in, out := &in.KeyPairSelector, &out.KeyPairSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name 
+ *out = new(string) + **out = **in + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnsTopicArn != nil { + in, out := &in.SnsTopicArn, &out.SnsTopicArn + *out = new(string) + **out = **in + } + if in.SnsTopicArnRef != nil { + in, out := &in.SnsTopicArnRef, &out.SnsTopicArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnsTopicArnSelector != nil { + in, out := &in.SnsTopicArnSelector, &out.SnsTopicArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminateInstanceOnFailure != nil { + in, out := &in.TerminateInstanceOnFailure, &out.TerminateInstanceOnFailure + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureConfigurationInitParameters. +func (in *InfrastructureConfigurationInitParameters) DeepCopy() *InfrastructureConfigurationInitParameters { + if in == nil { + return nil + } + out := new(InfrastructureConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureConfigurationList) DeepCopyInto(out *InfrastructureConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InfrastructureConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureConfigurationList. +func (in *InfrastructureConfigurationList) DeepCopy() *InfrastructureConfigurationList { + if in == nil { + return nil + } + out := new(InfrastructureConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InfrastructureConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InfrastructureConfigurationObservation) DeepCopyInto(out *InfrastructureConfigurationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DateCreated != nil { + in, out := &in.DateCreated, &out.DateCreated + *out = new(string) + **out = **in + } + if in.DateUpdated != nil { + in, out := &in.DateUpdated, &out.DateUpdated + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceMetadataOptions != nil { + in, out := &in.InstanceMetadataOptions, &out.InstanceMetadataOptions + *out = new(InstanceMetadataOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.InstanceProfileName != nil { + in, out := &in.InstanceProfileName, &out.InstanceProfileName + *out = new(string) + **out = **in + } + if in.InstanceTypes != nil { + in, out := &in.InstanceTypes, &out.InstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeyPair != nil { + in, out := &in.KeyPair, &out.KeyPair + *out = new(string) + **out = **in + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + 
*out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnsTopicArn != nil { + in, out := &in.SnsTopicArn, &out.SnsTopicArn + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminateInstanceOnFailure != nil { + in, out := &in.TerminateInstanceOnFailure, &out.TerminateInstanceOnFailure + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureConfigurationObservation. +func (in *InfrastructureConfigurationObservation) DeepCopy() *InfrastructureConfigurationObservation { + if in == nil { + return nil + } + out := new(InfrastructureConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InfrastructureConfigurationParameters) DeepCopyInto(out *InfrastructureConfigurationParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InstanceMetadataOptions != nil { + in, out := &in.InstanceMetadataOptions, &out.InstanceMetadataOptions + *out = new(InstanceMetadataOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceProfileName != nil { + in, out := &in.InstanceProfileName, &out.InstanceProfileName + *out = new(string) + **out = **in + } + if in.InstanceProfileNameRef != nil { + in, out := &in.InstanceProfileNameRef, &out.InstanceProfileNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InstanceProfileNameSelector != nil { + in, out := &in.InstanceProfileNameSelector, &out.InstanceProfileNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InstanceTypes != nil { + in, out := &in.InstanceTypes, &out.InstanceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeyPair != nil { + in, out := &in.KeyPair, &out.KeyPair + *out = new(string) + **out = **in + } + if in.KeyPairRef != nil { + in, out := &in.KeyPairRef, &out.KeyPairRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyPairSelector != nil { + in, out := &in.KeyPairSelector, &out.KeyPairSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make(map[string]*string, len(*in)) + for key, 
val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnsTopicArn != nil { + in, out := &in.SnsTopicArn, &out.SnsTopicArn + *out = new(string) + **out = **in + } + if in.SnsTopicArnRef != nil { + in, out := &in.SnsTopicArnRef, &out.SnsTopicArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnsTopicArnSelector != nil { + in, out := &in.SnsTopicArnSelector, &out.SnsTopicArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if 
in.TerminateInstanceOnFailure != nil { + in, out := &in.TerminateInstanceOnFailure, &out.TerminateInstanceOnFailure + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureConfigurationParameters. +func (in *InfrastructureConfigurationParameters) DeepCopy() *InfrastructureConfigurationParameters { + if in == nil { + return nil + } + out := new(InfrastructureConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureConfigurationSpec) DeepCopyInto(out *InfrastructureConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureConfigurationSpec. +func (in *InfrastructureConfigurationSpec) DeepCopy() *InfrastructureConfigurationSpec { + if in == nil { + return nil + } + out := new(InfrastructureConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureConfigurationStatus) DeepCopyInto(out *InfrastructureConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureConfigurationStatus. +func (in *InfrastructureConfigurationStatus) DeepCopy() *InfrastructureConfigurationStatus { + if in == nil { + return nil + } + out := new(InfrastructureConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *InstanceConfigurationInitParameters) DeepCopyInto(out *InstanceConfigurationInitParameters) { + *out = *in + if in.BlockDeviceMapping != nil { + in, out := &in.BlockDeviceMapping, &out.BlockDeviceMapping + *out = make([]BlockDeviceMappingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceConfigurationInitParameters. +func (in *InstanceConfigurationInitParameters) DeepCopy() *InstanceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(InstanceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceConfigurationObservation) DeepCopyInto(out *InstanceConfigurationObservation) { + *out = *in + if in.BlockDeviceMapping != nil { + in, out := &in.BlockDeviceMapping, &out.BlockDeviceMapping + *out = make([]BlockDeviceMappingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceConfigurationObservation. +func (in *InstanceConfigurationObservation) DeepCopy() *InstanceConfigurationObservation { + if in == nil { + return nil + } + out := new(InstanceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceConfigurationParameters) DeepCopyInto(out *InstanceConfigurationParameters) { + *out = *in + if in.BlockDeviceMapping != nil { + in, out := &in.BlockDeviceMapping, &out.BlockDeviceMapping + *out = make([]BlockDeviceMappingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceConfigurationParameters. +func (in *InstanceConfigurationParameters) DeepCopy() *InstanceConfigurationParameters { + if in == nil { + return nil + } + out := new(InstanceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMetadataOptionsInitParameters) DeepCopyInto(out *InstanceMetadataOptionsInitParameters) { + *out = *in + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMetadataOptionsInitParameters. +func (in *InstanceMetadataOptionsInitParameters) DeepCopy() *InstanceMetadataOptionsInitParameters { + if in == nil { + return nil + } + out := new(InstanceMetadataOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceMetadataOptionsObservation) DeepCopyInto(out *InstanceMetadataOptionsObservation) { + *out = *in + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMetadataOptionsObservation. +func (in *InstanceMetadataOptionsObservation) DeepCopy() *InstanceMetadataOptionsObservation { + if in == nil { + return nil + } + out := new(InstanceMetadataOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMetadataOptionsParameters) DeepCopyInto(out *InstanceMetadataOptionsParameters) { + *out = *in + if in.HTTPPutResponseHopLimit != nil { + in, out := &in.HTTPPutResponseHopLimit, &out.HTTPPutResponseHopLimit + *out = new(float64) + **out = **in + } + if in.HTTPTokens != nil { + in, out := &in.HTTPTokens, &out.HTTPTokens + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMetadataOptionsParameters. +func (in *InstanceMetadataOptionsParameters) DeepCopy() *InstanceMetadataOptionsParameters { + if in == nil { + return nil + } + out := new(InstanceMetadataOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchPermissionInitParameters) DeepCopyInto(out *LaunchPermissionInitParameters) { + *out = *in + if in.OrganizationArns != nil { + in, out := &in.OrganizationArns, &out.OrganizationArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrganizationalUnitArns != nil { + in, out := &in.OrganizationalUnitArns, &out.OrganizationalUnitArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserGroups != nil { + in, out := &in.UserGroups, &out.UserGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserIds != nil { + in, out := &in.UserIds, &out.UserIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchPermissionInitParameters. +func (in *LaunchPermissionInitParameters) DeepCopy() *LaunchPermissionInitParameters { + if in == nil { + return nil + } + out := new(LaunchPermissionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchPermissionObservation) DeepCopyInto(out *LaunchPermissionObservation) { + *out = *in + if in.OrganizationArns != nil { + in, out := &in.OrganizationArns, &out.OrganizationArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrganizationalUnitArns != nil { + in, out := &in.OrganizationalUnitArns, &out.OrganizationalUnitArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserGroups != nil { + in, out := &in.UserGroups, &out.UserGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserIds != nil { + in, out := &in.UserIds, &out.UserIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchPermissionObservation. +func (in *LaunchPermissionObservation) DeepCopy() *LaunchPermissionObservation { + if in == nil { + return nil + } + out := new(LaunchPermissionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchPermissionParameters) DeepCopyInto(out *LaunchPermissionParameters) { + *out = *in + if in.OrganizationArns != nil { + in, out := &in.OrganizationArns, &out.OrganizationArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrganizationalUnitArns != nil { + in, out := &in.OrganizationalUnitArns, &out.OrganizationalUnitArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserGroups != nil { + in, out := &in.UserGroups, &out.UserGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserIds != nil { + in, out := &in.UserIds, &out.UserIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchPermissionParameters. +func (in *LaunchPermissionParameters) DeepCopy() *LaunchPermissionParameters { + if in == nil { + return nil + } + out := new(LaunchPermissionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateConfigurationInitParameters) DeepCopyInto(out *LaunchTemplateConfigurationInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(bool) + **out = **in + } + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateConfigurationInitParameters. +func (in *LaunchTemplateConfigurationInitParameters) DeepCopy() *LaunchTemplateConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateConfigurationObservation) DeepCopyInto(out *LaunchTemplateConfigurationObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(bool) + **out = **in + } + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateConfigurationObservation. +func (in *LaunchTemplateConfigurationObservation) DeepCopy() *LaunchTemplateConfigurationObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateConfigurationParameters) DeepCopyInto(out *LaunchTemplateConfigurationParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(bool) + **out = **in + } + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateConfigurationParameters. +func (in *LaunchTemplateConfigurationParameters) DeepCopy() *LaunchTemplateConfigurationParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateInitParameters) DeepCopyInto(out *LaunchTemplateInitParameters) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.LaunchTemplateVersion != nil { + in, out := &in.LaunchTemplateVersion, &out.LaunchTemplateVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateInitParameters. +func (in *LaunchTemplateInitParameters) DeepCopy() *LaunchTemplateInitParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchTemplateObservation) DeepCopyInto(out *LaunchTemplateObservation) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.LaunchTemplateVersion != nil { + in, out := &in.LaunchTemplateVersion, &out.LaunchTemplateVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateObservation. +func (in *LaunchTemplateObservation) DeepCopy() *LaunchTemplateObservation { + if in == nil { + return nil + } + out := new(LaunchTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchTemplateParameters) DeepCopyInto(out *LaunchTemplateParameters) { + *out = *in + if in.LaunchTemplateID != nil { + in, out := &in.LaunchTemplateID, &out.LaunchTemplateID + *out = new(string) + **out = **in + } + if in.LaunchTemplateName != nil { + in, out := &in.LaunchTemplateName, &out.LaunchTemplateName + *out = new(string) + **out = **in + } + if in.LaunchTemplateVersion != nil { + in, out := &in.LaunchTemplateVersion, &out.LaunchTemplateVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchTemplateParameters. +func (in *LaunchTemplateParameters) DeepCopy() *LaunchTemplateParameters { + if in == nil { + return nil + } + out := new(LaunchTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingInitParameters) DeepCopyInto(out *LoggingInitParameters) { + *out = *in + if in.S3Logs != nil { + in, out := &in.S3Logs, &out.S3Logs + *out = new(S3LogsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInitParameters. +func (in *LoggingInitParameters) DeepCopy() *LoggingInitParameters { + if in == nil { + return nil + } + out := new(LoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingObservation) DeepCopyInto(out *LoggingObservation) { + *out = *in + if in.S3Logs != nil { + in, out := &in.S3Logs, &out.S3Logs + *out = new(S3LogsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingObservation. +func (in *LoggingObservation) DeepCopy() *LoggingObservation { + if in == nil { + return nil + } + out := new(LoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingParameters) DeepCopyInto(out *LoggingParameters) { + *out = *in + if in.S3Logs != nil { + in, out := &in.S3Logs, &out.S3Logs + *out = new(S3LogsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingParameters. +func (in *LoggingParameters) DeepCopy() *LoggingParameters { + if in == nil { + return nil + } + out := new(LoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputResourcesInitParameters) DeepCopyInto(out *OutputResourcesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputResourcesInitParameters. +func (in *OutputResourcesInitParameters) DeepCopy() *OutputResourcesInitParameters { + if in == nil { + return nil + } + out := new(OutputResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputResourcesObservation) DeepCopyInto(out *OutputResourcesObservation) { + *out = *in + if in.Amis != nil { + in, out := &in.Amis, &out.Amis + *out = make([]AmisObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputResourcesObservation. +func (in *OutputResourcesObservation) DeepCopy() *OutputResourcesObservation { + if in == nil { + return nil + } + out := new(OutputResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputResourcesParameters) DeepCopyInto(out *OutputResourcesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputResourcesParameters. +func (in *OutputResourcesParameters) DeepCopy() *OutputResourcesParameters { + if in == nil { + return nil + } + out := new(OutputResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParameterInitParameters) DeepCopyInto(out *ParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterInitParameters. +func (in *ParameterInitParameters) DeepCopy() *ParameterInitParameters { + if in == nil { + return nil + } + out := new(ParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterObservation) DeepCopyInto(out *ParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterObservation. +func (in *ParameterObservation) DeepCopy() *ParameterObservation { + if in == nil { + return nil + } + out := new(ParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterParameters) DeepCopyInto(out *ParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterParameters. 
+func (in *ParameterParameters) DeepCopy() *ParameterParameters { + if in == nil { + return nil + } + out := new(ParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3LogsInitParameters) DeepCopyInto(out *S3LogsInitParameters) { + *out = *in + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3BucketNameRef != nil { + in, out := &in.S3BucketNameRef, &out.S3BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.S3BucketNameSelector != nil { + in, out := &in.S3BucketNameSelector, &out.S3BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3LogsInitParameters. +func (in *S3LogsInitParameters) DeepCopy() *S3LogsInitParameters { + if in == nil { + return nil + } + out := new(S3LogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3LogsObservation) DeepCopyInto(out *S3LogsObservation) { + *out = *in + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3LogsObservation. 
+func (in *S3LogsObservation) DeepCopy() *S3LogsObservation { + if in == nil { + return nil + } + out := new(S3LogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3LogsParameters) DeepCopyInto(out *S3LogsParameters) { + *out = *in + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3BucketNameRef != nil { + in, out := &in.S3BucketNameRef, &out.S3BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.S3BucketNameSelector != nil { + in, out := &in.S3BucketNameSelector, &out.S3BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3LogsParameters. +func (in *S3LogsParameters) DeepCopy() *S3LogsParameters { + if in == nil { + return nil + } + out := new(S3LogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.PipelineExecutionStartCondition != nil { + in, out := &in.PipelineExecutionStartCondition, &out.PipelineExecutionStartCondition + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. 
+func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.PipelineExecutionStartCondition != nil { + in, out := &in.PipelineExecutionStartCondition, &out.PipelineExecutionStartCondition + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.PipelineExecutionStartCondition != nil { + in, out := &in.PipelineExecutionStartCondition, &out.PipelineExecutionStartCondition + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. 
+func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotConfigurationInitParameters) DeepCopyInto(out *SnapshotConfigurationInitParameters) { + *out = *in + if in.TargetResourceCount != nil { + in, out := &in.TargetResourceCount, &out.TargetResourceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotConfigurationInitParameters. +func (in *SnapshotConfigurationInitParameters) DeepCopy() *SnapshotConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SnapshotConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotConfigurationObservation) DeepCopyInto(out *SnapshotConfigurationObservation) { + *out = *in + if in.TargetResourceCount != nil { + in, out := &in.TargetResourceCount, &out.TargetResourceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotConfigurationObservation. +func (in *SnapshotConfigurationObservation) DeepCopy() *SnapshotConfigurationObservation { + if in == nil { + return nil + } + out := new(SnapshotConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotConfigurationParameters) DeepCopyInto(out *SnapshotConfigurationParameters) { + *out = *in + if in.TargetResourceCount != nil { + in, out := &in.TargetResourceCount, &out.TargetResourceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotConfigurationParameters. +func (in *SnapshotConfigurationParameters) DeepCopy() *SnapshotConfigurationParameters { + if in == nil { + return nil + } + out := new(SnapshotConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemsManagerAgentInitParameters) DeepCopyInto(out *SystemsManagerAgentInitParameters) { + *out = *in + if in.UninstallAfterBuild != nil { + in, out := &in.UninstallAfterBuild, &out.UninstallAfterBuild + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemsManagerAgentInitParameters. +func (in *SystemsManagerAgentInitParameters) DeepCopy() *SystemsManagerAgentInitParameters { + if in == nil { + return nil + } + out := new(SystemsManagerAgentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemsManagerAgentObservation) DeepCopyInto(out *SystemsManagerAgentObservation) { + *out = *in + if in.UninstallAfterBuild != nil { + in, out := &in.UninstallAfterBuild, &out.UninstallAfterBuild + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemsManagerAgentObservation. 
+func (in *SystemsManagerAgentObservation) DeepCopy() *SystemsManagerAgentObservation { + if in == nil { + return nil + } + out := new(SystemsManagerAgentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemsManagerAgentParameters) DeepCopyInto(out *SystemsManagerAgentParameters) { + *out = *in + if in.UninstallAfterBuild != nil { + in, out := &in.UninstallAfterBuild, &out.UninstallAfterBuild + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemsManagerAgentParameters. +func (in *SystemsManagerAgentParameters) DeepCopy() *SystemsManagerAgentParameters { + if in == nil { + return nil + } + out := new(SystemsManagerAgentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetRepositoryInitParameters) DeepCopyInto(out *TargetRepositoryInitParameters) { + *out = *in + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RepositoryNameRef != nil { + in, out := &in.RepositoryNameRef, &out.RepositoryNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RepositoryNameSelector != nil { + in, out := &in.RepositoryNameSelector, &out.RepositoryNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetRepositoryInitParameters. 
+func (in *TargetRepositoryInitParameters) DeepCopy() *TargetRepositoryInitParameters { + if in == nil { + return nil + } + out := new(TargetRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetRepositoryObservation) DeepCopyInto(out *TargetRepositoryObservation) { + *out = *in + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetRepositoryObservation. +func (in *TargetRepositoryObservation) DeepCopy() *TargetRepositoryObservation { + if in == nil { + return nil + } + out := new(TargetRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetRepositoryParameters) DeepCopyInto(out *TargetRepositoryParameters) { + *out = *in + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RepositoryNameRef != nil { + in, out := &in.RepositoryNameRef, &out.RepositoryNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RepositoryNameSelector != nil { + in, out := &in.RepositoryNameSelector, &out.RepositoryNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetRepositoryParameters. 
+func (in *TargetRepositoryParameters) DeepCopy() *TargetRepositoryParameters { + if in == nil { + return nil + } + out := new(TargetRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowInitParameters) DeepCopyInto(out *WorkflowInitParameters) { + *out = *in + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(string) + **out = **in + } + if in.ParallelGroup != nil { + in, out := &in.ParallelGroup, &out.ParallelGroup + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]WorkflowParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkflowArn != nil { + in, out := &in.WorkflowArn, &out.WorkflowArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowInitParameters. +func (in *WorkflowInitParameters) DeepCopy() *WorkflowInitParameters { + if in == nil { + return nil + } + out := new(WorkflowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowObservation) DeepCopyInto(out *WorkflowObservation) { + *out = *in + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(string) + **out = **in + } + if in.ParallelGroup != nil { + in, out := &in.ParallelGroup, &out.ParallelGroup + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]WorkflowParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkflowArn != nil { + in, out := &in.WorkflowArn, &out.WorkflowArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowObservation. +func (in *WorkflowObservation) DeepCopy() *WorkflowObservation { + if in == nil { + return nil + } + out := new(WorkflowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowParameterInitParameters) DeepCopyInto(out *WorkflowParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowParameterInitParameters. +func (in *WorkflowParameterInitParameters) DeepCopy() *WorkflowParameterInitParameters { + if in == nil { + return nil + } + out := new(WorkflowParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowParameterObservation) DeepCopyInto(out *WorkflowParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowParameterObservation. +func (in *WorkflowParameterObservation) DeepCopy() *WorkflowParameterObservation { + if in == nil { + return nil + } + out := new(WorkflowParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowParameterParameters) DeepCopyInto(out *WorkflowParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowParameterParameters. +func (in *WorkflowParameterParameters) DeepCopy() *WorkflowParameterParameters { + if in == nil { + return nil + } + out := new(WorkflowParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowParameters) DeepCopyInto(out *WorkflowParameters) { + *out = *in + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(string) + **out = **in + } + if in.ParallelGroup != nil { + in, out := &in.ParallelGroup, &out.ParallelGroup + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]WorkflowParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkflowArn != nil { + in, out := &in.WorkflowArn, &out.WorkflowArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowParameters. +func (in *WorkflowParameters) DeepCopy() *WorkflowParameters { + if in == nil { + return nil + } + out := new(WorkflowParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/imagebuilder/v1beta2/zz_generated.managed.go b/apis/imagebuilder/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..0309ee2ea3 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_generated.managed.go @@ -0,0 +1,368 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ContainerRecipe. +func (mg *ContainerRecipe) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ContainerRecipe. +func (mg *ContainerRecipe) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ContainerRecipe. +func (mg *ContainerRecipe) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ContainerRecipe. 
+func (mg *ContainerRecipe) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ContainerRecipe. +func (mg *ContainerRecipe) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ContainerRecipe. +func (mg *ContainerRecipe) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ContainerRecipe. +func (mg *ContainerRecipe) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ContainerRecipe. +func (mg *ContainerRecipe) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ContainerRecipe. +func (mg *ContainerRecipe) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ContainerRecipe. +func (mg *ContainerRecipe) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ContainerRecipe. +func (mg *ContainerRecipe) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ContainerRecipe. +func (mg *ContainerRecipe) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DistributionConfiguration. +func (mg *DistributionConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DistributionConfiguration. 
+func (mg *DistributionConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DistributionConfiguration. +func (mg *DistributionConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DistributionConfiguration. +func (mg *DistributionConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DistributionConfiguration. +func (mg *DistributionConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DistributionConfiguration. +func (mg *DistributionConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DistributionConfiguration. +func (mg *DistributionConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DistributionConfiguration. +func (mg *DistributionConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DistributionConfiguration. +func (mg *DistributionConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DistributionConfiguration. +func (mg *DistributionConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DistributionConfiguration. +func (mg *DistributionConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DistributionConfiguration. 
+func (mg *DistributionConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Image. +func (mg *Image) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Image. +func (mg *Image) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Image. +func (mg *Image) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Image. +func (mg *Image) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Image. +func (mg *Image) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Image. +func (mg *Image) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Image. +func (mg *Image) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Image. +func (mg *Image) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Image. +func (mg *Image) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Image. +func (mg *Image) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Image. +func (mg *Image) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Image. 
+func (mg *Image) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ImagePipeline. +func (mg *ImagePipeline) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ImagePipeline. +func (mg *ImagePipeline) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ImagePipeline. +func (mg *ImagePipeline) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ImagePipeline. +func (mg *ImagePipeline) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ImagePipeline. +func (mg *ImagePipeline) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ImagePipeline. +func (mg *ImagePipeline) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ImagePipeline. +func (mg *ImagePipeline) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ImagePipeline. +func (mg *ImagePipeline) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ImagePipeline. +func (mg *ImagePipeline) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ImagePipeline. +func (mg *ImagePipeline) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ImagePipeline. 
+func (mg *ImagePipeline) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ImagePipeline. +func (mg *ImagePipeline) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ImageRecipe. +func (mg *ImageRecipe) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ImageRecipe. +func (mg *ImageRecipe) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ImageRecipe. +func (mg *ImageRecipe) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ImageRecipe. +func (mg *ImageRecipe) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ImageRecipe. +func (mg *ImageRecipe) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ImageRecipe. +func (mg *ImageRecipe) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ImageRecipe. +func (mg *ImageRecipe) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ImageRecipe. +func (mg *ImageRecipe) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ImageRecipe. +func (mg *ImageRecipe) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ImageRecipe. 
+func (mg *ImageRecipe) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ImageRecipe. +func (mg *ImageRecipe) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ImageRecipe. +func (mg *ImageRecipe) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this InfrastructureConfiguration. 
+func (mg *InfrastructureConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/imagebuilder/v1beta2/zz_generated.managedlist.go b/apis/imagebuilder/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..f957633548 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,62 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ContainerRecipeList. +func (l *ContainerRecipeList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DistributionConfigurationList. +func (l *DistributionConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ImageList. 
+func (l *ImageList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ImagePipelineList. +func (l *ImagePipelineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ImageRecipeList. +func (l *ImageRecipeList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this InfrastructureConfigurationList. +func (l *InfrastructureConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/imagebuilder/v1beta2/zz_generated.resolvers.go b/apis/imagebuilder/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..e1c1f8bd2e --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,671 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ContainerRecipe) ResolveReferences( // ResolveReferences of this ContainerRecipe. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Component); i3++ { + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta1", "Component", "ComponentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Component[i3].ComponentArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Component[i3].ComponentArnRef, + Selector: mg.Spec.ForProvider.Component[i3].ComponentArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Component[i3].ComponentArn") + } + mg.Spec.ForProvider.Component[i3].ComponentArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Component[i3].ComponentArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.TargetRepository != nil { + { 
+ m, l, err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta2", "Repository", "RepositoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TargetRepository.RepositoryName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.TargetRepository.RepositoryNameRef, + Selector: mg.Spec.ForProvider.TargetRepository.RepositoryNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TargetRepository.RepositoryName") + } + mg.Spec.ForProvider.TargetRepository.RepositoryName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TargetRepository.RepositoryNameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Component); i3++ { + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta1", "Component", "ComponentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Component[i3].ComponentArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Component[i3].ComponentArnRef, + Selector: mg.Spec.InitProvider.Component[i3].ComponentArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Component[i3].ComponentArn") + } + mg.Spec.InitProvider.Component[i3].ComponentArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Component[i3].ComponentArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") 
+ if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.TargetRepository != nil { + { + m, l, err = apisresolver.GetManagedResource("ecr.aws.upbound.io", "v1beta2", "Repository", "RepositoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TargetRepository.RepositoryName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.TargetRepository.RepositoryNameRef, + Selector: mg.Spec.InitProvider.TargetRepository.RepositoryNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TargetRepository.RepositoryName") + } + mg.Spec.InitProvider.TargetRepository.RepositoryName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TargetRepository.RepositoryNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Image. 
+func (mg *Image) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "DistributionConfiguration", "DistributionConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DistributionConfigurationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.DistributionConfigurationArnRef, + Selector: mg.Spec.ForProvider.DistributionConfigurationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DistributionConfigurationArn") + } + mg.Spec.ForProvider.DistributionConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DistributionConfigurationArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "ImageRecipe", "ImageRecipeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ImageRecipeArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ImageRecipeArnRef, + Selector: mg.Spec.ForProvider.ImageRecipeArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ImageRecipeArn") + } + mg.Spec.ForProvider.ImageRecipeArn = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ImageRecipeArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "InfrastructureConfiguration", "InfrastructureConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InfrastructureConfigurationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.InfrastructureConfigurationArnRef, + Selector: mg.Spec.ForProvider.InfrastructureConfigurationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InfrastructureConfigurationArn") + } + mg.Spec.ForProvider.InfrastructureConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InfrastructureConfigurationArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "DistributionConfiguration", "DistributionConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DistributionConfigurationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.DistributionConfigurationArnRef, + Selector: mg.Spec.InitProvider.DistributionConfigurationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DistributionConfigurationArn") + } + mg.Spec.InitProvider.DistributionConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DistributionConfigurationArnRef = rsp.ResolvedReference + { + m, l, 
err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "ImageRecipe", "ImageRecipeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ImageRecipeArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ImageRecipeArnRef, + Selector: mg.Spec.InitProvider.ImageRecipeArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ImageRecipeArn") + } + mg.Spec.InitProvider.ImageRecipeArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ImageRecipeArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "InfrastructureConfiguration", "InfrastructureConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InfrastructureConfigurationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.InfrastructureConfigurationArnRef, + Selector: mg.Spec.InitProvider.InfrastructureConfigurationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InfrastructureConfigurationArn") + } + mg.Spec.InitProvider.InfrastructureConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InfrastructureConfigurationArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ImagePipeline. 
+func (mg *ImagePipeline) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "ImageRecipe", "ImageRecipeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ImageRecipeArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ImageRecipeArnRef, + Selector: mg.Spec.ForProvider.ImageRecipeArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ImageRecipeArn") + } + mg.Spec.ForProvider.ImageRecipeArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ImageRecipeArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "InfrastructureConfiguration", "InfrastructureConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InfrastructureConfigurationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.InfrastructureConfigurationArnRef, + Selector: mg.Spec.ForProvider.InfrastructureConfigurationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InfrastructureConfigurationArn") + } + mg.Spec.ForProvider.InfrastructureConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.InfrastructureConfigurationArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "ImageRecipe", "ImageRecipeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ImageRecipeArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ImageRecipeArnRef, + Selector: mg.Spec.InitProvider.ImageRecipeArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ImageRecipeArn") + } + mg.Spec.InitProvider.ImageRecipeArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ImageRecipeArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta2", "InfrastructureConfiguration", "InfrastructureConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InfrastructureConfigurationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.InfrastructureConfigurationArnRef, + Selector: mg.Spec.InitProvider.InfrastructureConfigurationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InfrastructureConfigurationArn") + } + mg.Spec.InitProvider.InfrastructureConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InfrastructureConfigurationArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ImageRecipe. 
+func (mg *ImageRecipe) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Component); i3++ { + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta1", "Component", "ComponentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Component[i3].ComponentArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Component[i3].ComponentArnRef, + Selector: mg.Spec.ForProvider.Component[i3].ComponentArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Component[i3].ComponentArn") + } + mg.Spec.ForProvider.Component[i3].ComponentArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Component[i3].ComponentArnRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Component); i3++ { + { + m, l, err = apisresolver.GetManagedResource("imagebuilder.aws.upbound.io", "v1beta1", "Component", "ComponentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Component[i3].ComponentArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Component[i3].ComponentArnRef, + Selector: mg.Spec.InitProvider.Component[i3].ComponentArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.Component[i3].ComponentArn") + } + mg.Spec.InitProvider.Component[i3].ComponentArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Component[i3].ComponentArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this InfrastructureConfiguration. +func (mg *InfrastructureConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InstanceProfileName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.InstanceProfileNameRef, + Selector: mg.Spec.ForProvider.InstanceProfileNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InstanceProfileName") + } + mg.Spec.ForProvider.InstanceProfileName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InstanceProfileNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "KeyPair", "KeyPairList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyPair), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KeyPairRef, + Selector: mg.Spec.ForProvider.KeyPairSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyPair") + } + mg.Spec.ForProvider.KeyPair = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyPairRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Logging != nil { + if mg.Spec.ForProvider.Logging.S3Logs != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Logging.S3Logs.S3BucketName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Logging.S3Logs.S3BucketNameRef, + Selector: mg.Spec.ForProvider.Logging.S3Logs.S3BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Logging.S3Logs.S3BucketName") + } + mg.Spec.ForProvider.Logging.S3Logs.S3BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Logging.S3Logs.S3BucketNameRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroupIds") + } + mg.Spec.ForProvider.SecurityGroupIds = 
reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SnsTopicArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.SnsTopicArnRef, + Selector: mg.Spec.ForProvider.SnsTopicArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SnsTopicArn") + } + mg.Spec.ForProvider.SnsTopicArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SnsTopicArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InstanceProfileName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.InstanceProfileNameRef, + Selector: mg.Spec.InitProvider.InstanceProfileNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InstanceProfileName") + } + mg.Spec.InitProvider.InstanceProfileName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InstanceProfileNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "KeyPair", "KeyPairList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyPair), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KeyPairRef, + Selector: mg.Spec.InitProvider.KeyPairSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyPair") + } + mg.Spec.InitProvider.KeyPair = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyPairRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Logging != nil { + if mg.Spec.InitProvider.Logging.S3Logs != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Logging.S3Logs.S3BucketName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Logging.S3Logs.S3BucketNameRef, + Selector: mg.Spec.InitProvider.Logging.S3Logs.S3BucketNameSelector, 
+ To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Logging.S3Logs.S3BucketName") + } + mg.Spec.InitProvider.Logging.S3Logs.S3BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Logging.S3Logs.S3BucketNameRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroupIds") + } + mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SnsTopicArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.SnsTopicArnRef, + Selector: mg.Spec.InitProvider.SnsTopicArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SnsTopicArn") + } + mg.Spec.InitProvider.SnsTopicArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SnsTopicArnRef = rsp.ResolvedReference + { 
+ m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/imagebuilder/v1beta2/zz_groupversion_info.go b/apis/imagebuilder/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..c7805d0f6b --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=imagebuilder.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "imagebuilder.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/imagebuilder/v1beta2/zz_image_terraformed.go b/apis/imagebuilder/v1beta2/zz_image_terraformed.go new file mode 100755 index 0000000000..62323f9510 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_image_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Image +func (mg *Image) GetTerraformResourceType() string { + return "aws_imagebuilder_image" +} + +// GetConnectionDetailsMapping for this Image +func (tr *Image) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Image +func (tr *Image) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Image +func (tr *Image) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Image +func (tr *Image) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Image +func (tr *Image) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Image +func (tr *Image) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Image +func (tr *Image) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Image +func (tr *Image) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Image using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Image) LateInitialize(attrs []byte) (bool, error) { + params := &ImageParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Image) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/imagebuilder/v1beta2/zz_image_types.go b/apis/imagebuilder/v1beta2/zz_image_types.go new file mode 100755 index 0000000000..639baf3a68 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_image_types.go @@ -0,0 +1,502 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AmisInitParameters struct { +} + +type AmisObservation struct { + + // Account identifier of the AMI. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Description of the AMI. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Identifier of the AMI. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // The name of the Workflow parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region of the AMI. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type AmisParameters struct { +} + +type ContainersInitParameters struct { +} + +type ContainersObservation struct { + + // Set of URIs for created containers. 
+ // +listType=set + ImageUris []*string `json:"imageUris,omitempty" tf:"image_uris,omitempty"` + + // Region of the AMI. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type ContainersParameters struct { +} + +type EcrConfigurationInitParameters struct { + + // Set of tags for Image Builder to apply to the output container image that that Amazon Inspector scans. + // +listType=set + ContainerTags []*string `json:"containerTags,omitempty" tf:"container_tags,omitempty"` + + // The name of the container repository that Amazon Inspector scans to identify findings for your container images. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` +} + +type EcrConfigurationObservation struct { + + // Set of tags for Image Builder to apply to the output container image that that Amazon Inspector scans. + // +listType=set + ContainerTags []*string `json:"containerTags,omitempty" tf:"container_tags,omitempty"` + + // The name of the container repository that Amazon Inspector scans to identify findings for your container images. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` +} + +type EcrConfigurationParameters struct { + + // Set of tags for Image Builder to apply to the output container image that that Amazon Inspector scans. + // +kubebuilder:validation:Optional + // +listType=set + ContainerTags []*string `json:"containerTags,omitempty" tf:"container_tags,omitempty"` + + // The name of the container repository that Amazon Inspector scans to identify findings for your container images. + // +kubebuilder:validation:Optional + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` +} + +type ImageInitParameters struct { + + // - Amazon Resource Name (ARN) of the container recipe. 
+ ContainerRecipeArn *string `json:"containerRecipeArn,omitempty" tf:"container_recipe_arn,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.DistributionConfiguration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + DistributionConfigurationArn *string `json:"distributionConfigurationArn,omitempty" tf:"distribution_configuration_arn,omitempty"` + + // Reference to a DistributionConfiguration in imagebuilder to populate distributionConfigurationArn. + // +kubebuilder:validation:Optional + DistributionConfigurationArnRef *v1.Reference `json:"distributionConfigurationArnRef,omitempty" tf:"-"` + + // Selector for a DistributionConfiguration in imagebuilder to populate distributionConfigurationArn. + // +kubebuilder:validation:Optional + DistributionConfigurationArnSelector *v1.Selector `json:"distributionConfigurationArnSelector,omitempty" tf:"-"` + + // Whether additional information about the image being created is collected. Defaults to true. + EnhancedImageMetadataEnabled *bool `json:"enhancedImageMetadataEnabled,omitempty" tf:"enhanced_image_metadata_enabled,omitempty"` + + // Amazon Resource Name (ARN) of the service-linked role to be used by Image Builder to execute workflows. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // Amazon Resource Name (ARN) of the image recipe. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.ImageRecipe + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ImageRecipeArn *string `json:"imageRecipeArn,omitempty" tf:"image_recipe_arn,omitempty"` + + // Reference to a ImageRecipe in imagebuilder to populate imageRecipeArn. 
+ // +kubebuilder:validation:Optional + ImageRecipeArnRef *v1.Reference `json:"imageRecipeArnRef,omitempty" tf:"-"` + + // Selector for a ImageRecipe in imagebuilder to populate imageRecipeArn. + // +kubebuilder:validation:Optional + ImageRecipeArnSelector *v1.Selector `json:"imageRecipeArnSelector,omitempty" tf:"-"` + + // Configuration block with image scanning configuration. Detailed below. + ImageScanningConfiguration *ImageScanningConfigurationInitParameters `json:"imageScanningConfiguration,omitempty" tf:"image_scanning_configuration,omitempty"` + + // Configuration block with image tests configuration. Detailed below. + ImageTestsConfiguration *ImageTestsConfigurationInitParameters `json:"imageTestsConfiguration,omitempty" tf:"image_tests_configuration,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.InfrastructureConfiguration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + InfrastructureConfigurationArn *string `json:"infrastructureConfigurationArn,omitempty" tf:"infrastructure_configuration_arn,omitempty"` + + // Reference to a InfrastructureConfiguration in imagebuilder to populate infrastructureConfigurationArn. + // +kubebuilder:validation:Optional + InfrastructureConfigurationArnRef *v1.Reference `json:"infrastructureConfigurationArnRef,omitempty" tf:"-"` + + // Selector for a InfrastructureConfiguration in imagebuilder to populate infrastructureConfigurationArn. + // +kubebuilder:validation:Optional + InfrastructureConfigurationArnSelector *v1.Selector `json:"infrastructureConfigurationArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block with the workflow configuration. Detailed below. 
+ Workflow []WorkflowInitParameters `json:"workflow,omitempty" tf:"workflow,omitempty"` +} + +type ImageObservation struct { + + // Amazon Resource Name (ARN) of the image. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // - Amazon Resource Name (ARN) of the container recipe. + ContainerRecipeArn *string `json:"containerRecipeArn,omitempty" tf:"container_recipe_arn,omitempty"` + + // Date the image was created. + DateCreated *string `json:"dateCreated,omitempty" tf:"date_created,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. + DistributionConfigurationArn *string `json:"distributionConfigurationArn,omitempty" tf:"distribution_configuration_arn,omitempty"` + + // Whether additional information about the image being created is collected. Defaults to true. + EnhancedImageMetadataEnabled *bool `json:"enhancedImageMetadataEnabled,omitempty" tf:"enhanced_image_metadata_enabled,omitempty"` + + // Amazon Resource Name (ARN) of the service-linked role to be used by Image Builder to execute workflows. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Amazon Resource Name (ARN) of the image recipe. + ImageRecipeArn *string `json:"imageRecipeArn,omitempty" tf:"image_recipe_arn,omitempty"` + + // Configuration block with image scanning configuration. Detailed below. + ImageScanningConfiguration *ImageScanningConfigurationObservation `json:"imageScanningConfiguration,omitempty" tf:"image_scanning_configuration,omitempty"` + + // Configuration block with image tests configuration. Detailed below. + ImageTestsConfiguration *ImageTestsConfigurationObservation `json:"imageTestsConfiguration,omitempty" tf:"image_tests_configuration,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration. 
+ InfrastructureConfigurationArn *string `json:"infrastructureConfigurationArn,omitempty" tf:"infrastructure_configuration_arn,omitempty"` + + // Name of the AMI. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operating System version of the image. + OsVersion *string `json:"osVersion,omitempty" tf:"os_version,omitempty"` + + // List of objects with resources created by the image. + OutputResources []OutputResourcesObservation `json:"outputResources,omitempty" tf:"output_resources,omitempty"` + + // Platform of the image. + Platform *string `json:"platform,omitempty" tf:"platform,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Version of the image. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Configuration block with the workflow configuration. Detailed below. + Workflow []WorkflowObservation `json:"workflow,omitempty" tf:"workflow,omitempty"` +} + +type ImageParameters struct { + + // - Amazon Resource Name (ARN) of the container recipe. + // +kubebuilder:validation:Optional + ContainerRecipeArn *string `json:"containerRecipeArn,omitempty" tf:"container_recipe_arn,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.DistributionConfiguration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + DistributionConfigurationArn *string `json:"distributionConfigurationArn,omitempty" tf:"distribution_configuration_arn,omitempty"` + + // Reference to a DistributionConfiguration in imagebuilder to populate distributionConfigurationArn. + // +kubebuilder:validation:Optional + DistributionConfigurationArnRef *v1.Reference `json:"distributionConfigurationArnRef,omitempty" tf:"-"` + + // Selector for a DistributionConfiguration in imagebuilder to populate distributionConfigurationArn. + // +kubebuilder:validation:Optional + DistributionConfigurationArnSelector *v1.Selector `json:"distributionConfigurationArnSelector,omitempty" tf:"-"` + + // Whether additional information about the image being created is collected. Defaults to true. + // +kubebuilder:validation:Optional + EnhancedImageMetadataEnabled *bool `json:"enhancedImageMetadataEnabled,omitempty" tf:"enhanced_image_metadata_enabled,omitempty"` + + // Amazon Resource Name (ARN) of the service-linked role to be used by Image Builder to execute workflows. + // +kubebuilder:validation:Optional + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // Amazon Resource Name (ARN) of the image recipe. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.ImageRecipe + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ImageRecipeArn *string `json:"imageRecipeArn,omitempty" tf:"image_recipe_arn,omitempty"` + + // Reference to a ImageRecipe in imagebuilder to populate imageRecipeArn. 
+ // +kubebuilder:validation:Optional + ImageRecipeArnRef *v1.Reference `json:"imageRecipeArnRef,omitempty" tf:"-"` + + // Selector for a ImageRecipe in imagebuilder to populate imageRecipeArn. + // +kubebuilder:validation:Optional + ImageRecipeArnSelector *v1.Selector `json:"imageRecipeArnSelector,omitempty" tf:"-"` + + // Configuration block with image scanning configuration. Detailed below. + // +kubebuilder:validation:Optional + ImageScanningConfiguration *ImageScanningConfigurationParameters `json:"imageScanningConfiguration,omitempty" tf:"image_scanning_configuration,omitempty"` + + // Configuration block with image tests configuration. Detailed below. + // +kubebuilder:validation:Optional + ImageTestsConfiguration *ImageTestsConfigurationParameters `json:"imageTestsConfiguration,omitempty" tf:"image_tests_configuration,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.InfrastructureConfiguration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + InfrastructureConfigurationArn *string `json:"infrastructureConfigurationArn,omitempty" tf:"infrastructure_configuration_arn,omitempty"` + + // Reference to a InfrastructureConfiguration in imagebuilder to populate infrastructureConfigurationArn. + // +kubebuilder:validation:Optional + InfrastructureConfigurationArnRef *v1.Reference `json:"infrastructureConfigurationArnRef,omitempty" tf:"-"` + + // Selector for a InfrastructureConfiguration in imagebuilder to populate infrastructureConfigurationArn. + // +kubebuilder:validation:Optional + InfrastructureConfigurationArnSelector *v1.Selector `json:"infrastructureConfigurationArnSelector,omitempty" tf:"-"` + + // Region of the AMI. + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block with the workflow configuration. Detailed below. + // +kubebuilder:validation:Optional + Workflow []WorkflowParameters `json:"workflow,omitempty" tf:"workflow,omitempty"` +} + +type ImageScanningConfigurationInitParameters struct { + + // Configuration block with ECR configuration. Detailed below. + EcrConfiguration *EcrConfigurationInitParameters `json:"ecrConfiguration,omitempty" tf:"ecr_configuration,omitempty"` + + // Indicates whether Image Builder keeps a snapshot of the vulnerability scans that Amazon Inspector runs against the build instance when you create a new image. Defaults to false. + ImageScanningEnabled *bool `json:"imageScanningEnabled,omitempty" tf:"image_scanning_enabled,omitempty"` +} + +type ImageScanningConfigurationObservation struct { + + // Configuration block with ECR configuration. Detailed below. + EcrConfiguration *EcrConfigurationObservation `json:"ecrConfiguration,omitempty" tf:"ecr_configuration,omitempty"` + + // Indicates whether Image Builder keeps a snapshot of the vulnerability scans that Amazon Inspector runs against the build instance when you create a new image. Defaults to false. + ImageScanningEnabled *bool `json:"imageScanningEnabled,omitempty" tf:"image_scanning_enabled,omitempty"` +} + +type ImageScanningConfigurationParameters struct { + + // Configuration block with ECR configuration. Detailed below. + // +kubebuilder:validation:Optional + EcrConfiguration *EcrConfigurationParameters `json:"ecrConfiguration,omitempty" tf:"ecr_configuration,omitempty"` + + // Indicates whether Image Builder keeps a snapshot of the vulnerability scans that Amazon Inspector runs against the build instance when you create a new image. 
Defaults to false. + // +kubebuilder:validation:Optional + ImageScanningEnabled *bool `json:"imageScanningEnabled,omitempty" tf:"image_scanning_enabled,omitempty"` +} + +type ImageTestsConfigurationInitParameters struct { + + // Whether image tests are enabled. Defaults to true. + ImageTestsEnabled *bool `json:"imageTestsEnabled,omitempty" tf:"image_tests_enabled,omitempty"` + + // Number of minutes before image tests time out. Valid values are between 60 and 1440. Defaults to 720. + TimeoutMinutes *float64 `json:"timeoutMinutes,omitempty" tf:"timeout_minutes,omitempty"` +} + +type ImageTestsConfigurationObservation struct { + + // Whether image tests are enabled. Defaults to true. + ImageTestsEnabled *bool `json:"imageTestsEnabled,omitempty" tf:"image_tests_enabled,omitempty"` + + // Number of minutes before image tests time out. Valid values are between 60 and 1440. Defaults to 720. + TimeoutMinutes *float64 `json:"timeoutMinutes,omitempty" tf:"timeout_minutes,omitempty"` +} + +type ImageTestsConfigurationParameters struct { + + // Whether image tests are enabled. Defaults to true. + // +kubebuilder:validation:Optional + ImageTestsEnabled *bool `json:"imageTestsEnabled,omitempty" tf:"image_tests_enabled,omitempty"` + + // Number of minutes before image tests time out. Valid values are between 60 and 1440. Defaults to 720. + // +kubebuilder:validation:Optional + TimeoutMinutes *float64 `json:"timeoutMinutes,omitempty" tf:"timeout_minutes,omitempty"` +} + +type OutputResourcesInitParameters struct { +} + +type OutputResourcesObservation struct { + + // Set of objects with each Amazon Machine Image (AMI) created. + Amis []AmisObservation `json:"amis,omitempty" tf:"amis,omitempty"` + + // Set of objects with each container image created and stored in the output repository. 
+ Containers []ContainersObservation `json:"containers,omitempty" tf:"containers,omitempty"` +} + +type OutputResourcesParameters struct { +} + +type WorkflowInitParameters struct { + + // The action to take if the workflow fails. Must be one of CONTINUE or ABORT. + OnFailure *string `json:"onFailure,omitempty" tf:"on_failure,omitempty"` + + // The parallel group in which to run a test Workflow. + ParallelGroup *string `json:"parallelGroup,omitempty" tf:"parallel_group,omitempty"` + + // Configuration block for the workflow parameters. Detailed below. + Parameter []WorkflowParameterInitParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Workflow. + WorkflowArn *string `json:"workflowArn,omitempty" tf:"workflow_arn,omitempty"` +} + +type WorkflowObservation struct { + + // The action to take if the workflow fails. Must be one of CONTINUE or ABORT. + OnFailure *string `json:"onFailure,omitempty" tf:"on_failure,omitempty"` + + // The parallel group in which to run a test Workflow. + ParallelGroup *string `json:"parallelGroup,omitempty" tf:"parallel_group,omitempty"` + + // Configuration block for the workflow parameters. Detailed below. + Parameter []WorkflowParameterObservation `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Workflow. + WorkflowArn *string `json:"workflowArn,omitempty" tf:"workflow_arn,omitempty"` +} + +type WorkflowParameterInitParameters struct { + + // The name of the Workflow parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the Workflow parameter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type WorkflowParameterObservation struct { + + // The name of the Workflow parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the Workflow parameter. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type WorkflowParameterParameters struct { + + // The name of the Workflow parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the Workflow parameter. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type WorkflowParameters struct { + + // The action to take if the workflow fails. Must be one of CONTINUE or ABORT. + // +kubebuilder:validation:Optional + OnFailure *string `json:"onFailure,omitempty" tf:"on_failure,omitempty"` + + // The parallel group in which to run a test Workflow. + // +kubebuilder:validation:Optional + ParallelGroup *string `json:"parallelGroup,omitempty" tf:"parallel_group,omitempty"` + + // Configuration block for the workflow parameters. Detailed below. + // +kubebuilder:validation:Optional + Parameter []WorkflowParameterParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Workflow. + // +kubebuilder:validation:Optional + WorkflowArn *string `json:"workflowArn" tf:"workflow_arn,omitempty"` +} + +// ImageSpec defines the desired state of Image +type ImageSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ImageParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ImageInitParameters `json:"initProvider,omitempty"` +} + +// ImageStatus defines the observed state of Image. +type ImageStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ImageObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Image is the Schema for the Images API. Manages an Image Builder Image +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Image struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ImageSpec `json:"spec"` + Status ImageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ImageList contains a list of Images +type ImageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Image `json:"items"` +} + +// Repository type metadata. +var ( + Image_Kind = "Image" + Image_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Image_Kind}.String() + Image_KindAPIVersion = Image_Kind + "." 
+ CRDGroupVersion.String() + Image_GroupVersionKind = CRDGroupVersion.WithKind(Image_Kind) +) + +func init() { + SchemeBuilder.Register(&Image{}, &ImageList{}) +} diff --git a/apis/imagebuilder/v1beta2/zz_imagepipeline_terraformed.go b/apis/imagebuilder/v1beta2/zz_imagepipeline_terraformed.go new file mode 100755 index 0000000000..e0e743c084 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_imagepipeline_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ImagePipeline +func (mg *ImagePipeline) GetTerraformResourceType() string { + return "aws_imagebuilder_image_pipeline" +} + +// GetConnectionDetailsMapping for this ImagePipeline +func (tr *ImagePipeline) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ImagePipeline +func (tr *ImagePipeline) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ImagePipeline +func (tr *ImagePipeline) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ImagePipeline +func (tr *ImagePipeline) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ImagePipeline +func (tr *ImagePipeline) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ImagePipeline +func (tr *ImagePipeline) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ImagePipeline +func (tr *ImagePipeline) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ImagePipeline +func (tr *ImagePipeline) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ImagePipeline using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ImagePipeline) LateInitialize(attrs []byte) (bool, error) { + params := &ImagePipelineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ImagePipeline) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/imagebuilder/v1beta2/zz_imagepipeline_types.go b/apis/imagebuilder/v1beta2/zz_imagepipeline_types.go new file mode 100755 index 0000000000..92734eab01 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_imagepipeline_types.go @@ -0,0 +1,405 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ImagePipelineImageScanningConfigurationInitParameters struct { + + // Configuration block with ECR configuration for image scanning. Detailed below. + EcrConfiguration *ImageScanningConfigurationEcrConfigurationInitParameters `json:"ecrConfiguration,omitempty" tf:"ecr_configuration,omitempty"` + + // Whether image scans are enabled. Defaults to false. + ImageScanningEnabled *bool `json:"imageScanningEnabled,omitempty" tf:"image_scanning_enabled,omitempty"` +} + +type ImagePipelineImageScanningConfigurationObservation struct { + + // Configuration block with ECR configuration for image scanning. Detailed below. 
+ EcrConfiguration *ImageScanningConfigurationEcrConfigurationObservation `json:"ecrConfiguration,omitempty" tf:"ecr_configuration,omitempty"` + + // Whether image scans are enabled. Defaults to false. + ImageScanningEnabled *bool `json:"imageScanningEnabled,omitempty" tf:"image_scanning_enabled,omitempty"` +} + +type ImagePipelineImageScanningConfigurationParameters struct { + + // Configuration block with ECR configuration for image scanning. Detailed below. + // +kubebuilder:validation:Optional + EcrConfiguration *ImageScanningConfigurationEcrConfigurationParameters `json:"ecrConfiguration,omitempty" tf:"ecr_configuration,omitempty"` + + // Whether image scans are enabled. Defaults to false. + // +kubebuilder:validation:Optional + ImageScanningEnabled *bool `json:"imageScanningEnabled,omitempty" tf:"image_scanning_enabled,omitempty"` +} + +type ImagePipelineImageTestsConfigurationInitParameters struct { + + // Whether image tests are enabled. Defaults to true. + ImageTestsEnabled *bool `json:"imageTestsEnabled,omitempty" tf:"image_tests_enabled,omitempty"` + + // Number of minutes before image tests time out. Valid values are between 60 and 1440. Defaults to 720. + TimeoutMinutes *float64 `json:"timeoutMinutes,omitempty" tf:"timeout_minutes,omitempty"` +} + +type ImagePipelineImageTestsConfigurationObservation struct { + + // Whether image tests are enabled. Defaults to true. + ImageTestsEnabled *bool `json:"imageTestsEnabled,omitempty" tf:"image_tests_enabled,omitempty"` + + // Number of minutes before image tests time out. Valid values are between 60 and 1440. Defaults to 720. + TimeoutMinutes *float64 `json:"timeoutMinutes,omitempty" tf:"timeout_minutes,omitempty"` +} + +type ImagePipelineImageTestsConfigurationParameters struct { + + // Whether image tests are enabled. Defaults to true. 
+ // +kubebuilder:validation:Optional + ImageTestsEnabled *bool `json:"imageTestsEnabled,omitempty" tf:"image_tests_enabled,omitempty"` + + // Number of minutes before image tests time out. Valid values are between 60 and 1440. Defaults to 720. + // +kubebuilder:validation:Optional + TimeoutMinutes *float64 `json:"timeoutMinutes,omitempty" tf:"timeout_minutes,omitempty"` +} + +type ImagePipelineInitParameters struct { + + // Amazon Resource Name (ARN) of the container recipe. + ContainerRecipeArn *string `json:"containerRecipeArn,omitempty" tf:"container_recipe_arn,omitempty"` + + // Description of the image pipeline. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. + DistributionConfigurationArn *string `json:"distributionConfigurationArn,omitempty" tf:"distribution_configuration_arn,omitempty"` + + // Whether additional information about the image being created is collected. Defaults to true. + EnhancedImageMetadataEnabled *bool `json:"enhancedImageMetadataEnabled,omitempty" tf:"enhanced_image_metadata_enabled,omitempty"` + + // Amazon Resource Name (ARN) of the image recipe. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.ImageRecipe + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ImageRecipeArn *string `json:"imageRecipeArn,omitempty" tf:"image_recipe_arn,omitempty"` + + // Reference to a ImageRecipe in imagebuilder to populate imageRecipeArn. + // +kubebuilder:validation:Optional + ImageRecipeArnRef *v1.Reference `json:"imageRecipeArnRef,omitempty" tf:"-"` + + // Selector for a ImageRecipe in imagebuilder to populate imageRecipeArn. + // +kubebuilder:validation:Optional + ImageRecipeArnSelector *v1.Selector `json:"imageRecipeArnSelector,omitempty" tf:"-"` + + // Configuration block with image scanning configuration. 
Detailed below. + ImageScanningConfiguration *ImagePipelineImageScanningConfigurationInitParameters `json:"imageScanningConfiguration,omitempty" tf:"image_scanning_configuration,omitempty"` + + // Configuration block with image tests configuration. Detailed below. + ImageTestsConfiguration *ImagePipelineImageTestsConfigurationInitParameters `json:"imageTestsConfiguration,omitempty" tf:"image_tests_configuration,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.InfrastructureConfiguration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + InfrastructureConfigurationArn *string `json:"infrastructureConfigurationArn,omitempty" tf:"infrastructure_configuration_arn,omitempty"` + + // Reference to a InfrastructureConfiguration in imagebuilder to populate infrastructureConfigurationArn. + // +kubebuilder:validation:Optional + InfrastructureConfigurationArnRef *v1.Reference `json:"infrastructureConfigurationArnRef,omitempty" tf:"-"` + + // Selector for a InfrastructureConfiguration in imagebuilder to populate infrastructureConfigurationArn. + // +kubebuilder:validation:Optional + InfrastructureConfigurationArnSelector *v1.Selector `json:"infrastructureConfigurationArnSelector,omitempty" tf:"-"` + + // Name of the image pipeline. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration block with schedule settings. Detailed below. + Schedule *ScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Status of the image pipeline. Valid values are DISABLED and ENABLED. Defaults to ENABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ImagePipelineObservation struct { + + // Amazon Resource Name (ARN) of the image pipeline. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Amazon Resource Name (ARN) of the container recipe. + ContainerRecipeArn *string `json:"containerRecipeArn,omitempty" tf:"container_recipe_arn,omitempty"` + + // Date the image pipeline was created. + DateCreated *string `json:"dateCreated,omitempty" tf:"date_created,omitempty"` + + // Date the image pipeline was last run. + DateLastRun *string `json:"dateLastRun,omitempty" tf:"date_last_run,omitempty"` + + // Date the image pipeline will run next. + DateNextRun *string `json:"dateNextRun,omitempty" tf:"date_next_run,omitempty"` + + // Date the image pipeline was updated. + DateUpdated *string `json:"dateUpdated,omitempty" tf:"date_updated,omitempty"` + + // Description of the image pipeline. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. + DistributionConfigurationArn *string `json:"distributionConfigurationArn,omitempty" tf:"distribution_configuration_arn,omitempty"` + + // Whether additional information about the image being created is collected. Defaults to true. + EnhancedImageMetadataEnabled *bool `json:"enhancedImageMetadataEnabled,omitempty" tf:"enhanced_image_metadata_enabled,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Amazon Resource Name (ARN) of the image recipe. + ImageRecipeArn *string `json:"imageRecipeArn,omitempty" tf:"image_recipe_arn,omitempty"` + + // Configuration block with image scanning configuration. Detailed below. + ImageScanningConfiguration *ImagePipelineImageScanningConfigurationObservation `json:"imageScanningConfiguration,omitempty" tf:"image_scanning_configuration,omitempty"` + + // Configuration block with image tests configuration. 
Detailed below. + ImageTestsConfiguration *ImagePipelineImageTestsConfigurationObservation `json:"imageTestsConfiguration,omitempty" tf:"image_tests_configuration,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration. + InfrastructureConfigurationArn *string `json:"infrastructureConfigurationArn,omitempty" tf:"infrastructure_configuration_arn,omitempty"` + + // Name of the image pipeline. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Platform of the image pipeline. + Platform *string `json:"platform,omitempty" tf:"platform,omitempty"` + + // Configuration block with schedule settings. Detailed below. + Schedule *ScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Status of the image pipeline. Valid values are DISABLED and ENABLED. Defaults to ENABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ImagePipelineParameters struct { + + // Amazon Resource Name (ARN) of the container recipe. + // +kubebuilder:validation:Optional + ContainerRecipeArn *string `json:"containerRecipeArn,omitempty" tf:"container_recipe_arn,omitempty"` + + // Description of the image pipeline. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. 
+ // +kubebuilder:validation:Optional + DistributionConfigurationArn *string `json:"distributionConfigurationArn,omitempty" tf:"distribution_configuration_arn,omitempty"` + + // Whether additional information about the image being created is collected. Defaults to true. + // +kubebuilder:validation:Optional + EnhancedImageMetadataEnabled *bool `json:"enhancedImageMetadataEnabled,omitempty" tf:"enhanced_image_metadata_enabled,omitempty"` + + // Amazon Resource Name (ARN) of the image recipe. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.ImageRecipe + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ImageRecipeArn *string `json:"imageRecipeArn,omitempty" tf:"image_recipe_arn,omitempty"` + + // Reference to a ImageRecipe in imagebuilder to populate imageRecipeArn. + // +kubebuilder:validation:Optional + ImageRecipeArnRef *v1.Reference `json:"imageRecipeArnRef,omitempty" tf:"-"` + + // Selector for a ImageRecipe in imagebuilder to populate imageRecipeArn. + // +kubebuilder:validation:Optional + ImageRecipeArnSelector *v1.Selector `json:"imageRecipeArnSelector,omitempty" tf:"-"` + + // Configuration block with image scanning configuration. Detailed below. + // +kubebuilder:validation:Optional + ImageScanningConfiguration *ImagePipelineImageScanningConfigurationParameters `json:"imageScanningConfiguration,omitempty" tf:"image_scanning_configuration,omitempty"` + + // Configuration block with image tests configuration. Detailed below. + // +kubebuilder:validation:Optional + ImageTestsConfiguration *ImagePipelineImageTestsConfigurationParameters `json:"imageTestsConfiguration,omitempty" tf:"image_tests_configuration,omitempty"` + + // Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta2.InfrastructureConfiguration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + InfrastructureConfigurationArn *string `json:"infrastructureConfigurationArn,omitempty" tf:"infrastructure_configuration_arn,omitempty"` + + // Reference to a InfrastructureConfiguration in imagebuilder to populate infrastructureConfigurationArn. + // +kubebuilder:validation:Optional + InfrastructureConfigurationArnRef *v1.Reference `json:"infrastructureConfigurationArnRef,omitempty" tf:"-"` + + // Selector for a InfrastructureConfiguration in imagebuilder to populate infrastructureConfigurationArn. + // +kubebuilder:validation:Optional + InfrastructureConfigurationArnSelector *v1.Selector `json:"infrastructureConfigurationArnSelector,omitempty" tf:"-"` + + // Name of the image pipeline. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block with schedule settings. Detailed below. + // +kubebuilder:validation:Optional + Schedule *ScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Status of the image pipeline. Valid values are DISABLED and ENABLED. Defaults to ENABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ImageScanningConfigurationEcrConfigurationInitParameters struct { + + // Key-value map of resource tags. 
+ // +listType=set + ContainerTags []*string `json:"containerTags,omitempty" tf:"container_tags,omitempty"` + + // The name of the repository to scan + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` +} + +type ImageScanningConfigurationEcrConfigurationObservation struct { + + // Key-value map of resource tags. + // +listType=set + ContainerTags []*string `json:"containerTags,omitempty" tf:"container_tags,omitempty"` + + // The name of the repository to scan + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` +} + +type ImageScanningConfigurationEcrConfigurationParameters struct { + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +listType=set + ContainerTags []*string `json:"containerTags,omitempty" tf:"container_tags,omitempty"` + + // The name of the repository to scan + // +kubebuilder:validation:Optional + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` +} + +type ScheduleInitParameters struct { + + // Condition when the pipeline should trigger a new image build. Valid values are EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE and EXPRESSION_MATCH_ONLY. Defaults to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE. + PipelineExecutionStartCondition *string `json:"pipelineExecutionStartCondition,omitempty" tf:"pipeline_execution_start_condition,omitempty"` + + // Cron expression of how often the pipeline start condition is evaluated. For example, cron(0 0 * * ? *) is evaluated every day at midnight UTC. Configurations using the five field syntax that was previously accepted by the API, such as cron(0 0 * * *), must be updated to the six field syntax. For more information, see the Image Builder User Guide. + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // The timezone that applies to the scheduling expression. 
For example, "Etc/UTC", "America/Los_Angeles" in the IANA timezone format. If not specified this defaults to UTC. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type ScheduleObservation struct { + + // Condition when the pipeline should trigger a new image build. Valid values are EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE and EXPRESSION_MATCH_ONLY. Defaults to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE. + PipelineExecutionStartCondition *string `json:"pipelineExecutionStartCondition,omitempty" tf:"pipeline_execution_start_condition,omitempty"` + + // Cron expression of how often the pipeline start condition is evaluated. For example, cron(0 0 * * ? *) is evaluated every day at midnight UTC. Configurations using the five field syntax that was previously accepted by the API, such as cron(0 0 * * *), must be updated to the six field syntax. For more information, see the Image Builder User Guide. + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // The timezone that applies to the scheduling expression. For example, "Etc/UTC", "America/Los_Angeles" in the IANA timezone format. If not specified this defaults to UTC. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type ScheduleParameters struct { + + // Condition when the pipeline should trigger a new image build. Valid values are EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE and EXPRESSION_MATCH_ONLY. Defaults to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE. + // +kubebuilder:validation:Optional + PipelineExecutionStartCondition *string `json:"pipelineExecutionStartCondition,omitempty" tf:"pipeline_execution_start_condition,omitempty"` + + // Cron expression of how often the pipeline start condition is evaluated. For example, cron(0 0 * * ? *) is evaluated every day at midnight UTC. 
Configurations using the five field syntax that was previously accepted by the API, such as cron(0 0 * * *), must be updated to the six field syntax. For more information, see the Image Builder User Guide. + // +kubebuilder:validation:Optional + ScheduleExpression *string `json:"scheduleExpression" tf:"schedule_expression,omitempty"` + + // The timezone that applies to the scheduling expression. For example, "Etc/UTC", "America/Los_Angeles" in the IANA timezone format. If not specified this defaults to UTC. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +// ImagePipelineSpec defines the desired state of ImagePipeline +type ImagePipelineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ImagePipelineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ImagePipelineInitParameters `json:"initProvider,omitempty"` +} + +// ImagePipelineStatus defines the observed state of ImagePipeline. +type ImagePipelineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ImagePipelineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ImagePipeline is the Schema for the ImagePipelines API. 
Manages an Image Builder Image Pipeline +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ImagePipeline struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ImagePipelineSpec `json:"spec"` + Status ImagePipelineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ImagePipelineList contains a list of ImagePipelines +type ImagePipelineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ImagePipeline `json:"items"` +} + +// Repository type metadata. +var ( + ImagePipeline_Kind = "ImagePipeline" + ImagePipeline_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ImagePipeline_Kind}.String() + ImagePipeline_KindAPIVersion = ImagePipeline_Kind + "." 
+ CRDGroupVersion.String() + ImagePipeline_GroupVersionKind = CRDGroupVersion.WithKind(ImagePipeline_Kind) +) + +func init() { + SchemeBuilder.Register(&ImagePipeline{}, &ImagePipelineList{}) +} diff --git a/apis/imagebuilder/v1beta2/zz_imagerecipe_terraformed.go b/apis/imagebuilder/v1beta2/zz_imagerecipe_terraformed.go new file mode 100755 index 0000000000..c0145fa851 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_imagerecipe_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ImageRecipe +func (mg *ImageRecipe) GetTerraformResourceType() string { + return "aws_imagebuilder_image_recipe" +} + +// GetConnectionDetailsMapping for this ImageRecipe +func (tr *ImageRecipe) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ImageRecipe +func (tr *ImageRecipe) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ImageRecipe +func (tr *ImageRecipe) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ImageRecipe +func (tr *ImageRecipe) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ImageRecipe +func (tr *ImageRecipe) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this ImageRecipe
+func (tr *ImageRecipe) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this ImageRecipe
+func (tr *ImageRecipe) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ImageRecipe
+func (tr *ImageRecipe) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this ImageRecipe using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *ImageRecipe) LateInitialize(attrs []byte) (bool, error) { + params := &ImageRecipeParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ImageRecipe) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/imagebuilder/v1beta2/zz_imagerecipe_types.go b/apis/imagebuilder/v1beta2/zz_imagerecipe_types.go new file mode 100755 index 0000000000..d96627f2c2 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_imagerecipe_types.go @@ -0,0 +1,448 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BlockDeviceMappingEBSInitParameters struct { + + // Whether to delete the volume on termination. Defaults to unset, which is the value inherited from the parent image. + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether to encrypt the volume. Defaults to unset, which is the value inherited from the parent image. + Encrypted *string `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Number of Input/Output (I/O) operations per second to provision for an io1 or io2 volume. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the Key Management Service (KMS) Key for encryption. 
+ KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Identifier of the EC2 Volume Snapshot. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // For GP3 volumes only. The throughput in MiB/s that the volume supports. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume, in GiB. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of the volume. For example, gp2 or io2. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type BlockDeviceMappingEBSObservation struct { + + // Whether to delete the volume on termination. Defaults to unset, which is the value inherited from the parent image. + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether to encrypt the volume. Defaults to unset, which is the value inherited from the parent image. + Encrypted *string `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Number of Input/Output (I/O) operations per second to provision for an io1 or io2 volume. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the Key Management Service (KMS) Key for encryption. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Identifier of the EC2 Volume Snapshot. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // For GP3 volumes only. The throughput in MiB/s that the volume supports. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume, in GiB. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of the volume. For example, gp2 or io2. 
+ VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type BlockDeviceMappingEBSParameters struct { + + // Whether to delete the volume on termination. Defaults to unset, which is the value inherited from the parent image. + // +kubebuilder:validation:Optional + DeleteOnTermination *string `json:"deleteOnTermination,omitempty" tf:"delete_on_termination,omitempty"` + + // Whether to encrypt the volume. Defaults to unset, which is the value inherited from the parent image. + // +kubebuilder:validation:Optional + Encrypted *string `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // Number of Input/Output (I/O) operations per second to provision for an io1 or io2 volume. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Amazon Resource Name (ARN) of the Key Management Service (KMS) Key for encryption. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Identifier of the EC2 Volume Snapshot. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // For GP3 volumes only. The throughput in MiB/s that the volume supports. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of the volume, in GiB. + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of the volume. For example, gp2 or io2. + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type ComponentParameterInitParameters struct { + + // The name of the component parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for the named component parameter. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ComponentParameterObservation struct { + + // The name of the component parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for the named component parameter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ComponentParameterParameters struct { + + // The name of the component parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value for the named component parameter. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ImageRecipeBlockDeviceMappingInitParameters struct { + + // Name of the device. For example, /dev/sda or /dev/xvdb. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Configuration block with Elastic Block Storage (EBS) block device mapping settings. Detailed below. + EBS *BlockDeviceMappingEBSInitParameters `json:"ebs,omitempty" tf:"ebs,omitempty"` + + // Set to true to remove a mapping from the parent image. + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // Virtual device name. For example, ephemeral0. Instance store volumes are numbered starting from 0. + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type ImageRecipeBlockDeviceMappingObservation struct { + + // Name of the device. For example, /dev/sda or /dev/xvdb. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Configuration block with Elastic Block Storage (EBS) block device mapping settings. Detailed below. + EBS *BlockDeviceMappingEBSObservation `json:"ebs,omitempty" tf:"ebs,omitempty"` + + // Set to true to remove a mapping from the parent image. + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // Virtual device name. For example, ephemeral0. Instance store volumes are numbered starting from 0. 
+ VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type ImageRecipeBlockDeviceMappingParameters struct { + + // Name of the device. For example, /dev/sda or /dev/xvdb. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Configuration block with Elastic Block Storage (EBS) block device mapping settings. Detailed below. + // +kubebuilder:validation:Optional + EBS *BlockDeviceMappingEBSParameters `json:"ebs,omitempty" tf:"ebs,omitempty"` + + // Set to true to remove a mapping from the parent image. + // +kubebuilder:validation:Optional + NoDevice *bool `json:"noDevice,omitempty" tf:"no_device,omitempty"` + + // Virtual device name. For example, ephemeral0. Instance store volumes are numbered starting from 0. + // +kubebuilder:validation:Optional + VirtualName *string `json:"virtualName,omitempty" tf:"virtual_name,omitempty"` +} + +type ImageRecipeComponentInitParameters struct { + + // Amazon Resource Name (ARN) of the Image Builder Component to associate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta1.Component + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ComponentArn *string `json:"componentArn,omitempty" tf:"component_arn,omitempty"` + + // Reference to a Component in imagebuilder to populate componentArn. + // +kubebuilder:validation:Optional + ComponentArnRef *v1.Reference `json:"componentArnRef,omitempty" tf:"-"` + + // Selector for a Component in imagebuilder to populate componentArn. + // +kubebuilder:validation:Optional + ComponentArnSelector *v1.Selector `json:"componentArnSelector,omitempty" tf:"-"` + + // Configuration block(s) for parameters to configure the component. Detailed below. 
+ Parameter []ComponentParameterInitParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` +} + +type ImageRecipeComponentObservation struct { + + // Amazon Resource Name (ARN) of the Image Builder Component to associate. + ComponentArn *string `json:"componentArn,omitempty" tf:"component_arn,omitempty"` + + // Configuration block(s) for parameters to configure the component. Detailed below. + Parameter []ComponentParameterObservation `json:"parameter,omitempty" tf:"parameter,omitempty"` +} + +type ImageRecipeComponentParameters struct { + + // Amazon Resource Name (ARN) of the Image Builder Component to associate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/imagebuilder/v1beta1.Component + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ComponentArn *string `json:"componentArn,omitempty" tf:"component_arn,omitempty"` + + // Reference to a Component in imagebuilder to populate componentArn. + // +kubebuilder:validation:Optional + ComponentArnRef *v1.Reference `json:"componentArnRef,omitempty" tf:"-"` + + // Selector for a Component in imagebuilder to populate componentArn. + // +kubebuilder:validation:Optional + ComponentArnSelector *v1.Selector `json:"componentArnSelector,omitempty" tf:"-"` + + // Configuration block(s) for parameters to configure the component. Detailed below. + // +kubebuilder:validation:Optional + Parameter []ComponentParameterParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` +} + +type ImageRecipeInitParameters struct { + + // Configuration block(s) with block device mappings for the image recipe. Detailed below. + BlockDeviceMapping []ImageRecipeBlockDeviceMappingInitParameters `json:"blockDeviceMapping,omitempty" tf:"block_device_mapping,omitempty"` + + // Ordered configuration block(s) with components for the image recipe. Detailed below. 
+ Component []ImageRecipeComponentInitParameters `json:"component,omitempty" tf:"component,omitempty"` + + // Description of the image recipe. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the image recipe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The image recipe uses this image as a base from which to build your customized image. The value can be the base image ARN or an AMI ID. + ParentImage *string `json:"parentImage,omitempty" tf:"parent_image,omitempty"` + + // Configuration block for the Systems Manager Agent installed by default by Image Builder. Detailed below. + SystemsManagerAgent *SystemsManagerAgentInitParameters `json:"systemsManagerAgent,omitempty" tf:"systems_manager_agent,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Base64 encoded user data. Use this to provide commands or a command script to run when you launch your build instance. + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` + + // The semantic version of the image recipe, which specifies the version in the following format, with numeric values in each position to indicate a specific version: major.minor.patch. For example: 1.0.0. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The working directory to be used during build and test workflows. + WorkingDirectory *string `json:"workingDirectory,omitempty" tf:"working_directory,omitempty"` +} + +type ImageRecipeObservation struct { + + // Amazon Resource Name (ARN) of the image recipe. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration block(s) with block device mappings for the image recipe. Detailed below. 
+ BlockDeviceMapping []ImageRecipeBlockDeviceMappingObservation `json:"blockDeviceMapping,omitempty" tf:"block_device_mapping,omitempty"` + + // Ordered configuration block(s) with components for the image recipe. Detailed below. + Component []ImageRecipeComponentObservation `json:"component,omitempty" tf:"component,omitempty"` + + // Date the image recipe was created. + DateCreated *string `json:"dateCreated,omitempty" tf:"date_created,omitempty"` + + // Description of the image recipe. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the image recipe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Owner of the image recipe. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // The image recipe uses this image as a base from which to build your customized image. The value can be the base image ARN or an AMI ID. + ParentImage *string `json:"parentImage,omitempty" tf:"parent_image,omitempty"` + + // Platform of the image recipe. + Platform *string `json:"platform,omitempty" tf:"platform,omitempty"` + + // Configuration block for the Systems Manager Agent installed by default by Image Builder. Detailed below. + SystemsManagerAgent *SystemsManagerAgentObservation `json:"systemsManagerAgent,omitempty" tf:"systems_manager_agent,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Base64 encoded user data. Use this to provide commands or a command script to run when you launch your build instance. 
+ UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` + + // The semantic version of the image recipe, which specifies the version in the following format, with numeric values in each position to indicate a specific version: major.minor.patch. For example: 1.0.0. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The working directory to be used during build and test workflows. + WorkingDirectory *string `json:"workingDirectory,omitempty" tf:"working_directory,omitempty"` +} + +type ImageRecipeParameters struct { + + // Configuration block(s) with block device mappings for the image recipe. Detailed below. + // +kubebuilder:validation:Optional + BlockDeviceMapping []ImageRecipeBlockDeviceMappingParameters `json:"blockDeviceMapping,omitempty" tf:"block_device_mapping,omitempty"` + + // Ordered configuration block(s) with components for the image recipe. Detailed below. + // +kubebuilder:validation:Optional + Component []ImageRecipeComponentParameters `json:"component,omitempty" tf:"component,omitempty"` + + // Description of the image recipe. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Name of the image recipe. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The image recipe uses this image as a base from which to build your customized image. The value can be the base image ARN or an AMI ID. + // +kubebuilder:validation:Optional + ParentImage *string `json:"parentImage,omitempty" tf:"parent_image,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for the Systems Manager Agent installed by default by Image Builder. Detailed below. 
+ // +kubebuilder:validation:Optional + SystemsManagerAgent *SystemsManagerAgentParameters `json:"systemsManagerAgent,omitempty" tf:"systems_manager_agent,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Base64 encoded user data. Use this to provide commands or a command script to run when you launch your build instance. + // +kubebuilder:validation:Optional + UserDataBase64 *string `json:"userDataBase64,omitempty" tf:"user_data_base64,omitempty"` + + // The semantic version of the image recipe, which specifies the version in the following format, with numeric values in each position to indicate a specific version: major.minor.patch. For example: 1.0.0. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The working directory to be used during build and test workflows. + // +kubebuilder:validation:Optional + WorkingDirectory *string `json:"workingDirectory,omitempty" tf:"working_directory,omitempty"` +} + +type SystemsManagerAgentInitParameters struct { + + // Whether to remove the Systems Manager Agent after the image has been built. Defaults to false. + UninstallAfterBuild *bool `json:"uninstallAfterBuild,omitempty" tf:"uninstall_after_build,omitempty"` +} + +type SystemsManagerAgentObservation struct { + + // Whether to remove the Systems Manager Agent after the image has been built. Defaults to false. + UninstallAfterBuild *bool `json:"uninstallAfterBuild,omitempty" tf:"uninstall_after_build,omitempty"` +} + +type SystemsManagerAgentParameters struct { + + // Whether to remove the Systems Manager Agent after the image has been built. Defaults to false. 
+ // +kubebuilder:validation:Optional + UninstallAfterBuild *bool `json:"uninstallAfterBuild" tf:"uninstall_after_build,omitempty"` +} + +// ImageRecipeSpec defines the desired state of ImageRecipe +type ImageRecipeSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ImageRecipeParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ImageRecipeInitParameters `json:"initProvider,omitempty"` +} + +// ImageRecipeStatus defines the observed state of ImageRecipe. +type ImageRecipeStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ImageRecipeObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ImageRecipe is the Schema for the ImageRecipes API. 
Manage an Image Builder Image Recipe +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ImageRecipe struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.component) || (has(self.initProvider) && has(self.initProvider.component))",message="spec.forProvider.component is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.parentImage) || (has(self.initProvider) && has(self.initProvider.parentImage))",message="spec.forProvider.parentImage is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec ImageRecipeSpec `json:"spec"` + Status ImageRecipeStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ImageRecipeList contains a list of ImageRecipes +type ImageRecipeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ImageRecipe `json:"items"` +} + +// Repository type metadata. +var ( + ImageRecipe_Kind = "ImageRecipe" + ImageRecipe_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ImageRecipe_Kind}.String() + ImageRecipe_KindAPIVersion = ImageRecipe_Kind + "." + CRDGroupVersion.String() + ImageRecipe_GroupVersionKind = CRDGroupVersion.WithKind(ImageRecipe_Kind) +) + +func init() { + SchemeBuilder.Register(&ImageRecipe{}, &ImageRecipeList{}) +} diff --git a/apis/imagebuilder/v1beta2/zz_infrastructureconfiguration_terraformed.go b/apis/imagebuilder/v1beta2/zz_infrastructureconfiguration_terraformed.go new file mode 100755 index 0000000000..20816ebfbd --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_infrastructureconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this InfrastructureConfiguration +func (mg *InfrastructureConfiguration) GetTerraformResourceType() string { + return "aws_imagebuilder_infrastructure_configuration" +} + +// GetConnectionDetailsMapping for this InfrastructureConfiguration +func (tr *InfrastructureConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this InfrastructureConfiguration +func (tr *InfrastructureConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this InfrastructureConfiguration +func (tr *InfrastructureConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this InfrastructureConfiguration +func (tr *InfrastructureConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this InfrastructureConfiguration +func (tr *InfrastructureConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this InfrastructureConfiguration +func (tr *InfrastructureConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Spec.ForProvider) +} + +// GetInitParameters of this InfrastructureConfiguration +func (tr *InfrastructureConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this InfrastructureConfiguration +func (tr *InfrastructureConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this InfrastructureConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *InfrastructureConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &InfrastructureConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *InfrastructureConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/imagebuilder/v1beta2/zz_infrastructureconfiguration_types.go b/apis/imagebuilder/v1beta2/zz_infrastructureconfiguration_types.go new file mode 100755 index 0000000000..bbc8a03fa2 --- /dev/null +++ b/apis/imagebuilder/v1beta2/zz_infrastructureconfiguration_types.go @@ -0,0 +1,438 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type InfrastructureConfigurationInitParameters struct { + + // Description for the configuration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block with instance metadata options for the HTTP requests that pipeline builds use to launch EC2 build and test instances. Detailed below. + InstanceMetadataOptions *InstanceMetadataOptionsInitParameters `json:"instanceMetadataOptions,omitempty" tf:"instance_metadata_options,omitempty"` + + // Name of IAM Instance Profile. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + InstanceProfileName *string `json:"instanceProfileName,omitempty" tf:"instance_profile_name,omitempty"` + + // Reference to a InstanceProfile in iam to populate instanceProfileName. + // +kubebuilder:validation:Optional + InstanceProfileNameRef *v1.Reference `json:"instanceProfileNameRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate instanceProfileName. + // +kubebuilder:validation:Optional + InstanceProfileNameSelector *v1.Selector `json:"instanceProfileNameSelector,omitempty" tf:"-"` + + // Set of EC2 Instance Types. + // +listType=set + InstanceTypes []*string `json:"instanceTypes,omitempty" tf:"instance_types,omitempty"` + + // Name of EC2 Key Pair. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.KeyPair + KeyPair *string `json:"keyPair,omitempty" tf:"key_pair,omitempty"` + + // Reference to a KeyPair in ec2 to populate keyPair. + // +kubebuilder:validation:Optional + KeyPairRef *v1.Reference `json:"keyPairRef,omitempty" tf:"-"` + + // Selector for a KeyPair in ec2 to populate keyPair. + // +kubebuilder:validation:Optional + KeyPairSelector *v1.Selector `json:"keyPairSelector,omitempty" tf:"-"` + + // Configuration block with logging settings. Detailed below. + Logging *LoggingInitParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // Name for the configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags to assign to infrastructure created by the configuration. + // +mapType=granular + ResourceTags map[string]*string `json:"resourceTags,omitempty" tf:"resource_tags,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. 
+ // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // Set of EC2 Security Group identifiers. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Amazon Resource Name (ARN) of SNS Topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + SnsTopicArn *string `json:"snsTopicArn,omitempty" tf:"sns_topic_arn,omitempty"` + + // Reference to a Topic in sns to populate snsTopicArn. + // +kubebuilder:validation:Optional + SnsTopicArnRef *v1.Reference `json:"snsTopicArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate snsTopicArn. + // +kubebuilder:validation:Optional + SnsTopicArnSelector *v1.Selector `json:"snsTopicArnSelector,omitempty" tf:"-"` + + // EC2 Subnet identifier. Also requires security_group_ids argument. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Enable if the instance should be terminated when the pipeline fails. Defaults to false. + TerminateInstanceOnFailure *bool `json:"terminateInstanceOnFailure,omitempty" tf:"terminate_instance_on_failure,omitempty"` +} + +type InfrastructureConfigurationObservation struct { + + // Amazon Resource Name (ARN) of the configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Date when the configuration was created. + DateCreated *string `json:"dateCreated,omitempty" tf:"date_created,omitempty"` + + // Date when the configuration was updated. + DateUpdated *string `json:"dateUpdated,omitempty" tf:"date_updated,omitempty"` + + // Description for the configuration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Amazon Resource Name (ARN) of the configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block with instance metadata options for the HTTP requests that pipeline builds use to launch EC2 build and test instances. Detailed below. + InstanceMetadataOptions *InstanceMetadataOptionsObservation `json:"instanceMetadataOptions,omitempty" tf:"instance_metadata_options,omitempty"` + + // Name of IAM Instance Profile. + InstanceProfileName *string `json:"instanceProfileName,omitempty" tf:"instance_profile_name,omitempty"` + + // Set of EC2 Instance Types. + // +listType=set + InstanceTypes []*string `json:"instanceTypes,omitempty" tf:"instance_types,omitempty"` + + // Name of EC2 Key Pair. + KeyPair *string `json:"keyPair,omitempty" tf:"key_pair,omitempty"` + + // Configuration block with logging settings. Detailed below. + Logging *LoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"` + + // Name for the configuration. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key-value map of resource tags to assign to infrastructure created by the configuration. + // +mapType=granular + ResourceTags map[string]*string `json:"resourceTags,omitempty" tf:"resource_tags,omitempty"` + + // Set of EC2 Security Group identifiers. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Amazon Resource Name (ARN) of SNS Topic. + SnsTopicArn *string `json:"snsTopicArn,omitempty" tf:"sns_topic_arn,omitempty"` + + // EC2 Subnet identifier. Also requires security_group_ids argument. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Enable if the instance should be terminated when the pipeline fails. Defaults to false. + TerminateInstanceOnFailure *bool `json:"terminateInstanceOnFailure,omitempty" tf:"terminate_instance_on_failure,omitempty"` +} + +type InfrastructureConfigurationParameters struct { + + // Description for the configuration. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block with instance metadata options for the HTTP requests that pipeline builds use to launch EC2 build and test instances. Detailed below. + // +kubebuilder:validation:Optional + InstanceMetadataOptions *InstanceMetadataOptionsParameters `json:"instanceMetadataOptions,omitempty" tf:"instance_metadata_options,omitempty"` + + // Name of IAM Instance Profile. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + // +kubebuilder:validation:Optional + InstanceProfileName *string `json:"instanceProfileName,omitempty" tf:"instance_profile_name,omitempty"` + + // Reference to a InstanceProfile in iam to populate instanceProfileName. + // +kubebuilder:validation:Optional + InstanceProfileNameRef *v1.Reference `json:"instanceProfileNameRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate instanceProfileName. + // +kubebuilder:validation:Optional + InstanceProfileNameSelector *v1.Selector `json:"instanceProfileNameSelector,omitempty" tf:"-"` + + // Set of EC2 Instance Types. + // +kubebuilder:validation:Optional + // +listType=set + InstanceTypes []*string `json:"instanceTypes,omitempty" tf:"instance_types,omitempty"` + + // Name of EC2 Key Pair. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.KeyPair + // +kubebuilder:validation:Optional + KeyPair *string `json:"keyPair,omitempty" tf:"key_pair,omitempty"` + + // Reference to a KeyPair in ec2 to populate keyPair. + // +kubebuilder:validation:Optional + KeyPairRef *v1.Reference `json:"keyPairRef,omitempty" tf:"-"` + + // Selector for a KeyPair in ec2 to populate keyPair. + // +kubebuilder:validation:Optional + KeyPairSelector *v1.Selector `json:"keyPairSelector,omitempty" tf:"-"` + + // Configuration block with logging settings. Detailed below. + // +kubebuilder:validation:Optional + Logging *LoggingParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // Name for the configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags to assign to infrastructure created by the configuration. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + ResourceTags map[string]*string `json:"resourceTags,omitempty" tf:"resource_tags,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // Set of EC2 Security Group identifiers. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Amazon Resource Name (ARN) of SNS Topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + SnsTopicArn *string `json:"snsTopicArn,omitempty" tf:"sns_topic_arn,omitempty"` + + // Reference to a Topic in sns to populate snsTopicArn. + // +kubebuilder:validation:Optional + SnsTopicArnRef *v1.Reference `json:"snsTopicArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate snsTopicArn. + // +kubebuilder:validation:Optional + SnsTopicArnSelector *v1.Selector `json:"snsTopicArnSelector,omitempty" tf:"-"` + + // EC2 Subnet identifier. Also requires security_group_ids argument. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Enable if the instance should be terminated when the pipeline fails. Defaults to false. + // +kubebuilder:validation:Optional + TerminateInstanceOnFailure *bool `json:"terminateInstanceOnFailure,omitempty" tf:"terminate_instance_on_failure,omitempty"` +} + +type InstanceMetadataOptionsInitParameters struct { + + // The number of hops that an instance can traverse to reach its destination. + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // Whether a signed token is required for instance metadata retrieval requests. Valid values: required, optional. + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` +} + +type InstanceMetadataOptionsObservation struct { + + // The number of hops that an instance can traverse to reach its destination. + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // Whether a signed token is required for instance metadata retrieval requests. Valid values: required, optional. 
+ HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` +} + +type InstanceMetadataOptionsParameters struct { + + // The number of hops that an instance can traverse to reach its destination. + // +kubebuilder:validation:Optional + HTTPPutResponseHopLimit *float64 `json:"httpPutResponseHopLimit,omitempty" tf:"http_put_response_hop_limit,omitempty"` + + // Whether a signed token is required for instance metadata retrieval requests. Valid values: required, optional. + // +kubebuilder:validation:Optional + HTTPTokens *string `json:"httpTokens,omitempty" tf:"http_tokens,omitempty"` +} + +type LoggingInitParameters struct { + + // Configuration block with S3 logging settings. Detailed below. + S3Logs *S3LogsInitParameters `json:"s3Logs,omitempty" tf:"s3_logs,omitempty"` +} + +type LoggingObservation struct { + + // Configuration block with S3 logging settings. Detailed below. + S3Logs *S3LogsObservation `json:"s3Logs,omitempty" tf:"s3_logs,omitempty"` +} + +type LoggingParameters struct { + + // Configuration block with S3 logging settings. Detailed below. + // +kubebuilder:validation:Optional + S3Logs *S3LogsParameters `json:"s3Logs" tf:"s3_logs,omitempty"` +} + +type S3LogsInitParameters struct { + + // Name of the S3 Bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate s3BucketName. + // +kubebuilder:validation:Optional + S3BucketNameRef *v1.Reference `json:"s3BucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate s3BucketName. + // +kubebuilder:validation:Optional + S3BucketNameSelector *v1.Selector `json:"s3BucketNameSelector,omitempty" tf:"-"` + + // Prefix to use for S3 logs. Defaults to /. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type S3LogsObservation struct { + + // Name of the S3 Bucket. 
+ S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // Prefix to use for S3 logs. Defaults to /. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type S3LogsParameters struct { + + // Name of the S3 Bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate s3BucketName. + // +kubebuilder:validation:Optional + S3BucketNameRef *v1.Reference `json:"s3BucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate s3BucketName. + // +kubebuilder:validation:Optional + S3BucketNameSelector *v1.Selector `json:"s3BucketNameSelector,omitempty" tf:"-"` + + // Prefix to use for S3 logs. Defaults to /. + // +kubebuilder:validation:Optional + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +// InfrastructureConfigurationSpec defines the desired state of InfrastructureConfiguration +type InfrastructureConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InfrastructureConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider InfrastructureConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// InfrastructureConfigurationStatus defines the observed state of InfrastructureConfiguration. +type InfrastructureConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InfrastructureConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// InfrastructureConfiguration is the Schema for the InfrastructureConfigurations API. Manages an Image Builder Infrastructure Configuration +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type InfrastructureConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec InfrastructureConfigurationSpec `json:"spec"` + Status InfrastructureConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InfrastructureConfigurationList contains a list of InfrastructureConfigurations +type InfrastructureConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []InfrastructureConfiguration `json:"items"` +} + +// Repository type metadata. 
+var ( + InfrastructureConfiguration_Kind = "InfrastructureConfiguration" + InfrastructureConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: InfrastructureConfiguration_Kind}.String() + InfrastructureConfiguration_KindAPIVersion = InfrastructureConfiguration_Kind + "." + CRDGroupVersion.String() + InfrastructureConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(InfrastructureConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&InfrastructureConfiguration{}, &InfrastructureConfigurationList{}) +} diff --git a/apis/iot/v1beta1/zz_generated.conversion_hubs.go b/apis/iot/v1beta1/zz_generated.conversion_hubs.go index fd63517aa6..f7e87b8c2e 100755 --- a/apis/iot/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/iot/v1beta1/zz_generated.conversion_hubs.go @@ -9,9 +9,6 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Certificate) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *IndexingConfiguration) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LoggingOptions) Hub() {} @@ -21,29 +18,14 @@ func (tr *Policy) Hub() {} // Hub marks this type as a conversion hub. func (tr *PolicyAttachment) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ProvisioningTemplate) Hub() {} - // Hub marks this type as a conversion hub. func (tr *RoleAlias) Hub() {} // Hub marks this type as a conversion hub. func (tr *Thing) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ThingGroup) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ThingGroupMembership) Hub() {} // Hub marks this type as a conversion hub. func (tr *ThingPrincipalAttachment) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ThingType) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *TopicRule) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *TopicRuleDestination) Hub() {} diff --git a/apis/iot/v1beta1/zz_generated.conversion_spokes.go b/apis/iot/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..41c70dbc8d --- /dev/null +++ b/apis/iot/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this IndexingConfiguration to the hub type. +func (tr *IndexingConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IndexingConfiguration type. +func (tr *IndexingConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ProvisioningTemplate to the hub type. 
+func (tr *ProvisioningTemplate) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ProvisioningTemplate type. +func (tr *ProvisioningTemplate) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ThingGroup to the hub type. +func (tr *ThingGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ThingGroup type. +func (tr *ThingGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ThingType to the hub type. 
+func (tr *ThingType) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ThingType type. +func (tr *ThingType) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this TopicRule to the hub type. +func (tr *TopicRule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the TopicRule type. +func (tr *TopicRule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this TopicRuleDestination to the hub type. 
+func (tr *TopicRuleDestination) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the TopicRuleDestination type. +func (tr *TopicRuleDestination) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/iot/v1beta2/zz_generated.conversion_hubs.go b/apis/iot/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..293e12def7 --- /dev/null +++ b/apis/iot/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *IndexingConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ProvisioningTemplate) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ThingGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ThingType) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *TopicRule) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *TopicRuleDestination) Hub() {} diff --git a/apis/iot/v1beta2/zz_generated.deepcopy.go b/apis/iot/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6cd4a23009 --- /dev/null +++ b/apis/iot/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,8431 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttributePayloadInitParameters) DeepCopyInto(out *AttributePayloadInitParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttributePayloadInitParameters. +func (in *AttributePayloadInitParameters) DeepCopy() *AttributePayloadInitParameters { + if in == nil { + return nil + } + out := new(AttributePayloadInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AttributePayloadObservation) DeepCopyInto(out *AttributePayloadObservation) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttributePayloadObservation. +func (in *AttributePayloadObservation) DeepCopy() *AttributePayloadObservation { + if in == nil { + return nil + } + out := new(AttributePayloadObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttributePayloadParameters) DeepCopyInto(out *AttributePayloadParameters) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttributePayloadParameters. +func (in *AttributePayloadParameters) DeepCopy() *AttributePayloadParameters { + if in == nil { + return nil + } + out := new(AttributePayloadParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchAlarmInitParameters) DeepCopyInto(out *CloudwatchAlarmInitParameters) { + *out = *in + if in.AlarmName != nil { + in, out := &in.AlarmName, &out.AlarmName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateReason != nil { + in, out := &in.StateReason, &out.StateReason + *out = new(string) + **out = **in + } + if in.StateValue != nil { + in, out := &in.StateValue, &out.StateValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchAlarmInitParameters. +func (in *CloudwatchAlarmInitParameters) DeepCopy() *CloudwatchAlarmInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchAlarmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchAlarmObservation) DeepCopyInto(out *CloudwatchAlarmObservation) { + *out = *in + if in.AlarmName != nil { + in, out := &in.AlarmName, &out.AlarmName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateReason != nil { + in, out := &in.StateReason, &out.StateReason + *out = new(string) + **out = **in + } + if in.StateValue != nil { + in, out := &in.StateValue, &out.StateValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchAlarmObservation. +func (in *CloudwatchAlarmObservation) DeepCopy() *CloudwatchAlarmObservation { + if in == nil { + return nil + } + out := new(CloudwatchAlarmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchAlarmParameters) DeepCopyInto(out *CloudwatchAlarmParameters) { + *out = *in + if in.AlarmName != nil { + in, out := &in.AlarmName, &out.AlarmName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateReason != nil { + in, out := &in.StateReason, &out.StateReason + *out = new(string) + **out = **in + } + if in.StateValue != nil { + in, out := &in.StateValue, &out.StateValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchAlarmParameters. +func (in *CloudwatchAlarmParameters) DeepCopy() *CloudwatchAlarmParameters { + if in == nil { + return nil + } + out := new(CloudwatchAlarmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsInitParameters) DeepCopyInto(out *CloudwatchLogsInitParameters) { + *out = *in + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsInitParameters. +func (in *CloudwatchLogsInitParameters) DeepCopy() *CloudwatchLogsInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchLogsObservation) DeepCopyInto(out *CloudwatchLogsObservation) { + *out = *in + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsObservation. +func (in *CloudwatchLogsObservation) DeepCopy() *CloudwatchLogsObservation { + if in == nil { + return nil + } + out := new(CloudwatchLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsParameters) DeepCopyInto(out *CloudwatchLogsParameters) { + *out = *in + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsParameters. +func (in *CloudwatchLogsParameters) DeepCopy() *CloudwatchLogsParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchMetricInitParameters) DeepCopyInto(out *CloudwatchMetricInitParameters) { + *out = *in + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.MetricTimestamp != nil { + in, out := &in.MetricTimestamp, &out.MetricTimestamp + *out = new(string) + **out = **in + } + if in.MetricUnit != nil { + in, out := &in.MetricUnit, &out.MetricUnit + *out = new(string) + **out = **in + } + if in.MetricValue != nil { + in, out := &in.MetricValue, &out.MetricValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchMetricInitParameters. +func (in *CloudwatchMetricInitParameters) DeepCopy() *CloudwatchMetricInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchMetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchMetricObservation) DeepCopyInto(out *CloudwatchMetricObservation) { + *out = *in + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.MetricTimestamp != nil { + in, out := &in.MetricTimestamp, &out.MetricTimestamp + *out = new(string) + **out = **in + } + if in.MetricUnit != nil { + in, out := &in.MetricUnit, &out.MetricUnit + *out = new(string) + **out = **in + } + if in.MetricValue != nil { + in, out := &in.MetricValue, &out.MetricValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchMetricObservation. +func (in *CloudwatchMetricObservation) DeepCopy() *CloudwatchMetricObservation { + if in == nil { + return nil + } + out := new(CloudwatchMetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchMetricParameters) DeepCopyInto(out *CloudwatchMetricParameters) { + *out = *in + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.MetricTimestamp != nil { + in, out := &in.MetricTimestamp, &out.MetricTimestamp + *out = new(string) + **out = **in + } + if in.MetricUnit != nil { + in, out := &in.MetricUnit, &out.MetricUnit + *out = new(string) + **out = **in + } + if in.MetricValue != nil { + in, out := &in.MetricValue, &out.MetricValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchMetricParameters. +func (in *CloudwatchMetricParameters) DeepCopy() *CloudwatchMetricParameters { + if in == nil { + return nil + } + out := new(CloudwatchMetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFieldInitParameters) DeepCopyInto(out *CustomFieldInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFieldInitParameters. +func (in *CustomFieldInitParameters) DeepCopy() *CustomFieldInitParameters { + if in == nil { + return nil + } + out := new(CustomFieldInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomFieldObservation) DeepCopyInto(out *CustomFieldObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFieldObservation. +func (in *CustomFieldObservation) DeepCopy() *CustomFieldObservation { + if in == nil { + return nil + } + out := new(CustomFieldObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFieldParameters) DeepCopyInto(out *CustomFieldParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFieldParameters. +func (in *CustomFieldParameters) DeepCopy() *CustomFieldParameters { + if in == nil { + return nil + } + out := new(CustomFieldParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionInitParameters) DeepCopyInto(out *DimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionInitParameters. 
+func (in *DimensionInitParameters) DeepCopy() *DimensionInitParameters { + if in == nil { + return nil + } + out := new(DimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionObservation) DeepCopyInto(out *DimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionObservation. +func (in *DimensionObservation) DeepCopy() *DimensionObservation { + if in == nil { + return nil + } + out := new(DimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionParameters) DeepCopyInto(out *DimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionParameters. +func (in *DimensionParameters) DeepCopy() *DimensionParameters { + if in == nil { + return nil + } + out := new(DimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamodbInitParameters) DeepCopyInto(out *DynamodbInitParameters) { + *out = *in + if in.HashKeyField != nil { + in, out := &in.HashKeyField, &out.HashKeyField + *out = new(string) + **out = **in + } + if in.HashKeyType != nil { + in, out := &in.HashKeyType, &out.HashKeyType + *out = new(string) + **out = **in + } + if in.HashKeyValue != nil { + in, out := &in.HashKeyValue, &out.HashKeyValue + *out = new(string) + **out = **in + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.PayloadField != nil { + in, out := &in.PayloadField, &out.PayloadField + *out = new(string) + **out = **in + } + if in.RangeKeyField != nil { + in, out := &in.RangeKeyField, &out.RangeKeyField + *out = new(string) + **out = **in + } + if in.RangeKeyType != nil { + in, out := &in.RangeKeyType, &out.RangeKeyType + *out = new(string) + **out = **in + } + if in.RangeKeyValue != nil { + in, out := &in.RangeKeyValue, &out.RangeKeyValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamodbInitParameters. +func (in *DynamodbInitParameters) DeepCopy() *DynamodbInitParameters { + if in == nil { + return nil + } + out := new(DynamodbInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamodbObservation) DeepCopyInto(out *DynamodbObservation) { + *out = *in + if in.HashKeyField != nil { + in, out := &in.HashKeyField, &out.HashKeyField + *out = new(string) + **out = **in + } + if in.HashKeyType != nil { + in, out := &in.HashKeyType, &out.HashKeyType + *out = new(string) + **out = **in + } + if in.HashKeyValue != nil { + in, out := &in.HashKeyValue, &out.HashKeyValue + *out = new(string) + **out = **in + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.PayloadField != nil { + in, out := &in.PayloadField, &out.PayloadField + *out = new(string) + **out = **in + } + if in.RangeKeyField != nil { + in, out := &in.RangeKeyField, &out.RangeKeyField + *out = new(string) + **out = **in + } + if in.RangeKeyType != nil { + in, out := &in.RangeKeyType, &out.RangeKeyType + *out = new(string) + **out = **in + } + if in.RangeKeyValue != nil { + in, out := &in.RangeKeyValue, &out.RangeKeyValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamodbObservation. +func (in *DynamodbObservation) DeepCopy() *DynamodbObservation { + if in == nil { + return nil + } + out := new(DynamodbObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamodbParameters) DeepCopyInto(out *DynamodbParameters) { + *out = *in + if in.HashKeyField != nil { + in, out := &in.HashKeyField, &out.HashKeyField + *out = new(string) + **out = **in + } + if in.HashKeyType != nil { + in, out := &in.HashKeyType, &out.HashKeyType + *out = new(string) + **out = **in + } + if in.HashKeyValue != nil { + in, out := &in.HashKeyValue, &out.HashKeyValue + *out = new(string) + **out = **in + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.PayloadField != nil { + in, out := &in.PayloadField, &out.PayloadField + *out = new(string) + **out = **in + } + if in.RangeKeyField != nil { + in, out := &in.RangeKeyField, &out.RangeKeyField + *out = new(string) + **out = **in + } + if in.RangeKeyType != nil { + in, out := &in.RangeKeyType, &out.RangeKeyType + *out = new(string) + **out = **in + } + if in.RangeKeyValue != nil { + in, out := &in.RangeKeyValue, &out.RangeKeyValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamodbParameters. +func (in *DynamodbParameters) DeepCopy() *DynamodbParameters { + if in == nil { + return nil + } + out := new(DynamodbParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Dynamodbv2InitParameters) DeepCopyInto(out *Dynamodbv2InitParameters) { + *out = *in + if in.PutItem != nil { + in, out := &in.PutItem, &out.PutItem + *out = new(PutItemInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dynamodbv2InitParameters. +func (in *Dynamodbv2InitParameters) DeepCopy() *Dynamodbv2InitParameters { + if in == nil { + return nil + } + out := new(Dynamodbv2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dynamodbv2Observation) DeepCopyInto(out *Dynamodbv2Observation) { + *out = *in + if in.PutItem != nil { + in, out := &in.PutItem, &out.PutItem + *out = new(PutItemObservation) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dynamodbv2Observation. +func (in *Dynamodbv2Observation) DeepCopy() *Dynamodbv2Observation { + if in == nil { + return nil + } + out := new(Dynamodbv2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dynamodbv2Parameters) DeepCopyInto(out *Dynamodbv2Parameters) { + *out = *in + if in.PutItem != nil { + in, out := &in.PutItem, &out.PutItem + *out = new(PutItemParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dynamodbv2Parameters. 
+func (in *Dynamodbv2Parameters) DeepCopy() *Dynamodbv2Parameters { + if in == nil { + return nil + } + out := new(Dynamodbv2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dynamodbv2PutItemInitParameters) DeepCopyInto(out *Dynamodbv2PutItemInitParameters) { + *out = *in + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dynamodbv2PutItemInitParameters. +func (in *Dynamodbv2PutItemInitParameters) DeepCopy() *Dynamodbv2PutItemInitParameters { + if in == nil { + return nil + } + out := new(Dynamodbv2PutItemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dynamodbv2PutItemObservation) DeepCopyInto(out *Dynamodbv2PutItemObservation) { + *out = *in + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dynamodbv2PutItemObservation. +func (in *Dynamodbv2PutItemObservation) DeepCopy() *Dynamodbv2PutItemObservation { + if in == nil { + return nil + } + out := new(Dynamodbv2PutItemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dynamodbv2PutItemParameters) DeepCopyInto(out *Dynamodbv2PutItemParameters) { + *out = *in + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dynamodbv2PutItemParameters. 
+func (in *Dynamodbv2PutItemParameters) DeepCopy() *Dynamodbv2PutItemParameters { + if in == nil { + return nil + } + out := new(Dynamodbv2PutItemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchInitParameters) DeepCopyInto(out *ElasticsearchInitParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchInitParameters. +func (in *ElasticsearchInitParameters) DeepCopy() *ElasticsearchInitParameters { + if in == nil { + return nil + } + out := new(ElasticsearchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchObservation) DeepCopyInto(out *ElasticsearchObservation) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchObservation. +func (in *ElasticsearchObservation) DeepCopy() *ElasticsearchObservation { + if in == nil { + return nil + } + out := new(ElasticsearchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchParameters) DeepCopyInto(out *ElasticsearchParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchParameters. 
+func (in *ElasticsearchParameters) DeepCopy() *ElasticsearchParameters { + if in == nil { + return nil + } + out := new(ElasticsearchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorActionCloudwatchAlarmInitParameters) DeepCopyInto(out *ErrorActionCloudwatchAlarmInitParameters) { + *out = *in + if in.AlarmName != nil { + in, out := &in.AlarmName, &out.AlarmName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateReason != nil { + in, out := &in.StateReason, &out.StateReason + *out = new(string) + **out = **in + } + if in.StateValue != nil { + in, out := &in.StateValue, &out.StateValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionCloudwatchAlarmInitParameters. +func (in *ErrorActionCloudwatchAlarmInitParameters) DeepCopy() *ErrorActionCloudwatchAlarmInitParameters { + if in == nil { + return nil + } + out := new(ErrorActionCloudwatchAlarmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ErrorActionCloudwatchAlarmObservation) DeepCopyInto(out *ErrorActionCloudwatchAlarmObservation) { + *out = *in + if in.AlarmName != nil { + in, out := &in.AlarmName, &out.AlarmName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateReason != nil { + in, out := &in.StateReason, &out.StateReason + *out = new(string) + **out = **in + } + if in.StateValue != nil { + in, out := &in.StateValue, &out.StateValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionCloudwatchAlarmObservation. +func (in *ErrorActionCloudwatchAlarmObservation) DeepCopy() *ErrorActionCloudwatchAlarmObservation { + if in == nil { + return nil + } + out := new(ErrorActionCloudwatchAlarmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorActionCloudwatchAlarmParameters) DeepCopyInto(out *ErrorActionCloudwatchAlarmParameters) { + *out = *in + if in.AlarmName != nil { + in, out := &in.AlarmName, &out.AlarmName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateReason != nil { + in, out := &in.StateReason, &out.StateReason + *out = new(string) + **out = **in + } + if in.StateValue != nil { + in, out := &in.StateValue, &out.StateValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionCloudwatchAlarmParameters. 
+func (in *ErrorActionCloudwatchAlarmParameters) DeepCopy() *ErrorActionCloudwatchAlarmParameters { + if in == nil { + return nil + } + out := new(ErrorActionCloudwatchAlarmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorActionCloudwatchLogsInitParameters) DeepCopyInto(out *ErrorActionCloudwatchLogsInitParameters) { + *out = *in + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionCloudwatchLogsInitParameters. +func (in *ErrorActionCloudwatchLogsInitParameters) DeepCopy() *ErrorActionCloudwatchLogsInitParameters { + if in == nil { + return nil + } + out := new(ErrorActionCloudwatchLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorActionCloudwatchLogsObservation) DeepCopyInto(out *ErrorActionCloudwatchLogsObservation) { + *out = *in + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionCloudwatchLogsObservation. +func (in *ErrorActionCloudwatchLogsObservation) DeepCopy() *ErrorActionCloudwatchLogsObservation { + if in == nil { + return nil + } + out := new(ErrorActionCloudwatchLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ErrorActionCloudwatchLogsParameters) DeepCopyInto(out *ErrorActionCloudwatchLogsParameters) { + *out = *in + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionCloudwatchLogsParameters. +func (in *ErrorActionCloudwatchLogsParameters) DeepCopy() *ErrorActionCloudwatchLogsParameters { + if in == nil { + return nil + } + out := new(ErrorActionCloudwatchLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorActionCloudwatchMetricInitParameters) DeepCopyInto(out *ErrorActionCloudwatchMetricInitParameters) { + *out = *in + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.MetricTimestamp != nil { + in, out := &in.MetricTimestamp, &out.MetricTimestamp + *out = new(string) + **out = **in + } + if in.MetricUnit != nil { + in, out := &in.MetricUnit, &out.MetricUnit + *out = new(string) + **out = **in + } + if in.MetricValue != nil { + in, out := &in.MetricValue, &out.MetricValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionCloudwatchMetricInitParameters. 
+func (in *ErrorActionCloudwatchMetricInitParameters) DeepCopy() *ErrorActionCloudwatchMetricInitParameters { + if in == nil { + return nil + } + out := new(ErrorActionCloudwatchMetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorActionCloudwatchMetricObservation) DeepCopyInto(out *ErrorActionCloudwatchMetricObservation) { + *out = *in + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.MetricTimestamp != nil { + in, out := &in.MetricTimestamp, &out.MetricTimestamp + *out = new(string) + **out = **in + } + if in.MetricUnit != nil { + in, out := &in.MetricUnit, &out.MetricUnit + *out = new(string) + **out = **in + } + if in.MetricValue != nil { + in, out := &in.MetricValue, &out.MetricValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionCloudwatchMetricObservation. +func (in *ErrorActionCloudwatchMetricObservation) DeepCopy() *ErrorActionCloudwatchMetricObservation { + if in == nil { + return nil + } + out := new(ErrorActionCloudwatchMetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ErrorActionCloudwatchMetricParameters) DeepCopyInto(out *ErrorActionCloudwatchMetricParameters) { + *out = *in + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.MetricTimestamp != nil { + in, out := &in.MetricTimestamp, &out.MetricTimestamp + *out = new(string) + **out = **in + } + if in.MetricUnit != nil { + in, out := &in.MetricUnit, &out.MetricUnit + *out = new(string) + **out = **in + } + if in.MetricValue != nil { + in, out := &in.MetricValue, &out.MetricValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionCloudwatchMetricParameters. +func (in *ErrorActionCloudwatchMetricParameters) DeepCopy() *ErrorActionCloudwatchMetricParameters { + if in == nil { + return nil + } + out := new(ErrorActionCloudwatchMetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ErrorActionDynamodbInitParameters) DeepCopyInto(out *ErrorActionDynamodbInitParameters) { + *out = *in + if in.HashKeyField != nil { + in, out := &in.HashKeyField, &out.HashKeyField + *out = new(string) + **out = **in + } + if in.HashKeyType != nil { + in, out := &in.HashKeyType, &out.HashKeyType + *out = new(string) + **out = **in + } + if in.HashKeyValue != nil { + in, out := &in.HashKeyValue, &out.HashKeyValue + *out = new(string) + **out = **in + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.PayloadField != nil { + in, out := &in.PayloadField, &out.PayloadField + *out = new(string) + **out = **in + } + if in.RangeKeyField != nil { + in, out := &in.RangeKeyField, &out.RangeKeyField + *out = new(string) + **out = **in + } + if in.RangeKeyType != nil { + in, out := &in.RangeKeyType, &out.RangeKeyType + *out = new(string) + **out = **in + } + if in.RangeKeyValue != nil { + in, out := &in.RangeKeyValue, &out.RangeKeyValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionDynamodbInitParameters. +func (in *ErrorActionDynamodbInitParameters) DeepCopy() *ErrorActionDynamodbInitParameters { + if in == nil { + return nil + } + out := new(ErrorActionDynamodbInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ErrorActionDynamodbObservation) DeepCopyInto(out *ErrorActionDynamodbObservation) { + *out = *in + if in.HashKeyField != nil { + in, out := &in.HashKeyField, &out.HashKeyField + *out = new(string) + **out = **in + } + if in.HashKeyType != nil { + in, out := &in.HashKeyType, &out.HashKeyType + *out = new(string) + **out = **in + } + if in.HashKeyValue != nil { + in, out := &in.HashKeyValue, &out.HashKeyValue + *out = new(string) + **out = **in + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.PayloadField != nil { + in, out := &in.PayloadField, &out.PayloadField + *out = new(string) + **out = **in + } + if in.RangeKeyField != nil { + in, out := &in.RangeKeyField, &out.RangeKeyField + *out = new(string) + **out = **in + } + if in.RangeKeyType != nil { + in, out := &in.RangeKeyType, &out.RangeKeyType + *out = new(string) + **out = **in + } + if in.RangeKeyValue != nil { + in, out := &in.RangeKeyValue, &out.RangeKeyValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionDynamodbObservation. +func (in *ErrorActionDynamodbObservation) DeepCopy() *ErrorActionDynamodbObservation { + if in == nil { + return nil + } + out := new(ErrorActionDynamodbObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ErrorActionDynamodbParameters) DeepCopyInto(out *ErrorActionDynamodbParameters) { + *out = *in + if in.HashKeyField != nil { + in, out := &in.HashKeyField, &out.HashKeyField + *out = new(string) + **out = **in + } + if in.HashKeyType != nil { + in, out := &in.HashKeyType, &out.HashKeyType + *out = new(string) + **out = **in + } + if in.HashKeyValue != nil { + in, out := &in.HashKeyValue, &out.HashKeyValue + *out = new(string) + **out = **in + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.PayloadField != nil { + in, out := &in.PayloadField, &out.PayloadField + *out = new(string) + **out = **in + } + if in.RangeKeyField != nil { + in, out := &in.RangeKeyField, &out.RangeKeyField + *out = new(string) + **out = **in + } + if in.RangeKeyType != nil { + in, out := &in.RangeKeyType, &out.RangeKeyType + *out = new(string) + **out = **in + } + if in.RangeKeyValue != nil { + in, out := &in.RangeKeyValue, &out.RangeKeyValue + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionDynamodbParameters. +func (in *ErrorActionDynamodbParameters) DeepCopy() *ErrorActionDynamodbParameters { + if in == nil { + return nil + } + out := new(ErrorActionDynamodbParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ErrorActionDynamodbv2InitParameters) DeepCopyInto(out *ErrorActionDynamodbv2InitParameters) { + *out = *in + if in.PutItem != nil { + in, out := &in.PutItem, &out.PutItem + *out = new(Dynamodbv2PutItemInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionDynamodbv2InitParameters. +func (in *ErrorActionDynamodbv2InitParameters) DeepCopy() *ErrorActionDynamodbv2InitParameters { + if in == nil { + return nil + } + out := new(ErrorActionDynamodbv2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorActionDynamodbv2Observation) DeepCopyInto(out *ErrorActionDynamodbv2Observation) { + *out = *in + if in.PutItem != nil { + in, out := &in.PutItem, &out.PutItem + *out = new(Dynamodbv2PutItemObservation) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionDynamodbv2Observation. +func (in *ErrorActionDynamodbv2Observation) DeepCopy() *ErrorActionDynamodbv2Observation { + if in == nil { + return nil + } + out := new(ErrorActionDynamodbv2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ErrorActionDynamodbv2Parameters) DeepCopyInto(out *ErrorActionDynamodbv2Parameters) { + *out = *in + if in.PutItem != nil { + in, out := &in.PutItem, &out.PutItem + *out = new(Dynamodbv2PutItemParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionDynamodbv2Parameters. +func (in *ErrorActionDynamodbv2Parameters) DeepCopy() *ErrorActionDynamodbv2Parameters { + if in == nil { + return nil + } + out := new(ErrorActionDynamodbv2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorActionElasticsearchInitParameters) DeepCopyInto(out *ErrorActionElasticsearchInitParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionElasticsearchInitParameters. +func (in *ErrorActionElasticsearchInitParameters) DeepCopy() *ErrorActionElasticsearchInitParameters { + if in == nil { + return nil + } + out := new(ErrorActionElasticsearchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ErrorActionElasticsearchObservation) DeepCopyInto(out *ErrorActionElasticsearchObservation) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionElasticsearchObservation. +func (in *ErrorActionElasticsearchObservation) DeepCopy() *ErrorActionElasticsearchObservation { + if in == nil { + return nil + } + out := new(ErrorActionElasticsearchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorActionElasticsearchParameters) DeepCopyInto(out *ErrorActionElasticsearchParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionElasticsearchParameters. 
func (in *ErrorActionElasticsearchParameters) DeepCopy() *ErrorActionElasticsearchParameters {
	if in == nil {
		return nil
	}
	out := new(ErrorActionElasticsearchParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ErrorActionInitParameters) DeepCopyInto(out *ErrorActionInitParameters) {
	// Shallow-copy the value fields first; every non-nil pointer field is
	// then re-allocated and deep-copied below so the result shares no
	// memory with the receiver.
	*out = *in
	if in.CloudwatchAlarm != nil {
		in, out := &in.CloudwatchAlarm, &out.CloudwatchAlarm
		*out = new(ErrorActionCloudwatchAlarmInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.CloudwatchLogs != nil {
		in, out := &in.CloudwatchLogs, &out.CloudwatchLogs
		*out = new(ErrorActionCloudwatchLogsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.CloudwatchMetric != nil {
		in, out := &in.CloudwatchMetric, &out.CloudwatchMetric
		*out = new(ErrorActionCloudwatchMetricInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Dynamodb != nil {
		in, out := &in.Dynamodb, &out.Dynamodb
		*out = new(ErrorActionDynamodbInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Dynamodbv2 != nil {
		in, out := &in.Dynamodbv2, &out.Dynamodbv2
		*out = new(ErrorActionDynamodbv2InitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Elasticsearch != nil {
		in, out := &in.Elasticsearch, &out.Elasticsearch
		*out = new(ErrorActionElasticsearchInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Firehose != nil {
		in, out := &in.Firehose, &out.Firehose
		*out = new(FirehoseInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.HTTP != nil {
		in, out := &in.HTTP, &out.HTTP
		*out = new(HTTPInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.IotAnalytics != nil {
		in, out := &in.IotAnalytics, &out.IotAnalytics
		*out = new(IotAnalyticsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.IotEvents != nil {
		in, out := &in.IotEvents, &out.IotEvents
		*out = new(IotEventsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Kafka != nil {
		in, out := &in.Kafka, &out.Kafka
		*out = new(KafkaInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Kinesis != nil {
		in, out := &in.Kinesis, &out.Kinesis
		*out = new(KinesisInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Lambda != nil {
		in, out := &in.Lambda, &out.Lambda
		*out = new(LambdaInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Republish != nil {
		in, out := &in.Republish, &out.Republish
		*out = new(RepublishInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.S3 != nil {
		in, out := &in.S3, &out.S3
		*out = new(S3InitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Sns != nil {
		in, out := &in.Sns, &out.Sns
		*out = new(SnsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Sqs != nil {
		in, out := &in.Sqs, &out.Sqs
		*out = new(SqsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.StepFunctions != nil {
		in, out := &in.StepFunctions, &out.StepFunctions
		*out = new(StepFunctionsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Timestream != nil {
		in, out := &in.Timestream, &out.Timestream
		*out = new(TimestreamInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionInitParameters.
func (in *ErrorActionInitParameters) DeepCopy() *ErrorActionInitParameters {
	if in == nil {
		return nil
	}
	out := new(ErrorActionInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ErrorActionObservation) DeepCopyInto(out *ErrorActionObservation) {
	// Shallow-copy first; each non-nil pointer field below is re-allocated
	// so the copy is fully independent of the receiver.
	*out = *in
	if in.CloudwatchAlarm != nil {
		in, out := &in.CloudwatchAlarm, &out.CloudwatchAlarm
		*out = new(ErrorActionCloudwatchAlarmObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.CloudwatchLogs != nil {
		in, out := &in.CloudwatchLogs, &out.CloudwatchLogs
		*out = new(ErrorActionCloudwatchLogsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.CloudwatchMetric != nil {
		in, out := &in.CloudwatchMetric, &out.CloudwatchMetric
		*out = new(ErrorActionCloudwatchMetricObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Dynamodb != nil {
		in, out := &in.Dynamodb, &out.Dynamodb
		*out = new(ErrorActionDynamodbObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Dynamodbv2 != nil {
		in, out := &in.Dynamodbv2, &out.Dynamodbv2
		*out = new(ErrorActionDynamodbv2Observation)
		(*in).DeepCopyInto(*out)
	}
	if in.Elasticsearch != nil {
		in, out := &in.Elasticsearch, &out.Elasticsearch
		*out = new(ErrorActionElasticsearchObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Firehose != nil {
		in, out := &in.Firehose, &out.Firehose
		*out = new(FirehoseObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.HTTP != nil {
		in, out := &in.HTTP, &out.HTTP
		*out = new(HTTPObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.IotAnalytics != nil {
		in, out := &in.IotAnalytics, &out.IotAnalytics
		*out = new(IotAnalyticsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.IotEvents != nil {
		in, out := &in.IotEvents, &out.IotEvents
		*out = new(IotEventsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Kafka != nil {
		in, out := &in.Kafka, &out.Kafka
		*out = new(KafkaObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Kinesis != nil {
		in, out := &in.Kinesis, &out.Kinesis
		*out = new(KinesisObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Lambda != nil {
		in, out := &in.Lambda, &out.Lambda
		*out = new(LambdaObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Republish != nil {
		in, out := &in.Republish, &out.Republish
		*out = new(RepublishObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.S3 != nil {
		in, out := &in.S3, &out.S3
		*out = new(S3Observation)
		(*in).DeepCopyInto(*out)
	}
	if in.Sns != nil {
		in, out := &in.Sns, &out.Sns
		*out = new(SnsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Sqs != nil {
		in, out := &in.Sqs, &out.Sqs
		*out = new(SqsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.StepFunctions != nil {
		in, out := &in.StepFunctions, &out.StepFunctions
		*out = new(StepFunctionsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Timestream != nil {
		in, out := &in.Timestream, &out.Timestream
		*out = new(TimestreamObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionObservation.
func (in *ErrorActionObservation) DeepCopy() *ErrorActionObservation {
	if in == nil {
		return nil
	}
	out := new(ErrorActionObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ErrorActionParameters) DeepCopyInto(out *ErrorActionParameters) {
	// Shallow-copy first; each non-nil pointer field below is re-allocated
	// so the copy is fully independent of the receiver.
	*out = *in
	if in.CloudwatchAlarm != nil {
		in, out := &in.CloudwatchAlarm, &out.CloudwatchAlarm
		*out = new(ErrorActionCloudwatchAlarmParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.CloudwatchLogs != nil {
		in, out := &in.CloudwatchLogs, &out.CloudwatchLogs
		*out = new(ErrorActionCloudwatchLogsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.CloudwatchMetric != nil {
		in, out := &in.CloudwatchMetric, &out.CloudwatchMetric
		*out = new(ErrorActionCloudwatchMetricParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Dynamodb != nil {
		in, out := &in.Dynamodb, &out.Dynamodb
		*out = new(ErrorActionDynamodbParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Dynamodbv2 != nil {
		in, out := &in.Dynamodbv2, &out.Dynamodbv2
		*out = new(ErrorActionDynamodbv2Parameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Elasticsearch != nil {
		in, out := &in.Elasticsearch, &out.Elasticsearch
		*out = new(ErrorActionElasticsearchParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Firehose != nil {
		in, out := &in.Firehose, &out.Firehose
		*out = new(FirehoseParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.HTTP != nil {
		in, out := &in.HTTP, &out.HTTP
		*out = new(HTTPParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.IotAnalytics != nil {
		in, out := &in.IotAnalytics, &out.IotAnalytics
		*out = new(IotAnalyticsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.IotEvents != nil {
		in, out := &in.IotEvents, &out.IotEvents
		*out = new(IotEventsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Kafka != nil {
		in, out := &in.Kafka, &out.Kafka
		*out = new(KafkaParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Kinesis != nil {
		in, out := &in.Kinesis, &out.Kinesis
		*out = new(KinesisParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Lambda != nil {
		in, out := &in.Lambda, &out.Lambda
		*out = new(LambdaParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Republish != nil {
		in, out := &in.Republish, &out.Republish
		*out = new(RepublishParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.S3 != nil {
		in, out := &in.S3, &out.S3
		*out = new(S3Parameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Sns != nil {
		in, out := &in.Sns, &out.Sns
		*out = new(SnsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Sqs != nil {
		in, out := &in.Sqs, &out.Sqs
		*out = new(SqsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.StepFunctions != nil {
		in, out := &in.StepFunctions, &out.StepFunctions
		*out = new(StepFunctionsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Timestream != nil {
		in, out := &in.Timestream, &out.Timestream
		*out = new(TimestreamParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorActionParameters.
func (in *ErrorActionParameters) DeepCopy() *ErrorActionParameters {
	if in == nil {
		return nil
	}
	out := new(ErrorActionParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) {
	*out = *in
	// NamedShadowNames is a slice of string pointers: both the slice and
	// every non-nil element are freshly allocated.
	if in.NamedShadowNames != nil {
		in, out := &in.NamedShadowNames, &out.NamedShadowNames
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters.
func (in *FilterInitParameters) DeepCopy() *FilterInitParameters {
	if in == nil {
		return nil
	}
	out := new(FilterInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.NamedShadowNames != nil { + in, out := &in.NamedShadowNames, &out.NamedShadowNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. +func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.NamedShadowNames != nil { + in, out := &in.NamedShadowNames, &out.NamedShadowNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. +func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirehoseInitParameters) DeepCopyInto(out *FirehoseInitParameters) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.DeliveryStreamName != nil { + in, out := &in.DeliveryStreamName, &out.DeliveryStreamName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Separator != nil { + in, out := &in.Separator, &out.Separator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirehoseInitParameters. +func (in *FirehoseInitParameters) DeepCopy() *FirehoseInitParameters { + if in == nil { + return nil + } + out := new(FirehoseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirehoseObservation) DeepCopyInto(out *FirehoseObservation) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.DeliveryStreamName != nil { + in, out := &in.DeliveryStreamName, &out.DeliveryStreamName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Separator != nil { + in, out := &in.Separator, &out.Separator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirehoseObservation. +func (in *FirehoseObservation) DeepCopy() *FirehoseObservation { + if in == nil { + return nil + } + out := new(FirehoseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirehoseParameters) DeepCopyInto(out *FirehoseParameters) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.DeliveryStreamName != nil { + in, out := &in.DeliveryStreamName, &out.DeliveryStreamName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Separator != nil { + in, out := &in.Separator, &out.Separator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirehoseParameters. +func (in *FirehoseParameters) DeepCopy() *FirehoseParameters { + if in == nil { + return nil + } + out := new(FirehoseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHTTPHeaderInitParameters) DeepCopyInto(out *HTTPHTTPHeaderInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHTTPHeaderInitParameters. +func (in *HTTPHTTPHeaderInitParameters) DeepCopy() *HTTPHTTPHeaderInitParameters { + if in == nil { + return nil + } + out := new(HTTPHTTPHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HTTPHTTPHeaderObservation) DeepCopyInto(out *HTTPHTTPHeaderObservation) {
	*out = *in
	// Key/Value string pointers are re-allocated so the copy is independent.
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHTTPHeaderObservation.
func (in *HTTPHTTPHeaderObservation) DeepCopy() *HTTPHTTPHeaderObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPHTTPHeaderObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPHTTPHeaderParameters) DeepCopyInto(out *HTTPHTTPHeaderParameters) {
	*out = *in
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHTTPHeaderParameters.
func (in *HTTPHTTPHeaderParameters) DeepCopy() *HTTPHTTPHeaderParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPHTTPHeaderParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPHeaderInitParameters) DeepCopyInto(out *HTTPHeaderInitParameters) {
	*out = *in
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderInitParameters.
func (in *HTTPHeaderInitParameters) DeepCopy() *HTTPHeaderInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPHeaderInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPHeaderObservation) DeepCopyInto(out *HTTPHeaderObservation) {
	*out = *in
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderObservation.
func (in *HTTPHeaderObservation) DeepCopy() *HTTPHeaderObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPHeaderObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPHeaderParameters) DeepCopyInto(out *HTTPHeaderParameters) {
	*out = *in
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderParameters.
func (in *HTTPHeaderParameters) DeepCopy() *HTTPHeaderParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPHeaderParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPInitParameters) DeepCopyInto(out *HTTPInitParameters) { + *out = *in + if in.ConfirmationURL != nil { + in, out := &in.ConfirmationURL, &out.ConfirmationURL + *out = new(string) + **out = **in + } + if in.HTTPHeader != nil { + in, out := &in.HTTPHeader, &out.HTTPHeader + *out = make([]HTTPHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPInitParameters. +func (in *HTTPInitParameters) DeepCopy() *HTTPInitParameters { + if in == nil { + return nil + } + out := new(HTTPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPObservation) DeepCopyInto(out *HTTPObservation) { + *out = *in + if in.ConfirmationURL != nil { + in, out := &in.ConfirmationURL, &out.ConfirmationURL + *out = new(string) + **out = **in + } + if in.HTTPHeader != nil { + in, out := &in.HTTPHeader, &out.HTTPHeader + *out = make([]HTTPHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPObservation. +func (in *HTTPObservation) DeepCopy() *HTTPObservation { + if in == nil { + return nil + } + out := new(HTTPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HTTPParameters) DeepCopyInto(out *HTTPParameters) {
	*out = *in
	if in.ConfirmationURL != nil {
		in, out := &in.ConfirmationURL, &out.ConfirmationURL
		*out = new(string)
		**out = **in
	}
	// HTTPHeader is a slice of structs; each element is deep-copied in place.
	if in.HTTPHeader != nil {
		in, out := &in.HTTPHeader, &out.HTTPHeader
		*out = make([]HTTPHeaderParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPParameters.
func (in *HTTPParameters) DeepCopy() *HTTPParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HeaderInitParameters) DeepCopyInto(out *HeaderInitParameters) {
	*out = *in
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderInitParameters.
func (in *HeaderInitParameters) DeepCopy() *HeaderInitParameters {
	if in == nil {
		return nil
	}
	out := new(HeaderInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HeaderObservation) DeepCopyInto(out *HeaderObservation) {
	*out = *in
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderObservation.
func (in *HeaderObservation) DeepCopy() *HeaderObservation {
	if in == nil {
		return nil
	}
	out := new(HeaderObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HeaderParameters) DeepCopyInto(out *HeaderParameters) {
	*out = *in
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderParameters.
func (in *HeaderParameters) DeepCopy() *HeaderParameters {
	if in == nil {
		return nil
	}
	out := new(HeaderParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexingConfiguration) DeepCopyInto(out *IndexingConfiguration) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingConfiguration.
func (in *IndexingConfiguration) DeepCopy() *IndexingConfiguration {
	if in == nil {
		return nil
	}
	out := new(IndexingConfiguration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IndexingConfiguration) DeepCopyObject() runtime.Object {
	// Return an untyped nil interface (not a typed nil pointer) when the
	// receiver is nil, as the runtime.Object contract expects.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexingConfigurationInitParameters) DeepCopyInto(out *IndexingConfigurationInitParameters) {
	*out = *in
	if in.ThingGroupIndexingConfiguration != nil {
		in, out := &in.ThingGroupIndexingConfiguration, &out.ThingGroupIndexingConfiguration
		*out = new(ThingGroupIndexingConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ThingIndexingConfiguration != nil {
		in, out := &in.ThingIndexingConfiguration, &out.ThingIndexingConfiguration
		*out = new(ThingIndexingConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingConfigurationInitParameters.
func (in *IndexingConfigurationInitParameters) DeepCopy() *IndexingConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(IndexingConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexingConfigurationList) DeepCopyInto(out *IndexingConfigurationList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items slice is re-allocated and every element deep-copied in place.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]IndexingConfiguration, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingConfigurationList.
func (in *IndexingConfigurationList) DeepCopy() *IndexingConfigurationList {
	if in == nil {
		return nil
	}
	out := new(IndexingConfigurationList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IndexingConfigurationList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexingConfigurationObservation) DeepCopyInto(out *IndexingConfigurationObservation) {
	*out = *in
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.ThingGroupIndexingConfiguration != nil {
		in, out := &in.ThingGroupIndexingConfiguration, &out.ThingGroupIndexingConfiguration
		*out = new(ThingGroupIndexingConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ThingIndexingConfiguration != nil {
		in, out := &in.ThingIndexingConfiguration, &out.ThingIndexingConfiguration
		*out = new(ThingIndexingConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingConfigurationObservation.
func (in *IndexingConfigurationObservation) DeepCopy() *IndexingConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(IndexingConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IndexingConfigurationParameters) DeepCopyInto(out *IndexingConfigurationParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ThingGroupIndexingConfiguration != nil { + in, out := &in.ThingGroupIndexingConfiguration, &out.ThingGroupIndexingConfiguration + *out = new(ThingGroupIndexingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ThingIndexingConfiguration != nil { + in, out := &in.ThingIndexingConfiguration, &out.ThingIndexingConfiguration + *out = new(ThingIndexingConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingConfigurationParameters. +func (in *IndexingConfigurationParameters) DeepCopy() *IndexingConfigurationParameters { + if in == nil { + return nil + } + out := new(IndexingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexingConfigurationSpec) DeepCopyInto(out *IndexingConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingConfigurationSpec. +func (in *IndexingConfigurationSpec) DeepCopy() *IndexingConfigurationSpec { + if in == nil { + return nil + } + out := new(IndexingConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexingConfigurationStatus) DeepCopyInto(out *IndexingConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingConfigurationStatus. +func (in *IndexingConfigurationStatus) DeepCopy() *IndexingConfigurationStatus { + if in == nil { + return nil + } + out := new(IndexingConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IotAnalyticsInitParameters) DeepCopyInto(out *IotAnalyticsInitParameters) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.ChannelName != nil { + in, out := &in.ChannelName, &out.ChannelName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotAnalyticsInitParameters. +func (in *IotAnalyticsInitParameters) DeepCopy() *IotAnalyticsInitParameters { + if in == nil { + return nil + } + out := new(IotAnalyticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IotAnalyticsObservation) DeepCopyInto(out *IotAnalyticsObservation) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.ChannelName != nil { + in, out := &in.ChannelName, &out.ChannelName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotAnalyticsObservation. +func (in *IotAnalyticsObservation) DeepCopy() *IotAnalyticsObservation { + if in == nil { + return nil + } + out := new(IotAnalyticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IotAnalyticsParameters) DeepCopyInto(out *IotAnalyticsParameters) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.ChannelName != nil { + in, out := &in.ChannelName, &out.ChannelName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotAnalyticsParameters. +func (in *IotAnalyticsParameters) DeepCopy() *IotAnalyticsParameters { + if in == nil { + return nil + } + out := new(IotAnalyticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IotEventsInitParameters) DeepCopyInto(out *IotEventsInitParameters) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.InputName != nil { + in, out := &in.InputName, &out.InputName + *out = new(string) + **out = **in + } + if in.MessageID != nil { + in, out := &in.MessageID, &out.MessageID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotEventsInitParameters. +func (in *IotEventsInitParameters) DeepCopy() *IotEventsInitParameters { + if in == nil { + return nil + } + out := new(IotEventsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IotEventsObservation) DeepCopyInto(out *IotEventsObservation) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.InputName != nil { + in, out := &in.InputName, &out.InputName + *out = new(string) + **out = **in + } + if in.MessageID != nil { + in, out := &in.MessageID, &out.MessageID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotEventsObservation. +func (in *IotEventsObservation) DeepCopy() *IotEventsObservation { + if in == nil { + return nil + } + out := new(IotEventsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IotEventsParameters) DeepCopyInto(out *IotEventsParameters) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.InputName != nil { + in, out := &in.InputName, &out.InputName + *out = new(string) + **out = **in + } + if in.MessageID != nil { + in, out := &in.MessageID, &out.MessageID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotEventsParameters. +func (in *IotEventsParameters) DeepCopy() *IotEventsParameters { + if in == nil { + return nil + } + out := new(IotEventsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaHeaderInitParameters) DeepCopyInto(out *KafkaHeaderInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaHeaderInitParameters. +func (in *KafkaHeaderInitParameters) DeepCopy() *KafkaHeaderInitParameters { + if in == nil { + return nil + } + out := new(KafkaHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaHeaderObservation) DeepCopyInto(out *KafkaHeaderObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaHeaderObservation. +func (in *KafkaHeaderObservation) DeepCopy() *KafkaHeaderObservation { + if in == nil { + return nil + } + out := new(KafkaHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaHeaderParameters) DeepCopyInto(out *KafkaHeaderParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaHeaderParameters. +func (in *KafkaHeaderParameters) DeepCopy() *KafkaHeaderParameters { + if in == nil { + return nil + } + out := new(KafkaHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaInitParameters) DeepCopyInto(out *KafkaInitParameters) { + *out = *in + if in.ClientProperties != nil { + in, out := &in.ClientProperties, &out.ClientProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaInitParameters. +func (in *KafkaInitParameters) DeepCopy() *KafkaInitParameters { + if in == nil { + return nil + } + out := new(KafkaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaObservation) DeepCopyInto(out *KafkaObservation) { + *out = *in + if in.ClientProperties != nil { + in, out := &in.ClientProperties, &out.ClientProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaObservation. +func (in *KafkaObservation) DeepCopy() *KafkaObservation { + if in == nil { + return nil + } + out := new(KafkaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaParameters) DeepCopyInto(out *KafkaParameters) { + *out = *in + if in.ClientProperties != nil { + in, out := &in.ClientProperties, &out.ClientProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaParameters. +func (in *KafkaParameters) DeepCopy() *KafkaParameters { + if in == nil { + return nil + } + out := new(KafkaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisInitParameters) DeepCopyInto(out *KinesisInitParameters) { + *out = *in + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisInitParameters. 
+func (in *KinesisInitParameters) DeepCopy() *KinesisInitParameters { + if in == nil { + return nil + } + out := new(KinesisInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisObservation) DeepCopyInto(out *KinesisObservation) { + *out = *in + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisObservation. +func (in *KinesisObservation) DeepCopy() *KinesisObservation { + if in == nil { + return nil + } + out := new(KinesisObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisParameters) DeepCopyInto(out *KinesisParameters) { + *out = *in + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisParameters. +func (in *KinesisParameters) DeepCopy() *KinesisParameters { + if in == nil { + return nil + } + out := new(KinesisParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LambdaInitParameters) DeepCopyInto(out *LambdaInitParameters) { + *out = *in + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaInitParameters. +func (in *LambdaInitParameters) DeepCopy() *LambdaInitParameters { + if in == nil { + return nil + } + out := new(LambdaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaObservation) DeepCopyInto(out *LambdaObservation) { + *out = *in + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaObservation. +func (in *LambdaObservation) DeepCopy() *LambdaObservation { + if in == nil { + return nil + } + out := new(LambdaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaParameters) DeepCopyInto(out *LambdaParameters) { + *out = *in + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaParameters. +func (in *LambdaParameters) DeepCopy() *LambdaParameters { + if in == nil { + return nil + } + out := new(LambdaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedFieldInitParameters) DeepCopyInto(out *ManagedFieldInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldInitParameters. +func (in *ManagedFieldInitParameters) DeepCopy() *ManagedFieldInitParameters { + if in == nil { + return nil + } + out := new(ManagedFieldInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedFieldObservation) DeepCopyInto(out *ManagedFieldObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldObservation. +func (in *ManagedFieldObservation) DeepCopy() *ManagedFieldObservation { + if in == nil { + return nil + } + out := new(ManagedFieldObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedFieldParameters) DeepCopyInto(out *ManagedFieldParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldParameters. 
+func (in *ManagedFieldParameters) DeepCopy() *ManagedFieldParameters { + if in == nil { + return nil + } + out := new(ManagedFieldParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataInitParameters) DeepCopyInto(out *MetadataInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataInitParameters. +func (in *MetadataInitParameters) DeepCopy() *MetadataInitParameters { + if in == nil { + return nil + } + out := new(MetadataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataObservation) DeepCopyInto(out *MetadataObservation) { + *out = *in + if in.CreationDate != nil { + in, out := &in.CreationDate, &out.CreationDate + *out = new(string) + **out = **in + } + if in.ParentGroupName != nil { + in, out := &in.ParentGroupName, &out.ParentGroupName + *out = new(string) + **out = **in + } + if in.RootToParentGroups != nil { + in, out := &in.RootToParentGroups, &out.RootToParentGroups + *out = make([]RootToParentGroupsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataObservation. +func (in *MetadataObservation) DeepCopy() *MetadataObservation { + if in == nil { + return nil + } + out := new(MetadataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataParameters) DeepCopyInto(out *MetadataParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataParameters. 
+func (in *MetadataParameters) DeepCopy() *MetadataParameters { + if in == nil { + return nil + } + out := new(MetadataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreProvisioningHookInitParameters) DeepCopyInto(out *PreProvisioningHookInitParameters) { + *out = *in + if in.PayloadVersion != nil { + in, out := &in.PayloadVersion, &out.PayloadVersion + *out = new(string) + **out = **in + } + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreProvisioningHookInitParameters. +func (in *PreProvisioningHookInitParameters) DeepCopy() *PreProvisioningHookInitParameters { + if in == nil { + return nil + } + out := new(PreProvisioningHookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreProvisioningHookObservation) DeepCopyInto(out *PreProvisioningHookObservation) { + *out = *in + if in.PayloadVersion != nil { + in, out := &in.PayloadVersion, &out.PayloadVersion + *out = new(string) + **out = **in + } + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreProvisioningHookObservation. +func (in *PreProvisioningHookObservation) DeepCopy() *PreProvisioningHookObservation { + if in == nil { + return nil + } + out := new(PreProvisioningHookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreProvisioningHookParameters) DeepCopyInto(out *PreProvisioningHookParameters) { + *out = *in + if in.PayloadVersion != nil { + in, out := &in.PayloadVersion, &out.PayloadVersion + *out = new(string) + **out = **in + } + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreProvisioningHookParameters. +func (in *PreProvisioningHookParameters) DeepCopy() *PreProvisioningHookParameters { + if in == nil { + return nil + } + out := new(PreProvisioningHookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropertiesInitParameters) DeepCopyInto(out *PropertiesInitParameters) { + *out = *in + if in.AttributePayload != nil { + in, out := &in.AttributePayload, &out.AttributePayload + *out = new(AttributePayloadInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertiesInitParameters. +func (in *PropertiesInitParameters) DeepCopy() *PropertiesInitParameters { + if in == nil { + return nil + } + out := new(PropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PropertiesObservation) DeepCopyInto(out *PropertiesObservation) { + *out = *in + if in.AttributePayload != nil { + in, out := &in.AttributePayload, &out.AttributePayload + *out = new(AttributePayloadObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertiesObservation. +func (in *PropertiesObservation) DeepCopy() *PropertiesObservation { + if in == nil { + return nil + } + out := new(PropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropertiesParameters) DeepCopyInto(out *PropertiesParameters) { + *out = *in + if in.AttributePayload != nil { + in, out := &in.AttributePayload, &out.AttributePayload + *out = new(AttributePayloadParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertiesParameters. +func (in *PropertiesParameters) DeepCopy() *PropertiesParameters { + if in == nil { + return nil + } + out := new(PropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisioningTemplate) DeepCopyInto(out *ProvisioningTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningTemplate. 
+func (in *ProvisioningTemplate) DeepCopy() *ProvisioningTemplate { + if in == nil { + return nil + } + out := new(ProvisioningTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProvisioningTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisioningTemplateInitParameters) DeepCopyInto(out *ProvisioningTemplateInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.PreProvisioningHook != nil { + in, out := &in.PreProvisioningHook, &out.PreProvisioningHook + *out = new(PreProvisioningHookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProvisioningRoleArn != nil { + in, out := &in.ProvisioningRoleArn, &out.ProvisioningRoleArn + *out = new(string) + **out = **in + } + if in.ProvisioningRoleArnRef != nil { + in, out := &in.ProvisioningRoleArnRef, &out.ProvisioningRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProvisioningRoleArnSelector != nil { + in, out := &in.ProvisioningRoleArnSelector, &out.ProvisioningRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemplateBody != nil { + in, out := &in.TemplateBody, &out.TemplateBody + *out = new(string) + **out = **in + } + if in.Type != 
nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningTemplateInitParameters. +func (in *ProvisioningTemplateInitParameters) DeepCopy() *ProvisioningTemplateInitParameters { + if in == nil { + return nil + } + out := new(ProvisioningTemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisioningTemplateList) DeepCopyInto(out *ProvisioningTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProvisioningTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningTemplateList. +func (in *ProvisioningTemplateList) DeepCopy() *ProvisioningTemplateList { + if in == nil { + return nil + } + out := new(ProvisioningTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProvisioningTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProvisioningTemplateObservation) DeepCopyInto(out *ProvisioningTemplateObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DefaultVersionID != nil { + in, out := &in.DefaultVersionID, &out.DefaultVersionID + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PreProvisioningHook != nil { + in, out := &in.PreProvisioningHook, &out.PreProvisioningHook + *out = new(PreProvisioningHookObservation) + (*in).DeepCopyInto(*out) + } + if in.ProvisioningRoleArn != nil { + in, out := &in.ProvisioningRoleArn, &out.ProvisioningRoleArn + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemplateBody != nil { + in, out := &in.TemplateBody, &out.TemplateBody + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningTemplateObservation. 
+func (in *ProvisioningTemplateObservation) DeepCopy() *ProvisioningTemplateObservation { + if in == nil { + return nil + } + out := new(ProvisioningTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisioningTemplateParameters) DeepCopyInto(out *ProvisioningTemplateParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.PreProvisioningHook != nil { + in, out := &in.PreProvisioningHook, &out.PreProvisioningHook + *out = new(PreProvisioningHookParameters) + (*in).DeepCopyInto(*out) + } + if in.ProvisioningRoleArn != nil { + in, out := &in.ProvisioningRoleArn, &out.ProvisioningRoleArn + *out = new(string) + **out = **in + } + if in.ProvisioningRoleArnRef != nil { + in, out := &in.ProvisioningRoleArnRef, &out.ProvisioningRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProvisioningRoleArnSelector != nil { + in, out := &in.ProvisioningRoleArnSelector, &out.ProvisioningRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemplateBody != nil { + in, out := &in.TemplateBody, &out.TemplateBody + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new ProvisioningTemplateParameters. +func (in *ProvisioningTemplateParameters) DeepCopy() *ProvisioningTemplateParameters { + if in == nil { + return nil + } + out := new(ProvisioningTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisioningTemplateSpec) DeepCopyInto(out *ProvisioningTemplateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningTemplateSpec. +func (in *ProvisioningTemplateSpec) DeepCopy() *ProvisioningTemplateSpec { + if in == nil { + return nil + } + out := new(ProvisioningTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisioningTemplateStatus) DeepCopyInto(out *ProvisioningTemplateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningTemplateStatus. +func (in *ProvisioningTemplateStatus) DeepCopy() *ProvisioningTemplateStatus { + if in == nil { + return nil + } + out := new(ProvisioningTemplateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PutItemInitParameters) DeepCopyInto(out *PutItemInitParameters) { + *out = *in + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PutItemInitParameters. +func (in *PutItemInitParameters) DeepCopy() *PutItemInitParameters { + if in == nil { + return nil + } + out := new(PutItemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PutItemObservation) DeepCopyInto(out *PutItemObservation) { + *out = *in + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PutItemObservation. +func (in *PutItemObservation) DeepCopy() *PutItemObservation { + if in == nil { + return nil + } + out := new(PutItemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PutItemParameters) DeepCopyInto(out *PutItemParameters) { + *out = *in + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PutItemParameters. +func (in *PutItemParameters) DeepCopy() *PutItemParameters { + if in == nil { + return nil + } + out := new(PutItemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepublishInitParameters) DeepCopyInto(out *RepublishInitParameters) { + *out = *in + if in.Qos != nil { + in, out := &in.Qos, &out.Qos + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepublishInitParameters. +func (in *RepublishInitParameters) DeepCopy() *RepublishInitParameters { + if in == nil { + return nil + } + out := new(RepublishInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepublishObservation) DeepCopyInto(out *RepublishObservation) { + *out = *in + if in.Qos != nil { + in, out := &in.Qos, &out.Qos + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepublishObservation. +func (in *RepublishObservation) DeepCopy() *RepublishObservation { + if in == nil { + return nil + } + out := new(RepublishObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepublishParameters) DeepCopyInto(out *RepublishParameters) { + *out = *in + if in.Qos != nil { + in, out := &in.Qos, &out.Qos + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepublishParameters. +func (in *RepublishParameters) DeepCopy() *RepublishParameters { + if in == nil { + return nil + } + out := new(RepublishParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootToParentGroupsInitParameters) DeepCopyInto(out *RootToParentGroupsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootToParentGroupsInitParameters. +func (in *RootToParentGroupsInitParameters) DeepCopy() *RootToParentGroupsInitParameters { + if in == nil { + return nil + } + out := new(RootToParentGroupsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootToParentGroupsObservation) DeepCopyInto(out *RootToParentGroupsObservation) { + *out = *in + if in.GroupArn != nil { + in, out := &in.GroupArn, &out.GroupArn + *out = new(string) + **out = **in + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootToParentGroupsObservation. 
+func (in *RootToParentGroupsObservation) DeepCopy() *RootToParentGroupsObservation { + if in == nil { + return nil + } + out := new(RootToParentGroupsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootToParentGroupsParameters) DeepCopyInto(out *RootToParentGroupsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootToParentGroupsParameters. +func (in *RootToParentGroupsParameters) DeepCopy() *RootToParentGroupsParameters { + if in == nil { + return nil + } + out := new(RootToParentGroupsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InitParameters) DeepCopyInto(out *S3InitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InitParameters. +func (in *S3InitParameters) DeepCopy() *S3InitParameters { + if in == nil { + return nil + } + out := new(S3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Observation) DeepCopyInto(out *S3Observation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Observation. +func (in *S3Observation) DeepCopy() *S3Observation { + if in == nil { + return nil + } + out := new(S3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Parameters) DeepCopyInto(out *S3Parameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Parameters. +func (in *S3Parameters) DeepCopy() *S3Parameters { + if in == nil { + return nil + } + out := new(S3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnsInitParameters) DeepCopyInto(out *SnsInitParameters) { + *out = *in + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } + if in.TargetArnRef != nil { + in, out := &in.TargetArnRef, &out.TargetArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetArnSelector != nil { + in, out := &in.TargetArnSelector, &out.TargetArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnsInitParameters. +func (in *SnsInitParameters) DeepCopy() *SnsInitParameters { + if in == nil { + return nil + } + out := new(SnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnsObservation) DeepCopyInto(out *SnsObservation) { + *out = *in + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnsObservation. 
+func (in *SnsObservation) DeepCopy() *SnsObservation { + if in == nil { + return nil + } + out := new(SnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnsParameters) DeepCopyInto(out *SnsParameters) { + *out = *in + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } + if in.TargetArnRef != nil { + in, out := &in.TargetArnRef, &out.TargetArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetArnSelector != nil { + in, out := &in.TargetArnSelector, &out.TargetArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnsParameters. +func (in *SnsParameters) DeepCopy() *SnsParameters { + if in == nil { + return nil + } + out := new(SnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SqsInitParameters) DeepCopyInto(out *SqsInitParameters) { + *out = *in + if in.QueueURL != nil { + in, out := &in.QueueURL, &out.QueueURL + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UseBase64 != nil { + in, out := &in.UseBase64, &out.UseBase64 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqsInitParameters. +func (in *SqsInitParameters) DeepCopy() *SqsInitParameters { + if in == nil { + return nil + } + out := new(SqsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqsObservation) DeepCopyInto(out *SqsObservation) { + *out = *in + if in.QueueURL != nil { + in, out := &in.QueueURL, &out.QueueURL + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UseBase64 != nil { + in, out := &in.UseBase64, &out.UseBase64 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqsObservation. +func (in *SqsObservation) DeepCopy() *SqsObservation { + if in == nil { + return nil + } + out := new(SqsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SqsParameters) DeepCopyInto(out *SqsParameters) { + *out = *in + if in.QueueURL != nil { + in, out := &in.QueueURL, &out.QueueURL + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UseBase64 != nil { + in, out := &in.UseBase64, &out.UseBase64 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqsParameters. +func (in *SqsParameters) DeepCopy() *SqsParameters { + if in == nil { + return nil + } + out := new(SqsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepFunctionsInitParameters) DeepCopyInto(out *StepFunctionsInitParameters) { + *out = *in + if in.ExecutionNamePrefix != nil { + in, out := &in.ExecutionNamePrefix, &out.ExecutionNamePrefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateMachineName != nil { + in, out := &in.StateMachineName, &out.StateMachineName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepFunctionsInitParameters. +func (in *StepFunctionsInitParameters) DeepCopy() *StepFunctionsInitParameters { + if in == nil { + return nil + } + out := new(StepFunctionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepFunctionsObservation) DeepCopyInto(out *StepFunctionsObservation) { + *out = *in + if in.ExecutionNamePrefix != nil { + in, out := &in.ExecutionNamePrefix, &out.ExecutionNamePrefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateMachineName != nil { + in, out := &in.StateMachineName, &out.StateMachineName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepFunctionsObservation. +func (in *StepFunctionsObservation) DeepCopy() *StepFunctionsObservation { + if in == nil { + return nil + } + out := new(StepFunctionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepFunctionsParameters) DeepCopyInto(out *StepFunctionsParameters) { + *out = *in + if in.ExecutionNamePrefix != nil { + in, out := &in.ExecutionNamePrefix, &out.ExecutionNamePrefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateMachineName != nil { + in, out := &in.StateMachineName, &out.StateMachineName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepFunctionsParameters. +func (in *StepFunctionsParameters) DeepCopy() *StepFunctionsParameters { + if in == nil { + return nil + } + out := new(StepFunctionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingGroup) DeepCopyInto(out *ThingGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingGroup. +func (in *ThingGroup) DeepCopy() *ThingGroup { + if in == nil { + return nil + } + out := new(ThingGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ThingGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingGroupIndexingConfigurationInitParameters) DeepCopyInto(out *ThingGroupIndexingConfigurationInitParameters) { + *out = *in + if in.CustomField != nil { + in, out := &in.CustomField, &out.CustomField + *out = make([]CustomFieldInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagedField != nil { + in, out := &in.ManagedField, &out.ManagedField + *out = make([]ManagedFieldInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThingGroupIndexingMode != nil { + in, out := &in.ThingGroupIndexingMode, &out.ThingGroupIndexingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingGroupIndexingConfigurationInitParameters. +func (in *ThingGroupIndexingConfigurationInitParameters) DeepCopy() *ThingGroupIndexingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ThingGroupIndexingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ThingGroupIndexingConfigurationObservation) DeepCopyInto(out *ThingGroupIndexingConfigurationObservation) { + *out = *in + if in.CustomField != nil { + in, out := &in.CustomField, &out.CustomField + *out = make([]CustomFieldObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagedField != nil { + in, out := &in.ManagedField, &out.ManagedField + *out = make([]ManagedFieldObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThingGroupIndexingMode != nil { + in, out := &in.ThingGroupIndexingMode, &out.ThingGroupIndexingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingGroupIndexingConfigurationObservation. +func (in *ThingGroupIndexingConfigurationObservation) DeepCopy() *ThingGroupIndexingConfigurationObservation { + if in == nil { + return nil + } + out := new(ThingGroupIndexingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingGroupIndexingConfigurationParameters) DeepCopyInto(out *ThingGroupIndexingConfigurationParameters) { + *out = *in + if in.CustomField != nil { + in, out := &in.CustomField, &out.CustomField + *out = make([]CustomFieldParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagedField != nil { + in, out := &in.ManagedField, &out.ManagedField + *out = make([]ManagedFieldParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThingGroupIndexingMode != nil { + in, out := &in.ThingGroupIndexingMode, &out.ThingGroupIndexingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingGroupIndexingConfigurationParameters. 
+func (in *ThingGroupIndexingConfigurationParameters) DeepCopy() *ThingGroupIndexingConfigurationParameters { + if in == nil { + return nil + } + out := new(ThingGroupIndexingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingGroupInitParameters) DeepCopyInto(out *ThingGroupInitParameters) { + *out = *in + if in.ParentGroupName != nil { + in, out := &in.ParentGroupName, &out.ParentGroupName + *out = new(string) + **out = **in + } + if in.ParentGroupNameRef != nil { + in, out := &in.ParentGroupNameRef, &out.ParentGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ParentGroupNameSelector != nil { + in, out := &in.ParentGroupNameSelector, &out.ParentGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = new(PropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingGroupInitParameters. +func (in *ThingGroupInitParameters) DeepCopy() *ThingGroupInitParameters { + if in == nil { + return nil + } + out := new(ThingGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingGroupList) DeepCopyInto(out *ThingGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThingGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingGroupList. +func (in *ThingGroupList) DeepCopy() *ThingGroupList { + if in == nil { + return nil + } + out := new(ThingGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ThingGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingGroupObservation) DeepCopyInto(out *ThingGroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]MetadataObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ParentGroupName != nil { + in, out := &in.ParentGroupName, &out.ParentGroupName + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = new(PropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != 
nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingGroupObservation. +func (in *ThingGroupObservation) DeepCopy() *ThingGroupObservation { + if in == nil { + return nil + } + out := new(ThingGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingGroupParameters) DeepCopyInto(out *ThingGroupParameters) { + *out = *in + if in.ParentGroupName != nil { + in, out := &in.ParentGroupName, &out.ParentGroupName + *out = new(string) + **out = **in + } + if in.ParentGroupNameRef != nil { + in, out := &in.ParentGroupNameRef, &out.ParentGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ParentGroupNameSelector != nil { + in, out := &in.ParentGroupNameSelector, &out.ParentGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = new(PropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new ThingGroupParameters. +func (in *ThingGroupParameters) DeepCopy() *ThingGroupParameters { + if in == nil { + return nil + } + out := new(ThingGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingGroupSpec) DeepCopyInto(out *ThingGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingGroupSpec. +func (in *ThingGroupSpec) DeepCopy() *ThingGroupSpec { + if in == nil { + return nil + } + out := new(ThingGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingGroupStatus) DeepCopyInto(out *ThingGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingGroupStatus. +func (in *ThingGroupStatus) DeepCopy() *ThingGroupStatus { + if in == nil { + return nil + } + out := new(ThingGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingIndexingConfigurationCustomFieldInitParameters) DeepCopyInto(out *ThingIndexingConfigurationCustomFieldInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingIndexingConfigurationCustomFieldInitParameters. +func (in *ThingIndexingConfigurationCustomFieldInitParameters) DeepCopy() *ThingIndexingConfigurationCustomFieldInitParameters { + if in == nil { + return nil + } + out := new(ThingIndexingConfigurationCustomFieldInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingIndexingConfigurationCustomFieldObservation) DeepCopyInto(out *ThingIndexingConfigurationCustomFieldObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingIndexingConfigurationCustomFieldObservation. +func (in *ThingIndexingConfigurationCustomFieldObservation) DeepCopy() *ThingIndexingConfigurationCustomFieldObservation { + if in == nil { + return nil + } + out := new(ThingIndexingConfigurationCustomFieldObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingIndexingConfigurationCustomFieldParameters) DeepCopyInto(out *ThingIndexingConfigurationCustomFieldParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingIndexingConfigurationCustomFieldParameters. +func (in *ThingIndexingConfigurationCustomFieldParameters) DeepCopy() *ThingIndexingConfigurationCustomFieldParameters { + if in == nil { + return nil + } + out := new(ThingIndexingConfigurationCustomFieldParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingIndexingConfigurationInitParameters) DeepCopyInto(out *ThingIndexingConfigurationInitParameters) { + *out = *in + if in.CustomField != nil { + in, out := &in.CustomField, &out.CustomField + *out = make([]ThingIndexingConfigurationCustomFieldInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeviceDefenderIndexingMode != nil { + in, out := &in.DeviceDefenderIndexingMode, &out.DeviceDefenderIndexingMode + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagedField != nil { + in, out := &in.ManagedField, &out.ManagedField + *out = make([]ThingIndexingConfigurationManagedFieldInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NamedShadowIndexingMode != nil { + in, out := &in.NamedShadowIndexingMode, &out.NamedShadowIndexingMode + *out = new(string) + **out = **in + } + if in.ThingConnectivityIndexingMode != nil { + in, out := &in.ThingConnectivityIndexingMode, &out.ThingConnectivityIndexingMode + 
*out = new(string) + **out = **in + } + if in.ThingIndexingMode != nil { + in, out := &in.ThingIndexingMode, &out.ThingIndexingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingIndexingConfigurationInitParameters. +func (in *ThingIndexingConfigurationInitParameters) DeepCopy() *ThingIndexingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ThingIndexingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingIndexingConfigurationManagedFieldInitParameters) DeepCopyInto(out *ThingIndexingConfigurationManagedFieldInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingIndexingConfigurationManagedFieldInitParameters. +func (in *ThingIndexingConfigurationManagedFieldInitParameters) DeepCopy() *ThingIndexingConfigurationManagedFieldInitParameters { + if in == nil { + return nil + } + out := new(ThingIndexingConfigurationManagedFieldInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingIndexingConfigurationManagedFieldObservation) DeepCopyInto(out *ThingIndexingConfigurationManagedFieldObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingIndexingConfigurationManagedFieldObservation. +func (in *ThingIndexingConfigurationManagedFieldObservation) DeepCopy() *ThingIndexingConfigurationManagedFieldObservation { + if in == nil { + return nil + } + out := new(ThingIndexingConfigurationManagedFieldObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingIndexingConfigurationManagedFieldParameters) DeepCopyInto(out *ThingIndexingConfigurationManagedFieldParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingIndexingConfigurationManagedFieldParameters. +func (in *ThingIndexingConfigurationManagedFieldParameters) DeepCopy() *ThingIndexingConfigurationManagedFieldParameters { + if in == nil { + return nil + } + out := new(ThingIndexingConfigurationManagedFieldParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingIndexingConfigurationObservation) DeepCopyInto(out *ThingIndexingConfigurationObservation) { + *out = *in + if in.CustomField != nil { + in, out := &in.CustomField, &out.CustomField + *out = make([]ThingIndexingConfigurationCustomFieldObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeviceDefenderIndexingMode != nil { + in, out := &in.DeviceDefenderIndexingMode, &out.DeviceDefenderIndexingMode + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ManagedField != nil { + in, out := &in.ManagedField, &out.ManagedField + *out = make([]ThingIndexingConfigurationManagedFieldObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NamedShadowIndexingMode != nil { + in, out := &in.NamedShadowIndexingMode, &out.NamedShadowIndexingMode + *out = new(string) + **out = **in + } + if in.ThingConnectivityIndexingMode != nil { + in, out := &in.ThingConnectivityIndexingMode, &out.ThingConnectivityIndexingMode + *out = new(string) + **out = **in + } + if in.ThingIndexingMode != nil { + in, out := &in.ThingIndexingMode, &out.ThingIndexingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingIndexingConfigurationObservation. +func (in *ThingIndexingConfigurationObservation) DeepCopy() *ThingIndexingConfigurationObservation { + if in == nil { + return nil + } + out := new(ThingIndexingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingIndexingConfigurationParameters) DeepCopyInto(out *ThingIndexingConfigurationParameters) { + *out = *in + if in.CustomField != nil { + in, out := &in.CustomField, &out.CustomField + *out = make([]ThingIndexingConfigurationCustomFieldParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeviceDefenderIndexingMode != nil { + in, out := &in.DeviceDefenderIndexingMode, &out.DeviceDefenderIndexingMode + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagedField != nil { + in, out := &in.ManagedField, &out.ManagedField + *out = make([]ThingIndexingConfigurationManagedFieldParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NamedShadowIndexingMode != nil { + in, out := &in.NamedShadowIndexingMode, &out.NamedShadowIndexingMode + *out = new(string) + **out = **in + } + if in.ThingConnectivityIndexingMode != nil { + in, out := &in.ThingConnectivityIndexingMode, &out.ThingConnectivityIndexingMode + *out = new(string) + **out = **in + } + if in.ThingIndexingMode != nil { + in, out := &in.ThingIndexingMode, &out.ThingIndexingMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingIndexingConfigurationParameters. +func (in *ThingIndexingConfigurationParameters) DeepCopy() *ThingIndexingConfigurationParameters { + if in == nil { + return nil + } + out := new(ThingIndexingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingType) DeepCopyInto(out *ThingType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingType. +func (in *ThingType) DeepCopy() *ThingType { + if in == nil { + return nil + } + out := new(ThingType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ThingType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingTypeInitParameters) DeepCopyInto(out *ThingTypeInitParameters) { + *out = *in + if in.Deprecated != nil { + in, out := &in.Deprecated, &out.Deprecated + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = new(ThingTypePropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingTypeInitParameters. +func (in *ThingTypeInitParameters) DeepCopy() *ThingTypeInitParameters { + if in == nil { + return nil + } + out := new(ThingTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ThingTypeList) DeepCopyInto(out *ThingTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThingType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingTypeList. +func (in *ThingTypeList) DeepCopy() *ThingTypeList { + if in == nil { + return nil + } + out := new(ThingTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ThingTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingTypeObservation) DeepCopyInto(out *ThingTypeObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Deprecated != nil { + in, out := &in.Deprecated, &out.Deprecated + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = new(ThingTypePropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, 
len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingTypeObservation. +func (in *ThingTypeObservation) DeepCopy() *ThingTypeObservation { + if in == nil { + return nil + } + out := new(ThingTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingTypeParameters) DeepCopyInto(out *ThingTypeParameters) { + *out = *in + if in.Deprecated != nil { + in, out := &in.Deprecated, &out.Deprecated + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = new(ThingTypePropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingTypeParameters. +func (in *ThingTypeParameters) DeepCopy() *ThingTypeParameters { + if in == nil { + return nil + } + out := new(ThingTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingTypePropertiesInitParameters) DeepCopyInto(out *ThingTypePropertiesInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.SearchableAttributes != nil { + in, out := &in.SearchableAttributes, &out.SearchableAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingTypePropertiesInitParameters. +func (in *ThingTypePropertiesInitParameters) DeepCopy() *ThingTypePropertiesInitParameters { + if in == nil { + return nil + } + out := new(ThingTypePropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingTypePropertiesObservation) DeepCopyInto(out *ThingTypePropertiesObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.SearchableAttributes != nil { + in, out := &in.SearchableAttributes, &out.SearchableAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingTypePropertiesObservation. +func (in *ThingTypePropertiesObservation) DeepCopy() *ThingTypePropertiesObservation { + if in == nil { + return nil + } + out := new(ThingTypePropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThingTypePropertiesParameters) DeepCopyInto(out *ThingTypePropertiesParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.SearchableAttributes != nil { + in, out := &in.SearchableAttributes, &out.SearchableAttributes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingTypePropertiesParameters. +func (in *ThingTypePropertiesParameters) DeepCopy() *ThingTypePropertiesParameters { + if in == nil { + return nil + } + out := new(ThingTypePropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingTypeSpec) DeepCopyInto(out *ThingTypeSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingTypeSpec. +func (in *ThingTypeSpec) DeepCopy() *ThingTypeSpec { + if in == nil { + return nil + } + out := new(ThingTypeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThingTypeStatus) DeepCopyInto(out *ThingTypeStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThingTypeStatus. 
+func (in *ThingTypeStatus) DeepCopy() *ThingTypeStatus { + if in == nil { + return nil + } + out := new(ThingTypeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestampInitParameters) DeepCopyInto(out *TimestampInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestampInitParameters. +func (in *TimestampInitParameters) DeepCopy() *TimestampInitParameters { + if in == nil { + return nil + } + out := new(TimestampInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestampObservation) DeepCopyInto(out *TimestampObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestampObservation. +func (in *TimestampObservation) DeepCopy() *TimestampObservation { + if in == nil { + return nil + } + out := new(TimestampObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimestampParameters) DeepCopyInto(out *TimestampParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestampParameters. +func (in *TimestampParameters) DeepCopy() *TimestampParameters { + if in == nil { + return nil + } + out := new(TimestampParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestreamDimensionInitParameters) DeepCopyInto(out *TimestreamDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestreamDimensionInitParameters. +func (in *TimestreamDimensionInitParameters) DeepCopy() *TimestreamDimensionInitParameters { + if in == nil { + return nil + } + out := new(TimestreamDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestreamDimensionObservation) DeepCopyInto(out *TimestreamDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestreamDimensionObservation. 
+func (in *TimestreamDimensionObservation) DeepCopy() *TimestreamDimensionObservation { + if in == nil { + return nil + } + out := new(TimestreamDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestreamDimensionParameters) DeepCopyInto(out *TimestreamDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestreamDimensionParameters. +func (in *TimestreamDimensionParameters) DeepCopy() *TimestreamDimensionParameters { + if in == nil { + return nil + } + out := new(TimestreamDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestreamInitParameters) DeepCopyInto(out *TimestreamInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(TimestampInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestreamInitParameters. 
+func (in *TimestreamInitParameters) DeepCopy() *TimestreamInitParameters { + if in == nil { + return nil + } + out := new(TimestreamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestreamObservation) DeepCopyInto(out *TimestreamObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(TimestampObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestreamObservation. +func (in *TimestreamObservation) DeepCopy() *TimestreamObservation { + if in == nil { + return nil + } + out := new(TimestreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimestreamParameters) DeepCopyInto(out *TimestreamParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(TimestampParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestreamParameters. +func (in *TimestreamParameters) DeepCopy() *TimestreamParameters { + if in == nil { + return nil + } + out := new(TimestreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestreamTimestampInitParameters) DeepCopyInto(out *TimestreamTimestampInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestreamTimestampInitParameters. +func (in *TimestreamTimestampInitParameters) DeepCopy() *TimestreamTimestampInitParameters { + if in == nil { + return nil + } + out := new(TimestreamTimestampInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimestreamTimestampObservation) DeepCopyInto(out *TimestreamTimestampObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestreamTimestampObservation. +func (in *TimestreamTimestampObservation) DeepCopy() *TimestreamTimestampObservation { + if in == nil { + return nil + } + out := new(TimestreamTimestampObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimestreamTimestampParameters) DeepCopyInto(out *TimestreamTimestampParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestreamTimestampParameters. +func (in *TimestreamTimestampParameters) DeepCopy() *TimestreamTimestampParameters { + if in == nil { + return nil + } + out := new(TimestreamTimestampParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRule) DeepCopyInto(out *TopicRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRule. 
+func (in *TopicRule) DeepCopy() *TopicRule { + if in == nil { + return nil + } + out := new(TopicRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TopicRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleDestination) DeepCopyInto(out *TopicRuleDestination) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleDestination. +func (in *TopicRuleDestination) DeepCopy() *TopicRuleDestination { + if in == nil { + return nil + } + out := new(TopicRuleDestination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TopicRuleDestination) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleDestinationInitParameters) DeepCopyInto(out *TopicRuleDestinationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleDestinationInitParameters. 
+func (in *TopicRuleDestinationInitParameters) DeepCopy() *TopicRuleDestinationInitParameters { + if in == nil { + return nil + } + out := new(TopicRuleDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleDestinationList) DeepCopyInto(out *TopicRuleDestinationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TopicRuleDestination, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleDestinationList. +func (in *TopicRuleDestinationList) DeepCopy() *TopicRuleDestinationList { + if in == nil { + return nil + } + out := new(TopicRuleDestinationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TopicRuleDestinationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicRuleDestinationObservation) DeepCopyInto(out *TopicRuleDestinationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleDestinationObservation. +func (in *TopicRuleDestinationObservation) DeepCopy() *TopicRuleDestinationObservation { + if in == nil { + return nil + } + out := new(TopicRuleDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleDestinationParameters) DeepCopyInto(out *TopicRuleDestinationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleDestinationParameters. +func (in *TopicRuleDestinationParameters) DeepCopy() *TopicRuleDestinationParameters { + if in == nil { + return nil + } + out := new(TopicRuleDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicRuleDestinationSpec) DeepCopyInto(out *TopicRuleDestinationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleDestinationSpec. +func (in *TopicRuleDestinationSpec) DeepCopy() *TopicRuleDestinationSpec { + if in == nil { + return nil + } + out := new(TopicRuleDestinationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleDestinationStatus) DeepCopyInto(out *TopicRuleDestinationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleDestinationStatus. +func (in *TopicRuleDestinationStatus) DeepCopy() *TopicRuleDestinationStatus { + if in == nil { + return nil + } + out := new(TopicRuleDestinationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleFirehoseInitParameters) DeepCopyInto(out *TopicRuleFirehoseInitParameters) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.DeliveryStreamName != nil { + in, out := &in.DeliveryStreamName, &out.DeliveryStreamName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Separator != nil { + in, out := &in.Separator, &out.Separator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleFirehoseInitParameters. 
+func (in *TopicRuleFirehoseInitParameters) DeepCopy() *TopicRuleFirehoseInitParameters { + if in == nil { + return nil + } + out := new(TopicRuleFirehoseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleFirehoseObservation) DeepCopyInto(out *TopicRuleFirehoseObservation) { + *out = *in + if in.BatchMode != nil { + in, out := &in.BatchMode, &out.BatchMode + *out = new(bool) + **out = **in + } + if in.DeliveryStreamName != nil { + in, out := &in.DeliveryStreamName, &out.DeliveryStreamName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Separator != nil { + in, out := &in.Separator, &out.Separator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleFirehoseObservation. +func (in *TopicRuleFirehoseObservation) DeepCopy() *TopicRuleFirehoseObservation { + if in == nil { + return nil + } + out := new(TopicRuleFirehoseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing.
func (in *TopicRuleFirehoseParameters) DeepCopyInto(out *TopicRuleFirehoseParameters) {
	*out = *in
	if in.BatchMode != nil {
		in, out := &in.BatchMode, &out.BatchMode
		*out = new(bool)
		**out = **in
	}
	if in.DeliveryStreamName != nil {
		in, out := &in.DeliveryStreamName, &out.DeliveryStreamName
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.Separator != nil {
		in, out := &in.Separator, &out.Separator
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleFirehoseParameters.
func (in *TopicRuleFirehoseParameters) DeepCopy() *TopicRuleFirehoseParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleFirehoseParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleHTTPInitParameters) DeepCopyInto(out *TopicRuleHTTPInitParameters) {
	*out = *in
	if in.ConfirmationURL != nil {
		in, out := &in.ConfirmationURL, &out.ConfirmationURL
		*out = new(string)
		**out = **in
	}
	// Slice of structs: allocate a new backing array and deep-copy each element.
	if in.HTTPHeader != nil {
		in, out := &in.HTTPHeader, &out.HTTPHeader
		*out = make([]HTTPHTTPHeaderInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleHTTPInitParameters.
func (in *TopicRuleHTTPInitParameters) DeepCopy() *TopicRuleHTTPInitParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleHTTPInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing.
func (in *TopicRuleHTTPObservation) DeepCopyInto(out *TopicRuleHTTPObservation) {
	*out = *in
	if in.ConfirmationURL != nil {
		in, out := &in.ConfirmationURL, &out.ConfirmationURL
		*out = new(string)
		**out = **in
	}
	if in.HTTPHeader != nil {
		in, out := &in.HTTPHeader, &out.HTTPHeader
		*out = make([]HTTPHTTPHeaderObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleHTTPObservation.
func (in *TopicRuleHTTPObservation) DeepCopy() *TopicRuleHTTPObservation {
	if in == nil {
		return nil
	}
	out := new(TopicRuleHTTPObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleHTTPParameters) DeepCopyInto(out *TopicRuleHTTPParameters) {
	*out = *in
	if in.ConfirmationURL != nil {
		in, out := &in.ConfirmationURL, &out.ConfirmationURL
		*out = new(string)
		**out = **in
	}
	if in.HTTPHeader != nil {
		in, out := &in.HTTPHeader, &out.HTTPHeader
		*out = make([]HTTPHTTPHeaderParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleHTTPParameters.
func (in *TopicRuleHTTPParameters) DeepCopy() *TopicRuleHTTPParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleHTTPParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing. One guarded copy per optional field;
// slices of structs get element-wise DeepCopyInto, maps of *string get
// per-value reallocation.
func (in *TopicRuleInitParameters) DeepCopyInto(out *TopicRuleInitParameters) {
	*out = *in
	if in.CloudwatchAlarm != nil {
		in, out := &in.CloudwatchAlarm, &out.CloudwatchAlarm
		*out = make([]CloudwatchAlarmInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CloudwatchLogs != nil {
		in, out := &in.CloudwatchLogs, &out.CloudwatchLogs
		*out = make([]CloudwatchLogsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CloudwatchMetric != nil {
		in, out := &in.CloudwatchMetric, &out.CloudwatchMetric
		*out = make([]CloudwatchMetricInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.Dynamodb != nil {
		in, out := &in.Dynamodb, &out.Dynamodb
		*out = make([]DynamodbInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Dynamodbv2 != nil {
		in, out := &in.Dynamodbv2, &out.Dynamodbv2
		*out = make([]Dynamodbv2InitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Elasticsearch != nil {
		in, out := &in.Elasticsearch, &out.Elasticsearch
		*out = make([]ElasticsearchInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.ErrorAction != nil {
		in, out := &in.ErrorAction, &out.ErrorAction
		*out = new(ErrorActionInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Firehose != nil {
		in, out := &in.Firehose, &out.Firehose
		*out = make([]TopicRuleFirehoseInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HTTP != nil {
		in, out := &in.HTTP, &out.HTTP
		*out = make([]TopicRuleHTTPInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IotAnalytics != nil {
		in, out := &in.IotAnalytics, &out.IotAnalytics
		*out = make([]TopicRuleIotAnalyticsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IotEvents != nil {
		in, out := &in.IotEvents, &out.IotEvents
		*out = make([]TopicRuleIotEventsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Kafka != nil {
		in, out := &in.Kafka, &out.Kafka
		*out = make([]TopicRuleKafkaInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Kinesis != nil {
		in, out := &in.Kinesis, &out.Kinesis
		*out = make([]TopicRuleKinesisInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Lambda != nil {
		in, out := &in.Lambda, &out.Lambda
		*out = make([]TopicRuleLambdaInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Republish != nil {
		in, out := &in.Republish, &out.Republish
		*out = make([]TopicRuleRepublishInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.S3 != nil {
		in, out := &in.S3, &out.S3
		*out = make([]TopicRuleS3InitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SQL != nil {
		in, out := &in.SQL, &out.SQL
		*out = new(string)
		**out = **in
	}
	if in.SQLVersion != nil {
		in, out := &in.SQLVersion, &out.SQLVersion
		*out = new(string)
		**out = **in
	}
	if in.Sns != nil {
		in, out := &in.Sns, &out.Sns
		*out = make([]TopicRuleSnsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Sqs != nil {
		in, out := &in.Sqs, &out.Sqs
		*out = make([]TopicRuleSqsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StepFunctions != nil {
		in, out := &in.StepFunctions, &out.StepFunctions
		*out = make([]TopicRuleStepFunctionsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Timestream != nil {
		in, out := &in.Timestream, &out.Timestream
		*out = make([]TopicRuleTimestreamInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleInitParameters.
func (in *TopicRuleInitParameters) DeepCopy() *TopicRuleInitParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleIotAnalyticsInitParameters) DeepCopyInto(out *TopicRuleIotAnalyticsInitParameters) {
	*out = *in
	if in.BatchMode != nil {
		in, out := &in.BatchMode, &out.BatchMode
		*out = new(bool)
		**out = **in
	}
	if in.ChannelName != nil {
		in, out := &in.ChannelName, &out.ChannelName
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleIotAnalyticsInitParameters.
func (in *TopicRuleIotAnalyticsInitParameters) DeepCopy() *TopicRuleIotAnalyticsInitParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleIotAnalyticsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing.
func (in *TopicRuleIotAnalyticsObservation) DeepCopyInto(out *TopicRuleIotAnalyticsObservation) {
	*out = *in
	if in.BatchMode != nil {
		in, out := &in.BatchMode, &out.BatchMode
		*out = new(bool)
		**out = **in
	}
	if in.ChannelName != nil {
		in, out := &in.ChannelName, &out.ChannelName
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleIotAnalyticsObservation.
func (in *TopicRuleIotAnalyticsObservation) DeepCopy() *TopicRuleIotAnalyticsObservation {
	if in == nil {
		return nil
	}
	out := new(TopicRuleIotAnalyticsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleIotAnalyticsParameters) DeepCopyInto(out *TopicRuleIotAnalyticsParameters) {
	*out = *in
	if in.BatchMode != nil {
		in, out := &in.BatchMode, &out.BatchMode
		*out = new(bool)
		**out = **in
	}
	if in.ChannelName != nil {
		in, out := &in.ChannelName, &out.ChannelName
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleIotAnalyticsParameters.
func (in *TopicRuleIotAnalyticsParameters) DeepCopy() *TopicRuleIotAnalyticsParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleIotAnalyticsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing.
func (in *TopicRuleIotEventsInitParameters) DeepCopyInto(out *TopicRuleIotEventsInitParameters) {
	*out = *in
	if in.BatchMode != nil {
		in, out := &in.BatchMode, &out.BatchMode
		*out = new(bool)
		**out = **in
	}
	if in.InputName != nil {
		in, out := &in.InputName, &out.InputName
		*out = new(string)
		**out = **in
	}
	if in.MessageID != nil {
		in, out := &in.MessageID, &out.MessageID
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleIotEventsInitParameters.
func (in *TopicRuleIotEventsInitParameters) DeepCopy() *TopicRuleIotEventsInitParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleIotEventsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleIotEventsObservation) DeepCopyInto(out *TopicRuleIotEventsObservation) {
	*out = *in
	if in.BatchMode != nil {
		in, out := &in.BatchMode, &out.BatchMode
		*out = new(bool)
		**out = **in
	}
	if in.InputName != nil {
		in, out := &in.InputName, &out.InputName
		*out = new(string)
		**out = **in
	}
	if in.MessageID != nil {
		in, out := &in.MessageID, &out.MessageID
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleIotEventsObservation.
func (in *TopicRuleIotEventsObservation) DeepCopy() *TopicRuleIotEventsObservation {
	if in == nil {
		return nil
	}
	out := new(TopicRuleIotEventsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing.
func (in *TopicRuleIotEventsParameters) DeepCopyInto(out *TopicRuleIotEventsParameters) {
	*out = *in
	if in.BatchMode != nil {
		in, out := &in.BatchMode, &out.BatchMode
		*out = new(bool)
		**out = **in
	}
	if in.InputName != nil {
		in, out := &in.InputName, &out.InputName
		*out = new(string)
		**out = **in
	}
	if in.MessageID != nil {
		in, out := &in.MessageID, &out.MessageID
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleIotEventsParameters.
func (in *TopicRuleIotEventsParameters) DeepCopy() *TopicRuleIotEventsParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleIotEventsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleKafkaInitParameters) DeepCopyInto(out *TopicRuleKafkaInitParameters) {
	*out = *in
	// map[string]*string: each non-nil value pointer is reallocated so the
	// clone does not alias the source map's values.
	if in.ClientProperties != nil {
		in, out := &in.ClientProperties, &out.ClientProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.DestinationArn != nil {
		in, out := &in.DestinationArn, &out.DestinationArn
		*out = new(string)
		**out = **in
	}
	if in.Header != nil {
		in, out := &in.Header, &out.Header
		*out = make([]KafkaHeaderInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Partition != nil {
		in, out := &in.Partition, &out.Partition
		*out = new(string)
		**out = **in
	}
	if in.Topic != nil {
		in, out := &in.Topic, &out.Topic
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleKafkaInitParameters.
func (in *TopicRuleKafkaInitParameters) DeepCopy() *TopicRuleKafkaInitParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleKafkaInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleKafkaObservation) DeepCopyInto(out *TopicRuleKafkaObservation) {
	*out = *in
	if in.ClientProperties != nil {
		in, out := &in.ClientProperties, &out.ClientProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.DestinationArn != nil {
		in, out := &in.DestinationArn, &out.DestinationArn
		*out = new(string)
		**out = **in
	}
	if in.Header != nil {
		in, out := &in.Header, &out.Header
		*out = make([]KafkaHeaderObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Partition != nil {
		in, out := &in.Partition, &out.Partition
		*out = new(string)
		**out = **in
	}
	if in.Topic != nil {
		in, out := &in.Topic, &out.Topic
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleKafkaObservation.
func (in *TopicRuleKafkaObservation) DeepCopy() *TopicRuleKafkaObservation {
	if in == nil {
		return nil
	}
	out := new(TopicRuleKafkaObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing.
func (in *TopicRuleKafkaParameters) DeepCopyInto(out *TopicRuleKafkaParameters) {
	*out = *in
	if in.ClientProperties != nil {
		in, out := &in.ClientProperties, &out.ClientProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.DestinationArn != nil {
		in, out := &in.DestinationArn, &out.DestinationArn
		*out = new(string)
		**out = **in
	}
	if in.Header != nil {
		in, out := &in.Header, &out.Header
		*out = make([]KafkaHeaderParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Key != nil {
		in, out := &in.Key, &out.Key
		*out = new(string)
		**out = **in
	}
	if in.Partition != nil {
		in, out := &in.Partition, &out.Partition
		*out = new(string)
		**out = **in
	}
	if in.Topic != nil {
		in, out := &in.Topic, &out.Topic
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleKafkaParameters.
func (in *TopicRuleKafkaParameters) DeepCopy() *TopicRuleKafkaParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleKafkaParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing.
func (in *TopicRuleKinesisInitParameters) DeepCopyInto(out *TopicRuleKinesisInitParameters) {
	*out = *in
	if in.PartitionKey != nil {
		in, out := &in.PartitionKey, &out.PartitionKey
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.StreamName != nil {
		in, out := &in.StreamName, &out.StreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleKinesisInitParameters.
func (in *TopicRuleKinesisInitParameters) DeepCopy() *TopicRuleKinesisInitParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleKinesisInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleKinesisObservation) DeepCopyInto(out *TopicRuleKinesisObservation) {
	*out = *in
	if in.PartitionKey != nil {
		in, out := &in.PartitionKey, &out.PartitionKey
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.StreamName != nil {
		in, out := &in.StreamName, &out.StreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleKinesisObservation.
func (in *TopicRuleKinesisObservation) DeepCopy() *TopicRuleKinesisObservation {
	if in == nil {
		return nil
	}
	out := new(TopicRuleKinesisObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing.
func (in *TopicRuleKinesisParameters) DeepCopyInto(out *TopicRuleKinesisParameters) {
	*out = *in
	if in.PartitionKey != nil {
		in, out := &in.PartitionKey, &out.PartitionKey
		*out = new(string)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.StreamName != nil {
		in, out := &in.StreamName, &out.StreamName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleKinesisParameters.
func (in *TopicRuleKinesisParameters) DeepCopy() *TopicRuleKinesisParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleKinesisParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleLambdaInitParameters) DeepCopyInto(out *TopicRuleLambdaInitParameters) {
	*out = *in
	if in.FunctionArn != nil {
		in, out := &in.FunctionArn, &out.FunctionArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleLambdaInitParameters.
func (in *TopicRuleLambdaInitParameters) DeepCopy() *TopicRuleLambdaInitParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleLambdaInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleLambdaObservation) DeepCopyInto(out *TopicRuleLambdaObservation) {
	*out = *in
	if in.FunctionArn != nil {
		in, out := &in.FunctionArn, &out.FunctionArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleLambdaObservation.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing.
func (in *TopicRuleLambdaObservation) DeepCopy() *TopicRuleLambdaObservation {
	if in == nil {
		return nil
	}
	out := new(TopicRuleLambdaObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleLambdaParameters) DeepCopyInto(out *TopicRuleLambdaParameters) {
	*out = *in
	if in.FunctionArn != nil {
		in, out := &in.FunctionArn, &out.FunctionArn
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleLambdaParameters.
func (in *TopicRuleLambdaParameters) DeepCopy() *TopicRuleLambdaParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleLambdaParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleList) DeepCopyInto(out *TopicRuleList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]TopicRule, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleList.
func (in *TopicRuleList) DeepCopy() *TopicRuleList {
	if in == nil {
		return nil
	}
	out := new(TopicRuleList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TopicRuleList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing. Mirrors TopicRuleInitParameters'
// copier but for the Observation types, plus Arn, ID, and TagsAll.
func (in *TopicRuleObservation) DeepCopyInto(out *TopicRuleObservation) {
	*out = *in
	if in.Arn != nil {
		in, out := &in.Arn, &out.Arn
		*out = new(string)
		**out = **in
	}
	if in.CloudwatchAlarm != nil {
		in, out := &in.CloudwatchAlarm, &out.CloudwatchAlarm
		*out = make([]CloudwatchAlarmObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CloudwatchLogs != nil {
		in, out := &in.CloudwatchLogs, &out.CloudwatchLogs
		*out = make([]CloudwatchLogsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CloudwatchMetric != nil {
		in, out := &in.CloudwatchMetric, &out.CloudwatchMetric
		*out = make([]CloudwatchMetricObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.Dynamodb != nil {
		in, out := &in.Dynamodb, &out.Dynamodb
		*out = make([]DynamodbObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Dynamodbv2 != nil {
		in, out := &in.Dynamodbv2, &out.Dynamodbv2
		*out = make([]Dynamodbv2Observation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Elasticsearch != nil {
		in, out := &in.Elasticsearch, &out.Elasticsearch
		*out = make([]ElasticsearchObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.ErrorAction != nil {
		in, out := &in.ErrorAction, &out.ErrorAction
		*out = new(ErrorActionObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Firehose != nil {
		in, out := &in.Firehose, &out.Firehose
		*out = make([]TopicRuleFirehoseObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HTTP != nil {
		in, out := &in.HTTP, &out.HTTP
		*out = make([]TopicRuleHTTPObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.IotAnalytics != nil {
		in, out := &in.IotAnalytics, &out.IotAnalytics
		*out = make([]TopicRuleIotAnalyticsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IotEvents != nil {
		in, out := &in.IotEvents, &out.IotEvents
		*out = make([]TopicRuleIotEventsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Kafka != nil {
		in, out := &in.Kafka, &out.Kafka
		*out = make([]TopicRuleKafkaObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Kinesis != nil {
		in, out := &in.Kinesis, &out.Kinesis
		*out = make([]TopicRuleKinesisObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Lambda != nil {
		in, out := &in.Lambda, &out.Lambda
		*out = make([]TopicRuleLambdaObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Republish != nil {
		in, out := &in.Republish, &out.Republish
		*out = make([]TopicRuleRepublishObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.S3 != nil {
		in, out := &in.S3, &out.S3
		*out = make([]TopicRuleS3Observation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SQL != nil {
		in, out := &in.SQL, &out.SQL
		*out = new(string)
		**out = **in
	}
	if in.SQLVersion != nil {
		in, out := &in.SQLVersion, &out.SQLVersion
		*out = new(string)
		**out = **in
	}
	if in.Sns != nil {
		in, out := &in.Sns, &out.Sns
		*out = make([]TopicRuleSnsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Sqs != nil {
		in, out := &in.Sqs, &out.Sqs
		*out = make([]TopicRuleSqsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StepFunctions != nil {
		in, out := &in.StepFunctions, &out.StepFunctions
		*out = make([]TopicRuleStepFunctionsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TagsAll != nil {
		in, out := &in.TagsAll, &out.TagsAll
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Timestream != nil {
		in, out := &in.Timestream, &out.Timestream
		*out = make([]TopicRuleTimestreamObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleObservation.
func (in *TopicRuleObservation) DeepCopy() *TopicRuleObservation {
	if in == nil {
		return nil
	}
	out := new(TopicRuleObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy code (per the "autogenerated" markers)
// — regenerate rather than hand-editing. Mirrors the InitParameters copier
// for the Parameters types, plus Region; no TagsAll here.
func (in *TopicRuleParameters) DeepCopyInto(out *TopicRuleParameters) {
	*out = *in
	if in.CloudwatchAlarm != nil {
		in, out := &in.CloudwatchAlarm, &out.CloudwatchAlarm
		*out = make([]CloudwatchAlarmParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CloudwatchLogs != nil {
		in, out := &in.CloudwatchLogs, &out.CloudwatchLogs
		*out = make([]CloudwatchLogsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CloudwatchMetric != nil {
		in, out := &in.CloudwatchMetric, &out.CloudwatchMetric
		*out = make([]CloudwatchMetricParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.Dynamodb != nil {
		in, out := &in.Dynamodb, &out.Dynamodb
		*out = make([]DynamodbParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Dynamodbv2 != nil {
		in, out := &in.Dynamodbv2, &out.Dynamodbv2
		*out = make([]Dynamodbv2Parameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Elasticsearch != nil {
		in, out := &in.Elasticsearch, &out.Elasticsearch
		*out = make([]ElasticsearchParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.ErrorAction != nil {
		in, out := &in.ErrorAction, &out.ErrorAction
		*out = new(ErrorActionParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Firehose != nil {
		in, out := &in.Firehose, &out.Firehose
		*out = make([]TopicRuleFirehoseParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HTTP != nil {
		in, out := &in.HTTP, &out.HTTP
		*out = make([]TopicRuleHTTPParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IotAnalytics != nil {
		in, out := &in.IotAnalytics, &out.IotAnalytics
		*out = make([]TopicRuleIotAnalyticsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IotEvents != nil {
		in, out := &in.IotEvents, &out.IotEvents
		*out = make([]TopicRuleIotEventsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Kafka != nil {
		in, out := &in.Kafka, &out.Kafka
		*out = make([]TopicRuleKafkaParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Kinesis != nil {
		in, out := &in.Kinesis, &out.Kinesis
		*out = make([]TopicRuleKinesisParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Lambda != nil {
		in, out := &in.Lambda, &out.Lambda
		*out = make([]TopicRuleLambdaParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Region != nil {
		in, out := &in.Region, &out.Region
		*out = new(string)
		**out = **in
	}
	if in.Republish != nil {
		in, out := &in.Republish, &out.Republish
		*out = make([]TopicRuleRepublishParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.S3 != nil {
		in, out := &in.S3, &out.S3
		*out = make([]TopicRuleS3Parameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SQL != nil {
		in, out := &in.SQL, &out.SQL
		*out = new(string)
		**out = **in
	}
	if in.SQLVersion != nil {
		in, out := &in.SQLVersion, &out.SQLVersion
		*out = new(string)
		**out = **in
	}
	if in.Sns != nil {
		in, out := &in.Sns, &out.Sns
		*out = make([]TopicRuleSnsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Sqs != nil {
		in, out := &in.Sqs, &out.Sqs
		*out = make([]TopicRuleSqsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StepFunctions != nil {
		in, out := &in.StepFunctions, &out.StepFunctions
		*out = make([]TopicRuleStepFunctionsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Timestream != nil {
		in, out := &in.Timestream, &out.Timestream
		*out = make([]TopicRuleTimestreamParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleParameters.
func (in *TopicRuleParameters) DeepCopy() *TopicRuleParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicRuleRepublishInitParameters) DeepCopyInto(out *TopicRuleRepublishInitParameters) {
	*out = *in
	if in.Qos != nil {
		in, out := &in.Qos, &out.Qos
		*out = new(float64)
		**out = **in
	}
	if in.RoleArn != nil {
		in, out := &in.RoleArn, &out.RoleArn
		*out = new(string)
		**out = **in
	}
	if in.Topic != nil {
		in, out := &in.Topic, &out.Topic
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleRepublishInitParameters.
func (in *TopicRuleRepublishInitParameters) DeepCopy() *TopicRuleRepublishInitParameters {
	if in == nil {
		return nil
	}
	out := new(TopicRuleRepublishInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopicRuleRepublishObservation) DeepCopyInto(out *TopicRuleRepublishObservation) { + *out = *in + if in.Qos != nil { + in, out := &in.Qos, &out.Qos + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleRepublishObservation. +func (in *TopicRuleRepublishObservation) DeepCopy() *TopicRuleRepublishObservation { + if in == nil { + return nil + } + out := new(TopicRuleRepublishObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleRepublishParameters) DeepCopyInto(out *TopicRuleRepublishParameters) { + *out = *in + if in.Qos != nil { + in, out := &in.Qos, &out.Qos + *out = new(float64) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleRepublishParameters. +func (in *TopicRuleRepublishParameters) DeepCopy() *TopicRuleRepublishParameters { + if in == nil { + return nil + } + out := new(TopicRuleRepublishParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicRuleS3InitParameters) DeepCopyInto(out *TopicRuleS3InitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleS3InitParameters. +func (in *TopicRuleS3InitParameters) DeepCopy() *TopicRuleS3InitParameters { + if in == nil { + return nil + } + out := new(TopicRuleS3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleS3Observation) DeepCopyInto(out *TopicRuleS3Observation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleS3Observation. +func (in *TopicRuleS3Observation) DeepCopy() *TopicRuleS3Observation { + if in == nil { + return nil + } + out := new(TopicRuleS3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicRuleS3Parameters) DeepCopyInto(out *TopicRuleS3Parameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleS3Parameters. +func (in *TopicRuleS3Parameters) DeepCopy() *TopicRuleS3Parameters { + if in == nil { + return nil + } + out := new(TopicRuleS3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleSnsInitParameters) DeepCopyInto(out *TopicRuleSnsInitParameters) { + *out = *in + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } + if in.TargetArnRef != nil { + in, out := &in.TargetArnRef, &out.TargetArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetArnSelector != nil { + in, out := &in.TargetArnSelector, &out.TargetArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new TopicRuleSnsInitParameters. +func (in *TopicRuleSnsInitParameters) DeepCopy() *TopicRuleSnsInitParameters { + if in == nil { + return nil + } + out := new(TopicRuleSnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleSnsObservation) DeepCopyInto(out *TopicRuleSnsObservation) { + *out = *in + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleSnsObservation. +func (in *TopicRuleSnsObservation) DeepCopy() *TopicRuleSnsObservation { + if in == nil { + return nil + } + out := new(TopicRuleSnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicRuleSnsParameters) DeepCopyInto(out *TopicRuleSnsParameters) { + *out = *in + if in.MessageFormat != nil { + in, out := &in.MessageFormat, &out.MessageFormat + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } + if in.TargetArnRef != nil { + in, out := &in.TargetArnRef, &out.TargetArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetArnSelector != nil { + in, out := &in.TargetArnSelector, &out.TargetArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleSnsParameters. +func (in *TopicRuleSnsParameters) DeepCopy() *TopicRuleSnsParameters { + if in == nil { + return nil + } + out := new(TopicRuleSnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleSpec) DeepCopyInto(out *TopicRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleSpec. +func (in *TopicRuleSpec) DeepCopy() *TopicRuleSpec { + if in == nil { + return nil + } + out := new(TopicRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TopicRuleSqsInitParameters) DeepCopyInto(out *TopicRuleSqsInitParameters) { + *out = *in + if in.QueueURL != nil { + in, out := &in.QueueURL, &out.QueueURL + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UseBase64 != nil { + in, out := &in.UseBase64, &out.UseBase64 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleSqsInitParameters. +func (in *TopicRuleSqsInitParameters) DeepCopy() *TopicRuleSqsInitParameters { + if in == nil { + return nil + } + out := new(TopicRuleSqsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleSqsObservation) DeepCopyInto(out *TopicRuleSqsObservation) { + *out = *in + if in.QueueURL != nil { + in, out := &in.QueueURL, &out.QueueURL + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UseBase64 != nil { + in, out := &in.UseBase64, &out.UseBase64 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleSqsObservation. +func (in *TopicRuleSqsObservation) DeepCopy() *TopicRuleSqsObservation { + if in == nil { + return nil + } + out := new(TopicRuleSqsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicRuleSqsParameters) DeepCopyInto(out *TopicRuleSqsParameters) { + *out = *in + if in.QueueURL != nil { + in, out := &in.QueueURL, &out.QueueURL + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UseBase64 != nil { + in, out := &in.UseBase64, &out.UseBase64 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleSqsParameters. +func (in *TopicRuleSqsParameters) DeepCopy() *TopicRuleSqsParameters { + if in == nil { + return nil + } + out := new(TopicRuleSqsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleStatus) DeepCopyInto(out *TopicRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleStatus. +func (in *TopicRuleStatus) DeepCopy() *TopicRuleStatus { + if in == nil { + return nil + } + out := new(TopicRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicRuleStepFunctionsInitParameters) DeepCopyInto(out *TopicRuleStepFunctionsInitParameters) { + *out = *in + if in.ExecutionNamePrefix != nil { + in, out := &in.ExecutionNamePrefix, &out.ExecutionNamePrefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateMachineName != nil { + in, out := &in.StateMachineName, &out.StateMachineName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleStepFunctionsInitParameters. +func (in *TopicRuleStepFunctionsInitParameters) DeepCopy() *TopicRuleStepFunctionsInitParameters { + if in == nil { + return nil + } + out := new(TopicRuleStepFunctionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleStepFunctionsObservation) DeepCopyInto(out *TopicRuleStepFunctionsObservation) { + *out = *in + if in.ExecutionNamePrefix != nil { + in, out := &in.ExecutionNamePrefix, &out.ExecutionNamePrefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateMachineName != nil { + in, out := &in.StateMachineName, &out.StateMachineName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleStepFunctionsObservation. +func (in *TopicRuleStepFunctionsObservation) DeepCopy() *TopicRuleStepFunctionsObservation { + if in == nil { + return nil + } + out := new(TopicRuleStepFunctionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicRuleStepFunctionsParameters) DeepCopyInto(out *TopicRuleStepFunctionsParameters) { + *out = *in + if in.ExecutionNamePrefix != nil { + in, out := &in.ExecutionNamePrefix, &out.ExecutionNamePrefix + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateMachineName != nil { + in, out := &in.StateMachineName, &out.StateMachineName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleStepFunctionsParameters. +func (in *TopicRuleStepFunctionsParameters) DeepCopy() *TopicRuleStepFunctionsParameters { + if in == nil { + return nil + } + out := new(TopicRuleStepFunctionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleTimestreamInitParameters) DeepCopyInto(out *TopicRuleTimestreamInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]TimestreamDimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(TimestreamTimestampInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleTimestreamInitParameters. 
+func (in *TopicRuleTimestreamInitParameters) DeepCopy() *TopicRuleTimestreamInitParameters { + if in == nil { + return nil + } + out := new(TopicRuleTimestreamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicRuleTimestreamObservation) DeepCopyInto(out *TopicRuleTimestreamObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]TimestreamDimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(TimestreamTimestampObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleTimestreamObservation. +func (in *TopicRuleTimestreamObservation) DeepCopy() *TopicRuleTimestreamObservation { + if in == nil { + return nil + } + out := new(TopicRuleTimestreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicRuleTimestreamParameters) DeepCopyInto(out *TopicRuleTimestreamParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]TimestreamDimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(TimestreamTimestampParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicRuleTimestreamParameters. +func (in *TopicRuleTimestreamParameters) DeepCopy() *TopicRuleTimestreamParameters { + if in == nil { + return nil + } + out := new(TopicRuleTimestreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigurationInitParameters) DeepCopyInto(out *VPCConfigurationInitParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) 
+ (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationInitParameters. +func (in *VPCConfigurationInitParameters) DeepCopy() *VPCConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationObservation) DeepCopyInto(out *VPCConfigurationObservation) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationObservation. +func (in *VPCConfigurationObservation) DeepCopy() *VPCConfigurationObservation { + if in == nil { + return nil + } + out := new(VPCConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigurationParameters) DeepCopyInto(out *VPCConfigurationParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationParameters. +func (in *VPCConfigurationParameters) DeepCopy() *VPCConfigurationParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/iot/v1beta2/zz_generated.managed.go b/apis/iot/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..8b67c41b45 --- /dev/null +++ b/apis/iot/v1beta2/zz_generated.managed.go @@ -0,0 +1,368 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this IndexingConfiguration. +func (mg *IndexingConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IndexingConfiguration. +func (mg *IndexingConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IndexingConfiguration. +func (mg *IndexingConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IndexingConfiguration. +func (mg *IndexingConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IndexingConfiguration. +func (mg *IndexingConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IndexingConfiguration. 
+func (mg *IndexingConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IndexingConfiguration. +func (mg *IndexingConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IndexingConfiguration. +func (mg *IndexingConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IndexingConfiguration. +func (mg *IndexingConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IndexingConfiguration. +func (mg *IndexingConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IndexingConfiguration. +func (mg *IndexingConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IndexingConfiguration. +func (mg *IndexingConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProvisioningTemplate. 
+func (mg *ProvisioningTemplate) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProvisioningTemplate. +func (mg *ProvisioningTemplate) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ThingGroup. +func (mg *ThingGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ThingGroup. 
+func (mg *ThingGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ThingGroup. +func (mg *ThingGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ThingGroup. +func (mg *ThingGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ThingGroup. +func (mg *ThingGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ThingGroup. +func (mg *ThingGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ThingGroup. +func (mg *ThingGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ThingGroup. +func (mg *ThingGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ThingGroup. +func (mg *ThingGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ThingGroup. +func (mg *ThingGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ThingGroup. +func (mg *ThingGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ThingGroup. +func (mg *ThingGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ThingType. 
+func (mg *ThingType) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ThingType. +func (mg *ThingType) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ThingType. +func (mg *ThingType) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ThingType. +func (mg *ThingType) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ThingType. +func (mg *ThingType) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ThingType. +func (mg *ThingType) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ThingType. +func (mg *ThingType) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ThingType. +func (mg *ThingType) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ThingType. +func (mg *ThingType) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ThingType. +func (mg *ThingType) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ThingType. +func (mg *ThingType) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ThingType. +func (mg *ThingType) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TopicRule. 
+func (mg *TopicRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TopicRule. +func (mg *TopicRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TopicRule. +func (mg *TopicRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TopicRule. +func (mg *TopicRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TopicRule. +func (mg *TopicRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TopicRule. +func (mg *TopicRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TopicRule. +func (mg *TopicRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TopicRule. +func (mg *TopicRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TopicRule. +func (mg *TopicRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TopicRule. +func (mg *TopicRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TopicRule. +func (mg *TopicRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TopicRule. +func (mg *TopicRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TopicRuleDestination. 
+func (mg *TopicRuleDestination) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TopicRuleDestination. +func (mg *TopicRuleDestination) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TopicRuleDestination. +func (mg *TopicRuleDestination) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TopicRuleDestination. +func (mg *TopicRuleDestination) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TopicRuleDestination. +func (mg *TopicRuleDestination) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TopicRuleDestination. +func (mg *TopicRuleDestination) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TopicRuleDestination. +func (mg *TopicRuleDestination) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TopicRuleDestination. +func (mg *TopicRuleDestination) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TopicRuleDestination. +func (mg *TopicRuleDestination) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TopicRuleDestination. +func (mg *TopicRuleDestination) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TopicRuleDestination. 
+func (mg *TopicRuleDestination) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TopicRuleDestination. +func (mg *TopicRuleDestination) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/iot/v1beta2/zz_generated.managedlist.go b/apis/iot/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..a1857ce604 --- /dev/null +++ b/apis/iot/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,62 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this IndexingConfigurationList. +func (l *IndexingConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProvisioningTemplateList. +func (l *ProvisioningTemplateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ThingGroupList. +func (l *ThingGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ThingTypeList. +func (l *ThingTypeList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TopicRuleDestinationList. 
+func (l *TopicRuleDestinationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TopicRuleList. +func (l *TopicRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/iot/v1beta2/zz_generated.resolvers.go b/apis/iot/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..47dae5bb53 --- /dev/null +++ b/apis/iot/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,490 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ProvisioningTemplate) ResolveReferences( // ResolveReferences of this ProvisioningTemplate. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ProvisioningRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ProvisioningRoleArnRef, + Selector: mg.Spec.ForProvider.ProvisioningRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ProvisioningRoleArn") + } + mg.Spec.ForProvider.ProvisioningRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ProvisioningRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ProvisioningRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ProvisioningRoleArnRef, + Selector: mg.Spec.InitProvider.ProvisioningRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ProvisioningRoleArn") + } + mg.Spec.InitProvider.ProvisioningRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ProvisioningRoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ThingGroup. 
+func (mg *ThingGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iot.aws.upbound.io", "v1beta2", "ThingGroup", "ThingGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ParentGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ParentGroupNameRef, + Selector: mg.Spec.ForProvider.ParentGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ParentGroupName") + } + mg.Spec.ForProvider.ParentGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ParentGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iot.aws.upbound.io", "v1beta2", "ThingGroup", "ThingGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ParentGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ParentGroupNameRef, + Selector: mg.Spec.InitProvider.ParentGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ParentGroupName") + } + mg.Spec.InitProvider.ParentGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ParentGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this TopicRule. 
+func (mg *TopicRule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.ErrorAction != nil { + if mg.Spec.ForProvider.ErrorAction.Sns != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ErrorAction.Sns.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ErrorAction.Sns.RoleArnRef, + Selector: mg.Spec.ForProvider.ErrorAction.Sns.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ErrorAction.Sns.RoleArn") + } + mg.Spec.ForProvider.ErrorAction.Sns.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ErrorAction.Sns.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.ErrorAction != nil { + if mg.Spec.ForProvider.ErrorAction.Sns != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ErrorAction.Sns.TargetArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ErrorAction.Sns.TargetArnRef, + Selector: mg.Spec.ForProvider.ErrorAction.Sns.TargetArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.ErrorAction.Sns.TargetArn") + } + mg.Spec.ForProvider.ErrorAction.Sns.TargetArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ErrorAction.Sns.TargetArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Sns); i3++ { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Sns[i3].RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Sns[i3].RoleArnRef, + Selector: mg.Spec.ForProvider.Sns[i3].RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Sns[i3].RoleArn") + } + mg.Spec.ForProvider.Sns[i3].RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Sns[i3].RoleArnRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Sns); i3++ { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Sns[i3].TargetArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Sns[i3].TargetArnRef, + Selector: mg.Spec.ForProvider.Sns[i3].TargetArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Sns[i3].TargetArn") + } + mg.Spec.ForProvider.Sns[i3].TargetArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Sns[i3].TargetArnRef = 
rsp.ResolvedReference + + } + if mg.Spec.InitProvider.ErrorAction != nil { + if mg.Spec.InitProvider.ErrorAction.Sns != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ErrorAction.Sns.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ErrorAction.Sns.RoleArnRef, + Selector: mg.Spec.InitProvider.ErrorAction.Sns.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ErrorAction.Sns.RoleArn") + } + mg.Spec.InitProvider.ErrorAction.Sns.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ErrorAction.Sns.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.ErrorAction != nil { + if mg.Spec.InitProvider.ErrorAction.Sns != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ErrorAction.Sns.TargetArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ErrorAction.Sns.TargetArnRef, + Selector: mg.Spec.InitProvider.ErrorAction.Sns.TargetArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ErrorAction.Sns.TargetArn") + } + mg.Spec.InitProvider.ErrorAction.Sns.TargetArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ErrorAction.Sns.TargetArnRef = rsp.ResolvedReference + 
+ } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Sns); i3++ { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Sns[i3].RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Sns[i3].RoleArnRef, + Selector: mg.Spec.InitProvider.Sns[i3].RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Sns[i3].RoleArn") + } + mg.Spec.InitProvider.Sns[i3].RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Sns[i3].RoleArnRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Sns); i3++ { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Sns[i3].TargetArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Sns[i3].TargetArnRef, + Selector: mg.Spec.InitProvider.Sns[i3].TargetArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Sns[i3].TargetArn") + } + mg.Spec.InitProvider.Sns[i3].TargetArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Sns[i3].TargetArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this TopicRuleDestination. 
+func (mg *TopicRuleDestination) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.VPCConfiguration.RoleArnRef, + Selector: mg.Spec.ForProvider.VPCConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfiguration.RoleArn") + } + mg.Spec.ForProvider.VPCConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfiguration.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfiguration.SecurityGroupRefs, + Selector: mg.Spec.ForProvider.VPCConfiguration.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.VPCConfiguration.SecurityGroups") + } + mg.Spec.ForProvider.VPCConfiguration.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfiguration.SecurityGroupRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfiguration.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfiguration.SubnetIDRefs, + Selector: mg.Spec.ForProvider.VPCConfiguration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfiguration.SubnetIds") + } + mg.Spec.ForProvider.VPCConfiguration.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfiguration.SubnetIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCConfiguration.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VPCConfiguration.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCConfiguration.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfiguration.VPCID") + } + mg.Spec.ForProvider.VPCConfiguration.VPCID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCConfiguration.VPCIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCConfiguration.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.VPCConfiguration.RoleArnRef, + Selector: mg.Spec.InitProvider.VPCConfiguration.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfiguration.RoleArn") + } + mg.Spec.InitProvider.VPCConfiguration.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCConfiguration.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfiguration.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfiguration.SecurityGroupRefs, + Selector: mg.Spec.InitProvider.VPCConfiguration.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfiguration.SecurityGroups") + } + mg.Spec.InitProvider.VPCConfiguration.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + 
mg.Spec.InitProvider.VPCConfiguration.SecurityGroupRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfiguration.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfiguration.SubnetIDRefs, + Selector: mg.Spec.InitProvider.VPCConfiguration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfiguration.SubnetIds") + } + mg.Spec.InitProvider.VPCConfiguration.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfiguration.SubnetIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCConfiguration.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VPCConfiguration.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCConfiguration.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfiguration.VPCID") + } + mg.Spec.InitProvider.VPCConfiguration.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCConfiguration.VPCIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git 
a/apis/iot/v1beta2/zz_groupversion_info.go b/apis/iot/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..a604683915 --- /dev/null +++ b/apis/iot/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=iot.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "iot.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/iot/v1beta2/zz_indexingconfiguration_terraformed.go b/apis/iot/v1beta2/zz_indexingconfiguration_terraformed.go new file mode 100755 index 0000000000..72601d6245 --- /dev/null +++ b/apis/iot/v1beta2/zz_indexingconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IndexingConfiguration +func (mg *IndexingConfiguration) GetTerraformResourceType() string { + return "aws_iot_indexing_configuration" +} + +// GetConnectionDetailsMapping for this IndexingConfiguration +func (tr *IndexingConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this IndexingConfiguration +func (tr *IndexingConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IndexingConfiguration +func (tr *IndexingConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IndexingConfiguration +func (tr *IndexingConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IndexingConfiguration +func (tr *IndexingConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IndexingConfiguration +func (tr *IndexingConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IndexingConfiguration +func (tr 
*IndexingConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IndexingConfiguration +func (tr *IndexingConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IndexingConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *IndexingConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &IndexingConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IndexingConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iot/v1beta2/zz_indexingconfiguration_types.go b/apis/iot/v1beta2/zz_indexingconfiguration_types.go new file mode 100755 index 0000000000..7eff95e08c --- /dev/null +++ b/apis/iot/v1beta2/zz_indexingconfiguration_types.go @@ -0,0 +1,365 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomFieldInitParameters struct { + + // The name of the field. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type CustomFieldObservation struct { + + // The name of the field. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type CustomFieldParameters struct { + + // The name of the field. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FilterInitParameters struct { + + // List of shadow names that you select to index. + // +listType=set + NamedShadowNames []*string `json:"namedShadowNames,omitempty" tf:"named_shadow_names,omitempty"` +} + +type FilterObservation struct { + + // List of shadow names that you select to index. 
+ // +listType=set + NamedShadowNames []*string `json:"namedShadowNames,omitempty" tf:"named_shadow_names,omitempty"` +} + +type FilterParameters struct { + + // List of shadow names that you select to index. + // +kubebuilder:validation:Optional + // +listType=set + NamedShadowNames []*string `json:"namedShadowNames,omitempty" tf:"named_shadow_names,omitempty"` +} + +type IndexingConfigurationInitParameters struct { + + // Thing group indexing configuration. See below. + ThingGroupIndexingConfiguration *ThingGroupIndexingConfigurationInitParameters `json:"thingGroupIndexingConfiguration,omitempty" tf:"thing_group_indexing_configuration,omitempty"` + + // Thing indexing configuration. See below. + ThingIndexingConfiguration *ThingIndexingConfigurationInitParameters `json:"thingIndexingConfiguration,omitempty" tf:"thing_indexing_configuration,omitempty"` +} + +type IndexingConfigurationObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Thing group indexing configuration. See below. + ThingGroupIndexingConfiguration *ThingGroupIndexingConfigurationObservation `json:"thingGroupIndexingConfiguration,omitempty" tf:"thing_group_indexing_configuration,omitempty"` + + // Thing indexing configuration. See below. + ThingIndexingConfiguration *ThingIndexingConfigurationObservation `json:"thingIndexingConfiguration,omitempty" tf:"thing_indexing_configuration,omitempty"` +} + +type IndexingConfigurationParameters struct { + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Thing group indexing configuration. See below. + // +kubebuilder:validation:Optional + ThingGroupIndexingConfiguration *ThingGroupIndexingConfigurationParameters `json:"thingGroupIndexingConfiguration,omitempty" tf:"thing_group_indexing_configuration,omitempty"` + + // Thing indexing configuration. See below. 
+ // +kubebuilder:validation:Optional + ThingIndexingConfiguration *ThingIndexingConfigurationParameters `json:"thingIndexingConfiguration,omitempty" tf:"thing_indexing_configuration,omitempty"` +} + +type ManagedFieldInitParameters struct { + + // The name of the field. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ManagedFieldObservation struct { + + // The name of the field. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ManagedFieldParameters struct { + + // The name of the field. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ThingGroupIndexingConfigurationInitParameters struct { + + // A list of thing group fields to index. This list cannot contain any managed fields. See below. + CustomField []CustomFieldInitParameters `json:"customField,omitempty" tf:"custom_field,omitempty"` + + // Contains fields that are indexed and whose types are already known by the Fleet Indexing service. See below. + ManagedField []ManagedFieldInitParameters `json:"managedField,omitempty" tf:"managed_field,omitempty"` + + // Thing group indexing mode. Valid values: OFF, ON. + ThingGroupIndexingMode *string `json:"thingGroupIndexingMode,omitempty" tf:"thing_group_indexing_mode,omitempty"` +} + +type ThingGroupIndexingConfigurationObservation struct { + + // A list of thing group fields to index. This list cannot contain any managed fields. See below. 
+ CustomField []CustomFieldObservation `json:"customField,omitempty" tf:"custom_field,omitempty"` + + // Contains fields that are indexed and whose types are already known by the Fleet Indexing service. See below. + ManagedField []ManagedFieldObservation `json:"managedField,omitempty" tf:"managed_field,omitempty"` + + // Thing group indexing mode. Valid values: OFF, ON. + ThingGroupIndexingMode *string `json:"thingGroupIndexingMode,omitempty" tf:"thing_group_indexing_mode,omitempty"` +} + +type ThingGroupIndexingConfigurationParameters struct { + + // A list of thing group fields to index. This list cannot contain any managed fields. See below. + // +kubebuilder:validation:Optional + CustomField []CustomFieldParameters `json:"customField,omitempty" tf:"custom_field,omitempty"` + + // Contains fields that are indexed and whose types are already known by the Fleet Indexing service. See below. + // +kubebuilder:validation:Optional + ManagedField []ManagedFieldParameters `json:"managedField,omitempty" tf:"managed_field,omitempty"` + + // Thing group indexing mode. Valid values: OFF, ON. + // +kubebuilder:validation:Optional + ThingGroupIndexingMode *string `json:"thingGroupIndexingMode" tf:"thing_group_indexing_mode,omitempty"` +} + +type ThingIndexingConfigurationCustomFieldInitParameters struct { + + // The name of the field. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ThingIndexingConfigurationCustomFieldObservation struct { + + // The name of the field. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ThingIndexingConfigurationCustomFieldParameters struct { + + // The name of the field. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ThingIndexingConfigurationInitParameters struct { + + // Contains custom field names and their data type. See below. + CustomField []ThingIndexingConfigurationCustomFieldInitParameters `json:"customField,omitempty" tf:"custom_field,omitempty"` + + // Device Defender indexing mode. Valid values: VIOLATIONS, OFF. Default: OFF. + DeviceDefenderIndexingMode *string `json:"deviceDefenderIndexingMode,omitempty" tf:"device_defender_indexing_mode,omitempty"` + + // Required if named_shadow_indexing_mode is ON. Enables to add named shadows filtered by filter to fleet indexing configuration. + Filter *FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Contains fields that are indexed and whose types are already known by the Fleet Indexing service. See below. + ManagedField []ThingIndexingConfigurationManagedFieldInitParameters `json:"managedField,omitempty" tf:"managed_field,omitempty"` + + // Named shadow indexing mode. Valid values: ON, OFF. Default: OFF. + NamedShadowIndexingMode *string `json:"namedShadowIndexingMode,omitempty" tf:"named_shadow_indexing_mode,omitempty"` + + // Thing connectivity indexing mode. Valid values: STATUS, OFF. Default: OFF. + ThingConnectivityIndexingMode *string `json:"thingConnectivityIndexingMode,omitempty" tf:"thing_connectivity_indexing_mode,omitempty"` + + // Thing indexing mode. Valid values: REGISTRY, REGISTRY_AND_SHADOW, OFF. + ThingIndexingMode *string `json:"thingIndexingMode,omitempty" tf:"thing_indexing_mode,omitempty"` +} + +type ThingIndexingConfigurationManagedFieldInitParameters struct { + + // The name of the field. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. 
Valid values: Number, String, Boolean. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ThingIndexingConfigurationManagedFieldObservation struct { + + // The name of the field. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ThingIndexingConfigurationManagedFieldParameters struct { + + // The name of the field. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the field. Valid values: Number, String, Boolean. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ThingIndexingConfigurationObservation struct { + + // Contains custom field names and their data type. See below. + CustomField []ThingIndexingConfigurationCustomFieldObservation `json:"customField,omitempty" tf:"custom_field,omitempty"` + + // Device Defender indexing mode. Valid values: VIOLATIONS, OFF. Default: OFF. + DeviceDefenderIndexingMode *string `json:"deviceDefenderIndexingMode,omitempty" tf:"device_defender_indexing_mode,omitempty"` + + // Required if named_shadow_indexing_mode is ON. Enables to add named shadows filtered by filter to fleet indexing configuration. + Filter *FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // Contains fields that are indexed and whose types are already known by the Fleet Indexing service. See below. + ManagedField []ThingIndexingConfigurationManagedFieldObservation `json:"managedField,omitempty" tf:"managed_field,omitempty"` + + // Named shadow indexing mode. Valid values: ON, OFF. Default: OFF. + NamedShadowIndexingMode *string `json:"namedShadowIndexingMode,omitempty" tf:"named_shadow_indexing_mode,omitempty"` + + // Thing connectivity indexing mode. Valid values: STATUS, OFF. Default: OFF. 
+ ThingConnectivityIndexingMode *string `json:"thingConnectivityIndexingMode,omitempty" tf:"thing_connectivity_indexing_mode,omitempty"` + + // Thing indexing mode. Valid values: REGISTRY, REGISTRY_AND_SHADOW, OFF. + ThingIndexingMode *string `json:"thingIndexingMode,omitempty" tf:"thing_indexing_mode,omitempty"` +} + +type ThingIndexingConfigurationParameters struct { + + // Contains custom field names and their data type. See below. + // +kubebuilder:validation:Optional + CustomField []ThingIndexingConfigurationCustomFieldParameters `json:"customField,omitempty" tf:"custom_field,omitempty"` + + // Device Defender indexing mode. Valid values: VIOLATIONS, OFF. Default: OFF. + // +kubebuilder:validation:Optional + DeviceDefenderIndexingMode *string `json:"deviceDefenderIndexingMode,omitempty" tf:"device_defender_indexing_mode,omitempty"` + + // Required if named_shadow_indexing_mode is ON. Enables to add named shadows filtered by filter to fleet indexing configuration. + // +kubebuilder:validation:Optional + Filter *FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Contains fields that are indexed and whose types are already known by the Fleet Indexing service. See below. + // +kubebuilder:validation:Optional + ManagedField []ThingIndexingConfigurationManagedFieldParameters `json:"managedField,omitempty" tf:"managed_field,omitempty"` + + // Named shadow indexing mode. Valid values: ON, OFF. Default: OFF. + // +kubebuilder:validation:Optional + NamedShadowIndexingMode *string `json:"namedShadowIndexingMode,omitempty" tf:"named_shadow_indexing_mode,omitempty"` + + // Thing connectivity indexing mode. Valid values: STATUS, OFF. Default: OFF. + // +kubebuilder:validation:Optional + ThingConnectivityIndexingMode *string `json:"thingConnectivityIndexingMode,omitempty" tf:"thing_connectivity_indexing_mode,omitempty"` + + // Thing indexing mode. Valid values: REGISTRY, REGISTRY_AND_SHADOW, OFF. 
+ // +kubebuilder:validation:Optional + ThingIndexingMode *string `json:"thingIndexingMode" tf:"thing_indexing_mode,omitempty"` +} + +// IndexingConfigurationSpec defines the desired state of IndexingConfiguration +type IndexingConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IndexingConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IndexingConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// IndexingConfigurationStatus defines the observed state of IndexingConfiguration. +type IndexingConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IndexingConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IndexingConfiguration is the Schema for the IndexingConfigurations API. Managing IoT Thing indexing. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type IndexingConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec IndexingConfigurationSpec `json:"spec"` + Status IndexingConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IndexingConfigurationList contains a list of IndexingConfigurations +type IndexingConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IndexingConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + IndexingConfiguration_Kind = "IndexingConfiguration" + IndexingConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IndexingConfiguration_Kind}.String() + IndexingConfiguration_KindAPIVersion = IndexingConfiguration_Kind + "." + CRDGroupVersion.String() + IndexingConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(IndexingConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&IndexingConfiguration{}, &IndexingConfigurationList{}) +} diff --git a/apis/iot/v1beta2/zz_provisioningtemplate_terraformed.go b/apis/iot/v1beta2/zz_provisioningtemplate_terraformed.go new file mode 100755 index 0000000000..70228c838d --- /dev/null +++ b/apis/iot/v1beta2/zz_provisioningtemplate_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ProvisioningTemplate +func (mg *ProvisioningTemplate) GetTerraformResourceType() string { + return "aws_iot_provisioning_template" +} + +// GetConnectionDetailsMapping for this ProvisioningTemplate +func (tr *ProvisioningTemplate) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProvisioningTemplate +func (tr *ProvisioningTemplate) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProvisioningTemplate +func (tr *ProvisioningTemplate) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProvisioningTemplate +func (tr *ProvisioningTemplate) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProvisioningTemplate +func (tr *ProvisioningTemplate) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProvisioningTemplate +func (tr *ProvisioningTemplate) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ProvisioningTemplate +func (tr *ProvisioningTemplate) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ProvisioningTemplate +func (tr *ProvisioningTemplate) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ProvisioningTemplate using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ProvisioningTemplate) LateInitialize(attrs []byte) (bool, error) { + params := &ProvisioningTemplateParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ProvisioningTemplate) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iot/v1beta2/zz_provisioningtemplate_types.go b/apis/iot/v1beta2/zz_provisioningtemplate_types.go new file mode 100755 index 0000000000..e02326610e --- /dev/null +++ b/apis/iot/v1beta2/zz_provisioningtemplate_types.go @@ -0,0 +1,223 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PreProvisioningHookInitParameters struct { + + // The version of the payload that was sent to the target function. The only valid (and the default) payload version is "2020-04-01". + PayloadVersion *string `json:"payloadVersion,omitempty" tf:"payload_version,omitempty"` + + // The ARN of the target function. + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` +} + +type PreProvisioningHookObservation struct { + + // The version of the payload that was sent to the target function. The only valid (and the default) payload version is "2020-04-01". + PayloadVersion *string `json:"payloadVersion,omitempty" tf:"payload_version,omitempty"` + + // The ARN of the target function. + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` +} + +type PreProvisioningHookParameters struct { + + // The version of the payload that was sent to the target function. The only valid (and the default) payload version is "2020-04-01". + // +kubebuilder:validation:Optional + PayloadVersion *string `json:"payloadVersion,omitempty" tf:"payload_version,omitempty"` + + // The ARN of the target function. 
+ // +kubebuilder:validation:Optional + TargetArn *string `json:"targetArn" tf:"target_arn,omitempty"` +} + +type ProvisioningTemplateInitParameters struct { + + // The description of the fleet provisioning template. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // True to enable the fleet provisioning template, otherwise false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Creates a pre-provisioning hook template. Details below. + PreProvisioningHook *PreProvisioningHookInitParameters `json:"preProvisioningHook,omitempty" tf:"pre_provisioning_hook,omitempty"` + + // The role ARN for the role associated with the fleet provisioning template. This IoT role grants permission to provision a device. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ProvisioningRoleArn *string `json:"provisioningRoleArn,omitempty" tf:"provisioning_role_arn,omitempty"` + + // Reference to a Role in iam to populate provisioningRoleArn. + // +kubebuilder:validation:Optional + ProvisioningRoleArnRef *v1.Reference `json:"provisioningRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate provisioningRoleArn. + // +kubebuilder:validation:Optional + ProvisioningRoleArnSelector *v1.Selector `json:"provisioningRoleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The JSON formatted contents of the fleet provisioning template. + TemplateBody *string `json:"templateBody,omitempty" tf:"template_body,omitempty"` + + // The type you define in a provisioning template. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProvisioningTemplateObservation struct { + + // The ARN that identifies the provisioning template. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The default version of the fleet provisioning template. + DefaultVersionID *float64 `json:"defaultVersionId,omitempty" tf:"default_version_id,omitempty"` + + // The description of the fleet provisioning template. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // True to enable the fleet provisioning template, otherwise false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Creates a pre-provisioning hook template. Details below. + PreProvisioningHook *PreProvisioningHookObservation `json:"preProvisioningHook,omitempty" tf:"pre_provisioning_hook,omitempty"` + + // The role ARN for the role associated with the fleet provisioning template. This IoT role grants permission to provision a device. + ProvisioningRoleArn *string `json:"provisioningRoleArn,omitempty" tf:"provisioning_role_arn,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The JSON formatted contents of the fleet provisioning template. + TemplateBody *string `json:"templateBody,omitempty" tf:"template_body,omitempty"` + + // The type you define in a provisioning template. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProvisioningTemplateParameters struct { + + // The description of the fleet provisioning template. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // True to enable the fleet provisioning template, otherwise false. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Creates a pre-provisioning hook template. Details below. + // +kubebuilder:validation:Optional + PreProvisioningHook *PreProvisioningHookParameters `json:"preProvisioningHook,omitempty" tf:"pre_provisioning_hook,omitempty"` + + // The role ARN for the role associated with the fleet provisioning template. This IoT role grants permission to provision a device. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ProvisioningRoleArn *string `json:"provisioningRoleArn,omitempty" tf:"provisioning_role_arn,omitempty"` + + // Reference to a Role in iam to populate provisioningRoleArn. + // +kubebuilder:validation:Optional + ProvisioningRoleArnRef *v1.Reference `json:"provisioningRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate provisioningRoleArn. + // +kubebuilder:validation:Optional + ProvisioningRoleArnSelector *v1.Selector `json:"provisioningRoleArnSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The JSON formatted contents of the fleet provisioning template. + // +kubebuilder:validation:Optional + TemplateBody *string `json:"templateBody,omitempty" tf:"template_body,omitempty"` + + // The type you define in a provisioning template. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// ProvisioningTemplateSpec defines the desired state of ProvisioningTemplate +type ProvisioningTemplateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProvisioningTemplateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProvisioningTemplateInitParameters `json:"initProvider,omitempty"` +} + +// ProvisioningTemplateStatus defines the observed state of ProvisioningTemplate. +type ProvisioningTemplateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProvisioningTemplateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ProvisioningTemplate is the Schema for the ProvisioningTemplates API. Manages an IoT fleet provisioning template. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ProvisioningTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.templateBody) || (has(self.initProvider) && has(self.initProvider.templateBody))",message="spec.forProvider.templateBody is a required parameter" + Spec ProvisioningTemplateSpec `json:"spec"` + Status ProvisioningTemplateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProvisioningTemplateList contains a list of ProvisioningTemplates +type ProvisioningTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProvisioningTemplate `json:"items"` +} + +// Repository type metadata. +var ( + ProvisioningTemplate_Kind = "ProvisioningTemplate" + ProvisioningTemplate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ProvisioningTemplate_Kind}.String() + ProvisioningTemplate_KindAPIVersion = ProvisioningTemplate_Kind + "." 
+ CRDGroupVersion.String() + ProvisioningTemplate_GroupVersionKind = CRDGroupVersion.WithKind(ProvisioningTemplate_Kind) +) + +func init() { + SchemeBuilder.Register(&ProvisioningTemplate{}, &ProvisioningTemplateList{}) +} diff --git a/apis/iot/v1beta2/zz_thinggroup_terraformed.go b/apis/iot/v1beta2/zz_thinggroup_terraformed.go new file mode 100755 index 0000000000..14f8e0fea4 --- /dev/null +++ b/apis/iot/v1beta2/zz_thinggroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ThingGroup +func (mg *ThingGroup) GetTerraformResourceType() string { + return "aws_iot_thing_group" +} + +// GetConnectionDetailsMapping for this ThingGroup +func (tr *ThingGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ThingGroup +func (tr *ThingGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ThingGroup +func (tr *ThingGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ThingGroup +func (tr *ThingGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ThingGroup +func (tr *ThingGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if 
err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ThingGroup +func (tr *ThingGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ThingGroup +func (tr *ThingGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ThingGroup +func (tr *ThingGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ThingGroup using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ThingGroup) LateInitialize(attrs []byte) (bool, error) { + params := &ThingGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ThingGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iot/v1beta2/zz_thinggroup_types.go b/apis/iot/v1beta2/zz_thinggroup_types.go new file mode 100755 index 0000000000..b608e69b3a --- /dev/null +++ b/apis/iot/v1beta2/zz_thinggroup_types.go @@ -0,0 +1,234 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AttributePayloadInitParameters struct { + + // Key-value map. + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` +} + +type AttributePayloadObservation struct { + + // Key-value map. + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` +} + +type AttributePayloadParameters struct { + + // Key-value map. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Attributes map[string]*string `json:"attributes,omitempty" tf:"attributes,omitempty"` +} + +type MetadataInitParameters struct { +} + +type MetadataObservation struct { + CreationDate *string `json:"creationDate,omitempty" tf:"creation_date,omitempty"` + + // The name of the parent Thing Group. + ParentGroupName *string `json:"parentGroupName,omitempty" tf:"parent_group_name,omitempty"` + + RootToParentGroups []RootToParentGroupsObservation `json:"rootToParentGroups,omitempty" tf:"root_to_parent_groups,omitempty"` +} + +type MetadataParameters struct { +} + +type PropertiesInitParameters struct { + + // The Thing Group attributes. Defined below. + AttributePayload *AttributePayloadInitParameters `json:"attributePayload,omitempty" tf:"attribute_payload,omitempty"` + + // A description of the Thing Group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` +} + +type PropertiesObservation struct { + + // The Thing Group attributes. Defined below. + AttributePayload *AttributePayloadObservation `json:"attributePayload,omitempty" tf:"attribute_payload,omitempty"` + + // A description of the Thing Group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` +} + +type PropertiesParameters struct { + + // The Thing Group attributes. Defined below. + // +kubebuilder:validation:Optional + AttributePayload *AttributePayloadParameters `json:"attributePayload,omitempty" tf:"attribute_payload,omitempty"` + + // A description of the Thing Group. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` +} + +type RootToParentGroupsInitParameters struct { +} + +type RootToParentGroupsObservation struct { + + // The ARN of the Thing Group. + GroupArn *string `json:"groupArn,omitempty" tf:"group_arn,omitempty"` + + // The name of the Thing Group. 
+ GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` +} + +type RootToParentGroupsParameters struct { +} + +type ThingGroupInitParameters struct { + + // The name of the parent Thing Group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iot/v1beta2.ThingGroup + ParentGroupName *string `json:"parentGroupName,omitempty" tf:"parent_group_name,omitempty"` + + // Reference to a ThingGroup in iot to populate parentGroupName. + // +kubebuilder:validation:Optional + ParentGroupNameRef *v1.Reference `json:"parentGroupNameRef,omitempty" tf:"-"` + + // Selector for a ThingGroup in iot to populate parentGroupName. + // +kubebuilder:validation:Optional + ParentGroupNameSelector *v1.Selector `json:"parentGroupNameSelector,omitempty" tf:"-"` + + // The Thing Group properties. Defined below. + Properties *PropertiesInitParameters `json:"properties,omitempty" tf:"properties,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ThingGroupObservation struct { + + // The ARN of the Thing Group. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The Thing Group ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Metadata []MetadataObservation `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name of the parent Thing Group. + ParentGroupName *string `json:"parentGroupName,omitempty" tf:"parent_group_name,omitempty"` + + // The Thing Group properties. Defined below. + Properties *PropertiesObservation `json:"properties,omitempty" tf:"properties,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The current version of the Thing Group record in the registry. 
+ Version *float64 `json:"version,omitempty" tf:"version,omitempty"` +} + +type ThingGroupParameters struct { + + // The name of the parent Thing Group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iot/v1beta2.ThingGroup + // +kubebuilder:validation:Optional + ParentGroupName *string `json:"parentGroupName,omitempty" tf:"parent_group_name,omitempty"` + + // Reference to a ThingGroup in iot to populate parentGroupName. + // +kubebuilder:validation:Optional + ParentGroupNameRef *v1.Reference `json:"parentGroupNameRef,omitempty" tf:"-"` + + // Selector for a ThingGroup in iot to populate parentGroupName. + // +kubebuilder:validation:Optional + ParentGroupNameSelector *v1.Selector `json:"parentGroupNameSelector,omitempty" tf:"-"` + + // The Thing Group properties. Defined below. + // +kubebuilder:validation:Optional + Properties *PropertiesParameters `json:"properties,omitempty" tf:"properties,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// ThingGroupSpec defines the desired state of ThingGroup +type ThingGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ThingGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ThingGroupInitParameters `json:"initProvider,omitempty"` +} + +// ThingGroupStatus defines the observed state of ThingGroup. +type ThingGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ThingGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ThingGroup is the Schema for the ThingGroups API. Manages an AWS IoT Thing Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ThingGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ThingGroupSpec `json:"spec"` + Status ThingGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ThingGroupList contains a list of ThingGroups +type ThingGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ThingGroup `json:"items"` +} + +// Repository type metadata. +var ( + ThingGroup_Kind = "ThingGroup" + ThingGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ThingGroup_Kind}.String() + ThingGroup_KindAPIVersion = ThingGroup_Kind + "." 
+ CRDGroupVersion.String() + ThingGroup_GroupVersionKind = CRDGroupVersion.WithKind(ThingGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&ThingGroup{}, &ThingGroupList{}) +} diff --git a/apis/iot/v1beta2/zz_thingtype_terraformed.go b/apis/iot/v1beta2/zz_thingtype_terraformed.go new file mode 100755 index 0000000000..8b39069d9b --- /dev/null +++ b/apis/iot/v1beta2/zz_thingtype_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ThingType +func (mg *ThingType) GetTerraformResourceType() string { + return "aws_iot_thing_type" +} + +// GetConnectionDetailsMapping for this ThingType +func (tr *ThingType) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ThingType +func (tr *ThingType) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ThingType +func (tr *ThingType) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ThingType +func (tr *ThingType) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ThingType +func (tr *ThingType) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ThingType +func (tr *ThingType) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ThingType +func (tr *ThingType) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ThingType +func (tr *ThingType) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ThingType using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ThingType) LateInitialize(attrs []byte) (bool, error) { + params := &ThingTypeParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ThingType) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iot/v1beta2/zz_thingtype_types.go b/apis/iot/v1beta2/zz_thingtype_types.go new file mode 100755 index 0000000000..ea822fd1e7 --- /dev/null +++ b/apis/iot/v1beta2/zz_thingtype_types.go @@ -0,0 +1,173 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ThingTypeInitParameters struct { + + // Whether the thing type is deprecated. If true, no new things could be associated with this type. + Deprecated *bool `json:"deprecated,omitempty" tf:"deprecated,omitempty"` + + // The name of the thing type. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // , Configuration block that can contain the following properties of the thing type: + Properties *ThingTypePropertiesInitParameters `json:"properties,omitempty" tf:"properties,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ThingTypeObservation struct { + + // The ARN of the created AWS IoT Thing Type. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether the thing type is deprecated. If true, no new things could be associated with this type. + Deprecated *bool `json:"deprecated,omitempty" tf:"deprecated,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the thing type. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // , Configuration block that can contain the following properties of the thing type: + Properties *ThingTypePropertiesObservation `json:"properties,omitempty" tf:"properties,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ThingTypeParameters struct { + + // Whether the thing type is deprecated. If true, no new things could be associated with this type. + // +kubebuilder:validation:Optional + Deprecated *bool `json:"deprecated,omitempty" tf:"deprecated,omitempty"` + + // The name of the thing type. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // , Configuration block that can contain the following properties of the thing type: + // +kubebuilder:validation:Optional + Properties *ThingTypePropertiesParameters `json:"properties,omitempty" tf:"properties,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ThingTypePropertiesInitParameters struct { + + // The description of the thing type. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of searchable thing attribute names. + // +listType=set + SearchableAttributes []*string `json:"searchableAttributes,omitempty" tf:"searchable_attributes,omitempty"` +} + +type ThingTypePropertiesObservation struct { + + // The description of the thing type. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of searchable thing attribute names. + // +listType=set + SearchableAttributes []*string `json:"searchableAttributes,omitempty" tf:"searchable_attributes,omitempty"` +} + +type ThingTypePropertiesParameters struct { + + // The description of the thing type. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of searchable thing attribute names. + // +kubebuilder:validation:Optional + // +listType=set + SearchableAttributes []*string `json:"searchableAttributes,omitempty" tf:"searchable_attributes,omitempty"` +} + +// ThingTypeSpec defines the desired state of ThingType +type ThingTypeSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ThingTypeParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ThingTypeInitParameters `json:"initProvider,omitempty"` +} + +// ThingTypeStatus defines the observed state of ThingType. +type ThingTypeStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ThingTypeObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ThingType is the Schema for the ThingTypes API. Creates and manages an AWS IoT Thing Type. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ThingType struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ThingTypeSpec `json:"spec"` + Status ThingTypeStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ThingTypeList contains a list of ThingTypes +type ThingTypeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ThingType `json:"items"` +} + +// Repository type metadata. +var ( + ThingType_Kind = "ThingType" + ThingType_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ThingType_Kind}.String() + ThingType_KindAPIVersion = ThingType_Kind + "." 
+ CRDGroupVersion.String() + ThingType_GroupVersionKind = CRDGroupVersion.WithKind(ThingType_Kind) +) + +func init() { + SchemeBuilder.Register(&ThingType{}, &ThingTypeList{}) +} diff --git a/apis/iot/v1beta2/zz_topicrule_terraformed.go b/apis/iot/v1beta2/zz_topicrule_terraformed.go new file mode 100755 index 0000000000..c65cb22bae --- /dev/null +++ b/apis/iot/v1beta2/zz_topicrule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TopicRule +func (mg *TopicRule) GetTerraformResourceType() string { + return "aws_iot_topic_rule" +} + +// GetConnectionDetailsMapping for this TopicRule +func (tr *TopicRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TopicRule +func (tr *TopicRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TopicRule +func (tr *TopicRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TopicRule +func (tr *TopicRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TopicRule +func (tr *TopicRule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} 
+ return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TopicRule +func (tr *TopicRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TopicRule +func (tr *TopicRule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this TopicRule +func (tr *TopicRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this TopicRule using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *TopicRule) LateInitialize(attrs []byte) (bool, error) { + params := &TopicRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TopicRule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iot/v1beta2/zz_topicrule_types.go b/apis/iot/v1beta2/zz_topicrule_types.go new file mode 100755 index 0000000000..476298695e --- /dev/null +++ b/apis/iot/v1beta2/zz_topicrule_types.go @@ -0,0 +1,2614 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudwatchAlarmInitParameters struct { + + // The CloudWatch alarm name. + AlarmName *string `json:"alarmName,omitempty" tf:"alarm_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The reason for the alarm change. + StateReason *string `json:"stateReason,omitempty" tf:"state_reason,omitempty"` + + // The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA. + StateValue *string `json:"stateValue,omitempty" tf:"state_value,omitempty"` +} + +type CloudwatchAlarmObservation struct { + + // The CloudWatch alarm name. 
+ AlarmName *string `json:"alarmName,omitempty" tf:"alarm_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The reason for the alarm change. + StateReason *string `json:"stateReason,omitempty" tf:"state_reason,omitempty"` + + // The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA. + StateValue *string `json:"stateValue,omitempty" tf:"state_value,omitempty"` +} + +type CloudwatchAlarmParameters struct { + + // The CloudWatch alarm name. + // +kubebuilder:validation:Optional + AlarmName *string `json:"alarmName" tf:"alarm_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The reason for the alarm change. + // +kubebuilder:validation:Optional + StateReason *string `json:"stateReason" tf:"state_reason,omitempty"` + + // The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA. + // +kubebuilder:validation:Optional + StateValue *string `json:"stateValue" tf:"state_value,omitempty"` +} + +type CloudwatchLogsInitParameters struct { + + // The CloudWatch log group name. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type CloudwatchLogsObservation struct { + + // The CloudWatch log group name. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type CloudwatchLogsParameters struct { + + // The CloudWatch log group name. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type CloudwatchMetricInitParameters struct { + + // The CloudWatch metric name. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The CloudWatch metric namespace name. + MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + MetricTimestamp *string `json:"metricTimestamp,omitempty" tf:"metric_timestamp,omitempty"` + + // The metric unit (supported units can be found here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit) + MetricUnit *string `json:"metricUnit,omitempty" tf:"metric_unit,omitempty"` + + // The CloudWatch metric value. + MetricValue *string `json:"metricValue,omitempty" tf:"metric_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type CloudwatchMetricObservation struct { + + // The CloudWatch metric name. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The CloudWatch metric namespace name. + MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). 
+ MetricTimestamp *string `json:"metricTimestamp,omitempty" tf:"metric_timestamp,omitempty"` + + // The metric unit (supported units can be found here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit) + MetricUnit *string `json:"metricUnit,omitempty" tf:"metric_unit,omitempty"` + + // The CloudWatch metric value. + MetricValue *string `json:"metricValue,omitempty" tf:"metric_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type CloudwatchMetricParameters struct { + + // The CloudWatch metric name. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // The CloudWatch metric namespace name. + // +kubebuilder:validation:Optional + MetricNamespace *string `json:"metricNamespace" tf:"metric_namespace,omitempty"` + + // An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + // +kubebuilder:validation:Optional + MetricTimestamp *string `json:"metricTimestamp,omitempty" tf:"metric_timestamp,omitempty"` + + // The metric unit (supported units can be found here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit) + // +kubebuilder:validation:Optional + MetricUnit *string `json:"metricUnit" tf:"metric_unit,omitempty"` + + // The CloudWatch metric value. + // +kubebuilder:validation:Optional + MetricValue *string `json:"metricValue" tf:"metric_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type DimensionInitParameters struct { + + // The name of the rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the HTTP header. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DimensionObservation struct { + + // The name of the rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DimensionParameters struct { + + // The name of the rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the HTTP header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DynamodbInitParameters struct { + + // The hash key name. + HashKeyField *string `json:"hashKeyField,omitempty" tf:"hash_key_field,omitempty"` + + // The hash key type. Valid values are "STRING" or "NUMBER". + HashKeyType *string `json:"hashKeyType,omitempty" tf:"hash_key_type,omitempty"` + + // The hash key value. + HashKeyValue *string `json:"hashKeyValue,omitempty" tf:"hash_key_value,omitempty"` + + // The operation. Valid values are "INSERT", "UPDATE", or "DELETE". + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The action payload. + PayloadField *string `json:"payloadField,omitempty" tf:"payload_field,omitempty"` + + // The range key name. + RangeKeyField *string `json:"rangeKeyField,omitempty" tf:"range_key_field,omitempty"` + + // The range key type. Valid values are "STRING" or "NUMBER". + RangeKeyType *string `json:"rangeKeyType,omitempty" tf:"range_key_type,omitempty"` + + // The range key value. + RangeKeyValue *string `json:"rangeKeyValue,omitempty" tf:"range_key_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type DynamodbObservation struct { + + // The hash key name. 
+ HashKeyField *string `json:"hashKeyField,omitempty" tf:"hash_key_field,omitempty"` + + // The hash key type. Valid values are "STRING" or "NUMBER". + HashKeyType *string `json:"hashKeyType,omitempty" tf:"hash_key_type,omitempty"` + + // The hash key value. + HashKeyValue *string `json:"hashKeyValue,omitempty" tf:"hash_key_value,omitempty"` + + // The operation. Valid values are "INSERT", "UPDATE", or "DELETE". + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The action payload. + PayloadField *string `json:"payloadField,omitempty" tf:"payload_field,omitempty"` + + // The range key name. + RangeKeyField *string `json:"rangeKeyField,omitempty" tf:"range_key_field,omitempty"` + + // The range key type. Valid values are "STRING" or "NUMBER". + RangeKeyType *string `json:"rangeKeyType,omitempty" tf:"range_key_type,omitempty"` + + // The range key value. + RangeKeyValue *string `json:"rangeKeyValue,omitempty" tf:"range_key_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type DynamodbParameters struct { + + // The hash key name. + // +kubebuilder:validation:Optional + HashKeyField *string `json:"hashKeyField" tf:"hash_key_field,omitempty"` + + // The hash key type. Valid values are "STRING" or "NUMBER". + // +kubebuilder:validation:Optional + HashKeyType *string `json:"hashKeyType,omitempty" tf:"hash_key_type,omitempty"` + + // The hash key value. + // +kubebuilder:validation:Optional + HashKeyValue *string `json:"hashKeyValue" tf:"hash_key_value,omitempty"` + + // The operation. Valid values are "INSERT", "UPDATE", or "DELETE". + // +kubebuilder:validation:Optional + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The action payload. 
+ // +kubebuilder:validation:Optional + PayloadField *string `json:"payloadField,omitempty" tf:"payload_field,omitempty"` + + // The range key name. + // +kubebuilder:validation:Optional + RangeKeyField *string `json:"rangeKeyField,omitempty" tf:"range_key_field,omitempty"` + + // The range key type. Valid values are "STRING" or "NUMBER". + // +kubebuilder:validation:Optional + RangeKeyType *string `json:"rangeKeyType,omitempty" tf:"range_key_type,omitempty"` + + // The range key value. + // +kubebuilder:validation:Optional + RangeKeyValue *string `json:"rangeKeyValue,omitempty" tf:"range_key_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The name of the DynamoDB table. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName" tf:"table_name,omitempty"` +} + +type Dynamodbv2InitParameters struct { + + // Configuration block with DynamoDB Table to which the message will be written. Nested arguments below. + PutItem *PutItemInitParameters `json:"putItem,omitempty" tf:"put_item,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type Dynamodbv2Observation struct { + + // Configuration block with DynamoDB Table to which the message will be written. Nested arguments below. + PutItem *PutItemObservation `json:"putItem,omitempty" tf:"put_item,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type Dynamodbv2Parameters struct { + + // Configuration block with DynamoDB Table to which the message will be written. Nested arguments below. 
+ // +kubebuilder:validation:Optional + PutItem *PutItemParameters `json:"putItem,omitempty" tf:"put_item,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type Dynamodbv2PutItemInitParameters struct { + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type Dynamodbv2PutItemObservation struct { + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type Dynamodbv2PutItemParameters struct { + + // The name of the DynamoDB table. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName" tf:"table_name,omitempty"` +} + +type ElasticsearchInitParameters struct { + + // The endpoint of your Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The unique identifier for the document you are storing. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Elasticsearch index where you want to store your data. + Index *string `json:"index,omitempty" tf:"index,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The type of document you are storing. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ElasticsearchObservation struct { + + // The endpoint of your Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The unique identifier for the document you are storing. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Elasticsearch index where you want to store your data. + Index *string `json:"index,omitempty" tf:"index,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The type of document you are storing. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ElasticsearchParameters struct { + + // The endpoint of your Elasticsearch domain. + // +kubebuilder:validation:Optional + Endpoint *string `json:"endpoint" tf:"endpoint,omitempty"` + + // The unique identifier for the document you are storing. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // The Elasticsearch index where you want to store your data. + // +kubebuilder:validation:Optional + Index *string `json:"index" tf:"index,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The type of document you are storing. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ErrorActionCloudwatchAlarmInitParameters struct { + + // The CloudWatch alarm name. + AlarmName *string `json:"alarmName,omitempty" tf:"alarm_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The reason for the alarm change. + StateReason *string `json:"stateReason,omitempty" tf:"state_reason,omitempty"` + + // The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA. + StateValue *string `json:"stateValue,omitempty" tf:"state_value,omitempty"` +} + +type ErrorActionCloudwatchAlarmObservation struct { + + // The CloudWatch alarm name. + AlarmName *string `json:"alarmName,omitempty" tf:"alarm_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The reason for the alarm change. 
+ StateReason *string `json:"stateReason,omitempty" tf:"state_reason,omitempty"` + + // The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA. + StateValue *string `json:"stateValue,omitempty" tf:"state_value,omitempty"` +} + +type ErrorActionCloudwatchAlarmParameters struct { + + // The CloudWatch alarm name. + // +kubebuilder:validation:Optional + AlarmName *string `json:"alarmName" tf:"alarm_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The reason for the alarm change. + // +kubebuilder:validation:Optional + StateReason *string `json:"stateReason" tf:"state_reason,omitempty"` + + // The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA. + // +kubebuilder:validation:Optional + StateValue *string `json:"stateValue" tf:"state_value,omitempty"` +} + +type ErrorActionCloudwatchLogsInitParameters struct { + + // The CloudWatch log group name. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type ErrorActionCloudwatchLogsObservation struct { + + // The CloudWatch log group name. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type ErrorActionCloudwatchLogsParameters struct { + + // The CloudWatch log group name. + // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+ // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type ErrorActionCloudwatchMetricInitParameters struct { + + // The CloudWatch metric name. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The CloudWatch metric namespace name. + MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + MetricTimestamp *string `json:"metricTimestamp,omitempty" tf:"metric_timestamp,omitempty"` + + // The metric unit (supported units can be found here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit) + MetricUnit *string `json:"metricUnit,omitempty" tf:"metric_unit,omitempty"` + + // The CloudWatch metric value. + MetricValue *string `json:"metricValue,omitempty" tf:"metric_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type ErrorActionCloudwatchMetricObservation struct { + + // The CloudWatch metric name. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The CloudWatch metric namespace name. + MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + MetricTimestamp *string `json:"metricTimestamp,omitempty" tf:"metric_timestamp,omitempty"` + + // The metric unit (supported units can be found here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit) + MetricUnit *string `json:"metricUnit,omitempty" tf:"metric_unit,omitempty"` + + // The CloudWatch metric value. 
+ MetricValue *string `json:"metricValue,omitempty" tf:"metric_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type ErrorActionCloudwatchMetricParameters struct { + + // The CloudWatch metric name. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // The CloudWatch metric namespace name. + // +kubebuilder:validation:Optional + MetricNamespace *string `json:"metricNamespace" tf:"metric_namespace,omitempty"` + + // An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + // +kubebuilder:validation:Optional + MetricTimestamp *string `json:"metricTimestamp,omitempty" tf:"metric_timestamp,omitempty"` + + // The metric unit (supported units can be found here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit) + // +kubebuilder:validation:Optional + MetricUnit *string `json:"metricUnit" tf:"metric_unit,omitempty"` + + // The CloudWatch metric value. + // +kubebuilder:validation:Optional + MetricValue *string `json:"metricValue" tf:"metric_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type ErrorActionDynamodbInitParameters struct { + + // The hash key name. + HashKeyField *string `json:"hashKeyField,omitempty" tf:"hash_key_field,omitempty"` + + // The hash key type. Valid values are "STRING" or "NUMBER". + HashKeyType *string `json:"hashKeyType,omitempty" tf:"hash_key_type,omitempty"` + + // The hash key value. + HashKeyValue *string `json:"hashKeyValue,omitempty" tf:"hash_key_value,omitempty"` + + // The operation. Valid values are "INSERT", "UPDATE", or "DELETE". 
+ Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The action payload. + PayloadField *string `json:"payloadField,omitempty" tf:"payload_field,omitempty"` + + // The range key name. + RangeKeyField *string `json:"rangeKeyField,omitempty" tf:"range_key_field,omitempty"` + + // The range key type. Valid values are "STRING" or "NUMBER". + RangeKeyType *string `json:"rangeKeyType,omitempty" tf:"range_key_type,omitempty"` + + // The range key value. + RangeKeyValue *string `json:"rangeKeyValue,omitempty" tf:"range_key_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type ErrorActionDynamodbObservation struct { + + // The hash key name. + HashKeyField *string `json:"hashKeyField,omitempty" tf:"hash_key_field,omitempty"` + + // The hash key type. Valid values are "STRING" or "NUMBER". + HashKeyType *string `json:"hashKeyType,omitempty" tf:"hash_key_type,omitempty"` + + // The hash key value. + HashKeyValue *string `json:"hashKeyValue,omitempty" tf:"hash_key_value,omitempty"` + + // The operation. Valid values are "INSERT", "UPDATE", or "DELETE". + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The action payload. + PayloadField *string `json:"payloadField,omitempty" tf:"payload_field,omitempty"` + + // The range key name. + RangeKeyField *string `json:"rangeKeyField,omitempty" tf:"range_key_field,omitempty"` + + // The range key type. Valid values are "STRING" or "NUMBER". + RangeKeyType *string `json:"rangeKeyType,omitempty" tf:"range_key_type,omitempty"` + + // The range key value. + RangeKeyValue *string `json:"rangeKeyValue,omitempty" tf:"range_key_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type ErrorActionDynamodbParameters struct { + + // The hash key name. + // +kubebuilder:validation:Optional + HashKeyField *string `json:"hashKeyField" tf:"hash_key_field,omitempty"` + + // The hash key type. Valid values are "STRING" or "NUMBER". + // +kubebuilder:validation:Optional + HashKeyType *string `json:"hashKeyType,omitempty" tf:"hash_key_type,omitempty"` + + // The hash key value. + // +kubebuilder:validation:Optional + HashKeyValue *string `json:"hashKeyValue" tf:"hash_key_value,omitempty"` + + // The operation. Valid values are "INSERT", "UPDATE", or "DELETE". + // +kubebuilder:validation:Optional + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The action payload. + // +kubebuilder:validation:Optional + PayloadField *string `json:"payloadField,omitempty" tf:"payload_field,omitempty"` + + // The range key name. + // +kubebuilder:validation:Optional + RangeKeyField *string `json:"rangeKeyField,omitempty" tf:"range_key_field,omitempty"` + + // The range key type. Valid values are "STRING" or "NUMBER". + // +kubebuilder:validation:Optional + RangeKeyType *string `json:"rangeKeyType,omitempty" tf:"range_key_type,omitempty"` + + // The range key value. + // +kubebuilder:validation:Optional + RangeKeyValue *string `json:"rangeKeyValue,omitempty" tf:"range_key_value,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The name of the DynamoDB table. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName" tf:"table_name,omitempty"` +} + +type ErrorActionDynamodbv2InitParameters struct { + + // Configuration block with DynamoDB Table to which the message will be written. Nested arguments below. 
+ PutItem *Dynamodbv2PutItemInitParameters `json:"putItem,omitempty" tf:"put_item,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type ErrorActionDynamodbv2Observation struct { + + // Configuration block with DynamoDB Table to which the message will be written. Nested arguments below. + PutItem *Dynamodbv2PutItemObservation `json:"putItem,omitempty" tf:"put_item,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type ErrorActionDynamodbv2Parameters struct { + + // Configuration block with DynamoDB Table to which the message will be written. Nested arguments below. + // +kubebuilder:validation:Optional + PutItem *Dynamodbv2PutItemParameters `json:"putItem,omitempty" tf:"put_item,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type ErrorActionElasticsearchInitParameters struct { + + // The endpoint of your Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The unique identifier for the document you are storing. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Elasticsearch index where you want to store your data. + Index *string `json:"index,omitempty" tf:"index,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The type of document you are storing. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ErrorActionElasticsearchObservation struct { + + // The endpoint of your Elasticsearch domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The unique identifier for the document you are storing. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Elasticsearch index where you want to store your data. + Index *string `json:"index,omitempty" tf:"index,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The type of document you are storing. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ErrorActionElasticsearchParameters struct { + + // The endpoint of your Elasticsearch domain. + // +kubebuilder:validation:Optional + Endpoint *string `json:"endpoint" tf:"endpoint,omitempty"` + + // The unique identifier for the document you are storing. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // The Elasticsearch index where you want to store your data. + // +kubebuilder:validation:Optional + Index *string `json:"index" tf:"index,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The type of document you are storing. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ErrorActionInitParameters struct { + CloudwatchAlarm *ErrorActionCloudwatchAlarmInitParameters `json:"cloudwatchAlarm,omitempty" tf:"cloudwatch_alarm,omitempty"` + + CloudwatchLogs *ErrorActionCloudwatchLogsInitParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + CloudwatchMetric *ErrorActionCloudwatchMetricInitParameters `json:"cloudwatchMetric,omitempty" tf:"cloudwatch_metric,omitempty"` + + Dynamodb *ErrorActionDynamodbInitParameters `json:"dynamodb,omitempty" tf:"dynamodb,omitempty"` + + Dynamodbv2 *ErrorActionDynamodbv2InitParameters `json:"dynamodbv2,omitempty" tf:"dynamodbv2,omitempty"` + + Elasticsearch *ErrorActionElasticsearchInitParameters `json:"elasticsearch,omitempty" tf:"elasticsearch,omitempty"` + + Firehose *FirehoseInitParameters `json:"firehose,omitempty" tf:"firehose,omitempty"` + + HTTP *HTTPInitParameters `json:"http,omitempty" tf:"http,omitempty"` + + IotAnalytics *IotAnalyticsInitParameters `json:"iotAnalytics,omitempty" tf:"iot_analytics,omitempty"` + + IotEvents *IotEventsInitParameters `json:"iotEvents,omitempty" tf:"iot_events,omitempty"` + + Kafka *KafkaInitParameters `json:"kafka,omitempty" tf:"kafka,omitempty"` + + Kinesis *KinesisInitParameters `json:"kinesis,omitempty" tf:"kinesis,omitempty"` + + Lambda *LambdaInitParameters `json:"lambda,omitempty" tf:"lambda,omitempty"` + + Republish *RepublishInitParameters `json:"republish,omitempty" tf:"republish,omitempty"` + + S3 *S3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` + + Sns *SnsInitParameters `json:"sns,omitempty" tf:"sns,omitempty"` + + Sqs *SqsInitParameters `json:"sqs,omitempty" tf:"sqs,omitempty"` + + StepFunctions *StepFunctionsInitParameters `json:"stepFunctions,omitempty" tf:"step_functions,omitempty"` + + Timestream *TimestreamInitParameters `json:"timestream,omitempty" tf:"timestream,omitempty"` +} + +type ErrorActionObservation struct { + 
CloudwatchAlarm *ErrorActionCloudwatchAlarmObservation `json:"cloudwatchAlarm,omitempty" tf:"cloudwatch_alarm,omitempty"` + + CloudwatchLogs *ErrorActionCloudwatchLogsObservation `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + CloudwatchMetric *ErrorActionCloudwatchMetricObservation `json:"cloudwatchMetric,omitempty" tf:"cloudwatch_metric,omitempty"` + + Dynamodb *ErrorActionDynamodbObservation `json:"dynamodb,omitempty" tf:"dynamodb,omitempty"` + + Dynamodbv2 *ErrorActionDynamodbv2Observation `json:"dynamodbv2,omitempty" tf:"dynamodbv2,omitempty"` + + Elasticsearch *ErrorActionElasticsearchObservation `json:"elasticsearch,omitempty" tf:"elasticsearch,omitempty"` + + Firehose *FirehoseObservation `json:"firehose,omitempty" tf:"firehose,omitempty"` + + HTTP *HTTPObservation `json:"http,omitempty" tf:"http,omitempty"` + + IotAnalytics *IotAnalyticsObservation `json:"iotAnalytics,omitempty" tf:"iot_analytics,omitempty"` + + IotEvents *IotEventsObservation `json:"iotEvents,omitempty" tf:"iot_events,omitempty"` + + Kafka *KafkaObservation `json:"kafka,omitempty" tf:"kafka,omitempty"` + + Kinesis *KinesisObservation `json:"kinesis,omitempty" tf:"kinesis,omitempty"` + + Lambda *LambdaObservation `json:"lambda,omitempty" tf:"lambda,omitempty"` + + Republish *RepublishObservation `json:"republish,omitempty" tf:"republish,omitempty"` + + S3 *S3Observation `json:"s3,omitempty" tf:"s3,omitempty"` + + Sns *SnsObservation `json:"sns,omitempty" tf:"sns,omitempty"` + + Sqs *SqsObservation `json:"sqs,omitempty" tf:"sqs,omitempty"` + + StepFunctions *StepFunctionsObservation `json:"stepFunctions,omitempty" tf:"step_functions,omitempty"` + + Timestream *TimestreamObservation `json:"timestream,omitempty" tf:"timestream,omitempty"` +} + +type ErrorActionParameters struct { + + // +kubebuilder:validation:Optional + CloudwatchAlarm *ErrorActionCloudwatchAlarmParameters `json:"cloudwatchAlarm,omitempty" tf:"cloudwatch_alarm,omitempty"` + + // 
+kubebuilder:validation:Optional + CloudwatchLogs *ErrorActionCloudwatchLogsParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchMetric *ErrorActionCloudwatchMetricParameters `json:"cloudwatchMetric,omitempty" tf:"cloudwatch_metric,omitempty"` + + // +kubebuilder:validation:Optional + Dynamodb *ErrorActionDynamodbParameters `json:"dynamodb,omitempty" tf:"dynamodb,omitempty"` + + // +kubebuilder:validation:Optional + Dynamodbv2 *ErrorActionDynamodbv2Parameters `json:"dynamodbv2,omitempty" tf:"dynamodbv2,omitempty"` + + // +kubebuilder:validation:Optional + Elasticsearch *ErrorActionElasticsearchParameters `json:"elasticsearch,omitempty" tf:"elasticsearch,omitempty"` + + // +kubebuilder:validation:Optional + Firehose *FirehoseParameters `json:"firehose,omitempty" tf:"firehose,omitempty"` + + // +kubebuilder:validation:Optional + HTTP *HTTPParameters `json:"http,omitempty" tf:"http,omitempty"` + + // +kubebuilder:validation:Optional + IotAnalytics *IotAnalyticsParameters `json:"iotAnalytics,omitempty" tf:"iot_analytics,omitempty"` + + // +kubebuilder:validation:Optional + IotEvents *IotEventsParameters `json:"iotEvents,omitempty" tf:"iot_events,omitempty"` + + // +kubebuilder:validation:Optional + Kafka *KafkaParameters `json:"kafka,omitempty" tf:"kafka,omitempty"` + + // +kubebuilder:validation:Optional + Kinesis *KinesisParameters `json:"kinesis,omitempty" tf:"kinesis,omitempty"` + + // +kubebuilder:validation:Optional + Lambda *LambdaParameters `json:"lambda,omitempty" tf:"lambda,omitempty"` + + // +kubebuilder:validation:Optional + Republish *RepublishParameters `json:"republish,omitempty" tf:"republish,omitempty"` + + // +kubebuilder:validation:Optional + S3 *S3Parameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // +kubebuilder:validation:Optional + Sns *SnsParameters `json:"sns,omitempty" tf:"sns,omitempty"` + + // +kubebuilder:validation:Optional + Sqs *SqsParameters 
`json:"sqs,omitempty" tf:"sqs,omitempty"` + + // +kubebuilder:validation:Optional + StepFunctions *StepFunctionsParameters `json:"stepFunctions,omitempty" tf:"step_functions,omitempty"` + + // +kubebuilder:validation:Optional + Timestream *TimestreamParameters `json:"timestream,omitempty" tf:"timestream,omitempty"` +} + +type FirehoseInitParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The delivery stream name. + DeliveryStreamName *string `json:"deliveryStreamName,omitempty" tf:"delivery_stream_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // A character separator that is used to separate records written to the Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' (Windows newline), ',' (comma). + Separator *string `json:"separator,omitempty" tf:"separator,omitempty"` +} + +type FirehoseObservation struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The delivery stream name. + DeliveryStreamName *string `json:"deliveryStreamName,omitempty" tf:"delivery_stream_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // A character separator that is used to separate records written to the Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' (Windows newline), ',' (comma). + Separator *string `json:"separator,omitempty" tf:"separator,omitempty"` +} + +type FirehoseParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. 
+ // +kubebuilder:validation:Optional + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The delivery stream name. + // +kubebuilder:validation:Optional + DeliveryStreamName *string `json:"deliveryStreamName" tf:"delivery_stream_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // A character separator that is used to separate records written to the Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' (Windows newline), ',' (comma). + // +kubebuilder:validation:Optional + Separator *string `json:"separator,omitempty" tf:"separator,omitempty"` +} + +type HTTPHTTPHeaderInitParameters struct { + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPHTTPHeaderObservation struct { + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPHTTPHeaderParameters struct { + + // The name of the HTTP header. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The value of the HTTP header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type HTTPHeaderInitParameters struct { + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPHeaderObservation struct { + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the HTTP header. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPHeaderParameters struct { + + // The name of the HTTP header. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The value of the HTTP header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type HTTPInitParameters struct { + + // The HTTPS URL used to verify ownership of url. + ConfirmationURL *string `json:"confirmationUrl,omitempty" tf:"confirmation_url,omitempty"` + + // Custom HTTP header IoT Core should send. It is possible to define more than one custom header. + HTTPHeader []HTTPHeaderInitParameters `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // The HTTPS URL. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type HTTPObservation struct { + + // The HTTPS URL used to verify ownership of url. + ConfirmationURL *string `json:"confirmationUrl,omitempty" tf:"confirmation_url,omitempty"` + + // Custom HTTP header IoT Core should send. It is possible to define more than one custom header. + HTTPHeader []HTTPHeaderObservation `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // The HTTPS URL. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type HTTPParameters struct { + + // The HTTPS URL used to verify ownership of url. + // +kubebuilder:validation:Optional + ConfirmationURL *string `json:"confirmationUrl,omitempty" tf:"confirmation_url,omitempty"` + + // Custom HTTP header IoT Core should send. It is possible to define more than one custom header. + // +kubebuilder:validation:Optional + HTTPHeader []HTTPHeaderParameters `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // The HTTPS URL. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type HeaderInitParameters struct { + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the HTTP header. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderObservation struct { + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderParameters struct { + + // The name of the HTTP header. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The value of the HTTP header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type IotAnalyticsInitParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // Name of AWS IOT Analytics channel. + ChannelName *string `json:"channelName,omitempty" tf:"channel_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type IotAnalyticsObservation struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // Name of AWS IOT Analytics channel. + ChannelName *string `json:"channelName,omitempty" tf:"channel_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type IotAnalyticsParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + // +kubebuilder:validation:Optional + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // Name of AWS IOT Analytics channel. 
+ // +kubebuilder:validation:Optional + ChannelName *string `json:"channelName" tf:"channel_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type IotEventsInitParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The name of the AWS IoT Events input. + InputName *string `json:"inputName,omitempty" tf:"input_name,omitempty"` + + // Use this to ensure that only one input (message) with a given messageId is processed by an AWS IoT Events detector. + MessageID *string `json:"messageId,omitempty" tf:"message_id,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type IotEventsObservation struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The name of the AWS IoT Events input. + InputName *string `json:"inputName,omitempty" tf:"input_name,omitempty"` + + // Use this to ensure that only one input (message) with a given messageId is processed by an AWS IoT Events detector. + MessageID *string `json:"messageId,omitempty" tf:"message_id,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type IotEventsParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + // +kubebuilder:validation:Optional + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The name of the AWS IoT Events input. 
+ // +kubebuilder:validation:Optional + InputName *string `json:"inputName" tf:"input_name,omitempty"` + + // Use this to ensure that only one input (message) with a given messageId is processed by an AWS IoT Events detector. + // +kubebuilder:validation:Optional + MessageID *string `json:"messageId,omitempty" tf:"message_id,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type KafkaHeaderInitParameters struct { + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type KafkaHeaderObservation struct { + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type KafkaHeaderParameters struct { + + // The name of the HTTP header. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The value of the HTTP header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type KafkaInitParameters struct { + + // Properties of the Apache Kafka producer client. For more info, see the AWS documentation. + // +mapType=granular + ClientProperties map[string]*string `json:"clientProperties,omitempty" tf:"client_properties,omitempty"` + + // The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` + + // The list of Kafka headers that you specify. Nested arguments below. + Header []HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The Kafka message partition. 
+ Partition *string `json:"partition,omitempty" tf:"partition,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type KafkaObservation struct { + + // Properties of the Apache Kafka producer client. For more info, see the AWS documentation. + // +mapType=granular + ClientProperties map[string]*string `json:"clientProperties,omitempty" tf:"client_properties,omitempty"` + + // The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` + + // The list of Kafka headers that you specify. Nested arguments below. + Header []HeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The Kafka message partition. + Partition *string `json:"partition,omitempty" tf:"partition,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type KafkaParameters struct { + + // Properties of the Apache Kafka producer client. For more info, see the AWS documentation. + // +kubebuilder:validation:Optional + // +mapType=granular + ClientProperties map[string]*string `json:"clientProperties" tf:"client_properties,omitempty"` + + // The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + // +kubebuilder:validation:Optional + DestinationArn *string `json:"destinationArn" tf:"destination_arn,omitempty"` + + // The list of Kafka headers that you specify. Nested arguments below. + // +kubebuilder:validation:Optional + Header []HeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The name of the HTTP header. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The Kafka message partition. 
+ // +kubebuilder:validation:Optional + Partition *string `json:"partition,omitempty" tf:"partition,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + // +kubebuilder:validation:Optional + Topic *string `json:"topic" tf:"topic,omitempty"` +} + +type KinesisInitParameters struct { + + // The partition key. + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the Amazon Kinesis stream. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type KinesisObservation struct { + + // The partition key. + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the Amazon Kinesis stream. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type KinesisParameters struct { + + // The partition key. + // +kubebuilder:validation:Optional + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The name of the Amazon Kinesis stream. + // +kubebuilder:validation:Optional + StreamName *string `json:"streamName" tf:"stream_name,omitempty"` +} + +type LambdaInitParameters struct { + + // The ARN of the Lambda function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` +} + +type LambdaObservation struct { + + // The ARN of the Lambda function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` +} + +type LambdaParameters struct { + + // The ARN of the Lambda function. 
+ // +kubebuilder:validation:Optional + FunctionArn *string `json:"functionArn" tf:"function_arn,omitempty"` +} + +type PutItemInitParameters struct { + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type PutItemObservation struct { + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type PutItemParameters struct { + + // The name of the DynamoDB table. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName" tf:"table_name,omitempty"` +} + +type RepublishInitParameters struct { + + // The Quality of Service (QoS) level to use when republishing messages. Valid values are 0 or 1. The default value is 0. + Qos *float64 `json:"qos,omitempty" tf:"qos,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type RepublishObservation struct { + + // The Quality of Service (QoS) level to use when republishing messages. Valid values are 0 or 1. The default value is 0. + Qos *float64 `json:"qos,omitempty" tf:"qos,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type RepublishParameters struct { + + // The Quality of Service (QoS) level to use when republishing messages. Valid values are 0 or 1. The default value is 0. + // +kubebuilder:validation:Optional + Qos *float64 `json:"qos,omitempty" tf:"qos,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+ // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + // +kubebuilder:validation:Optional + Topic *string `json:"topic" tf:"topic,omitempty"` +} + +type S3InitParameters struct { + + // The Amazon S3 bucket name. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // The Amazon S3 canned ACL that controls access to the object identified by the object key. Valid values. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type S3Observation struct { + + // The Amazon S3 bucket name. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // The Amazon S3 canned ACL that controls access to the object identified by the object key. Valid values. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type S3Parameters struct { + + // The Amazon S3 bucket name. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"` + + // The Amazon S3 canned ACL that controls access to the object identified by the object key. Valid values. + // +kubebuilder:validation:Optional + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` + + // The name of the HTTP header. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+ // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type SnsInitParameters struct { + + // The message format of the message to publish. Accepted values are "JSON" and "RAW". + MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The ARN of the SNS topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` + + // Reference to a Topic in sns to populate targetArn. + // +kubebuilder:validation:Optional + TargetArnRef *v1.Reference `json:"targetArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate targetArn. + // +kubebuilder:validation:Optional + TargetArnSelector *v1.Selector `json:"targetArnSelector,omitempty" tf:"-"` +} + +type SnsObservation struct { + + // The message format of the message to publish. Accepted values are "JSON" and "RAW". + MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The ARN of the SNS topic. + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` +} + +type SnsParameters struct { + + // The message format of the message to publish. Accepted values are "JSON" and "RAW". + // +kubebuilder:validation:Optional + MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The ARN of the SNS topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` + + // Reference to a Topic in sns to populate targetArn. + // +kubebuilder:validation:Optional + TargetArnRef *v1.Reference `json:"targetArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate targetArn. + // +kubebuilder:validation:Optional + TargetArnSelector *v1.Selector `json:"targetArnSelector,omitempty" tf:"-"` +} + +type SqsInitParameters struct { + + // The URL of the Amazon SQS queue. 
+ QueueURL *string `json:"queueUrl,omitempty" tf:"queue_url,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Specifies whether to use Base64 encoding. + UseBase64 *bool `json:"useBase64,omitempty" tf:"use_base64,omitempty"` +} + +type SqsObservation struct { + + // The URL of the Amazon SQS queue. + QueueURL *string `json:"queueUrl,omitempty" tf:"queue_url,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Specifies whether to use Base64 encoding. + UseBase64 *bool `json:"useBase64,omitempty" tf:"use_base64,omitempty"` +} + +type SqsParameters struct { + + // The URL of the Amazon SQS queue. + // +kubebuilder:validation:Optional + QueueURL *string `json:"queueUrl" tf:"queue_url,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // Specifies whether to use Base64 encoding. + // +kubebuilder:validation:Optional + UseBase64 *bool `json:"useBase64" tf:"use_base64,omitempty"` +} + +type StepFunctionsInitParameters struct { + + // The prefix used to generate, along with a UUID, the unique state machine execution name. + ExecutionNamePrefix *string `json:"executionNamePrefix,omitempty" tf:"execution_name_prefix,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the Step Functions state machine whose execution will be started. + StateMachineName *string `json:"stateMachineName,omitempty" tf:"state_machine_name,omitempty"` +} + +type StepFunctionsObservation struct { + + // The prefix used to generate, along with a UUID, the unique state machine execution name. 
+ ExecutionNamePrefix *string `json:"executionNamePrefix,omitempty" tf:"execution_name_prefix,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the Step Functions state machine whose execution will be started. + StateMachineName *string `json:"stateMachineName,omitempty" tf:"state_machine_name,omitempty"` +} + +type StepFunctionsParameters struct { + + // The prefix used to generate, along with a UUID, the unique state machine execution name. + // +kubebuilder:validation:Optional + ExecutionNamePrefix *string `json:"executionNamePrefix,omitempty" tf:"execution_name_prefix,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The name of the Step Functions state machine whose execution will be started. + // +kubebuilder:validation:Optional + StateMachineName *string `json:"stateMachineName" tf:"state_machine_name,omitempty"` +} + +type TimestampInitParameters struct { + + // The precision of the timestamp value that results from the expression described in value. Valid values: SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimestampObservation struct { + + // The precision of the timestamp value that results from the expression described in value. Valid values: SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimestampParameters struct { + + // The precision of the timestamp value that results from the expression described in value. 
Valid values: SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The value of the HTTP header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TimestreamDimensionInitParameters struct { + + // The name of the rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimestreamDimensionObservation struct { + + // The name of the rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimestreamDimensionParameters struct { + + // The name of the rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the HTTP header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TimestreamInitParameters struct { + + // The name of an Amazon Timestream database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Configuration blocks with metadata attributes of the time series that are written in each measure record. Nested arguments below. + Dimension []DimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Configuration block specifying an application-defined value to replace the default value assigned to the Timestream record's timestamp in the time column. Nested arguments below. 
+ Timestamp *TimestampInitParameters `json:"timestamp,omitempty" tf:"timestamp,omitempty"` +} + +type TimestreamObservation struct { + + // The name of an Amazon Timestream database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Configuration blocks with metadata attributes of the time series that are written in each measure record. Nested arguments below. + Dimension []DimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the DynamoDB table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Configuration block specifying an application-defined value to replace the default value assigned to the Timestream record's timestamp in the time column. Nested arguments below. + Timestamp *TimestampObservation `json:"timestamp,omitempty" tf:"timestamp,omitempty"` +} + +type TimestreamParameters struct { + + // The name of an Amazon Timestream database. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // Configuration blocks with metadata attributes of the time series that are written in each measure record. Nested arguments below. + // +kubebuilder:validation:Optional + Dimension []DimensionParameters `json:"dimension" tf:"dimension,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The name of the DynamoDB table. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName" tf:"table_name,omitempty"` + + // Configuration block specifying an application-defined value to replace the default value assigned to the Timestream record's timestamp in the time column. Nested arguments below. 
+ // +kubebuilder:validation:Optional + Timestamp *TimestampParameters `json:"timestamp,omitempty" tf:"timestamp,omitempty"` +} + +type TimestreamTimestampInitParameters struct { + + // The precision of the timestamp value that results from the expression described in value. Valid values: SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimestreamTimestampObservation struct { + + // The precision of the timestamp value that results from the expression described in value. Valid values: SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TimestreamTimestampParameters struct { + + // The precision of the timestamp value that results from the expression described in value. Valid values: SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // The value of the HTTP header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TopicRuleFirehoseInitParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The delivery stream name. + DeliveryStreamName *string `json:"deliveryStreamName,omitempty" tf:"delivery_stream_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // A character separator that is used to separate records written to the Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' (Windows newline), ',' (comma). 
+ Separator *string `json:"separator,omitempty" tf:"separator,omitempty"` +} + +type TopicRuleFirehoseObservation struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The delivery stream name. + DeliveryStreamName *string `json:"deliveryStreamName,omitempty" tf:"delivery_stream_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // A character separator that is used to separate records written to the Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' (Windows newline), ',' (comma). + Separator *string `json:"separator,omitempty" tf:"separator,omitempty"` +} + +type TopicRuleFirehoseParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + // +kubebuilder:validation:Optional + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The delivery stream name. + // +kubebuilder:validation:Optional + DeliveryStreamName *string `json:"deliveryStreamName" tf:"delivery_stream_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // A character separator that is used to separate records written to the Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' (Windows newline), ',' (comma). + // +kubebuilder:validation:Optional + Separator *string `json:"separator,omitempty" tf:"separator,omitempty"` +} + +type TopicRuleHTTPInitParameters struct { + + // The HTTPS URL used to verify ownership of url. + ConfirmationURL *string `json:"confirmationUrl,omitempty" tf:"confirmation_url,omitempty"` + + // Custom HTTP header IoT Core should send. 
It is possible to define more than one custom header. + HTTPHeader []HTTPHTTPHeaderInitParameters `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // The HTTPS URL. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type TopicRuleHTTPObservation struct { + + // The HTTPS URL used to verify ownership of url. + ConfirmationURL *string `json:"confirmationUrl,omitempty" tf:"confirmation_url,omitempty"` + + // Custom HTTP header IoT Core should send. It is possible to define more than one custom header. + HTTPHeader []HTTPHTTPHeaderObservation `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // The HTTPS URL. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type TopicRuleHTTPParameters struct { + + // The HTTPS URL used to verify ownership of url. + // +kubebuilder:validation:Optional + ConfirmationURL *string `json:"confirmationUrl,omitempty" tf:"confirmation_url,omitempty"` + + // Custom HTTP header IoT Core should send. It is possible to define more than one custom header. + // +kubebuilder:validation:Optional + HTTPHeader []HTTPHTTPHeaderParameters `json:"httpHeader,omitempty" tf:"http_header,omitempty"` + + // The HTTPS URL. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type TopicRuleInitParameters struct { + CloudwatchAlarm []CloudwatchAlarmInitParameters `json:"cloudwatchAlarm,omitempty" tf:"cloudwatch_alarm,omitempty"` + + CloudwatchLogs []CloudwatchLogsInitParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + CloudwatchMetric []CloudwatchMetricInitParameters `json:"cloudwatchMetric,omitempty" tf:"cloudwatch_metric,omitempty"` + + // The description of the rule. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + Dynamodb []DynamodbInitParameters `json:"dynamodb,omitempty" tf:"dynamodb,omitempty"` + + Dynamodbv2 []Dynamodbv2InitParameters `json:"dynamodbv2,omitempty" tf:"dynamodbv2,omitempty"` + + Elasticsearch []ElasticsearchInitParameters `json:"elasticsearch,omitempty" tf:"elasticsearch,omitempty"` + + // Specifies whether the rule is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Configuration block with error action to be associated with the rule. See the documentation for cloudwatch_alarm, cloudwatch_logs, cloudwatch_metric, dynamodb, dynamodbv2, elasticsearch, firehose, http, iot_analytics, iot_events, kafka, kinesis, lambda, republish, s3, sns, sqs, step_functions, timestream configuration blocks for further configuration details. + ErrorAction *ErrorActionInitParameters `json:"errorAction,omitempty" tf:"error_action,omitempty"` + + Firehose []TopicRuleFirehoseInitParameters `json:"firehose,omitempty" tf:"firehose,omitempty"` + + HTTP []TopicRuleHTTPInitParameters `json:"http,omitempty" tf:"http,omitempty"` + + IotAnalytics []TopicRuleIotAnalyticsInitParameters `json:"iotAnalytics,omitempty" tf:"iot_analytics,omitempty"` + + IotEvents []TopicRuleIotEventsInitParameters `json:"iotEvents,omitempty" tf:"iot_events,omitempty"` + + Kafka []TopicRuleKafkaInitParameters `json:"kafka,omitempty" tf:"kafka,omitempty"` + + Kinesis []TopicRuleKinesisInitParameters `json:"kinesis,omitempty" tf:"kinesis,omitempty"` + + Lambda []TopicRuleLambdaInitParameters `json:"lambda,omitempty" tf:"lambda,omitempty"` + + Republish []TopicRuleRepublishInitParameters `json:"republish,omitempty" tf:"republish,omitempty"` + + S3 []TopicRuleS3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // The SQL statement used to query the topic. 
For more information, see AWS IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) in the AWS IoT Developer Guide. + SQL *string `json:"sql,omitempty" tf:"sql,omitempty"` + + // The version of the SQL rules engine to use when evaluating the rule. + SQLVersion *string `json:"sqlVersion,omitempty" tf:"sql_version,omitempty"` + + Sns []TopicRuleSnsInitParameters `json:"sns,omitempty" tf:"sns,omitempty"` + + Sqs []TopicRuleSqsInitParameters `json:"sqs,omitempty" tf:"sqs,omitempty"` + + StepFunctions []TopicRuleStepFunctionsInitParameters `json:"stepFunctions,omitempty" tf:"step_functions,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + Timestream []TopicRuleTimestreamInitParameters `json:"timestream,omitempty" tf:"timestream,omitempty"` +} + +type TopicRuleIotAnalyticsInitParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // Name of AWS IOT Analytics channel. + ChannelName *string `json:"channelName,omitempty" tf:"channel_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type TopicRuleIotAnalyticsObservation struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // Name of AWS IOT Analytics channel. + ChannelName *string `json:"channelName,omitempty" tf:"channel_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type TopicRuleIotAnalyticsParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + // +kubebuilder:validation:Optional + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // Name of AWS IOT Analytics channel. + // +kubebuilder:validation:Optional + ChannelName *string `json:"channelName" tf:"channel_name,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type TopicRuleIotEventsInitParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The name of the AWS IoT Events input. + InputName *string `json:"inputName,omitempty" tf:"input_name,omitempty"` + + // Use this to ensure that only one input (message) with a given messageId is processed by an AWS IoT Events detector. + MessageID *string `json:"messageId,omitempty" tf:"message_id,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type TopicRuleIotEventsObservation struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The name of the AWS IoT Events input. + InputName *string `json:"inputName,omitempty" tf:"input_name,omitempty"` + + // Use this to ensure that only one input (message) with a given messageId is processed by an AWS IoT Events detector. + MessageID *string `json:"messageId,omitempty" tf:"message_id,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type TopicRuleIotEventsParameters struct { + + // The payload that contains a JSON array of records will be sent to Kinesis Firehose via a batch call. + // +kubebuilder:validation:Optional + BatchMode *bool `json:"batchMode,omitempty" tf:"batch_mode,omitempty"` + + // The name of the AWS IoT Events input. + // +kubebuilder:validation:Optional + InputName *string `json:"inputName" tf:"input_name,omitempty"` + + // Use this to ensure that only one input (message) with a given messageId is processed by an AWS IoT Events detector. + // +kubebuilder:validation:Optional + MessageID *string `json:"messageId,omitempty" tf:"message_id,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type TopicRuleKafkaInitParameters struct { + + // Properties of the Apache Kafka producer client. For more info, see the AWS documentation. + // +mapType=granular + ClientProperties map[string]*string `json:"clientProperties,omitempty" tf:"client_properties,omitempty"` + + // The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` + + // The list of Kafka headers that you specify. Nested arguments below. + Header []KafkaHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The Kafka message partition. + Partition *string `json:"partition,omitempty" tf:"partition,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type TopicRuleKafkaObservation struct { + + // Properties of the Apache Kafka producer client. For more info, see the AWS documentation. 
+ // +mapType=granular + ClientProperties map[string]*string `json:"clientProperties,omitempty" tf:"client_properties,omitempty"` + + // The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` + + // The list of Kafka headers that you specify. Nested arguments below. + Header []KafkaHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The Kafka message partition. + Partition *string `json:"partition,omitempty" tf:"partition,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type TopicRuleKafkaParameters struct { + + // Properties of the Apache Kafka producer client. For more info, see the AWS documentation. + // +kubebuilder:validation:Optional + // +mapType=granular + ClientProperties map[string]*string `json:"clientProperties" tf:"client_properties,omitempty"` + + // The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + // +kubebuilder:validation:Optional + DestinationArn *string `json:"destinationArn" tf:"destination_arn,omitempty"` + + // The list of Kafka headers that you specify. Nested arguments below. + // +kubebuilder:validation:Optional + Header []KafkaHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The name of the HTTP header. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The Kafka message partition. + // +kubebuilder:validation:Optional + Partition *string `json:"partition,omitempty" tf:"partition,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + // +kubebuilder:validation:Optional + Topic *string `json:"topic" tf:"topic,omitempty"` +} + +type TopicRuleKinesisInitParameters struct { + + // The partition key. 
+ PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the Amazon Kinesis stream. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type TopicRuleKinesisObservation struct { + + // The partition key. + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the Amazon Kinesis stream. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type TopicRuleKinesisParameters struct { + + // The partition key. + // +kubebuilder:validation:Optional + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The name of the Amazon Kinesis stream. + // +kubebuilder:validation:Optional + StreamName *string `json:"streamName" tf:"stream_name,omitempty"` +} + +type TopicRuleLambdaInitParameters struct { + + // The ARN of the Lambda function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` +} + +type TopicRuleLambdaObservation struct { + + // The ARN of the Lambda function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` +} + +type TopicRuleLambdaParameters struct { + + // The ARN of the Lambda function. 
+ // +kubebuilder:validation:Optional + FunctionArn *string `json:"functionArn" tf:"function_arn,omitempty"` +} + +type TopicRuleObservation struct { + + // The ARN of the topic rule + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + CloudwatchAlarm []CloudwatchAlarmObservation `json:"cloudwatchAlarm,omitempty" tf:"cloudwatch_alarm,omitempty"` + + CloudwatchLogs []CloudwatchLogsObservation `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + CloudwatchMetric []CloudwatchMetricObservation `json:"cloudwatchMetric,omitempty" tf:"cloudwatch_metric,omitempty"` + + // The description of the rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + Dynamodb []DynamodbObservation `json:"dynamodb,omitempty" tf:"dynamodb,omitempty"` + + Dynamodbv2 []Dynamodbv2Observation `json:"dynamodbv2,omitempty" tf:"dynamodbv2,omitempty"` + + Elasticsearch []ElasticsearchObservation `json:"elasticsearch,omitempty" tf:"elasticsearch,omitempty"` + + // Specifies whether the rule is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Configuration block with error action to be associated with the rule. See the documentation for cloudwatch_alarm, cloudwatch_logs, cloudwatch_metric, dynamodb, dynamodbv2, elasticsearch, firehose, http, iot_analytics, iot_events, kafka, kinesis, lambda, republish, s3, sns, sqs, step_functions, timestream configuration blocks for further configuration details. + ErrorAction *ErrorActionObservation `json:"errorAction,omitempty" tf:"error_action,omitempty"` + + Firehose []TopicRuleFirehoseObservation `json:"firehose,omitempty" tf:"firehose,omitempty"` + + HTTP []TopicRuleHTTPObservation `json:"http,omitempty" tf:"http,omitempty"` + + // The unique identifier for the document you are storing. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + IotAnalytics []TopicRuleIotAnalyticsObservation `json:"iotAnalytics,omitempty" tf:"iot_analytics,omitempty"` + + IotEvents []TopicRuleIotEventsObservation `json:"iotEvents,omitempty" tf:"iot_events,omitempty"` + + Kafka []TopicRuleKafkaObservation `json:"kafka,omitempty" tf:"kafka,omitempty"` + + Kinesis []TopicRuleKinesisObservation `json:"kinesis,omitempty" tf:"kinesis,omitempty"` + + Lambda []TopicRuleLambdaObservation `json:"lambda,omitempty" tf:"lambda,omitempty"` + + Republish []TopicRuleRepublishObservation `json:"republish,omitempty" tf:"republish,omitempty"` + + S3 []TopicRuleS3Observation `json:"s3,omitempty" tf:"s3,omitempty"` + + // The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) in the AWS IoT Developer Guide. + SQL *string `json:"sql,omitempty" tf:"sql,omitempty"` + + // The version of the SQL rules engine to use when evaluating the rule. + SQLVersion *string `json:"sqlVersion,omitempty" tf:"sql_version,omitempty"` + + Sns []TopicRuleSnsObservation `json:"sns,omitempty" tf:"sns,omitempty"` + + Sqs []TopicRuleSqsObservation `json:"sqs,omitempty" tf:"sqs,omitempty"` + + StepFunctions []TopicRuleStepFunctionsObservation `json:"stepFunctions,omitempty" tf:"step_functions,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + Timestream []TopicRuleTimestreamObservation `json:"timestream,omitempty" tf:"timestream,omitempty"` +} + +type TopicRuleParameters struct { + + // +kubebuilder:validation:Optional + CloudwatchAlarm []CloudwatchAlarmParameters `json:"cloudwatchAlarm,omitempty" tf:"cloudwatch_alarm,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchLogs []CloudwatchLogsParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchMetric []CloudwatchMetricParameters `json:"cloudwatchMetric,omitempty" tf:"cloudwatch_metric,omitempty"` + + // The description of the rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // +kubebuilder:validation:Optional + Dynamodb []DynamodbParameters `json:"dynamodb,omitempty" tf:"dynamodb,omitempty"` + + // +kubebuilder:validation:Optional + Dynamodbv2 []Dynamodbv2Parameters `json:"dynamodbv2,omitempty" tf:"dynamodbv2,omitempty"` + + // +kubebuilder:validation:Optional + Elasticsearch []ElasticsearchParameters `json:"elasticsearch,omitempty" tf:"elasticsearch,omitempty"` + + // Specifies whether the rule is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Configuration block with error action to be associated with the rule. See the documentation for cloudwatch_alarm, cloudwatch_logs, cloudwatch_metric, dynamodb, dynamodbv2, elasticsearch, firehose, http, iot_analytics, iot_events, kafka, kinesis, lambda, republish, s3, sns, sqs, step_functions, timestream configuration blocks for further configuration details. 
+ // +kubebuilder:validation:Optional + ErrorAction *ErrorActionParameters `json:"errorAction,omitempty" tf:"error_action,omitempty"` + + // +kubebuilder:validation:Optional + Firehose []TopicRuleFirehoseParameters `json:"firehose,omitempty" tf:"firehose,omitempty"` + + // +kubebuilder:validation:Optional + HTTP []TopicRuleHTTPParameters `json:"http,omitempty" tf:"http,omitempty"` + + // +kubebuilder:validation:Optional + IotAnalytics []TopicRuleIotAnalyticsParameters `json:"iotAnalytics,omitempty" tf:"iot_analytics,omitempty"` + + // +kubebuilder:validation:Optional + IotEvents []TopicRuleIotEventsParameters `json:"iotEvents,omitempty" tf:"iot_events,omitempty"` + + // +kubebuilder:validation:Optional + Kafka []TopicRuleKafkaParameters `json:"kafka,omitempty" tf:"kafka,omitempty"` + + // +kubebuilder:validation:Optional + Kinesis []TopicRuleKinesisParameters `json:"kinesis,omitempty" tf:"kinesis,omitempty"` + + // +kubebuilder:validation:Optional + Lambda []TopicRuleLambdaParameters `json:"lambda,omitempty" tf:"lambda,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // +kubebuilder:validation:Optional + Republish []TopicRuleRepublishParameters `json:"republish,omitempty" tf:"republish,omitempty"` + + // +kubebuilder:validation:Optional + S3 []TopicRuleS3Parameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) in the AWS IoT Developer Guide. + // +kubebuilder:validation:Optional + SQL *string `json:"sql,omitempty" tf:"sql,omitempty"` + + // The version of the SQL rules engine to use when evaluating the rule. 
+ // +kubebuilder:validation:Optional + SQLVersion *string `json:"sqlVersion,omitempty" tf:"sql_version,omitempty"` + + // +kubebuilder:validation:Optional + Sns []TopicRuleSnsParameters `json:"sns,omitempty" tf:"sns,omitempty"` + + // +kubebuilder:validation:Optional + Sqs []TopicRuleSqsParameters `json:"sqs,omitempty" tf:"sqs,omitempty"` + + // +kubebuilder:validation:Optional + StepFunctions []TopicRuleStepFunctionsParameters `json:"stepFunctions,omitempty" tf:"step_functions,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +kubebuilder:validation:Optional + Timestream []TopicRuleTimestreamParameters `json:"timestream,omitempty" tf:"timestream,omitempty"` +} + +type TopicRuleRepublishInitParameters struct { + + // The Quality of Service (QoS) level to use when republishing messages. Valid values are 0 or 1. The default value is 0. + Qos *float64 `json:"qos,omitempty" tf:"qos,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type TopicRuleRepublishObservation struct { + + // The Quality of Service (QoS) level to use when republishing messages. Valid values are 0 or 1. The default value is 0. + Qos *float64 `json:"qos,omitempty" tf:"qos,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type TopicRuleRepublishParameters struct { + + // The Quality of Service (QoS) level to use when republishing messages. Valid values are 0 or 1. The default value is 0. 
+ // +kubebuilder:validation:Optional + Qos *float64 `json:"qos,omitempty" tf:"qos,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The Kafka topic for messages to be sent to the Kafka broker. + // +kubebuilder:validation:Optional + Topic *string `json:"topic" tf:"topic,omitempty"` +} + +type TopicRuleS3InitParameters struct { + + // The Amazon S3 bucket name. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // The Amazon S3 canned ACL that controls access to the object identified by the object key. Valid values. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type TopicRuleS3Observation struct { + + // The Amazon S3 bucket name. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // The Amazon S3 canned ACL that controls access to the object identified by the object key. Valid values. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` + + // The name of the HTTP header. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type TopicRuleS3Parameters struct { + + // The Amazon S3 bucket name. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"` + + // The Amazon S3 canned ACL that controls access to the object identified by the object key. Valid values. 
+ // +kubebuilder:validation:Optional + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` + + // The name of the HTTP header. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type TopicRuleSnsInitParameters struct { + + // The message format of the message to publish. Accepted values are "JSON" and "RAW". + MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The ARN of the SNS topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` + + // Reference to a Topic in sns to populate targetArn. + // +kubebuilder:validation:Optional + TargetArnRef *v1.Reference `json:"targetArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate targetArn. 
+ // +kubebuilder:validation:Optional + TargetArnSelector *v1.Selector `json:"targetArnSelector,omitempty" tf:"-"` +} + +type TopicRuleSnsObservation struct { + + // The message format of the message to publish. Accepted values are "JSON" and "RAW". + MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The ARN of the SNS topic. + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` +} + +type TopicRuleSnsParameters struct { + + // The message format of the message to publish. Accepted values are "JSON" and "RAW". + // +kubebuilder:validation:Optional + MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The ARN of the SNS topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` + + // Reference to a Topic in sns to populate targetArn. 
+ // +kubebuilder:validation:Optional + TargetArnRef *v1.Reference `json:"targetArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate targetArn. + // +kubebuilder:validation:Optional + TargetArnSelector *v1.Selector `json:"targetArnSelector,omitempty" tf:"-"` +} + +type TopicRuleSqsInitParameters struct { + + // The URL of the Amazon SQS queue. + QueueURL *string `json:"queueUrl,omitempty" tf:"queue_url,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Specifies whether to use Base64 encoding. + UseBase64 *bool `json:"useBase64,omitempty" tf:"use_base64,omitempty"` +} + +type TopicRuleSqsObservation struct { + + // The URL of the Amazon SQS queue. + QueueURL *string `json:"queueUrl,omitempty" tf:"queue_url,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Specifies whether to use Base64 encoding. + UseBase64 *bool `json:"useBase64,omitempty" tf:"use_base64,omitempty"` +} + +type TopicRuleSqsParameters struct { + + // The URL of the Amazon SQS queue. + // +kubebuilder:validation:Optional + QueueURL *string `json:"queueUrl" tf:"queue_url,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // Specifies whether to use Base64 encoding. + // +kubebuilder:validation:Optional + UseBase64 *bool `json:"useBase64" tf:"use_base64,omitempty"` +} + +type TopicRuleStepFunctionsInitParameters struct { + + // The prefix used to generate, along with a UUID, the unique state machine execution name. + ExecutionNamePrefix *string `json:"executionNamePrefix,omitempty" tf:"execution_name_prefix,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the Step Functions state machine whose execution will be started. + StateMachineName *string `json:"stateMachineName,omitempty" tf:"state_machine_name,omitempty"` +} + +type TopicRuleStepFunctionsObservation struct { + + // The prefix used to generate, along with a UUID, the unique state machine execution name. + ExecutionNamePrefix *string `json:"executionNamePrefix,omitempty" tf:"execution_name_prefix,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name of the Step Functions state machine whose execution will be started. + StateMachineName *string `json:"stateMachineName,omitempty" tf:"state_machine_name,omitempty"` +} + +type TopicRuleStepFunctionsParameters struct { + + // The prefix used to generate, along with a UUID, the unique state machine execution name. + // +kubebuilder:validation:Optional + ExecutionNamePrefix *string `json:"executionNamePrefix,omitempty" tf:"execution_name_prefix,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // The name of the Step Functions state machine whose execution will be started. + // +kubebuilder:validation:Optional + StateMachineName *string `json:"stateMachineName" tf:"state_machine_name,omitempty"` +} + +type TopicRuleTimestreamInitParameters struct { + + // The name of an Amazon Timestream database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Configuration blocks with metadata attributes of the time series that are written in each measure record. Nested arguments below. + Dimension []TimestreamDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // The IAM role ARN that allows access to the CloudWatch alarm. 
+	RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"`
+
+	// The name of the Timestream table.
+	TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"`
+
+	// Configuration block specifying an application-defined value to replace the default value assigned to the Timestream record's timestamp in the time column. Nested arguments below.
+	Timestamp *TimestreamTimestampInitParameters `json:"timestamp,omitempty" tf:"timestamp,omitempty"`
+}
+
+type TopicRuleTimestreamObservation struct {
+
+	// The name of an Amazon Timestream database.
+	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`
+
+	// Configuration blocks with metadata attributes of the time series that are written in each measure record. Nested arguments below.
+	Dimension []TimestreamDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"`
+
+	// The IAM role ARN that allows access to the CloudWatch alarm.
+	RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"`
+
+	// The name of the Timestream table.
+	TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"`
+
+	// Configuration block specifying an application-defined value to replace the default value assigned to the Timestream record's timestamp in the time column. Nested arguments below.
+	Timestamp *TimestreamTimestampObservation `json:"timestamp,omitempty" tf:"timestamp,omitempty"`
+}
+
+type TopicRuleTimestreamParameters struct {
+
+	// The name of an Amazon Timestream database.
+	// +kubebuilder:validation:Optional
+	DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"`
+
+	// Configuration blocks with metadata attributes of the time series that are written in each measure record. Nested arguments below.
+	// +kubebuilder:validation:Optional
+	Dimension []TimestreamDimensionParameters `json:"dimension" tf:"dimension,omitempty"`
+
+	// The IAM role ARN that allows access to the CloudWatch alarm.
+	// +kubebuilder:validation:Optional
+	RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"`
+
+	// The name of the Timestream table.
+	// +kubebuilder:validation:Optional
+	TableName *string `json:"tableName" tf:"table_name,omitempty"`
+
+	// Configuration block specifying an application-defined value to replace the default value assigned to the Timestream record's timestamp in the time column. Nested arguments below.
+	// +kubebuilder:validation:Optional
+	Timestamp *TimestreamTimestampParameters `json:"timestamp,omitempty" tf:"timestamp,omitempty"`
+}
+
+// TopicRuleSpec defines the desired state of TopicRule
+type TopicRuleSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     TopicRuleParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because of an external controller is managing them, like an
+	// autoscaler.
+	InitProvider TopicRuleInitParameters `json:"initProvider,omitempty"`
+}
+
+// TopicRuleStatus defines the observed state of TopicRule.
+type TopicRuleStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        TopicRuleObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// TopicRule is the Schema for the TopicRules API. Creates and manages an AWS IoT topic rule
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
+type TopicRule struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.enabled) || (has(self.initProvider) && has(self.initProvider.enabled))",message="spec.forProvider.enabled is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sql) || (has(self.initProvider) && has(self.initProvider.sql))",message="spec.forProvider.sql is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sqlVersion) || (has(self.initProvider) && has(self.initProvider.sqlVersion))",message="spec.forProvider.sqlVersion is a required parameter"
+	Spec   TopicRuleSpec   `json:"spec"`
+	Status TopicRuleStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// TopicRuleList contains a list of TopicRules
+type TopicRuleList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []TopicRule `json:"items"`
+}
+
+// Repository type metadata.
+var ( + TopicRule_Kind = "TopicRule" + TopicRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TopicRule_Kind}.String() + TopicRule_KindAPIVersion = TopicRule_Kind + "." + CRDGroupVersion.String() + TopicRule_GroupVersionKind = CRDGroupVersion.WithKind(TopicRule_Kind) +) + +func init() { + SchemeBuilder.Register(&TopicRule{}, &TopicRuleList{}) +} diff --git a/apis/iot/v1beta2/zz_topicruledestination_terraformed.go b/apis/iot/v1beta2/zz_topicruledestination_terraformed.go new file mode 100755 index 0000000000..b4de261f7a --- /dev/null +++ b/apis/iot/v1beta2/zz_topicruledestination_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TopicRuleDestination +func (mg *TopicRuleDestination) GetTerraformResourceType() string { + return "aws_iot_topic_rule_destination" +} + +// GetConnectionDetailsMapping for this TopicRuleDestination +func (tr *TopicRuleDestination) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TopicRuleDestination +func (tr *TopicRuleDestination) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TopicRuleDestination +func (tr *TopicRuleDestination) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TopicRuleDestination +func (tr *TopicRuleDestination) 
GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this TopicRuleDestination
+func (tr *TopicRuleDestination) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this TopicRuleDestination
+func (tr *TopicRuleDestination) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this TopicRuleDestination
+func (tr *TopicRuleDestination) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this TopicRuleDestination
+func (tr *TopicRuleDestination) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this TopicRuleDestination using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *TopicRuleDestination) LateInitialize(attrs []byte) (bool, error) { + params := &TopicRuleDestinationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TopicRuleDestination) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iot/v1beta2/zz_topicruledestination_types.go b/apis/iot/v1beta2/zz_topicruledestination_types.go new file mode 100755 index 0000000000..7a32c6d432 --- /dev/null +++ b/apis/iot/v1beta2/zz_topicruledestination_types.go @@ -0,0 +1,253 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TopicRuleDestinationInitParameters struct { + + // Whether or not to enable the destination. Default: true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Configuration of the virtual private cloud (VPC) connection. 
For more info, see the AWS documentation. + VPCConfiguration *VPCConfigurationInitParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type TopicRuleDestinationObservation struct { + + // The ARN of the topic rule destination + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether or not to enable the destination. Default: true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration of the virtual private cloud (VPC) connection. For more info, see the AWS documentation. + VPCConfiguration *VPCConfigurationObservation `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type TopicRuleDestinationParameters struct { + + // Whether or not to enable the destination. Default: true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration of the virtual private cloud (VPC) connection. For more info, see the AWS documentation. + // +kubebuilder:validation:Optional + VPCConfiguration *VPCConfigurationParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type VPCConfigurationInitParameters struct { + + // The ARN of a role that has permission to create and attach to elastic network interfaces (ENIs). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // The security groups of the VPC destination. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The subnet IDs of the VPC destination. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The ID of the VPC. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type VPCConfigurationObservation struct { + + // The ARN of a role that has permission to create and attach to elastic network interfaces (ENIs). + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The security groups of the VPC destination. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The subnet IDs of the VPC destination. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The ID of the VPC. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCConfigurationParameters struct { + + // The ARN of a role that has permission to create and attach to elastic network interfaces (ENIs). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // The security groups of the VPC destination. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The subnet IDs of the VPC destination. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The ID of the VPC. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +// TopicRuleDestinationSpec defines the desired state of TopicRuleDestination +type TopicRuleDestinationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TopicRuleDestinationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TopicRuleDestinationInitParameters `json:"initProvider,omitempty"` +} + +// TopicRuleDestinationStatus defines the observed state of TopicRuleDestination. +type TopicRuleDestinationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TopicRuleDestinationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TopicRuleDestination is the Schema for the TopicRuleDestinations API. 
Creates and manages an AWS IoT topic rule destination +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type TopicRuleDestination struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vpcConfiguration) || (has(self.initProvider) && has(self.initProvider.vpcConfiguration))",message="spec.forProvider.vpcConfiguration is a required parameter" + Spec TopicRuleDestinationSpec `json:"spec"` + Status TopicRuleDestinationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TopicRuleDestinationList contains a list of TopicRuleDestinations +type TopicRuleDestinationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TopicRuleDestination `json:"items"` +} + +// Repository type metadata. +var ( + TopicRuleDestination_Kind = "TopicRuleDestination" + TopicRuleDestination_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TopicRuleDestination_Kind}.String() + TopicRuleDestination_KindAPIVersion = TopicRuleDestination_Kind + "." 
+ CRDGroupVersion.String() + TopicRuleDestination_GroupVersionKind = CRDGroupVersion.WithKind(TopicRuleDestination_Kind) +) + +func init() { + SchemeBuilder.Register(&TopicRuleDestination{}, &TopicRuleDestinationList{}) +} diff --git a/apis/ivs/v1beta1/zz_generated.conversion_hubs.go b/apis/ivs/v1beta1/zz_generated.conversion_hubs.go index ec6834e943..80b5da0840 100755 --- a/apis/ivs/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/ivs/v1beta1/zz_generated.conversion_hubs.go @@ -8,6 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Channel) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *RecordingConfiguration) Hub() {} diff --git a/apis/ivs/v1beta1/zz_generated.conversion_spokes.go b/apis/ivs/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..113ae236e5 --- /dev/null +++ b/apis/ivs/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this RecordingConfiguration to the hub type. +func (tr *RecordingConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RecordingConfiguration type. 
+func (tr *RecordingConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/ivs/v1beta2/zz_generated.conversion_hubs.go b/apis/ivs/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9371927605 --- /dev/null +++ b/apis/ivs/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *RecordingConfiguration) Hub() {} diff --git a/apis/ivs/v1beta2/zz_generated.deepcopy.go b/apis/ivs/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b8e560e9d4 --- /dev/null +++ b/apis/ivs/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,491 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationConfigurationInitParameters) DeepCopyInto(out *DestinationConfigurationInitParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3InitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigurationInitParameters. 
+func (in *DestinationConfigurationInitParameters) DeepCopy() *DestinationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(DestinationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationConfigurationObservation) DeepCopyInto(out *DestinationConfigurationObservation) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Observation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigurationObservation. +func (in *DestinationConfigurationObservation) DeepCopy() *DestinationConfigurationObservation { + if in == nil { + return nil + } + out := new(DestinationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationConfigurationParameters) DeepCopyInto(out *DestinationConfigurationParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Parameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigurationParameters. +func (in *DestinationConfigurationParameters) DeepCopy() *DestinationConfigurationParameters { + if in == nil { + return nil + } + out := new(DestinationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordingConfiguration) DeepCopyInto(out *RecordingConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingConfiguration. +func (in *RecordingConfiguration) DeepCopy() *RecordingConfiguration { + if in == nil { + return nil + } + out := new(RecordingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RecordingConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingConfigurationInitParameters) DeepCopyInto(out *RecordingConfigurationInitParameters) { + *out = *in + if in.DestinationConfiguration != nil { + in, out := &in.DestinationConfiguration, &out.DestinationConfiguration + *out = new(DestinationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RecordingReconnectWindowSeconds != nil { + in, out := &in.RecordingReconnectWindowSeconds, &out.RecordingReconnectWindowSeconds + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThumbnailConfiguration != nil { + in, out := &in.ThumbnailConfiguration, &out.ThumbnailConfiguration + *out = new(ThumbnailConfigurationInitParameters) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingConfigurationInitParameters. +func (in *RecordingConfigurationInitParameters) DeepCopy() *RecordingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RecordingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingConfigurationList) DeepCopyInto(out *RecordingConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RecordingConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingConfigurationList. +func (in *RecordingConfigurationList) DeepCopy() *RecordingConfigurationList { + if in == nil { + return nil + } + out := new(RecordingConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RecordingConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordingConfigurationObservation) DeepCopyInto(out *RecordingConfigurationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DestinationConfiguration != nil { + in, out := &in.DestinationConfiguration, &out.DestinationConfiguration + *out = new(DestinationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RecordingReconnectWindowSeconds != nil { + in, out := &in.RecordingReconnectWindowSeconds, &out.RecordingReconnectWindowSeconds + *out = new(float64) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThumbnailConfiguration != nil { + in, out := &in.ThumbnailConfiguration, &out.ThumbnailConfiguration + *out = new(ThumbnailConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingConfigurationObservation. 
+func (in *RecordingConfigurationObservation) DeepCopy() *RecordingConfigurationObservation { + if in == nil { + return nil + } + out := new(RecordingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingConfigurationParameters) DeepCopyInto(out *RecordingConfigurationParameters) { + *out = *in + if in.DestinationConfiguration != nil { + in, out := &in.DestinationConfiguration, &out.DestinationConfiguration + *out = new(DestinationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RecordingReconnectWindowSeconds != nil { + in, out := &in.RecordingReconnectWindowSeconds, &out.RecordingReconnectWindowSeconds + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThumbnailConfiguration != nil { + in, out := &in.ThumbnailConfiguration, &out.ThumbnailConfiguration + *out = new(ThumbnailConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingConfigurationParameters. +func (in *RecordingConfigurationParameters) DeepCopy() *RecordingConfigurationParameters { + if in == nil { + return nil + } + out := new(RecordingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordingConfigurationSpec) DeepCopyInto(out *RecordingConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingConfigurationSpec. +func (in *RecordingConfigurationSpec) DeepCopy() *RecordingConfigurationSpec { + if in == nil { + return nil + } + out := new(RecordingConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordingConfigurationStatus) DeepCopyInto(out *RecordingConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordingConfigurationStatus. +func (in *RecordingConfigurationStatus) DeepCopy() *RecordingConfigurationStatus { + if in == nil { + return nil + } + out := new(RecordingConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InitParameters) DeepCopyInto(out *S3InitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InitParameters. +func (in *S3InitParameters) DeepCopy() *S3InitParameters { + if in == nil { + return nil + } + out := new(S3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Observation) DeepCopyInto(out *S3Observation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Observation. +func (in *S3Observation) DeepCopy() *S3Observation { + if in == nil { + return nil + } + out := new(S3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Parameters) DeepCopyInto(out *S3Parameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Parameters. +func (in *S3Parameters) DeepCopy() *S3Parameters { + if in == nil { + return nil + } + out := new(S3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThumbnailConfigurationInitParameters) DeepCopyInto(out *ThumbnailConfigurationInitParameters) { + *out = *in + if in.RecordingMode != nil { + in, out := &in.RecordingMode, &out.RecordingMode + *out = new(string) + **out = **in + } + if in.TargetIntervalSeconds != nil { + in, out := &in.TargetIntervalSeconds, &out.TargetIntervalSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailConfigurationInitParameters. +func (in *ThumbnailConfigurationInitParameters) DeepCopy() *ThumbnailConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ThumbnailConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThumbnailConfigurationObservation) DeepCopyInto(out *ThumbnailConfigurationObservation) { + *out = *in + if in.RecordingMode != nil { + in, out := &in.RecordingMode, &out.RecordingMode + *out = new(string) + **out = **in + } + if in.TargetIntervalSeconds != nil { + in, out := &in.TargetIntervalSeconds, &out.TargetIntervalSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailConfigurationObservation. +func (in *ThumbnailConfigurationObservation) DeepCopy() *ThumbnailConfigurationObservation { + if in == nil { + return nil + } + out := new(ThumbnailConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThumbnailConfigurationParameters) DeepCopyInto(out *ThumbnailConfigurationParameters) { + *out = *in + if in.RecordingMode != nil { + in, out := &in.RecordingMode, &out.RecordingMode + *out = new(string) + **out = **in + } + if in.TargetIntervalSeconds != nil { + in, out := &in.TargetIntervalSeconds, &out.TargetIntervalSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThumbnailConfigurationParameters. +func (in *ThumbnailConfigurationParameters) DeepCopy() *ThumbnailConfigurationParameters { + if in == nil { + return nil + } + out := new(ThumbnailConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ivs/v1beta2/zz_generated.managed.go b/apis/ivs/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..7832dedb79 --- /dev/null +++ b/apis/ivs/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this RecordingConfiguration. +func (mg *RecordingConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RecordingConfiguration. +func (mg *RecordingConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RecordingConfiguration. +func (mg *RecordingConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RecordingConfiguration. +func (mg *RecordingConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RecordingConfiguration. +func (mg *RecordingConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RecordingConfiguration. +func (mg *RecordingConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RecordingConfiguration. +func (mg *RecordingConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RecordingConfiguration. +func (mg *RecordingConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RecordingConfiguration. +func (mg *RecordingConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RecordingConfiguration. 
+func (mg *RecordingConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RecordingConfiguration. +func (mg *RecordingConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RecordingConfiguration. +func (mg *RecordingConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ivs/v1beta2/zz_generated.managedlist.go b/apis/ivs/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..9f229b2016 --- /dev/null +++ b/apis/ivs/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this RecordingConfigurationList. +func (l *RecordingConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ivs/v1beta2/zz_groupversion_info.go b/apis/ivs/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..cf780caf8e --- /dev/null +++ b/apis/ivs/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ivs.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "ivs.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ivs/v1beta2/zz_recordingconfiguration_terraformed.go b/apis/ivs/v1beta2/zz_recordingconfiguration_terraformed.go new file mode 100755 index 0000000000..b1630703c4 --- /dev/null +++ b/apis/ivs/v1beta2/zz_recordingconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RecordingConfiguration +func (mg *RecordingConfiguration) GetTerraformResourceType() string { + return "aws_ivs_recording_configuration" +} + +// GetConnectionDetailsMapping for this RecordingConfiguration +func (tr *RecordingConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RecordingConfiguration +func (tr *RecordingConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RecordingConfiguration +func (tr *RecordingConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this RecordingConfiguration +func (tr *RecordingConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RecordingConfiguration +func (tr *RecordingConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RecordingConfiguration +func (tr *RecordingConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RecordingConfiguration +func (tr *RecordingConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this RecordingConfiguration +func (tr *RecordingConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RecordingConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *RecordingConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &RecordingConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RecordingConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ivs/v1beta2/zz_recordingconfiguration_types.go b/apis/ivs/v1beta2/zz_recordingconfiguration_types.go new file mode 100755 index 0000000000..bba793b45b --- /dev/null +++ b/apis/ivs/v1beta2/zz_recordingconfiguration_types.go @@ -0,0 +1,221 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DestinationConfigurationInitParameters struct { + + // S3 destination configuration where recorded videos will be stored. 
+ S3 *S3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type DestinationConfigurationObservation struct { + + // S3 destination configuration where recorded videos will be stored. + S3 *S3Observation `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type DestinationConfigurationParameters struct { + + // S3 destination configuration where recorded videos will be stored. + // +kubebuilder:validation:Optional + S3 *S3Parameters `json:"s3" tf:"s3,omitempty"` +} + +type RecordingConfigurationInitParameters struct { + + // Object containing destination configuration for where recorded video will be stored. + DestinationConfiguration *DestinationConfigurationInitParameters `json:"destinationConfiguration,omitempty" tf:"destination_configuration,omitempty"` + + // Recording Configuration name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. + RecordingReconnectWindowSeconds *float64 `json:"recordingReconnectWindowSeconds,omitempty" tf:"recording_reconnect_window_seconds,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Object containing information to enable/disable the recording of thumbnails for a live session and modify the interval at which thumbnails are generated for the live session. + ThumbnailConfiguration *ThumbnailConfigurationInitParameters `json:"thumbnailConfiguration,omitempty" tf:"thumbnail_configuration,omitempty"` +} + +type RecordingConfigurationObservation struct { + + // ARN of the Recording Configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Object containing destination configuration for where recorded video will be stored. 
+ DestinationConfiguration *DestinationConfigurationObservation `json:"destinationConfiguration,omitempty" tf:"destination_configuration,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Recording Configuration name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. + RecordingReconnectWindowSeconds *float64 `json:"recordingReconnectWindowSeconds,omitempty" tf:"recording_reconnect_window_seconds,omitempty"` + + // The current state of the Recording Configuration. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Object containing information to enable/disable the recording of thumbnails for a live session and modify the interval at which thumbnails are generated for the live session. + ThumbnailConfiguration *ThumbnailConfigurationObservation `json:"thumbnailConfiguration,omitempty" tf:"thumbnail_configuration,omitempty"` +} + +type RecordingConfigurationParameters struct { + + // Object containing destination configuration for where recorded video will be stored. + // +kubebuilder:validation:Optional + DestinationConfiguration *DestinationConfigurationParameters `json:"destinationConfiguration,omitempty" tf:"destination_configuration,omitempty"` + + // Recording Configuration name. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. + // +kubebuilder:validation:Optional + RecordingReconnectWindowSeconds *float64 `json:"recordingReconnectWindowSeconds,omitempty" tf:"recording_reconnect_window_seconds,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Object containing information to enable/disable the recording of thumbnails for a live session and modify the interval at which thumbnails are generated for the live session. + // +kubebuilder:validation:Optional + ThumbnailConfiguration *ThumbnailConfigurationParameters `json:"thumbnailConfiguration,omitempty" tf:"thumbnail_configuration,omitempty"` +} + +type S3InitParameters struct { + + // S3 bucket name where recorded videos will be stored. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` +} + +type S3Observation struct { + + // S3 bucket name where recorded videos will be stored. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` +} + +type S3Parameters struct { + + // S3 bucket name where recorded videos will be stored. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"` +} + +type ThumbnailConfigurationInitParameters struct { + + // Thumbnail recording mode. Valid values: DISABLED, INTERVAL. + RecordingMode *string `json:"recordingMode,omitempty" tf:"recording_mode,omitempty"` + + // The targeted thumbnail-generation interval in seconds. 
+ TargetIntervalSeconds *float64 `json:"targetIntervalSeconds,omitempty" tf:"target_interval_seconds,omitempty"` +} + +type ThumbnailConfigurationObservation struct { + + // Thumbnail recording mode. Valid values: DISABLED, INTERVAL. + RecordingMode *string `json:"recordingMode,omitempty" tf:"recording_mode,omitempty"` + + // The targeted thumbnail-generation interval in seconds. + TargetIntervalSeconds *float64 `json:"targetIntervalSeconds,omitempty" tf:"target_interval_seconds,omitempty"` +} + +type ThumbnailConfigurationParameters struct { + + // Thumbnail recording mode. Valid values: DISABLED, INTERVAL. + // +kubebuilder:validation:Optional + RecordingMode *string `json:"recordingMode,omitempty" tf:"recording_mode,omitempty"` + + // The targeted thumbnail-generation interval in seconds. + // +kubebuilder:validation:Optional + TargetIntervalSeconds *float64 `json:"targetIntervalSeconds,omitempty" tf:"target_interval_seconds,omitempty"` +} + +// RecordingConfigurationSpec defines the desired state of RecordingConfiguration +type RecordingConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RecordingConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider RecordingConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// RecordingConfigurationStatus defines the observed state of RecordingConfiguration. +type RecordingConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RecordingConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RecordingConfiguration is the Schema for the RecordingConfigurations API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type RecordingConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destinationConfiguration) || (has(self.initProvider) && has(self.initProvider.destinationConfiguration))",message="spec.forProvider.destinationConfiguration is a required parameter" + Spec RecordingConfigurationSpec `json:"spec"` + Status RecordingConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RecordingConfigurationList contains a list of RecordingConfigurations +type RecordingConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RecordingConfiguration `json:"items"` +} + +// Repository type metadata. 
+var ( + RecordingConfiguration_Kind = "RecordingConfiguration" + RecordingConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RecordingConfiguration_Kind}.String() + RecordingConfiguration_KindAPIVersion = RecordingConfiguration_Kind + "." + CRDGroupVersion.String() + RecordingConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(RecordingConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&RecordingConfiguration{}, &RecordingConfigurationList{}) +} diff --git a/apis/kafka/v1beta1/zz_generated.conversion_hubs.go b/apis/kafka/v1beta1/zz_generated.conversion_hubs.go index aed42ea81a..0ba39b03f1 100755 --- a/apis/kafka/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/kafka/v1beta1/zz_generated.conversion_hubs.go @@ -11,6 +11,3 @@ func (tr *Configuration) Hub() {} // Hub marks this type as a conversion hub. func (tr *ScramSecretAssociation) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ServerlessCluster) Hub() {} diff --git a/apis/kafka/v1beta1/zz_generated.conversion_spokes.go b/apis/kafka/v1beta1/zz_generated.conversion_spokes.go index 49d5c675ce..1b49384ca1 100755 --- a/apis/kafka/v1beta1/zz_generated.conversion_spokes.go +++ b/apis/kafka/v1beta1/zz_generated.conversion_spokes.go @@ -32,3 +32,23 @@ func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { } return nil } + +// ConvertTo converts this ServerlessCluster to the hub type. +func (tr *ServerlessCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ServerlessCluster type. 
+func (tr *ServerlessCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/kafka/v1beta1/zz_generated.resolvers.go b/apis/kafka/v1beta1/zz_generated.resolvers.go index ac03af5ee7..8f2f6b5bfc 100644 --- a/apis/kafka/v1beta1/zz_generated.resolvers.go +++ b/apis/kafka/v1beta1/zz_generated.resolvers.go @@ -361,7 +361,7 @@ func (mg *ScramSecretAssociation) ResolveReferences(ctx context.Context, c clien var mrsp reference.MultiResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kafka.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kafka.aws.upbound.io", "v1beta3", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -399,7 +399,7 @@ func (mg *ScramSecretAssociation) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.SecretArnList = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.SecretArnRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("kafka.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kafka.aws.upbound.io", "v1beta3", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/kafka/v1beta1/zz_scramsecretassociation_types.go b/apis/kafka/v1beta1/zz_scramsecretassociation_types.go index 3ba83c643c..3b4f2bbdbd 100755 --- a/apis/kafka/v1beta1/zz_scramsecretassociation_types.go +++ 
b/apis/kafka/v1beta1/zz_scramsecretassociation_types.go @@ -16,7 +16,7 @@ import ( type ScramSecretAssociationInitParameters struct { // Amazon Resource Name (ARN) of the MSK cluster. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafka/v1beta2.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafka/v1beta3.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) ClusterArn *string `json:"clusterArn,omitempty" tf:"cluster_arn,omitempty"` @@ -60,7 +60,7 @@ type ScramSecretAssociationObservation struct { type ScramSecretAssociationParameters struct { // Amazon Resource Name (ARN) of the MSK cluster. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafka/v1beta2.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafka/v1beta3.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional ClusterArn *string `json:"clusterArn,omitempty" tf:"cluster_arn,omitempty"` diff --git a/apis/kafka/v1beta2/zz_generated.conversion_hubs.go b/apis/kafka/v1beta2/zz_generated.conversion_hubs.go index 5640ab69b4..4fe5e87229 100755 --- a/apis/kafka/v1beta2/zz_generated.conversion_hubs.go +++ b/apis/kafka/v1beta2/zz_generated.conversion_hubs.go @@ -7,4 +7,4 @@ package v1beta2 // Hub marks this type as a conversion hub. -func (tr *Cluster) Hub() {} +func (tr *ServerlessCluster) Hub() {} diff --git a/apis/kafka/v1beta2/zz_generated.conversion_spokes.go b/apis/kafka/v1beta2/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..dd3ba8f70d --- /dev/null +++ b/apis/kafka/v1beta2/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Cluster to the hub type. +func (tr *Cluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Cluster type. +func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/kafka/v1beta2/zz_generated.deepcopy.go b/apis/kafka/v1beta2/zz_generated.deepcopy.go index e7cbb31e5d..94d9d708fc 100644 --- a/apis/kafka/v1beta2/zz_generated.deepcopy.go +++ b/apis/kafka/v1beta2/zz_generated.deepcopy.go @@ -1728,6 +1728,66 @@ func (in *FirehoseParameters) DeepCopy() *FirehoseParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMInitParameters) DeepCopyInto(out *IAMInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMInitParameters. 
+func (in *IAMInitParameters) DeepCopy() *IAMInitParameters { + if in == nil { + return nil + } + out := new(IAMInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMObservation) DeepCopyInto(out *IAMObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMObservation. +func (in *IAMObservation) DeepCopy() *IAMObservation { + if in == nil { + return nil + } + out := new(IAMObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMParameters) DeepCopyInto(out *IAMParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMParameters. +func (in *IAMParameters) DeepCopy() *IAMParameters { + if in == nil { + return nil + } + out := new(IAMParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JmxExporterInitParameters) DeepCopyInto(out *JmxExporterInitParameters) { *out = *in @@ -2387,6 +2447,400 @@ func (in *SaslParameters) DeepCopy() *SaslParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessCluster) DeepCopyInto(out *ServerlessCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCluster. +func (in *ServerlessCluster) DeepCopy() *ServerlessCluster { + if in == nil { + return nil + } + out := new(ServerlessCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerlessCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessClusterClientAuthenticationInitParameters) DeepCopyInto(out *ServerlessClusterClientAuthenticationInitParameters) { + *out = *in + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = new(ServerlessClusterClientAuthenticationSaslInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterClientAuthenticationInitParameters. +func (in *ServerlessClusterClientAuthenticationInitParameters) DeepCopy() *ServerlessClusterClientAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(ServerlessClusterClientAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessClusterClientAuthenticationObservation) DeepCopyInto(out *ServerlessClusterClientAuthenticationObservation) { + *out = *in + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = new(ServerlessClusterClientAuthenticationSaslObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterClientAuthenticationObservation. +func (in *ServerlessClusterClientAuthenticationObservation) DeepCopy() *ServerlessClusterClientAuthenticationObservation { + if in == nil { + return nil + } + out := new(ServerlessClusterClientAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessClusterClientAuthenticationParameters) DeepCopyInto(out *ServerlessClusterClientAuthenticationParameters) { + *out = *in + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = new(ServerlessClusterClientAuthenticationSaslParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterClientAuthenticationParameters. +func (in *ServerlessClusterClientAuthenticationParameters) DeepCopy() *ServerlessClusterClientAuthenticationParameters { + if in == nil { + return nil + } + out := new(ServerlessClusterClientAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessClusterClientAuthenticationSaslInitParameters) DeepCopyInto(out *ServerlessClusterClientAuthenticationSaslInitParameters) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(IAMInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterClientAuthenticationSaslInitParameters. +func (in *ServerlessClusterClientAuthenticationSaslInitParameters) DeepCopy() *ServerlessClusterClientAuthenticationSaslInitParameters { + if in == nil { + return nil + } + out := new(ServerlessClusterClientAuthenticationSaslInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessClusterClientAuthenticationSaslObservation) DeepCopyInto(out *ServerlessClusterClientAuthenticationSaslObservation) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(IAMObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterClientAuthenticationSaslObservation. +func (in *ServerlessClusterClientAuthenticationSaslObservation) DeepCopy() *ServerlessClusterClientAuthenticationSaslObservation { + if in == nil { + return nil + } + out := new(ServerlessClusterClientAuthenticationSaslObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessClusterClientAuthenticationSaslParameters) DeepCopyInto(out *ServerlessClusterClientAuthenticationSaslParameters) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(IAMParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterClientAuthenticationSaslParameters. +func (in *ServerlessClusterClientAuthenticationSaslParameters) DeepCopy() *ServerlessClusterClientAuthenticationSaslParameters { + if in == nil { + return nil + } + out := new(ServerlessClusterClientAuthenticationSaslParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessClusterInitParameters) DeepCopyInto(out *ServerlessClusterInitParameters) { + *out = *in + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(ServerlessClusterClientAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = make([]VPCConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterInitParameters. 
+func (in *ServerlessClusterInitParameters) DeepCopy() *ServerlessClusterInitParameters { + if in == nil { + return nil + } + out := new(ServerlessClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessClusterList) DeepCopyInto(out *ServerlessClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServerlessCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterList. +func (in *ServerlessClusterList) DeepCopy() *ServerlessClusterList { + if in == nil { + return nil + } + out := new(ServerlessClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerlessClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessClusterObservation) DeepCopyInto(out *ServerlessClusterObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(ServerlessClusterClientAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterUUID != nil { + in, out := &in.ClusterUUID, &out.ClusterUUID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = make([]VPCConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterObservation. +func (in *ServerlessClusterObservation) DeepCopy() *ServerlessClusterObservation { + if in == nil { + return nil + } + out := new(ServerlessClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessClusterParameters) DeepCopyInto(out *ServerlessClusterParameters) { + *out = *in + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(ServerlessClusterClientAuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = make([]VPCConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterParameters. +func (in *ServerlessClusterParameters) DeepCopy() *ServerlessClusterParameters { + if in == nil { + return nil + } + out := new(ServerlessClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessClusterSpec) DeepCopyInto(out *ServerlessClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterSpec. 
+func (in *ServerlessClusterSpec) DeepCopy() *ServerlessClusterSpec { + if in == nil { + return nil + } + out := new(ServerlessClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessClusterStatus) DeepCopyInto(out *ServerlessClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessClusterStatus. +func (in *ServerlessClusterStatus) DeepCopy() *ServerlessClusterStatus { + if in == nil { + return nil + } + out := new(ServerlessClusterStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageInfoInitParameters) DeepCopyInto(out *StorageInfoInitParameters) { *out = *in @@ -2531,6 +2985,165 @@ func (in *TLSParameters) DeepCopy() *TLSParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigInitParameters) DeepCopyInto(out *VPCConfigInitParameters) { + *out = *in + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigInitParameters. +func (in *VPCConfigInitParameters) DeepCopy() *VPCConfigInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigObservation) DeepCopyInto(out *VPCConfigObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigObservation. +func (in *VPCConfigObservation) DeepCopy() *VPCConfigObservation { + if in == nil { + return nil + } + out := new(VPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigParameters) DeepCopyInto(out *VPCConfigParameters) { + *out = *in + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigParameters. +func (in *VPCConfigParameters) DeepCopy() *VPCConfigParameters { + if in == nil { + return nil + } + out := new(VPCConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VPCConnectivityInitParameters) DeepCopyInto(out *VPCConnectivityInitParameters) { *out = *in diff --git a/apis/kafka/v1beta2/zz_generated.managed.go b/apis/kafka/v1beta2/zz_generated.managed.go index 8a26829392..8afda12a8c 100644 --- a/apis/kafka/v1beta2/zz_generated.managed.go +++ b/apis/kafka/v1beta2/zz_generated.managed.go @@ -66,3 +66,63 @@ func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetail func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } + +// GetCondition of this ServerlessCluster. +func (mg *ServerlessCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ServerlessCluster. +func (mg *ServerlessCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ServerlessCluster. +func (mg *ServerlessCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ServerlessCluster. +func (mg *ServerlessCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ServerlessCluster. 
+func (mg *ServerlessCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ServerlessCluster. +func (mg *ServerlessCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ServerlessCluster. +func (mg *ServerlessCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ServerlessCluster. +func (mg *ServerlessCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ServerlessCluster. +func (mg *ServerlessCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ServerlessCluster. +func (mg *ServerlessCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ServerlessCluster. +func (mg *ServerlessCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ServerlessCluster. +func (mg *ServerlessCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kafka/v1beta2/zz_generated.managedlist.go b/apis/kafka/v1beta2/zz_generated.managedlist.go index a6628c8e29..3f075d662a 100644 --- a/apis/kafka/v1beta2/zz_generated.managedlist.go +++ b/apis/kafka/v1beta2/zz_generated.managedlist.go @@ -15,3 +15,12 @@ func (l *ClusterList) GetItems() []resource.Managed { } return items } + +// GetItems of this ServerlessClusterList. 
+func (l *ServerlessClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kafka/v1beta2/zz_generated.resolvers.go b/apis/kafka/v1beta2/zz_generated.resolvers.go index 66cb04282e..f648b39f7b 100644 --- a/apis/kafka/v1beta2/zz_generated.resolvers.go +++ b/apis/kafka/v1beta2/zz_generated.resolvers.go @@ -350,3 +350,100 @@ func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error return nil } + +// ResolveReferences of this ServerlessCluster. +func (mg *ServerlessCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var mrsp reference.MultiResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.VPCConfig); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig[i3].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig[i3].SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig[i3].SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig[i3].SecurityGroupIds") + } + mg.Spec.ForProvider.VPCConfig[i3].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfig[i3].SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.VPCConfig); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", 
"v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig[i3].SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig[i3].SubnetIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig[i3].SubnetIds") + } + mg.Spec.ForProvider.VPCConfig[i3].SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfig[i3].SubnetIDRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.VPCConfig); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig[i3].SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig[i3].SecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig[i3].SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig[i3].SecurityGroupIds") + } + mg.Spec.InitProvider.VPCConfig[i3].SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig[i3].SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.VPCConfig); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + 
if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig[i3].SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig[i3].SubnetIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig[i3].SubnetIds") + } + mg.Spec.InitProvider.VPCConfig[i3].SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig[i3].SubnetIDRefs = mrsp.ResolvedReferences + + } + + return nil +} diff --git a/apis/kafka/v1beta2/zz_serverlesscluster_terraformed.go b/apis/kafka/v1beta2/zz_serverlesscluster_terraformed.go new file mode 100755 index 0000000000..45468f96d7 --- /dev/null +++ b/apis/kafka/v1beta2/zz_serverlesscluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServerlessCluster +func (mg *ServerlessCluster) GetTerraformResourceType() string { + return "aws_msk_serverless_cluster" +} + +// GetConnectionDetailsMapping for this ServerlessCluster +func (tr *ServerlessCluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ServerlessCluster +func (tr *ServerlessCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServerlessCluster +func (tr *ServerlessCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServerlessCluster +func (tr *ServerlessCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ServerlessCluster +func (tr *ServerlessCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServerlessCluster +func (tr *ServerlessCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServerlessCluster +func (tr *ServerlessCluster) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServerlessCluster +func (tr *ServerlessCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServerlessCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ServerlessCluster) LateInitialize(attrs []byte) (bool, error) { + params := &ServerlessClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServerlessCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kafka/v1beta2/zz_serverlesscluster_types.go b/apis/kafka/v1beta2/zz_serverlesscluster_types.go new file mode 100755 index 0000000000..18ff2879fe --- /dev/null +++ b/apis/kafka/v1beta2/zz_serverlesscluster_types.go @@ -0,0 +1,282 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IAMInitParameters struct { + + // Whether SASL/IAM authentication is enabled or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type IAMObservation struct { + + // Whether SASL/IAM authentication is enabled or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type IAMParameters struct { + + // Whether SASL/IAM authentication is enabled or not. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type ServerlessClusterClientAuthenticationInitParameters struct { + + // Details for client authentication using SASL. See below. + Sasl *ServerlessClusterClientAuthenticationSaslInitParameters `json:"sasl,omitempty" tf:"sasl,omitempty"` +} + +type ServerlessClusterClientAuthenticationObservation struct { + + // Details for client authentication using SASL. See below. + Sasl *ServerlessClusterClientAuthenticationSaslObservation `json:"sasl,omitempty" tf:"sasl,omitempty"` +} + +type ServerlessClusterClientAuthenticationParameters struct { + + // Details for client authentication using SASL. See below. 
+ // +kubebuilder:validation:Optional + Sasl *ServerlessClusterClientAuthenticationSaslParameters `json:"sasl" tf:"sasl,omitempty"` +} + +type ServerlessClusterClientAuthenticationSaslInitParameters struct { + + // Details for client authentication using IAM. See below. + IAM *IAMInitParameters `json:"iam,omitempty" tf:"iam,omitempty"` +} + +type ServerlessClusterClientAuthenticationSaslObservation struct { + + // Details for client authentication using IAM. See below. + IAM *IAMObservation `json:"iam,omitempty" tf:"iam,omitempty"` +} + +type ServerlessClusterClientAuthenticationSaslParameters struct { + + // Details for client authentication using IAM. See below. + // +kubebuilder:validation:Optional + IAM *IAMParameters `json:"iam" tf:"iam,omitempty"` +} + +type ServerlessClusterInitParameters struct { + + // Specifies client authentication information for the serverless cluster. See below. + ClientAuthentication *ServerlessClusterClientAuthenticationInitParameters `json:"clientAuthentication,omitempty" tf:"client_authentication,omitempty"` + + // The name of the serverless cluster. + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // VPC configuration information. See below. + VPCConfig []VPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ServerlessClusterObservation struct { + + // The ARN of the serverless cluster. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specifies client authentication information for the serverless cluster. See below. + ClientAuthentication *ServerlessClusterClientAuthenticationObservation `json:"clientAuthentication,omitempty" tf:"client_authentication,omitempty"` + + // The name of the serverless cluster. 
+ ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // UUID of the serverless cluster, for use in IAM policies. + ClusterUUID *string `json:"clusterUuid,omitempty" tf:"cluster_uuid,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // VPC configuration information. See below. + VPCConfig []VPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ServerlessClusterParameters struct { + + // Specifies client authentication information for the serverless cluster. See below. + // +kubebuilder:validation:Optional + ClientAuthentication *ServerlessClusterClientAuthenticationParameters `json:"clientAuthentication,omitempty" tf:"client_authentication,omitempty"` + + // The name of the serverless cluster. + // +kubebuilder:validation:Optional + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // VPC configuration information. See below. + // +kubebuilder:validation:Optional + VPCConfig []VPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type VPCConfigInitParameters struct { + + // References to SecurityGroup in ec2 to populate securityGroupIds. 
+ // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // Specifies up to five security groups that control inbound and outbound traffic for the serverless cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of subnets in at least two different Availability Zones that host your client applications. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigObservation struct { + + // Specifies up to five security groups that control inbound and outbound traffic for the serverless cluster. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnets in at least two different Availability Zones that host your client applications. 
+ // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigParameters struct { + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // Specifies up to five security groups that control inbound and outbound traffic for the serverless cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A list of subnets in at least two different Availability Zones that host your client applications. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +// ServerlessClusterSpec defines the desired state of ServerlessCluster +type ServerlessClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServerlessClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServerlessClusterInitParameters `json:"initProvider,omitempty"` +} + +// ServerlessClusterStatus defines the observed state of ServerlessCluster. +type ServerlessClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServerlessClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ServerlessCluster is the Schema for the ServerlessClusters API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ServerlessCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clientAuthentication) || (has(self.initProvider) && has(self.initProvider.clientAuthentication))",message="spec.forProvider.clientAuthentication is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterName) || (has(self.initProvider) && has(self.initProvider.clusterName))",message="spec.forProvider.clusterName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vpcConfig) || (has(self.initProvider) && has(self.initProvider.vpcConfig))",message="spec.forProvider.vpcConfig is a required parameter" + Spec ServerlessClusterSpec `json:"spec"` + Status ServerlessClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServerlessClusterList contains a list of ServerlessClusters +type ServerlessClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServerlessCluster 
`json:"items"` +} + +// Repository type metadata. +var ( + ServerlessCluster_Kind = "ServerlessCluster" + ServerlessCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServerlessCluster_Kind}.String() + ServerlessCluster_KindAPIVersion = ServerlessCluster_Kind + "." + CRDGroupVersion.String() + ServerlessCluster_GroupVersionKind = CRDGroupVersion.WithKind(ServerlessCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&ServerlessCluster{}, &ServerlessClusterList{}) +} diff --git a/apis/kafka/v1beta3/zz_cluster_terraformed.go b/apis/kafka/v1beta3/zz_cluster_terraformed.go new file mode 100755 index 0000000000..0358b9c4fb --- /dev/null +++ b/apis/kafka/v1beta3/zz_cluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "aws_msk_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { + if tr.Status.AtProvider.ID 
== nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kafka/v1beta3/zz_cluster_types.go b/apis/kafka/v1beta3/zz_cluster_types.go new file mode 100755 index 0000000000..2de4c58fc9 --- /dev/null +++ b/apis/kafka/v1beta3/zz_cluster_types.go @@ -0,0 +1,1082 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BrokerLogsInitParameters struct { + CloudwatchLogs *CloudwatchLogsInitParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + Firehose *FirehoseInitParameters `json:"firehose,omitempty" tf:"firehose,omitempty"` + + S3 *S3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type BrokerLogsObservation struct { + CloudwatchLogs *CloudwatchLogsObservation `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + Firehose *FirehoseObservation `json:"firehose,omitempty" tf:"firehose,omitempty"` + + S3 *S3Observation `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type BrokerLogsParameters struct { + + // +kubebuilder:validation:Optional + CloudwatchLogs *CloudwatchLogsParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + // +kubebuilder:validation:Optional + Firehose *FirehoseParameters `json:"firehose,omitempty" tf:"firehose,omitempty"` + + // +kubebuilder:validation:Optional + S3 *S3Parameters `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type BrokerNodeGroupInfoInitParameters struct { + + // The distribution of broker nodes across availability zones (documentation). Currently the only valid value is DEFAULT. + AzDistribution *string `json:"azDistribution,omitempty" tf:"az_distribution,omitempty"` + + // A list of subnets to connect to in client VPC (documentation). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +listType=set + ClientSubnets []*string `json:"clientSubnets,omitempty" tf:"client_subnets,omitempty"` + + // References to Subnet in ec2 to populate clientSubnets. 
+ // +kubebuilder:validation:Optional + ClientSubnetsRefs []v1.Reference `json:"clientSubnetsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate clientSubnets. + // +kubebuilder:validation:Optional + ClientSubnetsSelector *v1.Selector `json:"clientSubnetsSelector,omitempty" tf:"-"` + + // Information about the cluster access configuration. See below. For security reasons, you can't turn on public access while creating an MSK cluster. However, you can update an existing cluster to make it publicly accessible. You can also create a new cluster and then update it to make it publicly accessible (documentation). + ConnectivityInfo *ConnectivityInfoInitParameters `json:"connectivityInfo,omitempty" tf:"connectivity_info,omitempty"` + + // Specify the instance type to use for the kafka brokersE.g., kafka.m5.large. (Pricing info) + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // A list of the security groups to associate with the elastic network interfaces to control who can communicate with the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` + + // A block that contains information about storage volumes attached to MSK broker nodes. See below. 
+ StorageInfo *StorageInfoInitParameters `json:"storageInfo,omitempty" tf:"storage_info,omitempty"` +} + +type BrokerNodeGroupInfoObservation struct { + + // The distribution of broker nodes across availability zones (documentation). Currently the only valid value is DEFAULT. + AzDistribution *string `json:"azDistribution,omitempty" tf:"az_distribution,omitempty"` + + // A list of subnets to connect to in client VPC (documentation). + // +listType=set + ClientSubnets []*string `json:"clientSubnets,omitempty" tf:"client_subnets,omitempty"` + + // Information about the cluster access configuration. See below. For security reasons, you can't turn on public access while creating an MSK cluster. However, you can update an existing cluster to make it publicly accessible. You can also create a new cluster and then update it to make it publicly accessible (documentation). + ConnectivityInfo *ConnectivityInfoObservation `json:"connectivityInfo,omitempty" tf:"connectivity_info,omitempty"` + + // Specify the instance type to use for the kafka brokersE.g., kafka.m5.large. (Pricing info) + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // A list of the security groups to associate with the elastic network interfaces to control who can communicate with the cluster. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // A block that contains information about storage volumes attached to MSK broker nodes. See below. + StorageInfo *StorageInfoObservation `json:"storageInfo,omitempty" tf:"storage_info,omitempty"` +} + +type BrokerNodeGroupInfoParameters struct { + + // The distribution of broker nodes across availability zones (documentation). Currently the only valid value is DEFAULT. + // +kubebuilder:validation:Optional + AzDistribution *string `json:"azDistribution,omitempty" tf:"az_distribution,omitempty"` + + // A list of subnets to connect to in client VPC (documentation). 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + // +listType=set + ClientSubnets []*string `json:"clientSubnets,omitempty" tf:"client_subnets,omitempty"` + + // References to Subnet in ec2 to populate clientSubnets. + // +kubebuilder:validation:Optional + ClientSubnetsRefs []v1.Reference `json:"clientSubnetsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate clientSubnets. + // +kubebuilder:validation:Optional + ClientSubnetsSelector *v1.Selector `json:"clientSubnetsSelector,omitempty" tf:"-"` + + // Information about the cluster access configuration. See below. For security reasons, you can't turn on public access while creating an MSK cluster. However, you can update an existing cluster to make it publicly accessible. You can also create a new cluster and then update it to make it publicly accessible (documentation). + // +kubebuilder:validation:Optional + ConnectivityInfo *ConnectivityInfoParameters `json:"connectivityInfo,omitempty" tf:"connectivity_info,omitempty"` + + // Specify the instance type to use for the kafka brokersE.g., kafka.m5.large. (Pricing info) + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType" tf:"instance_type,omitempty"` + + // A list of the security groups to associate with the elastic network interfaces to control who can communicate with the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupsRefs []v1.Reference `json:"securityGroupsRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. 
+ // +kubebuilder:validation:Optional + SecurityGroupsSelector *v1.Selector `json:"securityGroupsSelector,omitempty" tf:"-"` + + // A block that contains information about storage volumes attached to MSK broker nodes. See below. + // +kubebuilder:validation:Optional + StorageInfo *StorageInfoParameters `json:"storageInfo,omitempty" tf:"storage_info,omitempty"` +} + +type ClientAuthenticationInitParameters struct { + + // SASL authentication type details for VPC connectivity. See below. + Sasl *SaslInitParameters `json:"sasl,omitempty" tf:"sasl,omitempty"` + + // Enables TLS authentication for VPC connectivity. + TLS *bool `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type ClientAuthenticationObservation struct { + + // SASL authentication type details for VPC connectivity. See below. + Sasl *SaslObservation `json:"sasl,omitempty" tf:"sasl,omitempty"` + + // Enables TLS authentication for VPC connectivity. + TLS *bool `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type ClientAuthenticationParameters struct { + + // SASL authentication type details for VPC connectivity. See below. + // +kubebuilder:validation:Optional + Sasl *SaslParameters `json:"sasl,omitempty" tf:"sasl,omitempty"` + + // Enables TLS authentication for VPC connectivity. + // +kubebuilder:validation:Optional + TLS *bool `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type ClientAuthenticationSaslInitParameters struct { + + // Enables SASL/IAM authentication for VPC connectivity. + IAM *bool `json:"iam,omitempty" tf:"iam,omitempty"` + + // Enables SASL/SCRAM authentication for VPC connectivity. + Scram *bool `json:"scram,omitempty" tf:"scram,omitempty"` +} + +type ClientAuthenticationSaslObservation struct { + + // Enables SASL/IAM authentication for VPC connectivity. + IAM *bool `json:"iam,omitempty" tf:"iam,omitempty"` + + // Enables SASL/SCRAM authentication for VPC connectivity. 
+ Scram *bool `json:"scram,omitempty" tf:"scram,omitempty"` +} + +type ClientAuthenticationSaslParameters struct { + + // Enables SASL/IAM authentication for VPC connectivity. + // +kubebuilder:validation:Optional + IAM *bool `json:"iam,omitempty" tf:"iam,omitempty"` + + // Enables SASL/SCRAM authentication for VPC connectivity. + // +kubebuilder:validation:Optional + Scram *bool `json:"scram,omitempty" tf:"scram,omitempty"` +} + +type CloudwatchLogsInitParameters struct { + + // Controls whether provisioned throughput is enabled or not. Default value: false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Name of the Cloudwatch Log Group to deliver logs to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate logGroup. + // +kubebuilder:validation:Optional + LogGroupRef *v1.Reference `json:"logGroupRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate logGroup. + // +kubebuilder:validation:Optional + LogGroupSelector *v1.Selector `json:"logGroupSelector,omitempty" tf:"-"` +} + +type CloudwatchLogsObservation struct { + + // Controls whether provisioned throughput is enabled or not. Default value: false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Name of the Cloudwatch Log Group to deliver logs to. + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` +} + +type CloudwatchLogsParameters struct { + + // Controls whether provisioned throughput is enabled or not. Default value: false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Name of the Cloudwatch Log Group to deliver logs to. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +kubebuilder:validation:Optional + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate logGroup. + // +kubebuilder:validation:Optional + LogGroupRef *v1.Reference `json:"logGroupRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate logGroup. + // +kubebuilder:validation:Optional + LogGroupSelector *v1.Selector `json:"logGroupSelector,omitempty" tf:"-"` +} + +type ClusterClientAuthenticationInitParameters struct { + + // SASL authentication type details for VPC connectivity. See below. + Sasl *ClientAuthenticationSaslInitParameters `json:"sasl,omitempty" tf:"sasl,omitempty"` + + // Enables TLS authentication for VPC connectivity. + TLS *TLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` + + // Enables unauthenticated access. + Unauthenticated *bool `json:"unauthenticated,omitempty" tf:"unauthenticated,omitempty"` +} + +type ClusterClientAuthenticationObservation struct { + + // SASL authentication type details for VPC connectivity. See below. + Sasl *ClientAuthenticationSaslObservation `json:"sasl,omitempty" tf:"sasl,omitempty"` + + // Enables TLS authentication for VPC connectivity. + TLS *TLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` + + // Enables unauthenticated access. + Unauthenticated *bool `json:"unauthenticated,omitempty" tf:"unauthenticated,omitempty"` +} + +type ClusterClientAuthenticationParameters struct { + + // SASL authentication type details for VPC connectivity. See below. + // +kubebuilder:validation:Optional + Sasl *ClientAuthenticationSaslParameters `json:"sasl,omitempty" tf:"sasl,omitempty"` + + // Enables TLS authentication for VPC connectivity. + // +kubebuilder:validation:Optional + TLS *TLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` + + // Enables unauthenticated access. 
+ // +kubebuilder:validation:Optional + Unauthenticated *bool `json:"unauthenticated,omitempty" tf:"unauthenticated,omitempty"` +} + +type ClusterInitParameters struct { + + // Configuration block for the broker nodes of the Kafka cluster. + BrokerNodeGroupInfo *BrokerNodeGroupInfoInitParameters `json:"brokerNodeGroupInfo,omitempty" tf:"broker_node_group_info,omitempty"` + + // Configuration block for specifying a client authentication. See below. + ClientAuthentication *ClusterClientAuthenticationInitParameters `json:"clientAuthentication,omitempty" tf:"client_authentication,omitempty"` + + // Name of the MSK cluster. + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Configuration block for specifying a MSK Configuration to attach to Kafka brokers. See below. + ConfigurationInfo *ConfigurationInfoInitParameters `json:"configurationInfo,omitempty" tf:"configuration_info,omitempty"` + + // Configuration block for specifying encryption. See below. + EncryptionInfo *EncryptionInfoInitParameters `json:"encryptionInfo,omitempty" tf:"encryption_info,omitempty"` + + // Specify the desired enhanced MSK CloudWatch monitoring level. See Monitoring Amazon MSK with Amazon CloudWatch + EnhancedMonitoring *string `json:"enhancedMonitoring,omitempty" tf:"enhanced_monitoring,omitempty"` + + // Specify the desired Kafka software version. + KafkaVersion *string `json:"kafkaVersion,omitempty" tf:"kafka_version,omitempty"` + + // Configuration block for streaming broker logs to Cloudwatch/S3/Kinesis Firehose. See below. + LoggingInfo *LoggingInfoInitParameters `json:"loggingInfo,omitempty" tf:"logging_info,omitempty"` + + // The desired total number of broker nodes in the kafka cluster. It must be a multiple of the number of specified client subnets. + NumberOfBrokerNodes *float64 `json:"numberOfBrokerNodes,omitempty" tf:"number_of_broker_nodes,omitempty"` + + // Configuration block for JMX and Node monitoring for the MSK cluster. See below. 
+ OpenMonitoring *OpenMonitoringInitParameters `json:"openMonitoring,omitempty" tf:"open_monitoring,omitempty"` + + // Controls storage mode for supported storage tiers. Valid values are: LOCAL or TIERED. + StorageMode *string `json:"storageMode,omitempty" tf:"storage_mode,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ClusterObservation struct { + + // Amazon Resource Name (ARN) of the MSK Configuration to use in the cluster. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Comma separated list of one or more hostname:port pairs of kafka brokers suitable to bootstrap connectivity to the kafka cluster. Contains a value if encryption_info.0.encryption_in_transit.0.client_broker is set to PLAINTEXT or TLS_PLAINTEXT. The resource sorts values alphabetically. AWS may not always return all endpoints so this value is not guaranteed to be stable across applies. + BootstrapBrokers *string `json:"bootstrapBrokers,omitempty" tf:"bootstrap_brokers,omitempty"` + + // One or more DNS names (or IP addresses) and SASL IAM port pairs. For example, b-1-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9198,b-2-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9198,b-3-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9198. This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker is set to TLS_PLAINTEXT or TLS and client_authentication.0.sasl.0.iam is set to true and broker_node_group_info.0.connectivity_info.0.public_access.0.type is set to SERVICE_PROVIDED_EIPS and the cluster fulfill all other requirements for public access. The resource sorts the list alphabetically. AWS may not always return all endpoints so the values may not be stable across applies. 
+ BootstrapBrokersPublicSaslIAM *string `json:"bootstrapBrokersPublicSaslIam,omitempty" tf:"bootstrap_brokers_public_sasl_iam,omitempty"` + + // One or more DNS names (or IP addresses) and SASL SCRAM port pairs. For example, b-1-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9196,b-2-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9196,b-3-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9196. This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker is set to TLS_PLAINTEXT or TLS and client_authentication.0.sasl.0.scram is set to true and broker_node_group_info.0.connectivity_info.0.public_access.0.type is set to SERVICE_PROVIDED_EIPS and the cluster fulfill all other requirements for public access. The resource sorts the list alphabetically. AWS may not always return all endpoints so the values may not be stable across applies. + BootstrapBrokersPublicSaslScram *string `json:"bootstrapBrokersPublicSaslScram,omitempty" tf:"bootstrap_brokers_public_sasl_scram,omitempty"` + + // One or more DNS names (or IP addresses) and TLS port pairs. For example, b-1-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9194,b-2-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9194,b-3-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9194. This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker is set to TLS_PLAINTEXT or TLS and broker_node_group_info.0.connectivity_info.0.public_access.0.type is set to SERVICE_PROVIDED_EIPS and the cluster fulfill all other requirements for public access. The resource sorts the list alphabetically. AWS may not always return all endpoints so the values may not be stable across applies. + BootstrapBrokersPublicTLS *string `json:"bootstrapBrokersPublicTls,omitempty" tf:"bootstrap_brokers_public_tls,omitempty"` + + // One or more DNS names (or IP addresses) and SASL IAM port pairs. 
For example, b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9098,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9098,b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9098. This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker is set to TLS_PLAINTEXT or TLS and client_authentication.0.sasl.0.iam is set to true. The resource sorts the list alphabetically. AWS may not always return all endpoints so the values may not be stable across applies. + BootstrapBrokersSaslIAM *string `json:"bootstrapBrokersSaslIam,omitempty" tf:"bootstrap_brokers_sasl_iam,omitempty"` + + // One or more DNS names (or IP addresses) and SASL SCRAM port pairs. For example, b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096,b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096. This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker is set to TLS_PLAINTEXT or TLS and client_authentication.0.sasl.0.scram is set to true. The resource sorts the list alphabetically. AWS may not always return all endpoints so the values may not be stable across applies. + BootstrapBrokersSaslScram *string `json:"bootstrapBrokersSaslScram,omitempty" tf:"bootstrap_brokers_sasl_scram,omitempty"` + + // One or more DNS names (or IP addresses) and TLS port pairs. For example, b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094. This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker is set to TLS_PLAINTEXT or TLS. The resource sorts the list alphabetically. AWS may not always return all endpoints so the values may not be stable across applies. 
+ BootstrapBrokersTLS *string `json:"bootstrapBrokersTls,omitempty" tf:"bootstrap_brokers_tls,omitempty"` + + // A string containing one or more DNS names (or IP addresses) and SASL IAM port pairs for VPC connectivity. AWS may not always return all endpoints so the values may not be stable across applies. + BootstrapBrokersVPCConnectivitySaslIAM *string `json:"bootstrapBrokersVpcConnectivitySaslIam,omitempty" tf:"bootstrap_brokers_vpc_connectivity_sasl_iam,omitempty"` + + // A string containing one or more DNS names (or IP addresses) and SASL SCRAM port pairs for VPC connectivity. AWS may not always return all endpoints so the values may not be stable across applies. + BootstrapBrokersVPCConnectivitySaslScram *string `json:"bootstrapBrokersVpcConnectivitySaslScram,omitempty" tf:"bootstrap_brokers_vpc_connectivity_sasl_scram,omitempty"` + + // A string containing one or more DNS names (or IP addresses) and TLS port pairs for VPC connectivity. AWS may not always return all endpoints so the values may not be stable across applies. + BootstrapBrokersVPCConnectivityTLS *string `json:"bootstrapBrokersVpcConnectivityTls,omitempty" tf:"bootstrap_brokers_vpc_connectivity_tls,omitempty"` + + // Configuration block for the broker nodes of the Kafka cluster. + BrokerNodeGroupInfo *BrokerNodeGroupInfoObservation `json:"brokerNodeGroupInfo,omitempty" tf:"broker_node_group_info,omitempty"` + + // Configuration block for specifying a client authentication. See below. + ClientAuthentication *ClusterClientAuthenticationObservation `json:"clientAuthentication,omitempty" tf:"client_authentication,omitempty"` + + // Name of the MSK cluster. + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // UUID of the MSK cluster, for use in IAM policies. + ClusterUUID *string `json:"clusterUuid,omitempty" tf:"cluster_uuid,omitempty"` + + // Configuration block for specifying a MSK Configuration to attach to Kafka brokers. See below. 
+ ConfigurationInfo *ConfigurationInfoObservation `json:"configurationInfo,omitempty" tf:"configuration_info,omitempty"` + + // Current version of the MSK Cluster used for updates, e.g., K13V1IB3VIYZZH + CurrentVersion *string `json:"currentVersion,omitempty" tf:"current_version,omitempty"` + + // Configuration block for specifying encryption. See below. + EncryptionInfo *EncryptionInfoObservation `json:"encryptionInfo,omitempty" tf:"encryption_info,omitempty"` + + // Specify the desired enhanced MSK CloudWatch monitoring level. See Monitoring Amazon MSK with Amazon CloudWatch + EnhancedMonitoring *string `json:"enhancedMonitoring,omitempty" tf:"enhanced_monitoring,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specify the desired Kafka software version. + KafkaVersion *string `json:"kafkaVersion,omitempty" tf:"kafka_version,omitempty"` + + // Configuration block for streaming broker logs to Cloudwatch/S3/Kinesis Firehose. See below. + LoggingInfo *LoggingInfoObservation `json:"loggingInfo,omitempty" tf:"logging_info,omitempty"` + + // The desired total number of broker nodes in the kafka cluster. It must be a multiple of the number of specified client subnets. + NumberOfBrokerNodes *float64 `json:"numberOfBrokerNodes,omitempty" tf:"number_of_broker_nodes,omitempty"` + + // Configuration block for JMX and Node monitoring for the MSK cluster. See below. + OpenMonitoring *OpenMonitoringObservation `json:"openMonitoring,omitempty" tf:"open_monitoring,omitempty"` + + // Controls storage mode for supported storage tiers. Valid values are: LOCAL or TIERED. + StorageMode *string `json:"storageMode,omitempty" tf:"storage_mode,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // A comma separated list of one or more hostname:port pairs to use to connect to the Apache Zookeeper cluster. The returned values are sorted alphabetically. The AWS API may not return all endpoints, so this value is not guaranteed to be stable across applies. + ZookeeperConnectString *string `json:"zookeeperConnectString,omitempty" tf:"zookeeper_connect_string,omitempty"` + + // A comma separated list of one or more hostname:port pairs to use to connect to the Apache Zookeeper cluster via TLS. The returned values are sorted alphabetically. The AWS API may not return all endpoints, so this value is not guaranteed to be stable across applies. + ZookeeperConnectStringTLS *string `json:"zookeeperConnectStringTls,omitempty" tf:"zookeeper_connect_string_tls,omitempty"` +} + +type ClusterParameters struct { + + // Configuration block for the broker nodes of the Kafka cluster. + // +kubebuilder:validation:Optional + BrokerNodeGroupInfo *BrokerNodeGroupInfoParameters `json:"brokerNodeGroupInfo,omitempty" tf:"broker_node_group_info,omitempty"` + + // Configuration block for specifying a client authentication. See below. + // +kubebuilder:validation:Optional + ClientAuthentication *ClusterClientAuthenticationParameters `json:"clientAuthentication,omitempty" tf:"client_authentication,omitempty"` + + // Name of the MSK cluster. + // +kubebuilder:validation:Optional + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Configuration block for specifying a MSK Configuration to attach to Kafka brokers. See below. + // +kubebuilder:validation:Optional + ConfigurationInfo *ConfigurationInfoParameters `json:"configurationInfo,omitempty" tf:"configuration_info,omitempty"` + + // Configuration block for specifying encryption. See below. 
+ // +kubebuilder:validation:Optional + EncryptionInfo *EncryptionInfoParameters `json:"encryptionInfo,omitempty" tf:"encryption_info,omitempty"` + + // Specify the desired enhanced MSK CloudWatch monitoring level. See Monitoring Amazon MSK with Amazon CloudWatch + // +kubebuilder:validation:Optional + EnhancedMonitoring *string `json:"enhancedMonitoring,omitempty" tf:"enhanced_monitoring,omitempty"` + + // Specify the desired Kafka software version. + // +kubebuilder:validation:Optional + KafkaVersion *string `json:"kafkaVersion,omitempty" tf:"kafka_version,omitempty"` + + // Configuration block for streaming broker logs to Cloudwatch/S3/Kinesis Firehose. See below. + // +kubebuilder:validation:Optional + LoggingInfo *LoggingInfoParameters `json:"loggingInfo,omitempty" tf:"logging_info,omitempty"` + + // The desired total number of broker nodes in the kafka cluster. It must be a multiple of the number of specified client subnets. + // +kubebuilder:validation:Optional + NumberOfBrokerNodes *float64 `json:"numberOfBrokerNodes,omitempty" tf:"number_of_broker_nodes,omitempty"` + + // Configuration block for JMX and Node monitoring for the MSK cluster. See below. + // +kubebuilder:validation:Optional + OpenMonitoring *OpenMonitoringParameters `json:"openMonitoring,omitempty" tf:"open_monitoring,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Controls storage mode for supported storage tiers. Valid values are: LOCAL or TIERED. + // +kubebuilder:validation:Optional + StorageMode *string `json:"storageMode,omitempty" tf:"storage_mode,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ConfigurationInfoInitParameters struct { + + // Amazon Resource Name (ARN) of the MSK Configuration to use in the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafka/v1beta1.Configuration + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a Configuration in kafka to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a Configuration in kafka to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Revision of the MSK Configuration to use in the cluster. + Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` +} + +type ConfigurationInfoObservation struct { + + // Amazon Resource Name (ARN) of the MSK Configuration to use in the cluster. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Revision of the MSK Configuration to use in the cluster. + Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` +} + +type ConfigurationInfoParameters struct { + + // Amazon Resource Name (ARN) of the MSK Configuration to use in the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafka/v1beta1.Configuration + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a Configuration in kafka to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a Configuration in kafka to populate arn. 
+ // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Revision of the MSK Configuration to use in the cluster. + // +kubebuilder:validation:Optional + Revision *float64 `json:"revision" tf:"revision,omitempty"` +} + +type ConnectivityInfoInitParameters struct { + + // Access control settings for brokers. See below. + PublicAccess *PublicAccessInitParameters `json:"publicAccess,omitempty" tf:"public_access,omitempty"` + + // VPC connectivity access control for brokers. See below. + VPCConnectivity *VPCConnectivityInitParameters `json:"vpcConnectivity,omitempty" tf:"vpc_connectivity,omitempty"` +} + +type ConnectivityInfoObservation struct { + + // Access control settings for brokers. See below. + PublicAccess *PublicAccessObservation `json:"publicAccess,omitempty" tf:"public_access,omitempty"` + + // VPC connectivity access control for brokers. See below. + VPCConnectivity *VPCConnectivityObservation `json:"vpcConnectivity,omitempty" tf:"vpc_connectivity,omitempty"` +} + +type ConnectivityInfoParameters struct { + + // Access control settings for brokers. See below. + // +kubebuilder:validation:Optional + PublicAccess *PublicAccessParameters `json:"publicAccess,omitempty" tf:"public_access,omitempty"` + + // VPC connectivity access control for brokers. See below. + // +kubebuilder:validation:Optional + VPCConnectivity *VPCConnectivityParameters `json:"vpcConnectivity,omitempty" tf:"vpc_connectivity,omitempty"` +} + +type EBSStorageInfoInitParameters struct { + + // A block that contains EBS volume provisioned throughput information. To provision storage throughput, you must choose broker type kafka.m5.4xlarge or larger. See below. + ProvisionedThroughput *ProvisionedThroughputInitParameters `json:"provisionedThroughput,omitempty" tf:"provisioned_throughput,omitempty"` + + // The size in GiB of the EBS volume for the data drive on each broker node. Minimum value of 1 and maximum value of 16384. 
+ VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` +} + +type EBSStorageInfoObservation struct { + + // A block that contains EBS volume provisioned throughput information. To provision storage throughput, you must choose broker type kafka.m5.4xlarge or larger. See below. + ProvisionedThroughput *ProvisionedThroughputObservation `json:"provisionedThroughput,omitempty" tf:"provisioned_throughput,omitempty"` + + // The size in GiB of the EBS volume for the data drive on each broker node. Minimum value of 1 and maximum value of 16384. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` +} + +type EBSStorageInfoParameters struct { + + // A block that contains EBS volume provisioned throughput information. To provision storage throughput, you must choose broker type kafka.m5.4xlarge or larger. See below. + // +kubebuilder:validation:Optional + ProvisionedThroughput *ProvisionedThroughputParameters `json:"provisionedThroughput,omitempty" tf:"provisioned_throughput,omitempty"` + + // The size in GiB of the EBS volume for the data drive on each broker node. Minimum value of 1 and maximum value of 16384. + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` +} + +type EncryptionInTransitInitParameters struct { + + // Encryption setting for data in transit between clients and brokers. Valid values: TLS, TLS_PLAINTEXT, and PLAINTEXT. Default value is TLS. + ClientBroker *string `json:"clientBroker,omitempty" tf:"client_broker,omitempty"` + + // Whether data communication among broker nodes is encrypted. Default value: true. + InCluster *bool `json:"inCluster,omitempty" tf:"in_cluster,omitempty"` +} + +type EncryptionInTransitObservation struct { + + // Encryption setting for data in transit between clients and brokers. Valid values: TLS, TLS_PLAINTEXT, and PLAINTEXT. Default value is TLS. 
+ ClientBroker *string `json:"clientBroker,omitempty" tf:"client_broker,omitempty"` + + // Whether data communication among broker nodes is encrypted. Default value: true. + InCluster *bool `json:"inCluster,omitempty" tf:"in_cluster,omitempty"` +} + +type EncryptionInTransitParameters struct { + + // Encryption setting for data in transit between clients and brokers. Valid values: TLS, TLS_PLAINTEXT, and PLAINTEXT. Default value is TLS. + // +kubebuilder:validation:Optional + ClientBroker *string `json:"clientBroker,omitempty" tf:"client_broker,omitempty"` + + // Whether data communication among broker nodes is encrypted. Default value: true. + // +kubebuilder:validation:Optional + InCluster *bool `json:"inCluster,omitempty" tf:"in_cluster,omitempty"` +} + +type EncryptionInfoInitParameters struct { + + // The ARN of the KMS key used for encryption at rest of the broker data volumes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + EncryptionAtRestKMSKeyArn *string `json:"encryptionAtRestKmsKeyArn,omitempty" tf:"encryption_at_rest_kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate encryptionAtRestKmsKeyArn. + // +kubebuilder:validation:Optional + EncryptionAtRestKMSKeyArnRef *v1.Reference `json:"encryptionAtRestKmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate encryptionAtRestKmsKeyArn. + // +kubebuilder:validation:Optional + EncryptionAtRestKMSKeyArnSelector *v1.Selector `json:"encryptionAtRestKmsKeyArnSelector,omitempty" tf:"-"` + + // Configuration block to specify encryption in transit. See below. + EncryptionInTransit *EncryptionInTransitInitParameters `json:"encryptionInTransit,omitempty" tf:"encryption_in_transit,omitempty"` +} + +type EncryptionInfoObservation struct { + + // The ARN of the KMS key used for encryption at rest of the broker data volumes. 
+ EncryptionAtRestKMSKeyArn *string `json:"encryptionAtRestKmsKeyArn,omitempty" tf:"encryption_at_rest_kms_key_arn,omitempty"` + + // Configuration block to specify encryption in transit. See below. + EncryptionInTransit *EncryptionInTransitObservation `json:"encryptionInTransit,omitempty" tf:"encryption_in_transit,omitempty"` +} + +type EncryptionInfoParameters struct { + + // The ARN of the KMS key used for encryption at rest of the broker data volumes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + EncryptionAtRestKMSKeyArn *string `json:"encryptionAtRestKmsKeyArn,omitempty" tf:"encryption_at_rest_kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate encryptionAtRestKmsKeyArn. + // +kubebuilder:validation:Optional + EncryptionAtRestKMSKeyArnRef *v1.Reference `json:"encryptionAtRestKmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate encryptionAtRestKmsKeyArn. + // +kubebuilder:validation:Optional + EncryptionAtRestKMSKeyArnSelector *v1.Selector `json:"encryptionAtRestKmsKeyArnSelector,omitempty" tf:"-"` + + // Configuration block to specify encryption in transit. See below. + // +kubebuilder:validation:Optional + EncryptionInTransit *EncryptionInTransitParameters `json:"encryptionInTransit,omitempty" tf:"encryption_in_transit,omitempty"` +} + +type FirehoseInitParameters struct { + + // Name of the Kinesis Data Firehose delivery stream to deliver logs to. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + DeliveryStream *string `json:"deliveryStream,omitempty" tf:"delivery_stream,omitempty"` + + // Reference to a DeliveryStream in firehose to populate deliveryStream. + // +kubebuilder:validation:Optional + DeliveryStreamRef *v1.Reference `json:"deliveryStreamRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate deliveryStream. + // +kubebuilder:validation:Optional + DeliveryStreamSelector *v1.Selector `json:"deliveryStreamSelector,omitempty" tf:"-"` + + // Controls whether provisioned throughput is enabled or not. Default value: false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type FirehoseObservation struct { + + // Name of the Kinesis Data Firehose delivery stream to deliver logs to. + DeliveryStream *string `json:"deliveryStream,omitempty" tf:"delivery_stream,omitempty"` + + // Controls whether provisioned throughput is enabled or not. Default value: false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type FirehoseParameters struct { + + // Name of the Kinesis Data Firehose delivery stream to deliver logs to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + DeliveryStream *string `json:"deliveryStream,omitempty" tf:"delivery_stream,omitempty"` + + // Reference to a DeliveryStream in firehose to populate deliveryStream. + // +kubebuilder:validation:Optional + DeliveryStreamRef *v1.Reference `json:"deliveryStreamRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate deliveryStream. 
+ // +kubebuilder:validation:Optional + DeliveryStreamSelector *v1.Selector `json:"deliveryStreamSelector,omitempty" tf:"-"` + + // Controls whether provisioned throughput is enabled or not. Default value: false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type JmxExporterInitParameters struct { + + // Indicates whether you want to enable or disable the JMX Exporter. + EnabledInBroker *bool `json:"enabledInBroker,omitempty" tf:"enabled_in_broker,omitempty"` +} + +type JmxExporterObservation struct { + + // Indicates whether you want to enable or disable the JMX Exporter. + EnabledInBroker *bool `json:"enabledInBroker,omitempty" tf:"enabled_in_broker,omitempty"` +} + +type JmxExporterParameters struct { + + // Indicates whether you want to enable or disable the JMX Exporter. + // +kubebuilder:validation:Optional + EnabledInBroker *bool `json:"enabledInBroker" tf:"enabled_in_broker,omitempty"` +} + +type LoggingInfoInitParameters struct { + + // Configuration block for Broker Logs settings for logging info. See below. + BrokerLogs *BrokerLogsInitParameters `json:"brokerLogs,omitempty" tf:"broker_logs,omitempty"` +} + +type LoggingInfoObservation struct { + + // Configuration block for Broker Logs settings for logging info. See below. + BrokerLogs *BrokerLogsObservation `json:"brokerLogs,omitempty" tf:"broker_logs,omitempty"` +} + +type LoggingInfoParameters struct { + + // Configuration block for Broker Logs settings for logging info. See below. + // +kubebuilder:validation:Optional + BrokerLogs *BrokerLogsParameters `json:"brokerLogs" tf:"broker_logs,omitempty"` +} + +type NodeExporterInitParameters struct { + + // Indicates whether you want to enable or disable the JMX Exporter. + EnabledInBroker *bool `json:"enabledInBroker,omitempty" tf:"enabled_in_broker,omitempty"` +} + +type NodeExporterObservation struct { + + // Indicates whether you want to enable or disable the JMX Exporter. 
+ EnabledInBroker *bool `json:"enabledInBroker,omitempty" tf:"enabled_in_broker,omitempty"` +} + +type NodeExporterParameters struct { + + // Indicates whether you want to enable or disable the JMX Exporter. + // +kubebuilder:validation:Optional + EnabledInBroker *bool `json:"enabledInBroker" tf:"enabled_in_broker,omitempty"` +} + +type OpenMonitoringInitParameters struct { + + // Configuration block for Prometheus settings for open monitoring. See below. + Prometheus *PrometheusInitParameters `json:"prometheus,omitempty" tf:"prometheus,omitempty"` +} + +type OpenMonitoringObservation struct { + + // Configuration block for Prometheus settings for open monitoring. See below. + Prometheus *PrometheusObservation `json:"prometheus,omitempty" tf:"prometheus,omitempty"` +} + +type OpenMonitoringParameters struct { + + // Configuration block for Prometheus settings for open monitoring. See below. + // +kubebuilder:validation:Optional + Prometheus *PrometheusParameters `json:"prometheus" tf:"prometheus,omitempty"` +} + +type PrometheusInitParameters struct { + + // Configuration block for JMX Exporter. See below. + JmxExporter *JmxExporterInitParameters `json:"jmxExporter,omitempty" tf:"jmx_exporter,omitempty"` + + // Configuration block for Node Exporter. See below. + NodeExporter *NodeExporterInitParameters `json:"nodeExporter,omitempty" tf:"node_exporter,omitempty"` +} + +type PrometheusObservation struct { + + // Configuration block for JMX Exporter. See below. + JmxExporter *JmxExporterObservation `json:"jmxExporter,omitempty" tf:"jmx_exporter,omitempty"` + + // Configuration block for Node Exporter. See below. + NodeExporter *NodeExporterObservation `json:"nodeExporter,omitempty" tf:"node_exporter,omitempty"` +} + +type PrometheusParameters struct { + + // Configuration block for JMX Exporter. See below. 
+ // +kubebuilder:validation:Optional + JmxExporter *JmxExporterParameters `json:"jmxExporter,omitempty" tf:"jmx_exporter,omitempty"` + + // Configuration block for Node Exporter. See below. + // +kubebuilder:validation:Optional + NodeExporter *NodeExporterParameters `json:"nodeExporter,omitempty" tf:"node_exporter,omitempty"` +} + +type ProvisionedThroughputInitParameters struct { + + // Controls whether provisioned throughput is enabled or not. Default value: false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Throughput value of the EBS volumes for the data drive on each kafka broker node in MiB per second. The minimum value is 250. The maximum value varies between broker type. You can refer to the valid values for the maximum volume throughput at the following documentation on throughput bottlenecks + VolumeThroughput *float64 `json:"volumeThroughput,omitempty" tf:"volume_throughput,omitempty"` +} + +type ProvisionedThroughputObservation struct { + + // Controls whether provisioned throughput is enabled or not. Default value: false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Throughput value of the EBS volumes for the data drive on each kafka broker node in MiB per second. The minimum value is 250. The maximum value varies between broker type. You can refer to the valid values for the maximum volume throughput at the following documentation on throughput bottlenecks + VolumeThroughput *float64 `json:"volumeThroughput,omitempty" tf:"volume_throughput,omitempty"` +} + +type ProvisionedThroughputParameters struct { + + // Controls whether provisioned throughput is enabled or not. Default value: false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Throughput value of the EBS volumes for the data drive on each kafka broker node in MiB per second. The minimum value is 250. The maximum value varies between broker type. 
You can refer to the valid values for the maximum volume throughput at the following documentation on throughput bottlenecks + // +kubebuilder:validation:Optional + VolumeThroughput *float64 `json:"volumeThroughput,omitempty" tf:"volume_throughput,omitempty"` +} + +type PublicAccessInitParameters struct { + + // Public access type. Valid values: DISABLED, SERVICE_PROVIDED_EIPS. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PublicAccessObservation struct { + + // Public access type. Valid values: DISABLED, SERVICE_PROVIDED_EIPS. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PublicAccessParameters struct { + + // Public access type. Valid values: DISABLED, SERVICE_PROVIDED_EIPS. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type S3InitParameters struct { + + // Name of the S3 bucket to deliver logs to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Controls whether provisioned throughput is enabled or not. Default value: false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Prefix to append to the folder name. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3Observation struct { + + // Name of the S3 bucket to deliver logs to. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Controls whether provisioned throughput is enabled or not. Default value: false. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Prefix to append to the folder name. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3Parameters struct { + + // Name of the S3 bucket to deliver logs to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Controls whether provisioned throughput is enabled or not. Default value: false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Prefix to append to the folder name. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type SaslInitParameters struct { + + // Enables SASL/IAM authentication for VPC connectivity. + IAM *bool `json:"iam,omitempty" tf:"iam,omitempty"` + + // Enables SASL/SCRAM authentication for VPC connectivity. + Scram *bool `json:"scram,omitempty" tf:"scram,omitempty"` +} + +type SaslObservation struct { + + // Enables SASL/IAM authentication for VPC connectivity. + IAM *bool `json:"iam,omitempty" tf:"iam,omitempty"` + + // Enables SASL/SCRAM authentication for VPC connectivity. + Scram *bool `json:"scram,omitempty" tf:"scram,omitempty"` +} + +type SaslParameters struct { + + // Enables SASL/IAM authentication for VPC connectivity. + // +kubebuilder:validation:Optional + IAM *bool `json:"iam,omitempty" tf:"iam,omitempty"` + + // Enables SASL/SCRAM authentication for VPC connectivity. 
+ // +kubebuilder:validation:Optional + Scram *bool `json:"scram,omitempty" tf:"scram,omitempty"` +} + +type StorageInfoInitParameters struct { + + // A block that contains EBS volume information. See below. + EBSStorageInfo *EBSStorageInfoInitParameters `json:"ebsStorageInfo,omitempty" tf:"ebs_storage_info,omitempty"` +} + +type StorageInfoObservation struct { + + // A block that contains EBS volume information. See below. + EBSStorageInfo *EBSStorageInfoObservation `json:"ebsStorageInfo,omitempty" tf:"ebs_storage_info,omitempty"` +} + +type StorageInfoParameters struct { + + // A block that contains EBS volume information. See below. + // +kubebuilder:validation:Optional + EBSStorageInfo *EBSStorageInfoParameters `json:"ebsStorageInfo,omitempty" tf:"ebs_storage_info,omitempty"` +} + +type TLSInitParameters struct { + + // List of ACM Certificate Authority Amazon Resource Names (ARNs). + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns,omitempty" tf:"certificate_authority_arns,omitempty"` +} + +type TLSObservation struct { + + // List of ACM Certificate Authority Amazon Resource Names (ARNs). + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns,omitempty" tf:"certificate_authority_arns,omitempty"` +} + +type TLSParameters struct { + + // List of ACM Certificate Authority Amazon Resource Names (ARNs). + // +kubebuilder:validation:Optional + // +listType=set + CertificateAuthorityArns []*string `json:"certificateAuthorityArns,omitempty" tf:"certificate_authority_arns,omitempty"` +} + +type VPCConnectivityInitParameters struct { + + // Configuration block for specifying a client authentication. See below. + ClientAuthentication *ClientAuthenticationInitParameters `json:"clientAuthentication,omitempty" tf:"client_authentication,omitempty"` +} + +type VPCConnectivityObservation struct { + + // Configuration block for specifying a client authentication. See below. 
+ ClientAuthentication *ClientAuthenticationObservation `json:"clientAuthentication,omitempty" tf:"client_authentication,omitempty"` +} + +type VPCConnectivityParameters struct { + + // Configuration block for specifying a client authentication. See below. + // +kubebuilder:validation:Optional + ClientAuthentication *ClientAuthenticationParameters `json:"clientAuthentication,omitempty" tf:"client_authentication,omitempty"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the Clusters API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.brokerNodeGroupInfo) || (has(self.initProvider) && has(self.initProvider.brokerNodeGroupInfo))",message="spec.forProvider.brokerNodeGroupInfo is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterName) || (has(self.initProvider) && has(self.initProvider.clusterName))",message="spec.forProvider.clusterName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kafkaVersion) || (has(self.initProvider) && has(self.initProvider.kafkaVersion))",message="spec.forProvider.kafkaVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.numberOfBrokerNodes) || (has(self.initProvider) && has(self.initProvider.numberOfBrokerNodes))",message="spec.forProvider.numberOfBrokerNodes is a required parameter" + Spec 
ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." + CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/kafka/v1beta3/zz_generated.conversion_hubs.go b/apis/kafka/v1beta3/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..010e3bc807 --- /dev/null +++ b/apis/kafka/v1beta3/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} diff --git a/apis/kafka/v1beta3/zz_generated.deepcopy.go b/apis/kafka/v1beta3/zz_generated.deepcopy.go new file mode 100644 index 0000000000..5376f73566 --- /dev/null +++ b/apis/kafka/v1beta3/zz_generated.deepcopy.go @@ -0,0 +1,2454 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta3 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BrokerLogsInitParameters) DeepCopyInto(out *BrokerLogsInitParameters) { + *out = *in + if in.CloudwatchLogs != nil { + in, out := &in.CloudwatchLogs, &out.CloudwatchLogs + *out = new(CloudwatchLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Firehose != nil { + in, out := &in.Firehose, &out.Firehose + *out = new(FirehoseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3InitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerLogsInitParameters. +func (in *BrokerLogsInitParameters) DeepCopy() *BrokerLogsInitParameters { + if in == nil { + return nil + } + out := new(BrokerLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerLogsObservation) DeepCopyInto(out *BrokerLogsObservation) { + *out = *in + if in.CloudwatchLogs != nil { + in, out := &in.CloudwatchLogs, &out.CloudwatchLogs + *out = new(CloudwatchLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.Firehose != nil { + in, out := &in.Firehose, &out.Firehose + *out = new(FirehoseObservation) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Observation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerLogsObservation. +func (in *BrokerLogsObservation) DeepCopy() *BrokerLogsObservation { + if in == nil { + return nil + } + out := new(BrokerLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BrokerLogsParameters) DeepCopyInto(out *BrokerLogsParameters) { + *out = *in + if in.CloudwatchLogs != nil { + in, out := &in.CloudwatchLogs, &out.CloudwatchLogs + *out = new(CloudwatchLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.Firehose != nil { + in, out := &in.Firehose, &out.Firehose + *out = new(FirehoseParameters) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Parameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerLogsParameters. +func (in *BrokerLogsParameters) DeepCopy() *BrokerLogsParameters { + if in == nil { + return nil + } + out := new(BrokerLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerNodeGroupInfoInitParameters) DeepCopyInto(out *BrokerNodeGroupInfoInitParameters) { + *out = *in + if in.AzDistribution != nil { + in, out := &in.AzDistribution, &out.AzDistribution + *out = new(string) + **out = **in + } + if in.ClientSubnets != nil { + in, out := &in.ClientSubnets, &out.ClientSubnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientSubnetsRefs != nil { + in, out := &in.ClientSubnetsRefs, &out.ClientSubnetsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientSubnetsSelector != nil { + in, out := &in.ClientSubnetsSelector, &out.ClientSubnetsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnectivityInfo != nil { + in, out := &in.ConnectivityInfo, &out.ConnectivityInfo + *out = new(ConnectivityInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = 
new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupsRefs != nil { + in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupsSelector != nil { + in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageInfo != nil { + in, out := &in.StorageInfo, &out.StorageInfo + *out = new(StorageInfoInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerNodeGroupInfoInitParameters. +func (in *BrokerNodeGroupInfoInitParameters) DeepCopy() *BrokerNodeGroupInfoInitParameters { + if in == nil { + return nil + } + out := new(BrokerNodeGroupInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BrokerNodeGroupInfoObservation) DeepCopyInto(out *BrokerNodeGroupInfoObservation) { + *out = *in + if in.AzDistribution != nil { + in, out := &in.AzDistribution, &out.AzDistribution + *out = new(string) + **out = **in + } + if in.ClientSubnets != nil { + in, out := &in.ClientSubnets, &out.ClientSubnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectivityInfo != nil { + in, out := &in.ConnectivityInfo, &out.ConnectivityInfo + *out = new(ConnectivityInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageInfo != nil { + in, out := &in.StorageInfo, &out.StorageInfo + *out = new(StorageInfoObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerNodeGroupInfoObservation. +func (in *BrokerNodeGroupInfoObservation) DeepCopy() *BrokerNodeGroupInfoObservation { + if in == nil { + return nil + } + out := new(BrokerNodeGroupInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BrokerNodeGroupInfoParameters) DeepCopyInto(out *BrokerNodeGroupInfoParameters) { + *out = *in + if in.AzDistribution != nil { + in, out := &in.AzDistribution, &out.AzDistribution + *out = new(string) + **out = **in + } + if in.ClientSubnets != nil { + in, out := &in.ClientSubnets, &out.ClientSubnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientSubnetsRefs != nil { + in, out := &in.ClientSubnetsRefs, &out.ClientSubnetsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientSubnetsSelector != nil { + in, out := &in.ClientSubnetsSelector, &out.ClientSubnetsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ConnectivityInfo != nil { + in, out := &in.ConnectivityInfo, &out.ConnectivityInfo + *out = new(ConnectivityInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupsRefs != nil { + in, out := &in.SecurityGroupsRefs, &out.SecurityGroupsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupsSelector != nil { + in, out := &in.SecurityGroupsSelector, &out.SecurityGroupsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageInfo != nil { + in, out := &in.StorageInfo, &out.StorageInfo + *out = new(StorageInfoParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
BrokerNodeGroupInfoParameters. +func (in *BrokerNodeGroupInfoParameters) DeepCopy() *BrokerNodeGroupInfoParameters { + if in == nil { + return nil + } + out := new(BrokerNodeGroupInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAuthenticationInitParameters) DeepCopyInto(out *ClientAuthenticationInitParameters) { + *out = *in + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = new(SaslInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAuthenticationInitParameters. +func (in *ClientAuthenticationInitParameters) DeepCopy() *ClientAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(ClientAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAuthenticationObservation) DeepCopyInto(out *ClientAuthenticationObservation) { + *out = *in + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = new(SaslObservation) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAuthenticationObservation. +func (in *ClientAuthenticationObservation) DeepCopy() *ClientAuthenticationObservation { + if in == nil { + return nil + } + out := new(ClientAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientAuthenticationParameters) DeepCopyInto(out *ClientAuthenticationParameters) { + *out = *in + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = new(SaslParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAuthenticationParameters. +func (in *ClientAuthenticationParameters) DeepCopy() *ClientAuthenticationParameters { + if in == nil { + return nil + } + out := new(ClientAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAuthenticationSaslInitParameters) DeepCopyInto(out *ClientAuthenticationSaslInitParameters) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(bool) + **out = **in + } + if in.Scram != nil { + in, out := &in.Scram, &out.Scram + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAuthenticationSaslInitParameters. +func (in *ClientAuthenticationSaslInitParameters) DeepCopy() *ClientAuthenticationSaslInitParameters { + if in == nil { + return nil + } + out := new(ClientAuthenticationSaslInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAuthenticationSaslObservation) DeepCopyInto(out *ClientAuthenticationSaslObservation) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(bool) + **out = **in + } + if in.Scram != nil { + in, out := &in.Scram, &out.Scram + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAuthenticationSaslObservation. 
+func (in *ClientAuthenticationSaslObservation) DeepCopy() *ClientAuthenticationSaslObservation { + if in == nil { + return nil + } + out := new(ClientAuthenticationSaslObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAuthenticationSaslParameters) DeepCopyInto(out *ClientAuthenticationSaslParameters) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(bool) + **out = **in + } + if in.Scram != nil { + in, out := &in.Scram, &out.Scram + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAuthenticationSaslParameters. +func (in *ClientAuthenticationSaslParameters) DeepCopy() *ClientAuthenticationSaslParameters { + if in == nil { + return nil + } + out := new(ClientAuthenticationSaslParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsInitParameters) DeepCopyInto(out *CloudwatchLogsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } + if in.LogGroupRef != nil { + in, out := &in.LogGroupRef, &out.LogGroupRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogGroupSelector != nil { + in, out := &in.LogGroupSelector, &out.LogGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsInitParameters. 
+func (in *CloudwatchLogsInitParameters) DeepCopy() *CloudwatchLogsInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsObservation) DeepCopyInto(out *CloudwatchLogsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsObservation. +func (in *CloudwatchLogsObservation) DeepCopy() *CloudwatchLogsObservation { + if in == nil { + return nil + } + out := new(CloudwatchLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsParameters) DeepCopyInto(out *CloudwatchLogsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } + if in.LogGroupRef != nil { + in, out := &in.LogGroupRef, &out.LogGroupRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogGroupSelector != nil { + in, out := &in.LogGroupSelector, &out.LogGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsParameters. 
+func (in *CloudwatchLogsParameters) DeepCopy() *CloudwatchLogsParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClientAuthenticationInitParameters) DeepCopyInto(out *ClusterClientAuthenticationInitParameters) { + *out = *in + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = new(ClientAuthenticationSaslInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Unauthenticated != nil { + in, out := &in.Unauthenticated, &out.Unauthenticated + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClientAuthenticationInitParameters. 
+func (in *ClusterClientAuthenticationInitParameters) DeepCopy() *ClusterClientAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(ClusterClientAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClientAuthenticationObservation) DeepCopyInto(out *ClusterClientAuthenticationObservation) { + *out = *in + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = new(ClientAuthenticationSaslObservation) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSObservation) + (*in).DeepCopyInto(*out) + } + if in.Unauthenticated != nil { + in, out := &in.Unauthenticated, &out.Unauthenticated + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClientAuthenticationObservation. +func (in *ClusterClientAuthenticationObservation) DeepCopy() *ClusterClientAuthenticationObservation { + if in == nil { + return nil + } + out := new(ClusterClientAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClientAuthenticationParameters) DeepCopyInto(out *ClusterClientAuthenticationParameters) { + *out = *in + if in.Sasl != nil { + in, out := &in.Sasl, &out.Sasl + *out = new(ClientAuthenticationSaslParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSParameters) + (*in).DeepCopyInto(*out) + } + if in.Unauthenticated != nil { + in, out := &in.Unauthenticated, &out.Unauthenticated + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClientAuthenticationParameters. 
+func (in *ClusterClientAuthenticationParameters) DeepCopy() *ClusterClientAuthenticationParameters { + if in == nil { + return nil + } + out := new(ClusterClientAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.BrokerNodeGroupInfo != nil { + in, out := &in.BrokerNodeGroupInfo, &out.BrokerNodeGroupInfo + *out = new(BrokerNodeGroupInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(ClusterClientAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ConfigurationInfo != nil { + in, out := &in.ConfigurationInfo, &out.ConfigurationInfo + *out = new(ConfigurationInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptionInfo != nil { + in, out := &in.EncryptionInfo, &out.EncryptionInfo + *out = new(EncryptionInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnhancedMonitoring != nil { + in, out := &in.EnhancedMonitoring, &out.EnhancedMonitoring + *out = new(string) + **out = **in + } + if in.KafkaVersion != nil { + in, out := &in.KafkaVersion, &out.KafkaVersion + *out = new(string) + **out = **in + } + if in.LoggingInfo != nil { + in, out := &in.LoggingInfo, &out.LoggingInfo + *out = new(LoggingInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NumberOfBrokerNodes != nil { + in, out := &in.NumberOfBrokerNodes, &out.NumberOfBrokerNodes + *out = new(float64) + **out = **in + } + if in.OpenMonitoring != nil { + in, out := &in.OpenMonitoring, &out.OpenMonitoring + *out = new(OpenMonitoringInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageMode != nil { + in, out := 
&in.StorageMode, &out.StorageMode + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BootstrapBrokers != nil { + in, out := &in.BootstrapBrokers, &out.BootstrapBrokers + *out = new(string) + **out = **in + } + if in.BootstrapBrokersPublicSaslIAM != nil { + in, out := &in.BootstrapBrokersPublicSaslIAM, &out.BootstrapBrokersPublicSaslIAM + *out = new(string) + **out = **in + } + if in.BootstrapBrokersPublicSaslScram != nil { + in, out := &in.BootstrapBrokersPublicSaslScram, &out.BootstrapBrokersPublicSaslScram + *out = new(string) + **out = **in + } + if in.BootstrapBrokersPublicTLS != nil { + in, out := &in.BootstrapBrokersPublicTLS, &out.BootstrapBrokersPublicTLS + *out = new(string) + **out = **in + } + if in.BootstrapBrokersSaslIAM != nil { + in, out := &in.BootstrapBrokersSaslIAM, &out.BootstrapBrokersSaslIAM + *out = new(string) + **out = **in + } + if in.BootstrapBrokersSaslScram != nil { + in, out := &in.BootstrapBrokersSaslScram, &out.BootstrapBrokersSaslScram + *out = new(string) + **out = **in + } + if in.BootstrapBrokersTLS != nil { + in, out := &in.BootstrapBrokersTLS, &out.BootstrapBrokersTLS + *out = new(string) + **out = **in + } + if in.BootstrapBrokersVPCConnectivitySaslIAM != nil { + in, out := &in.BootstrapBrokersVPCConnectivitySaslIAM, &out.BootstrapBrokersVPCConnectivitySaslIAM + *out = new(string) + **out = **in + } + if in.BootstrapBrokersVPCConnectivitySaslScram != nil { + in, out := &in.BootstrapBrokersVPCConnectivitySaslScram, &out.BootstrapBrokersVPCConnectivitySaslScram + *out = new(string) + **out = **in + } + if in.BootstrapBrokersVPCConnectivityTLS != nil { + in, out := &in.BootstrapBrokersVPCConnectivityTLS, &out.BootstrapBrokersVPCConnectivityTLS + *out = new(string) + **out = **in + } + if in.BrokerNodeGroupInfo != nil { + in, out := &in.BrokerNodeGroupInfo, &out.BrokerNodeGroupInfo + *out = new(BrokerNodeGroupInfoObservation) + 
(*in).DeepCopyInto(*out) + } + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(ClusterClientAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterUUID != nil { + in, out := &in.ClusterUUID, &out.ClusterUUID + *out = new(string) + **out = **in + } + if in.ConfigurationInfo != nil { + in, out := &in.ConfigurationInfo, &out.ConfigurationInfo + *out = new(ConfigurationInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.CurrentVersion != nil { + in, out := &in.CurrentVersion, &out.CurrentVersion + *out = new(string) + **out = **in + } + if in.EncryptionInfo != nil { + in, out := &in.EncryptionInfo, &out.EncryptionInfo + *out = new(EncryptionInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.EnhancedMonitoring != nil { + in, out := &in.EnhancedMonitoring, &out.EnhancedMonitoring + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KafkaVersion != nil { + in, out := &in.KafkaVersion, &out.KafkaVersion + *out = new(string) + **out = **in + } + if in.LoggingInfo != nil { + in, out := &in.LoggingInfo, &out.LoggingInfo + *out = new(LoggingInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.NumberOfBrokerNodes != nil { + in, out := &in.NumberOfBrokerNodes, &out.NumberOfBrokerNodes + *out = new(float64) + **out = **in + } + if in.OpenMonitoring != nil { + in, out := &in.OpenMonitoring, &out.OpenMonitoring + *out = new(OpenMonitoringObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageMode != nil { + in, out := &in.StorageMode, &out.StorageMode + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZookeeperConnectString != nil { + in, out := &in.ZookeeperConnectString, &out.ZookeeperConnectString + *out = new(string) + **out = **in + } + if in.ZookeeperConnectStringTLS != nil { + in, out := &in.ZookeeperConnectStringTLS, &out.ZookeeperConnectStringTLS + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.BrokerNodeGroupInfo != nil { + in, out := &in.BrokerNodeGroupInfo, &out.BrokerNodeGroupInfo + *out = new(BrokerNodeGroupInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(ClusterClientAuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ConfigurationInfo != nil { + in, out := &in.ConfigurationInfo, &out.ConfigurationInfo + *out = new(ConfigurationInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptionInfo != nil { + in, out := &in.EncryptionInfo, &out.EncryptionInfo + *out = new(EncryptionInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.EnhancedMonitoring != nil { + in, out := &in.EnhancedMonitoring, &out.EnhancedMonitoring + *out = new(string) + **out = **in + } + if in.KafkaVersion != nil { + in, out := &in.KafkaVersion, &out.KafkaVersion + *out = new(string) + **out = **in + } + if in.LoggingInfo != nil { + in, out := &in.LoggingInfo, &out.LoggingInfo + *out = new(LoggingInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.NumberOfBrokerNodes != nil { + in, out := &in.NumberOfBrokerNodes, &out.NumberOfBrokerNodes + *out = new(float64) + **out = **in + } + if in.OpenMonitoring != nil { + in, out := &in.OpenMonitoring, &out.OpenMonitoring + *out = new(OpenMonitoringParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StorageMode != nil { + in, out := &in.StorageMode, &out.StorageMode + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal 
:= (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationInfoInitParameters) DeepCopyInto(out *ConfigurationInfoInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInfoInitParameters. +func (in *ConfigurationInfoInitParameters) DeepCopy() *ConfigurationInfoInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInfoObservation) DeepCopyInto(out *ConfigurationInfoObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInfoObservation. +func (in *ConfigurationInfoObservation) DeepCopy() *ConfigurationInfoObservation { + if in == nil { + return nil + } + out := new(ConfigurationInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationInfoParameters) DeepCopyInto(out *ConfigurationInfoParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInfoParameters. +func (in *ConfigurationInfoParameters) DeepCopy() *ConfigurationInfoParameters { + if in == nil { + return nil + } + out := new(ConfigurationInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityInfoInitParameters) DeepCopyInto(out *ConnectivityInfoInitParameters) { + *out = *in + if in.PublicAccess != nil { + in, out := &in.PublicAccess, &out.PublicAccess + *out = new(PublicAccessInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCConnectivity != nil { + in, out := &in.VPCConnectivity, &out.VPCConnectivity + *out = new(VPCConnectivityInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInfoInitParameters. +func (in *ConnectivityInfoInitParameters) DeepCopy() *ConnectivityInfoInitParameters { + if in == nil { + return nil + } + out := new(ConnectivityInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectivityInfoObservation) DeepCopyInto(out *ConnectivityInfoObservation) { + *out = *in + if in.PublicAccess != nil { + in, out := &in.PublicAccess, &out.PublicAccess + *out = new(PublicAccessObservation) + (*in).DeepCopyInto(*out) + } + if in.VPCConnectivity != nil { + in, out := &in.VPCConnectivity, &out.VPCConnectivity + *out = new(VPCConnectivityObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInfoObservation. +func (in *ConnectivityInfoObservation) DeepCopy() *ConnectivityInfoObservation { + if in == nil { + return nil + } + out := new(ConnectivityInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityInfoParameters) DeepCopyInto(out *ConnectivityInfoParameters) { + *out = *in + if in.PublicAccess != nil { + in, out := &in.PublicAccess, &out.PublicAccess + *out = new(PublicAccessParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCConnectivity != nil { + in, out := &in.VPCConnectivity, &out.VPCConnectivity + *out = new(VPCConnectivityParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInfoParameters. +func (in *ConnectivityInfoParameters) DeepCopy() *ConnectivityInfoParameters { + if in == nil { + return nil + } + out := new(ConnectivityInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSStorageInfoInitParameters) DeepCopyInto(out *EBSStorageInfoInitParameters) { + *out = *in + if in.ProvisionedThroughput != nil { + in, out := &in.ProvisionedThroughput, &out.ProvisionedThroughput + *out = new(ProvisionedThroughputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSStorageInfoInitParameters. +func (in *EBSStorageInfoInitParameters) DeepCopy() *EBSStorageInfoInitParameters { + if in == nil { + return nil + } + out := new(EBSStorageInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSStorageInfoObservation) DeepCopyInto(out *EBSStorageInfoObservation) { + *out = *in + if in.ProvisionedThroughput != nil { + in, out := &in.ProvisionedThroughput, &out.ProvisionedThroughput + *out = new(ProvisionedThroughputObservation) + (*in).DeepCopyInto(*out) + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSStorageInfoObservation. +func (in *EBSStorageInfoObservation) DeepCopy() *EBSStorageInfoObservation { + if in == nil { + return nil + } + out := new(EBSStorageInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSStorageInfoParameters) DeepCopyInto(out *EBSStorageInfoParameters) { + *out = *in + if in.ProvisionedThroughput != nil { + in, out := &in.ProvisionedThroughput, &out.ProvisionedThroughput + *out = new(ProvisionedThroughputParameters) + (*in).DeepCopyInto(*out) + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSStorageInfoParameters. +func (in *EBSStorageInfoParameters) DeepCopy() *EBSStorageInfoParameters { + if in == nil { + return nil + } + out := new(EBSStorageInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionInTransitInitParameters) DeepCopyInto(out *EncryptionInTransitInitParameters) { + *out = *in + if in.ClientBroker != nil { + in, out := &in.ClientBroker, &out.ClientBroker + *out = new(string) + **out = **in + } + if in.InCluster != nil { + in, out := &in.InCluster, &out.InCluster + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInTransitInitParameters. +func (in *EncryptionInTransitInitParameters) DeepCopy() *EncryptionInTransitInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInTransitInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionInTransitObservation) DeepCopyInto(out *EncryptionInTransitObservation) { + *out = *in + if in.ClientBroker != nil { + in, out := &in.ClientBroker, &out.ClientBroker + *out = new(string) + **out = **in + } + if in.InCluster != nil { + in, out := &in.InCluster, &out.InCluster + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInTransitObservation. +func (in *EncryptionInTransitObservation) DeepCopy() *EncryptionInTransitObservation { + if in == nil { + return nil + } + out := new(EncryptionInTransitObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionInTransitParameters) DeepCopyInto(out *EncryptionInTransitParameters) { + *out = *in + if in.ClientBroker != nil { + in, out := &in.ClientBroker, &out.ClientBroker + *out = new(string) + **out = **in + } + if in.InCluster != nil { + in, out := &in.InCluster, &out.InCluster + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInTransitParameters. +func (in *EncryptionInTransitParameters) DeepCopy() *EncryptionInTransitParameters { + if in == nil { + return nil + } + out := new(EncryptionInTransitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionInfoInitParameters) DeepCopyInto(out *EncryptionInfoInitParameters) { + *out = *in + if in.EncryptionAtRestKMSKeyArn != nil { + in, out := &in.EncryptionAtRestKMSKeyArn, &out.EncryptionAtRestKMSKeyArn + *out = new(string) + **out = **in + } + if in.EncryptionAtRestKMSKeyArnRef != nil { + in, out := &in.EncryptionAtRestKMSKeyArnRef, &out.EncryptionAtRestKMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EncryptionAtRestKMSKeyArnSelector != nil { + in, out := &in.EncryptionAtRestKMSKeyArnSelector, &out.EncryptionAtRestKMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EncryptionInTransit != nil { + in, out := &in.EncryptionInTransit, &out.EncryptionInTransit + *out = new(EncryptionInTransitInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInfoInitParameters. +func (in *EncryptionInfoInitParameters) DeepCopy() *EncryptionInfoInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionInfoObservation) DeepCopyInto(out *EncryptionInfoObservation) { + *out = *in + if in.EncryptionAtRestKMSKeyArn != nil { + in, out := &in.EncryptionAtRestKMSKeyArn, &out.EncryptionAtRestKMSKeyArn + *out = new(string) + **out = **in + } + if in.EncryptionInTransit != nil { + in, out := &in.EncryptionInTransit, &out.EncryptionInTransit + *out = new(EncryptionInTransitObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInfoObservation. 
+func (in *EncryptionInfoObservation) DeepCopy() *EncryptionInfoObservation { + if in == nil { + return nil + } + out := new(EncryptionInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionInfoParameters) DeepCopyInto(out *EncryptionInfoParameters) { + *out = *in + if in.EncryptionAtRestKMSKeyArn != nil { + in, out := &in.EncryptionAtRestKMSKeyArn, &out.EncryptionAtRestKMSKeyArn + *out = new(string) + **out = **in + } + if in.EncryptionAtRestKMSKeyArnRef != nil { + in, out := &in.EncryptionAtRestKMSKeyArnRef, &out.EncryptionAtRestKMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EncryptionAtRestKMSKeyArnSelector != nil { + in, out := &in.EncryptionAtRestKMSKeyArnSelector, &out.EncryptionAtRestKMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EncryptionInTransit != nil { + in, out := &in.EncryptionInTransit, &out.EncryptionInTransit + *out = new(EncryptionInTransitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInfoParameters. +func (in *EncryptionInfoParameters) DeepCopy() *EncryptionInfoParameters { + if in == nil { + return nil + } + out := new(EncryptionInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirehoseInitParameters) DeepCopyInto(out *FirehoseInitParameters) { + *out = *in + if in.DeliveryStream != nil { + in, out := &in.DeliveryStream, &out.DeliveryStream + *out = new(string) + **out = **in + } + if in.DeliveryStreamRef != nil { + in, out := &in.DeliveryStreamRef, &out.DeliveryStreamRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeliveryStreamSelector != nil { + in, out := &in.DeliveryStreamSelector, &out.DeliveryStreamSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirehoseInitParameters. +func (in *FirehoseInitParameters) DeepCopy() *FirehoseInitParameters { + if in == nil { + return nil + } + out := new(FirehoseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirehoseObservation) DeepCopyInto(out *FirehoseObservation) { + *out = *in + if in.DeliveryStream != nil { + in, out := &in.DeliveryStream, &out.DeliveryStream + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirehoseObservation. +func (in *FirehoseObservation) DeepCopy() *FirehoseObservation { + if in == nil { + return nil + } + out := new(FirehoseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirehoseParameters) DeepCopyInto(out *FirehoseParameters) { + *out = *in + if in.DeliveryStream != nil { + in, out := &in.DeliveryStream, &out.DeliveryStream + *out = new(string) + **out = **in + } + if in.DeliveryStreamRef != nil { + in, out := &in.DeliveryStreamRef, &out.DeliveryStreamRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeliveryStreamSelector != nil { + in, out := &in.DeliveryStreamSelector, &out.DeliveryStreamSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirehoseParameters. +func (in *FirehoseParameters) DeepCopy() *FirehoseParameters { + if in == nil { + return nil + } + out := new(FirehoseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JmxExporterInitParameters) DeepCopyInto(out *JmxExporterInitParameters) { + *out = *in + if in.EnabledInBroker != nil { + in, out := &in.EnabledInBroker, &out.EnabledInBroker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JmxExporterInitParameters. +func (in *JmxExporterInitParameters) DeepCopy() *JmxExporterInitParameters { + if in == nil { + return nil + } + out := new(JmxExporterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JmxExporterObservation) DeepCopyInto(out *JmxExporterObservation) { + *out = *in + if in.EnabledInBroker != nil { + in, out := &in.EnabledInBroker, &out.EnabledInBroker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JmxExporterObservation. +func (in *JmxExporterObservation) DeepCopy() *JmxExporterObservation { + if in == nil { + return nil + } + out := new(JmxExporterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JmxExporterParameters) DeepCopyInto(out *JmxExporterParameters) { + *out = *in + if in.EnabledInBroker != nil { + in, out := &in.EnabledInBroker, &out.EnabledInBroker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JmxExporterParameters. +func (in *JmxExporterParameters) DeepCopy() *JmxExporterParameters { + if in == nil { + return nil + } + out := new(JmxExporterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingInfoInitParameters) DeepCopyInto(out *LoggingInfoInitParameters) { + *out = *in + if in.BrokerLogs != nil { + in, out := &in.BrokerLogs, &out.BrokerLogs + *out = new(BrokerLogsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInfoInitParameters. +func (in *LoggingInfoInitParameters) DeepCopy() *LoggingInfoInitParameters { + if in == nil { + return nil + } + out := new(LoggingInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingInfoObservation) DeepCopyInto(out *LoggingInfoObservation) { + *out = *in + if in.BrokerLogs != nil { + in, out := &in.BrokerLogs, &out.BrokerLogs + *out = new(BrokerLogsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInfoObservation. +func (in *LoggingInfoObservation) DeepCopy() *LoggingInfoObservation { + if in == nil { + return nil + } + out := new(LoggingInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingInfoParameters) DeepCopyInto(out *LoggingInfoParameters) { + *out = *in + if in.BrokerLogs != nil { + in, out := &in.BrokerLogs, &out.BrokerLogs + *out = new(BrokerLogsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInfoParameters. +func (in *LoggingInfoParameters) DeepCopy() *LoggingInfoParameters { + if in == nil { + return nil + } + out := new(LoggingInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterInitParameters) DeepCopyInto(out *NodeExporterInitParameters) { + *out = *in + if in.EnabledInBroker != nil { + in, out := &in.EnabledInBroker, &out.EnabledInBroker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterInitParameters. +func (in *NodeExporterInitParameters) DeepCopy() *NodeExporterInitParameters { + if in == nil { + return nil + } + out := new(NodeExporterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeExporterObservation) DeepCopyInto(out *NodeExporterObservation) { + *out = *in + if in.EnabledInBroker != nil { + in, out := &in.EnabledInBroker, &out.EnabledInBroker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterObservation. +func (in *NodeExporterObservation) DeepCopy() *NodeExporterObservation { + if in == nil { + return nil + } + out := new(NodeExporterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterParameters) DeepCopyInto(out *NodeExporterParameters) { + *out = *in + if in.EnabledInBroker != nil { + in, out := &in.EnabledInBroker, &out.EnabledInBroker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterParameters. +func (in *NodeExporterParameters) DeepCopy() *NodeExporterParameters { + if in == nil { + return nil + } + out := new(NodeExporterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenMonitoringInitParameters) DeepCopyInto(out *OpenMonitoringInitParameters) { + *out = *in + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(PrometheusInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenMonitoringInitParameters. +func (in *OpenMonitoringInitParameters) DeepCopy() *OpenMonitoringInitParameters { + if in == nil { + return nil + } + out := new(OpenMonitoringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenMonitoringObservation) DeepCopyInto(out *OpenMonitoringObservation) { + *out = *in + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(PrometheusObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenMonitoringObservation. +func (in *OpenMonitoringObservation) DeepCopy() *OpenMonitoringObservation { + if in == nil { + return nil + } + out := new(OpenMonitoringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenMonitoringParameters) DeepCopyInto(out *OpenMonitoringParameters) { + *out = *in + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(PrometheusParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenMonitoringParameters. +func (in *OpenMonitoringParameters) DeepCopy() *OpenMonitoringParameters { + if in == nil { + return nil + } + out := new(OpenMonitoringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusInitParameters) DeepCopyInto(out *PrometheusInitParameters) { + *out = *in + if in.JmxExporter != nil { + in, out := &in.JmxExporter, &out.JmxExporter + *out = new(JmxExporterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NodeExporter != nil { + in, out := &in.NodeExporter, &out.NodeExporter + *out = new(NodeExporterInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusInitParameters. 
+func (in *PrometheusInitParameters) DeepCopy() *PrometheusInitParameters { + if in == nil { + return nil + } + out := new(PrometheusInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusObservation) DeepCopyInto(out *PrometheusObservation) { + *out = *in + if in.JmxExporter != nil { + in, out := &in.JmxExporter, &out.JmxExporter + *out = new(JmxExporterObservation) + (*in).DeepCopyInto(*out) + } + if in.NodeExporter != nil { + in, out := &in.NodeExporter, &out.NodeExporter + *out = new(NodeExporterObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusObservation. +func (in *PrometheusObservation) DeepCopy() *PrometheusObservation { + if in == nil { + return nil + } + out := new(PrometheusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusParameters) DeepCopyInto(out *PrometheusParameters) { + *out = *in + if in.JmxExporter != nil { + in, out := &in.JmxExporter, &out.JmxExporter + *out = new(JmxExporterParameters) + (*in).DeepCopyInto(*out) + } + if in.NodeExporter != nil { + in, out := &in.NodeExporter, &out.NodeExporter + *out = new(NodeExporterParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusParameters. +func (in *PrometheusParameters) DeepCopy() *PrometheusParameters { + if in == nil { + return nil + } + out := new(PrometheusParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProvisionedThroughputInitParameters) DeepCopyInto(out *ProvisionedThroughputInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.VolumeThroughput != nil { + in, out := &in.VolumeThroughput, &out.VolumeThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionedThroughputInitParameters. +func (in *ProvisionedThroughputInitParameters) DeepCopy() *ProvisionedThroughputInitParameters { + if in == nil { + return nil + } + out := new(ProvisionedThroughputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisionedThroughputObservation) DeepCopyInto(out *ProvisionedThroughputObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.VolumeThroughput != nil { + in, out := &in.VolumeThroughput, &out.VolumeThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionedThroughputObservation. +func (in *ProvisionedThroughputObservation) DeepCopy() *ProvisionedThroughputObservation { + if in == nil { + return nil + } + out := new(ProvisionedThroughputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProvisionedThroughputParameters) DeepCopyInto(out *ProvisionedThroughputParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.VolumeThroughput != nil { + in, out := &in.VolumeThroughput, &out.VolumeThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionedThroughputParameters. +func (in *ProvisionedThroughputParameters) DeepCopy() *ProvisionedThroughputParameters { + if in == nil { + return nil + } + out := new(ProvisionedThroughputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicAccessInitParameters) DeepCopyInto(out *PublicAccessInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicAccessInitParameters. +func (in *PublicAccessInitParameters) DeepCopy() *PublicAccessInitParameters { + if in == nil { + return nil + } + out := new(PublicAccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicAccessObservation) DeepCopyInto(out *PublicAccessObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicAccessObservation. 
+func (in *PublicAccessObservation) DeepCopy() *PublicAccessObservation { + if in == nil { + return nil + } + out := new(PublicAccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicAccessParameters) DeepCopyInto(out *PublicAccessParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicAccessParameters. +func (in *PublicAccessParameters) DeepCopy() *PublicAccessParameters { + if in == nil { + return nil + } + out := new(PublicAccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InitParameters) DeepCopyInto(out *S3InitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InitParameters. +func (in *S3InitParameters) DeepCopy() *S3InitParameters { + if in == nil { + return nil + } + out := new(S3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Observation) DeepCopyInto(out *S3Observation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Observation. +func (in *S3Observation) DeepCopy() *S3Observation { + if in == nil { + return nil + } + out := new(S3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Parameters) DeepCopyInto(out *S3Parameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Parameters. +func (in *S3Parameters) DeepCopy() *S3Parameters { + if in == nil { + return nil + } + out := new(S3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SaslInitParameters) DeepCopyInto(out *SaslInitParameters) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(bool) + **out = **in + } + if in.Scram != nil { + in, out := &in.Scram, &out.Scram + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SaslInitParameters. +func (in *SaslInitParameters) DeepCopy() *SaslInitParameters { + if in == nil { + return nil + } + out := new(SaslInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SaslObservation) DeepCopyInto(out *SaslObservation) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(bool) + **out = **in + } + if in.Scram != nil { + in, out := &in.Scram, &out.Scram + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SaslObservation. +func (in *SaslObservation) DeepCopy() *SaslObservation { + if in == nil { + return nil + } + out := new(SaslObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SaslParameters) DeepCopyInto(out *SaslParameters) { + *out = *in + if in.IAM != nil { + in, out := &in.IAM, &out.IAM + *out = new(bool) + **out = **in + } + if in.Scram != nil { + in, out := &in.Scram, &out.Scram + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SaslParameters. +func (in *SaslParameters) DeepCopy() *SaslParameters { + if in == nil { + return nil + } + out := new(SaslParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageInfoInitParameters) DeepCopyInto(out *StorageInfoInitParameters) { + *out = *in + if in.EBSStorageInfo != nil { + in, out := &in.EBSStorageInfo, &out.EBSStorageInfo + *out = new(EBSStorageInfoInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageInfoInitParameters. +func (in *StorageInfoInitParameters) DeepCopy() *StorageInfoInitParameters { + if in == nil { + return nil + } + out := new(StorageInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageInfoObservation) DeepCopyInto(out *StorageInfoObservation) { + *out = *in + if in.EBSStorageInfo != nil { + in, out := &in.EBSStorageInfo, &out.EBSStorageInfo + *out = new(EBSStorageInfoObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageInfoObservation. +func (in *StorageInfoObservation) DeepCopy() *StorageInfoObservation { + if in == nil { + return nil + } + out := new(StorageInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageInfoParameters) DeepCopyInto(out *StorageInfoParameters) { + *out = *in + if in.EBSStorageInfo != nil { + in, out := &in.EBSStorageInfo, &out.EBSStorageInfo + *out = new(EBSStorageInfoParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageInfoParameters. +func (in *StorageInfoParameters) DeepCopy() *StorageInfoParameters { + if in == nil { + return nil + } + out := new(StorageInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TLSInitParameters) DeepCopyInto(out *TLSInitParameters) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSInitParameters. +func (in *TLSInitParameters) DeepCopy() *TLSInitParameters { + if in == nil { + return nil + } + out := new(TLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSObservation) DeepCopyInto(out *TLSObservation) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSObservation. +func (in *TLSObservation) DeepCopy() *TLSObservation { + if in == nil { + return nil + } + out := new(TLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSParameters) DeepCopyInto(out *TLSParameters) { + *out = *in + if in.CertificateAuthorityArns != nil { + in, out := &in.CertificateAuthorityArns, &out.CertificateAuthorityArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSParameters. 
+func (in *TLSParameters) DeepCopy() *TLSParameters { + if in == nil { + return nil + } + out := new(TLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnectivityInitParameters) DeepCopyInto(out *VPCConnectivityInitParameters) { + *out = *in + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(ClientAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectivityInitParameters. +func (in *VPCConnectivityInitParameters) DeepCopy() *VPCConnectivityInitParameters { + if in == nil { + return nil + } + out := new(VPCConnectivityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConnectivityObservation) DeepCopyInto(out *VPCConnectivityObservation) { + *out = *in + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(ClientAuthenticationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectivityObservation. +func (in *VPCConnectivityObservation) DeepCopy() *VPCConnectivityObservation { + if in == nil { + return nil + } + out := new(VPCConnectivityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConnectivityParameters) DeepCopyInto(out *VPCConnectivityParameters) { + *out = *in + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(ClientAuthenticationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConnectivityParameters. +func (in *VPCConnectivityParameters) DeepCopy() *VPCConnectivityParameters { + if in == nil { + return nil + } + out := new(VPCConnectivityParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kafka/v1beta3/zz_generated.managed.go b/apis/kafka/v1beta3/zz_generated.managed.go new file mode 100644 index 0000000000..05ee4f570b --- /dev/null +++ b/apis/kafka/v1beta3/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta3 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. +func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. 
+func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. +func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kafka/v1beta3/zz_generated.managedlist.go b/apis/kafka/v1beta3/zz_generated.managedlist.go new file mode 100644 index 0000000000..d8b8b04e89 --- /dev/null +++ b/apis/kafka/v1beta3/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta3 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ClusterList. 
+func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kafka/v1beta3/zz_generated.resolvers.go b/apis/kafka/v1beta3/zz_generated.resolvers.go new file mode 100644 index 0000000000..8e92449901 --- /dev/null +++ b/apis/kafka/v1beta3/zz_generated.resolvers.go @@ -0,0 +1,352 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Cluster. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.BrokerNodeGroupInfo != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.BrokerNodeGroupInfo.ClientSubnets), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.BrokerNodeGroupInfo.ClientSubnetsRefs, + Selector: mg.Spec.ForProvider.BrokerNodeGroupInfo.ClientSubnetsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.BrokerNodeGroupInfo.ClientSubnets") + } + mg.Spec.ForProvider.BrokerNodeGroupInfo.ClientSubnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.BrokerNodeGroupInfo.ClientSubnetsRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.BrokerNodeGroupInfo != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.BrokerNodeGroupInfo.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.BrokerNodeGroupInfo.SecurityGroupsRefs, + Selector: 
mg.Spec.ForProvider.BrokerNodeGroupInfo.SecurityGroupsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.BrokerNodeGroupInfo.SecurityGroups") + } + mg.Spec.ForProvider.BrokerNodeGroupInfo.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.BrokerNodeGroupInfo.SecurityGroupsRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.ConfigurationInfo != nil { + { + m, l, err = apisresolver.GetManagedResource("kafka.aws.upbound.io", "v1beta1", "Configuration", "ConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ConfigurationInfo.Arn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ConfigurationInfo.ArnRef, + Selector: mg.Spec.ForProvider.ConfigurationInfo.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ConfigurationInfo.Arn") + } + mg.Spec.ForProvider.ConfigurationInfo.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ConfigurationInfo.ArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.EncryptionInfo != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EncryptionInfo.EncryptionAtRestKMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.EncryptionInfo.EncryptionAtRestKMSKeyArnRef, + Selector: mg.Spec.ForProvider.EncryptionInfo.EncryptionAtRestKMSKeyArnSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EncryptionInfo.EncryptionAtRestKMSKeyArn") + } + mg.Spec.ForProvider.EncryptionInfo.EncryptionAtRestKMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EncryptionInfo.EncryptionAtRestKMSKeyArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.LoggingInfo != nil { + if mg.Spec.ForProvider.LoggingInfo.BrokerLogs != nil { + if mg.Spec.ForProvider.LoggingInfo.BrokerLogs.CloudwatchLogs != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroup), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroupRef, + Selector: mg.Spec.ForProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroup") + } + mg.Spec.ForProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroup = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroupRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.LoggingInfo != nil { + if mg.Spec.ForProvider.LoggingInfo.BrokerLogs != nil { + if mg.Spec.ForProvider.LoggingInfo.BrokerLogs.Firehose != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, 
err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStream), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStreamRef, + Selector: mg.Spec.ForProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStreamSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStream") + } + mg.Spec.ForProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStream = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStreamRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.LoggingInfo != nil { + if mg.Spec.ForProvider.LoggingInfo.BrokerLogs != nil { + if mg.Spec.ForProvider.LoggingInfo.BrokerLogs.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoggingInfo.BrokerLogs.S3.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LoggingInfo.BrokerLogs.S3.BucketRef, + Selector: mg.Spec.ForProvider.LoggingInfo.BrokerLogs.S3.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoggingInfo.BrokerLogs.S3.Bucket") + } + mg.Spec.ForProvider.LoggingInfo.BrokerLogs.S3.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LoggingInfo.BrokerLogs.S3.BucketRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.BrokerNodeGroupInfo != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", 
"Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.BrokerNodeGroupInfo.ClientSubnets), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.BrokerNodeGroupInfo.ClientSubnetsRefs, + Selector: mg.Spec.InitProvider.BrokerNodeGroupInfo.ClientSubnetsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.BrokerNodeGroupInfo.ClientSubnets") + } + mg.Spec.InitProvider.BrokerNodeGroupInfo.ClientSubnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.BrokerNodeGroupInfo.ClientSubnetsRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.BrokerNodeGroupInfo != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.BrokerNodeGroupInfo.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.BrokerNodeGroupInfo.SecurityGroupsRefs, + Selector: mg.Spec.InitProvider.BrokerNodeGroupInfo.SecurityGroupsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.BrokerNodeGroupInfo.SecurityGroups") + } + mg.Spec.InitProvider.BrokerNodeGroupInfo.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.BrokerNodeGroupInfo.SecurityGroupsRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.ConfigurationInfo != nil { + { + m, l, err = 
apisresolver.GetManagedResource("kafka.aws.upbound.io", "v1beta1", "Configuration", "ConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ConfigurationInfo.Arn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ConfigurationInfo.ArnRef, + Selector: mg.Spec.InitProvider.ConfigurationInfo.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ConfigurationInfo.Arn") + } + mg.Spec.InitProvider.ConfigurationInfo.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ConfigurationInfo.ArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.EncryptionInfo != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EncryptionInfo.EncryptionAtRestKMSKeyArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.EncryptionInfo.EncryptionAtRestKMSKeyArnRef, + Selector: mg.Spec.InitProvider.EncryptionInfo.EncryptionAtRestKMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EncryptionInfo.EncryptionAtRestKMSKeyArn") + } + mg.Spec.InitProvider.EncryptionInfo.EncryptionAtRestKMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EncryptionInfo.EncryptionAtRestKMSKeyArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LoggingInfo != nil { + if mg.Spec.InitProvider.LoggingInfo.BrokerLogs != nil { + if 
mg.Spec.InitProvider.LoggingInfo.BrokerLogs.CloudwatchLogs != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroup), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroupRef, + Selector: mg.Spec.InitProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroup") + } + mg.Spec.InitProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroup = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LoggingInfo.BrokerLogs.CloudwatchLogs.LogGroupRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.LoggingInfo != nil { + if mg.Spec.InitProvider.LoggingInfo.BrokerLogs != nil { + if mg.Spec.InitProvider.LoggingInfo.BrokerLogs.Firehose != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStream), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStreamRef, + Selector: mg.Spec.InitProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStreamSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + 
return errors.Wrap(err, "mg.Spec.InitProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStream") + } + mg.Spec.InitProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStream = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LoggingInfo.BrokerLogs.Firehose.DeliveryStreamRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.LoggingInfo != nil { + if mg.Spec.InitProvider.LoggingInfo.BrokerLogs != nil { + if mg.Spec.InitProvider.LoggingInfo.BrokerLogs.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoggingInfo.BrokerLogs.S3.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LoggingInfo.BrokerLogs.S3.BucketRef, + Selector: mg.Spec.InitProvider.LoggingInfo.BrokerLogs.S3.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LoggingInfo.BrokerLogs.S3.Bucket") + } + mg.Spec.InitProvider.LoggingInfo.BrokerLogs.S3.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LoggingInfo.BrokerLogs.S3.BucketRef = rsp.ResolvedReference + + } + } + } + + return nil +} diff --git a/apis/kafka/v1beta3/zz_groupversion_info.go b/apis/kafka/v1beta3/zz_groupversion_info.go new file mode 100755 index 0000000000..a43b860770 --- /dev/null +++ b/apis/kafka/v1beta3/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=kafka.aws.upbound.io +// +versionName=v1beta3 +package v1beta3 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "kafka.aws.upbound.io" + CRDVersion = "v1beta3" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/kafkaconnect/v1beta1/zz_generated.conversion_hubs.go b/apis/kafkaconnect/v1beta1/zz_generated.conversion_hubs.go index 2ae79e2382..a4e06cf112 100755 --- a/apis/kafkaconnect/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/kafkaconnect/v1beta1/zz_generated.conversion_hubs.go @@ -6,11 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Connector) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *CustomPlugin) Hub() {} - // Hub marks this type as a conversion hub. func (tr *WorkerConfiguration) Hub() {} diff --git a/apis/kafkaconnect/v1beta1/zz_generated.conversion_spokes.go b/apis/kafkaconnect/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ad9c06be1f --- /dev/null +++ b/apis/kafkaconnect/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Connector to the hub type. +func (tr *Connector) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Connector type. +func (tr *Connector) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this CustomPlugin to the hub type. +func (tr *CustomPlugin) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CustomPlugin type. 
+func (tr *CustomPlugin) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/kafkaconnect/v1beta2/zz_connector_terraformed.go b/apis/kafkaconnect/v1beta2/zz_connector_terraformed.go new file mode 100755 index 0000000000..9a855d88cb --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_connector_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Connector +func (mg *Connector) GetTerraformResourceType() string { + return "aws_mskconnect_connector" +} + +// GetConnectionDetailsMapping for this Connector +func (tr *Connector) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Connector +func (tr *Connector) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Connector +func (tr *Connector) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Connector +func (tr *Connector) GetID() string { + if tr.Status.AtProvider.ID == nil { + 
return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Connector
+func (tr *Connector) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Connector
+func (tr *Connector) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Connector
+func (tr *Connector) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Connector
+func (tr *Connector) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Connector using its observed tfState.
+// returns True if there are any spec changes for the resource. +func (tr *Connector) LateInitialize(attrs []byte) (bool, error) { + params := &ConnectorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Connector) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kafkaconnect/v1beta2/zz_connector_types.go b/apis/kafkaconnect/v1beta2/zz_connector_types.go new file mode 100755 index 0000000000..da84d463fe --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_connector_types.go @@ -0,0 +1,891 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApacheKafkaClusterInitParameters struct { + + // The bootstrap servers of the cluster. + BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"` + + // Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. + VPC *VPCInitParameters `json:"vpc,omitempty" tf:"vpc,omitempty"` +} + +type ApacheKafkaClusterObservation struct { + + // The bootstrap servers of the cluster. + BootstrapServers *string `json:"bootstrapServers,omitempty" tf:"bootstrap_servers,omitempty"` + + // Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. 
+ VPC *VPCObservation `json:"vpc,omitempty" tf:"vpc,omitempty"` +} + +type ApacheKafkaClusterParameters struct { + + // The bootstrap servers of the cluster. + // +kubebuilder:validation:Optional + BootstrapServers *string `json:"bootstrapServers" tf:"bootstrap_servers,omitempty"` + + // Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. + // +kubebuilder:validation:Optional + VPC *VPCParameters `json:"vpc" tf:"vpc,omitempty"` +} + +type AutoscalingInitParameters struct { + + // The maximum number of workers allocated to the connector. + MaxWorkerCount *float64 `json:"maxWorkerCount,omitempty" tf:"max_worker_count,omitempty"` + + // The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1. + McuCount *float64 `json:"mcuCount,omitempty" tf:"mcu_count,omitempty"` + + // The minimum number of workers allocated to the connector. + MinWorkerCount *float64 `json:"minWorkerCount,omitempty" tf:"min_worker_count,omitempty"` + + // The scale-in policy for the connector. See below. + ScaleInPolicy *ScaleInPolicyInitParameters `json:"scaleInPolicy,omitempty" tf:"scale_in_policy,omitempty"` + + // The scale-out policy for the connector. See below. + ScaleOutPolicy *ScaleOutPolicyInitParameters `json:"scaleOutPolicy,omitempty" tf:"scale_out_policy,omitempty"` +} + +type AutoscalingObservation struct { + + // The maximum number of workers allocated to the connector. + MaxWorkerCount *float64 `json:"maxWorkerCount,omitempty" tf:"max_worker_count,omitempty"` + + // The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1. + McuCount *float64 `json:"mcuCount,omitempty" tf:"mcu_count,omitempty"` + + // The minimum number of workers allocated to the connector. + MinWorkerCount *float64 `json:"minWorkerCount,omitempty" tf:"min_worker_count,omitempty"` + + // The scale-in policy for the connector. See below. 
+ ScaleInPolicy *ScaleInPolicyObservation `json:"scaleInPolicy,omitempty" tf:"scale_in_policy,omitempty"` + + // The scale-out policy for the connector. See below. + ScaleOutPolicy *ScaleOutPolicyObservation `json:"scaleOutPolicy,omitempty" tf:"scale_out_policy,omitempty"` +} + +type AutoscalingParameters struct { + + // The maximum number of workers allocated to the connector. + // +kubebuilder:validation:Optional + MaxWorkerCount *float64 `json:"maxWorkerCount" tf:"max_worker_count,omitempty"` + + // The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1. + // +kubebuilder:validation:Optional + McuCount *float64 `json:"mcuCount,omitempty" tf:"mcu_count,omitempty"` + + // The minimum number of workers allocated to the connector. + // +kubebuilder:validation:Optional + MinWorkerCount *float64 `json:"minWorkerCount" tf:"min_worker_count,omitempty"` + + // The scale-in policy for the connector. See below. + // +kubebuilder:validation:Optional + ScaleInPolicy *ScaleInPolicyParameters `json:"scaleInPolicy,omitempty" tf:"scale_in_policy,omitempty"` + + // The scale-out policy for the connector. See below. + // +kubebuilder:validation:Optional + ScaleOutPolicy *ScaleOutPolicyParameters `json:"scaleOutPolicy,omitempty" tf:"scale_out_policy,omitempty"` +} + +type CapacityInitParameters struct { + + // Information about the auto scaling parameters for the connector. See below. + Autoscaling *AutoscalingInitParameters `json:"autoscaling,omitempty" tf:"autoscaling,omitempty"` + + // Details about a fixed capacity allocated to a connector. See below. + ProvisionedCapacity *ProvisionedCapacityInitParameters `json:"provisionedCapacity,omitempty" tf:"provisioned_capacity,omitempty"` +} + +type CapacityObservation struct { + + // Information about the auto scaling parameters for the connector. See below. 
+ Autoscaling *AutoscalingObservation `json:"autoscaling,omitempty" tf:"autoscaling,omitempty"` + + // Details about a fixed capacity allocated to a connector. See below. + ProvisionedCapacity *ProvisionedCapacityObservation `json:"provisionedCapacity,omitempty" tf:"provisioned_capacity,omitempty"` +} + +type CapacityParameters struct { + + // Information about the auto scaling parameters for the connector. See below. + // +kubebuilder:validation:Optional + Autoscaling *AutoscalingParameters `json:"autoscaling,omitempty" tf:"autoscaling,omitempty"` + + // Details about a fixed capacity allocated to a connector. See below. + // +kubebuilder:validation:Optional + ProvisionedCapacity *ProvisionedCapacityParameters `json:"provisionedCapacity,omitempty" tf:"provisioned_capacity,omitempty"` +} + +type CloudwatchLogsInitParameters struct { + + // Specifies whether connector logs get sent to the specified Amazon S3 destination. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the CloudWatch log group that is the destination for log delivery. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate logGroup. + // +kubebuilder:validation:Optional + LogGroupRef *v1.Reference `json:"logGroupRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate logGroup. + // +kubebuilder:validation:Optional + LogGroupSelector *v1.Selector `json:"logGroupSelector,omitempty" tf:"-"` +} + +type CloudwatchLogsObservation struct { + + // Specifies whether connector logs get sent to the specified Amazon S3 destination. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the CloudWatch log group that is the destination for log delivery. 
+ LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` +} + +type CloudwatchLogsParameters struct { + + // Specifies whether connector logs get sent to the specified Amazon S3 destination. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // The name of the CloudWatch log group that is the destination for log delivery. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +kubebuilder:validation:Optional + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate logGroup. + // +kubebuilder:validation:Optional + LogGroupRef *v1.Reference `json:"logGroupRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate logGroup. + // +kubebuilder:validation:Optional + LogGroupSelector *v1.Selector `json:"logGroupSelector,omitempty" tf:"-"` +} + +type ConnectorInitParameters struct { + + // Information about the capacity allocated to the connector. See below. + Capacity *CapacityInitParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A map of keys to values that represent the configuration for the connector. + // +mapType=granular + ConnectorConfiguration map[string]*string `json:"connectorConfiguration,omitempty" tf:"connector_configuration,omitempty"` + + // A summary description of the connector. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies which Apache Kafka cluster to connect to. See below. + KafkaCluster *KafkaClusterInitParameters `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // Details of the client authentication used by the Apache Kafka cluster. See below. 
+ KafkaClusterClientAuthentication *KafkaClusterClientAuthenticationInitParameters `json:"kafkaClusterClientAuthentication,omitempty" tf:"kafka_cluster_client_authentication,omitempty"` + + // Details of encryption in transit to the Apache Kafka cluster. See below. + KafkaClusterEncryptionInTransit *KafkaClusterEncryptionInTransitInitParameters `json:"kafkaClusterEncryptionInTransit,omitempty" tf:"kafka_cluster_encryption_in_transit,omitempty"` + + // The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins. + KafkaconnectVersion *string `json:"kafkaconnectVersion,omitempty" tf:"kafkaconnect_version,omitempty"` + + // Details about log delivery. See below. + LogDelivery *LogDeliveryInitParameters `json:"logDelivery,omitempty" tf:"log_delivery,omitempty"` + + // Specifies which plugins to use for the connector. See below. + Plugin []PluginInitParameters `json:"plugin,omitempty" tf:"plugin,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ServiceExecutionRoleArn *string `json:"serviceExecutionRoleArn,omitempty" tf:"service_execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceExecutionRoleArn. + // +kubebuilder:validation:Optional + ServiceExecutionRoleArnRef *v1.Reference `json:"serviceExecutionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceExecutionRoleArn. 
+ // +kubebuilder:validation:Optional + ServiceExecutionRoleArnSelector *v1.Selector `json:"serviceExecutionRoleArnSelector,omitempty" tf:"-"` + + // Specifies which worker configuration to use with the connector. See below. + WorkerConfiguration *WorkerConfigurationInitParameters `json:"workerConfiguration,omitempty" tf:"worker_configuration,omitempty"` +} + +type ConnectorObservation struct { + + // The Amazon Resource Name (ARN) of the custom plugin. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Information about the capacity allocated to the connector. See below. + Capacity *CapacityObservation `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A map of keys to values that represent the configuration for the connector. + // +mapType=granular + ConnectorConfiguration map[string]*string `json:"connectorConfiguration,omitempty" tf:"connector_configuration,omitempty"` + + // A summary description of the connector. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies which Apache Kafka cluster to connect to. See below. + KafkaCluster *KafkaClusterObservation `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // Details of the client authentication used by the Apache Kafka cluster. See below. + KafkaClusterClientAuthentication *KafkaClusterClientAuthenticationObservation `json:"kafkaClusterClientAuthentication,omitempty" tf:"kafka_cluster_client_authentication,omitempty"` + + // Details of encryption in transit to the Apache Kafka cluster. See below. + KafkaClusterEncryptionInTransit *KafkaClusterEncryptionInTransitObservation `json:"kafkaClusterEncryptionInTransit,omitempty" tf:"kafka_cluster_encryption_in_transit,omitempty"` + + // The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins. 
+ KafkaconnectVersion *string `json:"kafkaconnectVersion,omitempty" tf:"kafkaconnect_version,omitempty"` + + // Details about log delivery. See below. + LogDelivery *LogDeliveryObservation `json:"logDelivery,omitempty" tf:"log_delivery,omitempty"` + + // The name of the connector. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which plugins to use for the connector. See below. + Plugin []PluginObservation `json:"plugin,omitempty" tf:"plugin,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket. + ServiceExecutionRoleArn *string `json:"serviceExecutionRoleArn,omitempty" tf:"service_execution_role_arn,omitempty"` + + // The current version of the connector. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Specifies which worker configuration to use with the connector. See below. + WorkerConfiguration *WorkerConfigurationObservation `json:"workerConfiguration,omitempty" tf:"worker_configuration,omitempty"` +} + +type ConnectorParameters struct { + + // Information about the capacity allocated to the connector. See below. + // +kubebuilder:validation:Optional + Capacity *CapacityParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A map of keys to values that represent the configuration for the connector. + // +kubebuilder:validation:Optional + // +mapType=granular + ConnectorConfiguration map[string]*string `json:"connectorConfiguration,omitempty" tf:"connector_configuration,omitempty"` + + // A summary description of the connector. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies which Apache Kafka cluster to connect to. 
See below. + // +kubebuilder:validation:Optional + KafkaCluster *KafkaClusterParameters `json:"kafkaCluster,omitempty" tf:"kafka_cluster,omitempty"` + + // Details of the client authentication used by the Apache Kafka cluster. See below. + // +kubebuilder:validation:Optional + KafkaClusterClientAuthentication *KafkaClusterClientAuthenticationParameters `json:"kafkaClusterClientAuthentication,omitempty" tf:"kafka_cluster_client_authentication,omitempty"` + + // Details of encryption in transit to the Apache Kafka cluster. See below. + // +kubebuilder:validation:Optional + KafkaClusterEncryptionInTransit *KafkaClusterEncryptionInTransitParameters `json:"kafkaClusterEncryptionInTransit,omitempty" tf:"kafka_cluster_encryption_in_transit,omitempty"` + + // The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins. + // +kubebuilder:validation:Optional + KafkaconnectVersion *string `json:"kafkaconnectVersion,omitempty" tf:"kafkaconnect_version,omitempty"` + + // Details about log delivery. See below. + // +kubebuilder:validation:Optional + LogDelivery *LogDeliveryParameters `json:"logDelivery,omitempty" tf:"log_delivery,omitempty"` + + // The name of the connector. + // +kubebuilder:validation:Required + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies which plugins to use for the connector. See below. + // +kubebuilder:validation:Optional + Plugin []PluginParameters `json:"plugin,omitempty" tf:"plugin,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. 
For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ServiceExecutionRoleArn *string `json:"serviceExecutionRoleArn,omitempty" tf:"service_execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceExecutionRoleArn. + // +kubebuilder:validation:Optional + ServiceExecutionRoleArnRef *v1.Reference `json:"serviceExecutionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceExecutionRoleArn. + // +kubebuilder:validation:Optional + ServiceExecutionRoleArnSelector *v1.Selector `json:"serviceExecutionRoleArnSelector,omitempty" tf:"-"` + + // Specifies which worker configuration to use with the connector. See below. + // +kubebuilder:validation:Optional + WorkerConfiguration *WorkerConfigurationParameters `json:"workerConfiguration,omitempty" tf:"worker_configuration,omitempty"` +} + +type CustomPluginInitParameters struct { + + // The Amazon Resource Name (ARN) of the worker configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafkaconnect/v1beta2.CustomPlugin + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a CustomPlugin in kafkaconnect to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a CustomPlugin in kafkaconnect to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // The revision of the worker configuration. 
+ Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` +} + +type CustomPluginObservation struct { + + // The Amazon Resource Name (ARN) of the worker configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The revision of the worker configuration. + Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` +} + +type CustomPluginParameters struct { + + // The Amazon Resource Name (ARN) of the worker configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafkaconnect/v1beta2.CustomPlugin + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a CustomPlugin in kafkaconnect to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a CustomPlugin in kafkaconnect to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // The revision of the worker configuration. + // +kubebuilder:validation:Optional + Revision *float64 `json:"revision" tf:"revision,omitempty"` +} + +type FirehoseInitParameters struct { + + // The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",true) + DeliveryStream *string `json:"deliveryStream,omitempty" tf:"delivery_stream,omitempty"` + + // Reference to a DeliveryStream in firehose to populate deliveryStream. + // +kubebuilder:validation:Optional + DeliveryStreamRef *v1.Reference `json:"deliveryStreamRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate deliveryStream. 
+ // +kubebuilder:validation:Optional + DeliveryStreamSelector *v1.Selector `json:"deliveryStreamSelector,omitempty" tf:"-"` + + // Specifies whether connector logs get sent to the specified Kinesis Data Firehose delivery stream. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type FirehoseObservation struct { + + // The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery. + DeliveryStream *string `json:"deliveryStream,omitempty" tf:"delivery_stream,omitempty"` + + // Specifies whether connector logs get sent to the specified Kinesis Data Firehose delivery stream. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type FirehoseParameters struct { + + // The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",true) + // +kubebuilder:validation:Optional + DeliveryStream *string `json:"deliveryStream,omitempty" tf:"delivery_stream,omitempty"` + + // Reference to a DeliveryStream in firehose to populate deliveryStream. + // +kubebuilder:validation:Optional + DeliveryStreamRef *v1.Reference `json:"deliveryStreamRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate deliveryStream. + // +kubebuilder:validation:Optional + DeliveryStreamSelector *v1.Selector `json:"deliveryStreamSelector,omitempty" tf:"-"` + + // Specifies whether connector logs get sent to the specified Kinesis Data Firehose delivery stream. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type KafkaClusterClientAuthenticationInitParameters struct { + + // The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. 
The default value is NONE. + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` +} + +type KafkaClusterClientAuthenticationObservation struct { + + // The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. The default value is NONE. + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` +} + +type KafkaClusterClientAuthenticationParameters struct { + + // The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. The default value is NONE. + // +kubebuilder:validation:Optional + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` +} + +type KafkaClusterEncryptionInTransitInitParameters struct { + + // The type of encryption in transit to the Apache Kafka cluster. Valid values: PLAINTEXT, TLS. The default value is PLAINTEXT. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` +} + +type KafkaClusterEncryptionInTransitObservation struct { + + // The type of encryption in transit to the Apache Kafka cluster. Valid values: PLAINTEXT, TLS. The default value is PLAINTEXT. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` +} + +type KafkaClusterEncryptionInTransitParameters struct { + + // The type of encryption in transit to the Apache Kafka cluster. Valid values: PLAINTEXT, TLS. The default value is PLAINTEXT. + // +kubebuilder:validation:Optional + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` +} + +type KafkaClusterInitParameters struct { + + // The Apache Kafka cluster to which the connector is connected. 
+ ApacheKafkaCluster *ApacheKafkaClusterInitParameters `json:"apacheKafkaCluster,omitempty" tf:"apache_kafka_cluster,omitempty"` +} + +type KafkaClusterObservation struct { + + // The Apache Kafka cluster to which the connector is connected. + ApacheKafkaCluster *ApacheKafkaClusterObservation `json:"apacheKafkaCluster,omitempty" tf:"apache_kafka_cluster,omitempty"` +} + +type KafkaClusterParameters struct { + + // The Apache Kafka cluster to which the connector is connected. + // +kubebuilder:validation:Optional + ApacheKafkaCluster *ApacheKafkaClusterParameters `json:"apacheKafkaCluster" tf:"apache_kafka_cluster,omitempty"` +} + +type LogDeliveryInitParameters struct { + + // The workers can send worker logs to different destination types. This configuration specifies the details of these destinations. See below. + WorkerLogDelivery *WorkerLogDeliveryInitParameters `json:"workerLogDelivery,omitempty" tf:"worker_log_delivery,omitempty"` +} + +type LogDeliveryObservation struct { + + // The workers can send worker logs to different destination types. This configuration specifies the details of these destinations. See below. + WorkerLogDelivery *WorkerLogDeliveryObservation `json:"workerLogDelivery,omitempty" tf:"worker_log_delivery,omitempty"` +} + +type LogDeliveryParameters struct { + + // The workers can send worker logs to different destination types. This configuration specifies the details of these destinations. See below. + // +kubebuilder:validation:Optional + WorkerLogDelivery *WorkerLogDeliveryParameters `json:"workerLogDelivery" tf:"worker_log_delivery,omitempty"` +} + +type PluginInitParameters struct { + + // Details about a custom plugin. See below. + CustomPlugin *CustomPluginInitParameters `json:"customPlugin,omitempty" tf:"custom_plugin,omitempty"` +} + +type PluginObservation struct { + + // Details about a custom plugin. See below. 
+ CustomPlugin *CustomPluginObservation `json:"customPlugin,omitempty" tf:"custom_plugin,omitempty"` +} + +type PluginParameters struct { + + // Details about a custom plugin. See below. + // +kubebuilder:validation:Optional + CustomPlugin *CustomPluginParameters `json:"customPlugin" tf:"custom_plugin,omitempty"` +} + +type ProvisionedCapacityInitParameters struct { + + // The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1. + McuCount *float64 `json:"mcuCount,omitempty" tf:"mcu_count,omitempty"` + + // The number of workers that are allocated to the connector. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type ProvisionedCapacityObservation struct { + + // The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1. + McuCount *float64 `json:"mcuCount,omitempty" tf:"mcu_count,omitempty"` + + // The number of workers that are allocated to the connector. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type ProvisionedCapacityParameters struct { + + // The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1. + // +kubebuilder:validation:Optional + McuCount *float64 `json:"mcuCount,omitempty" tf:"mcu_count,omitempty"` + + // The number of workers that are allocated to the connector. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount" tf:"worker_count,omitempty"` +} + +type S3InitParameters struct { + + // The name of the S3 bucket that is the destination for log delivery. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. 
+ // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Specifies whether connector logs get sent to the specified Amazon S3 destination. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The S3 prefix that is the destination for log delivery. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3Observation struct { + + // The name of the S3 bucket that is the destination for log delivery. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Specifies whether connector logs get sent to the specified Amazon S3 destination. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The S3 prefix that is the destination for log delivery. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3Parameters struct { + + // The name of the S3 bucket that is the destination for log delivery. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Specifies whether connector logs get sent to the specified Amazon S3 destination. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // The S3 prefix that is the destination for log delivery. 
+ // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type ScaleInPolicyInitParameters struct { + + // The CPU utilization percentage threshold at which you want connector scale out to be triggered. + CPUUtilizationPercentage *float64 `json:"cpuUtilizationPercentage,omitempty" tf:"cpu_utilization_percentage,omitempty"` +} + +type ScaleInPolicyObservation struct { + + // The CPU utilization percentage threshold at which you want connector scale out to be triggered. + CPUUtilizationPercentage *float64 `json:"cpuUtilizationPercentage,omitempty" tf:"cpu_utilization_percentage,omitempty"` +} + +type ScaleInPolicyParameters struct { + + // The CPU utilization percentage threshold at which you want connector scale out to be triggered. + // +kubebuilder:validation:Optional + CPUUtilizationPercentage *float64 `json:"cpuUtilizationPercentage,omitempty" tf:"cpu_utilization_percentage,omitempty"` +} + +type ScaleOutPolicyInitParameters struct { + + // The CPU utilization percentage threshold at which you want connector scale out to be triggered. + CPUUtilizationPercentage *float64 `json:"cpuUtilizationPercentage,omitempty" tf:"cpu_utilization_percentage,omitempty"` +} + +type ScaleOutPolicyObservation struct { + + // The CPU utilization percentage threshold at which you want connector scale out to be triggered. + CPUUtilizationPercentage *float64 `json:"cpuUtilizationPercentage,omitempty" tf:"cpu_utilization_percentage,omitempty"` +} + +type ScaleOutPolicyParameters struct { + + // The CPU utilization percentage threshold at which you want connector scale out to be triggered. + // +kubebuilder:validation:Optional + CPUUtilizationPercentage *float64 `json:"cpuUtilizationPercentage,omitempty" tf:"cpu_utilization_percentage,omitempty"` +} + +type VPCInitParameters struct { + + // References to SecurityGroup in ec2 to populate securityGroups. 
+ // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // The security groups for the connector. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // References to Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetRefs []v1.Reference `json:"subnetRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetSelector *v1.Selector `json:"subnetSelector,omitempty" tf:"-"` + + // The subnets for the connector. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetRefs + // +crossplane:generate:reference:selectorFieldName=SubnetSelector + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type VPCObservation struct { + + // The security groups for the connector. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The subnets for the connector. + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type VPCParameters struct { + + // References to SecurityGroup in ec2 to populate securityGroups. 
+ // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // The security groups for the connector. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // References to Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetRefs []v1.Reference `json:"subnetRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnets. + // +kubebuilder:validation:Optional + SubnetSelector *v1.Selector `json:"subnetSelector,omitempty" tf:"-"` + + // The subnets for the connector. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetRefs + // +crossplane:generate:reference:selectorFieldName=SubnetSelector + // +kubebuilder:validation:Optional + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type WorkerConfigurationInitParameters struct { + + // The Amazon Resource Name (ARN) of the worker configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafkaconnect/v1beta1.WorkerConfiguration + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a WorkerConfiguration in kafkaconnect to populate arn. 
+ // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a WorkerConfiguration in kafkaconnect to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // The revision of the worker configuration. + Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` +} + +type WorkerConfigurationObservation struct { + + // The Amazon Resource Name (ARN) of the worker configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The revision of the worker configuration. + Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` +} + +type WorkerConfigurationParameters struct { + + // The Amazon Resource Name (ARN) of the worker configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kafkaconnect/v1beta1.WorkerConfiguration + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a WorkerConfiguration in kafkaconnect to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a WorkerConfiguration in kafkaconnect to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // The revision of the worker configuration. + // +kubebuilder:validation:Optional + Revision *float64 `json:"revision" tf:"revision,omitempty"` +} + +type WorkerLogDeliveryInitParameters struct { + + // Details about delivering logs to Amazon CloudWatch Logs. See below. + CloudwatchLogs *CloudwatchLogsInitParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + // Details about delivering logs to Amazon Kinesis Data Firehose. See below. 
+ Firehose *FirehoseInitParameters `json:"firehose,omitempty" tf:"firehose,omitempty"` + + // Details about delivering logs to Amazon S3. See below. + S3 *S3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type WorkerLogDeliveryObservation struct { + + // Details about delivering logs to Amazon CloudWatch Logs. See below. + CloudwatchLogs *CloudwatchLogsObservation `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + // Details about delivering logs to Amazon Kinesis Data Firehose. See below. + Firehose *FirehoseObservation `json:"firehose,omitempty" tf:"firehose,omitempty"` + + // Details about delivering logs to Amazon S3. See below. + S3 *S3Observation `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type WorkerLogDeliveryParameters struct { + + // Details about delivering logs to Amazon CloudWatch Logs. See below. + // +kubebuilder:validation:Optional + CloudwatchLogs *CloudwatchLogsParameters `json:"cloudwatchLogs,omitempty" tf:"cloudwatch_logs,omitempty"` + + // Details about delivering logs to Amazon Kinesis Data Firehose. See below. + // +kubebuilder:validation:Optional + Firehose *FirehoseParameters `json:"firehose,omitempty" tf:"firehose,omitempty"` + + // Details about delivering logs to Amazon S3. See below. + // +kubebuilder:validation:Optional + S3 *S3Parameters `json:"s3,omitempty" tf:"s3,omitempty"` +} + +// ConnectorSpec defines the desired state of Connector +type ConnectorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConnectorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConnectorInitParameters `json:"initProvider,omitempty"` +} + +// ConnectorStatus defines the observed state of Connector. +type ConnectorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConnectorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Connector is the Schema for the Connectors API. Provides an Amazon MSK Connect Connector resource. Changes to any parameter besides "scaling" will be rejected. Instead you must create a new resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Connector struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.capacity) || (has(self.initProvider) && has(self.initProvider.capacity))",message="spec.forProvider.capacity is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.connectorConfiguration) || (has(self.initProvider) && 
has(self.initProvider.connectorConfiguration))",message="spec.forProvider.connectorConfiguration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kafkaCluster) || (has(self.initProvider) && has(self.initProvider.kafkaCluster))",message="spec.forProvider.kafkaCluster is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kafkaClusterClientAuthentication) || (has(self.initProvider) && has(self.initProvider.kafkaClusterClientAuthentication))",message="spec.forProvider.kafkaClusterClientAuthentication is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kafkaClusterEncryptionInTransit) || (has(self.initProvider) && has(self.initProvider.kafkaClusterEncryptionInTransit))",message="spec.forProvider.kafkaClusterEncryptionInTransit is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kafkaconnectVersion) || (has(self.initProvider) && has(self.initProvider.kafkaconnectVersion))",message="spec.forProvider.kafkaconnectVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.plugin) || (has(self.initProvider) && has(self.initProvider.plugin))",message="spec.forProvider.plugin is a required parameter" + Spec ConnectorSpec `json:"spec"` + Status ConnectorStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// ConnectorList contains a list of Connectors +type ConnectorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Connector `json:"items"` +} + +// Repository type metadata. +var ( + Connector_Kind = "Connector" + Connector_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Connector_Kind}.String() + Connector_KindAPIVersion = Connector_Kind + "." + CRDGroupVersion.String() + Connector_GroupVersionKind = CRDGroupVersion.WithKind(Connector_Kind) +) + +func init() { + SchemeBuilder.Register(&Connector{}, &ConnectorList{}) +} diff --git a/apis/kafkaconnect/v1beta2/zz_customplugin_terraformed.go b/apis/kafkaconnect/v1beta2/zz_customplugin_terraformed.go new file mode 100755 index 0000000000..975ed32126 --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_customplugin_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CustomPlugin +func (mg *CustomPlugin) GetTerraformResourceType() string { + return "aws_mskconnect_custom_plugin" +} + +// GetConnectionDetailsMapping for this CustomPlugin +func (tr *CustomPlugin) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CustomPlugin +func (tr *CustomPlugin) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CustomPlugin +func (tr *CustomPlugin) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CustomPlugin +func (tr *CustomPlugin) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CustomPlugin +func (tr *CustomPlugin) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CustomPlugin +func (tr *CustomPlugin) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CustomPlugin +func (tr *CustomPlugin) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } 
+ base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this CustomPlugin +func (tr *CustomPlugin) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CustomPlugin using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CustomPlugin) LateInitialize(attrs []byte) (bool, error) { + params := &CustomPluginParameters_2{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CustomPlugin) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kafkaconnect/v1beta2/zz_customplugin_types.go b/apis/kafkaconnect/v1beta2/zz_customplugin_types.go new file mode 100755 index 0000000000..ec05ed4116 --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_customplugin_types.go @@ -0,0 +1,236 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomPluginInitParameters_2 struct { + + // The type of the plugin file. Allowed values are ZIP and JAR. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // A summary description of the custom plugin. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Information about the location of a custom plugin. See below. + Location *LocationInitParameters `json:"location,omitempty" tf:"location,omitempty"` +} + +type CustomPluginObservation_2 struct { + + // the Amazon Resource Name (ARN) of the custom plugin. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The type of the plugin file. Allowed values are ZIP and JAR. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // A summary description of the custom plugin. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // an ID of the latest successfully created revision of the custom plugin. 
+ LatestRevision *float64 `json:"latestRevision,omitempty" tf:"latest_revision,omitempty"` + + // Information about the location of a custom plugin. See below. + Location *LocationObservation `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the custom plugin.. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // the state of the custom plugin. + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type CustomPluginParameters_2 struct { + + // The type of the plugin file. Allowed values are ZIP and JAR. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // A summary description of the custom plugin. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Information about the location of a custom plugin. See below. + // +kubebuilder:validation:Optional + Location *LocationParameters `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the custom plugin.. + // +kubebuilder:validation:Required + Name *string `json:"name" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type LocationInitParameters struct { + + // Information of the plugin file stored in Amazon S3. See below. + S3 *LocationS3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type LocationObservation struct { + + // Information of the plugin file stored in Amazon S3. See below. + S3 *LocationS3Observation `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type LocationParameters struct { + + // Information of the plugin file stored in Amazon S3. See below. 
+ // +kubebuilder:validation:Optional + S3 *LocationS3Parameters `json:"s3" tf:"s3,omitempty"` +} + +type LocationS3InitParameters struct { + + // The Amazon Resource Name (ARN) of an S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // The file key for an object in an S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` + + // Reference to a Object in s3 to populate fileKey. + // +kubebuilder:validation:Optional + FileKeyRef *v1.Reference `json:"fileKeyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate fileKey. + // +kubebuilder:validation:Optional + FileKeySelector *v1.Selector `json:"fileKeySelector,omitempty" tf:"-"` + + // The version of an object in an S3 bucket. + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` +} + +type LocationS3Observation struct { + + // The Amazon Resource Name (ARN) of an S3 bucket. + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // The file key for an object in an S3 bucket. + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` + + // The version of an object in an S3 bucket. 
+ ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` +} + +type LocationS3Parameters struct { + + // The Amazon Resource Name (ARN) of an S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // The file key for an object in an S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + // +kubebuilder:validation:Optional + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` + + // Reference to a Object in s3 to populate fileKey. + // +kubebuilder:validation:Optional + FileKeyRef *v1.Reference `json:"fileKeyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate fileKey. + // +kubebuilder:validation:Optional + FileKeySelector *v1.Selector `json:"fileKeySelector,omitempty" tf:"-"` + + // The version of an object in an S3 bucket. + // +kubebuilder:validation:Optional + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` +} + +// CustomPluginSpec defines the desired state of CustomPlugin +type CustomPluginSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CustomPluginParameters_2 `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CustomPluginInitParameters_2 `json:"initProvider,omitempty"` +} + +// CustomPluginStatus defines the observed state of CustomPlugin. +type CustomPluginStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CustomPluginObservation_2 `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CustomPlugin is the Schema for the CustomPlugins API. Provides an Amazon MSK Connect custom plugin resource. This resource can be Created, Observed and Deleted, but not Updated. AWS does not currently provide update APIs. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CustomPlugin struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.contentType) || (has(self.initProvider) && has(self.initProvider.contentType))",message="spec.forProvider.contentType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec CustomPluginSpec `json:"spec"` + Status CustomPluginStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CustomPluginList contains a list of CustomPlugins +type CustomPluginList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CustomPlugin `json:"items"` +} + +// Repository type metadata. +var ( + CustomPlugin_Kind = "CustomPlugin" + CustomPlugin_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CustomPlugin_Kind}.String() + CustomPlugin_KindAPIVersion = CustomPlugin_Kind + "." 
+ CRDGroupVersion.String() + CustomPlugin_GroupVersionKind = CRDGroupVersion.WithKind(CustomPlugin_Kind) +) + +func init() { + SchemeBuilder.Register(&CustomPlugin{}, &CustomPluginList{}) +} diff --git a/apis/kafkaconnect/v1beta2/zz_generated.conversion_hubs.go b/apis/kafkaconnect/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..ab9289462b --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Connector) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *CustomPlugin) Hub() {} diff --git a/apis/kafkaconnect/v1beta2/zz_generated.deepcopy.go b/apis/kafkaconnect/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..93f6793c58 --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2320 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApacheKafkaClusterInitParameters) DeepCopyInto(out *ApacheKafkaClusterInitParameters) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = new(string) + **out = **in + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(VPCInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApacheKafkaClusterInitParameters. 
+func (in *ApacheKafkaClusterInitParameters) DeepCopy() *ApacheKafkaClusterInitParameters { + if in == nil { + return nil + } + out := new(ApacheKafkaClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApacheKafkaClusterObservation) DeepCopyInto(out *ApacheKafkaClusterObservation) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = new(string) + **out = **in + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(VPCObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApacheKafkaClusterObservation. +func (in *ApacheKafkaClusterObservation) DeepCopy() *ApacheKafkaClusterObservation { + if in == nil { + return nil + } + out := new(ApacheKafkaClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApacheKafkaClusterParameters) DeepCopyInto(out *ApacheKafkaClusterParameters) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = new(string) + **out = **in + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(VPCParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApacheKafkaClusterParameters. +func (in *ApacheKafkaClusterParameters) DeepCopy() *ApacheKafkaClusterParameters { + if in == nil { + return nil + } + out := new(ApacheKafkaClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingInitParameters) DeepCopyInto(out *AutoscalingInitParameters) { + *out = *in + if in.MaxWorkerCount != nil { + in, out := &in.MaxWorkerCount, &out.MaxWorkerCount + *out = new(float64) + **out = **in + } + if in.McuCount != nil { + in, out := &in.McuCount, &out.McuCount + *out = new(float64) + **out = **in + } + if in.MinWorkerCount != nil { + in, out := &in.MinWorkerCount, &out.MinWorkerCount + *out = new(float64) + **out = **in + } + if in.ScaleInPolicy != nil { + in, out := &in.ScaleInPolicy, &out.ScaleInPolicy + *out = new(ScaleInPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleOutPolicy != nil { + in, out := &in.ScaleOutPolicy, &out.ScaleOutPolicy + *out = new(ScaleOutPolicyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingInitParameters. +func (in *AutoscalingInitParameters) DeepCopy() *AutoscalingInitParameters { + if in == nil { + return nil + } + out := new(AutoscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingObservation) DeepCopyInto(out *AutoscalingObservation) { + *out = *in + if in.MaxWorkerCount != nil { + in, out := &in.MaxWorkerCount, &out.MaxWorkerCount + *out = new(float64) + **out = **in + } + if in.McuCount != nil { + in, out := &in.McuCount, &out.McuCount + *out = new(float64) + **out = **in + } + if in.MinWorkerCount != nil { + in, out := &in.MinWorkerCount, &out.MinWorkerCount + *out = new(float64) + **out = **in + } + if in.ScaleInPolicy != nil { + in, out := &in.ScaleInPolicy, &out.ScaleInPolicy + *out = new(ScaleInPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ScaleOutPolicy != nil { + in, out := &in.ScaleOutPolicy, &out.ScaleOutPolicy + *out = new(ScaleOutPolicyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingObservation. +func (in *AutoscalingObservation) DeepCopy() *AutoscalingObservation { + if in == nil { + return nil + } + out := new(AutoscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingParameters) DeepCopyInto(out *AutoscalingParameters) { + *out = *in + if in.MaxWorkerCount != nil { + in, out := &in.MaxWorkerCount, &out.MaxWorkerCount + *out = new(float64) + **out = **in + } + if in.McuCount != nil { + in, out := &in.McuCount, &out.McuCount + *out = new(float64) + **out = **in + } + if in.MinWorkerCount != nil { + in, out := &in.MinWorkerCount, &out.MinWorkerCount + *out = new(float64) + **out = **in + } + if in.ScaleInPolicy != nil { + in, out := &in.ScaleInPolicy, &out.ScaleInPolicy + *out = new(ScaleInPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleOutPolicy != nil { + in, out := &in.ScaleOutPolicy, &out.ScaleOutPolicy + *out = new(ScaleOutPolicyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingParameters. +func (in *AutoscalingParameters) DeepCopy() *AutoscalingParameters { + if in == nil { + return nil + } + out := new(AutoscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityInitParameters) DeepCopyInto(out *CapacityInitParameters) { + *out = *in + if in.Autoscaling != nil { + in, out := &in.Autoscaling, &out.Autoscaling + *out = new(AutoscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProvisionedCapacity != nil { + in, out := &in.ProvisionedCapacity, &out.ProvisionedCapacity + *out = new(ProvisionedCapacityInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityInitParameters. +func (in *CapacityInitParameters) DeepCopy() *CapacityInitParameters { + if in == nil { + return nil + } + out := new(CapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CapacityObservation) DeepCopyInto(out *CapacityObservation) { + *out = *in + if in.Autoscaling != nil { + in, out := &in.Autoscaling, &out.Autoscaling + *out = new(AutoscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.ProvisionedCapacity != nil { + in, out := &in.ProvisionedCapacity, &out.ProvisionedCapacity + *out = new(ProvisionedCapacityObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityObservation. +func (in *CapacityObservation) DeepCopy() *CapacityObservation { + if in == nil { + return nil + } + out := new(CapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityParameters) DeepCopyInto(out *CapacityParameters) { + *out = *in + if in.Autoscaling != nil { + in, out := &in.Autoscaling, &out.Autoscaling + *out = new(AutoscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.ProvisionedCapacity != nil { + in, out := &in.ProvisionedCapacity, &out.ProvisionedCapacity + *out = new(ProvisionedCapacityParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityParameters. +func (in *CapacityParameters) DeepCopy() *CapacityParameters { + if in == nil { + return nil + } + out := new(CapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchLogsInitParameters) DeepCopyInto(out *CloudwatchLogsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } + if in.LogGroupRef != nil { + in, out := &in.LogGroupRef, &out.LogGroupRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogGroupSelector != nil { + in, out := &in.LogGroupSelector, &out.LogGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsInitParameters. +func (in *CloudwatchLogsInitParameters) DeepCopy() *CloudwatchLogsInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLogsObservation) DeepCopyInto(out *CloudwatchLogsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsObservation. +func (in *CloudwatchLogsObservation) DeepCopy() *CloudwatchLogsObservation { + if in == nil { + return nil + } + out := new(CloudwatchLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchLogsParameters) DeepCopyInto(out *CloudwatchLogsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } + if in.LogGroupRef != nil { + in, out := &in.LogGroupRef, &out.LogGroupRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogGroupSelector != nil { + in, out := &in.LogGroupSelector, &out.LogGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLogsParameters. +func (in *CloudwatchLogsParameters) DeepCopy() *CloudwatchLogsParameters { + if in == nil { + return nil + } + out := new(CloudwatchLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Connector) DeepCopyInto(out *Connector) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connector. +func (in *Connector) DeepCopy() *Connector { + if in == nil { + return nil + } + out := new(Connector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Connector) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectorInitParameters) DeepCopyInto(out *ConnectorInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConnectorConfiguration != nil { + in, out := &in.ConnectorConfiguration, &out.ConnectorConfiguration + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = new(KafkaClusterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KafkaClusterClientAuthentication != nil { + in, out := &in.KafkaClusterClientAuthentication, &out.KafkaClusterClientAuthentication + *out = new(KafkaClusterClientAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KafkaClusterEncryptionInTransit != nil { + in, out := &in.KafkaClusterEncryptionInTransit, &out.KafkaClusterEncryptionInTransit + *out = new(KafkaClusterEncryptionInTransitInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KafkaconnectVersion != nil { + in, out := &in.KafkaconnectVersion, &out.KafkaconnectVersion + *out = new(string) + **out = **in + } + if in.LogDelivery != nil { + in, out := &in.LogDelivery, &out.LogDelivery + *out = new(LogDeliveryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Plugin != nil { + in, out := &in.Plugin, &out.Plugin + *out = make([]PluginInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceExecutionRoleArn != nil { + in, out := &in.ServiceExecutionRoleArn, &out.ServiceExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ServiceExecutionRoleArnRef != nil { + in, out 
:= &in.ServiceExecutionRoleArnRef, &out.ServiceExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceExecutionRoleArnSelector != nil { + in, out := &in.ServiceExecutionRoleArnSelector, &out.ServiceExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WorkerConfiguration != nil { + in, out := &in.WorkerConfiguration, &out.WorkerConfiguration + *out = new(WorkerConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorInitParameters. +func (in *ConnectorInitParameters) DeepCopy() *ConnectorInitParameters { + if in == nil { + return nil + } + out := new(ConnectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorList) DeepCopyInto(out *ConnectorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Connector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorList. +func (in *ConnectorList) DeepCopy() *ConnectorList { + if in == nil { + return nil + } + out := new(ConnectorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConnectorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectorObservation) DeepCopyInto(out *ConnectorObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityObservation) + (*in).DeepCopyInto(*out) + } + if in.ConnectorConfiguration != nil { + in, out := &in.ConnectorConfiguration, &out.ConnectorConfiguration + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = new(KafkaClusterObservation) + (*in).DeepCopyInto(*out) + } + if in.KafkaClusterClientAuthentication != nil { + in, out := &in.KafkaClusterClientAuthentication, &out.KafkaClusterClientAuthentication + *out = new(KafkaClusterClientAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.KafkaClusterEncryptionInTransit != nil { + in, out := &in.KafkaClusterEncryptionInTransit, &out.KafkaClusterEncryptionInTransit + *out = new(KafkaClusterEncryptionInTransitObservation) + (*in).DeepCopyInto(*out) + } + if in.KafkaconnectVersion != nil { + in, out := &in.KafkaconnectVersion, &out.KafkaconnectVersion + *out = new(string) + **out = **in + } + if in.LogDelivery != nil { + in, out := &in.LogDelivery, &out.LogDelivery + *out = new(LogDeliveryObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Plugin != nil { + in, out := &in.Plugin, &out.Plugin + *out = make([]PluginObservation, len(*in)) + for i := range *in { 
+ (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceExecutionRoleArn != nil { + in, out := &in.ServiceExecutionRoleArn, &out.ServiceExecutionRoleArn + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.WorkerConfiguration != nil { + in, out := &in.WorkerConfiguration, &out.WorkerConfiguration + *out = new(WorkerConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorObservation. +func (in *ConnectorObservation) DeepCopy() *ConnectorObservation { + if in == nil { + return nil + } + out := new(ConnectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorParameters) DeepCopyInto(out *ConnectorParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityParameters) + (*in).DeepCopyInto(*out) + } + if in.ConnectorConfiguration != nil { + in, out := &in.ConnectorConfiguration, &out.ConnectorConfiguration + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.KafkaCluster != nil { + in, out := &in.KafkaCluster, &out.KafkaCluster + *out = new(KafkaClusterParameters) + (*in).DeepCopyInto(*out) + } + if in.KafkaClusterClientAuthentication != nil { + in, out := &in.KafkaClusterClientAuthentication, &out.KafkaClusterClientAuthentication + *out = new(KafkaClusterClientAuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if 
in.KafkaClusterEncryptionInTransit != nil { + in, out := &in.KafkaClusterEncryptionInTransit, &out.KafkaClusterEncryptionInTransit + *out = new(KafkaClusterEncryptionInTransitParameters) + (*in).DeepCopyInto(*out) + } + if in.KafkaconnectVersion != nil { + in, out := &in.KafkaconnectVersion, &out.KafkaconnectVersion + *out = new(string) + **out = **in + } + if in.LogDelivery != nil { + in, out := &in.LogDelivery, &out.LogDelivery + *out = new(LogDeliveryParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Plugin != nil { + in, out := &in.Plugin, &out.Plugin + *out = make([]PluginParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceExecutionRoleArn != nil { + in, out := &in.ServiceExecutionRoleArn, &out.ServiceExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ServiceExecutionRoleArnRef != nil { + in, out := &in.ServiceExecutionRoleArnRef, &out.ServiceExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceExecutionRoleArnSelector != nil { + in, out := &in.ServiceExecutionRoleArnSelector, &out.ServiceExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WorkerConfiguration != nil { + in, out := &in.WorkerConfiguration, &out.WorkerConfiguration + *out = new(WorkerConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorParameters. +func (in *ConnectorParameters) DeepCopy() *ConnectorParameters { + if in == nil { + return nil + } + out := new(ConnectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectorSpec) DeepCopyInto(out *ConnectorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorSpec. +func (in *ConnectorSpec) DeepCopy() *ConnectorSpec { + if in == nil { + return nil + } + out := new(ConnectorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorStatus) DeepCopyInto(out *ConnectorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorStatus. +func (in *ConnectorStatus) DeepCopy() *ConnectorStatus { + if in == nil { + return nil + } + out := new(ConnectorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPlugin) DeepCopyInto(out *CustomPlugin) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPlugin. +func (in *CustomPlugin) DeepCopy() *CustomPlugin { + if in == nil { + return nil + } + out := new(CustomPlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomPlugin) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CustomPluginInitParameters) DeepCopyInto(out *CustomPluginInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPluginInitParameters. +func (in *CustomPluginInitParameters) DeepCopy() *CustomPluginInitParameters { + if in == nil { + return nil + } + out := new(CustomPluginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPluginInitParameters_2) DeepCopyInto(out *CustomPluginInitParameters_2) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(LocationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPluginInitParameters_2. +func (in *CustomPluginInitParameters_2) DeepCopy() *CustomPluginInitParameters_2 { + if in == nil { + return nil + } + out := new(CustomPluginInitParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomPluginList) DeepCopyInto(out *CustomPluginList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomPlugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPluginList. +func (in *CustomPluginList) DeepCopy() *CustomPluginList { + if in == nil { + return nil + } + out := new(CustomPluginList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomPluginList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPluginObservation) DeepCopyInto(out *CustomPluginObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPluginObservation. +func (in *CustomPluginObservation) DeepCopy() *CustomPluginObservation { + if in == nil { + return nil + } + out := new(CustomPluginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomPluginObservation_2) DeepCopyInto(out *CustomPluginObservation_2) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LatestRevision != nil { + in, out := &in.LatestRevision, &out.LatestRevision + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(LocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPluginObservation_2. +func (in *CustomPluginObservation_2) DeepCopy() *CustomPluginObservation_2 { + if in == nil { + return nil + } + out := new(CustomPluginObservation_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomPluginParameters) DeepCopyInto(out *CustomPluginParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPluginParameters. +func (in *CustomPluginParameters) DeepCopy() *CustomPluginParameters { + if in == nil { + return nil + } + out := new(CustomPluginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPluginParameters_2) DeepCopyInto(out *CustomPluginParameters_2) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(LocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPluginParameters_2. 
+func (in *CustomPluginParameters_2) DeepCopy() *CustomPluginParameters_2 { + if in == nil { + return nil + } + out := new(CustomPluginParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPluginSpec) DeepCopyInto(out *CustomPluginSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPluginSpec. +func (in *CustomPluginSpec) DeepCopy() *CustomPluginSpec { + if in == nil { + return nil + } + out := new(CustomPluginSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPluginStatus) DeepCopyInto(out *CustomPluginStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPluginStatus. +func (in *CustomPluginStatus) DeepCopy() *CustomPluginStatus { + if in == nil { + return nil + } + out := new(CustomPluginStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirehoseInitParameters) DeepCopyInto(out *FirehoseInitParameters) { + *out = *in + if in.DeliveryStream != nil { + in, out := &in.DeliveryStream, &out.DeliveryStream + *out = new(string) + **out = **in + } + if in.DeliveryStreamRef != nil { + in, out := &in.DeliveryStreamRef, &out.DeliveryStreamRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeliveryStreamSelector != nil { + in, out := &in.DeliveryStreamSelector, &out.DeliveryStreamSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirehoseInitParameters. +func (in *FirehoseInitParameters) DeepCopy() *FirehoseInitParameters { + if in == nil { + return nil + } + out := new(FirehoseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirehoseObservation) DeepCopyInto(out *FirehoseObservation) { + *out = *in + if in.DeliveryStream != nil { + in, out := &in.DeliveryStream, &out.DeliveryStream + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirehoseObservation. +func (in *FirehoseObservation) DeepCopy() *FirehoseObservation { + if in == nil { + return nil + } + out := new(FirehoseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirehoseParameters) DeepCopyInto(out *FirehoseParameters) { + *out = *in + if in.DeliveryStream != nil { + in, out := &in.DeliveryStream, &out.DeliveryStream + *out = new(string) + **out = **in + } + if in.DeliveryStreamRef != nil { + in, out := &in.DeliveryStreamRef, &out.DeliveryStreamRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeliveryStreamSelector != nil { + in, out := &in.DeliveryStreamSelector, &out.DeliveryStreamSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirehoseParameters. +func (in *FirehoseParameters) DeepCopy() *FirehoseParameters { + if in == nil { + return nil + } + out := new(FirehoseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterClientAuthenticationInitParameters) DeepCopyInto(out *KafkaClusterClientAuthenticationInitParameters) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterClientAuthenticationInitParameters. +func (in *KafkaClusterClientAuthenticationInitParameters) DeepCopy() *KafkaClusterClientAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterClientAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterClientAuthenticationObservation) DeepCopyInto(out *KafkaClusterClientAuthenticationObservation) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterClientAuthenticationObservation. +func (in *KafkaClusterClientAuthenticationObservation) DeepCopy() *KafkaClusterClientAuthenticationObservation { + if in == nil { + return nil + } + out := new(KafkaClusterClientAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterClientAuthenticationParameters) DeepCopyInto(out *KafkaClusterClientAuthenticationParameters) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterClientAuthenticationParameters. +func (in *KafkaClusterClientAuthenticationParameters) DeepCopy() *KafkaClusterClientAuthenticationParameters { + if in == nil { + return nil + } + out := new(KafkaClusterClientAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterEncryptionInTransitInitParameters) DeepCopyInto(out *KafkaClusterEncryptionInTransitInitParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterEncryptionInTransitInitParameters. 
+func (in *KafkaClusterEncryptionInTransitInitParameters) DeepCopy() *KafkaClusterEncryptionInTransitInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterEncryptionInTransitInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterEncryptionInTransitObservation) DeepCopyInto(out *KafkaClusterEncryptionInTransitObservation) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterEncryptionInTransitObservation. +func (in *KafkaClusterEncryptionInTransitObservation) DeepCopy() *KafkaClusterEncryptionInTransitObservation { + if in == nil { + return nil + } + out := new(KafkaClusterEncryptionInTransitObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterEncryptionInTransitParameters) DeepCopyInto(out *KafkaClusterEncryptionInTransitParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterEncryptionInTransitParameters. +func (in *KafkaClusterEncryptionInTransitParameters) DeepCopy() *KafkaClusterEncryptionInTransitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterEncryptionInTransitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterInitParameters) DeepCopyInto(out *KafkaClusterInitParameters) { + *out = *in + if in.ApacheKafkaCluster != nil { + in, out := &in.ApacheKafkaCluster, &out.ApacheKafkaCluster + *out = new(ApacheKafkaClusterInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterInitParameters. +func (in *KafkaClusterInitParameters) DeepCopy() *KafkaClusterInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterObservation) DeepCopyInto(out *KafkaClusterObservation) { + *out = *in + if in.ApacheKafkaCluster != nil { + in, out := &in.ApacheKafkaCluster, &out.ApacheKafkaCluster + *out = new(ApacheKafkaClusterObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterObservation. +func (in *KafkaClusterObservation) DeepCopy() *KafkaClusterObservation { + if in == nil { + return nil + } + out := new(KafkaClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterParameters) DeepCopyInto(out *KafkaClusterParameters) { + *out = *in + if in.ApacheKafkaCluster != nil { + in, out := &in.ApacheKafkaCluster, &out.ApacheKafkaCluster + *out = new(ApacheKafkaClusterParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterParameters. 
+func (in *KafkaClusterParameters) DeepCopy() *KafkaClusterParameters { + if in == nil { + return nil + } + out := new(KafkaClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationInitParameters) DeepCopyInto(out *LocationInitParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(LocationS3InitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationInitParameters. +func (in *LocationInitParameters) DeepCopy() *LocationInitParameters { + if in == nil { + return nil + } + out := new(LocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationObservation) DeepCopyInto(out *LocationObservation) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(LocationS3Observation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationObservation. +func (in *LocationObservation) DeepCopy() *LocationObservation { + if in == nil { + return nil + } + out := new(LocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationParameters) DeepCopyInto(out *LocationParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(LocationS3Parameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationParameters. 
+func (in *LocationParameters) DeepCopy() *LocationParameters { + if in == nil { + return nil + } + out := new(LocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationS3InitParameters) DeepCopyInto(out *LocationS3InitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } + if in.FileKeyRef != nil { + in, out := &in.FileKeyRef, &out.FileKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileKeySelector != nil { + in, out := &in.FileKeySelector, &out.FileKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3InitParameters. +func (in *LocationS3InitParameters) DeepCopy() *LocationS3InitParameters { + if in == nil { + return nil + } + out := new(LocationS3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocationS3Observation) DeepCopyInto(out *LocationS3Observation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3Observation. +func (in *LocationS3Observation) DeepCopy() *LocationS3Observation { + if in == nil { + return nil + } + out := new(LocationS3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationS3Parameters) DeepCopyInto(out *LocationS3Parameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } + if in.FileKeyRef != nil { + in, out := &in.FileKeyRef, &out.FileKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileKeySelector != nil { + in, out := &in.FileKeySelector, &out.FileKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationS3Parameters. 
+func (in *LocationS3Parameters) DeepCopy() *LocationS3Parameters { + if in == nil { + return nil + } + out := new(LocationS3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogDeliveryInitParameters) DeepCopyInto(out *LogDeliveryInitParameters) { + *out = *in + if in.WorkerLogDelivery != nil { + in, out := &in.WorkerLogDelivery, &out.WorkerLogDelivery + *out = new(WorkerLogDeliveryInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogDeliveryInitParameters. +func (in *LogDeliveryInitParameters) DeepCopy() *LogDeliveryInitParameters { + if in == nil { + return nil + } + out := new(LogDeliveryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogDeliveryObservation) DeepCopyInto(out *LogDeliveryObservation) { + *out = *in + if in.WorkerLogDelivery != nil { + in, out := &in.WorkerLogDelivery, &out.WorkerLogDelivery + *out = new(WorkerLogDeliveryObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogDeliveryObservation. +func (in *LogDeliveryObservation) DeepCopy() *LogDeliveryObservation { + if in == nil { + return nil + } + out := new(LogDeliveryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogDeliveryParameters) DeepCopyInto(out *LogDeliveryParameters) { + *out = *in + if in.WorkerLogDelivery != nil { + in, out := &in.WorkerLogDelivery, &out.WorkerLogDelivery + *out = new(WorkerLogDeliveryParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogDeliveryParameters. +func (in *LogDeliveryParameters) DeepCopy() *LogDeliveryParameters { + if in == nil { + return nil + } + out := new(LogDeliveryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginInitParameters) DeepCopyInto(out *PluginInitParameters) { + *out = *in + if in.CustomPlugin != nil { + in, out := &in.CustomPlugin, &out.CustomPlugin + *out = new(CustomPluginInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginInitParameters. +func (in *PluginInitParameters) DeepCopy() *PluginInitParameters { + if in == nil { + return nil + } + out := new(PluginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginObservation) DeepCopyInto(out *PluginObservation) { + *out = *in + if in.CustomPlugin != nil { + in, out := &in.CustomPlugin, &out.CustomPlugin + *out = new(CustomPluginObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginObservation. +func (in *PluginObservation) DeepCopy() *PluginObservation { + if in == nil { + return nil + } + out := new(PluginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PluginParameters) DeepCopyInto(out *PluginParameters) { + *out = *in + if in.CustomPlugin != nil { + in, out := &in.CustomPlugin, &out.CustomPlugin + *out = new(CustomPluginParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginParameters. +func (in *PluginParameters) DeepCopy() *PluginParameters { + if in == nil { + return nil + } + out := new(PluginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisionedCapacityInitParameters) DeepCopyInto(out *ProvisionedCapacityInitParameters) { + *out = *in + if in.McuCount != nil { + in, out := &in.McuCount, &out.McuCount + *out = new(float64) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionedCapacityInitParameters. +func (in *ProvisionedCapacityInitParameters) DeepCopy() *ProvisionedCapacityInitParameters { + if in == nil { + return nil + } + out := new(ProvisionedCapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisionedCapacityObservation) DeepCopyInto(out *ProvisionedCapacityObservation) { + *out = *in + if in.McuCount != nil { + in, out := &in.McuCount, &out.McuCount + *out = new(float64) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionedCapacityObservation. 
+func (in *ProvisionedCapacityObservation) DeepCopy() *ProvisionedCapacityObservation { + if in == nil { + return nil + } + out := new(ProvisionedCapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisionedCapacityParameters) DeepCopyInto(out *ProvisionedCapacityParameters) { + *out = *in + if in.McuCount != nil { + in, out := &in.McuCount, &out.McuCount + *out = new(float64) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionedCapacityParameters. +func (in *ProvisionedCapacityParameters) DeepCopy() *ProvisionedCapacityParameters { + if in == nil { + return nil + } + out := new(ProvisionedCapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InitParameters) DeepCopyInto(out *S3InitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InitParameters. 
+func (in *S3InitParameters) DeepCopy() *S3InitParameters { + if in == nil { + return nil + } + out := new(S3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Observation) DeepCopyInto(out *S3Observation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Observation. +func (in *S3Observation) DeepCopy() *S3Observation { + if in == nil { + return nil + } + out := new(S3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Parameters) DeepCopyInto(out *S3Parameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Parameters. 
+func (in *S3Parameters) DeepCopy() *S3Parameters { + if in == nil { + return nil + } + out := new(S3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleInPolicyInitParameters) DeepCopyInto(out *ScaleInPolicyInitParameters) { + *out = *in + if in.CPUUtilizationPercentage != nil { + in, out := &in.CPUUtilizationPercentage, &out.CPUUtilizationPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleInPolicyInitParameters. +func (in *ScaleInPolicyInitParameters) DeepCopy() *ScaleInPolicyInitParameters { + if in == nil { + return nil + } + out := new(ScaleInPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleInPolicyObservation) DeepCopyInto(out *ScaleInPolicyObservation) { + *out = *in + if in.CPUUtilizationPercentage != nil { + in, out := &in.CPUUtilizationPercentage, &out.CPUUtilizationPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleInPolicyObservation. +func (in *ScaleInPolicyObservation) DeepCopy() *ScaleInPolicyObservation { + if in == nil { + return nil + } + out := new(ScaleInPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScaleInPolicyParameters) DeepCopyInto(out *ScaleInPolicyParameters) { + *out = *in + if in.CPUUtilizationPercentage != nil { + in, out := &in.CPUUtilizationPercentage, &out.CPUUtilizationPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleInPolicyParameters. +func (in *ScaleInPolicyParameters) DeepCopy() *ScaleInPolicyParameters { + if in == nil { + return nil + } + out := new(ScaleInPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleOutPolicyInitParameters) DeepCopyInto(out *ScaleOutPolicyInitParameters) { + *out = *in + if in.CPUUtilizationPercentage != nil { + in, out := &in.CPUUtilizationPercentage, &out.CPUUtilizationPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleOutPolicyInitParameters. +func (in *ScaleOutPolicyInitParameters) DeepCopy() *ScaleOutPolicyInitParameters { + if in == nil { + return nil + } + out := new(ScaleOutPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleOutPolicyObservation) DeepCopyInto(out *ScaleOutPolicyObservation) { + *out = *in + if in.CPUUtilizationPercentage != nil { + in, out := &in.CPUUtilizationPercentage, &out.CPUUtilizationPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleOutPolicyObservation. 
+func (in *ScaleOutPolicyObservation) DeepCopy() *ScaleOutPolicyObservation { + if in == nil { + return nil + } + out := new(ScaleOutPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleOutPolicyParameters) DeepCopyInto(out *ScaleOutPolicyParameters) { + *out = *in + if in.CPUUtilizationPercentage != nil { + in, out := &in.CPUUtilizationPercentage, &out.CPUUtilizationPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleOutPolicyParameters. +func (in *ScaleOutPolicyParameters) DeepCopy() *ScaleOutPolicyParameters { + if in == nil { + return nil + } + out := new(ScaleOutPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCInitParameters) DeepCopyInto(out *VPCInitParameters) { + *out = *in + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetRefs != nil { + in, out := &in.SubnetRefs, &out.SubnetRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetSelector != nil { + in, out := &in.SubnetSelector, &out.SubnetSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) 
+ } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCInitParameters. +func (in *VPCInitParameters) DeepCopy() *VPCInitParameters { + if in == nil { + return nil + } + out := new(VPCInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCObservation) DeepCopyInto(out *VPCObservation) { + *out = *in + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCObservation. +func (in *VPCObservation) DeepCopy() *VPCObservation { + if in == nil { + return nil + } + out := new(VPCObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCParameters) DeepCopyInto(out *VPCParameters) { + *out = *in + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetRefs != nil { + in, out := &in.SubnetRefs, &out.SubnetRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetSelector != nil { + in, out := &in.SubnetSelector, &out.SubnetSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCParameters. +func (in *VPCParameters) DeepCopy() *VPCParameters { + if in == nil { + return nil + } + out := new(VPCParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerConfigurationInitParameters) DeepCopyInto(out *WorkerConfigurationInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigurationInitParameters. +func (in *WorkerConfigurationInitParameters) DeepCopy() *WorkerConfigurationInitParameters { + if in == nil { + return nil + } + out := new(WorkerConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigurationObservation) DeepCopyInto(out *WorkerConfigurationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigurationObservation. +func (in *WorkerConfigurationObservation) DeepCopy() *WorkerConfigurationObservation { + if in == nil { + return nil + } + out := new(WorkerConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerConfigurationParameters) DeepCopyInto(out *WorkerConfigurationParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigurationParameters. +func (in *WorkerConfigurationParameters) DeepCopy() *WorkerConfigurationParameters { + if in == nil { + return nil + } + out := new(WorkerConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerLogDeliveryInitParameters) DeepCopyInto(out *WorkerLogDeliveryInitParameters) { + *out = *in + if in.CloudwatchLogs != nil { + in, out := &in.CloudwatchLogs, &out.CloudwatchLogs + *out = new(CloudwatchLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Firehose != nil { + in, out := &in.Firehose, &out.Firehose + *out = new(FirehoseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3InitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerLogDeliveryInitParameters. +func (in *WorkerLogDeliveryInitParameters) DeepCopy() *WorkerLogDeliveryInitParameters { + if in == nil { + return nil + } + out := new(WorkerLogDeliveryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerLogDeliveryObservation) DeepCopyInto(out *WorkerLogDeliveryObservation) { + *out = *in + if in.CloudwatchLogs != nil { + in, out := &in.CloudwatchLogs, &out.CloudwatchLogs + *out = new(CloudwatchLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.Firehose != nil { + in, out := &in.Firehose, &out.Firehose + *out = new(FirehoseObservation) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Observation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerLogDeliveryObservation. +func (in *WorkerLogDeliveryObservation) DeepCopy() *WorkerLogDeliveryObservation { + if in == nil { + return nil + } + out := new(WorkerLogDeliveryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerLogDeliveryParameters) DeepCopyInto(out *WorkerLogDeliveryParameters) { + *out = *in + if in.CloudwatchLogs != nil { + in, out := &in.CloudwatchLogs, &out.CloudwatchLogs + *out = new(CloudwatchLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.Firehose != nil { + in, out := &in.Firehose, &out.Firehose + *out = new(FirehoseParameters) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Parameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerLogDeliveryParameters. 
+func (in *WorkerLogDeliveryParameters) DeepCopy() *WorkerLogDeliveryParameters { + if in == nil { + return nil + } + out := new(WorkerLogDeliveryParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kafkaconnect/v1beta2/zz_generated.managed.go b/apis/kafkaconnect/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..64fbc046b3 --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Connector. +func (mg *Connector) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Connector. +func (mg *Connector) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Connector. +func (mg *Connector) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Connector. +func (mg *Connector) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Connector. +func (mg *Connector) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Connector. +func (mg *Connector) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Connector. +func (mg *Connector) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Connector. 
+func (mg *Connector) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Connector. +func (mg *Connector) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Connector. +func (mg *Connector) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Connector. +func (mg *Connector) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Connector. +func (mg *Connector) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CustomPlugin. +func (mg *CustomPlugin) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CustomPlugin. +func (mg *CustomPlugin) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CustomPlugin. +func (mg *CustomPlugin) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CustomPlugin. +func (mg *CustomPlugin) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CustomPlugin. +func (mg *CustomPlugin) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CustomPlugin. +func (mg *CustomPlugin) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CustomPlugin. +func (mg *CustomPlugin) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this CustomPlugin. +func (mg *CustomPlugin) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CustomPlugin. +func (mg *CustomPlugin) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CustomPlugin. +func (mg *CustomPlugin) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CustomPlugin. +func (mg *CustomPlugin) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CustomPlugin. +func (mg *CustomPlugin) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kafkaconnect/v1beta2/zz_generated.managedlist.go b/apis/kafkaconnect/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..c1fe2413b7 --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConnectorList. +func (l *ConnectorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CustomPluginList. 
+func (l *CustomPluginList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kafkaconnect/v1beta2/zz_generated.resolvers.go b/apis/kafkaconnect/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..1b8c234ca3 --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,514 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Connector) ResolveReferences( // ResolveReferences of this Connector. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.KafkaCluster != nil { + if mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster != nil { + if mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroupRefs, + Selector: mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroups") + } + mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroupRefs = mrsp.ResolvedReferences + + } + } + } + if mg.Spec.ForProvider.KafkaCluster != nil { + if mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster != nil { + if mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, 
reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.Subnets), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.SubnetRefs, + Selector: mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.SubnetSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.Subnets") + } + mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.Subnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.KafkaCluster.ApacheKafkaCluster.VPC.SubnetRefs = mrsp.ResolvedReferences + + } + } + } + if mg.Spec.ForProvider.LogDelivery != nil { + if mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery != nil { + if mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroup), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroupRef, + Selector: mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroup") + } + mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroup = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroupRef = rsp.ResolvedReference + + } + } + } + if 
mg.Spec.ForProvider.LogDelivery != nil { + if mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery != nil { + if mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.Firehose != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStream), + Extract: resource.ExtractParamPath("name", true), + Reference: mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStreamRef, + Selector: mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStreamSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStream") + } + mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStream = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStreamRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.LogDelivery != nil { + if mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery != nil { + if mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.S3.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.S3.BucketRef, + Selector: 
mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.S3.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.S3.Bucket") + } + mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.S3.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogDelivery.WorkerLogDelivery.S3.BucketRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Plugin); i3++ { + if mg.Spec.ForProvider.Plugin[i3].CustomPlugin != nil { + { + m, l, err = apisresolver.GetManagedResource("kafkaconnect.aws.upbound.io", "v1beta2", "CustomPlugin", "CustomPluginList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Plugin[i3].CustomPlugin.Arn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.Plugin[i3].CustomPlugin.ArnRef, + Selector: mg.Spec.ForProvider.Plugin[i3].CustomPlugin.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Plugin[i3].CustomPlugin.Arn") + } + mg.Spec.ForProvider.Plugin[i3].CustomPlugin.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Plugin[i3].CustomPlugin.ArnRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ServiceExecutionRoleArnRef, + Selector: 
mg.Spec.ForProvider.ServiceExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceExecutionRoleArn") + } + mg.Spec.ForProvider.ServiceExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceExecutionRoleArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.WorkerConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kafkaconnect.aws.upbound.io", "v1beta1", "WorkerConfiguration", "WorkerConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WorkerConfiguration.Arn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.WorkerConfiguration.ArnRef, + Selector: mg.Spec.ForProvider.WorkerConfiguration.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WorkerConfiguration.Arn") + } + mg.Spec.ForProvider.WorkerConfiguration.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WorkerConfiguration.ArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.KafkaCluster != nil { + if mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster != nil { + if mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroups), + Extract: reference.ExternalName(), + References: 
mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroupRefs, + Selector: mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroups") + } + mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.SecurityGroupRefs = mrsp.ResolvedReferences + + } + } + } + if mg.Spec.InitProvider.KafkaCluster != nil { + if mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster != nil { + if mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.Subnets), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.SubnetRefs, + Selector: mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.SubnetSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.Subnets") + } + mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.Subnets = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.KafkaCluster.ApacheKafkaCluster.VPC.SubnetRefs = mrsp.ResolvedReferences + + } + } + } + if mg.Spec.InitProvider.LogDelivery != nil { + if mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery != nil { + if mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs != nil { + { + m, l, err = 
apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroup), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroupRef, + Selector: mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroup") + } + mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroup = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.CloudwatchLogs.LogGroupRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.LogDelivery != nil { + if mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery != nil { + if mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.Firehose != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStream), + Extract: resource.ExtractParamPath("name", true), + Reference: mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStreamRef, + Selector: mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStreamSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStream") + } + mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStream = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.Firehose.DeliveryStreamRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.LogDelivery != nil { + if mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery != nil { + if mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.S3.Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.S3.BucketRef, + Selector: mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.S3.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.S3.Bucket") + } + mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.S3.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LogDelivery.WorkerLogDelivery.S3.BucketRef = rsp.ResolvedReference + + } + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Plugin); i3++ { + if mg.Spec.InitProvider.Plugin[i3].CustomPlugin != nil { + { + m, l, err = apisresolver.GetManagedResource("kafkaconnect.aws.upbound.io", "v1beta2", "CustomPlugin", "CustomPluginList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.Plugin[i3].CustomPlugin.Arn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.Plugin[i3].CustomPlugin.ArnRef, + Selector: mg.Spec.InitProvider.Plugin[i3].CustomPlugin.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Plugin[i3].CustomPlugin.Arn") + } + mg.Spec.InitProvider.Plugin[i3].CustomPlugin.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Plugin[i3].CustomPlugin.ArnRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ServiceExecutionRoleArnRef, + Selector: mg.Spec.InitProvider.ServiceExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceExecutionRoleArn") + } + mg.Spec.InitProvider.ServiceExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceExecutionRoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.WorkerConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kafkaconnect.aws.upbound.io", "v1beta1", "WorkerConfiguration", "WorkerConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.WorkerConfiguration.Arn), + Extract: common.ARNExtractor(), + Reference: 
mg.Spec.InitProvider.WorkerConfiguration.ArnRef, + Selector: mg.Spec.InitProvider.WorkerConfiguration.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.WorkerConfiguration.Arn") + } + mg.Spec.InitProvider.WorkerConfiguration.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.WorkerConfiguration.ArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this CustomPlugin. +func (mg *CustomPlugin) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Location != nil { + if mg.Spec.ForProvider.Location.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Location.S3.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Location.S3.BucketArnRef, + Selector: mg.Spec.ForProvider.Location.S3.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Location.S3.BucketArn") + } + mg.Spec.ForProvider.Location.S3.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Location.S3.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Location != nil { + if mg.Spec.ForProvider.Location.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Location.S3.FileKey), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.ForProvider.Location.S3.FileKeyRef, + Selector: mg.Spec.ForProvider.Location.S3.FileKeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Location.S3.FileKey") + } + mg.Spec.ForProvider.Location.S3.FileKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Location.S3.FileKeyRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Location != nil { + if mg.Spec.InitProvider.Location.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Location.S3.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Location.S3.BucketArnRef, + Selector: mg.Spec.InitProvider.Location.S3.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Location.S3.BucketArn") + } + mg.Spec.InitProvider.Location.S3.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Location.S3.BucketArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Location != nil { + if mg.Spec.InitProvider.Location.S3 != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Location.S3.FileKey), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.InitProvider.Location.S3.FileKeyRef, + Selector: mg.Spec.InitProvider.Location.S3.FileKeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Location.S3.FileKey") + } + mg.Spec.InitProvider.Location.S3.FileKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Location.S3.FileKeyRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/kafkaconnect/v1beta2/zz_groupversion_info.go b/apis/kafkaconnect/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..2307e26733 --- /dev/null +++ b/apis/kafkaconnect/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kafkaconnect.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "kafkaconnect.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/kendra/v1beta1/zz_generated.conversion_spokes.go b/apis/kendra/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..cf576ec68a --- /dev/null +++ b/apis/kendra/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this DataSource to the hub type. +func (tr *DataSource) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataSource type. +func (tr *DataSource) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Experience to the hub type. 
+func (tr *Experience) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Experience type. +func (tr *Experience) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Index to the hub type. +func (tr *Index) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Index type. +func (tr *Index) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this QuerySuggestionsBlockList to the hub type. 
+func (tr *QuerySuggestionsBlockList) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the QuerySuggestionsBlockList type. +func (tr *QuerySuggestionsBlockList) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Thesaurus to the hub type. +func (tr *Thesaurus) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Thesaurus type. 
+func (tr *Thesaurus) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/kendra/v1beta2/zz_datasource_terraformed.go b/apis/kendra/v1beta2/zz_datasource_terraformed.go new file mode 100755 index 0000000000..74cf9dd0c1 --- /dev/null +++ b/apis/kendra/v1beta2/zz_datasource_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataSource +func (mg *DataSource) GetTerraformResourceType() string { + return "aws_kendra_data_source" +} + +// GetConnectionDetailsMapping for this DataSource +func (tr *DataSource) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataSource +func (tr *DataSource) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataSource +func (tr *DataSource) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataSource +func (tr *DataSource) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" 
+ } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DataSource +func (tr *DataSource) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataSource +func (tr *DataSource) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataSource +func (tr *DataSource) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DataSource +func (tr *DataSource) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DataSource using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *DataSource) LateInitialize(attrs []byte) (bool, error) { + params := &DataSourceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataSource) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kendra/v1beta2/zz_datasource_types.go b/apis/kendra/v1beta2/zz_datasource_types.go new file mode 100755 index 0000000000..c5d470e276 --- /dev/null +++ b/apis/kendra/v1beta2/zz_datasource_types.go @@ -0,0 +1,1266 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessControlListConfigurationInitParameters struct { + + // Path to the AWS S3 bucket that contains the ACL files. + KeyPath *string `json:"keyPath,omitempty" tf:"key_path,omitempty"` +} + +type AccessControlListConfigurationObservation struct { + + // Path to the AWS S3 bucket that contains the ACL files. + KeyPath *string `json:"keyPath,omitempty" tf:"key_path,omitempty"` +} + +type AccessControlListConfigurationParameters struct { + + // Path to the AWS S3 bucket that contains the ACL files. 
+ // +kubebuilder:validation:Optional + KeyPath *string `json:"keyPath,omitempty" tf:"key_path,omitempty"` +} + +type AuthenticationConfigurationInitParameters struct { + + // The list of configuration information that's required to connect to and crawl a website host using basic authentication credentials. The list includes the name and port number of the website host. Detailed below. + BasicAuthentication []BasicAuthenticationInitParameters `json:"basicAuthentication,omitempty" tf:"basic_authentication,omitempty"` +} + +type AuthenticationConfigurationObservation struct { + + // The list of configuration information that's required to connect to and crawl a website host using basic authentication credentials. The list includes the name and port number of the website host. Detailed below. + BasicAuthentication []BasicAuthenticationObservation `json:"basicAuthentication,omitempty" tf:"basic_authentication,omitempty"` +} + +type AuthenticationConfigurationParameters struct { + + // The list of configuration information that's required to connect to and crawl a website host using basic authentication credentials. The list includes the name and port number of the website host. Detailed below. + // +kubebuilder:validation:Optional + BasicAuthentication []BasicAuthenticationParameters `json:"basicAuthentication,omitempty" tf:"basic_authentication,omitempty"` +} + +type BasicAuthenticationInitParameters struct { + + // Your secret ARN, which you can create in AWS Secrets Manager. You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Credentials *string `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // Reference to a Secret in secretsmanager to populate credentials. + // +kubebuilder:validation:Optional + CredentialsRef *v1.Reference `json:"credentialsRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate credentials. + // +kubebuilder:validation:Optional + CredentialsSelector *v1.Selector `json:"credentialsSelector,omitempty" tf:"-"` + + // The name of the website host you want to connect to using authentication credentials. For example, the host name of https://a.example.com/page1.html is "a.example.com". + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The port number of the website host you want to connect to using authentication credentials. For example, the port for https://a.example.com/page1.html is 443, the standard port for HTTPS. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type BasicAuthenticationObservation struct { + + // Your secret ARN, which you can create in AWS Secrets Manager. You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password. + Credentials *string `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The name of the website host you want to connect to using authentication credentials. For example, the host name of https://a.example.com/page1.html is "a.example.com". + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The port number of the website host you want to connect to using authentication credentials. For example, the port for https://a.example.com/page1.html is 443, the standard port for HTTPS. 
+ Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type BasicAuthenticationParameters struct { + + // Your secret ARN, which you can create in AWS Secrets Manager. You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Credentials *string `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // Reference to a Secret in secretsmanager to populate credentials. + // +kubebuilder:validation:Optional + CredentialsRef *v1.Reference `json:"credentialsRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate credentials. + // +kubebuilder:validation:Optional + CredentialsSelector *v1.Selector `json:"credentialsSelector,omitempty" tf:"-"` + + // The name of the website host you want to connect to using authentication credentials. For example, the host name of https://a.example.com/page1.html is "a.example.com". + // +kubebuilder:validation:Optional + Host *string `json:"host" tf:"host,omitempty"` + + // The port number of the website host you want to connect to using authentication credentials. For example, the port for https://a.example.com/page1.html is 443, the standard port for HTTPS. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` +} + +type ConditionInitParameters struct { + + // The identifier of the document attribute used for the condition. For example, _source_uri could be an identifier for the attribute or metadata field that contains source URIs associated with the documents. Amazon Kendra currently does not support _document_body as an attribute key used for the condition. 
+ ConditionDocumentAttributeKey *string `json:"conditionDocumentAttributeKey,omitempty" tf:"condition_document_attribute_key,omitempty"` + + // The value used by the operator. For example, you can specify the value 'financial' for strings in the _source_uri field that partially match or contain this value. See condition_on_value. + ConditionOnValue *ConditionOnValueInitParameters `json:"conditionOnValue,omitempty" tf:"condition_on_value,omitempty"` + + // The condition operator. For example, you can use Contains to partially match a string. Valid Values: GreaterThan | GreaterThanOrEquals | LessThan | LessThanOrEquals | Equals | NotEquals | Contains | NotContains | Exists | NotExists | BeginsWith. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionObservation struct { + + // The identifier of the document attribute used for the condition. For example, _source_uri could be an identifier for the attribute or metadata field that contains source URIs associated with the documents. Amazon Kendra currently does not support _document_body as an attribute key used for the condition. + ConditionDocumentAttributeKey *string `json:"conditionDocumentAttributeKey,omitempty" tf:"condition_document_attribute_key,omitempty"` + + // The value used by the operator. For example, you can specify the value 'financial' for strings in the _source_uri field that partially match or contain this value. See condition_on_value. + ConditionOnValue *ConditionOnValueObservation `json:"conditionOnValue,omitempty" tf:"condition_on_value,omitempty"` + + // The condition operator. For example, you can use Contains to partially match a string. Valid Values: GreaterThan | GreaterThanOrEquals | LessThan | LessThanOrEquals | Equals | NotEquals | Contains | NotContains | Exists | NotExists | BeginsWith. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionOnValueInitParameters struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type ConditionOnValueObservation struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type ConditionOnValueParameters struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + // +kubebuilder:validation:Optional + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + // +kubebuilder:validation:Optional + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. 
+ // +kubebuilder:validation:Optional + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + // +kubebuilder:validation:Optional + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type ConditionParameters struct { + + // The identifier of the document attribute used for the condition. For example, _source_uri could be an identifier for the attribute or metadata field that contains source URIs associated with the documents. Amazon Kendra currently does not support _document_body as an attribute key used for the condition. + // +kubebuilder:validation:Optional + ConditionDocumentAttributeKey *string `json:"conditionDocumentAttributeKey" tf:"condition_document_attribute_key,omitempty"` + + // The value used by the operator. For example, you can specify the value 'financial' for strings in the _source_uri field that partially match or contain this value. See condition_on_value. + // +kubebuilder:validation:Optional + ConditionOnValue *ConditionOnValueParameters `json:"conditionOnValue,omitempty" tf:"condition_on_value,omitempty"` + + // The condition operator. For example, you can use Contains to partially match a string. Valid Values: GreaterThan | GreaterThanOrEquals | LessThan | LessThanOrEquals | Equals | NotEquals | Contains | NotContains | Exists | NotExists | BeginsWith. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` +} + +type ConfigurationInitParameters struct { + + // A block that provides the configuration information to connect to an Amazon S3 bucket as your data source. Detailed below. + S3Configuration *S3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // A block that provides the configuration information required for Amazon Kendra Web Crawler. Detailed below. 
+ WebCrawlerConfiguration *WebCrawlerConfigurationInitParameters `json:"webCrawlerConfiguration,omitempty" tf:"web_crawler_configuration,omitempty"` +} + +type ConfigurationObservation struct { + + // A block that provides the configuration information to connect to an Amazon S3 bucket as your data source. Detailed below. + S3Configuration *S3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // A block that provides the configuration information required for Amazon Kendra Web Crawler. Detailed below. + WebCrawlerConfiguration *WebCrawlerConfigurationObservation `json:"webCrawlerConfiguration,omitempty" tf:"web_crawler_configuration,omitempty"` +} + +type ConfigurationParameters struct { + + // A block that provides the configuration information to connect to an Amazon S3 bucket as your data source. Detailed below. + // +kubebuilder:validation:Optional + S3Configuration *S3ConfigurationParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` + + // A block that provides the configuration information required for Amazon Kendra Web Crawler. Detailed below. + // +kubebuilder:validation:Optional + WebCrawlerConfiguration *WebCrawlerConfigurationParameters `json:"webCrawlerConfiguration,omitempty" tf:"web_crawler_configuration,omitempty"` +} + +type CustomDocumentEnrichmentConfigurationInitParameters struct { + + // Configuration information to alter document attributes or metadata fields and content when ingesting documents into Amazon Kendra. Minimum number of 0 items. Maximum number of 100 items. Detailed below. + InlineConfigurations []InlineConfigurationsInitParameters `json:"inlineConfigurations,omitempty" tf:"inline_configurations,omitempty"` + + // A block that specifies the configuration information for invoking a Lambda function in AWS Lambda on the structured documents with their metadata and text extracted. 
You can use a Lambda function to apply advanced logic for creating, modifying, or deleting document metadata and content. For more information, see Advanced data manipulation. Detailed below. + PostExtractionHookConfiguration *PostExtractionHookConfigurationInitParameters `json:"postExtractionHookConfiguration,omitempty" tf:"post_extraction_hook_configuration,omitempty"` + + // Configuration information for invoking a Lambda function in AWS Lambda on the original or raw documents before extracting their metadata and text. You can use a Lambda function to apply advanced logic for creating, modifying, or deleting document metadata and content. For more information, see Advanced data manipulation. Detailed below. + PreExtractionHookConfiguration *PreExtractionHookConfigurationInitParameters `json:"preExtractionHookConfiguration,omitempty" tf:"pre_extraction_hook_configuration,omitempty"` + + // The Amazon Resource Name (ARN) of a role with permission to run pre_extraction_hook_configuration and post_extraction_hook_configuration for altering document metadata and content during the document ingestion process. For more information, see IAM roles for Amazon Kendra. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type CustomDocumentEnrichmentConfigurationObservation struct { + + // Configuration information to alter document attributes or metadata fields and content when ingesting documents into Amazon Kendra. Minimum number of 0 items. Maximum number of 100 items. Detailed below. + InlineConfigurations []InlineConfigurationsObservation `json:"inlineConfigurations,omitempty" tf:"inline_configurations,omitempty"` + + // A block that specifies the configuration information for invoking a Lambda function in AWS Lambda on the structured documents with their metadata and text extracted. You can use a Lambda function to apply advanced logic for creating, modifying, or deleting document metadata and content. 
For more information, see Advanced data manipulation. Detailed below. + PostExtractionHookConfiguration *PostExtractionHookConfigurationObservation `json:"postExtractionHookConfiguration,omitempty" tf:"post_extraction_hook_configuration,omitempty"` + + // Configuration information for invoking a Lambda function in AWS Lambda on the original or raw documents before extracting their metadata and text. You can use a Lambda function to apply advanced logic for creating, modifying, or deleting document metadata and content. For more information, see Advanced data manipulation. Detailed below. + PreExtractionHookConfiguration *PreExtractionHookConfigurationObservation `json:"preExtractionHookConfiguration,omitempty" tf:"pre_extraction_hook_configuration,omitempty"` + + // The Amazon Resource Name (ARN) of a role with permission to run pre_extraction_hook_configuration and post_extraction_hook_configuration for altering document metadata and content during the document ingestion process. For more information, see IAM roles for Amazon Kendra. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type CustomDocumentEnrichmentConfigurationParameters struct { + + // Configuration information to alter document attributes or metadata fields and content when ingesting documents into Amazon Kendra. Minimum number of 0 items. Maximum number of 100 items. Detailed below. + // +kubebuilder:validation:Optional + InlineConfigurations []InlineConfigurationsParameters `json:"inlineConfigurations,omitempty" tf:"inline_configurations,omitempty"` + + // A block that specifies the configuration information for invoking a Lambda function in AWS Lambda on the structured documents with their metadata and text extracted. You can use a Lambda function to apply advanced logic for creating, modifying, or deleting document metadata and content. For more information, see Advanced data manipulation. Detailed below. 
+ // +kubebuilder:validation:Optional + PostExtractionHookConfiguration *PostExtractionHookConfigurationParameters `json:"postExtractionHookConfiguration,omitempty" tf:"post_extraction_hook_configuration,omitempty"` + + // Configuration information for invoking a Lambda function in AWS Lambda on the original or raw documents before extracting their metadata and text. You can use a Lambda function to apply advanced logic for creating, modifying, or deleting document metadata and content. For more information, see Advanced data manipulation. Detailed below. + // +kubebuilder:validation:Optional + PreExtractionHookConfiguration *PreExtractionHookConfigurationParameters `json:"preExtractionHookConfiguration,omitempty" tf:"pre_extraction_hook_configuration,omitempty"` + + // The Amazon Resource Name (ARN) of a role with permission to run pre_extraction_hook_configuration and post_extraction_hook_configuration for altering document metadata and content during the document ingestion process. For more information, see IAM roles for Amazon Kendra. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type DataSourceInitParameters struct { + + // A block with the configuration information to connect to your Data Source repository. You can't specify the configuration block when the type parameter is set to CUSTOM. Detailed below. + Configuration *ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A block with the configuration information for altering document metadata and content during the document ingestion process. For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see Customizing document metadata during the ingestion process. Detailed below. 
+ CustomDocumentEnrichmentConfiguration *CustomDocumentEnrichmentConfigurationInitParameters `json:"customDocumentEnrichmentConfiguration,omitempty" tf:"custom_document_enrichment_configuration,omitempty"` + + // A description for the Data Source connector. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The identifier of the index for your Amazon Kendra data source. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kendra/v1beta2.Index + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // Reference to a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDRef *v1.Reference `json:"indexIdRef,omitempty" tf:"-"` + + // Selector for a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDSelector *v1.Selector `json:"indexIdSelector,omitempty" tf:"-"` + + // The code for a language. This allows you to support a language for all documents when creating the Data Source connector. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // A name for your data source connector. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Amazon Resource Name (ARN) of a role with permission to access the data source connector. For more information, see IAM roles for Amazon Kendra. You can't specify the role_arn parameter when the type parameter is set to CUSTOM. The role_arn parameter is required for all other data sources. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Sets the frequency for Amazon Kendra to check the documents in your Data Source repository and update the index. If you don't set a schedule Amazon Kendra will not periodically update the index. You can call the StartDataSourceSyncJob API to update the index. + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of data source repository. For an updated list of values, refer to Valid Values for Type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DataSourceObservation struct { + + // ARN of the Data Source. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A block with the configuration information to connect to your Data Source repository. You can't specify the configuration block when the type parameter is set to CUSTOM. Detailed below. + Configuration *ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The Unix timestamp of when the Data Source was created. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // A block with the configuration information for altering document metadata and content during the document ingestion process. 
For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see Customizing document metadata during the ingestion process. Detailed below. + CustomDocumentEnrichmentConfiguration *CustomDocumentEnrichmentConfigurationObservation `json:"customDocumentEnrichmentConfiguration,omitempty" tf:"custom_document_enrichment_configuration,omitempty"` + + // The unique identifiers of the Data Source. + DataSourceID *string `json:"dataSourceId,omitempty" tf:"data_source_id,omitempty"` + + // A description for the Data Source connector. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // When the Status field value is FAILED, the ErrorMessage field contains a description of the error that caused the Data Source to fail. + ErrorMessage *string `json:"errorMessage,omitempty" tf:"error_message,omitempty"` + + // The unique identifiers of the Data Source and index separated by a slash (/). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The identifier of the index for your Amazon Kendra data source. + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // The code for a language. This allows you to support a language for all documents when creating the Data Source connector. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // A name for your data source connector. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Amazon Resource Name (ARN) of a role with permission to access the data source connector. For more information, see IAM roles for Amazon Kendra. You can't specify the role_arn parameter when the type parameter is set to CUSTOM. The role_arn parameter is required for all other data sources. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Sets the frequency for Amazon Kendra to check the documents in your Data Source repository and update the index. If you don't set a schedule Amazon Kendra will not periodically update the index. You can call the StartDataSourceSyncJob API to update the index. + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The current status of the Data Source. When the status is ACTIVE the Data Source is ready to use. When the status is FAILED, the error_message field contains the reason that the Data Source failed. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The type of data source repository. For an updated list of values, refer to Valid Values for Type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The Unix timestamp of when the Data Source was last updated. + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` +} + +type DataSourceParameters struct { + + // A block with the configuration information to connect to your Data Source repository. You can't specify the configuration block when the type parameter is set to CUSTOM. Detailed below. + // +kubebuilder:validation:Optional + Configuration *ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A block with the configuration information for altering document metadata and content during the document ingestion process. 
For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see Customizing document metadata during the ingestion process. Detailed below. + // +kubebuilder:validation:Optional + CustomDocumentEnrichmentConfiguration *CustomDocumentEnrichmentConfigurationParameters `json:"customDocumentEnrichmentConfiguration,omitempty" tf:"custom_document_enrichment_configuration,omitempty"` + + // A description for the Data Source connector. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The identifier of the index for your Amazon Kendra data source. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kendra/v1beta2.Index + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // Reference to a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDRef *v1.Reference `json:"indexIdRef,omitempty" tf:"-"` + + // Selector for a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDSelector *v1.Selector `json:"indexIdSelector,omitempty" tf:"-"` + + // The code for a language. This allows you to support a language for all documents when creating the Data Source connector. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English. + // +kubebuilder:validation:Optional + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // A name for your data source connector. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Amazon Resource Name (ARN) of a role with permission to access the data source connector. For more information, see IAM roles for Amazon Kendra. You can't specify the role_arn parameter when the type parameter is set to CUSTOM. The role_arn parameter is required for all other data sources. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Sets the frequency for Amazon Kendra to check the documents in your Data Source repository and update the index. If you don't set a schedule Amazon Kendra will not periodically update the index. You can call the StartDataSourceSyncJob API to update the index. + // +kubebuilder:validation:Optional + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of data source repository. For an updated list of values, refer to Valid Values for Type. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DocumentsMetadataConfigurationInitParameters struct { + + // A prefix used to filter metadata configuration files in the AWS S3 bucket. The S3 bucket might contain multiple metadata files. 
Use s3_prefix to include only the desired metadata files. + S3Prefix *string `json:"s3Prefix,omitempty" tf:"s3_prefix,omitempty"` +} + +type DocumentsMetadataConfigurationObservation struct { + + // A prefix used to filter metadata configuration files in the AWS S3 bucket. The S3 bucket might contain multiple metadata files. Use s3_prefix to include only the desired metadata files. + S3Prefix *string `json:"s3Prefix,omitempty" tf:"s3_prefix,omitempty"` +} + +type DocumentsMetadataConfigurationParameters struct { + + // A prefix used to filter metadata configuration files in the AWS S3 bucket. The S3 bucket might contain multiple metadata files. Use s3_prefix to include only the desired metadata files. + // +kubebuilder:validation:Optional + S3Prefix *string `json:"s3Prefix,omitempty" tf:"s3_prefix,omitempty"` +} + +type InlineConfigurationsInitParameters struct { + + // Configuration of the condition used for the target document attribute or metadata field when ingesting documents into Amazon Kendra. See condition. + Condition *ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // TRUE to delete content if the condition used for the target attribute is met. + DocumentContentDeletion *bool `json:"documentContentDeletion,omitempty" tf:"document_content_deletion,omitempty"` + + // Configuration of the target document attribute or metadata field when ingesting documents into Amazon Kendra. You can also include a value. Detailed below. + Target *TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type InlineConfigurationsObservation struct { + + // Configuration of the condition used for the target document attribute or metadata field when ingesting documents into Amazon Kendra. See condition. + Condition *ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + // TRUE to delete content if the condition used for the target attribute is met. 
+ DocumentContentDeletion *bool `json:"documentContentDeletion,omitempty" tf:"document_content_deletion,omitempty"` + + // Configuration of the target document attribute or metadata field when ingesting documents into Amazon Kendra. You can also include a value. Detailed below. + Target *TargetObservation `json:"target,omitempty" tf:"target,omitempty"` +} + +type InlineConfigurationsParameters struct { + + // Configuration of the condition used for the target document attribute or metadata field when ingesting documents into Amazon Kendra. See condition. + // +kubebuilder:validation:Optional + Condition *ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // TRUE to delete content if the condition used for the target attribute is met. + // +kubebuilder:validation:Optional + DocumentContentDeletion *bool `json:"documentContentDeletion,omitempty" tf:"document_content_deletion,omitempty"` + + // Configuration of the target document attribute or metadata field when ingesting documents into Amazon Kendra. You can also include a value. Detailed below. + // +kubebuilder:validation:Optional + Target *TargetParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type InvocationConditionConditionOnValueInitParameters struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. 
+ // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type InvocationConditionConditionOnValueObservation struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type InvocationConditionConditionOnValueParameters struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + // +kubebuilder:validation:Optional + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + // +kubebuilder:validation:Optional + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. + // +kubebuilder:validation:Optional + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + // +kubebuilder:validation:Optional + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type InvocationConditionInitParameters struct { + + // The identifier of the document attribute used for the condition. For example, _source_uri could be an identifier for the attribute or metadata field that contains source URIs associated with the documents. 
Amazon Kendra currently does not support _document_body as an attribute key used for the condition. + ConditionDocumentAttributeKey *string `json:"conditionDocumentAttributeKey,omitempty" tf:"condition_document_attribute_key,omitempty"` + + // The value used by the operator. For example, you can specify the value 'financial' for strings in the _source_uri field that partially match or contain this value. See condition_on_value. + ConditionOnValue *InvocationConditionConditionOnValueInitParameters `json:"conditionOnValue,omitempty" tf:"condition_on_value,omitempty"` + + // The condition operator. For example, you can use Contains to partially match a string. Valid Values: GreaterThan | GreaterThanOrEquals | LessThan | LessThanOrEquals | Equals | NotEquals | Contains | NotContains | Exists | NotExists | BeginsWith. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type InvocationConditionObservation struct { + + // The identifier of the document attribute used for the condition. For example, _source_uri could be an identifier for the attribute or metadata field that contains source URIs associated with the documents. Amazon Kendra currently does not support _document_body as an attribute key used for the condition. + ConditionDocumentAttributeKey *string `json:"conditionDocumentAttributeKey,omitempty" tf:"condition_document_attribute_key,omitempty"` + + // The value used by the operator. For example, you can specify the value 'financial' for strings in the _source_uri field that partially match or contain this value. See condition_on_value. + ConditionOnValue *InvocationConditionConditionOnValueObservation `json:"conditionOnValue,omitempty" tf:"condition_on_value,omitempty"` + + // The condition operator. For example, you can use Contains to partially match a string. Valid Values: GreaterThan | GreaterThanOrEquals | LessThan | LessThanOrEquals | Equals | NotEquals | Contains | NotContains | Exists | NotExists | BeginsWith. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type InvocationConditionParameters struct { + + // The identifier of the document attribute used for the condition. For example, _source_uri could be an identifier for the attribute or metadata field that contains source URIs associated with the documents. Amazon Kendra currently does not support _document_body as an attribute key used for the condition. + // +kubebuilder:validation:Optional + ConditionDocumentAttributeKey *string `json:"conditionDocumentAttributeKey" tf:"condition_document_attribute_key,omitempty"` + + // The value used by the operator. For example, you can specify the value 'financial' for strings in the _source_uri field that partially match or contain this value. See condition_on_value. + // +kubebuilder:validation:Optional + ConditionOnValue *InvocationConditionConditionOnValueParameters `json:"conditionOnValue,omitempty" tf:"condition_on_value,omitempty"` + + // The condition operator. For example, you can use Contains to partially match a string. Valid Values: GreaterThan | GreaterThanOrEquals | LessThan | LessThanOrEquals | Equals | NotEquals | Contains | NotContains | Exists | NotExists | BeginsWith. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` +} + +type PostExtractionHookConfigurationInitParameters struct { + + // A block that specifies the condition used for when a Lambda function should be invoked. For example, you can specify a condition that if there are empty date-time values, then Amazon Kendra should invoke a function that inserts the current date-time. See invocation_condition. + InvocationCondition *InvocationConditionInitParameters `json:"invocationCondition,omitempty" tf:"invocation_condition,omitempty"` + + // The Amazon Resource Name (ARN) of a Lambda Function that can manipulate your document metadata fields or attributes and content. 
+ LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see Data contracts for Lambda functions. + S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` +} + +type PostExtractionHookConfigurationObservation struct { + + // A block that specifies the condition used for when a Lambda function should be invoked. For example, you can specify a condition that if there are empty date-time values, then Amazon Kendra should invoke a function that inserts the current date-time. See invocation_condition. + InvocationCondition *InvocationConditionObservation `json:"invocationCondition,omitempty" tf:"invocation_condition,omitempty"` + + // The Amazon Resource Name (ARN) of a Lambda Function that can manipulate your document metadata fields or attributes and content. + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see Data contracts for Lambda functions. + S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` +} + +type PostExtractionHookConfigurationParameters struct { + + // A block that specifies the condition used for when a Lambda function should be invoked. For example, you can specify a condition that if there are empty date-time values, then Amazon Kendra should invoke a function that inserts the current date-time. See invocation_condition. + // +kubebuilder:validation:Optional + InvocationCondition *InvocationConditionParameters `json:"invocationCondition,omitempty" tf:"invocation_condition,omitempty"` + + // The Amazon Resource Name (ARN) of a Lambda Function that can manipulate your document metadata fields or attributes and content. 
+ // +kubebuilder:validation:Optional + LambdaArn *string `json:"lambdaArn" tf:"lambda_arn,omitempty"` + + // Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see Data contracts for Lambda functions. + // +kubebuilder:validation:Optional + S3Bucket *string `json:"s3Bucket" tf:"s3_bucket,omitempty"` +} + +type PreExtractionHookConfigurationInitParameters struct { + + // A block that specifies the condition used for when a Lambda function should be invoked. For example, you can specify a condition that if there are empty date-time values, then Amazon Kendra should invoke a function that inserts the current date-time. See invocation_condition. + InvocationCondition *PreExtractionHookConfigurationInvocationConditionInitParameters `json:"invocationCondition,omitempty" tf:"invocation_condition,omitempty"` + + // The Amazon Resource Name (ARN) of a Lambda Function that can manipulate your document metadata fields or attributes and content. + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see Data contracts for Lambda functions. + S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` +} + +type PreExtractionHookConfigurationInvocationConditionConditionOnValueInitParameters struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. 
+ // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type PreExtractionHookConfigurationInvocationConditionConditionOnValueObservation struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type PreExtractionHookConfigurationInvocationConditionConditionOnValueParameters struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + // +kubebuilder:validation:Optional + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + // +kubebuilder:validation:Optional + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. + // +kubebuilder:validation:Optional + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + // +kubebuilder:validation:Optional + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type PreExtractionHookConfigurationInvocationConditionInitParameters struct { + + // The identifier of the document attribute used for the condition. 
For example, _source_uri could be an identifier for the attribute or metadata field that contains source URIs associated with the documents. Amazon Kendra currently does not support _document_body as an attribute key used for the condition. + ConditionDocumentAttributeKey *string `json:"conditionDocumentAttributeKey,omitempty" tf:"condition_document_attribute_key,omitempty"` + + // The value used by the operator. For example, you can specify the value 'financial' for strings in the _source_uri field that partially match or contain this value. See condition_on_value. + ConditionOnValue *PreExtractionHookConfigurationInvocationConditionConditionOnValueInitParameters `json:"conditionOnValue,omitempty" tf:"condition_on_value,omitempty"` + + // The condition operator. For example, you can use Contains to partially match a string. Valid Values: GreaterThan | GreaterThanOrEquals | LessThan | LessThanOrEquals | Equals | NotEquals | Contains | NotContains | Exists | NotExists | BeginsWith. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type PreExtractionHookConfigurationInvocationConditionObservation struct { + + // The identifier of the document attribute used for the condition. For example, _source_uri could be an identifier for the attribute or metadata field that contains source URIs associated with the documents. Amazon Kendra currently does not support _document_body as an attribute key used for the condition. + ConditionDocumentAttributeKey *string `json:"conditionDocumentAttributeKey,omitempty" tf:"condition_document_attribute_key,omitempty"` + + // The value used by the operator. For example, you can specify the value 'financial' for strings in the _source_uri field that partially match or contain this value. See condition_on_value. + ConditionOnValue *PreExtractionHookConfigurationInvocationConditionConditionOnValueObservation `json:"conditionOnValue,omitempty" tf:"condition_on_value,omitempty"` + + // The condition operator. 
For example, you can use Contains to partially match a string. Valid Values: GreaterThan | GreaterThanOrEquals | LessThan | LessThanOrEquals | Equals | NotEquals | Contains | NotContains | Exists | NotExists | BeginsWith. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type PreExtractionHookConfigurationInvocationConditionParameters struct { + + // The identifier of the document attribute used for the condition. For example, _source_uri could be an identifier for the attribute or metadata field that contains source URIs associated with the documents. Amazon Kendra currently does not support _document_body as an attribute key used for the condition. + // +kubebuilder:validation:Optional + ConditionDocumentAttributeKey *string `json:"conditionDocumentAttributeKey" tf:"condition_document_attribute_key,omitempty"` + + // The value used by the operator. For example, you can specify the value 'financial' for strings in the _source_uri field that partially match or contain this value. See condition_on_value. + // +kubebuilder:validation:Optional + ConditionOnValue *PreExtractionHookConfigurationInvocationConditionConditionOnValueParameters `json:"conditionOnValue,omitempty" tf:"condition_on_value,omitempty"` + + // The condition operator. For example, you can use Contains to partially match a string. Valid Values: GreaterThan | GreaterThanOrEquals | LessThan | LessThanOrEquals | Equals | NotEquals | Contains | NotContains | Exists | NotExists | BeginsWith. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` +} + +type PreExtractionHookConfigurationObservation struct { + + // A block that specifies the condition used for when a Lambda function should be invoked. For example, you can specify a condition that if there are empty date-time values, then Amazon Kendra should invoke a function that inserts the current date-time. See invocation_condition. 
+ InvocationCondition *PreExtractionHookConfigurationInvocationConditionObservation `json:"invocationCondition,omitempty" tf:"invocation_condition,omitempty"` + + // The Amazon Resource Name (ARN) of a Lambda Function that can manipulate your document metadata fields or attributes and content. + LambdaArn *string `json:"lambdaArn,omitempty" tf:"lambda_arn,omitempty"` + + // Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see Data contracts for Lambda functions. + S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` +} + +type PreExtractionHookConfigurationParameters struct { + + // A block that specifies the condition used for when a Lambda function should be invoked. For example, you can specify a condition that if there are empty date-time values, then Amazon Kendra should invoke a function that inserts the current date-time. See invocation_condition. + // +kubebuilder:validation:Optional + InvocationCondition *PreExtractionHookConfigurationInvocationConditionParameters `json:"invocationCondition,omitempty" tf:"invocation_condition,omitempty"` + + // The Amazon Resource Name (ARN) of a Lambda Function that can manipulate your document metadata fields or attributes and content. + // +kubebuilder:validation:Optional + LambdaArn *string `json:"lambdaArn" tf:"lambda_arn,omitempty"` + + // Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see Data contracts for Lambda functions. + // +kubebuilder:validation:Optional + S3Bucket *string `json:"s3Bucket" tf:"s3_bucket,omitempty"` +} + +type ProxyConfigurationInitParameters struct { + + // Your secret ARN, which you can create in AWS Secrets Manager. You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Credentials *string `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // Reference to a Secret in secretsmanager to populate credentials. + // +kubebuilder:validation:Optional + CredentialsRef *v1.Reference `json:"credentialsRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate credentials. + // +kubebuilder:validation:Optional + CredentialsSelector *v1.Selector `json:"credentialsSelector,omitempty" tf:"-"` + + // The name of the website host you want to connect to using authentication credentials. For example, the host name of https://a.example.com/page1.html is "a.example.com". + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The port number of the website host you want to connect to using authentication credentials. For example, the port for https://a.example.com/page1.html is 443, the standard port for HTTPS. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type ProxyConfigurationObservation struct { + + // Your secret ARN, which you can create in AWS Secrets Manager. You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password. + Credentials *string `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The name of the website host you want to connect to using authentication credentials. For example, the host name of https://a.example.com/page1.html is "a.example.com". + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The port number of the website host you want to connect to using authentication credentials. For example, the port for https://a.example.com/page1.html is 443, the standard port for HTTPS. 
+ Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type ProxyConfigurationParameters struct { + + // Your secret ARN, which you can create in AWS Secrets Manager. You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Credentials *string `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // Reference to a Secret in secretsmanager to populate credentials. + // +kubebuilder:validation:Optional + CredentialsRef *v1.Reference `json:"credentialsRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate credentials. + // +kubebuilder:validation:Optional + CredentialsSelector *v1.Selector `json:"credentialsSelector,omitempty" tf:"-"` + + // The name of the website host you want to connect to using authentication credentials. For example, the host name of https://a.example.com/page1.html is "a.example.com". + // +kubebuilder:validation:Optional + Host *string `json:"host" tf:"host,omitempty"` + + // The port number of the website host you want to connect to using authentication credentials. For example, the port for https://a.example.com/page1.html is 443, the standard port for HTTPS. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` +} + +type S3ConfigurationInitParameters struct { + + // A block that provides the path to the S3 bucket that contains the user context filtering files for the data source. For the format of the file, see Access control for S3 data sources. Detailed below. 
+ AccessControlListConfiguration *AccessControlListConfigurationInitParameters `json:"accessControlListConfiguration,omitempty" tf:"access_control_list_configuration,omitempty"` + + // The name of the bucket that contains the documents. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // A block that defines the Document metadata files that contain information such as the document access control information, source URI, document author, and custom attributes. Each metadata file contains metadata about a single document. Detailed below. + DocumentsMetadataConfiguration *DocumentsMetadataConfigurationInitParameters `json:"documentsMetadataConfiguration,omitempty" tf:"documents_metadata_configuration,omitempty"` + + // A list of glob patterns for documents that should not be indexed. If a document that matches an inclusion prefix or inclusion pattern also matches an exclusion pattern, the document is not indexed. Refer to Exclusion Patterns for more examples. + // +listType=set + ExclusionPatterns []*string `json:"exclusionPatterns,omitempty" tf:"exclusion_patterns,omitempty"` + + // A list of glob patterns for documents that should be indexed. If a document that matches an inclusion pattern also matches an exclusion pattern, the document is not indexed. Refer to Inclusion Patterns for more examples. 
+ // +listType=set + InclusionPatterns []*string `json:"inclusionPatterns,omitempty" tf:"inclusion_patterns,omitempty"` + + // A list of S3 prefixes for the documents that should be included in the index. + // +listType=set + InclusionPrefixes []*string `json:"inclusionPrefixes,omitempty" tf:"inclusion_prefixes,omitempty"` +} + +type S3ConfigurationObservation struct { + + // A block that provides the path to the S3 bucket that contains the user context filtering files for the data source. For the format of the file, see Access control for S3 data sources. Detailed below. + AccessControlListConfiguration *AccessControlListConfigurationObservation `json:"accessControlListConfiguration,omitempty" tf:"access_control_list_configuration,omitempty"` + + // The name of the bucket that contains the documents. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // A block that defines the Document metadata files that contain information such as the document access control information, source URI, document author, and custom attributes. Each metadata file contains metadata about a single document. Detailed below. + DocumentsMetadataConfiguration *DocumentsMetadataConfigurationObservation `json:"documentsMetadataConfiguration,omitempty" tf:"documents_metadata_configuration,omitempty"` + + // A list of glob patterns for documents that should not be indexed. If a document that matches an inclusion prefix or inclusion pattern also matches an exclusion pattern, the document is not indexed. Refer to Exclusion Patterns for more examples. + // +listType=set + ExclusionPatterns []*string `json:"exclusionPatterns,omitempty" tf:"exclusion_patterns,omitempty"` + + // A list of glob patterns for documents that should be indexed. If a document that matches an inclusion pattern also matches an exclusion pattern, the document is not indexed. Refer to Inclusion Patterns for more examples. 
+ // +listType=set + InclusionPatterns []*string `json:"inclusionPatterns,omitempty" tf:"inclusion_patterns,omitempty"` + + // A list of S3 prefixes for the documents that should be included in the index. + // +listType=set + InclusionPrefixes []*string `json:"inclusionPrefixes,omitempty" tf:"inclusion_prefixes,omitempty"` +} + +type S3ConfigurationParameters struct { + + // A block that provides the path to the S3 bucket that contains the user context filtering files for the data source. For the format of the file, see Access control for S3 data sources. Detailed below. + // +kubebuilder:validation:Optional + AccessControlListConfiguration *AccessControlListConfigurationParameters `json:"accessControlListConfiguration,omitempty" tf:"access_control_list_configuration,omitempty"` + + // The name of the bucket that contains the documents. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // A block that defines the Document metadata files that contain information such as the document access control information, source URI, document author, and custom attributes. Each metadata file contains metadata about a single document. Detailed below. 
+ // +kubebuilder:validation:Optional + DocumentsMetadataConfiguration *DocumentsMetadataConfigurationParameters `json:"documentsMetadataConfiguration,omitempty" tf:"documents_metadata_configuration,omitempty"` + + // A list of glob patterns for documents that should not be indexed. If a document that matches an inclusion prefix or inclusion pattern also matches an exclusion pattern, the document is not indexed. Refer to Exclusion Patterns for more examples. + // +kubebuilder:validation:Optional + // +listType=set + ExclusionPatterns []*string `json:"exclusionPatterns,omitempty" tf:"exclusion_patterns,omitempty"` + + // A list of glob patterns for documents that should be indexed. If a document that matches an inclusion pattern also matches an exclusion pattern, the document is not indexed. Refer to Inclusion Patterns for more examples. + // +kubebuilder:validation:Optional + // +listType=set + InclusionPatterns []*string `json:"inclusionPatterns,omitempty" tf:"inclusion_patterns,omitempty"` + + // A list of S3 prefixes for the documents that should be included in the index. + // +kubebuilder:validation:Optional + // +listType=set + InclusionPrefixes []*string `json:"inclusionPrefixes,omitempty" tf:"inclusion_prefixes,omitempty"` +} + +type SeedURLConfigurationInitParameters struct { + + // The list of seed or starting point URLs of the websites you want to crawl. The list can include a maximum of 100 seed URLs. Array Members: Minimum number of 0 items. Maximum number of 100 items. Length Constraints: Minimum length of 1. Maximum length of 2048. + // +listType=set + SeedUrls []*string `json:"seedUrls,omitempty" tf:"seed_urls,omitempty"` + + // The default mode is set to HOST_ONLY. You can choose one of the following modes: + WebCrawlerMode *string `json:"webCrawlerMode,omitempty" tf:"web_crawler_mode,omitempty"` +} + +type SeedURLConfigurationObservation struct { + + // The list of seed or starting point URLs of the websites you want to crawl. 
The list can include a maximum of 100 seed URLs. Array Members: Minimum number of 0 items. Maximum number of 100 items. Length Constraints: Minimum length of 1. Maximum length of 2048. + // +listType=set + SeedUrls []*string `json:"seedUrls,omitempty" tf:"seed_urls,omitempty"` + + // The default mode is set to HOST_ONLY. You can choose one of the following modes: + WebCrawlerMode *string `json:"webCrawlerMode,omitempty" tf:"web_crawler_mode,omitempty"` +} + +type SeedURLConfigurationParameters struct { + + // The list of seed or starting point URLs of the websites you want to crawl. The list can include a maximum of 100 seed URLs. Array Members: Minimum number of 0 items. Maximum number of 100 items. Length Constraints: Minimum length of 1. Maximum length of 2048. + // +kubebuilder:validation:Optional + // +listType=set + SeedUrls []*string `json:"seedUrls" tf:"seed_urls,omitempty"` + + // The default mode is set to HOST_ONLY. You can choose one of the following modes: + // +kubebuilder:validation:Optional + WebCrawlerMode *string `json:"webCrawlerMode,omitempty" tf:"web_crawler_mode,omitempty"` +} + +type SiteMapsConfigurationInitParameters struct { + + // The list of sitemap URLs of the websites you want to crawl. The list can include a maximum of 3 sitemap URLs. + // +listType=set + SiteMaps []*string `json:"siteMaps,omitempty" tf:"site_maps,omitempty"` +} + +type SiteMapsConfigurationObservation struct { + + // The list of sitemap URLs of the websites you want to crawl. The list can include a maximum of 3 sitemap URLs. + // +listType=set + SiteMaps []*string `json:"siteMaps,omitempty" tf:"site_maps,omitempty"` +} + +type SiteMapsConfigurationParameters struct { + + // The list of sitemap URLs of the websites you want to crawl. The list can include a maximum of 3 sitemap URLs. 
+ // +kubebuilder:validation:Optional + // +listType=set + SiteMaps []*string `json:"siteMaps" tf:"site_maps,omitempty"` +} + +type TargetDocumentAttributeValueInitParameters struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type TargetDocumentAttributeValueObservation struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type TargetDocumentAttributeValueParameters struct { + + // A date expressed as an ISO 8601 string. It is important for the time zone to be included in the ISO 8601 date-time format. As of this writing only UTC is supported. For example, 2012-03-25T12:30:10+00:00. + // +kubebuilder:validation:Optional + DateValue *string `json:"dateValue,omitempty" tf:"date_value,omitempty"` + + // A long integer value. 
+ // +kubebuilder:validation:Optional + LongValue *float64 `json:"longValue,omitempty" tf:"long_value,omitempty"` + + // A list of strings. + // +kubebuilder:validation:Optional + // +listType=set + StringListValue []*string `json:"stringListValue,omitempty" tf:"string_list_value,omitempty"` + + // +kubebuilder:validation:Optional + StringValue *string `json:"stringValue,omitempty" tf:"string_value,omitempty"` +} + +type TargetInitParameters struct { + + // The identifier of the target document attribute or metadata field. For example, 'Department' could be an identifier for the target attribute or metadata field that includes the department names associated with the documents. + TargetDocumentAttributeKey *string `json:"targetDocumentAttributeKey,omitempty" tf:"target_document_attribute_key,omitempty"` + + // The target value you want to create for the target attribute. For example, 'Finance' could be the target value for the target attribute key 'Department'. See target_document_attribute_value. + TargetDocumentAttributeValue *TargetDocumentAttributeValueInitParameters `json:"targetDocumentAttributeValue,omitempty" tf:"target_document_attribute_value,omitempty"` + + // TRUE to delete the existing target value for your specified target attribute key. You cannot create a target value and set this to TRUE. To create a target value (TargetDocumentAttributeValue), set this to FALSE. + TargetDocumentAttributeValueDeletion *bool `json:"targetDocumentAttributeValueDeletion,omitempty" tf:"target_document_attribute_value_deletion,omitempty"` +} + +type TargetObservation struct { + + // The identifier of the target document attribute or metadata field. For example, 'Department' could be an identifier for the target attribute or metadata field that includes the department names associated with the documents. 
+ TargetDocumentAttributeKey *string `json:"targetDocumentAttributeKey,omitempty" tf:"target_document_attribute_key,omitempty"` + + // The target value you want to create for the target attribute. For example, 'Finance' could be the target value for the target attribute key 'Department'. See target_document_attribute_value. + TargetDocumentAttributeValue *TargetDocumentAttributeValueObservation `json:"targetDocumentAttributeValue,omitempty" tf:"target_document_attribute_value,omitempty"` + + // TRUE to delete the existing target value for your specified target attribute key. You cannot create a target value and set this to TRUE. To create a target value (TargetDocumentAttributeValue), set this to FALSE. + TargetDocumentAttributeValueDeletion *bool `json:"targetDocumentAttributeValueDeletion,omitempty" tf:"target_document_attribute_value_deletion,omitempty"` +} + +type TargetParameters struct { + + // The identifier of the target document attribute or metadata field. For example, 'Department' could be an identifier for the target attribute or metadata field that includes the department names associated with the documents. + // +kubebuilder:validation:Optional + TargetDocumentAttributeKey *string `json:"targetDocumentAttributeKey,omitempty" tf:"target_document_attribute_key,omitempty"` + + // The target value you want to create for the target attribute. For example, 'Finance' could be the target value for the target attribute key 'Department'. See target_document_attribute_value. + // +kubebuilder:validation:Optional + TargetDocumentAttributeValue *TargetDocumentAttributeValueParameters `json:"targetDocumentAttributeValue,omitempty" tf:"target_document_attribute_value,omitempty"` + + // TRUE to delete the existing target value for your specified target attribute key. You cannot create a target value and set this to TRUE. To create a target value (TargetDocumentAttributeValue), set this to FALSE. 
+ // +kubebuilder:validation:Optional + TargetDocumentAttributeValueDeletion *bool `json:"targetDocumentAttributeValueDeletion,omitempty" tf:"target_document_attribute_value_deletion,omitempty"` +} + +type UrlsInitParameters struct { + + // A block that specifies the configuration of the seed or starting point URLs of the websites you want to crawl. You can choose to crawl only the website host names, or the website host names with subdomains, or the website host names with subdomains and other domains that the webpages link to. You can list up to 100 seed URLs. Detailed below. + SeedURLConfiguration *SeedURLConfigurationInitParameters `json:"seedUrlConfiguration,omitempty" tf:"seed_url_configuration,omitempty"` + + // A block that specifies the configuration of the sitemap URLs of the websites you want to crawl. Only URLs belonging to the same website host names are crawled. You can list up to 3 sitemap URLs. Detailed below. + SiteMapsConfiguration *SiteMapsConfigurationInitParameters `json:"siteMapsConfiguration,omitempty" tf:"site_maps_configuration,omitempty"` +} + +type UrlsObservation struct { + + // A block that specifies the configuration of the seed or starting point URLs of the websites you want to crawl. You can choose to crawl only the website host names, or the website host names with subdomains, or the website host names with subdomains and other domains that the webpages link to. You can list up to 100 seed URLs. Detailed below. + SeedURLConfiguration *SeedURLConfigurationObservation `json:"seedUrlConfiguration,omitempty" tf:"seed_url_configuration,omitempty"` + + // A block that specifies the configuration of the sitemap URLs of the websites you want to crawl. Only URLs belonging to the same website host names are crawled. You can list up to 3 sitemap URLs. Detailed below. 
+ SiteMapsConfiguration *SiteMapsConfigurationObservation `json:"siteMapsConfiguration,omitempty" tf:"site_maps_configuration,omitempty"` +} + +type UrlsParameters struct { + + // A block that specifies the configuration of the seed or starting point URLs of the websites you want to crawl. You can choose to crawl only the website host names, or the website host names with subdomains, or the website host names with subdomains and other domains that the webpages link to. You can list up to 100 seed URLs. Detailed below. + // +kubebuilder:validation:Optional + SeedURLConfiguration *SeedURLConfigurationParameters `json:"seedUrlConfiguration,omitempty" tf:"seed_url_configuration,omitempty"` + + // A block that specifies the configuration of the sitemap URLs of the websites you want to crawl. Only URLs belonging to the same website host names are crawled. You can list up to 3 sitemap URLs. Detailed below. + // +kubebuilder:validation:Optional + SiteMapsConfiguration *SiteMapsConfigurationParameters `json:"siteMapsConfiguration,omitempty" tf:"site_maps_configuration,omitempty"` +} + +type WebCrawlerConfigurationInitParameters struct { + + // A block with the configuration information required to connect to websites using authentication. You can connect to websites using basic authentication of user name and password. You use a secret in AWS Secrets Manager to store your authentication credentials. You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is "a.example.com" and the port is 443, the standard port for HTTPS. Detailed below. + AuthenticationConfiguration *AuthenticationConfigurationInitParameters `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // Specifies the number of levels in a website that you want to crawl. The first level begins from the website seed or starting point URL. For example, if a website has 3 levels – index level (i.e. 
seed in this example), sections level, and subsections level – and you are only interested in crawling information up to the sections level (i.e. levels 0-1), you can set your depth to 1. The default crawl depth is set to 2. Minimum value of 0. Maximum value of 10. + CrawlDepth *float64 `json:"crawlDepth,omitempty" tf:"crawl_depth,omitempty"` + + // The maximum size (in MB) of a webpage or attachment to crawl. Files larger than this size (in MB) are skipped/not crawled. The default maximum size of a webpage or attachment is set to 50 MB. Minimum value of 1.0e-06. Maximum value of 50. + MaxContentSizePerPageInMegaBytes *float64 `json:"maxContentSizePerPageInMegaBytes,omitempty" tf:"max_content_size_per_page_in_mega_bytes,omitempty"` + + // The maximum number of URLs on a webpage to include when crawling a website. This number is per webpage. As a website’s webpages are crawled, any URLs the webpages link to are also crawled. URLs on a webpage are crawled in order of appearance. The default maximum links per page is 100. Minimum value of 1. Maximum value of 1000. + MaxLinksPerPage *float64 `json:"maxLinksPerPage,omitempty" tf:"max_links_per_page,omitempty"` + + // The maximum number of URLs crawled per website host per minute. The default maximum number of URLs crawled per website host per minute is 300. Minimum value of 1. Maximum value of 300. + MaxUrlsPerMinuteCrawlRate *float64 `json:"maxUrlsPerMinuteCrawlRate,omitempty" tf:"max_urls_per_minute_crawl_rate,omitempty"` + + // Configuration information required to connect to your internal websites via a web proxy. You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is "a.example.com" and the port is 443, the standard port for HTTPS. Web proxy credentials are optional and you can use them to connect to a web proxy server that requires basic authentication. To store web proxy credentials, you use a secret in AWS Secrets Manager. Detailed below. 
+ ProxyConfiguration *ProxyConfigurationInitParameters `json:"proxyConfiguration,omitempty" tf:"proxy_configuration,omitempty"` + + // A list of regular expression patterns to exclude certain URLs to crawl. URLs that match the patterns are excluded from the index. URLs that don't match the patterns are included in the index. If a URL matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the URL file isn't included in the index. Array Members: Minimum number of 0 items. Maximum number of 100 items. Length Constraints: Minimum length of 1. Maximum length of 150. + // +listType=set + URLExclusionPatterns []*string `json:"urlExclusionPatterns,omitempty" tf:"url_exclusion_patterns,omitempty"` + + // A list of regular expression patterns to include certain URLs to crawl. URLs that match the patterns are included in the index. URLs that don't match the patterns are excluded from the index. If a URL matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the URL file isn't included in the index. Array Members: Minimum number of 0 items. Maximum number of 100 items. Length Constraints: Minimum length of 1. Maximum length of 150. + // +listType=set + URLInclusionPatterns []*string `json:"urlInclusionPatterns,omitempty" tf:"url_inclusion_patterns,omitempty"` + + // A block that specifies the seed or starting point URLs of the websites or the sitemap URLs of the websites you want to crawl. You can include website subdomains. You can list up to 100 seed URLs and up to 3 sitemap URLs. You can only crawl websites that use the secure communication protocol, Hypertext Transfer Protocol Secure (HTTPS). If you receive an error when crawling a website, it could be that the website is blocked from crawling. When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. 
Remember that you must only use Amazon Kendra Web Crawler to index your own webpages, or webpages that you have authorization to index. Detailed below. + Urls *UrlsInitParameters `json:"urls,omitempty" tf:"urls,omitempty"` +} + +type WebCrawlerConfigurationObservation struct { + + // A block with the configuration information required to connect to websites using authentication. You can connect to websites using basic authentication of user name and password. You use a secret in AWS Secrets Manager to store your authentication credentials. You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is "a.example.com" and the port is 443, the standard port for HTTPS. Detailed below. + AuthenticationConfiguration *AuthenticationConfigurationObservation `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // Specifies the number of levels in a website that you want to crawl. The first level begins from the website seed or starting point URL. For example, if a website has 3 levels – index level (i.e. seed in this example), sections level, and subsections level – and you are only interested in crawling information up to the sections level (i.e. levels 0-1), you can set your depth to 1. The default crawl depth is set to 2. Minimum value of 0. Maximum value of 10. + CrawlDepth *float64 `json:"crawlDepth,omitempty" tf:"crawl_depth,omitempty"` + + // The maximum size (in MB) of a webpage or attachment to crawl. Files larger than this size (in MB) are skipped/not crawled. The default maximum size of a webpage or attachment is set to 50 MB. Minimum value of 1.0e-06. Maximum value of 50. + MaxContentSizePerPageInMegaBytes *float64 `json:"maxContentSizePerPageInMegaBytes,omitempty" tf:"max_content_size_per_page_in_mega_bytes,omitempty"` + + // The maximum number of URLs on a webpage to include when crawling a website. This number is per webpage. 
As a website’s webpages are crawled, any URLs the webpages link to are also crawled. URLs on a webpage are crawled in order of appearance. The default maximum links per page is 100. Minimum value of 1. Maximum value of 1000. + MaxLinksPerPage *float64 `json:"maxLinksPerPage,omitempty" tf:"max_links_per_page,omitempty"` + + // The maximum number of URLs crawled per website host per minute. The default maximum number of URLs crawled per website host per minute is 300. Minimum value of 1. Maximum value of 300. + MaxUrlsPerMinuteCrawlRate *float64 `json:"maxUrlsPerMinuteCrawlRate,omitempty" tf:"max_urls_per_minute_crawl_rate,omitempty"` + + // Configuration information required to connect to your internal websites via a web proxy. You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is "a.example.com" and the port is 443, the standard port for HTTPS. Web proxy credentials are optional and you can use them to connect to a web proxy server that requires basic authentication. To store web proxy credentials, you use a secret in AWS Secrets Manager. Detailed below. + ProxyConfiguration *ProxyConfigurationObservation `json:"proxyConfiguration,omitempty" tf:"proxy_configuration,omitempty"` + + // A list of regular expression patterns to exclude certain URLs to crawl. URLs that match the patterns are excluded from the index. URLs that don't match the patterns are included in the index. If a URL matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the URL file isn't included in the index. Array Members: Minimum number of 0 items. Maximum number of 100 items. Length Constraints: Minimum length of 1. Maximum length of 150. + // +listType=set + URLExclusionPatterns []*string `json:"urlExclusionPatterns,omitempty" tf:"url_exclusion_patterns,omitempty"` + + // A list of regular expression patterns to include certain URLs to crawl. 
URLs that match the patterns are included in the index. URLs that don't match the patterns are excluded from the index. If a URL matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the URL file isn't included in the index. Array Members: Minimum number of 0 items. Maximum number of 100 items. Length Constraints: Minimum length of 1. Maximum length of 150. + // +listType=set + URLInclusionPatterns []*string `json:"urlInclusionPatterns,omitempty" tf:"url_inclusion_patterns,omitempty"` + + // A block that specifies the seed or starting point URLs of the websites or the sitemap URLs of the websites you want to crawl. You can include website subdomains. You can list up to 100 seed URLs and up to 3 sitemap URLs. You can only crawl websites that use the secure communication protocol, Hypertext Transfer Protocol Secure (HTTPS). If you receive an error when crawling a website, it could be that the website is blocked from crawling. When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use Amazon Kendra Web Crawler to index your own webpages, or webpages that you have authorization to index. Detailed below. + Urls *UrlsObservation `json:"urls,omitempty" tf:"urls,omitempty"` +} + +type WebCrawlerConfigurationParameters struct { + + // A block with the configuration information required to connect to websites using authentication. You can connect to websites using basic authentication of user name and password. You use a secret in AWS Secrets Manager to store your authentication credentials. You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is "a.example.com" and the port is 443, the standard port for HTTPS. Detailed below. 
+ // +kubebuilder:validation:Optional + AuthenticationConfiguration *AuthenticationConfigurationParameters `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // Specifies the number of levels in a website that you want to crawl. The first level begins from the website seed or starting point URL. For example, if a website has 3 levels – index level (i.e. seed in this example), sections level, and subsections level – and you are only interested in crawling information up to the sections level (i.e. levels 0-1), you can set your depth to 1. The default crawl depth is set to 2. Minimum value of 0. Maximum value of 10. + // +kubebuilder:validation:Optional + CrawlDepth *float64 `json:"crawlDepth,omitempty" tf:"crawl_depth,omitempty"` + + // The maximum size (in MB) of a webpage or attachment to crawl. Files larger than this size (in MB) are skipped/not crawled. The default maximum size of a webpage or attachment is set to 50 MB. Minimum value of 1.0e-06. Maximum value of 50. + // +kubebuilder:validation:Optional + MaxContentSizePerPageInMegaBytes *float64 `json:"maxContentSizePerPageInMegaBytes,omitempty" tf:"max_content_size_per_page_in_mega_bytes,omitempty"` + + // The maximum number of URLs on a webpage to include when crawling a website. This number is per webpage. As a website’s webpages are crawled, any URLs the webpages link to are also crawled. URLs on a webpage are crawled in order of appearance. The default maximum links per page is 100. Minimum value of 1. Maximum value of 1000. + // +kubebuilder:validation:Optional + MaxLinksPerPage *float64 `json:"maxLinksPerPage,omitempty" tf:"max_links_per_page,omitempty"` + + // The maximum number of URLs crawled per website host per minute. The default maximum number of URLs crawled per website host per minute is 300. Minimum value of 1. Maximum value of 300. 
+ // +kubebuilder:validation:Optional + MaxUrlsPerMinuteCrawlRate *float64 `json:"maxUrlsPerMinuteCrawlRate,omitempty" tf:"max_urls_per_minute_crawl_rate,omitempty"` + + // Configuration information required to connect to your internal websites via a web proxy. You must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is "a.example.com" and the port is 443, the standard port for HTTPS. Web proxy credentials are optional and you can use them to connect to a web proxy server that requires basic authentication. To store web proxy credentials, you use a secret in AWS Secrets Manager. Detailed below. + // +kubebuilder:validation:Optional + ProxyConfiguration *ProxyConfigurationParameters `json:"proxyConfiguration,omitempty" tf:"proxy_configuration,omitempty"` + + // A list of regular expression patterns to exclude certain URLs to crawl. URLs that match the patterns are excluded from the index. URLs that don't match the patterns are included in the index. If a URL matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the URL file isn't included in the index. Array Members: Minimum number of 0 items. Maximum number of 100 items. Length Constraints: Minimum length of 1. Maximum length of 150. + // +kubebuilder:validation:Optional + // +listType=set + URLExclusionPatterns []*string `json:"urlExclusionPatterns,omitempty" tf:"url_exclusion_patterns,omitempty"` + + // A list of regular expression patterns to include certain URLs to crawl. URLs that match the patterns are included in the index. URLs that don't match the patterns are excluded from the index. If a URL matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the URL file isn't included in the index. Array Members: Minimum number of 0 items. Maximum number of 100 items. Length Constraints: Minimum length of 1. Maximum length of 150. 
+ // +kubebuilder:validation:Optional + // +listType=set + URLInclusionPatterns []*string `json:"urlInclusionPatterns,omitempty" tf:"url_inclusion_patterns,omitempty"` + + // A block that specifies the seed or starting point URLs of the websites or the sitemap URLs of the websites you want to crawl. You can include website subdomains. You can list up to 100 seed URLs and up to 3 sitemap URLs. You can only crawl websites that use the secure communication protocol, Hypertext Transfer Protocol Secure (HTTPS). If you receive an error when crawling a website, it could be that the website is blocked from crawling. When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use Amazon Kendra Web Crawler to index your own webpages, or webpages that you have authorization to index. Detailed below. + // +kubebuilder:validation:Optional + Urls *UrlsParameters `json:"urls" tf:"urls,omitempty"` +} + +// DataSourceSpec defines the desired state of DataSource +type DataSourceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataSourceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DataSourceInitParameters `json:"initProvider,omitempty"` +} + +// DataSourceStatus defines the observed state of DataSource. 
+type DataSourceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataSourceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataSource is the Schema for the DataSources API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DataSource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec DataSourceSpec `json:"spec"` + Status DataSourceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataSourceList contains a list of DataSources +type DataSourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataSource `json:"items"` +} + +// Repository type metadata. 
+var ( + DataSource_Kind = "DataSource" + DataSource_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataSource_Kind}.String() + DataSource_KindAPIVersion = DataSource_Kind + "." + CRDGroupVersion.String() + DataSource_GroupVersionKind = CRDGroupVersion.WithKind(DataSource_Kind) +) + +func init() { + SchemeBuilder.Register(&DataSource{}, &DataSourceList{}) +} diff --git a/apis/kendra/v1beta2/zz_experience_terraformed.go b/apis/kendra/v1beta2/zz_experience_terraformed.go new file mode 100755 index 0000000000..c2eb3dd03f --- /dev/null +++ b/apis/kendra/v1beta2/zz_experience_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Experience +func (mg *Experience) GetTerraformResourceType() string { + return "aws_kendra_experience" +} + +// GetConnectionDetailsMapping for this Experience +func (tr *Experience) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Experience +func (tr *Experience) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Experience +func (tr *Experience) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Experience +func (tr *Experience) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this Experience +func (tr *Experience) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Experience +func (tr *Experience) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Experience +func (tr *Experience) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Experience +func (tr *Experience) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Experience using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Experience) LateInitialize(attrs []byte) (bool, error) { + params := &ExperienceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Experience) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kendra/v1beta2/zz_experience_types.go b/apis/kendra/v1beta2/zz_experience_types.go new file mode 100755 index 0000000000..71cc04894f --- /dev/null +++ b/apis/kendra/v1beta2/zz_experience_types.go @@ -0,0 +1,302 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContentSourceConfigurationInitParameters struct { + + // The identifiers of the data sources you want to use for your Amazon Kendra experience. Maximum number of 100 items. + // +listType=set + DataSourceIds []*string `json:"dataSourceIds,omitempty" tf:"data_source_ids,omitempty"` + + // Whether to use documents you indexed directly using the BatchPutDocument API. Defaults to false. + DirectPutContent *bool `json:"directPutContent,omitempty" tf:"direct_put_content,omitempty"` + + // The identifier of the FAQs that you want to use for your Amazon Kendra experience. Maximum number of 100 items. 
+ // +listType=set + FaqIds []*string `json:"faqIds,omitempty" tf:"faq_ids,omitempty"` +} + +type ContentSourceConfigurationObservation struct { + + // The identifiers of the data sources you want to use for your Amazon Kendra experience. Maximum number of 100 items. + // +listType=set + DataSourceIds []*string `json:"dataSourceIds,omitempty" tf:"data_source_ids,omitempty"` + + // Whether to use documents you indexed directly using the BatchPutDocument API. Defaults to false. + DirectPutContent *bool `json:"directPutContent,omitempty" tf:"direct_put_content,omitempty"` + + // The identifier of the FAQs that you want to use for your Amazon Kendra experience. Maximum number of 100 items. + // +listType=set + FaqIds []*string `json:"faqIds,omitempty" tf:"faq_ids,omitempty"` +} + +type ContentSourceConfigurationParameters struct { + + // The identifiers of the data sources you want to use for your Amazon Kendra experience. Maximum number of 100 items. + // +kubebuilder:validation:Optional + // +listType=set + DataSourceIds []*string `json:"dataSourceIds,omitempty" tf:"data_source_ids,omitempty"` + + // Whether to use documents you indexed directly using the BatchPutDocument API. Defaults to false. + // +kubebuilder:validation:Optional + DirectPutContent *bool `json:"directPutContent,omitempty" tf:"direct_put_content,omitempty"` + + // The identifier of the FAQs that you want to use for your Amazon Kendra experience. Maximum number of 100 items. + // +kubebuilder:validation:Optional + // +listType=set + FaqIds []*string `json:"faqIds,omitempty" tf:"faq_ids,omitempty"` +} + +type EndpointsInitParameters struct { +} + +type EndpointsObservation struct { + + // The endpoint of your Amazon Kendra experience. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The type of endpoint for your Amazon Kendra experience. 
+ EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` +} + +type EndpointsParameters struct { +} + +type ExperienceConfigurationInitParameters struct { + + // The identifiers of your data sources and FAQs. Or, you can specify that you want to use documents indexed via the BatchPutDocument API. Detailed below. + ContentSourceConfiguration *ContentSourceConfigurationInitParameters `json:"contentSourceConfiguration,omitempty" tf:"content_source_configuration,omitempty"` + + // The AWS SSO field name that contains the identifiers of your users, such as their emails. Detailed below. + UserIdentityConfiguration *UserIdentityConfigurationInitParameters `json:"userIdentityConfiguration,omitempty" tf:"user_identity_configuration,omitempty"` +} + +type ExperienceConfigurationObservation struct { + + // The identifiers of your data sources and FAQs. Or, you can specify that you want to use documents indexed via the BatchPutDocument API. Detailed below. + ContentSourceConfiguration *ContentSourceConfigurationObservation `json:"contentSourceConfiguration,omitempty" tf:"content_source_configuration,omitempty"` + + // The AWS SSO field name that contains the identifiers of your users, such as their emails. Detailed below. + UserIdentityConfiguration *UserIdentityConfigurationObservation `json:"userIdentityConfiguration,omitempty" tf:"user_identity_configuration,omitempty"` +} + +type ExperienceConfigurationParameters struct { + + // The identifiers of your data sources and FAQs. Or, you can specify that you want to use documents indexed via the BatchPutDocument API. Detailed below. + // +kubebuilder:validation:Optional + ContentSourceConfiguration *ContentSourceConfigurationParameters `json:"contentSourceConfiguration,omitempty" tf:"content_source_configuration,omitempty"` + + // The AWS SSO field name that contains the identifiers of your users, such as their emails. Detailed below. 
+ // +kubebuilder:validation:Optional + UserIdentityConfiguration *UserIdentityConfigurationParameters `json:"userIdentityConfiguration,omitempty" tf:"user_identity_configuration,omitempty"` +} + +type ExperienceInitParameters struct { + + // Configuration information for your Amazon Kendra experience. Detailed below. + Configuration *ExperienceConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A description for your Amazon Kendra experience. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The identifier of the index for your Amazon Kendra experience. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kendra/v1beta2.Index + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // Reference to a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDRef *v1.Reference `json:"indexIdRef,omitempty" tf:"-"` + + // Selector for a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDSelector *v1.Selector `json:"indexIdSelector,omitempty" tf:"-"` + + // A name for your Amazon Kendra experience. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Amazon Resource Name (ARN) of a role with permission to access Query API, QuerySuggestions API, SubmitFeedback API, and AWS SSO that stores your user and group information. For more information, see IAM roles for Amazon Kendra. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type ExperienceObservation struct { + + // ARN of the Experience. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration information for your Amazon Kendra experience. Detailed below. + Configuration *ExperienceConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A description for your Amazon Kendra experience. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Shows the endpoint URLs for your Amazon Kendra experiences. The URLs are unique and fully hosted by AWS. + Endpoints []EndpointsObservation `json:"endpoints,omitempty" tf:"endpoints,omitempty"` + + // The unique identifier of the experience. + ExperienceID *string `json:"experienceId,omitempty" tf:"experience_id,omitempty"` + + // The unique identifiers of the experience and index separated by a slash (/). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The identifier of the index for your Amazon Kendra experience. + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // A name for your Amazon Kendra experience. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Amazon Resource Name (ARN) of a role with permission to access Query API, QuerySuggestions API, SubmitFeedback API, and AWS SSO that stores your user and group information. For more information, see IAM roles for Amazon Kendra. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The current processing status of your Amazon Kendra experience. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ExperienceParameters struct { + + // Configuration information for your Amazon Kendra experience. 
Detailed below. + // +kubebuilder:validation:Optional + Configuration *ExperienceConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A description for your Amazon Kendra experience. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The identifier of the index for your Amazon Kendra experience. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kendra/v1beta2.Index + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // Reference to a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDRef *v1.Reference `json:"indexIdRef,omitempty" tf:"-"` + + // Selector for a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDSelector *v1.Selector `json:"indexIdSelector,omitempty" tf:"-"` + + // A name for your Amazon Kendra experience. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Amazon Resource Name (ARN) of a role with permission to access Query API, QuerySuggestions API, SubmitFeedback API, and AWS SSO that stores your user and group information. For more information, see IAM roles for Amazon Kendra. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type UserIdentityConfigurationInitParameters struct { + + // The AWS SSO field name that contains the identifiers of your users, such as their emails. + IdentityAttributeName *string `json:"identityAttributeName,omitempty" tf:"identity_attribute_name,omitempty"` +} + +type UserIdentityConfigurationObservation struct { + + // The AWS SSO field name that contains the identifiers of your users, such as their emails. + IdentityAttributeName *string `json:"identityAttributeName,omitempty" tf:"identity_attribute_name,omitempty"` +} + +type UserIdentityConfigurationParameters struct { + + // The AWS SSO field name that contains the identifiers of your users, such as their emails. + // +kubebuilder:validation:Optional + IdentityAttributeName *string `json:"identityAttributeName" tf:"identity_attribute_name,omitempty"` +} + +// ExperienceSpec defines the desired state of Experience +type ExperienceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ExperienceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ExperienceInitParameters `json:"initProvider,omitempty"` +} + +// ExperienceStatus defines the observed state of Experience. +type ExperienceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ExperienceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Experience is the Schema for the Experiences API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Experience struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ExperienceSpec `json:"spec"` + Status ExperienceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExperienceList contains a list of Experiences +type ExperienceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Experience `json:"items"` +} + +// Repository type metadata. +var ( + Experience_Kind = "Experience" + Experience_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Experience_Kind}.String() + Experience_KindAPIVersion = Experience_Kind + "." 
+ CRDGroupVersion.String() + Experience_GroupVersionKind = CRDGroupVersion.WithKind(Experience_Kind) +) + +func init() { + SchemeBuilder.Register(&Experience{}, &ExperienceList{}) +} diff --git a/apis/kendra/v1beta2/zz_generated.conversion_hubs.go b/apis/kendra/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..905e840f60 --- /dev/null +++ b/apis/kendra/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *DataSource) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Experience) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Index) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *QuerySuggestionsBlockList) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Thesaurus) Hub() {} diff --git a/apis/kendra/v1beta2/zz_generated.deepcopy.go b/apis/kendra/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f821e1a3e0 --- /dev/null +++ b/apis/kendra/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,5758 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlListConfigurationInitParameters) DeepCopyInto(out *AccessControlListConfigurationInitParameters) { + *out = *in + if in.KeyPath != nil { + in, out := &in.KeyPath, &out.KeyPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlListConfigurationInitParameters. +func (in *AccessControlListConfigurationInitParameters) DeepCopy() *AccessControlListConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AccessControlListConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlListConfigurationObservation) DeepCopyInto(out *AccessControlListConfigurationObservation) { + *out = *in + if in.KeyPath != nil { + in, out := &in.KeyPath, &out.KeyPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlListConfigurationObservation. +func (in *AccessControlListConfigurationObservation) DeepCopy() *AccessControlListConfigurationObservation { + if in == nil { + return nil + } + out := new(AccessControlListConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlListConfigurationParameters) DeepCopyInto(out *AccessControlListConfigurationParameters) { + *out = *in + if in.KeyPath != nil { + in, out := &in.KeyPath, &out.KeyPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlListConfigurationParameters. 
+func (in *AccessControlListConfigurationParameters) DeepCopy() *AccessControlListConfigurationParameters { + if in == nil { + return nil + } + out := new(AccessControlListConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationInitParameters) DeepCopyInto(out *AuthenticationConfigurationInitParameters) { + *out = *in + if in.BasicAuthentication != nil { + in, out := &in.BasicAuthentication, &out.BasicAuthentication + *out = make([]BasicAuthenticationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationInitParameters. +func (in *AuthenticationConfigurationInitParameters) DeepCopy() *AuthenticationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationObservation) DeepCopyInto(out *AuthenticationConfigurationObservation) { + *out = *in + if in.BasicAuthentication != nil { + in, out := &in.BasicAuthentication, &out.BasicAuthentication + *out = make([]BasicAuthenticationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationObservation. 
+func (in *AuthenticationConfigurationObservation) DeepCopy() *AuthenticationConfigurationObservation { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationParameters) DeepCopyInto(out *AuthenticationConfigurationParameters) { + *out = *in + if in.BasicAuthentication != nil { + in, out := &in.BasicAuthentication, &out.BasicAuthentication + *out = make([]BasicAuthenticationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationParameters. +func (in *AuthenticationConfigurationParameters) DeepCopy() *AuthenticationConfigurationParameters { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BasicAuthenticationInitParameters) DeepCopyInto(out *BasicAuthenticationInitParameters) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(string) + **out = **in + } + if in.CredentialsRef != nil { + in, out := &in.CredentialsRef, &out.CredentialsRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CredentialsSelector != nil { + in, out := &in.CredentialsSelector, &out.CredentialsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthenticationInitParameters. +func (in *BasicAuthenticationInitParameters) DeepCopy() *BasicAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(BasicAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthenticationObservation) DeepCopyInto(out *BasicAuthenticationObservation) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(string) + **out = **in + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthenticationObservation. 
+func (in *BasicAuthenticationObservation) DeepCopy() *BasicAuthenticationObservation { + if in == nil { + return nil + } + out := new(BasicAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthenticationParameters) DeepCopyInto(out *BasicAuthenticationParameters) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(string) + **out = **in + } + if in.CredentialsRef != nil { + in, out := &in.CredentialsRef, &out.CredentialsRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CredentialsSelector != nil { + in, out := &in.CredentialsSelector, &out.CredentialsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthenticationParameters. +func (in *BasicAuthenticationParameters) DeepCopy() *BasicAuthenticationParameters { + if in == nil { + return nil + } + out := new(BasicAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityUnitsInitParameters) DeepCopyInto(out *CapacityUnitsInitParameters) { + *out = *in + if in.QueryCapacityUnits != nil { + in, out := &in.QueryCapacityUnits, &out.QueryCapacityUnits + *out = new(float64) + **out = **in + } + if in.StorageCapacityUnits != nil { + in, out := &in.StorageCapacityUnits, &out.StorageCapacityUnits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityUnitsInitParameters. 
+func (in *CapacityUnitsInitParameters) DeepCopy() *CapacityUnitsInitParameters { + if in == nil { + return nil + } + out := new(CapacityUnitsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityUnitsObservation) DeepCopyInto(out *CapacityUnitsObservation) { + *out = *in + if in.QueryCapacityUnits != nil { + in, out := &in.QueryCapacityUnits, &out.QueryCapacityUnits + *out = new(float64) + **out = **in + } + if in.StorageCapacityUnits != nil { + in, out := &in.StorageCapacityUnits, &out.StorageCapacityUnits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityUnitsObservation. +func (in *CapacityUnitsObservation) DeepCopy() *CapacityUnitsObservation { + if in == nil { + return nil + } + out := new(CapacityUnitsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityUnitsParameters) DeepCopyInto(out *CapacityUnitsParameters) { + *out = *in + if in.QueryCapacityUnits != nil { + in, out := &in.QueryCapacityUnits, &out.QueryCapacityUnits + *out = new(float64) + **out = **in + } + if in.StorageCapacityUnits != nil { + in, out := &in.StorageCapacityUnits, &out.StorageCapacityUnits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityUnitsParameters. +func (in *CapacityUnitsParameters) DeepCopy() *CapacityUnitsParameters { + if in == nil { + return nil + } + out := new(CapacityUnitsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { + *out = *in + if in.ConditionDocumentAttributeKey != nil { + in, out := &in.ConditionDocumentAttributeKey, &out.ConditionDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.ConditionOnValue != nil { + in, out := &in.ConditionOnValue, &out.ConditionOnValue + *out = new(ConditionOnValueInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters. +func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) { + *out = *in + if in.ConditionDocumentAttributeKey != nil { + in, out := &in.ConditionDocumentAttributeKey, &out.ConditionDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.ConditionOnValue != nil { + in, out := &in.ConditionOnValue, &out.ConditionOnValue + *out = new(ConditionOnValueObservation) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation. +func (in *ConditionObservation) DeepCopy() *ConditionObservation { + if in == nil { + return nil + } + out := new(ConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionOnValueInitParameters) DeepCopyInto(out *ConditionOnValueInitParameters) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionOnValueInitParameters. +func (in *ConditionOnValueInitParameters) DeepCopy() *ConditionOnValueInitParameters { + if in == nil { + return nil + } + out := new(ConditionOnValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionOnValueObservation) DeepCopyInto(out *ConditionOnValueObservation) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionOnValueObservation. 
+func (in *ConditionOnValueObservation) DeepCopy() *ConditionOnValueObservation { + if in == nil { + return nil + } + out := new(ConditionOnValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionOnValueParameters) DeepCopyInto(out *ConditionOnValueParameters) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionOnValueParameters. +func (in *ConditionOnValueParameters) DeepCopy() *ConditionOnValueParameters { + if in == nil { + return nil + } + out := new(ConditionOnValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) { + *out = *in + if in.ConditionDocumentAttributeKey != nil { + in, out := &in.ConditionDocumentAttributeKey, &out.ConditionDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.ConditionOnValue != nil { + in, out := &in.ConditionOnValue, &out.ConditionOnValue + *out = new(ConditionOnValueParameters) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters. +func (in *ConditionParameters) DeepCopy() *ConditionParameters { + if in == nil { + return nil + } + out := new(ConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WebCrawlerConfiguration != nil { + in, out := &in.WebCrawlerConfiguration, &out.WebCrawlerConfiguration + *out = new(WebCrawlerConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.WebCrawlerConfiguration != nil { + in, out := &in.WebCrawlerConfiguration, &out.WebCrawlerConfiguration + *out = new(WebCrawlerConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.WebCrawlerConfiguration != nil { + in, out := &in.WebCrawlerConfiguration, &out.WebCrawlerConfiguration + *out = new(WebCrawlerConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentSourceConfigurationInitParameters) DeepCopyInto(out *ContentSourceConfigurationInitParameters) { + *out = *in + if in.DataSourceIds != nil { + in, out := &in.DataSourceIds, &out.DataSourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DirectPutContent != nil { + in, out := &in.DirectPutContent, &out.DirectPutContent + *out = new(bool) + **out = **in + } + if in.FaqIds != nil { + in, out := &in.FaqIds, &out.FaqIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentSourceConfigurationInitParameters. +func (in *ContentSourceConfigurationInitParameters) DeepCopy() *ContentSourceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ContentSourceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentSourceConfigurationObservation) DeepCopyInto(out *ContentSourceConfigurationObservation) { + *out = *in + if in.DataSourceIds != nil { + in, out := &in.DataSourceIds, &out.DataSourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DirectPutContent != nil { + in, out := &in.DirectPutContent, &out.DirectPutContent + *out = new(bool) + **out = **in + } + if in.FaqIds != nil { + in, out := &in.FaqIds, &out.FaqIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentSourceConfigurationObservation. +func (in *ContentSourceConfigurationObservation) DeepCopy() *ContentSourceConfigurationObservation { + if in == nil { + return nil + } + out := new(ContentSourceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentSourceConfigurationParameters) DeepCopyInto(out *ContentSourceConfigurationParameters) { + *out = *in + if in.DataSourceIds != nil { + in, out := &in.DataSourceIds, &out.DataSourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DirectPutContent != nil { + in, out := &in.DirectPutContent, &out.DirectPutContent + *out = new(bool) + **out = **in + } + if in.FaqIds != nil { + in, out := &in.FaqIds, &out.FaqIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentSourceConfigurationParameters. +func (in *ContentSourceConfigurationParameters) DeepCopy() *ContentSourceConfigurationParameters { + if in == nil { + return nil + } + out := new(ContentSourceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomDocumentEnrichmentConfigurationInitParameters) DeepCopyInto(out *CustomDocumentEnrichmentConfigurationInitParameters) { + *out = *in + if in.InlineConfigurations != nil { + in, out := &in.InlineConfigurations, &out.InlineConfigurations + *out = make([]InlineConfigurationsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PostExtractionHookConfiguration != nil { + in, out := &in.PostExtractionHookConfiguration, &out.PostExtractionHookConfiguration + *out = new(PostExtractionHookConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PreExtractionHookConfiguration != nil { + in, out := &in.PreExtractionHookConfiguration, &out.PreExtractionHookConfiguration + *out = new(PreExtractionHookConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDocumentEnrichmentConfigurationInitParameters. +func (in *CustomDocumentEnrichmentConfigurationInitParameters) DeepCopy() *CustomDocumentEnrichmentConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CustomDocumentEnrichmentConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomDocumentEnrichmentConfigurationObservation) DeepCopyInto(out *CustomDocumentEnrichmentConfigurationObservation) { + *out = *in + if in.InlineConfigurations != nil { + in, out := &in.InlineConfigurations, &out.InlineConfigurations + *out = make([]InlineConfigurationsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PostExtractionHookConfiguration != nil { + in, out := &in.PostExtractionHookConfiguration, &out.PostExtractionHookConfiguration + *out = new(PostExtractionHookConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.PreExtractionHookConfiguration != nil { + in, out := &in.PreExtractionHookConfiguration, &out.PreExtractionHookConfiguration + *out = new(PreExtractionHookConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDocumentEnrichmentConfigurationObservation. +func (in *CustomDocumentEnrichmentConfigurationObservation) DeepCopy() *CustomDocumentEnrichmentConfigurationObservation { + if in == nil { + return nil + } + out := new(CustomDocumentEnrichmentConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomDocumentEnrichmentConfigurationParameters) DeepCopyInto(out *CustomDocumentEnrichmentConfigurationParameters) { + *out = *in + if in.InlineConfigurations != nil { + in, out := &in.InlineConfigurations, &out.InlineConfigurations + *out = make([]InlineConfigurationsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PostExtractionHookConfiguration != nil { + in, out := &in.PostExtractionHookConfiguration, &out.PostExtractionHookConfiguration + *out = new(PostExtractionHookConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.PreExtractionHookConfiguration != nil { + in, out := &in.PreExtractionHookConfiguration, &out.PreExtractionHookConfiguration + *out = new(PreExtractionHookConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDocumentEnrichmentConfigurationParameters. +func (in *CustomDocumentEnrichmentConfigurationParameters) DeepCopy() *CustomDocumentEnrichmentConfigurationParameters { + if in == nil { + return nil + } + out := new(CustomDocumentEnrichmentConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSource) DeepCopyInto(out *DataSource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource. 
+func (in *DataSource) DeepCopy() *DataSource { + if in == nil { + return nil + } + out := new(DataSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceInitParameters) DeepCopyInto(out *DataSourceInitParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomDocumentEnrichmentConfiguration != nil { + in, out := &in.CustomDocumentEnrichmentConfiguration, &out.CustomDocumentEnrichmentConfiguration + *out = new(CustomDocumentEnrichmentConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.IndexIDRef != nil { + in, out := &in.IndexIDRef, &out.IndexIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IndexIDSelector != nil { + in, out := &in.IndexIDSelector, &out.IndexIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := 
&in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceInitParameters. +func (in *DataSourceInitParameters) DeepCopy() *DataSourceInitParameters { + if in == nil { + return nil + } + out := new(DataSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceList) DeepCopyInto(out *DataSourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceList. +func (in *DataSourceList) DeepCopy() *DataSourceList { + if in == nil { + return nil + } + out := new(DataSourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSourceObservation) DeepCopyInto(out *DataSourceObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.CustomDocumentEnrichmentConfiguration != nil { + in, out := &in.CustomDocumentEnrichmentConfiguration, &out.CustomDocumentEnrichmentConfiguration + *out = new(CustomDocumentEnrichmentConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.DataSourceID != nil { + in, out := &in.DataSourceID, &out.DataSourceID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := 
&inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceObservation. +func (in *DataSourceObservation) DeepCopy() *DataSourceObservation { + if in == nil { + return nil + } + out := new(DataSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSourceParameters) DeepCopyInto(out *DataSourceParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomDocumentEnrichmentConfiguration != nil { + in, out := &in.CustomDocumentEnrichmentConfiguration, &out.CustomDocumentEnrichmentConfiguration + *out = new(CustomDocumentEnrichmentConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.IndexIDRef != nil { + in, out := &in.IndexIDRef, &out.IndexIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IndexIDSelector != nil { + in, out := &in.IndexIDSelector, &out.IndexIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceParameters. +func (in *DataSourceParameters) DeepCopy() *DataSourceParameters { + if in == nil { + return nil + } + out := new(DataSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceSpec) DeepCopyInto(out *DataSourceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceSpec. +func (in *DataSourceSpec) DeepCopy() *DataSourceSpec { + if in == nil { + return nil + } + out := new(DataSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceStatus) DeepCopyInto(out *DataSourceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceStatus. +func (in *DataSourceStatus) DeepCopy() *DataSourceStatus { + if in == nil { + return nil + } + out := new(DataSourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DocumentMetadataConfigurationUpdatesInitParameters) DeepCopyInto(out *DocumentMetadataConfigurationUpdatesInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Relevance != nil { + in, out := &in.Relevance, &out.Relevance + *out = new(RelevanceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = new(SearchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentMetadataConfigurationUpdatesInitParameters. +func (in *DocumentMetadataConfigurationUpdatesInitParameters) DeepCopy() *DocumentMetadataConfigurationUpdatesInitParameters { + if in == nil { + return nil + } + out := new(DocumentMetadataConfigurationUpdatesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentMetadataConfigurationUpdatesObservation) DeepCopyInto(out *DocumentMetadataConfigurationUpdatesObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Relevance != nil { + in, out := &in.Relevance, &out.Relevance + *out = new(RelevanceObservation) + (*in).DeepCopyInto(*out) + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = new(SearchObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentMetadataConfigurationUpdatesObservation. 
+func (in *DocumentMetadataConfigurationUpdatesObservation) DeepCopy() *DocumentMetadataConfigurationUpdatesObservation { + if in == nil { + return nil + } + out := new(DocumentMetadataConfigurationUpdatesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentMetadataConfigurationUpdatesParameters) DeepCopyInto(out *DocumentMetadataConfigurationUpdatesParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Relevance != nil { + in, out := &in.Relevance, &out.Relevance + *out = new(RelevanceParameters) + (*in).DeepCopyInto(*out) + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = new(SearchParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentMetadataConfigurationUpdatesParameters. +func (in *DocumentMetadataConfigurationUpdatesParameters) DeepCopy() *DocumentMetadataConfigurationUpdatesParameters { + if in == nil { + return nil + } + out := new(DocumentMetadataConfigurationUpdatesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentsMetadataConfigurationInitParameters) DeepCopyInto(out *DocumentsMetadataConfigurationInitParameters) { + *out = *in + if in.S3Prefix != nil { + in, out := &in.S3Prefix, &out.S3Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentsMetadataConfigurationInitParameters. 
+func (in *DocumentsMetadataConfigurationInitParameters) DeepCopy() *DocumentsMetadataConfigurationInitParameters { + if in == nil { + return nil + } + out := new(DocumentsMetadataConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentsMetadataConfigurationObservation) DeepCopyInto(out *DocumentsMetadataConfigurationObservation) { + *out = *in + if in.S3Prefix != nil { + in, out := &in.S3Prefix, &out.S3Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentsMetadataConfigurationObservation. +func (in *DocumentsMetadataConfigurationObservation) DeepCopy() *DocumentsMetadataConfigurationObservation { + if in == nil { + return nil + } + out := new(DocumentsMetadataConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentsMetadataConfigurationParameters) DeepCopyInto(out *DocumentsMetadataConfigurationParameters) { + *out = *in + if in.S3Prefix != nil { + in, out := &in.S3Prefix, &out.S3Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentsMetadataConfigurationParameters. +func (in *DocumentsMetadataConfigurationParameters) DeepCopy() *DocumentsMetadataConfigurationParameters { + if in == nil { + return nil + } + out := new(DocumentsMetadataConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointsInitParameters) DeepCopyInto(out *EndpointsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsInitParameters. +func (in *EndpointsInitParameters) DeepCopy() *EndpointsInitParameters { + if in == nil { + return nil + } + out := new(EndpointsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsObservation) DeepCopyInto(out *EndpointsObservation) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsObservation. +func (in *EndpointsObservation) DeepCopy() *EndpointsObservation { + if in == nil { + return nil + } + out := new(EndpointsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsParameters) DeepCopyInto(out *EndpointsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsParameters. +func (in *EndpointsParameters) DeepCopy() *EndpointsParameters { + if in == nil { + return nil + } + out := new(EndpointsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Experience) DeepCopyInto(out *Experience) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Experience. +func (in *Experience) DeepCopy() *Experience { + if in == nil { + return nil + } + out := new(Experience) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Experience) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperienceConfigurationInitParameters) DeepCopyInto(out *ExperienceConfigurationInitParameters) { + *out = *in + if in.ContentSourceConfiguration != nil { + in, out := &in.ContentSourceConfiguration, &out.ContentSourceConfiguration + *out = new(ContentSourceConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UserIdentityConfiguration != nil { + in, out := &in.UserIdentityConfiguration, &out.UserIdentityConfiguration + *out = new(UserIdentityConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperienceConfigurationInitParameters. +func (in *ExperienceConfigurationInitParameters) DeepCopy() *ExperienceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ExperienceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExperienceConfigurationObservation) DeepCopyInto(out *ExperienceConfigurationObservation) { + *out = *in + if in.ContentSourceConfiguration != nil { + in, out := &in.ContentSourceConfiguration, &out.ContentSourceConfiguration + *out = new(ContentSourceConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.UserIdentityConfiguration != nil { + in, out := &in.UserIdentityConfiguration, &out.UserIdentityConfiguration + *out = new(UserIdentityConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperienceConfigurationObservation. +func (in *ExperienceConfigurationObservation) DeepCopy() *ExperienceConfigurationObservation { + if in == nil { + return nil + } + out := new(ExperienceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperienceConfigurationParameters) DeepCopyInto(out *ExperienceConfigurationParameters) { + *out = *in + if in.ContentSourceConfiguration != nil { + in, out := &in.ContentSourceConfiguration, &out.ContentSourceConfiguration + *out = new(ContentSourceConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.UserIdentityConfiguration != nil { + in, out := &in.UserIdentityConfiguration, &out.UserIdentityConfiguration + *out = new(UserIdentityConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperienceConfigurationParameters. +func (in *ExperienceConfigurationParameters) DeepCopy() *ExperienceConfigurationParameters { + if in == nil { + return nil + } + out := new(ExperienceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExperienceInitParameters) DeepCopyInto(out *ExperienceInitParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ExperienceConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.IndexIDRef != nil { + in, out := &in.IndexIDRef, &out.IndexIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IndexIDSelector != nil { + in, out := &in.IndexIDSelector, &out.IndexIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperienceInitParameters. +func (in *ExperienceInitParameters) DeepCopy() *ExperienceInitParameters { + if in == nil { + return nil + } + out := new(ExperienceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExperienceList) DeepCopyInto(out *ExperienceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Experience, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperienceList. +func (in *ExperienceList) DeepCopy() *ExperienceList { + if in == nil { + return nil + } + out := new(ExperienceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExperienceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperienceObservation) DeepCopyInto(out *ExperienceObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ExperienceConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]EndpointsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExperienceID != nil { + in, out := &in.ExperienceID, &out.ExperienceID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if 
in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperienceObservation. +func (in *ExperienceObservation) DeepCopy() *ExperienceObservation { + if in == nil { + return nil + } + out := new(ExperienceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperienceParameters) DeepCopyInto(out *ExperienceParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ExperienceConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.IndexIDRef != nil { + in, out := &in.IndexIDRef, &out.IndexIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IndexIDSelector != nil { + in, out := &in.IndexIDSelector, &out.IndexIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying 
the receiver, creating a new ExperienceParameters. +func (in *ExperienceParameters) DeepCopy() *ExperienceParameters { + if in == nil { + return nil + } + out := new(ExperienceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperienceSpec) DeepCopyInto(out *ExperienceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperienceSpec. +func (in *ExperienceSpec) DeepCopy() *ExperienceSpec { + if in == nil { + return nil + } + out := new(ExperienceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExperienceStatus) DeepCopyInto(out *ExperienceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperienceStatus. +func (in *ExperienceStatus) DeepCopy() *ExperienceStatus { + if in == nil { + return nil + } + out := new(ExperienceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FaqStatisticsInitParameters) DeepCopyInto(out *FaqStatisticsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FaqStatisticsInitParameters. 
+func (in *FaqStatisticsInitParameters) DeepCopy() *FaqStatisticsInitParameters { + if in == nil { + return nil + } + out := new(FaqStatisticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FaqStatisticsObservation) DeepCopyInto(out *FaqStatisticsObservation) { + *out = *in + if in.IndexedQuestionAnswersCount != nil { + in, out := &in.IndexedQuestionAnswersCount, &out.IndexedQuestionAnswersCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FaqStatisticsObservation. +func (in *FaqStatisticsObservation) DeepCopy() *FaqStatisticsObservation { + if in == nil { + return nil + } + out := new(FaqStatisticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FaqStatisticsParameters) DeepCopyInto(out *FaqStatisticsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FaqStatisticsParameters. +func (in *FaqStatisticsParameters) DeepCopy() *FaqStatisticsParameters { + if in == nil { + return nil + } + out := new(FaqStatisticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Index) DeepCopyInto(out *Index) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Index. 
+func (in *Index) DeepCopy() *Index { + if in == nil { + return nil + } + out := new(Index) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Index) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexInitParameters) DeepCopyInto(out *IndexInitParameters) { + *out = *in + if in.CapacityUnits != nil { + in, out := &in.CapacityUnits, &out.CapacityUnits + *out = new(CapacityUnitsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DocumentMetadataConfigurationUpdates != nil { + in, out := &in.DocumentMetadataConfigurationUpdates, &out.DocumentMetadataConfigurationUpdates + *out = make([]DocumentMetadataConfigurationUpdatesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServerSideEncryptionConfiguration != nil { + in, out := &in.ServerSideEncryptionConfiguration, &out.ServerSideEncryptionConfiguration + *out = new(ServerSideEncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, 
&out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserContextPolicy != nil { + in, out := &in.UserContextPolicy, &out.UserContextPolicy + *out = new(string) + **out = **in + } + if in.UserGroupResolutionConfiguration != nil { + in, out := &in.UserGroupResolutionConfiguration, &out.UserGroupResolutionConfiguration + *out = new(UserGroupResolutionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UserTokenConfigurations != nil { + in, out := &in.UserTokenConfigurations, &out.UserTokenConfigurations + *out = new(UserTokenConfigurationsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexInitParameters. +func (in *IndexInitParameters) DeepCopy() *IndexInitParameters { + if in == nil { + return nil + } + out := new(IndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexList) DeepCopyInto(out *IndexList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Index, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexList. +func (in *IndexList) DeepCopy() *IndexList { + if in == nil { + return nil + } + out := new(IndexList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IndexList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexObservation) DeepCopyInto(out *IndexObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CapacityUnits != nil { + in, out := &in.CapacityUnits, &out.CapacityUnits + *out = new(CapacityUnitsObservation) + (*in).DeepCopyInto(*out) + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DocumentMetadataConfigurationUpdates != nil { + in, out := &in.DocumentMetadataConfigurationUpdates, &out.DocumentMetadataConfigurationUpdates + *out = make([]DocumentMetadataConfigurationUpdatesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexStatistics != nil { + in, out := &in.IndexStatistics, &out.IndexStatistics + *out = make([]IndexStatisticsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.ServerSideEncryptionConfiguration != nil { + in, out := &in.ServerSideEncryptionConfiguration, &out.ServerSideEncryptionConfiguration + *out = 
new(ServerSideEncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } + if in.UserContextPolicy != nil { + in, out := &in.UserContextPolicy, &out.UserContextPolicy + *out = new(string) + **out = **in + } + if in.UserGroupResolutionConfiguration != nil { + in, out := &in.UserGroupResolutionConfiguration, &out.UserGroupResolutionConfiguration + *out = new(UserGroupResolutionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.UserTokenConfigurations != nil { + in, out := &in.UserTokenConfigurations, &out.UserTokenConfigurations + *out = new(UserTokenConfigurationsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexObservation. +func (in *IndexObservation) DeepCopy() *IndexObservation { + if in == nil { + return nil + } + out := new(IndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexParameters) DeepCopyInto(out *IndexParameters) { + *out = *in + if in.CapacityUnits != nil { + in, out := &in.CapacityUnits, &out.CapacityUnits + *out = new(CapacityUnitsParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DocumentMetadataConfigurationUpdates != nil { + in, out := &in.DocumentMetadataConfigurationUpdates, &out.DocumentMetadataConfigurationUpdates + *out = make([]DocumentMetadataConfigurationUpdatesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServerSideEncryptionConfiguration != nil { + in, out := &in.ServerSideEncryptionConfiguration, &out.ServerSideEncryptionConfiguration + *out = new(ServerSideEncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserContextPolicy != nil { + in, out := &in.UserContextPolicy, &out.UserContextPolicy + *out = new(string) + **out = **in 
+ } + if in.UserGroupResolutionConfiguration != nil { + in, out := &in.UserGroupResolutionConfiguration, &out.UserGroupResolutionConfiguration + *out = new(UserGroupResolutionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.UserTokenConfigurations != nil { + in, out := &in.UserTokenConfigurations, &out.UserTokenConfigurations + *out = new(UserTokenConfigurationsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexParameters. +func (in *IndexParameters) DeepCopy() *IndexParameters { + if in == nil { + return nil + } + out := new(IndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexSpec) DeepCopyInto(out *IndexSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexSpec. +func (in *IndexSpec) DeepCopy() *IndexSpec { + if in == nil { + return nil + } + out := new(IndexSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexStatisticsInitParameters) DeepCopyInto(out *IndexStatisticsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexStatisticsInitParameters. +func (in *IndexStatisticsInitParameters) DeepCopy() *IndexStatisticsInitParameters { + if in == nil { + return nil + } + out := new(IndexStatisticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexStatisticsObservation) DeepCopyInto(out *IndexStatisticsObservation) { + *out = *in + if in.FaqStatistics != nil { + in, out := &in.FaqStatistics, &out.FaqStatistics + *out = make([]FaqStatisticsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TextDocumentStatistics != nil { + in, out := &in.TextDocumentStatistics, &out.TextDocumentStatistics + *out = make([]TextDocumentStatisticsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexStatisticsObservation. +func (in *IndexStatisticsObservation) DeepCopy() *IndexStatisticsObservation { + if in == nil { + return nil + } + out := new(IndexStatisticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexStatisticsParameters) DeepCopyInto(out *IndexStatisticsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexStatisticsParameters. +func (in *IndexStatisticsParameters) DeepCopy() *IndexStatisticsParameters { + if in == nil { + return nil + } + out := new(IndexStatisticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexStatus) DeepCopyInto(out *IndexStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexStatus. 
+func (in *IndexStatus) DeepCopy() *IndexStatus { + if in == nil { + return nil + } + out := new(IndexStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InlineConfigurationsInitParameters) DeepCopyInto(out *InlineConfigurationsInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DocumentContentDeletion != nil { + in, out := &in.DocumentContentDeletion, &out.DocumentContentDeletion + *out = new(bool) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InlineConfigurationsInitParameters. +func (in *InlineConfigurationsInitParameters) DeepCopy() *InlineConfigurationsInitParameters { + if in == nil { + return nil + } + out := new(InlineConfigurationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InlineConfigurationsObservation) DeepCopyInto(out *InlineConfigurationsObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.DocumentContentDeletion != nil { + in, out := &in.DocumentContentDeletion, &out.DocumentContentDeletion + *out = new(bool) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InlineConfigurationsObservation. 
+func (in *InlineConfigurationsObservation) DeepCopy() *InlineConfigurationsObservation { + if in == nil { + return nil + } + out := new(InlineConfigurationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InlineConfigurationsParameters) DeepCopyInto(out *InlineConfigurationsParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.DocumentContentDeletion != nil { + in, out := &in.DocumentContentDeletion, &out.DocumentContentDeletion + *out = new(bool) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InlineConfigurationsParameters. +func (in *InlineConfigurationsParameters) DeepCopy() *InlineConfigurationsParameters { + if in == nil { + return nil + } + out := new(InlineConfigurationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InvocationConditionConditionOnValueInitParameters) DeepCopyInto(out *InvocationConditionConditionOnValueInitParameters) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvocationConditionConditionOnValueInitParameters. +func (in *InvocationConditionConditionOnValueInitParameters) DeepCopy() *InvocationConditionConditionOnValueInitParameters { + if in == nil { + return nil + } + out := new(InvocationConditionConditionOnValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InvocationConditionConditionOnValueObservation) DeepCopyInto(out *InvocationConditionConditionOnValueObservation) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvocationConditionConditionOnValueObservation. +func (in *InvocationConditionConditionOnValueObservation) DeepCopy() *InvocationConditionConditionOnValueObservation { + if in == nil { + return nil + } + out := new(InvocationConditionConditionOnValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InvocationConditionConditionOnValueParameters) DeepCopyInto(out *InvocationConditionConditionOnValueParameters) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvocationConditionConditionOnValueParameters. +func (in *InvocationConditionConditionOnValueParameters) DeepCopy() *InvocationConditionConditionOnValueParameters { + if in == nil { + return nil + } + out := new(InvocationConditionConditionOnValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InvocationConditionInitParameters) DeepCopyInto(out *InvocationConditionInitParameters) { + *out = *in + if in.ConditionDocumentAttributeKey != nil { + in, out := &in.ConditionDocumentAttributeKey, &out.ConditionDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.ConditionOnValue != nil { + in, out := &in.ConditionOnValue, &out.ConditionOnValue + *out = new(InvocationConditionConditionOnValueInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvocationConditionInitParameters. 
+func (in *InvocationConditionInitParameters) DeepCopy() *InvocationConditionInitParameters { + if in == nil { + return nil + } + out := new(InvocationConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InvocationConditionObservation) DeepCopyInto(out *InvocationConditionObservation) { + *out = *in + if in.ConditionDocumentAttributeKey != nil { + in, out := &in.ConditionDocumentAttributeKey, &out.ConditionDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.ConditionOnValue != nil { + in, out := &in.ConditionOnValue, &out.ConditionOnValue + *out = new(InvocationConditionConditionOnValueObservation) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvocationConditionObservation. +func (in *InvocationConditionObservation) DeepCopy() *InvocationConditionObservation { + if in == nil { + return nil + } + out := new(InvocationConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InvocationConditionParameters) DeepCopyInto(out *InvocationConditionParameters) { + *out = *in + if in.ConditionDocumentAttributeKey != nil { + in, out := &in.ConditionDocumentAttributeKey, &out.ConditionDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.ConditionOnValue != nil { + in, out := &in.ConditionOnValue, &out.ConditionOnValue + *out = new(InvocationConditionConditionOnValueParameters) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InvocationConditionParameters. +func (in *InvocationConditionParameters) DeepCopy() *InvocationConditionParameters { + if in == nil { + return nil + } + out := new(InvocationConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONTokenTypeConfigurationInitParameters) DeepCopyInto(out *JSONTokenTypeConfigurationInitParameters) { + *out = *in + if in.GroupAttributeField != nil { + in, out := &in.GroupAttributeField, &out.GroupAttributeField + *out = new(string) + **out = **in + } + if in.UserNameAttributeField != nil { + in, out := &in.UserNameAttributeField, &out.UserNameAttributeField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTokenTypeConfigurationInitParameters. +func (in *JSONTokenTypeConfigurationInitParameters) DeepCopy() *JSONTokenTypeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(JSONTokenTypeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONTokenTypeConfigurationObservation) DeepCopyInto(out *JSONTokenTypeConfigurationObservation) { + *out = *in + if in.GroupAttributeField != nil { + in, out := &in.GroupAttributeField, &out.GroupAttributeField + *out = new(string) + **out = **in + } + if in.UserNameAttributeField != nil { + in, out := &in.UserNameAttributeField, &out.UserNameAttributeField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTokenTypeConfigurationObservation. +func (in *JSONTokenTypeConfigurationObservation) DeepCopy() *JSONTokenTypeConfigurationObservation { + if in == nil { + return nil + } + out := new(JSONTokenTypeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONTokenTypeConfigurationParameters) DeepCopyInto(out *JSONTokenTypeConfigurationParameters) { + *out = *in + if in.GroupAttributeField != nil { + in, out := &in.GroupAttributeField, &out.GroupAttributeField + *out = new(string) + **out = **in + } + if in.UserNameAttributeField != nil { + in, out := &in.UserNameAttributeField, &out.UserNameAttributeField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTokenTypeConfigurationParameters. +func (in *JSONTokenTypeConfigurationParameters) DeepCopy() *JSONTokenTypeConfigurationParameters { + if in == nil { + return nil + } + out := new(JSONTokenTypeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JwtTokenTypeConfigurationInitParameters) DeepCopyInto(out *JwtTokenTypeConfigurationInitParameters) { + *out = *in + if in.ClaimRegex != nil { + in, out := &in.ClaimRegex, &out.ClaimRegex + *out = new(string) + **out = **in + } + if in.GroupAttributeField != nil { + in, out := &in.GroupAttributeField, &out.GroupAttributeField + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.KeyLocation != nil { + in, out := &in.KeyLocation, &out.KeyLocation + *out = new(string) + **out = **in + } + if in.SecretsManagerArn != nil { + in, out := &in.SecretsManagerArn, &out.SecretsManagerArn + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UserNameAttributeField != nil { + in, out := &in.UserNameAttributeField, &out.UserNameAttributeField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JwtTokenTypeConfigurationInitParameters. +func (in *JwtTokenTypeConfigurationInitParameters) DeepCopy() *JwtTokenTypeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(JwtTokenTypeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JwtTokenTypeConfigurationObservation) DeepCopyInto(out *JwtTokenTypeConfigurationObservation) { + *out = *in + if in.ClaimRegex != nil { + in, out := &in.ClaimRegex, &out.ClaimRegex + *out = new(string) + **out = **in + } + if in.GroupAttributeField != nil { + in, out := &in.GroupAttributeField, &out.GroupAttributeField + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.KeyLocation != nil { + in, out := &in.KeyLocation, &out.KeyLocation + *out = new(string) + **out = **in + } + if in.SecretsManagerArn != nil { + in, out := &in.SecretsManagerArn, &out.SecretsManagerArn + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UserNameAttributeField != nil { + in, out := &in.UserNameAttributeField, &out.UserNameAttributeField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JwtTokenTypeConfigurationObservation. +func (in *JwtTokenTypeConfigurationObservation) DeepCopy() *JwtTokenTypeConfigurationObservation { + if in == nil { + return nil + } + out := new(JwtTokenTypeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JwtTokenTypeConfigurationParameters) DeepCopyInto(out *JwtTokenTypeConfigurationParameters) { + *out = *in + if in.ClaimRegex != nil { + in, out := &in.ClaimRegex, &out.ClaimRegex + *out = new(string) + **out = **in + } + if in.GroupAttributeField != nil { + in, out := &in.GroupAttributeField, &out.GroupAttributeField + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.KeyLocation != nil { + in, out := &in.KeyLocation, &out.KeyLocation + *out = new(string) + **out = **in + } + if in.SecretsManagerArn != nil { + in, out := &in.SecretsManagerArn, &out.SecretsManagerArn + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UserNameAttributeField != nil { + in, out := &in.UserNameAttributeField, &out.UserNameAttributeField + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JwtTokenTypeConfigurationParameters. +func (in *JwtTokenTypeConfigurationParameters) DeepCopy() *JwtTokenTypeConfigurationParameters { + if in == nil { + return nil + } + out := new(JwtTokenTypeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostExtractionHookConfigurationInitParameters) DeepCopyInto(out *PostExtractionHookConfigurationInitParameters) { + *out = *in + if in.InvocationCondition != nil { + in, out := &in.InvocationCondition, &out.InvocationCondition + *out = new(InvocationConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostExtractionHookConfigurationInitParameters. +func (in *PostExtractionHookConfigurationInitParameters) DeepCopy() *PostExtractionHookConfigurationInitParameters { + if in == nil { + return nil + } + out := new(PostExtractionHookConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostExtractionHookConfigurationObservation) DeepCopyInto(out *PostExtractionHookConfigurationObservation) { + *out = *in + if in.InvocationCondition != nil { + in, out := &in.InvocationCondition, &out.InvocationCondition + *out = new(InvocationConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostExtractionHookConfigurationObservation. 
+func (in *PostExtractionHookConfigurationObservation) DeepCopy() *PostExtractionHookConfigurationObservation { + if in == nil { + return nil + } + out := new(PostExtractionHookConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostExtractionHookConfigurationParameters) DeepCopyInto(out *PostExtractionHookConfigurationParameters) { + *out = *in + if in.InvocationCondition != nil { + in, out := &in.InvocationCondition, &out.InvocationCondition + *out = new(InvocationConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostExtractionHookConfigurationParameters. +func (in *PostExtractionHookConfigurationParameters) DeepCopy() *PostExtractionHookConfigurationParameters { + if in == nil { + return nil + } + out := new(PostExtractionHookConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreExtractionHookConfigurationInitParameters) DeepCopyInto(out *PreExtractionHookConfigurationInitParameters) { + *out = *in + if in.InvocationCondition != nil { + in, out := &in.InvocationCondition, &out.InvocationCondition + *out = new(PreExtractionHookConfigurationInvocationConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreExtractionHookConfigurationInitParameters. +func (in *PreExtractionHookConfigurationInitParameters) DeepCopy() *PreExtractionHookConfigurationInitParameters { + if in == nil { + return nil + } + out := new(PreExtractionHookConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreExtractionHookConfigurationInvocationConditionConditionOnValueInitParameters) DeepCopyInto(out *PreExtractionHookConfigurationInvocationConditionConditionOnValueInitParameters) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreExtractionHookConfigurationInvocationConditionConditionOnValueInitParameters. +func (in *PreExtractionHookConfigurationInvocationConditionConditionOnValueInitParameters) DeepCopy() *PreExtractionHookConfigurationInvocationConditionConditionOnValueInitParameters { + if in == nil { + return nil + } + out := new(PreExtractionHookConfigurationInvocationConditionConditionOnValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreExtractionHookConfigurationInvocationConditionConditionOnValueObservation) DeepCopyInto(out *PreExtractionHookConfigurationInvocationConditionConditionOnValueObservation) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreExtractionHookConfigurationInvocationConditionConditionOnValueObservation. +func (in *PreExtractionHookConfigurationInvocationConditionConditionOnValueObservation) DeepCopy() *PreExtractionHookConfigurationInvocationConditionConditionOnValueObservation { + if in == nil { + return nil + } + out := new(PreExtractionHookConfigurationInvocationConditionConditionOnValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreExtractionHookConfigurationInvocationConditionConditionOnValueParameters) DeepCopyInto(out *PreExtractionHookConfigurationInvocationConditionConditionOnValueParameters) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreExtractionHookConfigurationInvocationConditionConditionOnValueParameters. +func (in *PreExtractionHookConfigurationInvocationConditionConditionOnValueParameters) DeepCopy() *PreExtractionHookConfigurationInvocationConditionConditionOnValueParameters { + if in == nil { + return nil + } + out := new(PreExtractionHookConfigurationInvocationConditionConditionOnValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreExtractionHookConfigurationInvocationConditionInitParameters) DeepCopyInto(out *PreExtractionHookConfigurationInvocationConditionInitParameters) { + *out = *in + if in.ConditionDocumentAttributeKey != nil { + in, out := &in.ConditionDocumentAttributeKey, &out.ConditionDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.ConditionOnValue != nil { + in, out := &in.ConditionOnValue, &out.ConditionOnValue + *out = new(PreExtractionHookConfigurationInvocationConditionConditionOnValueInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreExtractionHookConfigurationInvocationConditionInitParameters. +func (in *PreExtractionHookConfigurationInvocationConditionInitParameters) DeepCopy() *PreExtractionHookConfigurationInvocationConditionInitParameters { + if in == nil { + return nil + } + out := new(PreExtractionHookConfigurationInvocationConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreExtractionHookConfigurationInvocationConditionObservation) DeepCopyInto(out *PreExtractionHookConfigurationInvocationConditionObservation) { + *out = *in + if in.ConditionDocumentAttributeKey != nil { + in, out := &in.ConditionDocumentAttributeKey, &out.ConditionDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.ConditionOnValue != nil { + in, out := &in.ConditionOnValue, &out.ConditionOnValue + *out = new(PreExtractionHookConfigurationInvocationConditionConditionOnValueObservation) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreExtractionHookConfigurationInvocationConditionObservation. +func (in *PreExtractionHookConfigurationInvocationConditionObservation) DeepCopy() *PreExtractionHookConfigurationInvocationConditionObservation { + if in == nil { + return nil + } + out := new(PreExtractionHookConfigurationInvocationConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreExtractionHookConfigurationInvocationConditionParameters) DeepCopyInto(out *PreExtractionHookConfigurationInvocationConditionParameters) { + *out = *in + if in.ConditionDocumentAttributeKey != nil { + in, out := &in.ConditionDocumentAttributeKey, &out.ConditionDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.ConditionOnValue != nil { + in, out := &in.ConditionOnValue, &out.ConditionOnValue + *out = new(PreExtractionHookConfigurationInvocationConditionConditionOnValueParameters) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreExtractionHookConfigurationInvocationConditionParameters. +func (in *PreExtractionHookConfigurationInvocationConditionParameters) DeepCopy() *PreExtractionHookConfigurationInvocationConditionParameters { + if in == nil { + return nil + } + out := new(PreExtractionHookConfigurationInvocationConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreExtractionHookConfigurationObservation) DeepCopyInto(out *PreExtractionHookConfigurationObservation) { + *out = *in + if in.InvocationCondition != nil { + in, out := &in.InvocationCondition, &out.InvocationCondition + *out = new(PreExtractionHookConfigurationInvocationConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreExtractionHookConfigurationObservation. 
+func (in *PreExtractionHookConfigurationObservation) DeepCopy() *PreExtractionHookConfigurationObservation { + if in == nil { + return nil + } + out := new(PreExtractionHookConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreExtractionHookConfigurationParameters) DeepCopyInto(out *PreExtractionHookConfigurationParameters) { + *out = *in + if in.InvocationCondition != nil { + in, out := &in.InvocationCondition, &out.InvocationCondition + *out = new(PreExtractionHookConfigurationInvocationConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaArn != nil { + in, out := &in.LambdaArn, &out.LambdaArn + *out = new(string) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreExtractionHookConfigurationParameters. +func (in *PreExtractionHookConfigurationParameters) DeepCopy() *PreExtractionHookConfigurationParameters { + if in == nil { + return nil + } + out := new(PreExtractionHookConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxyConfigurationInitParameters) DeepCopyInto(out *ProxyConfigurationInitParameters) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(string) + **out = **in + } + if in.CredentialsRef != nil { + in, out := &in.CredentialsRef, &out.CredentialsRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CredentialsSelector != nil { + in, out := &in.CredentialsSelector, &out.CredentialsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigurationInitParameters. +func (in *ProxyConfigurationInitParameters) DeepCopy() *ProxyConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ProxyConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfigurationObservation) DeepCopyInto(out *ProxyConfigurationObservation) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(string) + **out = **in + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigurationObservation. 
+func (in *ProxyConfigurationObservation) DeepCopy() *ProxyConfigurationObservation { + if in == nil { + return nil + } + out := new(ProxyConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfigurationParameters) DeepCopyInto(out *ProxyConfigurationParameters) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(string) + **out = **in + } + if in.CredentialsRef != nil { + in, out := &in.CredentialsRef, &out.CredentialsRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CredentialsSelector != nil { + in, out := &in.CredentialsSelector, &out.CredentialsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfigurationParameters. +func (in *ProxyConfigurationParameters) DeepCopy() *ProxyConfigurationParameters { + if in == nil { + return nil + } + out := new(ProxyConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuerySuggestionsBlockList) DeepCopyInto(out *QuerySuggestionsBlockList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySuggestionsBlockList. 
+func (in *QuerySuggestionsBlockList) DeepCopy() *QuerySuggestionsBlockList { + if in == nil { + return nil + } + out := new(QuerySuggestionsBlockList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QuerySuggestionsBlockList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuerySuggestionsBlockListInitParameters) DeepCopyInto(out *QuerySuggestionsBlockListInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.IndexIDRef != nil { + in, out := &in.IndexIDRef, &out.IndexIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IndexIDSelector != nil { + in, out := &in.IndexIDSelector, &out.IndexIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceS3Path != nil { + in, out := &in.SourceS3Path, &out.SourceS3Path + *out = new(SourceS3PathInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + 
} else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySuggestionsBlockListInitParameters. +func (in *QuerySuggestionsBlockListInitParameters) DeepCopy() *QuerySuggestionsBlockListInitParameters { + if in == nil { + return nil + } + out := new(QuerySuggestionsBlockListInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuerySuggestionsBlockListList) DeepCopyInto(out *QuerySuggestionsBlockListList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]QuerySuggestionsBlockList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySuggestionsBlockListList. +func (in *QuerySuggestionsBlockListList) DeepCopy() *QuerySuggestionsBlockListList { + if in == nil { + return nil + } + out := new(QuerySuggestionsBlockListList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QuerySuggestionsBlockListList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuerySuggestionsBlockListObservation) DeepCopyInto(out *QuerySuggestionsBlockListObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QuerySuggestionsBlockListID != nil { + in, out := &in.QuerySuggestionsBlockListID, &out.QuerySuggestionsBlockListID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SourceS3Path != nil { + in, out := &in.SourceS3Path, &out.SourceS3Path + *out = new(SourceS3PathObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySuggestionsBlockListObservation. 
+func (in *QuerySuggestionsBlockListObservation) DeepCopy() *QuerySuggestionsBlockListObservation { + if in == nil { + return nil + } + out := new(QuerySuggestionsBlockListObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuerySuggestionsBlockListParameters) DeepCopyInto(out *QuerySuggestionsBlockListParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.IndexIDRef != nil { + in, out := &in.IndexIDRef, &out.IndexIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IndexIDSelector != nil { + in, out := &in.IndexIDSelector, &out.IndexIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceS3Path != nil { + in, out := &in.SourceS3Path, &out.SourceS3Path + *out = new(SourceS3PathParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } 
+} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySuggestionsBlockListParameters. +func (in *QuerySuggestionsBlockListParameters) DeepCopy() *QuerySuggestionsBlockListParameters { + if in == nil { + return nil + } + out := new(QuerySuggestionsBlockListParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuerySuggestionsBlockListSpec) DeepCopyInto(out *QuerySuggestionsBlockListSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySuggestionsBlockListSpec. +func (in *QuerySuggestionsBlockListSpec) DeepCopy() *QuerySuggestionsBlockListSpec { + if in == nil { + return nil + } + out := new(QuerySuggestionsBlockListSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuerySuggestionsBlockListStatus) DeepCopyInto(out *QuerySuggestionsBlockListStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySuggestionsBlockListStatus. +func (in *QuerySuggestionsBlockListStatus) DeepCopy() *QuerySuggestionsBlockListStatus { + if in == nil { + return nil + } + out := new(QuerySuggestionsBlockListStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RelevanceInitParameters) DeepCopyInto(out *RelevanceInitParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Freshness != nil { + in, out := &in.Freshness, &out.Freshness + *out = new(bool) + **out = **in + } + if in.Importance != nil { + in, out := &in.Importance, &out.Importance + *out = new(float64) + **out = **in + } + if in.RankOrder != nil { + in, out := &in.RankOrder, &out.RankOrder + *out = new(string) + **out = **in + } + if in.ValuesImportanceMap != nil { + in, out := &in.ValuesImportanceMap, &out.ValuesImportanceMap + *out = make(map[string]*float64, len(*in)) + for key, val := range *in { + var outVal *float64 + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(float64) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelevanceInitParameters. +func (in *RelevanceInitParameters) DeepCopy() *RelevanceInitParameters { + if in == nil { + return nil + } + out := new(RelevanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RelevanceObservation) DeepCopyInto(out *RelevanceObservation) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Freshness != nil { + in, out := &in.Freshness, &out.Freshness + *out = new(bool) + **out = **in + } + if in.Importance != nil { + in, out := &in.Importance, &out.Importance + *out = new(float64) + **out = **in + } + if in.RankOrder != nil { + in, out := &in.RankOrder, &out.RankOrder + *out = new(string) + **out = **in + } + if in.ValuesImportanceMap != nil { + in, out := &in.ValuesImportanceMap, &out.ValuesImportanceMap + *out = make(map[string]*float64, len(*in)) + for key, val := range *in { + var outVal *float64 + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(float64) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelevanceObservation. +func (in *RelevanceObservation) DeepCopy() *RelevanceObservation { + if in == nil { + return nil + } + out := new(RelevanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RelevanceParameters) DeepCopyInto(out *RelevanceParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Freshness != nil { + in, out := &in.Freshness, &out.Freshness + *out = new(bool) + **out = **in + } + if in.Importance != nil { + in, out := &in.Importance, &out.Importance + *out = new(float64) + **out = **in + } + if in.RankOrder != nil { + in, out := &in.RankOrder, &out.RankOrder + *out = new(string) + **out = **in + } + if in.ValuesImportanceMap != nil { + in, out := &in.ValuesImportanceMap, &out.ValuesImportanceMap + *out = make(map[string]*float64, len(*in)) + for key, val := range *in { + var outVal *float64 + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(float64) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelevanceParameters. +func (in *RelevanceParameters) DeepCopy() *RelevanceParameters { + if in == nil { + return nil + } + out := new(RelevanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ConfigurationInitParameters) DeepCopyInto(out *S3ConfigurationInitParameters) { + *out = *in + if in.AccessControlListConfiguration != nil { + in, out := &in.AccessControlListConfiguration, &out.AccessControlListConfiguration + *out = new(AccessControlListConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DocumentsMetadataConfiguration != nil { + in, out := &in.DocumentsMetadataConfiguration, &out.DocumentsMetadataConfiguration + *out = new(DocumentsMetadataConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExclusionPatterns != nil { + in, out := &in.ExclusionPatterns, &out.ExclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InclusionPatterns != nil { + in, out := &in.InclusionPatterns, &out.InclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InclusionPrefixes != nil { + in, out := &in.InclusionPrefixes, &out.InclusionPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationInitParameters. 
+func (in *S3ConfigurationInitParameters) DeepCopy() *S3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigurationObservation) DeepCopyInto(out *S3ConfigurationObservation) { + *out = *in + if in.AccessControlListConfiguration != nil { + in, out := &in.AccessControlListConfiguration, &out.AccessControlListConfiguration + *out = new(AccessControlListConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.DocumentsMetadataConfiguration != nil { + in, out := &in.DocumentsMetadataConfiguration, &out.DocumentsMetadataConfiguration + *out = new(DocumentsMetadataConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ExclusionPatterns != nil { + in, out := &in.ExclusionPatterns, &out.ExclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InclusionPatterns != nil { + in, out := &in.InclusionPatterns, &out.InclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InclusionPrefixes != nil { + in, out := &in.InclusionPrefixes, &out.InclusionPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationObservation. 
+func (in *S3ConfigurationObservation) DeepCopy() *S3ConfigurationObservation { + if in == nil { + return nil + } + out := new(S3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigurationParameters) DeepCopyInto(out *S3ConfigurationParameters) { + *out = *in + if in.AccessControlListConfiguration != nil { + in, out := &in.AccessControlListConfiguration, &out.AccessControlListConfiguration + *out = new(AccessControlListConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DocumentsMetadataConfiguration != nil { + in, out := &in.DocumentsMetadataConfiguration, &out.DocumentsMetadataConfiguration + *out = new(DocumentsMetadataConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ExclusionPatterns != nil { + in, out := &in.ExclusionPatterns, &out.ExclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InclusionPatterns != nil { + in, out := &in.InclusionPatterns, &out.InclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InclusionPrefixes != nil { + in, out := &in.InclusionPrefixes, &out.InclusionPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = 
new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationParameters. +func (in *S3ConfigurationParameters) DeepCopy() *S3ConfigurationParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SearchInitParameters) DeepCopyInto(out *SearchInitParameters) { + *out = *in + if in.Displayable != nil { + in, out := &in.Displayable, &out.Displayable + *out = new(bool) + **out = **in + } + if in.Facetable != nil { + in, out := &in.Facetable, &out.Facetable + *out = new(bool) + **out = **in + } + if in.Searchable != nil { + in, out := &in.Searchable, &out.Searchable + *out = new(bool) + **out = **in + } + if in.Sortable != nil { + in, out := &in.Sortable, &out.Sortable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchInitParameters. +func (in *SearchInitParameters) DeepCopy() *SearchInitParameters { + if in == nil { + return nil + } + out := new(SearchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SearchObservation) DeepCopyInto(out *SearchObservation) { + *out = *in + if in.Displayable != nil { + in, out := &in.Displayable, &out.Displayable + *out = new(bool) + **out = **in + } + if in.Facetable != nil { + in, out := &in.Facetable, &out.Facetable + *out = new(bool) + **out = **in + } + if in.Searchable != nil { + in, out := &in.Searchable, &out.Searchable + *out = new(bool) + **out = **in + } + if in.Sortable != nil { + in, out := &in.Sortable, &out.Sortable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchObservation. +func (in *SearchObservation) DeepCopy() *SearchObservation { + if in == nil { + return nil + } + out := new(SearchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SearchParameters) DeepCopyInto(out *SearchParameters) { + *out = *in + if in.Displayable != nil { + in, out := &in.Displayable, &out.Displayable + *out = new(bool) + **out = **in + } + if in.Facetable != nil { + in, out := &in.Facetable, &out.Facetable + *out = new(bool) + **out = **in + } + if in.Searchable != nil { + in, out := &in.Searchable, &out.Searchable + *out = new(bool) + **out = **in + } + if in.Sortable != nil { + in, out := &in.Sortable, &out.Sortable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchParameters. +func (in *SearchParameters) DeepCopy() *SearchParameters { + if in == nil { + return nil + } + out := new(SearchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedURLConfigurationInitParameters) DeepCopyInto(out *SeedURLConfigurationInitParameters) { + *out = *in + if in.SeedUrls != nil { + in, out := &in.SeedUrls, &out.SeedUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WebCrawlerMode != nil { + in, out := &in.WebCrawlerMode, &out.WebCrawlerMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedURLConfigurationInitParameters. +func (in *SeedURLConfigurationInitParameters) DeepCopy() *SeedURLConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SeedURLConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedURLConfigurationObservation) DeepCopyInto(out *SeedURLConfigurationObservation) { + *out = *in + if in.SeedUrls != nil { + in, out := &in.SeedUrls, &out.SeedUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WebCrawlerMode != nil { + in, out := &in.WebCrawlerMode, &out.WebCrawlerMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedURLConfigurationObservation. +func (in *SeedURLConfigurationObservation) DeepCopy() *SeedURLConfigurationObservation { + if in == nil { + return nil + } + out := new(SeedURLConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedURLConfigurationParameters) DeepCopyInto(out *SeedURLConfigurationParameters) { + *out = *in + if in.SeedUrls != nil { + in, out := &in.SeedUrls, &out.SeedUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WebCrawlerMode != nil { + in, out := &in.WebCrawlerMode, &out.WebCrawlerMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedURLConfigurationParameters. +func (in *SeedURLConfigurationParameters) DeepCopy() *SeedURLConfigurationParameters { + if in == nil { + return nil + } + out := new(SeedURLConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionConfigurationInitParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationInitParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationInitParameters. +func (in *ServerSideEncryptionConfigurationInitParameters) DeepCopy() *ServerSideEncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerSideEncryptionConfigurationObservation) DeepCopyInto(out *ServerSideEncryptionConfigurationObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationObservation. +func (in *ServerSideEncryptionConfigurationObservation) DeepCopy() *ServerSideEncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(ServerSideEncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionConfigurationParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationParameters. +func (in *ServerSideEncryptionConfigurationParameters) DeepCopy() *ServerSideEncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteMapsConfigurationInitParameters) DeepCopyInto(out *SiteMapsConfigurationInitParameters) { + *out = *in + if in.SiteMaps != nil { + in, out := &in.SiteMaps, &out.SiteMaps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteMapsConfigurationInitParameters. 
+func (in *SiteMapsConfigurationInitParameters) DeepCopy() *SiteMapsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SiteMapsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteMapsConfigurationObservation) DeepCopyInto(out *SiteMapsConfigurationObservation) { + *out = *in + if in.SiteMaps != nil { + in, out := &in.SiteMaps, &out.SiteMaps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteMapsConfigurationObservation. +func (in *SiteMapsConfigurationObservation) DeepCopy() *SiteMapsConfigurationObservation { + if in == nil { + return nil + } + out := new(SiteMapsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteMapsConfigurationParameters) DeepCopyInto(out *SiteMapsConfigurationParameters) { + *out = *in + if in.SiteMaps != nil { + in, out := &in.SiteMaps, &out.SiteMaps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteMapsConfigurationParameters. +func (in *SiteMapsConfigurationParameters) DeepCopy() *SiteMapsConfigurationParameters { + if in == nil { + return nil + } + out := new(SiteMapsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceS3PathInitParameters) DeepCopyInto(out *SourceS3PathInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceS3PathInitParameters. +func (in *SourceS3PathInitParameters) DeepCopy() *SourceS3PathInitParameters { + if in == nil { + return nil + } + out := new(SourceS3PathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceS3PathObservation) DeepCopyInto(out *SourceS3PathObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceS3PathObservation. +func (in *SourceS3PathObservation) DeepCopy() *SourceS3PathObservation { + if in == nil { + return nil + } + out := new(SourceS3PathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceS3PathParameters) DeepCopyInto(out *SourceS3PathParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceS3PathParameters. +func (in *SourceS3PathParameters) DeepCopy() *SourceS3PathParameters { + if in == nil { + return nil + } + out := new(SourceS3PathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetDocumentAttributeValueInitParameters) DeepCopyInto(out *TargetDocumentAttributeValueInitParameters) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetDocumentAttributeValueInitParameters. 
+func (in *TargetDocumentAttributeValueInitParameters) DeepCopy() *TargetDocumentAttributeValueInitParameters { + if in == nil { + return nil + } + out := new(TargetDocumentAttributeValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetDocumentAttributeValueObservation) DeepCopyInto(out *TargetDocumentAttributeValueObservation) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetDocumentAttributeValueObservation. +func (in *TargetDocumentAttributeValueObservation) DeepCopy() *TargetDocumentAttributeValueObservation { + if in == nil { + return nil + } + out := new(TargetDocumentAttributeValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetDocumentAttributeValueParameters) DeepCopyInto(out *TargetDocumentAttributeValueParameters) { + *out = *in + if in.DateValue != nil { + in, out := &in.DateValue, &out.DateValue + *out = new(string) + **out = **in + } + if in.LongValue != nil { + in, out := &in.LongValue, &out.LongValue + *out = new(float64) + **out = **in + } + if in.StringListValue != nil { + in, out := &in.StringListValue, &out.StringListValue + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetDocumentAttributeValueParameters. +func (in *TargetDocumentAttributeValueParameters) DeepCopy() *TargetDocumentAttributeValueParameters { + if in == nil { + return nil + } + out := new(TargetDocumentAttributeValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetInitParameters) DeepCopyInto(out *TargetInitParameters) { + *out = *in + if in.TargetDocumentAttributeKey != nil { + in, out := &in.TargetDocumentAttributeKey, &out.TargetDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.TargetDocumentAttributeValue != nil { + in, out := &in.TargetDocumentAttributeValue, &out.TargetDocumentAttributeValue + *out = new(TargetDocumentAttributeValueInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetDocumentAttributeValueDeletion != nil { + in, out := &in.TargetDocumentAttributeValueDeletion, &out.TargetDocumentAttributeValueDeletion + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetInitParameters. 
+func (in *TargetInitParameters) DeepCopy() *TargetInitParameters { + if in == nil { + return nil + } + out := new(TargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetObservation) DeepCopyInto(out *TargetObservation) { + *out = *in + if in.TargetDocumentAttributeKey != nil { + in, out := &in.TargetDocumentAttributeKey, &out.TargetDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.TargetDocumentAttributeValue != nil { + in, out := &in.TargetDocumentAttributeValue, &out.TargetDocumentAttributeValue + *out = new(TargetDocumentAttributeValueObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetDocumentAttributeValueDeletion != nil { + in, out := &in.TargetDocumentAttributeValueDeletion, &out.TargetDocumentAttributeValueDeletion + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObservation. +func (in *TargetObservation) DeepCopy() *TargetObservation { + if in == nil { + return nil + } + out := new(TargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetParameters) DeepCopyInto(out *TargetParameters) { + *out = *in + if in.TargetDocumentAttributeKey != nil { + in, out := &in.TargetDocumentAttributeKey, &out.TargetDocumentAttributeKey + *out = new(string) + **out = **in + } + if in.TargetDocumentAttributeValue != nil { + in, out := &in.TargetDocumentAttributeValue, &out.TargetDocumentAttributeValue + *out = new(TargetDocumentAttributeValueParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetDocumentAttributeValueDeletion != nil { + in, out := &in.TargetDocumentAttributeValueDeletion, &out.TargetDocumentAttributeValueDeletion + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetParameters. +func (in *TargetParameters) DeepCopy() *TargetParameters { + if in == nil { + return nil + } + out := new(TargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TextDocumentStatisticsInitParameters) DeepCopyInto(out *TextDocumentStatisticsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextDocumentStatisticsInitParameters. +func (in *TextDocumentStatisticsInitParameters) DeepCopy() *TextDocumentStatisticsInitParameters { + if in == nil { + return nil + } + out := new(TextDocumentStatisticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TextDocumentStatisticsObservation) DeepCopyInto(out *TextDocumentStatisticsObservation) { + *out = *in + if in.IndexedTextBytes != nil { + in, out := &in.IndexedTextBytes, &out.IndexedTextBytes + *out = new(float64) + **out = **in + } + if in.IndexedTextDocumentsCount != nil { + in, out := &in.IndexedTextDocumentsCount, &out.IndexedTextDocumentsCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextDocumentStatisticsObservation. +func (in *TextDocumentStatisticsObservation) DeepCopy() *TextDocumentStatisticsObservation { + if in == nil { + return nil + } + out := new(TextDocumentStatisticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TextDocumentStatisticsParameters) DeepCopyInto(out *TextDocumentStatisticsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextDocumentStatisticsParameters. +func (in *TextDocumentStatisticsParameters) DeepCopy() *TextDocumentStatisticsParameters { + if in == nil { + return nil + } + out := new(TextDocumentStatisticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Thesaurus) DeepCopyInto(out *Thesaurus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Thesaurus. 
+func (in *Thesaurus) DeepCopy() *Thesaurus { + if in == nil { + return nil + } + out := new(Thesaurus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Thesaurus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThesaurusInitParameters) DeepCopyInto(out *ThesaurusInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.IndexIDRef != nil { + in, out := &in.IndexIDRef, &out.IndexIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IndexIDSelector != nil { + in, out := &in.IndexIDSelector, &out.IndexIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceS3Path != nil { + in, out := &in.SourceS3Path, &out.SourceS3Path + *out = new(ThesaurusSourceS3PathInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + 
**out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThesaurusInitParameters. +func (in *ThesaurusInitParameters) DeepCopy() *ThesaurusInitParameters { + if in == nil { + return nil + } + out := new(ThesaurusInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThesaurusList) DeepCopyInto(out *ThesaurusList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Thesaurus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThesaurusList. +func (in *ThesaurusList) DeepCopy() *ThesaurusList { + if in == nil { + return nil + } + out := new(ThesaurusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ThesaurusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThesaurusObservation) DeepCopyInto(out *ThesaurusObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SourceS3Path != nil { + in, out := &in.SourceS3Path, &out.SourceS3Path + *out = new(ThesaurusSourceS3PathObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThesaurusID != nil { + in, out := &in.ThesaurusID, &out.ThesaurusID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThesaurusObservation. 
+func (in *ThesaurusObservation) DeepCopy() *ThesaurusObservation { + if in == nil { + return nil + } + out := new(ThesaurusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThesaurusParameters) DeepCopyInto(out *ThesaurusParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IndexID != nil { + in, out := &in.IndexID, &out.IndexID + *out = new(string) + **out = **in + } + if in.IndexIDRef != nil { + in, out := &in.IndexIDRef, &out.IndexIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IndexIDSelector != nil { + in, out := &in.IndexIDSelector, &out.IndexIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceS3Path != nil { + in, out := &in.SourceS3Path, &out.SourceS3Path + *out = new(ThesaurusSourceS3PathParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new ThesaurusParameters. +func (in *ThesaurusParameters) DeepCopy() *ThesaurusParameters { + if in == nil { + return nil + } + out := new(ThesaurusParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThesaurusSourceS3PathInitParameters) DeepCopyInto(out *ThesaurusSourceS3PathInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.KeyRef != nil { + in, out := &in.KeyRef, &out.KeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeySelector != nil { + in, out := &in.KeySelector, &out.KeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThesaurusSourceS3PathInitParameters. +func (in *ThesaurusSourceS3PathInitParameters) DeepCopy() *ThesaurusSourceS3PathInitParameters { + if in == nil { + return nil + } + out := new(ThesaurusSourceS3PathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThesaurusSourceS3PathObservation) DeepCopyInto(out *ThesaurusSourceS3PathObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThesaurusSourceS3PathObservation. +func (in *ThesaurusSourceS3PathObservation) DeepCopy() *ThesaurusSourceS3PathObservation { + if in == nil { + return nil + } + out := new(ThesaurusSourceS3PathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThesaurusSourceS3PathParameters) DeepCopyInto(out *ThesaurusSourceS3PathParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.KeyRef != nil { + in, out := &in.KeyRef, &out.KeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeySelector != nil { + in, out := &in.KeySelector, &out.KeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThesaurusSourceS3PathParameters. 
+func (in *ThesaurusSourceS3PathParameters) DeepCopy() *ThesaurusSourceS3PathParameters { + if in == nil { + return nil + } + out := new(ThesaurusSourceS3PathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThesaurusSpec) DeepCopyInto(out *ThesaurusSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThesaurusSpec. +func (in *ThesaurusSpec) DeepCopy() *ThesaurusSpec { + if in == nil { + return nil + } + out := new(ThesaurusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThesaurusStatus) DeepCopyInto(out *ThesaurusStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThesaurusStatus. +func (in *ThesaurusStatus) DeepCopy() *ThesaurusStatus { + if in == nil { + return nil + } + out := new(ThesaurusStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UrlsInitParameters) DeepCopyInto(out *UrlsInitParameters) { + *out = *in + if in.SeedURLConfiguration != nil { + in, out := &in.SeedURLConfiguration, &out.SeedURLConfiguration + *out = new(SeedURLConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SiteMapsConfiguration != nil { + in, out := &in.SiteMapsConfiguration, &out.SiteMapsConfiguration + *out = new(SiteMapsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UrlsInitParameters. +func (in *UrlsInitParameters) DeepCopy() *UrlsInitParameters { + if in == nil { + return nil + } + out := new(UrlsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UrlsObservation) DeepCopyInto(out *UrlsObservation) { + *out = *in + if in.SeedURLConfiguration != nil { + in, out := &in.SeedURLConfiguration, &out.SeedURLConfiguration + *out = new(SeedURLConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.SiteMapsConfiguration != nil { + in, out := &in.SiteMapsConfiguration, &out.SiteMapsConfiguration + *out = new(SiteMapsConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UrlsObservation. +func (in *UrlsObservation) DeepCopy() *UrlsObservation { + if in == nil { + return nil + } + out := new(UrlsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UrlsParameters) DeepCopyInto(out *UrlsParameters) { + *out = *in + if in.SeedURLConfiguration != nil { + in, out := &in.SeedURLConfiguration, &out.SeedURLConfiguration + *out = new(SeedURLConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.SiteMapsConfiguration != nil { + in, out := &in.SiteMapsConfiguration, &out.SiteMapsConfiguration + *out = new(SiteMapsConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UrlsParameters. +func (in *UrlsParameters) DeepCopy() *UrlsParameters { + if in == nil { + return nil + } + out := new(UrlsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserGroupResolutionConfigurationInitParameters) DeepCopyInto(out *UserGroupResolutionConfigurationInitParameters) { + *out = *in + if in.UserGroupResolutionMode != nil { + in, out := &in.UserGroupResolutionMode, &out.UserGroupResolutionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserGroupResolutionConfigurationInitParameters. +func (in *UserGroupResolutionConfigurationInitParameters) DeepCopy() *UserGroupResolutionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(UserGroupResolutionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserGroupResolutionConfigurationObservation) DeepCopyInto(out *UserGroupResolutionConfigurationObservation) { + *out = *in + if in.UserGroupResolutionMode != nil { + in, out := &in.UserGroupResolutionMode, &out.UserGroupResolutionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserGroupResolutionConfigurationObservation. +func (in *UserGroupResolutionConfigurationObservation) DeepCopy() *UserGroupResolutionConfigurationObservation { + if in == nil { + return nil + } + out := new(UserGroupResolutionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserGroupResolutionConfigurationParameters) DeepCopyInto(out *UserGroupResolutionConfigurationParameters) { + *out = *in + if in.UserGroupResolutionMode != nil { + in, out := &in.UserGroupResolutionMode, &out.UserGroupResolutionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserGroupResolutionConfigurationParameters. +func (in *UserGroupResolutionConfigurationParameters) DeepCopy() *UserGroupResolutionConfigurationParameters { + if in == nil { + return nil + } + out := new(UserGroupResolutionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserIdentityConfigurationInitParameters) DeepCopyInto(out *UserIdentityConfigurationInitParameters) { + *out = *in + if in.IdentityAttributeName != nil { + in, out := &in.IdentityAttributeName, &out.IdentityAttributeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserIdentityConfigurationInitParameters. 
+func (in *UserIdentityConfigurationInitParameters) DeepCopy() *UserIdentityConfigurationInitParameters { + if in == nil { + return nil + } + out := new(UserIdentityConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserIdentityConfigurationObservation) DeepCopyInto(out *UserIdentityConfigurationObservation) { + *out = *in + if in.IdentityAttributeName != nil { + in, out := &in.IdentityAttributeName, &out.IdentityAttributeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserIdentityConfigurationObservation. +func (in *UserIdentityConfigurationObservation) DeepCopy() *UserIdentityConfigurationObservation { + if in == nil { + return nil + } + out := new(UserIdentityConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserIdentityConfigurationParameters) DeepCopyInto(out *UserIdentityConfigurationParameters) { + *out = *in + if in.IdentityAttributeName != nil { + in, out := &in.IdentityAttributeName, &out.IdentityAttributeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserIdentityConfigurationParameters. +func (in *UserIdentityConfigurationParameters) DeepCopy() *UserIdentityConfigurationParameters { + if in == nil { + return nil + } + out := new(UserIdentityConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserTokenConfigurationsInitParameters) DeepCopyInto(out *UserTokenConfigurationsInitParameters) { + *out = *in + if in.JSONTokenTypeConfiguration != nil { + in, out := &in.JSONTokenTypeConfiguration, &out.JSONTokenTypeConfiguration + *out = new(JSONTokenTypeConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JwtTokenTypeConfiguration != nil { + in, out := &in.JwtTokenTypeConfiguration, &out.JwtTokenTypeConfiguration + *out = new(JwtTokenTypeConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserTokenConfigurationsInitParameters. +func (in *UserTokenConfigurationsInitParameters) DeepCopy() *UserTokenConfigurationsInitParameters { + if in == nil { + return nil + } + out := new(UserTokenConfigurationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserTokenConfigurationsObservation) DeepCopyInto(out *UserTokenConfigurationsObservation) { + *out = *in + if in.JSONTokenTypeConfiguration != nil { + in, out := &in.JSONTokenTypeConfiguration, &out.JSONTokenTypeConfiguration + *out = new(JSONTokenTypeConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.JwtTokenTypeConfiguration != nil { + in, out := &in.JwtTokenTypeConfiguration, &out.JwtTokenTypeConfiguration + *out = new(JwtTokenTypeConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserTokenConfigurationsObservation. +func (in *UserTokenConfigurationsObservation) DeepCopy() *UserTokenConfigurationsObservation { + if in == nil { + return nil + } + out := new(UserTokenConfigurationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserTokenConfigurationsParameters) DeepCopyInto(out *UserTokenConfigurationsParameters) { + *out = *in + if in.JSONTokenTypeConfiguration != nil { + in, out := &in.JSONTokenTypeConfiguration, &out.JSONTokenTypeConfiguration + *out = new(JSONTokenTypeConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.JwtTokenTypeConfiguration != nil { + in, out := &in.JwtTokenTypeConfiguration, &out.JwtTokenTypeConfiguration + *out = new(JwtTokenTypeConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserTokenConfigurationsParameters. +func (in *UserTokenConfigurationsParameters) DeepCopy() *UserTokenConfigurationsParameters { + if in == nil { + return nil + } + out := new(UserTokenConfigurationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebCrawlerConfigurationInitParameters) DeepCopyInto(out *WebCrawlerConfigurationInitParameters) { + *out = *in + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CrawlDepth != nil { + in, out := &in.CrawlDepth, &out.CrawlDepth + *out = new(float64) + **out = **in + } + if in.MaxContentSizePerPageInMegaBytes != nil { + in, out := &in.MaxContentSizePerPageInMegaBytes, &out.MaxContentSizePerPageInMegaBytes + *out = new(float64) + **out = **in + } + if in.MaxLinksPerPage != nil { + in, out := &in.MaxLinksPerPage, &out.MaxLinksPerPage + *out = new(float64) + **out = **in + } + if in.MaxUrlsPerMinuteCrawlRate != nil { + in, out := &in.MaxUrlsPerMinuteCrawlRate, &out.MaxUrlsPerMinuteCrawlRate + *out = new(float64) + **out = **in + } + if in.ProxyConfiguration != nil { + in, out := &in.ProxyConfiguration, &out.ProxyConfiguration + *out = 
new(ProxyConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URLExclusionPatterns != nil { + in, out := &in.URLExclusionPatterns, &out.URLExclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URLInclusionPatterns != nil { + in, out := &in.URLInclusionPatterns, &out.URLInclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Urls != nil { + in, out := &in.Urls, &out.Urls + *out = new(UrlsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebCrawlerConfigurationInitParameters. +func (in *WebCrawlerConfigurationInitParameters) DeepCopy() *WebCrawlerConfigurationInitParameters { + if in == nil { + return nil + } + out := new(WebCrawlerConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebCrawlerConfigurationObservation) DeepCopyInto(out *WebCrawlerConfigurationObservation) { + *out = *in + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CrawlDepth != nil { + in, out := &in.CrawlDepth, &out.CrawlDepth + *out = new(float64) + **out = **in + } + if in.MaxContentSizePerPageInMegaBytes != nil { + in, out := &in.MaxContentSizePerPageInMegaBytes, &out.MaxContentSizePerPageInMegaBytes + *out = new(float64) + **out = **in + } + if in.MaxLinksPerPage != nil { + in, out := &in.MaxLinksPerPage, &out.MaxLinksPerPage + *out = new(float64) + **out = **in + } + if in.MaxUrlsPerMinuteCrawlRate != nil { + in, out := &in.MaxUrlsPerMinuteCrawlRate, &out.MaxUrlsPerMinuteCrawlRate + *out = new(float64) + **out = **in + } + if in.ProxyConfiguration != nil { + in, out := &in.ProxyConfiguration, &out.ProxyConfiguration + *out = new(ProxyConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.URLExclusionPatterns != nil { + in, out := &in.URLExclusionPatterns, &out.URLExclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URLInclusionPatterns != nil { + in, out := &in.URLInclusionPatterns, &out.URLInclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Urls != nil { + in, out := &in.Urls, &out.Urls + *out = new(UrlsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebCrawlerConfigurationObservation. 
+func (in *WebCrawlerConfigurationObservation) DeepCopy() *WebCrawlerConfigurationObservation { + if in == nil { + return nil + } + out := new(WebCrawlerConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebCrawlerConfigurationParameters) DeepCopyInto(out *WebCrawlerConfigurationParameters) { + *out = *in + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CrawlDepth != nil { + in, out := &in.CrawlDepth, &out.CrawlDepth + *out = new(float64) + **out = **in + } + if in.MaxContentSizePerPageInMegaBytes != nil { + in, out := &in.MaxContentSizePerPageInMegaBytes, &out.MaxContentSizePerPageInMegaBytes + *out = new(float64) + **out = **in + } + if in.MaxLinksPerPage != nil { + in, out := &in.MaxLinksPerPage, &out.MaxLinksPerPage + *out = new(float64) + **out = **in + } + if in.MaxUrlsPerMinuteCrawlRate != nil { + in, out := &in.MaxUrlsPerMinuteCrawlRate, &out.MaxUrlsPerMinuteCrawlRate + *out = new(float64) + **out = **in + } + if in.ProxyConfiguration != nil { + in, out := &in.ProxyConfiguration, &out.ProxyConfiguration + *out = new(ProxyConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.URLExclusionPatterns != nil { + in, out := &in.URLExclusionPatterns, &out.URLExclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URLInclusionPatterns != nil { + in, out := &in.URLInclusionPatterns, &out.URLInclusionPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Urls != nil { + in, out := &in.Urls, &out.Urls 
+ *out = new(UrlsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebCrawlerConfigurationParameters. +func (in *WebCrawlerConfigurationParameters) DeepCopy() *WebCrawlerConfigurationParameters { + if in == nil { + return nil + } + out := new(WebCrawlerConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kendra/v1beta2/zz_generated.managed.go b/apis/kendra/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..9931fe71c5 --- /dev/null +++ b/apis/kendra/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this DataSource. +func (mg *DataSource) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataSource. +func (mg *DataSource) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataSource. +func (mg *DataSource) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataSource. +func (mg *DataSource) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataSource. +func (mg *DataSource) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataSource. +func (mg *DataSource) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataSource. 
+func (mg *DataSource) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataSource. +func (mg *DataSource) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataSource. +func (mg *DataSource) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataSource. +func (mg *DataSource) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataSource. +func (mg *DataSource) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataSource. +func (mg *DataSource) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Experience. +func (mg *Experience) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Experience. +func (mg *Experience) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Experience. +func (mg *Experience) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Experience. +func (mg *Experience) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Experience. +func (mg *Experience) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Experience. 
+func (mg *Experience) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Experience. +func (mg *Experience) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Experience. +func (mg *Experience) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Experience. +func (mg *Experience) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Experience. +func (mg *Experience) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Experience. +func (mg *Experience) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Experience. +func (mg *Experience) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Index. +func (mg *Index) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Index. +func (mg *Index) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Index. +func (mg *Index) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Index. +func (mg *Index) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Index. +func (mg *Index) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Index. 
+func (mg *Index) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Index. +func (mg *Index) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Index. +func (mg *Index) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Index. +func (mg *Index) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Index. +func (mg *Index) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Index. +func (mg *Index) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Index. +func (mg *Index) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this QuerySuggestionsBlockList. 
+func (mg *QuerySuggestionsBlockList) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this QuerySuggestionsBlockList. +func (mg *QuerySuggestionsBlockList) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Thesaurus. +func (mg *Thesaurus) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Thesaurus. +func (mg *Thesaurus) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Thesaurus. 
+func (mg *Thesaurus) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Thesaurus. +func (mg *Thesaurus) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Thesaurus. +func (mg *Thesaurus) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Thesaurus. +func (mg *Thesaurus) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Thesaurus. +func (mg *Thesaurus) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Thesaurus. +func (mg *Thesaurus) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Thesaurus. +func (mg *Thesaurus) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Thesaurus. +func (mg *Thesaurus) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Thesaurus. +func (mg *Thesaurus) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Thesaurus. 
+func (mg *Thesaurus) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kendra/v1beta2/zz_generated.managedlist.go b/apis/kendra/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..c7166e0d37 --- /dev/null +++ b/apis/kendra/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DataSourceList. +func (l *DataSourceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ExperienceList. +func (l *ExperienceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IndexList. +func (l *IndexList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this QuerySuggestionsBlockListList. +func (l *QuerySuggestionsBlockListList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ThesaurusList. 
+func (l *ThesaurusList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kendra/v1beta2/zz_generated.resolvers.go b/apis/kendra/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..3e03ee21ad --- /dev/null +++ b/apis/kendra/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,699 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *DataSource) ResolveReferences( // ResolveReferences of this DataSource. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Configuration != nil { + if mg.Spec.ForProvider.Configuration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Configuration.S3Configuration.BucketName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Configuration.S3Configuration.BucketNameRef, + Selector: mg.Spec.ForProvider.Configuration.S3Configuration.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Configuration.S3Configuration.BucketName") + } + mg.Spec.ForProvider.Configuration.S3Configuration.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Configuration.S3Configuration.BucketNameRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Configuration != nil { + if mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration != nil { + if mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration != nil { + for i6 := 0; i6 < len(mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication); i6++ { + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].Credentials), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].CredentialsRef, + Selector: mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].CredentialsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].Credentials") + } + mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].Credentials = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].CredentialsRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.ForProvider.Configuration != nil { + if mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration != nil { + if mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.Credentials), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.CredentialsRef, + Selector: mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.CredentialsSelector, + To: reference.To{List: l, Managed: 
m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.Credentials") + } + mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.Credentials = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.CredentialsRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("kendra.aws.upbound.io", "v1beta2", "Index", "IndexList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IndexID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IndexIDRef, + Selector: mg.Spec.ForProvider.IndexIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IndexID") + } + mg.Spec.ForProvider.IndexID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IndexIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Configuration != nil { + if 
mg.Spec.InitProvider.Configuration.S3Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Configuration.S3Configuration.BucketName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Configuration.S3Configuration.BucketNameRef, + Selector: mg.Spec.InitProvider.Configuration.S3Configuration.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Configuration.S3Configuration.BucketName") + } + mg.Spec.InitProvider.Configuration.S3Configuration.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Configuration.S3Configuration.BucketNameRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Configuration != nil { + if mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration != nil { + if mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration != nil { + for i6 := 0; i6 < len(mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication); i6++ { + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].Credentials), + Extract: resource.ExtractParamPath("arn", true), + Reference: 
mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].CredentialsRef, + Selector: mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].CredentialsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].Credentials") + } + mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].Credentials = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.AuthenticationConfiguration.BasicAuthentication[i6].CredentialsRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.Configuration != nil { + if mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration != nil { + if mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.Credentials), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.CredentialsRef, + Selector: mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.CredentialsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.Credentials") + } + 
mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.Credentials = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Configuration.WebCrawlerConfiguration.ProxyConfiguration.CredentialsRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("kendra.aws.upbound.io", "v1beta2", "Index", "IndexList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IndexID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.IndexIDRef, + Selector: mg.Spec.InitProvider.IndexIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IndexID") + } + mg.Spec.InitProvider.IndexID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IndexIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Experience. 
+func (mg *Experience) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kendra.aws.upbound.io", "v1beta2", "Index", "IndexList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IndexID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IndexIDRef, + Selector: mg.Spec.ForProvider.IndexIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IndexID") + } + mg.Spec.ForProvider.IndexID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IndexIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kendra.aws.upbound.io", "v1beta2", "Index", "IndexList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IndexID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.IndexIDRef, + Selector: mg.Spec.InitProvider.IndexIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IndexID") + } + mg.Spec.InitProvider.IndexID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IndexIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Index. 
+func (mg *Index) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this QuerySuggestionsBlockList. 
+func (mg *QuerySuggestionsBlockList) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kendra.aws.upbound.io", "v1beta2", "Index", "IndexList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IndexID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IndexIDRef, + Selector: mg.Spec.ForProvider.IndexIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IndexID") + } + mg.Spec.ForProvider.IndexID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IndexIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SourceS3Path != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get 
the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceS3Path.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SourceS3Path.BucketRef, + Selector: mg.Spec.ForProvider.SourceS3Path.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceS3Path.Bucket") + } + mg.Spec.ForProvider.SourceS3Path.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceS3Path.BucketRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("kendra.aws.upbound.io", "v1beta2", "Index", "IndexList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IndexID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.IndexIDRef, + Selector: mg.Spec.InitProvider.IndexIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IndexID") + } + mg.Spec.InitProvider.IndexID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IndexIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + 
}) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.SourceS3Path != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceS3Path.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SourceS3Path.BucketRef, + Selector: mg.Spec.InitProvider.SourceS3Path.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceS3Path.Bucket") + } + mg.Spec.InitProvider.SourceS3Path.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceS3Path.BucketRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Thesaurus. 
+func (mg *Thesaurus) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kendra.aws.upbound.io", "v1beta2", "Index", "IndexList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IndexID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IndexIDRef, + Selector: mg.Spec.ForProvider.IndexIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IndexID") + } + mg.Spec.ForProvider.IndexID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IndexIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SourceS3Path != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference 
target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceS3Path.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SourceS3Path.BucketRef, + Selector: mg.Spec.ForProvider.SourceS3Path.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceS3Path.Bucket") + } + mg.Spec.ForProvider.SourceS3Path.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceS3Path.BucketRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.SourceS3Path != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceS3Path.Key), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.ForProvider.SourceS3Path.KeyRef, + Selector: mg.Spec.ForProvider.SourceS3Path.KeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceS3Path.Key") + } + mg.Spec.ForProvider.SourceS3Path.Key = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceS3Path.KeyRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("kendra.aws.upbound.io", "v1beta2", "Index", "IndexList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IndexID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.IndexIDRef, + Selector: mg.Spec.InitProvider.IndexIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IndexID") + } + mg.Spec.InitProvider.IndexID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IndexIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.SourceS3Path != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceS3Path.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SourceS3Path.BucketRef, + Selector: mg.Spec.InitProvider.SourceS3Path.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceS3Path.Bucket") + } + mg.Spec.InitProvider.SourceS3Path.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceS3Path.BucketRef = 
rsp.ResolvedReference + + } + if mg.Spec.InitProvider.SourceS3Path != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceS3Path.Key), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.InitProvider.SourceS3Path.KeyRef, + Selector: mg.Spec.InitProvider.SourceS3Path.KeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceS3Path.Key") + } + mg.Spec.InitProvider.SourceS3Path.Key = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceS3Path.KeyRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/kendra/v1beta2/zz_groupversion_info.go b/apis/kendra/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..6064e16fa2 --- /dev/null +++ b/apis/kendra/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kendra.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "kendra.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/kendra/v1beta2/zz_index_terraformed.go b/apis/kendra/v1beta2/zz_index_terraformed.go new file mode 100755 index 0000000000..a1066a92cf --- /dev/null +++ b/apis/kendra/v1beta2/zz_index_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Index +func (mg *Index) GetTerraformResourceType() string { + return "aws_kendra_index" +} + +// GetConnectionDetailsMapping for this Index +func (tr *Index) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Index +func (tr *Index) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Index +func (tr *Index) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Index +func (tr *Index) GetID() string { + if tr.Status.AtProvider.ID == nil { + 
return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Index +func (tr *Index) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Index +func (tr *Index) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Index +func (tr *Index) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Index +func (tr *Index) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Index using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Index) LateInitialize(attrs []byte) (bool, error) { + params := &IndexParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Index) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kendra/v1beta2/zz_index_types.go b/apis/kendra/v1beta2/zz_index_types.go new file mode 100755 index 0000000000..85ca214b9c --- /dev/null +++ b/apis/kendra/v1beta2/zz_index_types.go @@ -0,0 +1,653 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CapacityUnitsInitParameters struct { + + // The amount of extra query capacity for an index and GetQuerySuggestions capacity. For more information, refer to QueryCapacityUnits. + QueryCapacityUnits *float64 `json:"queryCapacityUnits,omitempty" tf:"query_capacity_units,omitempty"` + + // The amount of extra storage capacity for an index. A single capacity unit provides 30 GB of storage space or 100,000 documents, whichever is reached first. Minimum value of 0. + StorageCapacityUnits *float64 `json:"storageCapacityUnits,omitempty" tf:"storage_capacity_units,omitempty"` +} + +type CapacityUnitsObservation struct { + + // The amount of extra query capacity for an index and GetQuerySuggestions capacity. 
For more information, refer to QueryCapacityUnits. + QueryCapacityUnits *float64 `json:"queryCapacityUnits,omitempty" tf:"query_capacity_units,omitempty"` + + // The amount of extra storage capacity for an index. A single capacity unit provides 30 GB of storage space or 100,000 documents, whichever is reached first. Minimum value of 0. + StorageCapacityUnits *float64 `json:"storageCapacityUnits,omitempty" tf:"storage_capacity_units,omitempty"` +} + +type CapacityUnitsParameters struct { + + // The amount of extra query capacity for an index and GetQuerySuggestions capacity. For more information, refer to QueryCapacityUnits. + // +kubebuilder:validation:Optional + QueryCapacityUnits *float64 `json:"queryCapacityUnits,omitempty" tf:"query_capacity_units,omitempty"` + + // The amount of extra storage capacity for an index. A single capacity unit provides 30 GB of storage space or 100,000 documents, whichever is reached first. Minimum value of 0. + // +kubebuilder:validation:Optional + StorageCapacityUnits *float64 `json:"storageCapacityUnits,omitempty" tf:"storage_capacity_units,omitempty"` +} + +type DocumentMetadataConfigurationUpdatesInitParameters struct { + + // The name of the index field. Minimum length of 1. Maximum length of 30. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that provides manual tuning parameters to determine how the field affects the search results. Detailed below + Relevance *RelevanceInitParameters `json:"relevance,omitempty" tf:"relevance,omitempty"` + + // A block that provides information about how the field is used during a search. Documented below. Detailed below + Search *SearchInitParameters `json:"search,omitempty" tf:"search,omitempty"` + + // The data type of the index field. Valid values are STRING_VALUE, STRING_LIST_VALUE, LONG_VALUE, DATE_VALUE. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DocumentMetadataConfigurationUpdatesObservation struct { + + // The name of the index field. Minimum length of 1. Maximum length of 30. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A block that provides manual tuning parameters to determine how the field affects the search results. Detailed below + Relevance *RelevanceObservation `json:"relevance,omitempty" tf:"relevance,omitempty"` + + // A block that provides information about how the field is used during a search. Documented below. Detailed below + Search *SearchObservation `json:"search,omitempty" tf:"search,omitempty"` + + // The data type of the index field. Valid values are STRING_VALUE, STRING_LIST_VALUE, LONG_VALUE, DATE_VALUE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DocumentMetadataConfigurationUpdatesParameters struct { + + // The name of the index field. Minimum length of 1. Maximum length of 30. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A block that provides manual tuning parameters to determine how the field affects the search results. Detailed below + // +kubebuilder:validation:Optional + Relevance *RelevanceParameters `json:"relevance,omitempty" tf:"relevance,omitempty"` + + // A block that provides information about how the field is used during a search. Documented below. Detailed below + // +kubebuilder:validation:Optional + Search *SearchParameters `json:"search,omitempty" tf:"search,omitempty"` + + // The data type of the index field. Valid values are STRING_VALUE, STRING_LIST_VALUE, LONG_VALUE, DATE_VALUE. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type FaqStatisticsInitParameters struct { +} + +type FaqStatisticsObservation struct { + + // The total number of FAQ questions and answers contained in the index. 
+ IndexedQuestionAnswersCount *float64 `json:"indexedQuestionAnswersCount,omitempty" tf:"indexed_question_answers_count,omitempty"` +} + +type FaqStatisticsParameters struct { +} + +type IndexInitParameters struct { + + // A block that sets the number of additional document storage and query capacity units that should be used by the index. Detailed below. + CapacityUnits *CapacityUnitsInitParameters `json:"capacityUnits,omitempty" tf:"capacity_units,omitempty"` + + // The description of the Index. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more blocks that specify the configuration settings for any metadata applied to the documents in the index. Minimum number of 0 items. Maximum number of 500 items. If specified, you must define all elements, including those that are provided by default. These index fields are documented at Amazon Kendra Index documentation. For an example resource that defines these default index fields, refer to the default example above. For an example resource that appends additional index fields, refer to the append example above. All arguments for each block must be specified. Note that blocks cannot be removed since index fields cannot be deleted. This argument is detailed below. + DocumentMetadataConfigurationUpdates []DocumentMetadataConfigurationUpdatesInitParameters `json:"documentMetadataConfigurationUpdates,omitempty" tf:"document_metadata_configuration_updates,omitempty"` + + // The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for your production databases. Once you set the edition for an index, it can't be changed. Defaults to ENTERPRISE_EDITION + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // Specifies the name of the Index. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An AWS Identity and Access Management (IAM) role that gives Amazon Kendra permissions to access your Amazon CloudWatch logs and metrics. This is also the role you use when you call the BatchPutDocument API to index documents from an Amazon S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // A block that specifies the identifier of the AWS KMS customer managed key (CMK) that's used to encrypt data indexed by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs. Detailed below. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfigurationInitParameters `json:"serverSideEncryptionConfiguration,omitempty" tf:"server_side_encryption_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The user context policy. Valid values are ATTRIBUTE_FILTER or USER_TOKEN. For more information, refer to UserContextPolicy. Defaults to ATTRIBUTE_FILTER. + UserContextPolicy *string `json:"userContextPolicy,omitempty" tf:"user_context_policy,omitempty"` + + // A block that enables fetching access levels of groups and users from an AWS Single Sign-On identity source. To configure this, see UserGroupResolutionConfiguration. Detailed below. 
+ UserGroupResolutionConfiguration *UserGroupResolutionConfigurationInitParameters `json:"userGroupResolutionConfiguration,omitempty" tf:"user_group_resolution_configuration,omitempty"` + + // A block that specifies the user token configuration. Detailed below. + UserTokenConfigurations *UserTokenConfigurationsInitParameters `json:"userTokenConfigurations,omitempty" tf:"user_token_configurations,omitempty"` +} + +type IndexObservation struct { + + // The Amazon Resource Name (ARN) of the Index. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A block that sets the number of additional document storage and query capacity units that should be used by the index. Detailed below. + CapacityUnits *CapacityUnitsObservation `json:"capacityUnits,omitempty" tf:"capacity_units,omitempty"` + + // The Unix datetime that the index was created. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The description of the Index. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more blocks that specify the configuration settings for any metadata applied to the documents in the index. Minimum number of 0 items. Maximum number of 500 items. If specified, you must define all elements, including those that are provided by default. These index fields are documented at Amazon Kendra Index documentation. For an example resource that defines these default index fields, refer to the default example above. For an example resource that appends additional index fields, refer to the append example above. All arguments for each block must be specified. Note that blocks cannot be removed since index fields cannot be deleted. This argument is detailed below. + DocumentMetadataConfigurationUpdates []DocumentMetadataConfigurationUpdatesObservation `json:"documentMetadataConfigurationUpdates,omitempty" tf:"document_metadata_configuration_updates,omitempty"` + + // The Amazon Kendra edition to use for the index. 
Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for your production databases. Once you set the edition for an index, it can't be changed. Defaults to ENTERPRISE_EDITION + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // When the Status field value is FAILED, this contains a message that explains why. + ErrorMessage *string `json:"errorMessage,omitempty" tf:"error_message,omitempty"` + + // The identifier of the Index. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A block that provides information about the number of FAQ questions and answers and the number of text documents indexed. Detailed below. + IndexStatistics []IndexStatisticsObservation `json:"indexStatistics,omitempty" tf:"index_statistics,omitempty"` + + // Specifies the name of the Index. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An AWS Identity and Access Management (IAM) role that gives Amazon Kendra permissions to access your Amazon CloudWatch logs and metrics. This is also the role you use when you call the BatchPutDocument API to index documents from an Amazon S3 bucket. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // A block that specifies the identifier of the AWS KMS customer managed key (CMK) that's used to encrypt data indexed by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs. Detailed below. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfigurationObservation `json:"serverSideEncryptionConfiguration,omitempty" tf:"server_side_encryption_configuration,omitempty"` + + // The current status of the index. When the value is ACTIVE, the index is ready for use. If the Status field value is FAILED, the error_message field contains a message that explains why. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The Unix datetime that the index was last updated. + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` + + // The user context policy. Valid values are ATTRIBUTE_FILTER or USER_TOKEN. For more information, refer to UserContextPolicy. Defaults to ATTRIBUTE_FILTER. + UserContextPolicy *string `json:"userContextPolicy,omitempty" tf:"user_context_policy,omitempty"` + + // A block that enables fetching access levels of groups and users from an AWS Single Sign-On identity source. To configure this, see UserGroupResolutionConfiguration. Detailed below. + UserGroupResolutionConfiguration *UserGroupResolutionConfigurationObservation `json:"userGroupResolutionConfiguration,omitempty" tf:"user_group_resolution_configuration,omitempty"` + + // A block that specifies the user token configuration. Detailed below. + UserTokenConfigurations *UserTokenConfigurationsObservation `json:"userTokenConfigurations,omitempty" tf:"user_token_configurations,omitempty"` +} + +type IndexParameters struct { + + // A block that sets the number of additional document storage and query capacity units that should be used by the index. Detailed below. + // +kubebuilder:validation:Optional + CapacityUnits *CapacityUnitsParameters `json:"capacityUnits,omitempty" tf:"capacity_units,omitempty"` + + // The description of the Index. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more blocks that specify the configuration settings for any metadata applied to the documents in the index. Minimum number of 0 items. Maximum number of 500 items. 
If specified, you must define all elements, including those that are provided by default. These index fields are documented at Amazon Kendra Index documentation. For an example resource that defines these default index fields, refer to the default example above. For an example resource that appends additional index fields, refer to the append example above. All arguments for each block must be specified. Note that blocks cannot be removed since index fields cannot be deleted. This argument is detailed below. + // +kubebuilder:validation:Optional + DocumentMetadataConfigurationUpdates []DocumentMetadataConfigurationUpdatesParameters `json:"documentMetadataConfigurationUpdates,omitempty" tf:"document_metadata_configuration_updates,omitempty"` + + // The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for your production databases. Once you set the edition for an index, it can't be changed. Defaults to ENTERPRISE_EDITION + // +kubebuilder:validation:Optional + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // Specifies the name of the Index. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // An AWS Identity and Access Management (IAM) role that gives Amazon Kendra permissions to access your Amazon CloudWatch logs and metrics. This is also the role you use when you call the BatchPutDocument API to index documents from an Amazon S3 bucket. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // A block that specifies the identifier of the AWS KMS customer managed key (CMK) that's used to encrypt data indexed by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs. Detailed below. + // +kubebuilder:validation:Optional + ServerSideEncryptionConfiguration *ServerSideEncryptionConfigurationParameters `json:"serverSideEncryptionConfiguration,omitempty" tf:"server_side_encryption_configuration,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The user context policy. Valid values are ATTRIBUTE_FILTER or USER_TOKEN. For more information, refer to UserContextPolicy. Defaults to ATTRIBUTE_FILTER. + // +kubebuilder:validation:Optional + UserContextPolicy *string `json:"userContextPolicy,omitempty" tf:"user_context_policy,omitempty"` + + // A block that enables fetching access levels of groups and users from an AWS Single Sign-On identity source. To configure this, see UserGroupResolutionConfiguration. Detailed below. + // +kubebuilder:validation:Optional + UserGroupResolutionConfiguration *UserGroupResolutionConfigurationParameters `json:"userGroupResolutionConfiguration,omitempty" tf:"user_group_resolution_configuration,omitempty"` + + // A block that specifies the user token configuration. 
Detailed below. + // +kubebuilder:validation:Optional + UserTokenConfigurations *UserTokenConfigurationsParameters `json:"userTokenConfigurations,omitempty" tf:"user_token_configurations,omitempty"` +} + +type IndexStatisticsInitParameters struct { +} + +type IndexStatisticsObservation struct { + + // A block that specifies the number of question and answer topics in the index. Detailed below. + FaqStatistics []FaqStatisticsObservation `json:"faqStatistics,omitempty" tf:"faq_statistics,omitempty"` + + // A block that specifies the number of text documents indexed. Detailed below. + TextDocumentStatistics []TextDocumentStatisticsObservation `json:"textDocumentStatistics,omitempty" tf:"text_document_statistics,omitempty"` +} + +type IndexStatisticsParameters struct { +} + +type JSONTokenTypeConfigurationInitParameters struct { + + // The group attribute field. Minimum length of 1. Maximum length of 2048. + GroupAttributeField *string `json:"groupAttributeField,omitempty" tf:"group_attribute_field,omitempty"` + + // The user name attribute field. Minimum length of 1. Maximum length of 2048. + UserNameAttributeField *string `json:"userNameAttributeField,omitempty" tf:"user_name_attribute_field,omitempty"` +} + +type JSONTokenTypeConfigurationObservation struct { + + // The group attribute field. Minimum length of 1. Maximum length of 2048. + GroupAttributeField *string `json:"groupAttributeField,omitempty" tf:"group_attribute_field,omitempty"` + + // The user name attribute field. Minimum length of 1. Maximum length of 2048. + UserNameAttributeField *string `json:"userNameAttributeField,omitempty" tf:"user_name_attribute_field,omitempty"` +} + +type JSONTokenTypeConfigurationParameters struct { + + // The group attribute field. Minimum length of 1. Maximum length of 2048. + // +kubebuilder:validation:Optional + GroupAttributeField *string `json:"groupAttributeField" tf:"group_attribute_field,omitempty"` + + // The user name attribute field. Minimum length of 1. 
Maximum length of 2048. + // +kubebuilder:validation:Optional + UserNameAttributeField *string `json:"userNameAttributeField" tf:"user_name_attribute_field,omitempty"` +} + +type JwtTokenTypeConfigurationInitParameters struct { + + // The regular expression that identifies the claim. Minimum length of 1. Maximum length of 100. + ClaimRegex *string `json:"claimRegex,omitempty" tf:"claim_regex,omitempty"` + + // The group attribute field. Minimum length of 1. Maximum length of 2048. + GroupAttributeField *string `json:"groupAttributeField,omitempty" tf:"group_attribute_field,omitempty"` + + // The issuer of the token. Minimum length of 1. Maximum length of 65. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The location of the key. Valid values are URL or SECRET_MANAGER + KeyLocation *string `json:"keyLocation,omitempty" tf:"key_location,omitempty"` + + // The Amazon Resource Name (ARN) of the secret. + SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"` + + // The signing key URL. Valid pattern is ^(https?|ftp|file):\/\/([^\s]*) + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The user name attribute field. Minimum length of 1. Maximum length of 2048. + UserNameAttributeField *string `json:"userNameAttributeField,omitempty" tf:"user_name_attribute_field,omitempty"` +} + +type JwtTokenTypeConfigurationObservation struct { + + // The regular expression that identifies the claim. Minimum length of 1. Maximum length of 100. + ClaimRegex *string `json:"claimRegex,omitempty" tf:"claim_regex,omitempty"` + + // The group attribute field. Minimum length of 1. Maximum length of 2048. + GroupAttributeField *string `json:"groupAttributeField,omitempty" tf:"group_attribute_field,omitempty"` + + // The issuer of the token. Minimum length of 1. Maximum length of 65. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The location of the key. 
Valid values are URL or SECRET_MANAGER + KeyLocation *string `json:"keyLocation,omitempty" tf:"key_location,omitempty"` + + // The Amazon Resource Name (ARN) of the secret. + SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"` + + // The signing key URL. Valid pattern is ^(https?|ftp|file):\/\/([^\s]*) + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The user name attribute field. Minimum length of 1. Maximum length of 2048. + UserNameAttributeField *string `json:"userNameAttributeField,omitempty" tf:"user_name_attribute_field,omitempty"` +} + +type JwtTokenTypeConfigurationParameters struct { + + // The regular expression that identifies the claim. Minimum length of 1. Maximum length of 100. + // +kubebuilder:validation:Optional + ClaimRegex *string `json:"claimRegex,omitempty" tf:"claim_regex,omitempty"` + + // The group attribute field. Minimum length of 1. Maximum length of 2048. + // +kubebuilder:validation:Optional + GroupAttributeField *string `json:"groupAttributeField,omitempty" tf:"group_attribute_field,omitempty"` + + // The issuer of the token. Minimum length of 1. Maximum length of 65. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The location of the key. Valid values are URL or SECRET_MANAGER + // +kubebuilder:validation:Optional + KeyLocation *string `json:"keyLocation" tf:"key_location,omitempty"` + + // The Amazon Resource Name (ARN) of the secret. + // +kubebuilder:validation:Optional + SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"` + + // The signing key URL. Valid pattern is ^(https?|ftp|file):\/\/([^\s]*) + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The user name attribute field. Minimum length of 1. Maximum length of 2048. 
+ // +kubebuilder:validation:Optional + UserNameAttributeField *string `json:"userNameAttributeField,omitempty" tf:"user_name_attribute_field,omitempty"` +} + +type RelevanceInitParameters struct { + + // Specifies the time period that the boost applies to. For more information, refer to Duration. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Indicates that this field determines how "fresh" a document is. For more information, refer to Freshness. + Freshness *bool `json:"freshness,omitempty" tf:"freshness,omitempty"` + + // The relative importance of the field in the search. Larger numbers provide more of a boost than smaller numbers. Minimum value of 1. Maximum value of 10. + Importance *float64 `json:"importance,omitempty" tf:"importance,omitempty"` + + // Determines how values should be interpreted. For more information, refer to RankOrder. + RankOrder *string `json:"rankOrder,omitempty" tf:"rank_order,omitempty"` + + // A list of values that should be given a different boost when they appear in the result list. For more information, refer to ValueImportanceMap. + // +mapType=granular + ValuesImportanceMap map[string]*float64 `json:"valuesImportanceMap,omitempty" tf:"values_importance_map,omitempty"` +} + +type RelevanceObservation struct { + + // Specifies the time period that the boost applies to. For more information, refer to Duration. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Indicates that this field determines how "fresh" a document is. For more information, refer to Freshness. + Freshness *bool `json:"freshness,omitempty" tf:"freshness,omitempty"` + + // The relative importance of the field in the search. Larger numbers provide more of a boost than smaller numbers. Minimum value of 1. Maximum value of 10. + Importance *float64 `json:"importance,omitempty" tf:"importance,omitempty"` + + // Determines how values should be interpreted. For more information, refer to RankOrder. 
+ RankOrder *string `json:"rankOrder,omitempty" tf:"rank_order,omitempty"` + + // A list of values that should be given a different boost when they appear in the result list. For more information, refer to ValueImportanceMap. + // +mapType=granular + ValuesImportanceMap map[string]*float64 `json:"valuesImportanceMap,omitempty" tf:"values_importance_map,omitempty"` +} + +type RelevanceParameters struct { + + // Specifies the time period that the boost applies to. For more information, refer to Duration. + // +kubebuilder:validation:Optional + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Indicates that this field determines how "fresh" a document is. For more information, refer to Freshness. + // +kubebuilder:validation:Optional + Freshness *bool `json:"freshness,omitempty" tf:"freshness,omitempty"` + + // The relative importance of the field in the search. Larger numbers provide more of a boost than smaller numbers. Minimum value of 1. Maximum value of 10. + // +kubebuilder:validation:Optional + Importance *float64 `json:"importance,omitempty" tf:"importance,omitempty"` + + // Determines how values should be interpreted. For more information, refer to RankOrder. + // +kubebuilder:validation:Optional + RankOrder *string `json:"rankOrder,omitempty" tf:"rank_order,omitempty"` + + // A list of values that should be given a different boost when they appear in the result list. For more information, refer to ValueImportanceMap. + // +kubebuilder:validation:Optional + // +mapType=granular + ValuesImportanceMap map[string]*float64 `json:"valuesImportanceMap,omitempty" tf:"values_importance_map,omitempty"` +} + +type SearchInitParameters struct { + + // Determines whether the field is returned in the query response. The default is true. + Displayable *bool `json:"displayable,omitempty" tf:"displayable,omitempty"` + + // Indicates that the field can be used to create search facets, a count of results for each value in the field. 
The default is false. + Facetable *bool `json:"facetable,omitempty" tf:"facetable,omitempty"` + + // Determines whether the field is used in the search. If the Searchable field is true, you can use relevance tuning to manually tune how Amazon Kendra weights the field in the search. The default is true for string fields and false for number and date fields. + Searchable *bool `json:"searchable,omitempty" tf:"searchable,omitempty"` + + // Determines whether the field can be used to sort the results of a query. If you specify sorting on a field that does not have Sortable set to true, Amazon Kendra returns an exception. The default is false. + Sortable *bool `json:"sortable,omitempty" tf:"sortable,omitempty"` +} + +type SearchObservation struct { + + // Determines whether the field is returned in the query response. The default is true. + Displayable *bool `json:"displayable,omitempty" tf:"displayable,omitempty"` + + // Indicates that the field can be used to create search facets, a count of results for each value in the field. The default is false. + Facetable *bool `json:"facetable,omitempty" tf:"facetable,omitempty"` + + // Determines whether the field is used in the search. If the Searchable field is true, you can use relevance tuning to manually tune how Amazon Kendra weights the field in the search. The default is true for string fields and false for number and date fields. + Searchable *bool `json:"searchable,omitempty" tf:"searchable,omitempty"` + + // Determines whether the field can be used to sort the results of a query. If you specify sorting on a field that does not have Sortable set to true, Amazon Kendra returns an exception. The default is false. + Sortable *bool `json:"sortable,omitempty" tf:"sortable,omitempty"` +} + +type SearchParameters struct { + + // Determines whether the field is returned in the query response. The default is true. 
+ // +kubebuilder:validation:Optional + Displayable *bool `json:"displayable,omitempty" tf:"displayable,omitempty"` + + // Indicates that the field can be used to create search facets, a count of results for each value in the field. The default is false. + // +kubebuilder:validation:Optional + Facetable *bool `json:"facetable,omitempty" tf:"facetable,omitempty"` + + // Determines whether the field is used in the search. If the Searchable field is true, you can use relevance tuning to manually tune how Amazon Kendra weights the field in the search. The default is true for string fields and false for number and date fields. + // +kubebuilder:validation:Optional + Searchable *bool `json:"searchable,omitempty" tf:"searchable,omitempty"` + + // Determines whether the field can be used to sort the results of a query. If you specify sorting on a field that does not have Sortable set to true, Amazon Kendra returns an exception. The default is false. + // +kubebuilder:validation:Optional + Sortable *bool `json:"sortable,omitempty" tf:"sortable,omitempty"` +} + +type ServerSideEncryptionConfigurationInitParameters struct { + + // The identifier of the AWS KMScustomer master key (CMK). Amazon Kendra doesn't support asymmetric CMKs. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type ServerSideEncryptionConfigurationObservation struct { + + // The identifier of the AWS KMScustomer master key (CMK). Amazon Kendra doesn't support asymmetric CMKs. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type ServerSideEncryptionConfigurationParameters struct { + + // The identifier of the AWS KMScustomer master key (CMK). Amazon Kendra doesn't support asymmetric CMKs. 
+ // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type TextDocumentStatisticsInitParameters struct { +} + +type TextDocumentStatisticsObservation struct { + + // The total size, in bytes, of the indexed documents. + IndexedTextBytes *float64 `json:"indexedTextBytes,omitempty" tf:"indexed_text_bytes,omitempty"` + + // The number of text documents indexed. + IndexedTextDocumentsCount *float64 `json:"indexedTextDocumentsCount,omitempty" tf:"indexed_text_documents_count,omitempty"` +} + +type TextDocumentStatisticsParameters struct { +} + +type UserGroupResolutionConfigurationInitParameters struct { + + // The identity store provider (mode) you want to use to fetch access levels of groups and users. AWS Single Sign-On is currently the only available mode. Your users and groups must exist in an AWS SSO identity source in order to use this mode. Valid Values are AWS_SSO or NONE. + UserGroupResolutionMode *string `json:"userGroupResolutionMode,omitempty" tf:"user_group_resolution_mode,omitempty"` +} + +type UserGroupResolutionConfigurationObservation struct { + + // The identity store provider (mode) you want to use to fetch access levels of groups and users. AWS Single Sign-On is currently the only available mode. Your users and groups must exist in an AWS SSO identity source in order to use this mode. Valid Values are AWS_SSO or NONE. + UserGroupResolutionMode *string `json:"userGroupResolutionMode,omitempty" tf:"user_group_resolution_mode,omitempty"` +} + +type UserGroupResolutionConfigurationParameters struct { + + // The identity store provider (mode) you want to use to fetch access levels of groups and users. AWS Single Sign-On is currently the only available mode. Your users and groups must exist in an AWS SSO identity source in order to use this mode. Valid Values are AWS_SSO or NONE. 
+ // +kubebuilder:validation:Optional + UserGroupResolutionMode *string `json:"userGroupResolutionMode" tf:"user_group_resolution_mode,omitempty"` +} + +type UserTokenConfigurationsInitParameters struct { + + // A block that specifies the information about the JSON token type configuration. Detailed below. + JSONTokenTypeConfiguration *JSONTokenTypeConfigurationInitParameters `json:"jsonTokenTypeConfiguration,omitempty" tf:"json_token_type_configuration,omitempty"` + + // A block that specifies the information about the JWT token type configuration. Detailed below. + JwtTokenTypeConfiguration *JwtTokenTypeConfigurationInitParameters `json:"jwtTokenTypeConfiguration,omitempty" tf:"jwt_token_type_configuration,omitempty"` +} + +type UserTokenConfigurationsObservation struct { + + // A block that specifies the information about the JSON token type configuration. Detailed below. + JSONTokenTypeConfiguration *JSONTokenTypeConfigurationObservation `json:"jsonTokenTypeConfiguration,omitempty" tf:"json_token_type_configuration,omitempty"` + + // A block that specifies the information about the JWT token type configuration. Detailed below. + JwtTokenTypeConfiguration *JwtTokenTypeConfigurationObservation `json:"jwtTokenTypeConfiguration,omitempty" tf:"jwt_token_type_configuration,omitempty"` +} + +type UserTokenConfigurationsParameters struct { + + // A block that specifies the information about the JSON token type configuration. Detailed below. + // +kubebuilder:validation:Optional + JSONTokenTypeConfiguration *JSONTokenTypeConfigurationParameters `json:"jsonTokenTypeConfiguration,omitempty" tf:"json_token_type_configuration,omitempty"` + + // A block that specifies the information about the JWT token type configuration. Detailed below. 
+ // +kubebuilder:validation:Optional + JwtTokenTypeConfiguration *JwtTokenTypeConfigurationParameters `json:"jwtTokenTypeConfiguration,omitempty" tf:"jwt_token_type_configuration,omitempty"` +} + +// IndexSpec defines the desired state of Index +type IndexSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IndexParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IndexInitParameters `json:"initProvider,omitempty"` +} + +// IndexStatus defines the observed state of Index. +type IndexStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IndexObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Index is the Schema for the Indexs API. Provides an Amazon Kendra Index resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Index struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec IndexSpec `json:"spec"` + Status IndexStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IndexList contains a list of Indexs +type IndexList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Index `json:"items"` +} + +// Repository type metadata. +var ( + Index_Kind = "Index" + Index_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Index_Kind}.String() + Index_KindAPIVersion = Index_Kind + "." 
+ CRDGroupVersion.String() + Index_GroupVersionKind = CRDGroupVersion.WithKind(Index_Kind) +) + +func init() { + SchemeBuilder.Register(&Index{}, &IndexList{}) +} diff --git a/apis/kendra/v1beta2/zz_querysuggestionsblocklist_terraformed.go b/apis/kendra/v1beta2/zz_querysuggestionsblocklist_terraformed.go new file mode 100755 index 0000000000..b0f0e4e223 --- /dev/null +++ b/apis/kendra/v1beta2/zz_querysuggestionsblocklist_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this QuerySuggestionsBlockList +func (mg *QuerySuggestionsBlockList) GetTerraformResourceType() string { + return "aws_kendra_query_suggestions_block_list" +} + +// GetConnectionDetailsMapping for this QuerySuggestionsBlockList +func (tr *QuerySuggestionsBlockList) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this QuerySuggestionsBlockList +func (tr *QuerySuggestionsBlockList) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this QuerySuggestionsBlockList +func (tr *QuerySuggestionsBlockList) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this QuerySuggestionsBlockList +func (tr *QuerySuggestionsBlockList) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + 
+// GetParameters of this QuerySuggestionsBlockList +func (tr *QuerySuggestionsBlockList) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this QuerySuggestionsBlockList +func (tr *QuerySuggestionsBlockList) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this QuerySuggestionsBlockList +func (tr *QuerySuggestionsBlockList) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this QuerySuggestionsBlockList +func (tr *QuerySuggestionsBlockList) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this QuerySuggestionsBlockList using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *QuerySuggestionsBlockList) LateInitialize(attrs []byte) (bool, error) { + params := &QuerySuggestionsBlockListParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *QuerySuggestionsBlockList) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kendra/v1beta2/zz_querysuggestionsblocklist_types.go b/apis/kendra/v1beta2/zz_querysuggestionsblocklist_types.go new file mode 100755 index 0000000000..cabcfc720c --- /dev/null +++ b/apis/kendra/v1beta2/zz_querysuggestionsblocklist_types.go @@ -0,0 +1,256 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type QuerySuggestionsBlockListInitParameters struct { + + // Description for a block list. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Identifier of the index for a block list. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kendra/v1beta2.Index + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // Reference to a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDRef *v1.Reference `json:"indexIdRef,omitempty" tf:"-"` + + // Selector for a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDSelector *v1.Selector `json:"indexIdSelector,omitempty" tf:"-"` + + // Name for the block list. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // IAM (Identity and Access Management) role used to access the block list text file in S3. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // S3 path where your block list text file is located. See details below. + SourceS3Path *SourceS3PathInitParameters `json:"sourceS3Path,omitempty" tf:"source_s3_path,omitempty"` + + // Key-value map of resource tags. If configured with a provider default_tags configuration block, tags with matching keys will overwrite those defined at the provider-level. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type QuerySuggestionsBlockListObservation struct { + + // ARN of the block list. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description for a block list. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Identifier of the index for a block list. + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // Name for the block list. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Unique identifier of the block list. + QuerySuggestionsBlockListID *string `json:"querySuggestionsBlockListId,omitempty" tf:"query_suggestions_block_list_id,omitempty"` + + // IAM (Identity and Access Management) role used to access the block list text file in S3. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // S3 path where your block list text file is located. See details below. + SourceS3Path *SourceS3PathObservation `json:"sourceS3Path,omitempty" tf:"source_s3_path,omitempty"` + + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. If configured with a provider default_tags configuration block, tags with matching keys will overwrite those defined at the provider-level. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider's default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type QuerySuggestionsBlockListParameters struct { + + // Description for a block list. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Identifier of the index for a block list. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kendra/v1beta2.Index + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // Reference to a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDRef *v1.Reference `json:"indexIdRef,omitempty" tf:"-"` + + // Selector for a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDSelector *v1.Selector `json:"indexIdSelector,omitempty" tf:"-"` + + // Name for the block list. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // IAM (Identity and Access Management) role used to access the block list text file in S3. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // S3 path where your block list text file is located. See details below. + // +kubebuilder:validation:Optional + SourceS3Path *SourceS3PathParameters `json:"sourceS3Path,omitempty" tf:"source_s3_path,omitempty"` + + // Key-value map of resource tags. 
If configured with a provider default_tags configuration block, tags with matching keys will overwrite those defined at the provider-level. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SourceS3PathInitParameters struct { + + // Name of the S3 bucket that contains the file. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name of the file. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type SourceS3PathObservation struct { + + // Name of the S3 bucket that contains the file. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Name of the file. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type SourceS3PathParameters struct { + + // Name of the S3 bucket that contains the file. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. 
+ // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name of the file. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` +} + +// QuerySuggestionsBlockListSpec defines the desired state of QuerySuggestionsBlockList +type QuerySuggestionsBlockListSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider QuerySuggestionsBlockListParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider QuerySuggestionsBlockListInitParameters `json:"initProvider,omitempty"` +} + +// QuerySuggestionsBlockListStatus defines the observed state of QuerySuggestionsBlockList. +type QuerySuggestionsBlockListStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider QuerySuggestionsBlockListObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// QuerySuggestionsBlockList is the Schema for the QuerySuggestionsBlockLists API. 
provider resource for managing an aws kendra block list used for query suggestions for an index +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type QuerySuggestionsBlockList struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sourceS3Path) || (has(self.initProvider) && has(self.initProvider.sourceS3Path))",message="spec.forProvider.sourceS3Path is a required parameter" + Spec QuerySuggestionsBlockListSpec `json:"spec"` + Status QuerySuggestionsBlockListStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// QuerySuggestionsBlockListList contains a list of QuerySuggestionsBlockLists +type QuerySuggestionsBlockListList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []QuerySuggestionsBlockList `json:"items"` +} + +// Repository type metadata. 
+var ( + QuerySuggestionsBlockList_Kind = "QuerySuggestionsBlockList" + QuerySuggestionsBlockList_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: QuerySuggestionsBlockList_Kind}.String() + QuerySuggestionsBlockList_KindAPIVersion = QuerySuggestionsBlockList_Kind + "." + CRDGroupVersion.String() + QuerySuggestionsBlockList_GroupVersionKind = CRDGroupVersion.WithKind(QuerySuggestionsBlockList_Kind) +) + +func init() { + SchemeBuilder.Register(&QuerySuggestionsBlockList{}, &QuerySuggestionsBlockListList{}) +} diff --git a/apis/kendra/v1beta2/zz_thesaurus_terraformed.go b/apis/kendra/v1beta2/zz_thesaurus_terraformed.go new file mode 100755 index 0000000000..e26bd45fab --- /dev/null +++ b/apis/kendra/v1beta2/zz_thesaurus_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Thesaurus +func (mg *Thesaurus) GetTerraformResourceType() string { + return "aws_kendra_thesaurus" +} + +// GetConnectionDetailsMapping for this Thesaurus +func (tr *Thesaurus) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Thesaurus +func (tr *Thesaurus) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Thesaurus +func (tr *Thesaurus) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Thesaurus +func 
(tr *Thesaurus) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Thesaurus +func (tr *Thesaurus) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Thesaurus +func (tr *Thesaurus) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Thesaurus +func (tr *Thesaurus) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Thesaurus +func (tr *Thesaurus) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Thesaurus using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Thesaurus) LateInitialize(attrs []byte) (bool, error) { + params := &ThesaurusParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Thesaurus) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kendra/v1beta2/zz_thesaurus_types.go b/apis/kendra/v1beta2/zz_thesaurus_types.go new file mode 100755 index 0000000000..8eae9ae2b1 --- /dev/null +++ b/apis/kendra/v1beta2/zz_thesaurus_types.go @@ -0,0 +1,278 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ThesaurusInitParameters struct { + + // The description for a thesaurus. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The identifier of the index for a thesaurus. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kendra/v1beta2.Index + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // Reference to a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDRef *v1.Reference `json:"indexIdRef,omitempty" tf:"-"` + + // Selector for a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDSelector *v1.Selector `json:"indexIdSelector,omitempty" tf:"-"` + + // The name for the thesaurus. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The IAM (Identity and Access Management) role used to access the thesaurus file in S3. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The S3 path where your thesaurus file sits in S3. Detailed below. + SourceS3Path *ThesaurusSourceS3PathInitParameters `json:"sourceS3Path,omitempty" tf:"source_s3_path,omitempty"` + + // Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ThesaurusObservation struct { + + // ARN of the thesaurus. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The description for a thesaurus. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The unique identifiers of the thesaurus and index separated by a slash (/). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The identifier of the index for a thesaurus. + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // The name for the thesaurus. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The IAM (Identity and Access Management) role used to access the thesaurus file in S3. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The S3 path where your thesaurus file sits in S3. Detailed below. + SourceS3Path *ThesaurusSourceS3PathObservation `json:"sourceS3Path,omitempty" tf:"source_s3_path,omitempty"` + + // The current status of the thesaurus. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The unique identifiers of the thesaurus and index separated by a slash (/). + ThesaurusID *string `json:"thesaurusId,omitempty" tf:"thesaurus_id,omitempty"` +} + +type ThesaurusParameters struct { + + // The description for a thesaurus. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The identifier of the index for a thesaurus. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kendra/v1beta2.Index + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + IndexID *string `json:"indexId,omitempty" tf:"index_id,omitempty"` + + // Reference to a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDRef *v1.Reference `json:"indexIdRef,omitempty" tf:"-"` + + // Selector for a Index in kendra to populate indexId. + // +kubebuilder:validation:Optional + IndexIDSelector *v1.Selector `json:"indexIdSelector,omitempty" tf:"-"` + + // The name for the thesaurus. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The IAM (Identity and Access Management) role used to access the thesaurus file in S3. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The S3 path where your thesaurus file sits in S3. Detailed below. + // +kubebuilder:validation:Optional + SourceS3Path *ThesaurusSourceS3PathParameters `json:"sourceS3Path,omitempty" tf:"source_s3_path,omitempty"` + + // Key-value map of resource tags. 
If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ThesaurusSourceS3PathInitParameters struct { + + // The name of the S3 bucket that contains the file. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // The name of the file. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Reference to a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeyRef *v1.Reference `json:"keyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeySelector *v1.Selector `json:"keySelector,omitempty" tf:"-"` +} + +type ThesaurusSourceS3PathObservation struct { + + // The name of the S3 bucket that contains the file. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name of the file. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type ThesaurusSourceS3PathParameters struct { + + // The name of the S3 bucket that contains the file. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // The name of the file. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Reference to a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeyRef *v1.Reference `json:"keyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate key. + // +kubebuilder:validation:Optional + KeySelector *v1.Selector `json:"keySelector,omitempty" tf:"-"` +} + +// ThesaurusSpec defines the desired state of Thesaurus +type ThesaurusSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ThesaurusParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ThesaurusInitParameters `json:"initProvider,omitempty"` +} + +// ThesaurusStatus defines the observed state of Thesaurus. +type ThesaurusStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ThesaurusObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Thesaurus is the Schema for the Thesauruss API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws},path=thesaurus +type Thesaurus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sourceS3Path) || (has(self.initProvider) && has(self.initProvider.sourceS3Path))",message="spec.forProvider.sourceS3Path is a required parameter" + Spec ThesaurusSpec `json:"spec"` + Status ThesaurusStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// ThesaurusList contains a list of Thesauruss +type ThesaurusList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Thesaurus `json:"items"` +} + +// Repository type metadata. +var ( + Thesaurus_Kind = "Thesaurus" + Thesaurus_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Thesaurus_Kind}.String() + Thesaurus_KindAPIVersion = Thesaurus_Kind + "." + CRDGroupVersion.String() + Thesaurus_GroupVersionKind = CRDGroupVersion.WithKind(Thesaurus_Kind) +) + +func init() { + SchemeBuilder.Register(&Thesaurus{}, &ThesaurusList{}) +} diff --git a/apis/keyspaces/v1beta1/zz_generated.conversion_hubs.go b/apis/keyspaces/v1beta1/zz_generated.conversion_hubs.go index bd7f32f99b..3db8b22480 100755 --- a/apis/keyspaces/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/keyspaces/v1beta1/zz_generated.conversion_hubs.go @@ -8,6 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Keyspace) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Table) Hub() {} diff --git a/apis/keyspaces/v1beta1/zz_generated.conversion_spokes.go b/apis/keyspaces/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..5062339cc1 --- /dev/null +++ b/apis/keyspaces/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Table to the hub type. 
+func (tr *Table) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Table type. +func (tr *Table) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/keyspaces/v1beta2/zz_generated.conversion_hubs.go b/apis/keyspaces/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..0ed428e742 --- /dev/null +++ b/apis/keyspaces/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Table) Hub() {} diff --git a/apis/keyspaces/v1beta2/zz_generated.deepcopy.go b/apis/keyspaces/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..953899daed --- /dev/null +++ b/apis/keyspaces/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1206 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacitySpecificationInitParameters) DeepCopyInto(out *CapacitySpecificationInitParameters) { + *out = *in + if in.ReadCapacityUnits != nil { + in, out := &in.ReadCapacityUnits, &out.ReadCapacityUnits + *out = new(float64) + **out = **in + } + if in.ThroughputMode != nil { + in, out := &in.ThroughputMode, &out.ThroughputMode + *out = new(string) + **out = **in + } + if in.WriteCapacityUnits != nil { + in, out := &in.WriteCapacityUnits, &out.WriteCapacityUnits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacitySpecificationInitParameters. +func (in *CapacitySpecificationInitParameters) DeepCopy() *CapacitySpecificationInitParameters { + if in == nil { + return nil + } + out := new(CapacitySpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacitySpecificationObservation) DeepCopyInto(out *CapacitySpecificationObservation) { + *out = *in + if in.ReadCapacityUnits != nil { + in, out := &in.ReadCapacityUnits, &out.ReadCapacityUnits + *out = new(float64) + **out = **in + } + if in.ThroughputMode != nil { + in, out := &in.ThroughputMode, &out.ThroughputMode + *out = new(string) + **out = **in + } + if in.WriteCapacityUnits != nil { + in, out := &in.WriteCapacityUnits, &out.WriteCapacityUnits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacitySpecificationObservation. 
+func (in *CapacitySpecificationObservation) DeepCopy() *CapacitySpecificationObservation { + if in == nil { + return nil + } + out := new(CapacitySpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacitySpecificationParameters) DeepCopyInto(out *CapacitySpecificationParameters) { + *out = *in + if in.ReadCapacityUnits != nil { + in, out := &in.ReadCapacityUnits, &out.ReadCapacityUnits + *out = new(float64) + **out = **in + } + if in.ThroughputMode != nil { + in, out := &in.ThroughputMode, &out.ThroughputMode + *out = new(string) + **out = **in + } + if in.WriteCapacityUnits != nil { + in, out := &in.WriteCapacityUnits, &out.WriteCapacityUnits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacitySpecificationParameters. +func (in *CapacitySpecificationParameters) DeepCopy() *CapacitySpecificationParameters { + if in == nil { + return nil + } + out := new(CapacitySpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientSideTimestampsInitParameters) DeepCopyInto(out *ClientSideTimestampsInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientSideTimestampsInitParameters. +func (in *ClientSideTimestampsInitParameters) DeepCopy() *ClientSideTimestampsInitParameters { + if in == nil { + return nil + } + out := new(ClientSideTimestampsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientSideTimestampsObservation) DeepCopyInto(out *ClientSideTimestampsObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientSideTimestampsObservation. +func (in *ClientSideTimestampsObservation) DeepCopy() *ClientSideTimestampsObservation { + if in == nil { + return nil + } + out := new(ClientSideTimestampsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientSideTimestampsParameters) DeepCopyInto(out *ClientSideTimestampsParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientSideTimestampsParameters. +func (in *ClientSideTimestampsParameters) DeepCopy() *ClientSideTimestampsParameters { + if in == nil { + return nil + } + out := new(ClientSideTimestampsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusteringKeyInitParameters) DeepCopyInto(out *ClusteringKeyInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrderBy != nil { + in, out := &in.OrderBy, &out.OrderBy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusteringKeyInitParameters. 
+func (in *ClusteringKeyInitParameters) DeepCopy() *ClusteringKeyInitParameters { + if in == nil { + return nil + } + out := new(ClusteringKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusteringKeyObservation) DeepCopyInto(out *ClusteringKeyObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrderBy != nil { + in, out := &in.OrderBy, &out.OrderBy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusteringKeyObservation. +func (in *ClusteringKeyObservation) DeepCopy() *ClusteringKeyObservation { + if in == nil { + return nil + } + out := new(ClusteringKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusteringKeyParameters) DeepCopyInto(out *ClusteringKeyParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrderBy != nil { + in, out := &in.OrderBy, &out.OrderBy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusteringKeyParameters. +func (in *ClusteringKeyParameters) DeepCopy() *ClusteringKeyParameters { + if in == nil { + return nil + } + out := new(ClusteringKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColumnInitParameters) DeepCopyInto(out *ColumnInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnInitParameters. +func (in *ColumnInitParameters) DeepCopy() *ColumnInitParameters { + if in == nil { + return nil + } + out := new(ColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnObservation) DeepCopyInto(out *ColumnObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnObservation. +func (in *ColumnObservation) DeepCopy() *ColumnObservation { + if in == nil { + return nil + } + out := new(ColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnParameters) DeepCopyInto(out *ColumnParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnParameters. 
+func (in *ColumnParameters) DeepCopy() *ColumnParameters { + if in == nil { + return nil + } + out := new(ColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommentInitParameters) DeepCopyInto(out *CommentInitParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommentInitParameters. +func (in *CommentInitParameters) DeepCopy() *CommentInitParameters { + if in == nil { + return nil + } + out := new(CommentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommentObservation) DeepCopyInto(out *CommentObservation) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommentObservation. +func (in *CommentObservation) DeepCopy() *CommentObservation { + if in == nil { + return nil + } + out := new(CommentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommentParameters) DeepCopyInto(out *CommentParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommentParameters. 
+func (in *CommentParameters) DeepCopy() *CommentParameters { + if in == nil { + return nil + } + out := new(CommentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionSpecificationInitParameters) DeepCopyInto(out *EncryptionSpecificationInitParameters) { + *out = *in + if in.KMSKeyIdentifier != nil { + in, out := &in.KMSKeyIdentifier, &out.KMSKeyIdentifier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSpecificationInitParameters. +func (in *EncryptionSpecificationInitParameters) DeepCopy() *EncryptionSpecificationInitParameters { + if in == nil { + return nil + } + out := new(EncryptionSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionSpecificationObservation) DeepCopyInto(out *EncryptionSpecificationObservation) { + *out = *in + if in.KMSKeyIdentifier != nil { + in, out := &in.KMSKeyIdentifier, &out.KMSKeyIdentifier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSpecificationObservation. +func (in *EncryptionSpecificationObservation) DeepCopy() *EncryptionSpecificationObservation { + if in == nil { + return nil + } + out := new(EncryptionSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionSpecificationParameters) DeepCopyInto(out *EncryptionSpecificationParameters) { + *out = *in + if in.KMSKeyIdentifier != nil { + in, out := &in.KMSKeyIdentifier, &out.KMSKeyIdentifier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSpecificationParameters. +func (in *EncryptionSpecificationParameters) DeepCopy() *EncryptionSpecificationParameters { + if in == nil { + return nil + } + out := new(EncryptionSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionKeyInitParameters) DeepCopyInto(out *PartitionKeyInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionKeyInitParameters. +func (in *PartitionKeyInitParameters) DeepCopy() *PartitionKeyInitParameters { + if in == nil { + return nil + } + out := new(PartitionKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionKeyObservation) DeepCopyInto(out *PartitionKeyObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionKeyObservation. 
+func (in *PartitionKeyObservation) DeepCopy() *PartitionKeyObservation { + if in == nil { + return nil + } + out := new(PartitionKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionKeyParameters) DeepCopyInto(out *PartitionKeyParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionKeyParameters. +func (in *PartitionKeyParameters) DeepCopy() *PartitionKeyParameters { + if in == nil { + return nil + } + out := new(PartitionKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointInTimeRecoveryInitParameters) DeepCopyInto(out *PointInTimeRecoveryInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointInTimeRecoveryInitParameters. +func (in *PointInTimeRecoveryInitParameters) DeepCopy() *PointInTimeRecoveryInitParameters { + if in == nil { + return nil + } + out := new(PointInTimeRecoveryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointInTimeRecoveryObservation) DeepCopyInto(out *PointInTimeRecoveryObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointInTimeRecoveryObservation. 
+func (in *PointInTimeRecoveryObservation) DeepCopy() *PointInTimeRecoveryObservation { + if in == nil { + return nil + } + out := new(PointInTimeRecoveryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointInTimeRecoveryParameters) DeepCopyInto(out *PointInTimeRecoveryParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointInTimeRecoveryParameters. +func (in *PointInTimeRecoveryParameters) DeepCopy() *PointInTimeRecoveryParameters { + if in == nil { + return nil + } + out := new(PointInTimeRecoveryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaDefinitionInitParameters) DeepCopyInto(out *SchemaDefinitionInitParameters) { + *out = *in + if in.ClusteringKey != nil { + in, out := &in.ClusteringKey, &out.ClusteringKey + *out = make([]ClusteringKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = make([]PartitionKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StaticColumn != nil { + in, out := &in.StaticColumn, &out.StaticColumn + *out = make([]StaticColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaDefinitionInitParameters. 
+func (in *SchemaDefinitionInitParameters) DeepCopy() *SchemaDefinitionInitParameters { + if in == nil { + return nil + } + out := new(SchemaDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaDefinitionObservation) DeepCopyInto(out *SchemaDefinitionObservation) { + *out = *in + if in.ClusteringKey != nil { + in, out := &in.ClusteringKey, &out.ClusteringKey + *out = make([]ClusteringKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = make([]PartitionKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StaticColumn != nil { + in, out := &in.StaticColumn, &out.StaticColumn + *out = make([]StaticColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaDefinitionObservation. +func (in *SchemaDefinitionObservation) DeepCopy() *SchemaDefinitionObservation { + if in == nil { + return nil + } + out := new(SchemaDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaDefinitionParameters) DeepCopyInto(out *SchemaDefinitionParameters) { + *out = *in + if in.ClusteringKey != nil { + in, out := &in.ClusteringKey, &out.ClusteringKey + *out = make([]ClusteringKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = make([]PartitionKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StaticColumn != nil { + in, out := &in.StaticColumn, &out.StaticColumn + *out = make([]StaticColumnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaDefinitionParameters. +func (in *SchemaDefinitionParameters) DeepCopy() *SchemaDefinitionParameters { + if in == nil { + return nil + } + out := new(SchemaDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticColumnInitParameters) DeepCopyInto(out *StaticColumnInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticColumnInitParameters. +func (in *StaticColumnInitParameters) DeepCopy() *StaticColumnInitParameters { + if in == nil { + return nil + } + out := new(StaticColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticColumnObservation) DeepCopyInto(out *StaticColumnObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticColumnObservation. +func (in *StaticColumnObservation) DeepCopy() *StaticColumnObservation { + if in == nil { + return nil + } + out := new(StaticColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticColumnParameters) DeepCopyInto(out *StaticColumnParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticColumnParameters. +func (in *StaticColumnParameters) DeepCopy() *StaticColumnParameters { + if in == nil { + return nil + } + out := new(StaticColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLInitParameters) DeepCopyInto(out *TTLInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLInitParameters. +func (in *TTLInitParameters) DeepCopy() *TTLInitParameters { + if in == nil { + return nil + } + out := new(TTLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TTLObservation) DeepCopyInto(out *TTLObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLObservation. +func (in *TTLObservation) DeepCopy() *TTLObservation { + if in == nil { + return nil + } + out := new(TTLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLParameters) DeepCopyInto(out *TTLParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLParameters. +func (in *TTLParameters) DeepCopy() *TTLParameters { + if in == nil { + return nil + } + out := new(TTLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { + *out = *in + if in.CapacitySpecification != nil { + in, out := &in.CapacitySpecification, &out.CapacitySpecification + *out = new(CapacitySpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientSideTimestamps != nil { + in, out := &in.ClientSideTimestamps, &out.ClientSideTimestamps + *out = new(ClientSideTimestampsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(CommentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultTimeToLive != nil { + in, out := &in.DefaultTimeToLive, &out.DefaultTimeToLive + *out = new(float64) + **out = **in + } + if in.EncryptionSpecification != nil { + in, out := &in.EncryptionSpecification, &out.EncryptionSpecification + *out = new(EncryptionSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyspaceName != nil { + in, out := &in.KeyspaceName, &out.KeyspaceName + *out = new(string) + **out = **in + } + if in.KeyspaceNameRef != nil { + in, out := &in.KeyspaceNameRef, &out.KeyspaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyspaceNameSelector != nil { + in, out := &in.KeyspaceNameSelector, &out.KeyspaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PointInTimeRecovery != nil { + in, out := &in.PointInTimeRecovery, &out.PointInTimeRecovery + *out = new(PointInTimeRecoveryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SchemaDefinition != nil { + in, out := &in.SchemaDefinition, &out.SchemaDefinition + *out = new(SchemaDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(TTLInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableInitParameters. +func (in *TableInitParameters) DeepCopy() *TableInitParameters { + if in == nil { + return nil + } + out := new(TableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableList) DeepCopyInto(out *TableList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Table, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableList. +func (in *TableList) DeepCopy() *TableList { + if in == nil { + return nil + } + out := new(TableList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableObservation) DeepCopyInto(out *TableObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CapacitySpecification != nil { + in, out := &in.CapacitySpecification, &out.CapacitySpecification + *out = new(CapacitySpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientSideTimestamps != nil { + in, out := &in.ClientSideTimestamps, &out.ClientSideTimestamps + *out = new(ClientSideTimestampsObservation) + (*in).DeepCopyInto(*out) + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(CommentObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultTimeToLive != nil { + in, out := &in.DefaultTimeToLive, &out.DefaultTimeToLive + *out = new(float64) + **out = **in + } + if in.EncryptionSpecification != nil { + in, out := &in.EncryptionSpecification, &out.EncryptionSpecification + *out = new(EncryptionSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyspaceName != nil { + in, out := &in.KeyspaceName, &out.KeyspaceName + *out = new(string) + **out = **in + } + if in.PointInTimeRecovery != nil { + in, out := &in.PointInTimeRecovery, &out.PointInTimeRecovery + *out = new(PointInTimeRecoveryObservation) + (*in).DeepCopyInto(*out) + } + if in.SchemaDefinition != nil { + in, out := &in.SchemaDefinition, &out.SchemaDefinition + *out = new(SchemaDefinitionObservation) + (*in).DeepCopyInto(*out) + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(TTLObservation) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := 
&inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableObservation. +func (in *TableObservation) DeepCopy() *TableObservation { + if in == nil { + return nil + } + out := new(TableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableParameters) DeepCopyInto(out *TableParameters) { + *out = *in + if in.CapacitySpecification != nil { + in, out := &in.CapacitySpecification, &out.CapacitySpecification + *out = new(CapacitySpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientSideTimestamps != nil { + in, out := &in.ClientSideTimestamps, &out.ClientSideTimestamps + *out = new(ClientSideTimestampsParameters) + (*in).DeepCopyInto(*out) + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(CommentParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultTimeToLive != nil { + in, out := &in.DefaultTimeToLive, &out.DefaultTimeToLive + *out = new(float64) + **out = **in + } + if in.EncryptionSpecification != nil { + in, out := &in.EncryptionSpecification, &out.EncryptionSpecification + *out = new(EncryptionSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyspaceName != nil { + in, out := &in.KeyspaceName, &out.KeyspaceName + *out = new(string) + **out = **in + } + if in.KeyspaceNameRef != nil { + in, out := &in.KeyspaceNameRef, &out.KeyspaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyspaceNameSelector 
!= nil { + in, out := &in.KeyspaceNameSelector, &out.KeyspaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PointInTimeRecovery != nil { + in, out := &in.PointInTimeRecovery, &out.PointInTimeRecovery + *out = new(PointInTimeRecoveryParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SchemaDefinition != nil { + in, out := &in.SchemaDefinition, &out.SchemaDefinition + *out = new(SchemaDefinitionParameters) + (*in).DeepCopyInto(*out) + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(TTLParameters) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableParameters. +func (in *TableParameters) DeepCopy() *TableParameters { + if in == nil { + return nil + } + out := new(TableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableSpec) DeepCopyInto(out *TableSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSpec. 
+func (in *TableSpec) DeepCopy() *TableSpec { + if in == nil { + return nil + } + out := new(TableSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableStatus) DeepCopyInto(out *TableStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableStatus. +func (in *TableStatus) DeepCopy() *TableStatus { + if in == nil { + return nil + } + out := new(TableStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/keyspaces/v1beta2/zz_generated.managed.go b/apis/keyspaces/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..25acdb1a58 --- /dev/null +++ b/apis/keyspaces/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Table. +func (mg *Table) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Table. +func (mg *Table) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Table. +func (mg *Table) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Table. +func (mg *Table) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Table. +func (mg *Table) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Table. 
+func (mg *Table) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Table. +func (mg *Table) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Table. +func (mg *Table) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Table. +func (mg *Table) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Table. +func (mg *Table) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Table. +func (mg *Table) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Table. +func (mg *Table) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/keyspaces/v1beta2/zz_generated.managedlist.go b/apis/keyspaces/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..bfa82abb5e --- /dev/null +++ b/apis/keyspaces/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this TableList. 
+func (l *TableList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/keyspaces/v1beta2/zz_generated.resolvers.go b/apis/keyspaces/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..c86c69f0a2 --- /dev/null +++ b/apis/keyspaces/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Table. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Table) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("keyspaces.aws.upbound.io", "v1beta1", "Keyspace", "KeyspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyspaceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KeyspaceNameRef, + Selector: mg.Spec.ForProvider.KeyspaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyspaceName") + } + mg.Spec.ForProvider.KeyspaceName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.KeyspaceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("keyspaces.aws.upbound.io", "v1beta1", "Keyspace", "KeyspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyspaceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KeyspaceNameRef, + Selector: mg.Spec.InitProvider.KeyspaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyspaceName") + } + mg.Spec.InitProvider.KeyspaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyspaceNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/keyspaces/v1beta2/zz_groupversion_info.go b/apis/keyspaces/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..54b150b290 --- /dev/null +++ b/apis/keyspaces/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=keyspaces.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "keyspaces.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/keyspaces/v1beta2/zz_table_terraformed.go b/apis/keyspaces/v1beta2/zz_table_terraformed.go new file mode 100755 index 0000000000..42c5b94fbf --- /dev/null +++ b/apis/keyspaces/v1beta2/zz_table_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Table +func (mg *Table) GetTerraformResourceType() string { + return "aws_keyspaces_table" +} + +// GetConnectionDetailsMapping for this Table +func (tr *Table) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Table +func (tr *Table) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Table +func (tr *Table) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Table +func (tr *Table) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Table +func (tr *Table) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Table +func (tr *Table) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Table +func (tr *Table) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Table +func (tr *Table) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Table using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Table) LateInitialize(attrs []byte) (bool, error) { + params := &TableParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Table) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/keyspaces/v1beta2/zz_table_types.go b/apis/keyspaces/v1beta2/zz_table_types.go new file mode 100755 index 0000000000..31044e5946 --- /dev/null +++ b/apis/keyspaces/v1beta2/zz_table_types.go @@ -0,0 +1,519 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CapacitySpecificationInitParameters struct { + + // The throughput capacity specified for read operations defined in read capacity units (RCUs). + ReadCapacityUnits *float64 `json:"readCapacityUnits,omitempty" tf:"read_capacity_units,omitempty"` + + // The read/write throughput capacity mode for a table. Valid values: PAY_PER_REQUEST, PROVISIONED. The default value is PAY_PER_REQUEST. + ThroughputMode *string `json:"throughputMode,omitempty" tf:"throughput_mode,omitempty"` + + // The throughput capacity specified for write operations defined in write capacity units (WCUs). 
+ WriteCapacityUnits *float64 `json:"writeCapacityUnits,omitempty" tf:"write_capacity_units,omitempty"` +} + +type CapacitySpecificationObservation struct { + + // The throughput capacity specified for read operations defined in read capacity units (RCUs). + ReadCapacityUnits *float64 `json:"readCapacityUnits,omitempty" tf:"read_capacity_units,omitempty"` + + // The read/write throughput capacity mode for a table. Valid values: PAY_PER_REQUEST, PROVISIONED. The default value is PAY_PER_REQUEST. + ThroughputMode *string `json:"throughputMode,omitempty" tf:"throughput_mode,omitempty"` + + // The throughput capacity specified for write operations defined in write capacity units (WCUs). + WriteCapacityUnits *float64 `json:"writeCapacityUnits,omitempty" tf:"write_capacity_units,omitempty"` +} + +type CapacitySpecificationParameters struct { + + // The throughput capacity specified for read operations defined in read capacity units (RCUs). + // +kubebuilder:validation:Optional + ReadCapacityUnits *float64 `json:"readCapacityUnits,omitempty" tf:"read_capacity_units,omitempty"` + + // The read/write throughput capacity mode for a table. Valid values: PAY_PER_REQUEST, PROVISIONED. The default value is PAY_PER_REQUEST. + // +kubebuilder:validation:Optional + ThroughputMode *string `json:"throughputMode,omitempty" tf:"throughput_mode,omitempty"` + + // The throughput capacity specified for write operations defined in write capacity units (WCUs). + // +kubebuilder:validation:Optional + WriteCapacityUnits *float64 `json:"writeCapacityUnits,omitempty" tf:"write_capacity_units,omitempty"` +} + +type ClientSideTimestampsInitParameters struct { + + // Shows how to enable client-side timestamps settings for the specified table. Valid values: ENABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ClientSideTimestampsObservation struct { + + // Shows how to enable client-side timestamps settings for the specified table. Valid values: ENABLED. 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ClientSideTimestampsParameters struct { + + // Shows how to enable client-side timestamps settings for the specified table. Valid values: ENABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +type ClusteringKeyInitParameters struct { + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The order modifier. Valid values: ASC, DESC. + OrderBy *string `json:"orderBy,omitempty" tf:"order_by,omitempty"` +} + +type ClusteringKeyObservation struct { + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The order modifier. Valid values: ASC, DESC. + OrderBy *string `json:"orderBy,omitempty" tf:"order_by,omitempty"` +} + +type ClusteringKeyParameters struct { + + // The name of the column. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The order modifier. Valid values: ASC, DESC. + // +kubebuilder:validation:Optional + OrderBy *string `json:"orderBy" tf:"order_by,omitempty"` +} + +type ColumnInitParameters struct { + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the column. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnObservation struct { + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The data type of the column. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnParameters struct { + + // The name of the column. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The data type of the column. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type CommentInitParameters struct { + + // A description of the table. + Message *string `json:"message,omitempty" tf:"message,omitempty"` +} + +type CommentObservation struct { + + // A description of the table. + Message *string `json:"message,omitempty" tf:"message,omitempty"` +} + +type CommentParameters struct { + + // A description of the table. + // +kubebuilder:validation:Optional + Message *string `json:"message,omitempty" tf:"message,omitempty"` +} + +type EncryptionSpecificationInitParameters struct { + + // The Amazon Resource Name (ARN) of the customer managed KMS key. + KMSKeyIdentifier *string `json:"kmsKeyIdentifier,omitempty" tf:"kms_key_identifier,omitempty"` + + // The encryption option specified for the table. Valid values: AWS_OWNED_KMS_KEY, CUSTOMER_MANAGED_KMS_KEY. The default value is AWS_OWNED_KMS_KEY. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncryptionSpecificationObservation struct { + + // The Amazon Resource Name (ARN) of the customer managed KMS key. + KMSKeyIdentifier *string `json:"kmsKeyIdentifier,omitempty" tf:"kms_key_identifier,omitempty"` + + // The encryption option specified for the table. Valid values: AWS_OWNED_KMS_KEY, CUSTOMER_MANAGED_KMS_KEY. The default value is AWS_OWNED_KMS_KEY. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncryptionSpecificationParameters struct { + + // The Amazon Resource Name (ARN) of the customer managed KMS key. + // +kubebuilder:validation:Optional + KMSKeyIdentifier *string `json:"kmsKeyIdentifier,omitempty" tf:"kms_key_identifier,omitempty"` + + // The encryption option specified for the table. 
Valid values: AWS_OWNED_KMS_KEY, CUSTOMER_MANAGED_KMS_KEY. The default value is AWS_OWNED_KMS_KEY. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PartitionKeyInitParameters struct { + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PartitionKeyObservation struct { + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PartitionKeyParameters struct { + + // The name of the column. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type PointInTimeRecoveryInitParameters struct { + + // Shows how to enable client-side timestamps settings for the specified table. Valid values: ENABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type PointInTimeRecoveryObservation struct { + + // Shows how to enable client-side timestamps settings for the specified table. Valid values: ENABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type PointInTimeRecoveryParameters struct { + + // Shows how to enable client-side timestamps settings for the specified table. Valid values: ENABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type SchemaDefinitionInitParameters struct { + + // The columns that are part of the clustering key of the table. + ClusteringKey []ClusteringKeyInitParameters `json:"clusteringKey,omitempty" tf:"clustering_key,omitempty"` + + // The regular columns of the table. + Column []ColumnInitParameters `json:"column,omitempty" tf:"column,omitempty"` + + // The columns that are part of the partition key of the table . + PartitionKey []PartitionKeyInitParameters `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The columns that have been defined as STATIC. Static columns store values that are shared by all rows in the same partition. 
+ StaticColumn []StaticColumnInitParameters `json:"staticColumn,omitempty" tf:"static_column,omitempty"` +} + +type SchemaDefinitionObservation struct { + + // The columns that are part of the clustering key of the table. + ClusteringKey []ClusteringKeyObservation `json:"clusteringKey,omitempty" tf:"clustering_key,omitempty"` + + // The regular columns of the table. + Column []ColumnObservation `json:"column,omitempty" tf:"column,omitempty"` + + // The columns that are part of the partition key of the table . + PartitionKey []PartitionKeyObservation `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The columns that have been defined as STATIC. Static columns store values that are shared by all rows in the same partition. + StaticColumn []StaticColumnObservation `json:"staticColumn,omitempty" tf:"static_column,omitempty"` +} + +type SchemaDefinitionParameters struct { + + // The columns that are part of the clustering key of the table. + // +kubebuilder:validation:Optional + ClusteringKey []ClusteringKeyParameters `json:"clusteringKey,omitempty" tf:"clustering_key,omitempty"` + + // The regular columns of the table. + // +kubebuilder:validation:Optional + Column []ColumnParameters `json:"column" tf:"column,omitempty"` + + // The columns that are part of the partition key of the table . + // +kubebuilder:validation:Optional + PartitionKey []PartitionKeyParameters `json:"partitionKey" tf:"partition_key,omitempty"` + + // The columns that have been defined as STATIC. Static columns store values that are shared by all rows in the same partition. + // +kubebuilder:validation:Optional + StaticColumn []StaticColumnParameters `json:"staticColumn,omitempty" tf:"static_column,omitempty"` +} + +type StaticColumnInitParameters struct { + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StaticColumnObservation struct { + + // The name of the column. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StaticColumnParameters struct { + + // The name of the column. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type TTLInitParameters struct { + + // Shows how to enable client-side timestamps settings for the specified table. Valid values: ENABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type TTLObservation struct { + + // Shows how to enable client-side timestamps settings for the specified table. Valid values: ENABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type TTLParameters struct { + + // Shows how to enable client-side timestamps settings for the specified table. Valid values: ENABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +type TableInitParameters struct { + + // Specifies the read/write throughput capacity mode for the table. + CapacitySpecification *CapacitySpecificationInitParameters `json:"capacitySpecification,omitempty" tf:"capacity_specification,omitempty"` + + // Enables client-side timestamps for the table. By default, the setting is disabled. + ClientSideTimestamps *ClientSideTimestampsInitParameters `json:"clientSideTimestamps,omitempty" tf:"client_side_timestamps,omitempty"` + + // A description of the table. + Comment *CommentInitParameters `json:"comment,omitempty" tf:"comment,omitempty"` + + // The default Time to Live setting in seconds for the table. More information can be found in the Developer Guide. + DefaultTimeToLive *float64 `json:"defaultTimeToLive,omitempty" tf:"default_time_to_live,omitempty"` + + // Specifies how the encryption key for encryption at rest is managed for the table. More information can be found in the Developer Guide. 
+ EncryptionSpecification *EncryptionSpecificationInitParameters `json:"encryptionSpecification,omitempty" tf:"encryption_specification,omitempty"` + + // The name of the keyspace that the table is going to be created in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/keyspaces/v1beta1.Keyspace + KeyspaceName *string `json:"keyspaceName,omitempty" tf:"keyspace_name,omitempty"` + + // Reference to a Keyspace in keyspaces to populate keyspaceName. + // +kubebuilder:validation:Optional + KeyspaceNameRef *v1.Reference `json:"keyspaceNameRef,omitempty" tf:"-"` + + // Selector for a Keyspace in keyspaces to populate keyspaceName. + // +kubebuilder:validation:Optional + KeyspaceNameSelector *v1.Selector `json:"keyspaceNameSelector,omitempty" tf:"-"` + + // Specifies if point-in-time recovery is enabled or disabled for the table. More information can be found in the Developer Guide. + PointInTimeRecovery *PointInTimeRecoveryInitParameters `json:"pointInTimeRecovery,omitempty" tf:"point_in_time_recovery,omitempty"` + + // Describes the schema of the table. + SchemaDefinition *SchemaDefinitionInitParameters `json:"schemaDefinition,omitempty" tf:"schema_definition,omitempty"` + + // Enables Time to Live custom settings for the table. More information can be found in the Developer Guide. + TTL *TTLInitParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // The name of the table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TableObservation struct { + + // The ARN of the table. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specifies the read/write throughput capacity mode for the table. 
+ CapacitySpecification *CapacitySpecificationObservation `json:"capacitySpecification,omitempty" tf:"capacity_specification,omitempty"` + + // Enables client-side timestamps for the table. By default, the setting is disabled. + ClientSideTimestamps *ClientSideTimestampsObservation `json:"clientSideTimestamps,omitempty" tf:"client_side_timestamps,omitempty"` + + // A description of the table. + Comment *CommentObservation `json:"comment,omitempty" tf:"comment,omitempty"` + + // The default Time to Live setting in seconds for the table. More information can be found in the Developer Guide. + DefaultTimeToLive *float64 `json:"defaultTimeToLive,omitempty" tf:"default_time_to_live,omitempty"` + + // Specifies how the encryption key for encryption at rest is managed for the table. More information can be found in the Developer Guide. + EncryptionSpecification *EncryptionSpecificationObservation `json:"encryptionSpecification,omitempty" tf:"encryption_specification,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the keyspace that the table is going to be created in. + KeyspaceName *string `json:"keyspaceName,omitempty" tf:"keyspace_name,omitempty"` + + // Specifies if point-in-time recovery is enabled or disabled for the table. More information can be found in the Developer Guide. + PointInTimeRecovery *PointInTimeRecoveryObservation `json:"pointInTimeRecovery,omitempty" tf:"point_in_time_recovery,omitempty"` + + // Describes the schema of the table. + SchemaDefinition *SchemaDefinitionObservation `json:"schemaDefinition,omitempty" tf:"schema_definition,omitempty"` + + // Enables Time to Live custom settings for the table. More information can be found in the Developer Guide. + TTL *TTLObservation `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // The name of the table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type TableParameters struct { + + // Specifies the read/write throughput capacity mode for the table. + // +kubebuilder:validation:Optional + CapacitySpecification *CapacitySpecificationParameters `json:"capacitySpecification,omitempty" tf:"capacity_specification,omitempty"` + + // Enables client-side timestamps for the table. By default, the setting is disabled. + // +kubebuilder:validation:Optional + ClientSideTimestamps *ClientSideTimestampsParameters `json:"clientSideTimestamps,omitempty" tf:"client_side_timestamps,omitempty"` + + // A description of the table. + // +kubebuilder:validation:Optional + Comment *CommentParameters `json:"comment,omitempty" tf:"comment,omitempty"` + + // The default Time to Live setting in seconds for the table. More information can be found in the Developer Guide. + // +kubebuilder:validation:Optional + DefaultTimeToLive *float64 `json:"defaultTimeToLive,omitempty" tf:"default_time_to_live,omitempty"` + + // Specifies how the encryption key for encryption at rest is managed for the table. More information can be found in the Developer Guide. + // +kubebuilder:validation:Optional + EncryptionSpecification *EncryptionSpecificationParameters `json:"encryptionSpecification,omitempty" tf:"encryption_specification,omitempty"` + + // The name of the keyspace that the table is going to be created in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/keyspaces/v1beta1.Keyspace + // +kubebuilder:validation:Optional + KeyspaceName *string `json:"keyspaceName,omitempty" tf:"keyspace_name,omitempty"` + + // Reference to a Keyspace in keyspaces to populate keyspaceName. 
+ // +kubebuilder:validation:Optional + KeyspaceNameRef *v1.Reference `json:"keyspaceNameRef,omitempty" tf:"-"` + + // Selector for a Keyspace in keyspaces to populate keyspaceName. + // +kubebuilder:validation:Optional + KeyspaceNameSelector *v1.Selector `json:"keyspaceNameSelector,omitempty" tf:"-"` + + // Specifies if point-in-time recovery is enabled or disabled for the table. More information can be found in the Developer Guide. + // +kubebuilder:validation:Optional + PointInTimeRecovery *PointInTimeRecoveryParameters `json:"pointInTimeRecovery,omitempty" tf:"point_in_time_recovery,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Describes the schema of the table. + // +kubebuilder:validation:Optional + SchemaDefinition *SchemaDefinitionParameters `json:"schemaDefinition,omitempty" tf:"schema_definition,omitempty"` + + // Enables Time to Live custom settings for the table. More information can be found in the Developer Guide. + // +kubebuilder:validation:Optional + TTL *TTLParameters `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // The name of the table. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// TableSpec defines the desired state of Table +type TableSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TableParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TableInitParameters `json:"initProvider,omitempty"` +} + +// TableStatus defines the observed state of Table. +type TableStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TableObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Table is the Schema for the Tables API. Provides a Keyspaces Table. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Table struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.schemaDefinition) || (has(self.initProvider) && has(self.initProvider.schemaDefinition))",message="spec.forProvider.schemaDefinition is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tableName) || 
(has(self.initProvider) && has(self.initProvider.tableName))",message="spec.forProvider.tableName is a required parameter" + Spec TableSpec `json:"spec"` + Status TableStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TableList contains a list of Tables +type TableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Table `json:"items"` +} + +// Repository type metadata. +var ( + Table_Kind = "Table" + Table_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Table_Kind}.String() + Table_KindAPIVersion = Table_Kind + "." + CRDGroupVersion.String() + Table_GroupVersionKind = CRDGroupVersion.WithKind(Table_Kind) +) + +func init() { + SchemeBuilder.Register(&Table{}, &TableList{}) +} diff --git a/apis/kinesis/v1beta1/zz_generated.conversion_hubs.go b/apis/kinesis/v1beta1/zz_generated.conversion_hubs.go index 67a315a926..ec2bf50126 100755 --- a/apis/kinesis/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/kinesis/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Stream) Hub() {} - // Hub marks this type as a conversion hub. func (tr *StreamConsumer) Hub() {} diff --git a/apis/kinesis/v1beta1/zz_generated.conversion_spokes.go b/apis/kinesis/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..8900b92697 --- /dev/null +++ b/apis/kinesis/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Stream to the hub type. 
+func (tr *Stream) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Stream type. +func (tr *Stream) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/kinesis/v1beta1/zz_generated.resolvers.go b/apis/kinesis/v1beta1/zz_generated.resolvers.go index c235887826..a4d02c78c4 100644 --- a/apis/kinesis/v1beta1/zz_generated.resolvers.go +++ b/apis/kinesis/v1beta1/zz_generated.resolvers.go @@ -77,7 +77,7 @@ func (mg *StreamConsumer) ResolveReferences(ctx context.Context, c client.Reader var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta1", "Stream", "StreamList") + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -96,7 +96,7 @@ func (mg *StreamConsumer) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StreamArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta1", "Stream", "StreamList") + m, l, err = 
apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/kinesis/v1beta1/zz_streamconsumer_types.go b/apis/kinesis/v1beta1/zz_streamconsumer_types.go index 2d76b7f0f3..7de938f4c6 100755 --- a/apis/kinesis/v1beta1/zz_streamconsumer_types.go +++ b/apis/kinesis/v1beta1/zz_streamconsumer_types.go @@ -19,7 +19,7 @@ type StreamConsumerInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // – Amazon Resource Name (ARN) of the data stream the consumer is registered with. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta1.Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` @@ -62,7 +62,7 @@ type StreamConsumerParameters struct { Region *string `json:"region" tf:"-"` // – Amazon Resource Name (ARN) of the data stream the consumer is registered with. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta1.Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() // +kubebuilder:validation:Optional StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` diff --git a/apis/kinesis/v1beta2/zz_generated.conversion_hubs.go b/apis/kinesis/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2f8b78f8b9 --- /dev/null +++ b/apis/kinesis/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Stream) Hub() {} diff --git a/apis/kinesis/v1beta2/zz_generated.deepcopy.go b/apis/kinesis/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..93f8c20279 --- /dev/null +++ b/apis/kinesis/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,435 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Stream) DeepCopyInto(out *Stream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stream. 
+func (in *Stream) DeepCopy() *Stream { + if in == nil { + return nil + } + out := new(Stream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Stream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInitParameters) DeepCopyInto(out *StreamInitParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.EnforceConsumerDeletion != nil { + in, out := &in.EnforceConsumerDeletion, &out.EnforceConsumerDeletion + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(float64) + **out = **in + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(float64) + **out = **in + } + if in.ShardLevelMetrics != nil { + in, out := &in.ShardLevelMetrics, &out.ShardLevelMetrics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StreamModeDetails != nil { + in, out := &in.StreamModeDetails, &out.StreamModeDetails + *out = new(StreamModeDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, 
len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInitParameters. +func (in *StreamInitParameters) DeepCopy() *StreamInitParameters { + if in == nil { + return nil + } + out := new(StreamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamList) DeepCopyInto(out *StreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Stream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamList. +func (in *StreamList) DeepCopy() *StreamList { + if in == nil { + return nil + } + out := new(StreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamModeDetailsInitParameters) DeepCopyInto(out *StreamModeDetailsInitParameters) { + *out = *in + if in.StreamMode != nil { + in, out := &in.StreamMode, &out.StreamMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamModeDetailsInitParameters. 
+func (in *StreamModeDetailsInitParameters) DeepCopy() *StreamModeDetailsInitParameters { + if in == nil { + return nil + } + out := new(StreamModeDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamModeDetailsObservation) DeepCopyInto(out *StreamModeDetailsObservation) { + *out = *in + if in.StreamMode != nil { + in, out := &in.StreamMode, &out.StreamMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamModeDetailsObservation. +func (in *StreamModeDetailsObservation) DeepCopy() *StreamModeDetailsObservation { + if in == nil { + return nil + } + out := new(StreamModeDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamModeDetailsParameters) DeepCopyInto(out *StreamModeDetailsParameters) { + *out = *in + if in.StreamMode != nil { + in, out := &in.StreamMode, &out.StreamMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamModeDetailsParameters. +func (in *StreamModeDetailsParameters) DeepCopy() *StreamModeDetailsParameters { + if in == nil { + return nil + } + out := new(StreamModeDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamObservation) DeepCopyInto(out *StreamObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.EnforceConsumerDeletion != nil { + in, out := &in.EnforceConsumerDeletion, &out.EnforceConsumerDeletion + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(float64) + **out = **in + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(float64) + **out = **in + } + if in.ShardLevelMetrics != nil { + in, out := &in.ShardLevelMetrics, &out.ShardLevelMetrics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StreamModeDetails != nil { + in, out := &in.StreamModeDetails, &out.StreamModeDetails + *out = new(StreamModeDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new StreamObservation. +func (in *StreamObservation) DeepCopy() *StreamObservation { + if in == nil { + return nil + } + out := new(StreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamParameters) DeepCopyInto(out *StreamParameters) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.EnforceConsumerDeletion != nil { + in, out := &in.EnforceConsumerDeletion, &out.EnforceConsumerDeletion + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(float64) + **out = **in + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(float64) + **out = **in + } + if in.ShardLevelMetrics != nil { + in, out := &in.ShardLevelMetrics, &out.ShardLevelMetrics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StreamModeDetails != nil { + in, out := &in.StreamModeDetails, &out.StreamModeDetails + *out = new(StreamModeDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, 
val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamParameters. +func (in *StreamParameters) DeepCopy() *StreamParameters { + if in == nil { + return nil + } + out := new(StreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamSpec) DeepCopyInto(out *StreamSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamSpec. +func (in *StreamSpec) DeepCopy() *StreamSpec { + if in == nil { + return nil + } + out := new(StreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamStatus) DeepCopyInto(out *StreamStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamStatus. +func (in *StreamStatus) DeepCopy() *StreamStatus { + if in == nil { + return nil + } + out := new(StreamStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kinesis/v1beta2/zz_generated.managed.go b/apis/kinesis/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..0867e0bba5 --- /dev/null +++ b/apis/kinesis/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. 
DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Stream. +func (mg *Stream) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Stream. +func (mg *Stream) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Stream. +func (mg *Stream) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Stream. +func (mg *Stream) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Stream. +func (mg *Stream) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Stream. +func (mg *Stream) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Stream. +func (mg *Stream) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Stream. +func (mg *Stream) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Stream. +func (mg *Stream) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Stream. +func (mg *Stream) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Stream. +func (mg *Stream) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Stream. 
+func (mg *Stream) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kinesis/v1beta2/zz_generated.managedlist.go b/apis/kinesis/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..8e2be3708a --- /dev/null +++ b/apis/kinesis/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this StreamList. +func (l *StreamList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kinesis/v1beta2/zz_generated.resolvers.go b/apis/kinesis/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..93d53225d3 --- /dev/null +++ b/apis/kinesis/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Stream. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Stream) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/kinesis/v1beta2/zz_groupversion_info.go b/apis/kinesis/v1beta2/zz_groupversion_info.go new file mode 100755 index 
0000000000..e71420604d --- /dev/null +++ b/apis/kinesis/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kinesis.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "kinesis.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/kinesis/v1beta2/zz_stream_terraformed.go b/apis/kinesis/v1beta2/zz_stream_terraformed.go new file mode 100755 index 0000000000..3a26758466 --- /dev/null +++ b/apis/kinesis/v1beta2/zz_stream_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Stream +func (mg *Stream) GetTerraformResourceType() string { + return "aws_kinesis_stream" +} + +// GetConnectionDetailsMapping for this Stream +func (tr *Stream) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Stream +func (tr *Stream) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Stream +func (tr *Stream) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Stream +func (tr *Stream) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Stream +func (tr *Stream) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Stream +func (tr *Stream) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Stream +func (tr *Stream) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of 
this Stream +func (tr *Stream) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Stream using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Stream) LateInitialize(attrs []byte) (bool, error) { + params := &StreamParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("EnforceConsumerDeletion")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Stream) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/kinesis/v1beta2/zz_stream_types.go b/apis/kinesis/v1beta2/zz_stream_types.go new file mode 100755 index 0000000000..fb61818c5c --- /dev/null +++ b/apis/kinesis/v1beta2/zz_stream_types.go @@ -0,0 +1,224 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type StreamInitParameters struct { + + // The encryption type to use. The only acceptable values are NONE or KMS. The default value is NONE. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is false. + EnforceConsumerDeletion *bool `json:"enforceConsumerDeletion,omitempty" tf:"enforce_consumer_deletion,omitempty"` + + // The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias alias/aws/kinesis. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. 
// StreamInitParameters holds the fields of spec.initProvider for a Stream.
type StreamInitParameters struct {

	// The encryption type to use. The only acceptable values are NONE or KMS. The default value is NONE.
	EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"`

	// A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is false.
	EnforceConsumerDeletion *bool `json:"enforceConsumerDeletion,omitempty" tf:"enforce_consumer_deletion,omitempty"`

	// The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias alias/aws/kinesis.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key
	KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`

	// Reference to a Key in kms to populate kmsKeyId.
	// +kubebuilder:validation:Optional
	KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"`

	// Selector for a Key in kms to populate kmsKeyId.
	// +kubebuilder:validation:Optional
	KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"`

	// Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
	RetentionPeriod *float64 `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"`

	// – The number of shards that the stream will use. If the stream_mode is PROVISIONED, this field is required.
	// Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See Amazon Kinesis Streams for more.
	ShardCount *float64 `json:"shardCount,omitempty" tf:"shard_count,omitempty"`

	// A list of shard-level CloudWatch metrics which can be enabled for the stream. See Monitoring with CloudWatch for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
	// +listType=set
	ShardLevelMetrics []*string `json:"shardLevelMetrics,omitempty" tf:"shard_level_metrics,omitempty"`

	// Indicates the capacity mode of the data stream. Detailed below.
	StreamModeDetails *StreamModeDetailsInitParameters `json:"streamModeDetails,omitempty" tf:"stream_mode_details,omitempty"`

	// Key-value map of resource tags.
	// +mapType=granular
	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
}

// StreamModeDetailsInitParameters holds the init parameters of the stream_mode_details block.
type StreamModeDetailsInitParameters struct {

	// Specifies the capacity mode of the stream. Must be either PROVISIONED or ON_DEMAND.
	StreamMode *string `json:"streamMode,omitempty" tf:"stream_mode,omitempty"`
}

// StreamModeDetailsObservation holds the observed state of the stream_mode_details block.
type StreamModeDetailsObservation struct {

	// Specifies the capacity mode of the stream. Must be either PROVISIONED or ON_DEMAND.
	StreamMode *string `json:"streamMode,omitempty" tf:"stream_mode,omitempty"`
}

// StreamModeDetailsParameters holds the desired state of the stream_mode_details block.
type StreamModeDetailsParameters struct {

	// Specifies the capacity mode of the stream. Must be either PROVISIONED or ON_DEMAND.
	// +kubebuilder:validation:Optional
	StreamMode *string `json:"streamMode" tf:"stream_mode,omitempty"`
}

// StreamObservation holds the observed state (status.atProvider) of a Stream.
type StreamObservation struct {

	// The Amazon Resource Name (ARN) specifying the Stream (same as id)
	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`

	// The encryption type to use. The only acceptable values are NONE or KMS. The default value is NONE.
	EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"`

	// A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is false.
	EnforceConsumerDeletion *bool `json:"enforceConsumerDeletion,omitempty" tf:"enforce_consumer_deletion,omitempty"`

	// The unique Stream id
	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias alias/aws/kinesis.
	KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`

	// Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
	RetentionPeriod *float64 `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"`

	// – The number of shards that the stream will use. If the stream_mode is PROVISIONED, this field is required.
	// Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See Amazon Kinesis Streams for more.
	ShardCount *float64 `json:"shardCount,omitempty" tf:"shard_count,omitempty"`

	// A list of shard-level CloudWatch metrics which can be enabled for the stream. See Monitoring with CloudWatch for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
	// +listType=set
	ShardLevelMetrics []*string `json:"shardLevelMetrics,omitempty" tf:"shard_level_metrics,omitempty"`

	// Indicates the capacity mode of the data stream. Detailed below.
	StreamModeDetails *StreamModeDetailsObservation `json:"streamModeDetails,omitempty" tf:"stream_mode_details,omitempty"`

	// Key-value map of resource tags.
	// +mapType=granular
	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`

	// A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
	// +mapType=granular
	TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"`
}

// StreamParameters holds the desired state (spec.forProvider) of a Stream.
type StreamParameters struct {

	// The encryption type to use. The only acceptable values are NONE or KMS. The default value is NONE.
	// +kubebuilder:validation:Optional
	EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"`

	// A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is false.
	// +kubebuilder:validation:Optional
	EnforceConsumerDeletion *bool `json:"enforceConsumerDeletion,omitempty" tf:"enforce_consumer_deletion,omitempty"`

	// The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias alias/aws/kinesis.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key
	// +kubebuilder:validation:Optional
	KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`

	// Reference to a Key in kms to populate kmsKeyId.
	// +kubebuilder:validation:Optional
	KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"`

	// Selector for a Key in kms to populate kmsKeyId.
	// +kubebuilder:validation:Optional
	KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"`

	// Region is the region you'd like your resource to be created in.
	// +upjet:crd:field:TFTag=-
	// +kubebuilder:validation:Required
	Region *string `json:"region" tf:"-"`

	// Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
	// +kubebuilder:validation:Optional
	RetentionPeriod *float64 `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"`

	// – The number of shards that the stream will use. If the stream_mode is PROVISIONED, this field is required.
	// Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See Amazon Kinesis Streams for more.
	// +kubebuilder:validation:Optional
	ShardCount *float64 `json:"shardCount,omitempty" tf:"shard_count,omitempty"`

	// A list of shard-level CloudWatch metrics which can be enabled for the stream. See Monitoring with CloudWatch for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
	// +kubebuilder:validation:Optional
	// +listType=set
	ShardLevelMetrics []*string `json:"shardLevelMetrics,omitempty" tf:"shard_level_metrics,omitempty"`

	// Indicates the capacity mode of the data stream. Detailed below.
	// +kubebuilder:validation:Optional
	StreamModeDetails *StreamModeDetailsParameters `json:"streamModeDetails,omitempty" tf:"stream_mode_details,omitempty"`

	// Key-value map of resource tags.
	// +kubebuilder:validation:Optional
	// +mapType=granular
	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
}

// StreamSpec defines the desired state of Stream
type StreamSpec struct {
	v1.ResourceSpec `json:",inline"`
	ForProvider     StreamParameters `json:"forProvider"`
	// THIS IS A BETA FIELD. It will be honored
	// unless the Management Policies feature flag is disabled.
	// InitProvider holds the same fields as ForProvider, with the exception
	// of Identifier and other resource reference fields. The fields that are
	// in InitProvider are merged into ForProvider when the resource is created.
	// The same fields are also added to the terraform ignore_changes hook, to
	// avoid updating them after creation. This is useful for fields that are
	// required on creation, but we do not desire to update them after creation,
	// for example because of an external controller is managing them, like an
	// autoscaler.
	InitProvider StreamInitParameters `json:"initProvider,omitempty"`
}

// StreamStatus defines the observed state of Stream.
type StreamStatus struct {
	v1.ResourceStatus `json:",inline"`
	AtProvider        StreamObservation `json:"atProvider,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// Stream is the Schema for the Streams API. Provides a AWS Kinesis Stream
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
type Stream struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              StreamSpec   `json:"spec"`
	Status            StreamStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// StreamList contains a list of Streams
type StreamList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Stream `json:"items"`
}

// Repository type metadata.
var (
	Stream_Kind             = "Stream"
	Stream_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: Stream_Kind}.String()
	Stream_KindAPIVersion   = Stream_Kind + "." + CRDGroupVersion.String()
	Stream_GroupVersionKind = CRDGroupVersion.WithKind(Stream_Kind)
)

func init() {
	SchemeBuilder.Register(&Stream{}, &StreamList{})
}
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Application to the hub type. +func (tr *Application) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Application type. +func (tr *Application) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/kinesisanalytics/v1beta2/zz_application_terraformed.go b/apis/kinesisanalytics/v1beta2/zz_application_terraformed.go new file mode 100755 index 0000000000..1fcb3764ee --- /dev/null +++ b/apis/kinesisanalytics/v1beta2/zz_application_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Application +func (mg *Application) GetTerraformResourceType() string { + return "aws_kinesis_analytics_application" +} + +// GetConnectionDetailsMapping for this Application +func (tr *Application) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Application +func (tr *Application) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Application +func (tr *Application) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Application +func (tr *Application) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Application +func (tr *Application) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Application +func (tr *Application) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Application +func (tr *Application) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Application +func (tr *Application) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Application using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Application) LateInitialize(attrs []byte) (bool, error) { + params := &ApplicationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Application) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kinesisanalytics/v1beta2/zz_application_types.go b/apis/kinesisanalytics/v1beta2/zz_application_types.go new file mode 100755 index 0000000000..7abb5797d6 --- /dev/null +++ b/apis/kinesisanalytics/v1beta2/zz_application_types.go @@ -0,0 +1,1236 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationInitParameters struct { + + // The CloudWatch log stream options to monitor application errors. + // See CloudWatch Logging Options below for more details. + CloudwatchLoggingOptions *CloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // SQL Code to transform input data, and generate output. + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Description of the application. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Input configuration of the application. See Inputs below for more details. + Inputs *InputsInitParameters `json:"inputs,omitempty" tf:"inputs,omitempty"` + + // Output destination configuration of the application. See Outputs below for more details. + Outputs []OutputsInitParameters `json:"outputs,omitempty" tf:"outputs,omitempty"` + + // An S3 Reference Data Source for the application. + // See Reference Data Sources below for more details. 
+ ReferenceDataSources *ReferenceDataSourcesInitParameters `json:"referenceDataSources,omitempty" tf:"reference_data_sources,omitempty"` + + // Whether to start or stop the Kinesis Analytics Application. To start an application, an input with a defined starting_position must be configured. + // To modify an application's starting position, first stop the application by setting start_application = false, then update starting_position and set start_application = true. + StartApplication *bool `json:"startApplication,omitempty" tf:"start_application,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ApplicationObservation struct { + + // The ARN of the Kinesis Analytics Appliation. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The CloudWatch log stream options to monitor application errors. + // See CloudWatch Logging Options below for more details. + CloudwatchLoggingOptions *CloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // SQL Code to transform input data, and generate output. + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // The Timestamp when the application version was created. + CreateTimestamp *string `json:"createTimestamp,omitempty" tf:"create_timestamp,omitempty"` + + // Description of the application. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ARN of the Kinesis Analytics Application. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Input configuration of the application. See Inputs below for more details. + Inputs *InputsObservation `json:"inputs,omitempty" tf:"inputs,omitempty"` + + // The Timestamp when the application was last updated. 
+ LastUpdateTimestamp *string `json:"lastUpdateTimestamp,omitempty" tf:"last_update_timestamp,omitempty"` + + // Output destination configuration of the application. See Outputs below for more details. + Outputs []OutputsObservation `json:"outputs,omitempty" tf:"outputs,omitempty"` + + // An S3 Reference Data Source for the application. + // See Reference Data Sources below for more details. + ReferenceDataSources *ReferenceDataSourcesObservation `json:"referenceDataSources,omitempty" tf:"reference_data_sources,omitempty"` + + // Whether to start or stop the Kinesis Analytics Application. To start an application, an input with a defined starting_position must be configured. + // To modify an application's starting position, first stop the application by setting start_application = false, then update starting_position and set start_application = true. + StartApplication *bool `json:"startApplication,omitempty" tf:"start_application,omitempty"` + + // The Status of the application. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The Version of the application. + Version *float64 `json:"version,omitempty" tf:"version,omitempty"` +} + +type ApplicationParameters struct { + + // The CloudWatch log stream options to monitor application errors. + // See CloudWatch Logging Options below for more details. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *CloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // SQL Code to transform input data, and generate output. 
+ // +kubebuilder:validation:Optional + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Description of the application. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Input configuration of the application. See Inputs below for more details. + // +kubebuilder:validation:Optional + Inputs *InputsParameters `json:"inputs,omitempty" tf:"inputs,omitempty"` + + // Output destination configuration of the application. See Outputs below for more details. + // +kubebuilder:validation:Optional + Outputs []OutputsParameters `json:"outputs,omitempty" tf:"outputs,omitempty"` + + // An S3 Reference Data Source for the application. + // See Reference Data Sources below for more details. + // +kubebuilder:validation:Optional + ReferenceDataSources *ReferenceDataSourcesParameters `json:"referenceDataSources,omitempty" tf:"reference_data_sources,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Whether to start or stop the Kinesis Analytics Application. To start an application, an input with a defined starting_position must be configured. + // To modify an application's starting position, first stop the application by setting start_application = false, then update starting_position and set start_application = true. + // +kubebuilder:validation:Optional + StartApplication *bool `json:"startApplication,omitempty" tf:"start_application,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CloudwatchLoggingOptionsInitParameters struct { + + // The ARN of the CloudWatch Log Stream. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Stream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + LogStreamArn *string `json:"logStreamArn,omitempty" tf:"log_stream_arn,omitempty"` + + // Reference to a Stream in cloudwatchlogs to populate logStreamArn. + // +kubebuilder:validation:Optional + LogStreamArnRef *v1.Reference `json:"logStreamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in cloudwatchlogs to populate logStreamArn. + // +kubebuilder:validation:Optional + LogStreamArnSelector *v1.Selector `json:"logStreamArnSelector,omitempty" tf:"-"` + + // The ARN of the IAM Role used to send application messages. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type CloudwatchLoggingOptionsObservation struct { + + // The ARN of the Kinesis Analytics Application. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ARN of the CloudWatch Log Stream. + LogStreamArn *string `json:"logStreamArn,omitempty" tf:"log_stream_arn,omitempty"` + + // The ARN of the IAM Role used to send application messages. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type CloudwatchLoggingOptionsParameters struct { + + // The ARN of the CloudWatch Log Stream. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Stream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + LogStreamArn *string `json:"logStreamArn,omitempty" tf:"log_stream_arn,omitempty"` + + // Reference to a Stream in cloudwatchlogs to populate logStreamArn. + // +kubebuilder:validation:Optional + LogStreamArnRef *v1.Reference `json:"logStreamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in cloudwatchlogs to populate logStreamArn. + // +kubebuilder:validation:Optional + LogStreamArnSelector *v1.Selector `json:"logStreamArnSelector,omitempty" tf:"-"` + + // The ARN of the IAM Role used to send application messages. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type CsvInitParameters struct { + + // The Column Delimiter. + RecordColumnDelimiter *string `json:"recordColumnDelimiter,omitempty" tf:"record_column_delimiter,omitempty"` + + // The Row Delimiter. + RecordRowDelimiter *string `json:"recordRowDelimiter,omitempty" tf:"record_row_delimiter,omitempty"` +} + +type CsvObservation struct { + + // The Column Delimiter. + RecordColumnDelimiter *string `json:"recordColumnDelimiter,omitempty" tf:"record_column_delimiter,omitempty"` + + // The Row Delimiter. 
+ RecordRowDelimiter *string `json:"recordRowDelimiter,omitempty" tf:"record_row_delimiter,omitempty"` +} + +type CsvParameters struct { + + // The Column Delimiter. + // +kubebuilder:validation:Optional + RecordColumnDelimiter *string `json:"recordColumnDelimiter" tf:"record_column_delimiter,omitempty"` + + // The Row Delimiter. + // +kubebuilder:validation:Optional + RecordRowDelimiter *string `json:"recordRowDelimiter" tf:"record_row_delimiter,omitempty"` +} + +type InputsInitParameters struct { + + // The Kinesis Firehose configuration for the streaming source. Conflicts with kinesis_stream. + // See Kinesis Firehose below for more details. + KinesisFirehose *KinesisFirehoseInitParameters `json:"kinesisFirehose,omitempty" tf:"kinesis_firehose,omitempty"` + + // The Kinesis Stream configuration for the streaming source. Conflicts with kinesis_firehose. + // See Kinesis Stream below for more details. + KinesisStream *KinesisStreamInitParameters `json:"kinesisStream,omitempty" tf:"kinesis_stream,omitempty"` + + // The Name Prefix to use when creating an in-application stream. + NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"` + + // The number of Parallel in-application streams to create. + // See Parallelism below for more details. + Parallelism *ParallelismInitParameters `json:"parallelism,omitempty" tf:"parallelism,omitempty"` + + // The Processing Configuration to transform records as they are received from the stream. + // See Processing Configuration below for more details. + ProcessingConfiguration *ProcessingConfigurationInitParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The Schema format of the data in the streaming source. See Source Schema below for more details. + Schema *SchemaInitParameters `json:"schema,omitempty" tf:"schema,omitempty"` + + // The point at which the application starts processing records from the streaming source. 
+ // See Starting Position Configuration below for more details. + StartingPositionConfiguration []StartingPositionConfigurationInitParameters `json:"startingPositionConfiguration,omitempty" tf:"starting_position_configuration,omitempty"` +} + +type InputsObservation struct { + + // The ARN of the Kinesis Analytics Application. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Kinesis Firehose configuration for the streaming source. Conflicts with kinesis_stream. + // See Kinesis Firehose below for more details. + KinesisFirehose *KinesisFirehoseObservation `json:"kinesisFirehose,omitempty" tf:"kinesis_firehose,omitempty"` + + // The Kinesis Stream configuration for the streaming source. Conflicts with kinesis_firehose. + // See Kinesis Stream below for more details. + KinesisStream *KinesisStreamObservation `json:"kinesisStream,omitempty" tf:"kinesis_stream,omitempty"` + + // The Name Prefix to use when creating an in-application stream. + NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"` + + // The number of Parallel in-application streams to create. + // See Parallelism below for more details. + Parallelism *ParallelismObservation `json:"parallelism,omitempty" tf:"parallelism,omitempty"` + + // The Processing Configuration to transform records as they are received from the stream. + // See Processing Configuration below for more details. + ProcessingConfiguration *ProcessingConfigurationObservation `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The Schema format of the data in the streaming source. See Source Schema below for more details. + Schema *SchemaObservation `json:"schema,omitempty" tf:"schema,omitempty"` + + // The point at which the application starts processing records from the streaming source. + // See Starting Position Configuration below for more details. 
+ StartingPositionConfiguration []StartingPositionConfigurationObservation `json:"startingPositionConfiguration,omitempty" tf:"starting_position_configuration,omitempty"` + + StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"` +} + +type InputsParameters struct { + + // The Kinesis Firehose configuration for the streaming source. Conflicts with kinesis_stream. + // See Kinesis Firehose below for more details. + // +kubebuilder:validation:Optional + KinesisFirehose *KinesisFirehoseParameters `json:"kinesisFirehose,omitempty" tf:"kinesis_firehose,omitempty"` + + // The Kinesis Stream configuration for the streaming source. Conflicts with kinesis_firehose. + // See Kinesis Stream below for more details. + // +kubebuilder:validation:Optional + KinesisStream *KinesisStreamParameters `json:"kinesisStream,omitempty" tf:"kinesis_stream,omitempty"` + + // The Name Prefix to use when creating an in-application stream. + // +kubebuilder:validation:Optional + NamePrefix *string `json:"namePrefix" tf:"name_prefix,omitempty"` + + // The number of Parallel in-application streams to create. + // See Parallelism below for more details. + // +kubebuilder:validation:Optional + Parallelism *ParallelismParameters `json:"parallelism,omitempty" tf:"parallelism,omitempty"` + + // The Processing Configuration to transform records as they are received from the stream. + // See Processing Configuration below for more details. + // +kubebuilder:validation:Optional + ProcessingConfiguration *ProcessingConfigurationParameters `json:"processingConfiguration,omitempty" tf:"processing_configuration,omitempty"` + + // The Schema format of the data in the streaming source. See Source Schema below for more details. + // +kubebuilder:validation:Optional + Schema *SchemaParameters `json:"schema" tf:"schema,omitempty"` + + // The point at which the application starts processing records from the streaming source. 
+ // See Starting Position Configuration below for more details. + // +kubebuilder:validation:Optional + StartingPositionConfiguration []StartingPositionConfigurationParameters `json:"startingPositionConfiguration,omitempty" tf:"starting_position_configuration,omitempty"` +} + +type JSONInitParameters struct { + + // Path to the top-level parent that contains the records. + RecordRowPath *string `json:"recordRowPath,omitempty" tf:"record_row_path,omitempty"` +} + +type JSONObservation struct { + + // Path to the top-level parent that contains the records. + RecordRowPath *string `json:"recordRowPath,omitempty" tf:"record_row_path,omitempty"` +} + +type JSONParameters struct { + + // Path to the top-level parent that contains the records. + // +kubebuilder:validation:Optional + RecordRowPath *string `json:"recordRowPath" tf:"record_row_path,omitempty"` +} + +type KinesisFirehoseInitParameters struct { + + // The ARN of the Lambda function. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type KinesisFirehoseObservation struct { + + // The ARN of the Lambda function. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type KinesisFirehoseParameters struct { + + // The ARN of the Lambda function. + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type KinesisStreamInitParameters struct { + + // The ARN of the Lambda function. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a Stream in kinesis to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` + + // The IAM Role ARN to read the data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type KinesisStreamObservation struct { + + // The ARN of the Lambda function. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type KinesisStreamParameters struct { + + // The ARN of the Lambda function. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a Stream in kinesis to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` + + // The IAM Role ARN to read the data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type LambdaInitParameters struct { + + // The ARN of the Lambda function. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type LambdaObservation struct { + + // The ARN of the Lambda function. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type LambdaParameters struct { + + // The ARN of the Lambda function. 
+ // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type MappingParametersCsvInitParameters struct { + + // The Column Delimiter. + RecordColumnDelimiter *string `json:"recordColumnDelimiter,omitempty" tf:"record_column_delimiter,omitempty"` + + // The Row Delimiter. + RecordRowDelimiter *string `json:"recordRowDelimiter,omitempty" tf:"record_row_delimiter,omitempty"` +} + +type MappingParametersCsvObservation struct { + + // The Column Delimiter. + RecordColumnDelimiter *string `json:"recordColumnDelimiter,omitempty" tf:"record_column_delimiter,omitempty"` + + // The Row Delimiter. + RecordRowDelimiter *string `json:"recordRowDelimiter,omitempty" tf:"record_row_delimiter,omitempty"` +} + +type MappingParametersCsvParameters struct { + + // The Column Delimiter. + // +kubebuilder:validation:Optional + RecordColumnDelimiter *string `json:"recordColumnDelimiter" tf:"record_column_delimiter,omitempty"` + + // The Row Delimiter. + // +kubebuilder:validation:Optional + RecordRowDelimiter *string `json:"recordRowDelimiter" tf:"record_row_delimiter,omitempty"` +} + +type MappingParametersInitParameters struct { + + // Mapping information when the record format uses delimiters. + // See CSV Mapping Parameters below for more details. + Csv *CsvInitParameters `json:"csv,omitempty" tf:"csv,omitempty"` + + // Mapping information when JSON is the record format on the streaming source. + // See JSON Mapping Parameters below for more details. + JSON *JSONInitParameters `json:"json,omitempty" tf:"json,omitempty"` +} + +type MappingParametersJSONInitParameters struct { + + // Path to the top-level parent that contains the records. 
+ RecordRowPath *string `json:"recordRowPath,omitempty" tf:"record_row_path,omitempty"` +} + +type MappingParametersJSONObservation struct { + + // Path to the top-level parent that contains the records. + RecordRowPath *string `json:"recordRowPath,omitempty" tf:"record_row_path,omitempty"` +} + +type MappingParametersJSONParameters struct { + + // Path to the top-level parent that contains the records. + // +kubebuilder:validation:Optional + RecordRowPath *string `json:"recordRowPath" tf:"record_row_path,omitempty"` +} + +type MappingParametersObservation struct { + + // Mapping information when the record format uses delimiters. + // See CSV Mapping Parameters below for more details. + Csv *CsvObservation `json:"csv,omitempty" tf:"csv,omitempty"` + + // Mapping information when JSON is the record format on the streaming source. + // See JSON Mapping Parameters below for more details. + JSON *JSONObservation `json:"json,omitempty" tf:"json,omitempty"` +} + +type MappingParametersParameters struct { + + // Mapping information when the record format uses delimiters. + // See CSV Mapping Parameters below for more details. + // +kubebuilder:validation:Optional + Csv *CsvParameters `json:"csv,omitempty" tf:"csv,omitempty"` + + // Mapping information when JSON is the record format on the streaming source. + // See JSON Mapping Parameters below for more details. + // +kubebuilder:validation:Optional + JSON *JSONParameters `json:"json,omitempty" tf:"json,omitempty"` +} + +type OutputsInitParameters struct { + + // The Kinesis Firehose configuration for the destination stream. Conflicts with kinesis_stream. + // See Kinesis Firehose below for more details. + KinesisFirehose *OutputsKinesisFirehoseInitParameters `json:"kinesisFirehose,omitempty" tf:"kinesis_firehose,omitempty"` + + // The Kinesis Stream configuration for the destination stream. Conflicts with kinesis_firehose. + // See Kinesis Stream below for more details. 
+ KinesisStream *OutputsKinesisStreamInitParameters `json:"kinesisStream,omitempty" tf:"kinesis_stream,omitempty"` + + // The Lambda function destination. See Lambda below for more details. + Lambda *OutputsLambdaInitParameters `json:"lambda,omitempty" tf:"lambda,omitempty"` + + // The Name of the in-application stream. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Schema format of the data written to the destination. See Destination Schema below for more details. + Schema *OutputsSchemaInitParameters `json:"schema,omitempty" tf:"schema,omitempty"` +} + +type OutputsKinesisFirehoseInitParameters struct { + + // The ARN of the Lambda function. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a DeliveryStream in firehose to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` + + // The IAM Role ARN to read the data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type OutputsKinesisFirehoseObservation struct { + + // The ARN of the Lambda function. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type OutputsKinesisFirehoseParameters struct { + + // The ARN of the Lambda function. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a DeliveryStream in firehose to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` + + // The IAM Role ARN to read the data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` +} + +type OutputsKinesisStreamInitParameters struct { + + // The ARN of the Lambda function. 
+ ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type OutputsKinesisStreamObservation struct { + + // The ARN of the Lambda function. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type OutputsKinesisStreamParameters struct { + + // The ARN of the Lambda function. + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type OutputsLambdaInitParameters struct { + + // The ARN of the Lambda function. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type OutputsLambdaObservation struct { + + // The ARN of the Lambda function. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type OutputsLambdaParameters struct { + + // The ARN of the Lambda function. + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn" tf:"resource_arn,omitempty"` + + // The IAM Role ARN to read the data. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type OutputsObservation struct { + + // The ARN of the Kinesis Analytics Application. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Kinesis Firehose configuration for the destination stream. Conflicts with kinesis_stream. + // See Kinesis Firehose below for more details. 
+ KinesisFirehose *OutputsKinesisFirehoseObservation `json:"kinesisFirehose,omitempty" tf:"kinesis_firehose,omitempty"` + + // The Kinesis Stream configuration for the destination stream. Conflicts with kinesis_firehose. + // See Kinesis Stream below for more details. + KinesisStream *OutputsKinesisStreamObservation `json:"kinesisStream,omitempty" tf:"kinesis_stream,omitempty"` + + // The Lambda function destination. See Lambda below for more details. + Lambda *OutputsLambdaObservation `json:"lambda,omitempty" tf:"lambda,omitempty"` + + // The Name of the in-application stream. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Schema format of the data written to the destination. See Destination Schema below for more details. + Schema *OutputsSchemaObservation `json:"schema,omitempty" tf:"schema,omitempty"` +} + +type OutputsParameters struct { + + // The Kinesis Firehose configuration for the destination stream. Conflicts with kinesis_stream. + // See Kinesis Firehose below for more details. + // +kubebuilder:validation:Optional + KinesisFirehose *OutputsKinesisFirehoseParameters `json:"kinesisFirehose,omitempty" tf:"kinesis_firehose,omitempty"` + + // The Kinesis Stream configuration for the destination stream. Conflicts with kinesis_firehose. + // See Kinesis Stream below for more details. + // +kubebuilder:validation:Optional + KinesisStream *OutputsKinesisStreamParameters `json:"kinesisStream,omitempty" tf:"kinesis_stream,omitempty"` + + // The Lambda function destination. See Lambda below for more details. + // +kubebuilder:validation:Optional + Lambda *OutputsLambdaParameters `json:"lambda,omitempty" tf:"lambda,omitempty"` + + // The Name of the in-application stream. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Schema format of the data written to the destination. See Destination Schema below for more details. 
+ // +kubebuilder:validation:Optional + Schema *OutputsSchemaParameters `json:"schema" tf:"schema,omitempty"` +} + +type OutputsSchemaInitParameters struct { + + // The Format Type of the records on the output stream. Can be CSV or JSON. + RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type OutputsSchemaObservation struct { + + // The Format Type of the records on the output stream. Can be CSV or JSON. + RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type OutputsSchemaParameters struct { + + // The Format Type of the records on the output stream. Can be CSV or JSON. + // +kubebuilder:validation:Optional + RecordFormatType *string `json:"recordFormatType" tf:"record_format_type,omitempty"` +} + +type ParallelismInitParameters struct { + + // The Count of streams. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type ParallelismObservation struct { + + // The Count of streams. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type ParallelismParameters struct { + + // The Count of streams. + // +kubebuilder:validation:Optional + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type ProcessingConfigurationInitParameters struct { + + // The Lambda function configuration. See Lambda below for more details. + Lambda *LambdaInitParameters `json:"lambda,omitempty" tf:"lambda,omitempty"` +} + +type ProcessingConfigurationObservation struct { + + // The Lambda function configuration. See Lambda below for more details. + Lambda *LambdaObservation `json:"lambda,omitempty" tf:"lambda,omitempty"` +} + +type ProcessingConfigurationParameters struct { + + // The Lambda function configuration. See Lambda below for more details. 
+ // +kubebuilder:validation:Optional + Lambda *LambdaParameters `json:"lambda" tf:"lambda,omitempty"` +} + +type RecordColumnsInitParameters struct { + + // The Mapping reference to the data element. + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // Name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The SQL Type of the column. + SQLType *string `json:"sqlType,omitempty" tf:"sql_type,omitempty"` +} + +type RecordColumnsObservation struct { + + // The Mapping reference to the data element. + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // Name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The SQL Type of the column. + SQLType *string `json:"sqlType,omitempty" tf:"sql_type,omitempty"` +} + +type RecordColumnsParameters struct { + + // The Mapping reference to the data element. + // +kubebuilder:validation:Optional + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // Name of the column. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The SQL Type of the column. + // +kubebuilder:validation:Optional + SQLType *string `json:"sqlType" tf:"sql_type,omitempty"` +} + +type RecordFormatInitParameters struct { + + // The Mapping Information for the record format. + // See Mapping Parameters below for more details. + MappingParameters *MappingParametersInitParameters `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` +} + +type RecordFormatMappingParametersInitParameters struct { + + // Mapping information when the record format uses delimiters. + // See CSV Mapping Parameters below for more details. + Csv *MappingParametersCsvInitParameters `json:"csv,omitempty" tf:"csv,omitempty"` + + // Mapping information when JSON is the record format on the streaming source. + // See JSON Mapping Parameters below for more details. 
+ JSON *MappingParametersJSONInitParameters `json:"json,omitempty" tf:"json,omitempty"` +} + +type RecordFormatMappingParametersObservation struct { + + // Mapping information when the record format uses delimiters. + // See CSV Mapping Parameters below for more details. + Csv *MappingParametersCsvObservation `json:"csv,omitempty" tf:"csv,omitempty"` + + // Mapping information when JSON is the record format on the streaming source. + // See JSON Mapping Parameters below for more details. + JSON *MappingParametersJSONObservation `json:"json,omitempty" tf:"json,omitempty"` +} + +type RecordFormatMappingParametersParameters struct { + + // Mapping information when the record format uses delimiters. + // See CSV Mapping Parameters below for more details. + // +kubebuilder:validation:Optional + Csv *MappingParametersCsvParameters `json:"csv,omitempty" tf:"csv,omitempty"` + + // Mapping information when JSON is the record format on the streaming source. + // See JSON Mapping Parameters below for more details. + // +kubebuilder:validation:Optional + JSON *MappingParametersJSONParameters `json:"json,omitempty" tf:"json,omitempty"` +} + +type RecordFormatObservation struct { + + // The Mapping Information for the record format. + // See Mapping Parameters below for more details. + MappingParameters *MappingParametersObservation `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` + + // The Format Type of the records on the output stream. Can be CSV or JSON. + RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type RecordFormatParameters struct { + + // The Mapping Information for the record format. + // See Mapping Parameters below for more details. 
+ // +kubebuilder:validation:Optional + MappingParameters *MappingParametersParameters `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` +} + +type ReferenceDataSourcesInitParameters struct { + + // The S3 configuration for the reference data source. See S3 Reference below for more details. + S3 *S3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` + + // The Schema format of the data in the streaming source. See Source Schema below for more details. + Schema *ReferenceDataSourcesSchemaInitParameters `json:"schema,omitempty" tf:"schema,omitempty"` + + // The in-application Table Name. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type ReferenceDataSourcesObservation struct { + + // The ARN of the Kinesis Analytics Application. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The S3 configuration for the reference data source. See S3 Reference below for more details. + S3 *S3Observation `json:"s3,omitempty" tf:"s3,omitempty"` + + // The Schema format of the data in the streaming source. See Source Schema below for more details. + Schema *ReferenceDataSourcesSchemaObservation `json:"schema,omitempty" tf:"schema,omitempty"` + + // The in-application Table Name. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type ReferenceDataSourcesParameters struct { + + // The S3 configuration for the reference data source. See S3 Reference below for more details. + // +kubebuilder:validation:Optional + S3 *S3Parameters `json:"s3" tf:"s3,omitempty"` + + // The Schema format of the data in the streaming source. See Source Schema below for more details. + // +kubebuilder:validation:Optional + Schema *ReferenceDataSourcesSchemaParameters `json:"schema" tf:"schema,omitempty"` + + // The in-application Table Name. 
+ // +kubebuilder:validation:Optional + TableName *string `json:"tableName" tf:"table_name,omitempty"` +} + +type ReferenceDataSourcesSchemaInitParameters struct { + + // The Record Column mapping for the streaming source data element. + // See Record Columns below for more details. + RecordColumns []SchemaRecordColumnsInitParameters `json:"recordColumns,omitempty" tf:"record_columns,omitempty"` + + // The Encoding of the record in the streaming source. + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // The Record Format and mapping information to schematize a record. + // See Record Format below for more details. + RecordFormat *SchemaRecordFormatInitParameters `json:"recordFormat,omitempty" tf:"record_format,omitempty"` +} + +type ReferenceDataSourcesSchemaObservation struct { + + // The Record Column mapping for the streaming source data element. + // See Record Columns below for more details. + RecordColumns []SchemaRecordColumnsObservation `json:"recordColumns,omitempty" tf:"record_columns,omitempty"` + + // The Encoding of the record in the streaming source. + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // The Record Format and mapping information to schematize a record. + // See Record Format below for more details. + RecordFormat *SchemaRecordFormatObservation `json:"recordFormat,omitempty" tf:"record_format,omitempty"` +} + +type ReferenceDataSourcesSchemaParameters struct { + + // The Record Column mapping for the streaming source data element. + // See Record Columns below for more details. + // +kubebuilder:validation:Optional + RecordColumns []SchemaRecordColumnsParameters `json:"recordColumns" tf:"record_columns,omitempty"` + + // The Encoding of the record in the streaming source. 
+ // +kubebuilder:validation:Optional + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // The Record Format and mapping information to schematize a record. + // See Record Format below for more details. + // +kubebuilder:validation:Optional + RecordFormat *SchemaRecordFormatParameters `json:"recordFormat" tf:"record_format,omitempty"` +} + +type S3InitParameters struct { + + // The S3 Bucket ARN. + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // The File Key name containing reference data. + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type S3Observation struct { + + // The S3 Bucket ARN. + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // The File Key name containing reference data. + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` + + // The IAM Role ARN to read the data. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` +} + +type S3Parameters struct { + + // The S3 Bucket ARN. + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn" tf:"bucket_arn,omitempty"` + + // The File Key name containing reference data. + // +kubebuilder:validation:Optional + FileKey *string `json:"fileKey" tf:"file_key,omitempty"` + + // The IAM Role ARN to read the data. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` +} + +type SchemaInitParameters struct { + + // The Record Column mapping for the streaming source data element. + // See Record Columns below for more details. + RecordColumns []RecordColumnsInitParameters `json:"recordColumns,omitempty" tf:"record_columns,omitempty"` + + // The Encoding of the record in the streaming source. 
+ RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // The Record Format and mapping information to schematize a record. + // See Record Format below for more details. + RecordFormat *RecordFormatInitParameters `json:"recordFormat,omitempty" tf:"record_format,omitempty"` +} + +type SchemaObservation struct { + + // The Record Column mapping for the streaming source data element. + // See Record Columns below for more details. + RecordColumns []RecordColumnsObservation `json:"recordColumns,omitempty" tf:"record_columns,omitempty"` + + // The Encoding of the record in the streaming source. + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // The Record Format and mapping information to schematize a record. + // See Record Format below for more details. + RecordFormat *RecordFormatObservation `json:"recordFormat,omitempty" tf:"record_format,omitempty"` +} + +type SchemaParameters struct { + + // The Record Column mapping for the streaming source data element. + // See Record Columns below for more details. + // +kubebuilder:validation:Optional + RecordColumns []RecordColumnsParameters `json:"recordColumns" tf:"record_columns,omitempty"` + + // The Encoding of the record in the streaming source. + // +kubebuilder:validation:Optional + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // The Record Format and mapping information to schematize a record. + // See Record Format below for more details. + // +kubebuilder:validation:Optional + RecordFormat *RecordFormatParameters `json:"recordFormat" tf:"record_format,omitempty"` +} + +type SchemaRecordColumnsInitParameters struct { + + // The Mapping reference to the data element. + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // Name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The SQL Type of the column. 
+ SQLType *string `json:"sqlType,omitempty" tf:"sql_type,omitempty"` +} + +type SchemaRecordColumnsObservation struct { + + // The Mapping reference to the data element. + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // Name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The SQL Type of the column. + SQLType *string `json:"sqlType,omitempty" tf:"sql_type,omitempty"` +} + +type SchemaRecordColumnsParameters struct { + + // The Mapping reference to the data element. + // +kubebuilder:validation:Optional + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // Name of the column. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The SQL Type of the column. + // +kubebuilder:validation:Optional + SQLType *string `json:"sqlType" tf:"sql_type,omitempty"` +} + +type SchemaRecordFormatInitParameters struct { + + // The Mapping Information for the record format. + // See Mapping Parameters below for more details. + MappingParameters *RecordFormatMappingParametersInitParameters `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` +} + +type SchemaRecordFormatObservation struct { + + // The Mapping Information for the record format. + // See Mapping Parameters below for more details. + MappingParameters *RecordFormatMappingParametersObservation `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` + + // The Format Type of the records on the output stream. Can be CSV or JSON. + RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type SchemaRecordFormatParameters struct { + + // The Mapping Information for the record format. + // See Mapping Parameters below for more details. 
+ // +kubebuilder:validation:Optional + MappingParameters *RecordFormatMappingParametersParameters `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` +} + +type StartingPositionConfigurationInitParameters struct { + + // The starting position on the stream. Valid values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON. + StartingPosition *string `json:"startingPosition,omitempty" tf:"starting_position,omitempty"` +} + +type StartingPositionConfigurationObservation struct { + + // The starting position on the stream. Valid values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON. + StartingPosition *string `json:"startingPosition,omitempty" tf:"starting_position,omitempty"` +} + +type StartingPositionConfigurationParameters struct { + + // The starting position on the stream. Valid values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON. + // +kubebuilder:validation:Optional + StartingPosition *string `json:"startingPosition,omitempty" tf:"starting_position,omitempty"` +} + +// ApplicationSpec defines the desired state of Application +type ApplicationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ApplicationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ApplicationInitParameters `json:"initProvider,omitempty"` +} + +// ApplicationStatus defines the observed state of Application. 
+type ApplicationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ApplicationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Application is the Schema for the Applications API. Provides a AWS Kinesis Analytics Application +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Application struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ApplicationSpec `json:"spec"` + Status ApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ApplicationList contains a list of Applications +type ApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Application `json:"items"` +} + +// Repository type metadata. +var ( + Application_Kind = "Application" + Application_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Application_Kind}.String() + Application_KindAPIVersion = Application_Kind + "." 
+ CRDGroupVersion.String() + Application_GroupVersionKind = CRDGroupVersion.WithKind(Application_Kind) +) + +func init() { + SchemeBuilder.Register(&Application{}, &ApplicationList{}) +} diff --git a/apis/kinesisanalytics/v1beta2/zz_generated.conversion_hubs.go b/apis/kinesisanalytics/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2d48655531 --- /dev/null +++ b/apis/kinesisanalytics/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Application) Hub() {} diff --git a/apis/kinesisanalytics/v1beta2/zz_generated.deepcopy.go b/apis/kinesisanalytics/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0117eff4e7 --- /dev/null +++ b/apis/kinesisanalytics/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2657 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Application) DeepCopyInto(out *Application) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Application. 
+func (in *Application) DeepCopy() *Application { + if in == nil { + return nil + } + out := new(Application) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Application) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInitParameters) DeepCopyInto(out *ApplicationInitParameters) { + *out = *in + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(CloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = new(InputsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Outputs != nil { + in, out := &in.Outputs, &out.Outputs + *out = make([]OutputsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReferenceDataSources != nil { + in, out := &in.ReferenceDataSources, &out.ReferenceDataSources + *out = new(ReferenceDataSourcesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StartApplication != nil { + in, out := &in.StartApplication, &out.StartApplication + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new ApplicationInitParameters. +func (in *ApplicationInitParameters) DeepCopy() *ApplicationInitParameters { + if in == nil { + return nil + } + out := new(ApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationList) DeepCopyInto(out *ApplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Application, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationList. +func (in *ApplicationList) DeepCopy() *ApplicationList { + if in == nil { + return nil + } + out := new(ApplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationObservation) DeepCopyInto(out *ApplicationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(CloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.CreateTimestamp != nil { + in, out := &in.CreateTimestamp, &out.CreateTimestamp + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = new(InputsObservation) + (*in).DeepCopyInto(*out) + } + if in.LastUpdateTimestamp != nil { + in, out := &in.LastUpdateTimestamp, &out.LastUpdateTimestamp + *out = new(string) + **out = **in + } + if in.Outputs != nil { + in, out := &in.Outputs, &out.Outputs + *out = make([]OutputsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReferenceDataSources != nil { + in, out := &in.ReferenceDataSources, &out.ReferenceDataSources + *out = new(ReferenceDataSourcesObservation) + (*in).DeepCopyInto(*out) + } + if in.StartApplication != nil { + in, out := &in.StartApplication, &out.StartApplication + *out = new(bool) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != 
nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationObservation. +func (in *ApplicationObservation) DeepCopy() *ApplicationObservation { + if in == nil { + return nil + } + out := new(ApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationParameters) DeepCopyInto(out *ApplicationParameters) { + *out = *in + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(CloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = new(InputsParameters) + (*in).DeepCopyInto(*out) + } + if in.Outputs != nil { + in, out := &in.Outputs, &out.Outputs + *out = make([]OutputsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReferenceDataSources != nil { + in, out := &in.ReferenceDataSources, &out.ReferenceDataSources + *out = new(ReferenceDataSourcesParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StartApplication != nil { + in, out := &in.StartApplication, &out.StartApplication 
+ *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationParameters. +func (in *ApplicationParameters) DeepCopy() *ApplicationParameters { + if in == nil { + return nil + } + out := new(ApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSpec. +func (in *ApplicationSpec) DeepCopy() *ApplicationSpec { + if in == nil { + return nil + } + out := new(ApplicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStatus. +func (in *ApplicationStatus) DeepCopy() *ApplicationStatus { + if in == nil { + return nil + } + out := new(ApplicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *CloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.LogStreamArn != nil { + in, out := &in.LogStreamArn, &out.LogStreamArn + *out = new(string) + **out = **in + } + if in.LogStreamArnRef != nil { + in, out := &in.LogStreamArnRef, &out.LogStreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogStreamArnSelector != nil { + in, out := &in.LogStreamArnSelector, &out.LogStreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLoggingOptionsInitParameters. +func (in *CloudwatchLoggingOptionsInitParameters) DeepCopy() *CloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLoggingOptionsObservation) DeepCopyInto(out *CloudwatchLoggingOptionsObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LogStreamArn != nil { + in, out := &in.LogStreamArn, &out.LogStreamArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLoggingOptionsObservation. 
+func (in *CloudwatchLoggingOptionsObservation) DeepCopy() *CloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(CloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLoggingOptionsParameters) DeepCopyInto(out *CloudwatchLoggingOptionsParameters) { + *out = *in + if in.LogStreamArn != nil { + in, out := &in.LogStreamArn, &out.LogStreamArn + *out = new(string) + **out = **in + } + if in.LogStreamArnRef != nil { + in, out := &in.LogStreamArnRef, &out.LogStreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogStreamArnSelector != nil { + in, out := &in.LogStreamArnSelector, &out.LogStreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLoggingOptionsParameters. +func (in *CloudwatchLoggingOptionsParameters) DeepCopy() *CloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(CloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CsvInitParameters) DeepCopyInto(out *CsvInitParameters) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvInitParameters. +func (in *CsvInitParameters) DeepCopy() *CsvInitParameters { + if in == nil { + return nil + } + out := new(CsvInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CsvObservation) DeepCopyInto(out *CsvObservation) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvObservation. +func (in *CsvObservation) DeepCopy() *CsvObservation { + if in == nil { + return nil + } + out := new(CsvObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CsvParameters) DeepCopyInto(out *CsvParameters) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvParameters. 
+func (in *CsvParameters) DeepCopy() *CsvParameters { + if in == nil { + return nil + } + out := new(CsvParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputsInitParameters) DeepCopyInto(out *InputsInitParameters) { + *out = *in + if in.KinesisFirehose != nil { + in, out := &in.KinesisFirehose, &out.KinesisFirehose + *out = new(KinesisFirehoseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStream != nil { + in, out := &in.KinesisStream, &out.KinesisStream + *out = new(KinesisStreamInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NamePrefix != nil { + in, out := &in.NamePrefix, &out.NamePrefix + *out = new(string) + **out = **in + } + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(ParallelismInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(SchemaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StartingPositionConfiguration != nil { + in, out := &in.StartingPositionConfiguration, &out.StartingPositionConfiguration + *out = make([]StartingPositionConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputsInitParameters. +func (in *InputsInitParameters) DeepCopy() *InputsInitParameters { + if in == nil { + return nil + } + out := new(InputsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputsObservation) DeepCopyInto(out *InputsObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KinesisFirehose != nil { + in, out := &in.KinesisFirehose, &out.KinesisFirehose + *out = new(KinesisFirehoseObservation) + (*in).DeepCopyInto(*out) + } + if in.KinesisStream != nil { + in, out := &in.KinesisStream, &out.KinesisStream + *out = new(KinesisStreamObservation) + (*in).DeepCopyInto(*out) + } + if in.NamePrefix != nil { + in, out := &in.NamePrefix, &out.NamePrefix + *out = new(string) + **out = **in + } + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(ParallelismObservation) + (*in).DeepCopyInto(*out) + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(SchemaObservation) + (*in).DeepCopyInto(*out) + } + if in.StartingPositionConfiguration != nil { + in, out := &in.StartingPositionConfiguration, &out.StartingPositionConfiguration + *out = make([]StartingPositionConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamNames != nil { + in, out := &in.StreamNames, &out.StreamNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputsObservation. +func (in *InputsObservation) DeepCopy() *InputsObservation { + if in == nil { + return nil + } + out := new(InputsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputsParameters) DeepCopyInto(out *InputsParameters) { + *out = *in + if in.KinesisFirehose != nil { + in, out := &in.KinesisFirehose, &out.KinesisFirehose + *out = new(KinesisFirehoseParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStream != nil { + in, out := &in.KinesisStream, &out.KinesisStream + *out = new(KinesisStreamParameters) + (*in).DeepCopyInto(*out) + } + if in.NamePrefix != nil { + in, out := &in.NamePrefix, &out.NamePrefix + *out = new(string) + **out = **in + } + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(ParallelismParameters) + (*in).DeepCopyInto(*out) + } + if in.ProcessingConfiguration != nil { + in, out := &in.ProcessingConfiguration, &out.ProcessingConfiguration + *out = new(ProcessingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(SchemaParameters) + (*in).DeepCopyInto(*out) + } + if in.StartingPositionConfiguration != nil { + in, out := &in.StartingPositionConfiguration, &out.StartingPositionConfiguration + *out = make([]StartingPositionConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputsParameters. +func (in *InputsParameters) DeepCopy() *InputsParameters { + if in == nil { + return nil + } + out := new(InputsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONInitParameters) DeepCopyInto(out *JSONInitParameters) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONInitParameters. 
+func (in *JSONInitParameters) DeepCopy() *JSONInitParameters { + if in == nil { + return nil + } + out := new(JSONInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONObservation) DeepCopyInto(out *JSONObservation) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONObservation. +func (in *JSONObservation) DeepCopy() *JSONObservation { + if in == nil { + return nil + } + out := new(JSONObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONParameters) DeepCopyInto(out *JSONParameters) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONParameters. +func (in *JSONParameters) DeepCopy() *JSONParameters { + if in == nil { + return nil + } + out := new(JSONParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisFirehoseInitParameters) DeepCopyInto(out *KinesisFirehoseInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseInitParameters. 
+func (in *KinesisFirehoseInitParameters) DeepCopy() *KinesisFirehoseInitParameters { + if in == nil { + return nil + } + out := new(KinesisFirehoseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisFirehoseObservation) DeepCopyInto(out *KinesisFirehoseObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseObservation. +func (in *KinesisFirehoseObservation) DeepCopy() *KinesisFirehoseObservation { + if in == nil { + return nil + } + out := new(KinesisFirehoseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisFirehoseParameters) DeepCopyInto(out *KinesisFirehoseParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseParameters. +func (in *KinesisFirehoseParameters) DeepCopy() *KinesisFirehoseParameters { + if in == nil { + return nil + } + out := new(KinesisFirehoseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisStreamInitParameters) DeepCopyInto(out *KinesisStreamInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamInitParameters. +func (in *KinesisStreamInitParameters) DeepCopy() *KinesisStreamInitParameters { + if in == nil { + return nil + } + out := new(KinesisStreamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamObservation) DeepCopyInto(out *KinesisStreamObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamObservation. 
+func (in *KinesisStreamObservation) DeepCopy() *KinesisStreamObservation { + if in == nil { + return nil + } + out := new(KinesisStreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamParameters) DeepCopyInto(out *KinesisStreamParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamParameters. +func (in *KinesisStreamParameters) DeepCopy() *KinesisStreamParameters { + if in == nil { + return nil + } + out := new(KinesisStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LambdaInitParameters) DeepCopyInto(out *LambdaInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaInitParameters. +func (in *LambdaInitParameters) DeepCopy() *LambdaInitParameters { + if in == nil { + return nil + } + out := new(LambdaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaObservation) DeepCopyInto(out *LambdaObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaObservation. +func (in *LambdaObservation) DeepCopy() *LambdaObservation { + if in == nil { + return nil + } + out := new(LambdaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaParameters) DeepCopyInto(out *LambdaParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaParameters. 
+func (in *LambdaParameters) DeepCopy() *LambdaParameters { + if in == nil { + return nil + } + out := new(LambdaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersCsvInitParameters) DeepCopyInto(out *MappingParametersCsvInitParameters) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersCsvInitParameters. +func (in *MappingParametersCsvInitParameters) DeepCopy() *MappingParametersCsvInitParameters { + if in == nil { + return nil + } + out := new(MappingParametersCsvInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersCsvObservation) DeepCopyInto(out *MappingParametersCsvObservation) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersCsvObservation. +func (in *MappingParametersCsvObservation) DeepCopy() *MappingParametersCsvObservation { + if in == nil { + return nil + } + out := new(MappingParametersCsvObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MappingParametersCsvParameters) DeepCopyInto(out *MappingParametersCsvParameters) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersCsvParameters. +func (in *MappingParametersCsvParameters) DeepCopy() *MappingParametersCsvParameters { + if in == nil { + return nil + } + out := new(MappingParametersCsvParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersInitParameters) DeepCopyInto(out *MappingParametersInitParameters) { + *out = *in + if in.Csv != nil { + in, out := &in.Csv, &out.Csv + *out = new(CsvInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = new(JSONInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersInitParameters. +func (in *MappingParametersInitParameters) DeepCopy() *MappingParametersInitParameters { + if in == nil { + return nil + } + out := new(MappingParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MappingParametersJSONInitParameters) DeepCopyInto(out *MappingParametersJSONInitParameters) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersJSONInitParameters. +func (in *MappingParametersJSONInitParameters) DeepCopy() *MappingParametersJSONInitParameters { + if in == nil { + return nil + } + out := new(MappingParametersJSONInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersJSONObservation) DeepCopyInto(out *MappingParametersJSONObservation) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersJSONObservation. +func (in *MappingParametersJSONObservation) DeepCopy() *MappingParametersJSONObservation { + if in == nil { + return nil + } + out := new(MappingParametersJSONObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersJSONParameters) DeepCopyInto(out *MappingParametersJSONParameters) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersJSONParameters. 
+func (in *MappingParametersJSONParameters) DeepCopy() *MappingParametersJSONParameters { + if in == nil { + return nil + } + out := new(MappingParametersJSONParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersObservation) DeepCopyInto(out *MappingParametersObservation) { + *out = *in + if in.Csv != nil { + in, out := &in.Csv, &out.Csv + *out = new(CsvObservation) + (*in).DeepCopyInto(*out) + } + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = new(JSONObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersObservation. +func (in *MappingParametersObservation) DeepCopy() *MappingParametersObservation { + if in == nil { + return nil + } + out := new(MappingParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersParameters) DeepCopyInto(out *MappingParametersParameters) { + *out = *in + if in.Csv != nil { + in, out := &in.Csv, &out.Csv + *out = new(CsvParameters) + (*in).DeepCopyInto(*out) + } + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = new(JSONParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersParameters. +func (in *MappingParametersParameters) DeepCopy() *MappingParametersParameters { + if in == nil { + return nil + } + out := new(MappingParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsInitParameters) DeepCopyInto(out *OutputsInitParameters) { + *out = *in + if in.KinesisFirehose != nil { + in, out := &in.KinesisFirehose, &out.KinesisFirehose + *out = new(OutputsKinesisFirehoseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStream != nil { + in, out := &in.KinesisStream, &out.KinesisStream + *out = new(OutputsKinesisStreamInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Lambda != nil { + in, out := &in.Lambda, &out.Lambda + *out = new(OutputsLambdaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(OutputsSchemaInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsInitParameters. +func (in *OutputsInitParameters) DeepCopy() *OutputsInitParameters { + if in == nil { + return nil + } + out := new(OutputsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsKinesisFirehoseInitParameters) DeepCopyInto(out *OutputsKinesisFirehoseInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsKinesisFirehoseInitParameters. +func (in *OutputsKinesisFirehoseInitParameters) DeepCopy() *OutputsKinesisFirehoseInitParameters { + if in == nil { + return nil + } + out := new(OutputsKinesisFirehoseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputsKinesisFirehoseObservation) DeepCopyInto(out *OutputsKinesisFirehoseObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsKinesisFirehoseObservation. 
+func (in *OutputsKinesisFirehoseObservation) DeepCopy() *OutputsKinesisFirehoseObservation { + if in == nil { + return nil + } + out := new(OutputsKinesisFirehoseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputsKinesisFirehoseParameters) DeepCopyInto(out *OutputsKinesisFirehoseParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsKinesisFirehoseParameters. +func (in *OutputsKinesisFirehoseParameters) DeepCopy() *OutputsKinesisFirehoseParameters { + if in == nil { + return nil + } + out := new(OutputsKinesisFirehoseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsKinesisStreamInitParameters) DeepCopyInto(out *OutputsKinesisStreamInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsKinesisStreamInitParameters. +func (in *OutputsKinesisStreamInitParameters) DeepCopy() *OutputsKinesisStreamInitParameters { + if in == nil { + return nil + } + out := new(OutputsKinesisStreamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputsKinesisStreamObservation) DeepCopyInto(out *OutputsKinesisStreamObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsKinesisStreamObservation. +func (in *OutputsKinesisStreamObservation) DeepCopy() *OutputsKinesisStreamObservation { + if in == nil { + return nil + } + out := new(OutputsKinesisStreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsKinesisStreamParameters) DeepCopyInto(out *OutputsKinesisStreamParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsKinesisStreamParameters. +func (in *OutputsKinesisStreamParameters) DeepCopy() *OutputsKinesisStreamParameters { + if in == nil { + return nil + } + out := new(OutputsKinesisStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputsLambdaInitParameters) DeepCopyInto(out *OutputsLambdaInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsLambdaInitParameters. +func (in *OutputsLambdaInitParameters) DeepCopy() *OutputsLambdaInitParameters { + if in == nil { + return nil + } + out := new(OutputsLambdaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputsLambdaObservation) DeepCopyInto(out *OutputsLambdaObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsLambdaObservation. 
+func (in *OutputsLambdaObservation) DeepCopy() *OutputsLambdaObservation { + if in == nil { + return nil + } + out := new(OutputsLambdaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputsLambdaParameters) DeepCopyInto(out *OutputsLambdaParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsLambdaParameters. +func (in *OutputsLambdaParameters) DeepCopy() *OutputsLambdaParameters { + if in == nil { + return nil + } + out := new(OutputsLambdaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsObservation) DeepCopyInto(out *OutputsObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KinesisFirehose != nil { + in, out := &in.KinesisFirehose, &out.KinesisFirehose + *out = new(OutputsKinesisFirehoseObservation) + (*in).DeepCopyInto(*out) + } + if in.KinesisStream != nil { + in, out := &in.KinesisStream, &out.KinesisStream + *out = new(OutputsKinesisStreamObservation) + (*in).DeepCopyInto(*out) + } + if in.Lambda != nil { + in, out := &in.Lambda, &out.Lambda + *out = new(OutputsLambdaObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(OutputsSchemaObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsObservation. +func (in *OutputsObservation) DeepCopy() *OutputsObservation { + if in == nil { + return nil + } + out := new(OutputsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsParameters) DeepCopyInto(out *OutputsParameters) { + *out = *in + if in.KinesisFirehose != nil { + in, out := &in.KinesisFirehose, &out.KinesisFirehose + *out = new(OutputsKinesisFirehoseParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStream != nil { + in, out := &in.KinesisStream, &out.KinesisStream + *out = new(OutputsKinesisStreamParameters) + (*in).DeepCopyInto(*out) + } + if in.Lambda != nil { + in, out := &in.Lambda, &out.Lambda + *out = new(OutputsLambdaParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(OutputsSchemaParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsParameters. +func (in *OutputsParameters) DeepCopy() *OutputsParameters { + if in == nil { + return nil + } + out := new(OutputsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputsSchemaInitParameters) DeepCopyInto(out *OutputsSchemaInitParameters) { + *out = *in + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsSchemaInitParameters. +func (in *OutputsSchemaInitParameters) DeepCopy() *OutputsSchemaInitParameters { + if in == nil { + return nil + } + out := new(OutputsSchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsSchemaObservation) DeepCopyInto(out *OutputsSchemaObservation) { + *out = *in + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsSchemaObservation. +func (in *OutputsSchemaObservation) DeepCopy() *OutputsSchemaObservation { + if in == nil { + return nil + } + out := new(OutputsSchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputsSchemaParameters) DeepCopyInto(out *OutputsSchemaParameters) { + *out = *in + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsSchemaParameters. +func (in *OutputsSchemaParameters) DeepCopy() *OutputsSchemaParameters { + if in == nil { + return nil + } + out := new(OutputsSchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParallelismInitParameters) DeepCopyInto(out *ParallelismInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelismInitParameters. +func (in *ParallelismInitParameters) DeepCopy() *ParallelismInitParameters { + if in == nil { + return nil + } + out := new(ParallelismInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParallelismObservation) DeepCopyInto(out *ParallelismObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelismObservation. +func (in *ParallelismObservation) DeepCopy() *ParallelismObservation { + if in == nil { + return nil + } + out := new(ParallelismObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParallelismParameters) DeepCopyInto(out *ParallelismParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelismParameters. +func (in *ParallelismParameters) DeepCopy() *ParallelismParameters { + if in == nil { + return nil + } + out := new(ParallelismParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationInitParameters) DeepCopyInto(out *ProcessingConfigurationInitParameters) { + *out = *in + if in.Lambda != nil { + in, out := &in.Lambda, &out.Lambda + *out = new(LambdaInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationInitParameters. +func (in *ProcessingConfigurationInitParameters) DeepCopy() *ProcessingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ProcessingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessingConfigurationObservation) DeepCopyInto(out *ProcessingConfigurationObservation) { + *out = *in + if in.Lambda != nil { + in, out := &in.Lambda, &out.Lambda + *out = new(LambdaObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationObservation. +func (in *ProcessingConfigurationObservation) DeepCopy() *ProcessingConfigurationObservation { + if in == nil { + return nil + } + out := new(ProcessingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessingConfigurationParameters) DeepCopyInto(out *ProcessingConfigurationParameters) { + *out = *in + if in.Lambda != nil { + in, out := &in.Lambda, &out.Lambda + *out = new(LambdaParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingConfigurationParameters. +func (in *ProcessingConfigurationParameters) DeepCopy() *ProcessingConfigurationParameters { + if in == nil { + return nil + } + out := new(ProcessingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordColumnsInitParameters) DeepCopyInto(out *RecordColumnsInitParameters) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordColumnsInitParameters. 
+func (in *RecordColumnsInitParameters) DeepCopy() *RecordColumnsInitParameters { + if in == nil { + return nil + } + out := new(RecordColumnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordColumnsObservation) DeepCopyInto(out *RecordColumnsObservation) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordColumnsObservation. +func (in *RecordColumnsObservation) DeepCopy() *RecordColumnsObservation { + if in == nil { + return nil + } + out := new(RecordColumnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordColumnsParameters) DeepCopyInto(out *RecordColumnsParameters) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordColumnsParameters. +func (in *RecordColumnsParameters) DeepCopy() *RecordColumnsParameters { + if in == nil { + return nil + } + out := new(RecordColumnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordFormatInitParameters) DeepCopyInto(out *RecordFormatInitParameters) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(MappingParametersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatInitParameters. +func (in *RecordFormatInitParameters) DeepCopy() *RecordFormatInitParameters { + if in == nil { + return nil + } + out := new(RecordFormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordFormatMappingParametersInitParameters) DeepCopyInto(out *RecordFormatMappingParametersInitParameters) { + *out = *in + if in.Csv != nil { + in, out := &in.Csv, &out.Csv + *out = new(MappingParametersCsvInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = new(MappingParametersJSONInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatMappingParametersInitParameters. +func (in *RecordFormatMappingParametersInitParameters) DeepCopy() *RecordFormatMappingParametersInitParameters { + if in == nil { + return nil + } + out := new(RecordFormatMappingParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordFormatMappingParametersObservation) DeepCopyInto(out *RecordFormatMappingParametersObservation) { + *out = *in + if in.Csv != nil { + in, out := &in.Csv, &out.Csv + *out = new(MappingParametersCsvObservation) + (*in).DeepCopyInto(*out) + } + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = new(MappingParametersJSONObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatMappingParametersObservation. +func (in *RecordFormatMappingParametersObservation) DeepCopy() *RecordFormatMappingParametersObservation { + if in == nil { + return nil + } + out := new(RecordFormatMappingParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordFormatMappingParametersParameters) DeepCopyInto(out *RecordFormatMappingParametersParameters) { + *out = *in + if in.Csv != nil { + in, out := &in.Csv, &out.Csv + *out = new(MappingParametersCsvParameters) + (*in).DeepCopyInto(*out) + } + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = new(MappingParametersJSONParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatMappingParametersParameters. +func (in *RecordFormatMappingParametersParameters) DeepCopy() *RecordFormatMappingParametersParameters { + if in == nil { + return nil + } + out := new(RecordFormatMappingParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordFormatObservation) DeepCopyInto(out *RecordFormatObservation) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(MappingParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatObservation. +func (in *RecordFormatObservation) DeepCopy() *RecordFormatObservation { + if in == nil { + return nil + } + out := new(RecordFormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordFormatParameters) DeepCopyInto(out *RecordFormatParameters) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(MappingParametersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatParameters. +func (in *RecordFormatParameters) DeepCopy() *RecordFormatParameters { + if in == nil { + return nil + } + out := new(RecordFormatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceDataSourcesInitParameters) DeepCopyInto(out *ReferenceDataSourcesInitParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(ReferenceDataSourcesSchemaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceDataSourcesInitParameters. +func (in *ReferenceDataSourcesInitParameters) DeepCopy() *ReferenceDataSourcesInitParameters { + if in == nil { + return nil + } + out := new(ReferenceDataSourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceDataSourcesObservation) DeepCopyInto(out *ReferenceDataSourcesObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Observation) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(ReferenceDataSourcesSchemaObservation) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceDataSourcesObservation. +func (in *ReferenceDataSourcesObservation) DeepCopy() *ReferenceDataSourcesObservation { + if in == nil { + return nil + } + out := new(ReferenceDataSourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceDataSourcesParameters) DeepCopyInto(out *ReferenceDataSourcesParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Parameters) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(ReferenceDataSourcesSchemaParameters) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceDataSourcesParameters. +func (in *ReferenceDataSourcesParameters) DeepCopy() *ReferenceDataSourcesParameters { + if in == nil { + return nil + } + out := new(ReferenceDataSourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceDataSourcesSchemaInitParameters) DeepCopyInto(out *ReferenceDataSourcesSchemaInitParameters) { + *out = *in + if in.RecordColumns != nil { + in, out := &in.RecordColumns, &out.RecordColumns + *out = make([]SchemaRecordColumnsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(SchemaRecordFormatInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceDataSourcesSchemaInitParameters. 
+func (in *ReferenceDataSourcesSchemaInitParameters) DeepCopy() *ReferenceDataSourcesSchemaInitParameters { + if in == nil { + return nil + } + out := new(ReferenceDataSourcesSchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceDataSourcesSchemaObservation) DeepCopyInto(out *ReferenceDataSourcesSchemaObservation) { + *out = *in + if in.RecordColumns != nil { + in, out := &in.RecordColumns, &out.RecordColumns + *out = make([]SchemaRecordColumnsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(SchemaRecordFormatObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceDataSourcesSchemaObservation. +func (in *ReferenceDataSourcesSchemaObservation) DeepCopy() *ReferenceDataSourcesSchemaObservation { + if in == nil { + return nil + } + out := new(ReferenceDataSourcesSchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceDataSourcesSchemaParameters) DeepCopyInto(out *ReferenceDataSourcesSchemaParameters) { + *out = *in + if in.RecordColumns != nil { + in, out := &in.RecordColumns, &out.RecordColumns + *out = make([]SchemaRecordColumnsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(SchemaRecordFormatParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceDataSourcesSchemaParameters. +func (in *ReferenceDataSourcesSchemaParameters) DeepCopy() *ReferenceDataSourcesSchemaParameters { + if in == nil { + return nil + } + out := new(ReferenceDataSourcesSchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InitParameters) DeepCopyInto(out *S3InitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InitParameters. +func (in *S3InitParameters) DeepCopy() *S3InitParameters { + if in == nil { + return nil + } + out := new(S3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Observation) DeepCopyInto(out *S3Observation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Observation. +func (in *S3Observation) DeepCopy() *S3Observation { + if in == nil { + return nil + } + out := new(S3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Parameters) DeepCopyInto(out *S3Parameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Parameters. +func (in *S3Parameters) DeepCopy() *S3Parameters { + if in == nil { + return nil + } + out := new(S3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaInitParameters) DeepCopyInto(out *SchemaInitParameters) { + *out = *in + if in.RecordColumns != nil { + in, out := &in.RecordColumns, &out.RecordColumns + *out = make([]RecordColumnsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(RecordFormatInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaInitParameters. +func (in *SchemaInitParameters) DeepCopy() *SchemaInitParameters { + if in == nil { + return nil + } + out := new(SchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaObservation) DeepCopyInto(out *SchemaObservation) { + *out = *in + if in.RecordColumns != nil { + in, out := &in.RecordColumns, &out.RecordColumns + *out = make([]RecordColumnsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(RecordFormatObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaObservation. +func (in *SchemaObservation) DeepCopy() *SchemaObservation { + if in == nil { + return nil + } + out := new(SchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaParameters) DeepCopyInto(out *SchemaParameters) { + *out = *in + if in.RecordColumns != nil { + in, out := &in.RecordColumns, &out.RecordColumns + *out = make([]RecordColumnsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(RecordFormatParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaParameters. +func (in *SchemaParameters) DeepCopy() *SchemaParameters { + if in == nil { + return nil + } + out := new(SchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaRecordColumnsInitParameters) DeepCopyInto(out *SchemaRecordColumnsInitParameters) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRecordColumnsInitParameters. +func (in *SchemaRecordColumnsInitParameters) DeepCopy() *SchemaRecordColumnsInitParameters { + if in == nil { + return nil + } + out := new(SchemaRecordColumnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaRecordColumnsObservation) DeepCopyInto(out *SchemaRecordColumnsObservation) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRecordColumnsObservation. +func (in *SchemaRecordColumnsObservation) DeepCopy() *SchemaRecordColumnsObservation { + if in == nil { + return nil + } + out := new(SchemaRecordColumnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaRecordColumnsParameters) DeepCopyInto(out *SchemaRecordColumnsParameters) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRecordColumnsParameters. +func (in *SchemaRecordColumnsParameters) DeepCopy() *SchemaRecordColumnsParameters { + if in == nil { + return nil + } + out := new(SchemaRecordColumnsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaRecordFormatInitParameters) DeepCopyInto(out *SchemaRecordFormatInitParameters) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(RecordFormatMappingParametersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRecordFormatInitParameters. +func (in *SchemaRecordFormatInitParameters) DeepCopy() *SchemaRecordFormatInitParameters { + if in == nil { + return nil + } + out := new(SchemaRecordFormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaRecordFormatObservation) DeepCopyInto(out *SchemaRecordFormatObservation) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(RecordFormatMappingParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRecordFormatObservation. +func (in *SchemaRecordFormatObservation) DeepCopy() *SchemaRecordFormatObservation { + if in == nil { + return nil + } + out := new(SchemaRecordFormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaRecordFormatParameters) DeepCopyInto(out *SchemaRecordFormatParameters) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(RecordFormatMappingParametersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRecordFormatParameters. +func (in *SchemaRecordFormatParameters) DeepCopy() *SchemaRecordFormatParameters { + if in == nil { + return nil + } + out := new(SchemaRecordFormatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartingPositionConfigurationInitParameters) DeepCopyInto(out *StartingPositionConfigurationInitParameters) { + *out = *in + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartingPositionConfigurationInitParameters. +func (in *StartingPositionConfigurationInitParameters) DeepCopy() *StartingPositionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(StartingPositionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartingPositionConfigurationObservation) DeepCopyInto(out *StartingPositionConfigurationObservation) { + *out = *in + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartingPositionConfigurationObservation. 
+func (in *StartingPositionConfigurationObservation) DeepCopy() *StartingPositionConfigurationObservation { + if in == nil { + return nil + } + out := new(StartingPositionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartingPositionConfigurationParameters) DeepCopyInto(out *StartingPositionConfigurationParameters) { + *out = *in + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartingPositionConfigurationParameters. +func (in *StartingPositionConfigurationParameters) DeepCopy() *StartingPositionConfigurationParameters { + if in == nil { + return nil + } + out := new(StartingPositionConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kinesisanalytics/v1beta2/zz_generated.managed.go b/apis/kinesisanalytics/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..d1481109bb --- /dev/null +++ b/apis/kinesisanalytics/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Application. +func (mg *Application) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Application. +func (mg *Application) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Application. +func (mg *Application) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Application. 
+func (mg *Application) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Application. +func (mg *Application) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Application. +func (mg *Application) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Application. +func (mg *Application) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Application. +func (mg *Application) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Application. +func (mg *Application) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Application. +func (mg *Application) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Application. +func (mg *Application) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Application. +func (mg *Application) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kinesisanalytics/v1beta2/zz_generated.managedlist.go b/apis/kinesisanalytics/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..9c9817b1e1 --- /dev/null +++ b/apis/kinesisanalytics/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ApplicationList. +func (l *ApplicationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kinesisanalytics/v1beta2/zz_generated.resolvers.go b/apis/kinesisanalytics/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..d1d01d49bb --- /dev/null +++ b/apis/kinesisanalytics/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,300 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Application) ResolveReferences( // ResolveReferences of this Application. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.CloudwatchLoggingOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArnRef, + Selector: mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArn") + } + mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.CloudwatchLoggingOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CloudwatchLoggingOptions.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.CloudwatchLoggingOptions.RoleArnRef, + Selector: mg.Spec.ForProvider.CloudwatchLoggingOptions.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.CloudwatchLoggingOptions.RoleArn") + } + mg.Spec.ForProvider.CloudwatchLoggingOptions.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CloudwatchLoggingOptions.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Inputs != nil { + if mg.Spec.ForProvider.Inputs.KinesisStream != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Inputs.KinesisStream.ResourceArn), + Extract: common.TerraformID(), + Reference: mg.Spec.ForProvider.Inputs.KinesisStream.ResourceArnRef, + Selector: mg.Spec.ForProvider.Inputs.KinesisStream.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Inputs.KinesisStream.ResourceArn") + } + mg.Spec.ForProvider.Inputs.KinesisStream.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Inputs.KinesisStream.ResourceArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Inputs != nil { + if mg.Spec.ForProvider.Inputs.KinesisStream != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Inputs.KinesisStream.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.Inputs.KinesisStream.RoleArnRef, + Selector: mg.Spec.ForProvider.Inputs.KinesisStream.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.ForProvider.Inputs.KinesisStream.RoleArn") + } + mg.Spec.ForProvider.Inputs.KinesisStream.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Inputs.KinesisStream.RoleArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Outputs); i3++ { + if mg.Spec.ForProvider.Outputs[i3].KinesisFirehose != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.ResourceArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.ResourceArnRef, + Selector: mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.ResourceArn") + } + mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.ResourceArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Outputs); i3++ { + if mg.Spec.ForProvider.Outputs[i3].KinesisFirehose != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: 
mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.RoleArnRef, + Selector: mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.RoleArn") + } + mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Outputs[i3].KinesisFirehose.RoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.CloudwatchLoggingOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArnRef, + Selector: mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArn") + } + mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.CloudwatchLoggingOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CloudwatchLoggingOptions.RoleArn), + Extract: 
resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.CloudwatchLoggingOptions.RoleArnRef, + Selector: mg.Spec.InitProvider.CloudwatchLoggingOptions.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CloudwatchLoggingOptions.RoleArn") + } + mg.Spec.InitProvider.CloudwatchLoggingOptions.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CloudwatchLoggingOptions.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Inputs != nil { + if mg.Spec.InitProvider.Inputs.KinesisStream != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Inputs.KinesisStream.ResourceArn), + Extract: common.TerraformID(), + Reference: mg.Spec.InitProvider.Inputs.KinesisStream.ResourceArnRef, + Selector: mg.Spec.InitProvider.Inputs.KinesisStream.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Inputs.KinesisStream.ResourceArn") + } + mg.Spec.InitProvider.Inputs.KinesisStream.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Inputs.KinesisStream.ResourceArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Inputs != nil { + if mg.Spec.InitProvider.Inputs.KinesisStream != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.Inputs.KinesisStream.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.Inputs.KinesisStream.RoleArnRef, + Selector: mg.Spec.InitProvider.Inputs.KinesisStream.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Inputs.KinesisStream.RoleArn") + } + mg.Spec.InitProvider.Inputs.KinesisStream.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Inputs.KinesisStream.RoleArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Outputs); i3++ { + if mg.Spec.InitProvider.Outputs[i3].KinesisFirehose != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.ResourceArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.ResourceArnRef, + Selector: mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.ResourceArn") + } + mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.ResourceArnRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Outputs); i3++ { + if mg.Spec.InitProvider.Outputs[i3].KinesisFirehose != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get 
the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.RoleArnRef, + Selector: mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.RoleArn") + } + mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Outputs[i3].KinesisFirehose.RoleArnRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/kinesisanalytics/v1beta2/zz_groupversion_info.go b/apis/kinesisanalytics/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..685b5537ff --- /dev/null +++ b/apis/kinesisanalytics/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kinesisanalytics.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "kinesisanalytics.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/kinesisanalyticsv2/v1beta1/zz_applicationsnapshot_types.go b/apis/kinesisanalyticsv2/v1beta1/zz_applicationsnapshot_types.go index 3576ab16d9..8fddddf445 100755 --- a/apis/kinesisanalyticsv2/v1beta1/zz_applicationsnapshot_types.go +++ b/apis/kinesisanalyticsv2/v1beta1/zz_applicationsnapshot_types.go @@ -16,7 +16,7 @@ import ( type ApplicationSnapshotInitParameters struct { // The name of an existing Kinesis Analytics v2 Application. Note that the application must be running for a snapshot to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesisanalyticsv2/v1beta1.Application + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesisanalyticsv2/v1beta2.Application ApplicationName *string `json:"applicationName,omitempty" tf:"application_name,omitempty"` // Reference to a Application in kinesisanalyticsv2 to populate applicationName. @@ -46,7 +46,7 @@ type ApplicationSnapshotObservation struct { type ApplicationSnapshotParameters struct { // The name of an existing Kinesis Analytics v2 Application. Note that the application must be running for a snapshot to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesisanalyticsv2/v1beta1.Application + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesisanalyticsv2/v1beta2.Application // +kubebuilder:validation:Optional ApplicationName *string `json:"applicationName,omitempty" tf:"application_name,omitempty"` diff --git a/apis/kinesisanalyticsv2/v1beta1/zz_generated.conversion_hubs.go b/apis/kinesisanalyticsv2/v1beta1/zz_generated.conversion_hubs.go index 2b509581fa..fe07fc6ec0 100755 --- a/apis/kinesisanalyticsv2/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/kinesisanalyticsv2/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. 
-func (tr *Application) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ApplicationSnapshot) Hub() {} diff --git a/apis/kinesisanalyticsv2/v1beta1/zz_generated.conversion_spokes.go b/apis/kinesisanalyticsv2/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ada25ff080 --- /dev/null +++ b/apis/kinesisanalyticsv2/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Application to the hub type. +func (tr *Application) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Application type. 
+func (tr *Application) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/kinesisanalyticsv2/v1beta1/zz_generated.resolvers.go b/apis/kinesisanalyticsv2/v1beta1/zz_generated.resolvers.go index 70d5f33b6d..b1ed235fcd 100644 --- a/apis/kinesisanalyticsv2/v1beta1/zz_generated.resolvers.go +++ b/apis/kinesisanalyticsv2/v1beta1/zz_generated.resolvers.go @@ -443,7 +443,7 @@ func (mg *ApplicationSnapshot) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kinesisanalyticsv2.aws.upbound.io", "v1beta1", "Application", "ApplicationList") + m, l, err = apisresolver.GetManagedResource("kinesisanalyticsv2.aws.upbound.io", "v1beta2", "Application", "ApplicationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -462,7 +462,7 @@ func (mg *ApplicationSnapshot) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.ApplicationName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ApplicationNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("kinesisanalyticsv2.aws.upbound.io", "v1beta1", "Application", "ApplicationList") + m, l, err = apisresolver.GetManagedResource("kinesisanalyticsv2.aws.upbound.io", "v1beta2", "Application", "ApplicationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/kinesisanalyticsv2/v1beta2/zz_application_terraformed.go 
b/apis/kinesisanalyticsv2/v1beta2/zz_application_terraformed.go new file mode 100755 index 0000000000..e7e3806404 --- /dev/null +++ b/apis/kinesisanalyticsv2/v1beta2/zz_application_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Application +func (mg *Application) GetTerraformResourceType() string { + return "aws_kinesisanalyticsv2_application" +} + +// GetConnectionDetailsMapping for this Application +func (tr *Application) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Application +func (tr *Application) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Application +func (tr *Application) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Application +func (tr *Application) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Application +func (tr *Application) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Application +func (tr *Application) SetParameters(params map[string]any) error { 
+ p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Application +func (tr *Application) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Application +func (tr *Application) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Application using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Application) LateInitialize(attrs []byte) (bool, error) { + params := &ApplicationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Application) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/kinesisanalyticsv2/v1beta2/zz_application_types.go b/apis/kinesisanalyticsv2/v1beta2/zz_application_types.go new file mode 100755 index 0000000000..f29ae4dcf2 --- /dev/null +++ b/apis/kinesisanalyticsv2/v1beta2/zz_application_types.go @@ -0,0 +1,1732 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationCodeConfigurationInitParameters struct { + + // The location and type of the application code. + CodeContent *CodeContentInitParameters `json:"codeContent,omitempty" tf:"code_content,omitempty"` + + // Specifies whether the code content is in text or zip format. Valid values: PLAINTEXT, ZIPFILE. + CodeContentType *string `json:"codeContentType,omitempty" tf:"code_content_type,omitempty"` +} + +type ApplicationCodeConfigurationObservation struct { + + // The location and type of the application code. + CodeContent *CodeContentObservation `json:"codeContent,omitempty" tf:"code_content,omitempty"` + + // Specifies whether the code content is in text or zip format. Valid values: PLAINTEXT, ZIPFILE. 
+ CodeContentType *string `json:"codeContentType,omitempty" tf:"code_content_type,omitempty"` +} + +type ApplicationCodeConfigurationParameters struct { + + // The location and type of the application code. + // +kubebuilder:validation:Optional + CodeContent *CodeContentParameters `json:"codeContent,omitempty" tf:"code_content,omitempty"` + + // Specifies whether the code content is in text or zip format. Valid values: PLAINTEXT, ZIPFILE. + // +kubebuilder:validation:Optional + CodeContentType *string `json:"codeContentType" tf:"code_content_type,omitempty"` +} + +type ApplicationConfigurationInitParameters struct { + + // The code location and type parameters for the application. + ApplicationCodeConfiguration *ApplicationCodeConfigurationInitParameters `json:"applicationCodeConfiguration,omitempty" tf:"application_code_configuration,omitempty"` + + // Describes whether snapshots are enabled for a Flink-based application. + ApplicationSnapshotConfiguration *ApplicationSnapshotConfigurationInitParameters `json:"applicationSnapshotConfiguration,omitempty" tf:"application_snapshot_configuration,omitempty"` + + // Describes execution properties for a Flink-based application. + EnvironmentProperties *EnvironmentPropertiesInitParameters `json:"environmentProperties,omitempty" tf:"environment_properties,omitempty"` + + // The configuration of a Flink-based application. + FlinkApplicationConfiguration *FlinkApplicationConfigurationInitParameters `json:"flinkApplicationConfiguration,omitempty" tf:"flink_application_configuration,omitempty"` + + // Describes the starting properties for a Flink-based application. + RunConfiguration *RunConfigurationInitParameters `json:"runConfiguration,omitempty" tf:"run_configuration,omitempty"` + + // The configuration of a SQL-based application. 
+ SQLApplicationConfiguration *SQLApplicationConfigurationInitParameters `json:"sqlApplicationConfiguration,omitempty" tf:"sql_application_configuration,omitempty"` + + // The VPC configuration of a Flink-based application. + VPCConfiguration *VPCConfigurationInitParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type ApplicationConfigurationObservation struct { + + // The code location and type parameters for the application. + ApplicationCodeConfiguration *ApplicationCodeConfigurationObservation `json:"applicationCodeConfiguration,omitempty" tf:"application_code_configuration,omitempty"` + + // Describes whether snapshots are enabled for a Flink-based application. + ApplicationSnapshotConfiguration *ApplicationSnapshotConfigurationObservation `json:"applicationSnapshotConfiguration,omitempty" tf:"application_snapshot_configuration,omitempty"` + + // Describes execution properties for a Flink-based application. + EnvironmentProperties *EnvironmentPropertiesObservation `json:"environmentProperties,omitempty" tf:"environment_properties,omitempty"` + + // The configuration of a Flink-based application. + FlinkApplicationConfiguration *FlinkApplicationConfigurationObservation `json:"flinkApplicationConfiguration,omitempty" tf:"flink_application_configuration,omitempty"` + + // Describes the starting properties for a Flink-based application. + RunConfiguration *RunConfigurationObservation `json:"runConfiguration,omitempty" tf:"run_configuration,omitempty"` + + // The configuration of a SQL-based application. + SQLApplicationConfiguration *SQLApplicationConfigurationObservation `json:"sqlApplicationConfiguration,omitempty" tf:"sql_application_configuration,omitempty"` + + // The VPC configuration of a Flink-based application. 
+ VPCConfiguration *VPCConfigurationObservation `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type ApplicationConfigurationParameters struct { + + // The code location and type parameters for the application. + // +kubebuilder:validation:Optional + ApplicationCodeConfiguration *ApplicationCodeConfigurationParameters `json:"applicationCodeConfiguration" tf:"application_code_configuration,omitempty"` + + // Describes whether snapshots are enabled for a Flink-based application. + // +kubebuilder:validation:Optional + ApplicationSnapshotConfiguration *ApplicationSnapshotConfigurationParameters `json:"applicationSnapshotConfiguration,omitempty" tf:"application_snapshot_configuration,omitempty"` + + // Describes execution properties for a Flink-based application. + // +kubebuilder:validation:Optional + EnvironmentProperties *EnvironmentPropertiesParameters `json:"environmentProperties,omitempty" tf:"environment_properties,omitempty"` + + // The configuration of a Flink-based application. + // +kubebuilder:validation:Optional + FlinkApplicationConfiguration *FlinkApplicationConfigurationParameters `json:"flinkApplicationConfiguration,omitempty" tf:"flink_application_configuration,omitempty"` + + // Describes the starting properties for a Flink-based application. + // +kubebuilder:validation:Optional + RunConfiguration *RunConfigurationParameters `json:"runConfiguration,omitempty" tf:"run_configuration,omitempty"` + + // The configuration of a SQL-based application. + // +kubebuilder:validation:Optional + SQLApplicationConfiguration *SQLApplicationConfigurationParameters `json:"sqlApplicationConfiguration,omitempty" tf:"sql_application_configuration,omitempty"` + + // The VPC configuration of a Flink-based application. 
+ // +kubebuilder:validation:Optional + VPCConfiguration *VPCConfigurationParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type ApplicationInitParameters struct { + + // The application's configuration + ApplicationConfiguration *ApplicationConfigurationInitParameters `json:"applicationConfiguration,omitempty" tf:"application_configuration,omitempty"` + + // A CloudWatch log stream to monitor application configuration errors. + CloudwatchLoggingOptions *CloudwatchLoggingOptionsInitParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // A summary description of the application. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether to force stop an unresponsive Flink-based application. + ForceStop *bool `json:"forceStop,omitempty" tf:"force_stop,omitempty"` + + // The runtime environment for the application. Valid values: SQL-1_0, FLINK-1_6, FLINK-1_8, FLINK-1_11, FLINK-1_13, FLINK-1_15, FLINK-1_18. + RuntimeEnvironment *string `json:"runtimeEnvironment,omitempty" tf:"runtime_environment,omitempty"` + + // The ARN of the IAM role used by the application to access Kinesis data streams, Kinesis Data Firehose delivery streams, Amazon S3 objects, and other external resources. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ServiceExecutionRole *string `json:"serviceExecutionRole,omitempty" tf:"service_execution_role,omitempty"` + + // Reference to a Role in iam to populate serviceExecutionRole. + // +kubebuilder:validation:Optional + ServiceExecutionRoleRef *v1.Reference `json:"serviceExecutionRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceExecutionRole. 
+ // +kubebuilder:validation:Optional + ServiceExecutionRoleSelector *v1.Selector `json:"serviceExecutionRoleSelector,omitempty" tf:"-"` + + // Whether to start or stop the application. + StartApplication *bool `json:"startApplication,omitempty" tf:"start_application,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ApplicationObservation struct { + + // The application's configuration + ApplicationConfiguration *ApplicationConfigurationObservation `json:"applicationConfiguration,omitempty" tf:"application_configuration,omitempty"` + + // The ARN of the application. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A CloudWatch log stream to monitor application configuration errors. + CloudwatchLoggingOptions *CloudwatchLoggingOptionsObservation `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // The current timestamp when the application was created. + CreateTimestamp *string `json:"createTimestamp,omitempty" tf:"create_timestamp,omitempty"` + + // A summary description of the application. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether to force stop an unresponsive Flink-based application. + ForceStop *bool `json:"forceStop,omitempty" tf:"force_stop,omitempty"` + + // The application identifier. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The current timestamp when the application was last updated. + LastUpdateTimestamp *string `json:"lastUpdateTimestamp,omitempty" tf:"last_update_timestamp,omitempty"` + + // The runtime environment for the application. Valid values: SQL-1_0, FLINK-1_6, FLINK-1_8, FLINK-1_11, FLINK-1_13, FLINK-1_15, FLINK-1_18. 
+ RuntimeEnvironment *string `json:"runtimeEnvironment,omitempty" tf:"runtime_environment,omitempty"` + + // The ARN of the IAM role used by the application to access Kinesis data streams, Kinesis Data Firehose delivery streams, Amazon S3 objects, and other external resources. + ServiceExecutionRole *string `json:"serviceExecutionRole,omitempty" tf:"service_execution_role,omitempty"` + + // Whether to start or stop the application. + StartApplication *bool `json:"startApplication,omitempty" tf:"start_application,omitempty"` + + // The status of the application. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The current application version. Kinesis Data Analytics updates the version_id each time the application is updated. + VersionID *float64 `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type ApplicationParameters struct { + + // The application's configuration + // +kubebuilder:validation:Optional + ApplicationConfiguration *ApplicationConfigurationParameters `json:"applicationConfiguration,omitempty" tf:"application_configuration,omitempty"` + + // A CloudWatch log stream to monitor application configuration errors. + // +kubebuilder:validation:Optional + CloudwatchLoggingOptions *CloudwatchLoggingOptionsParameters `json:"cloudwatchLoggingOptions,omitempty" tf:"cloudwatch_logging_options,omitempty"` + + // A summary description of the application. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether to force stop an unresponsive Flink-based application. 
+ // +kubebuilder:validation:Optional + ForceStop *bool `json:"forceStop,omitempty" tf:"force_stop,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The runtime environment for the application. Valid values: SQL-1_0, FLINK-1_6, FLINK-1_8, FLINK-1_11, FLINK-1_13, FLINK-1_15, FLINK-1_18. + // +kubebuilder:validation:Optional + RuntimeEnvironment *string `json:"runtimeEnvironment,omitempty" tf:"runtime_environment,omitempty"` + + // The ARN of the IAM role used by the application to access Kinesis data streams, Kinesis Data Firehose delivery streams, Amazon S3 objects, and other external resources. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ServiceExecutionRole *string `json:"serviceExecutionRole,omitempty" tf:"service_execution_role,omitempty"` + + // Reference to a Role in iam to populate serviceExecutionRole. + // +kubebuilder:validation:Optional + ServiceExecutionRoleRef *v1.Reference `json:"serviceExecutionRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceExecutionRole. + // +kubebuilder:validation:Optional + ServiceExecutionRoleSelector *v1.Selector `json:"serviceExecutionRoleSelector,omitempty" tf:"-"` + + // Whether to start or stop the application. + // +kubebuilder:validation:Optional + StartApplication *bool `json:"startApplication,omitempty" tf:"start_application,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ApplicationRestoreConfigurationInitParameters struct { + + // Specifies how the application should be restored. 
Valid values: RESTORE_FROM_CUSTOM_SNAPSHOT, RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT. + ApplicationRestoreType *string `json:"applicationRestoreType,omitempty" tf:"application_restore_type,omitempty"` + + // The identifier of an existing snapshot of application state to use to restart an application. The application uses this value if RESTORE_FROM_CUSTOM_SNAPSHOT is specified for application_restore_type. + SnapshotName *string `json:"snapshotName,omitempty" tf:"snapshot_name,omitempty"` +} + +type ApplicationRestoreConfigurationObservation struct { + + // Specifies how the application should be restored. Valid values: RESTORE_FROM_CUSTOM_SNAPSHOT, RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT. + ApplicationRestoreType *string `json:"applicationRestoreType,omitempty" tf:"application_restore_type,omitempty"` + + // The identifier of an existing snapshot of application state to use to restart an application. The application uses this value if RESTORE_FROM_CUSTOM_SNAPSHOT is specified for application_restore_type. + SnapshotName *string `json:"snapshotName,omitempty" tf:"snapshot_name,omitempty"` +} + +type ApplicationRestoreConfigurationParameters struct { + + // Specifies how the application should be restored. Valid values: RESTORE_FROM_CUSTOM_SNAPSHOT, RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT. + // +kubebuilder:validation:Optional + ApplicationRestoreType *string `json:"applicationRestoreType,omitempty" tf:"application_restore_type,omitempty"` + + // The identifier of an existing snapshot of application state to use to restart an application. The application uses this value if RESTORE_FROM_CUSTOM_SNAPSHOT is specified for application_restore_type. 
+ // +kubebuilder:validation:Optional + SnapshotName *string `json:"snapshotName,omitempty" tf:"snapshot_name,omitempty"` +} + +type ApplicationSnapshotConfigurationInitParameters struct { + + // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application. + SnapshotsEnabled *bool `json:"snapshotsEnabled,omitempty" tf:"snapshots_enabled,omitempty"` +} + +type ApplicationSnapshotConfigurationObservation struct { + + // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application. + SnapshotsEnabled *bool `json:"snapshotsEnabled,omitempty" tf:"snapshots_enabled,omitempty"` +} + +type ApplicationSnapshotConfigurationParameters struct { + + // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application. + // +kubebuilder:validation:Optional + SnapshotsEnabled *bool `json:"snapshotsEnabled" tf:"snapshots_enabled,omitempty"` +} + +type CheckpointConfigurationInitParameters struct { + + // Describes the interval in milliseconds between checkpoint operations. + CheckpointInterval *float64 `json:"checkpointInterval,omitempty" tf:"checkpoint_interval,omitempty"` + + // Describes whether checkpointing is enabled for a Flink-based Kinesis Data Analytics application. + CheckpointingEnabled *bool `json:"checkpointingEnabled,omitempty" tf:"checkpointing_enabled,omitempty"` + + // Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. Valid values: CUSTOM, DEFAULT. Set this attribute to CUSTOM in order for any specified checkpointing_enabled, checkpoint_interval, or min_pause_between_checkpoints attribute values to be effective. If this attribute is set to DEFAULT, the application will always use the following values: + ConfigurationType *string `json:"configurationType,omitempty" tf:"configuration_type,omitempty"` + + // Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. 
+ MinPauseBetweenCheckpoints *float64 `json:"minPauseBetweenCheckpoints,omitempty" tf:"min_pause_between_checkpoints,omitempty"` +} + +type CheckpointConfigurationObservation struct { + + // Describes the interval in milliseconds between checkpoint operations. + CheckpointInterval *float64 `json:"checkpointInterval,omitempty" tf:"checkpoint_interval,omitempty"` + + // Describes whether checkpointing is enabled for a Flink-based Kinesis Data Analytics application. + CheckpointingEnabled *bool `json:"checkpointingEnabled,omitempty" tf:"checkpointing_enabled,omitempty"` + + // Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. Valid values: CUSTOM, DEFAULT. Set this attribute to CUSTOM in order for any specified checkpointing_enabled, checkpoint_interval, or min_pause_between_checkpoints attribute values to be effective. If this attribute is set to DEFAULT, the application will always use the following values: + ConfigurationType *string `json:"configurationType,omitempty" tf:"configuration_type,omitempty"` + + // Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. + MinPauseBetweenCheckpoints *float64 `json:"minPauseBetweenCheckpoints,omitempty" tf:"min_pause_between_checkpoints,omitempty"` +} + +type CheckpointConfigurationParameters struct { + + // Describes the interval in milliseconds between checkpoint operations. + // +kubebuilder:validation:Optional + CheckpointInterval *float64 `json:"checkpointInterval,omitempty" tf:"checkpoint_interval,omitempty"` + + // Describes whether checkpointing is enabled for a Flink-based Kinesis Data Analytics application. + // +kubebuilder:validation:Optional + CheckpointingEnabled *bool `json:"checkpointingEnabled,omitempty" tf:"checkpointing_enabled,omitempty"` + + // Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. Valid values: CUSTOM, DEFAULT. 
Set this attribute to CUSTOM in order for any specified checkpointing_enabled, checkpoint_interval, or min_pause_between_checkpoints attribute values to be effective. If this attribute is set to DEFAULT, the application will always use the following values: + // +kubebuilder:validation:Optional + ConfigurationType *string `json:"configurationType" tf:"configuration_type,omitempty"` + + // Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. + // +kubebuilder:validation:Optional + MinPauseBetweenCheckpoints *float64 `json:"minPauseBetweenCheckpoints,omitempty" tf:"min_pause_between_checkpoints,omitempty"` +} + +type CloudwatchLoggingOptionsInitParameters struct { + + // The ARN of the CloudWatch log stream to receive application messages. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Stream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + LogStreamArn *string `json:"logStreamArn,omitempty" tf:"log_stream_arn,omitempty"` + + // Reference to a Stream in cloudwatchlogs to populate logStreamArn. + // +kubebuilder:validation:Optional + LogStreamArnRef *v1.Reference `json:"logStreamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in cloudwatchlogs to populate logStreamArn. + // +kubebuilder:validation:Optional + LogStreamArnSelector *v1.Selector `json:"logStreamArnSelector,omitempty" tf:"-"` +} + +type CloudwatchLoggingOptionsObservation struct { + + // The application identifier. + CloudwatchLoggingOptionID *string `json:"cloudwatchLoggingOptionId,omitempty" tf:"cloudwatch_logging_option_id,omitempty"` + + // The ARN of the CloudWatch log stream to receive application messages. 
+ LogStreamArn *string `json:"logStreamArn,omitempty" tf:"log_stream_arn,omitempty"` +} + +type CloudwatchLoggingOptionsParameters struct { + + // The ARN of the CloudWatch log stream to receive application messages. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Stream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + LogStreamArn *string `json:"logStreamArn,omitempty" tf:"log_stream_arn,omitempty"` + + // Reference to a Stream in cloudwatchlogs to populate logStreamArn. + // +kubebuilder:validation:Optional + LogStreamArnRef *v1.Reference `json:"logStreamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in cloudwatchlogs to populate logStreamArn. + // +kubebuilder:validation:Optional + LogStreamArnSelector *v1.Selector `json:"logStreamArnSelector,omitempty" tf:"-"` +} + +type CodeContentInitParameters struct { + + // Information about the Amazon S3 bucket containing the application code. + S3ContentLocation *S3ContentLocationInitParameters `json:"s3ContentLocation,omitempty" tf:"s3_content_location,omitempty"` + + // The text-format code for the application. + TextContent *string `json:"textContent,omitempty" tf:"text_content,omitempty"` +} + +type CodeContentObservation struct { + + // Information about the Amazon S3 bucket containing the application code. + S3ContentLocation *S3ContentLocationObservation `json:"s3ContentLocation,omitempty" tf:"s3_content_location,omitempty"` + + // The text-format code for the application. + TextContent *string `json:"textContent,omitempty" tf:"text_content,omitempty"` +} + +type CodeContentParameters struct { + + // Information about the Amazon S3 bucket containing the application code. 
+ // +kubebuilder:validation:Optional + S3ContentLocation *S3ContentLocationParameters `json:"s3ContentLocation,omitempty" tf:"s3_content_location,omitempty"` + + // The text-format code for the application. + // +kubebuilder:validation:Optional + TextContent *string `json:"textContent,omitempty" tf:"text_content,omitempty"` +} + +type CsvMappingParametersInitParameters struct { + + // The column delimiter. For example, in a CSV format, a comma (,) is the typical column delimiter. + RecordColumnDelimiter *string `json:"recordColumnDelimiter,omitempty" tf:"record_column_delimiter,omitempty"` + + // The row delimiter. For example, in a CSV format, \n is the typical row delimiter. + RecordRowDelimiter *string `json:"recordRowDelimiter,omitempty" tf:"record_row_delimiter,omitempty"` +} + +type CsvMappingParametersObservation struct { + + // The column delimiter. For example, in a CSV format, a comma (,) is the typical column delimiter. + RecordColumnDelimiter *string `json:"recordColumnDelimiter,omitempty" tf:"record_column_delimiter,omitempty"` + + // The row delimiter. For example, in a CSV format, \n is the typical row delimiter. + RecordRowDelimiter *string `json:"recordRowDelimiter,omitempty" tf:"record_row_delimiter,omitempty"` +} + +type CsvMappingParametersParameters struct { + + // The column delimiter. For example, in a CSV format, a comma (,) is the typical column delimiter. + // +kubebuilder:validation:Optional + RecordColumnDelimiter *string `json:"recordColumnDelimiter" tf:"record_column_delimiter,omitempty"` + + // The row delimiter. For example, in a CSV format, \n is the typical row delimiter. + // +kubebuilder:validation:Optional + RecordRowDelimiter *string `json:"recordRowDelimiter" tf:"record_row_delimiter,omitempty"` +} + +type DestinationSchemaInitParameters struct { + + // The type of record format. Valid values: CSV, JSON. 
+ RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type DestinationSchemaObservation struct { + + // The type of record format. Valid values: CSV, JSON. + RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type DestinationSchemaParameters struct { + + // The type of record format. Valid values: CSV, JSON. + // +kubebuilder:validation:Optional + RecordFormatType *string `json:"recordFormatType" tf:"record_format_type,omitempty"` +} + +type EnvironmentPropertiesInitParameters struct { + + // Describes the execution property groups. + PropertyGroup []PropertyGroupInitParameters `json:"propertyGroup,omitempty" tf:"property_group,omitempty"` +} + +type EnvironmentPropertiesObservation struct { + + // Describes the execution property groups. + PropertyGroup []PropertyGroupObservation `json:"propertyGroup,omitempty" tf:"property_group,omitempty"` +} + +type EnvironmentPropertiesParameters struct { + + // Describes the execution property groups. + // +kubebuilder:validation:Optional + PropertyGroup []PropertyGroupParameters `json:"propertyGroup" tf:"property_group,omitempty"` +} + +type FlinkApplicationConfigurationInitParameters struct { + + // Describes an application's checkpointing configuration. + CheckpointConfiguration *CheckpointConfigurationInitParameters `json:"checkpointConfiguration,omitempty" tf:"checkpoint_configuration,omitempty"` + + // Describes configuration parameters for CloudWatch logging for an application. + MonitoringConfiguration *MonitoringConfigurationInitParameters `json:"monitoringConfiguration,omitempty" tf:"monitoring_configuration,omitempty"` + + // Describes parameters for how an application executes multiple tasks simultaneously. 
+ ParallelismConfiguration *ParallelismConfigurationInitParameters `json:"parallelismConfiguration,omitempty" tf:"parallelism_configuration,omitempty"` +} + +type FlinkApplicationConfigurationObservation struct { + + // Describes an application's checkpointing configuration. + CheckpointConfiguration *CheckpointConfigurationObservation `json:"checkpointConfiguration,omitempty" tf:"checkpoint_configuration,omitempty"` + + // Describes configuration parameters for CloudWatch logging for an application. + MonitoringConfiguration *MonitoringConfigurationObservation `json:"monitoringConfiguration,omitempty" tf:"monitoring_configuration,omitempty"` + + // Describes parameters for how an application executes multiple tasks simultaneously. + ParallelismConfiguration *ParallelismConfigurationObservation `json:"parallelismConfiguration,omitempty" tf:"parallelism_configuration,omitempty"` +} + +type FlinkApplicationConfigurationParameters struct { + + // Describes an application's checkpointing configuration. + // +kubebuilder:validation:Optional + CheckpointConfiguration *CheckpointConfigurationParameters `json:"checkpointConfiguration,omitempty" tf:"checkpoint_configuration,omitempty"` + + // Describes configuration parameters for CloudWatch logging for an application. + // +kubebuilder:validation:Optional + MonitoringConfiguration *MonitoringConfigurationParameters `json:"monitoringConfiguration,omitempty" tf:"monitoring_configuration,omitempty"` + + // Describes parameters for how an application executes multiple tasks simultaneously. + // +kubebuilder:validation:Optional + ParallelismConfiguration *ParallelismConfigurationParameters `json:"parallelismConfiguration,omitempty" tf:"parallelism_configuration,omitempty"` +} + +type FlinkRunConfigurationInitParameters struct { + + // When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. Default is false. 
+ AllowNonRestoredState *bool `json:"allowNonRestoredState,omitempty" tf:"allow_non_restored_state,omitempty"` +} + +type FlinkRunConfigurationObservation struct { + + // When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. Default is false. + AllowNonRestoredState *bool `json:"allowNonRestoredState,omitempty" tf:"allow_non_restored_state,omitempty"` +} + +type FlinkRunConfigurationParameters struct { + + // When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. Default is false. + // +kubebuilder:validation:Optional + AllowNonRestoredState *bool `json:"allowNonRestoredState,omitempty" tf:"allow_non_restored_state,omitempty"` +} + +type InputInitParameters struct { + + // Describes the number of in-application streams to create. + InputParallelism *InputParallelismInitParameters `json:"inputParallelism,omitempty" tf:"input_parallelism,omitempty"` + + // The input processing configuration for the input. + // An input processor transforms records as they are received from the stream, before the application's SQL code executes. + InputProcessingConfiguration *InputProcessingConfigurationInitParameters `json:"inputProcessingConfiguration,omitempty" tf:"input_processing_configuration,omitempty"` + + // Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created. + InputSchema *InputSchemaInitParameters `json:"inputSchema,omitempty" tf:"input_schema,omitempty"` + + // The point at which the application starts processing records from the streaming source. 
+ InputStartingPositionConfiguration []InputStartingPositionConfigurationInitParameters `json:"inputStartingPositionConfiguration,omitempty" tf:"input_starting_position_configuration,omitempty"` + + // If the streaming source is a Kinesis Data Firehose delivery stream, identifies the delivery stream's ARN. + KinesisFirehoseInput *KinesisFirehoseInputInitParameters `json:"kinesisFirehoseInput,omitempty" tf:"kinesis_firehose_input,omitempty"` + + // If the streaming source is a Kinesis data stream, identifies the stream's Amazon Resource Name (ARN). + KinesisStreamsInput *KinesisStreamsInputInitParameters `json:"kinesisStreamsInput,omitempty" tf:"kinesis_streams_input,omitempty"` + + // The name prefix to use when creating an in-application stream. + NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"` +} + +type InputLambdaProcessorInitParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type InputLambdaProcessorObservation struct { + + // The ARN of the Lambda function that operates on records in the stream. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type InputLambdaProcessorParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn" tf:"resource_arn,omitempty"` +} + +type InputObservation struct { + InAppStreamNames []*string `json:"inAppStreamNames,omitempty" tf:"in_app_stream_names,omitempty"` + + // The application identifier. + InputID *string `json:"inputId,omitempty" tf:"input_id,omitempty"` + + // Describes the number of in-application streams to create. + InputParallelism *InputParallelismObservation `json:"inputParallelism,omitempty" tf:"input_parallelism,omitempty"` + + // The input processing configuration for the input. 
+ // An input processor transforms records as they are received from the stream, before the application's SQL code executes. + InputProcessingConfiguration *InputProcessingConfigurationObservation `json:"inputProcessingConfiguration,omitempty" tf:"input_processing_configuration,omitempty"` + + // Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created. + InputSchema *InputSchemaObservation `json:"inputSchema,omitempty" tf:"input_schema,omitempty"` + + // The point at which the application starts processing records from the streaming source. + InputStartingPositionConfiguration []InputStartingPositionConfigurationObservation `json:"inputStartingPositionConfiguration,omitempty" tf:"input_starting_position_configuration,omitempty"` + + // If the streaming source is a Kinesis Data Firehose delivery stream, identifies the delivery stream's ARN. + KinesisFirehoseInput *KinesisFirehoseInputObservation `json:"kinesisFirehoseInput,omitempty" tf:"kinesis_firehose_input,omitempty"` + + // If the streaming source is a Kinesis data stream, identifies the stream's Amazon Resource Name (ARN). + KinesisStreamsInput *KinesisStreamsInputObservation `json:"kinesisStreamsInput,omitempty" tf:"kinesis_streams_input,omitempty"` + + // The name prefix to use when creating an in-application stream. + NamePrefix *string `json:"namePrefix,omitempty" tf:"name_prefix,omitempty"` +} + +type InputParallelismInitParameters struct { + + // The number of in-application streams to create. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type InputParallelismObservation struct { + + // The number of in-application streams to create. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type InputParallelismParameters struct { + + // The number of in-application streams to create. 
+ // +kubebuilder:validation:Optional + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type InputParameters struct { + + // Describes the number of in-application streams to create. + // +kubebuilder:validation:Optional + InputParallelism *InputParallelismParameters `json:"inputParallelism,omitempty" tf:"input_parallelism,omitempty"` + + // The input processing configuration for the input. + // An input processor transforms records as they are received from the stream, before the application's SQL code executes. + // +kubebuilder:validation:Optional + InputProcessingConfiguration *InputProcessingConfigurationParameters `json:"inputProcessingConfiguration,omitempty" tf:"input_processing_configuration,omitempty"` + + // Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created. + // +kubebuilder:validation:Optional + InputSchema *InputSchemaParameters `json:"inputSchema" tf:"input_schema,omitempty"` + + // The point at which the application starts processing records from the streaming source. + // +kubebuilder:validation:Optional + InputStartingPositionConfiguration []InputStartingPositionConfigurationParameters `json:"inputStartingPositionConfiguration,omitempty" tf:"input_starting_position_configuration,omitempty"` + + // If the streaming source is a Kinesis Data Firehose delivery stream, identifies the delivery stream's ARN. + // +kubebuilder:validation:Optional + KinesisFirehoseInput *KinesisFirehoseInputParameters `json:"kinesisFirehoseInput,omitempty" tf:"kinesis_firehose_input,omitempty"` + + // If the streaming source is a Kinesis data stream, identifies the stream's Amazon Resource Name (ARN). + // +kubebuilder:validation:Optional + KinesisStreamsInput *KinesisStreamsInputParameters `json:"kinesisStreamsInput,omitempty" tf:"kinesis_streams_input,omitempty"` + + // The name prefix to use when creating an in-application stream. 
+ // +kubebuilder:validation:Optional + NamePrefix *string `json:"namePrefix" tf:"name_prefix,omitempty"` +} + +type InputProcessingConfigurationInitParameters struct { + + // Describes the Lambda function that is used to preprocess the records in the stream before being processed by your application code. + InputLambdaProcessor *InputLambdaProcessorInitParameters `json:"inputLambdaProcessor,omitempty" tf:"input_lambda_processor,omitempty"` +} + +type InputProcessingConfigurationObservation struct { + + // Describes the Lambda function that is used to preprocess the records in the stream before being processed by your application code. + InputLambdaProcessor *InputLambdaProcessorObservation `json:"inputLambdaProcessor,omitempty" tf:"input_lambda_processor,omitempty"` +} + +type InputProcessingConfigurationParameters struct { + + // Describes the Lambda function that is used to preprocess the records in the stream before being processed by your application code. + // +kubebuilder:validation:Optional + InputLambdaProcessor *InputLambdaProcessorParameters `json:"inputLambdaProcessor" tf:"input_lambda_processor,omitempty"` +} + +type InputSchemaInitParameters struct { + + // Describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream. + RecordColumn []RecordColumnInitParameters `json:"recordColumn,omitempty" tf:"record_column,omitempty"` + + // Specifies the encoding of the records in the streaming source. For example, UTF-8. + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // Specifies the format of the records on the streaming source. + RecordFormat *RecordFormatInitParameters `json:"recordFormat,omitempty" tf:"record_format,omitempty"` +} + +type InputSchemaObservation struct { + + // Describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream. 
+ RecordColumn []RecordColumnObservation `json:"recordColumn,omitempty" tf:"record_column,omitempty"` + + // Specifies the encoding of the records in the streaming source. For example, UTF-8. + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // Specifies the format of the records on the streaming source. + RecordFormat *RecordFormatObservation `json:"recordFormat,omitempty" tf:"record_format,omitempty"` +} + +type InputSchemaParameters struct { + + // Describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream. + // +kubebuilder:validation:Optional + RecordColumn []RecordColumnParameters `json:"recordColumn" tf:"record_column,omitempty"` + + // Specifies the encoding of the records in the streaming source. For example, UTF-8. + // +kubebuilder:validation:Optional + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // Specifies the format of the records on the streaming source. + // +kubebuilder:validation:Optional + RecordFormat *RecordFormatParameters `json:"recordFormat" tf:"record_format,omitempty"` +} + +type InputStartingPositionConfigurationInitParameters struct { + + // The starting position on the stream. Valid values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON. + InputStartingPosition *string `json:"inputStartingPosition,omitempty" tf:"input_starting_position,omitempty"` +} + +type InputStartingPositionConfigurationObservation struct { + + // The starting position on the stream. Valid values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON. + InputStartingPosition *string `json:"inputStartingPosition,omitempty" tf:"input_starting_position,omitempty"` +} + +type InputStartingPositionConfigurationParameters struct { + + // The starting position on the stream. Valid values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON. 
+ // +kubebuilder:validation:Optional + InputStartingPosition *string `json:"inputStartingPosition,omitempty" tf:"input_starting_position,omitempty"` +} + +type JSONMappingParametersInitParameters struct { + + // The path to the top-level parent that contains the records. + RecordRowPath *string `json:"recordRowPath,omitempty" tf:"record_row_path,omitempty"` +} + +type JSONMappingParametersObservation struct { + + // The path to the top-level parent that contains the records. + RecordRowPath *string `json:"recordRowPath,omitempty" tf:"record_row_path,omitempty"` +} + +type JSONMappingParametersParameters struct { + + // The path to the top-level parent that contains the records. + // +kubebuilder:validation:Optional + RecordRowPath *string `json:"recordRowPath" tf:"record_row_path,omitempty"` +} + +type KinesisFirehoseInputInitParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type KinesisFirehoseInputObservation struct { + + // The ARN of the Lambda function that operates on records in the stream. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type KinesisFirehoseInputParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn" tf:"resource_arn,omitempty"` +} + +type KinesisFirehoseOutputInitParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a DeliveryStream in firehose to populate resourceArn. 
+ // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type KinesisFirehoseOutputObservation struct { + + // The ARN of the Lambda function that operates on records in the stream. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type KinesisFirehoseOutputParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a DeliveryStream in firehose to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type KinesisStreamsInputInitParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a Stream in kinesis to populate resourceArn. 
+ // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type KinesisStreamsInputObservation struct { + + // The ARN of the Lambda function that operates on records in the stream. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type KinesisStreamsInputParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a Stream in kinesis to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type KinesisStreamsOutputInitParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type KinesisStreamsOutputObservation struct { + + // The ARN of the Lambda function that operates on records in the stream. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type KinesisStreamsOutputParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. 
+ // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn" tf:"resource_arn,omitempty"` +} + +type LambdaOutputInitParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a Function in lambda to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type LambdaOutputObservation struct { + + // The ARN of the Lambda function that operates on records in the stream. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type LambdaOutputParameters struct { + + // The ARN of the Lambda function that operates on records in the stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a Function in lambda to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate resourceArn. 
+ // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type MappingParametersCsvMappingParametersInitParameters struct { + + // The column delimiter. For example, in a CSV format, a comma (,) is the typical column delimiter. + RecordColumnDelimiter *string `json:"recordColumnDelimiter,omitempty" tf:"record_column_delimiter,omitempty"` + + // The row delimiter. For example, in a CSV format, \n is the typical row delimiter. + RecordRowDelimiter *string `json:"recordRowDelimiter,omitempty" tf:"record_row_delimiter,omitempty"` +} + +type MappingParametersCsvMappingParametersObservation struct { + + // The column delimiter. For example, in a CSV format, a comma (,) is the typical column delimiter. + RecordColumnDelimiter *string `json:"recordColumnDelimiter,omitempty" tf:"record_column_delimiter,omitempty"` + + // The row delimiter. For example, in a CSV format, \n is the typical row delimiter. + RecordRowDelimiter *string `json:"recordRowDelimiter,omitempty" tf:"record_row_delimiter,omitempty"` +} + +type MappingParametersCsvMappingParametersParameters struct { + + // The column delimiter. For example, in a CSV format, a comma (,) is the typical column delimiter. + // +kubebuilder:validation:Optional + RecordColumnDelimiter *string `json:"recordColumnDelimiter" tf:"record_column_delimiter,omitempty"` + + // The row delimiter. For example, in a CSV format, \n is the typical row delimiter. + // +kubebuilder:validation:Optional + RecordRowDelimiter *string `json:"recordRowDelimiter" tf:"record_row_delimiter,omitempty"` +} + +type MappingParametersInitParameters struct { + + // Provides additional mapping information when the record format uses delimiters (for example, CSV). 
+ CsvMappingParameters *CsvMappingParametersInitParameters `json:"csvMappingParameters,omitempty" tf:"csv_mapping_parameters,omitempty"` + + // Provides additional mapping information when JSON is the record format on the streaming source. + JSONMappingParameters *JSONMappingParametersInitParameters `json:"jsonMappingParameters,omitempty" tf:"json_mapping_parameters,omitempty"` +} + +type MappingParametersJSONMappingParametersInitParameters struct { + + // The path to the top-level parent that contains the records. + RecordRowPath *string `json:"recordRowPath,omitempty" tf:"record_row_path,omitempty"` +} + +type MappingParametersJSONMappingParametersObservation struct { + + // The path to the top-level parent that contains the records. + RecordRowPath *string `json:"recordRowPath,omitempty" tf:"record_row_path,omitempty"` +} + +type MappingParametersJSONMappingParametersParameters struct { + + // The path to the top-level parent that contains the records. + // +kubebuilder:validation:Optional + RecordRowPath *string `json:"recordRowPath" tf:"record_row_path,omitempty"` +} + +type MappingParametersObservation struct { + + // Provides additional mapping information when the record format uses delimiters (for example, CSV). + CsvMappingParameters *CsvMappingParametersObservation `json:"csvMappingParameters,omitempty" tf:"csv_mapping_parameters,omitempty"` + + // Provides additional mapping information when JSON is the record format on the streaming source. + JSONMappingParameters *JSONMappingParametersObservation `json:"jsonMappingParameters,omitempty" tf:"json_mapping_parameters,omitempty"` +} + +type MappingParametersParameters struct { + + // Provides additional mapping information when the record format uses delimiters (for example, CSV). 
+ // +kubebuilder:validation:Optional + CsvMappingParameters *CsvMappingParametersParameters `json:"csvMappingParameters,omitempty" tf:"csv_mapping_parameters,omitempty"` + + // Provides additional mapping information when JSON is the record format on the streaming source. + // +kubebuilder:validation:Optional + JSONMappingParameters *JSONMappingParametersParameters `json:"jsonMappingParameters,omitempty" tf:"json_mapping_parameters,omitempty"` +} + +type MonitoringConfigurationInitParameters struct { + + // Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. Valid values: CUSTOM, DEFAULT. Set this attribute to CUSTOM in order for any specified checkpointing_enabled, checkpoint_interval, or min_pause_between_checkpoints attribute values to be effective. If this attribute is set to DEFAULT, the application will always use the following values: + ConfigurationType *string `json:"configurationType,omitempty" tf:"configuration_type,omitempty"` + + // Describes the verbosity of the CloudWatch Logs for an application. Valid values: DEBUG, ERROR, INFO, WARN. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + + // Describes the granularity of the CloudWatch Logs for an application. Valid values: APPLICATION, OPERATOR, PARALLELISM, TASK. + MetricsLevel *string `json:"metricsLevel,omitempty" tf:"metrics_level,omitempty"` +} + +type MonitoringConfigurationObservation struct { + + // Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. Valid values: CUSTOM, DEFAULT. Set this attribute to CUSTOM in order for any specified checkpointing_enabled, checkpoint_interval, or min_pause_between_checkpoints attribute values to be effective. 
If this attribute is set to DEFAULT, the application will always use the following values: + ConfigurationType *string `json:"configurationType,omitempty" tf:"configuration_type,omitempty"` + + // Describes the verbosity of the CloudWatch Logs for an application. Valid values: DEBUG, ERROR, INFO, WARN. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + + // Describes the granularity of the CloudWatch Logs for an application. Valid values: APPLICATION, OPERATOR, PARALLELISM, TASK. + MetricsLevel *string `json:"metricsLevel,omitempty" tf:"metrics_level,omitempty"` +} + +type MonitoringConfigurationParameters struct { + + // Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. Valid values: CUSTOM, DEFAULT. Set this attribute to CUSTOM in order for any specified checkpointing_enabled, checkpoint_interval, or min_pause_between_checkpoints attribute values to be effective. If this attribute is set to DEFAULT, the application will always use the following values: + // +kubebuilder:validation:Optional + ConfigurationType *string `json:"configurationType" tf:"configuration_type,omitempty"` + + // Describes the verbosity of the CloudWatch Logs for an application. Valid values: DEBUG, ERROR, INFO, WARN. + // +kubebuilder:validation:Optional + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + + // Describes the granularity of the CloudWatch Logs for an application. Valid values: APPLICATION, OPERATOR, PARALLELISM, TASK. + // +kubebuilder:validation:Optional + MetricsLevel *string `json:"metricsLevel,omitempty" tf:"metrics_level,omitempty"` +} + +type OutputInitParameters struct { + + // Describes the data format when records are written to the destination. + DestinationSchema *DestinationSchemaInitParameters `json:"destinationSchema,omitempty" tf:"destination_schema,omitempty"` + + // Identifies a Kinesis Data Firehose delivery stream as the destination. 
+ KinesisFirehoseOutput *KinesisFirehoseOutputInitParameters `json:"kinesisFirehoseOutput,omitempty" tf:"kinesis_firehose_output,omitempty"` + + // Identifies a Kinesis data stream as the destination. + KinesisStreamsOutput *KinesisStreamsOutputInitParameters `json:"kinesisStreamsOutput,omitempty" tf:"kinesis_streams_output,omitempty"` + + // Identifies a Lambda function as the destination. + LambdaOutput *LambdaOutputInitParameters `json:"lambdaOutput,omitempty" tf:"lambda_output,omitempty"` + + // The name of the application. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type OutputObservation struct { + + // Describes the data format when records are written to the destination. + DestinationSchema *DestinationSchemaObservation `json:"destinationSchema,omitempty" tf:"destination_schema,omitempty"` + + // Identifies a Kinesis Data Firehose delivery stream as the destination. + KinesisFirehoseOutput *KinesisFirehoseOutputObservation `json:"kinesisFirehoseOutput,omitempty" tf:"kinesis_firehose_output,omitempty"` + + // Identifies a Kinesis data stream as the destination. + KinesisStreamsOutput *KinesisStreamsOutputObservation `json:"kinesisStreamsOutput,omitempty" tf:"kinesis_streams_output,omitempty"` + + // Identifies a Lambda function as the destination. + LambdaOutput *LambdaOutputObservation `json:"lambdaOutput,omitempty" tf:"lambda_output,omitempty"` + + // The name of the application. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The application identifier. + OutputID *string `json:"outputId,omitempty" tf:"output_id,omitempty"` +} + +type OutputParameters struct { + + // Describes the data format when records are written to the destination. + // +kubebuilder:validation:Optional + DestinationSchema *DestinationSchemaParameters `json:"destinationSchema" tf:"destination_schema,omitempty"` + + // Identifies a Kinesis Data Firehose delivery stream as the destination. 
+ // +kubebuilder:validation:Optional + KinesisFirehoseOutput *KinesisFirehoseOutputParameters `json:"kinesisFirehoseOutput,omitempty" tf:"kinesis_firehose_output,omitempty"` + + // Identifies a Kinesis data stream as the destination. + // +kubebuilder:validation:Optional + KinesisStreamsOutput *KinesisStreamsOutputParameters `json:"kinesisStreamsOutput,omitempty" tf:"kinesis_streams_output,omitempty"` + + // Identifies a Lambda function as the destination. + // +kubebuilder:validation:Optional + LambdaOutput *LambdaOutputParameters `json:"lambdaOutput,omitempty" tf:"lambda_output,omitempty"` + + // The name of the application. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type ParallelismConfigurationInitParameters struct { + + // Describes whether the Kinesis Data Analytics service can increase the parallelism of the application in response to increased throughput. + AutoScalingEnabled *bool `json:"autoScalingEnabled,omitempty" tf:"auto_scaling_enabled,omitempty"` + + // Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. Valid values: CUSTOM, DEFAULT. Set this attribute to CUSTOM in order for any specified checkpointing_enabled, checkpoint_interval, or min_pause_between_checkpoints attribute values to be effective. If this attribute is set to DEFAULT, the application will always use the following values: + ConfigurationType *string `json:"configurationType,omitempty" tf:"configuration_type,omitempty"` + + // Describes the initial number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform. + Parallelism *float64 `json:"parallelism,omitempty" tf:"parallelism,omitempty"` + + // Describes the number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform per Kinesis Processing Unit (KPU) used by the application. 
+ ParallelismPerKpu *float64 `json:"parallelismPerKpu,omitempty" tf:"parallelism_per_kpu,omitempty"` +} + +type ParallelismConfigurationObservation struct { + + // Describes whether the Kinesis Data Analytics service can increase the parallelism of the application in response to increased throughput. + AutoScalingEnabled *bool `json:"autoScalingEnabled,omitempty" tf:"auto_scaling_enabled,omitempty"` + + // Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. Valid values: CUSTOM, DEFAULT. Set this attribute to CUSTOM in order for any specified checkpointing_enabled, checkpoint_interval, or min_pause_between_checkpoints attribute values to be effective. If this attribute is set to DEFAULT, the application will always use the following values: + ConfigurationType *string `json:"configurationType,omitempty" tf:"configuration_type,omitempty"` + + // Describes the initial number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform. + Parallelism *float64 `json:"parallelism,omitempty" tf:"parallelism,omitempty"` + + // Describes the number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform per Kinesis Processing Unit (KPU) used by the application. + ParallelismPerKpu *float64 `json:"parallelismPerKpu,omitempty" tf:"parallelism_per_kpu,omitempty"` +} + +type ParallelismConfigurationParameters struct { + + // Describes whether the Kinesis Data Analytics service can increase the parallelism of the application in response to increased throughput. + // +kubebuilder:validation:Optional + AutoScalingEnabled *bool `json:"autoScalingEnabled,omitempty" tf:"auto_scaling_enabled,omitempty"` + + // Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. Valid values: CUSTOM, DEFAULT. 
Set this attribute to CUSTOM in order for any specified checkpointing_enabled, checkpoint_interval, or min_pause_between_checkpoints attribute values to be effective. If this attribute is set to DEFAULT, the application will always use the following values: + // +kubebuilder:validation:Optional + ConfigurationType *string `json:"configurationType" tf:"configuration_type,omitempty"` + + // Describes the initial number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform. + // +kubebuilder:validation:Optional + Parallelism *float64 `json:"parallelism,omitempty" tf:"parallelism,omitempty"` + + // Describes the number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform per Kinesis Processing Unit (KPU) used by the application. + // +kubebuilder:validation:Optional + ParallelismPerKpu *float64 `json:"parallelismPerKpu,omitempty" tf:"parallelism_per_kpu,omitempty"` +} + +type PropertyGroupInitParameters struct { + + // The key of the application execution property key-value map. + PropertyGroupID *string `json:"propertyGroupId,omitempty" tf:"property_group_id,omitempty"` + + // Application execution property key-value map. + // +mapType=granular + PropertyMap map[string]*string `json:"propertyMap,omitempty" tf:"property_map,omitempty"` +} + +type PropertyGroupObservation struct { + + // The key of the application execution property key-value map. + PropertyGroupID *string `json:"propertyGroupId,omitempty" tf:"property_group_id,omitempty"` + + // Application execution property key-value map. + // +mapType=granular + PropertyMap map[string]*string `json:"propertyMap,omitempty" tf:"property_map,omitempty"` +} + +type PropertyGroupParameters struct { + + // The key of the application execution property key-value map. + // +kubebuilder:validation:Optional + PropertyGroupID *string `json:"propertyGroupId" tf:"property_group_id,omitempty"` + + // Application execution property key-value map. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + PropertyMap map[string]*string `json:"propertyMap" tf:"property_map,omitempty"` +} + +type RecordColumnInitParameters struct { + + // A reference to the data element in the streaming input or the reference data source. + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // The name of the application. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of column created in the in-application input stream or reference table. + SQLType *string `json:"sqlType,omitempty" tf:"sql_type,omitempty"` +} + +type RecordColumnObservation struct { + + // A reference to the data element in the streaming input or the reference data source. + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // The name of the application. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of column created in the in-application input stream or reference table. + SQLType *string `json:"sqlType,omitempty" tf:"sql_type,omitempty"` +} + +type RecordColumnParameters struct { + + // A reference to the data element in the streaming input or the reference data source. + // +kubebuilder:validation:Optional + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // The name of the application. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The type of column created in the in-application input stream or reference table. + // +kubebuilder:validation:Optional + SQLType *string `json:"sqlType" tf:"sql_type,omitempty"` +} + +type RecordFormatInitParameters struct { + + // Provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source. + MappingParameters *MappingParametersInitParameters `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` + + // The type of record format. 
Valid values: CSV, JSON. + RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type RecordFormatMappingParametersInitParameters struct { + + // Provides additional mapping information when the record format uses delimiters (for example, CSV). + CsvMappingParameters *MappingParametersCsvMappingParametersInitParameters `json:"csvMappingParameters,omitempty" tf:"csv_mapping_parameters,omitempty"` + + // Provides additional mapping information when JSON is the record format on the streaming source. + JSONMappingParameters *MappingParametersJSONMappingParametersInitParameters `json:"jsonMappingParameters,omitempty" tf:"json_mapping_parameters,omitempty"` +} + +type RecordFormatMappingParametersObservation struct { + + // Provides additional mapping information when the record format uses delimiters (for example, CSV). + CsvMappingParameters *MappingParametersCsvMappingParametersObservation `json:"csvMappingParameters,omitempty" tf:"csv_mapping_parameters,omitempty"` + + // Provides additional mapping information when JSON is the record format on the streaming source. + JSONMappingParameters *MappingParametersJSONMappingParametersObservation `json:"jsonMappingParameters,omitempty" tf:"json_mapping_parameters,omitempty"` +} + +type RecordFormatMappingParametersParameters struct { + + // Provides additional mapping information when the record format uses delimiters (for example, CSV). + // +kubebuilder:validation:Optional + CsvMappingParameters *MappingParametersCsvMappingParametersParameters `json:"csvMappingParameters,omitempty" tf:"csv_mapping_parameters,omitempty"` + + // Provides additional mapping information when JSON is the record format on the streaming source. 
+ // +kubebuilder:validation:Optional + JSONMappingParameters *MappingParametersJSONMappingParametersParameters `json:"jsonMappingParameters,omitempty" tf:"json_mapping_parameters,omitempty"` +} + +type RecordFormatObservation struct { + + // Provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source. + MappingParameters *MappingParametersObservation `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` + + // The type of record format. Valid values: CSV, JSON. + RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type RecordFormatParameters struct { + + // Provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source. + // +kubebuilder:validation:Optional + MappingParameters *MappingParametersParameters `json:"mappingParameters" tf:"mapping_parameters,omitempty"` + + // The type of record format. Valid values: CSV, JSON. + // +kubebuilder:validation:Optional + RecordFormatType *string `json:"recordFormatType" tf:"record_format_type,omitempty"` +} + +type ReferenceDataSourceInitParameters struct { + + // Describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream. + ReferenceSchema *ReferenceSchemaInitParameters `json:"referenceSchema,omitempty" tf:"reference_schema,omitempty"` + + // Identifies the S3 bucket and object that contains the reference data. + S3ReferenceDataSource *S3ReferenceDataSourceInitParameters `json:"s3ReferenceDataSource,omitempty" tf:"s3_reference_data_source,omitempty"` + + // The name of the in-application table to create. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type ReferenceDataSourceObservation struct { + + // The application identifier. 
+ ReferenceID *string `json:"referenceId,omitempty" tf:"reference_id,omitempty"` + + // Describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream. + ReferenceSchema *ReferenceSchemaObservation `json:"referenceSchema,omitempty" tf:"reference_schema,omitempty"` + + // Identifies the S3 bucket and object that contains the reference data. + S3ReferenceDataSource *S3ReferenceDataSourceObservation `json:"s3ReferenceDataSource,omitempty" tf:"s3_reference_data_source,omitempty"` + + // The name of the in-application table to create. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type ReferenceDataSourceParameters struct { + + // Describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream. + // +kubebuilder:validation:Optional + ReferenceSchema *ReferenceSchemaParameters `json:"referenceSchema" tf:"reference_schema,omitempty"` + + // Identifies the S3 bucket and object that contains the reference data. + // +kubebuilder:validation:Optional + S3ReferenceDataSource *S3ReferenceDataSourceParameters `json:"s3ReferenceDataSource" tf:"s3_reference_data_source,omitempty"` + + // The name of the in-application table to create. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName" tf:"table_name,omitempty"` +} + +type ReferenceSchemaInitParameters struct { + + // Describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream. + RecordColumn []ReferenceSchemaRecordColumnInitParameters `json:"recordColumn,omitempty" tf:"record_column,omitempty"` + + // Specifies the encoding of the records in the streaming source. For example, UTF-8. + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // Specifies the format of the records on the streaming source. 
+ RecordFormat *ReferenceSchemaRecordFormatInitParameters `json:"recordFormat,omitempty" tf:"record_format,omitempty"` +} + +type ReferenceSchemaObservation struct { + + // Describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream. + RecordColumn []ReferenceSchemaRecordColumnObservation `json:"recordColumn,omitempty" tf:"record_column,omitempty"` + + // Specifies the encoding of the records in the streaming source. For example, UTF-8. + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // Specifies the format of the records on the streaming source. + RecordFormat *ReferenceSchemaRecordFormatObservation `json:"recordFormat,omitempty" tf:"record_format,omitempty"` +} + +type ReferenceSchemaParameters struct { + + // Describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream. + // +kubebuilder:validation:Optional + RecordColumn []ReferenceSchemaRecordColumnParameters `json:"recordColumn" tf:"record_column,omitempty"` + + // Specifies the encoding of the records in the streaming source. For example, UTF-8. + // +kubebuilder:validation:Optional + RecordEncoding *string `json:"recordEncoding,omitempty" tf:"record_encoding,omitempty"` + + // Specifies the format of the records on the streaming source. + // +kubebuilder:validation:Optional + RecordFormat *ReferenceSchemaRecordFormatParameters `json:"recordFormat" tf:"record_format,omitempty"` +} + +type ReferenceSchemaRecordColumnInitParameters struct { + + // A reference to the data element in the streaming input or the reference data source. + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // The name of the application. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of column created in the in-application input stream or reference table. 
+ SQLType *string `json:"sqlType,omitempty" tf:"sql_type,omitempty"` +} + +type ReferenceSchemaRecordColumnObservation struct { + + // A reference to the data element in the streaming input or the reference data source. + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // The name of the application. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of column created in the in-application input stream or reference table. + SQLType *string `json:"sqlType,omitempty" tf:"sql_type,omitempty"` +} + +type ReferenceSchemaRecordColumnParameters struct { + + // A reference to the data element in the streaming input or the reference data source. + // +kubebuilder:validation:Optional + Mapping *string `json:"mapping,omitempty" tf:"mapping,omitempty"` + + // The name of the application. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The type of column created in the in-application input stream or reference table. + // +kubebuilder:validation:Optional + SQLType *string `json:"sqlType" tf:"sql_type,omitempty"` +} + +type ReferenceSchemaRecordFormatInitParameters struct { + + // Provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source. + MappingParameters *RecordFormatMappingParametersInitParameters `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` + + // The type of record format. Valid values: CSV, JSON. + RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type ReferenceSchemaRecordFormatObservation struct { + + // Provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source. 
+ MappingParameters *RecordFormatMappingParametersObservation `json:"mappingParameters,omitempty" tf:"mapping_parameters,omitempty"` + + // The type of record format. Valid values: CSV, JSON. + RecordFormatType *string `json:"recordFormatType,omitempty" tf:"record_format_type,omitempty"` +} + +type ReferenceSchemaRecordFormatParameters struct { + + // Provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source. + // +kubebuilder:validation:Optional + MappingParameters *RecordFormatMappingParametersParameters `json:"mappingParameters" tf:"mapping_parameters,omitempty"` + + // The type of record format. Valid values: CSV, JSON. + // +kubebuilder:validation:Optional + RecordFormatType *string `json:"recordFormatType" tf:"record_format_type,omitempty"` +} + +type RunConfigurationInitParameters struct { + + // The restore behavior of a restarting application. + ApplicationRestoreConfiguration *ApplicationRestoreConfigurationInitParameters `json:"applicationRestoreConfiguration,omitempty" tf:"application_restore_configuration,omitempty"` + + // The starting parameters for a Flink-based Kinesis Data Analytics application. + FlinkRunConfiguration *FlinkRunConfigurationInitParameters `json:"flinkRunConfiguration,omitempty" tf:"flink_run_configuration,omitempty"` +} + +type RunConfigurationObservation struct { + + // The restore behavior of a restarting application. + ApplicationRestoreConfiguration *ApplicationRestoreConfigurationObservation `json:"applicationRestoreConfiguration,omitempty" tf:"application_restore_configuration,omitempty"` + + // The starting parameters for a Flink-based Kinesis Data Analytics application. + FlinkRunConfiguration *FlinkRunConfigurationObservation `json:"flinkRunConfiguration,omitempty" tf:"flink_run_configuration,omitempty"` +} + +type RunConfigurationParameters struct { + + // The restore behavior of a restarting application. 
+ // +kubebuilder:validation:Optional + ApplicationRestoreConfiguration *ApplicationRestoreConfigurationParameters `json:"applicationRestoreConfiguration,omitempty" tf:"application_restore_configuration,omitempty"` + + // The starting parameters for a Flink-based Kinesis Data Analytics application. + // +kubebuilder:validation:Optional + FlinkRunConfiguration *FlinkRunConfigurationParameters `json:"flinkRunConfiguration,omitempty" tf:"flink_run_configuration,omitempty"` +} + +type S3ContentLocationInitParameters struct { + + // The ARN for the S3 bucket containing the application code. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // The file key for the object containing the application code. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` + + // Reference to a Object in s3 to populate fileKey. + // +kubebuilder:validation:Optional + FileKeyRef *v1.Reference `json:"fileKeyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate fileKey. + // +kubebuilder:validation:Optional + FileKeySelector *v1.Selector `json:"fileKeySelector,omitempty" tf:"-"` + + // The version of the object containing the application code. 
+ ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` +} + +type S3ContentLocationObservation struct { + + // The ARN for the S3 bucket containing the application code. + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // The file key for the object containing the application code. + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` + + // The version of the object containing the application code. + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` +} + +type S3ContentLocationParameters struct { + + // The ARN for the S3 bucket containing the application code. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // The file key for the object containing the application code. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Object + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key",false) + // +kubebuilder:validation:Optional + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` + + // Reference to a Object in s3 to populate fileKey. + // +kubebuilder:validation:Optional + FileKeyRef *v1.Reference `json:"fileKeyRef,omitempty" tf:"-"` + + // Selector for a Object in s3 to populate fileKey. 
+ // +kubebuilder:validation:Optional + FileKeySelector *v1.Selector `json:"fileKeySelector,omitempty" tf:"-"` + + // The version of the object containing the application code. + // +kubebuilder:validation:Optional + ObjectVersion *string `json:"objectVersion,omitempty" tf:"object_version,omitempty"` +} + +type S3ReferenceDataSourceInitParameters struct { + + // The ARN for the S3 bucket containing the application code. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // The file key for the object containing the application code. + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` +} + +type S3ReferenceDataSourceObservation struct { + + // The ARN for the S3 bucket containing the application code. + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // The file key for the object containing the application code. + FileKey *string `json:"fileKey,omitempty" tf:"file_key,omitempty"` +} + +type S3ReferenceDataSourceParameters struct { + + // The ARN for the S3 bucket containing the application code. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // The file key for the object containing the application code. + // +kubebuilder:validation:Optional + FileKey *string `json:"fileKey" tf:"file_key,omitempty"` +} + +type SQLApplicationConfigurationInitParameters struct { + + // The input stream used by the application. + Input *InputInitParameters `json:"input,omitempty" tf:"input,omitempty"` + + // The destination streams used by the application. + Output []OutputInitParameters `json:"output,omitempty" tf:"output,omitempty"` + + // The reference data source used by the application. + ReferenceDataSource *ReferenceDataSourceInitParameters `json:"referenceDataSource,omitempty" tf:"reference_data_source,omitempty"` +} + +type SQLApplicationConfigurationObservation struct { + + // The input stream used by the application. + Input *InputObservation `json:"input,omitempty" tf:"input,omitempty"` + + // The destination streams used by the application. + Output []OutputObservation `json:"output,omitempty" tf:"output,omitempty"` + + // The reference data source used by the application. + ReferenceDataSource *ReferenceDataSourceObservation `json:"referenceDataSource,omitempty" tf:"reference_data_source,omitempty"` +} + +type SQLApplicationConfigurationParameters struct { + + // The input stream used by the application. 
+ // +kubebuilder:validation:Optional + Input *InputParameters `json:"input,omitempty" tf:"input,omitempty"` + + // The destination streams used by the application. + // +kubebuilder:validation:Optional + Output []OutputParameters `json:"output,omitempty" tf:"output,omitempty"` + + // The reference data source used by the application. + // +kubebuilder:validation:Optional + ReferenceDataSource *ReferenceDataSourceParameters `json:"referenceDataSource,omitempty" tf:"reference_data_source,omitempty"` +} + +type VPCConfigurationInitParameters struct { + + // The Security Group IDs used by the VPC configuration. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The Subnet IDs used by the VPC configuration. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigurationObservation struct { + + // The Security Group IDs used by the VPC configuration. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The Subnet IDs used by the VPC configuration. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The application identifier. + VPCConfigurationID *string `json:"vpcConfigurationId,omitempty" tf:"vpc_configuration_id,omitempty"` + + // The application identifier. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCConfigurationParameters struct { + + // The Security Group IDs used by the VPC configuration. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds" tf:"security_group_ids,omitempty"` + + // The Subnet IDs used by the VPC configuration. 
+ // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"` +} + +// ApplicationSpec defines the desired state of Application +type ApplicationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ApplicationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ApplicationInitParameters `json:"initProvider,omitempty"` +} + +// ApplicationStatus defines the observed state of Application. +type ApplicationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ApplicationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Application is the Schema for the Applications API. Manages a Kinesis Analytics v2 Application. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Application struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.runtimeEnvironment) || (has(self.initProvider) && has(self.initProvider.runtimeEnvironment))",message="spec.forProvider.runtimeEnvironment is a required parameter" + Spec ApplicationSpec `json:"spec"` + Status ApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ApplicationList contains a list of Applications +type ApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Application `json:"items"` +} + +// Repository type metadata. +var ( + Application_Kind = "Application" + Application_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Application_Kind}.String() + Application_KindAPIVersion = Application_Kind + "." 
+ CRDGroupVersion.String() + Application_GroupVersionKind = CRDGroupVersion.WithKind(Application_Kind) +) + +func init() { + SchemeBuilder.Register(&Application{}, &ApplicationList{}) +} diff --git a/apis/kinesisanalyticsv2/v1beta2/zz_generated.conversion_hubs.go b/apis/kinesisanalyticsv2/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2d48655531 --- /dev/null +++ b/apis/kinesisanalyticsv2/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Application) Hub() {} diff --git a/apis/kinesisanalyticsv2/v1beta2/zz_generated.deepcopy.go b/apis/kinesisanalyticsv2/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b850c28c02 --- /dev/null +++ b/apis/kinesisanalyticsv2/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,4032 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Application) DeepCopyInto(out *Application) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Application. 
+func (in *Application) DeepCopy() *Application { + if in == nil { + return nil + } + out := new(Application) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Application) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationCodeConfigurationInitParameters) DeepCopyInto(out *ApplicationCodeConfigurationInitParameters) { + *out = *in + if in.CodeContent != nil { + in, out := &in.CodeContent, &out.CodeContent + *out = new(CodeContentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CodeContentType != nil { + in, out := &in.CodeContentType, &out.CodeContentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationCodeConfigurationInitParameters. +func (in *ApplicationCodeConfigurationInitParameters) DeepCopy() *ApplicationCodeConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ApplicationCodeConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationCodeConfigurationObservation) DeepCopyInto(out *ApplicationCodeConfigurationObservation) { + *out = *in + if in.CodeContent != nil { + in, out := &in.CodeContent, &out.CodeContent + *out = new(CodeContentObservation) + (*in).DeepCopyInto(*out) + } + if in.CodeContentType != nil { + in, out := &in.CodeContentType, &out.CodeContentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationCodeConfigurationObservation. 
+func (in *ApplicationCodeConfigurationObservation) DeepCopy() *ApplicationCodeConfigurationObservation { + if in == nil { + return nil + } + out := new(ApplicationCodeConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationCodeConfigurationParameters) DeepCopyInto(out *ApplicationCodeConfigurationParameters) { + *out = *in + if in.CodeContent != nil { + in, out := &in.CodeContent, &out.CodeContent + *out = new(CodeContentParameters) + (*in).DeepCopyInto(*out) + } + if in.CodeContentType != nil { + in, out := &in.CodeContentType, &out.CodeContentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationCodeConfigurationParameters. +func (in *ApplicationCodeConfigurationParameters) DeepCopy() *ApplicationCodeConfigurationParameters { + if in == nil { + return nil + } + out := new(ApplicationCodeConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationConfigurationInitParameters) DeepCopyInto(out *ApplicationConfigurationInitParameters) { + *out = *in + if in.ApplicationCodeConfiguration != nil { + in, out := &in.ApplicationCodeConfiguration, &out.ApplicationCodeConfiguration + *out = new(ApplicationCodeConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationSnapshotConfiguration != nil { + in, out := &in.ApplicationSnapshotConfiguration, &out.ApplicationSnapshotConfiguration + *out = new(ApplicationSnapshotConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentProperties != nil { + in, out := &in.EnvironmentProperties, &out.EnvironmentProperties + *out = new(EnvironmentPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FlinkApplicationConfiguration != nil { + in, out := &in.FlinkApplicationConfiguration, &out.FlinkApplicationConfiguration + *out = new(FlinkApplicationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RunConfiguration != nil { + in, out := &in.RunConfiguration, &out.RunConfiguration + *out = new(RunConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SQLApplicationConfiguration != nil { + in, out := &in.SQLApplicationConfiguration, &out.SQLApplicationConfiguration + *out = new(SQLApplicationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationConfigurationInitParameters. +func (in *ApplicationConfigurationInitParameters) DeepCopy() *ApplicationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ApplicationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ApplicationConfigurationObservation) DeepCopyInto(out *ApplicationConfigurationObservation) { + *out = *in + if in.ApplicationCodeConfiguration != nil { + in, out := &in.ApplicationCodeConfiguration, &out.ApplicationCodeConfiguration + *out = new(ApplicationCodeConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ApplicationSnapshotConfiguration != nil { + in, out := &in.ApplicationSnapshotConfiguration, &out.ApplicationSnapshotConfiguration + *out = new(ApplicationSnapshotConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentProperties != nil { + in, out := &in.EnvironmentProperties, &out.EnvironmentProperties + *out = new(EnvironmentPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.FlinkApplicationConfiguration != nil { + in, out := &in.FlinkApplicationConfiguration, &out.FlinkApplicationConfiguration + *out = new(FlinkApplicationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RunConfiguration != nil { + in, out := &in.RunConfiguration, &out.RunConfiguration + *out = new(RunConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.SQLApplicationConfiguration != nil { + in, out := &in.SQLApplicationConfiguration, &out.SQLApplicationConfiguration + *out = new(SQLApplicationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationConfigurationObservation. +func (in *ApplicationConfigurationObservation) DeepCopy() *ApplicationConfigurationObservation { + if in == nil { + return nil + } + out := new(ApplicationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationConfigurationParameters) DeepCopyInto(out *ApplicationConfigurationParameters) { + *out = *in + if in.ApplicationCodeConfiguration != nil { + in, out := &in.ApplicationCodeConfiguration, &out.ApplicationCodeConfiguration + *out = new(ApplicationCodeConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationSnapshotConfiguration != nil { + in, out := &in.ApplicationSnapshotConfiguration, &out.ApplicationSnapshotConfiguration + *out = new(ApplicationSnapshotConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentProperties != nil { + in, out := &in.EnvironmentProperties, &out.EnvironmentProperties + *out = new(EnvironmentPropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.FlinkApplicationConfiguration != nil { + in, out := &in.FlinkApplicationConfiguration, &out.FlinkApplicationConfiguration + *out = new(FlinkApplicationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RunConfiguration != nil { + in, out := &in.RunConfiguration, &out.RunConfiguration + *out = new(RunConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.SQLApplicationConfiguration != nil { + in, out := &in.SQLApplicationConfiguration, &out.SQLApplicationConfiguration + *out = new(SQLApplicationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationConfigurationParameters. +func (in *ApplicationConfigurationParameters) DeepCopy() *ApplicationConfigurationParameters { + if in == nil { + return nil + } + out := new(ApplicationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInitParameters) DeepCopyInto(out *ApplicationInitParameters) { + *out = *in + if in.ApplicationConfiguration != nil { + in, out := &in.ApplicationConfiguration, &out.ApplicationConfiguration + *out = new(ApplicationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(CloudwatchLoggingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ForceStop != nil { + in, out := &in.ForceStop, &out.ForceStop + *out = new(bool) + **out = **in + } + if in.RuntimeEnvironment != nil { + in, out := &in.RuntimeEnvironment, &out.RuntimeEnvironment + *out = new(string) + **out = **in + } + if in.ServiceExecutionRole != nil { + in, out := &in.ServiceExecutionRole, &out.ServiceExecutionRole + *out = new(string) + **out = **in + } + if in.ServiceExecutionRoleRef != nil { + in, out := &in.ServiceExecutionRoleRef, &out.ServiceExecutionRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceExecutionRoleSelector != nil { + in, out := &in.ServiceExecutionRoleSelector, &out.ServiceExecutionRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StartApplication != nil { + in, out := &in.StartApplication, &out.StartApplication + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInitParameters. 
+func (in *ApplicationInitParameters) DeepCopy() *ApplicationInitParameters { + if in == nil { + return nil + } + out := new(ApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationList) DeepCopyInto(out *ApplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Application, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationList. +func (in *ApplicationList) DeepCopy() *ApplicationList { + if in == nil { + return nil + } + out := new(ApplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationObservation) DeepCopyInto(out *ApplicationObservation) { + *out = *in + if in.ApplicationConfiguration != nil { + in, out := &in.ApplicationConfiguration, &out.ApplicationConfiguration + *out = new(ApplicationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(CloudwatchLoggingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.CreateTimestamp != nil { + in, out := &in.CreateTimestamp, &out.CreateTimestamp + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ForceStop != nil { + in, out := &in.ForceStop, &out.ForceStop + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdateTimestamp != nil { + in, out := &in.LastUpdateTimestamp, &out.LastUpdateTimestamp + *out = new(string) + **out = **in + } + if in.RuntimeEnvironment != nil { + in, out := &in.RuntimeEnvironment, &out.RuntimeEnvironment + *out = new(string) + **out = **in + } + if in.ServiceExecutionRole != nil { + in, out := &in.ServiceExecutionRole, &out.ServiceExecutionRole + *out = new(string) + **out = **in + } + if in.StartApplication != nil { + in, out := &in.StartApplication, &out.StartApplication + *out = new(bool) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if 
in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationObservation. +func (in *ApplicationObservation) DeepCopy() *ApplicationObservation { + if in == nil { + return nil + } + out := new(ApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationParameters) DeepCopyInto(out *ApplicationParameters) { + *out = *in + if in.ApplicationConfiguration != nil { + in, out := &in.ApplicationConfiguration, &out.ApplicationConfiguration + *out = new(ApplicationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLoggingOptions != nil { + in, out := &in.CloudwatchLoggingOptions, &out.CloudwatchLoggingOptions + *out = new(CloudwatchLoggingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ForceStop != nil { + in, out := &in.ForceStop, &out.ForceStop + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RuntimeEnvironment != nil { + in, out := &in.RuntimeEnvironment, &out.RuntimeEnvironment + *out = new(string) + **out = **in + } + if in.ServiceExecutionRole != nil { + in, out := &in.ServiceExecutionRole, &out.ServiceExecutionRole + *out = new(string) + **out = **in + } + if in.ServiceExecutionRoleRef != nil { + in, out := 
&in.ServiceExecutionRoleRef, &out.ServiceExecutionRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceExecutionRoleSelector != nil { + in, out := &in.ServiceExecutionRoleSelector, &out.ServiceExecutionRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StartApplication != nil { + in, out := &in.StartApplication, &out.StartApplication + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationParameters. +func (in *ApplicationParameters) DeepCopy() *ApplicationParameters { + if in == nil { + return nil + } + out := new(ApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationRestoreConfigurationInitParameters) DeepCopyInto(out *ApplicationRestoreConfigurationInitParameters) { + *out = *in + if in.ApplicationRestoreType != nil { + in, out := &in.ApplicationRestoreType, &out.ApplicationRestoreType + *out = new(string) + **out = **in + } + if in.SnapshotName != nil { + in, out := &in.SnapshotName, &out.SnapshotName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationRestoreConfigurationInitParameters. 
+func (in *ApplicationRestoreConfigurationInitParameters) DeepCopy() *ApplicationRestoreConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ApplicationRestoreConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationRestoreConfigurationObservation) DeepCopyInto(out *ApplicationRestoreConfigurationObservation) { + *out = *in + if in.ApplicationRestoreType != nil { + in, out := &in.ApplicationRestoreType, &out.ApplicationRestoreType + *out = new(string) + **out = **in + } + if in.SnapshotName != nil { + in, out := &in.SnapshotName, &out.SnapshotName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationRestoreConfigurationObservation. +func (in *ApplicationRestoreConfigurationObservation) DeepCopy() *ApplicationRestoreConfigurationObservation { + if in == nil { + return nil + } + out := new(ApplicationRestoreConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationRestoreConfigurationParameters) DeepCopyInto(out *ApplicationRestoreConfigurationParameters) { + *out = *in + if in.ApplicationRestoreType != nil { + in, out := &in.ApplicationRestoreType, &out.ApplicationRestoreType + *out = new(string) + **out = **in + } + if in.SnapshotName != nil { + in, out := &in.SnapshotName, &out.SnapshotName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationRestoreConfigurationParameters. 
+func (in *ApplicationRestoreConfigurationParameters) DeepCopy() *ApplicationRestoreConfigurationParameters { + if in == nil { + return nil + } + out := new(ApplicationRestoreConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSnapshotConfigurationInitParameters) DeepCopyInto(out *ApplicationSnapshotConfigurationInitParameters) { + *out = *in + if in.SnapshotsEnabled != nil { + in, out := &in.SnapshotsEnabled, &out.SnapshotsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSnapshotConfigurationInitParameters. +func (in *ApplicationSnapshotConfigurationInitParameters) DeepCopy() *ApplicationSnapshotConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ApplicationSnapshotConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSnapshotConfigurationObservation) DeepCopyInto(out *ApplicationSnapshotConfigurationObservation) { + *out = *in + if in.SnapshotsEnabled != nil { + in, out := &in.SnapshotsEnabled, &out.SnapshotsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSnapshotConfigurationObservation. +func (in *ApplicationSnapshotConfigurationObservation) DeepCopy() *ApplicationSnapshotConfigurationObservation { + if in == nil { + return nil + } + out := new(ApplicationSnapshotConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationSnapshotConfigurationParameters) DeepCopyInto(out *ApplicationSnapshotConfigurationParameters) { + *out = *in + if in.SnapshotsEnabled != nil { + in, out := &in.SnapshotsEnabled, &out.SnapshotsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSnapshotConfigurationParameters. +func (in *ApplicationSnapshotConfigurationParameters) DeepCopy() *ApplicationSnapshotConfigurationParameters { + if in == nil { + return nil + } + out := new(ApplicationSnapshotConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSpec. +func (in *ApplicationSpec) DeepCopy() *ApplicationSpec { + if in == nil { + return nil + } + out := new(ApplicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStatus. +func (in *ApplicationStatus) DeepCopy() *ApplicationStatus { + if in == nil { + return nil + } + out := new(ApplicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CheckpointConfigurationInitParameters) DeepCopyInto(out *CheckpointConfigurationInitParameters) { + *out = *in + if in.CheckpointInterval != nil { + in, out := &in.CheckpointInterval, &out.CheckpointInterval + *out = new(float64) + **out = **in + } + if in.CheckpointingEnabled != nil { + in, out := &in.CheckpointingEnabled, &out.CheckpointingEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationType != nil { + in, out := &in.ConfigurationType, &out.ConfigurationType + *out = new(string) + **out = **in + } + if in.MinPauseBetweenCheckpoints != nil { + in, out := &in.MinPauseBetweenCheckpoints, &out.MinPauseBetweenCheckpoints + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckpointConfigurationInitParameters. +func (in *CheckpointConfigurationInitParameters) DeepCopy() *CheckpointConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CheckpointConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CheckpointConfigurationObservation) DeepCopyInto(out *CheckpointConfigurationObservation) { + *out = *in + if in.CheckpointInterval != nil { + in, out := &in.CheckpointInterval, &out.CheckpointInterval + *out = new(float64) + **out = **in + } + if in.CheckpointingEnabled != nil { + in, out := &in.CheckpointingEnabled, &out.CheckpointingEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationType != nil { + in, out := &in.ConfigurationType, &out.ConfigurationType + *out = new(string) + **out = **in + } + if in.MinPauseBetweenCheckpoints != nil { + in, out := &in.MinPauseBetweenCheckpoints, &out.MinPauseBetweenCheckpoints + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckpointConfigurationObservation. 
+func (in *CheckpointConfigurationObservation) DeepCopy() *CheckpointConfigurationObservation { + if in == nil { + return nil + } + out := new(CheckpointConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CheckpointConfigurationParameters) DeepCopyInto(out *CheckpointConfigurationParameters) { + *out = *in + if in.CheckpointInterval != nil { + in, out := &in.CheckpointInterval, &out.CheckpointInterval + *out = new(float64) + **out = **in + } + if in.CheckpointingEnabled != nil { + in, out := &in.CheckpointingEnabled, &out.CheckpointingEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationType != nil { + in, out := &in.ConfigurationType, &out.ConfigurationType + *out = new(string) + **out = **in + } + if in.MinPauseBetweenCheckpoints != nil { + in, out := &in.MinPauseBetweenCheckpoints, &out.MinPauseBetweenCheckpoints + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckpointConfigurationParameters. +func (in *CheckpointConfigurationParameters) DeepCopy() *CheckpointConfigurationParameters { + if in == nil { + return nil + } + out := new(CheckpointConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchLoggingOptionsInitParameters) DeepCopyInto(out *CloudwatchLoggingOptionsInitParameters) { + *out = *in + if in.LogStreamArn != nil { + in, out := &in.LogStreamArn, &out.LogStreamArn + *out = new(string) + **out = **in + } + if in.LogStreamArnRef != nil { + in, out := &in.LogStreamArnRef, &out.LogStreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogStreamArnSelector != nil { + in, out := &in.LogStreamArnSelector, &out.LogStreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLoggingOptionsInitParameters. +func (in *CloudwatchLoggingOptionsInitParameters) DeepCopy() *CloudwatchLoggingOptionsInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchLoggingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchLoggingOptionsObservation) DeepCopyInto(out *CloudwatchLoggingOptionsObservation) { + *out = *in + if in.CloudwatchLoggingOptionID != nil { + in, out := &in.CloudwatchLoggingOptionID, &out.CloudwatchLoggingOptionID + *out = new(string) + **out = **in + } + if in.LogStreamArn != nil { + in, out := &in.LogStreamArn, &out.LogStreamArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLoggingOptionsObservation. +func (in *CloudwatchLoggingOptionsObservation) DeepCopy() *CloudwatchLoggingOptionsObservation { + if in == nil { + return nil + } + out := new(CloudwatchLoggingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchLoggingOptionsParameters) DeepCopyInto(out *CloudwatchLoggingOptionsParameters) { + *out = *in + if in.LogStreamArn != nil { + in, out := &in.LogStreamArn, &out.LogStreamArn + *out = new(string) + **out = **in + } + if in.LogStreamArnRef != nil { + in, out := &in.LogStreamArnRef, &out.LogStreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogStreamArnSelector != nil { + in, out := &in.LogStreamArnSelector, &out.LogStreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchLoggingOptionsParameters. +func (in *CloudwatchLoggingOptionsParameters) DeepCopy() *CloudwatchLoggingOptionsParameters { + if in == nil { + return nil + } + out := new(CloudwatchLoggingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeContentInitParameters) DeepCopyInto(out *CodeContentInitParameters) { + *out = *in + if in.S3ContentLocation != nil { + in, out := &in.S3ContentLocation, &out.S3ContentLocation + *out = new(S3ContentLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TextContent != nil { + in, out := &in.TextContent, &out.TextContent + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeContentInitParameters. +func (in *CodeContentInitParameters) DeepCopy() *CodeContentInitParameters { + if in == nil { + return nil + } + out := new(CodeContentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeContentObservation) DeepCopyInto(out *CodeContentObservation) { + *out = *in + if in.S3ContentLocation != nil { + in, out := &in.S3ContentLocation, &out.S3ContentLocation + *out = new(S3ContentLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.TextContent != nil { + in, out := &in.TextContent, &out.TextContent + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeContentObservation. +func (in *CodeContentObservation) DeepCopy() *CodeContentObservation { + if in == nil { + return nil + } + out := new(CodeContentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeContentParameters) DeepCopyInto(out *CodeContentParameters) { + *out = *in + if in.S3ContentLocation != nil { + in, out := &in.S3ContentLocation, &out.S3ContentLocation + *out = new(S3ContentLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.TextContent != nil { + in, out := &in.TextContent, &out.TextContent + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeContentParameters. +func (in *CodeContentParameters) DeepCopy() *CodeContentParameters { + if in == nil { + return nil + } + out := new(CodeContentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CsvMappingParametersInitParameters) DeepCopyInto(out *CsvMappingParametersInitParameters) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvMappingParametersInitParameters. +func (in *CsvMappingParametersInitParameters) DeepCopy() *CsvMappingParametersInitParameters { + if in == nil { + return nil + } + out := new(CsvMappingParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CsvMappingParametersObservation) DeepCopyInto(out *CsvMappingParametersObservation) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvMappingParametersObservation. +func (in *CsvMappingParametersObservation) DeepCopy() *CsvMappingParametersObservation { + if in == nil { + return nil + } + out := new(CsvMappingParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CsvMappingParametersParameters) DeepCopyInto(out *CsvMappingParametersParameters) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CsvMappingParametersParameters. +func (in *CsvMappingParametersParameters) DeepCopy() *CsvMappingParametersParameters { + if in == nil { + return nil + } + out := new(CsvMappingParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationSchemaInitParameters) DeepCopyInto(out *DestinationSchemaInitParameters) { + *out = *in + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationSchemaInitParameters. +func (in *DestinationSchemaInitParameters) DeepCopy() *DestinationSchemaInitParameters { + if in == nil { + return nil + } + out := new(DestinationSchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationSchemaObservation) DeepCopyInto(out *DestinationSchemaObservation) { + *out = *in + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationSchemaObservation. 
+func (in *DestinationSchemaObservation) DeepCopy() *DestinationSchemaObservation { + if in == nil { + return nil + } + out := new(DestinationSchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationSchemaParameters) DeepCopyInto(out *DestinationSchemaParameters) { + *out = *in + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationSchemaParameters. +func (in *DestinationSchemaParameters) DeepCopy() *DestinationSchemaParameters { + if in == nil { + return nil + } + out := new(DestinationSchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentPropertiesInitParameters) DeepCopyInto(out *EnvironmentPropertiesInitParameters) { + *out = *in + if in.PropertyGroup != nil { + in, out := &in.PropertyGroup, &out.PropertyGroup + *out = make([]PropertyGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentPropertiesInitParameters. +func (in *EnvironmentPropertiesInitParameters) DeepCopy() *EnvironmentPropertiesInitParameters { + if in == nil { + return nil + } + out := new(EnvironmentPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvironmentPropertiesObservation) DeepCopyInto(out *EnvironmentPropertiesObservation) { + *out = *in + if in.PropertyGroup != nil { + in, out := &in.PropertyGroup, &out.PropertyGroup + *out = make([]PropertyGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentPropertiesObservation. +func (in *EnvironmentPropertiesObservation) DeepCopy() *EnvironmentPropertiesObservation { + if in == nil { + return nil + } + out := new(EnvironmentPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentPropertiesParameters) DeepCopyInto(out *EnvironmentPropertiesParameters) { + *out = *in + if in.PropertyGroup != nil { + in, out := &in.PropertyGroup, &out.PropertyGroup + *out = make([]PropertyGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentPropertiesParameters. +func (in *EnvironmentPropertiesParameters) DeepCopy() *EnvironmentPropertiesParameters { + if in == nil { + return nil + } + out := new(EnvironmentPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlinkApplicationConfigurationInitParameters) DeepCopyInto(out *FlinkApplicationConfigurationInitParameters) { + *out = *in + if in.CheckpointConfiguration != nil { + in, out := &in.CheckpointConfiguration, &out.CheckpointConfiguration + *out = new(CheckpointConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitoringConfiguration != nil { + in, out := &in.MonitoringConfiguration, &out.MonitoringConfiguration + *out = new(MonitoringConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ParallelismConfiguration != nil { + in, out := &in.ParallelismConfiguration, &out.ParallelismConfiguration + *out = new(ParallelismConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkApplicationConfigurationInitParameters. +func (in *FlinkApplicationConfigurationInitParameters) DeepCopy() *FlinkApplicationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(FlinkApplicationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlinkApplicationConfigurationObservation) DeepCopyInto(out *FlinkApplicationConfigurationObservation) { + *out = *in + if in.CheckpointConfiguration != nil { + in, out := &in.CheckpointConfiguration, &out.CheckpointConfiguration + *out = new(CheckpointConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.MonitoringConfiguration != nil { + in, out := &in.MonitoringConfiguration, &out.MonitoringConfiguration + *out = new(MonitoringConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ParallelismConfiguration != nil { + in, out := &in.ParallelismConfiguration, &out.ParallelismConfiguration + *out = new(ParallelismConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkApplicationConfigurationObservation. +func (in *FlinkApplicationConfigurationObservation) DeepCopy() *FlinkApplicationConfigurationObservation { + if in == nil { + return nil + } + out := new(FlinkApplicationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlinkApplicationConfigurationParameters) DeepCopyInto(out *FlinkApplicationConfigurationParameters) { + *out = *in + if in.CheckpointConfiguration != nil { + in, out := &in.CheckpointConfiguration, &out.CheckpointConfiguration + *out = new(CheckpointConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitoringConfiguration != nil { + in, out := &in.MonitoringConfiguration, &out.MonitoringConfiguration + *out = new(MonitoringConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.ParallelismConfiguration != nil { + in, out := &in.ParallelismConfiguration, &out.ParallelismConfiguration + *out = new(ParallelismConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkApplicationConfigurationParameters. +func (in *FlinkApplicationConfigurationParameters) DeepCopy() *FlinkApplicationConfigurationParameters { + if in == nil { + return nil + } + out := new(FlinkApplicationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlinkRunConfigurationInitParameters) DeepCopyInto(out *FlinkRunConfigurationInitParameters) { + *out = *in + if in.AllowNonRestoredState != nil { + in, out := &in.AllowNonRestoredState, &out.AllowNonRestoredState + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkRunConfigurationInitParameters. +func (in *FlinkRunConfigurationInitParameters) DeepCopy() *FlinkRunConfigurationInitParameters { + if in == nil { + return nil + } + out := new(FlinkRunConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlinkRunConfigurationObservation) DeepCopyInto(out *FlinkRunConfigurationObservation) { + *out = *in + if in.AllowNonRestoredState != nil { + in, out := &in.AllowNonRestoredState, &out.AllowNonRestoredState + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkRunConfigurationObservation. +func (in *FlinkRunConfigurationObservation) DeepCopy() *FlinkRunConfigurationObservation { + if in == nil { + return nil + } + out := new(FlinkRunConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlinkRunConfigurationParameters) DeepCopyInto(out *FlinkRunConfigurationParameters) { + *out = *in + if in.AllowNonRestoredState != nil { + in, out := &in.AllowNonRestoredState, &out.AllowNonRestoredState + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkRunConfigurationParameters. +func (in *FlinkRunConfigurationParameters) DeepCopy() *FlinkRunConfigurationParameters { + if in == nil { + return nil + } + out := new(FlinkRunConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputInitParameters) DeepCopyInto(out *InputInitParameters) { + *out = *in + if in.InputParallelism != nil { + in, out := &in.InputParallelism, &out.InputParallelism + *out = new(InputParallelismInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputProcessingConfiguration != nil { + in, out := &in.InputProcessingConfiguration, &out.InputProcessingConfiguration + *out = new(InputProcessingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputSchema != nil { + in, out := &in.InputSchema, &out.InputSchema + *out = new(InputSchemaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputStartingPositionConfiguration != nil { + in, out := &in.InputStartingPositionConfiguration, &out.InputStartingPositionConfiguration + *out = make([]InputStartingPositionConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KinesisFirehoseInput != nil { + in, out := &in.KinesisFirehoseInput, &out.KinesisFirehoseInput + *out = new(KinesisFirehoseInputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamsInput != nil { + in, out := &in.KinesisStreamsInput, &out.KinesisStreamsInput + *out = new(KinesisStreamsInputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NamePrefix != nil { + in, out := &in.NamePrefix, &out.NamePrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputInitParameters. +func (in *InputInitParameters) DeepCopy() *InputInitParameters { + if in == nil { + return nil + } + out := new(InputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputLambdaProcessorInitParameters) DeepCopyInto(out *InputLambdaProcessorInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLambdaProcessorInitParameters. +func (in *InputLambdaProcessorInitParameters) DeepCopy() *InputLambdaProcessorInitParameters { + if in == nil { + return nil + } + out := new(InputLambdaProcessorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputLambdaProcessorObservation) DeepCopyInto(out *InputLambdaProcessorObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLambdaProcessorObservation. +func (in *InputLambdaProcessorObservation) DeepCopy() *InputLambdaProcessorObservation { + if in == nil { + return nil + } + out := new(InputLambdaProcessorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputLambdaProcessorParameters) DeepCopyInto(out *InputLambdaProcessorParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLambdaProcessorParameters. 
+func (in *InputLambdaProcessorParameters) DeepCopy() *InputLambdaProcessorParameters { + if in == nil { + return nil + } + out := new(InputLambdaProcessorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputObservation) DeepCopyInto(out *InputObservation) { + *out = *in + if in.InAppStreamNames != nil { + in, out := &in.InAppStreamNames, &out.InAppStreamNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InputID != nil { + in, out := &in.InputID, &out.InputID + *out = new(string) + **out = **in + } + if in.InputParallelism != nil { + in, out := &in.InputParallelism, &out.InputParallelism + *out = new(InputParallelismObservation) + (*in).DeepCopyInto(*out) + } + if in.InputProcessingConfiguration != nil { + in, out := &in.InputProcessingConfiguration, &out.InputProcessingConfiguration + *out = new(InputProcessingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.InputSchema != nil { + in, out := &in.InputSchema, &out.InputSchema + *out = new(InputSchemaObservation) + (*in).DeepCopyInto(*out) + } + if in.InputStartingPositionConfiguration != nil { + in, out := &in.InputStartingPositionConfiguration, &out.InputStartingPositionConfiguration + *out = make([]InputStartingPositionConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KinesisFirehoseInput != nil { + in, out := &in.KinesisFirehoseInput, &out.KinesisFirehoseInput + *out = new(KinesisFirehoseInputObservation) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamsInput != nil { + in, out := &in.KinesisStreamsInput, &out.KinesisStreamsInput + *out = new(KinesisStreamsInputObservation) + (*in).DeepCopyInto(*out) + } + if in.NamePrefix != nil { + in, out := &in.NamePrefix, &out.NamePrefix + 
*out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputObservation. +func (in *InputObservation) DeepCopy() *InputObservation { + if in == nil { + return nil + } + out := new(InputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputParallelismInitParameters) DeepCopyInto(out *InputParallelismInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputParallelismInitParameters. +func (in *InputParallelismInitParameters) DeepCopy() *InputParallelismInitParameters { + if in == nil { + return nil + } + out := new(InputParallelismInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputParallelismObservation) DeepCopyInto(out *InputParallelismObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputParallelismObservation. +func (in *InputParallelismObservation) DeepCopy() *InputParallelismObservation { + if in == nil { + return nil + } + out := new(InputParallelismObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputParallelismParameters) DeepCopyInto(out *InputParallelismParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputParallelismParameters. +func (in *InputParallelismParameters) DeepCopy() *InputParallelismParameters { + if in == nil { + return nil + } + out := new(InputParallelismParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputParameters) DeepCopyInto(out *InputParameters) { + *out = *in + if in.InputParallelism != nil { + in, out := &in.InputParallelism, &out.InputParallelism + *out = new(InputParallelismParameters) + (*in).DeepCopyInto(*out) + } + if in.InputProcessingConfiguration != nil { + in, out := &in.InputProcessingConfiguration, &out.InputProcessingConfiguration + *out = new(InputProcessingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.InputSchema != nil { + in, out := &in.InputSchema, &out.InputSchema + *out = new(InputSchemaParameters) + (*in).DeepCopyInto(*out) + } + if in.InputStartingPositionConfiguration != nil { + in, out := &in.InputStartingPositionConfiguration, &out.InputStartingPositionConfiguration + *out = make([]InputStartingPositionConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KinesisFirehoseInput != nil { + in, out := &in.KinesisFirehoseInput, &out.KinesisFirehoseInput + *out = new(KinesisFirehoseInputParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamsInput != nil { + in, out := &in.KinesisStreamsInput, &out.KinesisStreamsInput + *out = new(KinesisStreamsInputParameters) + (*in).DeepCopyInto(*out) + } + if in.NamePrefix != nil { + in, out := &in.NamePrefix, &out.NamePrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is 
an autogenerated deepcopy function, copying the receiver, creating a new InputParameters. +func (in *InputParameters) DeepCopy() *InputParameters { + if in == nil { + return nil + } + out := new(InputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputProcessingConfigurationInitParameters) DeepCopyInto(out *InputProcessingConfigurationInitParameters) { + *out = *in + if in.InputLambdaProcessor != nil { + in, out := &in.InputLambdaProcessor, &out.InputLambdaProcessor + *out = new(InputLambdaProcessorInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputProcessingConfigurationInitParameters. +func (in *InputProcessingConfigurationInitParameters) DeepCopy() *InputProcessingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(InputProcessingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputProcessingConfigurationObservation) DeepCopyInto(out *InputProcessingConfigurationObservation) { + *out = *in + if in.InputLambdaProcessor != nil { + in, out := &in.InputLambdaProcessor, &out.InputLambdaProcessor + *out = new(InputLambdaProcessorObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputProcessingConfigurationObservation. +func (in *InputProcessingConfigurationObservation) DeepCopy() *InputProcessingConfigurationObservation { + if in == nil { + return nil + } + out := new(InputProcessingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputProcessingConfigurationParameters) DeepCopyInto(out *InputProcessingConfigurationParameters) { + *out = *in + if in.InputLambdaProcessor != nil { + in, out := &in.InputLambdaProcessor, &out.InputLambdaProcessor + *out = new(InputLambdaProcessorParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputProcessingConfigurationParameters. +func (in *InputProcessingConfigurationParameters) DeepCopy() *InputProcessingConfigurationParameters { + if in == nil { + return nil + } + out := new(InputProcessingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputSchemaInitParameters) DeepCopyInto(out *InputSchemaInitParameters) { + *out = *in + if in.RecordColumn != nil { + in, out := &in.RecordColumn, &out.RecordColumn + *out = make([]RecordColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(RecordFormatInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSchemaInitParameters. +func (in *InputSchemaInitParameters) DeepCopy() *InputSchemaInitParameters { + if in == nil { + return nil + } + out := new(InputSchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputSchemaObservation) DeepCopyInto(out *InputSchemaObservation) { + *out = *in + if in.RecordColumn != nil { + in, out := &in.RecordColumn, &out.RecordColumn + *out = make([]RecordColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(RecordFormatObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSchemaObservation. +func (in *InputSchemaObservation) DeepCopy() *InputSchemaObservation { + if in == nil { + return nil + } + out := new(InputSchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputSchemaParameters) DeepCopyInto(out *InputSchemaParameters) { + *out = *in + if in.RecordColumn != nil { + in, out := &in.RecordColumn, &out.RecordColumn + *out = make([]RecordColumnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(RecordFormatParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSchemaParameters. +func (in *InputSchemaParameters) DeepCopy() *InputSchemaParameters { + if in == nil { + return nil + } + out := new(InputSchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputStartingPositionConfigurationInitParameters) DeepCopyInto(out *InputStartingPositionConfigurationInitParameters) { + *out = *in + if in.InputStartingPosition != nil { + in, out := &in.InputStartingPosition, &out.InputStartingPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputStartingPositionConfigurationInitParameters. +func (in *InputStartingPositionConfigurationInitParameters) DeepCopy() *InputStartingPositionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(InputStartingPositionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputStartingPositionConfigurationObservation) DeepCopyInto(out *InputStartingPositionConfigurationObservation) { + *out = *in + if in.InputStartingPosition != nil { + in, out := &in.InputStartingPosition, &out.InputStartingPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputStartingPositionConfigurationObservation. +func (in *InputStartingPositionConfigurationObservation) DeepCopy() *InputStartingPositionConfigurationObservation { + if in == nil { + return nil + } + out := new(InputStartingPositionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputStartingPositionConfigurationParameters) DeepCopyInto(out *InputStartingPositionConfigurationParameters) { + *out = *in + if in.InputStartingPosition != nil { + in, out := &in.InputStartingPosition, &out.InputStartingPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputStartingPositionConfigurationParameters. +func (in *InputStartingPositionConfigurationParameters) DeepCopy() *InputStartingPositionConfigurationParameters { + if in == nil { + return nil + } + out := new(InputStartingPositionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONMappingParametersInitParameters) DeepCopyInto(out *JSONMappingParametersInitParameters) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONMappingParametersInitParameters. +func (in *JSONMappingParametersInitParameters) DeepCopy() *JSONMappingParametersInitParameters { + if in == nil { + return nil + } + out := new(JSONMappingParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONMappingParametersObservation) DeepCopyInto(out *JSONMappingParametersObservation) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONMappingParametersObservation. 
+func (in *JSONMappingParametersObservation) DeepCopy() *JSONMappingParametersObservation { + if in == nil { + return nil + } + out := new(JSONMappingParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONMappingParametersParameters) DeepCopyInto(out *JSONMappingParametersParameters) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONMappingParametersParameters. +func (in *JSONMappingParametersParameters) DeepCopy() *JSONMappingParametersParameters { + if in == nil { + return nil + } + out := new(JSONMappingParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisFirehoseInputInitParameters) DeepCopyInto(out *KinesisFirehoseInputInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseInputInitParameters. +func (in *KinesisFirehoseInputInitParameters) DeepCopy() *KinesisFirehoseInputInitParameters { + if in == nil { + return nil + } + out := new(KinesisFirehoseInputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisFirehoseInputObservation) DeepCopyInto(out *KinesisFirehoseInputObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseInputObservation. +func (in *KinesisFirehoseInputObservation) DeepCopy() *KinesisFirehoseInputObservation { + if in == nil { + return nil + } + out := new(KinesisFirehoseInputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisFirehoseInputParameters) DeepCopyInto(out *KinesisFirehoseInputParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseInputParameters. +func (in *KinesisFirehoseInputParameters) DeepCopy() *KinesisFirehoseInputParameters { + if in == nil { + return nil + } + out := new(KinesisFirehoseInputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisFirehoseOutputInitParameters) DeepCopyInto(out *KinesisFirehoseOutputInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseOutputInitParameters. +func (in *KinesisFirehoseOutputInitParameters) DeepCopy() *KinesisFirehoseOutputInitParameters { + if in == nil { + return nil + } + out := new(KinesisFirehoseOutputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisFirehoseOutputObservation) DeepCopyInto(out *KinesisFirehoseOutputObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseOutputObservation. +func (in *KinesisFirehoseOutputObservation) DeepCopy() *KinesisFirehoseOutputObservation { + if in == nil { + return nil + } + out := new(KinesisFirehoseOutputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisFirehoseOutputParameters) DeepCopyInto(out *KinesisFirehoseOutputParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseOutputParameters. +func (in *KinesisFirehoseOutputParameters) DeepCopy() *KinesisFirehoseOutputParameters { + if in == nil { + return nil + } + out := new(KinesisFirehoseOutputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamsInputInitParameters) DeepCopyInto(out *KinesisStreamsInputInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamsInputInitParameters. +func (in *KinesisStreamsInputInitParameters) DeepCopy() *KinesisStreamsInputInitParameters { + if in == nil { + return nil + } + out := new(KinesisStreamsInputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisStreamsInputObservation) DeepCopyInto(out *KinesisStreamsInputObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamsInputObservation. +func (in *KinesisStreamsInputObservation) DeepCopy() *KinesisStreamsInputObservation { + if in == nil { + return nil + } + out := new(KinesisStreamsInputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamsInputParameters) DeepCopyInto(out *KinesisStreamsInputParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamsInputParameters. +func (in *KinesisStreamsInputParameters) DeepCopy() *KinesisStreamsInputParameters { + if in == nil { + return nil + } + out := new(KinesisStreamsInputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisStreamsOutputInitParameters) DeepCopyInto(out *KinesisStreamsOutputInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamsOutputInitParameters. +func (in *KinesisStreamsOutputInitParameters) DeepCopy() *KinesisStreamsOutputInitParameters { + if in == nil { + return nil + } + out := new(KinesisStreamsOutputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamsOutputObservation) DeepCopyInto(out *KinesisStreamsOutputObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamsOutputObservation. +func (in *KinesisStreamsOutputObservation) DeepCopy() *KinesisStreamsOutputObservation { + if in == nil { + return nil + } + out := new(KinesisStreamsOutputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisStreamsOutputParameters) DeepCopyInto(out *KinesisStreamsOutputParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamsOutputParameters. 
+func (in *KinesisStreamsOutputParameters) DeepCopy() *KinesisStreamsOutputParameters { + if in == nil { + return nil + } + out := new(KinesisStreamsOutputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaOutputInitParameters) DeepCopyInto(out *LambdaOutputInitParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaOutputInitParameters. +func (in *LambdaOutputInitParameters) DeepCopy() *LambdaOutputInitParameters { + if in == nil { + return nil + } + out := new(LambdaOutputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaOutputObservation) DeepCopyInto(out *LambdaOutputObservation) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaOutputObservation. +func (in *LambdaOutputObservation) DeepCopy() *LambdaOutputObservation { + if in == nil { + return nil + } + out := new(LambdaOutputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LambdaOutputParameters) DeepCopyInto(out *LambdaOutputParameters) { + *out = *in + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaOutputParameters. +func (in *LambdaOutputParameters) DeepCopy() *LambdaOutputParameters { + if in == nil { + return nil + } + out := new(LambdaOutputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersCsvMappingParametersInitParameters) DeepCopyInto(out *MappingParametersCsvMappingParametersInitParameters) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersCsvMappingParametersInitParameters. +func (in *MappingParametersCsvMappingParametersInitParameters) DeepCopy() *MappingParametersCsvMappingParametersInitParameters { + if in == nil { + return nil + } + out := new(MappingParametersCsvMappingParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MappingParametersCsvMappingParametersObservation) DeepCopyInto(out *MappingParametersCsvMappingParametersObservation) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersCsvMappingParametersObservation. +func (in *MappingParametersCsvMappingParametersObservation) DeepCopy() *MappingParametersCsvMappingParametersObservation { + if in == nil { + return nil + } + out := new(MappingParametersCsvMappingParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersCsvMappingParametersParameters) DeepCopyInto(out *MappingParametersCsvMappingParametersParameters) { + *out = *in + if in.RecordColumnDelimiter != nil { + in, out := &in.RecordColumnDelimiter, &out.RecordColumnDelimiter + *out = new(string) + **out = **in + } + if in.RecordRowDelimiter != nil { + in, out := &in.RecordRowDelimiter, &out.RecordRowDelimiter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersCsvMappingParametersParameters. +func (in *MappingParametersCsvMappingParametersParameters) DeepCopy() *MappingParametersCsvMappingParametersParameters { + if in == nil { + return nil + } + out := new(MappingParametersCsvMappingParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MappingParametersInitParameters) DeepCopyInto(out *MappingParametersInitParameters) { + *out = *in + if in.CsvMappingParameters != nil { + in, out := &in.CsvMappingParameters, &out.CsvMappingParameters + *out = new(CsvMappingParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JSONMappingParameters != nil { + in, out := &in.JSONMappingParameters, &out.JSONMappingParameters + *out = new(JSONMappingParametersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersInitParameters. +func (in *MappingParametersInitParameters) DeepCopy() *MappingParametersInitParameters { + if in == nil { + return nil + } + out := new(MappingParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersJSONMappingParametersInitParameters) DeepCopyInto(out *MappingParametersJSONMappingParametersInitParameters) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersJSONMappingParametersInitParameters. +func (in *MappingParametersJSONMappingParametersInitParameters) DeepCopy() *MappingParametersJSONMappingParametersInitParameters { + if in == nil { + return nil + } + out := new(MappingParametersJSONMappingParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MappingParametersJSONMappingParametersObservation) DeepCopyInto(out *MappingParametersJSONMappingParametersObservation) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersJSONMappingParametersObservation. +func (in *MappingParametersJSONMappingParametersObservation) DeepCopy() *MappingParametersJSONMappingParametersObservation { + if in == nil { + return nil + } + out := new(MappingParametersJSONMappingParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersJSONMappingParametersParameters) DeepCopyInto(out *MappingParametersJSONMappingParametersParameters) { + *out = *in + if in.RecordRowPath != nil { + in, out := &in.RecordRowPath, &out.RecordRowPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersJSONMappingParametersParameters. +func (in *MappingParametersJSONMappingParametersParameters) DeepCopy() *MappingParametersJSONMappingParametersParameters { + if in == nil { + return nil + } + out := new(MappingParametersJSONMappingParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MappingParametersObservation) DeepCopyInto(out *MappingParametersObservation) { + *out = *in + if in.CsvMappingParameters != nil { + in, out := &in.CsvMappingParameters, &out.CsvMappingParameters + *out = new(CsvMappingParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.JSONMappingParameters != nil { + in, out := &in.JSONMappingParameters, &out.JSONMappingParameters + *out = new(JSONMappingParametersObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersObservation. +func (in *MappingParametersObservation) DeepCopy() *MappingParametersObservation { + if in == nil { + return nil + } + out := new(MappingParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MappingParametersParameters) DeepCopyInto(out *MappingParametersParameters) { + *out = *in + if in.CsvMappingParameters != nil { + in, out := &in.CsvMappingParameters, &out.CsvMappingParameters + *out = new(CsvMappingParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.JSONMappingParameters != nil { + in, out := &in.JSONMappingParameters, &out.JSONMappingParameters + *out = new(JSONMappingParametersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MappingParametersParameters. +func (in *MappingParametersParameters) DeepCopy() *MappingParametersParameters { + if in == nil { + return nil + } + out := new(MappingParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringConfigurationInitParameters) DeepCopyInto(out *MonitoringConfigurationInitParameters) { + *out = *in + if in.ConfigurationType != nil { + in, out := &in.ConfigurationType, &out.ConfigurationType + *out = new(string) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.MetricsLevel != nil { + in, out := &in.MetricsLevel, &out.MetricsLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConfigurationInitParameters. +func (in *MonitoringConfigurationInitParameters) DeepCopy() *MonitoringConfigurationInitParameters { + if in == nil { + return nil + } + out := new(MonitoringConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringConfigurationObservation) DeepCopyInto(out *MonitoringConfigurationObservation) { + *out = *in + if in.ConfigurationType != nil { + in, out := &in.ConfigurationType, &out.ConfigurationType + *out = new(string) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.MetricsLevel != nil { + in, out := &in.MetricsLevel, &out.MetricsLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConfigurationObservation. +func (in *MonitoringConfigurationObservation) DeepCopy() *MonitoringConfigurationObservation { + if in == nil { + return nil + } + out := new(MonitoringConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringConfigurationParameters) DeepCopyInto(out *MonitoringConfigurationParameters) { + *out = *in + if in.ConfigurationType != nil { + in, out := &in.ConfigurationType, &out.ConfigurationType + *out = new(string) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.MetricsLevel != nil { + in, out := &in.MetricsLevel, &out.MetricsLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConfigurationParameters. +func (in *MonitoringConfigurationParameters) DeepCopy() *MonitoringConfigurationParameters { + if in == nil { + return nil + } + out := new(MonitoringConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputInitParameters) DeepCopyInto(out *OutputInitParameters) { + *out = *in + if in.DestinationSchema != nil { + in, out := &in.DestinationSchema, &out.DestinationSchema + *out = new(DestinationSchemaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisFirehoseOutput != nil { + in, out := &in.KinesisFirehoseOutput, &out.KinesisFirehoseOutput + *out = new(KinesisFirehoseOutputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamsOutput != nil { + in, out := &in.KinesisStreamsOutput, &out.KinesisStreamsOutput + *out = new(KinesisStreamsOutputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaOutput != nil { + in, out := &in.LambdaOutput, &out.LambdaOutput + *out = new(LambdaOutputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputInitParameters. 
+func (in *OutputInitParameters) DeepCopy() *OutputInitParameters { + if in == nil { + return nil + } + out := new(OutputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputObservation) DeepCopyInto(out *OutputObservation) { + *out = *in + if in.DestinationSchema != nil { + in, out := &in.DestinationSchema, &out.DestinationSchema + *out = new(DestinationSchemaObservation) + (*in).DeepCopyInto(*out) + } + if in.KinesisFirehoseOutput != nil { + in, out := &in.KinesisFirehoseOutput, &out.KinesisFirehoseOutput + *out = new(KinesisFirehoseOutputObservation) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamsOutput != nil { + in, out := &in.KinesisStreamsOutput, &out.KinesisStreamsOutput + *out = new(KinesisStreamsOutputObservation) + (*in).DeepCopyInto(*out) + } + if in.LambdaOutput != nil { + in, out := &in.LambdaOutput, &out.LambdaOutput + *out = new(LambdaOutputObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputID != nil { + in, out := &in.OutputID, &out.OutputID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputObservation. +func (in *OutputObservation) DeepCopy() *OutputObservation { + if in == nil { + return nil + } + out := new(OutputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputParameters) DeepCopyInto(out *OutputParameters) { + *out = *in + if in.DestinationSchema != nil { + in, out := &in.DestinationSchema, &out.DestinationSchema + *out = new(DestinationSchemaParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisFirehoseOutput != nil { + in, out := &in.KinesisFirehoseOutput, &out.KinesisFirehoseOutput + *out = new(KinesisFirehoseOutputParameters) + (*in).DeepCopyInto(*out) + } + if in.KinesisStreamsOutput != nil { + in, out := &in.KinesisStreamsOutput, &out.KinesisStreamsOutput + *out = new(KinesisStreamsOutputParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaOutput != nil { + in, out := &in.LambdaOutput, &out.LambdaOutput + *out = new(LambdaOutputParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputParameters. +func (in *OutputParameters) DeepCopy() *OutputParameters { + if in == nil { + return nil + } + out := new(OutputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParallelismConfigurationInitParameters) DeepCopyInto(out *ParallelismConfigurationInitParameters) { + *out = *in + if in.AutoScalingEnabled != nil { + in, out := &in.AutoScalingEnabled, &out.AutoScalingEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationType != nil { + in, out := &in.ConfigurationType, &out.ConfigurationType + *out = new(string) + **out = **in + } + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(float64) + **out = **in + } + if in.ParallelismPerKpu != nil { + in, out := &in.ParallelismPerKpu, &out.ParallelismPerKpu + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelismConfigurationInitParameters. +func (in *ParallelismConfigurationInitParameters) DeepCopy() *ParallelismConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ParallelismConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParallelismConfigurationObservation) DeepCopyInto(out *ParallelismConfigurationObservation) { + *out = *in + if in.AutoScalingEnabled != nil { + in, out := &in.AutoScalingEnabled, &out.AutoScalingEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationType != nil { + in, out := &in.ConfigurationType, &out.ConfigurationType + *out = new(string) + **out = **in + } + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(float64) + **out = **in + } + if in.ParallelismPerKpu != nil { + in, out := &in.ParallelismPerKpu, &out.ParallelismPerKpu + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelismConfigurationObservation. 
+func (in *ParallelismConfigurationObservation) DeepCopy() *ParallelismConfigurationObservation { + if in == nil { + return nil + } + out := new(ParallelismConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParallelismConfigurationParameters) DeepCopyInto(out *ParallelismConfigurationParameters) { + *out = *in + if in.AutoScalingEnabled != nil { + in, out := &in.AutoScalingEnabled, &out.AutoScalingEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationType != nil { + in, out := &in.ConfigurationType, &out.ConfigurationType + *out = new(string) + **out = **in + } + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(float64) + **out = **in + } + if in.ParallelismPerKpu != nil { + in, out := &in.ParallelismPerKpu, &out.ParallelismPerKpu + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelismConfigurationParameters. +func (in *ParallelismConfigurationParameters) DeepCopy() *ParallelismConfigurationParameters { + if in == nil { + return nil + } + out := new(ParallelismConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PropertyGroupInitParameters) DeepCopyInto(out *PropertyGroupInitParameters) { + *out = *in + if in.PropertyGroupID != nil { + in, out := &in.PropertyGroupID, &out.PropertyGroupID + *out = new(string) + **out = **in + } + if in.PropertyMap != nil { + in, out := &in.PropertyMap, &out.PropertyMap + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertyGroupInitParameters. +func (in *PropertyGroupInitParameters) DeepCopy() *PropertyGroupInitParameters { + if in == nil { + return nil + } + out := new(PropertyGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropertyGroupObservation) DeepCopyInto(out *PropertyGroupObservation) { + *out = *in + if in.PropertyGroupID != nil { + in, out := &in.PropertyGroupID, &out.PropertyGroupID + *out = new(string) + **out = **in + } + if in.PropertyMap != nil { + in, out := &in.PropertyMap, &out.PropertyMap + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertyGroupObservation. +func (in *PropertyGroupObservation) DeepCopy() *PropertyGroupObservation { + if in == nil { + return nil + } + out := new(PropertyGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PropertyGroupParameters) DeepCopyInto(out *PropertyGroupParameters) { + *out = *in + if in.PropertyGroupID != nil { + in, out := &in.PropertyGroupID, &out.PropertyGroupID + *out = new(string) + **out = **in + } + if in.PropertyMap != nil { + in, out := &in.PropertyMap, &out.PropertyMap + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertyGroupParameters. +func (in *PropertyGroupParameters) DeepCopy() *PropertyGroupParameters { + if in == nil { + return nil + } + out := new(PropertyGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordColumnInitParameters) DeepCopyInto(out *RecordColumnInitParameters) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordColumnInitParameters. +func (in *RecordColumnInitParameters) DeepCopy() *RecordColumnInitParameters { + if in == nil { + return nil + } + out := new(RecordColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordColumnObservation) DeepCopyInto(out *RecordColumnObservation) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordColumnObservation. +func (in *RecordColumnObservation) DeepCopy() *RecordColumnObservation { + if in == nil { + return nil + } + out := new(RecordColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordColumnParameters) DeepCopyInto(out *RecordColumnParameters) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordColumnParameters. +func (in *RecordColumnParameters) DeepCopy() *RecordColumnParameters { + if in == nil { + return nil + } + out := new(RecordColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordFormatInitParameters) DeepCopyInto(out *RecordFormatInitParameters) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(MappingParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatInitParameters. +func (in *RecordFormatInitParameters) DeepCopy() *RecordFormatInitParameters { + if in == nil { + return nil + } + out := new(RecordFormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordFormatMappingParametersInitParameters) DeepCopyInto(out *RecordFormatMappingParametersInitParameters) { + *out = *in + if in.CsvMappingParameters != nil { + in, out := &in.CsvMappingParameters, &out.CsvMappingParameters + *out = new(MappingParametersCsvMappingParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JSONMappingParameters != nil { + in, out := &in.JSONMappingParameters, &out.JSONMappingParameters + *out = new(MappingParametersJSONMappingParametersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatMappingParametersInitParameters. +func (in *RecordFormatMappingParametersInitParameters) DeepCopy() *RecordFormatMappingParametersInitParameters { + if in == nil { + return nil + } + out := new(RecordFormatMappingParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordFormatMappingParametersObservation) DeepCopyInto(out *RecordFormatMappingParametersObservation) { + *out = *in + if in.CsvMappingParameters != nil { + in, out := &in.CsvMappingParameters, &out.CsvMappingParameters + *out = new(MappingParametersCsvMappingParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.JSONMappingParameters != nil { + in, out := &in.JSONMappingParameters, &out.JSONMappingParameters + *out = new(MappingParametersJSONMappingParametersObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatMappingParametersObservation. +func (in *RecordFormatMappingParametersObservation) DeepCopy() *RecordFormatMappingParametersObservation { + if in == nil { + return nil + } + out := new(RecordFormatMappingParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordFormatMappingParametersParameters) DeepCopyInto(out *RecordFormatMappingParametersParameters) { + *out = *in + if in.CsvMappingParameters != nil { + in, out := &in.CsvMappingParameters, &out.CsvMappingParameters + *out = new(MappingParametersCsvMappingParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.JSONMappingParameters != nil { + in, out := &in.JSONMappingParameters, &out.JSONMappingParameters + *out = new(MappingParametersJSONMappingParametersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatMappingParametersParameters. 
+func (in *RecordFormatMappingParametersParameters) DeepCopy() *RecordFormatMappingParametersParameters { + if in == nil { + return nil + } + out := new(RecordFormatMappingParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordFormatObservation) DeepCopyInto(out *RecordFormatObservation) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(MappingParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatObservation. +func (in *RecordFormatObservation) DeepCopy() *RecordFormatObservation { + if in == nil { + return nil + } + out := new(RecordFormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordFormatParameters) DeepCopyInto(out *RecordFormatParameters) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(MappingParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordFormatParameters. +func (in *RecordFormatParameters) DeepCopy() *RecordFormatParameters { + if in == nil { + return nil + } + out := new(RecordFormatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceDataSourceInitParameters) DeepCopyInto(out *ReferenceDataSourceInitParameters) { + *out = *in + if in.ReferenceSchema != nil { + in, out := &in.ReferenceSchema, &out.ReferenceSchema + *out = new(ReferenceSchemaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3ReferenceDataSource != nil { + in, out := &in.S3ReferenceDataSource, &out.S3ReferenceDataSource + *out = new(S3ReferenceDataSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceDataSourceInitParameters. +func (in *ReferenceDataSourceInitParameters) DeepCopy() *ReferenceDataSourceInitParameters { + if in == nil { + return nil + } + out := new(ReferenceDataSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceDataSourceObservation) DeepCopyInto(out *ReferenceDataSourceObservation) { + *out = *in + if in.ReferenceID != nil { + in, out := &in.ReferenceID, &out.ReferenceID + *out = new(string) + **out = **in + } + if in.ReferenceSchema != nil { + in, out := &in.ReferenceSchema, &out.ReferenceSchema + *out = new(ReferenceSchemaObservation) + (*in).DeepCopyInto(*out) + } + if in.S3ReferenceDataSource != nil { + in, out := &in.S3ReferenceDataSource, &out.S3ReferenceDataSource + *out = new(S3ReferenceDataSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceDataSourceObservation. 
+func (in *ReferenceDataSourceObservation) DeepCopy() *ReferenceDataSourceObservation { + if in == nil { + return nil + } + out := new(ReferenceDataSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceDataSourceParameters) DeepCopyInto(out *ReferenceDataSourceParameters) { + *out = *in + if in.ReferenceSchema != nil { + in, out := &in.ReferenceSchema, &out.ReferenceSchema + *out = new(ReferenceSchemaParameters) + (*in).DeepCopyInto(*out) + } + if in.S3ReferenceDataSource != nil { + in, out := &in.S3ReferenceDataSource, &out.S3ReferenceDataSource + *out = new(S3ReferenceDataSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceDataSourceParameters. +func (in *ReferenceDataSourceParameters) DeepCopy() *ReferenceDataSourceParameters { + if in == nil { + return nil + } + out := new(ReferenceDataSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceSchemaInitParameters) DeepCopyInto(out *ReferenceSchemaInitParameters) { + *out = *in + if in.RecordColumn != nil { + in, out := &in.RecordColumn, &out.RecordColumn + *out = make([]ReferenceSchemaRecordColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(ReferenceSchemaRecordFormatInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSchemaInitParameters. +func (in *ReferenceSchemaInitParameters) DeepCopy() *ReferenceSchemaInitParameters { + if in == nil { + return nil + } + out := new(ReferenceSchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSchemaObservation) DeepCopyInto(out *ReferenceSchemaObservation) { + *out = *in + if in.RecordColumn != nil { + in, out := &in.RecordColumn, &out.RecordColumn + *out = make([]ReferenceSchemaRecordColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(ReferenceSchemaRecordFormatObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSchemaObservation. 
+func (in *ReferenceSchemaObservation) DeepCopy() *ReferenceSchemaObservation { + if in == nil { + return nil + } + out := new(ReferenceSchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSchemaParameters) DeepCopyInto(out *ReferenceSchemaParameters) { + *out = *in + if in.RecordColumn != nil { + in, out := &in.RecordColumn, &out.RecordColumn + *out = make([]ReferenceSchemaRecordColumnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordEncoding != nil { + in, out := &in.RecordEncoding, &out.RecordEncoding + *out = new(string) + **out = **in + } + if in.RecordFormat != nil { + in, out := &in.RecordFormat, &out.RecordFormat + *out = new(ReferenceSchemaRecordFormatParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSchemaParameters. +func (in *ReferenceSchemaParameters) DeepCopy() *ReferenceSchemaParameters { + if in == nil { + return nil + } + out := new(ReferenceSchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSchemaRecordColumnInitParameters) DeepCopyInto(out *ReferenceSchemaRecordColumnInitParameters) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSchemaRecordColumnInitParameters. 
+func (in *ReferenceSchemaRecordColumnInitParameters) DeepCopy() *ReferenceSchemaRecordColumnInitParameters { + if in == nil { + return nil + } + out := new(ReferenceSchemaRecordColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSchemaRecordColumnObservation) DeepCopyInto(out *ReferenceSchemaRecordColumnObservation) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSchemaRecordColumnObservation. +func (in *ReferenceSchemaRecordColumnObservation) DeepCopy() *ReferenceSchemaRecordColumnObservation { + if in == nil { + return nil + } + out := new(ReferenceSchemaRecordColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSchemaRecordColumnParameters) DeepCopyInto(out *ReferenceSchemaRecordColumnParameters) { + *out = *in + if in.Mapping != nil { + in, out := &in.Mapping, &out.Mapping + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLType != nil { + in, out := &in.SQLType, &out.SQLType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSchemaRecordColumnParameters. 
+func (in *ReferenceSchemaRecordColumnParameters) DeepCopy() *ReferenceSchemaRecordColumnParameters { + if in == nil { + return nil + } + out := new(ReferenceSchemaRecordColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSchemaRecordFormatInitParameters) DeepCopyInto(out *ReferenceSchemaRecordFormatInitParameters) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(RecordFormatMappingParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSchemaRecordFormatInitParameters. +func (in *ReferenceSchemaRecordFormatInitParameters) DeepCopy() *ReferenceSchemaRecordFormatInitParameters { + if in == nil { + return nil + } + out := new(ReferenceSchemaRecordFormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSchemaRecordFormatObservation) DeepCopyInto(out *ReferenceSchemaRecordFormatObservation) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(RecordFormatMappingParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSchemaRecordFormatObservation. 
+func (in *ReferenceSchemaRecordFormatObservation) DeepCopy() *ReferenceSchemaRecordFormatObservation { + if in == nil { + return nil + } + out := new(ReferenceSchemaRecordFormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSchemaRecordFormatParameters) DeepCopyInto(out *ReferenceSchemaRecordFormatParameters) { + *out = *in + if in.MappingParameters != nil { + in, out := &in.MappingParameters, &out.MappingParameters + *out = new(RecordFormatMappingParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.RecordFormatType != nil { + in, out := &in.RecordFormatType, &out.RecordFormatType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSchemaRecordFormatParameters. +func (in *ReferenceSchemaRecordFormatParameters) DeepCopy() *ReferenceSchemaRecordFormatParameters { + if in == nil { + return nil + } + out := new(ReferenceSchemaRecordFormatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunConfigurationInitParameters) DeepCopyInto(out *RunConfigurationInitParameters) { + *out = *in + if in.ApplicationRestoreConfiguration != nil { + in, out := &in.ApplicationRestoreConfiguration, &out.ApplicationRestoreConfiguration + *out = new(ApplicationRestoreConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FlinkRunConfiguration != nil { + in, out := &in.FlinkRunConfiguration, &out.FlinkRunConfiguration + *out = new(FlinkRunConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunConfigurationInitParameters. 
+func (in *RunConfigurationInitParameters) DeepCopy() *RunConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RunConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunConfigurationObservation) DeepCopyInto(out *RunConfigurationObservation) { + *out = *in + if in.ApplicationRestoreConfiguration != nil { + in, out := &in.ApplicationRestoreConfiguration, &out.ApplicationRestoreConfiguration + *out = new(ApplicationRestoreConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.FlinkRunConfiguration != nil { + in, out := &in.FlinkRunConfiguration, &out.FlinkRunConfiguration + *out = new(FlinkRunConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunConfigurationObservation. +func (in *RunConfigurationObservation) DeepCopy() *RunConfigurationObservation { + if in == nil { + return nil + } + out := new(RunConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunConfigurationParameters) DeepCopyInto(out *RunConfigurationParameters) { + *out = *in + if in.ApplicationRestoreConfiguration != nil { + in, out := &in.ApplicationRestoreConfiguration, &out.ApplicationRestoreConfiguration + *out = new(ApplicationRestoreConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.FlinkRunConfiguration != nil { + in, out := &in.FlinkRunConfiguration, &out.FlinkRunConfiguration + *out = new(FlinkRunConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunConfigurationParameters. 
+func (in *RunConfigurationParameters) DeepCopy() *RunConfigurationParameters { + if in == nil { + return nil + } + out := new(RunConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ContentLocationInitParameters) DeepCopyInto(out *S3ContentLocationInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } + if in.FileKeyRef != nil { + in, out := &in.FileKeyRef, &out.FileKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileKeySelector != nil { + in, out := &in.FileKeySelector, &out.FileKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ContentLocationInitParameters. +func (in *S3ContentLocationInitParameters) DeepCopy() *S3ContentLocationInitParameters { + if in == nil { + return nil + } + out := new(S3ContentLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ContentLocationObservation) DeepCopyInto(out *S3ContentLocationObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ContentLocationObservation. +func (in *S3ContentLocationObservation) DeepCopy() *S3ContentLocationObservation { + if in == nil { + return nil + } + out := new(S3ContentLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ContentLocationParameters) DeepCopyInto(out *S3ContentLocationParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } + if in.FileKeyRef != nil { + in, out := &in.FileKeyRef, &out.FileKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FileKeySelector != nil { + in, out := &in.FileKeySelector, &out.FileKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectVersion != nil { + in, out := &in.ObjectVersion, &out.ObjectVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
S3ContentLocationParameters. +func (in *S3ContentLocationParameters) DeepCopy() *S3ContentLocationParameters { + if in == nil { + return nil + } + out := new(S3ContentLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ReferenceDataSourceInitParameters) DeepCopyInto(out *S3ReferenceDataSourceInitParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ReferenceDataSourceInitParameters. +func (in *S3ReferenceDataSourceInitParameters) DeepCopy() *S3ReferenceDataSourceInitParameters { + if in == nil { + return nil + } + out := new(S3ReferenceDataSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ReferenceDataSourceObservation) DeepCopyInto(out *S3ReferenceDataSourceObservation) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ReferenceDataSourceObservation. 
+func (in *S3ReferenceDataSourceObservation) DeepCopy() *S3ReferenceDataSourceObservation { + if in == nil { + return nil + } + out := new(S3ReferenceDataSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ReferenceDataSourceParameters) DeepCopyInto(out *S3ReferenceDataSourceParameters) { + *out = *in + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FileKey != nil { + in, out := &in.FileKey, &out.FileKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ReferenceDataSourceParameters. +func (in *S3ReferenceDataSourceParameters) DeepCopy() *S3ReferenceDataSourceParameters { + if in == nil { + return nil + } + out := new(S3ReferenceDataSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLApplicationConfigurationInitParameters) DeepCopyInto(out *SQLApplicationConfigurationInitParameters) { + *out = *in + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(InputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = make([]OutputInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReferenceDataSource != nil { + in, out := &in.ReferenceDataSource, &out.ReferenceDataSource + *out = new(ReferenceDataSourceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLApplicationConfigurationInitParameters. +func (in *SQLApplicationConfigurationInitParameters) DeepCopy() *SQLApplicationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SQLApplicationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLApplicationConfigurationObservation) DeepCopyInto(out *SQLApplicationConfigurationObservation) { + *out = *in + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(InputObservation) + (*in).DeepCopyInto(*out) + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = make([]OutputObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReferenceDataSource != nil { + in, out := &in.ReferenceDataSource, &out.ReferenceDataSource + *out = new(ReferenceDataSourceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLApplicationConfigurationObservation. 
+func (in *SQLApplicationConfigurationObservation) DeepCopy() *SQLApplicationConfigurationObservation { + if in == nil { + return nil + } + out := new(SQLApplicationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLApplicationConfigurationParameters) DeepCopyInto(out *SQLApplicationConfigurationParameters) { + *out = *in + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(InputParameters) + (*in).DeepCopyInto(*out) + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = make([]OutputParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReferenceDataSource != nil { + in, out := &in.ReferenceDataSource, &out.ReferenceDataSource + *out = new(ReferenceDataSourceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLApplicationConfigurationParameters. +func (in *SQLApplicationConfigurationParameters) DeepCopy() *SQLApplicationConfigurationParameters { + if in == nil { + return nil + } + out := new(SQLApplicationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigurationInitParameters) DeepCopyInto(out *VPCConfigurationInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationInitParameters. +func (in *VPCConfigurationInitParameters) DeepCopy() *VPCConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationObservation) DeepCopyInto(out *VPCConfigurationObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCConfigurationID != nil { + in, out := &in.VPCConfigurationID, &out.VPCConfigurationID + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationObservation. 
+func (in *VPCConfigurationObservation) DeepCopy() *VPCConfigurationObservation { + if in == nil { + return nil + } + out := new(VPCConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationParameters) DeepCopyInto(out *VPCConfigurationParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationParameters. +func (in *VPCConfigurationParameters) DeepCopy() *VPCConfigurationParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kinesisanalyticsv2/v1beta2/zz_generated.managed.go b/apis/kinesisanalyticsv2/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..d1481109bb --- /dev/null +++ b/apis/kinesisanalyticsv2/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Application. +func (mg *Application) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Application. 
+func (mg *Application) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Application. +func (mg *Application) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Application. +func (mg *Application) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Application. +func (mg *Application) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Application. +func (mg *Application) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Application. +func (mg *Application) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Application. +func (mg *Application) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Application. +func (mg *Application) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Application. +func (mg *Application) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Application. +func (mg *Application) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Application. 
+func (mg *Application) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kinesisanalyticsv2/v1beta2/zz_generated.managedlist.go b/apis/kinesisanalyticsv2/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..9c9817b1e1 --- /dev/null +++ b/apis/kinesisanalyticsv2/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ApplicationList. +func (l *ApplicationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kinesisanalyticsv2/v1beta2/zz_generated.resolvers.go b/apis/kinesisanalyticsv2/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..042dc83d14 --- /dev/null +++ b/apis/kinesisanalyticsv2/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,435 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Application) ResolveReferences( // ResolveReferences of this Application. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.ApplicationConfiguration != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArnRef, + Selector: mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArn") + } + mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.ForProvider.ApplicationConfiguration != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration 
!= nil { + if mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKey), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKeyRef, + Selector: mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKey") + } + mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKeyRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.ForProvider.ApplicationConfiguration != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err 
!= nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArn), + Extract: common.TerraformID(), + Reference: mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArnRef, + Selector: mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArn") + } + mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.ForProvider.ApplicationConfiguration != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output); i5++ { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArnRef, + Selector: mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArn") + } + mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.ForProvider.ApplicationConfiguration != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output); i5++ { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: 
mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArnRef, + Selector: mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArn") + } + mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.ForProvider.ApplicationConfiguration != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource != nil { + if mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArnRef, + Selector: mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) 
+ } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArn") + } + mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.ForProvider.CloudwatchLoggingOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArnRef, + Selector: mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArn") + } + mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CloudwatchLoggingOptions.LogStreamArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceExecutionRole), + Extract: common.ARNExtractor(), + Reference: 
mg.Spec.ForProvider.ServiceExecutionRoleRef, + Selector: mg.Spec.ForProvider.ServiceExecutionRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceExecutionRole") + } + mg.Spec.ForProvider.ServiceExecutionRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceExecutionRoleRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.ApplicationConfiguration != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArnRef, + Selector: mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArn") + } + mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.BucketArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.ApplicationConfiguration != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Object", "ObjectList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKey), + Extract: resource.ExtractParamPath("key", false), + Reference: mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKeyRef, + Selector: mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKey") + } + mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ApplicationConfiguration.ApplicationCodeConfiguration.CodeContent.S3ContentLocation.FileKeyRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.ApplicationConfiguration != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration != nil { + if 
mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArn), + Extract: common.TerraformID(), + Reference: mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArnRef, + Selector: mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArn") + } + mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Input.KinesisStreamsInput.ResourceArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.ApplicationConfiguration != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output); i5++ { + if mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") 
+ if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArnRef, + Selector: mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArn") + } + mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].KinesisFirehoseOutput.ResourceArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.ApplicationConfiguration != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output); i5++ { + if mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArnRef, + Selector: mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArn") + } + mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.Output[i5].LambdaOutput.ResourceArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.ApplicationConfiguration != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource != nil { + if mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: 
mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArnRef, + Selector: mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArn") + } + mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ApplicationConfiguration.SQLApplicationConfiguration.ReferenceDataSource.S3ReferenceDataSource.BucketArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.CloudwatchLoggingOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArnRef, + Selector: mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArn") + } + mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CloudwatchLoggingOptions.LogStreamArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err 
!= nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceExecutionRole), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ServiceExecutionRoleRef, + Selector: mg.Spec.InitProvider.ServiceExecutionRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceExecutionRole") + } + mg.Spec.InitProvider.ServiceExecutionRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceExecutionRoleRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/kinesisanalyticsv2/v1beta2/zz_groupversion_info.go b/apis/kinesisanalyticsv2/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..db91092630 --- /dev/null +++ b/apis/kinesisanalyticsv2/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kinesisanalyticsv2.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "kinesisanalyticsv2.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/lakeformation/v1beta1/zz_generated.conversion_hubs.go b/apis/lakeformation/v1beta1/zz_generated.conversion_hubs.go index 43c231ed8a..96c6d6eca9 100755 --- a/apis/lakeformation/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/lakeformation/v1beta1/zz_generated.conversion_hubs.go @@ -9,8 +9,5 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *DataLakeSettings) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Permissions) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Resource) Hub() {} diff --git a/apis/lakeformation/v1beta1/zz_generated.conversion_spokes.go b/apis/lakeformation/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..c8d421dbc1 --- /dev/null +++ b/apis/lakeformation/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Permissions to the hub type. +func (tr *Permissions) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Permissions type. 
+func (tr *Permissions) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/lakeformation/v1beta2/zz_generated.conversion_hubs.go b/apis/lakeformation/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..6e7b5587c1 --- /dev/null +++ b/apis/lakeformation/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Permissions) Hub() {} diff --git a/apis/lakeformation/v1beta2/zz_generated.deepcopy.go b/apis/lakeformation/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..acb0233813 --- /dev/null +++ b/apis/lakeformation/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1267 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataCellsFilterInitParameters) DeepCopyInto(out *DataCellsFilterInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TableCatalogID != nil { + in, out := &in.TableCatalogID, &out.TableCatalogID + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCellsFilterInitParameters. +func (in *DataCellsFilterInitParameters) DeepCopy() *DataCellsFilterInitParameters { + if in == nil { + return nil + } + out := new(DataCellsFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCellsFilterObservation) DeepCopyInto(out *DataCellsFilterObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TableCatalogID != nil { + in, out := &in.TableCatalogID, &out.TableCatalogID + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCellsFilterObservation. +func (in *DataCellsFilterObservation) DeepCopy() *DataCellsFilterObservation { + if in == nil { + return nil + } + out := new(DataCellsFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataCellsFilterParameters) DeepCopyInto(out *DataCellsFilterParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TableCatalogID != nil { + in, out := &in.TableCatalogID, &out.TableCatalogID + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCellsFilterParameters. +func (in *DataCellsFilterParameters) DeepCopy() *DataCellsFilterParameters { + if in == nil { + return nil + } + out := new(DataCellsFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataLocationInitParameters) DeepCopyInto(out *DataLocationInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataLocationInitParameters. +func (in *DataLocationInitParameters) DeepCopy() *DataLocationInitParameters { + if in == nil { + return nil + } + out := new(DataLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataLocationObservation) DeepCopyInto(out *DataLocationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataLocationObservation. +func (in *DataLocationObservation) DeepCopy() *DataLocationObservation { + if in == nil { + return nil + } + out := new(DataLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataLocationParameters) DeepCopyInto(out *DataLocationParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataLocationParameters. +func (in *DataLocationParameters) DeepCopy() *DataLocationParameters { + if in == nil { + return nil + } + out := new(DataLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseInitParameters) DeepCopyInto(out *DatabaseInitParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseInitParameters. +func (in *DatabaseInitParameters) DeepCopy() *DatabaseInitParameters { + if in == nil { + return nil + } + out := new(DatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObservation) DeepCopyInto(out *DatabaseObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObservation. +func (in *DatabaseObservation) DeepCopy() *DatabaseObservation { + if in == nil { + return nil + } + out := new(DatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseParameters) DeepCopyInto(out *DatabaseParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseParameters. +func (in *DatabaseParameters) DeepCopy() *DatabaseParameters { + if in == nil { + return nil + } + out := new(DatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressionInitParameters) DeepCopyInto(out *ExpressionInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionInitParameters. +func (in *ExpressionInitParameters) DeepCopy() *ExpressionInitParameters { + if in == nil { + return nil + } + out := new(ExpressionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressionObservation) DeepCopyInto(out *ExpressionObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionObservation. +func (in *ExpressionObservation) DeepCopy() *ExpressionObservation { + if in == nil { + return nil + } + out := new(ExpressionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressionParameters) DeepCopyInto(out *ExpressionParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionParameters. +func (in *ExpressionParameters) DeepCopy() *ExpressionParameters { + if in == nil { + return nil + } + out := new(ExpressionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LfTagInitParameters) DeepCopyInto(out *LfTagInitParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LfTagInitParameters. +func (in *LfTagInitParameters) DeepCopy() *LfTagInitParameters { + if in == nil { + return nil + } + out := new(LfTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LfTagObservation) DeepCopyInto(out *LfTagObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LfTagObservation. +func (in *LfTagObservation) DeepCopy() *LfTagObservation { + if in == nil { + return nil + } + out := new(LfTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LfTagParameters) DeepCopyInto(out *LfTagParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LfTagParameters. +func (in *LfTagParameters) DeepCopy() *LfTagParameters { + if in == nil { + return nil + } + out := new(LfTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LfTagPolicyInitParameters) DeepCopyInto(out *LfTagPolicyInitParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = make([]ExpressionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LfTagPolicyInitParameters. +func (in *LfTagPolicyInitParameters) DeepCopy() *LfTagPolicyInitParameters { + if in == nil { + return nil + } + out := new(LfTagPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LfTagPolicyObservation) DeepCopyInto(out *LfTagPolicyObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = make([]ExpressionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LfTagPolicyObservation. +func (in *LfTagPolicyObservation) DeepCopy() *LfTagPolicyObservation { + if in == nil { + return nil + } + out := new(LfTagPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LfTagPolicyParameters) DeepCopyInto(out *LfTagPolicyParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = make([]ExpressionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LfTagPolicyParameters. +func (in *LfTagPolicyParameters) DeepCopy() *LfTagPolicyParameters { + if in == nil { + return nil + } + out := new(LfTagPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Permissions) DeepCopyInto(out *Permissions) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Permissions. +func (in *Permissions) DeepCopy() *Permissions { + if in == nil { + return nil + } + out := new(Permissions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Permissions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsInitParameters) DeepCopyInto(out *PermissionsInitParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.CatalogResource != nil { + in, out := &in.CatalogResource, &out.CatalogResource + *out = new(bool) + **out = **in + } + if in.DataCellsFilter != nil { + in, out := &in.DataCellsFilter, &out.DataCellsFilter + *out = new(DataCellsFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DataLocation != nil { + in, out := &in.DataLocation, &out.DataLocation + *out = new(DataLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(DatabaseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LfTag != nil { + in, out := &in.LfTag, &out.LfTag + *out = new(LfTagInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LfTagPolicy != nil { + in, out := &in.LfTagPolicy, &out.LfTagPolicy + *out = new(LfTagPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]*string, len(*in)) + for 
i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PermissionsWithGrantOption != nil { + in, out := &in.PermissionsWithGrantOption, &out.PermissionsWithGrantOption + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(TableInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TableWithColumns != nil { + in, out := &in.TableWithColumns, &out.TableWithColumns + *out = new(TableWithColumnsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsInitParameters. +func (in *PermissionsInitParameters) DeepCopy() *PermissionsInitParameters { + if in == nil { + return nil + } + out := new(PermissionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsList) DeepCopyInto(out *PermissionsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Permissions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsList. +func (in *PermissionsList) DeepCopy() *PermissionsList { + if in == nil { + return nil + } + out := new(PermissionsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PermissionsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsObservation) DeepCopyInto(out *PermissionsObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.CatalogResource != nil { + in, out := &in.CatalogResource, &out.CatalogResource + *out = new(bool) + **out = **in + } + if in.DataCellsFilter != nil { + in, out := &in.DataCellsFilter, &out.DataCellsFilter + *out = new(DataCellsFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.DataLocation != nil { + in, out := &in.DataLocation, &out.DataLocation + *out = new(DataLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(DatabaseObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LfTag != nil { + in, out := &in.LfTag, &out.LfTag + *out = new(LfTagObservation) + (*in).DeepCopyInto(*out) + } + if in.LfTagPolicy != nil { + in, out := &in.LfTagPolicy, &out.LfTagPolicy + *out = new(LfTagPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PermissionsWithGrantOption != nil { + in, out := &in.PermissionsWithGrantOption, &out.PermissionsWithGrantOption + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in 
+ } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(TableObservation) + (*in).DeepCopyInto(*out) + } + if in.TableWithColumns != nil { + in, out := &in.TableWithColumns, &out.TableWithColumns + *out = new(TableWithColumnsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsObservation. +func (in *PermissionsObservation) DeepCopy() *PermissionsObservation { + if in == nil { + return nil + } + out := new(PermissionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsParameters) DeepCopyInto(out *PermissionsParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.CatalogResource != nil { + in, out := &in.CatalogResource, &out.CatalogResource + *out = new(bool) + **out = **in + } + if in.DataCellsFilter != nil { + in, out := &in.DataCellsFilter, &out.DataCellsFilter + *out = new(DataCellsFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.DataLocation != nil { + in, out := &in.DataLocation, &out.DataLocation + *out = new(DataLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(DatabaseParameters) + (*in).DeepCopyInto(*out) + } + if in.LfTag != nil { + in, out := &in.LfTag, &out.LfTag + *out = new(LfTagParameters) + (*in).DeepCopyInto(*out) + } + if in.LfTagPolicy != nil { + in, out := &in.LfTagPolicy, &out.LfTagPolicy + *out = new(LfTagPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if 
in.PermissionsWithGrantOption != nil { + in, out := &in.PermissionsWithGrantOption, &out.PermissionsWithGrantOption + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(TableParameters) + (*in).DeepCopyInto(*out) + } + if in.TableWithColumns != nil { + in, out := &in.TableWithColumns, &out.TableWithColumns + *out = new(TableWithColumnsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsParameters. +func (in *PermissionsParameters) DeepCopy() *PermissionsParameters { + if in == nil { + return nil + } + out := new(PermissionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsSpec) DeepCopyInto(out *PermissionsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsSpec. +func (in *PermissionsSpec) DeepCopy() *PermissionsSpec { + if in == nil { + return nil + } + out := new(PermissionsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PermissionsStatus) DeepCopyInto(out *PermissionsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsStatus. +func (in *PermissionsStatus) DeepCopy() *PermissionsStatus { + if in == nil { + return nil + } + out := new(PermissionsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Wildcard != nil { + in, out := &in.Wildcard, &out.Wildcard + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableInitParameters. +func (in *TableInitParameters) DeepCopy() *TableInitParameters { + if in == nil { + return nil + } + out := new(TableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableObservation) DeepCopyInto(out *TableObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Wildcard != nil { + in, out := &in.Wildcard, &out.Wildcard + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableObservation. +func (in *TableObservation) DeepCopy() *TableObservation { + if in == nil { + return nil + } + out := new(TableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableParameters) DeepCopyInto(out *TableParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Wildcard != nil { + in, out := &in.Wildcard, &out.Wildcard + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableParameters. +func (in *TableParameters) DeepCopy() *TableParameters { + if in == nil { + return nil + } + out := new(TableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableWithColumnsInitParameters) DeepCopyInto(out *TableWithColumnsInitParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.ColumnNames != nil { + in, out := &in.ColumnNames, &out.ColumnNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.ExcludedColumnNames != nil { + in, out := &in.ExcludedColumnNames, &out.ExcludedColumnNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Wildcard != nil { + in, out := &in.Wildcard, &out.Wildcard + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableWithColumnsInitParameters. +func (in *TableWithColumnsInitParameters) DeepCopy() *TableWithColumnsInitParameters { + if in == nil { + return nil + } + out := new(TableWithColumnsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableWithColumnsObservation) DeepCopyInto(out *TableWithColumnsObservation) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.ColumnNames != nil { + in, out := &in.ColumnNames, &out.ColumnNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.ExcludedColumnNames != nil { + in, out := &in.ExcludedColumnNames, &out.ExcludedColumnNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Wildcard != nil { + in, out := &in.Wildcard, &out.Wildcard + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableWithColumnsObservation. +func (in *TableWithColumnsObservation) DeepCopy() *TableWithColumnsObservation { + if in == nil { + return nil + } + out := new(TableWithColumnsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableWithColumnsParameters) DeepCopyInto(out *TableWithColumnsParameters) { + *out = *in + if in.CatalogID != nil { + in, out := &in.CatalogID, &out.CatalogID + *out = new(string) + **out = **in + } + if in.ColumnNames != nil { + in, out := &in.ColumnNames, &out.ColumnNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.ExcludedColumnNames != nil { + in, out := &in.ExcludedColumnNames, &out.ExcludedColumnNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Wildcard != nil { + in, out := &in.Wildcard, &out.Wildcard + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableWithColumnsParameters. +func (in *TableWithColumnsParameters) DeepCopy() *TableWithColumnsParameters { + if in == nil { + return nil + } + out := new(TableWithColumnsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/lakeformation/v1beta2/zz_generated.managed.go b/apis/lakeformation/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..706a2fb7ab --- /dev/null +++ b/apis/lakeformation/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. 
DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Permissions. +func (mg *Permissions) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Permissions. +func (mg *Permissions) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Permissions. +func (mg *Permissions) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Permissions. +func (mg *Permissions) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Permissions. +func (mg *Permissions) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Permissions. +func (mg *Permissions) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Permissions. +func (mg *Permissions) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Permissions. +func (mg *Permissions) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Permissions. +func (mg *Permissions) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Permissions. +func (mg *Permissions) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Permissions. +func (mg *Permissions) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Permissions. 
+func (mg *Permissions) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/lakeformation/v1beta2/zz_generated.managedlist.go b/apis/lakeformation/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..e38b477964 --- /dev/null +++ b/apis/lakeformation/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PermissionsList. +func (l *PermissionsList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/lakeformation/v1beta2/zz_generated.resolvers.go b/apis/lakeformation/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..a24f1c6954 --- /dev/null +++ b/apis/lakeformation/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,157 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Permissions) ResolveReferences( // ResolveReferences of this Permissions. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.DataLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("lakeformation.aws.upbound.io", "v1beta1", "Resource", "ResourceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataLocation.Arn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.ForProvider.DataLocation.ArnRef, + Selector: mg.Spec.ForProvider.DataLocation.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataLocation.Arn") + } + mg.Spec.ForProvider.DataLocation.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataLocation.ArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Database != nil { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogDatabase", "CatalogDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Database.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Database.NameRef, + Selector: mg.Spec.ForProvider.Database.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Database.Name") + } + mg.Spec.ForProvider.Database.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Database.NameRef = rsp.ResolvedReference + + } + if 
mg.Spec.ForProvider.TableWithColumns != nil { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogTable", "CatalogTableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TableWithColumns.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.TableWithColumns.NameRef, + Selector: mg.Spec.ForProvider.TableWithColumns.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TableWithColumns.Name") + } + mg.Spec.ForProvider.TableWithColumns.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TableWithColumns.NameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.DataLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("lakeformation.aws.upbound.io", "v1beta1", "Resource", "ResourceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataLocation.Arn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.InitProvider.DataLocation.ArnRef, + Selector: mg.Spec.InitProvider.DataLocation.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataLocation.Arn") + } + mg.Spec.InitProvider.DataLocation.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataLocation.ArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Database != nil { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogDatabase", "CatalogDatabaseList") + if err != 
nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Database.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Database.NameRef, + Selector: mg.Spec.InitProvider.Database.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Database.Name") + } + mg.Spec.InitProvider.Database.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Database.NameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.TableWithColumns != nil { + { + m, l, err = apisresolver.GetManagedResource("glue.aws.upbound.io", "v1beta2", "CatalogTable", "CatalogTableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TableWithColumns.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.TableWithColumns.NameRef, + Selector: mg.Spec.InitProvider.TableWithColumns.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TableWithColumns.Name") + } + mg.Spec.InitProvider.TableWithColumns.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TableWithColumns.NameRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/lakeformation/v1beta2/zz_groupversion_info.go b/apis/lakeformation/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..f93563b9f0 --- /dev/null +++ b/apis/lakeformation/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code 
generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=lakeformation.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "lakeformation.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/lakeformation/v1beta2/zz_permissions_terraformed.go b/apis/lakeformation/v1beta2/zz_permissions_terraformed.go new file mode 100755 index 0000000000..2a125bf081 --- /dev/null +++ b/apis/lakeformation/v1beta2/zz_permissions_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Permissions +func (mg *Permissions) GetTerraformResourceType() string { + return "aws_lakeformation_permissions" +} + +// GetConnectionDetailsMapping for this Permissions +func (tr *Permissions) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Permissions +func (tr *Permissions) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Permissions +func (tr *Permissions) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Permissions +func (tr *Permissions) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Permissions +func (tr *Permissions) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Permissions +func (tr *Permissions) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Permissions +func (tr *Permissions) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Permissions +func (tr *Permissions) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Permissions using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Permissions) LateInitialize(attrs []byte) (bool, error) { + params := &PermissionsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Permissions) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lakeformation/v1beta2/zz_permissions_types.go b/apis/lakeformation/v1beta2/zz_permissions_types.go new file mode 100755 index 0000000000..5e739b5663 --- /dev/null +++ b/apis/lakeformation/v1beta2/zz_permissions_types.go @@ -0,0 +1,612 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataCellsFilterInitParameters struct { + + // The name of the database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The name of the data cells filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Data Catalog. + TableCatalogID *string `json:"tableCatalogId,omitempty" tf:"table_catalog_id,omitempty"` + + // The name of the table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type DataCellsFilterObservation struct { + + // The name of the database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The name of the data cells filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Data Catalog. + TableCatalogID *string `json:"tableCatalogId,omitempty" tf:"table_catalog_id,omitempty"` + + // The name of the table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type DataCellsFilterParameters struct { + + // The name of the database. 
+ // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The name of the data cells filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the Data Catalog. + // +kubebuilder:validation:Optional + TableCatalogID *string `json:"tableCatalogId" tf:"table_catalog_id,omitempty"` + + // The name of the table. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName" tf:"table_name,omitempty"` +} + +type DataLocationInitParameters struct { + + // – Amazon Resource Name (ARN) that uniquely identifies the data location resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lakeformation/v1beta1.Resource + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a Resource in lakeformation to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a Resource in lakeformation to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Identifier for the Data Catalog where the location is registered with Lake Formation. By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` +} + +type DataLocationObservation struct { + + // – Amazon Resource Name (ARN) that uniquely identifies the data location resource. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Identifier for the Data Catalog where the location is registered with Lake Formation. By default, it is the account ID of the caller. 
+ CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` +} + +type DataLocationParameters struct { + + // – Amazon Resource Name (ARN) that uniquely identifies the data location resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lakeformation/v1beta1.Resource + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a Resource in lakeformation to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a Resource in lakeformation to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Identifier for the Data Catalog where the location is registered with Lake Formation. By default, it is the account ID of the caller. + // +kubebuilder:validation:Optional + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` +} + +type DatabaseInitParameters struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – Name of the database resource. Unique to the Data Catalog. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogDatabase + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a CatalogDatabase in glue to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a CatalogDatabase in glue to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type DatabaseObservation struct { + + // Identifier for the Data Catalog. 
By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – Name of the database resource. Unique to the Data Catalog. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type DatabaseParameters struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + // +kubebuilder:validation:Optional + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – Name of the database resource. Unique to the Data Catalog. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogDatabase + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a CatalogDatabase in glue to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a CatalogDatabase in glue to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type ExpressionInitParameters struct { + + // name of an LF-Tag. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A list of possible values of an LF-Tag. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ExpressionObservation struct { + + // name of an LF-Tag. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A list of possible values of an LF-Tag. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ExpressionParameters struct { + + // name of an LF-Tag. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // A list of possible values of an LF-Tag. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type LfTagInitParameters struct { + + // Identifier for the Data Catalog. 
By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // name for the tag. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A list of possible values an attribute can take. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type LfTagObservation struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // name for the tag. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A list of possible values an attribute can take. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type LfTagParameters struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + // +kubebuilder:validation:Optional + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // name for the tag. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // A list of possible values an attribute can take. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type LfTagPolicyInitParameters struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // A list of tag conditions that apply to the resource's tag policy. Configuration block for tag conditions that apply to the policy. See expression below. + Expression []ExpressionInitParameters `json:"expression,omitempty" tf:"expression,omitempty"` + + // – The resource type for which the tag policy applies. Valid values are DATABASE and TABLE. 
+ ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` +} + +type LfTagPolicyObservation struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // A list of tag conditions that apply to the resource's tag policy. Configuration block for tag conditions that apply to the policy. See expression below. + Expression []ExpressionObservation `json:"expression,omitempty" tf:"expression,omitempty"` + + // – The resource type for which the tag policy applies. Valid values are DATABASE and TABLE. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` +} + +type LfTagPolicyParameters struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + // +kubebuilder:validation:Optional + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // A list of tag conditions that apply to the resource's tag policy. Configuration block for tag conditions that apply to the policy. See expression below. + // +kubebuilder:validation:Optional + Expression []ExpressionParameters `json:"expression" tf:"expression,omitempty"` + + // – The resource type for which the tag policy applies. Valid values are DATABASE and TABLE. + // +kubebuilder:validation:Optional + ResourceType *string `json:"resourceType" tf:"resource_type,omitempty"` +} + +type PermissionsInitParameters struct { + + // – Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Whether the permissions are to be granted for the Data Catalog. Defaults to false. 
+ CatalogResource *bool `json:"catalogResource,omitempty" tf:"catalog_resource,omitempty"` + + // Configuration block for a data cells filter resource. Detailed below. + DataCellsFilter *DataCellsFilterInitParameters `json:"dataCellsFilter,omitempty" tf:"data_cells_filter,omitempty"` + + // Configuration block for a data location resource. Detailed below. + DataLocation *DataLocationInitParameters `json:"dataLocation,omitempty" tf:"data_location,omitempty"` + + // Configuration block for a database resource. Detailed below. + Database *DatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"` + + // Configuration block for an LF-tag resource. Detailed below. + LfTag *LfTagInitParameters `json:"lfTag,omitempty" tf:"lf_tag,omitempty"` + + // Configuration block for an LF-tag policy resource. Detailed below. + LfTagPolicy *LfTagPolicyInitParameters `json:"lfTagPolicy,omitempty" tf:"lf_tag_policy,omitempty"` + + // – List of permissions granted to the principal. Valid values may include ALL, ALTER, ASSOCIATE, CREATE_DATABASE, CREATE_TABLE, DATA_LOCATION_ACCESS, DELETE, DESCRIBE, DROP, INSERT, and SELECT. For details on each permission, see Lake Formation Permissions Reference. + Permissions []*string `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // Subset of permissions which the principal can pass. + PermissionsWithGrantOption []*string `json:"permissionsWithGrantOption,omitempty" tf:"permissions_with_grant_option,omitempty"` + + // account permissions. For more information, see Lake Formation Permissions Reference. + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // Configuration block for a table resource. Detailed below. + Table *TableInitParameters `json:"table,omitempty" tf:"table,omitempty"` + + // Configuration block for a table with columns resource. Detailed below. 
+ TableWithColumns *TableWithColumnsInitParameters `json:"tableWithColumns,omitempty" tf:"table_with_columns,omitempty"` +} + +type PermissionsObservation struct { + + // – Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Whether the permissions are to be granted for the Data Catalog. Defaults to false. + CatalogResource *bool `json:"catalogResource,omitempty" tf:"catalog_resource,omitempty"` + + // Configuration block for a data cells filter resource. Detailed below. + DataCellsFilter *DataCellsFilterObservation `json:"dataCellsFilter,omitempty" tf:"data_cells_filter,omitempty"` + + // Configuration block for a data location resource. Detailed below. + DataLocation *DataLocationObservation `json:"dataLocation,omitempty" tf:"data_location,omitempty"` + + // Configuration block for a database resource. Detailed below. + Database *DatabaseObservation `json:"database,omitempty" tf:"database,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block for an LF-tag resource. Detailed below. + LfTag *LfTagObservation `json:"lfTag,omitempty" tf:"lf_tag,omitempty"` + + // Configuration block for an LF-tag policy resource. Detailed below. + LfTagPolicy *LfTagPolicyObservation `json:"lfTagPolicy,omitempty" tf:"lf_tag_policy,omitempty"` + + // – List of permissions granted to the principal. Valid values may include ALL, ALTER, ASSOCIATE, CREATE_DATABASE, CREATE_TABLE, DATA_LOCATION_ACCESS, DELETE, DESCRIBE, DROP, INSERT, and SELECT. For details on each permission, see Lake Formation Permissions Reference. + Permissions []*string `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // Subset of permissions which the principal can pass. 
+ PermissionsWithGrantOption []*string `json:"permissionsWithGrantOption,omitempty" tf:"permissions_with_grant_option,omitempty"` + + // account permissions. For more information, see Lake Formation Permissions Reference. + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // Configuration block for a table resource. Detailed below. + Table *TableObservation `json:"table,omitempty" tf:"table,omitempty"` + + // Configuration block for a table with columns resource. Detailed below. + TableWithColumns *TableWithColumnsObservation `json:"tableWithColumns,omitempty" tf:"table_with_columns,omitempty"` +} + +type PermissionsParameters struct { + + // – Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. + // +kubebuilder:validation:Optional + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Whether the permissions are to be granted for the Data Catalog. Defaults to false. + // +kubebuilder:validation:Optional + CatalogResource *bool `json:"catalogResource,omitempty" tf:"catalog_resource,omitempty"` + + // Configuration block for a data cells filter resource. Detailed below. + // +kubebuilder:validation:Optional + DataCellsFilter *DataCellsFilterParameters `json:"dataCellsFilter,omitempty" tf:"data_cells_filter,omitempty"` + + // Configuration block for a data location resource. Detailed below. + // +kubebuilder:validation:Optional + DataLocation *DataLocationParameters `json:"dataLocation,omitempty" tf:"data_location,omitempty"` + + // Configuration block for a database resource. Detailed below. + // +kubebuilder:validation:Optional + Database *DatabaseParameters `json:"database,omitempty" tf:"database,omitempty"` + + // Configuration block for an LF-tag resource. Detailed below. 
+ // +kubebuilder:validation:Optional + LfTag *LfTagParameters `json:"lfTag,omitempty" tf:"lf_tag,omitempty"` + + // Configuration block for an LF-tag policy resource. Detailed below. + // +kubebuilder:validation:Optional + LfTagPolicy *LfTagPolicyParameters `json:"lfTagPolicy,omitempty" tf:"lf_tag_policy,omitempty"` + + // – List of permissions granted to the principal. Valid values may include ALL, ALTER, ASSOCIATE, CREATE_DATABASE, CREATE_TABLE, DATA_LOCATION_ACCESS, DELETE, DESCRIBE, DROP, INSERT, and SELECT. For details on each permission, see Lake Formation Permissions Reference. + // +kubebuilder:validation:Optional + Permissions []*string `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // Subset of permissions which the principal can pass. + // +kubebuilder:validation:Optional + PermissionsWithGrantOption []*string `json:"permissionsWithGrantOption,omitempty" tf:"permissions_with_grant_option,omitempty"` + + // account permissions. For more information, see Lake Formation Permissions Reference. + // +kubebuilder:validation:Optional + Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for a table resource. Detailed below. + // +kubebuilder:validation:Optional + Table *TableParameters `json:"table,omitempty" tf:"table,omitempty"` + + // Configuration block for a table with columns resource. Detailed below. + // +kubebuilder:validation:Optional + TableWithColumns *TableWithColumnsParameters `json:"tableWithColumns,omitempty" tf:"table_with_columns,omitempty"` +} + +type TableInitParameters struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – Name of the database for the table. 
Unique to a Data Catalog. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Name of the table. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to use a wildcard representing every table under a database. Defaults to false. + Wildcard *bool `json:"wildcard,omitempty" tf:"wildcard,omitempty"` +} + +type TableObservation struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – Name of the database for the table. Unique to a Data Catalog. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Name of the table. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to use a wildcard representing every table under a database. Defaults to false. + Wildcard *bool `json:"wildcard,omitempty" tf:"wildcard,omitempty"` +} + +type TableParameters struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + // +kubebuilder:validation:Optional + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // – Name of the database for the table. Unique to a Data Catalog. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // Name of the table. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to use a wildcard representing every table under a database. Defaults to false. + // +kubebuilder:validation:Optional + Wildcard *bool `json:"wildcard,omitempty" tf:"wildcard,omitempty"` +} + +type TableWithColumnsInitParameters struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Set of column names for the table. 
+ // +listType=set + ColumnNames []*string `json:"columnNames,omitempty" tf:"column_names,omitempty"` + + // – Name of the database for the table with columns resource. Unique to the Data Catalog. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Set of column names for the table to exclude. + // +listType=set + ExcludedColumnNames []*string `json:"excludedColumnNames,omitempty" tf:"excluded_column_names,omitempty"` + + // – Name of the table resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogTable + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a CatalogTable in glue to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a CatalogTable in glue to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // Whether to use a column wildcard. + Wildcard *bool `json:"wildcard,omitempty" tf:"wildcard,omitempty"` +} + +type TableWithColumnsObservation struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Set of column names for the table. + // +listType=set + ColumnNames []*string `json:"columnNames,omitempty" tf:"column_names,omitempty"` + + // – Name of the database for the table with columns resource. Unique to the Data Catalog. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Set of column names for the table to exclude. + // +listType=set + ExcludedColumnNames []*string `json:"excludedColumnNames,omitempty" tf:"excluded_column_names,omitempty"` + + // – Name of the table resource. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to use a column wildcard. 
+ Wildcard *bool `json:"wildcard,omitempty" tf:"wildcard,omitempty"` +} + +type TableWithColumnsParameters struct { + + // Identifier for the Data Catalog. By default, it is the account ID of the caller. + // +kubebuilder:validation:Optional + CatalogID *string `json:"catalogId,omitempty" tf:"catalog_id,omitempty"` + + // Set of column names for the table. + // +kubebuilder:validation:Optional + // +listType=set + ColumnNames []*string `json:"columnNames,omitempty" tf:"column_names,omitempty"` + + // – Name of the database for the table with columns resource. Unique to the Data Catalog. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // Set of column names for the table to exclude. + // +kubebuilder:validation:Optional + // +listType=set + ExcludedColumnNames []*string `json:"excludedColumnNames,omitempty" tf:"excluded_column_names,omitempty"` + + // – Name of the table resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/glue/v1beta2.CatalogTable + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a CatalogTable in glue to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a CatalogTable in glue to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // Whether to use a column wildcard. + // +kubebuilder:validation:Optional + Wildcard *bool `json:"wildcard,omitempty" tf:"wildcard,omitempty"` +} + +// PermissionsSpec defines the desired state of Permissions +type PermissionsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PermissionsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PermissionsInitParameters `json:"initProvider,omitempty"` +} + +// PermissionsStatus defines the observed state of Permissions. +type PermissionsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PermissionsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Permissions is the Schema for the Permissionss API. Grants permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Permissions struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.permissions) || (has(self.initProvider) && has(self.initProvider.permissions))",message="spec.forProvider.permissions is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.principal) || (has(self.initProvider) && has(self.initProvider.principal))",message="spec.forProvider.principal is a required parameter" + Spec PermissionsSpec `json:"spec"` + Status PermissionsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PermissionsList contains a list of Permissionss +type PermissionsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Permissions `json:"items"` +} + +// Repository type metadata. +var ( + Permissions_Kind = "Permissions" + Permissions_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Permissions_Kind}.String() + Permissions_KindAPIVersion = Permissions_Kind + "." 
+ CRDGroupVersion.String() + Permissions_GroupVersionKind = CRDGroupVersion.WithKind(Permissions_Kind) +) + +func init() { + SchemeBuilder.Register(&Permissions{}, &PermissionsList{}) +} diff --git a/apis/lambda/v1beta1/zz_generated.conversion_hubs.go b/apis/lambda/v1beta1/zz_generated.conversion_hubs.go index d221f27860..17bbbbb2bb 100755 --- a/apis/lambda/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/lambda/v1beta1/zz_generated.conversion_hubs.go @@ -6,24 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Alias) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *CodeSigningConfig) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *EventSourceMapping) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Function) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FunctionEventInvokeConfig) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FunctionURL) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Invocation) Hub() {} diff --git a/apis/lambda/v1beta1/zz_generated.conversion_spokes.go b/apis/lambda/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..59bfd0a178 --- /dev/null +++ b/apis/lambda/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Alias to the hub type. 
+func (tr *Alias) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Alias type. +func (tr *Alias) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this CodeSigningConfig to the hub type. +func (tr *CodeSigningConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CodeSigningConfig type. +func (tr *CodeSigningConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this EventSourceMapping to the hub type. 
+func (tr *EventSourceMapping) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EventSourceMapping type. +func (tr *EventSourceMapping) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Function to the hub type. +func (tr *Function) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Function type. +func (tr *Function) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FunctionEventInvokeConfig to the hub type. 
+func (tr *FunctionEventInvokeConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FunctionEventInvokeConfig type. +func (tr *FunctionEventInvokeConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FunctionURL to the hub type. +func (tr *FunctionURL) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FunctionURL type. 
+func (tr *FunctionURL) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/lambda/v1beta1/zz_generated.resolvers.go b/apis/lambda/v1beta1/zz_generated.resolvers.go index cdc1ee5814..25414b302b 100644 --- a/apis/lambda/v1beta1/zz_generated.resolvers.go +++ b/apis/lambda/v1beta1/zz_generated.resolvers.go @@ -611,7 +611,7 @@ func (mg *Invocation) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta1", "Function", "FunctionList") + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -630,7 +630,7 @@ func (mg *Invocation) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.FunctionName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.FunctionNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta1", "Function", "FunctionList") + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -661,7 +661,7 @@ func (mg *Permission) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", 
"v1beta1", "Function", "FunctionList") + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -680,7 +680,7 @@ func (mg *Permission) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.FunctionName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.FunctionNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta1", "Alias", "AliasList") + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Alias", "AliasList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -699,7 +699,7 @@ func (mg *Permission) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.Qualifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.QualifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta1", "Function", "FunctionList") + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -718,7 +718,7 @@ func (mg *Permission) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.InitProvider.FunctionName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.FunctionNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta1", "Alias", "AliasList") + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Alias", "AliasList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and 
its list for reference resolution") } diff --git a/apis/lambda/v1beta1/zz_invocation_types.go b/apis/lambda/v1beta1/zz_invocation_types.go index 56ddbedf4e..fa5e9a2aa5 100755 --- a/apis/lambda/v1beta1/zz_invocation_types.go +++ b/apis/lambda/v1beta1/zz_invocation_types.go @@ -16,7 +16,7 @@ import ( type InvocationInitParameters struct { // Name of the lambda function. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta1.Function + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` // Reference to a Function in lambda to populate functionName. @@ -74,7 +74,7 @@ type InvocationObservation struct { type InvocationParameters struct { // Name of the lambda function. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta1.Function + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function // +kubebuilder:validation:Optional FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` diff --git a/apis/lambda/v1beta1/zz_permission_types.go b/apis/lambda/v1beta1/zz_permission_types.go index e2e6ceb37e..503ff3b4e6 100755 --- a/apis/lambda/v1beta1/zz_permission_types.go +++ b/apis/lambda/v1beta1/zz_permission_types.go @@ -22,7 +22,7 @@ type PermissionInitParameters struct { EventSourceToken *string `json:"eventSourceToken,omitempty" tf:"event_source_token,omitempty"` // Name of the Lambda function whose resource policy you are updating - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta1.Function + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` // Reference to a Function in lambda to populate functionName. 
@@ -43,7 +43,7 @@ type PermissionInitParameters struct { PrincipalOrgID *string `json:"principalOrgId,omitempty" tf:"principal_org_id,omitempty"` // Query parameter to specify function version or alias name. The permission will then apply to the specific qualified ARN e.g., arn:aws:lambda:aws-region:acct-id:function:function-name:2 - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta1.Alias + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Alias Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` // Reference to a Alias in lambda to populate qualifier. @@ -124,7 +124,7 @@ type PermissionParameters struct { EventSourceToken *string `json:"eventSourceToken,omitempty" tf:"event_source_token,omitempty"` // Name of the Lambda function whose resource policy you are updating - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta1.Function + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function // +kubebuilder:validation:Optional FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` @@ -149,7 +149,7 @@ type PermissionParameters struct { PrincipalOrgID *string `json:"principalOrgId,omitempty" tf:"principal_org_id,omitempty"` // Query parameter to specify function version or alias name. 
The permission will then apply to the specific qualified ARN e.g., arn:aws:lambda:aws-region:acct-id:function:function-name:2 - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta1.Alias + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Alias // +kubebuilder:validation:Optional Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` diff --git a/apis/lambda/v1beta2/zz_alias_terraformed.go b/apis/lambda/v1beta2/zz_alias_terraformed.go new file mode 100755 index 0000000000..52048b6e8b --- /dev/null +++ b/apis/lambda/v1beta2/zz_alias_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Alias +func (mg *Alias) GetTerraformResourceType() string { + return "aws_lambda_alias" +} + +// GetConnectionDetailsMapping for this Alias +func (tr *Alias) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Alias +func (tr *Alias) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Alias +func (tr *Alias) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Alias +func (tr *Alias) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this Alias +func (tr *Alias) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Alias +func (tr *Alias) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Alias +func (tr *Alias) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Alias +func (tr *Alias) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Alias using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Alias) LateInitialize(attrs []byte) (bool, error) { + params := &AliasParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Alias) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lambda/v1beta2/zz_alias_types.go b/apis/lambda/v1beta2/zz_alias_types.go new file mode 100755 index 0000000000..9bfd0f1f3d --- /dev/null +++ b/apis/lambda/v1beta2/zz_alias_types.go @@ -0,0 +1,165 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AliasInitParameters struct { + + // Description of the alias. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Lambda function version for which you are creating the alias. Pattern: (\$LATEST|[0-9]+). + FunctionVersion *string `json:"functionVersion,omitempty" tf:"function_version,omitempty"` + + // The Lambda alias' route configuration settings. Fields documented below + RoutingConfig *RoutingConfigInitParameters `json:"routingConfig,omitempty" tf:"routing_config,omitempty"` +} + +type AliasObservation struct { + + // The Amazon Resource Name (ARN) identifying your Lambda function alias. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description of the alias. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Lambda Function name or ARN. + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // Lambda function version for which you are creating the alias. Pattern: (\$LATEST|[0-9]+). + FunctionVersion *string `json:"functionVersion,omitempty" tf:"function_version,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ARN to be used for invoking Lambda Function from API Gateway - to be used in aws_api_gateway_integration's uri + InvokeArn *string `json:"invokeArn,omitempty" tf:"invoke_arn,omitempty"` + + // The Lambda alias' route configuration settings. Fields documented below + RoutingConfig *RoutingConfigObservation `json:"routingConfig,omitempty" tf:"routing_config,omitempty"` +} + +type AliasParameters struct { + + // Description of the alias. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Lambda Function name or ARN. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +kubebuilder:validation:Optional + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // Reference to a Function in lambda to populate functionName. + // +kubebuilder:validation:Optional + FunctionNameRef *v1.Reference `json:"functionNameRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate functionName. + // +kubebuilder:validation:Optional + FunctionNameSelector *v1.Selector `json:"functionNameSelector,omitempty" tf:"-"` + + // Lambda function version for which you are creating the alias. Pattern: (\$LATEST|[0-9]+). + // +kubebuilder:validation:Optional + FunctionVersion *string `json:"functionVersion,omitempty" tf:"function_version,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Lambda alias' route configuration settings. Fields documented below + // +kubebuilder:validation:Optional + RoutingConfig *RoutingConfigParameters `json:"routingConfig,omitempty" tf:"routing_config,omitempty"` +} + +type RoutingConfigInitParameters struct { + + // A map that defines the proportion of events that should be sent to different versions of a lambda function. + // +mapType=granular + AdditionalVersionWeights map[string]*float64 `json:"additionalVersionWeights,omitempty" tf:"additional_version_weights,omitempty"` +} + +type RoutingConfigObservation struct { + + // A map that defines the proportion of events that should be sent to different versions of a lambda function. + // +mapType=granular + AdditionalVersionWeights map[string]*float64 `json:"additionalVersionWeights,omitempty" tf:"additional_version_weights,omitempty"` +} + +type RoutingConfigParameters struct { + + // A map that defines the proportion of events that should be sent to different versions of a lambda function. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalVersionWeights map[string]*float64 `json:"additionalVersionWeights,omitempty" tf:"additional_version_weights,omitempty"` +} + +// AliasSpec defines the desired state of Alias +type AliasSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AliasParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AliasInitParameters `json:"initProvider,omitempty"` +} + +// AliasStatus defines the observed state of Alias. +type AliasStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AliasObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Alias is the Schema for the Aliass API. Creates a Lambda function alias. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Alias struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.functionVersion) || (has(self.initProvider) && has(self.initProvider.functionVersion))",message="spec.forProvider.functionVersion is a required parameter" + Spec AliasSpec `json:"spec"` + Status AliasStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AliasList contains a list of Aliass +type AliasList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Alias `json:"items"` +} + +// Repository type metadata. 
+var ( + Alias_Kind = "Alias" + Alias_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Alias_Kind}.String() + Alias_KindAPIVersion = Alias_Kind + "." + CRDGroupVersion.String() + Alias_GroupVersionKind = CRDGroupVersion.WithKind(Alias_Kind) +) + +func init() { + SchemeBuilder.Register(&Alias{}, &AliasList{}) +} diff --git a/apis/lambda/v1beta2/zz_codesigningconfig_terraformed.go b/apis/lambda/v1beta2/zz_codesigningconfig_terraformed.go new file mode 100755 index 0000000000..3a28d43f9d --- /dev/null +++ b/apis/lambda/v1beta2/zz_codesigningconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CodeSigningConfig +func (mg *CodeSigningConfig) GetTerraformResourceType() string { + return "aws_lambda_code_signing_config" +} + +// GetConnectionDetailsMapping for this CodeSigningConfig +func (tr *CodeSigningConfig) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CodeSigningConfig +func (tr *CodeSigningConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CodeSigningConfig +func (tr *CodeSigningConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CodeSigningConfig +func (tr *CodeSigningConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + 
} + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CodeSigningConfig +func (tr *CodeSigningConfig) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CodeSigningConfig +func (tr *CodeSigningConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CodeSigningConfig +func (tr *CodeSigningConfig) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CodeSigningConfig +func (tr *CodeSigningConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CodeSigningConfig using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CodeSigningConfig) LateInitialize(attrs []byte) (bool, error) { + params := &CodeSigningConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CodeSigningConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lambda/v1beta2/zz_codesigningconfig_types.go b/apis/lambda/v1beta2/zz_codesigningconfig_types.go new file mode 100755 index 0000000000..0a9481508f --- /dev/null +++ b/apis/lambda/v1beta2/zz_codesigningconfig_types.go @@ -0,0 +1,191 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AllowedPublishersInitParameters struct { + + // The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta2.SigningProfile + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +listType=set + SigningProfileVersionArns []*string `json:"signingProfileVersionArns,omitempty" tf:"signing_profile_version_arns,omitempty"` + + // References to SigningProfile in signer to populate signingProfileVersionArns. + // +kubebuilder:validation:Optional + SigningProfileVersionArnsRefs []v1.Reference `json:"signingProfileVersionArnsRefs,omitempty" tf:"-"` + + // Selector for a list of SigningProfile in signer to populate signingProfileVersionArns. + // +kubebuilder:validation:Optional + SigningProfileVersionArnsSelector *v1.Selector `json:"signingProfileVersionArnsSelector,omitempty" tf:"-"` +} + +type AllowedPublishersObservation struct { + + // The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. + // +listType=set + SigningProfileVersionArns []*string `json:"signingProfileVersionArns,omitempty" tf:"signing_profile_version_arns,omitempty"` +} + +type AllowedPublishersParameters struct { + + // The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta2.SigningProfile + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + // +listType=set + SigningProfileVersionArns []*string `json:"signingProfileVersionArns,omitempty" tf:"signing_profile_version_arns,omitempty"` + + // References to SigningProfile in signer to populate signingProfileVersionArns. 
+ // +kubebuilder:validation:Optional + SigningProfileVersionArnsRefs []v1.Reference `json:"signingProfileVersionArnsRefs,omitempty" tf:"-"` + + // Selector for a list of SigningProfile in signer to populate signingProfileVersionArns. + // +kubebuilder:validation:Optional + SigningProfileVersionArnsSelector *v1.Selector `json:"signingProfileVersionArnsSelector,omitempty" tf:"-"` +} + +type CodeSigningConfigInitParameters struct { + + // A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below. + AllowedPublishers *AllowedPublishersInitParameters `json:"allowedPublishers,omitempty" tf:"allowed_publishers,omitempty"` + + // Descriptive name for this code signing configuration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below. + Policies *PoliciesInitParameters `json:"policies,omitempty" tf:"policies,omitempty"` +} + +type CodeSigningConfigObservation struct { + + // A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below. + AllowedPublishers *AllowedPublishersObservation `json:"allowedPublishers,omitempty" tf:"allowed_publishers,omitempty"` + + // The Amazon Resource Name (ARN) of the code signing configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Unique identifier for the code signing configuration. + ConfigID *string `json:"configId,omitempty" tf:"config_id,omitempty"` + + // Descriptive name for this code signing configuration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The date and time that the code signing configuration was last modified. 
+ LastModified *string `json:"lastModified,omitempty" tf:"last_modified,omitempty"` + + // A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below. + Policies *PoliciesObservation `json:"policies,omitempty" tf:"policies,omitempty"` +} + +type CodeSigningConfigParameters struct { + + // A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below. + // +kubebuilder:validation:Optional + AllowedPublishers *AllowedPublishersParameters `json:"allowedPublishers,omitempty" tf:"allowed_publishers,omitempty"` + + // Descriptive name for this code signing configuration. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below. + // +kubebuilder:validation:Optional + Policies *PoliciesParameters `json:"policies,omitempty" tf:"policies,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type PoliciesInitParameters struct { + + // Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if code-signing validation checks fail. If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log. Valid values: Warn, Enforce. Default value: Warn. + UntrustedArtifactOnDeployment *string `json:"untrustedArtifactOnDeployment,omitempty" tf:"untrusted_artifact_on_deployment,omitempty"` +} + +type PoliciesObservation struct { + + // Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if code-signing validation checks fail. 
If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log. Valid values: Warn, Enforce. Default value: Warn. + UntrustedArtifactOnDeployment *string `json:"untrustedArtifactOnDeployment,omitempty" tf:"untrusted_artifact_on_deployment,omitempty"` +} + +type PoliciesParameters struct { + + // Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if code-signing validation checks fail. If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log. Valid values: Warn, Enforce. Default value: Warn. + // +kubebuilder:validation:Optional + UntrustedArtifactOnDeployment *string `json:"untrustedArtifactOnDeployment" tf:"untrusted_artifact_on_deployment,omitempty"` +} + +// CodeSigningConfigSpec defines the desired state of CodeSigningConfig +type CodeSigningConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CodeSigningConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CodeSigningConfigInitParameters `json:"initProvider,omitempty"` +} + +// CodeSigningConfigStatus defines the observed state of CodeSigningConfig. 
+type CodeSigningConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CodeSigningConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CodeSigningConfig is the Schema for the CodeSigningConfigs API. Provides a Lambda Code Signing Config resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CodeSigningConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.allowedPublishers) || (has(self.initProvider) && has(self.initProvider.allowedPublishers))",message="spec.forProvider.allowedPublishers is a required parameter" + Spec CodeSigningConfigSpec `json:"spec"` + Status CodeSigningConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CodeSigningConfigList contains a list of CodeSigningConfigs +type CodeSigningConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CodeSigningConfig `json:"items"` +} + +// Repository type metadata. +var ( + CodeSigningConfig_Kind = "CodeSigningConfig" + CodeSigningConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CodeSigningConfig_Kind}.String() + CodeSigningConfig_KindAPIVersion = CodeSigningConfig_Kind + "." 
+ CRDGroupVersion.String() + CodeSigningConfig_GroupVersionKind = CRDGroupVersion.WithKind(CodeSigningConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&CodeSigningConfig{}, &CodeSigningConfigList{}) +} diff --git a/apis/lambda/v1beta2/zz_eventsourcemapping_terraformed.go b/apis/lambda/v1beta2/zz_eventsourcemapping_terraformed.go new file mode 100755 index 0000000000..405ded8191 --- /dev/null +++ b/apis/lambda/v1beta2/zz_eventsourcemapping_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EventSourceMapping +func (mg *EventSourceMapping) GetTerraformResourceType() string { + return "aws_lambda_event_source_mapping" +} + +// GetConnectionDetailsMapping for this EventSourceMapping +func (tr *EventSourceMapping) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this EventSourceMapping +func (tr *EventSourceMapping) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EventSourceMapping +func (tr *EventSourceMapping) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this EventSourceMapping +func (tr *EventSourceMapping) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this EventSourceMapping +func 
(tr *EventSourceMapping) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EventSourceMapping +func (tr *EventSourceMapping) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EventSourceMapping +func (tr *EventSourceMapping) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this EventSourceMapping +func (tr *EventSourceMapping) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this EventSourceMapping using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *EventSourceMapping) LateInitialize(attrs []byte) (bool, error) { + params := &EventSourceMappingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EventSourceMapping) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lambda/v1beta2/zz_eventsourcemapping_types.go b/apis/lambda/v1beta2/zz_eventsourcemapping_types.go new file mode 100755 index 0000000000..e32b4b50b8 --- /dev/null +++ b/apis/lambda/v1beta2/zz_eventsourcemapping_types.go @@ -0,0 +1,587 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AmazonManagedKafkaEventSourceConfigInitParameters struct { + + // A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax. + ConsumerGroupID *string `json:"consumerGroupId,omitempty" tf:"consumer_group_id,omitempty"` +} + +type AmazonManagedKafkaEventSourceConfigObservation struct { + + // A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. 
See AmazonManagedKafkaEventSourceConfig Syntax. + ConsumerGroupID *string `json:"consumerGroupId,omitempty" tf:"consumer_group_id,omitempty"` +} + +type AmazonManagedKafkaEventSourceConfigParameters struct { + + // A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax. + // +kubebuilder:validation:Optional + ConsumerGroupID *string `json:"consumerGroupId,omitempty" tf:"consumer_group_id,omitempty"` +} + +type DestinationConfigInitParameters struct { + + // The destination configuration for failed invocations. Detailed below. + OnFailure *OnFailureInitParameters `json:"onFailure,omitempty" tf:"on_failure,omitempty"` +} + +type DestinationConfigObservation struct { + + // The destination configuration for failed invocations. Detailed below. + OnFailure *OnFailureObservation `json:"onFailure,omitempty" tf:"on_failure,omitempty"` +} + +type DestinationConfigParameters struct { + + // The destination configuration for failed invocations. Detailed below. + // +kubebuilder:validation:Optional + OnFailure *OnFailureParameters `json:"onFailure,omitempty" tf:"on_failure,omitempty"` +} + +type DocumentDBEventSourceConfigInitParameters struct { + + // The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections. + CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"` + + // The name of the database to consume within the DocumentDB cluster. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. 
Valid values: UpdateLookup, Default. + FullDocument *string `json:"fullDocument,omitempty" tf:"full_document,omitempty"` +} + +type DocumentDBEventSourceConfigObservation struct { + + // The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections. + CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"` + + // The name of the database to consume within the DocumentDB cluster. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default. + FullDocument *string `json:"fullDocument,omitempty" tf:"full_document,omitempty"` +} + +type DocumentDBEventSourceConfigParameters struct { + + // The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections. + // +kubebuilder:validation:Optional + CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"` + + // The name of the database to consume within the DocumentDB cluster. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup, Default. 
+ // +kubebuilder:validation:Optional + FullDocument *string `json:"fullDocument,omitempty" tf:"full_document,omitempty"` +} + +type EventSourceMappingInitParameters struct { + + // Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below. + AmazonManagedKafkaEventSourceConfig *AmazonManagedKafkaEventSourceConfigInitParameters `json:"amazonManagedKafkaEventSourceConfig,omitempty" tf:"amazon_managed_kafka_event_source_config,omitempty"` + + // The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS. + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false. + BisectBatchOnFunctionError *bool `json:"bisectBatchOnFunctionError,omitempty" tf:"bisect_batch_on_function_error,omitempty"` + + // An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below. + DestinationConfig *DestinationConfigInitParameters `json:"destinationConfig,omitempty" tf:"destination_config,omitempty"` + + // Configuration settings for a DocumentDB event source. Detailed below. + DocumentDBEventSourceConfig *DocumentDBEventSourceConfigInitParameters `json:"documentDbEventSourceConfig,omitempty" tf:"document_db_event_source_config,omitempty"` + + // Determines if the mapping will be enabled on creation. Defaults to true. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source. + EventSourceArn *string `json:"eventSourceArn,omitempty" tf:"event_source_arn,omitempty"` + + // The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below. + FilterCriteria *FilterCriteriaInitParameters `json:"filterCriteria,omitempty" tf:"filter_criteria,omitempty"` + + // The name or the ARN of the Lambda function that will be subscribing to events. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // Reference to a Function in lambda to populate functionName. + // +kubebuilder:validation:Optional + FunctionNameRef *v1.Reference `json:"functionNameRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate functionName. + // +kubebuilder:validation:Optional + FunctionNameSelector *v1.Selector `json:"functionNameSelector,omitempty" tf:"-"` + + // A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures. + // +listType=set + FunctionResponseTypes []*string `json:"functionResponseTypes,omitempty" tf:"function_response_types,omitempty"` + + // The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. 
For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. + MaximumBatchingWindowInSeconds *float64 `json:"maximumBatchingWindowInSeconds,omitempty" tf:"maximum_batching_window_in_seconds,omitempty"` + + // The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive). + MaximumRecordAgeInSeconds *float64 `json:"maximumRecordAgeInSeconds,omitempty" tf:"maximum_record_age_in_seconds,omitempty"` + + // The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000. + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` + + // The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10. + ParallelizationFactor *float64 `json:"parallelizationFactor,omitempty" tf:"parallelization_factor,omitempty"` + + // The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name. + Queues []*string `json:"queues,omitempty" tf:"queues,omitempty"` + + // Scaling configuration of the event source. Only available for SQS queues. Detailed below. + ScalingConfig *ScalingConfigInitParameters `json:"scalingConfig,omitempty" tf:"scaling_config,omitempty"` + + // For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below. 
+ SelfManagedEventSource *SelfManagedEventSourceInitParameters `json:"selfManagedEventSource,omitempty" tf:"self_managed_event_source,omitempty"` + + // Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below. + SelfManagedKafkaEventSourceConfig *SelfManagedKafkaEventSourceConfigInitParameters `json:"selfManagedKafkaEventSourceConfig,omitempty" tf:"self_managed_kafka_event_source_config,omitempty"` + + // : For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below. + SourceAccessConfiguration []SourceAccessConfigurationInitParameters `json:"sourceAccessConfiguration,omitempty" tf:"source_access_configuration,omitempty"` + + // The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. + StartingPosition *string `json:"startingPosition,omitempty" tf:"starting_position,omitempty"` + + // A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. + StartingPositionTimestamp *string `json:"startingPositionTimestamp,omitempty" tf:"starting_position_timestamp,omitempty"` + + // The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified. 
+ // +listType=set + Topics []*string `json:"topics,omitempty" tf:"topics,omitempty"` + + // The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis). + TumblingWindowInSeconds *float64 `json:"tumblingWindowInSeconds,omitempty" tf:"tumbling_window_in_seconds,omitempty"` +} + +type EventSourceMappingObservation struct { + + // Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below. + AmazonManagedKafkaEventSourceConfig *AmazonManagedKafkaEventSourceConfigObservation `json:"amazonManagedKafkaEventSourceConfig,omitempty" tf:"amazon_managed_kafka_event_source_config,omitempty"` + + // The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS. + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to false. + BisectBatchOnFunctionError *bool `json:"bisectBatchOnFunctionError,omitempty" tf:"bisect_batch_on_function_error,omitempty"` + + // An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below. + DestinationConfig *DestinationConfigObservation `json:"destinationConfig,omitempty" tf:"destination_config,omitempty"` + + // Configuration settings for a DocumentDB event source. Detailed below. 
+ DocumentDBEventSourceConfig *DocumentDBEventSourceConfigObservation `json:"documentDbEventSourceConfig,omitempty" tf:"document_db_event_source_config,omitempty"` + + // Determines if the mapping will be enabled on creation. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source. + EventSourceArn *string `json:"eventSourceArn,omitempty" tf:"event_source_arn,omitempty"` + + // The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below. + FilterCriteria *FilterCriteriaObservation `json:"filterCriteria,omitempty" tf:"filter_criteria,omitempty"` + + // The the ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from function_name above.) + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` + + // The name or the ARN of the Lambda function that will be subscribing to events. + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures. + // +listType=set + FunctionResponseTypes []*string `json:"functionResponseTypes,omitempty" tf:"function_response_types,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The date this resource was last modified. + LastModified *string `json:"lastModified,omitempty" tf:"last_modified,omitempty"` + + // The result of the last AWS Lambda invocation of your Lambda function. 
+ LastProcessingResult *string `json:"lastProcessingResult,omitempty" tf:"last_processing_result,omitempty"` + + // The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. + MaximumBatchingWindowInSeconds *float64 `json:"maximumBatchingWindowInSeconds,omitempty" tf:"maximum_batching_window_in_seconds,omitempty"` + + // The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive). + MaximumRecordAgeInSeconds *float64 `json:"maximumRecordAgeInSeconds,omitempty" tf:"maximum_record_age_in_seconds,omitempty"` + + // The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000. + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` + + // The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10. + ParallelizationFactor *float64 `json:"parallelizationFactor,omitempty" tf:"parallelization_factor,omitempty"` + + // The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name. 
+ Queues []*string `json:"queues,omitempty" tf:"queues,omitempty"` + + // Scaling configuration of the event source. Only available for SQS queues. Detailed below. + ScalingConfig *ScalingConfigObservation `json:"scalingConfig,omitempty" tf:"scaling_config,omitempty"` + + // For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below. + SelfManagedEventSource *SelfManagedEventSourceObservation `json:"selfManagedEventSource,omitempty" tf:"self_managed_event_source,omitempty"` + + // Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below. + SelfManagedKafkaEventSourceConfig *SelfManagedKafkaEventSourceConfigObservation `json:"selfManagedKafkaEventSourceConfig,omitempty" tf:"self_managed_kafka_event_source_config,omitempty"` + + // : For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below. + SourceAccessConfiguration []SourceAccessConfigurationObservation `json:"sourceAccessConfiguration,omitempty" tf:"source_access_configuration,omitempty"` + + // The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. + StartingPosition *string `json:"startingPosition,omitempty" tf:"starting_position,omitempty"` + + // A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. 
If the timestamp is older than the current trim horizon, the oldest available record is chosen. + StartingPositionTimestamp *string `json:"startingPositionTimestamp,omitempty" tf:"starting_position_timestamp,omitempty"` + + // The state of the event source mapping. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // The reason the event source mapping is in its current state. + StateTransitionReason *string `json:"stateTransitionReason,omitempty" tf:"state_transition_reason,omitempty"` + + // The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified. + // +listType=set + Topics []*string `json:"topics,omitempty" tf:"topics,omitempty"` + + // The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis). + TumblingWindowInSeconds *float64 `json:"tumblingWindowInSeconds,omitempty" tf:"tumbling_window_in_seconds,omitempty"` + + // The UUID of the created event source mapping. + UUID *string `json:"uuid,omitempty" tf:"uuid,omitempty"` +} + +type EventSourceMappingParameters struct { + + // Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below. + // +kubebuilder:validation:Optional + AmazonManagedKafkaEventSourceConfig *AmazonManagedKafkaEventSourceConfigParameters `json:"amazonManagedKafkaEventSourceConfig,omitempty" tf:"amazon_managed_kafka_event_source_config,omitempty"` + + // The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS. + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // If the function returns an error, split the batch in two and retry. 
Only available for stream sources (DynamoDB and Kinesis). Defaults to false. + // +kubebuilder:validation:Optional + BisectBatchOnFunctionError *bool `json:"bisectBatchOnFunctionError,omitempty" tf:"bisect_batch_on_function_error,omitempty"` + + // An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below. + // +kubebuilder:validation:Optional + DestinationConfig *DestinationConfigParameters `json:"destinationConfig,omitempty" tf:"destination_config,omitempty"` + + // Configuration settings for a DocumentDB event source. Detailed below. + // +kubebuilder:validation:Optional + DocumentDBEventSourceConfig *DocumentDBEventSourceConfigParameters `json:"documentDbEventSourceConfig,omitempty" tf:"document_db_event_source_config,omitempty"` + + // Determines if the mapping will be enabled on creation. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source. + // +kubebuilder:validation:Optional + EventSourceArn *string `json:"eventSourceArn,omitempty" tf:"event_source_arn,omitempty"` + + // The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below. + // +kubebuilder:validation:Optional + FilterCriteria *FilterCriteriaParameters `json:"filterCriteria,omitempty" tf:"filter_criteria,omitempty"` + + // The name or the ARN of the Lambda function that will be subscribing to events. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // Reference to a Function in lambda to populate functionName. + // +kubebuilder:validation:Optional + FunctionNameRef *v1.Reference `json:"functionNameRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate functionName. + // +kubebuilder:validation:Optional + FunctionNameSelector *v1.Selector `json:"functionNameSelector,omitempty" tf:"-"` + + // A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures. + // +kubebuilder:validation:Optional + // +listType=set + FunctionResponseTypes []*string `json:"functionResponseTypes,omitempty" tf:"function_response_types,omitempty"` + + // The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. + // +kubebuilder:validation:Optional + MaximumBatchingWindowInSeconds *float64 `json:"maximumBatchingWindowInSeconds,omitempty" tf:"maximum_batching_window_in_seconds,omitempty"` + + // The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). 
Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive). + // +kubebuilder:validation:Optional + MaximumRecordAgeInSeconds *float64 `json:"maximumRecordAgeInSeconds,omitempty" tf:"maximum_record_age_in_seconds,omitempty"` + + // The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000. + // +kubebuilder:validation:Optional + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` + + // The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10. + // +kubebuilder:validation:Optional + ParallelizationFactor *float64 `json:"parallelizationFactor,omitempty" tf:"parallelization_factor,omitempty"` + + // The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name. + // +kubebuilder:validation:Optional + Queues []*string `json:"queues,omitempty" tf:"queues,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Scaling configuration of the event source. Only available for SQS queues. Detailed below. + // +kubebuilder:validation:Optional + ScalingConfig *ScalingConfigParameters `json:"scalingConfig,omitempty" tf:"scaling_config,omitempty"` + + // For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include source_access_configuration. Detailed below. + // +kubebuilder:validation:Optional + SelfManagedEventSource *SelfManagedEventSourceParameters `json:"selfManagedEventSource,omitempty" tf:"self_managed_event_source,omitempty"` + + // Additional configuration block for Self Managed Kafka sources. 
Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below. + // +kubebuilder:validation:Optional + SelfManagedKafkaEventSourceConfig *SelfManagedKafkaEventSourceConfigParameters `json:"selfManagedKafkaEventSourceConfig,omitempty" tf:"self_managed_kafka_event_source_config,omitempty"` + + // : For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below. + // +kubebuilder:validation:Optional + SourceAccessConfiguration []SourceAccessConfigurationParameters `json:"sourceAccessConfiguration,omitempty" tf:"source_access_configuration,omitempty"` + + // The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. + // +kubebuilder:validation:Optional + StartingPosition *string `json:"startingPosition,omitempty" tf:"starting_position,omitempty"` + + // A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. + // +kubebuilder:validation:Optional + StartingPositionTimestamp *string `json:"startingPositionTimestamp,omitempty" tf:"starting_position_timestamp,omitempty"` + + // The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified. 
+ // +kubebuilder:validation:Optional + // +listType=set + Topics []*string `json:"topics,omitempty" tf:"topics,omitempty"` + + // The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis). + // +kubebuilder:validation:Optional + TumblingWindowInSeconds *float64 `json:"tumblingWindowInSeconds,omitempty" tf:"tumbling_window_in_seconds,omitempty"` +} + +type FilterCriteriaInitParameters struct { + + // A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below. + Filter []FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` +} + +type FilterCriteriaObservation struct { + + // A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below. + Filter []FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` +} + +type FilterCriteriaParameters struct { + + // A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below. + // +kubebuilder:validation:Optional + Filter []FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` +} + +type FilterInitParameters struct { + + // A filter pattern up to 4096 characters. See Filter Rule Syntax. + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` +} + +type FilterObservation struct { + + // A filter pattern up to 4096 characters. See Filter Rule Syntax. + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` +} + +type FilterParameters struct { + + // A filter pattern up to 4096 characters. See Filter Rule Syntax. 
+ // +kubebuilder:validation:Optional + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` +} + +type OnFailureInitParameters struct { + + // The Amazon Resource Name (ARN) of the destination resource. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` +} + +type OnFailureObservation struct { + + // The Amazon Resource Name (ARN) of the destination resource. + DestinationArn *string `json:"destinationArn,omitempty" tf:"destination_arn,omitempty"` +} + +type OnFailureParameters struct { + + // The Amazon Resource Name (ARN) of the destination resource. + // +kubebuilder:validation:Optional + DestinationArn *string `json:"destinationArn" tf:"destination_arn,omitempty"` +} + +type ScalingConfigInitParameters struct { + + // Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources. + MaximumConcurrency *float64 `json:"maximumConcurrency,omitempty" tf:"maximum_concurrency,omitempty"` +} + +type ScalingConfigObservation struct { + + // Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources. + MaximumConcurrency *float64 `json:"maximumConcurrency,omitempty" tf:"maximum_concurrency,omitempty"` +} + +type ScalingConfigParameters struct { + + // Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between 2 and 1000. See Configuring maximum concurrency for Amazon SQS event sources. + // +kubebuilder:validation:Optional + MaximumConcurrency *float64 `json:"maximumConcurrency,omitempty" tf:"maximum_concurrency,omitempty"` +} + +type SelfManagedEventSourceInitParameters struct { + + // A map of endpoints for the self managed source. 
For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints. + // +mapType=granular + Endpoints map[string]*string `json:"endpoints,omitempty" tf:"endpoints,omitempty"` +} + +type SelfManagedEventSourceObservation struct { + + // A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints. + // +mapType=granular + Endpoints map[string]*string `json:"endpoints,omitempty" tf:"endpoints,omitempty"` +} + +type SelfManagedEventSourceParameters struct { + + // A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS and the value should be a string with a comma separated list of broker endpoints. + // +kubebuilder:validation:Optional + // +mapType=granular + Endpoints map[string]*string `json:"endpoints" tf:"endpoints,omitempty"` +} + +type SelfManagedKafkaEventSourceConfigInitParameters struct { + + // A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax. + ConsumerGroupID *string `json:"consumerGroupId,omitempty" tf:"consumer_group_id,omitempty"` +} + +type SelfManagedKafkaEventSourceConfigObservation struct { + + // A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax. + ConsumerGroupID *string `json:"consumerGroupId,omitempty" tf:"consumer_group_id,omitempty"` +} + +type SelfManagedKafkaEventSourceConfigParameters struct { + + // A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. 
If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax. + // +kubebuilder:validation:Optional + ConsumerGroupID *string `json:"consumerGroupId,omitempty" tf:"consumer_group_id,omitempty"` +} + +type SourceAccessConfigurationInitParameters struct { + + // The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws_subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws_security_group resource's id attribute. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type SourceAccessConfigurationObservation struct { + + // The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The URI for this configuration. For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws_subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws_security_group resource's id attribute. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type SourceAccessConfigurationParameters struct { + + // The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The URI for this configuration. 
For type VPC_SUBNET the value should be subnet:subnet_id where subnet_id is the value you would find in an aws_subnet resource's id attribute. For type VPC_SECURITY_GROUP the value should be security_group:security_group_id where security_group_id is the value you would find in an aws_security_group resource's id attribute. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +// EventSourceMappingSpec defines the desired state of EventSourceMapping +type EventSourceMappingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EventSourceMappingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EventSourceMappingInitParameters `json:"initProvider,omitempty"` +} + +// EventSourceMappingStatus defines the observed state of EventSourceMapping. +type EventSourceMappingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EventSourceMappingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EventSourceMapping is the Schema for the EventSourceMappings API. Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming for Apache Kafka (MSK). 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type EventSourceMapping struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec EventSourceMappingSpec `json:"spec"` + Status EventSourceMappingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EventSourceMappingList contains a list of EventSourceMappings +type EventSourceMappingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EventSourceMapping `json:"items"` +} + +// Repository type metadata. +var ( + EventSourceMapping_Kind = "EventSourceMapping" + EventSourceMapping_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: EventSourceMapping_Kind}.String() + EventSourceMapping_KindAPIVersion = EventSourceMapping_Kind + "." + CRDGroupVersion.String() + EventSourceMapping_GroupVersionKind = CRDGroupVersion.WithKind(EventSourceMapping_Kind) +) + +func init() { + SchemeBuilder.Register(&EventSourceMapping{}, &EventSourceMappingList{}) +} diff --git a/apis/lambda/v1beta2/zz_function_terraformed.go b/apis/lambda/v1beta2/zz_function_terraformed.go new file mode 100755 index 0000000000..d7a9ac2843 --- /dev/null +++ b/apis/lambda/v1beta2/zz_function_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Function +func (mg *Function) GetTerraformResourceType() string { + return "aws_lambda_function" +} + +// GetConnectionDetailsMapping for this Function +func (tr *Function) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Function +func (tr *Function) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Function +func (tr *Function) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Function +func (tr *Function) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Function +func (tr *Function) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Function +func (tr *Function) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Function +func (tr *Function) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, 
&base)
+}
+
+// GetMergedParameters of this Function
+func (tr *Function) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Function using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Function) LateInitialize(attrs []byte) (bool, error) {
+	params := &FunctionParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+	opts = append(opts, resource.WithNameFilter("SourceCodeHash"))
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Function) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lambda/v1beta2/zz_function_types.go b/apis/lambda/v1beta2/zz_function_types.go new file mode 100755 index 0000000000..1d60f3c33a --- /dev/null +++ b/apis/lambda/v1beta2/zz_function_types.go @@ -0,0 +1,842 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DeadLetterConfigInitParameters struct { + + // ARN of an SNS topic or SQS queue to notify when an invocation fails. If this option is used, the function's IAM role must be granted suitable access to write to the target object, which means allowing either the sns:Publish or sqs:SendMessage action on this ARN, depending on which service is targeted. + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` +} + +type DeadLetterConfigObservation struct { + + // ARN of an SNS topic or SQS queue to notify when an invocation fails. If this option is used, the function's IAM role must be granted suitable access to write to the target object, which means allowing either the sns:Publish or sqs:SendMessage action on this ARN, depending on which service is targeted. + TargetArn *string `json:"targetArn,omitempty" tf:"target_arn,omitempty"` +} + +type DeadLetterConfigParameters struct { + + // ARN of an SNS topic or SQS queue to notify when an invocation fails. If this option is used, the function's IAM role must be granted suitable access to write to the target object, which means allowing either the sns:Publish or sqs:SendMessage action on this ARN, depending on which service is targeted. 
+ // +kubebuilder:validation:Optional + TargetArn *string `json:"targetArn" tf:"target_arn,omitempty"` +} + +type EnvironmentInitParameters struct { + + // Map of environment variables that are accessible from the function code during execution. If provided at least one key must be present. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` +} + +type EnvironmentObservation struct { + + // Map of environment variables that are accessible from the function code during execution. If provided at least one key must be present. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` +} + +type EnvironmentParameters struct { + + // Map of environment variables that are accessible from the function code during execution. If provided at least one key must be present. + // +kubebuilder:validation:Optional + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` +} + +type EphemeralStorageInitParameters struct { + + // The size of the Lambda function Ephemeral storage(/tmp) represented in MB. The minimum supported ephemeral_storage value defaults to 512MB and the maximum supported value is 10240MB. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` +} + +type EphemeralStorageObservation struct { + + // The size of the Lambda function Ephemeral storage(/tmp) represented in MB. The minimum supported ephemeral_storage value defaults to 512MB and the maximum supported value is 10240MB. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` +} + +type EphemeralStorageParameters struct { + + // The size of the Lambda function Ephemeral storage(/tmp) represented in MB. The minimum supported ephemeral_storage value defaults to 512MB and the maximum supported value is 10240MB. 
+ // +kubebuilder:validation:Optional + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` +} + +type FileSystemConfigInitParameters struct { + + // Amazon Resource Name (ARN) of the Amazon EFS Access Point that provides access to the file system. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.AccessPoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a AccessPoint in efs to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a AccessPoint in efs to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Path where the function can access the file system, starting with /mnt/. + LocalMountPath *string `json:"localMountPath,omitempty" tf:"local_mount_path,omitempty"` +} + +type FileSystemConfigObservation struct { + + // Amazon Resource Name (ARN) of the Amazon EFS Access Point that provides access to the file system. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Path where the function can access the file system, starting with /mnt/. + LocalMountPath *string `json:"localMountPath,omitempty" tf:"local_mount_path,omitempty"` +} + +type FileSystemConfigParameters struct { + + // Amazon Resource Name (ARN) of the Amazon EFS Access Point that provides access to the file system. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/efs/v1beta2.AccessPoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a AccessPoint in efs to populate arn. 
+ // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a AccessPoint in efs to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Path where the function can access the file system, starting with /mnt/. + // +kubebuilder:validation:Optional + LocalMountPath *string `json:"localMountPath" tf:"local_mount_path,omitempty"` +} + +type FunctionInitParameters struct { + + // Instruction set architecture for your Lambda function. Valid values are ["x86_64"] and ["arm64"]. Default is ["x86_64"]. Removing this attribute, function's architecture stay the same. + Architectures []*string `json:"architectures,omitempty" tf:"architectures,omitempty"` + + // To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function. + CodeSigningConfigArn *string `json:"codeSigningConfigArn,omitempty" tf:"code_signing_config_arn,omitempty"` + + // Configuration block. Detailed below. + DeadLetterConfig *DeadLetterConfigInitParameters `json:"deadLetterConfig,omitempty" tf:"dead_letter_config,omitempty"` + + // Description of what your Lambda Function does. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block. Detailed below. + Environment *EnvironmentInitParameters `json:"environment,omitempty" tf:"environment,omitempty"` + + // The amount of Ephemeral storage(/tmp) to allocate for the Lambda Function in MB. This parameter is used to expand the total amount of Ephemeral storage available, beyond the default amount of 512MB. Detailed below. + EphemeralStorage *EphemeralStorageInitParameters `json:"ephemeralStorage,omitempty" tf:"ephemeral_storage,omitempty"` + + // Configuration block. Detailed below. 
+ FileSystemConfig *FileSystemConfigInitParameters `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` + + // Function entrypoint in your code. + Handler *string `json:"handler,omitempty" tf:"handler,omitempty"` + + // Configuration block. Detailed below. + ImageConfig *ImageConfigInitParameters `json:"imageConfig,omitempty" tf:"image_config,omitempty"` + + // ECR image URI containing the function's deployment package. Exactly one of filename, image_uri, or s3_bucket must be specified. + ImageURI *string `json:"imageUri,omitempty" tf:"image_uri,omitempty"` + + // Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key. To fix the perpetual difference, remove this configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See Lambda Layers + Layers []*string `json:"layers,omitempty" tf:"layers,omitempty"` + + // Configuration block used to specify advanced logging settings. Detailed below. + LoggingConfig *LoggingConfigInitParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Amount of memory in MB your Lambda Function can use at runtime. Defaults to 128. See Limits + MemorySize *float64 `json:"memorySize,omitempty" tf:"memory_size,omitempty"` + + // Lambda deployment package type. 
Valid values are Zip and Image. Defaults to Zip. + PackageType *string `json:"packageType,omitempty" tf:"package_type,omitempty"` + + // Whether to publish creation/change as new Lambda Function Version. Defaults to false. + Publish *bool `json:"publish,omitempty" tf:"publish,omitempty"` + + // AWS no longer supports this operation. This attribute now has no effect and will be removed in a future major version. Whether to replace the security groups on associated lambda network interfaces upon destruction. Removing these security groups from orphaned network interfaces can speed up security group deletion times by avoiding a dependency on AWS's internal cleanup operations. By default, the ENI security groups will be replaced with the default security group in the function's VPC. Set the replacement_security_group_ids attribute to use a custom list of security groups for replacement. + ReplaceSecurityGroupsOnDestroy *bool `json:"replaceSecurityGroupsOnDestroy,omitempty" tf:"replace_security_groups_on_destroy,omitempty"` + + // References to SecurityGroup in ec2 to populate replacementSecurityGroupIds. + // +kubebuilder:validation:Optional + ReplacementSecurityGroupIDRefs []v1.Reference `json:"replacementSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate replacementSecurityGroupIds. + // +kubebuilder:validation:Optional + ReplacementSecurityGroupIDSelector *v1.Selector `json:"replacementSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of security group IDs to assign to orphaned Lambda function network interfaces upon destruction. replace_security_groups_on_destroy must be set to true to use this attribute. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=ReplacementSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=ReplacementSecurityGroupIDSelector + // +listType=set + ReplacementSecurityGroupIds []*string `json:"replacementSecurityGroupIds,omitempty" tf:"replacement_security_group_ids,omitempty"` + + // Amount of reserved concurrent executions for this lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations. Defaults to Unreserved Concurrency Limits -1. See Managing Concurrency + ReservedConcurrentExecutions *float64 `json:"reservedConcurrentExecutions,omitempty" tf:"reserved_concurrent_executions,omitempty"` + + // Amazon Resource Name (ARN) of the function's execution role. The role provides the function's identity and access to AWS services and resources. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // Identifier of the function's runtime. See Runtimes for valid values. + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // S3 bucket location containing the function's deployment package. This bucket must reside in the same AWS region where you are creating the Lambda function. Exactly one of filename, image_uri, or s3_bucket must be specified. When s3_bucket is set, s3_key is required. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` + + // Reference to a Bucket in s3 to populate s3Bucket. + // +kubebuilder:validation:Optional + S3BucketRef *v1.Reference `json:"s3BucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate s3Bucket. + // +kubebuilder:validation:Optional + S3BucketSelector *v1.Selector `json:"s3BucketSelector,omitempty" tf:"-"` + + // S3 key of an object containing the function's deployment package. When s3_bucket is set, s3_key is required. + S3Key *string `json:"s3Key,omitempty" tf:"s3_key,omitempty"` + + // Object version containing the function's deployment package. Conflicts with filename and image_uri. + S3ObjectVersion *string `json:"s3ObjectVersion,omitempty" tf:"s3_object_version,omitempty"` + + SkipDestroy *bool `json:"skipDestroy,omitempty" tf:"skip_destroy,omitempty"` + + // Snap start settings block. Detailed below. + SnapStart *SnapStartInitParameters `json:"snapStart,omitempty" tf:"snap_start,omitempty"` + + // Used to trigger updates. Must be set to a base64 encoded SHA256 hash of the package file specified with either filename or s3_key. If you have specified this field manually, it should be the actual (computed) hash of the underlying lambda function specified in the filename, image_uri, s3_bucket fields. + SourceCodeHash *string `json:"sourceCodeHash,omitempty" tf:"source_code_hash,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Amount of time your Lambda Function has to run in seconds. Defaults to 3. See Limits. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Configuration block. Detailed below. + TracingConfig *TracingConfigInitParameters `json:"tracingConfig,omitempty" tf:"tracing_config,omitempty"` + + // Configuration block. Detailed below. 
+ VPCConfig *VPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type FunctionObservation struct { + + // Instruction set architecture for your Lambda function. Valid values are ["x86_64"] and ["arm64"]. Default is ["x86_64"]. Removing this attribute, function's architecture stay the same. + Architectures []*string `json:"architectures,omitempty" tf:"architectures,omitempty"` + + // Amazon Resource Name (ARN) identifying your Lambda Function. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function. + CodeSigningConfigArn *string `json:"codeSigningConfigArn,omitempty" tf:"code_signing_config_arn,omitempty"` + + // Configuration block. Detailed below. + DeadLetterConfig *DeadLetterConfigObservation `json:"deadLetterConfig,omitempty" tf:"dead_letter_config,omitempty"` + + // Description of what your Lambda Function does. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block. Detailed below. + Environment *EnvironmentObservation `json:"environment,omitempty" tf:"environment,omitempty"` + + // The amount of Ephemeral storage(/tmp) to allocate for the Lambda Function in MB. This parameter is used to expand the total amount of Ephemeral storage available, beyond the default amount of 512MB. Detailed below. + EphemeralStorage *EphemeralStorageObservation `json:"ephemeralStorage,omitempty" tf:"ephemeral_storage,omitempty"` + + // Configuration block. Detailed below. + FileSystemConfig *FileSystemConfigObservation `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` + + // Function entrypoint in your code. + Handler *string `json:"handler,omitempty" tf:"handler,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block. 
Detailed below. + ImageConfig *ImageConfigObservation `json:"imageConfig,omitempty" tf:"image_config,omitempty"` + + // ECR image URI containing the function's deployment package. Exactly one of filename, image_uri, or s3_bucket must be specified. + ImageURI *string `json:"imageUri,omitempty" tf:"image_uri,omitempty"` + + // ARN to be used for invoking Lambda Function from API Gateway - to be used in aws_api_gateway_integration's uri. + InvokeArn *string `json:"invokeArn,omitempty" tf:"invoke_arn,omitempty"` + + // Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key. To fix the perpetual difference, remove this configuration. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Date this resource was last modified. + LastModified *string `json:"lastModified,omitempty" tf:"last_modified,omitempty"` + + // List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See Lambda Layers + Layers []*string `json:"layers,omitempty" tf:"layers,omitempty"` + + // Configuration block used to specify advanced logging settings. Detailed below. + LoggingConfig *LoggingConfigObservation `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Amount of memory in MB your Lambda Function can use at runtime. Defaults to 128. See Limits + MemorySize *float64 `json:"memorySize,omitempty" tf:"memory_size,omitempty"` + + // Lambda deployment package type. Valid values are Zip and Image. Defaults to Zip. + PackageType *string `json:"packageType,omitempty" tf:"package_type,omitempty"` + + // Whether to publish creation/change as new Lambda Function Version. Defaults to false. + Publish *bool `json:"publish,omitempty" tf:"publish,omitempty"` + + // ARN identifying your Lambda Function Version (if versioning is enabled via publish = true). 
+ QualifiedArn *string `json:"qualifiedArn,omitempty" tf:"qualified_arn,omitempty"` + + // Qualified ARN (ARN with lambda version number) to be used for invoking Lambda Function from API Gateway - to be used in aws_api_gateway_integration's uri. + QualifiedInvokeArn *string `json:"qualifiedInvokeArn,omitempty" tf:"qualified_invoke_arn,omitempty"` + + // AWS no longer supports this operation. This attribute now has no effect and will be removed in a future major version. Whether to replace the security groups on associated lambda network interfaces upon destruction. Removing these security groups from orphaned network interfaces can speed up security group deletion times by avoiding a dependency on AWS's internal cleanup operations. By default, the ENI security groups will be replaced with the default security group in the function's VPC. Set the replacement_security_group_ids attribute to use a custom list of security groups for replacement. + ReplaceSecurityGroupsOnDestroy *bool `json:"replaceSecurityGroupsOnDestroy,omitempty" tf:"replace_security_groups_on_destroy,omitempty"` + + // List of security group IDs to assign to orphaned Lambda function network interfaces upon destruction. replace_security_groups_on_destroy must be set to true to use this attribute. + // +listType=set + ReplacementSecurityGroupIds []*string `json:"replacementSecurityGroupIds,omitempty" tf:"replacement_security_group_ids,omitempty"` + + // Amount of reserved concurrent executions for this lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations. Defaults to Unreserved Concurrency Limits -1. See Managing Concurrency + ReservedConcurrentExecutions *float64 `json:"reservedConcurrentExecutions,omitempty" tf:"reserved_concurrent_executions,omitempty"` + + // Amazon Resource Name (ARN) of the function's execution role. The role provides the function's identity and access to AWS services and resources. 
+ Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Identifier of the function's runtime. See Runtimes for valid values. + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // S3 bucket location containing the function's deployment package. This bucket must reside in the same AWS region where you are creating the Lambda function. Exactly one of filename, image_uri, or s3_bucket must be specified. When s3_bucket is set, s3_key is required. + S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` + + // S3 key of an object containing the function's deployment package. When s3_bucket is set, s3_key is required. + S3Key *string `json:"s3Key,omitempty" tf:"s3_key,omitempty"` + + // Object version containing the function's deployment package. Conflicts with filename and image_uri. + S3ObjectVersion *string `json:"s3ObjectVersion,omitempty" tf:"s3_object_version,omitempty"` + + // ARN of the signing job. + SigningJobArn *string `json:"signingJobArn,omitempty" tf:"signing_job_arn,omitempty"` + + // ARN of the signing profile version. + SigningProfileVersionArn *string `json:"signingProfileVersionArn,omitempty" tf:"signing_profile_version_arn,omitempty"` + + SkipDestroy *bool `json:"skipDestroy,omitempty" tf:"skip_destroy,omitempty"` + + // Snap start settings block. Detailed below. + SnapStart *SnapStartObservation `json:"snapStart,omitempty" tf:"snap_start,omitempty"` + + // Used to trigger updates. Must be set to a base64 encoded SHA256 hash of the package file specified with either filename or s3_key. If you have specified this field manually, it should be the actual (computed) hash of the underlying lambda function specified in the filename, image_uri, s3_bucket fields. + SourceCodeHash *string `json:"sourceCodeHash,omitempty" tf:"source_code_hash,omitempty"` + + // Size in bytes of the function .zip file. 
+ SourceCodeSize *float64 `json:"sourceCodeSize,omitempty" tf:"source_code_size,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Amount of time your Lambda Function has to run in seconds. Defaults to 3. See Limits. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Configuration block. Detailed below. + TracingConfig *TracingConfigObservation `json:"tracingConfig,omitempty" tf:"tracing_config,omitempty"` + + // Configuration block. Detailed below. + VPCConfig *VPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` + + // Latest published version of your Lambda Function. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type FunctionParameters struct { + + // Instruction set architecture for your Lambda function. Valid values are ["x86_64"] and ["arm64"]. Default is ["x86_64"]. Removing this attribute, function's architecture stay the same. + // +kubebuilder:validation:Optional + Architectures []*string `json:"architectures,omitempty" tf:"architectures,omitempty"` + + // To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function. + // +kubebuilder:validation:Optional + CodeSigningConfigArn *string `json:"codeSigningConfigArn,omitempty" tf:"code_signing_config_arn,omitempty"` + + // Configuration block. Detailed below. + // +kubebuilder:validation:Optional + DeadLetterConfig *DeadLetterConfigParameters `json:"deadLetterConfig,omitempty" tf:"dead_letter_config,omitempty"` + + // Description of what your Lambda Function does. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block. Detailed below. + // +kubebuilder:validation:Optional + Environment *EnvironmentParameters `json:"environment,omitempty" tf:"environment,omitempty"` + + // The amount of Ephemeral storage(/tmp) to allocate for the Lambda Function in MB. This parameter is used to expand the total amount of Ephemeral storage available, beyond the default amount of 512MB. Detailed below. + // +kubebuilder:validation:Optional + EphemeralStorage *EphemeralStorageParameters `json:"ephemeralStorage,omitempty" tf:"ephemeral_storage,omitempty"` + + // Configuration block. Detailed below. + // +kubebuilder:validation:Optional + FileSystemConfig *FileSystemConfigParameters `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` + + // Function entrypoint in your code. + // +kubebuilder:validation:Optional + Handler *string `json:"handler,omitempty" tf:"handler,omitempty"` + + // Configuration block. Detailed below. + // +kubebuilder:validation:Optional + ImageConfig *ImageConfigParameters `json:"imageConfig,omitempty" tf:"image_config,omitempty"` + + // ECR image URI containing the function's deployment package. Exactly one of filename, image_uri, or s3_bucket must be specified. + // +kubebuilder:validation:Optional + ImageURI *string `json:"imageUri,omitempty" tf:"image_uri,omitempty"` + + // Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key. To fix the perpetual difference, remove this configuration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See Lambda Layers + // +kubebuilder:validation:Optional + Layers []*string `json:"layers,omitempty" tf:"layers,omitempty"` + + // Configuration block used to specify advanced logging settings. Detailed below. + // +kubebuilder:validation:Optional + LoggingConfig *LoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Amount of memory in MB your Lambda Function can use at runtime. Defaults to 128. See Limits + // +kubebuilder:validation:Optional + MemorySize *float64 `json:"memorySize,omitempty" tf:"memory_size,omitempty"` + + // Lambda deployment package type. Valid values are Zip and Image. Defaults to Zip. + // +kubebuilder:validation:Optional + PackageType *string `json:"packageType,omitempty" tf:"package_type,omitempty"` + + // Whether to publish creation/change as new Lambda Function Version. Defaults to false. + // +kubebuilder:validation:Optional + Publish *bool `json:"publish,omitempty" tf:"publish,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // AWS no longer supports this operation. This attribute now has no effect and will be removed in a future major version. 
Whether to replace the security groups on associated lambda network interfaces upon destruction. Removing these security groups from orphaned network interfaces can speed up security group deletion times by avoiding a dependency on AWS's internal cleanup operations. By default, the ENI security groups will be replaced with the default security group in the function's VPC. Set the replacement_security_group_ids attribute to use a custom list of security groups for replacement. + // +kubebuilder:validation:Optional + ReplaceSecurityGroupsOnDestroy *bool `json:"replaceSecurityGroupsOnDestroy,omitempty" tf:"replace_security_groups_on_destroy,omitempty"` + + // References to SecurityGroup in ec2 to populate replacementSecurityGroupIds. + // +kubebuilder:validation:Optional + ReplacementSecurityGroupIDRefs []v1.Reference `json:"replacementSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate replacementSecurityGroupIds. + // +kubebuilder:validation:Optional + ReplacementSecurityGroupIDSelector *v1.Selector `json:"replacementSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of security group IDs to assign to orphaned Lambda function network interfaces upon destruction. replace_security_groups_on_destroy must be set to true to use this attribute. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=ReplacementSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=ReplacementSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + ReplacementSecurityGroupIds []*string `json:"replacementSecurityGroupIds,omitempty" tf:"replacement_security_group_ids,omitempty"` + + // Amount of reserved concurrent executions for this lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations. Defaults to Unreserved Concurrency Limits -1. 
See Managing Concurrency + // +kubebuilder:validation:Optional + ReservedConcurrentExecutions *float64 `json:"reservedConcurrentExecutions,omitempty" tf:"reserved_concurrent_executions,omitempty"` + + // Amazon Resource Name (ARN) of the function's execution role. The role provides the function's identity and access to AWS services and resources. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // Identifier of the function's runtime. See Runtimes for valid values. + // +kubebuilder:validation:Optional + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // S3 bucket location containing the function's deployment package. This bucket must reside in the same AWS region where you are creating the Lambda function. Exactly one of filename, image_uri, or s3_bucket must be specified. When s3_bucket is set, s3_key is required. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + S3Bucket *string `json:"s3Bucket,omitempty" tf:"s3_bucket,omitempty"` + + // Reference to a Bucket in s3 to populate s3Bucket. + // +kubebuilder:validation:Optional + S3BucketRef *v1.Reference `json:"s3BucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate s3Bucket. 
+ // +kubebuilder:validation:Optional + S3BucketSelector *v1.Selector `json:"s3BucketSelector,omitempty" tf:"-"` + + // S3 key of an object containing the function's deployment package. When s3_bucket is set, s3_key is required. + // +kubebuilder:validation:Optional + S3Key *string `json:"s3Key,omitempty" tf:"s3_key,omitempty"` + + // Object version containing the function's deployment package. Conflicts with filename and image_uri. + // +kubebuilder:validation:Optional + S3ObjectVersion *string `json:"s3ObjectVersion,omitempty" tf:"s3_object_version,omitempty"` + + // +kubebuilder:validation:Optional + SkipDestroy *bool `json:"skipDestroy,omitempty" tf:"skip_destroy,omitempty"` + + // Snap start settings block. Detailed below. + // +kubebuilder:validation:Optional + SnapStart *SnapStartParameters `json:"snapStart,omitempty" tf:"snap_start,omitempty"` + + // Used to trigger updates. Must be set to a base64 encoded SHA256 hash of the package file specified with either filename or s3_key. If you have specified this field manually, it should be the actual (computed) hash of the underlying lambda function specified in the filename, image_uri, s3_bucket fields. + // +kubebuilder:validation:Optional + SourceCodeHash *string `json:"sourceCodeHash,omitempty" tf:"source_code_hash,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Amount of time your Lambda Function has to run in seconds. Defaults to 3. See Limits. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Configuration block. Detailed below. + // +kubebuilder:validation:Optional + TracingConfig *TracingConfigParameters `json:"tracingConfig,omitempty" tf:"tracing_config,omitempty"` + + // Configuration block. Detailed below. 
+ // +kubebuilder:validation:Optional + VPCConfig *VPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ImageConfigInitParameters struct { + + // Parameters that you want to pass in with entry_point. + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // Entry point to your application, which is typically the location of the runtime executable. + EntryPoint []*string `json:"entryPoint,omitempty" tf:"entry_point,omitempty"` + + // Working directory. + WorkingDirectory *string `json:"workingDirectory,omitempty" tf:"working_directory,omitempty"` +} + +type ImageConfigObservation struct { + + // Parameters that you want to pass in with entry_point. + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // Entry point to your application, which is typically the location of the runtime executable. + EntryPoint []*string `json:"entryPoint,omitempty" tf:"entry_point,omitempty"` + + // Working directory. + WorkingDirectory *string `json:"workingDirectory,omitempty" tf:"working_directory,omitempty"` +} + +type ImageConfigParameters struct { + + // Parameters that you want to pass in with entry_point. + // +kubebuilder:validation:Optional + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // Entry point to your application, which is typically the location of the runtime executable. + // +kubebuilder:validation:Optional + EntryPoint []*string `json:"entryPoint,omitempty" tf:"entry_point,omitempty"` + + // Working directory. + // +kubebuilder:validation:Optional + WorkingDirectory *string `json:"workingDirectory,omitempty" tf:"working_directory,omitempty"` +} + +type LoggingConfigInitParameters struct { + + // for JSON structured logs, choose the detail level of the logs your application sends to CloudWatch when using supported logging libraries. 
+ ApplicationLogLevel *string `json:"applicationLogLevel,omitempty" tf:"application_log_level,omitempty"` + + // select between Text and structured JSON format for your function's logs. + LogFormat *string `json:"logFormat,omitempty" tf:"log_format,omitempty"` + + // the CloudWatch log group your function sends logs to. + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // for JSON structured logs, choose the detail level of the Lambda platform event logs sent to CloudWatch, such as ERROR, DEBUG, or INFO. + SystemLogLevel *string `json:"systemLogLevel,omitempty" tf:"system_log_level,omitempty"` +} + +type LoggingConfigObservation struct { + + // for JSON structured logs, choose the detail level of the logs your application sends to CloudWatch when using supported logging libraries. + ApplicationLogLevel *string `json:"applicationLogLevel,omitempty" tf:"application_log_level,omitempty"` + + // select between Text and structured JSON format for your function's logs. + LogFormat *string `json:"logFormat,omitempty" tf:"log_format,omitempty"` + + // the CloudWatch log group your function sends logs to. + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // for JSON structured logs, choose the detail level of the Lambda platform event logs sent to CloudWatch, such as ERROR, DEBUG, or INFO. + SystemLogLevel *string `json:"systemLogLevel,omitempty" tf:"system_log_level,omitempty"` +} + +type LoggingConfigParameters struct { + + // for JSON structured logs, choose the detail level of the logs your application sends to CloudWatch when using supported logging libraries. + // +kubebuilder:validation:Optional + ApplicationLogLevel *string `json:"applicationLogLevel,omitempty" tf:"application_log_level,omitempty"` + + // select between Text and structured JSON format for your function's logs. 
+ // +kubebuilder:validation:Optional + LogFormat *string `json:"logFormat" tf:"log_format,omitempty"` + + // the CloudWatch log group your function sends logs to. + // +kubebuilder:validation:Optional + LogGroup *string `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // for JSON structured logs, choose the detail level of the Lambda platform event logs sent to CloudWatch, such as ERROR, DEBUG, or INFO. + // +kubebuilder:validation:Optional + SystemLogLevel *string `json:"systemLogLevel,omitempty" tf:"system_log_level,omitempty"` +} + +type SnapStartInitParameters struct { + + // Conditions where snap start is enabled. Valid values are PublishedVersions. + ApplyOn *string `json:"applyOn,omitempty" tf:"apply_on,omitempty"` +} + +type SnapStartObservation struct { + + // Conditions where snap start is enabled. Valid values are PublishedVersions. + ApplyOn *string `json:"applyOn,omitempty" tf:"apply_on,omitempty"` + + // Optimization status of the snap start configuration. Valid values are On and Off. + OptimizationStatus *string `json:"optimizationStatus,omitempty" tf:"optimization_status,omitempty"` +} + +type SnapStartParameters struct { + + // Conditions where snap start is enabled. Valid values are PublishedVersions. + // +kubebuilder:validation:Optional + ApplyOn *string `json:"applyOn" tf:"apply_on,omitempty"` +} + +type TracingConfigInitParameters struct { + + // Whether to sample and trace a subset of incoming requests with AWS X-Ray. Valid values are PassThrough and Active. If PassThrough, Lambda will only trace the request from an upstream service if it contains a tracing header with "sampled=1". If Active, Lambda will respect any tracing header it receives from an upstream service. If no tracing header is received, Lambda will call X-Ray for a tracing decision. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type TracingConfigObservation struct { + + // Whether to sample and trace a subset of incoming requests with AWS X-Ray. 
Valid values are PassThrough and Active. If PassThrough, Lambda will only trace the request from an upstream service if it contains a tracing header with "sampled=1". If Active, Lambda will respect any tracing header it receives from an upstream service. If no tracing header is received, Lambda will call X-Ray for a tracing decision. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type TracingConfigParameters struct { + + // Whether to sample and trace a subset of incoming requests with AWS X-Ray. Valid values are PassThrough and Active. If PassThrough, Lambda will only trace the request from an upstream service if it contains a tracing header with "sampled=1". If Active, Lambda will respect any tracing header it receives from an upstream service. If no tracing header is received, Lambda will call X-Ray for a tracing decision. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` +} + +type VPCConfigInitParameters struct { + + // Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets. Default is false. + IPv6AllowedForDualStack *bool `json:"ipv6AllowedForDualStack,omitempty" tf:"ipv6_allowed_for_dual_stack,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // List of security group IDs associated with the Lambda function. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // List of subnet IDs associated with the Lambda function. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCConfigObservation struct { + + // Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets. Default is false. + IPv6AllowedForDualStack *bool `json:"ipv6AllowedForDualStack,omitempty" tf:"ipv6_allowed_for_dual_stack,omitempty"` + + // List of security group IDs associated with the Lambda function. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // List of subnet IDs associated with the Lambda function. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // ID of the VPC. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCConfigParameters struct { + + // Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets. Default is false. 
+ // +kubebuilder:validation:Optional + IPv6AllowedForDualStack *bool `json:"ipv6AllowedForDualStack,omitempty" tf:"ipv6_allowed_for_dual_stack,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + + // List of security group IDs associated with the Lambda function. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // List of subnet IDs associated with the Lambda function. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +// FunctionSpec defines the desired state of Function +type FunctionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FunctionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FunctionInitParameters `json:"initProvider,omitempty"` +} + +// FunctionStatus defines the observed state of Function. +type FunctionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FunctionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Function is the Schema for the Functions API. Provides a Lambda Function resource. Lambda allows you to trigger execution of code in response to events in AWS, enabling serverless backend solutions. The Lambda Function itself includes source code and runtime configuration. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FunctionSpec `json:"spec"` + Status FunctionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionList contains a list of Functions +type FunctionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Function `json:"items"` +} + +// Repository type metadata. +var ( + Function_Kind = "Function" + Function_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Function_Kind}.String() + Function_KindAPIVersion = Function_Kind + "." + CRDGroupVersion.String() + Function_GroupVersionKind = CRDGroupVersion.WithKind(Function_Kind) +) + +func init() { + SchemeBuilder.Register(&Function{}, &FunctionList{}) +} diff --git a/apis/lambda/v1beta2/zz_functioneventinvokeconfig_terraformed.go b/apis/lambda/v1beta2/zz_functioneventinvokeconfig_terraformed.go new file mode 100755 index 0000000000..988e541d5c --- /dev/null +++ b/apis/lambda/v1beta2/zz_functioneventinvokeconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FunctionEventInvokeConfig +func (mg *FunctionEventInvokeConfig) GetTerraformResourceType() string { + return "aws_lambda_function_event_invoke_config" +} + +// GetConnectionDetailsMapping for this FunctionEventInvokeConfig +func (tr *FunctionEventInvokeConfig) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FunctionEventInvokeConfig +func (tr *FunctionEventInvokeConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FunctionEventInvokeConfig +func (tr *FunctionEventInvokeConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FunctionEventInvokeConfig +func (tr *FunctionEventInvokeConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FunctionEventInvokeConfig +func (tr *FunctionEventInvokeConfig) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FunctionEventInvokeConfig +func (tr *FunctionEventInvokeConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// 
GetInitParameters of this FunctionEventInvokeConfig +func (tr *FunctionEventInvokeConfig) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FunctionEventInvokeConfig +func (tr *FunctionEventInvokeConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FunctionEventInvokeConfig using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *FunctionEventInvokeConfig) LateInitialize(attrs []byte) (bool, error) { + params := &FunctionEventInvokeConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FunctionEventInvokeConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lambda/v1beta2/zz_functioneventinvokeconfig_types.go b/apis/lambda/v1beta2/zz_functioneventinvokeconfig_types.go new file mode 100755 index 0000000000..c1e3582ae8 --- /dev/null +++ b/apis/lambda/v1beta2/zz_functioneventinvokeconfig_types.go @@ -0,0 +1,249 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DestinationConfigOnFailureInitParameters struct { + + // Amazon Resource Name (ARN) of the destination resource. See the Lambda Developer Guide for acceptable resource types and associated IAM permissions. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sqs/v1beta1.Queue + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Reference to a Queue in sqs to populate destination. 
+ // +kubebuilder:validation:Optional + DestinationRef *v1.Reference `json:"destinationRef,omitempty" tf:"-"` + + // Selector for a Queue in sqs to populate destination. + // +kubebuilder:validation:Optional + DestinationSelector *v1.Selector `json:"destinationSelector,omitempty" tf:"-"` +} + +type DestinationConfigOnFailureObservation struct { + + // Amazon Resource Name (ARN) of the destination resource. See the Lambda Developer Guide for acceptable resource types and associated IAM permissions. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` +} + +type DestinationConfigOnFailureParameters struct { + + // Amazon Resource Name (ARN) of the destination resource. See the Lambda Developer Guide for acceptable resource types and associated IAM permissions. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sqs/v1beta1.Queue + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Reference to a Queue in sqs to populate destination. + // +kubebuilder:validation:Optional + DestinationRef *v1.Reference `json:"destinationRef,omitempty" tf:"-"` + + // Selector for a Queue in sqs to populate destination. + // +kubebuilder:validation:Optional + DestinationSelector *v1.Selector `json:"destinationSelector,omitempty" tf:"-"` +} + +type FunctionEventInvokeConfigDestinationConfigInitParameters struct { + + // Configuration block with destination configuration for failed asynchronous invocations. See below for details. + OnFailure *DestinationConfigOnFailureInitParameters `json:"onFailure,omitempty" tf:"on_failure,omitempty"` + + // Configuration block with destination configuration for successful asynchronous invocations. See below for details. 
+ OnSuccess *OnSuccessInitParameters `json:"onSuccess,omitempty" tf:"on_success,omitempty"` +} + +type FunctionEventInvokeConfigDestinationConfigObservation struct { + + // Configuration block with destination configuration for failed asynchronous invocations. See below for details. + OnFailure *DestinationConfigOnFailureObservation `json:"onFailure,omitempty" tf:"on_failure,omitempty"` + + // Configuration block with destination configuration for successful asynchronous invocations. See below for details. + OnSuccess *OnSuccessObservation `json:"onSuccess,omitempty" tf:"on_success,omitempty"` +} + +type FunctionEventInvokeConfigDestinationConfigParameters struct { + + // Configuration block with destination configuration for failed asynchronous invocations. See below for details. + // +kubebuilder:validation:Optional + OnFailure *DestinationConfigOnFailureParameters `json:"onFailure,omitempty" tf:"on_failure,omitempty"` + + // Configuration block with destination configuration for successful asynchronous invocations. See below for details. + // +kubebuilder:validation:Optional + OnSuccess *OnSuccessParameters `json:"onSuccess,omitempty" tf:"on_success,omitempty"` +} + +type FunctionEventInvokeConfigInitParameters struct { + + // Configuration block with destination configuration. See below for details. + DestinationConfig *FunctionEventInvokeConfigDestinationConfigInitParameters `json:"destinationConfig,omitempty" tf:"destination_config,omitempty"` + + // Name or Amazon Resource Name (ARN) of the Lambda Function, omitting any version or alias qualifier. + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // Maximum age of a request that Lambda sends to a function for processing in seconds. Valid values between 60 and 21600. + MaximumEventAgeInSeconds *float64 `json:"maximumEventAgeInSeconds,omitempty" tf:"maximum_event_age_in_seconds,omitempty"` + + // Maximum number of times to retry when the function returns an error. 
Valid values between 0 and 2. Defaults to 2. + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` + + // Lambda Function published version, $LATEST, or Lambda Alias name. + Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` +} + +type FunctionEventInvokeConfigObservation struct { + + // Configuration block with destination configuration. See below for details. + DestinationConfig *FunctionEventInvokeConfigDestinationConfigObservation `json:"destinationConfig,omitempty" tf:"destination_config,omitempty"` + + // Name or Amazon Resource Name (ARN) of the Lambda Function, omitting any version or alias qualifier. + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // Fully qualified Lambda Function name or Amazon Resource Name (ARN) + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Maximum age of a request that Lambda sends to a function for processing in seconds. Valid values between 60 and 21600. + MaximumEventAgeInSeconds *float64 `json:"maximumEventAgeInSeconds,omitempty" tf:"maximum_event_age_in_seconds,omitempty"` + + // Maximum number of times to retry when the function returns an error. Valid values between 0 and 2. Defaults to 2. + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` + + // Lambda Function published version, $LATEST, or Lambda Alias name. + Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` +} + +type FunctionEventInvokeConfigParameters struct { + + // Configuration block with destination configuration. See below for details. + // +kubebuilder:validation:Optional + DestinationConfig *FunctionEventInvokeConfigDestinationConfigParameters `json:"destinationConfig,omitempty" tf:"destination_config,omitempty"` + + // Name or Amazon Resource Name (ARN) of the Lambda Function, omitting any version or alias qualifier. 
+ // +kubebuilder:validation:Optional + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // Maximum age of a request that Lambda sends to a function for processing in seconds. Valid values between 60 and 21600. + // +kubebuilder:validation:Optional + MaximumEventAgeInSeconds *float64 `json:"maximumEventAgeInSeconds,omitempty" tf:"maximum_event_age_in_seconds,omitempty"` + + // Maximum number of times to retry when the function returns an error. Valid values between 0 and 2. Defaults to 2. + // +kubebuilder:validation:Optional + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` + + // Lambda Function published version, $LATEST, or Lambda Alias name. + // +kubebuilder:validation:Optional + Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type OnSuccessInitParameters struct { + + // Amazon Resource Name (ARN) of the destination resource. See the Lambda Developer Guide for acceptable resource types and associated IAM permissions. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Reference to a Topic in sns to populate destination. + // +kubebuilder:validation:Optional + DestinationRef *v1.Reference `json:"destinationRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate destination. + // +kubebuilder:validation:Optional + DestinationSelector *v1.Selector `json:"destinationSelector,omitempty" tf:"-"` +} + +type OnSuccessObservation struct { + + // Amazon Resource Name (ARN) of the destination resource. 
See the Lambda Developer Guide for acceptable resource types and associated IAM permissions. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` +} + +type OnSuccessParameters struct { + + // Amazon Resource Name (ARN) of the destination resource. See the Lambda Developer Guide for acceptable resource types and associated IAM permissions. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Reference to a Topic in sns to populate destination. + // +kubebuilder:validation:Optional + DestinationRef *v1.Reference `json:"destinationRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate destination. + // +kubebuilder:validation:Optional + DestinationSelector *v1.Selector `json:"destinationSelector,omitempty" tf:"-"` +} + +// FunctionEventInvokeConfigSpec defines the desired state of FunctionEventInvokeConfig +type FunctionEventInvokeConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FunctionEventInvokeConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider FunctionEventInvokeConfigInitParameters `json:"initProvider,omitempty"` +} + +// FunctionEventInvokeConfigStatus defines the observed state of FunctionEventInvokeConfig. +type FunctionEventInvokeConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FunctionEventInvokeConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FunctionEventInvokeConfig is the Schema for the FunctionEventInvokeConfigs API. Manages an asynchronous invocation configuration for a Lambda Function or Alias. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type FunctionEventInvokeConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.functionName) || (has(self.initProvider) && has(self.initProvider.functionName))",message="spec.forProvider.functionName is a required parameter" + Spec FunctionEventInvokeConfigSpec `json:"spec"` + Status FunctionEventInvokeConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionEventInvokeConfigList contains a list of FunctionEventInvokeConfigs +type FunctionEventInvokeConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FunctionEventInvokeConfig `json:"items"` +} + +// 
Repository type metadata. +var ( + FunctionEventInvokeConfig_Kind = "FunctionEventInvokeConfig" + FunctionEventInvokeConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FunctionEventInvokeConfig_Kind}.String() + FunctionEventInvokeConfig_KindAPIVersion = FunctionEventInvokeConfig_Kind + "." + CRDGroupVersion.String() + FunctionEventInvokeConfig_GroupVersionKind = CRDGroupVersion.WithKind(FunctionEventInvokeConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&FunctionEventInvokeConfig{}, &FunctionEventInvokeConfigList{}) +} diff --git a/apis/lambda/v1beta2/zz_functionurl_terraformed.go b/apis/lambda/v1beta2/zz_functionurl_terraformed.go new file mode 100755 index 0000000000..510bdb8034 --- /dev/null +++ b/apis/lambda/v1beta2/zz_functionurl_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FunctionURL +func (mg *FunctionURL) GetTerraformResourceType() string { + return "aws_lambda_function_url" +} + +// GetConnectionDetailsMapping for this FunctionURL +func (tr *FunctionURL) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FunctionURL +func (tr *FunctionURL) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FunctionURL +func (tr *FunctionURL) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of 
underlying Terraform resource of this FunctionURL
+func (tr *FunctionURL) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this FunctionURL
+func (tr *FunctionURL) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this FunctionURL
+func (tr *FunctionURL) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this FunctionURL
+func (tr *FunctionURL) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this FunctionURL
+func (tr *FunctionURL) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this FunctionURL using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *FunctionURL) LateInitialize(attrs []byte) (bool, error) {
+	params := &FunctionURLParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *FunctionURL) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/lambda/v1beta2/zz_functionurl_types.go b/apis/lambda/v1beta2/zz_functionurl_types.go
new file mode 100755
index 0000000000..78692def88
--- /dev/null
+++ b/apis/lambda/v1beta2/zz_functionurl_types.go
@@ -0,0 +1,249 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0

+// Code generated by upjet. DO NOT EDIT.

+package v1beta2

+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"

+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)

+type CorsInitParameters struct {

+	// Whether to allow cookies or other credentials in requests to the function URL. The default is false.
+	AllowCredentials *bool `json:"allowCredentials,omitempty" tf:"allow_credentials,omitempty"`

+	// The HTTP headers that origins can include in requests to the function URL.
For example: ["date", "keep-alive", "x-custom-header"]. + // +listType=set + AllowHeaders []*string `json:"allowHeaders,omitempty" tf:"allow_headers,omitempty"` + + // The HTTP methods that are allowed when calling the function URL. For example: ["GET", "POST", "DELETE"], or the wildcard character (["*"]). + // +listType=set + AllowMethods []*string `json:"allowMethods,omitempty" tf:"allow_methods,omitempty"` + + // The origins that can access the function URL. You can list any number of specific origins (or the wildcard character ("*")), separated by a comma. For example: ["https://www.example.com", "http://localhost:60905"]. + // +listType=set + AllowOrigins []*string `json:"allowOrigins,omitempty" tf:"allow_origins,omitempty"` + + // The HTTP headers in your function response that you want to expose to origins that call the function URL. + // +listType=set + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // The maximum amount of time, in seconds, that web browsers can cache results of a preflight request. By default, this is set to 0, which means that the browser doesn't cache results. The maximum value is 86400. + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` +} + +type CorsObservation struct { + + // Whether to allow cookies or other credentials in requests to the function URL. The default is false. + AllowCredentials *bool `json:"allowCredentials,omitempty" tf:"allow_credentials,omitempty"` + + // The HTTP headers that origins can include in requests to the function URL. For example: ["date", "keep-alive", "x-custom-header"]. + // +listType=set + AllowHeaders []*string `json:"allowHeaders,omitempty" tf:"allow_headers,omitempty"` + + // The HTTP methods that are allowed when calling the function URL. For example: ["GET", "POST", "DELETE"], or the wildcard character (["*"]). 
+ // +listType=set + AllowMethods []*string `json:"allowMethods,omitempty" tf:"allow_methods,omitempty"` + + // The origins that can access the function URL. You can list any number of specific origins (or the wildcard character ("*")), separated by a comma. For example: ["https://www.example.com", "http://localhost:60905"]. + // +listType=set + AllowOrigins []*string `json:"allowOrigins,omitempty" tf:"allow_origins,omitempty"` + + // The HTTP headers in your function response that you want to expose to origins that call the function URL. + // +listType=set + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // The maximum amount of time, in seconds, that web browsers can cache results of a preflight request. By default, this is set to 0, which means that the browser doesn't cache results. The maximum value is 86400. + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` +} + +type CorsParameters struct { + + // Whether to allow cookies or other credentials in requests to the function URL. The default is false. + // +kubebuilder:validation:Optional + AllowCredentials *bool `json:"allowCredentials,omitempty" tf:"allow_credentials,omitempty"` + + // The HTTP headers that origins can include in requests to the function URL. For example: ["date", "keep-alive", "x-custom-header"]. + // +kubebuilder:validation:Optional + // +listType=set + AllowHeaders []*string `json:"allowHeaders,omitempty" tf:"allow_headers,omitempty"` + + // The HTTP methods that are allowed when calling the function URL. For example: ["GET", "POST", "DELETE"], or the wildcard character (["*"]). + // +kubebuilder:validation:Optional + // +listType=set + AllowMethods []*string `json:"allowMethods,omitempty" tf:"allow_methods,omitempty"` + + // The origins that can access the function URL. You can list any number of specific origins (or the wildcard character ("*")), separated by a comma. 
For example: ["https://www.example.com", "http://localhost:60905"]. + // +kubebuilder:validation:Optional + // +listType=set + AllowOrigins []*string `json:"allowOrigins,omitempty" tf:"allow_origins,omitempty"` + + // The HTTP headers in your function response that you want to expose to origins that call the function URL. + // +kubebuilder:validation:Optional + // +listType=set + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // The maximum amount of time, in seconds, that web browsers can cache results of a preflight request. By default, this is set to 0, which means that the browser doesn't cache results. The maximum value is 86400. + // +kubebuilder:validation:Optional + MaxAge *float64 `json:"maxAge,omitempty" tf:"max_age,omitempty"` +} + +type FunctionURLInitParameters struct { + + // The type of authentication that the function URL uses. Set to "AWS_IAM" to restrict access to authenticated IAM users only. Set to "NONE" to bypass IAM authentication and create a public endpoint. See the AWS documentation for more details. + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // The cross-origin resource sharing (CORS) settings for the function URL. Documented below. + Cors *CorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // The name (or ARN) of the Lambda function. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // Reference to a Function in lambda to populate functionName. + // +kubebuilder:validation:Optional + FunctionNameRef *v1.Reference `json:"functionNameRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate functionName. 
+ // +kubebuilder:validation:Optional + FunctionNameSelector *v1.Selector `json:"functionNameSelector,omitempty" tf:"-"` + + // Determines how the Lambda function responds to an invocation. Valid values are BUFFERED (default) and RESPONSE_STREAM. See more in Configuring a Lambda function to stream responses. + InvokeMode *string `json:"invokeMode,omitempty" tf:"invoke_mode,omitempty"` + + // The alias name or "$LATEST". + Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` +} + +type FunctionURLObservation struct { + + // The type of authentication that the function URL uses. Set to "AWS_IAM" to restrict access to authenticated IAM users only. Set to "NONE" to bypass IAM authentication and create a public endpoint. See the AWS documentation for more details. + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // The cross-origin resource sharing (CORS) settings for the function URL. Documented below. + Cors *CorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // The Amazon Resource Name (ARN) of the function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` + + // The name (or ARN) of the Lambda function. + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // The HTTP URL endpoint for the function in the format https://.lambda-url..on.aws/. + FunctionURL *string `json:"functionUrl,omitempty" tf:"function_url,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Determines how the Lambda function responds to an invocation. Valid values are BUFFERED (default) and RESPONSE_STREAM. See more in Configuring a Lambda function to stream responses. + InvokeMode *string `json:"invokeMode,omitempty" tf:"invoke_mode,omitempty"` + + // The alias name or "$LATEST". + Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` + + // A generated ID for the endpoint. 
+ URLID *string `json:"urlId,omitempty" tf:"url_id,omitempty"` +} + +type FunctionURLParameters struct { + + // The type of authentication that the function URL uses. Set to "AWS_IAM" to restrict access to authenticated IAM users only. Set to "NONE" to bypass IAM authentication and create a public endpoint. See the AWS documentation for more details. + // +kubebuilder:validation:Optional + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // The cross-origin resource sharing (CORS) settings for the function URL. Documented below. + // +kubebuilder:validation:Optional + Cors *CorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // The name (or ARN) of the Lambda function. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +kubebuilder:validation:Optional + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // Reference to a Function in lambda to populate functionName. + // +kubebuilder:validation:Optional + FunctionNameRef *v1.Reference `json:"functionNameRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate functionName. + // +kubebuilder:validation:Optional + FunctionNameSelector *v1.Selector `json:"functionNameSelector,omitempty" tf:"-"` + + // Determines how the Lambda function responds to an invocation. Valid values are BUFFERED (default) and RESPONSE_STREAM. See more in Configuring a Lambda function to stream responses. + // +kubebuilder:validation:Optional + InvokeMode *string `json:"invokeMode,omitempty" tf:"invoke_mode,omitempty"` + + // The alias name or "$LATEST". + // +kubebuilder:validation:Optional + Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// FunctionURLSpec defines the desired state of FunctionURL +type FunctionURLSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FunctionURLParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FunctionURLInitParameters `json:"initProvider,omitempty"` +} + +// FunctionURLStatus defines the observed state of FunctionURL. +type FunctionURLStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FunctionURLObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FunctionURL is the Schema for the FunctionURLs API. Provides a Lambda function URL resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type FunctionURL struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authorizationType) || (has(self.initProvider) && has(self.initProvider.authorizationType))",message="spec.forProvider.authorizationType is a required parameter" + Spec FunctionURLSpec `json:"spec"` + Status FunctionURLStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionURLList contains a list of FunctionURLs +type FunctionURLList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FunctionURL `json:"items"` +} + +// Repository type metadata. +var ( + FunctionURL_Kind = "FunctionURL" + FunctionURL_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FunctionURL_Kind}.String() + FunctionURL_KindAPIVersion = FunctionURL_Kind + "." 
+ CRDGroupVersion.String() + FunctionURL_GroupVersionKind = CRDGroupVersion.WithKind(FunctionURL_Kind) +) + +func init() { + SchemeBuilder.Register(&FunctionURL{}, &FunctionURLList{}) +} diff --git a/apis/lambda/v1beta2/zz_generated.conversion_hubs.go b/apis/lambda/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..f988ae9fd1 --- /dev/null +++ b/apis/lambda/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Alias) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *CodeSigningConfig) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *EventSourceMapping) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Function) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FunctionEventInvokeConfig) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FunctionURL) Hub() {} diff --git a/apis/lambda/v1beta2/zz_generated.deepcopy.go b/apis/lambda/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..411d7064d0 --- /dev/null +++ b/apis/lambda/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,4604 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Alias) DeepCopyInto(out *Alias) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alias. +func (in *Alias) DeepCopy() *Alias { + if in == nil { + return nil + } + out := new(Alias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Alias) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasInitParameters) DeepCopyInto(out *AliasInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FunctionVersion != nil { + in, out := &in.FunctionVersion, &out.FunctionVersion + *out = new(string) + **out = **in + } + if in.RoutingConfig != nil { + in, out := &in.RoutingConfig, &out.RoutingConfig + *out = new(RoutingConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasInitParameters. +func (in *AliasInitParameters) DeepCopy() *AliasInitParameters { + if in == nil { + return nil + } + out := new(AliasInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AliasList) DeepCopyInto(out *AliasList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Alias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasList. +func (in *AliasList) DeepCopy() *AliasList { + if in == nil { + return nil + } + out := new(AliasList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AliasList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasObservation) DeepCopyInto(out *AliasObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.FunctionVersion != nil { + in, out := &in.FunctionVersion, &out.FunctionVersion + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InvokeArn != nil { + in, out := &in.InvokeArn, &out.InvokeArn + *out = new(string) + **out = **in + } + if in.RoutingConfig != nil { + in, out := &in.RoutingConfig, &out.RoutingConfig + *out = new(RoutingConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasObservation. 
+func (in *AliasObservation) DeepCopy() *AliasObservation { + if in == nil { + return nil + } + out := new(AliasObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasParameters) DeepCopyInto(out *AliasParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.FunctionNameRef != nil { + in, out := &in.FunctionNameRef, &out.FunctionNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionNameSelector != nil { + in, out := &in.FunctionNameSelector, &out.FunctionNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FunctionVersion != nil { + in, out := &in.FunctionVersion, &out.FunctionVersion + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoutingConfig != nil { + in, out := &in.RoutingConfig, &out.RoutingConfig + *out = new(RoutingConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasParameters. +func (in *AliasParameters) DeepCopy() *AliasParameters { + if in == nil { + return nil + } + out := new(AliasParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasSpec) DeepCopyInto(out *AliasSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasSpec. 
+func (in *AliasSpec) DeepCopy() *AliasSpec { + if in == nil { + return nil + } + out := new(AliasSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasStatus) DeepCopyInto(out *AliasStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasStatus. +func (in *AliasStatus) DeepCopy() *AliasStatus { + if in == nil { + return nil + } + out := new(AliasStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedPublishersInitParameters) DeepCopyInto(out *AllowedPublishersInitParameters) { + *out = *in + if in.SigningProfileVersionArns != nil { + in, out := &in.SigningProfileVersionArns, &out.SigningProfileVersionArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SigningProfileVersionArnsRefs != nil { + in, out := &in.SigningProfileVersionArnsRefs, &out.SigningProfileVersionArnsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SigningProfileVersionArnsSelector != nil { + in, out := &in.SigningProfileVersionArnsSelector, &out.SigningProfileVersionArnsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedPublishersInitParameters. 
+func (in *AllowedPublishersInitParameters) DeepCopy() *AllowedPublishersInitParameters { + if in == nil { + return nil + } + out := new(AllowedPublishersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedPublishersObservation) DeepCopyInto(out *AllowedPublishersObservation) { + *out = *in + if in.SigningProfileVersionArns != nil { + in, out := &in.SigningProfileVersionArns, &out.SigningProfileVersionArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedPublishersObservation. +func (in *AllowedPublishersObservation) DeepCopy() *AllowedPublishersObservation { + if in == nil { + return nil + } + out := new(AllowedPublishersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AllowedPublishersParameters) DeepCopyInto(out *AllowedPublishersParameters) { + *out = *in + if in.SigningProfileVersionArns != nil { + in, out := &in.SigningProfileVersionArns, &out.SigningProfileVersionArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SigningProfileVersionArnsRefs != nil { + in, out := &in.SigningProfileVersionArnsRefs, &out.SigningProfileVersionArnsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SigningProfileVersionArnsSelector != nil { + in, out := &in.SigningProfileVersionArnsSelector, &out.SigningProfileVersionArnsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedPublishersParameters. +func (in *AllowedPublishersParameters) DeepCopy() *AllowedPublishersParameters { + if in == nil { + return nil + } + out := new(AllowedPublishersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonManagedKafkaEventSourceConfigInitParameters) DeepCopyInto(out *AmazonManagedKafkaEventSourceConfigInitParameters) { + *out = *in + if in.ConsumerGroupID != nil { + in, out := &in.ConsumerGroupID, &out.ConsumerGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonManagedKafkaEventSourceConfigInitParameters. 
+func (in *AmazonManagedKafkaEventSourceConfigInitParameters) DeepCopy() *AmazonManagedKafkaEventSourceConfigInitParameters { + if in == nil { + return nil + } + out := new(AmazonManagedKafkaEventSourceConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonManagedKafkaEventSourceConfigObservation) DeepCopyInto(out *AmazonManagedKafkaEventSourceConfigObservation) { + *out = *in + if in.ConsumerGroupID != nil { + in, out := &in.ConsumerGroupID, &out.ConsumerGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonManagedKafkaEventSourceConfigObservation. +func (in *AmazonManagedKafkaEventSourceConfigObservation) DeepCopy() *AmazonManagedKafkaEventSourceConfigObservation { + if in == nil { + return nil + } + out := new(AmazonManagedKafkaEventSourceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmazonManagedKafkaEventSourceConfigParameters) DeepCopyInto(out *AmazonManagedKafkaEventSourceConfigParameters) { + *out = *in + if in.ConsumerGroupID != nil { + in, out := &in.ConsumerGroupID, &out.ConsumerGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmazonManagedKafkaEventSourceConfigParameters. +func (in *AmazonManagedKafkaEventSourceConfigParameters) DeepCopy() *AmazonManagedKafkaEventSourceConfigParameters { + if in == nil { + return nil + } + out := new(AmazonManagedKafkaEventSourceConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeSigningConfig) DeepCopyInto(out *CodeSigningConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeSigningConfig. +func (in *CodeSigningConfig) DeepCopy() *CodeSigningConfig { + if in == nil { + return nil + } + out := new(CodeSigningConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CodeSigningConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeSigningConfigInitParameters) DeepCopyInto(out *CodeSigningConfigInitParameters) { + *out = *in + if in.AllowedPublishers != nil { + in, out := &in.AllowedPublishers, &out.AllowedPublishers + *out = new(AllowedPublishersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Policies != nil { + in, out := &in.Policies, &out.Policies + *out = new(PoliciesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeSigningConfigInitParameters. +func (in *CodeSigningConfigInitParameters) DeepCopy() *CodeSigningConfigInitParameters { + if in == nil { + return nil + } + out := new(CodeSigningConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeSigningConfigList) DeepCopyInto(out *CodeSigningConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CodeSigningConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeSigningConfigList. +func (in *CodeSigningConfigList) DeepCopy() *CodeSigningConfigList { + if in == nil { + return nil + } + out := new(CodeSigningConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CodeSigningConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeSigningConfigObservation) DeepCopyInto(out *CodeSigningConfigObservation) { + *out = *in + if in.AllowedPublishers != nil { + in, out := &in.AllowedPublishers, &out.AllowedPublishers + *out = new(AllowedPublishersObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ConfigID != nil { + in, out := &in.ConfigID, &out.ConfigID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastModified != nil { + in, out := &in.LastModified, &out.LastModified + *out = new(string) + **out = **in + } + if in.Policies != nil { + in, out := &in.Policies, &out.Policies + *out = new(PoliciesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new CodeSigningConfigObservation. +func (in *CodeSigningConfigObservation) DeepCopy() *CodeSigningConfigObservation { + if in == nil { + return nil + } + out := new(CodeSigningConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeSigningConfigParameters) DeepCopyInto(out *CodeSigningConfigParameters) { + *out = *in + if in.AllowedPublishers != nil { + in, out := &in.AllowedPublishers, &out.AllowedPublishers + *out = new(AllowedPublishersParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Policies != nil { + in, out := &in.Policies, &out.Policies + *out = new(PoliciesParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeSigningConfigParameters. +func (in *CodeSigningConfigParameters) DeepCopy() *CodeSigningConfigParameters { + if in == nil { + return nil + } + out := new(CodeSigningConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeSigningConfigSpec) DeepCopyInto(out *CodeSigningConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeSigningConfigSpec. 
+func (in *CodeSigningConfigSpec) DeepCopy() *CodeSigningConfigSpec { + if in == nil { + return nil + } + out := new(CodeSigningConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeSigningConfigStatus) DeepCopyInto(out *CodeSigningConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeSigningConfigStatus. +func (in *CodeSigningConfigStatus) DeepCopy() *CodeSigningConfigStatus { + if in == nil { + return nil + } + out := new(CodeSigningConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsInitParameters) DeepCopyInto(out *CorsInitParameters) { + *out = *in + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowOrigins != nil { + in, out := &in.AllowOrigins, &out.AllowOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if 
(*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsInitParameters. +func (in *CorsInitParameters) DeepCopy() *CorsInitParameters { + if in == nil { + return nil + } + out := new(CorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsObservation) DeepCopyInto(out *CorsObservation) { + *out = *in + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowOrigins != nil { + in, out := &in.AllowOrigins, &out.AllowOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsObservation. 
+func (in *CorsObservation) DeepCopy() *CorsObservation { + if in == nil { + return nil + } + out := new(CorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsParameters) DeepCopyInto(out *CorsParameters) { + *out = *in + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowOrigins != nil { + in, out := &in.AllowOrigins, &out.AllowOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsParameters. +func (in *CorsParameters) DeepCopy() *CorsParameters { + if in == nil { + return nil + } + out := new(CorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeadLetterConfigInitParameters) DeepCopyInto(out *DeadLetterConfigInitParameters) { + *out = *in + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfigInitParameters. +func (in *DeadLetterConfigInitParameters) DeepCopy() *DeadLetterConfigInitParameters { + if in == nil { + return nil + } + out := new(DeadLetterConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterConfigObservation) DeepCopyInto(out *DeadLetterConfigObservation) { + *out = *in + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfigObservation. +func (in *DeadLetterConfigObservation) DeepCopy() *DeadLetterConfigObservation { + if in == nil { + return nil + } + out := new(DeadLetterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterConfigParameters) DeepCopyInto(out *DeadLetterConfigParameters) { + *out = *in + if in.TargetArn != nil { + in, out := &in.TargetArn, &out.TargetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfigParameters. +func (in *DeadLetterConfigParameters) DeepCopy() *DeadLetterConfigParameters { + if in == nil { + return nil + } + out := new(DeadLetterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationConfigInitParameters) DeepCopyInto(out *DestinationConfigInitParameters) { + *out = *in + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(OnFailureInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigInitParameters. +func (in *DestinationConfigInitParameters) DeepCopy() *DestinationConfigInitParameters { + if in == nil { + return nil + } + out := new(DestinationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationConfigObservation) DeepCopyInto(out *DestinationConfigObservation) { + *out = *in + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(OnFailureObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigObservation. +func (in *DestinationConfigObservation) DeepCopy() *DestinationConfigObservation { + if in == nil { + return nil + } + out := new(DestinationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationConfigOnFailureInitParameters) DeepCopyInto(out *DestinationConfigOnFailureInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.DestinationRef != nil { + in, out := &in.DestinationRef, &out.DestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DestinationSelector != nil { + in, out := &in.DestinationSelector, &out.DestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigOnFailureInitParameters. +func (in *DestinationConfigOnFailureInitParameters) DeepCopy() *DestinationConfigOnFailureInitParameters { + if in == nil { + return nil + } + out := new(DestinationConfigOnFailureInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationConfigOnFailureObservation) DeepCopyInto(out *DestinationConfigOnFailureObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigOnFailureObservation. +func (in *DestinationConfigOnFailureObservation) DeepCopy() *DestinationConfigOnFailureObservation { + if in == nil { + return nil + } + out := new(DestinationConfigOnFailureObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationConfigOnFailureParameters) DeepCopyInto(out *DestinationConfigOnFailureParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.DestinationRef != nil { + in, out := &in.DestinationRef, &out.DestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DestinationSelector != nil { + in, out := &in.DestinationSelector, &out.DestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigOnFailureParameters. +func (in *DestinationConfigOnFailureParameters) DeepCopy() *DestinationConfigOnFailureParameters { + if in == nil { + return nil + } + out := new(DestinationConfigOnFailureParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationConfigParameters) DeepCopyInto(out *DestinationConfigParameters) { + *out = *in + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(OnFailureParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationConfigParameters. +func (in *DestinationConfigParameters) DeepCopy() *DestinationConfigParameters { + if in == nil { + return nil + } + out := new(DestinationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DocumentDBEventSourceConfigInitParameters) DeepCopyInto(out *DocumentDBEventSourceConfigInitParameters) { + *out = *in + if in.CollectionName != nil { + in, out := &in.CollectionName, &out.CollectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.FullDocument != nil { + in, out := &in.FullDocument, &out.FullDocument + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentDBEventSourceConfigInitParameters. +func (in *DocumentDBEventSourceConfigInitParameters) DeepCopy() *DocumentDBEventSourceConfigInitParameters { + if in == nil { + return nil + } + out := new(DocumentDBEventSourceConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DocumentDBEventSourceConfigObservation) DeepCopyInto(out *DocumentDBEventSourceConfigObservation) { + *out = *in + if in.CollectionName != nil { + in, out := &in.CollectionName, &out.CollectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.FullDocument != nil { + in, out := &in.FullDocument, &out.FullDocument + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentDBEventSourceConfigObservation. +func (in *DocumentDBEventSourceConfigObservation) DeepCopy() *DocumentDBEventSourceConfigObservation { + if in == nil { + return nil + } + out := new(DocumentDBEventSourceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DocumentDBEventSourceConfigParameters) DeepCopyInto(out *DocumentDBEventSourceConfigParameters) { + *out = *in + if in.CollectionName != nil { + in, out := &in.CollectionName, &out.CollectionName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.FullDocument != nil { + in, out := &in.FullDocument, &out.FullDocument + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocumentDBEventSourceConfigParameters. +func (in *DocumentDBEventSourceConfigParameters) DeepCopy() *DocumentDBEventSourceConfigParameters { + if in == nil { + return nil + } + out := new(DocumentDBEventSourceConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentInitParameters) DeepCopyInto(out *EnvironmentInitParameters) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentInitParameters. +func (in *EnvironmentInitParameters) DeepCopy() *EnvironmentInitParameters { + if in == nil { + return nil + } + out := new(EnvironmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvironmentObservation) DeepCopyInto(out *EnvironmentObservation) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentObservation. +func (in *EnvironmentObservation) DeepCopy() *EnvironmentObservation { + if in == nil { + return nil + } + out := new(EnvironmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentParameters) DeepCopyInto(out *EnvironmentParameters) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentParameters. +func (in *EnvironmentParameters) DeepCopy() *EnvironmentParameters { + if in == nil { + return nil + } + out := new(EnvironmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralStorageInitParameters) DeepCopyInto(out *EphemeralStorageInitParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralStorageInitParameters. +func (in *EphemeralStorageInitParameters) DeepCopy() *EphemeralStorageInitParameters { + if in == nil { + return nil + } + out := new(EphemeralStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralStorageObservation) DeepCopyInto(out *EphemeralStorageObservation) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralStorageObservation. +func (in *EphemeralStorageObservation) DeepCopy() *EphemeralStorageObservation { + if in == nil { + return nil + } + out := new(EphemeralStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralStorageParameters) DeepCopyInto(out *EphemeralStorageParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralStorageParameters. +func (in *EphemeralStorageParameters) DeepCopy() *EphemeralStorageParameters { + if in == nil { + return nil + } + out := new(EphemeralStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventSourceMapping) DeepCopyInto(out *EventSourceMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceMapping. +func (in *EventSourceMapping) DeepCopy() *EventSourceMapping { + if in == nil { + return nil + } + out := new(EventSourceMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventSourceMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSourceMappingInitParameters) DeepCopyInto(out *EventSourceMappingInitParameters) { + *out = *in + if in.AmazonManagedKafkaEventSourceConfig != nil { + in, out := &in.AmazonManagedKafkaEventSourceConfig, &out.AmazonManagedKafkaEventSourceConfig + *out = new(AmazonManagedKafkaEventSourceConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BisectBatchOnFunctionError != nil { + in, out := &in.BisectBatchOnFunctionError, &out.BisectBatchOnFunctionError + *out = new(bool) + **out = **in + } + if in.DestinationConfig != nil { + in, out := &in.DestinationConfig, &out.DestinationConfig + *out = new(DestinationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DocumentDBEventSourceConfig != nil { + in, out := &in.DocumentDBEventSourceConfig, &out.DocumentDBEventSourceConfig + *out = new(DocumentDBEventSourceConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if 
in.EventSourceArn != nil { + in, out := &in.EventSourceArn, &out.EventSourceArn + *out = new(string) + **out = **in + } + if in.FilterCriteria != nil { + in, out := &in.FilterCriteria, &out.FilterCriteria + *out = new(FilterCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.FunctionNameRef != nil { + in, out := &in.FunctionNameRef, &out.FunctionNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionNameSelector != nil { + in, out := &in.FunctionNameSelector, &out.FunctionNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FunctionResponseTypes != nil { + in, out := &in.FunctionResponseTypes, &out.FunctionResponseTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRecordAgeInSeconds != nil { + in, out := &in.MaximumRecordAgeInSeconds, &out.MaximumRecordAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } + if in.ParallelizationFactor != nil { + in, out := &in.ParallelizationFactor, &out.ParallelizationFactor + *out = new(float64) + **out = **in + } + if in.Queues != nil { + in, out := &in.Queues, &out.Queues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScalingConfig != nil { + in, out := &in.ScalingConfig, &out.ScalingConfig + *out = new(ScalingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.SelfManagedEventSource != nil { + in, out := &in.SelfManagedEventSource, &out.SelfManagedEventSource + *out = new(SelfManagedEventSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SelfManagedKafkaEventSourceConfig != nil { + in, out := &in.SelfManagedKafkaEventSourceConfig, &out.SelfManagedKafkaEventSourceConfig + *out = new(SelfManagedKafkaEventSourceConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceAccessConfiguration != nil { + in, out := &in.SourceAccessConfiguration, &out.SourceAccessConfiguration + *out = make([]SourceAccessConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } + if in.StartingPositionTimestamp != nil { + in, out := &in.StartingPositionTimestamp, &out.StartingPositionTimestamp + *out = new(string) + **out = **in + } + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TumblingWindowInSeconds != nil { + in, out := &in.TumblingWindowInSeconds, &out.TumblingWindowInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceMappingInitParameters. +func (in *EventSourceMappingInitParameters) DeepCopy() *EventSourceMappingInitParameters { + if in == nil { + return nil + } + out := new(EventSourceMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventSourceMappingList) DeepCopyInto(out *EventSourceMappingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EventSourceMapping, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceMappingList. +func (in *EventSourceMappingList) DeepCopy() *EventSourceMappingList { + if in == nil { + return nil + } + out := new(EventSourceMappingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventSourceMappingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSourceMappingObservation) DeepCopyInto(out *EventSourceMappingObservation) { + *out = *in + if in.AmazonManagedKafkaEventSourceConfig != nil { + in, out := &in.AmazonManagedKafkaEventSourceConfig, &out.AmazonManagedKafkaEventSourceConfig + *out = new(AmazonManagedKafkaEventSourceConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BisectBatchOnFunctionError != nil { + in, out := &in.BisectBatchOnFunctionError, &out.BisectBatchOnFunctionError + *out = new(bool) + **out = **in + } + if in.DestinationConfig != nil { + in, out := &in.DestinationConfig, &out.DestinationConfig + *out = new(DestinationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.DocumentDBEventSourceConfig != nil { + in, out := &in.DocumentDBEventSourceConfig, &out.DocumentDBEventSourceConfig + *out = new(DocumentDBEventSourceConfigObservation) + (*in).DeepCopyInto(*out) + } + if 
in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventSourceArn != nil { + in, out := &in.EventSourceArn, &out.EventSourceArn + *out = new(string) + **out = **in + } + if in.FilterCriteria != nil { + in, out := &in.FilterCriteria, &out.FilterCriteria + *out = new(FilterCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.FunctionResponseTypes != nil { + in, out := &in.FunctionResponseTypes, &out.FunctionResponseTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastModified != nil { + in, out := &in.LastModified, &out.LastModified + *out = new(string) + **out = **in + } + if in.LastProcessingResult != nil { + in, out := &in.LastProcessingResult, &out.LastProcessingResult + *out = new(string) + **out = **in + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRecordAgeInSeconds != nil { + in, out := &in.MaximumRecordAgeInSeconds, &out.MaximumRecordAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } + if in.ParallelizationFactor != nil { + in, out := &in.ParallelizationFactor, &out.ParallelizationFactor + *out = new(float64) + **out = **in + } + if in.Queues != nil { + in, out := &in.Queues, &out.Queues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { 
+ in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScalingConfig != nil { + in, out := &in.ScalingConfig, &out.ScalingConfig + *out = new(ScalingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SelfManagedEventSource != nil { + in, out := &in.SelfManagedEventSource, &out.SelfManagedEventSource + *out = new(SelfManagedEventSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.SelfManagedKafkaEventSourceConfig != nil { + in, out := &in.SelfManagedKafkaEventSourceConfig, &out.SelfManagedKafkaEventSourceConfig + *out = new(SelfManagedKafkaEventSourceConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SourceAccessConfiguration != nil { + in, out := &in.SourceAccessConfiguration, &out.SourceAccessConfiguration + *out = make([]SourceAccessConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } + if in.StartingPositionTimestamp != nil { + in, out := &in.StartingPositionTimestamp, &out.StartingPositionTimestamp + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.StateTransitionReason != nil { + in, out := &in.StateTransitionReason, &out.StateTransitionReason + *out = new(string) + **out = **in + } + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TumblingWindowInSeconds != nil { + in, out := &in.TumblingWindowInSeconds, &out.TumblingWindowInSeconds + *out = new(float64) + **out = **in + } + if in.UUID != nil { + in, out := &in.UUID, &out.UUID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new EventSourceMappingObservation. +func (in *EventSourceMappingObservation) DeepCopy() *EventSourceMappingObservation { + if in == nil { + return nil + } + out := new(EventSourceMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSourceMappingParameters) DeepCopyInto(out *EventSourceMappingParameters) { + *out = *in + if in.AmazonManagedKafkaEventSourceConfig != nil { + in, out := &in.AmazonManagedKafkaEventSourceConfig, &out.AmazonManagedKafkaEventSourceConfig + *out = new(AmazonManagedKafkaEventSourceConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BisectBatchOnFunctionError != nil { + in, out := &in.BisectBatchOnFunctionError, &out.BisectBatchOnFunctionError + *out = new(bool) + **out = **in + } + if in.DestinationConfig != nil { + in, out := &in.DestinationConfig, &out.DestinationConfig + *out = new(DestinationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.DocumentDBEventSourceConfig != nil { + in, out := &in.DocumentDBEventSourceConfig, &out.DocumentDBEventSourceConfig + *out = new(DocumentDBEventSourceConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventSourceArn != nil { + in, out := &in.EventSourceArn, &out.EventSourceArn + *out = new(string) + **out = **in + } + if in.FilterCriteria != nil { + in, out := &in.FilterCriteria, &out.FilterCriteria + *out = new(FilterCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.FunctionNameRef != nil { + in, out := &in.FunctionNameRef, &out.FunctionNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.FunctionNameSelector != nil { + in, out := &in.FunctionNameSelector, &out.FunctionNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FunctionResponseTypes != nil { + in, out := &in.FunctionResponseTypes, &out.FunctionResponseTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaximumBatchingWindowInSeconds != nil { + in, out := &in.MaximumBatchingWindowInSeconds, &out.MaximumBatchingWindowInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRecordAgeInSeconds != nil { + in, out := &in.MaximumRecordAgeInSeconds, &out.MaximumRecordAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } + if in.ParallelizationFactor != nil { + in, out := &in.ParallelizationFactor, &out.ParallelizationFactor + *out = new(float64) + **out = **in + } + if in.Queues != nil { + in, out := &in.Queues, &out.Queues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ScalingConfig != nil { + in, out := &in.ScalingConfig, &out.ScalingConfig + *out = new(ScalingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.SelfManagedEventSource != nil { + in, out := &in.SelfManagedEventSource, &out.SelfManagedEventSource + *out = new(SelfManagedEventSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.SelfManagedKafkaEventSourceConfig != nil { + in, out := &in.SelfManagedKafkaEventSourceConfig, &out.SelfManagedKafkaEventSourceConfig + *out = new(SelfManagedKafkaEventSourceConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceAccessConfiguration != nil { + in, out := 
&in.SourceAccessConfiguration, &out.SourceAccessConfiguration + *out = make([]SourceAccessConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartingPosition != nil { + in, out := &in.StartingPosition, &out.StartingPosition + *out = new(string) + **out = **in + } + if in.StartingPositionTimestamp != nil { + in, out := &in.StartingPositionTimestamp, &out.StartingPositionTimestamp + *out = new(string) + **out = **in + } + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TumblingWindowInSeconds != nil { + in, out := &in.TumblingWindowInSeconds, &out.TumblingWindowInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceMappingParameters. +func (in *EventSourceMappingParameters) DeepCopy() *EventSourceMappingParameters { + if in == nil { + return nil + } + out := new(EventSourceMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSourceMappingSpec) DeepCopyInto(out *EventSourceMappingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceMappingSpec. +func (in *EventSourceMappingSpec) DeepCopy() *EventSourceMappingSpec { + if in == nil { + return nil + } + out := new(EventSourceMappingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventSourceMappingStatus) DeepCopyInto(out *EventSourceMappingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceMappingStatus. +func (in *EventSourceMappingStatus) DeepCopy() *EventSourceMappingStatus { + if in == nil { + return nil + } + out := new(EventSourceMappingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemConfigInitParameters) DeepCopyInto(out *FileSystemConfigInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LocalMountPath != nil { + in, out := &in.LocalMountPath, &out.LocalMountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemConfigInitParameters. +func (in *FileSystemConfigInitParameters) DeepCopy() *FileSystemConfigInitParameters { + if in == nil { + return nil + } + out := new(FileSystemConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileSystemConfigObservation) DeepCopyInto(out *FileSystemConfigObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.LocalMountPath != nil { + in, out := &in.LocalMountPath, &out.LocalMountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemConfigObservation. +func (in *FileSystemConfigObservation) DeepCopy() *FileSystemConfigObservation { + if in == nil { + return nil + } + out := new(FileSystemConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemConfigParameters) DeepCopyInto(out *FileSystemConfigParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LocalMountPath != nil { + in, out := &in.LocalMountPath, &out.LocalMountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemConfigParameters. +func (in *FileSystemConfigParameters) DeepCopy() *FileSystemConfigParameters { + if in == nil { + return nil + } + out := new(FileSystemConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterCriteriaInitParameters) DeepCopyInto(out *FilterCriteriaInitParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]FilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterCriteriaInitParameters. +func (in *FilterCriteriaInitParameters) DeepCopy() *FilterCriteriaInitParameters { + if in == nil { + return nil + } + out := new(FilterCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterCriteriaObservation) DeepCopyInto(out *FilterCriteriaObservation) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]FilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterCriteriaObservation. +func (in *FilterCriteriaObservation) DeepCopy() *FilterCriteriaObservation { + if in == nil { + return nil + } + out := new(FilterCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterCriteriaParameters) DeepCopyInto(out *FilterCriteriaParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]FilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterCriteriaParameters. 
+func (in *FilterCriteriaParameters) DeepCopy() *FilterCriteriaParameters { + if in == nil { + return nil + } + out := new(FilterCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. +func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. 
+func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionEventInvokeConfig) DeepCopyInto(out *FunctionEventInvokeConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfig. +func (in *FunctionEventInvokeConfig) DeepCopy() *FunctionEventInvokeConfig { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionEventInvokeConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *FunctionEventInvokeConfigDestinationConfigInitParameters) DeepCopyInto(out *FunctionEventInvokeConfigDestinationConfigInitParameters) { + *out = *in + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(DestinationConfigOnFailureInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OnSuccess != nil { + in, out := &in.OnSuccess, &out.OnSuccess + *out = new(OnSuccessInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfigDestinationConfigInitParameters. +func (in *FunctionEventInvokeConfigDestinationConfigInitParameters) DeepCopy() *FunctionEventInvokeConfigDestinationConfigInitParameters { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfigDestinationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionEventInvokeConfigDestinationConfigObservation) DeepCopyInto(out *FunctionEventInvokeConfigDestinationConfigObservation) { + *out = *in + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(DestinationConfigOnFailureObservation) + (*in).DeepCopyInto(*out) + } + if in.OnSuccess != nil { + in, out := &in.OnSuccess, &out.OnSuccess + *out = new(OnSuccessObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfigDestinationConfigObservation. +func (in *FunctionEventInvokeConfigDestinationConfigObservation) DeepCopy() *FunctionEventInvokeConfigDestinationConfigObservation { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfigDestinationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *FunctionEventInvokeConfigDestinationConfigParameters) DeepCopyInto(out *FunctionEventInvokeConfigDestinationConfigParameters) { + *out = *in + if in.OnFailure != nil { + in, out := &in.OnFailure, &out.OnFailure + *out = new(DestinationConfigOnFailureParameters) + (*in).DeepCopyInto(*out) + } + if in.OnSuccess != nil { + in, out := &in.OnSuccess, &out.OnSuccess + *out = new(OnSuccessParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfigDestinationConfigParameters. +func (in *FunctionEventInvokeConfigDestinationConfigParameters) DeepCopy() *FunctionEventInvokeConfigDestinationConfigParameters { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfigDestinationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionEventInvokeConfigInitParameters) DeepCopyInto(out *FunctionEventInvokeConfigInitParameters) { + *out = *in + if in.DestinationConfig != nil { + in, out := &in.DestinationConfig, &out.DestinationConfig + *out = new(FunctionEventInvokeConfigDestinationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.MaximumEventAgeInSeconds != nil { + in, out := &in.MaximumEventAgeInSeconds, &out.MaximumEventAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } + if in.Qualifier != nil { + in, out := &in.Qualifier, &out.Qualifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfigInitParameters. 
+func (in *FunctionEventInvokeConfigInitParameters) DeepCopy() *FunctionEventInvokeConfigInitParameters { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionEventInvokeConfigList) DeepCopyInto(out *FunctionEventInvokeConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FunctionEventInvokeConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfigList. +func (in *FunctionEventInvokeConfigList) DeepCopy() *FunctionEventInvokeConfigList { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionEventInvokeConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionEventInvokeConfigObservation) DeepCopyInto(out *FunctionEventInvokeConfigObservation) { + *out = *in + if in.DestinationConfig != nil { + in, out := &in.DestinationConfig, &out.DestinationConfig + *out = new(FunctionEventInvokeConfigDestinationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaximumEventAgeInSeconds != nil { + in, out := &in.MaximumEventAgeInSeconds, &out.MaximumEventAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } + if in.Qualifier != nil { + in, out := &in.Qualifier, &out.Qualifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfigObservation. +func (in *FunctionEventInvokeConfigObservation) DeepCopy() *FunctionEventInvokeConfigObservation { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionEventInvokeConfigParameters) DeepCopyInto(out *FunctionEventInvokeConfigParameters) { + *out = *in + if in.DestinationConfig != nil { + in, out := &in.DestinationConfig, &out.DestinationConfig + *out = new(FunctionEventInvokeConfigDestinationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.MaximumEventAgeInSeconds != nil { + in, out := &in.MaximumEventAgeInSeconds, &out.MaximumEventAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } + if in.Qualifier != nil { + in, out := &in.Qualifier, &out.Qualifier + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfigParameters. +func (in *FunctionEventInvokeConfigParameters) DeepCopy() *FunctionEventInvokeConfigParameters { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionEventInvokeConfigSpec) DeepCopyInto(out *FunctionEventInvokeConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfigSpec. 
+func (in *FunctionEventInvokeConfigSpec) DeepCopy() *FunctionEventInvokeConfigSpec { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionEventInvokeConfigStatus) DeepCopyInto(out *FunctionEventInvokeConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionEventInvokeConfigStatus. +func (in *FunctionEventInvokeConfigStatus) DeepCopy() *FunctionEventInvokeConfigStatus { + if in == nil { + return nil + } + out := new(FunctionEventInvokeConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionInitParameters) DeepCopyInto(out *FunctionInitParameters) { + *out = *in + if in.Architectures != nil { + in, out := &in.Architectures, &out.Architectures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CodeSigningConfigArn != nil { + in, out := &in.CodeSigningConfigArn, &out.CodeSigningConfigArn + *out = new(string) + **out = **in + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(EnvironmentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = 
new(EphemeralStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(FileSystemConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = new(string) + **out = **in + } + if in.ImageConfig != nil { + in, out := &in.ImageConfig, &out.ImageConfig + *out = new(ImageConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageURI != nil { + in, out := &in.ImageURI, &out.ImageURI + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = new(LoggingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MemorySize != nil { + in, out := &in.MemorySize, &out.MemorySize + *out = new(float64) + **out = **in + } + if in.PackageType != nil { + in, out := &in.PackageType, &out.PackageType + *out = new(string) + **out = **in + } + if in.Publish != nil { + in, out := &in.Publish, &out.Publish + *out = new(bool) + **out = **in + } + if in.ReplaceSecurityGroupsOnDestroy != nil { + in, out := &in.ReplaceSecurityGroupsOnDestroy, &out.ReplaceSecurityGroupsOnDestroy + *out = new(bool) + **out = **in + } + if in.ReplacementSecurityGroupIDRefs != nil { + in, out := &in.ReplacementSecurityGroupIDRefs, 
&out.ReplacementSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReplacementSecurityGroupIDSelector != nil { + in, out := &in.ReplacementSecurityGroupIDSelector, &out.ReplacementSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReplacementSecurityGroupIds != nil { + in, out := &in.ReplacementSecurityGroupIds, &out.ReplacementSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReservedConcurrentExecutions != nil { + in, out := &in.ReservedConcurrentExecutions, &out.ReservedConcurrentExecutions + *out = new(float64) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.RoleRef != nil { + in, out := &in.RoleRef, &out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } + if in.S3BucketRef != nil { + in, out := &in.S3BucketRef, &out.S3BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.S3BucketSelector != nil { + in, out := &in.S3BucketSelector, &out.S3BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3Key != nil { + in, out := &in.S3Key, &out.S3Key + *out = new(string) + **out = **in + } + if in.S3ObjectVersion != nil { + in, out := &in.S3ObjectVersion, &out.S3ObjectVersion + *out = new(string) + **out = **in + } + if in.SkipDestroy != nil { + in, out := &in.SkipDestroy, &out.SkipDestroy + *out = new(bool) + **out = **in + } + if 
in.SnapStart != nil { + in, out := &in.SnapStart, &out.SnapStart + *out = new(SnapStartInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceCodeHash != nil { + in, out := &in.SourceCodeHash, &out.SourceCodeHash + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.TracingConfig != nil { + in, out := &in.TracingConfig, &out.TracingConfig + *out = new(TracingConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionInitParameters. +func (in *FunctionInitParameters) DeepCopy() *FunctionInitParameters { + if in == nil { + return nil + } + out := new(FunctionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionList) DeepCopyInto(out *FunctionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Function, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList. 
+func (in *FunctionList) DeepCopy() *FunctionList { + if in == nil { + return nil + } + out := new(FunctionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionObservation) DeepCopyInto(out *FunctionObservation) { + *out = *in + if in.Architectures != nil { + in, out := &in.Architectures, &out.Architectures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CodeSigningConfigArn != nil { + in, out := &in.CodeSigningConfigArn, &out.CodeSigningConfigArn + *out = new(string) + **out = **in + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(EnvironmentObservation) + (*in).DeepCopyInto(*out) + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = new(EphemeralStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(FileSystemConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = 
new(string) + **out = **in + } + if in.ImageConfig != nil { + in, out := &in.ImageConfig, &out.ImageConfig + *out = new(ImageConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageURI != nil { + in, out := &in.ImageURI, &out.ImageURI + *out = new(string) + **out = **in + } + if in.InvokeArn != nil { + in, out := &in.InvokeArn, &out.InvokeArn + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.LastModified != nil { + in, out := &in.LastModified, &out.LastModified + *out = new(string) + **out = **in + } + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = new(LoggingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.MemorySize != nil { + in, out := &in.MemorySize, &out.MemorySize + *out = new(float64) + **out = **in + } + if in.PackageType != nil { + in, out := &in.PackageType, &out.PackageType + *out = new(string) + **out = **in + } + if in.Publish != nil { + in, out := &in.Publish, &out.Publish + *out = new(bool) + **out = **in + } + if in.QualifiedArn != nil { + in, out := &in.QualifiedArn, &out.QualifiedArn + *out = new(string) + **out = **in + } + if in.QualifiedInvokeArn != nil { + in, out := &in.QualifiedInvokeArn, &out.QualifiedInvokeArn + *out = new(string) + **out = **in + } + if in.ReplaceSecurityGroupsOnDestroy != nil { + in, out := &in.ReplaceSecurityGroupsOnDestroy, &out.ReplaceSecurityGroupsOnDestroy + *out = new(bool) + **out = **in + } + if in.ReplacementSecurityGroupIds != nil { + in, out := &in.ReplacementSecurityGroupIds, &out.ReplacementSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + 
*out = new(string) + **out = **in + } + } + } + if in.ReservedConcurrentExecutions != nil { + in, out := &in.ReservedConcurrentExecutions, &out.ReservedConcurrentExecutions + *out = new(float64) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } + if in.S3Key != nil { + in, out := &in.S3Key, &out.S3Key + *out = new(string) + **out = **in + } + if in.S3ObjectVersion != nil { + in, out := &in.S3ObjectVersion, &out.S3ObjectVersion + *out = new(string) + **out = **in + } + if in.SigningJobArn != nil { + in, out := &in.SigningJobArn, &out.SigningJobArn + *out = new(string) + **out = **in + } + if in.SigningProfileVersionArn != nil { + in, out := &in.SigningProfileVersionArn, &out.SigningProfileVersionArn + *out = new(string) + **out = **in + } + if in.SkipDestroy != nil { + in, out := &in.SkipDestroy, &out.SkipDestroy + *out = new(bool) + **out = **in + } + if in.SnapStart != nil { + in, out := &in.SnapStart, &out.SnapStart + *out = new(SnapStartObservation) + (*in).DeepCopyInto(*out) + } + if in.SourceCodeHash != nil { + in, out := &in.SourceCodeHash, &out.SourceCodeHash + *out = new(string) + **out = **in + } + if in.SourceCodeSize != nil { + in, out := &in.SourceCodeSize, &out.SourceCodeSize + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal 
*string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.TracingConfig != nil { + in, out := &in.TracingConfig, &out.TracingConfig + *out = new(TracingConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionObservation. +func (in *FunctionObservation) DeepCopy() *FunctionObservation { + if in == nil { + return nil + } + out := new(FunctionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionParameters) DeepCopyInto(out *FunctionParameters) { + *out = *in + if in.Architectures != nil { + in, out := &in.Architectures, &out.Architectures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CodeSigningConfigArn != nil { + in, out := &in.CodeSigningConfigArn, &out.CodeSigningConfigArn + *out = new(string) + **out = **in + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(EnvironmentParameters) + (*in).DeepCopyInto(*out) + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = new(EphemeralStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(FileSystemConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = new(string) + **out = **in + } + if in.ImageConfig != nil { + in, out := &in.ImageConfig, &out.ImageConfig + *out = new(ImageConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageURI != nil { + in, out := &in.ImageURI, &out.ImageURI + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Layers != nil { + 
in, out := &in.Layers, &out.Layers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = new(LoggingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.MemorySize != nil { + in, out := &in.MemorySize, &out.MemorySize + *out = new(float64) + **out = **in + } + if in.PackageType != nil { + in, out := &in.PackageType, &out.PackageType + *out = new(string) + **out = **in + } + if in.Publish != nil { + in, out := &in.Publish, &out.Publish + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReplaceSecurityGroupsOnDestroy != nil { + in, out := &in.ReplaceSecurityGroupsOnDestroy, &out.ReplaceSecurityGroupsOnDestroy + *out = new(bool) + **out = **in + } + if in.ReplacementSecurityGroupIDRefs != nil { + in, out := &in.ReplacementSecurityGroupIDRefs, &out.ReplacementSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReplacementSecurityGroupIDSelector != nil { + in, out := &in.ReplacementSecurityGroupIDSelector, &out.ReplacementSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReplacementSecurityGroupIds != nil { + in, out := &in.ReplacementSecurityGroupIds, &out.ReplacementSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReservedConcurrentExecutions != nil { + in, out := &in.ReservedConcurrentExecutions, &out.ReservedConcurrentExecutions + *out = new(float64) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.RoleRef != nil { + in, out := &in.RoleRef, 
&out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(string) + **out = **in + } + if in.S3BucketRef != nil { + in, out := &in.S3BucketRef, &out.S3BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.S3BucketSelector != nil { + in, out := &in.S3BucketSelector, &out.S3BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3Key != nil { + in, out := &in.S3Key, &out.S3Key + *out = new(string) + **out = **in + } + if in.S3ObjectVersion != nil { + in, out := &in.S3ObjectVersion, &out.S3ObjectVersion + *out = new(string) + **out = **in + } + if in.SkipDestroy != nil { + in, out := &in.SkipDestroy, &out.SkipDestroy + *out = new(bool) + **out = **in + } + if in.SnapStart != nil { + in, out := &in.SnapStart, &out.SnapStart + *out = new(SnapStartParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceCodeHash != nil { + in, out := &in.SourceCodeHash, &out.SourceCodeHash + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.TracingConfig != nil { + in, out := &in.TracingConfig, &out.TracingConfig + *out = new(TracingConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigParameters) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionParameters. +func (in *FunctionParameters) DeepCopy() *FunctionParameters { + if in == nil { + return nil + } + out := new(FunctionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. +func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. +func (in *FunctionStatus) DeepCopy() *FunctionStatus { + if in == nil { + return nil + } + out := new(FunctionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionURL) DeepCopyInto(out *FunctionURL) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionURL. 
+func (in *FunctionURL) DeepCopy() *FunctionURL { + if in == nil { + return nil + } + out := new(FunctionURL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionURL) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionURLInitParameters) DeepCopyInto(out *FunctionURLInitParameters) { + *out = *in + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.FunctionNameRef != nil { + in, out := &in.FunctionNameRef, &out.FunctionNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionNameSelector != nil { + in, out := &in.FunctionNameSelector, &out.FunctionNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InvokeMode != nil { + in, out := &in.InvokeMode, &out.InvokeMode + *out = new(string) + **out = **in + } + if in.Qualifier != nil { + in, out := &in.Qualifier, &out.Qualifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionURLInitParameters. +func (in *FunctionURLInitParameters) DeepCopy() *FunctionURLInitParameters { + if in == nil { + return nil + } + out := new(FunctionURLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionURLList) DeepCopyInto(out *FunctionURLList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FunctionURL, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionURLList. +func (in *FunctionURLList) DeepCopy() *FunctionURLList { + if in == nil { + return nil + } + out := new(FunctionURLList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionURLList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionURLObservation) DeepCopyInto(out *FunctionURLObservation) { + *out = *in + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsObservation) + (*in).DeepCopyInto(*out) + } + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.FunctionURL != nil { + in, out := &in.FunctionURL, &out.FunctionURL + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InvokeMode != nil { + in, out := &in.InvokeMode, &out.InvokeMode + *out = new(string) + **out = **in + } + if in.Qualifier != nil { + in, out := &in.Qualifier, &out.Qualifier + *out = new(string) + **out = **in + } + if in.URLID != nil { + in, out := &in.URLID, 
&out.URLID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionURLObservation. +func (in *FunctionURLObservation) DeepCopy() *FunctionURLObservation { + if in == nil { + return nil + } + out := new(FunctionURLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionURLParameters) DeepCopyInto(out *FunctionURLParameters) { + *out = *in + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsParameters) + (*in).DeepCopyInto(*out) + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.FunctionNameRef != nil { + in, out := &in.FunctionNameRef, &out.FunctionNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionNameSelector != nil { + in, out := &in.FunctionNameSelector, &out.FunctionNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InvokeMode != nil { + in, out := &in.InvokeMode, &out.InvokeMode + *out = new(string) + **out = **in + } + if in.Qualifier != nil { + in, out := &in.Qualifier, &out.Qualifier + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionURLParameters. +func (in *FunctionURLParameters) DeepCopy() *FunctionURLParameters { + if in == nil { + return nil + } + out := new(FunctionURLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionURLSpec) DeepCopyInto(out *FunctionURLSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionURLSpec. +func (in *FunctionURLSpec) DeepCopy() *FunctionURLSpec { + if in == nil { + return nil + } + out := new(FunctionURLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionURLStatus) DeepCopyInto(out *FunctionURLStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionURLStatus. +func (in *FunctionURLStatus) DeepCopy() *FunctionURLStatus { + if in == nil { + return nil + } + out := new(FunctionURLStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageConfigInitParameters) DeepCopyInto(out *ImageConfigInitParameters) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EntryPoint != nil { + in, out := &in.EntryPoint, &out.EntryPoint + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WorkingDirectory != nil { + in, out := &in.WorkingDirectory, &out.WorkingDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigInitParameters. +func (in *ImageConfigInitParameters) DeepCopy() *ImageConfigInitParameters { + if in == nil { + return nil + } + out := new(ImageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfigObservation) DeepCopyInto(out *ImageConfigObservation) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EntryPoint != nil { + in, out := &in.EntryPoint, &out.EntryPoint + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WorkingDirectory != nil { + in, out := &in.WorkingDirectory, &out.WorkingDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigObservation. 
+func (in *ImageConfigObservation) DeepCopy() *ImageConfigObservation { + if in == nil { + return nil + } + out := new(ImageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfigParameters) DeepCopyInto(out *ImageConfigParameters) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EntryPoint != nil { + in, out := &in.EntryPoint, &out.EntryPoint + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WorkingDirectory != nil { + in, out := &in.WorkingDirectory, &out.WorkingDirectory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigParameters. +func (in *ImageConfigParameters) DeepCopy() *ImageConfigParameters { + if in == nil { + return nil + } + out := new(ImageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingConfigInitParameters) DeepCopyInto(out *LoggingConfigInitParameters) { + *out = *in + if in.ApplicationLogLevel != nil { + in, out := &in.ApplicationLogLevel, &out.ApplicationLogLevel + *out = new(string) + **out = **in + } + if in.LogFormat != nil { + in, out := &in.LogFormat, &out.LogFormat + *out = new(string) + **out = **in + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } + if in.SystemLogLevel != nil { + in, out := &in.SystemLogLevel, &out.SystemLogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigInitParameters. +func (in *LoggingConfigInitParameters) DeepCopy() *LoggingConfigInitParameters { + if in == nil { + return nil + } + out := new(LoggingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigObservation) DeepCopyInto(out *LoggingConfigObservation) { + *out = *in + if in.ApplicationLogLevel != nil { + in, out := &in.ApplicationLogLevel, &out.ApplicationLogLevel + *out = new(string) + **out = **in + } + if in.LogFormat != nil { + in, out := &in.LogFormat, &out.LogFormat + *out = new(string) + **out = **in + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } + if in.SystemLogLevel != nil { + in, out := &in.SystemLogLevel, &out.SystemLogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigObservation. 
+func (in *LoggingConfigObservation) DeepCopy() *LoggingConfigObservation { + if in == nil { + return nil + } + out := new(LoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigParameters) DeepCopyInto(out *LoggingConfigParameters) { + *out = *in + if in.ApplicationLogLevel != nil { + in, out := &in.ApplicationLogLevel, &out.ApplicationLogLevel + *out = new(string) + **out = **in + } + if in.LogFormat != nil { + in, out := &in.LogFormat, &out.LogFormat + *out = new(string) + **out = **in + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = new(string) + **out = **in + } + if in.SystemLogLevel != nil { + in, out := &in.SystemLogLevel, &out.SystemLogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigParameters. +func (in *LoggingConfigParameters) DeepCopy() *LoggingConfigParameters { + if in == nil { + return nil + } + out := new(LoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnFailureInitParameters) DeepCopyInto(out *OnFailureInitParameters) { + *out = *in + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnFailureInitParameters. +func (in *OnFailureInitParameters) DeepCopy() *OnFailureInitParameters { + if in == nil { + return nil + } + out := new(OnFailureInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OnFailureObservation) DeepCopyInto(out *OnFailureObservation) { + *out = *in + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnFailureObservation. +func (in *OnFailureObservation) DeepCopy() *OnFailureObservation { + if in == nil { + return nil + } + out := new(OnFailureObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnFailureParameters) DeepCopyInto(out *OnFailureParameters) { + *out = *in + if in.DestinationArn != nil { + in, out := &in.DestinationArn, &out.DestinationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnFailureParameters. +func (in *OnFailureParameters) DeepCopy() *OnFailureParameters { + if in == nil { + return nil + } + out := new(OnFailureParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnSuccessInitParameters) DeepCopyInto(out *OnSuccessInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.DestinationRef != nil { + in, out := &in.DestinationRef, &out.DestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DestinationSelector != nil { + in, out := &in.DestinationSelector, &out.DestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnSuccessInitParameters. 
+func (in *OnSuccessInitParameters) DeepCopy() *OnSuccessInitParameters { + if in == nil { + return nil + } + out := new(OnSuccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnSuccessObservation) DeepCopyInto(out *OnSuccessObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnSuccessObservation. +func (in *OnSuccessObservation) DeepCopy() *OnSuccessObservation { + if in == nil { + return nil + } + out := new(OnSuccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnSuccessParameters) DeepCopyInto(out *OnSuccessParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.DestinationRef != nil { + in, out := &in.DestinationRef, &out.DestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DestinationSelector != nil { + in, out := &in.DestinationSelector, &out.DestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnSuccessParameters. +func (in *OnSuccessParameters) DeepCopy() *OnSuccessParameters { + if in == nil { + return nil + } + out := new(OnSuccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PoliciesInitParameters) DeepCopyInto(out *PoliciesInitParameters) { + *out = *in + if in.UntrustedArtifactOnDeployment != nil { + in, out := &in.UntrustedArtifactOnDeployment, &out.UntrustedArtifactOnDeployment + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoliciesInitParameters. +func (in *PoliciesInitParameters) DeepCopy() *PoliciesInitParameters { + if in == nil { + return nil + } + out := new(PoliciesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PoliciesObservation) DeepCopyInto(out *PoliciesObservation) { + *out = *in + if in.UntrustedArtifactOnDeployment != nil { + in, out := &in.UntrustedArtifactOnDeployment, &out.UntrustedArtifactOnDeployment + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoliciesObservation. +func (in *PoliciesObservation) DeepCopy() *PoliciesObservation { + if in == nil { + return nil + } + out := new(PoliciesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PoliciesParameters) DeepCopyInto(out *PoliciesParameters) { + *out = *in + if in.UntrustedArtifactOnDeployment != nil { + in, out := &in.UntrustedArtifactOnDeployment, &out.UntrustedArtifactOnDeployment + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoliciesParameters. +func (in *PoliciesParameters) DeepCopy() *PoliciesParameters { + if in == nil { + return nil + } + out := new(PoliciesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingConfigInitParameters) DeepCopyInto(out *RoutingConfigInitParameters) { + *out = *in + if in.AdditionalVersionWeights != nil { + in, out := &in.AdditionalVersionWeights, &out.AdditionalVersionWeights + *out = make(map[string]*float64, len(*in)) + for key, val := range *in { + var outVal *float64 + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(float64) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfigInitParameters. +func (in *RoutingConfigInitParameters) DeepCopy() *RoutingConfigInitParameters { + if in == nil { + return nil + } + out := new(RoutingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingConfigObservation) DeepCopyInto(out *RoutingConfigObservation) { + *out = *in + if in.AdditionalVersionWeights != nil { + in, out := &in.AdditionalVersionWeights, &out.AdditionalVersionWeights + *out = make(map[string]*float64, len(*in)) + for key, val := range *in { + var outVal *float64 + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(float64) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfigObservation. +func (in *RoutingConfigObservation) DeepCopy() *RoutingConfigObservation { + if in == nil { + return nil + } + out := new(RoutingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingConfigParameters) DeepCopyInto(out *RoutingConfigParameters) { + *out = *in + if in.AdditionalVersionWeights != nil { + in, out := &in.AdditionalVersionWeights, &out.AdditionalVersionWeights + *out = make(map[string]*float64, len(*in)) + for key, val := range *in { + var outVal *float64 + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(float64) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfigParameters. +func (in *RoutingConfigParameters) DeepCopy() *RoutingConfigParameters { + if in == nil { + return nil + } + out := new(RoutingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingConfigInitParameters) DeepCopyInto(out *ScalingConfigInitParameters) { + *out = *in + if in.MaximumConcurrency != nil { + in, out := &in.MaximumConcurrency, &out.MaximumConcurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfigInitParameters. +func (in *ScalingConfigInitParameters) DeepCopy() *ScalingConfigInitParameters { + if in == nil { + return nil + } + out := new(ScalingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingConfigObservation) DeepCopyInto(out *ScalingConfigObservation) { + *out = *in + if in.MaximumConcurrency != nil { + in, out := &in.MaximumConcurrency, &out.MaximumConcurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfigObservation. 
+func (in *ScalingConfigObservation) DeepCopy() *ScalingConfigObservation { + if in == nil { + return nil + } + out := new(ScalingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingConfigParameters) DeepCopyInto(out *ScalingConfigParameters) { + *out = *in + if in.MaximumConcurrency != nil { + in, out := &in.MaximumConcurrency, &out.MaximumConcurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfigParameters. +func (in *ScalingConfigParameters) DeepCopy() *ScalingConfigParameters { + if in == nil { + return nil + } + out := new(ScalingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedEventSourceInitParameters) DeepCopyInto(out *SelfManagedEventSourceInitParameters) { + *out = *in + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedEventSourceInitParameters. +func (in *SelfManagedEventSourceInitParameters) DeepCopy() *SelfManagedEventSourceInitParameters { + if in == nil { + return nil + } + out := new(SelfManagedEventSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedEventSourceObservation) DeepCopyInto(out *SelfManagedEventSourceObservation) { + *out = *in + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedEventSourceObservation. +func (in *SelfManagedEventSourceObservation) DeepCopy() *SelfManagedEventSourceObservation { + if in == nil { + return nil + } + out := new(SelfManagedEventSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedEventSourceParameters) DeepCopyInto(out *SelfManagedEventSourceParameters) { + *out = *in + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedEventSourceParameters. +func (in *SelfManagedEventSourceParameters) DeepCopy() *SelfManagedEventSourceParameters { + if in == nil { + return nil + } + out := new(SelfManagedEventSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfManagedKafkaEventSourceConfigInitParameters) DeepCopyInto(out *SelfManagedKafkaEventSourceConfigInitParameters) { + *out = *in + if in.ConsumerGroupID != nil { + in, out := &in.ConsumerGroupID, &out.ConsumerGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedKafkaEventSourceConfigInitParameters. +func (in *SelfManagedKafkaEventSourceConfigInitParameters) DeepCopy() *SelfManagedKafkaEventSourceConfigInitParameters { + if in == nil { + return nil + } + out := new(SelfManagedKafkaEventSourceConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedKafkaEventSourceConfigObservation) DeepCopyInto(out *SelfManagedKafkaEventSourceConfigObservation) { + *out = *in + if in.ConsumerGroupID != nil { + in, out := &in.ConsumerGroupID, &out.ConsumerGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedKafkaEventSourceConfigObservation. +func (in *SelfManagedKafkaEventSourceConfigObservation) DeepCopy() *SelfManagedKafkaEventSourceConfigObservation { + if in == nil { + return nil + } + out := new(SelfManagedKafkaEventSourceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedKafkaEventSourceConfigParameters) DeepCopyInto(out *SelfManagedKafkaEventSourceConfigParameters) { + *out = *in + if in.ConsumerGroupID != nil { + in, out := &in.ConsumerGroupID, &out.ConsumerGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedKafkaEventSourceConfigParameters. 
+func (in *SelfManagedKafkaEventSourceConfigParameters) DeepCopy() *SelfManagedKafkaEventSourceConfigParameters { + if in == nil { + return nil + } + out := new(SelfManagedKafkaEventSourceConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapStartInitParameters) DeepCopyInto(out *SnapStartInitParameters) { + *out = *in + if in.ApplyOn != nil { + in, out := &in.ApplyOn, &out.ApplyOn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapStartInitParameters. +func (in *SnapStartInitParameters) DeepCopy() *SnapStartInitParameters { + if in == nil { + return nil + } + out := new(SnapStartInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapStartObservation) DeepCopyInto(out *SnapStartObservation) { + *out = *in + if in.ApplyOn != nil { + in, out := &in.ApplyOn, &out.ApplyOn + *out = new(string) + **out = **in + } + if in.OptimizationStatus != nil { + in, out := &in.OptimizationStatus, &out.OptimizationStatus + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapStartObservation. +func (in *SnapStartObservation) DeepCopy() *SnapStartObservation { + if in == nil { + return nil + } + out := new(SnapStartObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapStartParameters) DeepCopyInto(out *SnapStartParameters) { + *out = *in + if in.ApplyOn != nil { + in, out := &in.ApplyOn, &out.ApplyOn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapStartParameters. +func (in *SnapStartParameters) DeepCopy() *SnapStartParameters { + if in == nil { + return nil + } + out := new(SnapStartParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceAccessConfigurationInitParameters) DeepCopyInto(out *SourceAccessConfigurationInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceAccessConfigurationInitParameters. +func (in *SourceAccessConfigurationInitParameters) DeepCopy() *SourceAccessConfigurationInitParameters { + if in == nil { + return nil + } + out := new(SourceAccessConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceAccessConfigurationObservation) DeepCopyInto(out *SourceAccessConfigurationObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceAccessConfigurationObservation. 
+func (in *SourceAccessConfigurationObservation) DeepCopy() *SourceAccessConfigurationObservation { + if in == nil { + return nil + } + out := new(SourceAccessConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceAccessConfigurationParameters) DeepCopyInto(out *SourceAccessConfigurationParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceAccessConfigurationParameters. +func (in *SourceAccessConfigurationParameters) DeepCopy() *SourceAccessConfigurationParameters { + if in == nil { + return nil + } + out := new(SourceAccessConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TracingConfigInitParameters) DeepCopyInto(out *TracingConfigInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfigInitParameters. +func (in *TracingConfigInitParameters) DeepCopy() *TracingConfigInitParameters { + if in == nil { + return nil + } + out := new(TracingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TracingConfigObservation) DeepCopyInto(out *TracingConfigObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfigObservation. +func (in *TracingConfigObservation) DeepCopy() *TracingConfigObservation { + if in == nil { + return nil + } + out := new(TracingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TracingConfigParameters) DeepCopyInto(out *TracingConfigParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfigParameters. +func (in *TracingConfigParameters) DeepCopy() *TracingConfigParameters { + if in == nil { + return nil + } + out := new(TracingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigInitParameters) DeepCopyInto(out *VPCConfigInitParameters) { + *out = *in + if in.IPv6AllowedForDualStack != nil { + in, out := &in.IPv6AllowedForDualStack, &out.IPv6AllowedForDualStack + *out = new(bool) + **out = **in + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigInitParameters. +func (in *VPCConfigInitParameters) DeepCopy() *VPCConfigInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigObservation) DeepCopyInto(out *VPCConfigObservation) { + *out = *in + if in.IPv6AllowedForDualStack != nil { + in, out := &in.IPv6AllowedForDualStack, &out.IPv6AllowedForDualStack + *out = new(bool) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigObservation. +func (in *VPCConfigObservation) DeepCopy() *VPCConfigObservation { + if in == nil { + return nil + } + out := new(VPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigParameters) DeepCopyInto(out *VPCConfigParameters) { + *out = *in + if in.IPv6AllowedForDualStack != nil { + in, out := &in.IPv6AllowedForDualStack, &out.IPv6AllowedForDualStack + *out = new(bool) + **out = **in + } + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigParameters. 
+func (in *VPCConfigParameters) DeepCopy() *VPCConfigParameters { + if in == nil { + return nil + } + out := new(VPCConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/lambda/v1beta2/zz_generated.managed.go b/apis/lambda/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..0885c0a908 --- /dev/null +++ b/apis/lambda/v1beta2/zz_generated.managed.go @@ -0,0 +1,368 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Alias. +func (mg *Alias) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Alias. +func (mg *Alias) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Alias. +func (mg *Alias) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Alias. +func (mg *Alias) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Alias. +func (mg *Alias) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Alias. +func (mg *Alias) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Alias. +func (mg *Alias) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Alias. +func (mg *Alias) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Alias. 
+func (mg *Alias) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Alias. +func (mg *Alias) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Alias. +func (mg *Alias) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Alias. +func (mg *Alias) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CodeSigningConfig. +func (mg *CodeSigningConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CodeSigningConfig. +func (mg *CodeSigningConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CodeSigningConfig. +func (mg *CodeSigningConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CodeSigningConfig. +func (mg *CodeSigningConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CodeSigningConfig. +func (mg *CodeSigningConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CodeSigningConfig. +func (mg *CodeSigningConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CodeSigningConfig. +func (mg *CodeSigningConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CodeSigningConfig. 
+func (mg *CodeSigningConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CodeSigningConfig. +func (mg *CodeSigningConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CodeSigningConfig. +func (mg *CodeSigningConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CodeSigningConfig. +func (mg *CodeSigningConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CodeSigningConfig. +func (mg *CodeSigningConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this EventSourceMapping. +func (mg *EventSourceMapping) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EventSourceMapping. +func (mg *EventSourceMapping) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EventSourceMapping. +func (mg *EventSourceMapping) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EventSourceMapping. +func (mg *EventSourceMapping) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EventSourceMapping. +func (mg *EventSourceMapping) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EventSourceMapping. 
+func (mg *EventSourceMapping) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EventSourceMapping. +func (mg *EventSourceMapping) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EventSourceMapping. +func (mg *EventSourceMapping) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EventSourceMapping. +func (mg *EventSourceMapping) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EventSourceMapping. +func (mg *EventSourceMapping) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EventSourceMapping. +func (mg *EventSourceMapping) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EventSourceMapping. +func (mg *EventSourceMapping) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Function. +func (mg *Function) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Function. +func (mg *Function) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Function. +func (mg *Function) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Function. +func (mg *Function) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Function. 
+func (mg *Function) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Function. +func (mg *Function) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Function. +func (mg *Function) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Function. +func (mg *Function) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Function. +func (mg *Function) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Function. +func (mg *Function) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Function. +func (mg *Function) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Function. +func (mg *Function) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FunctionEventInvokeConfig. 
+func (mg *FunctionEventInvokeConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FunctionURL. +func (mg *FunctionURL) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FunctionURL. 
+func (mg *FunctionURL) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FunctionURL. +func (mg *FunctionURL) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FunctionURL. +func (mg *FunctionURL) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FunctionURL. +func (mg *FunctionURL) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FunctionURL. +func (mg *FunctionURL) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FunctionURL. +func (mg *FunctionURL) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FunctionURL. +func (mg *FunctionURL) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FunctionURL. +func (mg *FunctionURL) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FunctionURL. +func (mg *FunctionURL) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FunctionURL. +func (mg *FunctionURL) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FunctionURL. 
+func (mg *FunctionURL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/lambda/v1beta2/zz_generated.managedlist.go b/apis/lambda/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..ac27031e41 --- /dev/null +++ b/apis/lambda/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,62 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AliasList. +func (l *AliasList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CodeSigningConfigList. +func (l *CodeSigningConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this EventSourceMappingList. +func (l *EventSourceMappingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FunctionEventInvokeConfigList. +func (l *FunctionEventInvokeConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FunctionList. +func (l *FunctionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FunctionURLList. 
+func (l *FunctionURLList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/lambda/v1beta2/zz_generated.resolvers.go b/apis/lambda/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..6bf786664e --- /dev/null +++ b/apis/lambda/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,603 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Alias. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Alias) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FunctionName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FunctionNameRef, + Selector: mg.Spec.ForProvider.FunctionNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FunctionName") + } + mg.Spec.ForProvider.FunctionName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FunctionNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this CodeSigningConfig. 
+func (mg *CodeSigningConfig) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.AllowedPublishers != nil { + { + m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta2", "SigningProfile", "SigningProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.AllowedPublishers.SigningProfileVersionArns), + Extract: common.ARNExtractor(), + References: mg.Spec.ForProvider.AllowedPublishers.SigningProfileVersionArnsRefs, + Selector: mg.Spec.ForProvider.AllowedPublishers.SigningProfileVersionArnsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AllowedPublishers.SigningProfileVersionArns") + } + mg.Spec.ForProvider.AllowedPublishers.SigningProfileVersionArns = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.AllowedPublishers.SigningProfileVersionArnsRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.AllowedPublishers != nil { + { + m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta2", "SigningProfile", "SigningProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.AllowedPublishers.SigningProfileVersionArns), + Extract: common.ARNExtractor(), + References: mg.Spec.InitProvider.AllowedPublishers.SigningProfileVersionArnsRefs, + Selector: 
mg.Spec.InitProvider.AllowedPublishers.SigningProfileVersionArnsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AllowedPublishers.SigningProfileVersionArns") + } + mg.Spec.InitProvider.AllowedPublishers.SigningProfileVersionArns = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.AllowedPublishers.SigningProfileVersionArnsRefs = mrsp.ResolvedReferences + + } + + return nil +} + +// ResolveReferences of this EventSourceMapping. +func (mg *EventSourceMapping) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FunctionName), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.FunctionNameRef, + Selector: mg.Spec.ForProvider.FunctionNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FunctionName") + } + mg.Spec.ForProvider.FunctionName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FunctionNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FunctionName), + Extract: common.ARNExtractor(), + 
Reference: mg.Spec.InitProvider.FunctionNameRef, + Selector: mg.Spec.InitProvider.FunctionNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FunctionName") + } + mg.Spec.InitProvider.FunctionName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FunctionNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Function. +func (mg *Function) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.FileSystemConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "AccessPoint", "AccessPointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FileSystemConfig.Arn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.FileSystemConfig.ArnRef, + Selector: mg.Spec.ForProvider.FileSystemConfig.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FileSystemConfig.Arn") + } + mg.Spec.ForProvider.FileSystemConfig.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FileSystemConfig.ArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyArn") + } + mg.Spec.ForProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.ReplacementSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.ReplacementSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.ReplacementSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ReplacementSecurityGroupIds") + } + mg.Spec.ForProvider.ReplacementSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ReplacementSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Role), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleRef, + Selector: mg.Spec.ForProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Role") + } + 
mg.Spec.ForProvider.Role = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.S3Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.S3BucketRef, + Selector: mg.Spec.ForProvider.S3BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.S3Bucket") + } + mg.Spec.ForProvider.S3Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.S3BucketRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig.SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig.SecurityGroupIds") + } + mg.Spec.ForProvider.VPCConfig.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfig.SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", 
"SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.ForProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfig.SubnetIds") + } + mg.Spec.ForProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.FileSystemConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("efs.aws.upbound.io", "v1beta2", "AccessPoint", "AccessPointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FileSystemConfig.Arn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.FileSystemConfig.ArnRef, + Selector: mg.Spec.InitProvider.FileSystemConfig.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FileSystemConfig.Arn") + } + mg.Spec.InitProvider.FileSystemConfig.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FileSystemConfig.ArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + 
CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyArn") + } + mg.Spec.InitProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.ReplacementSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.ReplacementSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.ReplacementSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ReplacementSecurityGroupIds") + } + mg.Spec.InitProvider.ReplacementSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ReplacementSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Role), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleRef, + Selector: mg.Spec.InitProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.Role") + } + mg.Spec.InitProvider.Role = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.S3Bucket), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.S3BucketRef, + Selector: mg.Spec.InitProvider.S3BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.S3Bucket") + } + mg.Spec.InitProvider.S3Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.S3BucketRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.VPCConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig.SecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig.SecurityGroupIds") + } + mg.Spec.InitProvider.VPCConfig.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig.SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.VPCConfig != nil { + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCConfig.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCConfig.SubnetIDRefs, + Selector: mg.Spec.InitProvider.VPCConfig.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfig.SubnetIds") + } + mg.Spec.InitProvider.VPCConfig.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCConfig.SubnetIDRefs = mrsp.ResolvedReferences + + } + + return nil +} + +// ResolveReferences of this FunctionEventInvokeConfig. +func (mg *FunctionEventInvokeConfig) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.DestinationConfig != nil { + if mg.Spec.ForProvider.DestinationConfig.OnFailure != nil { + { + m, l, err = apisresolver.GetManagedResource("sqs.aws.upbound.io", "v1beta1", "Queue", "QueueList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DestinationConfig.OnFailure.Destination), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.DestinationConfig.OnFailure.DestinationRef, + Selector: mg.Spec.ForProvider.DestinationConfig.OnFailure.DestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.DestinationConfig.OnFailure.Destination") + } + mg.Spec.ForProvider.DestinationConfig.OnFailure.Destination = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DestinationConfig.OnFailure.DestinationRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.DestinationConfig != nil { + if mg.Spec.ForProvider.DestinationConfig.OnSuccess != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DestinationConfig.OnSuccess.Destination), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.DestinationConfig.OnSuccess.DestinationRef, + Selector: mg.Spec.ForProvider.DestinationConfig.OnSuccess.DestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DestinationConfig.OnSuccess.Destination") + } + mg.Spec.ForProvider.DestinationConfig.OnSuccess.Destination = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DestinationConfig.OnSuccess.DestinationRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.DestinationConfig != nil { + if mg.Spec.InitProvider.DestinationConfig.OnFailure != nil { + { + m, l, err = apisresolver.GetManagedResource("sqs.aws.upbound.io", "v1beta1", "Queue", "QueueList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DestinationConfig.OnFailure.Destination), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.DestinationConfig.OnFailure.DestinationRef, + Selector: 
mg.Spec.InitProvider.DestinationConfig.OnFailure.DestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DestinationConfig.OnFailure.Destination") + } + mg.Spec.InitProvider.DestinationConfig.OnFailure.Destination = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DestinationConfig.OnFailure.DestinationRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.DestinationConfig != nil { + if mg.Spec.InitProvider.DestinationConfig.OnSuccess != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DestinationConfig.OnSuccess.Destination), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.DestinationConfig.OnSuccess.DestinationRef, + Selector: mg.Spec.InitProvider.DestinationConfig.OnSuccess.DestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DestinationConfig.OnSuccess.Destination") + } + mg.Spec.InitProvider.DestinationConfig.OnSuccess.Destination = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DestinationConfig.OnSuccess.DestinationRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this FunctionURL. 
+func (mg *FunctionURL) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FunctionName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FunctionNameRef, + Selector: mg.Spec.ForProvider.FunctionNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FunctionName") + } + mg.Spec.ForProvider.FunctionName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FunctionNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FunctionName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FunctionNameRef, + Selector: mg.Spec.InitProvider.FunctionNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FunctionName") + } + mg.Spec.InitProvider.FunctionName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FunctionNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/lambda/v1beta2/zz_groupversion_info.go b/apis/lambda/v1beta2/zz_groupversion_info.go new file mode 100755 index 
0000000000..0f009d039d --- /dev/null +++ b/apis/lambda/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=lambda.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "lambda.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/lexmodels/v1beta1/zz_generated.conversion_hubs.go b/apis/lexmodels/v1beta1/zz_generated.conversion_hubs.go index 891aaebe60..4882ae5606 100755 --- a/apis/lexmodels/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/lexmodels/v1beta1/zz_generated.conversion_hubs.go @@ -6,14 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Bot) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BotAlias) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Intent) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *SlotType) Hub() {} diff --git a/apis/lexmodels/v1beta1/zz_generated.conversion_spokes.go b/apis/lexmodels/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..790853b6e3 --- /dev/null +++ b/apis/lexmodels/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Bot to the hub type. +func (tr *Bot) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Bot type. +func (tr *Bot) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BotAlias to the hub type. 
+func (tr *BotAlias) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BotAlias type. +func (tr *BotAlias) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Intent to the hub type. +func (tr *Intent) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Intent type. 
+func (tr *Intent) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/lexmodels/v1beta2/zz_bot_terraformed.go b/apis/lexmodels/v1beta2/zz_bot_terraformed.go new file mode 100755 index 0000000000..a42f61f61d --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_bot_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Bot +func (mg *Bot) GetTerraformResourceType() string { + return "aws_lex_bot" +} + +// GetConnectionDetailsMapping for this Bot +func (tr *Bot) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Bot +func (tr *Bot) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Bot +func (tr *Bot) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Bot +func (tr *Bot) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Bot +func (tr *Bot) 
GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Bot +func (tr *Bot) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Bot +func (tr *Bot) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Bot +func (tr *Bot) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Bot using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Bot) LateInitialize(attrs []byte) (bool, error) { + params := &BotParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Bot) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lexmodels/v1beta2/zz_bot_types.go b/apis/lexmodels/v1beta2/zz_bot_types.go new file mode 100755 index 0000000000..a8d5be354d --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_bot_types.go @@ -0,0 +1,452 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AbortStatementInitParameters struct { + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. + Message []MessageInitParameters `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type AbortStatementObservation struct { + + // A set of messages, each of which provides a message string and its type. 
+ // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. + Message []MessageObservation `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type AbortStatementParameters struct { + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. + // +kubebuilder:validation:Optional + Message []MessageParameters `json:"message" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. + // +kubebuilder:validation:Optional + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type BotInitParameters struct { + + // The message that Amazon Lex uses to abort a conversation. Attributes are documented under statement. + AbortStatement *AbortStatementInitParameters `json:"abortStatement,omitempty" tf:"abort_statement,omitempty"` + + // By specifying true, you confirm that your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. For more information see the Amazon Lex FAQ and the Amazon Lex PutBot API Docs. + ChildDirected *bool `json:"childDirected,omitempty" tf:"child_directed,omitempty"` + + // The message that Amazon Lex uses when it doesn't understand the user's request. Attributes are documented under prompt. 
+ ClarificationPrompt *ClarificationPromptInitParameters `json:"clarificationPrompt,omitempty" tf:"clarification_prompt,omitempty"` + + // Determines if a new bot version is created when the initial resource is created and on each update. Defaults to false. + CreateVersion *bool `json:"createVersion,omitempty" tf:"create_version,omitempty"` + + // A description of the bot. Must be less than or equal to 200 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // When set to true user utterances are sent to Amazon Comprehend for sentiment analysis. If you don't specify detectSentiment, the default is false. + DetectSentiment *bool `json:"detectSentiment,omitempty" tf:"detect_sentiment,omitempty"` + + // Set to true to enable access to natural language understanding improvements. When you set the enable_model_improvements parameter to true you can use the nlu_intent_confidence_threshold parameter to configure confidence scores. For more information, see Confidence Scores. You can only set the enable_model_improvements parameter in certain Regions. If you set the parameter to true, your bot has access to accuracy improvements. For more information see the Amazon Lex Bot PutBot API Docs. + EnableModelImprovements *bool `json:"enableModelImprovements,omitempty" tf:"enable_model_improvements,omitempty"` + + // The maximum time in seconds that Amazon Lex retains the data gathered in a conversation. Default is 300. Must be a number between 60 and 86400 (inclusive). + IdleSessionTTLInSeconds *float64 `json:"idleSessionTtlInSeconds,omitempty" tf:"idle_session_ttl_in_seconds,omitempty"` + + // A set of Intent objects. Each intent represents a command that a user can express. Attributes are documented under intent. Can have up to 250 Intent objects. + Intent []IntentInitParameters `json:"intent,omitempty" tf:"intent,omitempty"` + + // Specifies the target locale for the bot. 
Any intent used in the bot must be compatible with the locale of the bot. For available locales, see Amazon Lex Bot PutBot API Docs. Default is en-US. + Locale *string `json:"locale,omitempty" tf:"locale,omitempty"` + + // Determines the threshold where Amazon Lex will insert the AMAZON.FallbackIntent, AMAZON.KendraSearchIntent, or both when returning alternative intents in a PostContent or PostText response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent are only inserted if they are configured for the bot. For more information see Amazon Lex Bot PutBot API Docs This value requires enable_model_improvements to be set to true and the default is 0. Must be a float between 0 and 1. + NluIntentConfidenceThreshold *float64 `json:"nluIntentConfidenceThreshold,omitempty" tf:"nlu_intent_confidence_threshold,omitempty"` + + // If you set the process_behavior element to BUILD, Amazon Lex builds the bot so that it can be run. If you set the element to SAVE Amazon Lex saves the bot, but doesn't build it. Default is SAVE. + ProcessBehavior *string `json:"processBehavior,omitempty" tf:"process_behavior,omitempty"` + + // The Amazon Polly voice ID that you want Amazon Lex to use for voice interactions with the user. The locale configured for the voice must match the locale of the bot. For more information, see Available Voices in the Amazon Polly Developer Guide. + VoiceID *string `json:"voiceId,omitempty" tf:"voice_id,omitempty"` +} + +type BotObservation struct { + + // The message that Amazon Lex uses to abort a conversation. Attributes are documented under statement. + AbortStatement *AbortStatementObservation `json:"abortStatement,omitempty" tf:"abort_statement,omitempty"` + + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Checksum identifying the version of the bot that was created. The checksum is not + // included as an argument because the resource will add it automatically when updating the bot. 
+ Checksum *string `json:"checksum,omitempty" tf:"checksum,omitempty"` + + // By specifying true, you confirm that your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. For more information see the Amazon Lex FAQ and the Amazon Lex PutBot API Docs. + ChildDirected *bool `json:"childDirected,omitempty" tf:"child_directed,omitempty"` + + // The message that Amazon Lex uses when it doesn't understand the user's request. Attributes are documented under prompt. + ClarificationPrompt *ClarificationPromptObservation `json:"clarificationPrompt,omitempty" tf:"clarification_prompt,omitempty"` + + // Determines if a new bot version is created when the initial resource is created and on each update. Defaults to false. + CreateVersion *bool `json:"createVersion,omitempty" tf:"create_version,omitempty"` + + // The date when the bot version was created. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // A description of the bot. Must be less than or equal to 200 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // When set to true user utterances are sent to Amazon Comprehend for sentiment analysis. If you don't specify detectSentiment, the default is false. + DetectSentiment *bool `json:"detectSentiment,omitempty" tf:"detect_sentiment,omitempty"` + + // Set to true to enable access to natural language understanding improvements. When you set the enable_model_improvements parameter to true you can use the nlu_intent_confidence_threshold parameter to configure confidence scores. For more information, see Confidence Scores. You can only set the enable_model_improvements parameter in certain Regions. If you set the parameter to true, your bot has access to accuracy improvements. For more information see the Amazon Lex Bot PutBot API Docs. 
+ EnableModelImprovements *bool `json:"enableModelImprovements,omitempty" tf:"enable_model_improvements,omitempty"` + + // If status is FAILED, Amazon Lex provides the reason that it failed to build the bot. + FailureReason *string `json:"failureReason,omitempty" tf:"failure_reason,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The maximum time in seconds that Amazon Lex retains the data gathered in a conversation. Default is 300. Must be a number between 60 and 86400 (inclusive). + IdleSessionTTLInSeconds *float64 `json:"idleSessionTtlInSeconds,omitempty" tf:"idle_session_ttl_in_seconds,omitempty"` + + // A set of Intent objects. Each intent represents a command that a user can express. Attributes are documented under intent. Can have up to 250 Intent objects. + Intent []IntentObservation `json:"intent,omitempty" tf:"intent,omitempty"` + + // The date when the $LATEST version of this bot was updated. + LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` + + // Specifies the target locale for the bot. Any intent used in the bot must be compatible with the locale of the bot. For available locales, see Amazon Lex Bot PutBot API Docs. Default is en-US. + Locale *string `json:"locale,omitempty" tf:"locale,omitempty"` + + // Determines the threshold where Amazon Lex will insert the AMAZON.FallbackIntent, AMAZON.KendraSearchIntent, or both when returning alternative intents in a PostContent or PostText response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent are only inserted if they are configured for the bot. For more information see Amazon Lex Bot PutBot API Docs This value requires enable_model_improvements to be set to true and the default is 0. Must be a float between 0 and 1. 
+ NluIntentConfidenceThreshold *float64 `json:"nluIntentConfidenceThreshold,omitempty" tf:"nlu_intent_confidence_threshold,omitempty"` + + // If you set the process_behavior element to BUILD, Amazon Lex builds the bot so that it can be run. If you set the element to SAVE Amazon Lex saves the bot, but doesn't build it. Default is SAVE. + ProcessBehavior *string `json:"processBehavior,omitempty" tf:"process_behavior,omitempty"` + + // When you send a request to create or update a bot, Amazon Lex sets the status response + // element to BUILDING. After Amazon Lex builds the bot, it sets status to READY. If Amazon Lex can't + // build the bot, it sets status to FAILED. Amazon Lex returns the reason for the failure in the + // failure_reason response element. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // The version of the bot. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The Amazon Polly voice ID that you want Amazon Lex to use for voice interactions with the user. The locale configured for the voice must match the locale of the bot. For more information, see Available Voices in the Amazon Polly Developer Guide. + VoiceID *string `json:"voiceId,omitempty" tf:"voice_id,omitempty"` +} + +type BotParameters struct { + + // The message that Amazon Lex uses to abort a conversation. Attributes are documented under statement. + // +kubebuilder:validation:Optional + AbortStatement *AbortStatementParameters `json:"abortStatement,omitempty" tf:"abort_statement,omitempty"` + + // By specifying true, you confirm that your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. For more information see the Amazon Lex FAQ and the Amazon Lex PutBot API Docs. 
+ // +kubebuilder:validation:Optional + ChildDirected *bool `json:"childDirected,omitempty" tf:"child_directed,omitempty"` + + // The message that Amazon Lex uses when it doesn't understand the user's request. Attributes are documented under prompt. + // +kubebuilder:validation:Optional + ClarificationPrompt *ClarificationPromptParameters `json:"clarificationPrompt,omitempty" tf:"clarification_prompt,omitempty"` + + // Determines if a new bot version is created when the initial resource is created and on each update. Defaults to false. + // +kubebuilder:validation:Optional + CreateVersion *bool `json:"createVersion,omitempty" tf:"create_version,omitempty"` + + // A description of the bot. Must be less than or equal to 200 characters in length. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // When set to true user utterances are sent to Amazon Comprehend for sentiment analysis. If you don't specify detectSentiment, the default is false. + // +kubebuilder:validation:Optional + DetectSentiment *bool `json:"detectSentiment,omitempty" tf:"detect_sentiment,omitempty"` + + // Set to true to enable access to natural language understanding improvements. When you set the enable_model_improvements parameter to true you can use the nlu_intent_confidence_threshold parameter to configure confidence scores. For more information, see Confidence Scores. You can only set the enable_model_improvements parameter in certain Regions. If you set the parameter to true, your bot has access to accuracy improvements. For more information see the Amazon Lex Bot PutBot API Docs. + // +kubebuilder:validation:Optional + EnableModelImprovements *bool `json:"enableModelImprovements,omitempty" tf:"enable_model_improvements,omitempty"` + + // The maximum time in seconds that Amazon Lex retains the data gathered in a conversation. Default is 300. Must be a number between 60 and 86400 (inclusive). 
+ // +kubebuilder:validation:Optional + IdleSessionTTLInSeconds *float64 `json:"idleSessionTtlInSeconds,omitempty" tf:"idle_session_ttl_in_seconds,omitempty"` + + // A set of Intent objects. Each intent represents a command that a user can express. Attributes are documented under intent. Can have up to 250 Intent objects. + // +kubebuilder:validation:Optional + Intent []IntentParameters `json:"intent,omitempty" tf:"intent,omitempty"` + + // Specifies the target locale for the bot. Any intent used in the bot must be compatible with the locale of the bot. For available locales, see Amazon Lex Bot PutBot API Docs. Default is en-US. + // +kubebuilder:validation:Optional + Locale *string `json:"locale,omitempty" tf:"locale,omitempty"` + + // Determines the threshold where Amazon Lex will insert the AMAZON.FallbackIntent, AMAZON.KendraSearchIntent, or both when returning alternative intents in a PostContent or PostText response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent are only inserted if they are configured for the bot. For more information see Amazon Lex Bot PutBot API Docs This value requires enable_model_improvements to be set to true and the default is 0. Must be a float between 0 and 1. + // +kubebuilder:validation:Optional + NluIntentConfidenceThreshold *float64 `json:"nluIntentConfidenceThreshold,omitempty" tf:"nlu_intent_confidence_threshold,omitempty"` + + // If you set the process_behavior element to BUILD, Amazon Lex builds the bot so that it can be run. If you set the element to SAVE Amazon Lex saves the bot, but doesn't build it. Default is SAVE. + // +kubebuilder:validation:Optional + ProcessBehavior *string `json:"processBehavior,omitempty" tf:"process_behavior,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Amazon Polly voice ID that you want Amazon Lex to use for voice interactions with the user. The locale configured for the voice must match the locale of the bot. For more information, see Available Voices in the Amazon Polly Developer Guide. + // +kubebuilder:validation:Optional + VoiceID *string `json:"voiceId,omitempty" tf:"voice_id,omitempty"` +} + +type ClarificationPromptInitParameters struct { + + // The number of times to prompt the user for information. + MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. + Message []ClarificationPromptMessageInitParameters `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type ClarificationPromptMessageInitParameters struct { + + // The text of the message. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ClarificationPromptMessageObservation struct { + + // The text of the message. 
+ Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ClarificationPromptMessageParameters struct { + + // The text of the message. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The content type of the message string. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. + // +kubebuilder:validation:Optional + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ClarificationPromptObservation struct { + + // The number of times to prompt the user for information. + MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. + Message []ClarificationPromptMessageObservation `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type ClarificationPromptParameters struct { + + // The number of times to prompt the user for information. 
+ // +kubebuilder:validation:Optional + MaxAttempts *float64 `json:"maxAttempts" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. + // +kubebuilder:validation:Optional + Message []ClarificationPromptMessageParameters `json:"message" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. + // +kubebuilder:validation:Optional + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type IntentInitParameters struct { + + // The name of the intent. Must be less than or equal to 100 characters in length. + IntentName *string `json:"intentName,omitempty" tf:"intent_name,omitempty"` + + // The version of the intent. Must be less than or equal to 64 characters in length. + IntentVersion *string `json:"intentVersion,omitempty" tf:"intent_version,omitempty"` +} + +type IntentObservation struct { + + // The name of the intent. Must be less than or equal to 100 characters in length. + IntentName *string `json:"intentName,omitempty" tf:"intent_name,omitempty"` + + // The version of the intent. Must be less than or equal to 64 characters in length. + IntentVersion *string `json:"intentVersion,omitempty" tf:"intent_version,omitempty"` +} + +type IntentParameters struct { + + // The name of the intent. Must be less than or equal to 100 characters in length. + // +kubebuilder:validation:Optional + IntentName *string `json:"intentName" tf:"intent_name,omitempty"` + + // The version of the intent. Must be less than or equal to 64 characters in length. 
+ // +kubebuilder:validation:Optional + IntentVersion *string `json:"intentVersion" tf:"intent_version,omitempty"` +} + +type MessageInitParameters struct { + + // The text of the message. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type MessageObservation struct { + + // The text of the message. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type MessageParameters struct { + + // The text of the message. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The content type of the message string. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. + // +kubebuilder:validation:Optional + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +// BotSpec defines the desired state of Bot +type BotSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BotParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BotInitParameters `json:"initProvider,omitempty"` +} + +// BotStatus defines the observed state of Bot. +type BotStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BotObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Bot is the Schema for the Bots API. Provides an Amazon Lex bot resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Bot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.abortStatement) || (has(self.initProvider) && has(self.initProvider.abortStatement))",message="spec.forProvider.abortStatement is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.childDirected) || (has(self.initProvider) && has(self.initProvider.childDirected))",message="spec.forProvider.childDirected is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.intent) || (has(self.initProvider) && has(self.initProvider.intent))",message="spec.forProvider.intent is a required parameter" + Spec BotSpec `json:"spec"` + Status BotStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BotList contains a list of Bots +type BotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bot `json:"items"` +} + +// Repository type metadata. 
+var ( + Bot_Kind = "Bot" + Bot_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Bot_Kind}.String() + Bot_KindAPIVersion = Bot_Kind + "." + CRDGroupVersion.String() + Bot_GroupVersionKind = CRDGroupVersion.WithKind(Bot_Kind) +) + +func init() { + SchemeBuilder.Register(&Bot{}, &BotList{}) +} diff --git a/apis/lexmodels/v1beta2/zz_botalias_terraformed.go b/apis/lexmodels/v1beta2/zz_botalias_terraformed.go new file mode 100755 index 0000000000..73bbe1cb2f --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_botalias_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BotAlias +func (mg *BotAlias) GetTerraformResourceType() string { + return "aws_lex_bot_alias" +} + +// GetConnectionDetailsMapping for this BotAlias +func (tr *BotAlias) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BotAlias +func (tr *BotAlias) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BotAlias +func (tr *BotAlias) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BotAlias +func (tr *BotAlias) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BotAlias +func (tr *BotAlias) GetParameters() (map[string]any, error) { + 
p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BotAlias +func (tr *BotAlias) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BotAlias +func (tr *BotAlias) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BotAlias +func (tr *BotAlias) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BotAlias using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *BotAlias) LateInitialize(attrs []byte) (bool, error) { + params := &BotAliasParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BotAlias) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lexmodels/v1beta2/zz_botalias_types.go b/apis/lexmodels/v1beta2/zz_botalias_types.go new file mode 100755 index 0000000000..ec974d8f5c --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_botalias_types.go @@ -0,0 +1,225 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BotAliasInitParameters struct { + + // The name of the bot. + BotName *string `json:"botName,omitempty" tf:"bot_name,omitempty"` + + // The version of the bot. + BotVersion *string `json:"botVersion,omitempty" tf:"bot_version,omitempty"` + + // The settings that determine how Amazon Lex uses conversation logs for the alias. Attributes are documented under conversation_logs. + ConversationLogs *ConversationLogsInitParameters `json:"conversationLogs,omitempty" tf:"conversation_logs,omitempty"` + + // A description of the alias. Must be less than or equal to 200 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` +} + +type BotAliasObservation struct { + + // The ARN of the bot alias. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The name of the bot. + BotName *string `json:"botName,omitempty" tf:"bot_name,omitempty"` + + // The version of the bot. + BotVersion *string `json:"botVersion,omitempty" tf:"bot_version,omitempty"` + + // Checksum of the bot alias. + Checksum *string `json:"checksum,omitempty" tf:"checksum,omitempty"` + + // The settings that determine how Amazon Lex uses conversation logs for the alias. Attributes are documented under conversation_logs. + ConversationLogs *ConversationLogsObservation `json:"conversationLogs,omitempty" tf:"conversation_logs,omitempty"` + + // The date that the bot alias was created. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // A description of the alias. Must be less than or equal to 200 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The date that the bot alias was updated. When you create a resource, the creation date and the last updated date are the same. + LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` +} + +type BotAliasParameters struct { + + // The name of the bot. + // +kubebuilder:validation:Optional + BotName *string `json:"botName,omitempty" tf:"bot_name,omitempty"` + + // The version of the bot. + // +kubebuilder:validation:Optional + BotVersion *string `json:"botVersion,omitempty" tf:"bot_version,omitempty"` + + // The settings that determine how Amazon Lex uses conversation logs for the alias. Attributes are documented under conversation_logs. + // +kubebuilder:validation:Optional + ConversationLogs *ConversationLogsParameters `json:"conversationLogs,omitempty" tf:"conversation_logs,omitempty"` + + // A description of the alias. Must be less than or equal to 200 characters in length. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type ConversationLogsInitParameters struct { + + // The Amazon Resource Name (ARN) of the IAM role used to write your logs to CloudWatch Logs or an S3 bucket. Must be between 20 and 2048 characters in length. + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // The settings for your conversation logs. You can log text, audio, or both. Attributes are documented under log_settings. + LogSettings []LogSettingsInitParameters `json:"logSettings,omitempty" tf:"log_settings,omitempty"` +} + +type ConversationLogsObservation struct { + + // The Amazon Resource Name (ARN) of the IAM role used to write your logs to CloudWatch Logs or an S3 bucket. Must be between 20 and 2048 characters in length. + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // The settings for your conversation logs. You can log text, audio, or both. Attributes are documented under log_settings. + LogSettings []LogSettingsObservation `json:"logSettings,omitempty" tf:"log_settings,omitempty"` +} + +type ConversationLogsParameters struct { + + // The Amazon Resource Name (ARN) of the IAM role used to write your logs to CloudWatch Logs or an S3 bucket. Must be between 20 and 2048 characters in length. + // +kubebuilder:validation:Optional + IAMRoleArn *string `json:"iamRoleArn" tf:"iam_role_arn,omitempty"` + + // The settings for your conversation logs. You can log text, audio, or both. Attributes are documented under log_settings. 
+ // +kubebuilder:validation:Optional + LogSettings []LogSettingsParameters `json:"logSettings,omitempty" tf:"log_settings,omitempty"` +} + +type LogSettingsInitParameters struct { + + // The destination where logs are delivered. Options are CLOUDWATCH_LOGS or S3. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // The Amazon Resource Name (ARN) of the key used to encrypt audio logs in an S3 bucket. This can only be specified when destination is set to S3. Must be between 20 and 2048 characters in length. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The type of logging that is enabled. Options are AUDIO or TEXT. + LogType *string `json:"logType,omitempty" tf:"log_type,omitempty"` + + // The Amazon Resource Name (ARN) of the CloudWatch Logs log group or S3 bucket where the logs are delivered. Must be less than or equal to 2048 characters in length. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type LogSettingsObservation struct { + + // The destination where logs are delivered. Options are CLOUDWATCH_LOGS or S3. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // The Amazon Resource Name (ARN) of the key used to encrypt audio logs in an S3 bucket. This can only be specified when destination is set to S3. Must be between 20 and 2048 characters in length. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The type of logging that is enabled. Options are AUDIO or TEXT. + LogType *string `json:"logType,omitempty" tf:"log_type,omitempty"` + + // The Amazon Resource Name (ARN) of the CloudWatch Logs log group or S3 bucket where the logs are delivered. Must be less than or equal to 2048 characters in length. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // (Computed) The prefix of the S3 object key for AUDIO logs or the log stream name for TEXT logs. 
+ ResourcePrefix *string `json:"resourcePrefix,omitempty" tf:"resource_prefix,omitempty"` +} + +type LogSettingsParameters struct { + + // The destination where logs are delivered. Options are CLOUDWATCH_LOGS or S3. + // +kubebuilder:validation:Optional + Destination *string `json:"destination" tf:"destination,omitempty"` + + // The Amazon Resource Name (ARN) of the key used to encrypt audio logs in an S3 bucket. This can only be specified when destination is set to S3. Must be between 20 and 2048 characters in length. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // The type of logging that is enabled. Options are AUDIO or TEXT. + // +kubebuilder:validation:Optional + LogType *string `json:"logType" tf:"log_type,omitempty"` + + // The Amazon Resource Name (ARN) of the CloudWatch Logs log group or S3 bucket where the logs are delivered. Must be less than or equal to 2048 characters in length. + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn" tf:"resource_arn,omitempty"` +} + +// BotAliasSpec defines the desired state of BotAlias +type BotAliasSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BotAliasParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider BotAliasInitParameters `json:"initProvider,omitempty"` +} + +// BotAliasStatus defines the observed state of BotAlias. +type BotAliasStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BotAliasObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BotAlias is the Schema for the BotAliass API. Provides an Amazon Lex Bot Alias resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BotAlias struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.botName) || (has(self.initProvider) && has(self.initProvider.botName))",message="spec.forProvider.botName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.botVersion) || (has(self.initProvider) && has(self.initProvider.botVersion))",message="spec.forProvider.botVersion is a required parameter" + Spec BotAliasSpec `json:"spec"` + Status BotAliasStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BotAliasList contains a list of BotAliass +type BotAliasList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BotAlias 
`json:"items"` +} + +// Repository type metadata. +var ( + BotAlias_Kind = "BotAlias" + BotAlias_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BotAlias_Kind}.String() + BotAlias_KindAPIVersion = BotAlias_Kind + "." + CRDGroupVersion.String() + BotAlias_GroupVersionKind = CRDGroupVersion.WithKind(BotAlias_Kind) +) + +func init() { + SchemeBuilder.Register(&BotAlias{}, &BotAliasList{}) +} diff --git a/apis/lexmodels/v1beta2/zz_generated.conversion_hubs.go b/apis/lexmodels/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..958f49b9ad --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Bot) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BotAlias) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Intent) Hub() {} diff --git a/apis/lexmodels/v1beta2/zz_generated.deepcopy.go b/apis/lexmodels/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..099540cf8b --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3182 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AbortStatementInitParameters) DeepCopyInto(out *AbortStatementInitParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]MessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AbortStatementInitParameters. +func (in *AbortStatementInitParameters) DeepCopy() *AbortStatementInitParameters { + if in == nil { + return nil + } + out := new(AbortStatementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AbortStatementObservation) DeepCopyInto(out *AbortStatementObservation) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]MessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AbortStatementObservation. +func (in *AbortStatementObservation) DeepCopy() *AbortStatementObservation { + if in == nil { + return nil + } + out := new(AbortStatementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AbortStatementParameters) DeepCopyInto(out *AbortStatementParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]MessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AbortStatementParameters. +func (in *AbortStatementParameters) DeepCopy() *AbortStatementParameters { + if in == nil { + return nil + } + out := new(AbortStatementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bot) DeepCopyInto(out *Bot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bot. +func (in *Bot) DeepCopy() *Bot { + if in == nil { + return nil + } + out := new(Bot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAlias) DeepCopyInto(out *BotAlias) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAlias. 
+func (in *BotAlias) DeepCopy() *BotAlias { + if in == nil { + return nil + } + out := new(BotAlias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BotAlias) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAliasInitParameters) DeepCopyInto(out *BotAliasInitParameters) { + *out = *in + if in.BotName != nil { + in, out := &in.BotName, &out.BotName + *out = new(string) + **out = **in + } + if in.BotVersion != nil { + in, out := &in.BotVersion, &out.BotVersion + *out = new(string) + **out = **in + } + if in.ConversationLogs != nil { + in, out := &in.ConversationLogs, &out.ConversationLogs + *out = new(ConversationLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAliasInitParameters. +func (in *BotAliasInitParameters) DeepCopy() *BotAliasInitParameters { + if in == nil { + return nil + } + out := new(BotAliasInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAliasList) DeepCopyInto(out *BotAliasList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BotAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAliasList. 
+func (in *BotAliasList) DeepCopy() *BotAliasList { + if in == nil { + return nil + } + out := new(BotAliasList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BotAliasList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAliasObservation) DeepCopyInto(out *BotAliasObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BotName != nil { + in, out := &in.BotName, &out.BotName + *out = new(string) + **out = **in + } + if in.BotVersion != nil { + in, out := &in.BotVersion, &out.BotVersion + *out = new(string) + **out = **in + } + if in.Checksum != nil { + in, out := &in.Checksum, &out.Checksum + *out = new(string) + **out = **in + } + if in.ConversationLogs != nil { + in, out := &in.ConversationLogs, &out.ConversationLogs + *out = new(ConversationLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAliasObservation. 
+func (in *BotAliasObservation) DeepCopy() *BotAliasObservation { + if in == nil { + return nil + } + out := new(BotAliasObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAliasParameters) DeepCopyInto(out *BotAliasParameters) { + *out = *in + if in.BotName != nil { + in, out := &in.BotName, &out.BotName + *out = new(string) + **out = **in + } + if in.BotVersion != nil { + in, out := &in.BotVersion, &out.BotVersion + *out = new(string) + **out = **in + } + if in.ConversationLogs != nil { + in, out := &in.ConversationLogs, &out.ConversationLogs + *out = new(ConversationLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAliasParameters. +func (in *BotAliasParameters) DeepCopy() *BotAliasParameters { + if in == nil { + return nil + } + out := new(BotAliasParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotAliasSpec) DeepCopyInto(out *BotAliasSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAliasSpec. +func (in *BotAliasSpec) DeepCopy() *BotAliasSpec { + if in == nil { + return nil + } + out := new(BotAliasSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BotAliasStatus) DeepCopyInto(out *BotAliasStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotAliasStatus. +func (in *BotAliasStatus) DeepCopy() *BotAliasStatus { + if in == nil { + return nil + } + out := new(BotAliasStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotInitParameters) DeepCopyInto(out *BotInitParameters) { + *out = *in + if in.AbortStatement != nil { + in, out := &in.AbortStatement, &out.AbortStatement + *out = new(AbortStatementInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ChildDirected != nil { + in, out := &in.ChildDirected, &out.ChildDirected + *out = new(bool) + **out = **in + } + if in.ClarificationPrompt != nil { + in, out := &in.ClarificationPrompt, &out.ClarificationPrompt + *out = new(ClarificationPromptInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CreateVersion != nil { + in, out := &in.CreateVersion, &out.CreateVersion + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DetectSentiment != nil { + in, out := &in.DetectSentiment, &out.DetectSentiment + *out = new(bool) + **out = **in + } + if in.EnableModelImprovements != nil { + in, out := &in.EnableModelImprovements, &out.EnableModelImprovements + *out = new(bool) + **out = **in + } + if in.IdleSessionTTLInSeconds != nil { + in, out := &in.IdleSessionTTLInSeconds, &out.IdleSessionTTLInSeconds + *out = new(float64) + **out = **in + } + if in.Intent != nil { + in, out := &in.Intent, &out.Intent + *out = make([]IntentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Locale != nil { + in, out := &in.Locale, 
&out.Locale + *out = new(string) + **out = **in + } + if in.NluIntentConfidenceThreshold != nil { + in, out := &in.NluIntentConfidenceThreshold, &out.NluIntentConfidenceThreshold + *out = new(float64) + **out = **in + } + if in.ProcessBehavior != nil { + in, out := &in.ProcessBehavior, &out.ProcessBehavior + *out = new(string) + **out = **in + } + if in.VoiceID != nil { + in, out := &in.VoiceID, &out.VoiceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotInitParameters. +func (in *BotInitParameters) DeepCopy() *BotInitParameters { + if in == nil { + return nil + } + out := new(BotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotList) DeepCopyInto(out *BotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotList. +func (in *BotList) DeepCopy() *BotList { + if in == nil { + return nil + } + out := new(BotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BotObservation) DeepCopyInto(out *BotObservation) { + *out = *in + if in.AbortStatement != nil { + in, out := &in.AbortStatement, &out.AbortStatement + *out = new(AbortStatementObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Checksum != nil { + in, out := &in.Checksum, &out.Checksum + *out = new(string) + **out = **in + } + if in.ChildDirected != nil { + in, out := &in.ChildDirected, &out.ChildDirected + *out = new(bool) + **out = **in + } + if in.ClarificationPrompt != nil { + in, out := &in.ClarificationPrompt, &out.ClarificationPrompt + *out = new(ClarificationPromptObservation) + (*in).DeepCopyInto(*out) + } + if in.CreateVersion != nil { + in, out := &in.CreateVersion, &out.CreateVersion + *out = new(bool) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DetectSentiment != nil { + in, out := &in.DetectSentiment, &out.DetectSentiment + *out = new(bool) + **out = **in + } + if in.EnableModelImprovements != nil { + in, out := &in.EnableModelImprovements, &out.EnableModelImprovements + *out = new(bool) + **out = **in + } + if in.FailureReason != nil { + in, out := &in.FailureReason, &out.FailureReason + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IdleSessionTTLInSeconds != nil { + in, out := &in.IdleSessionTTLInSeconds, &out.IdleSessionTTLInSeconds + *out = new(float64) + **out = **in + } + if in.Intent != nil { + in, out := &in.Intent, &out.Intent + *out = make([]IntentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = 
new(string) + **out = **in + } + if in.Locale != nil { + in, out := &in.Locale, &out.Locale + *out = new(string) + **out = **in + } + if in.NluIntentConfidenceThreshold != nil { + in, out := &in.NluIntentConfidenceThreshold, &out.NluIntentConfidenceThreshold + *out = new(float64) + **out = **in + } + if in.ProcessBehavior != nil { + in, out := &in.ProcessBehavior, &out.ProcessBehavior + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VoiceID != nil { + in, out := &in.VoiceID, &out.VoiceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotObservation. +func (in *BotObservation) DeepCopy() *BotObservation { + if in == nil { + return nil + } + out := new(BotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BotParameters) DeepCopyInto(out *BotParameters) { + *out = *in + if in.AbortStatement != nil { + in, out := &in.AbortStatement, &out.AbortStatement + *out = new(AbortStatementParameters) + (*in).DeepCopyInto(*out) + } + if in.ChildDirected != nil { + in, out := &in.ChildDirected, &out.ChildDirected + *out = new(bool) + **out = **in + } + if in.ClarificationPrompt != nil { + in, out := &in.ClarificationPrompt, &out.ClarificationPrompt + *out = new(ClarificationPromptParameters) + (*in).DeepCopyInto(*out) + } + if in.CreateVersion != nil { + in, out := &in.CreateVersion, &out.CreateVersion + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DetectSentiment != nil { + in, out := &in.DetectSentiment, &out.DetectSentiment + *out = new(bool) + **out = **in + } + if in.EnableModelImprovements != nil { + in, out := &in.EnableModelImprovements, &out.EnableModelImprovements + *out = new(bool) + **out = **in + } + if in.IdleSessionTTLInSeconds != nil { + in, out := &in.IdleSessionTTLInSeconds, &out.IdleSessionTTLInSeconds + *out = new(float64) + **out = **in + } + if in.Intent != nil { + in, out := &in.Intent, &out.Intent + *out = make([]IntentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Locale != nil { + in, out := &in.Locale, &out.Locale + *out = new(string) + **out = **in + } + if in.NluIntentConfidenceThreshold != nil { + in, out := &in.NluIntentConfidenceThreshold, &out.NluIntentConfidenceThreshold + *out = new(float64) + **out = **in + } + if in.ProcessBehavior != nil { + in, out := &in.ProcessBehavior, &out.ProcessBehavior + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.VoiceID != nil { + in, out := &in.VoiceID, &out.VoiceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new BotParameters. +func (in *BotParameters) DeepCopy() *BotParameters { + if in == nil { + return nil + } + out := new(BotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotSpec) DeepCopyInto(out *BotSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotSpec. +func (in *BotSpec) DeepCopy() *BotSpec { + if in == nil { + return nil + } + out := new(BotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BotStatus) DeepCopyInto(out *BotStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BotStatus. +func (in *BotStatus) DeepCopy() *BotStatus { + if in == nil { + return nil + } + out := new(BotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClarificationPromptInitParameters) DeepCopyInto(out *ClarificationPromptInitParameters) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ClarificationPromptMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClarificationPromptInitParameters. +func (in *ClarificationPromptInitParameters) DeepCopy() *ClarificationPromptInitParameters { + if in == nil { + return nil + } + out := new(ClarificationPromptInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClarificationPromptMessageInitParameters) DeepCopyInto(out *ClarificationPromptMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClarificationPromptMessageInitParameters. +func (in *ClarificationPromptMessageInitParameters) DeepCopy() *ClarificationPromptMessageInitParameters { + if in == nil { + return nil + } + out := new(ClarificationPromptMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClarificationPromptMessageObservation) DeepCopyInto(out *ClarificationPromptMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClarificationPromptMessageObservation. +func (in *ClarificationPromptMessageObservation) DeepCopy() *ClarificationPromptMessageObservation { + if in == nil { + return nil + } + out := new(ClarificationPromptMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClarificationPromptMessageParameters) DeepCopyInto(out *ClarificationPromptMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClarificationPromptMessageParameters. +func (in *ClarificationPromptMessageParameters) DeepCopy() *ClarificationPromptMessageParameters { + if in == nil { + return nil + } + out := new(ClarificationPromptMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClarificationPromptObservation) DeepCopyInto(out *ClarificationPromptObservation) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ClarificationPromptMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClarificationPromptObservation. +func (in *ClarificationPromptObservation) DeepCopy() *ClarificationPromptObservation { + if in == nil { + return nil + } + out := new(ClarificationPromptObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClarificationPromptParameters) DeepCopyInto(out *ClarificationPromptParameters) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ClarificationPromptMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClarificationPromptParameters. +func (in *ClarificationPromptParameters) DeepCopy() *ClarificationPromptParameters { + if in == nil { + return nil + } + out := new(ClarificationPromptParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeHookInitParameters) DeepCopyInto(out *CodeHookInitParameters) { + *out = *in + if in.MessageVersion != nil { + in, out := &in.MessageVersion, &out.MessageVersion + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeHookInitParameters. +func (in *CodeHookInitParameters) DeepCopy() *CodeHookInitParameters { + if in == nil { + return nil + } + out := new(CodeHookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeHookObservation) DeepCopyInto(out *CodeHookObservation) { + *out = *in + if in.MessageVersion != nil { + in, out := &in.MessageVersion, &out.MessageVersion + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeHookObservation. +func (in *CodeHookObservation) DeepCopy() *CodeHookObservation { + if in == nil { + return nil + } + out := new(CodeHookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeHookParameters) DeepCopyInto(out *CodeHookParameters) { + *out = *in + if in.MessageVersion != nil { + in, out := &in.MessageVersion, &out.MessageVersion + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeHookParameters. 
+func (in *CodeHookParameters) DeepCopy() *CodeHookParameters { + if in == nil { + return nil + } + out := new(CodeHookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConclusionStatementInitParameters) DeepCopyInto(out *ConclusionStatementInitParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ConclusionStatementMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConclusionStatementInitParameters. +func (in *ConclusionStatementInitParameters) DeepCopy() *ConclusionStatementInitParameters { + if in == nil { + return nil + } + out := new(ConclusionStatementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConclusionStatementMessageInitParameters) DeepCopyInto(out *ConclusionStatementMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConclusionStatementMessageInitParameters. 
+func (in *ConclusionStatementMessageInitParameters) DeepCopy() *ConclusionStatementMessageInitParameters { + if in == nil { + return nil + } + out := new(ConclusionStatementMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConclusionStatementMessageObservation) DeepCopyInto(out *ConclusionStatementMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConclusionStatementMessageObservation. +func (in *ConclusionStatementMessageObservation) DeepCopy() *ConclusionStatementMessageObservation { + if in == nil { + return nil + } + out := new(ConclusionStatementMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConclusionStatementMessageParameters) DeepCopyInto(out *ConclusionStatementMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConclusionStatementMessageParameters. 
+func (in *ConclusionStatementMessageParameters) DeepCopy() *ConclusionStatementMessageParameters { + if in == nil { + return nil + } + out := new(ConclusionStatementMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConclusionStatementObservation) DeepCopyInto(out *ConclusionStatementObservation) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ConclusionStatementMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConclusionStatementObservation. +func (in *ConclusionStatementObservation) DeepCopy() *ConclusionStatementObservation { + if in == nil { + return nil + } + out := new(ConclusionStatementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConclusionStatementParameters) DeepCopyInto(out *ConclusionStatementParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ConclusionStatementMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConclusionStatementParameters. 
+func (in *ConclusionStatementParameters) DeepCopy() *ConclusionStatementParameters { + if in == nil { + return nil + } + out := new(ConclusionStatementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfirmationPromptInitParameters) DeepCopyInto(out *ConfirmationPromptInitParameters) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ConfirmationPromptMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfirmationPromptInitParameters. +func (in *ConfirmationPromptInitParameters) DeepCopy() *ConfirmationPromptInitParameters { + if in == nil { + return nil + } + out := new(ConfirmationPromptInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfirmationPromptMessageInitParameters) DeepCopyInto(out *ConfirmationPromptMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfirmationPromptMessageInitParameters. 
+func (in *ConfirmationPromptMessageInitParameters) DeepCopy() *ConfirmationPromptMessageInitParameters { + if in == nil { + return nil + } + out := new(ConfirmationPromptMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfirmationPromptMessageObservation) DeepCopyInto(out *ConfirmationPromptMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfirmationPromptMessageObservation. +func (in *ConfirmationPromptMessageObservation) DeepCopy() *ConfirmationPromptMessageObservation { + if in == nil { + return nil + } + out := new(ConfirmationPromptMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfirmationPromptMessageParameters) DeepCopyInto(out *ConfirmationPromptMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfirmationPromptMessageParameters. 
+func (in *ConfirmationPromptMessageParameters) DeepCopy() *ConfirmationPromptMessageParameters { + if in == nil { + return nil + } + out := new(ConfirmationPromptMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfirmationPromptObservation) DeepCopyInto(out *ConfirmationPromptObservation) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ConfirmationPromptMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfirmationPromptObservation. +func (in *ConfirmationPromptObservation) DeepCopy() *ConfirmationPromptObservation { + if in == nil { + return nil + } + out := new(ConfirmationPromptObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfirmationPromptParameters) DeepCopyInto(out *ConfirmationPromptParameters) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ConfirmationPromptMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfirmationPromptParameters. 
+func (in *ConfirmationPromptParameters) DeepCopy() *ConfirmationPromptParameters { + if in == nil { + return nil + } + out := new(ConfirmationPromptParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversationLogsInitParameters) DeepCopyInto(out *ConversationLogsInitParameters) { + *out = *in + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.LogSettings != nil { + in, out := &in.LogSettings, &out.LogSettings + *out = make([]LogSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversationLogsInitParameters. +func (in *ConversationLogsInitParameters) DeepCopy() *ConversationLogsInitParameters { + if in == nil { + return nil + } + out := new(ConversationLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversationLogsObservation) DeepCopyInto(out *ConversationLogsObservation) { + *out = *in + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.LogSettings != nil { + in, out := &in.LogSettings, &out.LogSettings + *out = make([]LogSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversationLogsObservation. 
+func (in *ConversationLogsObservation) DeepCopy() *ConversationLogsObservation { + if in == nil { + return nil + } + out := new(ConversationLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversationLogsParameters) DeepCopyInto(out *ConversationLogsParameters) { + *out = *in + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.LogSettings != nil { + in, out := &in.LogSettings, &out.LogSettings + *out = make([]LogSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversationLogsParameters. +func (in *ConversationLogsParameters) DeepCopy() *ConversationLogsParameters { + if in == nil { + return nil + } + out := new(ConversationLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DialogCodeHookInitParameters) DeepCopyInto(out *DialogCodeHookInitParameters) { + *out = *in + if in.MessageVersion != nil { + in, out := &in.MessageVersion, &out.MessageVersion + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DialogCodeHookInitParameters. +func (in *DialogCodeHookInitParameters) DeepCopy() *DialogCodeHookInitParameters { + if in == nil { + return nil + } + out := new(DialogCodeHookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DialogCodeHookObservation) DeepCopyInto(out *DialogCodeHookObservation) { + *out = *in + if in.MessageVersion != nil { + in, out := &in.MessageVersion, &out.MessageVersion + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DialogCodeHookObservation. +func (in *DialogCodeHookObservation) DeepCopy() *DialogCodeHookObservation { + if in == nil { + return nil + } + out := new(DialogCodeHookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DialogCodeHookParameters) DeepCopyInto(out *DialogCodeHookParameters) { + *out = *in + if in.MessageVersion != nil { + in, out := &in.MessageVersion, &out.MessageVersion + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DialogCodeHookParameters. +func (in *DialogCodeHookParameters) DeepCopy() *DialogCodeHookParameters { + if in == nil { + return nil + } + out := new(DialogCodeHookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FollowUpPromptInitParameters) DeepCopyInto(out *FollowUpPromptInitParameters) { + *out = *in + if in.Prompt != nil { + in, out := &in.Prompt, &out.Prompt + *out = new(PromptInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RejectionStatement != nil { + in, out := &in.RejectionStatement, &out.RejectionStatement + *out = new(RejectionStatementInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FollowUpPromptInitParameters. +func (in *FollowUpPromptInitParameters) DeepCopy() *FollowUpPromptInitParameters { + if in == nil { + return nil + } + out := new(FollowUpPromptInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FollowUpPromptObservation) DeepCopyInto(out *FollowUpPromptObservation) { + *out = *in + if in.Prompt != nil { + in, out := &in.Prompt, &out.Prompt + *out = new(PromptObservation) + (*in).DeepCopyInto(*out) + } + if in.RejectionStatement != nil { + in, out := &in.RejectionStatement, &out.RejectionStatement + *out = new(RejectionStatementObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FollowUpPromptObservation. +func (in *FollowUpPromptObservation) DeepCopy() *FollowUpPromptObservation { + if in == nil { + return nil + } + out := new(FollowUpPromptObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FollowUpPromptParameters) DeepCopyInto(out *FollowUpPromptParameters) { + *out = *in + if in.Prompt != nil { + in, out := &in.Prompt, &out.Prompt + *out = new(PromptParameters) + (*in).DeepCopyInto(*out) + } + if in.RejectionStatement != nil { + in, out := &in.RejectionStatement, &out.RejectionStatement + *out = new(RejectionStatementParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FollowUpPromptParameters. +func (in *FollowUpPromptParameters) DeepCopy() *FollowUpPromptParameters { + if in == nil { + return nil + } + out := new(FollowUpPromptParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FulfillmentActivityInitParameters) DeepCopyInto(out *FulfillmentActivityInitParameters) { + *out = *in + if in.CodeHook != nil { + in, out := &in.CodeHook, &out.CodeHook + *out = new(CodeHookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulfillmentActivityInitParameters. +func (in *FulfillmentActivityInitParameters) DeepCopy() *FulfillmentActivityInitParameters { + if in == nil { + return nil + } + out := new(FulfillmentActivityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FulfillmentActivityObservation) DeepCopyInto(out *FulfillmentActivityObservation) { + *out = *in + if in.CodeHook != nil { + in, out := &in.CodeHook, &out.CodeHook + *out = new(CodeHookObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulfillmentActivityObservation. +func (in *FulfillmentActivityObservation) DeepCopy() *FulfillmentActivityObservation { + if in == nil { + return nil + } + out := new(FulfillmentActivityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FulfillmentActivityParameters) DeepCopyInto(out *FulfillmentActivityParameters) { + *out = *in + if in.CodeHook != nil { + in, out := &in.CodeHook, &out.CodeHook + *out = new(CodeHookParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulfillmentActivityParameters. +func (in *FulfillmentActivityParameters) DeepCopy() *FulfillmentActivityParameters { + if in == nil { + return nil + } + out := new(FulfillmentActivityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Intent) DeepCopyInto(out *Intent) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Intent. 
+func (in *Intent) DeepCopy() *Intent { + if in == nil { + return nil + } + out := new(Intent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Intent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentInitParameters) DeepCopyInto(out *IntentInitParameters) { + *out = *in + if in.IntentName != nil { + in, out := &in.IntentName, &out.IntentName + *out = new(string) + **out = **in + } + if in.IntentVersion != nil { + in, out := &in.IntentVersion, &out.IntentVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentInitParameters. +func (in *IntentInitParameters) DeepCopy() *IntentInitParameters { + if in == nil { + return nil + } + out := new(IntentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntentInitParameters_2) DeepCopyInto(out *IntentInitParameters_2) { + *out = *in + if in.ConclusionStatement != nil { + in, out := &in.ConclusionStatement, &out.ConclusionStatement + *out = new(ConclusionStatementInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfirmationPrompt != nil { + in, out := &in.ConfirmationPrompt, &out.ConfirmationPrompt + *out = new(ConfirmationPromptInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CreateVersion != nil { + in, out := &in.CreateVersion, &out.CreateVersion + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DialogCodeHook != nil { + in, out := &in.DialogCodeHook, &out.DialogCodeHook + *out = new(DialogCodeHookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FollowUpPrompt != nil { + in, out := &in.FollowUpPrompt, &out.FollowUpPrompt + *out = new(FollowUpPromptInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FulfillmentActivity != nil { + in, out := &in.FulfillmentActivity, &out.FulfillmentActivity + *out = new(FulfillmentActivityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ParentIntentSignature != nil { + in, out := &in.ParentIntentSignature, &out.ParentIntentSignature + *out = new(string) + **out = **in + } + if in.RejectionStatement != nil { + in, out := &in.RejectionStatement, &out.RejectionStatement + *out = new(IntentRejectionStatementInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SampleUtterances != nil { + in, out := &in.SampleUtterances, &out.SampleUtterances + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Slot != nil { + in, out := &in.Slot, &out.Slot + *out = make([]SlotInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new IntentInitParameters_2. +func (in *IntentInitParameters_2) DeepCopy() *IntentInitParameters_2 { + if in == nil { + return nil + } + out := new(IntentInitParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentList) DeepCopyInto(out *IntentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Intent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentList. +func (in *IntentList) DeepCopy() *IntentList { + if in == nil { + return nil + } + out := new(IntentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IntentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentObservation) DeepCopyInto(out *IntentObservation) { + *out = *in + if in.IntentName != nil { + in, out := &in.IntentName, &out.IntentName + *out = new(string) + **out = **in + } + if in.IntentVersion != nil { + in, out := &in.IntentVersion, &out.IntentVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentObservation. +func (in *IntentObservation) DeepCopy() *IntentObservation { + if in == nil { + return nil + } + out := new(IntentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntentObservation_2) DeepCopyInto(out *IntentObservation_2) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Checksum != nil { + in, out := &in.Checksum, &out.Checksum + *out = new(string) + **out = **in + } + if in.ConclusionStatement != nil { + in, out := &in.ConclusionStatement, &out.ConclusionStatement + *out = new(ConclusionStatementObservation) + (*in).DeepCopyInto(*out) + } + if in.ConfirmationPrompt != nil { + in, out := &in.ConfirmationPrompt, &out.ConfirmationPrompt + *out = new(ConfirmationPromptObservation) + (*in).DeepCopyInto(*out) + } + if in.CreateVersion != nil { + in, out := &in.CreateVersion, &out.CreateVersion + *out = new(bool) + **out = **in + } + if in.CreatedDate != nil { + in, out := &in.CreatedDate, &out.CreatedDate + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DialogCodeHook != nil { + in, out := &in.DialogCodeHook, &out.DialogCodeHook + *out = new(DialogCodeHookObservation) + (*in).DeepCopyInto(*out) + } + if in.FollowUpPrompt != nil { + in, out := &in.FollowUpPrompt, &out.FollowUpPrompt + *out = new(FollowUpPromptObservation) + (*in).DeepCopyInto(*out) + } + if in.FulfillmentActivity != nil { + in, out := &in.FulfillmentActivity, &out.FulfillmentActivity + *out = new(FulfillmentActivityObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LastUpdatedDate != nil { + in, out := &in.LastUpdatedDate, &out.LastUpdatedDate + *out = new(string) + **out = **in + } + if in.ParentIntentSignature != nil { + in, out := &in.ParentIntentSignature, &out.ParentIntentSignature + *out = new(string) + **out = **in + } + if in.RejectionStatement != nil { + in, out := &in.RejectionStatement, &out.RejectionStatement + *out = new(IntentRejectionStatementObservation) + 
(*in).DeepCopyInto(*out) + } + if in.SampleUtterances != nil { + in, out := &in.SampleUtterances, &out.SampleUtterances + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Slot != nil { + in, out := &in.Slot, &out.Slot + *out = make([]SlotObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentObservation_2. +func (in *IntentObservation_2) DeepCopy() *IntentObservation_2 { + if in == nil { + return nil + } + out := new(IntentObservation_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentParameters) DeepCopyInto(out *IntentParameters) { + *out = *in + if in.IntentName != nil { + in, out := &in.IntentName, &out.IntentName + *out = new(string) + **out = **in + } + if in.IntentVersion != nil { + in, out := &in.IntentVersion, &out.IntentVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentParameters. +func (in *IntentParameters) DeepCopy() *IntentParameters { + if in == nil { + return nil + } + out := new(IntentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntentParameters_2) DeepCopyInto(out *IntentParameters_2) { + *out = *in + if in.ConclusionStatement != nil { + in, out := &in.ConclusionStatement, &out.ConclusionStatement + *out = new(ConclusionStatementParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfirmationPrompt != nil { + in, out := &in.ConfirmationPrompt, &out.ConfirmationPrompt + *out = new(ConfirmationPromptParameters) + (*in).DeepCopyInto(*out) + } + if in.CreateVersion != nil { + in, out := &in.CreateVersion, &out.CreateVersion + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DialogCodeHook != nil { + in, out := &in.DialogCodeHook, &out.DialogCodeHook + *out = new(DialogCodeHookParameters) + (*in).DeepCopyInto(*out) + } + if in.FollowUpPrompt != nil { + in, out := &in.FollowUpPrompt, &out.FollowUpPrompt + *out = new(FollowUpPromptParameters) + (*in).DeepCopyInto(*out) + } + if in.FulfillmentActivity != nil { + in, out := &in.FulfillmentActivity, &out.FulfillmentActivity + *out = new(FulfillmentActivityParameters) + (*in).DeepCopyInto(*out) + } + if in.ParentIntentSignature != nil { + in, out := &in.ParentIntentSignature, &out.ParentIntentSignature + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RejectionStatement != nil { + in, out := &in.RejectionStatement, &out.RejectionStatement + *out = new(IntentRejectionStatementParameters) + (*in).DeepCopyInto(*out) + } + if in.SampleUtterances != nil { + in, out := &in.SampleUtterances, &out.SampleUtterances + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Slot != nil { + in, out := &in.Slot, &out.Slot + *out = make([]SlotParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + 
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentParameters_2. +func (in *IntentParameters_2) DeepCopy() *IntentParameters_2 { + if in == nil { + return nil + } + out := new(IntentParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentRejectionStatementInitParameters) DeepCopyInto(out *IntentRejectionStatementInitParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]IntentRejectionStatementMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentRejectionStatementInitParameters. +func (in *IntentRejectionStatementInitParameters) DeepCopy() *IntentRejectionStatementInitParameters { + if in == nil { + return nil + } + out := new(IntentRejectionStatementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentRejectionStatementMessageInitParameters) DeepCopyInto(out *IntentRejectionStatementMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentRejectionStatementMessageInitParameters. 
+func (in *IntentRejectionStatementMessageInitParameters) DeepCopy() *IntentRejectionStatementMessageInitParameters { + if in == nil { + return nil + } + out := new(IntentRejectionStatementMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentRejectionStatementMessageObservation) DeepCopyInto(out *IntentRejectionStatementMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentRejectionStatementMessageObservation. +func (in *IntentRejectionStatementMessageObservation) DeepCopy() *IntentRejectionStatementMessageObservation { + if in == nil { + return nil + } + out := new(IntentRejectionStatementMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentRejectionStatementMessageParameters) DeepCopyInto(out *IntentRejectionStatementMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentRejectionStatementMessageParameters. 
+func (in *IntentRejectionStatementMessageParameters) DeepCopy() *IntentRejectionStatementMessageParameters { + if in == nil { + return nil + } + out := new(IntentRejectionStatementMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentRejectionStatementObservation) DeepCopyInto(out *IntentRejectionStatementObservation) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]IntentRejectionStatementMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentRejectionStatementObservation. +func (in *IntentRejectionStatementObservation) DeepCopy() *IntentRejectionStatementObservation { + if in == nil { + return nil + } + out := new(IntentRejectionStatementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentRejectionStatementParameters) DeepCopyInto(out *IntentRejectionStatementParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]IntentRejectionStatementMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentRejectionStatementParameters. 
+func (in *IntentRejectionStatementParameters) DeepCopy() *IntentRejectionStatementParameters { + if in == nil { + return nil + } + out := new(IntentRejectionStatementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentSpec) DeepCopyInto(out *IntentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentSpec. +func (in *IntentSpec) DeepCopy() *IntentSpec { + if in == nil { + return nil + } + out := new(IntentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntentStatus) DeepCopyInto(out *IntentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntentStatus. +func (in *IntentStatus) DeepCopy() *IntentStatus { + if in == nil { + return nil + } + out := new(IntentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogSettingsInitParameters) DeepCopyInto(out *LogSettingsInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogSettingsInitParameters. +func (in *LogSettingsInitParameters) DeepCopy() *LogSettingsInitParameters { + if in == nil { + return nil + } + out := new(LogSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogSettingsObservation) DeepCopyInto(out *LogSettingsObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourcePrefix != nil { + in, out := &in.ResourcePrefix, &out.ResourcePrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogSettingsObservation. 
+func (in *LogSettingsObservation) DeepCopy() *LogSettingsObservation { + if in == nil { + return nil + } + out := new(LogSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogSettingsParameters) DeepCopyInto(out *LogSettingsParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogSettingsParameters. +func (in *LogSettingsParameters) DeepCopy() *LogSettingsParameters { + if in == nil { + return nil + } + out := new(LogSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MessageInitParameters) DeepCopyInto(out *MessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageInitParameters. 
+func (in *MessageInitParameters) DeepCopy() *MessageInitParameters { + if in == nil { + return nil + } + out := new(MessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MessageObservation) DeepCopyInto(out *MessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageObservation. +func (in *MessageObservation) DeepCopy() *MessageObservation { + if in == nil { + return nil + } + out := new(MessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MessageParameters) DeepCopyInto(out *MessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageParameters. +func (in *MessageParameters) DeepCopy() *MessageParameters { + if in == nil { + return nil + } + out := new(MessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PromptInitParameters) DeepCopyInto(out *PromptInitParameters) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]PromptMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromptInitParameters. +func (in *PromptInitParameters) DeepCopy() *PromptInitParameters { + if in == nil { + return nil + } + out := new(PromptInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromptMessageInitParameters) DeepCopyInto(out *PromptMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromptMessageInitParameters. +func (in *PromptMessageInitParameters) DeepCopy() *PromptMessageInitParameters { + if in == nil { + return nil + } + out := new(PromptMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PromptMessageObservation) DeepCopyInto(out *PromptMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromptMessageObservation. +func (in *PromptMessageObservation) DeepCopy() *PromptMessageObservation { + if in == nil { + return nil + } + out := new(PromptMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromptMessageParameters) DeepCopyInto(out *PromptMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromptMessageParameters. +func (in *PromptMessageParameters) DeepCopy() *PromptMessageParameters { + if in == nil { + return nil + } + out := new(PromptMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PromptObservation) DeepCopyInto(out *PromptObservation) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]PromptMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromptObservation. +func (in *PromptObservation) DeepCopy() *PromptObservation { + if in == nil { + return nil + } + out := new(PromptObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromptParameters) DeepCopyInto(out *PromptParameters) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]PromptMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromptParameters. +func (in *PromptParameters) DeepCopy() *PromptParameters { + if in == nil { + return nil + } + out := new(PromptParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RejectionStatementInitParameters) DeepCopyInto(out *RejectionStatementInitParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]RejectionStatementMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RejectionStatementInitParameters. +func (in *RejectionStatementInitParameters) DeepCopy() *RejectionStatementInitParameters { + if in == nil { + return nil + } + out := new(RejectionStatementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RejectionStatementMessageInitParameters) DeepCopyInto(out *RejectionStatementMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RejectionStatementMessageInitParameters. +func (in *RejectionStatementMessageInitParameters) DeepCopy() *RejectionStatementMessageInitParameters { + if in == nil { + return nil + } + out := new(RejectionStatementMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RejectionStatementMessageObservation) DeepCopyInto(out *RejectionStatementMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RejectionStatementMessageObservation. +func (in *RejectionStatementMessageObservation) DeepCopy() *RejectionStatementMessageObservation { + if in == nil { + return nil + } + out := new(RejectionStatementMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RejectionStatementMessageParameters) DeepCopyInto(out *RejectionStatementMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RejectionStatementMessageParameters. +func (in *RejectionStatementMessageParameters) DeepCopy() *RejectionStatementMessageParameters { + if in == nil { + return nil + } + out := new(RejectionStatementMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RejectionStatementObservation) DeepCopyInto(out *RejectionStatementObservation) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]RejectionStatementMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RejectionStatementObservation. +func (in *RejectionStatementObservation) DeepCopy() *RejectionStatementObservation { + if in == nil { + return nil + } + out := new(RejectionStatementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RejectionStatementParameters) DeepCopyInto(out *RejectionStatementParameters) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]RejectionStatementMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RejectionStatementParameters. +func (in *RejectionStatementParameters) DeepCopy() *RejectionStatementParameters { + if in == nil { + return nil + } + out := new(RejectionStatementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlotInitParameters) DeepCopyInto(out *SlotInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } + if in.SampleUtterances != nil { + in, out := &in.SampleUtterances, &out.SampleUtterances + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SlotConstraint != nil { + in, out := &in.SlotConstraint, &out.SlotConstraint + *out = new(string) + **out = **in + } + if in.SlotType != nil { + in, out := &in.SlotType, &out.SlotType + *out = new(string) + **out = **in + } + if in.SlotTypeVersion != nil { + in, out := &in.SlotTypeVersion, &out.SlotTypeVersion + *out = new(string) + **out = **in + } + if in.ValueElicitationPrompt != nil { + in, out := &in.ValueElicitationPrompt, &out.ValueElicitationPrompt + *out = new(ValueElicitationPromptInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlotInitParameters. +func (in *SlotInitParameters) DeepCopy() *SlotInitParameters { + if in == nil { + return nil + } + out := new(SlotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlotObservation) DeepCopyInto(out *SlotObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } + if in.SampleUtterances != nil { + in, out := &in.SampleUtterances, &out.SampleUtterances + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SlotConstraint != nil { + in, out := &in.SlotConstraint, &out.SlotConstraint + *out = new(string) + **out = **in + } + if in.SlotType != nil { + in, out := &in.SlotType, &out.SlotType + *out = new(string) + **out = **in + } + if in.SlotTypeVersion != nil { + in, out := &in.SlotTypeVersion, &out.SlotTypeVersion + *out = new(string) + **out = **in + } + if in.ValueElicitationPrompt != nil { + in, out := &in.ValueElicitationPrompt, &out.ValueElicitationPrompt + *out = new(ValueElicitationPromptObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlotObservation. +func (in *SlotObservation) DeepCopy() *SlotObservation { + if in == nil { + return nil + } + out := new(SlotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlotParameters) DeepCopyInto(out *SlotParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } + if in.SampleUtterances != nil { + in, out := &in.SampleUtterances, &out.SampleUtterances + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SlotConstraint != nil { + in, out := &in.SlotConstraint, &out.SlotConstraint + *out = new(string) + **out = **in + } + if in.SlotType != nil { + in, out := &in.SlotType, &out.SlotType + *out = new(string) + **out = **in + } + if in.SlotTypeVersion != nil { + in, out := &in.SlotTypeVersion, &out.SlotTypeVersion + *out = new(string) + **out = **in + } + if in.ValueElicitationPrompt != nil { + in, out := &in.ValueElicitationPrompt, &out.ValueElicitationPrompt + *out = new(ValueElicitationPromptParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlotParameters. +func (in *SlotParameters) DeepCopy() *SlotParameters { + if in == nil { + return nil + } + out := new(SlotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValueElicitationPromptInitParameters) DeepCopyInto(out *ValueElicitationPromptInitParameters) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ValueElicitationPromptMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueElicitationPromptInitParameters. +func (in *ValueElicitationPromptInitParameters) DeepCopy() *ValueElicitationPromptInitParameters { + if in == nil { + return nil + } + out := new(ValueElicitationPromptInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueElicitationPromptMessageInitParameters) DeepCopyInto(out *ValueElicitationPromptMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueElicitationPromptMessageInitParameters. +func (in *ValueElicitationPromptMessageInitParameters) DeepCopy() *ValueElicitationPromptMessageInitParameters { + if in == nil { + return nil + } + out := new(ValueElicitationPromptMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ValueElicitationPromptMessageObservation) DeepCopyInto(out *ValueElicitationPromptMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueElicitationPromptMessageObservation. +func (in *ValueElicitationPromptMessageObservation) DeepCopy() *ValueElicitationPromptMessageObservation { + if in == nil { + return nil + } + out := new(ValueElicitationPromptMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueElicitationPromptMessageParameters) DeepCopyInto(out *ValueElicitationPromptMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.GroupNumber != nil { + in, out := &in.GroupNumber, &out.GroupNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueElicitationPromptMessageParameters. +func (in *ValueElicitationPromptMessageParameters) DeepCopy() *ValueElicitationPromptMessageParameters { + if in == nil { + return nil + } + out := new(ValueElicitationPromptMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValueElicitationPromptObservation) DeepCopyInto(out *ValueElicitationPromptObservation) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ValueElicitationPromptMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueElicitationPromptObservation. +func (in *ValueElicitationPromptObservation) DeepCopy() *ValueElicitationPromptObservation { + if in == nil { + return nil + } + out := new(ValueElicitationPromptObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueElicitationPromptParameters) DeepCopyInto(out *ValueElicitationPromptParameters) { + *out = *in + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = make([]ValueElicitationPromptMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseCard != nil { + in, out := &in.ResponseCard, &out.ResponseCard + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueElicitationPromptParameters. 
+func (in *ValueElicitationPromptParameters) DeepCopy() *ValueElicitationPromptParameters { + if in == nil { + return nil + } + out := new(ValueElicitationPromptParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/lexmodels/v1beta2/zz_generated.managed.go b/apis/lexmodels/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..2dd2f2bdf7 --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Bot. +func (mg *Bot) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Bot. +func (mg *Bot) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Bot. +func (mg *Bot) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Bot. +func (mg *Bot) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Bot. +func (mg *Bot) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Bot. +func (mg *Bot) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Bot. +func (mg *Bot) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Bot. +func (mg *Bot) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Bot. 
+func (mg *Bot) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Bot. +func (mg *Bot) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Bot. +func (mg *Bot) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Bot. +func (mg *Bot) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BotAlias. +func (mg *BotAlias) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BotAlias. +func (mg *BotAlias) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BotAlias. +func (mg *BotAlias) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BotAlias. +func (mg *BotAlias) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BotAlias. +func (mg *BotAlias) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BotAlias. +func (mg *BotAlias) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BotAlias. +func (mg *BotAlias) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BotAlias. +func (mg *BotAlias) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BotAlias. 
+func (mg *BotAlias) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BotAlias. +func (mg *BotAlias) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BotAlias. +func (mg *BotAlias) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BotAlias. +func (mg *BotAlias) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Intent. +func (mg *Intent) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Intent. +func (mg *Intent) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Intent. +func (mg *Intent) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Intent. +func (mg *Intent) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Intent. +func (mg *Intent) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Intent. +func (mg *Intent) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Intent. +func (mg *Intent) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Intent. +func (mg *Intent) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Intent. 
+func (mg *Intent) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Intent. +func (mg *Intent) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Intent. +func (mg *Intent) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Intent. +func (mg *Intent) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/lexmodels/v1beta2/zz_generated.managedlist.go b/apis/lexmodels/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..bbf31f0b08 --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this BotAliasList. +func (l *BotAliasList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BotList. +func (l *BotList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IntentList. 
+func (l *IntentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/lexmodels/v1beta2/zz_groupversion_info.go b/apis/lexmodels/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..44b45d535d --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=lexmodels.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "lexmodels.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/lexmodels/v1beta2/zz_intent_terraformed.go b/apis/lexmodels/v1beta2/zz_intent_terraformed.go new file mode 100755 index 0000000000..a33d76c9bd --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_intent_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Intent +func (mg *Intent) GetTerraformResourceType() string { + return "aws_lex_intent" +} + +// GetConnectionDetailsMapping for this Intent +func (tr *Intent) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Intent +func (tr *Intent) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Intent +func (tr *Intent) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Intent +func (tr *Intent) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Intent +func (tr *Intent) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Intent +func (tr *Intent) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Intent +func (tr *Intent) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this 
Intent +func (tr *Intent) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Intent using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Intent) LateInitialize(attrs []byte) (bool, error) { + params := &IntentParameters_2{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Intent) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lexmodels/v1beta2/zz_intent_types.go b/apis/lexmodels/v1beta2/zz_intent_types.go new file mode 100755 index 0000000000..cfabdd2291 --- /dev/null +++ b/apis/lexmodels/v1beta2/zz_intent_types.go @@ -0,0 +1,1080 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CodeHookInitParameters struct { + + // The version of the request-response that you want Amazon Lex to use + // to invoke your Lambda function. For more information, see + // Using Lambda Functions. Must be less than or equal to 5 characters in length. + MessageVersion *string `json:"messageVersion,omitempty" tf:"message_version,omitempty"` + + // The Amazon Resource Name (ARN) of the Lambda function. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type CodeHookObservation struct { + + // The version of the request-response that you want Amazon Lex to use + // to invoke your Lambda function. For more information, see + // Using Lambda Functions. Must be less than or equal to 5 characters in length. + MessageVersion *string `json:"messageVersion,omitempty" tf:"message_version,omitempty"` + + // The Amazon Resource Name (ARN) of the Lambda function. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type CodeHookParameters struct { + + // The version of the request-response that you want Amazon Lex to use + // to invoke your Lambda function. For more information, see + // Using Lambda Functions. Must be less than or equal to 5 characters in length. 
+ // +kubebuilder:validation:Optional + MessageVersion *string `json:"messageVersion" tf:"message_version,omitempty"` + + // The Amazon Resource Name (ARN) of the Lambda function. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type ConclusionStatementInitParameters struct { + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []ConclusionStatementMessageInitParameters `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type ConclusionStatementMessageInitParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ConclusionStatementMessageObservation struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. 
+ ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ConclusionStatementMessageParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The content type of the message string. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + // +kubebuilder:validation:Optional + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ConclusionStatementObservation struct { + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []ConclusionStatementMessageObservation `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type ConclusionStatementParameters struct { + + // A set of messages, each of which provides a message string and its type. 
+ // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + // +kubebuilder:validation:Optional + Message []ConclusionStatementMessageParameters `json:"message" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + // +kubebuilder:validation:Optional + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type ConfirmationPromptInitParameters struct { + + // The number of times to prompt the user for information. Must be a number between 1 and 5 (inclusive). + MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []ConfirmationPromptMessageInitParameters `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type ConfirmationPromptMessageInitParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. 
+ ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ConfirmationPromptMessageObservation struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ConfirmationPromptMessageParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The content type of the message string. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + // +kubebuilder:validation:Optional + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ConfirmationPromptObservation struct { + + // The number of times to prompt the user for information. Must be a number between 1 and 5 (inclusive). 
+ MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []ConfirmationPromptMessageObservation `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type ConfirmationPromptParameters struct { + + // The number of times to prompt the user for information. Must be a number between 1 and 5 (inclusive). + // +kubebuilder:validation:Optional + MaxAttempts *float64 `json:"maxAttempts" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + // +kubebuilder:validation:Optional + Message []ConfirmationPromptMessageParameters `json:"message" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + // +kubebuilder:validation:Optional + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type DialogCodeHookInitParameters struct { + + // The version of the request-response that you want Amazon Lex to use + // to invoke your Lambda function. 
For more information, see + // Using Lambda Functions. Must be less than or equal to 5 characters in length. + MessageVersion *string `json:"messageVersion,omitempty" tf:"message_version,omitempty"` + + // The Amazon Resource Name (ARN) of the Lambda function. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type DialogCodeHookObservation struct { + + // The version of the request-response that you want Amazon Lex to use + // to invoke your Lambda function. For more information, see + // Using Lambda Functions. Must be less than or equal to 5 characters in length. + MessageVersion *string `json:"messageVersion,omitempty" tf:"message_version,omitempty"` + + // The Amazon Resource Name (ARN) of the Lambda function. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type DialogCodeHookParameters struct { + + // The version of the request-response that you want Amazon Lex to use + // to invoke your Lambda function. For more information, see + // Using Lambda Functions. Must be less than or equal to 5 characters in length. + // +kubebuilder:validation:Optional + MessageVersion *string `json:"messageVersion" tf:"message_version,omitempty"` + + // The Amazon Resource Name (ARN) of the Lambda function. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type FollowUpPromptInitParameters struct { + + // Prompts for information from the user. Attributes are documented under prompt. + Prompt *PromptInitParameters `json:"prompt,omitempty" tf:"prompt,omitempty"` + + // If the user answers "no" to the question defined in the prompt field, + // Amazon Lex responds with this statement to acknowledge that the intent was canceled. Attributes are + // documented below under statement. + RejectionStatement *RejectionStatementInitParameters `json:"rejectionStatement,omitempty" tf:"rejection_statement,omitempty"` +} + +type FollowUpPromptObservation struct { + + // Prompts for information from the user. 
Attributes are documented under prompt. + Prompt *PromptObservation `json:"prompt,omitempty" tf:"prompt,omitempty"` + + // If the user answers "no" to the question defined in the prompt field, + // Amazon Lex responds with this statement to acknowledge that the intent was canceled. Attributes are + // documented below under statement. + RejectionStatement *RejectionStatementObservation `json:"rejectionStatement,omitempty" tf:"rejection_statement,omitempty"` +} + +type FollowUpPromptParameters struct { + + // Prompts for information from the user. Attributes are documented under prompt. + // +kubebuilder:validation:Optional + Prompt *PromptParameters `json:"prompt" tf:"prompt,omitempty"` + + // If the user answers "no" to the question defined in the prompt field, + // Amazon Lex responds with this statement to acknowledge that the intent was canceled. Attributes are + // documented below under statement. + // +kubebuilder:validation:Optional + RejectionStatement *RejectionStatementParameters `json:"rejectionStatement" tf:"rejection_statement,omitempty"` +} + +type FulfillmentActivityInitParameters struct { + + // A description of the Lambda function that is run to fulfill the intent. + // Required if type is CodeHook. Attributes are documented under code_hook. + CodeHook *CodeHookInitParameters `json:"codeHook,omitempty" tf:"code_hook,omitempty"` + + // How the intent should be fulfilled, either by running a Lambda function or by + // returning the slot data to the client application. Type can be either ReturnIntent or CodeHook, as documented here. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FulfillmentActivityObservation struct { + + // A description of the Lambda function that is run to fulfill the intent. + // Required if type is CodeHook. Attributes are documented under code_hook. 
+ CodeHook *CodeHookObservation `json:"codeHook,omitempty" tf:"code_hook,omitempty"` + + // How the intent should be fulfilled, either by running a Lambda function or by + // returning the slot data to the client application. Type can be either ReturnIntent or CodeHook, as documented here. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FulfillmentActivityParameters struct { + + // A description of the Lambda function that is run to fulfill the intent. + // Required if type is CodeHook. Attributes are documented under code_hook. + // +kubebuilder:validation:Optional + CodeHook *CodeHookParameters `json:"codeHook,omitempty" tf:"code_hook,omitempty"` + + // How the intent should be fulfilled, either by running a Lambda function or by + // returning the slot data to the client application. Type can be either ReturnIntent or CodeHook, as documented here. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type IntentInitParameters_2 struct { + + // The statement that you want Amazon Lex to convey to the user + // after the intent is successfully fulfilled by the Lambda function. This element is relevant only if + // you provide a Lambda function in the fulfillment_activity. If you return the intent to the client + // application, you can't specify this element. The follow_up_prompt and conclusion_statement are + // mutually exclusive. You can specify only one. Attributes are documented under statement. + ConclusionStatement *ConclusionStatementInitParameters `json:"conclusionStatement,omitempty" tf:"conclusion_statement,omitempty"` + + // Prompts the user to confirm the intent. This question should + // have a yes or no answer. You you must provide both the rejection_statement and confirmation_prompt, + // or neither. Attributes are documented under prompt. 
+ ConfirmationPrompt *ConfirmationPromptInitParameters `json:"confirmationPrompt,omitempty" tf:"confirmation_prompt,omitempty"` + + // Determines if a new slot type version is created when the initial + // resource is created and on each update. Defaults to false. + CreateVersion *bool `json:"createVersion,omitempty" tf:"create_version,omitempty"` + + // A description of the intent. Must be less than or equal to 200 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies a Lambda function to invoke for each user input. You can + // invoke this Lambda function to personalize user interaction. Attributes are documented under code_hook. + DialogCodeHook *DialogCodeHookInitParameters `json:"dialogCodeHook,omitempty" tf:"dialog_code_hook,omitempty"` + + // Amazon Lex uses this prompt to solicit additional activity after + // fulfilling an intent. For example, after the OrderPizza intent is fulfilled, you might prompt the + // user to order a drink. The follow_up_prompt field and the conclusion_statement field are mutually + // exclusive. You can specify only one. Attributes are documented under follow_up_prompt. + FollowUpPrompt *FollowUpPromptInitParameters `json:"followUpPrompt,omitempty" tf:"follow_up_prompt,omitempty"` + + // Describes how the intent is fulfilled. For example, after a + // user provides all of the information for a pizza order, fulfillment_activity defines how the bot + // places an order with a local pizza store. Attributes are documented under fulfillment_activity. + FulfillmentActivity *FulfillmentActivityInitParameters `json:"fulfillmentActivity,omitempty" tf:"fulfillment_activity,omitempty"` + + // A unique identifier for the built-in intent to base this + // intent on. To find the signature for an intent, see + // Standard Built-in Intents + // in the Alexa Skills Kit. 
+ ParentIntentSignature *string `json:"parentIntentSignature,omitempty" tf:"parent_intent_signature,omitempty"` + + // When the user answers "no" to the question defined in + // confirmation_prompt, Amazon Lex responds with this statement to acknowledge that the intent was + // canceled. You must provide both the rejection_statement and the confirmation_prompt, or neither. + // Attributes are documented under statement. + RejectionStatement *IntentRejectionStatementInitParameters `json:"rejectionStatement,omitempty" tf:"rejection_statement,omitempty"` + + // An array of utterances (strings) that a user might say to signal + // the intent. For example, "I want {PizzaSize} pizza", "Order {Quantity} {PizzaSize} pizzas". + // In each utterance, a slot name is enclosed in curly braces. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + // +listType=set + SampleUtterances []*string `json:"sampleUtterances,omitempty" tf:"sample_utterances,omitempty"` + + // An list of intent slots. At runtime, Amazon Lex elicits required slot values + // from the user using prompts defined in the slots. Attributes are documented under slot. + Slot []SlotInitParameters `json:"slot,omitempty" tf:"slot,omitempty"` +} + +type IntentObservation_2 struct { + + // The ARN of the Lex intent. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Checksum identifying the version of the intent that was created. The checksum is not + // included as an argument because the resource will add it automatically when updating the intent. + Checksum *string `json:"checksum,omitempty" tf:"checksum,omitempty"` + + // The statement that you want Amazon Lex to convey to the user + // after the intent is successfully fulfilled by the Lambda function. This element is relevant only if + // you provide a Lambda function in the fulfillment_activity. If you return the intent to the client + // application, you can't specify this element. 
The follow_up_prompt and conclusion_statement are + // mutually exclusive. You can specify only one. Attributes are documented under statement. + ConclusionStatement *ConclusionStatementObservation `json:"conclusionStatement,omitempty" tf:"conclusion_statement,omitempty"` + + // Prompts the user to confirm the intent. This question should + // have a yes or no answer. You you must provide both the rejection_statement and confirmation_prompt, + // or neither. Attributes are documented under prompt. + ConfirmationPrompt *ConfirmationPromptObservation `json:"confirmationPrompt,omitempty" tf:"confirmation_prompt,omitempty"` + + // Determines if a new slot type version is created when the initial + // resource is created and on each update. Defaults to false. + CreateVersion *bool `json:"createVersion,omitempty" tf:"create_version,omitempty"` + + // The date when the intent version was created. + CreatedDate *string `json:"createdDate,omitempty" tf:"created_date,omitempty"` + + // A description of the intent. Must be less than or equal to 200 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies a Lambda function to invoke for each user input. You can + // invoke this Lambda function to personalize user interaction. Attributes are documented under code_hook. + DialogCodeHook *DialogCodeHookObservation `json:"dialogCodeHook,omitempty" tf:"dialog_code_hook,omitempty"` + + // Amazon Lex uses this prompt to solicit additional activity after + // fulfilling an intent. For example, after the OrderPizza intent is fulfilled, you might prompt the + // user to order a drink. The follow_up_prompt field and the conclusion_statement field are mutually + // exclusive. You can specify only one. Attributes are documented under follow_up_prompt. + FollowUpPrompt *FollowUpPromptObservation `json:"followUpPrompt,omitempty" tf:"follow_up_prompt,omitempty"` + + // Describes how the intent is fulfilled. 
For example, after a + // user provides all of the information for a pizza order, fulfillment_activity defines how the bot + // places an order with a local pizza store. Attributes are documented under fulfillment_activity. + FulfillmentActivity *FulfillmentActivityObservation `json:"fulfillmentActivity,omitempty" tf:"fulfillment_activity,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The date when the $LATEST version of this intent was updated. + LastUpdatedDate *string `json:"lastUpdatedDate,omitempty" tf:"last_updated_date,omitempty"` + + // A unique identifier for the built-in intent to base this + // intent on. To find the signature for an intent, see + // Standard Built-in Intents + // in the Alexa Skills Kit. + ParentIntentSignature *string `json:"parentIntentSignature,omitempty" tf:"parent_intent_signature,omitempty"` + + // When the user answers "no" to the question defined in + // confirmation_prompt, Amazon Lex responds with this statement to acknowledge that the intent was + // canceled. You must provide both the rejection_statement and the confirmation_prompt, or neither. + // Attributes are documented under statement. + RejectionStatement *IntentRejectionStatementObservation `json:"rejectionStatement,omitempty" tf:"rejection_statement,omitempty"` + + // An array of utterances (strings) that a user might say to signal + // the intent. For example, "I want {PizzaSize} pizza", "Order {Quantity} {PizzaSize} pizzas". + // In each utterance, a slot name is enclosed in curly braces. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + // +listType=set + SampleUtterances []*string `json:"sampleUtterances,omitempty" tf:"sample_utterances,omitempty"` + + // An list of intent slots. At runtime, Amazon Lex elicits required slot values + // from the user using prompts defined in the slots. Attributes are documented under slot. 
+ Slot []SlotObservation `json:"slot,omitempty" tf:"slot,omitempty"` + + // The version of the bot. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type IntentParameters_2 struct { + + // The statement that you want Amazon Lex to convey to the user + // after the intent is successfully fulfilled by the Lambda function. This element is relevant only if + // you provide a Lambda function in the fulfillment_activity. If you return the intent to the client + // application, you can't specify this element. The follow_up_prompt and conclusion_statement are + // mutually exclusive. You can specify only one. Attributes are documented under statement. + // +kubebuilder:validation:Optional + ConclusionStatement *ConclusionStatementParameters `json:"conclusionStatement,omitempty" tf:"conclusion_statement,omitempty"` + + // Prompts the user to confirm the intent. This question should + // have a yes or no answer. You you must provide both the rejection_statement and confirmation_prompt, + // or neither. Attributes are documented under prompt. + // +kubebuilder:validation:Optional + ConfirmationPrompt *ConfirmationPromptParameters `json:"confirmationPrompt,omitempty" tf:"confirmation_prompt,omitempty"` + + // Determines if a new slot type version is created when the initial + // resource is created and on each update. Defaults to false. + // +kubebuilder:validation:Optional + CreateVersion *bool `json:"createVersion,omitempty" tf:"create_version,omitempty"` + + // A description of the intent. Must be less than or equal to 200 characters in length. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies a Lambda function to invoke for each user input. You can + // invoke this Lambda function to personalize user interaction. Attributes are documented under code_hook. 
+ // +kubebuilder:validation:Optional + DialogCodeHook *DialogCodeHookParameters `json:"dialogCodeHook,omitempty" tf:"dialog_code_hook,omitempty"` + + // Amazon Lex uses this prompt to solicit additional activity after + // fulfilling an intent. For example, after the OrderPizza intent is fulfilled, you might prompt the + // user to order a drink. The follow_up_prompt field and the conclusion_statement field are mutually + // exclusive. You can specify only one. Attributes are documented under follow_up_prompt. + // +kubebuilder:validation:Optional + FollowUpPrompt *FollowUpPromptParameters `json:"followUpPrompt,omitempty" tf:"follow_up_prompt,omitempty"` + + // Describes how the intent is fulfilled. For example, after a + // user provides all of the information for a pizza order, fulfillment_activity defines how the bot + // places an order with a local pizza store. Attributes are documented under fulfillment_activity. + // +kubebuilder:validation:Optional + FulfillmentActivity *FulfillmentActivityParameters `json:"fulfillmentActivity,omitempty" tf:"fulfillment_activity,omitempty"` + + // A unique identifier for the built-in intent to base this + // intent on. To find the signature for an intent, see + // Standard Built-in Intents + // in the Alexa Skills Kit. + // +kubebuilder:validation:Optional + ParentIntentSignature *string `json:"parentIntentSignature,omitempty" tf:"parent_intent_signature,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // When the user answers "no" to the question defined in + // confirmation_prompt, Amazon Lex responds with this statement to acknowledge that the intent was + // canceled. You must provide both the rejection_statement and the confirmation_prompt, or neither. + // Attributes are documented under statement. 
+ // +kubebuilder:validation:Optional + RejectionStatement *IntentRejectionStatementParameters `json:"rejectionStatement,omitempty" tf:"rejection_statement,omitempty"` + + // An array of utterances (strings) that a user might say to signal + // the intent. For example, "I want {PizzaSize} pizza", "Order {Quantity} {PizzaSize} pizzas". + // In each utterance, a slot name is enclosed in curly braces. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + // +kubebuilder:validation:Optional + // +listType=set + SampleUtterances []*string `json:"sampleUtterances,omitempty" tf:"sample_utterances,omitempty"` + + // An list of intent slots. At runtime, Amazon Lex elicits required slot values + // from the user using prompts defined in the slots. Attributes are documented under slot. + // +kubebuilder:validation:Optional + Slot []SlotParameters `json:"slot,omitempty" tf:"slot,omitempty"` +} + +type IntentRejectionStatementInitParameters struct { + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []IntentRejectionStatementMessageInitParameters `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type IntentRejectionStatementMessageInitParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. 
+ ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type IntentRejectionStatementMessageObservation struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type IntentRejectionStatementMessageParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The content type of the message string. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + // +kubebuilder:validation:Optional + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type IntentRejectionStatementObservation struct { + + // A set of messages, each of which provides a message string and its type. 
+ // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []IntentRejectionStatementMessageObservation `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type IntentRejectionStatementParameters struct { + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + // +kubebuilder:validation:Optional + Message []IntentRejectionStatementMessageParameters `json:"message" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + // +kubebuilder:validation:Optional + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type PromptInitParameters struct { + + // The number of times to prompt the user for information. Must be a number between 1 and 5 (inclusive). + MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. 
+ Message []PromptMessageInitParameters `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type PromptMessageInitParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type PromptMessageObservation struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type PromptMessageParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The content type of the message string. 
+ // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + // +kubebuilder:validation:Optional + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type PromptObservation struct { + + // The number of times to prompt the user for information. Must be a number between 1 and 5 (inclusive). + MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []PromptMessageObservation `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type PromptParameters struct { + + // The number of times to prompt the user for information. Must be a number between 1 and 5 (inclusive). + // +kubebuilder:validation:Optional + MaxAttempts *float64 `json:"maxAttempts" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. 
+ // +kubebuilder:validation:Optional + Message []PromptMessageParameters `json:"message" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + // +kubebuilder:validation:Optional + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type RejectionStatementInitParameters struct { + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []RejectionStatementMessageInitParameters `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type RejectionStatementMessageInitParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type RejectionStatementMessageObservation struct { + + // The text of the message. 
Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type RejectionStatementMessageParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The content type of the message string. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + // +kubebuilder:validation:Optional + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type RejectionStatementObservation struct { + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []RejectionStatementMessageObservation `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. 
+ ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type RejectionStatementParameters struct { + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + // +kubebuilder:validation:Optional + Message []RejectionStatementMessageParameters `json:"message" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + // +kubebuilder:validation:Optional + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type SlotInitParameters struct { + + // A description of the bot. Must be less than or equal to 200 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the intent slot that you want to create. The name is case sensitive. Must be less than or equal to 100 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Directs Lex the order in which to elicit this slot value from the user. + // For example, if the intent has two slots with priorities 1 and 2, AWS Lex first elicits a value for + // the slot with priority 1. If multiple slots share the same priority, the order in which Lex elicits + // values is arbitrary. Must be between 1 and 100. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. 
+ ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` + + // If you know a specific pattern with which users might respond to + // an Amazon Lex request for a slot value, you can provide those utterances to improve accuracy. This + // is optional. In most cases, Amazon Lex is capable of understanding user utterances. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + SampleUtterances []*string `json:"sampleUtterances,omitempty" tf:"sample_utterances,omitempty"` + + // Specifies whether the slot is required or optional. + SlotConstraint *string `json:"slotConstraint,omitempty" tf:"slot_constraint,omitempty"` + + // The type of the slot, either a custom slot type that you defined or one of + // the built-in slot types. Must be less than or equal to 100 characters in length. + SlotType *string `json:"slotType,omitempty" tf:"slot_type,omitempty"` + + // The version of the slot type. Must be less than or equal to 64 characters in length. + SlotTypeVersion *string `json:"slotTypeVersion,omitempty" tf:"slot_type_version,omitempty"` + + // The prompt that Amazon Lex uses to elicit the slot value + // from the user. Attributes are documented under prompt. + ValueElicitationPrompt *ValueElicitationPromptInitParameters `json:"valueElicitationPrompt,omitempty" tf:"value_elicitation_prompt,omitempty"` +} + +type SlotObservation struct { + + // A description of the bot. Must be less than or equal to 200 characters in length. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the intent slot that you want to create. The name is case sensitive. Must be less than or equal to 100 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Directs Lex the order in which to elicit this slot value from the user. 
+ // For example, if the intent has two slots with priorities 1 and 2, AWS Lex first elicits a value for + // the slot with priority 1. If multiple slots share the same priority, the order in which Lex elicits + // values is arbitrary. Must be between 1 and 100. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` + + // If you know a specific pattern with which users might respond to + // an Amazon Lex request for a slot value, you can provide those utterances to improve accuracy. This + // is optional. In most cases, Amazon Lex is capable of understanding user utterances. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + SampleUtterances []*string `json:"sampleUtterances,omitempty" tf:"sample_utterances,omitempty"` + + // Specifies whether the slot is required or optional. + SlotConstraint *string `json:"slotConstraint,omitempty" tf:"slot_constraint,omitempty"` + + // The type of the slot, either a custom slot type that you defined or one of + // the built-in slot types. Must be less than or equal to 100 characters in length. + SlotType *string `json:"slotType,omitempty" tf:"slot_type,omitempty"` + + // The version of the slot type. Must be less than or equal to 64 characters in length. + SlotTypeVersion *string `json:"slotTypeVersion,omitempty" tf:"slot_type_version,omitempty"` + + // The prompt that Amazon Lex uses to elicit the slot value + // from the user. Attributes are documented under prompt. 
+ ValueElicitationPrompt *ValueElicitationPromptObservation `json:"valueElicitationPrompt,omitempty" tf:"value_elicitation_prompt,omitempty"` +} + +type SlotParameters struct { + + // A description of the bot. Must be less than or equal to 200 characters in length. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the intent slot that you want to create. The name is case sensitive. Must be less than or equal to 100 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Directs Lex the order in which to elicit this slot value from the user. + // For example, if the intent has two slots with priorities 1 and 2, AWS Lex first elicits a value for + // the slot with priority 1. If multiple slots share the same priority, the order in which Lex elicits + // values is arbitrary. Must be between 1 and 100. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + // +kubebuilder:validation:Optional + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` + + // If you know a specific pattern with which users might respond to + // an Amazon Lex request for a slot value, you can provide those utterances to improve accuracy. This + // is optional. In most cases, Amazon Lex is capable of understanding user utterances. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. 
+ // +kubebuilder:validation:Optional + SampleUtterances []*string `json:"sampleUtterances,omitempty" tf:"sample_utterances,omitempty"` + + // Specifies whether the slot is required or optional. + // +kubebuilder:validation:Optional + SlotConstraint *string `json:"slotConstraint" tf:"slot_constraint,omitempty"` + + // The type of the slot, either a custom slot type that you defined or one of + // the built-in slot types. Must be less than or equal to 100 characters in length. + // +kubebuilder:validation:Optional + SlotType *string `json:"slotType" tf:"slot_type,omitempty"` + + // The version of the slot type. Must be less than or equal to 64 characters in length. + // +kubebuilder:validation:Optional + SlotTypeVersion *string `json:"slotTypeVersion,omitempty" tf:"slot_type_version,omitempty"` + + // The prompt that Amazon Lex uses to elicit the slot value + // from the user. Attributes are documented under prompt. + // +kubebuilder:validation:Optional + ValueElicitationPrompt *ValueElicitationPromptParameters `json:"valueElicitationPrompt,omitempty" tf:"value_elicitation_prompt,omitempty"` +} + +type ValueElicitationPromptInitParameters struct { + + // The number of times to prompt the user for information. Must be a number between 1 and 5 (inclusive). + MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []ValueElicitationPromptMessageInitParameters `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. 
+ ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type ValueElicitationPromptMessageInitParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ValueElicitationPromptMessageObservation struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The content type of the message string. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ValueElicitationPromptMessageParameters struct { + + // The text of the message. Must be less than or equal to 1000 characters in length. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The content type of the message string. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // Identifies the message group that the message belongs to. When a group + // is assigned to a message, Amazon Lex returns one message from each group in the response. 
Must be a number between 1 and 5 (inclusive). + // +kubebuilder:validation:Optional + GroupNumber *float64 `json:"groupNumber,omitempty" tf:"group_number,omitempty"` +} + +type ValueElicitationPromptObservation struct { + + // The number of times to prompt the user for information. Must be a number between 1 and 5 (inclusive). + MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + Message []ValueElicitationPromptMessageObservation `json:"message,omitempty" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +type ValueElicitationPromptParameters struct { + + // The number of times to prompt the user for information. Must be a number between 1 and 5 (inclusive). + // +kubebuilder:validation:Optional + MaxAttempts *float64 `json:"maxAttempts" tf:"max_attempts,omitempty"` + + // A set of messages, each of which provides a message string and its type. + // You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + // Attributes are documented under message. Must contain between 1 and 15 messages. + // +kubebuilder:validation:Optional + Message []ValueElicitationPromptMessageParameters `json:"message" tf:"message,omitempty"` + + // The response card. Amazon Lex will substitute session attributes and + // slot values into the response card. For more information, see + // Example: Using a Response Card. 
Must be less than or equal to 50000 characters in length. + // +kubebuilder:validation:Optional + ResponseCard *string `json:"responseCard,omitempty" tf:"response_card,omitempty"` +} + +// IntentSpec defines the desired state of Intent +type IntentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IntentParameters_2 `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IntentInitParameters_2 `json:"initProvider,omitempty"` +} + +// IntentStatus defines the observed state of Intent. +type IntentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IntentObservation_2 `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Intent is the Schema for the Intents API. Provides an Amazon Lex intent resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Intent struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.fulfillmentActivity) || (has(self.initProvider) && has(self.initProvider.fulfillmentActivity))",message="spec.forProvider.fulfillmentActivity is a required parameter" + Spec IntentSpec `json:"spec"` + Status IntentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IntentList contains a list of Intents +type IntentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Intent `json:"items"` +} + +// Repository type metadata. +var ( + Intent_Kind = "Intent" + Intent_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Intent_Kind}.String() + Intent_KindAPIVersion = Intent_Kind + "." 
+ CRDGroupVersion.String() + Intent_GroupVersionKind = CRDGroupVersion.WithKind(Intent_Kind) +) + +func init() { + SchemeBuilder.Register(&Intent{}, &IntentList{}) +} diff --git a/apis/licensemanager/v1beta1/zz_association_types.go b/apis/licensemanager/v1beta1/zz_association_types.go index 4f68a5d090..d30bf22222 100755 --- a/apis/licensemanager/v1beta1/zz_association_types.go +++ b/apis/licensemanager/v1beta1/zz_association_types.go @@ -29,7 +29,7 @@ type AssociationInitParameters struct { LicenseConfigurationArnSelector *v1.Selector `json:"licenseConfigurationArnSelector,omitempty" tf:"-"` // ARN of the resource associated with the license configuration. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` @@ -76,7 +76,7 @@ type AssociationParameters struct { Region *string `json:"region" tf:"-"` // ARN of the resource associated with the license configuration. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` diff --git a/apis/licensemanager/v1beta1/zz_generated.resolvers.go b/apis/licensemanager/v1beta1/zz_generated.resolvers.go index 735a97bf14..3eb2006f46 100644 --- a/apis/licensemanager/v1beta1/zz_generated.resolvers.go +++ b/apis/licensemanager/v1beta1/zz_generated.resolvers.go @@ -46,7 +46,7 @@ func (mg *Association) ResolveReferences( // ResolveReferences of this Associati mg.Spec.ForProvider.LicenseConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LicenseConfigurationArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -84,7 +84,7 @@ func (mg *Association) ResolveReferences( // ResolveReferences of this Associati mg.Spec.InitProvider.LicenseConfigurationArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.LicenseConfigurationArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/lightsail/v1beta1/zz_diskattachment_types.go 
b/apis/lightsail/v1beta1/zz_diskattachment_types.go index 849562ea5f..ff3878f179 100755 --- a/apis/lightsail/v1beta1/zz_diskattachment_types.go +++ b/apis/lightsail/v1beta1/zz_diskattachment_types.go @@ -31,7 +31,7 @@ type DiskAttachmentInitParameters struct { DiskPath *string `json:"diskPath,omitempty" tf:"disk_path,omitempty"` // The name of the Lightsail Instance to attach to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta2.Instance InstanceName *string `json:"instanceName,omitempty" tf:"instance_name,omitempty"` // Reference to a Instance in lightsail to populate instanceName. @@ -78,7 +78,7 @@ type DiskAttachmentParameters struct { DiskPath *string `json:"diskPath,omitempty" tf:"disk_path,omitempty"` // The name of the Lightsail Instance to attach to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta2.Instance // +kubebuilder:validation:Optional InstanceName *string `json:"instanceName,omitempty" tf:"instance_name,omitempty"` diff --git a/apis/lightsail/v1beta1/zz_generated.conversion_hubs.go b/apis/lightsail/v1beta1/zz_generated.conversion_hubs.go index 10bc41c408..d2efe35492 100755 --- a/apis/lightsail/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/lightsail/v1beta1/zz_generated.conversion_hubs.go @@ -12,9 +12,6 @@ func (tr *Bucket) Hub() {} // Hub marks this type as a conversion hub. func (tr *Certificate) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ContainerService) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Disk) Hub() {} @@ -27,9 +24,6 @@ func (tr *Domain) Hub() {} // Hub marks this type as a conversion hub. func (tr *DomainEntry) Hub() {} -// Hub marks this type as a conversion hub. 
-func (tr *Instance) Hub() {} - // Hub marks this type as a conversion hub. func (tr *InstancePublicPorts) Hub() {} diff --git a/apis/lightsail/v1beta1/zz_generated.conversion_spokes.go b/apis/lightsail/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..07c846cc47 --- /dev/null +++ b/apis/lightsail/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ContainerService to the hub type. +func (tr *ContainerService) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ContainerService type. +func (tr *ContainerService) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Instance to the hub type. 
+func (tr *Instance) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Instance type. +func (tr *Instance) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/lightsail/v1beta1/zz_generated.resolvers.go b/apis/lightsail/v1beta1/zz_generated.resolvers.go index c69b5e3077..5412bcc199 100644 --- a/apis/lightsail/v1beta1/zz_generated.resolvers.go +++ b/apis/lightsail/v1beta1/zz_generated.resolvers.go @@ -9,9 +9,10 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" apisresolver "github.com/upbound/provider-aws/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -44,7 +45,7 @@ func (mg *DiskAttachment) ResolveReferences( // ResolveReferences of this DiskAt mg.Spec.ForProvider.DiskName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DiskNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = 
apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -82,7 +83,7 @@ func (mg *DiskAttachment) ResolveReferences( // ResolveReferences of this DiskAt mg.Spec.InitProvider.DiskName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DiskNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -144,7 +145,7 @@ func (mg *InstancePublicPorts) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -163,7 +164,7 @@ func (mg *InstancePublicPorts) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.InstanceName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.InstanceNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -194,7 +195,7 @@ func (mg *LBAttachment) 
ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -232,7 +233,7 @@ func (mg *LBAttachment) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.LBName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LBNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -313,7 +314,7 @@ func (mg *StaticIPAttachment) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -351,7 +352,7 @@ func (mg *StaticIPAttachment) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.StaticIPName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StaticIPNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", "v1beta1", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("lightsail.aws.upbound.io", 
"v1beta2", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/lightsail/v1beta1/zz_instancepublicports_types.go b/apis/lightsail/v1beta1/zz_instancepublicports_types.go index c9c24be90f..751f0a8be2 100755 --- a/apis/lightsail/v1beta1/zz_instancepublicports_types.go +++ b/apis/lightsail/v1beta1/zz_instancepublicports_types.go @@ -16,7 +16,7 @@ import ( type InstancePublicPortsInitParameters struct { // Name of the Lightsail Instance. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta2.Instance InstanceName *string `json:"instanceName,omitempty" tf:"instance_name,omitempty"` // Reference to a Instance in lightsail to populate instanceName. @@ -46,7 +46,7 @@ type InstancePublicPortsObservation struct { type InstancePublicPortsParameters struct { // Name of the Lightsail Instance. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta2.Instance // +kubebuilder:validation:Optional InstanceName *string `json:"instanceName,omitempty" tf:"instance_name,omitempty"` diff --git a/apis/lightsail/v1beta1/zz_lbattachment_types.go b/apis/lightsail/v1beta1/zz_lbattachment_types.go index d1a010c958..fb09d8315d 100755 --- a/apis/lightsail/v1beta1/zz_lbattachment_types.go +++ b/apis/lightsail/v1beta1/zz_lbattachment_types.go @@ -16,7 +16,7 @@ import ( type LBAttachmentInitParameters struct { // The name of the instance to attach to the load balancer. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta2.Instance InstanceName *string `json:"instanceName,omitempty" tf:"instance_name,omitempty"` // Reference to a Instance in lightsail to populate instanceName. @@ -55,7 +55,7 @@ type LBAttachmentObservation struct { type LBAttachmentParameters struct { // The name of the instance to attach to the load balancer. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta2.Instance // +kubebuilder:validation:Optional InstanceName *string `json:"instanceName,omitempty" tf:"instance_name,omitempty"` diff --git a/apis/lightsail/v1beta1/zz_staticipattachment_types.go b/apis/lightsail/v1beta1/zz_staticipattachment_types.go index a1d02d7dd0..9598e266ae 100755 --- a/apis/lightsail/v1beta1/zz_staticipattachment_types.go +++ b/apis/lightsail/v1beta1/zz_staticipattachment_types.go @@ -16,7 +16,7 @@ import ( type StaticIPAttachmentInitParameters struct { // The name of the Lightsail instance to attach the IP to - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta1.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() InstanceName *string `json:"instanceName,omitempty" tf:"instance_name,omitempty"` @@ -58,7 +58,7 @@ type StaticIPAttachmentObservation struct { type StaticIPAttachmentParameters struct { // The name of the Lightsail instance to attach the IP to - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta1.Instance + // 
+crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lightsail/v1beta2.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional InstanceName *string `json:"instanceName,omitempty" tf:"instance_name,omitempty"` diff --git a/apis/lightsail/v1beta2/zz_containerservice_terraformed.go b/apis/lightsail/v1beta2/zz_containerservice_terraformed.go new file mode 100755 index 0000000000..baa772cf21 --- /dev/null +++ b/apis/lightsail/v1beta2/zz_containerservice_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ContainerService +func (mg *ContainerService) GetTerraformResourceType() string { + return "aws_lightsail_container_service" +} + +// GetConnectionDetailsMapping for this ContainerService +func (tr *ContainerService) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ContainerService +func (tr *ContainerService) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ContainerService +func (tr *ContainerService) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ContainerService +func (tr *ContainerService) GetID() string { + if tr.Status.AtProvider.ID == nil { + return 
"" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ContainerService +func (tr *ContainerService) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ContainerService +func (tr *ContainerService) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ContainerService +func (tr *ContainerService) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ContainerService +func (tr *ContainerService) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ContainerService using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ContainerService) LateInitialize(attrs []byte) (bool, error) { + params := &ContainerServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ContainerService) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lightsail/v1beta2/zz_containerservice_types.go b/apis/lightsail/v1beta2/zz_containerservice_types.go new file mode 100755 index 0000000000..326398a33e --- /dev/null +++ b/apis/lightsail/v1beta2/zz_containerservice_types.go @@ -0,0 +1,298 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CertificateInitParameters struct { + + // The name for the container service. Names must be of length 1 to 63, and be + // unique within each AWS Region in your Lightsail account. 
+ CertificateName *string `json:"certificateName,omitempty" tf:"certificate_name,omitempty"` + + DomainNames []*string `json:"domainNames,omitempty" tf:"domain_names,omitempty"` +} + +type CertificateObservation struct { + + // The name for the container service. Names must be of length 1 to 63, and be + // unique within each AWS Region in your Lightsail account. + CertificateName *string `json:"certificateName,omitempty" tf:"certificate_name,omitempty"` + + DomainNames []*string `json:"domainNames,omitempty" tf:"domain_names,omitempty"` +} + +type CertificateParameters struct { + + // The name for the container service. Names must be of length 1 to 63, and be + // unique within each AWS Region in your Lightsail account. + // +kubebuilder:validation:Optional + CertificateName *string `json:"certificateName" tf:"certificate_name,omitempty"` + + // +kubebuilder:validation:Optional + DomainNames []*string `json:"domainNames" tf:"domain_names,omitempty"` +} + +type ContainerServiceInitParameters struct { + + // A Boolean value indicating whether the container service is disabled. Defaults to false. + IsDisabled *bool `json:"isDisabled,omitempty" tf:"is_disabled,omitempty"` + + // The power specification for the container service. The power specifies the amount of memory, + // the number of vCPUs, and the monthly price of each node of the container service. + // Possible values: nano, micro, small, medium, large, xlarge. + Power *string `json:"power,omitempty" tf:"power,omitempty"` + + // An object to describe the configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry (Amazon ECR) private repositories. See Private Registry Access below for more details. 
+ PrivateRegistryAccess *PrivateRegistryAccessInitParameters `json:"privateRegistryAccess,omitempty" tf:"private_registry_access,omitempty"` + + // The public domain names to use with the container service, such as example.com + // and www.example.com. You can specify up to four public domain names for a container service. The domain names that you + // specify are used when you create a deployment with a container configured as the public endpoint of your container + // service. If you don't specify public domain names, then you can use the default domain of the container service. + // Defined below. + PublicDomainNames *PublicDomainNamesInitParameters `json:"publicDomainNames,omitempty" tf:"public_domain_names,omitempty"` + + // The scale specification for the container service. The scale specifies the allocated compute + // nodes of the container service. + Scale *float64 `json:"scale,omitempty" tf:"scale,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ContainerServiceObservation struct { + + // The Amazon Resource Name (ARN) of the container service. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The Availability Zone. Follows the format us-east-2a (case-sensitive). + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Same as name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A Boolean value indicating whether the container service is disabled. Defaults to false. + IsDisabled *bool `json:"isDisabled,omitempty" tf:"is_disabled,omitempty"` + + // The power specification for the container service. The power specifies the amount of memory, + // the number of vCPUs, and the monthly price of each node of the container service. + // Possible values: nano, micro, small, medium, large, xlarge. 
+ Power *string `json:"power,omitempty" tf:"power,omitempty"` + + // The ID of the power of the container service. + PowerID *string `json:"powerId,omitempty" tf:"power_id,omitempty"` + + // The principal ARN of the container service. The principal ARN can be used to create a trust + // relationship between your standard AWS account and your Lightsail container service. This allows you to give your + // service permission to access resources in your standard AWS account. + PrincipalArn *string `json:"principalArn,omitempty" tf:"principal_arn,omitempty"` + + // The private domain name of the container service. The private domain name is accessible only + // by other resources within the default virtual private cloud (VPC) of your Lightsail account. + PrivateDomainName *string `json:"privateDomainName,omitempty" tf:"private_domain_name,omitempty"` + + // An object to describe the configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry (Amazon ECR) private repositories. See Private Registry Access below for more details. + PrivateRegistryAccess *PrivateRegistryAccessObservation `json:"privateRegistryAccess,omitempty" tf:"private_registry_access,omitempty"` + + // The public domain names to use with the container service, such as example.com + // and www.example.com. You can specify up to four public domain names for a container service. The domain names that you + // specify are used when you create a deployment with a container configured as the public endpoint of your container + // service. If you don't specify public domain names, then you can use the default domain of the container service. + // Defined below. + PublicDomainNames *PublicDomainNamesObservation `json:"publicDomainNames,omitempty" tf:"public_domain_names,omitempty"` + + // The Lightsail resource type of the container service (i.e., ContainerService). 
+ ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // The scale specification for the container service. The scale specifies the allocated compute + // nodes of the container service. + Scale *float64 `json:"scale,omitempty" tf:"scale,omitempty"` + + // The current state of the container service. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider + // default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The publicly accessible URL of the container service. If no public endpoint is specified in the + // currentDeployment, this URL returns a 404 response. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type ContainerServiceParameters struct { + + // A Boolean value indicating whether the container service is disabled. Defaults to false. + // +kubebuilder:validation:Optional + IsDisabled *bool `json:"isDisabled,omitempty" tf:"is_disabled,omitempty"` + + // The power specification for the container service. The power specifies the amount of memory, + // the number of vCPUs, and the monthly price of each node of the container service. + // Possible values: nano, micro, small, medium, large, xlarge. + // +kubebuilder:validation:Optional + Power *string `json:"power,omitempty" tf:"power,omitempty"` + + // An object to describe the configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry (Amazon ECR) private repositories. See Private Registry Access below for more details. 
+ // +kubebuilder:validation:Optional + PrivateRegistryAccess *PrivateRegistryAccessParameters `json:"privateRegistryAccess,omitempty" tf:"private_registry_access,omitempty"` + + // The public domain names to use with the container service, such as example.com + // and www.example.com. You can specify up to four public domain names for a container service. The domain names that you + // specify are used when you create a deployment with a container configured as the public endpoint of your container + // service. If you don't specify public domain names, then you can use the default domain of the container service. + // Defined below. + // +kubebuilder:validation:Optional + PublicDomainNames *PublicDomainNamesParameters `json:"publicDomainNames,omitempty" tf:"public_domain_names,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The scale specification for the container service. The scale specifies the allocated compute + // nodes of the container service. + // +kubebuilder:validation:Optional + Scale *float64 `json:"scale,omitempty" tf:"scale,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EcrImagePullerRoleInitParameters struct { + + // A Boolean value that indicates whether to activate the role. The default is false. + IsActive *bool `json:"isActive,omitempty" tf:"is_active,omitempty"` +} + +type EcrImagePullerRoleObservation struct { + + // A Boolean value that indicates whether to activate the role. The default is false. + IsActive *bool `json:"isActive,omitempty" tf:"is_active,omitempty"` + + // The principal ARN of the container service. 
The principal ARN can be used to create a trust + // relationship between your standard AWS account and your Lightsail container service. This allows you to give your + // service permission to access resources in your standard AWS account. + PrincipalArn *string `json:"principalArn,omitempty" tf:"principal_arn,omitempty"` +} + +type EcrImagePullerRoleParameters struct { + + // A Boolean value that indicates whether to activate the role. The default is false. + // +kubebuilder:validation:Optional + IsActive *bool `json:"isActive,omitempty" tf:"is_active,omitempty"` +} + +type PrivateRegistryAccessInitParameters struct { + + // Describes a request to configure an Amazon Lightsail container service to access private container image repositories, such as Amazon Elastic Container Registry (Amazon ECR) private repositories. See ECR Image Puller Role below for more details. + EcrImagePullerRole *EcrImagePullerRoleInitParameters `json:"ecrImagePullerRole,omitempty" tf:"ecr_image_puller_role,omitempty"` +} + +type PrivateRegistryAccessObservation struct { + + // Describes a request to configure an Amazon Lightsail container service to access private container image repositories, such as Amazon Elastic Container Registry (Amazon ECR) private repositories. See ECR Image Puller Role below for more details. + EcrImagePullerRole *EcrImagePullerRoleObservation `json:"ecrImagePullerRole,omitempty" tf:"ecr_image_puller_role,omitempty"` +} + +type PrivateRegistryAccessParameters struct { + + // Describes a request to configure an Amazon Lightsail container service to access private container image repositories, such as Amazon Elastic Container Registry (Amazon ECR) private repositories. See ECR Image Puller Role below for more details. 
+ // +kubebuilder:validation:Optional + EcrImagePullerRole *EcrImagePullerRoleParameters `json:"ecrImagePullerRole,omitempty" tf:"ecr_image_puller_role,omitempty"` +} + +type PublicDomainNamesInitParameters struct { + Certificate []CertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` +} + +type PublicDomainNamesObservation struct { + Certificate []CertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` +} + +type PublicDomainNamesParameters struct { + + // +kubebuilder:validation:Optional + Certificate []CertificateParameters `json:"certificate" tf:"certificate,omitempty"` +} + +// ContainerServiceSpec defines the desired state of ContainerService +type ContainerServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ContainerServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ContainerServiceInitParameters `json:"initProvider,omitempty"` +} + +// ContainerServiceStatus defines the observed state of ContainerService. +type ContainerServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ContainerServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ContainerService is the Schema for the ContainerServices API. 
Provides a resource to manage Lightsail container service +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ContainerService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.power) || (has(self.initProvider) && has(self.initProvider.power))",message="spec.forProvider.power is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scale) || (has(self.initProvider) && has(self.initProvider.scale))",message="spec.forProvider.scale is a required parameter" + Spec ContainerServiceSpec `json:"spec"` + Status ContainerServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ContainerServiceList contains a list of ContainerServices +type ContainerServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ContainerService `json:"items"` +} + +// Repository type metadata. +var ( + ContainerService_Kind = "ContainerService" + ContainerService_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ContainerService_Kind}.String() + ContainerService_KindAPIVersion = ContainerService_Kind + "." 
+ CRDGroupVersion.String() + ContainerService_GroupVersionKind = CRDGroupVersion.WithKind(ContainerService_Kind) +) + +func init() { + SchemeBuilder.Register(&ContainerService{}, &ContainerServiceList{}) +} diff --git a/apis/lightsail/v1beta2/zz_generated.conversion_hubs.go b/apis/lightsail/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..c271900691 --- /dev/null +++ b/apis/lightsail/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ContainerService) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Instance) Hub() {} diff --git a/apis/lightsail/v1beta2/zz_generated.deepcopy.go b/apis/lightsail/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8c62c4e87f --- /dev/null +++ b/apis/lightsail/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1089 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddOnInitParameters) DeepCopyInto(out *AddOnInitParameters) { + *out = *in + if in.SnapshotTime != nil { + in, out := &in.SnapshotTime, &out.SnapshotTime + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnInitParameters. 
+func (in *AddOnInitParameters) DeepCopy() *AddOnInitParameters { + if in == nil { + return nil + } + out := new(AddOnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddOnObservation) DeepCopyInto(out *AddOnObservation) { + *out = *in + if in.SnapshotTime != nil { + in, out := &in.SnapshotTime, &out.SnapshotTime + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnObservation. +func (in *AddOnObservation) DeepCopy() *AddOnObservation { + if in == nil { + return nil + } + out := new(AddOnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddOnParameters) DeepCopyInto(out *AddOnParameters) { + *out = *in + if in.SnapshotTime != nil { + in, out := &in.SnapshotTime, &out.SnapshotTime + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnParameters. +func (in *AddOnParameters) DeepCopy() *AddOnParameters { + if in == nil { + return nil + } + out := new(AddOnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.CertificateName != nil { + in, out := &in.CertificateName, &out.CertificateName + *out = new(string) + **out = **in + } + if in.DomainNames != nil { + in, out := &in.DomainNames, &out.DomainNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. +func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.CertificateName != nil { + in, out := &in.CertificateName, &out.CertificateName + *out = new(string) + **out = **in + } + if in.DomainNames != nil { + in, out := &in.DomainNames, &out.DomainNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. +func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.CertificateName != nil { + in, out := &in.CertificateName, &out.CertificateName + *out = new(string) + **out = **in + } + if in.DomainNames != nil { + in, out := &in.DomainNames, &out.DomainNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. +func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerService) DeepCopyInto(out *ContainerService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerService. +func (in *ContainerService) DeepCopy() *ContainerService { + if in == nil { + return nil + } + out := new(ContainerService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerServiceInitParameters) DeepCopyInto(out *ContainerServiceInitParameters) { + *out = *in + if in.IsDisabled != nil { + in, out := &in.IsDisabled, &out.IsDisabled + *out = new(bool) + **out = **in + } + if in.Power != nil { + in, out := &in.Power, &out.Power + *out = new(string) + **out = **in + } + if in.PrivateRegistryAccess != nil { + in, out := &in.PrivateRegistryAccess, &out.PrivateRegistryAccess + *out = new(PrivateRegistryAccessInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicDomainNames != nil { + in, out := &in.PublicDomainNames, &out.PublicDomainNames + *out = new(PublicDomainNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerServiceInitParameters. +func (in *ContainerServiceInitParameters) DeepCopy() *ContainerServiceInitParameters { + if in == nil { + return nil + } + out := new(ContainerServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerServiceList) DeepCopyInto(out *ContainerServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContainerService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerServiceList. 
+func (in *ContainerServiceList) DeepCopy() *ContainerServiceList { + if in == nil { + return nil + } + out := new(ContainerServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerServiceObservation) DeepCopyInto(out *ContainerServiceObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IsDisabled != nil { + in, out := &in.IsDisabled, &out.IsDisabled + *out = new(bool) + **out = **in + } + if in.Power != nil { + in, out := &in.Power, &out.Power + *out = new(string) + **out = **in + } + if in.PowerID != nil { + in, out := &in.PowerID, &out.PowerID + *out = new(string) + **out = **in + } + if in.PrincipalArn != nil { + in, out := &in.PrincipalArn, &out.PrincipalArn + *out = new(string) + **out = **in + } + if in.PrivateDomainName != nil { + in, out := &in.PrivateDomainName, &out.PrivateDomainName + *out = new(string) + **out = **in + } + if in.PrivateRegistryAccess != nil { + in, out := &in.PrivateRegistryAccess, &out.PrivateRegistryAccess + *out = new(PrivateRegistryAccessObservation) + (*in).DeepCopyInto(*out) + } + if in.PublicDomainNames != nil { + in, out := &in.PublicDomainNames, &out.PublicDomainNames + *out = new(PublicDomainNamesObservation) + (*in).DeepCopyInto(*out) + } + if 
in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(float64) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerServiceObservation. +func (in *ContainerServiceObservation) DeepCopy() *ContainerServiceObservation { + if in == nil { + return nil + } + out := new(ContainerServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerServiceParameters) DeepCopyInto(out *ContainerServiceParameters) { + *out = *in + if in.IsDisabled != nil { + in, out := &in.IsDisabled, &out.IsDisabled + *out = new(bool) + **out = **in + } + if in.Power != nil { + in, out := &in.Power, &out.Power + *out = new(string) + **out = **in + } + if in.PrivateRegistryAccess != nil { + in, out := &in.PrivateRegistryAccess, &out.PrivateRegistryAccess + *out = new(PrivateRegistryAccessParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicDomainNames != nil { + in, out := &in.PublicDomainNames, &out.PublicDomainNames + *out = new(PublicDomainNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerServiceParameters. +func (in *ContainerServiceParameters) DeepCopy() *ContainerServiceParameters { + if in == nil { + return nil + } + out := new(ContainerServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerServiceSpec) DeepCopyInto(out *ContainerServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerServiceSpec. 
+func (in *ContainerServiceSpec) DeepCopy() *ContainerServiceSpec { + if in == nil { + return nil + } + out := new(ContainerServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerServiceStatus) DeepCopyInto(out *ContainerServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerServiceStatus. +func (in *ContainerServiceStatus) DeepCopy() *ContainerServiceStatus { + if in == nil { + return nil + } + out := new(ContainerServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcrImagePullerRoleInitParameters) DeepCopyInto(out *EcrImagePullerRoleInitParameters) { + *out = *in + if in.IsActive != nil { + in, out := &in.IsActive, &out.IsActive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcrImagePullerRoleInitParameters. +func (in *EcrImagePullerRoleInitParameters) DeepCopy() *EcrImagePullerRoleInitParameters { + if in == nil { + return nil + } + out := new(EcrImagePullerRoleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcrImagePullerRoleObservation) DeepCopyInto(out *EcrImagePullerRoleObservation) { + *out = *in + if in.IsActive != nil { + in, out := &in.IsActive, &out.IsActive + *out = new(bool) + **out = **in + } + if in.PrincipalArn != nil { + in, out := &in.PrincipalArn, &out.PrincipalArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcrImagePullerRoleObservation. +func (in *EcrImagePullerRoleObservation) DeepCopy() *EcrImagePullerRoleObservation { + if in == nil { + return nil + } + out := new(EcrImagePullerRoleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcrImagePullerRoleParameters) DeepCopyInto(out *EcrImagePullerRoleParameters) { + *out = *in + if in.IsActive != nil { + in, out := &in.IsActive, &out.IsActive + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcrImagePullerRoleParameters. +func (in *EcrImagePullerRoleParameters) DeepCopy() *EcrImagePullerRoleParameters { + if in == nil { + return nil + } + out := new(EcrImagePullerRoleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Instance) DeepCopyInto(out *Instance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. +func (in *Instance) DeepCopy() *Instance { + if in == nil { + return nil + } + out := new(Instance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Instance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { + *out = *in + if in.AddOn != nil { + in, out := &in.AddOn, &out.AddOn + *out = new(AddOnInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.BlueprintID != nil { + in, out := &in.BlueprintID, &out.BlueprintID + *out = new(string) + **out = **in + } + if in.BundleID != nil { + in, out := &in.BundleID, &out.BundleID + *out = new(string) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.KeyPairName != nil { + in, out := &in.KeyPairName, &out.KeyPairName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceInitParameters. +func (in *InstanceInitParameters) DeepCopy() *InstanceInitParameters { + if in == nil { + return nil + } + out := new(InstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceList) DeepCopyInto(out *InstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Instance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceList. +func (in *InstanceList) DeepCopy() *InstanceList { + if in == nil { + return nil + } + out := new(InstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { + *out = *in + if in.AddOn != nil { + in, out := &in.AddOn, &out.AddOn + *out = new(AddOnObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.BlueprintID != nil { + in, out := &in.BlueprintID, &out.BlueprintID + *out = new(string) + **out = **in + } + if in.BundleID != nil { + in, out := &in.BundleID, &out.BundleID + *out = new(string) + **out = **in + } + if in.CPUCount != nil { + in, out := &in.CPUCount, &out.CPUCount + *out = new(float64) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + 
} + if in.IPv6Addresses != nil { + in, out := &in.IPv6Addresses, &out.IPv6Addresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IsStaticIP != nil { + in, out := &in.IsStaticIP, &out.IsStaticIP + *out = new(bool) + **out = **in + } + if in.KeyPairName != nil { + in, out := &in.KeyPairName, &out.KeyPairName + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = new(string) + **out = **in + } + if in.RAMSize != nil { + in, out := &in.RAMSize, &out.RAMSize + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceObservation. 
+func (in *InstanceObservation) DeepCopy() *InstanceObservation { + if in == nil { + return nil + } + out := new(InstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { + *out = *in + if in.AddOn != nil { + in, out := &in.AddOn, &out.AddOn + *out = new(AddOnParameters) + (*in).DeepCopyInto(*out) + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.BlueprintID != nil { + in, out := &in.BlueprintID, &out.BlueprintID + *out = new(string) + **out = **in + } + if in.BundleID != nil { + in, out := &in.BundleID, &out.BundleID + *out = new(string) + **out = **in + } + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.KeyPairName != nil { + in, out := &in.KeyPairName, &out.KeyPairName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceParameters. +func (in *InstanceParameters) DeepCopy() *InstanceParameters { + if in == nil { + return nil + } + out := new(InstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *InstanceSpec) DeepCopyInto(out *InstanceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSpec. +func (in *InstanceSpec) DeepCopy() *InstanceSpec { + if in == nil { + return nil + } + out := new(InstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceStatus) DeepCopyInto(out *InstanceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus. +func (in *InstanceStatus) DeepCopy() *InstanceStatus { + if in == nil { + return nil + } + out := new(InstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateRegistryAccessInitParameters) DeepCopyInto(out *PrivateRegistryAccessInitParameters) { + *out = *in + if in.EcrImagePullerRole != nil { + in, out := &in.EcrImagePullerRole, &out.EcrImagePullerRole + *out = new(EcrImagePullerRoleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateRegistryAccessInitParameters. +func (in *PrivateRegistryAccessInitParameters) DeepCopy() *PrivateRegistryAccessInitParameters { + if in == nil { + return nil + } + out := new(PrivateRegistryAccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateRegistryAccessObservation) DeepCopyInto(out *PrivateRegistryAccessObservation) { + *out = *in + if in.EcrImagePullerRole != nil { + in, out := &in.EcrImagePullerRole, &out.EcrImagePullerRole + *out = new(EcrImagePullerRoleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateRegistryAccessObservation. +func (in *PrivateRegistryAccessObservation) DeepCopy() *PrivateRegistryAccessObservation { + if in == nil { + return nil + } + out := new(PrivateRegistryAccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateRegistryAccessParameters) DeepCopyInto(out *PrivateRegistryAccessParameters) { + *out = *in + if in.EcrImagePullerRole != nil { + in, out := &in.EcrImagePullerRole, &out.EcrImagePullerRole + *out = new(EcrImagePullerRoleParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateRegistryAccessParameters. +func (in *PrivateRegistryAccessParameters) DeepCopy() *PrivateRegistryAccessParameters { + if in == nil { + return nil + } + out := new(PrivateRegistryAccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicDomainNamesInitParameters) DeepCopyInto(out *PublicDomainNamesInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]CertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicDomainNamesInitParameters. 
+func (in *PublicDomainNamesInitParameters) DeepCopy() *PublicDomainNamesInitParameters { + if in == nil { + return nil + } + out := new(PublicDomainNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicDomainNamesObservation) DeepCopyInto(out *PublicDomainNamesObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]CertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicDomainNamesObservation. +func (in *PublicDomainNamesObservation) DeepCopy() *PublicDomainNamesObservation { + if in == nil { + return nil + } + out := new(PublicDomainNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicDomainNamesParameters) DeepCopyInto(out *PublicDomainNamesParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]CertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicDomainNamesParameters. 
+func (in *PublicDomainNamesParameters) DeepCopy() *PublicDomainNamesParameters { + if in == nil { + return nil + } + out := new(PublicDomainNamesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/lightsail/v1beta2/zz_generated.managed.go b/apis/lightsail/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..5ffe953782 --- /dev/null +++ b/apis/lightsail/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ContainerService. +func (mg *ContainerService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ContainerService. +func (mg *ContainerService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ContainerService. +func (mg *ContainerService) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ContainerService. +func (mg *ContainerService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ContainerService. +func (mg *ContainerService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ContainerService. +func (mg *ContainerService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ContainerService. +func (mg *ContainerService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ContainerService. 
+func (mg *ContainerService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ContainerService. +func (mg *ContainerService) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ContainerService. +func (mg *ContainerService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ContainerService. +func (mg *ContainerService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ContainerService. +func (mg *ContainerService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Instance. +func (mg *Instance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Instance. +func (mg *Instance) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Instance. +func (mg *Instance) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Instance. +func (mg *Instance) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Instance. +func (mg *Instance) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Instance. +func (mg *Instance) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Instance. +func (mg *Instance) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this Instance. +func (mg *Instance) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Instance. +func (mg *Instance) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Instance. +func (mg *Instance) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Instance. +func (mg *Instance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Instance. +func (mg *Instance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/lightsail/v1beta2/zz_generated.managedlist.go b/apis/lightsail/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..877c85f30b --- /dev/null +++ b/apis/lightsail/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ContainerServiceList. +func (l *ContainerServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this InstanceList. 
+func (l *InstanceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/lightsail/v1beta2/zz_groupversion_info.go b/apis/lightsail/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..8cb4f99923 --- /dev/null +++ b/apis/lightsail/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=lightsail.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "lightsail.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/lightsail/v1beta2/zz_instance_terraformed.go b/apis/lightsail/v1beta2/zz_instance_terraformed.go new file mode 100755 index 0000000000..b975b58725 --- /dev/null +++ b/apis/lightsail/v1beta2/zz_instance_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Instance +func (mg *Instance) GetTerraformResourceType() string { + return "aws_lightsail_instance" +} + +// GetConnectionDetailsMapping for this Instance +func (tr *Instance) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Instance +func (tr *Instance) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Instance +func (tr *Instance) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Instance +func (tr *Instance) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Instance +func (tr *Instance) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Instance +func (tr *Instance) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Instance +func (tr *Instance) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, 
&base) +} + +// GetMergedParameters of this Instance +func (tr *Instance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil + } + +// LateInitialize this Instance using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Instance) LateInitialize(attrs []byte) (bool, error) { + params := &InstanceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Instance) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/lightsail/v1beta2/zz_instance_types.go b/apis/lightsail/v1beta2/zz_instance_types.go new file mode 100755 index 0000000000..967ee23ed9 --- /dev/null +++ b/apis/lightsail/v1beta2/zz_instance_types.go @@ -0,0 +1,268 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AddOnInitParameters struct { + + // The daily time when an automatic snapshot will be created. Must be in HH:00 format, and in an hourly increment and specified in Coordinated Universal Time (UTC). The snapshot will be automatically created between the time specified and up to 45 minutes after. + SnapshotTime *string `json:"snapshotTime,omitempty" tf:"snapshot_time,omitempty"` + + // The status of the add on. Valid Values: Enabled, Disabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // The add-on type. There is currently only one valid type AutoSnapshot. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AddOnObservation struct { + + // The daily time when an automatic snapshot will be created. Must be in HH:00 format, and in an hourly increment and specified in Coordinated Universal Time (UTC). The snapshot will be automatically created between the time specified and up to 45 minutes after. + SnapshotTime *string `json:"snapshotTime,omitempty" tf:"snapshot_time,omitempty"` + + // The status of the add on. Valid Values: Enabled, Disabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // The add-on type. 
There is currently only one valid type AutoSnapshot. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AddOnParameters struct { + + // The daily time when an automatic snapshot will be created. Must be in HH:00 format, and in an hourly increment and specified in Coordinated Universal Time (UTC). The snapshot will be automatically created between the time specified and up to 45 minutes after. + // +kubebuilder:validation:Optional + SnapshotTime *string `json:"snapshotTime" tf:"snapshot_time,omitempty"` + + // The status of the add on. Valid Values: Enabled, Disabled. + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` + + // The add-on type. There is currently only one valid type AutoSnapshot. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type InstanceInitParameters struct { + + // The add on configuration for the instance. Detailed below. + AddOn *AddOnInitParameters `json:"addOn,omitempty" tf:"add_on,omitempty"` + + // The Availability Zone in which to create your instance. A + // list of available zones can be obtained using the AWS CLI command: + // aws lightsail get-regions --include-availability-zones. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The ID for a virtual private server image. A list of available + // blueprint IDs can be obtained using the AWS CLI command: + // aws lightsail get-blueprints. + BlueprintID *string `json:"blueprintId,omitempty" tf:"blueprint_id,omitempty"` + + // The bundle of specification information. A list of available + // bundle IDs can be obtained using the AWS CLI command: + // aws lightsail get-bundles. + BundleID *string `json:"bundleId,omitempty" tf:"bundle_id,omitempty"` + + // The IP address type of the Lightsail Instance. Valid Values: dualstack | ipv4. 
+ IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // The name of your key pair. Created in the + // Lightsail console (cannot use aws_key_pair at this time) + KeyPairName *string `json:"keyPairName,omitempty" tf:"key_pair_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Single lined launch script as a string to configure server with additional user data + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` +} + +type InstanceObservation struct { + + // The add on configuration for the instance. Detailed below. + AddOn *AddOnObservation `json:"addOn,omitempty" tf:"add_on,omitempty"` + + // The ARN of the Lightsail instance (matches id). + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The Availability Zone in which to create your instance. A + // list of available zones can be obtained using the AWS CLI command: + // aws lightsail get-regions --include-availability-zones. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The ID for a virtual private server image. A list of available + // blueprint IDs can be obtained using the AWS CLI command: + // aws lightsail get-blueprints. + BlueprintID *string `json:"blueprintId,omitempty" tf:"blueprint_id,omitempty"` + + // The bundle of specification information. A list of available + // bundle IDs can be obtained using the AWS CLI command: + // aws lightsail get-bundles. + BundleID *string `json:"bundleId,omitempty" tf:"bundle_id,omitempty"` + + // The number of vCPUs the instance has. + CPUCount *float64 `json:"cpuCount,omitempty" tf:"cpu_count,omitempty"` + + // The timestamp when the instance was created. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The ARN of the Lightsail instance (matches arn). 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The IP address type of the Lightsail Instance. Valid Values: dualstack | ipv4. + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // List of IPv6 addresses for the Lightsail instance. + IPv6Addresses []*string `json:"ipv6Addresses,omitempty" tf:"ipv6_addresses,omitempty"` + + // A Boolean value indicating whether this instance has a static IP assigned to it. + IsStaticIP *bool `json:"isStaticIp,omitempty" tf:"is_static_ip,omitempty"` + + // The name of your key pair. Created in the + // Lightsail console (cannot use aws_key_pair at this time) + KeyPairName *string `json:"keyPairName,omitempty" tf:"key_pair_name,omitempty"` + + // The private IP address of the instance. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The public IP address of the instance. + PublicIPAddress *string `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The amount of RAM in GB on the instance (e.g., 1.0). + RAMSize *float64 `json:"ramSize,omitempty" tf:"ram_size,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Single lined launch script as a string to configure server with additional user data + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // The user name for connecting to the instance (e.g., ec2-user). + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InstanceParameters struct { + + // The add on configuration for the instance. Detailed below. 
+ // +kubebuilder:validation:Optional + AddOn *AddOnParameters `json:"addOn,omitempty" tf:"add_on,omitempty"` + + // The Availability Zone in which to create your instance. A + // list of available zones can be obtained using the AWS CLI command: + // aws lightsail get-regions --include-availability-zones. + // +kubebuilder:validation:Optional + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The ID for a virtual private server image. A list of available + // blueprint IDs can be obtained using the AWS CLI command: + // aws lightsail get-blueprints. + // +kubebuilder:validation:Optional + BlueprintID *string `json:"blueprintId,omitempty" tf:"blueprint_id,omitempty"` + + // The bundle of specification information. A list of available + // bundle IDs can be obtained using the AWS CLI command: + // aws lightsail get-bundles. + // +kubebuilder:validation:Optional + BundleID *string `json:"bundleId,omitempty" tf:"bundle_id,omitempty"` + + // The IP address type of the Lightsail Instance. Valid Values: dualstack | ipv4. + // +kubebuilder:validation:Optional + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // The name of your key pair. Created in the + // Lightsail console (cannot use aws_key_pair at this time) + // +kubebuilder:validation:Optional + KeyPairName *string `json:"keyPairName,omitempty" tf:"key_pair_name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Single lined launch script as a string to configure server with additional user data + // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` +} + +// InstanceSpec defines the desired state of Instance +type InstanceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InstanceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InstanceInitParameters `json:"initProvider,omitempty"` +} + +// InstanceStatus defines the observed state of Instance. +type InstanceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InstanceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Instance is the Schema for the Instances API. 
Provides a Lightsail Instance +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Instance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.availabilityZone) || (has(self.initProvider) && has(self.initProvider.availabilityZone))",message="spec.forProvider.availabilityZone is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.blueprintId) || (has(self.initProvider) && has(self.initProvider.blueprintId))",message="spec.forProvider.blueprintId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.bundleId) || (has(self.initProvider) && has(self.initProvider.bundleId))",message="spec.forProvider.bundleId is a required parameter" + Spec InstanceSpec `json:"spec"` + Status InstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceList contains a list of Instances +type InstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Instance `json:"items"` +} + +// Repository type metadata. 
+var ( + Instance_Kind = "Instance" + Instance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Instance_Kind}.String() + Instance_KindAPIVersion = Instance_Kind + "." + CRDGroupVersion.String() + Instance_GroupVersionKind = CRDGroupVersion.WithKind(Instance_Kind) +) + +func init() { + SchemeBuilder.Register(&Instance{}, &InstanceList{}) +} diff --git a/apis/location/v1beta1/zz_generated.conversion_hubs.go b/apis/location/v1beta1/zz_generated.conversion_hubs.go index e32613d154..f7bafd8a69 100755 --- a/apis/location/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/location/v1beta1/zz_generated.conversion_hubs.go @@ -9,9 +9,6 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *GeofenceCollection) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *PlaceIndex) Hub() {} - // Hub marks this type as a conversion hub. func (tr *RouteCalculator) Hub() {} diff --git a/apis/location/v1beta1/zz_generated.conversion_spokes.go b/apis/location/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..02e764aff0 --- /dev/null +++ b/apis/location/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this PlaceIndex to the hub type. 
+func (tr *PlaceIndex) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the PlaceIndex type. +func (tr *PlaceIndex) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/location/v1beta2/zz_generated.conversion_hubs.go b/apis/location/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..ba7f3d1a80 --- /dev/null +++ b/apis/location/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *PlaceIndex) Hub() {} diff --git a/apis/location/v1beta2/zz_generated.deepcopy.go b/apis/location/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..7ae84a40bd --- /dev/null +++ b/apis/location/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,346 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceConfigurationInitParameters) DeepCopyInto(out *DataSourceConfigurationInitParameters) { + *out = *in + if in.IntendedUse != nil { + in, out := &in.IntendedUse, &out.IntendedUse + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceConfigurationInitParameters. +func (in *DataSourceConfigurationInitParameters) DeepCopy() *DataSourceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(DataSourceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceConfigurationObservation) DeepCopyInto(out *DataSourceConfigurationObservation) { + *out = *in + if in.IntendedUse != nil { + in, out := &in.IntendedUse, &out.IntendedUse + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceConfigurationObservation. +func (in *DataSourceConfigurationObservation) DeepCopy() *DataSourceConfigurationObservation { + if in == nil { + return nil + } + out := new(DataSourceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceConfigurationParameters) DeepCopyInto(out *DataSourceConfigurationParameters) { + *out = *in + if in.IntendedUse != nil { + in, out := &in.IntendedUse, &out.IntendedUse + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceConfigurationParameters. 
+func (in *DataSourceConfigurationParameters) DeepCopy() *DataSourceConfigurationParameters { + if in == nil { + return nil + } + out := new(DataSourceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlaceIndex) DeepCopyInto(out *PlaceIndex) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaceIndex. +func (in *PlaceIndex) DeepCopy() *PlaceIndex { + if in == nil { + return nil + } + out := new(PlaceIndex) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlaceIndex) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlaceIndexInitParameters) DeepCopyInto(out *PlaceIndexInitParameters) { + *out = *in + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(string) + **out = **in + } + if in.DataSourceConfiguration != nil { + in, out := &in.DataSourceConfiguration, &out.DataSourceConfiguration + *out = new(DataSourceConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaceIndexInitParameters. +func (in *PlaceIndexInitParameters) DeepCopy() *PlaceIndexInitParameters { + if in == nil { + return nil + } + out := new(PlaceIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlaceIndexList) DeepCopyInto(out *PlaceIndexList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PlaceIndex, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaceIndexList. +func (in *PlaceIndexList) DeepCopy() *PlaceIndexList { + if in == nil { + return nil + } + out := new(PlaceIndexList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PlaceIndexList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlaceIndexObservation) DeepCopyInto(out *PlaceIndexObservation) { + *out = *in + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = new(string) + **out = **in + } + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(string) + **out = **in + } + if in.DataSourceConfiguration != nil { + in, out := &in.DataSourceConfiguration, &out.DataSourceConfiguration + *out = new(DataSourceConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexArn != nil { + in, out := &in.IndexArn, &out.IndexArn + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpdateTime != nil { + in, out := &in.UpdateTime, &out.UpdateTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaceIndexObservation. 
+func (in *PlaceIndexObservation) DeepCopy() *PlaceIndexObservation { + if in == nil { + return nil + } + out := new(PlaceIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlaceIndexParameters) DeepCopyInto(out *PlaceIndexParameters) { + *out = *in + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(string) + **out = **in + } + if in.DataSourceConfiguration != nil { + in, out := &in.DataSourceConfiguration, &out.DataSourceConfiguration + *out = new(DataSourceConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaceIndexParameters. +func (in *PlaceIndexParameters) DeepCopy() *PlaceIndexParameters { + if in == nil { + return nil + } + out := new(PlaceIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlaceIndexSpec) DeepCopyInto(out *PlaceIndexSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaceIndexSpec. 
+func (in *PlaceIndexSpec) DeepCopy() *PlaceIndexSpec { + if in == nil { + return nil + } + out := new(PlaceIndexSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlaceIndexStatus) DeepCopyInto(out *PlaceIndexStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlaceIndexStatus. +func (in *PlaceIndexStatus) DeepCopy() *PlaceIndexStatus { + if in == nil { + return nil + } + out := new(PlaceIndexStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/location/v1beta2/zz_generated.managed.go b/apis/location/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..c7e313f168 --- /dev/null +++ b/apis/location/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this PlaceIndex. +func (mg *PlaceIndex) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PlaceIndex. +func (mg *PlaceIndex) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PlaceIndex. +func (mg *PlaceIndex) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PlaceIndex. +func (mg *PlaceIndex) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PlaceIndex. 
+func (mg *PlaceIndex) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PlaceIndex. +func (mg *PlaceIndex) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PlaceIndex. +func (mg *PlaceIndex) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this PlaceIndex. +func (mg *PlaceIndex) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PlaceIndex. +func (mg *PlaceIndex) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PlaceIndex. +func (mg *PlaceIndex) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PlaceIndex. +func (mg *PlaceIndex) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PlaceIndex. +func (mg *PlaceIndex) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/location/v1beta2/zz_generated.managedlist.go b/apis/location/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..93da910778 --- /dev/null +++ b/apis/location/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PlaceIndexList. 
+func (l *PlaceIndexList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/location/v1beta2/zz_groupversion_info.go b/apis/location/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..4a4840627d --- /dev/null +++ b/apis/location/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=location.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "location.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/location/v1beta2/zz_placeindex_terraformed.go b/apis/location/v1beta2/zz_placeindex_terraformed.go new file mode 100755 index 0000000000..15dffe39a5 --- /dev/null +++ b/apis/location/v1beta2/zz_placeindex_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PlaceIndex +func (mg *PlaceIndex) GetTerraformResourceType() string { + return "aws_location_place_index" +} + +// GetConnectionDetailsMapping for this PlaceIndex +func (tr *PlaceIndex) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PlaceIndex +func (tr *PlaceIndex) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PlaceIndex +func (tr *PlaceIndex) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PlaceIndex +func (tr *PlaceIndex) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PlaceIndex +func (tr *PlaceIndex) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PlaceIndex +func (tr *PlaceIndex) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PlaceIndex +func (tr *PlaceIndex) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this PlaceIndex +func (tr *PlaceIndex) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PlaceIndex using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *PlaceIndex) LateInitialize(attrs []byte) (bool, error) { + params := &PlaceIndexParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PlaceIndex) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/location/v1beta2/zz_placeindex_types.go b/apis/location/v1beta2/zz_placeindex_types.go new file mode 100755 index 0000000000..b0eaefc042 --- /dev/null +++ b/apis/location/v1beta2/zz_placeindex_types.go @@ -0,0 +1,166 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataSourceConfigurationInitParameters struct { + + // Specifies how the results of an operation will be stored by the caller. Valid values: SingleUse, Storage. Default: SingleUse. + IntendedUse *string `json:"intendedUse,omitempty" tf:"intended_use,omitempty"` +} + +type DataSourceConfigurationObservation struct { + + // Specifies how the results of an operation will be stored by the caller. Valid values: SingleUse, Storage. Default: SingleUse. + IntendedUse *string `json:"intendedUse,omitempty" tf:"intended_use,omitempty"` +} + +type DataSourceConfigurationParameters struct { + + // Specifies how the results of an operation will be stored by the caller. Valid values: SingleUse, Storage. Default: SingleUse. + // +kubebuilder:validation:Optional + IntendedUse *string `json:"intendedUse,omitempty" tf:"intended_use,omitempty"` +} + +type PlaceIndexInitParameters struct { + + // Specifies the geospatial data provider for the new place index. + DataSource *string `json:"dataSource,omitempty" tf:"data_source,omitempty"` + + // Configuration block with the data storage option chosen for requesting Places. Detailed below. 
+ DataSourceConfiguration *DataSourceConfigurationInitParameters `json:"dataSourceConfiguration,omitempty" tf:"data_source_configuration,omitempty"` + + // The optional description for the place index resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PlaceIndexObservation struct { + + // The timestamp for when the place index resource was created in ISO 8601 format. + CreateTime *string `json:"createTime,omitempty" tf:"create_time,omitempty"` + + // Specifies the geospatial data provider for the new place index. + DataSource *string `json:"dataSource,omitempty" tf:"data_source,omitempty"` + + // Configuration block with the data storage option chosen for requesting Places. Detailed below. + DataSourceConfiguration *DataSourceConfigurationObservation `json:"dataSourceConfiguration,omitempty" tf:"data_source_configuration,omitempty"` + + // The optional description for the place index resource. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Amazon Resource Name (ARN) for the place index resource. Used to specify a resource across AWS. + IndexArn *string `json:"indexArn,omitempty" tf:"index_arn,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The timestamp for when the place index resource was last update in ISO 8601. 
+ UpdateTime *string `json:"updateTime,omitempty" tf:"update_time,omitempty"` +} + +type PlaceIndexParameters struct { + + // Specifies the geospatial data provider for the new place index. + // +kubebuilder:validation:Optional + DataSource *string `json:"dataSource,omitempty" tf:"data_source,omitempty"` + + // Configuration block with the data storage option chosen for requesting Places. Detailed below. + // +kubebuilder:validation:Optional + DataSourceConfiguration *DataSourceConfigurationParameters `json:"dataSourceConfiguration,omitempty" tf:"data_source_configuration,omitempty"` + + // The optional description for the place index resource. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// PlaceIndexSpec defines the desired state of PlaceIndex +type PlaceIndexSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PlaceIndexParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider PlaceIndexInitParameters `json:"initProvider,omitempty"` +} + +// PlaceIndexStatus defines the observed state of PlaceIndex. +type PlaceIndexStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PlaceIndexObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// PlaceIndex is the Schema for the PlaceIndexs API. Provides a Location Service Place Index. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type PlaceIndex struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dataSource) || (has(self.initProvider) && has(self.initProvider.dataSource))",message="spec.forProvider.dataSource is a required parameter" + Spec PlaceIndexSpec `json:"spec"` + Status PlaceIndexStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PlaceIndexList contains a list of PlaceIndexs +type PlaceIndexList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PlaceIndex `json:"items"` +} + +// Repository type metadata. +var ( + PlaceIndex_Kind = "PlaceIndex" + PlaceIndex_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PlaceIndex_Kind}.String() + PlaceIndex_KindAPIVersion = PlaceIndex_Kind + "." 
+ CRDGroupVersion.String() + PlaceIndex_GroupVersionKind = CRDGroupVersion.WithKind(PlaceIndex_Kind) +) + +func init() { + SchemeBuilder.Register(&PlaceIndex{}, &PlaceIndexList{}) +} diff --git a/apis/macie2/v1beta1/zz_generated.conversion_hubs.go b/apis/macie2/v1beta1/zz_generated.conversion_hubs.go index 07e993b32f..dd3082d232 100755 --- a/apis/macie2/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/macie2/v1beta1/zz_generated.conversion_hubs.go @@ -9,15 +9,9 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Account) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ClassificationJob) Hub() {} - // Hub marks this type as a conversion hub. func (tr *CustomDataIdentifier) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FindingsFilter) Hub() {} - // Hub marks this type as a conversion hub. func (tr *InvitationAccepter) Hub() {} diff --git a/apis/macie2/v1beta1/zz_generated.conversion_spokes.go b/apis/macie2/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..139b334696 --- /dev/null +++ b/apis/macie2/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ClassificationJob to the hub type. 
+func (tr *ClassificationJob) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ClassificationJob type. +func (tr *ClassificationJob) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FindingsFilter to the hub type. +func (tr *FindingsFilter) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FindingsFilter type. 
+func (tr *FindingsFilter) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/macie2/v1beta2/zz_classificationjob_terraformed.go b/apis/macie2/v1beta2/zz_classificationjob_terraformed.go new file mode 100755 index 0000000000..cfdce707b3 --- /dev/null +++ b/apis/macie2/v1beta2/zz_classificationjob_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ClassificationJob +func (mg *ClassificationJob) GetTerraformResourceType() string { + return "aws_macie2_classification_job" +} + +// GetConnectionDetailsMapping for this ClassificationJob +func (tr *ClassificationJob) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ClassificationJob +func (tr *ClassificationJob) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ClassificationJob +func (tr *ClassificationJob) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this 
ClassificationJob +func (tr *ClassificationJob) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ClassificationJob +func (tr *ClassificationJob) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ClassificationJob +func (tr *ClassificationJob) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ClassificationJob +func (tr *ClassificationJob) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ClassificationJob +func (tr *ClassificationJob) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ClassificationJob using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ClassificationJob) LateInitialize(attrs []byte) (bool, error) { + params := &ClassificationJobParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ClassificationJob) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/macie2/v1beta2/zz_classificationjob_types.go b/apis/macie2/v1beta2/zz_classificationjob_types.go new file mode 100755 index 0000000000..4954c6a23c --- /dev/null +++ b/apis/macie2/v1beta2/zz_classificationjob_types.go @@ -0,0 +1,1009 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AndInitParameters struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an S3 buckets from the job. 
(documented below) + SimpleCriterion *SimpleCriterionInitParameters `json:"simpleCriterion,omitempty" tf:"simple_criterion,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an S3 buckets from the job. (documented below) + TagCriterion *TagCriterionInitParameters `json:"tagCriterion,omitempty" tf:"tag_criterion,omitempty"` +} + +type AndObservation struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an S3 buckets from the job. (documented below) + SimpleCriterion *SimpleCriterionObservation `json:"simpleCriterion,omitempty" tf:"simple_criterion,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an S3 buckets from the job. (documented below) + TagCriterion *TagCriterionObservation `json:"tagCriterion,omitempty" tf:"tag_criterion,omitempty"` +} + +type AndParameters struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an S3 buckets from the job. (documented below) + // +kubebuilder:validation:Optional + SimpleCriterion *SimpleCriterionParameters `json:"simpleCriterion,omitempty" tf:"simple_criterion,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an S3 buckets from the job. (documented below) + // +kubebuilder:validation:Optional + TagCriterion *TagCriterionParameters `json:"tagCriterion,omitempty" tf:"tag_criterion,omitempty"` +} + +type AndSimpleCriterionInitParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AndSimpleCriterionObservation struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AndSimpleCriterionParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + // +kubebuilder:validation:Optional + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + // +kubebuilder:validation:Optional + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AndSimpleScopeTermInitParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AndSimpleScopeTermObservation struct { + + // The operator to use in a condition. 
Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AndSimpleScopeTermParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + // +kubebuilder:validation:Optional + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + // +kubebuilder:validation:Optional + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AndTagCriterionInitParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + TagValues []TagCriterionTagValuesInitParameters `json:"tagValues,omitempty" tf:"tag_values,omitempty"` +} + +type AndTagCriterionObservation struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + TagValues []TagCriterionTagValuesObservation `json:"tagValues,omitempty" tf:"tag_values,omitempty"` +} + +type AndTagCriterionParameters struct { + + // The operator to use in a condition. 
Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + // +kubebuilder:validation:Optional + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + // +kubebuilder:validation:Optional + TagValues []TagCriterionTagValuesParameters `json:"tagValues,omitempty" tf:"tag_values,omitempty"` +} + +type AndTagScopeTermInitParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + TagValues []AndTagScopeTermTagValuesInitParameters `json:"tagValues,omitempty" tf:"tag_values,omitempty"` + + // The type of object to apply the condition to. The only valid value is S3_OBJECT. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type AndTagScopeTermObservation struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + TagValues []AndTagScopeTermTagValuesObservation `json:"tagValues,omitempty" tf:"tag_values,omitempty"` + + // The type of object to apply the condition to. The only valid value is S3_OBJECT. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type AndTagScopeTermParameters struct { + + // The operator to use in a condition. 
Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + // +kubebuilder:validation:Optional + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + // +kubebuilder:validation:Optional + TagValues []AndTagScopeTermTagValuesParameters `json:"tagValues,omitempty" tf:"tag_values,omitempty"` + + // The type of object to apply the condition to. The only valid value is S3_OBJECT. + // +kubebuilder:validation:Optional + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type AndTagScopeTermTagValuesInitParameters struct { + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type AndTagScopeTermTagValuesObservation struct { + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type AndTagScopeTermTagValuesParameters struct { + + // The object property to use in the condition. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type BucketCriteriaInitParameters struct { + + // The property- or tag-based conditions that determine which objects to exclude from the analysis. (documented below) + Excludes *ExcludesInitParameters `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // The property- or tag-based conditions that determine which objects to include in the analysis. 
(documented below) + Includes *IncludesInitParameters `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type BucketCriteriaObservation struct { + + // The property- or tag-based conditions that determine which objects to exclude from the analysis. (documented below) + Excludes *ExcludesObservation `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // The property- or tag-based conditions that determine which objects to include in the analysis. (documented below) + Includes *IncludesObservation `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type BucketCriteriaParameters struct { + + // The property- or tag-based conditions that determine which objects to exclude from the analysis. (documented below) + // +kubebuilder:validation:Optional + Excludes *ExcludesParameters `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // The property- or tag-based conditions that determine which objects to include in the analysis. (documented below) + // +kubebuilder:validation:Optional + Includes *IncludesParameters `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type BucketDefinitionsInitParameters struct { + + // The unique identifier for the AWS account that owns the buckets. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // An array that lists the names of the buckets. + Buckets []*string `json:"buckets,omitempty" tf:"buckets,omitempty"` +} + +type BucketDefinitionsObservation struct { + + // The unique identifier for the AWS account that owns the buckets. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // An array that lists the names of the buckets. + Buckets []*string `json:"buckets,omitempty" tf:"buckets,omitempty"` +} + +type BucketDefinitionsParameters struct { + + // The unique identifier for the AWS account that owns the buckets. 
+ // +kubebuilder:validation:Optional + AccountID *string `json:"accountId" tf:"account_id,omitempty"` + + // An array that lists the names of the buckets. + // +kubebuilder:validation:Optional + Buckets []*string `json:"buckets" tf:"buckets,omitempty"` +} + +type ClassificationJobInitParameters struct { + + // The custom data identifiers to use for data analysis and classification. + CustomDataIdentifierIds []*string `json:"customDataIdentifierIds,omitempty" tf:"custom_data_identifier_ids,omitempty"` + + // A custom description of the job. The description can contain as many as 200 characters. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies whether to analyze all existing, eligible objects immediately after the job is created. + InitialRun *bool `json:"initialRun,omitempty" tf:"initial_run,omitempty"` + + // The status for the job. Valid values are: CANCELLED, RUNNING and USER_PAUSED + JobStatus *string `json:"jobStatus,omitempty" tf:"job_status,omitempty"` + + // The schedule for running the job. Valid values are: ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the schedule_frequency property. SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the schedule_frequency property to define the recurrence pattern for the job. + JobType *string `json:"jobType,omitempty" tf:"job_type,omitempty"` + + // A custom name for the job. The name can contain as many as 500 characters. Conflicts with name_prefix. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The S3 buckets that contain the objects to analyze, and the scope of that analysis. (documented below) + S3JobDefinition *S3JobDefinitionInitParameters `json:"s3JobDefinition,omitempty" tf:"s3_job_definition,omitempty"` + + // The sampling depth, as a percentage, to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. 
If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects. + SamplingPercentage *float64 `json:"samplingPercentage,omitempty" tf:"sampling_percentage,omitempty"` + + // The recurrence pattern for running the job. To run the job only once, don't specify a value for this property and set the value for the job_type property to ONE_TIME. (documented below) + ScheduleFrequency *ScheduleFrequencyInitParameters `json:"scheduleFrequency,omitempty" tf:"schedule_frequency,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ClassificationJobObservation struct { + + // The date and time, in UTC and extended RFC 3339 format, when the job was created. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The custom data identifiers to use for data analysis and classification. + CustomDataIdentifierIds []*string `json:"customDataIdentifierIds,omitempty" tf:"custom_data_identifier_ids,omitempty"` + + // A custom description of the job. The description can contain as many as 200 characters. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The unique identifier (ID) of the macie classification job. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies whether to analyze all existing, eligible objects immediately after the job is created. + InitialRun *bool `json:"initialRun,omitempty" tf:"initial_run,omitempty"` + + JobArn *string `json:"jobArn,omitempty" tf:"job_arn,omitempty"` + + // The unique identifier (ID) of the macie classification job. + JobID *string `json:"jobId,omitempty" tf:"job_id,omitempty"` + + // The status for the job. 
Valid values are: CANCELLED, RUNNING and USER_PAUSED + JobStatus *string `json:"jobStatus,omitempty" tf:"job_status,omitempty"` + + // The schedule for running the job. Valid values are: ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the schedule_frequency property. SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the schedule_frequency property to define the recurrence pattern for the job. + JobType *string `json:"jobType,omitempty" tf:"job_type,omitempty"` + + // A custom name for the job. The name can contain as many as 500 characters. Conflicts with name_prefix. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The S3 buckets that contain the objects to analyze, and the scope of that analysis. (documented below) + S3JobDefinition *S3JobDefinitionObservation `json:"s3JobDefinition,omitempty" tf:"s3_job_definition,omitempty"` + + // The sampling depth, as a percentage, to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects. + SamplingPercentage *float64 `json:"samplingPercentage,omitempty" tf:"sampling_percentage,omitempty"` + + // The recurrence pattern for running the job. To run the job only once, don't specify a value for this property and set the value for the job_type property to ONE_TIME. (documented below) + ScheduleFrequency *ScheduleFrequencyObservation `json:"scheduleFrequency,omitempty" tf:"schedule_frequency,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job or job run will expire and be cancelled if it isn't resumed. This value is present only if the value for job-status is USER_PAUSED. + UserPausedDetails []UserPausedDetailsObservation `json:"userPausedDetails,omitempty" tf:"user_paused_details,omitempty"` +} + +type ClassificationJobParameters struct { + + // The custom data identifiers to use for data analysis and classification. + // +kubebuilder:validation:Optional + CustomDataIdentifierIds []*string `json:"customDataIdentifierIds,omitempty" tf:"custom_data_identifier_ids,omitempty"` + + // A custom description of the job. The description can contain as many as 200 characters. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies whether to analyze all existing, eligible objects immediately after the job is created. + // +kubebuilder:validation:Optional + InitialRun *bool `json:"initialRun,omitempty" tf:"initial_run,omitempty"` + + // The status for the job. Valid values are: CANCELLED, RUNNING and USER_PAUSED + // +kubebuilder:validation:Optional + JobStatus *string `json:"jobStatus,omitempty" tf:"job_status,omitempty"` + + // The schedule for running the job. Valid values are: ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the schedule_frequency property. SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the schedule_frequency property to define the recurrence pattern for the job. + // +kubebuilder:validation:Optional + JobType *string `json:"jobType,omitempty" tf:"job_type,omitempty"` + + // A custom name for the job. 
The name can contain as many as 500 characters. Conflicts with name_prefix. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The S3 buckets that contain the objects to analyze, and the scope of that analysis. (documented below) + // +kubebuilder:validation:Optional + S3JobDefinition *S3JobDefinitionParameters `json:"s3JobDefinition,omitempty" tf:"s3_job_definition,omitempty"` + + // The sampling depth, as a percentage, to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects. + // +kubebuilder:validation:Optional + SamplingPercentage *float64 `json:"samplingPercentage,omitempty" tf:"sampling_percentage,omitempty"` + + // The recurrence pattern for running the job. To run the job only once, don't specify a value for this property and set the value for the job_type property to ONE_TIME. (documented below) + // +kubebuilder:validation:Optional + ScheduleFrequency *ScheduleFrequencyParameters `json:"scheduleFrequency,omitempty" tf:"schedule_frequency,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ExcludesAndInitParameters struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an object from the job. 
(documented below) + SimpleScopeTerm *SimpleScopeTermInitParameters `json:"simpleScopeTerm,omitempty" tf:"simple_scope_term,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an object from the job. (documented below) + TagScopeTerm *TagScopeTermInitParameters `json:"tagScopeTerm,omitempty" tf:"tag_scope_term,omitempty"` +} + +type ExcludesAndObservation struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an object from the job. (documented below) + SimpleScopeTerm *SimpleScopeTermObservation `json:"simpleScopeTerm,omitempty" tf:"simple_scope_term,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an object from the job. (documented below) + TagScopeTerm *TagScopeTermObservation `json:"tagScopeTerm,omitempty" tf:"tag_scope_term,omitempty"` +} + +type ExcludesAndParameters struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an object from the job. (documented below) + // +kubebuilder:validation:Optional + SimpleScopeTerm *SimpleScopeTermParameters `json:"simpleScopeTerm,omitempty" tf:"simple_scope_term,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an object from the job. (documented below) + // +kubebuilder:validation:Optional + TagScopeTerm *TagScopeTermParameters `json:"tagScopeTerm,omitempty" tf:"tag_scope_term,omitempty"` +} + +type ExcludesInitParameters struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. 
(documented below) + And []AndInitParameters `json:"and,omitempty" tf:"and,omitempty"` +} + +type ExcludesObservation struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + And []AndObservation `json:"and,omitempty" tf:"and,omitempty"` +} + +type ExcludesParameters struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + // +kubebuilder:validation:Optional + And []AndParameters `json:"and,omitempty" tf:"and,omitempty"` +} + +type IncludesAndInitParameters struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an S3 buckets from the job. (documented below) + SimpleCriterion *AndSimpleCriterionInitParameters `json:"simpleCriterion,omitempty" tf:"simple_criterion,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an S3 buckets from the job. (documented below) + TagCriterion *AndTagCriterionInitParameters `json:"tagCriterion,omitempty" tf:"tag_criterion,omitempty"` +} + +type IncludesAndObservation struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an S3 buckets from the job. (documented below) + SimpleCriterion *AndSimpleCriterionObservation `json:"simpleCriterion,omitempty" tf:"simple_criterion,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an S3 buckets from the job. 
(documented below) + TagCriterion *AndTagCriterionObservation `json:"tagCriterion,omitempty" tf:"tag_criterion,omitempty"` +} + +type IncludesAndParameters struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an S3 buckets from the job. (documented below) + // +kubebuilder:validation:Optional + SimpleCriterion *AndSimpleCriterionParameters `json:"simpleCriterion,omitempty" tf:"simple_criterion,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an S3 buckets from the job. (documented below) + // +kubebuilder:validation:Optional + TagCriterion *AndTagCriterionParameters `json:"tagCriterion,omitempty" tf:"tag_criterion,omitempty"` +} + +type IncludesInitParameters struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + And []IncludesAndInitParameters `json:"and,omitempty" tf:"and,omitempty"` +} + +type IncludesObservation struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + And []IncludesAndObservation `json:"and,omitempty" tf:"and,omitempty"` +} + +type IncludesParameters struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + // +kubebuilder:validation:Optional + And []IncludesAndParameters `json:"and,omitempty" tf:"and,omitempty"` +} + +type S3JobDefinitionInitParameters struct { + + // The property- and tag-based conditions that determine which S3 buckets to include or exclude from the analysis. Conflicts with bucket_definitions. 
(documented below) + BucketCriteria *BucketCriteriaInitParameters `json:"bucketCriteria,omitempty" tf:"bucket_criteria,omitempty"` + + // An array of objects, one for each AWS account that owns buckets to analyze. Each object specifies the account ID for an account and one or more buckets to analyze for the account. Conflicts with bucket_criteria. (documented below) + BucketDefinitions []BucketDefinitionsInitParameters `json:"bucketDefinitions,omitempty" tf:"bucket_definitions,omitempty"` + + // The property- and tag-based conditions that determine which objects to include or exclude from the analysis. (documented below) + Scoping *ScopingInitParameters `json:"scoping,omitempty" tf:"scoping,omitempty"` +} + +type S3JobDefinitionObservation struct { + + // The property- and tag-based conditions that determine which S3 buckets to include or exclude from the analysis. Conflicts with bucket_definitions. (documented below) + BucketCriteria *BucketCriteriaObservation `json:"bucketCriteria,omitempty" tf:"bucket_criteria,omitempty"` + + // An array of objects, one for each AWS account that owns buckets to analyze. Each object specifies the account ID for an account and one or more buckets to analyze for the account. Conflicts with bucket_criteria. (documented below) + BucketDefinitions []BucketDefinitionsObservation `json:"bucketDefinitions,omitempty" tf:"bucket_definitions,omitempty"` + + // The property- and tag-based conditions that determine which objects to include or exclude from the analysis. (documented below) + Scoping *ScopingObservation `json:"scoping,omitempty" tf:"scoping,omitempty"` +} + +type S3JobDefinitionParameters struct { + + // The property- and tag-based conditions that determine which S3 buckets to include or exclude from the analysis. Conflicts with bucket_definitions. 
(documented below) + // +kubebuilder:validation:Optional + BucketCriteria *BucketCriteriaParameters `json:"bucketCriteria,omitempty" tf:"bucket_criteria,omitempty"` + + // An array of objects, one for each AWS account that owns buckets to analyze. Each object specifies the account ID for an account and one or more buckets to analyze for the account. Conflicts with bucket_criteria. (documented below) + // +kubebuilder:validation:Optional + BucketDefinitions []BucketDefinitionsParameters `json:"bucketDefinitions,omitempty" tf:"bucket_definitions,omitempty"` + + // The property- and tag-based conditions that determine which objects to include or exclude from the analysis. (documented below) + // +kubebuilder:validation:Optional + Scoping *ScopingParameters `json:"scoping,omitempty" tf:"scoping,omitempty"` +} + +type ScheduleFrequencyInitParameters struct { + + // Specifies a daily recurrence pattern for running the job. + DailySchedule *bool `json:"dailySchedule,omitempty" tf:"daily_schedule,omitempty"` + + // Specifies a monthly recurrence pattern for running the job. + MonthlySchedule *float64 `json:"monthlySchedule,omitempty" tf:"monthly_schedule,omitempty"` + + // Specifies a weekly recurrence pattern for running the job. + WeeklySchedule *string `json:"weeklySchedule,omitempty" tf:"weekly_schedule,omitempty"` +} + +type ScheduleFrequencyObservation struct { + + // Specifies a daily recurrence pattern for running the job. + DailySchedule *bool `json:"dailySchedule,omitempty" tf:"daily_schedule,omitempty"` + + // Specifies a monthly recurrence pattern for running the job. + MonthlySchedule *float64 `json:"monthlySchedule,omitempty" tf:"monthly_schedule,omitempty"` + + // Specifies a weekly recurrence pattern for running the job. + WeeklySchedule *string `json:"weeklySchedule,omitempty" tf:"weekly_schedule,omitempty"` +} + +type ScheduleFrequencyParameters struct { + + // Specifies a daily recurrence pattern for running the job. 
+ // +kubebuilder:validation:Optional + DailySchedule *bool `json:"dailySchedule,omitempty" tf:"daily_schedule,omitempty"` + + // Specifies a monthly recurrence pattern for running the job. + // +kubebuilder:validation:Optional + MonthlySchedule *float64 `json:"monthlySchedule,omitempty" tf:"monthly_schedule,omitempty"` + + // Specifies a weekly recurrence pattern for running the job. + // +kubebuilder:validation:Optional + WeeklySchedule *string `json:"weeklySchedule,omitempty" tf:"weekly_schedule,omitempty"` +} + +type ScopingExcludesInitParameters struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + And []ExcludesAndInitParameters `json:"and,omitempty" tf:"and,omitempty"` +} + +type ScopingExcludesObservation struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + And []ExcludesAndObservation `json:"and,omitempty" tf:"and,omitempty"` +} + +type ScopingExcludesParameters struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + // +kubebuilder:validation:Optional + And []ExcludesAndParameters `json:"and,omitempty" tf:"and,omitempty"` +} + +type ScopingIncludesAndInitParameters struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an object from the job. (documented below) + SimpleScopeTerm *AndSimpleScopeTermInitParameters `json:"simpleScopeTerm,omitempty" tf:"simple_scope_term,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an object from the job. 
(documented below) + TagScopeTerm *AndTagScopeTermInitParameters `json:"tagScopeTerm,omitempty" tf:"tag_scope_term,omitempty"` +} + +type ScopingIncludesAndObservation struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an object from the job. (documented below) + SimpleScopeTerm *AndSimpleScopeTermObservation `json:"simpleScopeTerm,omitempty" tf:"simple_scope_term,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an object from the job. (documented below) + TagScopeTerm *AndTagScopeTermObservation `json:"tagScopeTerm,omitempty" tf:"tag_scope_term,omitempty"` +} + +type ScopingIncludesAndParameters struct { + + // A property-based condition that defines a property, operator, and one or more values for including or excluding an object from the job. (documented below) + // +kubebuilder:validation:Optional + SimpleScopeTerm *AndSimpleScopeTermParameters `json:"simpleScopeTerm,omitempty" tf:"simple_scope_term,omitempty"` + + // A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an object from the job. (documented below) + // +kubebuilder:validation:Optional + TagScopeTerm *AndTagScopeTermParameters `json:"tagScopeTerm,omitempty" tf:"tag_scope_term,omitempty"` +} + +type ScopingIncludesInitParameters struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + And []ScopingIncludesAndInitParameters `json:"and,omitempty" tf:"and,omitempty"` +} + +type ScopingIncludesObservation struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. 
(documented below) + And []ScopingIncludesAndObservation `json:"and,omitempty" tf:"and,omitempty"` +} + +type ScopingIncludesParameters struct { + + // An array of conditions, one for each condition that determines which objects to include or exclude from the job. (documented below) + // +kubebuilder:validation:Optional + And []ScopingIncludesAndParameters `json:"and,omitempty" tf:"and,omitempty"` +} + +type ScopingInitParameters struct { + + // The property- or tag-based conditions that determine which objects to exclude from the analysis. (documented below) + Excludes *ScopingExcludesInitParameters `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // The property- or tag-based conditions that determine which objects to include in the analysis. (documented below) + Includes *ScopingIncludesInitParameters `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type ScopingObservation struct { + + // The property- or tag-based conditions that determine which objects to exclude from the analysis. (documented below) + Excludes *ScopingExcludesObservation `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // The property- or tag-based conditions that determine which objects to include in the analysis. (documented below) + Includes *ScopingIncludesObservation `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type ScopingParameters struct { + + // The property- or tag-based conditions that determine which objects to exclude from the analysis. (documented below) + // +kubebuilder:validation:Optional + Excludes *ScopingExcludesParameters `json:"excludes,omitempty" tf:"excludes,omitempty"` + + // The property- or tag-based conditions that determine which objects to include in the analysis. (documented below) + // +kubebuilder:validation:Optional + Includes *ScopingIncludesParameters `json:"includes,omitempty" tf:"includes,omitempty"` +} + +type SimpleCriterionInitParameters struct { + + // The operator to use in a condition. 
Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SimpleCriterionObservation struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SimpleCriterionParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + // +kubebuilder:validation:Optional + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + // +kubebuilder:validation:Optional + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SimpleScopeTermInitParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SimpleScopeTermObservation struct { + + // The operator to use in a condition. 
Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SimpleScopeTermParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + // +kubebuilder:validation:Optional + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // An array that lists the values to use in the condition. + // +kubebuilder:validation:Optional + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TagCriterionInitParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + TagValues []TagValuesInitParameters `json:"tagValues,omitempty" tf:"tag_values,omitempty"` +} + +type TagCriterionObservation struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + TagValues []TagValuesObservation `json:"tagValues,omitempty" tf:"tag_values,omitempty"` +} + +type TagCriterionParameters struct { + + // The operator to use in a condition. 
Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + // +kubebuilder:validation:Optional + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + // +kubebuilder:validation:Optional + TagValues []TagValuesParameters `json:"tagValues,omitempty" tf:"tag_values,omitempty"` +} + +type TagCriterionTagValuesInitParameters struct { + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagCriterionTagValuesObservation struct { + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagCriterionTagValuesParameters struct { + + // The object property to use in the condition. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagScopeTermInitParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + TagValues []TagScopeTermTagValuesInitParameters `json:"tagValues,omitempty" tf:"tag_values,omitempty"` + + // The type of object to apply the condition to. The only valid value is S3_OBJECT. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type TagScopeTermObservation struct { + + // The operator to use in a condition. 
Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + TagValues []TagScopeTermTagValuesObservation `json:"tagValues,omitempty" tf:"tag_values,omitempty"` + + // The type of object to apply the condition to. The only valid value is S3_OBJECT. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type TagScopeTermParameters struct { + + // The operator to use in a condition. Valid values are: EQ, GT, GTE, LT, LTE, NE, CONTAINS, STARTS_WITH + // +kubebuilder:validation:Optional + Comparator *string `json:"comparator,omitempty" tf:"comparator,omitempty"` + + // The object property to use in the condition. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag keys or tag key and value pairs to use in the condition. + // +kubebuilder:validation:Optional + TagValues []TagScopeTermTagValuesParameters `json:"tagValues,omitempty" tf:"tag_values,omitempty"` + + // The type of object to apply the condition to. The only valid value is S3_OBJECT. + // +kubebuilder:validation:Optional + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type TagScopeTermTagValuesInitParameters struct { + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagScopeTermTagValuesObservation struct { + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagScopeTermTagValuesParameters struct { + + // The object property to use in the condition. 
+ // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagValuesInitParameters struct { + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagValuesObservation struct { + + // The object property to use in the condition. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagValuesParameters struct { + + // The object property to use in the condition. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The tag value. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type UserPausedDetailsInitParameters struct { +} + +type UserPausedDetailsObservation struct { + JobExpiresAt *string `json:"jobExpiresAt,omitempty" tf:"job_expires_at,omitempty"` + + JobImminentExpirationHealthEventArn *string `json:"jobImminentExpirationHealthEventArn,omitempty" tf:"job_imminent_expiration_health_event_arn,omitempty"` + + JobPausedAt *string `json:"jobPausedAt,omitempty" tf:"job_paused_at,omitempty"` +} + +type UserPausedDetailsParameters struct { +} + +// ClassificationJobSpec defines the desired state of ClassificationJob +type ClassificationJobSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClassificationJobParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClassificationJobInitParameters `json:"initProvider,omitempty"` +} + +// ClassificationJobStatus defines the observed state of ClassificationJob. +type ClassificationJobStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClassificationJobObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ClassificationJob is the Schema for the ClassificationJobs API. Provides a resource to manage an AWS Macie Classification Job. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ClassificationJob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.jobType) || (has(self.initProvider) && has(self.initProvider.jobType))",message="spec.forProvider.jobType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.s3JobDefinition) || (has(self.initProvider) && has(self.initProvider.s3JobDefinition))",message="spec.forProvider.s3JobDefinition is a required parameter" + Spec ClassificationJobSpec `json:"spec"` + Status ClassificationJobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClassificationJobList contains a list of ClassificationJobs +type ClassificationJobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClassificationJob `json:"items"` +} + +// Repository type metadata. +var ( + ClassificationJob_Kind = "ClassificationJob" + ClassificationJob_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ClassificationJob_Kind}.String() + ClassificationJob_KindAPIVersion = ClassificationJob_Kind + "." + CRDGroupVersion.String() + ClassificationJob_GroupVersionKind = CRDGroupVersion.WithKind(ClassificationJob_Kind) +) + +func init() { + SchemeBuilder.Register(&ClassificationJob{}, &ClassificationJobList{}) +} diff --git a/apis/macie2/v1beta2/zz_findingsfilter_terraformed.go b/apis/macie2/v1beta2/zz_findingsfilter_terraformed.go new file mode 100755 index 0000000000..3058b1c341 --- /dev/null +++ b/apis/macie2/v1beta2/zz_findingsfilter_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FindingsFilter +func (mg *FindingsFilter) GetTerraformResourceType() string { + return "aws_macie2_findings_filter" +} + +// GetConnectionDetailsMapping for this FindingsFilter +func (tr *FindingsFilter) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FindingsFilter +func (tr *FindingsFilter) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FindingsFilter +func (tr *FindingsFilter) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FindingsFilter +func (tr *FindingsFilter) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FindingsFilter +func (tr *FindingsFilter) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FindingsFilter +func (tr *FindingsFilter) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FindingsFilter +func (tr *FindingsFilter) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err 
!= nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this FindingsFilter +func (tr *FindingsFilter) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FindingsFilter using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FindingsFilter) LateInitialize(attrs []byte) (bool, error) { + params := &FindingsFilterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FindingsFilter) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/macie2/v1beta2/zz_findingsfilter_types.go b/apis/macie2/v1beta2/zz_findingsfilter_types.go new file mode 100755 index 0000000000..92d178a70a --- /dev/null +++ b/apis/macie2/v1beta2/zz_findingsfilter_types.go @@ -0,0 +1,279 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CriterionInitParameters struct { + + // The value for the property matches (equals) the specified value. If you specify multiple values, Amazon Macie uses OR logic to join the values. + // +listType=set + Eq []*string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The value for the property exclusively matches (equals an exact match for) all the specified values. If you specify multiple values, Amazon Macie uses AND logic to join the values. + // +listType=set + EqExactMatch []*string `json:"eqExactMatch,omitempty" tf:"eq_exact_match,omitempty"` + + // The name of the field to be evaluated. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // The value for the property is greater than the specified value. + Gt *string `json:"gt,omitempty" tf:"gt,omitempty"` + + // The value for the property is greater than or equal to the specified value. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The value for the property is less than the specified value. + Lt *string `json:"lt,omitempty" tf:"lt,omitempty"` + + // The value for the property is less than or equal to the specified value. 
+ Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` + + // The value for the property doesn't match (doesn't equal) the specified value. If you specify multiple values, Amazon Macie uses OR logic to join the values. + // +listType=set + Neq []*string `json:"neq,omitempty" tf:"neq,omitempty"` +} + +type CriterionObservation struct { + + // The value for the property matches (equals) the specified value. If you specify multiple values, Amazon Macie uses OR logic to join the values. + // +listType=set + Eq []*string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The value for the property exclusively matches (equals an exact match for) all the specified values. If you specify multiple values, Amazon Macie uses AND logic to join the values. + // +listType=set + EqExactMatch []*string `json:"eqExactMatch,omitempty" tf:"eq_exact_match,omitempty"` + + // The name of the field to be evaluated. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // The value for the property is greater than the specified value. + Gt *string `json:"gt,omitempty" tf:"gt,omitempty"` + + // The value for the property is greater than or equal to the specified value. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The value for the property is less than the specified value. + Lt *string `json:"lt,omitempty" tf:"lt,omitempty"` + + // The value for the property is less than or equal to the specified value. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` + + // The value for the property doesn't match (doesn't equal) the specified value. If you specify multiple values, Amazon Macie uses OR logic to join the values. + // +listType=set + Neq []*string `json:"neq,omitempty" tf:"neq,omitempty"` +} + +type CriterionParameters struct { + + // The value for the property matches (equals) the specified value. If you specify multiple values, Amazon Macie uses OR logic to join the values. 
+ // +kubebuilder:validation:Optional + // +listType=set + Eq []*string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The value for the property exclusively matches (equals an exact match for) all the specified values. If you specify multiple values, Amazon Macie uses AND logic to join the values. + // +kubebuilder:validation:Optional + // +listType=set + EqExactMatch []*string `json:"eqExactMatch,omitempty" tf:"eq_exact_match,omitempty"` + + // The name of the field to be evaluated. + // +kubebuilder:validation:Optional + Field *string `json:"field" tf:"field,omitempty"` + + // The value for the property is greater than the specified value. + // +kubebuilder:validation:Optional + Gt *string `json:"gt,omitempty" tf:"gt,omitempty"` + + // The value for the property is greater than or equal to the specified value. + // +kubebuilder:validation:Optional + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The value for the property is less than the specified value. + // +kubebuilder:validation:Optional + Lt *string `json:"lt,omitempty" tf:"lt,omitempty"` + + // The value for the property is less than or equal to the specified value. + // +kubebuilder:validation:Optional + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` + + // The value for the property doesn't match (doesn't equal) the specified value. If you specify multiple values, Amazon Macie uses OR logic to join the values. + // +kubebuilder:validation:Optional + // +listType=set + Neq []*string `json:"neq,omitempty" tf:"neq,omitempty"` +} + +type FindingCriteriaInitParameters struct { + + // A condition that specifies the property, operator, and one or more values to use to filter the results. (documented below) + Criterion []CriterionInitParameters `json:"criterion,omitempty" tf:"criterion,omitempty"` +} + +type FindingCriteriaObservation struct { + + // A condition that specifies the property, operator, and one or more values to use to filter the results. 
(documented below) + Criterion []CriterionObservation `json:"criterion,omitempty" tf:"criterion,omitempty"` +} + +type FindingCriteriaParameters struct { + + // A condition that specifies the property, operator, and one or more values to use to filter the results. (documented below) + // +kubebuilder:validation:Optional + Criterion []CriterionParameters `json:"criterion,omitempty" tf:"criterion,omitempty"` +} + +type FindingsFilterInitParameters struct { + + // The action to perform on findings that meet the filter criteria (finding_criteria). Valid values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A custom description of the filter. The description can contain as many as 512 characters. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The criteria to use to filter findings. + FindingCriteria *FindingCriteriaInitParameters `json:"findingCriteria,omitempty" tf:"finding_criteria,omitempty"` + + // A custom name for the filter. The name must contain at least 3 characters and can contain as many as 64 characters. Conflicts with name_prefix. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings. + Position *float64 `json:"position,omitempty" tf:"position,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FindingsFilterObservation struct { + + // The action to perform on findings that meet the filter criteria (finding_criteria). Valid values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings. 
+ Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Amazon Resource Name (ARN) of the Findings Filter. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A custom description of the filter. The description can contain as many as 512 characters. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The criteria to use to filter findings. + FindingCriteria *FindingCriteriaObservation `json:"findingCriteria,omitempty" tf:"finding_criteria,omitempty"` + + // The unique identifier (ID) of the macie Findings Filter. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A custom name for the filter. The name must contain at least 3 characters and can contain as many as 64 characters. Conflicts with name_prefix. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings. + Position *float64 `json:"position,omitempty" tf:"position,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type FindingsFilterParameters struct { + + // The action to perform on findings that meet the filter criteria (finding_criteria). Valid values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A custom description of the filter. The description can contain as many as 512 characters. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The criteria to use to filter findings. + // +kubebuilder:validation:Optional + FindingCriteria *FindingCriteriaParameters `json:"findingCriteria,omitempty" tf:"finding_criteria,omitempty"` + + // A custom name for the filter. The name must contain at least 3 characters and can contain as many as 64 characters. Conflicts with name_prefix. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings. + // +kubebuilder:validation:Optional + Position *float64 `json:"position,omitempty" tf:"position,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// FindingsFilterSpec defines the desired state of FindingsFilter +type FindingsFilterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FindingsFilterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FindingsFilterInitParameters `json:"initProvider,omitempty"` +} + +// FindingsFilterStatus defines the observed state of FindingsFilter. +type FindingsFilterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FindingsFilterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FindingsFilter is the Schema for the FindingsFilters API. Provides a resource to manage an Amazon Macie Findings Filter. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type FindingsFilter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.action) || (has(self.initProvider) && has(self.initProvider.action))",message="spec.forProvider.action is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.findingCriteria) || (has(self.initProvider) && has(self.initProvider.findingCriteria))",message="spec.forProvider.findingCriteria is a required parameter" + Spec 
FindingsFilterSpec `json:"spec"` + Status FindingsFilterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FindingsFilterList contains a list of FindingsFilters +type FindingsFilterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FindingsFilter `json:"items"` +} + +// Repository type metadata. +var ( + FindingsFilter_Kind = "FindingsFilter" + FindingsFilter_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FindingsFilter_Kind}.String() + FindingsFilter_KindAPIVersion = FindingsFilter_Kind + "." + CRDGroupVersion.String() + FindingsFilter_GroupVersionKind = CRDGroupVersion.WithKind(FindingsFilter_Kind) +) + +func init() { + SchemeBuilder.Register(&FindingsFilter{}, &FindingsFilterList{}) +} diff --git a/apis/macie2/v1beta2/zz_generated.conversion_hubs.go b/apis/macie2/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..f3ff3a8d1c --- /dev/null +++ b/apis/macie2/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ClassificationJob) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FindingsFilter) Hub() {} diff --git a/apis/macie2/v1beta2/zz_generated.deepcopy.go b/apis/macie2/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..76ef685325 --- /dev/null +++ b/apis/macie2/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3148 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AndInitParameters) DeepCopyInto(out *AndInitParameters) { + *out = *in + if in.SimpleCriterion != nil { + in, out := &in.SimpleCriterion, &out.SimpleCriterion + *out = new(SimpleCriterionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TagCriterion != nil { + in, out := &in.TagCriterion, &out.TagCriterion + *out = new(TagCriterionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndInitParameters. +func (in *AndInitParameters) DeepCopy() *AndInitParameters { + if in == nil { + return nil + } + out := new(AndInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndObservation) DeepCopyInto(out *AndObservation) { + *out = *in + if in.SimpleCriterion != nil { + in, out := &in.SimpleCriterion, &out.SimpleCriterion + *out = new(SimpleCriterionObservation) + (*in).DeepCopyInto(*out) + } + if in.TagCriterion != nil { + in, out := &in.TagCriterion, &out.TagCriterion + *out = new(TagCriterionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndObservation. +func (in *AndObservation) DeepCopy() *AndObservation { + if in == nil { + return nil + } + out := new(AndObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AndParameters) DeepCopyInto(out *AndParameters) { + *out = *in + if in.SimpleCriterion != nil { + in, out := &in.SimpleCriterion, &out.SimpleCriterion + *out = new(SimpleCriterionParameters) + (*in).DeepCopyInto(*out) + } + if in.TagCriterion != nil { + in, out := &in.TagCriterion, &out.TagCriterion + *out = new(TagCriterionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndParameters. +func (in *AndParameters) DeepCopy() *AndParameters { + if in == nil { + return nil + } + out := new(AndParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndSimpleCriterionInitParameters) DeepCopyInto(out *AndSimpleCriterionInitParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndSimpleCriterionInitParameters. +func (in *AndSimpleCriterionInitParameters) DeepCopy() *AndSimpleCriterionInitParameters { + if in == nil { + return nil + } + out := new(AndSimpleCriterionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AndSimpleCriterionObservation) DeepCopyInto(out *AndSimpleCriterionObservation) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndSimpleCriterionObservation. +func (in *AndSimpleCriterionObservation) DeepCopy() *AndSimpleCriterionObservation { + if in == nil { + return nil + } + out := new(AndSimpleCriterionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndSimpleCriterionParameters) DeepCopyInto(out *AndSimpleCriterionParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndSimpleCriterionParameters. +func (in *AndSimpleCriterionParameters) DeepCopy() *AndSimpleCriterionParameters { + if in == nil { + return nil + } + out := new(AndSimpleCriterionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AndSimpleScopeTermInitParameters) DeepCopyInto(out *AndSimpleScopeTermInitParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndSimpleScopeTermInitParameters. +func (in *AndSimpleScopeTermInitParameters) DeepCopy() *AndSimpleScopeTermInitParameters { + if in == nil { + return nil + } + out := new(AndSimpleScopeTermInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndSimpleScopeTermObservation) DeepCopyInto(out *AndSimpleScopeTermObservation) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndSimpleScopeTermObservation. +func (in *AndSimpleScopeTermObservation) DeepCopy() *AndSimpleScopeTermObservation { + if in == nil { + return nil + } + out := new(AndSimpleScopeTermObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AndSimpleScopeTermParameters) DeepCopyInto(out *AndSimpleScopeTermParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndSimpleScopeTermParameters. +func (in *AndSimpleScopeTermParameters) DeepCopy() *AndSimpleScopeTermParameters { + if in == nil { + return nil + } + out := new(AndSimpleScopeTermParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndTagCriterionInitParameters) DeepCopyInto(out *AndTagCriterionInitParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]TagCriterionTagValuesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndTagCriterionInitParameters. +func (in *AndTagCriterionInitParameters) DeepCopy() *AndTagCriterionInitParameters { + if in == nil { + return nil + } + out := new(AndTagCriterionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AndTagCriterionObservation) DeepCopyInto(out *AndTagCriterionObservation) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]TagCriterionTagValuesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndTagCriterionObservation. +func (in *AndTagCriterionObservation) DeepCopy() *AndTagCriterionObservation { + if in == nil { + return nil + } + out := new(AndTagCriterionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndTagCriterionParameters) DeepCopyInto(out *AndTagCriterionParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]TagCriterionTagValuesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndTagCriterionParameters. +func (in *AndTagCriterionParameters) DeepCopy() *AndTagCriterionParameters { + if in == nil { + return nil + } + out := new(AndTagCriterionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AndTagScopeTermInitParameters) DeepCopyInto(out *AndTagScopeTermInitParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]AndTagScopeTermTagValuesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndTagScopeTermInitParameters. +func (in *AndTagScopeTermInitParameters) DeepCopy() *AndTagScopeTermInitParameters { + if in == nil { + return nil + } + out := new(AndTagScopeTermInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndTagScopeTermObservation) DeepCopyInto(out *AndTagScopeTermObservation) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]AndTagScopeTermTagValuesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndTagScopeTermObservation. 
+func (in *AndTagScopeTermObservation) DeepCopy() *AndTagScopeTermObservation { + if in == nil { + return nil + } + out := new(AndTagScopeTermObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndTagScopeTermParameters) DeepCopyInto(out *AndTagScopeTermParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]AndTagScopeTermTagValuesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndTagScopeTermParameters. +func (in *AndTagScopeTermParameters) DeepCopy() *AndTagScopeTermParameters { + if in == nil { + return nil + } + out := new(AndTagScopeTermParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndTagScopeTermTagValuesInitParameters) DeepCopyInto(out *AndTagScopeTermTagValuesInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndTagScopeTermTagValuesInitParameters. 
+func (in *AndTagScopeTermTagValuesInitParameters) DeepCopy() *AndTagScopeTermTagValuesInitParameters { + if in == nil { + return nil + } + out := new(AndTagScopeTermTagValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndTagScopeTermTagValuesObservation) DeepCopyInto(out *AndTagScopeTermTagValuesObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndTagScopeTermTagValuesObservation. +func (in *AndTagScopeTermTagValuesObservation) DeepCopy() *AndTagScopeTermTagValuesObservation { + if in == nil { + return nil + } + out := new(AndTagScopeTermTagValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndTagScopeTermTagValuesParameters) DeepCopyInto(out *AndTagScopeTermTagValuesParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndTagScopeTermTagValuesParameters. +func (in *AndTagScopeTermTagValuesParameters) DeepCopy() *AndTagScopeTermTagValuesParameters { + if in == nil { + return nil + } + out := new(AndTagScopeTermTagValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketCriteriaInitParameters) DeepCopyInto(out *BucketCriteriaInitParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = new(ExcludesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = new(IncludesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketCriteriaInitParameters. +func (in *BucketCriteriaInitParameters) DeepCopy() *BucketCriteriaInitParameters { + if in == nil { + return nil + } + out := new(BucketCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketCriteriaObservation) DeepCopyInto(out *BucketCriteriaObservation) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = new(ExcludesObservation) + (*in).DeepCopyInto(*out) + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = new(IncludesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketCriteriaObservation. +func (in *BucketCriteriaObservation) DeepCopy() *BucketCriteriaObservation { + if in == nil { + return nil + } + out := new(BucketCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketCriteriaParameters) DeepCopyInto(out *BucketCriteriaParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = new(ExcludesParameters) + (*in).DeepCopyInto(*out) + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = new(IncludesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketCriteriaParameters. +func (in *BucketCriteriaParameters) DeepCopy() *BucketCriteriaParameters { + if in == nil { + return nil + } + out := new(BucketCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketDefinitionsInitParameters) DeepCopyInto(out *BucketDefinitionsInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Buckets != nil { + in, out := &in.Buckets, &out.Buckets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketDefinitionsInitParameters. +func (in *BucketDefinitionsInitParameters) DeepCopy() *BucketDefinitionsInitParameters { + if in == nil { + return nil + } + out := new(BucketDefinitionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketDefinitionsObservation) DeepCopyInto(out *BucketDefinitionsObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Buckets != nil { + in, out := &in.Buckets, &out.Buckets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketDefinitionsObservation. +func (in *BucketDefinitionsObservation) DeepCopy() *BucketDefinitionsObservation { + if in == nil { + return nil + } + out := new(BucketDefinitionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketDefinitionsParameters) DeepCopyInto(out *BucketDefinitionsParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Buckets != nil { + in, out := &in.Buckets, &out.Buckets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketDefinitionsParameters. +func (in *BucketDefinitionsParameters) DeepCopy() *BucketDefinitionsParameters { + if in == nil { + return nil + } + out := new(BucketDefinitionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClassificationJob) DeepCopyInto(out *ClassificationJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassificationJob. +func (in *ClassificationJob) DeepCopy() *ClassificationJob { + if in == nil { + return nil + } + out := new(ClassificationJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClassificationJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassificationJobInitParameters) DeepCopyInto(out *ClassificationJobInitParameters) { + *out = *in + if in.CustomDataIdentifierIds != nil { + in, out := &in.CustomDataIdentifierIds, &out.CustomDataIdentifierIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InitialRun != nil { + in, out := &in.InitialRun, &out.InitialRun + *out = new(bool) + **out = **in + } + if in.JobStatus != nil { + in, out := &in.JobStatus, &out.JobStatus + *out = new(string) + **out = **in + } + if in.JobType != nil { + in, out := &in.JobType, &out.JobType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.S3JobDefinition != nil { + in, out := &in.S3JobDefinition, &out.S3JobDefinition + *out = new(S3JobDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SamplingPercentage != nil { + in, 
out := &in.SamplingPercentage, &out.SamplingPercentage + *out = new(float64) + **out = **in + } + if in.ScheduleFrequency != nil { + in, out := &in.ScheduleFrequency, &out.ScheduleFrequency + *out = new(ScheduleFrequencyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassificationJobInitParameters. +func (in *ClassificationJobInitParameters) DeepCopy() *ClassificationJobInitParameters { + if in == nil { + return nil + } + out := new(ClassificationJobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassificationJobList) DeepCopyInto(out *ClassificationJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClassificationJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassificationJobList. +func (in *ClassificationJobList) DeepCopy() *ClassificationJobList { + if in == nil { + return nil + } + out := new(ClassificationJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClassificationJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassificationJobObservation) DeepCopyInto(out *ClassificationJobObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.CustomDataIdentifierIds != nil { + in, out := &in.CustomDataIdentifierIds, &out.CustomDataIdentifierIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InitialRun != nil { + in, out := &in.InitialRun, &out.InitialRun + *out = new(bool) + **out = **in + } + if in.JobArn != nil { + in, out := &in.JobArn, &out.JobArn + *out = new(string) + **out = **in + } + if in.JobID != nil { + in, out := &in.JobID, &out.JobID + *out = new(string) + **out = **in + } + if in.JobStatus != nil { + in, out := &in.JobStatus, &out.JobStatus + *out = new(string) + **out = **in + } + if in.JobType != nil { + in, out := &in.JobType, &out.JobType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.S3JobDefinition != nil { + in, out := &in.S3JobDefinition, &out.S3JobDefinition + *out = new(S3JobDefinitionObservation) + (*in).DeepCopyInto(*out) + } + if in.SamplingPercentage != nil { + in, out := &in.SamplingPercentage, &out.SamplingPercentage + *out = new(float64) + **out = **in + } + if in.ScheduleFrequency != nil { + in, out := &in.ScheduleFrequency, &out.ScheduleFrequency + *out = 
new(ScheduleFrequencyObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserPausedDetails != nil { + in, out := &in.UserPausedDetails, &out.UserPausedDetails + *out = make([]UserPausedDetailsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassificationJobObservation. +func (in *ClassificationJobObservation) DeepCopy() *ClassificationJobObservation { + if in == nil { + return nil + } + out := new(ClassificationJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClassificationJobParameters) DeepCopyInto(out *ClassificationJobParameters) { + *out = *in + if in.CustomDataIdentifierIds != nil { + in, out := &in.CustomDataIdentifierIds, &out.CustomDataIdentifierIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InitialRun != nil { + in, out := &in.InitialRun, &out.InitialRun + *out = new(bool) + **out = **in + } + if in.JobStatus != nil { + in, out := &in.JobStatus, &out.JobStatus + *out = new(string) + **out = **in + } + if in.JobType != nil { + in, out := &in.JobType, &out.JobType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.S3JobDefinition != nil { + in, out := &in.S3JobDefinition, &out.S3JobDefinition + *out = new(S3JobDefinitionParameters) + (*in).DeepCopyInto(*out) + } + if in.SamplingPercentage != nil { + in, out := &in.SamplingPercentage, &out.SamplingPercentage + *out = new(float64) + **out = **in + } + if in.ScheduleFrequency != nil { + in, out := &in.ScheduleFrequency, &out.ScheduleFrequency + *out = new(ScheduleFrequencyParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassificationJobParameters. 
+func (in *ClassificationJobParameters) DeepCopy() *ClassificationJobParameters { + if in == nil { + return nil + } + out := new(ClassificationJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassificationJobSpec) DeepCopyInto(out *ClassificationJobSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassificationJobSpec. +func (in *ClassificationJobSpec) DeepCopy() *ClassificationJobSpec { + if in == nil { + return nil + } + out := new(ClassificationJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassificationJobStatus) DeepCopyInto(out *ClassificationJobStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassificationJobStatus. +func (in *ClassificationJobStatus) DeepCopy() *ClassificationJobStatus { + if in == nil { + return nil + } + out := new(ClassificationJobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CriterionInitParameters) DeepCopyInto(out *CriterionInitParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EqExactMatch != nil { + in, out := &in.EqExactMatch, &out.EqExactMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Gt != nil { + in, out := &in.Gt, &out.Gt + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lt != nil { + in, out := &in.Lt, &out.Lt + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } + if in.Neq != nil { + in, out := &in.Neq, &out.Neq + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriterionInitParameters. +func (in *CriterionInitParameters) DeepCopy() *CriterionInitParameters { + if in == nil { + return nil + } + out := new(CriterionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CriterionObservation) DeepCopyInto(out *CriterionObservation) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EqExactMatch != nil { + in, out := &in.EqExactMatch, &out.EqExactMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Gt != nil { + in, out := &in.Gt, &out.Gt + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lt != nil { + in, out := &in.Lt, &out.Lt + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } + if in.Neq != nil { + in, out := &in.Neq, &out.Neq + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriterionObservation. +func (in *CriterionObservation) DeepCopy() *CriterionObservation { + if in == nil { + return nil + } + out := new(CriterionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CriterionParameters) DeepCopyInto(out *CriterionParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EqExactMatch != nil { + in, out := &in.EqExactMatch, &out.EqExactMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Gt != nil { + in, out := &in.Gt, &out.Gt + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lt != nil { + in, out := &in.Lt, &out.Lt + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } + if in.Neq != nil { + in, out := &in.Neq, &out.Neq + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriterionParameters. +func (in *CriterionParameters) DeepCopy() *CriterionParameters { + if in == nil { + return nil + } + out := new(CriterionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExcludesAndInitParameters) DeepCopyInto(out *ExcludesAndInitParameters) { + *out = *in + if in.SimpleScopeTerm != nil { + in, out := &in.SimpleScopeTerm, &out.SimpleScopeTerm + *out = new(SimpleScopeTermInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TagScopeTerm != nil { + in, out := &in.TagScopeTerm, &out.TagScopeTerm + *out = new(TagScopeTermInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludesAndInitParameters. +func (in *ExcludesAndInitParameters) DeepCopy() *ExcludesAndInitParameters { + if in == nil { + return nil + } + out := new(ExcludesAndInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludesAndObservation) DeepCopyInto(out *ExcludesAndObservation) { + *out = *in + if in.SimpleScopeTerm != nil { + in, out := &in.SimpleScopeTerm, &out.SimpleScopeTerm + *out = new(SimpleScopeTermObservation) + (*in).DeepCopyInto(*out) + } + if in.TagScopeTerm != nil { + in, out := &in.TagScopeTerm, &out.TagScopeTerm + *out = new(TagScopeTermObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludesAndObservation. +func (in *ExcludesAndObservation) DeepCopy() *ExcludesAndObservation { + if in == nil { + return nil + } + out := new(ExcludesAndObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExcludesAndParameters) DeepCopyInto(out *ExcludesAndParameters) { + *out = *in + if in.SimpleScopeTerm != nil { + in, out := &in.SimpleScopeTerm, &out.SimpleScopeTerm + *out = new(SimpleScopeTermParameters) + (*in).DeepCopyInto(*out) + } + if in.TagScopeTerm != nil { + in, out := &in.TagScopeTerm, &out.TagScopeTerm + *out = new(TagScopeTermParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludesAndParameters. +func (in *ExcludesAndParameters) DeepCopy() *ExcludesAndParameters { + if in == nil { + return nil + } + out := new(ExcludesAndParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludesInitParameters) DeepCopyInto(out *ExcludesInitParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]AndInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludesInitParameters. +func (in *ExcludesInitParameters) DeepCopy() *ExcludesInitParameters { + if in == nil { + return nil + } + out := new(ExcludesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludesObservation) DeepCopyInto(out *ExcludesObservation) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]AndObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludesObservation. 
+func (in *ExcludesObservation) DeepCopy() *ExcludesObservation { + if in == nil { + return nil + } + out := new(ExcludesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludesParameters) DeepCopyInto(out *ExcludesParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]AndParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludesParameters. +func (in *ExcludesParameters) DeepCopy() *ExcludesParameters { + if in == nil { + return nil + } + out := new(ExcludesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingCriteriaInitParameters) DeepCopyInto(out *FindingCriteriaInitParameters) { + *out = *in + if in.Criterion != nil { + in, out := &in.Criterion, &out.Criterion + *out = make([]CriterionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingCriteriaInitParameters. +func (in *FindingCriteriaInitParameters) DeepCopy() *FindingCriteriaInitParameters { + if in == nil { + return nil + } + out := new(FindingCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingCriteriaObservation) DeepCopyInto(out *FindingCriteriaObservation) { + *out = *in + if in.Criterion != nil { + in, out := &in.Criterion, &out.Criterion + *out = make([]CriterionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingCriteriaObservation. +func (in *FindingCriteriaObservation) DeepCopy() *FindingCriteriaObservation { + if in == nil { + return nil + } + out := new(FindingCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingCriteriaParameters) DeepCopyInto(out *FindingCriteriaParameters) { + *out = *in + if in.Criterion != nil { + in, out := &in.Criterion, &out.Criterion + *out = make([]CriterionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingCriteriaParameters. +func (in *FindingCriteriaParameters) DeepCopy() *FindingCriteriaParameters { + if in == nil { + return nil + } + out := new(FindingCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingsFilter) DeepCopyInto(out *FindingsFilter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingsFilter. 
+func (in *FindingsFilter) DeepCopy() *FindingsFilter { + if in == nil { + return nil + } + out := new(FindingsFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FindingsFilter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingsFilterInitParameters) DeepCopyInto(out *FindingsFilterInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FindingCriteria != nil { + in, out := &in.FindingCriteria, &out.FindingCriteria + *out = new(FindingCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingsFilterInitParameters. +func (in *FindingsFilterInitParameters) DeepCopy() *FindingsFilterInitParameters { + if in == nil { + return nil + } + out := new(FindingsFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingsFilterList) DeepCopyInto(out *FindingsFilterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FindingsFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingsFilterList. +func (in *FindingsFilterList) DeepCopy() *FindingsFilterList { + if in == nil { + return nil + } + out := new(FindingsFilterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FindingsFilterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingsFilterObservation) DeepCopyInto(out *FindingsFilterObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FindingCriteria != nil { + in, out := &in.FindingCriteria, &out.FindingCriteria + *out = new(FindingCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingsFilterObservation. +func (in *FindingsFilterObservation) DeepCopy() *FindingsFilterObservation { + if in == nil { + return nil + } + out := new(FindingsFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingsFilterParameters) DeepCopyInto(out *FindingsFilterParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FindingCriteria != nil { + in, out := &in.FindingCriteria, &out.FindingCriteria + *out = new(FindingCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal 
+ } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingsFilterParameters. +func (in *FindingsFilterParameters) DeepCopy() *FindingsFilterParameters { + if in == nil { + return nil + } + out := new(FindingsFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingsFilterSpec) DeepCopyInto(out *FindingsFilterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingsFilterSpec. +func (in *FindingsFilterSpec) DeepCopy() *FindingsFilterSpec { + if in == nil { + return nil + } + out := new(FindingsFilterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingsFilterStatus) DeepCopyInto(out *FindingsFilterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingsFilterStatus. +func (in *FindingsFilterStatus) DeepCopy() *FindingsFilterStatus { + if in == nil { + return nil + } + out := new(FindingsFilterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IncludesAndInitParameters) DeepCopyInto(out *IncludesAndInitParameters) { + *out = *in + if in.SimpleCriterion != nil { + in, out := &in.SimpleCriterion, &out.SimpleCriterion + *out = new(AndSimpleCriterionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TagCriterion != nil { + in, out := &in.TagCriterion, &out.TagCriterion + *out = new(AndTagCriterionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludesAndInitParameters. +func (in *IncludesAndInitParameters) DeepCopy() *IncludesAndInitParameters { + if in == nil { + return nil + } + out := new(IncludesAndInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludesAndObservation) DeepCopyInto(out *IncludesAndObservation) { + *out = *in + if in.SimpleCriterion != nil { + in, out := &in.SimpleCriterion, &out.SimpleCriterion + *out = new(AndSimpleCriterionObservation) + (*in).DeepCopyInto(*out) + } + if in.TagCriterion != nil { + in, out := &in.TagCriterion, &out.TagCriterion + *out = new(AndTagCriterionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludesAndObservation. +func (in *IncludesAndObservation) DeepCopy() *IncludesAndObservation { + if in == nil { + return nil + } + out := new(IncludesAndObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IncludesAndParameters) DeepCopyInto(out *IncludesAndParameters) { + *out = *in + if in.SimpleCriterion != nil { + in, out := &in.SimpleCriterion, &out.SimpleCriterion + *out = new(AndSimpleCriterionParameters) + (*in).DeepCopyInto(*out) + } + if in.TagCriterion != nil { + in, out := &in.TagCriterion, &out.TagCriterion + *out = new(AndTagCriterionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludesAndParameters. +func (in *IncludesAndParameters) DeepCopy() *IncludesAndParameters { + if in == nil { + return nil + } + out := new(IncludesAndParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludesInitParameters) DeepCopyInto(out *IncludesInitParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]IncludesAndInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludesInitParameters. +func (in *IncludesInitParameters) DeepCopy() *IncludesInitParameters { + if in == nil { + return nil + } + out := new(IncludesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludesObservation) DeepCopyInto(out *IncludesObservation) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]IncludesAndObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludesObservation. 
+func (in *IncludesObservation) DeepCopy() *IncludesObservation { + if in == nil { + return nil + } + out := new(IncludesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludesParameters) DeepCopyInto(out *IncludesParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]IncludesAndParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludesParameters. +func (in *IncludesParameters) DeepCopy() *IncludesParameters { + if in == nil { + return nil + } + out := new(IncludesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3JobDefinitionInitParameters) DeepCopyInto(out *S3JobDefinitionInitParameters) { + *out = *in + if in.BucketCriteria != nil { + in, out := &in.BucketCriteria, &out.BucketCriteria + *out = new(BucketCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BucketDefinitions != nil { + in, out := &in.BucketDefinitions, &out.BucketDefinitions + *out = make([]BucketDefinitionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scoping != nil { + in, out := &in.Scoping, &out.Scoping + *out = new(ScopingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3JobDefinitionInitParameters. +func (in *S3JobDefinitionInitParameters) DeepCopy() *S3JobDefinitionInitParameters { + if in == nil { + return nil + } + out := new(S3JobDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *S3JobDefinitionObservation) DeepCopyInto(out *S3JobDefinitionObservation) { + *out = *in + if in.BucketCriteria != nil { + in, out := &in.BucketCriteria, &out.BucketCriteria + *out = new(BucketCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.BucketDefinitions != nil { + in, out := &in.BucketDefinitions, &out.BucketDefinitions + *out = make([]BucketDefinitionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scoping != nil { + in, out := &in.Scoping, &out.Scoping + *out = new(ScopingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3JobDefinitionObservation. +func (in *S3JobDefinitionObservation) DeepCopy() *S3JobDefinitionObservation { + if in == nil { + return nil + } + out := new(S3JobDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3JobDefinitionParameters) DeepCopyInto(out *S3JobDefinitionParameters) { + *out = *in + if in.BucketCriteria != nil { + in, out := &in.BucketCriteria, &out.BucketCriteria + *out = new(BucketCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.BucketDefinitions != nil { + in, out := &in.BucketDefinitions, &out.BucketDefinitions + *out = make([]BucketDefinitionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scoping != nil { + in, out := &in.Scoping, &out.Scoping + *out = new(ScopingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3JobDefinitionParameters. 
+func (in *S3JobDefinitionParameters) DeepCopy() *S3JobDefinitionParameters { + if in == nil { + return nil + } + out := new(S3JobDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleFrequencyInitParameters) DeepCopyInto(out *ScheduleFrequencyInitParameters) { + *out = *in + if in.DailySchedule != nil { + in, out := &in.DailySchedule, &out.DailySchedule + *out = new(bool) + **out = **in + } + if in.MonthlySchedule != nil { + in, out := &in.MonthlySchedule, &out.MonthlySchedule + *out = new(float64) + **out = **in + } + if in.WeeklySchedule != nil { + in, out := &in.WeeklySchedule, &out.WeeklySchedule + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleFrequencyInitParameters. +func (in *ScheduleFrequencyInitParameters) DeepCopy() *ScheduleFrequencyInitParameters { + if in == nil { + return nil + } + out := new(ScheduleFrequencyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleFrequencyObservation) DeepCopyInto(out *ScheduleFrequencyObservation) { + *out = *in + if in.DailySchedule != nil { + in, out := &in.DailySchedule, &out.DailySchedule + *out = new(bool) + **out = **in + } + if in.MonthlySchedule != nil { + in, out := &in.MonthlySchedule, &out.MonthlySchedule + *out = new(float64) + **out = **in + } + if in.WeeklySchedule != nil { + in, out := &in.WeeklySchedule, &out.WeeklySchedule + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleFrequencyObservation. 
+func (in *ScheduleFrequencyObservation) DeepCopy() *ScheduleFrequencyObservation { + if in == nil { + return nil + } + out := new(ScheduleFrequencyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleFrequencyParameters) DeepCopyInto(out *ScheduleFrequencyParameters) { + *out = *in + if in.DailySchedule != nil { + in, out := &in.DailySchedule, &out.DailySchedule + *out = new(bool) + **out = **in + } + if in.MonthlySchedule != nil { + in, out := &in.MonthlySchedule, &out.MonthlySchedule + *out = new(float64) + **out = **in + } + if in.WeeklySchedule != nil { + in, out := &in.WeeklySchedule, &out.WeeklySchedule + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleFrequencyParameters. +func (in *ScheduleFrequencyParameters) DeepCopy() *ScheduleFrequencyParameters { + if in == nil { + return nil + } + out := new(ScheduleFrequencyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingExcludesInitParameters) DeepCopyInto(out *ScopingExcludesInitParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]ExcludesAndInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingExcludesInitParameters. +func (in *ScopingExcludesInitParameters) DeepCopy() *ScopingExcludesInitParameters { + if in == nil { + return nil + } + out := new(ScopingExcludesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScopingExcludesObservation) DeepCopyInto(out *ScopingExcludesObservation) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]ExcludesAndObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingExcludesObservation. +func (in *ScopingExcludesObservation) DeepCopy() *ScopingExcludesObservation { + if in == nil { + return nil + } + out := new(ScopingExcludesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingExcludesParameters) DeepCopyInto(out *ScopingExcludesParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]ExcludesAndParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingExcludesParameters. +func (in *ScopingExcludesParameters) DeepCopy() *ScopingExcludesParameters { + if in == nil { + return nil + } + out := new(ScopingExcludesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingIncludesAndInitParameters) DeepCopyInto(out *ScopingIncludesAndInitParameters) { + *out = *in + if in.SimpleScopeTerm != nil { + in, out := &in.SimpleScopeTerm, &out.SimpleScopeTerm + *out = new(AndSimpleScopeTermInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TagScopeTerm != nil { + in, out := &in.TagScopeTerm, &out.TagScopeTerm + *out = new(AndTagScopeTermInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingIncludesAndInitParameters. 
+func (in *ScopingIncludesAndInitParameters) DeepCopy() *ScopingIncludesAndInitParameters { + if in == nil { + return nil + } + out := new(ScopingIncludesAndInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingIncludesAndObservation) DeepCopyInto(out *ScopingIncludesAndObservation) { + *out = *in + if in.SimpleScopeTerm != nil { + in, out := &in.SimpleScopeTerm, &out.SimpleScopeTerm + *out = new(AndSimpleScopeTermObservation) + (*in).DeepCopyInto(*out) + } + if in.TagScopeTerm != nil { + in, out := &in.TagScopeTerm, &out.TagScopeTerm + *out = new(AndTagScopeTermObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingIncludesAndObservation. +func (in *ScopingIncludesAndObservation) DeepCopy() *ScopingIncludesAndObservation { + if in == nil { + return nil + } + out := new(ScopingIncludesAndObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingIncludesAndParameters) DeepCopyInto(out *ScopingIncludesAndParameters) { + *out = *in + if in.SimpleScopeTerm != nil { + in, out := &in.SimpleScopeTerm, &out.SimpleScopeTerm + *out = new(AndSimpleScopeTermParameters) + (*in).DeepCopyInto(*out) + } + if in.TagScopeTerm != nil { + in, out := &in.TagScopeTerm, &out.TagScopeTerm + *out = new(AndTagScopeTermParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingIncludesAndParameters. 
+func (in *ScopingIncludesAndParameters) DeepCopy() *ScopingIncludesAndParameters { + if in == nil { + return nil + } + out := new(ScopingIncludesAndParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingIncludesInitParameters) DeepCopyInto(out *ScopingIncludesInitParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]ScopingIncludesAndInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingIncludesInitParameters. +func (in *ScopingIncludesInitParameters) DeepCopy() *ScopingIncludesInitParameters { + if in == nil { + return nil + } + out := new(ScopingIncludesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingIncludesObservation) DeepCopyInto(out *ScopingIncludesObservation) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]ScopingIncludesAndObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingIncludesObservation. +func (in *ScopingIncludesObservation) DeepCopy() *ScopingIncludesObservation { + if in == nil { + return nil + } + out := new(ScopingIncludesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScopingIncludesParameters) DeepCopyInto(out *ScopingIncludesParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = make([]ScopingIncludesAndParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingIncludesParameters. +func (in *ScopingIncludesParameters) DeepCopy() *ScopingIncludesParameters { + if in == nil { + return nil + } + out := new(ScopingIncludesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingInitParameters) DeepCopyInto(out *ScopingInitParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = new(ScopingExcludesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = new(ScopingIncludesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingInitParameters. +func (in *ScopingInitParameters) DeepCopy() *ScopingInitParameters { + if in == nil { + return nil + } + out := new(ScopingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingObservation) DeepCopyInto(out *ScopingObservation) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = new(ScopingExcludesObservation) + (*in).DeepCopyInto(*out) + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = new(ScopingIncludesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingObservation. 
+func (in *ScopingObservation) DeepCopy() *ScopingObservation { + if in == nil { + return nil + } + out := new(ScopingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopingParameters) DeepCopyInto(out *ScopingParameters) { + *out = *in + if in.Excludes != nil { + in, out := &in.Excludes, &out.Excludes + *out = new(ScopingExcludesParameters) + (*in).DeepCopyInto(*out) + } + if in.Includes != nil { + in, out := &in.Includes, &out.Includes + *out = new(ScopingIncludesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopingParameters. +func (in *ScopingParameters) DeepCopy() *ScopingParameters { + if in == nil { + return nil + } + out := new(ScopingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleCriterionInitParameters) DeepCopyInto(out *SimpleCriterionInitParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleCriterionInitParameters. 
+func (in *SimpleCriterionInitParameters) DeepCopy() *SimpleCriterionInitParameters { + if in == nil { + return nil + } + out := new(SimpleCriterionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleCriterionObservation) DeepCopyInto(out *SimpleCriterionObservation) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleCriterionObservation. +func (in *SimpleCriterionObservation) DeepCopy() *SimpleCriterionObservation { + if in == nil { + return nil + } + out := new(SimpleCriterionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleCriterionParameters) DeepCopyInto(out *SimpleCriterionParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleCriterionParameters. 
+func (in *SimpleCriterionParameters) DeepCopy() *SimpleCriterionParameters { + if in == nil { + return nil + } + out := new(SimpleCriterionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleScopeTermInitParameters) DeepCopyInto(out *SimpleScopeTermInitParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleScopeTermInitParameters. +func (in *SimpleScopeTermInitParameters) DeepCopy() *SimpleScopeTermInitParameters { + if in == nil { + return nil + } + out := new(SimpleScopeTermInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleScopeTermObservation) DeepCopyInto(out *SimpleScopeTermObservation) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleScopeTermObservation. 
+func (in *SimpleScopeTermObservation) DeepCopy() *SimpleScopeTermObservation { + if in == nil { + return nil + } + out := new(SimpleScopeTermObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleScopeTermParameters) DeepCopyInto(out *SimpleScopeTermParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleScopeTermParameters. +func (in *SimpleScopeTermParameters) DeepCopy() *SimpleScopeTermParameters { + if in == nil { + return nil + } + out := new(SimpleScopeTermParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagCriterionInitParameters) DeepCopyInto(out *TagCriterionInitParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]TagValuesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagCriterionInitParameters. 
+func (in *TagCriterionInitParameters) DeepCopy() *TagCriterionInitParameters { + if in == nil { + return nil + } + out := new(TagCriterionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagCriterionObservation) DeepCopyInto(out *TagCriterionObservation) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]TagValuesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagCriterionObservation. +func (in *TagCriterionObservation) DeepCopy() *TagCriterionObservation { + if in == nil { + return nil + } + out := new(TagCriterionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagCriterionParameters) DeepCopyInto(out *TagCriterionParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]TagValuesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagCriterionParameters. +func (in *TagCriterionParameters) DeepCopy() *TagCriterionParameters { + if in == nil { + return nil + } + out := new(TagCriterionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TagCriterionTagValuesInitParameters) DeepCopyInto(out *TagCriterionTagValuesInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagCriterionTagValuesInitParameters. +func (in *TagCriterionTagValuesInitParameters) DeepCopy() *TagCriterionTagValuesInitParameters { + if in == nil { + return nil + } + out := new(TagCriterionTagValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagCriterionTagValuesObservation) DeepCopyInto(out *TagCriterionTagValuesObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagCriterionTagValuesObservation. +func (in *TagCriterionTagValuesObservation) DeepCopy() *TagCriterionTagValuesObservation { + if in == nil { + return nil + } + out := new(TagCriterionTagValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagCriterionTagValuesParameters) DeepCopyInto(out *TagCriterionTagValuesParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagCriterionTagValuesParameters. 
+func (in *TagCriterionTagValuesParameters) DeepCopy() *TagCriterionTagValuesParameters { + if in == nil { + return nil + } + out := new(TagCriterionTagValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagScopeTermInitParameters) DeepCopyInto(out *TagScopeTermInitParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]TagScopeTermTagValuesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagScopeTermInitParameters. +func (in *TagScopeTermInitParameters) DeepCopy() *TagScopeTermInitParameters { + if in == nil { + return nil + } + out := new(TagScopeTermInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TagScopeTermObservation) DeepCopyInto(out *TagScopeTermObservation) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]TagScopeTermTagValuesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagScopeTermObservation. +func (in *TagScopeTermObservation) DeepCopy() *TagScopeTermObservation { + if in == nil { + return nil + } + out := new(TagScopeTermObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagScopeTermParameters) DeepCopyInto(out *TagScopeTermParameters) { + *out = *in + if in.Comparator != nil { + in, out := &in.Comparator, &out.Comparator + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.TagValues != nil { + in, out := &in.TagValues, &out.TagValues + *out = make([]TagScopeTermTagValuesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagScopeTermParameters. 
+func (in *TagScopeTermParameters) DeepCopy() *TagScopeTermParameters { + if in == nil { + return nil + } + out := new(TagScopeTermParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagScopeTermTagValuesInitParameters) DeepCopyInto(out *TagScopeTermTagValuesInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagScopeTermTagValuesInitParameters. +func (in *TagScopeTermTagValuesInitParameters) DeepCopy() *TagScopeTermTagValuesInitParameters { + if in == nil { + return nil + } + out := new(TagScopeTermTagValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagScopeTermTagValuesObservation) DeepCopyInto(out *TagScopeTermTagValuesObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagScopeTermTagValuesObservation. +func (in *TagScopeTermTagValuesObservation) DeepCopy() *TagScopeTermTagValuesObservation { + if in == nil { + return nil + } + out := new(TagScopeTermTagValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TagScopeTermTagValuesParameters) DeepCopyInto(out *TagScopeTermTagValuesParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagScopeTermTagValuesParameters. +func (in *TagScopeTermTagValuesParameters) DeepCopy() *TagScopeTermTagValuesParameters { + if in == nil { + return nil + } + out := new(TagScopeTermTagValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagValuesInitParameters) DeepCopyInto(out *TagValuesInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagValuesInitParameters. +func (in *TagValuesInitParameters) DeepCopy() *TagValuesInitParameters { + if in == nil { + return nil + } + out := new(TagValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagValuesObservation) DeepCopyInto(out *TagValuesObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagValuesObservation. 
+func (in *TagValuesObservation) DeepCopy() *TagValuesObservation { + if in == nil { + return nil + } + out := new(TagValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagValuesParameters) DeepCopyInto(out *TagValuesParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagValuesParameters. +func (in *TagValuesParameters) DeepCopy() *TagValuesParameters { + if in == nil { + return nil + } + out := new(TagValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPausedDetailsInitParameters) DeepCopyInto(out *UserPausedDetailsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPausedDetailsInitParameters. +func (in *UserPausedDetailsInitParameters) DeepCopy() *UserPausedDetailsInitParameters { + if in == nil { + return nil + } + out := new(UserPausedDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserPausedDetailsObservation) DeepCopyInto(out *UserPausedDetailsObservation) { + *out = *in + if in.JobExpiresAt != nil { + in, out := &in.JobExpiresAt, &out.JobExpiresAt + *out = new(string) + **out = **in + } + if in.JobImminentExpirationHealthEventArn != nil { + in, out := &in.JobImminentExpirationHealthEventArn, &out.JobImminentExpirationHealthEventArn + *out = new(string) + **out = **in + } + if in.JobPausedAt != nil { + in, out := &in.JobPausedAt, &out.JobPausedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPausedDetailsObservation. +func (in *UserPausedDetailsObservation) DeepCopy() *UserPausedDetailsObservation { + if in == nil { + return nil + } + out := new(UserPausedDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPausedDetailsParameters) DeepCopyInto(out *UserPausedDetailsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPausedDetailsParameters. +func (in *UserPausedDetailsParameters) DeepCopy() *UserPausedDetailsParameters { + if in == nil { + return nil + } + out := new(UserPausedDetailsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/macie2/v1beta2/zz_generated.managed.go b/apis/macie2/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..c6f762d828 --- /dev/null +++ b/apis/macie2/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ClassificationJob. 
+func (mg *ClassificationJob) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ClassificationJob. +func (mg *ClassificationJob) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ClassificationJob. +func (mg *ClassificationJob) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ClassificationJob. +func (mg *ClassificationJob) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ClassificationJob. +func (mg *ClassificationJob) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ClassificationJob. +func (mg *ClassificationJob) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ClassificationJob. +func (mg *ClassificationJob) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ClassificationJob. +func (mg *ClassificationJob) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ClassificationJob. +func (mg *ClassificationJob) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ClassificationJob. +func (mg *ClassificationJob) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ClassificationJob. +func (mg *ClassificationJob) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ClassificationJob. 
+func (mg *ClassificationJob) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FindingsFilter. +func (mg *FindingsFilter) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FindingsFilter. +func (mg *FindingsFilter) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FindingsFilter. +func (mg *FindingsFilter) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FindingsFilter. +func (mg *FindingsFilter) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FindingsFilter. +func (mg *FindingsFilter) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FindingsFilter. +func (mg *FindingsFilter) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FindingsFilter. +func (mg *FindingsFilter) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FindingsFilter. +func (mg *FindingsFilter) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FindingsFilter. +func (mg *FindingsFilter) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FindingsFilter. +func (mg *FindingsFilter) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FindingsFilter. 
+func (mg *FindingsFilter) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FindingsFilter. +func (mg *FindingsFilter) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/macie2/v1beta2/zz_generated.managedlist.go b/apis/macie2/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..f1ebcbb665 --- /dev/null +++ b/apis/macie2/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ClassificationJobList. +func (l *ClassificationJobList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FindingsFilterList. +func (l *FindingsFilterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/macie2/v1beta2/zz_groupversion_info.go b/apis/macie2/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..6293bf8cfd --- /dev/null +++ b/apis/macie2/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=macie2.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "macie2.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/mediaconvert/v1beta1/zz_generated.conversion_spokes.go b/apis/mediaconvert/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..11dd8b4976 --- /dev/null +++ b/apis/mediaconvert/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Queue to the hub type. +func (tr *Queue) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Queue type. 
+func (tr *Queue) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/mediaconvert/v1beta2/zz_generated.conversion_hubs.go b/apis/mediaconvert/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..9f8e93f55c --- /dev/null +++ b/apis/mediaconvert/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Queue) Hub() {} diff --git a/apis/mediaconvert/v1beta2/zz_generated.deepcopy.go b/apis/mediaconvert/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..be3a79d6b2 --- /dev/null +++ b/apis/mediaconvert/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,381 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Queue) DeepCopyInto(out *Queue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. 
+func (in *Queue) DeepCopy() *Queue { + if in == nil { + return nil + } + out := new(Queue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Queue) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueInitParameters) DeepCopyInto(out *QueueInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.PricingPlan != nil { + in, out := &in.PricingPlan, &out.PricingPlan + *out = new(string) + **out = **in + } + if in.ReservationPlanSettings != nil { + in, out := &in.ReservationPlanSettings, &out.ReservationPlanSettings + *out = new(ReservationPlanSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueInitParameters. +func (in *QueueInitParameters) DeepCopy() *QueueInitParameters { + if in == nil { + return nil + } + out := new(QueueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueList) DeepCopyInto(out *QueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Queue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. +func (in *QueueList) DeepCopy() *QueueList { + if in == nil { + return nil + } + out := new(QueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueObservation) DeepCopyInto(out *QueueObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PricingPlan != nil { + in, out := &in.PricingPlan, &out.PricingPlan + *out = new(string) + **out = **in + } + if in.ReservationPlanSettings != nil { + in, out := &in.ReservationPlanSettings, &out.ReservationPlanSettings + *out = new(ReservationPlanSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } 
+ } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueObservation. +func (in *QueueObservation) DeepCopy() *QueueObservation { + if in == nil { + return nil + } + out := new(QueueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueParameters) DeepCopyInto(out *QueueParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.PricingPlan != nil { + in, out := &in.PricingPlan, &out.PricingPlan + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReservationPlanSettings != nil { + in, out := &in.ReservationPlanSettings, &out.ReservationPlanSettings + *out = new(ReservationPlanSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueParameters. 
+func (in *QueueParameters) DeepCopy() *QueueParameters { + if in == nil { + return nil + } + out := new(QueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. +func (in *QueueSpec) DeepCopy() *QueueSpec { + if in == nil { + return nil + } + out := new(QueueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueStatus) DeepCopyInto(out *QueueStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus. +func (in *QueueStatus) DeepCopy() *QueueStatus { + if in == nil { + return nil + } + out := new(QueueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReservationPlanSettingsInitParameters) DeepCopyInto(out *ReservationPlanSettingsInitParameters) { + *out = *in + if in.Commitment != nil { + in, out := &in.Commitment, &out.Commitment + *out = new(string) + **out = **in + } + if in.RenewalType != nil { + in, out := &in.RenewalType, &out.RenewalType + *out = new(string) + **out = **in + } + if in.ReservedSlots != nil { + in, out := &in.ReservedSlots, &out.ReservedSlots + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReservationPlanSettingsInitParameters. +func (in *ReservationPlanSettingsInitParameters) DeepCopy() *ReservationPlanSettingsInitParameters { + if in == nil { + return nil + } + out := new(ReservationPlanSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReservationPlanSettingsObservation) DeepCopyInto(out *ReservationPlanSettingsObservation) { + *out = *in + if in.Commitment != nil { + in, out := &in.Commitment, &out.Commitment + *out = new(string) + **out = **in + } + if in.RenewalType != nil { + in, out := &in.RenewalType, &out.RenewalType + *out = new(string) + **out = **in + } + if in.ReservedSlots != nil { + in, out := &in.ReservedSlots, &out.ReservedSlots + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReservationPlanSettingsObservation. +func (in *ReservationPlanSettingsObservation) DeepCopy() *ReservationPlanSettingsObservation { + if in == nil { + return nil + } + out := new(ReservationPlanSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReservationPlanSettingsParameters) DeepCopyInto(out *ReservationPlanSettingsParameters) { + *out = *in + if in.Commitment != nil { + in, out := &in.Commitment, &out.Commitment + *out = new(string) + **out = **in + } + if in.RenewalType != nil { + in, out := &in.RenewalType, &out.RenewalType + *out = new(string) + **out = **in + } + if in.ReservedSlots != nil { + in, out := &in.ReservedSlots, &out.ReservedSlots + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReservationPlanSettingsParameters. +func (in *ReservationPlanSettingsParameters) DeepCopy() *ReservationPlanSettingsParameters { + if in == nil { + return nil + } + out := new(ReservationPlanSettingsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/mediaconvert/v1beta2/zz_generated.managed.go b/apis/mediaconvert/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..6272fa79fd --- /dev/null +++ b/apis/mediaconvert/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Queue. +func (mg *Queue) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Queue. +func (mg *Queue) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Queue. +func (mg *Queue) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Queue. +func (mg *Queue) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Queue. 
+func (mg *Queue) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Queue. +func (mg *Queue) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Queue. +func (mg *Queue) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Queue. +func (mg *Queue) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Queue. +func (mg *Queue) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Queue. +func (mg *Queue) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Queue. +func (mg *Queue) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Queue. +func (mg *Queue) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/mediaconvert/v1beta2/zz_generated.managedlist.go b/apis/mediaconvert/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..2475abe02a --- /dev/null +++ b/apis/mediaconvert/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this QueueList. 
+func (l *QueueList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/mediaconvert/v1beta2/zz_groupversion_info.go b/apis/mediaconvert/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..4d1711e8aa --- /dev/null +++ b/apis/mediaconvert/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=mediaconvert.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "mediaconvert.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/mediaconvert/v1beta2/zz_queue_terraformed.go b/apis/mediaconvert/v1beta2/zz_queue_terraformed.go new file mode 100755 index 0000000000..9bfd71a1e5 --- /dev/null +++ b/apis/mediaconvert/v1beta2/zz_queue_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+
+package v1beta2
+
+import (
+ "dario.cat/mergo"
+ "github.com/pkg/errors"
+
+ "github.com/crossplane/upjet/pkg/resource"
+ "github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Queue
+func (mg *Queue) GetTerraformResourceType() string {
+ return "aws_media_convert_queue"
+}
+
+// GetConnectionDetailsMapping for this Queue
+func (tr *Queue) GetConnectionDetailsMapping() map[string]string {
+ return nil
+}
+
+// GetObservation of this Queue
+func (tr *Queue) GetObservation() (map[string]any, error) {
+ o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Queue
+func (tr *Queue) SetObservation(obs map[string]any) error {
+ p, err := json.TFParser.Marshal(obs)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Queue
+func (tr *Queue) GetID() string {
+ if tr.Status.AtProvider.ID == nil {
+ return ""
+ }
+ return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Queue
+func (tr *Queue) GetParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Queue
+func (tr *Queue) SetParameters(params map[string]any) error {
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Queue
+func (tr *Queue) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Queue
+func (tr *Queue) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+ }
+
+ return params, nil
+}
+
+// LateInitialize this Queue using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Queue) LateInitialize(attrs []byte) (bool, error) {
+ params := &QueueParameters{}
+ if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+ return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+ }
+ opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+ li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Queue) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mediaconvert/v1beta2/zz_queue_types.go b/apis/mediaconvert/v1beta2/zz_queue_types.go new file mode 100755 index 0000000000..e44811c7b7 --- /dev/null +++ b/apis/mediaconvert/v1beta2/zz_queue_types.go @@ -0,0 +1,190 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type QueueInitParameters struct { + + // A description of the queue + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies whether the pricing plan for the queue is on-demand or reserved. Valid values are ON_DEMAND or RESERVED. Default to ON_DEMAND. + PricingPlan *string `json:"pricingPlan,omitempty" tf:"pricing_plan,omitempty"` + + // A detail pricing plan of the reserved queue. See below. + ReservationPlanSettings *ReservationPlanSettingsInitParameters `json:"reservationPlanSettings,omitempty" tf:"reservation_plan_settings,omitempty"` + + // A status of the queue. Valid values are ACTIVE or RESERVED. Default to PAUSED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type QueueObservation struct { + + // The Arn of the queue + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A description of the queue + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The same as name + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies whether the pricing plan for the queue is on-demand or reserved. Valid values are ON_DEMAND or RESERVED. Default to ON_DEMAND. + PricingPlan *string `json:"pricingPlan,omitempty" tf:"pricing_plan,omitempty"` + + // A detail pricing plan of the reserved queue. See below. + ReservationPlanSettings *ReservationPlanSettingsObservation `json:"reservationPlanSettings,omitempty" tf:"reservation_plan_settings,omitempty"` + + // A status of the queue. Valid values are ACTIVE or RESERVED. Default to PAUSED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type QueueParameters struct { + + // A description of the queue + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies whether the pricing plan for the queue is on-demand or reserved. Valid values are ON_DEMAND or RESERVED. Default to ON_DEMAND. + // +kubebuilder:validation:Optional + PricingPlan *string `json:"pricingPlan,omitempty" tf:"pricing_plan,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A detail pricing plan of the reserved queue. See below. + // +kubebuilder:validation:Optional + ReservationPlanSettings *ReservationPlanSettingsParameters `json:"reservationPlanSettings,omitempty" tf:"reservation_plan_settings,omitempty"` + + // A status of the queue. Valid values are ACTIVE or RESERVED. Default to PAUSED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ReservationPlanSettingsInitParameters struct { + + // The length of the term of your reserved queue pricing plan commitment. Valid value is ONE_YEAR. + Commitment *string `json:"commitment,omitempty" tf:"commitment,omitempty"` + + // Specifies whether the term of your reserved queue pricing plan. Valid values are AUTO_RENEW or EXPIRE. + RenewalType *string `json:"renewalType,omitempty" tf:"renewal_type,omitempty"` + + // Specifies the number of reserved transcode slots (RTS) for queue. + ReservedSlots *float64 `json:"reservedSlots,omitempty" tf:"reserved_slots,omitempty"` +} + +type ReservationPlanSettingsObservation struct { + + // The length of the term of your reserved queue pricing plan commitment. Valid value is ONE_YEAR. + Commitment *string `json:"commitment,omitempty" tf:"commitment,omitempty"` + + // Specifies whether the term of your reserved queue pricing plan. Valid values are AUTO_RENEW or EXPIRE. + RenewalType *string `json:"renewalType,omitempty" tf:"renewal_type,omitempty"` + + // Specifies the number of reserved transcode slots (RTS) for queue. 
+ ReservedSlots *float64 `json:"reservedSlots,omitempty" tf:"reserved_slots,omitempty"` +} + +type ReservationPlanSettingsParameters struct { + + // The length of the term of your reserved queue pricing plan commitment. Valid value is ONE_YEAR. + // +kubebuilder:validation:Optional + Commitment *string `json:"commitment" tf:"commitment,omitempty"` + + // Specifies whether the term of your reserved queue pricing plan. Valid values are AUTO_RENEW or EXPIRE. + // +kubebuilder:validation:Optional + RenewalType *string `json:"renewalType" tf:"renewal_type,omitempty"` + + // Specifies the number of reserved transcode slots (RTS) for queue. + // +kubebuilder:validation:Optional + ReservedSlots *float64 `json:"reservedSlots" tf:"reserved_slots,omitempty"` +} + +// QueueSpec defines the desired state of Queue +type QueueSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider QueueParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider QueueInitParameters `json:"initProvider,omitempty"` +} + +// QueueStatus defines the observed state of Queue. +type QueueStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider QueueObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Queue is the Schema for the Queues API. 
Provides an AWS Elemental MediaConvert Queue. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Queue struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec QueueSpec `json:"spec"` + Status QueueStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// QueueList contains a list of Queues +type QueueList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Queue `json:"items"` +} + +// Repository type metadata. +var ( + Queue_Kind = "Queue" + Queue_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Queue_Kind}.String() + Queue_KindAPIVersion = Queue_Kind + "." + CRDGroupVersion.String() + Queue_GroupVersionKind = CRDGroupVersion.WithKind(Queue_Kind) +) + +func init() { + SchemeBuilder.Register(&Queue{}, &QueueList{}) +} diff --git a/apis/medialive/v1beta1/zz_generated.conversion_hubs.go b/apis/medialive/v1beta1/zz_generated.conversion_hubs.go index 12b4fecdba..4c9e0c5c41 100755 --- a/apis/medialive/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/medialive/v1beta1/zz_generated.conversion_hubs.go @@ -6,14 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Channel) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Input) Hub() {} - // Hub marks this type as a conversion hub. func (tr *InputSecurityGroup) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *Multiplex) Hub() {} diff --git a/apis/medialive/v1beta1/zz_generated.conversion_spokes.go b/apis/medialive/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..6ab8513e10 --- /dev/null +++ b/apis/medialive/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Channel to the hub type. +func (tr *Channel) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Channel type. +func (tr *Channel) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Input to the hub type. 
+func (tr *Input) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Input type. +func (tr *Input) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Multiplex to the hub type. +func (tr *Multiplex) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Multiplex type. 
+func (tr *Multiplex) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/medialive/v1beta2/zz_channel_terraformed.go b/apis/medialive/v1beta2/zz_channel_terraformed.go new file mode 100755 index 0000000000..d21da98e28 --- /dev/null +++ b/apis/medialive/v1beta2/zz_channel_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Channel +func (mg *Channel) GetTerraformResourceType() string { + return "aws_medialive_channel" +} + +// GetConnectionDetailsMapping for this Channel +func (tr *Channel) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Channel +func (tr *Channel) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Channel +func (tr *Channel) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Channel +func (tr *Channel) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this Channel +func (tr *Channel) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Channel +func (tr *Channel) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Channel +func (tr *Channel) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Channel +func (tr *Channel) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Channel using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Channel) LateInitialize(attrs []byte) (bool, error) { + params := &ChannelParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Channel) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/medialive/v1beta2/zz_channel_types.go b/apis/medialive/v1beta2/zz_channel_types.go new file mode 100755 index 0000000000..3404aeb1b4 --- /dev/null +++ b/apis/medialive/v1beta2/zz_channel_types.go @@ -0,0 +1,8129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AacSettingsInitParameters struct { + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Set to "broadcasterMixedAd" when input contains pre-mixed main audio + AD (narration) as a stereo pair. + InputType *string `json:"inputType,omitempty" tf:"input_type,omitempty"` + + // AAC profile. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // The rate control mode. 
+ RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Sets LATM/LOAS AAC output for raw containers. + RawFormat *string `json:"rawFormat,omitempty" tf:"raw_format,omitempty"` + + // Sample rate in Hz. + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` + + // Use MPEG-2 AAC audio instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream containers. + Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` + + // VBR Quality Level - Only used if rateControlMode is VBR. + VbrQuality *string `json:"vbrQuality,omitempty" tf:"vbr_quality,omitempty"` +} + +type AacSettingsObservation struct { + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Set to "broadcasterMixedAd" when input contains pre-mixed main audio + AD (narration) as a stereo pair. + InputType *string `json:"inputType,omitempty" tf:"input_type,omitempty"` + + // AAC profile. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // The rate control mode. + RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Sets LATM/LOAS AAC output for raw containers. + RawFormat *string `json:"rawFormat,omitempty" tf:"raw_format,omitempty"` + + // Sample rate in Hz. + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` + + // Use MPEG-2 AAC audio instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream containers. + Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` + + // VBR Quality Level - Only used if rateControlMode is VBR. + VbrQuality *string `json:"vbrQuality,omitempty" tf:"vbr_quality,omitempty"` +} + +type AacSettingsParameters struct { + + // Average bitrate in bits/second. 
+ // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + // +kubebuilder:validation:Optional + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Set to "broadcasterMixedAd" when input contains pre-mixed main audio + AD (narration) as a stereo pair. + // +kubebuilder:validation:Optional + InputType *string `json:"inputType,omitempty" tf:"input_type,omitempty"` + + // AAC profile. + // +kubebuilder:validation:Optional + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // The rate control mode. + // +kubebuilder:validation:Optional + RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Sets LATM/LOAS AAC output for raw containers. + // +kubebuilder:validation:Optional + RawFormat *string `json:"rawFormat,omitempty" tf:"raw_format,omitempty"` + + // Sample rate in Hz. + // +kubebuilder:validation:Optional + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` + + // Use MPEG-2 AAC audio instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream containers. + // +kubebuilder:validation:Optional + Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` + + // VBR Quality Level - Only used if rateControlMode is VBR. + // +kubebuilder:validation:Optional + VbrQuality *string `json:"vbrQuality,omitempty" tf:"vbr_quality,omitempty"` +} + +type Ac3SettingsInitParameters struct { + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Specifies the bitstream mode (bsmod) for the emitted AC-3 stream. + BitstreamMode *string `json:"bitstreamMode,omitempty" tf:"bitstream_mode,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sets the dialnorm of the output. 
+ Dialnorm *float64 `json:"dialnorm,omitempty" tf:"dialnorm,omitempty"` + + // If set to filmStandard, adds dynamic range compression signaling to the output bitstream as defined in the Dolby Digital specification. + DrcProfile *string `json:"drcProfile,omitempty" tf:"drc_profile,omitempty"` + + // When set to enabled, applies a 120Hz lowpass filter to the LFE channel prior to encoding. + LfeFilter *string `json:"lfeFilter,omitempty" tf:"lfe_filter,omitempty"` + + // Metadata control. + MetadataControl *string `json:"metadataControl,omitempty" tf:"metadata_control,omitempty"` +} + +type Ac3SettingsObservation struct { + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Specifies the bitstream mode (bsmod) for the emitted AC-3 stream. + BitstreamMode *string `json:"bitstreamMode,omitempty" tf:"bitstream_mode,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sets the dialnorm of the output. + Dialnorm *float64 `json:"dialnorm,omitempty" tf:"dialnorm,omitempty"` + + // If set to filmStandard, adds dynamic range compression signaling to the output bitstream as defined in the Dolby Digital specification. + DrcProfile *string `json:"drcProfile,omitempty" tf:"drc_profile,omitempty"` + + // When set to enabled, applies a 120Hz lowpass filter to the LFE channel prior to encoding. + LfeFilter *string `json:"lfeFilter,omitempty" tf:"lfe_filter,omitempty"` + + // Metadata control. + MetadataControl *string `json:"metadataControl,omitempty" tf:"metadata_control,omitempty"` +} + +type Ac3SettingsParameters struct { + + // Average bitrate in bits/second. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Specifies the bitstream mode (bsmod) for the emitted AC-3 stream. 
+ // +kubebuilder:validation:Optional + BitstreamMode *string `json:"bitstreamMode,omitempty" tf:"bitstream_mode,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + // +kubebuilder:validation:Optional + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sets the dialnorm of the output. + // +kubebuilder:validation:Optional + Dialnorm *float64 `json:"dialnorm,omitempty" tf:"dialnorm,omitempty"` + + // If set to filmStandard, adds dynamic range compression signaling to the output bitstream as defined in the Dolby Digital specification. + // +kubebuilder:validation:Optional + DrcProfile *string `json:"drcProfile,omitempty" tf:"drc_profile,omitempty"` + + // When set to enabled, applies a 120Hz lowpass filter to the LFE channel prior to encoding. + // +kubebuilder:validation:Optional + LfeFilter *string `json:"lfeFilter,omitempty" tf:"lfe_filter,omitempty"` + + // Metadata control. + // +kubebuilder:validation:Optional + MetadataControl *string `json:"metadataControl,omitempty" tf:"metadata_control,omitempty"` +} + +type AncillarySourceSettingsInitParameters struct { + + // Specifies the number (1 to 4) of the captions channel you want to extract from the ancillary captions. If you plan to convert the ancillary captions to another format, complete this field. If you plan to choose Embedded as the captions destination in the output (to pass through all the channels in the ancillary captions), leave this field blank because MediaLive ignores the field. + SourceAncillaryChannelNumber *float64 `json:"sourceAncillaryChannelNumber,omitempty" tf:"source_ancillary_channel_number,omitempty"` +} + +type AncillarySourceSettingsObservation struct { + + // Specifies the number (1 to 4) of the captions channel you want to extract from the ancillary captions. If you plan to convert the ancillary captions to another format, complete this field. 
If you plan to choose Embedded as the captions destination in the output (to pass through all the channels in the ancillary captions), leave this field blank because MediaLive ignores the field. + SourceAncillaryChannelNumber *float64 `json:"sourceAncillaryChannelNumber,omitempty" tf:"source_ancillary_channel_number,omitempty"` +} + +type AncillarySourceSettingsParameters struct { + + // Specifies the number (1 to 4) of the captions channel you want to extract from the ancillary captions. If you plan to convert the ancillary captions to another format, complete this field. If you plan to choose Embedded as the captions destination in the output (to pass through all the channels in the ancillary captions), leave this field blank because MediaLive ignores the field. + // +kubebuilder:validation:Optional + SourceAncillaryChannelNumber *float64 `json:"sourceAncillaryChannelNumber,omitempty" tf:"source_ancillary_channel_number,omitempty"` +} + +type ArchiveCdnSettingsInitParameters struct { + + // Archive S3 Settings. See Archive S3 Settings for more details. + ArchiveS3Settings *ArchiveS3SettingsInitParameters `json:"archiveS3Settings,omitempty" tf:"archive_s3_settings,omitempty"` +} + +type ArchiveCdnSettingsObservation struct { + + // Archive S3 Settings. See Archive S3 Settings for more details. + ArchiveS3Settings *ArchiveS3SettingsObservation `json:"archiveS3Settings,omitempty" tf:"archive_s3_settings,omitempty"` +} + +type ArchiveCdnSettingsParameters struct { + + // Archive S3 Settings. See Archive S3 Settings for more details. + // +kubebuilder:validation:Optional + ArchiveS3Settings *ArchiveS3SettingsParameters `json:"archiveS3Settings,omitempty" tf:"archive_s3_settings,omitempty"` +} + +type ArchiveGroupSettingsInitParameters struct { + + // Parameters that control the interactions with the CDN. See Archive CDN Settings for more details. 
+ ArchiveCdnSettings *ArchiveCdnSettingsInitParameters `json:"archiveCdnSettings,omitempty" tf:"archive_cdn_settings,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + Destination *DestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Number of seconds to write to archive file before closing and starting a new one. + RolloverInterval *float64 `json:"rolloverInterval,omitempty" tf:"rollover_interval,omitempty"` +} + +type ArchiveGroupSettingsObservation struct { + + // Parameters that control the interactions with the CDN. See Archive CDN Settings for more details. + ArchiveCdnSettings *ArchiveCdnSettingsObservation `json:"archiveCdnSettings,omitempty" tf:"archive_cdn_settings,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + Destination *DestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Number of seconds to write to archive file before closing and starting a new one. + RolloverInterval *float64 `json:"rolloverInterval,omitempty" tf:"rollover_interval,omitempty"` +} + +type ArchiveGroupSettingsParameters struct { + + // Parameters that control the interactions with the CDN. See Archive CDN Settings for more details. + // +kubebuilder:validation:Optional + ArchiveCdnSettings *ArchiveCdnSettingsParameters `json:"archiveCdnSettings,omitempty" tf:"archive_cdn_settings,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + // +kubebuilder:validation:Optional + Destination *DestinationParameters `json:"destination" tf:"destination,omitempty"` + + // Number of seconds to write to archive file before closing and starting a new one. 
+ // +kubebuilder:validation:Optional + RolloverInterval *float64 `json:"rolloverInterval,omitempty" tf:"rollover_interval,omitempty"` +} + +type ArchiveOutputSettingsInitParameters struct { + + // Settings specific to the container type of the file. See Container Settings for more details. + ContainerSettings *ContainerSettingsInitParameters `json:"containerSettings,omitempty" tf:"container_settings,omitempty"` + + // Output file extension. + Extension *string `json:"extension,omitempty" tf:"extension,omitempty"` + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` +} + +type ArchiveOutputSettingsObservation struct { + + // Settings specific to the container type of the file. See Container Settings for more details. + ContainerSettings *ContainerSettingsObservation `json:"containerSettings,omitempty" tf:"container_settings,omitempty"` + + // Output file extension. + Extension *string `json:"extension,omitempty" tf:"extension,omitempty"` + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` +} + +type ArchiveOutputSettingsParameters struct { + + // Settings specific to the container type of the file. See Container Settings for more details. + // +kubebuilder:validation:Optional + ContainerSettings *ContainerSettingsParameters `json:"containerSettings,omitempty" tf:"container_settings,omitempty"` + + // Output file extension. + // +kubebuilder:validation:Optional + Extension *string `json:"extension,omitempty" tf:"extension,omitempty"` + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. 
+ // +kubebuilder:validation:Optional + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` +} + +type ArchiveS3SettingsInitParameters struct { + + // Specify the canned ACL to apply to each S3 request. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` +} + +type ArchiveS3SettingsObservation struct { + + // Specify the canned ACL to apply to each S3 request. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` +} + +type ArchiveS3SettingsParameters struct { + + // Specify the canned ACL to apply to each S3 request. + // +kubebuilder:validation:Optional + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` +} + +type AribDestinationSettingsInitParameters struct { +} + +type AribDestinationSettingsObservation struct { +} + +type AribDestinationSettingsParameters struct { +} + +type AribSourceSettingsInitParameters struct { +} + +type AribSourceSettingsObservation struct { +} + +type AribSourceSettingsParameters struct { +} + +type AudioDescriptionsInitParameters struct { + + // Advanced audio normalization settings. See Audio Normalization Settings for more details. + AudioNormalizationSettings *AudioNormalizationSettingsInitParameters `json:"audioNormalizationSettings,omitempty" tf:"audio_normalization_settings,omitempty"` + + // The name of the audio selector in the input that MediaLive should monitor to detect silence. Select your most important rendition. If you didn't create an audio selector in this input, leave blank. + AudioSelectorName *string `json:"audioSelectorName,omitempty" tf:"audio_selector_name,omitempty"` + + // Applies only if audioTypeControl is useConfigured. The values for audioType are defined in ISO-IEC 13818-1. + AudioType *string `json:"audioType,omitempty" tf:"audio_type,omitempty"` + + // Determined how audio type is determined. 
+ AudioTypeControl *string `json:"audioTypeControl,omitempty" tf:"audio_type_control,omitempty"` + + // Settings to configure one or more solutions that insert audio watermarks in the audio encode. See Audio Watermark Settings for more details. + AudioWatermarkSettings *AudioWatermarkSettingsInitParameters `json:"audioWatermarkSettings,omitempty" tf:"audio_watermark_settings,omitempty"` + + // Audio codec settings. See Audio Codec Settings for more details. + CodecSettings *CodecSettingsInitParameters `json:"codecSettings,omitempty" tf:"codec_settings,omitempty"` + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + LanguageCodeControl *string `json:"languageCodeControl,omitempty" tf:"language_code_control,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + RemixSettings *RemixSettingsInitParameters `json:"remixSettings,omitempty" tf:"remix_settings,omitempty"` + + // Stream name RTMP destinations (URLs of type rtmp://) + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type AudioDescriptionsObservation struct { + + // Advanced audio normalization settings. See Audio Normalization Settings for more details. + AudioNormalizationSettings *AudioNormalizationSettingsObservation `json:"audioNormalizationSettings,omitempty" tf:"audio_normalization_settings,omitempty"` + + // The name of the audio selector in the input that MediaLive should monitor to detect silence. Select your most important rendition. If you didn't create an audio selector in this input, leave blank. + AudioSelectorName *string `json:"audioSelectorName,omitempty" tf:"audio_selector_name,omitempty"` + + // Applies only if audioTypeControl is useConfigured. 
The values for audioType are defined in ISO-IEC 13818-1. + AudioType *string `json:"audioType,omitempty" tf:"audio_type,omitempty"` + + // Determined how audio type is determined. + AudioTypeControl *string `json:"audioTypeControl,omitempty" tf:"audio_type_control,omitempty"` + + // Settings to configure one or more solutions that insert audio watermarks in the audio encode. See Audio Watermark Settings for more details. + AudioWatermarkSettings *AudioWatermarkSettingsObservation `json:"audioWatermarkSettings,omitempty" tf:"audio_watermark_settings,omitempty"` + + // Audio codec settings. See Audio Codec Settings for more details. + CodecSettings *CodecSettingsObservation `json:"codecSettings,omitempty" tf:"codec_settings,omitempty"` + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + LanguageCodeControl *string `json:"languageCodeControl,omitempty" tf:"language_code_control,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + RemixSettings *RemixSettingsObservation `json:"remixSettings,omitempty" tf:"remix_settings,omitempty"` + + // Stream name RTMP destinations (URLs of type rtmp://) + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type AudioDescriptionsParameters struct { + + // Advanced audio normalization settings. See Audio Normalization Settings for more details. + // +kubebuilder:validation:Optional + AudioNormalizationSettings *AudioNormalizationSettingsParameters `json:"audioNormalizationSettings,omitempty" tf:"audio_normalization_settings,omitempty"` + + // The name of the audio selector in the input that MediaLive should monitor to detect silence. Select your most important rendition. 
If you didn't create an audio selector in this input, leave blank. + // +kubebuilder:validation:Optional + AudioSelectorName *string `json:"audioSelectorName" tf:"audio_selector_name,omitempty"` + + // Applies only if audioTypeControl is useConfigured. The values for audioType are defined in ISO-IEC 13818-1. + // +kubebuilder:validation:Optional + AudioType *string `json:"audioType,omitempty" tf:"audio_type,omitempty"` + + // Determined how audio type is determined. + // +kubebuilder:validation:Optional + AudioTypeControl *string `json:"audioTypeControl,omitempty" tf:"audio_type_control,omitempty"` + + // Settings to configure one or more solutions that insert audio watermarks in the audio encode. See Audio Watermark Settings for more details. + // +kubebuilder:validation:Optional + AudioWatermarkSettings *AudioWatermarkSettingsParameters `json:"audioWatermarkSettings,omitempty" tf:"audio_watermark_settings,omitempty"` + + // Audio codec settings. See Audio Codec Settings for more details. + // +kubebuilder:validation:Optional + CodecSettings *CodecSettingsParameters `json:"codecSettings,omitempty" tf:"codec_settings,omitempty"` + + // Selects a specific three-letter language code from within an audio source. + // +kubebuilder:validation:Optional + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // +kubebuilder:validation:Optional + LanguageCodeControl *string `json:"languageCodeControl,omitempty" tf:"language_code_control,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ // +kubebuilder:validation:Optional + RemixSettings *RemixSettingsParameters `json:"remixSettings,omitempty" tf:"remix_settings,omitempty"` + + // Stream name RTMP destinations (URLs of type rtmp://) + // +kubebuilder:validation:Optional + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type AudioHlsRenditionSelectionInitParameters struct { + + // Specifies the GROUP-ID in the #EXT-X-MEDIA tag of the target HLS audio rendition. + GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AudioHlsRenditionSelectionObservation struct { + + // Specifies the GROUP-ID in the #EXT-X-MEDIA tag of the target HLS audio rendition. + GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AudioHlsRenditionSelectionParameters struct { + + // Specifies the GROUP-ID in the #EXT-X-MEDIA tag of the target HLS audio rendition. + // +kubebuilder:validation:Optional + GroupID *string `json:"groupId" tf:"group_id,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type AudioLanguageSelectionInitParameters struct { + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // When set to “strict”, the transport stream demux strictly identifies audio streams by their language descriptor. If a PMT update occurs such that an audio stream matching the initially selected language is no longer present then mute will be encoded until the language returns. If “loose”, then on a PMT update the demux will choose another audio stream in the program with the same stream type if it can’t find one with the same language. 
+ LanguageSelectionPolicy *string `json:"languageSelectionPolicy,omitempty" tf:"language_selection_policy,omitempty"` +} + +type AudioLanguageSelectionObservation struct { + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // When set to “strict”, the transport stream demux strictly identifies audio streams by their language descriptor. If a PMT update occurs such that an audio stream matching the initially selected language is no longer present then mute will be encoded until the language returns. If “loose”, then on a PMT update the demux will choose another audio stream in the program with the same stream type if it can’t find one with the same language. + LanguageSelectionPolicy *string `json:"languageSelectionPolicy,omitempty" tf:"language_selection_policy,omitempty"` +} + +type AudioLanguageSelectionParameters struct { + + // Selects a specific three-letter language code from within an audio source. + // +kubebuilder:validation:Optional + LanguageCode *string `json:"languageCode" tf:"language_code,omitempty"` + + // When set to “strict”, the transport stream demux strictly identifies audio streams by their language descriptor. If a PMT update occurs such that an audio stream matching the initially selected language is no longer present then mute will be encoded until the language returns. If “loose”, then on a PMT update the demux will choose another audio stream in the program with the same stream type if it can’t find one with the same language. + // +kubebuilder:validation:Optional + LanguageSelectionPolicy *string `json:"languageSelectionPolicy,omitempty" tf:"language_selection_policy,omitempty"` +} + +type AudioNormalizationSettingsInitParameters struct { + + // Audio normalization algorithm to use. itu17701 conforms to the CALM Act specification, itu17702 to the EBU R-128 specification. 
+ Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // Algorithm control for the audio description. + AlgorithmControl *string `json:"algorithmControl,omitempty" tf:"algorithm_control,omitempty"` + + // Target LKFS (loudness) to adjust volume to. + TargetLkfs *float64 `json:"targetLkfs,omitempty" tf:"target_lkfs,omitempty"` +} + +type AudioNormalizationSettingsObservation struct { + + // Audio normalization algorithm to use. itu17701 conforms to the CALM Act specification, itu17702 to the EBU R-128 specification. + Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // Algorithm control for the audio description. + AlgorithmControl *string `json:"algorithmControl,omitempty" tf:"algorithm_control,omitempty"` + + // Target LKFS (loudness) to adjust volume to. + TargetLkfs *float64 `json:"targetLkfs,omitempty" tf:"target_lkfs,omitempty"` +} + +type AudioNormalizationSettingsParameters struct { + + // Audio normalization algorithm to use. itu17701 conforms to the CALM Act specification, itu17702 to the EBU R-128 specification. + // +kubebuilder:validation:Optional + Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // Algorithm control for the audio description. + // +kubebuilder:validation:Optional + AlgorithmControl *string `json:"algorithmControl,omitempty" tf:"algorithm_control,omitempty"` + + // Target LKFS (loudness) to adjust volume to. + // +kubebuilder:validation:Optional + TargetLkfs *float64 `json:"targetLkfs,omitempty" tf:"target_lkfs,omitempty"` +} + +type AudioOnlyHlsSettingsInitParameters struct { + + // Specifies the GROUP-ID in the #EXT-X-MEDIA tag of the target HLS audio rendition. 
+ AudioGroupID *string `json:"audioGroupId,omitempty" tf:"audio_group_id,omitempty"` + + AudioOnlyImage *AudioOnlyImageInitParameters `json:"audioOnlyImage,omitempty" tf:"audio_only_image,omitempty"` + + AudioTrackType *string `json:"audioTrackType,omitempty" tf:"audio_track_type,omitempty"` + + SegmentType *string `json:"segmentType,omitempty" tf:"segment_type,omitempty"` +} + +type AudioOnlyHlsSettingsObservation struct { + + // Specifies the GROUP-ID in the #EXT-X-MEDIA tag of the target HLS audio rendition. + AudioGroupID *string `json:"audioGroupId,omitempty" tf:"audio_group_id,omitempty"` + + AudioOnlyImage *AudioOnlyImageObservation `json:"audioOnlyImage,omitempty" tf:"audio_only_image,omitempty"` + + AudioTrackType *string `json:"audioTrackType,omitempty" tf:"audio_track_type,omitempty"` + + SegmentType *string `json:"segmentType,omitempty" tf:"segment_type,omitempty"` +} + +type AudioOnlyHlsSettingsParameters struct { + + // Specifies the GROUP-ID in the #EXT-X-MEDIA tag of the target HLS audio rendition. + // +kubebuilder:validation:Optional + AudioGroupID *string `json:"audioGroupId,omitempty" tf:"audio_group_id,omitempty"` + + // +kubebuilder:validation:Optional + AudioOnlyImage *AudioOnlyImageParameters `json:"audioOnlyImage,omitempty" tf:"audio_only_image,omitempty"` + + // +kubebuilder:validation:Optional + AudioTrackType *string `json:"audioTrackType,omitempty" tf:"audio_track_type,omitempty"` + + // +kubebuilder:validation:Optional + SegmentType *string `json:"segmentType,omitempty" tf:"segment_type,omitempty"` +} + +type AudioOnlyImageInitParameters struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AudioOnlyImageObservation struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AudioOnlyImageParameters struct { + + // Key used to extract the password from EC2 Parameter store. + // +kubebuilder:validation:Optional + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` + + // Username for destination. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AudioPidSelectionInitParameters struct { + + // Selects a specific PID from within a source. + Pid *float64 `json:"pid,omitempty" tf:"pid,omitempty"` +} + +type AudioPidSelectionObservation struct { + + // Selects a specific PID from within a source. + Pid *float64 `json:"pid,omitempty" tf:"pid,omitempty"` +} + +type AudioPidSelectionParameters struct { + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + Pid *float64 `json:"pid" tf:"pid,omitempty"` +} + +type AudioSelectorInitParameters struct { + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The audio selector settings. See Audio Selector Settings for more details. + SelectorSettings *SelectorSettingsInitParameters `json:"selectorSettings,omitempty" tf:"selector_settings,omitempty"` +} + +type AudioSelectorObservation struct { + + // Name of the Channel. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The audio selector settings. See Audio Selector Settings for more details. + SelectorSettings *SelectorSettingsObservation `json:"selectorSettings,omitempty" tf:"selector_settings,omitempty"` +} + +type AudioSelectorParameters struct { + + // Name of the Channel. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The audio selector settings. See Audio Selector Settings for more details. + // +kubebuilder:validation:Optional + SelectorSettings *SelectorSettingsParameters `json:"selectorSettings,omitempty" tf:"selector_settings,omitempty"` +} + +type AudioSilenceSettingsInitParameters struct { + + // The name of the audio selector in the input that MediaLive should monitor to detect silence. Select your most important rendition. If you didn't create an audio selector in this input, leave blank. + AudioSelectorName *string `json:"audioSelectorName,omitempty" tf:"audio_selector_name,omitempty"` + + // The amount of time (in milliseconds) that the active input must be silent before automatic input failover occurs. Silence is defined as audio loss or audio quieter than -50 dBFS. + AudioSilenceThresholdMsec *float64 `json:"audioSilenceThresholdMsec,omitempty" tf:"audio_silence_threshold_msec,omitempty"` +} + +type AudioSilenceSettingsObservation struct { + + // The name of the audio selector in the input that MediaLive should monitor to detect silence. Select your most important rendition. If you didn't create an audio selector in this input, leave blank. + AudioSelectorName *string `json:"audioSelectorName,omitempty" tf:"audio_selector_name,omitempty"` + + // The amount of time (in milliseconds) that the active input must be silent before automatic input failover occurs. Silence is defined as audio loss or audio quieter than -50 dBFS. 
+ AudioSilenceThresholdMsec *float64 `json:"audioSilenceThresholdMsec,omitempty" tf:"audio_silence_threshold_msec,omitempty"` +} + +type AudioSilenceSettingsParameters struct { + + // The name of the audio selector in the input that MediaLive should monitor to detect silence. Select your most important rendition. If you didn't create an audio selector in this input, leave blank. + // +kubebuilder:validation:Optional + AudioSelectorName *string `json:"audioSelectorName" tf:"audio_selector_name,omitempty"` + + // The amount of time (in milliseconds) that the active input must be silent before automatic input failover occurs. Silence is defined as audio loss or audio quieter than -50 dBFS. + // +kubebuilder:validation:Optional + AudioSilenceThresholdMsec *float64 `json:"audioSilenceThresholdMsec,omitempty" tf:"audio_silence_threshold_msec,omitempty"` +} + +type AudioTrackSelectionInitParameters struct { + + // Configure decoding options for Dolby E streams - these should be Dolby E frames carried in PCM streams tagged with SMPTE-337. See Dolby E Decode for more details. + DolbyEDecode *DolbyEDecodeInitParameters `json:"dolbyEDecode,omitempty" tf:"dolby_e_decode,omitempty"` + + // Selects one or more unique audio tracks from within a source. See Audio Tracks for more details. + Tracks []TracksInitParameters `json:"tracks,omitempty" tf:"tracks,omitempty"` +} + +type AudioTrackSelectionObservation struct { + + // Configure decoding options for Dolby E streams - these should be Dolby E frames carried in PCM streams tagged with SMPTE-337. See Dolby E Decode for more details. + DolbyEDecode *DolbyEDecodeObservation `json:"dolbyEDecode,omitempty" tf:"dolby_e_decode,omitempty"` + + // Selects one or more unique audio tracks from within a source. See Audio Tracks for more details. 
+ Tracks []TracksObservation `json:"tracks,omitempty" tf:"tracks,omitempty"` +} + +type AudioTrackSelectionParameters struct { + + // Configure decoding options for Dolby E streams - these should be Dolby E frames carried in PCM streams tagged with SMPTE-337. See Dolby E Decode for more details. + // +kubebuilder:validation:Optional + DolbyEDecode *DolbyEDecodeParameters `json:"dolbyEDecode,omitempty" tf:"dolby_e_decode,omitempty"` + + // Selects one or more unique audio tracks from within a source. See Audio Tracks for more details. + // +kubebuilder:validation:Optional + Tracks []TracksParameters `json:"tracks" tf:"tracks,omitempty"` +} + +type AudioWatermarkSettingsInitParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + NielsenWatermarksSettings *NielsenWatermarksSettingsInitParameters `json:"nielsenWatermarksSettings,omitempty" tf:"nielsen_watermarks_settings,omitempty"` +} + +type AudioWatermarkSettingsObservation struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + NielsenWatermarksSettings *NielsenWatermarksSettingsObservation `json:"nielsenWatermarksSettings,omitempty" tf:"nielsen_watermarks_settings,omitempty"` +} + +type AudioWatermarkSettingsParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + NielsenWatermarksSettings *NielsenWatermarksSettingsParameters `json:"nielsenWatermarksSettings,omitempty" tf:"nielsen_watermarks_settings,omitempty"` +} + +type AutomaticInputFailoverSettingsInitParameters struct { + + // This clear time defines the requirement a recovered input must meet to be considered healthy. The input must have no failover conditions for this length of time. Enter a time in milliseconds. 
This value is particularly important if the input_preference for the failover pair is set to PRIMARY_INPUT_PREFERRED, because after this time, MediaLive will switch back to the primary input. + ErrorClearTimeMsec *float64 `json:"errorClearTimeMsec,omitempty" tf:"error_clear_time_msec,omitempty"` + + // A list of failover conditions. If any of these conditions occur, MediaLive will perform a failover to the other input. See Failover Condition Block for more details. + FailoverCondition []FailoverConditionInitParameters `json:"failoverCondition,omitempty" tf:"failover_condition,omitempty"` + + // Input preference when deciding which input to make active when a previously failed input has recovered. + InputPreference *string `json:"inputPreference,omitempty" tf:"input_preference,omitempty"` + + // The input ID of the secondary input in the automatic input failover pair. + SecondaryInputID *string `json:"secondaryInputId,omitempty" tf:"secondary_input_id,omitempty"` +} + +type AutomaticInputFailoverSettingsObservation struct { + + // This clear time defines the requirement a recovered input must meet to be considered healthy. The input must have no failover conditions for this length of time. Enter a time in milliseconds. This value is particularly important if the input_preference for the failover pair is set to PRIMARY_INPUT_PREFERRED, because after this time, MediaLive will switch back to the primary input. + ErrorClearTimeMsec *float64 `json:"errorClearTimeMsec,omitempty" tf:"error_clear_time_msec,omitempty"` + + // A list of failover conditions. If any of these conditions occur, MediaLive will perform a failover to the other input. See Failover Condition Block for more details. + FailoverCondition []FailoverConditionObservation `json:"failoverCondition,omitempty" tf:"failover_condition,omitempty"` + + // Input preference when deciding which input to make active when a previously failed input has recovered. 
+ InputPreference *string `json:"inputPreference,omitempty" tf:"input_preference,omitempty"` + + // The input ID of the secondary input in the automatic input failover pair. + SecondaryInputID *string `json:"secondaryInputId,omitempty" tf:"secondary_input_id,omitempty"` +} + +type AutomaticInputFailoverSettingsParameters struct { + + // This clear time defines the requirement a recovered input must meet to be considered healthy. The input must have no failover conditions for this length of time. Enter a time in milliseconds. This value is particularly important if the input_preference for the failover pair is set to PRIMARY_INPUT_PREFERRED, because after this time, MediaLive will switch back to the primary input. + // +kubebuilder:validation:Optional + ErrorClearTimeMsec *float64 `json:"errorClearTimeMsec,omitempty" tf:"error_clear_time_msec,omitempty"` + + // A list of failover conditions. If any of these conditions occur, MediaLive will perform a failover to the other input. See Failover Condition Block for more details. + // +kubebuilder:validation:Optional + FailoverCondition []FailoverConditionParameters `json:"failoverCondition,omitempty" tf:"failover_condition,omitempty"` + + // Input preference when deciding which input to make active when a previously failed input has recovered. + // +kubebuilder:validation:Optional + InputPreference *string `json:"inputPreference,omitempty" tf:"input_preference,omitempty"` + + // The input ID of the secondary input in the automatic input failover pair. + // +kubebuilder:validation:Optional + SecondaryInputID *string `json:"secondaryInputId" tf:"secondary_input_id,omitempty"` +} + +type AvailBlankingImageInitParameters struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AvailBlankingImageObservation struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AvailBlankingImageParameters struct { + + // Key used to extract the password from EC2 Parameter store. + // +kubebuilder:validation:Optional + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` + + // Username for destination. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AvailBlankingInitParameters struct { + + // Blanking image to be used. See Avail Blanking Image for more details. + AvailBlankingImage *AvailBlankingImageInitParameters `json:"availBlankingImage,omitempty" tf:"avail_blanking_image,omitempty"` + + // When set to enabled, causes video, audio and captions to be blanked when insertion metadata is added. + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type AvailBlankingObservation struct { + + // Blanking image to be used. See Avail Blanking Image for more details. + AvailBlankingImage *AvailBlankingImageObservation `json:"availBlankingImage,omitempty" tf:"avail_blanking_image,omitempty"` + + // When set to enabled, causes video, audio and captions to be blanked when insertion metadata is added. + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type AvailBlankingParameters struct { + + // Blanking image to be used. See Avail Blanking Image for more details. 
+ // +kubebuilder:validation:Optional + AvailBlankingImage *AvailBlankingImageParameters `json:"availBlankingImage,omitempty" tf:"avail_blanking_image,omitempty"` + + // When set to enabled, causes video, audio and captions to be blanked when insertion metadata is added. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type BurnInDestinationSettingsInitParameters struct { + + // justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. + Alignment *string `json:"alignment,omitempty" tf:"alignment,omitempty"` + + // in and DVB-Sub font settings must match. + BackgroundColor *string `json:"backgroundColor,omitempty" tf:"background_color,omitempty"` + + // in and DVB-Sub font settings must match. + BackgroundOpacity *float64 `json:"backgroundOpacity,omitempty" tf:"background_opacity,omitempty"` + + // in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See Font for more details. + Font *FontInitParameters `json:"font,omitempty" tf:"font,omitempty"` + + // in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. + FontColor *string `json:"fontColor,omitempty" tf:"font_color,omitempty"` + + // in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. + FontOpacity *float64 `json:"fontOpacity,omitempty" tf:"font_opacity,omitempty"` + + // in and DVB-Sub font settings must match. 
+ FontResolution *float64 `json:"fontResolution,omitempty" tf:"font_resolution,omitempty"` + + // in and DVB-Sub font settings must match. + FontSize *string `json:"fontSize,omitempty" tf:"font_size,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + OutlineColor *string `json:"outlineColor,omitempty" tf:"outline_color,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + OutlineSize *float64 `json:"outlineSize,omitempty" tf:"outline_size,omitempty"` + + // in and DVB-Sub font settings must match. + ShadowColor *string `json:"shadowColor,omitempty" tf:"shadow_color,omitempty"` + + // in and DVB-Sub font settings must match. + ShadowOpacity *float64 `json:"shadowOpacity,omitempty" tf:"shadow_opacity,omitempty"` + + // 2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. + ShadowXOffset *float64 `json:"shadowXOffset,omitempty" tf:"shadow_x_offset,omitempty"` + + // 2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. + ShadowYOffset *float64 `json:"shadowYOffset,omitempty" tf:"shadow_y_offset,omitempty"` + + // Sub/Burn-in outputs. + TeletextGridControl *string `json:"teletextGridControl,omitempty" tf:"teletext_grid_control,omitempty"` + + // in and DVB-Sub font settings must match. + XPosition *float64 `json:"xPosition,omitempty" tf:"x_position,omitempty"` + + // in and DVB-Sub font settings must match. + YPosition *float64 `json:"yPosition,omitempty" tf:"y_position,omitempty"` +} + +type BurnInDestinationSettingsObservation struct { + + // justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. + Alignment *string `json:"alignment,omitempty" tf:"alignment,omitempty"` + + // in and DVB-Sub font settings must match. 
+ BackgroundColor *string `json:"backgroundColor,omitempty" tf:"background_color,omitempty"` + + // in and DVB-Sub font settings must match. + BackgroundOpacity *float64 `json:"backgroundOpacity,omitempty" tf:"background_opacity,omitempty"` + + // in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See Font for more details. + Font *FontObservation `json:"font,omitempty" tf:"font,omitempty"` + + // in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. + FontColor *string `json:"fontColor,omitempty" tf:"font_color,omitempty"` + + // in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. + FontOpacity *float64 `json:"fontOpacity,omitempty" tf:"font_opacity,omitempty"` + + // in and DVB-Sub font settings must match. + FontResolution *float64 `json:"fontResolution,omitempty" tf:"font_resolution,omitempty"` + + // in and DVB-Sub font settings must match. + FontSize *string `json:"fontSize,omitempty" tf:"font_size,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + OutlineColor *string `json:"outlineColor,omitempty" tf:"outline_color,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + OutlineSize *float64 `json:"outlineSize,omitempty" tf:"outline_size,omitempty"` + + // in and DVB-Sub font settings must match. + ShadowColor *string `json:"shadowColor,omitempty" tf:"shadow_color,omitempty"` + + // in and DVB-Sub font settings must match. 
+ ShadowOpacity *float64 `json:"shadowOpacity,omitempty" tf:"shadow_opacity,omitempty"` + + // 2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. + ShadowXOffset *float64 `json:"shadowXOffset,omitempty" tf:"shadow_x_offset,omitempty"` + + // 2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. + ShadowYOffset *float64 `json:"shadowYOffset,omitempty" tf:"shadow_y_offset,omitempty"` + + // Sub/Burn-in outputs. + TeletextGridControl *string `json:"teletextGridControl,omitempty" tf:"teletext_grid_control,omitempty"` + + // in and DVB-Sub font settings must match. + XPosition *float64 `json:"xPosition,omitempty" tf:"x_position,omitempty"` + + // in and DVB-Sub font settings must match. + YPosition *float64 `json:"yPosition,omitempty" tf:"y_position,omitempty"` +} + +type BurnInDestinationSettingsParameters struct { + + // justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + Alignment *string `json:"alignment,omitempty" tf:"alignment,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + BackgroundColor *string `json:"backgroundColor,omitempty" tf:"background_color,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + BackgroundOpacity *float64 `json:"backgroundOpacity,omitempty" tf:"background_opacity,omitempty"` + + // in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See Font for more details. 
+ // +kubebuilder:validation:Optional + Font *FontParameters `json:"font,omitempty" tf:"font,omitempty"` + + // in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + FontColor *string `json:"fontColor,omitempty" tf:"font_color,omitempty"` + + // in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + FontOpacity *float64 `json:"fontOpacity,omitempty" tf:"font_opacity,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + FontResolution *float64 `json:"fontResolution,omitempty" tf:"font_resolution,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + FontSize *string `json:"fontSize,omitempty" tf:"font_size,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + OutlineColor *string `json:"outlineColor" tf:"outline_color,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + OutlineSize *float64 `json:"outlineSize,omitempty" tf:"outline_size,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + ShadowColor *string `json:"shadowColor,omitempty" tf:"shadow_color,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + ShadowOpacity *float64 `json:"shadowOpacity,omitempty" tf:"shadow_opacity,omitempty"` + + // 2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. 
+ // +kubebuilder:validation:Optional + ShadowXOffset *float64 `json:"shadowXOffset,omitempty" tf:"shadow_x_offset,omitempty"` + + // 2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + ShadowYOffset *float64 `json:"shadowYOffset,omitempty" tf:"shadow_y_offset,omitempty"` + + // Sub/Burn-in outputs. + // +kubebuilder:validation:Optional + TeletextGridControl *string `json:"teletextGridControl" tf:"teletext_grid_control,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + XPosition *float64 `json:"xPosition,omitempty" tf:"x_position,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + YPosition *float64 `json:"yPosition,omitempty" tf:"y_position,omitempty"` +} + +type CaptionDescriptionsInitParameters struct { + + // Indicates whether the caption track implements accessibility features such as written descriptions of spoken dialog, music, and sounds. + Accessibility *string `json:"accessibility,omitempty" tf:"accessibility,omitempty"` + + // Specifies which input caption selector to use as a caption source when generating output captions. This field should match a captionSelector name. + CaptionSelectorName *string `json:"captionSelectorName,omitempty" tf:"caption_selector_name,omitempty"` + + // Additional settings for captions destination that depend on the destination type. See Destination Settings for more details. + DestinationSettings *DestinationSettingsInitParameters `json:"destinationSettings,omitempty" tf:"destination_settings,omitempty"` + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Human readable information to indicate captions available for players (eg. English, or Spanish). 
+ LanguageDescription *string `json:"languageDescription,omitempty" tf:"language_description,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CaptionDescriptionsObservation struct { + + // Indicates whether the caption track implements accessibility features such as written descriptions of spoken dialog, music, and sounds. + Accessibility *string `json:"accessibility,omitempty" tf:"accessibility,omitempty"` + + // Specifies which input caption selector to use as a caption source when generating output captions. This field should match a captionSelector name. + CaptionSelectorName *string `json:"captionSelectorName,omitempty" tf:"caption_selector_name,omitempty"` + + // Additional settings for captions destination that depend on the destination type. See Destination Settings for more details. + DestinationSettings *DestinationSettingsObservation `json:"destinationSettings,omitempty" tf:"destination_settings,omitempty"` + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Human readable information to indicate captions available for players (eg. English, or Spanish). + LanguageDescription *string `json:"languageDescription,omitempty" tf:"language_description,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CaptionDescriptionsParameters struct { + + // Indicates whether the caption track implements accessibility features such as written descriptions of spoken dialog, music, and sounds. + // +kubebuilder:validation:Optional + Accessibility *string `json:"accessibility,omitempty" tf:"accessibility,omitempty"` + + // Specifies which input caption selector to use as a caption source when generating output captions. This field should match a captionSelector name. 
+ // +kubebuilder:validation:Optional + CaptionSelectorName *string `json:"captionSelectorName" tf:"caption_selector_name,omitempty"` + + // Additional settings for captions destination that depend on the destination type. See Destination Settings for more details. + // +kubebuilder:validation:Optional + DestinationSettings *DestinationSettingsParameters `json:"destinationSettings,omitempty" tf:"destination_settings,omitempty"` + + // Selects a specific three-letter language code from within an audio source. + // +kubebuilder:validation:Optional + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Human readable information to indicate captions available for players (eg. English, or Spanish). + // +kubebuilder:validation:Optional + LanguageDescription *string `json:"languageDescription,omitempty" tf:"language_description,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type CaptionLanguageMappingsInitParameters struct { + CaptionChannel *float64 `json:"captionChannel,omitempty" tf:"caption_channel,omitempty"` + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Human readable information to indicate captions available for players (eg. English, or Spanish). + LanguageDescription *string `json:"languageDescription,omitempty" tf:"language_description,omitempty"` +} + +type CaptionLanguageMappingsObservation struct { + CaptionChannel *float64 `json:"captionChannel,omitempty" tf:"caption_channel,omitempty"` + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Human readable information to indicate captions available for players (eg. English, or Spanish). 
+ LanguageDescription *string `json:"languageDescription,omitempty" tf:"language_description,omitempty"` +} + +type CaptionLanguageMappingsParameters struct { + + // +kubebuilder:validation:Optional + CaptionChannel *float64 `json:"captionChannel" tf:"caption_channel,omitempty"` + + // Selects a specific three-letter language code from within an audio source. + // +kubebuilder:validation:Optional + LanguageCode *string `json:"languageCode" tf:"language_code,omitempty"` + + // Human readable information to indicate captions available for players (eg. English, or Spanish). + // +kubebuilder:validation:Optional + LanguageDescription *string `json:"languageDescription" tf:"language_description,omitempty"` +} + +type CaptionSelectorInitParameters struct { + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The audio selector settings. See Audio Selector Settings for more details. + SelectorSettings *CaptionSelectorSelectorSettingsInitParameters `json:"selectorSettings,omitempty" tf:"selector_settings,omitempty"` +} + +type CaptionSelectorObservation struct { + + // Selects a specific three-letter language code from within an audio source. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The audio selector settings. See Audio Selector Settings for more details. + SelectorSettings *CaptionSelectorSelectorSettingsObservation `json:"selectorSettings,omitempty" tf:"selector_settings,omitempty"` +} + +type CaptionSelectorParameters struct { + + // Selects a specific three-letter language code from within an audio source. 
+ // +kubebuilder:validation:Optional + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The audio selector settings. See Audio Selector Settings for more details. + // +kubebuilder:validation:Optional + SelectorSettings *CaptionSelectorSelectorSettingsParameters `json:"selectorSettings,omitempty" tf:"selector_settings,omitempty"` +} + +type CaptionSelectorSelectorSettingsInitParameters struct { + + // Ancillary Source Settings. See Ancillary Source Settings for more details. + AncillarySourceSettings *AncillarySourceSettingsInitParameters `json:"ancillarySourceSettings,omitempty" tf:"ancillary_source_settings,omitempty"` + + // ARIB Source Settings. + AribSourceSettings *AribSourceSettingsInitParameters `json:"aribSourceSettings,omitempty" tf:"arib_source_settings,omitempty"` + + // DVB Sub Source Settings. See DVB Sub Source Settings for more details. + DvbSubSourceSettings *DvbSubSourceSettingsInitParameters `json:"dvbSubSourceSettings,omitempty" tf:"dvb_sub_source_settings,omitempty"` + + // Embedded Source Settings. See Embedded Source Settings for more details. + EmbeddedSourceSettings *EmbeddedSourceSettingsInitParameters `json:"embeddedSourceSettings,omitempty" tf:"embedded_source_settings,omitempty"` + + // SCTE20 Source Settings. See SCTE 20 Source Settings for more details. + Scte20SourceSettings *Scte20SourceSettingsInitParameters `json:"scte20SourceSettings,omitempty" tf:"scte20_source_settings,omitempty"` + + // SCTE27 Source Settings. See SCTE 27 Source Settings for more details. + Scte27SourceSettings *Scte27SourceSettingsInitParameters `json:"scte27SourceSettings,omitempty" tf:"scte27_source_settings,omitempty"` + + // Teletext Source Settings. See Teletext Source Settings for more details. 
+ TeletextSourceSettings *TeletextSourceSettingsInitParameters `json:"teletextSourceSettings,omitempty" tf:"teletext_source_settings,omitempty"` +} + +type CaptionSelectorSelectorSettingsObservation struct { + + // Ancillary Source Settings. See Ancillary Source Settings for more details. + AncillarySourceSettings *AncillarySourceSettingsObservation `json:"ancillarySourceSettings,omitempty" tf:"ancillary_source_settings,omitempty"` + + // ARIB Source Settings. + AribSourceSettings *AribSourceSettingsParameters `json:"aribSourceSettings,omitempty" tf:"arib_source_settings,omitempty"` + + // DVB Sub Source Settings. See DVB Sub Source Settings for more details. + DvbSubSourceSettings *DvbSubSourceSettingsObservation `json:"dvbSubSourceSettings,omitempty" tf:"dvb_sub_source_settings,omitempty"` + + // Embedded Source Settings. See Embedded Source Settings for more details. + EmbeddedSourceSettings *EmbeddedSourceSettingsObservation `json:"embeddedSourceSettings,omitempty" tf:"embedded_source_settings,omitempty"` + + // SCTE20 Source Settings. See SCTE 20 Source Settings for more details. + Scte20SourceSettings *Scte20SourceSettingsObservation `json:"scte20SourceSettings,omitempty" tf:"scte20_source_settings,omitempty"` + + // SCTE27 Source Settings. See SCTE 27 Source Settings for more details. + Scte27SourceSettings *Scte27SourceSettingsObservation `json:"scte27SourceSettings,omitempty" tf:"scte27_source_settings,omitempty"` + + // Teletext Source Settings. See Teletext Source Settings for more details. + TeletextSourceSettings *TeletextSourceSettingsObservation `json:"teletextSourceSettings,omitempty" tf:"teletext_source_settings,omitempty"` +} + +type CaptionSelectorSelectorSettingsParameters struct { + + // Ancillary Source Settings. See Ancillary Source Settings for more details. 
+ // +kubebuilder:validation:Optional + AncillarySourceSettings *AncillarySourceSettingsParameters `json:"ancillarySourceSettings,omitempty" tf:"ancillary_source_settings,omitempty"` + + // ARIB Source Settings. + // +kubebuilder:validation:Optional + AribSourceSettings *AribSourceSettingsParameters `json:"aribSourceSettings,omitempty" tf:"arib_source_settings,omitempty"` + + // DVB Sub Source Settings. See DVB Sub Source Settings for more details. + // +kubebuilder:validation:Optional + DvbSubSourceSettings *DvbSubSourceSettingsParameters `json:"dvbSubSourceSettings,omitempty" tf:"dvb_sub_source_settings,omitempty"` + + // Embedded Source Settings. See Embedded Source Settings for more details. + // +kubebuilder:validation:Optional + EmbeddedSourceSettings *EmbeddedSourceSettingsParameters `json:"embeddedSourceSettings,omitempty" tf:"embedded_source_settings,omitempty"` + + // SCTE20 Source Settings. See SCTE 20 Source Settings for more details. + // +kubebuilder:validation:Optional + Scte20SourceSettings *Scte20SourceSettingsParameters `json:"scte20SourceSettings,omitempty" tf:"scte20_source_settings,omitempty"` + + // SCTE27 Source Settings. See SCTE 27 Source Settings for more details. + // +kubebuilder:validation:Optional + Scte27SourceSettings *Scte27SourceSettingsParameters `json:"scte27SourceSettings,omitempty" tf:"scte27_source_settings,omitempty"` + + // Teletext Source Settings. See Teletext Source Settings for more details. + // +kubebuilder:validation:Optional + TeletextSourceSettings *TeletextSourceSettingsParameters `json:"teletextSourceSettings,omitempty" tf:"teletext_source_settings,omitempty"` +} + +type CdiInputSpecificationInitParameters struct { + + // - Maximum CDI input resolution. + Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` +} + +type CdiInputSpecificationObservation struct { + + // - Maximum CDI input resolution. 
+ Resolution *string `json:"resolution,omitempty" tf:"resolution,omitempty"` +} + +type CdiInputSpecificationParameters struct { + + // - Maximum CDI input resolution. + // +kubebuilder:validation:Optional + Resolution *string `json:"resolution" tf:"resolution,omitempty"` +} + +type ChannelInitParameters struct { + + // Specification of CDI inputs for this channel. See CDI Input Specification for more details. + CdiInputSpecification *CdiInputSpecificationInitParameters `json:"cdiInputSpecification,omitempty" tf:"cdi_input_specification,omitempty"` + + // Concise argument description. + ChannelClass *string `json:"channelClass,omitempty" tf:"channel_class,omitempty"` + + // Destinations for channel. See Destinations for more details. + Destinations []DestinationsInitParameters `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // Encoder settings. See Encoder Settings for more details. + EncoderSettings *EncoderSettingsInitParameters `json:"encoderSettings,omitempty" tf:"encoder_settings,omitempty"` + + // Input attachments for the channel. See Input Attachments for more details. + InputAttachments []InputAttachmentsInitParameters `json:"inputAttachments,omitempty" tf:"input_attachments,omitempty"` + + // Specification of network and file inputs for the channel. + InputSpecification *InputSpecificationInitParameters `json:"inputSpecification,omitempty" tf:"input_specification,omitempty"` + + // The log level to write to Cloudwatch logs. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + + // Maintenance settings for this channel. See Maintenance for more details. + Maintenance *MaintenanceInitParameters `json:"maintenance,omitempty" tf:"maintenance,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Concise argument description. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Whether to start/stop channel. Default: false + StartChannel *bool `json:"startChannel,omitempty" tf:"start_channel,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Settings for the VPC outputs. See VPC for more details. + VPC *VPCInitParameters `json:"vpc,omitempty" tf:"vpc,omitempty"` +} + +type ChannelMappingsInitParameters struct { + InputChannelLevels []InputChannelLevelsInitParameters `json:"inputChannelLevels,omitempty" tf:"input_channel_levels,omitempty"` + + OutputChannel *float64 `json:"outputChannel,omitempty" tf:"output_channel,omitempty"` +} + +type ChannelMappingsObservation struct { + InputChannelLevels []InputChannelLevelsObservation `json:"inputChannelLevels,omitempty" tf:"input_channel_levels,omitempty"` + + OutputChannel *float64 `json:"outputChannel,omitempty" tf:"output_channel,omitempty"` +} + +type ChannelMappingsParameters struct { + + // +kubebuilder:validation:Optional + InputChannelLevels []InputChannelLevelsParameters `json:"inputChannelLevels" tf:"input_channel_levels,omitempty"` + + // +kubebuilder:validation:Optional + OutputChannel *float64 `json:"outputChannel" tf:"output_channel,omitempty"` +} + +type ChannelObservation struct { + + // ARN of the Channel. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specification of CDI inputs for this channel. See CDI Input Specification for more details. + CdiInputSpecification *CdiInputSpecificationObservation `json:"cdiInputSpecification,omitempty" tf:"cdi_input_specification,omitempty"` + + // Concise argument description. + ChannelClass *string `json:"channelClass,omitempty" tf:"channel_class,omitempty"` + + // ID of the channel in MediaPackage that is the destination for this output group. + ChannelID *string `json:"channelId,omitempty" tf:"channel_id,omitempty"` + + // Destinations for channel. See Destinations for more details. + Destinations []DestinationsObservation `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // Encoder settings. See Encoder Settings for more details. + EncoderSettings *EncoderSettingsObservation `json:"encoderSettings,omitempty" tf:"encoder_settings,omitempty"` + + // User-specified id. This is used in an output group or an output. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Input attachments for the channel. See Input Attachments for more details. + InputAttachments []InputAttachmentsObservation `json:"inputAttachments,omitempty" tf:"input_attachments,omitempty"` + + // Specification of network and file inputs for the channel. + InputSpecification *InputSpecificationObservation `json:"inputSpecification,omitempty" tf:"input_specification,omitempty"` + + // The log level to write to Cloudwatch logs. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + + // Maintenance settings for this channel. See Maintenance for more details. + Maintenance *MaintenanceObservation `json:"maintenance,omitempty" tf:"maintenance,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Concise argument description. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Whether to start/stop channel. 
Default: false + StartChannel *bool `json:"startChannel,omitempty" tf:"start_channel,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Settings for the VPC outputs. See VPC for more details. + VPC *VPCObservation `json:"vpc,omitempty" tf:"vpc,omitempty"` +} + +type ChannelParameters struct { + + // Specification of CDI inputs for this channel. See CDI Input Specification for more details. + // +kubebuilder:validation:Optional + CdiInputSpecification *CdiInputSpecificationParameters `json:"cdiInputSpecification,omitempty" tf:"cdi_input_specification,omitempty"` + + // Concise argument description. + // +kubebuilder:validation:Optional + ChannelClass *string `json:"channelClass,omitempty" tf:"channel_class,omitempty"` + + // Destinations for channel. See Destinations for more details. + // +kubebuilder:validation:Optional + Destinations []DestinationsParameters `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // Encoder settings. See Encoder Settings for more details. + // +kubebuilder:validation:Optional + EncoderSettings *EncoderSettingsParameters `json:"encoderSettings,omitempty" tf:"encoder_settings,omitempty"` + + // Input attachments for the channel. See Input Attachments for more details. + // +kubebuilder:validation:Optional + InputAttachments []InputAttachmentsParameters `json:"inputAttachments,omitempty" tf:"input_attachments,omitempty"` + + // Specification of network and file inputs for the channel. + // +kubebuilder:validation:Optional + InputSpecification *InputSpecificationParameters `json:"inputSpecification,omitempty" tf:"input_specification,omitempty"` + + // The log level to write to Cloudwatch logs. 
+ // +kubebuilder:validation:Optional + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` + + // Maintenance settings for this channel. See Maintenance for more details. + // +kubebuilder:validation:Optional + Maintenance *MaintenanceParameters `json:"maintenance,omitempty" tf:"maintenance,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Concise argument description. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Whether to start/stop channel. Default: false + // +kubebuilder:validation:Optional + StartChannel *bool `json:"startChannel,omitempty" tf:"start_channel,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Settings for the VPC outputs. See VPC for more details. + // +kubebuilder:validation:Optional + VPC *VPCParameters `json:"vpc,omitempty" tf:"vpc,omitempty"` +} + +type CodecSettingsInitParameters struct { + + // Aac Settings. See AAC Settings for more details. 
+ AacSettings *AacSettingsInitParameters `json:"aacSettings,omitempty" tf:"aac_settings,omitempty"` + + // Ac3 Settings. See AC3 Settings for more details. + Ac3Settings *Ac3SettingsInitParameters `json:"ac3Settings,omitempty" tf:"ac3_settings,omitempty"` + + // - Eac3 Atmos Settings. See EAC3 Atmos Settings + Eac3AtmosSettings *Eac3AtmosSettingsInitParameters `json:"eac3AtmosSettings,omitempty" tf:"eac3_atmos_settings,omitempty"` + + // - Eac3 Settings. See EAC3 Settings + Eac3Settings *Eac3SettingsInitParameters `json:"eac3Settings,omitempty" tf:"eac3_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + Mp2Settings *Mp2SettingsInitParameters `json:"mp2Settings,omitempty" tf:"mp2_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + PassThroughSettings *PassThroughSettingsInitParameters `json:"passThroughSettings,omitempty" tf:"pass_through_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + WavSettings *WavSettingsInitParameters `json:"wavSettings,omitempty" tf:"wav_settings,omitempty"` +} + +type CodecSettingsObservation struct { + + // Aac Settings. See AAC Settings for more details. + AacSettings *AacSettingsObservation `json:"aacSettings,omitempty" tf:"aac_settings,omitempty"` + + // Ac3 Settings. See AC3 Settings for more details. + Ac3Settings *Ac3SettingsObservation `json:"ac3Settings,omitempty" tf:"ac3_settings,omitempty"` + + // - Eac3 Atmos Settings. See EAC3 Atmos Settings + Eac3AtmosSettings *Eac3AtmosSettingsObservation `json:"eac3AtmosSettings,omitempty" tf:"eac3_atmos_settings,omitempty"` + + // - Eac3 Settings. 
See EAC3 Settings + Eac3Settings *Eac3SettingsObservation `json:"eac3Settings,omitempty" tf:"eac3_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + Mp2Settings *Mp2SettingsObservation `json:"mp2Settings,omitempty" tf:"mp2_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + PassThroughSettings *PassThroughSettingsParameters `json:"passThroughSettings,omitempty" tf:"pass_through_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + WavSettings *WavSettingsObservation `json:"wavSettings,omitempty" tf:"wav_settings,omitempty"` +} + +type CodecSettingsParameters struct { + + // Aac Settings. See AAC Settings for more details. + // +kubebuilder:validation:Optional + AacSettings *AacSettingsParameters `json:"aacSettings,omitempty" tf:"aac_settings,omitempty"` + + // Ac3 Settings. See AC3 Settings for more details. + // +kubebuilder:validation:Optional + Ac3Settings *Ac3SettingsParameters `json:"ac3Settings,omitempty" tf:"ac3_settings,omitempty"` + + // - Eac3 Atmos Settings. See EAC3 Atmos Settings + // +kubebuilder:validation:Optional + Eac3AtmosSettings *Eac3AtmosSettingsParameters `json:"eac3AtmosSettings,omitempty" tf:"eac3_atmos_settings,omitempty"` + + // - Eac3 Settings. See EAC3 Settings + // +kubebuilder:validation:Optional + Eac3Settings *Eac3SettingsParameters `json:"eac3Settings,omitempty" tf:"eac3_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + Mp2Settings *Mp2SettingsParameters `json:"mp2Settings,omitempty" tf:"mp2_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. 
See Settings for more details. + // +kubebuilder:validation:Optional + PassThroughSettings *PassThroughSettingsParameters `json:"passThroughSettings,omitempty" tf:"pass_through_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + WavSettings *WavSettingsParameters `json:"wavSettings,omitempty" tf:"wav_settings,omitempty"` +} + +type ColorSpacePassthroughSettingsInitParameters struct { +} + +type ColorSpacePassthroughSettingsObservation struct { +} + +type ColorSpacePassthroughSettingsParameters struct { +} + +type ColorSpaceSettingsInitParameters struct { + + // Sets the colorspace metadata to be passed through. + ColorSpacePassthroughSettings *ColorSpacePassthroughSettingsInitParameters `json:"colorSpacePassthroughSettings,omitempty" tf:"color_space_passthrough_settings,omitempty"` + + // Set the colorspace to Dolby Vision81. + DolbyVision81Settings *DolbyVision81SettingsInitParameters `json:"dolbyVision81Settings,omitempty" tf:"dolby_vision81_settings,omitempty"` + + // Set the colorspace to be HDR10. See H265 HDR10 Settings for more details. + Hdr10Settings *Hdr10SettingsInitParameters `json:"hdr10Settings,omitempty" tf:"hdr10_settings,omitempty"` + + // Set the colorspace to Rec. 601. + Rec601Settings *Rec601SettingsInitParameters `json:"rec601Settings,omitempty" tf:"rec601_settings,omitempty"` + + // Set the colorspace to Rec. 709. + Rec709Settings *Rec709SettingsInitParameters `json:"rec709Settings,omitempty" tf:"rec709_settings,omitempty"` +} + +type ColorSpaceSettingsObservation struct { + + // Sets the colorspace metadata to be passed through. + ColorSpacePassthroughSettings *ColorSpacePassthroughSettingsParameters `json:"colorSpacePassthroughSettings,omitempty" tf:"color_space_passthrough_settings,omitempty"` + + // Set the colorspace to Dolby Vision81. 
+ DolbyVision81Settings *DolbyVision81SettingsParameters `json:"dolbyVision81Settings,omitempty" tf:"dolby_vision81_settings,omitempty"` + + // Set the colorspace to be HDR10. See H265 HDR10 Settings for more details. + Hdr10Settings *Hdr10SettingsObservation `json:"hdr10Settings,omitempty" tf:"hdr10_settings,omitempty"` + + // Set the colorspace to Rec. 601. + Rec601Settings *Rec601SettingsParameters `json:"rec601Settings,omitempty" tf:"rec601_settings,omitempty"` + + // Set the colorspace to Rec. 709. + Rec709Settings *Rec709SettingsParameters `json:"rec709Settings,omitempty" tf:"rec709_settings,omitempty"` +} + +type ColorSpaceSettingsParameters struct { + + // Sets the colorspace metadata to be passed through. + // +kubebuilder:validation:Optional + ColorSpacePassthroughSettings *ColorSpacePassthroughSettingsParameters `json:"colorSpacePassthroughSettings,omitempty" tf:"color_space_passthrough_settings,omitempty"` + + // Set the colorspace to Dolby Vision81. + // +kubebuilder:validation:Optional + DolbyVision81Settings *DolbyVision81SettingsParameters `json:"dolbyVision81Settings,omitempty" tf:"dolby_vision81_settings,omitempty"` + + // Set the colorspace to be HDR10. See H265 HDR10 Settings for more details. + // +kubebuilder:validation:Optional + Hdr10Settings *Hdr10SettingsParameters `json:"hdr10Settings,omitempty" tf:"hdr10_settings,omitempty"` + + // Set the colorspace to Rec. 601. + // +kubebuilder:validation:Optional + Rec601Settings *Rec601SettingsParameters `json:"rec601Settings,omitempty" tf:"rec601_settings,omitempty"` + + // Set the colorspace to Rec. 709. + // +kubebuilder:validation:Optional + Rec709Settings *Rec709SettingsParameters `json:"rec709Settings,omitempty" tf:"rec709_settings,omitempty"` +} + +type ContainerSettingsInitParameters struct { + + // M2TS Settings. See M2TS Settings for more details. + M2TsSettings *M2TsSettingsInitParameters `json:"m2tsSettings,omitempty" tf:"m2ts_settings,omitempty"` + + // Raw Settings. 
This can be set as an empty block. + RawSettings *RawSettingsInitParameters `json:"rawSettings,omitempty" tf:"raw_settings,omitempty"` +} + +type ContainerSettingsM2TsSettingsInitParameters struct { + AbsentInputAudioBehavior *string `json:"absentInputAudioBehavior,omitempty" tf:"absent_input_audio_behavior,omitempty"` + + Arib *string `json:"arib,omitempty" tf:"arib,omitempty"` + + // Selects a specific PID from within a source. + AribCaptionsPid *string `json:"aribCaptionsPid,omitempty" tf:"arib_captions_pid,omitempty"` + + AribCaptionsPidControl *string `json:"aribCaptionsPidControl,omitempty" tf:"arib_captions_pid_control,omitempty"` + + AudioBufferModel *string `json:"audioBufferModel,omitempty" tf:"audio_buffer_model,omitempty"` + + AudioFramesPerPes *float64 `json:"audioFramesPerPes,omitempty" tf:"audio_frames_per_pes,omitempty"` + + AudioPids *string `json:"audioPids,omitempty" tf:"audio_pids,omitempty"` + + AudioStreamType *string `json:"audioStreamType,omitempty" tf:"audio_stream_type,omitempty"` + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + BufferModel *string `json:"bufferModel,omitempty" tf:"buffer_model,omitempty"` + + CcDescriptor *string `json:"ccDescriptor,omitempty" tf:"cc_descriptor,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbNitSettings *M2TsSettingsDvbNitSettingsInitParameters `json:"dvbNitSettings,omitempty" tf:"dvb_nit_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbSdtSettings *M2TsSettingsDvbSdtSettingsInitParameters `json:"dvbSdtSettings,omitempty" tf:"dvb_sdt_settings,omitempty"` + + DvbSubPids *string `json:"dvbSubPids,omitempty" tf:"dvb_sub_pids,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. 
See Settings for more details. + DvbTdtSettings *M2TsSettingsDvbTdtSettingsInitParameters `json:"dvbTdtSettings,omitempty" tf:"dvb_tdt_settings,omitempty"` + + // Selects a specific PID from within a source. + DvbTeletextPid *string `json:"dvbTeletextPid,omitempty" tf:"dvb_teletext_pid,omitempty"` + + Ebif *string `json:"ebif,omitempty" tf:"ebif,omitempty"` + + EbpAudioInterval *string `json:"ebpAudioInterval,omitempty" tf:"ebp_audio_interval,omitempty"` + + EbpLookaheadMs *float64 `json:"ebpLookaheadMs,omitempty" tf:"ebp_lookahead_ms,omitempty"` + + EbpPlacement *string `json:"ebpPlacement,omitempty" tf:"ebp_placement,omitempty"` + + // Selects a specific PID from within a source. + EcmPid *string `json:"ecmPid,omitempty" tf:"ecm_pid,omitempty"` + + EsRateInPes *string `json:"esRateInPes,omitempty" tf:"es_rate_in_pes,omitempty"` + + // Selects a specific PID from within a source. + EtvPlatformPid *string `json:"etvPlatformPid,omitempty" tf:"etv_platform_pid,omitempty"` + + // Selects a specific PID from within a source. + EtvSignalPid *string `json:"etvSignalPid,omitempty" tf:"etv_signal_pid,omitempty"` + + FragmentTime *float64 `json:"fragmentTime,omitempty" tf:"fragment_time,omitempty"` + + Klv *string `json:"klv,omitempty" tf:"klv,omitempty"` + + KlvDataPids *string `json:"klvDataPids,omitempty" tf:"klv_data_pids,omitempty"` + + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + // Average bitrate in bits/second. + NullPacketBitrate *float64 `json:"nullPacketBitrate,omitempty" tf:"null_packet_bitrate,omitempty"` + + PatInterval *float64 `json:"patInterval,omitempty" tf:"pat_interval,omitempty"` + + PcrControl *string `json:"pcrControl,omitempty" tf:"pcr_control,omitempty"` + + PcrPeriod *float64 `json:"pcrPeriod,omitempty" tf:"pcr_period,omitempty"` + + // Selects a specific PID from within a source. 
+ PcrPid *string `json:"pcrPid,omitempty" tf:"pcr_pid,omitempty"` + + PmtInterval *float64 `json:"pmtInterval,omitempty" tf:"pmt_interval,omitempty"` + + // Selects a specific PID from within a source. + PmtPid *string `json:"pmtPid,omitempty" tf:"pmt_pid,omitempty"` + + ProgramNum *float64 `json:"programNum,omitempty" tf:"program_num,omitempty"` + + RateMode *string `json:"rateMode,omitempty" tf:"rate_mode,omitempty"` + + Scte27Pids *string `json:"scte27Pids,omitempty" tf:"scte27_pids,omitempty"` + + Scte35Control *string `json:"scte35Control,omitempty" tf:"scte35_control,omitempty"` + + // PID from which to read SCTE-35 messages. + Scte35Pid *string `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + SegmentationMarkers *string `json:"segmentationMarkers,omitempty" tf:"segmentation_markers,omitempty"` + + SegmentationStyle *string `json:"segmentationStyle,omitempty" tf:"segmentation_style,omitempty"` + + SegmentationTime *float64 `json:"segmentationTime,omitempty" tf:"segmentation_time,omitempty"` + + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` + + // Selects a specific PID from within a source. + TimedMetadataPid *string `json:"timedMetadataPid,omitempty" tf:"timed_metadata_pid,omitempty"` + + // User-specified id. This is used in an output group or an output. + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Selects a specific PID from within a source. + VideoPid *string `json:"videoPid,omitempty" tf:"video_pid,omitempty"` +} + +type ContainerSettingsM2TsSettingsObservation struct { + AbsentInputAudioBehavior *string `json:"absentInputAudioBehavior,omitempty" tf:"absent_input_audio_behavior,omitempty"` + + Arib *string `json:"arib,omitempty" tf:"arib,omitempty"` + + // Selects a specific PID from within a source. 
+ AribCaptionsPid *string `json:"aribCaptionsPid,omitempty" tf:"arib_captions_pid,omitempty"` + + AribCaptionsPidControl *string `json:"aribCaptionsPidControl,omitempty" tf:"arib_captions_pid_control,omitempty"` + + AudioBufferModel *string `json:"audioBufferModel,omitempty" tf:"audio_buffer_model,omitempty"` + + AudioFramesPerPes *float64 `json:"audioFramesPerPes,omitempty" tf:"audio_frames_per_pes,omitempty"` + + AudioPids *string `json:"audioPids,omitempty" tf:"audio_pids,omitempty"` + + AudioStreamType *string `json:"audioStreamType,omitempty" tf:"audio_stream_type,omitempty"` + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + BufferModel *string `json:"bufferModel,omitempty" tf:"buffer_model,omitempty"` + + CcDescriptor *string `json:"ccDescriptor,omitempty" tf:"cc_descriptor,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbNitSettings *M2TsSettingsDvbNitSettingsObservation `json:"dvbNitSettings,omitempty" tf:"dvb_nit_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbSdtSettings *M2TsSettingsDvbSdtSettingsObservation `json:"dvbSdtSettings,omitempty" tf:"dvb_sdt_settings,omitempty"` + + DvbSubPids *string `json:"dvbSubPids,omitempty" tf:"dvb_sub_pids,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbTdtSettings *M2TsSettingsDvbTdtSettingsObservation `json:"dvbTdtSettings,omitempty" tf:"dvb_tdt_settings,omitempty"` + + // Selects a specific PID from within a source. 
+ DvbTeletextPid *string `json:"dvbTeletextPid,omitempty" tf:"dvb_teletext_pid,omitempty"` + + Ebif *string `json:"ebif,omitempty" tf:"ebif,omitempty"` + + EbpAudioInterval *string `json:"ebpAudioInterval,omitempty" tf:"ebp_audio_interval,omitempty"` + + EbpLookaheadMs *float64 `json:"ebpLookaheadMs,omitempty" tf:"ebp_lookahead_ms,omitempty"` + + EbpPlacement *string `json:"ebpPlacement,omitempty" tf:"ebp_placement,omitempty"` + + // Selects a specific PID from within a source. + EcmPid *string `json:"ecmPid,omitempty" tf:"ecm_pid,omitempty"` + + EsRateInPes *string `json:"esRateInPes,omitempty" tf:"es_rate_in_pes,omitempty"` + + // Selects a specific PID from within a source. + EtvPlatformPid *string `json:"etvPlatformPid,omitempty" tf:"etv_platform_pid,omitempty"` + + // Selects a specific PID from within a source. + EtvSignalPid *string `json:"etvSignalPid,omitempty" tf:"etv_signal_pid,omitempty"` + + FragmentTime *float64 `json:"fragmentTime,omitempty" tf:"fragment_time,omitempty"` + + Klv *string `json:"klv,omitempty" tf:"klv,omitempty"` + + KlvDataPids *string `json:"klvDataPids,omitempty" tf:"klv_data_pids,omitempty"` + + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + // Average bitrate in bits/second. + NullPacketBitrate *float64 `json:"nullPacketBitrate,omitempty" tf:"null_packet_bitrate,omitempty"` + + PatInterval *float64 `json:"patInterval,omitempty" tf:"pat_interval,omitempty"` + + PcrControl *string `json:"pcrControl,omitempty" tf:"pcr_control,omitempty"` + + PcrPeriod *float64 `json:"pcrPeriod,omitempty" tf:"pcr_period,omitempty"` + + // Selects a specific PID from within a source. + PcrPid *string `json:"pcrPid,omitempty" tf:"pcr_pid,omitempty"` + + PmtInterval *float64 `json:"pmtInterval,omitempty" tf:"pmt_interval,omitempty"` + + // Selects a specific PID from within a source. 
+ PmtPid *string `json:"pmtPid,omitempty" tf:"pmt_pid,omitempty"` + + ProgramNum *float64 `json:"programNum,omitempty" tf:"program_num,omitempty"` + + RateMode *string `json:"rateMode,omitempty" tf:"rate_mode,omitempty"` + + Scte27Pids *string `json:"scte27Pids,omitempty" tf:"scte27_pids,omitempty"` + + Scte35Control *string `json:"scte35Control,omitempty" tf:"scte35_control,omitempty"` + + // PID from which to read SCTE-35 messages. + Scte35Pid *string `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + SegmentationMarkers *string `json:"segmentationMarkers,omitempty" tf:"segmentation_markers,omitempty"` + + SegmentationStyle *string `json:"segmentationStyle,omitempty" tf:"segmentation_style,omitempty"` + + SegmentationTime *float64 `json:"segmentationTime,omitempty" tf:"segmentation_time,omitempty"` + + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` + + // Selects a specific PID from within a source. + TimedMetadataPid *string `json:"timedMetadataPid,omitempty" tf:"timed_metadata_pid,omitempty"` + + // User-specified id. Ths is used in an output group or an output. + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Selects a specific PID from within a source. + VideoPid *string `json:"videoPid,omitempty" tf:"video_pid,omitempty"` +} + +type ContainerSettingsM2TsSettingsParameters struct { + + // +kubebuilder:validation:Optional + AbsentInputAudioBehavior *string `json:"absentInputAudioBehavior,omitempty" tf:"absent_input_audio_behavior,omitempty"` + + // +kubebuilder:validation:Optional + Arib *string `json:"arib,omitempty" tf:"arib,omitempty"` + + // Selects a specific PID from within a source. 
+ // +kubebuilder:validation:Optional + AribCaptionsPid *string `json:"aribCaptionsPid,omitempty" tf:"arib_captions_pid,omitempty"` + + // +kubebuilder:validation:Optional + AribCaptionsPidControl *string `json:"aribCaptionsPidControl,omitempty" tf:"arib_captions_pid_control,omitempty"` + + // +kubebuilder:validation:Optional + AudioBufferModel *string `json:"audioBufferModel,omitempty" tf:"audio_buffer_model,omitempty"` + + // +kubebuilder:validation:Optional + AudioFramesPerPes *float64 `json:"audioFramesPerPes,omitempty" tf:"audio_frames_per_pes,omitempty"` + + // +kubebuilder:validation:Optional + AudioPids *string `json:"audioPids,omitempty" tf:"audio_pids,omitempty"` + + // +kubebuilder:validation:Optional + AudioStreamType *string `json:"audioStreamType,omitempty" tf:"audio_stream_type,omitempty"` + + // Average bitrate in bits/second. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // +kubebuilder:validation:Optional + BufferModel *string `json:"bufferModel,omitempty" tf:"buffer_model,omitempty"` + + // +kubebuilder:validation:Optional + CcDescriptor *string `json:"ccDescriptor,omitempty" tf:"cc_descriptor,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + DvbNitSettings *M2TsSettingsDvbNitSettingsParameters `json:"dvbNitSettings,omitempty" tf:"dvb_nit_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + DvbSdtSettings *M2TsSettingsDvbSdtSettingsParameters `json:"dvbSdtSettings,omitempty" tf:"dvb_sdt_settings,omitempty"` + + // +kubebuilder:validation:Optional + DvbSubPids *string `json:"dvbSubPids,omitempty" tf:"dvb_sub_pids,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. 
See Settings for more details. + // +kubebuilder:validation:Optional + DvbTdtSettings *M2TsSettingsDvbTdtSettingsParameters `json:"dvbTdtSettings,omitempty" tf:"dvb_tdt_settings,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + DvbTeletextPid *string `json:"dvbTeletextPid,omitempty" tf:"dvb_teletext_pid,omitempty"` + + // +kubebuilder:validation:Optional + Ebif *string `json:"ebif,omitempty" tf:"ebif,omitempty"` + + // +kubebuilder:validation:Optional + EbpAudioInterval *string `json:"ebpAudioInterval,omitempty" tf:"ebp_audio_interval,omitempty"` + + // +kubebuilder:validation:Optional + EbpLookaheadMs *float64 `json:"ebpLookaheadMs,omitempty" tf:"ebp_lookahead_ms,omitempty"` + + // +kubebuilder:validation:Optional + EbpPlacement *string `json:"ebpPlacement,omitempty" tf:"ebp_placement,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + EcmPid *string `json:"ecmPid,omitempty" tf:"ecm_pid,omitempty"` + + // +kubebuilder:validation:Optional + EsRateInPes *string `json:"esRateInPes,omitempty" tf:"es_rate_in_pes,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + EtvPlatformPid *string `json:"etvPlatformPid,omitempty" tf:"etv_platform_pid,omitempty"` + + // Selects a specific PID from within a source. 
+ // +kubebuilder:validation:Optional + EtvSignalPid *string `json:"etvSignalPid,omitempty" tf:"etv_signal_pid,omitempty"` + + // +kubebuilder:validation:Optional + FragmentTime *float64 `json:"fragmentTime,omitempty" tf:"fragment_time,omitempty"` + + // +kubebuilder:validation:Optional + Klv *string `json:"klv,omitempty" tf:"klv,omitempty"` + + // +kubebuilder:validation:Optional + KlvDataPids *string `json:"klvDataPids,omitempty" tf:"klv_data_pids,omitempty"` + + // +kubebuilder:validation:Optional + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + // Average bitrate in bits/second. + // +kubebuilder:validation:Optional + NullPacketBitrate *float64 `json:"nullPacketBitrate,omitempty" tf:"null_packet_bitrate,omitempty"` + + // +kubebuilder:validation:Optional + PatInterval *float64 `json:"patInterval,omitempty" tf:"pat_interval,omitempty"` + + // +kubebuilder:validation:Optional + PcrControl *string `json:"pcrControl,omitempty" tf:"pcr_control,omitempty"` + + // +kubebuilder:validation:Optional + PcrPeriod *float64 `json:"pcrPeriod,omitempty" tf:"pcr_period,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + PcrPid *string `json:"pcrPid,omitempty" tf:"pcr_pid,omitempty"` + + // +kubebuilder:validation:Optional + PmtInterval *float64 `json:"pmtInterval,omitempty" tf:"pmt_interval,omitempty"` + + // Selects a specific PID from within a source. 
+ // +kubebuilder:validation:Optional + PmtPid *string `json:"pmtPid,omitempty" tf:"pmt_pid,omitempty"` + + // +kubebuilder:validation:Optional + ProgramNum *float64 `json:"programNum,omitempty" tf:"program_num,omitempty"` + + // +kubebuilder:validation:Optional + RateMode *string `json:"rateMode,omitempty" tf:"rate_mode,omitempty"` + + // +kubebuilder:validation:Optional + Scte27Pids *string `json:"scte27Pids,omitempty" tf:"scte27_pids,omitempty"` + + // +kubebuilder:validation:Optional + Scte35Control *string `json:"scte35Control,omitempty" tf:"scte35_control,omitempty"` + + // PID from which to read SCTE-35 messages. + // +kubebuilder:validation:Optional + Scte35Pid *string `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + // +kubebuilder:validation:Optional + SegmentationMarkers *string `json:"segmentationMarkers,omitempty" tf:"segmentation_markers,omitempty"` + + // +kubebuilder:validation:Optional + SegmentationStyle *string `json:"segmentationStyle,omitempty" tf:"segmentation_style,omitempty"` + + // +kubebuilder:validation:Optional + SegmentationTime *float64 `json:"segmentationTime,omitempty" tf:"segmentation_time,omitempty"` + + // +kubebuilder:validation:Optional + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + TimedMetadataPid *string `json:"timedMetadataPid,omitempty" tf:"timed_metadata_pid,omitempty"` + + // User-specified id. Ths is used in an output group or an output. + // +kubebuilder:validation:Optional + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + VideoPid *string `json:"videoPid,omitempty" tf:"video_pid,omitempty"` +} + +type ContainerSettingsObservation struct { + + // M2TS Settings. See M2TS Settings for more details. 
+ M2TsSettings *M2TsSettingsObservation `json:"m2tsSettings,omitempty" tf:"m2ts_settings,omitempty"` + + // Raw Settings. This can be set as an empty block. + RawSettings *RawSettingsParameters `json:"rawSettings,omitempty" tf:"raw_settings,omitempty"` +} + +type ContainerSettingsParameters struct { + + // M2TS Settings. See M2TS Settings for more details. + // +kubebuilder:validation:Optional + M2TsSettings *M2TsSettingsParameters `json:"m2tsSettings,omitempty" tf:"m2ts_settings,omitempty"` + + // Raw Settings. This can be set as an empty block. + // +kubebuilder:validation:Optional + RawSettings *RawSettingsParameters `json:"rawSettings,omitempty" tf:"raw_settings,omitempty"` +} + +type DestinationInitParameters struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type DestinationObservation struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type DestinationParameters struct { + + // Reference ID for the destination. + // +kubebuilder:validation:Optional + DestinationRefID *string `json:"destinationRefId" tf:"destination_ref_id,omitempty"` +} + +type DestinationSettingsInitParameters struct { + + // ARIB Destination Settings. + AribDestinationSettings *AribDestinationSettingsInitParameters `json:"aribDestinationSettings,omitempty" tf:"arib_destination_settings,omitempty"` + + // Burn In Destination Settings. See Burn In Destination Settings for more details. + BurnInDestinationSettings *BurnInDestinationSettingsInitParameters `json:"burnInDestinationSettings,omitempty" tf:"burn_in_destination_settings,omitempty"` + + // DVB Sub Destination Settings. See DVB Sub Destination Settings for more details. 
+ DvbSubDestinationSettings *DvbSubDestinationSettingsInitParameters `json:"dvbSubDestinationSettings,omitempty" tf:"dvb_sub_destination_settings,omitempty"` + + // EBU TT D Destination Settings. See EBU TT D Destination Settings for more details. + EbuTtDDestinationSettings *EbuTtDDestinationSettingsInitParameters `json:"ebuTtDDestinationSettings,omitempty" tf:"ebu_tt_d_destination_settings,omitempty"` + + // Embedded Destination Settings. + EmbeddedDestinationSettings *EmbeddedDestinationSettingsInitParameters `json:"embeddedDestinationSettings,omitempty" tf:"embedded_destination_settings,omitempty"` + + // Embedded Plus SCTE20 Destination Settings. + EmbeddedPlusScte20DestinationSettings *EmbeddedPlusScte20DestinationSettingsInitParameters `json:"embeddedPlusScte20DestinationSettings,omitempty" tf:"embedded_plus_scte20_destination_settings,omitempty"` + + // RTMP Caption Info Destination Settings. + RtmpCaptionInfoDestinationSettings *RtmpCaptionInfoDestinationSettingsInitParameters `json:"rtmpCaptionInfoDestinationSettings,omitempty" tf:"rtmp_caption_info_destination_settings,omitempty"` + + // SCTE20 Plus Embedded Destination Settings. + Scte20PlusEmbeddedDestinationSettings *Scte20PlusEmbeddedDestinationSettingsInitParameters `json:"scte20PlusEmbeddedDestinationSettings,omitempty" tf:"scte20_plus_embedded_destination_settings,omitempty"` + + // – SCTE27 Destination Settings. + Scte27DestinationSettings *Scte27DestinationSettingsInitParameters `json:"scte27DestinationSettings,omitempty" tf:"scte27_destination_settings,omitempty"` + + // – SMPTE TT Destination Settings. + SmpteTtDestinationSettings *SmpteTtDestinationSettingsInitParameters `json:"smpteTtDestinationSettings,omitempty" tf:"smpte_tt_destination_settings,omitempty"` + + // – Teletext Destination Settings. 
+ TeletextDestinationSettings *TeletextDestinationSettingsInitParameters `json:"teletextDestinationSettings,omitempty" tf:"teletext_destination_settings,omitempty"` + + // – TTML Destination Settings. See TTML Destination Settings for more details. + TtmlDestinationSettings *TtmlDestinationSettingsInitParameters `json:"ttmlDestinationSettings,omitempty" tf:"ttml_destination_settings,omitempty"` + + // WebVTT Destination Settings. See WebVTT Destination Settings for more details. + WebvttDestinationSettings *WebvttDestinationSettingsInitParameters `json:"webvttDestinationSettings,omitempty" tf:"webvtt_destination_settings,omitempty"` +} + +type DestinationSettingsObservation struct { + + // ARIB Destination Settings. + AribDestinationSettings *AribDestinationSettingsParameters `json:"aribDestinationSettings,omitempty" tf:"arib_destination_settings,omitempty"` + + // Burn In Destination Settings. See Burn In Destination Settings for more details. + BurnInDestinationSettings *BurnInDestinationSettingsObservation `json:"burnInDestinationSettings,omitempty" tf:"burn_in_destination_settings,omitempty"` + + // DVB Sub Destination Settings. See DVB Sub Destination Settings for more details. + DvbSubDestinationSettings *DvbSubDestinationSettingsObservation `json:"dvbSubDestinationSettings,omitempty" tf:"dvb_sub_destination_settings,omitempty"` + + // EBU TT D Destination Settings. See EBU TT D Destination Settings for more details. + EbuTtDDestinationSettings *EbuTtDDestinationSettingsObservation `json:"ebuTtDDestinationSettings,omitempty" tf:"ebu_tt_d_destination_settings,omitempty"` + + // Embedded Destination Settings. + EmbeddedDestinationSettings *EmbeddedDestinationSettingsParameters `json:"embeddedDestinationSettings,omitempty" tf:"embedded_destination_settings,omitempty"` + + // Embedded Plus SCTE20 Destination Settings. 
+ EmbeddedPlusScte20DestinationSettings *EmbeddedPlusScte20DestinationSettingsParameters `json:"embeddedPlusScte20DestinationSettings,omitempty" tf:"embedded_plus_scte20_destination_settings,omitempty"` + + // RTMP Caption Info Destination Settings. + RtmpCaptionInfoDestinationSettings *RtmpCaptionInfoDestinationSettingsParameters `json:"rtmpCaptionInfoDestinationSettings,omitempty" tf:"rtmp_caption_info_destination_settings,omitempty"` + + // SCTE20 Plus Embedded Destination Settings. + Scte20PlusEmbeddedDestinationSettings *Scte20PlusEmbeddedDestinationSettingsParameters `json:"scte20PlusEmbeddedDestinationSettings,omitempty" tf:"scte20_plus_embedded_destination_settings,omitempty"` + + // – SCTE27 Destination Settings. + Scte27DestinationSettings *Scte27DestinationSettingsParameters `json:"scte27DestinationSettings,omitempty" tf:"scte27_destination_settings,omitempty"` + + // – SMPTE TT Destination Settings. + SmpteTtDestinationSettings *SmpteTtDestinationSettingsParameters `json:"smpteTtDestinationSettings,omitempty" tf:"smpte_tt_destination_settings,omitempty"` + + // – Teletext Destination Settings. + TeletextDestinationSettings *TeletextDestinationSettingsParameters `json:"teletextDestinationSettings,omitempty" tf:"teletext_destination_settings,omitempty"` + + // – TTML Destination Settings. See TTML Destination Settings for more details. + TtmlDestinationSettings *TtmlDestinationSettingsObservation `json:"ttmlDestinationSettings,omitempty" tf:"ttml_destination_settings,omitempty"` + + // WebVTT Destination Settings. See WebVTT Destination Settings for more details. + WebvttDestinationSettings *WebvttDestinationSettingsObservation `json:"webvttDestinationSettings,omitempty" tf:"webvtt_destination_settings,omitempty"` +} + +type DestinationSettingsParameters struct { + + // ARIB Destination Settings. 
+ // +kubebuilder:validation:Optional + AribDestinationSettings *AribDestinationSettingsParameters `json:"aribDestinationSettings,omitempty" tf:"arib_destination_settings,omitempty"` + + // Burn In Destination Settings. See Burn In Destination Settings for more details. + // +kubebuilder:validation:Optional + BurnInDestinationSettings *BurnInDestinationSettingsParameters `json:"burnInDestinationSettings,omitempty" tf:"burn_in_destination_settings,omitempty"` + + // DVB Sub Destination Settings. See DVB Sub Destination Settings for more details. + // +kubebuilder:validation:Optional + DvbSubDestinationSettings *DvbSubDestinationSettingsParameters `json:"dvbSubDestinationSettings,omitempty" tf:"dvb_sub_destination_settings,omitempty"` + + // EBU TT D Destination Settings. See EBU TT D Destination Settings for more details. + // +kubebuilder:validation:Optional + EbuTtDDestinationSettings *EbuTtDDestinationSettingsParameters `json:"ebuTtDDestinationSettings,omitempty" tf:"ebu_tt_d_destination_settings,omitempty"` + + // Embedded Destination Settings. + // +kubebuilder:validation:Optional + EmbeddedDestinationSettings *EmbeddedDestinationSettingsParameters `json:"embeddedDestinationSettings,omitempty" tf:"embedded_destination_settings,omitempty"` + + // Embedded Plus SCTE20 Destination Settings. + // +kubebuilder:validation:Optional + EmbeddedPlusScte20DestinationSettings *EmbeddedPlusScte20DestinationSettingsParameters `json:"embeddedPlusScte20DestinationSettings,omitempty" tf:"embedded_plus_scte20_destination_settings,omitempty"` + + // RTMP Caption Info Destination Settings. + // +kubebuilder:validation:Optional + RtmpCaptionInfoDestinationSettings *RtmpCaptionInfoDestinationSettingsParameters `json:"rtmpCaptionInfoDestinationSettings,omitempty" tf:"rtmp_caption_info_destination_settings,omitempty"` + + // SCTE20 Plus Embedded Destination Settings. 
+ // +kubebuilder:validation:Optional + Scte20PlusEmbeddedDestinationSettings *Scte20PlusEmbeddedDestinationSettingsParameters `json:"scte20PlusEmbeddedDestinationSettings,omitempty" tf:"scte20_plus_embedded_destination_settings,omitempty"` + + // – SCTE27 Destination Settings. + // +kubebuilder:validation:Optional + Scte27DestinationSettings *Scte27DestinationSettingsParameters `json:"scte27DestinationSettings,omitempty" tf:"scte27_destination_settings,omitempty"` + + // – SMPTE TT Destination Settings. + // +kubebuilder:validation:Optional + SmpteTtDestinationSettings *SmpteTtDestinationSettingsParameters `json:"smpteTtDestinationSettings,omitempty" tf:"smpte_tt_destination_settings,omitempty"` + + // – Teletext Destination Settings. + // +kubebuilder:validation:Optional + TeletextDestinationSettings *TeletextDestinationSettingsParameters `json:"teletextDestinationSettings,omitempty" tf:"teletext_destination_settings,omitempty"` + + // – TTML Destination Settings. See TTML Destination Settings for more details. + // +kubebuilder:validation:Optional + TtmlDestinationSettings *TtmlDestinationSettingsParameters `json:"ttmlDestinationSettings,omitempty" tf:"ttml_destination_settings,omitempty"` + + // WebVTT Destination Settings. See WebVTT Destination Settings for more details. + // +kubebuilder:validation:Optional + WebvttDestinationSettings *WebvttDestinationSettingsParameters `json:"webvttDestinationSettings,omitempty" tf:"webvtt_destination_settings,omitempty"` +} + +type DestinationsInitParameters struct { + + // User-specified id. Ths is used in an output group or an output. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Destination settings for a MediaPackage output; one destination for both encoders. See Media Package Settings for more details. 
+ MediaPackageSettings []MediaPackageSettingsInitParameters `json:"mediaPackageSettings,omitempty" tf:"media_package_settings,omitempty"` + + // Destination settings for a Multiplex output; one destination for both encoders. See Multiplex Settings for more details. + MultiplexSettings *MultiplexSettingsInitParameters `json:"multiplexSettings,omitempty" tf:"multiplex_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + Settings []SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type DestinationsObservation struct { + + // User-specified id. Ths is used in an output group or an output. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Destination settings for a MediaPackage output; one destination for both encoders. See Media Package Settings for more details. + MediaPackageSettings []MediaPackageSettingsObservation `json:"mediaPackageSettings,omitempty" tf:"media_package_settings,omitempty"` + + // Destination settings for a Multiplex output; one destination for both encoders. See Multiplex Settings for more details. + MultiplexSettings *MultiplexSettingsObservation `json:"multiplexSettings,omitempty" tf:"multiplex_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + Settings []SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type DestinationsParameters struct { + + // User-specified id. Ths is used in an output group or an output. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Destination settings for a MediaPackage output; one destination for both encoders. See Media Package Settings for more details. 
+ // +kubebuilder:validation:Optional + MediaPackageSettings []MediaPackageSettingsParameters `json:"mediaPackageSettings,omitempty" tf:"media_package_settings,omitempty"` + + // Destination settings for a Multiplex output; one destination for both encoders. See Multiplex Settings for more details. + // +kubebuilder:validation:Optional + MultiplexSettings *MultiplexSettingsParameters `json:"multiplexSettings,omitempty" tf:"multiplex_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + Settings []SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type DolbyEDecodeInitParameters struct { + + // Applies only to Dolby E. Enter the program ID (according to the metadata in the audio) of the Dolby E program to extract from the specified track. One program extracted per audio selector. To select multiple programs, create multiple selectors with the same Track and different Program numbers. “All channels” means to ignore the program IDs and include all the channels in this selector; useful if metadata is known to be incorrect. + ProgramSelection *string `json:"programSelection,omitempty" tf:"program_selection,omitempty"` +} + +type DolbyEDecodeObservation struct { + + // Applies only to Dolby E. Enter the program ID (according to the metadata in the audio) of the Dolby E program to extract from the specified track. One program extracted per audio selector. To select multiple programs, create multiple selectors with the same Track and different Program numbers. “All channels” means to ignore the program IDs and include all the channels in this selector; useful if metadata is known to be incorrect. + ProgramSelection *string `json:"programSelection,omitempty" tf:"program_selection,omitempty"` +} + +type DolbyEDecodeParameters struct { + + // Applies only to Dolby E. 
Enter the program ID (according to the metadata in the audio) of the Dolby E program to extract from the specified track. One program extracted per audio selector. To select multiple programs, create multiple selectors with the same Track and different Program numbers. “All channels” means to ignore the program IDs and include all the channels in this selector; useful if metadata is known to be incorrect. + // +kubebuilder:validation:Optional + ProgramSelection *string `json:"programSelection" tf:"program_selection,omitempty"` +} + +type DolbyVision81SettingsInitParameters struct { +} + +type DolbyVision81SettingsObservation struct { +} + +type DolbyVision81SettingsParameters struct { +} + +type DvbNitSettingsInitParameters struct { + + // User-specified id. Ths is used in an output group or an output. + NetworkID *float64 `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Name of the Channel. + NetworkName *string `json:"networkName,omitempty" tf:"network_name,omitempty"` + + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type DvbNitSettingsObservation struct { + + // User-specified id. Ths is used in an output group or an output. + NetworkID *float64 `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Name of the Channel. + NetworkName *string `json:"networkName,omitempty" tf:"network_name,omitempty"` + + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type DvbNitSettingsParameters struct { + + // User-specified id. Ths is used in an output group or an output. + // +kubebuilder:validation:Optional + NetworkID *float64 `json:"networkId" tf:"network_id,omitempty"` + + // Name of the Channel. 
+ // +kubebuilder:validation:Optional + NetworkName *string `json:"networkName" tf:"network_name,omitempty"` + + // +kubebuilder:validation:Optional + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type DvbSdtSettingsInitParameters struct { + OutputSdt *string `json:"outputSdt,omitempty" tf:"output_sdt,omitempty"` + + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` + + // Name of the Channel. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Name of the Channel. + ServiceProviderName *string `json:"serviceProviderName,omitempty" tf:"service_provider_name,omitempty"` +} + +type DvbSdtSettingsObservation struct { + OutputSdt *string `json:"outputSdt,omitempty" tf:"output_sdt,omitempty"` + + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` + + // Name of the Channel. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Name of the Channel. + ServiceProviderName *string `json:"serviceProviderName,omitempty" tf:"service_provider_name,omitempty"` +} + +type DvbSdtSettingsParameters struct { + + // +kubebuilder:validation:Optional + OutputSdt *string `json:"outputSdt,omitempty" tf:"output_sdt,omitempty"` + + // +kubebuilder:validation:Optional + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + ServiceProviderName *string `json:"serviceProviderName,omitempty" tf:"service_provider_name,omitempty"` +} + +type DvbSubDestinationSettingsFontInitParameters struct { + + // Key used to extract the password from EC2 Parameter store. 
+ PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type DvbSubDestinationSettingsFontObservation struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type DvbSubDestinationSettingsFontParameters struct { + + // Key used to extract the password from EC2 Parameter store. + // +kubebuilder:validation:Optional + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` + + // Username for destination. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type DvbSubDestinationSettingsInitParameters struct { + + // justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. + Alignment *string `json:"alignment,omitempty" tf:"alignment,omitempty"` + + // in and DVB-Sub font settings must match. + BackgroundColor *string `json:"backgroundColor,omitempty" tf:"background_color,omitempty"` + + // in and DVB-Sub font settings must match. + BackgroundOpacity *float64 `json:"backgroundOpacity,omitempty" tf:"background_opacity,omitempty"` + + // in. File extension must be ‘ttf’ or ‘tte’. 
Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See Font for more details. + Font *DvbSubDestinationSettingsFontInitParameters `json:"font,omitempty" tf:"font,omitempty"` + + // in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. + FontColor *string `json:"fontColor,omitempty" tf:"font_color,omitempty"` + + // in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. + FontOpacity *float64 `json:"fontOpacity,omitempty" tf:"font_opacity,omitempty"` + + // in and DVB-Sub font settings must match. + FontResolution *float64 `json:"fontResolution,omitempty" tf:"font_resolution,omitempty"` + + // in and DVB-Sub font settings must match. + FontSize *string `json:"fontSize,omitempty" tf:"font_size,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + OutlineColor *string `json:"outlineColor,omitempty" tf:"outline_color,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + OutlineSize *float64 `json:"outlineSize,omitempty" tf:"outline_size,omitempty"` + + // in and DVB-Sub font settings must match. + ShadowColor *string `json:"shadowColor,omitempty" tf:"shadow_color,omitempty"` + + // in and DVB-Sub font settings must match. + ShadowOpacity *float64 `json:"shadowOpacity,omitempty" tf:"shadow_opacity,omitempty"` + + // 2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. 
+ ShadowXOffset *float64 `json:"shadowXOffset,omitempty" tf:"shadow_x_offset,omitempty"` + + // 2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. + ShadowYOffset *float64 `json:"shadowYOffset,omitempty" tf:"shadow_y_offset,omitempty"` + + // Sub/Burn-in outputs. + TeletextGridControl *string `json:"teletextGridControl,omitempty" tf:"teletext_grid_control,omitempty"` + + // in and DVB-Sub font settings must match. + XPosition *float64 `json:"xPosition,omitempty" tf:"x_position,omitempty"` + + // in and DVB-Sub font settings must match. + YPosition *float64 `json:"yPosition,omitempty" tf:"y_position,omitempty"` +} + +type DvbSubDestinationSettingsObservation struct { + + // justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. + Alignment *string `json:"alignment,omitempty" tf:"alignment,omitempty"` + + // in and DVB-Sub font settings must match. + BackgroundColor *string `json:"backgroundColor,omitempty" tf:"background_color,omitempty"` + + // in and DVB-Sub font settings must match. + BackgroundOpacity *float64 `json:"backgroundOpacity,omitempty" tf:"background_opacity,omitempty"` + + // in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See Font for more details. + Font *DvbSubDestinationSettingsFontObservation `json:"font,omitempty" tf:"font,omitempty"` + + // in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. 
+ FontColor *string `json:"fontColor,omitempty" tf:"font_color,omitempty"` + + // in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. + FontOpacity *float64 `json:"fontOpacity,omitempty" tf:"font_opacity,omitempty"` + + // in and DVB-Sub font settings must match. + FontResolution *float64 `json:"fontResolution,omitempty" tf:"font_resolution,omitempty"` + + // in and DVB-Sub font settings must match. + FontSize *string `json:"fontSize,omitempty" tf:"font_size,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + OutlineColor *string `json:"outlineColor,omitempty" tf:"outline_color,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + OutlineSize *float64 `json:"outlineSize,omitempty" tf:"outline_size,omitempty"` + + // in and DVB-Sub font settings must match. + ShadowColor *string `json:"shadowColor,omitempty" tf:"shadow_color,omitempty"` + + // in and DVB-Sub font settings must match. + ShadowOpacity *float64 `json:"shadowOpacity,omitempty" tf:"shadow_opacity,omitempty"` + + // 2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. + ShadowXOffset *float64 `json:"shadowXOffset,omitempty" tf:"shadow_x_offset,omitempty"` + + // 2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. + ShadowYOffset *float64 `json:"shadowYOffset,omitempty" tf:"shadow_y_offset,omitempty"` + + // Sub/Burn-in outputs. + TeletextGridControl *string `json:"teletextGridControl,omitempty" tf:"teletext_grid_control,omitempty"` + + // in and DVB-Sub font settings must match. + XPosition *float64 `json:"xPosition,omitempty" tf:"x_position,omitempty"` + + // in and DVB-Sub font settings must match. 
+ YPosition *float64 `json:"yPosition,omitempty" tf:"y_position,omitempty"` +} + +type DvbSubDestinationSettingsParameters struct { + + // justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + Alignment *string `json:"alignment,omitempty" tf:"alignment,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + BackgroundColor *string `json:"backgroundColor,omitempty" tf:"background_color,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + BackgroundOpacity *float64 `json:"backgroundOpacity,omitempty" tf:"background_opacity,omitempty"` + + // in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See Font for more details. + // +kubebuilder:validation:Optional + Font *DvbSubDestinationSettingsFontParameters `json:"font,omitempty" tf:"font,omitempty"` + + // in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + FontColor *string `json:"fontColor,omitempty" tf:"font_color,omitempty"` + + // in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + FontOpacity *float64 `json:"fontOpacity,omitempty" tf:"font_opacity,omitempty"` + + // in and DVB-Sub font settings must match. 
+ // +kubebuilder:validation:Optional + FontResolution *float64 `json:"fontResolution,omitempty" tf:"font_resolution,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + FontSize *string `json:"fontSize,omitempty" tf:"font_size,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + OutlineColor *string `json:"outlineColor,omitempty" tf:"outline_color,omitempty"` + + // defined by the caption stream. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + OutlineSize *float64 `json:"outlineSize,omitempty" tf:"outline_size,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + ShadowColor *string `json:"shadowColor,omitempty" tf:"shadow_color,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + ShadowOpacity *float64 `json:"shadowOpacity,omitempty" tf:"shadow_opacity,omitempty"` + + // 2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + ShadowXOffset *float64 `json:"shadowXOffset,omitempty" tf:"shadow_x_offset,omitempty"` + + // 2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + ShadowYOffset *float64 `json:"shadowYOffset,omitempty" tf:"shadow_y_offset,omitempty"` + + // Sub/Burn-in outputs. + // +kubebuilder:validation:Optional + TeletextGridControl *string `json:"teletextGridControl,omitempty" tf:"teletext_grid_control,omitempty"` + + // in and DVB-Sub font settings must match. + // +kubebuilder:validation:Optional + XPosition *float64 `json:"xPosition,omitempty" tf:"x_position,omitempty"` + + // in and DVB-Sub font settings must match. 
+ // +kubebuilder:validation:Optional + YPosition *float64 `json:"yPosition,omitempty" tf:"y_position,omitempty"` +} + +type DvbSubSourceSettingsInitParameters struct { + + // If you will configure a WebVTT caption description that references this caption selector, use this field to provide the language to consider when translating the image-based source to text. + OcrLanguage *string `json:"ocrLanguage,omitempty" tf:"ocr_language,omitempty"` + + // Selects a specific PID from within a source. + Pid *float64 `json:"pid,omitempty" tf:"pid,omitempty"` +} + +type DvbSubSourceSettingsObservation struct { + + // If you will configure a WebVTT caption description that references this caption selector, use this field to provide the language to consider when translating the image-based source to text. + OcrLanguage *string `json:"ocrLanguage,omitempty" tf:"ocr_language,omitempty"` + + // Selects a specific PID from within a source. + Pid *float64 `json:"pid,omitempty" tf:"pid,omitempty"` +} + +type DvbSubSourceSettingsParameters struct { + + // If you will configure a WebVTT caption description that references this caption selector, use this field to provide the language to consider when translating the image-based source to text. + // +kubebuilder:validation:Optional + OcrLanguage *string `json:"ocrLanguage,omitempty" tf:"ocr_language,omitempty"` + + // Selects a specific PID from within a source. 
+ // +kubebuilder:validation:Optional + Pid *float64 `json:"pid,omitempty" tf:"pid,omitempty"` +} + +type DvbTdtSettingsInitParameters struct { + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type DvbTdtSettingsObservation struct { + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type DvbTdtSettingsParameters struct { + + // +kubebuilder:validation:Optional + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type Eac3AtmosSettingsInitParameters struct { + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sets the dialnorm of the output. + Dialnorm *float64 `json:"dialnorm,omitempty" tf:"dialnorm,omitempty"` + + // Sets the Dolby dynamic range compression profile. + DrcLine *string `json:"drcLine,omitempty" tf:"drc_line,omitempty"` + + // Sets the profile for heavy Dolby dynamic range compression. + DrcRf *string `json:"drcRf,omitempty" tf:"drc_rf,omitempty"` + + // Height dimensional trim. + HeightTrim *float64 `json:"heightTrim,omitempty" tf:"height_trim,omitempty"` + + // Surround dimensional trim. + SurroundTrim *float64 `json:"surroundTrim,omitempty" tf:"surround_trim,omitempty"` +} + +type Eac3AtmosSettingsObservation struct { + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sets the dialnorm of the output. + Dialnorm *float64 `json:"dialnorm,omitempty" tf:"dialnorm,omitempty"` + + // Sets the Dolby dynamic range compression profile. + DrcLine *string `json:"drcLine,omitempty" tf:"drc_line,omitempty"` + + // Sets the profile for heavy Dolby dynamic range compression. 
+ DrcRf *string `json:"drcRf,omitempty" tf:"drc_rf,omitempty"` + + // Height dimensional trim. + HeightTrim *float64 `json:"heightTrim,omitempty" tf:"height_trim,omitempty"` + + // Surround dimensional trim. + SurroundTrim *float64 `json:"surroundTrim,omitempty" tf:"surround_trim,omitempty"` +} + +type Eac3AtmosSettingsParameters struct { + + // Average bitrate in bits/second. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + // +kubebuilder:validation:Optional + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sets the dialnorm of the output. + // +kubebuilder:validation:Optional + Dialnorm *float64 `json:"dialnorm,omitempty" tf:"dialnorm,omitempty"` + + // Sets the Dolby dynamic range compression profile. + // +kubebuilder:validation:Optional + DrcLine *string `json:"drcLine,omitempty" tf:"drc_line,omitempty"` + + // Sets the profile for heavy Dolby dynamic range compression. + // +kubebuilder:validation:Optional + DrcRf *string `json:"drcRf,omitempty" tf:"drc_rf,omitempty"` + + // Height dimensional trim. + // +kubebuilder:validation:Optional + HeightTrim *float64 `json:"heightTrim,omitempty" tf:"height_trim,omitempty"` + + // Surround dimensional trim. + // +kubebuilder:validation:Optional + SurroundTrim *float64 `json:"surroundTrim,omitempty" tf:"surround_trim,omitempty"` +} + +type Eac3SettingsInitParameters struct { + + // Sets the attenuation control. + AttenuationControl *string `json:"attenuationControl,omitempty" tf:"attenuation_control,omitempty"` + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Specifies the bitstream mode (bsmod) for the emitted AC-3 stream. + BitstreamMode *string `json:"bitstreamMode,omitempty" tf:"bitstream_mode,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. 
+ CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + DcFilter *string `json:"dcFilter,omitempty" tf:"dc_filter,omitempty"` + + // Sets the dialnorm of the output. + Dialnorm *float64 `json:"dialnorm,omitempty" tf:"dialnorm,omitempty"` + + // Sets the Dolby dynamic range compression profile. + DrcLine *string `json:"drcLine,omitempty" tf:"drc_line,omitempty"` + + // Sets the profile for heavy Dolby dynamic range compression. + DrcRf *string `json:"drcRf,omitempty" tf:"drc_rf,omitempty"` + + LfeControl *string `json:"lfeControl,omitempty" tf:"lfe_control,omitempty"` + + // When set to enabled, applies a 120Hz lowpass filter to the LFE channel prior to encoding. + LfeFilter *string `json:"lfeFilter,omitempty" tf:"lfe_filter,omitempty"` + + // H264 level. + LoRoCenterMixLevel *float64 `json:"loRoCenterMixLevel,omitempty" tf:"lo_ro_center_mix_level,omitempty"` + + // H264 level. + LoRoSurroundMixLevel *float64 `json:"loRoSurroundMixLevel,omitempty" tf:"lo_ro_surround_mix_level,omitempty"` + + // H264 level. + LtRtCenterMixLevel *float64 `json:"ltRtCenterMixLevel,omitempty" tf:"lt_rt_center_mix_level,omitempty"` + + // H264 level. + LtRtSurroundMixLevel *float64 `json:"ltRtSurroundMixLevel,omitempty" tf:"lt_rt_surround_mix_level,omitempty"` + + // Metadata control. + MetadataControl *string `json:"metadataControl,omitempty" tf:"metadata_control,omitempty"` + + PassthroughControl *string `json:"passthroughControl,omitempty" tf:"passthrough_control,omitempty"` + + PhaseControl *string `json:"phaseControl,omitempty" tf:"phase_control,omitempty"` + + StereoDownmix *string `json:"stereoDownmix,omitempty" tf:"stereo_downmix,omitempty"` + + SurroundExMode *string `json:"surroundExMode,omitempty" tf:"surround_ex_mode,omitempty"` + + SurroundMode *string `json:"surroundMode,omitempty" tf:"surround_mode,omitempty"` +} + +type Eac3SettingsObservation struct { + + // Sets the attenuation control. 
+ AttenuationControl *string `json:"attenuationControl,omitempty" tf:"attenuation_control,omitempty"` + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Specifies the bitstream mode (bsmod) for the emitted AC-3 stream. + BitstreamMode *string `json:"bitstreamMode,omitempty" tf:"bitstream_mode,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + DcFilter *string `json:"dcFilter,omitempty" tf:"dc_filter,omitempty"` + + // Sets the dialnorm of the output. + Dialnorm *float64 `json:"dialnorm,omitempty" tf:"dialnorm,omitempty"` + + // Sets the Dolby dynamic range compression profile. + DrcLine *string `json:"drcLine,omitempty" tf:"drc_line,omitempty"` + + // Sets the profile for heavy Dolby dynamic range compression. + DrcRf *string `json:"drcRf,omitempty" tf:"drc_rf,omitempty"` + + LfeControl *string `json:"lfeControl,omitempty" tf:"lfe_control,omitempty"` + + // When set to enabled, applies a 120Hz lowpass filter to the LFE channel prior to encoding. + LfeFilter *string `json:"lfeFilter,omitempty" tf:"lfe_filter,omitempty"` + + // H264 level. + LoRoCenterMixLevel *float64 `json:"loRoCenterMixLevel,omitempty" tf:"lo_ro_center_mix_level,omitempty"` + + // H264 level. + LoRoSurroundMixLevel *float64 `json:"loRoSurroundMixLevel,omitempty" tf:"lo_ro_surround_mix_level,omitempty"` + + // H264 level. + LtRtCenterMixLevel *float64 `json:"ltRtCenterMixLevel,omitempty" tf:"lt_rt_center_mix_level,omitempty"` + + // H264 level. + LtRtSurroundMixLevel *float64 `json:"ltRtSurroundMixLevel,omitempty" tf:"lt_rt_surround_mix_level,omitempty"` + + // Metadata control. 
+ MetadataControl *string `json:"metadataControl,omitempty" tf:"metadata_control,omitempty"` + + PassthroughControl *string `json:"passthroughControl,omitempty" tf:"passthrough_control,omitempty"` + + PhaseControl *string `json:"phaseControl,omitempty" tf:"phase_control,omitempty"` + + StereoDownmix *string `json:"stereoDownmix,omitempty" tf:"stereo_downmix,omitempty"` + + SurroundExMode *string `json:"surroundExMode,omitempty" tf:"surround_ex_mode,omitempty"` + + SurroundMode *string `json:"surroundMode,omitempty" tf:"surround_mode,omitempty"` +} + +type Eac3SettingsParameters struct { + + // Sets the attenuation control. + // +kubebuilder:validation:Optional + AttenuationControl *string `json:"attenuationControl,omitempty" tf:"attenuation_control,omitempty"` + + // Average bitrate in bits/second. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Specifies the bitstream mode (bsmod) for the emitted AC-3 stream. + // +kubebuilder:validation:Optional + BitstreamMode *string `json:"bitstreamMode,omitempty" tf:"bitstream_mode,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + // +kubebuilder:validation:Optional + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // +kubebuilder:validation:Optional + DcFilter *string `json:"dcFilter,omitempty" tf:"dc_filter,omitempty"` + + // Sets the dialnorm of the output. + // +kubebuilder:validation:Optional + Dialnorm *float64 `json:"dialnorm,omitempty" tf:"dialnorm,omitempty"` + + // Sets the Dolby dynamic range compression profile. + // +kubebuilder:validation:Optional + DrcLine *string `json:"drcLine,omitempty" tf:"drc_line,omitempty"` + + // Sets the profile for heavy Dolby dynamic range compression. 
+ // +kubebuilder:validation:Optional + DrcRf *string `json:"drcRf,omitempty" tf:"drc_rf,omitempty"` + + // +kubebuilder:validation:Optional + LfeControl *string `json:"lfeControl,omitempty" tf:"lfe_control,omitempty"` + + // When set to enabled, applies a 120Hz lowpass filter to the LFE channel prior to encoding. + // +kubebuilder:validation:Optional + LfeFilter *string `json:"lfeFilter,omitempty" tf:"lfe_filter,omitempty"` + + // H264 level. + // +kubebuilder:validation:Optional + LoRoCenterMixLevel *float64 `json:"loRoCenterMixLevel,omitempty" tf:"lo_ro_center_mix_level,omitempty"` + + // H264 level. + // +kubebuilder:validation:Optional + LoRoSurroundMixLevel *float64 `json:"loRoSurroundMixLevel,omitempty" tf:"lo_ro_surround_mix_level,omitempty"` + + // H264 level. + // +kubebuilder:validation:Optional + LtRtCenterMixLevel *float64 `json:"ltRtCenterMixLevel,omitempty" tf:"lt_rt_center_mix_level,omitempty"` + + // H264 level. + // +kubebuilder:validation:Optional + LtRtSurroundMixLevel *float64 `json:"ltRtSurroundMixLevel,omitempty" tf:"lt_rt_surround_mix_level,omitempty"` + + // Metadata control. 
+ // +kubebuilder:validation:Optional + MetadataControl *string `json:"metadataControl,omitempty" tf:"metadata_control,omitempty"` + + // +kubebuilder:validation:Optional + PassthroughControl *string `json:"passthroughControl,omitempty" tf:"passthrough_control,omitempty"` + + // +kubebuilder:validation:Optional + PhaseControl *string `json:"phaseControl,omitempty" tf:"phase_control,omitempty"` + + // +kubebuilder:validation:Optional + StereoDownmix *string `json:"stereoDownmix,omitempty" tf:"stereo_downmix,omitempty"` + + // +kubebuilder:validation:Optional + SurroundExMode *string `json:"surroundExMode,omitempty" tf:"surround_ex_mode,omitempty"` + + // +kubebuilder:validation:Optional + SurroundMode *string `json:"surroundMode,omitempty" tf:"surround_mode,omitempty"` +} + +type EbuTtDDestinationSettingsInitParameters struct { + + // – Complete this field if you want to include the name of the copyright holder in the copyright tag in the captions metadata. + CopyrightHolder *string `json:"copyrightHolder,omitempty" tf:"copyright_holder,omitempty"` + + // line captions). - enabled: Fill with the captions background color (as specified in the input captions). - disabled: Leave the gap unfilled. + FillLineGap *string `json:"fillLineGap,omitempty" tf:"fill_line_gap,omitempty"` + + // TT captions. Valid only if styleControl is set to include. If you leave this field empty, the font family is set to “monospaced”. (If styleControl is set to exclude, the font family is always set to “monospaced”.) You specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. The size is always set to 100% to allow the downstream player to choose the size. - Enter a list of font families, as a comma-separated list of font names, in order of preference. The name can be a font family (such as “Arial”), or a generic font family (such as “serif”), or “default” (to let the downstream player choose the font). 
- Leave blank to set the family to “monospace”. + FontFamily *string `json:"fontFamily,omitempty" tf:"font_family,omitempty"` + + // TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. + StyleControl *string `json:"styleControl,omitempty" tf:"style_control,omitempty"` +} + +type EbuTtDDestinationSettingsObservation struct { + + // – Complete this field if you want to include the name of the copyright holder in the copyright tag in the captions metadata. + CopyrightHolder *string `json:"copyrightHolder,omitempty" tf:"copyright_holder,omitempty"` + + // line captions). - enabled: Fill with the captions background color (as specified in the input captions). - disabled: Leave the gap unfilled. + FillLineGap *string `json:"fillLineGap,omitempty" tf:"fill_line_gap,omitempty"` + + // TT captions. Valid only if styleControl is set to include. If you leave this field empty, the font family is set to “monospaced”. (If styleControl is set to exclude, the font family is always set to “monospaced”.) You specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. The size is always set to 100% to allow the downstream player to choose the size. - Enter a list of font families, as a comma-separated list of font names, in order of preference. The name can be a font family (such as “Arial”), or a generic font family (such as “serif”), or “default” (to let the downstream player choose the font). - Leave blank to set the family to “monospace”. + FontFamily *string `json:"fontFamily,omitempty" tf:"font_family,omitempty"` + + // TT captions. 
- include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. + StyleControl *string `json:"styleControl,omitempty" tf:"style_control,omitempty"` +} + +type EbuTtDDestinationSettingsParameters struct { + + // – Complete this field if you want to include the name of the copyright holder in the copyright tag in the captions metadata. + // +kubebuilder:validation:Optional + CopyrightHolder *string `json:"copyrightHolder,omitempty" tf:"copyright_holder,omitempty"` + + // line captions). - enabled: Fill with the captions background color (as specified in the input captions). - disabled: Leave the gap unfilled. + // +kubebuilder:validation:Optional + FillLineGap *string `json:"fillLineGap,omitempty" tf:"fill_line_gap,omitempty"` + + // TT captions. Valid only if styleControl is set to include. If you leave this field empty, the font family is set to “monospaced”. (If styleControl is set to exclude, the font family is always set to “monospaced”.) You specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. The size is always set to 100% to allow the downstream player to choose the size. - Enter a list of font families, as a comma-separated list of font names, in order of preference. The name can be a font family (such as “Arial”), or a generic font family (such as “serif”), or “default” (to let the downstream player choose the font). - Leave blank to set the family to “monospace”. + // +kubebuilder:validation:Optional + FontFamily *string `json:"fontFamily,omitempty" tf:"font_family,omitempty"` + + // TT captions. 
- include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. + // +kubebuilder:validation:Optional + StyleControl *string `json:"styleControl,omitempty" tf:"style_control,omitempty"` +} + +type EmbeddedDestinationSettingsInitParameters struct { +} + +type EmbeddedDestinationSettingsObservation struct { +} + +type EmbeddedDestinationSettingsParameters struct { +} + +type EmbeddedPlusScte20DestinationSettingsInitParameters struct { +} + +type EmbeddedPlusScte20DestinationSettingsObservation struct { +} + +type EmbeddedPlusScte20DestinationSettingsParameters struct { +} + +type EmbeddedSourceSettingsInitParameters struct { + + // If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. + Convert608To708 *string `json:"convert608To708,omitempty" tf:"convert_608_to_708,omitempty"` + + // Set to “auto” to handle streams with intermittent and/or non-aligned SCTE-20 and Embedded captions. + Scte20Detection *string `json:"scte20Detection,omitempty" tf:"scte20_detection,omitempty"` + + // Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough. + Source608ChannelNumber *float64 `json:"source608ChannelNumber,omitempty" tf:"source_608_channel_number,omitempty"` +} + +type EmbeddedSourceSettingsObservation struct { + + // If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. 
+ Convert608To708 *string `json:"convert608To708,omitempty" tf:"convert_608_to_708,omitempty"` + + // Set to “auto” to handle streams with intermittent and/or non-aligned SCTE-20 and Embedded captions. + Scte20Detection *string `json:"scte20Detection,omitempty" tf:"scte20_detection,omitempty"` + + // Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough. + Source608ChannelNumber *float64 `json:"source608ChannelNumber,omitempty" tf:"source_608_channel_number,omitempty"` +} + +type EmbeddedSourceSettingsParameters struct { + + // If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. + // +kubebuilder:validation:Optional + Convert608To708 *string `json:"convert608To708,omitempty" tf:"convert_608_to_708,omitempty"` + + // Set to “auto” to handle streams with intermittent and/or non-aligned SCTE-20 and Embedded captions. + // +kubebuilder:validation:Optional + Scte20Detection *string `json:"scte20Detection,omitempty" tf:"scte20_detection,omitempty"` + + // Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough. + // +kubebuilder:validation:Optional + Source608ChannelNumber *float64 `json:"source608ChannelNumber,omitempty" tf:"source_608_channel_number,omitempty"` +} + +type EncoderSettingsInitParameters struct { + + // Audio descriptions for the channel. See Audio Descriptions for more details. + AudioDescriptions []AudioDescriptionsInitParameters `json:"audioDescriptions,omitempty" tf:"audio_descriptions,omitempty"` + + // Settings for ad avail blanking. See Avail Blanking for more details. + AvailBlanking *AvailBlankingInitParameters `json:"availBlanking,omitempty" tf:"avail_blanking,omitempty"` + + // Caption Descriptions. See Caption Descriptions for more details. 
+ CaptionDescriptions []CaptionDescriptionsInitParameters `json:"captionDescriptions,omitempty" tf:"caption_descriptions,omitempty"` + + // Configuration settings that apply to the event as a whole. See Global Configuration for more details. + GlobalConfiguration *GlobalConfigurationInitParameters `json:"globalConfiguration,omitempty" tf:"global_configuration,omitempty"` + + // Settings for motion graphics. See Motion Graphics Configuration for more details. + MotionGraphicsConfiguration *MotionGraphicsConfigurationInitParameters `json:"motionGraphicsConfiguration,omitempty" tf:"motion_graphics_configuration,omitempty"` + + // Nielsen configuration settings. See Nielsen Configuration for more details. + NielsenConfiguration *NielsenConfigurationInitParameters `json:"nielsenConfiguration,omitempty" tf:"nielsen_configuration,omitempty"` + + // Output groups for the channel. See Output Groups for more details. + OutputGroups []OutputGroupsInitParameters `json:"outputGroups,omitempty" tf:"output_groups,omitempty"` + + // Contains settings used to acquire and adjust timecode information from inputs. See Timecode Config for more details. + TimecodeConfig *TimecodeConfigInitParameters `json:"timecodeConfig,omitempty" tf:"timecode_config,omitempty"` + + // Video Descriptions. See Video Descriptions for more details. + VideoDescriptions []VideoDescriptionsInitParameters `json:"videoDescriptions,omitempty" tf:"video_descriptions,omitempty"` +} + +type EncoderSettingsObservation struct { + + // Audio descriptions for the channel. See Audio Descriptions for more details. + AudioDescriptions []AudioDescriptionsObservation `json:"audioDescriptions,omitempty" tf:"audio_descriptions,omitempty"` + + // Settings for ad avail blanking. See Avail Blanking for more details. + AvailBlanking *AvailBlankingObservation `json:"availBlanking,omitempty" tf:"avail_blanking,omitempty"` + + // Caption Descriptions. See Caption Descriptions for more details. 
+ CaptionDescriptions []CaptionDescriptionsObservation `json:"captionDescriptions,omitempty" tf:"caption_descriptions,omitempty"` + + // Configuration settings that apply to the event as a whole. See Global Configuration for more details. + GlobalConfiguration *GlobalConfigurationObservation `json:"globalConfiguration,omitempty" tf:"global_configuration,omitempty"` + + // Settings for motion graphics. See Motion Graphics Configuration for more details. + MotionGraphicsConfiguration *MotionGraphicsConfigurationObservation `json:"motionGraphicsConfiguration,omitempty" tf:"motion_graphics_configuration,omitempty"` + + // Nielsen configuration settings. See Nielsen Configuration for more details. + NielsenConfiguration *NielsenConfigurationObservation `json:"nielsenConfiguration,omitempty" tf:"nielsen_configuration,omitempty"` + + // Output groups for the channel. See Output Groups for more details. + OutputGroups []OutputGroupsObservation `json:"outputGroups,omitempty" tf:"output_groups,omitempty"` + + // Contains settings used to acquire and adjust timecode information from inputs. See Timecode Config for more details. + TimecodeConfig *TimecodeConfigObservation `json:"timecodeConfig,omitempty" tf:"timecode_config,omitempty"` + + // Video Descriptions. See Video Descriptions for more details. + VideoDescriptions []VideoDescriptionsObservation `json:"videoDescriptions,omitempty" tf:"video_descriptions,omitempty"` +} + +type EncoderSettingsParameters struct { + + // Audio descriptions for the channel. See Audio Descriptions for more details. + // +kubebuilder:validation:Optional + AudioDescriptions []AudioDescriptionsParameters `json:"audioDescriptions,omitempty" tf:"audio_descriptions,omitempty"` + + // Settings for ad avail blanking. See Avail Blanking for more details. + // +kubebuilder:validation:Optional + AvailBlanking *AvailBlankingParameters `json:"availBlanking,omitempty" tf:"avail_blanking,omitempty"` + + // Caption Descriptions. 
See Caption Descriptions for more details. + // +kubebuilder:validation:Optional + CaptionDescriptions []CaptionDescriptionsParameters `json:"captionDescriptions,omitempty" tf:"caption_descriptions,omitempty"` + + // Configuration settings that apply to the event as a whole. See Global Configuration for more details. + // +kubebuilder:validation:Optional + GlobalConfiguration *GlobalConfigurationParameters `json:"globalConfiguration,omitempty" tf:"global_configuration,omitempty"` + + // Settings for motion graphics. See Motion Graphics Configuration for more details. + // +kubebuilder:validation:Optional + MotionGraphicsConfiguration *MotionGraphicsConfigurationParameters `json:"motionGraphicsConfiguration,omitempty" tf:"motion_graphics_configuration,omitempty"` + + // Nielsen configuration settings. See Nielsen Configuration for more details. + // +kubebuilder:validation:Optional + NielsenConfiguration *NielsenConfigurationParameters `json:"nielsenConfiguration,omitempty" tf:"nielsen_configuration,omitempty"` + + // Output groups for the channel. See Output Groups for more details. + // +kubebuilder:validation:Optional + OutputGroups []OutputGroupsParameters `json:"outputGroups" tf:"output_groups,omitempty"` + + // Contains settings used to acquire and adjust timecode information from inputs. See Timecode Config for more details. + // +kubebuilder:validation:Optional + TimecodeConfig *TimecodeConfigParameters `json:"timecodeConfig" tf:"timecode_config,omitempty"` + + // Video Descriptions. See Video Descriptions for more details. + // +kubebuilder:validation:Optional + VideoDescriptions []VideoDescriptionsParameters `json:"videoDescriptions,omitempty" tf:"video_descriptions,omitempty"` +} + +type FailoverConditionInitParameters struct { + + // Failover condition type-specific settings. See Failover Condition Settings for more details. 
+ FailoverConditionSettings *FailoverConditionSettingsInitParameters `json:"failoverConditionSettings,omitempty" tf:"failover_condition_settings,omitempty"` +} + +type FailoverConditionObservation struct { + + // Failover condition type-specific settings. See Failover Condition Settings for more details. + FailoverConditionSettings *FailoverConditionSettingsObservation `json:"failoverConditionSettings,omitempty" tf:"failover_condition_settings,omitempty"` +} + +type FailoverConditionParameters struct { + + // Failover condition type-specific settings. See Failover Condition Settings for more details. + // +kubebuilder:validation:Optional + FailoverConditionSettings *FailoverConditionSettingsParameters `json:"failoverConditionSettings,omitempty" tf:"failover_condition_settings,omitempty"` +} + +type FailoverConditionSettingsInitParameters struct { + + // MediaLive will perform a failover if the specified audio selector is silent for the specified period. See Audio Silence Failover Settings for more details. + AudioSilenceSettings *AudioSilenceSettingsInitParameters `json:"audioSilenceSettings,omitempty" tf:"audio_silence_settings,omitempty"` + + // MediaLive will perform a failover if content is not detected in this input for the specified period. See Input Loss Failover Settings for more details. + InputLossSettings *InputLossSettingsInitParameters `json:"inputLossSettings,omitempty" tf:"input_loss_settings,omitempty"` + + // MediaLive will perform a failover if content is considered black for the specified period. See Video Black Failover Settings for more details. + VideoBlackSettings *VideoBlackSettingsInitParameters `json:"videoBlackSettings,omitempty" tf:"video_black_settings,omitempty"` +} + +type FailoverConditionSettingsObservation struct { + + // MediaLive will perform a failover if the specified audio selector is silent for the specified period. See Audio Silence Failover Settings for more details. 
+ AudioSilenceSettings *AudioSilenceSettingsObservation `json:"audioSilenceSettings,omitempty" tf:"audio_silence_settings,omitempty"` + + // MediaLive will perform a failover if content is not detected in this input for the specified period. See Input Loss Failover Settings for more details. + InputLossSettings *InputLossSettingsObservation `json:"inputLossSettings,omitempty" tf:"input_loss_settings,omitempty"` + + // MediaLive will perform a failover if content is considered black for the specified period. See Video Black Failover Settings for more details. + VideoBlackSettings *VideoBlackSettingsObservation `json:"videoBlackSettings,omitempty" tf:"video_black_settings,omitempty"` +} + +type FailoverConditionSettingsParameters struct { + + // MediaLive will perform a failover if the specified audio selector is silent for the specified period. See Audio Silence Failover Settings for more details. + // +kubebuilder:validation:Optional + AudioSilenceSettings *AudioSilenceSettingsParameters `json:"audioSilenceSettings,omitempty" tf:"audio_silence_settings,omitempty"` + + // MediaLive will perform a failover if content is not detected in this input for the specified period. See Input Loss Failover Settings for more details. + // +kubebuilder:validation:Optional + InputLossSettings *InputLossSettingsParameters `json:"inputLossSettings,omitempty" tf:"input_loss_settings,omitempty"` + + // MediaLive will perform a failover if content is considered black for the specified period. See Video Black Failover Settings for more details. + // +kubebuilder:validation:Optional + VideoBlackSettings *VideoBlackSettingsParameters `json:"videoBlackSettings,omitempty" tf:"video_black_settings,omitempty"` +} + +type FecOutputSettingsInitParameters struct { + + // The height of the FEC protection matrix. + ColumnDepth *float64 `json:"columnDepth,omitempty" tf:"column_depth,omitempty"` + + // Enables column only or column and row based FEC. 
+ IncludeFec *string `json:"includeFec,omitempty" tf:"include_fec,omitempty"` + + // The width of the FEC protection matrix. + RowLength *float64 `json:"rowLength,omitempty" tf:"row_length,omitempty"` +} + +type FecOutputSettingsObservation struct { + + // The height of the FEC protection matrix. + ColumnDepth *float64 `json:"columnDepth,omitempty" tf:"column_depth,omitempty"` + + // Enables column only or column and row based FEC. + IncludeFec *string `json:"includeFec,omitempty" tf:"include_fec,omitempty"` + + // The width of the FEC protection matrix. + RowLength *float64 `json:"rowLength,omitempty" tf:"row_length,omitempty"` +} + +type FecOutputSettingsParameters struct { + + // The height of the FEC protection matrix. + // +kubebuilder:validation:Optional + ColumnDepth *float64 `json:"columnDepth,omitempty" tf:"column_depth,omitempty"` + + // Enables column only or column and row based FEC. + // +kubebuilder:validation:Optional + IncludeFec *string `json:"includeFec,omitempty" tf:"include_fec,omitempty"` + + // The width of the FEC protection matrix. + // +kubebuilder:validation:Optional + RowLength *float64 `json:"rowLength,omitempty" tf:"row_length,omitempty"` +} + +type FilterSettingsInitParameters struct { + + // Temporal filter settings. See Temporal Filter Settings + TemporalFilterSettings *TemporalFilterSettingsInitParameters `json:"temporalFilterSettings,omitempty" tf:"temporal_filter_settings,omitempty"` +} + +type FilterSettingsObservation struct { + + // Temporal filter settings. See Temporal Filter Settings + TemporalFilterSettings *TemporalFilterSettingsObservation `json:"temporalFilterSettings,omitempty" tf:"temporal_filter_settings,omitempty"` +} + +type FilterSettingsParameters struct { + + // Temporal filter settings. 
See Temporal Filter Settings + // +kubebuilder:validation:Optional + TemporalFilterSettings *TemporalFilterSettingsParameters `json:"temporalFilterSettings,omitempty" tf:"temporal_filter_settings,omitempty"` +} + +type FilterSettingsTemporalFilterSettingsInitParameters struct { + + // Post filter sharpening. + PostFilterSharpening *string `json:"postFilterSharpening,omitempty" tf:"post_filter_sharpening,omitempty"` + + // Filter strength. + Strength *string `json:"strength,omitempty" tf:"strength,omitempty"` +} + +type FilterSettingsTemporalFilterSettingsObservation struct { + + // Post filter sharpening. + PostFilterSharpening *string `json:"postFilterSharpening,omitempty" tf:"post_filter_sharpening,omitempty"` + + // Filter strength. + Strength *string `json:"strength,omitempty" tf:"strength,omitempty"` +} + +type FilterSettingsTemporalFilterSettingsParameters struct { + + // Post filter sharpening. + // +kubebuilder:validation:Optional + PostFilterSharpening *string `json:"postFilterSharpening,omitempty" tf:"post_filter_sharpening,omitempty"` + + // Filter strength. 
+ // +kubebuilder:validation:Optional + Strength *string `json:"strength,omitempty" tf:"strength,omitempty"` +} + +type Fmp4HlsSettingsInitParameters struct { + AudioRenditionSets *string `json:"audioRenditionSets,omitempty" tf:"audio_rendition_sets,omitempty"` + + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` +} + +type Fmp4HlsSettingsObservation struct { + AudioRenditionSets *string `json:"audioRenditionSets,omitempty" tf:"audio_rendition_sets,omitempty"` + + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` +} + +type Fmp4HlsSettingsParameters struct { + + // +kubebuilder:validation:Optional + AudioRenditionSets *string `json:"audioRenditionSets,omitempty" tf:"audio_rendition_sets,omitempty"` + + // +kubebuilder:validation:Optional + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + // +kubebuilder:validation:Optional + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` +} + +type FontInitParameters struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type FontObservation struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. 
+ URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type FontParameters struct { + + // Key used to extract the password from EC2 Parameter store. + // +kubebuilder:validation:Optional + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` + + // Username for destination. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type FrameCaptureCdnSettingsInitParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + FrameCaptureS3Settings *FrameCaptureS3SettingsInitParameters `json:"frameCaptureS3Settings,omitempty" tf:"frame_capture_s3_settings,omitempty"` +} + +type FrameCaptureCdnSettingsObservation struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + FrameCaptureS3Settings *FrameCaptureS3SettingsObservation `json:"frameCaptureS3Settings,omitempty" tf:"frame_capture_s3_settings,omitempty"` +} + +type FrameCaptureCdnSettingsParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + FrameCaptureS3Settings *FrameCaptureS3SettingsParameters `json:"frameCaptureS3Settings,omitempty" tf:"frame_capture_s3_settings,omitempty"` +} + +type FrameCaptureGroupSettingsDestinationInitParameters struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type FrameCaptureGroupSettingsDestinationObservation struct { + + // Reference ID for the destination. 
+ DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type FrameCaptureGroupSettingsDestinationParameters struct { + + // Reference ID for the destination. + // +kubebuilder:validation:Optional + DestinationRefID *string `json:"destinationRefId" tf:"destination_ref_id,omitempty"` +} + +type FrameCaptureGroupSettingsInitParameters struct { + + // A director and base filename where archive files should be written. See Destination for more details. + Destination *FrameCaptureGroupSettingsDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + FrameCaptureCdnSettings *FrameCaptureCdnSettingsInitParameters `json:"frameCaptureCdnSettings,omitempty" tf:"frame_capture_cdn_settings,omitempty"` +} + +type FrameCaptureGroupSettingsObservation struct { + + // A director and base filename where archive files should be written. See Destination for more details. + Destination *FrameCaptureGroupSettingsDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + FrameCaptureCdnSettings *FrameCaptureCdnSettingsObservation `json:"frameCaptureCdnSettings,omitempty" tf:"frame_capture_cdn_settings,omitempty"` +} + +type FrameCaptureGroupSettingsParameters struct { + + // A director and base filename where archive files should be written. See Destination for more details. + // +kubebuilder:validation:Optional + Destination *FrameCaptureGroupSettingsDestinationParameters `json:"destination" tf:"destination,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ // +kubebuilder:validation:Optional + FrameCaptureCdnSettings *FrameCaptureCdnSettingsParameters `json:"frameCaptureCdnSettings,omitempty" tf:"frame_capture_cdn_settings,omitempty"` +} + +type FrameCaptureHlsSettingsInitParameters struct { +} + +type FrameCaptureHlsSettingsObservation struct { +} + +type FrameCaptureHlsSettingsParameters struct { +} + +type FrameCaptureOutputSettingsInitParameters struct { + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` +} + +type FrameCaptureOutputSettingsObservation struct { + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` +} + +type FrameCaptureOutputSettingsParameters struct { + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. + // +kubebuilder:validation:Optional + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` +} + +type FrameCaptureS3SettingsInitParameters struct { + + // Specify the canned ACL to apply to each S3 request. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` +} + +type FrameCaptureS3SettingsObservation struct { + + // Specify the canned ACL to apply to each S3 request. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` +} + +type FrameCaptureS3SettingsParameters struct { + + // Specify the canned ACL to apply to each S3 request. + // +kubebuilder:validation:Optional + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` +} + +type FrameCaptureSettingsInitParameters struct { + + // The frequency at which to capture frames for inclusion in the output. 
+ CaptureInterval *float64 `json:"captureInterval,omitempty" tf:"capture_interval,omitempty"` + + // Unit for the frame capture interval. + CaptureIntervalUnits *string `json:"captureIntervalUnits,omitempty" tf:"capture_interval_units,omitempty"` +} + +type FrameCaptureSettingsObservation struct { + + // The frequency at which to capture frames for inclusion in the output. + CaptureInterval *float64 `json:"captureInterval,omitempty" tf:"capture_interval,omitempty"` + + // Unit for the frame capture interval. + CaptureIntervalUnits *string `json:"captureIntervalUnits,omitempty" tf:"capture_interval_units,omitempty"` +} + +type FrameCaptureSettingsParameters struct { + + // The frequency at which to capture frames for inclusion in the output. + // +kubebuilder:validation:Optional + CaptureInterval *float64 `json:"captureInterval,omitempty" tf:"capture_interval,omitempty"` + + // Unit for the frame capture interval. + // +kubebuilder:validation:Optional + CaptureIntervalUnits *string `json:"captureIntervalUnits,omitempty" tf:"capture_interval_units,omitempty"` +} + +type GlobalConfigurationInitParameters struct { + + // – Value to set the initial audio gain for the Live Event. + InitialAudioGain *float64 `json:"initialAudioGain,omitempty" tf:"initial_audio_gain,omitempty"` + + // of-file). When switchAndLoopInputs is configured the encoder will restart at the beginning of the first input. When “none” is configured the encoder will transcode either black, a solid color, or a user specified slate images per the “Input Loss Behavior” configuration until the next input switch occurs (which is controlled through the Channel Schedule API). + InputEndAction *string `json:"inputEndAction,omitempty" tf:"input_end_action,omitempty"` + + // Settings for system actions when input is lost. See Input Loss Behavior for more details. 
+ InputLossBehavior *InputLossBehaviorInitParameters `json:"inputLossBehavior,omitempty" tf:"input_loss_behavior,omitempty"` + + // MediaLive will attempt to synchronize the output of each pipeline to the other. EPOCH_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the Unix epoch. + OutputLockingMode *string `json:"outputLockingMode,omitempty" tf:"output_locking_mode,omitempty"` + + // – Indicates whether the rate of frames emitted by the Live encoder should be paced by its system clock (which optionally may be locked to another source via NTP) or should be locked to the clock of the source that is providing the input stream. + OutputTimingSource *string `json:"outputTimingSource,omitempty" tf:"output_timing_source,omitempty"` + + // – Adjusts video input buffer for streams with very low video framerates. This is commonly set to enabled for music channels with less than one video frame per second. + SupportLowFramerateInputs *string `json:"supportLowFramerateInputs,omitempty" tf:"support_low_framerate_inputs,omitempty"` +} + +type GlobalConfigurationObservation struct { + + // – Value to set the initial audio gain for the Live Event. + InitialAudioGain *float64 `json:"initialAudioGain,omitempty" tf:"initial_audio_gain,omitempty"` + + // of-file). When switchAndLoopInputs is configured the encoder will restart at the beginning of the first input. When “none” is configured the encoder will transcode either black, a solid color, or a user specified slate images per the “Input Loss Behavior” configuration until the next input switch occurs (which is controlled through the Channel Schedule API). + InputEndAction *string `json:"inputEndAction,omitempty" tf:"input_end_action,omitempty"` + + // Settings for system actions when input is lost. See Input Loss Behavior for more details. 
+ InputLossBehavior *InputLossBehaviorObservation `json:"inputLossBehavior,omitempty" tf:"input_loss_behavior,omitempty"` + + // MediaLive will attempt to synchronize the output of each pipeline to the other. EPOCH_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the Unix epoch. + OutputLockingMode *string `json:"outputLockingMode,omitempty" tf:"output_locking_mode,omitempty"` + + // – Indicates whether the rate of frames emitted by the Live encoder should be paced by its system clock (which optionally may be locked to another source via NTP) or should be locked to the clock of the source that is providing the input stream. + OutputTimingSource *string `json:"outputTimingSource,omitempty" tf:"output_timing_source,omitempty"` + + // – Adjusts video input buffer for streams with very low video framerates. This is commonly set to enabled for music channels with less than one video frame per second. + SupportLowFramerateInputs *string `json:"supportLowFramerateInputs,omitempty" tf:"support_low_framerate_inputs,omitempty"` +} + +type GlobalConfigurationParameters struct { + + // – Value to set the initial audio gain for the Live Event. + // +kubebuilder:validation:Optional + InitialAudioGain *float64 `json:"initialAudioGain,omitempty" tf:"initial_audio_gain,omitempty"` + + // of-file). When switchAndLoopInputs is configured the encoder will restart at the beginning of the first input. When “none” is configured the encoder will transcode either black, a solid color, or a user specified slate images per the “Input Loss Behavior” configuration until the next input switch occurs (which is controlled through the Channel Schedule API). + // +kubebuilder:validation:Optional + InputEndAction *string `json:"inputEndAction,omitempty" tf:"input_end_action,omitempty"` + + // Settings for system actions when input is lost. See Input Loss Behavior for more details. 
+ // +kubebuilder:validation:Optional + InputLossBehavior *InputLossBehaviorParameters `json:"inputLossBehavior,omitempty" tf:"input_loss_behavior,omitempty"` + + // MediaLive will attempt to synchronize the output of each pipeline to the other. EPOCH_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the Unix epoch. + // +kubebuilder:validation:Optional + OutputLockingMode *string `json:"outputLockingMode,omitempty" tf:"output_locking_mode,omitempty"` + + // – Indicates whether the rate of frames emitted by the Live encoder should be paced by its system clock (which optionally may be locked to another source via NTP) or should be locked to the clock of the source that is providing the input stream. + // +kubebuilder:validation:Optional + OutputTimingSource *string `json:"outputTimingSource,omitempty" tf:"output_timing_source,omitempty"` + + // – Adjusts video input buffer for streams with very low video framerates. This is commonly set to enabled for music channels with less than one video frame per second. + // +kubebuilder:validation:Optional + SupportLowFramerateInputs *string `json:"supportLowFramerateInputs,omitempty" tf:"support_low_framerate_inputs,omitempty"` +} + +type H264SettingsInitParameters struct { + + // Enables or disables adaptive quantization. + AdaptiveQuantization *string `json:"adaptiveQuantization,omitempty" tf:"adaptive_quantization,omitempty"` + + // Indicates that AFD values will be written into the output stream. + AfdSignaling *string `json:"afdSignaling,omitempty" tf:"afd_signaling,omitempty"` + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + BufFillPct *float64 `json:"bufFillPct,omitempty" tf:"buf_fill_pct,omitempty"` + + // Size of buffer in bits. + BufSize *float64 `json:"bufSize,omitempty" tf:"buf_size,omitempty"` + + // Includes color space metadata in the output. 
+ ColorMetadata *string `json:"colorMetadata,omitempty" tf:"color_metadata,omitempty"` + + // Entropy encoding mode. + EntropyEncoding *string `json:"entropyEncoding,omitempty" tf:"entropy_encoding,omitempty"` + + // Filters to apply to an encode. See H264 Filter Settings for more details. + FilterSettings *FilterSettingsInitParameters `json:"filterSettings,omitempty" tf:"filter_settings,omitempty"` + + // Four bit AFD value to write on all frames of video in the output stream. + FixedAfd *string `json:"fixedAfd,omitempty" tf:"fixed_afd,omitempty"` + + FlickerAq *string `json:"flickerAq,omitempty" tf:"flicker_aq,omitempty"` + + // Controls whether coding is performed on a field basis or on a frame basis. + ForceFieldPictures *string `json:"forceFieldPictures,omitempty" tf:"force_field_pictures,omitempty"` + + // Indicates how the output video frame rate is specified. + FramerateControl *string `json:"framerateControl,omitempty" tf:"framerate_control,omitempty"` + + // Framerate denominator. + FramerateDenominator *float64 `json:"framerateDenominator,omitempty" tf:"framerate_denominator,omitempty"` + + // Framerate numerator. + FramerateNumerator *float64 `json:"framerateNumerator,omitempty" tf:"framerate_numerator,omitempty"` + + // GOP-B reference. + GopBReference *string `json:"gopBReference,omitempty" tf:"gop_b_reference,omitempty"` + + // Frequency of closed GOPs. + GopClosedCadence *float64 `json:"gopClosedCadence,omitempty" tf:"gop_closed_cadence,omitempty"` + + // Number of B-frames between reference frames. + GopNumBFrames *float64 `json:"gopNumBFrames,omitempty" tf:"gop_num_b_frames,omitempty"` + + // GOP size in units of either frames of seconds per gop_size_units. + GopSize *float64 `json:"gopSize,omitempty" tf:"gop_size,omitempty"` + + // Indicates if the gop_size is specified in frames or seconds. + GopSizeUnits *string `json:"gopSizeUnits,omitempty" tf:"gop_size_units,omitempty"` + + // H264 level. 
+ Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // Amount of lookahead. + LookAheadRateControl *string `json:"lookAheadRateControl,omitempty" tf:"look_ahead_rate_control,omitempty"` + + // Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video. + MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"` + + MinIInterval *float64 `json:"minIInterval,omitempty" tf:"min_i_interval,omitempty"` + + // Number of reference frames to use. + NumRefFrames *float64 `json:"numRefFrames,omitempty" tf:"num_ref_frames,omitempty"` + + // Indicates how the output pixel aspect ratio is specified. + ParControl *string `json:"parControl,omitempty" tf:"par_control,omitempty"` + + // Pixel Aspect Ratio denominator. + ParDenominator *float64 `json:"parDenominator,omitempty" tf:"par_denominator,omitempty"` + + // Pixel Aspect Ratio numerator. + ParNumerator *float64 `json:"parNumerator,omitempty" tf:"par_numerator,omitempty"` + + // AAC profile. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // Quality level. + QualityLevel *string `json:"qualityLevel,omitempty" tf:"quality_level,omitempty"` + + // Controls the target quality for the video encode. + QvbrQualityLevel *float64 `json:"qvbrQualityLevel,omitempty" tf:"qvbr_quality_level,omitempty"` + + // The rate control mode. + RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Sets the scan type of the output. + ScanType *string `json:"scanType,omitempty" tf:"scan_type,omitempty"` + + // Scene change detection. + SceneChangeDetect *string `json:"sceneChangeDetect,omitempty" tf:"scene_change_detect,omitempty"` + + // Number of slices per picture. + Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"` + + // Softness. + Softness *float64 `json:"softness,omitempty" tf:"softness,omitempty"` + + // Makes adjustments within each frame based on spatial variation of content complexity. 
+ SpatialAq *string `json:"spatialAq,omitempty" tf:"spatial_aq,omitempty"` + + // Subgop length. + SubgopLength *string `json:"subgopLength,omitempty" tf:"subgop_length,omitempty"` + + // Produces a bitstream compliant with SMPTE RP-2027. + Syntax *string `json:"syntax,omitempty" tf:"syntax,omitempty"` + + // Makes adjustments within each frame based on temporal variation of content complexity. + TemporalAq *string `json:"temporalAq,omitempty" tf:"temporal_aq,omitempty"` + + // Determines how timecodes should be inserted into the video elementary stream. + TimecodeInsertion *string `json:"timecodeInsertion,omitempty" tf:"timecode_insertion,omitempty"` +} + +type H264SettingsObservation struct { + + // Enables or disables adaptive quantization. + AdaptiveQuantization *string `json:"adaptiveQuantization,omitempty" tf:"adaptive_quantization,omitempty"` + + // Indicates that AFD values will be written into the output stream. + AfdSignaling *string `json:"afdSignaling,omitempty" tf:"afd_signaling,omitempty"` + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + BufFillPct *float64 `json:"bufFillPct,omitempty" tf:"buf_fill_pct,omitempty"` + + // Size of buffer in bits. + BufSize *float64 `json:"bufSize,omitempty" tf:"buf_size,omitempty"` + + // Includes color space metadata in the output. + ColorMetadata *string `json:"colorMetadata,omitempty" tf:"color_metadata,omitempty"` + + // Entropy encoding mode. + EntropyEncoding *string `json:"entropyEncoding,omitempty" tf:"entropy_encoding,omitempty"` + + // Filters to apply to an encode. See H264 Filter Settings for more details. + FilterSettings *FilterSettingsObservation `json:"filterSettings,omitempty" tf:"filter_settings,omitempty"` + + // Four bit AFD value to write on all frames of video in the output stream. 
+ FixedAfd *string `json:"fixedAfd,omitempty" tf:"fixed_afd,omitempty"` + + FlickerAq *string `json:"flickerAq,omitempty" tf:"flicker_aq,omitempty"` + + // Controls whether coding is performed on a field basis or on a frame basis. + ForceFieldPictures *string `json:"forceFieldPictures,omitempty" tf:"force_field_pictures,omitempty"` + + // Indicates how the output video frame rate is specified. + FramerateControl *string `json:"framerateControl,omitempty" tf:"framerate_control,omitempty"` + + // Framerate denominator. + FramerateDenominator *float64 `json:"framerateDenominator,omitempty" tf:"framerate_denominator,omitempty"` + + // Framerate numerator. + FramerateNumerator *float64 `json:"framerateNumerator,omitempty" tf:"framerate_numerator,omitempty"` + + // GOP-B reference. + GopBReference *string `json:"gopBReference,omitempty" tf:"gop_b_reference,omitempty"` + + // Frequency of closed GOPs. + GopClosedCadence *float64 `json:"gopClosedCadence,omitempty" tf:"gop_closed_cadence,omitempty"` + + // Number of B-frames between reference frames. + GopNumBFrames *float64 `json:"gopNumBFrames,omitempty" tf:"gop_num_b_frames,omitempty"` + + // GOP size in units of either frames of seconds per gop_size_units. + GopSize *float64 `json:"gopSize,omitempty" tf:"gop_size,omitempty"` + + // Indicates if the gop_size is specified in frames or seconds. + GopSizeUnits *string `json:"gopSizeUnits,omitempty" tf:"gop_size_units,omitempty"` + + // H264 level. + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // Amount of lookahead. + LookAheadRateControl *string `json:"lookAheadRateControl,omitempty" tf:"look_ahead_rate_control,omitempty"` + + // Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video. + MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"` + + MinIInterval *float64 `json:"minIInterval,omitempty" tf:"min_i_interval,omitempty"` + + // Number of reference frames to use. 
+ NumRefFrames *float64 `json:"numRefFrames,omitempty" tf:"num_ref_frames,omitempty"` + + // Indicates how the output pixel aspect ratio is specified. + ParControl *string `json:"parControl,omitempty" tf:"par_control,omitempty"` + + // Pixel Aspect Ratio denominator. + ParDenominator *float64 `json:"parDenominator,omitempty" tf:"par_denominator,omitempty"` + + // Pixel Aspect Ratio numerator. + ParNumerator *float64 `json:"parNumerator,omitempty" tf:"par_numerator,omitempty"` + + // AAC profile. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // Quality level. + QualityLevel *string `json:"qualityLevel,omitempty" tf:"quality_level,omitempty"` + + // Controls the target quality for the video encode. + QvbrQualityLevel *float64 `json:"qvbrQualityLevel,omitempty" tf:"qvbr_quality_level,omitempty"` + + // The rate control mode. + RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Sets the scan type of the output. + ScanType *string `json:"scanType,omitempty" tf:"scan_type,omitempty"` + + // Scene change detection. + SceneChangeDetect *string `json:"sceneChangeDetect,omitempty" tf:"scene_change_detect,omitempty"` + + // Number of slices per picture. + Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"` + + // Softness. + Softness *float64 `json:"softness,omitempty" tf:"softness,omitempty"` + + // Makes adjustments within each frame based on spatial variation of content complexity. + SpatialAq *string `json:"spatialAq,omitempty" tf:"spatial_aq,omitempty"` + + // Subgop length. + SubgopLength *string `json:"subgopLength,omitempty" tf:"subgop_length,omitempty"` + + // Produces a bitstream compliant with SMPTE RP-2027. + Syntax *string `json:"syntax,omitempty" tf:"syntax,omitempty"` + + // Makes adjustments within each frame based on temporal variation of content complexity. 
+ TemporalAq *string `json:"temporalAq,omitempty" tf:"temporal_aq,omitempty"` + + // Determines how timecodes should be inserted into the video elementary stream. + TimecodeInsertion *string `json:"timecodeInsertion,omitempty" tf:"timecode_insertion,omitempty"` +} + +type H264SettingsParameters struct { + + // Enables or disables adaptive quantization. + // +kubebuilder:validation:Optional + AdaptiveQuantization *string `json:"adaptiveQuantization,omitempty" tf:"adaptive_quantization,omitempty"` + + // Indicates that AFD values will be written into the output stream. + // +kubebuilder:validation:Optional + AfdSignaling *string `json:"afdSignaling,omitempty" tf:"afd_signaling,omitempty"` + + // Average bitrate in bits/second. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // +kubebuilder:validation:Optional + BufFillPct *float64 `json:"bufFillPct,omitempty" tf:"buf_fill_pct,omitempty"` + + // Size of buffer in bits. + // +kubebuilder:validation:Optional + BufSize *float64 `json:"bufSize,omitempty" tf:"buf_size,omitempty"` + + // Includes color space metadata in the output. + // +kubebuilder:validation:Optional + ColorMetadata *string `json:"colorMetadata,omitempty" tf:"color_metadata,omitempty"` + + // Entropy encoding mode. + // +kubebuilder:validation:Optional + EntropyEncoding *string `json:"entropyEncoding,omitempty" tf:"entropy_encoding,omitempty"` + + // Filters to apply to an encode. See H264 Filter Settings for more details. + // +kubebuilder:validation:Optional + FilterSettings *FilterSettingsParameters `json:"filterSettings,omitempty" tf:"filter_settings,omitempty"` + + // Four bit AFD value to write on all frames of video in the output stream. 
+ // +kubebuilder:validation:Optional + FixedAfd *string `json:"fixedAfd,omitempty" tf:"fixed_afd,omitempty"` + + // +kubebuilder:validation:Optional + FlickerAq *string `json:"flickerAq,omitempty" tf:"flicker_aq,omitempty"` + + // Controls whether coding is performed on a field basis or on a frame basis. + // +kubebuilder:validation:Optional + ForceFieldPictures *string `json:"forceFieldPictures,omitempty" tf:"force_field_pictures,omitempty"` + + // Indicates how the output video frame rate is specified. + // +kubebuilder:validation:Optional + FramerateControl *string `json:"framerateControl,omitempty" tf:"framerate_control,omitempty"` + + // Framerate denominator. + // +kubebuilder:validation:Optional + FramerateDenominator *float64 `json:"framerateDenominator,omitempty" tf:"framerate_denominator,omitempty"` + + // Framerate numerator. + // +kubebuilder:validation:Optional + FramerateNumerator *float64 `json:"framerateNumerator,omitempty" tf:"framerate_numerator,omitempty"` + + // GOP-B reference. + // +kubebuilder:validation:Optional + GopBReference *string `json:"gopBReference,omitempty" tf:"gop_b_reference,omitempty"` + + // Frequency of closed GOPs. + // +kubebuilder:validation:Optional + GopClosedCadence *float64 `json:"gopClosedCadence,omitempty" tf:"gop_closed_cadence,omitempty"` + + // Number of B-frames between reference frames. + // +kubebuilder:validation:Optional + GopNumBFrames *float64 `json:"gopNumBFrames,omitempty" tf:"gop_num_b_frames,omitempty"` + + // GOP size in units of either frames of seconds per gop_size_units. + // +kubebuilder:validation:Optional + GopSize *float64 `json:"gopSize,omitempty" tf:"gop_size,omitempty"` + + // Indicates if the gop_size is specified in frames or seconds. + // +kubebuilder:validation:Optional + GopSizeUnits *string `json:"gopSizeUnits,omitempty" tf:"gop_size_units,omitempty"` + + // H264 level. 
+ // +kubebuilder:validation:Optional + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // Amount of lookahead. + // +kubebuilder:validation:Optional + LookAheadRateControl *string `json:"lookAheadRateControl,omitempty" tf:"look_ahead_rate_control,omitempty"` + + // Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video. + // +kubebuilder:validation:Optional + MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"` + + // +kubebuilder:validation:Optional + MinIInterval *float64 `json:"minIInterval,omitempty" tf:"min_i_interval,omitempty"` + + // Number of reference frames to use. + // +kubebuilder:validation:Optional + NumRefFrames *float64 `json:"numRefFrames,omitempty" tf:"num_ref_frames,omitempty"` + + // Indicates how the output pixel aspect ratio is specified. + // +kubebuilder:validation:Optional + ParControl *string `json:"parControl,omitempty" tf:"par_control,omitempty"` + + // Pixel Aspect Ratio denominator. + // +kubebuilder:validation:Optional + ParDenominator *float64 `json:"parDenominator,omitempty" tf:"par_denominator,omitempty"` + + // Pixel Aspect Ratio numerator. + // +kubebuilder:validation:Optional + ParNumerator *float64 `json:"parNumerator,omitempty" tf:"par_numerator,omitempty"` + + // AAC profile. + // +kubebuilder:validation:Optional + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // Quality level. + // +kubebuilder:validation:Optional + QualityLevel *string `json:"qualityLevel,omitempty" tf:"quality_level,omitempty"` + + // Controls the target quality for the video encode. + // +kubebuilder:validation:Optional + QvbrQualityLevel *float64 `json:"qvbrQualityLevel,omitempty" tf:"qvbr_quality_level,omitempty"` + + // The rate control mode. + // +kubebuilder:validation:Optional + RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Sets the scan type of the output. 
+ // +kubebuilder:validation:Optional + ScanType *string `json:"scanType,omitempty" tf:"scan_type,omitempty"` + + // Scene change detection. + // +kubebuilder:validation:Optional + SceneChangeDetect *string `json:"sceneChangeDetect,omitempty" tf:"scene_change_detect,omitempty"` + + // Number of slices per picture. + // +kubebuilder:validation:Optional + Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"` + + // Softness. + // +kubebuilder:validation:Optional + Softness *float64 `json:"softness,omitempty" tf:"softness,omitempty"` + + // Makes adjustments within each frame based on spatial variation of content complexity. + // +kubebuilder:validation:Optional + SpatialAq *string `json:"spatialAq,omitempty" tf:"spatial_aq,omitempty"` + + // Subgop length. + // +kubebuilder:validation:Optional + SubgopLength *string `json:"subgopLength,omitempty" tf:"subgop_length,omitempty"` + + // Produces a bitstream compliant with SMPTE RP-2027. + // +kubebuilder:validation:Optional + Syntax *string `json:"syntax,omitempty" tf:"syntax,omitempty"` + + // Makes adjustments within each frame based on temporal variation of content complexity. + // +kubebuilder:validation:Optional + TemporalAq *string `json:"temporalAq,omitempty" tf:"temporal_aq,omitempty"` + + // Determines how timecodes should be inserted into the video elementary stream. + // +kubebuilder:validation:Optional + TimecodeInsertion *string `json:"timecodeInsertion,omitempty" tf:"timecode_insertion,omitempty"` +} + +type H265SettingsFilterSettingsInitParameters struct { + + // Temporal filter settings. See Temporal Filter Settings + TemporalFilterSettings *FilterSettingsTemporalFilterSettingsInitParameters `json:"temporalFilterSettings,omitempty" tf:"temporal_filter_settings,omitempty"` +} + +type H265SettingsFilterSettingsObservation struct { + + // Temporal filter settings. 
See Temporal Filter Settings + TemporalFilterSettings *FilterSettingsTemporalFilterSettingsObservation `json:"temporalFilterSettings,omitempty" tf:"temporal_filter_settings,omitempty"` +} + +type H265SettingsFilterSettingsParameters struct { + + // Temporal filter settings. See Temporal Filter Settings + // +kubebuilder:validation:Optional + TemporalFilterSettings *FilterSettingsTemporalFilterSettingsParameters `json:"temporalFilterSettings,omitempty" tf:"temporal_filter_settings,omitempty"` +} + +type H265SettingsInitParameters struct { + + // Enables or disables adaptive quantization. + AdaptiveQuantization *string `json:"adaptiveQuantization,omitempty" tf:"adaptive_quantization,omitempty"` + + // Indicates that AFD values will be written into the output stream. + AfdSignaling *string `json:"afdSignaling,omitempty" tf:"afd_signaling,omitempty"` + + // Whether or not EML should insert an Alternative Transfer Function SEI message. + AlternativeTransferFunction *string `json:"alternativeTransferFunction,omitempty" tf:"alternative_transfer_function,omitempty"` + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Size of buffer in bits. + BufSize *float64 `json:"bufSize,omitempty" tf:"buf_size,omitempty"` + + // Includes color space metadata in the output. + ColorMetadata *string `json:"colorMetadata,omitempty" tf:"color_metadata,omitempty"` + + // Define the color metadata for the output. H265 Color Space Settings for more details. + ColorSpaceSettings *ColorSpaceSettingsInitParameters `json:"colorSpaceSettings,omitempty" tf:"color_space_settings,omitempty"` + + // Filters to apply to an encode. See H264 Filter Settings for more details. + FilterSettings *H265SettingsFilterSettingsInitParameters `json:"filterSettings,omitempty" tf:"filter_settings,omitempty"` + + // Four bit AFD value to write on all frames of video in the output stream. 
+ FixedAfd *string `json:"fixedAfd,omitempty" tf:"fixed_afd,omitempty"`
+
+ FlickerAq *string `json:"flickerAq,omitempty" tf:"flicker_aq,omitempty"`
+
+ // Framerate denominator.
+ FramerateDenominator *float64 `json:"framerateDenominator,omitempty" tf:"framerate_denominator,omitempty"`
+
+ // Framerate numerator.
+ FramerateNumerator *float64 `json:"framerateNumerator,omitempty" tf:"framerate_numerator,omitempty"`
+
+ // Frequency of closed GOPs.
+ GopClosedCadence *float64 `json:"gopClosedCadence,omitempty" tf:"gop_closed_cadence,omitempty"`
+
+ // GOP size in units of either frames of seconds per gop_size_units.
+ GopSize *float64 `json:"gopSize,omitempty" tf:"gop_size,omitempty"`
+
+ // Indicates if the gop_size is specified in frames or seconds.
+ GopSizeUnits *string `json:"gopSizeUnits,omitempty" tf:"gop_size_units,omitempty"`
+
+ // H265 level.
+ Level *string `json:"level,omitempty" tf:"level,omitempty"`
+
+ // Amount of lookahead.
+ LookAheadRateControl *string `json:"lookAheadRateControl,omitempty" tf:"look_ahead_rate_control,omitempty"`
+
+ // Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video.
+ MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"`
+
+ MinIInterval *float64 `json:"minIInterval,omitempty" tf:"min_i_interval,omitempty"`
+
+ // Pixel Aspect Ratio denominator.
+ ParDenominator *float64 `json:"parDenominator,omitempty" tf:"par_denominator,omitempty"`
+
+ // Pixel Aspect Ratio numerator.
+ ParNumerator *float64 `json:"parNumerator,omitempty" tf:"par_numerator,omitempty"`
+
+ // H265 profile.
+ Profile *string `json:"profile,omitempty" tf:"profile,omitempty"`
+
+ // Controls the target quality for the video encode.
+ QvbrQualityLevel *float64 `json:"qvbrQualityLevel,omitempty" tf:"qvbr_quality_level,omitempty"`
+
+ // The rate control mode.
+ RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"`
+
+ // Sets the scan type of the output. 
+ ScanType *string `json:"scanType,omitempty" tf:"scan_type,omitempty"` + + // Scene change detection. + SceneChangeDetect *string `json:"sceneChangeDetect,omitempty" tf:"scene_change_detect,omitempty"` + + // Number of slices per picture. + Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"` + + // Set the H265 tier in the output. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // Apply a burned in timecode. See H265 Timecode Burnin Settings for more details. + TimecodeBurninSettings *TimecodeBurninSettingsInitParameters `json:"timecodeBurninSettings,omitempty" tf:"timecode_burnin_settings,omitempty"` + + // Determines how timecodes should be inserted into the video elementary stream. + TimecodeInsertion *string `json:"timecodeInsertion,omitempty" tf:"timecode_insertion,omitempty"` +} + +type H265SettingsObservation struct { + + // Enables or disables adaptive quantization. + AdaptiveQuantization *string `json:"adaptiveQuantization,omitempty" tf:"adaptive_quantization,omitempty"` + + // Indicates that AFD values will be written into the output stream. + AfdSignaling *string `json:"afdSignaling,omitempty" tf:"afd_signaling,omitempty"` + + // Whether or not EML should insert an Alternative Transfer Function SEI message. + AlternativeTransferFunction *string `json:"alternativeTransferFunction,omitempty" tf:"alternative_transfer_function,omitempty"` + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Size of buffer in bits. + BufSize *float64 `json:"bufSize,omitempty" tf:"buf_size,omitempty"` + + // Includes color space metadata in the output. + ColorMetadata *string `json:"colorMetadata,omitempty" tf:"color_metadata,omitempty"` + + // Define the color metadata for the output. H265 Color Space Settings for more details. 
+ ColorSpaceSettings *ColorSpaceSettingsObservation `json:"colorSpaceSettings,omitempty" tf:"color_space_settings,omitempty"`
+
+ // Filters to apply to an encode. See H265 Filter Settings for more details.
+ FilterSettings *H265SettingsFilterSettingsObservation `json:"filterSettings,omitempty" tf:"filter_settings,omitempty"`
+
+ // Four bit AFD value to write on all frames of video in the output stream.
+ FixedAfd *string `json:"fixedAfd,omitempty" tf:"fixed_afd,omitempty"`
+
+ FlickerAq *string `json:"flickerAq,omitempty" tf:"flicker_aq,omitempty"`
+
+ // Framerate denominator.
+ FramerateDenominator *float64 `json:"framerateDenominator,omitempty" tf:"framerate_denominator,omitempty"`
+
+ // Framerate numerator.
+ FramerateNumerator *float64 `json:"framerateNumerator,omitempty" tf:"framerate_numerator,omitempty"`
+
+ // Frequency of closed GOPs.
+ GopClosedCadence *float64 `json:"gopClosedCadence,omitempty" tf:"gop_closed_cadence,omitempty"`
+
+ // GOP size in units of either frames of seconds per gop_size_units.
+ GopSize *float64 `json:"gopSize,omitempty" tf:"gop_size,omitempty"`
+
+ // Indicates if the gop_size is specified in frames or seconds.
+ GopSizeUnits *string `json:"gopSizeUnits,omitempty" tf:"gop_size_units,omitempty"`
+
+ // H265 level.
+ Level *string `json:"level,omitempty" tf:"level,omitempty"`
+
+ // Amount of lookahead.
+ LookAheadRateControl *string `json:"lookAheadRateControl,omitempty" tf:"look_ahead_rate_control,omitempty"`
+
+ // Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video.
+ MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"`
+
+ MinIInterval *float64 `json:"minIInterval,omitempty" tf:"min_i_interval,omitempty"`
+
+ // Pixel Aspect Ratio denominator.
+ ParDenominator *float64 `json:"parDenominator,omitempty" tf:"par_denominator,omitempty"`
+
+ // Pixel Aspect Ratio numerator. 
+ ParNumerator *float64 `json:"parNumerator,omitempty" tf:"par_numerator,omitempty"` + + // AAC profile. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // Controls the target quality for the video encode. + QvbrQualityLevel *float64 `json:"qvbrQualityLevel,omitempty" tf:"qvbr_quality_level,omitempty"` + + // The rate control mode. + RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Sets the scan type of the output. + ScanType *string `json:"scanType,omitempty" tf:"scan_type,omitempty"` + + // Scene change detection. + SceneChangeDetect *string `json:"sceneChangeDetect,omitempty" tf:"scene_change_detect,omitempty"` + + // Number of slices per picture. + Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"` + + // Set the H265 tier in the output. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // Apply a burned in timecode. See H265 Timecode Burnin Settings for more details. + TimecodeBurninSettings *TimecodeBurninSettingsObservation `json:"timecodeBurninSettings,omitempty" tf:"timecode_burnin_settings,omitempty"` + + // Determines how timecodes should be inserted into the video elementary stream. + TimecodeInsertion *string `json:"timecodeInsertion,omitempty" tf:"timecode_insertion,omitempty"` +} + +type H265SettingsParameters struct { + + // Enables or disables adaptive quantization. + // +kubebuilder:validation:Optional + AdaptiveQuantization *string `json:"adaptiveQuantization,omitempty" tf:"adaptive_quantization,omitempty"` + + // Indicates that AFD values will be written into the output stream. + // +kubebuilder:validation:Optional + AfdSignaling *string `json:"afdSignaling,omitempty" tf:"afd_signaling,omitempty"` + + // Whether or not EML should insert an Alternative Transfer Function SEI message. 
+ // +kubebuilder:validation:Optional + AlternativeTransferFunction *string `json:"alternativeTransferFunction,omitempty" tf:"alternative_transfer_function,omitempty"` + + // Average bitrate in bits/second. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate" tf:"bitrate,omitempty"` + + // Size of buffer in bits. + // +kubebuilder:validation:Optional + BufSize *float64 `json:"bufSize,omitempty" tf:"buf_size,omitempty"` + + // Includes color space metadata in the output. + // +kubebuilder:validation:Optional + ColorMetadata *string `json:"colorMetadata,omitempty" tf:"color_metadata,omitempty"` + + // Define the color metadata for the output. H265 Color Space Settings for more details. + // +kubebuilder:validation:Optional + ColorSpaceSettings *ColorSpaceSettingsParameters `json:"colorSpaceSettings,omitempty" tf:"color_space_settings,omitempty"` + + // Filters to apply to an encode. See H264 Filter Settings for more details. + // +kubebuilder:validation:Optional + FilterSettings *H265SettingsFilterSettingsParameters `json:"filterSettings,omitempty" tf:"filter_settings,omitempty"` + + // Four bit AFD value to write on all frames of video in the output stream. + // +kubebuilder:validation:Optional + FixedAfd *string `json:"fixedAfd,omitempty" tf:"fixed_afd,omitempty"` + + // +kubebuilder:validation:Optional + FlickerAq *string `json:"flickerAq,omitempty" tf:"flicker_aq,omitempty"` + + // Framerate denominator. + // +kubebuilder:validation:Optional + FramerateDenominator *float64 `json:"framerateDenominator" tf:"framerate_denominator,omitempty"` + + // Framerate numerator. + // +kubebuilder:validation:Optional + FramerateNumerator *float64 `json:"framerateNumerator" tf:"framerate_numerator,omitempty"` + + // Frequency of closed GOPs. + // +kubebuilder:validation:Optional + GopClosedCadence *float64 `json:"gopClosedCadence,omitempty" tf:"gop_closed_cadence,omitempty"` + + // GOP size in units of either frames of seconds per gop_size_units. 
+ // +kubebuilder:validation:Optional
+ GopSize *float64 `json:"gopSize,omitempty" tf:"gop_size,omitempty"`
+
+ // Indicates if the gop_size is specified in frames or seconds.
+ // +kubebuilder:validation:Optional
+ GopSizeUnits *string `json:"gopSizeUnits,omitempty" tf:"gop_size_units,omitempty"`
+
+ // H265 level.
+ // +kubebuilder:validation:Optional
+ Level *string `json:"level,omitempty" tf:"level,omitempty"`
+
+ // Amount of lookahead.
+ // +kubebuilder:validation:Optional
+ LookAheadRateControl *string `json:"lookAheadRateControl,omitempty" tf:"look_ahead_rate_control,omitempty"`
+
+ // Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video.
+ // +kubebuilder:validation:Optional
+ MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ MinIInterval *float64 `json:"minIInterval,omitempty" tf:"min_i_interval,omitempty"`
+
+ // Pixel Aspect Ratio denominator.
+ // +kubebuilder:validation:Optional
+ ParDenominator *float64 `json:"parDenominator,omitempty" tf:"par_denominator,omitempty"`
+
+ // Pixel Aspect Ratio numerator.
+ // +kubebuilder:validation:Optional
+ ParNumerator *float64 `json:"parNumerator,omitempty" tf:"par_numerator,omitempty"`
+
+ // H265 profile.
+ // +kubebuilder:validation:Optional
+ Profile *string `json:"profile,omitempty" tf:"profile,omitempty"`
+
+ // Controls the target quality for the video encode.
+ // +kubebuilder:validation:Optional
+ QvbrQualityLevel *float64 `json:"qvbrQualityLevel,omitempty" tf:"qvbr_quality_level,omitempty"`
+
+ // The rate control mode.
+ // +kubebuilder:validation:Optional
+ RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"`
+
+ // Sets the scan type of the output.
+ // +kubebuilder:validation:Optional
+ ScanType *string `json:"scanType,omitempty" tf:"scan_type,omitempty"`
+
+ // Scene change detection. 
+ // +kubebuilder:validation:Optional + SceneChangeDetect *string `json:"sceneChangeDetect,omitempty" tf:"scene_change_detect,omitempty"` + + // Number of slices per picture. + // +kubebuilder:validation:Optional + Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"` + + // Set the H265 tier in the output. + // +kubebuilder:validation:Optional + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // Apply a burned in timecode. See H265 Timecode Burnin Settings for more details. + // +kubebuilder:validation:Optional + TimecodeBurninSettings *TimecodeBurninSettingsParameters `json:"timecodeBurninSettings,omitempty" tf:"timecode_burnin_settings,omitempty"` + + // Determines how timecodes should be inserted into the video elementary stream. + // +kubebuilder:validation:Optional + TimecodeInsertion *string `json:"timecodeInsertion,omitempty" tf:"timecode_insertion,omitempty"` +} + +type HTMLMotionGraphicsSettingsInitParameters struct { +} + +type HTMLMotionGraphicsSettingsObservation struct { +} + +type HTMLMotionGraphicsSettingsParameters struct { +} + +type Hdr10SettingsInitParameters struct { + + // Sets the MaxCLL value for HDR10. + MaxCll *float64 `json:"maxCll,omitempty" tf:"max_cll,omitempty"` + + // Sets the MaxFALL value for HDR10. + MaxFall *float64 `json:"maxFall,omitempty" tf:"max_fall,omitempty"` +} + +type Hdr10SettingsObservation struct { + + // Sets the MaxCLL value for HDR10. + MaxCll *float64 `json:"maxCll,omitempty" tf:"max_cll,omitempty"` + + // Sets the MaxFALL value for HDR10. + MaxFall *float64 `json:"maxFall,omitempty" tf:"max_fall,omitempty"` +} + +type Hdr10SettingsParameters struct { + + // Sets the MaxCLL value for HDR10. + // +kubebuilder:validation:Optional + MaxCll *float64 `json:"maxCll,omitempty" tf:"max_cll,omitempty"` + + // Sets the MaxFALL value for HDR10. 
+ // +kubebuilder:validation:Optional + MaxFall *float64 `json:"maxFall,omitempty" tf:"max_fall,omitempty"` +} + +type HlsAkamaiSettingsInitParameters struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + HTTPTransferMode *string `json:"httpTransferMode,omitempty" tf:"http_transfer_mode,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` + + Salt *string `json:"salt,omitempty" tf:"salt,omitempty"` + + Token *string `json:"token,omitempty" tf:"token,omitempty"` +} + +type HlsAkamaiSettingsObservation struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + HTTPTransferMode *string `json:"httpTransferMode,omitempty" tf:"http_transfer_mode,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. 
+ RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` + + Salt *string `json:"salt,omitempty" tf:"salt,omitempty"` + + Token *string `json:"token,omitempty" tf:"token,omitempty"` +} + +type HlsAkamaiSettingsParameters struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + // +kubebuilder:validation:Optional + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // +kubebuilder:validation:Optional + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + // +kubebuilder:validation:Optional + HTTPTransferMode *string `json:"httpTransferMode,omitempty" tf:"http_transfer_mode,omitempty"` + + // Number of retry attempts. + // +kubebuilder:validation:Optional + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + // +kubebuilder:validation:Optional + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` + + // +kubebuilder:validation:Optional + Salt *string `json:"salt,omitempty" tf:"salt,omitempty"` + + // +kubebuilder:validation:Optional + Token *string `json:"token,omitempty" tf:"token,omitempty"` +} + +type HlsBasicPutSettingsInitParameters struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. 
+ RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type HlsBasicPutSettingsObservation struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type HlsBasicPutSettingsParameters struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + // +kubebuilder:validation:Optional + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // +kubebuilder:validation:Optional + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + // Number of retry attempts. + // +kubebuilder:validation:Optional + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + // +kubebuilder:validation:Optional + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type HlsCdnSettingsInitParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsAkamaiSettings *HlsAkamaiSettingsInitParameters `json:"hlsAkamaiSettings,omitempty" tf:"hls_akamai_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ HlsBasicPutSettings *HlsBasicPutSettingsInitParameters `json:"hlsBasicPutSettings,omitempty" tf:"hls_basic_put_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsMediaStoreSettings *HlsMediaStoreSettingsInitParameters `json:"hlsMediaStoreSettings,omitempty" tf:"hls_media_store_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsS3Settings *HlsS3SettingsInitParameters `json:"hlsS3Settings,omitempty" tf:"hls_s3_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsWebdavSettings *HlsWebdavSettingsInitParameters `json:"hlsWebdavSettings,omitempty" tf:"hls_webdav_settings,omitempty"` +} + +type HlsCdnSettingsObservation struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsAkamaiSettings *HlsAkamaiSettingsObservation `json:"hlsAkamaiSettings,omitempty" tf:"hls_akamai_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsBasicPutSettings *HlsBasicPutSettingsObservation `json:"hlsBasicPutSettings,omitempty" tf:"hls_basic_put_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsMediaStoreSettings *HlsMediaStoreSettingsObservation `json:"hlsMediaStoreSettings,omitempty" tf:"hls_media_store_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ HlsS3Settings *HlsS3SettingsObservation `json:"hlsS3Settings,omitempty" tf:"hls_s3_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsWebdavSettings *HlsWebdavSettingsObservation `json:"hlsWebdavSettings,omitempty" tf:"hls_webdav_settings,omitempty"` +} + +type HlsCdnSettingsParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + HlsAkamaiSettings *HlsAkamaiSettingsParameters `json:"hlsAkamaiSettings,omitempty" tf:"hls_akamai_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + HlsBasicPutSettings *HlsBasicPutSettingsParameters `json:"hlsBasicPutSettings,omitempty" tf:"hls_basic_put_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + HlsMediaStoreSettings *HlsMediaStoreSettingsParameters `json:"hlsMediaStoreSettings,omitempty" tf:"hls_media_store_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + HlsS3Settings *HlsS3SettingsParameters `json:"hlsS3Settings,omitempty" tf:"hls_s3_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + HlsWebdavSettings *HlsWebdavSettingsParameters `json:"hlsWebdavSettings,omitempty" tf:"hls_webdav_settings,omitempty"` +} + +type HlsGroupSettingsDestinationInitParameters struct { + + // Reference ID for the destination. 
+ DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"`
+}
+
+type HlsGroupSettingsDestinationObservation struct {
+
+ // Reference ID for the destination.
+ DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"`
+}
+
+type HlsGroupSettingsDestinationParameters struct {
+
+ // Reference ID for the destination.
+ // +kubebuilder:validation:Optional
+ DestinationRefID *string `json:"destinationRefId" tf:"destination_ref_id,omitempty"`
+}
+
+type HlsGroupSettingsInitParameters struct {
+
+ // The ad marker type for this output group.
+ AdMarkers []*string `json:"adMarkers,omitempty" tf:"ad_markers,omitempty"`
+
+ BaseURLContent *string `json:"baseUrlContent,omitempty" tf:"base_url_content,omitempty"`
+
+ BaseURLContent1 *string `json:"baseUrlContent1,omitempty" tf:"base_url_content1,omitempty"`
+
+ BaseURLManifest *string `json:"baseUrlManifest,omitempty" tf:"base_url_manifest,omitempty"`
+
+ BaseURLManifest1 *string `json:"baseUrlManifest1,omitempty" tf:"base_url_manifest1,omitempty"`
+
+ CaptionLanguageMappings []CaptionLanguageMappingsInitParameters `json:"captionLanguageMappings,omitempty" tf:"caption_language_mappings,omitempty"`
+
+ CaptionLanguageSetting *string `json:"captionLanguageSetting,omitempty" tf:"caption_language_setting,omitempty"`
+
+ ClientCache *string `json:"clientCache,omitempty" tf:"client_cache,omitempty"`
+
+ CodecSpecification *string `json:"codecSpecification,omitempty" tf:"codec_specification,omitempty"`
+
+ ConstantIv *string `json:"constantIv,omitempty" tf:"constant_iv,omitempty"`
+
+ // A directory and base filename where archive files should be written. See Destination for more details.
+ Destination *HlsGroupSettingsDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"`
+
+ DirectoryStructure *string `json:"directoryStructure,omitempty" tf:"directory_structure,omitempty"`
+
+ // Key-value map of resource tags. 
+ DiscontinuityTags *string `json:"discontinuityTags,omitempty" tf:"discontinuity_tags,omitempty"` + + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsCdnSettings []HlsCdnSettingsInitParameters `json:"hlsCdnSettings,omitempty" tf:"hls_cdn_settings,omitempty"` + + HlsId3SegmentTagging *string `json:"hlsId3SegmentTagging,omitempty" tf:"hls_id3_segment_tagging,omitempty"` + + IframeOnlyPlaylists *string `json:"iframeOnlyPlaylists,omitempty" tf:"iframe_only_playlists,omitempty"` + + IncompleteSegmentBehavior *string `json:"incompleteSegmentBehavior,omitempty" tf:"incomplete_segment_behavior,omitempty"` + + IndexNSegments *float64 `json:"indexNSegments,omitempty" tf:"index_n_segments,omitempty"` + + // Controls the behavior of the RTMP group if input becomes unavailable. + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + IvInManifest *string `json:"ivInManifest,omitempty" tf:"iv_in_manifest,omitempty"` + + // The source for the timecode that will be associated with the events outputs. + IvSource *string `json:"ivSource,omitempty" tf:"iv_source,omitempty"` + + KeepSegments *float64 `json:"keepSegments,omitempty" tf:"keep_segments,omitempty"` + + KeyFormat *string `json:"keyFormat,omitempty" tf:"key_format,omitempty"` + + KeyFormatVersions *string `json:"keyFormatVersions,omitempty" tf:"key_format_versions,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ KeyProviderSettings *KeyProviderSettingsInitParameters `json:"keyProviderSettings,omitempty" tf:"key_provider_settings,omitempty"` + + ManifestCompression *string `json:"manifestCompression,omitempty" tf:"manifest_compression,omitempty"` + + ManifestDurationFormat *string `json:"manifestDurationFormat,omitempty" tf:"manifest_duration_format,omitempty"` + + MinSegmentLength *float64 `json:"minSegmentLength,omitempty" tf:"min_segment_length,omitempty"` + + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + OutputSelection *string `json:"outputSelection,omitempty" tf:"output_selection,omitempty"` + + ProgramDateTime *string `json:"programDateTime,omitempty" tf:"program_date_time,omitempty"` + + ProgramDateTimeClock *string `json:"programDateTimeClock,omitempty" tf:"program_date_time_clock,omitempty"` + + ProgramDateTimePeriod *float64 `json:"programDateTimePeriod,omitempty" tf:"program_date_time_period,omitempty"` + + RedundantManifest *string `json:"redundantManifest,omitempty" tf:"redundant_manifest,omitempty"` + + SegmentLength *float64 `json:"segmentLength,omitempty" tf:"segment_length,omitempty"` + + SegmentsPerSubdirectory *float64 `json:"segmentsPerSubdirectory,omitempty" tf:"segments_per_subdirectory,omitempty"` + + // - Maximum CDI input resolution. + StreamInfResolution *string `json:"streamInfResolution,omitempty" tf:"stream_inf_resolution,omitempty"` + + TSFileMode *string `json:"tsFileMode,omitempty" tf:"ts_file_mode,omitempty"` + + // Indicates ID3 frame that has the timecode. + TimedMetadataId3Frame *string `json:"timedMetadataId3Frame,omitempty" tf:"timed_metadata_id3_frame,omitempty"` + + TimedMetadataId3Period *float64 `json:"timedMetadataId3Period,omitempty" tf:"timed_metadata_id3_period,omitempty"` + + TimestampDeltaMilliseconds *float64 `json:"timestampDeltaMilliseconds,omitempty" tf:"timestamp_delta_milliseconds,omitempty"` +} + +type HlsGroupSettingsObservation struct { + + // The ad marker type for this output group. 
+ AdMarkers []*string `json:"adMarkers,omitempty" tf:"ad_markers,omitempty"` + + BaseURLContent *string `json:"baseUrlContent,omitempty" tf:"base_url_content,omitempty"` + + BaseURLContent1 *string `json:"baseUrlContent1,omitempty" tf:"base_url_content1,omitempty"` + + BaseURLManifest *string `json:"baseUrlManifest,omitempty" tf:"base_url_manifest,omitempty"` + + BaseURLManifest1 *string `json:"baseUrlManifest1,omitempty" tf:"base_url_manifest1,omitempty"` + + CaptionLanguageMappings []CaptionLanguageMappingsObservation `json:"captionLanguageMappings,omitempty" tf:"caption_language_mappings,omitempty"` + + CaptionLanguageSetting *string `json:"captionLanguageSetting,omitempty" tf:"caption_language_setting,omitempty"` + + ClientCache *string `json:"clientCache,omitempty" tf:"client_cache,omitempty"` + + CodecSpecification *string `json:"codecSpecification,omitempty" tf:"codec_specification,omitempty"` + + ConstantIv *string `json:"constantIv,omitempty" tf:"constant_iv,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + Destination *HlsGroupSettingsDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + DirectoryStructure *string `json:"directoryStructure,omitempty" tf:"directory_structure,omitempty"` + + // Key-value map of resource tags. + DiscontinuityTags *string `json:"discontinuityTags,omitempty" tf:"discontinuity_tags,omitempty"` + + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ HlsCdnSettings []HlsCdnSettingsObservation `json:"hlsCdnSettings,omitempty" tf:"hls_cdn_settings,omitempty"` + + HlsId3SegmentTagging *string `json:"hlsId3SegmentTagging,omitempty" tf:"hls_id3_segment_tagging,omitempty"` + + IframeOnlyPlaylists *string `json:"iframeOnlyPlaylists,omitempty" tf:"iframe_only_playlists,omitempty"` + + IncompleteSegmentBehavior *string `json:"incompleteSegmentBehavior,omitempty" tf:"incomplete_segment_behavior,omitempty"` + + IndexNSegments *float64 `json:"indexNSegments,omitempty" tf:"index_n_segments,omitempty"` + + // Controls the behavior of the RTMP group if input becomes unavailable. + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + IvInManifest *string `json:"ivInManifest,omitempty" tf:"iv_in_manifest,omitempty"` + + // The source for the timecode that will be associated with the events outputs. + IvSource *string `json:"ivSource,omitempty" tf:"iv_source,omitempty"` + + KeepSegments *float64 `json:"keepSegments,omitempty" tf:"keep_segments,omitempty"` + + KeyFormat *string `json:"keyFormat,omitempty" tf:"key_format,omitempty"` + + KeyFormatVersions *string `json:"keyFormatVersions,omitempty" tf:"key_format_versions,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ KeyProviderSettings *KeyProviderSettingsObservation `json:"keyProviderSettings,omitempty" tf:"key_provider_settings,omitempty"` + + ManifestCompression *string `json:"manifestCompression,omitempty" tf:"manifest_compression,omitempty"` + + ManifestDurationFormat *string `json:"manifestDurationFormat,omitempty" tf:"manifest_duration_format,omitempty"` + + MinSegmentLength *float64 `json:"minSegmentLength,omitempty" tf:"min_segment_length,omitempty"` + + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + OutputSelection *string `json:"outputSelection,omitempty" tf:"output_selection,omitempty"` + + ProgramDateTime *string `json:"programDateTime,omitempty" tf:"program_date_time,omitempty"` + + ProgramDateTimeClock *string `json:"programDateTimeClock,omitempty" tf:"program_date_time_clock,omitempty"` + + ProgramDateTimePeriod *float64 `json:"programDateTimePeriod,omitempty" tf:"program_date_time_period,omitempty"` + + RedundantManifest *string `json:"redundantManifest,omitempty" tf:"redundant_manifest,omitempty"` + + SegmentLength *float64 `json:"segmentLength,omitempty" tf:"segment_length,omitempty"` + + SegmentsPerSubdirectory *float64 `json:"segmentsPerSubdirectory,omitempty" tf:"segments_per_subdirectory,omitempty"` + + // - Maximum CDI input resolution. + StreamInfResolution *string `json:"streamInfResolution,omitempty" tf:"stream_inf_resolution,omitempty"` + + TSFileMode *string `json:"tsFileMode,omitempty" tf:"ts_file_mode,omitempty"` + + // Indicates ID3 frame that has the timecode. + TimedMetadataId3Frame *string `json:"timedMetadataId3Frame,omitempty" tf:"timed_metadata_id3_frame,omitempty"` + + TimedMetadataId3Period *float64 `json:"timedMetadataId3Period,omitempty" tf:"timed_metadata_id3_period,omitempty"` + + TimestampDeltaMilliseconds *float64 `json:"timestampDeltaMilliseconds,omitempty" tf:"timestamp_delta_milliseconds,omitempty"` +} + +type HlsGroupSettingsParameters struct { + + // The ad marker type for this output group. 
+ // +kubebuilder:validation:Optional + AdMarkers []*string `json:"adMarkers,omitempty" tf:"ad_markers,omitempty"` + + // +kubebuilder:validation:Optional + BaseURLContent *string `json:"baseUrlContent,omitempty" tf:"base_url_content,omitempty"` + + // +kubebuilder:validation:Optional + BaseURLContent1 *string `json:"baseUrlContent1,omitempty" tf:"base_url_content1,omitempty"` + + // +kubebuilder:validation:Optional + BaseURLManifest *string `json:"baseUrlManifest,omitempty" tf:"base_url_manifest,omitempty"` + + // +kubebuilder:validation:Optional + BaseURLManifest1 *string `json:"baseUrlManifest1,omitempty" tf:"base_url_manifest1,omitempty"` + + // +kubebuilder:validation:Optional + CaptionLanguageMappings []CaptionLanguageMappingsParameters `json:"captionLanguageMappings,omitempty" tf:"caption_language_mappings,omitempty"` + + // +kubebuilder:validation:Optional + CaptionLanguageSetting *string `json:"captionLanguageSetting,omitempty" tf:"caption_language_setting,omitempty"` + + // +kubebuilder:validation:Optional + ClientCache *string `json:"clientCache,omitempty" tf:"client_cache,omitempty"` + + // +kubebuilder:validation:Optional + CodecSpecification *string `json:"codecSpecification,omitempty" tf:"codec_specification,omitempty"` + + // +kubebuilder:validation:Optional + ConstantIv *string `json:"constantIv,omitempty" tf:"constant_iv,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + // +kubebuilder:validation:Optional + Destination *HlsGroupSettingsDestinationParameters `json:"destination" tf:"destination,omitempty"` + + // +kubebuilder:validation:Optional + DirectoryStructure *string `json:"directoryStructure,omitempty" tf:"directory_structure,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + DiscontinuityTags *string `json:"discontinuityTags,omitempty" tf:"discontinuity_tags,omitempty"` + + // +kubebuilder:validation:Optional + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + HlsCdnSettings []HlsCdnSettingsParameters `json:"hlsCdnSettings,omitempty" tf:"hls_cdn_settings,omitempty"` + + // +kubebuilder:validation:Optional + HlsId3SegmentTagging *string `json:"hlsId3SegmentTagging,omitempty" tf:"hls_id3_segment_tagging,omitempty"` + + // +kubebuilder:validation:Optional + IframeOnlyPlaylists *string `json:"iframeOnlyPlaylists,omitempty" tf:"iframe_only_playlists,omitempty"` + + // +kubebuilder:validation:Optional + IncompleteSegmentBehavior *string `json:"incompleteSegmentBehavior,omitempty" tf:"incomplete_segment_behavior,omitempty"` + + // +kubebuilder:validation:Optional + IndexNSegments *float64 `json:"indexNSegments,omitempty" tf:"index_n_segments,omitempty"` + + // Controls the behavior of the RTMP group if input becomes unavailable. + // +kubebuilder:validation:Optional + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // +kubebuilder:validation:Optional + IvInManifest *string `json:"ivInManifest,omitempty" tf:"iv_in_manifest,omitempty"` + + // The source for the timecode that will be associated with the events outputs. 
+ // +kubebuilder:validation:Optional + IvSource *string `json:"ivSource,omitempty" tf:"iv_source,omitempty"` + + // +kubebuilder:validation:Optional + KeepSegments *float64 `json:"keepSegments,omitempty" tf:"keep_segments,omitempty"` + + // +kubebuilder:validation:Optional + KeyFormat *string `json:"keyFormat,omitempty" tf:"key_format,omitempty"` + + // +kubebuilder:validation:Optional + KeyFormatVersions *string `json:"keyFormatVersions,omitempty" tf:"key_format_versions,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + KeyProviderSettings *KeyProviderSettingsParameters `json:"keyProviderSettings,omitempty" tf:"key_provider_settings,omitempty"` + + // +kubebuilder:validation:Optional + ManifestCompression *string `json:"manifestCompression,omitempty" tf:"manifest_compression,omitempty"` + + // +kubebuilder:validation:Optional + ManifestDurationFormat *string `json:"manifestDurationFormat,omitempty" tf:"manifest_duration_format,omitempty"` + + // +kubebuilder:validation:Optional + MinSegmentLength *float64 `json:"minSegmentLength,omitempty" tf:"min_segment_length,omitempty"` + + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // +kubebuilder:validation:Optional + OutputSelection *string `json:"outputSelection,omitempty" tf:"output_selection,omitempty"` + + // +kubebuilder:validation:Optional + ProgramDateTime *string `json:"programDateTime,omitempty" tf:"program_date_time,omitempty"` + + // +kubebuilder:validation:Optional + ProgramDateTimeClock *string `json:"programDateTimeClock,omitempty" tf:"program_date_time_clock,omitempty"` + + // +kubebuilder:validation:Optional + ProgramDateTimePeriod *float64 `json:"programDateTimePeriod,omitempty" tf:"program_date_time_period,omitempty"` + + // +kubebuilder:validation:Optional + RedundantManifest *string `json:"redundantManifest,omitempty" 
tf:"redundant_manifest,omitempty"` + + // +kubebuilder:validation:Optional + SegmentLength *float64 `json:"segmentLength,omitempty" tf:"segment_length,omitempty"` + + // +kubebuilder:validation:Optional + SegmentsPerSubdirectory *float64 `json:"segmentsPerSubdirectory,omitempty" tf:"segments_per_subdirectory,omitempty"` + + // - Maximum CDI input resolution. + // +kubebuilder:validation:Optional + StreamInfResolution *string `json:"streamInfResolution,omitempty" tf:"stream_inf_resolution,omitempty"` + + // +kubebuilder:validation:Optional + TSFileMode *string `json:"tsFileMode,omitempty" tf:"ts_file_mode,omitempty"` + + // Indicates ID3 frame that has the timecode. + // +kubebuilder:validation:Optional + TimedMetadataId3Frame *string `json:"timedMetadataId3Frame,omitempty" tf:"timed_metadata_id3_frame,omitempty"` + + // +kubebuilder:validation:Optional + TimedMetadataId3Period *float64 `json:"timedMetadataId3Period,omitempty" tf:"timed_metadata_id3_period,omitempty"` + + // +kubebuilder:validation:Optional + TimestampDeltaMilliseconds *float64 `json:"timestampDeltaMilliseconds,omitempty" tf:"timestamp_delta_milliseconds,omitempty"` +} + +type HlsInputSettingsInitParameters struct { + + // The bitrate is specified in bits per second, as in an HLS manifest. + Bandwidth *float64 `json:"bandwidth,omitempty" tf:"bandwidth,omitempty"` + + // Buffer segments. + BufferSegments *float64 `json:"bufferSegments,omitempty" tf:"buffer_segments,omitempty"` + + // The number of consecutive times that attempts to read a manifest or segment must fail before the input is considered unavailable. + Retries *float64 `json:"retries,omitempty" tf:"retries,omitempty"` + + // The number of seconds between retries when an attempt to read a manifest or segment fails. + RetryInterval *float64 `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + + // The source for the timecode that will be associated with the events outputs. 
+ Scte35Source *string `json:"scte35Source,omitempty" tf:"scte35_source,omitempty"` +} + +type HlsInputSettingsObservation struct { + + // The bitrate is specified in bits per second, as in an HLS manifest. + Bandwidth *float64 `json:"bandwidth,omitempty" tf:"bandwidth,omitempty"` + + // Buffer segments. + BufferSegments *float64 `json:"bufferSegments,omitempty" tf:"buffer_segments,omitempty"` + + // The number of consecutive times that attempts to read a manifest or segment must fail before the input is considered unavailable. + Retries *float64 `json:"retries,omitempty" tf:"retries,omitempty"` + + // The number of seconds between retries when an attempt to read a manifest or segment fails. + RetryInterval *float64 `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + + // The source for the timecode that will be associated with the events outputs. + Scte35Source *string `json:"scte35Source,omitempty" tf:"scte35_source,omitempty"` +} + +type HlsInputSettingsParameters struct { + + // The bitrate is specified in bits per second, as in an HLS manifest. + // +kubebuilder:validation:Optional + Bandwidth *float64 `json:"bandwidth,omitempty" tf:"bandwidth,omitempty"` + + // Buffer segments. + // +kubebuilder:validation:Optional + BufferSegments *float64 `json:"bufferSegments,omitempty" tf:"buffer_segments,omitempty"` + + // The number of consecutive times that attempts to read a manifest or segment must fail before the input is considered unavailable. + // +kubebuilder:validation:Optional + Retries *float64 `json:"retries,omitempty" tf:"retries,omitempty"` + + // The number of seconds between retries when an attempt to read a manifest or segment fails. + // +kubebuilder:validation:Optional + RetryInterval *float64 `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + + // The source for the timecode that will be associated with the events outputs. 
+ // +kubebuilder:validation:Optional + Scte35Source *string `json:"scte35Source,omitempty" tf:"scte35_source,omitempty"` +} + +type HlsMediaStoreSettingsInitParameters struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + MediaStoreStorageClass *string `json:"mediaStoreStorageClass,omitempty" tf:"media_store_storage_class,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type HlsMediaStoreSettingsObservation struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + MediaStoreStorageClass *string `json:"mediaStoreStorageClass,omitempty" tf:"media_store_storage_class,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type HlsMediaStoreSettingsParameters struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. 
+ // +kubebuilder:validation:Optional + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // +kubebuilder:validation:Optional + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + // +kubebuilder:validation:Optional + MediaStoreStorageClass *string `json:"mediaStoreStorageClass,omitempty" tf:"media_store_storage_class,omitempty"` + + // Number of retry attempts. + // +kubebuilder:validation:Optional + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + // +kubebuilder:validation:Optional + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type HlsOutputSettingsInitParameters struct { + H265PackagingType *string `json:"h265PackagingType,omitempty" tf:"h265_packaging_type,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsSettings *HlsSettingsInitParameters `json:"hlsSettings,omitempty" tf:"hls_settings,omitempty"` + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` + + SegmentModifier *string `json:"segmentModifier,omitempty" tf:"segment_modifier,omitempty"` +} + +type HlsOutputSettingsObservation struct { + H265PackagingType *string `json:"h265PackagingType,omitempty" tf:"h265_packaging_type,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsSettings *HlsSettingsObservation `json:"hlsSettings,omitempty" tf:"hls_settings,omitempty"` + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. 
+ NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` + + SegmentModifier *string `json:"segmentModifier,omitempty" tf:"segment_modifier,omitempty"` +} + +type HlsOutputSettingsParameters struct { + + // +kubebuilder:validation:Optional + H265PackagingType *string `json:"h265PackagingType,omitempty" tf:"h265_packaging_type,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + HlsSettings *HlsSettingsParameters `json:"hlsSettings" tf:"hls_settings,omitempty"` + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. + // +kubebuilder:validation:Optional + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` + + // +kubebuilder:validation:Optional + SegmentModifier *string `json:"segmentModifier,omitempty" tf:"segment_modifier,omitempty"` +} + +type HlsS3SettingsInitParameters struct { + + // Specify the canned ACL to apply to each S3 request. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` +} + +type HlsS3SettingsObservation struct { + + // Specify the canned ACL to apply to each S3 request. + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` +} + +type HlsS3SettingsParameters struct { + + // Specify the canned ACL to apply to each S3 request. + // +kubebuilder:validation:Optional + CannedACL *string `json:"cannedAcl,omitempty" tf:"canned_acl,omitempty"` +} + +type HlsSettingsInitParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + AudioOnlyHlsSettings *AudioOnlyHlsSettingsInitParameters `json:"audioOnlyHlsSettings,omitempty" tf:"audio_only_hls_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ Fmp4HlsSettings *Fmp4HlsSettingsInitParameters `json:"fmp4HlsSettings,omitempty" tf:"fmp4_hls_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + FrameCaptureHlsSettings *FrameCaptureHlsSettingsInitParameters `json:"frameCaptureHlsSettings,omitempty" tf:"frame_capture_hls_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + StandardHlsSettings *StandardHlsSettingsInitParameters `json:"standardHlsSettings,omitempty" tf:"standard_hls_settings,omitempty"` +} + +type HlsSettingsObservation struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + AudioOnlyHlsSettings *AudioOnlyHlsSettingsObservation `json:"audioOnlyHlsSettings,omitempty" tf:"audio_only_hls_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + Fmp4HlsSettings *Fmp4HlsSettingsObservation `json:"fmp4HlsSettings,omitempty" tf:"fmp4_hls_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + FrameCaptureHlsSettings *FrameCaptureHlsSettingsParameters `json:"frameCaptureHlsSettings,omitempty" tf:"frame_capture_hls_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + StandardHlsSettings *StandardHlsSettingsObservation `json:"standardHlsSettings,omitempty" tf:"standard_hls_settings,omitempty"` +} + +type HlsSettingsParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ // +kubebuilder:validation:Optional + AudioOnlyHlsSettings *AudioOnlyHlsSettingsParameters `json:"audioOnlyHlsSettings,omitempty" tf:"audio_only_hls_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + Fmp4HlsSettings *Fmp4HlsSettingsParameters `json:"fmp4HlsSettings,omitempty" tf:"fmp4_hls_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + FrameCaptureHlsSettings *FrameCaptureHlsSettingsParameters `json:"frameCaptureHlsSettings,omitempty" tf:"frame_capture_hls_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + StandardHlsSettings *StandardHlsSettingsParameters `json:"standardHlsSettings,omitempty" tf:"standard_hls_settings,omitempty"` +} + +type HlsWebdavSettingsInitParameters struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + HTTPTransferMode *string `json:"httpTransferMode,omitempty" tf:"http_transfer_mode,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type HlsWebdavSettingsObservation struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. 
+ ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + HTTPTransferMode *string `json:"httpTransferMode,omitempty" tf:"http_transfer_mode,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type HlsWebdavSettingsParameters struct { + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + // +kubebuilder:validation:Optional + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // +kubebuilder:validation:Optional + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + // +kubebuilder:validation:Optional + HTTPTransferMode *string `json:"httpTransferMode,omitempty" tf:"http_transfer_mode,omitempty"` + + // Number of retry attempts. + // +kubebuilder:validation:Optional + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + // +kubebuilder:validation:Optional + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type InputAttachmentsInitParameters struct { + + // User-specified settings for defining what the conditions are for declaring the input unhealthy and failing over to a different input. See Automatic Input Failover Settings for more details. + AutomaticInputFailoverSettings *AutomaticInputFailoverSettingsInitParameters `json:"automaticInputFailoverSettings,omitempty" tf:"automatic_input_failover_settings,omitempty"` + + // User-specified name for the attachment. 
+ InputAttachmentName *string `json:"inputAttachmentName,omitempty" tf:"input_attachment_name,omitempty"` + + // The ID of the input. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/medialive/v1beta2.Input + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + InputID *string `json:"inputId,omitempty" tf:"input_id,omitempty"` + + // Reference to a Input in medialive to populate inputId. + // +kubebuilder:validation:Optional + InputIDRef *v1.Reference `json:"inputIdRef,omitempty" tf:"-"` + + // Selector for a Input in medialive to populate inputId. + // +kubebuilder:validation:Optional + InputIDSelector *v1.Selector `json:"inputIdSelector,omitempty" tf:"-"` + + // Settings of an input. See Input Settings for more details. + InputSettings *InputSettingsInitParameters `json:"inputSettings,omitempty" tf:"input_settings,omitempty"` +} + +type InputAttachmentsObservation struct { + + // User-specified settings for defining what the conditions are for declaring the input unhealthy and failing over to a different input. See Automatic Input Failover Settings for more details. + AutomaticInputFailoverSettings *AutomaticInputFailoverSettingsObservation `json:"automaticInputFailoverSettings,omitempty" tf:"automatic_input_failover_settings,omitempty"` + + // User-specified name for the attachment. + InputAttachmentName *string `json:"inputAttachmentName,omitempty" tf:"input_attachment_name,omitempty"` + + // The ID of the input. + InputID *string `json:"inputId,omitempty" tf:"input_id,omitempty"` + + // Settings of an input. See Input Settings for more details. + InputSettings *InputSettingsObservation `json:"inputSettings,omitempty" tf:"input_settings,omitempty"` +} + +type InputAttachmentsParameters struct { + + // User-specified settings for defining what the conditions are for declaring the input unhealthy and failing over to a different input. 
See Automatic Input Failover Settings for more details. + // +kubebuilder:validation:Optional + AutomaticInputFailoverSettings *AutomaticInputFailoverSettingsParameters `json:"automaticInputFailoverSettings,omitempty" tf:"automatic_input_failover_settings,omitempty"` + + // User-specified name for the attachment. + // +kubebuilder:validation:Optional + InputAttachmentName *string `json:"inputAttachmentName" tf:"input_attachment_name,omitempty"` + + // The ID of the input. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/medialive/v1beta2.Input + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + InputID *string `json:"inputId,omitempty" tf:"input_id,omitempty"` + + // Reference to a Input in medialive to populate inputId. + // +kubebuilder:validation:Optional + InputIDRef *v1.Reference `json:"inputIdRef,omitempty" tf:"-"` + + // Selector for a Input in medialive to populate inputId. + // +kubebuilder:validation:Optional + InputIDSelector *v1.Selector `json:"inputIdSelector,omitempty" tf:"-"` + + // Settings of an input. See Input Settings for more details. 
+ // +kubebuilder:validation:Optional + InputSettings *InputSettingsParameters `json:"inputSettings,omitempty" tf:"input_settings,omitempty"` +} + +type InputChannelLevelsInitParameters struct { + Gain *float64 `json:"gain,omitempty" tf:"gain,omitempty"` + + InputChannel *float64 `json:"inputChannel,omitempty" tf:"input_channel,omitempty"` +} + +type InputChannelLevelsObservation struct { + Gain *float64 `json:"gain,omitempty" tf:"gain,omitempty"` + + InputChannel *float64 `json:"inputChannel,omitempty" tf:"input_channel,omitempty"` +} + +type InputChannelLevelsParameters struct { + + // +kubebuilder:validation:Optional + Gain *float64 `json:"gain" tf:"gain,omitempty"` + + // +kubebuilder:validation:Optional + InputChannel *float64 `json:"inputChannel" tf:"input_channel,omitempty"` +} + +type InputLossBehaviorInitParameters struct { + BlackFrameMsec *float64 `json:"blackFrameMsec,omitempty" tf:"black_frame_msec,omitempty"` + + InputLossImageColor *string `json:"inputLossImageColor,omitempty" tf:"input_loss_image_color,omitempty"` + + InputLossImageSlate *InputLossImageSlateInitParameters `json:"inputLossImageSlate,omitempty" tf:"input_loss_image_slate,omitempty"` + + InputLossImageType *string `json:"inputLossImageType,omitempty" tf:"input_loss_image_type,omitempty"` + + RepeatFrameMsec *float64 `json:"repeatFrameMsec,omitempty" tf:"repeat_frame_msec,omitempty"` +} + +type InputLossBehaviorObservation struct { + BlackFrameMsec *float64 `json:"blackFrameMsec,omitempty" tf:"black_frame_msec,omitempty"` + + InputLossImageColor *string `json:"inputLossImageColor,omitempty" tf:"input_loss_image_color,omitempty"` + + InputLossImageSlate *InputLossImageSlateObservation `json:"inputLossImageSlate,omitempty" tf:"input_loss_image_slate,omitempty"` + + InputLossImageType *string `json:"inputLossImageType,omitempty" tf:"input_loss_image_type,omitempty"` + + RepeatFrameMsec *float64 `json:"repeatFrameMsec,omitempty" tf:"repeat_frame_msec,omitempty"` +} + +type 
InputLossBehaviorParameters struct { + + // +kubebuilder:validation:Optional + BlackFrameMsec *float64 `json:"blackFrameMsec,omitempty" tf:"black_frame_msec,omitempty"` + + // +kubebuilder:validation:Optional + InputLossImageColor *string `json:"inputLossImageColor,omitempty" tf:"input_loss_image_color,omitempty"` + + // +kubebuilder:validation:Optional + InputLossImageSlate *InputLossImageSlateParameters `json:"inputLossImageSlate,omitempty" tf:"input_loss_image_slate,omitempty"` + + // +kubebuilder:validation:Optional + InputLossImageType *string `json:"inputLossImageType,omitempty" tf:"input_loss_image_type,omitempty"` + + // +kubebuilder:validation:Optional + RepeatFrameMsec *float64 `json:"repeatFrameMsec,omitempty" tf:"repeat_frame_msec,omitempty"` +} + +type InputLossImageSlateInitParameters struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InputLossImageSlateObservation struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InputLossImageSlateParameters struct { + + // Key used to extract the password from EC2 Parameter store. + // +kubebuilder:validation:Optional + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. 
+ // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` + + // Username for destination. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InputLossSettingsInitParameters struct { + + // The amount of time (in milliseconds) that no input is detected. After that time, an input failover will occur. + InputLossThresholdMsec *float64 `json:"inputLossThresholdMsec,omitempty" tf:"input_loss_threshold_msec,omitempty"` +} + +type InputLossSettingsObservation struct { + + // The amount of time (in milliseconds) that no input is detected. After that time, an input failover will occur. + InputLossThresholdMsec *float64 `json:"inputLossThresholdMsec,omitempty" tf:"input_loss_threshold_msec,omitempty"` +} + +type InputLossSettingsParameters struct { + + // The amount of time (in milliseconds) that no input is detected. After that time, an input failover will occur. + // +kubebuilder:validation:Optional + InputLossThresholdMsec *float64 `json:"inputLossThresholdMsec,omitempty" tf:"input_loss_threshold_msec,omitempty"` +} + +type InputSettingsInitParameters struct { + AudioSelector []AudioSelectorInitParameters `json:"audioSelector,omitempty" tf:"audio_selector,omitempty"` + + CaptionSelector []CaptionSelectorInitParameters `json:"captionSelector,omitempty" tf:"caption_selector,omitempty"` + + // Enable or disable the deblock filter when filtering. + DeblockFilter *string `json:"deblockFilter,omitempty" tf:"deblock_filter,omitempty"` + + // Enable or disable the denoise filter when filtering. + DenoiseFilter *string `json:"denoiseFilter,omitempty" tf:"denoise_filter,omitempty"` + + // Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest). + FilterStrength *float64 `json:"filterStrength,omitempty" tf:"filter_strength,omitempty"` + + // Turns on the filter for the input. 
+ InputFilter *string `json:"inputFilter,omitempty" tf:"input_filter,omitempty"` + + // Input settings. See Network Input Settings for more details. + NetworkInputSettings *NetworkInputSettingsInitParameters `json:"networkInputSettings,omitempty" tf:"network_input_settings,omitempty"` + + // PID from which to read SCTE-35 messages. + Scte35Pid *float64 `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + // Specifies whether to extract applicable ancillary data from a SMPTE-2038 source in the input. + Smpte2038DataPreference *string `json:"smpte2038DataPreference,omitempty" tf:"smpte2038_data_preference,omitempty"` + + // Loop input if it is a file. + SourceEndBehavior *string `json:"sourceEndBehavior,omitempty" tf:"source_end_behavior,omitempty"` + + VideoSelector *VideoSelectorInitParameters `json:"videoSelector,omitempty" tf:"video_selector,omitempty"` +} + +type InputSettingsObservation struct { + AudioSelector []AudioSelectorObservation `json:"audioSelector,omitempty" tf:"audio_selector,omitempty"` + + CaptionSelector []CaptionSelectorObservation `json:"captionSelector,omitempty" tf:"caption_selector,omitempty"` + + // Enable or disable the deblock filter when filtering. + DeblockFilter *string `json:"deblockFilter,omitempty" tf:"deblock_filter,omitempty"` + + // Enable or disable the denoise filter when filtering. + DenoiseFilter *string `json:"denoiseFilter,omitempty" tf:"denoise_filter,omitempty"` + + // Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest). + FilterStrength *float64 `json:"filterStrength,omitempty" tf:"filter_strength,omitempty"` + + // Turns on the filter for the input. + InputFilter *string `json:"inputFilter,omitempty" tf:"input_filter,omitempty"` + + // Input settings. See Network Input Settings for more details. + NetworkInputSettings *NetworkInputSettingsObservation `json:"networkInputSettings,omitempty" tf:"network_input_settings,omitempty"` + + // PID from which to read SCTE-35 messages. 
+ Scte35Pid *float64 `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + // Specifies whether to extract applicable ancillary data from a SMPTE-2038 source in the input. + Smpte2038DataPreference *string `json:"smpte2038DataPreference,omitempty" tf:"smpte2038_data_preference,omitempty"` + + // Loop input if it is a file. + SourceEndBehavior *string `json:"sourceEndBehavior,omitempty" tf:"source_end_behavior,omitempty"` + + VideoSelector *VideoSelectorObservation `json:"videoSelector,omitempty" tf:"video_selector,omitempty"` +} + +type InputSettingsParameters struct { + + // +kubebuilder:validation:Optional + AudioSelector []AudioSelectorParameters `json:"audioSelector,omitempty" tf:"audio_selector,omitempty"` + + // +kubebuilder:validation:Optional + CaptionSelector []CaptionSelectorParameters `json:"captionSelector,omitempty" tf:"caption_selector,omitempty"` + + // Enable or disable the deblock filter when filtering. + // +kubebuilder:validation:Optional + DeblockFilter *string `json:"deblockFilter,omitempty" tf:"deblock_filter,omitempty"` + + // Enable or disable the denoise filter when filtering. + // +kubebuilder:validation:Optional + DenoiseFilter *string `json:"denoiseFilter,omitempty" tf:"denoise_filter,omitempty"` + + // Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest). + // +kubebuilder:validation:Optional + FilterStrength *float64 `json:"filterStrength,omitempty" tf:"filter_strength,omitempty"` + + // Turns on the filter for the input. + // +kubebuilder:validation:Optional + InputFilter *string `json:"inputFilter,omitempty" tf:"input_filter,omitempty"` + + // Input settings. See Network Input Settings for more details. + // +kubebuilder:validation:Optional + NetworkInputSettings *NetworkInputSettingsParameters `json:"networkInputSettings,omitempty" tf:"network_input_settings,omitempty"` + + // PID from which to read SCTE-35 messages. 
+ // +kubebuilder:validation:Optional + Scte35Pid *float64 `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + // Specifies whether to extract applicable ancillary data from a SMPTE-2038 source in the input. + // +kubebuilder:validation:Optional + Smpte2038DataPreference *string `json:"smpte2038DataPreference,omitempty" tf:"smpte2038_data_preference,omitempty"` + + // Loop input if it is a file. + // +kubebuilder:validation:Optional + SourceEndBehavior *string `json:"sourceEndBehavior,omitempty" tf:"source_end_behavior,omitempty"` + + // +kubebuilder:validation:Optional + VideoSelector *VideoSelectorParameters `json:"videoSelector,omitempty" tf:"video_selector,omitempty"` +} + +type InputSpecificationInitParameters struct { + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // - Maximum CDI input resolution. + InputResolution *string `json:"inputResolution,omitempty" tf:"input_resolution,omitempty"` + + // Average bitrate in bits/second. + MaximumBitrate *string `json:"maximumBitrate,omitempty" tf:"maximum_bitrate,omitempty"` +} + +type InputSpecificationObservation struct { + Codec *string `json:"codec,omitempty" tf:"codec,omitempty"` + + // - Maximum CDI input resolution. + InputResolution *string `json:"inputResolution,omitempty" tf:"input_resolution,omitempty"` + + // Average bitrate in bits/second. + MaximumBitrate *string `json:"maximumBitrate,omitempty" tf:"maximum_bitrate,omitempty"` +} + +type InputSpecificationParameters struct { + + // +kubebuilder:validation:Optional + Codec *string `json:"codec" tf:"codec,omitempty"` + + // - Maximum CDI input resolution. + // +kubebuilder:validation:Optional + InputResolution *string `json:"inputResolution" tf:"input_resolution,omitempty"` + + // Average bitrate in bits/second. 
+ // +kubebuilder:validation:Optional + MaximumBitrate *string `json:"maximumBitrate" tf:"maximum_bitrate,omitempty"` +} + +type KeyProviderServerInitParameters struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KeyProviderServerObservation struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KeyProviderServerParameters struct { + + // Key used to extract the password from EC2 Parameter store. + // +kubebuilder:validation:Optional + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // – Path to a file accessible to the live stream. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` + + // Username for destination. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KeyProviderSettingsInitParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + StaticKeySettings []StaticKeySettingsInitParameters `json:"staticKeySettings,omitempty" tf:"static_key_settings,omitempty"` +} + +type KeyProviderSettingsObservation struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ StaticKeySettings []StaticKeySettingsObservation `json:"staticKeySettings,omitempty" tf:"static_key_settings,omitempty"` +} + +type KeyProviderSettingsParameters struct { + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + StaticKeySettings []StaticKeySettingsParameters `json:"staticKeySettings,omitempty" tf:"static_key_settings,omitempty"` +} + +type M2TsSettingsDvbNitSettingsInitParameters struct { + + // User-specified id. Ths is used in an output group or an output. + NetworkID *float64 `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Name of the Channel. + NetworkName *string `json:"networkName,omitempty" tf:"network_name,omitempty"` + + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type M2TsSettingsDvbNitSettingsObservation struct { + + // User-specified id. Ths is used in an output group or an output. + NetworkID *float64 `json:"networkId,omitempty" tf:"network_id,omitempty"` + + // Name of the Channel. + NetworkName *string `json:"networkName,omitempty" tf:"network_name,omitempty"` + + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type M2TsSettingsDvbNitSettingsParameters struct { + + // User-specified id. Ths is used in an output group or an output. + // +kubebuilder:validation:Optional + NetworkID *float64 `json:"networkId" tf:"network_id,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + NetworkName *string `json:"networkName" tf:"network_name,omitempty"` + + // +kubebuilder:validation:Optional + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type M2TsSettingsDvbSdtSettingsInitParameters struct { + OutputSdt *string `json:"outputSdt,omitempty" tf:"output_sdt,omitempty"` + + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` + + // Name of the Channel. 
+ ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Name of the Channel. + ServiceProviderName *string `json:"serviceProviderName,omitempty" tf:"service_provider_name,omitempty"` +} + +type M2TsSettingsDvbSdtSettingsObservation struct { + OutputSdt *string `json:"outputSdt,omitempty" tf:"output_sdt,omitempty"` + + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` + + // Name of the Channel. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Name of the Channel. + ServiceProviderName *string `json:"serviceProviderName,omitempty" tf:"service_provider_name,omitempty"` +} + +type M2TsSettingsDvbSdtSettingsParameters struct { + + // +kubebuilder:validation:Optional + OutputSdt *string `json:"outputSdt,omitempty" tf:"output_sdt,omitempty"` + + // +kubebuilder:validation:Optional + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Name of the Channel. 
+ // +kubebuilder:validation:Optional + ServiceProviderName *string `json:"serviceProviderName,omitempty" tf:"service_provider_name,omitempty"` +} + +type M2TsSettingsDvbTdtSettingsInitParameters struct { + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type M2TsSettingsDvbTdtSettingsObservation struct { + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type M2TsSettingsDvbTdtSettingsParameters struct { + + // +kubebuilder:validation:Optional + RepInterval *float64 `json:"repInterval,omitempty" tf:"rep_interval,omitempty"` +} + +type M2TsSettingsInitParameters struct { + AbsentInputAudioBehavior *string `json:"absentInputAudioBehavior,omitempty" tf:"absent_input_audio_behavior,omitempty"` + + Arib *string `json:"arib,omitempty" tf:"arib,omitempty"` + + // Selects a specific PID from within a source. + AribCaptionsPid *string `json:"aribCaptionsPid,omitempty" tf:"arib_captions_pid,omitempty"` + + AribCaptionsPidControl *string `json:"aribCaptionsPidControl,omitempty" tf:"arib_captions_pid_control,omitempty"` + + AudioBufferModel *string `json:"audioBufferModel,omitempty" tf:"audio_buffer_model,omitempty"` + + AudioFramesPerPes *float64 `json:"audioFramesPerPes,omitempty" tf:"audio_frames_per_pes,omitempty"` + + AudioPids *string `json:"audioPids,omitempty" tf:"audio_pids,omitempty"` + + AudioStreamType *string `json:"audioStreamType,omitempty" tf:"audio_stream_type,omitempty"` + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + BufferModel *string `json:"bufferModel,omitempty" tf:"buffer_model,omitempty"` + + CcDescriptor *string `json:"ccDescriptor,omitempty" tf:"cc_descriptor,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ DvbNitSettings *DvbNitSettingsInitParameters `json:"dvbNitSettings,omitempty" tf:"dvb_nit_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbSdtSettings *DvbSdtSettingsInitParameters `json:"dvbSdtSettings,omitempty" tf:"dvb_sdt_settings,omitempty"` + + DvbSubPids *string `json:"dvbSubPids,omitempty" tf:"dvb_sub_pids,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbTdtSettings *DvbTdtSettingsInitParameters `json:"dvbTdtSettings,omitempty" tf:"dvb_tdt_settings,omitempty"` + + // Selects a specific PID from within a source. + DvbTeletextPid *string `json:"dvbTeletextPid,omitempty" tf:"dvb_teletext_pid,omitempty"` + + Ebif *string `json:"ebif,omitempty" tf:"ebif,omitempty"` + + EbpAudioInterval *string `json:"ebpAudioInterval,omitempty" tf:"ebp_audio_interval,omitempty"` + + EbpLookaheadMs *float64 `json:"ebpLookaheadMs,omitempty" tf:"ebp_lookahead_ms,omitempty"` + + EbpPlacement *string `json:"ebpPlacement,omitempty" tf:"ebp_placement,omitempty"` + + // Selects a specific PID from within a source. + EcmPid *string `json:"ecmPid,omitempty" tf:"ecm_pid,omitempty"` + + EsRateInPes *string `json:"esRateInPes,omitempty" tf:"es_rate_in_pes,omitempty"` + + // Selects a specific PID from within a source. + EtvPlatformPid *string `json:"etvPlatformPid,omitempty" tf:"etv_platform_pid,omitempty"` + + // Selects a specific PID from within a source. 
+ EtvSignalPid *string `json:"etvSignalPid,omitempty" tf:"etv_signal_pid,omitempty"` + + FragmentTime *float64 `json:"fragmentTime,omitempty" tf:"fragment_time,omitempty"` + + Klv *string `json:"klv,omitempty" tf:"klv,omitempty"` + + KlvDataPids *string `json:"klvDataPids,omitempty" tf:"klv_data_pids,omitempty"` + + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + // Average bitrate in bits/second. + NullPacketBitrate *float64 `json:"nullPacketBitrate,omitempty" tf:"null_packet_bitrate,omitempty"` + + PatInterval *float64 `json:"patInterval,omitempty" tf:"pat_interval,omitempty"` + + PcrControl *string `json:"pcrControl,omitempty" tf:"pcr_control,omitempty"` + + PcrPeriod *float64 `json:"pcrPeriod,omitempty" tf:"pcr_period,omitempty"` + + // Selects a specific PID from within a source. + PcrPid *string `json:"pcrPid,omitempty" tf:"pcr_pid,omitempty"` + + PmtInterval *float64 `json:"pmtInterval,omitempty" tf:"pmt_interval,omitempty"` + + // Selects a specific PID from within a source. + PmtPid *string `json:"pmtPid,omitempty" tf:"pmt_pid,omitempty"` + + ProgramNum *float64 `json:"programNum,omitempty" tf:"program_num,omitempty"` + + RateMode *string `json:"rateMode,omitempty" tf:"rate_mode,omitempty"` + + Scte27Pids *string `json:"scte27Pids,omitempty" tf:"scte27_pids,omitempty"` + + Scte35Control *string `json:"scte35Control,omitempty" tf:"scte35_control,omitempty"` + + // PID from which to read SCTE-35 messages. 
+ Scte35Pid *string `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + SegmentationMarkers *string `json:"segmentationMarkers,omitempty" tf:"segmentation_markers,omitempty"` + + SegmentationStyle *string `json:"segmentationStyle,omitempty" tf:"segmentation_style,omitempty"` + + SegmentationTime *float64 `json:"segmentationTime,omitempty" tf:"segmentation_time,omitempty"` + + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` + + // Selects a specific PID from within a source. + TimedMetadataPid *string `json:"timedMetadataPid,omitempty" tf:"timed_metadata_pid,omitempty"` + + // User-specified id. Ths is used in an output group or an output. + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Selects a specific PID from within a source. + VideoPid *string `json:"videoPid,omitempty" tf:"video_pid,omitempty"` +} + +type M2TsSettingsObservation struct { + AbsentInputAudioBehavior *string `json:"absentInputAudioBehavior,omitempty" tf:"absent_input_audio_behavior,omitempty"` + + Arib *string `json:"arib,omitempty" tf:"arib,omitempty"` + + // Selects a specific PID from within a source. + AribCaptionsPid *string `json:"aribCaptionsPid,omitempty" tf:"arib_captions_pid,omitempty"` + + AribCaptionsPidControl *string `json:"aribCaptionsPidControl,omitempty" tf:"arib_captions_pid_control,omitempty"` + + AudioBufferModel *string `json:"audioBufferModel,omitempty" tf:"audio_buffer_model,omitempty"` + + AudioFramesPerPes *float64 `json:"audioFramesPerPes,omitempty" tf:"audio_frames_per_pes,omitempty"` + + AudioPids *string `json:"audioPids,omitempty" tf:"audio_pids,omitempty"` + + AudioStreamType *string `json:"audioStreamType,omitempty" tf:"audio_stream_type,omitempty"` + + // Average bitrate in bits/second. 
+ Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + BufferModel *string `json:"bufferModel,omitempty" tf:"buffer_model,omitempty"` + + CcDescriptor *string `json:"ccDescriptor,omitempty" tf:"cc_descriptor,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbNitSettings *DvbNitSettingsObservation `json:"dvbNitSettings,omitempty" tf:"dvb_nit_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbSdtSettings *DvbSdtSettingsObservation `json:"dvbSdtSettings,omitempty" tf:"dvb_sdt_settings,omitempty"` + + DvbSubPids *string `json:"dvbSubPids,omitempty" tf:"dvb_sub_pids,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + DvbTdtSettings *DvbTdtSettingsObservation `json:"dvbTdtSettings,omitempty" tf:"dvb_tdt_settings,omitempty"` + + // Selects a specific PID from within a source. + DvbTeletextPid *string `json:"dvbTeletextPid,omitempty" tf:"dvb_teletext_pid,omitempty"` + + Ebif *string `json:"ebif,omitempty" tf:"ebif,omitempty"` + + EbpAudioInterval *string `json:"ebpAudioInterval,omitempty" tf:"ebp_audio_interval,omitempty"` + + EbpLookaheadMs *float64 `json:"ebpLookaheadMs,omitempty" tf:"ebp_lookahead_ms,omitempty"` + + EbpPlacement *string `json:"ebpPlacement,omitempty" tf:"ebp_placement,omitempty"` + + // Selects a specific PID from within a source. + EcmPid *string `json:"ecmPid,omitempty" tf:"ecm_pid,omitempty"` + + EsRateInPes *string `json:"esRateInPes,omitempty" tf:"es_rate_in_pes,omitempty"` + + // Selects a specific PID from within a source. + EtvPlatformPid *string `json:"etvPlatformPid,omitempty" tf:"etv_platform_pid,omitempty"` + + // Selects a specific PID from within a source. 
+ EtvSignalPid *string `json:"etvSignalPid,omitempty" tf:"etv_signal_pid,omitempty"` + + FragmentTime *float64 `json:"fragmentTime,omitempty" tf:"fragment_time,omitempty"` + + Klv *string `json:"klv,omitempty" tf:"klv,omitempty"` + + KlvDataPids *string `json:"klvDataPids,omitempty" tf:"klv_data_pids,omitempty"` + + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + // Average bitrate in bits/second. + NullPacketBitrate *float64 `json:"nullPacketBitrate,omitempty" tf:"null_packet_bitrate,omitempty"` + + PatInterval *float64 `json:"patInterval,omitempty" tf:"pat_interval,omitempty"` + + PcrControl *string `json:"pcrControl,omitempty" tf:"pcr_control,omitempty"` + + PcrPeriod *float64 `json:"pcrPeriod,omitempty" tf:"pcr_period,omitempty"` + + // Selects a specific PID from within a source. + PcrPid *string `json:"pcrPid,omitempty" tf:"pcr_pid,omitempty"` + + PmtInterval *float64 `json:"pmtInterval,omitempty" tf:"pmt_interval,omitempty"` + + // Selects a specific PID from within a source. + PmtPid *string `json:"pmtPid,omitempty" tf:"pmt_pid,omitempty"` + + ProgramNum *float64 `json:"programNum,omitempty" tf:"program_num,omitempty"` + + RateMode *string `json:"rateMode,omitempty" tf:"rate_mode,omitempty"` + + Scte27Pids *string `json:"scte27Pids,omitempty" tf:"scte27_pids,omitempty"` + + Scte35Control *string `json:"scte35Control,omitempty" tf:"scte35_control,omitempty"` + + // PID from which to read SCTE-35 messages. 
+ Scte35Pid *string `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + SegmentationMarkers *string `json:"segmentationMarkers,omitempty" tf:"segmentation_markers,omitempty"` + + SegmentationStyle *string `json:"segmentationStyle,omitempty" tf:"segmentation_style,omitempty"` + + SegmentationTime *float64 `json:"segmentationTime,omitempty" tf:"segmentation_time,omitempty"` + + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` + + // Selects a specific PID from within a source. + TimedMetadataPid *string `json:"timedMetadataPid,omitempty" tf:"timed_metadata_pid,omitempty"` + + // User-specified id. Ths is used in an output group or an output. + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Selects a specific PID from within a source. + VideoPid *string `json:"videoPid,omitempty" tf:"video_pid,omitempty"` +} + +type M2TsSettingsParameters struct { + + // +kubebuilder:validation:Optional + AbsentInputAudioBehavior *string `json:"absentInputAudioBehavior,omitempty" tf:"absent_input_audio_behavior,omitempty"` + + // +kubebuilder:validation:Optional + Arib *string `json:"arib,omitempty" tf:"arib,omitempty"` + + // Selects a specific PID from within a source. 
+ // +kubebuilder:validation:Optional + AribCaptionsPid *string `json:"aribCaptionsPid,omitempty" tf:"arib_captions_pid,omitempty"` + + // +kubebuilder:validation:Optional + AribCaptionsPidControl *string `json:"aribCaptionsPidControl,omitempty" tf:"arib_captions_pid_control,omitempty"` + + // +kubebuilder:validation:Optional + AudioBufferModel *string `json:"audioBufferModel,omitempty" tf:"audio_buffer_model,omitempty"` + + // +kubebuilder:validation:Optional + AudioFramesPerPes *float64 `json:"audioFramesPerPes,omitempty" tf:"audio_frames_per_pes,omitempty"` + + // +kubebuilder:validation:Optional + AudioPids *string `json:"audioPids,omitempty" tf:"audio_pids,omitempty"` + + // +kubebuilder:validation:Optional + AudioStreamType *string `json:"audioStreamType,omitempty" tf:"audio_stream_type,omitempty"` + + // Average bitrate in bits/second. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // +kubebuilder:validation:Optional + BufferModel *string `json:"bufferModel,omitempty" tf:"buffer_model,omitempty"` + + // +kubebuilder:validation:Optional + CcDescriptor *string `json:"ccDescriptor,omitempty" tf:"cc_descriptor,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + DvbNitSettings *DvbNitSettingsParameters `json:"dvbNitSettings,omitempty" tf:"dvb_nit_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + DvbSdtSettings *DvbSdtSettingsParameters `json:"dvbSdtSettings,omitempty" tf:"dvb_sdt_settings,omitempty"` + + // +kubebuilder:validation:Optional + DvbSubPids *string `json:"dvbSubPids,omitempty" tf:"dvb_sub_pids,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ // +kubebuilder:validation:Optional + DvbTdtSettings *DvbTdtSettingsParameters `json:"dvbTdtSettings,omitempty" tf:"dvb_tdt_settings,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + DvbTeletextPid *string `json:"dvbTeletextPid,omitempty" tf:"dvb_teletext_pid,omitempty"` + + // +kubebuilder:validation:Optional + Ebif *string `json:"ebif,omitempty" tf:"ebif,omitempty"` + + // +kubebuilder:validation:Optional + EbpAudioInterval *string `json:"ebpAudioInterval,omitempty" tf:"ebp_audio_interval,omitempty"` + + // +kubebuilder:validation:Optional + EbpLookaheadMs *float64 `json:"ebpLookaheadMs,omitempty" tf:"ebp_lookahead_ms,omitempty"` + + // +kubebuilder:validation:Optional + EbpPlacement *string `json:"ebpPlacement,omitempty" tf:"ebp_placement,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + EcmPid *string `json:"ecmPid,omitempty" tf:"ecm_pid,omitempty"` + + // +kubebuilder:validation:Optional + EsRateInPes *string `json:"esRateInPes,omitempty" tf:"es_rate_in_pes,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + EtvPlatformPid *string `json:"etvPlatformPid,omitempty" tf:"etv_platform_pid,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + EtvSignalPid *string `json:"etvSignalPid,omitempty" tf:"etv_signal_pid,omitempty"` + + // +kubebuilder:validation:Optional + FragmentTime *float64 `json:"fragmentTime,omitempty" tf:"fragment_time,omitempty"` + + // +kubebuilder:validation:Optional + Klv *string `json:"klv,omitempty" tf:"klv,omitempty"` + + // +kubebuilder:validation:Optional + KlvDataPids *string `json:"klvDataPids,omitempty" tf:"klv_data_pids,omitempty"` + + // +kubebuilder:validation:Optional + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + // Average bitrate in bits/second. 
+ // +kubebuilder:validation:Optional + NullPacketBitrate *float64 `json:"nullPacketBitrate,omitempty" tf:"null_packet_bitrate,omitempty"` + + // +kubebuilder:validation:Optional + PatInterval *float64 `json:"patInterval,omitempty" tf:"pat_interval,omitempty"` + + // +kubebuilder:validation:Optional + PcrControl *string `json:"pcrControl,omitempty" tf:"pcr_control,omitempty"` + + // +kubebuilder:validation:Optional + PcrPeriod *float64 `json:"pcrPeriod,omitempty" tf:"pcr_period,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + PcrPid *string `json:"pcrPid,omitempty" tf:"pcr_pid,omitempty"` + + // +kubebuilder:validation:Optional + PmtInterval *float64 `json:"pmtInterval,omitempty" tf:"pmt_interval,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + PmtPid *string `json:"pmtPid,omitempty" tf:"pmt_pid,omitempty"` + + // +kubebuilder:validation:Optional + ProgramNum *float64 `json:"programNum,omitempty" tf:"program_num,omitempty"` + + // +kubebuilder:validation:Optional + RateMode *string `json:"rateMode,omitempty" tf:"rate_mode,omitempty"` + + // +kubebuilder:validation:Optional + Scte27Pids *string `json:"scte27Pids,omitempty" tf:"scte27_pids,omitempty"` + + // +kubebuilder:validation:Optional + Scte35Control *string `json:"scte35Control,omitempty" tf:"scte35_control,omitempty"` + + // PID from which to read SCTE-35 messages. 
+ // +kubebuilder:validation:Optional + Scte35Pid *string `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + // +kubebuilder:validation:Optional + SegmentationMarkers *string `json:"segmentationMarkers,omitempty" tf:"segmentation_markers,omitempty"` + + // +kubebuilder:validation:Optional + SegmentationStyle *string `json:"segmentationStyle,omitempty" tf:"segmentation_style,omitempty"` + + // +kubebuilder:validation:Optional + SegmentationTime *float64 `json:"segmentationTime,omitempty" tf:"segmentation_time,omitempty"` + + // +kubebuilder:validation:Optional + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + TimedMetadataPid *string `json:"timedMetadataPid,omitempty" tf:"timed_metadata_pid,omitempty"` + + // User-specified id. This is used in an output group or an output. + // +kubebuilder:validation:Optional + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + VideoPid *string `json:"videoPid,omitempty" tf:"video_pid,omitempty"` +} + +type M3U8SettingsInitParameters struct { + AudioFramesPerPes *float64 `json:"audioFramesPerPes,omitempty" tf:"audio_frames_per_pes,omitempty"` + + AudioPids *string `json:"audioPids,omitempty" tf:"audio_pids,omitempty"` + + // Selects a specific PID from within a source. + EcmPid *string `json:"ecmPid,omitempty" tf:"ecm_pid,omitempty"` + + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + PatInterval *float64 `json:"patInterval,omitempty" tf:"pat_interval,omitempty"` + + PcrControl *string `json:"pcrControl,omitempty" tf:"pcr_control,omitempty"` + + PcrPeriod *float64 `json:"pcrPeriod,omitempty" tf:"pcr_period,omitempty"` + + // Selects a specific PID from within a source. 
+ PcrPid *string `json:"pcrPid,omitempty" tf:"pcr_pid,omitempty"` + + PmtInterval *float64 `json:"pmtInterval,omitempty" tf:"pmt_interval,omitempty"` + + // Selects a specific PID from within a source. + PmtPid *string `json:"pmtPid,omitempty" tf:"pmt_pid,omitempty"` + + ProgramNum *float64 `json:"programNum,omitempty" tf:"program_num,omitempty"` + + Scte35Behavior *string `json:"scte35Behavior,omitempty" tf:"scte35_behavior,omitempty"` + + // PID from which to read SCTE-35 messages. + Scte35Pid *string `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` + + // Selects a specific PID from within a source. + TimedMetadataPid *string `json:"timedMetadataPid,omitempty" tf:"timed_metadata_pid,omitempty"` + + // User-specified id. This is used in an output group or an output. + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Selects a specific PID from within a source. + VideoPid *string `json:"videoPid,omitempty" tf:"video_pid,omitempty"` +} + +type M3U8SettingsObservation struct { + AudioFramesPerPes *float64 `json:"audioFramesPerPes,omitempty" tf:"audio_frames_per_pes,omitempty"` + + AudioPids *string `json:"audioPids,omitempty" tf:"audio_pids,omitempty"` + + // Selects a specific PID from within a source. + EcmPid *string `json:"ecmPid,omitempty" tf:"ecm_pid,omitempty"` + + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + PatInterval *float64 `json:"patInterval,omitempty" tf:"pat_interval,omitempty"` + + PcrControl *string `json:"pcrControl,omitempty" tf:"pcr_control,omitempty"` + + PcrPeriod *float64 `json:"pcrPeriod,omitempty" tf:"pcr_period,omitempty"` + + // Selects a specific PID from within a source. 
+ PcrPid *string `json:"pcrPid,omitempty" tf:"pcr_pid,omitempty"` + + PmtInterval *float64 `json:"pmtInterval,omitempty" tf:"pmt_interval,omitempty"` + + // Selects a specific PID from within a source. + PmtPid *string `json:"pmtPid,omitempty" tf:"pmt_pid,omitempty"` + + ProgramNum *float64 `json:"programNum,omitempty" tf:"program_num,omitempty"` + + Scte35Behavior *string `json:"scte35Behavior,omitempty" tf:"scte35_behavior,omitempty"` + + // PID from which to read SCTE-35 messages. + Scte35Pid *string `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` + + // Selects a specific PID from within a source. + TimedMetadataPid *string `json:"timedMetadataPid,omitempty" tf:"timed_metadata_pid,omitempty"` + + // User-specified id. This is used in an output group or an output. + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Selects a specific PID from within a source. + VideoPid *string `json:"videoPid,omitempty" tf:"video_pid,omitempty"` +} + +type M3U8SettingsParameters struct { + + // +kubebuilder:validation:Optional + AudioFramesPerPes *float64 `json:"audioFramesPerPes,omitempty" tf:"audio_frames_per_pes,omitempty"` + + // +kubebuilder:validation:Optional + AudioPids *string `json:"audioPids,omitempty" tf:"audio_pids,omitempty"` + + // Selects a specific PID from within a source. 
+ // +kubebuilder:validation:Optional + EcmPid *string `json:"ecmPid,omitempty" tf:"ecm_pid,omitempty"` + + // +kubebuilder:validation:Optional + NielsenId3Behavior *string `json:"nielsenId3Behavior,omitempty" tf:"nielsen_id3_behavior,omitempty"` + + // +kubebuilder:validation:Optional + PatInterval *float64 `json:"patInterval,omitempty" tf:"pat_interval,omitempty"` + + // +kubebuilder:validation:Optional + PcrControl *string `json:"pcrControl,omitempty" tf:"pcr_control,omitempty"` + + // +kubebuilder:validation:Optional + PcrPeriod *float64 `json:"pcrPeriod,omitempty" tf:"pcr_period,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + PcrPid *string `json:"pcrPid,omitempty" tf:"pcr_pid,omitempty"` + + // +kubebuilder:validation:Optional + PmtInterval *float64 `json:"pmtInterval,omitempty" tf:"pmt_interval,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + PmtPid *string `json:"pmtPid,omitempty" tf:"pmt_pid,omitempty"` + + // +kubebuilder:validation:Optional + ProgramNum *float64 `json:"programNum,omitempty" tf:"program_num,omitempty"` + + // +kubebuilder:validation:Optional + Scte35Behavior *string `json:"scte35Behavior,omitempty" tf:"scte35_behavior,omitempty"` + + // PID from which to read SCTE-35 messages. + // +kubebuilder:validation:Optional + Scte35Pid *string `json:"scte35Pid,omitempty" tf:"scte35_pid,omitempty"` + + // +kubebuilder:validation:Optional + TimedMetadataBehavior *string `json:"timedMetadataBehavior,omitempty" tf:"timed_metadata_behavior,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + TimedMetadataPid *string `json:"timedMetadataPid,omitempty" tf:"timed_metadata_pid,omitempty"` + + // User-specified id. This is used in an output group or an output. 
+ // +kubebuilder:validation:Optional + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + VideoPid *string `json:"videoPid,omitempty" tf:"video_pid,omitempty"` +} + +type MaintenanceInitParameters struct { + + // The day of the week to use for maintenance. + MaintenanceDay *string `json:"maintenanceDay,omitempty" tf:"maintenance_day,omitempty"` + + // The hour maintenance will start. + MaintenanceStartTime *string `json:"maintenanceStartTime,omitempty" tf:"maintenance_start_time,omitempty"` +} + +type MaintenanceObservation struct { + + // The day of the week to use for maintenance. + MaintenanceDay *string `json:"maintenanceDay,omitempty" tf:"maintenance_day,omitempty"` + + // The hour maintenance will start. + MaintenanceStartTime *string `json:"maintenanceStartTime,omitempty" tf:"maintenance_start_time,omitempty"` +} + +type MaintenanceParameters struct { + + // The day of the week to use for maintenance. + // +kubebuilder:validation:Optional + MaintenanceDay *string `json:"maintenanceDay" tf:"maintenance_day,omitempty"` + + // The hour maintenance will start. + // +kubebuilder:validation:Optional + MaintenanceStartTime *string `json:"maintenanceStartTime" tf:"maintenance_start_time,omitempty"` +} + +type MediaPackageGroupSettingsDestinationInitParameters struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type MediaPackageGroupSettingsDestinationObservation struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type MediaPackageGroupSettingsDestinationParameters struct { + + // Reference ID for the destination. 
+ // +kubebuilder:validation:Optional + DestinationRefID *string `json:"destinationRefId" tf:"destination_ref_id,omitempty"` +} + +type MediaPackageGroupSettingsInitParameters struct { + + // A directory and base filename where archive files should be written. See Destination for more details. + Destination *MediaPackageGroupSettingsDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` +} + +type MediaPackageGroupSettingsObservation struct { + + // A directory and base filename where archive files should be written. See Destination for more details. + Destination *MediaPackageGroupSettingsDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` +} + +type MediaPackageGroupSettingsParameters struct { + + // A directory and base filename where archive files should be written. See Destination for more details. + // +kubebuilder:validation:Optional + Destination *MediaPackageGroupSettingsDestinationParameters `json:"destination" tf:"destination,omitempty"` +} + +type MediaPackageOutputSettingsInitParameters struct { +} + +type MediaPackageOutputSettingsObservation struct { +} + +type MediaPackageOutputSettingsParameters struct { +} + +type MediaPackageSettingsInitParameters struct { + + // ID of the channel in MediaPackage that is the destination for this output group. + ChannelID *string `json:"channelId,omitempty" tf:"channel_id,omitempty"` +} + +type MediaPackageSettingsObservation struct { + + // ID of the channel in MediaPackage that is the destination for this output group. + ChannelID *string `json:"channelId,omitempty" tf:"channel_id,omitempty"` +} + +type MediaPackageSettingsParameters struct { + + // ID of the channel in MediaPackage that is the destination for this output group. + // +kubebuilder:validation:Optional + ChannelID *string `json:"channelId" tf:"channel_id,omitempty"` +} + +type MotionGraphicsConfigurationInitParameters struct { + + // – Motion Graphics Insertion. 
+ MotionGraphicsInsertion *string `json:"motionGraphicsInsertion,omitempty" tf:"motion_graphics_insertion,omitempty"` + + // – Motion Graphics Settings. See Motion Graphics Settings for more details. + MotionGraphicsSettings *MotionGraphicsSettingsInitParameters `json:"motionGraphicsSettings,omitempty" tf:"motion_graphics_settings,omitempty"` +} + +type MotionGraphicsConfigurationObservation struct { + + // – Motion Graphics Insertion. + MotionGraphicsInsertion *string `json:"motionGraphicsInsertion,omitempty" tf:"motion_graphics_insertion,omitempty"` + + // – Motion Graphics Settings. See Motion Graphics Settings for more details. + MotionGraphicsSettings *MotionGraphicsSettingsObservation `json:"motionGraphicsSettings,omitempty" tf:"motion_graphics_settings,omitempty"` +} + +type MotionGraphicsConfigurationParameters struct { + + // – Motion Graphics Insertion. + // +kubebuilder:validation:Optional + MotionGraphicsInsertion *string `json:"motionGraphicsInsertion,omitempty" tf:"motion_graphics_insertion,omitempty"` + + // – Motion Graphics Settings. See Motion Graphics Settings for more details. + // +kubebuilder:validation:Optional + MotionGraphicsSettings *MotionGraphicsSettingsParameters `json:"motionGraphicsSettings" tf:"motion_graphics_settings,omitempty"` +} + +type MotionGraphicsSettingsInitParameters struct { + + // – Html Motion Graphics Settings. + HTMLMotionGraphicsSettings *HTMLMotionGraphicsSettingsInitParameters `json:"htmlMotionGraphicsSettings,omitempty" tf:"html_motion_graphics_settings,omitempty"` +} + +type MotionGraphicsSettingsObservation struct { + + // – Html Motion Graphics Settings. + HTMLMotionGraphicsSettings *HTMLMotionGraphicsSettingsParameters `json:"htmlMotionGraphicsSettings,omitempty" tf:"html_motion_graphics_settings,omitempty"` +} + +type MotionGraphicsSettingsParameters struct { + + // – Html Motion Graphics Settings. 
+ // +kubebuilder:validation:Optional + HTMLMotionGraphicsSettings *HTMLMotionGraphicsSettingsParameters `json:"htmlMotionGraphicsSettings,omitempty" tf:"html_motion_graphics_settings,omitempty"` +} + +type Mp2SettingsInitParameters struct { + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sample rate in Hz. + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type Mp2SettingsObservation struct { + + // Average bitrate in bits/second. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sample rate in Hz. + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type Mp2SettingsParameters struct { + + // Average bitrate in bits/second. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + // +kubebuilder:validation:Optional + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sample rate in Hz. + // +kubebuilder:validation:Optional + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type MsSmoothGroupSettingsDestinationInitParameters struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type MsSmoothGroupSettingsDestinationObservation struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type MsSmoothGroupSettingsDestinationParameters struct { + + // Reference ID for the destination. 
+ // +kubebuilder:validation:Optional + DestinationRefID *string `json:"destinationRefId" tf:"destination_ref_id,omitempty"` +} + +type MsSmoothGroupSettingsInitParameters struct { + + // User-specified id. This is used in an output group or an output. + AcquisitionPointID *string `json:"acquisitionPointId,omitempty" tf:"acquisition_point_id,omitempty"` + + AudioOnlyTimecodeControl *string `json:"audioOnlyTimecodeControl,omitempty" tf:"audio_only_timecode_control,omitempty"` + + // Setting to allow self signed or verified RTMP certificates. + CertificateMode *string `json:"certificateMode,omitempty" tf:"certificate_mode,omitempty"` + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // A directory and base filename where archive files should be written. See Destination for more details. + Destination *MsSmoothGroupSettingsDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // User-specified id. This is used in an output group or an output. + EventID *string `json:"eventId,omitempty" tf:"event_id,omitempty"` + + EventIDMode *string `json:"eventIdMode,omitempty" tf:"event_id_mode,omitempty"` + + EventStopBehavior *string `json:"eventStopBehavior,omitempty" tf:"event_stop_behavior,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + FragmentLength *float64 `json:"fragmentLength,omitempty" tf:"fragment_length,omitempty"` + + // Controls the behavior of the RTMP group if input becomes unavailable. + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. 
+ RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` + + SegmentationMode *string `json:"segmentationMode,omitempty" tf:"segmentation_mode,omitempty"` + + SendDelayMs *float64 `json:"sendDelayMs,omitempty" tf:"send_delay_ms,omitempty"` + + SparseTrackType *string `json:"sparseTrackType,omitempty" tf:"sparse_track_type,omitempty"` + + StreamManifestBehavior *string `json:"streamManifestBehavior,omitempty" tf:"stream_manifest_behavior,omitempty"` + + TimestampOffset *string `json:"timestampOffset,omitempty" tf:"timestamp_offset,omitempty"` + + TimestampOffsetMode *string `json:"timestampOffsetMode,omitempty" tf:"timestamp_offset_mode,omitempty"` +} + +type MsSmoothGroupSettingsObservation struct { + + // User-specified id. This is used in an output group or an output. + AcquisitionPointID *string `json:"acquisitionPointId,omitempty" tf:"acquisition_point_id,omitempty"` + + AudioOnlyTimecodeControl *string `json:"audioOnlyTimecodeControl,omitempty" tf:"audio_only_timecode_control,omitempty"` + + // Setting to allow self signed or verified RTMP certificates. + CertificateMode *string `json:"certificateMode,omitempty" tf:"certificate_mode,omitempty"` + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // A directory and base filename where archive files should be written. See Destination for more details. + Destination *MsSmoothGroupSettingsDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // User-specified id. This is used in an output group or an output. 
+ EventID *string `json:"eventId,omitempty" tf:"event_id,omitempty"` + + EventIDMode *string `json:"eventIdMode,omitempty" tf:"event_id_mode,omitempty"` + + EventStopBehavior *string `json:"eventStopBehavior,omitempty" tf:"event_stop_behavior,omitempty"` + + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + FragmentLength *float64 `json:"fragmentLength,omitempty" tf:"fragment_length,omitempty"` + + // Controls the behavior of the RTMP group if input becomes unavailable. + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` + + SegmentationMode *string `json:"segmentationMode,omitempty" tf:"segmentation_mode,omitempty"` + + SendDelayMs *float64 `json:"sendDelayMs,omitempty" tf:"send_delay_ms,omitempty"` + + SparseTrackType *string `json:"sparseTrackType,omitempty" tf:"sparse_track_type,omitempty"` + + StreamManifestBehavior *string `json:"streamManifestBehavior,omitempty" tf:"stream_manifest_behavior,omitempty"` + + TimestampOffset *string `json:"timestampOffset,omitempty" tf:"timestamp_offset,omitempty"` + + TimestampOffsetMode *string `json:"timestampOffsetMode,omitempty" tf:"timestamp_offset_mode,omitempty"` +} + +type MsSmoothGroupSettingsParameters struct { + + // User-specified id. This is used in an output group or an output. + // +kubebuilder:validation:Optional + AcquisitionPointID *string `json:"acquisitionPointId,omitempty" tf:"acquisition_point_id,omitempty"` + + // +kubebuilder:validation:Optional + AudioOnlyTimecodeControl *string `json:"audioOnlyTimecodeControl,omitempty" tf:"audio_only_timecode_control,omitempty"` + + // Setting to allow self signed or verified RTMP certificates. 
+ // +kubebuilder:validation:Optional + CertificateMode *string `json:"certificateMode,omitempty" tf:"certificate_mode,omitempty"` + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + // +kubebuilder:validation:Optional + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // A directory and base filename where archive files should be written. See Destination for more details. + // +kubebuilder:validation:Optional + Destination *MsSmoothGroupSettingsDestinationParameters `json:"destination" tf:"destination,omitempty"` + + // User-specified id. This is used in an output group or an output. + // +kubebuilder:validation:Optional + EventID *string `json:"eventId,omitempty" tf:"event_id,omitempty"` + + // +kubebuilder:validation:Optional + EventIDMode *string `json:"eventIdMode,omitempty" tf:"event_id_mode,omitempty"` + + // +kubebuilder:validation:Optional + EventStopBehavior *string `json:"eventStopBehavior,omitempty" tf:"event_stop_behavior,omitempty"` + + // +kubebuilder:validation:Optional + FilecacheDuration *float64 `json:"filecacheDuration,omitempty" tf:"filecache_duration,omitempty"` + + // +kubebuilder:validation:Optional + FragmentLength *float64 `json:"fragmentLength,omitempty" tf:"fragment_length,omitempty"` + + // Controls the behavior of the RTMP group if input becomes unavailable. + // +kubebuilder:validation:Optional + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // Number of retry attempts. + // +kubebuilder:validation:Optional + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` + + // Number of seconds to wait until a restart is initiated. 
+ // +kubebuilder:validation:Optional + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` + + // +kubebuilder:validation:Optional + SegmentationMode *string `json:"segmentationMode,omitempty" tf:"segmentation_mode,omitempty"` + + // +kubebuilder:validation:Optional + SendDelayMs *float64 `json:"sendDelayMs,omitempty" tf:"send_delay_ms,omitempty"` + + // +kubebuilder:validation:Optional + SparseTrackType *string `json:"sparseTrackType,omitempty" tf:"sparse_track_type,omitempty"` + + // +kubebuilder:validation:Optional + StreamManifestBehavior *string `json:"streamManifestBehavior,omitempty" tf:"stream_manifest_behavior,omitempty"` + + // +kubebuilder:validation:Optional + TimestampOffset *string `json:"timestampOffset,omitempty" tf:"timestamp_offset,omitempty"` + + // +kubebuilder:validation:Optional + TimestampOffsetMode *string `json:"timestampOffsetMode,omitempty" tf:"timestamp_offset_mode,omitempty"` +} + +type MsSmoothOutputSettingsInitParameters struct { + H265PackagingType *string `json:"h265PackagingType,omitempty" tf:"h265_packaging_type,omitempty"` + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` +} + +type MsSmoothOutputSettingsObservation struct { + H265PackagingType *string `json:"h265PackagingType,omitempty" tf:"h265_packaging_type,omitempty"` + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` +} + +type MsSmoothOutputSettingsParameters struct { + + // +kubebuilder:validation:Optional + H265PackagingType *string `json:"h265PackagingType,omitempty" tf:"h265_packaging_type,omitempty"` + + // String concatenated to the end of the destination filename. Required for multiple outputs of the same type. 
+ // +kubebuilder:validation:Optional + NameModifier *string `json:"nameModifier,omitempty" tf:"name_modifier,omitempty"` +} + +type MultiplexGroupSettingsInitParameters struct { +} + +type MultiplexGroupSettingsObservation struct { +} + +type MultiplexGroupSettingsParameters struct { +} + +type MultiplexOutputSettingsDestinationInitParameters struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type MultiplexOutputSettingsDestinationObservation struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type MultiplexOutputSettingsDestinationParameters struct { + + // Reference ID for the destination. + // +kubebuilder:validation:Optional + DestinationRefID *string `json:"destinationRefId" tf:"destination_ref_id,omitempty"` +} + +type MultiplexOutputSettingsInitParameters struct { + + // A directory and base filename where archive files should be written. See Destination for more details. + Destination *MultiplexOutputSettingsDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` +} + +type MultiplexOutputSettingsObservation struct { + + // A directory and base filename where archive files should be written. See Destination for more details. + Destination *MultiplexOutputSettingsDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` +} + +type MultiplexOutputSettingsParameters struct { + + // A directory and base filename where archive files should be written. See Destination for more details. + // +kubebuilder:validation:Optional + Destination *MultiplexOutputSettingsDestinationParameters `json:"destination" tf:"destination,omitempty"` +} + +type MultiplexSettingsInitParameters struct { + + // The ID of the Multiplex that the encoder is providing output to. 
+ MultiplexID *string `json:"multiplexId,omitempty" tf:"multiplex_id,omitempty"` + + // The program name of the Multiplex program that the encoder is providing output to. + ProgramName *string `json:"programName,omitempty" tf:"program_name,omitempty"` +} + +type MultiplexSettingsObservation struct { + + // The ID of the Multiplex that the encoder is providing output to. + MultiplexID *string `json:"multiplexId,omitempty" tf:"multiplex_id,omitempty"` + + // The program name of the Multiplex program that the encoder is providing output to. + ProgramName *string `json:"programName,omitempty" tf:"program_name,omitempty"` +} + +type MultiplexSettingsParameters struct { + + // The ID of the Multiplex that the encoder is providing output to. + // +kubebuilder:validation:Optional + MultiplexID *string `json:"multiplexId" tf:"multiplex_id,omitempty"` + + // The program name of the Multiplex program that the encoder is providing output to. + // +kubebuilder:validation:Optional + ProgramName *string `json:"programName" tf:"program_name,omitempty"` +} + +type NetworkInputSettingsInitParameters struct { + + // Specifies HLS input settings when the uri is for a HLS manifest. See HLS Input Settings for more details. + HlsInputSettings *HlsInputSettingsInitParameters `json:"hlsInputSettings,omitempty" tf:"hls_input_settings,omitempty"` + + // Check HTTPS server certificates. + ServerValidation *string `json:"serverValidation,omitempty" tf:"server_validation,omitempty"` +} + +type NetworkInputSettingsObservation struct { + + // Specifies HLS input settings when the uri is for a HLS manifest. See HLS Input Settings for more details. + HlsInputSettings *HlsInputSettingsObservation `json:"hlsInputSettings,omitempty" tf:"hls_input_settings,omitempty"` + + // Check HTTPS server certificates. 
+ ServerValidation *string `json:"serverValidation,omitempty" tf:"server_validation,omitempty"` +} + +type NetworkInputSettingsParameters struct { + + // Specifies HLS input settings when the uri is for a HLS manifest. See HLS Input Settings for more details. + // +kubebuilder:validation:Optional + HlsInputSettings *HlsInputSettingsParameters `json:"hlsInputSettings,omitempty" tf:"hls_input_settings,omitempty"` + + // Check HTTPS server certificates. + // +kubebuilder:validation:Optional + ServerValidation *string `json:"serverValidation,omitempty" tf:"server_validation,omitempty"` +} + +type NielsenCbetSettingsInitParameters struct { + CbetCheckDigitString *string `json:"cbetCheckDigitString,omitempty" tf:"cbet_check_digit_string,omitempty"` + + // Determines the method of CBET insertion mode when prior encoding is detected on the same layer. + CbetStepaside *string `json:"cbetStepaside,omitempty" tf:"cbet_stepaside,omitempty"` + + // CBET source ID to use in the watermark. + Csid *string `json:"csid,omitempty" tf:"csid,omitempty"` +} + +type NielsenCbetSettingsObservation struct { + CbetCheckDigitString *string `json:"cbetCheckDigitString,omitempty" tf:"cbet_check_digit_string,omitempty"` + + // Determines the method of CBET insertion mode when prior encoding is detected on the same layer. + CbetStepaside *string `json:"cbetStepaside,omitempty" tf:"cbet_stepaside,omitempty"` + + // CBET source ID to use in the watermark. + Csid *string `json:"csid,omitempty" tf:"csid,omitempty"` +} + +type NielsenCbetSettingsParameters struct { + + // +kubebuilder:validation:Optional + CbetCheckDigitString *string `json:"cbetCheckDigitString" tf:"cbet_check_digit_string,omitempty"` + + // Determines the method of CBET insertion mode when prior encoding is detected on the same layer. + // +kubebuilder:validation:Optional + CbetStepaside *string `json:"cbetStepaside" tf:"cbet_stepaside,omitempty"` + + // CBET source ID to use in the watermark. 
+ // +kubebuilder:validation:Optional + Csid *string `json:"csid" tf:"csid,omitempty"` +} + +type NielsenConfigurationInitParameters struct { + + // – Enter the Distributor ID assigned to your organization by Nielsen. + DistributorID *string `json:"distributorId,omitempty" tf:"distributor_id,omitempty"` + + // – Enables Nielsen PCM to ID3 tagging. + NielsenPcmToId3Tagging *string `json:"nielsenPcmToId3Tagging,omitempty" tf:"nielsen_pcm_to_id3_tagging,omitempty"` +} + +type NielsenConfigurationObservation struct { + + // – Enter the Distributor ID assigned to your organization by Nielsen. + DistributorID *string `json:"distributorId,omitempty" tf:"distributor_id,omitempty"` + + // – Enables Nielsen PCM to ID3 tagging. + NielsenPcmToId3Tagging *string `json:"nielsenPcmToId3Tagging,omitempty" tf:"nielsen_pcm_to_id3_tagging,omitempty"` +} + +type NielsenConfigurationParameters struct { + + // – Enter the Distributor ID assigned to your organization by Nielsen. + // +kubebuilder:validation:Optional + DistributorID *string `json:"distributorId,omitempty" tf:"distributor_id,omitempty"` + + // – Enables Nielsen PCM to ID3 tagging. + // +kubebuilder:validation:Optional + NielsenPcmToId3Tagging *string `json:"nielsenPcmToId3Tagging,omitempty" tf:"nielsen_pcm_to_id3_tagging,omitempty"` +} + +type NielsenNaesIiNwSettingsInitParameters struct { + CheckDigitString *string `json:"checkDigitString,omitempty" tf:"check_digit_string,omitempty"` + + // The Nielsen Source ID to include in the watermark. + Sid *float64 `json:"sid,omitempty" tf:"sid,omitempty"` +} + +type NielsenNaesIiNwSettingsObservation struct { + CheckDigitString *string `json:"checkDigitString,omitempty" tf:"check_digit_string,omitempty"` + + // The Nielsen Source ID to include in the watermark. 
+ Sid *float64 `json:"sid,omitempty" tf:"sid,omitempty"` +} + +type NielsenNaesIiNwSettingsParameters struct { + + // +kubebuilder:validation:Optional + CheckDigitString *string `json:"checkDigitString" tf:"check_digit_string,omitempty"` + + // The Nielsen Source ID to include in the watermark. + // +kubebuilder:validation:Optional + Sid *float64 `json:"sid" tf:"sid,omitempty"` +} + +type NielsenWatermarksSettingsInitParameters struct { + + // Used to insert watermarks of type Nielsen CBET. See Nielsen CBET Settings for more details. + NielsenCbetSettings *NielsenCbetSettingsInitParameters `json:"nielsenCbetSettings,omitempty" tf:"nielsen_cbet_settings,omitempty"` + + // Distribution types to assign to the watermarks. Options are PROGRAM_CONTENT and FINAL_DISTRIBUTOR. + NielsenDistributionType *string `json:"nielsenDistributionType,omitempty" tf:"nielsen_distribution_type,omitempty"` + + // Used to insert watermarks of type Nielsen NAES, II (N2) and Nielsen NAES VI (NW). See Nielsen NAES II NW Settings for more details. + NielsenNaesIiNwSettings []NielsenNaesIiNwSettingsInitParameters `json:"nielsenNaesIiNwSettings,omitempty" tf:"nielsen_naes_ii_nw_settings,omitempty"` +} + +type NielsenWatermarksSettingsObservation struct { + + // Used to insert watermarks of type Nielsen CBET. See Nielsen CBET Settings for more details. + NielsenCbetSettings *NielsenCbetSettingsObservation `json:"nielsenCbetSettings,omitempty" tf:"nielsen_cbet_settings,omitempty"` + + // Distribution types to assign to the watermarks. Options are PROGRAM_CONTENT and FINAL_DISTRIBUTOR. + NielsenDistributionType *string `json:"nielsenDistributionType,omitempty" tf:"nielsen_distribution_type,omitempty"` + + // Used to insert watermarks of type Nielsen NAES, II (N2) and Nielsen NAES VI (NW). See Nielsen NAES II NW Settings for more details. 
+ NielsenNaesIiNwSettings []NielsenNaesIiNwSettingsObservation `json:"nielsenNaesIiNwSettings,omitempty" tf:"nielsen_naes_ii_nw_settings,omitempty"` +} + +type NielsenWatermarksSettingsParameters struct { + + // Used to insert watermarks of type Nielsen CBET. See Nielsen CBET Settings for more details. + // +kubebuilder:validation:Optional + NielsenCbetSettings *NielsenCbetSettingsParameters `json:"nielsenCbetSettings,omitempty" tf:"nielsen_cbet_settings,omitempty"` + + // Distribution types to assign to the watermarks. Options are PROGRAM_CONTENT and FINAL_DISTRIBUTOR. + // +kubebuilder:validation:Optional + NielsenDistributionType *string `json:"nielsenDistributionType,omitempty" tf:"nielsen_distribution_type,omitempty"` + + // Used to insert watermarks of type Nielsen NAES, II (N2) and Nielsen NAES VI (NW). See Nielsen NAES II NW Settings for more details. + // +kubebuilder:validation:Optional + NielsenNaesIiNwSettings []NielsenNaesIiNwSettingsParameters `json:"nielsenNaesIiNwSettings,omitempty" tf:"nielsen_naes_ii_nw_settings,omitempty"` +} + +type OutputGroupSettingsInitParameters struct { + + // Archive group settings. See Archive Group Settings for more details. + ArchiveGroupSettings []ArchiveGroupSettingsInitParameters `json:"archiveGroupSettings,omitempty" tf:"archive_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + FrameCaptureGroupSettings *FrameCaptureGroupSettingsInitParameters `json:"frameCaptureGroupSettings,omitempty" tf:"frame_capture_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsGroupSettings *HlsGroupSettingsInitParameters `json:"hlsGroupSettings,omitempty" tf:"hls_group_settings,omitempty"` + + // Media package group settings. See Media Package Group Settings for more details. 
+ MediaPackageGroupSettings *MediaPackageGroupSettingsInitParameters `json:"mediaPackageGroupSettings,omitempty" tf:"media_package_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + MsSmoothGroupSettings *MsSmoothGroupSettingsInitParameters `json:"msSmoothGroupSettings,omitempty" tf:"ms_smooth_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + MultiplexGroupSettings *MultiplexGroupSettingsInitParameters `json:"multiplexGroupSettings,omitempty" tf:"multiplex_group_settings,omitempty"` + + // RTMP group settings. See RTMP Group Settings for more details. + RtmpGroupSettings *RtmpGroupSettingsInitParameters `json:"rtmpGroupSettings,omitempty" tf:"rtmp_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + UDPGroupSettings *UDPGroupSettingsInitParameters `json:"udpGroupSettings,omitempty" tf:"udp_group_settings,omitempty"` +} + +type OutputGroupSettingsObservation struct { + + // Archive group settings. See Archive Group Settings for more details. + ArchiveGroupSettings []ArchiveGroupSettingsObservation `json:"archiveGroupSettings,omitempty" tf:"archive_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + FrameCaptureGroupSettings *FrameCaptureGroupSettingsObservation `json:"frameCaptureGroupSettings,omitempty" tf:"frame_capture_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + HlsGroupSettings *HlsGroupSettingsObservation `json:"hlsGroupSettings,omitempty" tf:"hls_group_settings,omitempty"` + + // Media package group settings. 
See Media Package Group Settings for more details. + MediaPackageGroupSettings *MediaPackageGroupSettingsObservation `json:"mediaPackageGroupSettings,omitempty" tf:"media_package_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + MsSmoothGroupSettings *MsSmoothGroupSettingsObservation `json:"msSmoothGroupSettings,omitempty" tf:"ms_smooth_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + MultiplexGroupSettings *MultiplexGroupSettingsParameters `json:"multiplexGroupSettings,omitempty" tf:"multiplex_group_settings,omitempty"` + + // RTMP group settings. See RTMP Group Settings for more details. + RtmpGroupSettings *RtmpGroupSettingsObservation `json:"rtmpGroupSettings,omitempty" tf:"rtmp_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + UDPGroupSettings *UDPGroupSettingsObservation `json:"udpGroupSettings,omitempty" tf:"udp_group_settings,omitempty"` +} + +type OutputGroupSettingsParameters struct { + + // Archive group settings. See Archive Group Settings for more details. + // +kubebuilder:validation:Optional + ArchiveGroupSettings []ArchiveGroupSettingsParameters `json:"archiveGroupSettings,omitempty" tf:"archive_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + FrameCaptureGroupSettings *FrameCaptureGroupSettingsParameters `json:"frameCaptureGroupSettings,omitempty" tf:"frame_capture_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ // +kubebuilder:validation:Optional + HlsGroupSettings *HlsGroupSettingsParameters `json:"hlsGroupSettings,omitempty" tf:"hls_group_settings,omitempty"` + + // Media package group settings. See Media Package Group Settings for more details. + // +kubebuilder:validation:Optional + MediaPackageGroupSettings *MediaPackageGroupSettingsParameters `json:"mediaPackageGroupSettings,omitempty" tf:"media_package_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + MsSmoothGroupSettings *MsSmoothGroupSettingsParameters `json:"msSmoothGroupSettings,omitempty" tf:"ms_smooth_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + MultiplexGroupSettings *MultiplexGroupSettingsParameters `json:"multiplexGroupSettings,omitempty" tf:"multiplex_group_settings,omitempty"` + + // RTMP group settings. See RTMP Group Settings for more details. + // +kubebuilder:validation:Optional + RtmpGroupSettings *RtmpGroupSettingsParameters `json:"rtmpGroupSettings,omitempty" tf:"rtmp_group_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + UDPGroupSettings *UDPGroupSettingsParameters `json:"udpGroupSettings,omitempty" tf:"udp_group_settings,omitempty"` +} + +type OutputGroupsInitParameters struct { + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Settings associated with the output group. See Output Group Settings for more details. + OutputGroupSettings *OutputGroupSettingsInitParameters `json:"outputGroupSettings,omitempty" tf:"output_group_settings,omitempty"` + + // List of outputs. See Outputs for more details. 
+ Outputs []OutputsInitParameters `json:"outputs,omitempty" tf:"outputs,omitempty"` +} + +type OutputGroupsObservation struct { + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Settings associated with the output group. See Output Group Settings for more details. + OutputGroupSettings *OutputGroupSettingsObservation `json:"outputGroupSettings,omitempty" tf:"output_group_settings,omitempty"` + + // List of outputs. See Outputs for more details. + Outputs []OutputsObservation `json:"outputs,omitempty" tf:"outputs,omitempty"` +} + +type OutputGroupsParameters struct { + + // Name of the Channel. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Settings associated with the output group. See Output Group Settings for more details. + // +kubebuilder:validation:Optional + OutputGroupSettings *OutputGroupSettingsParameters `json:"outputGroupSettings" tf:"output_group_settings,omitempty"` + + // List of outputs. See Outputs for more details. + // +kubebuilder:validation:Optional + Outputs []OutputsParameters `json:"outputs" tf:"outputs,omitempty"` +} + +type OutputRectangleInitParameters struct { + + // See the description in left_offset. For height, specify the entire height of the rectangle as a percentage of the underlying frame height. For example, "80" means the rectangle height is 80% of the underlying frame height. The top_offset and rectangle_height must add up to 100% or less. This field corresponds to tts:extent - Y in the TTML standard. + Height *float64 `json:"height,omitempty" tf:"height,omitempty"` + + // Applies only if you plan to convert these source captions to EBU-TT-D or TTML in an output. (Make sure to leave the default if you don’t have either of these formats in the output.) You can define a display rectangle for the captions that is smaller than the underlying video frame. 
You define the rectangle by specifying the position of the left edge, top edge, bottom edge, and right edge of the rectangle, all within the underlying video frame. The units for the measurements are percentages. If you specify a value for one of these fields, you must specify a value for all of them. For leftOffset, specify the position of the left edge of the rectangle, as a percentage of the underlying frame width, and relative to the left edge of the frame. For example, "10" means the measurement is 10% of the underlying frame width. The rectangle left edge starts at that position from the left edge of the frame. This field corresponds to tts:origin - X in the TTML standard. + LeftOffset *float64 `json:"leftOffset,omitempty" tf:"left_offset,omitempty"` + + // See the description in left_offset. For top_offset, specify the position of the top edge of the rectangle, as a percentage of the underlying frame height, and relative to the top edge of the frame. For example, "10" means the measurement is 10% of the underlying frame height. The rectangle top edge starts at that position from the top edge of the frame. This field corresponds to tts:origin - Y in the TTML standard. + TopOffset *float64 `json:"topOffset,omitempty" tf:"top_offset,omitempty"` + + // See the description in left_offset. For width, specify the entire width of the rectangle as a percentage of the underlying frame width. For example, "80" means the rectangle width is 80% of the underlying frame width. The left_offset and rectangle_width must add up to 100% or less. This field corresponds to tts:extent - X in the TTML standard. + Width *float64 `json:"width,omitempty" tf:"width,omitempty"` +} + +type OutputRectangleObservation struct { + + // See the description in left_offset. For height, specify the entire height of the rectangle as a percentage of the underlying frame height. For example, "80" means the rectangle height is 80% of the underlying frame height. 
The top_offset and rectangle_height must add up to 100% or less. This field corresponds to tts:extent - Y in the TTML standard. + Height *float64 `json:"height,omitempty" tf:"height,omitempty"` + + // Applies only if you plan to convert these source captions to EBU-TT-D or TTML in an output. (Make sure to leave the default if you don’t have either of these formats in the output.) You can define a display rectangle for the captions that is smaller than the underlying video frame. You define the rectangle by specifying the position of the left edge, top edge, bottom edge, and right edge of the rectangle, all within the underlying video frame. The units for the measurements are percentages. If you specify a value for one of these fields, you must specify a value for all of them. For leftOffset, specify the position of the left edge of the rectangle, as a percentage of the underlying frame width, and relative to the left edge of the frame. For example, "10" means the measurement is 10% of the underlying frame width. The rectangle left edge starts at that position from the left edge of the frame. This field corresponds to tts:origin - X in the TTML standard. + LeftOffset *float64 `json:"leftOffset,omitempty" tf:"left_offset,omitempty"` + + // See the description in left_offset. For top_offset, specify the position of the top edge of the rectangle, as a percentage of the underlying frame height, and relative to the top edge of the frame. For example, "10" means the measurement is 10% of the underlying frame height. The rectangle top edge starts at that position from the top edge of the frame. This field corresponds to tts:origin - Y in the TTML standard. + TopOffset *float64 `json:"topOffset,omitempty" tf:"top_offset,omitempty"` + + // See the description in left_offset. For width, specify the entire width of the rectangle as a percentage of the underlying frame width. For example, "80" means the rectangle width is 80% of the underlying frame width. 
The left_offset and rectangle_width must add up to 100% or less. This field corresponds to tts:extent - X in the TTML standard. + Width *float64 `json:"width,omitempty" tf:"width,omitempty"` +} + +type OutputRectangleParameters struct { + + // See the description in left_offset. For height, specify the entire height of the rectangle as a percentage of the underlying frame height. For example, "80" means the rectangle height is 80% of the underlying frame height. The top_offset and rectangle_height must add up to 100% or less. This field corresponds to tts:extent - Y in the TTML standard. + // +kubebuilder:validation:Optional + Height *float64 `json:"height" tf:"height,omitempty"` + + // Applies only if you plan to convert these source captions to EBU-TT-D or TTML in an output. (Make sure to leave the default if you don’t have either of these formats in the output.) You can define a display rectangle for the captions that is smaller than the underlying video frame. You define the rectangle by specifying the position of the left edge, top edge, bottom edge, and right edge of the rectangle, all within the underlying video frame. The units for the measurements are percentages. If you specify a value for one of these fields, you must specify a value for all of them. For leftOffset, specify the position of the left edge of the rectangle, as a percentage of the underlying frame width, and relative to the left edge of the frame. For example, "10" means the measurement is 10% of the underlying frame width. The rectangle left edge starts at that position from the left edge of the frame. This field corresponds to tts:origin - X in the TTML standard. + // +kubebuilder:validation:Optional + LeftOffset *float64 `json:"leftOffset" tf:"left_offset,omitempty"` + + // See the description in left_offset. For top_offset, specify the position of the top edge of the rectangle, as a percentage of the underlying frame height, and relative to the top edge of the frame. 
For example, "10" means the measurement is 10% of the underlying frame height. The rectangle top edge starts at that position from the top edge of the frame. This field corresponds to tts:origin - Y in the TTML standard. + // +kubebuilder:validation:Optional + TopOffset *float64 `json:"topOffset" tf:"top_offset,omitempty"` + + // See the description in left_offset. For width, specify the entire width of the rectangle as a percentage of the underlying frame width. For example, "80" means the rectangle width is 80% of the underlying frame width. The left_offset and rectangle_width must add up to 100% or less. This field corresponds to tts:extent - X in the TTML standard. + // +kubebuilder:validation:Optional + Width *float64 `json:"width" tf:"width,omitempty"` +} + +type OutputSettingsInitParameters struct { + + // Archive output settings. See Archive Output Settings for more details. + ArchiveOutputSettings *ArchiveOutputSettingsInitParameters `json:"archiveOutputSettings,omitempty" tf:"archive_output_settings,omitempty"` + + // Settings for output. See Output Settings for more details. + FrameCaptureOutputSettings *FrameCaptureOutputSettingsInitParameters `json:"frameCaptureOutputSettings,omitempty" tf:"frame_capture_output_settings,omitempty"` + + // Settings for output. See Output Settings for more details. + HlsOutputSettings *HlsOutputSettingsInitParameters `json:"hlsOutputSettings,omitempty" tf:"hls_output_settings,omitempty"` + + // Media package output settings. This can be set as an empty block. + MediaPackageOutputSettings *MediaPackageOutputSettingsInitParameters `json:"mediaPackageOutputSettings,omitempty" tf:"media_package_output_settings,omitempty"` + + // Settings for output. See Output Settings for more details. + MsSmoothOutputSettings *MsSmoothOutputSettingsInitParameters `json:"msSmoothOutputSettings,omitempty" tf:"ms_smooth_output_settings,omitempty"` + + // Multiplex output settings. See Multiplex Output Settings for more details. 
+ MultiplexOutputSettings *MultiplexOutputSettingsInitParameters `json:"multiplexOutputSettings,omitempty" tf:"multiplex_output_settings,omitempty"` + + // RTMP output settings. See RTMP Output Settings for more details. + RtmpOutputSettings *RtmpOutputSettingsInitParameters `json:"rtmpOutputSettings,omitempty" tf:"rtmp_output_settings,omitempty"` + + // UDP output settings. See UDP Output Settings for more details. + UDPOutputSettings *UDPOutputSettingsInitParameters `json:"udpOutputSettings,omitempty" tf:"udp_output_settings,omitempty"` +} + +type OutputSettingsObservation struct { + + // Archive output settings. See Archive Output Settings for more details. + ArchiveOutputSettings *ArchiveOutputSettingsObservation `json:"archiveOutputSettings,omitempty" tf:"archive_output_settings,omitempty"` + + // Settings for output. See Output Settings for more details. + FrameCaptureOutputSettings *FrameCaptureOutputSettingsObservation `json:"frameCaptureOutputSettings,omitempty" tf:"frame_capture_output_settings,omitempty"` + + // Settings for output. See Output Settings for more details. + HlsOutputSettings *HlsOutputSettingsObservation `json:"hlsOutputSettings,omitempty" tf:"hls_output_settings,omitempty"` + + // Media package output settings. This can be set as an empty block. + MediaPackageOutputSettings *MediaPackageOutputSettingsParameters `json:"mediaPackageOutputSettings,omitempty" tf:"media_package_output_settings,omitempty"` + + // Settings for output. See Output Settings for more details. + MsSmoothOutputSettings *MsSmoothOutputSettingsObservation `json:"msSmoothOutputSettings,omitempty" tf:"ms_smooth_output_settings,omitempty"` + + // Multiplex output settings. See Multiplex Output Settings for more details. + MultiplexOutputSettings *MultiplexOutputSettingsObservation `json:"multiplexOutputSettings,omitempty" tf:"multiplex_output_settings,omitempty"` + + // RTMP output settings. See RTMP Output Settings for more details. 
+ RtmpOutputSettings *RtmpOutputSettingsObservation `json:"rtmpOutputSettings,omitempty" tf:"rtmp_output_settings,omitempty"` + + // UDP output settings. See UDP Output Settings for more details. + UDPOutputSettings *UDPOutputSettingsObservation `json:"udpOutputSettings,omitempty" tf:"udp_output_settings,omitempty"` +} + +type OutputSettingsParameters struct { + + // Archive output settings. See Archive Output Settings for more details. + // +kubebuilder:validation:Optional + ArchiveOutputSettings *ArchiveOutputSettingsParameters `json:"archiveOutputSettings,omitempty" tf:"archive_output_settings,omitempty"` + + // Settings for output. See Output Settings for more details. + // +kubebuilder:validation:Optional + FrameCaptureOutputSettings *FrameCaptureOutputSettingsParameters `json:"frameCaptureOutputSettings,omitempty" tf:"frame_capture_output_settings,omitempty"` + + // Settings for output. See Output Settings for more details. + // +kubebuilder:validation:Optional + HlsOutputSettings *HlsOutputSettingsParameters `json:"hlsOutputSettings,omitempty" tf:"hls_output_settings,omitempty"` + + // Media package output settings. This can be set as an empty block. + // +kubebuilder:validation:Optional + MediaPackageOutputSettings *MediaPackageOutputSettingsParameters `json:"mediaPackageOutputSettings,omitempty" tf:"media_package_output_settings,omitempty"` + + // Settings for output. See Output Settings for more details. + // +kubebuilder:validation:Optional + MsSmoothOutputSettings *MsSmoothOutputSettingsParameters `json:"msSmoothOutputSettings,omitempty" tf:"ms_smooth_output_settings,omitempty"` + + // Multiplex output settings. See Multiplex Output Settings for more details. + // +kubebuilder:validation:Optional + MultiplexOutputSettings *MultiplexOutputSettingsParameters `json:"multiplexOutputSettings,omitempty" tf:"multiplex_output_settings,omitempty"` + + // RTMP output settings. See RTMP Output Settings for more details. 
+ // +kubebuilder:validation:Optional + RtmpOutputSettings *RtmpOutputSettingsParameters `json:"rtmpOutputSettings,omitempty" tf:"rtmp_output_settings,omitempty"` + + // UDP output settings. See UDP Output Settings for more details. + // +kubebuilder:validation:Optional + UDPOutputSettings *UDPOutputSettingsParameters `json:"udpOutputSettings,omitempty" tf:"udp_output_settings,omitempty"` +} + +type OutputsInitParameters struct { + + // The names of the audio descriptions used as audio sources for the output. + // +listType=set + AudioDescriptionNames []*string `json:"audioDescriptionNames,omitempty" tf:"audio_description_names,omitempty"` + + // The names of the caption descriptions used as caption sources for the output. + // +listType=set + CaptionDescriptionNames []*string `json:"captionDescriptionNames,omitempty" tf:"caption_description_names,omitempty"` + + // The name used to identify an output. + OutputName *string `json:"outputName,omitempty" tf:"output_name,omitempty"` + + // Settings for output. See Output Settings for more details. + OutputSettings *OutputSettingsInitParameters `json:"outputSettings,omitempty" tf:"output_settings,omitempty"` + + // The name of the video description used as video source for the output. + VideoDescriptionName *string `json:"videoDescriptionName,omitempty" tf:"video_description_name,omitempty"` +} + +type OutputsObservation struct { + + // The names of the audio descriptions used as audio sources for the output. + // +listType=set + AudioDescriptionNames []*string `json:"audioDescriptionNames,omitempty" tf:"audio_description_names,omitempty"` + + // The names of the caption descriptions used as caption sources for the output. + // +listType=set + CaptionDescriptionNames []*string `json:"captionDescriptionNames,omitempty" tf:"caption_description_names,omitempty"` + + // The name used to identify an output. + OutputName *string `json:"outputName,omitempty" tf:"output_name,omitempty"` + + // Settings for output. 
See Output Settings for more details. + OutputSettings *OutputSettingsObservation `json:"outputSettings,omitempty" tf:"output_settings,omitempty"` + + // The name of the video description used as video source for the output. + VideoDescriptionName *string `json:"videoDescriptionName,omitempty" tf:"video_description_name,omitempty"` +} + +type OutputsParameters struct { + + // The names of the audio descriptions used as audio sources for the output. + // +kubebuilder:validation:Optional + // +listType=set + AudioDescriptionNames []*string `json:"audioDescriptionNames,omitempty" tf:"audio_description_names,omitempty"` + + // The names of the caption descriptions used as caption sources for the output. + // +kubebuilder:validation:Optional + // +listType=set + CaptionDescriptionNames []*string `json:"captionDescriptionNames,omitempty" tf:"caption_description_names,omitempty"` + + // The name used to identify an output. + // +kubebuilder:validation:Optional + OutputName *string `json:"outputName,omitempty" tf:"output_name,omitempty"` + + // Settings for output. See Output Settings for more details. + // +kubebuilder:validation:Optional + OutputSettings *OutputSettingsParameters `json:"outputSettings" tf:"output_settings,omitempty"` + + // The name of the video description used as video source for the output. 
+ // +kubebuilder:validation:Optional + VideoDescriptionName *string `json:"videoDescriptionName,omitempty" tf:"video_description_name,omitempty"` +} + +type PassThroughSettingsInitParameters struct { +} + +type PassThroughSettingsObservation struct { +} + +type PassThroughSettingsParameters struct { +} + +type RawSettingsInitParameters struct { +} + +type RawSettingsObservation struct { +} + +type RawSettingsParameters struct { +} + +type Rec601SettingsInitParameters struct { +} + +type Rec601SettingsObservation struct { +} + +type Rec601SettingsParameters struct { +} + +type Rec709SettingsInitParameters struct { +} + +type Rec709SettingsObservation struct { +} + +type Rec709SettingsParameters struct { +} + +type RemixSettingsInitParameters struct { + ChannelMappings []ChannelMappingsInitParameters `json:"channelMappings,omitempty" tf:"channel_mappings,omitempty"` + + ChannelsIn *float64 `json:"channelsIn,omitempty" tf:"channels_in,omitempty"` + + ChannelsOut *float64 `json:"channelsOut,omitempty" tf:"channels_out,omitempty"` +} + +type RemixSettingsObservation struct { + ChannelMappings []ChannelMappingsObservation `json:"channelMappings,omitempty" tf:"channel_mappings,omitempty"` + + ChannelsIn *float64 `json:"channelsIn,omitempty" tf:"channels_in,omitempty"` + + ChannelsOut *float64 `json:"channelsOut,omitempty" tf:"channels_out,omitempty"` +} + +type RemixSettingsParameters struct { + + // +kubebuilder:validation:Optional + ChannelMappings []ChannelMappingsParameters `json:"channelMappings" tf:"channel_mappings,omitempty"` + + // +kubebuilder:validation:Optional + ChannelsIn *float64 `json:"channelsIn,omitempty" tf:"channels_in,omitempty"` + + // +kubebuilder:validation:Optional + ChannelsOut *float64 `json:"channelsOut,omitempty" tf:"channels_out,omitempty"` +} + +type RtmpCaptionInfoDestinationSettingsInitParameters struct { +} + +type RtmpCaptionInfoDestinationSettingsObservation struct { +} + +type RtmpCaptionInfoDestinationSettingsParameters struct { +} + 
+type RtmpGroupSettingsInitParameters struct { + + // The ad marker type for this output group. + AdMarkers []*string `json:"adMarkers,omitempty" tf:"ad_markers,omitempty"` + + // Authentication scheme to use when connecting with CDN. + AuthenticationScheme *string `json:"authenticationScheme,omitempty" tf:"authentication_scheme,omitempty"` + + // Controls behavior when content cache fills up. + CacheFullBehavior *string `json:"cacheFullBehavior,omitempty" tf:"cache_full_behavior,omitempty"` + + // Cache length in seconds, is used to calculate buffer size. + CacheLength *float64 `json:"cacheLength,omitempty" tf:"cache_length,omitempty"` + + // Controls the types of data that passes to onCaptionInfo outputs. + CaptionData *string `json:"captionData,omitempty" tf:"caption_data,omitempty"` + + // Controls the behavior of the RTMP group if input becomes unavailable. + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // Number of seconds to wait until a restart is initiated. + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type RtmpGroupSettingsObservation struct { + + // The ad marker type for this output group. + AdMarkers []*string `json:"adMarkers,omitempty" tf:"ad_markers,omitempty"` + + // Authentication scheme to use when connecting with CDN. + AuthenticationScheme *string `json:"authenticationScheme,omitempty" tf:"authentication_scheme,omitempty"` + + // Controls behavior when content cache fills up. + CacheFullBehavior *string `json:"cacheFullBehavior,omitempty" tf:"cache_full_behavior,omitempty"` + + // Cache length in seconds, is used to calculate buffer size. + CacheLength *float64 `json:"cacheLength,omitempty" tf:"cache_length,omitempty"` + + // Controls the types of data that passes to onCaptionInfo outputs. + CaptionData *string `json:"captionData,omitempty" tf:"caption_data,omitempty"` + + // Controls the behavior of the RTMP group if input becomes unavailable. 
+ InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // Number of seconds to wait until a restart is initiated. + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type RtmpGroupSettingsParameters struct { + + // The ad marker type for this output group. + // +kubebuilder:validation:Optional + AdMarkers []*string `json:"adMarkers,omitempty" tf:"ad_markers,omitempty"` + + // Authentication scheme to use when connecting with CDN. + // +kubebuilder:validation:Optional + AuthenticationScheme *string `json:"authenticationScheme,omitempty" tf:"authentication_scheme,omitempty"` + + // Controls behavior when content cache fills up. + // +kubebuilder:validation:Optional + CacheFullBehavior *string `json:"cacheFullBehavior,omitempty" tf:"cache_full_behavior,omitempty"` + + // Cache length in seconds, is used to calculate buffer size. + // +kubebuilder:validation:Optional + CacheLength *float64 `json:"cacheLength,omitempty" tf:"cache_length,omitempty"` + + // Controls the types of data that passes to onCaptionInfo outputs. + // +kubebuilder:validation:Optional + CaptionData *string `json:"captionData,omitempty" tf:"caption_data,omitempty"` + + // Controls the behavior of the RTMP group if input becomes unavailable. + // +kubebuilder:validation:Optional + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // Number of seconds to wait until a restart is initiated. + // +kubebuilder:validation:Optional + RestartDelay *float64 `json:"restartDelay,omitempty" tf:"restart_delay,omitempty"` +} + +type RtmpOutputSettingsDestinationInitParameters struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type RtmpOutputSettingsDestinationObservation struct { + + // Reference ID for the destination. 
+ DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type RtmpOutputSettingsDestinationParameters struct { + + // Reference ID for the destination. + // +kubebuilder:validation:Optional + DestinationRefID *string `json:"destinationRefId" tf:"destination_ref_id,omitempty"` +} + +type RtmpOutputSettingsInitParameters struct { + + // Setting to allow self signed or verified RTMP certificates. + CertificateMode *string `json:"certificateMode,omitempty" tf:"certificate_mode,omitempty"` + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + Destination *RtmpOutputSettingsDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` +} + +type RtmpOutputSettingsObservation struct { + + // Setting to allow self signed or verified RTMP certificates. + CertificateMode *string `json:"certificateMode,omitempty" tf:"certificate_mode,omitempty"` + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + Destination *RtmpOutputSettingsDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Number of retry attempts. + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` +} + +type RtmpOutputSettingsParameters struct { + + // Setting to allow self signed or verified RTMP certificates. 
+ // +kubebuilder:validation:Optional + CertificateMode *string `json:"certificateMode,omitempty" tf:"certificate_mode,omitempty"` + + // Number of seconds to wait before retrying connection to the flash media server if the connection is lost. + // +kubebuilder:validation:Optional + ConnectionRetryInterval *float64 `json:"connectionRetryInterval,omitempty" tf:"connection_retry_interval,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + // +kubebuilder:validation:Optional + Destination *RtmpOutputSettingsDestinationParameters `json:"destination" tf:"destination,omitempty"` + + // Number of retry attempts. + // +kubebuilder:validation:Optional + NumRetries *float64 `json:"numRetries,omitempty" tf:"num_retries,omitempty"` +} + +type Scte20PlusEmbeddedDestinationSettingsInitParameters struct { +} + +type Scte20PlusEmbeddedDestinationSettingsObservation struct { +} + +type Scte20PlusEmbeddedDestinationSettingsParameters struct { +} + +type Scte20SourceSettingsInitParameters struct { + + // If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. + Convert608To708 *string `json:"convert608To708,omitempty" tf:"convert_608_to_708,omitempty"` + + // Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough. + Source608ChannelNumber *float64 `json:"source608ChannelNumber,omitempty" tf:"source_608_channel_number,omitempty"` +} + +type Scte20SourceSettingsObservation struct { + + // If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. 
+ Convert608To708 *string `json:"convert608To708,omitempty" tf:"convert_608_to_708,omitempty"` + + // Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough. + Source608ChannelNumber *float64 `json:"source608ChannelNumber,omitempty" tf:"source_608_channel_number,omitempty"` +} + +type Scte20SourceSettingsParameters struct { + + // If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. + // +kubebuilder:validation:Optional + Convert608To708 *string `json:"convert608To708,omitempty" tf:"convert_608_to_708,omitempty"` + + // Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough. + // +kubebuilder:validation:Optional + Source608ChannelNumber *float64 `json:"source608ChannelNumber,omitempty" tf:"source_608_channel_number,omitempty"` +} + +type Scte27DestinationSettingsInitParameters struct { +} + +type Scte27DestinationSettingsObservation struct { +} + +type Scte27DestinationSettingsParameters struct { +} + +type Scte27SourceSettingsInitParameters struct { + + // If you will configure a WebVTT caption description that references this caption selector, use this field to provide the language to consider when translating the image-based source to text. + OcrLanguage *string `json:"ocrLanguage,omitempty" tf:"ocr_language,omitempty"` + + // Selects a specific PID from within a source. + Pid *float64 `json:"pid,omitempty" tf:"pid,omitempty"` +} + +type Scte27SourceSettingsObservation struct { + + // If you will configure a WebVTT caption description that references this caption selector, use this field to provide the language to consider when translating the image-based source to text. + OcrLanguage *string `json:"ocrLanguage,omitempty" tf:"ocr_language,omitempty"` + + // Selects a specific PID from within a source. 
+ Pid *float64 `json:"pid,omitempty" tf:"pid,omitempty"` +} + +type Scte27SourceSettingsParameters struct { + + // If you will configure a WebVTT caption description that references this caption selector, use this field to provide the language to consider when translating the image-based source to text. + // +kubebuilder:validation:Optional + OcrLanguage *string `json:"ocrLanguage,omitempty" tf:"ocr_language,omitempty"` + + // Selects a specific PID from within a source. + // +kubebuilder:validation:Optional + Pid *float64 `json:"pid,omitempty" tf:"pid,omitempty"` +} + +type SelectorSettingsInitParameters struct { + + // Audio HLS Rendition Selection. See Audio HLS Rendition Selection for more details. + AudioHlsRenditionSelection *AudioHlsRenditionSelectionInitParameters `json:"audioHlsRenditionSelection,omitempty" tf:"audio_hls_rendition_selection,omitempty"` + + // Audio Language Selection. See Audio Language Selection for more details. + AudioLanguageSelection *AudioLanguageSelectionInitParameters `json:"audioLanguageSelection,omitempty" tf:"audio_language_selection,omitempty"` + + // Audio Pid Selection. See Audio PID Selection for more details. + AudioPidSelection *AudioPidSelectionInitParameters `json:"audioPidSelection,omitempty" tf:"audio_pid_selection,omitempty"` + + // Audio Track Selection. See Audio Track Selection for more details. + AudioTrackSelection *AudioTrackSelectionInitParameters `json:"audioTrackSelection,omitempty" tf:"audio_track_selection,omitempty"` +} + +type SelectorSettingsObservation struct { + + // Audio HLS Rendition Selection. See Audio HLS Rendition Selection for more details. + AudioHlsRenditionSelection *AudioHlsRenditionSelectionObservation `json:"audioHlsRenditionSelection,omitempty" tf:"audio_hls_rendition_selection,omitempty"` + + // Audio Language Selection. See Audio Language Selection for more details. 
+ AudioLanguageSelection *AudioLanguageSelectionObservation `json:"audioLanguageSelection,omitempty" tf:"audio_language_selection,omitempty"` + + // Audio Pid Selection. See Audio PID Selection for more details. + AudioPidSelection *AudioPidSelectionObservation `json:"audioPidSelection,omitempty" tf:"audio_pid_selection,omitempty"` + + // Audio Track Selection. See Audio Track Selection for more details. + AudioTrackSelection *AudioTrackSelectionObservation `json:"audioTrackSelection,omitempty" tf:"audio_track_selection,omitempty"` +} + +type SelectorSettingsParameters struct { + + // Audio HLS Rendition Selection. See Audio HLS Rendition Selection for more details. + // +kubebuilder:validation:Optional + AudioHlsRenditionSelection *AudioHlsRenditionSelectionParameters `json:"audioHlsRenditionSelection,omitempty" tf:"audio_hls_rendition_selection,omitempty"` + + // Audio Language Selection. See Audio Language Selection for more details. + // +kubebuilder:validation:Optional + AudioLanguageSelection *AudioLanguageSelectionParameters `json:"audioLanguageSelection,omitempty" tf:"audio_language_selection,omitempty"` + + // Audio Pid Selection. See Audio PID Selection for more details. + // +kubebuilder:validation:Optional + AudioPidSelection *AudioPidSelectionParameters `json:"audioPidSelection,omitempty" tf:"audio_pid_selection,omitempty"` + + // Audio Track Selection. See Audio Track Selection for more details. + // +kubebuilder:validation:Optional + AudioTrackSelection *AudioTrackSelectionParameters `json:"audioTrackSelection,omitempty" tf:"audio_track_selection,omitempty"` +} + +type SettingsInitParameters struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // Stream name RTMP destinations (URLs of type rtmp://) + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` + + // A URL specifying a destination. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SettingsObservation struct { + + // Key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // Stream name RTMP destinations (URLs of type rtmp://) + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` + + // A URL specifying a destination. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Username for destination. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SettingsParameters struct { + + // Key used to extract the password from EC2 Parameter store. + // +kubebuilder:validation:Optional + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // Stream name RTMP destinations (URLs of type rtmp://) + // +kubebuilder:validation:Optional + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` + + // A URL specifying a destination. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Username for destination. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SmpteTtDestinationSettingsInitParameters struct { +} + +type SmpteTtDestinationSettingsObservation struct { +} + +type SmpteTtDestinationSettingsParameters struct { +} + +type StandardHlsSettingsInitParameters struct { + AudioRenditionSets *string `json:"audioRenditionSets,omitempty" tf:"audio_rendition_sets,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ M3U8Settings *M3U8SettingsInitParameters `json:"m3u8Settings,omitempty" tf:"m3u8_settings,omitempty"` +} + +type StandardHlsSettingsObservation struct { + AudioRenditionSets *string `json:"audioRenditionSets,omitempty" tf:"audio_rendition_sets,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + M3U8Settings *M3U8SettingsObservation `json:"m3u8Settings,omitempty" tf:"m3u8_settings,omitempty"` +} + +type StandardHlsSettingsParameters struct { + + // +kubebuilder:validation:Optional + AudioRenditionSets *string `json:"audioRenditionSets,omitempty" tf:"audio_rendition_sets,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + M3U8Settings *M3U8SettingsParameters `json:"m3u8Settings" tf:"m3u8_settings,omitempty"` +} + +type StaticKeySettingsInitParameters struct { + KeyProviderServer *KeyProviderServerInitParameters `json:"keyProviderServer,omitempty" tf:"key_provider_server,omitempty"` + + StaticKeyValue *string `json:"staticKeyValue,omitempty" tf:"static_key_value,omitempty"` +} + +type StaticKeySettingsObservation struct { + KeyProviderServer *KeyProviderServerObservation `json:"keyProviderServer,omitempty" tf:"key_provider_server,omitempty"` + + StaticKeyValue *string `json:"staticKeyValue,omitempty" tf:"static_key_value,omitempty"` +} + +type StaticKeySettingsParameters struct { + + // +kubebuilder:validation:Optional + KeyProviderServer *KeyProviderServerParameters `json:"keyProviderServer,omitempty" tf:"key_provider_server,omitempty"` + + // +kubebuilder:validation:Optional + StaticKeyValue *string `json:"staticKeyValue" tf:"static_key_value,omitempty"` +} + +type TeletextDestinationSettingsInitParameters struct { +} + +type TeletextDestinationSettingsObservation struct { +} + +type TeletextDestinationSettingsParameters struct { +} + +type 
TeletextSourceSettingsInitParameters struct { + + // Optionally defines a region where TTML style captions will be displayed. See Caption Rectangle for more details. + OutputRectangle *OutputRectangleInitParameters `json:"outputRectangle,omitempty" tf:"output_rectangle,omitempty"` + + // Specifies the teletext page number within the data stream from which to extract captions. Range of 0x100 (256) to 0x8FF (2303). Unused for passthrough. Should be specified as a hexadecimal string with no “0x” prefix. + PageNumber *string `json:"pageNumber,omitempty" tf:"page_number,omitempty"` +} + +type TeletextSourceSettingsObservation struct { + + // Optionally defines a region where TTML style captions will be displayed. See Caption Rectangle for more details. + OutputRectangle *OutputRectangleObservation `json:"outputRectangle,omitempty" tf:"output_rectangle,omitempty"` + + // Specifies the teletext page number within the data stream from which to extract captions. Range of 0x100 (256) to 0x8FF (2303). Unused for passthrough. Should be specified as a hexadecimal string with no “0x” prefix. + PageNumber *string `json:"pageNumber,omitempty" tf:"page_number,omitempty"` +} + +type TeletextSourceSettingsParameters struct { + + // Optionally defines a region where TTML style captions will be displayed. See Caption Rectangle for more details. + // +kubebuilder:validation:Optional + OutputRectangle *OutputRectangleParameters `json:"outputRectangle,omitempty" tf:"output_rectangle,omitempty"` + + // Specifies the teletext page number within the data stream from which to extract captions. Range of 0x100 (256) to 0x8FF (2303). Unused for passthrough. Should be specified as a hexadecimal string with no “0x” prefix. + // +kubebuilder:validation:Optional + PageNumber *string `json:"pageNumber,omitempty" tf:"page_number,omitempty"` +} + +type TemporalFilterSettingsInitParameters struct { + + // Post filter sharpening. 
+ PostFilterSharpening *string `json:"postFilterSharpening,omitempty" tf:"post_filter_sharpening,omitempty"` + + // Filter strength. + Strength *string `json:"strength,omitempty" tf:"strength,omitempty"` +} + +type TemporalFilterSettingsObservation struct { + + // Post filter sharpening. + PostFilterSharpening *string `json:"postFilterSharpening,omitempty" tf:"post_filter_sharpening,omitempty"` + + // Filter strength. + Strength *string `json:"strength,omitempty" tf:"strength,omitempty"` +} + +type TemporalFilterSettingsParameters struct { + + // Post filter sharpening. + // +kubebuilder:validation:Optional + PostFilterSharpening *string `json:"postFilterSharpening,omitempty" tf:"post_filter_sharpening,omitempty"` + + // Filter strength. + // +kubebuilder:validation:Optional + Strength *string `json:"strength,omitempty" tf:"strength,omitempty"` +} + +type TimecodeBurninSettingsInitParameters struct { + + // Set a prefix on the burned in timecode. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Sets the size of the burned in timecode. + TimecodeBurninFontSize *string `json:"timecodeBurninFontSize,omitempty" tf:"timecode_burnin_font_size,omitempty"` + + // Sets the position of the burned in timecode. + TimecodeBurninPosition *string `json:"timecodeBurninPosition,omitempty" tf:"timecode_burnin_position,omitempty"` +} + +type TimecodeBurninSettingsObservation struct { + + // Set a prefix on the burned in timecode. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Sets the size of the burned in timecode. + TimecodeBurninFontSize *string `json:"timecodeBurninFontSize,omitempty" tf:"timecode_burnin_font_size,omitempty"` + + // Sets the position of the burned in timecode. + TimecodeBurninPosition *string `json:"timecodeBurninPosition,omitempty" tf:"timecode_burnin_position,omitempty"` +} + +type TimecodeBurninSettingsParameters struct { + + // Set a prefix on the burned in timecode. 
+ // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Sets the size of the burned in timecode. + // +kubebuilder:validation:Optional + TimecodeBurninFontSize *string `json:"timecodeBurninFontSize,omitempty" tf:"timecode_burnin_font_size,omitempty"` + + // Sets the position of the burned in timecode. + // +kubebuilder:validation:Optional + TimecodeBurninPosition *string `json:"timecodeBurninPosition,omitempty" tf:"timecode_burnin_position,omitempty"` +} + +type TimecodeConfigInitParameters struct { + + // The source for the timecode that will be associated with the events outputs. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // Threshold in frames beyond which output timecode is resynchronized to the input timecode. + SyncThreshold *float64 `json:"syncThreshold,omitempty" tf:"sync_threshold,omitempty"` +} + +type TimecodeConfigObservation struct { + + // The source for the timecode that will be associated with the events outputs. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // Threshold in frames beyond which output timecode is resynchronized to the input timecode. + SyncThreshold *float64 `json:"syncThreshold,omitempty" tf:"sync_threshold,omitempty"` +} + +type TimecodeConfigParameters struct { + + // The source for the timecode that will be associated with the events outputs. + // +kubebuilder:validation:Optional + Source *string `json:"source" tf:"source,omitempty"` + + // Threshold in frames beyond which output timecode is resynchronized to the input timecode. + // +kubebuilder:validation:Optional + SyncThreshold *float64 `json:"syncThreshold,omitempty" tf:"sync_threshold,omitempty"` +} + +type TracksInitParameters struct { + + // 1-based integer value that maps to a specific audio track. + Track *float64 `json:"track,omitempty" tf:"track,omitempty"` +} + +type TracksObservation struct { + + // 1-based integer value that maps to a specific audio track. 
+ Track *float64 `json:"track,omitempty" tf:"track,omitempty"` +} + +type TracksParameters struct { + + // 1-based integer value that maps to a specific audio track. + // +kubebuilder:validation:Optional + Track *float64 `json:"track" tf:"track,omitempty"` +} + +type TtmlDestinationSettingsInitParameters struct { + + // TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. + StyleControl *string `json:"styleControl,omitempty" tf:"style_control,omitempty"` +} + +type TtmlDestinationSettingsObservation struct { + + // TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. + StyleControl *string `json:"styleControl,omitempty" tf:"style_control,omitempty"` +} + +type TtmlDestinationSettingsParameters struct { + + // TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. 
+ // +kubebuilder:validation:Optional + StyleControl *string `json:"styleControl" tf:"style_control,omitempty"` +} + +type UDPGroupSettingsInitParameters struct { + + // Controls the behavior of the RTMP group if input becomes unavailable. + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // Indicates ID3 frame that has the timecode. + TimedMetadataId3Frame *string `json:"timedMetadataId3Frame,omitempty" tf:"timed_metadata_id3_frame,omitempty"` + + TimedMetadataId3Period *float64 `json:"timedMetadataId3Period,omitempty" tf:"timed_metadata_id3_period,omitempty"` +} + +type UDPGroupSettingsObservation struct { + + // Controls the behavior of the RTMP group if input becomes unavailable. + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // Indicates ID3 frame that has the timecode. + TimedMetadataId3Frame *string `json:"timedMetadataId3Frame,omitempty" tf:"timed_metadata_id3_frame,omitempty"` + + TimedMetadataId3Period *float64 `json:"timedMetadataId3Period,omitempty" tf:"timed_metadata_id3_period,omitempty"` +} + +type UDPGroupSettingsParameters struct { + + // Controls the behavior of the RTMP group if input becomes unavailable. + // +kubebuilder:validation:Optional + InputLossAction *string `json:"inputLossAction,omitempty" tf:"input_loss_action,omitempty"` + + // Indicates ID3 frame that has the timecode. + // +kubebuilder:validation:Optional + TimedMetadataId3Frame *string `json:"timedMetadataId3Frame,omitempty" tf:"timed_metadata_id3_frame,omitempty"` + + // +kubebuilder:validation:Optional + TimedMetadataId3Period *float64 `json:"timedMetadataId3Period,omitempty" tf:"timed_metadata_id3_period,omitempty"` +} + +type UDPOutputSettingsContainerSettingsInitParameters struct { + + // M2TS Settings. See M2TS Settings for more details. 
+ M2TsSettings *ContainerSettingsM2TsSettingsInitParameters `json:"m2tsSettings,omitempty" tf:"m2ts_settings,omitempty"` +} + +type UDPOutputSettingsContainerSettingsObservation struct { + + // M2TS Settings. See M2TS Settings for more details. + M2TsSettings *ContainerSettingsM2TsSettingsObservation `json:"m2tsSettings,omitempty" tf:"m2ts_settings,omitempty"` +} + +type UDPOutputSettingsContainerSettingsParameters struct { + + // M2TS Settings. See M2TS Settings for more details. + // +kubebuilder:validation:Optional + M2TsSettings *ContainerSettingsM2TsSettingsParameters `json:"m2tsSettings,omitempty" tf:"m2ts_settings,omitempty"` +} + +type UDPOutputSettingsDestinationInitParameters struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type UDPOutputSettingsDestinationObservation struct { + + // Reference ID for the destination. + DestinationRefID *string `json:"destinationRefId,omitempty" tf:"destination_ref_id,omitempty"` +} + +type UDPOutputSettingsDestinationParameters struct { + + // Reference ID for the destination. + // +kubebuilder:validation:Optional + DestinationRefID *string `json:"destinationRefId" tf:"destination_ref_id,omitempty"` +} + +type UDPOutputSettingsInitParameters struct { + + // UDP output buffering in milliseconds. + BufferMsec *float64 `json:"bufferMsec,omitempty" tf:"buffer_msec,omitempty"` + + // Settings specific to the container type of the file. See Container Settings for more details. + ContainerSettings *UDPOutputSettingsContainerSettingsInitParameters `json:"containerSettings,omitempty" tf:"container_settings,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + Destination *UDPOutputSettingsDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Settings for output. See Output Settings for more details. 
+ FecOutputSettings *FecOutputSettingsInitParameters `json:"fecOutputSettings,omitempty" tf:"fec_output_settings,omitempty"` +} + +type UDPOutputSettingsObservation struct { + + // UDP output buffering in milliseconds. + BufferMsec *float64 `json:"bufferMsec,omitempty" tf:"buffer_msec,omitempty"` + + // Settings specific to the container type of the file. See Container Settings for more details. + ContainerSettings *UDPOutputSettingsContainerSettingsObservation `json:"containerSettings,omitempty" tf:"container_settings,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + Destination *UDPOutputSettingsDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Settings for output. See Output Settings for more details. + FecOutputSettings *FecOutputSettingsObservation `json:"fecOutputSettings,omitempty" tf:"fec_output_settings,omitempty"` +} + +type UDPOutputSettingsParameters struct { + + // UDP output buffering in milliseconds. + // +kubebuilder:validation:Optional + BufferMsec *float64 `json:"bufferMsec,omitempty" tf:"buffer_msec,omitempty"` + + // Settings specific to the container type of the file. See Container Settings for more details. + // +kubebuilder:validation:Optional + ContainerSettings *UDPOutputSettingsContainerSettingsParameters `json:"containerSettings" tf:"container_settings,omitempty"` + + // A director and base filename where archive files should be written. See Destination for more details. + // +kubebuilder:validation:Optional + Destination *UDPOutputSettingsDestinationParameters `json:"destination" tf:"destination,omitempty"` + + // Settings for output. See Output Settings for more details. 
+ // +kubebuilder:validation:Optional + FecOutputSettings *FecOutputSettingsParameters `json:"fecOutputSettings,omitempty" tf:"fec_output_settings,omitempty"` +} + +type VPCInitParameters struct { + + // List of public address allocation ids to associate with ENIs that will be created in Output VPC. Must specify one for SINGLE_PIPELINE, two for STANDARD channels. + PublicAddressAllocationIds []*string `json:"publicAddressAllocationIds,omitempty" tf:"public_address_allocation_ids,omitempty"` + + // A list of up to 5 EC2 VPC security group IDs to attach to the Output VPC network interfaces. If none are specified then the VPC default security group will be used. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of VPC subnet IDs from the same VPC. If STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ). + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCObservation struct { + + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // +listType=set + NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // List of public address allocation ids to associate with ENIs that will be created in Output VPC. Must specify one for SINGLE_PIPELINE, two for STANDARD channels. + PublicAddressAllocationIds []*string `json:"publicAddressAllocationIds,omitempty" tf:"public_address_allocation_ids,omitempty"` + + // A list of up to 5 EC2 VPC security group IDs to attach to the Output VPC network interfaces. If none are specified then the VPC default security group will be used. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of VPC subnet IDs from the same VPC. 
If STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ). + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCParameters struct { + + // List of public address allocation ids to associate with ENIs that will be created in Output VPC. Must specify one for SINGLE_PIPELINE, two for STANDARD channels. + // +kubebuilder:validation:Optional + PublicAddressAllocationIds []*string `json:"publicAddressAllocationIds" tf:"public_address_allocation_ids,omitempty"` + + // A list of up to 5 EC2 VPC security group IDs to attach to the Output VPC network interfaces. If none are specified then the VPC default security group will be used. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of VPC subnet IDs from the same VPC. If STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ). + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"` +} + +type VideoBlackSettingsInitParameters struct { + + // A value used in calculating the threshold below which MediaLive considers a pixel to be 'black'. For the input to be considered black, every pixel in a frame must be below this threshold. The threshold is calculated as a percentage (expressed as a decimal) of white. Therefore .1 means 10% white (or 90% black). Note how the formula works for any color depth. For example, if you set this field to 0.1 in 10-bit color depth: (10230.1=102.3), which means a pixel value of 102 or less is 'black'. If you set this field to .1 in an 8-bit color depth: (2550.1=25.5), which means a pixel value of 25 or less is 'black'. The range is 0.0 to 1.0, with any number of decimal places. 
+ BlackDetectThreshold *float64 `json:"blackDetectThreshold,omitempty" tf:"black_detect_threshold,omitempty"` + + // The amount of time (in milliseconds) that the active input must be black before automatic input failover occurs. + VideoBlackThresholdMsec *float64 `json:"videoBlackThresholdMsec,omitempty" tf:"video_black_threshold_msec,omitempty"` +} + +type VideoBlackSettingsObservation struct { + + // A value used in calculating the threshold below which MediaLive considers a pixel to be 'black'. For the input to be considered black, every pixel in a frame must be below this threshold. The threshold is calculated as a percentage (expressed as a decimal) of white. Therefore .1 means 10% white (or 90% black). Note how the formula works for any color depth. For example, if you set this field to 0.1 in 10-bit color depth: (10230.1=102.3), which means a pixel value of 102 or less is 'black'. If you set this field to .1 in an 8-bit color depth: (2550.1=25.5), which means a pixel value of 25 or less is 'black'. The range is 0.0 to 1.0, with any number of decimal places. + BlackDetectThreshold *float64 `json:"blackDetectThreshold,omitempty" tf:"black_detect_threshold,omitempty"` + + // The amount of time (in milliseconds) that the active input must be black before automatic input failover occurs. + VideoBlackThresholdMsec *float64 `json:"videoBlackThresholdMsec,omitempty" tf:"video_black_threshold_msec,omitempty"` +} + +type VideoBlackSettingsParameters struct { + + // A value used in calculating the threshold below which MediaLive considers a pixel to be 'black'. For the input to be considered black, every pixel in a frame must be below this threshold. The threshold is calculated as a percentage (expressed as a decimal) of white. Therefore .1 means 10% white (or 90% black). Note how the formula works for any color depth. For example, if you set this field to 0.1 in 10-bit color depth: (10230.1=102.3), which means a pixel value of 102 or less is 'black'. 
If you set this field to .1 in an 8-bit color depth: (2550.1=25.5), which means a pixel value of 25 or less is 'black'. The range is 0.0 to 1.0, with any number of decimal places. + // +kubebuilder:validation:Optional + BlackDetectThreshold *float64 `json:"blackDetectThreshold,omitempty" tf:"black_detect_threshold,omitempty"` + + // The amount of time (in milliseconds) that the active input must be black before automatic input failover occurs. + // +kubebuilder:validation:Optional + VideoBlackThresholdMsec *float64 `json:"videoBlackThresholdMsec,omitempty" tf:"video_black_threshold_msec,omitempty"` +} + +type VideoDescriptionsCodecSettingsInitParameters struct { + + // Frame capture settings. See Frame Capture Settings for more details. + FrameCaptureSettings *FrameCaptureSettingsInitParameters `json:"frameCaptureSettings,omitempty" tf:"frame_capture_settings,omitempty"` + + // H264 settings. See H264 Settings for more details. + H264Settings *H264SettingsInitParameters `json:"h264Settings,omitempty" tf:"h264_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + H265Settings *H265SettingsInitParameters `json:"h265Settings,omitempty" tf:"h265_settings,omitempty"` +} + +type VideoDescriptionsCodecSettingsObservation struct { + + // Frame capture settings. See Frame Capture Settings for more details. + FrameCaptureSettings *FrameCaptureSettingsObservation `json:"frameCaptureSettings,omitempty" tf:"frame_capture_settings,omitempty"` + + // H264 settings. See H264 Settings for more details. + H264Settings *H264SettingsObservation `json:"h264Settings,omitempty" tf:"h264_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. 
+ H265Settings *H265SettingsObservation `json:"h265Settings,omitempty" tf:"h265_settings,omitempty"` +} + +type VideoDescriptionsCodecSettingsParameters struct { + + // Frame capture settings. See Frame Capture Settings for more details. + // +kubebuilder:validation:Optional + FrameCaptureSettings *FrameCaptureSettingsParameters `json:"frameCaptureSettings,omitempty" tf:"frame_capture_settings,omitempty"` + + // H264 settings. See H264 Settings for more details. + // +kubebuilder:validation:Optional + H264Settings *H264SettingsParameters `json:"h264Settings,omitempty" tf:"h264_settings,omitempty"` + + // Destination settings for a standard output; one destination for each redundant encoder. See Settings for more details. + // +kubebuilder:validation:Optional + H265Settings *H265SettingsParameters `json:"h265Settings,omitempty" tf:"h265_settings,omitempty"` +} + +type VideoDescriptionsInitParameters struct { + + // Audio codec settings. See Audio Codec Settings for more details. + CodecSettings *VideoDescriptionsCodecSettingsInitParameters `json:"codecSettings,omitempty" tf:"codec_settings,omitempty"` + + // See the description in left_offset. For height, specify the entire height of the rectangle as a percentage of the underlying frame height. For example, "80" means the rectangle height is 80% of the underlying frame height. The top_offset and rectangle_height must add up to 100% or less. This field corresponds to tts:extent - Y in the TTML standard. + Height *float64 `json:"height,omitempty" tf:"height,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Indicate how to respond to the AFD values that might be in the input video. + RespondToAfd *string `json:"respondToAfd,omitempty" tf:"respond_to_afd,omitempty"` + + // Behavior on how to scale. + ScalingBehavior *string `json:"scalingBehavior,omitempty" tf:"scaling_behavior,omitempty"` + + // Changes the strength of the anti-alias filter used for scaling. 
+ Sharpness *float64 `json:"sharpness,omitempty" tf:"sharpness,omitempty"` + + // See the description in left_offset. For width, specify the entire width of the rectangle as a percentage of the underlying frame width. For example, "80" means the rectangle width is 80% of the underlying frame width. The left_offset and rectangle_width must add up to 100% or less. This field corresponds to tts:extent - X in the TTML standard. + Width *float64 `json:"width,omitempty" tf:"width,omitempty"` +} + +type VideoDescriptionsObservation struct { + + // Audio codec settings. See Audio Codec Settings for more details. + CodecSettings *VideoDescriptionsCodecSettingsObservation `json:"codecSettings,omitempty" tf:"codec_settings,omitempty"` + + // See the description in left_offset. For height, specify the entire height of the rectangle as a percentage of the underlying frame height. For example, "80" means the rectangle height is 80% of the underlying frame height. The top_offset and rectangle_height must add up to 100% or less. This field corresponds to tts:extent - Y in the TTML standard. + Height *float64 `json:"height,omitempty" tf:"height,omitempty"` + + // Name of the Channel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Indicate how to respond to the AFD values that might be in the input video. + RespondToAfd *string `json:"respondToAfd,omitempty" tf:"respond_to_afd,omitempty"` + + // Behavior on how to scale. + ScalingBehavior *string `json:"scalingBehavior,omitempty" tf:"scaling_behavior,omitempty"` + + // Changes the strength of the anti-alias filter used for scaling. + Sharpness *float64 `json:"sharpness,omitempty" tf:"sharpness,omitempty"` + + // See the description in left_offset. For width, specify the entire width of the rectangle as a percentage of the underlying frame width. For example, "80" means the rectangle width is 80% of the underlying frame width. The left_offset and rectangle_width must add up to 100% or less. 
This field corresponds to tts:extent - X in the TTML standard. + Width *float64 `json:"width,omitempty" tf:"width,omitempty"` +} + +type VideoDescriptionsParameters struct { + + // Audio codec settings. See Audio Codec Settings for more details. + // +kubebuilder:validation:Optional + CodecSettings *VideoDescriptionsCodecSettingsParameters `json:"codecSettings,omitempty" tf:"codec_settings,omitempty"` + + // See the description in left_offset. For height, specify the entire height of the rectangle as a percentage of the underlying frame height. For example, "80" means the rectangle height is 80% of the underlying frame height. The top_offset and rectangle_height must add up to 100% or less. This field corresponds to tts:extent - Y in the TTML standard. + // +kubebuilder:validation:Optional + Height *float64 `json:"height,omitempty" tf:"height,omitempty"` + + // Name of the Channel. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Indicate how to respond to the AFD values that might be in the input video. + // +kubebuilder:validation:Optional + RespondToAfd *string `json:"respondToAfd,omitempty" tf:"respond_to_afd,omitempty"` + + // Behavior on how to scale. + // +kubebuilder:validation:Optional + ScalingBehavior *string `json:"scalingBehavior,omitempty" tf:"scaling_behavior,omitempty"` + + // Changes the strength of the anti-alias filter used for scaling. + // +kubebuilder:validation:Optional + Sharpness *float64 `json:"sharpness,omitempty" tf:"sharpness,omitempty"` + + // See the description in left_offset. For width, specify the entire width of the rectangle as a percentage of the underlying frame width. For example, "80" means the rectangle width is 80% of the underlying frame width. The left_offset and rectangle_width must add up to 100% or less. This field corresponds to tts:extent - X in the TTML standard. 
+ // +kubebuilder:validation:Optional + Width *float64 `json:"width,omitempty" tf:"width,omitempty"` +} + +type VideoSelectorInitParameters struct { + ColorSpace *string `json:"colorSpace,omitempty" tf:"color_space,omitempty"` + + ColorSpaceUsage *string `json:"colorSpaceUsage,omitempty" tf:"color_space_usage,omitempty"` +} + +type VideoSelectorObservation struct { + ColorSpace *string `json:"colorSpace,omitempty" tf:"color_space,omitempty"` + + ColorSpaceUsage *string `json:"colorSpaceUsage,omitempty" tf:"color_space_usage,omitempty"` +} + +type VideoSelectorParameters struct { + + // +kubebuilder:validation:Optional + ColorSpace *string `json:"colorSpace,omitempty" tf:"color_space,omitempty"` + + // +kubebuilder:validation:Optional + ColorSpaceUsage *string `json:"colorSpaceUsage,omitempty" tf:"color_space_usage,omitempty"` +} + +type WavSettingsInitParameters struct { + BitDepth *float64 `json:"bitDepth,omitempty" tf:"bit_depth,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sample rate in Hz. + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type WavSettingsObservation struct { + BitDepth *float64 `json:"bitDepth,omitempty" tf:"bit_depth,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sample rate in Hz. + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type WavSettingsParameters struct { + + // +kubebuilder:validation:Optional + BitDepth *float64 `json:"bitDepth,omitempty" tf:"bit_depth,omitempty"` + + // Mono, Stereo, or 5.1 channel layout. + // +kubebuilder:validation:Optional + CodingMode *string `json:"codingMode,omitempty" tf:"coding_mode,omitempty"` + + // Sample rate in Hz. 
+ // +kubebuilder:validation:Optional + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type WebvttDestinationSettingsInitParameters struct { + + // TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. + StyleControl *string `json:"styleControl,omitempty" tf:"style_control,omitempty"` +} + +type WebvttDestinationSettingsObservation struct { + + // TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. + StyleControl *string `json:"styleControl,omitempty" tf:"style_control,omitempty"` +} + +type WebvttDestinationSettingsParameters struct { + + // TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. 
+ // +kubebuilder:validation:Optional + StyleControl *string `json:"styleControl" tf:"style_control,omitempty"` +} + +// ChannelSpec defines the desired state of Channel +type ChannelSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ChannelParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ChannelInitParameters `json:"initProvider,omitempty"` +} + +// ChannelStatus defines the observed state of Channel. +type ChannelStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ChannelObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Channel is the Schema for the Channels API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Channel struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.channelClass) || (has(self.initProvider) && has(self.initProvider.channelClass))",message="spec.forProvider.channelClass is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destinations) || (has(self.initProvider) && has(self.initProvider.destinations))",message="spec.forProvider.destinations is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.encoderSettings) || (has(self.initProvider) && has(self.initProvider.encoderSettings))",message="spec.forProvider.encoderSettings is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.inputAttachments) || (has(self.initProvider) && has(self.initProvider.inputAttachments))",message="spec.forProvider.inputAttachments is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.inputSpecification) || (has(self.initProvider) && has(self.initProvider.inputSpecification))",message="spec.forProvider.inputSpecification is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ChannelSpec `json:"spec"` + Status ChannelStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ChannelList contains a list of Channels +type ChannelList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Channel `json:"items"` +} + +// Repository type metadata. +var ( + Channel_Kind = "Channel" + Channel_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Channel_Kind}.String() + Channel_KindAPIVersion = Channel_Kind + "." + CRDGroupVersion.String() + Channel_GroupVersionKind = CRDGroupVersion.WithKind(Channel_Kind) +) + +func init() { + SchemeBuilder.Register(&Channel{}, &ChannelList{}) +} diff --git a/apis/medialive/v1beta2/zz_generated.conversion_hubs.go b/apis/medialive/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..6c950dd859 --- /dev/null +++ b/apis/medialive/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Channel) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Input) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *Multiplex) Hub() {} diff --git a/apis/medialive/v1beta2/zz_generated.deepcopy.go b/apis/medialive/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..4dc88baedd --- /dev/null +++ b/apis/medialive/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,19594 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AacSettingsInitParameters) DeepCopyInto(out *AacSettingsInitParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.InputType != nil { + in, out := &in.InputType, &out.InputType + *out = new(string) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.RawFormat != nil { + in, out := &in.RawFormat, &out.RawFormat + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(float64) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(string) + **out = **in + } + if in.VbrQuality != nil { + in, out := &in.VbrQuality, &out.VbrQuality + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AacSettingsInitParameters. 
+func (in *AacSettingsInitParameters) DeepCopy() *AacSettingsInitParameters { + if in == nil { + return nil + } + out := new(AacSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AacSettingsObservation) DeepCopyInto(out *AacSettingsObservation) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.InputType != nil { + in, out := &in.InputType, &out.InputType + *out = new(string) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.RawFormat != nil { + in, out := &in.RawFormat, &out.RawFormat + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(float64) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(string) + **out = **in + } + if in.VbrQuality != nil { + in, out := &in.VbrQuality, &out.VbrQuality + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AacSettingsObservation. +func (in *AacSettingsObservation) DeepCopy() *AacSettingsObservation { + if in == nil { + return nil + } + out := new(AacSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AacSettingsParameters) DeepCopyInto(out *AacSettingsParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.InputType != nil { + in, out := &in.InputType, &out.InputType + *out = new(string) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.RawFormat != nil { + in, out := &in.RawFormat, &out.RawFormat + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(float64) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(string) + **out = **in + } + if in.VbrQuality != nil { + in, out := &in.VbrQuality, &out.VbrQuality + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AacSettingsParameters. +func (in *AacSettingsParameters) DeepCopy() *AacSettingsParameters { + if in == nil { + return nil + } + out := new(AacSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Ac3SettingsInitParameters) DeepCopyInto(out *Ac3SettingsInitParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BitstreamMode != nil { + in, out := &in.BitstreamMode, &out.BitstreamMode + *out = new(string) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.Dialnorm != nil { + in, out := &in.Dialnorm, &out.Dialnorm + *out = new(float64) + **out = **in + } + if in.DrcProfile != nil { + in, out := &in.DrcProfile, &out.DrcProfile + *out = new(string) + **out = **in + } + if in.LfeFilter != nil { + in, out := &in.LfeFilter, &out.LfeFilter + *out = new(string) + **out = **in + } + if in.MetadataControl != nil { + in, out := &in.MetadataControl, &out.MetadataControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ac3SettingsInitParameters. +func (in *Ac3SettingsInitParameters) DeepCopy() *Ac3SettingsInitParameters { + if in == nil { + return nil + } + out := new(Ac3SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Ac3SettingsObservation) DeepCopyInto(out *Ac3SettingsObservation) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BitstreamMode != nil { + in, out := &in.BitstreamMode, &out.BitstreamMode + *out = new(string) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.Dialnorm != nil { + in, out := &in.Dialnorm, &out.Dialnorm + *out = new(float64) + **out = **in + } + if in.DrcProfile != nil { + in, out := &in.DrcProfile, &out.DrcProfile + *out = new(string) + **out = **in + } + if in.LfeFilter != nil { + in, out := &in.LfeFilter, &out.LfeFilter + *out = new(string) + **out = **in + } + if in.MetadataControl != nil { + in, out := &in.MetadataControl, &out.MetadataControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ac3SettingsObservation. +func (in *Ac3SettingsObservation) DeepCopy() *Ac3SettingsObservation { + if in == nil { + return nil + } + out := new(Ac3SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Ac3SettingsParameters) DeepCopyInto(out *Ac3SettingsParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BitstreamMode != nil { + in, out := &in.BitstreamMode, &out.BitstreamMode + *out = new(string) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.Dialnorm != nil { + in, out := &in.Dialnorm, &out.Dialnorm + *out = new(float64) + **out = **in + } + if in.DrcProfile != nil { + in, out := &in.DrcProfile, &out.DrcProfile + *out = new(string) + **out = **in + } + if in.LfeFilter != nil { + in, out := &in.LfeFilter, &out.LfeFilter + *out = new(string) + **out = **in + } + if in.MetadataControl != nil { + in, out := &in.MetadataControl, &out.MetadataControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ac3SettingsParameters. +func (in *Ac3SettingsParameters) DeepCopy() *Ac3SettingsParameters { + if in == nil { + return nil + } + out := new(Ac3SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AncillarySourceSettingsInitParameters) DeepCopyInto(out *AncillarySourceSettingsInitParameters) { + *out = *in + if in.SourceAncillaryChannelNumber != nil { + in, out := &in.SourceAncillaryChannelNumber, &out.SourceAncillaryChannelNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AncillarySourceSettingsInitParameters. 
+func (in *AncillarySourceSettingsInitParameters) DeepCopy() *AncillarySourceSettingsInitParameters { + if in == nil { + return nil + } + out := new(AncillarySourceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AncillarySourceSettingsObservation) DeepCopyInto(out *AncillarySourceSettingsObservation) { + *out = *in + if in.SourceAncillaryChannelNumber != nil { + in, out := &in.SourceAncillaryChannelNumber, &out.SourceAncillaryChannelNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AncillarySourceSettingsObservation. +func (in *AncillarySourceSettingsObservation) DeepCopy() *AncillarySourceSettingsObservation { + if in == nil { + return nil + } + out := new(AncillarySourceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AncillarySourceSettingsParameters) DeepCopyInto(out *AncillarySourceSettingsParameters) { + *out = *in + if in.SourceAncillaryChannelNumber != nil { + in, out := &in.SourceAncillaryChannelNumber, &out.SourceAncillaryChannelNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AncillarySourceSettingsParameters. +func (in *AncillarySourceSettingsParameters) DeepCopy() *AncillarySourceSettingsParameters { + if in == nil { + return nil + } + out := new(AncillarySourceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ArchiveCdnSettingsInitParameters) DeepCopyInto(out *ArchiveCdnSettingsInitParameters) { + *out = *in + if in.ArchiveS3Settings != nil { + in, out := &in.ArchiveS3Settings, &out.ArchiveS3Settings + *out = new(ArchiveS3SettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveCdnSettingsInitParameters. +func (in *ArchiveCdnSettingsInitParameters) DeepCopy() *ArchiveCdnSettingsInitParameters { + if in == nil { + return nil + } + out := new(ArchiveCdnSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchiveCdnSettingsObservation) DeepCopyInto(out *ArchiveCdnSettingsObservation) { + *out = *in + if in.ArchiveS3Settings != nil { + in, out := &in.ArchiveS3Settings, &out.ArchiveS3Settings + *out = new(ArchiveS3SettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveCdnSettingsObservation. +func (in *ArchiveCdnSettingsObservation) DeepCopy() *ArchiveCdnSettingsObservation { + if in == nil { + return nil + } + out := new(ArchiveCdnSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchiveCdnSettingsParameters) DeepCopyInto(out *ArchiveCdnSettingsParameters) { + *out = *in + if in.ArchiveS3Settings != nil { + in, out := &in.ArchiveS3Settings, &out.ArchiveS3Settings + *out = new(ArchiveS3SettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveCdnSettingsParameters. 
+func (in *ArchiveCdnSettingsParameters) DeepCopy() *ArchiveCdnSettingsParameters { + if in == nil { + return nil + } + out := new(ArchiveCdnSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchiveGroupSettingsInitParameters) DeepCopyInto(out *ArchiveGroupSettingsInitParameters) { + *out = *in + if in.ArchiveCdnSettings != nil { + in, out := &in.ArchiveCdnSettings, &out.ArchiveCdnSettings + *out = new(ArchiveCdnSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RolloverInterval != nil { + in, out := &in.RolloverInterval, &out.RolloverInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveGroupSettingsInitParameters. +func (in *ArchiveGroupSettingsInitParameters) DeepCopy() *ArchiveGroupSettingsInitParameters { + if in == nil { + return nil + } + out := new(ArchiveGroupSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ArchiveGroupSettingsObservation) DeepCopyInto(out *ArchiveGroupSettingsObservation) { + *out = *in + if in.ArchiveCdnSettings != nil { + in, out := &in.ArchiveCdnSettings, &out.ArchiveCdnSettings + *out = new(ArchiveCdnSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.RolloverInterval != nil { + in, out := &in.RolloverInterval, &out.RolloverInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveGroupSettingsObservation. +func (in *ArchiveGroupSettingsObservation) DeepCopy() *ArchiveGroupSettingsObservation { + if in == nil { + return nil + } + out := new(ArchiveGroupSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchiveGroupSettingsParameters) DeepCopyInto(out *ArchiveGroupSettingsParameters) { + *out = *in + if in.ArchiveCdnSettings != nil { + in, out := &in.ArchiveCdnSettings, &out.ArchiveCdnSettings + *out = new(ArchiveCdnSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.RolloverInterval != nil { + in, out := &in.RolloverInterval, &out.RolloverInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveGroupSettingsParameters. +func (in *ArchiveGroupSettingsParameters) DeepCopy() *ArchiveGroupSettingsParameters { + if in == nil { + return nil + } + out := new(ArchiveGroupSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ArchiveOutputSettingsInitParameters) DeepCopyInto(out *ArchiveOutputSettingsInitParameters) { + *out = *in + if in.ContainerSettings != nil { + in, out := &in.ContainerSettings, &out.ContainerSettings + *out = new(ContainerSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(string) + **out = **in + } + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveOutputSettingsInitParameters. +func (in *ArchiveOutputSettingsInitParameters) DeepCopy() *ArchiveOutputSettingsInitParameters { + if in == nil { + return nil + } + out := new(ArchiveOutputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchiveOutputSettingsObservation) DeepCopyInto(out *ArchiveOutputSettingsObservation) { + *out = *in + if in.ContainerSettings != nil { + in, out := &in.ContainerSettings, &out.ContainerSettings + *out = new(ContainerSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(string) + **out = **in + } + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveOutputSettingsObservation. +func (in *ArchiveOutputSettingsObservation) DeepCopy() *ArchiveOutputSettingsObservation { + if in == nil { + return nil + } + out := new(ArchiveOutputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ArchiveOutputSettingsParameters) DeepCopyInto(out *ArchiveOutputSettingsParameters) { + *out = *in + if in.ContainerSettings != nil { + in, out := &in.ContainerSettings, &out.ContainerSettings + *out = new(ContainerSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(string) + **out = **in + } + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveOutputSettingsParameters. +func (in *ArchiveOutputSettingsParameters) DeepCopy() *ArchiveOutputSettingsParameters { + if in == nil { + return nil + } + out := new(ArchiveOutputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchiveS3SettingsInitParameters) DeepCopyInto(out *ArchiveS3SettingsInitParameters) { + *out = *in + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveS3SettingsInitParameters. +func (in *ArchiveS3SettingsInitParameters) DeepCopy() *ArchiveS3SettingsInitParameters { + if in == nil { + return nil + } + out := new(ArchiveS3SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchiveS3SettingsObservation) DeepCopyInto(out *ArchiveS3SettingsObservation) { + *out = *in + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveS3SettingsObservation. 
+func (in *ArchiveS3SettingsObservation) DeepCopy() *ArchiveS3SettingsObservation { + if in == nil { + return nil + } + out := new(ArchiveS3SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArchiveS3SettingsParameters) DeepCopyInto(out *ArchiveS3SettingsParameters) { + *out = *in + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveS3SettingsParameters. +func (in *ArchiveS3SettingsParameters) DeepCopy() *ArchiveS3SettingsParameters { + if in == nil { + return nil + } + out := new(ArchiveS3SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AribDestinationSettingsInitParameters) DeepCopyInto(out *AribDestinationSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AribDestinationSettingsInitParameters. +func (in *AribDestinationSettingsInitParameters) DeepCopy() *AribDestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(AribDestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AribDestinationSettingsObservation) DeepCopyInto(out *AribDestinationSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AribDestinationSettingsObservation. 
+func (in *AribDestinationSettingsObservation) DeepCopy() *AribDestinationSettingsObservation { + if in == nil { + return nil + } + out := new(AribDestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AribDestinationSettingsParameters) DeepCopyInto(out *AribDestinationSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AribDestinationSettingsParameters. +func (in *AribDestinationSettingsParameters) DeepCopy() *AribDestinationSettingsParameters { + if in == nil { + return nil + } + out := new(AribDestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AribSourceSettingsInitParameters) DeepCopyInto(out *AribSourceSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AribSourceSettingsInitParameters. +func (in *AribSourceSettingsInitParameters) DeepCopy() *AribSourceSettingsInitParameters { + if in == nil { + return nil + } + out := new(AribSourceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AribSourceSettingsObservation) DeepCopyInto(out *AribSourceSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AribSourceSettingsObservation. 
+func (in *AribSourceSettingsObservation) DeepCopy() *AribSourceSettingsObservation { + if in == nil { + return nil + } + out := new(AribSourceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AribSourceSettingsParameters) DeepCopyInto(out *AribSourceSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AribSourceSettingsParameters. +func (in *AribSourceSettingsParameters) DeepCopy() *AribSourceSettingsParameters { + if in == nil { + return nil + } + out := new(AribSourceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioDescriptionsInitParameters) DeepCopyInto(out *AudioDescriptionsInitParameters) { + *out = *in + if in.AudioNormalizationSettings != nil { + in, out := &in.AudioNormalizationSettings, &out.AudioNormalizationSettings + *out = new(AudioNormalizationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AudioSelectorName != nil { + in, out := &in.AudioSelectorName, &out.AudioSelectorName + *out = new(string) + **out = **in + } + if in.AudioType != nil { + in, out := &in.AudioType, &out.AudioType + *out = new(string) + **out = **in + } + if in.AudioTypeControl != nil { + in, out := &in.AudioTypeControl, &out.AudioTypeControl + *out = new(string) + **out = **in + } + if in.AudioWatermarkSettings != nil { + in, out := &in.AudioWatermarkSettings, &out.AudioWatermarkSettings + *out = new(AudioWatermarkSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CodecSettings != nil { + in, out := &in.CodecSettings, &out.CodecSettings + *out = new(CodecSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = 
new(string) + **out = **in + } + if in.LanguageCodeControl != nil { + in, out := &in.LanguageCodeControl, &out.LanguageCodeControl + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RemixSettings != nil { + in, out := &in.RemixSettings, &out.RemixSettings + *out = new(RemixSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioDescriptionsInitParameters. +func (in *AudioDescriptionsInitParameters) DeepCopy() *AudioDescriptionsInitParameters { + if in == nil { + return nil + } + out := new(AudioDescriptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioDescriptionsObservation) DeepCopyInto(out *AudioDescriptionsObservation) { + *out = *in + if in.AudioNormalizationSettings != nil { + in, out := &in.AudioNormalizationSettings, &out.AudioNormalizationSettings + *out = new(AudioNormalizationSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AudioSelectorName != nil { + in, out := &in.AudioSelectorName, &out.AudioSelectorName + *out = new(string) + **out = **in + } + if in.AudioType != nil { + in, out := &in.AudioType, &out.AudioType + *out = new(string) + **out = **in + } + if in.AudioTypeControl != nil { + in, out := &in.AudioTypeControl, &out.AudioTypeControl + *out = new(string) + **out = **in + } + if in.AudioWatermarkSettings != nil { + in, out := &in.AudioWatermarkSettings, &out.AudioWatermarkSettings + *out = new(AudioWatermarkSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.CodecSettings != nil { + in, out := &in.CodecSettings, &out.CodecSettings + *out = new(CodecSettingsObservation) + 
(*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageCodeControl != nil { + in, out := &in.LanguageCodeControl, &out.LanguageCodeControl + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RemixSettings != nil { + in, out := &in.RemixSettings, &out.RemixSettings + *out = new(RemixSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioDescriptionsObservation. +func (in *AudioDescriptionsObservation) DeepCopy() *AudioDescriptionsObservation { + if in == nil { + return nil + } + out := new(AudioDescriptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioDescriptionsParameters) DeepCopyInto(out *AudioDescriptionsParameters) { + *out = *in + if in.AudioNormalizationSettings != nil { + in, out := &in.AudioNormalizationSettings, &out.AudioNormalizationSettings + *out = new(AudioNormalizationSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AudioSelectorName != nil { + in, out := &in.AudioSelectorName, &out.AudioSelectorName + *out = new(string) + **out = **in + } + if in.AudioType != nil { + in, out := &in.AudioType, &out.AudioType + *out = new(string) + **out = **in + } + if in.AudioTypeControl != nil { + in, out := &in.AudioTypeControl, &out.AudioTypeControl + *out = new(string) + **out = **in + } + if in.AudioWatermarkSettings != nil { + in, out := &in.AudioWatermarkSettings, &out.AudioWatermarkSettings + *out = new(AudioWatermarkSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.CodecSettings != nil { + in, out := &in.CodecSettings, &out.CodecSettings + *out = new(CodecSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageCodeControl != nil { + in, out := &in.LanguageCodeControl, &out.LanguageCodeControl + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RemixSettings != nil { + in, out := &in.RemixSettings, &out.RemixSettings + *out = new(RemixSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioDescriptionsParameters. 
+func (in *AudioDescriptionsParameters) DeepCopy() *AudioDescriptionsParameters { + if in == nil { + return nil + } + out := new(AudioDescriptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioHlsRenditionSelectionInitParameters) DeepCopyInto(out *AudioHlsRenditionSelectionInitParameters) { + *out = *in + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioHlsRenditionSelectionInitParameters. +func (in *AudioHlsRenditionSelectionInitParameters) DeepCopy() *AudioHlsRenditionSelectionInitParameters { + if in == nil { + return nil + } + out := new(AudioHlsRenditionSelectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioHlsRenditionSelectionObservation) DeepCopyInto(out *AudioHlsRenditionSelectionObservation) { + *out = *in + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioHlsRenditionSelectionObservation. +func (in *AudioHlsRenditionSelectionObservation) DeepCopy() *AudioHlsRenditionSelectionObservation { + if in == nil { + return nil + } + out := new(AudioHlsRenditionSelectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioHlsRenditionSelectionParameters) DeepCopyInto(out *AudioHlsRenditionSelectionParameters) { + *out = *in + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioHlsRenditionSelectionParameters. +func (in *AudioHlsRenditionSelectionParameters) DeepCopy() *AudioHlsRenditionSelectionParameters { + if in == nil { + return nil + } + out := new(AudioHlsRenditionSelectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioLanguageSelectionInitParameters) DeepCopyInto(out *AudioLanguageSelectionInitParameters) { + *out = *in + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageSelectionPolicy != nil { + in, out := &in.LanguageSelectionPolicy, &out.LanguageSelectionPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioLanguageSelectionInitParameters. +func (in *AudioLanguageSelectionInitParameters) DeepCopy() *AudioLanguageSelectionInitParameters { + if in == nil { + return nil + } + out := new(AudioLanguageSelectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioLanguageSelectionObservation) DeepCopyInto(out *AudioLanguageSelectionObservation) { + *out = *in + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageSelectionPolicy != nil { + in, out := &in.LanguageSelectionPolicy, &out.LanguageSelectionPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioLanguageSelectionObservation. +func (in *AudioLanguageSelectionObservation) DeepCopy() *AudioLanguageSelectionObservation { + if in == nil { + return nil + } + out := new(AudioLanguageSelectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioLanguageSelectionParameters) DeepCopyInto(out *AudioLanguageSelectionParameters) { + *out = *in + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageSelectionPolicy != nil { + in, out := &in.LanguageSelectionPolicy, &out.LanguageSelectionPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioLanguageSelectionParameters. +func (in *AudioLanguageSelectionParameters) DeepCopy() *AudioLanguageSelectionParameters { + if in == nil { + return nil + } + out := new(AudioLanguageSelectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioNormalizationSettingsInitParameters) DeepCopyInto(out *AudioNormalizationSettingsInitParameters) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.AlgorithmControl != nil { + in, out := &in.AlgorithmControl, &out.AlgorithmControl + *out = new(string) + **out = **in + } + if in.TargetLkfs != nil { + in, out := &in.TargetLkfs, &out.TargetLkfs + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioNormalizationSettingsInitParameters. +func (in *AudioNormalizationSettingsInitParameters) DeepCopy() *AudioNormalizationSettingsInitParameters { + if in == nil { + return nil + } + out := new(AudioNormalizationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioNormalizationSettingsObservation) DeepCopyInto(out *AudioNormalizationSettingsObservation) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.AlgorithmControl != nil { + in, out := &in.AlgorithmControl, &out.AlgorithmControl + *out = new(string) + **out = **in + } + if in.TargetLkfs != nil { + in, out := &in.TargetLkfs, &out.TargetLkfs + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioNormalizationSettingsObservation. +func (in *AudioNormalizationSettingsObservation) DeepCopy() *AudioNormalizationSettingsObservation { + if in == nil { + return nil + } + out := new(AudioNormalizationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioNormalizationSettingsParameters) DeepCopyInto(out *AudioNormalizationSettingsParameters) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.AlgorithmControl != nil { + in, out := &in.AlgorithmControl, &out.AlgorithmControl + *out = new(string) + **out = **in + } + if in.TargetLkfs != nil { + in, out := &in.TargetLkfs, &out.TargetLkfs + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioNormalizationSettingsParameters. +func (in *AudioNormalizationSettingsParameters) DeepCopy() *AudioNormalizationSettingsParameters { + if in == nil { + return nil + } + out := new(AudioNormalizationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioOnlyHlsSettingsInitParameters) DeepCopyInto(out *AudioOnlyHlsSettingsInitParameters) { + *out = *in + if in.AudioGroupID != nil { + in, out := &in.AudioGroupID, &out.AudioGroupID + *out = new(string) + **out = **in + } + if in.AudioOnlyImage != nil { + in, out := &in.AudioOnlyImage, &out.AudioOnlyImage + *out = new(AudioOnlyImageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AudioTrackType != nil { + in, out := &in.AudioTrackType, &out.AudioTrackType + *out = new(string) + **out = **in + } + if in.SegmentType != nil { + in, out := &in.SegmentType, &out.SegmentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioOnlyHlsSettingsInitParameters. 
+func (in *AudioOnlyHlsSettingsInitParameters) DeepCopy() *AudioOnlyHlsSettingsInitParameters { + if in == nil { + return nil + } + out := new(AudioOnlyHlsSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioOnlyHlsSettingsObservation) DeepCopyInto(out *AudioOnlyHlsSettingsObservation) { + *out = *in + if in.AudioGroupID != nil { + in, out := &in.AudioGroupID, &out.AudioGroupID + *out = new(string) + **out = **in + } + if in.AudioOnlyImage != nil { + in, out := &in.AudioOnlyImage, &out.AudioOnlyImage + *out = new(AudioOnlyImageObservation) + (*in).DeepCopyInto(*out) + } + if in.AudioTrackType != nil { + in, out := &in.AudioTrackType, &out.AudioTrackType + *out = new(string) + **out = **in + } + if in.SegmentType != nil { + in, out := &in.SegmentType, &out.SegmentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioOnlyHlsSettingsObservation. +func (in *AudioOnlyHlsSettingsObservation) DeepCopy() *AudioOnlyHlsSettingsObservation { + if in == nil { + return nil + } + out := new(AudioOnlyHlsSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioOnlyHlsSettingsParameters) DeepCopyInto(out *AudioOnlyHlsSettingsParameters) { + *out = *in + if in.AudioGroupID != nil { + in, out := &in.AudioGroupID, &out.AudioGroupID + *out = new(string) + **out = **in + } + if in.AudioOnlyImage != nil { + in, out := &in.AudioOnlyImage, &out.AudioOnlyImage + *out = new(AudioOnlyImageParameters) + (*in).DeepCopyInto(*out) + } + if in.AudioTrackType != nil { + in, out := &in.AudioTrackType, &out.AudioTrackType + *out = new(string) + **out = **in + } + if in.SegmentType != nil { + in, out := &in.SegmentType, &out.SegmentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioOnlyHlsSettingsParameters. +func (in *AudioOnlyHlsSettingsParameters) DeepCopy() *AudioOnlyHlsSettingsParameters { + if in == nil { + return nil + } + out := new(AudioOnlyHlsSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioOnlyImageInitParameters) DeepCopyInto(out *AudioOnlyImageInitParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioOnlyImageInitParameters. +func (in *AudioOnlyImageInitParameters) DeepCopy() *AudioOnlyImageInitParameters { + if in == nil { + return nil + } + out := new(AudioOnlyImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioOnlyImageObservation) DeepCopyInto(out *AudioOnlyImageObservation) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioOnlyImageObservation. +func (in *AudioOnlyImageObservation) DeepCopy() *AudioOnlyImageObservation { + if in == nil { + return nil + } + out := new(AudioOnlyImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioOnlyImageParameters) DeepCopyInto(out *AudioOnlyImageParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioOnlyImageParameters. +func (in *AudioOnlyImageParameters) DeepCopy() *AudioOnlyImageParameters { + if in == nil { + return nil + } + out := new(AudioOnlyImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioPidSelectionInitParameters) DeepCopyInto(out *AudioPidSelectionInitParameters) { + *out = *in + if in.Pid != nil { + in, out := &in.Pid, &out.Pid + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioPidSelectionInitParameters. +func (in *AudioPidSelectionInitParameters) DeepCopy() *AudioPidSelectionInitParameters { + if in == nil { + return nil + } + out := new(AudioPidSelectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioPidSelectionObservation) DeepCopyInto(out *AudioPidSelectionObservation) { + *out = *in + if in.Pid != nil { + in, out := &in.Pid, &out.Pid + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioPidSelectionObservation. +func (in *AudioPidSelectionObservation) DeepCopy() *AudioPidSelectionObservation { + if in == nil { + return nil + } + out := new(AudioPidSelectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioPidSelectionParameters) DeepCopyInto(out *AudioPidSelectionParameters) { + *out = *in + if in.Pid != nil { + in, out := &in.Pid, &out.Pid + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioPidSelectionParameters. +func (in *AudioPidSelectionParameters) DeepCopy() *AudioPidSelectionParameters { + if in == nil { + return nil + } + out := new(AudioPidSelectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioSelectorInitParameters) DeepCopyInto(out *AudioSelectorInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelectorSettings != nil { + in, out := &in.SelectorSettings, &out.SelectorSettings + *out = new(SelectorSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioSelectorInitParameters. +func (in *AudioSelectorInitParameters) DeepCopy() *AudioSelectorInitParameters { + if in == nil { + return nil + } + out := new(AudioSelectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioSelectorObservation) DeepCopyInto(out *AudioSelectorObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelectorSettings != nil { + in, out := &in.SelectorSettings, &out.SelectorSettings + *out = new(SelectorSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioSelectorObservation. +func (in *AudioSelectorObservation) DeepCopy() *AudioSelectorObservation { + if in == nil { + return nil + } + out := new(AudioSelectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioSelectorParameters) DeepCopyInto(out *AudioSelectorParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelectorSettings != nil { + in, out := &in.SelectorSettings, &out.SelectorSettings + *out = new(SelectorSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioSelectorParameters. +func (in *AudioSelectorParameters) DeepCopy() *AudioSelectorParameters { + if in == nil { + return nil + } + out := new(AudioSelectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioSilenceSettingsInitParameters) DeepCopyInto(out *AudioSilenceSettingsInitParameters) { + *out = *in + if in.AudioSelectorName != nil { + in, out := &in.AudioSelectorName, &out.AudioSelectorName + *out = new(string) + **out = **in + } + if in.AudioSilenceThresholdMsec != nil { + in, out := &in.AudioSilenceThresholdMsec, &out.AudioSilenceThresholdMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioSilenceSettingsInitParameters. +func (in *AudioSilenceSettingsInitParameters) DeepCopy() *AudioSilenceSettingsInitParameters { + if in == nil { + return nil + } + out := new(AudioSilenceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioSilenceSettingsObservation) DeepCopyInto(out *AudioSilenceSettingsObservation) { + *out = *in + if in.AudioSelectorName != nil { + in, out := &in.AudioSelectorName, &out.AudioSelectorName + *out = new(string) + **out = **in + } + if in.AudioSilenceThresholdMsec != nil { + in, out := &in.AudioSilenceThresholdMsec, &out.AudioSilenceThresholdMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioSilenceSettingsObservation. +func (in *AudioSilenceSettingsObservation) DeepCopy() *AudioSilenceSettingsObservation { + if in == nil { + return nil + } + out := new(AudioSilenceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioSilenceSettingsParameters) DeepCopyInto(out *AudioSilenceSettingsParameters) { + *out = *in + if in.AudioSelectorName != nil { + in, out := &in.AudioSelectorName, &out.AudioSelectorName + *out = new(string) + **out = **in + } + if in.AudioSilenceThresholdMsec != nil { + in, out := &in.AudioSilenceThresholdMsec, &out.AudioSilenceThresholdMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioSilenceSettingsParameters. +func (in *AudioSilenceSettingsParameters) DeepCopy() *AudioSilenceSettingsParameters { + if in == nil { + return nil + } + out := new(AudioSilenceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioTrackSelectionInitParameters) DeepCopyInto(out *AudioTrackSelectionInitParameters) { + *out = *in + if in.DolbyEDecode != nil { + in, out := &in.DolbyEDecode, &out.DolbyEDecode + *out = new(DolbyEDecodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tracks != nil { + in, out := &in.Tracks, &out.Tracks + *out = make([]TracksInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioTrackSelectionInitParameters. +func (in *AudioTrackSelectionInitParameters) DeepCopy() *AudioTrackSelectionInitParameters { + if in == nil { + return nil + } + out := new(AudioTrackSelectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioTrackSelectionObservation) DeepCopyInto(out *AudioTrackSelectionObservation) { + *out = *in + if in.DolbyEDecode != nil { + in, out := &in.DolbyEDecode, &out.DolbyEDecode + *out = new(DolbyEDecodeObservation) + (*in).DeepCopyInto(*out) + } + if in.Tracks != nil { + in, out := &in.Tracks, &out.Tracks + *out = make([]TracksObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioTrackSelectionObservation. +func (in *AudioTrackSelectionObservation) DeepCopy() *AudioTrackSelectionObservation { + if in == nil { + return nil + } + out := new(AudioTrackSelectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioTrackSelectionParameters) DeepCopyInto(out *AudioTrackSelectionParameters) { + *out = *in + if in.DolbyEDecode != nil { + in, out := &in.DolbyEDecode, &out.DolbyEDecode + *out = new(DolbyEDecodeParameters) + (*in).DeepCopyInto(*out) + } + if in.Tracks != nil { + in, out := &in.Tracks, &out.Tracks + *out = make([]TracksParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioTrackSelectionParameters. +func (in *AudioTrackSelectionParameters) DeepCopy() *AudioTrackSelectionParameters { + if in == nil { + return nil + } + out := new(AudioTrackSelectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioWatermarkSettingsInitParameters) DeepCopyInto(out *AudioWatermarkSettingsInitParameters) { + *out = *in + if in.NielsenWatermarksSettings != nil { + in, out := &in.NielsenWatermarksSettings, &out.NielsenWatermarksSettings + *out = new(NielsenWatermarksSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioWatermarkSettingsInitParameters. +func (in *AudioWatermarkSettingsInitParameters) DeepCopy() *AudioWatermarkSettingsInitParameters { + if in == nil { + return nil + } + out := new(AudioWatermarkSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioWatermarkSettingsObservation) DeepCopyInto(out *AudioWatermarkSettingsObservation) { + *out = *in + if in.NielsenWatermarksSettings != nil { + in, out := &in.NielsenWatermarksSettings, &out.NielsenWatermarksSettings + *out = new(NielsenWatermarksSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioWatermarkSettingsObservation. +func (in *AudioWatermarkSettingsObservation) DeepCopy() *AudioWatermarkSettingsObservation { + if in == nil { + return nil + } + out := new(AudioWatermarkSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AudioWatermarkSettingsParameters) DeepCopyInto(out *AudioWatermarkSettingsParameters) { + *out = *in + if in.NielsenWatermarksSettings != nil { + in, out := &in.NielsenWatermarksSettings, &out.NielsenWatermarksSettings + *out = new(NielsenWatermarksSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioWatermarkSettingsParameters. +func (in *AudioWatermarkSettingsParameters) DeepCopy() *AudioWatermarkSettingsParameters { + if in == nil { + return nil + } + out := new(AudioWatermarkSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomaticInputFailoverSettingsInitParameters) DeepCopyInto(out *AutomaticInputFailoverSettingsInitParameters) { + *out = *in + if in.ErrorClearTimeMsec != nil { + in, out := &in.ErrorClearTimeMsec, &out.ErrorClearTimeMsec + *out = new(float64) + **out = **in + } + if in.FailoverCondition != nil { + in, out := &in.FailoverCondition, &out.FailoverCondition + *out = make([]FailoverConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputPreference != nil { + in, out := &in.InputPreference, &out.InputPreference + *out = new(string) + **out = **in + } + if in.SecondaryInputID != nil { + in, out := &in.SecondaryInputID, &out.SecondaryInputID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticInputFailoverSettingsInitParameters. +func (in *AutomaticInputFailoverSettingsInitParameters) DeepCopy() *AutomaticInputFailoverSettingsInitParameters { + if in == nil { + return nil + } + out := new(AutomaticInputFailoverSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomaticInputFailoverSettingsObservation) DeepCopyInto(out *AutomaticInputFailoverSettingsObservation) { + *out = *in + if in.ErrorClearTimeMsec != nil { + in, out := &in.ErrorClearTimeMsec, &out.ErrorClearTimeMsec + *out = new(float64) + **out = **in + } + if in.FailoverCondition != nil { + in, out := &in.FailoverCondition, &out.FailoverCondition + *out = make([]FailoverConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputPreference != nil { + in, out := &in.InputPreference, &out.InputPreference + *out = new(string) + **out = **in + } + if in.SecondaryInputID != nil { + in, out := &in.SecondaryInputID, &out.SecondaryInputID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticInputFailoverSettingsObservation. +func (in *AutomaticInputFailoverSettingsObservation) DeepCopy() *AutomaticInputFailoverSettingsObservation { + if in == nil { + return nil + } + out := new(AutomaticInputFailoverSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomaticInputFailoverSettingsParameters) DeepCopyInto(out *AutomaticInputFailoverSettingsParameters) { + *out = *in + if in.ErrorClearTimeMsec != nil { + in, out := &in.ErrorClearTimeMsec, &out.ErrorClearTimeMsec + *out = new(float64) + **out = **in + } + if in.FailoverCondition != nil { + in, out := &in.FailoverCondition, &out.FailoverCondition + *out = make([]FailoverConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputPreference != nil { + in, out := &in.InputPreference, &out.InputPreference + *out = new(string) + **out = **in + } + if in.SecondaryInputID != nil { + in, out := &in.SecondaryInputID, &out.SecondaryInputID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticInputFailoverSettingsParameters. +func (in *AutomaticInputFailoverSettingsParameters) DeepCopy() *AutomaticInputFailoverSettingsParameters { + if in == nil { + return nil + } + out := new(AutomaticInputFailoverSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailBlankingImageInitParameters) DeepCopyInto(out *AvailBlankingImageInitParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailBlankingImageInitParameters. 
+func (in *AvailBlankingImageInitParameters) DeepCopy() *AvailBlankingImageInitParameters { + if in == nil { + return nil + } + out := new(AvailBlankingImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailBlankingImageObservation) DeepCopyInto(out *AvailBlankingImageObservation) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailBlankingImageObservation. +func (in *AvailBlankingImageObservation) DeepCopy() *AvailBlankingImageObservation { + if in == nil { + return nil + } + out := new(AvailBlankingImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailBlankingImageParameters) DeepCopyInto(out *AvailBlankingImageParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailBlankingImageParameters. 
+func (in *AvailBlankingImageParameters) DeepCopy() *AvailBlankingImageParameters { + if in == nil { + return nil + } + out := new(AvailBlankingImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailBlankingInitParameters) DeepCopyInto(out *AvailBlankingInitParameters) { + *out = *in + if in.AvailBlankingImage != nil { + in, out := &in.AvailBlankingImage, &out.AvailBlankingImage + *out = new(AvailBlankingImageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailBlankingInitParameters. +func (in *AvailBlankingInitParameters) DeepCopy() *AvailBlankingInitParameters { + if in == nil { + return nil + } + out := new(AvailBlankingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailBlankingObservation) DeepCopyInto(out *AvailBlankingObservation) { + *out = *in + if in.AvailBlankingImage != nil { + in, out := &in.AvailBlankingImage, &out.AvailBlankingImage + *out = new(AvailBlankingImageObservation) + (*in).DeepCopyInto(*out) + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailBlankingObservation. +func (in *AvailBlankingObservation) DeepCopy() *AvailBlankingObservation { + if in == nil { + return nil + } + out := new(AvailBlankingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AvailBlankingParameters) DeepCopyInto(out *AvailBlankingParameters) { + *out = *in + if in.AvailBlankingImage != nil { + in, out := &in.AvailBlankingImage, &out.AvailBlankingImage + *out = new(AvailBlankingImageParameters) + (*in).DeepCopyInto(*out) + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailBlankingParameters. +func (in *AvailBlankingParameters) DeepCopy() *AvailBlankingParameters { + if in == nil { + return nil + } + out := new(AvailBlankingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BurnInDestinationSettingsInitParameters) DeepCopyInto(out *BurnInDestinationSettingsInitParameters) { + *out = *in + if in.Alignment != nil { + in, out := &in.Alignment, &out.Alignment + *out = new(string) + **out = **in + } + if in.BackgroundColor != nil { + in, out := &in.BackgroundColor, &out.BackgroundColor + *out = new(string) + **out = **in + } + if in.BackgroundOpacity != nil { + in, out := &in.BackgroundOpacity, &out.BackgroundOpacity + *out = new(float64) + **out = **in + } + if in.Font != nil { + in, out := &in.Font, &out.Font + *out = new(FontInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FontColor != nil { + in, out := &in.FontColor, &out.FontColor + *out = new(string) + **out = **in + } + if in.FontOpacity != nil { + in, out := &in.FontOpacity, &out.FontOpacity + *out = new(float64) + **out = **in + } + if in.FontResolution != nil { + in, out := &in.FontResolution, &out.FontResolution + *out = new(float64) + **out = **in + } + if in.FontSize != nil { + in, out := &in.FontSize, &out.FontSize + *out = new(string) + **out = **in + } + if in.OutlineColor != nil { + in, out := &in.OutlineColor, &out.OutlineColor + *out = new(string) + **out = **in + } + if 
in.OutlineSize != nil { + in, out := &in.OutlineSize, &out.OutlineSize + *out = new(float64) + **out = **in + } + if in.ShadowColor != nil { + in, out := &in.ShadowColor, &out.ShadowColor + *out = new(string) + **out = **in + } + if in.ShadowOpacity != nil { + in, out := &in.ShadowOpacity, &out.ShadowOpacity + *out = new(float64) + **out = **in + } + if in.ShadowXOffset != nil { + in, out := &in.ShadowXOffset, &out.ShadowXOffset + *out = new(float64) + **out = **in + } + if in.ShadowYOffset != nil { + in, out := &in.ShadowYOffset, &out.ShadowYOffset + *out = new(float64) + **out = **in + } + if in.TeletextGridControl != nil { + in, out := &in.TeletextGridControl, &out.TeletextGridControl + *out = new(string) + **out = **in + } + if in.XPosition != nil { + in, out := &in.XPosition, &out.XPosition + *out = new(float64) + **out = **in + } + if in.YPosition != nil { + in, out := &in.YPosition, &out.YPosition + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BurnInDestinationSettingsInitParameters. +func (in *BurnInDestinationSettingsInitParameters) DeepCopy() *BurnInDestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(BurnInDestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BurnInDestinationSettingsObservation) DeepCopyInto(out *BurnInDestinationSettingsObservation) { + *out = *in + if in.Alignment != nil { + in, out := &in.Alignment, &out.Alignment + *out = new(string) + **out = **in + } + if in.BackgroundColor != nil { + in, out := &in.BackgroundColor, &out.BackgroundColor + *out = new(string) + **out = **in + } + if in.BackgroundOpacity != nil { + in, out := &in.BackgroundOpacity, &out.BackgroundOpacity + *out = new(float64) + **out = **in + } + if in.Font != nil { + in, out := &in.Font, &out.Font + *out = new(FontObservation) + (*in).DeepCopyInto(*out) + } + if in.FontColor != nil { + in, out := &in.FontColor, &out.FontColor + *out = new(string) + **out = **in + } + if in.FontOpacity != nil { + in, out := &in.FontOpacity, &out.FontOpacity + *out = new(float64) + **out = **in + } + if in.FontResolution != nil { + in, out := &in.FontResolution, &out.FontResolution + *out = new(float64) + **out = **in + } + if in.FontSize != nil { + in, out := &in.FontSize, &out.FontSize + *out = new(string) + **out = **in + } + if in.OutlineColor != nil { + in, out := &in.OutlineColor, &out.OutlineColor + *out = new(string) + **out = **in + } + if in.OutlineSize != nil { + in, out := &in.OutlineSize, &out.OutlineSize + *out = new(float64) + **out = **in + } + if in.ShadowColor != nil { + in, out := &in.ShadowColor, &out.ShadowColor + *out = new(string) + **out = **in + } + if in.ShadowOpacity != nil { + in, out := &in.ShadowOpacity, &out.ShadowOpacity + *out = new(float64) + **out = **in + } + if in.ShadowXOffset != nil { + in, out := &in.ShadowXOffset, &out.ShadowXOffset + *out = new(float64) + **out = **in + } + if in.ShadowYOffset != nil { + in, out := &in.ShadowYOffset, &out.ShadowYOffset + *out = new(float64) + **out = **in + } + if in.TeletextGridControl != nil { + in, out := &in.TeletextGridControl, &out.TeletextGridControl + *out = new(string) + **out = **in + } + if in.XPosition != nil { + in, out := &in.XPosition, &out.XPosition 
+ *out = new(float64) + **out = **in + } + if in.YPosition != nil { + in, out := &in.YPosition, &out.YPosition + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BurnInDestinationSettingsObservation. +func (in *BurnInDestinationSettingsObservation) DeepCopy() *BurnInDestinationSettingsObservation { + if in == nil { + return nil + } + out := new(BurnInDestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BurnInDestinationSettingsParameters) DeepCopyInto(out *BurnInDestinationSettingsParameters) { + *out = *in + if in.Alignment != nil { + in, out := &in.Alignment, &out.Alignment + *out = new(string) + **out = **in + } + if in.BackgroundColor != nil { + in, out := &in.BackgroundColor, &out.BackgroundColor + *out = new(string) + **out = **in + } + if in.BackgroundOpacity != nil { + in, out := &in.BackgroundOpacity, &out.BackgroundOpacity + *out = new(float64) + **out = **in + } + if in.Font != nil { + in, out := &in.Font, &out.Font + *out = new(FontParameters) + (*in).DeepCopyInto(*out) + } + if in.FontColor != nil { + in, out := &in.FontColor, &out.FontColor + *out = new(string) + **out = **in + } + if in.FontOpacity != nil { + in, out := &in.FontOpacity, &out.FontOpacity + *out = new(float64) + **out = **in + } + if in.FontResolution != nil { + in, out := &in.FontResolution, &out.FontResolution + *out = new(float64) + **out = **in + } + if in.FontSize != nil { + in, out := &in.FontSize, &out.FontSize + *out = new(string) + **out = **in + } + if in.OutlineColor != nil { + in, out := &in.OutlineColor, &out.OutlineColor + *out = new(string) + **out = **in + } + if in.OutlineSize != nil { + in, out := &in.OutlineSize, &out.OutlineSize + *out = new(float64) + **out = **in + } + if in.ShadowColor != nil { + in, out := &in.ShadowColor, 
&out.ShadowColor + *out = new(string) + **out = **in + } + if in.ShadowOpacity != nil { + in, out := &in.ShadowOpacity, &out.ShadowOpacity + *out = new(float64) + **out = **in + } + if in.ShadowXOffset != nil { + in, out := &in.ShadowXOffset, &out.ShadowXOffset + *out = new(float64) + **out = **in + } + if in.ShadowYOffset != nil { + in, out := &in.ShadowYOffset, &out.ShadowYOffset + *out = new(float64) + **out = **in + } + if in.TeletextGridControl != nil { + in, out := &in.TeletextGridControl, &out.TeletextGridControl + *out = new(string) + **out = **in + } + if in.XPosition != nil { + in, out := &in.XPosition, &out.XPosition + *out = new(float64) + **out = **in + } + if in.YPosition != nil { + in, out := &in.YPosition, &out.YPosition + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BurnInDestinationSettingsParameters. +func (in *BurnInDestinationSettingsParameters) DeepCopy() *BurnInDestinationSettingsParameters { + if in == nil { + return nil + } + out := new(BurnInDestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptionDescriptionsInitParameters) DeepCopyInto(out *CaptionDescriptionsInitParameters) { + *out = *in + if in.Accessibility != nil { + in, out := &in.Accessibility, &out.Accessibility + *out = new(string) + **out = **in + } + if in.CaptionSelectorName != nil { + in, out := &in.CaptionSelectorName, &out.CaptionSelectorName + *out = new(string) + **out = **in + } + if in.DestinationSettings != nil { + in, out := &in.DestinationSettings, &out.DestinationSettings + *out = new(DestinationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageDescription != nil { + in, out := &in.LanguageDescription, &out.LanguageDescription + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionDescriptionsInitParameters. +func (in *CaptionDescriptionsInitParameters) DeepCopy() *CaptionDescriptionsInitParameters { + if in == nil { + return nil + } + out := new(CaptionDescriptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptionDescriptionsObservation) DeepCopyInto(out *CaptionDescriptionsObservation) { + *out = *in + if in.Accessibility != nil { + in, out := &in.Accessibility, &out.Accessibility + *out = new(string) + **out = **in + } + if in.CaptionSelectorName != nil { + in, out := &in.CaptionSelectorName, &out.CaptionSelectorName + *out = new(string) + **out = **in + } + if in.DestinationSettings != nil { + in, out := &in.DestinationSettings, &out.DestinationSettings + *out = new(DestinationSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageDescription != nil { + in, out := &in.LanguageDescription, &out.LanguageDescription + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionDescriptionsObservation. +func (in *CaptionDescriptionsObservation) DeepCopy() *CaptionDescriptionsObservation { + if in == nil { + return nil + } + out := new(CaptionDescriptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptionDescriptionsParameters) DeepCopyInto(out *CaptionDescriptionsParameters) { + *out = *in + if in.Accessibility != nil { + in, out := &in.Accessibility, &out.Accessibility + *out = new(string) + **out = **in + } + if in.CaptionSelectorName != nil { + in, out := &in.CaptionSelectorName, &out.CaptionSelectorName + *out = new(string) + **out = **in + } + if in.DestinationSettings != nil { + in, out := &in.DestinationSettings, &out.DestinationSettings + *out = new(DestinationSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageDescription != nil { + in, out := &in.LanguageDescription, &out.LanguageDescription + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionDescriptionsParameters. +func (in *CaptionDescriptionsParameters) DeepCopy() *CaptionDescriptionsParameters { + if in == nil { + return nil + } + out := new(CaptionDescriptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptionLanguageMappingsInitParameters) DeepCopyInto(out *CaptionLanguageMappingsInitParameters) { + *out = *in + if in.CaptionChannel != nil { + in, out := &in.CaptionChannel, &out.CaptionChannel + *out = new(float64) + **out = **in + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageDescription != nil { + in, out := &in.LanguageDescription, &out.LanguageDescription + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionLanguageMappingsInitParameters. 
+func (in *CaptionLanguageMappingsInitParameters) DeepCopy() *CaptionLanguageMappingsInitParameters { + if in == nil { + return nil + } + out := new(CaptionLanguageMappingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptionLanguageMappingsObservation) DeepCopyInto(out *CaptionLanguageMappingsObservation) { + *out = *in + if in.CaptionChannel != nil { + in, out := &in.CaptionChannel, &out.CaptionChannel + *out = new(float64) + **out = **in + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageDescription != nil { + in, out := &in.LanguageDescription, &out.LanguageDescription + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionLanguageMappingsObservation. +func (in *CaptionLanguageMappingsObservation) DeepCopy() *CaptionLanguageMappingsObservation { + if in == nil { + return nil + } + out := new(CaptionLanguageMappingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptionLanguageMappingsParameters) DeepCopyInto(out *CaptionLanguageMappingsParameters) { + *out = *in + if in.CaptionChannel != nil { + in, out := &in.CaptionChannel, &out.CaptionChannel + *out = new(float64) + **out = **in + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.LanguageDescription != nil { + in, out := &in.LanguageDescription, &out.LanguageDescription + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionLanguageMappingsParameters. 
+func (in *CaptionLanguageMappingsParameters) DeepCopy() *CaptionLanguageMappingsParameters { + if in == nil { + return nil + } + out := new(CaptionLanguageMappingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptionSelectorInitParameters) DeepCopyInto(out *CaptionSelectorInitParameters) { + *out = *in + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelectorSettings != nil { + in, out := &in.SelectorSettings, &out.SelectorSettings + *out = new(CaptionSelectorSelectorSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionSelectorInitParameters. +func (in *CaptionSelectorInitParameters) DeepCopy() *CaptionSelectorInitParameters { + if in == nil { + return nil + } + out := new(CaptionSelectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptionSelectorObservation) DeepCopyInto(out *CaptionSelectorObservation) { + *out = *in + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelectorSettings != nil { + in, out := &in.SelectorSettings, &out.SelectorSettings + *out = new(CaptionSelectorSelectorSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionSelectorObservation. 
+func (in *CaptionSelectorObservation) DeepCopy() *CaptionSelectorObservation { + if in == nil { + return nil + } + out := new(CaptionSelectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptionSelectorParameters) DeepCopyInto(out *CaptionSelectorParameters) { + *out = *in + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelectorSettings != nil { + in, out := &in.SelectorSettings, &out.SelectorSettings + *out = new(CaptionSelectorSelectorSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionSelectorParameters. +func (in *CaptionSelectorParameters) DeepCopy() *CaptionSelectorParameters { + if in == nil { + return nil + } + out := new(CaptionSelectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptionSelectorSelectorSettingsInitParameters) DeepCopyInto(out *CaptionSelectorSelectorSettingsInitParameters) { + *out = *in + if in.AncillarySourceSettings != nil { + in, out := &in.AncillarySourceSettings, &out.AncillarySourceSettings + *out = new(AncillarySourceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AribSourceSettings != nil { + in, out := &in.AribSourceSettings, &out.AribSourceSettings + *out = new(AribSourceSettingsInitParameters) + **out = **in + } + if in.DvbSubSourceSettings != nil { + in, out := &in.DvbSubSourceSettings, &out.DvbSubSourceSettings + *out = new(DvbSubSourceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EmbeddedSourceSettings != nil { + in, out := &in.EmbeddedSourceSettings, &out.EmbeddedSourceSettings + *out = new(EmbeddedSourceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Scte20SourceSettings != nil { + in, out := &in.Scte20SourceSettings, &out.Scte20SourceSettings + *out = new(Scte20SourceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Scte27SourceSettings != nil { + in, out := &in.Scte27SourceSettings, &out.Scte27SourceSettings + *out = new(Scte27SourceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TeletextSourceSettings != nil { + in, out := &in.TeletextSourceSettings, &out.TeletextSourceSettings + *out = new(TeletextSourceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionSelectorSelectorSettingsInitParameters. +func (in *CaptionSelectorSelectorSettingsInitParameters) DeepCopy() *CaptionSelectorSelectorSettingsInitParameters { + if in == nil { + return nil + } + out := new(CaptionSelectorSelectorSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptionSelectorSelectorSettingsObservation) DeepCopyInto(out *CaptionSelectorSelectorSettingsObservation) { + *out = *in + if in.AncillarySourceSettings != nil { + in, out := &in.AncillarySourceSettings, &out.AncillarySourceSettings + *out = new(AncillarySourceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AribSourceSettings != nil { + in, out := &in.AribSourceSettings, &out.AribSourceSettings + *out = new(AribSourceSettingsParameters) + **out = **in + } + if in.DvbSubSourceSettings != nil { + in, out := &in.DvbSubSourceSettings, &out.DvbSubSourceSettings + *out = new(DvbSubSourceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.EmbeddedSourceSettings != nil { + in, out := &in.EmbeddedSourceSettings, &out.EmbeddedSourceSettings + *out = new(EmbeddedSourceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Scte20SourceSettings != nil { + in, out := &in.Scte20SourceSettings, &out.Scte20SourceSettings + *out = new(Scte20SourceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Scte27SourceSettings != nil { + in, out := &in.Scte27SourceSettings, &out.Scte27SourceSettings + *out = new(Scte27SourceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.TeletextSourceSettings != nil { + in, out := &in.TeletextSourceSettings, &out.TeletextSourceSettings + *out = new(TeletextSourceSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionSelectorSelectorSettingsObservation. +func (in *CaptionSelectorSelectorSettingsObservation) DeepCopy() *CaptionSelectorSelectorSettingsObservation { + if in == nil { + return nil + } + out := new(CaptionSelectorSelectorSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptionSelectorSelectorSettingsParameters) DeepCopyInto(out *CaptionSelectorSelectorSettingsParameters) { + *out = *in + if in.AncillarySourceSettings != nil { + in, out := &in.AncillarySourceSettings, &out.AncillarySourceSettings + *out = new(AncillarySourceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AribSourceSettings != nil { + in, out := &in.AribSourceSettings, &out.AribSourceSettings + *out = new(AribSourceSettingsParameters) + **out = **in + } + if in.DvbSubSourceSettings != nil { + in, out := &in.DvbSubSourceSettings, &out.DvbSubSourceSettings + *out = new(DvbSubSourceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.EmbeddedSourceSettings != nil { + in, out := &in.EmbeddedSourceSettings, &out.EmbeddedSourceSettings + *out = new(EmbeddedSourceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Scte20SourceSettings != nil { + in, out := &in.Scte20SourceSettings, &out.Scte20SourceSettings + *out = new(Scte20SourceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Scte27SourceSettings != nil { + in, out := &in.Scte27SourceSettings, &out.Scte27SourceSettings + *out = new(Scte27SourceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.TeletextSourceSettings != nil { + in, out := &in.TeletextSourceSettings, &out.TeletextSourceSettings + *out = new(TeletextSourceSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptionSelectorSelectorSettingsParameters. +func (in *CaptionSelectorSelectorSettingsParameters) DeepCopy() *CaptionSelectorSelectorSettingsParameters { + if in == nil { + return nil + } + out := new(CaptionSelectorSelectorSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CdiInputSpecificationInitParameters) DeepCopyInto(out *CdiInputSpecificationInitParameters) { + *out = *in + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CdiInputSpecificationInitParameters. +func (in *CdiInputSpecificationInitParameters) DeepCopy() *CdiInputSpecificationInitParameters { + if in == nil { + return nil + } + out := new(CdiInputSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CdiInputSpecificationObservation) DeepCopyInto(out *CdiInputSpecificationObservation) { + *out = *in + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CdiInputSpecificationObservation. +func (in *CdiInputSpecificationObservation) DeepCopy() *CdiInputSpecificationObservation { + if in == nil { + return nil + } + out := new(CdiInputSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CdiInputSpecificationParameters) DeepCopyInto(out *CdiInputSpecificationParameters) { + *out = *in + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CdiInputSpecificationParameters. 
+func (in *CdiInputSpecificationParameters) DeepCopy() *CdiInputSpecificationParameters { + if in == nil { + return nil + } + out := new(CdiInputSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Channel) DeepCopyInto(out *Channel) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Channel. +func (in *Channel) DeepCopy() *Channel { + if in == nil { + return nil + } + out := new(Channel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Channel) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ChannelInitParameters) DeepCopyInto(out *ChannelInitParameters) { + *out = *in + if in.CdiInputSpecification != nil { + in, out := &in.CdiInputSpecification, &out.CdiInputSpecification + *out = new(CdiInputSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ChannelClass != nil { + in, out := &in.ChannelClass, &out.ChannelClass + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]DestinationsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncoderSettings != nil { + in, out := &in.EncoderSettings, &out.EncoderSettings + *out = new(EncoderSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputAttachments != nil { + in, out := &in.InputAttachments, &out.InputAttachments + *out = make([]InputAttachmentsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputSpecification != nil { + in, out := &in.InputSpecification, &out.InputSpecification + *out = new(InputSpecificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.Maintenance != nil { + in, out := &in.Maintenance, &out.Maintenance + *out = new(MaintenanceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StartChannel != nil { + in, out := &in.StartChannel, &out.StartChannel + *out = new(bool) + **out = **in + } + 
if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(VPCInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelInitParameters. +func (in *ChannelInitParameters) DeepCopy() *ChannelInitParameters { + if in == nil { + return nil + } + out := new(ChannelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChannelList) DeepCopyInto(out *ChannelList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Channel, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelList. +func (in *ChannelList) DeepCopy() *ChannelList { + if in == nil { + return nil + } + out := new(ChannelList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ChannelList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ChannelMappingsInitParameters) DeepCopyInto(out *ChannelMappingsInitParameters) { + *out = *in + if in.InputChannelLevels != nil { + in, out := &in.InputChannelLevels, &out.InputChannelLevels + *out = make([]InputChannelLevelsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutputChannel != nil { + in, out := &in.OutputChannel, &out.OutputChannel + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelMappingsInitParameters. +func (in *ChannelMappingsInitParameters) DeepCopy() *ChannelMappingsInitParameters { + if in == nil { + return nil + } + out := new(ChannelMappingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChannelMappingsObservation) DeepCopyInto(out *ChannelMappingsObservation) { + *out = *in + if in.InputChannelLevels != nil { + in, out := &in.InputChannelLevels, &out.InputChannelLevels + *out = make([]InputChannelLevelsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutputChannel != nil { + in, out := &in.OutputChannel, &out.OutputChannel + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelMappingsObservation. +func (in *ChannelMappingsObservation) DeepCopy() *ChannelMappingsObservation { + if in == nil { + return nil + } + out := new(ChannelMappingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ChannelMappingsParameters) DeepCopyInto(out *ChannelMappingsParameters) { + *out = *in + if in.InputChannelLevels != nil { + in, out := &in.InputChannelLevels, &out.InputChannelLevels + *out = make([]InputChannelLevelsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutputChannel != nil { + in, out := &in.OutputChannel, &out.OutputChannel + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelMappingsParameters. +func (in *ChannelMappingsParameters) DeepCopy() *ChannelMappingsParameters { + if in == nil { + return nil + } + out := new(ChannelMappingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChannelObservation) DeepCopyInto(out *ChannelObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CdiInputSpecification != nil { + in, out := &in.CdiInputSpecification, &out.CdiInputSpecification + *out = new(CdiInputSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.ChannelClass != nil { + in, out := &in.ChannelClass, &out.ChannelClass + *out = new(string) + **out = **in + } + if in.ChannelID != nil { + in, out := &in.ChannelID, &out.ChannelID + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]DestinationsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncoderSettings != nil { + in, out := &in.EncoderSettings, &out.EncoderSettings + *out = new(EncoderSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InputAttachments != nil { + in, out := &in.InputAttachments, &out.InputAttachments + 
*out = make([]InputAttachmentsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputSpecification != nil { + in, out := &in.InputSpecification, &out.InputSpecification + *out = new(InputSpecificationObservation) + (*in).DeepCopyInto(*out) + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.Maintenance != nil { + in, out := &in.Maintenance, &out.Maintenance + *out = new(MaintenanceObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StartChannel != nil { + in, out := &in.StartChannel, &out.StartChannel + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(VPCObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelObservation. 
+func (in *ChannelObservation) DeepCopy() *ChannelObservation { + if in == nil { + return nil + } + out := new(ChannelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChannelParameters) DeepCopyInto(out *ChannelParameters) { + *out = *in + if in.CdiInputSpecification != nil { + in, out := &in.CdiInputSpecification, &out.CdiInputSpecification + *out = new(CdiInputSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.ChannelClass != nil { + in, out := &in.ChannelClass, &out.ChannelClass + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]DestinationsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncoderSettings != nil { + in, out := &in.EncoderSettings, &out.EncoderSettings + *out = new(EncoderSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.InputAttachments != nil { + in, out := &in.InputAttachments, &out.InputAttachments + *out = make([]InputAttachmentsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputSpecification != nil { + in, out := &in.InputSpecification, &out.InputSpecification + *out = new(InputSpecificationParameters) + (*in).DeepCopyInto(*out) + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.Maintenance != nil { + in, out := &in.Maintenance, &out.Maintenance + *out = new(MaintenanceParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out 
:= &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StartChannel != nil { + in, out := &in.StartChannel, &out.StartChannel + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(VPCParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelParameters. +func (in *ChannelParameters) DeepCopy() *ChannelParameters { + if in == nil { + return nil + } + out := new(ChannelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChannelSpec) DeepCopyInto(out *ChannelSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelSpec. +func (in *ChannelSpec) DeepCopy() *ChannelSpec { + if in == nil { + return nil + } + out := new(ChannelSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ChannelStatus) DeepCopyInto(out *ChannelStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelStatus. +func (in *ChannelStatus) DeepCopy() *ChannelStatus { + if in == nil { + return nil + } + out := new(ChannelStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodecSettingsInitParameters) DeepCopyInto(out *CodecSettingsInitParameters) { + *out = *in + if in.AacSettings != nil { + in, out := &in.AacSettings, &out.AacSettings + *out = new(AacSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Ac3Settings != nil { + in, out := &in.Ac3Settings, &out.Ac3Settings + *out = new(Ac3SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Eac3AtmosSettings != nil { + in, out := &in.Eac3AtmosSettings, &out.Eac3AtmosSettings + *out = new(Eac3AtmosSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Eac3Settings != nil { + in, out := &in.Eac3Settings, &out.Eac3Settings + *out = new(Eac3SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Mp2Settings != nil { + in, out := &in.Mp2Settings, &out.Mp2Settings + *out = new(Mp2SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PassThroughSettings != nil { + in, out := &in.PassThroughSettings, &out.PassThroughSettings + *out = new(PassThroughSettingsInitParameters) + **out = **in + } + if in.WavSettings != nil { + in, out := &in.WavSettings, &out.WavSettings + *out = new(WavSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecSettingsInitParameters. 
+func (in *CodecSettingsInitParameters) DeepCopy() *CodecSettingsInitParameters { + if in == nil { + return nil + } + out := new(CodecSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodecSettingsObservation) DeepCopyInto(out *CodecSettingsObservation) { + *out = *in + if in.AacSettings != nil { + in, out := &in.AacSettings, &out.AacSettings + *out = new(AacSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Ac3Settings != nil { + in, out := &in.Ac3Settings, &out.Ac3Settings + *out = new(Ac3SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Eac3AtmosSettings != nil { + in, out := &in.Eac3AtmosSettings, &out.Eac3AtmosSettings + *out = new(Eac3AtmosSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Eac3Settings != nil { + in, out := &in.Eac3Settings, &out.Eac3Settings + *out = new(Eac3SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Mp2Settings != nil { + in, out := &in.Mp2Settings, &out.Mp2Settings + *out = new(Mp2SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.PassThroughSettings != nil { + in, out := &in.PassThroughSettings, &out.PassThroughSettings + *out = new(PassThroughSettingsParameters) + **out = **in + } + if in.WavSettings != nil { + in, out := &in.WavSettings, &out.WavSettings + *out = new(WavSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecSettingsObservation. +func (in *CodecSettingsObservation) DeepCopy() *CodecSettingsObservation { + if in == nil { + return nil + } + out := new(CodecSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodecSettingsParameters) DeepCopyInto(out *CodecSettingsParameters) { + *out = *in + if in.AacSettings != nil { + in, out := &in.AacSettings, &out.AacSettings + *out = new(AacSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Ac3Settings != nil { + in, out := &in.Ac3Settings, &out.Ac3Settings + *out = new(Ac3SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Eac3AtmosSettings != nil { + in, out := &in.Eac3AtmosSettings, &out.Eac3AtmosSettings + *out = new(Eac3AtmosSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Eac3Settings != nil { + in, out := &in.Eac3Settings, &out.Eac3Settings + *out = new(Eac3SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Mp2Settings != nil { + in, out := &in.Mp2Settings, &out.Mp2Settings + *out = new(Mp2SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.PassThroughSettings != nil { + in, out := &in.PassThroughSettings, &out.PassThroughSettings + *out = new(PassThroughSettingsParameters) + **out = **in + } + if in.WavSettings != nil { + in, out := &in.WavSettings, &out.WavSettings + *out = new(WavSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecSettingsParameters. +func (in *CodecSettingsParameters) DeepCopy() *CodecSettingsParameters { + if in == nil { + return nil + } + out := new(CodecSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColorSpacePassthroughSettingsInitParameters) DeepCopyInto(out *ColorSpacePassthroughSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColorSpacePassthroughSettingsInitParameters. 
+func (in *ColorSpacePassthroughSettingsInitParameters) DeepCopy() *ColorSpacePassthroughSettingsInitParameters { + if in == nil { + return nil + } + out := new(ColorSpacePassthroughSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColorSpacePassthroughSettingsObservation) DeepCopyInto(out *ColorSpacePassthroughSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColorSpacePassthroughSettingsObservation. +func (in *ColorSpacePassthroughSettingsObservation) DeepCopy() *ColorSpacePassthroughSettingsObservation { + if in == nil { + return nil + } + out := new(ColorSpacePassthroughSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColorSpacePassthroughSettingsParameters) DeepCopyInto(out *ColorSpacePassthroughSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColorSpacePassthroughSettingsParameters. +func (in *ColorSpacePassthroughSettingsParameters) DeepCopy() *ColorSpacePassthroughSettingsParameters { + if in == nil { + return nil + } + out := new(ColorSpacePassthroughSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColorSpaceSettingsInitParameters) DeepCopyInto(out *ColorSpaceSettingsInitParameters) { + *out = *in + if in.ColorSpacePassthroughSettings != nil { + in, out := &in.ColorSpacePassthroughSettings, &out.ColorSpacePassthroughSettings + *out = new(ColorSpacePassthroughSettingsInitParameters) + **out = **in + } + if in.DolbyVision81Settings != nil { + in, out := &in.DolbyVision81Settings, &out.DolbyVision81Settings + *out = new(DolbyVision81SettingsInitParameters) + **out = **in + } + if in.Hdr10Settings != nil { + in, out := &in.Hdr10Settings, &out.Hdr10Settings + *out = new(Hdr10SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Rec601Settings != nil { + in, out := &in.Rec601Settings, &out.Rec601Settings + *out = new(Rec601SettingsInitParameters) + **out = **in + } + if in.Rec709Settings != nil { + in, out := &in.Rec709Settings, &out.Rec709Settings + *out = new(Rec709SettingsInitParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColorSpaceSettingsInitParameters. +func (in *ColorSpaceSettingsInitParameters) DeepCopy() *ColorSpaceSettingsInitParameters { + if in == nil { + return nil + } + out := new(ColorSpaceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColorSpaceSettingsObservation) DeepCopyInto(out *ColorSpaceSettingsObservation) { + *out = *in + if in.ColorSpacePassthroughSettings != nil { + in, out := &in.ColorSpacePassthroughSettings, &out.ColorSpacePassthroughSettings + *out = new(ColorSpacePassthroughSettingsParameters) + **out = **in + } + if in.DolbyVision81Settings != nil { + in, out := &in.DolbyVision81Settings, &out.DolbyVision81Settings + *out = new(DolbyVision81SettingsParameters) + **out = **in + } + if in.Hdr10Settings != nil { + in, out := &in.Hdr10Settings, &out.Hdr10Settings + *out = new(Hdr10SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Rec601Settings != nil { + in, out := &in.Rec601Settings, &out.Rec601Settings + *out = new(Rec601SettingsParameters) + **out = **in + } + if in.Rec709Settings != nil { + in, out := &in.Rec709Settings, &out.Rec709Settings + *out = new(Rec709SettingsParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColorSpaceSettingsObservation. +func (in *ColorSpaceSettingsObservation) DeepCopy() *ColorSpaceSettingsObservation { + if in == nil { + return nil + } + out := new(ColorSpaceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColorSpaceSettingsParameters) DeepCopyInto(out *ColorSpaceSettingsParameters) { + *out = *in + if in.ColorSpacePassthroughSettings != nil { + in, out := &in.ColorSpacePassthroughSettings, &out.ColorSpacePassthroughSettings + *out = new(ColorSpacePassthroughSettingsParameters) + **out = **in + } + if in.DolbyVision81Settings != nil { + in, out := &in.DolbyVision81Settings, &out.DolbyVision81Settings + *out = new(DolbyVision81SettingsParameters) + **out = **in + } + if in.Hdr10Settings != nil { + in, out := &in.Hdr10Settings, &out.Hdr10Settings + *out = new(Hdr10SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Rec601Settings != nil { + in, out := &in.Rec601Settings, &out.Rec601Settings + *out = new(Rec601SettingsParameters) + **out = **in + } + if in.Rec709Settings != nil { + in, out := &in.Rec709Settings, &out.Rec709Settings + *out = new(Rec709SettingsParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColorSpaceSettingsParameters. +func (in *ColorSpaceSettingsParameters) DeepCopy() *ColorSpaceSettingsParameters { + if in == nil { + return nil + } + out := new(ColorSpaceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSettingsInitParameters) DeepCopyInto(out *ContainerSettingsInitParameters) { + *out = *in + if in.M2TsSettings != nil { + in, out := &in.M2TsSettings, &out.M2TsSettings + *out = new(M2TsSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RawSettings != nil { + in, out := &in.RawSettings, &out.RawSettings + *out = new(RawSettingsInitParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSettingsInitParameters. 
+func (in *ContainerSettingsInitParameters) DeepCopy() *ContainerSettingsInitParameters { + if in == nil { + return nil + } + out := new(ContainerSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSettingsM2TsSettingsInitParameters) DeepCopyInto(out *ContainerSettingsM2TsSettingsInitParameters) { + *out = *in + if in.AbsentInputAudioBehavior != nil { + in, out := &in.AbsentInputAudioBehavior, &out.AbsentInputAudioBehavior + *out = new(string) + **out = **in + } + if in.Arib != nil { + in, out := &in.Arib, &out.Arib + *out = new(string) + **out = **in + } + if in.AribCaptionsPid != nil { + in, out := &in.AribCaptionsPid, &out.AribCaptionsPid + *out = new(string) + **out = **in + } + if in.AribCaptionsPidControl != nil { + in, out := &in.AribCaptionsPidControl, &out.AribCaptionsPidControl + *out = new(string) + **out = **in + } + if in.AudioBufferModel != nil { + in, out := &in.AudioBufferModel, &out.AudioBufferModel + *out = new(string) + **out = **in + } + if in.AudioFramesPerPes != nil { + in, out := &in.AudioFramesPerPes, &out.AudioFramesPerPes + *out = new(float64) + **out = **in + } + if in.AudioPids != nil { + in, out := &in.AudioPids, &out.AudioPids + *out = new(string) + **out = **in + } + if in.AudioStreamType != nil { + in, out := &in.AudioStreamType, &out.AudioStreamType + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferModel != nil { + in, out := &in.BufferModel, &out.BufferModel + *out = new(string) + **out = **in + } + if in.CcDescriptor != nil { + in, out := &in.CcDescriptor, &out.CcDescriptor + *out = new(string) + **out = **in + } + if in.DvbNitSettings != nil { + in, out := &in.DvbNitSettings, &out.DvbNitSettings + *out = new(M2TsSettingsDvbNitSettingsInitParameters) + (*in).DeepCopyInto(*out) 
+ } + if in.DvbSdtSettings != nil { + in, out := &in.DvbSdtSettings, &out.DvbSdtSettings + *out = new(M2TsSettingsDvbSdtSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbSubPids != nil { + in, out := &in.DvbSubPids, &out.DvbSubPids + *out = new(string) + **out = **in + } + if in.DvbTdtSettings != nil { + in, out := &in.DvbTdtSettings, &out.DvbTdtSettings + *out = new(M2TsSettingsDvbTdtSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbTeletextPid != nil { + in, out := &in.DvbTeletextPid, &out.DvbTeletextPid + *out = new(string) + **out = **in + } + if in.Ebif != nil { + in, out := &in.Ebif, &out.Ebif + *out = new(string) + **out = **in + } + if in.EbpAudioInterval != nil { + in, out := &in.EbpAudioInterval, &out.EbpAudioInterval + *out = new(string) + **out = **in + } + if in.EbpLookaheadMs != nil { + in, out := &in.EbpLookaheadMs, &out.EbpLookaheadMs + *out = new(float64) + **out = **in + } + if in.EbpPlacement != nil { + in, out := &in.EbpPlacement, &out.EbpPlacement + *out = new(string) + **out = **in + } + if in.EcmPid != nil { + in, out := &in.EcmPid, &out.EcmPid + *out = new(string) + **out = **in + } + if in.EsRateInPes != nil { + in, out := &in.EsRateInPes, &out.EsRateInPes + *out = new(string) + **out = **in + } + if in.EtvPlatformPid != nil { + in, out := &in.EtvPlatformPid, &out.EtvPlatformPid + *out = new(string) + **out = **in + } + if in.EtvSignalPid != nil { + in, out := &in.EtvSignalPid, &out.EtvSignalPid + *out = new(string) + **out = **in + } + if in.FragmentTime != nil { + in, out := &in.FragmentTime, &out.FragmentTime + *out = new(float64) + **out = **in + } + if in.Klv != nil { + in, out := &in.Klv, &out.Klv + *out = new(string) + **out = **in + } + if in.KlvDataPids != nil { + in, out := &in.KlvDataPids, &out.KlvDataPids + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if 
in.NullPacketBitrate != nil { + in, out := &in.NullPacketBitrate, &out.NullPacketBitrate + *out = new(float64) + **out = **in + } + if in.PatInterval != nil { + in, out := &in.PatInterval, &out.PatInterval + *out = new(float64) + **out = **in + } + if in.PcrControl != nil { + in, out := &in.PcrControl, &out.PcrControl + *out = new(string) + **out = **in + } + if in.PcrPeriod != nil { + in, out := &in.PcrPeriod, &out.PcrPeriod + *out = new(float64) + **out = **in + } + if in.PcrPid != nil { + in, out := &in.PcrPid, &out.PcrPid + *out = new(string) + **out = **in + } + if in.PmtInterval != nil { + in, out := &in.PmtInterval, &out.PmtInterval + *out = new(float64) + **out = **in + } + if in.PmtPid != nil { + in, out := &in.PmtPid, &out.PmtPid + *out = new(string) + **out = **in + } + if in.ProgramNum != nil { + in, out := &in.ProgramNum, &out.ProgramNum + *out = new(float64) + **out = **in + } + if in.RateMode != nil { + in, out := &in.RateMode, &out.RateMode + *out = new(string) + **out = **in + } + if in.Scte27Pids != nil { + in, out := &in.Scte27Pids, &out.Scte27Pids + *out = new(string) + **out = **in + } + if in.Scte35Control != nil { + in, out := &in.Scte35Control, &out.Scte35Control + *out = new(string) + **out = **in + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(string) + **out = **in + } + if in.SegmentationMarkers != nil { + in, out := &in.SegmentationMarkers, &out.SegmentationMarkers + *out = new(string) + **out = **in + } + if in.SegmentationStyle != nil { + in, out := &in.SegmentationStyle, &out.SegmentationStyle + *out = new(string) + **out = **in + } + if in.SegmentationTime != nil { + in, out := &in.SegmentationTime, &out.SegmentationTime + *out = new(float64) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } + if in.TimedMetadataPid != nil { + in, out := &in.TimedMetadataPid, &out.TimedMetadataPid 
+ *out = new(string) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.VideoPid != nil { + in, out := &in.VideoPid, &out.VideoPid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSettingsM2TsSettingsInitParameters. +func (in *ContainerSettingsM2TsSettingsInitParameters) DeepCopy() *ContainerSettingsM2TsSettingsInitParameters { + if in == nil { + return nil + } + out := new(ContainerSettingsM2TsSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSettingsM2TsSettingsObservation) DeepCopyInto(out *ContainerSettingsM2TsSettingsObservation) { + *out = *in + if in.AbsentInputAudioBehavior != nil { + in, out := &in.AbsentInputAudioBehavior, &out.AbsentInputAudioBehavior + *out = new(string) + **out = **in + } + if in.Arib != nil { + in, out := &in.Arib, &out.Arib + *out = new(string) + **out = **in + } + if in.AribCaptionsPid != nil { + in, out := &in.AribCaptionsPid, &out.AribCaptionsPid + *out = new(string) + **out = **in + } + if in.AribCaptionsPidControl != nil { + in, out := &in.AribCaptionsPidControl, &out.AribCaptionsPidControl + *out = new(string) + **out = **in + } + if in.AudioBufferModel != nil { + in, out := &in.AudioBufferModel, &out.AudioBufferModel + *out = new(string) + **out = **in + } + if in.AudioFramesPerPes != nil { + in, out := &in.AudioFramesPerPes, &out.AudioFramesPerPes + *out = new(float64) + **out = **in + } + if in.AudioPids != nil { + in, out := &in.AudioPids, &out.AudioPids + *out = new(string) + **out = **in + } + if in.AudioStreamType != nil { + in, out := &in.AudioStreamType, &out.AudioStreamType + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, 
&out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferModel != nil { + in, out := &in.BufferModel, &out.BufferModel + *out = new(string) + **out = **in + } + if in.CcDescriptor != nil { + in, out := &in.CcDescriptor, &out.CcDescriptor + *out = new(string) + **out = **in + } + if in.DvbNitSettings != nil { + in, out := &in.DvbNitSettings, &out.DvbNitSettings + *out = new(M2TsSettingsDvbNitSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DvbSdtSettings != nil { + in, out := &in.DvbSdtSettings, &out.DvbSdtSettings + *out = new(M2TsSettingsDvbSdtSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DvbSubPids != nil { + in, out := &in.DvbSubPids, &out.DvbSubPids + *out = new(string) + **out = **in + } + if in.DvbTdtSettings != nil { + in, out := &in.DvbTdtSettings, &out.DvbTdtSettings + *out = new(M2TsSettingsDvbTdtSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DvbTeletextPid != nil { + in, out := &in.DvbTeletextPid, &out.DvbTeletextPid + *out = new(string) + **out = **in + } + if in.Ebif != nil { + in, out := &in.Ebif, &out.Ebif + *out = new(string) + **out = **in + } + if in.EbpAudioInterval != nil { + in, out := &in.EbpAudioInterval, &out.EbpAudioInterval + *out = new(string) + **out = **in + } + if in.EbpLookaheadMs != nil { + in, out := &in.EbpLookaheadMs, &out.EbpLookaheadMs + *out = new(float64) + **out = **in + } + if in.EbpPlacement != nil { + in, out := &in.EbpPlacement, &out.EbpPlacement + *out = new(string) + **out = **in + } + if in.EcmPid != nil { + in, out := &in.EcmPid, &out.EcmPid + *out = new(string) + **out = **in + } + if in.EsRateInPes != nil { + in, out := &in.EsRateInPes, &out.EsRateInPes + *out = new(string) + **out = **in + } + if in.EtvPlatformPid != nil { + in, out := &in.EtvPlatformPid, &out.EtvPlatformPid + *out = new(string) + **out = **in + } + if in.EtvSignalPid != nil { + in, out := &in.EtvSignalPid, &out.EtvSignalPid + *out = new(string) + **out = **in + } + if in.FragmentTime != nil { + in, 
out := &in.FragmentTime, &out.FragmentTime + *out = new(float64) + **out = **in + } + if in.Klv != nil { + in, out := &in.Klv, &out.Klv + *out = new(string) + **out = **in + } + if in.KlvDataPids != nil { + in, out := &in.KlvDataPids, &out.KlvDataPids + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.NullPacketBitrate != nil { + in, out := &in.NullPacketBitrate, &out.NullPacketBitrate + *out = new(float64) + **out = **in + } + if in.PatInterval != nil { + in, out := &in.PatInterval, &out.PatInterval + *out = new(float64) + **out = **in + } + if in.PcrControl != nil { + in, out := &in.PcrControl, &out.PcrControl + *out = new(string) + **out = **in + } + if in.PcrPeriod != nil { + in, out := &in.PcrPeriod, &out.PcrPeriod + *out = new(float64) + **out = **in + } + if in.PcrPid != nil { + in, out := &in.PcrPid, &out.PcrPid + *out = new(string) + **out = **in + } + if in.PmtInterval != nil { + in, out := &in.PmtInterval, &out.PmtInterval + *out = new(float64) + **out = **in + } + if in.PmtPid != nil { + in, out := &in.PmtPid, &out.PmtPid + *out = new(string) + **out = **in + } + if in.ProgramNum != nil { + in, out := &in.ProgramNum, &out.ProgramNum + *out = new(float64) + **out = **in + } + if in.RateMode != nil { + in, out := &in.RateMode, &out.RateMode + *out = new(string) + **out = **in + } + if in.Scte27Pids != nil { + in, out := &in.Scte27Pids, &out.Scte27Pids + *out = new(string) + **out = **in + } + if in.Scte35Control != nil { + in, out := &in.Scte35Control, &out.Scte35Control + *out = new(string) + **out = **in + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(string) + **out = **in + } + if in.SegmentationMarkers != nil { + in, out := &in.SegmentationMarkers, &out.SegmentationMarkers + *out = new(string) + **out = **in + } + if in.SegmentationStyle != nil { + in, out := 
&in.SegmentationStyle, &out.SegmentationStyle + *out = new(string) + **out = **in + } + if in.SegmentationTime != nil { + in, out := &in.SegmentationTime, &out.SegmentationTime + *out = new(float64) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } + if in.TimedMetadataPid != nil { + in, out := &in.TimedMetadataPid, &out.TimedMetadataPid + *out = new(string) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.VideoPid != nil { + in, out := &in.VideoPid, &out.VideoPid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSettingsM2TsSettingsObservation. +func (in *ContainerSettingsM2TsSettingsObservation) DeepCopy() *ContainerSettingsM2TsSettingsObservation { + if in == nil { + return nil + } + out := new(ContainerSettingsM2TsSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerSettingsM2TsSettingsParameters) DeepCopyInto(out *ContainerSettingsM2TsSettingsParameters) { + *out = *in + if in.AbsentInputAudioBehavior != nil { + in, out := &in.AbsentInputAudioBehavior, &out.AbsentInputAudioBehavior + *out = new(string) + **out = **in + } + if in.Arib != nil { + in, out := &in.Arib, &out.Arib + *out = new(string) + **out = **in + } + if in.AribCaptionsPid != nil { + in, out := &in.AribCaptionsPid, &out.AribCaptionsPid + *out = new(string) + **out = **in + } + if in.AribCaptionsPidControl != nil { + in, out := &in.AribCaptionsPidControl, &out.AribCaptionsPidControl + *out = new(string) + **out = **in + } + if in.AudioBufferModel != nil { + in, out := &in.AudioBufferModel, &out.AudioBufferModel + *out = new(string) + **out = **in + } + if in.AudioFramesPerPes != nil { + in, out := &in.AudioFramesPerPes, &out.AudioFramesPerPes + *out = new(float64) + **out = **in + } + if in.AudioPids != nil { + in, out := &in.AudioPids, &out.AudioPids + *out = new(string) + **out = **in + } + if in.AudioStreamType != nil { + in, out := &in.AudioStreamType, &out.AudioStreamType + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferModel != nil { + in, out := &in.BufferModel, &out.BufferModel + *out = new(string) + **out = **in + } + if in.CcDescriptor != nil { + in, out := &in.CcDescriptor, &out.CcDescriptor + *out = new(string) + **out = **in + } + if in.DvbNitSettings != nil { + in, out := &in.DvbNitSettings, &out.DvbNitSettings + *out = new(M2TsSettingsDvbNitSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbSdtSettings != nil { + in, out := &in.DvbSdtSettings, &out.DvbSdtSettings + *out = new(M2TsSettingsDvbSdtSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbSubPids != nil { + in, out := &in.DvbSubPids, &out.DvbSubPids + *out = new(string) + **out = **in + } + if in.DvbTdtSettings != nil { + in, out := 
&in.DvbTdtSettings, &out.DvbTdtSettings + *out = new(M2TsSettingsDvbTdtSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbTeletextPid != nil { + in, out := &in.DvbTeletextPid, &out.DvbTeletextPid + *out = new(string) + **out = **in + } + if in.Ebif != nil { + in, out := &in.Ebif, &out.Ebif + *out = new(string) + **out = **in + } + if in.EbpAudioInterval != nil { + in, out := &in.EbpAudioInterval, &out.EbpAudioInterval + *out = new(string) + **out = **in + } + if in.EbpLookaheadMs != nil { + in, out := &in.EbpLookaheadMs, &out.EbpLookaheadMs + *out = new(float64) + **out = **in + } + if in.EbpPlacement != nil { + in, out := &in.EbpPlacement, &out.EbpPlacement + *out = new(string) + **out = **in + } + if in.EcmPid != nil { + in, out := &in.EcmPid, &out.EcmPid + *out = new(string) + **out = **in + } + if in.EsRateInPes != nil { + in, out := &in.EsRateInPes, &out.EsRateInPes + *out = new(string) + **out = **in + } + if in.EtvPlatformPid != nil { + in, out := &in.EtvPlatformPid, &out.EtvPlatformPid + *out = new(string) + **out = **in + } + if in.EtvSignalPid != nil { + in, out := &in.EtvSignalPid, &out.EtvSignalPid + *out = new(string) + **out = **in + } + if in.FragmentTime != nil { + in, out := &in.FragmentTime, &out.FragmentTime + *out = new(float64) + **out = **in + } + if in.Klv != nil { + in, out := &in.Klv, &out.Klv + *out = new(string) + **out = **in + } + if in.KlvDataPids != nil { + in, out := &in.KlvDataPids, &out.KlvDataPids + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.NullPacketBitrate != nil { + in, out := &in.NullPacketBitrate, &out.NullPacketBitrate + *out = new(float64) + **out = **in + } + if in.PatInterval != nil { + in, out := &in.PatInterval, &out.PatInterval + *out = new(float64) + **out = **in + } + if in.PcrControl != nil { + in, out := &in.PcrControl, &out.PcrControl + *out = new(string) + **out = 
**in + } + if in.PcrPeriod != nil { + in, out := &in.PcrPeriod, &out.PcrPeriod + *out = new(float64) + **out = **in + } + if in.PcrPid != nil { + in, out := &in.PcrPid, &out.PcrPid + *out = new(string) + **out = **in + } + if in.PmtInterval != nil { + in, out := &in.PmtInterval, &out.PmtInterval + *out = new(float64) + **out = **in + } + if in.PmtPid != nil { + in, out := &in.PmtPid, &out.PmtPid + *out = new(string) + **out = **in + } + if in.ProgramNum != nil { + in, out := &in.ProgramNum, &out.ProgramNum + *out = new(float64) + **out = **in + } + if in.RateMode != nil { + in, out := &in.RateMode, &out.RateMode + *out = new(string) + **out = **in + } + if in.Scte27Pids != nil { + in, out := &in.Scte27Pids, &out.Scte27Pids + *out = new(string) + **out = **in + } + if in.Scte35Control != nil { + in, out := &in.Scte35Control, &out.Scte35Control + *out = new(string) + **out = **in + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(string) + **out = **in + } + if in.SegmentationMarkers != nil { + in, out := &in.SegmentationMarkers, &out.SegmentationMarkers + *out = new(string) + **out = **in + } + if in.SegmentationStyle != nil { + in, out := &in.SegmentationStyle, &out.SegmentationStyle + *out = new(string) + **out = **in + } + if in.SegmentationTime != nil { + in, out := &in.SegmentationTime, &out.SegmentationTime + *out = new(float64) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } + if in.TimedMetadataPid != nil { + in, out := &in.TimedMetadataPid, &out.TimedMetadataPid + *out = new(string) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.VideoPid != nil { + in, out := &in.VideoPid, &out.VideoPid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new ContainerSettingsM2TsSettingsParameters. +func (in *ContainerSettingsM2TsSettingsParameters) DeepCopy() *ContainerSettingsM2TsSettingsParameters { + if in == nil { + return nil + } + out := new(ContainerSettingsM2TsSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSettingsObservation) DeepCopyInto(out *ContainerSettingsObservation) { + *out = *in + if in.M2TsSettings != nil { + in, out := &in.M2TsSettings, &out.M2TsSettings + *out = new(M2TsSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.RawSettings != nil { + in, out := &in.RawSettings, &out.RawSettings + *out = new(RawSettingsParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSettingsObservation. +func (in *ContainerSettingsObservation) DeepCopy() *ContainerSettingsObservation { + if in == nil { + return nil + } + out := new(ContainerSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSettingsParameters) DeepCopyInto(out *ContainerSettingsParameters) { + *out = *in + if in.M2TsSettings != nil { + in, out := &in.M2TsSettings, &out.M2TsSettings + *out = new(M2TsSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.RawSettings != nil { + in, out := &in.RawSettings, &out.RawSettings + *out = new(RawSettingsParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSettingsParameters. 
+func (in *ContainerSettingsParameters) DeepCopy() *ContainerSettingsParameters { + if in == nil { + return nil + } + out := new(ContainerSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationInitParameters) DeepCopyInto(out *DestinationInitParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationInitParameters. +func (in *DestinationInitParameters) DeepCopy() *DestinationInitParameters { + if in == nil { + return nil + } + out := new(DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationObservation) DeepCopyInto(out *DestinationObservation) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationObservation. +func (in *DestinationObservation) DeepCopy() *DestinationObservation { + if in == nil { + return nil + } + out := new(DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationParameters) DeepCopyInto(out *DestinationParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationParameters. 
+func (in *DestinationParameters) DeepCopy() *DestinationParameters { + if in == nil { + return nil + } + out := new(DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationSettingsInitParameters) DeepCopyInto(out *DestinationSettingsInitParameters) { + *out = *in + if in.AribDestinationSettings != nil { + in, out := &in.AribDestinationSettings, &out.AribDestinationSettings + *out = new(AribDestinationSettingsInitParameters) + **out = **in + } + if in.BurnInDestinationSettings != nil { + in, out := &in.BurnInDestinationSettings, &out.BurnInDestinationSettings + *out = new(BurnInDestinationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbSubDestinationSettings != nil { + in, out := &in.DvbSubDestinationSettings, &out.DvbSubDestinationSettings + *out = new(DvbSubDestinationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EbuTtDDestinationSettings != nil { + in, out := &in.EbuTtDDestinationSettings, &out.EbuTtDDestinationSettings + *out = new(EbuTtDDestinationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EmbeddedDestinationSettings != nil { + in, out := &in.EmbeddedDestinationSettings, &out.EmbeddedDestinationSettings + *out = new(EmbeddedDestinationSettingsInitParameters) + **out = **in + } + if in.EmbeddedPlusScte20DestinationSettings != nil { + in, out := &in.EmbeddedPlusScte20DestinationSettings, &out.EmbeddedPlusScte20DestinationSettings + *out = new(EmbeddedPlusScte20DestinationSettingsInitParameters) + **out = **in + } + if in.RtmpCaptionInfoDestinationSettings != nil { + in, out := &in.RtmpCaptionInfoDestinationSettings, &out.RtmpCaptionInfoDestinationSettings + *out = new(RtmpCaptionInfoDestinationSettingsInitParameters) + **out = **in + } + if in.Scte20PlusEmbeddedDestinationSettings != nil { + in, out := &in.Scte20PlusEmbeddedDestinationSettings, 
&out.Scte20PlusEmbeddedDestinationSettings + *out = new(Scte20PlusEmbeddedDestinationSettingsInitParameters) + **out = **in + } + if in.Scte27DestinationSettings != nil { + in, out := &in.Scte27DestinationSettings, &out.Scte27DestinationSettings + *out = new(Scte27DestinationSettingsInitParameters) + **out = **in + } + if in.SmpteTtDestinationSettings != nil { + in, out := &in.SmpteTtDestinationSettings, &out.SmpteTtDestinationSettings + *out = new(SmpteTtDestinationSettingsInitParameters) + **out = **in + } + if in.TeletextDestinationSettings != nil { + in, out := &in.TeletextDestinationSettings, &out.TeletextDestinationSettings + *out = new(TeletextDestinationSettingsInitParameters) + **out = **in + } + if in.TtmlDestinationSettings != nil { + in, out := &in.TtmlDestinationSettings, &out.TtmlDestinationSettings + *out = new(TtmlDestinationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WebvttDestinationSettings != nil { + in, out := &in.WebvttDestinationSettings, &out.WebvttDestinationSettings + *out = new(WebvttDestinationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationSettingsInitParameters. +func (in *DestinationSettingsInitParameters) DeepCopy() *DestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(DestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationSettingsObservation) DeepCopyInto(out *DestinationSettingsObservation) { + *out = *in + if in.AribDestinationSettings != nil { + in, out := &in.AribDestinationSettings, &out.AribDestinationSettings + *out = new(AribDestinationSettingsParameters) + **out = **in + } + if in.BurnInDestinationSettings != nil { + in, out := &in.BurnInDestinationSettings, &out.BurnInDestinationSettings + *out = new(BurnInDestinationSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DvbSubDestinationSettings != nil { + in, out := &in.DvbSubDestinationSettings, &out.DvbSubDestinationSettings + *out = new(DvbSubDestinationSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.EbuTtDDestinationSettings != nil { + in, out := &in.EbuTtDDestinationSettings, &out.EbuTtDDestinationSettings + *out = new(EbuTtDDestinationSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.EmbeddedDestinationSettings != nil { + in, out := &in.EmbeddedDestinationSettings, &out.EmbeddedDestinationSettings + *out = new(EmbeddedDestinationSettingsParameters) + **out = **in + } + if in.EmbeddedPlusScte20DestinationSettings != nil { + in, out := &in.EmbeddedPlusScte20DestinationSettings, &out.EmbeddedPlusScte20DestinationSettings + *out = new(EmbeddedPlusScte20DestinationSettingsParameters) + **out = **in + } + if in.RtmpCaptionInfoDestinationSettings != nil { + in, out := &in.RtmpCaptionInfoDestinationSettings, &out.RtmpCaptionInfoDestinationSettings + *out = new(RtmpCaptionInfoDestinationSettingsParameters) + **out = **in + } + if in.Scte20PlusEmbeddedDestinationSettings != nil { + in, out := &in.Scte20PlusEmbeddedDestinationSettings, &out.Scte20PlusEmbeddedDestinationSettings + *out = new(Scte20PlusEmbeddedDestinationSettingsParameters) + **out = **in + } + if in.Scte27DestinationSettings != nil { + in, out := &in.Scte27DestinationSettings, &out.Scte27DestinationSettings + *out = new(Scte27DestinationSettingsParameters) + **out = **in + } + if in.SmpteTtDestinationSettings != 
nil { + in, out := &in.SmpteTtDestinationSettings, &out.SmpteTtDestinationSettings + *out = new(SmpteTtDestinationSettingsParameters) + **out = **in + } + if in.TeletextDestinationSettings != nil { + in, out := &in.TeletextDestinationSettings, &out.TeletextDestinationSettings + *out = new(TeletextDestinationSettingsParameters) + **out = **in + } + if in.TtmlDestinationSettings != nil { + in, out := &in.TtmlDestinationSettings, &out.TtmlDestinationSettings + *out = new(TtmlDestinationSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.WebvttDestinationSettings != nil { + in, out := &in.WebvttDestinationSettings, &out.WebvttDestinationSettings + *out = new(WebvttDestinationSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationSettingsObservation. +func (in *DestinationSettingsObservation) DeepCopy() *DestinationSettingsObservation { + if in == nil { + return nil + } + out := new(DestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationSettingsParameters) DeepCopyInto(out *DestinationSettingsParameters) { + *out = *in + if in.AribDestinationSettings != nil { + in, out := &in.AribDestinationSettings, &out.AribDestinationSettings + *out = new(AribDestinationSettingsParameters) + **out = **in + } + if in.BurnInDestinationSettings != nil { + in, out := &in.BurnInDestinationSettings, &out.BurnInDestinationSettings + *out = new(BurnInDestinationSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbSubDestinationSettings != nil { + in, out := &in.DvbSubDestinationSettings, &out.DvbSubDestinationSettings + *out = new(DvbSubDestinationSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.EbuTtDDestinationSettings != nil { + in, out := &in.EbuTtDDestinationSettings, &out.EbuTtDDestinationSettings + *out = new(EbuTtDDestinationSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.EmbeddedDestinationSettings != nil { + in, out := &in.EmbeddedDestinationSettings, &out.EmbeddedDestinationSettings + *out = new(EmbeddedDestinationSettingsParameters) + **out = **in + } + if in.EmbeddedPlusScte20DestinationSettings != nil { + in, out := &in.EmbeddedPlusScte20DestinationSettings, &out.EmbeddedPlusScte20DestinationSettings + *out = new(EmbeddedPlusScte20DestinationSettingsParameters) + **out = **in + } + if in.RtmpCaptionInfoDestinationSettings != nil { + in, out := &in.RtmpCaptionInfoDestinationSettings, &out.RtmpCaptionInfoDestinationSettings + *out = new(RtmpCaptionInfoDestinationSettingsParameters) + **out = **in + } + if in.Scte20PlusEmbeddedDestinationSettings != nil { + in, out := &in.Scte20PlusEmbeddedDestinationSettings, &out.Scte20PlusEmbeddedDestinationSettings + *out = new(Scte20PlusEmbeddedDestinationSettingsParameters) + **out = **in + } + if in.Scte27DestinationSettings != nil { + in, out := &in.Scte27DestinationSettings, &out.Scte27DestinationSettings + *out = new(Scte27DestinationSettingsParameters) + **out = **in + } + if in.SmpteTtDestinationSettings != nil 
{ + in, out := &in.SmpteTtDestinationSettings, &out.SmpteTtDestinationSettings + *out = new(SmpteTtDestinationSettingsParameters) + **out = **in + } + if in.TeletextDestinationSettings != nil { + in, out := &in.TeletextDestinationSettings, &out.TeletextDestinationSettings + *out = new(TeletextDestinationSettingsParameters) + **out = **in + } + if in.TtmlDestinationSettings != nil { + in, out := &in.TtmlDestinationSettings, &out.TtmlDestinationSettings + *out = new(TtmlDestinationSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.WebvttDestinationSettings != nil { + in, out := &in.WebvttDestinationSettings, &out.WebvttDestinationSettings + *out = new(WebvttDestinationSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationSettingsParameters. +func (in *DestinationSettingsParameters) DeepCopy() *DestinationSettingsParameters { + if in == nil { + return nil + } + out := new(DestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationsInitParameters) DeepCopyInto(out *DestinationsInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MediaPackageSettings != nil { + in, out := &in.MediaPackageSettings, &out.MediaPackageSettings + *out = make([]MediaPackageSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MultiplexSettings != nil { + in, out := &in.MultiplexSettings, &out.MultiplexSettings + *out = new(MultiplexSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]SettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationsInitParameters. +func (in *DestinationsInitParameters) DeepCopy() *DestinationsInitParameters { + if in == nil { + return nil + } + out := new(DestinationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationsObservation) DeepCopyInto(out *DestinationsObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MediaPackageSettings != nil { + in, out := &in.MediaPackageSettings, &out.MediaPackageSettings + *out = make([]MediaPackageSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MultiplexSettings != nil { + in, out := &in.MultiplexSettings, &out.MultiplexSettings + *out = new(MultiplexSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]SettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationsObservation. +func (in *DestinationsObservation) DeepCopy() *DestinationsObservation { + if in == nil { + return nil + } + out := new(DestinationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationsParameters) DeepCopyInto(out *DestinationsParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MediaPackageSettings != nil { + in, out := &in.MediaPackageSettings, &out.MediaPackageSettings + *out = make([]MediaPackageSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MultiplexSettings != nil { + in, out := &in.MultiplexSettings, &out.MultiplexSettings + *out = new(MultiplexSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]SettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationsParameters. +func (in *DestinationsParameters) DeepCopy() *DestinationsParameters { + if in == nil { + return nil + } + out := new(DestinationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DolbyEDecodeInitParameters) DeepCopyInto(out *DolbyEDecodeInitParameters) { + *out = *in + if in.ProgramSelection != nil { + in, out := &in.ProgramSelection, &out.ProgramSelection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DolbyEDecodeInitParameters. +func (in *DolbyEDecodeInitParameters) DeepCopy() *DolbyEDecodeInitParameters { + if in == nil { + return nil + } + out := new(DolbyEDecodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DolbyEDecodeObservation) DeepCopyInto(out *DolbyEDecodeObservation) { + *out = *in + if in.ProgramSelection != nil { + in, out := &in.ProgramSelection, &out.ProgramSelection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DolbyEDecodeObservation. +func (in *DolbyEDecodeObservation) DeepCopy() *DolbyEDecodeObservation { + if in == nil { + return nil + } + out := new(DolbyEDecodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DolbyEDecodeParameters) DeepCopyInto(out *DolbyEDecodeParameters) { + *out = *in + if in.ProgramSelection != nil { + in, out := &in.ProgramSelection, &out.ProgramSelection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DolbyEDecodeParameters. +func (in *DolbyEDecodeParameters) DeepCopy() *DolbyEDecodeParameters { + if in == nil { + return nil + } + out := new(DolbyEDecodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DolbyVision81SettingsInitParameters) DeepCopyInto(out *DolbyVision81SettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DolbyVision81SettingsInitParameters. +func (in *DolbyVision81SettingsInitParameters) DeepCopy() *DolbyVision81SettingsInitParameters { + if in == nil { + return nil + } + out := new(DolbyVision81SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DolbyVision81SettingsObservation) DeepCopyInto(out *DolbyVision81SettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DolbyVision81SettingsObservation. +func (in *DolbyVision81SettingsObservation) DeepCopy() *DolbyVision81SettingsObservation { + if in == nil { + return nil + } + out := new(DolbyVision81SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DolbyVision81SettingsParameters) DeepCopyInto(out *DolbyVision81SettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DolbyVision81SettingsParameters. +func (in *DolbyVision81SettingsParameters) DeepCopy() *DolbyVision81SettingsParameters { + if in == nil { + return nil + } + out := new(DolbyVision81SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbNitSettingsInitParameters) DeepCopyInto(out *DvbNitSettingsInitParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(float64) + **out = **in + } + if in.NetworkName != nil { + in, out := &in.NetworkName, &out.NetworkName + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbNitSettingsInitParameters. 
+func (in *DvbNitSettingsInitParameters) DeepCopy() *DvbNitSettingsInitParameters { + if in == nil { + return nil + } + out := new(DvbNitSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbNitSettingsObservation) DeepCopyInto(out *DvbNitSettingsObservation) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(float64) + **out = **in + } + if in.NetworkName != nil { + in, out := &in.NetworkName, &out.NetworkName + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbNitSettingsObservation. +func (in *DvbNitSettingsObservation) DeepCopy() *DvbNitSettingsObservation { + if in == nil { + return nil + } + out := new(DvbNitSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbNitSettingsParameters) DeepCopyInto(out *DvbNitSettingsParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(float64) + **out = **in + } + if in.NetworkName != nil { + in, out := &in.NetworkName, &out.NetworkName + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbNitSettingsParameters. 
+func (in *DvbNitSettingsParameters) DeepCopy() *DvbNitSettingsParameters { + if in == nil { + return nil + } + out := new(DvbNitSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbSdtSettingsInitParameters) DeepCopyInto(out *DvbSdtSettingsInitParameters) { + *out = *in + if in.OutputSdt != nil { + in, out := &in.OutputSdt, &out.OutputSdt + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceProviderName != nil { + in, out := &in.ServiceProviderName, &out.ServiceProviderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSdtSettingsInitParameters. +func (in *DvbSdtSettingsInitParameters) DeepCopy() *DvbSdtSettingsInitParameters { + if in == nil { + return nil + } + out := new(DvbSdtSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DvbSdtSettingsObservation) DeepCopyInto(out *DvbSdtSettingsObservation) { + *out = *in + if in.OutputSdt != nil { + in, out := &in.OutputSdt, &out.OutputSdt + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceProviderName != nil { + in, out := &in.ServiceProviderName, &out.ServiceProviderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSdtSettingsObservation. +func (in *DvbSdtSettingsObservation) DeepCopy() *DvbSdtSettingsObservation { + if in == nil { + return nil + } + out := new(DvbSdtSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbSdtSettingsParameters) DeepCopyInto(out *DvbSdtSettingsParameters) { + *out = *in + if in.OutputSdt != nil { + in, out := &in.OutputSdt, &out.OutputSdt + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceProviderName != nil { + in, out := &in.ServiceProviderName, &out.ServiceProviderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSdtSettingsParameters. +func (in *DvbSdtSettingsParameters) DeepCopy() *DvbSdtSettingsParameters { + if in == nil { + return nil + } + out := new(DvbSdtSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DvbSubDestinationSettingsFontInitParameters) DeepCopyInto(out *DvbSubDestinationSettingsFontInitParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSubDestinationSettingsFontInitParameters. +func (in *DvbSubDestinationSettingsFontInitParameters) DeepCopy() *DvbSubDestinationSettingsFontInitParameters { + if in == nil { + return nil + } + out := new(DvbSubDestinationSettingsFontInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbSubDestinationSettingsFontObservation) DeepCopyInto(out *DvbSubDestinationSettingsFontObservation) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSubDestinationSettingsFontObservation. +func (in *DvbSubDestinationSettingsFontObservation) DeepCopy() *DvbSubDestinationSettingsFontObservation { + if in == nil { + return nil + } + out := new(DvbSubDestinationSettingsFontObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DvbSubDestinationSettingsFontParameters) DeepCopyInto(out *DvbSubDestinationSettingsFontParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSubDestinationSettingsFontParameters. +func (in *DvbSubDestinationSettingsFontParameters) DeepCopy() *DvbSubDestinationSettingsFontParameters { + if in == nil { + return nil + } + out := new(DvbSubDestinationSettingsFontParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbSubDestinationSettingsInitParameters) DeepCopyInto(out *DvbSubDestinationSettingsInitParameters) { + *out = *in + if in.Alignment != nil { + in, out := &in.Alignment, &out.Alignment + *out = new(string) + **out = **in + } + if in.BackgroundColor != nil { + in, out := &in.BackgroundColor, &out.BackgroundColor + *out = new(string) + **out = **in + } + if in.BackgroundOpacity != nil { + in, out := &in.BackgroundOpacity, &out.BackgroundOpacity + *out = new(float64) + **out = **in + } + if in.Font != nil { + in, out := &in.Font, &out.Font + *out = new(DvbSubDestinationSettingsFontInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FontColor != nil { + in, out := &in.FontColor, &out.FontColor + *out = new(string) + **out = **in + } + if in.FontOpacity != nil { + in, out := &in.FontOpacity, &out.FontOpacity + *out = new(float64) + **out = **in + } + if in.FontResolution != nil { + in, out := &in.FontResolution, &out.FontResolution + *out = new(float64) + **out = **in + } + if in.FontSize != nil { + in, out := &in.FontSize, &out.FontSize + 
*out = new(string) + **out = **in + } + if in.OutlineColor != nil { + in, out := &in.OutlineColor, &out.OutlineColor + *out = new(string) + **out = **in + } + if in.OutlineSize != nil { + in, out := &in.OutlineSize, &out.OutlineSize + *out = new(float64) + **out = **in + } + if in.ShadowColor != nil { + in, out := &in.ShadowColor, &out.ShadowColor + *out = new(string) + **out = **in + } + if in.ShadowOpacity != nil { + in, out := &in.ShadowOpacity, &out.ShadowOpacity + *out = new(float64) + **out = **in + } + if in.ShadowXOffset != nil { + in, out := &in.ShadowXOffset, &out.ShadowXOffset + *out = new(float64) + **out = **in + } + if in.ShadowYOffset != nil { + in, out := &in.ShadowYOffset, &out.ShadowYOffset + *out = new(float64) + **out = **in + } + if in.TeletextGridControl != nil { + in, out := &in.TeletextGridControl, &out.TeletextGridControl + *out = new(string) + **out = **in + } + if in.XPosition != nil { + in, out := &in.XPosition, &out.XPosition + *out = new(float64) + **out = **in + } + if in.YPosition != nil { + in, out := &in.YPosition, &out.YPosition + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSubDestinationSettingsInitParameters. +func (in *DvbSubDestinationSettingsInitParameters) DeepCopy() *DvbSubDestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(DvbSubDestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DvbSubDestinationSettingsObservation) DeepCopyInto(out *DvbSubDestinationSettingsObservation) { + *out = *in + if in.Alignment != nil { + in, out := &in.Alignment, &out.Alignment + *out = new(string) + **out = **in + } + if in.BackgroundColor != nil { + in, out := &in.BackgroundColor, &out.BackgroundColor + *out = new(string) + **out = **in + } + if in.BackgroundOpacity != nil { + in, out := &in.BackgroundOpacity, &out.BackgroundOpacity + *out = new(float64) + **out = **in + } + if in.Font != nil { + in, out := &in.Font, &out.Font + *out = new(DvbSubDestinationSettingsFontObservation) + (*in).DeepCopyInto(*out) + } + if in.FontColor != nil { + in, out := &in.FontColor, &out.FontColor + *out = new(string) + **out = **in + } + if in.FontOpacity != nil { + in, out := &in.FontOpacity, &out.FontOpacity + *out = new(float64) + **out = **in + } + if in.FontResolution != nil { + in, out := &in.FontResolution, &out.FontResolution + *out = new(float64) + **out = **in + } + if in.FontSize != nil { + in, out := &in.FontSize, &out.FontSize + *out = new(string) + **out = **in + } + if in.OutlineColor != nil { + in, out := &in.OutlineColor, &out.OutlineColor + *out = new(string) + **out = **in + } + if in.OutlineSize != nil { + in, out := &in.OutlineSize, &out.OutlineSize + *out = new(float64) + **out = **in + } + if in.ShadowColor != nil { + in, out := &in.ShadowColor, &out.ShadowColor + *out = new(string) + **out = **in + } + if in.ShadowOpacity != nil { + in, out := &in.ShadowOpacity, &out.ShadowOpacity + *out = new(float64) + **out = **in + } + if in.ShadowXOffset != nil { + in, out := &in.ShadowXOffset, &out.ShadowXOffset + *out = new(float64) + **out = **in + } + if in.ShadowYOffset != nil { + in, out := &in.ShadowYOffset, &out.ShadowYOffset + *out = new(float64) + **out = **in + } + if in.TeletextGridControl != nil { + in, out := &in.TeletextGridControl, &out.TeletextGridControl + *out = new(string) + **out = **in + } + if in.XPosition != nil { + in, out := 
&in.XPosition, &out.XPosition + *out = new(float64) + **out = **in + } + if in.YPosition != nil { + in, out := &in.YPosition, &out.YPosition + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSubDestinationSettingsObservation. +func (in *DvbSubDestinationSettingsObservation) DeepCopy() *DvbSubDestinationSettingsObservation { + if in == nil { + return nil + } + out := new(DvbSubDestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbSubDestinationSettingsParameters) DeepCopyInto(out *DvbSubDestinationSettingsParameters) { + *out = *in + if in.Alignment != nil { + in, out := &in.Alignment, &out.Alignment + *out = new(string) + **out = **in + } + if in.BackgroundColor != nil { + in, out := &in.BackgroundColor, &out.BackgroundColor + *out = new(string) + **out = **in + } + if in.BackgroundOpacity != nil { + in, out := &in.BackgroundOpacity, &out.BackgroundOpacity + *out = new(float64) + **out = **in + } + if in.Font != nil { + in, out := &in.Font, &out.Font + *out = new(DvbSubDestinationSettingsFontParameters) + (*in).DeepCopyInto(*out) + } + if in.FontColor != nil { + in, out := &in.FontColor, &out.FontColor + *out = new(string) + **out = **in + } + if in.FontOpacity != nil { + in, out := &in.FontOpacity, &out.FontOpacity + *out = new(float64) + **out = **in + } + if in.FontResolution != nil { + in, out := &in.FontResolution, &out.FontResolution + *out = new(float64) + **out = **in + } + if in.FontSize != nil { + in, out := &in.FontSize, &out.FontSize + *out = new(string) + **out = **in + } + if in.OutlineColor != nil { + in, out := &in.OutlineColor, &out.OutlineColor + *out = new(string) + **out = **in + } + if in.OutlineSize != nil { + in, out := &in.OutlineSize, &out.OutlineSize + *out = new(float64) + **out = **in + } + if 
in.ShadowColor != nil { + in, out := &in.ShadowColor, &out.ShadowColor + *out = new(string) + **out = **in + } + if in.ShadowOpacity != nil { + in, out := &in.ShadowOpacity, &out.ShadowOpacity + *out = new(float64) + **out = **in + } + if in.ShadowXOffset != nil { + in, out := &in.ShadowXOffset, &out.ShadowXOffset + *out = new(float64) + **out = **in + } + if in.ShadowYOffset != nil { + in, out := &in.ShadowYOffset, &out.ShadowYOffset + *out = new(float64) + **out = **in + } + if in.TeletextGridControl != nil { + in, out := &in.TeletextGridControl, &out.TeletextGridControl + *out = new(string) + **out = **in + } + if in.XPosition != nil { + in, out := &in.XPosition, &out.XPosition + *out = new(float64) + **out = **in + } + if in.YPosition != nil { + in, out := &in.YPosition, &out.YPosition + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSubDestinationSettingsParameters. +func (in *DvbSubDestinationSettingsParameters) DeepCopy() *DvbSubDestinationSettingsParameters { + if in == nil { + return nil + } + out := new(DvbSubDestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbSubSourceSettingsInitParameters) DeepCopyInto(out *DvbSubSourceSettingsInitParameters) { + *out = *in + if in.OcrLanguage != nil { + in, out := &in.OcrLanguage, &out.OcrLanguage + *out = new(string) + **out = **in + } + if in.Pid != nil { + in, out := &in.Pid, &out.Pid + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSubSourceSettingsInitParameters. 
+func (in *DvbSubSourceSettingsInitParameters) DeepCopy() *DvbSubSourceSettingsInitParameters { + if in == nil { + return nil + } + out := new(DvbSubSourceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbSubSourceSettingsObservation) DeepCopyInto(out *DvbSubSourceSettingsObservation) { + *out = *in + if in.OcrLanguage != nil { + in, out := &in.OcrLanguage, &out.OcrLanguage + *out = new(string) + **out = **in + } + if in.Pid != nil { + in, out := &in.Pid, &out.Pid + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSubSourceSettingsObservation. +func (in *DvbSubSourceSettingsObservation) DeepCopy() *DvbSubSourceSettingsObservation { + if in == nil { + return nil + } + out := new(DvbSubSourceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbSubSourceSettingsParameters) DeepCopyInto(out *DvbSubSourceSettingsParameters) { + *out = *in + if in.OcrLanguage != nil { + in, out := &in.OcrLanguage, &out.OcrLanguage + *out = new(string) + **out = **in + } + if in.Pid != nil { + in, out := &in.Pid, &out.Pid + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbSubSourceSettingsParameters. +func (in *DvbSubSourceSettingsParameters) DeepCopy() *DvbSubSourceSettingsParameters { + if in == nil { + return nil + } + out := new(DvbSubSourceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DvbTdtSettingsInitParameters) DeepCopyInto(out *DvbTdtSettingsInitParameters) { + *out = *in + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbTdtSettingsInitParameters. +func (in *DvbTdtSettingsInitParameters) DeepCopy() *DvbTdtSettingsInitParameters { + if in == nil { + return nil + } + out := new(DvbTdtSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbTdtSettingsObservation) DeepCopyInto(out *DvbTdtSettingsObservation) { + *out = *in + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbTdtSettingsObservation. +func (in *DvbTdtSettingsObservation) DeepCopy() *DvbTdtSettingsObservation { + if in == nil { + return nil + } + out := new(DvbTdtSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DvbTdtSettingsParameters) DeepCopyInto(out *DvbTdtSettingsParameters) { + *out = *in + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DvbTdtSettingsParameters. +func (in *DvbTdtSettingsParameters) DeepCopy() *DvbTdtSettingsParameters { + if in == nil { + return nil + } + out := new(DvbTdtSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Eac3AtmosSettingsInitParameters) DeepCopyInto(out *Eac3AtmosSettingsInitParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.Dialnorm != nil { + in, out := &in.Dialnorm, &out.Dialnorm + *out = new(float64) + **out = **in + } + if in.DrcLine != nil { + in, out := &in.DrcLine, &out.DrcLine + *out = new(string) + **out = **in + } + if in.DrcRf != nil { + in, out := &in.DrcRf, &out.DrcRf + *out = new(string) + **out = **in + } + if in.HeightTrim != nil { + in, out := &in.HeightTrim, &out.HeightTrim + *out = new(float64) + **out = **in + } + if in.SurroundTrim != nil { + in, out := &in.SurroundTrim, &out.SurroundTrim + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eac3AtmosSettingsInitParameters. +func (in *Eac3AtmosSettingsInitParameters) DeepCopy() *Eac3AtmosSettingsInitParameters { + if in == nil { + return nil + } + out := new(Eac3AtmosSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Eac3AtmosSettingsObservation) DeepCopyInto(out *Eac3AtmosSettingsObservation) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.Dialnorm != nil { + in, out := &in.Dialnorm, &out.Dialnorm + *out = new(float64) + **out = **in + } + if in.DrcLine != nil { + in, out := &in.DrcLine, &out.DrcLine + *out = new(string) + **out = **in + } + if in.DrcRf != nil { + in, out := &in.DrcRf, &out.DrcRf + *out = new(string) + **out = **in + } + if in.HeightTrim != nil { + in, out := &in.HeightTrim, &out.HeightTrim + *out = new(float64) + **out = **in + } + if in.SurroundTrim != nil { + in, out := &in.SurroundTrim, &out.SurroundTrim + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eac3AtmosSettingsObservation. +func (in *Eac3AtmosSettingsObservation) DeepCopy() *Eac3AtmosSettingsObservation { + if in == nil { + return nil + } + out := new(Eac3AtmosSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Eac3AtmosSettingsParameters) DeepCopyInto(out *Eac3AtmosSettingsParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.Dialnorm != nil { + in, out := &in.Dialnorm, &out.Dialnorm + *out = new(float64) + **out = **in + } + if in.DrcLine != nil { + in, out := &in.DrcLine, &out.DrcLine + *out = new(string) + **out = **in + } + if in.DrcRf != nil { + in, out := &in.DrcRf, &out.DrcRf + *out = new(string) + **out = **in + } + if in.HeightTrim != nil { + in, out := &in.HeightTrim, &out.HeightTrim + *out = new(float64) + **out = **in + } + if in.SurroundTrim != nil { + in, out := &in.SurroundTrim, &out.SurroundTrim + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eac3AtmosSettingsParameters. +func (in *Eac3AtmosSettingsParameters) DeepCopy() *Eac3AtmosSettingsParameters { + if in == nil { + return nil + } + out := new(Eac3AtmosSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Eac3SettingsInitParameters) DeepCopyInto(out *Eac3SettingsInitParameters) { + *out = *in + if in.AttenuationControl != nil { + in, out := &in.AttenuationControl, &out.AttenuationControl + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BitstreamMode != nil { + in, out := &in.BitstreamMode, &out.BitstreamMode + *out = new(string) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.DcFilter != nil { + in, out := &in.DcFilter, &out.DcFilter + *out = new(string) + **out = **in + } + if in.Dialnorm != nil { + in, out := &in.Dialnorm, &out.Dialnorm + *out = new(float64) + **out = **in + } + if in.DrcLine != nil { + in, out := &in.DrcLine, &out.DrcLine + *out = new(string) + **out = **in + } + if in.DrcRf != nil { + in, out := &in.DrcRf, &out.DrcRf + *out = new(string) + **out = **in + } + if in.LfeControl != nil { + in, out := &in.LfeControl, &out.LfeControl + *out = new(string) + **out = **in + } + if in.LfeFilter != nil { + in, out := &in.LfeFilter, &out.LfeFilter + *out = new(string) + **out = **in + } + if in.LoRoCenterMixLevel != nil { + in, out := &in.LoRoCenterMixLevel, &out.LoRoCenterMixLevel + *out = new(float64) + **out = **in + } + if in.LoRoSurroundMixLevel != nil { + in, out := &in.LoRoSurroundMixLevel, &out.LoRoSurroundMixLevel + *out = new(float64) + **out = **in + } + if in.LtRtCenterMixLevel != nil { + in, out := &in.LtRtCenterMixLevel, &out.LtRtCenterMixLevel + *out = new(float64) + **out = **in + } + if in.LtRtSurroundMixLevel != nil { + in, out := &in.LtRtSurroundMixLevel, &out.LtRtSurroundMixLevel + *out = new(float64) + **out = **in + } + if in.MetadataControl != nil { + in, out := &in.MetadataControl, &out.MetadataControl + *out = new(string) + **out = **in + } + if in.PassthroughControl != nil { + in, out := &in.PassthroughControl, 
&out.PassthroughControl + *out = new(string) + **out = **in + } + if in.PhaseControl != nil { + in, out := &in.PhaseControl, &out.PhaseControl + *out = new(string) + **out = **in + } + if in.StereoDownmix != nil { + in, out := &in.StereoDownmix, &out.StereoDownmix + *out = new(string) + **out = **in + } + if in.SurroundExMode != nil { + in, out := &in.SurroundExMode, &out.SurroundExMode + *out = new(string) + **out = **in + } + if in.SurroundMode != nil { + in, out := &in.SurroundMode, &out.SurroundMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eac3SettingsInitParameters. +func (in *Eac3SettingsInitParameters) DeepCopy() *Eac3SettingsInitParameters { + if in == nil { + return nil + } + out := new(Eac3SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Eac3SettingsObservation) DeepCopyInto(out *Eac3SettingsObservation) { + *out = *in + if in.AttenuationControl != nil { + in, out := &in.AttenuationControl, &out.AttenuationControl + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BitstreamMode != nil { + in, out := &in.BitstreamMode, &out.BitstreamMode + *out = new(string) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.DcFilter != nil { + in, out := &in.DcFilter, &out.DcFilter + *out = new(string) + **out = **in + } + if in.Dialnorm != nil { + in, out := &in.Dialnorm, &out.Dialnorm + *out = new(float64) + **out = **in + } + if in.DrcLine != nil { + in, out := &in.DrcLine, &out.DrcLine + *out = new(string) + **out = **in + } + if in.DrcRf != nil { + in, out := &in.DrcRf, &out.DrcRf + *out = new(string) + **out = **in + } + if in.LfeControl != nil { 
+ in, out := &in.LfeControl, &out.LfeControl + *out = new(string) + **out = **in + } + if in.LfeFilter != nil { + in, out := &in.LfeFilter, &out.LfeFilter + *out = new(string) + **out = **in + } + if in.LoRoCenterMixLevel != nil { + in, out := &in.LoRoCenterMixLevel, &out.LoRoCenterMixLevel + *out = new(float64) + **out = **in + } + if in.LoRoSurroundMixLevel != nil { + in, out := &in.LoRoSurroundMixLevel, &out.LoRoSurroundMixLevel + *out = new(float64) + **out = **in + } + if in.LtRtCenterMixLevel != nil { + in, out := &in.LtRtCenterMixLevel, &out.LtRtCenterMixLevel + *out = new(float64) + **out = **in + } + if in.LtRtSurroundMixLevel != nil { + in, out := &in.LtRtSurroundMixLevel, &out.LtRtSurroundMixLevel + *out = new(float64) + **out = **in + } + if in.MetadataControl != nil { + in, out := &in.MetadataControl, &out.MetadataControl + *out = new(string) + **out = **in + } + if in.PassthroughControl != nil { + in, out := &in.PassthroughControl, &out.PassthroughControl + *out = new(string) + **out = **in + } + if in.PhaseControl != nil { + in, out := &in.PhaseControl, &out.PhaseControl + *out = new(string) + **out = **in + } + if in.StereoDownmix != nil { + in, out := &in.StereoDownmix, &out.StereoDownmix + *out = new(string) + **out = **in + } + if in.SurroundExMode != nil { + in, out := &in.SurroundExMode, &out.SurroundExMode + *out = new(string) + **out = **in + } + if in.SurroundMode != nil { + in, out := &in.SurroundMode, &out.SurroundMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eac3SettingsObservation. +func (in *Eac3SettingsObservation) DeepCopy() *Eac3SettingsObservation { + if in == nil { + return nil + } + out := new(Eac3SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Eac3SettingsParameters) DeepCopyInto(out *Eac3SettingsParameters) { + *out = *in + if in.AttenuationControl != nil { + in, out := &in.AttenuationControl, &out.AttenuationControl + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BitstreamMode != nil { + in, out := &in.BitstreamMode, &out.BitstreamMode + *out = new(string) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.DcFilter != nil { + in, out := &in.DcFilter, &out.DcFilter + *out = new(string) + **out = **in + } + if in.Dialnorm != nil { + in, out := &in.Dialnorm, &out.Dialnorm + *out = new(float64) + **out = **in + } + if in.DrcLine != nil { + in, out := &in.DrcLine, &out.DrcLine + *out = new(string) + **out = **in + } + if in.DrcRf != nil { + in, out := &in.DrcRf, &out.DrcRf + *out = new(string) + **out = **in + } + if in.LfeControl != nil { + in, out := &in.LfeControl, &out.LfeControl + *out = new(string) + **out = **in + } + if in.LfeFilter != nil { + in, out := &in.LfeFilter, &out.LfeFilter + *out = new(string) + **out = **in + } + if in.LoRoCenterMixLevel != nil { + in, out := &in.LoRoCenterMixLevel, &out.LoRoCenterMixLevel + *out = new(float64) + **out = **in + } + if in.LoRoSurroundMixLevel != nil { + in, out := &in.LoRoSurroundMixLevel, &out.LoRoSurroundMixLevel + *out = new(float64) + **out = **in + } + if in.LtRtCenterMixLevel != nil { + in, out := &in.LtRtCenterMixLevel, &out.LtRtCenterMixLevel + *out = new(float64) + **out = **in + } + if in.LtRtSurroundMixLevel != nil { + in, out := &in.LtRtSurroundMixLevel, &out.LtRtSurroundMixLevel + *out = new(float64) + **out = **in + } + if in.MetadataControl != nil { + in, out := &in.MetadataControl, &out.MetadataControl + *out = new(string) + **out = **in + } + if in.PassthroughControl != nil { + in, out := &in.PassthroughControl, &out.PassthroughControl + 
*out = new(string) + **out = **in + } + if in.PhaseControl != nil { + in, out := &in.PhaseControl, &out.PhaseControl + *out = new(string) + **out = **in + } + if in.StereoDownmix != nil { + in, out := &in.StereoDownmix, &out.StereoDownmix + *out = new(string) + **out = **in + } + if in.SurroundExMode != nil { + in, out := &in.SurroundExMode, &out.SurroundExMode + *out = new(string) + **out = **in + } + if in.SurroundMode != nil { + in, out := &in.SurroundMode, &out.SurroundMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eac3SettingsParameters. +func (in *Eac3SettingsParameters) DeepCopy() *Eac3SettingsParameters { + if in == nil { + return nil + } + out := new(Eac3SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EbuTtDDestinationSettingsInitParameters) DeepCopyInto(out *EbuTtDDestinationSettingsInitParameters) { + *out = *in + if in.CopyrightHolder != nil { + in, out := &in.CopyrightHolder, &out.CopyrightHolder + *out = new(string) + **out = **in + } + if in.FillLineGap != nil { + in, out := &in.FillLineGap, &out.FillLineGap + *out = new(string) + **out = **in + } + if in.FontFamily != nil { + in, out := &in.FontFamily, &out.FontFamily + *out = new(string) + **out = **in + } + if in.StyleControl != nil { + in, out := &in.StyleControl, &out.StyleControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EbuTtDDestinationSettingsInitParameters. 
+func (in *EbuTtDDestinationSettingsInitParameters) DeepCopy() *EbuTtDDestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(EbuTtDDestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EbuTtDDestinationSettingsObservation) DeepCopyInto(out *EbuTtDDestinationSettingsObservation) { + *out = *in + if in.CopyrightHolder != nil { + in, out := &in.CopyrightHolder, &out.CopyrightHolder + *out = new(string) + **out = **in + } + if in.FillLineGap != nil { + in, out := &in.FillLineGap, &out.FillLineGap + *out = new(string) + **out = **in + } + if in.FontFamily != nil { + in, out := &in.FontFamily, &out.FontFamily + *out = new(string) + **out = **in + } + if in.StyleControl != nil { + in, out := &in.StyleControl, &out.StyleControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EbuTtDDestinationSettingsObservation. +func (in *EbuTtDDestinationSettingsObservation) DeepCopy() *EbuTtDDestinationSettingsObservation { + if in == nil { + return nil + } + out := new(EbuTtDDestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EbuTtDDestinationSettingsParameters) DeepCopyInto(out *EbuTtDDestinationSettingsParameters) { + *out = *in + if in.CopyrightHolder != nil { + in, out := &in.CopyrightHolder, &out.CopyrightHolder + *out = new(string) + **out = **in + } + if in.FillLineGap != nil { + in, out := &in.FillLineGap, &out.FillLineGap + *out = new(string) + **out = **in + } + if in.FontFamily != nil { + in, out := &in.FontFamily, &out.FontFamily + *out = new(string) + **out = **in + } + if in.StyleControl != nil { + in, out := &in.StyleControl, &out.StyleControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EbuTtDDestinationSettingsParameters. +func (in *EbuTtDDestinationSettingsParameters) DeepCopy() *EbuTtDDestinationSettingsParameters { + if in == nil { + return nil + } + out := new(EbuTtDDestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedDestinationSettingsInitParameters) DeepCopyInto(out *EmbeddedDestinationSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedDestinationSettingsInitParameters. +func (in *EmbeddedDestinationSettingsInitParameters) DeepCopy() *EmbeddedDestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(EmbeddedDestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedDestinationSettingsObservation) DeepCopyInto(out *EmbeddedDestinationSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedDestinationSettingsObservation. 
+func (in *EmbeddedDestinationSettingsObservation) DeepCopy() *EmbeddedDestinationSettingsObservation { + if in == nil { + return nil + } + out := new(EmbeddedDestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedDestinationSettingsParameters) DeepCopyInto(out *EmbeddedDestinationSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedDestinationSettingsParameters. +func (in *EmbeddedDestinationSettingsParameters) DeepCopy() *EmbeddedDestinationSettingsParameters { + if in == nil { + return nil + } + out := new(EmbeddedDestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedPlusScte20DestinationSettingsInitParameters) DeepCopyInto(out *EmbeddedPlusScte20DestinationSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedPlusScte20DestinationSettingsInitParameters. +func (in *EmbeddedPlusScte20DestinationSettingsInitParameters) DeepCopy() *EmbeddedPlusScte20DestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(EmbeddedPlusScte20DestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedPlusScte20DestinationSettingsObservation) DeepCopyInto(out *EmbeddedPlusScte20DestinationSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedPlusScte20DestinationSettingsObservation. 
+func (in *EmbeddedPlusScte20DestinationSettingsObservation) DeepCopy() *EmbeddedPlusScte20DestinationSettingsObservation { + if in == nil { + return nil + } + out := new(EmbeddedPlusScte20DestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedPlusScte20DestinationSettingsParameters) DeepCopyInto(out *EmbeddedPlusScte20DestinationSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedPlusScte20DestinationSettingsParameters. +func (in *EmbeddedPlusScte20DestinationSettingsParameters) DeepCopy() *EmbeddedPlusScte20DestinationSettingsParameters { + if in == nil { + return nil + } + out := new(EmbeddedPlusScte20DestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedSourceSettingsInitParameters) DeepCopyInto(out *EmbeddedSourceSettingsInitParameters) { + *out = *in + if in.Convert608To708 != nil { + in, out := &in.Convert608To708, &out.Convert608To708 + *out = new(string) + **out = **in + } + if in.Scte20Detection != nil { + in, out := &in.Scte20Detection, &out.Scte20Detection + *out = new(string) + **out = **in + } + if in.Source608ChannelNumber != nil { + in, out := &in.Source608ChannelNumber, &out.Source608ChannelNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedSourceSettingsInitParameters. 
+func (in *EmbeddedSourceSettingsInitParameters) DeepCopy() *EmbeddedSourceSettingsInitParameters { + if in == nil { + return nil + } + out := new(EmbeddedSourceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedSourceSettingsObservation) DeepCopyInto(out *EmbeddedSourceSettingsObservation) { + *out = *in + if in.Convert608To708 != nil { + in, out := &in.Convert608To708, &out.Convert608To708 + *out = new(string) + **out = **in + } + if in.Scte20Detection != nil { + in, out := &in.Scte20Detection, &out.Scte20Detection + *out = new(string) + **out = **in + } + if in.Source608ChannelNumber != nil { + in, out := &in.Source608ChannelNumber, &out.Source608ChannelNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedSourceSettingsObservation. +func (in *EmbeddedSourceSettingsObservation) DeepCopy() *EmbeddedSourceSettingsObservation { + if in == nil { + return nil + } + out := new(EmbeddedSourceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedSourceSettingsParameters) DeepCopyInto(out *EmbeddedSourceSettingsParameters) { + *out = *in + if in.Convert608To708 != nil { + in, out := &in.Convert608To708, &out.Convert608To708 + *out = new(string) + **out = **in + } + if in.Scte20Detection != nil { + in, out := &in.Scte20Detection, &out.Scte20Detection + *out = new(string) + **out = **in + } + if in.Source608ChannelNumber != nil { + in, out := &in.Source608ChannelNumber, &out.Source608ChannelNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedSourceSettingsParameters. 
+func (in *EmbeddedSourceSettingsParameters) DeepCopy() *EmbeddedSourceSettingsParameters { + if in == nil { + return nil + } + out := new(EmbeddedSourceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncoderSettingsInitParameters) DeepCopyInto(out *EncoderSettingsInitParameters) { + *out = *in + if in.AudioDescriptions != nil { + in, out := &in.AudioDescriptions, &out.AudioDescriptions + *out = make([]AudioDescriptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailBlanking != nil { + in, out := &in.AvailBlanking, &out.AvailBlanking + *out = new(AvailBlankingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CaptionDescriptions != nil { + in, out := &in.CaptionDescriptions, &out.CaptionDescriptions + *out = make([]CaptionDescriptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GlobalConfiguration != nil { + in, out := &in.GlobalConfiguration, &out.GlobalConfiguration + *out = new(GlobalConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MotionGraphicsConfiguration != nil { + in, out := &in.MotionGraphicsConfiguration, &out.MotionGraphicsConfiguration + *out = new(MotionGraphicsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NielsenConfiguration != nil { + in, out := &in.NielsenConfiguration, &out.NielsenConfiguration + *out = new(NielsenConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputGroups != nil { + in, out := &in.OutputGroups, &out.OutputGroups + *out = make([]OutputGroupsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimecodeConfig != nil { + in, out := &in.TimecodeConfig, &out.TimecodeConfig + *out = new(TimecodeConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VideoDescriptions != 
nil { + in, out := &in.VideoDescriptions, &out.VideoDescriptions + *out = make([]VideoDescriptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncoderSettingsInitParameters. +func (in *EncoderSettingsInitParameters) DeepCopy() *EncoderSettingsInitParameters { + if in == nil { + return nil + } + out := new(EncoderSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncoderSettingsObservation) DeepCopyInto(out *EncoderSettingsObservation) { + *out = *in + if in.AudioDescriptions != nil { + in, out := &in.AudioDescriptions, &out.AudioDescriptions + *out = make([]AudioDescriptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailBlanking != nil { + in, out := &in.AvailBlanking, &out.AvailBlanking + *out = new(AvailBlankingObservation) + (*in).DeepCopyInto(*out) + } + if in.CaptionDescriptions != nil { + in, out := &in.CaptionDescriptions, &out.CaptionDescriptions + *out = make([]CaptionDescriptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GlobalConfiguration != nil { + in, out := &in.GlobalConfiguration, &out.GlobalConfiguration + *out = new(GlobalConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.MotionGraphicsConfiguration != nil { + in, out := &in.MotionGraphicsConfiguration, &out.MotionGraphicsConfiguration + *out = new(MotionGraphicsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.NielsenConfiguration != nil { + in, out := &in.NielsenConfiguration, &out.NielsenConfiguration + *out = new(NielsenConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.OutputGroups != nil { + in, out := &in.OutputGroups, &out.OutputGroups + *out = 
make([]OutputGroupsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimecodeConfig != nil { + in, out := &in.TimecodeConfig, &out.TimecodeConfig + *out = new(TimecodeConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.VideoDescriptions != nil { + in, out := &in.VideoDescriptions, &out.VideoDescriptions + *out = make([]VideoDescriptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncoderSettingsObservation. +func (in *EncoderSettingsObservation) DeepCopy() *EncoderSettingsObservation { + if in == nil { + return nil + } + out := new(EncoderSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncoderSettingsParameters) DeepCopyInto(out *EncoderSettingsParameters) { + *out = *in + if in.AudioDescriptions != nil { + in, out := &in.AudioDescriptions, &out.AudioDescriptions + *out = make([]AudioDescriptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailBlanking != nil { + in, out := &in.AvailBlanking, &out.AvailBlanking + *out = new(AvailBlankingParameters) + (*in).DeepCopyInto(*out) + } + if in.CaptionDescriptions != nil { + in, out := &in.CaptionDescriptions, &out.CaptionDescriptions + *out = make([]CaptionDescriptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GlobalConfiguration != nil { + in, out := &in.GlobalConfiguration, &out.GlobalConfiguration + *out = new(GlobalConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.MotionGraphicsConfiguration != nil { + in, out := &in.MotionGraphicsConfiguration, &out.MotionGraphicsConfiguration + *out = new(MotionGraphicsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if 
in.NielsenConfiguration != nil { + in, out := &in.NielsenConfiguration, &out.NielsenConfiguration + *out = new(NielsenConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputGroups != nil { + in, out := &in.OutputGroups, &out.OutputGroups + *out = make([]OutputGroupsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimecodeConfig != nil { + in, out := &in.TimecodeConfig, &out.TimecodeConfig + *out = new(TimecodeConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.VideoDescriptions != nil { + in, out := &in.VideoDescriptions, &out.VideoDescriptions + *out = make([]VideoDescriptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncoderSettingsParameters. +func (in *EncoderSettingsParameters) DeepCopy() *EncoderSettingsParameters { + if in == nil { + return nil + } + out := new(EncoderSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverConditionInitParameters) DeepCopyInto(out *FailoverConditionInitParameters) { + *out = *in + if in.FailoverConditionSettings != nil { + in, out := &in.FailoverConditionSettings, &out.FailoverConditionSettings + *out = new(FailoverConditionSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverConditionInitParameters. +func (in *FailoverConditionInitParameters) DeepCopy() *FailoverConditionInitParameters { + if in == nil { + return nil + } + out := new(FailoverConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FailoverConditionObservation) DeepCopyInto(out *FailoverConditionObservation) { + *out = *in + if in.FailoverConditionSettings != nil { + in, out := &in.FailoverConditionSettings, &out.FailoverConditionSettings + *out = new(FailoverConditionSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverConditionObservation. +func (in *FailoverConditionObservation) DeepCopy() *FailoverConditionObservation { + if in == nil { + return nil + } + out := new(FailoverConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverConditionParameters) DeepCopyInto(out *FailoverConditionParameters) { + *out = *in + if in.FailoverConditionSettings != nil { + in, out := &in.FailoverConditionSettings, &out.FailoverConditionSettings + *out = new(FailoverConditionSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverConditionParameters. +func (in *FailoverConditionParameters) DeepCopy() *FailoverConditionParameters { + if in == nil { + return nil + } + out := new(FailoverConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FailoverConditionSettingsInitParameters) DeepCopyInto(out *FailoverConditionSettingsInitParameters) { + *out = *in + if in.AudioSilenceSettings != nil { + in, out := &in.AudioSilenceSettings, &out.AudioSilenceSettings + *out = new(AudioSilenceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputLossSettings != nil { + in, out := &in.InputLossSettings, &out.InputLossSettings + *out = new(InputLossSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VideoBlackSettings != nil { + in, out := &in.VideoBlackSettings, &out.VideoBlackSettings + *out = new(VideoBlackSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverConditionSettingsInitParameters. +func (in *FailoverConditionSettingsInitParameters) DeepCopy() *FailoverConditionSettingsInitParameters { + if in == nil { + return nil + } + out := new(FailoverConditionSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverConditionSettingsObservation) DeepCopyInto(out *FailoverConditionSettingsObservation) { + *out = *in + if in.AudioSilenceSettings != nil { + in, out := &in.AudioSilenceSettings, &out.AudioSilenceSettings + *out = new(AudioSilenceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.InputLossSettings != nil { + in, out := &in.InputLossSettings, &out.InputLossSettings + *out = new(InputLossSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.VideoBlackSettings != nil { + in, out := &in.VideoBlackSettings, &out.VideoBlackSettings + *out = new(VideoBlackSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverConditionSettingsObservation. 
+func (in *FailoverConditionSettingsObservation) DeepCopy() *FailoverConditionSettingsObservation { + if in == nil { + return nil + } + out := new(FailoverConditionSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverConditionSettingsParameters) DeepCopyInto(out *FailoverConditionSettingsParameters) { + *out = *in + if in.AudioSilenceSettings != nil { + in, out := &in.AudioSilenceSettings, &out.AudioSilenceSettings + *out = new(AudioSilenceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.InputLossSettings != nil { + in, out := &in.InputLossSettings, &out.InputLossSettings + *out = new(InputLossSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.VideoBlackSettings != nil { + in, out := &in.VideoBlackSettings, &out.VideoBlackSettings + *out = new(VideoBlackSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverConditionSettingsParameters. +func (in *FailoverConditionSettingsParameters) DeepCopy() *FailoverConditionSettingsParameters { + if in == nil { + return nil + } + out := new(FailoverConditionSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FecOutputSettingsInitParameters) DeepCopyInto(out *FecOutputSettingsInitParameters) { + *out = *in + if in.ColumnDepth != nil { + in, out := &in.ColumnDepth, &out.ColumnDepth + *out = new(float64) + **out = **in + } + if in.IncludeFec != nil { + in, out := &in.IncludeFec, &out.IncludeFec + *out = new(string) + **out = **in + } + if in.RowLength != nil { + in, out := &in.RowLength, &out.RowLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FecOutputSettingsInitParameters. +func (in *FecOutputSettingsInitParameters) DeepCopy() *FecOutputSettingsInitParameters { + if in == nil { + return nil + } + out := new(FecOutputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FecOutputSettingsObservation) DeepCopyInto(out *FecOutputSettingsObservation) { + *out = *in + if in.ColumnDepth != nil { + in, out := &in.ColumnDepth, &out.ColumnDepth + *out = new(float64) + **out = **in + } + if in.IncludeFec != nil { + in, out := &in.IncludeFec, &out.IncludeFec + *out = new(string) + **out = **in + } + if in.RowLength != nil { + in, out := &in.RowLength, &out.RowLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FecOutputSettingsObservation. +func (in *FecOutputSettingsObservation) DeepCopy() *FecOutputSettingsObservation { + if in == nil { + return nil + } + out := new(FecOutputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FecOutputSettingsParameters) DeepCopyInto(out *FecOutputSettingsParameters) { + *out = *in + if in.ColumnDepth != nil { + in, out := &in.ColumnDepth, &out.ColumnDepth + *out = new(float64) + **out = **in + } + if in.IncludeFec != nil { + in, out := &in.IncludeFec, &out.IncludeFec + *out = new(string) + **out = **in + } + if in.RowLength != nil { + in, out := &in.RowLength, &out.RowLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FecOutputSettingsParameters. +func (in *FecOutputSettingsParameters) DeepCopy() *FecOutputSettingsParameters { + if in == nil { + return nil + } + out := new(FecOutputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterSettingsInitParameters) DeepCopyInto(out *FilterSettingsInitParameters) { + *out = *in + if in.TemporalFilterSettings != nil { + in, out := &in.TemporalFilterSettings, &out.TemporalFilterSettings + *out = new(TemporalFilterSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSettingsInitParameters. +func (in *FilterSettingsInitParameters) DeepCopy() *FilterSettingsInitParameters { + if in == nil { + return nil + } + out := new(FilterSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterSettingsObservation) DeepCopyInto(out *FilterSettingsObservation) { + *out = *in + if in.TemporalFilterSettings != nil { + in, out := &in.TemporalFilterSettings, &out.TemporalFilterSettings + *out = new(TemporalFilterSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSettingsObservation. +func (in *FilterSettingsObservation) DeepCopy() *FilterSettingsObservation { + if in == nil { + return nil + } + out := new(FilterSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterSettingsParameters) DeepCopyInto(out *FilterSettingsParameters) { + *out = *in + if in.TemporalFilterSettings != nil { + in, out := &in.TemporalFilterSettings, &out.TemporalFilterSettings + *out = new(TemporalFilterSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSettingsParameters. +func (in *FilterSettingsParameters) DeepCopy() *FilterSettingsParameters { + if in == nil { + return nil + } + out := new(FilterSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterSettingsTemporalFilterSettingsInitParameters) DeepCopyInto(out *FilterSettingsTemporalFilterSettingsInitParameters) { + *out = *in + if in.PostFilterSharpening != nil { + in, out := &in.PostFilterSharpening, &out.PostFilterSharpening + *out = new(string) + **out = **in + } + if in.Strength != nil { + in, out := &in.Strength, &out.Strength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSettingsTemporalFilterSettingsInitParameters. 
+func (in *FilterSettingsTemporalFilterSettingsInitParameters) DeepCopy() *FilterSettingsTemporalFilterSettingsInitParameters { + if in == nil { + return nil + } + out := new(FilterSettingsTemporalFilterSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterSettingsTemporalFilterSettingsObservation) DeepCopyInto(out *FilterSettingsTemporalFilterSettingsObservation) { + *out = *in + if in.PostFilterSharpening != nil { + in, out := &in.PostFilterSharpening, &out.PostFilterSharpening + *out = new(string) + **out = **in + } + if in.Strength != nil { + in, out := &in.Strength, &out.Strength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSettingsTemporalFilterSettingsObservation. +func (in *FilterSettingsTemporalFilterSettingsObservation) DeepCopy() *FilterSettingsTemporalFilterSettingsObservation { + if in == nil { + return nil + } + out := new(FilterSettingsTemporalFilterSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterSettingsTemporalFilterSettingsParameters) DeepCopyInto(out *FilterSettingsTemporalFilterSettingsParameters) { + *out = *in + if in.PostFilterSharpening != nil { + in, out := &in.PostFilterSharpening, &out.PostFilterSharpening + *out = new(string) + **out = **in + } + if in.Strength != nil { + in, out := &in.Strength, &out.Strength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterSettingsTemporalFilterSettingsParameters. 
+func (in *FilterSettingsTemporalFilterSettingsParameters) DeepCopy() *FilterSettingsTemporalFilterSettingsParameters { + if in == nil { + return nil + } + out := new(FilterSettingsTemporalFilterSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Fmp4HlsSettingsInitParameters) DeepCopyInto(out *Fmp4HlsSettingsInitParameters) { + *out = *in + if in.AudioRenditionSets != nil { + in, out := &in.AudioRenditionSets, &out.AudioRenditionSets + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fmp4HlsSettingsInitParameters. +func (in *Fmp4HlsSettingsInitParameters) DeepCopy() *Fmp4HlsSettingsInitParameters { + if in == nil { + return nil + } + out := new(Fmp4HlsSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Fmp4HlsSettingsObservation) DeepCopyInto(out *Fmp4HlsSettingsObservation) { + *out = *in + if in.AudioRenditionSets != nil { + in, out := &in.AudioRenditionSets, &out.AudioRenditionSets + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fmp4HlsSettingsObservation. 
+func (in *Fmp4HlsSettingsObservation) DeepCopy() *Fmp4HlsSettingsObservation { + if in == nil { + return nil + } + out := new(Fmp4HlsSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Fmp4HlsSettingsParameters) DeepCopyInto(out *Fmp4HlsSettingsParameters) { + *out = *in + if in.AudioRenditionSets != nil { + in, out := &in.AudioRenditionSets, &out.AudioRenditionSets + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fmp4HlsSettingsParameters. +func (in *Fmp4HlsSettingsParameters) DeepCopy() *Fmp4HlsSettingsParameters { + if in == nil { + return nil + } + out := new(Fmp4HlsSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FontInitParameters) DeepCopyInto(out *FontInitParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FontInitParameters. 
+func (in *FontInitParameters) DeepCopy() *FontInitParameters { + if in == nil { + return nil + } + out := new(FontInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FontObservation) DeepCopyInto(out *FontObservation) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FontObservation. +func (in *FontObservation) DeepCopy() *FontObservation { + if in == nil { + return nil + } + out := new(FontObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FontParameters) DeepCopyInto(out *FontParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FontParameters. +func (in *FontParameters) DeepCopy() *FontParameters { + if in == nil { + return nil + } + out := new(FontParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrameCaptureCdnSettingsInitParameters) DeepCopyInto(out *FrameCaptureCdnSettingsInitParameters) { + *out = *in + if in.FrameCaptureS3Settings != nil { + in, out := &in.FrameCaptureS3Settings, &out.FrameCaptureS3Settings + *out = new(FrameCaptureS3SettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureCdnSettingsInitParameters. +func (in *FrameCaptureCdnSettingsInitParameters) DeepCopy() *FrameCaptureCdnSettingsInitParameters { + if in == nil { + return nil + } + out := new(FrameCaptureCdnSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureCdnSettingsObservation) DeepCopyInto(out *FrameCaptureCdnSettingsObservation) { + *out = *in + if in.FrameCaptureS3Settings != nil { + in, out := &in.FrameCaptureS3Settings, &out.FrameCaptureS3Settings + *out = new(FrameCaptureS3SettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureCdnSettingsObservation. +func (in *FrameCaptureCdnSettingsObservation) DeepCopy() *FrameCaptureCdnSettingsObservation { + if in == nil { + return nil + } + out := new(FrameCaptureCdnSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureCdnSettingsParameters) DeepCopyInto(out *FrameCaptureCdnSettingsParameters) { + *out = *in + if in.FrameCaptureS3Settings != nil { + in, out := &in.FrameCaptureS3Settings, &out.FrameCaptureS3Settings + *out = new(FrameCaptureS3SettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureCdnSettingsParameters. 
+func (in *FrameCaptureCdnSettingsParameters) DeepCopy() *FrameCaptureCdnSettingsParameters { + if in == nil { + return nil + } + out := new(FrameCaptureCdnSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureGroupSettingsDestinationInitParameters) DeepCopyInto(out *FrameCaptureGroupSettingsDestinationInitParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureGroupSettingsDestinationInitParameters. +func (in *FrameCaptureGroupSettingsDestinationInitParameters) DeepCopy() *FrameCaptureGroupSettingsDestinationInitParameters { + if in == nil { + return nil + } + out := new(FrameCaptureGroupSettingsDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureGroupSettingsDestinationObservation) DeepCopyInto(out *FrameCaptureGroupSettingsDestinationObservation) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureGroupSettingsDestinationObservation. +func (in *FrameCaptureGroupSettingsDestinationObservation) DeepCopy() *FrameCaptureGroupSettingsDestinationObservation { + if in == nil { + return nil + } + out := new(FrameCaptureGroupSettingsDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrameCaptureGroupSettingsDestinationParameters) DeepCopyInto(out *FrameCaptureGroupSettingsDestinationParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureGroupSettingsDestinationParameters. +func (in *FrameCaptureGroupSettingsDestinationParameters) DeepCopy() *FrameCaptureGroupSettingsDestinationParameters { + if in == nil { + return nil + } + out := new(FrameCaptureGroupSettingsDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureGroupSettingsInitParameters) DeepCopyInto(out *FrameCaptureGroupSettingsInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(FrameCaptureGroupSettingsDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FrameCaptureCdnSettings != nil { + in, out := &in.FrameCaptureCdnSettings, &out.FrameCaptureCdnSettings + *out = new(FrameCaptureCdnSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureGroupSettingsInitParameters. +func (in *FrameCaptureGroupSettingsInitParameters) DeepCopy() *FrameCaptureGroupSettingsInitParameters { + if in == nil { + return nil + } + out := new(FrameCaptureGroupSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrameCaptureGroupSettingsObservation) DeepCopyInto(out *FrameCaptureGroupSettingsObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(FrameCaptureGroupSettingsDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.FrameCaptureCdnSettings != nil { + in, out := &in.FrameCaptureCdnSettings, &out.FrameCaptureCdnSettings + *out = new(FrameCaptureCdnSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureGroupSettingsObservation. +func (in *FrameCaptureGroupSettingsObservation) DeepCopy() *FrameCaptureGroupSettingsObservation { + if in == nil { + return nil + } + out := new(FrameCaptureGroupSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureGroupSettingsParameters) DeepCopyInto(out *FrameCaptureGroupSettingsParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(FrameCaptureGroupSettingsDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.FrameCaptureCdnSettings != nil { + in, out := &in.FrameCaptureCdnSettings, &out.FrameCaptureCdnSettings + *out = new(FrameCaptureCdnSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureGroupSettingsParameters. +func (in *FrameCaptureGroupSettingsParameters) DeepCopy() *FrameCaptureGroupSettingsParameters { + if in == nil { + return nil + } + out := new(FrameCaptureGroupSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrameCaptureHlsSettingsInitParameters) DeepCopyInto(out *FrameCaptureHlsSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureHlsSettingsInitParameters. +func (in *FrameCaptureHlsSettingsInitParameters) DeepCopy() *FrameCaptureHlsSettingsInitParameters { + if in == nil { + return nil + } + out := new(FrameCaptureHlsSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureHlsSettingsObservation) DeepCopyInto(out *FrameCaptureHlsSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureHlsSettingsObservation. +func (in *FrameCaptureHlsSettingsObservation) DeepCopy() *FrameCaptureHlsSettingsObservation { + if in == nil { + return nil + } + out := new(FrameCaptureHlsSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureHlsSettingsParameters) DeepCopyInto(out *FrameCaptureHlsSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureHlsSettingsParameters. +func (in *FrameCaptureHlsSettingsParameters) DeepCopy() *FrameCaptureHlsSettingsParameters { + if in == nil { + return nil + } + out := new(FrameCaptureHlsSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrameCaptureOutputSettingsInitParameters) DeepCopyInto(out *FrameCaptureOutputSettingsInitParameters) { + *out = *in + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureOutputSettingsInitParameters. +func (in *FrameCaptureOutputSettingsInitParameters) DeepCopy() *FrameCaptureOutputSettingsInitParameters { + if in == nil { + return nil + } + out := new(FrameCaptureOutputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureOutputSettingsObservation) DeepCopyInto(out *FrameCaptureOutputSettingsObservation) { + *out = *in + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureOutputSettingsObservation. +func (in *FrameCaptureOutputSettingsObservation) DeepCopy() *FrameCaptureOutputSettingsObservation { + if in == nil { + return nil + } + out := new(FrameCaptureOutputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureOutputSettingsParameters) DeepCopyInto(out *FrameCaptureOutputSettingsParameters) { + *out = *in + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureOutputSettingsParameters. 
+func (in *FrameCaptureOutputSettingsParameters) DeepCopy() *FrameCaptureOutputSettingsParameters { + if in == nil { + return nil + } + out := new(FrameCaptureOutputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureS3SettingsInitParameters) DeepCopyInto(out *FrameCaptureS3SettingsInitParameters) { + *out = *in + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureS3SettingsInitParameters. +func (in *FrameCaptureS3SettingsInitParameters) DeepCopy() *FrameCaptureS3SettingsInitParameters { + if in == nil { + return nil + } + out := new(FrameCaptureS3SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureS3SettingsObservation) DeepCopyInto(out *FrameCaptureS3SettingsObservation) { + *out = *in + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureS3SettingsObservation. +func (in *FrameCaptureS3SettingsObservation) DeepCopy() *FrameCaptureS3SettingsObservation { + if in == nil { + return nil + } + out := new(FrameCaptureS3SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrameCaptureS3SettingsParameters) DeepCopyInto(out *FrameCaptureS3SettingsParameters) { + *out = *in + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureS3SettingsParameters. +func (in *FrameCaptureS3SettingsParameters) DeepCopy() *FrameCaptureS3SettingsParameters { + if in == nil { + return nil + } + out := new(FrameCaptureS3SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureSettingsInitParameters) DeepCopyInto(out *FrameCaptureSettingsInitParameters) { + *out = *in + if in.CaptureInterval != nil { + in, out := &in.CaptureInterval, &out.CaptureInterval + *out = new(float64) + **out = **in + } + if in.CaptureIntervalUnits != nil { + in, out := &in.CaptureIntervalUnits, &out.CaptureIntervalUnits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureSettingsInitParameters. +func (in *FrameCaptureSettingsInitParameters) DeepCopy() *FrameCaptureSettingsInitParameters { + if in == nil { + return nil + } + out := new(FrameCaptureSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrameCaptureSettingsObservation) DeepCopyInto(out *FrameCaptureSettingsObservation) { + *out = *in + if in.CaptureInterval != nil { + in, out := &in.CaptureInterval, &out.CaptureInterval + *out = new(float64) + **out = **in + } + if in.CaptureIntervalUnits != nil { + in, out := &in.CaptureIntervalUnits, &out.CaptureIntervalUnits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureSettingsObservation. +func (in *FrameCaptureSettingsObservation) DeepCopy() *FrameCaptureSettingsObservation { + if in == nil { + return nil + } + out := new(FrameCaptureSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrameCaptureSettingsParameters) DeepCopyInto(out *FrameCaptureSettingsParameters) { + *out = *in + if in.CaptureInterval != nil { + in, out := &in.CaptureInterval, &out.CaptureInterval + *out = new(float64) + **out = **in + } + if in.CaptureIntervalUnits != nil { + in, out := &in.CaptureIntervalUnits, &out.CaptureIntervalUnits + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrameCaptureSettingsParameters. +func (in *FrameCaptureSettingsParameters) DeepCopy() *FrameCaptureSettingsParameters { + if in == nil { + return nil + } + out := new(FrameCaptureSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalConfigurationInitParameters) DeepCopyInto(out *GlobalConfigurationInitParameters) { + *out = *in + if in.InitialAudioGain != nil { + in, out := &in.InitialAudioGain, &out.InitialAudioGain + *out = new(float64) + **out = **in + } + if in.InputEndAction != nil { + in, out := &in.InputEndAction, &out.InputEndAction + *out = new(string) + **out = **in + } + if in.InputLossBehavior != nil { + in, out := &in.InputLossBehavior, &out.InputLossBehavior + *out = new(InputLossBehaviorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputLockingMode != nil { + in, out := &in.OutputLockingMode, &out.OutputLockingMode + *out = new(string) + **out = **in + } + if in.OutputTimingSource != nil { + in, out := &in.OutputTimingSource, &out.OutputTimingSource + *out = new(string) + **out = **in + } + if in.SupportLowFramerateInputs != nil { + in, out := &in.SupportLowFramerateInputs, &out.SupportLowFramerateInputs + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalConfigurationInitParameters. +func (in *GlobalConfigurationInitParameters) DeepCopy() *GlobalConfigurationInitParameters { + if in == nil { + return nil + } + out := new(GlobalConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalConfigurationObservation) DeepCopyInto(out *GlobalConfigurationObservation) { + *out = *in + if in.InitialAudioGain != nil { + in, out := &in.InitialAudioGain, &out.InitialAudioGain + *out = new(float64) + **out = **in + } + if in.InputEndAction != nil { + in, out := &in.InputEndAction, &out.InputEndAction + *out = new(string) + **out = **in + } + if in.InputLossBehavior != nil { + in, out := &in.InputLossBehavior, &out.InputLossBehavior + *out = new(InputLossBehaviorObservation) + (*in).DeepCopyInto(*out) + } + if in.OutputLockingMode != nil { + in, out := &in.OutputLockingMode, &out.OutputLockingMode + *out = new(string) + **out = **in + } + if in.OutputTimingSource != nil { + in, out := &in.OutputTimingSource, &out.OutputTimingSource + *out = new(string) + **out = **in + } + if in.SupportLowFramerateInputs != nil { + in, out := &in.SupportLowFramerateInputs, &out.SupportLowFramerateInputs + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalConfigurationObservation. +func (in *GlobalConfigurationObservation) DeepCopy() *GlobalConfigurationObservation { + if in == nil { + return nil + } + out := new(GlobalConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalConfigurationParameters) DeepCopyInto(out *GlobalConfigurationParameters) { + *out = *in + if in.InitialAudioGain != nil { + in, out := &in.InitialAudioGain, &out.InitialAudioGain + *out = new(float64) + **out = **in + } + if in.InputEndAction != nil { + in, out := &in.InputEndAction, &out.InputEndAction + *out = new(string) + **out = **in + } + if in.InputLossBehavior != nil { + in, out := &in.InputLossBehavior, &out.InputLossBehavior + *out = new(InputLossBehaviorParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputLockingMode != nil { + in, out := &in.OutputLockingMode, &out.OutputLockingMode + *out = new(string) + **out = **in + } + if in.OutputTimingSource != nil { + in, out := &in.OutputTimingSource, &out.OutputTimingSource + *out = new(string) + **out = **in + } + if in.SupportLowFramerateInputs != nil { + in, out := &in.SupportLowFramerateInputs, &out.SupportLowFramerateInputs + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalConfigurationParameters. +func (in *GlobalConfigurationParameters) DeepCopy() *GlobalConfigurationParameters { + if in == nil { + return nil + } + out := new(GlobalConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *H264SettingsInitParameters) DeepCopyInto(out *H264SettingsInitParameters) { + *out = *in + if in.AdaptiveQuantization != nil { + in, out := &in.AdaptiveQuantization, &out.AdaptiveQuantization + *out = new(string) + **out = **in + } + if in.AfdSignaling != nil { + in, out := &in.AfdSignaling, &out.AfdSignaling + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufFillPct != nil { + in, out := &in.BufFillPct, &out.BufFillPct + *out = new(float64) + **out = **in + } + if in.BufSize != nil { + in, out := &in.BufSize, &out.BufSize + *out = new(float64) + **out = **in + } + if in.ColorMetadata != nil { + in, out := &in.ColorMetadata, &out.ColorMetadata + *out = new(string) + **out = **in + } + if in.EntropyEncoding != nil { + in, out := &in.EntropyEncoding, &out.EntropyEncoding + *out = new(string) + **out = **in + } + if in.FilterSettings != nil { + in, out := &in.FilterSettings, &out.FilterSettings + *out = new(FilterSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedAfd != nil { + in, out := &in.FixedAfd, &out.FixedAfd + *out = new(string) + **out = **in + } + if in.FlickerAq != nil { + in, out := &in.FlickerAq, &out.FlickerAq + *out = new(string) + **out = **in + } + if in.ForceFieldPictures != nil { + in, out := &in.ForceFieldPictures, &out.ForceFieldPictures + *out = new(string) + **out = **in + } + if in.FramerateControl != nil { + in, out := &in.FramerateControl, &out.FramerateControl + *out = new(string) + **out = **in + } + if in.FramerateDenominator != nil { + in, out := &in.FramerateDenominator, &out.FramerateDenominator + *out = new(float64) + **out = **in + } + if in.FramerateNumerator != nil { + in, out := &in.FramerateNumerator, &out.FramerateNumerator + *out = new(float64) + **out = **in + } + if in.GopBReference != nil { + in, out := &in.GopBReference, &out.GopBReference + *out = new(string) + **out = **in + } + if 
in.GopClosedCadence != nil { + in, out := &in.GopClosedCadence, &out.GopClosedCadence + *out = new(float64) + **out = **in + } + if in.GopNumBFrames != nil { + in, out := &in.GopNumBFrames, &out.GopNumBFrames + *out = new(float64) + **out = **in + } + if in.GopSize != nil { + in, out := &in.GopSize, &out.GopSize + *out = new(float64) + **out = **in + } + if in.GopSizeUnits != nil { + in, out := &in.GopSizeUnits, &out.GopSizeUnits + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.LookAheadRateControl != nil { + in, out := &in.LookAheadRateControl, &out.LookAheadRateControl + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.MinIInterval != nil { + in, out := &in.MinIInterval, &out.MinIInterval + *out = new(float64) + **out = **in + } + if in.NumRefFrames != nil { + in, out := &in.NumRefFrames, &out.NumRefFrames + *out = new(float64) + **out = **in + } + if in.ParControl != nil { + in, out := &in.ParControl, &out.ParControl + *out = new(string) + **out = **in + } + if in.ParDenominator != nil { + in, out := &in.ParDenominator, &out.ParDenominator + *out = new(float64) + **out = **in + } + if in.ParNumerator != nil { + in, out := &in.ParNumerator, &out.ParNumerator + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.QualityLevel != nil { + in, out := &in.QualityLevel, &out.QualityLevel + *out = new(string) + **out = **in + } + if in.QvbrQualityLevel != nil { + in, out := &in.QvbrQualityLevel, &out.QvbrQualityLevel + *out = new(float64) + **out = **in + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.ScanType != nil { + in, out := &in.ScanType, &out.ScanType + *out = new(string) 
+ **out = **in + } + if in.SceneChangeDetect != nil { + in, out := &in.SceneChangeDetect, &out.SceneChangeDetect + *out = new(string) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Softness != nil { + in, out := &in.Softness, &out.Softness + *out = new(float64) + **out = **in + } + if in.SpatialAq != nil { + in, out := &in.SpatialAq, &out.SpatialAq + *out = new(string) + **out = **in + } + if in.SubgopLength != nil { + in, out := &in.SubgopLength, &out.SubgopLength + *out = new(string) + **out = **in + } + if in.Syntax != nil { + in, out := &in.Syntax, &out.Syntax + *out = new(string) + **out = **in + } + if in.TemporalAq != nil { + in, out := &in.TemporalAq, &out.TemporalAq + *out = new(string) + **out = **in + } + if in.TimecodeInsertion != nil { + in, out := &in.TimecodeInsertion, &out.TimecodeInsertion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H264SettingsInitParameters. +func (in *H264SettingsInitParameters) DeepCopy() *H264SettingsInitParameters { + if in == nil { + return nil + } + out := new(H264SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *H264SettingsObservation) DeepCopyInto(out *H264SettingsObservation) { + *out = *in + if in.AdaptiveQuantization != nil { + in, out := &in.AdaptiveQuantization, &out.AdaptiveQuantization + *out = new(string) + **out = **in + } + if in.AfdSignaling != nil { + in, out := &in.AfdSignaling, &out.AfdSignaling + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufFillPct != nil { + in, out := &in.BufFillPct, &out.BufFillPct + *out = new(float64) + **out = **in + } + if in.BufSize != nil { + in, out := &in.BufSize, &out.BufSize + *out = new(float64) + **out = **in + } + if in.ColorMetadata != nil { + in, out := &in.ColorMetadata, &out.ColorMetadata + *out = new(string) + **out = **in + } + if in.EntropyEncoding != nil { + in, out := &in.EntropyEncoding, &out.EntropyEncoding + *out = new(string) + **out = **in + } + if in.FilterSettings != nil { + in, out := &in.FilterSettings, &out.FilterSettings + *out = new(FilterSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.FixedAfd != nil { + in, out := &in.FixedAfd, &out.FixedAfd + *out = new(string) + **out = **in + } + if in.FlickerAq != nil { + in, out := &in.FlickerAq, &out.FlickerAq + *out = new(string) + **out = **in + } + if in.ForceFieldPictures != nil { + in, out := &in.ForceFieldPictures, &out.ForceFieldPictures + *out = new(string) + **out = **in + } + if in.FramerateControl != nil { + in, out := &in.FramerateControl, &out.FramerateControl + *out = new(string) + **out = **in + } + if in.FramerateDenominator != nil { + in, out := &in.FramerateDenominator, &out.FramerateDenominator + *out = new(float64) + **out = **in + } + if in.FramerateNumerator != nil { + in, out := &in.FramerateNumerator, &out.FramerateNumerator + *out = new(float64) + **out = **in + } + if in.GopBReference != nil { + in, out := &in.GopBReference, &out.GopBReference + *out = new(string) + **out = **in + } + if in.GopClosedCadence 
!= nil { + in, out := &in.GopClosedCadence, &out.GopClosedCadence + *out = new(float64) + **out = **in + } + if in.GopNumBFrames != nil { + in, out := &in.GopNumBFrames, &out.GopNumBFrames + *out = new(float64) + **out = **in + } + if in.GopSize != nil { + in, out := &in.GopSize, &out.GopSize + *out = new(float64) + **out = **in + } + if in.GopSizeUnits != nil { + in, out := &in.GopSizeUnits, &out.GopSizeUnits + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.LookAheadRateControl != nil { + in, out := &in.LookAheadRateControl, &out.LookAheadRateControl + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.MinIInterval != nil { + in, out := &in.MinIInterval, &out.MinIInterval + *out = new(float64) + **out = **in + } + if in.NumRefFrames != nil { + in, out := &in.NumRefFrames, &out.NumRefFrames + *out = new(float64) + **out = **in + } + if in.ParControl != nil { + in, out := &in.ParControl, &out.ParControl + *out = new(string) + **out = **in + } + if in.ParDenominator != nil { + in, out := &in.ParDenominator, &out.ParDenominator + *out = new(float64) + **out = **in + } + if in.ParNumerator != nil { + in, out := &in.ParNumerator, &out.ParNumerator + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.QualityLevel != nil { + in, out := &in.QualityLevel, &out.QualityLevel + *out = new(string) + **out = **in + } + if in.QvbrQualityLevel != nil { + in, out := &in.QvbrQualityLevel, &out.QvbrQualityLevel + *out = new(float64) + **out = **in + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.ScanType != nil { + in, out := &in.ScanType, &out.ScanType + *out = new(string) + **out = **in + } + 
if in.SceneChangeDetect != nil { + in, out := &in.SceneChangeDetect, &out.SceneChangeDetect + *out = new(string) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Softness != nil { + in, out := &in.Softness, &out.Softness + *out = new(float64) + **out = **in + } + if in.SpatialAq != nil { + in, out := &in.SpatialAq, &out.SpatialAq + *out = new(string) + **out = **in + } + if in.SubgopLength != nil { + in, out := &in.SubgopLength, &out.SubgopLength + *out = new(string) + **out = **in + } + if in.Syntax != nil { + in, out := &in.Syntax, &out.Syntax + *out = new(string) + **out = **in + } + if in.TemporalAq != nil { + in, out := &in.TemporalAq, &out.TemporalAq + *out = new(string) + **out = **in + } + if in.TimecodeInsertion != nil { + in, out := &in.TimecodeInsertion, &out.TimecodeInsertion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H264SettingsObservation. +func (in *H264SettingsObservation) DeepCopy() *H264SettingsObservation { + if in == nil { + return nil + } + out := new(H264SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *H264SettingsParameters) DeepCopyInto(out *H264SettingsParameters) { + *out = *in + if in.AdaptiveQuantization != nil { + in, out := &in.AdaptiveQuantization, &out.AdaptiveQuantization + *out = new(string) + **out = **in + } + if in.AfdSignaling != nil { + in, out := &in.AfdSignaling, &out.AfdSignaling + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufFillPct != nil { + in, out := &in.BufFillPct, &out.BufFillPct + *out = new(float64) + **out = **in + } + if in.BufSize != nil { + in, out := &in.BufSize, &out.BufSize + *out = new(float64) + **out = **in + } + if in.ColorMetadata != nil { + in, out := &in.ColorMetadata, &out.ColorMetadata + *out = new(string) + **out = **in + } + if in.EntropyEncoding != nil { + in, out := &in.EntropyEncoding, &out.EntropyEncoding + *out = new(string) + **out = **in + } + if in.FilterSettings != nil { + in, out := &in.FilterSettings, &out.FilterSettings + *out = new(FilterSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedAfd != nil { + in, out := &in.FixedAfd, &out.FixedAfd + *out = new(string) + **out = **in + } + if in.FlickerAq != nil { + in, out := &in.FlickerAq, &out.FlickerAq + *out = new(string) + **out = **in + } + if in.ForceFieldPictures != nil { + in, out := &in.ForceFieldPictures, &out.ForceFieldPictures + *out = new(string) + **out = **in + } + if in.FramerateControl != nil { + in, out := &in.FramerateControl, &out.FramerateControl + *out = new(string) + **out = **in + } + if in.FramerateDenominator != nil { + in, out := &in.FramerateDenominator, &out.FramerateDenominator + *out = new(float64) + **out = **in + } + if in.FramerateNumerator != nil { + in, out := &in.FramerateNumerator, &out.FramerateNumerator + *out = new(float64) + **out = **in + } + if in.GopBReference != nil { + in, out := &in.GopBReference, &out.GopBReference + *out = new(string) + **out = **in + } + if in.GopClosedCadence != 
nil { + in, out := &in.GopClosedCadence, &out.GopClosedCadence + *out = new(float64) + **out = **in + } + if in.GopNumBFrames != nil { + in, out := &in.GopNumBFrames, &out.GopNumBFrames + *out = new(float64) + **out = **in + } + if in.GopSize != nil { + in, out := &in.GopSize, &out.GopSize + *out = new(float64) + **out = **in + } + if in.GopSizeUnits != nil { + in, out := &in.GopSizeUnits, &out.GopSizeUnits + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.LookAheadRateControl != nil { + in, out := &in.LookAheadRateControl, &out.LookAheadRateControl + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.MinIInterval != nil { + in, out := &in.MinIInterval, &out.MinIInterval + *out = new(float64) + **out = **in + } + if in.NumRefFrames != nil { + in, out := &in.NumRefFrames, &out.NumRefFrames + *out = new(float64) + **out = **in + } + if in.ParControl != nil { + in, out := &in.ParControl, &out.ParControl + *out = new(string) + **out = **in + } + if in.ParDenominator != nil { + in, out := &in.ParDenominator, &out.ParDenominator + *out = new(float64) + **out = **in + } + if in.ParNumerator != nil { + in, out := &in.ParNumerator, &out.ParNumerator + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.QualityLevel != nil { + in, out := &in.QualityLevel, &out.QualityLevel + *out = new(string) + **out = **in + } + if in.QvbrQualityLevel != nil { + in, out := &in.QvbrQualityLevel, &out.QvbrQualityLevel + *out = new(float64) + **out = **in + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.ScanType != nil { + in, out := &in.ScanType, &out.ScanType + *out = new(string) + **out = **in + } + if 
in.SceneChangeDetect != nil { + in, out := &in.SceneChangeDetect, &out.SceneChangeDetect + *out = new(string) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Softness != nil { + in, out := &in.Softness, &out.Softness + *out = new(float64) + **out = **in + } + if in.SpatialAq != nil { + in, out := &in.SpatialAq, &out.SpatialAq + *out = new(string) + **out = **in + } + if in.SubgopLength != nil { + in, out := &in.SubgopLength, &out.SubgopLength + *out = new(string) + **out = **in + } + if in.Syntax != nil { + in, out := &in.Syntax, &out.Syntax + *out = new(string) + **out = **in + } + if in.TemporalAq != nil { + in, out := &in.TemporalAq, &out.TemporalAq + *out = new(string) + **out = **in + } + if in.TimecodeInsertion != nil { + in, out := &in.TimecodeInsertion, &out.TimecodeInsertion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H264SettingsParameters. +func (in *H264SettingsParameters) DeepCopy() *H264SettingsParameters { + if in == nil { + return nil + } + out := new(H264SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H265SettingsFilterSettingsInitParameters) DeepCopyInto(out *H265SettingsFilterSettingsInitParameters) { + *out = *in + if in.TemporalFilterSettings != nil { + in, out := &in.TemporalFilterSettings, &out.TemporalFilterSettings + *out = new(FilterSettingsTemporalFilterSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265SettingsFilterSettingsInitParameters. 
+func (in *H265SettingsFilterSettingsInitParameters) DeepCopy() *H265SettingsFilterSettingsInitParameters { + if in == nil { + return nil + } + out := new(H265SettingsFilterSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H265SettingsFilterSettingsObservation) DeepCopyInto(out *H265SettingsFilterSettingsObservation) { + *out = *in + if in.TemporalFilterSettings != nil { + in, out := &in.TemporalFilterSettings, &out.TemporalFilterSettings + *out = new(FilterSettingsTemporalFilterSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265SettingsFilterSettingsObservation. +func (in *H265SettingsFilterSettingsObservation) DeepCopy() *H265SettingsFilterSettingsObservation { + if in == nil { + return nil + } + out := new(H265SettingsFilterSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H265SettingsFilterSettingsParameters) DeepCopyInto(out *H265SettingsFilterSettingsParameters) { + *out = *in + if in.TemporalFilterSettings != nil { + in, out := &in.TemporalFilterSettings, &out.TemporalFilterSettings + *out = new(FilterSettingsTemporalFilterSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265SettingsFilterSettingsParameters. +func (in *H265SettingsFilterSettingsParameters) DeepCopy() *H265SettingsFilterSettingsParameters { + if in == nil { + return nil + } + out := new(H265SettingsFilterSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *H265SettingsInitParameters) DeepCopyInto(out *H265SettingsInitParameters) { + *out = *in + if in.AdaptiveQuantization != nil { + in, out := &in.AdaptiveQuantization, &out.AdaptiveQuantization + *out = new(string) + **out = **in + } + if in.AfdSignaling != nil { + in, out := &in.AfdSignaling, &out.AfdSignaling + *out = new(string) + **out = **in + } + if in.AlternativeTransferFunction != nil { + in, out := &in.AlternativeTransferFunction, &out.AlternativeTransferFunction + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufSize != nil { + in, out := &in.BufSize, &out.BufSize + *out = new(float64) + **out = **in + } + if in.ColorMetadata != nil { + in, out := &in.ColorMetadata, &out.ColorMetadata + *out = new(string) + **out = **in + } + if in.ColorSpaceSettings != nil { + in, out := &in.ColorSpaceSettings, &out.ColorSpaceSettings + *out = new(ColorSpaceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FilterSettings != nil { + in, out := &in.FilterSettings, &out.FilterSettings + *out = new(H265SettingsFilterSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedAfd != nil { + in, out := &in.FixedAfd, &out.FixedAfd + *out = new(string) + **out = **in + } + if in.FlickerAq != nil { + in, out := &in.FlickerAq, &out.FlickerAq + *out = new(string) + **out = **in + } + if in.FramerateDenominator != nil { + in, out := &in.FramerateDenominator, &out.FramerateDenominator + *out = new(float64) + **out = **in + } + if in.FramerateNumerator != nil { + in, out := &in.FramerateNumerator, &out.FramerateNumerator + *out = new(float64) + **out = **in + } + if in.GopClosedCadence != nil { + in, out := &in.GopClosedCadence, &out.GopClosedCadence + *out = new(float64) + **out = **in + } + if in.GopSize != nil { + in, out := &in.GopSize, &out.GopSize + *out = new(float64) + **out = **in + } + if in.GopSizeUnits != nil { + in, out := 
&in.GopSizeUnits, &out.GopSizeUnits + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.LookAheadRateControl != nil { + in, out := &in.LookAheadRateControl, &out.LookAheadRateControl + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.MinIInterval != nil { + in, out := &in.MinIInterval, &out.MinIInterval + *out = new(float64) + **out = **in + } + if in.ParDenominator != nil { + in, out := &in.ParDenominator, &out.ParDenominator + *out = new(float64) + **out = **in + } + if in.ParNumerator != nil { + in, out := &in.ParNumerator, &out.ParNumerator + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.QvbrQualityLevel != nil { + in, out := &in.QvbrQualityLevel, &out.QvbrQualityLevel + *out = new(float64) + **out = **in + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.ScanType != nil { + in, out := &in.ScanType, &out.ScanType + *out = new(string) + **out = **in + } + if in.SceneChangeDetect != nil { + in, out := &in.SceneChangeDetect, &out.SceneChangeDetect + *out = new(string) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.TimecodeBurninSettings != nil { + in, out := &in.TimecodeBurninSettings, &out.TimecodeBurninSettings + *out = new(TimecodeBurninSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TimecodeInsertion != nil { + in, out := &in.TimecodeInsertion, &out.TimecodeInsertion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new H265SettingsInitParameters. +func (in *H265SettingsInitParameters) DeepCopy() *H265SettingsInitParameters { + if in == nil { + return nil + } + out := new(H265SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H265SettingsObservation) DeepCopyInto(out *H265SettingsObservation) { + *out = *in + if in.AdaptiveQuantization != nil { + in, out := &in.AdaptiveQuantization, &out.AdaptiveQuantization + *out = new(string) + **out = **in + } + if in.AfdSignaling != nil { + in, out := &in.AfdSignaling, &out.AfdSignaling + *out = new(string) + **out = **in + } + if in.AlternativeTransferFunction != nil { + in, out := &in.AlternativeTransferFunction, &out.AlternativeTransferFunction + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufSize != nil { + in, out := &in.BufSize, &out.BufSize + *out = new(float64) + **out = **in + } + if in.ColorMetadata != nil { + in, out := &in.ColorMetadata, &out.ColorMetadata + *out = new(string) + **out = **in + } + if in.ColorSpaceSettings != nil { + in, out := &in.ColorSpaceSettings, &out.ColorSpaceSettings + *out = new(ColorSpaceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.FilterSettings != nil { + in, out := &in.FilterSettings, &out.FilterSettings + *out = new(H265SettingsFilterSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.FixedAfd != nil { + in, out := &in.FixedAfd, &out.FixedAfd + *out = new(string) + **out = **in + } + if in.FlickerAq != nil { + in, out := &in.FlickerAq, &out.FlickerAq + *out = new(string) + **out = **in + } + if in.FramerateDenominator != nil { + in, out := &in.FramerateDenominator, &out.FramerateDenominator + *out = new(float64) + **out = **in + } + if in.FramerateNumerator != nil { + in, out := &in.FramerateNumerator, 
&out.FramerateNumerator + *out = new(float64) + **out = **in + } + if in.GopClosedCadence != nil { + in, out := &in.GopClosedCadence, &out.GopClosedCadence + *out = new(float64) + **out = **in + } + if in.GopSize != nil { + in, out := &in.GopSize, &out.GopSize + *out = new(float64) + **out = **in + } + if in.GopSizeUnits != nil { + in, out := &in.GopSizeUnits, &out.GopSizeUnits + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.LookAheadRateControl != nil { + in, out := &in.LookAheadRateControl, &out.LookAheadRateControl + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.MinIInterval != nil { + in, out := &in.MinIInterval, &out.MinIInterval + *out = new(float64) + **out = **in + } + if in.ParDenominator != nil { + in, out := &in.ParDenominator, &out.ParDenominator + *out = new(float64) + **out = **in + } + if in.ParNumerator != nil { + in, out := &in.ParNumerator, &out.ParNumerator + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.QvbrQualityLevel != nil { + in, out := &in.QvbrQualityLevel, &out.QvbrQualityLevel + *out = new(float64) + **out = **in + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.ScanType != nil { + in, out := &in.ScanType, &out.ScanType + *out = new(string) + **out = **in + } + if in.SceneChangeDetect != nil { + in, out := &in.SceneChangeDetect, &out.SceneChangeDetect + *out = new(string) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.TimecodeBurninSettings != nil { + in, out := 
&in.TimecodeBurninSettings, &out.TimecodeBurninSettings + *out = new(TimecodeBurninSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.TimecodeInsertion != nil { + in, out := &in.TimecodeInsertion, &out.TimecodeInsertion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265SettingsObservation. +func (in *H265SettingsObservation) DeepCopy() *H265SettingsObservation { + if in == nil { + return nil + } + out := new(H265SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H265SettingsParameters) DeepCopyInto(out *H265SettingsParameters) { + *out = *in + if in.AdaptiveQuantization != nil { + in, out := &in.AdaptiveQuantization, &out.AdaptiveQuantization + *out = new(string) + **out = **in + } + if in.AfdSignaling != nil { + in, out := &in.AfdSignaling, &out.AfdSignaling + *out = new(string) + **out = **in + } + if in.AlternativeTransferFunction != nil { + in, out := &in.AlternativeTransferFunction, &out.AlternativeTransferFunction + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufSize != nil { + in, out := &in.BufSize, &out.BufSize + *out = new(float64) + **out = **in + } + if in.ColorMetadata != nil { + in, out := &in.ColorMetadata, &out.ColorMetadata + *out = new(string) + **out = **in + } + if in.ColorSpaceSettings != nil { + in, out := &in.ColorSpaceSettings, &out.ColorSpaceSettings + *out = new(ColorSpaceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.FilterSettings != nil { + in, out := &in.FilterSettings, &out.FilterSettings + *out = new(H265SettingsFilterSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedAfd != nil { + in, out := &in.FixedAfd, &out.FixedAfd + *out = new(string) + **out = **in + } + if 
in.FlickerAq != nil { + in, out := &in.FlickerAq, &out.FlickerAq + *out = new(string) + **out = **in + } + if in.FramerateDenominator != nil { + in, out := &in.FramerateDenominator, &out.FramerateDenominator + *out = new(float64) + **out = **in + } + if in.FramerateNumerator != nil { + in, out := &in.FramerateNumerator, &out.FramerateNumerator + *out = new(float64) + **out = **in + } + if in.GopClosedCadence != nil { + in, out := &in.GopClosedCadence, &out.GopClosedCadence + *out = new(float64) + **out = **in + } + if in.GopSize != nil { + in, out := &in.GopSize, &out.GopSize + *out = new(float64) + **out = **in + } + if in.GopSizeUnits != nil { + in, out := &in.GopSizeUnits, &out.GopSizeUnits + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.LookAheadRateControl != nil { + in, out := &in.LookAheadRateControl, &out.LookAheadRateControl + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.MinIInterval != nil { + in, out := &in.MinIInterval, &out.MinIInterval + *out = new(float64) + **out = **in + } + if in.ParDenominator != nil { + in, out := &in.ParDenominator, &out.ParDenominator + *out = new(float64) + **out = **in + } + if in.ParNumerator != nil { + in, out := &in.ParNumerator, &out.ParNumerator + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.QvbrQualityLevel != nil { + in, out := &in.QvbrQualityLevel, &out.QvbrQualityLevel + *out = new(float64) + **out = **in + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.ScanType != nil { + in, out := &in.ScanType, &out.ScanType + *out = new(string) + **out = **in + } + if in.SceneChangeDetect != nil { + in, out := 
&in.SceneChangeDetect, &out.SceneChangeDetect + *out = new(string) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.TimecodeBurninSettings != nil { + in, out := &in.TimecodeBurninSettings, &out.TimecodeBurninSettings + *out = new(TimecodeBurninSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.TimecodeInsertion != nil { + in, out := &in.TimecodeInsertion, &out.TimecodeInsertion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265SettingsParameters. +func (in *H265SettingsParameters) DeepCopy() *H265SettingsParameters { + if in == nil { + return nil + } + out := new(H265SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTMLMotionGraphicsSettingsInitParameters) DeepCopyInto(out *HTMLMotionGraphicsSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTMLMotionGraphicsSettingsInitParameters. +func (in *HTMLMotionGraphicsSettingsInitParameters) DeepCopy() *HTMLMotionGraphicsSettingsInitParameters { + if in == nil { + return nil + } + out := new(HTMLMotionGraphicsSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTMLMotionGraphicsSettingsObservation) DeepCopyInto(out *HTMLMotionGraphicsSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTMLMotionGraphicsSettingsObservation. 
+func (in *HTMLMotionGraphicsSettingsObservation) DeepCopy() *HTMLMotionGraphicsSettingsObservation { + if in == nil { + return nil + } + out := new(HTMLMotionGraphicsSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTMLMotionGraphicsSettingsParameters) DeepCopyInto(out *HTMLMotionGraphicsSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTMLMotionGraphicsSettingsParameters. +func (in *HTMLMotionGraphicsSettingsParameters) DeepCopy() *HTMLMotionGraphicsSettingsParameters { + if in == nil { + return nil + } + out := new(HTMLMotionGraphicsSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hdr10SettingsInitParameters) DeepCopyInto(out *Hdr10SettingsInitParameters) { + *out = *in + if in.MaxCll != nil { + in, out := &in.MaxCll, &out.MaxCll + *out = new(float64) + **out = **in + } + if in.MaxFall != nil { + in, out := &in.MaxFall, &out.MaxFall + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hdr10SettingsInitParameters. +func (in *Hdr10SettingsInitParameters) DeepCopy() *Hdr10SettingsInitParameters { + if in == nil { + return nil + } + out := new(Hdr10SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Hdr10SettingsObservation) DeepCopyInto(out *Hdr10SettingsObservation) { + *out = *in + if in.MaxCll != nil { + in, out := &in.MaxCll, &out.MaxCll + *out = new(float64) + **out = **in + } + if in.MaxFall != nil { + in, out := &in.MaxFall, &out.MaxFall + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hdr10SettingsObservation. +func (in *Hdr10SettingsObservation) DeepCopy() *Hdr10SettingsObservation { + if in == nil { + return nil + } + out := new(Hdr10SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hdr10SettingsParameters) DeepCopyInto(out *Hdr10SettingsParameters) { + *out = *in + if in.MaxCll != nil { + in, out := &in.MaxCll, &out.MaxCll + *out = new(float64) + **out = **in + } + if in.MaxFall != nil { + in, out := &in.MaxFall, &out.MaxFall + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hdr10SettingsParameters. +func (in *Hdr10SettingsParameters) DeepCopy() *Hdr10SettingsParameters { + if in == nil { + return nil + } + out := new(Hdr10SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsAkamaiSettingsInitParameters) DeepCopyInto(out *HlsAkamaiSettingsInitParameters) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.HTTPTransferMode != nil { + in, out := &in.HTTPTransferMode, &out.HTTPTransferMode + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } + if in.Salt != nil { + in, out := &in.Salt, &out.Salt + *out = new(string) + **out = **in + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsAkamaiSettingsInitParameters. +func (in *HlsAkamaiSettingsInitParameters) DeepCopy() *HlsAkamaiSettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsAkamaiSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsAkamaiSettingsObservation) DeepCopyInto(out *HlsAkamaiSettingsObservation) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.HTTPTransferMode != nil { + in, out := &in.HTTPTransferMode, &out.HTTPTransferMode + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } + if in.Salt != nil { + in, out := &in.Salt, &out.Salt + *out = new(string) + **out = **in + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsAkamaiSettingsObservation. +func (in *HlsAkamaiSettingsObservation) DeepCopy() *HlsAkamaiSettingsObservation { + if in == nil { + return nil + } + out := new(HlsAkamaiSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsAkamaiSettingsParameters) DeepCopyInto(out *HlsAkamaiSettingsParameters) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.HTTPTransferMode != nil { + in, out := &in.HTTPTransferMode, &out.HTTPTransferMode + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } + if in.Salt != nil { + in, out := &in.Salt, &out.Salt + *out = new(string) + **out = **in + } + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsAkamaiSettingsParameters. +func (in *HlsAkamaiSettingsParameters) DeepCopy() *HlsAkamaiSettingsParameters { + if in == nil { + return nil + } + out := new(HlsAkamaiSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsBasicPutSettingsInitParameters) DeepCopyInto(out *HlsBasicPutSettingsInitParameters) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsBasicPutSettingsInitParameters. +func (in *HlsBasicPutSettingsInitParameters) DeepCopy() *HlsBasicPutSettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsBasicPutSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsBasicPutSettingsObservation) DeepCopyInto(out *HlsBasicPutSettingsObservation) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsBasicPutSettingsObservation. 
+func (in *HlsBasicPutSettingsObservation) DeepCopy() *HlsBasicPutSettingsObservation { + if in == nil { + return nil + } + out := new(HlsBasicPutSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsBasicPutSettingsParameters) DeepCopyInto(out *HlsBasicPutSettingsParameters) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsBasicPutSettingsParameters. +func (in *HlsBasicPutSettingsParameters) DeepCopy() *HlsBasicPutSettingsParameters { + if in == nil { + return nil + } + out := new(HlsBasicPutSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsCdnSettingsInitParameters) DeepCopyInto(out *HlsCdnSettingsInitParameters) { + *out = *in + if in.HlsAkamaiSettings != nil { + in, out := &in.HlsAkamaiSettings, &out.HlsAkamaiSettings + *out = new(HlsAkamaiSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsBasicPutSettings != nil { + in, out := &in.HlsBasicPutSettings, &out.HlsBasicPutSettings + *out = new(HlsBasicPutSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsMediaStoreSettings != nil { + in, out := &in.HlsMediaStoreSettings, &out.HlsMediaStoreSettings + *out = new(HlsMediaStoreSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsS3Settings != nil { + in, out := &in.HlsS3Settings, &out.HlsS3Settings + *out = new(HlsS3SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsWebdavSettings != nil { + in, out := &in.HlsWebdavSettings, &out.HlsWebdavSettings + *out = new(HlsWebdavSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsCdnSettingsInitParameters. +func (in *HlsCdnSettingsInitParameters) DeepCopy() *HlsCdnSettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsCdnSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsCdnSettingsObservation) DeepCopyInto(out *HlsCdnSettingsObservation) { + *out = *in + if in.HlsAkamaiSettings != nil { + in, out := &in.HlsAkamaiSettings, &out.HlsAkamaiSettings + *out = new(HlsAkamaiSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.HlsBasicPutSettings != nil { + in, out := &in.HlsBasicPutSettings, &out.HlsBasicPutSettings + *out = new(HlsBasicPutSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.HlsMediaStoreSettings != nil { + in, out := &in.HlsMediaStoreSettings, &out.HlsMediaStoreSettings + *out = new(HlsMediaStoreSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.HlsS3Settings != nil { + in, out := &in.HlsS3Settings, &out.HlsS3Settings + *out = new(HlsS3SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.HlsWebdavSettings != nil { + in, out := &in.HlsWebdavSettings, &out.HlsWebdavSettings + *out = new(HlsWebdavSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsCdnSettingsObservation. +func (in *HlsCdnSettingsObservation) DeepCopy() *HlsCdnSettingsObservation { + if in == nil { + return nil + } + out := new(HlsCdnSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsCdnSettingsParameters) DeepCopyInto(out *HlsCdnSettingsParameters) { + *out = *in + if in.HlsAkamaiSettings != nil { + in, out := &in.HlsAkamaiSettings, &out.HlsAkamaiSettings + *out = new(HlsAkamaiSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsBasicPutSettings != nil { + in, out := &in.HlsBasicPutSettings, &out.HlsBasicPutSettings + *out = new(HlsBasicPutSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsMediaStoreSettings != nil { + in, out := &in.HlsMediaStoreSettings, &out.HlsMediaStoreSettings + *out = new(HlsMediaStoreSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsS3Settings != nil { + in, out := &in.HlsS3Settings, &out.HlsS3Settings + *out = new(HlsS3SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsWebdavSettings != nil { + in, out := &in.HlsWebdavSettings, &out.HlsWebdavSettings + *out = new(HlsWebdavSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsCdnSettingsParameters. +func (in *HlsCdnSettingsParameters) DeepCopy() *HlsCdnSettingsParameters { + if in == nil { + return nil + } + out := new(HlsCdnSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsGroupSettingsDestinationInitParameters) DeepCopyInto(out *HlsGroupSettingsDestinationInitParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsGroupSettingsDestinationInitParameters. 
+func (in *HlsGroupSettingsDestinationInitParameters) DeepCopy() *HlsGroupSettingsDestinationInitParameters { + if in == nil { + return nil + } + out := new(HlsGroupSettingsDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsGroupSettingsDestinationObservation) DeepCopyInto(out *HlsGroupSettingsDestinationObservation) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsGroupSettingsDestinationObservation. +func (in *HlsGroupSettingsDestinationObservation) DeepCopy() *HlsGroupSettingsDestinationObservation { + if in == nil { + return nil + } + out := new(HlsGroupSettingsDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsGroupSettingsDestinationParameters) DeepCopyInto(out *HlsGroupSettingsDestinationParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsGroupSettingsDestinationParameters. +func (in *HlsGroupSettingsDestinationParameters) DeepCopy() *HlsGroupSettingsDestinationParameters { + if in == nil { + return nil + } + out := new(HlsGroupSettingsDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsGroupSettingsInitParameters) DeepCopyInto(out *HlsGroupSettingsInitParameters) { + *out = *in + if in.AdMarkers != nil { + in, out := &in.AdMarkers, &out.AdMarkers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BaseURLContent != nil { + in, out := &in.BaseURLContent, &out.BaseURLContent + *out = new(string) + **out = **in + } + if in.BaseURLContent1 != nil { + in, out := &in.BaseURLContent1, &out.BaseURLContent1 + *out = new(string) + **out = **in + } + if in.BaseURLManifest != nil { + in, out := &in.BaseURLManifest, &out.BaseURLManifest + *out = new(string) + **out = **in + } + if in.BaseURLManifest1 != nil { + in, out := &in.BaseURLManifest1, &out.BaseURLManifest1 + *out = new(string) + **out = **in + } + if in.CaptionLanguageMappings != nil { + in, out := &in.CaptionLanguageMappings, &out.CaptionLanguageMappings + *out = make([]CaptionLanguageMappingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CaptionLanguageSetting != nil { + in, out := &in.CaptionLanguageSetting, &out.CaptionLanguageSetting + *out = new(string) + **out = **in + } + if in.ClientCache != nil { + in, out := &in.ClientCache, &out.ClientCache + *out = new(string) + **out = **in + } + if in.CodecSpecification != nil { + in, out := &in.CodecSpecification, &out.CodecSpecification + *out = new(string) + **out = **in + } + if in.ConstantIv != nil { + in, out := &in.ConstantIv, &out.ConstantIv + *out = new(string) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(HlsGroupSettingsDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryStructure != nil { + in, out := &in.DirectoryStructure, &out.DirectoryStructure + *out = new(string) + **out = **in + } + if in.DiscontinuityTags != nil { + in, out := &in.DiscontinuityTags, 
&out.DiscontinuityTags + *out = new(string) + **out = **in + } + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.HlsCdnSettings != nil { + in, out := &in.HlsCdnSettings, &out.HlsCdnSettings + *out = make([]HlsCdnSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HlsId3SegmentTagging != nil { + in, out := &in.HlsId3SegmentTagging, &out.HlsId3SegmentTagging + *out = new(string) + **out = **in + } + if in.IframeOnlyPlaylists != nil { + in, out := &in.IframeOnlyPlaylists, &out.IframeOnlyPlaylists + *out = new(string) + **out = **in + } + if in.IncompleteSegmentBehavior != nil { + in, out := &in.IncompleteSegmentBehavior, &out.IncompleteSegmentBehavior + *out = new(string) + **out = **in + } + if in.IndexNSegments != nil { + in, out := &in.IndexNSegments, &out.IndexNSegments + *out = new(float64) + **out = **in + } + if in.InputLossAction != nil { + in, out := &in.InputLossAction, &out.InputLossAction + *out = new(string) + **out = **in + } + if in.IvInManifest != nil { + in, out := &in.IvInManifest, &out.IvInManifest + *out = new(string) + **out = **in + } + if in.IvSource != nil { + in, out := &in.IvSource, &out.IvSource + *out = new(string) + **out = **in + } + if in.KeepSegments != nil { + in, out := &in.KeepSegments, &out.KeepSegments + *out = new(float64) + **out = **in + } + if in.KeyFormat != nil { + in, out := &in.KeyFormat, &out.KeyFormat + *out = new(string) + **out = **in + } + if in.KeyFormatVersions != nil { + in, out := &in.KeyFormatVersions, &out.KeyFormatVersions + *out = new(string) + **out = **in + } + if in.KeyProviderSettings != nil { + in, out := &in.KeyProviderSettings, &out.KeyProviderSettings + *out = new(KeyProviderSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ManifestCompression != nil { + in, out := &in.ManifestCompression, &out.ManifestCompression + *out = new(string) + **out = **in + } 
+ if in.ManifestDurationFormat != nil { + in, out := &in.ManifestDurationFormat, &out.ManifestDurationFormat + *out = new(string) + **out = **in + } + if in.MinSegmentLength != nil { + in, out := &in.MinSegmentLength, &out.MinSegmentLength + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.OutputSelection != nil { + in, out := &in.OutputSelection, &out.OutputSelection + *out = new(string) + **out = **in + } + if in.ProgramDateTime != nil { + in, out := &in.ProgramDateTime, &out.ProgramDateTime + *out = new(string) + **out = **in + } + if in.ProgramDateTimeClock != nil { + in, out := &in.ProgramDateTimeClock, &out.ProgramDateTimeClock + *out = new(string) + **out = **in + } + if in.ProgramDateTimePeriod != nil { + in, out := &in.ProgramDateTimePeriod, &out.ProgramDateTimePeriod + *out = new(float64) + **out = **in + } + if in.RedundantManifest != nil { + in, out := &in.RedundantManifest, &out.RedundantManifest + *out = new(string) + **out = **in + } + if in.SegmentLength != nil { + in, out := &in.SegmentLength, &out.SegmentLength + *out = new(float64) + **out = **in + } + if in.SegmentsPerSubdirectory != nil { + in, out := &in.SegmentsPerSubdirectory, &out.SegmentsPerSubdirectory + *out = new(float64) + **out = **in + } + if in.StreamInfResolution != nil { + in, out := &in.StreamInfResolution, &out.StreamInfResolution + *out = new(string) + **out = **in + } + if in.TSFileMode != nil { + in, out := &in.TSFileMode, &out.TSFileMode + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Frame != nil { + in, out := &in.TimedMetadataId3Frame, &out.TimedMetadataId3Frame + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Period != nil { + in, out := &in.TimedMetadataId3Period, &out.TimedMetadataId3Period + *out = new(float64) + **out = **in + } + if in.TimestampDeltaMilliseconds != nil { + in, out := &in.TimestampDeltaMilliseconds, 
&out.TimestampDeltaMilliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsGroupSettingsInitParameters. +func (in *HlsGroupSettingsInitParameters) DeepCopy() *HlsGroupSettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsGroupSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsGroupSettingsObservation) DeepCopyInto(out *HlsGroupSettingsObservation) { + *out = *in + if in.AdMarkers != nil { + in, out := &in.AdMarkers, &out.AdMarkers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BaseURLContent != nil { + in, out := &in.BaseURLContent, &out.BaseURLContent + *out = new(string) + **out = **in + } + if in.BaseURLContent1 != nil { + in, out := &in.BaseURLContent1, &out.BaseURLContent1 + *out = new(string) + **out = **in + } + if in.BaseURLManifest != nil { + in, out := &in.BaseURLManifest, &out.BaseURLManifest + *out = new(string) + **out = **in + } + if in.BaseURLManifest1 != nil { + in, out := &in.BaseURLManifest1, &out.BaseURLManifest1 + *out = new(string) + **out = **in + } + if in.CaptionLanguageMappings != nil { + in, out := &in.CaptionLanguageMappings, &out.CaptionLanguageMappings + *out = make([]CaptionLanguageMappingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CaptionLanguageSetting != nil { + in, out := &in.CaptionLanguageSetting, &out.CaptionLanguageSetting + *out = new(string) + **out = **in + } + if in.ClientCache != nil { + in, out := &in.ClientCache, &out.ClientCache + *out = new(string) + **out = **in + } + if in.CodecSpecification != nil { + in, out := &in.CodecSpecification, &out.CodecSpecification + *out = new(string) + 
**out = **in + } + if in.ConstantIv != nil { + in, out := &in.ConstantIv, &out.ConstantIv + *out = new(string) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(HlsGroupSettingsDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.DirectoryStructure != nil { + in, out := &in.DirectoryStructure, &out.DirectoryStructure + *out = new(string) + **out = **in + } + if in.DiscontinuityTags != nil { + in, out := &in.DiscontinuityTags, &out.DiscontinuityTags + *out = new(string) + **out = **in + } + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.HlsCdnSettings != nil { + in, out := &in.HlsCdnSettings, &out.HlsCdnSettings + *out = make([]HlsCdnSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HlsId3SegmentTagging != nil { + in, out := &in.HlsId3SegmentTagging, &out.HlsId3SegmentTagging + *out = new(string) + **out = **in + } + if in.IframeOnlyPlaylists != nil { + in, out := &in.IframeOnlyPlaylists, &out.IframeOnlyPlaylists + *out = new(string) + **out = **in + } + if in.IncompleteSegmentBehavior != nil { + in, out := &in.IncompleteSegmentBehavior, &out.IncompleteSegmentBehavior + *out = new(string) + **out = **in + } + if in.IndexNSegments != nil { + in, out := &in.IndexNSegments, &out.IndexNSegments + *out = new(float64) + **out = **in + } + if in.InputLossAction != nil { + in, out := &in.InputLossAction, &out.InputLossAction + *out = new(string) + **out = **in + } + if in.IvInManifest != nil { + in, out := &in.IvInManifest, &out.IvInManifest + *out = new(string) + **out = **in + } + if in.IvSource != nil { + in, out := &in.IvSource, &out.IvSource + *out = new(string) + **out = **in + } + if in.KeepSegments != nil { + in, out := &in.KeepSegments, &out.KeepSegments + *out = new(float64) + **out = **in + } + if in.KeyFormat != nil { + in, out := &in.KeyFormat, 
&out.KeyFormat + *out = new(string) + **out = **in + } + if in.KeyFormatVersions != nil { + in, out := &in.KeyFormatVersions, &out.KeyFormatVersions + *out = new(string) + **out = **in + } + if in.KeyProviderSettings != nil { + in, out := &in.KeyProviderSettings, &out.KeyProviderSettings + *out = new(KeyProviderSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ManifestCompression != nil { + in, out := &in.ManifestCompression, &out.ManifestCompression + *out = new(string) + **out = **in + } + if in.ManifestDurationFormat != nil { + in, out := &in.ManifestDurationFormat, &out.ManifestDurationFormat + *out = new(string) + **out = **in + } + if in.MinSegmentLength != nil { + in, out := &in.MinSegmentLength, &out.MinSegmentLength + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.OutputSelection != nil { + in, out := &in.OutputSelection, &out.OutputSelection + *out = new(string) + **out = **in + } + if in.ProgramDateTime != nil { + in, out := &in.ProgramDateTime, &out.ProgramDateTime + *out = new(string) + **out = **in + } + if in.ProgramDateTimeClock != nil { + in, out := &in.ProgramDateTimeClock, &out.ProgramDateTimeClock + *out = new(string) + **out = **in + } + if in.ProgramDateTimePeriod != nil { + in, out := &in.ProgramDateTimePeriod, &out.ProgramDateTimePeriod + *out = new(float64) + **out = **in + } + if in.RedundantManifest != nil { + in, out := &in.RedundantManifest, &out.RedundantManifest + *out = new(string) + **out = **in + } + if in.SegmentLength != nil { + in, out := &in.SegmentLength, &out.SegmentLength + *out = new(float64) + **out = **in + } + if in.SegmentsPerSubdirectory != nil { + in, out := &in.SegmentsPerSubdirectory, &out.SegmentsPerSubdirectory + *out = new(float64) + **out = **in + } + if in.StreamInfResolution != nil { + in, out := &in.StreamInfResolution, &out.StreamInfResolution + *out = new(string) + **out = **in + } + if in.TSFileMode != 
nil { + in, out := &in.TSFileMode, &out.TSFileMode + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Frame != nil { + in, out := &in.TimedMetadataId3Frame, &out.TimedMetadataId3Frame + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Period != nil { + in, out := &in.TimedMetadataId3Period, &out.TimedMetadataId3Period + *out = new(float64) + **out = **in + } + if in.TimestampDeltaMilliseconds != nil { + in, out := &in.TimestampDeltaMilliseconds, &out.TimestampDeltaMilliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsGroupSettingsObservation. +func (in *HlsGroupSettingsObservation) DeepCopy() *HlsGroupSettingsObservation { + if in == nil { + return nil + } + out := new(HlsGroupSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsGroupSettingsParameters) DeepCopyInto(out *HlsGroupSettingsParameters) { + *out = *in + if in.AdMarkers != nil { + in, out := &in.AdMarkers, &out.AdMarkers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BaseURLContent != nil { + in, out := &in.BaseURLContent, &out.BaseURLContent + *out = new(string) + **out = **in + } + if in.BaseURLContent1 != nil { + in, out := &in.BaseURLContent1, &out.BaseURLContent1 + *out = new(string) + **out = **in + } + if in.BaseURLManifest != nil { + in, out := &in.BaseURLManifest, &out.BaseURLManifest + *out = new(string) + **out = **in + } + if in.BaseURLManifest1 != nil { + in, out := &in.BaseURLManifest1, &out.BaseURLManifest1 + *out = new(string) + **out = **in + } + if in.CaptionLanguageMappings != nil { + in, out := &in.CaptionLanguageMappings, &out.CaptionLanguageMappings + *out = make([]CaptionLanguageMappingsParameters, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CaptionLanguageSetting != nil { + in, out := &in.CaptionLanguageSetting, &out.CaptionLanguageSetting + *out = new(string) + **out = **in + } + if in.ClientCache != nil { + in, out := &in.ClientCache, &out.ClientCache + *out = new(string) + **out = **in + } + if in.CodecSpecification != nil { + in, out := &in.CodecSpecification, &out.CodecSpecification + *out = new(string) + **out = **in + } + if in.ConstantIv != nil { + in, out := &in.ConstantIv, &out.ConstantIv + *out = new(string) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(HlsGroupSettingsDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryStructure != nil { + in, out := &in.DirectoryStructure, &out.DirectoryStructure + *out = new(string) + **out = **in + } + if in.DiscontinuityTags != nil { + in, out := &in.DiscontinuityTags, &out.DiscontinuityTags + *out = new(string) + **out = **in + } + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.HlsCdnSettings != nil { + in, out := &in.HlsCdnSettings, &out.HlsCdnSettings + *out = make([]HlsCdnSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HlsId3SegmentTagging != nil { + in, out := &in.HlsId3SegmentTagging, &out.HlsId3SegmentTagging + *out = new(string) + **out = **in + } + if in.IframeOnlyPlaylists != nil { + in, out := &in.IframeOnlyPlaylists, &out.IframeOnlyPlaylists + *out = new(string) + **out = **in + } + if in.IncompleteSegmentBehavior != nil { + in, out := &in.IncompleteSegmentBehavior, &out.IncompleteSegmentBehavior + *out = new(string) + **out = **in + } + if in.IndexNSegments != nil { + in, out := &in.IndexNSegments, &out.IndexNSegments + *out = new(float64) + **out = **in + } + if in.InputLossAction != nil { + in, out := &in.InputLossAction, 
&out.InputLossAction + *out = new(string) + **out = **in + } + if in.IvInManifest != nil { + in, out := &in.IvInManifest, &out.IvInManifest + *out = new(string) + **out = **in + } + if in.IvSource != nil { + in, out := &in.IvSource, &out.IvSource + *out = new(string) + **out = **in + } + if in.KeepSegments != nil { + in, out := &in.KeepSegments, &out.KeepSegments + *out = new(float64) + **out = **in + } + if in.KeyFormat != nil { + in, out := &in.KeyFormat, &out.KeyFormat + *out = new(string) + **out = **in + } + if in.KeyFormatVersions != nil { + in, out := &in.KeyFormatVersions, &out.KeyFormatVersions + *out = new(string) + **out = **in + } + if in.KeyProviderSettings != nil { + in, out := &in.KeyProviderSettings, &out.KeyProviderSettings + *out = new(KeyProviderSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ManifestCompression != nil { + in, out := &in.ManifestCompression, &out.ManifestCompression + *out = new(string) + **out = **in + } + if in.ManifestDurationFormat != nil { + in, out := &in.ManifestDurationFormat, &out.ManifestDurationFormat + *out = new(string) + **out = **in + } + if in.MinSegmentLength != nil { + in, out := &in.MinSegmentLength, &out.MinSegmentLength + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.OutputSelection != nil { + in, out := &in.OutputSelection, &out.OutputSelection + *out = new(string) + **out = **in + } + if in.ProgramDateTime != nil { + in, out := &in.ProgramDateTime, &out.ProgramDateTime + *out = new(string) + **out = **in + } + if in.ProgramDateTimeClock != nil { + in, out := &in.ProgramDateTimeClock, &out.ProgramDateTimeClock + *out = new(string) + **out = **in + } + if in.ProgramDateTimePeriod != nil { + in, out := &in.ProgramDateTimePeriod, &out.ProgramDateTimePeriod + *out = new(float64) + **out = **in + } + if in.RedundantManifest != nil { + in, out := &in.RedundantManifest, &out.RedundantManifest + *out = 
new(string) + **out = **in + } + if in.SegmentLength != nil { + in, out := &in.SegmentLength, &out.SegmentLength + *out = new(float64) + **out = **in + } + if in.SegmentsPerSubdirectory != nil { + in, out := &in.SegmentsPerSubdirectory, &out.SegmentsPerSubdirectory + *out = new(float64) + **out = **in + } + if in.StreamInfResolution != nil { + in, out := &in.StreamInfResolution, &out.StreamInfResolution + *out = new(string) + **out = **in + } + if in.TSFileMode != nil { + in, out := &in.TSFileMode, &out.TSFileMode + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Frame != nil { + in, out := &in.TimedMetadataId3Frame, &out.TimedMetadataId3Frame + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Period != nil { + in, out := &in.TimedMetadataId3Period, &out.TimedMetadataId3Period + *out = new(float64) + **out = **in + } + if in.TimestampDeltaMilliseconds != nil { + in, out := &in.TimestampDeltaMilliseconds, &out.TimestampDeltaMilliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsGroupSettingsParameters. +func (in *HlsGroupSettingsParameters) DeepCopy() *HlsGroupSettingsParameters { + if in == nil { + return nil + } + out := new(HlsGroupSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsInputSettingsInitParameters) DeepCopyInto(out *HlsInputSettingsInitParameters) { + *out = *in + if in.Bandwidth != nil { + in, out := &in.Bandwidth, &out.Bandwidth + *out = new(float64) + **out = **in + } + if in.BufferSegments != nil { + in, out := &in.BufferSegments, &out.BufferSegments + *out = new(float64) + **out = **in + } + if in.Retries != nil { + in, out := &in.Retries, &out.Retries + *out = new(float64) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(float64) + **out = **in + } + if in.Scte35Source != nil { + in, out := &in.Scte35Source, &out.Scte35Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsInputSettingsInitParameters. +func (in *HlsInputSettingsInitParameters) DeepCopy() *HlsInputSettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsInputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsInputSettingsObservation) DeepCopyInto(out *HlsInputSettingsObservation) { + *out = *in + if in.Bandwidth != nil { + in, out := &in.Bandwidth, &out.Bandwidth + *out = new(float64) + **out = **in + } + if in.BufferSegments != nil { + in, out := &in.BufferSegments, &out.BufferSegments + *out = new(float64) + **out = **in + } + if in.Retries != nil { + in, out := &in.Retries, &out.Retries + *out = new(float64) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(float64) + **out = **in + } + if in.Scte35Source != nil { + in, out := &in.Scte35Source, &out.Scte35Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsInputSettingsObservation. 
+func (in *HlsInputSettingsObservation) DeepCopy() *HlsInputSettingsObservation { + if in == nil { + return nil + } + out := new(HlsInputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsInputSettingsParameters) DeepCopyInto(out *HlsInputSettingsParameters) { + *out = *in + if in.Bandwidth != nil { + in, out := &in.Bandwidth, &out.Bandwidth + *out = new(float64) + **out = **in + } + if in.BufferSegments != nil { + in, out := &in.BufferSegments, &out.BufferSegments + *out = new(float64) + **out = **in + } + if in.Retries != nil { + in, out := &in.Retries, &out.Retries + *out = new(float64) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(float64) + **out = **in + } + if in.Scte35Source != nil { + in, out := &in.Scte35Source, &out.Scte35Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsInputSettingsParameters. +func (in *HlsInputSettingsParameters) DeepCopy() *HlsInputSettingsParameters { + if in == nil { + return nil + } + out := new(HlsInputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsMediaStoreSettingsInitParameters) DeepCopyInto(out *HlsMediaStoreSettingsInitParameters) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.MediaStoreStorageClass != nil { + in, out := &in.MediaStoreStorageClass, &out.MediaStoreStorageClass + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsMediaStoreSettingsInitParameters. +func (in *HlsMediaStoreSettingsInitParameters) DeepCopy() *HlsMediaStoreSettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsMediaStoreSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsMediaStoreSettingsObservation) DeepCopyInto(out *HlsMediaStoreSettingsObservation) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.MediaStoreStorageClass != nil { + in, out := &in.MediaStoreStorageClass, &out.MediaStoreStorageClass + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsMediaStoreSettingsObservation. +func (in *HlsMediaStoreSettingsObservation) DeepCopy() *HlsMediaStoreSettingsObservation { + if in == nil { + return nil + } + out := new(HlsMediaStoreSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsMediaStoreSettingsParameters) DeepCopyInto(out *HlsMediaStoreSettingsParameters) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.MediaStoreStorageClass != nil { + in, out := &in.MediaStoreStorageClass, &out.MediaStoreStorageClass + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsMediaStoreSettingsParameters. +func (in *HlsMediaStoreSettingsParameters) DeepCopy() *HlsMediaStoreSettingsParameters { + if in == nil { + return nil + } + out := new(HlsMediaStoreSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsOutputSettingsInitParameters) DeepCopyInto(out *HlsOutputSettingsInitParameters) { + *out = *in + if in.H265PackagingType != nil { + in, out := &in.H265PackagingType, &out.H265PackagingType + *out = new(string) + **out = **in + } + if in.HlsSettings != nil { + in, out := &in.HlsSettings, &out.HlsSettings + *out = new(HlsSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } + if in.SegmentModifier != nil { + in, out := &in.SegmentModifier, &out.SegmentModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsOutputSettingsInitParameters. 
+func (in *HlsOutputSettingsInitParameters) DeepCopy() *HlsOutputSettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsOutputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsOutputSettingsObservation) DeepCopyInto(out *HlsOutputSettingsObservation) { + *out = *in + if in.H265PackagingType != nil { + in, out := &in.H265PackagingType, &out.H265PackagingType + *out = new(string) + **out = **in + } + if in.HlsSettings != nil { + in, out := &in.HlsSettings, &out.HlsSettings + *out = new(HlsSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } + if in.SegmentModifier != nil { + in, out := &in.SegmentModifier, &out.SegmentModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsOutputSettingsObservation. +func (in *HlsOutputSettingsObservation) DeepCopy() *HlsOutputSettingsObservation { + if in == nil { + return nil + } + out := new(HlsOutputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsOutputSettingsParameters) DeepCopyInto(out *HlsOutputSettingsParameters) { + *out = *in + if in.H265PackagingType != nil { + in, out := &in.H265PackagingType, &out.H265PackagingType + *out = new(string) + **out = **in + } + if in.HlsSettings != nil { + in, out := &in.HlsSettings, &out.HlsSettings + *out = new(HlsSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } + if in.SegmentModifier != nil { + in, out := &in.SegmentModifier, &out.SegmentModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsOutputSettingsParameters. +func (in *HlsOutputSettingsParameters) DeepCopy() *HlsOutputSettingsParameters { + if in == nil { + return nil + } + out := new(HlsOutputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsS3SettingsInitParameters) DeepCopyInto(out *HlsS3SettingsInitParameters) { + *out = *in + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsS3SettingsInitParameters. +func (in *HlsS3SettingsInitParameters) DeepCopy() *HlsS3SettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsS3SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsS3SettingsObservation) DeepCopyInto(out *HlsS3SettingsObservation) { + *out = *in + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsS3SettingsObservation. +func (in *HlsS3SettingsObservation) DeepCopy() *HlsS3SettingsObservation { + if in == nil { + return nil + } + out := new(HlsS3SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HlsS3SettingsParameters) DeepCopyInto(out *HlsS3SettingsParameters) { + *out = *in + if in.CannedACL != nil { + in, out := &in.CannedACL, &out.CannedACL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsS3SettingsParameters. +func (in *HlsS3SettingsParameters) DeepCopy() *HlsS3SettingsParameters { + if in == nil { + return nil + } + out := new(HlsS3SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsSettingsInitParameters) DeepCopyInto(out *HlsSettingsInitParameters) { + *out = *in + if in.AudioOnlyHlsSettings != nil { + in, out := &in.AudioOnlyHlsSettings, &out.AudioOnlyHlsSettings + *out = new(AudioOnlyHlsSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Fmp4HlsSettings != nil { + in, out := &in.Fmp4HlsSettings, &out.Fmp4HlsSettings + *out = new(Fmp4HlsSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FrameCaptureHlsSettings != nil { + in, out := &in.FrameCaptureHlsSettings, &out.FrameCaptureHlsSettings + *out = new(FrameCaptureHlsSettingsInitParameters) + **out = **in + } + if in.StandardHlsSettings != nil { + in, out := &in.StandardHlsSettings, &out.StandardHlsSettings + *out = new(StandardHlsSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsSettingsInitParameters. +func (in *HlsSettingsInitParameters) DeepCopy() *HlsSettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsSettingsObservation) DeepCopyInto(out *HlsSettingsObservation) { + *out = *in + if in.AudioOnlyHlsSettings != nil { + in, out := &in.AudioOnlyHlsSettings, &out.AudioOnlyHlsSettings + *out = new(AudioOnlyHlsSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Fmp4HlsSettings != nil { + in, out := &in.Fmp4HlsSettings, &out.Fmp4HlsSettings + *out = new(Fmp4HlsSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.FrameCaptureHlsSettings != nil { + in, out := &in.FrameCaptureHlsSettings, &out.FrameCaptureHlsSettings + *out = new(FrameCaptureHlsSettingsParameters) + **out = **in + } + if in.StandardHlsSettings != nil { + in, out := &in.StandardHlsSettings, &out.StandardHlsSettings + *out = new(StandardHlsSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsSettingsObservation. +func (in *HlsSettingsObservation) DeepCopy() *HlsSettingsObservation { + if in == nil { + return nil + } + out := new(HlsSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsSettingsParameters) DeepCopyInto(out *HlsSettingsParameters) { + *out = *in + if in.AudioOnlyHlsSettings != nil { + in, out := &in.AudioOnlyHlsSettings, &out.AudioOnlyHlsSettings + *out = new(AudioOnlyHlsSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Fmp4HlsSettings != nil { + in, out := &in.Fmp4HlsSettings, &out.Fmp4HlsSettings + *out = new(Fmp4HlsSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.FrameCaptureHlsSettings != nil { + in, out := &in.FrameCaptureHlsSettings, &out.FrameCaptureHlsSettings + *out = new(FrameCaptureHlsSettingsParameters) + **out = **in + } + if in.StandardHlsSettings != nil { + in, out := &in.StandardHlsSettings, &out.StandardHlsSettings + *out = new(StandardHlsSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsSettingsParameters. +func (in *HlsSettingsParameters) DeepCopy() *HlsSettingsParameters { + if in == nil { + return nil + } + out := new(HlsSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsWebdavSettingsInitParameters) DeepCopyInto(out *HlsWebdavSettingsInitParameters) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.HTTPTransferMode != nil { + in, out := &in.HTTPTransferMode, &out.HTTPTransferMode + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsWebdavSettingsInitParameters. +func (in *HlsWebdavSettingsInitParameters) DeepCopy() *HlsWebdavSettingsInitParameters { + if in == nil { + return nil + } + out := new(HlsWebdavSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsWebdavSettingsObservation) DeepCopyInto(out *HlsWebdavSettingsObservation) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.HTTPTransferMode != nil { + in, out := &in.HTTPTransferMode, &out.HTTPTransferMode + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsWebdavSettingsObservation. +func (in *HlsWebdavSettingsObservation) DeepCopy() *HlsWebdavSettingsObservation { + if in == nil { + return nil + } + out := new(HlsWebdavSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HlsWebdavSettingsParameters) DeepCopyInto(out *HlsWebdavSettingsParameters) { + *out = *in + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.HTTPTransferMode != nil { + in, out := &in.HTTPTransferMode, &out.HTTPTransferMode + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HlsWebdavSettingsParameters. +func (in *HlsWebdavSettingsParameters) DeepCopy() *HlsWebdavSettingsParameters { + if in == nil { + return nil + } + out := new(HlsWebdavSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Input) DeepCopyInto(out *Input) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Input. +func (in *Input) DeepCopy() *Input { + if in == nil { + return nil + } + out := new(Input) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Input) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputAttachmentsInitParameters) DeepCopyInto(out *InputAttachmentsInitParameters) { + *out = *in + if in.AutomaticInputFailoverSettings != nil { + in, out := &in.AutomaticInputFailoverSettings, &out.AutomaticInputFailoverSettings + *out = new(AutomaticInputFailoverSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputAttachmentName != nil { + in, out := &in.InputAttachmentName, &out.InputAttachmentName + *out = new(string) + **out = **in + } + if in.InputID != nil { + in, out := &in.InputID, &out.InputID + *out = new(string) + **out = **in + } + if in.InputIDRef != nil { + in, out := &in.InputIDRef, &out.InputIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InputIDSelector != nil { + in, out := &in.InputIDSelector, &out.InputIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InputSettings != nil { + in, out := &in.InputSettings, &out.InputSettings + *out = new(InputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputAttachmentsInitParameters. +func (in *InputAttachmentsInitParameters) DeepCopy() *InputAttachmentsInitParameters { + if in == nil { + return nil + } + out := new(InputAttachmentsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputAttachmentsObservation) DeepCopyInto(out *InputAttachmentsObservation) { + *out = *in + if in.AutomaticInputFailoverSettings != nil { + in, out := &in.AutomaticInputFailoverSettings, &out.AutomaticInputFailoverSettings + *out = new(AutomaticInputFailoverSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.InputAttachmentName != nil { + in, out := &in.InputAttachmentName, &out.InputAttachmentName + *out = new(string) + **out = **in + } + if in.InputID != nil { + in, out := &in.InputID, &out.InputID + *out = new(string) + **out = **in + } + if in.InputSettings != nil { + in, out := &in.InputSettings, &out.InputSettings + *out = new(InputSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputAttachmentsObservation. +func (in *InputAttachmentsObservation) DeepCopy() *InputAttachmentsObservation { + if in == nil { + return nil + } + out := new(InputAttachmentsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputAttachmentsParameters) DeepCopyInto(out *InputAttachmentsParameters) { + *out = *in + if in.AutomaticInputFailoverSettings != nil { + in, out := &in.AutomaticInputFailoverSettings, &out.AutomaticInputFailoverSettings + *out = new(AutomaticInputFailoverSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.InputAttachmentName != nil { + in, out := &in.InputAttachmentName, &out.InputAttachmentName + *out = new(string) + **out = **in + } + if in.InputID != nil { + in, out := &in.InputID, &out.InputID + *out = new(string) + **out = **in + } + if in.InputIDRef != nil { + in, out := &in.InputIDRef, &out.InputIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.InputIDSelector != nil { + in, out := &in.InputIDSelector, &out.InputIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InputSettings != nil { + in, out := &in.InputSettings, &out.InputSettings + *out = new(InputSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputAttachmentsParameters. +func (in *InputAttachmentsParameters) DeepCopy() *InputAttachmentsParameters { + if in == nil { + return nil + } + out := new(InputAttachmentsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputChannelLevelsInitParameters) DeepCopyInto(out *InputChannelLevelsInitParameters) { + *out = *in + if in.Gain != nil { + in, out := &in.Gain, &out.Gain + *out = new(float64) + **out = **in + } + if in.InputChannel != nil { + in, out := &in.InputChannel, &out.InputChannel + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputChannelLevelsInitParameters. 
+func (in *InputChannelLevelsInitParameters) DeepCopy() *InputChannelLevelsInitParameters { + if in == nil { + return nil + } + out := new(InputChannelLevelsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputChannelLevelsObservation) DeepCopyInto(out *InputChannelLevelsObservation) { + *out = *in + if in.Gain != nil { + in, out := &in.Gain, &out.Gain + *out = new(float64) + **out = **in + } + if in.InputChannel != nil { + in, out := &in.InputChannel, &out.InputChannel + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputChannelLevelsObservation. +func (in *InputChannelLevelsObservation) DeepCopy() *InputChannelLevelsObservation { + if in == nil { + return nil + } + out := new(InputChannelLevelsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputChannelLevelsParameters) DeepCopyInto(out *InputChannelLevelsParameters) { + *out = *in + if in.Gain != nil { + in, out := &in.Gain, &out.Gain + *out = new(float64) + **out = **in + } + if in.InputChannel != nil { + in, out := &in.InputChannel, &out.InputChannel + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputChannelLevelsParameters. +func (in *InputChannelLevelsParameters) DeepCopy() *InputChannelLevelsParameters { + if in == nil { + return nil + } + out := new(InputChannelLevelsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputDestinationsInitParameters) DeepCopyInto(out *InputDestinationsInitParameters) { + *out = *in + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputDestinationsInitParameters. +func (in *InputDestinationsInitParameters) DeepCopy() *InputDestinationsInitParameters { + if in == nil { + return nil + } + out := new(InputDestinationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputDestinationsObservation) DeepCopyInto(out *InputDestinationsObservation) { + *out = *in + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputDestinationsObservation. +func (in *InputDestinationsObservation) DeepCopy() *InputDestinationsObservation { + if in == nil { + return nil + } + out := new(InputDestinationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputDestinationsParameters) DeepCopyInto(out *InputDestinationsParameters) { + *out = *in + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputDestinationsParameters. +func (in *InputDestinationsParameters) DeepCopy() *InputDestinationsParameters { + if in == nil { + return nil + } + out := new(InputDestinationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputDevicesInitParameters) DeepCopyInto(out *InputDevicesInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputDevicesInitParameters. +func (in *InputDevicesInitParameters) DeepCopy() *InputDevicesInitParameters { + if in == nil { + return nil + } + out := new(InputDevicesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputDevicesObservation) DeepCopyInto(out *InputDevicesObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputDevicesObservation. +func (in *InputDevicesObservation) DeepCopy() *InputDevicesObservation { + if in == nil { + return nil + } + out := new(InputDevicesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputDevicesParameters) DeepCopyInto(out *InputDevicesParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputDevicesParameters. +func (in *InputDevicesParameters) DeepCopy() *InputDevicesParameters { + if in == nil { + return nil + } + out := new(InputDevicesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputInitParameters) DeepCopyInto(out *InputInitParameters) { + *out = *in + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]InputDestinationsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputDevices != nil { + in, out := &in.InputDevices, &out.InputDevices + *out = make([]InputDevicesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputSecurityGroups != nil { + in, out := &in.InputSecurityGroups, &out.InputSecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MediaConnectFlows != nil { + in, out := &in.MediaConnectFlows, &out.MediaConnectFlows + *out = make([]MediaConnectFlowsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]SourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, 
&out.Type + *out = new(string) + **out = **in + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(InputVPCInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputInitParameters. +func (in *InputInitParameters) DeepCopy() *InputInitParameters { + if in == nil { + return nil + } + out := new(InputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputList) DeepCopyInto(out *InputList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Input, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputList. +func (in *InputList) DeepCopy() *InputList { + if in == nil { + return nil + } + out := new(InputList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InputList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputLossBehaviorInitParameters) DeepCopyInto(out *InputLossBehaviorInitParameters) { + *out = *in + if in.BlackFrameMsec != nil { + in, out := &in.BlackFrameMsec, &out.BlackFrameMsec + *out = new(float64) + **out = **in + } + if in.InputLossImageColor != nil { + in, out := &in.InputLossImageColor, &out.InputLossImageColor + *out = new(string) + **out = **in + } + if in.InputLossImageSlate != nil { + in, out := &in.InputLossImageSlate, &out.InputLossImageSlate + *out = new(InputLossImageSlateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputLossImageType != nil { + in, out := &in.InputLossImageType, &out.InputLossImageType + *out = new(string) + **out = **in + } + if in.RepeatFrameMsec != nil { + in, out := &in.RepeatFrameMsec, &out.RepeatFrameMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLossBehaviorInitParameters. +func (in *InputLossBehaviorInitParameters) DeepCopy() *InputLossBehaviorInitParameters { + if in == nil { + return nil + } + out := new(InputLossBehaviorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputLossBehaviorObservation) DeepCopyInto(out *InputLossBehaviorObservation) { + *out = *in + if in.BlackFrameMsec != nil { + in, out := &in.BlackFrameMsec, &out.BlackFrameMsec + *out = new(float64) + **out = **in + } + if in.InputLossImageColor != nil { + in, out := &in.InputLossImageColor, &out.InputLossImageColor + *out = new(string) + **out = **in + } + if in.InputLossImageSlate != nil { + in, out := &in.InputLossImageSlate, &out.InputLossImageSlate + *out = new(InputLossImageSlateObservation) + (*in).DeepCopyInto(*out) + } + if in.InputLossImageType != nil { + in, out := &in.InputLossImageType, &out.InputLossImageType + *out = new(string) + **out = **in + } + if in.RepeatFrameMsec != nil { + in, out := &in.RepeatFrameMsec, &out.RepeatFrameMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLossBehaviorObservation. +func (in *InputLossBehaviorObservation) DeepCopy() *InputLossBehaviorObservation { + if in == nil { + return nil + } + out := new(InputLossBehaviorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputLossBehaviorParameters) DeepCopyInto(out *InputLossBehaviorParameters) { + *out = *in + if in.BlackFrameMsec != nil { + in, out := &in.BlackFrameMsec, &out.BlackFrameMsec + *out = new(float64) + **out = **in + } + if in.InputLossImageColor != nil { + in, out := &in.InputLossImageColor, &out.InputLossImageColor + *out = new(string) + **out = **in + } + if in.InputLossImageSlate != nil { + in, out := &in.InputLossImageSlate, &out.InputLossImageSlate + *out = new(InputLossImageSlateParameters) + (*in).DeepCopyInto(*out) + } + if in.InputLossImageType != nil { + in, out := &in.InputLossImageType, &out.InputLossImageType + *out = new(string) + **out = **in + } + if in.RepeatFrameMsec != nil { + in, out := &in.RepeatFrameMsec, &out.RepeatFrameMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLossBehaviorParameters. +func (in *InputLossBehaviorParameters) DeepCopy() *InputLossBehaviorParameters { + if in == nil { + return nil + } + out := new(InputLossBehaviorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputLossImageSlateInitParameters) DeepCopyInto(out *InputLossImageSlateInitParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLossImageSlateInitParameters. 
+func (in *InputLossImageSlateInitParameters) DeepCopy() *InputLossImageSlateInitParameters { + if in == nil { + return nil + } + out := new(InputLossImageSlateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputLossImageSlateObservation) DeepCopyInto(out *InputLossImageSlateObservation) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLossImageSlateObservation. +func (in *InputLossImageSlateObservation) DeepCopy() *InputLossImageSlateObservation { + if in == nil { + return nil + } + out := new(InputLossImageSlateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputLossImageSlateParameters) DeepCopyInto(out *InputLossImageSlateParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLossImageSlateParameters. 
+func (in *InputLossImageSlateParameters) DeepCopy() *InputLossImageSlateParameters { + if in == nil { + return nil + } + out := new(InputLossImageSlateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputLossSettingsInitParameters) DeepCopyInto(out *InputLossSettingsInitParameters) { + *out = *in + if in.InputLossThresholdMsec != nil { + in, out := &in.InputLossThresholdMsec, &out.InputLossThresholdMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLossSettingsInitParameters. +func (in *InputLossSettingsInitParameters) DeepCopy() *InputLossSettingsInitParameters { + if in == nil { + return nil + } + out := new(InputLossSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputLossSettingsObservation) DeepCopyInto(out *InputLossSettingsObservation) { + *out = *in + if in.InputLossThresholdMsec != nil { + in, out := &in.InputLossThresholdMsec, &out.InputLossThresholdMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLossSettingsObservation. +func (in *InputLossSettingsObservation) DeepCopy() *InputLossSettingsObservation { + if in == nil { + return nil + } + out := new(InputLossSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputLossSettingsParameters) DeepCopyInto(out *InputLossSettingsParameters) { + *out = *in + if in.InputLossThresholdMsec != nil { + in, out := &in.InputLossThresholdMsec, &out.InputLossThresholdMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputLossSettingsParameters. +func (in *InputLossSettingsParameters) DeepCopy() *InputLossSettingsParameters { + if in == nil { + return nil + } + out := new(InputLossSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputObservation) DeepCopyInto(out *InputObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AttachedChannels != nil { + in, out := &in.AttachedChannels, &out.AttachedChannels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]InputDestinationsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InputClass != nil { + in, out := &in.InputClass, &out.InputClass + *out = new(string) + **out = **in + } + if in.InputDevices != nil { + in, out := &in.InputDevices, &out.InputDevices + *out = make([]InputDevicesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputPartnerIds != nil { + in, out := &in.InputPartnerIds, &out.InputPartnerIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InputSecurityGroups 
!= nil { + in, out := &in.InputSecurityGroups, &out.InputSecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.InputSourceType != nil { + in, out := &in.InputSourceType, &out.InputSourceType + *out = new(string) + **out = **in + } + if in.MediaConnectFlows != nil { + in, out := &in.MediaConnectFlows, &out.MediaConnectFlows + *out = make([]MediaConnectFlowsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]SourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(InputVPCObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputObservation. 
+func (in *InputObservation) DeepCopy() *InputObservation { + if in == nil { + return nil + } + out := new(InputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputParameters) DeepCopyInto(out *InputParameters) { + *out = *in + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]InputDestinationsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputDevices != nil { + in, out := &in.InputDevices, &out.InputDevices + *out = make([]InputDevicesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputSecurityGroups != nil { + in, out := &in.InputSecurityGroups, &out.InputSecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MediaConnectFlows != nil { + in, out := &in.MediaConnectFlows, &out.MediaConnectFlows + *out = make([]MediaConnectFlowsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]SourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.VPC != nil { + in, out := &in.VPC, &out.VPC + *out = new(InputVPCParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputParameters. +func (in *InputParameters) DeepCopy() *InputParameters { + if in == nil { + return nil + } + out := new(InputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputSettingsInitParameters) DeepCopyInto(out *InputSettingsInitParameters) { + *out = *in + if in.AudioSelector != nil { + in, out := &in.AudioSelector, &out.AudioSelector + *out = make([]AudioSelectorInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CaptionSelector != nil { + in, out := &in.CaptionSelector, &out.CaptionSelector + *out = make([]CaptionSelectorInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeblockFilter != nil { + in, out := &in.DeblockFilter, &out.DeblockFilter + *out = new(string) + **out = **in + } + if in.DenoiseFilter != nil { + in, out := &in.DenoiseFilter, &out.DenoiseFilter + *out = new(string) + **out = **in + } + if in.FilterStrength != nil { + in, out := &in.FilterStrength, &out.FilterStrength + *out = new(float64) + **out = **in + } + if in.InputFilter != nil { + in, out := &in.InputFilter, &out.InputFilter + *out = new(string) + **out = **in + } + if in.NetworkInputSettings != nil { + in, out := &in.NetworkInputSettings, 
&out.NetworkInputSettings + *out = new(NetworkInputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(float64) + **out = **in + } + if in.Smpte2038DataPreference != nil { + in, out := &in.Smpte2038DataPreference, &out.Smpte2038DataPreference + *out = new(string) + **out = **in + } + if in.SourceEndBehavior != nil { + in, out := &in.SourceEndBehavior, &out.SourceEndBehavior + *out = new(string) + **out = **in + } + if in.VideoSelector != nil { + in, out := &in.VideoSelector, &out.VideoSelector + *out = new(VideoSelectorInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSettingsInitParameters. +func (in *InputSettingsInitParameters) DeepCopy() *InputSettingsInitParameters { + if in == nil { + return nil + } + out := new(InputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputSettingsObservation) DeepCopyInto(out *InputSettingsObservation) { + *out = *in + if in.AudioSelector != nil { + in, out := &in.AudioSelector, &out.AudioSelector + *out = make([]AudioSelectorObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CaptionSelector != nil { + in, out := &in.CaptionSelector, &out.CaptionSelector + *out = make([]CaptionSelectorObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeblockFilter != nil { + in, out := &in.DeblockFilter, &out.DeblockFilter + *out = new(string) + **out = **in + } + if in.DenoiseFilter != nil { + in, out := &in.DenoiseFilter, &out.DenoiseFilter + *out = new(string) + **out = **in + } + if in.FilterStrength != nil { + in, out := &in.FilterStrength, &out.FilterStrength + *out = new(float64) + **out = **in + } + if in.InputFilter != nil { + in, out := &in.InputFilter, &out.InputFilter + *out = new(string) + **out = **in + } + if in.NetworkInputSettings != nil { + in, out := &in.NetworkInputSettings, &out.NetworkInputSettings + *out = new(NetworkInputSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(float64) + **out = **in + } + if in.Smpte2038DataPreference != nil { + in, out := &in.Smpte2038DataPreference, &out.Smpte2038DataPreference + *out = new(string) + **out = **in + } + if in.SourceEndBehavior != nil { + in, out := &in.SourceEndBehavior, &out.SourceEndBehavior + *out = new(string) + **out = **in + } + if in.VideoSelector != nil { + in, out := &in.VideoSelector, &out.VideoSelector + *out = new(VideoSelectorObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSettingsObservation. 
+func (in *InputSettingsObservation) DeepCopy() *InputSettingsObservation { + if in == nil { + return nil + } + out := new(InputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputSettingsParameters) DeepCopyInto(out *InputSettingsParameters) { + *out = *in + if in.AudioSelector != nil { + in, out := &in.AudioSelector, &out.AudioSelector + *out = make([]AudioSelectorParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CaptionSelector != nil { + in, out := &in.CaptionSelector, &out.CaptionSelector + *out = make([]CaptionSelectorParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeblockFilter != nil { + in, out := &in.DeblockFilter, &out.DeblockFilter + *out = new(string) + **out = **in + } + if in.DenoiseFilter != nil { + in, out := &in.DenoiseFilter, &out.DenoiseFilter + *out = new(string) + **out = **in + } + if in.FilterStrength != nil { + in, out := &in.FilterStrength, &out.FilterStrength + *out = new(float64) + **out = **in + } + if in.InputFilter != nil { + in, out := &in.InputFilter, &out.InputFilter + *out = new(string) + **out = **in + } + if in.NetworkInputSettings != nil { + in, out := &in.NetworkInputSettings, &out.NetworkInputSettings + *out = new(NetworkInputSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(float64) + **out = **in + } + if in.Smpte2038DataPreference != nil { + in, out := &in.Smpte2038DataPreference, &out.Smpte2038DataPreference + *out = new(string) + **out = **in + } + if in.SourceEndBehavior != nil { + in, out := &in.SourceEndBehavior, &out.SourceEndBehavior + *out = new(string) + **out = **in + } + if in.VideoSelector != nil { + in, out := &in.VideoSelector, &out.VideoSelector + *out = new(VideoSelectorParameters) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSettingsParameters. +func (in *InputSettingsParameters) DeepCopy() *InputSettingsParameters { + if in == nil { + return nil + } + out := new(InputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputSpec) DeepCopyInto(out *InputSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSpec. +func (in *InputSpec) DeepCopy() *InputSpec { + if in == nil { + return nil + } + out := new(InputSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputSpecificationInitParameters) DeepCopyInto(out *InputSpecificationInitParameters) { + *out = *in + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.InputResolution != nil { + in, out := &in.InputResolution, &out.InputResolution + *out = new(string) + **out = **in + } + if in.MaximumBitrate != nil { + in, out := &in.MaximumBitrate, &out.MaximumBitrate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSpecificationInitParameters. +func (in *InputSpecificationInitParameters) DeepCopy() *InputSpecificationInitParameters { + if in == nil { + return nil + } + out := new(InputSpecificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputSpecificationObservation) DeepCopyInto(out *InputSpecificationObservation) { + *out = *in + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.InputResolution != nil { + in, out := &in.InputResolution, &out.InputResolution + *out = new(string) + **out = **in + } + if in.MaximumBitrate != nil { + in, out := &in.MaximumBitrate, &out.MaximumBitrate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSpecificationObservation. +func (in *InputSpecificationObservation) DeepCopy() *InputSpecificationObservation { + if in == nil { + return nil + } + out := new(InputSpecificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputSpecificationParameters) DeepCopyInto(out *InputSpecificationParameters) { + *out = *in + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = new(string) + **out = **in + } + if in.InputResolution != nil { + in, out := &in.InputResolution, &out.InputResolution + *out = new(string) + **out = **in + } + if in.MaximumBitrate != nil { + in, out := &in.MaximumBitrate, &out.MaximumBitrate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputSpecificationParameters. +func (in *InputSpecificationParameters) DeepCopy() *InputSpecificationParameters { + if in == nil { + return nil + } + out := new(InputSpecificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputStatus) DeepCopyInto(out *InputStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputStatus. +func (in *InputStatus) DeepCopy() *InputStatus { + if in == nil { + return nil + } + out := new(InputStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputVPCInitParameters) DeepCopyInto(out *InputVPCInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputVPCInitParameters. +func (in *InputVPCInitParameters) DeepCopy() *InputVPCInitParameters { + if in == nil { + return nil + } + out := new(InputVPCInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputVPCObservation) DeepCopyInto(out *InputVPCObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputVPCObservation. +func (in *InputVPCObservation) DeepCopy() *InputVPCObservation { + if in == nil { + return nil + } + out := new(InputVPCObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputVPCParameters) DeepCopyInto(out *InputVPCParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputVPCParameters. +func (in *InputVPCParameters) DeepCopy() *InputVPCParameters { + if in == nil { + return nil + } + out := new(InputVPCParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyProviderServerInitParameters) DeepCopyInto(out *KeyProviderServerInitParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyProviderServerInitParameters. +func (in *KeyProviderServerInitParameters) DeepCopy() *KeyProviderServerInitParameters { + if in == nil { + return nil + } + out := new(KeyProviderServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyProviderServerObservation) DeepCopyInto(out *KeyProviderServerObservation) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyProviderServerObservation. +func (in *KeyProviderServerObservation) DeepCopy() *KeyProviderServerObservation { + if in == nil { + return nil + } + out := new(KeyProviderServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyProviderServerParameters) DeepCopyInto(out *KeyProviderServerParameters) { + *out = *in + if in.PasswordParam != nil { + in, out := &in.PasswordParam, &out.PasswordParam + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyProviderServerParameters. +func (in *KeyProviderServerParameters) DeepCopy() *KeyProviderServerParameters { + if in == nil { + return nil + } + out := new(KeyProviderServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyProviderSettingsInitParameters) DeepCopyInto(out *KeyProviderSettingsInitParameters) { + *out = *in + if in.StaticKeySettings != nil { + in, out := &in.StaticKeySettings, &out.StaticKeySettings + *out = make([]StaticKeySettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyProviderSettingsInitParameters. +func (in *KeyProviderSettingsInitParameters) DeepCopy() *KeyProviderSettingsInitParameters { + if in == nil { + return nil + } + out := new(KeyProviderSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyProviderSettingsObservation) DeepCopyInto(out *KeyProviderSettingsObservation) { + *out = *in + if in.StaticKeySettings != nil { + in, out := &in.StaticKeySettings, &out.StaticKeySettings + *out = make([]StaticKeySettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyProviderSettingsObservation. +func (in *KeyProviderSettingsObservation) DeepCopy() *KeyProviderSettingsObservation { + if in == nil { + return nil + } + out := new(KeyProviderSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyProviderSettingsParameters) DeepCopyInto(out *KeyProviderSettingsParameters) { + *out = *in + if in.StaticKeySettings != nil { + in, out := &in.StaticKeySettings, &out.StaticKeySettings + *out = make([]StaticKeySettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyProviderSettingsParameters. +func (in *KeyProviderSettingsParameters) DeepCopy() *KeyProviderSettingsParameters { + if in == nil { + return nil + } + out := new(KeyProviderSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *M2TsSettingsDvbNitSettingsInitParameters) DeepCopyInto(out *M2TsSettingsDvbNitSettingsInitParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(float64) + **out = **in + } + if in.NetworkName != nil { + in, out := &in.NetworkName, &out.NetworkName + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsDvbNitSettingsInitParameters. +func (in *M2TsSettingsDvbNitSettingsInitParameters) DeepCopy() *M2TsSettingsDvbNitSettingsInitParameters { + if in == nil { + return nil + } + out := new(M2TsSettingsDvbNitSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *M2TsSettingsDvbNitSettingsObservation) DeepCopyInto(out *M2TsSettingsDvbNitSettingsObservation) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(float64) + **out = **in + } + if in.NetworkName != nil { + in, out := &in.NetworkName, &out.NetworkName + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsDvbNitSettingsObservation. +func (in *M2TsSettingsDvbNitSettingsObservation) DeepCopy() *M2TsSettingsDvbNitSettingsObservation { + if in == nil { + return nil + } + out := new(M2TsSettingsDvbNitSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *M2TsSettingsDvbNitSettingsParameters) DeepCopyInto(out *M2TsSettingsDvbNitSettingsParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(float64) + **out = **in + } + if in.NetworkName != nil { + in, out := &in.NetworkName, &out.NetworkName + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsDvbNitSettingsParameters. +func (in *M2TsSettingsDvbNitSettingsParameters) DeepCopy() *M2TsSettingsDvbNitSettingsParameters { + if in == nil { + return nil + } + out := new(M2TsSettingsDvbNitSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *M2TsSettingsDvbSdtSettingsInitParameters) DeepCopyInto(out *M2TsSettingsDvbSdtSettingsInitParameters) { + *out = *in + if in.OutputSdt != nil { + in, out := &in.OutputSdt, &out.OutputSdt + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceProviderName != nil { + in, out := &in.ServiceProviderName, &out.ServiceProviderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsDvbSdtSettingsInitParameters. 
+func (in *M2TsSettingsDvbSdtSettingsInitParameters) DeepCopy() *M2TsSettingsDvbSdtSettingsInitParameters { + if in == nil { + return nil + } + out := new(M2TsSettingsDvbSdtSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *M2TsSettingsDvbSdtSettingsObservation) DeepCopyInto(out *M2TsSettingsDvbSdtSettingsObservation) { + *out = *in + if in.OutputSdt != nil { + in, out := &in.OutputSdt, &out.OutputSdt + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceProviderName != nil { + in, out := &in.ServiceProviderName, &out.ServiceProviderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsDvbSdtSettingsObservation. +func (in *M2TsSettingsDvbSdtSettingsObservation) DeepCopy() *M2TsSettingsDvbSdtSettingsObservation { + if in == nil { + return nil + } + out := new(M2TsSettingsDvbSdtSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *M2TsSettingsDvbSdtSettingsParameters) DeepCopyInto(out *M2TsSettingsDvbSdtSettingsParameters) { + *out = *in + if in.OutputSdt != nil { + in, out := &in.OutputSdt, &out.OutputSdt + *out = new(string) + **out = **in + } + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceProviderName != nil { + in, out := &in.ServiceProviderName, &out.ServiceProviderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsDvbSdtSettingsParameters. +func (in *M2TsSettingsDvbSdtSettingsParameters) DeepCopy() *M2TsSettingsDvbSdtSettingsParameters { + if in == nil { + return nil + } + out := new(M2TsSettingsDvbSdtSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *M2TsSettingsDvbTdtSettingsInitParameters) DeepCopyInto(out *M2TsSettingsDvbTdtSettingsInitParameters) { + *out = *in + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsDvbTdtSettingsInitParameters. +func (in *M2TsSettingsDvbTdtSettingsInitParameters) DeepCopy() *M2TsSettingsDvbTdtSettingsInitParameters { + if in == nil { + return nil + } + out := new(M2TsSettingsDvbTdtSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *M2TsSettingsDvbTdtSettingsObservation) DeepCopyInto(out *M2TsSettingsDvbTdtSettingsObservation) { + *out = *in + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsDvbTdtSettingsObservation. +func (in *M2TsSettingsDvbTdtSettingsObservation) DeepCopy() *M2TsSettingsDvbTdtSettingsObservation { + if in == nil { + return nil + } + out := new(M2TsSettingsDvbTdtSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *M2TsSettingsDvbTdtSettingsParameters) DeepCopyInto(out *M2TsSettingsDvbTdtSettingsParameters) { + *out = *in + if in.RepInterval != nil { + in, out := &in.RepInterval, &out.RepInterval + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsDvbTdtSettingsParameters. +func (in *M2TsSettingsDvbTdtSettingsParameters) DeepCopy() *M2TsSettingsDvbTdtSettingsParameters { + if in == nil { + return nil + } + out := new(M2TsSettingsDvbTdtSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *M2TsSettingsInitParameters) DeepCopyInto(out *M2TsSettingsInitParameters) { + *out = *in + if in.AbsentInputAudioBehavior != nil { + in, out := &in.AbsentInputAudioBehavior, &out.AbsentInputAudioBehavior + *out = new(string) + **out = **in + } + if in.Arib != nil { + in, out := &in.Arib, &out.Arib + *out = new(string) + **out = **in + } + if in.AribCaptionsPid != nil { + in, out := &in.AribCaptionsPid, &out.AribCaptionsPid + *out = new(string) + **out = **in + } + if in.AribCaptionsPidControl != nil { + in, out := &in.AribCaptionsPidControl, &out.AribCaptionsPidControl + *out = new(string) + **out = **in + } + if in.AudioBufferModel != nil { + in, out := &in.AudioBufferModel, &out.AudioBufferModel + *out = new(string) + **out = **in + } + if in.AudioFramesPerPes != nil { + in, out := &in.AudioFramesPerPes, &out.AudioFramesPerPes + *out = new(float64) + **out = **in + } + if in.AudioPids != nil { + in, out := &in.AudioPids, &out.AudioPids + *out = new(string) + **out = **in + } + if in.AudioStreamType != nil { + in, out := &in.AudioStreamType, &out.AudioStreamType + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferModel != nil { + in, out := &in.BufferModel, &out.BufferModel + *out = new(string) + **out = **in + } + if in.CcDescriptor != nil { + in, out := &in.CcDescriptor, &out.CcDescriptor + *out = new(string) + **out = **in + } + if in.DvbNitSettings != nil { + in, out := &in.DvbNitSettings, &out.DvbNitSettings + *out = new(DvbNitSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbSdtSettings != nil { + in, out := &in.DvbSdtSettings, &out.DvbSdtSettings + *out = new(DvbSdtSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbSubPids != nil { + in, out := &in.DvbSubPids, &out.DvbSubPids + *out = new(string) + **out = **in + } + if in.DvbTdtSettings != nil { + in, out := &in.DvbTdtSettings, &out.DvbTdtSettings + *out = 
new(DvbTdtSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbTeletextPid != nil { + in, out := &in.DvbTeletextPid, &out.DvbTeletextPid + *out = new(string) + **out = **in + } + if in.Ebif != nil { + in, out := &in.Ebif, &out.Ebif + *out = new(string) + **out = **in + } + if in.EbpAudioInterval != nil { + in, out := &in.EbpAudioInterval, &out.EbpAudioInterval + *out = new(string) + **out = **in + } + if in.EbpLookaheadMs != nil { + in, out := &in.EbpLookaheadMs, &out.EbpLookaheadMs + *out = new(float64) + **out = **in + } + if in.EbpPlacement != nil { + in, out := &in.EbpPlacement, &out.EbpPlacement + *out = new(string) + **out = **in + } + if in.EcmPid != nil { + in, out := &in.EcmPid, &out.EcmPid + *out = new(string) + **out = **in + } + if in.EsRateInPes != nil { + in, out := &in.EsRateInPes, &out.EsRateInPes + *out = new(string) + **out = **in + } + if in.EtvPlatformPid != nil { + in, out := &in.EtvPlatformPid, &out.EtvPlatformPid + *out = new(string) + **out = **in + } + if in.EtvSignalPid != nil { + in, out := &in.EtvSignalPid, &out.EtvSignalPid + *out = new(string) + **out = **in + } + if in.FragmentTime != nil { + in, out := &in.FragmentTime, &out.FragmentTime + *out = new(float64) + **out = **in + } + if in.Klv != nil { + in, out := &in.Klv, &out.Klv + *out = new(string) + **out = **in + } + if in.KlvDataPids != nil { + in, out := &in.KlvDataPids, &out.KlvDataPids + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.NullPacketBitrate != nil { + in, out := &in.NullPacketBitrate, &out.NullPacketBitrate + *out = new(float64) + **out = **in + } + if in.PatInterval != nil { + in, out := &in.PatInterval, &out.PatInterval + *out = new(float64) + **out = **in + } + if in.PcrControl != nil { + in, out := &in.PcrControl, &out.PcrControl + *out = new(string) + **out = **in + } + if in.PcrPeriod != nil { + in, out := 
&in.PcrPeriod, &out.PcrPeriod + *out = new(float64) + **out = **in + } + if in.PcrPid != nil { + in, out := &in.PcrPid, &out.PcrPid + *out = new(string) + **out = **in + } + if in.PmtInterval != nil { + in, out := &in.PmtInterval, &out.PmtInterval + *out = new(float64) + **out = **in + } + if in.PmtPid != nil { + in, out := &in.PmtPid, &out.PmtPid + *out = new(string) + **out = **in + } + if in.ProgramNum != nil { + in, out := &in.ProgramNum, &out.ProgramNum + *out = new(float64) + **out = **in + } + if in.RateMode != nil { + in, out := &in.RateMode, &out.RateMode + *out = new(string) + **out = **in + } + if in.Scte27Pids != nil { + in, out := &in.Scte27Pids, &out.Scte27Pids + *out = new(string) + **out = **in + } + if in.Scte35Control != nil { + in, out := &in.Scte35Control, &out.Scte35Control + *out = new(string) + **out = **in + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(string) + **out = **in + } + if in.SegmentationMarkers != nil { + in, out := &in.SegmentationMarkers, &out.SegmentationMarkers + *out = new(string) + **out = **in + } + if in.SegmentationStyle != nil { + in, out := &in.SegmentationStyle, &out.SegmentationStyle + *out = new(string) + **out = **in + } + if in.SegmentationTime != nil { + in, out := &in.SegmentationTime, &out.SegmentationTime + *out = new(float64) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } + if in.TimedMetadataPid != nil { + in, out := &in.TimedMetadataPid, &out.TimedMetadataPid + *out = new(string) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.VideoPid != nil { + in, out := &in.VideoPid, &out.VideoPid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
M2TsSettingsInitParameters. +func (in *M2TsSettingsInitParameters) DeepCopy() *M2TsSettingsInitParameters { + if in == nil { + return nil + } + out := new(M2TsSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *M2TsSettingsObservation) DeepCopyInto(out *M2TsSettingsObservation) { + *out = *in + if in.AbsentInputAudioBehavior != nil { + in, out := &in.AbsentInputAudioBehavior, &out.AbsentInputAudioBehavior + *out = new(string) + **out = **in + } + if in.Arib != nil { + in, out := &in.Arib, &out.Arib + *out = new(string) + **out = **in + } + if in.AribCaptionsPid != nil { + in, out := &in.AribCaptionsPid, &out.AribCaptionsPid + *out = new(string) + **out = **in + } + if in.AribCaptionsPidControl != nil { + in, out := &in.AribCaptionsPidControl, &out.AribCaptionsPidControl + *out = new(string) + **out = **in + } + if in.AudioBufferModel != nil { + in, out := &in.AudioBufferModel, &out.AudioBufferModel + *out = new(string) + **out = **in + } + if in.AudioFramesPerPes != nil { + in, out := &in.AudioFramesPerPes, &out.AudioFramesPerPes + *out = new(float64) + **out = **in + } + if in.AudioPids != nil { + in, out := &in.AudioPids, &out.AudioPids + *out = new(string) + **out = **in + } + if in.AudioStreamType != nil { + in, out := &in.AudioStreamType, &out.AudioStreamType + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferModel != nil { + in, out := &in.BufferModel, &out.BufferModel + *out = new(string) + **out = **in + } + if in.CcDescriptor != nil { + in, out := &in.CcDescriptor, &out.CcDescriptor + *out = new(string) + **out = **in + } + if in.DvbNitSettings != nil { + in, out := &in.DvbNitSettings, &out.DvbNitSettings + *out = new(DvbNitSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DvbSdtSettings != nil { + in, 
out := &in.DvbSdtSettings, &out.DvbSdtSettings + *out = new(DvbSdtSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DvbSubPids != nil { + in, out := &in.DvbSubPids, &out.DvbSubPids + *out = new(string) + **out = **in + } + if in.DvbTdtSettings != nil { + in, out := &in.DvbTdtSettings, &out.DvbTdtSettings + *out = new(DvbTdtSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DvbTeletextPid != nil { + in, out := &in.DvbTeletextPid, &out.DvbTeletextPid + *out = new(string) + **out = **in + } + if in.Ebif != nil { + in, out := &in.Ebif, &out.Ebif + *out = new(string) + **out = **in + } + if in.EbpAudioInterval != nil { + in, out := &in.EbpAudioInterval, &out.EbpAudioInterval + *out = new(string) + **out = **in + } + if in.EbpLookaheadMs != nil { + in, out := &in.EbpLookaheadMs, &out.EbpLookaheadMs + *out = new(float64) + **out = **in + } + if in.EbpPlacement != nil { + in, out := &in.EbpPlacement, &out.EbpPlacement + *out = new(string) + **out = **in + } + if in.EcmPid != nil { + in, out := &in.EcmPid, &out.EcmPid + *out = new(string) + **out = **in + } + if in.EsRateInPes != nil { + in, out := &in.EsRateInPes, &out.EsRateInPes + *out = new(string) + **out = **in + } + if in.EtvPlatformPid != nil { + in, out := &in.EtvPlatformPid, &out.EtvPlatformPid + *out = new(string) + **out = **in + } + if in.EtvSignalPid != nil { + in, out := &in.EtvSignalPid, &out.EtvSignalPid + *out = new(string) + **out = **in + } + if in.FragmentTime != nil { + in, out := &in.FragmentTime, &out.FragmentTime + *out = new(float64) + **out = **in + } + if in.Klv != nil { + in, out := &in.Klv, &out.Klv + *out = new(string) + **out = **in + } + if in.KlvDataPids != nil { + in, out := &in.KlvDataPids, &out.KlvDataPids + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.NullPacketBitrate != nil { + in, out := &in.NullPacketBitrate, 
&out.NullPacketBitrate + *out = new(float64) + **out = **in + } + if in.PatInterval != nil { + in, out := &in.PatInterval, &out.PatInterval + *out = new(float64) + **out = **in + } + if in.PcrControl != nil { + in, out := &in.PcrControl, &out.PcrControl + *out = new(string) + **out = **in + } + if in.PcrPeriod != nil { + in, out := &in.PcrPeriod, &out.PcrPeriod + *out = new(float64) + **out = **in + } + if in.PcrPid != nil { + in, out := &in.PcrPid, &out.PcrPid + *out = new(string) + **out = **in + } + if in.PmtInterval != nil { + in, out := &in.PmtInterval, &out.PmtInterval + *out = new(float64) + **out = **in + } + if in.PmtPid != nil { + in, out := &in.PmtPid, &out.PmtPid + *out = new(string) + **out = **in + } + if in.ProgramNum != nil { + in, out := &in.ProgramNum, &out.ProgramNum + *out = new(float64) + **out = **in + } + if in.RateMode != nil { + in, out := &in.RateMode, &out.RateMode + *out = new(string) + **out = **in + } + if in.Scte27Pids != nil { + in, out := &in.Scte27Pids, &out.Scte27Pids + *out = new(string) + **out = **in + } + if in.Scte35Control != nil { + in, out := &in.Scte35Control, &out.Scte35Control + *out = new(string) + **out = **in + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(string) + **out = **in + } + if in.SegmentationMarkers != nil { + in, out := &in.SegmentationMarkers, &out.SegmentationMarkers + *out = new(string) + **out = **in + } + if in.SegmentationStyle != nil { + in, out := &in.SegmentationStyle, &out.SegmentationStyle + *out = new(string) + **out = **in + } + if in.SegmentationTime != nil { + in, out := &in.SegmentationTime, &out.SegmentationTime + *out = new(float64) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } + if in.TimedMetadataPid != nil { + in, out := &in.TimedMetadataPid, &out.TimedMetadataPid + *out = new(string) + **out = **in + } + if in.TransportStreamID 
!= nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.VideoPid != nil { + in, out := &in.VideoPid, &out.VideoPid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsObservation. +func (in *M2TsSettingsObservation) DeepCopy() *M2TsSettingsObservation { + if in == nil { + return nil + } + out := new(M2TsSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *M2TsSettingsParameters) DeepCopyInto(out *M2TsSettingsParameters) { + *out = *in + if in.AbsentInputAudioBehavior != nil { + in, out := &in.AbsentInputAudioBehavior, &out.AbsentInputAudioBehavior + *out = new(string) + **out = **in + } + if in.Arib != nil { + in, out := &in.Arib, &out.Arib + *out = new(string) + **out = **in + } + if in.AribCaptionsPid != nil { + in, out := &in.AribCaptionsPid, &out.AribCaptionsPid + *out = new(string) + **out = **in + } + if in.AribCaptionsPidControl != nil { + in, out := &in.AribCaptionsPidControl, &out.AribCaptionsPidControl + *out = new(string) + **out = **in + } + if in.AudioBufferModel != nil { + in, out := &in.AudioBufferModel, &out.AudioBufferModel + *out = new(string) + **out = **in + } + if in.AudioFramesPerPes != nil { + in, out := &in.AudioFramesPerPes, &out.AudioFramesPerPes + *out = new(float64) + **out = **in + } + if in.AudioPids != nil { + in, out := &in.AudioPids, &out.AudioPids + *out = new(string) + **out = **in + } + if in.AudioStreamType != nil { + in, out := &in.AudioStreamType, &out.AudioStreamType + *out = new(string) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferModel != nil { + in, out := &in.BufferModel, &out.BufferModel + *out = new(string) + **out = **in + } + if 
in.CcDescriptor != nil { + in, out := &in.CcDescriptor, &out.CcDescriptor + *out = new(string) + **out = **in + } + if in.DvbNitSettings != nil { + in, out := &in.DvbNitSettings, &out.DvbNitSettings + *out = new(DvbNitSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbSdtSettings != nil { + in, out := &in.DvbSdtSettings, &out.DvbSdtSettings + *out = new(DvbSdtSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbSubPids != nil { + in, out := &in.DvbSubPids, &out.DvbSubPids + *out = new(string) + **out = **in + } + if in.DvbTdtSettings != nil { + in, out := &in.DvbTdtSettings, &out.DvbTdtSettings + *out = new(DvbTdtSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DvbTeletextPid != nil { + in, out := &in.DvbTeletextPid, &out.DvbTeletextPid + *out = new(string) + **out = **in + } + if in.Ebif != nil { + in, out := &in.Ebif, &out.Ebif + *out = new(string) + **out = **in + } + if in.EbpAudioInterval != nil { + in, out := &in.EbpAudioInterval, &out.EbpAudioInterval + *out = new(string) + **out = **in + } + if in.EbpLookaheadMs != nil { + in, out := &in.EbpLookaheadMs, &out.EbpLookaheadMs + *out = new(float64) + **out = **in + } + if in.EbpPlacement != nil { + in, out := &in.EbpPlacement, &out.EbpPlacement + *out = new(string) + **out = **in + } + if in.EcmPid != nil { + in, out := &in.EcmPid, &out.EcmPid + *out = new(string) + **out = **in + } + if in.EsRateInPes != nil { + in, out := &in.EsRateInPes, &out.EsRateInPes + *out = new(string) + **out = **in + } + if in.EtvPlatformPid != nil { + in, out := &in.EtvPlatformPid, &out.EtvPlatformPid + *out = new(string) + **out = **in + } + if in.EtvSignalPid != nil { + in, out := &in.EtvSignalPid, &out.EtvSignalPid + *out = new(string) + **out = **in + } + if in.FragmentTime != nil { + in, out := &in.FragmentTime, &out.FragmentTime + *out = new(float64) + **out = **in + } + if in.Klv != nil { + in, out := &in.Klv, &out.Klv + *out = new(string) + **out = **in + } + if in.KlvDataPids != nil { + in, out 
:= &in.KlvDataPids, &out.KlvDataPids + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.NullPacketBitrate != nil { + in, out := &in.NullPacketBitrate, &out.NullPacketBitrate + *out = new(float64) + **out = **in + } + if in.PatInterval != nil { + in, out := &in.PatInterval, &out.PatInterval + *out = new(float64) + **out = **in + } + if in.PcrControl != nil { + in, out := &in.PcrControl, &out.PcrControl + *out = new(string) + **out = **in + } + if in.PcrPeriod != nil { + in, out := &in.PcrPeriod, &out.PcrPeriod + *out = new(float64) + **out = **in + } + if in.PcrPid != nil { + in, out := &in.PcrPid, &out.PcrPid + *out = new(string) + **out = **in + } + if in.PmtInterval != nil { + in, out := &in.PmtInterval, &out.PmtInterval + *out = new(float64) + **out = **in + } + if in.PmtPid != nil { + in, out := &in.PmtPid, &out.PmtPid + *out = new(string) + **out = **in + } + if in.ProgramNum != nil { + in, out := &in.ProgramNum, &out.ProgramNum + *out = new(float64) + **out = **in + } + if in.RateMode != nil { + in, out := &in.RateMode, &out.RateMode + *out = new(string) + **out = **in + } + if in.Scte27Pids != nil { + in, out := &in.Scte27Pids, &out.Scte27Pids + *out = new(string) + **out = **in + } + if in.Scte35Control != nil { + in, out := &in.Scte35Control, &out.Scte35Control + *out = new(string) + **out = **in + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(string) + **out = **in + } + if in.SegmentationMarkers != nil { + in, out := &in.SegmentationMarkers, &out.SegmentationMarkers + *out = new(string) + **out = **in + } + if in.SegmentationStyle != nil { + in, out := &in.SegmentationStyle, &out.SegmentationStyle + *out = new(string) + **out = **in + } + if in.SegmentationTime != nil { + in, out := &in.SegmentationTime, &out.SegmentationTime + *out = new(float64) + **out = **in + } + if 
in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } + if in.TimedMetadataPid != nil { + in, out := &in.TimedMetadataPid, &out.TimedMetadataPid + *out = new(string) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.VideoPid != nil { + in, out := &in.VideoPid, &out.VideoPid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M2TsSettingsParameters. +func (in *M2TsSettingsParameters) DeepCopy() *M2TsSettingsParameters { + if in == nil { + return nil + } + out := new(M2TsSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *M3U8SettingsInitParameters) DeepCopyInto(out *M3U8SettingsInitParameters) { + *out = *in + if in.AudioFramesPerPes != nil { + in, out := &in.AudioFramesPerPes, &out.AudioFramesPerPes + *out = new(float64) + **out = **in + } + if in.AudioPids != nil { + in, out := &in.AudioPids, &out.AudioPids + *out = new(string) + **out = **in + } + if in.EcmPid != nil { + in, out := &in.EcmPid, &out.EcmPid + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.PatInterval != nil { + in, out := &in.PatInterval, &out.PatInterval + *out = new(float64) + **out = **in + } + if in.PcrControl != nil { + in, out := &in.PcrControl, &out.PcrControl + *out = new(string) + **out = **in + } + if in.PcrPeriod != nil { + in, out := &in.PcrPeriod, &out.PcrPeriod + *out = new(float64) + **out = **in + } + if in.PcrPid != nil { + in, out := &in.PcrPid, &out.PcrPid + *out = new(string) + **out = **in + } + if in.PmtInterval != nil { + in, out := 
&in.PmtInterval, &out.PmtInterval + *out = new(float64) + **out = **in + } + if in.PmtPid != nil { + in, out := &in.PmtPid, &out.PmtPid + *out = new(string) + **out = **in + } + if in.ProgramNum != nil { + in, out := &in.ProgramNum, &out.ProgramNum + *out = new(float64) + **out = **in + } + if in.Scte35Behavior != nil { + in, out := &in.Scte35Behavior, &out.Scte35Behavior + *out = new(string) + **out = **in + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(string) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } + if in.TimedMetadataPid != nil { + in, out := &in.TimedMetadataPid, &out.TimedMetadataPid + *out = new(string) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.VideoPid != nil { + in, out := &in.VideoPid, &out.VideoPid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M3U8SettingsInitParameters. +func (in *M3U8SettingsInitParameters) DeepCopy() *M3U8SettingsInitParameters { + if in == nil { + return nil + } + out := new(M3U8SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *M3U8SettingsObservation) DeepCopyInto(out *M3U8SettingsObservation) { + *out = *in + if in.AudioFramesPerPes != nil { + in, out := &in.AudioFramesPerPes, &out.AudioFramesPerPes + *out = new(float64) + **out = **in + } + if in.AudioPids != nil { + in, out := &in.AudioPids, &out.AudioPids + *out = new(string) + **out = **in + } + if in.EcmPid != nil { + in, out := &in.EcmPid, &out.EcmPid + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.PatInterval != nil { + in, out := &in.PatInterval, &out.PatInterval + *out = new(float64) + **out = **in + } + if in.PcrControl != nil { + in, out := &in.PcrControl, &out.PcrControl + *out = new(string) + **out = **in + } + if in.PcrPeriod != nil { + in, out := &in.PcrPeriod, &out.PcrPeriod + *out = new(float64) + **out = **in + } + if in.PcrPid != nil { + in, out := &in.PcrPid, &out.PcrPid + *out = new(string) + **out = **in + } + if in.PmtInterval != nil { + in, out := &in.PmtInterval, &out.PmtInterval + *out = new(float64) + **out = **in + } + if in.PmtPid != nil { + in, out := &in.PmtPid, &out.PmtPid + *out = new(string) + **out = **in + } + if in.ProgramNum != nil { + in, out := &in.ProgramNum, &out.ProgramNum + *out = new(float64) + **out = **in + } + if in.Scte35Behavior != nil { + in, out := &in.Scte35Behavior, &out.Scte35Behavior + *out = new(string) + **out = **in + } + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(string) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } + if in.TimedMetadataPid != nil { + in, out := &in.TimedMetadataPid, &out.TimedMetadataPid + *out = new(string) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + 
if in.VideoPid != nil { + in, out := &in.VideoPid, &out.VideoPid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M3U8SettingsObservation. +func (in *M3U8SettingsObservation) DeepCopy() *M3U8SettingsObservation { + if in == nil { + return nil + } + out := new(M3U8SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *M3U8SettingsParameters) DeepCopyInto(out *M3U8SettingsParameters) { + *out = *in + if in.AudioFramesPerPes != nil { + in, out := &in.AudioFramesPerPes, &out.AudioFramesPerPes + *out = new(float64) + **out = **in + } + if in.AudioPids != nil { + in, out := &in.AudioPids, &out.AudioPids + *out = new(string) + **out = **in + } + if in.EcmPid != nil { + in, out := &in.EcmPid, &out.EcmPid + *out = new(string) + **out = **in + } + if in.NielsenId3Behavior != nil { + in, out := &in.NielsenId3Behavior, &out.NielsenId3Behavior + *out = new(string) + **out = **in + } + if in.PatInterval != nil { + in, out := &in.PatInterval, &out.PatInterval + *out = new(float64) + **out = **in + } + if in.PcrControl != nil { + in, out := &in.PcrControl, &out.PcrControl + *out = new(string) + **out = **in + } + if in.PcrPeriod != nil { + in, out := &in.PcrPeriod, &out.PcrPeriod + *out = new(float64) + **out = **in + } + if in.PcrPid != nil { + in, out := &in.PcrPid, &out.PcrPid + *out = new(string) + **out = **in + } + if in.PmtInterval != nil { + in, out := &in.PmtInterval, &out.PmtInterval + *out = new(float64) + **out = **in + } + if in.PmtPid != nil { + in, out := &in.PmtPid, &out.PmtPid + *out = new(string) + **out = **in + } + if in.ProgramNum != nil { + in, out := &in.ProgramNum, &out.ProgramNum + *out = new(float64) + **out = **in + } + if in.Scte35Behavior != nil { + in, out := &in.Scte35Behavior, &out.Scte35Behavior + *out = new(string) + **out = **in + 
} + if in.Scte35Pid != nil { + in, out := &in.Scte35Pid, &out.Scte35Pid + *out = new(string) + **out = **in + } + if in.TimedMetadataBehavior != nil { + in, out := &in.TimedMetadataBehavior, &out.TimedMetadataBehavior + *out = new(string) + **out = **in + } + if in.TimedMetadataPid != nil { + in, out := &in.TimedMetadataPid, &out.TimedMetadataPid + *out = new(string) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.VideoPid != nil { + in, out := &in.VideoPid, &out.VideoPid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new M3U8SettingsParameters. +func (in *M3U8SettingsParameters) DeepCopy() *M3U8SettingsParameters { + if in == nil { + return nil + } + out := new(M3U8SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceInitParameters) DeepCopyInto(out *MaintenanceInitParameters) { + *out = *in + if in.MaintenanceDay != nil { + in, out := &in.MaintenanceDay, &out.MaintenanceDay + *out = new(string) + **out = **in + } + if in.MaintenanceStartTime != nil { + in, out := &in.MaintenanceStartTime, &out.MaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceInitParameters. +func (in *MaintenanceInitParameters) DeepCopy() *MaintenanceInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceObservation) DeepCopyInto(out *MaintenanceObservation) { + *out = *in + if in.MaintenanceDay != nil { + in, out := &in.MaintenanceDay, &out.MaintenanceDay + *out = new(string) + **out = **in + } + if in.MaintenanceStartTime != nil { + in, out := &in.MaintenanceStartTime, &out.MaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceObservation. +func (in *MaintenanceObservation) DeepCopy() *MaintenanceObservation { + if in == nil { + return nil + } + out := new(MaintenanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceParameters) DeepCopyInto(out *MaintenanceParameters) { + *out = *in + if in.MaintenanceDay != nil { + in, out := &in.MaintenanceDay, &out.MaintenanceDay + *out = new(string) + **out = **in + } + if in.MaintenanceStartTime != nil { + in, out := &in.MaintenanceStartTime, &out.MaintenanceStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceParameters. +func (in *MaintenanceParameters) DeepCopy() *MaintenanceParameters { + if in == nil { + return nil + } + out := new(MaintenanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaConnectFlowsInitParameters) DeepCopyInto(out *MediaConnectFlowsInitParameters) { + *out = *in + if in.FlowArn != nil { + in, out := &in.FlowArn, &out.FlowArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaConnectFlowsInitParameters. 
+func (in *MediaConnectFlowsInitParameters) DeepCopy() *MediaConnectFlowsInitParameters { + if in == nil { + return nil + } + out := new(MediaConnectFlowsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaConnectFlowsObservation) DeepCopyInto(out *MediaConnectFlowsObservation) { + *out = *in + if in.FlowArn != nil { + in, out := &in.FlowArn, &out.FlowArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaConnectFlowsObservation. +func (in *MediaConnectFlowsObservation) DeepCopy() *MediaConnectFlowsObservation { + if in == nil { + return nil + } + out := new(MediaConnectFlowsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaConnectFlowsParameters) DeepCopyInto(out *MediaConnectFlowsParameters) { + *out = *in + if in.FlowArn != nil { + in, out := &in.FlowArn, &out.FlowArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaConnectFlowsParameters. +func (in *MediaConnectFlowsParameters) DeepCopy() *MediaConnectFlowsParameters { + if in == nil { + return nil + } + out := new(MediaConnectFlowsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MediaPackageGroupSettingsDestinationInitParameters) DeepCopyInto(out *MediaPackageGroupSettingsDestinationInitParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageGroupSettingsDestinationInitParameters. +func (in *MediaPackageGroupSettingsDestinationInitParameters) DeepCopy() *MediaPackageGroupSettingsDestinationInitParameters { + if in == nil { + return nil + } + out := new(MediaPackageGroupSettingsDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageGroupSettingsDestinationObservation) DeepCopyInto(out *MediaPackageGroupSettingsDestinationObservation) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageGroupSettingsDestinationObservation. +func (in *MediaPackageGroupSettingsDestinationObservation) DeepCopy() *MediaPackageGroupSettingsDestinationObservation { + if in == nil { + return nil + } + out := new(MediaPackageGroupSettingsDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MediaPackageGroupSettingsDestinationParameters) DeepCopyInto(out *MediaPackageGroupSettingsDestinationParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageGroupSettingsDestinationParameters. +func (in *MediaPackageGroupSettingsDestinationParameters) DeepCopy() *MediaPackageGroupSettingsDestinationParameters { + if in == nil { + return nil + } + out := new(MediaPackageGroupSettingsDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageGroupSettingsInitParameters) DeepCopyInto(out *MediaPackageGroupSettingsInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(MediaPackageGroupSettingsDestinationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageGroupSettingsInitParameters. +func (in *MediaPackageGroupSettingsInitParameters) DeepCopy() *MediaPackageGroupSettingsInitParameters { + if in == nil { + return nil + } + out := new(MediaPackageGroupSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageGroupSettingsObservation) DeepCopyInto(out *MediaPackageGroupSettingsObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(MediaPackageGroupSettingsDestinationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageGroupSettingsObservation. 
+func (in *MediaPackageGroupSettingsObservation) DeepCopy() *MediaPackageGroupSettingsObservation { + if in == nil { + return nil + } + out := new(MediaPackageGroupSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageGroupSettingsParameters) DeepCopyInto(out *MediaPackageGroupSettingsParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(MediaPackageGroupSettingsDestinationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageGroupSettingsParameters. +func (in *MediaPackageGroupSettingsParameters) DeepCopy() *MediaPackageGroupSettingsParameters { + if in == nil { + return nil + } + out := new(MediaPackageGroupSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageOutputSettingsInitParameters) DeepCopyInto(out *MediaPackageOutputSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageOutputSettingsInitParameters. +func (in *MediaPackageOutputSettingsInitParameters) DeepCopy() *MediaPackageOutputSettingsInitParameters { + if in == nil { + return nil + } + out := new(MediaPackageOutputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageOutputSettingsObservation) DeepCopyInto(out *MediaPackageOutputSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageOutputSettingsObservation. 
+func (in *MediaPackageOutputSettingsObservation) DeepCopy() *MediaPackageOutputSettingsObservation { + if in == nil { + return nil + } + out := new(MediaPackageOutputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageOutputSettingsParameters) DeepCopyInto(out *MediaPackageOutputSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageOutputSettingsParameters. +func (in *MediaPackageOutputSettingsParameters) DeepCopy() *MediaPackageOutputSettingsParameters { + if in == nil { + return nil + } + out := new(MediaPackageOutputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageSettingsInitParameters) DeepCopyInto(out *MediaPackageSettingsInitParameters) { + *out = *in + if in.ChannelID != nil { + in, out := &in.ChannelID, &out.ChannelID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageSettingsInitParameters. +func (in *MediaPackageSettingsInitParameters) DeepCopy() *MediaPackageSettingsInitParameters { + if in == nil { + return nil + } + out := new(MediaPackageSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageSettingsObservation) DeepCopyInto(out *MediaPackageSettingsObservation) { + *out = *in + if in.ChannelID != nil { + in, out := &in.ChannelID, &out.ChannelID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageSettingsObservation. 
+func (in *MediaPackageSettingsObservation) DeepCopy() *MediaPackageSettingsObservation { + if in == nil { + return nil + } + out := new(MediaPackageSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediaPackageSettingsParameters) DeepCopyInto(out *MediaPackageSettingsParameters) { + *out = *in + if in.ChannelID != nil { + in, out := &in.ChannelID, &out.ChannelID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediaPackageSettingsParameters. +func (in *MediaPackageSettingsParameters) DeepCopy() *MediaPackageSettingsParameters { + if in == nil { + return nil + } + out := new(MediaPackageSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MotionGraphicsConfigurationInitParameters) DeepCopyInto(out *MotionGraphicsConfigurationInitParameters) { + *out = *in + if in.MotionGraphicsInsertion != nil { + in, out := &in.MotionGraphicsInsertion, &out.MotionGraphicsInsertion + *out = new(string) + **out = **in + } + if in.MotionGraphicsSettings != nil { + in, out := &in.MotionGraphicsSettings, &out.MotionGraphicsSettings + *out = new(MotionGraphicsSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MotionGraphicsConfigurationInitParameters. +func (in *MotionGraphicsConfigurationInitParameters) DeepCopy() *MotionGraphicsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(MotionGraphicsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MotionGraphicsConfigurationObservation) DeepCopyInto(out *MotionGraphicsConfigurationObservation) { + *out = *in + if in.MotionGraphicsInsertion != nil { + in, out := &in.MotionGraphicsInsertion, &out.MotionGraphicsInsertion + *out = new(string) + **out = **in + } + if in.MotionGraphicsSettings != nil { + in, out := &in.MotionGraphicsSettings, &out.MotionGraphicsSettings + *out = new(MotionGraphicsSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MotionGraphicsConfigurationObservation. +func (in *MotionGraphicsConfigurationObservation) DeepCopy() *MotionGraphicsConfigurationObservation { + if in == nil { + return nil + } + out := new(MotionGraphicsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MotionGraphicsConfigurationParameters) DeepCopyInto(out *MotionGraphicsConfigurationParameters) { + *out = *in + if in.MotionGraphicsInsertion != nil { + in, out := &in.MotionGraphicsInsertion, &out.MotionGraphicsInsertion + *out = new(string) + **out = **in + } + if in.MotionGraphicsSettings != nil { + in, out := &in.MotionGraphicsSettings, &out.MotionGraphicsSettings + *out = new(MotionGraphicsSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MotionGraphicsConfigurationParameters. +func (in *MotionGraphicsConfigurationParameters) DeepCopy() *MotionGraphicsConfigurationParameters { + if in == nil { + return nil + } + out := new(MotionGraphicsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MotionGraphicsSettingsInitParameters) DeepCopyInto(out *MotionGraphicsSettingsInitParameters) { + *out = *in + if in.HTMLMotionGraphicsSettings != nil { + in, out := &in.HTMLMotionGraphicsSettings, &out.HTMLMotionGraphicsSettings + *out = new(HTMLMotionGraphicsSettingsInitParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MotionGraphicsSettingsInitParameters. +func (in *MotionGraphicsSettingsInitParameters) DeepCopy() *MotionGraphicsSettingsInitParameters { + if in == nil { + return nil + } + out := new(MotionGraphicsSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MotionGraphicsSettingsObservation) DeepCopyInto(out *MotionGraphicsSettingsObservation) { + *out = *in + if in.HTMLMotionGraphicsSettings != nil { + in, out := &in.HTMLMotionGraphicsSettings, &out.HTMLMotionGraphicsSettings + *out = new(HTMLMotionGraphicsSettingsParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MotionGraphicsSettingsObservation. +func (in *MotionGraphicsSettingsObservation) DeepCopy() *MotionGraphicsSettingsObservation { + if in == nil { + return nil + } + out := new(MotionGraphicsSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MotionGraphicsSettingsParameters) DeepCopyInto(out *MotionGraphicsSettingsParameters) { + *out = *in + if in.HTMLMotionGraphicsSettings != nil { + in, out := &in.HTMLMotionGraphicsSettings, &out.HTMLMotionGraphicsSettings + *out = new(HTMLMotionGraphicsSettingsParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MotionGraphicsSettingsParameters. 
+func (in *MotionGraphicsSettingsParameters) DeepCopy() *MotionGraphicsSettingsParameters { + if in == nil { + return nil + } + out := new(MotionGraphicsSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mp2SettingsInitParameters) DeepCopyInto(out *Mp2SettingsInitParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mp2SettingsInitParameters. +func (in *Mp2SettingsInitParameters) DeepCopy() *Mp2SettingsInitParameters { + if in == nil { + return nil + } + out := new(Mp2SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mp2SettingsObservation) DeepCopyInto(out *Mp2SettingsObservation) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mp2SettingsObservation. 
+func (in *Mp2SettingsObservation) DeepCopy() *Mp2SettingsObservation { + if in == nil { + return nil + } + out := new(Mp2SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mp2SettingsParameters) DeepCopyInto(out *Mp2SettingsParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mp2SettingsParameters. +func (in *Mp2SettingsParameters) DeepCopy() *Mp2SettingsParameters { + if in == nil { + return nil + } + out := new(Mp2SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MsSmoothGroupSettingsDestinationInitParameters) DeepCopyInto(out *MsSmoothGroupSettingsDestinationInitParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MsSmoothGroupSettingsDestinationInitParameters. +func (in *MsSmoothGroupSettingsDestinationInitParameters) DeepCopy() *MsSmoothGroupSettingsDestinationInitParameters { + if in == nil { + return nil + } + out := new(MsSmoothGroupSettingsDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MsSmoothGroupSettingsDestinationObservation) DeepCopyInto(out *MsSmoothGroupSettingsDestinationObservation) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MsSmoothGroupSettingsDestinationObservation. +func (in *MsSmoothGroupSettingsDestinationObservation) DeepCopy() *MsSmoothGroupSettingsDestinationObservation { + if in == nil { + return nil + } + out := new(MsSmoothGroupSettingsDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MsSmoothGroupSettingsDestinationParameters) DeepCopyInto(out *MsSmoothGroupSettingsDestinationParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MsSmoothGroupSettingsDestinationParameters. +func (in *MsSmoothGroupSettingsDestinationParameters) DeepCopy() *MsSmoothGroupSettingsDestinationParameters { + if in == nil { + return nil + } + out := new(MsSmoothGroupSettingsDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MsSmoothGroupSettingsInitParameters) DeepCopyInto(out *MsSmoothGroupSettingsInitParameters) { + *out = *in + if in.AcquisitionPointID != nil { + in, out := &in.AcquisitionPointID, &out.AcquisitionPointID + *out = new(string) + **out = **in + } + if in.AudioOnlyTimecodeControl != nil { + in, out := &in.AudioOnlyTimecodeControl, &out.AudioOnlyTimecodeControl + *out = new(string) + **out = **in + } + if in.CertificateMode != nil { + in, out := &in.CertificateMode, &out.CertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(MsSmoothGroupSettingsDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventID != nil { + in, out := &in.EventID, &out.EventID + *out = new(string) + **out = **in + } + if in.EventIDMode != nil { + in, out := &in.EventIDMode, &out.EventIDMode + *out = new(string) + **out = **in + } + if in.EventStopBehavior != nil { + in, out := &in.EventStopBehavior, &out.EventStopBehavior + *out = new(string) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.FragmentLength != nil { + in, out := &in.FragmentLength, &out.FragmentLength + *out = new(float64) + **out = **in + } + if in.InputLossAction != nil { + in, out := &in.InputLossAction, &out.InputLossAction + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } + if in.SegmentationMode != nil { + in, out := &in.SegmentationMode, &out.SegmentationMode + *out = new(string) + **out = **in + } + if in.SendDelayMs != nil { + 
in, out := &in.SendDelayMs, &out.SendDelayMs + *out = new(float64) + **out = **in + } + if in.SparseTrackType != nil { + in, out := &in.SparseTrackType, &out.SparseTrackType + *out = new(string) + **out = **in + } + if in.StreamManifestBehavior != nil { + in, out := &in.StreamManifestBehavior, &out.StreamManifestBehavior + *out = new(string) + **out = **in + } + if in.TimestampOffset != nil { + in, out := &in.TimestampOffset, &out.TimestampOffset + *out = new(string) + **out = **in + } + if in.TimestampOffsetMode != nil { + in, out := &in.TimestampOffsetMode, &out.TimestampOffsetMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MsSmoothGroupSettingsInitParameters. +func (in *MsSmoothGroupSettingsInitParameters) DeepCopy() *MsSmoothGroupSettingsInitParameters { + if in == nil { + return nil + } + out := new(MsSmoothGroupSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MsSmoothGroupSettingsObservation) DeepCopyInto(out *MsSmoothGroupSettingsObservation) { + *out = *in + if in.AcquisitionPointID != nil { + in, out := &in.AcquisitionPointID, &out.AcquisitionPointID + *out = new(string) + **out = **in + } + if in.AudioOnlyTimecodeControl != nil { + in, out := &in.AudioOnlyTimecodeControl, &out.AudioOnlyTimecodeControl + *out = new(string) + **out = **in + } + if in.CertificateMode != nil { + in, out := &in.CertificateMode, &out.CertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(MsSmoothGroupSettingsDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.EventID != nil { + in, out := &in.EventID, &out.EventID + *out = new(string) + **out = **in + } + if in.EventIDMode != nil { + in, out := &in.EventIDMode, &out.EventIDMode + *out = new(string) + **out = **in + } + if in.EventStopBehavior != nil { + in, out := &in.EventStopBehavior, &out.EventStopBehavior + *out = new(string) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.FragmentLength != nil { + in, out := &in.FragmentLength, &out.FragmentLength + *out = new(float64) + **out = **in + } + if in.InputLossAction != nil { + in, out := &in.InputLossAction, &out.InputLossAction + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } + if in.SegmentationMode != nil { + in, out := &in.SegmentationMode, &out.SegmentationMode + *out = new(string) + **out = **in + } + if in.SendDelayMs != nil { + in, out := 
&in.SendDelayMs, &out.SendDelayMs + *out = new(float64) + **out = **in + } + if in.SparseTrackType != nil { + in, out := &in.SparseTrackType, &out.SparseTrackType + *out = new(string) + **out = **in + } + if in.StreamManifestBehavior != nil { + in, out := &in.StreamManifestBehavior, &out.StreamManifestBehavior + *out = new(string) + **out = **in + } + if in.TimestampOffset != nil { + in, out := &in.TimestampOffset, &out.TimestampOffset + *out = new(string) + **out = **in + } + if in.TimestampOffsetMode != nil { + in, out := &in.TimestampOffsetMode, &out.TimestampOffsetMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MsSmoothGroupSettingsObservation. +func (in *MsSmoothGroupSettingsObservation) DeepCopy() *MsSmoothGroupSettingsObservation { + if in == nil { + return nil + } + out := new(MsSmoothGroupSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MsSmoothGroupSettingsParameters) DeepCopyInto(out *MsSmoothGroupSettingsParameters) { + *out = *in + if in.AcquisitionPointID != nil { + in, out := &in.AcquisitionPointID, &out.AcquisitionPointID + *out = new(string) + **out = **in + } + if in.AudioOnlyTimecodeControl != nil { + in, out := &in.AudioOnlyTimecodeControl, &out.AudioOnlyTimecodeControl + *out = new(string) + **out = **in + } + if in.CertificateMode != nil { + in, out := &in.CertificateMode, &out.CertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionRetryInterval != nil { + in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval + *out = new(float64) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(MsSmoothGroupSettingsDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.EventID != nil { + in, out := &in.EventID, &out.EventID + *out = new(string) + **out = **in + } + if in.EventIDMode != nil { + in, out := &in.EventIDMode, &out.EventIDMode + *out = new(string) + **out = **in + } + if in.EventStopBehavior != nil { + in, out := &in.EventStopBehavior, &out.EventStopBehavior + *out = new(string) + **out = **in + } + if in.FilecacheDuration != nil { + in, out := &in.FilecacheDuration, &out.FilecacheDuration + *out = new(float64) + **out = **in + } + if in.FragmentLength != nil { + in, out := &in.FragmentLength, &out.FragmentLength + *out = new(float64) + **out = **in + } + if in.InputLossAction != nil { + in, out := &in.InputLossAction, &out.InputLossAction + *out = new(string) + **out = **in + } + if in.NumRetries != nil { + in, out := &in.NumRetries, &out.NumRetries + *out = new(float64) + **out = **in + } + if in.RestartDelay != nil { + in, out := &in.RestartDelay, &out.RestartDelay + *out = new(float64) + **out = **in + } + if in.SegmentationMode != nil { + in, out := &in.SegmentationMode, &out.SegmentationMode + *out = new(string) + **out = **in + } + if in.SendDelayMs != nil { + in, out := 
&in.SendDelayMs, &out.SendDelayMs + *out = new(float64) + **out = **in + } + if in.SparseTrackType != nil { + in, out := &in.SparseTrackType, &out.SparseTrackType + *out = new(string) + **out = **in + } + if in.StreamManifestBehavior != nil { + in, out := &in.StreamManifestBehavior, &out.StreamManifestBehavior + *out = new(string) + **out = **in + } + if in.TimestampOffset != nil { + in, out := &in.TimestampOffset, &out.TimestampOffset + *out = new(string) + **out = **in + } + if in.TimestampOffsetMode != nil { + in, out := &in.TimestampOffsetMode, &out.TimestampOffsetMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MsSmoothGroupSettingsParameters. +func (in *MsSmoothGroupSettingsParameters) DeepCopy() *MsSmoothGroupSettingsParameters { + if in == nil { + return nil + } + out := new(MsSmoothGroupSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MsSmoothOutputSettingsInitParameters) DeepCopyInto(out *MsSmoothOutputSettingsInitParameters) { + *out = *in + if in.H265PackagingType != nil { + in, out := &in.H265PackagingType, &out.H265PackagingType + *out = new(string) + **out = **in + } + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MsSmoothOutputSettingsInitParameters. +func (in *MsSmoothOutputSettingsInitParameters) DeepCopy() *MsSmoothOutputSettingsInitParameters { + if in == nil { + return nil + } + out := new(MsSmoothOutputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MsSmoothOutputSettingsObservation) DeepCopyInto(out *MsSmoothOutputSettingsObservation) { + *out = *in + if in.H265PackagingType != nil { + in, out := &in.H265PackagingType, &out.H265PackagingType + *out = new(string) + **out = **in + } + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MsSmoothOutputSettingsObservation. +func (in *MsSmoothOutputSettingsObservation) DeepCopy() *MsSmoothOutputSettingsObservation { + if in == nil { + return nil + } + out := new(MsSmoothOutputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MsSmoothOutputSettingsParameters) DeepCopyInto(out *MsSmoothOutputSettingsParameters) { + *out = *in + if in.H265PackagingType != nil { + in, out := &in.H265PackagingType, &out.H265PackagingType + *out = new(string) + **out = **in + } + if in.NameModifier != nil { + in, out := &in.NameModifier, &out.NameModifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MsSmoothOutputSettingsParameters. +func (in *MsSmoothOutputSettingsParameters) DeepCopy() *MsSmoothOutputSettingsParameters { + if in == nil { + return nil + } + out := new(MsSmoothOutputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Multiplex) DeepCopyInto(out *Multiplex) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Multiplex. 
+func (in *Multiplex) DeepCopy() *Multiplex { + if in == nil { + return nil + } + out := new(Multiplex) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Multiplex) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexGroupSettingsInitParameters) DeepCopyInto(out *MultiplexGroupSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexGroupSettingsInitParameters. +func (in *MultiplexGroupSettingsInitParameters) DeepCopy() *MultiplexGroupSettingsInitParameters { + if in == nil { + return nil + } + out := new(MultiplexGroupSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexGroupSettingsObservation) DeepCopyInto(out *MultiplexGroupSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexGroupSettingsObservation. +func (in *MultiplexGroupSettingsObservation) DeepCopy() *MultiplexGroupSettingsObservation { + if in == nil { + return nil + } + out := new(MultiplexGroupSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexGroupSettingsParameters) DeepCopyInto(out *MultiplexGroupSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexGroupSettingsParameters. 
+func (in *MultiplexGroupSettingsParameters) DeepCopy() *MultiplexGroupSettingsParameters { + if in == nil { + return nil + } + out := new(MultiplexGroupSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexInitParameters) DeepCopyInto(out *MultiplexInitParameters) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MultiplexSettings != nil { + in, out := &in.MultiplexSettings, &out.MultiplexSettings + *out = new(MultiplexMultiplexSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StartMultiplex != nil { + in, out := &in.StartMultiplex, &out.StartMultiplex + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexInitParameters. +func (in *MultiplexInitParameters) DeepCopy() *MultiplexInitParameters { + if in == nil { + return nil + } + out := new(MultiplexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiplexList) DeepCopyInto(out *MultiplexList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Multiplex, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexList. +func (in *MultiplexList) DeepCopy() *MultiplexList { + if in == nil { + return nil + } + out := new(MultiplexList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiplexList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexMultiplexSettingsInitParameters) DeepCopyInto(out *MultiplexMultiplexSettingsInitParameters) { + *out = *in + if in.MaximumVideoBufferDelayMilliseconds != nil { + in, out := &in.MaximumVideoBufferDelayMilliseconds, &out.MaximumVideoBufferDelayMilliseconds + *out = new(float64) + **out = **in + } + if in.TransportStreamBitrate != nil { + in, out := &in.TransportStreamBitrate, &out.TransportStreamBitrate + *out = new(float64) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.TransportStreamReservedBitrate != nil { + in, out := &in.TransportStreamReservedBitrate, &out.TransportStreamReservedBitrate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexMultiplexSettingsInitParameters. 
+func (in *MultiplexMultiplexSettingsInitParameters) DeepCopy() *MultiplexMultiplexSettingsInitParameters { + if in == nil { + return nil + } + out := new(MultiplexMultiplexSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexMultiplexSettingsObservation) DeepCopyInto(out *MultiplexMultiplexSettingsObservation) { + *out = *in + if in.MaximumVideoBufferDelayMilliseconds != nil { + in, out := &in.MaximumVideoBufferDelayMilliseconds, &out.MaximumVideoBufferDelayMilliseconds + *out = new(float64) + **out = **in + } + if in.TransportStreamBitrate != nil { + in, out := &in.TransportStreamBitrate, &out.TransportStreamBitrate + *out = new(float64) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.TransportStreamReservedBitrate != nil { + in, out := &in.TransportStreamReservedBitrate, &out.TransportStreamReservedBitrate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexMultiplexSettingsObservation. +func (in *MultiplexMultiplexSettingsObservation) DeepCopy() *MultiplexMultiplexSettingsObservation { + if in == nil { + return nil + } + out := new(MultiplexMultiplexSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiplexMultiplexSettingsParameters) DeepCopyInto(out *MultiplexMultiplexSettingsParameters) { + *out = *in + if in.MaximumVideoBufferDelayMilliseconds != nil { + in, out := &in.MaximumVideoBufferDelayMilliseconds, &out.MaximumVideoBufferDelayMilliseconds + *out = new(float64) + **out = **in + } + if in.TransportStreamBitrate != nil { + in, out := &in.TransportStreamBitrate, &out.TransportStreamBitrate + *out = new(float64) + **out = **in + } + if in.TransportStreamID != nil { + in, out := &in.TransportStreamID, &out.TransportStreamID + *out = new(float64) + **out = **in + } + if in.TransportStreamReservedBitrate != nil { + in, out := &in.TransportStreamReservedBitrate, &out.TransportStreamReservedBitrate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexMultiplexSettingsParameters. +func (in *MultiplexMultiplexSettingsParameters) DeepCopy() *MultiplexMultiplexSettingsParameters { + if in == nil { + return nil + } + out := new(MultiplexMultiplexSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiplexObservation) DeepCopyInto(out *MultiplexObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MultiplexSettings != nil { + in, out := &in.MultiplexSettings, &out.MultiplexSettings + *out = new(MultiplexMultiplexSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StartMultiplex != nil { + in, out := &in.StartMultiplex, &out.StartMultiplex + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexObservation. +func (in *MultiplexObservation) DeepCopy() *MultiplexObservation { + if in == nil { + return nil + } + out := new(MultiplexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiplexOutputSettingsDestinationInitParameters) DeepCopyInto(out *MultiplexOutputSettingsDestinationInitParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexOutputSettingsDestinationInitParameters. +func (in *MultiplexOutputSettingsDestinationInitParameters) DeepCopy() *MultiplexOutputSettingsDestinationInitParameters { + if in == nil { + return nil + } + out := new(MultiplexOutputSettingsDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexOutputSettingsDestinationObservation) DeepCopyInto(out *MultiplexOutputSettingsDestinationObservation) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexOutputSettingsDestinationObservation. +func (in *MultiplexOutputSettingsDestinationObservation) DeepCopy() *MultiplexOutputSettingsDestinationObservation { + if in == nil { + return nil + } + out := new(MultiplexOutputSettingsDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexOutputSettingsDestinationParameters) DeepCopyInto(out *MultiplexOutputSettingsDestinationParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexOutputSettingsDestinationParameters. 
+func (in *MultiplexOutputSettingsDestinationParameters) DeepCopy() *MultiplexOutputSettingsDestinationParameters { + if in == nil { + return nil + } + out := new(MultiplexOutputSettingsDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexOutputSettingsInitParameters) DeepCopyInto(out *MultiplexOutputSettingsInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(MultiplexOutputSettingsDestinationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexOutputSettingsInitParameters. +func (in *MultiplexOutputSettingsInitParameters) DeepCopy() *MultiplexOutputSettingsInitParameters { + if in == nil { + return nil + } + out := new(MultiplexOutputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexOutputSettingsObservation) DeepCopyInto(out *MultiplexOutputSettingsObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(MultiplexOutputSettingsDestinationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexOutputSettingsObservation. +func (in *MultiplexOutputSettingsObservation) DeepCopy() *MultiplexOutputSettingsObservation { + if in == nil { + return nil + } + out := new(MultiplexOutputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiplexOutputSettingsParameters) DeepCopyInto(out *MultiplexOutputSettingsParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(MultiplexOutputSettingsDestinationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexOutputSettingsParameters. +func (in *MultiplexOutputSettingsParameters) DeepCopy() *MultiplexOutputSettingsParameters { + if in == nil { + return nil + } + out := new(MultiplexOutputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexParameters) DeepCopyInto(out *MultiplexParameters) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MultiplexSettings != nil { + in, out := &in.MultiplexSettings, &out.MultiplexSettings + *out = new(MultiplexMultiplexSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StartMultiplex != nil { + in, out := &in.StartMultiplex, &out.StartMultiplex + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
MultiplexParameters. +func (in *MultiplexParameters) DeepCopy() *MultiplexParameters { + if in == nil { + return nil + } + out := new(MultiplexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexSettingsInitParameters) DeepCopyInto(out *MultiplexSettingsInitParameters) { + *out = *in + if in.MultiplexID != nil { + in, out := &in.MultiplexID, &out.MultiplexID + *out = new(string) + **out = **in + } + if in.ProgramName != nil { + in, out := &in.ProgramName, &out.ProgramName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexSettingsInitParameters. +func (in *MultiplexSettingsInitParameters) DeepCopy() *MultiplexSettingsInitParameters { + if in == nil { + return nil + } + out := new(MultiplexSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexSettingsObservation) DeepCopyInto(out *MultiplexSettingsObservation) { + *out = *in + if in.MultiplexID != nil { + in, out := &in.MultiplexID, &out.MultiplexID + *out = new(string) + **out = **in + } + if in.ProgramName != nil { + in, out := &in.ProgramName, &out.ProgramName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexSettingsObservation. +func (in *MultiplexSettingsObservation) DeepCopy() *MultiplexSettingsObservation { + if in == nil { + return nil + } + out := new(MultiplexSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiplexSettingsParameters) DeepCopyInto(out *MultiplexSettingsParameters) { + *out = *in + if in.MultiplexID != nil { + in, out := &in.MultiplexID, &out.MultiplexID + *out = new(string) + **out = **in + } + if in.ProgramName != nil { + in, out := &in.ProgramName, &out.ProgramName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexSettingsParameters. +func (in *MultiplexSettingsParameters) DeepCopy() *MultiplexSettingsParameters { + if in == nil { + return nil + } + out := new(MultiplexSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexSpec) DeepCopyInto(out *MultiplexSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexSpec. +func (in *MultiplexSpec) DeepCopy() *MultiplexSpec { + if in == nil { + return nil + } + out := new(MultiplexSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiplexStatus) DeepCopyInto(out *MultiplexStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiplexStatus. +func (in *MultiplexStatus) DeepCopy() *MultiplexStatus { + if in == nil { + return nil + } + out := new(MultiplexStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInputSettingsInitParameters) DeepCopyInto(out *NetworkInputSettingsInitParameters) { + *out = *in + if in.HlsInputSettings != nil { + in, out := &in.HlsInputSettings, &out.HlsInputSettings + *out = new(HlsInputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServerValidation != nil { + in, out := &in.ServerValidation, &out.ServerValidation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInputSettingsInitParameters. +func (in *NetworkInputSettingsInitParameters) DeepCopy() *NetworkInputSettingsInitParameters { + if in == nil { + return nil + } + out := new(NetworkInputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInputSettingsObservation) DeepCopyInto(out *NetworkInputSettingsObservation) { + *out = *in + if in.HlsInputSettings != nil { + in, out := &in.HlsInputSettings, &out.HlsInputSettings + *out = new(HlsInputSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ServerValidation != nil { + in, out := &in.ServerValidation, &out.ServerValidation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInputSettingsObservation. +func (in *NetworkInputSettingsObservation) DeepCopy() *NetworkInputSettingsObservation { + if in == nil { + return nil + } + out := new(NetworkInputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInputSettingsParameters) DeepCopyInto(out *NetworkInputSettingsParameters) { + *out = *in + if in.HlsInputSettings != nil { + in, out := &in.HlsInputSettings, &out.HlsInputSettings + *out = new(HlsInputSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ServerValidation != nil { + in, out := &in.ServerValidation, &out.ServerValidation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInputSettingsParameters. +func (in *NetworkInputSettingsParameters) DeepCopy() *NetworkInputSettingsParameters { + if in == nil { + return nil + } + out := new(NetworkInputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NielsenCbetSettingsInitParameters) DeepCopyInto(out *NielsenCbetSettingsInitParameters) { + *out = *in + if in.CbetCheckDigitString != nil { + in, out := &in.CbetCheckDigitString, &out.CbetCheckDigitString + *out = new(string) + **out = **in + } + if in.CbetStepaside != nil { + in, out := &in.CbetStepaside, &out.CbetStepaside + *out = new(string) + **out = **in + } + if in.Csid != nil { + in, out := &in.Csid, &out.Csid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenCbetSettingsInitParameters. +func (in *NielsenCbetSettingsInitParameters) DeepCopy() *NielsenCbetSettingsInitParameters { + if in == nil { + return nil + } + out := new(NielsenCbetSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NielsenCbetSettingsObservation) DeepCopyInto(out *NielsenCbetSettingsObservation) { + *out = *in + if in.CbetCheckDigitString != nil { + in, out := &in.CbetCheckDigitString, &out.CbetCheckDigitString + *out = new(string) + **out = **in + } + if in.CbetStepaside != nil { + in, out := &in.CbetStepaside, &out.CbetStepaside + *out = new(string) + **out = **in + } + if in.Csid != nil { + in, out := &in.Csid, &out.Csid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenCbetSettingsObservation. +func (in *NielsenCbetSettingsObservation) DeepCopy() *NielsenCbetSettingsObservation { + if in == nil { + return nil + } + out := new(NielsenCbetSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NielsenCbetSettingsParameters) DeepCopyInto(out *NielsenCbetSettingsParameters) { + *out = *in + if in.CbetCheckDigitString != nil { + in, out := &in.CbetCheckDigitString, &out.CbetCheckDigitString + *out = new(string) + **out = **in + } + if in.CbetStepaside != nil { + in, out := &in.CbetStepaside, &out.CbetStepaside + *out = new(string) + **out = **in + } + if in.Csid != nil { + in, out := &in.Csid, &out.Csid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenCbetSettingsParameters. +func (in *NielsenCbetSettingsParameters) DeepCopy() *NielsenCbetSettingsParameters { + if in == nil { + return nil + } + out := new(NielsenCbetSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NielsenConfigurationInitParameters) DeepCopyInto(out *NielsenConfigurationInitParameters) { + *out = *in + if in.DistributorID != nil { + in, out := &in.DistributorID, &out.DistributorID + *out = new(string) + **out = **in + } + if in.NielsenPcmToId3Tagging != nil { + in, out := &in.NielsenPcmToId3Tagging, &out.NielsenPcmToId3Tagging + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenConfigurationInitParameters. +func (in *NielsenConfigurationInitParameters) DeepCopy() *NielsenConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NielsenConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NielsenConfigurationObservation) DeepCopyInto(out *NielsenConfigurationObservation) { + *out = *in + if in.DistributorID != nil { + in, out := &in.DistributorID, &out.DistributorID + *out = new(string) + **out = **in + } + if in.NielsenPcmToId3Tagging != nil { + in, out := &in.NielsenPcmToId3Tagging, &out.NielsenPcmToId3Tagging + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenConfigurationObservation. +func (in *NielsenConfigurationObservation) DeepCopy() *NielsenConfigurationObservation { + if in == nil { + return nil + } + out := new(NielsenConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NielsenConfigurationParameters) DeepCopyInto(out *NielsenConfigurationParameters) { + *out = *in + if in.DistributorID != nil { + in, out := &in.DistributorID, &out.DistributorID + *out = new(string) + **out = **in + } + if in.NielsenPcmToId3Tagging != nil { + in, out := &in.NielsenPcmToId3Tagging, &out.NielsenPcmToId3Tagging + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenConfigurationParameters. +func (in *NielsenConfigurationParameters) DeepCopy() *NielsenConfigurationParameters { + if in == nil { + return nil + } + out := new(NielsenConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NielsenNaesIiNwSettingsInitParameters) DeepCopyInto(out *NielsenNaesIiNwSettingsInitParameters) { + *out = *in + if in.CheckDigitString != nil { + in, out := &in.CheckDigitString, &out.CheckDigitString + *out = new(string) + **out = **in + } + if in.Sid != nil { + in, out := &in.Sid, &out.Sid + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenNaesIiNwSettingsInitParameters. +func (in *NielsenNaesIiNwSettingsInitParameters) DeepCopy() *NielsenNaesIiNwSettingsInitParameters { + if in == nil { + return nil + } + out := new(NielsenNaesIiNwSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NielsenNaesIiNwSettingsObservation) DeepCopyInto(out *NielsenNaesIiNwSettingsObservation) { + *out = *in + if in.CheckDigitString != nil { + in, out := &in.CheckDigitString, &out.CheckDigitString + *out = new(string) + **out = **in + } + if in.Sid != nil { + in, out := &in.Sid, &out.Sid + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenNaesIiNwSettingsObservation. +func (in *NielsenNaesIiNwSettingsObservation) DeepCopy() *NielsenNaesIiNwSettingsObservation { + if in == nil { + return nil + } + out := new(NielsenNaesIiNwSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NielsenNaesIiNwSettingsParameters) DeepCopyInto(out *NielsenNaesIiNwSettingsParameters) { + *out = *in + if in.CheckDigitString != nil { + in, out := &in.CheckDigitString, &out.CheckDigitString + *out = new(string) + **out = **in + } + if in.Sid != nil { + in, out := &in.Sid, &out.Sid + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenNaesIiNwSettingsParameters. +func (in *NielsenNaesIiNwSettingsParameters) DeepCopy() *NielsenNaesIiNwSettingsParameters { + if in == nil { + return nil + } + out := new(NielsenNaesIiNwSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NielsenWatermarksSettingsInitParameters) DeepCopyInto(out *NielsenWatermarksSettingsInitParameters) { + *out = *in + if in.NielsenCbetSettings != nil { + in, out := &in.NielsenCbetSettings, &out.NielsenCbetSettings + *out = new(NielsenCbetSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NielsenDistributionType != nil { + in, out := &in.NielsenDistributionType, &out.NielsenDistributionType + *out = new(string) + **out = **in + } + if in.NielsenNaesIiNwSettings != nil { + in, out := &in.NielsenNaesIiNwSettings, &out.NielsenNaesIiNwSettings + *out = make([]NielsenNaesIiNwSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenWatermarksSettingsInitParameters. +func (in *NielsenWatermarksSettingsInitParameters) DeepCopy() *NielsenWatermarksSettingsInitParameters { + if in == nil { + return nil + } + out := new(NielsenWatermarksSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NielsenWatermarksSettingsObservation) DeepCopyInto(out *NielsenWatermarksSettingsObservation) { + *out = *in + if in.NielsenCbetSettings != nil { + in, out := &in.NielsenCbetSettings, &out.NielsenCbetSettings + *out = new(NielsenCbetSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.NielsenDistributionType != nil { + in, out := &in.NielsenDistributionType, &out.NielsenDistributionType + *out = new(string) + **out = **in + } + if in.NielsenNaesIiNwSettings != nil { + in, out := &in.NielsenNaesIiNwSettings, &out.NielsenNaesIiNwSettings + *out = make([]NielsenNaesIiNwSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenWatermarksSettingsObservation. +func (in *NielsenWatermarksSettingsObservation) DeepCopy() *NielsenWatermarksSettingsObservation { + if in == nil { + return nil + } + out := new(NielsenWatermarksSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NielsenWatermarksSettingsParameters) DeepCopyInto(out *NielsenWatermarksSettingsParameters) { + *out = *in + if in.NielsenCbetSettings != nil { + in, out := &in.NielsenCbetSettings, &out.NielsenCbetSettings + *out = new(NielsenCbetSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.NielsenDistributionType != nil { + in, out := &in.NielsenDistributionType, &out.NielsenDistributionType + *out = new(string) + **out = **in + } + if in.NielsenNaesIiNwSettings != nil { + in, out := &in.NielsenNaesIiNwSettings, &out.NielsenNaesIiNwSettings + *out = make([]NielsenNaesIiNwSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NielsenWatermarksSettingsParameters. 
+func (in *NielsenWatermarksSettingsParameters) DeepCopy() *NielsenWatermarksSettingsParameters { + if in == nil { + return nil + } + out := new(NielsenWatermarksSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputGroupSettingsInitParameters) DeepCopyInto(out *OutputGroupSettingsInitParameters) { + *out = *in + if in.ArchiveGroupSettings != nil { + in, out := &in.ArchiveGroupSettings, &out.ArchiveGroupSettings + *out = make([]ArchiveGroupSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FrameCaptureGroupSettings != nil { + in, out := &in.FrameCaptureGroupSettings, &out.FrameCaptureGroupSettings + *out = new(FrameCaptureGroupSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsGroupSettings != nil { + in, out := &in.HlsGroupSettings, &out.HlsGroupSettings + *out = new(HlsGroupSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MediaPackageGroupSettings != nil { + in, out := &in.MediaPackageGroupSettings, &out.MediaPackageGroupSettings + *out = new(MediaPackageGroupSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MsSmoothGroupSettings != nil { + in, out := &in.MsSmoothGroupSettings, &out.MsSmoothGroupSettings + *out = new(MsSmoothGroupSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MultiplexGroupSettings != nil { + in, out := &in.MultiplexGroupSettings, &out.MultiplexGroupSettings + *out = new(MultiplexGroupSettingsInitParameters) + **out = **in + } + if in.RtmpGroupSettings != nil { + in, out := &in.RtmpGroupSettings, &out.RtmpGroupSettings + *out = new(RtmpGroupSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UDPGroupSettings != nil { + in, out := &in.UDPGroupSettings, &out.UDPGroupSettings + *out = new(UDPGroupSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new OutputGroupSettingsInitParameters. +func (in *OutputGroupSettingsInitParameters) DeepCopy() *OutputGroupSettingsInitParameters { + if in == nil { + return nil + } + out := new(OutputGroupSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputGroupSettingsObservation) DeepCopyInto(out *OutputGroupSettingsObservation) { + *out = *in + if in.ArchiveGroupSettings != nil { + in, out := &in.ArchiveGroupSettings, &out.ArchiveGroupSettings + *out = make([]ArchiveGroupSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FrameCaptureGroupSettings != nil { + in, out := &in.FrameCaptureGroupSettings, &out.FrameCaptureGroupSettings + *out = new(FrameCaptureGroupSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.HlsGroupSettings != nil { + in, out := &in.HlsGroupSettings, &out.HlsGroupSettings + *out = new(HlsGroupSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.MediaPackageGroupSettings != nil { + in, out := &in.MediaPackageGroupSettings, &out.MediaPackageGroupSettings + *out = new(MediaPackageGroupSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.MsSmoothGroupSettings != nil { + in, out := &in.MsSmoothGroupSettings, &out.MsSmoothGroupSettings + *out = new(MsSmoothGroupSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.MultiplexGroupSettings != nil { + in, out := &in.MultiplexGroupSettings, &out.MultiplexGroupSettings + *out = new(MultiplexGroupSettingsParameters) + **out = **in + } + if in.RtmpGroupSettings != nil { + in, out := &in.RtmpGroupSettings, &out.RtmpGroupSettings + *out = new(RtmpGroupSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.UDPGroupSettings != nil { + in, out := &in.UDPGroupSettings, &out.UDPGroupSettings + *out = new(UDPGroupSettingsObservation) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputGroupSettingsObservation. +func (in *OutputGroupSettingsObservation) DeepCopy() *OutputGroupSettingsObservation { + if in == nil { + return nil + } + out := new(OutputGroupSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputGroupSettingsParameters) DeepCopyInto(out *OutputGroupSettingsParameters) { + *out = *in + if in.ArchiveGroupSettings != nil { + in, out := &in.ArchiveGroupSettings, &out.ArchiveGroupSettings + *out = make([]ArchiveGroupSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FrameCaptureGroupSettings != nil { + in, out := &in.FrameCaptureGroupSettings, &out.FrameCaptureGroupSettings + *out = new(FrameCaptureGroupSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsGroupSettings != nil { + in, out := &in.HlsGroupSettings, &out.HlsGroupSettings + *out = new(HlsGroupSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.MediaPackageGroupSettings != nil { + in, out := &in.MediaPackageGroupSettings, &out.MediaPackageGroupSettings + *out = new(MediaPackageGroupSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.MsSmoothGroupSettings != nil { + in, out := &in.MsSmoothGroupSettings, &out.MsSmoothGroupSettings + *out = new(MsSmoothGroupSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.MultiplexGroupSettings != nil { + in, out := &in.MultiplexGroupSettings, &out.MultiplexGroupSettings + *out = new(MultiplexGroupSettingsParameters) + **out = **in + } + if in.RtmpGroupSettings != nil { + in, out := &in.RtmpGroupSettings, &out.RtmpGroupSettings + *out = new(RtmpGroupSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.UDPGroupSettings != nil { + in, out := &in.UDPGroupSettings, &out.UDPGroupSettings + *out = 
new(UDPGroupSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputGroupSettingsParameters. +func (in *OutputGroupSettingsParameters) DeepCopy() *OutputGroupSettingsParameters { + if in == nil { + return nil + } + out := new(OutputGroupSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputGroupsInitParameters) DeepCopyInto(out *OutputGroupsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputGroupSettings != nil { + in, out := &in.OutputGroupSettings, &out.OutputGroupSettings + *out = new(OutputGroupSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Outputs != nil { + in, out := &in.Outputs, &out.Outputs + *out = make([]OutputsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputGroupsInitParameters. +func (in *OutputGroupsInitParameters) DeepCopy() *OutputGroupsInitParameters { + if in == nil { + return nil + } + out := new(OutputGroupsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputGroupsObservation) DeepCopyInto(out *OutputGroupsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputGroupSettings != nil { + in, out := &in.OutputGroupSettings, &out.OutputGroupSettings + *out = new(OutputGroupSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Outputs != nil { + in, out := &in.Outputs, &out.Outputs + *out = make([]OutputsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputGroupsObservation. +func (in *OutputGroupsObservation) DeepCopy() *OutputGroupsObservation { + if in == nil { + return nil + } + out := new(OutputGroupsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputGroupsParameters) DeepCopyInto(out *OutputGroupsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputGroupSettings != nil { + in, out := &in.OutputGroupSettings, &out.OutputGroupSettings + *out = new(OutputGroupSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Outputs != nil { + in, out := &in.Outputs, &out.Outputs + *out = make([]OutputsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputGroupsParameters. +func (in *OutputGroupsParameters) DeepCopy() *OutputGroupsParameters { + if in == nil { + return nil + } + out := new(OutputGroupsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputRectangleInitParameters) DeepCopyInto(out *OutputRectangleInitParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(float64) + **out = **in + } + if in.LeftOffset != nil { + in, out := &in.LeftOffset, &out.LeftOffset + *out = new(float64) + **out = **in + } + if in.TopOffset != nil { + in, out := &in.TopOffset, &out.TopOffset + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputRectangleInitParameters. +func (in *OutputRectangleInitParameters) DeepCopy() *OutputRectangleInitParameters { + if in == nil { + return nil + } + out := new(OutputRectangleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputRectangleObservation) DeepCopyInto(out *OutputRectangleObservation) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(float64) + **out = **in + } + if in.LeftOffset != nil { + in, out := &in.LeftOffset, &out.LeftOffset + *out = new(float64) + **out = **in + } + if in.TopOffset != nil { + in, out := &in.TopOffset, &out.TopOffset + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputRectangleObservation. +func (in *OutputRectangleObservation) DeepCopy() *OutputRectangleObservation { + if in == nil { + return nil + } + out := new(OutputRectangleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputRectangleParameters) DeepCopyInto(out *OutputRectangleParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(float64) + **out = **in + } + if in.LeftOffset != nil { + in, out := &in.LeftOffset, &out.LeftOffset + *out = new(float64) + **out = **in + } + if in.TopOffset != nil { + in, out := &in.TopOffset, &out.TopOffset + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputRectangleParameters. +func (in *OutputRectangleParameters) DeepCopy() *OutputRectangleParameters { + if in == nil { + return nil + } + out := new(OutputRectangleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputSettingsInitParameters) DeepCopyInto(out *OutputSettingsInitParameters) { + *out = *in + if in.ArchiveOutputSettings != nil { + in, out := &in.ArchiveOutputSettings, &out.ArchiveOutputSettings + *out = new(ArchiveOutputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FrameCaptureOutputSettings != nil { + in, out := &in.FrameCaptureOutputSettings, &out.FrameCaptureOutputSettings + *out = new(FrameCaptureOutputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsOutputSettings != nil { + in, out := &in.HlsOutputSettings, &out.HlsOutputSettings + *out = new(HlsOutputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MediaPackageOutputSettings != nil { + in, out := &in.MediaPackageOutputSettings, &out.MediaPackageOutputSettings + *out = new(MediaPackageOutputSettingsInitParameters) + **out = **in + } + if in.MsSmoothOutputSettings != nil { + in, out := &in.MsSmoothOutputSettings, &out.MsSmoothOutputSettings + *out = new(MsSmoothOutputSettingsInitParameters) + 
(*in).DeepCopyInto(*out) + } + if in.MultiplexOutputSettings != nil { + in, out := &in.MultiplexOutputSettings, &out.MultiplexOutputSettings + *out = new(MultiplexOutputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RtmpOutputSettings != nil { + in, out := &in.RtmpOutputSettings, &out.RtmpOutputSettings + *out = new(RtmpOutputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UDPOutputSettings != nil { + in, out := &in.UDPOutputSettings, &out.UDPOutputSettings + *out = new(UDPOutputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputSettingsInitParameters. +func (in *OutputSettingsInitParameters) DeepCopy() *OutputSettingsInitParameters { + if in == nil { + return nil + } + out := new(OutputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputSettingsObservation) DeepCopyInto(out *OutputSettingsObservation) { + *out = *in + if in.ArchiveOutputSettings != nil { + in, out := &in.ArchiveOutputSettings, &out.ArchiveOutputSettings + *out = new(ArchiveOutputSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.FrameCaptureOutputSettings != nil { + in, out := &in.FrameCaptureOutputSettings, &out.FrameCaptureOutputSettings + *out = new(FrameCaptureOutputSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.HlsOutputSettings != nil { + in, out := &in.HlsOutputSettings, &out.HlsOutputSettings + *out = new(HlsOutputSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.MediaPackageOutputSettings != nil { + in, out := &in.MediaPackageOutputSettings, &out.MediaPackageOutputSettings + *out = new(MediaPackageOutputSettingsParameters) + **out = **in + } + if in.MsSmoothOutputSettings != nil { + in, out := &in.MsSmoothOutputSettings, &out.MsSmoothOutputSettings + *out = new(MsSmoothOutputSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.MultiplexOutputSettings != nil { + in, out := &in.MultiplexOutputSettings, &out.MultiplexOutputSettings + *out = new(MultiplexOutputSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.RtmpOutputSettings != nil { + in, out := &in.RtmpOutputSettings, &out.RtmpOutputSettings + *out = new(RtmpOutputSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.UDPOutputSettings != nil { + in, out := &in.UDPOutputSettings, &out.UDPOutputSettings + *out = new(UDPOutputSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputSettingsObservation. +func (in *OutputSettingsObservation) DeepCopy() *OutputSettingsObservation { + if in == nil { + return nil + } + out := new(OutputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputSettingsParameters) DeepCopyInto(out *OutputSettingsParameters) { + *out = *in + if in.ArchiveOutputSettings != nil { + in, out := &in.ArchiveOutputSettings, &out.ArchiveOutputSettings + *out = new(ArchiveOutputSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.FrameCaptureOutputSettings != nil { + in, out := &in.FrameCaptureOutputSettings, &out.FrameCaptureOutputSettings + *out = new(FrameCaptureOutputSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.HlsOutputSettings != nil { + in, out := &in.HlsOutputSettings, &out.HlsOutputSettings + *out = new(HlsOutputSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.MediaPackageOutputSettings != nil { + in, out := &in.MediaPackageOutputSettings, &out.MediaPackageOutputSettings + *out = new(MediaPackageOutputSettingsParameters) + **out = **in + } + if in.MsSmoothOutputSettings != nil { + in, out := &in.MsSmoothOutputSettings, &out.MsSmoothOutputSettings + *out = new(MsSmoothOutputSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.MultiplexOutputSettings != nil { + in, out := &in.MultiplexOutputSettings, &out.MultiplexOutputSettings + *out = new(MultiplexOutputSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.RtmpOutputSettings != nil { + in, out := &in.RtmpOutputSettings, &out.RtmpOutputSettings + *out = new(RtmpOutputSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.UDPOutputSettings != nil { + in, out := &in.UDPOutputSettings, &out.UDPOutputSettings + *out = new(UDPOutputSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputSettingsParameters. +func (in *OutputSettingsParameters) DeepCopy() *OutputSettingsParameters { + if in == nil { + return nil + } + out := new(OutputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsInitParameters) DeepCopyInto(out *OutputsInitParameters) { + *out = *in + if in.AudioDescriptionNames != nil { + in, out := &in.AudioDescriptionNames, &out.AudioDescriptionNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CaptionDescriptionNames != nil { + in, out := &in.CaptionDescriptionNames, &out.CaptionDescriptionNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutputName != nil { + in, out := &in.OutputName, &out.OutputName + *out = new(string) + **out = **in + } + if in.OutputSettings != nil { + in, out := &in.OutputSettings, &out.OutputSettings + *out = new(OutputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VideoDescriptionName != nil { + in, out := &in.VideoDescriptionName, &out.VideoDescriptionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsInitParameters. +func (in *OutputsInitParameters) DeepCopy() *OutputsInitParameters { + if in == nil { + return nil + } + out := new(OutputsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsObservation) DeepCopyInto(out *OutputsObservation) { + *out = *in + if in.AudioDescriptionNames != nil { + in, out := &in.AudioDescriptionNames, &out.AudioDescriptionNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CaptionDescriptionNames != nil { + in, out := &in.CaptionDescriptionNames, &out.CaptionDescriptionNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutputName != nil { + in, out := &in.OutputName, &out.OutputName + *out = new(string) + **out = **in + } + if in.OutputSettings != nil { + in, out := &in.OutputSettings, &out.OutputSettings + *out = new(OutputSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.VideoDescriptionName != nil { + in, out := &in.VideoDescriptionName, &out.VideoDescriptionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsObservation. +func (in *OutputsObservation) DeepCopy() *OutputsObservation { + if in == nil { + return nil + } + out := new(OutputsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputsParameters) DeepCopyInto(out *OutputsParameters) { + *out = *in + if in.AudioDescriptionNames != nil { + in, out := &in.AudioDescriptionNames, &out.AudioDescriptionNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CaptionDescriptionNames != nil { + in, out := &in.CaptionDescriptionNames, &out.CaptionDescriptionNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutputName != nil { + in, out := &in.OutputName, &out.OutputName + *out = new(string) + **out = **in + } + if in.OutputSettings != nil { + in, out := &in.OutputSettings, &out.OutputSettings + *out = new(OutputSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.VideoDescriptionName != nil { + in, out := &in.VideoDescriptionName, &out.VideoDescriptionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputsParameters. +func (in *OutputsParameters) DeepCopy() *OutputsParameters { + if in == nil { + return nil + } + out := new(OutputsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PassThroughSettingsInitParameters) DeepCopyInto(out *PassThroughSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PassThroughSettingsInitParameters. +func (in *PassThroughSettingsInitParameters) DeepCopy() *PassThroughSettingsInitParameters { + if in == nil { + return nil + } + out := new(PassThroughSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PassThroughSettingsObservation) DeepCopyInto(out *PassThroughSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PassThroughSettingsObservation. +func (in *PassThroughSettingsObservation) DeepCopy() *PassThroughSettingsObservation { + if in == nil { + return nil + } + out := new(PassThroughSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PassThroughSettingsParameters) DeepCopyInto(out *PassThroughSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PassThroughSettingsParameters. +func (in *PassThroughSettingsParameters) DeepCopy() *PassThroughSettingsParameters { + if in == nil { + return nil + } + out := new(PassThroughSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RawSettingsInitParameters) DeepCopyInto(out *RawSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawSettingsInitParameters. +func (in *RawSettingsInitParameters) DeepCopy() *RawSettingsInitParameters { + if in == nil { + return nil + } + out := new(RawSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RawSettingsObservation) DeepCopyInto(out *RawSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawSettingsObservation. 
+func (in *RawSettingsObservation) DeepCopy() *RawSettingsObservation { + if in == nil { + return nil + } + out := new(RawSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RawSettingsParameters) DeepCopyInto(out *RawSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawSettingsParameters. +func (in *RawSettingsParameters) DeepCopy() *RawSettingsParameters { + if in == nil { + return nil + } + out := new(RawSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rec601SettingsInitParameters) DeepCopyInto(out *Rec601SettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rec601SettingsInitParameters. +func (in *Rec601SettingsInitParameters) DeepCopy() *Rec601SettingsInitParameters { + if in == nil { + return nil + } + out := new(Rec601SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rec601SettingsObservation) DeepCopyInto(out *Rec601SettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rec601SettingsObservation. +func (in *Rec601SettingsObservation) DeepCopy() *Rec601SettingsObservation { + if in == nil { + return nil + } + out := new(Rec601SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Rec601SettingsParameters) DeepCopyInto(out *Rec601SettingsParameters) {
	*out = *in
}

// NOTE(review): the functions in this section are autogenerated deepcopy
// helpers (presumably produced by controller-gen / deepcopy-gen — TODO
// confirm); change the API types and regenerate rather than hand-editing.

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rec601SettingsParameters.
func (in *Rec601SettingsParameters) DeepCopy() *Rec601SettingsParameters {
	if in == nil {
		return nil
	}
	out := new(Rec601SettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rec709SettingsInitParameters) DeepCopyInto(out *Rec709SettingsInitParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rec709SettingsInitParameters.
func (in *Rec709SettingsInitParameters) DeepCopy() *Rec709SettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(Rec709SettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rec709SettingsObservation) DeepCopyInto(out *Rec709SettingsObservation) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rec709SettingsObservation.
func (in *Rec709SettingsObservation) DeepCopy() *Rec709SettingsObservation {
	if in == nil {
		return nil
	}
	out := new(Rec709SettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rec709SettingsParameters) DeepCopyInto(out *Rec709SettingsParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rec709SettingsParameters.
func (in *Rec709SettingsParameters) DeepCopy() *Rec709SettingsParameters {
	if in == nil {
		return nil
	}
	out := new(Rec709SettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemixSettingsInitParameters) DeepCopyInto(out *RemixSettingsInitParameters) {
	*out = *in
	if in.ChannelMappings != nil {
		in, out := &in.ChannelMappings, &out.ChannelMappings
		*out = make([]ChannelMappingsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ChannelsIn != nil {
		in, out := &in.ChannelsIn, &out.ChannelsIn
		*out = new(float64)
		**out = **in
	}
	if in.ChannelsOut != nil {
		in, out := &in.ChannelsOut, &out.ChannelsOut
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemixSettingsInitParameters.
func (in *RemixSettingsInitParameters) DeepCopy() *RemixSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(RemixSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemixSettingsObservation) DeepCopyInto(out *RemixSettingsObservation) {
	*out = *in
	if in.ChannelMappings != nil {
		in, out := &in.ChannelMappings, &out.ChannelMappings
		*out = make([]ChannelMappingsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ChannelsIn != nil {
		in, out := &in.ChannelsIn, &out.ChannelsIn
		*out = new(float64)
		**out = **in
	}
	if in.ChannelsOut != nil {
		in, out := &in.ChannelsOut, &out.ChannelsOut
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemixSettingsObservation.
func (in *RemixSettingsObservation) DeepCopy() *RemixSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(RemixSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemixSettingsParameters) DeepCopyInto(out *RemixSettingsParameters) {
	*out = *in
	if in.ChannelMappings != nil {
		in, out := &in.ChannelMappings, &out.ChannelMappings
		*out = make([]ChannelMappingsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ChannelsIn != nil {
		in, out := &in.ChannelsIn, &out.ChannelsIn
		*out = new(float64)
		**out = **in
	}
	if in.ChannelsOut != nil {
		in, out := &in.ChannelsOut, &out.ChannelsOut
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemixSettingsParameters.
func (in *RemixSettingsParameters) DeepCopy() *RemixSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(RemixSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpCaptionInfoDestinationSettingsInitParameters) DeepCopyInto(out *RtmpCaptionInfoDestinationSettingsInitParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpCaptionInfoDestinationSettingsInitParameters.
func (in *RtmpCaptionInfoDestinationSettingsInitParameters) DeepCopy() *RtmpCaptionInfoDestinationSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(RtmpCaptionInfoDestinationSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpCaptionInfoDestinationSettingsObservation) DeepCopyInto(out *RtmpCaptionInfoDestinationSettingsObservation) {
	*out = *in
}

// NOTE(review): autogenerated deepcopy helpers continue below; regenerate
// from the API types instead of hand-editing (generator assumed to be
// controller-gen / deepcopy-gen — TODO confirm).

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpCaptionInfoDestinationSettingsObservation.
func (in *RtmpCaptionInfoDestinationSettingsObservation) DeepCopy() *RtmpCaptionInfoDestinationSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(RtmpCaptionInfoDestinationSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpCaptionInfoDestinationSettingsParameters) DeepCopyInto(out *RtmpCaptionInfoDestinationSettingsParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpCaptionInfoDestinationSettingsParameters.
func (in *RtmpCaptionInfoDestinationSettingsParameters) DeepCopy() *RtmpCaptionInfoDestinationSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(RtmpCaptionInfoDestinationSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpGroupSettingsInitParameters) DeepCopyInto(out *RtmpGroupSettingsInitParameters) {
	*out = *in
	if in.AdMarkers != nil {
		in, out := &in.AdMarkers, &out.AdMarkers
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.AuthenticationScheme != nil {
		in, out := &in.AuthenticationScheme, &out.AuthenticationScheme
		*out = new(string)
		**out = **in
	}
	if in.CacheFullBehavior != nil {
		in, out := &in.CacheFullBehavior, &out.CacheFullBehavior
		*out = new(string)
		**out = **in
	}
	if in.CacheLength != nil {
		in, out := &in.CacheLength, &out.CacheLength
		*out = new(float64)
		**out = **in
	}
	if in.CaptionData != nil {
		in, out := &in.CaptionData, &out.CaptionData
		*out = new(string)
		**out = **in
	}
	if in.InputLossAction != nil {
		in, out := &in.InputLossAction, &out.InputLossAction
		*out = new(string)
		**out = **in
	}
	if in.RestartDelay != nil {
		in, out := &in.RestartDelay, &out.RestartDelay
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpGroupSettingsInitParameters.
func (in *RtmpGroupSettingsInitParameters) DeepCopy() *RtmpGroupSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(RtmpGroupSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpGroupSettingsObservation) DeepCopyInto(out *RtmpGroupSettingsObservation) {
	*out = *in
	if in.AdMarkers != nil {
		in, out := &in.AdMarkers, &out.AdMarkers
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.AuthenticationScheme != nil {
		in, out := &in.AuthenticationScheme, &out.AuthenticationScheme
		*out = new(string)
		**out = **in
	}
	if in.CacheFullBehavior != nil {
		in, out := &in.CacheFullBehavior, &out.CacheFullBehavior
		*out = new(string)
		**out = **in
	}
	if in.CacheLength != nil {
		in, out := &in.CacheLength, &out.CacheLength
		*out = new(float64)
		**out = **in
	}
	if in.CaptionData != nil {
		in, out := &in.CaptionData, &out.CaptionData
		*out = new(string)
		**out = **in
	}
	if in.InputLossAction != nil {
		in, out := &in.InputLossAction, &out.InputLossAction
		*out = new(string)
		**out = **in
	}
	if in.RestartDelay != nil {
		in, out := &in.RestartDelay, &out.RestartDelay
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpGroupSettingsObservation.
func (in *RtmpGroupSettingsObservation) DeepCopy() *RtmpGroupSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(RtmpGroupSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpGroupSettingsParameters) DeepCopyInto(out *RtmpGroupSettingsParameters) {
	*out = *in
	if in.AdMarkers != nil {
		in, out := &in.AdMarkers, &out.AdMarkers
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.AuthenticationScheme != nil {
		in, out := &in.AuthenticationScheme, &out.AuthenticationScheme
		*out = new(string)
		**out = **in
	}
	if in.CacheFullBehavior != nil {
		in, out := &in.CacheFullBehavior, &out.CacheFullBehavior
		*out = new(string)
		**out = **in
	}
	if in.CacheLength != nil {
		in, out := &in.CacheLength, &out.CacheLength
		*out = new(float64)
		**out = **in
	}
	if in.CaptionData != nil {
		in, out := &in.CaptionData, &out.CaptionData
		*out = new(string)
		**out = **in
	}
	if in.InputLossAction != nil {
		in, out := &in.InputLossAction, &out.InputLossAction
		*out = new(string)
		**out = **in
	}
	if in.RestartDelay != nil {
		in, out := &in.RestartDelay, &out.RestartDelay
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpGroupSettingsParameters.
func (in *RtmpGroupSettingsParameters) DeepCopy() *RtmpGroupSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(RtmpGroupSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpOutputSettingsDestinationInitParameters) DeepCopyInto(out *RtmpOutputSettingsDestinationInitParameters) {
	*out = *in
	if in.DestinationRefID != nil {
		in, out := &in.DestinationRefID, &out.DestinationRefID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpOutputSettingsDestinationInitParameters.
func (in *RtmpOutputSettingsDestinationInitParameters) DeepCopy() *RtmpOutputSettingsDestinationInitParameters {
	if in == nil {
		return nil
	}
	out := new(RtmpOutputSettingsDestinationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// NOTE(review): autogenerated deepcopy helpers; do not hand-edit — change
// the API types and regenerate (generator assumed to be controller-gen /
// deepcopy-gen — TODO confirm).

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpOutputSettingsDestinationObservation) DeepCopyInto(out *RtmpOutputSettingsDestinationObservation) {
	*out = *in
	if in.DestinationRefID != nil {
		in, out := &in.DestinationRefID, &out.DestinationRefID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpOutputSettingsDestinationObservation.
func (in *RtmpOutputSettingsDestinationObservation) DeepCopy() *RtmpOutputSettingsDestinationObservation {
	if in == nil {
		return nil
	}
	out := new(RtmpOutputSettingsDestinationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpOutputSettingsDestinationParameters) DeepCopyInto(out *RtmpOutputSettingsDestinationParameters) {
	*out = *in
	if in.DestinationRefID != nil {
		in, out := &in.DestinationRefID, &out.DestinationRefID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpOutputSettingsDestinationParameters.
func (in *RtmpOutputSettingsDestinationParameters) DeepCopy() *RtmpOutputSettingsDestinationParameters {
	if in == nil {
		return nil
	}
	out := new(RtmpOutputSettingsDestinationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpOutputSettingsInitParameters) DeepCopyInto(out *RtmpOutputSettingsInitParameters) {
	*out = *in
	if in.CertificateMode != nil {
		in, out := &in.CertificateMode, &out.CertificateMode
		*out = new(string)
		**out = **in
	}
	if in.ConnectionRetryInterval != nil {
		in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval
		*out = new(float64)
		**out = **in
	}
	if in.Destination != nil {
		in, out := &in.Destination, &out.Destination
		*out = new(RtmpOutputSettingsDestinationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.NumRetries != nil {
		in, out := &in.NumRetries, &out.NumRetries
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpOutputSettingsInitParameters.
func (in *RtmpOutputSettingsInitParameters) DeepCopy() *RtmpOutputSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(RtmpOutputSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpOutputSettingsObservation) DeepCopyInto(out *RtmpOutputSettingsObservation) {
	*out = *in
	if in.CertificateMode != nil {
		in, out := &in.CertificateMode, &out.CertificateMode
		*out = new(string)
		**out = **in
	}
	if in.ConnectionRetryInterval != nil {
		in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval
		*out = new(float64)
		**out = **in
	}
	if in.Destination != nil {
		in, out := &in.Destination, &out.Destination
		*out = new(RtmpOutputSettingsDestinationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.NumRetries != nil {
		in, out := &in.NumRetries, &out.NumRetries
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpOutputSettingsObservation.
func (in *RtmpOutputSettingsObservation) DeepCopy() *RtmpOutputSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(RtmpOutputSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RtmpOutputSettingsParameters) DeepCopyInto(out *RtmpOutputSettingsParameters) {
	*out = *in
	if in.CertificateMode != nil {
		in, out := &in.CertificateMode, &out.CertificateMode
		*out = new(string)
		**out = **in
	}
	if in.ConnectionRetryInterval != nil {
		in, out := &in.ConnectionRetryInterval, &out.ConnectionRetryInterval
		*out = new(float64)
		**out = **in
	}
	if in.Destination != nil {
		in, out := &in.Destination, &out.Destination
		*out = new(RtmpOutputSettingsDestinationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.NumRetries != nil {
		in, out := &in.NumRetries, &out.NumRetries
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RtmpOutputSettingsParameters.
func (in *RtmpOutputSettingsParameters) DeepCopy() *RtmpOutputSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(RtmpOutputSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte20PlusEmbeddedDestinationSettingsInitParameters) DeepCopyInto(out *Scte20PlusEmbeddedDestinationSettingsInitParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte20PlusEmbeddedDestinationSettingsInitParameters.
func (in *Scte20PlusEmbeddedDestinationSettingsInitParameters) DeepCopy() *Scte20PlusEmbeddedDestinationSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(Scte20PlusEmbeddedDestinationSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte20PlusEmbeddedDestinationSettingsObservation) DeepCopyInto(out *Scte20PlusEmbeddedDestinationSettingsObservation) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte20PlusEmbeddedDestinationSettingsObservation.
func (in *Scte20PlusEmbeddedDestinationSettingsObservation) DeepCopy() *Scte20PlusEmbeddedDestinationSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(Scte20PlusEmbeddedDestinationSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte20PlusEmbeddedDestinationSettingsParameters) DeepCopyInto(out *Scte20PlusEmbeddedDestinationSettingsParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte20PlusEmbeddedDestinationSettingsParameters.
func (in *Scte20PlusEmbeddedDestinationSettingsParameters) DeepCopy() *Scte20PlusEmbeddedDestinationSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(Scte20PlusEmbeddedDestinationSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte20SourceSettingsInitParameters) DeepCopyInto(out *Scte20SourceSettingsInitParameters) {
	*out = *in
	if in.Convert608To708 != nil {
		in, out := &in.Convert608To708, &out.Convert608To708
		*out = new(string)
		**out = **in
	}
	if in.Source608ChannelNumber != nil {
		in, out := &in.Source608ChannelNumber, &out.Source608ChannelNumber
		*out = new(float64)
		**out = **in
	}
}

// NOTE(review): autogenerated deepcopy helpers; regenerate from the API
// types rather than hand-editing (generator assumed to be controller-gen /
// deepcopy-gen — TODO confirm).

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte20SourceSettingsInitParameters.
func (in *Scte20SourceSettingsInitParameters) DeepCopy() *Scte20SourceSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(Scte20SourceSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte20SourceSettingsObservation) DeepCopyInto(out *Scte20SourceSettingsObservation) {
	*out = *in
	if in.Convert608To708 != nil {
		in, out := &in.Convert608To708, &out.Convert608To708
		*out = new(string)
		**out = **in
	}
	if in.Source608ChannelNumber != nil {
		in, out := &in.Source608ChannelNumber, &out.Source608ChannelNumber
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte20SourceSettingsObservation.
func (in *Scte20SourceSettingsObservation) DeepCopy() *Scte20SourceSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(Scte20SourceSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte20SourceSettingsParameters) DeepCopyInto(out *Scte20SourceSettingsParameters) {
	*out = *in
	if in.Convert608To708 != nil {
		in, out := &in.Convert608To708, &out.Convert608To708
		*out = new(string)
		**out = **in
	}
	if in.Source608ChannelNumber != nil {
		in, out := &in.Source608ChannelNumber, &out.Source608ChannelNumber
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte20SourceSettingsParameters.
func (in *Scte20SourceSettingsParameters) DeepCopy() *Scte20SourceSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(Scte20SourceSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte27DestinationSettingsInitParameters) DeepCopyInto(out *Scte27DestinationSettingsInitParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte27DestinationSettingsInitParameters.
func (in *Scte27DestinationSettingsInitParameters) DeepCopy() *Scte27DestinationSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(Scte27DestinationSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte27DestinationSettingsObservation) DeepCopyInto(out *Scte27DestinationSettingsObservation) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte27DestinationSettingsObservation.
func (in *Scte27DestinationSettingsObservation) DeepCopy() *Scte27DestinationSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(Scte27DestinationSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte27DestinationSettingsParameters) DeepCopyInto(out *Scte27DestinationSettingsParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte27DestinationSettingsParameters.
func (in *Scte27DestinationSettingsParameters) DeepCopy() *Scte27DestinationSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(Scte27DestinationSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte27SourceSettingsInitParameters) DeepCopyInto(out *Scte27SourceSettingsInitParameters) {
	*out = *in
	if in.OcrLanguage != nil {
		in, out := &in.OcrLanguage, &out.OcrLanguage
		*out = new(string)
		**out = **in
	}
	if in.Pid != nil {
		in, out := &in.Pid, &out.Pid
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte27SourceSettingsInitParameters.
func (in *Scte27SourceSettingsInitParameters) DeepCopy() *Scte27SourceSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(Scte27SourceSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte27SourceSettingsObservation) DeepCopyInto(out *Scte27SourceSettingsObservation) {
	*out = *in
	if in.OcrLanguage != nil {
		in, out := &in.OcrLanguage, &out.OcrLanguage
		*out = new(string)
		**out = **in
	}
	if in.Pid != nil {
		in, out := &in.Pid, &out.Pid
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte27SourceSettingsObservation.
func (in *Scte27SourceSettingsObservation) DeepCopy() *Scte27SourceSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(Scte27SourceSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scte27SourceSettingsParameters) DeepCopyInto(out *Scte27SourceSettingsParameters) {
	*out = *in
	if in.OcrLanguage != nil {
		in, out := &in.OcrLanguage, &out.OcrLanguage
		*out = new(string)
		**out = **in
	}
	if in.Pid != nil {
		in, out := &in.Pid, &out.Pid
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scte27SourceSettingsParameters.
func (in *Scte27SourceSettingsParameters) DeepCopy() *Scte27SourceSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(Scte27SourceSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelectorSettingsInitParameters) DeepCopyInto(out *SelectorSettingsInitParameters) {
	*out = *in
	if in.AudioHlsRenditionSelection != nil {
		in, out := &in.AudioHlsRenditionSelection, &out.AudioHlsRenditionSelection
		*out = new(AudioHlsRenditionSelectionInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.AudioLanguageSelection != nil {
		in, out := &in.AudioLanguageSelection, &out.AudioLanguageSelection
		*out = new(AudioLanguageSelectionInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.AudioPidSelection != nil {
		in, out := &in.AudioPidSelection, &out.AudioPidSelection
		*out = new(AudioPidSelectionInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.AudioTrackSelection != nil {
		in, out := &in.AudioTrackSelection, &out.AudioTrackSelection
		*out = new(AudioTrackSelectionInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// NOTE(review): autogenerated deepcopy helpers; regenerate from the API
// types rather than hand-editing (generator assumed to be controller-gen /
// deepcopy-gen — TODO confirm).

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSettingsInitParameters.
func (in *SelectorSettingsInitParameters) DeepCopy() *SelectorSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(SelectorSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelectorSettingsObservation) DeepCopyInto(out *SelectorSettingsObservation) {
	*out = *in
	if in.AudioHlsRenditionSelection != nil {
		in, out := &in.AudioHlsRenditionSelection, &out.AudioHlsRenditionSelection
		*out = new(AudioHlsRenditionSelectionObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.AudioLanguageSelection != nil {
		in, out := &in.AudioLanguageSelection, &out.AudioLanguageSelection
		*out = new(AudioLanguageSelectionObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.AudioPidSelection != nil {
		in, out := &in.AudioPidSelection, &out.AudioPidSelection
		*out = new(AudioPidSelectionObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.AudioTrackSelection != nil {
		in, out := &in.AudioTrackSelection, &out.AudioTrackSelection
		*out = new(AudioTrackSelectionObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSettingsObservation.
func (in *SelectorSettingsObservation) DeepCopy() *SelectorSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(SelectorSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelectorSettingsParameters) DeepCopyInto(out *SelectorSettingsParameters) {
	*out = *in
	if in.AudioHlsRenditionSelection != nil {
		in, out := &in.AudioHlsRenditionSelection, &out.AudioHlsRenditionSelection
		*out = new(AudioHlsRenditionSelectionParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.AudioLanguageSelection != nil {
		in, out := &in.AudioLanguageSelection, &out.AudioLanguageSelection
		*out = new(AudioLanguageSelectionParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.AudioPidSelection != nil {
		in, out := &in.AudioPidSelection, &out.AudioPidSelection
		*out = new(AudioPidSelectionParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.AudioTrackSelection != nil {
		in, out := &in.AudioTrackSelection, &out.AudioTrackSelection
		*out = new(AudioTrackSelectionParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorSettingsParameters.
func (in *SelectorSettingsParameters) DeepCopy() *SelectorSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(SelectorSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SettingsInitParameters) DeepCopyInto(out *SettingsInitParameters) {
	*out = *in
	if in.PasswordParam != nil {
		in, out := &in.PasswordParam, &out.PasswordParam
		*out = new(string)
		**out = **in
	}
	if in.StreamName != nil {
		in, out := &in.StreamName, &out.StreamName
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsInitParameters.
func (in *SettingsInitParameters) DeepCopy() *SettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(SettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SettingsObservation) DeepCopyInto(out *SettingsObservation) {
	*out = *in
	if in.PasswordParam != nil {
		in, out := &in.PasswordParam, &out.PasswordParam
		*out = new(string)
		**out = **in
	}
	if in.StreamName != nil {
		in, out := &in.StreamName, &out.StreamName
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsObservation.
func (in *SettingsObservation) DeepCopy() *SettingsObservation {
	if in == nil {
		return nil
	}
	out := new(SettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SettingsParameters) DeepCopyInto(out *SettingsParameters) {
	*out = *in
	if in.PasswordParam != nil {
		in, out := &in.PasswordParam, &out.PasswordParam
		*out = new(string)
		**out = **in
	}
	if in.StreamName != nil {
		in, out := &in.StreamName, &out.StreamName
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsParameters.
func (in *SettingsParameters) DeepCopy() *SettingsParameters {
	if in == nil {
		return nil
	}
	out := new(SettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// NOTE(review): autogenerated deepcopy helpers; regenerate from the API
// types rather than hand-editing (generator assumed to be controller-gen /
// deepcopy-gen — TODO confirm).

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SmpteTtDestinationSettingsInitParameters) DeepCopyInto(out *SmpteTtDestinationSettingsInitParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmpteTtDestinationSettingsInitParameters.
func (in *SmpteTtDestinationSettingsInitParameters) DeepCopy() *SmpteTtDestinationSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(SmpteTtDestinationSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SmpteTtDestinationSettingsObservation) DeepCopyInto(out *SmpteTtDestinationSettingsObservation) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmpteTtDestinationSettingsObservation.
func (in *SmpteTtDestinationSettingsObservation) DeepCopy() *SmpteTtDestinationSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(SmpteTtDestinationSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SmpteTtDestinationSettingsParameters) DeepCopyInto(out *SmpteTtDestinationSettingsParameters) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmpteTtDestinationSettingsParameters.
func (in *SmpteTtDestinationSettingsParameters) DeepCopy() *SmpteTtDestinationSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(SmpteTtDestinationSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SourcesInitParameters) DeepCopyInto(out *SourcesInitParameters) {
	*out = *in
	if in.PasswordParam != nil {
		in, out := &in.PasswordParam, &out.PasswordParam
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourcesInitParameters.
func (in *SourcesInitParameters) DeepCopy() *SourcesInitParameters {
	if in == nil {
		return nil
	}
	out := new(SourcesInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SourcesObservation) DeepCopyInto(out *SourcesObservation) {
	*out = *in
	if in.PasswordParam != nil {
		in, out := &in.PasswordParam, &out.PasswordParam
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourcesObservation.
func (in *SourcesObservation) DeepCopy() *SourcesObservation {
	if in == nil {
		return nil
	}
	out := new(SourcesObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SourcesParameters) DeepCopyInto(out *SourcesParameters) {
	*out = *in
	if in.PasswordParam != nil {
		in, out := &in.PasswordParam, &out.PasswordParam
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourcesParameters.
func (in *SourcesParameters) DeepCopy() *SourcesParameters {
	if in == nil {
		return nil
	}
	out := new(SourcesParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StandardHlsSettingsInitParameters) DeepCopyInto(out *StandardHlsSettingsInitParameters) {
	*out = *in
	if in.AudioRenditionSets != nil {
		in, out := &in.AudioRenditionSets, &out.AudioRenditionSets
		*out = new(string)
		**out = **in
	}
	if in.M3U8Settings != nil {
		in, out := &in.M3U8Settings, &out.M3U8Settings
		*out = new(M3U8SettingsInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandardHlsSettingsInitParameters.
func (in *StandardHlsSettingsInitParameters) DeepCopy() *StandardHlsSettingsInitParameters {
	if in == nil {
		return nil
	}
	out := new(StandardHlsSettingsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StandardHlsSettingsObservation) DeepCopyInto(out *StandardHlsSettingsObservation) {
	*out = *in
	if in.AudioRenditionSets != nil {
		in, out := &in.AudioRenditionSets, &out.AudioRenditionSets
		*out = new(string)
		**out = **in
	}
	if in.M3U8Settings != nil {
		in, out := &in.M3U8Settings, &out.M3U8Settings
		*out = new(M3U8SettingsObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandardHlsSettingsObservation.
func (in *StandardHlsSettingsObservation) DeepCopy() *StandardHlsSettingsObservation {
	if in == nil {
		return nil
	}
	out := new(StandardHlsSettingsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StandardHlsSettingsParameters) DeepCopyInto(out *StandardHlsSettingsParameters) {
	*out = *in
	if in.AudioRenditionSets != nil {
		in, out := &in.AudioRenditionSets, &out.AudioRenditionSets
		*out = new(string)
		**out = **in
	}
	if in.M3U8Settings != nil {
		in, out := &in.M3U8Settings, &out.M3U8Settings
		*out = new(M3U8SettingsParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandardHlsSettingsParameters.
func (in *StandardHlsSettingsParameters) DeepCopy() *StandardHlsSettingsParameters {
	if in == nil {
		return nil
	}
	out := new(StandardHlsSettingsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticKeySettingsInitParameters) DeepCopyInto(out *StaticKeySettingsInitParameters) { + *out = *in + if in.KeyProviderServer != nil { + in, out := &in.KeyProviderServer, &out.KeyProviderServer + *out = new(KeyProviderServerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StaticKeyValue != nil { + in, out := &in.StaticKeyValue, &out.StaticKeyValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticKeySettingsInitParameters. +func (in *StaticKeySettingsInitParameters) DeepCopy() *StaticKeySettingsInitParameters { + if in == nil { + return nil + } + out := new(StaticKeySettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticKeySettingsObservation) DeepCopyInto(out *StaticKeySettingsObservation) { + *out = *in + if in.KeyProviderServer != nil { + in, out := &in.KeyProviderServer, &out.KeyProviderServer + *out = new(KeyProviderServerObservation) + (*in).DeepCopyInto(*out) + } + if in.StaticKeyValue != nil { + in, out := &in.StaticKeyValue, &out.StaticKeyValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticKeySettingsObservation. +func (in *StaticKeySettingsObservation) DeepCopy() *StaticKeySettingsObservation { + if in == nil { + return nil + } + out := new(StaticKeySettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticKeySettingsParameters) DeepCopyInto(out *StaticKeySettingsParameters) { + *out = *in + if in.KeyProviderServer != nil { + in, out := &in.KeyProviderServer, &out.KeyProviderServer + *out = new(KeyProviderServerParameters) + (*in).DeepCopyInto(*out) + } + if in.StaticKeyValue != nil { + in, out := &in.StaticKeyValue, &out.StaticKeyValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticKeySettingsParameters. +func (in *StaticKeySettingsParameters) DeepCopy() *StaticKeySettingsParameters { + if in == nil { + return nil + } + out := new(StaticKeySettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TeletextDestinationSettingsInitParameters) DeepCopyInto(out *TeletextDestinationSettingsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeletextDestinationSettingsInitParameters. +func (in *TeletextDestinationSettingsInitParameters) DeepCopy() *TeletextDestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(TeletextDestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TeletextDestinationSettingsObservation) DeepCopyInto(out *TeletextDestinationSettingsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeletextDestinationSettingsObservation. 
+func (in *TeletextDestinationSettingsObservation) DeepCopy() *TeletextDestinationSettingsObservation { + if in == nil { + return nil + } + out := new(TeletextDestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TeletextDestinationSettingsParameters) DeepCopyInto(out *TeletextDestinationSettingsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeletextDestinationSettingsParameters. +func (in *TeletextDestinationSettingsParameters) DeepCopy() *TeletextDestinationSettingsParameters { + if in == nil { + return nil + } + out := new(TeletextDestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TeletextSourceSettingsInitParameters) DeepCopyInto(out *TeletextSourceSettingsInitParameters) { + *out = *in + if in.OutputRectangle != nil { + in, out := &in.OutputRectangle, &out.OutputRectangle + *out = new(OutputRectangleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PageNumber != nil { + in, out := &in.PageNumber, &out.PageNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeletextSourceSettingsInitParameters. +func (in *TeletextSourceSettingsInitParameters) DeepCopy() *TeletextSourceSettingsInitParameters { + if in == nil { + return nil + } + out := new(TeletextSourceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TeletextSourceSettingsObservation) DeepCopyInto(out *TeletextSourceSettingsObservation) { + *out = *in + if in.OutputRectangle != nil { + in, out := &in.OutputRectangle, &out.OutputRectangle + *out = new(OutputRectangleObservation) + (*in).DeepCopyInto(*out) + } + if in.PageNumber != nil { + in, out := &in.PageNumber, &out.PageNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeletextSourceSettingsObservation. +func (in *TeletextSourceSettingsObservation) DeepCopy() *TeletextSourceSettingsObservation { + if in == nil { + return nil + } + out := new(TeletextSourceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TeletextSourceSettingsParameters) DeepCopyInto(out *TeletextSourceSettingsParameters) { + *out = *in + if in.OutputRectangle != nil { + in, out := &in.OutputRectangle, &out.OutputRectangle + *out = new(OutputRectangleParameters) + (*in).DeepCopyInto(*out) + } + if in.PageNumber != nil { + in, out := &in.PageNumber, &out.PageNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeletextSourceSettingsParameters. +func (in *TeletextSourceSettingsParameters) DeepCopy() *TeletextSourceSettingsParameters { + if in == nil { + return nil + } + out := new(TeletextSourceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemporalFilterSettingsInitParameters) DeepCopyInto(out *TemporalFilterSettingsInitParameters) { + *out = *in + if in.PostFilterSharpening != nil { + in, out := &in.PostFilterSharpening, &out.PostFilterSharpening + *out = new(string) + **out = **in + } + if in.Strength != nil { + in, out := &in.Strength, &out.Strength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemporalFilterSettingsInitParameters. +func (in *TemporalFilterSettingsInitParameters) DeepCopy() *TemporalFilterSettingsInitParameters { + if in == nil { + return nil + } + out := new(TemporalFilterSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemporalFilterSettingsObservation) DeepCopyInto(out *TemporalFilterSettingsObservation) { + *out = *in + if in.PostFilterSharpening != nil { + in, out := &in.PostFilterSharpening, &out.PostFilterSharpening + *out = new(string) + **out = **in + } + if in.Strength != nil { + in, out := &in.Strength, &out.Strength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemporalFilterSettingsObservation. +func (in *TemporalFilterSettingsObservation) DeepCopy() *TemporalFilterSettingsObservation { + if in == nil { + return nil + } + out := new(TemporalFilterSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemporalFilterSettingsParameters) DeepCopyInto(out *TemporalFilterSettingsParameters) { + *out = *in + if in.PostFilterSharpening != nil { + in, out := &in.PostFilterSharpening, &out.PostFilterSharpening + *out = new(string) + **out = **in + } + if in.Strength != nil { + in, out := &in.Strength, &out.Strength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemporalFilterSettingsParameters. +func (in *TemporalFilterSettingsParameters) DeepCopy() *TemporalFilterSettingsParameters { + if in == nil { + return nil + } + out := new(TemporalFilterSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimecodeBurninSettingsInitParameters) DeepCopyInto(out *TimecodeBurninSettingsInitParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.TimecodeBurninFontSize != nil { + in, out := &in.TimecodeBurninFontSize, &out.TimecodeBurninFontSize + *out = new(string) + **out = **in + } + if in.TimecodeBurninPosition != nil { + in, out := &in.TimecodeBurninPosition, &out.TimecodeBurninPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimecodeBurninSettingsInitParameters. +func (in *TimecodeBurninSettingsInitParameters) DeepCopy() *TimecodeBurninSettingsInitParameters { + if in == nil { + return nil + } + out := new(TimecodeBurninSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimecodeBurninSettingsObservation) DeepCopyInto(out *TimecodeBurninSettingsObservation) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.TimecodeBurninFontSize != nil { + in, out := &in.TimecodeBurninFontSize, &out.TimecodeBurninFontSize + *out = new(string) + **out = **in + } + if in.TimecodeBurninPosition != nil { + in, out := &in.TimecodeBurninPosition, &out.TimecodeBurninPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimecodeBurninSettingsObservation. +func (in *TimecodeBurninSettingsObservation) DeepCopy() *TimecodeBurninSettingsObservation { + if in == nil { + return nil + } + out := new(TimecodeBurninSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimecodeBurninSettingsParameters) DeepCopyInto(out *TimecodeBurninSettingsParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.TimecodeBurninFontSize != nil { + in, out := &in.TimecodeBurninFontSize, &out.TimecodeBurninFontSize + *out = new(string) + **out = **in + } + if in.TimecodeBurninPosition != nil { + in, out := &in.TimecodeBurninPosition, &out.TimecodeBurninPosition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimecodeBurninSettingsParameters. +func (in *TimecodeBurninSettingsParameters) DeepCopy() *TimecodeBurninSettingsParameters { + if in == nil { + return nil + } + out := new(TimecodeBurninSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimecodeConfigInitParameters) DeepCopyInto(out *TimecodeConfigInitParameters) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SyncThreshold != nil { + in, out := &in.SyncThreshold, &out.SyncThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimecodeConfigInitParameters. +func (in *TimecodeConfigInitParameters) DeepCopy() *TimecodeConfigInitParameters { + if in == nil { + return nil + } + out := new(TimecodeConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimecodeConfigObservation) DeepCopyInto(out *TimecodeConfigObservation) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SyncThreshold != nil { + in, out := &in.SyncThreshold, &out.SyncThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimecodeConfigObservation. +func (in *TimecodeConfigObservation) DeepCopy() *TimecodeConfigObservation { + if in == nil { + return nil + } + out := new(TimecodeConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimecodeConfigParameters) DeepCopyInto(out *TimecodeConfigParameters) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SyncThreshold != nil { + in, out := &in.SyncThreshold, &out.SyncThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimecodeConfigParameters. 
+func (in *TimecodeConfigParameters) DeepCopy() *TimecodeConfigParameters { + if in == nil { + return nil + } + out := new(TimecodeConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TracksInitParameters) DeepCopyInto(out *TracksInitParameters) { + *out = *in + if in.Track != nil { + in, out := &in.Track, &out.Track + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracksInitParameters. +func (in *TracksInitParameters) DeepCopy() *TracksInitParameters { + if in == nil { + return nil + } + out := new(TracksInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TracksObservation) DeepCopyInto(out *TracksObservation) { + *out = *in + if in.Track != nil { + in, out := &in.Track, &out.Track + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracksObservation. +func (in *TracksObservation) DeepCopy() *TracksObservation { + if in == nil { + return nil + } + out := new(TracksObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TracksParameters) DeepCopyInto(out *TracksParameters) { + *out = *in + if in.Track != nil { + in, out := &in.Track, &out.Track + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracksParameters. 
+func (in *TracksParameters) DeepCopy() *TracksParameters { + if in == nil { + return nil + } + out := new(TracksParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TtmlDestinationSettingsInitParameters) DeepCopyInto(out *TtmlDestinationSettingsInitParameters) { + *out = *in + if in.StyleControl != nil { + in, out := &in.StyleControl, &out.StyleControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TtmlDestinationSettingsInitParameters. +func (in *TtmlDestinationSettingsInitParameters) DeepCopy() *TtmlDestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(TtmlDestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TtmlDestinationSettingsObservation) DeepCopyInto(out *TtmlDestinationSettingsObservation) { + *out = *in + if in.StyleControl != nil { + in, out := &in.StyleControl, &out.StyleControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TtmlDestinationSettingsObservation. +func (in *TtmlDestinationSettingsObservation) DeepCopy() *TtmlDestinationSettingsObservation { + if in == nil { + return nil + } + out := new(TtmlDestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TtmlDestinationSettingsParameters) DeepCopyInto(out *TtmlDestinationSettingsParameters) { + *out = *in + if in.StyleControl != nil { + in, out := &in.StyleControl, &out.StyleControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TtmlDestinationSettingsParameters. +func (in *TtmlDestinationSettingsParameters) DeepCopy() *TtmlDestinationSettingsParameters { + if in == nil { + return nil + } + out := new(TtmlDestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDPGroupSettingsInitParameters) DeepCopyInto(out *UDPGroupSettingsInitParameters) { + *out = *in + if in.InputLossAction != nil { + in, out := &in.InputLossAction, &out.InputLossAction + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Frame != nil { + in, out := &in.TimedMetadataId3Frame, &out.TimedMetadataId3Frame + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Period != nil { + in, out := &in.TimedMetadataId3Period, &out.TimedMetadataId3Period + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPGroupSettingsInitParameters. +func (in *UDPGroupSettingsInitParameters) DeepCopy() *UDPGroupSettingsInitParameters { + if in == nil { + return nil + } + out := new(UDPGroupSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UDPGroupSettingsObservation) DeepCopyInto(out *UDPGroupSettingsObservation) { + *out = *in + if in.InputLossAction != nil { + in, out := &in.InputLossAction, &out.InputLossAction + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Frame != nil { + in, out := &in.TimedMetadataId3Frame, &out.TimedMetadataId3Frame + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Period != nil { + in, out := &in.TimedMetadataId3Period, &out.TimedMetadataId3Period + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPGroupSettingsObservation. +func (in *UDPGroupSettingsObservation) DeepCopy() *UDPGroupSettingsObservation { + if in == nil { + return nil + } + out := new(UDPGroupSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDPGroupSettingsParameters) DeepCopyInto(out *UDPGroupSettingsParameters) { + *out = *in + if in.InputLossAction != nil { + in, out := &in.InputLossAction, &out.InputLossAction + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Frame != nil { + in, out := &in.TimedMetadataId3Frame, &out.TimedMetadataId3Frame + *out = new(string) + **out = **in + } + if in.TimedMetadataId3Period != nil { + in, out := &in.TimedMetadataId3Period, &out.TimedMetadataId3Period + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPGroupSettingsParameters. +func (in *UDPGroupSettingsParameters) DeepCopy() *UDPGroupSettingsParameters { + if in == nil { + return nil + } + out := new(UDPGroupSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UDPOutputSettingsContainerSettingsInitParameters) DeepCopyInto(out *UDPOutputSettingsContainerSettingsInitParameters) { + *out = *in + if in.M2TsSettings != nil { + in, out := &in.M2TsSettings, &out.M2TsSettings + *out = new(ContainerSettingsM2TsSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPOutputSettingsContainerSettingsInitParameters. +func (in *UDPOutputSettingsContainerSettingsInitParameters) DeepCopy() *UDPOutputSettingsContainerSettingsInitParameters { + if in == nil { + return nil + } + out := new(UDPOutputSettingsContainerSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDPOutputSettingsContainerSettingsObservation) DeepCopyInto(out *UDPOutputSettingsContainerSettingsObservation) { + *out = *in + if in.M2TsSettings != nil { + in, out := &in.M2TsSettings, &out.M2TsSettings + *out = new(ContainerSettingsM2TsSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPOutputSettingsContainerSettingsObservation. +func (in *UDPOutputSettingsContainerSettingsObservation) DeepCopy() *UDPOutputSettingsContainerSettingsObservation { + if in == nil { + return nil + } + out := new(UDPOutputSettingsContainerSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UDPOutputSettingsContainerSettingsParameters) DeepCopyInto(out *UDPOutputSettingsContainerSettingsParameters) { + *out = *in + if in.M2TsSettings != nil { + in, out := &in.M2TsSettings, &out.M2TsSettings + *out = new(ContainerSettingsM2TsSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPOutputSettingsContainerSettingsParameters. +func (in *UDPOutputSettingsContainerSettingsParameters) DeepCopy() *UDPOutputSettingsContainerSettingsParameters { + if in == nil { + return nil + } + out := new(UDPOutputSettingsContainerSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDPOutputSettingsDestinationInitParameters) DeepCopyInto(out *UDPOutputSettingsDestinationInitParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPOutputSettingsDestinationInitParameters. +func (in *UDPOutputSettingsDestinationInitParameters) DeepCopy() *UDPOutputSettingsDestinationInitParameters { + if in == nil { + return nil + } + out := new(UDPOutputSettingsDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDPOutputSettingsDestinationObservation) DeepCopyInto(out *UDPOutputSettingsDestinationObservation) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPOutputSettingsDestinationObservation. 
+func (in *UDPOutputSettingsDestinationObservation) DeepCopy() *UDPOutputSettingsDestinationObservation { + if in == nil { + return nil + } + out := new(UDPOutputSettingsDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDPOutputSettingsDestinationParameters) DeepCopyInto(out *UDPOutputSettingsDestinationParameters) { + *out = *in + if in.DestinationRefID != nil { + in, out := &in.DestinationRefID, &out.DestinationRefID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPOutputSettingsDestinationParameters. +func (in *UDPOutputSettingsDestinationParameters) DeepCopy() *UDPOutputSettingsDestinationParameters { + if in == nil { + return nil + } + out := new(UDPOutputSettingsDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDPOutputSettingsInitParameters) DeepCopyInto(out *UDPOutputSettingsInitParameters) { + *out = *in + if in.BufferMsec != nil { + in, out := &in.BufferMsec, &out.BufferMsec + *out = new(float64) + **out = **in + } + if in.ContainerSettings != nil { + in, out := &in.ContainerSettings, &out.ContainerSettings + *out = new(UDPOutputSettingsContainerSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(UDPOutputSettingsDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FecOutputSettings != nil { + in, out := &in.FecOutputSettings, &out.FecOutputSettings + *out = new(FecOutputSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPOutputSettingsInitParameters. 
+func (in *UDPOutputSettingsInitParameters) DeepCopy() *UDPOutputSettingsInitParameters { + if in == nil { + return nil + } + out := new(UDPOutputSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UDPOutputSettingsObservation) DeepCopyInto(out *UDPOutputSettingsObservation) { + *out = *in + if in.BufferMsec != nil { + in, out := &in.BufferMsec, &out.BufferMsec + *out = new(float64) + **out = **in + } + if in.ContainerSettings != nil { + in, out := &in.ContainerSettings, &out.ContainerSettings + *out = new(UDPOutputSettingsContainerSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(UDPOutputSettingsDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.FecOutputSettings != nil { + in, out := &in.FecOutputSettings, &out.FecOutputSettings + *out = new(FecOutputSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPOutputSettingsObservation. +func (in *UDPOutputSettingsObservation) DeepCopy() *UDPOutputSettingsObservation { + if in == nil { + return nil + } + out := new(UDPOutputSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UDPOutputSettingsParameters) DeepCopyInto(out *UDPOutputSettingsParameters) { + *out = *in + if in.BufferMsec != nil { + in, out := &in.BufferMsec, &out.BufferMsec + *out = new(float64) + **out = **in + } + if in.ContainerSettings != nil { + in, out := &in.ContainerSettings, &out.ContainerSettings + *out = new(UDPOutputSettingsContainerSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(UDPOutputSettingsDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.FecOutputSettings != nil { + in, out := &in.FecOutputSettings, &out.FecOutputSettings + *out = new(FecOutputSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDPOutputSettingsParameters. +func (in *UDPOutputSettingsParameters) DeepCopy() *UDPOutputSettingsParameters { + if in == nil { + return nil + } + out := new(UDPOutputSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCInitParameters) DeepCopyInto(out *VPCInitParameters) { + *out = *in + if in.PublicAddressAllocationIds != nil { + in, out := &in.PublicAddressAllocationIds, &out.PublicAddressAllocationIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCInitParameters. +func (in *VPCInitParameters) DeepCopy() *VPCInitParameters { + if in == nil { + return nil + } + out := new(VPCInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCObservation) DeepCopyInto(out *VPCObservation) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PublicAddressAllocationIds != nil { + in, out := &in.PublicAddressAllocationIds, &out.PublicAddressAllocationIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCObservation. +func (in *VPCObservation) DeepCopy() *VPCObservation { + if in == nil { + return nil + } + out := new(VPCObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCParameters) DeepCopyInto(out *VPCParameters) { + *out = *in + if in.PublicAddressAllocationIds != nil { + in, out := &in.PublicAddressAllocationIds, &out.PublicAddressAllocationIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCParameters. +func (in *VPCParameters) DeepCopy() *VPCParameters { + if in == nil { + return nil + } + out := new(VPCParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoBlackSettingsInitParameters) DeepCopyInto(out *VideoBlackSettingsInitParameters) { + *out = *in + if in.BlackDetectThreshold != nil { + in, out := &in.BlackDetectThreshold, &out.BlackDetectThreshold + *out = new(float64) + **out = **in + } + if in.VideoBlackThresholdMsec != nil { + in, out := &in.VideoBlackThresholdMsec, &out.VideoBlackThresholdMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoBlackSettingsInitParameters. 
+func (in *VideoBlackSettingsInitParameters) DeepCopy() *VideoBlackSettingsInitParameters { + if in == nil { + return nil + } + out := new(VideoBlackSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoBlackSettingsObservation) DeepCopyInto(out *VideoBlackSettingsObservation) { + *out = *in + if in.BlackDetectThreshold != nil { + in, out := &in.BlackDetectThreshold, &out.BlackDetectThreshold + *out = new(float64) + **out = **in + } + if in.VideoBlackThresholdMsec != nil { + in, out := &in.VideoBlackThresholdMsec, &out.VideoBlackThresholdMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoBlackSettingsObservation. +func (in *VideoBlackSettingsObservation) DeepCopy() *VideoBlackSettingsObservation { + if in == nil { + return nil + } + out := new(VideoBlackSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoBlackSettingsParameters) DeepCopyInto(out *VideoBlackSettingsParameters) { + *out = *in + if in.BlackDetectThreshold != nil { + in, out := &in.BlackDetectThreshold, &out.BlackDetectThreshold + *out = new(float64) + **out = **in + } + if in.VideoBlackThresholdMsec != nil { + in, out := &in.VideoBlackThresholdMsec, &out.VideoBlackThresholdMsec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoBlackSettingsParameters. 
+func (in *VideoBlackSettingsParameters) DeepCopy() *VideoBlackSettingsParameters { + if in == nil { + return nil + } + out := new(VideoBlackSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoDescriptionsCodecSettingsInitParameters) DeepCopyInto(out *VideoDescriptionsCodecSettingsInitParameters) { + *out = *in + if in.FrameCaptureSettings != nil { + in, out := &in.FrameCaptureSettings, &out.FrameCaptureSettings + *out = new(FrameCaptureSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.H264Settings != nil { + in, out := &in.H264Settings, &out.H264Settings + *out = new(H264SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.H265Settings != nil { + in, out := &in.H265Settings, &out.H265Settings + *out = new(H265SettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoDescriptionsCodecSettingsInitParameters. +func (in *VideoDescriptionsCodecSettingsInitParameters) DeepCopy() *VideoDescriptionsCodecSettingsInitParameters { + if in == nil { + return nil + } + out := new(VideoDescriptionsCodecSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoDescriptionsCodecSettingsObservation) DeepCopyInto(out *VideoDescriptionsCodecSettingsObservation) { + *out = *in + if in.FrameCaptureSettings != nil { + in, out := &in.FrameCaptureSettings, &out.FrameCaptureSettings + *out = new(FrameCaptureSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.H264Settings != nil { + in, out := &in.H264Settings, &out.H264Settings + *out = new(H264SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.H265Settings != nil { + in, out := &in.H265Settings, &out.H265Settings + *out = new(H265SettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoDescriptionsCodecSettingsObservation. +func (in *VideoDescriptionsCodecSettingsObservation) DeepCopy() *VideoDescriptionsCodecSettingsObservation { + if in == nil { + return nil + } + out := new(VideoDescriptionsCodecSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoDescriptionsCodecSettingsParameters) DeepCopyInto(out *VideoDescriptionsCodecSettingsParameters) { + *out = *in + if in.FrameCaptureSettings != nil { + in, out := &in.FrameCaptureSettings, &out.FrameCaptureSettings + *out = new(FrameCaptureSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.H264Settings != nil { + in, out := &in.H264Settings, &out.H264Settings + *out = new(H264SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.H265Settings != nil { + in, out := &in.H265Settings, &out.H265Settings + *out = new(H265SettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoDescriptionsCodecSettingsParameters. 
+func (in *VideoDescriptionsCodecSettingsParameters) DeepCopy() *VideoDescriptionsCodecSettingsParameters { + if in == nil { + return nil + } + out := new(VideoDescriptionsCodecSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoDescriptionsInitParameters) DeepCopyInto(out *VideoDescriptionsInitParameters) { + *out = *in + if in.CodecSettings != nil { + in, out := &in.CodecSettings, &out.CodecSettings + *out = new(VideoDescriptionsCodecSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RespondToAfd != nil { + in, out := &in.RespondToAfd, &out.RespondToAfd + *out = new(string) + **out = **in + } + if in.ScalingBehavior != nil { + in, out := &in.ScalingBehavior, &out.ScalingBehavior + *out = new(string) + **out = **in + } + if in.Sharpness != nil { + in, out := &in.Sharpness, &out.Sharpness + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoDescriptionsInitParameters. +func (in *VideoDescriptionsInitParameters) DeepCopy() *VideoDescriptionsInitParameters { + if in == nil { + return nil + } + out := new(VideoDescriptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoDescriptionsObservation) DeepCopyInto(out *VideoDescriptionsObservation) { + *out = *in + if in.CodecSettings != nil { + in, out := &in.CodecSettings, &out.CodecSettings + *out = new(VideoDescriptionsCodecSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RespondToAfd != nil { + in, out := &in.RespondToAfd, &out.RespondToAfd + *out = new(string) + **out = **in + } + if in.ScalingBehavior != nil { + in, out := &in.ScalingBehavior, &out.ScalingBehavior + *out = new(string) + **out = **in + } + if in.Sharpness != nil { + in, out := &in.Sharpness, &out.Sharpness + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoDescriptionsObservation. +func (in *VideoDescriptionsObservation) DeepCopy() *VideoDescriptionsObservation { + if in == nil { + return nil + } + out := new(VideoDescriptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoDescriptionsParameters) DeepCopyInto(out *VideoDescriptionsParameters) { + *out = *in + if in.CodecSettings != nil { + in, out := &in.CodecSettings, &out.CodecSettings + *out = new(VideoDescriptionsCodecSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RespondToAfd != nil { + in, out := &in.RespondToAfd, &out.RespondToAfd + *out = new(string) + **out = **in + } + if in.ScalingBehavior != nil { + in, out := &in.ScalingBehavior, &out.ScalingBehavior + *out = new(string) + **out = **in + } + if in.Sharpness != nil { + in, out := &in.Sharpness, &out.Sharpness + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoDescriptionsParameters. +func (in *VideoDescriptionsParameters) DeepCopy() *VideoDescriptionsParameters { + if in == nil { + return nil + } + out := new(VideoDescriptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoSelectorInitParameters) DeepCopyInto(out *VideoSelectorInitParameters) { + *out = *in + if in.ColorSpace != nil { + in, out := &in.ColorSpace, &out.ColorSpace + *out = new(string) + **out = **in + } + if in.ColorSpaceUsage != nil { + in, out := &in.ColorSpaceUsage, &out.ColorSpaceUsage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoSelectorInitParameters. 
+func (in *VideoSelectorInitParameters) DeepCopy() *VideoSelectorInitParameters { + if in == nil { + return nil + } + out := new(VideoSelectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoSelectorObservation) DeepCopyInto(out *VideoSelectorObservation) { + *out = *in + if in.ColorSpace != nil { + in, out := &in.ColorSpace, &out.ColorSpace + *out = new(string) + **out = **in + } + if in.ColorSpaceUsage != nil { + in, out := &in.ColorSpaceUsage, &out.ColorSpaceUsage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoSelectorObservation. +func (in *VideoSelectorObservation) DeepCopy() *VideoSelectorObservation { + if in == nil { + return nil + } + out := new(VideoSelectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoSelectorParameters) DeepCopyInto(out *VideoSelectorParameters) { + *out = *in + if in.ColorSpace != nil { + in, out := &in.ColorSpace, &out.ColorSpace + *out = new(string) + **out = **in + } + if in.ColorSpaceUsage != nil { + in, out := &in.ColorSpaceUsage, &out.ColorSpaceUsage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoSelectorParameters. +func (in *VideoSelectorParameters) DeepCopy() *VideoSelectorParameters { + if in == nil { + return nil + } + out := new(VideoSelectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WavSettingsInitParameters) DeepCopyInto(out *WavSettingsInitParameters) { + *out = *in + if in.BitDepth != nil { + in, out := &in.BitDepth, &out.BitDepth + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WavSettingsInitParameters. +func (in *WavSettingsInitParameters) DeepCopy() *WavSettingsInitParameters { + if in == nil { + return nil + } + out := new(WavSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WavSettingsObservation) DeepCopyInto(out *WavSettingsObservation) { + *out = *in + if in.BitDepth != nil { + in, out := &in.BitDepth, &out.BitDepth + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WavSettingsObservation. +func (in *WavSettingsObservation) DeepCopy() *WavSettingsObservation { + if in == nil { + return nil + } + out := new(WavSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WavSettingsParameters) DeepCopyInto(out *WavSettingsParameters) { + *out = *in + if in.BitDepth != nil { + in, out := &in.BitDepth, &out.BitDepth + *out = new(float64) + **out = **in + } + if in.CodingMode != nil { + in, out := &in.CodingMode, &out.CodingMode + *out = new(string) + **out = **in + } + if in.SampleRate != nil { + in, out := &in.SampleRate, &out.SampleRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WavSettingsParameters. +func (in *WavSettingsParameters) DeepCopy() *WavSettingsParameters { + if in == nil { + return nil + } + out := new(WavSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebvttDestinationSettingsInitParameters) DeepCopyInto(out *WebvttDestinationSettingsInitParameters) { + *out = *in + if in.StyleControl != nil { + in, out := &in.StyleControl, &out.StyleControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebvttDestinationSettingsInitParameters. +func (in *WebvttDestinationSettingsInitParameters) DeepCopy() *WebvttDestinationSettingsInitParameters { + if in == nil { + return nil + } + out := new(WebvttDestinationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebvttDestinationSettingsObservation) DeepCopyInto(out *WebvttDestinationSettingsObservation) { + *out = *in + if in.StyleControl != nil { + in, out := &in.StyleControl, &out.StyleControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebvttDestinationSettingsObservation. 
+func (in *WebvttDestinationSettingsObservation) DeepCopy() *WebvttDestinationSettingsObservation { + if in == nil { + return nil + } + out := new(WebvttDestinationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebvttDestinationSettingsParameters) DeepCopyInto(out *WebvttDestinationSettingsParameters) { + *out = *in + if in.StyleControl != nil { + in, out := &in.StyleControl, &out.StyleControl + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebvttDestinationSettingsParameters. +func (in *WebvttDestinationSettingsParameters) DeepCopy() *WebvttDestinationSettingsParameters { + if in == nil { + return nil + } + out := new(WebvttDestinationSettingsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/medialive/v1beta2/zz_generated.managed.go b/apis/medialive/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..1159f7687a --- /dev/null +++ b/apis/medialive/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Channel. +func (mg *Channel) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Channel. +func (mg *Channel) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Channel. +func (mg *Channel) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Channel. 
+func (mg *Channel) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Channel. +func (mg *Channel) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Channel. +func (mg *Channel) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Channel. +func (mg *Channel) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Channel. +func (mg *Channel) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Channel. +func (mg *Channel) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Channel. +func (mg *Channel) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Channel. +func (mg *Channel) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Channel. +func (mg *Channel) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Input. +func (mg *Input) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Input. +func (mg *Input) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Input. +func (mg *Input) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Input. 
+func (mg *Input) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Input. +func (mg *Input) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Input. +func (mg *Input) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Input. +func (mg *Input) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Input. +func (mg *Input) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Input. +func (mg *Input) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Input. +func (mg *Input) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Input. +func (mg *Input) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Input. +func (mg *Input) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Multiplex. +func (mg *Multiplex) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Multiplex. +func (mg *Multiplex) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Multiplex. +func (mg *Multiplex) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Multiplex. 
+func (mg *Multiplex) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Multiplex. +func (mg *Multiplex) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Multiplex. +func (mg *Multiplex) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Multiplex. +func (mg *Multiplex) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Multiplex. +func (mg *Multiplex) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Multiplex. +func (mg *Multiplex) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Multiplex. +func (mg *Multiplex) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Multiplex. +func (mg *Multiplex) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Multiplex. +func (mg *Multiplex) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/medialive/v1beta2/zz_generated.managedlist.go b/apis/medialive/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..07b8f278ca --- /dev/null +++ b/apis/medialive/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ChannelList. +func (l *ChannelList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this InputList. +func (l *InputList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MultiplexList. +func (l *MultiplexList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/medialive/v1beta2/zz_generated.resolvers.go b/apis/medialive/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..2de1c76ddb --- /dev/null +++ b/apis/medialive/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,162 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Channel. 
	apisresolver "github.com/upbound/provider-aws/internal/apis"
)

// ResolveReferences resolves cross-resource references on this Channel:
// each InputAttachment's InputID (from a medialive Input) and the RoleArn
// (from an IAM Role), for both spec.forProvider and spec.initProvider.
func (mg *Channel) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error

	// Resolve spec.forProvider.inputAttachments[*].inputId against Input resources.
	for i3 := 0; i3 < len(mg.Spec.ForProvider.InputAttachments); i3++ {
		{
			m, l, err = apisresolver.GetManagedResource("medialive.aws.upbound.io", "v1beta2", "Input", "InputList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InputAttachments[i3].InputID),
				Extract:      resource.ExtractResourceID(),
				Reference:    mg.Spec.ForProvider.InputAttachments[i3].InputIDRef,
				Selector:     mg.Spec.ForProvider.InputAttachments[i3].InputIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.ForProvider.InputAttachments[i3].InputID")
		}
		mg.Spec.ForProvider.InputAttachments[i3].InputID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.ForProvider.InputAttachments[i3].InputIDRef = rsp.ResolvedReference

	}
	// Resolve spec.forProvider.roleArn against an IAM Role, extracting its ARN.
	{
		m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn),
			Extract:      common.ARNExtractor(),
			Reference:    mg.Spec.ForProvider.RoleArnRef,
			Selector:     mg.Spec.ForProvider.RoleArnSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn")
	}
	mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue)

	mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference

	// Resolve spec.initProvider.inputAttachments[*].inputId against Input resources.
	for i3 := 0; i3 < len(mg.Spec.InitProvider.InputAttachments); i3++ {
		{
			m, l, err = apisresolver.GetManagedResource("medialive.aws.upbound.io", "v1beta2", "Input", "InputList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InputAttachments[i3].InputID),
				Extract:      resource.ExtractResourceID(),
				Reference:    mg.Spec.InitProvider.InputAttachments[i3].InputIDRef,
				Selector:     mg.Spec.InitProvider.InputAttachments[i3].InputIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.InitProvider.InputAttachments[i3].InputID")
		}
		mg.Spec.InitProvider.InputAttachments[i3].InputID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.InitProvider.InputAttachments[i3].InputIDRef = rsp.ResolvedReference

	}
	// Resolve spec.initProvider.roleArn against an IAM Role, extracting its ARN.
	{
		m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn),
			Extract:      common.ARNExtractor(),
			Reference:    mg.Spec.InitProvider.RoleArnRef,
			Selector:     mg.Spec.InitProvider.RoleArnSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn")
	}
	mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this Input.
+func (mg *Input) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/medialive/v1beta2/zz_groupversion_info.go b/apis/medialive/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..6d62c8ff94 --- /dev/null +++ 
b/apis/medialive/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=medialive.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "medialive.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/medialive/v1beta2/zz_input_terraformed.go b/apis/medialive/v1beta2/zz_input_terraformed.go new file mode 100755 index 0000000000..90082fba4a --- /dev/null +++ b/apis/medialive/v1beta2/zz_input_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Input +func (mg *Input) GetTerraformResourceType() string { + return "aws_medialive_input" +} + +// GetConnectionDetailsMapping for this Input +func (tr *Input) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Input +func (tr *Input) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Input +func (tr *Input) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Input +func (tr *Input) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Input +func (tr *Input) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Input +func (tr *Input) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Input +func (tr *Input) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Input 
+func (tr *Input) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Input using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Input) LateInitialize(attrs []byte) (bool, error) {
+	params := &InputParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Input) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/medialive/v1beta2/zz_input_types.go b/apis/medialive/v1beta2/zz_input_types.go new file mode 100755 index 0000000000..1d438e7d1b --- /dev/null +++ b/apis/medialive/v1beta2/zz_input_types.go @@ -0,0 +1,359 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type InputDestinationsInitParameters struct { + + // A unique name for the location the RTMP stream is being pushed to. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type InputDestinationsObservation struct { + + // A unique name for the location the RTMP stream is being pushed to. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type InputDestinationsParameters struct { + + // A unique name for the location the RTMP stream is being pushed to. + // +kubebuilder:validation:Optional + StreamName *string `json:"streamName" tf:"stream_name,omitempty"` +} + +type InputDevicesInitParameters struct { + + // The unique ID for the device. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type InputDevicesObservation struct { + + // The unique ID for the device. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type InputDevicesParameters struct { + + // The unique ID for the device. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` +} + +type InputInitParameters struct { + + // Destination settings for PUSH type inputs. See Destinations for more details. 
+ Destinations []InputDestinationsInitParameters `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // Settings for the devices. See Input Devices for more details. + InputDevices []InputDevicesInitParameters `json:"inputDevices,omitempty" tf:"input_devices,omitempty"` + + // List of input security groups. + InputSecurityGroups []*string `json:"inputSecurityGroups,omitempty" tf:"input_security_groups,omitempty"` + + // A list of the MediaConnect Flows. See Media Connect Flows for more details. + MediaConnectFlows []MediaConnectFlowsInitParameters `json:"mediaConnectFlows,omitempty" tf:"media_connect_flows,omitempty"` + + // Name of the input. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ARN of the role this input assumes during and after creation. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The source URLs for a PULL-type input. See Sources for more details. + Sources []SourcesInitParameters `json:"sources,omitempty" tf:"sources,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The different types of inputs that AWS Elemental MediaLive supports. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Settings for a private VPC Input. See VPC for more details. 
+ VPC *InputVPCInitParameters `json:"vpc,omitempty" tf:"vpc,omitempty"` +} + +type InputObservation struct { + + // ARN of the Input. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Channels attached to Input. + AttachedChannels []*string `json:"attachedChannels,omitempty" tf:"attached_channels,omitempty"` + + // Destination settings for PUSH type inputs. See Destinations for more details. + Destinations []InputDestinationsObservation `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // The unique ID for the device. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The input class. + InputClass *string `json:"inputClass,omitempty" tf:"input_class,omitempty"` + + // Settings for the devices. See Input Devices for more details. + InputDevices []InputDevicesObservation `json:"inputDevices,omitempty" tf:"input_devices,omitempty"` + + // A list of IDs for all Inputs which are partners of this one. + InputPartnerIds []*string `json:"inputPartnerIds,omitempty" tf:"input_partner_ids,omitempty"` + + // List of input security groups. + InputSecurityGroups []*string `json:"inputSecurityGroups,omitempty" tf:"input_security_groups,omitempty"` + + // Source type of the input. + InputSourceType *string `json:"inputSourceType,omitempty" tf:"input_source_type,omitempty"` + + // A list of the MediaConnect Flows. See Media Connect Flows for more details. + MediaConnectFlows []MediaConnectFlowsObservation `json:"mediaConnectFlows,omitempty" tf:"media_connect_flows,omitempty"` + + // Name of the input. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ARN of the role this input assumes during and after creation. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The source URLs for a PULL-type input. See Sources for more details. + Sources []SourcesObservation `json:"sources,omitempty" tf:"sources,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The different types of inputs that AWS Elemental MediaLive supports. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Settings for a private VPC Input. See VPC for more details. + VPC *InputVPCObservation `json:"vpc,omitempty" tf:"vpc,omitempty"` +} + +type InputParameters struct { + + // Destination settings for PUSH type inputs. See Destinations for more details. + // +kubebuilder:validation:Optional + Destinations []InputDestinationsParameters `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // Settings for the devices. See Input Devices for more details. + // +kubebuilder:validation:Optional + InputDevices []InputDevicesParameters `json:"inputDevices,omitempty" tf:"input_devices,omitempty"` + + // List of input security groups. + // +kubebuilder:validation:Optional + InputSecurityGroups []*string `json:"inputSecurityGroups,omitempty" tf:"input_security_groups,omitempty"` + + // A list of the MediaConnect Flows. See Media Connect Flows for more details. + // +kubebuilder:validation:Optional + MediaConnectFlows []MediaConnectFlowsParameters `json:"mediaConnectFlows,omitempty" tf:"media_connect_flows,omitempty"` + + // Name of the input. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The ARN of the role this input assumes during and after creation. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The source URLs for a PULL-type input. See Sources for more details. + // +kubebuilder:validation:Optional + Sources []SourcesParameters `json:"sources,omitempty" tf:"sources,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The different types of inputs that AWS Elemental MediaLive supports. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Settings for a private VPC Input. See VPC for more details. + // +kubebuilder:validation:Optional + VPC *InputVPCParameters `json:"vpc,omitempty" tf:"vpc,omitempty"` +} + +type InputVPCInitParameters struct { + + // A list of up to 5 EC2 VPC security group IDs to attach to the Input. + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of 2 VPC subnet IDs from the same VPC. + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type InputVPCObservation struct { + + // A list of up to 5 EC2 VPC security group IDs to attach to the Input. + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of 2 VPC subnet IDs from the same VPC. 
+ SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type InputVPCParameters struct { + + // A list of up to 5 EC2 VPC security group IDs to attach to the Input. + // +kubebuilder:validation:Optional + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of 2 VPC subnet IDs from the same VPC. + // +kubebuilder:validation:Optional + SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"` +} + +type MediaConnectFlowsInitParameters struct { + + // The ARN of the MediaConnect Flow + FlowArn *string `json:"flowArn,omitempty" tf:"flow_arn,omitempty"` +} + +type MediaConnectFlowsObservation struct { + + // The ARN of the MediaConnect Flow + FlowArn *string `json:"flowArn,omitempty" tf:"flow_arn,omitempty"` +} + +type MediaConnectFlowsParameters struct { + + // The ARN of the MediaConnect Flow + // +kubebuilder:validation:Optional + FlowArn *string `json:"flowArn" tf:"flow_arn,omitempty"` +} + +type SourcesInitParameters struct { + + // The key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // The URL where the stream is pulled from. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The username for the input source. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SourcesObservation struct { + + // The key used to extract the password from EC2 Parameter store. + PasswordParam *string `json:"passwordParam,omitempty" tf:"password_param,omitempty"` + + // The URL where the stream is pulled from. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The username for the input source. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SourcesParameters struct { + + // The key used to extract the password from EC2 Parameter store. 
+ // +kubebuilder:validation:Optional + PasswordParam *string `json:"passwordParam" tf:"password_param,omitempty"` + + // The URL where the stream is pulled from. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` + + // The username for the input source. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +// InputSpec defines the desired state of Input +type InputSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InputParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InputInitParameters `json:"initProvider,omitempty"` +} + +// InputStatus defines the observed state of Input. +type InputStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InputObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Input is the Schema for the Inputs API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Input struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec InputSpec `json:"spec"` + Status InputStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InputList contains a list of Inputs +type InputList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Input `json:"items"` +} + +// Repository type metadata. +var ( + Input_Kind = "Input" + Input_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Input_Kind}.String() + Input_KindAPIVersion = Input_Kind + "." 
+ CRDGroupVersion.String() + Input_GroupVersionKind = CRDGroupVersion.WithKind(Input_Kind) +) + +func init() { + SchemeBuilder.Register(&Input{}, &InputList{}) +} diff --git a/apis/medialive/v1beta2/zz_multiplex_terraformed.go b/apis/medialive/v1beta2/zz_multiplex_terraformed.go new file mode 100755 index 0000000000..6b821fdd45 --- /dev/null +++ b/apis/medialive/v1beta2/zz_multiplex_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Multiplex +func (mg *Multiplex) GetTerraformResourceType() string { + return "aws_medialive_multiplex" +} + +// GetConnectionDetailsMapping for this Multiplex +func (tr *Multiplex) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Multiplex +func (tr *Multiplex) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Multiplex +func (tr *Multiplex) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Multiplex +func (tr *Multiplex) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Multiplex +func (tr *Multiplex) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Multiplex
+func (tr *Multiplex) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Multiplex
+func (tr *Multiplex) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetInitParameters of this Multiplex
+func (tr *Multiplex) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Multiplex using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Multiplex) LateInitialize(attrs []byte) (bool, error) { + params := &MultiplexParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Multiplex) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/medialive/v1beta2/zz_multiplex_types.go b/apis/medialive/v1beta2/zz_multiplex_types.go new file mode 100755 index 0000000000..c7a47134dd --- /dev/null +++ b/apis/medialive/v1beta2/zz_multiplex_types.go @@ -0,0 +1,200 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MultiplexInitParameters struct { + + // A list of availability zones. You must specify exactly two. + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Multiplex settings. See Multiplex Settings for more details. + MultiplexSettings *MultiplexMultiplexSettingsInitParameters `json:"multiplexSettings,omitempty" tf:"multiplex_settings,omitempty"` + + // name of Multiplex. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to start the Multiplex. Defaults to false. + StartMultiplex *bool `json:"startMultiplex,omitempty" tf:"start_multiplex,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MultiplexMultiplexSettingsInitParameters struct { + + // Maximum video buffer delay. + MaximumVideoBufferDelayMilliseconds *float64 `json:"maximumVideoBufferDelayMilliseconds,omitempty" tf:"maximum_video_buffer_delay_milliseconds,omitempty"` + + // Transport stream bit rate. + TransportStreamBitrate *float64 `json:"transportStreamBitrate,omitempty" tf:"transport_stream_bitrate,omitempty"` + + // Unique ID for each multiplex. + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Transport stream reserved bit rate. + TransportStreamReservedBitrate *float64 `json:"transportStreamReservedBitrate,omitempty" tf:"transport_stream_reserved_bitrate,omitempty"` +} + +type MultiplexMultiplexSettingsObservation struct { + + // Maximum video buffer delay. + MaximumVideoBufferDelayMilliseconds *float64 `json:"maximumVideoBufferDelayMilliseconds,omitempty" tf:"maximum_video_buffer_delay_milliseconds,omitempty"` + + // Transport stream bit rate. + TransportStreamBitrate *float64 `json:"transportStreamBitrate,omitempty" tf:"transport_stream_bitrate,omitempty"` + + // Unique ID for each multiplex. + TransportStreamID *float64 `json:"transportStreamId,omitempty" tf:"transport_stream_id,omitempty"` + + // Transport stream reserved bit rate. + TransportStreamReservedBitrate *float64 `json:"transportStreamReservedBitrate,omitempty" tf:"transport_stream_reserved_bitrate,omitempty"` +} + +type MultiplexMultiplexSettingsParameters struct { + + // Maximum video buffer delay. + // +kubebuilder:validation:Optional + MaximumVideoBufferDelayMilliseconds *float64 `json:"maximumVideoBufferDelayMilliseconds,omitempty" tf:"maximum_video_buffer_delay_milliseconds,omitempty"` + + // Transport stream bit rate. 
+ // +kubebuilder:validation:Optional + TransportStreamBitrate *float64 `json:"transportStreamBitrate" tf:"transport_stream_bitrate,omitempty"` + + // Unique ID for each multiplex. + // +kubebuilder:validation:Optional + TransportStreamID *float64 `json:"transportStreamId" tf:"transport_stream_id,omitempty"` + + // Transport stream reserved bit rate. + // +kubebuilder:validation:Optional + TransportStreamReservedBitrate *float64 `json:"transportStreamReservedBitrate,omitempty" tf:"transport_stream_reserved_bitrate,omitempty"` +} + +type MultiplexObservation struct { + + // ARN of the Multiplex. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A list of availability zones. You must specify exactly two. + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Multiplex settings. See Multiplex Settings for more details. + MultiplexSettings *MultiplexMultiplexSettingsObservation `json:"multiplexSettings,omitempty" tf:"multiplex_settings,omitempty"` + + // name of Multiplex. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to start the Multiplex. Defaults to false. + StartMultiplex *bool `json:"startMultiplex,omitempty" tf:"start_multiplex,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type MultiplexParameters struct { + + // A list of availability zones. You must specify exactly two. + // +kubebuilder:validation:Optional + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Multiplex settings. See Multiplex Settings for more details. 
+ // +kubebuilder:validation:Optional + MultiplexSettings *MultiplexMultiplexSettingsParameters `json:"multiplexSettings,omitempty" tf:"multiplex_settings,omitempty"` + + // name of Multiplex. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Whether to start the Multiplex. Defaults to false. + // +kubebuilder:validation:Optional + StartMultiplex *bool `json:"startMultiplex,omitempty" tf:"start_multiplex,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// MultiplexSpec defines the desired state of Multiplex +type MultiplexSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MultiplexParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MultiplexInitParameters `json:"initProvider,omitempty"` +} + +// MultiplexStatus defines the observed state of Multiplex. 
+type MultiplexStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MultiplexObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Multiplex is the Schema for the Multiplexs API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws},path=multiplices +type Multiplex struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.availabilityZones) || (has(self.initProvider) && has(self.initProvider.availabilityZones))",message="spec.forProvider.availabilityZones is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec MultiplexSpec `json:"spec"` + Status MultiplexStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MultiplexList contains a list of Multiplexs +type MultiplexList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Multiplex `json:"items"` +} + +// Repository type metadata. 
+var ( + Multiplex_Kind = "Multiplex" + Multiplex_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Multiplex_Kind}.String() + Multiplex_KindAPIVersion = Multiplex_Kind + "." + CRDGroupVersion.String() + Multiplex_GroupVersionKind = CRDGroupVersion.WithKind(Multiplex_Kind) +) + +func init() { + SchemeBuilder.Register(&Multiplex{}, &MultiplexList{}) +} diff --git a/apis/memorydb/v1beta1/zz_generated.conversion_hubs.go b/apis/memorydb/v1beta1/zz_generated.conversion_hubs.go index e1c66c93a6..48f76967e9 100755 --- a/apis/memorydb/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/memorydb/v1beta1/zz_generated.conversion_hubs.go @@ -20,6 +20,3 @@ func (tr *Snapshot) Hub() {} // Hub marks this type as a conversion hub. func (tr *SubnetGroup) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *User) Hub() {} diff --git a/apis/memorydb/v1beta1/zz_generated.conversion_spokes.go b/apis/memorydb/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..12f2214eea --- /dev/null +++ b/apis/memorydb/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this User to the hub type. +func (tr *User) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the User type. 
+func (tr *User) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/memorydb/v1beta2/zz_generated.conversion_hubs.go b/apis/memorydb/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..89554cb8a5 --- /dev/null +++ b/apis/memorydb/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *User) Hub() {} diff --git a/apis/memorydb/v1beta2/zz_generated.deepcopy.go b/apis/memorydb/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8b798b9896 --- /dev/null +++ b/apis/memorydb/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,352 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationModeInitParameters) DeepCopyInto(out *AuthenticationModeInitParameters) { + *out = *in + if in.Passwords != nil { + in, out := &in.Passwords, &out.Passwords + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationModeInitParameters. +func (in *AuthenticationModeInitParameters) DeepCopy() *AuthenticationModeInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationModeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationModeObservation) DeepCopyInto(out *AuthenticationModeObservation) { + *out = *in + if in.PasswordCount != nil { + in, out := &in.PasswordCount, &out.PasswordCount + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationModeObservation. +func (in *AuthenticationModeObservation) DeepCopy() *AuthenticationModeObservation { + if in == nil { + return nil + } + out := new(AuthenticationModeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationModeParameters) DeepCopyInto(out *AuthenticationModeParameters) { + *out = *in + if in.PasswordsSecretRef != nil { + in, out := &in.PasswordsSecretRef, &out.PasswordsSecretRef + *out = new([]v1.SecretKeySelector) + if **in != nil { + in, out := *in, *out + *out = make([]v1.SecretKeySelector, len(*in)) + copy(*out, *in) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationModeParameters. +func (in *AuthenticationModeParameters) DeepCopy() *AuthenticationModeParameters { + if in == nil { + return nil + } + out := new(AuthenticationModeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserInitParameters) DeepCopyInto(out *UserInitParameters) { + *out = *in + if in.AccessString != nil { + in, out := &in.AccessString, &out.AccessString + *out = new(string) + **out = **in + } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(AuthenticationModeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInitParameters. +func (in *UserInitParameters) DeepCopy() *UserInitParameters { + if in == nil { + return nil + } + out := new(UserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserObservation) DeepCopyInto(out *UserObservation) { + *out = *in + if in.AccessString != nil { + in, out := &in.AccessString, &out.AccessString + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(AuthenticationModeObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MinimumEngineVersion != nil { + in, out := &in.MinimumEngineVersion, &out.MinimumEngineVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. +func (in *UserObservation) DeepCopy() *UserObservation { + if in == nil { + return nil + } + out := new(UserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserParameters) DeepCopyInto(out *UserParameters) { + *out = *in + if in.AccessString != nil { + in, out := &in.AccessString, &out.AccessString + *out = new(string) + **out = **in + } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(AuthenticationModeParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { + if in == nil { + return nil + } + out := new(UserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSpec) DeepCopyInto(out *UserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. +func (in *UserSpec) DeepCopy() *UserSpec { + if in == nil { + return nil + } + out := new(UserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserStatus) DeepCopyInto(out *UserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. +func (in *UserStatus) DeepCopy() *UserStatus { + if in == nil { + return nil + } + out := new(UserStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/memorydb/v1beta2/zz_generated.managed.go b/apis/memorydb/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..9c3f149b93 --- /dev/null +++ b/apis/memorydb/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this User. +func (mg *User) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this User. +func (mg *User) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this User. +func (mg *User) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this User. +func (mg *User) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this User. +func (mg *User) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this User. +func (mg *User) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this User. +func (mg *User) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this User. +func (mg *User) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this User. +func (mg *User) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this User. +func (mg *User) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this User. +func (mg *User) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this User. +func (mg *User) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/memorydb/v1beta2/zz_generated.managedlist.go b/apis/memorydb/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..06af949ffc --- /dev/null +++ b/apis/memorydb/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this UserList. +func (l *UserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/memorydb/v1beta2/zz_groupversion_info.go b/apis/memorydb/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..c5662d2f15 --- /dev/null +++ b/apis/memorydb/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=memorydb.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "memorydb.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/memorydb/v1beta2/zz_user_terraformed.go b/apis/memorydb/v1beta2/zz_user_terraformed.go new file mode 100755 index 0000000000..70d40cebbf --- /dev/null +++ b/apis/memorydb/v1beta2/zz_user_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this User +func (mg *User) GetTerraformResourceType() string { + return "aws_memorydb_user" +} + +// GetConnectionDetailsMapping for this User +func (tr *User) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"authentication_mode[*].passwords[*]": "authenticationMode[*].passwordsSecretRef[*]"} +} + +// GetObservation of this User +func (tr *User) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this User +func (tr *User) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this User +func (tr *User) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this User +func (tr *User) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this User +func (tr *User) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this User +func (tr *User) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this User
+func (tr *User) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this User using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *User) LateInitialize(attrs []byte) (bool, error) {
+	params := &UserParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *User) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/memorydb/v1beta2/zz_user_types.go b/apis/memorydb/v1beta2/zz_user_types.go new file mode 100755 index 0000000000..abfe223788 --- /dev/null +++ b/apis/memorydb/v1beta2/zz_user_types.go @@ -0,0 +1,163 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthenticationModeInitParameters struct { + Passwords []*string `json:"passwordsSecretRef,omitempty" tf:"-"` + + // Specifies the authentication type. Valid values are: password or iam. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AuthenticationModeObservation struct { + + // Number of passwords belonging to the user if type is set to password. + PasswordCount *float64 `json:"passwordCount,omitempty" tf:"password_count,omitempty"` + + // Specifies the authentication type. Valid values are: password or iam. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AuthenticationModeParameters struct { + + // Set of passwords used for authentication if type is set to password. You can create up to two passwords for each user. + // +kubebuilder:validation:Optional + PasswordsSecretRef *[]v1.SecretKeySelector `json:"passwordsSecretRef,omitempty" tf:"-"` + + // Specifies the authentication type. Valid values are: password or iam. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type UserInitParameters struct { + + // Access permissions string used for this user. 
+ AccessString *string `json:"accessString,omitempty" tf:"access_string,omitempty"` + + // Denotes the user's authentication properties. Detailed below. + AuthenticationMode *AuthenticationModeInitParameters `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type UserObservation struct { + + // Access permissions string used for this user. + AccessString *string `json:"accessString,omitempty" tf:"access_string,omitempty"` + + // ARN of the user. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Denotes the user's authentication properties. Detailed below. + AuthenticationMode *AuthenticationModeObservation `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // Same as user_name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Minimum engine version supported for the user. + MinimumEngineVersion *string `json:"minimumEngineVersion,omitempty" tf:"minimum_engine_version,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type UserParameters struct { + + // Access permissions string used for this user. + // +kubebuilder:validation:Optional + AccessString *string `json:"accessString,omitempty" tf:"access_string,omitempty"` + + // Denotes the user's authentication properties. Detailed below. + // +kubebuilder:validation:Optional + AuthenticationMode *AuthenticationModeParameters `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// UserSpec defines the desired state of User +type UserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UserInitParameters `json:"initProvider,omitempty"` +} + +// UserStatus defines the observed state of User. +type UserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// User is the Schema for the Users API. Provides a MemoryDB User. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type User struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.accessString) || (has(self.initProvider) && has(self.initProvider.accessString))",message="spec.forProvider.accessString is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authenticationMode) || (has(self.initProvider) && has(self.initProvider.authenticationMode))",message="spec.forProvider.authenticationMode is a required parameter" + Spec UserSpec `json:"spec"` + Status UserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserList contains a list of Users +type UserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []User `json:"items"` +} + +// Repository type metadata. +var ( + User_Kind = "User" + User_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: User_Kind}.String() + User_KindAPIVersion = User_Kind + "." 
+ CRDGroupVersion.String() + User_GroupVersionKind = CRDGroupVersion.WithKind(User_Kind) +) + +func init() { + SchemeBuilder.Register(&User{}, &UserList{}) +} diff --git a/apis/mq/v1alpha1/zz_generated.resolvers.go b/apis/mq/v1alpha1/zz_generated.resolvers.go index 32c653e58a..54a2f81892 100644 --- a/apis/mq/v1alpha1/zz_generated.resolvers.go +++ b/apis/mq/v1alpha1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *User) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("mq.aws.upbound.io", "v1beta1", "Broker", "BrokerList") + m, l, err = apisresolver.GetManagedResource("mq.aws.upbound.io", "v1beta2", "Broker", "BrokerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -45,7 +45,7 @@ func (mg *User) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.BrokerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BrokerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("mq.aws.upbound.io", "v1beta1", "Broker", "BrokerList") + m, l, err = apisresolver.GetManagedResource("mq.aws.upbound.io", "v1beta2", "Broker", "BrokerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/mq/v1alpha1/zz_user_types.go b/apis/mq/v1alpha1/zz_user_types.go index a070f9f970..d3af8bcf7c 100755 --- a/apis/mq/v1alpha1/zz_user_types.go +++ b/apis/mq/v1alpha1/zz_user_types.go @@ -15,7 +15,7 @@ import ( type UserInitParameters struct { - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/mq/v1beta1.Broker + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/mq/v1beta2.Broker BrokerID *string `json:"brokerId,omitempty" tf:"broker_id,omitempty"` // Reference to a Broker in 
mq to populate brokerId. @@ -53,7 +53,7 @@ type UserObservation struct { type UserParameters struct { - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/mq/v1beta1.Broker + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/mq/v1beta2.Broker // +kubebuilder:validation:Optional BrokerID *string `json:"brokerId,omitempty" tf:"broker_id,omitempty"` diff --git a/apis/mq/v1beta1/zz_generated.conversion_hubs.go b/apis/mq/v1beta1/zz_generated.conversion_hubs.go index 0ea2dd8193..f233d9ca01 100755 --- a/apis/mq/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/mq/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Broker) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Configuration) Hub() {} diff --git a/apis/mq/v1beta1/zz_generated.conversion_spokes.go b/apis/mq/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ae75b31766 --- /dev/null +++ b/apis/mq/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Broker to the hub type. +func (tr *Broker) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Broker type. 
+func (tr *Broker) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/mq/v1beta2/zz_broker_terraformed.go b/apis/mq/v1beta2/zz_broker_terraformed.go new file mode 100755 index 0000000000..147d2ec99c --- /dev/null +++ b/apis/mq/v1beta2/zz_broker_terraformed.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Broker +func (mg *Broker) GetTerraformResourceType() string { + return "aws_mq_broker" +} + +// GetConnectionDetailsMapping for this Broker +func (tr *Broker) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"ldap_server_metadata[*].service_account_password": "ldapServerMetadata[*].serviceAccountPasswordSecretRef", "user[*].password": "user[*].passwordSecretRef"} +} + +// GetObservation of this Broker +func (tr *Broker) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Broker +func (tr *Broker) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of 
this Broker +func (tr *Broker) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Broker +func (tr *Broker) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Broker +func (tr *Broker) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Broker +func (tr *Broker) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Broker +func (tr *Broker) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Broker using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Broker) LateInitialize(attrs []byte) (bool, error) { + params := &BrokerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + initParams, err := tr.GetInitParameters() + if err != nil { + return false, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + opts = append(opts, resource.WithConditionalFilter("User", initParams)) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Broker) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mq/v1beta2/zz_broker_types.go b/apis/mq/v1beta2/zz_broker_types.go new file mode 100755 index 0000000000..f6b515fece --- /dev/null +++ b/apis/mq/v1beta2/zz_broker_types.go @@ -0,0 +1,731 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BrokerInitParameters struct { + + // Specifies whether any broker modifications are applied immediately, or during the next maintenance window. 
Default is false. + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // Authentication strategy used to secure the broker. Valid values are simple and ldap. ldap is not supported for engine_type RabbitMQ. + AuthenticationStrategy *string `json:"authenticationStrategy,omitempty" tf:"authentication_strategy,omitempty"` + + // Whether to automatically upgrade to new minor versions of brokers as Amazon MQ makes releases available. + AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty" tf:"auto_minor_version_upgrade,omitempty"` + + // Name of the broker. + BrokerName *string `json:"brokerName,omitempty" tf:"broker_name,omitempty"` + + // Configuration block for broker configuration. Applies to engine_type of ActiveMQ and RabbitMQ only. Detailed below. + Configuration *ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Defines whether this broker is a part of a data replication pair. Valid values are CRDR and NONE. + DataReplicationMode *string `json:"dataReplicationMode,omitempty" tf:"data_replication_mode,omitempty"` + + // The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when data_replication_mode is CRDR. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/mq/v1beta2.Broker + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + DataReplicationPrimaryBrokerArn *string `json:"dataReplicationPrimaryBrokerArn,omitempty" tf:"data_replication_primary_broker_arn,omitempty"` + + // Reference to a Broker in mq to populate dataReplicationPrimaryBrokerArn. 
+ // +kubebuilder:validation:Optional + DataReplicationPrimaryBrokerArnRef *v1.Reference `json:"dataReplicationPrimaryBrokerArnRef,omitempty" tf:"-"` + + // Selector for a Broker in mq to populate dataReplicationPrimaryBrokerArn. + // +kubebuilder:validation:Optional + DataReplicationPrimaryBrokerArnSelector *v1.Selector `json:"dataReplicationPrimaryBrokerArnSelector,omitempty" tf:"-"` + + // Deployment mode of the broker. Valid values are SINGLE_INSTANCE, ACTIVE_STANDBY_MULTI_AZ, and CLUSTER_MULTI_AZ. Default is SINGLE_INSTANCE. + DeploymentMode *string `json:"deploymentMode,omitempty" tf:"deployment_mode,omitempty"` + + // Configuration block containing encryption options. Detailed below. + EncryptionOptions *EncryptionOptionsInitParameters `json:"encryptionOptions,omitempty" tf:"encryption_options,omitempty"` + + // Type of broker engine. Valid values are ActiveMQ and RabbitMQ. + EngineType *string `json:"engineType,omitempty" tf:"engine_type,omitempty"` + + // Version of the broker engine. See the AmazonMQ Broker Engine docs for supported versions. For example, 5.17.6. + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Broker's instance type. For example, mq.t3.micro, mq.m5.large. + HostInstanceType *string `json:"hostInstanceType,omitempty" tf:"host_instance_type,omitempty"` + + // Configuration block for the LDAP server used to authenticate and authorize connections to the broker. Not supported for engine_type RabbitMQ. Detailed below. (Currently, AWS may not process changes to LDAP server metadata.) + LdapServerMetadata *LdapServerMetadataInitParameters `json:"ldapServerMetadata,omitempty" tf:"ldap_server_metadata,omitempty"` + + // Configuration block for the logging configuration of the broker. Detailed below. + Logs *LogsInitParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Configuration block for the maintenance window start time. Detailed below. 
+ MaintenanceWindowStartTime *MaintenanceWindowStartTimeInitParameters `json:"maintenanceWindowStartTime,omitempty" tf:"maintenance_window_start_time,omitempty"` + + // Whether to enable connections from applications outside of the VPC that hosts the broker's subnets. + PubliclyAccessible *bool `json:"publiclyAccessible,omitempty" tf:"publicly_accessible,omitempty"` + + // References to SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // List of security group IDs assigned to the broker. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Storage type of the broker. For engine_type ActiveMQ, the valid values are efs and ebs, and the AWS-default is efs. For engine_type RabbitMQ, only ebs is supported. When using ebs, only the mq.m5 broker instance type family is supported. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // List of subnet IDs in which to launch the broker. A SINGLE_INSTANCE deployment requires one subnet. 
An ACTIVE_STANDBY_MULTI_AZ deployment requires multiple subnets. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for broker users. For engine_type of RabbitMQ, Amazon MQ does not return broker users preventing this resource from making user updates and drift detection. Detailed below. + User []UserInitParameters `json:"user,omitempty" tf:"user,omitempty"` +} + +type BrokerObservation struct { + + // Specifies whether any broker modifications are applied immediately, or during the next maintenance window. Default is false. + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // ARN of the broker. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Authentication strategy used to secure the broker. Valid values are simple and ldap. ldap is not supported for engine_type RabbitMQ. + AuthenticationStrategy *string `json:"authenticationStrategy,omitempty" tf:"authentication_strategy,omitempty"` + + // Whether to automatically upgrade to new minor versions of brokers as Amazon MQ makes releases available. + AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty" tf:"auto_minor_version_upgrade,omitempty"` + + // Name of the broker. + BrokerName *string `json:"brokerName,omitempty" tf:"broker_name,omitempty"` + + // Configuration block for broker configuration. Applies to engine_type of ActiveMQ and RabbitMQ only. Detailed below. 
+ Configuration *ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Defines whether this broker is a part of a data replication pair. Valid values are CRDR and NONE. + DataReplicationMode *string `json:"dataReplicationMode,omitempty" tf:"data_replication_mode,omitempty"` + + // The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when data_replication_mode is CRDR. + DataReplicationPrimaryBrokerArn *string `json:"dataReplicationPrimaryBrokerArn,omitempty" tf:"data_replication_primary_broker_arn,omitempty"` + + // Deployment mode of the broker. Valid values are SINGLE_INSTANCE, ACTIVE_STANDBY_MULTI_AZ, and CLUSTER_MULTI_AZ. Default is SINGLE_INSTANCE. + DeploymentMode *string `json:"deploymentMode,omitempty" tf:"deployment_mode,omitempty"` + + // Configuration block containing encryption options. Detailed below. + EncryptionOptions *EncryptionOptionsObservation `json:"encryptionOptions,omitempty" tf:"encryption_options,omitempty"` + + // Type of broker engine. Valid values are ActiveMQ and RabbitMQ. + EngineType *string `json:"engineType,omitempty" tf:"engine_type,omitempty"` + + // Version of the broker engine. See the AmazonMQ Broker Engine docs for supported versions. For example, 5.17.6. + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Broker's instance type. For example, mq.t3.micro, mq.m5.large. + HostInstanceType *string `json:"hostInstanceType,omitempty" tf:"host_instance_type,omitempty"` + + // Unique ID that Amazon MQ generates for the broker. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of information about allocated brokers (both active & standby). 
+ Instances []InstancesObservation `json:"instances,omitempty" tf:"instances,omitempty"` + + // Configuration block for the LDAP server used to authenticate and authorize connections to the broker. Not supported for engine_type RabbitMQ. Detailed below. (Currently, AWS may not process changes to LDAP server metadata.) + LdapServerMetadata *LdapServerMetadataObservation `json:"ldapServerMetadata,omitempty" tf:"ldap_server_metadata,omitempty"` + + // Configuration block for the logging configuration of the broker. Detailed below. + Logs *LogsObservation `json:"logs,omitempty" tf:"logs,omitempty"` + + // Configuration block for the maintenance window start time. Detailed below. + MaintenanceWindowStartTime *MaintenanceWindowStartTimeObservation `json:"maintenanceWindowStartTime,omitempty" tf:"maintenance_window_start_time,omitempty"` + + // The data replication mode that will be applied after reboot. + PendingDataReplicationMode *string `json:"pendingDataReplicationMode,omitempty" tf:"pending_data_replication_mode,omitempty"` + + // Whether to enable connections from applications outside of the VPC that hosts the broker's subnets. + PubliclyAccessible *bool `json:"publiclyAccessible,omitempty" tf:"publicly_accessible,omitempty"` + + // List of security group IDs assigned to the broker. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Storage type of the broker. For engine_type ActiveMQ, the valid values are efs and ebs, and the AWS-default is efs. For engine_type RabbitMQ, only ebs is supported. When using ebs, only the mq.m5 broker instance type family is supported. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // List of subnet IDs in which to launch the broker. A SINGLE_INSTANCE deployment requires one subnet. An ACTIVE_STANDBY_MULTI_AZ deployment requires multiple subnets. 
+ // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block for broker users. For engine_type of RabbitMQ, Amazon MQ does not return broker users preventing this resource from making user updates and drift detection. Detailed below. + User []UserObservation `json:"user,omitempty" tf:"user,omitempty"` +} + +type BrokerParameters struct { + + // Specifies whether any broker modifications are applied immediately, or during the next maintenance window. Default is false. + // +kubebuilder:validation:Optional + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // Authentication strategy used to secure the broker. Valid values are simple and ldap. ldap is not supported for engine_type RabbitMQ. + // +kubebuilder:validation:Optional + AuthenticationStrategy *string `json:"authenticationStrategy,omitempty" tf:"authentication_strategy,omitempty"` + + // Whether to automatically upgrade to new minor versions of brokers as Amazon MQ makes releases available. + // +kubebuilder:validation:Optional + AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty" tf:"auto_minor_version_upgrade,omitempty"` + + // Name of the broker. + // +kubebuilder:validation:Optional + BrokerName *string `json:"brokerName,omitempty" tf:"broker_name,omitempty"` + + // Configuration block for broker configuration. Applies to engine_type of ActiveMQ and RabbitMQ only. Detailed below. 
+ // +kubebuilder:validation:Optional + Configuration *ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // Defines whether this broker is a part of a data replication pair. Valid values are CRDR and NONE. + // +kubebuilder:validation:Optional + DataReplicationMode *string `json:"dataReplicationMode,omitempty" tf:"data_replication_mode,omitempty"` + + // The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when data_replication_mode is CRDR. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/mq/v1beta2.Broker + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + DataReplicationPrimaryBrokerArn *string `json:"dataReplicationPrimaryBrokerArn,omitempty" tf:"data_replication_primary_broker_arn,omitempty"` + + // Reference to a Broker in mq to populate dataReplicationPrimaryBrokerArn. + // +kubebuilder:validation:Optional + DataReplicationPrimaryBrokerArnRef *v1.Reference `json:"dataReplicationPrimaryBrokerArnRef,omitempty" tf:"-"` + + // Selector for a Broker in mq to populate dataReplicationPrimaryBrokerArn. + // +kubebuilder:validation:Optional + DataReplicationPrimaryBrokerArnSelector *v1.Selector `json:"dataReplicationPrimaryBrokerArnSelector,omitempty" tf:"-"` + + // Deployment mode of the broker. Valid values are SINGLE_INSTANCE, ACTIVE_STANDBY_MULTI_AZ, and CLUSTER_MULTI_AZ. Default is SINGLE_INSTANCE. + // +kubebuilder:validation:Optional + DeploymentMode *string `json:"deploymentMode,omitempty" tf:"deployment_mode,omitempty"` + + // Configuration block containing encryption options. Detailed below. + // +kubebuilder:validation:Optional + EncryptionOptions *EncryptionOptionsParameters `json:"encryptionOptions,omitempty" tf:"encryption_options,omitempty"` + + // Type of broker engine. 
Valid values are ActiveMQ and RabbitMQ. + // +kubebuilder:validation:Optional + EngineType *string `json:"engineType,omitempty" tf:"engine_type,omitempty"` + + // Version of the broker engine. See the AmazonMQ Broker Engine docs for supported versions. For example, 5.17.6. + // +kubebuilder:validation:Optional + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Broker's instance type. For example, mq.t3.micro, mq.m5.large. + // +kubebuilder:validation:Optional + HostInstanceType *string `json:"hostInstanceType,omitempty" tf:"host_instance_type,omitempty"` + + // Configuration block for the LDAP server used to authenticate and authorize connections to the broker. Not supported for engine_type RabbitMQ. Detailed below. (Currently, AWS may not process changes to LDAP server metadata.) + // +kubebuilder:validation:Optional + LdapServerMetadata *LdapServerMetadataParameters `json:"ldapServerMetadata,omitempty" tf:"ldap_server_metadata,omitempty"` + + // Configuration block for the logging configuration of the broker. Detailed below. + // +kubebuilder:validation:Optional + Logs *LogsParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Configuration block for the maintenance window start time. Detailed below. + // +kubebuilder:validation:Optional + MaintenanceWindowStartTime *MaintenanceWindowStartTimeParameters `json:"maintenanceWindowStartTime,omitempty" tf:"maintenance_window_start_time,omitempty"` + + // Whether to enable connections from applications outside of the VPC that hosts the broker's subnets. + // +kubebuilder:validation:Optional + PubliclyAccessible *bool `json:"publiclyAccessible,omitempty" tf:"publicly_accessible,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // References to SecurityGroup in ec2 to populate securityGroups. 
+ // +kubebuilder:validation:Optional + SecurityGroupRefs []v1.Reference `json:"securityGroupRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroups. + // +kubebuilder:validation:Optional + SecurityGroupSelector *v1.Selector `json:"securityGroupSelector,omitempty" tf:"-"` + + // List of security group IDs assigned to the broker. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupSelector + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Storage type of the broker. For engine_type ActiveMQ, the valid values are efs and ebs, and the AWS-default is efs. For engine_type RabbitMQ, only ebs is supported. When using ebs, only the mq.m5 broker instance type family is supported. + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // List of subnet IDs in which to launch the broker. A SINGLE_INSTANCE deployment requires one subnet. An ACTIVE_STANDBY_MULTI_AZ deployment requires multiple subnets. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for broker users. For engine_type of RabbitMQ, Amazon MQ does not return broker users preventing this resource from making user updates and drift detection. Detailed below. + // +kubebuilder:validation:Optional + User []UserParameters `json:"user,omitempty" tf:"user,omitempty"` +} + +type ConfigurationInitParameters struct { + + // The Configuration ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/mq/v1beta1.Configuration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a Configuration in mq to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a Configuration in mq to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + + // Revision of the Configuration. + Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` +} + +type ConfigurationObservation struct { + + // The Configuration ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Revision of the Configuration. + Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` +} + +type ConfigurationParameters struct { + + // The Configuration ID. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/mq/v1beta1.Configuration + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a Configuration in mq to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a Configuration in mq to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + + // Revision of the Configuration. + // +kubebuilder:validation:Optional + Revision *float64 `json:"revision,omitempty" tf:"revision,omitempty"` +} + +type EncryptionOptionsInitParameters struct { + + // Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. Requires setting use_aws_owned_key to false. To perform drift detection when AWS-managed CMKs or customer-managed CMKs are in use, this value must be configured. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Whether to enable an AWS-owned KMS CMK that is not in your account. Defaults to true. Setting to false without configuring kms_key_id will create an AWS-managed CMK aliased to aws/mq in your account. + UseAwsOwnedKey *bool `json:"useAwsOwnedKey,omitempty" tf:"use_aws_owned_key,omitempty"` +} + +type EncryptionOptionsObservation struct { + + // Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. Requires setting use_aws_owned_key to false. To perform drift detection when AWS-managed CMKs or customer-managed CMKs are in use, this value must be configured. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Whether to enable an AWS-owned KMS CMK that is not in your account. Defaults to true. 
Setting to false without configuring kms_key_id will create an AWS-managed CMK aliased to aws/mq in your account. + UseAwsOwnedKey *bool `json:"useAwsOwnedKey,omitempty" tf:"use_aws_owned_key,omitempty"` +} + +type EncryptionOptionsParameters struct { + + // Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. Requires setting use_aws_owned_key to false. To perform drift detection when AWS-managed CMKs or customer-managed CMKs are in use, this value must be configured. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Whether to enable an AWS-owned KMS CMK that is not in your account. Defaults to true. Setting to false without configuring kms_key_id will create an AWS-managed CMK aliased to aws/mq in your account. + // +kubebuilder:validation:Optional + UseAwsOwnedKey *bool `json:"useAwsOwnedKey,omitempty" tf:"use_aws_owned_key,omitempty"` +} + +type InstancesInitParameters struct { +} + +type InstancesObservation struct { + + // The URL of the ActiveMQ Web Console or the RabbitMQ Management UI depending on engine_type. + ConsoleURL *string `json:"consoleUrl,omitempty" tf:"console_url,omitempty"` + + // Broker's wire-level protocol endpoints in the following order & format referenceable e.g., as instances.0.endpoints.0 (SSL): + Endpoints []*string `json:"endpoints,omitempty" tf:"endpoints,omitempty"` + + // IP Address of the broker. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` +} + +type InstancesParameters struct { +} + +type LdapServerMetadataInitParameters struct { + + // List of a fully qualified domain name of the LDAP server and an optional failover server. + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + + // Fully qualified name of the directory to search for a user’s groups. 
+ RoleBase *string `json:"roleBase,omitempty" tf:"role_base,omitempty"` + + // Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query. + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // Search criteria for groups. + RoleSearchMatching *string `json:"roleSearchMatching,omitempty" tf:"role_search_matching,omitempty"` + + // Whether the directory search scope is the entire sub-tree. + RoleSearchSubtree *bool `json:"roleSearchSubtree,omitempty" tf:"role_search_subtree,omitempty"` + + // Service account password. + ServiceAccountPasswordSecretRef *v1.SecretKeySelector `json:"serviceAccountPasswordSecretRef,omitempty" tf:"-"` + + // Service account username. + ServiceAccountUsername *string `json:"serviceAccountUsername,omitempty" tf:"service_account_username,omitempty"` + + // Fully qualified name of the directory where you want to search for users. + UserBase *string `json:"userBase,omitempty" tf:"user_base,omitempty"` + + // Specifies the name of the LDAP attribute for the user group membership. + UserRoleName *string `json:"userRoleName,omitempty" tf:"user_role_name,omitempty"` + + // Search criteria for users. + UserSearchMatching *string `json:"userSearchMatching,omitempty" tf:"user_search_matching,omitempty"` + + // Whether the directory search scope is the entire sub-tree. + UserSearchSubtree *bool `json:"userSearchSubtree,omitempty" tf:"user_search_subtree,omitempty"` +} + +type LdapServerMetadataObservation struct { + + // List of a fully qualified domain name of the LDAP server and an optional failover server. + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + + // Fully qualified name of the directory to search for a user’s groups. + RoleBase *string `json:"roleBase,omitempty" tf:"role_base,omitempty"` + + // Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query. 
+ RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // Search criteria for groups. + RoleSearchMatching *string `json:"roleSearchMatching,omitempty" tf:"role_search_matching,omitempty"` + + // Whether the directory search scope is the entire sub-tree. + RoleSearchSubtree *bool `json:"roleSearchSubtree,omitempty" tf:"role_search_subtree,omitempty"` + + // Service account username. + ServiceAccountUsername *string `json:"serviceAccountUsername,omitempty" tf:"service_account_username,omitempty"` + + // Fully qualified name of the directory where you want to search for users. + UserBase *string `json:"userBase,omitempty" tf:"user_base,omitempty"` + + // Specifies the name of the LDAP attribute for the user group membership. + UserRoleName *string `json:"userRoleName,omitempty" tf:"user_role_name,omitempty"` + + // Search criteria for users. + UserSearchMatching *string `json:"userSearchMatching,omitempty" tf:"user_search_matching,omitempty"` + + // Whether the directory search scope is the entire sub-tree. + UserSearchSubtree *bool `json:"userSearchSubtree,omitempty" tf:"user_search_subtree,omitempty"` +} + +type LdapServerMetadataParameters struct { + + // List of a fully qualified domain name of the LDAP server and an optional failover server. + // +kubebuilder:validation:Optional + Hosts []*string `json:"hosts,omitempty" tf:"hosts,omitempty"` + + // Fully qualified name of the directory to search for a user’s groups. + // +kubebuilder:validation:Optional + RoleBase *string `json:"roleBase,omitempty" tf:"role_base,omitempty"` + + // Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query. + // +kubebuilder:validation:Optional + RoleName *string `json:"roleName,omitempty" tf:"role_name,omitempty"` + + // Search criteria for groups. 
+ // +kubebuilder:validation:Optional + RoleSearchMatching *string `json:"roleSearchMatching,omitempty" tf:"role_search_matching,omitempty"` + + // Whether the directory search scope is the entire sub-tree. + // +kubebuilder:validation:Optional + RoleSearchSubtree *bool `json:"roleSearchSubtree,omitempty" tf:"role_search_subtree,omitempty"` + + // Service account password. + // +kubebuilder:validation:Optional + ServiceAccountPasswordSecretRef *v1.SecretKeySelector `json:"serviceAccountPasswordSecretRef,omitempty" tf:"-"` + + // Service account username. + // +kubebuilder:validation:Optional + ServiceAccountUsername *string `json:"serviceAccountUsername,omitempty" tf:"service_account_username,omitempty"` + + // Fully qualified name of the directory where you want to search for users. + // +kubebuilder:validation:Optional + UserBase *string `json:"userBase,omitempty" tf:"user_base,omitempty"` + + // Specifies the name of the LDAP attribute for the user group membership. + // +kubebuilder:validation:Optional + UserRoleName *string `json:"userRoleName,omitempty" tf:"user_role_name,omitempty"` + + // Search criteria for users. + // +kubebuilder:validation:Optional + UserSearchMatching *string `json:"userSearchMatching,omitempty" tf:"user_search_matching,omitempty"` + + // Whether the directory search scope is the entire sub-tree. + // +kubebuilder:validation:Optional + UserSearchSubtree *bool `json:"userSearchSubtree,omitempty" tf:"user_search_subtree,omitempty"` +} + +type LogsInitParameters struct { + + // Enables audit logging. Auditing is only possible for engine_type of ActiveMQ. User management action made using JMX or the ActiveMQ Web Console is logged. Defaults to false. + Audit *string `json:"audit,omitempty" tf:"audit,omitempty"` + + // Enables general logging via CloudWatch. Defaults to false. + General *bool `json:"general,omitempty" tf:"general,omitempty"` +} + +type LogsObservation struct { + + // Enables audit logging. 
Auditing is only possible for engine_type of ActiveMQ. User management action made using JMX or the ActiveMQ Web Console is logged. Defaults to false. + Audit *string `json:"audit,omitempty" tf:"audit,omitempty"` + + // Enables general logging via CloudWatch. Defaults to false. + General *bool `json:"general,omitempty" tf:"general,omitempty"` +} + +type LogsParameters struct { + + // Enables audit logging. Auditing is only possible for engine_type of ActiveMQ. User management action made using JMX or the ActiveMQ Web Console is logged. Defaults to false. + // +kubebuilder:validation:Optional + Audit *string `json:"audit,omitempty" tf:"audit,omitempty"` + + // Enables general logging via CloudWatch. Defaults to false. + // +kubebuilder:validation:Optional + General *bool `json:"general,omitempty" tf:"general,omitempty"` +} + +type MaintenanceWindowStartTimeInitParameters struct { + + // Day of the week, e.g., MONDAY, TUESDAY, or WEDNESDAY. + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // Time, in 24-hour format, e.g., 02:00. + TimeOfDay *string `json:"timeOfDay,omitempty" tf:"time_of_day,omitempty"` + + // Time zone in either the Country/City format or the UTC offset format, e.g., CET. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MaintenanceWindowStartTimeObservation struct { + + // Day of the week, e.g., MONDAY, TUESDAY, or WEDNESDAY. + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // Time, in 24-hour format, e.g., 02:00. + TimeOfDay *string `json:"timeOfDay,omitempty" tf:"time_of_day,omitempty"` + + // Time zone in either the Country/City format or the UTC offset format, e.g., CET. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MaintenanceWindowStartTimeParameters struct { + + // Day of the week, e.g., MONDAY, TUESDAY, or WEDNESDAY. 
+ // +kubebuilder:validation:Optional + DayOfWeek *string `json:"dayOfWeek" tf:"day_of_week,omitempty"` + + // Time, in 24-hour format, e.g., 02:00. + // +kubebuilder:validation:Optional + TimeOfDay *string `json:"timeOfDay" tf:"time_of_day,omitempty"` + + // Time zone in either the Country/City format or the UTC offset format, e.g., CET. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone" tf:"time_zone,omitempty"` +} + +type UserInitParameters struct { + + // Whether to enable access to the ActiveMQ Web Console for the user. Applies to engine_type of ActiveMQ only. + ConsoleAccess *bool `json:"consoleAccess,omitempty" tf:"console_access,omitempty"` + + // List of groups (20 maximum) to which the ActiveMQ user belongs. Applies to engine_type of ActiveMQ only. + // +listType=set + Groups []*string `json:"groups,omitempty" tf:"groups,omitempty"` + + // Password of the user. It must be 12 to 250 characters long, at least 4 unique characters, and must not contain commas. + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // Whether to set set replication user. Defaults to false. + ReplicationUser *bool `json:"replicationUser,omitempty" tf:"replication_user,omitempty"` + + // Username of the user. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type UserObservation struct { + + // Whether to enable access to the ActiveMQ Web Console for the user. Applies to engine_type of ActiveMQ only. + ConsoleAccess *bool `json:"consoleAccess,omitempty" tf:"console_access,omitempty"` + + // List of groups (20 maximum) to which the ActiveMQ user belongs. Applies to engine_type of ActiveMQ only. + // +listType=set + Groups []*string `json:"groups,omitempty" tf:"groups,omitempty"` + + // Whether to set set replication user. Defaults to false. + ReplicationUser *bool `json:"replicationUser,omitempty" tf:"replication_user,omitempty"` + + // Username of the user. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type UserParameters struct { + + // Whether to enable access to the ActiveMQ Web Console for the user. Applies to engine_type of ActiveMQ only. + // +kubebuilder:validation:Optional + ConsoleAccess *bool `json:"consoleAccess,omitempty" tf:"console_access,omitempty"` + + // List of groups (20 maximum) to which the ActiveMQ user belongs. Applies to engine_type of ActiveMQ only. + // +kubebuilder:validation:Optional + // +listType=set + Groups []*string `json:"groups,omitempty" tf:"groups,omitempty"` + + // Password of the user. It must be 12 to 250 characters long, at least 4 unique characters, and must not contain commas. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // Whether to set set replication user. Defaults to false. + // +kubebuilder:validation:Optional + ReplicationUser *bool `json:"replicationUser,omitempty" tf:"replication_user,omitempty"` + + // Username of the user. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +// BrokerSpec defines the desired state of Broker +type BrokerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BrokerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider BrokerInitParameters `json:"initProvider,omitempty"` +} + +// BrokerStatus defines the observed state of Broker. +type BrokerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BrokerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Broker is the Schema for the Brokers API. Provides an MQ Broker Resource +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Broker struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.brokerName) || (has(self.initProvider) && has(self.initProvider.brokerName))",message="spec.forProvider.brokerName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engineType) || (has(self.initProvider) && has(self.initProvider.engineType))",message="spec.forProvider.engineType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engineVersion) || (has(self.initProvider) && has(self.initProvider.engineVersion))",message="spec.forProvider.engineVersion is a 
required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.hostInstanceType) || (has(self.initProvider) && has(self.initProvider.hostInstanceType))",message="spec.forProvider.hostInstanceType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.user) || (has(self.initProvider) && has(self.initProvider.user))",message="spec.forProvider.user is a required parameter" + Spec BrokerSpec `json:"spec"` + Status BrokerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BrokerList contains a list of Brokers +type BrokerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Broker `json:"items"` +} + +// Repository type metadata. +var ( + Broker_Kind = "Broker" + Broker_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Broker_Kind}.String() + Broker_KindAPIVersion = Broker_Kind + "." + CRDGroupVersion.String() + Broker_GroupVersionKind = CRDGroupVersion.WithKind(Broker_Kind) +) + +func init() { + SchemeBuilder.Register(&Broker{}, &BrokerList{}) +} diff --git a/apis/mq/v1beta2/zz_generated.conversion_hubs.go b/apis/mq/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..a08aa5173d --- /dev/null +++ b/apis/mq/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Broker) Hub() {} diff --git a/apis/mq/v1beta2/zz_generated.deepcopy.go b/apis/mq/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..860965409b --- /dev/null +++ b/apis/mq/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1403 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Broker) DeepCopyInto(out *Broker) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Broker. +func (in *Broker) DeepCopy() *Broker { + if in == nil { + return nil + } + out := new(Broker) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Broker) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BrokerInitParameters) DeepCopyInto(out *BrokerInitParameters) { + *out = *in + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AuthenticationStrategy != nil { + in, out := &in.AuthenticationStrategy, &out.AuthenticationStrategy + *out = new(string) + **out = **in + } + if in.AutoMinorVersionUpgrade != nil { + in, out := &in.AutoMinorVersionUpgrade, &out.AutoMinorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.BrokerName != nil { + in, out := &in.BrokerName, &out.BrokerName + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DataReplicationMode != nil { + in, out := &in.DataReplicationMode, &out.DataReplicationMode + *out = new(string) + **out = **in + } + if in.DataReplicationPrimaryBrokerArn != nil { + in, out := &in.DataReplicationPrimaryBrokerArn, &out.DataReplicationPrimaryBrokerArn + *out = new(string) + **out = **in + } + if in.DataReplicationPrimaryBrokerArnRef != nil { + in, out := &in.DataReplicationPrimaryBrokerArnRef, &out.DataReplicationPrimaryBrokerArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataReplicationPrimaryBrokerArnSelector != nil { + in, out := &in.DataReplicationPrimaryBrokerArnSelector, &out.DataReplicationPrimaryBrokerArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeploymentMode != nil { + in, out := &in.DeploymentMode, &out.DeploymentMode + *out = new(string) + **out = **in + } + if in.EncryptionOptions != nil { + in, out := &in.EncryptionOptions, &out.EncryptionOptions + *out = new(EncryptionOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EngineType != nil { + in, out := &in.EngineType, &out.EngineType + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, 
&out.EngineVersion + *out = new(string) + **out = **in + } + if in.HostInstanceType != nil { + in, out := &in.HostInstanceType, &out.HostInstanceType + *out = new(string) + **out = **in + } + if in.LdapServerMetadata != nil { + in, out := &in.LdapServerMetadata, &out.LdapServerMetadata + *out = new(LdapServerMetadataInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindowStartTime != nil { + in, out := &in.MaintenanceWindowStartTime, &out.MaintenanceWindowStartTime + *out = new(MaintenanceWindowStartTimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PubliclyAccessible != nil { + in, out := &in.PubliclyAccessible, &out.PubliclyAccessible + *out = new(bool) + **out = **in + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for 
i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]UserInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerInitParameters. +func (in *BrokerInitParameters) DeepCopy() *BrokerInitParameters { + if in == nil { + return nil + } + out := new(BrokerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerList) DeepCopyInto(out *BrokerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Broker, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerList. +func (in *BrokerList) DeepCopy() *BrokerList { + if in == nil { + return nil + } + out := new(BrokerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BrokerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BrokerObservation) DeepCopyInto(out *BrokerObservation) { + *out = *in + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AuthenticationStrategy != nil { + in, out := &in.AuthenticationStrategy, &out.AuthenticationStrategy + *out = new(string) + **out = **in + } + if in.AutoMinorVersionUpgrade != nil { + in, out := &in.AutoMinorVersionUpgrade, &out.AutoMinorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.BrokerName != nil { + in, out := &in.BrokerName, &out.BrokerName + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.DataReplicationMode != nil { + in, out := &in.DataReplicationMode, &out.DataReplicationMode + *out = new(string) + **out = **in + } + if in.DataReplicationPrimaryBrokerArn != nil { + in, out := &in.DataReplicationPrimaryBrokerArn, &out.DataReplicationPrimaryBrokerArn + *out = new(string) + **out = **in + } + if in.DeploymentMode != nil { + in, out := &in.DeploymentMode, &out.DeploymentMode + *out = new(string) + **out = **in + } + if in.EncryptionOptions != nil { + in, out := &in.EncryptionOptions, &out.EncryptionOptions + *out = new(EncryptionOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.EngineType != nil { + in, out := &in.EngineType, &out.EngineType + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.HostInstanceType != nil { + in, out := &in.HostInstanceType, &out.HostInstanceType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Instances != nil { + in, out := &in.Instances, 
&out.Instances + *out = make([]InstancesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LdapServerMetadata != nil { + in, out := &in.LdapServerMetadata, &out.LdapServerMetadata + *out = new(LdapServerMetadataObservation) + (*in).DeepCopyInto(*out) + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LogsObservation) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindowStartTime != nil { + in, out := &in.MaintenanceWindowStartTime, &out.MaintenanceWindowStartTime + *out = new(MaintenanceWindowStartTimeObservation) + (*in).DeepCopyInto(*out) + } + if in.PendingDataReplicationMode != nil { + in, out := &in.PendingDataReplicationMode, &out.PendingDataReplicationMode + *out = new(string) + **out = **in + } + if in.PubliclyAccessible != nil { + in, out := &in.PubliclyAccessible, &out.PubliclyAccessible + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + 
if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]UserObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerObservation. +func (in *BrokerObservation) DeepCopy() *BrokerObservation { + if in == nil { + return nil + } + out := new(BrokerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerParameters) DeepCopyInto(out *BrokerParameters) { + *out = *in + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AuthenticationStrategy != nil { + in, out := &in.AuthenticationStrategy, &out.AuthenticationStrategy + *out = new(string) + **out = **in + } + if in.AutoMinorVersionUpgrade != nil { + in, out := &in.AutoMinorVersionUpgrade, &out.AutoMinorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.BrokerName != nil { + in, out := &in.BrokerName, &out.BrokerName + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.DataReplicationMode != nil { + in, out := &in.DataReplicationMode, &out.DataReplicationMode + *out = new(string) + **out = **in + } + if in.DataReplicationPrimaryBrokerArn != nil { + in, out := &in.DataReplicationPrimaryBrokerArn, &out.DataReplicationPrimaryBrokerArn + *out = new(string) + **out = **in + } + if in.DataReplicationPrimaryBrokerArnRef != nil { + in, out := &in.DataReplicationPrimaryBrokerArnRef, &out.DataReplicationPrimaryBrokerArnRef + *out = new(v1.Reference) + 
(*in).DeepCopyInto(*out) + } + if in.DataReplicationPrimaryBrokerArnSelector != nil { + in, out := &in.DataReplicationPrimaryBrokerArnSelector, &out.DataReplicationPrimaryBrokerArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeploymentMode != nil { + in, out := &in.DeploymentMode, &out.DeploymentMode + *out = new(string) + **out = **in + } + if in.EncryptionOptions != nil { + in, out := &in.EncryptionOptions, &out.EncryptionOptions + *out = new(EncryptionOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.EngineType != nil { + in, out := &in.EngineType, &out.EngineType + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.HostInstanceType != nil { + in, out := &in.HostInstanceType, &out.HostInstanceType + *out = new(string) + **out = **in + } + if in.LdapServerMetadata != nil { + in, out := &in.LdapServerMetadata, &out.LdapServerMetadata + *out = new(LdapServerMetadataParameters) + (*in).DeepCopyInto(*out) + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LogsParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindowStartTime != nil { + in, out := &in.MaintenanceWindowStartTime, &out.MaintenanceWindowStartTime + *out = new(MaintenanceWindowStartTimeParameters) + (*in).DeepCopyInto(*out) + } + if in.PubliclyAccessible != nil { + in, out := &in.PubliclyAccessible, &out.PubliclyAccessible + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupSelector != nil { + in, out := &in.SecurityGroupSelector, &out.SecurityGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if 
in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = make([]UserParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerParameters. +func (in *BrokerParameters) DeepCopy() *BrokerParameters { + if in == nil { + return nil + } + out := new(BrokerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BrokerSpec) DeepCopyInto(out *BrokerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerSpec. +func (in *BrokerSpec) DeepCopy() *BrokerSpec { + if in == nil { + return nil + } + out := new(BrokerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerStatus) DeepCopyInto(out *BrokerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerStatus. +func (in *BrokerStatus) DeepCopy() *BrokerStatus { + if in == nil { + return nil + } + out := new(BrokerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. 
+func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionOptionsInitParameters) DeepCopyInto(out *EncryptionOptionsInitParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.UseAwsOwnedKey != nil { + in, out := &in.UseAwsOwnedKey, &out.UseAwsOwnedKey + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionOptionsInitParameters. +func (in *EncryptionOptionsInitParameters) DeepCopy() *EncryptionOptionsInitParameters { + if in == nil { + return nil + } + out := new(EncryptionOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionOptionsObservation) DeepCopyInto(out *EncryptionOptionsObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.UseAwsOwnedKey != nil { + in, out := &in.UseAwsOwnedKey, &out.UseAwsOwnedKey + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionOptionsObservation. +func (in *EncryptionOptionsObservation) DeepCopy() *EncryptionOptionsObservation { + if in == nil { + return nil + } + out := new(EncryptionOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionOptionsParameters) DeepCopyInto(out *EncryptionOptionsParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.UseAwsOwnedKey != nil { + in, out := &in.UseAwsOwnedKey, &out.UseAwsOwnedKey + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionOptionsParameters. +func (in *EncryptionOptionsParameters) DeepCopy() *EncryptionOptionsParameters { + if in == nil { + return nil + } + out := new(EncryptionOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstancesInitParameters) DeepCopyInto(out *InstancesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesInitParameters. +func (in *InstancesInitParameters) DeepCopy() *InstancesInitParameters { + if in == nil { + return nil + } + out := new(InstancesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstancesObservation) DeepCopyInto(out *InstancesObservation) { + *out = *in + if in.ConsoleURL != nil { + in, out := &in.ConsoleURL, &out.ConsoleURL + *out = new(string) + **out = **in + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesObservation. 
+func (in *InstancesObservation) DeepCopy() *InstancesObservation { + if in == nil { + return nil + } + out := new(InstancesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstancesParameters) DeepCopyInto(out *InstancesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancesParameters. +func (in *InstancesParameters) DeepCopy() *InstancesParameters { + if in == nil { + return nil + } + out := new(InstancesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LdapServerMetadataInitParameters) DeepCopyInto(out *LdapServerMetadataInitParameters) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RoleBase != nil { + in, out := &in.RoleBase, &out.RoleBase + *out = new(string) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.RoleSearchMatching != nil { + in, out := &in.RoleSearchMatching, &out.RoleSearchMatching + *out = new(string) + **out = **in + } + if in.RoleSearchSubtree != nil { + in, out := &in.RoleSearchSubtree, &out.RoleSearchSubtree + *out = new(bool) + **out = **in + } + if in.ServiceAccountPasswordSecretRef != nil { + in, out := &in.ServiceAccountPasswordSecretRef, &out.ServiceAccountPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ServiceAccountUsername != nil { + in, out := &in.ServiceAccountUsername, &out.ServiceAccountUsername + *out = new(string) + **out = **in + } + if in.UserBase != nil { + in, out := &in.UserBase, 
&out.UserBase + *out = new(string) + **out = **in + } + if in.UserRoleName != nil { + in, out := &in.UserRoleName, &out.UserRoleName + *out = new(string) + **out = **in + } + if in.UserSearchMatching != nil { + in, out := &in.UserSearchMatching, &out.UserSearchMatching + *out = new(string) + **out = **in + } + if in.UserSearchSubtree != nil { + in, out := &in.UserSearchSubtree, &out.UserSearchSubtree + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LdapServerMetadataInitParameters. +func (in *LdapServerMetadataInitParameters) DeepCopy() *LdapServerMetadataInitParameters { + if in == nil { + return nil + } + out := new(LdapServerMetadataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LdapServerMetadataObservation) DeepCopyInto(out *LdapServerMetadataObservation) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RoleBase != nil { + in, out := &in.RoleBase, &out.RoleBase + *out = new(string) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.RoleSearchMatching != nil { + in, out := &in.RoleSearchMatching, &out.RoleSearchMatching + *out = new(string) + **out = **in + } + if in.RoleSearchSubtree != nil { + in, out := &in.RoleSearchSubtree, &out.RoleSearchSubtree + *out = new(bool) + **out = **in + } + if in.ServiceAccountUsername != nil { + in, out := &in.ServiceAccountUsername, &out.ServiceAccountUsername + *out = new(string) + **out = **in + } + if in.UserBase != nil { + in, out := &in.UserBase, &out.UserBase + *out = new(string) + **out = **in + } + if in.UserRoleName != nil { + 
in, out := &in.UserRoleName, &out.UserRoleName + *out = new(string) + **out = **in + } + if in.UserSearchMatching != nil { + in, out := &in.UserSearchMatching, &out.UserSearchMatching + *out = new(string) + **out = **in + } + if in.UserSearchSubtree != nil { + in, out := &in.UserSearchSubtree, &out.UserSearchSubtree + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LdapServerMetadataObservation. +func (in *LdapServerMetadataObservation) DeepCopy() *LdapServerMetadataObservation { + if in == nil { + return nil + } + out := new(LdapServerMetadataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LdapServerMetadataParameters) DeepCopyInto(out *LdapServerMetadataParameters) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RoleBase != nil { + in, out := &in.RoleBase, &out.RoleBase + *out = new(string) + **out = **in + } + if in.RoleName != nil { + in, out := &in.RoleName, &out.RoleName + *out = new(string) + **out = **in + } + if in.RoleSearchMatching != nil { + in, out := &in.RoleSearchMatching, &out.RoleSearchMatching + *out = new(string) + **out = **in + } + if in.RoleSearchSubtree != nil { + in, out := &in.RoleSearchSubtree, &out.RoleSearchSubtree + *out = new(bool) + **out = **in + } + if in.ServiceAccountPasswordSecretRef != nil { + in, out := &in.ServiceAccountPasswordSecretRef, &out.ServiceAccountPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ServiceAccountUsername != nil { + in, out := &in.ServiceAccountUsername, &out.ServiceAccountUsername + *out = new(string) + **out = **in + } + if in.UserBase != nil { + in, out := 
&in.UserBase, &out.UserBase + *out = new(string) + **out = **in + } + if in.UserRoleName != nil { + in, out := &in.UserRoleName, &out.UserRoleName + *out = new(string) + **out = **in + } + if in.UserSearchMatching != nil { + in, out := &in.UserSearchMatching, &out.UserSearchMatching + *out = new(string) + **out = **in + } + if in.UserSearchSubtree != nil { + in, out := &in.UserSearchSubtree, &out.UserSearchSubtree + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LdapServerMetadataParameters. +func (in *LdapServerMetadataParameters) DeepCopy() *LdapServerMetadataParameters { + if in == nil { + return nil + } + out := new(LdapServerMetadataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsInitParameters) DeepCopyInto(out *LogsInitParameters) { + *out = *in + if in.Audit != nil { + in, out := &in.Audit, &out.Audit + *out = new(string) + **out = **in + } + if in.General != nil { + in, out := &in.General, &out.General + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInitParameters. +func (in *LogsInitParameters) DeepCopy() *LogsInitParameters { + if in == nil { + return nil + } + out := new(LogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsObservation) DeepCopyInto(out *LogsObservation) { + *out = *in + if in.Audit != nil { + in, out := &in.Audit, &out.Audit + *out = new(string) + **out = **in + } + if in.General != nil { + in, out := &in.General, &out.General + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsObservation. 
+func (in *LogsObservation) DeepCopy() *LogsObservation { + if in == nil { + return nil + } + out := new(LogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsParameters) DeepCopyInto(out *LogsParameters) { + *out = *in + if in.Audit != nil { + in, out := &in.Audit, &out.Audit + *out = new(string) + **out = **in + } + if in.General != nil { + in, out := &in.General, &out.General + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsParameters. +func (in *LogsParameters) DeepCopy() *LogsParameters { + if in == nil { + return nil + } + out := new(LogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowStartTimeInitParameters) DeepCopyInto(out *MaintenanceWindowStartTimeInitParameters) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(string) + **out = **in + } + if in.TimeOfDay != nil { + in, out := &in.TimeOfDay, &out.TimeOfDay + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowStartTimeInitParameters. +func (in *MaintenanceWindowStartTimeInitParameters) DeepCopy() *MaintenanceWindowStartTimeInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowStartTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceWindowStartTimeObservation) DeepCopyInto(out *MaintenanceWindowStartTimeObservation) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(string) + **out = **in + } + if in.TimeOfDay != nil { + in, out := &in.TimeOfDay, &out.TimeOfDay + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowStartTimeObservation. +func (in *MaintenanceWindowStartTimeObservation) DeepCopy() *MaintenanceWindowStartTimeObservation { + if in == nil { + return nil + } + out := new(MaintenanceWindowStartTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowStartTimeParameters) DeepCopyInto(out *MaintenanceWindowStartTimeParameters) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(string) + **out = **in + } + if in.TimeOfDay != nil { + in, out := &in.TimeOfDay, &out.TimeOfDay + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowStartTimeParameters. +func (in *MaintenanceWindowStartTimeParameters) DeepCopy() *MaintenanceWindowStartTimeParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowStartTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserInitParameters) DeepCopyInto(out *UserInitParameters) { + *out = *in + if in.ConsoleAccess != nil { + in, out := &in.ConsoleAccess, &out.ConsoleAccess + *out = new(bool) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.ReplicationUser != nil { + in, out := &in.ReplicationUser, &out.ReplicationUser + *out = new(bool) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInitParameters. +func (in *UserInitParameters) DeepCopy() *UserInitParameters { + if in == nil { + return nil + } + out := new(UserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserObservation) DeepCopyInto(out *UserObservation) { + *out = *in + if in.ConsoleAccess != nil { + in, out := &in.ConsoleAccess, &out.ConsoleAccess + *out = new(bool) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReplicationUser != nil { + in, out := &in.ReplicationUser, &out.ReplicationUser + *out = new(bool) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. 
+func (in *UserObservation) DeepCopy() *UserObservation { + if in == nil { + return nil + } + out := new(UserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserParameters) DeepCopyInto(out *UserParameters) { + *out = *in + if in.ConsoleAccess != nil { + in, out := &in.ConsoleAccess, &out.ConsoleAccess + *out = new(bool) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.ReplicationUser != nil { + in, out := &in.ReplicationUser, &out.ReplicationUser + *out = new(bool) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { + if in == nil { + return nil + } + out := new(UserParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/mq/v1beta2/zz_generated.managed.go b/apis/mq/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..c40d56705b --- /dev/null +++ b/apis/mq/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Broker. +func (mg *Broker) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Broker. 
+func (mg *Broker) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Broker. +func (mg *Broker) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Broker. +func (mg *Broker) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Broker. +func (mg *Broker) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Broker. +func (mg *Broker) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Broker. +func (mg *Broker) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Broker. +func (mg *Broker) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Broker. +func (mg *Broker) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Broker. +func (mg *Broker) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Broker. +func (mg *Broker) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Broker. 
+func (mg *Broker) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/mq/v1beta2/zz_generated.managedlist.go b/apis/mq/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..a6d054b6a3 --- /dev/null +++ b/apis/mq/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this BrokerList. +func (l *BrokerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/mq/v1beta2/zz_generated.resolvers.go b/apis/mq/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..cc27232949 --- /dev/null +++ b/apis/mq/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Broker. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Broker) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("mq.aws.upbound.io", "v1beta1", "Configuration", "ConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Configuration.ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Configuration.IDRef, + Selector: mg.Spec.ForProvider.Configuration.IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Configuration.ID") + } + mg.Spec.ForProvider.Configuration.ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Configuration.IDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("mq.aws.upbound.io", "v1beta2", "Broker", "BrokerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataReplicationPrimaryBrokerArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.DataReplicationPrimaryBrokerArnRef, + Selector: mg.Spec.ForProvider.DataReplicationPrimaryBrokerArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataReplicationPrimaryBrokerArn") + } + 
mg.Spec.ForProvider.DataReplicationPrimaryBrokerArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataReplicationPrimaryBrokerArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SecurityGroupRefs, + Selector: mg.Spec.ForProvider.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityGroups") + } + mg.Spec.ForProvider.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SecurityGroupRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetIDRefs, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetIds") + } + mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetIDRefs = mrsp.ResolvedReferences + + if mg.Spec.InitProvider.Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("mq.aws.upbound.io", "v1beta1", "Configuration", "ConfigurationList") + if err != 
nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Configuration.ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Configuration.IDRef, + Selector: mg.Spec.InitProvider.Configuration.IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Configuration.ID") + } + mg.Spec.InitProvider.Configuration.ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Configuration.IDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("mq.aws.upbound.io", "v1beta2", "Broker", "BrokerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataReplicationPrimaryBrokerArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.DataReplicationPrimaryBrokerArnRef, + Selector: mg.Spec.InitProvider.DataReplicationPrimaryBrokerArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataReplicationPrimaryBrokerArn") + } + mg.Spec.InitProvider.DataReplicationPrimaryBrokerArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataReplicationPrimaryBrokerArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: 
reference.FromPtrValues(mg.Spec.InitProvider.SecurityGroups), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SecurityGroupRefs, + Selector: mg.Spec.InitProvider.SecurityGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityGroups") + } + mg.Spec.InitProvider.SecurityGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SecurityGroupRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetIDRefs, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetIds") + } + mg.Spec.InitProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetIDRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/mq/v1beta2/zz_groupversion_info.go b/apis/mq/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..2db14e134f --- /dev/null +++ b/apis/mq/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=mq.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "mq.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/neptune/v1beta1/zz_clusterendpoint_types.go b/apis/neptune/v1beta1/zz_clusterendpoint_types.go index dfba206c33..a49213d176 100755 --- a/apis/neptune/v1beta1/zz_clusterendpoint_types.go +++ b/apis/neptune/v1beta1/zz_clusterendpoint_types.go @@ -16,7 +16,7 @@ import ( type ClusterEndpointInitParameters struct { // The DB cluster identifier of the DB cluster associated with the endpoint. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` // Reference to a Cluster in neptune to populate clusterIdentifier. @@ -80,7 +80,7 @@ type ClusterEndpointObservation struct { type ClusterEndpointParameters struct { // The DB cluster identifier of the DB cluster associated with the endpoint. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` diff --git a/apis/neptune/v1beta1/zz_clusterinstance_types.go b/apis/neptune/v1beta1/zz_clusterinstance_types.go index c38da8210c..0021d998d1 100755 --- a/apis/neptune/v1beta1/zz_clusterinstance_types.go +++ b/apis/neptune/v1beta1/zz_clusterinstance_types.go @@ -26,7 +26,7 @@ type ClusterInstanceInitParameters struct { AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` // The identifier of the aws_neptune_cluster in which to launch this instance. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` // Reference to a Cluster in neptune to populate clusterIdentifier. @@ -195,7 +195,7 @@ type ClusterInstanceParameters struct { AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` // The identifier of the aws_neptune_cluster in which to launch this instance. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` diff --git a/apis/neptune/v1beta1/zz_clustersnapshot_types.go b/apis/neptune/v1beta1/zz_clustersnapshot_types.go index 5a0bb2fe08..bde8203ae8 100755 --- a/apis/neptune/v1beta1/zz_clustersnapshot_types.go +++ b/apis/neptune/v1beta1/zz_clustersnapshot_types.go @@ -16,7 +16,7 @@ import ( type ClusterSnapshotInitParameters struct { // The DB Cluster Identifier from which to take the snapshot. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` // Reference to a Cluster in neptune to populate dbClusterIdentifier. @@ -77,7 +77,7 @@ type ClusterSnapshotObservation struct { type ClusterSnapshotParameters struct { // The DB Cluster Identifier from which to take the snapshot. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster // +kubebuilder:validation:Optional DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` diff --git a/apis/neptune/v1beta1/zz_generated.conversion_hubs.go b/apis/neptune/v1beta1/zz_generated.conversion_hubs.go index 0e1ce941e9..7a6792fee8 100755 --- a/apis/neptune/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/neptune/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. 
-func (tr *Cluster) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ClusterEndpoint) Hub() {} diff --git a/apis/neptune/v1beta1/zz_generated.conversion_spokes.go b/apis/neptune/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..49d5c675ce --- /dev/null +++ b/apis/neptune/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Cluster to the hub type. +func (tr *Cluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Cluster type. 
+func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/neptune/v1beta1/zz_generated.resolvers.go b/apis/neptune/v1beta1/zz_generated.resolvers.go index 3f1b512035..8c9a23328a 100644 --- a/apis/neptune/v1beta1/zz_generated.resolvers.go +++ b/apis/neptune/v1beta1/zz_generated.resolvers.go @@ -306,7 +306,7 @@ func (mg *ClusterEndpoint) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -325,7 +325,7 @@ func (mg *ClusterEndpoint) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.ClusterIdentifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ClusterIdentifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -356,7 +356,7 @@ func (mg *ClusterInstance) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -413,7 +413,7 @@ func (mg *ClusterInstance) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.NeptuneSubnetGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NeptuneSubnetGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -482,7 +482,7 @@ func (mg *ClusterSnapshot) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -501,7 +501,7 @@ func (mg *ClusterSnapshot) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.DBClusterIdentifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DBClusterIdentifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference 
target managed resource and its list for reference resolution") } @@ -582,7 +582,7 @@ func (mg *GlobalCluster) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -601,7 +601,7 @@ func (mg *GlobalCluster) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.SourceDBClusterIdentifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SourceDBClusterIdentifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/neptune/v1beta1/zz_globalcluster_types.go b/apis/neptune/v1beta1/zz_globalcluster_types.go index 602db31f23..c0febbed67 100755 --- a/apis/neptune/v1beta1/zz_globalcluster_types.go +++ b/apis/neptune/v1beta1/zz_globalcluster_types.go @@ -25,7 +25,7 @@ type GlobalClusterInitParameters struct { EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` // Amazon Resource Name (ARN) to use as the primary DB Cluster of the Global Cluster on creation. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) SourceDBClusterIdentifier *string `json:"sourceDbClusterIdentifier,omitempty" tf:"source_db_cluster_identifier,omitempty"` @@ -108,7 +108,7 @@ type GlobalClusterParameters struct { Region *string `json:"region" tf:"-"` // Amazon Resource Name (ARN) to use as the primary DB Cluster of the Global Cluster on creation. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional SourceDBClusterIdentifier *string `json:"sourceDbClusterIdentifier,omitempty" tf:"source_db_cluster_identifier,omitempty"` diff --git a/apis/neptune/v1beta2/zz_cluster_terraformed.go b/apis/neptune/v1beta2/zz_cluster_terraformed.go new file mode 100755 index 0000000000..d4fb893de4 --- /dev/null +++ b/apis/neptune/v1beta2/zz_cluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "aws_neptune_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetInitParameters of this Cluster +func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/neptune/v1beta2/zz_cluster_types.go b/apis/neptune/v1beta2/zz_cluster_types.go new file mode 100755 index 0000000000..639856df8d --- /dev/null +++ b/apis/neptune/v1beta2/zz_cluster_types.go @@ -0,0 +1,572 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClusterInitParameters struct { + + // Specifies whether upgrades between different major versions are allowed. You must set it to true when providing an engine_version parameter that uses a different major version than the DB cluster's current version. Default is false. + AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty" tf:"allow_major_version_upgrade,omitempty"` + + // Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is false. + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // A list of EC2 Availability Zones that instances in the Neptune cluster can be created in. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // The days to retain backups for. Default 1 + BackupRetentionPeriod *float64 `json:"backupRetentionPeriod,omitempty" tf:"backup_retention_period,omitempty"` + + // If set to true, tags are copied to any snapshot of the DB cluster that is created. 
+ CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty" tf:"copy_tags_to_snapshot,omitempty"` + + // A value that indicates whether the DB cluster has deletion protection enabled.The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // A list of the log types this DB cluster is configured to export to Cloudwatch Logs. Currently only supports audit and slowquery. + // +listType=set + EnableCloudwatchLogsExports []*string `json:"enableCloudwatchLogsExports,omitempty" tf:"enable_cloudwatch_logs_exports,omitempty"` + + // The name of the database engine to be used for this Neptune cluster. Defaults to neptune. + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The database engine version. + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // The name of your final Neptune snapshot when this Neptune cluster is deleted. If omitted, no final snapshot will be made. + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // The global cluster identifier specified on aws_neptune_global_cluster. + GlobalClusterIdentifier *string `json:"globalClusterIdentifier,omitempty" tf:"global_cluster_identifier,omitempty"` + + // Specifies whether or not mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty" tf:"iam_database_authentication_enabled,omitempty"` + + // References to Role in iam to populate iamRoles. + // +kubebuilder:validation:Optional + IAMRoleRefs []v1.Reference `json:"iamRoleRefs,omitempty" tf:"-"` + + // Selector for a list of Role in iam to populate iamRoles. 
+ // +kubebuilder:validation:Optional + IAMRoleSelector *v1.Selector `json:"iamRoleSelector,omitempty" tf:"-"` + + // A List of ARNs for the IAM roles to associate to the Neptune Cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:refFieldName=IAMRoleRefs + // +crossplane:generate:reference:selectorFieldName=IAMRoleSelector + // +listType=set + IAMRoles []*string `json:"iamRoles,omitempty" tf:"iam_roles,omitempty"` + + // The ARN for the KMS encryption key. When specifying kms_key_arn, storage_encrypted needs to be set to true. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // A cluster parameter group to associate with the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.ClusterParameterGroup + NeptuneClusterParameterGroupName *string `json:"neptuneClusterParameterGroupName,omitempty" tf:"neptune_cluster_parameter_group_name,omitempty"` + + // Reference to a ClusterParameterGroup in neptune to populate neptuneClusterParameterGroupName. + // +kubebuilder:validation:Optional + NeptuneClusterParameterGroupNameRef *v1.Reference `json:"neptuneClusterParameterGroupNameRef,omitempty" tf:"-"` + + // Selector for a ClusterParameterGroup in neptune to populate neptuneClusterParameterGroupName. 
+ // +kubebuilder:validation:Optional + NeptuneClusterParameterGroupNameSelector *v1.Selector `json:"neptuneClusterParameterGroupNameSelector,omitempty" tf:"-"` + + // The name of the DB parameter group to apply to all instances of the DB cluster. + NeptuneInstanceParameterGroupName *string `json:"neptuneInstanceParameterGroupName,omitempty" tf:"neptune_instance_parameter_group_name,omitempty"` + + // A Neptune subnet group to associate with this Neptune instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.SubnetGroup + NeptuneSubnetGroupName *string `json:"neptuneSubnetGroupName,omitempty" tf:"neptune_subnet_group_name,omitempty"` + + // Reference to a SubnetGroup in neptune to populate neptuneSubnetGroupName. + // +kubebuilder:validation:Optional + NeptuneSubnetGroupNameRef *v1.Reference `json:"neptuneSubnetGroupNameRef,omitempty" tf:"-"` + + // Selector for a SubnetGroup in neptune to populate neptuneSubnetGroupName. + // +kubebuilder:validation:Optional + NeptuneSubnetGroupNameSelector *v1.Selector `json:"neptuneSubnetGroupNameSelector,omitempty" tf:"-"` + + // The port on which the Neptune accepts connections. Default is 8182. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 + PreferredBackupWindow *string `json:"preferredBackupWindow,omitempty" tf:"preferred_backup_window,omitempty"` + + // The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty" tf:"preferred_maintenance_window,omitempty"` + + // ARN of a source Neptune cluster or Neptune instance if this Neptune cluster is to be created as a Read Replica. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster + ReplicationSourceIdentifier *string `json:"replicationSourceIdentifier,omitempty" tf:"replication_source_identifier,omitempty"` + + // Reference to a Cluster in neptune to populate replicationSourceIdentifier. + // +kubebuilder:validation:Optional + ReplicationSourceIdentifierRef *v1.Reference `json:"replicationSourceIdentifierRef,omitempty" tf:"-"` + + // Selector for a Cluster in neptune to populate replicationSourceIdentifier. + // +kubebuilder:validation:Optional + ReplicationSourceIdentifierSelector *v1.Selector `json:"replicationSourceIdentifierSelector,omitempty" tf:"-"` + + // If set, create the Neptune cluster as a serverless one. See Serverless for example block attributes. + ServerlessV2ScalingConfiguration *ServerlessV2ScalingConfigurationInitParameters `json:"serverlessV2ScalingConfiguration,omitempty" tf:"serverless_v2_scaling_configuration,omitempty"` + + // Determines whether a final Neptune snapshot is created before the Neptune cluster is deleted. If true is specified, no Neptune snapshot is created. If false is specified, a Neptune snapshot is created before the Neptune cluster is deleted, using the value from final_snapshot_identifier. Default is false. + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a Neptune cluster snapshot, or the ARN when specifying a Neptune snapshot. Automated snapshots should not be used for this attribute, unless from a different cluster. Automated snapshots are deleted as part of cluster destruction when the resource is replaced. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.ClusterSnapshot + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // Reference to a ClusterSnapshot in neptune to populate snapshotIdentifier. + // +kubebuilder:validation:Optional + SnapshotIdentifierRef *v1.Reference `json:"snapshotIdentifierRef,omitempty" tf:"-"` + + // Selector for a ClusterSnapshot in neptune to populate snapshotIdentifier. + // +kubebuilder:validation:Optional + SnapshotIdentifierSelector *v1.Selector `json:"snapshotIdentifierSelector,omitempty" tf:"-"` + + // Specifies whether the Neptune cluster is encrypted. The default is false if not specified. + StorageEncrypted *bool `json:"storageEncrypted,omitempty" tf:"storage_encrypted,omitempty"` + + // Storage type associated with the cluster standard/iopt1. Default: standard + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of VPC security groups to associate with the Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type ClusterObservation struct { + + // Specifies whether upgrades between different major versions are allowed. You must set it to true when providing an engine_version parameter that uses a different major version than the DB cluster's current version. Default is false. + AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty" tf:"allow_major_version_upgrade,omitempty"` + + // Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is false. + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // The Neptune Cluster Amazon Resource Name (ARN) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A list of EC2 Availability Zones that instances in the Neptune cluster can be created in. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // The days to retain backups for. 
Default 1 + BackupRetentionPeriod *float64 `json:"backupRetentionPeriod,omitempty" tf:"backup_retention_period,omitempty"` + + // – List of Neptune Instances that are a part of this cluster + // +listType=set + ClusterMembers []*string `json:"clusterMembers,omitempty" tf:"cluster_members,omitempty"` + + // The Neptune Cluster Resource ID + ClusterResourceID *string `json:"clusterResourceId,omitempty" tf:"cluster_resource_id,omitempty"` + + // If set to true, tags are copied to any snapshot of the DB cluster that is created. + CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty" tf:"copy_tags_to_snapshot,omitempty"` + + // A value that indicates whether the DB cluster has deletion protection enabled.The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // A list of the log types this DB cluster is configured to export to Cloudwatch Logs. Currently only supports audit and slowquery. + // +listType=set + EnableCloudwatchLogsExports []*string `json:"enableCloudwatchLogsExports,omitempty" tf:"enable_cloudwatch_logs_exports,omitempty"` + + // The DNS address of the Neptune instance + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The name of the database engine to be used for this Neptune cluster. Defaults to neptune. + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The database engine version. + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // The name of your final Neptune snapshot when this Neptune cluster is deleted. If omitted, no final snapshot will be made. + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // The global cluster identifier specified on aws_neptune_global_cluster. 
+ GlobalClusterIdentifier *string `json:"globalClusterIdentifier,omitempty" tf:"global_cluster_identifier,omitempty"` + + // The Route53 Hosted Zone ID of the endpoint + HostedZoneID *string `json:"hostedZoneId,omitempty" tf:"hosted_zone_id,omitempty"` + + // Specifies whether or not mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty" tf:"iam_database_authentication_enabled,omitempty"` + + // A List of ARNs for the IAM roles to associate to the Neptune Cluster. + // +listType=set + IAMRoles []*string `json:"iamRoles,omitempty" tf:"iam_roles,omitempty"` + + // The Neptune Cluster Identifier + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ARN for the KMS encryption key. When specifying kms_key_arn, storage_encrypted needs to be set to true. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // A cluster parameter group to associate with the cluster. + NeptuneClusterParameterGroupName *string `json:"neptuneClusterParameterGroupName,omitempty" tf:"neptune_cluster_parameter_group_name,omitempty"` + + // The name of the DB parameter group to apply to all instances of the DB cluster. + NeptuneInstanceParameterGroupName *string `json:"neptuneInstanceParameterGroupName,omitempty" tf:"neptune_instance_parameter_group_name,omitempty"` + + // A Neptune subnet group to associate with this Neptune instance. + NeptuneSubnetGroupName *string `json:"neptuneSubnetGroupName,omitempty" tf:"neptune_subnet_group_name,omitempty"` + + // The port on which the Neptune accepts connections. Default is 8182. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Time in UTC. 
Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 + PreferredBackupWindow *string `json:"preferredBackupWindow,omitempty" tf:"preferred_backup_window,omitempty"` + + // The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty" tf:"preferred_maintenance_window,omitempty"` + + // A read-only endpoint for the Neptune cluster, automatically load-balanced across replicas + ReaderEndpoint *string `json:"readerEndpoint,omitempty" tf:"reader_endpoint,omitempty"` + + // ARN of a source Neptune cluster or Neptune instance if this Neptune cluster is to be created as a Read Replica. + ReplicationSourceIdentifier *string `json:"replicationSourceIdentifier,omitempty" tf:"replication_source_identifier,omitempty"` + + // If set, create the Neptune cluster as a serverless one. See Serverless for example block attributes. + ServerlessV2ScalingConfiguration *ServerlessV2ScalingConfigurationObservation `json:"serverlessV2ScalingConfiguration,omitempty" tf:"serverless_v2_scaling_configuration,omitempty"` + + // Determines whether a final Neptune snapshot is created before the Neptune cluster is deleted. If true is specified, no Neptune snapshot is created. If false is specified, a Neptune snapshot is created before the Neptune cluster is deleted, using the value from final_snapshot_identifier. Default is false. + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a Neptune cluster snapshot, or the ARN when specifying a Neptune snapshot. Automated snapshots should not be used for this attribute, unless from a different cluster. Automated snapshots are deleted as part of cluster destruction when the resource is replaced. 
+ SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // Specifies whether the Neptune cluster is encrypted. The default is false if not specified. + StorageEncrypted *bool `json:"storageEncrypted,omitempty" tf:"storage_encrypted,omitempty"` + + // Storage type associated with the cluster standard/iopt1. Default: standard + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // List of VPC security groups to associate with the Cluster + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type ClusterParameters struct { + + // Specifies whether upgrades between different major versions are allowed. You must set it to true when providing an engine_version parameter that uses a different major version than the DB cluster's current version. Default is false. + // +kubebuilder:validation:Optional + AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty" tf:"allow_major_version_upgrade,omitempty"` + + // Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is false. + // +kubebuilder:validation:Optional + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // A list of EC2 Availability Zones that instances in the Neptune cluster can be created in. 
+ // +kubebuilder:validation:Optional + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // The days to retain backups for. Default 1 + // +kubebuilder:validation:Optional + BackupRetentionPeriod *float64 `json:"backupRetentionPeriod,omitempty" tf:"backup_retention_period,omitempty"` + + // If set to true, tags are copied to any snapshot of the DB cluster that is created. + // +kubebuilder:validation:Optional + CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty" tf:"copy_tags_to_snapshot,omitempty"` + + // A value that indicates whether the DB cluster has deletion protection enabled.The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // A list of the log types this DB cluster is configured to export to Cloudwatch Logs. Currently only supports audit and slowquery. + // +kubebuilder:validation:Optional + // +listType=set + EnableCloudwatchLogsExports []*string `json:"enableCloudwatchLogsExports,omitempty" tf:"enable_cloudwatch_logs_exports,omitempty"` + + // The name of the database engine to be used for this Neptune cluster. Defaults to neptune. + // +kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The database engine version. + // +kubebuilder:validation:Optional + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // The name of your final Neptune snapshot when this Neptune cluster is deleted. If omitted, no final snapshot will be made. + // +kubebuilder:validation:Optional + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // The global cluster identifier specified on aws_neptune_global_cluster. 
+ // +kubebuilder:validation:Optional + GlobalClusterIdentifier *string `json:"globalClusterIdentifier,omitempty" tf:"global_cluster_identifier,omitempty"` + + // Specifies whether or not mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. + // +kubebuilder:validation:Optional + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty" tf:"iam_database_authentication_enabled,omitempty"` + + // References to Role in iam to populate iamRoles. + // +kubebuilder:validation:Optional + IAMRoleRefs []v1.Reference `json:"iamRoleRefs,omitempty" tf:"-"` + + // Selector for a list of Role in iam to populate iamRoles. + // +kubebuilder:validation:Optional + IAMRoleSelector *v1.Selector `json:"iamRoleSelector,omitempty" tf:"-"` + + // A List of ARNs for the IAM roles to associate to the Neptune Cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:refFieldName=IAMRoleRefs + // +crossplane:generate:reference:selectorFieldName=IAMRoleSelector + // +kubebuilder:validation:Optional + // +listType=set + IAMRoles []*string `json:"iamRoles,omitempty" tf:"iam_roles,omitempty"` + + // The ARN for the KMS encryption key. When specifying kms_key_arn, storage_encrypted needs to be set to true. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // A cluster parameter group to associate with the cluster. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.ClusterParameterGroup + // +kubebuilder:validation:Optional + NeptuneClusterParameterGroupName *string `json:"neptuneClusterParameterGroupName,omitempty" tf:"neptune_cluster_parameter_group_name,omitempty"` + + // Reference to a ClusterParameterGroup in neptune to populate neptuneClusterParameterGroupName. + // +kubebuilder:validation:Optional + NeptuneClusterParameterGroupNameRef *v1.Reference `json:"neptuneClusterParameterGroupNameRef,omitempty" tf:"-"` + + // Selector for a ClusterParameterGroup in neptune to populate neptuneClusterParameterGroupName. + // +kubebuilder:validation:Optional + NeptuneClusterParameterGroupNameSelector *v1.Selector `json:"neptuneClusterParameterGroupNameSelector,omitempty" tf:"-"` + + // The name of the DB parameter group to apply to all instances of the DB cluster. + // +kubebuilder:validation:Optional + NeptuneInstanceParameterGroupName *string `json:"neptuneInstanceParameterGroupName,omitempty" tf:"neptune_instance_parameter_group_name,omitempty"` + + // A Neptune subnet group to associate with this Neptune instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.SubnetGroup + // +kubebuilder:validation:Optional + NeptuneSubnetGroupName *string `json:"neptuneSubnetGroupName,omitempty" tf:"neptune_subnet_group_name,omitempty"` + + // Reference to a SubnetGroup in neptune to populate neptuneSubnetGroupName. + // +kubebuilder:validation:Optional + NeptuneSubnetGroupNameRef *v1.Reference `json:"neptuneSubnetGroupNameRef,omitempty" tf:"-"` + + // Selector for a SubnetGroup in neptune to populate neptuneSubnetGroupName. + // +kubebuilder:validation:Optional + NeptuneSubnetGroupNameSelector *v1.Selector `json:"neptuneSubnetGroupNameSelector,omitempty" tf:"-"` + + // The port on which the Neptune accepts connections. Default is 8182. 
+ // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 + // +kubebuilder:validation:Optional + PreferredBackupWindow *string `json:"preferredBackupWindow,omitempty" tf:"preferred_backup_window,omitempty"` + + // The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 + // +kubebuilder:validation:Optional + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty" tf:"preferred_maintenance_window,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ARN of a source Neptune cluster or Neptune instance if this Neptune cluster is to be created as a Read Replica. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta2.Cluster + // +kubebuilder:validation:Optional + ReplicationSourceIdentifier *string `json:"replicationSourceIdentifier,omitempty" tf:"replication_source_identifier,omitempty"` + + // Reference to a Cluster in neptune to populate replicationSourceIdentifier. + // +kubebuilder:validation:Optional + ReplicationSourceIdentifierRef *v1.Reference `json:"replicationSourceIdentifierRef,omitempty" tf:"-"` + + // Selector for a Cluster in neptune to populate replicationSourceIdentifier. + // +kubebuilder:validation:Optional + ReplicationSourceIdentifierSelector *v1.Selector `json:"replicationSourceIdentifierSelector,omitempty" tf:"-"` + + // If set, create the Neptune cluster as a serverless one. See Serverless for example block attributes. 
+ // +kubebuilder:validation:Optional + ServerlessV2ScalingConfiguration *ServerlessV2ScalingConfigurationParameters `json:"serverlessV2ScalingConfiguration,omitempty" tf:"serverless_v2_scaling_configuration,omitempty"` + + // Determines whether a final Neptune snapshot is created before the Neptune cluster is deleted. If true is specified, no Neptune snapshot is created. If false is specified, a Neptune snapshot is created before the Neptune cluster is deleted, using the value from final_snapshot_identifier. Default is false. + // +kubebuilder:validation:Optional + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a Neptune cluster snapshot, or the ARN when specifying a Neptune snapshot. Automated snapshots should not be used for this attribute, unless from a different cluster. Automated snapshots are deleted as part of cluster destruction when the resource is replaced. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/neptune/v1beta1.ClusterSnapshot + // +kubebuilder:validation:Optional + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // Reference to a ClusterSnapshot in neptune to populate snapshotIdentifier. + // +kubebuilder:validation:Optional + SnapshotIdentifierRef *v1.Reference `json:"snapshotIdentifierRef,omitempty" tf:"-"` + + // Selector for a ClusterSnapshot in neptune to populate snapshotIdentifier. + // +kubebuilder:validation:Optional + SnapshotIdentifierSelector *v1.Selector `json:"snapshotIdentifierSelector,omitempty" tf:"-"` + + // Specifies whether the Neptune cluster is encrypted. The default is false if not specified. 
+ // +kubebuilder:validation:Optional + StorageEncrypted *bool `json:"storageEncrypted,omitempty" tf:"storage_encrypted,omitempty"` + + // Storage type associated with the cluster standard/iopt1. Default: standard + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of VPC security groups to associate with the Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type ServerlessV2ScalingConfigurationInitParameters struct { + + // : (default: 128) The maximum Neptune Capacity Units (NCUs) for this cluster. Must be lower or equal than 128. See AWS Documentation for more details. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // : (default: 2.5) The minimum Neptune Capacity Units (NCUs) for this cluster. Must be greater or equal than 1. See AWS Documentation for more details. 
+ MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type ServerlessV2ScalingConfigurationObservation struct { + + // : (default: 128) The maximum Neptune Capacity Units (NCUs) for this cluster. Must be lower or equal than 128. See AWS Documentation for more details. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // : (default: 2.5) The minimum Neptune Capacity Units (NCUs) for this cluster. Must be greater or equal than 1. See AWS Documentation for more details. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type ServerlessV2ScalingConfigurationParameters struct { + + // : (default: 128) The maximum Neptune Capacity Units (NCUs) for this cluster. Must be lower or equal than 128. See AWS Documentation for more details. + // +kubebuilder:validation:Optional + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // : (default: 2.5) The minimum Neptune Capacity Units (NCUs) for this cluster. Must be greater or equal than 1. See AWS Documentation for more details. + // +kubebuilder:validation:Optional + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the Clusters API. Provides an Neptune Cluster Resource +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." 
+ CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/neptune/v1beta2/zz_generated.conversion_hubs.go b/apis/neptune/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..5640ab69b4 --- /dev/null +++ b/apis/neptune/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} diff --git a/apis/neptune/v1beta2/zz_generated.deepcopy.go b/apis/neptune/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a095670adb --- /dev/null +++ b/apis/neptune/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,963 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.AllowMajorVersionUpgrade != nil { + in, out := &in.AllowMajorVersionUpgrade, &out.AllowMajorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(float64) + **out = **in + } + if in.CopyTagsToSnapshot != nil { + in, out := &in.CopyTagsToSnapshot, &out.CopyTagsToSnapshot + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.EnableCloudwatchLogsExports != nil { + in, out := &in.EnableCloudwatchLogsExports, &out.EnableCloudwatchLogsExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.GlobalClusterIdentifier != nil { + in, out 
:= &in.GlobalClusterIdentifier, &out.GlobalClusterIdentifier + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.IAMRoleRefs != nil { + in, out := &in.IAMRoleRefs, &out.IAMRoleRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IAMRoleSelector != nil { + in, out := &in.IAMRoleSelector, &out.IAMRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IAMRoles != nil { + in, out := &in.IAMRoles, &out.IAMRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NeptuneClusterParameterGroupName != nil { + in, out := &in.NeptuneClusterParameterGroupName, &out.NeptuneClusterParameterGroupName + *out = new(string) + **out = **in + } + if in.NeptuneClusterParameterGroupNameRef != nil { + in, out := &in.NeptuneClusterParameterGroupNameRef, &out.NeptuneClusterParameterGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NeptuneClusterParameterGroupNameSelector != nil { + in, out := &in.NeptuneClusterParameterGroupNameSelector, &out.NeptuneClusterParameterGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NeptuneInstanceParameterGroupName != nil { + in, out := &in.NeptuneInstanceParameterGroupName, &out.NeptuneInstanceParameterGroupName + *out = 
new(string) + **out = **in + } + if in.NeptuneSubnetGroupName != nil { + in, out := &in.NeptuneSubnetGroupName, &out.NeptuneSubnetGroupName + *out = new(string) + **out = **in + } + if in.NeptuneSubnetGroupNameRef != nil { + in, out := &in.NeptuneSubnetGroupNameRef, &out.NeptuneSubnetGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NeptuneSubnetGroupNameSelector != nil { + in, out := &in.NeptuneSubnetGroupNameSelector, &out.NeptuneSubnetGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferredBackupWindow != nil { + in, out := &in.PreferredBackupWindow, &out.PreferredBackupWindow + *out = new(string) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.ReplicationSourceIdentifier != nil { + in, out := &in.ReplicationSourceIdentifier, &out.ReplicationSourceIdentifier + *out = new(string) + **out = **in + } + if in.ReplicationSourceIdentifierRef != nil { + in, out := &in.ReplicationSourceIdentifierRef, &out.ReplicationSourceIdentifierRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ReplicationSourceIdentifierSelector != nil { + in, out := &in.ReplicationSourceIdentifierSelector, &out.ReplicationSourceIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServerlessV2ScalingConfiguration != nil { + in, out := &in.ServerlessV2ScalingConfiguration, &out.ServerlessV2ScalingConfiguration + *out = new(ServerlessV2ScalingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = 
**in + } + if in.SnapshotIdentifierRef != nil { + in, out := &in.SnapshotIdentifierRef, &out.SnapshotIdentifierRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnapshotIdentifierSelector != nil { + in, out := &in.SnapshotIdentifierSelector, &out.SnapshotIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.AllowMajorVersionUpgrade != nil { + in, out := &in.AllowMajorVersionUpgrade, &out.AllowMajorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(float64) + **out = **in + } + if in.ClusterMembers != nil { + in, out := &in.ClusterMembers, &out.ClusterMembers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + 
**out = **in + } + } + } + if in.ClusterResourceID != nil { + in, out := &in.ClusterResourceID, &out.ClusterResourceID + *out = new(string) + **out = **in + } + if in.CopyTagsToSnapshot != nil { + in, out := &in.CopyTagsToSnapshot, &out.CopyTagsToSnapshot + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.EnableCloudwatchLogsExports != nil { + in, out := &in.EnableCloudwatchLogsExports, &out.EnableCloudwatchLogsExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.GlobalClusterIdentifier != nil { + in, out := &in.GlobalClusterIdentifier, &out.GlobalClusterIdentifier + *out = new(string) + **out = **in + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.IAMRoles != nil { + in, out := &in.IAMRoles, &out.IAMRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, 
out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.NeptuneClusterParameterGroupName != nil { + in, out := &in.NeptuneClusterParameterGroupName, &out.NeptuneClusterParameterGroupName + *out = new(string) + **out = **in + } + if in.NeptuneInstanceParameterGroupName != nil { + in, out := &in.NeptuneInstanceParameterGroupName, &out.NeptuneInstanceParameterGroupName + *out = new(string) + **out = **in + } + if in.NeptuneSubnetGroupName != nil { + in, out := &in.NeptuneSubnetGroupName, &out.NeptuneSubnetGroupName + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferredBackupWindow != nil { + in, out := &in.PreferredBackupWindow, &out.PreferredBackupWindow + *out = new(string) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.ReaderEndpoint != nil { + in, out := &in.ReaderEndpoint, &out.ReaderEndpoint + *out = new(string) + **out = **in + } + if in.ReplicationSourceIdentifier != nil { + in, out := &in.ReplicationSourceIdentifier, &out.ReplicationSourceIdentifier + *out = new(string) + **out = **in + } + if in.ServerlessV2ScalingConfiguration != nil { + in, out := &in.ServerlessV2ScalingConfiguration, &out.ServerlessV2ScalingConfiguration + *out = new(ServerlessV2ScalingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = 
new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.AllowMajorVersionUpgrade != nil { + in, out := &in.AllowMajorVersionUpgrade, &out.AllowMajorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(float64) + **out = **in + } + if in.CopyTagsToSnapshot != nil { + in, out := &in.CopyTagsToSnapshot, &out.CopyTagsToSnapshot + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.EnableCloudwatchLogsExports != nil { + in, out := &in.EnableCloudwatchLogsExports, &out.EnableCloudwatchLogsExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.GlobalClusterIdentifier != nil { + in, out := &in.GlobalClusterIdentifier, &out.GlobalClusterIdentifier + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out 
= new(bool) + **out = **in + } + if in.IAMRoleRefs != nil { + in, out := &in.IAMRoleRefs, &out.IAMRoleRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IAMRoleSelector != nil { + in, out := &in.IAMRoleSelector, &out.IAMRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IAMRoles != nil { + in, out := &in.IAMRoles, &out.IAMRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NeptuneClusterParameterGroupName != nil { + in, out := &in.NeptuneClusterParameterGroupName, &out.NeptuneClusterParameterGroupName + *out = new(string) + **out = **in + } + if in.NeptuneClusterParameterGroupNameRef != nil { + in, out := &in.NeptuneClusterParameterGroupNameRef, &out.NeptuneClusterParameterGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NeptuneClusterParameterGroupNameSelector != nil { + in, out := &in.NeptuneClusterParameterGroupNameSelector, &out.NeptuneClusterParameterGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NeptuneInstanceParameterGroupName != nil { + in, out := &in.NeptuneInstanceParameterGroupName, &out.NeptuneInstanceParameterGroupName + *out = new(string) + **out = **in + } + if in.NeptuneSubnetGroupName != nil { + in, out := &in.NeptuneSubnetGroupName, &out.NeptuneSubnetGroupName + *out = new(string) + **out = **in + } + if in.NeptuneSubnetGroupNameRef != nil { + in, out := 
&in.NeptuneSubnetGroupNameRef, &out.NeptuneSubnetGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NeptuneSubnetGroupNameSelector != nil { + in, out := &in.NeptuneSubnetGroupNameSelector, &out.NeptuneSubnetGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferredBackupWindow != nil { + in, out := &in.PreferredBackupWindow, &out.PreferredBackupWindow + *out = new(string) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReplicationSourceIdentifier != nil { + in, out := &in.ReplicationSourceIdentifier, &out.ReplicationSourceIdentifier + *out = new(string) + **out = **in + } + if in.ReplicationSourceIdentifierRef != nil { + in, out := &in.ReplicationSourceIdentifierRef, &out.ReplicationSourceIdentifierRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ReplicationSourceIdentifierSelector != nil { + in, out := &in.ReplicationSourceIdentifierSelector, &out.ReplicationSourceIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServerlessV2ScalingConfiguration != nil { + in, out := &in.ServerlessV2ScalingConfiguration, &out.ServerlessV2ScalingConfiguration + *out = new(ServerlessV2ScalingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.SnapshotIdentifierRef != nil { + in, out := &in.SnapshotIdentifierRef, &out.SnapshotIdentifierRef + *out = 
new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnapshotIdentifierSelector != nil { + in, out := &in.SnapshotIdentifierSelector, &out.SnapshotIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessV2ScalingConfigurationInitParameters) DeepCopyInto(out *ServerlessV2ScalingConfigurationInitParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessV2ScalingConfigurationInitParameters. 
+func (in *ServerlessV2ScalingConfigurationInitParameters) DeepCopy() *ServerlessV2ScalingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ServerlessV2ScalingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessV2ScalingConfigurationObservation) DeepCopyInto(out *ServerlessV2ScalingConfigurationObservation) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessV2ScalingConfigurationObservation. +func (in *ServerlessV2ScalingConfigurationObservation) DeepCopy() *ServerlessV2ScalingConfigurationObservation { + if in == nil { + return nil + } + out := new(ServerlessV2ScalingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessV2ScalingConfigurationParameters) DeepCopyInto(out *ServerlessV2ScalingConfigurationParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessV2ScalingConfigurationParameters. 
+func (in *ServerlessV2ScalingConfigurationParameters) DeepCopy() *ServerlessV2ScalingConfigurationParameters { + if in == nil { + return nil + } + out := new(ServerlessV2ScalingConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/neptune/v1beta2/zz_generated.managed.go b/apis/neptune/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..8a26829392 --- /dev/null +++ b/apis/neptune/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. +func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. 
+func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. +func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/neptune/v1beta2/zz_generated.managedlist.go b/apis/neptune/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..a6628c8e29 --- /dev/null +++ b/apis/neptune/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ClusterList. +func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/neptune/v1beta2/zz_generated.resolvers.go b/apis/neptune/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..a1591f3a44 --- /dev/null +++ b/apis/neptune/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,297 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Cluster. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.IAMRoles), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.IAMRoleRefs, + Selector: mg.Spec.ForProvider.IAMRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMRoles") + } + mg.Spec.ForProvider.IAMRoles = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.IAMRoleRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + 
}) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyArn") + } + mg.Spec.ForProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "ClusterParameterGroup", "ClusterParameterGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NeptuneClusterParameterGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NeptuneClusterParameterGroupNameRef, + Selector: mg.Spec.ForProvider.NeptuneClusterParameterGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NeptuneClusterParameterGroupName") + } + mg.Spec.ForProvider.NeptuneClusterParameterGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NeptuneClusterParameterGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "SubnetGroup", "SubnetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NeptuneSubnetGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NeptuneSubnetGroupNameRef, + Selector: mg.Spec.ForProvider.NeptuneSubnetGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NeptuneSubnetGroupName") + } + mg.Spec.ForProvider.NeptuneSubnetGroupName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.NeptuneSubnetGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ReplicationSourceIdentifier), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ReplicationSourceIdentifierRef, + Selector: mg.Spec.ForProvider.ReplicationSourceIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ReplicationSourceIdentifier") + } + mg.Spec.ForProvider.ReplicationSourceIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ReplicationSourceIdentifierRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "ClusterSnapshot", "ClusterSnapshotList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SnapshotIdentifier), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SnapshotIdentifierRef, + Selector: mg.Spec.ForProvider.SnapshotIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SnapshotIdentifier") + } + mg.Spec.ForProvider.SnapshotIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SnapshotIdentifierRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the 
reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCSecurityGroupIds") + } + mg.Spec.ForProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.IAMRoles), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.IAMRoleRefs, + Selector: mg.Spec.InitProvider.IAMRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMRoles") + } + mg.Spec.InitProvider.IAMRoles = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.IAMRoleRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.KMSKeyArnSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyArn") + } + mg.Spec.InitProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "ClusterParameterGroup", "ClusterParameterGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NeptuneClusterParameterGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NeptuneClusterParameterGroupNameRef, + Selector: mg.Spec.InitProvider.NeptuneClusterParameterGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NeptuneClusterParameterGroupName") + } + mg.Spec.InitProvider.NeptuneClusterParameterGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NeptuneClusterParameterGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "SubnetGroup", "SubnetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NeptuneSubnetGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NeptuneSubnetGroupNameRef, + Selector: mg.Spec.InitProvider.NeptuneSubnetGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NeptuneSubnetGroupName") + } + mg.Spec.InitProvider.NeptuneSubnetGroupName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NeptuneSubnetGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ReplicationSourceIdentifier), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ReplicationSourceIdentifierRef, + Selector: mg.Spec.InitProvider.ReplicationSourceIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ReplicationSourceIdentifier") + } + mg.Spec.InitProvider.ReplicationSourceIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ReplicationSourceIdentifierRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("neptune.aws.upbound.io", "v1beta1", "ClusterSnapshot", "ClusterSnapshotList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SnapshotIdentifier), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SnapshotIdentifierRef, + Selector: mg.Spec.InitProvider.SnapshotIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SnapshotIdentifier") + } + mg.Spec.InitProvider.SnapshotIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SnapshotIdentifierRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err 
!= nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCSecurityGroupIds") + } + mg.Spec.InitProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/neptune/v1beta2/zz_groupversion_info.go b/apis/neptune/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..a3b6e45dd9 --- /dev/null +++ b/apis/neptune/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=neptune.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "neptune.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/networkfirewall/v1beta1/zz_generated.conversion_spokes.go b/apis/networkfirewall/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..a6d3d6ea71 --- /dev/null +++ b/apis/networkfirewall/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Firewall to the hub type. +func (tr *Firewall) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Firewall type. +func (tr *Firewall) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FirewallPolicy to the hub type. 
+func (tr *FirewallPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FirewallPolicy type. +func (tr *FirewallPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LoggingConfiguration to the hub type. +func (tr *LoggingConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LoggingConfiguration type. +func (tr *LoggingConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this RuleGroup to the hub type. 
+func (tr *RuleGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RuleGroup type. +func (tr *RuleGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/networkfirewall/v1beta2/zz_firewall_terraformed.go b/apis/networkfirewall/v1beta2/zz_firewall_terraformed.go new file mode 100755 index 0000000000..9863473f32 --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_firewall_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Firewall +func (mg *Firewall) GetTerraformResourceType() string { + return "aws_networkfirewall_firewall" +} + +// GetConnectionDetailsMapping for this Firewall +func (tr *Firewall) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Firewall +func (tr *Firewall) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Firewall +func (tr *Firewall) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Firewall +func (tr *Firewall) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Firewall +func (tr *Firewall) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Firewall +func (tr *Firewall) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Firewall +func (tr *Firewall) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Firewall +func (tr *Firewall) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Firewall using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Firewall) LateInitialize(attrs []byte) (bool, error) { + params := &FirewallParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Firewall) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/networkfirewall/v1beta2/zz_firewall_types.go b/apis/networkfirewall/v1beta2/zz_firewall_types.go new file mode 100755 index 0000000000..0ab92e29e4 --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_firewall_types.go @@ -0,0 +1,367 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AttachmentInitParameters struct { +} + +type AttachmentObservation struct { + + // The identifier of the firewall endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. + EndpointID *string `json:"endpointId,omitempty" tf:"endpoint_id,omitempty"` + + // The unique identifier for the subnet. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type AttachmentParameters struct { +} + +type EncryptionConfigurationInitParameters struct { + + // The ID of the customer managed key. You can use any of the key identifiers that KMS supports, unless you're using a key that's managed by another account. If you're using a key managed by another account, then specify the key ARN. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The type of AWS KMS key to use for encryption of your Network Firewall resources. Valid values are CUSTOMER_KMS and AWS_OWNED_KMS_KEY. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncryptionConfigurationObservation struct { + + // The ID of the customer managed key. You can use any of the key identifiers that KMS supports, unless you're using a key that's managed by another account. If you're using a key managed by another account, then specify the key ARN. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The type of AWS KMS key to use for encryption of your Network Firewall resources. Valid values are CUSTOMER_KMS and AWS_OWNED_KMS_KEY. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncryptionConfigurationParameters struct { + + // The ID of the customer managed key. You can use any of the key identifiers that KMS supports, unless you're using a key that's managed by another account. If you're using a key managed by another account, then specify the key ARN. + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The type of AWS KMS key to use for encryption of your Network Firewall resources. Valid values are CUSTOMER_KMS and AWS_OWNED_KMS_KEY. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type FirewallInitParameters struct { + + // A flag indicating whether the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. Defaults to false. + DeleteProtection *bool `json:"deleteProtection,omitempty" tf:"delete_protection,omitempty"` + + // A friendly description of the firewall. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // KMS encryption configuration settings. See Encryption Configuration below for details. + EncryptionConfiguration *EncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // The Amazon Resource Name (ARN) of the VPC Firewall policy. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkfirewall/v1beta2.FirewallPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + FirewallPolicyArn *string `json:"firewallPolicyArn,omitempty" tf:"firewall_policy_arn,omitempty"` + + // Reference to a FirewallPolicy in networkfirewall to populate firewallPolicyArn. + // +kubebuilder:validation:Optional + FirewallPolicyArnRef *v1.Reference `json:"firewallPolicyArnRef,omitempty" tf:"-"` + + // Selector for a FirewallPolicy in networkfirewall to populate firewallPolicyArn. + // +kubebuilder:validation:Optional + FirewallPolicyArnSelector *v1.Selector `json:"firewallPolicyArnSelector,omitempty" tf:"-"` + + // A flag indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. Defaults to false. + FirewallPolicyChangeProtection *bool `json:"firewallPolicyChangeProtection,omitempty" tf:"firewall_policy_change_protection,omitempty"` + + // A friendly name of the firewall. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. Defaults to false. + SubnetChangeProtection *bool `json:"subnetChangeProtection,omitempty" tf:"subnet_change_protection,omitempty"` + + // Set of configuration blocks describing the public subnets. Each subnet must belong to a different Availability Zone in the VPC. AWS Network Firewall creates a firewall endpoint in each subnet. See Subnet Mapping below for details. + SubnetMapping []SubnetMappingInitParameters `json:"subnetMapping,omitempty" tf:"subnet_mapping,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The unique identifier of the VPC where AWS Network Firewall should create the firewall. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type FirewallObservation struct { + + // The Amazon Resource Name (ARN) that identifies the firewall. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A flag indicating whether the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. Defaults to false. + DeleteProtection *bool `json:"deleteProtection,omitempty" tf:"delete_protection,omitempty"` + + // A friendly description of the firewall. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // KMS encryption configuration settings. See Encryption Configuration below for details. + EncryptionConfiguration *EncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // The Amazon Resource Name (ARN) of the VPC Firewall policy. + FirewallPolicyArn *string `json:"firewallPolicyArn,omitempty" tf:"firewall_policy_arn,omitempty"` + + // A flag indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. Defaults to false. 
+ FirewallPolicyChangeProtection *bool `json:"firewallPolicyChangeProtection,omitempty" tf:"firewall_policy_change_protection,omitempty"` + + // Nested list of information about the current status of the firewall. + FirewallStatus []FirewallStatusObservation `json:"firewallStatus,omitempty" tf:"firewall_status,omitempty"` + + // The Amazon Resource Name (ARN) that identifies the firewall. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A friendly name of the firewall. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. Defaults to false. + SubnetChangeProtection *bool `json:"subnetChangeProtection,omitempty" tf:"subnet_change_protection,omitempty"` + + // Set of configuration blocks describing the public subnets. Each subnet must belong to a different Availability Zone in the VPC. AWS Network Firewall creates a firewall endpoint in each subnet. See Subnet Mapping below for details. + SubnetMapping []SubnetMappingObservation `json:"subnetMapping,omitempty" tf:"subnet_mapping,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // A string token used when updating a firewall. + UpdateToken *string `json:"updateToken,omitempty" tf:"update_token,omitempty"` + + // The unique identifier of the VPC where AWS Network Firewall should create the firewall. 
+ VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type FirewallParameters struct { + + // A flag indicating whether the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. Defaults to false. + // +kubebuilder:validation:Optional + DeleteProtection *bool `json:"deleteProtection,omitempty" tf:"delete_protection,omitempty"` + + // A friendly description of the firewall. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // KMS encryption configuration settings. See Encryption Configuration below for details. + // +kubebuilder:validation:Optional + EncryptionConfiguration *EncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // The Amazon Resource Name (ARN) of the VPC Firewall policy. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkfirewall/v1beta2.FirewallPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + FirewallPolicyArn *string `json:"firewallPolicyArn,omitempty" tf:"firewall_policy_arn,omitempty"` + + // Reference to a FirewallPolicy in networkfirewall to populate firewallPolicyArn. + // +kubebuilder:validation:Optional + FirewallPolicyArnRef *v1.Reference `json:"firewallPolicyArnRef,omitempty" tf:"-"` + + // Selector for a FirewallPolicy in networkfirewall to populate firewallPolicyArn. + // +kubebuilder:validation:Optional + FirewallPolicyArnSelector *v1.Selector `json:"firewallPolicyArnSelector,omitempty" tf:"-"` + + // A flag indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. Defaults to false. 
+ // +kubebuilder:validation:Optional + FirewallPolicyChangeProtection *bool `json:"firewallPolicyChangeProtection,omitempty" tf:"firewall_policy_change_protection,omitempty"` + + // A friendly name of the firewall. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A flag indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. Defaults to false. + // +kubebuilder:validation:Optional + SubnetChangeProtection *bool `json:"subnetChangeProtection,omitempty" tf:"subnet_change_protection,omitempty"` + + // Set of configuration blocks describing the public subnets. Each subnet must belong to a different Availability Zone in the VPC. AWS Network Firewall creates a firewall endpoint in each subnet. See Subnet Mapping below for details. + // +kubebuilder:validation:Optional + SubnetMapping []SubnetMappingParameters `json:"subnetMapping,omitempty" tf:"subnet_mapping,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The unique identifier of the VPC where AWS Network Firewall should create the firewall. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. 
+ // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type FirewallStatusInitParameters struct { +} + +type FirewallStatusObservation struct { + + // Set of subnets configured for use by the firewall. + SyncStates []SyncStatesObservation `json:"syncStates,omitempty" tf:"sync_states,omitempty"` +} + +type FirewallStatusParameters struct { +} + +type SubnetMappingInitParameters struct { + + // The subnet's IP address type. Valida values: "DUALSTACK", "IPV4". + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // The unique identifier for the subnet. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type SubnetMappingObservation struct { + + // The subnet's IP address type. Valida values: "DUALSTACK", "IPV4". + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // The unique identifier for the subnet. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type SubnetMappingParameters struct { + + // The subnet's IP address type. Valida values: "DUALSTACK", "IPV4". + // +kubebuilder:validation:Optional + IPAddressType *string `json:"ipAddressType,omitempty" tf:"ip_address_type,omitempty"` + + // The unique identifier for the subnet. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type SyncStatesInitParameters struct { +} + +type SyncStatesObservation struct { + + // Nested list describing the attachment status of the firewall's association with a single VPC subnet. + Attachment []AttachmentObservation `json:"attachment,omitempty" tf:"attachment,omitempty"` + + // The Availability Zone where the subnet is configured. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` +} + +type SyncStatesParameters struct { +} + +// FirewallSpec defines the desired state of Firewall +type FirewallSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FirewallParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider FirewallInitParameters `json:"initProvider,omitempty"` +} + +// FirewallStatus defines the observed state of Firewall. +type FirewallStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FirewallObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Firewall is the Schema for the Firewalls API. Provides an AWS Network Firewall Firewall resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Firewall struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.subnetMapping) || (has(self.initProvider) && has(self.initProvider.subnetMapping))",message="spec.forProvider.subnetMapping is a required parameter" + Spec FirewallSpec `json:"spec"` + Status FirewallStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FirewallList contains a list of Firewalls +type FirewallList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]Firewall `json:"items"` +} + +// Repository type metadata. +var ( + Firewall_Kind = "Firewall" + Firewall_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Firewall_Kind}.String() + Firewall_KindAPIVersion = Firewall_Kind + "." + CRDGroupVersion.String() + Firewall_GroupVersionKind = CRDGroupVersion.WithKind(Firewall_Kind) +) + +func init() { + SchemeBuilder.Register(&Firewall{}, &FirewallList{}) +} diff --git a/apis/networkfirewall/v1beta2/zz_firewallpolicy_terraformed.go b/apis/networkfirewall/v1beta2/zz_firewallpolicy_terraformed.go new file mode 100755 index 0000000000..56e52d011a --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_firewallpolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FirewallPolicy +func (mg *FirewallPolicy) GetTerraformResourceType() string { + return "aws_networkfirewall_firewall_policy" +} + +// GetConnectionDetailsMapping for this FirewallPolicy +func (tr *FirewallPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FirewallPolicy +func (tr *FirewallPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FirewallPolicy +func (tr *FirewallPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FirewallPolicy +func (tr 
*FirewallPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FirewallPolicy +func (tr *FirewallPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FirewallPolicy +func (tr *FirewallPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FirewallPolicy +func (tr *FirewallPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FirewallPolicy +func (tr *FirewallPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FirewallPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FirewallPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &FirewallPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FirewallPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/networkfirewall/v1beta2/zz_firewallpolicy_types.go b/apis/networkfirewall/v1beta2/zz_firewallpolicy_types.go new file mode 100755 index 0000000000..ca0acd140f --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_firewallpolicy_types.go @@ -0,0 +1,595 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionDefinitionInitParameters struct { + + // A configuration block describing the stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. You can pair this custom action with any of the standard stateless rule actions. 
See Publish Metric Action below for details. + PublishMetricAction *PublishMetricActionInitParameters `json:"publishMetricAction,omitempty" tf:"publish_metric_action,omitempty"` +} + +type ActionDefinitionObservation struct { + + // A configuration block describing the stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. You can pair this custom action with any of the standard stateless rule actions. See Publish Metric Action below for details. + PublishMetricAction *PublishMetricActionObservation `json:"publishMetricAction,omitempty" tf:"publish_metric_action,omitempty"` +} + +type ActionDefinitionParameters struct { + + // A configuration block describing the stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. You can pair this custom action with any of the standard stateless rule actions. See Publish Metric Action below for details. + // +kubebuilder:validation:Optional + PublishMetricAction *PublishMetricActionParameters `json:"publishMetricAction" tf:"publish_metric_action,omitempty"` +} + +type DimensionInitParameters struct { + + // The string value to use in the custom metric dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DimensionObservation struct { + + // The string value to use in the custom metric dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DimensionParameters struct { + + // The string value to use in the custom metric dimension. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FirewallPolicyEncryptionConfigurationInitParameters struct { + + // The ID of the customer managed key. You can use any of the key identifiers that KMS supports, unless you're using a key that's managed by another account. If you're using a key managed by another account, then specify the key ARN. 
+ KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The type of AWS KMS key to use for encryption of your Network Firewall resources. Valid values are CUSTOMER_KMS and AWS_OWNED_KMS_KEY. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FirewallPolicyEncryptionConfigurationObservation struct { + + // The ID of the customer managed key. You can use any of the key identifiers that KMS supports, unless you're using a key that's managed by another account. If you're using a key managed by another account, then specify the key ARN. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The type of AWS KMS key to use for encryption of your Network Firewall resources. Valid values are CUSTOMER_KMS and AWS_OWNED_KMS_KEY. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FirewallPolicyEncryptionConfigurationParameters struct { + + // The ID of the customer managed key. You can use any of the key identifiers that KMS supports, unless you're using a key that's managed by another account. If you're using a key managed by another account, then specify the key ARN. + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The type of AWS KMS key to use for encryption of your Network Firewall resources. Valid values are CUSTOMER_KMS and AWS_OWNED_KMS_KEY. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type FirewallPolicyFirewallPolicyInitParameters struct { + + // . Contains variables that you can use to override default Suricata settings in your firewall policy. See Rule Variables for details. + PolicyVariables *PolicyVariablesInitParameters `json:"policyVariables,omitempty" tf:"policy_variables,omitempty"` + + // Set of actions to take on a packet if it does not match any stateful rules in the policy. 
This can only be specified if the policy has a stateful_engine_options block with a rule_order value of STRICT_ORDER. You can specify one of either or neither values of aws:drop_strict or aws:drop_established, as well as any combination of aws:alert_strict and aws:alert_established. + // +listType=set + StatefulDefaultActions []*string `json:"statefulDefaultActions,omitempty" tf:"stateful_default_actions,omitempty"` + + // A configuration block that defines options on how the policy handles stateful rules. See Stateful Engine Options below for details. + StatefulEngineOptions *StatefulEngineOptionsInitParameters `json:"statefulEngineOptions,omitempty" tf:"stateful_engine_options,omitempty"` + + // Set of configuration blocks containing references to the stateful rule groups that are used in the policy. See Stateful Rule Group Reference below for details. + StatefulRuleGroupReference []StatefulRuleGroupReferenceInitParameters `json:"statefulRuleGroupReference,omitempty" tf:"stateful_rule_group_reference,omitempty"` + + // Set of configuration blocks describing the custom action definitions that are available for use in the firewall policy's stateless_default_actions. See Stateless Custom Action below for details. + StatelessCustomAction []StatelessCustomActionInitParameters `json:"statelessCustomAction,omitempty" tf:"stateless_custom_action,omitempty"` + + // Set of actions to take on a packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + // In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. 
+ // +listType=set + StatelessDefaultActions []*string `json:"statelessDefaultActions,omitempty" tf:"stateless_default_actions,omitempty"` + + // Set of actions to take on a fragmented packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + // In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. + // +listType=set + StatelessFragmentDefaultActions []*string `json:"statelessFragmentDefaultActions,omitempty" tf:"stateless_fragment_default_actions,omitempty"` + + // Set of configuration blocks containing references to the stateless rule groups that are used in the policy. See Stateless Rule Group Reference below for details. + StatelessRuleGroupReference []StatelessRuleGroupReferenceInitParameters `json:"statelessRuleGroupReference,omitempty" tf:"stateless_rule_group_reference,omitempty"` + + // The (ARN) of the TLS Inspection policy to attach to the FW Policy. This must be added at creation of the resource per AWS documentation. "You can only add a TLS inspection configuration to a new policy, not to an existing policy." This cannot be removed from a FW Policy. + TLSInspectionConfigurationArn *string `json:"tlsInspectionConfigurationArn,omitempty" tf:"tls_inspection_configuration_arn,omitempty"` +} + +type FirewallPolicyFirewallPolicyObservation struct { + + // . Contains variables that you can use to override default Suricata settings in your firewall policy. See Rule Variables for details. + PolicyVariables *PolicyVariablesObservation `json:"policyVariables,omitempty" tf:"policy_variables,omitempty"` + + // Set of actions to take on a packet if it does not match any stateful rules in the policy. 
This can only be specified if the policy has a stateful_engine_options block with a rule_order value of STRICT_ORDER. You can specify one of either or neither values of aws:drop_strict or aws:drop_established, as well as any combination of aws:alert_strict and aws:alert_established. + // +listType=set + StatefulDefaultActions []*string `json:"statefulDefaultActions,omitempty" tf:"stateful_default_actions,omitempty"` + + // A configuration block that defines options on how the policy handles stateful rules. See Stateful Engine Options below for details. + StatefulEngineOptions *StatefulEngineOptionsObservation `json:"statefulEngineOptions,omitempty" tf:"stateful_engine_options,omitempty"` + + // Set of configuration blocks containing references to the stateful rule groups that are used in the policy. See Stateful Rule Group Reference below for details. + StatefulRuleGroupReference []StatefulRuleGroupReferenceObservation `json:"statefulRuleGroupReference,omitempty" tf:"stateful_rule_group_reference,omitempty"` + + // Set of configuration blocks describing the custom action definitions that are available for use in the firewall policy's stateless_default_actions. See Stateless Custom Action below for details. + StatelessCustomAction []StatelessCustomActionObservation `json:"statelessCustomAction,omitempty" tf:"stateless_custom_action,omitempty"` + + // Set of actions to take on a packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + // In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. 
+ // +listType=set + StatelessDefaultActions []*string `json:"statelessDefaultActions,omitempty" tf:"stateless_default_actions,omitempty"` + + // Set of actions to take on a fragmented packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + // In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. + // +listType=set + StatelessFragmentDefaultActions []*string `json:"statelessFragmentDefaultActions,omitempty" tf:"stateless_fragment_default_actions,omitempty"` + + // Set of configuration blocks containing references to the stateless rule groups that are used in the policy. See Stateless Rule Group Reference below for details. + StatelessRuleGroupReference []StatelessRuleGroupReferenceObservation `json:"statelessRuleGroupReference,omitempty" tf:"stateless_rule_group_reference,omitempty"` + + // The (ARN) of the TLS Inspection policy to attach to the FW Policy. This must be added at creation of the resource per AWS documentation. "You can only add a TLS inspection configuration to a new policy, not to an existing policy." This cannot be removed from a FW Policy. + TLSInspectionConfigurationArn *string `json:"tlsInspectionConfigurationArn,omitempty" tf:"tls_inspection_configuration_arn,omitempty"` +} + +type FirewallPolicyFirewallPolicyParameters struct { + + // . Contains variables that you can use to override default Suricata settings in your firewall policy. See Rule Variables for details. + // +kubebuilder:validation:Optional + PolicyVariables *PolicyVariablesParameters `json:"policyVariables,omitempty" tf:"policy_variables,omitempty"` + + // Set of actions to take on a packet if it does not match any stateful rules in the policy. 
This can only be specified if the policy has a stateful_engine_options block with a rule_order value of STRICT_ORDER. You can specify one of either or neither values of aws:drop_strict or aws:drop_established, as well as any combination of aws:alert_strict and aws:alert_established. + // +kubebuilder:validation:Optional + // +listType=set + StatefulDefaultActions []*string `json:"statefulDefaultActions,omitempty" tf:"stateful_default_actions,omitempty"` + + // A configuration block that defines options on how the policy handles stateful rules. See Stateful Engine Options below for details. + // +kubebuilder:validation:Optional + StatefulEngineOptions *StatefulEngineOptionsParameters `json:"statefulEngineOptions,omitempty" tf:"stateful_engine_options,omitempty"` + + // Set of configuration blocks containing references to the stateful rule groups that are used in the policy. See Stateful Rule Group Reference below for details. + // +kubebuilder:validation:Optional + StatefulRuleGroupReference []StatefulRuleGroupReferenceParameters `json:"statefulRuleGroupReference,omitempty" tf:"stateful_rule_group_reference,omitempty"` + + // Set of configuration blocks describing the custom action definitions that are available for use in the firewall policy's stateless_default_actions. See Stateless Custom Action below for details. + // +kubebuilder:validation:Optional + StatelessCustomAction []StatelessCustomActionParameters `json:"statelessCustomAction,omitempty" tf:"stateless_custom_action,omitempty"` + + // Set of actions to take on a packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + // In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. 
+ // +kubebuilder:validation:Optional + // +listType=set + StatelessDefaultActions []*string `json:"statelessDefaultActions" tf:"stateless_default_actions,omitempty"` + + // Set of actions to take on a fragmented packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + // In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. + // +kubebuilder:validation:Optional + // +listType=set + StatelessFragmentDefaultActions []*string `json:"statelessFragmentDefaultActions" tf:"stateless_fragment_default_actions,omitempty"` + + // Set of configuration blocks containing references to the stateless rule groups that are used in the policy. See Stateless Rule Group Reference below for details. + // +kubebuilder:validation:Optional + StatelessRuleGroupReference []StatelessRuleGroupReferenceParameters `json:"statelessRuleGroupReference,omitempty" tf:"stateless_rule_group_reference,omitempty"` + + // The (ARN) of the TLS Inspection policy to attach to the FW Policy. This must be added at creation of the resource per AWS documentation. "You can only add a TLS inspection configuration to a new policy, not to an existing policy." This cannot be removed from a FW Policy. + // +kubebuilder:validation:Optional + TLSInspectionConfigurationArn *string `json:"tlsInspectionConfigurationArn,omitempty" tf:"tls_inspection_configuration_arn,omitempty"` +} + +type FirewallPolicyInitParameters struct { + + // A friendly description of the firewall policy. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // KMS encryption configuration settings. See Encryption Configuration below for details. 
+ EncryptionConfiguration *FirewallPolicyEncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // A configuration block describing the rule groups and policy actions to use in the firewall policy. See Firewall Policy below for details. + FirewallPolicy *FirewallPolicyFirewallPolicyInitParameters `json:"firewallPolicy,omitempty" tf:"firewall_policy,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FirewallPolicyObservation struct { + + // The Amazon Resource Name (ARN) that identifies the firewall policy. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A friendly description of the firewall policy. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // KMS encryption configuration settings. See Encryption Configuration below for details. + EncryptionConfiguration *FirewallPolicyEncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // A configuration block describing the rule groups and policy actions to use in the firewall policy. See Firewall Policy below for details. + FirewallPolicy *FirewallPolicyFirewallPolicyObservation `json:"firewallPolicy,omitempty" tf:"firewall_policy,omitempty"` + + // The Amazon Resource Name (ARN) that identifies the firewall policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // A string token used when updating a firewall policy. 
+ UpdateToken *string `json:"updateToken,omitempty" tf:"update_token,omitempty"` +} + +type FirewallPolicyParameters struct { + + // A friendly description of the firewall policy. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // KMS encryption configuration settings. See Encryption Configuration below for details. + // +kubebuilder:validation:Optional + EncryptionConfiguration *FirewallPolicyEncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // A configuration block describing the rule groups and policy actions to use in the firewall policy. See Firewall Policy below for details. + // +kubebuilder:validation:Optional + FirewallPolicy *FirewallPolicyFirewallPolicyParameters `json:"firewallPolicy,omitempty" tf:"firewall_policy,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IPSetInitParameters struct { + + // Set of IPv4 or IPv6 addresses in CIDR notation to use for the Suricata HOME_NET variable. + // +listType=set + Definition []*string `json:"definition,omitempty" tf:"definition,omitempty"` +} + +type IPSetObservation struct { + + // Set of IPv4 or IPv6 addresses in CIDR notation to use for the Suricata HOME_NET variable. + // +listType=set + Definition []*string `json:"definition,omitempty" tf:"definition,omitempty"` +} + +type IPSetParameters struct { + + // Set of IPv4 or IPv6 addresses in CIDR notation to use for the Suricata HOME_NET variable. 
+ // +kubebuilder:validation:Optional + // +listType=set + Definition []*string `json:"definition" tf:"definition,omitempty"` +} + +type OverrideInitParameters struct { + + // The action that changes the rule group from DROP to ALERT . This only applies to managed rule groups. + Action *string `json:"action,omitempty" tf:"action,omitempty"` +} + +type OverrideObservation struct { + + // The action that changes the rule group from DROP to ALERT . This only applies to managed rule groups. + Action *string `json:"action,omitempty" tf:"action,omitempty"` +} + +type OverrideParameters struct { + + // The action that changes the rule group from DROP to ALERT . This only applies to managed rule groups. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` +} + +type PolicyVariablesInitParameters struct { + RuleVariables []RuleVariablesInitParameters `json:"ruleVariables,omitempty" tf:"rule_variables,omitempty"` +} + +type PolicyVariablesObservation struct { + RuleVariables []RuleVariablesObservation `json:"ruleVariables,omitempty" tf:"rule_variables,omitempty"` +} + +type PolicyVariablesParameters struct { + + // +kubebuilder:validation:Optional + RuleVariables []RuleVariablesParameters `json:"ruleVariables,omitempty" tf:"rule_variables,omitempty"` +} + +type PublishMetricActionInitParameters struct { + + // Set of configuration blocks describing dimension settings to use for Amazon CloudWatch custom metrics. See Dimension below for more details. + Dimension []DimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` +} + +type PublishMetricActionObservation struct { + + // Set of configuration blocks describing dimension settings to use for Amazon CloudWatch custom metrics. See Dimension below for more details. 
+ Dimension []DimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` +} + +type PublishMetricActionParameters struct { + + // Set of configuration blocks describing dimension settings to use for Amazon CloudWatch custom metrics. See Dimension below for more details. + // +kubebuilder:validation:Optional + Dimension []DimensionParameters `json:"dimension" tf:"dimension,omitempty"` +} + +type RuleVariablesInitParameters struct { + + // A configuration block that defines a set of IP addresses. See IP Set below for details. + IPSet *IPSetInitParameters `json:"ipSet,omitempty" tf:"ip_set,omitempty"` + + // An alphanumeric string to identify the ip_set. Valid values: HOME_NET + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type RuleVariablesObservation struct { + + // A configuration block that defines a set of IP addresses. See IP Set below for details. + IPSet *IPSetObservation `json:"ipSet,omitempty" tf:"ip_set,omitempty"` + + // An alphanumeric string to identify the ip_set. Valid values: HOME_NET + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type RuleVariablesParameters struct { + + // A configuration block that defines a set of IP addresses. See IP Set below for details. + // +kubebuilder:validation:Optional + IPSet *IPSetParameters `json:"ipSet" tf:"ip_set,omitempty"` + + // An alphanumeric string to identify the ip_set. Valid values: HOME_NET + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` +} + +type StatefulEngineOptionsInitParameters struct { + + // Indicates how to manage the order of stateful rule evaluation for the policy. Default value: DEFAULT_ACTION_ORDER. Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER. + RuleOrder *string `json:"ruleOrder,omitempty" tf:"rule_order,omitempty"` + + // Describes how to treat traffic which has broken midstream. Default value: DROP. Valid values: DROP, CONTINUE, REJECT. 
+ StreamExceptionPolicy *string `json:"streamExceptionPolicy,omitempty" tf:"stream_exception_policy,omitempty"` +} + +type StatefulEngineOptionsObservation struct { + + // Indicates how to manage the order of stateful rule evaluation for the policy. Default value: DEFAULT_ACTION_ORDER. Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER. + RuleOrder *string `json:"ruleOrder,omitempty" tf:"rule_order,omitempty"` + + // Describes how to treat traffic which has broken midstream. Default value: DROP. Valid values: DROP, CONTINUE, REJECT. + StreamExceptionPolicy *string `json:"streamExceptionPolicy,omitempty" tf:"stream_exception_policy,omitempty"` +} + +type StatefulEngineOptionsParameters struct { + + // Indicates how to manage the order of stateful rule evaluation for the policy. Default value: DEFAULT_ACTION_ORDER. Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER. + // +kubebuilder:validation:Optional + RuleOrder *string `json:"ruleOrder,omitempty" tf:"rule_order,omitempty"` + + // Describes how to treat traffic which has broken midstream. Default value: DROP. Valid values: DROP, CONTINUE, REJECT. + // +kubebuilder:validation:Optional + StreamExceptionPolicy *string `json:"streamExceptionPolicy,omitempty" tf:"stream_exception_policy,omitempty"` +} + +type StatefulRuleGroupReferenceInitParameters struct { + + // Configuration block for override values + Override *OverrideInitParameters `json:"override,omitempty" tf:"override,omitempty"` + + // An integer setting that indicates the order in which to run the stateless rule groups in a single policy. AWS Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Amazon Resource Name (ARN) of the stateless rule group. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkfirewall/v1beta2.RuleGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a RuleGroup in networkfirewall to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a RuleGroup in networkfirewall to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type StatefulRuleGroupReferenceObservation struct { + + // Configuration block for override values + Override *OverrideObservation `json:"override,omitempty" tf:"override,omitempty"` + + // An integer setting that indicates the order in which to run the stateless rule groups in a single policy. AWS Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Amazon Resource Name (ARN) of the stateless rule group. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type StatefulRuleGroupReferenceParameters struct { + + // Configuration block for override values + // +kubebuilder:validation:Optional + Override *OverrideParameters `json:"override,omitempty" tf:"override,omitempty"` + + // An integer setting that indicates the order in which to run the stateless rule groups in a single policy. AWS Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Amazon Resource Name (ARN) of the stateless rule group. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkfirewall/v1beta2.RuleGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a RuleGroup in networkfirewall to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a RuleGroup in networkfirewall to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type StatelessCustomActionInitParameters struct { + + // A configuration block describing the custom action associated with the action_name. See Action Definition below for details. + ActionDefinition *ActionDefinitionInitParameters `json:"actionDefinition,omitempty" tf:"action_definition,omitempty"` + + // A friendly name of the custom action. + ActionName *string `json:"actionName,omitempty" tf:"action_name,omitempty"` +} + +type StatelessCustomActionObservation struct { + + // A configuration block describing the custom action associated with the action_name. See Action Definition below for details. + ActionDefinition *ActionDefinitionObservation `json:"actionDefinition,omitempty" tf:"action_definition,omitempty"` + + // A friendly name of the custom action. + ActionName *string `json:"actionName,omitempty" tf:"action_name,omitempty"` +} + +type StatelessCustomActionParameters struct { + + // A configuration block describing the custom action associated with the action_name. See Action Definition below for details. + // +kubebuilder:validation:Optional + ActionDefinition *ActionDefinitionParameters `json:"actionDefinition" tf:"action_definition,omitempty"` + + // A friendly name of the custom action. 
+ // +kubebuilder:validation:Optional + ActionName *string `json:"actionName" tf:"action_name,omitempty"` +} + +type StatelessRuleGroupReferenceInitParameters struct { + + // An integer setting that indicates the order in which to run the stateless rule groups in a single policy. AWS Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Amazon Resource Name (ARN) of the stateless rule group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkfirewall/v1beta2.RuleGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a RuleGroup in networkfirewall to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a RuleGroup in networkfirewall to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type StatelessRuleGroupReferenceObservation struct { + + // An integer setting that indicates the order in which to run the stateless rule groups in a single policy. AWS Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Amazon Resource Name (ARN) of the stateless rule group. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type StatelessRuleGroupReferenceParameters struct { + + // An integer setting that indicates the order in which to run the stateless rule groups in a single policy. 
AWS Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority" tf:"priority,omitempty"` + + // The Amazon Resource Name (ARN) of the stateless rule group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkfirewall/v1beta2.RuleGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a RuleGroup in networkfirewall to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a RuleGroup in networkfirewall to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +// FirewallPolicySpec defines the desired state of FirewallPolicy +type FirewallPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FirewallPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider FirewallPolicyInitParameters `json:"initProvider,omitempty"` +} + +// FirewallPolicyStatus defines the observed state of FirewallPolicy. +type FirewallPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FirewallPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FirewallPolicy is the Schema for the FirewallPolicys API. Provides an AWS Network Firewall Policy resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type FirewallPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.firewallPolicy) || (has(self.initProvider) && has(self.initProvider.firewallPolicy))",message="spec.forProvider.firewallPolicy is a required parameter" + Spec FirewallPolicySpec `json:"spec"` + Status FirewallPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FirewallPolicyList contains a list of FirewallPolicys +type FirewallPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FirewallPolicy `json:"items"` +} + +// Repository type metadata. 
+var ( + FirewallPolicy_Kind = "FirewallPolicy" + FirewallPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FirewallPolicy_Kind}.String() + FirewallPolicy_KindAPIVersion = FirewallPolicy_Kind + "." + CRDGroupVersion.String() + FirewallPolicy_GroupVersionKind = CRDGroupVersion.WithKind(FirewallPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&FirewallPolicy{}, &FirewallPolicyList{}) +} diff --git a/apis/networkfirewall/v1beta2/zz_generated.conversion_hubs.go b/apis/networkfirewall/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..1a608a6ac0 --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Firewall) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FirewallPolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LoggingConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RuleGroup) Hub() {} diff --git a/apis/networkfirewall/v1beta2/zz_generated.deepcopy.go b/apis/networkfirewall/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c456a07600 --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,5391 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionDefinitionInitParameters) DeepCopyInto(out *ActionDefinitionInitParameters) { + *out = *in + if in.PublishMetricAction != nil { + in, out := &in.PublishMetricAction, &out.PublishMetricAction + *out = new(PublishMetricActionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDefinitionInitParameters. +func (in *ActionDefinitionInitParameters) DeepCopy() *ActionDefinitionInitParameters { + if in == nil { + return nil + } + out := new(ActionDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionDefinitionObservation) DeepCopyInto(out *ActionDefinitionObservation) { + *out = *in + if in.PublishMetricAction != nil { + in, out := &in.PublishMetricAction, &out.PublishMetricAction + *out = new(PublishMetricActionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDefinitionObservation. +func (in *ActionDefinitionObservation) DeepCopy() *ActionDefinitionObservation { + if in == nil { + return nil + } + out := new(ActionDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionDefinitionParameters) DeepCopyInto(out *ActionDefinitionParameters) { + *out = *in + if in.PublishMetricAction != nil { + in, out := &in.PublishMetricAction, &out.PublishMetricAction + *out = new(PublishMetricActionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDefinitionParameters. 
+func (in *ActionDefinitionParameters) DeepCopy() *ActionDefinitionParameters { + if in == nil { + return nil + } + out := new(ActionDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionDefinitionPublishMetricActionInitParameters) DeepCopyInto(out *ActionDefinitionPublishMetricActionInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]PublishMetricActionDimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDefinitionPublishMetricActionInitParameters. +func (in *ActionDefinitionPublishMetricActionInitParameters) DeepCopy() *ActionDefinitionPublishMetricActionInitParameters { + if in == nil { + return nil + } + out := new(ActionDefinitionPublishMetricActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionDefinitionPublishMetricActionObservation) DeepCopyInto(out *ActionDefinitionPublishMetricActionObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]PublishMetricActionDimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDefinitionPublishMetricActionObservation. 
+func (in *ActionDefinitionPublishMetricActionObservation) DeepCopy() *ActionDefinitionPublishMetricActionObservation { + if in == nil { + return nil + } + out := new(ActionDefinitionPublishMetricActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionDefinitionPublishMetricActionParameters) DeepCopyInto(out *ActionDefinitionPublishMetricActionParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]PublishMetricActionDimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDefinitionPublishMetricActionParameters. +func (in *ActionDefinitionPublishMetricActionParameters) DeepCopy() *ActionDefinitionPublishMetricActionParameters { + if in == nil { + return nil + } + out := new(ActionDefinitionPublishMetricActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachmentInitParameters) DeepCopyInto(out *AttachmentInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachmentInitParameters. +func (in *AttachmentInitParameters) DeepCopy() *AttachmentInitParameters { + if in == nil { + return nil + } + out := new(AttachmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AttachmentObservation) DeepCopyInto(out *AttachmentObservation) { + *out = *in + if in.EndpointID != nil { + in, out := &in.EndpointID, &out.EndpointID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachmentObservation. +func (in *AttachmentObservation) DeepCopy() *AttachmentObservation { + if in == nil { + return nil + } + out := new(AttachmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachmentParameters) DeepCopyInto(out *AttachmentParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachmentParameters. +func (in *AttachmentParameters) DeepCopy() *AttachmentParameters { + if in == nil { + return nil + } + out := new(AttachmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionActionDefinitionInitParameters) DeepCopyInto(out *CustomActionActionDefinitionInitParameters) { + *out = *in + if in.PublishMetricAction != nil { + in, out := &in.PublishMetricAction, &out.PublishMetricAction + *out = new(ActionDefinitionPublishMetricActionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionActionDefinitionInitParameters. 
+func (in *CustomActionActionDefinitionInitParameters) DeepCopy() *CustomActionActionDefinitionInitParameters { + if in == nil { + return nil + } + out := new(CustomActionActionDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionActionDefinitionObservation) DeepCopyInto(out *CustomActionActionDefinitionObservation) { + *out = *in + if in.PublishMetricAction != nil { + in, out := &in.PublishMetricAction, &out.PublishMetricAction + *out = new(ActionDefinitionPublishMetricActionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionActionDefinitionObservation. +func (in *CustomActionActionDefinitionObservation) DeepCopy() *CustomActionActionDefinitionObservation { + if in == nil { + return nil + } + out := new(CustomActionActionDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionActionDefinitionParameters) DeepCopyInto(out *CustomActionActionDefinitionParameters) { + *out = *in + if in.PublishMetricAction != nil { + in, out := &in.PublishMetricAction, &out.PublishMetricAction + *out = new(ActionDefinitionPublishMetricActionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionActionDefinitionParameters. +func (in *CustomActionActionDefinitionParameters) DeepCopy() *CustomActionActionDefinitionParameters { + if in == nil { + return nil + } + out := new(CustomActionActionDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomActionInitParameters) DeepCopyInto(out *CustomActionInitParameters) { + *out = *in + if in.ActionDefinition != nil { + in, out := &in.ActionDefinition, &out.ActionDefinition + *out = new(CustomActionActionDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ActionName != nil { + in, out := &in.ActionName, &out.ActionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionInitParameters. +func (in *CustomActionInitParameters) DeepCopy() *CustomActionInitParameters { + if in == nil { + return nil + } + out := new(CustomActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionObservation) DeepCopyInto(out *CustomActionObservation) { + *out = *in + if in.ActionDefinition != nil { + in, out := &in.ActionDefinition, &out.ActionDefinition + *out = new(CustomActionActionDefinitionObservation) + (*in).DeepCopyInto(*out) + } + if in.ActionName != nil { + in, out := &in.ActionName, &out.ActionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionObservation. +func (in *CustomActionObservation) DeepCopy() *CustomActionObservation { + if in == nil { + return nil + } + out := new(CustomActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomActionParameters) DeepCopyInto(out *CustomActionParameters) { + *out = *in + if in.ActionDefinition != nil { + in, out := &in.ActionDefinition, &out.ActionDefinition + *out = new(CustomActionActionDefinitionParameters) + (*in).DeepCopyInto(*out) + } + if in.ActionName != nil { + in, out := &in.ActionName, &out.ActionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionParameters. +func (in *CustomActionParameters) DeepCopy() *CustomActionParameters { + if in == nil { + return nil + } + out := new(CustomActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationInitParameters) DeepCopyInto(out *DestinationInitParameters) { + *out = *in + if in.AddressDefinition != nil { + in, out := &in.AddressDefinition, &out.AddressDefinition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationInitParameters. +func (in *DestinationInitParameters) DeepCopy() *DestinationInitParameters { + if in == nil { + return nil + } + out := new(DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationObservation) DeepCopyInto(out *DestinationObservation) { + *out = *in + if in.AddressDefinition != nil { + in, out := &in.AddressDefinition, &out.AddressDefinition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationObservation. 
+func (in *DestinationObservation) DeepCopy() *DestinationObservation { + if in == nil { + return nil + } + out := new(DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationParameters) DeepCopyInto(out *DestinationParameters) { + *out = *in + if in.AddressDefinition != nil { + in, out := &in.AddressDefinition, &out.AddressDefinition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationParameters. +func (in *DestinationParameters) DeepCopy() *DestinationParameters { + if in == nil { + return nil + } + out := new(DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationPortInitParameters) DeepCopyInto(out *DestinationPortInitParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationPortInitParameters. +func (in *DestinationPortInitParameters) DeepCopy() *DestinationPortInitParameters { + if in == nil { + return nil + } + out := new(DestinationPortInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationPortObservation) DeepCopyInto(out *DestinationPortObservation) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationPortObservation. +func (in *DestinationPortObservation) DeepCopy() *DestinationPortObservation { + if in == nil { + return nil + } + out := new(DestinationPortObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationPortParameters) DeepCopyInto(out *DestinationPortParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationPortParameters. +func (in *DestinationPortParameters) DeepCopy() *DestinationPortParameters { + if in == nil { + return nil + } + out := new(DestinationPortParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionInitParameters) DeepCopyInto(out *DimensionInitParameters) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionInitParameters. 
+func (in *DimensionInitParameters) DeepCopy() *DimensionInitParameters { + if in == nil { + return nil + } + out := new(DimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionObservation) DeepCopyInto(out *DimensionObservation) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionObservation. +func (in *DimensionObservation) DeepCopy() *DimensionObservation { + if in == nil { + return nil + } + out := new(DimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionParameters) DeepCopyInto(out *DimensionParameters) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionParameters. +func (in *DimensionParameters) DeepCopy() *DimensionParameters { + if in == nil { + return nil + } + out := new(DimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationInitParameters) DeepCopyInto(out *EncryptionConfigurationInitParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationInitParameters. 
+func (in *EncryptionConfigurationInitParameters) DeepCopy() *EncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationObservation) DeepCopyInto(out *EncryptionConfigurationObservation) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationObservation. +func (in *EncryptionConfigurationObservation) DeepCopy() *EncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationParameters) DeepCopyInto(out *EncryptionConfigurationParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationParameters. +func (in *EncryptionConfigurationParameters) DeepCopy() *EncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Firewall) DeepCopyInto(out *Firewall) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Firewall. +func (in *Firewall) DeepCopy() *Firewall { + if in == nil { + return nil + } + out := new(Firewall) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Firewall) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallInitParameters) DeepCopyInto(out *FirewallInitParameters) { + *out = *in + if in.DeleteProtection != nil { + in, out := &in.DeleteProtection, &out.DeleteProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicyArn != nil { + in, out := &in.FirewallPolicyArn, &out.FirewallPolicyArn + *out = new(string) + **out = **in + } + if in.FirewallPolicyArnRef != nil { + in, out := &in.FirewallPolicyArnRef, &out.FirewallPolicyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicyArnSelector != nil { + in, out := &in.FirewallPolicyArnSelector, &out.FirewallPolicyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicyChangeProtection != nil { + in, out := &in.FirewallPolicyChangeProtection, &out.FirewallPolicyChangeProtection + *out = new(bool) + **out = **in + } + if in.Name != 
nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetChangeProtection != nil { + in, out := &in.SubnetChangeProtection, &out.SubnetChangeProtection + *out = new(bool) + **out = **in + } + if in.SubnetMapping != nil { + in, out := &in.SubnetMapping, &out.SubnetMapping + *out = make([]SubnetMappingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallInitParameters. +func (in *FirewallInitParameters) DeepCopy() *FirewallInitParameters { + if in == nil { + return nil + } + out := new(FirewallInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallList) DeepCopyInto(out *FirewallList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Firewall, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallList. 
+func (in *FirewallList) DeepCopy() *FirewallList { + if in == nil { + return nil + } + out := new(FirewallList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FirewallList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallObservation) DeepCopyInto(out *FirewallObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DeleteProtection != nil { + in, out := &in.DeleteProtection, &out.DeleteProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicyArn != nil { + in, out := &in.FirewallPolicyArn, &out.FirewallPolicyArn + *out = new(string) + **out = **in + } + if in.FirewallPolicyChangeProtection != nil { + in, out := &in.FirewallPolicyChangeProtection, &out.FirewallPolicyChangeProtection + *out = new(bool) + **out = **in + } + if in.FirewallStatus != nil { + in, out := &in.FirewallStatus, &out.FirewallStatus + *out = make([]FirewallStatusObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetChangeProtection != nil { + in, out := &in.SubnetChangeProtection, &out.SubnetChangeProtection + *out = new(bool) + **out = **in + } + if in.SubnetMapping != nil { + 
in, out := &in.SubnetMapping, &out.SubnetMapping + *out = make([]SubnetMappingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpdateToken != nil { + in, out := &in.UpdateToken, &out.UpdateToken + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallObservation. +func (in *FirewallObservation) DeepCopy() *FirewallObservation { + if in == nil { + return nil + } + out := new(FirewallObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallParameters) DeepCopyInto(out *FirewallParameters) { + *out = *in + if in.DeleteProtection != nil { + in, out := &in.DeleteProtection, &out.DeleteProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicyArn != nil { + in, out := &in.FirewallPolicyArn, &out.FirewallPolicyArn + *out = new(string) + **out = **in + } + if in.FirewallPolicyArnRef != nil { + in, out := &in.FirewallPolicyArnRef, &out.FirewallPolicyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicyArnSelector != nil { + in, out := &in.FirewallPolicyArnSelector, &out.FirewallPolicyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicyChangeProtection != nil { + in, out := &in.FirewallPolicyChangeProtection, &out.FirewallPolicyChangeProtection + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SubnetChangeProtection != nil { + in, out := &in.SubnetChangeProtection, &out.SubnetChangeProtection + *out = new(bool) + **out = **in + } + if in.SubnetMapping != nil { + in, out := &in.SubnetMapping, &out.SubnetMapping + *out = make([]SubnetMappingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallParameters. +func (in *FirewallParameters) DeepCopy() *FirewallParameters { + if in == nil { + return nil + } + out := new(FirewallParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicy) DeepCopyInto(out *FirewallPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicy. +func (in *FirewallPolicy) DeepCopy() *FirewallPolicy { + if in == nil { + return nil + } + out := new(FirewallPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FirewallPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallPolicyEncryptionConfigurationInitParameters) DeepCopyInto(out *FirewallPolicyEncryptionConfigurationInitParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyEncryptionConfigurationInitParameters. +func (in *FirewallPolicyEncryptionConfigurationInitParameters) DeepCopy() *FirewallPolicyEncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyEncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyEncryptionConfigurationObservation) DeepCopyInto(out *FirewallPolicyEncryptionConfigurationObservation) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyEncryptionConfigurationObservation. +func (in *FirewallPolicyEncryptionConfigurationObservation) DeepCopy() *FirewallPolicyEncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(FirewallPolicyEncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallPolicyEncryptionConfigurationParameters) DeepCopyInto(out *FirewallPolicyEncryptionConfigurationParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyEncryptionConfigurationParameters. +func (in *FirewallPolicyEncryptionConfigurationParameters) DeepCopy() *FirewallPolicyEncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyEncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyFirewallPolicyInitParameters) DeepCopyInto(out *FirewallPolicyFirewallPolicyInitParameters) { + *out = *in + if in.PolicyVariables != nil { + in, out := &in.PolicyVariables, &out.PolicyVariables + *out = new(PolicyVariablesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StatefulDefaultActions != nil { + in, out := &in.StatefulDefaultActions, &out.StatefulDefaultActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatefulEngineOptions != nil { + in, out := &in.StatefulEngineOptions, &out.StatefulEngineOptions + *out = new(StatefulEngineOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StatefulRuleGroupReference != nil { + in, out := &in.StatefulRuleGroupReference, &out.StatefulRuleGroupReference + *out = make([]StatefulRuleGroupReferenceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessCustomAction != nil { + in, out := &in.StatelessCustomAction, &out.StatelessCustomAction + *out = 
make([]StatelessCustomActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessDefaultActions != nil { + in, out := &in.StatelessDefaultActions, &out.StatelessDefaultActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatelessFragmentDefaultActions != nil { + in, out := &in.StatelessFragmentDefaultActions, &out.StatelessFragmentDefaultActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatelessRuleGroupReference != nil { + in, out := &in.StatelessRuleGroupReference, &out.StatelessRuleGroupReference + *out = make([]StatelessRuleGroupReferenceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSInspectionConfigurationArn != nil { + in, out := &in.TLSInspectionConfigurationArn, &out.TLSInspectionConfigurationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyFirewallPolicyInitParameters. +func (in *FirewallPolicyFirewallPolicyInitParameters) DeepCopy() *FirewallPolicyFirewallPolicyInitParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyFirewallPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallPolicyFirewallPolicyObservation) DeepCopyInto(out *FirewallPolicyFirewallPolicyObservation) { + *out = *in + if in.PolicyVariables != nil { + in, out := &in.PolicyVariables, &out.PolicyVariables + *out = new(PolicyVariablesObservation) + (*in).DeepCopyInto(*out) + } + if in.StatefulDefaultActions != nil { + in, out := &in.StatefulDefaultActions, &out.StatefulDefaultActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatefulEngineOptions != nil { + in, out := &in.StatefulEngineOptions, &out.StatefulEngineOptions + *out = new(StatefulEngineOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.StatefulRuleGroupReference != nil { + in, out := &in.StatefulRuleGroupReference, &out.StatefulRuleGroupReference + *out = make([]StatefulRuleGroupReferenceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessCustomAction != nil { + in, out := &in.StatelessCustomAction, &out.StatelessCustomAction + *out = make([]StatelessCustomActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessDefaultActions != nil { + in, out := &in.StatelessDefaultActions, &out.StatelessDefaultActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatelessFragmentDefaultActions != nil { + in, out := &in.StatelessFragmentDefaultActions, &out.StatelessFragmentDefaultActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatelessRuleGroupReference != nil { + in, out := &in.StatelessRuleGroupReference, &out.StatelessRuleGroupReference + *out = make([]StatelessRuleGroupReferenceObservation, len(*in)) + 
for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSInspectionConfigurationArn != nil { + in, out := &in.TLSInspectionConfigurationArn, &out.TLSInspectionConfigurationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyFirewallPolicyObservation. +func (in *FirewallPolicyFirewallPolicyObservation) DeepCopy() *FirewallPolicyFirewallPolicyObservation { + if in == nil { + return nil + } + out := new(FirewallPolicyFirewallPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyFirewallPolicyParameters) DeepCopyInto(out *FirewallPolicyFirewallPolicyParameters) { + *out = *in + if in.PolicyVariables != nil { + in, out := &in.PolicyVariables, &out.PolicyVariables + *out = new(PolicyVariablesParameters) + (*in).DeepCopyInto(*out) + } + if in.StatefulDefaultActions != nil { + in, out := &in.StatefulDefaultActions, &out.StatefulDefaultActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatefulEngineOptions != nil { + in, out := &in.StatefulEngineOptions, &out.StatefulEngineOptions + *out = new(StatefulEngineOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.StatefulRuleGroupReference != nil { + in, out := &in.StatefulRuleGroupReference, &out.StatefulRuleGroupReference + *out = make([]StatefulRuleGroupReferenceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessCustomAction != nil { + in, out := &in.StatelessCustomAction, &out.StatelessCustomAction + *out = make([]StatelessCustomActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessDefaultActions != nil { + in, out := 
&in.StatelessDefaultActions, &out.StatelessDefaultActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatelessFragmentDefaultActions != nil { + in, out := &in.StatelessFragmentDefaultActions, &out.StatelessFragmentDefaultActions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StatelessRuleGroupReference != nil { + in, out := &in.StatelessRuleGroupReference, &out.StatelessRuleGroupReference + *out = make([]StatelessRuleGroupReferenceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSInspectionConfigurationArn != nil { + in, out := &in.TLSInspectionConfigurationArn, &out.TLSInspectionConfigurationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyFirewallPolicyParameters. +func (in *FirewallPolicyFirewallPolicyParameters) DeepCopy() *FirewallPolicyFirewallPolicyParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyFirewallPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallPolicyInitParameters) DeepCopyInto(out *FirewallPolicyInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(FirewallPolicyEncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicy != nil { + in, out := &in.FirewallPolicy, &out.FirewallPolicy + *out = new(FirewallPolicyFirewallPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyInitParameters. +func (in *FirewallPolicyInitParameters) DeepCopy() *FirewallPolicyInitParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyList) DeepCopyInto(out *FirewallPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FirewallPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyList. 
+func (in *FirewallPolicyList) DeepCopy() *FirewallPolicyList { + if in == nil { + return nil + } + out := new(FirewallPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FirewallPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyObservation) DeepCopyInto(out *FirewallPolicyObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(FirewallPolicyEncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicy != nil { + in, out := &in.FirewallPolicy, &out.FirewallPolicy + *out = new(FirewallPolicyFirewallPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpdateToken != nil { + in, out := 
&in.UpdateToken, &out.UpdateToken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyObservation. +func (in *FirewallPolicyObservation) DeepCopy() *FirewallPolicyObservation { + if in == nil { + return nil + } + out := new(FirewallPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyParameters) DeepCopyInto(out *FirewallPolicyParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(FirewallPolicyEncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.FirewallPolicy != nil { + in, out := &in.FirewallPolicy, &out.FirewallPolicy + *out = new(FirewallPolicyFirewallPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyParameters. +func (in *FirewallPolicyParameters) DeepCopy() *FirewallPolicyParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallPolicySpec) DeepCopyInto(out *FirewallPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicySpec. +func (in *FirewallPolicySpec) DeepCopy() *FirewallPolicySpec { + if in == nil { + return nil + } + out := new(FirewallPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyStatus) DeepCopyInto(out *FirewallPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyStatus. +func (in *FirewallPolicyStatus) DeepCopy() *FirewallPolicyStatus { + if in == nil { + return nil + } + out := new(FirewallPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallSpec) DeepCopyInto(out *FirewallSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallSpec. +func (in *FirewallSpec) DeepCopy() *FirewallSpec { + if in == nil { + return nil + } + out := new(FirewallSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallStatus) DeepCopyInto(out *FirewallStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallStatus. +func (in *FirewallStatus) DeepCopy() *FirewallStatus { + if in == nil { + return nil + } + out := new(FirewallStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallStatusInitParameters) DeepCopyInto(out *FirewallStatusInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallStatusInitParameters. +func (in *FirewallStatusInitParameters) DeepCopy() *FirewallStatusInitParameters { + if in == nil { + return nil + } + out := new(FirewallStatusInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallStatusObservation) DeepCopyInto(out *FirewallStatusObservation) { + *out = *in + if in.SyncStates != nil { + in, out := &in.SyncStates, &out.SyncStates + *out = make([]SyncStatesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallStatusObservation. +func (in *FirewallStatusObservation) DeepCopy() *FirewallStatusObservation { + if in == nil { + return nil + } + out := new(FirewallStatusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallStatusParameters) DeepCopyInto(out *FirewallStatusParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallStatusParameters. +func (in *FirewallStatusParameters) DeepCopy() *FirewallStatusParameters { + if in == nil { + return nil + } + out := new(FirewallStatusParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderInitParameters) DeepCopyInto(out *HeaderInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.DestinationPort != nil { + in, out := &in.DestinationPort, &out.DestinationPort + *out = new(string) + **out = **in + } + if in.Direction != nil { + in, out := &in.Direction, &out.Direction + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SourcePort != nil { + in, out := &in.SourcePort, &out.SourcePort + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderInitParameters. +func (in *HeaderInitParameters) DeepCopy() *HeaderInitParameters { + if in == nil { + return nil + } + out := new(HeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderObservation) DeepCopyInto(out *HeaderObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.DestinationPort != nil { + in, out := &in.DestinationPort, &out.DestinationPort + *out = new(string) + **out = **in + } + if in.Direction != nil { + in, out := &in.Direction, &out.Direction + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SourcePort != nil { + in, out := &in.SourcePort, &out.SourcePort + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderObservation. +func (in *HeaderObservation) DeepCopy() *HeaderObservation { + if in == nil { + return nil + } + out := new(HeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderParameters) DeepCopyInto(out *HeaderParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.DestinationPort != nil { + in, out := &in.DestinationPort, &out.DestinationPort + *out = new(string) + **out = **in + } + if in.Direction != nil { + in, out := &in.Direction, &out.Direction + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SourcePort != nil { + in, out := &in.SourcePort, &out.SourcePort + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderParameters. +func (in *HeaderParameters) DeepCopy() *HeaderParameters { + if in == nil { + return nil + } + out := new(HeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetInitParameters) DeepCopyInto(out *IPSetInitParameters) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetInitParameters. +func (in *IPSetInitParameters) DeepCopy() *IPSetInitParameters { + if in == nil { + return nil + } + out := new(IPSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPSetObservation) DeepCopyInto(out *IPSetObservation) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetObservation. +func (in *IPSetObservation) DeepCopy() *IPSetObservation { + if in == nil { + return nil + } + out := new(IPSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetParameters) DeepCopyInto(out *IPSetParameters) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetParameters. +func (in *IPSetParameters) DeepCopy() *IPSetParameters { + if in == nil { + return nil + } + out := new(IPSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPSetReferenceInitParameters) DeepCopyInto(out *IPSetReferenceInitParameters) { + *out = *in + if in.ReferenceArn != nil { + in, out := &in.ReferenceArn, &out.ReferenceArn + *out = new(string) + **out = **in + } + if in.ReferenceArnRef != nil { + in, out := &in.ReferenceArnRef, &out.ReferenceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ReferenceArnSelector != nil { + in, out := &in.ReferenceArnSelector, &out.ReferenceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetReferenceInitParameters. +func (in *IPSetReferenceInitParameters) DeepCopy() *IPSetReferenceInitParameters { + if in == nil { + return nil + } + out := new(IPSetReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetReferenceObservation) DeepCopyInto(out *IPSetReferenceObservation) { + *out = *in + if in.ReferenceArn != nil { + in, out := &in.ReferenceArn, &out.ReferenceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetReferenceObservation. +func (in *IPSetReferenceObservation) DeepCopy() *IPSetReferenceObservation { + if in == nil { + return nil + } + out := new(IPSetReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPSetReferenceParameters) DeepCopyInto(out *IPSetReferenceParameters) { + *out = *in + if in.ReferenceArn != nil { + in, out := &in.ReferenceArn, &out.ReferenceArn + *out = new(string) + **out = **in + } + if in.ReferenceArnRef != nil { + in, out := &in.ReferenceArnRef, &out.ReferenceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ReferenceArnSelector != nil { + in, out := &in.ReferenceArnSelector, &out.ReferenceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetReferenceParameters. +func (in *IPSetReferenceParameters) DeepCopy() *IPSetReferenceParameters { + if in == nil { + return nil + } + out := new(IPSetReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetReferencesInitParameters) DeepCopyInto(out *IPSetReferencesInitParameters) { + *out = *in + if in.IPSetReference != nil { + in, out := &in.IPSetReference, &out.IPSetReference + *out = make([]IPSetReferenceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetReferencesInitParameters. +func (in *IPSetReferencesInitParameters) DeepCopy() *IPSetReferencesInitParameters { + if in == nil { + return nil + } + out := new(IPSetReferencesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPSetReferencesObservation) DeepCopyInto(out *IPSetReferencesObservation) { + *out = *in + if in.IPSetReference != nil { + in, out := &in.IPSetReference, &out.IPSetReference + *out = make([]IPSetReferenceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetReferencesObservation. +func (in *IPSetReferencesObservation) DeepCopy() *IPSetReferencesObservation { + if in == nil { + return nil + } + out := new(IPSetReferencesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetReferencesParameters) DeepCopyInto(out *IPSetReferencesParameters) { + *out = *in + if in.IPSetReference != nil { + in, out := &in.IPSetReference, &out.IPSetReference + *out = make([]IPSetReferenceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetReferencesParameters. +func (in *IPSetReferencesParameters) DeepCopy() *IPSetReferencesParameters { + if in == nil { + return nil + } + out := new(IPSetReferencesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPSetsIPSetInitParameters) DeepCopyInto(out *IPSetsIPSetInitParameters) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetsIPSetInitParameters. +func (in *IPSetsIPSetInitParameters) DeepCopy() *IPSetsIPSetInitParameters { + if in == nil { + return nil + } + out := new(IPSetsIPSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetsIPSetObservation) DeepCopyInto(out *IPSetsIPSetObservation) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetsIPSetObservation. +func (in *IPSetsIPSetObservation) DeepCopy() *IPSetsIPSetObservation { + if in == nil { + return nil + } + out := new(IPSetsIPSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetsIPSetParameters) DeepCopyInto(out *IPSetsIPSetParameters) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetsIPSetParameters. 
+func (in *IPSetsIPSetParameters) DeepCopy() *IPSetsIPSetParameters { + if in == nil { + return nil + } + out := new(IPSetsIPSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetsInitParameters) DeepCopyInto(out *IPSetsInitParameters) { + *out = *in + if in.IPSet != nil { + in, out := &in.IPSet, &out.IPSet + *out = new(IPSetsIPSetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetsInitParameters. +func (in *IPSetsInitParameters) DeepCopy() *IPSetsInitParameters { + if in == nil { + return nil + } + out := new(IPSetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSetsObservation) DeepCopyInto(out *IPSetsObservation) { + *out = *in + if in.IPSet != nil { + in, out := &in.IPSet, &out.IPSet + *out = new(IPSetsIPSetObservation) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetsObservation. +func (in *IPSetsObservation) DeepCopy() *IPSetsObservation { + if in == nil { + return nil + } + out := new(IPSetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPSetsParameters) DeepCopyInto(out *IPSetsParameters) { + *out = *in + if in.IPSet != nil { + in, out := &in.IPSet, &out.IPSet + *out = new(IPSetsIPSetParameters) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSetsParameters. +func (in *IPSetsParameters) DeepCopy() *IPSetsParameters { + if in == nil { + return nil + } + out := new(IPSetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogDestinationConfigInitParameters) DeepCopyInto(out *LogDestinationConfigInitParameters) { + *out = *in + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogDestinationType != nil { + in, out := &in.LogDestinationType, &out.LogDestinationType + *out = new(string) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogDestinationConfigInitParameters. +func (in *LogDestinationConfigInitParameters) DeepCopy() *LogDestinationConfigInitParameters { + if in == nil { + return nil + } + out := new(LogDestinationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogDestinationConfigObservation) DeepCopyInto(out *LogDestinationConfigObservation) { + *out = *in + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogDestinationType != nil { + in, out := &in.LogDestinationType, &out.LogDestinationType + *out = new(string) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogDestinationConfigObservation. +func (in *LogDestinationConfigObservation) DeepCopy() *LogDestinationConfigObservation { + if in == nil { + return nil + } + out := new(LogDestinationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogDestinationConfigParameters) DeepCopyInto(out *LogDestinationConfigParameters) { + *out = *in + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogDestinationType != nil { + in, out := &in.LogDestinationType, &out.LogDestinationType + *out = new(string) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogDestinationConfigParameters. 
+func (in *LogDestinationConfigParameters) DeepCopy() *LogDestinationConfigParameters { + if in == nil { + return nil + } + out := new(LogDestinationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfiguration) DeepCopyInto(out *LoggingConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfiguration. +func (in *LoggingConfiguration) DeepCopy() *LoggingConfiguration { + if in == nil { + return nil + } + out := new(LoggingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LoggingConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingConfigurationInitParameters) DeepCopyInto(out *LoggingConfigurationInitParameters) { + *out = *in + if in.FirewallArn != nil { + in, out := &in.FirewallArn, &out.FirewallArn + *out = new(string) + **out = **in + } + if in.FirewallArnRef != nil { + in, out := &in.FirewallArnRef, &out.FirewallArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FirewallArnSelector != nil { + in, out := &in.FirewallArnSelector, &out.FirewallArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationLoggingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationInitParameters. +func (in *LoggingConfigurationInitParameters) DeepCopy() *LoggingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationList) DeepCopyInto(out *LoggingConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LoggingConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationList. +func (in *LoggingConfigurationList) DeepCopy() *LoggingConfigurationList { + if in == nil { + return nil + } + out := new(LoggingConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LoggingConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationLoggingConfigurationInitParameters) DeepCopyInto(out *LoggingConfigurationLoggingConfigurationInitParameters) { + *out = *in + if in.LogDestinationConfig != nil { + in, out := &in.LogDestinationConfig, &out.LogDestinationConfig + *out = make([]LogDestinationConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationLoggingConfigurationInitParameters. +func (in *LoggingConfigurationLoggingConfigurationInitParameters) DeepCopy() *LoggingConfigurationLoggingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationLoggingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationLoggingConfigurationObservation) DeepCopyInto(out *LoggingConfigurationLoggingConfigurationObservation) { + *out = *in + if in.LogDestinationConfig != nil { + in, out := &in.LogDestinationConfig, &out.LogDestinationConfig + *out = make([]LogDestinationConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationLoggingConfigurationObservation. 
+func (in *LoggingConfigurationLoggingConfigurationObservation) DeepCopy() *LoggingConfigurationLoggingConfigurationObservation { + if in == nil { + return nil + } + out := new(LoggingConfigurationLoggingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationLoggingConfigurationParameters) DeepCopyInto(out *LoggingConfigurationLoggingConfigurationParameters) { + *out = *in + if in.LogDestinationConfig != nil { + in, out := &in.LogDestinationConfig, &out.LogDestinationConfig + *out = make([]LogDestinationConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationLoggingConfigurationParameters. +func (in *LoggingConfigurationLoggingConfigurationParameters) DeepCopy() *LoggingConfigurationLoggingConfigurationParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationLoggingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationObservation) DeepCopyInto(out *LoggingConfigurationObservation) { + *out = *in + if in.FirewallArn != nil { + in, out := &in.FirewallArn, &out.FirewallArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationLoggingConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationObservation. 
+func (in *LoggingConfigurationObservation) DeepCopy() *LoggingConfigurationObservation { + if in == nil { + return nil + } + out := new(LoggingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationParameters) DeepCopyInto(out *LoggingConfigurationParameters) { + *out = *in + if in.FirewallArn != nil { + in, out := &in.FirewallArn, &out.FirewallArn + *out = new(string) + **out = **in + } + if in.FirewallArnRef != nil { + in, out := &in.FirewallArnRef, &out.FirewallArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FirewallArnSelector != nil { + in, out := &in.FirewallArnSelector, &out.FirewallArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationLoggingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationParameters. +func (in *LoggingConfigurationParameters) DeepCopy() *LoggingConfigurationParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationSpec) DeepCopyInto(out *LoggingConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationSpec. 
+func (in *LoggingConfigurationSpec) DeepCopy() *LoggingConfigurationSpec { + if in == nil { + return nil + } + out := new(LoggingConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationStatus) DeepCopyInto(out *LoggingConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationStatus. +func (in *LoggingConfigurationStatus) DeepCopy() *LoggingConfigurationStatus { + if in == nil { + return nil + } + out := new(LoggingConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchAttributesInitParameters) DeepCopyInto(out *MatchAttributesInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = make([]DestinationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationPort != nil { + in, out := &in.DestinationPort, &out.DestinationPort + *out = make([]DestinationPortInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = make([]SourceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourcePort != nil { + in, out := &in.SourcePort, &out.SourcePort + *out = make([]SourcePortInitParameters, len(*in)) + for i 
:= range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TCPFlag != nil { + in, out := &in.TCPFlag, &out.TCPFlag + *out = make([]TCPFlagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchAttributesInitParameters. +func (in *MatchAttributesInitParameters) DeepCopy() *MatchAttributesInitParameters { + if in == nil { + return nil + } + out := new(MatchAttributesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchAttributesObservation) DeepCopyInto(out *MatchAttributesObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = make([]DestinationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationPort != nil { + in, out := &in.DestinationPort, &out.DestinationPort + *out = make([]DestinationPortObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = make([]SourceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourcePort != nil { + in, out := &in.SourcePort, &out.SourcePort + *out = make([]SourcePortObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TCPFlag != nil { + in, out := &in.TCPFlag, &out.TCPFlag + *out = make([]TCPFlagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new MatchAttributesObservation. +func (in *MatchAttributesObservation) DeepCopy() *MatchAttributesObservation { + if in == nil { + return nil + } + out := new(MatchAttributesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchAttributesParameters) DeepCopyInto(out *MatchAttributesParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = make([]DestinationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationPort != nil { + in, out := &in.DestinationPort, &out.DestinationPort + *out = make([]DestinationPortParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = make([]SourceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourcePort != nil { + in, out := &in.SourcePort, &out.SourcePort + *out = make([]SourcePortParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TCPFlag != nil { + in, out := &in.TCPFlag, &out.TCPFlag + *out = make([]TCPFlagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchAttributesParameters. 
+func (in *MatchAttributesParameters) DeepCopy() *MatchAttributesParameters { + if in == nil { + return nil + } + out := new(MatchAttributesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideInitParameters) DeepCopyInto(out *OverrideInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideInitParameters. +func (in *OverrideInitParameters) DeepCopy() *OverrideInitParameters { + if in == nil { + return nil + } + out := new(OverrideInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideObservation) DeepCopyInto(out *OverrideObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideObservation. +func (in *OverrideObservation) DeepCopy() *OverrideObservation { + if in == nil { + return nil + } + out := new(OverrideObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideParameters) DeepCopyInto(out *OverrideParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideParameters. 
+func (in *OverrideParameters) DeepCopy() *OverrideParameters { + if in == nil { + return nil + } + out := new(OverrideParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyVariablesInitParameters) DeepCopyInto(out *PolicyVariablesInitParameters) { + *out = *in + if in.RuleVariables != nil { + in, out := &in.RuleVariables, &out.RuleVariables + *out = make([]RuleVariablesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVariablesInitParameters. +func (in *PolicyVariablesInitParameters) DeepCopy() *PolicyVariablesInitParameters { + if in == nil { + return nil + } + out := new(PolicyVariablesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyVariablesObservation) DeepCopyInto(out *PolicyVariablesObservation) { + *out = *in + if in.RuleVariables != nil { + in, out := &in.RuleVariables, &out.RuleVariables + *out = make([]RuleVariablesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVariablesObservation. +func (in *PolicyVariablesObservation) DeepCopy() *PolicyVariablesObservation { + if in == nil { + return nil + } + out := new(PolicyVariablesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyVariablesParameters) DeepCopyInto(out *PolicyVariablesParameters) { + *out = *in + if in.RuleVariables != nil { + in, out := &in.RuleVariables, &out.RuleVariables + *out = make([]RuleVariablesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVariablesParameters. +func (in *PolicyVariablesParameters) DeepCopy() *PolicyVariablesParameters { + if in == nil { + return nil + } + out := new(PolicyVariablesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSetInitParameters) DeepCopyInto(out *PortSetInitParameters) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSetInitParameters. +func (in *PortSetInitParameters) DeepCopy() *PortSetInitParameters { + if in == nil { + return nil + } + out := new(PortSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSetObservation) DeepCopyInto(out *PortSetObservation) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSetObservation. 
+func (in *PortSetObservation) DeepCopy() *PortSetObservation { + if in == nil { + return nil + } + out := new(PortSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSetParameters) DeepCopyInto(out *PortSetParameters) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSetParameters. +func (in *PortSetParameters) DeepCopy() *PortSetParameters { + if in == nil { + return nil + } + out := new(PortSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSetsInitParameters) DeepCopyInto(out *PortSetsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.PortSet != nil { + in, out := &in.PortSet, &out.PortSet + *out = new(PortSetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSetsInitParameters. +func (in *PortSetsInitParameters) DeepCopy() *PortSetsInitParameters { + if in == nil { + return nil + } + out := new(PortSetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PortSetsObservation) DeepCopyInto(out *PortSetsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.PortSet != nil { + in, out := &in.PortSet, &out.PortSet + *out = new(PortSetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSetsObservation. +func (in *PortSetsObservation) DeepCopy() *PortSetsObservation { + if in == nil { + return nil + } + out := new(PortSetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSetsParameters) DeepCopyInto(out *PortSetsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.PortSet != nil { + in, out := &in.PortSet, &out.PortSet + *out = new(PortSetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSetsParameters. +func (in *PortSetsParameters) DeepCopy() *PortSetsParameters { + if in == nil { + return nil + } + out := new(PortSetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishMetricActionDimensionInitParameters) DeepCopyInto(out *PublishMetricActionDimensionInitParameters) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishMetricActionDimensionInitParameters. 
+func (in *PublishMetricActionDimensionInitParameters) DeepCopy() *PublishMetricActionDimensionInitParameters { + if in == nil { + return nil + } + out := new(PublishMetricActionDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishMetricActionDimensionObservation) DeepCopyInto(out *PublishMetricActionDimensionObservation) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishMetricActionDimensionObservation. +func (in *PublishMetricActionDimensionObservation) DeepCopy() *PublishMetricActionDimensionObservation { + if in == nil { + return nil + } + out := new(PublishMetricActionDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishMetricActionDimensionParameters) DeepCopyInto(out *PublishMetricActionDimensionParameters) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishMetricActionDimensionParameters. +func (in *PublishMetricActionDimensionParameters) DeepCopy() *PublishMetricActionDimensionParameters { + if in == nil { + return nil + } + out := new(PublishMetricActionDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublishMetricActionInitParameters) DeepCopyInto(out *PublishMetricActionInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishMetricActionInitParameters. +func (in *PublishMetricActionInitParameters) DeepCopy() *PublishMetricActionInitParameters { + if in == nil { + return nil + } + out := new(PublishMetricActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishMetricActionObservation) DeepCopyInto(out *PublishMetricActionObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishMetricActionObservation. +func (in *PublishMetricActionObservation) DeepCopy() *PublishMetricActionObservation { + if in == nil { + return nil + } + out := new(PublishMetricActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishMetricActionParameters) DeepCopyInto(out *PublishMetricActionParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishMetricActionParameters. 
+func (in *PublishMetricActionParameters) DeepCopy() *PublishMetricActionParameters { + if in == nil { + return nil + } + out := new(PublishMetricActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSetsInitParameters) DeepCopyInto(out *ReferenceSetsInitParameters) { + *out = *in + if in.IPSetReferences != nil { + in, out := &in.IPSetReferences, &out.IPSetReferences + *out = make([]IPSetReferencesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSetsInitParameters. +func (in *ReferenceSetsInitParameters) DeepCopy() *ReferenceSetsInitParameters { + if in == nil { + return nil + } + out := new(ReferenceSetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceSetsObservation) DeepCopyInto(out *ReferenceSetsObservation) { + *out = *in + if in.IPSetReferences != nil { + in, out := &in.IPSetReferences, &out.IPSetReferences + *out = make([]IPSetReferencesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSetsObservation. +func (in *ReferenceSetsObservation) DeepCopy() *ReferenceSetsObservation { + if in == nil { + return nil + } + out := new(ReferenceSetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceSetsParameters) DeepCopyInto(out *ReferenceSetsParameters) { + *out = *in + if in.IPSetReferences != nil { + in, out := &in.IPSetReferences, &out.IPSetReferences + *out = make([]IPSetReferencesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceSetsParameters. +func (in *ReferenceSetsParameters) DeepCopy() *ReferenceSetsParameters { + if in == nil { + return nil + } + out := new(ReferenceSetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleDefinitionInitParameters) DeepCopyInto(out *RuleDefinitionInitParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MatchAttributes != nil { + in, out := &in.MatchAttributes, &out.MatchAttributes + *out = new(MatchAttributesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleDefinitionInitParameters. +func (in *RuleDefinitionInitParameters) DeepCopy() *RuleDefinitionInitParameters { + if in == nil { + return nil + } + out := new(RuleDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleDefinitionObservation) DeepCopyInto(out *RuleDefinitionObservation) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MatchAttributes != nil { + in, out := &in.MatchAttributes, &out.MatchAttributes + *out = new(MatchAttributesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleDefinitionObservation. +func (in *RuleDefinitionObservation) DeepCopy() *RuleDefinitionObservation { + if in == nil { + return nil + } + out := new(RuleDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleDefinitionParameters) DeepCopyInto(out *RuleDefinitionParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MatchAttributes != nil { + in, out := &in.MatchAttributes, &out.MatchAttributes + *out = new(MatchAttributesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleDefinitionParameters. +func (in *RuleDefinitionParameters) DeepCopy() *RuleDefinitionParameters { + if in == nil { + return nil + } + out := new(RuleDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroup) DeepCopyInto(out *RuleGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroup. +func (in *RuleGroup) DeepCopy() *RuleGroup { + if in == nil { + return nil + } + out := new(RuleGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuleGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupEncryptionConfigurationInitParameters) DeepCopyInto(out *RuleGroupEncryptionConfigurationInitParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupEncryptionConfigurationInitParameters. +func (in *RuleGroupEncryptionConfigurationInitParameters) DeepCopy() *RuleGroupEncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RuleGroupEncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupEncryptionConfigurationObservation) DeepCopyInto(out *RuleGroupEncryptionConfigurationObservation) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupEncryptionConfigurationObservation. +func (in *RuleGroupEncryptionConfigurationObservation) DeepCopy() *RuleGroupEncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(RuleGroupEncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupEncryptionConfigurationParameters) DeepCopyInto(out *RuleGroupEncryptionConfigurationParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupEncryptionConfigurationParameters. +func (in *RuleGroupEncryptionConfigurationParameters) DeepCopy() *RuleGroupEncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(RuleGroupEncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupInitParameters) DeepCopyInto(out *RuleGroupInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(RuleGroupEncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuleGroup != nil { + in, out := &in.RuleGroup, &out.RuleGroup + *out = new(RuleGroupRuleGroupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupInitParameters. +func (in *RuleGroupInitParameters) DeepCopy() *RuleGroupInitParameters { + if in == nil { + return nil + } + out := new(RuleGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupList) DeepCopyInto(out *RuleGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RuleGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupList. +func (in *RuleGroupList) DeepCopy() *RuleGroupList { + if in == nil { + return nil + } + out := new(RuleGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuleGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupObservation) DeepCopyInto(out *RuleGroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(RuleGroupEncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuleGroup != nil { + in, out := &in.RuleGroup, &out.RuleGroup + *out = new(RuleGroupRuleGroupObservation) + (*in).DeepCopyInto(*out) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := 
&in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UpdateToken != nil { + in, out := &in.UpdateToken, &out.UpdateToken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupObservation. +func (in *RuleGroupObservation) DeepCopy() *RuleGroupObservation { + if in == nil { + return nil + } + out := new(RuleGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupParameters) DeepCopyInto(out *RuleGroupParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(RuleGroupEncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RuleGroup != nil { + in, out := &in.RuleGroup, &out.RuleGroup + *out = new(RuleGroupRuleGroupParameters) + (*in).DeepCopyInto(*out) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupParameters. +func (in *RuleGroupParameters) DeepCopy() *RuleGroupParameters { + if in == nil { + return nil + } + out := new(RuleGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupRuleGroupInitParameters) DeepCopyInto(out *RuleGroupRuleGroupInitParameters) { + *out = *in + if in.ReferenceSets != nil { + in, out := &in.ReferenceSets, &out.ReferenceSets + *out = new(ReferenceSetsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuleVariables != nil { + in, out := &in.RuleVariables, &out.RuleVariables + *out = new(RuleGroupRuleVariablesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RulesSource != nil { + in, out := &in.RulesSource, &out.RulesSource + *out = new(RulesSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StatefulRuleOptions != nil { + in, out := &in.StatefulRuleOptions, &out.StatefulRuleOptions + *out = new(StatefulRuleOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupRuleGroupInitParameters. +func (in *RuleGroupRuleGroupInitParameters) DeepCopy() *RuleGroupRuleGroupInitParameters { + if in == nil { + return nil + } + out := new(RuleGroupRuleGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupRuleGroupObservation) DeepCopyInto(out *RuleGroupRuleGroupObservation) { + *out = *in + if in.ReferenceSets != nil { + in, out := &in.ReferenceSets, &out.ReferenceSets + *out = new(ReferenceSetsObservation) + (*in).DeepCopyInto(*out) + } + if in.RuleVariables != nil { + in, out := &in.RuleVariables, &out.RuleVariables + *out = new(RuleGroupRuleVariablesObservation) + (*in).DeepCopyInto(*out) + } + if in.RulesSource != nil { + in, out := &in.RulesSource, &out.RulesSource + *out = new(RulesSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.StatefulRuleOptions != nil { + in, out := &in.StatefulRuleOptions, &out.StatefulRuleOptions + *out = new(StatefulRuleOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupRuleGroupObservation. +func (in *RuleGroupRuleGroupObservation) DeepCopy() *RuleGroupRuleGroupObservation { + if in == nil { + return nil + } + out := new(RuleGroupRuleGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupRuleGroupParameters) DeepCopyInto(out *RuleGroupRuleGroupParameters) { + *out = *in + if in.ReferenceSets != nil { + in, out := &in.ReferenceSets, &out.ReferenceSets + *out = new(ReferenceSetsParameters) + (*in).DeepCopyInto(*out) + } + if in.RuleVariables != nil { + in, out := &in.RuleVariables, &out.RuleVariables + *out = new(RuleGroupRuleVariablesParameters) + (*in).DeepCopyInto(*out) + } + if in.RulesSource != nil { + in, out := &in.RulesSource, &out.RulesSource + *out = new(RulesSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.StatefulRuleOptions != nil { + in, out := &in.StatefulRuleOptions, &out.StatefulRuleOptions + *out = new(StatefulRuleOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupRuleGroupParameters. +func (in *RuleGroupRuleGroupParameters) DeepCopy() *RuleGroupRuleGroupParameters { + if in == nil { + return nil + } + out := new(RuleGroupRuleGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupRuleVariablesInitParameters) DeepCopyInto(out *RuleGroupRuleVariablesInitParameters) { + *out = *in + if in.IPSets != nil { + in, out := &in.IPSets, &out.IPSets + *out = make([]IPSetsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PortSets != nil { + in, out := &in.PortSets, &out.PortSets + *out = make([]PortSetsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupRuleVariablesInitParameters. 
+func (in *RuleGroupRuleVariablesInitParameters) DeepCopy() *RuleGroupRuleVariablesInitParameters { + if in == nil { + return nil + } + out := new(RuleGroupRuleVariablesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupRuleVariablesObservation) DeepCopyInto(out *RuleGroupRuleVariablesObservation) { + *out = *in + if in.IPSets != nil { + in, out := &in.IPSets, &out.IPSets + *out = make([]IPSetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PortSets != nil { + in, out := &in.PortSets, &out.PortSets + *out = make([]PortSetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupRuleVariablesObservation. +func (in *RuleGroupRuleVariablesObservation) DeepCopy() *RuleGroupRuleVariablesObservation { + if in == nil { + return nil + } + out := new(RuleGroupRuleVariablesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupRuleVariablesParameters) DeepCopyInto(out *RuleGroupRuleVariablesParameters) { + *out = *in + if in.IPSets != nil { + in, out := &in.IPSets, &out.IPSets + *out = make([]IPSetsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PortSets != nil { + in, out := &in.PortSets, &out.PortSets + *out = make([]PortSetsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupRuleVariablesParameters. 
+func (in *RuleGroupRuleVariablesParameters) DeepCopy() *RuleGroupRuleVariablesParameters { + if in == nil { + return nil + } + out := new(RuleGroupRuleVariablesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupSpec) DeepCopyInto(out *RuleGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupSpec. +func (in *RuleGroupSpec) DeepCopy() *RuleGroupSpec { + if in == nil { + return nil + } + out := new(RuleGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupStatus) DeepCopyInto(out *RuleGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupStatus. +func (in *RuleGroupStatus) DeepCopy() *RuleGroupStatus { + if in == nil { + return nil + } + out := new(RuleGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleOptionInitParameters) DeepCopyInto(out *RuleOptionInitParameters) { + *out = *in + if in.Keyword != nil { + in, out := &in.Keyword, &out.Keyword + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleOptionInitParameters. +func (in *RuleOptionInitParameters) DeepCopy() *RuleOptionInitParameters { + if in == nil { + return nil + } + out := new(RuleOptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleOptionObservation) DeepCopyInto(out *RuleOptionObservation) { + *out = *in + if in.Keyword != nil { + in, out := &in.Keyword, &out.Keyword + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleOptionObservation. +func (in *RuleOptionObservation) DeepCopy() *RuleOptionObservation { + if in == nil { + return nil + } + out := new(RuleOptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleOptionParameters) DeepCopyInto(out *RuleOptionParameters) { + *out = *in + if in.Keyword != nil { + in, out := &in.Keyword, &out.Keyword + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleOptionParameters. +func (in *RuleOptionParameters) DeepCopy() *RuleOptionParameters { + if in == nil { + return nil + } + out := new(RuleOptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleVariablesInitParameters) DeepCopyInto(out *RuleVariablesInitParameters) { + *out = *in + if in.IPSet != nil { + in, out := &in.IPSet, &out.IPSet + *out = new(IPSetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleVariablesInitParameters. +func (in *RuleVariablesInitParameters) DeepCopy() *RuleVariablesInitParameters { + if in == nil { + return nil + } + out := new(RuleVariablesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleVariablesObservation) DeepCopyInto(out *RuleVariablesObservation) { + *out = *in + if in.IPSet != nil { + in, out := &in.IPSet, &out.IPSet + *out = new(IPSetObservation) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleVariablesObservation. +func (in *RuleVariablesObservation) DeepCopy() *RuleVariablesObservation { + if in == nil { + return nil + } + out := new(RuleVariablesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleVariablesParameters) DeepCopyInto(out *RuleVariablesParameters) { + *out = *in + if in.IPSet != nil { + in, out := &in.IPSet, &out.IPSet + *out = new(IPSetParameters) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleVariablesParameters. +func (in *RuleVariablesParameters) DeepCopy() *RuleVariablesParameters { + if in == nil { + return nil + } + out := new(RuleVariablesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesSourceInitParameters) DeepCopyInto(out *RulesSourceInitParameters) { + *out = *in + if in.RulesSourceList != nil { + in, out := &in.RulesSourceList, &out.RulesSourceList + *out = new(RulesSourceListInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RulesString != nil { + in, out := &in.RulesString, &out.RulesString + *out = new(string) + **out = **in + } + if in.StatefulRule != nil { + in, out := &in.StatefulRule, &out.StatefulRule + *out = make([]StatefulRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessRulesAndCustomActions != nil { + in, out := &in.StatelessRulesAndCustomActions, &out.StatelessRulesAndCustomActions + *out = new(StatelessRulesAndCustomActionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesSourceInitParameters. +func (in *RulesSourceInitParameters) DeepCopy() *RulesSourceInitParameters { + if in == nil { + return nil + } + out := new(RulesSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesSourceListInitParameters) DeepCopyInto(out *RulesSourceListInitParameters) { + *out = *in + if in.GeneratedRulesType != nil { + in, out := &in.GeneratedRulesType, &out.GeneratedRulesType + *out = new(string) + **out = **in + } + if in.TargetTypes != nil { + in, out := &in.TargetTypes, &out.TargetTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesSourceListInitParameters. +func (in *RulesSourceListInitParameters) DeepCopy() *RulesSourceListInitParameters { + if in == nil { + return nil + } + out := new(RulesSourceListInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesSourceListObservation) DeepCopyInto(out *RulesSourceListObservation) { + *out = *in + if in.GeneratedRulesType != nil { + in, out := &in.GeneratedRulesType, &out.GeneratedRulesType + *out = new(string) + **out = **in + } + if in.TargetTypes != nil { + in, out := &in.TargetTypes, &out.TargetTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesSourceListObservation. 
+func (in *RulesSourceListObservation) DeepCopy() *RulesSourceListObservation { + if in == nil { + return nil + } + out := new(RulesSourceListObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesSourceListParameters) DeepCopyInto(out *RulesSourceListParameters) { + *out = *in + if in.GeneratedRulesType != nil { + in, out := &in.GeneratedRulesType, &out.GeneratedRulesType + *out = new(string) + **out = **in + } + if in.TargetTypes != nil { + in, out := &in.TargetTypes, &out.TargetTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesSourceListParameters. +func (in *RulesSourceListParameters) DeepCopy() *RulesSourceListParameters { + if in == nil { + return nil + } + out := new(RulesSourceListParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesSourceObservation) DeepCopyInto(out *RulesSourceObservation) { + *out = *in + if in.RulesSourceList != nil { + in, out := &in.RulesSourceList, &out.RulesSourceList + *out = new(RulesSourceListObservation) + (*in).DeepCopyInto(*out) + } + if in.RulesString != nil { + in, out := &in.RulesString, &out.RulesString + *out = new(string) + **out = **in + } + if in.StatefulRule != nil { + in, out := &in.StatefulRule, &out.StatefulRule + *out = make([]StatefulRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessRulesAndCustomActions != nil { + in, out := &in.StatelessRulesAndCustomActions, &out.StatelessRulesAndCustomActions + *out = new(StatelessRulesAndCustomActionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesSourceObservation. +func (in *RulesSourceObservation) DeepCopy() *RulesSourceObservation { + if in == nil { + return nil + } + out := new(RulesSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesSourceParameters) DeepCopyInto(out *RulesSourceParameters) { + *out = *in + if in.RulesSourceList != nil { + in, out := &in.RulesSourceList, &out.RulesSourceList + *out = new(RulesSourceListParameters) + (*in).DeepCopyInto(*out) + } + if in.RulesString != nil { + in, out := &in.RulesString, &out.RulesString + *out = new(string) + **out = **in + } + if in.StatefulRule != nil { + in, out := &in.StatefulRule, &out.StatefulRule + *out = make([]StatefulRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessRulesAndCustomActions != nil { + in, out := &in.StatelessRulesAndCustomActions, &out.StatelessRulesAndCustomActions + *out = new(StatelessRulesAndCustomActionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesSourceParameters. +func (in *RulesSourceParameters) DeepCopy() *RulesSourceParameters { + if in == nil { + return nil + } + out := new(RulesSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceInitParameters) DeepCopyInto(out *SourceInitParameters) { + *out = *in + if in.AddressDefinition != nil { + in, out := &in.AddressDefinition, &out.AddressDefinition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceInitParameters. +func (in *SourceInitParameters) DeepCopy() *SourceInitParameters { + if in == nil { + return nil + } + out := new(SourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceObservation) DeepCopyInto(out *SourceObservation) { + *out = *in + if in.AddressDefinition != nil { + in, out := &in.AddressDefinition, &out.AddressDefinition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceObservation. +func (in *SourceObservation) DeepCopy() *SourceObservation { + if in == nil { + return nil + } + out := new(SourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceParameters) DeepCopyInto(out *SourceParameters) { + *out = *in + if in.AddressDefinition != nil { + in, out := &in.AddressDefinition, &out.AddressDefinition + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceParameters. +func (in *SourceParameters) DeepCopy() *SourceParameters { + if in == nil { + return nil + } + out := new(SourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourcePortInitParameters) DeepCopyInto(out *SourcePortInitParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourcePortInitParameters. +func (in *SourcePortInitParameters) DeepCopy() *SourcePortInitParameters { + if in == nil { + return nil + } + out := new(SourcePortInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourcePortObservation) DeepCopyInto(out *SourcePortObservation) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourcePortObservation. +func (in *SourcePortObservation) DeepCopy() *SourcePortObservation { + if in == nil { + return nil + } + out := new(SourcePortObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourcePortParameters) DeepCopyInto(out *SourcePortParameters) { + *out = *in + if in.FromPort != nil { + in, out := &in.FromPort, &out.FromPort + *out = new(float64) + **out = **in + } + if in.ToPort != nil { + in, out := &in.ToPort, &out.ToPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourcePortParameters. +func (in *SourcePortParameters) DeepCopy() *SourcePortParameters { + if in == nil { + return nil + } + out := new(SourcePortParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulEngineOptionsInitParameters) DeepCopyInto(out *StatefulEngineOptionsInitParameters) { + *out = *in + if in.RuleOrder != nil { + in, out := &in.RuleOrder, &out.RuleOrder + *out = new(string) + **out = **in + } + if in.StreamExceptionPolicy != nil { + in, out := &in.StreamExceptionPolicy, &out.StreamExceptionPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulEngineOptionsInitParameters. 
+func (in *StatefulEngineOptionsInitParameters) DeepCopy() *StatefulEngineOptionsInitParameters { + if in == nil { + return nil + } + out := new(StatefulEngineOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulEngineOptionsObservation) DeepCopyInto(out *StatefulEngineOptionsObservation) { + *out = *in + if in.RuleOrder != nil { + in, out := &in.RuleOrder, &out.RuleOrder + *out = new(string) + **out = **in + } + if in.StreamExceptionPolicy != nil { + in, out := &in.StreamExceptionPolicy, &out.StreamExceptionPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulEngineOptionsObservation. +func (in *StatefulEngineOptionsObservation) DeepCopy() *StatefulEngineOptionsObservation { + if in == nil { + return nil + } + out := new(StatefulEngineOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulEngineOptionsParameters) DeepCopyInto(out *StatefulEngineOptionsParameters) { + *out = *in + if in.RuleOrder != nil { + in, out := &in.RuleOrder, &out.RuleOrder + *out = new(string) + **out = **in + } + if in.StreamExceptionPolicy != nil { + in, out := &in.StreamExceptionPolicy, &out.StreamExceptionPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulEngineOptionsParameters. +func (in *StatefulEngineOptionsParameters) DeepCopy() *StatefulEngineOptionsParameters { + if in == nil { + return nil + } + out := new(StatefulEngineOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulRuleGroupReferenceInitParameters) DeepCopyInto(out *StatefulRuleGroupReferenceInitParameters) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(OverrideInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulRuleGroupReferenceInitParameters. +func (in *StatefulRuleGroupReferenceInitParameters) DeepCopy() *StatefulRuleGroupReferenceInitParameters { + if in == nil { + return nil + } + out := new(StatefulRuleGroupReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulRuleGroupReferenceObservation) DeepCopyInto(out *StatefulRuleGroupReferenceObservation) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(OverrideObservation) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulRuleGroupReferenceObservation. 
+func (in *StatefulRuleGroupReferenceObservation) DeepCopy() *StatefulRuleGroupReferenceObservation { + if in == nil { + return nil + } + out := new(StatefulRuleGroupReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulRuleGroupReferenceParameters) DeepCopyInto(out *StatefulRuleGroupReferenceParameters) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = new(OverrideParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulRuleGroupReferenceParameters. +func (in *StatefulRuleGroupReferenceParameters) DeepCopy() *StatefulRuleGroupReferenceParameters { + if in == nil { + return nil + } + out := new(StatefulRuleGroupReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulRuleInitParameters) DeepCopyInto(out *StatefulRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(HeaderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuleOption != nil { + in, out := &in.RuleOption, &out.RuleOption + *out = make([]RuleOptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulRuleInitParameters. +func (in *StatefulRuleInitParameters) DeepCopy() *StatefulRuleInitParameters { + if in == nil { + return nil + } + out := new(StatefulRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulRuleObservation) DeepCopyInto(out *StatefulRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(HeaderObservation) + (*in).DeepCopyInto(*out) + } + if in.RuleOption != nil { + in, out := &in.RuleOption, &out.RuleOption + *out = make([]RuleOptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulRuleObservation. +func (in *StatefulRuleObservation) DeepCopy() *StatefulRuleObservation { + if in == nil { + return nil + } + out := new(StatefulRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulRuleOptionsInitParameters) DeepCopyInto(out *StatefulRuleOptionsInitParameters) { + *out = *in + if in.RuleOrder != nil { + in, out := &in.RuleOrder, &out.RuleOrder + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulRuleOptionsInitParameters. +func (in *StatefulRuleOptionsInitParameters) DeepCopy() *StatefulRuleOptionsInitParameters { + if in == nil { + return nil + } + out := new(StatefulRuleOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulRuleOptionsObservation) DeepCopyInto(out *StatefulRuleOptionsObservation) { + *out = *in + if in.RuleOrder != nil { + in, out := &in.RuleOrder, &out.RuleOrder + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulRuleOptionsObservation. +func (in *StatefulRuleOptionsObservation) DeepCopy() *StatefulRuleOptionsObservation { + if in == nil { + return nil + } + out := new(StatefulRuleOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulRuleOptionsParameters) DeepCopyInto(out *StatefulRuleOptionsParameters) { + *out = *in + if in.RuleOrder != nil { + in, out := &in.RuleOrder, &out.RuleOrder + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulRuleOptionsParameters. +func (in *StatefulRuleOptionsParameters) DeepCopy() *StatefulRuleOptionsParameters { + if in == nil { + return nil + } + out := new(StatefulRuleOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StatefulRuleParameters) DeepCopyInto(out *StatefulRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(HeaderParameters) + (*in).DeepCopyInto(*out) + } + if in.RuleOption != nil { + in, out := &in.RuleOption, &out.RuleOption + *out = make([]RuleOptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulRuleParameters. +func (in *StatefulRuleParameters) DeepCopy() *StatefulRuleParameters { + if in == nil { + return nil + } + out := new(StatefulRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatelessCustomActionInitParameters) DeepCopyInto(out *StatelessCustomActionInitParameters) { + *out = *in + if in.ActionDefinition != nil { + in, out := &in.ActionDefinition, &out.ActionDefinition + *out = new(ActionDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ActionName != nil { + in, out := &in.ActionName, &out.ActionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessCustomActionInitParameters. +func (in *StatelessCustomActionInitParameters) DeepCopy() *StatelessCustomActionInitParameters { + if in == nil { + return nil + } + out := new(StatelessCustomActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatelessCustomActionObservation) DeepCopyInto(out *StatelessCustomActionObservation) { + *out = *in + if in.ActionDefinition != nil { + in, out := &in.ActionDefinition, &out.ActionDefinition + *out = new(ActionDefinitionObservation) + (*in).DeepCopyInto(*out) + } + if in.ActionName != nil { + in, out := &in.ActionName, &out.ActionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessCustomActionObservation. +func (in *StatelessCustomActionObservation) DeepCopy() *StatelessCustomActionObservation { + if in == nil { + return nil + } + out := new(StatelessCustomActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatelessCustomActionParameters) DeepCopyInto(out *StatelessCustomActionParameters) { + *out = *in + if in.ActionDefinition != nil { + in, out := &in.ActionDefinition, &out.ActionDefinition + *out = new(ActionDefinitionParameters) + (*in).DeepCopyInto(*out) + } + if in.ActionName != nil { + in, out := &in.ActionName, &out.ActionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessCustomActionParameters. +func (in *StatelessCustomActionParameters) DeepCopy() *StatelessCustomActionParameters { + if in == nil { + return nil + } + out := new(StatelessCustomActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatelessRuleGroupReferenceInitParameters) DeepCopyInto(out *StatelessRuleGroupReferenceInitParameters) { + *out = *in + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessRuleGroupReferenceInitParameters. +func (in *StatelessRuleGroupReferenceInitParameters) DeepCopy() *StatelessRuleGroupReferenceInitParameters { + if in == nil { + return nil + } + out := new(StatelessRuleGroupReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatelessRuleGroupReferenceObservation) DeepCopyInto(out *StatelessRuleGroupReferenceObservation) { + *out = *in + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessRuleGroupReferenceObservation. +func (in *StatelessRuleGroupReferenceObservation) DeepCopy() *StatelessRuleGroupReferenceObservation { + if in == nil { + return nil + } + out := new(StatelessRuleGroupReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatelessRuleGroupReferenceParameters) DeepCopyInto(out *StatelessRuleGroupReferenceParameters) { + *out = *in + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessRuleGroupReferenceParameters. +func (in *StatelessRuleGroupReferenceParameters) DeepCopy() *StatelessRuleGroupReferenceParameters { + if in == nil { + return nil + } + out := new(StatelessRuleGroupReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatelessRuleInitParameters) DeepCopyInto(out *StatelessRuleInitParameters) { + *out = *in + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RuleDefinition != nil { + in, out := &in.RuleDefinition, &out.RuleDefinition + *out = new(RuleDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessRuleInitParameters. +func (in *StatelessRuleInitParameters) DeepCopy() *StatelessRuleInitParameters { + if in == nil { + return nil + } + out := new(StatelessRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatelessRuleObservation) DeepCopyInto(out *StatelessRuleObservation) { + *out = *in + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RuleDefinition != nil { + in, out := &in.RuleDefinition, &out.RuleDefinition + *out = new(RuleDefinitionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessRuleObservation. +func (in *StatelessRuleObservation) DeepCopy() *StatelessRuleObservation { + if in == nil { + return nil + } + out := new(StatelessRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatelessRuleParameters) DeepCopyInto(out *StatelessRuleParameters) { + *out = *in + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RuleDefinition != nil { + in, out := &in.RuleDefinition, &out.RuleDefinition + *out = new(RuleDefinitionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessRuleParameters. +func (in *StatelessRuleParameters) DeepCopy() *StatelessRuleParameters { + if in == nil { + return nil + } + out := new(StatelessRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatelessRulesAndCustomActionsInitParameters) DeepCopyInto(out *StatelessRulesAndCustomActionsInitParameters) { + *out = *in + if in.CustomAction != nil { + in, out := &in.CustomAction, &out.CustomAction + *out = make([]CustomActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessRule != nil { + in, out := &in.StatelessRule, &out.StatelessRule + *out = make([]StatelessRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessRulesAndCustomActionsInitParameters. +func (in *StatelessRulesAndCustomActionsInitParameters) DeepCopy() *StatelessRulesAndCustomActionsInitParameters { + if in == nil { + return nil + } + out := new(StatelessRulesAndCustomActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatelessRulesAndCustomActionsObservation) DeepCopyInto(out *StatelessRulesAndCustomActionsObservation) { + *out = *in + if in.CustomAction != nil { + in, out := &in.CustomAction, &out.CustomAction + *out = make([]CustomActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessRule != nil { + in, out := &in.StatelessRule, &out.StatelessRule + *out = make([]StatelessRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessRulesAndCustomActionsObservation. 
+func (in *StatelessRulesAndCustomActionsObservation) DeepCopy() *StatelessRulesAndCustomActionsObservation { + if in == nil { + return nil + } + out := new(StatelessRulesAndCustomActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatelessRulesAndCustomActionsParameters) DeepCopyInto(out *StatelessRulesAndCustomActionsParameters) { + *out = *in + if in.CustomAction != nil { + in, out := &in.CustomAction, &out.CustomAction + *out = make([]CustomActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatelessRule != nil { + in, out := &in.StatelessRule, &out.StatelessRule + *out = make([]StatelessRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatelessRulesAndCustomActionsParameters. +func (in *StatelessRulesAndCustomActionsParameters) DeepCopy() *StatelessRulesAndCustomActionsParameters { + if in == nil { + return nil + } + out := new(StatelessRulesAndCustomActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubnetMappingInitParameters) DeepCopyInto(out *SubnetMappingInitParameters) { + *out = *in + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetMappingInitParameters. +func (in *SubnetMappingInitParameters) DeepCopy() *SubnetMappingInitParameters { + if in == nil { + return nil + } + out := new(SubnetMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetMappingObservation) DeepCopyInto(out *SubnetMappingObservation) { + *out = *in + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetMappingObservation. +func (in *SubnetMappingObservation) DeepCopy() *SubnetMappingObservation { + if in == nil { + return nil + } + out := new(SubnetMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubnetMappingParameters) DeepCopyInto(out *SubnetMappingParameters) { + *out = *in + if in.IPAddressType != nil { + in, out := &in.IPAddressType, &out.IPAddressType + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetMappingParameters. +func (in *SubnetMappingParameters) DeepCopy() *SubnetMappingParameters { + if in == nil { + return nil + } + out := new(SubnetMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncStatesInitParameters) DeepCopyInto(out *SyncStatesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncStatesInitParameters. +func (in *SyncStatesInitParameters) DeepCopy() *SyncStatesInitParameters { + if in == nil { + return nil + } + out := new(SyncStatesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyncStatesObservation) DeepCopyInto(out *SyncStatesObservation) { + *out = *in + if in.Attachment != nil { + in, out := &in.Attachment, &out.Attachment + *out = make([]AttachmentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncStatesObservation. +func (in *SyncStatesObservation) DeepCopy() *SyncStatesObservation { + if in == nil { + return nil + } + out := new(SyncStatesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncStatesParameters) DeepCopyInto(out *SyncStatesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncStatesParameters. +func (in *SyncStatesParameters) DeepCopy() *SyncStatesParameters { + if in == nil { + return nil + } + out := new(SyncStatesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFlagInitParameters) DeepCopyInto(out *TCPFlagInitParameters) { + *out = *in + if in.Flags != nil { + in, out := &in.Flags, &out.Flags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Masks != nil { + in, out := &in.Masks, &out.Masks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFlagInitParameters. 
+func (in *TCPFlagInitParameters) DeepCopy() *TCPFlagInitParameters { + if in == nil { + return nil + } + out := new(TCPFlagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFlagObservation) DeepCopyInto(out *TCPFlagObservation) { + *out = *in + if in.Flags != nil { + in, out := &in.Flags, &out.Flags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Masks != nil { + in, out := &in.Masks, &out.Masks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFlagObservation. +func (in *TCPFlagObservation) DeepCopy() *TCPFlagObservation { + if in == nil { + return nil + } + out := new(TCPFlagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPFlagParameters) DeepCopyInto(out *TCPFlagParameters) { + *out = *in + if in.Flags != nil { + in, out := &in.Flags, &out.Flags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Masks != nil { + in, out := &in.Masks, &out.Masks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFlagParameters. 
+func (in *TCPFlagParameters) DeepCopy() *TCPFlagParameters { + if in == nil { + return nil + } + out := new(TCPFlagParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/networkfirewall/v1beta2/zz_generated.managed.go b/apis/networkfirewall/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..3f7a97ab73 --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Firewall. +func (mg *Firewall) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Firewall. +func (mg *Firewall) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Firewall. +func (mg *Firewall) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Firewall. +func (mg *Firewall) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Firewall. +func (mg *Firewall) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Firewall. +func (mg *Firewall) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Firewall. +func (mg *Firewall) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Firewall. +func (mg *Firewall) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Firewall. 
+func (mg *Firewall) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Firewall. +func (mg *Firewall) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Firewall. +func (mg *Firewall) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Firewall. +func (mg *Firewall) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FirewallPolicy. +func (mg *FirewallPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FirewallPolicy. +func (mg *FirewallPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FirewallPolicy. +func (mg *FirewallPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FirewallPolicy. +func (mg *FirewallPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FirewallPolicy. +func (mg *FirewallPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FirewallPolicy. +func (mg *FirewallPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FirewallPolicy. +func (mg *FirewallPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FirewallPolicy. 
+func (mg *FirewallPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FirewallPolicy. +func (mg *FirewallPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FirewallPolicy. +func (mg *FirewallPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FirewallPolicy. +func (mg *FirewallPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FirewallPolicy. +func (mg *FirewallPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LoggingConfiguration. +func (mg *LoggingConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LoggingConfiguration. +func (mg *LoggingConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LoggingConfiguration. +func (mg *LoggingConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LoggingConfiguration. +func (mg *LoggingConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LoggingConfiguration. +func (mg *LoggingConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LoggingConfiguration. 
+func (mg *LoggingConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LoggingConfiguration. +func (mg *LoggingConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LoggingConfiguration. +func (mg *LoggingConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LoggingConfiguration. +func (mg *LoggingConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LoggingConfiguration. +func (mg *LoggingConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LoggingConfiguration. +func (mg *LoggingConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LoggingConfiguration. +func (mg *LoggingConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this RuleGroup. +func (mg *RuleGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RuleGroup. +func (mg *RuleGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RuleGroup. +func (mg *RuleGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RuleGroup. +func (mg *RuleGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RuleGroup. 
+func (mg *RuleGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RuleGroup. +func (mg *RuleGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RuleGroup. +func (mg *RuleGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RuleGroup. +func (mg *RuleGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RuleGroup. +func (mg *RuleGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RuleGroup. +func (mg *RuleGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RuleGroup. +func (mg *RuleGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RuleGroup. +func (mg *RuleGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/networkfirewall/v1beta2/zz_generated.managedlist.go b/apis/networkfirewall/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..d76d84e9f7 --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this FirewallList. 
+func (l *FirewallList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FirewallPolicyList. +func (l *FirewallPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LoggingConfigurationList. +func (l *LoggingConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RuleGroupList. +func (l *RuleGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/networkfirewall/v1beta2/zz_generated.resolvers.go b/apis/networkfirewall/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..6f022fe7fc --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,372 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Firewall. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Firewall) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("networkfirewall.aws.upbound.io", "v1beta2", "FirewallPolicy", "FirewallPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FirewallPolicyArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.FirewallPolicyArnRef, + Selector: mg.Spec.ForProvider.FirewallPolicyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FirewallPolicyArn") + } + mg.Spec.ForProvider.FirewallPolicyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FirewallPolicyArnRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.SubnetMapping); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetMapping[i3].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SubnetMapping[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetMapping[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetMapping[i3].SubnetID") + } + 
mg.Spec.ForProvider.SubnetMapping[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetMapping[i3].SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCID") + } + mg.Spec.ForProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkfirewall.aws.upbound.io", "v1beta2", "FirewallPolicy", "FirewallPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FirewallPolicyArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.FirewallPolicyArnRef, + Selector: mg.Spec.InitProvider.FirewallPolicyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FirewallPolicyArn") + } + mg.Spec.InitProvider.FirewallPolicyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FirewallPolicyArnRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.SubnetMapping); i3++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetMapping[i3].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SubnetMapping[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetMapping[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetMapping[i3].SubnetID") + } + mg.Spec.InitProvider.SubnetMapping[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetMapping[i3].SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCID") + } + mg.Spec.InitProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FirewallPolicy. 
+func (mg *FirewallPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.FirewallPolicy != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.FirewallPolicy.StatefulRuleGroupReference); i4++ { + { + m, l, err = apisresolver.GetManagedResource("networkfirewall.aws.upbound.io", "v1beta2", "RuleGroup", "RuleGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArnRef, + Selector: mg.Spec.ForProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArn") + } + mg.Spec.ForProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.FirewallPolicy != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.FirewallPolicy.StatelessRuleGroupReference); i4++ { + { + m, l, err = apisresolver.GetManagedResource("networkfirewall.aws.upbound.io", "v1beta2", "RuleGroup", "RuleGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArnRef, + Selector: mg.Spec.ForProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArn") + } + mg.Spec.ForProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.FirewallPolicy != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.FirewallPolicy.StatefulRuleGroupReference); i4++ { + { + m, l, err = apisresolver.GetManagedResource("networkfirewall.aws.upbound.io", "v1beta2", "RuleGroup", "RuleGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArnRef, + Selector: mg.Spec.InitProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArn") + } + mg.Spec.InitProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FirewallPolicy.StatefulRuleGroupReference[i4].ResourceArnRef = rsp.ResolvedReference + + } + } + if 
mg.Spec.InitProvider.FirewallPolicy != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.FirewallPolicy.StatelessRuleGroupReference); i4++ { + { + m, l, err = apisresolver.GetManagedResource("networkfirewall.aws.upbound.io", "v1beta2", "RuleGroup", "RuleGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArnRef, + Selector: mg.Spec.InitProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArn") + } + mg.Spec.InitProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FirewallPolicy.StatelessRuleGroupReference[i4].ResourceArnRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this LoggingConfiguration. 
+func (mg *LoggingConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("networkfirewall.aws.upbound.io", "v1beta2", "Firewall", "FirewallList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FirewallArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.FirewallArnRef, + Selector: mg.Spec.ForProvider.FirewallArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FirewallArn") + } + mg.Spec.ForProvider.FirewallArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FirewallArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkfirewall.aws.upbound.io", "v1beta2", "Firewall", "FirewallList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FirewallArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.FirewallArnRef, + Selector: mg.Spec.InitProvider.FirewallArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FirewallArn") + } + mg.Spec.InitProvider.FirewallArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FirewallArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RuleGroup. 
+func (mg *RuleGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.RuleGroup != nil { + if mg.Spec.ForProvider.RuleGroup.ReferenceSets != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.RuleGroup.ReferenceSets.IPSetReferences); i5++ { + for i6 := 0; i6 < len(mg.Spec.ForProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference); i6++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "ManagedPrefixList", "ManagedPrefixListList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArnRef, + Selector: mg.Spec.ForProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArn") + } + mg.Spec.ForProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.RuleGroup != nil { + if mg.Spec.InitProvider.RuleGroup.ReferenceSets != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.RuleGroup.ReferenceSets.IPSetReferences); i5++ { + for i6 := 0; i6 < 
len(mg.Spec.InitProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference); i6++ { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "ManagedPrefixList", "ManagedPrefixListList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArnRef, + Selector: mg.Spec.InitProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArn") + } + mg.Spec.InitProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RuleGroup.ReferenceSets.IPSetReferences[i5].IPSetReference[i6].ReferenceArnRef = rsp.ResolvedReference + + } + } + } + } + + return nil +} diff --git a/apis/networkfirewall/v1beta2/zz_groupversion_info.go b/apis/networkfirewall/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..607eab92ef --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=networkfirewall.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "networkfirewall.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/networkfirewall/v1beta2/zz_loggingconfiguration_terraformed.go b/apis/networkfirewall/v1beta2/zz_loggingconfiguration_terraformed.go new file mode 100755 index 0000000000..d6fb1e3c66 --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_loggingconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LoggingConfiguration +func (mg *LoggingConfiguration) GetTerraformResourceType() string { + return "aws_networkfirewall_logging_configuration" +} + +// GetConnectionDetailsMapping for this LoggingConfiguration +func (tr *LoggingConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LoggingConfiguration +func (tr *LoggingConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LoggingConfiguration +func (tr *LoggingConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LoggingConfiguration +func (tr *LoggingConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LoggingConfiguration +func (tr *LoggingConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LoggingConfiguration +func (tr *LoggingConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LoggingConfiguration +func (tr *LoggingConfiguration) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this LoggingConfiguration +func (tr *LoggingConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LoggingConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LoggingConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &LoggingConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LoggingConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/networkfirewall/v1beta2/zz_loggingconfiguration_types.go b/apis/networkfirewall/v1beta2/zz_loggingconfiguration_types.go new file mode 100755 index 0000000000..a5bce6068d --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_loggingconfiguration_types.go @@ -0,0 +1,193 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LogDestinationConfigInitParameters struct { + + // A map describing the logging destination for the chosen log_destination_type. + // +mapType=granular + LogDestination map[string]*string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // The location to send logs to. Valid values: S3, CloudWatchLogs, KinesisDataFirehose. + LogDestinationType *string `json:"logDestinationType,omitempty" tf:"log_destination_type,omitempty"` + + // The type of log to send. Valid values: ALERT or FLOW. Alert logs report traffic that matches a StatefulRule with an action setting that sends a log message. Flow logs are standard network traffic flow logs. + LogType *string `json:"logType,omitempty" tf:"log_type,omitempty"` +} + +type LogDestinationConfigObservation struct { + + // A map describing the logging destination for the chosen log_destination_type. + // +mapType=granular + LogDestination map[string]*string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // The location to send logs to. Valid values: S3, CloudWatchLogs, KinesisDataFirehose. 
+ LogDestinationType *string `json:"logDestinationType,omitempty" tf:"log_destination_type,omitempty"` + + // The type of log to send. Valid values: ALERT or FLOW. Alert logs report traffic that matches a StatefulRule with an action setting that sends a log message. Flow logs are standard network traffic flow logs. + LogType *string `json:"logType,omitempty" tf:"log_type,omitempty"` +} + +type LogDestinationConfigParameters struct { + + // A map describing the logging destination for the chosen log_destination_type. + // +kubebuilder:validation:Optional + // +mapType=granular + LogDestination map[string]*string `json:"logDestination" tf:"log_destination,omitempty"` + + // The location to send logs to. Valid values: S3, CloudWatchLogs, KinesisDataFirehose. + // +kubebuilder:validation:Optional + LogDestinationType *string `json:"logDestinationType" tf:"log_destination_type,omitempty"` + + // The type of log to send. Valid values: ALERT or FLOW. Alert logs report traffic that matches a StatefulRule with an action setting that sends a log message. Flow logs are standard network traffic flow logs. + // +kubebuilder:validation:Optional + LogType *string `json:"logType" tf:"log_type,omitempty"` +} + +type LoggingConfigurationInitParameters struct { + + // The Amazon Resource Name (ARN) of the Network Firewall firewall. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkfirewall/v1beta2.Firewall + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + FirewallArn *string `json:"firewallArn,omitempty" tf:"firewall_arn,omitempty"` + + // Reference to a Firewall in networkfirewall to populate firewallArn. + // +kubebuilder:validation:Optional + FirewallArnRef *v1.Reference `json:"firewallArnRef,omitempty" tf:"-"` + + // Selector for a Firewall in networkfirewall to populate firewallArn. 
+ // +kubebuilder:validation:Optional + FirewallArnSelector *v1.Selector `json:"firewallArnSelector,omitempty" tf:"-"` + + // A configuration block describing how AWS Network Firewall performs logging for a firewall. See Logging Configuration below for details. + LoggingConfiguration *LoggingConfigurationLoggingConfigurationInitParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` +} + +type LoggingConfigurationLoggingConfigurationInitParameters struct { + + // Set of configuration blocks describing the logging details for a firewall. See Log Destination Config below for details. At most, only two blocks can be specified; one for FLOW logs and one for ALERT logs. + LogDestinationConfig []LogDestinationConfigInitParameters `json:"logDestinationConfig,omitempty" tf:"log_destination_config,omitempty"` +} + +type LoggingConfigurationLoggingConfigurationObservation struct { + + // Set of configuration blocks describing the logging details for a firewall. See Log Destination Config below for details. At most, only two blocks can be specified; one for FLOW logs and one for ALERT logs. + LogDestinationConfig []LogDestinationConfigObservation `json:"logDestinationConfig,omitempty" tf:"log_destination_config,omitempty"` +} + +type LoggingConfigurationLoggingConfigurationParameters struct { + + // Set of configuration blocks describing the logging details for a firewall. See Log Destination Config below for details. At most, only two blocks can be specified; one for FLOW logs and one for ALERT logs. + // +kubebuilder:validation:Optional + LogDestinationConfig []LogDestinationConfigParameters `json:"logDestinationConfig" tf:"log_destination_config,omitempty"` +} + +type LoggingConfigurationObservation struct { + + // The Amazon Resource Name (ARN) of the Network Firewall firewall. + FirewallArn *string `json:"firewallArn,omitempty" tf:"firewall_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the associated firewall. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A configuration block describing how AWS Network Firewall performs logging for a firewall. See Logging Configuration below for details. + LoggingConfiguration *LoggingConfigurationLoggingConfigurationObservation `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` +} + +type LoggingConfigurationParameters struct { + + // The Amazon Resource Name (ARN) of the Network Firewall firewall. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkfirewall/v1beta2.Firewall + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + FirewallArn *string `json:"firewallArn,omitempty" tf:"firewall_arn,omitempty"` + + // Reference to a Firewall in networkfirewall to populate firewallArn. + // +kubebuilder:validation:Optional + FirewallArnRef *v1.Reference `json:"firewallArnRef,omitempty" tf:"-"` + + // Selector for a Firewall in networkfirewall to populate firewallArn. + // +kubebuilder:validation:Optional + FirewallArnSelector *v1.Selector `json:"firewallArnSelector,omitempty" tf:"-"` + + // A configuration block describing how AWS Network Firewall performs logging for a firewall. See Logging Configuration below for details. + // +kubebuilder:validation:Optional + LoggingConfiguration *LoggingConfigurationLoggingConfigurationParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// LoggingConfigurationSpec defines the desired state of LoggingConfiguration +type LoggingConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LoggingConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LoggingConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// LoggingConfigurationStatus defines the observed state of LoggingConfiguration. +type LoggingConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LoggingConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LoggingConfiguration is the Schema for the LoggingConfigurations API. Provides an AWS Network Firewall Logging Configuration resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LoggingConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.loggingConfiguration) || (has(self.initProvider) && has(self.initProvider.loggingConfiguration))",message="spec.forProvider.loggingConfiguration is a required parameter" + Spec LoggingConfigurationSpec `json:"spec"` + Status LoggingConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LoggingConfigurationList contains a list of LoggingConfigurations +type LoggingConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LoggingConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + LoggingConfiguration_Kind = "LoggingConfiguration" + LoggingConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LoggingConfiguration_Kind}.String() + LoggingConfiguration_KindAPIVersion = LoggingConfiguration_Kind + "." 
+ CRDGroupVersion.String() + LoggingConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(LoggingConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&LoggingConfiguration{}, &LoggingConfigurationList{}) +} diff --git a/apis/networkfirewall/v1beta2/zz_rulegroup_terraformed.go b/apis/networkfirewall/v1beta2/zz_rulegroup_terraformed.go new file mode 100755 index 0000000000..5a4e0be630 --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_rulegroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RuleGroup +func (mg *RuleGroup) GetTerraformResourceType() string { + return "aws_networkfirewall_rule_group" +} + +// GetConnectionDetailsMapping for this RuleGroup +func (tr *RuleGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RuleGroup +func (tr *RuleGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RuleGroup +func (tr *RuleGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this RuleGroup +func (tr *RuleGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RuleGroup +func (tr *RuleGroup) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RuleGroup +func (tr *RuleGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RuleGroup +func (tr *RuleGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this RuleGroup +func (tr *RuleGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RuleGroup using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *RuleGroup) LateInitialize(attrs []byte) (bool, error) { + params := &RuleGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RuleGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/networkfirewall/v1beta2/zz_rulegroup_types.go b/apis/networkfirewall/v1beta2/zz_rulegroup_types.go new file mode 100755 index 0000000000..712a61ef92 --- /dev/null +++ b/apis/networkfirewall/v1beta2/zz_rulegroup_types.go @@ -0,0 +1,1119 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionDefinitionPublishMetricActionInitParameters struct { + + // Set of configuration blocks containing the dimension settings to use for Amazon CloudWatch custom metrics. See Dimension below for details. + Dimension []PublishMetricActionDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` +} + +type ActionDefinitionPublishMetricActionObservation struct { + + // Set of configuration blocks containing the dimension settings to use for Amazon CloudWatch custom metrics. See Dimension below for details. 
+ Dimension []PublishMetricActionDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` +} + +type ActionDefinitionPublishMetricActionParameters struct { + + // Set of configuration blocks containing the dimension settings to use for Amazon CloudWatch custom metrics. See Dimension below for details. + // +kubebuilder:validation:Optional + Dimension []PublishMetricActionDimensionParameters `json:"dimension" tf:"dimension,omitempty"` +} + +type CustomActionActionDefinitionInitParameters struct { + + // A configuration block describing the stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. You can pair this custom action with any of the standard stateless rule actions. See Publish Metric Action below for details. + PublishMetricAction *ActionDefinitionPublishMetricActionInitParameters `json:"publishMetricAction,omitempty" tf:"publish_metric_action,omitempty"` +} + +type CustomActionActionDefinitionObservation struct { + + // A configuration block describing the stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. You can pair this custom action with any of the standard stateless rule actions. See Publish Metric Action below for details. + PublishMetricAction *ActionDefinitionPublishMetricActionObservation `json:"publishMetricAction,omitempty" tf:"publish_metric_action,omitempty"` +} + +type CustomActionActionDefinitionParameters struct { + + // A configuration block describing the stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. You can pair this custom action with any of the standard stateless rule actions. See Publish Metric Action below for details. 
+ // +kubebuilder:validation:Optional + PublishMetricAction *ActionDefinitionPublishMetricActionParameters `json:"publishMetricAction" tf:"publish_metric_action,omitempty"` +} + +type CustomActionInitParameters struct { + + // A configuration block describing the custom action associated with the action_name. See Action Definition below for details. + ActionDefinition *CustomActionActionDefinitionInitParameters `json:"actionDefinition,omitempty" tf:"action_definition,omitempty"` + + // A friendly name of the custom action. + ActionName *string `json:"actionName,omitempty" tf:"action_name,omitempty"` +} + +type CustomActionObservation struct { + + // A configuration block describing the custom action associated with the action_name. See Action Definition below for details. + ActionDefinition *CustomActionActionDefinitionObservation `json:"actionDefinition,omitempty" tf:"action_definition,omitempty"` + + // A friendly name of the custom action. + ActionName *string `json:"actionName,omitempty" tf:"action_name,omitempty"` +} + +type CustomActionParameters struct { + + // A configuration block describing the custom action associated with the action_name. See Action Definition below for details. + // +kubebuilder:validation:Optional + ActionDefinition *CustomActionActionDefinitionParameters `json:"actionDefinition" tf:"action_definition,omitempty"` + + // A friendly name of the custom action. + // +kubebuilder:validation:Optional + ActionName *string `json:"actionName" tf:"action_name,omitempty"` +} + +type DestinationInitParameters struct { + + // An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. + AddressDefinition *string `json:"addressDefinition,omitempty" tf:"address_definition,omitempty"` +} + +type DestinationObservation struct { + + // An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. 
+ AddressDefinition *string `json:"addressDefinition,omitempty" tf:"address_definition,omitempty"` +} + +type DestinationParameters struct { + + // An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. + // +kubebuilder:validation:Optional + AddressDefinition *string `json:"addressDefinition" tf:"address_definition,omitempty"` +} + +type DestinationPortInitParameters struct { + + // The lower limit of the port range. This must be less than or equal to the to_port. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // The upper limit of the port range. This must be greater than or equal to the from_port. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type DestinationPortObservation struct { + + // The lower limit of the port range. This must be less than or equal to the to_port. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // The upper limit of the port range. This must be greater than or equal to the from_port. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type DestinationPortParameters struct { + + // The lower limit of the port range. This must be less than or equal to the to_port. + // +kubebuilder:validation:Optional + FromPort *float64 `json:"fromPort" tf:"from_port,omitempty"` + + // The upper limit of the port range. This must be greater than or equal to the from_port. + // +kubebuilder:validation:Optional + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type HeaderInitParameters struct { + + // Set of configuration blocks describing the destination IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any destination address. See Destination below for details. 
+ Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Set of configuration blocks describing the destination ports to inspect for. If not specified, this matches with any destination port. See Destination Port below for details. + DestinationPort *string `json:"destinationPort,omitempty" tf:"destination_port,omitempty"` + + // The direction of traffic flow to inspect. Valid values: ANY or FORWARD. + Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` + + // The protocol to inspect. Valid values: IP, TCP, UDP, ICMP, HTTP, FTP, TLS, SMB, DNS, DCERPC, SSH, SMTP, IMAP, MSN, KRB5, IKEV2, TFTP, NTP, DHCP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Set of configuration blocks describing the source IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address. See Source below for details. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // Set of configuration blocks describing the source ports to inspect for. If not specified, this matches with any source port. See Source Port below for details. + SourcePort *string `json:"sourcePort,omitempty" tf:"source_port,omitempty"` +} + +type HeaderObservation struct { + + // Set of configuration blocks describing the destination IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any destination address. See Destination below for details. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Set of configuration blocks describing the destination ports to inspect for. If not specified, this matches with any destination port. See Destination Port below for details. + DestinationPort *string `json:"destinationPort,omitempty" tf:"destination_port,omitempty"` + + // The direction of traffic flow to inspect. Valid values: ANY or FORWARD. 
+ Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` + + // The protocol to inspect. Valid values: IP, TCP, UDP, ICMP, HTTP, FTP, TLS, SMB, DNS, DCERPC, SSH, SMTP, IMAP, MSN, KRB5, IKEV2, TFTP, NTP, DHCP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Set of configuration blocks describing the source IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address. See Source below for details. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // Set of configuration blocks describing the source ports to inspect for. If not specified, this matches with any source port. See Source Port below for details. + SourcePort *string `json:"sourcePort,omitempty" tf:"source_port,omitempty"` +} + +type HeaderParameters struct { + + // Set of configuration blocks describing the destination IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any destination address. See Destination below for details. + // +kubebuilder:validation:Optional + Destination *string `json:"destination" tf:"destination,omitempty"` + + // Set of configuration blocks describing the destination ports to inspect for. If not specified, this matches with any destination port. See Destination Port below for details. + // +kubebuilder:validation:Optional + DestinationPort *string `json:"destinationPort" tf:"destination_port,omitempty"` + + // The direction of traffic flow to inspect. Valid values: ANY or FORWARD. + // +kubebuilder:validation:Optional + Direction *string `json:"direction" tf:"direction,omitempty"` + + // The protocol to inspect. Valid values: IP, TCP, UDP, ICMP, HTTP, FTP, TLS, SMB, DNS, DCERPC, SSH, SMTP, IMAP, MSN, KRB5, IKEV2, TFTP, NTP, DHCP. 
+ // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Set of configuration blocks describing the source IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address. See Source below for details. + // +kubebuilder:validation:Optional + Source *string `json:"source" tf:"source,omitempty"` + + // Set of configuration blocks describing the source ports to inspect for. If not specified, this matches with any source port. See Source Port below for details. + // +kubebuilder:validation:Optional + SourcePort *string `json:"sourcePort" tf:"source_port,omitempty"` +} + +type IPSetReferenceInitParameters struct { + + // Set of Managed Prefix IP ARN(s) + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.ManagedPrefixList + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ReferenceArn *string `json:"referenceArn,omitempty" tf:"reference_arn,omitempty"` + + // Reference to a ManagedPrefixList in ec2 to populate referenceArn. + // +kubebuilder:validation:Optional + ReferenceArnRef *v1.Reference `json:"referenceArnRef,omitempty" tf:"-"` + + // Selector for a ManagedPrefixList in ec2 to populate referenceArn. 
+ // +kubebuilder:validation:Optional + ReferenceArnSelector *v1.Selector `json:"referenceArnSelector,omitempty" tf:"-"` +} + +type IPSetReferenceObservation struct { + + // Set of Managed Prefix IP ARN(s) + ReferenceArn *string `json:"referenceArn,omitempty" tf:"reference_arn,omitempty"` +} + +type IPSetReferenceParameters struct { + + // Set of Managed Prefix IP ARN(s) + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.ManagedPrefixList + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ReferenceArn *string `json:"referenceArn,omitempty" tf:"reference_arn,omitempty"` + + // Reference to a ManagedPrefixList in ec2 to populate referenceArn. + // +kubebuilder:validation:Optional + ReferenceArnRef *v1.Reference `json:"referenceArnRef,omitempty" tf:"-"` + + // Selector for a ManagedPrefixList in ec2 to populate referenceArn. + // +kubebuilder:validation:Optional + ReferenceArnSelector *v1.Selector `json:"referenceArnSelector,omitempty" tf:"-"` +} + +type IPSetReferencesInitParameters struct { + + // Set of configuration blocks that define the IP Reference information. See IP Set Reference below for details. + IPSetReference []IPSetReferenceInitParameters `json:"ipSetReference,omitempty" tf:"ip_set_reference,omitempty"` + + // An unique alphanumeric string to identify the port_set. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type IPSetReferencesObservation struct { + + // Set of configuration blocks that define the IP Reference information. See IP Set Reference below for details. + IPSetReference []IPSetReferenceObservation `json:"ipSetReference,omitempty" tf:"ip_set_reference,omitempty"` + + // An unique alphanumeric string to identify the port_set. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type IPSetReferencesParameters struct { + + // Set of configuration blocks that define the IP Reference information. See IP Set Reference below for details. + // +kubebuilder:validation:Optional + IPSetReference []IPSetReferenceParameters `json:"ipSetReference" tf:"ip_set_reference,omitempty"` + + // An unique alphanumeric string to identify the port_set. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` +} + +type IPSetsIPSetInitParameters struct { + + // Set of port ranges. + // +listType=set + Definition []*string `json:"definition,omitempty" tf:"definition,omitempty"` +} + +type IPSetsIPSetObservation struct { + + // Set of port ranges. + // +listType=set + Definition []*string `json:"definition,omitempty" tf:"definition,omitempty"` +} + +type IPSetsIPSetParameters struct { + + // Set of port ranges. + // +kubebuilder:validation:Optional + // +listType=set + Definition []*string `json:"definition" tf:"definition,omitempty"` +} + +type IPSetsInitParameters struct { + + // A configuration block that defines a set of IP addresses. See IP Set below for details. + IPSet *IPSetsIPSetInitParameters `json:"ipSet,omitempty" tf:"ip_set,omitempty"` + + // An unique alphanumeric string to identify the port_set. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type IPSetsObservation struct { + + // A configuration block that defines a set of IP addresses. See IP Set below for details. + IPSet *IPSetsIPSetObservation `json:"ipSet,omitempty" tf:"ip_set,omitempty"` + + // An unique alphanumeric string to identify the port_set. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type IPSetsParameters struct { + + // A configuration block that defines a set of IP addresses. See IP Set below for details. 
+ // +kubebuilder:validation:Optional + IPSet *IPSetsIPSetParameters `json:"ipSet" tf:"ip_set,omitempty"` + + // An unique alphanumeric string to identify the port_set. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` +} + +type MatchAttributesInitParameters struct { + + // Set of configuration blocks describing the destination IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any destination address. See Destination below for details. + Destination []DestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Set of configuration blocks describing the destination ports to inspect for. If not specified, this matches with any destination port. See Destination Port below for details. + DestinationPort []DestinationPortInitParameters `json:"destinationPort,omitempty" tf:"destination_port,omitempty"` + + // Set of protocols to inspect for, specified using the protocol's assigned internet protocol number (IANA). If not specified, this matches with any protocol. + // +listType=set + Protocols []*float64 `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // Set of configuration blocks describing the source IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address. See Source below for details. + Source []SourceInitParameters `json:"source,omitempty" tf:"source,omitempty"` + + // Set of configuration blocks describing the source ports to inspect for. If not specified, this matches with any source port. See Source Port below for details. + SourcePort []SourcePortInitParameters `json:"sourcePort,omitempty" tf:"source_port,omitempty"` + + // Set of configuration blocks containing the TCP flags and masks to inspect for. If not specified, this matches with any settings. 
+ TCPFlag []TCPFlagInitParameters `json:"tcpFlag,omitempty" tf:"tcp_flag,omitempty"` +} + +type MatchAttributesObservation struct { + + // Set of configuration blocks describing the destination IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any destination address. See Destination below for details. + Destination []DestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Set of configuration blocks describing the destination ports to inspect for. If not specified, this matches with any destination port. See Destination Port below for details. + DestinationPort []DestinationPortObservation `json:"destinationPort,omitempty" tf:"destination_port,omitempty"` + + // Set of protocols to inspect for, specified using the protocol's assigned internet protocol number (IANA). If not specified, this matches with any protocol. + // +listType=set + Protocols []*float64 `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // Set of configuration blocks describing the source IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address. See Source below for details. + Source []SourceObservation `json:"source,omitempty" tf:"source,omitempty"` + + // Set of configuration blocks describing the source ports to inspect for. If not specified, this matches with any source port. See Source Port below for details. + SourcePort []SourcePortObservation `json:"sourcePort,omitempty" tf:"source_port,omitempty"` + + // Set of configuration blocks containing the TCP flags and masks to inspect for. If not specified, this matches with any settings. + TCPFlag []TCPFlagObservation `json:"tcpFlag,omitempty" tf:"tcp_flag,omitempty"` +} + +type MatchAttributesParameters struct { + + // Set of configuration blocks describing the destination IP address and address ranges to inspect for, in CIDR notation. 
If not specified, this matches with any destination address. See Destination below for details. + // +kubebuilder:validation:Optional + Destination []DestinationParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Set of configuration blocks describing the destination ports to inspect for. If not specified, this matches with any destination port. See Destination Port below for details. + // +kubebuilder:validation:Optional + DestinationPort []DestinationPortParameters `json:"destinationPort,omitempty" tf:"destination_port,omitempty"` + + // Set of protocols to inspect for, specified using the protocol's assigned internet protocol number (IANA). If not specified, this matches with any protocol. + // +kubebuilder:validation:Optional + // +listType=set + Protocols []*float64 `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // Set of configuration blocks describing the source IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address. See Source below for details. + // +kubebuilder:validation:Optional + Source []SourceParameters `json:"source,omitempty" tf:"source,omitempty"` + + // Set of configuration blocks describing the source ports to inspect for. If not specified, this matches with any source port. See Source Port below for details. + // +kubebuilder:validation:Optional + SourcePort []SourcePortParameters `json:"sourcePort,omitempty" tf:"source_port,omitempty"` + + // Set of configuration blocks containing the TCP flags and masks to inspect for. If not specified, this matches with any settings. + // +kubebuilder:validation:Optional + TCPFlag []TCPFlagParameters `json:"tcpFlag,omitempty" tf:"tcp_flag,omitempty"` +} + +type PortSetInitParameters struct { + + // Set of port ranges. + // +listType=set + Definition []*string `json:"definition,omitempty" tf:"definition,omitempty"` +} + +type PortSetObservation struct { + + // Set of port ranges. 
+ // +listType=set + Definition []*string `json:"definition,omitempty" tf:"definition,omitempty"` +} + +type PortSetParameters struct { + + // Set of port ranges. + // +kubebuilder:validation:Optional + // +listType=set + Definition []*string `json:"definition" tf:"definition,omitempty"` +} + +type PortSetsInitParameters struct { + + // An unique alphanumeric string to identify the port_set. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A configuration block that defines a set of port ranges. See Port Set below for details. + PortSet *PortSetInitParameters `json:"portSet,omitempty" tf:"port_set,omitempty"` +} + +type PortSetsObservation struct { + + // An unique alphanumeric string to identify the port_set. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A configuration block that defines a set of port ranges. See Port Set below for details. + PortSet *PortSetObservation `json:"portSet,omitempty" tf:"port_set,omitempty"` +} + +type PortSetsParameters struct { + + // An unique alphanumeric string to identify the port_set. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // A configuration block that defines a set of port ranges. See Port Set below for details. + // +kubebuilder:validation:Optional + PortSet *PortSetParameters `json:"portSet" tf:"port_set,omitempty"` +} + +type PublishMetricActionDimensionInitParameters struct { + + // The value to use in the custom metric dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PublishMetricActionDimensionObservation struct { + + // The value to use in the custom metric dimension. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PublishMetricActionDimensionParameters struct { + + // The value to use in the custom metric dimension. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ReferenceSetsInitParameters struct { + IPSetReferences []IPSetReferencesInitParameters `json:"ipSetReferences,omitempty" tf:"ip_set_references,omitempty"` +} + +type ReferenceSetsObservation struct { + IPSetReferences []IPSetReferencesObservation `json:"ipSetReferences,omitempty" tf:"ip_set_references,omitempty"` +} + +type ReferenceSetsParameters struct { + + // +kubebuilder:validation:Optional + IPSetReferences []IPSetReferencesParameters `json:"ipSetReferences,omitempty" tf:"ip_set_references,omitempty"` +} + +type RuleDefinitionInitParameters struct { + + // Set of actions to take on a packet that matches one of the stateless rule definition's match_attributes. For every rule you must specify 1 standard action, and you can add custom actions. Standard actions include: aws:pass, aws:drop, aws:forward_to_sfe. + // +listType=set + Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` + + // A configuration block containing criteria for AWS Network Firewall to use to inspect an individual packet in stateless rule inspection. See Match Attributes below for details. + MatchAttributes *MatchAttributesInitParameters `json:"matchAttributes,omitempty" tf:"match_attributes,omitempty"` +} + +type RuleDefinitionObservation struct { + + // Set of actions to take on a packet that matches one of the stateless rule definition's match_attributes. For every rule you must specify 1 standard action, and you can add custom actions. Standard actions include: aws:pass, aws:drop, aws:forward_to_sfe. + // +listType=set + Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` + + // A configuration block containing criteria for AWS Network Firewall to use to inspect an individual packet in stateless rule inspection. See Match Attributes below for details. 
+ MatchAttributes *MatchAttributesObservation `json:"matchAttributes,omitempty" tf:"match_attributes,omitempty"` +} + +type RuleDefinitionParameters struct { + + // Set of actions to take on a packet that matches one of the stateless rule definition's match_attributes. For every rule you must specify 1 standard action, and you can add custom actions. Standard actions include: aws:pass, aws:drop, aws:forward_to_sfe. + // +kubebuilder:validation:Optional + // +listType=set + Actions []*string `json:"actions" tf:"actions,omitempty"` + + // A configuration block containing criteria for AWS Network Firewall to use to inspect an individual packet in stateless rule inspection. See Match Attributes below for details. + // +kubebuilder:validation:Optional + MatchAttributes *MatchAttributesParameters `json:"matchAttributes" tf:"match_attributes,omitempty"` +} + +type RuleGroupEncryptionConfigurationInitParameters struct { + + // The ID of the customer managed key. You can use any of the key identifiers that KMS supports, unless you're using a key that's managed by another account. If you're using a key managed by another account, then specify the key ARN. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The type of AWS KMS key to use for encryption of your Network Firewall resources. Valid values are CUSTOMER_KMS and AWS_OWNED_KMS_KEY. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RuleGroupEncryptionConfigurationObservation struct { + + // The ID of the customer managed key. You can use any of the key identifiers that KMS supports, unless you're using a key that's managed by another account. If you're using a key managed by another account, then specify the key ARN. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The type of AWS KMS key to use for encryption of your Network Firewall resources. Valid values are CUSTOMER_KMS and AWS_OWNED_KMS_KEY. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RuleGroupEncryptionConfigurationParameters struct { + + // The ID of the customer managed key. You can use any of the key identifiers that KMS supports, unless you're using a key that's managed by another account. If you're using a key managed by another account, then specify the key ARN. + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The type of AWS KMS key to use for encryption of your Network Firewall resources. Valid values are CUSTOMER_KMS and AWS_OWNED_KMS_KEY. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RuleGroupInitParameters struct { + + // The maximum number of operating resources that this rule group can use. For a stateless rule group, the capacity required is the sum of the capacity requirements of the individual rules. For a stateful rule group, the minimum capacity required is the number of individual rules. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A friendly description of the rule group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // KMS encryption configuration settings. See Encryption Configuration below for details. + EncryptionConfiguration *RuleGroupEncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // A friendly name of the rule group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A configuration block that defines the rule group rules. Required unless rules is specified. See Rule Group below for details. + RuleGroup *RuleGroupRuleGroupInitParameters `json:"ruleGroup,omitempty" tf:"rule_group,omitempty"` + + // The stateful rule group rules specifications in Suricata file format, with one rule per line. Use this to import your existing Suricata compatible rule groups. 
Required unless rule_group is specified. + Rules *string `json:"rules,omitempty" tf:"rules,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether the rule group is stateless (containing stateless rules) or stateful (containing stateful rules). Valid values include: STATEFUL or STATELESS. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RuleGroupObservation struct { + + // The Amazon Resource Name (ARN) that identifies the rule group. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The maximum number of operating resources that this rule group can use. For a stateless rule group, the capacity required is the sum of the capacity requirements of the individual rules. For a stateful rule group, the minimum capacity required is the number of individual rules. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A friendly description of the rule group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // KMS encryption configuration settings. See Encryption Configuration below for details. + EncryptionConfiguration *RuleGroupEncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // The Amazon Resource Name (ARN) that identifies the rule group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A friendly name of the rule group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A configuration block that defines the rule group rules. Required unless rules is specified. See Rule Group below for details. + RuleGroup *RuleGroupRuleGroupObservation `json:"ruleGroup,omitempty" tf:"rule_group,omitempty"` + + // The stateful rule group rules specifications in Suricata file format, with one rule per line. Use this to import your existing Suricata compatible rule groups. 
Required unless rule_group is specified. + Rules *string `json:"rules,omitempty" tf:"rules,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether the rule group is stateless (containing stateless rules) or stateful (containing stateful rules). Valid values include: STATEFUL or STATELESS. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A string token used when updating the rule group. + UpdateToken *string `json:"updateToken,omitempty" tf:"update_token,omitempty"` +} + +type RuleGroupParameters struct { + + // The maximum number of operating resources that this rule group can use. For a stateless rule group, the capacity required is the sum of the capacity requirements of the individual rules. For a stateful rule group, the minimum capacity required is the number of individual rules. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A friendly description of the rule group. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // KMS encryption configuration settings. See Encryption Configuration below for details. + // +kubebuilder:validation:Optional + EncryptionConfiguration *RuleGroupEncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // A friendly name of the rule group. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A configuration block that defines the rule group rules. Required unless rules is specified. See Rule Group below for details. + // +kubebuilder:validation:Optional + RuleGroup *RuleGroupRuleGroupParameters `json:"ruleGroup,omitempty" tf:"rule_group,omitempty"` + + // The stateful rule group rules specifications in Suricata file format, with one rule per line. Use this to import your existing Suricata compatible rule groups. Required unless rule_group is specified. + // +kubebuilder:validation:Optional + Rules *string `json:"rules,omitempty" tf:"rules,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether the rule group is stateless (containing stateless rules) or stateful (containing stateful rules). Valid values include: STATEFUL or STATELESS. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RuleGroupRuleGroupInitParameters struct { + + // A configuration block that defines the IP Set References for the rule group. See Reference Sets below for details. Please notes that there can only be a maximum of 5 reference_sets in a rule_group. See the AWS documentation for details. + ReferenceSets *ReferenceSetsInitParameters `json:"referenceSets,omitempty" tf:"reference_sets,omitempty"` + + // A configuration block that defines additional settings available to use in the rules defined in the rule group. Can only be specified for stateful rule groups. See Rule Variables below for details. + RuleVariables *RuleGroupRuleVariablesInitParameters `json:"ruleVariables,omitempty" tf:"rule_variables,omitempty"` + + // A configuration block that defines the stateful or stateless rules for the rule group. See Rules Source below for details. 
+ RulesSource *RulesSourceInitParameters `json:"rulesSource,omitempty" tf:"rules_source,omitempty"` + + // A configuration block that defines stateful rule options for the rule group. See Stateful Rule Options below for details. + StatefulRuleOptions *StatefulRuleOptionsInitParameters `json:"statefulRuleOptions,omitempty" tf:"stateful_rule_options,omitempty"` +} + +type RuleGroupRuleGroupObservation struct { + + // A configuration block that defines the IP Set References for the rule group. See Reference Sets below for details. Please notes that there can only be a maximum of 5 reference_sets in a rule_group. See the AWS documentation for details. + ReferenceSets *ReferenceSetsObservation `json:"referenceSets,omitempty" tf:"reference_sets,omitempty"` + + // A configuration block that defines additional settings available to use in the rules defined in the rule group. Can only be specified for stateful rule groups. See Rule Variables below for details. + RuleVariables *RuleGroupRuleVariablesObservation `json:"ruleVariables,omitempty" tf:"rule_variables,omitempty"` + + // A configuration block that defines the stateful or stateless rules for the rule group. See Rules Source below for details. + RulesSource *RulesSourceObservation `json:"rulesSource,omitempty" tf:"rules_source,omitempty"` + + // A configuration block that defines stateful rule options for the rule group. See Stateful Rule Options below for details. + StatefulRuleOptions *StatefulRuleOptionsObservation `json:"statefulRuleOptions,omitempty" tf:"stateful_rule_options,omitempty"` +} + +type RuleGroupRuleGroupParameters struct { + + // A configuration block that defines the IP Set References for the rule group. See Reference Sets below for details. Please notes that there can only be a maximum of 5 reference_sets in a rule_group. See the AWS documentation for details. 
+ // +kubebuilder:validation:Optional + ReferenceSets *ReferenceSetsParameters `json:"referenceSets,omitempty" tf:"reference_sets,omitempty"` + + // A configuration block that defines additional settings available to use in the rules defined in the rule group. Can only be specified for stateful rule groups. See Rule Variables below for details. + // +kubebuilder:validation:Optional + RuleVariables *RuleGroupRuleVariablesParameters `json:"ruleVariables,omitempty" tf:"rule_variables,omitempty"` + + // A configuration block that defines the stateful or stateless rules for the rule group. See Rules Source below for details. + // +kubebuilder:validation:Optional + RulesSource *RulesSourceParameters `json:"rulesSource" tf:"rules_source,omitempty"` + + // A configuration block that defines stateful rule options for the rule group. See Stateful Rule Options below for details. + // +kubebuilder:validation:Optional + StatefulRuleOptions *StatefulRuleOptionsParameters `json:"statefulRuleOptions,omitempty" tf:"stateful_rule_options,omitempty"` +} + +type RuleGroupRuleVariablesInitParameters struct { + + // Set of configuration blocks that define IP address information. See IP Sets below for details. + IPSets []IPSetsInitParameters `json:"ipSets,omitempty" tf:"ip_sets,omitempty"` + + // Set of configuration blocks that define port range information. See Port Sets below for details. + PortSets []PortSetsInitParameters `json:"portSets,omitempty" tf:"port_sets,omitempty"` +} + +type RuleGroupRuleVariablesObservation struct { + + // Set of configuration blocks that define IP address information. See IP Sets below for details. + IPSets []IPSetsObservation `json:"ipSets,omitempty" tf:"ip_sets,omitempty"` + + // Set of configuration blocks that define port range information. See Port Sets below for details. 
+ PortSets []PortSetsObservation `json:"portSets,omitempty" tf:"port_sets,omitempty"` +} + +type RuleGroupRuleVariablesParameters struct { + + // Set of configuration blocks that define IP address information. See IP Sets below for details. + // +kubebuilder:validation:Optional + IPSets []IPSetsParameters `json:"ipSets,omitempty" tf:"ip_sets,omitempty"` + + // Set of configuration blocks that define port range information. See Port Sets below for details. + // +kubebuilder:validation:Optional + PortSets []PortSetsParameters `json:"portSets,omitempty" tf:"port_sets,omitempty"` +} + +type RuleOptionInitParameters struct { + + // Keyword defined by open source detection systems like Snort or Suricata for stateful rule inspection. + // See Snort General Rule Options or Suricata Rule Options for more details. + Keyword *string `json:"keyword,omitempty" tf:"keyword,omitempty"` + + // Set of strings for additional settings to use in stateful rule inspection. + // +listType=set + Settings []*string `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type RuleOptionObservation struct { + + // Keyword defined by open source detection systems like Snort or Suricata for stateful rule inspection. + // See Snort General Rule Options or Suricata Rule Options for more details. + Keyword *string `json:"keyword,omitempty" tf:"keyword,omitempty"` + + // Set of strings for additional settings to use in stateful rule inspection. + // +listType=set + Settings []*string `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type RuleOptionParameters struct { + + // Keyword defined by open source detection systems like Snort or Suricata for stateful rule inspection. + // See Snort General Rule Options or Suricata Rule Options for more details. + // +kubebuilder:validation:Optional + Keyword *string `json:"keyword" tf:"keyword,omitempty"` + + // Set of strings for additional settings to use in stateful rule inspection. 
+ // +kubebuilder:validation:Optional + // +listType=set + Settings []*string `json:"settings,omitempty" tf:"settings,omitempty"` +} + +type RulesSourceInitParameters struct { + + // A configuration block containing stateful inspection criteria for a domain list rule group. See Rules Source List below for details. + RulesSourceList *RulesSourceListInitParameters `json:"rulesSourceList,omitempty" tf:"rules_source_list,omitempty"` + + // The fully qualified name of a file in an S3 bucket that contains Suricata compatible intrusion preventions system (IPS) rules or the Suricata rules as a string. These rules contain stateful inspection criteria and the action to take for traffic that matches the criteria. + RulesString *string `json:"rulesString,omitempty" tf:"rules_string,omitempty"` + + // Set of configuration blocks containing stateful inspection criteria for 5-tuple rules to be used together in a rule group. See Stateful Rule below for details. + StatefulRule []StatefulRuleInitParameters `json:"statefulRule,omitempty" tf:"stateful_rule,omitempty"` + + // A configuration block containing stateless inspection criteria for a stateless rule group. See Stateless Rules and Custom Actions below for details. + StatelessRulesAndCustomActions *StatelessRulesAndCustomActionsInitParameters `json:"statelessRulesAndCustomActions,omitempty" tf:"stateless_rules_and_custom_actions,omitempty"` +} + +type RulesSourceListInitParameters struct { + + // String value to specify whether domains in the target list are allowed or denied access. Valid values: ALLOWLIST, DENYLIST. + GeneratedRulesType *string `json:"generatedRulesType,omitempty" tf:"generated_rules_type,omitempty"` + + // Set of types of domain specifications that are provided in the targets argument. Valid values: HTTP_HOST, TLS_SNI. + // +listType=set + TargetTypes []*string `json:"targetTypes,omitempty" tf:"target_types,omitempty"` + + // Set of domains that you want to inspect for in your traffic flows. 
+ // +listType=set + Targets []*string `json:"targets,omitempty" tf:"targets,omitempty"` +} + +type RulesSourceListObservation struct { + + // String value to specify whether domains in the target list are allowed or denied access. Valid values: ALLOWLIST, DENYLIST. + GeneratedRulesType *string `json:"generatedRulesType,omitempty" tf:"generated_rules_type,omitempty"` + + // Set of types of domain specifications that are provided in the targets argument. Valid values: HTTP_HOST, TLS_SNI. + // +listType=set + TargetTypes []*string `json:"targetTypes,omitempty" tf:"target_types,omitempty"` + + // Set of domains that you want to inspect for in your traffic flows. + // +listType=set + Targets []*string `json:"targets,omitempty" tf:"targets,omitempty"` +} + +type RulesSourceListParameters struct { + + // String value to specify whether domains in the target list are allowed or denied access. Valid values: ALLOWLIST, DENYLIST. + // +kubebuilder:validation:Optional + GeneratedRulesType *string `json:"generatedRulesType" tf:"generated_rules_type,omitempty"` + + // Set of types of domain specifications that are provided in the targets argument. Valid values: HTTP_HOST, TLS_SNI. + // +kubebuilder:validation:Optional + // +listType=set + TargetTypes []*string `json:"targetTypes" tf:"target_types,omitempty"` + + // Set of domains that you want to inspect for in your traffic flows. + // +kubebuilder:validation:Optional + // +listType=set + Targets []*string `json:"targets" tf:"targets,omitempty"` +} + +type RulesSourceObservation struct { + + // A configuration block containing stateful inspection criteria for a domain list rule group. See Rules Source List below for details. + RulesSourceList *RulesSourceListObservation `json:"rulesSourceList,omitempty" tf:"rules_source_list,omitempty"` + + // The fully qualified name of a file in an S3 bucket that contains Suricata compatible intrusion preventions system (IPS) rules or the Suricata rules as a string. 
These rules contain stateful inspection criteria and the action to take for traffic that matches the criteria. + RulesString *string `json:"rulesString,omitempty" tf:"rules_string,omitempty"` + + // Set of configuration blocks containing stateful inspection criteria for 5-tuple rules to be used together in a rule group. See Stateful Rule below for details. + StatefulRule []StatefulRuleObservation `json:"statefulRule,omitempty" tf:"stateful_rule,omitempty"` + + // A configuration block containing stateless inspection criteria for a stateless rule group. See Stateless Rules and Custom Actions below for details. + StatelessRulesAndCustomActions *StatelessRulesAndCustomActionsObservation `json:"statelessRulesAndCustomActions,omitempty" tf:"stateless_rules_and_custom_actions,omitempty"` +} + +type RulesSourceParameters struct { + + // A configuration block containing stateful inspection criteria for a domain list rule group. See Rules Source List below for details. + // +kubebuilder:validation:Optional + RulesSourceList *RulesSourceListParameters `json:"rulesSourceList,omitempty" tf:"rules_source_list,omitempty"` + + // The fully qualified name of a file in an S3 bucket that contains Suricata compatible intrusion preventions system (IPS) rules or the Suricata rules as a string. These rules contain stateful inspection criteria and the action to take for traffic that matches the criteria. + // +kubebuilder:validation:Optional + RulesString *string `json:"rulesString,omitempty" tf:"rules_string,omitempty"` + + // Set of configuration blocks containing stateful inspection criteria for 5-tuple rules to be used together in a rule group. See Stateful Rule below for details. + // +kubebuilder:validation:Optional + StatefulRule []StatefulRuleParameters `json:"statefulRule,omitempty" tf:"stateful_rule,omitempty"` + + // A configuration block containing stateless inspection criteria for a stateless rule group. See Stateless Rules and Custom Actions below for details. 
+ // +kubebuilder:validation:Optional + StatelessRulesAndCustomActions *StatelessRulesAndCustomActionsParameters `json:"statelessRulesAndCustomActions,omitempty" tf:"stateless_rules_and_custom_actions,omitempty"` +} + +type SourceInitParameters struct { + + // An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. + AddressDefinition *string `json:"addressDefinition,omitempty" tf:"address_definition,omitempty"` +} + +type SourceObservation struct { + + // An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. + AddressDefinition *string `json:"addressDefinition,omitempty" tf:"address_definition,omitempty"` +} + +type SourceParameters struct { + + // An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. + // +kubebuilder:validation:Optional + AddressDefinition *string `json:"addressDefinition" tf:"address_definition,omitempty"` +} + +type SourcePortInitParameters struct { + + // The lower limit of the port range. This must be less than or equal to the to_port. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // The upper limit of the port range. This must be greater than or equal to the from_port. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type SourcePortObservation struct { + + // The lower limit of the port range. This must be less than or equal to the to_port. + FromPort *float64 `json:"fromPort,omitempty" tf:"from_port,omitempty"` + + // The upper limit of the port range. This must be greater than or equal to the from_port. + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type SourcePortParameters struct { + + // The lower limit of the port range. This must be less than or equal to the to_port. 
+ // +kubebuilder:validation:Optional + FromPort *float64 `json:"fromPort" tf:"from_port,omitempty"` + + // The upper limit of the port range. This must be greater than or equal to the from_port. + // +kubebuilder:validation:Optional + ToPort *float64 `json:"toPort,omitempty" tf:"to_port,omitempty"` +} + +type StatefulRuleInitParameters struct { + + // Action to take with packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, AWS Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow. Valid values: ALERT, DROP, PASS, or REJECT. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A configuration block containing the stateful 5-tuple inspection criteria for the rule, used to inspect traffic flows. See Header below for details. + Header *HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Set of configuration blocks containing additional settings for a stateful rule. See Rule Option below for details. + RuleOption []RuleOptionInitParameters `json:"ruleOption,omitempty" tf:"rule_option,omitempty"` +} + +type StatefulRuleObservation struct { + + // Action to take with packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, AWS Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow. Valid values: ALERT, DROP, PASS, or REJECT. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A configuration block containing the stateful 5-tuple inspection criteria for the rule, used to inspect traffic flows. See Header below for details. + Header *HeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // Set of configuration blocks containing additional settings for a stateful rule. See Rule Option below for details. 
+ RuleOption []RuleOptionObservation `json:"ruleOption,omitempty" tf:"rule_option,omitempty"` +} + +type StatefulRuleOptionsInitParameters struct { + + // Indicates how to manage the order of the rule evaluation for the rule group. Default value: DEFAULT_ACTION_ORDER. Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER. + RuleOrder *string `json:"ruleOrder,omitempty" tf:"rule_order,omitempty"` +} + +type StatefulRuleOptionsObservation struct { + + // Indicates how to manage the order of the rule evaluation for the rule group. Default value: DEFAULT_ACTION_ORDER. Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER. + RuleOrder *string `json:"ruleOrder,omitempty" tf:"rule_order,omitempty"` +} + +type StatefulRuleOptionsParameters struct { + + // Indicates how to manage the order of the rule evaluation for the rule group. Default value: DEFAULT_ACTION_ORDER. Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER. + // +kubebuilder:validation:Optional + RuleOrder *string `json:"ruleOrder" tf:"rule_order,omitempty"` +} + +type StatefulRuleParameters struct { + + // Action to take with packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, AWS Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow. Valid values: ALERT, DROP, PASS, or REJECT. + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // A configuration block containing the stateful 5-tuple inspection criteria for the rule, used to inspect traffic flows. See Header below for details. + // +kubebuilder:validation:Optional + Header *HeaderParameters `json:"header" tf:"header,omitempty"` + + // Set of configuration blocks containing additional settings for a stateful rule. See Rule Option below for details. 
+ // +kubebuilder:validation:Optional + RuleOption []RuleOptionParameters `json:"ruleOption" tf:"rule_option,omitempty"` +} + +type StatelessRuleInitParameters struct { + + // A setting that indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. AWS Network Firewall evaluates the rules in a rule group starting with the lowest priority setting. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // A configuration block defining the stateless 5-tuple packet inspection criteria and the action to take on a packet that matches the criteria. See Rule Definition below for details. + RuleDefinition *RuleDefinitionInitParameters `json:"ruleDefinition,omitempty" tf:"rule_definition,omitempty"` +} + +type StatelessRuleObservation struct { + + // A setting that indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. AWS Network Firewall evaluates the rules in a rule group starting with the lowest priority setting. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // A configuration block defining the stateless 5-tuple packet inspection criteria and the action to take on a packet that matches the criteria. See Rule Definition below for details. + RuleDefinition *RuleDefinitionObservation `json:"ruleDefinition,omitempty" tf:"rule_definition,omitempty"` +} + +type StatelessRuleParameters struct { + + // A setting that indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. AWS Network Firewall evaluates the rules in a rule group starting with the lowest priority setting. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority" tf:"priority,omitempty"` + + // A configuration block defining the stateless 5-tuple packet inspection criteria and the action to take on a packet that matches the criteria. 
See Rule Definition below for details. + // +kubebuilder:validation:Optional + RuleDefinition *RuleDefinitionParameters `json:"ruleDefinition" tf:"rule_definition,omitempty"` +} + +type StatelessRulesAndCustomActionsInitParameters struct { + + // Set of configuration blocks containing custom action definitions that are available for use by the set of stateless rule. See Custom Action below for details. + CustomAction []CustomActionInitParameters `json:"customAction,omitempty" tf:"custom_action,omitempty"` + + // Set of configuration blocks containing the stateless rules for use in the stateless rule group. See Stateless Rule below for details. + StatelessRule []StatelessRuleInitParameters `json:"statelessRule,omitempty" tf:"stateless_rule,omitempty"` +} + +type StatelessRulesAndCustomActionsObservation struct { + + // Set of configuration blocks containing custom action definitions that are available for use by the set of stateless rule. See Custom Action below for details. + CustomAction []CustomActionObservation `json:"customAction,omitempty" tf:"custom_action,omitempty"` + + // Set of configuration blocks containing the stateless rules for use in the stateless rule group. See Stateless Rule below for details. + StatelessRule []StatelessRuleObservation `json:"statelessRule,omitempty" tf:"stateless_rule,omitempty"` +} + +type StatelessRulesAndCustomActionsParameters struct { + + // Set of configuration blocks containing custom action definitions that are available for use by the set of stateless rule. See Custom Action below for details. + // +kubebuilder:validation:Optional + CustomAction []CustomActionParameters `json:"customAction,omitempty" tf:"custom_action,omitempty"` + + // Set of configuration blocks containing the stateless rules for use in the stateless rule group. See Stateless Rule below for details. 
+ // +kubebuilder:validation:Optional + StatelessRule []StatelessRuleParameters `json:"statelessRule" tf:"stateless_rule,omitempty"` +} + +type TCPFlagInitParameters struct { + + // Set of flags to look for in a packet. This setting can only specify values that are also specified in masks. + // Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + // +listType=set + Flags []*string `json:"flags,omitempty" tf:"flags,omitempty"` + + // Set of flags to consider in the inspection. To inspect all flags, leave this empty. + // Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + // +listType=set + Masks []*string `json:"masks,omitempty" tf:"masks,omitempty"` +} + +type TCPFlagObservation struct { + + // Set of flags to look for in a packet. This setting can only specify values that are also specified in masks. + // Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + // +listType=set + Flags []*string `json:"flags,omitempty" tf:"flags,omitempty"` + + // Set of flags to consider in the inspection. To inspect all flags, leave this empty. + // Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + // +listType=set + Masks []*string `json:"masks,omitempty" tf:"masks,omitempty"` +} + +type TCPFlagParameters struct { + + // Set of flags to look for in a packet. This setting can only specify values that are also specified in masks. + // Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + // +kubebuilder:validation:Optional + // +listType=set + Flags []*string `json:"flags" tf:"flags,omitempty"` + + // Set of flags to consider in the inspection. To inspect all flags, leave this empty. + // Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + // +kubebuilder:validation:Optional + // +listType=set + Masks []*string `json:"masks,omitempty" tf:"masks,omitempty"` +} + +// RuleGroupSpec defines the desired state of RuleGroup +type RuleGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RuleGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RuleGroupInitParameters `json:"initProvider,omitempty"` +} + +// RuleGroupStatus defines the observed state of RuleGroup. +type RuleGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RuleGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RuleGroup is the Schema for the RuleGroups API. Provides an AWS Network Firewall Rule Group resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type RuleGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.capacity) || (has(self.initProvider) && has(self.initProvider.capacity))",message="spec.forProvider.capacity is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec RuleGroupSpec `json:"spec"` + Status RuleGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RuleGroupList contains a list of RuleGroups +type RuleGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RuleGroup `json:"items"` +} + +// Repository type metadata. 
+var ( + RuleGroup_Kind = "RuleGroup" + RuleGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RuleGroup_Kind}.String() + RuleGroup_KindAPIVersion = RuleGroup_Kind + "." + CRDGroupVersion.String() + RuleGroup_GroupVersionKind = CRDGroupVersion.WithKind(RuleGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&RuleGroup{}, &RuleGroupList{}) +} diff --git a/apis/networkmanager/v1beta1/zz_attachmentaccepter_types.go b/apis/networkmanager/v1beta1/zz_attachmentaccepter_types.go index 8a4e1e2a04..9b14e2969c 100755 --- a/apis/networkmanager/v1beta1/zz_attachmentaccepter_types.go +++ b/apis/networkmanager/v1beta1/zz_attachmentaccepter_types.go @@ -16,7 +16,7 @@ import ( type AttachmentAccepterInitParameters struct { // The ID of the attachment. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.VPCAttachment + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.VPCAttachment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() AttachmentID *string `json:"attachmentId,omitempty" tf:"attachment_id,omitempty"` @@ -29,7 +29,7 @@ type AttachmentAccepterInitParameters struct { AttachmentIDSelector *v1.Selector `json:"attachmentIdSelector,omitempty" tf:"-"` // The type of attachment. Valid values can be found in the AWS Documentation - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.VPCAttachment + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.VPCAttachment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("attachment_type",true) AttachmentType *string `json:"attachmentType,omitempty" tf:"attachment_type,omitempty"` @@ -80,7 +80,7 @@ type AttachmentAccepterObservation struct { type AttachmentAccepterParameters struct { // The ID of the attachment. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.VPCAttachment + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.VPCAttachment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional AttachmentID *string `json:"attachmentId,omitempty" tf:"attachment_id,omitempty"` @@ -94,7 +94,7 @@ type AttachmentAccepterParameters struct { AttachmentIDSelector *v1.Selector `json:"attachmentIdSelector,omitempty" tf:"-"` // The type of attachment. Valid values can be found in the AWS Documentation - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.VPCAttachment + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.VPCAttachment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("attachment_type",true) // +kubebuilder:validation:Optional AttachmentType *string `json:"attachmentType,omitempty" tf:"attachment_type,omitempty"` diff --git a/apis/networkmanager/v1beta1/zz_connection_types.go b/apis/networkmanager/v1beta1/zz_connection_types.go index 51dc2ac936..98e7fce1d0 100755 --- a/apis/networkmanager/v1beta1/zz_connection_types.go +++ b/apis/networkmanager/v1beta1/zz_connection_types.go @@ -16,7 +16,7 @@ import ( type ConnectionInitParameters struct { // The ID of the second device in the connection. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Device + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Device // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ConnectedDeviceID *string `json:"connectedDeviceId,omitempty" tf:"connected_device_id,omitempty"` @@ -35,7 +35,7 @@ type ConnectionInitParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // The ID of the first device in the connection. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Device + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Device // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` @@ -105,7 +105,7 @@ type ConnectionObservation struct { type ConnectionParameters struct { // The ID of the second device in the connection. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Device + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Device // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ConnectedDeviceID *string `json:"connectedDeviceId,omitempty" tf:"connected_device_id,omitempty"` @@ -127,7 +127,7 @@ type ConnectionParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // The ID of the first device in the connection. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Device + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Device // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` diff --git a/apis/networkmanager/v1beta1/zz_customergatewayassociation_types.go b/apis/networkmanager/v1beta1/zz_customergatewayassociation_types.go index 8ce2320fd7..645235d5ba 100755 --- a/apis/networkmanager/v1beta1/zz_customergatewayassociation_types.go +++ b/apis/networkmanager/v1beta1/zz_customergatewayassociation_types.go @@ -16,7 +16,7 @@ import ( type CustomerGatewayAssociationInitParameters struct { // The ID of the device. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Device + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Device // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` @@ -66,7 +66,7 @@ type CustomerGatewayAssociationParameters struct { CustomerGatewayArnSelector *v1.Selector `json:"customerGatewayArnSelector,omitempty" tf:"-"` // The ID of the device. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Device + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Device // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` diff --git a/apis/networkmanager/v1beta1/zz_generated.conversion_hubs.go b/apis/networkmanager/v1beta1/zz_generated.conversion_hubs.go index 76d0d61615..18ff48c8c7 100755 --- a/apis/networkmanager/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/networkmanager/v1beta1/zz_generated.conversion_hubs.go @@ -9,9 +9,6 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *AttachmentAccepter) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ConnectAttachment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Connection) Hub() {} @@ -21,26 +18,14 @@ func (tr *CoreNetwork) Hub() {} // Hub marks this type as a conversion hub. func (tr *CustomerGatewayAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Device) Hub() {} - // Hub marks this type as a conversion hub. func (tr *GlobalNetwork) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Link) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LinkAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Site) Hub() {} - // Hub marks this type as a conversion hub. func (tr *TransitGatewayConnectPeerAssociation) Hub() {} // Hub marks this type as a conversion hub. func (tr *TransitGatewayRegistration) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *VPCAttachment) Hub() {} diff --git a/apis/networkmanager/v1beta1/zz_generated.conversion_spokes.go b/apis/networkmanager/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..eaf8aafd86 --- /dev/null +++ b/apis/networkmanager/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ConnectAttachment to the hub type. +func (tr *ConnectAttachment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ConnectAttachment type. +func (tr *ConnectAttachment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Device to the hub type. 
+func (tr *Device) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Device type. +func (tr *Device) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Link to the hub type. +func (tr *Link) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Link type. +func (tr *Link) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Site to the hub type. 
+func (tr *Site) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Site type. +func (tr *Site) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPCAttachment to the hub type. +func (tr *VPCAttachment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPCAttachment type. 
+func (tr *VPCAttachment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/networkmanager/v1beta1/zz_generated.resolvers.go b/apis/networkmanager/v1beta1/zz_generated.resolvers.go index 05fc648179..f50519aeda 100644 --- a/apis/networkmanager/v1beta1/zz_generated.resolvers.go +++ b/apis/networkmanager/v1beta1/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *AttachmentAccepter) ResolveReferences( // ResolveReferences of this At var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "VPCAttachment", "VPCAttachmentList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "VPCAttachment", "VPCAttachmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -46,7 +46,7 @@ func (mg *AttachmentAccepter) ResolveReferences( // ResolveReferences of this At mg.Spec.ForProvider.AttachmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AttachmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "VPCAttachment", "VPCAttachmentList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "VPCAttachment", "VPCAttachmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -65,7 +65,7 @@ func (mg *AttachmentAccepter) ResolveReferences( // ResolveReferences of this At 
mg.Spec.ForProvider.AttachmentType = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AttachmentTypeRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "VPCAttachment", "VPCAttachmentList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "VPCAttachment", "VPCAttachmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -84,7 +84,7 @@ func (mg *AttachmentAccepter) ResolveReferences( // ResolveReferences of this At mg.Spec.InitProvider.AttachmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.AttachmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "VPCAttachment", "VPCAttachmentList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "VPCAttachment", "VPCAttachmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -241,7 +241,7 @@ func (mg *Connection) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Device", "DeviceList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Device", "DeviceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -260,7 +260,7 @@ func (mg *Connection) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.ConnectedDeviceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ConnectedDeviceIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Device", "DeviceList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Device", "DeviceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -298,7 +298,7 @@ func (mg *Connection) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.GlobalNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.GlobalNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Device", "DeviceList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Device", "DeviceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -317,7 +317,7 @@ func (mg *Connection) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.InitProvider.ConnectedDeviceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ConnectedDeviceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Device", "DeviceList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Device", "DeviceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -436,7 +436,7 @@ func (mg *CustomerGatewayAssociation) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.CustomerGatewayArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CustomerGatewayArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Device", "DeviceList") + m, l, err = 
apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Device", "DeviceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -474,7 +474,7 @@ func (mg *CustomerGatewayAssociation) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.GlobalNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.GlobalNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Device", "DeviceList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Device", "DeviceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -681,7 +681,7 @@ func (mg *LinkAssociation) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Device", "DeviceList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Device", "DeviceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -719,7 +719,7 @@ func (mg *LinkAssociation) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.GlobalNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.GlobalNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Link", "LinkList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Link", "LinkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -800,7 +800,7 @@ func (mg 
*TransitGatewayConnectPeerAssociation) ResolveReferences(ctx context.Co var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Device", "DeviceList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Device", "DeviceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -857,7 +857,7 @@ func (mg *TransitGatewayConnectPeerAssociation) ResolveReferences(ctx context.Co mg.Spec.ForProvider.TransitGatewayConnectPeerArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TransitGatewayConnectPeerArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "Device", "DeviceList") + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Device", "DeviceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/networkmanager/v1beta1/zz_linkassociation_types.go b/apis/networkmanager/v1beta1/zz_linkassociation_types.go index 8d379f9671..e4d0079a79 100755 --- a/apis/networkmanager/v1beta1/zz_linkassociation_types.go +++ b/apis/networkmanager/v1beta1/zz_linkassociation_types.go @@ -33,7 +33,7 @@ type LinkAssociationObservation struct { type LinkAssociationParameters struct { // The ID of the device. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Device + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Device // +kubebuilder:validation:Optional DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` @@ -60,7 +60,7 @@ type LinkAssociationParameters struct { GlobalNetworkIDSelector *v1.Selector `json:"globalNetworkIdSelector,omitempty" tf:"-"` // The ID of the link. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Link + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Link // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LinkID *string `json:"linkId,omitempty" tf:"link_id,omitempty"` diff --git a/apis/networkmanager/v1beta1/zz_transitgatewayconnectpeerassociation_types.go b/apis/networkmanager/v1beta1/zz_transitgatewayconnectpeerassociation_types.go index 94cd34a18a..f1d1d51a83 100755 --- a/apis/networkmanager/v1beta1/zz_transitgatewayconnectpeerassociation_types.go +++ b/apis/networkmanager/v1beta1/zz_transitgatewayconnectpeerassociation_types.go @@ -16,7 +16,7 @@ import ( type TransitGatewayConnectPeerAssociationInitParameters struct { // The ID of the device. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Device + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Device // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` @@ -52,7 +52,7 @@ type TransitGatewayConnectPeerAssociationObservation struct { type TransitGatewayConnectPeerAssociationParameters struct { // The ID of the device. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.Device + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Device // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` diff --git a/apis/networkmanager/v1beta2/zz_connectattachment_terraformed.go b/apis/networkmanager/v1beta2/zz_connectattachment_terraformed.go new file mode 100755 index 0000000000..129dbfff5f --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_connectattachment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ConnectAttachment +func (mg *ConnectAttachment) GetTerraformResourceType() string { + return "aws_networkmanager_connect_attachment" +} + +// GetConnectionDetailsMapping for this ConnectAttachment +func (tr *ConnectAttachment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ConnectAttachment +func (tr *ConnectAttachment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ConnectAttachment +func (tr *ConnectAttachment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying 
Terraform resource of this ConnectAttachment +func (tr *ConnectAttachment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ConnectAttachment +func (tr *ConnectAttachment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ConnectAttachment +func (tr *ConnectAttachment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ConnectAttachment +func (tr *ConnectAttachment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ConnectAttachment +func (tr *ConnectAttachment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ConnectAttachment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ConnectAttachment) LateInitialize(attrs []byte) (bool, error) { + params := &ConnectAttachmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ConnectAttachment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/networkmanager/v1beta2/zz_connectattachment_types.go b/apis/networkmanager/v1beta2/zz_connectattachment_types.go new file mode 100755 index 0000000000..a8c4c5e643 --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_connectattachment_types.go @@ -0,0 +1,253 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConnectAttachmentInitParameters struct { + + // The ID of a core network where you want to create the attachment. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.CoreNetwork + CoreNetworkID *string `json:"coreNetworkId,omitempty" tf:"core_network_id,omitempty"` + + // Reference to a CoreNetwork in networkmanager to populate coreNetworkId. + // +kubebuilder:validation:Optional + CoreNetworkIDRef *v1.Reference `json:"coreNetworkIdRef,omitempty" tf:"-"` + + // Selector for a CoreNetwork in networkmanager to populate coreNetworkId. + // +kubebuilder:validation:Optional + CoreNetworkIDSelector *v1.Selector `json:"coreNetworkIdSelector,omitempty" tf:"-"` + + // The Region where the edge is located. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.VPCAttachment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("edge_location",true) + EdgeLocation *string `json:"edgeLocation,omitempty" tf:"edge_location,omitempty"` + + // Reference to a VPCAttachment in networkmanager to populate edgeLocation. + // +kubebuilder:validation:Optional + EdgeLocationRef *v1.Reference `json:"edgeLocationRef,omitempty" tf:"-"` + + // Selector for a VPCAttachment in networkmanager to populate edgeLocation. + // +kubebuilder:validation:Optional + EdgeLocationSelector *v1.Selector `json:"edgeLocationSelector,omitempty" tf:"-"` + + // Options block. See options for more information. + Options *OptionsInitParameters `json:"options,omitempty" tf:"options,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the attachment between the two connections. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.VPCAttachment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TransportAttachmentID *string `json:"transportAttachmentId,omitempty" tf:"transport_attachment_id,omitempty"` + + // Reference to a VPCAttachment in networkmanager to populate transportAttachmentId. + // +kubebuilder:validation:Optional + TransportAttachmentIDRef *v1.Reference `json:"transportAttachmentIdRef,omitempty" tf:"-"` + + // Selector for a VPCAttachment in networkmanager to populate transportAttachmentId. + // +kubebuilder:validation:Optional + TransportAttachmentIDSelector *v1.Selector `json:"transportAttachmentIdSelector,omitempty" tf:"-"` +} + +type ConnectAttachmentObservation struct { + + // The ARN of the attachment. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ID of the attachment. + AttachmentID *string `json:"attachmentId,omitempty" tf:"attachment_id,omitempty"` + + // The policy rule number associated with the attachment. + AttachmentPolicyRuleNumber *float64 `json:"attachmentPolicyRuleNumber,omitempty" tf:"attachment_policy_rule_number,omitempty"` + + // The type of attachment. + AttachmentType *string `json:"attachmentType,omitempty" tf:"attachment_type,omitempty"` + + // The ARN of a core network. + CoreNetworkArn *string `json:"coreNetworkArn,omitempty" tf:"core_network_arn,omitempty"` + + // The ID of a core network where you want to create the attachment. + CoreNetworkID *string `json:"coreNetworkId,omitempty" tf:"core_network_id,omitempty"` + + // The Region where the edge is located. + EdgeLocation *string `json:"edgeLocation,omitempty" tf:"edge_location,omitempty"` + + // The ID of the attachment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Options block. See options for more information. 
+ Options *OptionsObservation `json:"options,omitempty" tf:"options,omitempty"` + + // The ID of the attachment account owner. + OwnerAccountID *string `json:"ownerAccountId,omitempty" tf:"owner_account_id,omitempty"` + + // The attachment resource ARN. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The name of the segment attachment. + SegmentName *string `json:"segmentName,omitempty" tf:"segment_name,omitempty"` + + // The state of the attachment. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The ID of the attachment between the two connections. + TransportAttachmentID *string `json:"transportAttachmentId,omitempty" tf:"transport_attachment_id,omitempty"` +} + +type ConnectAttachmentParameters struct { + + // The ID of a core network where you want to create the attachment. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.CoreNetwork + // +kubebuilder:validation:Optional + CoreNetworkID *string `json:"coreNetworkId,omitempty" tf:"core_network_id,omitempty"` + + // Reference to a CoreNetwork in networkmanager to populate coreNetworkId. + // +kubebuilder:validation:Optional + CoreNetworkIDRef *v1.Reference `json:"coreNetworkIdRef,omitempty" tf:"-"` + + // Selector for a CoreNetwork in networkmanager to populate coreNetworkId. + // +kubebuilder:validation:Optional + CoreNetworkIDSelector *v1.Selector `json:"coreNetworkIdSelector,omitempty" tf:"-"` + + // The Region where the edge is located. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.VPCAttachment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("edge_location",true) + // +kubebuilder:validation:Optional + EdgeLocation *string `json:"edgeLocation,omitempty" tf:"edge_location,omitempty"` + + // Reference to a VPCAttachment in networkmanager to populate edgeLocation. + // +kubebuilder:validation:Optional + EdgeLocationRef *v1.Reference `json:"edgeLocationRef,omitempty" tf:"-"` + + // Selector for a VPCAttachment in networkmanager to populate edgeLocation. + // +kubebuilder:validation:Optional + EdgeLocationSelector *v1.Selector `json:"edgeLocationSelector,omitempty" tf:"-"` + + // Options block. See options for more information. + // +kubebuilder:validation:Optional + Options *OptionsParameters `json:"options,omitempty" tf:"options,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the attachment between the two connections. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.VPCAttachment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TransportAttachmentID *string `json:"transportAttachmentId,omitempty" tf:"transport_attachment_id,omitempty"` + + // Reference to a VPCAttachment in networkmanager to populate transportAttachmentId. 
+ // +kubebuilder:validation:Optional + TransportAttachmentIDRef *v1.Reference `json:"transportAttachmentIdRef,omitempty" tf:"-"` + + // Selector for a VPCAttachment in networkmanager to populate transportAttachmentId. + // +kubebuilder:validation:Optional + TransportAttachmentIDSelector *v1.Selector `json:"transportAttachmentIdSelector,omitempty" tf:"-"` +} + +type OptionsInitParameters struct { + + // The protocol used for the attachment connection. Possible values are GRE and NO_ENCAP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type OptionsObservation struct { + + // The protocol used for the attachment connection. Possible values are GRE and NO_ENCAP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type OptionsParameters struct { + + // The protocol used for the attachment connection. Possible values are GRE and NO_ENCAP. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +// ConnectAttachmentSpec defines the desired state of ConnectAttachment +type ConnectAttachmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConnectAttachmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ConnectAttachmentInitParameters `json:"initProvider,omitempty"` +} + +// ConnectAttachmentStatus defines the observed state of ConnectAttachment. +type ConnectAttachmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConnectAttachmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ConnectAttachment is the Schema for the ConnectAttachments API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ConnectAttachment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.options) || (has(self.initProvider) && has(self.initProvider.options))",message="spec.forProvider.options is a required parameter" + Spec ConnectAttachmentSpec `json:"spec"` + Status ConnectAttachmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConnectAttachmentList contains a list of ConnectAttachments +type ConnectAttachmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ConnectAttachment `json:"items"` +} + +// Repository type metadata. 
+var ( + ConnectAttachment_Kind = "ConnectAttachment" + ConnectAttachment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ConnectAttachment_Kind}.String() + ConnectAttachment_KindAPIVersion = ConnectAttachment_Kind + "." + CRDGroupVersion.String() + ConnectAttachment_GroupVersionKind = CRDGroupVersion.WithKind(ConnectAttachment_Kind) +) + +func init() { + SchemeBuilder.Register(&ConnectAttachment{}, &ConnectAttachmentList{}) +} diff --git a/apis/networkmanager/v1beta2/zz_device_terraformed.go b/apis/networkmanager/v1beta2/zz_device_terraformed.go new file mode 100755 index 0000000000..ed0d9f407a --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_device_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Device +func (mg *Device) GetTerraformResourceType() string { + return "aws_networkmanager_device" +} + +// GetConnectionDetailsMapping for this Device +func (tr *Device) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Device +func (tr *Device) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Device +func (tr *Device) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Device +func (tr *Device) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + 
return *tr.Status.AtProvider.ID +} + +// GetParameters of this Device +func (tr *Device) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Device +func (tr *Device) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Device +func (tr *Device) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Device +func (tr *Device) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Device using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Device) LateInitialize(attrs []byte) (bool, error) { + params := &DeviceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Device) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/networkmanager/v1beta2/zz_device_types.go b/apis/networkmanager/v1beta2/zz_device_types.go new file mode 100755 index 0000000000..04233ffd13 --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_device_types.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AwsLocationInitParameters struct { + + // The Amazon Resource Name (ARN) of the subnet that the device is located in. + SubnetArn *string `json:"subnetArn,omitempty" tf:"subnet_arn,omitempty"` + + // The Zone that the device is located in. Specify the ID of an Availability Zone, Local Zone, Wavelength Zone, or an Outpost. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type AwsLocationObservation struct { + + // The Amazon Resource Name (ARN) of the subnet that the device is located in. + SubnetArn *string `json:"subnetArn,omitempty" tf:"subnet_arn,omitempty"` + + // The Zone that the device is located in. 
Specify the ID of an Availability Zone, Local Zone, Wavelength Zone, or an Outpost. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type AwsLocationParameters struct { + + // The Amazon Resource Name (ARN) of the subnet that the device is located in. + // +kubebuilder:validation:Optional + SubnetArn *string `json:"subnetArn,omitempty" tf:"subnet_arn,omitempty"` + + // The Zone that the device is located in. Specify the ID of an Availability Zone, Local Zone, Wavelength Zone, or an Outpost. + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type DeviceInitParameters struct { + + // The AWS location of the device. Documented below. + AwsLocation *AwsLocationInitParameters `json:"awsLocation,omitempty" tf:"aws_location,omitempty"` + + // A description of the device. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the global network. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.GlobalNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + GlobalNetworkID *string `json:"globalNetworkId,omitempty" tf:"global_network_id,omitempty"` + + // Reference to a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDRef *v1.Reference `json:"globalNetworkIdRef,omitempty" tf:"-"` + + // Selector for a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDSelector *v1.Selector `json:"globalNetworkIdSelector,omitempty" tf:"-"` + + // The location of the device. Documented below. + Location *LocationInitParameters `json:"location,omitempty" tf:"location,omitempty"` + + // The model of device. + Model *string `json:"model,omitempty" tf:"model,omitempty"` + + // The serial number of the device. 
+ SerialNumber *string `json:"serialNumber,omitempty" tf:"serial_number,omitempty"` + + // The ID of the site. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Site + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SiteID *string `json:"siteId,omitempty" tf:"site_id,omitempty"` + + // Reference to a Site in networkmanager to populate siteId. + // +kubebuilder:validation:Optional + SiteIDRef *v1.Reference `json:"siteIdRef,omitempty" tf:"-"` + + // Selector for a Site in networkmanager to populate siteId. + // +kubebuilder:validation:Optional + SiteIDSelector *v1.Selector `json:"siteIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of device. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The vendor of the device. + Vendor *string `json:"vendor,omitempty" tf:"vendor,omitempty"` +} + +type DeviceObservation struct { + + // The Amazon Resource Name (ARN) of the device. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The AWS location of the device. Documented below. + AwsLocation *AwsLocationObservation `json:"awsLocation,omitempty" tf:"aws_location,omitempty"` + + // A description of the device. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the global network. + GlobalNetworkID *string `json:"globalNetworkId,omitempty" tf:"global_network_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The location of the device. Documented below. + Location *LocationObservation `json:"location,omitempty" tf:"location,omitempty"` + + // The model of device. + Model *string `json:"model,omitempty" tf:"model,omitempty"` + + // The serial number of the device. 
+ SerialNumber *string `json:"serialNumber,omitempty" tf:"serial_number,omitempty"` + + // The ID of the site. + SiteID *string `json:"siteId,omitempty" tf:"site_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The type of device. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The vendor of the device. + Vendor *string `json:"vendor,omitempty" tf:"vendor,omitempty"` +} + +type DeviceParameters struct { + + // The AWS location of the device. Documented below. + // +kubebuilder:validation:Optional + AwsLocation *AwsLocationParameters `json:"awsLocation,omitempty" tf:"aws_location,omitempty"` + + // A description of the device. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the global network. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.GlobalNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + GlobalNetworkID *string `json:"globalNetworkId,omitempty" tf:"global_network_id,omitempty"` + + // Reference to a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDRef *v1.Reference `json:"globalNetworkIdRef,omitempty" tf:"-"` + + // Selector for a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDSelector *v1.Selector `json:"globalNetworkIdSelector,omitempty" tf:"-"` + + // The location of the device. Documented below. 
+ // +kubebuilder:validation:Optional + Location *LocationParameters `json:"location,omitempty" tf:"location,omitempty"` + + // The model of device. + // +kubebuilder:validation:Optional + Model *string `json:"model,omitempty" tf:"model,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The serial number of the device. + // +kubebuilder:validation:Optional + SerialNumber *string `json:"serialNumber,omitempty" tf:"serial_number,omitempty"` + + // The ID of the site. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Site + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SiteID *string `json:"siteId,omitempty" tf:"site_id,omitempty"` + + // Reference to a Site in networkmanager to populate siteId. + // +kubebuilder:validation:Optional + SiteIDRef *v1.Reference `json:"siteIdRef,omitempty" tf:"-"` + + // Selector for a Site in networkmanager to populate siteId. + // +kubebuilder:validation:Optional + SiteIDSelector *v1.Selector `json:"siteIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of device. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The vendor of the device. + // +kubebuilder:validation:Optional + Vendor *string `json:"vendor,omitempty" tf:"vendor,omitempty"` +} + +type LocationInitParameters struct { + + // The physical address. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The latitude. + Latitude *string `json:"latitude,omitempty" tf:"latitude,omitempty"` + + // The longitude. 
+ Longitude *string `json:"longitude,omitempty" tf:"longitude,omitempty"` +} + +type LocationObservation struct { + + // The physical address. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The latitude. + Latitude *string `json:"latitude,omitempty" tf:"latitude,omitempty"` + + // The longitude. + Longitude *string `json:"longitude,omitempty" tf:"longitude,omitempty"` +} + +type LocationParameters struct { + + // The physical address. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The latitude. + // +kubebuilder:validation:Optional + Latitude *string `json:"latitude,omitempty" tf:"latitude,omitempty"` + + // The longitude. + // +kubebuilder:validation:Optional + Longitude *string `json:"longitude,omitempty" tf:"longitude,omitempty"` +} + +// DeviceSpec defines the desired state of Device +type DeviceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DeviceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DeviceInitParameters `json:"initProvider,omitempty"` +} + +// DeviceStatus defines the observed state of Device. 
+type DeviceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DeviceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Device is the Schema for the Devices API. Creates a device in a global network. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Device struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DeviceSpec `json:"spec"` + Status DeviceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeviceList contains a list of Devices +type DeviceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Device `json:"items"` +} + +// Repository type metadata. +var ( + Device_Kind = "Device" + Device_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Device_Kind}.String() + Device_KindAPIVersion = Device_Kind + "." + CRDGroupVersion.String() + Device_GroupVersionKind = CRDGroupVersion.WithKind(Device_Kind) +) + +func init() { + SchemeBuilder.Register(&Device{}, &DeviceList{}) +} diff --git a/apis/networkmanager/v1beta2/zz_generated.conversion_hubs.go b/apis/networkmanager/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..8949fd5994 --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ConnectAttachment) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Device) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Link) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Site) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPCAttachment) Hub() {} diff --git a/apis/networkmanager/v1beta2/zz_generated.deepcopy.go b/apis/networkmanager/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f1de6e8732 --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2281 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsLocationInitParameters) DeepCopyInto(out *AwsLocationInitParameters) { + *out = *in + if in.SubnetArn != nil { + in, out := &in.SubnetArn, &out.SubnetArn + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsLocationInitParameters. +func (in *AwsLocationInitParameters) DeepCopy() *AwsLocationInitParameters { + if in == nil { + return nil + } + out := new(AwsLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AwsLocationObservation) DeepCopyInto(out *AwsLocationObservation) { + *out = *in + if in.SubnetArn != nil { + in, out := &in.SubnetArn, &out.SubnetArn + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsLocationObservation. +func (in *AwsLocationObservation) DeepCopy() *AwsLocationObservation { + if in == nil { + return nil + } + out := new(AwsLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsLocationParameters) DeepCopyInto(out *AwsLocationParameters) { + *out = *in + if in.SubnetArn != nil { + in, out := &in.SubnetArn, &out.SubnetArn + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsLocationParameters. +func (in *AwsLocationParameters) DeepCopy() *AwsLocationParameters { + if in == nil { + return nil + } + out := new(AwsLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BandwidthInitParameters) DeepCopyInto(out *BandwidthInitParameters) { + *out = *in + if in.DownloadSpeed != nil { + in, out := &in.DownloadSpeed, &out.DownloadSpeed + *out = new(float64) + **out = **in + } + if in.UploadSpeed != nil { + in, out := &in.UploadSpeed, &out.UploadSpeed + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BandwidthInitParameters. 
+func (in *BandwidthInitParameters) DeepCopy() *BandwidthInitParameters { + if in == nil { + return nil + } + out := new(BandwidthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BandwidthObservation) DeepCopyInto(out *BandwidthObservation) { + *out = *in + if in.DownloadSpeed != nil { + in, out := &in.DownloadSpeed, &out.DownloadSpeed + *out = new(float64) + **out = **in + } + if in.UploadSpeed != nil { + in, out := &in.UploadSpeed, &out.UploadSpeed + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BandwidthObservation. +func (in *BandwidthObservation) DeepCopy() *BandwidthObservation { + if in == nil { + return nil + } + out := new(BandwidthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BandwidthParameters) DeepCopyInto(out *BandwidthParameters) { + *out = *in + if in.DownloadSpeed != nil { + in, out := &in.DownloadSpeed, &out.DownloadSpeed + *out = new(float64) + **out = **in + } + if in.UploadSpeed != nil { + in, out := &in.UploadSpeed, &out.UploadSpeed + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BandwidthParameters. +func (in *BandwidthParameters) DeepCopy() *BandwidthParameters { + if in == nil { + return nil + } + out := new(BandwidthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectAttachment) DeepCopyInto(out *ConnectAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectAttachment. +func (in *ConnectAttachment) DeepCopy() *ConnectAttachment { + if in == nil { + return nil + } + out := new(ConnectAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConnectAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectAttachmentInitParameters) DeepCopyInto(out *ConnectAttachmentInitParameters) { + *out = *in + if in.CoreNetworkID != nil { + in, out := &in.CoreNetworkID, &out.CoreNetworkID + *out = new(string) + **out = **in + } + if in.CoreNetworkIDRef != nil { + in, out := &in.CoreNetworkIDRef, &out.CoreNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CoreNetworkIDSelector != nil { + in, out := &in.CoreNetworkIDSelector, &out.CoreNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EdgeLocation != nil { + in, out := &in.EdgeLocation, &out.EdgeLocation + *out = new(string) + **out = **in + } + if in.EdgeLocationRef != nil { + in, out := &in.EdgeLocationRef, &out.EdgeLocationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EdgeLocationSelector != nil { + in, out := &in.EdgeLocationSelector, &out.EdgeLocationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, 
out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TransportAttachmentID != nil { + in, out := &in.TransportAttachmentID, &out.TransportAttachmentID + *out = new(string) + **out = **in + } + if in.TransportAttachmentIDRef != nil { + in, out := &in.TransportAttachmentIDRef, &out.TransportAttachmentIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TransportAttachmentIDSelector != nil { + in, out := &in.TransportAttachmentIDSelector, &out.TransportAttachmentIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectAttachmentInitParameters. +func (in *ConnectAttachmentInitParameters) DeepCopy() *ConnectAttachmentInitParameters { + if in == nil { + return nil + } + out := new(ConnectAttachmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectAttachmentList) DeepCopyInto(out *ConnectAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConnectAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectAttachmentList. +func (in *ConnectAttachmentList) DeepCopy() *ConnectAttachmentList { + if in == nil { + return nil + } + out := new(ConnectAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ConnectAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectAttachmentObservation) DeepCopyInto(out *ConnectAttachmentObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AttachmentID != nil { + in, out := &in.AttachmentID, &out.AttachmentID + *out = new(string) + **out = **in + } + if in.AttachmentPolicyRuleNumber != nil { + in, out := &in.AttachmentPolicyRuleNumber, &out.AttachmentPolicyRuleNumber + *out = new(float64) + **out = **in + } + if in.AttachmentType != nil { + in, out := &in.AttachmentType, &out.AttachmentType + *out = new(string) + **out = **in + } + if in.CoreNetworkArn != nil { + in, out := &in.CoreNetworkArn, &out.CoreNetworkArn + *out = new(string) + **out = **in + } + if in.CoreNetworkID != nil { + in, out := &in.CoreNetworkID, &out.CoreNetworkID + *out = new(string) + **out = **in + } + if in.EdgeLocation != nil { + in, out := &in.EdgeLocation, &out.EdgeLocation + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.OwnerAccountID != nil { + in, out := &in.OwnerAccountID, &out.OwnerAccountID + *out = new(string) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.SegmentName != nil { + in, out := &in.SegmentName, &out.SegmentName + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for 
key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TransportAttachmentID != nil { + in, out := &in.TransportAttachmentID, &out.TransportAttachmentID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectAttachmentObservation. +func (in *ConnectAttachmentObservation) DeepCopy() *ConnectAttachmentObservation { + if in == nil { + return nil + } + out := new(ConnectAttachmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectAttachmentParameters) DeepCopyInto(out *ConnectAttachmentParameters) { + *out = *in + if in.CoreNetworkID != nil { + in, out := &in.CoreNetworkID, &out.CoreNetworkID + *out = new(string) + **out = **in + } + if in.CoreNetworkIDRef != nil { + in, out := &in.CoreNetworkIDRef, &out.CoreNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CoreNetworkIDSelector != nil { + in, out := &in.CoreNetworkIDSelector, &out.CoreNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EdgeLocation != nil { + in, out := &in.EdgeLocation, &out.EdgeLocation + *out = new(string) + **out = **in + } + if in.EdgeLocationRef != nil { + in, out := &in.EdgeLocationRef, &out.EdgeLocationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EdgeLocationSelector != nil { + in, out := &in.EdgeLocationSelector, &out.EdgeLocationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(OptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TransportAttachmentID != nil { + in, out := &in.TransportAttachmentID, &out.TransportAttachmentID + *out = new(string) + **out = **in + } + if in.TransportAttachmentIDRef != nil { + in, out := &in.TransportAttachmentIDRef, &out.TransportAttachmentIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TransportAttachmentIDSelector != nil { + in, out := &in.TransportAttachmentIDSelector, &out.TransportAttachmentIDSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectAttachmentParameters. +func (in *ConnectAttachmentParameters) DeepCopy() *ConnectAttachmentParameters { + if in == nil { + return nil + } + out := new(ConnectAttachmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectAttachmentSpec) DeepCopyInto(out *ConnectAttachmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectAttachmentSpec. +func (in *ConnectAttachmentSpec) DeepCopy() *ConnectAttachmentSpec { + if in == nil { + return nil + } + out := new(ConnectAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectAttachmentStatus) DeepCopyInto(out *ConnectAttachmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectAttachmentStatus. +func (in *ConnectAttachmentStatus) DeepCopy() *ConnectAttachmentStatus { + if in == nil { + return nil + } + out := new(ConnectAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Device) DeepCopyInto(out *Device) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. +func (in *Device) DeepCopy() *Device { + if in == nil { + return nil + } + out := new(Device) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Device) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceInitParameters) DeepCopyInto(out *DeviceInitParameters) { + *out = *in + if in.AwsLocation != nil { + in, out := &in.AwsLocation, &out.AwsLocation + *out = new(AwsLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GlobalNetworkID != nil { + in, out := &in.GlobalNetworkID, &out.GlobalNetworkID + *out = new(string) + **out = **in + } + if in.GlobalNetworkIDRef != nil { + in, out := &in.GlobalNetworkIDRef, &out.GlobalNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GlobalNetworkIDSelector != nil { + in, out := &in.GlobalNetworkIDSelector, &out.GlobalNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(LocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Model != nil { + in, out := &in.Model, &out.Model + *out = new(string) + **out = **in + } + if in.SerialNumber != nil { + in, out := &in.SerialNumber, &out.SerialNumber + *out = new(string) + **out = **in + } + if in.SiteID != nil { + in, out := &in.SiteID, 
&out.SiteID + *out = new(string) + **out = **in + } + if in.SiteIDRef != nil { + in, out := &in.SiteIDRef, &out.SiteIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SiteIDSelector != nil { + in, out := &in.SiteIDSelector, &out.SiteIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Vendor != nil { + in, out := &in.Vendor, &out.Vendor + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceInitParameters. +func (in *DeviceInitParameters) DeepCopy() *DeviceInitParameters { + if in == nil { + return nil + } + out := new(DeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceList) DeepCopyInto(out *DeviceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Device, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceList. +func (in *DeviceList) DeepCopy() *DeviceList { + if in == nil { + return nil + } + out := new(DeviceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DeviceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceObservation) DeepCopyInto(out *DeviceObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AwsLocation != nil { + in, out := &in.AwsLocation, &out.AwsLocation + *out = new(AwsLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GlobalNetworkID != nil { + in, out := &in.GlobalNetworkID, &out.GlobalNetworkID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(LocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Model != nil { + in, out := &in.Model, &out.Model + *out = new(string) + **out = **in + } + if in.SerialNumber != nil { + in, out := &in.SerialNumber, &out.SerialNumber + *out = new(string) + **out = **in + } + if in.SiteID != nil { + in, out := &in.SiteID, &out.SiteID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if 
in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Vendor != nil { + in, out := &in.Vendor, &out.Vendor + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceObservation. +func (in *DeviceObservation) DeepCopy() *DeviceObservation { + if in == nil { + return nil + } + out := new(DeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceParameters) DeepCopyInto(out *DeviceParameters) { + *out = *in + if in.AwsLocation != nil { + in, out := &in.AwsLocation, &out.AwsLocation + *out = new(AwsLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GlobalNetworkID != nil { + in, out := &in.GlobalNetworkID, &out.GlobalNetworkID + *out = new(string) + **out = **in + } + if in.GlobalNetworkIDRef != nil { + in, out := &in.GlobalNetworkIDRef, &out.GlobalNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GlobalNetworkIDSelector != nil { + in, out := &in.GlobalNetworkIDSelector, &out.GlobalNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(LocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Model != nil { + in, out := &in.Model, &out.Model + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SerialNumber != nil { + in, out := &in.SerialNumber, &out.SerialNumber + *out = new(string) + **out = **in + } + if in.SiteID != nil { + in, out := &in.SiteID, &out.SiteID + *out = new(string) + **out = **in + } + if in.SiteIDRef != nil { + in, out := &in.SiteIDRef, &out.SiteIDRef 
+ *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SiteIDSelector != nil { + in, out := &in.SiteIDSelector, &out.SiteIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Vendor != nil { + in, out := &in.Vendor, &out.Vendor + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceParameters. +func (in *DeviceParameters) DeepCopy() *DeviceParameters { + if in == nil { + return nil + } + out := new(DeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceSpec) DeepCopyInto(out *DeviceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSpec. +func (in *DeviceSpec) DeepCopy() *DeviceSpec { + if in == nil { + return nil + } + out := new(DeviceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceStatus) DeepCopyInto(out *DeviceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceStatus. 
+func (in *DeviceStatus) DeepCopy() *DeviceStatus { + if in == nil { + return nil + } + out := new(DeviceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Link) DeepCopyInto(out *Link) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link. +func (in *Link) DeepCopy() *Link { + if in == nil { + return nil + } + out := new(Link) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Link) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkInitParameters) DeepCopyInto(out *LinkInitParameters) { + *out = *in + if in.Bandwidth != nil { + in, out := &in.Bandwidth, &out.Bandwidth + *out = new(BandwidthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GlobalNetworkID != nil { + in, out := &in.GlobalNetworkID, &out.GlobalNetworkID + *out = new(string) + **out = **in + } + if in.GlobalNetworkIDRef != nil { + in, out := &in.GlobalNetworkIDRef, &out.GlobalNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GlobalNetworkIDSelector != nil { + in, out := &in.GlobalNetworkIDSelector, &out.GlobalNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ProviderName != nil { + in, out := &in.ProviderName, &out.ProviderName + *out = new(string) + **out = **in + } + if in.SiteID != nil { + in, out := &in.SiteID, &out.SiteID + *out = new(string) + **out = **in + } + if in.SiteIDRef != nil { + in, out := &in.SiteIDRef, &out.SiteIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SiteIDSelector != nil { + in, out := &in.SiteIDSelector, &out.SiteIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkInitParameters. 
+func (in *LinkInitParameters) DeepCopy() *LinkInitParameters { + if in == nil { + return nil + } + out := new(LinkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkList) DeepCopyInto(out *LinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Link, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkList. +func (in *LinkList) DeepCopy() *LinkList { + if in == nil { + return nil + } + out := new(LinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkObservation) DeepCopyInto(out *LinkObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Bandwidth != nil { + in, out := &in.Bandwidth, &out.Bandwidth + *out = new(BandwidthObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GlobalNetworkID != nil { + in, out := &in.GlobalNetworkID, &out.GlobalNetworkID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ProviderName != nil { + in, out := &in.ProviderName, &out.ProviderName + *out = new(string) + **out = **in + } + if in.SiteID != nil { + in, out := &in.SiteID, &out.SiteID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkObservation. +func (in *LinkObservation) DeepCopy() *LinkObservation { + if in == nil { + return nil + } + out := new(LinkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkParameters) DeepCopyInto(out *LinkParameters) { + *out = *in + if in.Bandwidth != nil { + in, out := &in.Bandwidth, &out.Bandwidth + *out = new(BandwidthParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GlobalNetworkID != nil { + in, out := &in.GlobalNetworkID, &out.GlobalNetworkID + *out = new(string) + **out = **in + } + if in.GlobalNetworkIDRef != nil { + in, out := &in.GlobalNetworkIDRef, &out.GlobalNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GlobalNetworkIDSelector != nil { + in, out := &in.GlobalNetworkIDSelector, &out.GlobalNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ProviderName != nil { + in, out := &in.ProviderName, &out.ProviderName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SiteID != nil { + in, out := &in.SiteID, &out.SiteID + *out = new(string) + **out = **in + } + if in.SiteIDRef != nil { + in, out := &in.SiteIDRef, &out.SiteIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SiteIDSelector != nil { + in, out := &in.SiteIDSelector, &out.SiteIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkParameters. 
+func (in *LinkParameters) DeepCopy() *LinkParameters { + if in == nil { + return nil + } + out := new(LinkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkSpec) DeepCopyInto(out *LinkSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkSpec. +func (in *LinkSpec) DeepCopy() *LinkSpec { + if in == nil { + return nil + } + out := new(LinkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkStatus) DeepCopyInto(out *LinkStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkStatus. +func (in *LinkStatus) DeepCopy() *LinkStatus { + if in == nil { + return nil + } + out := new(LinkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationInitParameters) DeepCopyInto(out *LocationInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Latitude != nil { + in, out := &in.Latitude, &out.Latitude + *out = new(string) + **out = **in + } + if in.Longitude != nil { + in, out := &in.Longitude, &out.Longitude + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationInitParameters. 
+func (in *LocationInitParameters) DeepCopy() *LocationInitParameters { + if in == nil { + return nil + } + out := new(LocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationObservation) DeepCopyInto(out *LocationObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Latitude != nil { + in, out := &in.Latitude, &out.Latitude + *out = new(string) + **out = **in + } + if in.Longitude != nil { + in, out := &in.Longitude, &out.Longitude + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationObservation. +func (in *LocationObservation) DeepCopy() *LocationObservation { + if in == nil { + return nil + } + out := new(LocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationParameters) DeepCopyInto(out *LocationParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Latitude != nil { + in, out := &in.Latitude, &out.Latitude + *out = new(string) + **out = **in + } + if in.Longitude != nil { + in, out := &in.Longitude, &out.Longitude + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationParameters. +func (in *LocationParameters) DeepCopy() *LocationParameters { + if in == nil { + return nil + } + out := new(LocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OptionsInitParameters) DeepCopyInto(out *OptionsInitParameters) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsInitParameters. +func (in *OptionsInitParameters) DeepCopy() *OptionsInitParameters { + if in == nil { + return nil + } + out := new(OptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsObservation) DeepCopyInto(out *OptionsObservation) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsObservation. +func (in *OptionsObservation) DeepCopy() *OptionsObservation { + if in == nil { + return nil + } + out := new(OptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsParameters) DeepCopyInto(out *OptionsParameters) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsParameters. +func (in *OptionsParameters) DeepCopy() *OptionsParameters { + if in == nil { + return nil + } + out := new(OptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Site) DeepCopyInto(out *Site) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Site. +func (in *Site) DeepCopy() *Site { + if in == nil { + return nil + } + out := new(Site) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Site) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteInitParameters) DeepCopyInto(out *SiteInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GlobalNetworkID != nil { + in, out := &in.GlobalNetworkID, &out.GlobalNetworkID + *out = new(string) + **out = **in + } + if in.GlobalNetworkIDRef != nil { + in, out := &in.GlobalNetworkIDRef, &out.GlobalNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GlobalNetworkIDSelector != nil { + in, out := &in.GlobalNetworkIDSelector, &out.GlobalNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(SiteLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
SiteInitParameters. +func (in *SiteInitParameters) DeepCopy() *SiteInitParameters { + if in == nil { + return nil + } + out := new(SiteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteList) DeepCopyInto(out *SiteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Site, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteList. +func (in *SiteList) DeepCopy() *SiteList { + if in == nil { + return nil + } + out := new(SiteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SiteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteLocationInitParameters) DeepCopyInto(out *SiteLocationInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Latitude != nil { + in, out := &in.Latitude, &out.Latitude + *out = new(string) + **out = **in + } + if in.Longitude != nil { + in, out := &in.Longitude, &out.Longitude + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteLocationInitParameters. 
+func (in *SiteLocationInitParameters) DeepCopy() *SiteLocationInitParameters { + if in == nil { + return nil + } + out := new(SiteLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteLocationObservation) DeepCopyInto(out *SiteLocationObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Latitude != nil { + in, out := &in.Latitude, &out.Latitude + *out = new(string) + **out = **in + } + if in.Longitude != nil { + in, out := &in.Longitude, &out.Longitude + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteLocationObservation. +func (in *SiteLocationObservation) DeepCopy() *SiteLocationObservation { + if in == nil { + return nil + } + out := new(SiteLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteLocationParameters) DeepCopyInto(out *SiteLocationParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Latitude != nil { + in, out := &in.Latitude, &out.Latitude + *out = new(string) + **out = **in + } + if in.Longitude != nil { + in, out := &in.Longitude, &out.Longitude + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteLocationParameters. +func (in *SiteLocationParameters) DeepCopy() *SiteLocationParameters { + if in == nil { + return nil + } + out := new(SiteLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteObservation) DeepCopyInto(out *SiteObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GlobalNetworkID != nil { + in, out := &in.GlobalNetworkID, &out.GlobalNetworkID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(SiteLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteObservation. +func (in *SiteObservation) DeepCopy() *SiteObservation { + if in == nil { + return nil + } + out := new(SiteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteParameters) DeepCopyInto(out *SiteParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.GlobalNetworkID != nil { + in, out := &in.GlobalNetworkID, &out.GlobalNetworkID + *out = new(string) + **out = **in + } + if in.GlobalNetworkIDRef != nil { + in, out := &in.GlobalNetworkIDRef, &out.GlobalNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GlobalNetworkIDSelector != nil { + in, out := &in.GlobalNetworkIDSelector, &out.GlobalNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(SiteLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteParameters. +func (in *SiteParameters) DeepCopy() *SiteParameters { + if in == nil { + return nil + } + out := new(SiteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteSpec) DeepCopyInto(out *SiteSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteSpec. 
+func (in *SiteSpec) DeepCopy() *SiteSpec { + if in == nil { + return nil + } + out := new(SiteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteStatus) DeepCopyInto(out *SiteStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteStatus. +func (in *SiteStatus) DeepCopy() *SiteStatus { + if in == nil { + return nil + } + out := new(SiteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCAttachment) DeepCopyInto(out *VPCAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCAttachment. +func (in *VPCAttachment) DeepCopy() *VPCAttachment { + if in == nil { + return nil + } + out := new(VPCAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCAttachmentInitParameters) DeepCopyInto(out *VPCAttachmentInitParameters) { + *out = *in + if in.CoreNetworkID != nil { + in, out := &in.CoreNetworkID, &out.CoreNetworkID + *out = new(string) + **out = **in + } + if in.CoreNetworkIDRef != nil { + in, out := &in.CoreNetworkIDRef, &out.CoreNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CoreNetworkIDSelector != nil { + in, out := &in.CoreNetworkIDSelector, &out.CoreNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(VPCAttachmentOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetArns != nil { + in, out := &in.SubnetArns, &out.SubnetArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetArnsRefs != nil { + in, out := &in.SubnetArnsRefs, &out.SubnetArnsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetArnsSelector != nil { + in, out := &in.SubnetArnsSelector, &out.SubnetArnsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCArn != nil { + in, out := &in.VPCArn, &out.VPCArn + *out = new(string) + **out = **in + } + if in.VPCArnRef != nil { + in, out := &in.VPCArnRef, &out.VPCArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCArnSelector != nil { + in, out := &in.VPCArnSelector, &out.VPCArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new VPCAttachmentInitParameters. +func (in *VPCAttachmentInitParameters) DeepCopy() *VPCAttachmentInitParameters { + if in == nil { + return nil + } + out := new(VPCAttachmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCAttachmentList) DeepCopyInto(out *VPCAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPCAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCAttachmentList. +func (in *VPCAttachmentList) DeepCopy() *VPCAttachmentList { + if in == nil { + return nil + } + out := new(VPCAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCAttachmentObservation) DeepCopyInto(out *VPCAttachmentObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AttachmentPolicyRuleNumber != nil { + in, out := &in.AttachmentPolicyRuleNumber, &out.AttachmentPolicyRuleNumber + *out = new(float64) + **out = **in + } + if in.AttachmentType != nil { + in, out := &in.AttachmentType, &out.AttachmentType + *out = new(string) + **out = **in + } + if in.CoreNetworkArn != nil { + in, out := &in.CoreNetworkArn, &out.CoreNetworkArn + *out = new(string) + **out = **in + } + if in.CoreNetworkID != nil { + in, out := &in.CoreNetworkID, &out.CoreNetworkID + *out = new(string) + **out = **in + } + if in.EdgeLocation != nil { + in, out := &in.EdgeLocation, &out.EdgeLocation + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(VPCAttachmentOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.OwnerAccountID != nil { + in, out := &in.OwnerAccountID, &out.OwnerAccountID + *out = new(string) + **out = **in + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.SegmentName != nil { + in, out := &in.SegmentName, &out.SegmentName + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.SubnetArns != nil { + in, out := &in.SubnetArns, &out.SubnetArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := 
&inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCArn != nil { + in, out := &in.VPCArn, &out.VPCArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCAttachmentObservation. +func (in *VPCAttachmentObservation) DeepCopy() *VPCAttachmentObservation { + if in == nil { + return nil + } + out := new(VPCAttachmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCAttachmentOptionsInitParameters) DeepCopyInto(out *VPCAttachmentOptionsInitParameters) { + *out = *in + if in.ApplianceModeSupport != nil { + in, out := &in.ApplianceModeSupport, &out.ApplianceModeSupport + *out = new(bool) + **out = **in + } + if in.IPv6Support != nil { + in, out := &in.IPv6Support, &out.IPv6Support + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCAttachmentOptionsInitParameters. +func (in *VPCAttachmentOptionsInitParameters) DeepCopy() *VPCAttachmentOptionsInitParameters { + if in == nil { + return nil + } + out := new(VPCAttachmentOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCAttachmentOptionsObservation) DeepCopyInto(out *VPCAttachmentOptionsObservation) { + *out = *in + if in.ApplianceModeSupport != nil { + in, out := &in.ApplianceModeSupport, &out.ApplianceModeSupport + *out = new(bool) + **out = **in + } + if in.IPv6Support != nil { + in, out := &in.IPv6Support, &out.IPv6Support + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCAttachmentOptionsObservation. +func (in *VPCAttachmentOptionsObservation) DeepCopy() *VPCAttachmentOptionsObservation { + if in == nil { + return nil + } + out := new(VPCAttachmentOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCAttachmentOptionsParameters) DeepCopyInto(out *VPCAttachmentOptionsParameters) { + *out = *in + if in.ApplianceModeSupport != nil { + in, out := &in.ApplianceModeSupport, &out.ApplianceModeSupport + *out = new(bool) + **out = **in + } + if in.IPv6Support != nil { + in, out := &in.IPv6Support, &out.IPv6Support + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCAttachmentOptionsParameters. +func (in *VPCAttachmentOptionsParameters) DeepCopy() *VPCAttachmentOptionsParameters { + if in == nil { + return nil + } + out := new(VPCAttachmentOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCAttachmentParameters) DeepCopyInto(out *VPCAttachmentParameters) { + *out = *in + if in.CoreNetworkID != nil { + in, out := &in.CoreNetworkID, &out.CoreNetworkID + *out = new(string) + **out = **in + } + if in.CoreNetworkIDRef != nil { + in, out := &in.CoreNetworkIDRef, &out.CoreNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CoreNetworkIDSelector != nil { + in, out := &in.CoreNetworkIDSelector, &out.CoreNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(VPCAttachmentOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SubnetArns != nil { + in, out := &in.SubnetArns, &out.SubnetArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetArnsRefs != nil { + in, out := &in.SubnetArnsRefs, &out.SubnetArnsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetArnsSelector != nil { + in, out := &in.SubnetArnsSelector, &out.SubnetArnsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCArn != nil { + in, out := &in.VPCArn, &out.VPCArn + *out = new(string) + **out = **in + } + if in.VPCArnRef != nil { + in, out := &in.VPCArnRef, &out.VPCArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCArnSelector != nil { + in, out := &in.VPCArnSelector, &out.VPCArnSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCAttachmentParameters. +func (in *VPCAttachmentParameters) DeepCopy() *VPCAttachmentParameters { + if in == nil { + return nil + } + out := new(VPCAttachmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCAttachmentSpec) DeepCopyInto(out *VPCAttachmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCAttachmentSpec. +func (in *VPCAttachmentSpec) DeepCopy() *VPCAttachmentSpec { + if in == nil { + return nil + } + out := new(VPCAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCAttachmentStatus) DeepCopyInto(out *VPCAttachmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCAttachmentStatus. +func (in *VPCAttachmentStatus) DeepCopy() *VPCAttachmentStatus { + if in == nil { + return nil + } + out := new(VPCAttachmentStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/networkmanager/v1beta2/zz_generated.managed.go b/apis/networkmanager/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..f6d0644151 --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ConnectAttachment. +func (mg *ConnectAttachment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ConnectAttachment. +func (mg *ConnectAttachment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ConnectAttachment. +func (mg *ConnectAttachment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ConnectAttachment. +func (mg *ConnectAttachment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ConnectAttachment. +func (mg *ConnectAttachment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ConnectAttachment. +func (mg *ConnectAttachment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ConnectAttachment. +func (mg *ConnectAttachment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ConnectAttachment. +func (mg *ConnectAttachment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ConnectAttachment. +func (mg *ConnectAttachment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ConnectAttachment. +func (mg *ConnectAttachment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ConnectAttachment. 
+func (mg *ConnectAttachment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ConnectAttachment. +func (mg *ConnectAttachment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Device. +func (mg *Device) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Device. +func (mg *Device) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Device. +func (mg *Device) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Device. +func (mg *Device) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Device. +func (mg *Device) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Device. +func (mg *Device) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Device. +func (mg *Device) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Device. +func (mg *Device) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Device. +func (mg *Device) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Device. +func (mg *Device) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Device. 
+func (mg *Device) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Device. +func (mg *Device) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Link. +func (mg *Link) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Link. +func (mg *Link) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Link. +func (mg *Link) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Link. +func (mg *Link) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Link. +func (mg *Link) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Link. +func (mg *Link) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Link. +func (mg *Link) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Link. +func (mg *Link) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Link. +func (mg *Link) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Link. +func (mg *Link) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Link. 
+func (mg *Link) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Link. +func (mg *Link) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Site. +func (mg *Site) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Site. +func (mg *Site) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Site. +func (mg *Site) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Site. +func (mg *Site) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Site. +func (mg *Site) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Site. +func (mg *Site) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Site. +func (mg *Site) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Site. +func (mg *Site) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Site. +func (mg *Site) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Site. +func (mg *Site) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Site. 
+func (mg *Site) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Site. +func (mg *Site) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPCAttachment. +func (mg *VPCAttachment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPCAttachment. +func (mg *VPCAttachment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPCAttachment. +func (mg *VPCAttachment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPCAttachment. +func (mg *VPCAttachment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPCAttachment. +func (mg *VPCAttachment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPCAttachment. +func (mg *VPCAttachment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPCAttachment. +func (mg *VPCAttachment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPCAttachment. +func (mg *VPCAttachment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPCAttachment. +func (mg *VPCAttachment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPCAttachment. 
+func (mg *VPCAttachment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPCAttachment. +func (mg *VPCAttachment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPCAttachment. +func (mg *VPCAttachment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/networkmanager/v1beta2/zz_generated.managedlist.go b/apis/networkmanager/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..c8d55d1b0b --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConnectAttachmentList. +func (l *ConnectAttachmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DeviceList. +func (l *DeviceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkList. +func (l *LinkList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SiteList. +func (l *SiteList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPCAttachmentList. 
+func (l *VPCAttachmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/networkmanager/v1beta2/zz_generated.resolvers.go b/apis/networkmanager/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..cbdbd32876 --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,498 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ConnectAttachment) ResolveReferences( // ResolveReferences of this ConnectAttachment. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "CoreNetwork", "CoreNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CoreNetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CoreNetworkIDRef, + Selector: mg.Spec.ForProvider.CoreNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CoreNetworkID") + } + mg.Spec.ForProvider.CoreNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CoreNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "VPCAttachment", "VPCAttachmentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EdgeLocation), + Extract: resource.ExtractParamPath("edge_location", true), + Reference: mg.Spec.ForProvider.EdgeLocationRef, + Selector: mg.Spec.ForProvider.EdgeLocationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EdgeLocation") + } + mg.Spec.ForProvider.EdgeLocation = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EdgeLocationRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "VPCAttachment", "VPCAttachmentList") 
+ if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TransportAttachmentID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TransportAttachmentIDRef, + Selector: mg.Spec.ForProvider.TransportAttachmentIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TransportAttachmentID") + } + mg.Spec.ForProvider.TransportAttachmentID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TransportAttachmentIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "CoreNetwork", "CoreNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CoreNetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CoreNetworkIDRef, + Selector: mg.Spec.InitProvider.CoreNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CoreNetworkID") + } + mg.Spec.InitProvider.CoreNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CoreNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "VPCAttachment", "VPCAttachmentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EdgeLocation), + Extract: 
resource.ExtractParamPath("edge_location", true), + Reference: mg.Spec.InitProvider.EdgeLocationRef, + Selector: mg.Spec.InitProvider.EdgeLocationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EdgeLocation") + } + mg.Spec.InitProvider.EdgeLocation = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EdgeLocationRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "VPCAttachment", "VPCAttachmentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TransportAttachmentID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TransportAttachmentIDRef, + Selector: mg.Spec.InitProvider.TransportAttachmentIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TransportAttachmentID") + } + mg.Spec.InitProvider.TransportAttachmentID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TransportAttachmentIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Device. 
+func (mg *Device) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "GlobalNetwork", "GlobalNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GlobalNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.GlobalNetworkIDRef, + Selector: mg.Spec.ForProvider.GlobalNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.GlobalNetworkID") + } + mg.Spec.ForProvider.GlobalNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.GlobalNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Site", "SiteList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteIDRef, + Selector: mg.Spec.ForProvider.SiteIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteID") + } + mg.Spec.ForProvider.SiteID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "GlobalNetwork", "GlobalNetworkList") + if err != nil { 
+ return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.GlobalNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.GlobalNetworkIDRef, + Selector: mg.Spec.InitProvider.GlobalNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.GlobalNetworkID") + } + mg.Spec.InitProvider.GlobalNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.GlobalNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Site", "SiteList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteIDRef, + Selector: mg.Spec.InitProvider.SiteIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteID") + } + mg.Spec.InitProvider.SiteID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Link. 
+func (mg *Link) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "GlobalNetwork", "GlobalNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GlobalNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.GlobalNetworkIDRef, + Selector: mg.Spec.ForProvider.GlobalNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.GlobalNetworkID") + } + mg.Spec.ForProvider.GlobalNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.GlobalNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Site", "SiteList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SiteIDRef, + Selector: mg.Spec.ForProvider.SiteIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteID") + } + mg.Spec.ForProvider.SiteID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "GlobalNetwork", "GlobalNetworkList") + if err != nil { + 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.GlobalNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.GlobalNetworkIDRef, + Selector: mg.Spec.InitProvider.GlobalNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.GlobalNetworkID") + } + mg.Spec.InitProvider.GlobalNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.GlobalNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta2", "Site", "SiteList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SiteIDRef, + Selector: mg.Spec.InitProvider.SiteIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteID") + } + mg.Spec.InitProvider.SiteID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Site. 
+func (mg *Site) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "GlobalNetwork", "GlobalNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GlobalNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.GlobalNetworkIDRef, + Selector: mg.Spec.ForProvider.GlobalNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.GlobalNetworkID") + } + mg.Spec.ForProvider.GlobalNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.GlobalNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "GlobalNetwork", "GlobalNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.GlobalNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.GlobalNetworkIDRef, + Selector: mg.Spec.InitProvider.GlobalNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.GlobalNetworkID") + } + mg.Spec.InitProvider.GlobalNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.GlobalNetworkIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VPCAttachment. 
+func (mg *VPCAttachment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "CoreNetwork", "CoreNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CoreNetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CoreNetworkIDRef, + Selector: mg.Spec.ForProvider.CoreNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CoreNetworkID") + } + mg.Spec.ForProvider.CoreNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CoreNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetArns), + Extract: common.ARNExtractor(), + References: mg.Spec.ForProvider.SubnetArnsRefs, + Selector: mg.Spec.ForProvider.SubnetArnsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetArns") + } + mg.Spec.ForProvider.SubnetArns = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetArnsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", 
"VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.VPCArnRef, + Selector: mg.Spec.ForProvider.VPCArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCArn") + } + mg.Spec.ForProvider.VPCArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("networkmanager.aws.upbound.io", "v1beta1", "CoreNetwork", "CoreNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CoreNetworkID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CoreNetworkIDRef, + Selector: mg.Spec.InitProvider.CoreNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CoreNetworkID") + } + mg.Spec.InitProvider.CoreNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CoreNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetArns), + Extract: common.ARNExtractor(), + References: mg.Spec.InitProvider.SubnetArnsRefs, + 
Selector: mg.Spec.InitProvider.SubnetArnsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetArns") + } + mg.Spec.InitProvider.SubnetArns = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetArnsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.VPCArnRef, + Selector: mg.Spec.InitProvider.VPCArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCArn") + } + mg.Spec.InitProvider.VPCArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/networkmanager/v1beta2/zz_groupversion_info.go b/apis/networkmanager/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..a449d3412e --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=networkmanager.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "networkmanager.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/networkmanager/v1beta2/zz_link_terraformed.go b/apis/networkmanager/v1beta2/zz_link_terraformed.go new file mode 100755 index 0000000000..969b84e94b --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_link_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Link +func (mg *Link) GetTerraformResourceType() string { + return "aws_networkmanager_link" +} + +// GetConnectionDetailsMapping for this Link +func (tr *Link) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Link +func (tr *Link) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Link +func (tr *Link) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Link +func (tr *Link) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Link +func (tr *Link) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Link +func (tr *Link) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Link +func (tr *Link) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Link +func (tr *Link) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Link using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Link) LateInitialize(attrs []byte) (bool, error) { + params := &LinkParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Link) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/networkmanager/v1beta2/zz_link_types.go b/apis/networkmanager/v1beta2/zz_link_types.go new file mode 100755 index 0000000000..379dd857ee --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_link_types.go @@ -0,0 +1,238 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BandwidthInitParameters struct { + + // Download speed in Mbps. + DownloadSpeed *float64 `json:"downloadSpeed,omitempty" tf:"download_speed,omitempty"` + + // Upload speed in Mbps. + UploadSpeed *float64 `json:"uploadSpeed,omitempty" tf:"upload_speed,omitempty"` +} + +type BandwidthObservation struct { + + // Download speed in Mbps. + DownloadSpeed *float64 `json:"downloadSpeed,omitempty" tf:"download_speed,omitempty"` + + // Upload speed in Mbps. + UploadSpeed *float64 `json:"uploadSpeed,omitempty" tf:"upload_speed,omitempty"` +} + +type BandwidthParameters struct { + + // Download speed in Mbps. 
+ // +kubebuilder:validation:Optional + DownloadSpeed *float64 `json:"downloadSpeed,omitempty" tf:"download_speed,omitempty"` + + // Upload speed in Mbps. + // +kubebuilder:validation:Optional + UploadSpeed *float64 `json:"uploadSpeed,omitempty" tf:"upload_speed,omitempty"` +} + +type LinkInitParameters struct { + + // The upload speed and download speed in Mbps. Documented below. + Bandwidth *BandwidthInitParameters `json:"bandwidth,omitempty" tf:"bandwidth,omitempty"` + + // A description of the link. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the global network. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.GlobalNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + GlobalNetworkID *string `json:"globalNetworkId,omitempty" tf:"global_network_id,omitempty"` + + // Reference to a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDRef *v1.Reference `json:"globalNetworkIdRef,omitempty" tf:"-"` + + // Selector for a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDSelector *v1.Selector `json:"globalNetworkIdSelector,omitempty" tf:"-"` + + // The provider of the link. + ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` + + // The ID of the site. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Site + SiteID *string `json:"siteId,omitempty" tf:"site_id,omitempty"` + + // Reference to a Site in networkmanager to populate siteId. + // +kubebuilder:validation:Optional + SiteIDRef *v1.Reference `json:"siteIdRef,omitempty" tf:"-"` + + // Selector for a Site in networkmanager to populate siteId. 
+ // +kubebuilder:validation:Optional + SiteIDSelector *v1.Selector `json:"siteIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of the link. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinkObservation struct { + + // Link Amazon Resource Name (ARN). + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The upload speed and download speed in Mbps. Documented below. + Bandwidth *BandwidthObservation `json:"bandwidth,omitempty" tf:"bandwidth,omitempty"` + + // A description of the link. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the global network. + GlobalNetworkID *string `json:"globalNetworkId,omitempty" tf:"global_network_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The provider of the link. + ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` + + // The ID of the site. + SiteID *string `json:"siteId,omitempty" tf:"site_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The type of the link. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinkParameters struct { + + // The upload speed and download speed in Mbps. Documented below. + // +kubebuilder:validation:Optional + Bandwidth *BandwidthParameters `json:"bandwidth,omitempty" tf:"bandwidth,omitempty"` + + // A description of the link. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the global network. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.GlobalNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + GlobalNetworkID *string `json:"globalNetworkId,omitempty" tf:"global_network_id,omitempty"` + + // Reference to a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDRef *v1.Reference `json:"globalNetworkIdRef,omitempty" tf:"-"` + + // Selector for a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDSelector *v1.Selector `json:"globalNetworkIdSelector,omitempty" tf:"-"` + + // The provider of the link. + // +kubebuilder:validation:Optional + ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The ID of the site. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta2.Site + // +kubebuilder:validation:Optional + SiteID *string `json:"siteId,omitempty" tf:"site_id,omitempty"` + + // Reference to a Site in networkmanager to populate siteId. + // +kubebuilder:validation:Optional + SiteIDRef *v1.Reference `json:"siteIdRef,omitempty" tf:"-"` + + // Selector for a Site in networkmanager to populate siteId. + // +kubebuilder:validation:Optional + SiteIDSelector *v1.Selector `json:"siteIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of the link. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// LinkSpec defines the desired state of Link +type LinkSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinkInitParameters `json:"initProvider,omitempty"` +} + +// LinkStatus defines the observed state of Link. +type LinkStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Link is the Schema for the Links API. Creates a link for a site. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Link struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.bandwidth) || (has(self.initProvider) && has(self.initProvider.bandwidth))",message="spec.forProvider.bandwidth is a required parameter" + Spec LinkSpec `json:"spec"` + Status LinkStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkList contains a list of Links +type LinkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Link `json:"items"` +} + +// Repository type metadata. +var ( + Link_Kind = "Link" + Link_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Link_Kind}.String() + Link_KindAPIVersion = Link_Kind + "." + CRDGroupVersion.String() + Link_GroupVersionKind = CRDGroupVersion.WithKind(Link_Kind) +) + +func init() { + SchemeBuilder.Register(&Link{}, &LinkList{}) +} diff --git a/apis/networkmanager/v1beta2/zz_site_terraformed.go b/apis/networkmanager/v1beta2/zz_site_terraformed.go new file mode 100755 index 0000000000..7896ba827b --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_site_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Site +func (mg *Site) GetTerraformResourceType() string { + return "aws_networkmanager_site" +} + +// GetConnectionDetailsMapping for this Site +func (tr *Site) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Site +func (tr *Site) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Site +func (tr *Site) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Site +func (tr *Site) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Site +func (tr *Site) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Site +func (tr *Site) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Site +func (tr *Site) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Site +func (tr *Site) 
GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Site using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Site) LateInitialize(attrs []byte) (bool, error) { + params := &SiteParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Site) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/networkmanager/v1beta2/zz_site_types.go b/apis/networkmanager/v1beta2/zz_site_types.go new file mode 100755 index 0000000000..09fc8cb47c --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_site_types.go @@ -0,0 +1,199 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SiteInitParameters struct { + + // Description of the Site. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Global Network to create the site in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.GlobalNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + GlobalNetworkID *string `json:"globalNetworkId,omitempty" tf:"global_network_id,omitempty"` + + // Reference to a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDRef *v1.Reference `json:"globalNetworkIdRef,omitempty" tf:"-"` + + // Selector for a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDSelector *v1.Selector `json:"globalNetworkIdSelector,omitempty" tf:"-"` + + // The site location as documented below. + Location *SiteLocationInitParameters `json:"location,omitempty" tf:"location,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SiteLocationInitParameters struct { + + // Address of the location. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // Latitude of the location. + Latitude *string `json:"latitude,omitempty" tf:"latitude,omitempty"` + + // Longitude of the location. + Longitude *string `json:"longitude,omitempty" tf:"longitude,omitempty"` +} + +type SiteLocationObservation struct { + + // Address of the location. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // Latitude of the location. + Latitude *string `json:"latitude,omitempty" tf:"latitude,omitempty"` + + // Longitude of the location. + Longitude *string `json:"longitude,omitempty" tf:"longitude,omitempty"` +} + +type SiteLocationParameters struct { + + // Address of the location. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // Latitude of the location. + // +kubebuilder:validation:Optional + Latitude *string `json:"latitude,omitempty" tf:"latitude,omitempty"` + + // Longitude of the location. + // +kubebuilder:validation:Optional + Longitude *string `json:"longitude,omitempty" tf:"longitude,omitempty"` +} + +type SiteObservation struct { + + // Site Amazon Resource Name (ARN) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Description of the Site. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Global Network to create the site in. + GlobalNetworkID *string `json:"globalNetworkId,omitempty" tf:"global_network_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The site location as documented below. + Location *SiteLocationObservation `json:"location,omitempty" tf:"location,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type SiteParameters struct { + + // Description of the Site. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Global Network to create the site in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.GlobalNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + GlobalNetworkID *string `json:"globalNetworkId,omitempty" tf:"global_network_id,omitempty"` + + // Reference to a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDRef *v1.Reference `json:"globalNetworkIdRef,omitempty" tf:"-"` + + // Selector for a GlobalNetwork in networkmanager to populate globalNetworkId. + // +kubebuilder:validation:Optional + GlobalNetworkIDSelector *v1.Selector `json:"globalNetworkIdSelector,omitempty" tf:"-"` + + // The site location as documented below. + // +kubebuilder:validation:Optional + Location *SiteLocationParameters `json:"location,omitempty" tf:"location,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// SiteSpec defines the desired state of Site +type SiteSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SiteParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SiteInitParameters `json:"initProvider,omitempty"` +} + +// SiteStatus defines the observed state of Site. +type SiteStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SiteObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Site is the Schema for the Sites API. Creates a site in a global network. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Site struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SiteSpec `json:"spec"` + Status SiteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SiteList contains a list of Sites +type SiteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Site `json:"items"` +} + +// Repository type metadata. +var ( + Site_Kind = "Site" + Site_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Site_Kind}.String() + Site_KindAPIVersion = Site_Kind + "." + CRDGroupVersion.String() + Site_GroupVersionKind = CRDGroupVersion.WithKind(Site_Kind) +) + +func init() { + SchemeBuilder.Register(&Site{}, &SiteList{}) +} diff --git a/apis/networkmanager/v1beta2/zz_vpcattachment_terraformed.go b/apis/networkmanager/v1beta2/zz_vpcattachment_terraformed.go new file mode 100755 index 0000000000..4a5f26bbda --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_vpcattachment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPCAttachment +func (mg *VPCAttachment) GetTerraformResourceType() string { + return "aws_networkmanager_vpc_attachment" +} + +// GetConnectionDetailsMapping for this VPCAttachment +func (tr *VPCAttachment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VPCAttachment +func (tr *VPCAttachment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPCAttachment +func (tr *VPCAttachment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPCAttachment +func (tr *VPCAttachment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPCAttachment +func (tr *VPCAttachment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPCAttachment +func (tr *VPCAttachment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPCAttachment +func (tr *VPCAttachment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + 
return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPCAttachment +func (tr *VPCAttachment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPCAttachment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VPCAttachment) LateInitialize(attrs []byte) (bool, error) { + params := &VPCAttachmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPCAttachment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/networkmanager/v1beta2/zz_vpcattachment_types.go b/apis/networkmanager/v1beta2/zz_vpcattachment_types.go new file mode 100755 index 0000000000..1e651e328c --- /dev/null +++ b/apis/networkmanager/v1beta2/zz_vpcattachment_types.go @@ -0,0 +1,274 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type VPCAttachmentInitParameters struct { + + // The ID of a core network for the VPC attachment. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.CoreNetwork + CoreNetworkID *string `json:"coreNetworkId,omitempty" tf:"core_network_id,omitempty"` + + // Reference to a CoreNetwork in networkmanager to populate coreNetworkId. + // +kubebuilder:validation:Optional + CoreNetworkIDRef *v1.Reference `json:"coreNetworkIdRef,omitempty" tf:"-"` + + // Selector for a CoreNetwork in networkmanager to populate coreNetworkId. + // +kubebuilder:validation:Optional + CoreNetworkIDSelector *v1.Selector `json:"coreNetworkIdSelector,omitempty" tf:"-"` + + // Options for the VPC attachment. + Options *VPCAttachmentOptionsInitParameters `json:"options,omitempty" tf:"options,omitempty"` + + // The subnet ARN of the VPC attachment. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +listType=set + SubnetArns []*string `json:"subnetArns,omitempty" tf:"subnet_arns,omitempty"` + + // References to Subnet in ec2 to populate subnetArns. + // +kubebuilder:validation:Optional + SubnetArnsRefs []v1.Reference `json:"subnetArnsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetArns. + // +kubebuilder:validation:Optional + SubnetArnsSelector *v1.Selector `json:"subnetArnsSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ARN of the VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + VPCArn *string `json:"vpcArn,omitempty" tf:"vpc_arn,omitempty"` + + // Reference to a VPC in ec2 to populate vpcArn. + // +kubebuilder:validation:Optional + VPCArnRef *v1.Reference `json:"vpcArnRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcArn. + // +kubebuilder:validation:Optional + VPCArnSelector *v1.Selector `json:"vpcArnSelector,omitempty" tf:"-"` +} + +type VPCAttachmentObservation struct { + + // The ARN of the attachment. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The policy rule number associated with the attachment. + AttachmentPolicyRuleNumber *float64 `json:"attachmentPolicyRuleNumber,omitempty" tf:"attachment_policy_rule_number,omitempty"` + + // The type of attachment. + AttachmentType *string `json:"attachmentType,omitempty" tf:"attachment_type,omitempty"` + + // The ARN of a core network. 
+ CoreNetworkArn *string `json:"coreNetworkArn,omitempty" tf:"core_network_arn,omitempty"` + + // The ID of a core network for the VPC attachment. + CoreNetworkID *string `json:"coreNetworkId,omitempty" tf:"core_network_id,omitempty"` + + // The Region where the edge is located. + EdgeLocation *string `json:"edgeLocation,omitempty" tf:"edge_location,omitempty"` + + // The ID of the attachment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Options for the VPC attachment. + Options *VPCAttachmentOptionsObservation `json:"options,omitempty" tf:"options,omitempty"` + + // The ID of the attachment account owner. + OwnerAccountID *string `json:"ownerAccountId,omitempty" tf:"owner_account_id,omitempty"` + + // The attachment resource ARN. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // The name of the segment attachment. + SegmentName *string `json:"segmentName,omitempty" tf:"segment_name,omitempty"` + + // The state of the attachment. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // The subnet ARN of the VPC attachment. + // +listType=set + SubnetArns []*string `json:"subnetArns,omitempty" tf:"subnet_arns,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The ARN of the VPC. + VPCArn *string `json:"vpcArn,omitempty" tf:"vpc_arn,omitempty"` +} + +type VPCAttachmentOptionsInitParameters struct { + + // Indicates whether appliance mode is supported. + // If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. 
+ // If the VPC attachment is pending acceptance, changing this value will recreate the resource. + ApplianceModeSupport *bool `json:"applianceModeSupport,omitempty" tf:"appliance_mode_support,omitempty"` + + // Indicates whether IPv6 is supported. + // If the VPC attachment is pending acceptance, changing this value will recreate the resource. + IPv6Support *bool `json:"ipv6Support,omitempty" tf:"ipv6_support,omitempty"` +} + +type VPCAttachmentOptionsObservation struct { + + // Indicates whether appliance mode is supported. + // If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. + // If the VPC attachment is pending acceptance, changing this value will recreate the resource. + ApplianceModeSupport *bool `json:"applianceModeSupport,omitempty" tf:"appliance_mode_support,omitempty"` + + // Indicates whether IPv6 is supported. + // If the VPC attachment is pending acceptance, changing this value will recreate the resource. + IPv6Support *bool `json:"ipv6Support,omitempty" tf:"ipv6_support,omitempty"` +} + +type VPCAttachmentOptionsParameters struct { + + // Indicates whether appliance mode is supported. + // If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. + // If the VPC attachment is pending acceptance, changing this value will recreate the resource. + // +kubebuilder:validation:Optional + ApplianceModeSupport *bool `json:"applianceModeSupport,omitempty" tf:"appliance_mode_support,omitempty"` + + // Indicates whether IPv6 is supported. + // If the VPC attachment is pending acceptance, changing this value will recreate the resource. + // +kubebuilder:validation:Optional + IPv6Support *bool `json:"ipv6Support,omitempty" tf:"ipv6_support,omitempty"` +} + +type VPCAttachmentParameters struct { + + // The ID of a core network for the VPC attachment. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/networkmanager/v1beta1.CoreNetwork + // +kubebuilder:validation:Optional + CoreNetworkID *string `json:"coreNetworkId,omitempty" tf:"core_network_id,omitempty"` + + // Reference to a CoreNetwork in networkmanager to populate coreNetworkId. + // +kubebuilder:validation:Optional + CoreNetworkIDRef *v1.Reference `json:"coreNetworkIdRef,omitempty" tf:"-"` + + // Selector for a CoreNetwork in networkmanager to populate coreNetworkId. + // +kubebuilder:validation:Optional + CoreNetworkIDSelector *v1.Selector `json:"coreNetworkIdSelector,omitempty" tf:"-"` + + // Options for the VPC attachment. + // +kubebuilder:validation:Optional + Options *VPCAttachmentOptionsParameters `json:"options,omitempty" tf:"options,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The subnet ARN of the VPC attachment. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + // +listType=set + SubnetArns []*string `json:"subnetArns,omitempty" tf:"subnet_arns,omitempty"` + + // References to Subnet in ec2 to populate subnetArns. + // +kubebuilder:validation:Optional + SubnetArnsRefs []v1.Reference `json:"subnetArnsRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetArns. + // +kubebuilder:validation:Optional + SubnetArnsSelector *v1.Selector `json:"subnetArnsSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ARN of the VPC. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + VPCArn *string `json:"vpcArn,omitempty" tf:"vpc_arn,omitempty"` + + // Reference to a VPC in ec2 to populate vpcArn. + // +kubebuilder:validation:Optional + VPCArnRef *v1.Reference `json:"vpcArnRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcArn. + // +kubebuilder:validation:Optional + VPCArnSelector *v1.Selector `json:"vpcArnSelector,omitempty" tf:"-"` +} + +// VPCAttachmentSpec defines the desired state of VPCAttachment +type VPCAttachmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPCAttachmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPCAttachmentInitParameters `json:"initProvider,omitempty"` +} + +// VPCAttachmentStatus defines the observed state of VPCAttachment. +type VPCAttachmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPCAttachmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPCAttachment is the Schema for the VPCAttachments API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type VPCAttachment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec VPCAttachmentSpec `json:"spec"` + Status VPCAttachmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPCAttachmentList contains a list of VPCAttachments +type VPCAttachmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPCAttachment `json:"items"` +} + +// Repository type metadata. +var ( + VPCAttachment_Kind = "VPCAttachment" + VPCAttachment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPCAttachment_Kind}.String() + VPCAttachment_KindAPIVersion = VPCAttachment_Kind + "." + CRDGroupVersion.String() + VPCAttachment_GroupVersionKind = CRDGroupVersion.WithKind(VPCAttachment_Kind) +) + +func init() { + SchemeBuilder.Register(&VPCAttachment{}, &VPCAttachmentList{}) +} diff --git a/apis/opensearch/v1beta1/zz_domainpolicy_types.go b/apis/opensearch/v1beta1/zz_domainpolicy_types.go index eab1039491..f75804d392 100755 --- a/apis/opensearch/v1beta1/zz_domainpolicy_types.go +++ b/apis/opensearch/v1beta1/zz_domainpolicy_types.go @@ -19,7 +19,7 @@ type DomainPolicyInitParameters struct { AccessPolicies *string `json:"accessPolicies,omitempty" tf:"access_policies,omitempty"` // Name of the domain. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearch/v1beta1.Domain + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearch/v1beta2.Domain DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` // Reference to a Domain in opensearch to populate domainName. @@ -49,7 +49,7 @@ type DomainPolicyParameters struct { AccessPolicies *string `json:"accessPolicies,omitempty" tf:"access_policies,omitempty"` // Name of the domain. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearch/v1beta1.Domain + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearch/v1beta2.Domain // +kubebuilder:validation:Optional DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` diff --git a/apis/opensearch/v1beta1/zz_generated.conversion_hubs.go b/apis/opensearch/v1beta1/zz_generated.conversion_hubs.go index e1c18871a9..69c4e36efe 100755 --- a/apis/opensearch/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/opensearch/v1beta1/zz_generated.conversion_hubs.go @@ -6,11 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Domain) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DomainPolicy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DomainSAMLOptions) Hub() {} diff --git a/apis/opensearch/v1beta1/zz_generated.conversion_spokes.go b/apis/opensearch/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..9128ba7d2b --- /dev/null +++ b/apis/opensearch/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Domain to the hub type. +func (tr *Domain) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Domain type. +func (tr *Domain) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DomainSAMLOptions to the hub type. +func (tr *DomainSAMLOptions) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DomainSAMLOptions type. 
+func (tr *DomainSAMLOptions) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/opensearch/v1beta1/zz_generated.resolvers.go b/apis/opensearch/v1beta1/zz_generated.resolvers.go index b47b8a5291..263a9673df 100644 --- a/apis/opensearch/v1beta1/zz_generated.resolvers.go +++ b/apis/opensearch/v1beta1/zz_generated.resolvers.go @@ -210,7 +210,7 @@ func (mg *DomainPolicy) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("opensearch.aws.upbound.io", "v1beta1", "Domain", "DomainList") + m, l, err = apisresolver.GetManagedResource("opensearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -229,7 +229,7 @@ func (mg *DomainPolicy) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.DomainName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DomainNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("opensearch.aws.upbound.io", "v1beta1", "Domain", "DomainList") + m, l, err = apisresolver.GetManagedResource("opensearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/opensearch/v1beta2/zz_domain_terraformed.go b/apis/opensearch/v1beta2/zz_domain_terraformed.go new file mode 100755 index 0000000000..40795129e4 --- /dev/null +++ 
b/apis/opensearch/v1beta2/zz_domain_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Domain +func (mg *Domain) GetTerraformResourceType() string { + return "aws_opensearch_domain" +} + +// GetConnectionDetailsMapping for this Domain +func (tr *Domain) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"advanced_security_options[*].master_user_options[*].master_user_password": "advancedSecurityOptions[*].masterUserOptions[*].masterUserPasswordSecretRef"} +} + +// GetObservation of this Domain +func (tr *Domain) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Domain +func (tr *Domain) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Domain +func (tr *Domain) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Domain +func (tr *Domain) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Domain +func (tr *Domain) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { 
+ return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Domain +func (tr *Domain) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Domain +func (tr *Domain) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Domain using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Domain) LateInitialize(attrs []byte) (bool, error) { + params := &DomainParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Domain) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opensearch/v1beta2/zz_domain_types.go b/apis/opensearch/v1beta2/zz_domain_types.go new file mode 100755 index 0000000000..79b65343d3 --- /dev/null +++ b/apis/opensearch/v1beta2/zz_domain_types.go @@ -0,0 +1,1086 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdvancedSecurityOptionsInitParameters struct { + + // Whether Anonymous auth is enabled. Enables fine-grained access control on an existing domain. Ignored unless advanced_security_options are enabled. Can only be enabled on an existing domain. + AnonymousAuthEnabled *bool `json:"anonymousAuthEnabled,omitempty" tf:"anonymous_auth_enabled,omitempty"` + + // Whether advanced security is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the internal user database is enabled. Default is false. + InternalUserDatabaseEnabled *bool `json:"internalUserDatabaseEnabled,omitempty" tf:"internal_user_database_enabled,omitempty"` + + // Configuration block for the main user. Detailed below. + MasterUserOptions *MasterUserOptionsInitParameters `json:"masterUserOptions,omitempty" tf:"master_user_options,omitempty"` +} + +type AdvancedSecurityOptionsObservation struct { + + // Whether Anonymous auth is enabled. Enables fine-grained access control on an existing domain. Ignored unless advanced_security_options are enabled. Can only be enabled on an existing domain. 
+ AnonymousAuthEnabled *bool `json:"anonymousAuthEnabled,omitempty" tf:"anonymous_auth_enabled,omitempty"` + + // Whether advanced security is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the internal user database is enabled. Default is false. + InternalUserDatabaseEnabled *bool `json:"internalUserDatabaseEnabled,omitempty" tf:"internal_user_database_enabled,omitempty"` + + // Configuration block for the main user. Detailed below. + MasterUserOptions *MasterUserOptionsObservation `json:"masterUserOptions,omitempty" tf:"master_user_options,omitempty"` +} + +type AdvancedSecurityOptionsParameters struct { + + // Whether Anonymous auth is enabled. Enables fine-grained access control on an existing domain. Ignored unless advanced_security_options are enabled. Can only be enabled on an existing domain. + // +kubebuilder:validation:Optional + AnonymousAuthEnabled *bool `json:"anonymousAuthEnabled,omitempty" tf:"anonymous_auth_enabled,omitempty"` + + // Whether advanced security is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Whether the internal user database is enabled. Default is false. + // +kubebuilder:validation:Optional + InternalUserDatabaseEnabled *bool `json:"internalUserDatabaseEnabled,omitempty" tf:"internal_user_database_enabled,omitempty"` + + // Configuration block for the main user. Detailed below. + // +kubebuilder:validation:Optional + MasterUserOptions *MasterUserOptionsParameters `json:"masterUserOptions,omitempty" tf:"master_user_options,omitempty"` +} + +type AutoTuneOptionsInitParameters struct { + + // Auto-Tune desired state for the domain. Valid values: ENABLED or DISABLED. + DesiredState *string `json:"desiredState,omitempty" tf:"desired_state,omitempty"` + + // Configuration block for Auto-Tune maintenance windows. Can be specified multiple times for each maintenance window. Detailed below. 
+ MaintenanceSchedule []MaintenanceScheduleInitParameters `json:"maintenanceSchedule,omitempty" tf:"maintenance_schedule,omitempty"` + + // Whether to roll back to default Auto-Tune settings when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK or NO_ROLLBACK. + RollbackOnDisable *string `json:"rollbackOnDisable,omitempty" tf:"rollback_on_disable,omitempty"` + + // Whether to schedule Auto-Tune optimizations that require blue/green deployments during the domain's configured daily off-peak window. Defaults to false. + UseOffPeakWindow *bool `json:"useOffPeakWindow,omitempty" tf:"use_off_peak_window,omitempty"` +} + +type AutoTuneOptionsObservation struct { + + // Auto-Tune desired state for the domain. Valid values: ENABLED or DISABLED. + DesiredState *string `json:"desiredState,omitempty" tf:"desired_state,omitempty"` + + // Configuration block for Auto-Tune maintenance windows. Can be specified multiple times for each maintenance window. Detailed below. + MaintenanceSchedule []MaintenanceScheduleObservation `json:"maintenanceSchedule,omitempty" tf:"maintenance_schedule,omitempty"` + + // Whether to roll back to default Auto-Tune settings when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK or NO_ROLLBACK. + RollbackOnDisable *string `json:"rollbackOnDisable,omitempty" tf:"rollback_on_disable,omitempty"` + + // Whether to schedule Auto-Tune optimizations that require blue/green deployments during the domain's configured daily off-peak window. Defaults to false. + UseOffPeakWindow *bool `json:"useOffPeakWindow,omitempty" tf:"use_off_peak_window,omitempty"` +} + +type AutoTuneOptionsParameters struct { + + // Auto-Tune desired state for the domain. Valid values: ENABLED or DISABLED. + // +kubebuilder:validation:Optional + DesiredState *string `json:"desiredState" tf:"desired_state,omitempty"` + + // Configuration block for Auto-Tune maintenance windows. Can be specified multiple times for each maintenance window. Detailed below. 
+ // +kubebuilder:validation:Optional + MaintenanceSchedule []MaintenanceScheduleParameters `json:"maintenanceSchedule,omitempty" tf:"maintenance_schedule,omitempty"` + + // Whether to roll back to default Auto-Tune settings when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK or NO_ROLLBACK. + // +kubebuilder:validation:Optional + RollbackOnDisable *string `json:"rollbackOnDisable,omitempty" tf:"rollback_on_disable,omitempty"` + + // Whether to schedule Auto-Tune optimizations that require blue/green deployments during the domain's configured daily off-peak window. Defaults to false. + // +kubebuilder:validation:Optional + UseOffPeakWindow *bool `json:"useOffPeakWindow,omitempty" tf:"use_off_peak_window,omitempty"` +} + +type ClusterConfigInitParameters struct { + + // Configuration block containing cold storage configuration. Detailed below. + ColdStorageOptions *ColdStorageOptionsInitParameters `json:"coldStorageOptions,omitempty" tf:"cold_storage_options,omitempty"` + + // Number of dedicated main nodes in the cluster. + DedicatedMasterCount *float64 `json:"dedicatedMasterCount,omitempty" tf:"dedicated_master_count,omitempty"` + + // Whether dedicated main nodes are enabled for the cluster. + DedicatedMasterEnabled *bool `json:"dedicatedMasterEnabled,omitempty" tf:"dedicated_master_enabled,omitempty"` + + // Instance type of the dedicated main nodes in the cluster. + DedicatedMasterType *string `json:"dedicatedMasterType,omitempty" tf:"dedicated_master_type,omitempty"` + + // Number of instances in the cluster. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Instance type of data nodes in the cluster. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Whether a multi-AZ domain is turned on with a standby AZ. For more information, see Configuring a multi-AZ domain in Amazon OpenSearch Service. 
+ MultiAzWithStandbyEnabled *bool `json:"multiAzWithStandbyEnabled,omitempty" tf:"multi_az_with_standby_enabled,omitempty"` + + // Number of warm nodes in the cluster. Valid values are between 2 and 150. warm_count can be only and must be set when warm_enabled is set to true. + WarmCount *float64 `json:"warmCount,omitempty" tf:"warm_count,omitempty"` + + // Whether to enable warm storage. + WarmEnabled *bool `json:"warmEnabled,omitempty" tf:"warm_enabled,omitempty"` + + // Instance type for the OpenSearch cluster's warm nodes. Valid values are ultrawarm1.medium.search, ultrawarm1.large.search and ultrawarm1.xlarge.search. warm_type can be only and must be set when warm_enabled is set to true. + WarmType *string `json:"warmType,omitempty" tf:"warm_type,omitempty"` + + // Configuration block containing zone awareness settings. Detailed below. + ZoneAwarenessConfig *ZoneAwarenessConfigInitParameters `json:"zoneAwarenessConfig,omitempty" tf:"zone_awareness_config,omitempty"` + + // Whether zone awareness is enabled, set to true for multi-az deployment. To enable awareness with three Availability Zones, the availability_zone_count within the zone_awareness_config must be set to 3. + ZoneAwarenessEnabled *bool `json:"zoneAwarenessEnabled,omitempty" tf:"zone_awareness_enabled,omitempty"` +} + +type ClusterConfigObservation struct { + + // Configuration block containing cold storage configuration. Detailed below. + ColdStorageOptions *ColdStorageOptionsObservation `json:"coldStorageOptions,omitempty" tf:"cold_storage_options,omitempty"` + + // Number of dedicated main nodes in the cluster. + DedicatedMasterCount *float64 `json:"dedicatedMasterCount,omitempty" tf:"dedicated_master_count,omitempty"` + + // Whether dedicated main nodes are enabled for the cluster. + DedicatedMasterEnabled *bool `json:"dedicatedMasterEnabled,omitempty" tf:"dedicated_master_enabled,omitempty"` + + // Instance type of the dedicated main nodes in the cluster. 
+ DedicatedMasterType *string `json:"dedicatedMasterType,omitempty" tf:"dedicated_master_type,omitempty"` + + // Number of instances in the cluster. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Instance type of data nodes in the cluster. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Whether a multi-AZ domain is turned on with a standby AZ. For more information, see Configuring a multi-AZ domain in Amazon OpenSearch Service. + MultiAzWithStandbyEnabled *bool `json:"multiAzWithStandbyEnabled,omitempty" tf:"multi_az_with_standby_enabled,omitempty"` + + // Number of warm nodes in the cluster. Valid values are between 2 and 150. warm_count can be only and must be set when warm_enabled is set to true. + WarmCount *float64 `json:"warmCount,omitempty" tf:"warm_count,omitempty"` + + // Whether to enable warm storage. + WarmEnabled *bool `json:"warmEnabled,omitempty" tf:"warm_enabled,omitempty"` + + // Instance type for the OpenSearch cluster's warm nodes. Valid values are ultrawarm1.medium.search, ultrawarm1.large.search and ultrawarm1.xlarge.search. warm_type can be only and must be set when warm_enabled is set to true. + WarmType *string `json:"warmType,omitempty" tf:"warm_type,omitempty"` + + // Configuration block containing zone awareness settings. Detailed below. + ZoneAwarenessConfig *ZoneAwarenessConfigObservation `json:"zoneAwarenessConfig,omitempty" tf:"zone_awareness_config,omitempty"` + + // Whether zone awareness is enabled, set to true for multi-az deployment. To enable awareness with three Availability Zones, the availability_zone_count within the zone_awareness_config must be set to 3. + ZoneAwarenessEnabled *bool `json:"zoneAwarenessEnabled,omitempty" tf:"zone_awareness_enabled,omitempty"` +} + +type ClusterConfigParameters struct { + + // Configuration block containing cold storage configuration. Detailed below. 
+ // +kubebuilder:validation:Optional + ColdStorageOptions *ColdStorageOptionsParameters `json:"coldStorageOptions,omitempty" tf:"cold_storage_options,omitempty"` + + // Number of dedicated main nodes in the cluster. + // +kubebuilder:validation:Optional + DedicatedMasterCount *float64 `json:"dedicatedMasterCount,omitempty" tf:"dedicated_master_count,omitempty"` + + // Whether dedicated main nodes are enabled for the cluster. + // +kubebuilder:validation:Optional + DedicatedMasterEnabled *bool `json:"dedicatedMasterEnabled,omitempty" tf:"dedicated_master_enabled,omitempty"` + + // Instance type of the dedicated main nodes in the cluster. + // +kubebuilder:validation:Optional + DedicatedMasterType *string `json:"dedicatedMasterType,omitempty" tf:"dedicated_master_type,omitempty"` + + // Number of instances in the cluster. + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Instance type of data nodes in the cluster. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // Whether a multi-AZ domain is turned on with a standby AZ. For more information, see Configuring a multi-AZ domain in Amazon OpenSearch Service. + // +kubebuilder:validation:Optional + MultiAzWithStandbyEnabled *bool `json:"multiAzWithStandbyEnabled,omitempty" tf:"multi_az_with_standby_enabled,omitempty"` + + // Number of warm nodes in the cluster. Valid values are between 2 and 150. warm_count can be only and must be set when warm_enabled is set to true. + // +kubebuilder:validation:Optional + WarmCount *float64 `json:"warmCount,omitempty" tf:"warm_count,omitempty"` + + // Whether to enable warm storage. + // +kubebuilder:validation:Optional + WarmEnabled *bool `json:"warmEnabled,omitempty" tf:"warm_enabled,omitempty"` + + // Instance type for the OpenSearch cluster's warm nodes. 
Valid values are ultrawarm1.medium.search, ultrawarm1.large.search and ultrawarm1.xlarge.search. warm_type can be only and must be set when warm_enabled is set to true. + // +kubebuilder:validation:Optional + WarmType *string `json:"warmType,omitempty" tf:"warm_type,omitempty"` + + // Configuration block containing zone awareness settings. Detailed below. + // +kubebuilder:validation:Optional + ZoneAwarenessConfig *ZoneAwarenessConfigParameters `json:"zoneAwarenessConfig,omitempty" tf:"zone_awareness_config,omitempty"` + + // Whether zone awareness is enabled, set to true for multi-az deployment. To enable awareness with three Availability Zones, the availability_zone_count within the zone_awareness_config must be set to 3. + // +kubebuilder:validation:Optional + ZoneAwarenessEnabled *bool `json:"zoneAwarenessEnabled,omitempty" tf:"zone_awareness_enabled,omitempty"` +} + +type CognitoOptionsInitParameters struct { + + // Whether Amazon Cognito authentication with Dashboard is enabled or not. Default is false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // ID of the Cognito Identity Pool to use. + IdentityPoolID *string `json:"identityPoolId,omitempty" tf:"identity_pool_id,omitempty"` + + // ARN of the IAM role that has the AmazonOpenSearchServiceCognitoAccess policy attached. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // ID of the Cognito User Pool to use. + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` +} + +type CognitoOptionsObservation struct { + + // Whether Amazon Cognito authentication with Dashboard is enabled or not. Default is false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // ID of the Cognito Identity Pool to use. + IdentityPoolID *string `json:"identityPoolId,omitempty" tf:"identity_pool_id,omitempty"` + + // ARN of the IAM role that has the AmazonOpenSearchServiceCognitoAccess policy attached. 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // ID of the Cognito User Pool to use. + UserPoolID *string `json:"userPoolId,omitempty" tf:"user_pool_id,omitempty"` +} + +type CognitoOptionsParameters struct { + + // Whether Amazon Cognito authentication with Dashboard is enabled or not. Default is false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // ID of the Cognito Identity Pool to use. + // +kubebuilder:validation:Optional + IdentityPoolID *string `json:"identityPoolId" tf:"identity_pool_id,omitempty"` + + // ARN of the IAM role that has the AmazonOpenSearchServiceCognitoAccess policy attached. + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn" tf:"role_arn,omitempty"` + + // ID of the Cognito User Pool to use. + // +kubebuilder:validation:Optional + UserPoolID *string `json:"userPoolId" tf:"user_pool_id,omitempty"` +} + +type ColdStorageOptionsInitParameters struct { + + // Boolean to enable cold storage for an OpenSearch domain. Defaults to false. Master and ultrawarm nodes must be enabled for cold storage. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ColdStorageOptionsObservation struct { + + // Boolean to enable cold storage for an OpenSearch domain. Defaults to false. Master and ultrawarm nodes must be enabled for cold storage. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ColdStorageOptionsParameters struct { + + // Boolean to enable cold storage for an OpenSearch domain. Defaults to false. Master and ultrawarm nodes must be enabled for cold storage. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type DomainEndpointOptionsInitParameters struct { + + // Fully qualified domain for your custom endpoint. 
+ CustomEndpoint *string `json:"customEndpoint,omitempty" tf:"custom_endpoint,omitempty"` + + // ACM certificate ARN for your custom endpoint. + CustomEndpointCertificateArn *string `json:"customEndpointCertificateArn,omitempty" tf:"custom_endpoint_certificate_arn,omitempty"` + + // Whether to enable custom endpoint for the OpenSearch domain. + CustomEndpointEnabled *bool `json:"customEndpointEnabled,omitempty" tf:"custom_endpoint_enabled,omitempty"` + + // Whether or not to require HTTPS. Defaults to true. + EnforceHTTPS *bool `json:"enforceHttps,omitempty" tf:"enforce_https,omitempty"` + + // Name of the TLS security policy that needs to be applied to the HTTPS endpoint. For valid values, refer to the AWS documentation. + TLSSecurityPolicy *string `json:"tlsSecurityPolicy,omitempty" tf:"tls_security_policy,omitempty"` +} + +type DomainEndpointOptionsObservation struct { + + // Fully qualified domain for your custom endpoint. + CustomEndpoint *string `json:"customEndpoint,omitempty" tf:"custom_endpoint,omitempty"` + + // ACM certificate ARN for your custom endpoint. + CustomEndpointCertificateArn *string `json:"customEndpointCertificateArn,omitempty" tf:"custom_endpoint_certificate_arn,omitempty"` + + // Whether to enable custom endpoint for the OpenSearch domain. + CustomEndpointEnabled *bool `json:"customEndpointEnabled,omitempty" tf:"custom_endpoint_enabled,omitempty"` + + // Whether or not to require HTTPS. Defaults to true. + EnforceHTTPS *bool `json:"enforceHttps,omitempty" tf:"enforce_https,omitempty"` + + // Name of the TLS security policy that needs to be applied to the HTTPS endpoint. For valid values, refer to the AWS documentation. + TLSSecurityPolicy *string `json:"tlsSecurityPolicy,omitempty" tf:"tls_security_policy,omitempty"` +} + +type DomainEndpointOptionsParameters struct { + + // Fully qualified domain for your custom endpoint. 
+ // +kubebuilder:validation:Optional + CustomEndpoint *string `json:"customEndpoint,omitempty" tf:"custom_endpoint,omitempty"` + + // ACM certificate ARN for your custom endpoint. + // +kubebuilder:validation:Optional + CustomEndpointCertificateArn *string `json:"customEndpointCertificateArn,omitempty" tf:"custom_endpoint_certificate_arn,omitempty"` + + // Whether to enable custom endpoint for the OpenSearch domain. + // +kubebuilder:validation:Optional + CustomEndpointEnabled *bool `json:"customEndpointEnabled,omitempty" tf:"custom_endpoint_enabled,omitempty"` + + // Whether or not to require HTTPS. Defaults to true. + // +kubebuilder:validation:Optional + EnforceHTTPS *bool `json:"enforceHttps,omitempty" tf:"enforce_https,omitempty"` + + // Name of the TLS security policy that needs to be applied to the HTTPS endpoint. For valid values, refer to the AWS documentation. + // +kubebuilder:validation:Optional + TLSSecurityPolicy *string `json:"tlsSecurityPolicy,omitempty" tf:"tls_security_policy,omitempty"` +} + +type DomainInitParameters struct { + + // Key-value string pairs to specify advanced configuration options. + // +mapType=granular + AdvancedOptions map[string]*string `json:"advancedOptions,omitempty" tf:"advanced_options,omitempty"` + + // Configuration block for fine-grained access control. Detailed below. + AdvancedSecurityOptions *AdvancedSecurityOptionsInitParameters `json:"advancedSecurityOptions,omitempty" tf:"advanced_security_options,omitempty"` + + // Configuration block for the Auto-Tune options of the domain. Detailed below. + AutoTuneOptions *AutoTuneOptionsInitParameters `json:"autoTuneOptions,omitempty" tf:"auto_tune_options,omitempty"` + + // Configuration block for the cluster of the domain. Detailed below. + ClusterConfig *ClusterConfigInitParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Configuration block for authenticating dashboard with Cognito. Detailed below. 
+ CognitoOptions *CognitoOptionsInitParameters `json:"cognitoOptions,omitempty" tf:"cognito_options,omitempty"` + + // Configuration block for domain endpoint HTTP(S) related options. Detailed below. + DomainEndpointOptions *DomainEndpointOptionsInitParameters `json:"domainEndpointOptions,omitempty" tf:"domain_endpoint_options,omitempty"` + + // Name of the domain. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Configuration block for EBS related options, may be required based on chosen instance size. Detailed below. + EBSOptions *EBSOptionsInitParameters `json:"ebsOptions,omitempty" tf:"ebs_options,omitempty"` + + // Configuration block for encrypt at rest options. Only available for certain instance types. Detailed below. + EncryptAtRest *EncryptAtRestInitParameters `json:"encryptAtRest,omitempty" tf:"encrypt_at_rest,omitempty"` + + // while Elasticsearch has elasticsearch_version + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Configuration block for publishing slow and application logs to CloudWatch Logs. This block can be declared multiple times, for each log_type, within the same resource. Detailed below. + LogPublishingOptions []LogPublishingOptionsInitParameters `json:"logPublishingOptions,omitempty" tf:"log_publishing_options,omitempty"` + + // Configuration block for node-to-node encryption options. Detailed below. + NodeToNodeEncryption *NodeToNodeEncryptionInitParameters `json:"nodeToNodeEncryption,omitempty" tf:"node_to_node_encryption,omitempty"` + + // Configuration to add Off Peak update options. (documentation). Detailed below. + OffPeakWindowOptions *OffPeakWindowOptionsInitParameters `json:"offPeakWindowOptions,omitempty" tf:"off_peak_window_options,omitempty"` + + // Configuration block for snapshot related options. Detailed below. DEPRECATED. 
For domains running OpenSearch 5.3 and later, Amazon OpenSearch takes hourly automated snapshots, making this setting irrelevant. For domains running earlier versions, OpenSearch takes daily automated snapshots. + SnapshotOptions *SnapshotOptionsInitParameters `json:"snapshotOptions,omitempty" tf:"snapshot_options,omitempty"` + + // Software update options for the domain. Detailed below. + SoftwareUpdateOptions *SoftwareUpdateOptionsInitParameters `json:"softwareUpdateOptions,omitempty" tf:"software_update_options,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for VPC related options. Adding or removing this configuration forces a new resource (documentation). Detailed below. + VPCOptions *VPCOptionsInitParameters `json:"vpcOptions,omitempty" tf:"vpc_options,omitempty"` +} + +type DomainObservation struct { + + // , are prefaced with es: for both. + AccessPolicies *string `json:"accessPolicies,omitempty" tf:"access_policies,omitempty"` + + // Key-value string pairs to specify advanced configuration options. + // +mapType=granular + AdvancedOptions map[string]*string `json:"advancedOptions,omitempty" tf:"advanced_options,omitempty"` + + // Configuration block for fine-grained access control. Detailed below. + AdvancedSecurityOptions *AdvancedSecurityOptionsObservation `json:"advancedSecurityOptions,omitempty" tf:"advanced_security_options,omitempty"` + + // ARN of the domain. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration block for the Auto-Tune options of the domain. Detailed below. + AutoTuneOptions *AutoTuneOptionsObservation `json:"autoTuneOptions,omitempty" tf:"auto_tune_options,omitempty"` + + // Configuration block for the cluster of the domain. Detailed below. 
+ ClusterConfig *ClusterConfigObservation `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Configuration block for authenticating dashboard with Cognito. Detailed below. + CognitoOptions *CognitoOptionsObservation `json:"cognitoOptions,omitempty" tf:"cognito_options,omitempty"` + + // Domain-specific endpoint for Dashboard without https scheme. + DashboardEndpoint *string `json:"dashboardEndpoint,omitempty" tf:"dashboard_endpoint,omitempty"` + + // Configuration block for domain endpoint HTTP(S) related options. Detailed below. + DomainEndpointOptions *DomainEndpointOptionsObservation `json:"domainEndpointOptions,omitempty" tf:"domain_endpoint_options,omitempty"` + + // Unique identifier for the domain. + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // Name of the domain. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Configuration block for EBS related options, may be required based on chosen instance size. Detailed below. + EBSOptions *EBSOptionsObservation `json:"ebsOptions,omitempty" tf:"ebs_options,omitempty"` + + // Configuration block for encrypt at rest options. Only available for certain instance types. Detailed below. + EncryptAtRest *EncryptAtRestObservation `json:"encryptAtRest,omitempty" tf:"encrypt_at_rest,omitempty"` + + // Domain-specific endpoint used to submit index, search, and data upload requests. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // while Elasticsearch has elasticsearch_version + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // (Deprecated) Domain-specific endpoint for kibana without https scheme. Use the dashboard_endpoint attribute instead. + KibanaEndpoint *string `json:"kibanaEndpoint,omitempty" tf:"kibana_endpoint,omitempty"` + + // Configuration block for publishing slow and application logs to CloudWatch Logs. 
This block can be declared multiple times, for each log_type, within the same resource. Detailed below. + LogPublishingOptions []LogPublishingOptionsObservation `json:"logPublishingOptions,omitempty" tf:"log_publishing_options,omitempty"` + + // Configuration block for node-to-node encryption options. Detailed below. + NodeToNodeEncryption *NodeToNodeEncryptionObservation `json:"nodeToNodeEncryption,omitempty" tf:"node_to_node_encryption,omitempty"` + + // Configuration to add Off Peak update options. (documentation). Detailed below. + OffPeakWindowOptions *OffPeakWindowOptionsObservation `json:"offPeakWindowOptions,omitempty" tf:"off_peak_window_options,omitempty"` + + // Configuration block for snapshot related options. Detailed below. DEPRECATED. For domains running OpenSearch 5.3 and later, Amazon OpenSearch takes hourly automated snapshots, making this setting irrelevant. For domains running earlier versions, OpenSearch takes daily automated snapshots. + SnapshotOptions *SnapshotOptionsObservation `json:"snapshotOptions,omitempty" tf:"snapshot_options,omitempty"` + + // Software update options for the domain. Detailed below. + SoftwareUpdateOptions *SoftwareUpdateOptionsObservation `json:"softwareUpdateOptions,omitempty" tf:"software_update_options,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration block for VPC related options. Adding or removing this configuration forces a new resource (documentation). Detailed below. + VPCOptions *VPCOptionsObservation `json:"vpcOptions,omitempty" tf:"vpc_options,omitempty"` +} + +type DomainParameters struct { + + // Key-value string pairs to specify advanced configuration options. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + AdvancedOptions map[string]*string `json:"advancedOptions,omitempty" tf:"advanced_options,omitempty"` + + // Configuration block for fine-grained access control. Detailed below. + // +kubebuilder:validation:Optional + AdvancedSecurityOptions *AdvancedSecurityOptionsParameters `json:"advancedSecurityOptions,omitempty" tf:"advanced_security_options,omitempty"` + + // Configuration block for the Auto-Tune options of the domain. Detailed below. + // +kubebuilder:validation:Optional + AutoTuneOptions *AutoTuneOptionsParameters `json:"autoTuneOptions,omitempty" tf:"auto_tune_options,omitempty"` + + // Configuration block for the cluster of the domain. Detailed below. + // +kubebuilder:validation:Optional + ClusterConfig *ClusterConfigParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Configuration block for authenticating dashboard with Cognito. Detailed below. + // +kubebuilder:validation:Optional + CognitoOptions *CognitoOptionsParameters `json:"cognitoOptions,omitempty" tf:"cognito_options,omitempty"` + + // Configuration block for domain endpoint HTTP(S) related options. Detailed below. + // +kubebuilder:validation:Optional + DomainEndpointOptions *DomainEndpointOptionsParameters `json:"domainEndpointOptions,omitempty" tf:"domain_endpoint_options,omitempty"` + + // Name of the domain. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Configuration block for EBS related options, may be required based on chosen instance size. Detailed below. + // +kubebuilder:validation:Optional + EBSOptions *EBSOptionsParameters `json:"ebsOptions,omitempty" tf:"ebs_options,omitempty"` + + // Configuration block for encrypt at rest options. Only available for certain instance types. Detailed below. 
+ // +kubebuilder:validation:Optional + EncryptAtRest *EncryptAtRestParameters `json:"encryptAtRest,omitempty" tf:"encrypt_at_rest,omitempty"` + + // while Elasticsearch has elasticsearch_version + // +kubebuilder:validation:Optional + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Configuration block for publishing slow and application logs to CloudWatch Logs. This block can be declared multiple times, for each log_type, within the same resource. Detailed below. + // +kubebuilder:validation:Optional + LogPublishingOptions []LogPublishingOptionsParameters `json:"logPublishingOptions,omitempty" tf:"log_publishing_options,omitempty"` + + // Configuration block for node-to-node encryption options. Detailed below. + // +kubebuilder:validation:Optional + NodeToNodeEncryption *NodeToNodeEncryptionParameters `json:"nodeToNodeEncryption,omitempty" tf:"node_to_node_encryption,omitempty"` + + // Configuration to add Off Peak update options. (documentation). Detailed below. + // +kubebuilder:validation:Optional + OffPeakWindowOptions *OffPeakWindowOptionsParameters `json:"offPeakWindowOptions,omitempty" tf:"off_peak_window_options,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for snapshot related options. Detailed below. DEPRECATED. For domains running OpenSearch 5.3 and later, Amazon OpenSearch takes hourly automated snapshots, making this setting irrelevant. For domains running earlier versions, OpenSearch takes daily automated snapshots. + // +kubebuilder:validation:Optional + SnapshotOptions *SnapshotOptionsParameters `json:"snapshotOptions,omitempty" tf:"snapshot_options,omitempty"` + + // Software update options for the domain. Detailed below. 
+ // +kubebuilder:validation:Optional + SoftwareUpdateOptions *SoftwareUpdateOptionsParameters `json:"softwareUpdateOptions,omitempty" tf:"software_update_options,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Configuration block for VPC related options. Adding or removing this configuration forces a new resource (documentation). Detailed below. + // +kubebuilder:validation:Optional + VPCOptions *VPCOptionsParameters `json:"vpcOptions,omitempty" tf:"vpc_options,omitempty"` +} + +type DurationInitParameters struct { + + // Unit of time specifying the duration of an Auto-Tune maintenance window. Valid values: HOURS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // An integer specifying the value of the duration of an Auto-Tune maintenance window. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type DurationObservation struct { + + // Unit of time specifying the duration of an Auto-Tune maintenance window. Valid values: HOURS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // An integer specifying the value of the duration of an Auto-Tune maintenance window. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type DurationParameters struct { + + // Unit of time specifying the duration of an Auto-Tune maintenance window. Valid values: HOURS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // An integer specifying the value of the duration of an Auto-Tune maintenance window. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type EBSOptionsInitParameters struct { + + // Whether EBS volumes are attached to data nodes in the domain. 
+ EBSEnabled *bool `json:"ebsEnabled,omitempty" tf:"ebs_enabled,omitempty"` + + // Baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the GP3 and Provisioned IOPS EBS volume types. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of EBS volumes attached to data nodes (in GiB). + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of EBS volumes attached to data nodes. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSOptionsObservation struct { + + // Whether EBS volumes are attached to data nodes in the domain. + EBSEnabled *bool `json:"ebsEnabled,omitempty" tf:"ebs_enabled,omitempty"` + + // Baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the GP3 and Provisioned IOPS EBS volume types. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of EBS volumes attached to data nodes (in GiB). + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of EBS volumes attached to data nodes. + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EBSOptionsParameters struct { + + // Whether EBS volumes are attached to data nodes in the domain. + // +kubebuilder:validation:Optional + EBSEnabled *bool `json:"ebsEnabled" tf:"ebs_enabled,omitempty"` + + // Baseline input/output (I/O) performance of EBS volumes attached to data nodes. 
Applicable only for the GP3 and Provisioned IOPS EBS volume types. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // Size of EBS volumes attached to data nodes (in GiB). + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` + + // Type of EBS volumes attached to data nodes. + // +kubebuilder:validation:Optional + VolumeType *string `json:"volumeType,omitempty" tf:"volume_type,omitempty"` +} + +type EncryptAtRestInitParameters struct { + + // Whether to enable encryption at rest. If the encrypt_at_rest block is not provided then this defaults to false. Enabling encryption on new domains requires an engine_version of OpenSearch_X.Y or Elasticsearch_5.1 or greater. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // KMS key ARN to encrypt the Elasticsearch domain with. If not specified then it defaults to using the aws/es service KMS key. Note that KMS will accept a KMS key ID but will return the key ARN. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type EncryptAtRestObservation struct { + + // Whether to enable encryption at rest. If the encrypt_at_rest block is not provided then this defaults to false. Enabling encryption on new domains requires an engine_version of OpenSearch_X.Y or Elasticsearch_5.1 or greater. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // KMS key ARN to encrypt the Elasticsearch domain with. If not specified then it defaults to using the aws/es service KMS key. Note that KMS will accept a KMS key ID but will return the key ARN. 
+ KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type EncryptAtRestParameters struct { + + // Whether to enable encryption at rest. If the encrypt_at_rest block is not provided then this defaults to false. Enabling encryption on new domains requires an engine_version of OpenSearch_X.Y or Elasticsearch_5.1 or greater. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // KMS key ARN to encrypt the Elasticsearch domain with. If not specified then it defaults to using the aws/es service KMS key. Note that KMS will accept a KMS key ID but will return the key ARN. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type LogPublishingOptionsInitParameters struct { + + // ARN of the Cloudwatch log group to which log needs to be published. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + CloudwatchLogGroupArn *string `json:"cloudwatchLogGroupArn,omitempty" tf:"cloudwatch_log_group_arn,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnRef *v1.Reference `json:"cloudwatchLogGroupArnRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnSelector *v1.Selector `json:"cloudwatchLogGroupArnSelector,omitempty" tf:"-"` + + // Whether given log publishing option is enabled or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Type of OpenSearch log. Valid values: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS. 
+ LogType *string `json:"logType,omitempty" tf:"log_type,omitempty"` +} + +type LogPublishingOptionsObservation struct { + + // ARN of the Cloudwatch log group to which log needs to be published. + CloudwatchLogGroupArn *string `json:"cloudwatchLogGroupArn,omitempty" tf:"cloudwatch_log_group_arn,omitempty"` + + // Whether given log publishing option is enabled or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Type of OpenSearch log. Valid values: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS. + LogType *string `json:"logType,omitempty" tf:"log_type,omitempty"` +} + +type LogPublishingOptionsParameters struct { + + // ARN of the Cloudwatch log group to which log needs to be published. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1.Group + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + CloudwatchLogGroupArn *string `json:"cloudwatchLogGroupArn,omitempty" tf:"cloudwatch_log_group_arn,omitempty"` + + // Reference to a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnRef *v1.Reference `json:"cloudwatchLogGroupArnRef,omitempty" tf:"-"` + + // Selector for a Group in cloudwatchlogs to populate cloudwatchLogGroupArn. + // +kubebuilder:validation:Optional + CloudwatchLogGroupArnSelector *v1.Selector `json:"cloudwatchLogGroupArnSelector,omitempty" tf:"-"` + + // Whether given log publishing option is enabled or not. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Type of OpenSearch log. Valid values: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS. 
+ // +kubebuilder:validation:Optional + LogType *string `json:"logType" tf:"log_type,omitempty"` +} + +type MaintenanceScheduleInitParameters struct { + + // A cron expression specifying the recurrence pattern for an Auto-Tune maintenance schedule. + CronExpressionForRecurrence *string `json:"cronExpressionForRecurrence,omitempty" tf:"cron_expression_for_recurrence,omitempty"` + + // Configuration block for the duration of the Auto-Tune maintenance window. Detailed below. + Duration *DurationInitParameters `json:"duration,omitempty" tf:"duration,omitempty"` + + // Date and time at which to start the Auto-Tune maintenance schedule in RFC3339 format. + StartAt *string `json:"startAt,omitempty" tf:"start_at,omitempty"` +} + +type MaintenanceScheduleObservation struct { + + // A cron expression specifying the recurrence pattern for an Auto-Tune maintenance schedule. + CronExpressionForRecurrence *string `json:"cronExpressionForRecurrence,omitempty" tf:"cron_expression_for_recurrence,omitempty"` + + // Configuration block for the duration of the Auto-Tune maintenance window. Detailed below. + Duration *DurationObservation `json:"duration,omitempty" tf:"duration,omitempty"` + + // Date and time at which to start the Auto-Tune maintenance schedule in RFC3339 format. + StartAt *string `json:"startAt,omitempty" tf:"start_at,omitempty"` +} + +type MaintenanceScheduleParameters struct { + + // A cron expression specifying the recurrence pattern for an Auto-Tune maintenance schedule. + // +kubebuilder:validation:Optional + CronExpressionForRecurrence *string `json:"cronExpressionForRecurrence" tf:"cron_expression_for_recurrence,omitempty"` + + // Configuration block for the duration of the Auto-Tune maintenance window. Detailed below. + // +kubebuilder:validation:Optional + Duration *DurationParameters `json:"duration" tf:"duration,omitempty"` + + // Date and time at which to start the Auto-Tune maintenance schedule in RFC3339 format. 
+ // +kubebuilder:validation:Optional + StartAt *string `json:"startAt" tf:"start_at,omitempty"` +} + +type MasterUserOptionsInitParameters struct { + + // ARN for the main user. Only specify if internal_user_database_enabled is not set or set to false. + MasterUserArn *string `json:"masterUserArn,omitempty" tf:"master_user_arn,omitempty"` + + // Main user's username, which is stored in the Amazon OpenSearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. + MasterUserName *string `json:"masterUserName,omitempty" tf:"master_user_name,omitempty"` + + // Main user's password, which is stored in the Amazon OpenSearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. + MasterUserPasswordSecretRef *v1.SecretKeySelector `json:"masterUserPasswordSecretRef,omitempty" tf:"-"` +} + +type MasterUserOptionsObservation struct { + + // ARN for the main user. Only specify if internal_user_database_enabled is not set or set to false. + MasterUserArn *string `json:"masterUserArn,omitempty" tf:"master_user_arn,omitempty"` + + // Main user's username, which is stored in the Amazon OpenSearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. + MasterUserName *string `json:"masterUserName,omitempty" tf:"master_user_name,omitempty"` +} + +type MasterUserOptionsParameters struct { + + // ARN for the main user. Only specify if internal_user_database_enabled is not set or set to false. + // +kubebuilder:validation:Optional + MasterUserArn *string `json:"masterUserArn,omitempty" tf:"master_user_arn,omitempty"` + + // Main user's username, which is stored in the Amazon OpenSearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. 
+ // +kubebuilder:validation:Optional + MasterUserName *string `json:"masterUserName,omitempty" tf:"master_user_name,omitempty"` + + // Main user's password, which is stored in the Amazon OpenSearch Service domain's internal database. Only specify if internal_user_database_enabled is set to true. + // +kubebuilder:validation:Optional + MasterUserPasswordSecretRef *v1.SecretKeySelector `json:"masterUserPasswordSecretRef,omitempty" tf:"-"` +} + +type NodeToNodeEncryptionInitParameters struct { + + // Whether to enable node-to-node encryption. If the node_to_node_encryption block is not provided then this defaults to false. Enabling node-to-node encryption of a new domain requires an engine_version of OpenSearch_X.Y or Elasticsearch_6.0 or greater. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type NodeToNodeEncryptionObservation struct { + + // Whether to enable node-to-node encryption. If the node_to_node_encryption block is not provided then this defaults to false. Enabling node-to-node encryption of a new domain requires an engine_version of OpenSearch_X.Y or Elasticsearch_6.0 or greater. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type NodeToNodeEncryptionParameters struct { + + // Whether to enable node-to-node encryption. If the node_to_node_encryption block is not provided then this defaults to false. Enabling node-to-node encryption of a new domain requires an engine_version of OpenSearch_X.Y or Elasticsearch_6.0 or greater. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type OffPeakWindowInitParameters struct { + + // 10h window for updates + WindowStartTime *WindowStartTimeInitParameters `json:"windowStartTime,omitempty" tf:"window_start_time,omitempty"` +} + +type OffPeakWindowObservation struct { + + // 10h window for updates + WindowStartTime *WindowStartTimeObservation `json:"windowStartTime,omitempty" tf:"window_start_time,omitempty"` +} + +type OffPeakWindowOptionsInitParameters struct { + + // Enabled disabled toggle for off-peak update window. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + OffPeakWindow *OffPeakWindowInitParameters `json:"offPeakWindow,omitempty" tf:"off_peak_window,omitempty"` +} + +type OffPeakWindowOptionsObservation struct { + + // Enabled disabled toggle for off-peak update window. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + OffPeakWindow *OffPeakWindowObservation `json:"offPeakWindow,omitempty" tf:"off_peak_window,omitempty"` +} + +type OffPeakWindowOptionsParameters struct { + + // Enabled disabled toggle for off-peak update window. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + OffPeakWindow *OffPeakWindowParameters `json:"offPeakWindow,omitempty" tf:"off_peak_window,omitempty"` +} + +type OffPeakWindowParameters struct { + + // 10h window for updates + // +kubebuilder:validation:Optional + WindowStartTime *WindowStartTimeParameters `json:"windowStartTime,omitempty" tf:"window_start_time,omitempty"` +} + +type SnapshotOptionsInitParameters struct { + + // Hour during which the service takes an automated daily snapshot of the indices in the domain. 
+ AutomatedSnapshotStartHour *float64 `json:"automatedSnapshotStartHour,omitempty" tf:"automated_snapshot_start_hour,omitempty"` +} + +type SnapshotOptionsObservation struct { + + // Hour during which the service takes an automated daily snapshot of the indices in the domain. + AutomatedSnapshotStartHour *float64 `json:"automatedSnapshotStartHour,omitempty" tf:"automated_snapshot_start_hour,omitempty"` +} + +type SnapshotOptionsParameters struct { + + // Hour during which the service takes an automated daily snapshot of the indices in the domain. + // +kubebuilder:validation:Optional + AutomatedSnapshotStartHour *float64 `json:"automatedSnapshotStartHour" tf:"automated_snapshot_start_hour,omitempty"` +} + +type SoftwareUpdateOptionsInitParameters struct { + + // Whether automatic service software updates are enabled for the domain. Defaults to false. + AutoSoftwareUpdateEnabled *bool `json:"autoSoftwareUpdateEnabled,omitempty" tf:"auto_software_update_enabled,omitempty"` +} + +type SoftwareUpdateOptionsObservation struct { + + // Whether automatic service software updates are enabled for the domain. Defaults to false. + AutoSoftwareUpdateEnabled *bool `json:"autoSoftwareUpdateEnabled,omitempty" tf:"auto_software_update_enabled,omitempty"` +} + +type SoftwareUpdateOptionsParameters struct { + + // Whether automatic service software updates are enabled for the domain. Defaults to false. + // +kubebuilder:validation:Optional + AutoSoftwareUpdateEnabled *bool `json:"autoSoftwareUpdateEnabled,omitempty" tf:"auto_software_update_enabled,omitempty"` +} + +type VPCOptionsInitParameters struct { + + // List of VPC Security Group IDs to be applied to the OpenSearch domain endpoints. If omitted, the default Security Group for the VPC will be used. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // List of VPC Subnet IDs for the OpenSearch domain endpoints to be created in. 
+ // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type VPCOptionsObservation struct { + + // If the domain was created inside a VPC, the names of the availability zones the configured subnet_ids were created inside. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // List of VPC Security Group IDs to be applied to the OpenSearch domain endpoints. If omitted, the default Security Group for the VPC will be used. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // List of VPC Subnet IDs for the OpenSearch domain endpoints to be created in. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // If the domain was created inside a VPC, the ID of the VPC. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCOptionsParameters struct { + + // List of VPC Security Group IDs to be applied to the OpenSearch domain endpoints. If omitted, the default Security Group for the VPC will be used. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // List of VPC Subnet IDs for the OpenSearch domain endpoints to be created in. 
+ // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` +} + +type WindowStartTimeInitParameters struct { + + // Starting hour of the 10-hour window for updates + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Starting minute of the 10-hour window for updates + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type WindowStartTimeObservation struct { + + // Starting hour of the 10-hour window for updates + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Starting minute of the 10-hour window for updates + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type WindowStartTimeParameters struct { + + // Starting hour of the 10-hour window for updates + // +kubebuilder:validation:Optional + Hours *float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Starting minute of the 10-hour window for updates + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type ZoneAwarenessConfigInitParameters struct { + + // Number of Availability Zones for the domain to use with zone_awareness_enabled. Defaults to 2. Valid values: 2 or 3. + AvailabilityZoneCount *float64 `json:"availabilityZoneCount,omitempty" tf:"availability_zone_count,omitempty"` +} + +type ZoneAwarenessConfigObservation struct { + + // Number of Availability Zones for the domain to use with zone_awareness_enabled. Defaults to 2. Valid values: 2 or 3. + AvailabilityZoneCount *float64 `json:"availabilityZoneCount,omitempty" tf:"availability_zone_count,omitempty"` +} + +type ZoneAwarenessConfigParameters struct { + + // Number of Availability Zones for the domain to use with zone_awareness_enabled. Defaults to 2. Valid values: 2 or 3. 
+ // +kubebuilder:validation:Optional + AvailabilityZoneCount *float64 `json:"availabilityZoneCount,omitempty" tf:"availability_zone_count,omitempty"` +} + +// DomainSpec defines the desired state of Domain +type DomainSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DomainParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DomainInitParameters `json:"initProvider,omitempty"` +} + +// DomainStatus defines the observed state of Domain. +type DomainStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DomainObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Domain is the Schema for the Domains API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Domain struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.domainName) || (has(self.initProvider) && has(self.initProvider.domainName))",message="spec.forProvider.domainName is a required parameter" + Spec DomainSpec `json:"spec"` + Status DomainStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DomainList contains a list of Domains +type DomainList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Domain `json:"items"` +} + +// Repository type metadata. +var ( + Domain_Kind = "Domain" + Domain_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Domain_Kind}.String() + Domain_KindAPIVersion = Domain_Kind + "." 
+ CRDGroupVersion.String() + Domain_GroupVersionKind = CRDGroupVersion.WithKind(Domain_Kind) +) + +func init() { + SchemeBuilder.Register(&Domain{}, &DomainList{}) +} diff --git a/apis/opensearch/v1beta2/zz_domainsamloptions_terraformed.go b/apis/opensearch/v1beta2/zz_domainsamloptions_terraformed.go new file mode 100755 index 0000000000..18a776de8e --- /dev/null +++ b/apis/opensearch/v1beta2/zz_domainsamloptions_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DomainSAMLOptions +func (mg *DomainSAMLOptions) GetTerraformResourceType() string { + return "aws_opensearch_domain_saml_options" +} + +// GetConnectionDetailsMapping for this DomainSAMLOptions +func (tr *DomainSAMLOptions) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"saml_options[*].master_user_name": "samlOptions[*].masterUserNameSecretRef"} +} + +// GetObservation of this DomainSAMLOptions +func (tr *DomainSAMLOptions) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DomainSAMLOptions +func (tr *DomainSAMLOptions) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DomainSAMLOptions +func (tr *DomainSAMLOptions) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + 
+// GetParameters of this DomainSAMLOptions
+func (tr *DomainSAMLOptions) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this DomainSAMLOptions
+func (tr *DomainSAMLOptions) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this DomainSAMLOptions
+func (tr *DomainSAMLOptions) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this DomainSAMLOptions
+func (tr *DomainSAMLOptions) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this DomainSAMLOptions using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *DomainSAMLOptions) LateInitialize(attrs []byte) (bool, error) {
+	params := &DomainSAMLOptionsParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *DomainSAMLOptions) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/opensearch/v1beta2/zz_domainsamloptions_types.go b/apis/opensearch/v1beta2/zz_domainsamloptions_types.go
new file mode 100755
index 0000000000..b898ccf62d
--- /dev/null
+++ b/apis/opensearch/v1beta2/zz_domainsamloptions_types.go
@@ -0,0 +1,236 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type DomainSAMLOptionsInitParameters struct {
+
+	// Name of the domain.
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearch/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("domain_name",false) + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Reference to a Domain in opensearch to populate domainName. + // +kubebuilder:validation:Optional + DomainNameRef *v1.Reference `json:"domainNameRef,omitempty" tf:"-"` + + // Selector for a Domain in opensearch to populate domainName. + // +kubebuilder:validation:Optional + DomainNameSelector *v1.Selector `json:"domainNameSelector,omitempty" tf:"-"` + + // SAML authentication options for an AWS OpenSearch Domain. + SAMLOptions *SAMLOptionsInitParameters `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` +} + +type DomainSAMLOptionsObservation struct { + + // Name of the domain. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Name of the domain the SAML options are associated with. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // SAML authentication options for an AWS OpenSearch Domain. + SAMLOptions *SAMLOptionsObservation `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` +} + +type DomainSAMLOptionsParameters struct { + + // Name of the domain. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opensearch/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("domain_name",false) + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Reference to a Domain in opensearch to populate domainName. + // +kubebuilder:validation:Optional + DomainNameRef *v1.Reference `json:"domainNameRef,omitempty" tf:"-"` + + // Selector for a Domain in opensearch to populate domainName. 
+ // +kubebuilder:validation:Optional + DomainNameSelector *v1.Selector `json:"domainNameSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // SAML authentication options for an AWS OpenSearch Domain. + // +kubebuilder:validation:Optional + SAMLOptions *SAMLOptionsParameters `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` +} + +type IdpInitParameters struct { + + // Unique Entity ID of the application in SAML Identity Provider. + EntityID *string `json:"entityId,omitempty" tf:"entity_id,omitempty"` + + // Metadata of the SAML application in xml format. + MetadataContent *string `json:"metadataContent,omitempty" tf:"metadata_content,omitempty"` +} + +type IdpObservation struct { + + // Unique Entity ID of the application in SAML Identity Provider. + EntityID *string `json:"entityId,omitempty" tf:"entity_id,omitempty"` + + // Metadata of the SAML application in xml format. + MetadataContent *string `json:"metadataContent,omitempty" tf:"metadata_content,omitempty"` +} + +type IdpParameters struct { + + // Unique Entity ID of the application in SAML Identity Provider. + // +kubebuilder:validation:Optional + EntityID *string `json:"entityId" tf:"entity_id,omitempty"` + + // Metadata of the SAML application in xml format. + // +kubebuilder:validation:Optional + MetadataContent *string `json:"metadataContent" tf:"metadata_content,omitempty"` +} + +type SAMLOptionsInitParameters struct { + + // Whether SAML authentication is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Information from your identity provider. + Idp *IdpInitParameters `json:"idp,omitempty" tf:"idp,omitempty"` + + // This backend role from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. 
+ MasterBackendRole *string `json:"masterBackendRole,omitempty" tf:"master_backend_role,omitempty"` + + // This username from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. + MasterUserNameSecretRef *v1.SecretKeySelector `json:"masterUserNameSecretRef,omitempty" tf:"-"` + + // Element of the SAML assertion to use for backend roles. Default is roles. + RolesKey *string `json:"rolesKey,omitempty" tf:"roles_key,omitempty"` + + // Duration of a session in minutes after a user logs in. Default is 60. Maximum value is 1,440. + SessionTimeoutMinutes *float64 `json:"sessionTimeoutMinutes,omitempty" tf:"session_timeout_minutes,omitempty"` + + // Element of the SAML assertion to use for username. Default is NameID. + SubjectKey *string `json:"subjectKey,omitempty" tf:"subject_key,omitempty"` +} + +type SAMLOptionsObservation struct { + + // Whether SAML authentication is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Information from your identity provider. + Idp *IdpObservation `json:"idp,omitempty" tf:"idp,omitempty"` + + // This backend role from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. + MasterBackendRole *string `json:"masterBackendRole,omitempty" tf:"master_backend_role,omitempty"` + + // Element of the SAML assertion to use for backend roles. Default is roles. + RolesKey *string `json:"rolesKey,omitempty" tf:"roles_key,omitempty"` + + // Duration of a session in minutes after a user logs in. Default is 60. Maximum value is 1,440. + SessionTimeoutMinutes *float64 `json:"sessionTimeoutMinutes,omitempty" tf:"session_timeout_minutes,omitempty"` + + // Element of the SAML assertion to use for username. Default is NameID. + SubjectKey *string `json:"subjectKey,omitempty" tf:"subject_key,omitempty"` +} + +type SAMLOptionsParameters struct { + + // Whether SAML authentication is enabled. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Information from your identity provider. + // +kubebuilder:validation:Optional + Idp *IdpParameters `json:"idp,omitempty" tf:"idp,omitempty"` + + // This backend role from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. + // +kubebuilder:validation:Optional + MasterBackendRole *string `json:"masterBackendRole,omitempty" tf:"master_backend_role,omitempty"` + + // This username from the SAML IdP receives full permissions to the cluster, equivalent to a new master user. + // +kubebuilder:validation:Optional + MasterUserNameSecretRef *v1.SecretKeySelector `json:"masterUserNameSecretRef,omitempty" tf:"-"` + + // Element of the SAML assertion to use for backend roles. Default is roles. + // +kubebuilder:validation:Optional + RolesKey *string `json:"rolesKey,omitempty" tf:"roles_key,omitempty"` + + // Duration of a session in minutes after a user logs in. Default is 60. Maximum value is 1,440. + // +kubebuilder:validation:Optional + SessionTimeoutMinutes *float64 `json:"sessionTimeoutMinutes,omitempty" tf:"session_timeout_minutes,omitempty"` + + // Element of the SAML assertion to use for username. Default is NameID. + // +kubebuilder:validation:Optional + SubjectKey *string `json:"subjectKey,omitempty" tf:"subject_key,omitempty"` +} + +// DomainSAMLOptionsSpec defines the desired state of DomainSAMLOptions +type DomainSAMLOptionsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DomainSAMLOptionsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DomainSAMLOptionsInitParameters `json:"initProvider,omitempty"` +} + +// DomainSAMLOptionsStatus defines the observed state of DomainSAMLOptions. +type DomainSAMLOptionsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DomainSAMLOptionsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DomainSAMLOptions is the Schema for the DomainSAMLOptionss API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type DomainSAMLOptions struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DomainSAMLOptionsSpec `json:"spec"` + Status DomainSAMLOptionsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DomainSAMLOptionsList contains a list of DomainSAMLOptionss +type DomainSAMLOptionsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DomainSAMLOptions `json:"items"` +} + +// Repository type metadata. 
+var ( + DomainSAMLOptions_Kind = "DomainSAMLOptions" + DomainSAMLOptions_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DomainSAMLOptions_Kind}.String() + DomainSAMLOptions_KindAPIVersion = DomainSAMLOptions_Kind + "." + CRDGroupVersion.String() + DomainSAMLOptions_GroupVersionKind = CRDGroupVersion.WithKind(DomainSAMLOptions_Kind) +) + +func init() { + SchemeBuilder.Register(&DomainSAMLOptions{}, &DomainSAMLOptionsList{}) +} diff --git a/apis/opensearch/v1beta2/zz_generated.conversion_hubs.go b/apis/opensearch/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..7f35a0518a --- /dev/null +++ b/apis/opensearch/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Domain) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DomainSAMLOptions) Hub() {} diff --git a/apis/opensearch/v1beta2/zz_generated.deepcopy.go b/apis/opensearch/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..17f099a36d --- /dev/null +++ b/apis/opensearch/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2813 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedSecurityOptionsInitParameters) DeepCopyInto(out *AdvancedSecurityOptionsInitParameters) { + *out = *in + if in.AnonymousAuthEnabled != nil { + in, out := &in.AnonymousAuthEnabled, &out.AnonymousAuthEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.InternalUserDatabaseEnabled != nil { + in, out := &in.InternalUserDatabaseEnabled, &out.InternalUserDatabaseEnabled + *out = new(bool) + **out = **in + } + if in.MasterUserOptions != nil { + in, out := &in.MasterUserOptions, &out.MasterUserOptions + *out = new(MasterUserOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSecurityOptionsInitParameters. +func (in *AdvancedSecurityOptionsInitParameters) DeepCopy() *AdvancedSecurityOptionsInitParameters { + if in == nil { + return nil + } + out := new(AdvancedSecurityOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedSecurityOptionsObservation) DeepCopyInto(out *AdvancedSecurityOptionsObservation) { + *out = *in + if in.AnonymousAuthEnabled != nil { + in, out := &in.AnonymousAuthEnabled, &out.AnonymousAuthEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.InternalUserDatabaseEnabled != nil { + in, out := &in.InternalUserDatabaseEnabled, &out.InternalUserDatabaseEnabled + *out = new(bool) + **out = **in + } + if in.MasterUserOptions != nil { + in, out := &in.MasterUserOptions, &out.MasterUserOptions + *out = new(MasterUserOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSecurityOptionsObservation. 
+func (in *AdvancedSecurityOptionsObservation) DeepCopy() *AdvancedSecurityOptionsObservation { + if in == nil { + return nil + } + out := new(AdvancedSecurityOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedSecurityOptionsParameters) DeepCopyInto(out *AdvancedSecurityOptionsParameters) { + *out = *in + if in.AnonymousAuthEnabled != nil { + in, out := &in.AnonymousAuthEnabled, &out.AnonymousAuthEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.InternalUserDatabaseEnabled != nil { + in, out := &in.InternalUserDatabaseEnabled, &out.InternalUserDatabaseEnabled + *out = new(bool) + **out = **in + } + if in.MasterUserOptions != nil { + in, out := &in.MasterUserOptions, &out.MasterUserOptions + *out = new(MasterUserOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedSecurityOptionsParameters. +func (in *AdvancedSecurityOptionsParameters) DeepCopy() *AdvancedSecurityOptionsParameters { + if in == nil { + return nil + } + out := new(AdvancedSecurityOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoTuneOptionsInitParameters) DeepCopyInto(out *AutoTuneOptionsInitParameters) { + *out = *in + if in.DesiredState != nil { + in, out := &in.DesiredState, &out.DesiredState + *out = new(string) + **out = **in + } + if in.MaintenanceSchedule != nil { + in, out := &in.MaintenanceSchedule, &out.MaintenanceSchedule + *out = make([]MaintenanceScheduleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RollbackOnDisable != nil { + in, out := &in.RollbackOnDisable, &out.RollbackOnDisable + *out = new(string) + **out = **in + } + if in.UseOffPeakWindow != nil { + in, out := &in.UseOffPeakWindow, &out.UseOffPeakWindow + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTuneOptionsInitParameters. +func (in *AutoTuneOptionsInitParameters) DeepCopy() *AutoTuneOptionsInitParameters { + if in == nil { + return nil + } + out := new(AutoTuneOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoTuneOptionsObservation) DeepCopyInto(out *AutoTuneOptionsObservation) { + *out = *in + if in.DesiredState != nil { + in, out := &in.DesiredState, &out.DesiredState + *out = new(string) + **out = **in + } + if in.MaintenanceSchedule != nil { + in, out := &in.MaintenanceSchedule, &out.MaintenanceSchedule + *out = make([]MaintenanceScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RollbackOnDisable != nil { + in, out := &in.RollbackOnDisable, &out.RollbackOnDisable + *out = new(string) + **out = **in + } + if in.UseOffPeakWindow != nil { + in, out := &in.UseOffPeakWindow, &out.UseOffPeakWindow + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTuneOptionsObservation. 
+func (in *AutoTuneOptionsObservation) DeepCopy() *AutoTuneOptionsObservation { + if in == nil { + return nil + } + out := new(AutoTuneOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoTuneOptionsParameters) DeepCopyInto(out *AutoTuneOptionsParameters) { + *out = *in + if in.DesiredState != nil { + in, out := &in.DesiredState, &out.DesiredState + *out = new(string) + **out = **in + } + if in.MaintenanceSchedule != nil { + in, out := &in.MaintenanceSchedule, &out.MaintenanceSchedule + *out = make([]MaintenanceScheduleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RollbackOnDisable != nil { + in, out := &in.RollbackOnDisable, &out.RollbackOnDisable + *out = new(string) + **out = **in + } + if in.UseOffPeakWindow != nil { + in, out := &in.UseOffPeakWindow, &out.UseOffPeakWindow + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTuneOptionsParameters. +func (in *AutoTuneOptionsParameters) DeepCopy() *AutoTuneOptionsParameters { + if in == nil { + return nil + } + out := new(AutoTuneOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterConfigInitParameters) DeepCopyInto(out *ClusterConfigInitParameters) { + *out = *in + if in.ColdStorageOptions != nil { + in, out := &in.ColdStorageOptions, &out.ColdStorageOptions + *out = new(ColdStorageOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DedicatedMasterCount != nil { + in, out := &in.DedicatedMasterCount, &out.DedicatedMasterCount + *out = new(float64) + **out = **in + } + if in.DedicatedMasterEnabled != nil { + in, out := &in.DedicatedMasterEnabled, &out.DedicatedMasterEnabled + *out = new(bool) + **out = **in + } + if in.DedicatedMasterType != nil { + in, out := &in.DedicatedMasterType, &out.DedicatedMasterType + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.MultiAzWithStandbyEnabled != nil { + in, out := &in.MultiAzWithStandbyEnabled, &out.MultiAzWithStandbyEnabled + *out = new(bool) + **out = **in + } + if in.WarmCount != nil { + in, out := &in.WarmCount, &out.WarmCount + *out = new(float64) + **out = **in + } + if in.WarmEnabled != nil { + in, out := &in.WarmEnabled, &out.WarmEnabled + *out = new(bool) + **out = **in + } + if in.WarmType != nil { + in, out := &in.WarmType, &out.WarmType + *out = new(string) + **out = **in + } + if in.ZoneAwarenessConfig != nil { + in, out := &in.ZoneAwarenessConfig, &out.ZoneAwarenessConfig + *out = new(ZoneAwarenessConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ZoneAwarenessEnabled != nil { + in, out := &in.ZoneAwarenessEnabled, &out.ZoneAwarenessEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigInitParameters. 
+func (in *ClusterConfigInitParameters) DeepCopy() *ClusterConfigInitParameters { + if in == nil { + return nil + } + out := new(ClusterConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigObservation) DeepCopyInto(out *ClusterConfigObservation) { + *out = *in + if in.ColdStorageOptions != nil { + in, out := &in.ColdStorageOptions, &out.ColdStorageOptions + *out = new(ColdStorageOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.DedicatedMasterCount != nil { + in, out := &in.DedicatedMasterCount, &out.DedicatedMasterCount + *out = new(float64) + **out = **in + } + if in.DedicatedMasterEnabled != nil { + in, out := &in.DedicatedMasterEnabled, &out.DedicatedMasterEnabled + *out = new(bool) + **out = **in + } + if in.DedicatedMasterType != nil { + in, out := &in.DedicatedMasterType, &out.DedicatedMasterType + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.MultiAzWithStandbyEnabled != nil { + in, out := &in.MultiAzWithStandbyEnabled, &out.MultiAzWithStandbyEnabled + *out = new(bool) + **out = **in + } + if in.WarmCount != nil { + in, out := &in.WarmCount, &out.WarmCount + *out = new(float64) + **out = **in + } + if in.WarmEnabled != nil { + in, out := &in.WarmEnabled, &out.WarmEnabled + *out = new(bool) + **out = **in + } + if in.WarmType != nil { + in, out := &in.WarmType, &out.WarmType + *out = new(string) + **out = **in + } + if in.ZoneAwarenessConfig != nil { + in, out := &in.ZoneAwarenessConfig, &out.ZoneAwarenessConfig + *out = new(ZoneAwarenessConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ZoneAwarenessEnabled != nil { + in, out := &in.ZoneAwarenessEnabled, 
&out.ZoneAwarenessEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigObservation. +func (in *ClusterConfigObservation) DeepCopy() *ClusterConfigObservation { + if in == nil { + return nil + } + out := new(ClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigParameters) DeepCopyInto(out *ClusterConfigParameters) { + *out = *in + if in.ColdStorageOptions != nil { + in, out := &in.ColdStorageOptions, &out.ColdStorageOptions + *out = new(ColdStorageOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.DedicatedMasterCount != nil { + in, out := &in.DedicatedMasterCount, &out.DedicatedMasterCount + *out = new(float64) + **out = **in + } + if in.DedicatedMasterEnabled != nil { + in, out := &in.DedicatedMasterEnabled, &out.DedicatedMasterEnabled + *out = new(bool) + **out = **in + } + if in.DedicatedMasterType != nil { + in, out := &in.DedicatedMasterType, &out.DedicatedMasterType + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.MultiAzWithStandbyEnabled != nil { + in, out := &in.MultiAzWithStandbyEnabled, &out.MultiAzWithStandbyEnabled + *out = new(bool) + **out = **in + } + if in.WarmCount != nil { + in, out := &in.WarmCount, &out.WarmCount + *out = new(float64) + **out = **in + } + if in.WarmEnabled != nil { + in, out := &in.WarmEnabled, &out.WarmEnabled + *out = new(bool) + **out = **in + } + if in.WarmType != nil { + in, out := &in.WarmType, &out.WarmType + *out = new(string) + **out = **in + } + if in.ZoneAwarenessConfig != nil { + in, out := &in.ZoneAwarenessConfig, 
&out.ZoneAwarenessConfig + *out = new(ZoneAwarenessConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.ZoneAwarenessEnabled != nil { + in, out := &in.ZoneAwarenessEnabled, &out.ZoneAwarenessEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigParameters. +func (in *ClusterConfigParameters) DeepCopy() *ClusterConfigParameters { + if in == nil { + return nil + } + out := new(ClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CognitoOptionsInitParameters) DeepCopyInto(out *CognitoOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdentityPoolID != nil { + in, out := &in.IdentityPoolID, &out.IdentityPoolID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoOptionsInitParameters. +func (in *CognitoOptionsInitParameters) DeepCopy() *CognitoOptionsInitParameters { + if in == nil { + return nil + } + out := new(CognitoOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CognitoOptionsObservation) DeepCopyInto(out *CognitoOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdentityPoolID != nil { + in, out := &in.IdentityPoolID, &out.IdentityPoolID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoOptionsObservation. +func (in *CognitoOptionsObservation) DeepCopy() *CognitoOptionsObservation { + if in == nil { + return nil + } + out := new(CognitoOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CognitoOptionsParameters) DeepCopyInto(out *CognitoOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdentityPoolID != nil { + in, out := &in.IdentityPoolID, &out.IdentityPoolID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.UserPoolID != nil { + in, out := &in.UserPoolID, &out.UserPoolID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoOptionsParameters. +func (in *CognitoOptionsParameters) DeepCopy() *CognitoOptionsParameters { + if in == nil { + return nil + } + out := new(CognitoOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColdStorageOptionsInitParameters) DeepCopyInto(out *ColdStorageOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColdStorageOptionsInitParameters. +func (in *ColdStorageOptionsInitParameters) DeepCopy() *ColdStorageOptionsInitParameters { + if in == nil { + return nil + } + out := new(ColdStorageOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColdStorageOptionsObservation) DeepCopyInto(out *ColdStorageOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColdStorageOptionsObservation. +func (in *ColdStorageOptionsObservation) DeepCopy() *ColdStorageOptionsObservation { + if in == nil { + return nil + } + out := new(ColdStorageOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColdStorageOptionsParameters) DeepCopyInto(out *ColdStorageOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColdStorageOptionsParameters. +func (in *ColdStorageOptionsParameters) DeepCopy() *ColdStorageOptionsParameters { + if in == nil { + return nil + } + out := new(ColdStorageOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Domain) DeepCopyInto(out *Domain) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Domain. +func (in *Domain) DeepCopy() *Domain { + if in == nil { + return nil + } + out := new(Domain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Domain) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainEndpointOptionsInitParameters) DeepCopyInto(out *DomainEndpointOptionsInitParameters) { + *out = *in + if in.CustomEndpoint != nil { + in, out := &in.CustomEndpoint, &out.CustomEndpoint + *out = new(string) + **out = **in + } + if in.CustomEndpointCertificateArn != nil { + in, out := &in.CustomEndpointCertificateArn, &out.CustomEndpointCertificateArn + *out = new(string) + **out = **in + } + if in.CustomEndpointEnabled != nil { + in, out := &in.CustomEndpointEnabled, &out.CustomEndpointEnabled + *out = new(bool) + **out = **in + } + if in.EnforceHTTPS != nil { + in, out := &in.EnforceHTTPS, &out.EnforceHTTPS + *out = new(bool) + **out = **in + } + if in.TLSSecurityPolicy != nil { + in, out := &in.TLSSecurityPolicy, &out.TLSSecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainEndpointOptionsInitParameters. 
+func (in *DomainEndpointOptionsInitParameters) DeepCopy() *DomainEndpointOptionsInitParameters { + if in == nil { + return nil + } + out := new(DomainEndpointOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainEndpointOptionsObservation) DeepCopyInto(out *DomainEndpointOptionsObservation) { + *out = *in + if in.CustomEndpoint != nil { + in, out := &in.CustomEndpoint, &out.CustomEndpoint + *out = new(string) + **out = **in + } + if in.CustomEndpointCertificateArn != nil { + in, out := &in.CustomEndpointCertificateArn, &out.CustomEndpointCertificateArn + *out = new(string) + **out = **in + } + if in.CustomEndpointEnabled != nil { + in, out := &in.CustomEndpointEnabled, &out.CustomEndpointEnabled + *out = new(bool) + **out = **in + } + if in.EnforceHTTPS != nil { + in, out := &in.EnforceHTTPS, &out.EnforceHTTPS + *out = new(bool) + **out = **in + } + if in.TLSSecurityPolicy != nil { + in, out := &in.TLSSecurityPolicy, &out.TLSSecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainEndpointOptionsObservation. +func (in *DomainEndpointOptionsObservation) DeepCopy() *DomainEndpointOptionsObservation { + if in == nil { + return nil + } + out := new(DomainEndpointOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainEndpointOptionsParameters) DeepCopyInto(out *DomainEndpointOptionsParameters) { + *out = *in + if in.CustomEndpoint != nil { + in, out := &in.CustomEndpoint, &out.CustomEndpoint + *out = new(string) + **out = **in + } + if in.CustomEndpointCertificateArn != nil { + in, out := &in.CustomEndpointCertificateArn, &out.CustomEndpointCertificateArn + *out = new(string) + **out = **in + } + if in.CustomEndpointEnabled != nil { + in, out := &in.CustomEndpointEnabled, &out.CustomEndpointEnabled + *out = new(bool) + **out = **in + } + if in.EnforceHTTPS != nil { + in, out := &in.EnforceHTTPS, &out.EnforceHTTPS + *out = new(bool) + **out = **in + } + if in.TLSSecurityPolicy != nil { + in, out := &in.TLSSecurityPolicy, &out.TLSSecurityPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainEndpointOptionsParameters. +func (in *DomainEndpointOptionsParameters) DeepCopy() *DomainEndpointOptionsParameters { + if in == nil { + return nil + } + out := new(DomainEndpointOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainInitParameters) DeepCopyInto(out *DomainInitParameters) { + *out = *in + if in.AdvancedOptions != nil { + in, out := &in.AdvancedOptions, &out.AdvancedOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AdvancedSecurityOptions != nil { + in, out := &in.AdvancedSecurityOptions, &out.AdvancedSecurityOptions + *out = new(AdvancedSecurityOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoTuneOptions != nil { + in, out := &in.AutoTuneOptions, &out.AutoTuneOptions + *out = new(AutoTuneOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = new(ClusterConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CognitoOptions != nil { + in, out := &in.CognitoOptions, &out.CognitoOptions + *out = new(CognitoOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainEndpointOptions != nil { + in, out := &in.DomainEndpointOptions, &out.DomainEndpointOptions + *out = new(DomainEndpointOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.EBSOptions != nil { + in, out := &in.EBSOptions, &out.EBSOptions + *out = new(EBSOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptAtRest != nil { + in, out := &in.EncryptAtRest, &out.EncryptAtRest + *out = new(EncryptAtRestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.LogPublishingOptions != nil { + in, out := &in.LogPublishingOptions, &out.LogPublishingOptions + *out = make([]LogPublishingOptionsInitParameters, len(*in)) 
+ for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeToNodeEncryption != nil { + in, out := &in.NodeToNodeEncryption, &out.NodeToNodeEncryption + *out = new(NodeToNodeEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OffPeakWindowOptions != nil { + in, out := &in.OffPeakWindowOptions, &out.OffPeakWindowOptions + *out = new(OffPeakWindowOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SnapshotOptions != nil { + in, out := &in.SnapshotOptions, &out.SnapshotOptions + *out = new(SnapshotOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SoftwareUpdateOptions != nil { + in, out := &in.SoftwareUpdateOptions, &out.SoftwareUpdateOptions + *out = new(SoftwareUpdateOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCOptions != nil { + in, out := &in.VPCOptions, &out.VPCOptions + *out = new(VPCOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainInitParameters. +func (in *DomainInitParameters) DeepCopy() *DomainInitParameters { + if in == nil { + return nil + } + out := new(DomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainList) DeepCopyInto(out *DomainList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Domain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainList. +func (in *DomainList) DeepCopy() *DomainList { + if in == nil { + return nil + } + out := new(DomainList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainObservation) DeepCopyInto(out *DomainObservation) { + *out = *in + if in.AccessPolicies != nil { + in, out := &in.AccessPolicies, &out.AccessPolicies + *out = new(string) + **out = **in + } + if in.AdvancedOptions != nil { + in, out := &in.AdvancedOptions, &out.AdvancedOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AdvancedSecurityOptions != nil { + in, out := &in.AdvancedSecurityOptions, &out.AdvancedSecurityOptions + *out = new(AdvancedSecurityOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoTuneOptions != nil { + in, out := &in.AutoTuneOptions, &out.AutoTuneOptions + *out = new(AutoTuneOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out 
= new(ClusterConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.CognitoOptions != nil { + in, out := &in.CognitoOptions, &out.CognitoOptions + *out = new(CognitoOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.DashboardEndpoint != nil { + in, out := &in.DashboardEndpoint, &out.DashboardEndpoint + *out = new(string) + **out = **in + } + if in.DomainEndpointOptions != nil { + in, out := &in.DomainEndpointOptions, &out.DomainEndpointOptions + *out = new(DomainEndpointOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.EBSOptions != nil { + in, out := &in.EBSOptions, &out.EBSOptions + *out = new(EBSOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.EncryptAtRest != nil { + in, out := &in.EncryptAtRest, &out.EncryptAtRest + *out = new(EncryptAtRestObservation) + (*in).DeepCopyInto(*out) + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KibanaEndpoint != nil { + in, out := &in.KibanaEndpoint, &out.KibanaEndpoint + *out = new(string) + **out = **in + } + if in.LogPublishingOptions != nil { + in, out := &in.LogPublishingOptions, &out.LogPublishingOptions + *out = make([]LogPublishingOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeToNodeEncryption != nil { + in, out := &in.NodeToNodeEncryption, &out.NodeToNodeEncryption + *out = new(NodeToNodeEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.OffPeakWindowOptions != nil { + in, out := &in.OffPeakWindowOptions, 
&out.OffPeakWindowOptions + *out = new(OffPeakWindowOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.SnapshotOptions != nil { + in, out := &in.SnapshotOptions, &out.SnapshotOptions + *out = new(SnapshotOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.SoftwareUpdateOptions != nil { + in, out := &in.SoftwareUpdateOptions, &out.SoftwareUpdateOptions + *out = new(SoftwareUpdateOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCOptions != nil { + in, out := &in.VPCOptions, &out.VPCOptions + *out = new(VPCOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainObservation. +func (in *DomainObservation) DeepCopy() *DomainObservation { + if in == nil { + return nil + } + out := new(DomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainParameters) DeepCopyInto(out *DomainParameters) { + *out = *in + if in.AdvancedOptions != nil { + in, out := &in.AdvancedOptions, &out.AdvancedOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AdvancedSecurityOptions != nil { + in, out := &in.AdvancedSecurityOptions, &out.AdvancedSecurityOptions + *out = new(AdvancedSecurityOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoTuneOptions != nil { + in, out := &in.AutoTuneOptions, &out.AutoTuneOptions + *out = new(AutoTuneOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = new(ClusterConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.CognitoOptions != nil { + in, out := &in.CognitoOptions, &out.CognitoOptions + *out = new(CognitoOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainEndpointOptions != nil { + in, out := &in.DomainEndpointOptions, &out.DomainEndpointOptions + *out = new(DomainEndpointOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.EBSOptions != nil { + in, out := &in.EBSOptions, &out.EBSOptions + *out = new(EBSOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptAtRest != nil { + in, out := &in.EncryptAtRest, &out.EncryptAtRest + *out = new(EncryptAtRestParameters) + (*in).DeepCopyInto(*out) + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.LogPublishingOptions != nil { + in, out := &in.LogPublishingOptions, &out.LogPublishingOptions + *out = make([]LogPublishingOptionsParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeToNodeEncryption != nil { + in, out := &in.NodeToNodeEncryption, &out.NodeToNodeEncryption + *out = new(NodeToNodeEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.OffPeakWindowOptions != nil { + in, out := &in.OffPeakWindowOptions, &out.OffPeakWindowOptions + *out = new(OffPeakWindowOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SnapshotOptions != nil { + in, out := &in.SnapshotOptions, &out.SnapshotOptions + *out = new(SnapshotOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.SoftwareUpdateOptions != nil { + in, out := &in.SoftwareUpdateOptions, &out.SoftwareUpdateOptions + *out = new(SoftwareUpdateOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCOptions != nil { + in, out := &in.VPCOptions, &out.VPCOptions + *out = new(VPCOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainParameters. +func (in *DomainParameters) DeepCopy() *DomainParameters { + if in == nil { + return nil + } + out := new(DomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSAMLOptions) DeepCopyInto(out *DomainSAMLOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptions. +func (in *DomainSAMLOptions) DeepCopy() *DomainSAMLOptions { + if in == nil { + return nil + } + out := new(DomainSAMLOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainSAMLOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSAMLOptionsInitParameters) DeepCopyInto(out *DomainSAMLOptionsInitParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainNameRef != nil { + in, out := &in.DomainNameRef, &out.DomainNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainNameSelector != nil { + in, out := &in.DomainNameSelector, &out.DomainNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SAMLOptions != nil { + in, out := &in.SAMLOptions, &out.SAMLOptions + *out = new(SAMLOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsInitParameters. +func (in *DomainSAMLOptionsInitParameters) DeepCopy() *DomainSAMLOptionsInitParameters { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSAMLOptionsList) DeepCopyInto(out *DomainSAMLOptionsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DomainSAMLOptions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsList. +func (in *DomainSAMLOptionsList) DeepCopy() *DomainSAMLOptionsList { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainSAMLOptionsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSAMLOptionsObservation) DeepCopyInto(out *DomainSAMLOptionsObservation) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.SAMLOptions != nil { + in, out := &in.SAMLOptions, &out.SAMLOptions + *out = new(SAMLOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsObservation. +func (in *DomainSAMLOptionsObservation) DeepCopy() *DomainSAMLOptionsObservation { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSAMLOptionsParameters) DeepCopyInto(out *DomainSAMLOptionsParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainNameRef != nil { + in, out := &in.DomainNameRef, &out.DomainNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainNameSelector != nil { + in, out := &in.DomainNameSelector, &out.DomainNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SAMLOptions != nil { + in, out := &in.SAMLOptions, &out.SAMLOptions + *out = new(SAMLOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsParameters. +func (in *DomainSAMLOptionsParameters) DeepCopy() *DomainSAMLOptionsParameters { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSAMLOptionsSpec) DeepCopyInto(out *DomainSAMLOptionsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsSpec. +func (in *DomainSAMLOptionsSpec) DeepCopy() *DomainSAMLOptionsSpec { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSAMLOptionsStatus) DeepCopyInto(out *DomainSAMLOptionsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSAMLOptionsStatus. +func (in *DomainSAMLOptionsStatus) DeepCopy() *DomainSAMLOptionsStatus { + if in == nil { + return nil + } + out := new(DomainSAMLOptionsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSpec) DeepCopyInto(out *DomainSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSpec. +func (in *DomainSpec) DeepCopy() *DomainSpec { + if in == nil { + return nil + } + out := new(DomainSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainStatus) DeepCopyInto(out *DomainStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainStatus. +func (in *DomainStatus) DeepCopy() *DomainStatus { + if in == nil { + return nil + } + out := new(DomainStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DurationInitParameters) DeepCopyInto(out *DurationInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DurationInitParameters. +func (in *DurationInitParameters) DeepCopy() *DurationInitParameters { + if in == nil { + return nil + } + out := new(DurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DurationObservation) DeepCopyInto(out *DurationObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DurationObservation. +func (in *DurationObservation) DeepCopy() *DurationObservation { + if in == nil { + return nil + } + out := new(DurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DurationParameters) DeepCopyInto(out *DurationParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DurationParameters. 
+func (in *DurationParameters) DeepCopy() *DurationParameters { + if in == nil { + return nil + } + out := new(DurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSOptionsInitParameters) DeepCopyInto(out *EBSOptionsInitParameters) { + *out = *in + if in.EBSEnabled != nil { + in, out := &in.EBSEnabled, &out.EBSEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSOptionsInitParameters. +func (in *EBSOptionsInitParameters) DeepCopy() *EBSOptionsInitParameters { + if in == nil { + return nil + } + out := new(EBSOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSOptionsObservation) DeepCopyInto(out *EBSOptionsObservation) { + *out = *in + if in.EBSEnabled != nil { + in, out := &in.EBSEnabled, &out.EBSEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSOptionsObservation. +func (in *EBSOptionsObservation) DeepCopy() *EBSOptionsObservation { + if in == nil { + return nil + } + out := new(EBSOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSOptionsParameters) DeepCopyInto(out *EBSOptionsParameters) { + *out = *in + if in.EBSEnabled != nil { + in, out := &in.EBSEnabled, &out.EBSEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSOptionsParameters. 
+func (in *EBSOptionsParameters) DeepCopy() *EBSOptionsParameters { + if in == nil { + return nil + } + out := new(EBSOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptAtRestInitParameters) DeepCopyInto(out *EncryptAtRestInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptAtRestInitParameters. +func (in *EncryptAtRestInitParameters) DeepCopy() *EncryptAtRestInitParameters { + if in == nil { + return nil + } + out := new(EncryptAtRestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptAtRestObservation) DeepCopyInto(out *EncryptAtRestObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptAtRestObservation. +func (in *EncryptAtRestObservation) DeepCopy() *EncryptAtRestObservation { + if in == nil { + return nil + } + out := new(EncryptAtRestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptAtRestParameters) DeepCopyInto(out *EncryptAtRestParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptAtRestParameters. +func (in *EncryptAtRestParameters) DeepCopy() *EncryptAtRestParameters { + if in == nil { + return nil + } + out := new(EncryptAtRestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdpInitParameters) DeepCopyInto(out *IdpInitParameters) { + *out = *in + if in.EntityID != nil { + in, out := &in.EntityID, &out.EntityID + *out = new(string) + **out = **in + } + if in.MetadataContent != nil { + in, out := &in.MetadataContent, &out.MetadataContent + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdpInitParameters. +func (in *IdpInitParameters) DeepCopy() *IdpInitParameters { + if in == nil { + return nil + } + out := new(IdpInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdpObservation) DeepCopyInto(out *IdpObservation) { + *out = *in + if in.EntityID != nil { + in, out := &in.EntityID, &out.EntityID + *out = new(string) + **out = **in + } + if in.MetadataContent != nil { + in, out := &in.MetadataContent, &out.MetadataContent + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdpObservation. 
+func (in *IdpObservation) DeepCopy() *IdpObservation { + if in == nil { + return nil + } + out := new(IdpObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdpParameters) DeepCopyInto(out *IdpParameters) { + *out = *in + if in.EntityID != nil { + in, out := &in.EntityID, &out.EntityID + *out = new(string) + **out = **in + } + if in.MetadataContent != nil { + in, out := &in.MetadataContent, &out.MetadataContent + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdpParameters. +func (in *IdpParameters) DeepCopy() *IdpParameters { + if in == nil { + return nil + } + out := new(IdpParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogPublishingOptionsInitParameters) DeepCopyInto(out *LogPublishingOptionsInitParameters) { + *out = *in + if in.CloudwatchLogGroupArn != nil { + in, out := &in.CloudwatchLogGroupArn, &out.CloudwatchLogGroupArn + *out = new(string) + **out = **in + } + if in.CloudwatchLogGroupArnRef != nil { + in, out := &in.CloudwatchLogGroupArnRef, &out.CloudwatchLogGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLogGroupArnSelector != nil { + in, out := &in.CloudwatchLogGroupArnSelector, &out.CloudwatchLogGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogPublishingOptionsInitParameters. 
+func (in *LogPublishingOptionsInitParameters) DeepCopy() *LogPublishingOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogPublishingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogPublishingOptionsObservation) DeepCopyInto(out *LogPublishingOptionsObservation) { + *out = *in + if in.CloudwatchLogGroupArn != nil { + in, out := &in.CloudwatchLogGroupArn, &out.CloudwatchLogGroupArn + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogPublishingOptionsObservation. +func (in *LogPublishingOptionsObservation) DeepCopy() *LogPublishingOptionsObservation { + if in == nil { + return nil + } + out := new(LogPublishingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogPublishingOptionsParameters) DeepCopyInto(out *LogPublishingOptionsParameters) { + *out = *in + if in.CloudwatchLogGroupArn != nil { + in, out := &in.CloudwatchLogGroupArn, &out.CloudwatchLogGroupArn + *out = new(string) + **out = **in + } + if in.CloudwatchLogGroupArnRef != nil { + in, out := &in.CloudwatchLogGroupArnRef, &out.CloudwatchLogGroupArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CloudwatchLogGroupArnSelector != nil { + in, out := &in.CloudwatchLogGroupArnSelector, &out.CloudwatchLogGroupArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogType != nil { + in, out := &in.LogType, &out.LogType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogPublishingOptionsParameters. +func (in *LogPublishingOptionsParameters) DeepCopy() *LogPublishingOptionsParameters { + if in == nil { + return nil + } + out := new(LogPublishingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceScheduleInitParameters) DeepCopyInto(out *MaintenanceScheduleInitParameters) { + *out = *in + if in.CronExpressionForRecurrence != nil { + in, out := &in.CronExpressionForRecurrence, &out.CronExpressionForRecurrence + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(DurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StartAt != nil { + in, out := &in.StartAt, &out.StartAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleInitParameters. 
+func (in *MaintenanceScheduleInitParameters) DeepCopy() *MaintenanceScheduleInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceScheduleObservation) DeepCopyInto(out *MaintenanceScheduleObservation) { + *out = *in + if in.CronExpressionForRecurrence != nil { + in, out := &in.CronExpressionForRecurrence, &out.CronExpressionForRecurrence + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(DurationObservation) + (*in).DeepCopyInto(*out) + } + if in.StartAt != nil { + in, out := &in.StartAt, &out.StartAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleObservation. +func (in *MaintenanceScheduleObservation) DeepCopy() *MaintenanceScheduleObservation { + if in == nil { + return nil + } + out := new(MaintenanceScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceScheduleParameters) DeepCopyInto(out *MaintenanceScheduleParameters) { + *out = *in + if in.CronExpressionForRecurrence != nil { + in, out := &in.CronExpressionForRecurrence, &out.CronExpressionForRecurrence + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(DurationParameters) + (*in).DeepCopyInto(*out) + } + if in.StartAt != nil { + in, out := &in.StartAt, &out.StartAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceScheduleParameters. 
+func (in *MaintenanceScheduleParameters) DeepCopy() *MaintenanceScheduleParameters { + if in == nil { + return nil + } + out := new(MaintenanceScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserOptionsInitParameters) DeepCopyInto(out *MasterUserOptionsInitParameters) { + *out = *in + if in.MasterUserArn != nil { + in, out := &in.MasterUserArn, &out.MasterUserArn + *out = new(string) + **out = **in + } + if in.MasterUserName != nil { + in, out := &in.MasterUserName, &out.MasterUserName + *out = new(string) + **out = **in + } + if in.MasterUserPasswordSecretRef != nil { + in, out := &in.MasterUserPasswordSecretRef, &out.MasterUserPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserOptionsInitParameters. +func (in *MasterUserOptionsInitParameters) DeepCopy() *MasterUserOptionsInitParameters { + if in == nil { + return nil + } + out := new(MasterUserOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserOptionsObservation) DeepCopyInto(out *MasterUserOptionsObservation) { + *out = *in + if in.MasterUserArn != nil { + in, out := &in.MasterUserArn, &out.MasterUserArn + *out = new(string) + **out = **in + } + if in.MasterUserName != nil { + in, out := &in.MasterUserName, &out.MasterUserName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserOptionsObservation. 
+func (in *MasterUserOptionsObservation) DeepCopy() *MasterUserOptionsObservation { + if in == nil { + return nil + } + out := new(MasterUserOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserOptionsParameters) DeepCopyInto(out *MasterUserOptionsParameters) { + *out = *in + if in.MasterUserArn != nil { + in, out := &in.MasterUserArn, &out.MasterUserArn + *out = new(string) + **out = **in + } + if in.MasterUserName != nil { + in, out := &in.MasterUserName, &out.MasterUserName + *out = new(string) + **out = **in + } + if in.MasterUserPasswordSecretRef != nil { + in, out := &in.MasterUserPasswordSecretRef, &out.MasterUserPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserOptionsParameters. +func (in *MasterUserOptionsParameters) DeepCopy() *MasterUserOptionsParameters { + if in == nil { + return nil + } + out := new(MasterUserOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeToNodeEncryptionInitParameters) DeepCopyInto(out *NodeToNodeEncryptionInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeToNodeEncryptionInitParameters. +func (in *NodeToNodeEncryptionInitParameters) DeepCopy() *NodeToNodeEncryptionInitParameters { + if in == nil { + return nil + } + out := new(NodeToNodeEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeToNodeEncryptionObservation) DeepCopyInto(out *NodeToNodeEncryptionObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeToNodeEncryptionObservation. +func (in *NodeToNodeEncryptionObservation) DeepCopy() *NodeToNodeEncryptionObservation { + if in == nil { + return nil + } + out := new(NodeToNodeEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeToNodeEncryptionParameters) DeepCopyInto(out *NodeToNodeEncryptionParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeToNodeEncryptionParameters. +func (in *NodeToNodeEncryptionParameters) DeepCopy() *NodeToNodeEncryptionParameters { + if in == nil { + return nil + } + out := new(NodeToNodeEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OffPeakWindowInitParameters) DeepCopyInto(out *OffPeakWindowInitParameters) { + *out = *in + if in.WindowStartTime != nil { + in, out := &in.WindowStartTime, &out.WindowStartTime + *out = new(WindowStartTimeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OffPeakWindowInitParameters. 
+func (in *OffPeakWindowInitParameters) DeepCopy() *OffPeakWindowInitParameters { + if in == nil { + return nil + } + out := new(OffPeakWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OffPeakWindowObservation) DeepCopyInto(out *OffPeakWindowObservation) { + *out = *in + if in.WindowStartTime != nil { + in, out := &in.WindowStartTime, &out.WindowStartTime + *out = new(WindowStartTimeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OffPeakWindowObservation. +func (in *OffPeakWindowObservation) DeepCopy() *OffPeakWindowObservation { + if in == nil { + return nil + } + out := new(OffPeakWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OffPeakWindowOptionsInitParameters) DeepCopyInto(out *OffPeakWindowOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OffPeakWindow != nil { + in, out := &in.OffPeakWindow, &out.OffPeakWindow + *out = new(OffPeakWindowInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OffPeakWindowOptionsInitParameters. +func (in *OffPeakWindowOptionsInitParameters) DeepCopy() *OffPeakWindowOptionsInitParameters { + if in == nil { + return nil + } + out := new(OffPeakWindowOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OffPeakWindowOptionsObservation) DeepCopyInto(out *OffPeakWindowOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OffPeakWindow != nil { + in, out := &in.OffPeakWindow, &out.OffPeakWindow + *out = new(OffPeakWindowObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OffPeakWindowOptionsObservation. +func (in *OffPeakWindowOptionsObservation) DeepCopy() *OffPeakWindowOptionsObservation { + if in == nil { + return nil + } + out := new(OffPeakWindowOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OffPeakWindowOptionsParameters) DeepCopyInto(out *OffPeakWindowOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OffPeakWindow != nil { + in, out := &in.OffPeakWindow, &out.OffPeakWindow + *out = new(OffPeakWindowParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OffPeakWindowOptionsParameters. +func (in *OffPeakWindowOptionsParameters) DeepCopy() *OffPeakWindowOptionsParameters { + if in == nil { + return nil + } + out := new(OffPeakWindowOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OffPeakWindowParameters) DeepCopyInto(out *OffPeakWindowParameters) { + *out = *in + if in.WindowStartTime != nil { + in, out := &in.WindowStartTime, &out.WindowStartTime + *out = new(WindowStartTimeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OffPeakWindowParameters. +func (in *OffPeakWindowParameters) DeepCopy() *OffPeakWindowParameters { + if in == nil { + return nil + } + out := new(OffPeakWindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SAMLOptionsInitParameters) DeepCopyInto(out *SAMLOptionsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Idp != nil { + in, out := &in.Idp, &out.Idp + *out = new(IdpInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MasterBackendRole != nil { + in, out := &in.MasterBackendRole, &out.MasterBackendRole + *out = new(string) + **out = **in + } + if in.MasterUserNameSecretRef != nil { + in, out := &in.MasterUserNameSecretRef, &out.MasterUserNameSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RolesKey != nil { + in, out := &in.RolesKey, &out.RolesKey + *out = new(string) + **out = **in + } + if in.SessionTimeoutMinutes != nil { + in, out := &in.SessionTimeoutMinutes, &out.SessionTimeoutMinutes + *out = new(float64) + **out = **in + } + if in.SubjectKey != nil { + in, out := &in.SubjectKey, &out.SubjectKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsInitParameters. 
+func (in *SAMLOptionsInitParameters) DeepCopy() *SAMLOptionsInitParameters { + if in == nil { + return nil + } + out := new(SAMLOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SAMLOptionsObservation) DeepCopyInto(out *SAMLOptionsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Idp != nil { + in, out := &in.Idp, &out.Idp + *out = new(IdpObservation) + (*in).DeepCopyInto(*out) + } + if in.MasterBackendRole != nil { + in, out := &in.MasterBackendRole, &out.MasterBackendRole + *out = new(string) + **out = **in + } + if in.RolesKey != nil { + in, out := &in.RolesKey, &out.RolesKey + *out = new(string) + **out = **in + } + if in.SessionTimeoutMinutes != nil { + in, out := &in.SessionTimeoutMinutes, &out.SessionTimeoutMinutes + *out = new(float64) + **out = **in + } + if in.SubjectKey != nil { + in, out := &in.SubjectKey, &out.SubjectKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsObservation. +func (in *SAMLOptionsObservation) DeepCopy() *SAMLOptionsObservation { + if in == nil { + return nil + } + out := new(SAMLOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SAMLOptionsParameters) DeepCopyInto(out *SAMLOptionsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Idp != nil { + in, out := &in.Idp, &out.Idp + *out = new(IdpParameters) + (*in).DeepCopyInto(*out) + } + if in.MasterBackendRole != nil { + in, out := &in.MasterBackendRole, &out.MasterBackendRole + *out = new(string) + **out = **in + } + if in.MasterUserNameSecretRef != nil { + in, out := &in.MasterUserNameSecretRef, &out.MasterUserNameSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RolesKey != nil { + in, out := &in.RolesKey, &out.RolesKey + *out = new(string) + **out = **in + } + if in.SessionTimeoutMinutes != nil { + in, out := &in.SessionTimeoutMinutes, &out.SessionTimeoutMinutes + *out = new(float64) + **out = **in + } + if in.SubjectKey != nil { + in, out := &in.SubjectKey, &out.SubjectKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsParameters. +func (in *SAMLOptionsParameters) DeepCopy() *SAMLOptionsParameters { + if in == nil { + return nil + } + out := new(SAMLOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotOptionsInitParameters) DeepCopyInto(out *SnapshotOptionsInitParameters) { + *out = *in + if in.AutomatedSnapshotStartHour != nil { + in, out := &in.AutomatedSnapshotStartHour, &out.AutomatedSnapshotStartHour + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotOptionsInitParameters. 
+func (in *SnapshotOptionsInitParameters) DeepCopy() *SnapshotOptionsInitParameters { + if in == nil { + return nil + } + out := new(SnapshotOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotOptionsObservation) DeepCopyInto(out *SnapshotOptionsObservation) { + *out = *in + if in.AutomatedSnapshotStartHour != nil { + in, out := &in.AutomatedSnapshotStartHour, &out.AutomatedSnapshotStartHour + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotOptionsObservation. +func (in *SnapshotOptionsObservation) DeepCopy() *SnapshotOptionsObservation { + if in == nil { + return nil + } + out := new(SnapshotOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotOptionsParameters) DeepCopyInto(out *SnapshotOptionsParameters) { + *out = *in + if in.AutomatedSnapshotStartHour != nil { + in, out := &in.AutomatedSnapshotStartHour, &out.AutomatedSnapshotStartHour + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotOptionsParameters. +func (in *SnapshotOptionsParameters) DeepCopy() *SnapshotOptionsParameters { + if in == nil { + return nil + } + out := new(SnapshotOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SoftwareUpdateOptionsInitParameters) DeepCopyInto(out *SoftwareUpdateOptionsInitParameters) { + *out = *in + if in.AutoSoftwareUpdateEnabled != nil { + in, out := &in.AutoSoftwareUpdateEnabled, &out.AutoSoftwareUpdateEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareUpdateOptionsInitParameters. +func (in *SoftwareUpdateOptionsInitParameters) DeepCopy() *SoftwareUpdateOptionsInitParameters { + if in == nil { + return nil + } + out := new(SoftwareUpdateOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoftwareUpdateOptionsObservation) DeepCopyInto(out *SoftwareUpdateOptionsObservation) { + *out = *in + if in.AutoSoftwareUpdateEnabled != nil { + in, out := &in.AutoSoftwareUpdateEnabled, &out.AutoSoftwareUpdateEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareUpdateOptionsObservation. +func (in *SoftwareUpdateOptionsObservation) DeepCopy() *SoftwareUpdateOptionsObservation { + if in == nil { + return nil + } + out := new(SoftwareUpdateOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoftwareUpdateOptionsParameters) DeepCopyInto(out *SoftwareUpdateOptionsParameters) { + *out = *in + if in.AutoSoftwareUpdateEnabled != nil { + in, out := &in.AutoSoftwareUpdateEnabled, &out.AutoSoftwareUpdateEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareUpdateOptionsParameters. 
+func (in *SoftwareUpdateOptionsParameters) DeepCopy() *SoftwareUpdateOptionsParameters { + if in == nil { + return nil + } + out := new(SoftwareUpdateOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCOptionsInitParameters) DeepCopyInto(out *VPCOptionsInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCOptionsInitParameters. +func (in *VPCOptionsInitParameters) DeepCopy() *VPCOptionsInitParameters { + if in == nil { + return nil + } + out := new(VPCOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCOptionsObservation) DeepCopyInto(out *VPCOptionsObservation) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCOptionsObservation. +func (in *VPCOptionsObservation) DeepCopy() *VPCOptionsObservation { + if in == nil { + return nil + } + out := new(VPCOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCOptionsParameters) DeepCopyInto(out *VPCOptionsParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCOptionsParameters. +func (in *VPCOptionsParameters) DeepCopy() *VPCOptionsParameters { + if in == nil { + return nil + } + out := new(VPCOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowStartTimeInitParameters) DeepCopyInto(out *WindowStartTimeInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowStartTimeInitParameters. +func (in *WindowStartTimeInitParameters) DeepCopy() *WindowStartTimeInitParameters { + if in == nil { + return nil + } + out := new(WindowStartTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowStartTimeObservation) DeepCopyInto(out *WindowStartTimeObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowStartTimeObservation. +func (in *WindowStartTimeObservation) DeepCopy() *WindowStartTimeObservation { + if in == nil { + return nil + } + out := new(WindowStartTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowStartTimeParameters) DeepCopyInto(out *WindowStartTimeParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = new(float64) + **out = **in + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowStartTimeParameters. +func (in *WindowStartTimeParameters) DeepCopy() *WindowStartTimeParameters { + if in == nil { + return nil + } + out := new(WindowStartTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneAwarenessConfigInitParameters) DeepCopyInto(out *ZoneAwarenessConfigInitParameters) { + *out = *in + if in.AvailabilityZoneCount != nil { + in, out := &in.AvailabilityZoneCount, &out.AvailabilityZoneCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneAwarenessConfigInitParameters. 
+func (in *ZoneAwarenessConfigInitParameters) DeepCopy() *ZoneAwarenessConfigInitParameters { + if in == nil { + return nil + } + out := new(ZoneAwarenessConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneAwarenessConfigObservation) DeepCopyInto(out *ZoneAwarenessConfigObservation) { + *out = *in + if in.AvailabilityZoneCount != nil { + in, out := &in.AvailabilityZoneCount, &out.AvailabilityZoneCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneAwarenessConfigObservation. +func (in *ZoneAwarenessConfigObservation) DeepCopy() *ZoneAwarenessConfigObservation { + if in == nil { + return nil + } + out := new(ZoneAwarenessConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneAwarenessConfigParameters) DeepCopyInto(out *ZoneAwarenessConfigParameters) { + *out = *in + if in.AvailabilityZoneCount != nil { + in, out := &in.AvailabilityZoneCount, &out.AvailabilityZoneCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneAwarenessConfigParameters. +func (in *ZoneAwarenessConfigParameters) DeepCopy() *ZoneAwarenessConfigParameters { + if in == nil { + return nil + } + out := new(ZoneAwarenessConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/opensearch/v1beta2/zz_generated.managed.go b/apis/opensearch/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..3ba7ff25a9 --- /dev/null +++ b/apis/opensearch/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. 
DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Domain. +func (mg *Domain) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Domain. +func (mg *Domain) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Domain. +func (mg *Domain) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Domain. +func (mg *Domain) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Domain. +func (mg *Domain) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Domain. +func (mg *Domain) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Domain. +func (mg *Domain) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Domain. +func (mg *Domain) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Domain. +func (mg *Domain) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Domain. +func (mg *Domain) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Domain. +func (mg *Domain) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Domain. 
+func (mg *Domain) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DomainSAMLOptions. 
+func (mg *DomainSAMLOptions) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/opensearch/v1beta2/zz_generated.managedlist.go b/apis/opensearch/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..6e3b0b4953 --- /dev/null +++ b/apis/opensearch/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DomainList. +func (l *DomainList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DomainSAMLOptionsList. +func (l *DomainSAMLOptionsList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/opensearch/v1beta2/zz_generated.resolvers.go b/apis/opensearch/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..6ada605bd1 --- /dev/null +++ b/apis/opensearch/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,124 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Domain. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Domain) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.LogPublishingOptions); i3++ { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef, + Selector: mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn") + } + mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.LogPublishingOptions); i3++ { + { + m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef, + Selector: mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn") + } + mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this DomainSAMLOptions. +func (mg *DomainSAMLOptions) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("opensearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DomainName), + Extract: resource.ExtractParamPath("domain_name", false), + Reference: mg.Spec.ForProvider.DomainNameRef, + Selector: mg.Spec.ForProvider.DomainNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DomainName") + } + mg.Spec.ForProvider.DomainName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DomainNameRef = 
rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("opensearch.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DomainName), + Extract: resource.ExtractParamPath("domain_name", false), + Reference: mg.Spec.InitProvider.DomainNameRef, + Selector: mg.Spec.InitProvider.DomainNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DomainName") + } + mg.Spec.InitProvider.DomainName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DomainNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/opensearch/v1beta2/zz_groupversion_info.go b/apis/opensearch/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..7dfd9fa7af --- /dev/null +++ b/apis/opensearch/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=opensearch.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "opensearch.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/opensearchserverless/v1beta1/zz_generated.conversion_hubs.go b/apis/opensearchserverless/v1beta1/zz_generated.conversion_hubs.go index 89e575faa8..c32f5352fd 100755 --- a/apis/opensearchserverless/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/opensearchserverless/v1beta1/zz_generated.conversion_hubs.go @@ -15,9 +15,6 @@ func (tr *Collection) Hub() {} // Hub marks this type as a conversion hub. func (tr *LifecyclePolicy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SecurityConfig) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SecurityPolicy) Hub() {} diff --git a/apis/opensearchserverless/v1beta1/zz_generated.conversion_spokes.go b/apis/opensearchserverless/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..02d2057ba6 --- /dev/null +++ b/apis/opensearchserverless/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this SecurityConfig to the hub type. +func (tr *SecurityConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SecurityConfig type. 
+func (tr *SecurityConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/opensearchserverless/v1beta2/zz_generated.conversion_hubs.go b/apis/opensearchserverless/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..607288f4ed --- /dev/null +++ b/apis/opensearchserverless/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *SecurityConfig) Hub() {} diff --git a/apis/opensearchserverless/v1beta2/zz_generated.deepcopy.go b/apis/opensearchserverless/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..89a77e6677 --- /dev/null +++ b/apis/opensearchserverless/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,312 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SAMLOptionsInitParameters) DeepCopyInto(out *SAMLOptionsInitParameters) { + *out = *in + if in.GroupAttribute != nil { + in, out := &in.GroupAttribute, &out.GroupAttribute + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.UserAttribute != nil { + in, out := &in.UserAttribute, &out.UserAttribute + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsInitParameters. +func (in *SAMLOptionsInitParameters) DeepCopy() *SAMLOptionsInitParameters { + if in == nil { + return nil + } + out := new(SAMLOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SAMLOptionsObservation) DeepCopyInto(out *SAMLOptionsObservation) { + *out = *in + if in.GroupAttribute != nil { + in, out := &in.GroupAttribute, &out.GroupAttribute + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.UserAttribute != nil { + in, out := &in.UserAttribute, &out.UserAttribute + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsObservation. +func (in *SAMLOptionsObservation) DeepCopy() *SAMLOptionsObservation { + if in == nil { + return nil + } + out := new(SAMLOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SAMLOptionsParameters) DeepCopyInto(out *SAMLOptionsParameters) { + *out = *in + if in.GroupAttribute != nil { + in, out := &in.GroupAttribute, &out.GroupAttribute + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.SessionTimeout != nil { + in, out := &in.SessionTimeout, &out.SessionTimeout + *out = new(float64) + **out = **in + } + if in.UserAttribute != nil { + in, out := &in.UserAttribute, &out.UserAttribute + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsParameters. +func (in *SAMLOptionsParameters) DeepCopy() *SAMLOptionsParameters { + if in == nil { + return nil + } + out := new(SAMLOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfig) DeepCopyInto(out *SecurityConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfig. +func (in *SecurityConfig) DeepCopy() *SecurityConfig { + if in == nil { + return nil + } + out := new(SecurityConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityConfigInitParameters) DeepCopyInto(out *SecurityConfigInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.SAMLOptions != nil { + in, out := &in.SAMLOptions, &out.SAMLOptions + *out = new(SAMLOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigInitParameters. +func (in *SecurityConfigInitParameters) DeepCopy() *SecurityConfigInitParameters { + if in == nil { + return nil + } + out := new(SecurityConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigList) DeepCopyInto(out *SecurityConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecurityConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigList. +func (in *SecurityConfigList) DeepCopy() *SecurityConfigList { + if in == nil { + return nil + } + out := new(SecurityConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityConfigObservation) DeepCopyInto(out *SecurityConfigObservation) { + *out = *in + if in.ConfigVersion != nil { + in, out := &in.ConfigVersion, &out.ConfigVersion + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.SAMLOptions != nil { + in, out := &in.SAMLOptions, &out.SAMLOptions + *out = new(SAMLOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigObservation. +func (in *SecurityConfigObservation) DeepCopy() *SecurityConfigObservation { + if in == nil { + return nil + } + out := new(SecurityConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigParameters) DeepCopyInto(out *SecurityConfigParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SAMLOptions != nil { + in, out := &in.SAMLOptions, &out.SAMLOptions + *out = new(SAMLOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigParameters. 
+func (in *SecurityConfigParameters) DeepCopy() *SecurityConfigParameters { + if in == nil { + return nil + } + out := new(SecurityConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigSpec) DeepCopyInto(out *SecurityConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigSpec. +func (in *SecurityConfigSpec) DeepCopy() *SecurityConfigSpec { + if in == nil { + return nil + } + out := new(SecurityConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigStatus) DeepCopyInto(out *SecurityConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigStatus. +func (in *SecurityConfigStatus) DeepCopy() *SecurityConfigStatus { + if in == nil { + return nil + } + out := new(SecurityConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/opensearchserverless/v1beta2/zz_generated.managed.go b/apis/opensearchserverless/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..8fb23e1f30 --- /dev/null +++ b/apis/opensearchserverless/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this SecurityConfig. 
+func (mg *SecurityConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SecurityConfig. +func (mg *SecurityConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SecurityConfig. +func (mg *SecurityConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SecurityConfig. +func (mg *SecurityConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SecurityConfig. +func (mg *SecurityConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SecurityConfig. +func (mg *SecurityConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SecurityConfig. +func (mg *SecurityConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SecurityConfig. +func (mg *SecurityConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SecurityConfig. +func (mg *SecurityConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SecurityConfig. +func (mg *SecurityConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SecurityConfig. +func (mg *SecurityConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SecurityConfig. 
+func (mg *SecurityConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/opensearchserverless/v1beta2/zz_generated.managedlist.go b/apis/opensearchserverless/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..086a8fdeab --- /dev/null +++ b/apis/opensearchserverless/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this SecurityConfigList. +func (l *SecurityConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/opensearchserverless/v1beta2/zz_groupversion_info.go b/apis/opensearchserverless/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..240888af55 --- /dev/null +++ b/apis/opensearchserverless/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=opensearchserverless.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "opensearchserverless.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/opensearchserverless/v1beta2/zz_securityconfig_terraformed.go b/apis/opensearchserverless/v1beta2/zz_securityconfig_terraformed.go new file mode 100755 index 0000000000..2f8bd64d95 --- /dev/null +++ b/apis/opensearchserverless/v1beta2/zz_securityconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SecurityConfig +func (mg *SecurityConfig) GetTerraformResourceType() string { + return "aws_opensearchserverless_security_config" +} + +// GetConnectionDetailsMapping for this SecurityConfig +func (tr *SecurityConfig) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SecurityConfig +func (tr *SecurityConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SecurityConfig +func (tr *SecurityConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this SecurityConfig
+func (tr *SecurityConfig) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this SecurityConfig
+func (tr *SecurityConfig) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this SecurityConfig
+func (tr *SecurityConfig) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SecurityConfig
+func (tr *SecurityConfig) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SecurityConfig
+func (tr *SecurityConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SecurityConfig using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SecurityConfig) LateInitialize(attrs []byte) (bool, error) { + params := &SecurityConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SecurityConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opensearchserverless/v1beta2/zz_securityconfig_types.go b/apis/opensearchserverless/v1beta2/zz_securityconfig_types.go new file mode 100755 index 0000000000..30b4d306b9 --- /dev/null +++ b/apis/opensearchserverless/v1beta2/zz_securityconfig_types.go @@ -0,0 +1,170 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SAMLOptionsInitParameters struct { + + // Group attribute for this SAML integration. + GroupAttribute *string `json:"groupAttribute,omitempty" tf:"group_attribute,omitempty"` + + // The XML IdP metadata file generated from your identity provider. 
+ Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Session timeout, in minutes. Minimum is 5 minutes and maximum is 720 minutes (12 hours). Default is 60 minutes. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // User attribute for this SAML integration. + UserAttribute *string `json:"userAttribute,omitempty" tf:"user_attribute,omitempty"` +} + +type SAMLOptionsObservation struct { + + // Group attribute for this SAML integration. + GroupAttribute *string `json:"groupAttribute,omitempty" tf:"group_attribute,omitempty"` + + // The XML IdP metadata file generated from your identity provider. + Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Session timeout, in minutes. Minimum is 5 minutes and maximum is 720 minutes (12 hours). Default is 60 minutes. + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // User attribute for this SAML integration. + UserAttribute *string `json:"userAttribute,omitempty" tf:"user_attribute,omitempty"` +} + +type SAMLOptionsParameters struct { + + // Group attribute for this SAML integration. + // +kubebuilder:validation:Optional + GroupAttribute *string `json:"groupAttribute,omitempty" tf:"group_attribute,omitempty"` + + // The XML IdP metadata file generated from your identity provider. + // +kubebuilder:validation:Optional + Metadata *string `json:"metadata" tf:"metadata,omitempty"` + + // Session timeout, in minutes. Minimum is 5 minutes and maximum is 720 minutes (12 hours). Default is 60 minutes. + // +kubebuilder:validation:Optional + SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` + + // User attribute for this SAML integration. 
+ // +kubebuilder:validation:Optional + UserAttribute *string `json:"userAttribute,omitempty" tf:"user_attribute,omitempty"` +} + +type SecurityConfigInitParameters struct { + + // Description of the security configuration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Configuration block for SAML options. + SAMLOptions *SAMLOptionsInitParameters `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` +} + +type SecurityConfigObservation struct { + + // Version of the configuration. + ConfigVersion *string `json:"configVersion,omitempty" tf:"config_version,omitempty"` + + // Description of the security configuration. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block for SAML options. + SAMLOptions *SAMLOptionsObservation `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` + + // Type of configuration. Must be saml. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SecurityConfigParameters struct { + + // Description of the security configuration. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for SAML options. + // +kubebuilder:validation:Optional + SAMLOptions *SAMLOptionsParameters `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` + + // Type of configuration. Must be saml. + // +kubebuilder:validation:Required + Type *string `json:"type" tf:"type,omitempty"` +} + +// SecurityConfigSpec defines the desired state of SecurityConfig +type SecurityConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SecurityConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SecurityConfigInitParameters `json:"initProvider,omitempty"` +} + +// SecurityConfigStatus defines the observed state of SecurityConfig. +type SecurityConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SecurityConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SecurityConfig is the Schema for the SecurityConfigs API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SecurityConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.samlOptions) || (has(self.initProvider) && has(self.initProvider.samlOptions))",message="spec.forProvider.samlOptions is a required parameter" + Spec SecurityConfigSpec `json:"spec"` + Status SecurityConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SecurityConfigList contains a list of SecurityConfigs +type SecurityConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SecurityConfig `json:"items"` +} + +// Repository type metadata. +var ( + SecurityConfig_Kind = "SecurityConfig" + SecurityConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SecurityConfig_Kind}.String() + SecurityConfig_KindAPIVersion = SecurityConfig_Kind + "." 
+ CRDGroupVersion.String() + SecurityConfig_GroupVersionKind = CRDGroupVersion.WithKind(SecurityConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&SecurityConfig{}, &SecurityConfigList{}) +} diff --git a/apis/opsworks/v1beta1/zz_application_types.go b/apis/opsworks/v1beta1/zz_application_types.go index c119b4b986..d58eef9800 100755 --- a/apis/opsworks/v1beta1/zz_application_types.go +++ b/apis/opsworks/v1beta1/zz_application_types.go @@ -124,7 +124,7 @@ type ApplicationInitParameters struct { ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` // ID of the stack the application will belong to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` @@ -260,7 +260,7 @@ type ApplicationParameters struct { ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` // ID of the stack the application will belong to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` diff --git a/apis/opsworks/v1beta1/zz_generated.conversion_hubs.go b/apis/opsworks/v1beta1/zz_generated.conversion_hubs.go index e787a1fd80..e3b4578442 100755 --- a/apis/opsworks/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/opsworks/v1beta1/zz_generated.conversion_hubs.go @@ -9,50 +9,14 @@ package v1beta1 // Hub marks this type as a conversion hub. 
func (tr *Application) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *CustomLayer) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *EcsClusterLayer) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *GangliaLayer) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *HAProxyLayer) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Instance) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *JavaAppLayer) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MemcachedLayer) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MySQLLayer) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *NodeJSAppLayer) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Permission) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *PHPAppLayer) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *RailsAppLayer) Hub() {} - // Hub marks this type as a conversion hub. func (tr *RDSDBInstance) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Stack) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *StaticWebLayer) Hub() {} - // Hub marks this type as a conversion hub. func (tr *UserProfile) Hub() {} diff --git a/apis/opsworks/v1beta1/zz_generated.conversion_spokes.go b/apis/opsworks/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..2cb6cb7e16 --- /dev/null +++ b/apis/opsworks/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,254 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this CustomLayer to the hub type. +func (tr *CustomLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CustomLayer type. +func (tr *CustomLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this EcsClusterLayer to the hub type. +func (tr *EcsClusterLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EcsClusterLayer type. 
+func (tr *EcsClusterLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this GangliaLayer to the hub type. +func (tr *GangliaLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the GangliaLayer type. +func (tr *GangliaLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this HAProxyLayer to the hub type. +func (tr *HAProxyLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the HAProxyLayer type. 
+func (tr *HAProxyLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this JavaAppLayer to the hub type. +func (tr *JavaAppLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the JavaAppLayer type. +func (tr *JavaAppLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MemcachedLayer to the hub type. +func (tr *MemcachedLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MemcachedLayer type. 
+func (tr *MemcachedLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MySQLLayer to the hub type. +func (tr *MySQLLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MySQLLayer type. +func (tr *MySQLLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this NodeJSAppLayer to the hub type. +func (tr *NodeJSAppLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the NodeJSAppLayer type. 
+func (tr *NodeJSAppLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this PHPAppLayer to the hub type. +func (tr *PHPAppLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the PHPAppLayer type. +func (tr *PHPAppLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this RailsAppLayer to the hub type. +func (tr *RailsAppLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RailsAppLayer type. 
+func (tr *RailsAppLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Stack to the hub type. +func (tr *Stack) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Stack type. +func (tr *Stack) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this StaticWebLayer to the hub type. +func (tr *StaticWebLayer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StaticWebLayer type. 
+func (tr *StaticWebLayer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/opsworks/v1beta1/zz_generated.resolvers.go b/apis/opsworks/v1beta1/zz_generated.resolvers.go index 748bdca67f..9b755127fe 100644 --- a/apis/opsworks/v1beta1/zz_generated.resolvers.go +++ b/apis/opsworks/v1beta1/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *Application) ResolveReferences( // ResolveReferences of this Applicati var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -46,7 +46,7 @@ func (mg *Application) ResolveReferences( // ResolveReferences of this Applicati mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -472,7 +472,7 @@ func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) erro var mrsp reference.MultiResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta1", 
"CustomLayer", "CustomLayerList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "CustomLayer", "CustomLayerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -510,7 +510,7 @@ func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -548,7 +548,7 @@ func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta1", "CustomLayer", "CustomLayerList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "CustomLayer", "CustomLayerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -586,7 +586,7 @@ func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.InitProvider.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.InitProvider.SecurityGroupIDRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed 
to get the reference target managed resource and its list for reference resolution") } @@ -1081,7 +1081,7 @@ func (mg *Permission) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1119,7 +1119,7 @@ func (mg *Permission) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.UserArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.UserArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1169,7 +1169,7 @@ func (mg *RDSDBInstance) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1188,7 +1188,7 @@ func (mg *RDSDBInstance) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.RDSDBInstanceArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RDSDBInstanceArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", 
"v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1207,7 +1207,7 @@ func (mg *RDSDBInstance) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1226,7 +1226,7 @@ func (mg *RDSDBInstance) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.InitProvider.RDSDBInstanceArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.RDSDBInstanceArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta1", "Stack", "StackList") + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/opsworks/v1beta1/zz_instance_types.go b/apis/opsworks/v1beta1/zz_instance_types.go index 6eb023a323..5fdd718669 100755 --- a/apis/opsworks/v1beta1/zz_instance_types.go +++ b/apis/opsworks/v1beta1/zz_instance_types.go @@ -168,7 +168,7 @@ type InstanceInitParameters struct { InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` // List of the layers the instance will belong to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.CustomLayer + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.CustomLayer LayerIds []*string `json:"layerIds,omitempty" tf:"layer_ids,omitempty"` // References to CustomLayer in opsworks to populate layerIds. @@ -206,7 +206,7 @@ type InstanceInitParameters struct { SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` // Identifier of the stack the instance will belong to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` @@ -459,7 +459,7 @@ type InstanceParameters struct { InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` // List of the layers the instance will belong to. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.CustomLayer + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.CustomLayer // +kubebuilder:validation:Optional LayerIds []*string `json:"layerIds,omitempty" tf:"layer_ids,omitempty"` @@ -503,7 +503,7 @@ type InstanceParameters struct { SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` // Identifier of the stack the instance will belong to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` diff --git a/apis/opsworks/v1beta1/zz_permission_types.go b/apis/opsworks/v1beta1/zz_permission_types.go index 360140c362..3f42579018 100755 --- a/apis/opsworks/v1beta1/zz_permission_types.go +++ b/apis/opsworks/v1beta1/zz_permission_types.go @@ -25,7 +25,7 @@ type PermissionInitParameters struct { Level *string `json:"level,omitempty" tf:"level,omitempty"` // The stack to set the permissions for - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` @@ -87,7 +87,7 @@ type PermissionParameters struct { Level *string `json:"level,omitempty" tf:"level,omitempty"` // The stack to set the permissions for - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` diff --git a/apis/opsworks/v1beta1/zz_rdsdbinstance_types.go b/apis/opsworks/v1beta1/zz_rdsdbinstance_types.go index 01cbe2d4a3..61c9320a5e 100755 --- a/apis/opsworks/v1beta1/zz_rdsdbinstance_types.go +++ 
b/apis/opsworks/v1beta1/zz_rdsdbinstance_types.go @@ -22,7 +22,7 @@ type RDSDBInstanceInitParameters struct { DBUser *string `json:"dbUser,omitempty" tf:"db_user,omitempty"` // The db instance to register for this stack. Changing this will force a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) RDSDBInstanceArn *string `json:"rdsDbInstanceArn,omitempty" tf:"rds_db_instance_arn,omitempty"` @@ -35,7 +35,7 @@ type RDSDBInstanceInitParameters struct { RDSDBInstanceArnSelector *v1.Selector `json:"rdsDbInstanceArnSelector,omitempty" tf:"-"` // The stack to register a db instance for. Changing this will force a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` @@ -74,7 +74,7 @@ type RDSDBInstanceParameters struct { DBUser *string `json:"dbUser,omitempty" tf:"db_user,omitempty"` // The db instance to register for this stack. Changing this will force a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional RDSDBInstanceArn *string `json:"rdsDbInstanceArn,omitempty" tf:"rds_db_instance_arn,omitempty"` @@ -88,7 +88,7 @@ type RDSDBInstanceParameters struct { RDSDBInstanceArnSelector *v1.Selector `json:"rdsDbInstanceArnSelector,omitempty" tf:"-"` // The stack to register a db instance for. Changing this will force a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta1.Stack + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` diff --git a/apis/opsworks/v1beta2/zz_customlayer_terraformed.go b/apis/opsworks/v1beta2/zz_customlayer_terraformed.go new file mode 100755 index 0000000000..b97d283799 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_customlayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CustomLayer +func (mg *CustomLayer) GetTerraformResourceType() string { + return "aws_opsworks_custom_layer" +} + +// GetConnectionDetailsMapping for this CustomLayer +func (tr *CustomLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CustomLayer +func (tr *CustomLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CustomLayer +func (tr *CustomLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CustomLayer +func (tr *CustomLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CustomLayer +func (tr *CustomLayer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CustomLayer +func (tr *CustomLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CustomLayer +func (tr *CustomLayer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CustomLayer +func (tr *CustomLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CustomLayer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CustomLayer) LateInitialize(attrs []byte) (bool, error) { + params := &CustomLayerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CustomLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_customlayer_types.go b/apis/opsworks/v1beta2/zz_customlayer_types.go new file mode 100755 index 0000000000..a713bcbeac --- /dev/null +++ b/apis/opsworks/v1beta2/zz_customlayer_types.go @@ -0,0 +1,790 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A block the specifies how an opsworks logs look like. See Log Streams. + LogStreams []LogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type CloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A block the specifies how an opsworks logs look like. See Log Streams. + LogStreams []LogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type CloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A block the specifies how an opsworks logs look like. See Log Streams. + // +kubebuilder:validation:Optional + LogStreams []LogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type CustomLayerInitParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. 
+ AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // Will create an EBS volume and connect it to the layer's instances. See Cloudwatch Configuration. + CloudwatchConfiguration *CloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // Will create an EBS volume and connect it to the layer's instances. See EBS Volume. + EBSVolume []EBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // Load-based auto scaling configuration. See Load Based AutoScaling + LoadBasedAutoScaling *LoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short, machine-readable name for the layer, which will be used to identify it in the Chef node JSON. + ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type CustomLayerObservation struct { + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. 
+ AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // Will create an EBS volume and connect it to the layer's instances. See Cloudwatch Configuration. + CloudwatchConfiguration *CloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // Will create an EBS volume and connect it to the layer's instances. See EBS Volume. 
+ EBSVolume []EBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // Load-based auto scaling configuration. See Load Based AutoScaling + LoadBasedAutoScaling *LoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short, machine-readable name for the layer, which will be used to identify it in the Chef node JSON. + ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` + + // ID of the stack the layer will belong to. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. 
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type CustomLayerParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // Will create an EBS volume and connect it to the layer's instances. See Cloudwatch Configuration. + // +kubebuilder:validation:Optional + CloudwatchConfiguration *CloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // Will create an EBS volume and connect it to the layer's instances. See EBS Volume. 
+ // +kubebuilder:validation:Optional + EBSVolume []EBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // Load-based auto scaling configuration. See Load Based AutoScaling + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *LoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short, machine-readable name for the layer, which will be used to identify it in the Chef node JSON. + // +kubebuilder:validation:Optional + ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. 
+ // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. + // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type DownscalingInitParameters struct { + + // Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack. + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // The CPU utilization threshold, as a percent of the available CPU. A value of -1 disables the threshold. + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks Stacks should ignore metrics and suppress additional scaling events. + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // The number of instances to add or remove when the load exceeds a threshold. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // The load threshold. A value of -1 disables the threshold. 
+ LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // The memory utilization threshold, as a percent of the available memory. A value of -1 disables the threshold. + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // The amount of time, in minutes, that the load must exceed a threshold before more instances are added or removed. + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type DownscalingObservation struct { + + // Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack. + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // The CPU utilization threshold, as a percent of the available CPU. A value of -1 disables the threshold. + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks Stacks should ignore metrics and suppress additional scaling events. + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // The number of instances to add or remove when the load exceeds a threshold. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // The load threshold. A value of -1 disables the threshold. + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // The memory utilization threshold, as a percent of the available memory. A value of -1 disables the threshold. + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // The amount of time, in minutes, that the load must exceed a threshold before more instances are added or removed. 
+ ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type DownscalingParameters struct { + + // Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack. + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // The CPU utilization threshold, as a percent of the available CPU. A value of -1 disables the threshold. + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks Stacks should ignore metrics and suppress additional scaling events. + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // The number of instances to add or remove when the load exceeds a threshold. + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // The load threshold. A value of -1 disables the threshold. + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // The memory utilization threshold, as a percent of the available memory. A value of -1 disables the threshold. + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // The amount of time, in minutes, that the load must exceed a threshold before more instances are added or removed. + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type EBSVolumeInitParameters struct { + + // Encrypt the volume. 
+ Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EBSVolumeObservation struct { + + // Encrypt the volume. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EBSVolumeParameters struct { + + // Encrypt the volume. + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. 
+ // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LoadBasedAutoScalingInitParameters struct { + + // The downscaling settings, as defined below, used for load-based autoscaling + Downscaling *DownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // Whether load-based auto scaling is enabled for the layer. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The upscaling settings, as defined below, used for load-based autoscaling + Upscaling *UpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type LoadBasedAutoScalingObservation struct { + + // The downscaling settings, as defined below, used for load-based autoscaling + Downscaling *DownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // Whether load-based auto scaling is enabled for the layer. 
+ Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The upscaling settings, as defined below, used for load-based autoscaling + Upscaling *UpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type LoadBasedAutoScalingParameters struct { + + // The downscaling settings, as defined below, used for load-based autoscaling + // +kubebuilder:validation:Optional + Downscaling *DownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // Whether load-based auto scaling is enabled for the layer. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The upscaling settings, as defined below, used for load-based autoscaling + // +kubebuilder:validation:Optional + Upscaling *UpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type LogStreamsInitParameters struct { + + // Specifies the max number of log events in a batch, up to 10000. The default value is 1000. + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // Specifies the maximum size of log events in a batch, in bytes, up to 1048576 bytes. The default value is 32768 bytes. + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Specifies the time duration for the batching of log events. The minimum value is 5000 and default value is 5000. + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // Specifies how the timestamp is extracted from logs. For more information, see the CloudWatch Logs Agent Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // Specifies the encoding of the log file so that the file can be read correctly. The default is utf_8. 
+ Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // Specifies log files that you want to push to CloudWatch Logs. File can point to a specific file or multiple files (by using wild card characters such as /var/log/system.log*). + File *string `json:"file,omitempty" tf:"file,omitempty"` + + // Specifies the range of lines for identifying a file. The valid values are one number, or two dash-delimited numbers, such as 1, 2-5. The default value is 1. + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // Specifies where to start to read data (start_of_file or end_of_file). The default is start_of_file. + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // Specifies the destination log group. A log group is created automatically if it doesn't already exist. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // Specifies the pattern for identifying the start of a log message. + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // Specifies the time zone of log event time stamps. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type LogStreamsObservation struct { + + // Specifies the max number of log events in a batch, up to 10000. The default value is 1000. + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // Specifies the maximum size of log events in a batch, in bytes, up to 1048576 bytes. The default value is 32768 bytes. + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Specifies the time duration for the batching of log events. The minimum value is 5000 and default value is 5000. + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // Specifies how the timestamp is extracted from logs. 
For more information, see the CloudWatch Logs Agent Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // Specifies the encoding of the log file so that the file can be read correctly. The default is utf_8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // Specifies log files that you want to push to CloudWatch Logs. File can point to a specific file or multiple files (by using wild card characters such as /var/log/system.log*). + File *string `json:"file,omitempty" tf:"file,omitempty"` + + // Specifies the range of lines for identifying a file. The valid values are one number, or two dash-delimited numbers, such as 1, 2-5. The default value is 1. + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // Specifies where to start to read data (start_of_file or end_of_file). The default is start_of_file. + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // Specifies the destination log group. A log group is created automatically if it doesn't already exist. + LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + // Specifies the pattern for identifying the start of a log message. + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // Specifies the time zone of log event time stamps. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type LogStreamsParameters struct { + + // Specifies the max number of log events in a batch, up to 10000. The default value is 1000. + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // Specifies the maximum size of log events in a batch, in bytes, up to 1048576 bytes. 
The default value is 32768 bytes. + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Specifies the time duration for the batching of log events. The minimum value is 5000 and default value is 5000. + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // Specifies how the timestamp is extracted from logs. For more information, see the CloudWatch Logs Agent Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // Specifies the encoding of the log file so that the file can be read correctly. The default is utf_8. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // Specifies log files that you want to push to CloudWatch Logs. File can point to a specific file or multiple files (by using wild card characters such as /var/log/system.log*). + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // Specifies the range of lines for identifying a file. The valid values are one number, or two dash-delimited numbers, such as 1, 2-5. The default value is 1. + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // Specifies where to start to read data (start_of_file or end_of_file). The default is start_of_file. + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // Specifies the destination log group. A log group is created automatically if it doesn't already exist. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // Specifies the pattern for identifying the start of a log message. + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // Specifies the time zone of log event time stamps. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type UpscalingInitParameters struct { + + // Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack. + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // The CPU utilization threshold, as a percent of the available CPU. A value of -1 disables the threshold. + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks Stacks should ignore metrics and suppress additional scaling events. + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // The number of instances to add or remove when the load exceeds a threshold. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // The load threshold. A value of -1 disables the threshold. + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // The memory utilization threshold, as a percent of the available memory. A value of -1 disables the threshold. + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // The amount of time, in minutes, that the load must exceed a threshold before more instances are added or removed. 
+ ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type UpscalingObservation struct { + + // Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack. + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // The CPU utilization threshold, as a percent of the available CPU. A value of -1 disables the threshold. + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks Stacks should ignore metrics and suppress additional scaling events. + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // The number of instances to add or remove when the load exceeds a threshold. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // The load threshold. A value of -1 disables the threshold. + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // The memory utilization threshold, as a percent of the available memory. A value of -1 disables the threshold. + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // The amount of time, in minutes, that the load must exceed a threshold before more instances are added or removed. + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type UpscalingParameters struct { + + // Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack. 
+ // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // The CPU utilization threshold, as a percent of the available CPU. A value of -1 disables the threshold. + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks Stacks should ignore metrics and suppress additional scaling events. + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // The number of instances to add or remove when the load exceeds a threshold. + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // The load threshold. A value of -1 disables the threshold. + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // The memory utilization threshold, as a percent of the available memory. A value of -1 disables the threshold. + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // The amount of time, in minutes, that the load must exceed a threshold before more instances are added or removed. + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +// CustomLayerSpec defines the desired state of CustomLayer +type CustomLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CustomLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CustomLayerInitParameters `json:"initProvider,omitempty"` +} + +// CustomLayerStatus defines the observed state of CustomLayer. +type CustomLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CustomLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CustomLayer is the Schema for the CustomLayers API. Provides an OpsWorks custom layer resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CustomLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.shortName) || (has(self.initProvider) && has(self.initProvider.shortName))",message="spec.forProvider.shortName is a required parameter" + Spec CustomLayerSpec `json:"spec"` + Status CustomLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CustomLayerList contains a list of CustomLayers +type CustomLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CustomLayer `json:"items"` +} + +// Repository type metadata. +var ( + CustomLayer_Kind = "CustomLayer" + CustomLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CustomLayer_Kind}.String() + CustomLayer_KindAPIVersion = CustomLayer_Kind + "." + CRDGroupVersion.String() + CustomLayer_GroupVersionKind = CRDGroupVersion.WithKind(CustomLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&CustomLayer{}, &CustomLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_ecsclusterlayer_terraformed.go b/apis/opsworks/v1beta2/zz_ecsclusterlayer_terraformed.go new file mode 100755 index 0000000000..0a6911610f --- /dev/null +++ b/apis/opsworks/v1beta2/zz_ecsclusterlayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EcsClusterLayer +func (mg *EcsClusterLayer) GetTerraformResourceType() string { + return "aws_opsworks_ecs_cluster_layer" +} + +// GetConnectionDetailsMapping for this EcsClusterLayer +func (tr *EcsClusterLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this EcsClusterLayer +func (tr *EcsClusterLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EcsClusterLayer +func (tr *EcsClusterLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this EcsClusterLayer +func (tr *EcsClusterLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this EcsClusterLayer +func (tr *EcsClusterLayer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EcsClusterLayer +func (tr *EcsClusterLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EcsClusterLayer +func (tr *EcsClusterLayer) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this EcsClusterLayer +func (tr *EcsClusterLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this EcsClusterLayer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *EcsClusterLayer) LateInitialize(attrs []byte) (bool, error) { + params := &EcsClusterLayerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EcsClusterLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_ecsclusterlayer_types.go b/apis/opsworks/v1beta2/zz_ecsclusterlayer_types.go new file mode 100755 index 0000000000..60fd747ac5 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_ecsclusterlayer_types.go @@ -0,0 +1,704 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type CloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type CloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type EcsClusterLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []CloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type EcsClusterLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []CloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type EcsClusterLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []CloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type EcsClusterLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. 
+ Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EcsClusterLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EcsClusterLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. 
+ // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EcsClusterLayerInitParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *EcsClusterLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []EcsClusterLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // The ECS Cluster ARN of the layer. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Cluster + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + EcsClusterArn *string `json:"ecsClusterArn,omitempty" tf:"ecs_cluster_arn,omitempty"` + + // Reference to a Cluster in ecs to populate ecsClusterArn. + // +kubebuilder:validation:Optional + EcsClusterArnRef *v1.Reference `json:"ecsClusterArnRef,omitempty" tf:"-"` + + // Selector for a Cluster in ecs to populate ecsClusterArn. 
+ // +kubebuilder:validation:Optional + EcsClusterArnSelector *v1.Selector `json:"ecsClusterArnSelector,omitempty" tf:"-"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *EcsClusterLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type EcsClusterLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *LoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *LoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type EcsClusterLayerLoadBasedAutoScalingObservation struct { + Downscaling *LoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *LoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type EcsClusterLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *LoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + Upscaling *LoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type EcsClusterLayerObservation struct { + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. 
+ AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *EcsClusterLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []EcsClusterLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // The ECS Cluster ARN of the layer. 
+ EcsClusterArn *string `json:"ecsClusterArn,omitempty" tf:"ecs_cluster_arn,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *EcsClusterLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type EcsClusterLayerParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. 
+ // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *EcsClusterLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []EcsClusterLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // The ECS Cluster ARN of the layer. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ecs/v1beta2.Cluster + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + EcsClusterArn *string `json:"ecsClusterArn,omitempty" tf:"ecs_cluster_arn,omitempty"` + + // Reference to a Cluster in ecs to populate ecsClusterArn. + // +kubebuilder:validation:Optional + EcsClusterArnRef *v1.Reference `json:"ecsClusterArnRef,omitempty" tf:"-"` + + // Selector for a Cluster in ecs to populate ecsClusterArn. + // +kubebuilder:validation:Optional + EcsClusterArnSelector *v1.Selector `json:"ecsClusterArnSelector,omitempty" tf:"-"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *EcsClusterLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type LoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type LoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type LoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` 
+ + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type LoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type LoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type LoadBasedAutoScalingUpscalingParameters struct { + 
+ // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +// EcsClusterLayerSpec defines the desired state of EcsClusterLayer +type EcsClusterLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EcsClusterLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EcsClusterLayerInitParameters `json:"initProvider,omitempty"` +} + +// EcsClusterLayerStatus defines the observed state of EcsClusterLayer. 
+type EcsClusterLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EcsClusterLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EcsClusterLayer is the Schema for the EcsClusterLayers API. Provides an OpsWorks HAProxy layer resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type EcsClusterLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec EcsClusterLayerSpec `json:"spec"` + Status EcsClusterLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EcsClusterLayerList contains a list of EcsClusterLayers +type EcsClusterLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EcsClusterLayer `json:"items"` +} + +// Repository type metadata. +var ( + EcsClusterLayer_Kind = "EcsClusterLayer" + EcsClusterLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: EcsClusterLayer_Kind}.String() + EcsClusterLayer_KindAPIVersion = EcsClusterLayer_Kind + "." 
+ CRDGroupVersion.String() + EcsClusterLayer_GroupVersionKind = CRDGroupVersion.WithKind(EcsClusterLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&EcsClusterLayer{}, &EcsClusterLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_ganglialayer_terraformed.go b/apis/opsworks/v1beta2/zz_ganglialayer_terraformed.go new file mode 100755 index 0000000000..fd2ff533d9 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_ganglialayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this GangliaLayer +func (mg *GangliaLayer) GetTerraformResourceType() string { + return "aws_opsworks_ganglia_layer" +} + +// GetConnectionDetailsMapping for this GangliaLayer +func (tr *GangliaLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this GangliaLayer +func (tr *GangliaLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this GangliaLayer +func (tr *GangliaLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this GangliaLayer +func (tr *GangliaLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this GangliaLayer +func (tr *GangliaLayer) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this GangliaLayer +func (tr *GangliaLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this GangliaLayer +func (tr *GangliaLayer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this GangliaLayer +func (tr *GangliaLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this GangliaLayer using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *GangliaLayer) LateInitialize(attrs []byte) (bool, error) { + params := &GangliaLayerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GangliaLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_ganglialayer_types.go b/apis/opsworks/v1beta2/zz_ganglialayer_types.go new file mode 100755 index 0000000000..eb0c9f8578 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_ganglialayer_types.go @@ -0,0 +1,706 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GangliaLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []GangliaLayerCloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type GangliaLayerCloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type GangliaLayerCloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type GangliaLayerCloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type GangliaLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []GangliaLayerCloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type GangliaLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []GangliaLayerCloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type GangliaLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type GangliaLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type GangliaLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type GangliaLayerInitParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *GangliaLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []GangliaLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *GangliaLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The password to use for Ganglia. + Password *string `json:"password,omitempty" tf:"password,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The URL path to use for Ganglia. Defaults to "/ganglia". + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` + + // (Optiona) The username to use for Ganglia. Defaults to "opsworks". 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type GangliaLayerLoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type GangliaLayerLoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type GangliaLayerLoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + 
InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type GangliaLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *GangliaLayerLoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *GangliaLayerLoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type GangliaLayerLoadBasedAutoScalingObservation struct { + Downscaling *GangliaLayerLoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *GangliaLayerLoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type GangliaLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *GangliaLayerLoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + Upscaling *GangliaLayerLoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type GangliaLayerLoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + 
IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type GangliaLayerLoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type GangliaLayerLoadBasedAutoScalingUpscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" 
tf:"memory_threshold,omitempty"`
+
+ // +kubebuilder:validation:Optional
+ ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"`
+}
+
+type GangliaLayerObservation struct {
+
+ // The Amazon Resource Name (ARN) of the layer.
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`
+
+ // Whether to automatically assign an elastic IP address to the layer's instances.
+ AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"`
+
+ // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
+ AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"`
+
+ // Whether to enable auto-healing for the layer.
+ AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"`
+
+ CloudwatchConfiguration *GangliaLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"`
+
+ CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"`
+
+ CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"`
+
+ // The ARN of an IAM profile that will be used for the layer's instances.
+ CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"`
+
+ // Custom JSON attributes to apply to the layer.
+ CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"`
+
+ // Ids for a set of security groups to apply to the layer's instances. 
+ // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []GangliaLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *GangliaLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The password to use for Ganglia. + Password *string `json:"password,omitempty" tf:"password,omitempty"` + + // ID of the stack the layer will belong to. 
+ StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"`
+
+ // Names of a set of system packages to install on the layer's instances.
+ // +listType=set
+ SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"`
+
+ // Key-value map of resource tags.
+ // +mapType=granular
+ Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+ // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
+ // +mapType=granular
+ TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"`
+
+ // The URL path to use for Ganglia. Defaults to "/ganglia".
+ URL *string `json:"url,omitempty" tf:"url,omitempty"`
+
+ // Whether to use EBS-optimized instances.
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"`
+
+ // (Optional) The username to use for Ganglia. Defaults to "opsworks".
+ Username *string `json:"username,omitempty" tf:"username,omitempty"`
+}
+
+type GangliaLayerParameters struct {
+
+ // Whether to automatically assign an elastic IP address to the layer's instances.
+ // +kubebuilder:validation:Optional
+ AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"`
+
+ // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
+ // +kubebuilder:validation:Optional
+ AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"`
+
+ // Whether to enable auto-healing for the layer. 
+ // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *GangliaLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []GangliaLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
+ // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *GangliaLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The password to use for Ganglia. + // +kubebuilder:validation:Optional + Password *string `json:"password,omitempty" tf:"password,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The URL path to use for Ganglia. Defaults to "/ganglia". + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Whether to use EBS-optimized instances. 
+ // +kubebuilder:validation:Optional
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"`
+
+ // (Optional) The username to use for Ganglia. Defaults to "opsworks".
+ // +kubebuilder:validation:Optional
+ Username *string `json:"username,omitempty" tf:"username,omitempty"`
+}
+
+// GangliaLayerSpec defines the desired state of GangliaLayer
+type GangliaLayerSpec struct {
+ v1.ResourceSpec `json:",inline"`
+ ForProvider GangliaLayerParameters `json:"forProvider"`
+ // THIS IS A BETA FIELD. It will be honored
+ // unless the Management Policies feature flag is disabled.
+ // InitProvider holds the same fields as ForProvider, with the exception
+ // of Identifier and other resource reference fields. The fields that are
+ // in InitProvider are merged into ForProvider when the resource is created.
+ // The same fields are also added to the terraform ignore_changes hook, to
+ // avoid updating them after creation. This is useful for fields that are
+ // required on creation, but we do not desire to update them after creation,
+ // for example because of an external controller is managing them, like an
+ // autoscaler.
+ InitProvider GangliaLayerInitParameters `json:"initProvider,omitempty"`
+}
+
+// GangliaLayerStatus defines the observed state of GangliaLayer.
+type GangliaLayerStatus struct {
+ v1.ResourceStatus `json:",inline"`
+ AtProvider GangliaLayerObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// GangliaLayer is the Schema for the GangliaLayers API. Provides an OpsWorks Ganglia layer resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type GangliaLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.password) || (has(self.initProvider) && has(self.initProvider.password))",message="spec.forProvider.password is a required parameter" + Spec GangliaLayerSpec `json:"spec"` + Status GangliaLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GangliaLayerList contains a list of GangliaLayers +type GangliaLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GangliaLayer `json:"items"` +} + +// Repository type metadata. +var ( + GangliaLayer_Kind = "GangliaLayer" + GangliaLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: GangliaLayer_Kind}.String() + GangliaLayer_KindAPIVersion = GangliaLayer_Kind + "." 
+ CRDGroupVersion.String() + GangliaLayer_GroupVersionKind = CRDGroupVersion.WithKind(GangliaLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&GangliaLayer{}, &GangliaLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_generated.conversion_hubs.go b/apis/opsworks/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..325c9f6d95 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,43 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *CustomLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *EcsClusterLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GangliaLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *HAProxyLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *JavaAppLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MemcachedLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MySQLLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *NodeJSAppLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PHPAppLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RailsAppLayer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Stack) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *StaticWebLayer) Hub() {} diff --git a/apis/opsworks/v1beta2/zz_generated.deepcopy.go b/apis/opsworks/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6aade81919 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,18603 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchConfigurationInitParameters) DeepCopyInto(out *CloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]LogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchConfigurationInitParameters. +func (in *CloudwatchConfigurationInitParameters) DeepCopy() *CloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *CloudwatchConfigurationLogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchConfigurationLogStreamsInitParameters. +func (in *CloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *CloudwatchConfigurationLogStreamsInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchConfigurationLogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *CloudwatchConfigurationLogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchConfigurationLogStreamsObservation. +func (in *CloudwatchConfigurationLogStreamsObservation) DeepCopy() *CloudwatchConfigurationLogStreamsObservation { + if in == nil { + return nil + } + out := new(CloudwatchConfigurationLogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *CloudwatchConfigurationLogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchConfigurationLogStreamsParameters. +func (in *CloudwatchConfigurationLogStreamsParameters) DeepCopy() *CloudwatchConfigurationLogStreamsParameters { + if in == nil { + return nil + } + out := new(CloudwatchConfigurationLogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchConfigurationObservation) DeepCopyInto(out *CloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]LogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchConfigurationObservation. +func (in *CloudwatchConfigurationObservation) DeepCopy() *CloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(CloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchConfigurationParameters) DeepCopyInto(out *CloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]LogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchConfigurationParameters. +func (in *CloudwatchConfigurationParameters) DeepCopy() *CloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(CloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomCookbooksSourceInitParameters) DeepCopyInto(out *CustomCookbooksSourceInitParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(string) + **out = **in + } + if in.SSHKeySecretRef != nil { + in, out := &in.SSHKeySecretRef, &out.SSHKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCookbooksSourceInitParameters. +func (in *CustomCookbooksSourceInitParameters) DeepCopy() *CustomCookbooksSourceInitParameters { + if in == nil { + return nil + } + out := new(CustomCookbooksSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomCookbooksSourceObservation) DeepCopyInto(out *CustomCookbooksSourceObservation) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCookbooksSourceObservation. 
+func (in *CustomCookbooksSourceObservation) DeepCopy() *CustomCookbooksSourceObservation { + if in == nil { + return nil + } + out := new(CustomCookbooksSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomCookbooksSourceParameters) DeepCopyInto(out *CustomCookbooksSourceParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(string) + **out = **in + } + if in.SSHKeySecretRef != nil { + in, out := &in.SSHKeySecretRef, &out.SSHKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCookbooksSourceParameters. +func (in *CustomCookbooksSourceParameters) DeepCopy() *CustomCookbooksSourceParameters { + if in == nil { + return nil + } + out := new(CustomCookbooksSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomLayer) DeepCopyInto(out *CustomLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomLayer. 
+func (in *CustomLayer) DeepCopy() *CustomLayer { + if in == nil { + return nil + } + out := new(CustomLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomLayerInitParameters) DeepCopyInto(out *CustomLayerInitParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(CloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + 
if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]EBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := 
&in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(LoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShortName != nil { + in, out := &in.ShortName, &out.ShortName + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomLayerInitParameters. 
+func (in *CustomLayerInitParameters) DeepCopy() *CustomLayerInitParameters { + if in == nil { + return nil + } + out := new(CustomLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomLayerList) DeepCopyInto(out *CustomLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomLayerList. +func (in *CustomLayerList) DeepCopy() *CustomLayerList { + if in == nil { + return nil + } + out := new(CustomLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomLayerObservation) DeepCopyInto(out *CustomLayerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(CloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in 
{ + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]EBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(LoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShortName != nil { + in, out := &in.ShortName, &out.ShortName + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if 
in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomLayerObservation. +func (in *CustomLayerObservation) DeepCopy() *CustomLayerObservation { + if in == nil { + return nil + } + out := new(CustomLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomLayerParameters) DeepCopyInto(out *CustomLayerParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(CloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds 
+ *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]EBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(LoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if 
in.ShortName != nil { + in, out := &in.ShortName, &out.ShortName + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomLayerParameters. +func (in *CustomLayerParameters) DeepCopy() *CustomLayerParameters { + if in == nil { + return nil + } + out := new(CustomLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomLayerSpec) DeepCopyInto(out *CustomLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomLayerSpec. 
+func (in *CustomLayerSpec) DeepCopy() *CustomLayerSpec { + if in == nil { + return nil + } + out := new(CustomLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomLayerStatus) DeepCopyInto(out *CustomLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomLayerStatus. +func (in *CustomLayerStatus) DeepCopy() *CustomLayerStatus { + if in == nil { + return nil + } + out := new(CustomLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownscalingInitParameters) DeepCopyInto(out *DownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new DownscalingInitParameters. +func (in *DownscalingInitParameters) DeepCopy() *DownscalingInitParameters { + if in == nil { + return nil + } + out := new(DownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownscalingObservation) DeepCopyInto(out *DownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownscalingObservation. +func (in *DownscalingObservation) DeepCopy() *DownscalingObservation { + if in == nil { + return nil + } + out := new(DownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DownscalingParameters) DeepCopyInto(out *DownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownscalingParameters. +func (in *DownscalingParameters) DeepCopy() *DownscalingParameters { + if in == nil { + return nil + } + out := new(DownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSVolumeInitParameters) DeepCopyInto(out *EBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSVolumeInitParameters. +func (in *EBSVolumeInitParameters) DeepCopy() *EBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(EBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSVolumeObservation) DeepCopyInto(out *EBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSVolumeObservation. +func (in *EBSVolumeObservation) DeepCopy() *EBSVolumeObservation { + if in == nil { + return nil + } + out := new(EBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSVolumeParameters) DeepCopyInto(out *EBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSVolumeParameters. +func (in *EBSVolumeParameters) DeepCopy() *EBSVolumeParameters { + if in == nil { + return nil + } + out := new(EBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsClusterLayer) DeepCopyInto(out *EcsClusterLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayer. +func (in *EcsClusterLayer) DeepCopy() *EcsClusterLayer { + if in == nil { + return nil + } + out := new(EcsClusterLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EcsClusterLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsClusterLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *EcsClusterLayerCloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]CloudwatchConfigurationLogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerCloudwatchConfigurationInitParameters. +func (in *EcsClusterLayerCloudwatchConfigurationInitParameters) DeepCopy() *EcsClusterLayerCloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EcsClusterLayerCloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsClusterLayerCloudwatchConfigurationObservation) DeepCopyInto(out *EcsClusterLayerCloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]CloudwatchConfigurationLogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerCloudwatchConfigurationObservation. 
+func (in *EcsClusterLayerCloudwatchConfigurationObservation) DeepCopy() *EcsClusterLayerCloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(EcsClusterLayerCloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsClusterLayerCloudwatchConfigurationParameters) DeepCopyInto(out *EcsClusterLayerCloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]CloudwatchConfigurationLogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerCloudwatchConfigurationParameters. +func (in *EcsClusterLayerCloudwatchConfigurationParameters) DeepCopy() *EcsClusterLayerCloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(EcsClusterLayerCloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsClusterLayerEBSVolumeInitParameters) DeepCopyInto(out *EcsClusterLayerEBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerEBSVolumeInitParameters. +func (in *EcsClusterLayerEBSVolumeInitParameters) DeepCopy() *EcsClusterLayerEBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(EcsClusterLayerEBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsClusterLayerEBSVolumeObservation) DeepCopyInto(out *EcsClusterLayerEBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerEBSVolumeObservation. +func (in *EcsClusterLayerEBSVolumeObservation) DeepCopy() *EcsClusterLayerEBSVolumeObservation { + if in == nil { + return nil + } + out := new(EcsClusterLayerEBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsClusterLayerEBSVolumeParameters) DeepCopyInto(out *EcsClusterLayerEBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerEBSVolumeParameters. +func (in *EcsClusterLayerEBSVolumeParameters) DeepCopy() *EcsClusterLayerEBSVolumeParameters { + if in == nil { + return nil + } + out := new(EcsClusterLayerEBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsClusterLayerInitParameters) DeepCopyInto(out *EcsClusterLayerInitParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(EcsClusterLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := 
&in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]EcsClusterLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EcsClusterArn != nil { + in, out := &in.EcsClusterArn, &out.EcsClusterArn + *out = new(string) + **out = **in + } + if in.EcsClusterArnRef != nil { + in, out := &in.EcsClusterArnRef, &out.EcsClusterArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EcsClusterArnSelector != nil { + in, out := &in.EcsClusterArnSelector, &out.EcsClusterArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := 
&in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(EcsClusterLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerInitParameters. 
+func (in *EcsClusterLayerInitParameters) DeepCopy() *EcsClusterLayerInitParameters { + if in == nil { + return nil + } + out := new(EcsClusterLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsClusterLayerList) DeepCopyInto(out *EcsClusterLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EcsClusterLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerList. +func (in *EcsClusterLayerList) DeepCopy() *EcsClusterLayerList { + if in == nil { + return nil + } + out := new(EcsClusterLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EcsClusterLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsClusterLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *EcsClusterLayerLoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(LoadBasedAutoScalingDownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(LoadBasedAutoScalingUpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerLoadBasedAutoScalingInitParameters. +func (in *EcsClusterLayerLoadBasedAutoScalingInitParameters) DeepCopy() *EcsClusterLayerLoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(EcsClusterLayerLoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsClusterLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *EcsClusterLayerLoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(LoadBasedAutoScalingDownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(LoadBasedAutoScalingUpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerLoadBasedAutoScalingObservation. 
+func (in *EcsClusterLayerLoadBasedAutoScalingObservation) DeepCopy() *EcsClusterLayerLoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(EcsClusterLayerLoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsClusterLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *EcsClusterLayerLoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(LoadBasedAutoScalingDownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(LoadBasedAutoScalingUpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerLoadBasedAutoScalingParameters. +func (in *EcsClusterLayerLoadBasedAutoScalingParameters) DeepCopy() *EcsClusterLayerLoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(EcsClusterLayerLoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsClusterLayerObservation) DeepCopyInto(out *EcsClusterLayerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(EcsClusterLayerCloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, 
len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]EcsClusterLayerEBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EcsClusterArn != nil { + in, out := &in.EcsClusterArn, &out.EcsClusterArn + *out = new(string) + **out = **in + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(EcsClusterLayerLoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, 
&out.StackID + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerObservation. +func (in *EcsClusterLayerObservation) DeepCopy() *EcsClusterLayerObservation { + if in == nil { + return nil + } + out := new(EcsClusterLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsClusterLayerParameters) DeepCopyInto(out *EcsClusterLayerParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(EcsClusterLayerCloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, 
&out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]EcsClusterLayerEBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EcsClusterArn != nil { + in, out := &in.EcsClusterArn, &out.EcsClusterArn + *out = new(string) + **out = **in + } + if in.EcsClusterArnRef != nil { + in, out := &in.EcsClusterArnRef, &out.EcsClusterArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EcsClusterArnSelector != nil { + in, out := &in.EcsClusterArnSelector, &out.EcsClusterArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, 
&out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(EcsClusterLayerLoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerParameters. 
+func (in *EcsClusterLayerParameters) DeepCopy() *EcsClusterLayerParameters { + if in == nil { + return nil + } + out := new(EcsClusterLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsClusterLayerSpec) DeepCopyInto(out *EcsClusterLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerSpec. +func (in *EcsClusterLayerSpec) DeepCopy() *EcsClusterLayerSpec { + if in == nil { + return nil + } + out := new(EcsClusterLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsClusterLayerStatus) DeepCopyInto(out *EcsClusterLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsClusterLayerStatus. +func (in *EcsClusterLayerStatus) DeepCopy() *EcsClusterLayerStatus { + if in == nil { + return nil + } + out := new(EcsClusterLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayer) DeepCopyInto(out *GangliaLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayer. 
+func (in *GangliaLayer) DeepCopy() *GangliaLayer { + if in == nil { + return nil + } + out := new(GangliaLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GangliaLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *GangliaLayerCloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]GangliaLayerCloudwatchConfigurationLogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerCloudwatchConfigurationInitParameters. +func (in *GangliaLayerCloudwatchConfigurationInitParameters) DeepCopy() *GangliaLayerCloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(GangliaLayerCloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GangliaLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *GangliaLayerCloudwatchConfigurationLogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerCloudwatchConfigurationLogStreamsInitParameters. 
+func (in *GangliaLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *GangliaLayerCloudwatchConfigurationLogStreamsInitParameters { + if in == nil { + return nil + } + out := new(GangliaLayerCloudwatchConfigurationLogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerCloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *GangliaLayerCloudwatchConfigurationLogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
GangliaLayerCloudwatchConfigurationLogStreamsObservation. +func (in *GangliaLayerCloudwatchConfigurationLogStreamsObservation) DeepCopy() *GangliaLayerCloudwatchConfigurationLogStreamsObservation { + if in == nil { + return nil + } + out := new(GangliaLayerCloudwatchConfigurationLogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerCloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *GangliaLayerCloudwatchConfigurationLogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new GangliaLayerCloudwatchConfigurationLogStreamsParameters. +func (in *GangliaLayerCloudwatchConfigurationLogStreamsParameters) DeepCopy() *GangliaLayerCloudwatchConfigurationLogStreamsParameters { + if in == nil { + return nil + } + out := new(GangliaLayerCloudwatchConfigurationLogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerCloudwatchConfigurationObservation) DeepCopyInto(out *GangliaLayerCloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]GangliaLayerCloudwatchConfigurationLogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerCloudwatchConfigurationObservation. +func (in *GangliaLayerCloudwatchConfigurationObservation) DeepCopy() *GangliaLayerCloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(GangliaLayerCloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GangliaLayerCloudwatchConfigurationParameters) DeepCopyInto(out *GangliaLayerCloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]GangliaLayerCloudwatchConfigurationLogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerCloudwatchConfigurationParameters. +func (in *GangliaLayerCloudwatchConfigurationParameters) DeepCopy() *GangliaLayerCloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(GangliaLayerCloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerEBSVolumeInitParameters) DeepCopyInto(out *GangliaLayerEBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerEBSVolumeInitParameters. 
+func (in *GangliaLayerEBSVolumeInitParameters) DeepCopy() *GangliaLayerEBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(GangliaLayerEBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerEBSVolumeObservation) DeepCopyInto(out *GangliaLayerEBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerEBSVolumeObservation. +func (in *GangliaLayerEBSVolumeObservation) DeepCopy() *GangliaLayerEBSVolumeObservation { + if in == nil { + return nil + } + out := new(GangliaLayerEBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GangliaLayerEBSVolumeParameters) DeepCopyInto(out *GangliaLayerEBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerEBSVolumeParameters. +func (in *GangliaLayerEBSVolumeParameters) DeepCopy() *GangliaLayerEBSVolumeParameters { + if in == nil { + return nil + } + out := new(GangliaLayerEBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GangliaLayerInitParameters) DeepCopyInto(out *GangliaLayerInitParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(GangliaLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, 
&out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]GangliaLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(GangliaLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := 
&in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerInitParameters. +func (in *GangliaLayerInitParameters) DeepCopy() *GangliaLayerInitParameters { + if in == nil { + return nil + } + out := new(GangliaLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GangliaLayerList) DeepCopyInto(out *GangliaLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GangliaLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerList. +func (in *GangliaLayerList) DeepCopy() *GangliaLayerList { + if in == nil { + return nil + } + out := new(GangliaLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GangliaLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *GangliaLayerLoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != 
nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerLoadBasedAutoScalingDownscalingInitParameters. +func (in *GangliaLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *GangliaLayerLoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(GangliaLayerLoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerLoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *GangliaLayerLoadBasedAutoScalingDownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerLoadBasedAutoScalingDownscalingObservation. 
+func (in *GangliaLayerLoadBasedAutoScalingDownscalingObservation) DeepCopy() *GangliaLayerLoadBasedAutoScalingDownscalingObservation { + if in == nil { + return nil + } + out := new(GangliaLayerLoadBasedAutoScalingDownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerLoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *GangliaLayerLoadBasedAutoScalingDownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerLoadBasedAutoScalingDownscalingParameters. 
+func (in *GangliaLayerLoadBasedAutoScalingDownscalingParameters) DeepCopy() *GangliaLayerLoadBasedAutoScalingDownscalingParameters { + if in == nil { + return nil + } + out := new(GangliaLayerLoadBasedAutoScalingDownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *GangliaLayerLoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(GangliaLayerLoadBasedAutoScalingDownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(GangliaLayerLoadBasedAutoScalingUpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerLoadBasedAutoScalingInitParameters. +func (in *GangliaLayerLoadBasedAutoScalingInitParameters) DeepCopy() *GangliaLayerLoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(GangliaLayerLoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GangliaLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *GangliaLayerLoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(GangliaLayerLoadBasedAutoScalingDownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(GangliaLayerLoadBasedAutoScalingUpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerLoadBasedAutoScalingObservation. +func (in *GangliaLayerLoadBasedAutoScalingObservation) DeepCopy() *GangliaLayerLoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(GangliaLayerLoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *GangliaLayerLoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(GangliaLayerLoadBasedAutoScalingDownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(GangliaLayerLoadBasedAutoScalingUpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerLoadBasedAutoScalingParameters. 
+func (in *GangliaLayerLoadBasedAutoScalingParameters) DeepCopy() *GangliaLayerLoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(GangliaLayerLoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *GangliaLayerLoadBasedAutoScalingUpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerLoadBasedAutoScalingUpscalingInitParameters. 
+func (in *GangliaLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *GangliaLayerLoadBasedAutoScalingUpscalingInitParameters { + if in == nil { + return nil + } + out := new(GangliaLayerLoadBasedAutoScalingUpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerLoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *GangliaLayerLoadBasedAutoScalingUpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerLoadBasedAutoScalingUpscalingObservation. 
+func (in *GangliaLayerLoadBasedAutoScalingUpscalingObservation) DeepCopy() *GangliaLayerLoadBasedAutoScalingUpscalingObservation { + if in == nil { + return nil + } + out := new(GangliaLayerLoadBasedAutoScalingUpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerLoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *GangliaLayerLoadBasedAutoScalingUpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerLoadBasedAutoScalingUpscalingParameters. 
+func (in *GangliaLayerLoadBasedAutoScalingUpscalingParameters) DeepCopy() *GangliaLayerLoadBasedAutoScalingUpscalingParameters { + if in == nil { + return nil + } + out := new(GangliaLayerLoadBasedAutoScalingUpscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerObservation) DeepCopyInto(out *GangliaLayerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(GangliaLayerCloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if 
in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]GangliaLayerEBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := 
&in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(GangliaLayerLoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerObservation. 
+func (in *GangliaLayerObservation) DeepCopy() *GangliaLayerObservation { + if in == nil { + return nil + } + out := new(GangliaLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerParameters) DeepCopyInto(out *GangliaLayerParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(GangliaLayerCloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]GangliaLayerEBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, 
&out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(GangliaLayerLoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerParameters. 
+func (in *GangliaLayerParameters) DeepCopy() *GangliaLayerParameters { + if in == nil { + return nil + } + out := new(GangliaLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerSpec) DeepCopyInto(out *GangliaLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerSpec. +func (in *GangliaLayerSpec) DeepCopy() *GangliaLayerSpec { + if in == nil { + return nil + } + out := new(GangliaLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GangliaLayerStatus) DeepCopyInto(out *GangliaLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangliaLayerStatus. +func (in *GangliaLayerStatus) DeepCopy() *GangliaLayerStatus { + if in == nil { + return nil + } + out := new(GangliaLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HAProxyLayer) DeepCopyInto(out *HAProxyLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayer. 
+func (in *HAProxyLayer) DeepCopy() *HAProxyLayer { + if in == nil { + return nil + } + out := new(HAProxyLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HAProxyLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HAProxyLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *HAProxyLayerCloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]HAProxyLayerCloudwatchConfigurationLogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerCloudwatchConfigurationInitParameters. +func (in *HAProxyLayerCloudwatchConfigurationInitParameters) DeepCopy() *HAProxyLayerCloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(HAProxyLayerCloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HAProxyLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *HAProxyLayerCloudwatchConfigurationLogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerCloudwatchConfigurationLogStreamsInitParameters. 
+func (in *HAProxyLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *HAProxyLayerCloudwatchConfigurationLogStreamsInitParameters { + if in == nil { + return nil + } + out := new(HAProxyLayerCloudwatchConfigurationLogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HAProxyLayerCloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *HAProxyLayerCloudwatchConfigurationLogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
HAProxyLayerCloudwatchConfigurationLogStreamsObservation. +func (in *HAProxyLayerCloudwatchConfigurationLogStreamsObservation) DeepCopy() *HAProxyLayerCloudwatchConfigurationLogStreamsObservation { + if in == nil { + return nil + } + out := new(HAProxyLayerCloudwatchConfigurationLogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HAProxyLayerCloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *HAProxyLayerCloudwatchConfigurationLogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new HAProxyLayerCloudwatchConfigurationLogStreamsParameters. +func (in *HAProxyLayerCloudwatchConfigurationLogStreamsParameters) DeepCopy() *HAProxyLayerCloudwatchConfigurationLogStreamsParameters { + if in == nil { + return nil + } + out := new(HAProxyLayerCloudwatchConfigurationLogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HAProxyLayerCloudwatchConfigurationObservation) DeepCopyInto(out *HAProxyLayerCloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]HAProxyLayerCloudwatchConfigurationLogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerCloudwatchConfigurationObservation. +func (in *HAProxyLayerCloudwatchConfigurationObservation) DeepCopy() *HAProxyLayerCloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(HAProxyLayerCloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HAProxyLayerCloudwatchConfigurationParameters) DeepCopyInto(out *HAProxyLayerCloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]HAProxyLayerCloudwatchConfigurationLogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerCloudwatchConfigurationParameters. +func (in *HAProxyLayerCloudwatchConfigurationParameters) DeepCopy() *HAProxyLayerCloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(HAProxyLayerCloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HAProxyLayerEBSVolumeInitParameters) DeepCopyInto(out *HAProxyLayerEBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerEBSVolumeInitParameters. 
+func (in *HAProxyLayerEBSVolumeInitParameters) DeepCopy() *HAProxyLayerEBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(HAProxyLayerEBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HAProxyLayerEBSVolumeObservation) DeepCopyInto(out *HAProxyLayerEBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerEBSVolumeObservation. +func (in *HAProxyLayerEBSVolumeObservation) DeepCopy() *HAProxyLayerEBSVolumeObservation { + if in == nil { + return nil + } + out := new(HAProxyLayerEBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HAProxyLayerEBSVolumeParameters) DeepCopyInto(out *HAProxyLayerEBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerEBSVolumeParameters. +func (in *HAProxyLayerEBSVolumeParameters) DeepCopy() *HAProxyLayerEBSVolumeParameters { + if in == nil { + return nil + } + out := new(HAProxyLayerEBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HAProxyLayerInitParameters) DeepCopyInto(out *HAProxyLayerInitParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(HAProxyLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, 
&out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]HAProxyLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.HealthcheckMethod != nil { + in, out := &in.HealthcheckMethod, &out.HealthcheckMethod + *out = new(string) + **out = **in + } + if in.HealthcheckURL != nil { + in, out := &in.HealthcheckURL, &out.HealthcheckURL + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = 
new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(HAProxyLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StatsEnabled != nil { + in, out := &in.StatsEnabled, &out.StatsEnabled + *out = new(bool) + **out = **in + } + if in.StatsPassword != nil { + in, out := &in.StatsPassword, &out.StatsPassword + *out = new(string) + **out = **in + } + if in.StatsURL != nil { + in, out := &in.StatsURL, &out.StatsURL + *out = new(string) + **out = **in + } + if in.StatsUser != nil { + in, out := &in.StatsUser, &out.StatsUser + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
HAProxyLayerInitParameters. +func (in *HAProxyLayerInitParameters) DeepCopy() *HAProxyLayerInitParameters { + if in == nil { + return nil + } + out := new(HAProxyLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HAProxyLayerList) DeepCopyInto(out *HAProxyLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HAProxyLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerList. +func (in *HAProxyLayerList) DeepCopy() *HAProxyLayerList { + if in == nil { + return nil + } + out := new(HAProxyLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HAProxyLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HAProxyLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *HAProxyLayerLoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerLoadBasedAutoScalingDownscalingInitParameters. +func (in *HAProxyLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *HAProxyLayerLoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(HAProxyLayerLoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HAProxyLayerLoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *HAProxyLayerLoadBasedAutoScalingDownscalingObservation) {
	// NOTE(review): this file is controller-gen deepcopy output (zz_generated.deepcopy.go
	// style); regenerate via the project's codegen target rather than editing by hand.
	*out = *in
	if in.Alarms != nil {
		in, out := &in.Alarms, &out.Alarms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CPUThreshold != nil {
		in, out := &in.CPUThreshold, &out.CPUThreshold
		*out = new(float64)
		**out = **in
	}
	if in.IgnoreMetricsTime != nil {
		in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime
		*out = new(float64)
		**out = **in
	}
	if in.InstanceCount != nil {
		in, out := &in.InstanceCount, &out.InstanceCount
		*out = new(float64)
		**out = **in
	}
	if in.LoadThreshold != nil {
		in, out := &in.LoadThreshold, &out.LoadThreshold
		*out = new(float64)
		**out = **in
	}
	if in.MemoryThreshold != nil {
		in, out := &in.MemoryThreshold, &out.MemoryThreshold
		*out = new(float64)
		**out = **in
	}
	if in.ThresholdsWaitTime != nil {
		in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerLoadBasedAutoScalingDownscalingObservation.
func (in *HAProxyLayerLoadBasedAutoScalingDownscalingObservation) DeepCopy() *HAProxyLayerLoadBasedAutoScalingDownscalingObservation {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerLoadBasedAutoScalingDownscalingObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerLoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *HAProxyLayerLoadBasedAutoScalingDownscalingParameters) {
	*out = *in
	if in.Alarms != nil {
		in, out := &in.Alarms, &out.Alarms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CPUThreshold != nil {
		in, out := &in.CPUThreshold, &out.CPUThreshold
		*out = new(float64)
		**out = **in
	}
	if in.IgnoreMetricsTime != nil {
		in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime
		*out = new(float64)
		**out = **in
	}
	if in.InstanceCount != nil {
		in, out := &in.InstanceCount, &out.InstanceCount
		*out = new(float64)
		**out = **in
	}
	if in.LoadThreshold != nil {
		in, out := &in.LoadThreshold, &out.LoadThreshold
		*out = new(float64)
		**out = **in
	}
	if in.MemoryThreshold != nil {
		in, out := &in.MemoryThreshold, &out.MemoryThreshold
		*out = new(float64)
		**out = **in
	}
	if in.ThresholdsWaitTime != nil {
		in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerLoadBasedAutoScalingDownscalingParameters.
func (in *HAProxyLayerLoadBasedAutoScalingDownscalingParameters) DeepCopy() *HAProxyLayerLoadBasedAutoScalingDownscalingParameters {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerLoadBasedAutoScalingDownscalingParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *HAProxyLayerLoadBasedAutoScalingInitParameters) {
	*out = *in
	if in.Downscaling != nil {
		in, out := &in.Downscaling, &out.Downscaling
		*out = new(HAProxyLayerLoadBasedAutoScalingDownscalingInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Enable != nil {
		in, out := &in.Enable, &out.Enable
		*out = new(bool)
		**out = **in
	}
	if in.Upscaling != nil {
		in, out := &in.Upscaling, &out.Upscaling
		*out = new(HAProxyLayerLoadBasedAutoScalingUpscalingInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerLoadBasedAutoScalingInitParameters.
func (in *HAProxyLayerLoadBasedAutoScalingInitParameters) DeepCopy() *HAProxyLayerLoadBasedAutoScalingInitParameters {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerLoadBasedAutoScalingInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *HAProxyLayerLoadBasedAutoScalingObservation) {
	*out = *in
	if in.Downscaling != nil {
		in, out := &in.Downscaling, &out.Downscaling
		*out = new(HAProxyLayerLoadBasedAutoScalingDownscalingObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Enable != nil {
		in, out := &in.Enable, &out.Enable
		*out = new(bool)
		**out = **in
	}
	if in.Upscaling != nil {
		in, out := &in.Upscaling, &out.Upscaling
		*out = new(HAProxyLayerLoadBasedAutoScalingUpscalingObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerLoadBasedAutoScalingObservation.
func (in *HAProxyLayerLoadBasedAutoScalingObservation) DeepCopy() *HAProxyLayerLoadBasedAutoScalingObservation {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerLoadBasedAutoScalingObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *HAProxyLayerLoadBasedAutoScalingParameters) {
	*out = *in
	if in.Downscaling != nil {
		in, out := &in.Downscaling, &out.Downscaling
		*out = new(HAProxyLayerLoadBasedAutoScalingDownscalingParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Enable != nil {
		in, out := &in.Enable, &out.Enable
		*out = new(bool)
		**out = **in
	}
	if in.Upscaling != nil {
		in, out := &in.Upscaling, &out.Upscaling
		*out = new(HAProxyLayerLoadBasedAutoScalingUpscalingParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerLoadBasedAutoScalingParameters.
func (in *HAProxyLayerLoadBasedAutoScalingParameters) DeepCopy() *HAProxyLayerLoadBasedAutoScalingParameters {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerLoadBasedAutoScalingParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *HAProxyLayerLoadBasedAutoScalingUpscalingInitParameters) {
	*out = *in
	if in.Alarms != nil {
		in, out := &in.Alarms, &out.Alarms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CPUThreshold != nil {
		in, out := &in.CPUThreshold, &out.CPUThreshold
		*out = new(float64)
		**out = **in
	}
	if in.IgnoreMetricsTime != nil {
		in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime
		*out = new(float64)
		**out = **in
	}
	if in.InstanceCount != nil {
		in, out := &in.InstanceCount, &out.InstanceCount
		*out = new(float64)
		**out = **in
	}
	if in.LoadThreshold != nil {
		in, out := &in.LoadThreshold, &out.LoadThreshold
		*out = new(float64)
		**out = **in
	}
	if in.MemoryThreshold != nil {
		in, out := &in.MemoryThreshold, &out.MemoryThreshold
		*out = new(float64)
		**out = **in
	}
	if in.ThresholdsWaitTime != nil {
		in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerLoadBasedAutoScalingUpscalingInitParameters.
func (in *HAProxyLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *HAProxyLayerLoadBasedAutoScalingUpscalingInitParameters {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerLoadBasedAutoScalingUpscalingInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerLoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *HAProxyLayerLoadBasedAutoScalingUpscalingObservation) {
	*out = *in
	if in.Alarms != nil {
		in, out := &in.Alarms, &out.Alarms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CPUThreshold != nil {
		in, out := &in.CPUThreshold, &out.CPUThreshold
		*out = new(float64)
		**out = **in
	}
	if in.IgnoreMetricsTime != nil {
		in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime
		*out = new(float64)
		**out = **in
	}
	if in.InstanceCount != nil {
		in, out := &in.InstanceCount, &out.InstanceCount
		*out = new(float64)
		**out = **in
	}
	if in.LoadThreshold != nil {
		in, out := &in.LoadThreshold, &out.LoadThreshold
		*out = new(float64)
		**out = **in
	}
	if in.MemoryThreshold != nil {
		in, out := &in.MemoryThreshold, &out.MemoryThreshold
		*out = new(float64)
		**out = **in
	}
	if in.ThresholdsWaitTime != nil {
		in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerLoadBasedAutoScalingUpscalingObservation.
func (in *HAProxyLayerLoadBasedAutoScalingUpscalingObservation) DeepCopy() *HAProxyLayerLoadBasedAutoScalingUpscalingObservation {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerLoadBasedAutoScalingUpscalingObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerLoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *HAProxyLayerLoadBasedAutoScalingUpscalingParameters) {
	*out = *in
	if in.Alarms != nil {
		in, out := &in.Alarms, &out.Alarms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CPUThreshold != nil {
		in, out := &in.CPUThreshold, &out.CPUThreshold
		*out = new(float64)
		**out = **in
	}
	if in.IgnoreMetricsTime != nil {
		in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime
		*out = new(float64)
		**out = **in
	}
	if in.InstanceCount != nil {
		in, out := &in.InstanceCount, &out.InstanceCount
		*out = new(float64)
		**out = **in
	}
	if in.LoadThreshold != nil {
		in, out := &in.LoadThreshold, &out.LoadThreshold
		*out = new(float64)
		**out = **in
	}
	if in.MemoryThreshold != nil {
		in, out := &in.MemoryThreshold, &out.MemoryThreshold
		*out = new(float64)
		**out = **in
	}
	if in.ThresholdsWaitTime != nil {
		in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerLoadBasedAutoScalingUpscalingParameters.
func (in *HAProxyLayerLoadBasedAutoScalingUpscalingParameters) DeepCopy() *HAProxyLayerLoadBasedAutoScalingUpscalingParameters {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerLoadBasedAutoScalingUpscalingParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerObservation) DeepCopyInto(out *HAProxyLayerObservation) {
	// NOTE(review): controller-gen deepcopy output; regenerate instead of hand-editing.
	*out = *in
	if in.Arn != nil {
		in, out := &in.Arn, &out.Arn
		*out = new(string)
		**out = **in
	}
	if in.AutoAssignElasticIps != nil {
		in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps
		*out = new(bool)
		**out = **in
	}
	if in.AutoAssignPublicIps != nil {
		in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps
		*out = new(bool)
		**out = **in
	}
	if in.AutoHealing != nil {
		in, out := &in.AutoHealing, &out.AutoHealing
		*out = new(bool)
		**out = **in
	}
	if in.CloudwatchConfiguration != nil {
		in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration
		*out = new(HAProxyLayerCloudwatchConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.CustomConfigureRecipes != nil {
		in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomDeployRecipes != nil {
		in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomInstanceProfileArn != nil {
		in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn
		*out = new(string)
		**out = **in
	}
	if in.CustomJSON != nil {
		in, out := &in.CustomJSON, &out.CustomJSON
		*out = new(string)
		**out = **in
	}
	if in.CustomSecurityGroupIds != nil {
		in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomSetupRecipes != nil {
		in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomShutdownRecipes != nil {
		in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomUndeployRecipes != nil {
		in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DrainELBOnShutdown != nil {
		in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown
		*out = new(bool)
		**out = **in
	}
	if in.EBSVolume != nil {
		in, out := &in.EBSVolume, &out.EBSVolume
		*out = make([]HAProxyLayerEBSVolumeObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ElasticLoadBalancer != nil {
		in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer
		*out = new(string)
		**out = **in
	}
	if in.HealthcheckMethod != nil {
		in, out := &in.HealthcheckMethod, &out.HealthcheckMethod
		*out = new(string)
		**out = **in
	}
	if in.HealthcheckURL != nil {
		in, out := &in.HealthcheckURL, &out.HealthcheckURL
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.InstallUpdatesOnBoot != nil {
		in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot
		*out = new(bool)
		**out = **in
	}
	if in.InstanceShutdownTimeout != nil {
		in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout
		*out = new(float64)
		**out = **in
	}
	if in.LoadBasedAutoScaling != nil {
		in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling
		*out = new(HAProxyLayerLoadBasedAutoScalingObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.StackID != nil {
		in, out := &in.StackID, &out.StackID
		*out = new(string)
		**out = **in
	}
	if in.StatsEnabled != nil {
		in, out := &in.StatsEnabled, &out.StatsEnabled
		*out = new(bool)
		**out = **in
	}
	if in.StatsPassword != nil {
		in, out := &in.StatsPassword, &out.StatsPassword
		*out = new(string)
		**out = **in
	}
	if in.StatsURL != nil {
		in, out := &in.StatsURL, &out.StatsURL
		*out = new(string)
		**out = **in
	}
	if in.StatsUser != nil {
		in, out := &in.StatsUser, &out.StatsUser
		*out = new(string)
		**out = **in
	}
	if in.SystemPackages != nil {
		in, out := &in.SystemPackages, &out.SystemPackages
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TagsAll != nil {
		in, out := &in.TagsAll, &out.TagsAll
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.UseEBSOptimizedInstances != nil {
		in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerObservation.
func (in *HAProxyLayerObservation) DeepCopy() *HAProxyLayerObservation {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerParameters) DeepCopyInto(out *HAProxyLayerParameters) {
	*out = *in
	if in.AutoAssignElasticIps != nil {
		in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps
		*out = new(bool)
		**out = **in
	}
	if in.AutoAssignPublicIps != nil {
		in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps
		*out = new(bool)
		**out = **in
	}
	if in.AutoHealing != nil {
		in, out := &in.AutoHealing, &out.AutoHealing
		*out = new(bool)
		**out = **in
	}
	if in.CloudwatchConfiguration != nil {
		in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration
		*out = new(HAProxyLayerCloudwatchConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.CustomConfigureRecipes != nil {
		in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomDeployRecipes != nil {
		in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomInstanceProfileArn != nil {
		in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn
		*out = new(string)
		**out = **in
	}
	if in.CustomJSON != nil {
		in, out := &in.CustomJSON, &out.CustomJSON
		*out = new(string)
		**out = **in
	}
	if in.CustomSecurityGroupIDRefs != nil {
		in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs
		*out = make([]v1.Reference, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CustomSecurityGroupIDSelector != nil {
		in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.CustomSecurityGroupIds != nil {
		in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomSetupRecipes != nil {
		in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomShutdownRecipes != nil {
		in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CustomUndeployRecipes != nil {
		in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DrainELBOnShutdown != nil {
		in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown
		*out = new(bool)
		**out = **in
	}
	if in.EBSVolume != nil {
		in, out := &in.EBSVolume, &out.EBSVolume
		*out = make([]HAProxyLayerEBSVolumeParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ElasticLoadBalancer != nil {
		in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer
		*out = new(string)
		**out = **in
	}
	if in.HealthcheckMethod != nil {
		in, out := &in.HealthcheckMethod, &out.HealthcheckMethod
		*out = new(string)
		**out = **in
	}
	if in.HealthcheckURL != nil {
		in, out := &in.HealthcheckURL, &out.HealthcheckURL
		*out = new(string)
		**out = **in
	}
	if in.InstallUpdatesOnBoot != nil {
		in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot
		*out = new(bool)
		**out = **in
	}
	if in.InstanceShutdownTimeout != nil {
		in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout
		*out = new(float64)
		**out = **in
	}
	if in.LoadBasedAutoScaling != nil {
		in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling
		*out = new(HAProxyLayerLoadBasedAutoScalingParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.StackID != nil {
		in, out := &in.StackID, &out.StackID
		*out = new(string)
		**out = **in
	}
	if in.StackIDRef != nil {
		in, out := &in.StackIDRef, &out.StackIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.StackIDSelector != nil {
		in, out := &in.StackIDSelector, &out.StackIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.StatsEnabled != nil {
		in, out := &in.StatsEnabled, &out.StatsEnabled
		*out = new(bool)
		**out = **in
	}
	if in.StatsPassword != nil {
		in, out := &in.StatsPassword, &out.StatsPassword
		*out = new(string)
		**out = **in
	}
	if in.StatsURL != nil {
		in, out := &in.StatsURL, &out.StatsURL
		*out = new(string)
		**out = **in
	}
	if in.StatsUser != nil {
		in, out := &in.StatsUser, &out.StatsUser
		*out = new(string)
		**out = **in
	}
	if in.SystemPackages != nil {
		in, out := &in.SystemPackages, &out.SystemPackages
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.UseEBSOptimizedInstances != nil {
		in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerParameters.
func (in *HAProxyLayerParameters) DeepCopy() *HAProxyLayerParameters {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerSpec) DeepCopyInto(out *HAProxyLayerSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerSpec.
func (in *HAProxyLayerSpec) DeepCopy() *HAProxyLayerSpec {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HAProxyLayerStatus) DeepCopyInto(out *HAProxyLayerStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HAProxyLayerStatus.
func (in *HAProxyLayerStatus) DeepCopy() *HAProxyLayerStatus {
	if in == nil {
		return nil
	}
	out := new(HAProxyLayerStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayer) DeepCopyInto(out *JavaAppLayer) {
	// NOTE(review): controller-gen deepcopy output; regenerate instead of hand-editing.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayer.
func (in *JavaAppLayer) DeepCopy() *JavaAppLayer {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *JavaAppLayer) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *JavaAppLayerCloudwatchConfigurationInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogStreams != nil {
		in, out := &in.LogStreams, &out.LogStreams
		*out = make([]JavaAppLayerCloudwatchConfigurationLogStreamsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerCloudwatchConfigurationInitParameters.
func (in *JavaAppLayerCloudwatchConfigurationInitParameters) DeepCopy() *JavaAppLayerCloudwatchConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayerCloudwatchConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *JavaAppLayerCloudwatchConfigurationLogStreamsInitParameters) {
	*out = *in
	if in.BatchCount != nil {
		in, out := &in.BatchCount, &out.BatchCount
		*out = new(float64)
		**out = **in
	}
	if in.BatchSize != nil {
		in, out := &in.BatchSize, &out.BatchSize
		*out = new(float64)
		**out = **in
	}
	if in.BufferDuration != nil {
		in, out := &in.BufferDuration, &out.BufferDuration
		*out = new(float64)
		**out = **in
	}
	if in.DatetimeFormat != nil {
		in, out := &in.DatetimeFormat, &out.DatetimeFormat
		*out = new(string)
		**out = **in
	}
	if in.Encoding != nil {
		in, out := &in.Encoding, &out.Encoding
		*out = new(string)
		**out = **in
	}
	if in.File != nil {
		in, out := &in.File, &out.File
		*out = new(string)
		**out = **in
	}
	if in.FileFingerprintLines != nil {
		in, out := &in.FileFingerprintLines, &out.FileFingerprintLines
		*out = new(string)
		**out = **in
	}
	if in.InitialPosition != nil {
		in, out := &in.InitialPosition, &out.InitialPosition
		*out = new(string)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.MultilineStartPattern != nil {
		in, out := &in.MultilineStartPattern, &out.MultilineStartPattern
		*out = new(string)
		**out = **in
	}
	if in.TimeZone != nil {
		in, out := &in.TimeZone, &out.TimeZone
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerCloudwatchConfigurationLogStreamsInitParameters.
func (in *JavaAppLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *JavaAppLayerCloudwatchConfigurationLogStreamsInitParameters {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayerCloudwatchConfigurationLogStreamsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayerCloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *JavaAppLayerCloudwatchConfigurationLogStreamsObservation) {
	*out = *in
	if in.BatchCount != nil {
		in, out := &in.BatchCount, &out.BatchCount
		*out = new(float64)
		**out = **in
	}
	if in.BatchSize != nil {
		in, out := &in.BatchSize, &out.BatchSize
		*out = new(float64)
		**out = **in
	}
	if in.BufferDuration != nil {
		in, out := &in.BufferDuration, &out.BufferDuration
		*out = new(float64)
		**out = **in
	}
	if in.DatetimeFormat != nil {
		in, out := &in.DatetimeFormat, &out.DatetimeFormat
		*out = new(string)
		**out = **in
	}
	if in.Encoding != nil {
		in, out := &in.Encoding, &out.Encoding
		*out = new(string)
		**out = **in
	}
	if in.File != nil {
		in, out := &in.File, &out.File
		*out = new(string)
		**out = **in
	}
	if in.FileFingerprintLines != nil {
		in, out := &in.FileFingerprintLines, &out.FileFingerprintLines
		*out = new(string)
		**out = **in
	}
	if in.InitialPosition != nil {
		in, out := &in.InitialPosition, &out.InitialPosition
		*out = new(string)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.MultilineStartPattern != nil {
		in, out := &in.MultilineStartPattern, &out.MultilineStartPattern
		*out = new(string)
		**out = **in
	}
	if in.TimeZone != nil {
		in, out := &in.TimeZone, &out.TimeZone
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerCloudwatchConfigurationLogStreamsObservation.
func (in *JavaAppLayerCloudwatchConfigurationLogStreamsObservation) DeepCopy() *JavaAppLayerCloudwatchConfigurationLogStreamsObservation {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayerCloudwatchConfigurationLogStreamsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayerCloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *JavaAppLayerCloudwatchConfigurationLogStreamsParameters) {
	*out = *in
	if in.BatchCount != nil {
		in, out := &in.BatchCount, &out.BatchCount
		*out = new(float64)
		**out = **in
	}
	if in.BatchSize != nil {
		in, out := &in.BatchSize, &out.BatchSize
		*out = new(float64)
		**out = **in
	}
	if in.BufferDuration != nil {
		in, out := &in.BufferDuration, &out.BufferDuration
		*out = new(float64)
		**out = **in
	}
	if in.DatetimeFormat != nil {
		in, out := &in.DatetimeFormat, &out.DatetimeFormat
		*out = new(string)
		**out = **in
	}
	if in.Encoding != nil {
		in, out := &in.Encoding, &out.Encoding
		*out = new(string)
		**out = **in
	}
	if in.File != nil {
		in, out := &in.File, &out.File
		*out = new(string)
		**out = **in
	}
	if in.FileFingerprintLines != nil {
		in, out := &in.FileFingerprintLines, &out.FileFingerprintLines
		*out = new(string)
		**out = **in
	}
	if in.InitialPosition != nil {
		in, out := &in.InitialPosition, &out.InitialPosition
		*out = new(string)
		**out = **in
	}
	if in.LogGroupName != nil {
		in, out := &in.LogGroupName, &out.LogGroupName
		*out = new(string)
		**out = **in
	}
	if in.MultilineStartPattern != nil {
		in, out := &in.MultilineStartPattern, &out.MultilineStartPattern
		*out = new(string)
		**out = **in
	}
	if in.TimeZone != nil {
		in, out := &in.TimeZone, &out.TimeZone
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerCloudwatchConfigurationLogStreamsParameters.
func (in *JavaAppLayerCloudwatchConfigurationLogStreamsParameters) DeepCopy() *JavaAppLayerCloudwatchConfigurationLogStreamsParameters {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayerCloudwatchConfigurationLogStreamsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayerCloudwatchConfigurationObservation) DeepCopyInto(out *JavaAppLayerCloudwatchConfigurationObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogStreams != nil {
		in, out := &in.LogStreams, &out.LogStreams
		*out = make([]JavaAppLayerCloudwatchConfigurationLogStreamsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerCloudwatchConfigurationObservation.
func (in *JavaAppLayerCloudwatchConfigurationObservation) DeepCopy() *JavaAppLayerCloudwatchConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayerCloudwatchConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayerCloudwatchConfigurationParameters) DeepCopyInto(out *JavaAppLayerCloudwatchConfigurationParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.LogStreams != nil {
		in, out := &in.LogStreams, &out.LogStreams
		*out = make([]JavaAppLayerCloudwatchConfigurationLogStreamsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerCloudwatchConfigurationParameters.
func (in *JavaAppLayerCloudwatchConfigurationParameters) DeepCopy() *JavaAppLayerCloudwatchConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayerCloudwatchConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayerEBSVolumeInitParameters) DeepCopyInto(out *JavaAppLayerEBSVolumeInitParameters) {
	*out = *in
	if in.Encrypted != nil {
		in, out := &in.Encrypted, &out.Encrypted
		*out = new(bool)
		**out = **in
	}
	if in.Iops != nil {
		in, out := &in.Iops, &out.Iops
		*out = new(float64)
		**out = **in
	}
	if in.MountPoint != nil {
		in, out := &in.MountPoint, &out.MountPoint
		*out = new(string)
		**out = **in
	}
	if in.NumberOfDisks != nil {
		in, out := &in.NumberOfDisks, &out.NumberOfDisks
		*out = new(float64)
		**out = **in
	}
	if in.RaidLevel != nil {
		in, out := &in.RaidLevel, &out.RaidLevel
		*out = new(string)
		**out = **in
	}
	if in.Size != nil {
		in, out := &in.Size, &out.Size
		*out = new(float64)
		**out = **in
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerEBSVolumeInitParameters.
func (in *JavaAppLayerEBSVolumeInitParameters) DeepCopy() *JavaAppLayerEBSVolumeInitParameters {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayerEBSVolumeInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayerEBSVolumeObservation) DeepCopyInto(out *JavaAppLayerEBSVolumeObservation) {
	*out = *in
	if in.Encrypted != nil {
		in, out := &in.Encrypted, &out.Encrypted
		*out = new(bool)
		**out = **in
	}
	if in.Iops != nil {
		in, out := &in.Iops, &out.Iops
		*out = new(float64)
		**out = **in
	}
	if in.MountPoint != nil {
		in, out := &in.MountPoint, &out.MountPoint
		*out = new(string)
		**out = **in
	}
	if in.NumberOfDisks != nil {
		in, out := &in.NumberOfDisks, &out.NumberOfDisks
		*out = new(float64)
		**out = **in
	}
	if in.RaidLevel != nil {
		in, out := &in.RaidLevel, &out.RaidLevel
		*out = new(string)
		**out = **in
	}
	if in.Size != nil {
		in, out := &in.Size, &out.Size
		*out = new(float64)
		**out = **in
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerEBSVolumeObservation.
func (in *JavaAppLayerEBSVolumeObservation) DeepCopy() *JavaAppLayerEBSVolumeObservation {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayerEBSVolumeObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JavaAppLayerEBSVolumeParameters) DeepCopyInto(out *JavaAppLayerEBSVolumeParameters) {
	*out = *in
	if in.Encrypted != nil {
		in, out := &in.Encrypted, &out.Encrypted
		*out = new(bool)
		**out = **in
	}
	if in.Iops != nil {
		in, out := &in.Iops, &out.Iops
		*out = new(float64)
		**out = **in
	}
	if in.MountPoint != nil {
		in, out := &in.MountPoint, &out.MountPoint
		*out = new(string)
		**out = **in
	}
	if in.NumberOfDisks != nil {
		in, out := &in.NumberOfDisks, &out.NumberOfDisks
		*out = new(float64)
		**out = **in
	}
	if in.RaidLevel != nil {
		in, out := &in.RaidLevel, &out.RaidLevel
		*out = new(string)
		**out = **in
	}
	if in.Size != nil {
		in, out := &in.Size, &out.Size
		*out = new(float64)
		**out = **in
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerEBSVolumeParameters.
func (in *JavaAppLayerEBSVolumeParameters) DeepCopy() *JavaAppLayerEBSVolumeParameters {
	if in == nil {
		return nil
	}
	out := new(JavaAppLayerEBSVolumeParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JavaAppLayerInitParameters) DeepCopyInto(out *JavaAppLayerInitParameters) { + *out = *in + if in.AppServer != nil { + in, out := &in.AppServer, &out.AppServer + *out = new(string) + **out = **in + } + if in.AppServerVersion != nil { + in, out := &in.AppServerVersion, &out.AppServerVersion + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(JavaAppLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]JavaAppLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = 
new(float64) + **out = **in + } + if in.JvmOptions != nil { + in, out := &in.JvmOptions, &out.JvmOptions + *out = new(string) + **out = **in + } + if in.JvmType != nil { + in, out := &in.JvmType, &out.JvmType + *out = new(string) + **out = **in + } + if in.JvmVersion != nil { + in, out := &in.JvmVersion, &out.JvmVersion + *out = new(string) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(JavaAppLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerInitParameters. 
+func (in *JavaAppLayerInitParameters) DeepCopy() *JavaAppLayerInitParameters { + if in == nil { + return nil + } + out := new(JavaAppLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JavaAppLayerList) DeepCopyInto(out *JavaAppLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]JavaAppLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerList. +func (in *JavaAppLayerList) DeepCopy() *JavaAppLayerList { + if in == nil { + return nil + } + out := new(JavaAppLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JavaAppLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JavaAppLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *JavaAppLayerLoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerLoadBasedAutoScalingDownscalingInitParameters. +func (in *JavaAppLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *JavaAppLayerLoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(JavaAppLayerLoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JavaAppLayerLoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *JavaAppLayerLoadBasedAutoScalingDownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerLoadBasedAutoScalingDownscalingObservation. +func (in *JavaAppLayerLoadBasedAutoScalingDownscalingObservation) DeepCopy() *JavaAppLayerLoadBasedAutoScalingDownscalingObservation { + if in == nil { + return nil + } + out := new(JavaAppLayerLoadBasedAutoScalingDownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JavaAppLayerLoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *JavaAppLayerLoadBasedAutoScalingDownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerLoadBasedAutoScalingDownscalingParameters. +func (in *JavaAppLayerLoadBasedAutoScalingDownscalingParameters) DeepCopy() *JavaAppLayerLoadBasedAutoScalingDownscalingParameters { + if in == nil { + return nil + } + out := new(JavaAppLayerLoadBasedAutoScalingDownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JavaAppLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *JavaAppLayerLoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(JavaAppLayerLoadBasedAutoScalingDownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(JavaAppLayerLoadBasedAutoScalingUpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerLoadBasedAutoScalingInitParameters. +func (in *JavaAppLayerLoadBasedAutoScalingInitParameters) DeepCopy() *JavaAppLayerLoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(JavaAppLayerLoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JavaAppLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *JavaAppLayerLoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(JavaAppLayerLoadBasedAutoScalingDownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(JavaAppLayerLoadBasedAutoScalingUpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerLoadBasedAutoScalingObservation. 
+func (in *JavaAppLayerLoadBasedAutoScalingObservation) DeepCopy() *JavaAppLayerLoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(JavaAppLayerLoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JavaAppLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *JavaAppLayerLoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(JavaAppLayerLoadBasedAutoScalingDownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(JavaAppLayerLoadBasedAutoScalingUpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerLoadBasedAutoScalingParameters. +func (in *JavaAppLayerLoadBasedAutoScalingParameters) DeepCopy() *JavaAppLayerLoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(JavaAppLayerLoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JavaAppLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *JavaAppLayerLoadBasedAutoScalingUpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerLoadBasedAutoScalingUpscalingInitParameters. +func (in *JavaAppLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *JavaAppLayerLoadBasedAutoScalingUpscalingInitParameters { + if in == nil { + return nil + } + out := new(JavaAppLayerLoadBasedAutoScalingUpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JavaAppLayerLoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *JavaAppLayerLoadBasedAutoScalingUpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerLoadBasedAutoScalingUpscalingObservation. +func (in *JavaAppLayerLoadBasedAutoScalingUpscalingObservation) DeepCopy() *JavaAppLayerLoadBasedAutoScalingUpscalingObservation { + if in == nil { + return nil + } + out := new(JavaAppLayerLoadBasedAutoScalingUpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JavaAppLayerLoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *JavaAppLayerLoadBasedAutoScalingUpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerLoadBasedAutoScalingUpscalingParameters. +func (in *JavaAppLayerLoadBasedAutoScalingUpscalingParameters) DeepCopy() *JavaAppLayerLoadBasedAutoScalingUpscalingParameters { + if in == nil { + return nil + } + out := new(JavaAppLayerLoadBasedAutoScalingUpscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JavaAppLayerObservation) DeepCopyInto(out *JavaAppLayerObservation) { + *out = *in + if in.AppServer != nil { + in, out := &in.AppServer, &out.AppServer + *out = new(string) + **out = **in + } + if in.AppServerVersion != nil { + in, out := &in.AppServerVersion, &out.AppServerVersion + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(JavaAppLayerCloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + 
if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]JavaAppLayerEBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.JvmOptions != nil { + in, out := &in.JvmOptions, &out.JvmOptions + *out = new(string) + **out = **in + } + if in.JvmType != nil { + in, out := &in.JvmType, &out.JvmType + *out = new(string) + **out = **in + } + if in.JvmVersion != 
nil { + in, out := &in.JvmVersion, &out.JvmVersion + *out = new(string) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(JavaAppLayerLoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerObservation. +func (in *JavaAppLayerObservation) DeepCopy() *JavaAppLayerObservation { + if in == nil { + return nil + } + out := new(JavaAppLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JavaAppLayerParameters) DeepCopyInto(out *JavaAppLayerParameters) { + *out = *in + if in.AppServer != nil { + in, out := &in.AppServer, &out.AppServer + *out = new(string) + **out = **in + } + if in.AppServerVersion != nil { + in, out := &in.AppServerVersion, &out.AppServerVersion + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(JavaAppLayerCloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != 
nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]JavaAppLayerEBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if 
in.JvmOptions != nil { + in, out := &in.JvmOptions, &out.JvmOptions + *out = new(string) + **out = **in + } + if in.JvmType != nil { + in, out := &in.JvmType, &out.JvmType + *out = new(string) + **out = **in + } + if in.JvmVersion != nil { + in, out := &in.JvmVersion, &out.JvmVersion + *out = new(string) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(JavaAppLayerLoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerParameters. 
+func (in *JavaAppLayerParameters) DeepCopy() *JavaAppLayerParameters { + if in == nil { + return nil + } + out := new(JavaAppLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JavaAppLayerSpec) DeepCopyInto(out *JavaAppLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerSpec. +func (in *JavaAppLayerSpec) DeepCopy() *JavaAppLayerSpec { + if in == nil { + return nil + } + out := new(JavaAppLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JavaAppLayerStatus) DeepCopyInto(out *JavaAppLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JavaAppLayerStatus. +func (in *JavaAppLayerStatus) DeepCopy() *JavaAppLayerStatus { + if in == nil { + return nil + } + out := new(JavaAppLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *LoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBasedAutoScalingDownscalingInitParameters. +func (in *LoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *LoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(LoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *LoadBasedAutoScalingDownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBasedAutoScalingDownscalingObservation. +func (in *LoadBasedAutoScalingDownscalingObservation) DeepCopy() *LoadBasedAutoScalingDownscalingObservation { + if in == nil { + return nil + } + out := new(LoadBasedAutoScalingDownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *LoadBasedAutoScalingDownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBasedAutoScalingDownscalingParameters. +func (in *LoadBasedAutoScalingDownscalingParameters) DeepCopy() *LoadBasedAutoScalingDownscalingParameters { + if in == nil { + return nil + } + out := new(LoadBasedAutoScalingDownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBasedAutoScalingInitParameters) DeepCopyInto(out *LoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(DownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(UpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBasedAutoScalingInitParameters. +func (in *LoadBasedAutoScalingInitParameters) DeepCopy() *LoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(LoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBasedAutoScalingObservation) DeepCopyInto(out *LoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(DownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(UpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBasedAutoScalingObservation. +func (in *LoadBasedAutoScalingObservation) DeepCopy() *LoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(LoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBasedAutoScalingParameters) DeepCopyInto(out *LoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(DownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(UpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBasedAutoScalingParameters. +func (in *LoadBasedAutoScalingParameters) DeepCopy() *LoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(LoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *LoadBasedAutoScalingUpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, 
&out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBasedAutoScalingUpscalingInitParameters. +func (in *LoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *LoadBasedAutoScalingUpscalingInitParameters { + if in == nil { + return nil + } + out := new(LoadBasedAutoScalingUpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *LoadBasedAutoScalingUpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBasedAutoScalingUpscalingObservation. 
+func (in *LoadBasedAutoScalingUpscalingObservation) DeepCopy() *LoadBasedAutoScalingUpscalingObservation { + if in == nil { + return nil + } + out := new(LoadBasedAutoScalingUpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *LoadBasedAutoScalingUpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBasedAutoScalingUpscalingParameters. +func (in *LoadBasedAutoScalingUpscalingParameters) DeepCopy() *LoadBasedAutoScalingUpscalingParameters { + if in == nil { + return nil + } + out := new(LoadBasedAutoScalingUpscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogStreamsInitParameters) DeepCopyInto(out *LogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogStreamsInitParameters. +func (in *LogStreamsInitParameters) DeepCopy() *LogStreamsInitParameters { + if in == nil { + return nil + } + out := new(LogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogStreamsObservation) DeepCopyInto(out *LogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogStreamsObservation. +func (in *LogStreamsObservation) DeepCopy() *LogStreamsObservation { + if in == nil { + return nil + } + out := new(LogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogStreamsParameters) DeepCopyInto(out *LogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogStreamsParameters. +func (in *LogStreamsParameters) DeepCopy() *LogStreamsParameters { + if in == nil { + return nil + } + out := new(LogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemcachedLayer) DeepCopyInto(out *MemcachedLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayer. +func (in *MemcachedLayer) DeepCopy() *MemcachedLayer { + if in == nil { + return nil + } + out := new(MemcachedLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MemcachedLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *MemcachedLayerCloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]MemcachedLayerCloudwatchConfigurationLogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerCloudwatchConfigurationInitParameters. +func (in *MemcachedLayerCloudwatchConfigurationInitParameters) DeepCopy() *MemcachedLayerCloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerCloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemcachedLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *MemcachedLayerCloudwatchConfigurationLogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerCloudwatchConfigurationLogStreamsInitParameters. 
+func (in *MemcachedLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *MemcachedLayerCloudwatchConfigurationLogStreamsInitParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerCloudwatchConfigurationLogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerCloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *MemcachedLayerCloudwatchConfigurationLogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
MemcachedLayerCloudwatchConfigurationLogStreamsObservation. +func (in *MemcachedLayerCloudwatchConfigurationLogStreamsObservation) DeepCopy() *MemcachedLayerCloudwatchConfigurationLogStreamsObservation { + if in == nil { + return nil + } + out := new(MemcachedLayerCloudwatchConfigurationLogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerCloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *MemcachedLayerCloudwatchConfigurationLogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new MemcachedLayerCloudwatchConfigurationLogStreamsParameters. +func (in *MemcachedLayerCloudwatchConfigurationLogStreamsParameters) DeepCopy() *MemcachedLayerCloudwatchConfigurationLogStreamsParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerCloudwatchConfigurationLogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerCloudwatchConfigurationObservation) DeepCopyInto(out *MemcachedLayerCloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]MemcachedLayerCloudwatchConfigurationLogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerCloudwatchConfigurationObservation. +func (in *MemcachedLayerCloudwatchConfigurationObservation) DeepCopy() *MemcachedLayerCloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(MemcachedLayerCloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemcachedLayerCloudwatchConfigurationParameters) DeepCopyInto(out *MemcachedLayerCloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]MemcachedLayerCloudwatchConfigurationLogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerCloudwatchConfigurationParameters. +func (in *MemcachedLayerCloudwatchConfigurationParameters) DeepCopy() *MemcachedLayerCloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerCloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerEBSVolumeInitParameters) DeepCopyInto(out *MemcachedLayerEBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerEBSVolumeInitParameters. 
+func (in *MemcachedLayerEBSVolumeInitParameters) DeepCopy() *MemcachedLayerEBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerEBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerEBSVolumeObservation) DeepCopyInto(out *MemcachedLayerEBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerEBSVolumeObservation. +func (in *MemcachedLayerEBSVolumeObservation) DeepCopy() *MemcachedLayerEBSVolumeObservation { + if in == nil { + return nil + } + out := new(MemcachedLayerEBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemcachedLayerEBSVolumeParameters) DeepCopyInto(out *MemcachedLayerEBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerEBSVolumeParameters. +func (in *MemcachedLayerEBSVolumeParameters) DeepCopy() *MemcachedLayerEBSVolumeParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerEBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemcachedLayerInitParameters) DeepCopyInto(out *MemcachedLayerInitParameters) { + *out = *in + if in.AllocatedMemory != nil { + in, out := &in.AllocatedMemory, &out.AllocatedMemory + *out = new(float64) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(MemcachedLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + 
*out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]MemcachedLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, 
&out.LoadBasedAutoScaling + *out = new(MemcachedLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerInitParameters. +func (in *MemcachedLayerInitParameters) DeepCopy() *MemcachedLayerInitParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemcachedLayerList) DeepCopyInto(out *MemcachedLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MemcachedLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerList. +func (in *MemcachedLayerList) DeepCopy() *MemcachedLayerList { + if in == nil { + return nil + } + out := new(MemcachedLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MemcachedLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *MemcachedLayerLoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if 
in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerLoadBasedAutoScalingDownscalingInitParameters. +func (in *MemcachedLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *MemcachedLayerLoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerLoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerLoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *MemcachedLayerLoadBasedAutoScalingDownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
MemcachedLayerLoadBasedAutoScalingDownscalingObservation. +func (in *MemcachedLayerLoadBasedAutoScalingDownscalingObservation) DeepCopy() *MemcachedLayerLoadBasedAutoScalingDownscalingObservation { + if in == nil { + return nil + } + out := new(MemcachedLayerLoadBasedAutoScalingDownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerLoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *MemcachedLayerLoadBasedAutoScalingDownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerLoadBasedAutoScalingDownscalingParameters. 
+func (in *MemcachedLayerLoadBasedAutoScalingDownscalingParameters) DeepCopy() *MemcachedLayerLoadBasedAutoScalingDownscalingParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerLoadBasedAutoScalingDownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *MemcachedLayerLoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(MemcachedLayerLoadBasedAutoScalingDownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(MemcachedLayerLoadBasedAutoScalingUpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerLoadBasedAutoScalingInitParameters. +func (in *MemcachedLayerLoadBasedAutoScalingInitParameters) DeepCopy() *MemcachedLayerLoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerLoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemcachedLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *MemcachedLayerLoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(MemcachedLayerLoadBasedAutoScalingDownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(MemcachedLayerLoadBasedAutoScalingUpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerLoadBasedAutoScalingObservation. +func (in *MemcachedLayerLoadBasedAutoScalingObservation) DeepCopy() *MemcachedLayerLoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(MemcachedLayerLoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *MemcachedLayerLoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(MemcachedLayerLoadBasedAutoScalingDownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(MemcachedLayerLoadBasedAutoScalingUpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerLoadBasedAutoScalingParameters. 
+func (in *MemcachedLayerLoadBasedAutoScalingParameters) DeepCopy() *MemcachedLayerLoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerLoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *MemcachedLayerLoadBasedAutoScalingUpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerLoadBasedAutoScalingUpscalingInitParameters. 
+func (in *MemcachedLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *MemcachedLayerLoadBasedAutoScalingUpscalingInitParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerLoadBasedAutoScalingUpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerLoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *MemcachedLayerLoadBasedAutoScalingUpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerLoadBasedAutoScalingUpscalingObservation. 
+func (in *MemcachedLayerLoadBasedAutoScalingUpscalingObservation) DeepCopy() *MemcachedLayerLoadBasedAutoScalingUpscalingObservation { + if in == nil { + return nil + } + out := new(MemcachedLayerLoadBasedAutoScalingUpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerLoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *MemcachedLayerLoadBasedAutoScalingUpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerLoadBasedAutoScalingUpscalingParameters. 
+func (in *MemcachedLayerLoadBasedAutoScalingUpscalingParameters) DeepCopy() *MemcachedLayerLoadBasedAutoScalingUpscalingParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerLoadBasedAutoScalingUpscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerObservation) DeepCopyInto(out *MemcachedLayerObservation) { + *out = *in + if in.AllocatedMemory != nil { + in, out := &in.AllocatedMemory, &out.AllocatedMemory + *out = new(float64) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(MemcachedLayerCloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in 
+ } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]MemcachedLayerEBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, 
&out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(MemcachedLayerLoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerObservation. +func (in *MemcachedLayerObservation) DeepCopy() *MemcachedLayerObservation { + if in == nil { + return nil + } + out := new(MemcachedLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemcachedLayerParameters) DeepCopyInto(out *MemcachedLayerParameters) { + *out = *in + if in.AllocatedMemory != nil { + in, out := &in.AllocatedMemory, &out.AllocatedMemory + *out = new(float64) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(MemcachedLayerCloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]MemcachedLayerEBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out 
= new(MemcachedLayerLoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerParameters. +func (in *MemcachedLayerParameters) DeepCopy() *MemcachedLayerParameters { + if in == nil { + return nil + } + out := new(MemcachedLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemcachedLayerSpec) DeepCopyInto(out *MemcachedLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerSpec. +func (in *MemcachedLayerSpec) DeepCopy() *MemcachedLayerSpec { + if in == nil { + return nil + } + out := new(MemcachedLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemcachedLayerStatus) DeepCopyInto(out *MemcachedLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedLayerStatus. +func (in *MemcachedLayerStatus) DeepCopy() *MemcachedLayerStatus { + if in == nil { + return nil + } + out := new(MemcachedLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayer) DeepCopyInto(out *MySQLLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayer. +func (in *MySQLLayer) DeepCopy() *MySQLLayer { + if in == nil { + return nil + } + out := new(MySQLLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MySQLLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *MySQLLayerCloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]MySQLLayerCloudwatchConfigurationLogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerCloudwatchConfigurationInitParameters. +func (in *MySQLLayerCloudwatchConfigurationInitParameters) DeepCopy() *MySQLLayerCloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(MySQLLayerCloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MySQLLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *MySQLLayerCloudwatchConfigurationLogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerCloudwatchConfigurationLogStreamsInitParameters. 
+func (in *MySQLLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *MySQLLayerCloudwatchConfigurationLogStreamsInitParameters { + if in == nil { + return nil + } + out := new(MySQLLayerCloudwatchConfigurationLogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerCloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *MySQLLayerCloudwatchConfigurationLogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
MySQLLayerCloudwatchConfigurationLogStreamsObservation. +func (in *MySQLLayerCloudwatchConfigurationLogStreamsObservation) DeepCopy() *MySQLLayerCloudwatchConfigurationLogStreamsObservation { + if in == nil { + return nil + } + out := new(MySQLLayerCloudwatchConfigurationLogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerCloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *MySQLLayerCloudwatchConfigurationLogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new MySQLLayerCloudwatchConfigurationLogStreamsParameters. +func (in *MySQLLayerCloudwatchConfigurationLogStreamsParameters) DeepCopy() *MySQLLayerCloudwatchConfigurationLogStreamsParameters { + if in == nil { + return nil + } + out := new(MySQLLayerCloudwatchConfigurationLogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerCloudwatchConfigurationObservation) DeepCopyInto(out *MySQLLayerCloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]MySQLLayerCloudwatchConfigurationLogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerCloudwatchConfigurationObservation. +func (in *MySQLLayerCloudwatchConfigurationObservation) DeepCopy() *MySQLLayerCloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(MySQLLayerCloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MySQLLayerCloudwatchConfigurationParameters) DeepCopyInto(out *MySQLLayerCloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]MySQLLayerCloudwatchConfigurationLogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerCloudwatchConfigurationParameters. +func (in *MySQLLayerCloudwatchConfigurationParameters) DeepCopy() *MySQLLayerCloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(MySQLLayerCloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerEBSVolumeInitParameters) DeepCopyInto(out *MySQLLayerEBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerEBSVolumeInitParameters. 
+func (in *MySQLLayerEBSVolumeInitParameters) DeepCopy() *MySQLLayerEBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(MySQLLayerEBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerEBSVolumeObservation) DeepCopyInto(out *MySQLLayerEBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerEBSVolumeObservation. +func (in *MySQLLayerEBSVolumeObservation) DeepCopy() *MySQLLayerEBSVolumeObservation { + if in == nil { + return nil + } + out := new(MySQLLayerEBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MySQLLayerEBSVolumeParameters) DeepCopyInto(out *MySQLLayerEBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerEBSVolumeParameters. +func (in *MySQLLayerEBSVolumeParameters) DeepCopy() *MySQLLayerEBSVolumeParameters { + if in == nil { + return nil + } + out := new(MySQLLayerEBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MySQLLayerInitParameters) DeepCopyInto(out *MySQLLayerInitParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(MySQLLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, 
&out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]MySQLLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(MySQLLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, 
&out.Name + *out = new(string) + **out = **in + } + if in.RootPassword != nil { + in, out := &in.RootPassword, &out.RootPassword + *out = new(string) + **out = **in + } + if in.RootPasswordOnAllInstances != nil { + in, out := &in.RootPasswordOnAllInstances, &out.RootPasswordOnAllInstances + *out = new(bool) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerInitParameters. +func (in *MySQLLayerInitParameters) DeepCopy() *MySQLLayerInitParameters { + if in == nil { + return nil + } + out := new(MySQLLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MySQLLayerList) DeepCopyInto(out *MySQLLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MySQLLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerList. +func (in *MySQLLayerList) DeepCopy() *MySQLLayerList { + if in == nil { + return nil + } + out := new(MySQLLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MySQLLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *MySQLLayerLoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := 
&in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerLoadBasedAutoScalingDownscalingInitParameters. +func (in *MySQLLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *MySQLLayerLoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(MySQLLayerLoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerLoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *MySQLLayerLoadBasedAutoScalingDownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerLoadBasedAutoScalingDownscalingObservation. 
+func (in *MySQLLayerLoadBasedAutoScalingDownscalingObservation) DeepCopy() *MySQLLayerLoadBasedAutoScalingDownscalingObservation { + if in == nil { + return nil + } + out := new(MySQLLayerLoadBasedAutoScalingDownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerLoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *MySQLLayerLoadBasedAutoScalingDownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerLoadBasedAutoScalingDownscalingParameters. 
+func (in *MySQLLayerLoadBasedAutoScalingDownscalingParameters) DeepCopy() *MySQLLayerLoadBasedAutoScalingDownscalingParameters { + if in == nil { + return nil + } + out := new(MySQLLayerLoadBasedAutoScalingDownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *MySQLLayerLoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(MySQLLayerLoadBasedAutoScalingDownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(MySQLLayerLoadBasedAutoScalingUpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerLoadBasedAutoScalingInitParameters. +func (in *MySQLLayerLoadBasedAutoScalingInitParameters) DeepCopy() *MySQLLayerLoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(MySQLLayerLoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MySQLLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *MySQLLayerLoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(MySQLLayerLoadBasedAutoScalingDownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(MySQLLayerLoadBasedAutoScalingUpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerLoadBasedAutoScalingObservation. +func (in *MySQLLayerLoadBasedAutoScalingObservation) DeepCopy() *MySQLLayerLoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(MySQLLayerLoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *MySQLLayerLoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(MySQLLayerLoadBasedAutoScalingDownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(MySQLLayerLoadBasedAutoScalingUpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerLoadBasedAutoScalingParameters. 
+func (in *MySQLLayerLoadBasedAutoScalingParameters) DeepCopy() *MySQLLayerLoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(MySQLLayerLoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *MySQLLayerLoadBasedAutoScalingUpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerLoadBasedAutoScalingUpscalingInitParameters. 
+func (in *MySQLLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *MySQLLayerLoadBasedAutoScalingUpscalingInitParameters { + if in == nil { + return nil + } + out := new(MySQLLayerLoadBasedAutoScalingUpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerLoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *MySQLLayerLoadBasedAutoScalingUpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerLoadBasedAutoScalingUpscalingObservation. 
+func (in *MySQLLayerLoadBasedAutoScalingUpscalingObservation) DeepCopy() *MySQLLayerLoadBasedAutoScalingUpscalingObservation { + if in == nil { + return nil + } + out := new(MySQLLayerLoadBasedAutoScalingUpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerLoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *MySQLLayerLoadBasedAutoScalingUpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerLoadBasedAutoScalingUpscalingParameters. 
+func (in *MySQLLayerLoadBasedAutoScalingUpscalingParameters) DeepCopy() *MySQLLayerLoadBasedAutoScalingUpscalingParameters { + if in == nil { + return nil + } + out := new(MySQLLayerLoadBasedAutoScalingUpscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerObservation) DeepCopyInto(out *MySQLLayerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(MySQLLayerCloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIds != 
nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]MySQLLayerEBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + 
*out = new(MySQLLayerLoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RootPassword != nil { + in, out := &in.RootPassword, &out.RootPassword + *out = new(string) + **out = **in + } + if in.RootPasswordOnAllInstances != nil { + in, out := &in.RootPasswordOnAllInstances, &out.RootPasswordOnAllInstances + *out = new(bool) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerObservation. 
+func (in *MySQLLayerObservation) DeepCopy() *MySQLLayerObservation { + if in == nil { + return nil + } + out := new(MySQLLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerParameters) DeepCopyInto(out *MySQLLayerParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(MySQLLayerCloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]MySQLLayerEBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, 
&out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(MySQLLayerLoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RootPassword != nil { + in, out := &in.RootPassword, &out.RootPassword + *out = new(string) + **out = **in + } + if in.RootPasswordOnAllInstances != nil { + in, out := &in.RootPasswordOnAllInstances, &out.RootPasswordOnAllInstances + *out = new(bool) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerParameters. 
+func (in *MySQLLayerParameters) DeepCopy() *MySQLLayerParameters { + if in == nil { + return nil + } + out := new(MySQLLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerSpec) DeepCopyInto(out *MySQLLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerSpec. +func (in *MySQLLayerSpec) DeepCopy() *MySQLLayerSpec { + if in == nil { + return nil + } + out := new(MySQLLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLLayerStatus) DeepCopyInto(out *MySQLLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLLayerStatus. +func (in *MySQLLayerStatus) DeepCopy() *MySQLLayerStatus { + if in == nil { + return nil + } + out := new(MySQLLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayer) DeepCopyInto(out *NodeJSAppLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayer. 
+func (in *NodeJSAppLayer) DeepCopy() *NodeJSAppLayer { + if in == nil { + return nil + } + out := new(NodeJSAppLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeJSAppLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *NodeJSAppLayerCloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]NodeJSAppLayerCloudwatchConfigurationLogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerCloudwatchConfigurationInitParameters. +func (in *NodeJSAppLayerCloudwatchConfigurationInitParameters) DeepCopy() *NodeJSAppLayerCloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerCloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeJSAppLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *NodeJSAppLayerCloudwatchConfigurationLogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerCloudwatchConfigurationLogStreamsInitParameters. 
+func (in *NodeJSAppLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *NodeJSAppLayerCloudwatchConfigurationLogStreamsInitParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerCloudwatchConfigurationLogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerCloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *NodeJSAppLayerCloudwatchConfigurationLogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
NodeJSAppLayerCloudwatchConfigurationLogStreamsObservation. +func (in *NodeJSAppLayerCloudwatchConfigurationLogStreamsObservation) DeepCopy() *NodeJSAppLayerCloudwatchConfigurationLogStreamsObservation { + if in == nil { + return nil + } + out := new(NodeJSAppLayerCloudwatchConfigurationLogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerCloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *NodeJSAppLayerCloudwatchConfigurationLogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new NodeJSAppLayerCloudwatchConfigurationLogStreamsParameters. +func (in *NodeJSAppLayerCloudwatchConfigurationLogStreamsParameters) DeepCopy() *NodeJSAppLayerCloudwatchConfigurationLogStreamsParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerCloudwatchConfigurationLogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerCloudwatchConfigurationObservation) DeepCopyInto(out *NodeJSAppLayerCloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]NodeJSAppLayerCloudwatchConfigurationLogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerCloudwatchConfigurationObservation. +func (in *NodeJSAppLayerCloudwatchConfigurationObservation) DeepCopy() *NodeJSAppLayerCloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(NodeJSAppLayerCloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeJSAppLayerCloudwatchConfigurationParameters) DeepCopyInto(out *NodeJSAppLayerCloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]NodeJSAppLayerCloudwatchConfigurationLogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerCloudwatchConfigurationParameters. +func (in *NodeJSAppLayerCloudwatchConfigurationParameters) DeepCopy() *NodeJSAppLayerCloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerCloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerEBSVolumeInitParameters) DeepCopyInto(out *NodeJSAppLayerEBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerEBSVolumeInitParameters. 
+func (in *NodeJSAppLayerEBSVolumeInitParameters) DeepCopy() *NodeJSAppLayerEBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerEBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerEBSVolumeObservation) DeepCopyInto(out *NodeJSAppLayerEBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerEBSVolumeObservation. +func (in *NodeJSAppLayerEBSVolumeObservation) DeepCopy() *NodeJSAppLayerEBSVolumeObservation { + if in == nil { + return nil + } + out := new(NodeJSAppLayerEBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeJSAppLayerEBSVolumeParameters) DeepCopyInto(out *NodeJSAppLayerEBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerEBSVolumeParameters. +func (in *NodeJSAppLayerEBSVolumeParameters) DeepCopy() *NodeJSAppLayerEBSVolumeParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerEBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeJSAppLayerInitParameters) DeepCopyInto(out *NodeJSAppLayerInitParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(NodeJSAppLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := 
&in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]NodeJSAppLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(NodeJSAppLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeJSVersion != nil { + in, out := &in.NodeJSVersion, &out.NodeJSVersion + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerInitParameters. +func (in *NodeJSAppLayerInitParameters) DeepCopy() *NodeJSAppLayerInitParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeJSAppLayerList) DeepCopyInto(out *NodeJSAppLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeJSAppLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerList. +func (in *NodeJSAppLayerList) DeepCopy() *NodeJSAppLayerList { + if in == nil { + return nil + } + out := new(NodeJSAppLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeJSAppLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *NodeJSAppLayerLoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if 
in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerLoadBasedAutoScalingDownscalingInitParameters. +func (in *NodeJSAppLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *NodeJSAppLayerLoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerLoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerLoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *NodeJSAppLayerLoadBasedAutoScalingDownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
NodeJSAppLayerLoadBasedAutoScalingDownscalingObservation. +func (in *NodeJSAppLayerLoadBasedAutoScalingDownscalingObservation) DeepCopy() *NodeJSAppLayerLoadBasedAutoScalingDownscalingObservation { + if in == nil { + return nil + } + out := new(NodeJSAppLayerLoadBasedAutoScalingDownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerLoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *NodeJSAppLayerLoadBasedAutoScalingDownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerLoadBasedAutoScalingDownscalingParameters. 
+func (in *NodeJSAppLayerLoadBasedAutoScalingDownscalingParameters) DeepCopy() *NodeJSAppLayerLoadBasedAutoScalingDownscalingParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerLoadBasedAutoScalingDownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *NodeJSAppLayerLoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(NodeJSAppLayerLoadBasedAutoScalingDownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(NodeJSAppLayerLoadBasedAutoScalingUpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerLoadBasedAutoScalingInitParameters. +func (in *NodeJSAppLayerLoadBasedAutoScalingInitParameters) DeepCopy() *NodeJSAppLayerLoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerLoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeJSAppLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *NodeJSAppLayerLoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(NodeJSAppLayerLoadBasedAutoScalingDownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(NodeJSAppLayerLoadBasedAutoScalingUpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerLoadBasedAutoScalingObservation. +func (in *NodeJSAppLayerLoadBasedAutoScalingObservation) DeepCopy() *NodeJSAppLayerLoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(NodeJSAppLayerLoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *NodeJSAppLayerLoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(NodeJSAppLayerLoadBasedAutoScalingDownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(NodeJSAppLayerLoadBasedAutoScalingUpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerLoadBasedAutoScalingParameters. 
+func (in *NodeJSAppLayerLoadBasedAutoScalingParameters) DeepCopy() *NodeJSAppLayerLoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerLoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *NodeJSAppLayerLoadBasedAutoScalingUpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerLoadBasedAutoScalingUpscalingInitParameters. 
+func (in *NodeJSAppLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *NodeJSAppLayerLoadBasedAutoScalingUpscalingInitParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerLoadBasedAutoScalingUpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerLoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *NodeJSAppLayerLoadBasedAutoScalingUpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerLoadBasedAutoScalingUpscalingObservation. 
+func (in *NodeJSAppLayerLoadBasedAutoScalingUpscalingObservation) DeepCopy() *NodeJSAppLayerLoadBasedAutoScalingUpscalingObservation { + if in == nil { + return nil + } + out := new(NodeJSAppLayerLoadBasedAutoScalingUpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerLoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *NodeJSAppLayerLoadBasedAutoScalingUpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerLoadBasedAutoScalingUpscalingParameters. 
+func (in *NodeJSAppLayerLoadBasedAutoScalingUpscalingParameters) DeepCopy() *NodeJSAppLayerLoadBasedAutoScalingUpscalingParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerLoadBasedAutoScalingUpscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerObservation) DeepCopyInto(out *NodeJSAppLayerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(NodeJSAppLayerCloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if 
in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]NodeJSAppLayerEBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := 
&in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(NodeJSAppLayerLoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeJSVersion != nil { + in, out := &in.NodeJSVersion, &out.NodeJSVersion + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerObservation. +func (in *NodeJSAppLayerObservation) DeepCopy() *NodeJSAppLayerObservation { + if in == nil { + return nil + } + out := new(NodeJSAppLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeJSAppLayerParameters) DeepCopyInto(out *NodeJSAppLayerParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(NodeJSAppLayerCloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, 
&out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]NodeJSAppLayerEBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(NodeJSAppLayerLoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, 
&out.Name + *out = new(string) + **out = **in + } + if in.NodeJSVersion != nil { + in, out := &in.NodeJSVersion, &out.NodeJSVersion + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerParameters. +func (in *NodeJSAppLayerParameters) DeepCopy() *NodeJSAppLayerParameters { + if in == nil { + return nil + } + out := new(NodeJSAppLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeJSAppLayerSpec) DeepCopyInto(out *NodeJSAppLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerSpec. +func (in *NodeJSAppLayerSpec) DeepCopy() *NodeJSAppLayerSpec { + if in == nil { + return nil + } + out := new(NodeJSAppLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeJSAppLayerStatus) DeepCopyInto(out *NodeJSAppLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeJSAppLayerStatus. +func (in *NodeJSAppLayerStatus) DeepCopy() *NodeJSAppLayerStatus { + if in == nil { + return nil + } + out := new(NodeJSAppLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayer) DeepCopyInto(out *PHPAppLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayer. +func (in *PHPAppLayer) DeepCopy() *PHPAppLayer { + if in == nil { + return nil + } + out := new(PHPAppLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PHPAppLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *PHPAppLayerCloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]PHPAppLayerCloudwatchConfigurationLogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerCloudwatchConfigurationInitParameters. +func (in *PHPAppLayerCloudwatchConfigurationInitParameters) DeepCopy() *PHPAppLayerCloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerCloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PHPAppLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *PHPAppLayerCloudwatchConfigurationLogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerCloudwatchConfigurationLogStreamsInitParameters. 
+func (in *PHPAppLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *PHPAppLayerCloudwatchConfigurationLogStreamsInitParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerCloudwatchConfigurationLogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerCloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *PHPAppLayerCloudwatchConfigurationLogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
PHPAppLayerCloudwatchConfigurationLogStreamsObservation. +func (in *PHPAppLayerCloudwatchConfigurationLogStreamsObservation) DeepCopy() *PHPAppLayerCloudwatchConfigurationLogStreamsObservation { + if in == nil { + return nil + } + out := new(PHPAppLayerCloudwatchConfigurationLogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerCloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *PHPAppLayerCloudwatchConfigurationLogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying 
the receiver, creating a new PHPAppLayerCloudwatchConfigurationLogStreamsParameters. +func (in *PHPAppLayerCloudwatchConfigurationLogStreamsParameters) DeepCopy() *PHPAppLayerCloudwatchConfigurationLogStreamsParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerCloudwatchConfigurationLogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerCloudwatchConfigurationObservation) DeepCopyInto(out *PHPAppLayerCloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]PHPAppLayerCloudwatchConfigurationLogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerCloudwatchConfigurationObservation. +func (in *PHPAppLayerCloudwatchConfigurationObservation) DeepCopy() *PHPAppLayerCloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(PHPAppLayerCloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PHPAppLayerCloudwatchConfigurationParameters) DeepCopyInto(out *PHPAppLayerCloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]PHPAppLayerCloudwatchConfigurationLogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerCloudwatchConfigurationParameters. +func (in *PHPAppLayerCloudwatchConfigurationParameters) DeepCopy() *PHPAppLayerCloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerCloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerEBSVolumeInitParameters) DeepCopyInto(out *PHPAppLayerEBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerEBSVolumeInitParameters. 
+func (in *PHPAppLayerEBSVolumeInitParameters) DeepCopy() *PHPAppLayerEBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerEBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerEBSVolumeObservation) DeepCopyInto(out *PHPAppLayerEBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerEBSVolumeObservation. +func (in *PHPAppLayerEBSVolumeObservation) DeepCopy() *PHPAppLayerEBSVolumeObservation { + if in == nil { + return nil + } + out := new(PHPAppLayerEBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PHPAppLayerEBSVolumeParameters) DeepCopyInto(out *PHPAppLayerEBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerEBSVolumeParameters. +func (in *PHPAppLayerEBSVolumeParameters) DeepCopy() *PHPAppLayerEBSVolumeParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerEBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PHPAppLayerInitParameters) DeepCopyInto(out *PHPAppLayerInitParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(PHPAppLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, 
&out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]PHPAppLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(PHPAppLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, 
&out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerInitParameters. +func (in *PHPAppLayerInitParameters) DeepCopy() *PHPAppLayerInitParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PHPAppLayerList) DeepCopyInto(out *PHPAppLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PHPAppLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerList. +func (in *PHPAppLayerList) DeepCopy() *PHPAppLayerList { + if in == nil { + return nil + } + out := new(PHPAppLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PHPAppLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *PHPAppLayerLoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + 
in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerLoadBasedAutoScalingDownscalingInitParameters. +func (in *PHPAppLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *PHPAppLayerLoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerLoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerLoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *PHPAppLayerLoadBasedAutoScalingDownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerLoadBasedAutoScalingDownscalingObservation. 
+func (in *PHPAppLayerLoadBasedAutoScalingDownscalingObservation) DeepCopy() *PHPAppLayerLoadBasedAutoScalingDownscalingObservation { + if in == nil { + return nil + } + out := new(PHPAppLayerLoadBasedAutoScalingDownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerLoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *PHPAppLayerLoadBasedAutoScalingDownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerLoadBasedAutoScalingDownscalingParameters. 
+func (in *PHPAppLayerLoadBasedAutoScalingDownscalingParameters) DeepCopy() *PHPAppLayerLoadBasedAutoScalingDownscalingParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerLoadBasedAutoScalingDownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *PHPAppLayerLoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(PHPAppLayerLoadBasedAutoScalingDownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(PHPAppLayerLoadBasedAutoScalingUpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerLoadBasedAutoScalingInitParameters. +func (in *PHPAppLayerLoadBasedAutoScalingInitParameters) DeepCopy() *PHPAppLayerLoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerLoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PHPAppLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *PHPAppLayerLoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(PHPAppLayerLoadBasedAutoScalingDownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(PHPAppLayerLoadBasedAutoScalingUpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerLoadBasedAutoScalingObservation. +func (in *PHPAppLayerLoadBasedAutoScalingObservation) DeepCopy() *PHPAppLayerLoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(PHPAppLayerLoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *PHPAppLayerLoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(PHPAppLayerLoadBasedAutoScalingDownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(PHPAppLayerLoadBasedAutoScalingUpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerLoadBasedAutoScalingParameters. 
+func (in *PHPAppLayerLoadBasedAutoScalingParameters) DeepCopy() *PHPAppLayerLoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerLoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *PHPAppLayerLoadBasedAutoScalingUpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerLoadBasedAutoScalingUpscalingInitParameters. 
+func (in *PHPAppLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *PHPAppLayerLoadBasedAutoScalingUpscalingInitParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerLoadBasedAutoScalingUpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerLoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *PHPAppLayerLoadBasedAutoScalingUpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerLoadBasedAutoScalingUpscalingObservation. 
+func (in *PHPAppLayerLoadBasedAutoScalingUpscalingObservation) DeepCopy() *PHPAppLayerLoadBasedAutoScalingUpscalingObservation { + if in == nil { + return nil + } + out := new(PHPAppLayerLoadBasedAutoScalingUpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerLoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *PHPAppLayerLoadBasedAutoScalingUpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerLoadBasedAutoScalingUpscalingParameters. 
+func (in *PHPAppLayerLoadBasedAutoScalingUpscalingParameters) DeepCopy() *PHPAppLayerLoadBasedAutoScalingUpscalingParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerLoadBasedAutoScalingUpscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerObservation) DeepCopyInto(out *PHPAppLayerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(PHPAppLayerCloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIds 
!= nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]PHPAppLayerEBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling 
+ *out = new(PHPAppLayerLoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerObservation. +func (in *PHPAppLayerObservation) DeepCopy() *PHPAppLayerObservation { + if in == nil { + return nil + } + out := new(PHPAppLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PHPAppLayerParameters) DeepCopyInto(out *PHPAppLayerParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(PHPAppLayerCloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, 
&out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]PHPAppLayerEBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(PHPAppLayerLoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, 
&out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerParameters. +func (in *PHPAppLayerParameters) DeepCopy() *PHPAppLayerParameters { + if in == nil { + return nil + } + out := new(PHPAppLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerSpec) DeepCopyInto(out *PHPAppLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerSpec. 
+func (in *PHPAppLayerSpec) DeepCopy() *PHPAppLayerSpec { + if in == nil { + return nil + } + out := new(PHPAppLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PHPAppLayerStatus) DeepCopyInto(out *PHPAppLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PHPAppLayerStatus. +func (in *PHPAppLayerStatus) DeepCopy() *PHPAppLayerStatus { + if in == nil { + return nil + } + out := new(PHPAppLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayer) DeepCopyInto(out *RailsAppLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayer. +func (in *RailsAppLayer) DeepCopy() *RailsAppLayer { + if in == nil { + return nil + } + out := new(RailsAppLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RailsAppLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *RailsAppLayerCloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]RailsAppLayerCloudwatchConfigurationLogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerCloudwatchConfigurationInitParameters. +func (in *RailsAppLayerCloudwatchConfigurationInitParameters) DeepCopy() *RailsAppLayerCloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerCloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *RailsAppLayerCloudwatchConfigurationLogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerCloudwatchConfigurationLogStreamsInitParameters. 
+func (in *RailsAppLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *RailsAppLayerCloudwatchConfigurationLogStreamsInitParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerCloudwatchConfigurationLogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerCloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *RailsAppLayerCloudwatchConfigurationLogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
RailsAppLayerCloudwatchConfigurationLogStreamsObservation. +func (in *RailsAppLayerCloudwatchConfigurationLogStreamsObservation) DeepCopy() *RailsAppLayerCloudwatchConfigurationLogStreamsObservation { + if in == nil { + return nil + } + out := new(RailsAppLayerCloudwatchConfigurationLogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerCloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *RailsAppLayerCloudwatchConfigurationLogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new RailsAppLayerCloudwatchConfigurationLogStreamsParameters. +func (in *RailsAppLayerCloudwatchConfigurationLogStreamsParameters) DeepCopy() *RailsAppLayerCloudwatchConfigurationLogStreamsParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerCloudwatchConfigurationLogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerCloudwatchConfigurationObservation) DeepCopyInto(out *RailsAppLayerCloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]RailsAppLayerCloudwatchConfigurationLogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerCloudwatchConfigurationObservation. +func (in *RailsAppLayerCloudwatchConfigurationObservation) DeepCopy() *RailsAppLayerCloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(RailsAppLayerCloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerCloudwatchConfigurationParameters) DeepCopyInto(out *RailsAppLayerCloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]RailsAppLayerCloudwatchConfigurationLogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerCloudwatchConfigurationParameters. +func (in *RailsAppLayerCloudwatchConfigurationParameters) DeepCopy() *RailsAppLayerCloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerCloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerEBSVolumeInitParameters) DeepCopyInto(out *RailsAppLayerEBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerEBSVolumeInitParameters. 
+func (in *RailsAppLayerEBSVolumeInitParameters) DeepCopy() *RailsAppLayerEBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerEBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerEBSVolumeObservation) DeepCopyInto(out *RailsAppLayerEBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerEBSVolumeObservation. +func (in *RailsAppLayerEBSVolumeObservation) DeepCopy() *RailsAppLayerEBSVolumeObservation { + if in == nil { + return nil + } + out := new(RailsAppLayerEBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerEBSVolumeParameters) DeepCopyInto(out *RailsAppLayerEBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerEBSVolumeParameters. +func (in *RailsAppLayerEBSVolumeParameters) DeepCopy() *RailsAppLayerEBSVolumeParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerEBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerInitParameters) DeepCopyInto(out *RailsAppLayerInitParameters) { + *out = *in + if in.AppServer != nil { + in, out := &in.AppServer, &out.AppServer + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.BundlerVersion != nil { + in, out := &in.BundlerVersion, &out.BundlerVersion + *out = new(string) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(RailsAppLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]RailsAppLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = 
new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(RailsAppLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ManageBundler != nil { + in, out := &in.ManageBundler, &out.ManageBundler + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PassengerVersion != nil { + in, out := &in.PassengerVersion, &out.PassengerVersion + *out = new(string) + **out = **in + } + if in.RubyVersion != nil { + in, out := &in.RubyVersion, &out.RubyVersion + *out = new(string) + **out = **in + } + if in.RubygemsVersion != nil { + in, out := &in.RubygemsVersion, &out.RubygemsVersion + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new RailsAppLayerInitParameters. +func (in *RailsAppLayerInitParameters) DeepCopy() *RailsAppLayerInitParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerList) DeepCopyInto(out *RailsAppLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RailsAppLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerList. +func (in *RailsAppLayerList) DeepCopy() *RailsAppLayerList { + if in == nil { + return nil + } + out := new(RailsAppLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RailsAppLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *RailsAppLayerLoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerLoadBasedAutoScalingDownscalingInitParameters. +func (in *RailsAppLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *RailsAppLayerLoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerLoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerLoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *RailsAppLayerLoadBasedAutoScalingDownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerLoadBasedAutoScalingDownscalingObservation. +func (in *RailsAppLayerLoadBasedAutoScalingDownscalingObservation) DeepCopy() *RailsAppLayerLoadBasedAutoScalingDownscalingObservation { + if in == nil { + return nil + } + out := new(RailsAppLayerLoadBasedAutoScalingDownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerLoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *RailsAppLayerLoadBasedAutoScalingDownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerLoadBasedAutoScalingDownscalingParameters. +func (in *RailsAppLayerLoadBasedAutoScalingDownscalingParameters) DeepCopy() *RailsAppLayerLoadBasedAutoScalingDownscalingParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerLoadBasedAutoScalingDownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *RailsAppLayerLoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(RailsAppLayerLoadBasedAutoScalingDownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(RailsAppLayerLoadBasedAutoScalingUpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerLoadBasedAutoScalingInitParameters. +func (in *RailsAppLayerLoadBasedAutoScalingInitParameters) DeepCopy() *RailsAppLayerLoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerLoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *RailsAppLayerLoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(RailsAppLayerLoadBasedAutoScalingDownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(RailsAppLayerLoadBasedAutoScalingUpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerLoadBasedAutoScalingObservation. 
+func (in *RailsAppLayerLoadBasedAutoScalingObservation) DeepCopy() *RailsAppLayerLoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(RailsAppLayerLoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *RailsAppLayerLoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(RailsAppLayerLoadBasedAutoScalingDownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(RailsAppLayerLoadBasedAutoScalingUpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerLoadBasedAutoScalingParameters. +func (in *RailsAppLayerLoadBasedAutoScalingParameters) DeepCopy() *RailsAppLayerLoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerLoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *RailsAppLayerLoadBasedAutoScalingUpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerLoadBasedAutoScalingUpscalingInitParameters. +func (in *RailsAppLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *RailsAppLayerLoadBasedAutoScalingUpscalingInitParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerLoadBasedAutoScalingUpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerLoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *RailsAppLayerLoadBasedAutoScalingUpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerLoadBasedAutoScalingUpscalingObservation. +func (in *RailsAppLayerLoadBasedAutoScalingUpscalingObservation) DeepCopy() *RailsAppLayerLoadBasedAutoScalingUpscalingObservation { + if in == nil { + return nil + } + out := new(RailsAppLayerLoadBasedAutoScalingUpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerLoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *RailsAppLayerLoadBasedAutoScalingUpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerLoadBasedAutoScalingUpscalingParameters. +func (in *RailsAppLayerLoadBasedAutoScalingUpscalingParameters) DeepCopy() *RailsAppLayerLoadBasedAutoScalingUpscalingParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerLoadBasedAutoScalingUpscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RailsAppLayerObservation) DeepCopyInto(out *RailsAppLayerObservation) { + *out = *in + if in.AppServer != nil { + in, out := &in.AppServer, &out.AppServer + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.BundlerVersion != nil { + in, out := &in.BundlerVersion, &out.BundlerVersion + *out = new(string) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(RailsAppLayerCloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if 
(*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]RailsAppLayerEBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(RailsAppLayerLoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.ManageBundler != nil { + in, out := 
&in.ManageBundler, &out.ManageBundler + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PassengerVersion != nil { + in, out := &in.PassengerVersion, &out.PassengerVersion + *out = new(string) + **out = **in + } + if in.RubyVersion != nil { + in, out := &in.RubyVersion, &out.RubyVersion + *out = new(string) + **out = **in + } + if in.RubygemsVersion != nil { + in, out := &in.RubygemsVersion, &out.RubygemsVersion + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerObservation. 
+func (in *RailsAppLayerObservation) DeepCopy() *RailsAppLayerObservation { + if in == nil { + return nil + } + out := new(RailsAppLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerParameters) DeepCopyInto(out *RailsAppLayerParameters) { + *out = *in + if in.AppServer != nil { + in, out := &in.AppServer, &out.AppServer + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.BundlerVersion != nil { + in, out := &in.BundlerVersion, &out.BundlerVersion + *out = new(string) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(RailsAppLayerCloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + 
*out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]RailsAppLayerEBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + 
if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(RailsAppLayerLoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.ManageBundler != nil { + in, out := &in.ManageBundler, &out.ManageBundler + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PassengerVersion != nil { + in, out := &in.PassengerVersion, &out.PassengerVersion + *out = new(string) + **out = **in + } + if in.RubyVersion != nil { + in, out := &in.RubyVersion, &out.RubyVersion + *out = new(string) + **out = **in + } + if in.RubygemsVersion != nil { + in, out := &in.RubygemsVersion, &out.RubygemsVersion + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerParameters. +func (in *RailsAppLayerParameters) DeepCopy() *RailsAppLayerParameters { + if in == nil { + return nil + } + out := new(RailsAppLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerSpec) DeepCopyInto(out *RailsAppLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerSpec. +func (in *RailsAppLayerSpec) DeepCopy() *RailsAppLayerSpec { + if in == nil { + return nil + } + out := new(RailsAppLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RailsAppLayerStatus) DeepCopyInto(out *RailsAppLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RailsAppLayerStatus. +func (in *RailsAppLayerStatus) DeepCopy() *RailsAppLayerStatus { + if in == nil { + return nil + } + out := new(RailsAppLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Stack) DeepCopyInto(out *Stack) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stack. +func (in *Stack) DeepCopy() *Stack { + if in == nil { + return nil + } + out := new(Stack) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Stack) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackInitParameters) DeepCopyInto(out *StackInitParameters) { + *out = *in + if in.AgentVersion != nil { + in, out := &in.AgentVersion, &out.AgentVersion + *out = new(string) + **out = **in + } + if in.BerkshelfVersion != nil { + in, out := &in.BerkshelfVersion, &out.BerkshelfVersion + *out = new(string) + **out = **in + } + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.ConfigurationManagerName != nil { + in, out := &in.ConfigurationManagerName, &out.ConfigurationManagerName + *out = new(string) + **out = **in + } + if in.ConfigurationManagerVersion != nil { + in, out := &in.ConfigurationManagerVersion, &out.ConfigurationManagerVersion + *out = new(string) + **out = **in + } + if in.CustomCookbooksSource != nil { + in, out := &in.CustomCookbooksSource, &out.CustomCookbooksSource + *out = new(CustomCookbooksSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.DefaultAvailabilityZone != nil { + in, out := &in.DefaultAvailabilityZone, &out.DefaultAvailabilityZone + *out = new(string) + **out = **in + } + if 
in.DefaultInstanceProfileArn != nil { + in, out := &in.DefaultInstanceProfileArn, &out.DefaultInstanceProfileArn + *out = new(string) + **out = **in + } + if in.DefaultInstanceProfileArnRef != nil { + in, out := &in.DefaultInstanceProfileArnRef, &out.DefaultInstanceProfileArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultInstanceProfileArnSelector != nil { + in, out := &in.DefaultInstanceProfileArnSelector, &out.DefaultInstanceProfileArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DefaultOs != nil { + in, out := &in.DefaultOs, &out.DefaultOs + *out = new(string) + **out = **in + } + if in.DefaultRootDeviceType != nil { + in, out := &in.DefaultRootDeviceType, &out.DefaultRootDeviceType + *out = new(string) + **out = **in + } + if in.DefaultSSHKeyName != nil { + in, out := &in.DefaultSSHKeyName, &out.DefaultSSHKeyName + *out = new(string) + **out = **in + } + if in.DefaultSubnetID != nil { + in, out := &in.DefaultSubnetID, &out.DefaultSubnetID + *out = new(string) + **out = **in + } + if in.DefaultSubnetIDRef != nil { + in, out := &in.DefaultSubnetIDRef, &out.DefaultSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultSubnetIDSelector != nil { + in, out := &in.DefaultSubnetIDSelector, &out.DefaultSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostnameTheme != nil { + in, out := &in.HostnameTheme, &out.HostnameTheme + *out = new(string) + **out = **in + } + if in.ManageBerkshelf != nil { + in, out := &in.ManageBerkshelf, &out.ManageBerkshelf + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } 
+ if in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseCustomCookbooks != nil { + in, out := &in.UseCustomCookbooks, &out.UseCustomCookbooks + *out = new(bool) + **out = **in + } + if in.UseOpsworksSecurityGroups != nil { + in, out := &in.UseOpsworksSecurityGroups, &out.UseOpsworksSecurityGroups + *out = new(bool) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackInitParameters. +func (in *StackInitParameters) DeepCopy() *StackInitParameters { + if in == nil { + return nil + } + out := new(StackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackList) DeepCopyInto(out *StackList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Stack, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackList. 
+func (in *StackList) DeepCopy() *StackList { + if in == nil { + return nil + } + out := new(StackList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StackList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackObservation) DeepCopyInto(out *StackObservation) { + *out = *in + if in.AgentVersion != nil { + in, out := &in.AgentVersion, &out.AgentVersion + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BerkshelfVersion != nil { + in, out := &in.BerkshelfVersion, &out.BerkshelfVersion + *out = new(string) + **out = **in + } + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.ConfigurationManagerName != nil { + in, out := &in.ConfigurationManagerName, &out.ConfigurationManagerName + *out = new(string) + **out = **in + } + if in.ConfigurationManagerVersion != nil { + in, out := &in.ConfigurationManagerVersion, &out.ConfigurationManagerVersion + *out = new(string) + **out = **in + } + if in.CustomCookbooksSource != nil { + in, out := &in.CustomCookbooksSource, &out.CustomCookbooksSource + *out = new(CustomCookbooksSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.DefaultAvailabilityZone != nil { + in, out := &in.DefaultAvailabilityZone, &out.DefaultAvailabilityZone + *out = new(string) + **out = **in + } + if in.DefaultInstanceProfileArn != nil { + in, out := &in.DefaultInstanceProfileArn, &out.DefaultInstanceProfileArn + *out = new(string) + **out = **in + } + if in.DefaultOs != nil { + in, out := &in.DefaultOs, 
&out.DefaultOs + *out = new(string) + **out = **in + } + if in.DefaultRootDeviceType != nil { + in, out := &in.DefaultRootDeviceType, &out.DefaultRootDeviceType + *out = new(string) + **out = **in + } + if in.DefaultSSHKeyName != nil { + in, out := &in.DefaultSSHKeyName, &out.DefaultSSHKeyName + *out = new(string) + **out = **in + } + if in.DefaultSubnetID != nil { + in, out := &in.DefaultSubnetID, &out.DefaultSubnetID + *out = new(string) + **out = **in + } + if in.HostnameTheme != nil { + in, out := &in.HostnameTheme, &out.HostnameTheme + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ManageBerkshelf != nil { + in, out := &in.ManageBerkshelf, &out.ManageBerkshelf + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.StackEndpoint != nil { + in, out := &in.StackEndpoint, &out.StackEndpoint + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseCustomCookbooks != nil { + in, out := &in.UseCustomCookbooks, &out.UseCustomCookbooks + *out = 
new(bool) + **out = **in + } + if in.UseOpsworksSecurityGroups != nil { + in, out := &in.UseOpsworksSecurityGroups, &out.UseOpsworksSecurityGroups + *out = new(bool) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackObservation. +func (in *StackObservation) DeepCopy() *StackObservation { + if in == nil { + return nil + } + out := new(StackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackParameters) DeepCopyInto(out *StackParameters) { + *out = *in + if in.AgentVersion != nil { + in, out := &in.AgentVersion, &out.AgentVersion + *out = new(string) + **out = **in + } + if in.BerkshelfVersion != nil { + in, out := &in.BerkshelfVersion, &out.BerkshelfVersion + *out = new(string) + **out = **in + } + if in.Color != nil { + in, out := &in.Color, &out.Color + *out = new(string) + **out = **in + } + if in.ConfigurationManagerName != nil { + in, out := &in.ConfigurationManagerName, &out.ConfigurationManagerName + *out = new(string) + **out = **in + } + if in.ConfigurationManagerVersion != nil { + in, out := &in.ConfigurationManagerVersion, &out.ConfigurationManagerVersion + *out = new(string) + **out = **in + } + if in.CustomCookbooksSource != nil { + in, out := &in.CustomCookbooksSource, &out.CustomCookbooksSource + *out = new(CustomCookbooksSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.DefaultAvailabilityZone != nil { + in, out := &in.DefaultAvailabilityZone, &out.DefaultAvailabilityZone + *out = new(string) + **out = **in + } + if in.DefaultInstanceProfileArn != nil { + in, out := &in.DefaultInstanceProfileArn, &out.DefaultInstanceProfileArn + *out = 
new(string) + **out = **in + } + if in.DefaultInstanceProfileArnRef != nil { + in, out := &in.DefaultInstanceProfileArnRef, &out.DefaultInstanceProfileArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultInstanceProfileArnSelector != nil { + in, out := &in.DefaultInstanceProfileArnSelector, &out.DefaultInstanceProfileArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DefaultOs != nil { + in, out := &in.DefaultOs, &out.DefaultOs + *out = new(string) + **out = **in + } + if in.DefaultRootDeviceType != nil { + in, out := &in.DefaultRootDeviceType, &out.DefaultRootDeviceType + *out = new(string) + **out = **in + } + if in.DefaultSSHKeyName != nil { + in, out := &in.DefaultSSHKeyName, &out.DefaultSSHKeyName + *out = new(string) + **out = **in + } + if in.DefaultSubnetID != nil { + in, out := &in.DefaultSubnetID, &out.DefaultSubnetID + *out = new(string) + **out = **in + } + if in.DefaultSubnetIDRef != nil { + in, out := &in.DefaultSubnetIDRef, &out.DefaultSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultSubnetIDSelector != nil { + in, out := &in.DefaultSubnetIDSelector, &out.DefaultSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostnameTheme != nil { + in, out := &in.HostnameTheme, &out.HostnameTheme + *out = new(string) + **out = **in + } + if in.ManageBerkshelf != nil { + in, out := &in.ManageBerkshelf, &out.ManageBerkshelf + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseCustomCookbooks != nil { + in, out := &in.UseCustomCookbooks, &out.UseCustomCookbooks + *out = new(bool) + **out = **in + } + if in.UseOpsworksSecurityGroups != nil { + in, out := &in.UseOpsworksSecurityGroups, &out.UseOpsworksSecurityGroups + *out = new(bool) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackParameters. +func (in *StackParameters) DeepCopy() *StackParameters { + if in == nil { + return nil + } + out := new(StackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackSpec) DeepCopyInto(out *StackSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSpec. 
+func (in *StackSpec) DeepCopy() *StackSpec { + if in == nil { + return nil + } + out := new(StackSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StackStatus) DeepCopyInto(out *StackStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackStatus. +func (in *StackStatus) DeepCopy() *StackStatus { + if in == nil { + return nil + } + out := new(StackStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayer) DeepCopyInto(out *StaticWebLayer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayer. +func (in *StaticWebLayer) DeepCopy() *StaticWebLayer { + if in == nil { + return nil + } + out := new(StaticWebLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StaticWebLayer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebLayerCloudwatchConfigurationInitParameters) DeepCopyInto(out *StaticWebLayerCloudwatchConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]StaticWebLayerCloudwatchConfigurationLogStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerCloudwatchConfigurationInitParameters. +func (in *StaticWebLayerCloudwatchConfigurationInitParameters) DeepCopy() *StaticWebLayerCloudwatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerCloudwatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopyInto(out *StaticWebLayerCloudwatchConfigurationLogStreamsInitParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerCloudwatchConfigurationLogStreamsInitParameters. 
+func (in *StaticWebLayerCloudwatchConfigurationLogStreamsInitParameters) DeepCopy() *StaticWebLayerCloudwatchConfigurationLogStreamsInitParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerCloudwatchConfigurationLogStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerCloudwatchConfigurationLogStreamsObservation) DeepCopyInto(out *StaticWebLayerCloudwatchConfigurationLogStreamsObservation) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
StaticWebLayerCloudwatchConfigurationLogStreamsObservation. +func (in *StaticWebLayerCloudwatchConfigurationLogStreamsObservation) DeepCopy() *StaticWebLayerCloudwatchConfigurationLogStreamsObservation { + if in == nil { + return nil + } + out := new(StaticWebLayerCloudwatchConfigurationLogStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerCloudwatchConfigurationLogStreamsParameters) DeepCopyInto(out *StaticWebLayerCloudwatchConfigurationLogStreamsParameters) { + *out = *in + if in.BatchCount != nil { + in, out := &in.BatchCount, &out.BatchCount + *out = new(float64) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.BufferDuration != nil { + in, out := &in.BufferDuration, &out.BufferDuration + *out = new(float64) + **out = **in + } + if in.DatetimeFormat != nil { + in, out := &in.DatetimeFormat, &out.DatetimeFormat + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(string) + **out = **in + } + if in.FileFingerprintLines != nil { + in, out := &in.FileFingerprintLines, &out.FileFingerprintLines + *out = new(string) + **out = **in + } + if in.InitialPosition != nil { + in, out := &in.InitialPosition, &out.InitialPosition + *out = new(string) + **out = **in + } + if in.LogGroupName != nil { + in, out := &in.LogGroupName, &out.LogGroupName + *out = new(string) + **out = **in + } + if in.MultilineStartPattern != nil { + in, out := &in.MultilineStartPattern, &out.MultilineStartPattern + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new StaticWebLayerCloudwatchConfigurationLogStreamsParameters. +func (in *StaticWebLayerCloudwatchConfigurationLogStreamsParameters) DeepCopy() *StaticWebLayerCloudwatchConfigurationLogStreamsParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerCloudwatchConfigurationLogStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerCloudwatchConfigurationObservation) DeepCopyInto(out *StaticWebLayerCloudwatchConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]StaticWebLayerCloudwatchConfigurationLogStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerCloudwatchConfigurationObservation. +func (in *StaticWebLayerCloudwatchConfigurationObservation) DeepCopy() *StaticWebLayerCloudwatchConfigurationObservation { + if in == nil { + return nil + } + out := new(StaticWebLayerCloudwatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebLayerCloudwatchConfigurationParameters) DeepCopyInto(out *StaticWebLayerCloudwatchConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogStreams != nil { + in, out := &in.LogStreams, &out.LogStreams + *out = make([]StaticWebLayerCloudwatchConfigurationLogStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerCloudwatchConfigurationParameters. +func (in *StaticWebLayerCloudwatchConfigurationParameters) DeepCopy() *StaticWebLayerCloudwatchConfigurationParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerCloudwatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerEBSVolumeInitParameters) DeepCopyInto(out *StaticWebLayerEBSVolumeInitParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerEBSVolumeInitParameters. 
+func (in *StaticWebLayerEBSVolumeInitParameters) DeepCopy() *StaticWebLayerEBSVolumeInitParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerEBSVolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerEBSVolumeObservation) DeepCopyInto(out *StaticWebLayerEBSVolumeObservation) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerEBSVolumeObservation. +func (in *StaticWebLayerEBSVolumeObservation) DeepCopy() *StaticWebLayerEBSVolumeObservation { + if in == nil { + return nil + } + out := new(StaticWebLayerEBSVolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebLayerEBSVolumeParameters) DeepCopyInto(out *StaticWebLayerEBSVolumeParameters) { + *out = *in + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.MountPoint != nil { + in, out := &in.MountPoint, &out.MountPoint + *out = new(string) + **out = **in + } + if in.NumberOfDisks != nil { + in, out := &in.NumberOfDisks, &out.NumberOfDisks + *out = new(float64) + **out = **in + } + if in.RaidLevel != nil { + in, out := &in.RaidLevel, &out.RaidLevel + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerEBSVolumeParameters. +func (in *StaticWebLayerEBSVolumeParameters) DeepCopy() *StaticWebLayerEBSVolumeParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerEBSVolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebLayerInitParameters) DeepCopyInto(out *StaticWebLayerInitParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(StaticWebLayerCloudwatchConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := 
&in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]StaticWebLayerEBSVolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(StaticWebLayerLoadBasedAutoScalingInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerInitParameters. +func (in *StaticWebLayerInitParameters) DeepCopy() *StaticWebLayerInitParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebLayerList) DeepCopyInto(out *StaticWebLayerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StaticWebLayer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerList. +func (in *StaticWebLayerList) DeepCopy() *StaticWebLayerList { + if in == nil { + return nil + } + out := new(StaticWebLayerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StaticWebLayerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopyInto(out *StaticWebLayerLoadBasedAutoScalingDownscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if 
in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerLoadBasedAutoScalingDownscalingInitParameters. +func (in *StaticWebLayerLoadBasedAutoScalingDownscalingInitParameters) DeepCopy() *StaticWebLayerLoadBasedAutoScalingDownscalingInitParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerLoadBasedAutoScalingDownscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerLoadBasedAutoScalingDownscalingObservation) DeepCopyInto(out *StaticWebLayerLoadBasedAutoScalingDownscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
StaticWebLayerLoadBasedAutoScalingDownscalingObservation. +func (in *StaticWebLayerLoadBasedAutoScalingDownscalingObservation) DeepCopy() *StaticWebLayerLoadBasedAutoScalingDownscalingObservation { + if in == nil { + return nil + } + out := new(StaticWebLayerLoadBasedAutoScalingDownscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerLoadBasedAutoScalingDownscalingParameters) DeepCopyInto(out *StaticWebLayerLoadBasedAutoScalingDownscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerLoadBasedAutoScalingDownscalingParameters. 
+func (in *StaticWebLayerLoadBasedAutoScalingDownscalingParameters) DeepCopy() *StaticWebLayerLoadBasedAutoScalingDownscalingParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerLoadBasedAutoScalingDownscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerLoadBasedAutoScalingInitParameters) DeepCopyInto(out *StaticWebLayerLoadBasedAutoScalingInitParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(StaticWebLayerLoadBasedAutoScalingDownscalingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(StaticWebLayerLoadBasedAutoScalingUpscalingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerLoadBasedAutoScalingInitParameters. +func (in *StaticWebLayerLoadBasedAutoScalingInitParameters) DeepCopy() *StaticWebLayerLoadBasedAutoScalingInitParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerLoadBasedAutoScalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebLayerLoadBasedAutoScalingObservation) DeepCopyInto(out *StaticWebLayerLoadBasedAutoScalingObservation) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(StaticWebLayerLoadBasedAutoScalingDownscalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(StaticWebLayerLoadBasedAutoScalingUpscalingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerLoadBasedAutoScalingObservation. +func (in *StaticWebLayerLoadBasedAutoScalingObservation) DeepCopy() *StaticWebLayerLoadBasedAutoScalingObservation { + if in == nil { + return nil + } + out := new(StaticWebLayerLoadBasedAutoScalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerLoadBasedAutoScalingParameters) DeepCopyInto(out *StaticWebLayerLoadBasedAutoScalingParameters) { + *out = *in + if in.Downscaling != nil { + in, out := &in.Downscaling, &out.Downscaling + *out = new(StaticWebLayerLoadBasedAutoScalingDownscalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Upscaling != nil { + in, out := &in.Upscaling, &out.Upscaling + *out = new(StaticWebLayerLoadBasedAutoScalingUpscalingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerLoadBasedAutoScalingParameters. 
+func (in *StaticWebLayerLoadBasedAutoScalingParameters) DeepCopy() *StaticWebLayerLoadBasedAutoScalingParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerLoadBasedAutoScalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopyInto(out *StaticWebLayerLoadBasedAutoScalingUpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerLoadBasedAutoScalingUpscalingInitParameters. 
+func (in *StaticWebLayerLoadBasedAutoScalingUpscalingInitParameters) DeepCopy() *StaticWebLayerLoadBasedAutoScalingUpscalingInitParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerLoadBasedAutoScalingUpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerLoadBasedAutoScalingUpscalingObservation) DeepCopyInto(out *StaticWebLayerLoadBasedAutoScalingUpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerLoadBasedAutoScalingUpscalingObservation. 
+func (in *StaticWebLayerLoadBasedAutoScalingUpscalingObservation) DeepCopy() *StaticWebLayerLoadBasedAutoScalingUpscalingObservation { + if in == nil { + return nil + } + out := new(StaticWebLayerLoadBasedAutoScalingUpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerLoadBasedAutoScalingUpscalingParameters) DeepCopyInto(out *StaticWebLayerLoadBasedAutoScalingUpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerLoadBasedAutoScalingUpscalingParameters. 
+func (in *StaticWebLayerLoadBasedAutoScalingUpscalingParameters) DeepCopy() *StaticWebLayerLoadBasedAutoScalingUpscalingParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerLoadBasedAutoScalingUpscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerObservation) DeepCopyInto(out *StaticWebLayerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(StaticWebLayerCloudwatchConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if 
in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, &out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]StaticWebLayerEBSVolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := 
&in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(StaticWebLayerLoadBasedAutoScalingObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerObservation. +func (in *StaticWebLayerObservation) DeepCopy() *StaticWebLayerObservation { + if in == nil { + return nil + } + out := new(StaticWebLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebLayerParameters) DeepCopyInto(out *StaticWebLayerParameters) { + *out = *in + if in.AutoAssignElasticIps != nil { + in, out := &in.AutoAssignElasticIps, &out.AutoAssignElasticIps + *out = new(bool) + **out = **in + } + if in.AutoAssignPublicIps != nil { + in, out := &in.AutoAssignPublicIps, &out.AutoAssignPublicIps + *out = new(bool) + **out = **in + } + if in.AutoHealing != nil { + in, out := &in.AutoHealing, &out.AutoHealing + *out = new(bool) + **out = **in + } + if in.CloudwatchConfiguration != nil { + in, out := &in.CloudwatchConfiguration, &out.CloudwatchConfiguration + *out = new(StaticWebLayerCloudwatchConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomConfigureRecipes != nil { + in, out := &in.CustomConfigureRecipes, &out.CustomConfigureRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomDeployRecipes != nil { + in, out := &in.CustomDeployRecipes, &out.CustomDeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomInstanceProfileArn != nil { + in, out := &in.CustomInstanceProfileArn, &out.CustomInstanceProfileArn + *out = new(string) + **out = **in + } + if in.CustomJSON != nil { + in, out := &in.CustomJSON, &out.CustomJSON + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRefs != nil { + in, out := &in.CustomSecurityGroupIDRefs, &out.CustomSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIds != nil { + in, out := &in.CustomSecurityGroupIds, 
&out.CustomSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomSetupRecipes != nil { + in, out := &in.CustomSetupRecipes, &out.CustomSetupRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomShutdownRecipes != nil { + in, out := &in.CustomShutdownRecipes, &out.CustomShutdownRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomUndeployRecipes != nil { + in, out := &in.CustomUndeployRecipes, &out.CustomUndeployRecipes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DrainELBOnShutdown != nil { + in, out := &in.DrainELBOnShutdown, &out.DrainELBOnShutdown + *out = new(bool) + **out = **in + } + if in.EBSVolume != nil { + in, out := &in.EBSVolume, &out.EBSVolume + *out = make([]StaticWebLayerEBSVolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ElasticLoadBalancer != nil { + in, out := &in.ElasticLoadBalancer, &out.ElasticLoadBalancer + *out = new(string) + **out = **in + } + if in.InstallUpdatesOnBoot != nil { + in, out := &in.InstallUpdatesOnBoot, &out.InstallUpdatesOnBoot + *out = new(bool) + **out = **in + } + if in.InstanceShutdownTimeout != nil { + in, out := &in.InstanceShutdownTimeout, &out.InstanceShutdownTimeout + *out = new(float64) + **out = **in + } + if in.LoadBasedAutoScaling != nil { + in, out := &in.LoadBasedAutoScaling, &out.LoadBasedAutoScaling + *out = new(StaticWebLayerLoadBasedAutoScalingParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, 
&out.Name + *out = new(string) + **out = **in + } + if in.StackID != nil { + in, out := &in.StackID, &out.StackID + *out = new(string) + **out = **in + } + if in.StackIDRef != nil { + in, out := &in.StackIDRef, &out.StackIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StackIDSelector != nil { + in, out := &in.StackIDSelector, &out.StackIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPackages != nil { + in, out := &in.SystemPackages, &out.SystemPackages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UseEBSOptimizedInstances != nil { + in, out := &in.UseEBSOptimizedInstances, &out.UseEBSOptimizedInstances + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerParameters. +func (in *StaticWebLayerParameters) DeepCopy() *StaticWebLayerParameters { + if in == nil { + return nil + } + out := new(StaticWebLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerSpec) DeepCopyInto(out *StaticWebLayerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerSpec. 
+func (in *StaticWebLayerSpec) DeepCopy() *StaticWebLayerSpec { + if in == nil { + return nil + } + out := new(StaticWebLayerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebLayerStatus) DeepCopyInto(out *StaticWebLayerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebLayerStatus. +func (in *StaticWebLayerStatus) DeepCopy() *StaticWebLayerStatus { + if in == nil { + return nil + } + out := new(StaticWebLayerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpscalingInitParameters) DeepCopyInto(out *UpscalingInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new UpscalingInitParameters. +func (in *UpscalingInitParameters) DeepCopy() *UpscalingInitParameters { + if in == nil { + return nil + } + out := new(UpscalingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpscalingObservation) DeepCopyInto(out *UpscalingObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpscalingObservation. +func (in *UpscalingObservation) DeepCopy() *UpscalingObservation { + if in == nil { + return nil + } + out := new(UpscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpscalingParameters) DeepCopyInto(out *UpscalingParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUThreshold != nil { + in, out := &in.CPUThreshold, &out.CPUThreshold + *out = new(float64) + **out = **in + } + if in.IgnoreMetricsTime != nil { + in, out := &in.IgnoreMetricsTime, &out.IgnoreMetricsTime + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LoadThreshold != nil { + in, out := &in.LoadThreshold, &out.LoadThreshold + *out = new(float64) + **out = **in + } + if in.MemoryThreshold != nil { + in, out := &in.MemoryThreshold, &out.MemoryThreshold + *out = new(float64) + **out = **in + } + if in.ThresholdsWaitTime != nil { + in, out := &in.ThresholdsWaitTime, &out.ThresholdsWaitTime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpscalingParameters. +func (in *UpscalingParameters) DeepCopy() *UpscalingParameters { + if in == nil { + return nil + } + out := new(UpscalingParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/opsworks/v1beta2/zz_generated.managed.go b/apis/opsworks/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..bbb6817a96 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_generated.managed.go @@ -0,0 +1,728 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CustomLayer. 
+func (mg *CustomLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CustomLayer. +func (mg *CustomLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CustomLayer. +func (mg *CustomLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CustomLayer. +func (mg *CustomLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CustomLayer. +func (mg *CustomLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CustomLayer. +func (mg *CustomLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CustomLayer. +func (mg *CustomLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CustomLayer. +func (mg *CustomLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CustomLayer. +func (mg *CustomLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CustomLayer. +func (mg *CustomLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CustomLayer. +func (mg *CustomLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CustomLayer. 
+func (mg *CustomLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this EcsClusterLayer. +func (mg *EcsClusterLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EcsClusterLayer. +func (mg *EcsClusterLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EcsClusterLayer. +func (mg *EcsClusterLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EcsClusterLayer. +func (mg *EcsClusterLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EcsClusterLayer. +func (mg *EcsClusterLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EcsClusterLayer. +func (mg *EcsClusterLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EcsClusterLayer. +func (mg *EcsClusterLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EcsClusterLayer. +func (mg *EcsClusterLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EcsClusterLayer. +func (mg *EcsClusterLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EcsClusterLayer. +func (mg *EcsClusterLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EcsClusterLayer. 
+func (mg *EcsClusterLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EcsClusterLayer. +func (mg *EcsClusterLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this GangliaLayer. +func (mg *GangliaLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this GangliaLayer. +func (mg *GangliaLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this GangliaLayer. +func (mg *GangliaLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this GangliaLayer. +func (mg *GangliaLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this GangliaLayer. +func (mg *GangliaLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this GangliaLayer. +func (mg *GangliaLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this GangliaLayer. +func (mg *GangliaLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this GangliaLayer. +func (mg *GangliaLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this GangliaLayer. +func (mg *GangliaLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this GangliaLayer. 
+func (mg *GangliaLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this GangliaLayer. +func (mg *GangliaLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this GangliaLayer. +func (mg *GangliaLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this HAProxyLayer. +func (mg *HAProxyLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HAProxyLayer. +func (mg *HAProxyLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HAProxyLayer. +func (mg *HAProxyLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HAProxyLayer. +func (mg *HAProxyLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HAProxyLayer. +func (mg *HAProxyLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HAProxyLayer. +func (mg *HAProxyLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HAProxyLayer. +func (mg *HAProxyLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HAProxyLayer. +func (mg *HAProxyLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HAProxyLayer. 
+func (mg *HAProxyLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HAProxyLayer. +func (mg *HAProxyLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HAProxyLayer. +func (mg *HAProxyLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HAProxyLayer. +func (mg *HAProxyLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this JavaAppLayer. +func (mg *JavaAppLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this JavaAppLayer. +func (mg *JavaAppLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this JavaAppLayer. +func (mg *JavaAppLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this JavaAppLayer. +func (mg *JavaAppLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this JavaAppLayer. +func (mg *JavaAppLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this JavaAppLayer. +func (mg *JavaAppLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this JavaAppLayer. +func (mg *JavaAppLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this JavaAppLayer. 
+func (mg *JavaAppLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this JavaAppLayer. +func (mg *JavaAppLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this JavaAppLayer. +func (mg *JavaAppLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this JavaAppLayer. +func (mg *JavaAppLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this JavaAppLayer. +func (mg *JavaAppLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MemcachedLayer. +func (mg *MemcachedLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MemcachedLayer. +func (mg *MemcachedLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MemcachedLayer. +func (mg *MemcachedLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MemcachedLayer. +func (mg *MemcachedLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MemcachedLayer. +func (mg *MemcachedLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MemcachedLayer. +func (mg *MemcachedLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MemcachedLayer. 
+func (mg *MemcachedLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MemcachedLayer. +func (mg *MemcachedLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MemcachedLayer. +func (mg *MemcachedLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MemcachedLayer. +func (mg *MemcachedLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MemcachedLayer. +func (mg *MemcachedLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MemcachedLayer. +func (mg *MemcachedLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MySQLLayer. +func (mg *MySQLLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MySQLLayer. +func (mg *MySQLLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MySQLLayer. +func (mg *MySQLLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MySQLLayer. +func (mg *MySQLLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MySQLLayer. +func (mg *MySQLLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MySQLLayer. 
+func (mg *MySQLLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MySQLLayer. +func (mg *MySQLLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MySQLLayer. +func (mg *MySQLLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MySQLLayer. +func (mg *MySQLLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MySQLLayer. +func (mg *MySQLLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MySQLLayer. +func (mg *MySQLLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MySQLLayer. +func (mg *MySQLLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this NodeJSAppLayer. 
+func (mg *NodeJSAppLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this NodeJSAppLayer. +func (mg *NodeJSAppLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this PHPAppLayer. +func (mg *PHPAppLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PHPAppLayer. +func (mg *PHPAppLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PHPAppLayer. +func (mg *PHPAppLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PHPAppLayer. 
+func (mg *PHPAppLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PHPAppLayer. +func (mg *PHPAppLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PHPAppLayer. +func (mg *PHPAppLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PHPAppLayer. +func (mg *PHPAppLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this PHPAppLayer. +func (mg *PHPAppLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PHPAppLayer. +func (mg *PHPAppLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PHPAppLayer. +func (mg *PHPAppLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PHPAppLayer. +func (mg *PHPAppLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PHPAppLayer. +func (mg *PHPAppLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this RailsAppLayer. +func (mg *RailsAppLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RailsAppLayer. +func (mg *RailsAppLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RailsAppLayer. 
+func (mg *RailsAppLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RailsAppLayer. +func (mg *RailsAppLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RailsAppLayer. +func (mg *RailsAppLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RailsAppLayer. +func (mg *RailsAppLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RailsAppLayer. +func (mg *RailsAppLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RailsAppLayer. +func (mg *RailsAppLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RailsAppLayer. +func (mg *RailsAppLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RailsAppLayer. +func (mg *RailsAppLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RailsAppLayer. +func (mg *RailsAppLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RailsAppLayer. +func (mg *RailsAppLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Stack. +func (mg *Stack) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Stack. 
+func (mg *Stack) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Stack. +func (mg *Stack) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Stack. +func (mg *Stack) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Stack. +func (mg *Stack) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Stack. +func (mg *Stack) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Stack. +func (mg *Stack) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Stack. +func (mg *Stack) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Stack. +func (mg *Stack) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Stack. +func (mg *Stack) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Stack. +func (mg *Stack) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Stack. +func (mg *Stack) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this StaticWebLayer. +func (mg *StaticWebLayer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StaticWebLayer. 
+func (mg *StaticWebLayer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StaticWebLayer. +func (mg *StaticWebLayer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StaticWebLayer. +func (mg *StaticWebLayer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StaticWebLayer. +func (mg *StaticWebLayer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StaticWebLayer. +func (mg *StaticWebLayer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StaticWebLayer. +func (mg *StaticWebLayer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StaticWebLayer. +func (mg *StaticWebLayer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StaticWebLayer. +func (mg *StaticWebLayer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StaticWebLayer. +func (mg *StaticWebLayer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StaticWebLayer. +func (mg *StaticWebLayer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StaticWebLayer. 
+func (mg *StaticWebLayer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/opsworks/v1beta2/zz_generated.managedlist.go b/apis/opsworks/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..87b1f46b6c --- /dev/null +++ b/apis/opsworks/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,116 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CustomLayerList. +func (l *CustomLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this EcsClusterLayerList. +func (l *EcsClusterLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this GangliaLayerList. +func (l *GangliaLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this HAProxyLayerList. +func (l *HAProxyLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this JavaAppLayerList. +func (l *JavaAppLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MemcachedLayerList. 
+func (l *MemcachedLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MySQLLayerList. +func (l *MySQLLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this NodeJSAppLayerList. +func (l *NodeJSAppLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PHPAppLayerList. +func (l *PHPAppLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RailsAppLayerList. +func (l *RailsAppLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StackList. +func (l *StackList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StaticWebLayerList. +func (l *StaticWebLayerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/opsworks/v1beta2/zz_generated.resolvers.go b/apis/opsworks/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..6172db22ff --- /dev/null +++ b/apis/opsworks/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,1200 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *CustomLayer) ResolveReferences( // ResolveReferences of this CustomLayer. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + 
mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this EcsClusterLayer. +func (mg *EcsClusterLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EcsClusterArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.EcsClusterArnRef, + Selector: mg.Spec.ForProvider.EcsClusterArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.ForProvider.EcsClusterArn") + } + mg.Spec.ForProvider.EcsClusterArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EcsClusterArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ecs.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EcsClusterArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.EcsClusterArnRef, + Selector: mg.Spec.InitProvider.EcsClusterArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EcsClusterArn") + } + mg.Spec.InitProvider.EcsClusterArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EcsClusterArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this GangliaLayer. 
+func (mg *GangliaLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this HAProxyLayer. 
+func (mg *HAProxyLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this JavaAppLayer. 
+func (mg *JavaAppLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MemcachedLayer. 
+func (mg *MemcachedLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MySQLLayer. 
+func (mg *MySQLLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this NodeJSAppLayer. 
+func (mg *NodeJSAppLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this PHPAppLayer. 
+func (mg *PHPAppLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RailsAppLayer. 
+func (mg *RailsAppLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Stack. 
+func (mg *Stack) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultInstanceProfileArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.DefaultInstanceProfileArnRef, + Selector: mg.Spec.ForProvider.DefaultInstanceProfileArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultInstanceProfileArn") + } + mg.Spec.ForProvider.DefaultInstanceProfileArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultInstanceProfileArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultSubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DefaultSubnetIDRef, + Selector: mg.Spec.ForProvider.DefaultSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultSubnetID") + } + mg.Spec.ForProvider.DefaultSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultSubnetIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ServiceRoleArnRef, + Selector: mg.Spec.ForProvider.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceRoleArn") + } + mg.Spec.ForProvider.ServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCID") + } + mg.Spec.ForProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "InstanceProfile", "InstanceProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultInstanceProfileArn), + Extract: resource.ExtractParamPath("arn", true), + 
Reference: mg.Spec.InitProvider.DefaultInstanceProfileArnRef, + Selector: mg.Spec.InitProvider.DefaultInstanceProfileArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultInstanceProfileArn") + } + mg.Spec.InitProvider.DefaultInstanceProfileArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultInstanceProfileArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultSubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DefaultSubnetIDRef, + Selector: mg.Spec.InitProvider.DefaultSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultSubnetID") + } + mg.Spec.InitProvider.DefaultSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ServiceRoleArnRef, + Selector: mg.Spec.InitProvider.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceRoleArn") + } + mg.Spec.InitProvider.ServiceRoleArn = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCID") + } + mg.Spec.InitProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this StaticWebLayer. +func (mg *StaticWebLayer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomSecurityGroupIds") + } + mg.Spec.ForProvider.CustomSecurityGroupIds = 
reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StackIDRef, + Selector: mg.Spec.ForProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StackID") + } + mg.Spec.ForProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StackIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CustomSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.CustomSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomSecurityGroupIds") + } + mg.Spec.InitProvider.CustomSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CustomSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("opsworks.aws.upbound.io", "v1beta2", "Stack", "StackList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StackID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StackIDRef, + Selector: mg.Spec.InitProvider.StackIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StackID") + } + mg.Spec.InitProvider.StackID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StackIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/opsworks/v1beta2/zz_groupversion_info.go b/apis/opsworks/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..6fc62fd0db --- /dev/null +++ b/apis/opsworks/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=opsworks.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "opsworks.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/opsworks/v1beta2/zz_haproxylayer_terraformed.go b/apis/opsworks/v1beta2/zz_haproxylayer_terraformed.go new file mode 100755 index 0000000000..91f36894e3 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_haproxylayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this HAProxyLayer +func (mg *HAProxyLayer) GetTerraformResourceType() string { + return "aws_opsworks_haproxy_layer" +} + +// GetConnectionDetailsMapping for this HAProxyLayer +func (tr *HAProxyLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this HAProxyLayer +func (tr *HAProxyLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HAProxyLayer +func (tr *HAProxyLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HAProxyLayer +func (tr *HAProxyLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HAProxyLayer +func (tr *HAProxyLayer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
SetParameters for this HAProxyLayer +func (tr *HAProxyLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HAProxyLayer +func (tr *HAProxyLayer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this HAProxyLayer +func (tr *HAProxyLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this HAProxyLayer using its observed tfState. +// returns true if there are any spec changes for the resource. 
+func (tr *HAProxyLayer) LateInitialize(attrs []byte) (bool, error) { + params := &HAProxyLayerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HAProxyLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_haproxylayer_types.go b/apis/opsworks/v1beta2/zz_haproxylayer_types.go new file mode 100755 index 0000000000..fa63026a57 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_haproxylayer_types.go @@ -0,0 +1,736 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HAProxyLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []HAProxyLayerCloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type HAProxyLayerCloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type HAProxyLayerCloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type HAProxyLayerCloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type HAProxyLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []HAProxyLayerCloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type HAProxyLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []HAProxyLayerCloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type HAProxyLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type HAProxyLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type HAProxyLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type HAProxyLayerInitParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *HAProxyLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []HAProxyLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // HTTP method to use for instance healthchecks. Defaults to "OPTIONS". + HealthcheckMethod *string `json:"healthcheckMethod,omitempty" tf:"healthcheck_method,omitempty"` + + // URL path to use for instance healthchecks. Defaults to "/". + HealthcheckURL *string `json:"healthcheckUrl,omitempty" tf:"healthcheck_url,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
+ InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *HAProxyLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Whether to enable HAProxy stats. + StatsEnabled *bool `json:"statsEnabled,omitempty" tf:"stats_enabled,omitempty"` + + // The password to use for HAProxy stats. + StatsPassword *string `json:"statsPassword,omitempty" tf:"stats_password,omitempty"` + + // The HAProxy stats URL. Defaults to "/haproxy?stats". + StatsURL *string `json:"statsUrl,omitempty" tf:"stats_url,omitempty"` + + // The username for HAProxy stats. Defaults to "opsworks". + StatsUser *string `json:"statsUser,omitempty" tf:"stats_user,omitempty"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type HAProxyLayerLoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type HAProxyLayerLoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type HAProxyLayerLoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + 
+ // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type HAProxyLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *HAProxyLayerLoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *HAProxyLayerLoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type HAProxyLayerLoadBasedAutoScalingObservation struct { + Downscaling *HAProxyLayerLoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *HAProxyLayerLoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type HAProxyLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *HAProxyLayerLoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + Upscaling *HAProxyLayerLoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type HAProxyLayerLoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" 
tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type HAProxyLayerLoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type HAProxyLayerLoadBasedAutoScalingUpscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 
`json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type HAProxyLayerObservation struct { + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *HAProxyLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []HAProxyLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // HTTP method to use for instance healthchecks. Defaults to "OPTIONS". + HealthcheckMethod *string `json:"healthcheckMethod,omitempty" tf:"healthcheck_method,omitempty"` + + // URL path to use for instance healthchecks. Defaults to "/". + HealthcheckURL *string `json:"healthcheckUrl,omitempty" tf:"healthcheck_url,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
+ InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *HAProxyLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Whether to enable HAProxy stats. + StatsEnabled *bool `json:"statsEnabled,omitempty" tf:"stats_enabled,omitempty"` + + // The password to use for HAProxy stats. + StatsPassword *string `json:"statsPassword,omitempty" tf:"stats_password,omitempty"` + + // The HAProxy stats URL. Defaults to "/haproxy?stats". + StatsURL *string `json:"statsUrl,omitempty" tf:"stats_url,omitempty"` + + // The username for HAProxy stats. Defaults to "opsworks". + StatsUser *string `json:"statsUser,omitempty" tf:"stats_user,omitempty"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type HAProxyLayerParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. 
+ // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *HAProxyLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []HAProxyLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // HTTP method to use for instance healthchecks. Defaults to "OPTIONS". 
+ // +kubebuilder:validation:Optional + HealthcheckMethod *string `json:"healthcheckMethod,omitempty" tf:"healthcheck_method,omitempty"` + + // URL path to use for instance healthchecks. Defaults to "/". + // +kubebuilder:validation:Optional + HealthcheckURL *string `json:"healthcheckUrl,omitempty" tf:"healthcheck_url,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *HAProxyLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Whether to enable HAProxy stats. 
+ // +kubebuilder:validation:Optional + StatsEnabled *bool `json:"statsEnabled,omitempty" tf:"stats_enabled,omitempty"` + + // The password to use for HAProxy stats. + // +kubebuilder:validation:Optional + StatsPassword *string `json:"statsPassword,omitempty" tf:"stats_password,omitempty"` + + // The HAProxy stats URL. Defaults to "/haproxy?stats". + // +kubebuilder:validation:Optional + StatsURL *string `json:"statsUrl,omitempty" tf:"stats_url,omitempty"` + + // The username for HAProxy stats. Defaults to "opsworks". + // +kubebuilder:validation:Optional + StatsUser *string `json:"statsUser,omitempty" tf:"stats_user,omitempty"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. + // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +// HAProxyLayerSpec defines the desired state of HAProxyLayer +type HAProxyLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HAProxyLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HAProxyLayerInitParameters `json:"initProvider,omitempty"` +} + +// HAProxyLayerStatus defines the observed state of HAProxyLayer. +type HAProxyLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HAProxyLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// HAProxyLayer is the Schema for the HAProxyLayers API. Provides an OpsWorks HAProxy layer resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type HAProxyLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.statsPassword) || (has(self.initProvider) && has(self.initProvider.statsPassword))",message="spec.forProvider.statsPassword is a required parameter" + Spec HAProxyLayerSpec `json:"spec"` + Status HAProxyLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HAProxyLayerList contains a list of HAProxyLayers +type HAProxyLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HAProxyLayer `json:"items"` +} + +// Repository type 
metadata. +var ( + HAProxyLayer_Kind = "HAProxyLayer" + HAProxyLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HAProxyLayer_Kind}.String() + HAProxyLayer_KindAPIVersion = HAProxyLayer_Kind + "." + CRDGroupVersion.String() + HAProxyLayer_GroupVersionKind = CRDGroupVersion.WithKind(HAProxyLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&HAProxyLayer{}, &HAProxyLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_javaapplayer_terraformed.go b/apis/opsworks/v1beta2/zz_javaapplayer_terraformed.go new file mode 100755 index 0000000000..94fd8654fd --- /dev/null +++ b/apis/opsworks/v1beta2/zz_javaapplayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this JavaAppLayer +func (mg *JavaAppLayer) GetTerraformResourceType() string { + return "aws_opsworks_java_app_layer" +} + +// GetConnectionDetailsMapping for this JavaAppLayer +func (tr *JavaAppLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this JavaAppLayer +func (tr *JavaAppLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this JavaAppLayer +func (tr *JavaAppLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this JavaAppLayer +func (tr *JavaAppLayer) GetID() string { + if tr.Status.AtProvider.ID == 
nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this JavaAppLayer +func (tr *JavaAppLayer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this JavaAppLayer +func (tr *JavaAppLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this JavaAppLayer +func (tr *JavaAppLayer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this JavaAppLayer +func (tr *JavaAppLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this JavaAppLayer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *JavaAppLayer) LateInitialize(attrs []byte) (bool, error) { + params := &JavaAppLayerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *JavaAppLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_javaapplayer_types.go b/apis/opsworks/v1beta2/zz_javaapplayer_types.go new file mode 100755 index 0000000000..2a070c50ff --- /dev/null +++ b/apis/opsworks/v1beta2/zz_javaapplayer_types.go @@ -0,0 +1,725 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type JavaAppLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []JavaAppLayerCloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type JavaAppLayerCloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type JavaAppLayerCloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type JavaAppLayerCloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type JavaAppLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []JavaAppLayerCloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type JavaAppLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []JavaAppLayerCloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type JavaAppLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type JavaAppLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type JavaAppLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type JavaAppLayerInitParameters struct { + + // Keyword for the application container to use. Defaults to "tomcat". + AppServer *string `json:"appServer,omitempty" tf:"app_server,omitempty"` + + // Version of the selected application container to use. Defaults to "7". + AppServerVersion *string `json:"appServerVersion,omitempty" tf:"app_server_version,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *JavaAppLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []JavaAppLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. 
+ InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // Options to set for the JVM. + JvmOptions *string `json:"jvmOptions,omitempty" tf:"jvm_options,omitempty"` + + // Keyword for the type of JVM to use. Defaults to openjdk. + JvmType *string `json:"jvmType,omitempty" tf:"jvm_type,omitempty"` + + // Version of JVM to use. Defaults to "7". + JvmVersion *string `json:"jvmVersion,omitempty" tf:"jvm_version,omitempty"` + + LoadBasedAutoScaling *JavaAppLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type JavaAppLayerLoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type JavaAppLayerLoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type JavaAppLayerLoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + 
+ // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type JavaAppLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *JavaAppLayerLoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *JavaAppLayerLoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type JavaAppLayerLoadBasedAutoScalingObservation struct { + Downscaling *JavaAppLayerLoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *JavaAppLayerLoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type JavaAppLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *JavaAppLayerLoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + Upscaling *JavaAppLayerLoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type JavaAppLayerLoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" 
tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type JavaAppLayerLoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type JavaAppLayerLoadBasedAutoScalingUpscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 
`json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type JavaAppLayerObservation struct { + + // Keyword for the application container to use. Defaults to "tomcat". + AppServer *string `json:"appServer,omitempty" tf:"app_server,omitempty"` + + // Version of the selected application container to use. Defaults to "7". + AppServerVersion *string `json:"appServerVersion,omitempty" tf:"app_server_version,omitempty"` + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *JavaAppLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. 
+ CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []JavaAppLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // Options to set for the JVM. + JvmOptions *string `json:"jvmOptions,omitempty" tf:"jvm_options,omitempty"` + + // Keyword for the type of JVM to use. Defaults to openjdk. + JvmType *string `json:"jvmType,omitempty" tf:"jvm_type,omitempty"` + + // Version of JVM to use. Defaults to "7". 
+ JvmVersion *string `json:"jvmVersion,omitempty" tf:"jvm_version,omitempty"` + + LoadBasedAutoScaling *JavaAppLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type JavaAppLayerParameters struct { + + // Keyword for the application container to use. Defaults to "tomcat". + // +kubebuilder:validation:Optional + AppServer *string `json:"appServer,omitempty" tf:"app_server,omitempty"` + + // Version of the selected application container to use. Defaults to "7". + // +kubebuilder:validation:Optional + AppServerVersion *string `json:"appServerVersion,omitempty" tf:"app_server_version,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. 
+ // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *JavaAppLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []JavaAppLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
+ // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // Options to set for the JVM. + // +kubebuilder:validation:Optional + JvmOptions *string `json:"jvmOptions,omitempty" tf:"jvm_options,omitempty"` + + // Keyword for the type of JVM to use. Defaults to openjdk. + // +kubebuilder:validation:Optional + JvmType *string `json:"jvmType,omitempty" tf:"jvm_type,omitempty"` + + // Version of JVM to use. Defaults to "7". + // +kubebuilder:validation:Optional + JvmVersion *string `json:"jvmVersion,omitempty" tf:"jvm_version,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *JavaAppLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. + // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +// JavaAppLayerSpec defines the desired state of JavaAppLayer +type JavaAppLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider JavaAppLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider JavaAppLayerInitParameters `json:"initProvider,omitempty"` +} + +// JavaAppLayerStatus defines the observed state of JavaAppLayer. +type JavaAppLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider JavaAppLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// JavaAppLayer is the Schema for the JavaAppLayers API. Provides an OpsWorks Java application layer resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type JavaAppLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec JavaAppLayerSpec `json:"spec"` + Status JavaAppLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// JavaAppLayerList contains a list of JavaAppLayers +type JavaAppLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []JavaAppLayer `json:"items"` +} + +// Repository type metadata. +var ( + JavaAppLayer_Kind = "JavaAppLayer" + JavaAppLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: JavaAppLayer_Kind}.String() + JavaAppLayer_KindAPIVersion = JavaAppLayer_Kind + "." + CRDGroupVersion.String() + JavaAppLayer_GroupVersionKind = CRDGroupVersion.WithKind(JavaAppLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&JavaAppLayer{}, &JavaAppLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_memcachedlayer_terraformed.go b/apis/opsworks/v1beta2/zz_memcachedlayer_terraformed.go new file mode 100755 index 0000000000..fcd1344ee0 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_memcachedlayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MemcachedLayer +func (mg *MemcachedLayer) GetTerraformResourceType() string { + return "aws_opsworks_memcached_layer" +} + +// GetConnectionDetailsMapping for this MemcachedLayer +func (tr *MemcachedLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MemcachedLayer +func (tr *MemcachedLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MemcachedLayer +func (tr *MemcachedLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MemcachedLayer +func (tr *MemcachedLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MemcachedLayer +func (tr *MemcachedLayer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MemcachedLayer +func (tr *MemcachedLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MemcachedLayer +func (tr *MemcachedLayer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err 
!= nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this MemcachedLayer
+func (tr *MemcachedLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this MemcachedLayer using its observed tfState.
+// returns true if there are any spec changes for the resource.
+func (tr *MemcachedLayer) LateInitialize(attrs []byte) (bool, error) {
+	params := &MemcachedLayerParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MemcachedLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_memcachedlayer_types.go b/apis/opsworks/v1beta2/zz_memcachedlayer_types.go new file mode 100755 index 0000000000..f5228c3dd8 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_memcachedlayer_types.go @@ -0,0 +1,685 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MemcachedLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []MemcachedLayerCloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type MemcachedLayerCloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MemcachedLayerCloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MemcachedLayerCloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MemcachedLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []MemcachedLayerCloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type MemcachedLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []MemcachedLayerCloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type MemcachedLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MemcachedLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MemcachedLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MemcachedLayerInitParameters struct { + + // Amount of memory to allocate for the cache on each instance, in megabytes. Defaults to 512MB. + AllocatedMemory *float64 `json:"allocatedMemory,omitempty" tf:"allocated_memory,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *MemcachedLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []MemcachedLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
+ InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *MemcachedLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type MemcachedLayerLoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MemcachedLayerLoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MemcachedLayerLoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" 
tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MemcachedLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *MemcachedLayerLoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *MemcachedLayerLoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type MemcachedLayerLoadBasedAutoScalingObservation struct { + Downscaling *MemcachedLayerLoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *MemcachedLayerLoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type MemcachedLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *MemcachedLayerLoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + Upscaling *MemcachedLayerLoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type MemcachedLayerLoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + 
CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MemcachedLayerLoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MemcachedLayerLoadBasedAutoScalingUpscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // 
+kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MemcachedLayerObservation struct { + + // Amount of memory to allocate for the cache on each instance, in megabytes. Defaults to 512MB. + AllocatedMemory *float64 `json:"allocatedMemory,omitempty" tf:"allocated_memory,omitempty"` + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *MemcachedLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []MemcachedLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *MemcachedLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Names of a set of system packages to install on the layer's instances. 
+ // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type MemcachedLayerParameters struct { + + // Amount of memory to allocate for the cache on each instance, in megabytes. Defaults to 512MB. + // +kubebuilder:validation:Optional + AllocatedMemory *float64 `json:"allocatedMemory,omitempty" tf:"allocated_memory,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. 
+ // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *MemcachedLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []MemcachedLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
+ // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *MemcachedLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +// MemcachedLayerSpec defines the desired state of MemcachedLayer +type MemcachedLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MemcachedLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MemcachedLayerInitParameters `json:"initProvider,omitempty"` +} + +// MemcachedLayerStatus defines the observed state of MemcachedLayer. +type MemcachedLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MemcachedLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MemcachedLayer is the Schema for the MemcachedLayers API. Provides an OpsWorks memcached layer resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type MemcachedLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MemcachedLayerSpec `json:"spec"` + Status MemcachedLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MemcachedLayerList contains a list of MemcachedLayers +type MemcachedLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MemcachedLayer `json:"items"` +} + +// Repository type metadata. +var ( + MemcachedLayer_Kind = "MemcachedLayer" + MemcachedLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MemcachedLayer_Kind}.String() + MemcachedLayer_KindAPIVersion = MemcachedLayer_Kind + "." + CRDGroupVersion.String() + MemcachedLayer_GroupVersionKind = CRDGroupVersion.WithKind(MemcachedLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&MemcachedLayer{}, &MemcachedLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_mysqllayer_terraformed.go b/apis/opsworks/v1beta2/zz_mysqllayer_terraformed.go new file mode 100755 index 0000000000..e579122deb --- /dev/null +++ b/apis/opsworks/v1beta2/zz_mysqllayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MySQLLayer +func (mg *MySQLLayer) GetTerraformResourceType() string { + return "aws_opsworks_mysql_layer" +} + +// GetConnectionDetailsMapping for this MySQLLayer +func (tr *MySQLLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MySQLLayer +func (tr *MySQLLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MySQLLayer +func (tr *MySQLLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MySQLLayer +func (tr *MySQLLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MySQLLayer +func (tr *MySQLLayer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MySQLLayer +func (tr *MySQLLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MySQLLayer +func (tr *MySQLLayer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MySQLLayer +func (tr *MySQLLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MySQLLayer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MySQLLayer) LateInitialize(attrs []byte) (bool, error) { + params := &MySQLLayerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MySQLLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_mysqllayer_types.go b/apis/opsworks/v1beta2/zz_mysqllayer_types.go new file mode 100755 index 0000000000..e0ac5dd78e --- /dev/null +++ b/apis/opsworks/v1beta2/zz_mysqllayer_types.go @@ -0,0 +1,695 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MySQLLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []MySQLLayerCloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type MySQLLayerCloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MySQLLayerCloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MySQLLayerCloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MySQLLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []MySQLLayerCloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type MySQLLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []MySQLLayerCloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type MySQLLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MySQLLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MySQLLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MySQLLayerInitParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *MySQLLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []MySQLLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *MySQLLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Root password to use for MySQL. + RootPassword *string `json:"rootPassword,omitempty" tf:"root_password,omitempty"` + + // Whether to set the root user password to all instances in the stack so they can access the instances in this layer. + RootPasswordOnAllInstances *bool `json:"rootPasswordOnAllInstances,omitempty" tf:"root_password_on_all_instances,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type MySQLLayerLoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MySQLLayerLoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MySQLLayerLoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // 
+kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MySQLLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *MySQLLayerLoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *MySQLLayerLoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type MySQLLayerLoadBasedAutoScalingObservation struct { + Downscaling *MySQLLayerLoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *MySQLLayerLoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type MySQLLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *MySQLLayerLoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + Upscaling *MySQLLayerLoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type MySQLLayerLoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + 
IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MySQLLayerLoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MySQLLayerLoadBasedAutoScalingUpscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" 
tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type MySQLLayerObservation struct { + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *MySQLLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []MySQLLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *MySQLLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Root password to use for MySQL. 
+ RootPassword *string `json:"rootPassword,omitempty" tf:"root_password,omitempty"` + + // Whether to set the root user password to all instances in the stack so they can access the instances in this layer. + RootPasswordOnAllInstances *bool `json:"rootPasswordOnAllInstances,omitempty" tf:"root_password_on_all_instances,omitempty"` + + // ID of the stack the layer will belong to. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type MySQLLayerParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. 
+ // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *MySQLLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []MySQLLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
+ // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *MySQLLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Root password to use for MySQL. + // +kubebuilder:validation:Optional + RootPassword *string `json:"rootPassword,omitempty" tf:"root_password,omitempty"` + + // Whether to set the root user password to all instances in the stack so they can access the instances in this layer. + // +kubebuilder:validation:Optional + RootPasswordOnAllInstances *bool `json:"rootPasswordOnAllInstances,omitempty" tf:"root_password_on_all_instances,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. + // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +// MySQLLayerSpec defines the desired state of MySQLLayer +type MySQLLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MySQLLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MySQLLayerInitParameters `json:"initProvider,omitempty"` +} + +// MySQLLayerStatus defines the observed state of MySQLLayer. +type MySQLLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MySQLLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MySQLLayer is the Schema for the MySQLLayers API. Provides an OpsWorks MySQL layer resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type MySQLLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MySQLLayerSpec `json:"spec"` + Status MySQLLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MySQLLayerList contains a list of MySQLLayers +type MySQLLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MySQLLayer `json:"items"` +} + +// Repository type metadata. +var ( + MySQLLayer_Kind = "MySQLLayer" + MySQLLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MySQLLayer_Kind}.String() + MySQLLayer_KindAPIVersion = MySQLLayer_Kind + "." + CRDGroupVersion.String() + MySQLLayer_GroupVersionKind = CRDGroupVersion.WithKind(MySQLLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&MySQLLayer{}, &MySQLLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_nodejsapplayer_terraformed.go b/apis/opsworks/v1beta2/zz_nodejsapplayer_terraformed.go new file mode 100755 index 0000000000..da2d6c23b8 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_nodejsapplayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this NodeJSAppLayer +func (mg *NodeJSAppLayer) GetTerraformResourceType() string { + return "aws_opsworks_nodejs_app_layer" +} + +// GetConnectionDetailsMapping for this NodeJSAppLayer +func (tr *NodeJSAppLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this NodeJSAppLayer +func (tr *NodeJSAppLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this NodeJSAppLayer +func (tr *NodeJSAppLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this NodeJSAppLayer +func (tr *NodeJSAppLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this NodeJSAppLayer +func (tr *NodeJSAppLayer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this NodeJSAppLayer +func (tr *NodeJSAppLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this NodeJSAppLayer +func (tr *NodeJSAppLayer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if 
err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this NodeJSAppLayer
+func (tr *NodeJSAppLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this NodeJSAppLayer using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *NodeJSAppLayer) LateInitialize(attrs []byte) (bool, error) {
+	params := &NodeJSAppLayerParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *NodeJSAppLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_nodejsapplayer_types.go b/apis/opsworks/v1beta2/zz_nodejsapplayer_types.go new file mode 100755 index 0000000000..dd7847c150 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_nodejsapplayer_types.go @@ -0,0 +1,685 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type NodeJSAppLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []NodeJSAppLayerCloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type NodeJSAppLayerCloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type NodeJSAppLayerCloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type NodeJSAppLayerCloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type NodeJSAppLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []NodeJSAppLayerCloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type NodeJSAppLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []NodeJSAppLayerCloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type NodeJSAppLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type NodeJSAppLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type NodeJSAppLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type NodeJSAppLayerInitParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *NodeJSAppLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []NodeJSAppLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *NodeJSAppLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of NodeJS to use. Defaults to "0.10.38". + NodeJSVersion *string `json:"nodejsVersion,omitempty" tf:"nodejs_version,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type NodeJSAppLayerLoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type NodeJSAppLayerLoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type NodeJSAppLayerLoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" 
tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type NodeJSAppLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *NodeJSAppLayerLoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *NodeJSAppLayerLoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type NodeJSAppLayerLoadBasedAutoScalingObservation struct { + Downscaling *NodeJSAppLayerLoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *NodeJSAppLayerLoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type NodeJSAppLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *NodeJSAppLayerLoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + Upscaling *NodeJSAppLayerLoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type NodeJSAppLayerLoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + 
CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type NodeJSAppLayerLoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type NodeJSAppLayerLoadBasedAutoScalingUpscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // 
+kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type NodeJSAppLayerObservation struct { + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *NodeJSAppLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []NodeJSAppLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *NodeJSAppLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of NodeJS to use. Defaults to "0.10.38". + NodeJSVersion *string `json:"nodejsVersion,omitempty" tf:"nodejs_version,omitempty"` + + // ID of the stack the layer will belong to. 
+ StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type NodeJSAppLayerParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. 
+ // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *NodeJSAppLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []NodeJSAppLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
+ // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *NodeJSAppLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of NodeJS to use. Defaults to "0.10.38". + // +kubebuilder:validation:Optional + NodeJSVersion *string `json:"nodejsVersion,omitempty" tf:"nodejs_version,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +// NodeJSAppLayerSpec defines the desired state of NodeJSAppLayer +type NodeJSAppLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider NodeJSAppLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider NodeJSAppLayerInitParameters `json:"initProvider,omitempty"` +} + +// NodeJSAppLayerStatus defines the observed state of NodeJSAppLayer. +type NodeJSAppLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider NodeJSAppLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NodeJSAppLayer is the Schema for the NodeJSAppLayers API. Provides an OpsWorks NodeJS application layer resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type NodeJSAppLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec NodeJSAppLayerSpec `json:"spec"` + Status NodeJSAppLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NodeJSAppLayerList contains a list of NodeJSAppLayers +type NodeJSAppLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NodeJSAppLayer `json:"items"` +} + +// Repository type metadata. +var ( + NodeJSAppLayer_Kind = "NodeJSAppLayer" + NodeJSAppLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: NodeJSAppLayer_Kind}.String() + NodeJSAppLayer_KindAPIVersion = NodeJSAppLayer_Kind + "." + CRDGroupVersion.String() + NodeJSAppLayer_GroupVersionKind = CRDGroupVersion.WithKind(NodeJSAppLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&NodeJSAppLayer{}, &NodeJSAppLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_phpapplayer_terraformed.go b/apis/opsworks/v1beta2/zz_phpapplayer_terraformed.go new file mode 100755 index 0000000000..8d0c819003 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_phpapplayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PHPAppLayer +func (mg *PHPAppLayer) GetTerraformResourceType() string { + return "aws_opsworks_php_app_layer" +} + +// GetConnectionDetailsMapping for this PHPAppLayer +func (tr *PHPAppLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PHPAppLayer +func (tr *PHPAppLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PHPAppLayer +func (tr *PHPAppLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PHPAppLayer +func (tr *PHPAppLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PHPAppLayer +func (tr *PHPAppLayer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PHPAppLayer +func (tr *PHPAppLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PHPAppLayer +func (tr *PHPAppLayer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this PHPAppLayer +func (tr *PHPAppLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PHPAppLayer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *PHPAppLayer) LateInitialize(attrs []byte) (bool, error) { + params := &PHPAppLayerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PHPAppLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_phpapplayer_types.go b/apis/opsworks/v1beta2/zz_phpapplayer_types.go new file mode 100755 index 0000000000..c5fda480fd --- /dev/null +++ b/apis/opsworks/v1beta2/zz_phpapplayer_types.go @@ -0,0 +1,675 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PHPAppLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []PHPAppLayerCloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type PHPAppLayerCloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type PHPAppLayerCloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type PHPAppLayerCloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type PHPAppLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []PHPAppLayerCloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type PHPAppLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []PHPAppLayerCloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type PHPAppLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PHPAppLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PHPAppLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PHPAppLayerInitParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *PHPAppLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []PHPAppLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *PHPAppLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type PHPAppLayerLoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type PHPAppLayerLoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type PHPAppLayerLoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + 
// +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type PHPAppLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *PHPAppLayerLoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *PHPAppLayerLoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type PHPAppLayerLoadBasedAutoScalingObservation struct { + Downscaling *PHPAppLayerLoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *PHPAppLayerLoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type PHPAppLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *PHPAppLayerLoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + Upscaling *PHPAppLayerLoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type PHPAppLayerLoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" 
tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type PHPAppLayerLoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type PHPAppLayerLoadBasedAutoScalingUpscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 
`json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type PHPAppLayerObservation struct { + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *PHPAppLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []PHPAppLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *PHPAppLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Names of a set of system packages to install on the layer's instances. 
+ // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type PHPAppLayerParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *PHPAppLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. 
+ // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. 
+ // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []PHPAppLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *PHPAppLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. 
+ // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. + // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +// PHPAppLayerSpec defines the desired state of PHPAppLayer +type PHPAppLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PHPAppLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PHPAppLayerInitParameters `json:"initProvider,omitempty"` +} + +// PHPAppLayerStatus defines the observed state of PHPAppLayer. 
+type PHPAppLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PHPAppLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// PHPAppLayer is the Schema for the PHPAppLayers API. Provides an OpsWorks PHP application layer resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type PHPAppLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec PHPAppLayerSpec `json:"spec"` + Status PHPAppLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PHPAppLayerList contains a list of PHPAppLayers +type PHPAppLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PHPAppLayer `json:"items"` +} + +// Repository type metadata. +var ( + PHPAppLayer_Kind = "PHPAppLayer" + PHPAppLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PHPAppLayer_Kind}.String() + PHPAppLayer_KindAPIVersion = PHPAppLayer_Kind + "." 
+ CRDGroupVersion.String() + PHPAppLayer_GroupVersionKind = CRDGroupVersion.WithKind(PHPAppLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&PHPAppLayer{}, &PHPAppLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_railsapplayer_terraformed.go b/apis/opsworks/v1beta2/zz_railsapplayer_terraformed.go new file mode 100755 index 0000000000..0b35c7134b --- /dev/null +++ b/apis/opsworks/v1beta2/zz_railsapplayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RailsAppLayer +func (mg *RailsAppLayer) GetTerraformResourceType() string { + return "aws_opsworks_rails_app_layer" +} + +// GetConnectionDetailsMapping for this RailsAppLayer +func (tr *RailsAppLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RailsAppLayer +func (tr *RailsAppLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RailsAppLayer +func (tr *RailsAppLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this RailsAppLayer +func (tr *RailsAppLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RailsAppLayer +func (tr *RailsAppLayer) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RailsAppLayer +func (tr *RailsAppLayer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RailsAppLayer +func (tr *RailsAppLayer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this RailsAppLayer +func (tr *RailsAppLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RailsAppLayer using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *RailsAppLayer) LateInitialize(attrs []byte) (bool, error) { + params := &RailsAppLayerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RailsAppLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_railsapplayer_types.go b/apis/opsworks/v1beta2/zz_railsapplayer_types.go new file mode 100755 index 0000000000..d20123b161 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_railsapplayer_types.go @@ -0,0 +1,735 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RailsAppLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []RailsAppLayerCloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type RailsAppLayerCloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type RailsAppLayerCloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type RailsAppLayerCloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type RailsAppLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []RailsAppLayerCloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type RailsAppLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []RailsAppLayerCloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type RailsAppLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RailsAppLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RailsAppLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RailsAppLayerInitParameters struct { + + // Keyword for the app server to use. Defaults to "apache_passenger". + AppServer *string `json:"appServer,omitempty" tf:"app_server,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // When OpsWorks is managing Bundler, which version to use. Defaults to "1.5.3". + BundlerVersion *string `json:"bundlerVersion,omitempty" tf:"bundler_version,omitempty"` + + CloudwatchConfiguration *RailsAppLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []RailsAppLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. 
+ InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *RailsAppLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // Whether OpsWorks should manage bundler. On by default. + ManageBundler *bool `json:"manageBundler,omitempty" tf:"manage_bundler,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of Passenger to use. Defaults to "4.0.46". + PassengerVersion *string `json:"passengerVersion,omitempty" tf:"passenger_version,omitempty"` + + // The version of Ruby to use. Defaults to "2.0.0". + RubyVersion *string `json:"rubyVersion,omitempty" tf:"ruby_version,omitempty"` + + // The version of RubyGems to use. Defaults to "2.2.2". + RubygemsVersion *string `json:"rubygemsVersion,omitempty" tf:"rubygems_version,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. 
+ // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type RailsAppLayerLoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type RailsAppLayerLoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type RailsAppLayerLoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string 
`json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type RailsAppLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *RailsAppLayerLoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *RailsAppLayerLoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type RailsAppLayerLoadBasedAutoScalingObservation struct { + Downscaling *RailsAppLayerLoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *RailsAppLayerLoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type RailsAppLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *RailsAppLayerLoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + 
Upscaling *RailsAppLayerLoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type RailsAppLayerLoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type RailsAppLayerLoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type RailsAppLayerLoadBasedAutoScalingUpscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // 
+kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type RailsAppLayerObservation struct { + + // Keyword for the app server to use. Defaults to "apache_passenger". + AppServer *string `json:"appServer,omitempty" tf:"app_server,omitempty"` + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // When OpsWorks is managing Bundler, which version to use. Defaults to "1.5.3". 
+ BundlerVersion *string `json:"bundlerVersion,omitempty" tf:"bundler_version,omitempty"` + + CloudwatchConfiguration *RailsAppLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []RailsAppLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *RailsAppLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // Whether OpsWorks should manage bundler. On by default. + ManageBundler *bool `json:"manageBundler,omitempty" tf:"manage_bundler,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of Passenger to use. Defaults to "4.0.46". + PassengerVersion *string `json:"passengerVersion,omitempty" tf:"passenger_version,omitempty"` + + // The version of Ruby to use. Defaults to "2.0.0". + RubyVersion *string `json:"rubyVersion,omitempty" tf:"ruby_version,omitempty"` + + // The version of RubyGems to use. Defaults to "2.2.2". + RubygemsVersion *string `json:"rubygemsVersion,omitempty" tf:"rubygems_version,omitempty"` + + // ID of the stack the layer will belong to. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type RailsAppLayerParameters struct { + + // Keyword for the app server to use. Defaults to "apache_passenger". + // +kubebuilder:validation:Optional + AppServer *string `json:"appServer,omitempty" tf:"app_server,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // When OpsWorks is managing Bundler, which version to use. Defaults to "1.5.3". + // +kubebuilder:validation:Optional + BundlerVersion *string `json:"bundlerVersion,omitempty" tf:"bundler_version,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *RailsAppLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. 
+ // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // Custom JSON attributes to apply to the layer. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. 
+ // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []RailsAppLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *RailsAppLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // Whether OpsWorks should manage bundler. On by default. + // +kubebuilder:validation:Optional + ManageBundler *bool `json:"manageBundler,omitempty" tf:"manage_bundler,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of Passenger to use. Defaults to "4.0.46". + // +kubebuilder:validation:Optional + PassengerVersion *string `json:"passengerVersion,omitempty" tf:"passenger_version,omitempty"` + + // The version of Ruby to use. Defaults to "2.0.0". 
+ // +kubebuilder:validation:Optional + RubyVersion *string `json:"rubyVersion,omitempty" tf:"ruby_version,omitempty"` + + // The version of RubyGems to use. Defaults to "2.2.2". + // +kubebuilder:validation:Optional + RubygemsVersion *string `json:"rubygemsVersion,omitempty" tf:"rubygems_version,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. + // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +// RailsAppLayerSpec defines the desired state of RailsAppLayer +type RailsAppLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RailsAppLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RailsAppLayerInitParameters `json:"initProvider,omitempty"` +} + +// RailsAppLayerStatus defines the observed state of RailsAppLayer. +type RailsAppLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RailsAppLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RailsAppLayer is the Schema for the RailsAppLayers API. Provides an OpsWorks Ruby on Rails application layer resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type RailsAppLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec RailsAppLayerSpec `json:"spec"` + Status RailsAppLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RailsAppLayerList contains a list of RailsAppLayers +type RailsAppLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RailsAppLayer `json:"items"` +} + +// Repository type metadata. +var ( + RailsAppLayer_Kind = "RailsAppLayer" + RailsAppLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RailsAppLayer_Kind}.String() + RailsAppLayer_KindAPIVersion = RailsAppLayer_Kind + "." + CRDGroupVersion.String() + RailsAppLayer_GroupVersionKind = CRDGroupVersion.WithKind(RailsAppLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&RailsAppLayer{}, &RailsAppLayerList{}) +} diff --git a/apis/opsworks/v1beta2/zz_stack_terraformed.go b/apis/opsworks/v1beta2/zz_stack_terraformed.go new file mode 100755 index 0000000000..4de58cd825 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_stack_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Stack +func (mg *Stack) GetTerraformResourceType() string { + return "aws_opsworks_stack" +} + +// GetConnectionDetailsMapping for this Stack +func (tr *Stack) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"custom_cookbooks_source[*].password": "customCookbooksSource[*].passwordSecretRef", "custom_cookbooks_source[*].ssh_key": "customCookbooksSource[*].sshKeySecretRef"} +} + +// GetObservation of this Stack +func (tr *Stack) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Stack +func (tr *Stack) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Stack +func (tr *Stack) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Stack +func (tr *Stack) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Stack +func (tr *Stack) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Stack +func (tr *Stack) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Stack
+func (tr *Stack) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Stack using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Stack) LateInitialize(attrs []byte) (bool, error) {
+	params := &StackParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Stack) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_stack_types.go b/apis/opsworks/v1beta2/zz_stack_types.go new file mode 100755 index 0000000000..4683543387 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_stack_types.go @@ -0,0 +1,463 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomCookbooksSourceInitParameters struct { + + // Password to use when authenticating to the source. + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // For sources that are version-aware, the revision to use. + Revision *string `json:"revision,omitempty" tf:"revision,omitempty"` + + // SSH key to use when authenticating to the source. + SSHKeySecretRef *v1.SecretKeySelector `json:"sshKeySecretRef,omitempty" tf:"-"` + + // The type of source to use. For example, "archive". + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The URL where the cookbooks resource can be found. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Username to use when authenticating to the source. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type CustomCookbooksSourceObservation struct { + + // For sources that are version-aware, the revision to use. + Revision *string `json:"revision,omitempty" tf:"revision,omitempty"` + + // The type of source to use. For example, "archive". + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The URL where the cookbooks resource can be found. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Username to use when authenticating to the source. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type CustomCookbooksSourceParameters struct { + + // Password to use when authenticating to the source. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // For sources that are version-aware, the revision to use. + // +kubebuilder:validation:Optional + Revision *string `json:"revision,omitempty" tf:"revision,omitempty"` + + // SSH key to use when authenticating to the source. + // +kubebuilder:validation:Optional + SSHKeySecretRef *v1.SecretKeySelector `json:"sshKeySecretRef,omitempty" tf:"-"` + + // The type of source to use. For example, "archive". + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The URL where the cookbooks resource can be found. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` + + // Username to use when authenticating to the source. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type StackInitParameters struct { + + // If set to "LATEST", OpsWorks will automatically install the latest version. + AgentVersion *string `json:"agentVersion,omitempty" tf:"agent_version,omitempty"` + + // If manage_berkshelf is enabled, the version of Berkshelf to use. + BerkshelfVersion *string `json:"berkshelfVersion,omitempty" tf:"berkshelf_version,omitempty"` + + // Color to paint next to the stack's resources in the OpsWorks console. + Color *string `json:"color,omitempty" tf:"color,omitempty"` + + // Name of the configuration manager to use. Defaults to "Chef". + ConfigurationManagerName *string `json:"configurationManagerName,omitempty" tf:"configuration_manager_name,omitempty"` + + // Version of the configuration manager to use. Defaults to "11.4". 
+ ConfigurationManagerVersion *string `json:"configurationManagerVersion,omitempty" tf:"configuration_manager_version,omitempty"` + + // When use_custom_cookbooks is set, provide this sub-object as described below. + CustomCookbooksSource *CustomCookbooksSourceInitParameters `json:"customCookbooksSource,omitempty" tf:"custom_cookbooks_source,omitempty"` + + // User defined JSON passed to "Chef". Use a "here doc" for multiline JSON. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Name of the availability zone where instances will be created by default. + // Cannot be set when vpc_id is set. + DefaultAvailabilityZone *string `json:"defaultAvailabilityZone,omitempty" tf:"default_availability_zone,omitempty"` + + // The ARN of an IAM Instance Profile that created instances will have by default. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + DefaultInstanceProfileArn *string `json:"defaultInstanceProfileArn,omitempty" tf:"default_instance_profile_arn,omitempty"` + + // Reference to a InstanceProfile in iam to populate defaultInstanceProfileArn. + // +kubebuilder:validation:Optional + DefaultInstanceProfileArnRef *v1.Reference `json:"defaultInstanceProfileArnRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate defaultInstanceProfileArn. + // +kubebuilder:validation:Optional + DefaultInstanceProfileArnSelector *v1.Selector `json:"defaultInstanceProfileArnSelector,omitempty" tf:"-"` + + // Name of OS that will be installed on instances by default. + DefaultOs *string `json:"defaultOs,omitempty" tf:"default_os,omitempty"` + + // Name of the type of root device instances will have by default. 
+ DefaultRootDeviceType *string `json:"defaultRootDeviceType,omitempty" tf:"default_root_device_type,omitempty"` + + // Name of the SSH keypair that instances will have by default. + DefaultSSHKeyName *string `json:"defaultSshKeyName,omitempty" tf:"default_ssh_key_name,omitempty"` + + // ID of the subnet in which instances will be created by default. + // Required if vpc_id is set to a VPC other than the default VPC, and forbidden if it isn't. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + DefaultSubnetID *string `json:"defaultSubnetId,omitempty" tf:"default_subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate defaultSubnetId. + // +kubebuilder:validation:Optional + DefaultSubnetIDRef *v1.Reference `json:"defaultSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate defaultSubnetId. + // +kubebuilder:validation:Optional + DefaultSubnetIDSelector *v1.Selector `json:"defaultSubnetIdSelector,omitempty" tf:"-"` + + // Keyword representing the naming scheme that will be used for instance hostnames within this stack. + HostnameTheme *string `json:"hostnameTheme,omitempty" tf:"hostname_theme,omitempty"` + + // Boolean value controlling whether Opsworks will run Berkshelf for this stack. + ManageBerkshelf *bool `json:"manageBerkshelf,omitempty" tf:"manage_berkshelf,omitempty"` + + // The name of the stack. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ARN of an IAM role that the OpsWorks service will act as. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. 
+ // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Boolean value controlling whether the custom cookbook settings are enabled. + UseCustomCookbooks *bool `json:"useCustomCookbooks,omitempty" tf:"use_custom_cookbooks,omitempty"` + + // Boolean value controlling whether the standard OpsWorks security groups apply to created instances. + UseOpsworksSecurityGroups *bool `json:"useOpsworksSecurityGroups,omitempty" tf:"use_opsworks_security_groups,omitempty"` + + // ID of the VPC that this stack belongs to. + // Defaults to the region's default VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type StackObservation struct { + + // If set to "LATEST", OpsWorks will automatically install the latest version. + AgentVersion *string `json:"agentVersion,omitempty" tf:"agent_version,omitempty"` + + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // If manage_berkshelf is enabled, the version of Berkshelf to use. + BerkshelfVersion *string `json:"berkshelfVersion,omitempty" tf:"berkshelf_version,omitempty"` + + // Color to paint next to the stack's resources in the OpsWorks console. 
+ Color *string `json:"color,omitempty" tf:"color,omitempty"` + + // Name of the configuration manager to use. Defaults to "Chef". + ConfigurationManagerName *string `json:"configurationManagerName,omitempty" tf:"configuration_manager_name,omitempty"` + + // Version of the configuration manager to use. Defaults to "11.4". + ConfigurationManagerVersion *string `json:"configurationManagerVersion,omitempty" tf:"configuration_manager_version,omitempty"` + + // When use_custom_cookbooks is set, provide this sub-object as described below. + CustomCookbooksSource *CustomCookbooksSourceObservation `json:"customCookbooksSource,omitempty" tf:"custom_cookbooks_source,omitempty"` + + // User defined JSON passed to "Chef". Use a "here doc" for multiline JSON. + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Name of the availability zone where instances will be created by default. + // Cannot be set when vpc_id is set. + DefaultAvailabilityZone *string `json:"defaultAvailabilityZone,omitempty" tf:"default_availability_zone,omitempty"` + + // The ARN of an IAM Instance Profile that created instances will have by default. + DefaultInstanceProfileArn *string `json:"defaultInstanceProfileArn,omitempty" tf:"default_instance_profile_arn,omitempty"` + + // Name of OS that will be installed on instances by default. + DefaultOs *string `json:"defaultOs,omitempty" tf:"default_os,omitempty"` + + // Name of the type of root device instances will have by default. + DefaultRootDeviceType *string `json:"defaultRootDeviceType,omitempty" tf:"default_root_device_type,omitempty"` + + // Name of the SSH keypair that instances will have by default. + DefaultSSHKeyName *string `json:"defaultSshKeyName,omitempty" tf:"default_ssh_key_name,omitempty"` + + // ID of the subnet in which instances will be created by default. + // Required if vpc_id is set to a VPC other than the default VPC, and forbidden if it isn't. 
+ DefaultSubnetID *string `json:"defaultSubnetId,omitempty" tf:"default_subnet_id,omitempty"` + + // Keyword representing the naming scheme that will be used for instance hostnames within this stack. + HostnameTheme *string `json:"hostnameTheme,omitempty" tf:"hostname_theme,omitempty"` + + // The id of the stack. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Boolean value controlling whether Opsworks will run Berkshelf for this stack. + ManageBerkshelf *bool `json:"manageBerkshelf,omitempty" tf:"manage_berkshelf,omitempty"` + + // The name of the stack. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the region where the stack will exist. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // The ARN of an IAM role that the OpsWorks service will act as. + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + StackEndpoint *string `json:"stackEndpoint,omitempty" tf:"stack_endpoint,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Boolean value controlling whether the custom cookbook settings are enabled. + UseCustomCookbooks *bool `json:"useCustomCookbooks,omitempty" tf:"use_custom_cookbooks,omitempty"` + + // Boolean value controlling whether the standard OpsWorks security groups apply to created instances. + UseOpsworksSecurityGroups *bool `json:"useOpsworksSecurityGroups,omitempty" tf:"use_opsworks_security_groups,omitempty"` + + // ID of the VPC that this stack belongs to. + // Defaults to the region's default VPC. 
+ VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type StackParameters struct { + + // If set to "LATEST", OpsWorks will automatically install the latest version. + // +kubebuilder:validation:Optional + AgentVersion *string `json:"agentVersion,omitempty" tf:"agent_version,omitempty"` + + // If manage_berkshelf is enabled, the version of Berkshelf to use. + // +kubebuilder:validation:Optional + BerkshelfVersion *string `json:"berkshelfVersion,omitempty" tf:"berkshelf_version,omitempty"` + + // Color to paint next to the stack's resources in the OpsWorks console. + // +kubebuilder:validation:Optional + Color *string `json:"color,omitempty" tf:"color,omitempty"` + + // Name of the configuration manager to use. Defaults to "Chef". + // +kubebuilder:validation:Optional + ConfigurationManagerName *string `json:"configurationManagerName,omitempty" tf:"configuration_manager_name,omitempty"` + + // Version of the configuration manager to use. Defaults to "11.4". + // +kubebuilder:validation:Optional + ConfigurationManagerVersion *string `json:"configurationManagerVersion,omitempty" tf:"configuration_manager_version,omitempty"` + + // When use_custom_cookbooks is set, provide this sub-object as described below. + // +kubebuilder:validation:Optional + CustomCookbooksSource *CustomCookbooksSourceParameters `json:"customCookbooksSource,omitempty" tf:"custom_cookbooks_source,omitempty"` + + // User defined JSON passed to "Chef". Use a "here doc" for multiline JSON. + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Name of the availability zone where instances will be created by default. + // Cannot be set when vpc_id is set. + // +kubebuilder:validation:Optional + DefaultAvailabilityZone *string `json:"defaultAvailabilityZone,omitempty" tf:"default_availability_zone,omitempty"` + + // The ARN of an IAM Instance Profile that created instances will have by default. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.InstanceProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + DefaultInstanceProfileArn *string `json:"defaultInstanceProfileArn,omitempty" tf:"default_instance_profile_arn,omitempty"` + + // Reference to a InstanceProfile in iam to populate defaultInstanceProfileArn. + // +kubebuilder:validation:Optional + DefaultInstanceProfileArnRef *v1.Reference `json:"defaultInstanceProfileArnRef,omitempty" tf:"-"` + + // Selector for a InstanceProfile in iam to populate defaultInstanceProfileArn. + // +kubebuilder:validation:Optional + DefaultInstanceProfileArnSelector *v1.Selector `json:"defaultInstanceProfileArnSelector,omitempty" tf:"-"` + + // Name of OS that will be installed on instances by default. + // +kubebuilder:validation:Optional + DefaultOs *string `json:"defaultOs,omitempty" tf:"default_os,omitempty"` + + // Name of the type of root device instances will have by default. + // +kubebuilder:validation:Optional + DefaultRootDeviceType *string `json:"defaultRootDeviceType,omitempty" tf:"default_root_device_type,omitempty"` + + // Name of the SSH keypair that instances will have by default. + // +kubebuilder:validation:Optional + DefaultSSHKeyName *string `json:"defaultSshKeyName,omitempty" tf:"default_ssh_key_name,omitempty"` + + // ID of the subnet in which instances will be created by default. + // Required if vpc_id is set to a VPC other than the default VPC, and forbidden if it isn't. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + DefaultSubnetID *string `json:"defaultSubnetId,omitempty" tf:"default_subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate defaultSubnetId. 
+ // +kubebuilder:validation:Optional + DefaultSubnetIDRef *v1.Reference `json:"defaultSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate defaultSubnetId. + // +kubebuilder:validation:Optional + DefaultSubnetIDSelector *v1.Selector `json:"defaultSubnetIdSelector,omitempty" tf:"-"` + + // Keyword representing the naming scheme that will be used for instance hostnames within this stack. + // +kubebuilder:validation:Optional + HostnameTheme *string `json:"hostnameTheme,omitempty" tf:"hostname_theme,omitempty"` + + // Boolean value controlling whether Opsworks will run Berkshelf for this stack. + // +kubebuilder:validation:Optional + ManageBerkshelf *bool `json:"manageBerkshelf,omitempty" tf:"manage_berkshelf,omitempty"` + + // The name of the stack. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the region where the stack will exist. + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"region,omitempty"` + + // The ARN of an IAM role that the OpsWorks service will act as. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Boolean value controlling whether the custom cookbook settings are enabled. + // +kubebuilder:validation:Optional + UseCustomCookbooks *bool `json:"useCustomCookbooks,omitempty" tf:"use_custom_cookbooks,omitempty"` + + // Boolean value controlling whether the standard OpsWorks security groups apply to created instances. + // +kubebuilder:validation:Optional + UseOpsworksSecurityGroups *bool `json:"useOpsworksSecurityGroups,omitempty" tf:"use_opsworks_security_groups,omitempty"` + + // ID of the VPC that this stack belongs to. + // Defaults to the region's default VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +// StackSpec defines the desired state of Stack +type StackSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StackParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StackInitParameters `json:"initProvider,omitempty"` +} + +// StackStatus defines the observed state of Stack. +type StackStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StackObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Stack is the Schema for the Stacks API. Provides an OpsWorks stack resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Stack struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec StackSpec `json:"spec"` + Status StackStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StackList contains a list of Stacks +type StackList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Stack `json:"items"` +} + +// Repository type metadata. 
+var ( + Stack_Kind = "Stack" + Stack_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Stack_Kind}.String() + Stack_KindAPIVersion = Stack_Kind + "." + CRDGroupVersion.String() + Stack_GroupVersionKind = CRDGroupVersion.WithKind(Stack_Kind) +) + +func init() { + SchemeBuilder.Register(&Stack{}, &StackList{}) +} diff --git a/apis/opsworks/v1beta2/zz_staticweblayer_terraformed.go b/apis/opsworks/v1beta2/zz_staticweblayer_terraformed.go new file mode 100755 index 0000000000..31db89a081 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_staticweblayer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StaticWebLayer +func (mg *StaticWebLayer) GetTerraformResourceType() string { + return "aws_opsworks_static_web_layer" +} + +// GetConnectionDetailsMapping for this StaticWebLayer +func (tr *StaticWebLayer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this StaticWebLayer +func (tr *StaticWebLayer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StaticWebLayer +func (tr *StaticWebLayer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StaticWebLayer +func (tr *StaticWebLayer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID
+}
+
+// GetParameters of this StaticWebLayer
+func (tr *StaticWebLayer) GetParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this StaticWebLayer
+func (tr *StaticWebLayer) SetParameters(params map[string]any) error {
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this StaticWebLayer
+func (tr *StaticWebLayer) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this StaticWebLayer
+func (tr *StaticWebLayer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StaticWebLayer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StaticWebLayer) LateInitialize(attrs []byte) (bool, error) { + params := &StaticWebLayerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StaticWebLayer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/opsworks/v1beta2/zz_staticweblayer_types.go b/apis/opsworks/v1beta2/zz_staticweblayer_types.go new file mode 100755 index 0000000000..f36c4370d1 --- /dev/null +++ b/apis/opsworks/v1beta2/zz_staticweblayer_types.go @@ -0,0 +1,672 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type StaticWebLayerCloudwatchConfigurationInitParameters struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []StaticWebLayerCloudwatchConfigurationLogStreamsInitParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type StaticWebLayerCloudwatchConfigurationLogStreamsInitParameters struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type StaticWebLayerCloudwatchConfigurationLogStreamsObservation struct { + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + File *string `json:"file,omitempty" tf:"file,omitempty"` + + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ LogGroupName *string `json:"logGroupName,omitempty" tf:"log_group_name,omitempty"` + + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type StaticWebLayerCloudwatchConfigurationLogStreamsParameters struct { + + // +kubebuilder:validation:Optional + BatchCount *float64 `json:"batchCount,omitempty" tf:"batch_count,omitempty"` + + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + BufferDuration *float64 `json:"bufferDuration,omitempty" tf:"buffer_duration,omitempty"` + + // +kubebuilder:validation:Optional + DatetimeFormat *string `json:"datetimeFormat,omitempty" tf:"datetime_format,omitempty"` + + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // +kubebuilder:validation:Optional + File *string `json:"file" tf:"file,omitempty"` + + // +kubebuilder:validation:Optional + FileFingerprintLines *string `json:"fileFingerprintLines,omitempty" tf:"file_fingerprint_lines,omitempty"` + + // +kubebuilder:validation:Optional + InitialPosition *string `json:"initialPosition,omitempty" tf:"initial_position,omitempty"` + + // A human-readable name for the layer. 
+ // +kubebuilder:validation:Optional + LogGroupName *string `json:"logGroupName" tf:"log_group_name,omitempty"` + + // +kubebuilder:validation:Optional + MultilineStartPattern *string `json:"multilineStartPattern,omitempty" tf:"multiline_start_pattern,omitempty"` + + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type StaticWebLayerCloudwatchConfigurationObservation struct { + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + LogStreams []StaticWebLayerCloudwatchConfigurationLogStreamsObservation `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type StaticWebLayerCloudwatchConfigurationParameters struct { + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + LogStreams []StaticWebLayerCloudwatchConfigurationLogStreamsParameters `json:"logStreams,omitempty" tf:"log_streams,omitempty"` +} + +type StaticWebLayerEBSVolumeInitParameters struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StaticWebLayerEBSVolumeObservation struct { + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + MountPoint *string `json:"mountPoint,omitempty" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + NumberOfDisks *float64 `json:"numberOfDisks,omitempty" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StaticWebLayerEBSVolumeParameters struct { + + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // For PIOPS volumes, the IOPS per disk. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The path to mount the EBS volume on the layer's instances. + // +kubebuilder:validation:Optional + MountPoint *string `json:"mountPoint" tf:"mount_point,omitempty"` + + // The number of disks to use for the EBS volume. + // +kubebuilder:validation:Optional + NumberOfDisks *float64 `json:"numberOfDisks" tf:"number_of_disks,omitempty"` + + // The RAID level to use for the volume. + // +kubebuilder:validation:Optional + RaidLevel *string `json:"raidLevel,omitempty" tf:"raid_level,omitempty"` + + // The size of the volume in gigabytes. + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // The type of volume to create. This may be standard (the default), io1 or gp2. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StaticWebLayerInitParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *StaticWebLayerCloudwatchConfigurationInitParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []StaticWebLayerEBSVolumeInitParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *StaticWebLayerLoadBasedAutoScalingInitParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. 
+ UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type StaticWebLayerLoadBasedAutoScalingDownscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type StaticWebLayerLoadBasedAutoScalingDownscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type StaticWebLayerLoadBasedAutoScalingDownscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" 
tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // +kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type StaticWebLayerLoadBasedAutoScalingInitParameters struct { + Downscaling *StaticWebLayerLoadBasedAutoScalingDownscalingInitParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *StaticWebLayerLoadBasedAutoScalingUpscalingInitParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type StaticWebLayerLoadBasedAutoScalingObservation struct { + Downscaling *StaticWebLayerLoadBasedAutoScalingDownscalingObservation `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + Upscaling *StaticWebLayerLoadBasedAutoScalingUpscalingObservation `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type StaticWebLayerLoadBasedAutoScalingParameters struct { + + // +kubebuilder:validation:Optional + Downscaling *StaticWebLayerLoadBasedAutoScalingDownscalingParameters `json:"downscaling,omitempty" tf:"downscaling,omitempty"` + + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // +kubebuilder:validation:Optional + Upscaling *StaticWebLayerLoadBasedAutoScalingUpscalingParameters `json:"upscaling,omitempty" tf:"upscaling,omitempty"` +} + +type StaticWebLayerLoadBasedAutoScalingUpscalingInitParameters struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + 
CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type StaticWebLayerLoadBasedAutoScalingUpscalingObservation struct { + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type StaticWebLayerLoadBasedAutoScalingUpscalingParameters struct { + + // +kubebuilder:validation:Optional + Alarms []*string `json:"alarms,omitempty" tf:"alarms,omitempty"` + + // +kubebuilder:validation:Optional + CPUThreshold *float64 `json:"cpuThreshold,omitempty" tf:"cpu_threshold,omitempty"` + + // +kubebuilder:validation:Optional + IgnoreMetricsTime *float64 `json:"ignoreMetricsTime,omitempty" tf:"ignore_metrics_time,omitempty"` + + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // +kubebuilder:validation:Optional + LoadThreshold *float64 `json:"loadThreshold,omitempty" tf:"load_threshold,omitempty"` + + // 
+kubebuilder:validation:Optional + MemoryThreshold *float64 `json:"memoryThreshold,omitempty" tf:"memory_threshold,omitempty"` + + // +kubebuilder:validation:Optional + ThresholdsWaitTime *float64 `json:"thresholdsWaitTime,omitempty" tf:"thresholds_wait_time,omitempty"` +} + +type StaticWebLayerObservation struct { + + // The Amazon Resource Name(ARN) of the layer. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether to automatically assign an elastic IP address to the layer's instances. + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + CloudwatchConfiguration *StaticWebLayerCloudwatchConfigurationObservation `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // Ids for a set of security groups to apply to the layer's instances. 
+ // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + EBSVolume []StaticWebLayerEBSVolumeObservation `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // The id of the layer. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + LoadBasedAutoScaling *StaticWebLayerLoadBasedAutoScalingObservation `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Names of a set of system packages to install on the layer's instances. 
+ // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Whether to use EBS-optimized instances. + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +type StaticWebLayerParameters struct { + + // Whether to automatically assign an elastic IP address to the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignElasticIps *bool `json:"autoAssignElasticIps,omitempty" tf:"auto_assign_elastic_ips,omitempty"` + + // For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. + // +kubebuilder:validation:Optional + AutoAssignPublicIps *bool `json:"autoAssignPublicIps,omitempty" tf:"auto_assign_public_ips,omitempty"` + + // Whether to enable auto-healing for the layer. + // +kubebuilder:validation:Optional + AutoHealing *bool `json:"autoHealing,omitempty" tf:"auto_healing,omitempty"` + + // +kubebuilder:validation:Optional + CloudwatchConfiguration *StaticWebLayerCloudwatchConfigurationParameters `json:"cloudwatchConfiguration,omitempty" tf:"cloudwatch_configuration,omitempty"` + + // +kubebuilder:validation:Optional + CustomConfigureRecipes []*string `json:"customConfigureRecipes,omitempty" tf:"custom_configure_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomDeployRecipes []*string `json:"customDeployRecipes,omitempty" tf:"custom_deploy_recipes,omitempty"` + + // The ARN of an IAM profile that will be used for the layer's instances. 
+ // +kubebuilder:validation:Optional + CustomInstanceProfileArn *string `json:"customInstanceProfileArn,omitempty" tf:"custom_instance_profile_arn,omitempty"` + + // +kubebuilder:validation:Optional + CustomJSON *string `json:"customJson,omitempty" tf:"custom_json,omitempty"` + + // References to SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRefs []v1.Reference `json:"customSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate customSecurityGroupIds. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // Ids for a set of security groups to apply to the layer's instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=CustomSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=CustomSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + CustomSecurityGroupIds []*string `json:"customSecurityGroupIds,omitempty" tf:"custom_security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + CustomSetupRecipes []*string `json:"customSetupRecipes,omitempty" tf:"custom_setup_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomShutdownRecipes []*string `json:"customShutdownRecipes,omitempty" tf:"custom_shutdown_recipes,omitempty"` + + // +kubebuilder:validation:Optional + CustomUndeployRecipes []*string `json:"customUndeployRecipes,omitempty" tf:"custom_undeploy_recipes,omitempty"` + + // Whether to enable Elastic Load Balancing connection draining. 
+ // +kubebuilder:validation:Optional + DrainELBOnShutdown *bool `json:"drainElbOnShutdown,omitempty" tf:"drain_elb_on_shutdown,omitempty"` + + // ebs_volume blocks, as described below, will each create an EBS volume and connect it to the layer's instances. + // +kubebuilder:validation:Optional + EBSVolume []StaticWebLayerEBSVolumeParameters `json:"ebsVolume,omitempty" tf:"ebs_volume,omitempty"` + + // Name of an Elastic Load Balancer to attach to this layer + // +kubebuilder:validation:Optional + ElasticLoadBalancer *string `json:"elasticLoadBalancer,omitempty" tf:"elastic_load_balancer,omitempty"` + + // Whether to install OS and package updates on each instance when it boots. + // +kubebuilder:validation:Optional + InstallUpdatesOnBoot *bool `json:"installUpdatesOnBoot,omitempty" tf:"install_updates_on_boot,omitempty"` + + // The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. + // +kubebuilder:validation:Optional + InstanceShutdownTimeout *float64 `json:"instanceShutdownTimeout,omitempty" tf:"instance_shutdown_timeout,omitempty"` + + // +kubebuilder:validation:Optional + LoadBasedAutoScaling *StaticWebLayerLoadBasedAutoScalingParameters `json:"loadBasedAutoScaling,omitempty" tf:"load_based_auto_scaling,omitempty"` + + // A human-readable name for the layer. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // ID of the stack the layer will belong to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/opsworks/v1beta2.Stack + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StackID *string `json:"stackId,omitempty" tf:"stack_id,omitempty"` + + // Reference to a Stack in opsworks to populate stackId. 
+ // +kubebuilder:validation:Optional + StackIDRef *v1.Reference `json:"stackIdRef,omitempty" tf:"-"` + + // Selector for a Stack in opsworks to populate stackId. + // +kubebuilder:validation:Optional + StackIDSelector *v1.Selector `json:"stackIdSelector,omitempty" tf:"-"` + + // Names of a set of system packages to install on the layer's instances. + // +kubebuilder:validation:Optional + // +listType=set + SystemPackages []*string `json:"systemPackages,omitempty" tf:"system_packages,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether to use EBS-optimized instances. + // +kubebuilder:validation:Optional + UseEBSOptimizedInstances *bool `json:"useEbsOptimizedInstances,omitempty" tf:"use_ebs_optimized_instances,omitempty"` +} + +// StaticWebLayerSpec defines the desired state of StaticWebLayer +type StaticWebLayerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StaticWebLayerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StaticWebLayerInitParameters `json:"initProvider,omitempty"` +} + +// StaticWebLayerStatus defines the observed state of StaticWebLayer. 
+type StaticWebLayerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StaticWebLayerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StaticWebLayer is the Schema for the StaticWebLayers API. Provides an OpsWorks static web server layer resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type StaticWebLayer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec StaticWebLayerSpec `json:"spec"` + Status StaticWebLayerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StaticWebLayerList contains a list of StaticWebLayers +type StaticWebLayerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StaticWebLayer `json:"items"` +} + +// Repository type metadata. +var ( + StaticWebLayer_Kind = "StaticWebLayer" + StaticWebLayer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StaticWebLayer_Kind}.String() + StaticWebLayer_KindAPIVersion = StaticWebLayer_Kind + "." 
+ CRDGroupVersion.String() + StaticWebLayer_GroupVersionKind = CRDGroupVersion.WithKind(StaticWebLayer_Kind) +) + +func init() { + SchemeBuilder.Register(&StaticWebLayer{}, &StaticWebLayerList{}) +} diff --git a/apis/pinpoint/v1beta1/zz_generated.conversion_hubs.go b/apis/pinpoint/v1beta1/zz_generated.conversion_hubs.go index a7004b1cf9..23c56ef052 100755 --- a/apis/pinpoint/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/pinpoint/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *App) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SMSChannel) Hub() {} diff --git a/apis/pinpoint/v1beta1/zz_generated.conversion_spokes.go b/apis/pinpoint/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..4e65f01a2d --- /dev/null +++ b/apis/pinpoint/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this App to the hub type. +func (tr *App) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the App type. 
+func (tr *App) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/pinpoint/v1beta1/zz_generated.resolvers.go b/apis/pinpoint/v1beta1/zz_generated.resolvers.go index 8c5abe8810..8b81bf9dbf 100644 --- a/apis/pinpoint/v1beta1/zz_generated.resolvers.go +++ b/apis/pinpoint/v1beta1/zz_generated.resolvers.go @@ -9,9 +9,10 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" apisresolver "github.com/upbound/provider-aws/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -25,7 +26,7 @@ func (mg *SMSChannel) ResolveReferences( // ResolveReferences of this SMSChannel var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta1", "App", "AppList") + m, l, err = apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta2", "App", "AppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -44,7 +45,7 @@ func (mg *SMSChannel) ResolveReferences( // ResolveReferences of this SMSChannel mg.Spec.ForProvider.ApplicationID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ApplicationIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta1", "App", "AppList") + m, l, err = 
apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta2", "App", "AppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/pinpoint/v1beta1/zz_smschannel_types.go b/apis/pinpoint/v1beta1/zz_smschannel_types.go index d917abc063..dfe632d373 100755 --- a/apis/pinpoint/v1beta1/zz_smschannel_types.go +++ b/apis/pinpoint/v1beta1/zz_smschannel_types.go @@ -16,7 +16,7 @@ import ( type SMSChannelInitParameters struct { // ID of the application. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta1.App + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta2.App // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("application_id",true) ApplicationID *string `json:"applicationId,omitempty" tf:"application_id,omitempty"` @@ -64,7 +64,7 @@ type SMSChannelObservation struct { type SMSChannelParameters struct { // ID of the application. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta1.App + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta2.App // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("application_id",true) // +kubebuilder:validation:Optional ApplicationID *string `json:"applicationId,omitempty" tf:"application_id,omitempty"` diff --git a/apis/pinpoint/v1beta2/zz_app_terraformed.go b/apis/pinpoint/v1beta2/zz_app_terraformed.go new file mode 100755 index 0000000000..7fbc4bcf4d --- /dev/null +++ b/apis/pinpoint/v1beta2/zz_app_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this App +func (mg *App) GetTerraformResourceType() string { + return "aws_pinpoint_app" +} + +// GetConnectionDetailsMapping for this App +func (tr *App) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this App +func (tr *App) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this App +func (tr *App) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this App +func (tr *App) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this App +func (tr *App) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this App +func (tr *App) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this App +func (tr *App) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this App +func (tr *App) 
GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this App using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *App) LateInitialize(attrs []byte) (bool, error) { + params := &AppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *App) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/pinpoint/v1beta2/zz_app_types.go b/apis/pinpoint/v1beta2/zz_app_types.go new file mode 100755 index 0000000000..f3b3a435d8 --- /dev/null +++ b/apis/pinpoint/v1beta2/zz_app_types.go @@ -0,0 +1,270 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AppInitParameters struct { + + // Specifies settings for invoking an AWS Lambda function that customizes a segment for a campaign + CampaignHook *CampaignHookInitParameters `json:"campaignHook,omitempty" tf:"campaign_hook,omitempty"` + + // The default campaign limits for the app. These limits apply to each campaign for the app, unless the campaign overrides the default with limits of its own + Limits *LimitsInitParameters `json:"limits,omitempty" tf:"limits,omitempty"` + + // The application name + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The default quiet time for the app. Each campaign for this app sends no messages during this time unless the campaign overrides the default with a quiet time of its own + QuietTime *QuietTimeInitParameters `json:"quietTime,omitempty" tf:"quiet_time,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AppObservation struct { + + // The Application ID of the Pinpoint App. 
+ ApplicationID *string `json:"applicationId,omitempty" tf:"application_id,omitempty"` + + // Amazon Resource Name (ARN) of the PinPoint Application + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specifies settings for invoking an AWS Lambda function that customizes a segment for a campaign + CampaignHook *CampaignHookObservation `json:"campaignHook,omitempty" tf:"campaign_hook,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The default campaign limits for the app. These limits apply to each campaign for the app, unless the campaign overrides the default with limits of its own + Limits *LimitsObservation `json:"limits,omitempty" tf:"limits,omitempty"` + + // The application name + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The default quiet time for the app. Each campaign for this app sends no messages during this time unless the campaign overrides the default with a quiet time of its own + QuietTime *QuietTimeObservation `json:"quietTime,omitempty" tf:"quiet_time,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type AppParameters struct { + + // Specifies settings for invoking an AWS Lambda function that customizes a segment for a campaign + // +kubebuilder:validation:Optional + CampaignHook *CampaignHookParameters `json:"campaignHook,omitempty" tf:"campaign_hook,omitempty"` + + // The default campaign limits for the app. 
These limits apply to each campaign for the app, unless the campaign overrides the default with limits of its own + // +kubebuilder:validation:Optional + Limits *LimitsParameters `json:"limits,omitempty" tf:"limits,omitempty"` + + // The application name + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The default quiet time for the app. Each campaign for this app sends no messages during this time unless the campaign overrides the default with a quiet time of its own + // +kubebuilder:validation:Optional + QuietTime *QuietTimeParameters `json:"quietTime,omitempty" tf:"quiet_time,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CampaignHookInitParameters struct { + + // Lambda function name or ARN to be called for delivery. Conflicts with web_url + LambdaFunctionName *string `json:"lambdaFunctionName,omitempty" tf:"lambda_function_name,omitempty"` + + // What mode Lambda should be invoked in. Valid values for this parameter are DELIVERY, FILTER. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Web URL to call for hook. If the URL has authentication specified it will be added as authentication to the request. Conflicts with lambda_function_name + WebURL *string `json:"webUrl,omitempty" tf:"web_url,omitempty"` +} + +type CampaignHookObservation struct { + + // Lambda function name or ARN to be called for delivery. Conflicts with web_url + LambdaFunctionName *string `json:"lambdaFunctionName,omitempty" tf:"lambda_function_name,omitempty"` + + // What mode Lambda should be invoked in. Valid values for this parameter are DELIVERY, FILTER. 
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Web URL to call for hook. If the URL has authentication specified it will be added as authentication to the request. Conflicts with lambda_function_name + WebURL *string `json:"webUrl,omitempty" tf:"web_url,omitempty"` +} + +type CampaignHookParameters struct { + + // Lambda function name or ARN to be called for delivery. Conflicts with web_url + // +kubebuilder:validation:Optional + LambdaFunctionName *string `json:"lambdaFunctionName,omitempty" tf:"lambda_function_name,omitempty"` + + // What mode Lambda should be invoked in. Valid values for this parameter are DELIVERY, FILTER. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Web URL to call for hook. If the URL has authentication specified it will be added as authentication to the request. Conflicts with lambda_function_name + // +kubebuilder:validation:Optional + WebURL *string `json:"webUrl,omitempty" tf:"web_url,omitempty"` +} + +type LimitsInitParameters struct { + + // The maximum number of messages that the campaign can send daily. + Daily *float64 `json:"daily,omitempty" tf:"daily,omitempty"` + + // The length of time (in seconds) that the campaign can run before it ends and message deliveries stop. This duration begins at the scheduled start time for the campaign. The minimum value is 60. + MaximumDuration *float64 `json:"maximumDuration,omitempty" tf:"maximum_duration,omitempty"` + + // The number of messages that the campaign can send per second. The minimum value is 50, and the maximum is 20000. + MessagesPerSecond *float64 `json:"messagesPerSecond,omitempty" tf:"messages_per_second,omitempty"` + + // The maximum total number of messages that the campaign can send. + Total *float64 `json:"total,omitempty" tf:"total,omitempty"` +} + +type LimitsObservation struct { + + // The maximum number of messages that the campaign can send daily. 
+ Daily *float64 `json:"daily,omitempty" tf:"daily,omitempty"` + + // The length of time (in seconds) that the campaign can run before it ends and message deliveries stop. This duration begins at the scheduled start time for the campaign. The minimum value is 60. + MaximumDuration *float64 `json:"maximumDuration,omitempty" tf:"maximum_duration,omitempty"` + + // The number of messages that the campaign can send per second. The minimum value is 50, and the maximum is 20000. + MessagesPerSecond *float64 `json:"messagesPerSecond,omitempty" tf:"messages_per_second,omitempty"` + + // The maximum total number of messages that the campaign can send. + Total *float64 `json:"total,omitempty" tf:"total,omitempty"` +} + +type LimitsParameters struct { + + // The maximum number of messages that the campaign can send daily. + // +kubebuilder:validation:Optional + Daily *float64 `json:"daily,omitempty" tf:"daily,omitempty"` + + // The length of time (in seconds) that the campaign can run before it ends and message deliveries stop. This duration begins at the scheduled start time for the campaign. The minimum value is 60. + // +kubebuilder:validation:Optional + MaximumDuration *float64 `json:"maximumDuration,omitempty" tf:"maximum_duration,omitempty"` + + // The number of messages that the campaign can send per second. The minimum value is 50, and the maximum is 20000. + // +kubebuilder:validation:Optional + MessagesPerSecond *float64 `json:"messagesPerSecond,omitempty" tf:"messages_per_second,omitempty"` + + // The maximum total number of messages that the campaign can send. + // +kubebuilder:validation:Optional + Total *float64 `json:"total,omitempty" tf:"total,omitempty"` +} + +type QuietTimeInitParameters struct { + + // The default end time for quiet time in ISO 8601 format. Required if start is set + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The default start time for quiet time in ISO 8601 format. 
Required if end is set + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type QuietTimeObservation struct { + + // The default end time for quiet time in ISO 8601 format. Required if start is set + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The default start time for quiet time in ISO 8601 format. Required if end is set + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type QuietTimeParameters struct { + + // The default end time for quiet time in ISO 8601 format. Required if start is set + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The default start time for quiet time in ISO 8601 format. Required if end is set + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +// AppSpec defines the desired state of App +type AppSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AppParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AppInitParameters `json:"initProvider,omitempty"` +} + +// AppStatus defines the observed state of App. 
+type AppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// App is the Schema for the Apps API. Provides a Pinpoint App resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type App struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AppSpec `json:"spec"` + Status AppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AppList contains a list of Apps +type AppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []App `json:"items"` +} + +// Repository type metadata. +var ( + App_Kind = "App" + App_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: App_Kind}.String() + App_KindAPIVersion = App_Kind + "." + CRDGroupVersion.String() + App_GroupVersionKind = CRDGroupVersion.WithKind(App_Kind) +) + +func init() { + SchemeBuilder.Register(&App{}, &AppList{}) +} diff --git a/apis/pinpoint/v1beta2/zz_generated.conversion_hubs.go b/apis/pinpoint/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..0e133939e5 --- /dev/null +++ b/apis/pinpoint/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *App) Hub() {} diff --git a/apis/pinpoint/v1beta2/zz_generated.deepcopy.go b/apis/pinpoint/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..23f623498d --- /dev/null +++ b/apis/pinpoint/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,566 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *App) DeepCopyInto(out *App) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new App. +func (in *App) DeepCopy() *App { + if in == nil { + return nil + } + out := new(App) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *App) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppInitParameters) DeepCopyInto(out *AppInitParameters) { + *out = *in + if in.CampaignHook != nil { + in, out := &in.CampaignHook, &out.CampaignHook + *out = new(CampaignHookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(LimitsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QuietTime != nil { + in, out := &in.QuietTime, &out.QuietTime + *out = new(QuietTimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppInitParameters. +func (in *AppInitParameters) DeepCopy() *AppInitParameters { + if in == nil { + return nil + } + out := new(AppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppList) DeepCopyInto(out *AppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]App, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppList. +func (in *AppList) DeepCopy() *AppList { + if in == nil { + return nil + } + out := new(AppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppObservation) DeepCopyInto(out *AppObservation) { + *out = *in + if in.ApplicationID != nil { + in, out := &in.ApplicationID, &out.ApplicationID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CampaignHook != nil { + in, out := &in.CampaignHook, &out.CampaignHook + *out = new(CampaignHookObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(LimitsObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QuietTime != nil { + in, out := &in.QuietTime, &out.QuietTime + *out = new(QuietTimeObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppObservation. 
+func (in *AppObservation) DeepCopy() *AppObservation { + if in == nil { + return nil + } + out := new(AppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppParameters) DeepCopyInto(out *AppParameters) { + *out = *in + if in.CampaignHook != nil { + in, out := &in.CampaignHook, &out.CampaignHook + *out = new(CampaignHookParameters) + (*in).DeepCopyInto(*out) + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(LimitsParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QuietTime != nil { + in, out := &in.QuietTime, &out.QuietTime + *out = new(QuietTimeParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppParameters. +func (in *AppParameters) DeepCopy() *AppParameters { + if in == nil { + return nil + } + out := new(AppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppSpec) DeepCopyInto(out *AppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppSpec. 
+func (in *AppSpec) DeepCopy() *AppSpec { + if in == nil { + return nil + } + out := new(AppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppStatus) DeepCopyInto(out *AppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatus. +func (in *AppStatus) DeepCopy() *AppStatus { + if in == nil { + return nil + } + out := new(AppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CampaignHookInitParameters) DeepCopyInto(out *CampaignHookInitParameters) { + *out = *in + if in.LambdaFunctionName != nil { + in, out := &in.LambdaFunctionName, &out.LambdaFunctionName + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.WebURL != nil { + in, out := &in.WebURL, &out.WebURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CampaignHookInitParameters. +func (in *CampaignHookInitParameters) DeepCopy() *CampaignHookInitParameters { + if in == nil { + return nil + } + out := new(CampaignHookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CampaignHookObservation) DeepCopyInto(out *CampaignHookObservation) { + *out = *in + if in.LambdaFunctionName != nil { + in, out := &in.LambdaFunctionName, &out.LambdaFunctionName + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.WebURL != nil { + in, out := &in.WebURL, &out.WebURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CampaignHookObservation. +func (in *CampaignHookObservation) DeepCopy() *CampaignHookObservation { + if in == nil { + return nil + } + out := new(CampaignHookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CampaignHookParameters) DeepCopyInto(out *CampaignHookParameters) { + *out = *in + if in.LambdaFunctionName != nil { + in, out := &in.LambdaFunctionName, &out.LambdaFunctionName + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.WebURL != nil { + in, out := &in.WebURL, &out.WebURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CampaignHookParameters. +func (in *CampaignHookParameters) DeepCopy() *CampaignHookParameters { + if in == nil { + return nil + } + out := new(CampaignHookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LimitsInitParameters) DeepCopyInto(out *LimitsInitParameters) { + *out = *in + if in.Daily != nil { + in, out := &in.Daily, &out.Daily + *out = new(float64) + **out = **in + } + if in.MaximumDuration != nil { + in, out := &in.MaximumDuration, &out.MaximumDuration + *out = new(float64) + **out = **in + } + if in.MessagesPerSecond != nil { + in, out := &in.MessagesPerSecond, &out.MessagesPerSecond + *out = new(float64) + **out = **in + } + if in.Total != nil { + in, out := &in.Total, &out.Total + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsInitParameters. +func (in *LimitsInitParameters) DeepCopy() *LimitsInitParameters { + if in == nil { + return nil + } + out := new(LimitsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitsObservation) DeepCopyInto(out *LimitsObservation) { + *out = *in + if in.Daily != nil { + in, out := &in.Daily, &out.Daily + *out = new(float64) + **out = **in + } + if in.MaximumDuration != nil { + in, out := &in.MaximumDuration, &out.MaximumDuration + *out = new(float64) + **out = **in + } + if in.MessagesPerSecond != nil { + in, out := &in.MessagesPerSecond, &out.MessagesPerSecond + *out = new(float64) + **out = **in + } + if in.Total != nil { + in, out := &in.Total, &out.Total + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsObservation. +func (in *LimitsObservation) DeepCopy() *LimitsObservation { + if in == nil { + return nil + } + out := new(LimitsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LimitsParameters) DeepCopyInto(out *LimitsParameters) { + *out = *in + if in.Daily != nil { + in, out := &in.Daily, &out.Daily + *out = new(float64) + **out = **in + } + if in.MaximumDuration != nil { + in, out := &in.MaximumDuration, &out.MaximumDuration + *out = new(float64) + **out = **in + } + if in.MessagesPerSecond != nil { + in, out := &in.MessagesPerSecond, &out.MessagesPerSecond + *out = new(float64) + **out = **in + } + if in.Total != nil { + in, out := &in.Total, &out.Total + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitsParameters. +func (in *LimitsParameters) DeepCopy() *LimitsParameters { + if in == nil { + return nil + } + out := new(LimitsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuietTimeInitParameters) DeepCopyInto(out *QuietTimeInitParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuietTimeInitParameters. +func (in *QuietTimeInitParameters) DeepCopy() *QuietTimeInitParameters { + if in == nil { + return nil + } + out := new(QuietTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuietTimeObservation) DeepCopyInto(out *QuietTimeObservation) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuietTimeObservation. +func (in *QuietTimeObservation) DeepCopy() *QuietTimeObservation { + if in == nil { + return nil + } + out := new(QuietTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuietTimeParameters) DeepCopyInto(out *QuietTimeParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuietTimeParameters. +func (in *QuietTimeParameters) DeepCopy() *QuietTimeParameters { + if in == nil { + return nil + } + out := new(QuietTimeParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/pinpoint/v1beta2/zz_generated.managed.go b/apis/pinpoint/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..9cb1884f54 --- /dev/null +++ b/apis/pinpoint/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this App. +func (mg *App) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this App. 
+func (mg *App) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this App. +func (mg *App) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this App. +func (mg *App) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this App. +func (mg *App) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this App. +func (mg *App) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this App. +func (mg *App) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this App. +func (mg *App) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this App. +func (mg *App) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this App. +func (mg *App) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this App. +func (mg *App) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this App. 
+func (mg *App) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/pinpoint/v1beta2/zz_generated.managedlist.go b/apis/pinpoint/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..5cfbc83e84 --- /dev/null +++ b/apis/pinpoint/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AppList. +func (l *AppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/pinpoint/v1beta2/zz_groupversion_info.go b/apis/pinpoint/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..d7d08a2097 --- /dev/null +++ b/apis/pinpoint/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=pinpoint.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "pinpoint.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/qldb/v1beta1/zz_generated.conversion_hubs.go b/apis/qldb/v1beta1/zz_generated.conversion_hubs.go index f384c7c83a..21dc93e474 100755 --- a/apis/qldb/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/qldb/v1beta1/zz_generated.conversion_hubs.go @@ -8,6 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Ledger) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Stream) Hub() {} diff --git a/apis/qldb/v1beta1/zz_generated.conversion_spokes.go b/apis/qldb/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..8900b92697 --- /dev/null +++ b/apis/qldb/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Stream to the hub type. +func (tr *Stream) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Stream type. 
+func (tr *Stream) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/qldb/v1beta2/zz_generated.conversion_hubs.go b/apis/qldb/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2f8b78f8b9 --- /dev/null +++ b/apis/qldb/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Stream) Hub() {} diff --git a/apis/qldb/v1beta2/zz_generated.deepcopy.go b/apis/qldb/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..e42518ee08 --- /dev/null +++ b/apis/qldb/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,457 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisConfigurationInitParameters) DeepCopyInto(out *KinesisConfigurationInitParameters) { + *out = *in + if in.AggregationEnabled != nil { + in, out := &in.AggregationEnabled, &out.AggregationEnabled + *out = new(bool) + **out = **in + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamArnRef != nil { + in, out := &in.StreamArnRef, &out.StreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamArnSelector != nil { + in, out := &in.StreamArnSelector, &out.StreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisConfigurationInitParameters. +func (in *KinesisConfigurationInitParameters) DeepCopy() *KinesisConfigurationInitParameters { + if in == nil { + return nil + } + out := new(KinesisConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisConfigurationObservation) DeepCopyInto(out *KinesisConfigurationObservation) { + *out = *in + if in.AggregationEnabled != nil { + in, out := &in.AggregationEnabled, &out.AggregationEnabled + *out = new(bool) + **out = **in + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisConfigurationObservation. +func (in *KinesisConfigurationObservation) DeepCopy() *KinesisConfigurationObservation { + if in == nil { + return nil + } + out := new(KinesisConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisConfigurationParameters) DeepCopyInto(out *KinesisConfigurationParameters) { + *out = *in + if in.AggregationEnabled != nil { + in, out := &in.AggregationEnabled, &out.AggregationEnabled + *out = new(bool) + **out = **in + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamArnRef != nil { + in, out := &in.StreamArnRef, &out.StreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamArnSelector != nil { + in, out := &in.StreamArnSelector, &out.StreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisConfigurationParameters. +func (in *KinesisConfigurationParameters) DeepCopy() *KinesisConfigurationParameters { + if in == nil { + return nil + } + out := new(KinesisConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Stream) DeepCopyInto(out *Stream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stream. +func (in *Stream) DeepCopy() *Stream { + if in == nil { + return nil + } + out := new(Stream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Stream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInitParameters) DeepCopyInto(out *StreamInitParameters) { + *out = *in + if in.ExclusiveEndTime != nil { + in, out := &in.ExclusiveEndTime, &out.ExclusiveEndTime + *out = new(string) + **out = **in + } + if in.InclusiveStartTime != nil { + in, out := &in.InclusiveStartTime, &out.InclusiveStartTime + *out = new(string) + **out = **in + } + if in.KinesisConfiguration != nil { + in, out := &in.KinesisConfiguration, &out.KinesisConfiguration + *out = new(KinesisConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LedgerName != nil { + in, out := &in.LedgerName, &out.LedgerName + *out = new(string) + **out = **in + } + if in.LedgerNameRef != nil { + in, out := &in.LedgerNameRef, &out.LedgerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LedgerNameSelector != nil { + in, out := &in.LedgerNameSelector, &out.LedgerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInitParameters. 
+func (in *StreamInitParameters) DeepCopy() *StreamInitParameters { + if in == nil { + return nil + } + out := new(StreamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamList) DeepCopyInto(out *StreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Stream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamList. +func (in *StreamList) DeepCopy() *StreamList { + if in == nil { + return nil + } + out := new(StreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamObservation) DeepCopyInto(out *StreamObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ExclusiveEndTime != nil { + in, out := &in.ExclusiveEndTime, &out.ExclusiveEndTime + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InclusiveStartTime != nil { + in, out := &in.InclusiveStartTime, &out.InclusiveStartTime + *out = new(string) + **out = **in + } + if in.KinesisConfiguration != nil { + in, out := &in.KinesisConfiguration, &out.KinesisConfiguration + *out = new(KinesisConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.LedgerName != nil { + in, out := &in.LedgerName, &out.LedgerName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamObservation. 
+func (in *StreamObservation) DeepCopy() *StreamObservation { + if in == nil { + return nil + } + out := new(StreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamParameters) DeepCopyInto(out *StreamParameters) { + *out = *in + if in.ExclusiveEndTime != nil { + in, out := &in.ExclusiveEndTime, &out.ExclusiveEndTime + *out = new(string) + **out = **in + } + if in.InclusiveStartTime != nil { + in, out := &in.InclusiveStartTime, &out.InclusiveStartTime + *out = new(string) + **out = **in + } + if in.KinesisConfiguration != nil { + in, out := &in.KinesisConfiguration, &out.KinesisConfiguration + *out = new(KinesisConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.LedgerName != nil { + in, out := &in.LedgerName, &out.LedgerName + *out = new(string) + **out = **in + } + if in.LedgerNameRef != nil { + in, out := &in.LedgerNameRef, &out.LedgerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LedgerNameSelector != nil { + in, out := &in.LedgerNameSelector, &out.LedgerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = 
nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamParameters. +func (in *StreamParameters) DeepCopy() *StreamParameters { + if in == nil { + return nil + } + out := new(StreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamSpec) DeepCopyInto(out *StreamSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamSpec. +func (in *StreamSpec) DeepCopy() *StreamSpec { + if in == nil { + return nil + } + out := new(StreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamStatus) DeepCopyInto(out *StreamStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamStatus. +func (in *StreamStatus) DeepCopy() *StreamStatus { + if in == nil { + return nil + } + out := new(StreamStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/qldb/v1beta2/zz_generated.managed.go b/apis/qldb/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..0867e0bba5 --- /dev/null +++ b/apis/qldb/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Stream. +func (mg *Stream) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Stream. +func (mg *Stream) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Stream. +func (mg *Stream) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Stream. +func (mg *Stream) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Stream. +func (mg *Stream) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Stream. +func (mg *Stream) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Stream. +func (mg *Stream) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Stream. +func (mg *Stream) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Stream. +func (mg *Stream) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Stream. +func (mg *Stream) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Stream. +func (mg *Stream) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Stream. 
+func (mg *Stream) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/qldb/v1beta2/zz_generated.managedlist.go b/apis/qldb/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..8e2be3708a --- /dev/null +++ b/apis/qldb/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this StreamList. +func (l *StreamList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/qldb/v1beta2/zz_generated.resolvers.go b/apis/qldb/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..fb35f8ea73 --- /dev/null +++ b/apis/qldb/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,149 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Stream. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Stream) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.KinesisConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KinesisConfiguration.StreamArn), + Extract: common.TerraformID(), + Reference: mg.Spec.ForProvider.KinesisConfiguration.StreamArnRef, + Selector: mg.Spec.ForProvider.KinesisConfiguration.StreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KinesisConfiguration.StreamArn") + } + mg.Spec.ForProvider.KinesisConfiguration.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KinesisConfiguration.StreamArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("qldb.aws.upbound.io", "v1beta1", "Ledger", "LedgerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LedgerName), + Extract: common.TerraformID(), + Reference: mg.Spec.ForProvider.LedgerNameRef, + Selector: mg.Spec.ForProvider.LedgerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LedgerName") + } + mg.Spec.ForProvider.LedgerName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.LedgerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.KinesisConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kinesis.aws.upbound.io", "v1beta2", "Stream", "StreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KinesisConfiguration.StreamArn), + Extract: common.TerraformID(), + Reference: mg.Spec.InitProvider.KinesisConfiguration.StreamArnRef, + Selector: mg.Spec.InitProvider.KinesisConfiguration.StreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KinesisConfiguration.StreamArn") + } + mg.Spec.InitProvider.KinesisConfiguration.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KinesisConfiguration.StreamArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("qldb.aws.upbound.io", "v1beta1", "Ledger", "LedgerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LedgerName), + Extract: common.TerraformID(), + Reference: mg.Spec.InitProvider.LedgerNameRef, + Selector: mg.Spec.InitProvider.LedgerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LedgerName") + } + mg.Spec.InitProvider.LedgerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LedgerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/qldb/v1beta2/zz_groupversion_info.go b/apis/qldb/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..ecf053a35f --- /dev/null +++ b/apis/qldb/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=qldb.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "qldb.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/qldb/v1beta2/zz_stream_terraformed.go b/apis/qldb/v1beta2/zz_stream_terraformed.go new file mode 100755 index 0000000000..ae7d8da6d5 --- /dev/null +++ b/apis/qldb/v1beta2/zz_stream_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Stream +func (mg *Stream) GetTerraformResourceType() string { + return "aws_qldb_stream" +} + +// GetConnectionDetailsMapping for this Stream +func (tr *Stream) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Stream +func (tr *Stream) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Stream +func (tr *Stream) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Stream +func (tr *Stream) GetID() string { + if tr.Status.AtProvider.ID == nil 
{ + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Stream +func (tr *Stream) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Stream +func (tr *Stream) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Stream +func (tr *Stream) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Stream +func (tr *Stream) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Stream using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Stream) LateInitialize(attrs []byte) (bool, error) { + params := &StreamParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Stream) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/qldb/v1beta2/zz_stream_types.go b/apis/qldb/v1beta2/zz_stream_types.go new file mode 100755 index 0000000000..6648050ee5 --- /dev/null +++ b/apis/qldb/v1beta2/zz_stream_types.go @@ -0,0 +1,263 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type KinesisConfigurationInitParameters struct { + + // Enables QLDB to publish multiple data records in a single Kinesis Data Streams record, increasing the number of records sent per API call. Default: true. + AggregationEnabled *bool `json:"aggregationEnabled,omitempty" tf:"aggregation_enabled,omitempty"` + + // The Amazon Resource Name (ARN) of the Kinesis Data Streams resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` + + // Reference to a Stream in kinesis to populate streamArn. 
+ // +kubebuilder:validation:Optional + StreamArnRef *v1.Reference `json:"streamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnSelector *v1.Selector `json:"streamArnSelector,omitempty" tf:"-"` +} + +type KinesisConfigurationObservation struct { + + // Enables QLDB to publish multiple data records in a single Kinesis Data Streams record, increasing the number of records sent per API call. Default: true. + AggregationEnabled *bool `json:"aggregationEnabled,omitempty" tf:"aggregation_enabled,omitempty"` + + // The Amazon Resource Name (ARN) of the Kinesis Data Streams resource. + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` +} + +type KinesisConfigurationParameters struct { + + // Enables QLDB to publish multiple data records in a single Kinesis Data Streams record, increasing the number of records sent per API call. Default: true. + // +kubebuilder:validation:Optional + AggregationEnabled *bool `json:"aggregationEnabled,omitempty" tf:"aggregation_enabled,omitempty"` + + // The Amazon Resource Name (ARN) of the Kinesis Data Streams resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kinesis/v1beta2.Stream + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + // +kubebuilder:validation:Optional + StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"` + + // Reference to a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnRef *v1.Reference `json:"streamArnRef,omitempty" tf:"-"` + + // Selector for a Stream in kinesis to populate streamArn. + // +kubebuilder:validation:Optional + StreamArnSelector *v1.Selector `json:"streamArnSelector,omitempty" tf:"-"` +} + +type StreamInitParameters struct { + + // The exclusive date and time that specifies when the stream ends. 
If you don't define this parameter, the stream runs indefinitely until you cancel it. It must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: "2019-06-13T21:36:34Z". + ExclusiveEndTime *string `json:"exclusiveEndTime,omitempty" tf:"exclusive_end_time,omitempty"` + + // The inclusive start date and time from which to start streaming journal data. This parameter must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: "2019-06-13T21:36:34Z". This cannot be in the future and must be before exclusive_end_time. If you provide a value that is before the ledger's CreationDateTime, QLDB effectively defaults it to the ledger's CreationDateTime. + InclusiveStartTime *string `json:"inclusiveStartTime,omitempty" tf:"inclusive_start_time,omitempty"` + + // The configuration settings of the Kinesis Data Streams destination for your stream request. Documented below. + KinesisConfiguration *KinesisConfigurationInitParameters `json:"kinesisConfiguration,omitempty" tf:"kinesis_configuration,omitempty"` + + // The name of the QLDB ledger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/qldb/v1beta1.Ledger + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + LedgerName *string `json:"ledgerName,omitempty" tf:"ledger_name,omitempty"` + + // Reference to a Ledger in qldb to populate ledgerName. + // +kubebuilder:validation:Optional + LedgerNameRef *v1.Reference `json:"ledgerNameRef,omitempty" tf:"-"` + + // Selector for a Ledger in qldb to populate ledgerName. + // +kubebuilder:validation:Optional + LedgerNameSelector *v1.Selector `json:"ledgerNameSelector,omitempty" tf:"-"` + + // The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal stream to write data records to a Kinesis Data Streams resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The name that you want to assign to the QLDB journal stream. User-defined names can help identify and indicate the purpose of a stream. Your stream name must be unique among other active streams for a given ledger. Stream names have the same naming constraints as ledger names, as defined in the Amazon QLDB Developer Guide. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StreamObservation struct { + + // The ARN of the QLDB Stream. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The exclusive date and time that specifies when the stream ends. If you don't define this parameter, the stream runs indefinitely until you cancel it. It must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: "2019-06-13T21:36:34Z". + ExclusiveEndTime *string `json:"exclusiveEndTime,omitempty" tf:"exclusive_end_time,omitempty"` + + // The ID of the QLDB Stream. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The inclusive start date and time from which to start streaming journal data. This parameter must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: "2019-06-13T21:36:34Z". 
This cannot be in the future and must be before exclusive_end_time. If you provide a value that is before the ledger's CreationDateTime, QLDB effectively defaults it to the ledger's CreationDateTime. + InclusiveStartTime *string `json:"inclusiveStartTime,omitempty" tf:"inclusive_start_time,omitempty"` + + // The configuration settings of the Kinesis Data Streams destination for your stream request. Documented below. + KinesisConfiguration *KinesisConfigurationObservation `json:"kinesisConfiguration,omitempty" tf:"kinesis_configuration,omitempty"` + + // The name of the QLDB ledger. + LedgerName *string `json:"ledgerName,omitempty" tf:"ledger_name,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal stream to write data records to a Kinesis Data Streams resource. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The name that you want to assign to the QLDB journal stream. User-defined names can help identify and indicate the purpose of a stream. Your stream name must be unique among other active streams for a given ledger. Stream names have the same naming constraints as ledger names, as defined in the Amazon QLDB Developer Guide. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type StreamParameters struct { + + // The exclusive date and time that specifies when the stream ends. If you don't define this parameter, the stream runs indefinitely until you cancel it. It must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: "2019-06-13T21:36:34Z". 
+ // +kubebuilder:validation:Optional + ExclusiveEndTime *string `json:"exclusiveEndTime,omitempty" tf:"exclusive_end_time,omitempty"` + + // The inclusive start date and time from which to start streaming journal data. This parameter must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: "2019-06-13T21:36:34Z". This cannot be in the future and must be before exclusive_end_time. If you provide a value that is before the ledger's CreationDateTime, QLDB effectively defaults it to the ledger's CreationDateTime. + // +kubebuilder:validation:Optional + InclusiveStartTime *string `json:"inclusiveStartTime,omitempty" tf:"inclusive_start_time,omitempty"` + + // The configuration settings of the Kinesis Data Streams destination for your stream request. Documented below. + // +kubebuilder:validation:Optional + KinesisConfiguration *KinesisConfigurationParameters `json:"kinesisConfiguration,omitempty" tf:"kinesis_configuration,omitempty"` + + // The name of the QLDB ledger. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/qldb/v1beta1.Ledger + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + // +kubebuilder:validation:Optional + LedgerName *string `json:"ledgerName,omitempty" tf:"ledger_name,omitempty"` + + // Reference to a Ledger in qldb to populate ledgerName. + // +kubebuilder:validation:Optional + LedgerNameRef *v1.Reference `json:"ledgerNameRef,omitempty" tf:"-"` + + // Selector for a Ledger in qldb to populate ledgerName. + // +kubebuilder:validation:Optional + LedgerNameSelector *v1.Selector `json:"ledgerNameSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal stream to write data records to a Kinesis Data Streams resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // The name that you want to assign to the QLDB journal stream. User-defined names can help identify and indicate the purpose of a stream. Your stream name must be unique among other active streams for a given ledger. Stream names have the same naming constraints as ledger names, as defined in the Amazon QLDB Developer Guide. + // +kubebuilder:validation:Optional + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// StreamSpec defines the desired state of Stream +type StreamSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StreamParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StreamInitParameters `json:"initProvider,omitempty"` +} + +// StreamStatus defines the observed state of Stream. +type StreamStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StreamObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Stream is the Schema for the Streams API. Provides a QLDB Stream resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Stream struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.inclusiveStartTime) || (has(self.initProvider) && has(self.initProvider.inclusiveStartTime))",message="spec.forProvider.inclusiveStartTime is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.kinesisConfiguration) || (has(self.initProvider) && has(self.initProvider.kinesisConfiguration))",message="spec.forProvider.kinesisConfiguration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.streamName) || (has(self.initProvider) && has(self.initProvider.streamName))",message="spec.forProvider.streamName is a required parameter" + Spec StreamSpec `json:"spec"` + Status StreamStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StreamList contains a list of Streams +type StreamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Stream `json:"items"` +} + +// Repository type metadata. +var ( + Stream_Kind = "Stream" + Stream_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Stream_Kind}.String() + Stream_KindAPIVersion = Stream_Kind + "." + CRDGroupVersion.String() + Stream_GroupVersionKind = CRDGroupVersion.WithKind(Stream_Kind) +) + +func init() { + SchemeBuilder.Register(&Stream{}, &StreamList{}) +} diff --git a/apis/rds/v1beta1/zz_clusteractivitystream_types.go b/apis/rds/v1beta1/zz_clusteractivitystream_types.go index 922211c63b..3ed1fa5f1d 100755 --- a/apis/rds/v1beta1/zz_clusteractivitystream_types.go +++ b/apis/rds/v1beta1/zz_clusteractivitystream_types.go @@ -34,7 +34,7 @@ type ClusterActivityStreamInitParameters struct { Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` // The Amazon Resource Name (ARN) of the DB cluster. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` @@ -97,7 +97,7 @@ type ClusterActivityStreamParameters struct { Region *string `json:"region" tf:"-"` // The Amazon Resource Name (ARN) of the DB cluster. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` diff --git a/apis/rds/v1beta1/zz_clusterendpoint_types.go b/apis/rds/v1beta1/zz_clusterendpoint_types.go index 1b1d8839bb..74bf6301dd 100755 --- a/apis/rds/v1beta1/zz_clusterendpoint_types.go +++ b/apis/rds/v1beta1/zz_clusterendpoint_types.go @@ -16,7 +16,7 @@ import ( type ClusterEndpointInitParameters struct { // The cluster identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` @@ -81,7 +81,7 @@ type ClusterEndpointObservation struct { type ClusterEndpointParameters struct { // The cluster identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` diff --git a/apis/rds/v1beta1/zz_clusterinstance_types.go b/apis/rds/v1beta1/zz_clusterinstance_types.go index be7569f62c..8bf7d95f7f 100755 --- a/apis/rds/v1beta1/zz_clusterinstance_types.go +++ b/apis/rds/v1beta1/zz_clusterinstance_types.go @@ -28,7 +28,7 @@ type ClusterInstanceInitParameters struct { CACertIdentifier *string `json:"caCertIdentifier,omitempty" tf:"ca_cert_identifier,omitempty"` // Identifier of the aws_rds_cluster in which to launch this instance. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` @@ -255,7 +255,7 @@ type ClusterInstanceParameters struct { CACertIdentifier *string `json:"caCertIdentifier,omitempty" tf:"ca_cert_identifier,omitempty"` // Identifier of the aws_rds_cluster in which to launch this instance. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` diff --git a/apis/rds/v1beta1/zz_clusterroleassociation_types.go b/apis/rds/v1beta1/zz_clusterroleassociation_types.go index 4b9f433146..19c4dad19b 100755 --- a/apis/rds/v1beta1/zz_clusterroleassociation_types.go +++ b/apis/rds/v1beta1/zz_clusterroleassociation_types.go @@ -16,7 +16,7 @@ import ( type ClusterRoleAssociationInitParameters struct { // DB Cluster Identifier to associate with the IAM Role. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` @@ -63,7 +63,7 @@ type ClusterRoleAssociationObservation struct { type ClusterRoleAssociationParameters struct { // DB Cluster Identifier to associate with the IAM Role. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` diff --git a/apis/rds/v1beta1/zz_clustersnapshot_types.go b/apis/rds/v1beta1/zz_clustersnapshot_types.go index f726c84502..e03b153ce6 100755 --- a/apis/rds/v1beta1/zz_clustersnapshot_types.go +++ b/apis/rds/v1beta1/zz_clustersnapshot_types.go @@ -16,7 +16,7 @@ import ( type ClusterSnapshotInitParameters struct { // The DB Cluster Identifier from which to take the snapshot. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` @@ -96,7 +96,7 @@ type ClusterSnapshotObservation struct { type ClusterSnapshotParameters struct { // The DB Cluster Identifier from which to take the snapshot. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` diff --git a/apis/rds/v1beta1/zz_dbinstanceautomatedbackupsreplication_types.go b/apis/rds/v1beta1/zz_dbinstanceautomatedbackupsreplication_types.go index 892071378b..dd5b78432a 100755 --- a/apis/rds/v1beta1/zz_dbinstanceautomatedbackupsreplication_types.go +++ b/apis/rds/v1beta1/zz_dbinstanceautomatedbackupsreplication_types.go @@ -34,7 +34,7 @@ type DBInstanceAutomatedBackupsReplicationInitParameters struct { RetentionPeriod *float64 `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"` // The Amazon Resource Name (ARN) of the source DB instance for the replicated automated backups, for example, arn:aws:rds:us-west-2:123456789012:db:mydatabase. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) SourceDBInstanceArn *string `json:"sourceDbInstanceArn,omitempty" tf:"source_db_instance_arn,omitempty"` @@ -94,7 +94,7 @@ type DBInstanceAutomatedBackupsReplicationParameters struct { RetentionPeriod *float64 `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"` // The Amazon Resource Name (ARN) of the source DB instance for the replicated automated backups, for example, arn:aws:rds:us-west-2:123456789012:db:mydatabase. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional SourceDBInstanceArn *string `json:"sourceDbInstanceArn,omitempty" tf:"source_db_instance_arn,omitempty"` diff --git a/apis/rds/v1beta1/zz_generated.conversion_hubs.go b/apis/rds/v1beta1/zz_generated.conversion_hubs.go index daad7e07b3..2a0d20fcf7 100755 --- a/apis/rds/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/rds/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Cluster) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ClusterActivityStream) Hub() {} @@ -51,9 +48,6 @@ func (tr *ParameterGroup) Hub() {} // Hub marks this type as a conversion hub. func (tr *Proxy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ProxyDefaultTargetGroup) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ProxyEndpoint) Hub() {} diff --git a/apis/rds/v1beta1/zz_generated.conversion_spokes.go b/apis/rds/v1beta1/zz_generated.conversion_spokes.go index ad52b5df40..9dda3edc33 100755 --- a/apis/rds/v1beta1/zz_generated.conversion_spokes.go +++ b/apis/rds/v1beta1/zz_generated.conversion_spokes.go @@ -13,6 +13,26 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" ) +// ConvertTo converts this Cluster to the hub type. 
+func (tr *Cluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Cluster type. +func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + // ConvertTo converts this Instance to the hub type. func (tr *Instance) ConvertTo(dstRaw conversion.Hub) error { spokeVersion := tr.GetObjectKind().GroupVersionKind().Version @@ -32,3 +52,23 @@ func (tr *Instance) ConvertFrom(srcRaw conversion.Hub) error { } return nil } + +// ConvertTo converts this ProxyDefaultTargetGroup to the hub type. +func (tr *ProxyDefaultTargetGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ProxyDefaultTargetGroup type. 
+func (tr *ProxyDefaultTargetGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/rds/v1beta1/zz_generated.resolvers.go b/apis/rds/v1beta1/zz_generated.resolvers.go index d82a73bc49..169902945c 100644 --- a/apis/rds/v1beta1/zz_generated.resolvers.go +++ b/apis/rds/v1beta1/zz_generated.resolvers.go @@ -372,7 +372,7 @@ func (mg *ClusterActivityStream) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -410,7 +410,7 @@ func (mg *ClusterActivityStream) ResolveReferences(ctx context.Context, c client mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -441,7 +441,7 @@ func (mg *ClusterEndpoint) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -460,7 +460,7 @@ func (mg *ClusterEndpoint) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.ClusterIdentifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ClusterIdentifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -491,7 +491,7 @@ func (mg *ClusterInstance) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -586,7 +586,7 @@ func (mg *ClusterInstance) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.PerformanceInsightsKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PerformanceInsightsKMSKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and 
its list for reference resolution") } @@ -693,7 +693,7 @@ func (mg *ClusterRoleAssociation) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -731,7 +731,7 @@ func (mg *ClusterRoleAssociation) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -781,7 +781,7 @@ func (mg *ClusterSnapshot) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -800,7 +800,7 @@ func (mg *ClusterSnapshot) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.DBClusterIdentifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DBClusterIdentifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = 
apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -850,7 +850,7 @@ func (mg *DBInstanceAutomatedBackupsReplication) ResolveReferences(ctx context.C mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -888,7 +888,7 @@ func (mg *DBInstanceAutomatedBackupsReplication) ResolveReferences(ctx context.C mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1057,7 +1057,7 @@ func (mg *GlobalCluster) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1076,7 +1076,7 @@ func (mg *GlobalCluster) ResolveReferences(ctx context.Context, c client.Reader) 
mg.Spec.ForProvider.SourceDBClusterIdentifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SourceDBClusterIdentifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1348,7 +1348,7 @@ func (mg *InstanceRoleAssociation) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1386,7 +1386,7 @@ func (mg *InstanceRoleAssociation) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1687,7 +1687,7 @@ func (mg *ProxyTarget) ResolveReferences(ctx context.Context, c client.Reader) e var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1725,7 +1725,7 @@ func (mg *ProxyTarget) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.DBProxyName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DBProxyNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1775,7 +1775,7 @@ func (mg *Snapshot) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1794,7 +1794,7 @@ func (mg *Snapshot) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.DBInstanceIdentifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DBInstanceIdentifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Instance", "InstanceList") + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/rds/v1beta1/zz_globalcluster_types.go b/apis/rds/v1beta1/zz_globalcluster_types.go index 6150176fe3..5df8e69bf9 100755 --- a/apis/rds/v1beta1/zz_globalcluster_types.go +++ 
b/apis/rds/v1beta1/zz_globalcluster_types.go @@ -31,7 +31,7 @@ type GlobalClusterInitParameters struct { ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` // Amazon Resource Name (ARN) to use as the primary DB Cluster of the Global Cluster on creation. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) SourceDBClusterIdentifier *string `json:"sourceDbClusterIdentifier,omitempty" tf:"source_db_cluster_identifier,omitempty"` @@ -128,7 +128,7 @@ type GlobalClusterParameters struct { Region *string `json:"region" tf:"-"` // Amazon Resource Name (ARN) to use as the primary DB Cluster of the Global Cluster on creation. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional SourceDBClusterIdentifier *string `json:"sourceDbClusterIdentifier,omitempty" tf:"source_db_cluster_identifier,omitempty"` diff --git a/apis/rds/v1beta1/zz_instanceroleassociation_types.go b/apis/rds/v1beta1/zz_instanceroleassociation_types.go index 2fe85c2e2c..75a7f8eebd 100755 --- a/apis/rds/v1beta1/zz_instanceroleassociation_types.go +++ b/apis/rds/v1beta1/zz_instanceroleassociation_types.go @@ -16,7 +16,7 @@ import ( type InstanceRoleAssociationInitParameters struct { // DB Instance Identifier to associate with the IAM Role. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("identifier",false) DBInstanceIdentifier *string `json:"dbInstanceIdentifier,omitempty" tf:"db_instance_identifier,omitempty"` @@ -63,7 +63,7 @@ type InstanceRoleAssociationObservation struct { type InstanceRoleAssociationParameters struct { // DB Instance Identifier to associate with the IAM Role. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("identifier",false) // +kubebuilder:validation:Optional DBInstanceIdentifier *string `json:"dbInstanceIdentifier,omitempty" tf:"db_instance_identifier,omitempty"` diff --git a/apis/rds/v1beta1/zz_proxytarget_types.go b/apis/rds/v1beta1/zz_proxytarget_types.go index 9003ccfdaf..4df515f9f2 100755 --- a/apis/rds/v1beta1/zz_proxytarget_types.go +++ b/apis/rds/v1beta1/zz_proxytarget_types.go @@ -19,7 +19,7 @@ type ProxyTargetInitParameters struct { DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` // DB instance identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("identifier",false) DBInstanceIdentifier *string `json:"dbInstanceIdentifier,omitempty" tf:"db_instance_identifier,omitempty"` @@ -90,7 +90,7 @@ type ProxyTargetParameters struct { DBClusterIdentifier *string `json:"dbClusterIdentifier,omitempty" tf:"db_cluster_identifier,omitempty"` // DB instance identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("identifier",false) // +kubebuilder:validation:Optional DBInstanceIdentifier *string `json:"dbInstanceIdentifier,omitempty" tf:"db_instance_identifier,omitempty"` diff --git a/apis/rds/v1beta1/zz_snapshot_types.go b/apis/rds/v1beta1/zz_snapshot_types.go index 1af550f88b..f22d8ca3d0 100755 --- a/apis/rds/v1beta1/zz_snapshot_types.go +++ b/apis/rds/v1beta1/zz_snapshot_types.go @@ -16,7 +16,7 @@ import ( type SnapshotInitParameters struct { // The DB Instance Identifier from which to take the snapshot. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("identifier",false) DBInstanceIdentifier *string `json:"dbInstanceIdentifier,omitempty" tf:"db_instance_identifier,omitempty"` @@ -109,7 +109,7 @@ type SnapshotObservation struct { type SnapshotParameters struct { // The DB Instance Identifier from which to take the snapshot. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Instance + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("identifier",false) // +kubebuilder:validation:Optional DBInstanceIdentifier *string `json:"dbInstanceIdentifier,omitempty" tf:"db_instance_identifier,omitempty"` diff --git a/apis/rds/v1beta2/zz_cluster_terraformed.go b/apis/rds/v1beta2/zz_cluster_terraformed.go new file mode 100755 index 0000000000..8485fe41da --- /dev/null +++ b/apis/rds/v1beta2/zz_cluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "aws_rds_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"master_password": "masterPasswordSecretRef"} +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { 
+ if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/rds/v1beta2/zz_cluster_types.go b/apis/rds/v1beta2/zz_cluster_types.go new file mode 100755 index 0000000000..ce83dbad98 --- /dev/null +++ b/apis/rds/v1beta2/zz_cluster_types.go @@ -0,0 +1,1023 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClusterInitParameters struct { + + // The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. + AllocatedStorage *float64 `json:"allocatedStorage,omitempty" tf:"allocated_storage,omitempty"` + + // Enable to allow major engine version upgrades when changing engine versions. Defaults to false. 
+ AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty" tf:"allow_major_version_upgrade,omitempty"` + + // Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is false. See Amazon RDS Documentation for more information. + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // List of EC2 Availability Zones for the DB cluster storage where DB cluster instances can be created. + // We recommend specifying 3 AZs or using the if necessary. + // A maximum of 3 AZs can be configured. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Target backtrack window, in seconds. Only available for aurora and aurora-mysql engines currently. To disable backtracking, set this value to 0. Defaults to 0. Must be between 0 and 259200 (72 hours) + BacktrackWindow *float64 `json:"backtrackWindow,omitempty" tf:"backtrack_window,omitempty"` + + // Days to retain backups for. Default 1 + BackupRetentionPeriod *float64 `json:"backupRetentionPeriod,omitempty" tf:"backup_retention_period,omitempty"` + + // – List of RDS Instances that are a part of this cluster + // +listType=set + ClusterMembers []*string `json:"clusterMembers,omitempty" tf:"cluster_members,omitempty"` + + // – Copy all Cluster tags to snapshots. Default is false. + CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty" tf:"copy_tags_to_snapshot,omitempty"` + + // The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS User Guide. 
+ DBClusterInstanceClass *string `json:"dbClusterInstanceClass,omitempty" tf:"db_cluster_instance_class,omitempty"` + + // A cluster parameter group to associate with the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.ClusterParameterGroup + DBClusterParameterGroupName *string `json:"dbClusterParameterGroupName,omitempty" tf:"db_cluster_parameter_group_name,omitempty"` + + // Reference to a ClusterParameterGroup in rds to populate dbClusterParameterGroupName. + // +kubebuilder:validation:Optional + DBClusterParameterGroupNameRef *v1.Reference `json:"dbClusterParameterGroupNameRef,omitempty" tf:"-"` + + // Selector for a ClusterParameterGroup in rds to populate dbClusterParameterGroupName. + // +kubebuilder:validation:Optional + DBClusterParameterGroupNameSelector *v1.Selector `json:"dbClusterParameterGroupNameSelector,omitempty" tf:"-"` + + // Instance parameter group to associate with all instances of the DB cluster. The db_instance_parameter_group_name parameter is only valid in combination with the allow_major_version_upgrade parameter. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.ParameterGroup + DBInstanceParameterGroupName *string `json:"dbInstanceParameterGroupName,omitempty" tf:"db_instance_parameter_group_name,omitempty"` + + // Reference to a ParameterGroup in rds to populate dbInstanceParameterGroupName. + // +kubebuilder:validation:Optional + DBInstanceParameterGroupNameRef *v1.Reference `json:"dbInstanceParameterGroupNameRef,omitempty" tf:"-"` + + // Selector for a ParameterGroup in rds to populate dbInstanceParameterGroupName. + // +kubebuilder:validation:Optional + DBInstanceParameterGroupNameSelector *v1.Selector `json:"dbInstanceParameterGroupNameSelector,omitempty" tf:"-"` + + // DB subnet group to associate with this DB cluster. + // NOTE: This must match the db_subnet_group_name specified on every aws_rds_cluster_instance in the cluster. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.SubnetGroup + DBSubnetGroupName *string `json:"dbSubnetGroupName,omitempty" tf:"db_subnet_group_name,omitempty"` + + // Reference to a SubnetGroup in rds to populate dbSubnetGroupName. + // +kubebuilder:validation:Optional + DBSubnetGroupNameRef *v1.Reference `json:"dbSubnetGroupNameRef,omitempty" tf:"-"` + + // Selector for a SubnetGroup in rds to populate dbSubnetGroupName. + // +kubebuilder:validation:Optional + DBSubnetGroupNameSelector *v1.Selector `json:"dbSubnetGroupNameSelector,omitempty" tf:"-"` + + // For use with RDS Custom. + DBSystemID *string `json:"dbSystemId,omitempty" tf:"db_system_id,omitempty"` + + // Name for an automatically created database on cluster creation. There are different naming restrictions per database engine: RDS Naming Constraints + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Specifies whether to remove automated backups immediately after the DB cluster is deleted. Default is true. + DeleteAutomatedBackups *bool `json:"deleteAutomatedBackups,omitempty" tf:"delete_automated_backups,omitempty"` + + // If the DB cluster should have deletion protection enabled. + // The database can't be deleted when this value is set to true. + // The default is false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // The ID of the Directory Service Active Directory domain to create the cluster in. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The name of the IAM role to be used when making API calls to the Directory Service. + DomainIAMRoleName *string `json:"domainIamRoleName,omitempty" tf:"domain_iam_role_name,omitempty"` + + // Whether cluster should forward writes to an associated global cluster. Applied to secondary clusters to enable them to forward writes to an aws_rds_global_cluster's primary cluster. 
See the User Guide for Aurora for more information. + EnableGlobalWriteForwarding *bool `json:"enableGlobalWriteForwarding,omitempty" tf:"enable_global_write_forwarding,omitempty"` + + // Enable HTTP endpoint (data API). Only valid for some combinations of engine_mode, engine and engine_version and only available in some regions. See the Region and version availability section of the documentation. This option also does not work with any of these options specified: snapshot_identifier, replication_source_identifier, s3_import. + EnableHTTPEndpoint *bool `json:"enableHttpEndpoint,omitempty" tf:"enable_http_endpoint,omitempty"` + + // Whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.. See the User Guide for Aurora for more information. NOTE: Local write forwarding requires Aurora MySQL version 3.04 or higher. + EnableLocalWriteForwarding *bool `json:"enableLocalWriteForwarding,omitempty" tf:"enable_local_write_forwarding,omitempty"` + + // Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: audit, error, general, slowquery, postgresql (PostgreSQL). + // +listType=set + EnabledCloudwatchLogsExports []*string `json:"enabledCloudwatchLogsExports,omitempty" tf:"enabled_cloudwatch_logs_exports,omitempty"` + + // Name of the database engine to be used for this DB cluster. Valid Values: aurora-mysql, aurora-postgresql, mysql, postgres. (Note that mysql and postgres are Multi-AZ RDS clusters). + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Database engine mode. Valid values: global (only valid for Aurora MySQL 1.21 and earlier), parallelquery, provisioned, serverless. Defaults to: provisioned. See the RDS User Guide for limitations when using serverless. + EngineMode *string `json:"engineMode,omitempty" tf:"engine_mode,omitempty"` + + // Database engine version. 
Updating this argument results in an outage. See the Aurora MySQL and Aurora Postgres documentation for your configured engine to determine this value, or by running aws rds describe-db-engine-versions. For example with Aurora MySQL 2, a potential value for this argument is 5.7.mysql_aurora.2.03.2. The value can contain a partial version where supported by the API. The actual engine version used is returned in the attribute engine_version_actual, , see Attribute Reference below. + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Name of your final DB snapshot when this DB cluster is deleted. If omitted, no final snapshot will be made. + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // Global cluster identifier specified on aws_rds_global_cluster. + GlobalClusterIdentifier *string `json:"globalClusterIdentifier,omitempty" tf:"global_cluster_identifier,omitempty"` + + // Specifies whether or not mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. Please see AWS Documentation for availability and limitations. + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty" tf:"iam_database_authentication_enabled,omitempty"` + + // Amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid Iops values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide. (This setting is required to create a Multi-AZ DB cluster). Must be a multiple between .5 and 50 of the storage amount for the DB cluster. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // ARN for the KMS encryption key. When specifying kms_key_id, storage_encrypted needs to be set to true. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Set to true to allow RDS to manage the master user password in Secrets Manager. Cannot be set if master_password is provided. + ManageMasterUserPassword *bool `json:"manageMasterUserPassword,omitempty" tf:"manage_master_user_password,omitempty"` + + // Password for the master DB user. Note that this may show up in logs, and it will be stored in the state file. Please refer to the RDS Naming Constraints. Cannot be set if manage_master_user_password is set to true. + // Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. + MasterPasswordSecretRef *v1.SecretKeySelector `json:"masterPasswordSecretRef,omitempty" tf:"-"` + + // Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If not specified, the default KMS key for your Amazon Web Services account is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key_id",true) + MasterUserSecretKMSKeyID *string `json:"masterUserSecretKmsKeyId,omitempty" tf:"master_user_secret_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate masterUserSecretKmsKeyId. 
+ // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyIDRef *v1.Reference `json:"masterUserSecretKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate masterUserSecretKmsKeyId. + // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyIDSelector *v1.Selector `json:"masterUserSecretKmsKeyIdSelector,omitempty" tf:"-"` + + // Username for the master DB user. Please refer to the RDS Naming Constraints. This argument does not support in-place updates and cannot be changed during a restore from snapshot. + MasterUsername *string `json:"masterUsername,omitempty" tf:"master_username,omitempty"` + + // Network type of the cluster. Valid values: IPV4, DUAL. + NetworkType *string `json:"networkType,omitempty" tf:"network_type,omitempty"` + + // Port on which the DB accepts connections + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 + PreferredBackupWindow *string `json:"preferredBackupWindow,omitempty" tf:"preferred_backup_window,omitempty"` + + // Weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty" tf:"preferred_maintenance_window,omitempty"` + + // ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. + ReplicationSourceIdentifier *string `json:"replicationSourceIdentifier,omitempty" tf:"replication_source_identifier,omitempty"` + + // Nested attribute for point in time restore. More details below. 
+ RestoreToPointInTime *ClusterRestoreToPointInTimeInitParameters `json:"restoreToPointInTime,omitempty" tf:"restore_to_point_in_time,omitempty"` + + // Port on which the DB accepts connections + S3Import *ClusterS3ImportInitParameters `json:"s3Import,omitempty" tf:"s3_import,omitempty"` + + // Nested attribute with scaling properties. Only valid when engine_mode is set to serverless. More details below. + ScalingConfiguration *ScalingConfigurationInitParameters `json:"scalingConfiguration,omitempty" tf:"scaling_configuration,omitempty"` + + // Nested attribute with scaling properties for ServerlessV2. Only valid when engine_mode is set to provisioned. More details below. + Serverlessv2ScalingConfiguration *Serverlessv2ScalingConfigurationInitParameters `json:"serverlessv2ScalingConfiguration,omitempty" tf:"serverlessv2_scaling_configuration,omitempty"` + + // Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from final_snapshot_identifier. Default is false. + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot. Conflicts with global_cluster_identifier. Clusters cannot be restored from snapshot and joined to an existing global cluster in a single operation. See the AWS documentation or the Global Cluster Restored From Snapshot example for instructions on building a global cluster starting with a snapshot. + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // The source region for an encrypted replica DB cluster. 
+ SourceRegion *string `json:"sourceRegion,omitempty" tf:"source_region,omitempty"` + + // Specifies whether the DB cluster is encrypted. The default is false for provisioned engine_mode and true for serverless engine_mode. When restoring an unencrypted snapshot_identifier, the kms_key_id argument must be provided to encrypt the restored cluster. + StorageEncrypted *bool `json:"storageEncrypted,omitempty" tf:"storage_encrypted,omitempty"` + + // (Forces new for Multi-AZ DB clusters) Specifies the storage type to be associated with the DB cluster. For Aurora DB clusters, storage_type modifications can be done in-place. For Multi-AZ DB Clusters, the iops argument must also be set. Valid values are: "", aurora-iopt1 (Aurora DB Clusters); io1, io2 (Multi-AZ DB Clusters). Default: "" (Aurora DB Clusters); io1 (Multi-AZ DB Clusters). + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of VPC security groups to associate with the Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type ClusterMasterUserSecretInitParameters struct { +} + +type ClusterMasterUserSecretObservation struct { + + // Amazon Web Services KMS key identifier that is used to encrypt the secret. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Amazon Resource Name (ARN) of the secret. + SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` + + // Status of the secret. Valid Values: creating | active | rotating | impaired. + SecretStatus *string `json:"secretStatus,omitempty" tf:"secret_status,omitempty"` +} + +type ClusterMasterUserSecretParameters struct { +} + +type ClusterObservation struct { + + // The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. + AllocatedStorage *float64 `json:"allocatedStorage,omitempty" tf:"allocated_storage,omitempty"` + + // Enable to allow major engine version upgrades when changing engine versions. Defaults to false. + AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty" tf:"allow_major_version_upgrade,omitempty"` + + // Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is false. See Amazon RDS Documentation for more information. 
+ ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // Amazon Resource Name (ARN) of cluster + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // List of EC2 Availability Zones for the DB cluster storage where DB cluster instances can be created. + // We recommend specifying 3 AZs or using the if necessary. + // A maximum of 3 AZs can be configured. + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Target backtrack window, in seconds. Only available for aurora and aurora-mysql engines currently. To disable backtracking, set this value to 0. Defaults to 0. Must be between 0 and 259200 (72 hours) + BacktrackWindow *float64 `json:"backtrackWindow,omitempty" tf:"backtrack_window,omitempty"` + + // Days to retain backups for. Default 1 + BackupRetentionPeriod *float64 `json:"backupRetentionPeriod,omitempty" tf:"backup_retention_period,omitempty"` + + // – List of RDS Instances that are a part of this cluster + // +listType=set + ClusterMembers []*string `json:"clusterMembers,omitempty" tf:"cluster_members,omitempty"` + + // RDS Cluster Resource ID + ClusterResourceID *string `json:"clusterResourceId,omitempty" tf:"cluster_resource_id,omitempty"` + + // – Copy all Cluster tags to snapshots. Default is false. + CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty" tf:"copy_tags_to_snapshot,omitempty"` + + // The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS User Guide. + DBClusterInstanceClass *string `json:"dbClusterInstanceClass,omitempty" tf:"db_cluster_instance_class,omitempty"` + + // A cluster parameter group to associate with the cluster. 
+ DBClusterParameterGroupName *string `json:"dbClusterParameterGroupName,omitempty" tf:"db_cluster_parameter_group_name,omitempty"` + + // Instance parameter group to associate with all instances of the DB cluster. The db_instance_parameter_group_name parameter is only valid in combination with the allow_major_version_upgrade parameter. + DBInstanceParameterGroupName *string `json:"dbInstanceParameterGroupName,omitempty" tf:"db_instance_parameter_group_name,omitempty"` + + // DB subnet group to associate with this DB cluster. + // NOTE: This must match the db_subnet_group_name specified on every aws_rds_cluster_instance in the cluster. + DBSubnetGroupName *string `json:"dbSubnetGroupName,omitempty" tf:"db_subnet_group_name,omitempty"` + + // For use with RDS Custom. + DBSystemID *string `json:"dbSystemId,omitempty" tf:"db_system_id,omitempty"` + + // Name for an automatically created database on cluster creation. There are different naming restrictions per database engine: RDS Naming Constraints + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Specifies whether to remove automated backups immediately after the DB cluster is deleted. Default is true. + DeleteAutomatedBackups *bool `json:"deleteAutomatedBackups,omitempty" tf:"delete_automated_backups,omitempty"` + + // If the DB cluster should have deletion protection enabled. + // The database can't be deleted when this value is set to true. + // The default is false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // The ID of the Directory Service Active Directory domain to create the cluster in. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The name of the IAM role to be used when making API calls to the Directory Service. 
+ DomainIAMRoleName *string `json:"domainIamRoleName,omitempty" tf:"domain_iam_role_name,omitempty"` + + // Whether cluster should forward writes to an associated global cluster. Applied to secondary clusters to enable them to forward writes to an aws_rds_global_cluster's primary cluster. See the User Guide for Aurora for more information. + EnableGlobalWriteForwarding *bool `json:"enableGlobalWriteForwarding,omitempty" tf:"enable_global_write_forwarding,omitempty"` + + // Enable HTTP endpoint (data API). Only valid for some combinations of engine_mode, engine and engine_version and only available in some regions. See the Region and version availability section of the documentation. This option also does not work with any of these options specified: snapshot_identifier, replication_source_identifier, s3_import. + EnableHTTPEndpoint *bool `json:"enableHttpEndpoint,omitempty" tf:"enable_http_endpoint,omitempty"` + + // Whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.. See the User Guide for Aurora for more information. NOTE: Local write forwarding requires Aurora MySQL version 3.04 or higher. + EnableLocalWriteForwarding *bool `json:"enableLocalWriteForwarding,omitempty" tf:"enable_local_write_forwarding,omitempty"` + + // Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: audit, error, general, slowquery, postgresql (PostgreSQL). + // +listType=set + EnabledCloudwatchLogsExports []*string `json:"enabledCloudwatchLogsExports,omitempty" tf:"enabled_cloudwatch_logs_exports,omitempty"` + + // DNS address of the RDS instance + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // Name of the database engine to be used for this DB cluster. Valid Values: aurora-mysql, aurora-postgresql, mysql, postgres. (Note that mysql and postgres are Multi-AZ RDS clusters). 
+ Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Database engine mode. Valid values: global (only valid for Aurora MySQL 1.21 and earlier), parallelquery, provisioned, serverless. Defaults to: provisioned. See the RDS User Guide for limitations when using serverless. + EngineMode *string `json:"engineMode,omitempty" tf:"engine_mode,omitempty"` + + // Database engine version. Updating this argument results in an outage. See the Aurora MySQL and Aurora Postgres documentation for your configured engine to determine this value, or by running aws rds describe-db-engine-versions. For example with Aurora MySQL 2, a potential value for this argument is 5.7.mysql_aurora.2.03.2. The value can contain a partial version where supported by the API. The actual engine version used is returned in the attribute engine_version_actual, , see Attribute Reference below. + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Running version of the database. + EngineVersionActual *string `json:"engineVersionActual,omitempty" tf:"engine_version_actual,omitempty"` + + // Name of your final DB snapshot when this DB cluster is deleted. If omitted, no final snapshot will be made. + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // Global cluster identifier specified on aws_rds_global_cluster. + GlobalClusterIdentifier *string `json:"globalClusterIdentifier,omitempty" tf:"global_cluster_identifier,omitempty"` + + // Route53 Hosted Zone ID of the endpoint + HostedZoneID *string `json:"hostedZoneId,omitempty" tf:"hosted_zone_id,omitempty"` + + // Specifies whether or not mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. Please see AWS Documentation for availability and limitations. 
+ IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty" tf:"iam_database_authentication_enabled,omitempty"` + + // List of ARNs for the IAM roles to associate to the RDS Cluster. + // +listType=set + IAMRoles []*string `json:"iamRoles,omitempty" tf:"iam_roles,omitempty"` + + // RDS Cluster Identifier + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid Iops values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide. (This setting is required to create a Multi-AZ DB cluster). Must be a multiple between .5 and 50 of the storage amount for the DB cluster. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // ARN for the KMS encryption key. When specifying kms_key_id, storage_encrypted needs to be set to true. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Set to true to allow RDS to manage the master user password in Secrets Manager. Cannot be set if master_password is provided. + ManageMasterUserPassword *bool `json:"manageMasterUserPassword,omitempty" tf:"manage_master_user_password,omitempty"` + + // Block that specifies the master user secret. Only available when manage_master_user_password is set to true. Documented below. + MasterUserSecret []ClusterMasterUserSecretObservation `json:"masterUserSecret,omitempty" tf:"master_user_secret,omitempty"` + + // Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If not specified, the default KMS key for your Amazon Web Services account is used. 
+ MasterUserSecretKMSKeyID *string `json:"masterUserSecretKmsKeyId,omitempty" tf:"master_user_secret_kms_key_id,omitempty"` + + // Username for the master DB user. Please refer to the RDS Naming Constraints. This argument does not support in-place updates and cannot be changed during a restore from snapshot. + MasterUsername *string `json:"masterUsername,omitempty" tf:"master_username,omitempty"` + + // Network type of the cluster. Valid values: IPV4, DUAL. + NetworkType *string `json:"networkType,omitempty" tf:"network_type,omitempty"` + + // Port on which the DB accepts connections + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 + PreferredBackupWindow *string `json:"preferredBackupWindow,omitempty" tf:"preferred_backup_window,omitempty"` + + // Weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty" tf:"preferred_maintenance_window,omitempty"` + + // Read-only endpoint for the Aurora cluster, automatically + // load-balanced across replicas + ReaderEndpoint *string `json:"readerEndpoint,omitempty" tf:"reader_endpoint,omitempty"` + + // ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. + ReplicationSourceIdentifier *string `json:"replicationSourceIdentifier,omitempty" tf:"replication_source_identifier,omitempty"` + + // Nested attribute for point in time restore. More details below. 
+ RestoreToPointInTime *ClusterRestoreToPointInTimeObservation `json:"restoreToPointInTime,omitempty" tf:"restore_to_point_in_time,omitempty"` + + // Port on which the DB accepts connections + S3Import *ClusterS3ImportObservation `json:"s3Import,omitempty" tf:"s3_import,omitempty"` + + // Nested attribute with scaling properties. Only valid when engine_mode is set to serverless. More details below. + ScalingConfiguration *ScalingConfigurationObservation `json:"scalingConfiguration,omitempty" tf:"scaling_configuration,omitempty"` + + // Nested attribute with scaling properties for ServerlessV2. Only valid when engine_mode is set to provisioned. More details below. + Serverlessv2ScalingConfiguration *Serverlessv2ScalingConfigurationObservation `json:"serverlessv2ScalingConfiguration,omitempty" tf:"serverlessv2_scaling_configuration,omitempty"` + + // Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from final_snapshot_identifier. Default is false. + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot. Conflicts with global_cluster_identifier. Clusters cannot be restored from snapshot and joined to an existing global cluster in a single operation. See the AWS documentation or the Global Cluster Restored From Snapshot example for instructions on building a global cluster starting with a snapshot. + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // The source region for an encrypted replica DB cluster. 
+ SourceRegion *string `json:"sourceRegion,omitempty" tf:"source_region,omitempty"` + + // Specifies whether the DB cluster is encrypted. The default is false for provisioned engine_mode and true for serverless engine_mode. When restoring an unencrypted snapshot_identifier, the kms_key_id argument must be provided to encrypt the restored cluster. + StorageEncrypted *bool `json:"storageEncrypted,omitempty" tf:"storage_encrypted,omitempty"` + + // (Forces new for Multi-AZ DB clusters) Specifies the storage type to be associated with the DB cluster. For Aurora DB clusters, storage_type modifications can be done in-place. For Multi-AZ DB Clusters, the iops argument must also be set. Valid values are: "", aurora-iopt1 (Aurora DB Clusters); io1, io2 (Multi-AZ DB Clusters). Default: "" (Aurora DB Clusters); io1 (Multi-AZ DB Clusters). + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // List of VPC security groups to associate with the Cluster + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type ClusterParameters struct { + + // The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. + // +kubebuilder:validation:Optional + AllocatedStorage *float64 `json:"allocatedStorage,omitempty" tf:"allocated_storage,omitempty"` + + // Enable to allow major engine version upgrades when changing engine versions. Defaults to false. 
+ // +kubebuilder:validation:Optional + AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty" tf:"allow_major_version_upgrade,omitempty"` + + // Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is false. See Amazon RDS Documentation for more information. + // +kubebuilder:validation:Optional + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // If true, the password will be auto-generated and stored in the Secret referenced by the masterPasswordSecretRef field. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Optional + AutoGeneratePassword *bool `json:"autoGeneratePassword,omitempty" tf:"-"` + + // List of EC2 Availability Zones for the DB cluster storage where DB cluster instances can be created. + // We recommend specifying 3 AZs or using the if necessary. + // A maximum of 3 AZs can be configured. + // +kubebuilder:validation:Optional + // +listType=set + AvailabilityZones []*string `json:"availabilityZones,omitempty" tf:"availability_zones,omitempty"` + + // Target backtrack window, in seconds. Only available for aurora and aurora-mysql engines currently. To disable backtracking, set this value to 0. Defaults to 0. Must be between 0 and 259200 (72 hours) + // +kubebuilder:validation:Optional + BacktrackWindow *float64 `json:"backtrackWindow,omitempty" tf:"backtrack_window,omitempty"` + + // Days to retain backups for. Default 1 + // +kubebuilder:validation:Optional + BackupRetentionPeriod *float64 `json:"backupRetentionPeriod,omitempty" tf:"backup_retention_period,omitempty"` + + // – List of RDS Instances that are a part of this cluster + // +kubebuilder:validation:Optional + // +listType=set + ClusterMembers []*string `json:"clusterMembers,omitempty" tf:"cluster_members,omitempty"` + + // – Copy all Cluster tags to snapshots. Default is false. 
+ // +kubebuilder:validation:Optional + CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty" tf:"copy_tags_to_snapshot,omitempty"` + + // The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS User Guide. + // +kubebuilder:validation:Optional + DBClusterInstanceClass *string `json:"dbClusterInstanceClass,omitempty" tf:"db_cluster_instance_class,omitempty"` + + // A cluster parameter group to associate with the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.ClusterParameterGroup + // +kubebuilder:validation:Optional + DBClusterParameterGroupName *string `json:"dbClusterParameterGroupName,omitempty" tf:"db_cluster_parameter_group_name,omitempty"` + + // Reference to a ClusterParameterGroup in rds to populate dbClusterParameterGroupName. + // +kubebuilder:validation:Optional + DBClusterParameterGroupNameRef *v1.Reference `json:"dbClusterParameterGroupNameRef,omitempty" tf:"-"` + + // Selector for a ClusterParameterGroup in rds to populate dbClusterParameterGroupName. + // +kubebuilder:validation:Optional + DBClusterParameterGroupNameSelector *v1.Selector `json:"dbClusterParameterGroupNameSelector,omitempty" tf:"-"` + + // Instance parameter group to associate with all instances of the DB cluster. The db_instance_parameter_group_name parameter is only valid in combination with the allow_major_version_upgrade parameter. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.ParameterGroup + // +kubebuilder:validation:Optional + DBInstanceParameterGroupName *string `json:"dbInstanceParameterGroupName,omitempty" tf:"db_instance_parameter_group_name,omitempty"` + + // Reference to a ParameterGroup in rds to populate dbInstanceParameterGroupName. + // +kubebuilder:validation:Optional + DBInstanceParameterGroupNameRef *v1.Reference `json:"dbInstanceParameterGroupNameRef,omitempty" tf:"-"` + + // Selector for a ParameterGroup in rds to populate dbInstanceParameterGroupName. + // +kubebuilder:validation:Optional + DBInstanceParameterGroupNameSelector *v1.Selector `json:"dbInstanceParameterGroupNameSelector,omitempty" tf:"-"` + + // DB subnet group to associate with this DB cluster. + // NOTE: This must match the db_subnet_group_name specified on every aws_rds_cluster_instance in the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.SubnetGroup + // +kubebuilder:validation:Optional + DBSubnetGroupName *string `json:"dbSubnetGroupName,omitempty" tf:"db_subnet_group_name,omitempty"` + + // Reference to a SubnetGroup in rds to populate dbSubnetGroupName. + // +kubebuilder:validation:Optional + DBSubnetGroupNameRef *v1.Reference `json:"dbSubnetGroupNameRef,omitempty" tf:"-"` + + // Selector for a SubnetGroup in rds to populate dbSubnetGroupName. + // +kubebuilder:validation:Optional + DBSubnetGroupNameSelector *v1.Selector `json:"dbSubnetGroupNameSelector,omitempty" tf:"-"` + + // For use with RDS Custom. + // +kubebuilder:validation:Optional + DBSystemID *string `json:"dbSystemId,omitempty" tf:"db_system_id,omitempty"` + + // Name for an automatically created database on cluster creation. 
There are different naming restrictions per database engine: RDS Naming Constraints + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Specifies whether to remove automated backups immediately after the DB cluster is deleted. Default is true. + // +kubebuilder:validation:Optional + DeleteAutomatedBackups *bool `json:"deleteAutomatedBackups,omitempty" tf:"delete_automated_backups,omitempty"` + + // If the DB cluster should have deletion protection enabled. + // The database can't be deleted when this value is set to true. + // The default is false. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // The ID of the Directory Service Active Directory domain to create the cluster in. + // +kubebuilder:validation:Optional + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The name of the IAM role to be used when making API calls to the Directory Service. + // +kubebuilder:validation:Optional + DomainIAMRoleName *string `json:"domainIamRoleName,omitempty" tf:"domain_iam_role_name,omitempty"` + + // Whether cluster should forward writes to an associated global cluster. Applied to secondary clusters to enable them to forward writes to an aws_rds_global_cluster's primary cluster. See the User Guide for Aurora for more information. + // +kubebuilder:validation:Optional + EnableGlobalWriteForwarding *bool `json:"enableGlobalWriteForwarding,omitempty" tf:"enable_global_write_forwarding,omitempty"` + + // Enable HTTP endpoint (data API). Only valid for some combinations of engine_mode, engine and engine_version and only available in some regions. See the Region and version availability section of the documentation. This option also does not work with any of these options specified: snapshot_identifier, replication_source_identifier, s3_import. 
+ // +kubebuilder:validation:Optional + EnableHTTPEndpoint *bool `json:"enableHttpEndpoint,omitempty" tf:"enable_http_endpoint,omitempty"` + + // Whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.. See the User Guide for Aurora for more information. NOTE: Local write forwarding requires Aurora MySQL version 3.04 or higher. + // +kubebuilder:validation:Optional + EnableLocalWriteForwarding *bool `json:"enableLocalWriteForwarding,omitempty" tf:"enable_local_write_forwarding,omitempty"` + + // Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: audit, error, general, slowquery, postgresql (PostgreSQL). + // +kubebuilder:validation:Optional + // +listType=set + EnabledCloudwatchLogsExports []*string `json:"enabledCloudwatchLogsExports,omitempty" tf:"enabled_cloudwatch_logs_exports,omitempty"` + + // Name of the database engine to be used for this DB cluster. Valid Values: aurora-mysql, aurora-postgresql, mysql, postgres. (Note that mysql and postgres are Multi-AZ RDS clusters). + // +kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // Database engine mode. Valid values: global (only valid for Aurora MySQL 1.21 and earlier), parallelquery, provisioned, serverless. Defaults to: provisioned. See the RDS User Guide for limitations when using serverless. + // +kubebuilder:validation:Optional + EngineMode *string `json:"engineMode,omitempty" tf:"engine_mode,omitempty"` + + // Database engine version. Updating this argument results in an outage. See the Aurora MySQL and Aurora Postgres documentation for your configured engine to determine this value, or by running aws rds describe-db-engine-versions. For example with Aurora MySQL 2, a potential value for this argument is 5.7.mysql_aurora.2.03.2. 
The value can contain a partial version where supported by the API. The actual engine version used is returned in the attribute engine_version_actual, , see Attribute Reference below. + // +kubebuilder:validation:Optional + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // Name of your final DB snapshot when this DB cluster is deleted. If omitted, no final snapshot will be made. + // +kubebuilder:validation:Optional + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // Global cluster identifier specified on aws_rds_global_cluster. + // +kubebuilder:validation:Optional + GlobalClusterIdentifier *string `json:"globalClusterIdentifier,omitempty" tf:"global_cluster_identifier,omitempty"` + + // Specifies whether or not mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. Please see AWS Documentation for availability and limitations. + // +kubebuilder:validation:Optional + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty" tf:"iam_database_authentication_enabled,omitempty"` + + // Amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid Iops values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide. (This setting is required to create a Multi-AZ DB cluster). Must be a multiple between .5 and 50 of the storage amount for the DB cluster. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // ARN for the KMS encryption key. When specifying kms_key_id, storage_encrypted needs to be set to true. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Set to true to allow RDS to manage the master user password in Secrets Manager. Cannot be set if master_password is provided. + // +kubebuilder:validation:Optional + ManageMasterUserPassword *bool `json:"manageMasterUserPassword,omitempty" tf:"manage_master_user_password,omitempty"` + + // Password for the master DB user. Note that this may show up in logs, and it will be stored in the state file. Please refer to the RDS Naming Constraints. Cannot be set if manage_master_user_password is set to true. + // Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. + // +kubebuilder:validation:Optional + MasterPasswordSecretRef *v1.SecretKeySelector `json:"masterPasswordSecretRef,omitempty" tf:"-"` + + // Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If not specified, the default KMS key for your Amazon Web Services account is used. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key_id",true) + // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyID *string `json:"masterUserSecretKmsKeyId,omitempty" tf:"master_user_secret_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate masterUserSecretKmsKeyId. + // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyIDRef *v1.Reference `json:"masterUserSecretKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate masterUserSecretKmsKeyId. + // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyIDSelector *v1.Selector `json:"masterUserSecretKmsKeyIdSelector,omitempty" tf:"-"` + + // Username for the master DB user. Please refer to the RDS Naming Constraints. This argument does not support in-place updates and cannot be changed during a restore from snapshot. + // +kubebuilder:validation:Optional + MasterUsername *string `json:"masterUsername,omitempty" tf:"master_username,omitempty"` + + // Network type of the cluster. Valid values: IPV4, DUAL. + // +kubebuilder:validation:Optional + NetworkType *string `json:"networkType,omitempty" tf:"network_type,omitempty"` + + // Port on which the DB accepts connections + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.Time in UTC. 
Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 + // +kubebuilder:validation:Optional + PreferredBackupWindow *string `json:"preferredBackupWindow,omitempty" tf:"preferred_backup_window,omitempty"` + + // Weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 + // +kubebuilder:validation:Optional + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty" tf:"preferred_maintenance_window,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. + // +kubebuilder:validation:Optional + ReplicationSourceIdentifier *string `json:"replicationSourceIdentifier,omitempty" tf:"replication_source_identifier,omitempty"` + + // Nested attribute for point in time restore. More details below. + // +kubebuilder:validation:Optional + RestoreToPointInTime *ClusterRestoreToPointInTimeParameters `json:"restoreToPointInTime,omitempty" tf:"restore_to_point_in_time,omitempty"` + + // Port on which the DB accepts connections + // +kubebuilder:validation:Optional + S3Import *ClusterS3ImportParameters `json:"s3Import,omitempty" tf:"s3_import,omitempty"` + + // Nested attribute with scaling properties. Only valid when engine_mode is set to serverless. More details below. + // +kubebuilder:validation:Optional + ScalingConfiguration *ScalingConfigurationParameters `json:"scalingConfiguration,omitempty" tf:"scaling_configuration,omitempty"` + + // Nested attribute with scaling properties for ServerlessV2. Only valid when engine_mode is set to provisioned. More details below. 
+ // +kubebuilder:validation:Optional + Serverlessv2ScalingConfiguration *Serverlessv2ScalingConfigurationParameters `json:"serverlessv2ScalingConfiguration,omitempty" tf:"serverlessv2_scaling_configuration,omitempty"` + + // Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from final_snapshot_identifier. Default is false. + // +kubebuilder:validation:Optional + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot. Conflicts with global_cluster_identifier. Clusters cannot be restored from snapshot and joined to an existing global cluster in a single operation. See the AWS documentation or the Global Cluster Restored From Snapshot example for instructions on building a global cluster starting with a snapshot. + // +kubebuilder:validation:Optional + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // The source region for an encrypted replica DB cluster. + // +kubebuilder:validation:Optional + SourceRegion *string `json:"sourceRegion,omitempty" tf:"source_region,omitempty"` + + // Specifies whether the DB cluster is encrypted. The default is false for provisioned engine_mode and true for serverless engine_mode. When restoring an unencrypted snapshot_identifier, the kms_key_id argument must be provided to encrypt the restored cluster. + // +kubebuilder:validation:Optional + StorageEncrypted *bool `json:"storageEncrypted,omitempty" tf:"storage_encrypted,omitempty"` + + // (Forces new for Multi-AZ DB clusters) Specifies the storage type to be associated with the DB cluster. 
For Aurora DB clusters, storage_type modifications can be done in-place. For Multi-AZ DB Clusters, the iops argument must also be set. Valid values are: "", aurora-iopt1 (Aurora DB Clusters); io1, io2 (Multi-AZ DB Clusters). Default: "" (Aurora DB Clusters); io1 (Multi-AZ DB Clusters). + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of VPC security groups to associate with the Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type ClusterRestoreToPointInTimeInitParameters struct { + + // Date and time in UTC format to restore the database cluster to. Conflicts with use_latest_restorable_time. + RestoreToTime *string `json:"restoreToTime,omitempty" tf:"restore_to_time,omitempty"` + + // Type of restore to be performed. + // Valid options are full-copy (default) and copy-on-write. 
+ RestoreType *string `json:"restoreType,omitempty" tf:"restore_type,omitempty"` + + // Identifier of the source database cluster from which to restore. When restoring from a cluster in another AWS account, the identifier is the ARN of that cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster + SourceClusterIdentifier *string `json:"sourceClusterIdentifier,omitempty" tf:"source_cluster_identifier,omitempty"` + + // Reference to a Cluster in rds to populate sourceClusterIdentifier. + // +kubebuilder:validation:Optional + SourceClusterIdentifierRef *v1.Reference `json:"sourceClusterIdentifierRef,omitempty" tf:"-"` + + // Selector for a Cluster in rds to populate sourceClusterIdentifier. + // +kubebuilder:validation:Optional + SourceClusterIdentifierSelector *v1.Selector `json:"sourceClusterIdentifierSelector,omitempty" tf:"-"` + + // Set to true to restore the database cluster to the latest restorable backup time. Defaults to false. Conflicts with restore_to_time. + UseLatestRestorableTime *bool `json:"useLatestRestorableTime,omitempty" tf:"use_latest_restorable_time,omitempty"` +} + +type ClusterRestoreToPointInTimeObservation struct { + + // Date and time in UTC format to restore the database cluster to. Conflicts with use_latest_restorable_time. + RestoreToTime *string `json:"restoreToTime,omitempty" tf:"restore_to_time,omitempty"` + + // Type of restore to be performed. + // Valid options are full-copy (default) and copy-on-write. + RestoreType *string `json:"restoreType,omitempty" tf:"restore_type,omitempty"` + + // Identifier of the source database cluster from which to restore. When restoring from a cluster in another AWS account, the identifier is the ARN of that cluster. + SourceClusterIdentifier *string `json:"sourceClusterIdentifier,omitempty" tf:"source_cluster_identifier,omitempty"` + + // Set to true to restore the database cluster to the latest restorable backup time. Defaults to false. 
Conflicts with restore_to_time. + UseLatestRestorableTime *bool `json:"useLatestRestorableTime,omitempty" tf:"use_latest_restorable_time,omitempty"` +} + +type ClusterRestoreToPointInTimeParameters struct { + + // Date and time in UTC format to restore the database cluster to. Conflicts with use_latest_restorable_time. + // +kubebuilder:validation:Optional + RestoreToTime *string `json:"restoreToTime,omitempty" tf:"restore_to_time,omitempty"` + + // Type of restore to be performed. + // Valid options are full-copy (default) and copy-on-write. + // +kubebuilder:validation:Optional + RestoreType *string `json:"restoreType,omitempty" tf:"restore_type,omitempty"` + + // Identifier of the source database cluster from which to restore. When restoring from a cluster in another AWS account, the identifier is the ARN of that cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta2.Cluster + // +kubebuilder:validation:Optional + SourceClusterIdentifier *string `json:"sourceClusterIdentifier,omitempty" tf:"source_cluster_identifier,omitempty"` + + // Reference to a Cluster in rds to populate sourceClusterIdentifier. + // +kubebuilder:validation:Optional + SourceClusterIdentifierRef *v1.Reference `json:"sourceClusterIdentifierRef,omitempty" tf:"-"` + + // Selector for a Cluster in rds to populate sourceClusterIdentifier. + // +kubebuilder:validation:Optional + SourceClusterIdentifierSelector *v1.Selector `json:"sourceClusterIdentifierSelector,omitempty" tf:"-"` + + // Set to true to restore the database cluster to the latest restorable backup time. Defaults to false. Conflicts with restore_to_time. 
+ // +kubebuilder:validation:Optional + UseLatestRestorableTime *bool `json:"useLatestRestorableTime,omitempty" tf:"use_latest_restorable_time,omitempty"` +} + +type ClusterS3ImportInitParameters struct { + + // Bucket name where your backup is stored + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // Can be blank, but is the path to your backup + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Role applied to load the data. + IngestionRole *string `json:"ingestionRole,omitempty" tf:"ingestion_role,omitempty"` + + // Source engine for the backup + SourceEngine *string `json:"sourceEngine,omitempty" tf:"source_engine,omitempty"` + + // Version of the source engine used to make the backup + SourceEngineVersion *string `json:"sourceEngineVersion,omitempty" tf:"source_engine_version,omitempty"` +} + +type ClusterS3ImportObservation struct { + + // Bucket name where your backup is stored + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Can be blank, but is the path to your backup + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Role applied to load the data. 
+ IngestionRole *string `json:"ingestionRole,omitempty" tf:"ingestion_role,omitempty"` + + // Source engine for the backup + SourceEngine *string `json:"sourceEngine,omitempty" tf:"source_engine,omitempty"` + + // Version of the source engine used to make the backup + SourceEngineVersion *string `json:"sourceEngineVersion,omitempty" tf:"source_engine_version,omitempty"` +} + +type ClusterS3ImportParameters struct { + + // Bucket name where your backup is stored + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // Can be blank, but is the path to your backup + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Role applied to load the data. + // +kubebuilder:validation:Optional + IngestionRole *string `json:"ingestionRole" tf:"ingestion_role,omitempty"` + + // Source engine for the backup + // +kubebuilder:validation:Optional + SourceEngine *string `json:"sourceEngine" tf:"source_engine,omitempty"` + + // Version of the source engine used to make the backup + // +kubebuilder:validation:Optional + SourceEngineVersion *string `json:"sourceEngineVersion" tf:"source_engine_version,omitempty"` +} + +type ScalingConfigurationInitParameters struct { + + // Whether to enable automatic pause. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. 
In this case, the DB cluster is restored when there is a request to connect to it. Defaults to true. + AutoPause *bool `json:"autoPause,omitempty" tf:"auto_pause,omitempty"` + + // Maximum capacity for an Aurora DB cluster in serverless DB engine mode. The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 16. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity for an Aurora DB cluster in serverless DB engine mode. The minimum capacity must be lesser than or equal to the maximum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 1. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` + + // Time, in seconds, before an Aurora DB cluster in serverless mode is paused. Valid values are 300 through 86400. Defaults to 300. + SecondsUntilAutoPause *float64 `json:"secondsUntilAutoPause,omitempty" tf:"seconds_until_auto_pause,omitempty"` + + // Action to take when the timeout is reached. Valid values: ForceApplyCapacityChange, RollbackCapacityChange. Defaults to RollbackCapacityChange. See documentation. + TimeoutAction *string `json:"timeoutAction,omitempty" tf:"timeout_action,omitempty"` +} + +type ScalingConfigurationObservation struct { + + // Whether to enable automatic pause. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it. Defaults to true. 
+ AutoPause *bool `json:"autoPause,omitempty" tf:"auto_pause,omitempty"` + + // Maximum capacity for an Aurora DB cluster in serverless DB engine mode. The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 16. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity for an Aurora DB cluster in serverless DB engine mode. The minimum capacity must be lesser than or equal to the maximum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 1. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` + + // Time, in seconds, before an Aurora DB cluster in serverless mode is paused. Valid values are 300 through 86400. Defaults to 300. + SecondsUntilAutoPause *float64 `json:"secondsUntilAutoPause,omitempty" tf:"seconds_until_auto_pause,omitempty"` + + // Action to take when the timeout is reached. Valid values: ForceApplyCapacityChange, RollbackCapacityChange. Defaults to RollbackCapacityChange. See documentation. + TimeoutAction *string `json:"timeoutAction,omitempty" tf:"timeout_action,omitempty"` +} + +type ScalingConfigurationParameters struct { + + // Whether to enable automatic pause. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it. Defaults to true. + // +kubebuilder:validation:Optional + AutoPause *bool `json:"autoPause,omitempty" tf:"auto_pause,omitempty"` + + // Maximum capacity for an Aurora DB cluster in serverless DB engine mode. 
The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 16. + // +kubebuilder:validation:Optional + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity for an Aurora DB cluster in serverless DB engine mode. The minimum capacity must be lesser than or equal to the maximum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 1. + // +kubebuilder:validation:Optional + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` + + // Time, in seconds, before an Aurora DB cluster in serverless mode is paused. Valid values are 300 through 86400. Defaults to 300. + // +kubebuilder:validation:Optional + SecondsUntilAutoPause *float64 `json:"secondsUntilAutoPause,omitempty" tf:"seconds_until_auto_pause,omitempty"` + + // Action to take when the timeout is reached. Valid values: ForceApplyCapacityChange, RollbackCapacityChange. Defaults to RollbackCapacityChange. See documentation. + // +kubebuilder:validation:Optional + TimeoutAction *string `json:"timeoutAction,omitempty" tf:"timeout_action,omitempty"` +} + +type Serverlessv2ScalingConfigurationInitParameters struct { + + // Maximum capacity for an Aurora DB cluster in serverless DB engine mode. The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 16. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity for an Aurora DB cluster in serverless DB engine mode. 
The minimum capacity must be lesser than or equal to the maximum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 1. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type Serverlessv2ScalingConfigurationObservation struct { + + // Maximum capacity for an Aurora DB cluster in serverless DB engine mode. The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 16. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity for an Aurora DB cluster in serverless DB engine mode. The minimum capacity must be lesser than or equal to the maximum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 1. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type Serverlessv2ScalingConfigurationParameters struct { + + // Maximum capacity for an Aurora DB cluster in serverless DB engine mode. The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 16. + // +kubebuilder:validation:Optional + MaxCapacity *float64 `json:"maxCapacity" tf:"max_capacity,omitempty"` + + // Minimum capacity for an Aurora DB cluster in serverless DB engine mode. The minimum capacity must be lesser than or equal to the maximum capacity. Valid Aurora MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. 
Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, 32, 64, 192, and 384). Defaults to 1. + // +kubebuilder:validation:Optional + MinCapacity *float64 `json:"minCapacity" tf:"min_capacity,omitempty"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the Clusters API. 
Manages an RDS Aurora Cluster or a RDS Multi-AZ DB Cluster +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engine) || (has(self.initProvider) && has(self.initProvider.engine))",message="spec.forProvider.engine is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." 
+ CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/rds/v1beta2/zz_generated.conversion_hubs.go b/apis/rds/v1beta2/zz_generated.conversion_hubs.go index d7a1e70651..5283e599a0 100755 --- a/apis/rds/v1beta2/zz_generated.conversion_hubs.go +++ b/apis/rds/v1beta2/zz_generated.conversion_hubs.go @@ -7,4 +7,7 @@ package v1beta2 // Hub marks this type as a conversion hub. -func (tr *Instance) Hub() {} +func (tr *Cluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ProxyDefaultTargetGroup) Hub() {} diff --git a/apis/rds/v1beta2/zz_generated.conversion_spokes.go b/apis/rds/v1beta2/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..b5fd338771 --- /dev/null +++ b/apis/rds/v1beta2/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Instance to the hub type. +func (tr *Instance) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Instance type. 
+func (tr *Instance) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/rds/v1beta2/zz_generated.deepcopy.go b/apis/rds/v1beta2/zz_generated.deepcopy.go index 1ae4141296..acac81217c 100644 --- a/apis/rds/v1beta2/zz_generated.deepcopy.go +++ b/apis/rds/v1beta2/zz_generated.deepcopy.go @@ -73,6 +73,1661 @@ func (in *BlueGreenUpdateParameters) DeepCopy() *BlueGreenUpdateParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.AllocatedStorage != nil { + in, out := &in.AllocatedStorage, &out.AllocatedStorage + *out = new(float64) + **out = **in + } + if in.AllowMajorVersionUpgrade != nil { + in, out := &in.AllowMajorVersionUpgrade, &out.AllowMajorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BacktrackWindow != nil { + in, out := &in.BacktrackWindow, &out.BacktrackWindow + *out = new(float64) + **out = **in + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(float64) + **out = **in + } + if in.ClusterMembers != nil { + in, out := &in.ClusterMembers, &out.ClusterMembers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CopyTagsToSnapshot != nil { + in, out := &in.CopyTagsToSnapshot, &out.CopyTagsToSnapshot + *out = new(bool) + **out = **in + } + if in.DBClusterInstanceClass != nil { + in, out := &in.DBClusterInstanceClass, &out.DBClusterInstanceClass + *out = new(string) + **out = **in + } + if in.DBClusterParameterGroupName != nil { + in, out := &in.DBClusterParameterGroupName, &out.DBClusterParameterGroupName + *out = new(string) + **out = **in + } + if in.DBClusterParameterGroupNameRef != nil { + in, out := &in.DBClusterParameterGroupNameRef, &out.DBClusterParameterGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBClusterParameterGroupNameSelector != nil { + in, out := 
&in.DBClusterParameterGroupNameSelector, &out.DBClusterParameterGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DBInstanceParameterGroupName != nil { + in, out := &in.DBInstanceParameterGroupName, &out.DBInstanceParameterGroupName + *out = new(string) + **out = **in + } + if in.DBInstanceParameterGroupNameRef != nil { + in, out := &in.DBInstanceParameterGroupNameRef, &out.DBInstanceParameterGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBInstanceParameterGroupNameSelector != nil { + in, out := &in.DBInstanceParameterGroupNameSelector, &out.DBInstanceParameterGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DBSubnetGroupName != nil { + in, out := &in.DBSubnetGroupName, &out.DBSubnetGroupName + *out = new(string) + **out = **in + } + if in.DBSubnetGroupNameRef != nil { + in, out := &in.DBSubnetGroupNameRef, &out.DBSubnetGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBSubnetGroupNameSelector != nil { + in, out := &in.DBSubnetGroupNameSelector, &out.DBSubnetGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DBSystemID != nil { + in, out := &in.DBSystemID, &out.DBSystemID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DeleteAutomatedBackups != nil { + in, out := &in.DeleteAutomatedBackups, &out.DeleteAutomatedBackups + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.DomainIAMRoleName != nil { + in, out := &in.DomainIAMRoleName, &out.DomainIAMRoleName + *out = new(string) + **out = **in + } + if in.EnableGlobalWriteForwarding != nil { + in, out := 
&in.EnableGlobalWriteForwarding, &out.EnableGlobalWriteForwarding + *out = new(bool) + **out = **in + } + if in.EnableHTTPEndpoint != nil { + in, out := &in.EnableHTTPEndpoint, &out.EnableHTTPEndpoint + *out = new(bool) + **out = **in + } + if in.EnableLocalWriteForwarding != nil { + in, out := &in.EnableLocalWriteForwarding, &out.EnableLocalWriteForwarding + *out = new(bool) + **out = **in + } + if in.EnabledCloudwatchLogsExports != nil { + in, out := &in.EnabledCloudwatchLogsExports, &out.EnabledCloudwatchLogsExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineMode != nil { + in, out := &in.EngineMode, &out.EngineMode + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.GlobalClusterIdentifier != nil { + in, out := &in.GlobalClusterIdentifier, &out.GlobalClusterIdentifier + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if 
in.ManageMasterUserPassword != nil { + in, out := &in.ManageMasterUserPassword, &out.ManageMasterUserPassword + *out = new(bool) + **out = **in + } + if in.MasterPasswordSecretRef != nil { + in, out := &in.MasterPasswordSecretRef, &out.MasterPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.MasterUserSecretKMSKeyID != nil { + in, out := &in.MasterUserSecretKMSKeyID, &out.MasterUserSecretKMSKeyID + *out = new(string) + **out = **in + } + if in.MasterUserSecretKMSKeyIDRef != nil { + in, out := &in.MasterUserSecretKMSKeyIDRef, &out.MasterUserSecretKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MasterUserSecretKMSKeyIDSelector != nil { + in, out := &in.MasterUserSecretKMSKeyIDSelector, &out.MasterUserSecretKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MasterUsername != nil { + in, out := &in.MasterUsername, &out.MasterUsername + *out = new(string) + **out = **in + } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferredBackupWindow != nil { + in, out := &in.PreferredBackupWindow, &out.PreferredBackupWindow + *out = new(string) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.ReplicationSourceIdentifier != nil { + in, out := &in.ReplicationSourceIdentifier, &out.ReplicationSourceIdentifier + *out = new(string) + **out = **in + } + if in.RestoreToPointInTime != nil { + in, out := &in.RestoreToPointInTime, &out.RestoreToPointInTime + *out = new(ClusterRestoreToPointInTimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Import != nil { + in, out := &in.S3Import, &out.S3Import + *out = new(ClusterS3ImportInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.ScalingConfiguration != nil { + in, out := &in.ScalingConfiguration, &out.ScalingConfiguration + *out = new(ScalingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Serverlessv2ScalingConfiguration != nil { + in, out := &in.Serverlessv2ScalingConfiguration, &out.Serverlessv2ScalingConfiguration + *out = new(Serverlessv2ScalingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.SourceRegion != nil { + in, out := &in.SourceRegion, &out.SourceRegion + *out = new(string) + **out = **in + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + 
**out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMasterUserSecretInitParameters) DeepCopyInto(out *ClusterMasterUserSecretInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterUserSecretInitParameters. +func (in *ClusterMasterUserSecretInitParameters) DeepCopy() *ClusterMasterUserSecretInitParameters { + if in == nil { + return nil + } + out := new(ClusterMasterUserSecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterMasterUserSecretObservation) DeepCopyInto(out *ClusterMasterUserSecretObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.SecretStatus != nil { + in, out := &in.SecretStatus, &out.SecretStatus + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterUserSecretObservation. +func (in *ClusterMasterUserSecretObservation) DeepCopy() *ClusterMasterUserSecretObservation { + if in == nil { + return nil + } + out := new(ClusterMasterUserSecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMasterUserSecretParameters) DeepCopyInto(out *ClusterMasterUserSecretParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterUserSecretParameters. +func (in *ClusterMasterUserSecretParameters) DeepCopy() *ClusterMasterUserSecretParameters { + if in == nil { + return nil + } + out := new(ClusterMasterUserSecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.AllocatedStorage != nil { + in, out := &in.AllocatedStorage, &out.AllocatedStorage + *out = new(float64) + **out = **in + } + if in.AllowMajorVersionUpgrade != nil { + in, out := &in.AllowMajorVersionUpgrade, &out.AllowMajorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BacktrackWindow != nil { + in, out := &in.BacktrackWindow, &out.BacktrackWindow + *out = new(float64) + **out = **in + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(float64) + **out = **in + } + if in.ClusterMembers != nil { + in, out := &in.ClusterMembers, &out.ClusterMembers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterResourceID != nil { + in, out := &in.ClusterResourceID, &out.ClusterResourceID + *out = new(string) + **out = **in + } + if in.CopyTagsToSnapshot != nil { + in, out := &in.CopyTagsToSnapshot, &out.CopyTagsToSnapshot + *out = new(bool) + **out = **in + } + if in.DBClusterInstanceClass != nil { + in, out := &in.DBClusterInstanceClass, &out.DBClusterInstanceClass + *out = new(string) + **out = **in + } + if in.DBClusterParameterGroupName != nil { + in, out := &in.DBClusterParameterGroupName, &out.DBClusterParameterGroupName + *out = new(string) + **out = **in + } + if in.DBInstanceParameterGroupName != nil 
{ + in, out := &in.DBInstanceParameterGroupName, &out.DBInstanceParameterGroupName + *out = new(string) + **out = **in + } + if in.DBSubnetGroupName != nil { + in, out := &in.DBSubnetGroupName, &out.DBSubnetGroupName + *out = new(string) + **out = **in + } + if in.DBSystemID != nil { + in, out := &in.DBSystemID, &out.DBSystemID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DeleteAutomatedBackups != nil { + in, out := &in.DeleteAutomatedBackups, &out.DeleteAutomatedBackups + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.DomainIAMRoleName != nil { + in, out := &in.DomainIAMRoleName, &out.DomainIAMRoleName + *out = new(string) + **out = **in + } + if in.EnableGlobalWriteForwarding != nil { + in, out := &in.EnableGlobalWriteForwarding, &out.EnableGlobalWriteForwarding + *out = new(bool) + **out = **in + } + if in.EnableHTTPEndpoint != nil { + in, out := &in.EnableHTTPEndpoint, &out.EnableHTTPEndpoint + *out = new(bool) + **out = **in + } + if in.EnableLocalWriteForwarding != nil { + in, out := &in.EnableLocalWriteForwarding, &out.EnableLocalWriteForwarding + *out = new(bool) + **out = **in + } + if in.EnabledCloudwatchLogsExports != nil { + in, out := &in.EnabledCloudwatchLogsExports, &out.EnabledCloudwatchLogsExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineMode != nil { + in, out 
:= &in.EngineMode, &out.EngineMode + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.EngineVersionActual != nil { + in, out := &in.EngineVersionActual, &out.EngineVersionActual + *out = new(string) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.GlobalClusterIdentifier != nil { + in, out := &in.GlobalClusterIdentifier, &out.GlobalClusterIdentifier + *out = new(string) + **out = **in + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.IAMRoles != nil { + in, out := &in.IAMRoles, &out.IAMRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.ManageMasterUserPassword != nil { + in, out := &in.ManageMasterUserPassword, &out.ManageMasterUserPassword + *out = new(bool) + **out = **in + } + if in.MasterUserSecret != nil { + in, out := &in.MasterUserSecret, &out.MasterUserSecret + *out = make([]ClusterMasterUserSecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterUserSecretKMSKeyID != nil { + in, out := &in.MasterUserSecretKMSKeyID, &out.MasterUserSecretKMSKeyID + *out = new(string) + **out = **in + } + if 
in.MasterUsername != nil { + in, out := &in.MasterUsername, &out.MasterUsername + *out = new(string) + **out = **in + } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferredBackupWindow != nil { + in, out := &in.PreferredBackupWindow, &out.PreferredBackupWindow + *out = new(string) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.ReaderEndpoint != nil { + in, out := &in.ReaderEndpoint, &out.ReaderEndpoint + *out = new(string) + **out = **in + } + if in.ReplicationSourceIdentifier != nil { + in, out := &in.ReplicationSourceIdentifier, &out.ReplicationSourceIdentifier + *out = new(string) + **out = **in + } + if in.RestoreToPointInTime != nil { + in, out := &in.RestoreToPointInTime, &out.RestoreToPointInTime + *out = new(ClusterRestoreToPointInTimeObservation) + (*in).DeepCopyInto(*out) + } + if in.S3Import != nil { + in, out := &in.S3Import, &out.S3Import + *out = new(ClusterS3ImportObservation) + (*in).DeepCopyInto(*out) + } + if in.ScalingConfiguration != nil { + in, out := &in.ScalingConfiguration, &out.ScalingConfiguration + *out = new(ScalingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Serverlessv2ScalingConfiguration != nil { + in, out := &in.Serverlessv2ScalingConfiguration, &out.Serverlessv2ScalingConfiguration + *out = new(Serverlessv2ScalingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.SourceRegion != nil { + in, out := &in.SourceRegion, 
&out.SourceRegion + *out = new(string) + **out = **in + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.AllocatedStorage != nil { + in, out := &in.AllocatedStorage, &out.AllocatedStorage + *out = new(float64) + **out = **in + } + if in.AllowMajorVersionUpgrade != nil { + in, out := &in.AllowMajorVersionUpgrade, &out.AllowMajorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AutoGeneratePassword != nil { + in, out := &in.AutoGeneratePassword, &out.AutoGeneratePassword + *out = new(bool) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BacktrackWindow != nil { + in, out := &in.BacktrackWindow, &out.BacktrackWindow + *out = new(float64) + **out = **in + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(float64) + **out = **in + } + if in.ClusterMembers != nil { + in, out := &in.ClusterMembers, &out.ClusterMembers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CopyTagsToSnapshot != nil { + in, out := &in.CopyTagsToSnapshot, &out.CopyTagsToSnapshot + *out = new(bool) + **out = **in + } + if in.DBClusterInstanceClass != nil { + in, out := &in.DBClusterInstanceClass, &out.DBClusterInstanceClass + *out = new(string) + **out = **in + } + if in.DBClusterParameterGroupName != nil { + in, out := &in.DBClusterParameterGroupName, &out.DBClusterParameterGroupName + *out = new(string) + **out = **in + } + if in.DBClusterParameterGroupNameRef != nil { + in, out := &in.DBClusterParameterGroupNameRef, 
&out.DBClusterParameterGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBClusterParameterGroupNameSelector != nil { + in, out := &in.DBClusterParameterGroupNameSelector, &out.DBClusterParameterGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DBInstanceParameterGroupName != nil { + in, out := &in.DBInstanceParameterGroupName, &out.DBInstanceParameterGroupName + *out = new(string) + **out = **in + } + if in.DBInstanceParameterGroupNameRef != nil { + in, out := &in.DBInstanceParameterGroupNameRef, &out.DBInstanceParameterGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBInstanceParameterGroupNameSelector != nil { + in, out := &in.DBInstanceParameterGroupNameSelector, &out.DBInstanceParameterGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DBSubnetGroupName != nil { + in, out := &in.DBSubnetGroupName, &out.DBSubnetGroupName + *out = new(string) + **out = **in + } + if in.DBSubnetGroupNameRef != nil { + in, out := &in.DBSubnetGroupNameRef, &out.DBSubnetGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBSubnetGroupNameSelector != nil { + in, out := &in.DBSubnetGroupNameSelector, &out.DBSubnetGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DBSystemID != nil { + in, out := &in.DBSystemID, &out.DBSystemID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DeleteAutomatedBackups != nil { + in, out := &in.DeleteAutomatedBackups, &out.DeleteAutomatedBackups + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.DomainIAMRoleName != nil { + in, out 
:= &in.DomainIAMRoleName, &out.DomainIAMRoleName + *out = new(string) + **out = **in + } + if in.EnableGlobalWriteForwarding != nil { + in, out := &in.EnableGlobalWriteForwarding, &out.EnableGlobalWriteForwarding + *out = new(bool) + **out = **in + } + if in.EnableHTTPEndpoint != nil { + in, out := &in.EnableHTTPEndpoint, &out.EnableHTTPEndpoint + *out = new(bool) + **out = **in + } + if in.EnableLocalWriteForwarding != nil { + in, out := &in.EnableLocalWriteForwarding, &out.EnableLocalWriteForwarding + *out = new(bool) + **out = **in + } + if in.EnabledCloudwatchLogsExports != nil { + in, out := &in.EnabledCloudwatchLogsExports, &out.EnabledCloudwatchLogsExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineMode != nil { + in, out := &in.EngineMode, &out.EngineMode + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.GlobalClusterIdentifier != nil { + in, out := &in.GlobalClusterIdentifier, &out.GlobalClusterIdentifier + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ManageMasterUserPassword != nil { + in, out := &in.ManageMasterUserPassword, &out.ManageMasterUserPassword + *out = new(bool) + **out = **in + } + if in.MasterPasswordSecretRef != nil { + in, out := &in.MasterPasswordSecretRef, &out.MasterPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.MasterUserSecretKMSKeyID != nil { + in, out := &in.MasterUserSecretKMSKeyID, &out.MasterUserSecretKMSKeyID + *out = new(string) + **out = **in + } + if in.MasterUserSecretKMSKeyIDRef != nil { + in, out := &in.MasterUserSecretKMSKeyIDRef, &out.MasterUserSecretKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MasterUserSecretKMSKeyIDSelector != nil { + in, out := &in.MasterUserSecretKMSKeyIDSelector, &out.MasterUserSecretKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MasterUsername != nil { + in, out := &in.MasterUsername, &out.MasterUsername + *out = new(string) + **out = **in + } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferredBackupWindow != nil { + in, out := &in.PreferredBackupWindow, &out.PreferredBackupWindow + *out = new(string) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReplicationSourceIdentifier != nil { + in, out := &in.ReplicationSourceIdentifier, &out.ReplicationSourceIdentifier + *out = new(string) + **out = **in + } + if in.RestoreToPointInTime != nil { + in, out := &in.RestoreToPointInTime, 
&out.RestoreToPointInTime + *out = new(ClusterRestoreToPointInTimeParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Import != nil { + in, out := &in.S3Import, &out.S3Import + *out = new(ClusterS3ImportParameters) + (*in).DeepCopyInto(*out) + } + if in.ScalingConfiguration != nil { + in, out := &in.ScalingConfiguration, &out.ScalingConfiguration + *out = new(ScalingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Serverlessv2ScalingConfiguration != nil { + in, out := &in.Serverlessv2ScalingConfiguration, &out.Serverlessv2ScalingConfiguration + *out = new(Serverlessv2ScalingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.SourceRegion != nil { + in, out := &in.SourceRegion, &out.SourceRegion + *out = new(string) + **out = **in + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + 
} + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRestoreToPointInTimeInitParameters) DeepCopyInto(out *ClusterRestoreToPointInTimeInitParameters) { + *out = *in + if in.RestoreToTime != nil { + in, out := &in.RestoreToTime, &out.RestoreToTime + *out = new(string) + **out = **in + } + if in.RestoreType != nil { + in, out := &in.RestoreType, &out.RestoreType + *out = new(string) + **out = **in + } + if in.SourceClusterIdentifier != nil { + in, out := &in.SourceClusterIdentifier, &out.SourceClusterIdentifier + *out = new(string) + **out = **in + } + if in.SourceClusterIdentifierRef != nil { + in, out := &in.SourceClusterIdentifierRef, &out.SourceClusterIdentifierRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceClusterIdentifierSelector != nil { + in, out := &in.SourceClusterIdentifierSelector, &out.SourceClusterIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UseLatestRestorableTime != nil { + in, out := &in.UseLatestRestorableTime, &out.UseLatestRestorableTime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRestoreToPointInTimeInitParameters. 
+func (in *ClusterRestoreToPointInTimeInitParameters) DeepCopy() *ClusterRestoreToPointInTimeInitParameters { + if in == nil { + return nil + } + out := new(ClusterRestoreToPointInTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRestoreToPointInTimeObservation) DeepCopyInto(out *ClusterRestoreToPointInTimeObservation) { + *out = *in + if in.RestoreToTime != nil { + in, out := &in.RestoreToTime, &out.RestoreToTime + *out = new(string) + **out = **in + } + if in.RestoreType != nil { + in, out := &in.RestoreType, &out.RestoreType + *out = new(string) + **out = **in + } + if in.SourceClusterIdentifier != nil { + in, out := &in.SourceClusterIdentifier, &out.SourceClusterIdentifier + *out = new(string) + **out = **in + } + if in.UseLatestRestorableTime != nil { + in, out := &in.UseLatestRestorableTime, &out.UseLatestRestorableTime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRestoreToPointInTimeObservation. +func (in *ClusterRestoreToPointInTimeObservation) DeepCopy() *ClusterRestoreToPointInTimeObservation { + if in == nil { + return nil + } + out := new(ClusterRestoreToPointInTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterRestoreToPointInTimeParameters) DeepCopyInto(out *ClusterRestoreToPointInTimeParameters) { + *out = *in + if in.RestoreToTime != nil { + in, out := &in.RestoreToTime, &out.RestoreToTime + *out = new(string) + **out = **in + } + if in.RestoreType != nil { + in, out := &in.RestoreType, &out.RestoreType + *out = new(string) + **out = **in + } + if in.SourceClusterIdentifier != nil { + in, out := &in.SourceClusterIdentifier, &out.SourceClusterIdentifier + *out = new(string) + **out = **in + } + if in.SourceClusterIdentifierRef != nil { + in, out := &in.SourceClusterIdentifierRef, &out.SourceClusterIdentifierRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceClusterIdentifierSelector != nil { + in, out := &in.SourceClusterIdentifierSelector, &out.SourceClusterIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UseLatestRestorableTime != nil { + in, out := &in.UseLatestRestorableTime, &out.UseLatestRestorableTime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRestoreToPointInTimeParameters. +func (in *ClusterRestoreToPointInTimeParameters) DeepCopy() *ClusterRestoreToPointInTimeParameters { + if in == nil { + return nil + } + out := new(ClusterRestoreToPointInTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterS3ImportInitParameters) DeepCopyInto(out *ClusterS3ImportInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.IngestionRole != nil { + in, out := &in.IngestionRole, &out.IngestionRole + *out = new(string) + **out = **in + } + if in.SourceEngine != nil { + in, out := &in.SourceEngine, &out.SourceEngine + *out = new(string) + **out = **in + } + if in.SourceEngineVersion != nil { + in, out := &in.SourceEngineVersion, &out.SourceEngineVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterS3ImportInitParameters. +func (in *ClusterS3ImportInitParameters) DeepCopy() *ClusterS3ImportInitParameters { + if in == nil { + return nil + } + out := new(ClusterS3ImportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterS3ImportObservation) DeepCopyInto(out *ClusterS3ImportObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.IngestionRole != nil { + in, out := &in.IngestionRole, &out.IngestionRole + *out = new(string) + **out = **in + } + if in.SourceEngine != nil { + in, out := &in.SourceEngine, &out.SourceEngine + *out = new(string) + **out = **in + } + if in.SourceEngineVersion != nil { + in, out := &in.SourceEngineVersion, &out.SourceEngineVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterS3ImportObservation. +func (in *ClusterS3ImportObservation) DeepCopy() *ClusterS3ImportObservation { + if in == nil { + return nil + } + out := new(ClusterS3ImportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterS3ImportParameters) DeepCopyInto(out *ClusterS3ImportParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.IngestionRole != nil { + in, out := &in.IngestionRole, &out.IngestionRole + *out = new(string) + **out = **in + } + if in.SourceEngine != nil { + in, out := &in.SourceEngine, &out.SourceEngine + *out = new(string) + **out = **in + } + if in.SourceEngineVersion != nil { + in, out := &in.SourceEngineVersion, &out.SourceEngineVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterS3ImportParameters. +func (in *ClusterS3ImportParameters) DeepCopy() *ClusterS3ImportParameters { + if in == nil { + return nil + } + out := new(ClusterS3ImportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. 
+func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolConfigInitParameters) DeepCopyInto(out *ConnectionPoolConfigInitParameters) { + *out = *in + if in.ConnectionBorrowTimeout != nil { + in, out := &in.ConnectionBorrowTimeout, &out.ConnectionBorrowTimeout + *out = new(float64) + **out = **in + } + if in.InitQuery != nil { + in, out := &in.InitQuery, &out.InitQuery + *out = new(string) + **out = **in + } + if in.MaxConnectionsPercent != nil { + in, out := &in.MaxConnectionsPercent, &out.MaxConnectionsPercent + *out = new(float64) + **out = **in + } + if in.MaxIdleConnectionsPercent != nil { + in, out := &in.MaxIdleConnectionsPercent, &out.MaxIdleConnectionsPercent + *out = new(float64) + **out = **in + } + if in.SessionPinningFilters != nil { + in, out := &in.SessionPinningFilters, &out.SessionPinningFilters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolConfigInitParameters. 
+func (in *ConnectionPoolConfigInitParameters) DeepCopy() *ConnectionPoolConfigInitParameters { + if in == nil { + return nil + } + out := new(ConnectionPoolConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolConfigObservation) DeepCopyInto(out *ConnectionPoolConfigObservation) { + *out = *in + if in.ConnectionBorrowTimeout != nil { + in, out := &in.ConnectionBorrowTimeout, &out.ConnectionBorrowTimeout + *out = new(float64) + **out = **in + } + if in.InitQuery != nil { + in, out := &in.InitQuery, &out.InitQuery + *out = new(string) + **out = **in + } + if in.MaxConnectionsPercent != nil { + in, out := &in.MaxConnectionsPercent, &out.MaxConnectionsPercent + *out = new(float64) + **out = **in + } + if in.MaxIdleConnectionsPercent != nil { + in, out := &in.MaxIdleConnectionsPercent, &out.MaxIdleConnectionsPercent + *out = new(float64) + **out = **in + } + if in.SessionPinningFilters != nil { + in, out := &in.SessionPinningFilters, &out.SessionPinningFilters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolConfigObservation. +func (in *ConnectionPoolConfigObservation) DeepCopy() *ConnectionPoolConfigObservation { + if in == nil { + return nil + } + out := new(ConnectionPoolConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionPoolConfigParameters) DeepCopyInto(out *ConnectionPoolConfigParameters) { + *out = *in + if in.ConnectionBorrowTimeout != nil { + in, out := &in.ConnectionBorrowTimeout, &out.ConnectionBorrowTimeout + *out = new(float64) + **out = **in + } + if in.InitQuery != nil { + in, out := &in.InitQuery, &out.InitQuery + *out = new(string) + **out = **in + } + if in.MaxConnectionsPercent != nil { + in, out := &in.MaxConnectionsPercent, &out.MaxConnectionsPercent + *out = new(float64) + **out = **in + } + if in.MaxIdleConnectionsPercent != nil { + in, out := &in.MaxIdleConnectionsPercent, &out.MaxIdleConnectionsPercent + *out = new(float64) + **out = **in + } + if in.SessionPinningFilters != nil { + in, out := &in.SessionPinningFilters, &out.SessionPinningFilters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolConfigParameters. +func (in *ConnectionPoolConfigParameters) DeepCopy() *ConnectionPoolConfigParameters { + if in == nil { + return nil + } + out := new(ConnectionPoolConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Instance) DeepCopyInto(out *Instance) { *out = *in @@ -1526,138 +3181,347 @@ func (in *InstanceSpec) DeepCopy() *InstanceSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceStatus) DeepCopyInto(out *InstanceStatus) { +func (in *InstanceStatus) DeepCopyInto(out *InstanceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus. 
+func (in *InstanceStatus) DeepCopy() *InstanceStatus { + if in == nil { + return nil + } + out := new(InstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerEndpointInitParameters) DeepCopyInto(out *ListenerEndpointInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerEndpointInitParameters. +func (in *ListenerEndpointInitParameters) DeepCopy() *ListenerEndpointInitParameters { + if in == nil { + return nil + } + out := new(ListenerEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerEndpointObservation) DeepCopyInto(out *ListenerEndpointObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerEndpointObservation. +func (in *ListenerEndpointObservation) DeepCopy() *ListenerEndpointObservation { + if in == nil { + return nil + } + out := new(ListenerEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerEndpointParameters) DeepCopyInto(out *ListenerEndpointParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerEndpointParameters. 
+func (in *ListenerEndpointParameters) DeepCopy() *ListenerEndpointParameters { + if in == nil { + return nil + } + out := new(ListenerEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserSecretInitParameters) DeepCopyInto(out *MasterUserSecretInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserSecretInitParameters. +func (in *MasterUserSecretInitParameters) DeepCopy() *MasterUserSecretInitParameters { + if in == nil { + return nil + } + out := new(MasterUserSecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserSecretObservation) DeepCopyInto(out *MasterUserSecretObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.SecretStatus != nil { + in, out := &in.SecretStatus, &out.SecretStatus + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserSecretObservation. +func (in *MasterUserSecretObservation) DeepCopy() *MasterUserSecretObservation { + if in == nil { + return nil + } + out := new(MasterUserSecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MasterUserSecretParameters) DeepCopyInto(out *MasterUserSecretParameters) { *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus. -func (in *InstanceStatus) DeepCopy() *InstanceStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserSecretParameters. +func (in *MasterUserSecretParameters) DeepCopy() *MasterUserSecretParameters { if in == nil { return nil } - out := new(InstanceStatus) + out := new(MasterUserSecretParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ListenerEndpointInitParameters) DeepCopyInto(out *ListenerEndpointInitParameters) { +func (in *ProxyDefaultTargetGroup) DeepCopyInto(out *ProxyDefaultTargetGroup) { *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerEndpointInitParameters. -func (in *ListenerEndpointInitParameters) DeepCopy() *ListenerEndpointInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyDefaultTargetGroup. +func (in *ProxyDefaultTargetGroup) DeepCopy() *ProxyDefaultTargetGroup { if in == nil { return nil } - out := new(ListenerEndpointInitParameters) + out := new(ProxyDefaultTargetGroup) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ProxyDefaultTargetGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ListenerEndpointObservation) DeepCopyInto(out *ListenerEndpointObservation) { +func (in *ProxyDefaultTargetGroupInitParameters) DeepCopyInto(out *ProxyDefaultTargetGroupInitParameters) { *out = *in - if in.Address != nil { - in, out := &in.Address, &out.Address - *out = new(string) - **out = **in + if in.ConnectionPoolConfig != nil { + in, out := &in.ConnectionPoolConfig, &out.ConnectionPoolConfig + *out = new(ConnectionPoolConfigInitParameters) + (*in).DeepCopyInto(*out) } - if in.HostedZoneID != nil { - in, out := &in.HostedZoneID, &out.HostedZoneID + if in.DBProxyName != nil { + in, out := &in.DBProxyName, &out.DBProxyName *out = new(string) **out = **in } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(float64) - **out = **in + if in.DBProxyNameRef != nil { + in, out := &in.DBProxyNameRef, &out.DBProxyNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBProxyNameSelector != nil { + in, out := &in.DBProxyNameSelector, &out.DBProxyNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerEndpointObservation. -func (in *ListenerEndpointObservation) DeepCopy() *ListenerEndpointObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyDefaultTargetGroupInitParameters. 
+func (in *ProxyDefaultTargetGroupInitParameters) DeepCopy() *ProxyDefaultTargetGroupInitParameters { if in == nil { return nil } - out := new(ListenerEndpointObservation) + out := new(ProxyDefaultTargetGroupInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ListenerEndpointParameters) DeepCopyInto(out *ListenerEndpointParameters) { +func (in *ProxyDefaultTargetGroupList) DeepCopyInto(out *ProxyDefaultTargetGroupList) { *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProxyDefaultTargetGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerEndpointParameters. -func (in *ListenerEndpointParameters) DeepCopy() *ListenerEndpointParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyDefaultTargetGroupList. +func (in *ProxyDefaultTargetGroupList) DeepCopy() *ProxyDefaultTargetGroupList { if in == nil { return nil } - out := new(ListenerEndpointParameters) + out := new(ProxyDefaultTargetGroupList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyDefaultTargetGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MasterUserSecretInitParameters) DeepCopyInto(out *MasterUserSecretInitParameters) { +func (in *ProxyDefaultTargetGroupObservation) DeepCopyInto(out *ProxyDefaultTargetGroupObservation) { *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ConnectionPoolConfig != nil { + in, out := &in.ConnectionPoolConfig, &out.ConnectionPoolConfig + *out = new(ConnectionPoolConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.DBProxyName != nil { + in, out := &in.DBProxyName, &out.DBProxyName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserSecretInitParameters. -func (in *MasterUserSecretInitParameters) DeepCopy() *MasterUserSecretInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyDefaultTargetGroupObservation. +func (in *ProxyDefaultTargetGroupObservation) DeepCopy() *ProxyDefaultTargetGroupObservation { if in == nil { return nil } - out := new(MasterUserSecretInitParameters) + out := new(ProxyDefaultTargetGroupObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MasterUserSecretObservation) DeepCopyInto(out *MasterUserSecretObservation) { +func (in *ProxyDefaultTargetGroupParameters) DeepCopyInto(out *ProxyDefaultTargetGroupParameters) { *out = *in - if in.KMSKeyID != nil { - in, out := &in.KMSKeyID, &out.KMSKeyID - *out = new(string) - **out = **in + if in.ConnectionPoolConfig != nil { + in, out := &in.ConnectionPoolConfig, &out.ConnectionPoolConfig + *out = new(ConnectionPoolConfigParameters) + (*in).DeepCopyInto(*out) } - if in.SecretArn != nil { - in, out := &in.SecretArn, &out.SecretArn + if in.DBProxyName != nil { + in, out := &in.DBProxyName, &out.DBProxyName *out = new(string) **out = **in } - if in.SecretStatus != nil { - in, out := &in.SecretStatus, &out.SecretStatus + if in.DBProxyNameRef != nil { + in, out := &in.DBProxyNameRef, &out.DBProxyNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBProxyNameSelector != nil { + in, out := &in.DBProxyNameSelector, &out.DBProxyNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserSecretObservation. -func (in *MasterUserSecretObservation) DeepCopy() *MasterUserSecretObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyDefaultTargetGroupParameters. +func (in *ProxyDefaultTargetGroupParameters) DeepCopy() *ProxyDefaultTargetGroupParameters { if in == nil { return nil } - out := new(MasterUserSecretObservation) + out := new(ProxyDefaultTargetGroupParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MasterUserSecretParameters) DeepCopyInto(out *MasterUserSecretParameters) { +func (in *ProxyDefaultTargetGroupSpec) DeepCopyInto(out *ProxyDefaultTargetGroupSpec) { *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserSecretParameters. -func (in *MasterUserSecretParameters) DeepCopy() *MasterUserSecretParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyDefaultTargetGroupSpec. +func (in *ProxyDefaultTargetGroupSpec) DeepCopy() *ProxyDefaultTargetGroupSpec { if in == nil { return nil } - out := new(MasterUserSecretParameters) + out := new(ProxyDefaultTargetGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyDefaultTargetGroupStatus) DeepCopyInto(out *ProxyDefaultTargetGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyDefaultTargetGroupStatus. +func (in *ProxyDefaultTargetGroupStatus) DeepCopy() *ProxyDefaultTargetGroupStatus { + if in == nil { + return nil + } + out := new(ProxyDefaultTargetGroupStatus) in.DeepCopyInto(out) return out } @@ -1901,3 +3765,198 @@ func (in *S3ImportParameters) DeepCopy() *S3ImportParameters { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingConfigurationInitParameters) DeepCopyInto(out *ScalingConfigurationInitParameters) { + *out = *in + if in.AutoPause != nil { + in, out := &in.AutoPause, &out.AutoPause + *out = new(bool) + **out = **in + } + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } + if in.SecondsUntilAutoPause != nil { + in, out := &in.SecondsUntilAutoPause, &out.SecondsUntilAutoPause + *out = new(float64) + **out = **in + } + if in.TimeoutAction != nil { + in, out := &in.TimeoutAction, &out.TimeoutAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfigurationInitParameters. +func (in *ScalingConfigurationInitParameters) DeepCopy() *ScalingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ScalingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingConfigurationObservation) DeepCopyInto(out *ScalingConfigurationObservation) { + *out = *in + if in.AutoPause != nil { + in, out := &in.AutoPause, &out.AutoPause + *out = new(bool) + **out = **in + } + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } + if in.SecondsUntilAutoPause != nil { + in, out := &in.SecondsUntilAutoPause, &out.SecondsUntilAutoPause + *out = new(float64) + **out = **in + } + if in.TimeoutAction != nil { + in, out := &in.TimeoutAction, &out.TimeoutAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfigurationObservation. +func (in *ScalingConfigurationObservation) DeepCopy() *ScalingConfigurationObservation { + if in == nil { + return nil + } + out := new(ScalingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingConfigurationParameters) DeepCopyInto(out *ScalingConfigurationParameters) { + *out = *in + if in.AutoPause != nil { + in, out := &in.AutoPause, &out.AutoPause + *out = new(bool) + **out = **in + } + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } + if in.SecondsUntilAutoPause != nil { + in, out := &in.SecondsUntilAutoPause, &out.SecondsUntilAutoPause + *out = new(float64) + **out = **in + } + if in.TimeoutAction != nil { + in, out := &in.TimeoutAction, &out.TimeoutAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingConfigurationParameters. +func (in *ScalingConfigurationParameters) DeepCopy() *ScalingConfigurationParameters { + if in == nil { + return nil + } + out := new(ScalingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Serverlessv2ScalingConfigurationInitParameters) DeepCopyInto(out *Serverlessv2ScalingConfigurationInitParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Serverlessv2ScalingConfigurationInitParameters. 
+func (in *Serverlessv2ScalingConfigurationInitParameters) DeepCopy() *Serverlessv2ScalingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(Serverlessv2ScalingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Serverlessv2ScalingConfigurationObservation) DeepCopyInto(out *Serverlessv2ScalingConfigurationObservation) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Serverlessv2ScalingConfigurationObservation. +func (in *Serverlessv2ScalingConfigurationObservation) DeepCopy() *Serverlessv2ScalingConfigurationObservation { + if in == nil { + return nil + } + out := new(Serverlessv2ScalingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Serverlessv2ScalingConfigurationParameters) DeepCopyInto(out *Serverlessv2ScalingConfigurationParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Serverlessv2ScalingConfigurationParameters. 
+func (in *Serverlessv2ScalingConfigurationParameters) DeepCopy() *Serverlessv2ScalingConfigurationParameters { + if in == nil { + return nil + } + out := new(Serverlessv2ScalingConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/rds/v1beta2/zz_generated.managed.go b/apis/rds/v1beta2/zz_generated.managed.go index 37ec7f5cf6..ba8aab4153 100644 --- a/apis/rds/v1beta2/zz_generated.managed.go +++ b/apis/rds/v1beta2/zz_generated.managed.go @@ -7,6 +7,66 @@ package v1beta2 import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. +func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. 
+func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this Instance. func (mg *Instance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -66,3 +126,63 @@ func (mg *Instance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetai func (mg *Instance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } + +// GetCondition of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ProxyDefaultTargetGroup. 
+func (mg *ProxyDefaultTargetGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/rds/v1beta2/zz_generated.managedlist.go b/apis/rds/v1beta2/zz_generated.managedlist.go index 28d925e649..6f5c36d91b 100644 --- a/apis/rds/v1beta2/zz_generated.managedlist.go +++ b/apis/rds/v1beta2/zz_generated.managedlist.go @@ -7,6 +7,15 @@ package v1beta2 import resource "github.com/crossplane/crossplane-runtime/pkg/resource" +// GetItems of this ClusterList. 
+func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this InstanceList. func (l *InstanceList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -15,3 +24,12 @@ func (l *InstanceList) GetItems() []resource.Managed { } return items } + +// GetItems of this ProxyDefaultTargetGroupList. +func (l *ProxyDefaultTargetGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/rds/v1beta2/zz_generated.resolvers.go b/apis/rds/v1beta2/zz_generated.resolvers.go index 51cb876376..51cbf88951 100644 --- a/apis/rds/v1beta2/zz_generated.resolvers.go +++ b/apis/rds/v1beta2/zz_generated.resolvers.go @@ -16,10 +16,335 @@ import ( common "github.com/upbound/provider-aws/config/common" client "sigs.k8s.io/controller-runtime/pkg/client" - // ResolveReferences of this Instance. + // ResolveReferences of this Cluster. 
apisresolver "github.com/upbound/provider-aws/internal/apis" ) +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "ClusterParameterGroup", "ClusterParameterGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DBClusterParameterGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DBClusterParameterGroupNameRef, + Selector: mg.Spec.ForProvider.DBClusterParameterGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DBClusterParameterGroupName") + } + mg.Spec.ForProvider.DBClusterParameterGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DBClusterParameterGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "ParameterGroup", "ParameterGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DBInstanceParameterGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DBInstanceParameterGroupNameRef, + Selector: mg.Spec.ForProvider.DBInstanceParameterGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DBInstanceParameterGroupName") + } + 
mg.Spec.ForProvider.DBInstanceParameterGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DBInstanceParameterGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "SubnetGroup", "SubnetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DBSubnetGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DBSubnetGroupNameRef, + Selector: mg.Spec.ForProvider.DBSubnetGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DBSubnetGroupName") + } + mg.Spec.ForProvider.DBSubnetGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DBSubnetGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MasterUserSecretKMSKeyID), + Extract: resource.ExtractParamPath("key_id", true), + Reference: mg.Spec.ForProvider.MasterUserSecretKMSKeyIDRef, + Selector: mg.Spec.ForProvider.MasterUserSecretKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MasterUserSecretKMSKeyID") + } + mg.Spec.ForProvider.MasterUserSecretKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MasterUserSecretKMSKeyIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.RestoreToPointInTime != nil { + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RestoreToPointInTime.SourceClusterIdentifier), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RestoreToPointInTime.SourceClusterIdentifierRef, + Selector: mg.Spec.ForProvider.RestoreToPointInTime.SourceClusterIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RestoreToPointInTime.SourceClusterIdentifier") + } + mg.Spec.ForProvider.RestoreToPointInTime.SourceClusterIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RestoreToPointInTime.SourceClusterIdentifierRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.S3Import != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, 
err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.S3Import.BucketName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.S3Import.BucketNameRef, + Selector: mg.Spec.ForProvider.S3Import.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.S3Import.BucketName") + } + mg.Spec.ForProvider.S3Import.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.S3Import.BucketNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCSecurityGroupIds") + } + mg.Spec.ForProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "ClusterParameterGroup", "ClusterParameterGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DBClusterParameterGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DBClusterParameterGroupNameRef, + Selector: 
mg.Spec.InitProvider.DBClusterParameterGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DBClusterParameterGroupName") + } + mg.Spec.InitProvider.DBClusterParameterGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DBClusterParameterGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "ParameterGroup", "ParameterGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DBInstanceParameterGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DBInstanceParameterGroupNameRef, + Selector: mg.Spec.InitProvider.DBInstanceParameterGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DBInstanceParameterGroupName") + } + mg.Spec.InitProvider.DBInstanceParameterGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DBInstanceParameterGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "SubnetGroup", "SubnetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DBSubnetGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DBSubnetGroupNameRef, + Selector: mg.Spec.InitProvider.DBSubnetGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DBSubnetGroupName") + } + 
mg.Spec.InitProvider.DBSubnetGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DBSubnetGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MasterUserSecretKMSKeyID), + Extract: resource.ExtractParamPath("key_id", true), + Reference: mg.Spec.InitProvider.MasterUserSecretKMSKeyIDRef, + Selector: mg.Spec.InitProvider.MasterUserSecretKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MasterUserSecretKMSKeyID") + } + mg.Spec.InitProvider.MasterUserSecretKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MasterUserSecretKMSKeyIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.RestoreToPointInTime != nil { + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RestoreToPointInTime.SourceClusterIdentifier), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.RestoreToPointInTime.SourceClusterIdentifierRef, + Selector: mg.Spec.InitProvider.RestoreToPointInTime.SourceClusterIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RestoreToPointInTime.SourceClusterIdentifier") + } + mg.Spec.InitProvider.RestoreToPointInTime.SourceClusterIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RestoreToPointInTime.SourceClusterIdentifierRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.S3Import != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.S3Import.BucketName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.S3Import.BucketNameRef, + Selector: mg.Spec.InitProvider.S3Import.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.S3Import.BucketName") + } + mg.Spec.InitProvider.S3Import.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.S3Import.BucketNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCSecurityGroupIds") + } + mg.Spec.InitProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this Instance. func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) error { var m xpresource.Managed var l xpresource.ManagedList @@ -297,3 +622,53 @@ func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) erro return nil } + +// ResolveReferences of this ProxyDefaultTargetGroup. +func (mg *ProxyDefaultTargetGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Proxy", "ProxyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DBProxyName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DBProxyNameRef, + Selector: mg.Spec.ForProvider.DBProxyNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DBProxyName") + } + mg.Spec.ForProvider.DBProxyName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.DBProxyNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "Proxy", "ProxyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DBProxyName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DBProxyNameRef, + Selector: mg.Spec.InitProvider.DBProxyNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DBProxyName") + } + mg.Spec.InitProvider.DBProxyName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DBProxyNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/rds/v1beta2/zz_proxydefaulttargetgroup_terraformed.go b/apis/rds/v1beta2/zz_proxydefaulttargetgroup_terraformed.go new file mode 100755 index 0000000000..e7464d2265 --- /dev/null +++ b/apis/rds/v1beta2/zz_proxydefaulttargetgroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ProxyDefaultTargetGroup +func (mg *ProxyDefaultTargetGroup) GetTerraformResourceType() string { + return "aws_db_proxy_default_target_group" +} + +// GetConnectionDetailsMapping for this ProxyDefaultTargetGroup +func (tr *ProxyDefaultTargetGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ProxyDefaultTargetGroup +func (tr *ProxyDefaultTargetGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ProxyDefaultTargetGroup +func (tr *ProxyDefaultTargetGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ProxyDefaultTargetGroup +func (tr *ProxyDefaultTargetGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ProxyDefaultTargetGroup +func (tr *ProxyDefaultTargetGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ProxyDefaultTargetGroup +func (tr *ProxyDefaultTargetGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
ProxyDefaultTargetGroup +func (tr *ProxyDefaultTargetGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ProxyDefaultTargetGroup +func (tr *ProxyDefaultTargetGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ProxyDefaultTargetGroup using its observed tfState. +// returns True if there are any spec changes for the resource. 
// LateInitialize this ProxyDefaultTargetGroup using its observed tfState.
// returns True if there are any spec changes for the resource.
func (tr *ProxyDefaultTargetGroup) LateInitialize(attrs []byte) (bool, error) {
	params := &ProxyDefaultTargetGroupParameters{}
	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
	}
	// Skip zero-valued, omitempty JSON fields for every field (CNameWildcard)
	// so empty tfState values do not overwrite unset spec fields.
	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}

	li := resource.NewGenericLateInitializer(opts...)
	return li.LateInitialize(&tr.Spec.ForProvider, params)
}

// GetTerraformSchemaVersion returns the associated Terraform schema version
func (tr *ProxyDefaultTargetGroup) GetTerraformSchemaVersion() int {
	return 0
}

// ---- patch file boundary: apis/rds/v1beta2/zz_proxydefaulttargetgroup_types.go (new file, mode 100755) ----

// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta2

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

type ConnectionPoolConfigInitParameters struct {

	// The number of seconds for a proxy to wait for a connection to become available in the connection pool. Only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.
	ConnectionBorrowTimeout *float64 `json:"connectionBorrowTimeout,omitempty" tf:"connection_borrow_timeout,omitempty"`

	// One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2.
	InitQuery *string `json:"initQuery,omitempty" tf:"init_query,omitempty"`

	// The maximum size of the connection pool for each target in a target group. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
	MaxConnectionsPercent *float64 `json:"maxConnectionsPercent,omitempty" tf:"max_connections_percent,omitempty"`

	// Controls how actively the proxy closes idle database connections in the connection pool. A high value enables the proxy to leave a high percentage of idle connections open. A low value causes the proxy to close idle client connections and return the underlying database connections to the connection pool. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
	MaxIdleConnectionsPercent *float64 `json:"maxIdleConnectionsPercent,omitempty" tf:"max_idle_connections_percent,omitempty"`

	// Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. Currently, the only allowed value is EXCLUDE_VARIABLE_SETS.
	// +listType=set
	SessionPinningFilters []*string `json:"sessionPinningFilters,omitempty" tf:"session_pinning_filters,omitempty"`
}

type ConnectionPoolConfigObservation struct {

	// The number of seconds for a proxy to wait for a connection to become available in the connection pool. Only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.
	ConnectionBorrowTimeout *float64 `json:"connectionBorrowTimeout,omitempty" tf:"connection_borrow_timeout,omitempty"`

	// One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2.
	InitQuery *string `json:"initQuery,omitempty" tf:"init_query,omitempty"`

	// The maximum size of the connection pool for each target in a target group. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
	MaxConnectionsPercent *float64 `json:"maxConnectionsPercent,omitempty" tf:"max_connections_percent,omitempty"`

	// Controls how actively the proxy closes idle database connections in the connection pool. A high value enables the proxy to leave a high percentage of idle connections open. A low value causes the proxy to close idle client connections and return the underlying database connections to the connection pool. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
	MaxIdleConnectionsPercent *float64 `json:"maxIdleConnectionsPercent,omitempty" tf:"max_idle_connections_percent,omitempty"`

	// Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. Currently, the only allowed value is EXCLUDE_VARIABLE_SETS.
	// +listType=set
	SessionPinningFilters []*string `json:"sessionPinningFilters,omitempty" tf:"session_pinning_filters,omitempty"`
}

type ConnectionPoolConfigParameters struct {

	// The number of seconds for a proxy to wait for a connection to become available in the connection pool. Only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.
	// +kubebuilder:validation:Optional
	ConnectionBorrowTimeout *float64 `json:"connectionBorrowTimeout,omitempty" tf:"connection_borrow_timeout,omitempty"`

	// One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2.
	// +kubebuilder:validation:Optional
	InitQuery *string `json:"initQuery,omitempty" tf:"init_query,omitempty"`

	// The maximum size of the connection pool for each target in a target group. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
	// +kubebuilder:validation:Optional
	MaxConnectionsPercent *float64 `json:"maxConnectionsPercent,omitempty" tf:"max_connections_percent,omitempty"`

	// Controls how actively the proxy closes idle database connections in the connection pool. A high value enables the proxy to leave a high percentage of idle connections open. A low value causes the proxy to close idle client connections and return the underlying database connections to the connection pool. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
	// +kubebuilder:validation:Optional
	MaxIdleConnectionsPercent *float64 `json:"maxIdleConnectionsPercent,omitempty" tf:"max_idle_connections_percent,omitempty"`

	// Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. Currently, the only allowed value is EXCLUDE_VARIABLE_SETS.
	// +kubebuilder:validation:Optional
	// +listType=set
	SessionPinningFilters []*string `json:"sessionPinningFilters,omitempty" tf:"session_pinning_filters,omitempty"`
}

type ProxyDefaultTargetGroupInitParameters struct {

	// The settings that determine the size and behavior of the connection pool for the target group.
	ConnectionPoolConfig *ConnectionPoolConfigInitParameters `json:"connectionPoolConfig,omitempty" tf:"connection_pool_config,omitempty"`

	// Name of the RDS DB Proxy.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Proxy
	DBProxyName *string `json:"dbProxyName,omitempty" tf:"db_proxy_name,omitempty"`

	// Reference to a Proxy in rds to populate dbProxyName.
	// +kubebuilder:validation:Optional
	DBProxyNameRef *v1.Reference `json:"dbProxyNameRef,omitempty" tf:"-"`

	// Selector for a Proxy in rds to populate dbProxyName.
	// +kubebuilder:validation:Optional
	DBProxyNameSelector *v1.Selector `json:"dbProxyNameSelector,omitempty" tf:"-"`
}

type ProxyDefaultTargetGroupObservation struct {

	// The Amazon Resource Name (ARN) representing the target group.
	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`

	// The settings that determine the size and behavior of the connection pool for the target group.
	ConnectionPoolConfig *ConnectionPoolConfigObservation `json:"connectionPoolConfig,omitempty" tf:"connection_pool_config,omitempty"`

	// Name of the RDS DB Proxy.
	DBProxyName *string `json:"dbProxyName,omitempty" tf:"db_proxy_name,omitempty"`

	// Name of the RDS DB Proxy.
	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// The name of the default target group.
	Name *string `json:"name,omitempty" tf:"name,omitempty"`
}

type ProxyDefaultTargetGroupParameters struct {

	// The settings that determine the size and behavior of the connection pool for the target group.
	// +kubebuilder:validation:Optional
	ConnectionPoolConfig *ConnectionPoolConfigParameters `json:"connectionPoolConfig,omitempty" tf:"connection_pool_config,omitempty"`

	// Name of the RDS DB Proxy.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.Proxy
	// +kubebuilder:validation:Optional
	DBProxyName *string `json:"dbProxyName,omitempty" tf:"db_proxy_name,omitempty"`

	// Reference to a Proxy in rds to populate dbProxyName.
	// +kubebuilder:validation:Optional
	DBProxyNameRef *v1.Reference `json:"dbProxyNameRef,omitempty" tf:"-"`

	// Selector for a Proxy in rds to populate dbProxyName.
	// +kubebuilder:validation:Optional
	DBProxyNameSelector *v1.Selector `json:"dbProxyNameSelector,omitempty" tf:"-"`

	// Region is the region you'd like your resource to be created in.
	// +upjet:crd:field:TFTag=-
	// +kubebuilder:validation:Required
	Region *string `json:"region" tf:"-"`
}

// ProxyDefaultTargetGroupSpec defines the desired state of ProxyDefaultTargetGroup
type ProxyDefaultTargetGroupSpec struct {
	v1.ResourceSpec `json:",inline"`
	ForProvider     ProxyDefaultTargetGroupParameters `json:"forProvider"`
	// THIS IS A BETA FIELD. It will be honored
	// unless the Management Policies feature flag is disabled.
	// InitProvider holds the same fields as ForProvider, with the exception
	// of Identifier and other resource reference fields. The fields that are
	// in InitProvider are merged into ForProvider when the resource is created.
	// The same fields are also added to the terraform ignore_changes hook, to
	// avoid updating them after creation. This is useful for fields that are
	// required on creation, but we do not desire to update them after creation,
	// for example because of an external controller is managing them, like an
	// autoscaler.
	InitProvider ProxyDefaultTargetGroupInitParameters `json:"initProvider,omitempty"`
}

// ProxyDefaultTargetGroupStatus defines the observed state of ProxyDefaultTargetGroup.
type ProxyDefaultTargetGroupStatus struct {
	v1.ResourceStatus `json:",inline"`
	AtProvider        ProxyDefaultTargetGroupObservation `json:"atProvider,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// ProxyDefaultTargetGroup is the Schema for the ProxyDefaultTargetGroups API. Manage an RDS DB proxy default target group resource.
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
type ProxyDefaultTargetGroup struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              ProxyDefaultTargetGroupSpec   `json:"spec"`
	Status            ProxyDefaultTargetGroupStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// ProxyDefaultTargetGroupList contains a list of ProxyDefaultTargetGroups
type ProxyDefaultTargetGroupList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ProxyDefaultTargetGroup `json:"items"`
}

// Repository type metadata.
var (
	// Kind and GroupVersionKind metadata registered for ProxyDefaultTargetGroup.
	ProxyDefaultTargetGroup_Kind             = "ProxyDefaultTargetGroup"
	ProxyDefaultTargetGroup_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: ProxyDefaultTargetGroup_Kind}.String()
	ProxyDefaultTargetGroup_KindAPIVersion   = ProxyDefaultTargetGroup_Kind + "." + CRDGroupVersion.String()
	ProxyDefaultTargetGroup_GroupVersionKind = CRDGroupVersion.WithKind(ProxyDefaultTargetGroup_Kind)
)

// init registers the CRD types with the package scheme builder.
func init() {
	SchemeBuilder.Register(&ProxyDefaultTargetGroup{}, &ProxyDefaultTargetGroupList{})
}

// ---- patch file boundary: apis/rds/v1beta3/zz_generated.conversion_hubs.go (new file, mode 100755) ----

// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta3

// Hub marks this type as a conversion hub.
func (tr *Instance) Hub() {}

// ---- patch file boundary: apis/rds/v1beta3/zz_generated.deepcopy.go (new file, mode 100644) ----

//go:build !ignore_autogenerated

// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by controller-gen. DO NOT EDIT.

package v1beta3

import (
	"github.com/crossplane/crossplane-runtime/apis/common/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BlueGreenUpdateInitParameters) DeepCopyInto(out *BlueGreenUpdateInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenUpdateInitParameters.
func (in *BlueGreenUpdateInitParameters) DeepCopy() *BlueGreenUpdateInitParameters {
	if in == nil {
		return nil
	}
	out := new(BlueGreenUpdateInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BlueGreenUpdateObservation) DeepCopyInto(out *BlueGreenUpdateObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenUpdateObservation.
func (in *BlueGreenUpdateObservation) DeepCopy() *BlueGreenUpdateObservation {
	if in == nil {
		return nil
	}
	out := new(BlueGreenUpdateObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BlueGreenUpdateParameters) DeepCopyInto(out *BlueGreenUpdateParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenUpdateParameters.
func (in *BlueGreenUpdateParameters) DeepCopy() *BlueGreenUpdateParameters {
	if in == nil {
		return nil
	}
	out := new(BlueGreenUpdateParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Instance) DeepCopyInto(out *Instance) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance.
+func (in *Instance) DeepCopy() *Instance { + if in == nil { + return nil + } + out := new(Instance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Instance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { + *out = *in + if in.AllocatedStorage != nil { + in, out := &in.AllocatedStorage, &out.AllocatedStorage + *out = new(float64) + **out = **in + } + if in.AllowMajorVersionUpgrade != nil { + in, out := &in.AllowMajorVersionUpgrade, &out.AllowMajorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AutoMinorVersionUpgrade != nil { + in, out := &in.AutoMinorVersionUpgrade, &out.AutoMinorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(float64) + **out = **in + } + if in.BackupTarget != nil { + in, out := &in.BackupTarget, &out.BackupTarget + *out = new(string) + **out = **in + } + if in.BackupWindow != nil { + in, out := &in.BackupWindow, &out.BackupWindow + *out = new(string) + **out = **in + } + if in.BlueGreenUpdate != nil { + in, out := &in.BlueGreenUpdate, &out.BlueGreenUpdate + *out = new(BlueGreenUpdateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CACertIdentifier != nil { + in, out := &in.CACertIdentifier, &out.CACertIdentifier + *out = new(string) + **out = **in + } + if in.CharacterSetName != nil { + in, 
out := &in.CharacterSetName, &out.CharacterSetName + *out = new(string) + **out = **in + } + if in.CopyTagsToSnapshot != nil { + in, out := &in.CopyTagsToSnapshot, &out.CopyTagsToSnapshot + *out = new(bool) + **out = **in + } + if in.CustomIAMInstanceProfile != nil { + in, out := &in.CustomIAMInstanceProfile, &out.CustomIAMInstanceProfile + *out = new(string) + **out = **in + } + if in.CustomerOwnedIPEnabled != nil { + in, out := &in.CustomerOwnedIPEnabled, &out.CustomerOwnedIPEnabled + *out = new(bool) + **out = **in + } + if in.DBName != nil { + in, out := &in.DBName, &out.DBName + *out = new(string) + **out = **in + } + if in.DBSubnetGroupName != nil { + in, out := &in.DBSubnetGroupName, &out.DBSubnetGroupName + *out = new(string) + **out = **in + } + if in.DBSubnetGroupNameRef != nil { + in, out := &in.DBSubnetGroupNameRef, &out.DBSubnetGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBSubnetGroupNameSelector != nil { + in, out := &in.DBSubnetGroupNameSelector, &out.DBSubnetGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DedicatedLogVolume != nil { + in, out := &in.DedicatedLogVolume, &out.DedicatedLogVolume + *out = new(bool) + **out = **in + } + if in.DeleteAutomatedBackups != nil { + in, out := &in.DeleteAutomatedBackups, &out.DeleteAutomatedBackups + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.DomainAuthSecretArn != nil { + in, out := &in.DomainAuthSecretArn, &out.DomainAuthSecretArn + *out = new(string) + **out = **in + } + if in.DomainDNSIps != nil { + in, out := &in.DomainDNSIps, &out.DomainDNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } 
+ if in.DomainFqdn != nil { + in, out := &in.DomainFqdn, &out.DomainFqdn + *out = new(string) + **out = **in + } + if in.DomainIAMRoleName != nil { + in, out := &in.DomainIAMRoleName, &out.DomainIAMRoleName + *out = new(string) + **out = **in + } + if in.DomainOu != nil { + in, out := &in.DomainOu, &out.DomainOu + *out = new(string) + **out = **in + } + if in.EnabledCloudwatchLogsExports != nil { + in, out := &in.EnabledCloudwatchLogsExports, &out.EnabledCloudwatchLogsExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } + if in.IdentifierPrefix != nil { + in, out := &in.IdentifierPrefix, &out.IdentifierPrefix + *out = new(string) + **out = **in + } + if in.InstanceClass != nil { + in, out := &in.InstanceClass, &out.InstanceClass + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LicenseModel != nil { + in, out := &in.LicenseModel, &out.LicenseModel + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(string) + **out = **in + } + if in.ManageMasterUserPassword != nil { + in, out := &in.ManageMasterUserPassword, &out.ManageMasterUserPassword + *out = new(bool) + **out = **in + } + if in.MasterUserSecretKMSKeyID != nil { + in, out := &in.MasterUserSecretKMSKeyID, &out.MasterUserSecretKMSKeyID + *out = new(string) + **out = **in + } + if in.MasterUserSecretKMSKeyIDRef != nil { + in, out := &in.MasterUserSecretKMSKeyIDRef, &out.MasterUserSecretKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MasterUserSecretKMSKeyIDSelector != nil { + in, out := &in.MasterUserSecretKMSKeyIDSelector, &out.MasterUserSecretKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MaxAllocatedStorage != nil { + in, out := &in.MaxAllocatedStorage, &out.MaxAllocatedStorage + *out = new(float64) + **out = **in + } + if in.MonitoringInterval != nil { + in, out := &in.MonitoringInterval, &out.MonitoringInterval + *out = new(float64) + **out = **in + } + if in.MonitoringRoleArn != nil { + in, out := &in.MonitoringRoleArn, &out.MonitoringRoleArn + *out = new(string) + **out = **in + } + if in.MonitoringRoleArnRef != nil { + in, out := &in.MonitoringRoleArnRef, &out.MonitoringRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MonitoringRoleArnSelector != nil { + in, out := &in.MonitoringRoleArnSelector, &out.MonitoringRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MultiAz != nil { + in, out := &in.MultiAz, &out.MultiAz + *out = new(bool) + **out = **in + } + if in.NcharCharacterSetName != nil { + in, out := &in.NcharCharacterSetName, &out.NcharCharacterSetName + *out = new(string) + **out = **in + } + if 
in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.OptionGroupName != nil { + in, out := &in.OptionGroupName, &out.OptionGroupName + *out = new(string) + **out = **in + } + if in.ParameterGroupName != nil { + in, out := &in.ParameterGroupName, &out.ParameterGroupName + *out = new(string) + **out = **in + } + if in.ParameterGroupNameRef != nil { + in, out := &in.ParameterGroupNameRef, &out.ParameterGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ParameterGroupNameSelector != nil { + in, out := &in.ParameterGroupNameSelector, &out.ParameterGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PerformanceInsightsEnabled != nil { + in, out := &in.PerformanceInsightsEnabled, &out.PerformanceInsightsEnabled + *out = new(bool) + **out = **in + } + if in.PerformanceInsightsKMSKeyID != nil { + in, out := &in.PerformanceInsightsKMSKeyID, &out.PerformanceInsightsKMSKeyID + *out = new(string) + **out = **in + } + if in.PerformanceInsightsRetentionPeriod != nil { + in, out := &in.PerformanceInsightsRetentionPeriod, &out.PerformanceInsightsRetentionPeriod + *out = new(float64) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PubliclyAccessible != nil { + in, out := &in.PubliclyAccessible, &out.PubliclyAccessible + *out = new(bool) + **out = **in + } + if in.ReplicaMode != nil { + in, out := &in.ReplicaMode, &out.ReplicaMode + *out = new(string) + **out = **in + } + if in.ReplicateSourceDB != nil { + in, out := &in.ReplicateSourceDB, &out.ReplicateSourceDB + *out = new(string) + **out = **in + } + if in.ReplicateSourceDBRef != nil { + in, out := &in.ReplicateSourceDBRef, &out.ReplicateSourceDBRef + *out = new(v1.Reference) + 
(*in).DeepCopyInto(*out) + } + if in.ReplicateSourceDBSelector != nil { + in, out := &in.ReplicateSourceDBSelector, &out.ReplicateSourceDBSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RestoreToPointInTime != nil { + in, out := &in.RestoreToPointInTime, &out.RestoreToPointInTime + *out = new(RestoreToPointInTimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Import != nil { + in, out := &in.S3Import, &out.S3Import + *out = new(S3ImportInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if in.StorageThroughput != nil { + in, out := &in.StorageThroughput, &out.StorageThroughput + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := 
&in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceInitParameters. +func (in *InstanceInitParameters) DeepCopy() *InstanceInitParameters { + if in == nil { + return nil + } + out := new(InstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceList) DeepCopyInto(out *InstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Instance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceList. +func (in *InstanceList) DeepCopy() *InstanceList { + if in == nil { + return nil + } + out := new(InstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.AllocatedStorage != nil { + in, out := &in.AllocatedStorage, &out.AllocatedStorage + *out = new(float64) + **out = **in + } + if in.AllowMajorVersionUpgrade != nil { + in, out := &in.AllowMajorVersionUpgrade, &out.AllowMajorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutoMinorVersionUpgrade != nil { + in, out := &in.AutoMinorVersionUpgrade, &out.AutoMinorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(float64) + **out = **in + } + if in.BackupTarget != nil { + in, out := &in.BackupTarget, &out.BackupTarget + *out = new(string) + **out = **in + } + if in.BackupWindow != nil { + in, out := &in.BackupWindow, &out.BackupWindow + *out = new(string) + **out = **in + } + if in.BlueGreenUpdate != nil { + in, out := &in.BlueGreenUpdate, &out.BlueGreenUpdate + *out = new(BlueGreenUpdateObservation) + (*in).DeepCopyInto(*out) + } + if in.CACertIdentifier != nil { + in, out := &in.CACertIdentifier, &out.CACertIdentifier + *out = new(string) + **out = **in + } + if in.CharacterSetName != nil { + in, out := &in.CharacterSetName, &out.CharacterSetName + *out = new(string) + **out = **in + } + if in.CopyTagsToSnapshot != nil { + in, out := &in.CopyTagsToSnapshot, &out.CopyTagsToSnapshot + *out = new(bool) + **out = **in + } + if in.CustomIAMInstanceProfile != nil { + in, out := 
&in.CustomIAMInstanceProfile, &out.CustomIAMInstanceProfile + *out = new(string) + **out = **in + } + if in.CustomerOwnedIPEnabled != nil { + in, out := &in.CustomerOwnedIPEnabled, &out.CustomerOwnedIPEnabled + *out = new(bool) + **out = **in + } + if in.DBName != nil { + in, out := &in.DBName, &out.DBName + *out = new(string) + **out = **in + } + if in.DBSubnetGroupName != nil { + in, out := &in.DBSubnetGroupName, &out.DBSubnetGroupName + *out = new(string) + **out = **in + } + if in.DedicatedLogVolume != nil { + in, out := &in.DedicatedLogVolume, &out.DedicatedLogVolume + *out = new(bool) + **out = **in + } + if in.DeleteAutomatedBackups != nil { + in, out := &in.DeleteAutomatedBackups, &out.DeleteAutomatedBackups + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.DomainAuthSecretArn != nil { + in, out := &in.DomainAuthSecretArn, &out.DomainAuthSecretArn + *out = new(string) + **out = **in + } + if in.DomainDNSIps != nil { + in, out := &in.DomainDNSIps, &out.DomainDNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainFqdn != nil { + in, out := &in.DomainFqdn, &out.DomainFqdn + *out = new(string) + **out = **in + } + if in.DomainIAMRoleName != nil { + in, out := &in.DomainIAMRoleName, &out.DomainIAMRoleName + *out = new(string) + **out = **in + } + if in.DomainOu != nil { + in, out := &in.DomainOu, &out.DomainOu + *out = new(string) + **out = **in + } + if in.EnabledCloudwatchLogsExports != nil { + in, out := &in.EnabledCloudwatchLogsExports, &out.EnabledCloudwatchLogsExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = 
new(string) + **out = **in + } + } + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.EngineVersionActual != nil { + in, out := &in.EngineVersionActual, &out.EngineVersionActual + *out = new(string) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } + if in.IdentifierPrefix != nil { + in, out := &in.IdentifierPrefix, &out.IdentifierPrefix + *out = new(string) + **out = **in + } + if in.InstanceClass != nil { + in, out := &in.InstanceClass, &out.InstanceClass + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LatestRestorableTime != nil { + in, out := &in.LatestRestorableTime, &out.LatestRestorableTime + *out = new(string) + **out = **in + } + if in.LicenseModel != nil { + in, out := &in.LicenseModel, &out.LicenseModel + *out = new(string) + **out = **in + } + if in.ListenerEndpoint != nil { + in, out := &in.ListenerEndpoint, &out.ListenerEndpoint + *out = 
make([]ListenerEndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(string) + **out = **in + } + if in.ManageMasterUserPassword != nil { + in, out := &in.ManageMasterUserPassword, &out.ManageMasterUserPassword + *out = new(bool) + **out = **in + } + if in.MasterUserSecret != nil { + in, out := &in.MasterUserSecret, &out.MasterUserSecret + *out = make([]MasterUserSecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterUserSecretKMSKeyID != nil { + in, out := &in.MasterUserSecretKMSKeyID, &out.MasterUserSecretKMSKeyID + *out = new(string) + **out = **in + } + if in.MaxAllocatedStorage != nil { + in, out := &in.MaxAllocatedStorage, &out.MaxAllocatedStorage + *out = new(float64) + **out = **in + } + if in.MonitoringInterval != nil { + in, out := &in.MonitoringInterval, &out.MonitoringInterval + *out = new(float64) + **out = **in + } + if in.MonitoringRoleArn != nil { + in, out := &in.MonitoringRoleArn, &out.MonitoringRoleArn + *out = new(string) + **out = **in + } + if in.MultiAz != nil { + in, out := &in.MultiAz, &out.MultiAz + *out = new(bool) + **out = **in + } + if in.NcharCharacterSetName != nil { + in, out := &in.NcharCharacterSetName, &out.NcharCharacterSetName + *out = new(string) + **out = **in + } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.OptionGroupName != nil { + in, out := &in.OptionGroupName, &out.OptionGroupName + *out = new(string) + **out = **in + } + if in.ParameterGroupName != nil { + in, out := &in.ParameterGroupName, &out.ParameterGroupName + *out = new(string) + **out = **in + } + if in.PerformanceInsightsEnabled != nil { + in, out := &in.PerformanceInsightsEnabled, &out.PerformanceInsightsEnabled + *out = new(bool) + **out = **in + } + if 
in.PerformanceInsightsKMSKeyID != nil { + in, out := &in.PerformanceInsightsKMSKeyID, &out.PerformanceInsightsKMSKeyID + *out = new(string) + **out = **in + } + if in.PerformanceInsightsRetentionPeriod != nil { + in, out := &in.PerformanceInsightsRetentionPeriod, &out.PerformanceInsightsRetentionPeriod + *out = new(float64) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PubliclyAccessible != nil { + in, out := &in.PubliclyAccessible, &out.PubliclyAccessible + *out = new(bool) + **out = **in + } + if in.ReplicaMode != nil { + in, out := &in.ReplicaMode, &out.ReplicaMode + *out = new(string) + **out = **in + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ReplicateSourceDB != nil { + in, out := &in.ReplicateSourceDB, &out.ReplicateSourceDB + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.RestoreToPointInTime != nil { + in, out := &in.RestoreToPointInTime, &out.RestoreToPointInTime + *out = new(RestoreToPointInTimeObservation) + (*in).DeepCopyInto(*out) + } + if in.S3Import != nil { + in, out := &in.S3Import, &out.S3Import + *out = new(S3ImportObservation) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if 
in.StorageThroughput != nil { + in, out := &in.StorageThroughput, &out.StorageThroughput + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceObservation. +func (in *InstanceObservation) DeepCopy() *InstanceObservation { + if in == nil { + return nil + } + out := new(InstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { + *out = *in + if in.AllocatedStorage != nil { + in, out := &in.AllocatedStorage, &out.AllocatedStorage + *out = new(float64) + **out = **in + } + if in.AllowMajorVersionUpgrade != nil { + in, out := &in.AllowMajorVersionUpgrade, &out.AllowMajorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AutoGeneratePassword != nil { + in, out := &in.AutoGeneratePassword, &out.AutoGeneratePassword + *out = new(bool) + **out = **in + } + if in.AutoMinorVersionUpgrade != nil { + in, out := &in.AutoMinorVersionUpgrade, &out.AutoMinorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.BackupRetentionPeriod != nil { + in, out := &in.BackupRetentionPeriod, &out.BackupRetentionPeriod + *out = new(float64) + **out = **in + } + if in.BackupTarget != nil { + in, out := &in.BackupTarget, &out.BackupTarget + *out = new(string) + **out = **in + } + if in.BackupWindow != nil { + in, out := &in.BackupWindow, &out.BackupWindow + *out = new(string) + **out = **in + } + if in.BlueGreenUpdate != nil { + in, out := &in.BlueGreenUpdate, &out.BlueGreenUpdate + *out = new(BlueGreenUpdateParameters) + (*in).DeepCopyInto(*out) + } + if in.CACertIdentifier != nil { + in, out := &in.CACertIdentifier, &out.CACertIdentifier + *out = new(string) + **out = **in + } + if in.CharacterSetName != nil { + in, out := &in.CharacterSetName, &out.CharacterSetName + *out = new(string) + **out = **in + } + if in.CopyTagsToSnapshot != nil { + in, out := &in.CopyTagsToSnapshot, &out.CopyTagsToSnapshot + *out = new(bool) + **out = **in + } + if in.CustomIAMInstanceProfile != nil { + in, out := &in.CustomIAMInstanceProfile, &out.CustomIAMInstanceProfile + *out = new(string) + 
**out = **in + } + if in.CustomerOwnedIPEnabled != nil { + in, out := &in.CustomerOwnedIPEnabled, &out.CustomerOwnedIPEnabled + *out = new(bool) + **out = **in + } + if in.DBName != nil { + in, out := &in.DBName, &out.DBName + *out = new(string) + **out = **in + } + if in.DBSubnetGroupName != nil { + in, out := &in.DBSubnetGroupName, &out.DBSubnetGroupName + *out = new(string) + **out = **in + } + if in.DBSubnetGroupNameRef != nil { + in, out := &in.DBSubnetGroupNameRef, &out.DBSubnetGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DBSubnetGroupNameSelector != nil { + in, out := &in.DBSubnetGroupNameSelector, &out.DBSubnetGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DedicatedLogVolume != nil { + in, out := &in.DedicatedLogVolume, &out.DedicatedLogVolume + *out = new(bool) + **out = **in + } + if in.DeleteAutomatedBackups != nil { + in, out := &in.DeleteAutomatedBackups, &out.DeleteAutomatedBackups + *out = new(bool) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.DomainAuthSecretArn != nil { + in, out := &in.DomainAuthSecretArn, &out.DomainAuthSecretArn + *out = new(string) + **out = **in + } + if in.DomainDNSIps != nil { + in, out := &in.DomainDNSIps, &out.DomainDNSIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainFqdn != nil { + in, out := &in.DomainFqdn, &out.DomainFqdn + *out = new(string) + **out = **in + } + if in.DomainIAMRoleName != nil { + in, out := &in.DomainIAMRoleName, &out.DomainIAMRoleName + *out = new(string) + **out = **in + } + if in.DomainOu != nil { + in, out := &in.DomainOu, &out.DomainOu + *out = new(string) + **out = **in + } + if 
in.EnabledCloudwatchLogsExports != nil { + in, out := &in.EnabledCloudwatchLogsExports, &out.EnabledCloudwatchLogsExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.IAMDatabaseAuthenticationEnabled != nil { + in, out := &in.IAMDatabaseAuthenticationEnabled, &out.IAMDatabaseAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } + if in.IdentifierPrefix != nil { + in, out := &in.IdentifierPrefix, &out.IdentifierPrefix + *out = new(string) + **out = **in + } + if in.InstanceClass != nil { + in, out := &in.InstanceClass, &out.InstanceClass + *out = new(string) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LicenseModel != nil { + in, out := &in.LicenseModel, &out.LicenseModel + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(string) + **out = **in + } + if in.ManageMasterUserPassword != nil { + in, out := 
&in.ManageMasterUserPassword, &out.ManageMasterUserPassword + *out = new(bool) + **out = **in + } + if in.MasterUserSecretKMSKeyID != nil { + in, out := &in.MasterUserSecretKMSKeyID, &out.MasterUserSecretKMSKeyID + *out = new(string) + **out = **in + } + if in.MasterUserSecretKMSKeyIDRef != nil { + in, out := &in.MasterUserSecretKMSKeyIDRef, &out.MasterUserSecretKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MasterUserSecretKMSKeyIDSelector != nil { + in, out := &in.MasterUserSecretKMSKeyIDSelector, &out.MasterUserSecretKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MaxAllocatedStorage != nil { + in, out := &in.MaxAllocatedStorage, &out.MaxAllocatedStorage + *out = new(float64) + **out = **in + } + if in.MonitoringInterval != nil { + in, out := &in.MonitoringInterval, &out.MonitoringInterval + *out = new(float64) + **out = **in + } + if in.MonitoringRoleArn != nil { + in, out := &in.MonitoringRoleArn, &out.MonitoringRoleArn + *out = new(string) + **out = **in + } + if in.MonitoringRoleArnRef != nil { + in, out := &in.MonitoringRoleArnRef, &out.MonitoringRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MonitoringRoleArnSelector != nil { + in, out := &in.MonitoringRoleArnSelector, &out.MonitoringRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MultiAz != nil { + in, out := &in.MultiAz, &out.MultiAz + *out = new(bool) + **out = **in + } + if in.NcharCharacterSetName != nil { + in, out := &in.NcharCharacterSetName, &out.NcharCharacterSetName + *out = new(string) + **out = **in + } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.OptionGroupName != nil { + in, out := &in.OptionGroupName, &out.OptionGroupName + *out = new(string) + **out = **in + } + if in.ParameterGroupName != nil { + in, out := &in.ParameterGroupName, &out.ParameterGroupName + *out = new(string) + **out = 
**in + } + if in.ParameterGroupNameRef != nil { + in, out := &in.ParameterGroupNameRef, &out.ParameterGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ParameterGroupNameSelector != nil { + in, out := &in.ParameterGroupNameSelector, &out.ParameterGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PerformanceInsightsEnabled != nil { + in, out := &in.PerformanceInsightsEnabled, &out.PerformanceInsightsEnabled + *out = new(bool) + **out = **in + } + if in.PerformanceInsightsKMSKeyID != nil { + in, out := &in.PerformanceInsightsKMSKeyID, &out.PerformanceInsightsKMSKeyID + *out = new(string) + **out = **in + } + if in.PerformanceInsightsRetentionPeriod != nil { + in, out := &in.PerformanceInsightsRetentionPeriod, &out.PerformanceInsightsRetentionPeriod + *out = new(float64) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PubliclyAccessible != nil { + in, out := &in.PubliclyAccessible, &out.PubliclyAccessible + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReplicaMode != nil { + in, out := &in.ReplicaMode, &out.ReplicaMode + *out = new(string) + **out = **in + } + if in.ReplicateSourceDB != nil { + in, out := &in.ReplicateSourceDB, &out.ReplicateSourceDB + *out = new(string) + **out = **in + } + if in.ReplicateSourceDBRef != nil { + in, out := &in.ReplicateSourceDBRef, &out.ReplicateSourceDBRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ReplicateSourceDBSelector != nil { + in, out := &in.ReplicateSourceDBSelector, &out.ReplicateSourceDBSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RestoreToPointInTime != nil { + in, out := 
&in.RestoreToPointInTime, &out.RestoreToPointInTime + *out = new(RestoreToPointInTimeParameters) + (*in).DeepCopyInto(*out) + } + if in.S3Import != nil { + in, out := &in.S3Import, &out.S3Import + *out = new(S3ImportParameters) + (*in).DeepCopyInto(*out) + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.StorageEncrypted != nil { + in, out := &in.StorageEncrypted, &out.StorageEncrypted + *out = new(bool) + **out = **in + } + if in.StorageThroughput != nil { + in, out := &in.StorageThroughput, &out.StorageThroughput + *out = new(float64) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if 
(*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceParameters. +func (in *InstanceParameters) DeepCopy() *InstanceParameters { + if in == nil { + return nil + } + out := new(InstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSpec) DeepCopyInto(out *InstanceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSpec. +func (in *InstanceSpec) DeepCopy() *InstanceSpec { + if in == nil { + return nil + } + out := new(InstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceStatus) DeepCopyInto(out *InstanceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus. +func (in *InstanceStatus) DeepCopy() *InstanceStatus { + if in == nil { + return nil + } + out := new(InstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerEndpointInitParameters) DeepCopyInto(out *ListenerEndpointInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerEndpointInitParameters. 
+func (in *ListenerEndpointInitParameters) DeepCopy() *ListenerEndpointInitParameters { + if in == nil { + return nil + } + out := new(ListenerEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerEndpointObservation) DeepCopyInto(out *ListenerEndpointObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerEndpointObservation. +func (in *ListenerEndpointObservation) DeepCopy() *ListenerEndpointObservation { + if in == nil { + return nil + } + out := new(ListenerEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerEndpointParameters) DeepCopyInto(out *ListenerEndpointParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerEndpointParameters. +func (in *ListenerEndpointParameters) DeepCopy() *ListenerEndpointParameters { + if in == nil { + return nil + } + out := new(ListenerEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserSecretInitParameters) DeepCopyInto(out *MasterUserSecretInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserSecretInitParameters. 
+func (in *MasterUserSecretInitParameters) DeepCopy() *MasterUserSecretInitParameters { + if in == nil { + return nil + } + out := new(MasterUserSecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserSecretObservation) DeepCopyInto(out *MasterUserSecretObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.SecretStatus != nil { + in, out := &in.SecretStatus, &out.SecretStatus + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserSecretObservation. +func (in *MasterUserSecretObservation) DeepCopy() *MasterUserSecretObservation { + if in == nil { + return nil + } + out := new(MasterUserSecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterUserSecretParameters) DeepCopyInto(out *MasterUserSecretParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterUserSecretParameters. +func (in *MasterUserSecretParameters) DeepCopy() *MasterUserSecretParameters { + if in == nil { + return nil + } + out := new(MasterUserSecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreToPointInTimeInitParameters) DeepCopyInto(out *RestoreToPointInTimeInitParameters) { + *out = *in + if in.RestoreTime != nil { + in, out := &in.RestoreTime, &out.RestoreTime + *out = new(string) + **out = **in + } + if in.SourceDBInstanceAutomatedBackupsArn != nil { + in, out := &in.SourceDBInstanceAutomatedBackupsArn, &out.SourceDBInstanceAutomatedBackupsArn + *out = new(string) + **out = **in + } + if in.SourceDBInstanceIdentifier != nil { + in, out := &in.SourceDBInstanceIdentifier, &out.SourceDBInstanceIdentifier + *out = new(string) + **out = **in + } + if in.SourceDbiResourceID != nil { + in, out := &in.SourceDbiResourceID, &out.SourceDbiResourceID + *out = new(string) + **out = **in + } + if in.UseLatestRestorableTime != nil { + in, out := &in.UseLatestRestorableTime, &out.UseLatestRestorableTime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreToPointInTimeInitParameters. +func (in *RestoreToPointInTimeInitParameters) DeepCopy() *RestoreToPointInTimeInitParameters { + if in == nil { + return nil + } + out := new(RestoreToPointInTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreToPointInTimeObservation) DeepCopyInto(out *RestoreToPointInTimeObservation) { + *out = *in + if in.RestoreTime != nil { + in, out := &in.RestoreTime, &out.RestoreTime + *out = new(string) + **out = **in + } + if in.SourceDBInstanceAutomatedBackupsArn != nil { + in, out := &in.SourceDBInstanceAutomatedBackupsArn, &out.SourceDBInstanceAutomatedBackupsArn + *out = new(string) + **out = **in + } + if in.SourceDBInstanceIdentifier != nil { + in, out := &in.SourceDBInstanceIdentifier, &out.SourceDBInstanceIdentifier + *out = new(string) + **out = **in + } + if in.SourceDbiResourceID != nil { + in, out := &in.SourceDbiResourceID, &out.SourceDbiResourceID + *out = new(string) + **out = **in + } + if in.UseLatestRestorableTime != nil { + in, out := &in.UseLatestRestorableTime, &out.UseLatestRestorableTime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreToPointInTimeObservation. +func (in *RestoreToPointInTimeObservation) DeepCopy() *RestoreToPointInTimeObservation { + if in == nil { + return nil + } + out := new(RestoreToPointInTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreToPointInTimeParameters) DeepCopyInto(out *RestoreToPointInTimeParameters) { + *out = *in + if in.RestoreTime != nil { + in, out := &in.RestoreTime, &out.RestoreTime + *out = new(string) + **out = **in + } + if in.SourceDBInstanceAutomatedBackupsArn != nil { + in, out := &in.SourceDBInstanceAutomatedBackupsArn, &out.SourceDBInstanceAutomatedBackupsArn + *out = new(string) + **out = **in + } + if in.SourceDBInstanceIdentifier != nil { + in, out := &in.SourceDBInstanceIdentifier, &out.SourceDBInstanceIdentifier + *out = new(string) + **out = **in + } + if in.SourceDbiResourceID != nil { + in, out := &in.SourceDbiResourceID, &out.SourceDbiResourceID + *out = new(string) + **out = **in + } + if in.UseLatestRestorableTime != nil { + in, out := &in.UseLatestRestorableTime, &out.UseLatestRestorableTime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreToPointInTimeParameters. +func (in *RestoreToPointInTimeParameters) DeepCopy() *RestoreToPointInTimeParameters { + if in == nil { + return nil + } + out := new(RestoreToPointInTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ImportInitParameters) DeepCopyInto(out *S3ImportInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.IngestionRole != nil { + in, out := &in.IngestionRole, &out.IngestionRole + *out = new(string) + **out = **in + } + if in.SourceEngine != nil { + in, out := &in.SourceEngine, &out.SourceEngine + *out = new(string) + **out = **in + } + if in.SourceEngineVersion != nil { + in, out := &in.SourceEngineVersion, &out.SourceEngineVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ImportInitParameters. +func (in *S3ImportInitParameters) DeepCopy() *S3ImportInitParameters { + if in == nil { + return nil + } + out := new(S3ImportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ImportObservation) DeepCopyInto(out *S3ImportObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.IngestionRole != nil { + in, out := &in.IngestionRole, &out.IngestionRole + *out = new(string) + **out = **in + } + if in.SourceEngine != nil { + in, out := &in.SourceEngine, &out.SourceEngine + *out = new(string) + **out = **in + } + if in.SourceEngineVersion != nil { + in, out := &in.SourceEngineVersion, &out.SourceEngineVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ImportObservation. 
+func (in *S3ImportObservation) DeepCopy() *S3ImportObservation { + if in == nil { + return nil + } + out := new(S3ImportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ImportParameters) DeepCopyInto(out *S3ImportParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketPrefix != nil { + in, out := &in.BucketPrefix, &out.BucketPrefix + *out = new(string) + **out = **in + } + if in.IngestionRole != nil { + in, out := &in.IngestionRole, &out.IngestionRole + *out = new(string) + **out = **in + } + if in.SourceEngine != nil { + in, out := &in.SourceEngine, &out.SourceEngine + *out = new(string) + **out = **in + } + if in.SourceEngineVersion != nil { + in, out := &in.SourceEngineVersion, &out.SourceEngineVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ImportParameters. +func (in *S3ImportParameters) DeepCopy() *S3ImportParameters { + if in == nil { + return nil + } + out := new(S3ImportParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/rds/v1beta3/zz_generated.managed.go b/apis/rds/v1beta3/zz_generated.managed.go new file mode 100644 index 0000000000..7f13dfe900 --- /dev/null +++ b/apis/rds/v1beta3/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta3 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Instance. +func (mg *Instance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Instance. 
+func (mg *Instance) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Instance. +func (mg *Instance) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Instance. +func (mg *Instance) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Instance. +func (mg *Instance) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Instance. +func (mg *Instance) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Instance. +func (mg *Instance) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Instance. +func (mg *Instance) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Instance. +func (mg *Instance) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Instance. +func (mg *Instance) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Instance. +func (mg *Instance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Instance. 
+func (mg *Instance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/rds/v1beta3/zz_generated.managedlist.go b/apis/rds/v1beta3/zz_generated.managedlist.go new file mode 100644 index 0000000000..0fcfacd0a5 --- /dev/null +++ b/apis/rds/v1beta3/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta3 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this InstanceList. +func (l *InstanceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/rds/v1beta3/zz_generated.resolvers.go b/apis/rds/v1beta3/zz_generated.resolvers.go new file mode 100644 index 0000000000..154e21bec6 --- /dev/null +++ b/apis/rds/v1beta3/zz_generated.resolvers.go @@ -0,0 +1,299 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Instance. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "SubnetGroup", "SubnetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DBSubnetGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DBSubnetGroupNameRef, + Selector: mg.Spec.ForProvider.DBSubnetGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DBSubnetGroupName") + } + mg.Spec.ForProvider.DBSubnetGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DBSubnetGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MasterUserSecretKMSKeyID), + Extract: resource.ExtractParamPath("key_id", true), + Reference: mg.Spec.ForProvider.MasterUserSecretKMSKeyIDRef, + Selector: mg.Spec.ForProvider.MasterUserSecretKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MasterUserSecretKMSKeyID") + } + mg.Spec.ForProvider.MasterUserSecretKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MasterUserSecretKMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MonitoringRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.MonitoringRoleArnRef, + Selector: mg.Spec.ForProvider.MonitoringRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MonitoringRoleArn") + } + mg.Spec.ForProvider.MonitoringRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MonitoringRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "ParameterGroup", "ParameterGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ParameterGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ParameterGroupNameRef, + Selector: mg.Spec.ForProvider.ParameterGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ParameterGroupName") + } + mg.Spec.ForProvider.ParameterGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ParameterGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ReplicateSourceDB), + Extract: resource.ExtractParamPath("identifier", false), + Reference: mg.Spec.ForProvider.ReplicateSourceDBRef, + Selector: mg.Spec.ForProvider.ReplicateSourceDBSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ReplicateSourceDB") + } + mg.Spec.ForProvider.ReplicateSourceDB = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ReplicateSourceDBRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCSecurityGroupIDSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCSecurityGroupIds") + } + mg.Spec.ForProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "SubnetGroup", "SubnetGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DBSubnetGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DBSubnetGroupNameRef, + Selector: mg.Spec.InitProvider.DBSubnetGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DBSubnetGroupName") + } + mg.Spec.InitProvider.DBSubnetGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DBSubnetGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", 
"KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MasterUserSecretKMSKeyID), + Extract: resource.ExtractParamPath("key_id", true), + Reference: mg.Spec.InitProvider.MasterUserSecretKMSKeyIDRef, + Selector: mg.Spec.InitProvider.MasterUserSecretKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MasterUserSecretKMSKeyID") + } + mg.Spec.InitProvider.MasterUserSecretKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MasterUserSecretKMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MonitoringRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.MonitoringRoleArnRef, + Selector: mg.Spec.InitProvider.MonitoringRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MonitoringRoleArn") + } + mg.Spec.InitProvider.MonitoringRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MonitoringRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta1", "ParameterGroup", "ParameterGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ParameterGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ParameterGroupNameRef, + Selector: mg.Spec.InitProvider.ParameterGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ParameterGroupName") + } + mg.Spec.InitProvider.ParameterGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ParameterGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("rds.aws.upbound.io", "v1beta3", "Instance", "InstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ReplicateSourceDB), + Extract: resource.ExtractParamPath("identifier", false), + Reference: mg.Spec.InitProvider.ReplicateSourceDBRef, + Selector: mg.Spec.InitProvider.ReplicateSourceDBSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ReplicateSourceDB") + } + mg.Spec.InitProvider.ReplicateSourceDB = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ReplicateSourceDBRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + 
if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCSecurityGroupIds") + } + mg.Spec.InitProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/rds/v1beta3/zz_groupversion_info.go b/apis/rds/v1beta3/zz_groupversion_info.go new file mode 100755 index 0000000000..a87bb55a84 --- /dev/null +++ b/apis/rds/v1beta3/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=rds.aws.upbound.io +// +versionName=v1beta3 +package v1beta3 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "rds.aws.upbound.io" + CRDVersion = "v1beta3" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/rds/v1beta3/zz_instance_terraformed.go b/apis/rds/v1beta3/zz_instance_terraformed.go new file mode 100755 index 0000000000..88da6e1c9a --- /dev/null +++ b/apis/rds/v1beta3/zz_instance_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta3 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Instance +func (mg *Instance) GetTerraformResourceType() string { + return "aws_db_instance" +} + +// GetConnectionDetailsMapping for this Instance +func (tr *Instance) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "passwordSecretRef"} +} + +// GetObservation of this Instance +func (tr *Instance) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Instance +func (tr *Instance) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Instance +func (tr *Instance) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Instance +func (tr *Instance) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Instance +func (tr *Instance) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Instance +func (tr *Instance) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + 
return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Instance +func (tr *Instance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Instance using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Instance) LateInitialize(attrs []byte) (bool, error) { + params := &InstanceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("DBName")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Instance) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/rds/v1beta3/zz_instance_types.go b/apis/rds/v1beta3/zz_instance_types.go new file mode 100755 index 0000000000..8a5472ddec --- /dev/null +++ b/apis/rds/v1beta3/zz_instance_types.go @@ -0,0 +1,1334 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BlueGreenUpdateInitParameters struct { + + // Enables low-downtime updates when true. + // Default is false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BlueGreenUpdateObservation struct { + + // Enables low-downtime updates when true. + // Default is false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BlueGreenUpdateParameters struct { + + // Enables low-downtime updates when true. + // Default is false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type InstanceInitParameters struct { + + // The allocated storage in gibibytes. If max_allocated_storage is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs. If replicate_source_db is set, the value is ignored during the creation of the instance. + AllocatedStorage *float64 `json:"allocatedStorage,omitempty" tf:"allocated_storage,omitempty"` + + // Indicates that major version + // upgrades are allowed. Changing this parameter does not result in an outage and + // the change is asynchronously applied as soon as possible. 
+ AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty" tf:"allow_major_version_upgrade,omitempty"` + + // Specifies whether any database modifications + // are applied immediately, or during the next maintenance window. Default is + // false. See Amazon RDS Documentation for more + // information. + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // Indicates that minor engine upgrades + // will be applied automatically to the DB instance during the maintenance window. + // Defaults to true. + AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty" tf:"auto_minor_version_upgrade,omitempty"` + + // The AZ for the RDS instance. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The days to retain backups for. + // Must be between 0 and 35. + // Default is 0. + // Must be greater than 0 if the database is used as a source for a Read Replica, + // uses low-downtime updates, + // or will use RDS Blue/Green deployments. + BackupRetentionPeriod *float64 `json:"backupRetentionPeriod,omitempty" tf:"backup_retention_period,omitempty"` + + // Specifies where automated backups and manual snapshots are stored. Possible values are region (default) and outposts. See Working with Amazon RDS on AWS Outposts for more information. + BackupTarget *string `json:"backupTarget,omitempty" tf:"backup_target,omitempty"` + + // The daily time range (in UTC) during which automated backups are created if they are enabled. + // Example: "09:46-10:16". Must not overlap with maintenance_window. + BackupWindow *string `json:"backupWindow,omitempty" tf:"backup_window,omitempty"` + + // Enables low-downtime updates using RDS Blue/Green deployments. + // See blue_green_update below. + BlueGreenUpdate *BlueGreenUpdateInitParameters `json:"blueGreenUpdate,omitempty" tf:"blue_green_update,omitempty"` + + // The identifier of the CA certificate for the DB instance. 
+ CACertIdentifier *string `json:"caCertIdentifier,omitempty" tf:"ca_cert_identifier,omitempty"` + + // The character set name to use for DB encoding in Oracle and Microsoft SQL instances (collation). + // This can't be changed. + // See Oracle Character Sets Supported in Amazon RDS or + // Server-Level Collation for Microsoft SQL Server for more information. + // Cannot be set with replicate_source_db, restore_to_point_in_time, s3_import, or snapshot_identifier. + CharacterSetName *string `json:"characterSetName,omitempty" tf:"character_set_name,omitempty"` + + // – Copy all Instance tags to snapshots. Default is false. + CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty" tf:"copy_tags_to_snapshot,omitempty"` + + // The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. + CustomIAMInstanceProfile *string `json:"customIamInstanceProfile,omitempty" tf:"custom_iam_instance_profile,omitempty"` + + // Indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance. See CoIP for RDS on Outposts for more information. + CustomerOwnedIPEnabled *bool `json:"customerOwnedIpEnabled,omitempty" tf:"customer_owned_ip_enabled,omitempty"` + + // The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance. Note that this does not apply for Oracle or SQL Server engines. See the AWS documentation for more details on what applies for those engines. If you are providing an Oracle db name, it needs to be in all upper case. Cannot be specified for a replica. + DBName *string `json:"dbName,omitempty" tf:"db_name,omitempty"` + + // Name of DB subnet group. DB instance will + // be created in the VPC associated with the DB subnet group. If unspecified, will + // be created in the default VPC, or in EC2 Classic, if available. 
When working + // with read replicas, it should be specified only if the source database + // specifies an instance in another AWS Region. See DBSubnetGroupName in API + // action CreateDBInstanceReadReplica + // for additional read replica constraints. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.SubnetGroup + DBSubnetGroupName *string `json:"dbSubnetGroupName,omitempty" tf:"db_subnet_group_name,omitempty"` + + // Reference to a SubnetGroup in rds to populate dbSubnetGroupName. + // +kubebuilder:validation:Optional + DBSubnetGroupNameRef *v1.Reference `json:"dbSubnetGroupNameRef,omitempty" tf:"-"` + + // Selector for a SubnetGroup in rds to populate dbSubnetGroupName. + // +kubebuilder:validation:Optional + DBSubnetGroupNameSelector *v1.Selector `json:"dbSubnetGroupNameSelector,omitempty" tf:"-"` + + // Use a dedicated log volume (DLV) for the DB instance. Requires Provisioned IOPS. See the AWS documentation for more details. + DedicatedLogVolume *bool `json:"dedicatedLogVolume,omitempty" tf:"dedicated_log_volume,omitempty"` + + // Specifies whether to remove automated backups immediately after the DB instance is deleted. Default is true. + DeleteAutomatedBackups *bool `json:"deleteAutomatedBackups,omitempty" tf:"delete_automated_backups,omitempty"` + + // If the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // The ID of the Directory Service Active Directory domain to create the instance in. Conflicts with domain_fqdn, domain_ou, domain_auth_secret_arn and a domain_dns_ips. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The ARN for the Secrets Manager secret with the self managed Active Directory credentials for the user joining the domain. Conflicts with domain and domain_iam_role_name. 
+ DomainAuthSecretArn *string `json:"domainAuthSecretArn,omitempty" tf:"domain_auth_secret_arn,omitempty"` + + // The IPv4 DNS IP addresses of your primary and secondary self managed Active Directory domain controllers. Two IP addresses must be provided. If there isn't a secondary domain controller, use the IP address of the primary domain controller for both entries in the list. Conflicts with domain and domain_iam_role_name. + // +listType=set + DomainDNSIps []*string `json:"domainDnsIps,omitempty" tf:"domain_dns_ips,omitempty"` + + // The fully qualified domain name (FQDN) of the self managed Active Directory domain. Conflicts with domain and domain_iam_role_name. + DomainFqdn *string `json:"domainFqdn,omitempty" tf:"domain_fqdn,omitempty"` + + // The name of the IAM role to be used when making API calls to the Directory Service. Conflicts with domain_fqdn, domain_ou, domain_auth_secret_arn and a domain_dns_ips. + DomainIAMRoleName *string `json:"domainIamRoleName,omitempty" tf:"domain_iam_role_name,omitempty"` + + // The self managed Active Directory organizational unit for your DB instance to join. Conflicts with domain and domain_iam_role_name. + DomainOu *string `json:"domainOu,omitempty" tf:"domain_ou,omitempty"` + + // Set of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. For supported values, see the EnableCloudwatchLogsExports.member.N parameter in API action CreateDBInstance. + // +listType=set + EnabledCloudwatchLogsExports []*string `json:"enabledCloudwatchLogsExports,omitempty" tf:"enabled_cloudwatch_logs_exports,omitempty"` + + // The database engine to use. For supported values, see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). 
Note that for Amazon Aurora instances the engine must match the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s engine'. For information on the difference between the available Aurora MySQL engines see Comparison in the [Amazon RDS Release Notes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraMySQLReleaseNotes/Welcome.html). + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The engine version to use. If `autoMinorVersionUpgrade` is enabled, you can provide a prefix of the version such as 5.7 (for 5.7.10). The actual engine version used is returned in the attribute `status.atProvider.engineVersionActual`. For supported values, see the EngineVersion parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). Note that for Amazon Aurora instances the engine version must match the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s engine version'. + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // The name of your final DB snapshot + // when this DB instance is deleted. Must be provided if skip_final_snapshot is + // set to false. The value must begin with a letter, only contain alphanumeric characters and hyphens, and not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database + // accounts is enabled. + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty" tf:"iam_database_authentication_enabled,omitempty"` + + // Required if restore_to_point_in_time is specified. 
+ Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` + + // Creates a unique identifier beginning with the specified prefix. Conflicts with identifier. + IdentifierPrefix *string `json:"identifierPrefix,omitempty" tf:"identifier_prefix,omitempty"` + + // The instance type of the RDS instance. + InstanceClass *string `json:"instanceClass,omitempty" tf:"instance_class,omitempty"` + + // The amount of provisioned IOPS. Setting this implies a + // storage_type of "io1". Can only be set when storage_type is "io1" or "gp3". + // Cannot be specified for gp3 storage if the allocated_storage value is below a per-engine threshold. + // See the RDS User Guide for details. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ARN for the KMS encryption key. If creating an + // encrypted replica, set this to the destination KMS ARN. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // License model information for this DB instance. Valid values for this field are as follows: + LicenseModel *string `json:"licenseModel,omitempty" tf:"license_model,omitempty"` + + // The window to perform maintenance in. + // Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". See RDS + // Maintenance Window + // docs + // for more information. 
+ MaintenanceWindow *string `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Set to true to allow RDS to manage the master user password in Secrets Manager. Cannot be set if password is provided. + ManageMasterUserPassword *bool `json:"manageMasterUserPassword,omitempty" tf:"manage_master_user_password,omitempty"` + + // The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If not specified, the default KMS key for your Amazon Web Services account is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key_id",true) + MasterUserSecretKMSKeyID *string `json:"masterUserSecretKmsKeyId,omitempty" tf:"master_user_secret_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate masterUserSecretKmsKeyId. + // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyIDRef *v1.Reference `json:"masterUserSecretKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate masterUserSecretKmsKeyId. + // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyIDSelector *v1.Selector `json:"masterUserSecretKmsKeyIdSelector,omitempty" tf:"-"` + + // When configured, the upper limit to which Amazon RDS can automatically scale the storage of the DB instance. Configuring this will automatically ignore differences to allocated_storage. Must be greater than or equal to allocated_storage or 0 to disable Storage Autoscaling. + MaxAllocatedStorage *float64 `json:"maxAllocatedStorage,omitempty" tf:"max_allocated_storage,omitempty"` + + // The interval, in seconds, between points + // when Enhanced Monitoring metrics are collected for the DB instance. To disable + // collecting Enhanced Monitoring metrics, specify 0. The default is 0. 
Valid + // Values: 0, 1, 5, 10, 15, 30, 60. + MonitoringInterval *float64 `json:"monitoringInterval,omitempty" tf:"monitoring_interval,omitempty"` + + // The ARN for the IAM role that permits RDS + // to send enhanced monitoring metrics to CloudWatch Logs. You can find more + // information on the AWS + // Documentation + // what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + MonitoringRoleArn *string `json:"monitoringRoleArn,omitempty" tf:"monitoring_role_arn,omitempty"` + + // Reference to a Role in iam to populate monitoringRoleArn. + // +kubebuilder:validation:Optional + MonitoringRoleArnRef *v1.Reference `json:"monitoringRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate monitoringRoleArn. + // +kubebuilder:validation:Optional + MonitoringRoleArnSelector *v1.Selector `json:"monitoringRoleArnSelector,omitempty" tf:"-"` + + // Specifies if the RDS instance is multi-AZ + MultiAz *bool `json:"multiAz,omitempty" tf:"multi_az,omitempty"` + + // The national character set is used in the NCHAR, NVARCHAR2, and NCLOB data types for Oracle instances. This can't be changed. See Oracle Character Sets + // Supported in Amazon RDS. + NcharCharacterSetName *string `json:"ncharCharacterSetName,omitempty" tf:"nchar_character_set_name,omitempty"` + + // The network type of the DB instance. Valid values: IPV4, DUAL. + NetworkType *string `json:"networkType,omitempty" tf:"network_type,omitempty"` + + // Name of the DB option group to associate. + OptionGroupName *string `json:"optionGroupName,omitempty" tf:"option_group_name,omitempty"` + + // Name of the DB parameter group to associate. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.ParameterGroup + ParameterGroupName *string `json:"parameterGroupName,omitempty" tf:"parameter_group_name,omitempty"` + + // Reference to a ParameterGroup in rds to populate parameterGroupName. + // +kubebuilder:validation:Optional + ParameterGroupNameRef *v1.Reference `json:"parameterGroupNameRef,omitempty" tf:"-"` + + // Selector for a ParameterGroup in rds to populate parameterGroupName. + // +kubebuilder:validation:Optional + ParameterGroupNameSelector *v1.Selector `json:"parameterGroupNameSelector,omitempty" tf:"-"` + + // Password for the master DB user. Note that this may show up in + // logs, and it will be stored in the state file. Cannot be set if manage_master_user_password is set to true. + // Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // Specifies whether Performance Insights are enabled. Defaults to false. + PerformanceInsightsEnabled *bool `json:"performanceInsightsEnabled,omitempty" tf:"performance_insights_enabled,omitempty"` + + // The ARN for the KMS key to encrypt Performance Insights data. When specifying performance_insights_kms_key_id, performance_insights_enabled needs to be set to true. Once KMS key is set, it can never be changed. + PerformanceInsightsKMSKeyID *string `json:"performanceInsightsKmsKeyId,omitempty" tf:"performance_insights_kms_key_id,omitempty"` + + // Amount of time in days to retain Performance Insights data. Valid values are 7, 731 (2 years) or a multiple of 31. When specifying performance_insights_retention_period, performance_insights_enabled needs to be set to true. Defaults to '7'. 
+ PerformanceInsightsRetentionPeriod *float64 `json:"performanceInsightsRetentionPeriod,omitempty" tf:"performance_insights_retention_period,omitempty"` + + // The port on which the DB accepts connections. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Bool to control if instance is publicly + // accessible. Default is false. + PubliclyAccessible *bool `json:"publiclyAccessible,omitempty" tf:"publicly_accessible,omitempty"` + + // Specifies whether the replica is in either mounted or open-read-only mode. This attribute + // is only supported by Oracle instances. Oracle replicas operate in open-read-only mode unless otherwise specified. See Working with Oracle Read Replicas for more information. + ReplicaMode *string `json:"replicaMode,omitempty" tf:"replica_mode,omitempty"` + + // Specifies that this resource is a Replicate + // database, and to use this value as the source database. This correlates to the + // identifier of another Amazon RDS Database to replicate (if replicating within + // a single region) or ARN of the Amazon RDS Database to replicate (if replicating + // cross-region). Note that if you are + // creating a cross-region replica of an encrypted database you will also need to + // specify a kms_key_id. See DB Instance Replication and Working with + // PostgreSQL and MySQL Read Replicas + // for more information on using Replication. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("identifier",false) + ReplicateSourceDB *string `json:"replicateSourceDb,omitempty" tf:"replicate_source_db,omitempty"` + + // Reference to a Instance in rds to populate replicateSourceDb. + // +kubebuilder:validation:Optional + ReplicateSourceDBRef *v1.Reference `json:"replicateSourceDbRef,omitempty" tf:"-"` + + // Selector for a Instance in rds to populate replicateSourceDb. 
+ // +kubebuilder:validation:Optional + ReplicateSourceDBSelector *v1.Selector `json:"replicateSourceDbSelector,omitempty" tf:"-"` + + // A configuration block for restoring a DB instance to an arbitrary point in time. Requires the identifier argument to be set with the name of the new DB instance to be created. See Restore To Point In Time below for details. + RestoreToPointInTime *RestoreToPointInTimeInitParameters `json:"restoreToPointInTime,omitempty" tf:"restore_to_point_in_time,omitempty"` + + // Restore from a Percona Xtrabackup in S3. See Importing Data into an Amazon RDS MySQL DB Instance + S3Import *S3ImportInitParameters `json:"s3Import,omitempty" tf:"s3_import,omitempty"` + + // Determines whether a final DB snapshot is + // created before the DB instance is deleted. If true is specified, no DBSnapshot + // is created. If false is specified, a DB snapshot is created before the DB + // instance is deleted, using the value from final_snapshot_identifier. Default + // is false. + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // Specifies whether or not to create this + // database from a snapshot. This correlates to the snapshot ID you'd find in the + // RDS console, e.g: rds:production-2015-06-26-06-05. + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // Specifies whether the DB instance is + // encrypted. Note that if you are creating a cross-region read replica this field + // is ignored and you should instead declare kms_key_id with a valid ARN. The + // default is false if not specified. + StorageEncrypted *bool `json:"storageEncrypted,omitempty" tf:"storage_encrypted,omitempty"` + + // The storage throughput value for the DB instance. Can only be set when storage_type is "gp3". Cannot be specified if the allocated_storage value is below a per-engine threshold. See the RDS User Guide for details. 
+ StorageThroughput *float64 `json:"storageThroughput,omitempty" tf:"storage_throughput,omitempty"` + + // One of "standard" (magnetic), "gp2" (general + // purpose SSD), "gp3" (general purpose SSD that needs iops independently) + // or "io1" (provisioned IOPS SSD). The default is "io1" if iops is specified, + // "gp2" if not. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Time zone of the DB instance. timezone is currently + // only supported by Microsoft SQL Server. The timezone can only be set on + // creation. See MSSQL User + // Guide + // for more information. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // Username for the master DB user. Cannot be specified for a replica. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of VPC security groups to + // associate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type InstanceObservation struct { + + // The hostname of the RDS instance. See also endpoint and port. 
+ Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The allocated storage in gibibytes. If max_allocated_storage is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs. If replicate_source_db is set, the value is ignored during the creation of the instance. + AllocatedStorage *float64 `json:"allocatedStorage,omitempty" tf:"allocated_storage,omitempty"` + + // Indicates that major version + // upgrades are allowed. Changing this parameter does not result in an outage and + // the change is asynchronously applied as soon as possible. + AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty" tf:"allow_major_version_upgrade,omitempty"` + + // Specifies whether any database modifications + // are applied immediately, or during the next maintenance window. Default is + // false. See Amazon RDS Documentation for more + // information. + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // The ARN of the RDS instance. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Indicates that minor engine upgrades + // will be applied automatically to the DB instance during the maintenance window. + // Defaults to true. + AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty" tf:"auto_minor_version_upgrade,omitempty"` + + // The AZ for the RDS instance. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The days to retain backups for. + // Must be between 0 and 35. + // Default is 0. + // Must be greater than 0 if the database is used as a source for a Read Replica, + // uses low-downtime updates, + // or will use RDS Blue/Green deployments. 
+ BackupRetentionPeriod *float64 `json:"backupRetentionPeriod,omitempty" tf:"backup_retention_period,omitempty"` + + // Specifies where automated backups and manual snapshots are stored. Possible values are region (default) and outposts. See Working with Amazon RDS on AWS Outposts for more information. + BackupTarget *string `json:"backupTarget,omitempty" tf:"backup_target,omitempty"` + + // The daily time range (in UTC) during which automated backups are created if they are enabled. + // Example: "09:46-10:16". Must not overlap with maintenance_window. + BackupWindow *string `json:"backupWindow,omitempty" tf:"backup_window,omitempty"` + + // Enables low-downtime updates using RDS Blue/Green deployments. + // See blue_green_update below. + BlueGreenUpdate *BlueGreenUpdateObservation `json:"blueGreenUpdate,omitempty" tf:"blue_green_update,omitempty"` + + // The identifier of the CA certificate for the DB instance. + CACertIdentifier *string `json:"caCertIdentifier,omitempty" tf:"ca_cert_identifier,omitempty"` + + // The character set name to use for DB encoding in Oracle and Microsoft SQL instances (collation). + // This can't be changed. + // See Oracle Character Sets Supported in Amazon RDS or + // Server-Level Collation for Microsoft SQL Server for more information. + // Cannot be set with replicate_source_db, restore_to_point_in_time, s3_import, or snapshot_identifier. + CharacterSetName *string `json:"characterSetName,omitempty" tf:"character_set_name,omitempty"` + + // – Copy all Instance tags to snapshots. Default is false. + CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty" tf:"copy_tags_to_snapshot,omitempty"` + + // The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. + CustomIAMInstanceProfile *string `json:"customIamInstanceProfile,omitempty" tf:"custom_iam_instance_profile,omitempty"` + + // Indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance. 
See CoIP for RDS on Outposts for more information. + CustomerOwnedIPEnabled *bool `json:"customerOwnedIpEnabled,omitempty" tf:"customer_owned_ip_enabled,omitempty"` + + // The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance. Note that this does not apply for Oracle or SQL Server engines. See the AWS documentation for more details on what applies for those engines. If you are providing an Oracle db name, it needs to be in all upper case. Cannot be specified for a replica. + DBName *string `json:"dbName,omitempty" tf:"db_name,omitempty"` + + // Name of DB subnet group. DB instance will + // be created in the VPC associated with the DB subnet group. If unspecified, will + // be created in the default VPC, or in EC2 Classic, if available. When working + // with read replicas, it should be specified only if the source database + // specifies an instance in another AWS Region. See DBSubnetGroupName in API + // action CreateDBInstanceReadReplica + // for additional read replica constraints. + DBSubnetGroupName *string `json:"dbSubnetGroupName,omitempty" tf:"db_subnet_group_name,omitempty"` + + // Use a dedicated log volume (DLV) for the DB instance. Requires Provisioned IOPS. See the AWS documentation for more details. + DedicatedLogVolume *bool `json:"dedicatedLogVolume,omitempty" tf:"dedicated_log_volume,omitempty"` + + // Specifies whether to remove automated backups immediately after the DB instance is deleted. Default is true. + DeleteAutomatedBackups *bool `json:"deleteAutomatedBackups,omitempty" tf:"delete_automated_backups,omitempty"` + + // If the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // The ID of the Directory Service Active Directory domain to create the instance in. 
Conflicts with domain_fqdn, domain_ou, domain_auth_secret_arn and a domain_dns_ips. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The ARN for the Secrets Manager secret with the self managed Active Directory credentials for the user joining the domain. Conflicts with domain and domain_iam_role_name. + DomainAuthSecretArn *string `json:"domainAuthSecretArn,omitempty" tf:"domain_auth_secret_arn,omitempty"` + + // The IPv4 DNS IP addresses of your primary and secondary self managed Active Directory domain controllers. Two IP addresses must be provided. If there isn't a secondary domain controller, use the IP address of the primary domain controller for both entries in the list. Conflicts with domain and domain_iam_role_name. + // +listType=set + DomainDNSIps []*string `json:"domainDnsIps,omitempty" tf:"domain_dns_ips,omitempty"` + + // The fully qualified domain name (FQDN) of the self managed Active Directory domain. Conflicts with domain and domain_iam_role_name. + DomainFqdn *string `json:"domainFqdn,omitempty" tf:"domain_fqdn,omitempty"` + + // The name of the IAM role to be used when making API calls to the Directory Service. Conflicts with domain_fqdn, domain_ou, domain_auth_secret_arn and a domain_dns_ips. + DomainIAMRoleName *string `json:"domainIamRoleName,omitempty" tf:"domain_iam_role_name,omitempty"` + + // The self managed Active Directory organizational unit for your DB instance to join. Conflicts with domain and domain_iam_role_name. + DomainOu *string `json:"domainOu,omitempty" tf:"domain_ou,omitempty"` + + // Set of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. For supported values, see the EnableCloudwatchLogsExports.member.N parameter in API action CreateDBInstance. + // +listType=set + EnabledCloudwatchLogsExports []*string `json:"enabledCloudwatchLogsExports,omitempty" tf:"enabled_cloudwatch_logs_exports,omitempty"` + + // The connection endpoint in address:port format. 
+ Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The database engine to use. For supported values, see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). Note that for Amazon Aurora instances the engine must match the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s engine'. For information on the difference between the available Aurora MySQL engines see Comparison in the [Amazon RDS Release Notes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraMySQLReleaseNotes/Welcome.html). + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The engine version to use. If `autoMinorVersionUpgrade` is enabled, you can provide a prefix of the version such as 5.7 (for 5.7.10). The actual engine version used is returned in the attribute `status.atProvider.engineVersionActual`. For supported values, see the EngineVersion parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). Note that for Amazon Aurora instances the engine version must match the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s engine version'. + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // The running version of the database. + EngineVersionActual *string `json:"engineVersionActual,omitempty" tf:"engine_version_actual,omitempty"` + + // The name of your final DB snapshot + // when this DB instance is deleted. Must be provided if skip_final_snapshot is + // set to false. The value must begin with a letter, only contain alphanumeric characters and hyphens, and not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. 
+ FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // The canonical hosted zone ID of the DB instance (to be used + // in a Route 53 Alias record). + HostedZoneID *string `json:"hostedZoneId,omitempty" tf:"hosted_zone_id,omitempty"` + + // Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database + // accounts is enabled. + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty" tf:"iam_database_authentication_enabled,omitempty"` + + // RDS DBI resource ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Required if restore_to_point_in_time is specified. + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` + + // Creates a unique identifier beginning with the specified prefix. Conflicts with identifier. + IdentifierPrefix *string `json:"identifierPrefix,omitempty" tf:"identifier_prefix,omitempty"` + + // The instance type of the RDS instance. + InstanceClass *string `json:"instanceClass,omitempty" tf:"instance_class,omitempty"` + + // The amount of provisioned IOPS. Setting this implies a + // storage_type of "io1". Can only be set when storage_type is "io1" or "gp3". + // Cannot be specified for gp3 storage if the allocated_storage value is below a per-engine threshold. + // See the RDS User Guide for details. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ARN for the KMS encryption key. If creating an + // encrypted replica, set this to the destination KMS ARN. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The latest time, in UTC RFC3339 format, to which a database can be restored with point-in-time restore. + LatestRestorableTime *string `json:"latestRestorableTime,omitempty" tf:"latest_restorable_time,omitempty"` + + // License model information for this DB instance. 
Valid values for this field are as follows: + LicenseModel *string `json:"licenseModel,omitempty" tf:"license_model,omitempty"` + + // Specifies the listener connection endpoint for SQL Server Always On. See endpoint below. + ListenerEndpoint []ListenerEndpointObservation `json:"listenerEndpoint,omitempty" tf:"listener_endpoint,omitempty"` + + // The window to perform maintenance in. + // Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". See RDS + // Maintenance Window + // docs + // for more information. + MaintenanceWindow *string `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Set to true to allow RDS to manage the master user password in Secrets Manager. Cannot be set if password is provided. + ManageMasterUserPassword *bool `json:"manageMasterUserPassword,omitempty" tf:"manage_master_user_password,omitempty"` + + // A block that specifies the master user secret. Only available when manage_master_user_password is set to true. Documented below. + MasterUserSecret []MasterUserSecretObservation `json:"masterUserSecret,omitempty" tf:"master_user_secret,omitempty"` + + // The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If not specified, the default KMS key for your Amazon Web Services account is used. + MasterUserSecretKMSKeyID *string `json:"masterUserSecretKmsKeyId,omitempty" tf:"master_user_secret_kms_key_id,omitempty"` + + // When configured, the upper limit to which Amazon RDS can automatically scale the storage of the DB instance. Configuring this will automatically ignore differences to allocated_storage. Must be greater than or equal to allocated_storage or 0 to disable Storage Autoscaling. 
+ MaxAllocatedStorage *float64 `json:"maxAllocatedStorage,omitempty" tf:"max_allocated_storage,omitempty"` + + // The interval, in seconds, between points + // when Enhanced Monitoring metrics are collected for the DB instance. To disable + // collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid + // Values: 0, 1, 5, 10, 15, 30, 60. + MonitoringInterval *float64 `json:"monitoringInterval,omitempty" tf:"monitoring_interval,omitempty"` + + // The ARN for the IAM role that permits RDS + // to send enhanced monitoring metrics to CloudWatch Logs. You can find more + // information on the AWS + // Documentation + // what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances. + MonitoringRoleArn *string `json:"monitoringRoleArn,omitempty" tf:"monitoring_role_arn,omitempty"` + + // Specifies if the RDS instance is multi-AZ + MultiAz *bool `json:"multiAz,omitempty" tf:"multi_az,omitempty"` + + // The national character set is used in the NCHAR, NVARCHAR2, and NCLOB data types for Oracle instances. This can't be changed. See Oracle Character Sets + // Supported in Amazon RDS. + NcharCharacterSetName *string `json:"ncharCharacterSetName,omitempty" tf:"nchar_character_set_name,omitempty"` + + // The network type of the DB instance. Valid values: IPV4, DUAL. + NetworkType *string `json:"networkType,omitempty" tf:"network_type,omitempty"` + + // Name of the DB option group to associate. + OptionGroupName *string `json:"optionGroupName,omitempty" tf:"option_group_name,omitempty"` + + // Name of the DB parameter group to associate. + ParameterGroupName *string `json:"parameterGroupName,omitempty" tf:"parameter_group_name,omitempty"` + + // Specifies whether Performance Insights are enabled. Defaults to false. + PerformanceInsightsEnabled *bool `json:"performanceInsightsEnabled,omitempty" tf:"performance_insights_enabled,omitempty"` + + // The ARN for the KMS key to encrypt Performance Insights data. 
When specifying performance_insights_kms_key_id, performance_insights_enabled needs to be set to true. Once KMS key is set, it can never be changed. + PerformanceInsightsKMSKeyID *string `json:"performanceInsightsKmsKeyId,omitempty" tf:"performance_insights_kms_key_id,omitempty"` + + // Amount of time in days to retain Performance Insights data. Valid values are 7, 731 (2 years) or a multiple of 31. When specifying performance_insights_retention_period, performance_insights_enabled needs to be set to true. Defaults to '7'. + PerformanceInsightsRetentionPeriod *float64 `json:"performanceInsightsRetentionPeriod,omitempty" tf:"performance_insights_retention_period,omitempty"` + + // The port on which the DB accepts connections. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Bool to control if instance is publicly + // accessible. Default is false. + PubliclyAccessible *bool `json:"publiclyAccessible,omitempty" tf:"publicly_accessible,omitempty"` + + // Specifies whether the replica is in either mounted or open-read-only mode. This attribute + // is only supported by Oracle instances. Oracle replicas operate in open-read-only mode unless otherwise specified. See Working with Oracle Read Replicas for more information. + ReplicaMode *string `json:"replicaMode,omitempty" tf:"replica_mode,omitempty"` + + Replicas []*string `json:"replicas,omitempty" tf:"replicas,omitempty"` + + // Specifies that this resource is a Replicate + // database, and to use this value as the source database. This correlates to the + // identifier of another Amazon RDS Database to replicate (if replicating within + // a single region) or ARN of the Amazon RDS Database to replicate (if replicating + // cross-region). Note that if you are + // creating a cross-region replica of an encrypted database you will also need to + // specify a kms_key_id. See DB Instance Replication and Working with + // PostgreSQL and MySQL Read Replicas + // for more information on using Replication. 
+ ReplicateSourceDB *string `json:"replicateSourceDb,omitempty" tf:"replicate_source_db,omitempty"` + + // The RDS Resource ID of this instance. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // A configuration block for restoring a DB instance to an arbitrary point in time. Requires the identifier argument to be set with the name of the new DB instance to be created. See Restore To Point In Time below for details. + RestoreToPointInTime *RestoreToPointInTimeObservation `json:"restoreToPointInTime,omitempty" tf:"restore_to_point_in_time,omitempty"` + + // Restore from a Percona Xtrabackup in S3. See Importing Data into an Amazon RDS MySQL DB Instance + S3Import *S3ImportObservation `json:"s3Import,omitempty" tf:"s3_import,omitempty"` + + // Determines whether a final DB snapshot is + // created before the DB instance is deleted. If true is specified, no DBSnapshot + // is created. If false is specified, a DB snapshot is created before the DB + // instance is deleted, using the value from final_snapshot_identifier. Default + // is false. + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // Specifies whether or not to create this + // database from a snapshot. This correlates to the snapshot ID you'd find in the + // RDS console, e.g: rds:production-2015-06-26-06-05. + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // The RDS instance status. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Specifies whether the DB instance is + // encrypted. Note that if you are creating a cross-region read replica this field + // is ignored and you should instead declare kms_key_id with a valid ARN. The + // default is false if not specified. + StorageEncrypted *bool `json:"storageEncrypted,omitempty" tf:"storage_encrypted,omitempty"` + + // The storage throughput value for the DB instance. 
Can only be set when storage_type is "gp3". Cannot be specified if the allocated_storage value is below a per-engine threshold. See the RDS User Guide for details. + StorageThroughput *float64 `json:"storageThroughput,omitempty" tf:"storage_throughput,omitempty"` + + // One of "standard" (magnetic), "gp2" (general + // purpose SSD), "gp3" (general purpose SSD that needs iops independently) + // or "io1" (provisioned IOPS SSD). The default is "io1" if iops is specified, + // "gp2" if not. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Time zone of the DB instance. timezone is currently + // only supported by Microsoft SQL Server. The timezone can only be set on + // creation. See MSSQL User + // Guide + // for more information. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // Username for the master DB user. Cannot be specified for a replica. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // List of VPC security groups to + // associate. + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type InstanceParameters struct { + + // The allocated storage in gibibytes. If max_allocated_storage is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs. If replicate_source_db is set, the value is ignored during the creation of the instance. 
+ // +kubebuilder:validation:Optional + AllocatedStorage *float64 `json:"allocatedStorage,omitempty" tf:"allocated_storage,omitempty"` + + // Indicates that major version + // upgrades are allowed. Changing this parameter does not result in an outage and + // the change is asynchronously applied as soon as possible. + // +kubebuilder:validation:Optional + AllowMajorVersionUpgrade *bool `json:"allowMajorVersionUpgrade,omitempty" tf:"allow_major_version_upgrade,omitempty"` + + // Specifies whether any database modifications + // are applied immediately, or during the next maintenance window. Default is + // false. See Amazon RDS Documentation for more + // information. + // +kubebuilder:validation:Optional + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // Password for the master DB user. Note that this may show up in + // logs, and it will be stored in the state file. Cannot be set if manage_master_user_password is set to true. + // If true, the password will be auto-generated and stored in the Secret referenced by the passwordSecretRef field. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Optional + AutoGeneratePassword *bool `json:"autoGeneratePassword,omitempty" tf:"-"` + + // Indicates that minor engine upgrades + // will be applied automatically to the DB instance during the maintenance window. + // Defaults to true. + // +kubebuilder:validation:Optional + AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty" tf:"auto_minor_version_upgrade,omitempty"` + + // The AZ for the RDS instance. + // +kubebuilder:validation:Optional + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // The days to retain backups for. + // Must be between 0 and 35. + // Default is 0. + // Must be greater than 0 if the database is used as a source for a Read Replica, + // uses low-downtime updates, + // or will use RDS Blue/Green deployments. 
+ // +kubebuilder:validation:Optional + BackupRetentionPeriod *float64 `json:"backupRetentionPeriod,omitempty" tf:"backup_retention_period,omitempty"` + + // Specifies where automated backups and manual snapshots are stored. Possible values are region (default) and outposts. See Working with Amazon RDS on AWS Outposts for more information. + // +kubebuilder:validation:Optional + BackupTarget *string `json:"backupTarget,omitempty" tf:"backup_target,omitempty"` + + // The daily time range (in UTC) during which automated backups are created if they are enabled. + // Example: "09:46-10:16". Must not overlap with maintenance_window. + // +kubebuilder:validation:Optional + BackupWindow *string `json:"backupWindow,omitempty" tf:"backup_window,omitempty"` + + // Enables low-downtime updates using RDS Blue/Green deployments. + // See blue_green_update below. + // +kubebuilder:validation:Optional + BlueGreenUpdate *BlueGreenUpdateParameters `json:"blueGreenUpdate,omitempty" tf:"blue_green_update,omitempty"` + + // The identifier of the CA certificate for the DB instance. + // +kubebuilder:validation:Optional + CACertIdentifier *string `json:"caCertIdentifier,omitempty" tf:"ca_cert_identifier,omitempty"` + + // The character set name to use for DB encoding in Oracle and Microsoft SQL instances (collation). + // This can't be changed. + // See Oracle Character Sets Supported in Amazon RDS or + // Server-Level Collation for Microsoft SQL Server for more information. + // Cannot be set with replicate_source_db, restore_to_point_in_time, s3_import, or snapshot_identifier. + // +kubebuilder:validation:Optional + CharacterSetName *string `json:"characterSetName,omitempty" tf:"character_set_name,omitempty"` + + // – Copy all Instance tags to snapshots. Default is false. 
+ // +kubebuilder:validation:Optional + CopyTagsToSnapshot *bool `json:"copyTagsToSnapshot,omitempty" tf:"copy_tags_to_snapshot,omitempty"` + + // The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. + // +kubebuilder:validation:Optional + CustomIAMInstanceProfile *string `json:"customIamInstanceProfile,omitempty" tf:"custom_iam_instance_profile,omitempty"` + + // Indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance. See CoIP for RDS on Outposts for more information. + // +kubebuilder:validation:Optional + CustomerOwnedIPEnabled *bool `json:"customerOwnedIpEnabled,omitempty" tf:"customer_owned_ip_enabled,omitempty"` + + // The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance. Note that this does not apply for Oracle or SQL Server engines. See the AWS documentation for more details on what applies for those engines. If you are providing an Oracle db name, it needs to be in all upper case. Cannot be specified for a replica. + // +kubebuilder:validation:Optional + DBName *string `json:"dbName,omitempty" tf:"db_name,omitempty"` + + // Name of DB subnet group. DB instance will + // be created in the VPC associated with the DB subnet group. If unspecified, will + // be created in the default VPC, or in EC2 Classic, if available. When working + // with read replicas, it should be specified only if the source database + // specifies an instance in another AWS Region. See DBSubnetGroupName in API + // action CreateDBInstanceReadReplica + // for additional read replica constraints. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.SubnetGroup + // +kubebuilder:validation:Optional + DBSubnetGroupName *string `json:"dbSubnetGroupName,omitempty" tf:"db_subnet_group_name,omitempty"` + + // Reference to a SubnetGroup in rds to populate dbSubnetGroupName. 
+ // +kubebuilder:validation:Optional + DBSubnetGroupNameRef *v1.Reference `json:"dbSubnetGroupNameRef,omitempty" tf:"-"` + + // Selector for a SubnetGroup in rds to populate dbSubnetGroupName. + // +kubebuilder:validation:Optional + DBSubnetGroupNameSelector *v1.Selector `json:"dbSubnetGroupNameSelector,omitempty" tf:"-"` + + // Use a dedicated log volume (DLV) for the DB instance. Requires Provisioned IOPS. See the AWS documentation for more details. + // +kubebuilder:validation:Optional + DedicatedLogVolume *bool `json:"dedicatedLogVolume,omitempty" tf:"dedicated_log_volume,omitempty"` + + // Specifies whether to remove automated backups immediately after the DB instance is deleted. Default is true. + // +kubebuilder:validation:Optional + DeleteAutomatedBackups *bool `json:"deleteAutomatedBackups,omitempty" tf:"delete_automated_backups,omitempty"` + + // If the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to true. The default is false. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // The ID of the Directory Service Active Directory domain to create the instance in. Conflicts with domain_fqdn, domain_ou, domain_auth_secret_arn and a domain_dns_ips. + // +kubebuilder:validation:Optional + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The ARN for the Secrets Manager secret with the self managed Active Directory credentials for the user joining the domain. Conflicts with domain and domain_iam_role_name. + // +kubebuilder:validation:Optional + DomainAuthSecretArn *string `json:"domainAuthSecretArn,omitempty" tf:"domain_auth_secret_arn,omitempty"` + + // The IPv4 DNS IP addresses of your primary and secondary self managed Active Directory domain controllers. Two IP addresses must be provided. 
If there isn't a secondary domain controller, use the IP address of the primary domain controller for both entries in the list. Conflicts with domain and domain_iam_role_name. + // +kubebuilder:validation:Optional + // +listType=set + DomainDNSIps []*string `json:"domainDnsIps,omitempty" tf:"domain_dns_ips,omitempty"` + + // The fully qualified domain name (FQDN) of the self managed Active Directory domain. Conflicts with domain and domain_iam_role_name. + // +kubebuilder:validation:Optional + DomainFqdn *string `json:"domainFqdn,omitempty" tf:"domain_fqdn,omitempty"` + + // The name of the IAM role to be used when making API calls to the Directory Service. Conflicts with domain_fqdn, domain_ou, domain_auth_secret_arn and a domain_dns_ips. + // +kubebuilder:validation:Optional + DomainIAMRoleName *string `json:"domainIamRoleName,omitempty" tf:"domain_iam_role_name,omitempty"` + + // The self managed Active Directory organizational unit for your DB instance to join. Conflicts with domain and domain_iam_role_name. + // +kubebuilder:validation:Optional + DomainOu *string `json:"domainOu,omitempty" tf:"domain_ou,omitempty"` + + // Set of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. For supported values, see the EnableCloudwatchLogsExports.member.N parameter in API action CreateDBInstance. + // +kubebuilder:validation:Optional + // +listType=set + EnabledCloudwatchLogsExports []*string `json:"enabledCloudwatchLogsExports,omitempty" tf:"enabled_cloudwatch_logs_exports,omitempty"` + + // The database engine to use. For supported values, see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). Note that for Amazon Aurora instances the engine must match the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s engine'. 
For information on the difference between the available Aurora MySQL engines see Comparison in the [Amazon RDS Release Notes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraMySQLReleaseNotes/Welcome.html). + // +kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The engine version to use. If `autoMinorVersionUpgrade` is enabled, you can provide a prefix of the version such as 5.7 (for 5.7.10). The actual engine version used is returned in the attribute `status.atProvider.engineVersionActual`. For supported values, see the EngineVersion parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). Note that for Amazon Aurora instances the engine version must match the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s engine version'. + // +kubebuilder:validation:Optional + EngineVersion *string `json:"engineVersion,omitempty" tf:"engine_version,omitempty"` + + // The name of your final DB snapshot + // when this DB instance is deleted. Must be provided if skip_final_snapshot is + // set to false. The value must begin with a letter, only contain alphanumeric characters and hyphens, and not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. + // +kubebuilder:validation:Optional + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database + // accounts is enabled. + // +kubebuilder:validation:Optional + IAMDatabaseAuthenticationEnabled *bool `json:"iamDatabaseAuthenticationEnabled,omitempty" tf:"iam_database_authentication_enabled,omitempty"` + + // Required if restore_to_point_in_time is specified. 
+ // +kubebuilder:validation:Optional + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` + + // Creates a unique identifier beginning with the specified prefix. Conflicts with identifier. + // +kubebuilder:validation:Optional + IdentifierPrefix *string `json:"identifierPrefix,omitempty" tf:"identifier_prefix,omitempty"` + + // The instance type of the RDS instance. + // +kubebuilder:validation:Optional + InstanceClass *string `json:"instanceClass,omitempty" tf:"instance_class,omitempty"` + + // The amount of provisioned IOPS. Setting this implies a + // storage_type of "io1". Can only be set when storage_type is "io1" or "gp3". + // Cannot be specified for gp3 storage if the allocated_storage value is below a per-engine threshold. + // See the RDS User Guide for details. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The ARN for the KMS encryption key. If creating an + // encrypted replica, set this to the destination KMS ARN. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // License model information for this DB instance. Valid values for this field are as follows: + // +kubebuilder:validation:Optional + LicenseModel *string `json:"licenseModel,omitempty" tf:"license_model,omitempty"` + + // The window to perform maintenance in. + // Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". 
See RDS + // Maintenance Window + // docs + // for more information. + // +kubebuilder:validation:Optional + MaintenanceWindow *string `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // Set to true to allow RDS to manage the master user password in Secrets Manager. Cannot be set if password is provided. + // +kubebuilder:validation:Optional + ManageMasterUserPassword *bool `json:"manageMasterUserPassword,omitempty" tf:"manage_master_user_password,omitempty"` + + // The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If not specified, the default KMS key for your Amazon Web Services account is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("key_id",true) + // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyID *string `json:"masterUserSecretKmsKeyId,omitempty" tf:"master_user_secret_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate masterUserSecretKmsKeyId. + // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyIDRef *v1.Reference `json:"masterUserSecretKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate masterUserSecretKmsKeyId. + // +kubebuilder:validation:Optional + MasterUserSecretKMSKeyIDSelector *v1.Selector `json:"masterUserSecretKmsKeyIdSelector,omitempty" tf:"-"` + + // When configured, the upper limit to which Amazon RDS can automatically scale the storage of the DB instance. Configuring this will automatically ignore differences to allocated_storage. Must be greater than or equal to allocated_storage or 0 to disable Storage Autoscaling. 
+ // +kubebuilder:validation:Optional + MaxAllocatedStorage *float64 `json:"maxAllocatedStorage,omitempty" tf:"max_allocated_storage,omitempty"` + + // The interval, in seconds, between points + // when Enhanced Monitoring metrics are collected for the DB instance. To disable + // collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid + // Values: 0, 1, 5, 10, 15, 30, 60. + // +kubebuilder:validation:Optional + MonitoringInterval *float64 `json:"monitoringInterval,omitempty" tf:"monitoring_interval,omitempty"` + + // The ARN for the IAM role that permits RDS + // to send enhanced monitoring metrics to CloudWatch Logs. You can find more + // information on the AWS + // Documentation + // what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + MonitoringRoleArn *string `json:"monitoringRoleArn,omitempty" tf:"monitoring_role_arn,omitempty"` + + // Reference to a Role in iam to populate monitoringRoleArn. + // +kubebuilder:validation:Optional + MonitoringRoleArnRef *v1.Reference `json:"monitoringRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate monitoringRoleArn. + // +kubebuilder:validation:Optional + MonitoringRoleArnSelector *v1.Selector `json:"monitoringRoleArnSelector,omitempty" tf:"-"` + + // Specifies if the RDS instance is multi-AZ + // +kubebuilder:validation:Optional + MultiAz *bool `json:"multiAz,omitempty" tf:"multi_az,omitempty"` + + // The national character set is used in the NCHAR, NVARCHAR2, and NCLOB data types for Oracle instances. This can't be changed. See Oracle Character Sets + // Supported in Amazon RDS. 
+ // +kubebuilder:validation:Optional + NcharCharacterSetName *string `json:"ncharCharacterSetName,omitempty" tf:"nchar_character_set_name,omitempty"` + + // The network type of the DB instance. Valid values: IPV4, DUAL. + // +kubebuilder:validation:Optional + NetworkType *string `json:"networkType,omitempty" tf:"network_type,omitempty"` + + // Name of the DB option group to associate. + // +kubebuilder:validation:Optional + OptionGroupName *string `json:"optionGroupName,omitempty" tf:"option_group_name,omitempty"` + + // Name of the DB parameter group to associate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta1.ParameterGroup + // +kubebuilder:validation:Optional + ParameterGroupName *string `json:"parameterGroupName,omitempty" tf:"parameter_group_name,omitempty"` + + // Reference to a ParameterGroup in rds to populate parameterGroupName. + // +kubebuilder:validation:Optional + ParameterGroupNameRef *v1.Reference `json:"parameterGroupNameRef,omitempty" tf:"-"` + + // Selector for a ParameterGroup in rds to populate parameterGroupName. + // +kubebuilder:validation:Optional + ParameterGroupNameSelector *v1.Selector `json:"parameterGroupNameSelector,omitempty" tf:"-"` + + // Password for the master DB user. Note that this may show up in + // logs, and it will be stored in the state file. Cannot be set if manage_master_user_password is set to true. + // Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // Specifies whether Performance Insights are enabled. Defaults to false. 
+ // +kubebuilder:validation:Optional + PerformanceInsightsEnabled *bool `json:"performanceInsightsEnabled,omitempty" tf:"performance_insights_enabled,omitempty"` + + // The ARN for the KMS key to encrypt Performance Insights data. When specifying performance_insights_kms_key_id, performance_insights_enabled needs to be set to true. Once KMS key is set, it can never be changed. + // +kubebuilder:validation:Optional + PerformanceInsightsKMSKeyID *string `json:"performanceInsightsKmsKeyId,omitempty" tf:"performance_insights_kms_key_id,omitempty"` + + // Amount of time in days to retain Performance Insights data. Valid values are 7, 731 (2 years) or a multiple of 31. When specifying performance_insights_retention_period, performance_insights_enabled needs to be set to true. Defaults to '7'. + // +kubebuilder:validation:Optional + PerformanceInsightsRetentionPeriod *float64 `json:"performanceInsightsRetentionPeriod,omitempty" tf:"performance_insights_retention_period,omitempty"` + + // The port on which the DB accepts connections. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Bool to control if instance is publicly + // accessible. Default is false. + // +kubebuilder:validation:Optional + PubliclyAccessible *bool `json:"publiclyAccessible,omitempty" tf:"publicly_accessible,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies whether the replica is in either mounted or open-read-only mode. This attribute + // is only supported by Oracle instances. Oracle replicas operate in open-read-only mode unless otherwise specified. See Working with Oracle Read Replicas for more information. 
+ // +kubebuilder:validation:Optional + ReplicaMode *string `json:"replicaMode,omitempty" tf:"replica_mode,omitempty"` + + // Specifies that this resource is a Replicate + // database, and to use this value as the source database. This correlates to the + // identifier of another Amazon RDS Database to replicate (if replicating within + // a single region) or ARN of the Amazon RDS Database to replicate (if replicating + // cross-region). Note that if you are + // creating a cross-region replica of an encrypted database you will also need to + // specify a kms_key_id. See DB Instance Replication and Working with + // PostgreSQL and MySQL Read Replicas + // for more information on using Replication. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rds/v1beta3.Instance + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("identifier",false) + // +kubebuilder:validation:Optional + ReplicateSourceDB *string `json:"replicateSourceDb,omitempty" tf:"replicate_source_db,omitempty"` + + // Reference to a Instance in rds to populate replicateSourceDb. + // +kubebuilder:validation:Optional + ReplicateSourceDBRef *v1.Reference `json:"replicateSourceDbRef,omitempty" tf:"-"` + + // Selector for a Instance in rds to populate replicateSourceDb. + // +kubebuilder:validation:Optional + ReplicateSourceDBSelector *v1.Selector `json:"replicateSourceDbSelector,omitempty" tf:"-"` + + // A configuration block for restoring a DB instance to an arbitrary point in time. Requires the identifier argument to be set with the name of the new DB instance to be created. See Restore To Point In Time below for details. + // +kubebuilder:validation:Optional + RestoreToPointInTime *RestoreToPointInTimeParameters `json:"restoreToPointInTime,omitempty" tf:"restore_to_point_in_time,omitempty"` + + // Restore from a Percona Xtrabackup in S3. 
See Importing Data into an Amazon RDS MySQL DB Instance + // +kubebuilder:validation:Optional + S3Import *S3ImportParameters `json:"s3Import,omitempty" tf:"s3_import,omitempty"` + + // Determines whether a final DB snapshot is + // created before the DB instance is deleted. If true is specified, no DBSnapshot + // is created. If false is specified, a DB snapshot is created before the DB + // instance is deleted, using the value from final_snapshot_identifier. Default + // is false. + // +kubebuilder:validation:Optional + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // Specifies whether or not to create this + // database from a snapshot. This correlates to the snapshot ID you'd find in the + // RDS console, e.g: rds:production-2015-06-26-06-05. + // +kubebuilder:validation:Optional + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // Specifies whether the DB instance is + // encrypted. Note that if you are creating a cross-region read replica this field + // is ignored and you should instead declare kms_key_id with a valid ARN. The + // default is false if not specified. + // +kubebuilder:validation:Optional + StorageEncrypted *bool `json:"storageEncrypted,omitempty" tf:"storage_encrypted,omitempty"` + + // The storage throughput value for the DB instance. Can only be set when storage_type is "gp3". Cannot be specified if the allocated_storage value is below a per-engine threshold. See the RDS User Guide for details. + // +kubebuilder:validation:Optional + StorageThroughput *float64 `json:"storageThroughput,omitempty" tf:"storage_throughput,omitempty"` + + // One of "standard" (magnetic), "gp2" (general + // purpose SSD), "gp3" (general purpose SSD that needs iops independently) + // or "io1" (provisioned IOPS SSD). The default is "io1" if iops is specified, + // "gp2" if not. 
+ // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Time zone of the DB instance. timezone is currently + // only supported by Microsoft SQL Server. The timezone can only be set on + // creation. See MSSQL User + // Guide + // for more information. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // Username for the master DB user. Cannot be specified for a replica. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // List of VPC security groups to + // associate. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type ListenerEndpointInitParameters struct { +} + +type ListenerEndpointObservation struct { + + // The hostname of the RDS instance. See also endpoint and port. 
+ Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The canonical hosted zone ID of the DB instance (to be used + // in a Route 53 Alias record). + HostedZoneID *string `json:"hostedZoneId,omitempty" tf:"hosted_zone_id,omitempty"` + + // The port on which the DB accepts connections. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type ListenerEndpointParameters struct { +} + +type MasterUserSecretInitParameters struct { +} + +type MasterUserSecretObservation struct { + + // The Amazon Web Services KMS key identifier that is used to encrypt the secret. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The Amazon Resource Name (ARN) of the secret. + SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` + + // The status of the secret. Valid Values: creating | active | rotating | impaired. + SecretStatus *string `json:"secretStatus,omitempty" tf:"secret_status,omitempty"` +} + +type MasterUserSecretParameters struct { +} + +type RestoreToPointInTimeInitParameters struct { + + // The date and time to restore from. Value must be a time in Universal Coordinated Time (UTC) format and must be before the latest restorable time for the DB instance. Cannot be specified with use_latest_restorable_time. + RestoreTime *string `json:"restoreTime,omitempty" tf:"restore_time,omitempty"` + + // The ARN of the automated backup from which to restore. Required if source_db_instance_identifier or source_dbi_resource_id is not specified. + SourceDBInstanceAutomatedBackupsArn *string `json:"sourceDbInstanceAutomatedBackupsArn,omitempty" tf:"source_db_instance_automated_backups_arn,omitempty"` + + // The identifier of the source DB instance from which to restore. Must match the identifier of an existing DB instance. Required if source_db_instance_automated_backups_arn or source_dbi_resource_id is not specified. 
+ SourceDBInstanceIdentifier *string `json:"sourceDbInstanceIdentifier,omitempty" tf:"source_db_instance_identifier,omitempty"` + + // The resource ID of the source DB instance from which to restore. Required if source_db_instance_identifier or source_db_instance_automated_backups_arn is not specified. + SourceDbiResourceID *string `json:"sourceDbiResourceId,omitempty" tf:"source_dbi_resource_id,omitempty"` + + // A boolean value that indicates whether the DB instance is restored from the latest backup time. Defaults to false. Cannot be specified with restore_time. + UseLatestRestorableTime *bool `json:"useLatestRestorableTime,omitempty" tf:"use_latest_restorable_time,omitempty"` +} + +type RestoreToPointInTimeObservation struct { + + // The date and time to restore from. Value must be a time in Universal Coordinated Time (UTC) format and must be before the latest restorable time for the DB instance. Cannot be specified with use_latest_restorable_time. + RestoreTime *string `json:"restoreTime,omitempty" tf:"restore_time,omitempty"` + + // The ARN of the automated backup from which to restore. Required if source_db_instance_identifier or source_dbi_resource_id is not specified. + SourceDBInstanceAutomatedBackupsArn *string `json:"sourceDbInstanceAutomatedBackupsArn,omitempty" tf:"source_db_instance_automated_backups_arn,omitempty"` + + // The identifier of the source DB instance from which to restore. Must match the identifier of an existing DB instance. Required if source_db_instance_automated_backups_arn or source_dbi_resource_id is not specified. + SourceDBInstanceIdentifier *string `json:"sourceDbInstanceIdentifier,omitempty" tf:"source_db_instance_identifier,omitempty"` + + // The resource ID of the source DB instance from which to restore. Required if source_db_instance_identifier or source_db_instance_automated_backups_arn is not specified. 
+ SourceDbiResourceID *string `json:"sourceDbiResourceId,omitempty" tf:"source_dbi_resource_id,omitempty"` + + // A boolean value that indicates whether the DB instance is restored from the latest backup time. Defaults to false. Cannot be specified with restore_time. + UseLatestRestorableTime *bool `json:"useLatestRestorableTime,omitempty" tf:"use_latest_restorable_time,omitempty"` +} + +type RestoreToPointInTimeParameters struct { + + // The date and time to restore from. Value must be a time in Universal Coordinated Time (UTC) format and must be before the latest restorable time for the DB instance. Cannot be specified with use_latest_restorable_time. + // +kubebuilder:validation:Optional + RestoreTime *string `json:"restoreTime,omitempty" tf:"restore_time,omitempty"` + + // The ARN of the automated backup from which to restore. Required if source_db_instance_identifier or source_dbi_resource_id is not specified. + // +kubebuilder:validation:Optional + SourceDBInstanceAutomatedBackupsArn *string `json:"sourceDbInstanceAutomatedBackupsArn,omitempty" tf:"source_db_instance_automated_backups_arn,omitempty"` + + // The identifier of the source DB instance from which to restore. Must match the identifier of an existing DB instance. Required if source_db_instance_automated_backups_arn or source_dbi_resource_id is not specified. + // +kubebuilder:validation:Optional + SourceDBInstanceIdentifier *string `json:"sourceDbInstanceIdentifier,omitempty" tf:"source_db_instance_identifier,omitempty"` + + // The resource ID of the source DB instance from which to restore. Required if source_db_instance_identifier or source_db_instance_automated_backups_arn is not specified. + // +kubebuilder:validation:Optional + SourceDbiResourceID *string `json:"sourceDbiResourceId,omitempty" tf:"source_dbi_resource_id,omitempty"` + + // A boolean value that indicates whether the DB instance is restored from the latest backup time. Defaults to false. Cannot be specified with restore_time. 
+ // +kubebuilder:validation:Optional + UseLatestRestorableTime *bool `json:"useLatestRestorableTime,omitempty" tf:"use_latest_restorable_time,omitempty"` +} + +type S3ImportInitParameters struct { + + // The bucket name where your backup is stored + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Can be blank, but is the path to your backup + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Role applied to load the data. + IngestionRole *string `json:"ingestionRole,omitempty" tf:"ingestion_role,omitempty"` + + // Source engine for the backup + SourceEngine *string `json:"sourceEngine,omitempty" tf:"source_engine,omitempty"` + + // Version of the source engine used to make the backup + SourceEngineVersion *string `json:"sourceEngineVersion,omitempty" tf:"source_engine_version,omitempty"` +} + +type S3ImportObservation struct { + + // The bucket name where your backup is stored + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Can be blank, but is the path to your backup + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Role applied to load the data. + IngestionRole *string `json:"ingestionRole,omitempty" tf:"ingestion_role,omitempty"` + + // Source engine for the backup + SourceEngine *string `json:"sourceEngine,omitempty" tf:"source_engine,omitempty"` + + // Version of the source engine used to make the backup + SourceEngineVersion *string `json:"sourceEngineVersion,omitempty" tf:"source_engine_version,omitempty"` +} + +type S3ImportParameters struct { + + // The bucket name where your backup is stored + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"` + + // Can be blank, but is the path to your backup + // +kubebuilder:validation:Optional + BucketPrefix *string `json:"bucketPrefix,omitempty" tf:"bucket_prefix,omitempty"` + + // Role applied to load the data. 
+	// +kubebuilder:validation:Optional
+	IngestionRole *string `json:"ingestionRole" tf:"ingestion_role,omitempty"`
+
+	// Source engine for the backup
+	// +kubebuilder:validation:Optional
+	SourceEngine *string `json:"sourceEngine" tf:"source_engine,omitempty"`
+
+	// Version of the source engine used to make the backup
+	// +kubebuilder:validation:Optional
+	SourceEngineVersion *string `json:"sourceEngineVersion" tf:"source_engine_version,omitempty"`
+}
+
+// InstanceSpec defines the desired state of Instance
+type InstanceSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider InstanceParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider InstanceInitParameters `json:"initProvider,omitempty"`
+}
+
+// InstanceStatus defines the observed state of Instance.
+type InstanceStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider InstanceObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// Instance is the Schema for the Instances API. Provides an RDS instance resource.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Instance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instanceClass) || (has(self.initProvider) && has(self.initProvider.instanceClass))",message="spec.forProvider.instanceClass is a required parameter" + Spec InstanceSpec `json:"spec"` + Status InstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceList contains a list of Instances +type InstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Instance `json:"items"` +} + +// Repository type metadata. +var ( + Instance_Kind = "Instance" + Instance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Instance_Kind}.String() + Instance_KindAPIVersion = Instance_Kind + "." 
+ CRDGroupVersion.String() + Instance_GroupVersionKind = CRDGroupVersion.WithKind(Instance_Kind) +) + +func init() { + SchemeBuilder.Register(&Instance{}, &InstanceList{}) +} diff --git a/apis/redshift/v1beta1/zz_generated.conversion_hubs.go b/apis/redshift/v1beta1/zz_generated.conversion_hubs.go index 102e9aae82..bb716ef044 100755 --- a/apis/redshift/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/redshift/v1beta1/zz_generated.conversion_hubs.go @@ -9,12 +9,6 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *AuthenticationProfile) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Cluster) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *EndpointAccess) Hub() {} - // Hub marks this type as a conversion hub. func (tr *EventSubscription) Hub() {} @@ -27,9 +21,6 @@ func (tr *HSMConfiguration) Hub() {} // Hub marks this type as a conversion hub. func (tr *ParameterGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ScheduledAction) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SnapshotCopyGrant) Hub() {} diff --git a/apis/redshift/v1beta1/zz_generated.conversion_spokes.go b/apis/redshift/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..7e7ea31418 --- /dev/null +++ b/apis/redshift/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Cluster to the hub type. 
+func (tr *Cluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Cluster type. +func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ScheduledAction to the hub type. +func (tr *ScheduledAction) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ScheduledAction type. 
+func (tr *ScheduledAction) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/redshift/v1beta1/zz_generated.resolvers.go b/apis/redshift/v1beta1/zz_generated.resolvers.go index 386e42e64f..1b2766f6b7 100644 --- a/apis/redshift/v1beta1/zz_generated.resolvers.go +++ b/apis/redshift/v1beta1/zz_generated.resolvers.go @@ -470,7 +470,7 @@ func (mg *SnapshotScheduleAssociation) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -508,7 +508,7 @@ func (mg *SnapshotScheduleAssociation) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.ScheduleIdentifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ScheduleIdentifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -608,7 +608,7 @@ func (mg *UsageLimit) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -627,7 +627,7 @@ func (mg *UsageLimit) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.ClusterIdentifier = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ClusterIdentifierRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/redshift/v1beta1/zz_snapshotscheduleassociation_types.go b/apis/redshift/v1beta1/zz_snapshotscheduleassociation_types.go index df4547080e..6c5fb2f2a1 100755 --- a/apis/redshift/v1beta1/zz_snapshotscheduleassociation_types.go +++ b/apis/redshift/v1beta1/zz_snapshotscheduleassociation_types.go @@ -16,7 +16,7 @@ import ( type SnapshotScheduleAssociationInitParameters struct { // The cluster identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` @@ -56,7 +56,7 @@ type SnapshotScheduleAssociationObservation struct { type SnapshotScheduleAssociationParameters struct { // The cluster identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` diff --git a/apis/redshift/v1beta1/zz_usagelimit_types.go b/apis/redshift/v1beta1/zz_usagelimit_types.go index a3adc91eac..08da73f50c 100755 --- a/apis/redshift/v1beta1/zz_usagelimit_types.go +++ b/apis/redshift/v1beta1/zz_usagelimit_types.go @@ -22,7 +22,7 @@ type UsageLimitInitParameters struct { BreachAction *string `json:"breachAction,omitempty" tf:"breach_action,omitempty"` // The identifier of the cluster that you want to limit usage. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` @@ -94,7 +94,7 @@ type UsageLimitParameters struct { BreachAction *string `json:"breachAction,omitempty" tf:"breach_action,omitempty"` // The identifier of the cluster that you want to limit usage. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` diff --git a/apis/redshift/v1beta2/zz_cluster_terraformed.go b/apis/redshift/v1beta2/zz_cluster_terraformed.go new file mode 100755 index 0000000000..b1f17c7fad --- /dev/null +++ b/apis/redshift/v1beta2/zz_cluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "aws_redshift_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"master_password": "masterPasswordSecretRef"} +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string 
{
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Cluster
+func (tr *Cluster) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Cluster
+func (tr *Cluster) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Cluster
+func (tr *Cluster) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Cluster
+func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/redshift/v1beta2/zz_cluster_types.go b/apis/redshift/v1beta2/zz_cluster_types.go new file mode 100755 index 0000000000..b386a9769e --- /dev/null +++ b/apis/redshift/v1beta2/zz_cluster_types.go @@ -0,0 +1,779 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClusterInitParameters struct { + + // If true , major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster. Default is true. 
+	AllowVersionUpgrade *bool `json:"allowVersionUpgrade,omitempty" tf:"allow_version_upgrade,omitempty"`
+
+	// Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is false.
+	ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"`
+
+	// The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored.
+	// No longer supported by the AWS API.
+	// Always returns auto.
+	AquaConfigurationStatus *string `json:"aquaConfigurationStatus,omitempty" tf:"aqua_configuration_status,omitempty"`
+
+	// The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with create-cluster-snapshot. Default is 1.
+	AutomatedSnapshotRetentionPeriod *float64 `json:"automatedSnapshotRetentionPeriod,omitempty" tf:"automated_snapshot_retention_period,omitempty"`
+
+	// The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency. Can only be changed if availability_zone_relocation_enabled is true.
+	AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"`
+
+	// If true, the cluster can be relocated to another availability zone, either automatically by AWS or when requested. Default is false. Available for use on clusters from the RA3 instance family.
+	AvailabilityZoneRelocationEnabled *bool `json:"availabilityZoneRelocationEnabled,omitempty" tf:"availability_zone_relocation_enabled,omitempty"`
+
+	// The name of the parameter group to be associated with this cluster.
+ ClusterParameterGroupName *string `json:"clusterParameterGroupName,omitempty" tf:"cluster_parameter_group_name,omitempty"` + + // The public key for the cluster + ClusterPublicKey *string `json:"clusterPublicKey,omitempty" tf:"cluster_public_key,omitempty"` + + // The specific revision number of the database in the cluster + ClusterRevisionNumber *string `json:"clusterRevisionNumber,omitempty" tf:"cluster_revision_number,omitempty"` + + // The name of a cluster subnet group to be associated with this cluster. If this parameter is not provided the resulting cluster will be deployed outside virtual private cloud (VPC). + ClusterSubnetGroupName *string `json:"clusterSubnetGroupName,omitempty" tf:"cluster_subnet_group_name,omitempty"` + + // The cluster type to use. Either single-node or multi-node. + ClusterType *string `json:"clusterType,omitempty" tf:"cluster_type,omitempty"` + + // The version of the Amazon Redshift engine software that you want to deploy on the cluster. + // The version selected runs on all the nodes in the cluster. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // The name of the first database to be created when the cluster is created. + // If you do not provide a name, Amazon Redshift will create a default database called dev. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The Amazon Resource Name (ARN) for the IAM role that was set as default for the cluster when the cluster was created. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + DefaultIAMRoleArn *string `json:"defaultIamRoleArn,omitempty" tf:"default_iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate defaultIamRoleArn. 
+ // +kubebuilder:validation:Optional + DefaultIAMRoleArnRef *v1.Reference `json:"defaultIamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate defaultIamRoleArn. + // +kubebuilder:validation:Optional + DefaultIAMRoleArnSelector *v1.Selector `json:"defaultIamRoleArnSelector,omitempty" tf:"-"` + + // The Elastic IP (EIP) address for the cluster. + ElasticIP *string `json:"elasticIp,omitempty" tf:"elastic_ip,omitempty"` + + // If true , the data in the cluster is encrypted at rest. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The connection endpoint + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // If true , enhanced VPC routing is enabled. + EnhancedVPCRouting *bool `json:"enhancedVpcRouting,omitempty" tf:"enhanced_vpc_routing,omitempty"` + + // The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, skip_final_snapshot must be false. + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // References to Role in iam to populate iamRoles. + // +kubebuilder:validation:Optional + IAMRoleRefs []v1.Reference `json:"iamRoleRefs,omitempty" tf:"-"` + + // Selector for a list of Role in iam to populate iamRoles. + // +kubebuilder:validation:Optional + IAMRoleSelector *v1.Selector `json:"iamRoleSelector,omitempty" tf:"-"` + + // A list of IAM Role ARNs to associate with the cluster. A Maximum of 10 can be associated to the cluster at any time. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:refFieldName=IAMRoleRefs + // +crossplane:generate:reference:selectorFieldName=IAMRoleSelector + // +listType=set + IAMRoles []*string `json:"iamRoles,omitempty" tf:"iam_roles,omitempty"` + + // The ARN for the KMS encryption key. 
When specifying kms_key_id, encrypted needs to be set to true. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Logging, documented below. + Logging *LoggingInitParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks. Default value is current. + MaintenanceTrackName *string `json:"maintenanceTrackName,omitempty" tf:"maintenance_track_name,omitempty"` + + // Whether to use AWS SecretsManager to manage the cluster admin credentials. + // Conflicts with master_password. + // One of master_password or manage_master_password is required unless snapshot_identifier is provided. + ManageMasterPassword *bool `json:"manageMasterPassword,omitempty" tf:"manage_master_password,omitempty"` + + // The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots. Valid values are between -1 and 3653. Default value is -1. 
+ ManualSnapshotRetentionPeriod *float64 `json:"manualSnapshotRetentionPeriod,omitempty" tf:"manual_snapshot_retention_period,omitempty"` + + // ID of the KMS key used to encrypt the cluster admin credentials secret. + MasterPasswordSecretKMSKeyID *string `json:"masterPasswordSecretKmsKeyId,omitempty" tf:"master_password_secret_kms_key_id,omitempty"` + + // Password for the master DB user. + // Conflicts with manage_master_password. + // One of master_password or manage_master_password is required unless snapshot_identifier is provided. + // Note that this may show up in logs, and it will be stored in the state file. + // Password must contain at least 8 characters and contain at least one uppercase letter, one lowercase letter, and one number. + MasterPasswordSecretRef *v1.SecretKeySelector `json:"masterPasswordSecretRef,omitempty" tf:"-"` + + // Username for the master DB user. + MasterUsername *string `json:"masterUsername,omitempty" tf:"master_username,omitempty"` + + // Specifies if the Redshift cluster is multi-AZ. + MultiAz *bool `json:"multiAz,omitempty" tf:"multi_az,omitempty"` + + // The node type to be provisioned for the cluster. + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1. + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` + + // The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot. + OwnerAccount *string `json:"ownerAccount,omitempty" tf:"owner_account,omitempty"` + + // The port number on which the cluster accepts incoming connections. Valid values are between 1115 and 65535. + // The cluster is accessible only via the JDBC and ODBC connection strings. 
+ // Part of the connection string requires the port on which the cluster will listen for incoming connections. + // Default port is 5439. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The weekly time range (in UTC) during which automated cluster maintenance can occur. + // Format: ddd:hh24:mi-ddd:hh24:mi + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty" tf:"preferred_maintenance_window,omitempty"` + + // If true, the cluster can be accessed from a public network. Default is true. + PubliclyAccessible *bool `json:"publiclyAccessible,omitempty" tf:"publicly_accessible,omitempty"` + + // Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true , a final cluster snapshot is not created. If false , a final cluster snapshot is created before the cluster is deleted. Default is false. + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // The ARN of the snapshot from which to create the new cluster. Conflicts with snapshot_identifier. + SnapshotArn *string `json:"snapshotArn,omitempty" tf:"snapshot_arn,omitempty"` + + // The name of the cluster the source snapshot was created from. + SnapshotClusterIdentifier *string `json:"snapshotClusterIdentifier,omitempty" tf:"snapshot_cluster_identifier,omitempty"` + + // Configuration of automatic copy of snapshots from one region to another. Documented below. + SnapshotCopy *SnapshotCopyInitParameters `json:"snapshotCopy,omitempty" tf:"snapshot_copy,omitempty"` + + // The name of the snapshot from which to create the new cluster. Conflicts with snapshot_arn. + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. 
+ // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type ClusterNodesInitParameters struct { +} + +type ClusterNodesObservation struct { + + // Whether the node is a leader node or a compute node + NodeRole *string `json:"nodeRole,omitempty" tf:"node_role,omitempty"` + + // The private IP address of a node within a cluster + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The public IP address of a node within a cluster + PublicIPAddress *string `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` +} + +type ClusterNodesParameters struct { +} + +type ClusterObservation struct { + + // If true , major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster. Default is true. + AllowVersionUpgrade *bool `json:"allowVersionUpgrade,omitempty" tf:"allow_version_upgrade,omitempty"` + + // Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is false. 
+ ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored. + // No longer supported by the AWS API. + // Always returns auto. + AquaConfigurationStatus *string `json:"aquaConfigurationStatus,omitempty" tf:"aqua_configuration_status,omitempty"` + + // Amazon Resource Name (ARN) of cluster + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with create-cluster-snapshot. Default is 1. + AutomatedSnapshotRetentionPeriod *float64 `json:"automatedSnapshotRetentionPeriod,omitempty" tf:"automated_snapshot_retention_period,omitempty"` + + // The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency. Can only be changed if availability_zone_relocation_enabled is true. + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // If true, the cluster can be relocated to another availabity zone, either automatically by AWS or when requested. Default is false. Available for use on clusters from the RA3 instance family. + AvailabilityZoneRelocationEnabled *bool `json:"availabilityZoneRelocationEnabled,omitempty" tf:"availability_zone_relocation_enabled,omitempty"` + + // The namespace Amazon Resource Name (ARN) of the cluster + ClusterNamespaceArn *string `json:"clusterNamespaceArn,omitempty" tf:"cluster_namespace_arn,omitempty"` + + // The nodes in the cluster. 
Cluster node blocks are documented below + ClusterNodes []ClusterNodesObservation `json:"clusterNodes,omitempty" tf:"cluster_nodes,omitempty"` + + // The name of the parameter group to be associated with this cluster. + ClusterParameterGroupName *string `json:"clusterParameterGroupName,omitempty" tf:"cluster_parameter_group_name,omitempty"` + + // The public key for the cluster + ClusterPublicKey *string `json:"clusterPublicKey,omitempty" tf:"cluster_public_key,omitempty"` + + // The specific revision number of the database in the cluster + ClusterRevisionNumber *string `json:"clusterRevisionNumber,omitempty" tf:"cluster_revision_number,omitempty"` + + // The name of a cluster subnet group to be associated with this cluster. If this parameter is not provided the resulting cluster will be deployed outside virtual private cloud (VPC). + ClusterSubnetGroupName *string `json:"clusterSubnetGroupName,omitempty" tf:"cluster_subnet_group_name,omitempty"` + + // The cluster type to use. Either single-node or multi-node. + ClusterType *string `json:"clusterType,omitempty" tf:"cluster_type,omitempty"` + + // The version of the Amazon Redshift engine software that you want to deploy on the cluster. + // The version selected runs on all the nodes in the cluster. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // The DNS name of the cluster + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // The name of the first database to be created when the cluster is created. + // If you do not provide a name, Amazon Redshift will create a default database called dev. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The Amazon Resource Name (ARN) for the IAM role that was set as default for the cluster when the cluster was created. 
+ DefaultIAMRoleArn *string `json:"defaultIamRoleArn,omitempty" tf:"default_iam_role_arn,omitempty"` + + // The Elastic IP (EIP) address for the cluster. + ElasticIP *string `json:"elasticIp,omitempty" tf:"elastic_ip,omitempty"` + + // If true , the data in the cluster is encrypted at rest. + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The connection endpoint + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // If true , enhanced VPC routing is enabled. + EnhancedVPCRouting *bool `json:"enhancedVpcRouting,omitempty" tf:"enhanced_vpc_routing,omitempty"` + + // The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, skip_final_snapshot must be false. + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // A list of IAM Role ARNs to associate with the cluster. A Maximum of 10 can be associated to the cluster at any time. + // +listType=set + IAMRoles []*string `json:"iamRoles,omitempty" tf:"iam_roles,omitempty"` + + // The Redshift Cluster ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ARN for the KMS encryption key. When specifying kms_key_id, encrypted needs to be set to true. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Logging, documented below. + Logging *LoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"` + + // The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks. 
Default value is current. + MaintenanceTrackName *string `json:"maintenanceTrackName,omitempty" tf:"maintenance_track_name,omitempty"` + + // Whether to use AWS SecretsManager to manage the cluster admin credentials. + // Conflicts with master_password. + // One of master_password or manage_master_password is required unless snapshot_identifier is provided. + ManageMasterPassword *bool `json:"manageMasterPassword,omitempty" tf:"manage_master_password,omitempty"` + + // The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots. Valid values are between -1 and 3653. Default value is -1. + ManualSnapshotRetentionPeriod *float64 `json:"manualSnapshotRetentionPeriod,omitempty" tf:"manual_snapshot_retention_period,omitempty"` + + // ARN of the cluster admin credentials secret + MasterPasswordSecretArn *string `json:"masterPasswordSecretArn,omitempty" tf:"master_password_secret_arn,omitempty"` + + // ID of the KMS key used to encrypt the cluster admin credentials secret. + MasterPasswordSecretKMSKeyID *string `json:"masterPasswordSecretKmsKeyId,omitempty" tf:"master_password_secret_kms_key_id,omitempty"` + + // Username for the master DB user. + MasterUsername *string `json:"masterUsername,omitempty" tf:"master_username,omitempty"` + + // Specifies if the Redshift cluster is multi-AZ. + MultiAz *bool `json:"multiAz,omitempty" tf:"multi_az,omitempty"` + + // The node type to be provisioned for the cluster. + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1. + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` + + // The AWS customer account used to create or copy the snapshot. 
Required if you are restoring a snapshot you do not own, optional if you own the snapshot. + OwnerAccount *string `json:"ownerAccount,omitempty" tf:"owner_account,omitempty"` + + // The port number on which the cluster accepts incoming connections. Valid values are between 1115 and 65535. + // The cluster is accessible only via the JDBC and ODBC connection strings. + // Part of the connection string requires the port on which the cluster will listen for incoming connections. + // Default port is 5439. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The weekly time range (in UTC) during which automated cluster maintenance can occur. + // Format: ddd:hh24:mi-ddd:hh24:mi + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty" tf:"preferred_maintenance_window,omitempty"` + + // If true, the cluster can be accessed from a public network. Default is true. + PubliclyAccessible *bool `json:"publiclyAccessible,omitempty" tf:"publicly_accessible,omitempty"` + + // Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true , a final cluster snapshot is not created. If false , a final cluster snapshot is created before the cluster is deleted. Default is false. + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // The ARN of the snapshot from which to create the new cluster. Conflicts with snapshot_identifier. + SnapshotArn *string `json:"snapshotArn,omitempty" tf:"snapshot_arn,omitempty"` + + // The name of the cluster the source snapshot was created from. + SnapshotClusterIdentifier *string `json:"snapshotClusterIdentifier,omitempty" tf:"snapshot_cluster_identifier,omitempty"` + + // Configuration of automatic copy of snapshots from one region to another. Documented below. 
+ SnapshotCopy *SnapshotCopyObservation `json:"snapshotCopy,omitempty" tf:"snapshot_copy,omitempty"` + + // The name of the snapshot from which to create the new cluster. Conflicts with snapshot_arn. + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster. + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type ClusterParameters struct { + + // If true , major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster. Default is true. + // +kubebuilder:validation:Optional + AllowVersionUpgrade *bool `json:"allowVersionUpgrade,omitempty" tf:"allow_version_upgrade,omitempty"` + + // Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is false. + // +kubebuilder:validation:Optional + ApplyImmediately *bool `json:"applyImmediately,omitempty" tf:"apply_immediately,omitempty"` + + // The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored. + // No longer supported by the AWS API. + // Always returns auto. + // +kubebuilder:validation:Optional + AquaConfigurationStatus *string `json:"aquaConfigurationStatus,omitempty" tf:"aqua_configuration_status,omitempty"` + + // The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. 
Even if automated snapshots are disabled, you can still create manual snapshots when you want with create-cluster-snapshot. Default is 1. + // +kubebuilder:validation:Optional + AutomatedSnapshotRetentionPeriod *float64 `json:"automatedSnapshotRetentionPeriod,omitempty" tf:"automated_snapshot_retention_period,omitempty"` + + // The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency. Can only be changed if availability_zone_relocation_enabled is true. + // +kubebuilder:validation:Optional + AvailabilityZone *string `json:"availabilityZone,omitempty" tf:"availability_zone,omitempty"` + + // If true, the cluster can be relocated to another availabity zone, either automatically by AWS or when requested. Default is false. Available for use on clusters from the RA3 instance family. + // +kubebuilder:validation:Optional + AvailabilityZoneRelocationEnabled *bool `json:"availabilityZoneRelocationEnabled,omitempty" tf:"availability_zone_relocation_enabled,omitempty"` + + // The name of the parameter group to be associated with this cluster. + // +kubebuilder:validation:Optional + ClusterParameterGroupName *string `json:"clusterParameterGroupName,omitempty" tf:"cluster_parameter_group_name,omitempty"` + + // The public key for the cluster + // +kubebuilder:validation:Optional + ClusterPublicKey *string `json:"clusterPublicKey,omitempty" tf:"cluster_public_key,omitempty"` + + // The specific revision number of the database in the cluster + // +kubebuilder:validation:Optional + ClusterRevisionNumber *string `json:"clusterRevisionNumber,omitempty" tf:"cluster_revision_number,omitempty"` + + // The name of a cluster subnet group to be associated with this cluster. 
If this parameter is not provided the resulting cluster will be deployed outside virtual private cloud (VPC). + // +kubebuilder:validation:Optional + ClusterSubnetGroupName *string `json:"clusterSubnetGroupName,omitempty" tf:"cluster_subnet_group_name,omitempty"` + + // The cluster type to use. Either single-node or multi-node. + // +kubebuilder:validation:Optional + ClusterType *string `json:"clusterType,omitempty" tf:"cluster_type,omitempty"` + + // The version of the Amazon Redshift engine software that you want to deploy on the cluster. + // The version selected runs on all the nodes in the cluster. + // +kubebuilder:validation:Optional + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // The name of the first database to be created when the cluster is created. + // If you do not provide a name, Amazon Redshift will create a default database called dev. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The Amazon Resource Name (ARN) for the IAM role that was set as default for the cluster when the cluster was created. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + DefaultIAMRoleArn *string `json:"defaultIamRoleArn,omitempty" tf:"default_iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate defaultIamRoleArn. + // +kubebuilder:validation:Optional + DefaultIAMRoleArnRef *v1.Reference `json:"defaultIamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate defaultIamRoleArn. + // +kubebuilder:validation:Optional + DefaultIAMRoleArnSelector *v1.Selector `json:"defaultIamRoleArnSelector,omitempty" tf:"-"` + + // The Elastic IP (EIP) address for the cluster. 
+ // +kubebuilder:validation:Optional + ElasticIP *string `json:"elasticIp,omitempty" tf:"elastic_ip,omitempty"` + + // If true , the data in the cluster is encrypted at rest. + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The connection endpoint + // +kubebuilder:validation:Optional + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // If true , enhanced VPC routing is enabled. + // +kubebuilder:validation:Optional + EnhancedVPCRouting *bool `json:"enhancedVpcRouting,omitempty" tf:"enhanced_vpc_routing,omitempty"` + + // The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, skip_final_snapshot must be false. + // +kubebuilder:validation:Optional + FinalSnapshotIdentifier *string `json:"finalSnapshotIdentifier,omitempty" tf:"final_snapshot_identifier,omitempty"` + + // References to Role in iam to populate iamRoles. + // +kubebuilder:validation:Optional + IAMRoleRefs []v1.Reference `json:"iamRoleRefs,omitempty" tf:"-"` + + // Selector for a list of Role in iam to populate iamRoles. + // +kubebuilder:validation:Optional + IAMRoleSelector *v1.Selector `json:"iamRoleSelector,omitempty" tf:"-"` + + // A list of IAM Role ARNs to associate with the cluster. A Maximum of 10 can be associated to the cluster at any time. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:refFieldName=IAMRoleRefs + // +crossplane:generate:reference:selectorFieldName=IAMRoleSelector + // +kubebuilder:validation:Optional + // +listType=set + IAMRoles []*string `json:"iamRoles,omitempty" tf:"iam_roles,omitempty"` + + // The ARN for the KMS encryption key. When specifying kms_key_id, encrypted needs to be set to true. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Logging, documented below. + // +kubebuilder:validation:Optional + Logging *LoggingParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks. Default value is current. + // +kubebuilder:validation:Optional + MaintenanceTrackName *string `json:"maintenanceTrackName,omitempty" tf:"maintenance_track_name,omitempty"` + + // Whether to use AWS SecretsManager to manage the cluster admin credentials. + // Conflicts with master_password. + // One of master_password or manage_master_password is required unless snapshot_identifier is provided. + // +kubebuilder:validation:Optional + ManageMasterPassword *bool `json:"manageMasterPassword,omitempty" tf:"manage_master_password,omitempty"` + + // The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots. Valid values are between -1 and 3653. Default value is -1. 
+ // +kubebuilder:validation:Optional + ManualSnapshotRetentionPeriod *float64 `json:"manualSnapshotRetentionPeriod,omitempty" tf:"manual_snapshot_retention_period,omitempty"` + + // ID of the KMS key used to encrypt the cluster admin credentials secret. + // +kubebuilder:validation:Optional + MasterPasswordSecretKMSKeyID *string `json:"masterPasswordSecretKmsKeyId,omitempty" tf:"master_password_secret_kms_key_id,omitempty"` + + // Password for the master DB user. + // Conflicts with manage_master_password. + // One of master_password or manage_master_password is required unless snapshot_identifier is provided. + // Note that this may show up in logs, and it will be stored in the state file. + // Password must contain at least 8 characters and contain at least one uppercase letter, one lowercase letter, and one number. + // +kubebuilder:validation:Optional + MasterPasswordSecretRef *v1.SecretKeySelector `json:"masterPasswordSecretRef,omitempty" tf:"-"` + + // Username for the master DB user. + // +kubebuilder:validation:Optional + MasterUsername *string `json:"masterUsername,omitempty" tf:"master_username,omitempty"` + + // Specifies if the Redshift cluster is multi-AZ. + // +kubebuilder:validation:Optional + MultiAz *bool `json:"multiAz,omitempty" tf:"multi_az,omitempty"` + + // The node type to be provisioned for the cluster. + // +kubebuilder:validation:Optional + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1. + // +kubebuilder:validation:Optional + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` + + // The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot. 
+ // +kubebuilder:validation:Optional + OwnerAccount *string `json:"ownerAccount,omitempty" tf:"owner_account,omitempty"` + + // The port number on which the cluster accepts incoming connections. Valid values are between 1115 and 65535. + // The cluster is accessible only via the JDBC and ODBC connection strings. + // Part of the connection string requires the port on which the cluster will listen for incoming connections. + // Default port is 5439. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The weekly time range (in UTC) during which automated cluster maintenance can occur. + // Format: ddd:hh24:mi-ddd:hh24:mi + // +kubebuilder:validation:Optional + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty" tf:"preferred_maintenance_window,omitempty"` + + // If true, the cluster can be accessed from a public network. Default is true. + // +kubebuilder:validation:Optional + PubliclyAccessible *bool `json:"publiclyAccessible,omitempty" tf:"publicly_accessible,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true , a final cluster snapshot is not created. If false , a final cluster snapshot is created before the cluster is deleted. Default is false. + // +kubebuilder:validation:Optional + SkipFinalSnapshot *bool `json:"skipFinalSnapshot,omitempty" tf:"skip_final_snapshot,omitempty"` + + // The ARN of the snapshot from which to create the new cluster. Conflicts with snapshot_identifier. + // +kubebuilder:validation:Optional + SnapshotArn *string `json:"snapshotArn,omitempty" tf:"snapshot_arn,omitempty"` + + // The name of the cluster the source snapshot was created from. 
+ // +kubebuilder:validation:Optional + SnapshotClusterIdentifier *string `json:"snapshotClusterIdentifier,omitempty" tf:"snapshot_cluster_identifier,omitempty"` + + // Configuration of automatic copy of snapshots from one region to another. Documented below. + // +kubebuilder:validation:Optional + SnapshotCopy *SnapshotCopyParameters `json:"snapshotCopy,omitempty" tf:"snapshot_copy,omitempty"` + + // The name of the snapshot from which to create the new cluster. Conflicts with snapshot_arn. + // +kubebuilder:validation:Optional + SnapshotIdentifier *string `json:"snapshotIdentifier,omitempty" tf:"snapshot_identifier,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDRefs []v1.Reference `json:"vpcSecurityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate vpcSecurityGroupIds. + // +kubebuilder:validation:Optional + VPCSecurityGroupIDSelector *v1.Selector `json:"vpcSecurityGroupIdSelector,omitempty" tf:"-"` + + // A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=VPCSecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=VPCSecurityGroupIDSelector + // +kubebuilder:validation:Optional + // +listType=set + VPCSecurityGroupIds []*string `json:"vpcSecurityGroupIds,omitempty" tf:"vpc_security_group_ids,omitempty"` +} + +type LoggingInitParameters struct { + + // The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. 
+ // For more information on the permissions required for the bucket, please read the AWS documentation + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Enables logging information such as queries and connection attempts, for the specified Amazon Redshift cluster. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The log destination type. An enum with possible values of s3 and cloudwatch. + LogDestinationType *string `json:"logDestinationType,omitempty" tf:"log_destination_type,omitempty"` + + // The collection of exported log types. Log types include the connection log, user log and user activity log. Required when log_destination_type is cloudwatch. Valid log types are connectionlog, userlog, and useractivitylog. + // +listType=set + LogExports []*string `json:"logExports,omitempty" tf:"log_exports,omitempty"` + + // The prefix applied to the log file names. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type LoggingObservation struct { + + // The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. + // For more information on the permissions required for the bucket, please read the AWS documentation + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Enables logging information such as queries and connection attempts, for the specified Amazon Redshift cluster. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The log destination type. An enum with possible values of s3 and cloudwatch. + LogDestinationType *string `json:"logDestinationType,omitempty" tf:"log_destination_type,omitempty"` + + // The collection of exported log types. Log types include the connection log, user log and user activity log. Required when log_destination_type is cloudwatch. 
Valid log types are connectionlog, userlog, and useractivitylog. + // +listType=set + LogExports []*string `json:"logExports,omitempty" tf:"log_exports,omitempty"` + + // The prefix applied to the log file names. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type LoggingParameters struct { + + // The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. + // For more information on the permissions required for the bucket, please read the AWS documentation + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Enables logging information such as queries and connection attempts, for the specified Amazon Redshift cluster. + // +kubebuilder:validation:Optional + Enable *bool `json:"enable" tf:"enable,omitempty"` + + // The log destination type. An enum with possible values of s3 and cloudwatch. + // +kubebuilder:validation:Optional + LogDestinationType *string `json:"logDestinationType,omitempty" tf:"log_destination_type,omitempty"` + + // The collection of exported log types. Log types include the connection log, user log and user activity log. Required when log_destination_type is cloudwatch. Valid log types are connectionlog, userlog, and useractivitylog. + // +kubebuilder:validation:Optional + // +listType=set + LogExports []*string `json:"logExports,omitempty" tf:"log_exports,omitempty"` + + // The prefix applied to the log file names. + // +kubebuilder:validation:Optional + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` +} + +type SnapshotCopyInitParameters struct { + + // The destination region that you want to copy snapshots to. 
+ DestinationRegion *string `json:"destinationRegion,omitempty" tf:"destination_region,omitempty"` + + // The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region. + GrantName *string `json:"grantName,omitempty" tf:"grant_name,omitempty"` + + // The number of days to retain automated snapshots in the destination region after they are copied from the source region. Defaults to 7. + RetentionPeriod *float64 `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"` +} + +type SnapshotCopyObservation struct { + + // The destination region that you want to copy snapshots to. + DestinationRegion *string `json:"destinationRegion,omitempty" tf:"destination_region,omitempty"` + + // The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region. + GrantName *string `json:"grantName,omitempty" tf:"grant_name,omitempty"` + + // The number of days to retain automated snapshots in the destination region after they are copied from the source region. Defaults to 7. + RetentionPeriod *float64 `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"` +} + +type SnapshotCopyParameters struct { + + // The destination region that you want to copy snapshots to. + // +kubebuilder:validation:Optional + DestinationRegion *string `json:"destinationRegion" tf:"destination_region,omitempty"` + + // The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region. + // +kubebuilder:validation:Optional + GrantName *string `json:"grantName,omitempty" tf:"grant_name,omitempty"` + + // The number of days to retain automated snapshots in the destination region after they are copied from the source region. Defaults to 7. 
+ // +kubebuilder:validation:Optional + RetentionPeriod *float64 `json:"retentionPeriod,omitempty" tf:"retention_period,omitempty"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the Clusters API. Provides a Redshift Cluster resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.nodeType) || (has(self.initProvider) && has(self.initProvider.nodeType))",message="spec.forProvider.nodeType is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." 
+ CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/redshift/v1beta2/zz_generated.conversion_hubs.go b/apis/redshift/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..e8af69cea0 --- /dev/null +++ b/apis/redshift/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ScheduledAction) Hub() {} diff --git a/apis/redshift/v1beta2/zz_generated.deepcopy.go b/apis/redshift/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..5f55390800 --- /dev/null +++ b/apis/redshift/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1880 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. 
+func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.AllowVersionUpgrade != nil { + in, out := &in.AllowVersionUpgrade, &out.AllowVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AquaConfigurationStatus != nil { + in, out := &in.AquaConfigurationStatus, &out.AquaConfigurationStatus + *out = new(string) + **out = **in + } + if in.AutomatedSnapshotRetentionPeriod != nil { + in, out := &in.AutomatedSnapshotRetentionPeriod, &out.AutomatedSnapshotRetentionPeriod + *out = new(float64) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.AvailabilityZoneRelocationEnabled != nil { + in, out := &in.AvailabilityZoneRelocationEnabled, &out.AvailabilityZoneRelocationEnabled + *out = new(bool) + **out = **in + } + if in.ClusterParameterGroupName != nil { + in, out := &in.ClusterParameterGroupName, &out.ClusterParameterGroupName + *out = new(string) + **out = **in + } + if in.ClusterPublicKey != nil { + in, out := &in.ClusterPublicKey, &out.ClusterPublicKey + *out = new(string) + **out = **in + } + if in.ClusterRevisionNumber != nil { + in, out := &in.ClusterRevisionNumber, &out.ClusterRevisionNumber + *out = new(string) + **out = **in + } + if in.ClusterSubnetGroupName != nil { + in, out := 
&in.ClusterSubnetGroupName, &out.ClusterSubnetGroupName + *out = new(string) + **out = **in + } + if in.ClusterType != nil { + in, out := &in.ClusterType, &out.ClusterType + *out = new(string) + **out = **in + } + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DefaultIAMRoleArn != nil { + in, out := &in.DefaultIAMRoleArn, &out.DefaultIAMRoleArn + *out = new(string) + **out = **in + } + if in.DefaultIAMRoleArnRef != nil { + in, out := &in.DefaultIAMRoleArnRef, &out.DefaultIAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultIAMRoleArnSelector != nil { + in, out := &in.DefaultIAMRoleArnSelector, &out.DefaultIAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ElasticIP != nil { + in, out := &in.ElasticIP, &out.ElasticIP + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EnhancedVPCRouting != nil { + in, out := &in.EnhancedVPCRouting, &out.EnhancedVPCRouting + *out = new(bool) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.IAMRoleRefs != nil { + in, out := &in.IAMRoleRefs, &out.IAMRoleRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IAMRoleSelector != nil { + in, out := &in.IAMRoleSelector, &out.IAMRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IAMRoles != nil { + in, out := &in.IAMRoles, &out.IAMRoles + *out = make([]*string, len(*in)) + for i := range *in { + if 
(*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceTrackName != nil { + in, out := &in.MaintenanceTrackName, &out.MaintenanceTrackName + *out = new(string) + **out = **in + } + if in.ManageMasterPassword != nil { + in, out := &in.ManageMasterPassword, &out.ManageMasterPassword + *out = new(bool) + **out = **in + } + if in.ManualSnapshotRetentionPeriod != nil { + in, out := &in.ManualSnapshotRetentionPeriod, &out.ManualSnapshotRetentionPeriod + *out = new(float64) + **out = **in + } + if in.MasterPasswordSecretKMSKeyID != nil { + in, out := &in.MasterPasswordSecretKMSKeyID, &out.MasterPasswordSecretKMSKeyID + *out = new(string) + **out = **in + } + if in.MasterPasswordSecretRef != nil { + in, out := &in.MasterPasswordSecretRef, &out.MasterPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.MasterUsername != nil { + in, out := &in.MasterUsername, &out.MasterUsername + *out = new(string) + **out = **in + } + if in.MultiAz != nil { + in, out := &in.MultiAz, &out.MultiAz + *out = new(bool) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.NumberOfNodes != nil { + in, out := &in.NumberOfNodes, &out.NumberOfNodes + *out = new(float64) + **out = **in + } + if in.OwnerAccount != nil { + in, out := &in.OwnerAccount, &out.OwnerAccount + *out = new(string) + **out = **in + } + if in.Port 
!= nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.PubliclyAccessible != nil { + in, out := &in.PubliclyAccessible, &out.PubliclyAccessible + *out = new(bool) + **out = **in + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotArn != nil { + in, out := &in.SnapshotArn, &out.SnapshotArn + *out = new(string) + **out = **in + } + if in.SnapshotClusterIdentifier != nil { + in, out := &in.SnapshotClusterIdentifier, &out.SnapshotClusterIdentifier + *out = new(string) + **out = **in + } + if in.SnapshotCopy != nil { + in, out := &in.SnapshotCopy, &out.SnapshotCopy + *out = new(SnapshotCopyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], 
&(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNodesInitParameters) DeepCopyInto(out *ClusterNodesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNodesInitParameters. +func (in *ClusterNodesInitParameters) DeepCopy() *ClusterNodesInitParameters { + if in == nil { + return nil + } + out := new(ClusterNodesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterNodesObservation) DeepCopyInto(out *ClusterNodesObservation) { + *out = *in + if in.NodeRole != nil { + in, out := &in.NodeRole, &out.NodeRole + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNodesObservation. +func (in *ClusterNodesObservation) DeepCopy() *ClusterNodesObservation { + if in == nil { + return nil + } + out := new(ClusterNodesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNodesParameters) DeepCopyInto(out *ClusterNodesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNodesParameters. +func (in *ClusterNodesParameters) DeepCopy() *ClusterNodesParameters { + if in == nil { + return nil + } + out := new(ClusterNodesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.AllowVersionUpgrade != nil { + in, out := &in.AllowVersionUpgrade, &out.AllowVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AquaConfigurationStatus != nil { + in, out := &in.AquaConfigurationStatus, &out.AquaConfigurationStatus + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AutomatedSnapshotRetentionPeriod != nil { + in, out := &in.AutomatedSnapshotRetentionPeriod, &out.AutomatedSnapshotRetentionPeriod + *out = new(float64) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.AvailabilityZoneRelocationEnabled != nil { + in, out := &in.AvailabilityZoneRelocationEnabled, &out.AvailabilityZoneRelocationEnabled + *out = new(bool) + **out = **in + } + if in.ClusterNamespaceArn != nil { + in, out := &in.ClusterNamespaceArn, &out.ClusterNamespaceArn + *out = new(string) + **out = **in + } + if in.ClusterNodes != nil { + in, out := &in.ClusterNodes, &out.ClusterNodes + *out = make([]ClusterNodesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterParameterGroupName != nil { + in, out := &in.ClusterParameterGroupName, &out.ClusterParameterGroupName + *out = new(string) + **out = **in + } + if in.ClusterPublicKey != nil { + in, out := &in.ClusterPublicKey, &out.ClusterPublicKey + *out = new(string) + **out = **in + } + if in.ClusterRevisionNumber != nil { + in, out := &in.ClusterRevisionNumber, &out.ClusterRevisionNumber + *out = new(string) + **out = **in + } + if in.ClusterSubnetGroupName != nil { + in, out := &in.ClusterSubnetGroupName, &out.ClusterSubnetGroupName + *out = new(string) + **out = 
**in + } + if in.ClusterType != nil { + in, out := &in.ClusterType, &out.ClusterType + *out = new(string) + **out = **in + } + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DefaultIAMRoleArn != nil { + in, out := &in.DefaultIAMRoleArn, &out.DefaultIAMRoleArn + *out = new(string) + **out = **in + } + if in.ElasticIP != nil { + in, out := &in.ElasticIP, &out.ElasticIP + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EnhancedVPCRouting != nil { + in, out := &in.EnhancedVPCRouting, &out.EnhancedVPCRouting + *out = new(bool) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.IAMRoles != nil { + in, out := &in.IAMRoles, &out.IAMRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingObservation) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceTrackName != nil { + in, out := &in.MaintenanceTrackName, &out.MaintenanceTrackName + *out = new(string) + **out = **in + } + if in.ManageMasterPassword != nil { + in, out := &in.ManageMasterPassword, 
&out.ManageMasterPassword + *out = new(bool) + **out = **in + } + if in.ManualSnapshotRetentionPeriod != nil { + in, out := &in.ManualSnapshotRetentionPeriod, &out.ManualSnapshotRetentionPeriod + *out = new(float64) + **out = **in + } + if in.MasterPasswordSecretArn != nil { + in, out := &in.MasterPasswordSecretArn, &out.MasterPasswordSecretArn + *out = new(string) + **out = **in + } + if in.MasterPasswordSecretKMSKeyID != nil { + in, out := &in.MasterPasswordSecretKMSKeyID, &out.MasterPasswordSecretKMSKeyID + *out = new(string) + **out = **in + } + if in.MasterUsername != nil { + in, out := &in.MasterUsername, &out.MasterUsername + *out = new(string) + **out = **in + } + if in.MultiAz != nil { + in, out := &in.MultiAz, &out.MultiAz + *out = new(bool) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.NumberOfNodes != nil { + in, out := &in.NumberOfNodes, &out.NumberOfNodes + *out = new(float64) + **out = **in + } + if in.OwnerAccount != nil { + in, out := &in.OwnerAccount, &out.OwnerAccount + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.PubliclyAccessible != nil { + in, out := &in.PubliclyAccessible, &out.PubliclyAccessible + *out = new(bool) + **out = **in + } + if in.SkipFinalSnapshot != nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotArn != nil { + in, out := &in.SnapshotArn, &out.SnapshotArn + *out = new(string) + **out = **in + } + if in.SnapshotClusterIdentifier != nil { + in, out := &in.SnapshotClusterIdentifier, &out.SnapshotClusterIdentifier + *out = new(string) + **out = **in + } + if in.SnapshotCopy != nil { + in, out := &in.SnapshotCopy, 
&out.SnapshotCopy + *out = new(SnapshotCopyObservation) + (*in).DeepCopyInto(*out) + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.AllowVersionUpgrade != nil { + in, out := &in.AllowVersionUpgrade, &out.AllowVersionUpgrade + *out = new(bool) + **out = **in + } + if in.ApplyImmediately != nil { + in, out := &in.ApplyImmediately, &out.ApplyImmediately + *out = new(bool) + **out = **in + } + if in.AquaConfigurationStatus != nil { + in, out := &in.AquaConfigurationStatus, &out.AquaConfigurationStatus + *out = new(string) + **out = **in + } + if in.AutomatedSnapshotRetentionPeriod != nil { + in, out := &in.AutomatedSnapshotRetentionPeriod, &out.AutomatedSnapshotRetentionPeriod + *out = new(float64) + **out = **in + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.AvailabilityZoneRelocationEnabled != nil { + in, out := &in.AvailabilityZoneRelocationEnabled, &out.AvailabilityZoneRelocationEnabled + *out = new(bool) + **out = **in + } + if in.ClusterParameterGroupName != nil { + in, out := &in.ClusterParameterGroupName, &out.ClusterParameterGroupName + *out = new(string) + **out = **in + } + if in.ClusterPublicKey != nil { + in, out := &in.ClusterPublicKey, &out.ClusterPublicKey + *out = new(string) + **out = **in + } + if in.ClusterRevisionNumber != nil { + in, out := &in.ClusterRevisionNumber, &out.ClusterRevisionNumber + *out = new(string) + **out = **in + } + if in.ClusterSubnetGroupName != nil { + in, out := &in.ClusterSubnetGroupName, &out.ClusterSubnetGroupName + *out = new(string) + **out = **in + } + if in.ClusterType != nil { + in, out := &in.ClusterType, &out.ClusterType + *out = new(string) + **out = **in + } + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DefaultIAMRoleArn != nil { + in, out := 
&in.DefaultIAMRoleArn, &out.DefaultIAMRoleArn + *out = new(string) + **out = **in + } + if in.DefaultIAMRoleArnRef != nil { + in, out := &in.DefaultIAMRoleArnRef, &out.DefaultIAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultIAMRoleArnSelector != nil { + in, out := &in.DefaultIAMRoleArnSelector, &out.DefaultIAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ElasticIP != nil { + in, out := &in.ElasticIP, &out.ElasticIP + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EnhancedVPCRouting != nil { + in, out := &in.EnhancedVPCRouting, &out.EnhancedVPCRouting + *out = new(bool) + **out = **in + } + if in.FinalSnapshotIdentifier != nil { + in, out := &in.FinalSnapshotIdentifier, &out.FinalSnapshotIdentifier + *out = new(string) + **out = **in + } + if in.IAMRoleRefs != nil { + in, out := &in.IAMRoleRefs, &out.IAMRoleRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IAMRoleSelector != nil { + in, out := &in.IAMRoleSelector, &out.IAMRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IAMRoles != nil { + in, out := &in.IAMRoles, &out.IAMRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Logging 
!= nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceTrackName != nil { + in, out := &in.MaintenanceTrackName, &out.MaintenanceTrackName + *out = new(string) + **out = **in + } + if in.ManageMasterPassword != nil { + in, out := &in.ManageMasterPassword, &out.ManageMasterPassword + *out = new(bool) + **out = **in + } + if in.ManualSnapshotRetentionPeriod != nil { + in, out := &in.ManualSnapshotRetentionPeriod, &out.ManualSnapshotRetentionPeriod + *out = new(float64) + **out = **in + } + if in.MasterPasswordSecretKMSKeyID != nil { + in, out := &in.MasterPasswordSecretKMSKeyID, &out.MasterPasswordSecretKMSKeyID + *out = new(string) + **out = **in + } + if in.MasterPasswordSecretRef != nil { + in, out := &in.MasterPasswordSecretRef, &out.MasterPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.MasterUsername != nil { + in, out := &in.MasterUsername, &out.MasterUsername + *out = new(string) + **out = **in + } + if in.MultiAz != nil { + in, out := &in.MultiAz, &out.MultiAz + *out = new(bool) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.NumberOfNodes != nil { + in, out := &in.NumberOfNodes, &out.NumberOfNodes + *out = new(float64) + **out = **in + } + if in.OwnerAccount != nil { + in, out := &in.OwnerAccount, &out.OwnerAccount + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.PubliclyAccessible != nil { + in, out := &in.PubliclyAccessible, &out.PubliclyAccessible + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SkipFinalSnapshot != 
nil { + in, out := &in.SkipFinalSnapshot, &out.SkipFinalSnapshot + *out = new(bool) + **out = **in + } + if in.SnapshotArn != nil { + in, out := &in.SnapshotArn, &out.SnapshotArn + *out = new(string) + **out = **in + } + if in.SnapshotClusterIdentifier != nil { + in, out := &in.SnapshotClusterIdentifier, &out.SnapshotClusterIdentifier + *out = new(string) + **out = **in + } + if in.SnapshotCopy != nil { + in, out := &in.SnapshotCopy, &out.SnapshotCopy + *out = new(SnapshotCopyParameters) + (*in).DeepCopyInto(*out) + } + if in.SnapshotIdentifier != nil { + in, out := &in.SnapshotIdentifier, &out.SnapshotIdentifier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCSecurityGroupIDRefs != nil { + in, out := &in.VPCSecurityGroupIDRefs, &out.VPCSecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPCSecurityGroupIDSelector != nil { + in, out := &in.VPCSecurityGroupIDSelector, &out.VPCSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPCSecurityGroupIds != nil { + in, out := &in.VPCSecurityGroupIds, &out.VPCSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. 
+func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingInitParameters) DeepCopyInto(out *LoggingInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.LogDestinationType != nil { + in, out := &in.LogDestinationType, &out.LogDestinationType + *out = new(string) + **out = **in + } + if in.LogExports != nil { + in, out := &in.LogExports, &out.LogExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInitParameters. +func (in *LoggingInitParameters) DeepCopy() *LoggingInitParameters { + if in == nil { + return nil + } + out := new(LoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingObservation) DeepCopyInto(out *LoggingObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.LogDestinationType != nil { + in, out := &in.LogDestinationType, &out.LogDestinationType + *out = new(string) + **out = **in + } + if in.LogExports != nil { + in, out := &in.LogExports, &out.LogExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingObservation. +func (in *LoggingObservation) DeepCopy() *LoggingObservation { + if in == nil { + return nil + } + out := new(LoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingParameters) DeepCopyInto(out *LoggingParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.LogDestinationType != nil { + in, out := &in.LogDestinationType, &out.LogDestinationType + *out = new(string) + **out = **in + } + if in.LogExports != nil { + in, out := &in.LogExports, &out.LogExports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingParameters. +func (in *LoggingParameters) DeepCopy() *LoggingParameters { + if in == nil { + return nil + } + out := new(LoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PauseClusterInitParameters) DeepCopyInto(out *PauseClusterInitParameters) { + *out = *in + if in.ClusterIdentifier != nil { + in, out := &in.ClusterIdentifier, &out.ClusterIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PauseClusterInitParameters. +func (in *PauseClusterInitParameters) DeepCopy() *PauseClusterInitParameters { + if in == nil { + return nil + } + out := new(PauseClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PauseClusterObservation) DeepCopyInto(out *PauseClusterObservation) { + *out = *in + if in.ClusterIdentifier != nil { + in, out := &in.ClusterIdentifier, &out.ClusterIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PauseClusterObservation. +func (in *PauseClusterObservation) DeepCopy() *PauseClusterObservation { + if in == nil { + return nil + } + out := new(PauseClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PauseClusterParameters) DeepCopyInto(out *PauseClusterParameters) { + *out = *in + if in.ClusterIdentifier != nil { + in, out := &in.ClusterIdentifier, &out.ClusterIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PauseClusterParameters. +func (in *PauseClusterParameters) DeepCopy() *PauseClusterParameters { + if in == nil { + return nil + } + out := new(PauseClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResizeClusterInitParameters) DeepCopyInto(out *ResizeClusterInitParameters) { + *out = *in + if in.Classic != nil { + in, out := &in.Classic, &out.Classic + *out = new(bool) + **out = **in + } + if in.ClusterIdentifier != nil { + in, out := &in.ClusterIdentifier, &out.ClusterIdentifier + *out = new(string) + **out = **in + } + if in.ClusterType != nil { + in, out := &in.ClusterType, &out.ClusterType + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.NumberOfNodes != nil { + in, out := &in.NumberOfNodes, &out.NumberOfNodes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResizeClusterInitParameters. +func (in *ResizeClusterInitParameters) DeepCopy() *ResizeClusterInitParameters { + if in == nil { + return nil + } + out := new(ResizeClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResizeClusterObservation) DeepCopyInto(out *ResizeClusterObservation) { + *out = *in + if in.Classic != nil { + in, out := &in.Classic, &out.Classic + *out = new(bool) + **out = **in + } + if in.ClusterIdentifier != nil { + in, out := &in.ClusterIdentifier, &out.ClusterIdentifier + *out = new(string) + **out = **in + } + if in.ClusterType != nil { + in, out := &in.ClusterType, &out.ClusterType + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.NumberOfNodes != nil { + in, out := &in.NumberOfNodes, &out.NumberOfNodes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResizeClusterObservation. 
+func (in *ResizeClusterObservation) DeepCopy() *ResizeClusterObservation { + if in == nil { + return nil + } + out := new(ResizeClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResizeClusterParameters) DeepCopyInto(out *ResizeClusterParameters) { + *out = *in + if in.Classic != nil { + in, out := &in.Classic, &out.Classic + *out = new(bool) + **out = **in + } + if in.ClusterIdentifier != nil { + in, out := &in.ClusterIdentifier, &out.ClusterIdentifier + *out = new(string) + **out = **in + } + if in.ClusterType != nil { + in, out := &in.ClusterType, &out.ClusterType + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.NumberOfNodes != nil { + in, out := &in.NumberOfNodes, &out.NumberOfNodes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResizeClusterParameters. +func (in *ResizeClusterParameters) DeepCopy() *ResizeClusterParameters { + if in == nil { + return nil + } + out := new(ResizeClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResumeClusterInitParameters) DeepCopyInto(out *ResumeClusterInitParameters) { + *out = *in + if in.ClusterIdentifier != nil { + in, out := &in.ClusterIdentifier, &out.ClusterIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResumeClusterInitParameters. 
+func (in *ResumeClusterInitParameters) DeepCopy() *ResumeClusterInitParameters { + if in == nil { + return nil + } + out := new(ResumeClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResumeClusterObservation) DeepCopyInto(out *ResumeClusterObservation) { + *out = *in + if in.ClusterIdentifier != nil { + in, out := &in.ClusterIdentifier, &out.ClusterIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResumeClusterObservation. +func (in *ResumeClusterObservation) DeepCopy() *ResumeClusterObservation { + if in == nil { + return nil + } + out := new(ResumeClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResumeClusterParameters) DeepCopyInto(out *ResumeClusterParameters) { + *out = *in + if in.ClusterIdentifier != nil { + in, out := &in.ClusterIdentifier, &out.ClusterIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResumeClusterParameters. +func (in *ResumeClusterParameters) DeepCopy() *ResumeClusterParameters { + if in == nil { + return nil + } + out := new(ResumeClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledAction) DeepCopyInto(out *ScheduledAction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledAction. 
+func (in *ScheduledAction) DeepCopy() *ScheduledAction { + if in == nil { + return nil + } + out := new(ScheduledAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduledAction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledActionInitParameters) DeepCopyInto(out *ScheduledActionInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.IAMRole != nil { + in, out := &in.IAMRole, &out.IAMRole + *out = new(string) + **out = **in + } + if in.IAMRoleRef != nil { + in, out := &in.IAMRoleRef, &out.IAMRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleSelector != nil { + in, out := &in.IAMRoleSelector, &out.IAMRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TargetAction != nil { + in, out := &in.TargetAction, &out.TargetAction + *out = new(TargetActionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionInitParameters. 
+func (in *ScheduledActionInitParameters) DeepCopy() *ScheduledActionInitParameters { + if in == nil { + return nil + } + out := new(ScheduledActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledActionList) DeepCopyInto(out *ScheduledActionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScheduledAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionList. +func (in *ScheduledActionList) DeepCopy() *ScheduledActionList { + if in == nil { + return nil + } + out := new(ScheduledActionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduledActionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduledActionObservation) DeepCopyInto(out *ScheduledActionObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.IAMRole != nil { + in, out := &in.IAMRole, &out.IAMRole + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TargetAction != nil { + in, out := &in.TargetAction, &out.TargetAction + *out = new(TargetActionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionObservation. +func (in *ScheduledActionObservation) DeepCopy() *ScheduledActionObservation { + if in == nil { + return nil + } + out := new(ScheduledActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduledActionParameters) DeepCopyInto(out *ScheduledActionParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.IAMRole != nil { + in, out := &in.IAMRole, &out.IAMRole + *out = new(string) + **out = **in + } + if in.IAMRoleRef != nil { + in, out := &in.IAMRoleRef, &out.IAMRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleSelector != nil { + in, out := &in.IAMRoleSelector, &out.IAMRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TargetAction != nil { + in, out := &in.TargetAction, &out.TargetAction + *out = new(TargetActionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionParameters. +func (in *ScheduledActionParameters) DeepCopy() *ScheduledActionParameters { + if in == nil { + return nil + } + out := new(ScheduledActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduledActionSpec) DeepCopyInto(out *ScheduledActionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionSpec. +func (in *ScheduledActionSpec) DeepCopy() *ScheduledActionSpec { + if in == nil { + return nil + } + out := new(ScheduledActionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledActionStatus) DeepCopyInto(out *ScheduledActionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledActionStatus. +func (in *ScheduledActionStatus) DeepCopy() *ScheduledActionStatus { + if in == nil { + return nil + } + out := new(ScheduledActionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotCopyInitParameters) DeepCopyInto(out *SnapshotCopyInitParameters) { + *out = *in + if in.DestinationRegion != nil { + in, out := &in.DestinationRegion, &out.DestinationRegion + *out = new(string) + **out = **in + } + if in.GrantName != nil { + in, out := &in.GrantName, &out.GrantName + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotCopyInitParameters. 
+func (in *SnapshotCopyInitParameters) DeepCopy() *SnapshotCopyInitParameters { + if in == nil { + return nil + } + out := new(SnapshotCopyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotCopyObservation) DeepCopyInto(out *SnapshotCopyObservation) { + *out = *in + if in.DestinationRegion != nil { + in, out := &in.DestinationRegion, &out.DestinationRegion + *out = new(string) + **out = **in + } + if in.GrantName != nil { + in, out := &in.GrantName, &out.GrantName + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotCopyObservation. +func (in *SnapshotCopyObservation) DeepCopy() *SnapshotCopyObservation { + if in == nil { + return nil + } + out := new(SnapshotCopyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotCopyParameters) DeepCopyInto(out *SnapshotCopyParameters) { + *out = *in + if in.DestinationRegion != nil { + in, out := &in.DestinationRegion, &out.DestinationRegion + *out = new(string) + **out = **in + } + if in.GrantName != nil { + in, out := &in.GrantName, &out.GrantName + *out = new(string) + **out = **in + } + if in.RetentionPeriod != nil { + in, out := &in.RetentionPeriod, &out.RetentionPeriod + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotCopyParameters. 
+func (in *SnapshotCopyParameters) DeepCopy() *SnapshotCopyParameters { + if in == nil { + return nil + } + out := new(SnapshotCopyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetActionInitParameters) DeepCopyInto(out *TargetActionInitParameters) { + *out = *in + if in.PauseCluster != nil { + in, out := &in.PauseCluster, &out.PauseCluster + *out = new(PauseClusterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ResizeCluster != nil { + in, out := &in.ResizeCluster, &out.ResizeCluster + *out = new(ResizeClusterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ResumeCluster != nil { + in, out := &in.ResumeCluster, &out.ResumeCluster + *out = new(ResumeClusterInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetActionInitParameters. +func (in *TargetActionInitParameters) DeepCopy() *TargetActionInitParameters { + if in == nil { + return nil + } + out := new(TargetActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetActionObservation) DeepCopyInto(out *TargetActionObservation) { + *out = *in + if in.PauseCluster != nil { + in, out := &in.PauseCluster, &out.PauseCluster + *out = new(PauseClusterObservation) + (*in).DeepCopyInto(*out) + } + if in.ResizeCluster != nil { + in, out := &in.ResizeCluster, &out.ResizeCluster + *out = new(ResizeClusterObservation) + (*in).DeepCopyInto(*out) + } + if in.ResumeCluster != nil { + in, out := &in.ResumeCluster, &out.ResumeCluster + *out = new(ResumeClusterObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetActionObservation. 
+func (in *TargetActionObservation) DeepCopy() *TargetActionObservation { + if in == nil { + return nil + } + out := new(TargetActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetActionParameters) DeepCopyInto(out *TargetActionParameters) { + *out = *in + if in.PauseCluster != nil { + in, out := &in.PauseCluster, &out.PauseCluster + *out = new(PauseClusterParameters) + (*in).DeepCopyInto(*out) + } + if in.ResizeCluster != nil { + in, out := &in.ResizeCluster, &out.ResizeCluster + *out = new(ResizeClusterParameters) + (*in).DeepCopyInto(*out) + } + if in.ResumeCluster != nil { + in, out := &in.ResumeCluster, &out.ResumeCluster + *out = new(ResumeClusterParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetActionParameters. +func (in *TargetActionParameters) DeepCopy() *TargetActionParameters { + if in == nil { + return nil + } + out := new(TargetActionParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/redshift/v1beta2/zz_generated.managed.go b/apis/redshift/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..bddb046b5e --- /dev/null +++ b/apis/redshift/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. 
+func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. +func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ScheduledAction. +func (mg *ScheduledAction) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ScheduledAction. +func (mg *ScheduledAction) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ScheduledAction. 
+func (mg *ScheduledAction) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ScheduledAction. +func (mg *ScheduledAction) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ScheduledAction. +func (mg *ScheduledAction) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ScheduledAction. +func (mg *ScheduledAction) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ScheduledAction. +func (mg *ScheduledAction) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ScheduledAction. +func (mg *ScheduledAction) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ScheduledAction. +func (mg *ScheduledAction) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ScheduledAction. +func (mg *ScheduledAction) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ScheduledAction. +func (mg *ScheduledAction) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ScheduledAction. 
+func (mg *ScheduledAction) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/redshift/v1beta2/zz_generated.managedlist.go b/apis/redshift/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..21eb1544aa --- /dev/null +++ b/apis/redshift/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ClusterList. +func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ScheduledActionList. +func (l *ScheduledActionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/redshift/v1beta2/zz_generated.resolvers.go b/apis/redshift/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..9ac45f2ffe --- /dev/null +++ b/apis/redshift/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,235 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Cluster. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultIAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.DefaultIAMRoleArnRef, + Selector: mg.Spec.ForProvider.DefaultIAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultIAMRoleArn") + } + mg.Spec.ForProvider.DefaultIAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultIAMRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.IAMRoles), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.IAMRoleRefs, + Selector: mg.Spec.ForProvider.IAMRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMRoles") + } + mg.Spec.ForProvider.IAMRoles = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.IAMRoleRefs = mrsp.ResolvedReferences + { + m, l, err = 
apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCSecurityGroupIds") + } + mg.Spec.ForProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.DefaultIAMRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.DefaultIAMRoleArnRef, + Selector: mg.Spec.InitProvider.DefaultIAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultIAMRoleArn") + } + mg.Spec.InitProvider.DefaultIAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultIAMRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.IAMRoles), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.IAMRoleRefs, + Selector: mg.Spec.InitProvider.IAMRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMRoles") + } + mg.Spec.InitProvider.IAMRoles = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.IAMRoleRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCSecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCSecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCSecurityGroupIds") + } + mg.Spec.InitProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this ScheduledAction. 
+func (mg *ScheduledAction) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IAMRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.IAMRoleRef, + Selector: mg.Spec.ForProvider.IAMRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IAMRole") + } + mg.Spec.ForProvider.IAMRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IAMRoleRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IAMRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.IAMRoleRef, + Selector: mg.Spec.InitProvider.IAMRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IAMRole") + } + mg.Spec.InitProvider.IAMRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IAMRoleRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/redshift/v1beta2/zz_groupversion_info.go b/apis/redshift/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..ee71081038 --- /dev/null +++ 
b/apis/redshift/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=redshift.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "redshift.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/redshift/v1beta2/zz_scheduledaction_terraformed.go b/apis/redshift/v1beta2/zz_scheduledaction_terraformed.go new file mode 100755 index 0000000000..e21a00bf94 --- /dev/null +++ b/apis/redshift/v1beta2/zz_scheduledaction_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ScheduledAction +func (mg *ScheduledAction) GetTerraformResourceType() string { + return "aws_redshift_scheduled_action" +} + +// GetConnectionDetailsMapping for this ScheduledAction +func (tr *ScheduledAction) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ScheduledAction +func (tr *ScheduledAction) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ScheduledAction +func (tr *ScheduledAction) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ScheduledAction +func (tr *ScheduledAction) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ScheduledAction +func (tr *ScheduledAction) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ScheduledAction +func (tr *ScheduledAction) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ScheduledAction +func (tr *ScheduledAction) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ScheduledAction
+func (tr *ScheduledAction) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this ScheduledAction using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *ScheduledAction) LateInitialize(attrs []byte) (bool, error) {
+	params := &ScheduledActionParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ScheduledAction) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/redshift/v1beta2/zz_scheduledaction_types.go b/apis/redshift/v1beta2/zz_scheduledaction_types.go new file mode 100755 index 0000000000..54a239affe --- /dev/null +++ b/apis/redshift/v1beta2/zz_scheduledaction_types.go @@ -0,0 +1,319 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PauseClusterInitParameters struct { + + // The identifier of the cluster to be paused. + ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` +} + +type PauseClusterObservation struct { + + // The identifier of the cluster to be paused. + ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` +} + +type PauseClusterParameters struct { + + // The identifier of the cluster to be paused. + // +kubebuilder:validation:Optional + ClusterIdentifier *string `json:"clusterIdentifier" tf:"cluster_identifier,omitempty"` +} + +type ResizeClusterInitParameters struct { + + // A boolean value indicating whether the resize operation is using the classic resize process. Default: false. + Classic *bool `json:"classic,omitempty" tf:"classic,omitempty"` + + // The unique identifier for the cluster to resize. + ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` + + // The new cluster type for the specified cluster. + ClusterType *string `json:"clusterType,omitempty" tf:"cluster_type,omitempty"` + + // The new node type for the nodes you are adding. 
+ NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // The new number of nodes for the cluster. + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` +} + +type ResizeClusterObservation struct { + + // A boolean value indicating whether the resize operation is using the classic resize process. Default: false. + Classic *bool `json:"classic,omitempty" tf:"classic,omitempty"` + + // The unique identifier for the cluster to resize. + ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` + + // The new cluster type for the specified cluster. + ClusterType *string `json:"clusterType,omitempty" tf:"cluster_type,omitempty"` + + // The new node type for the nodes you are adding. + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // The new number of nodes for the cluster. + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` +} + +type ResizeClusterParameters struct { + + // A boolean value indicating whether the resize operation is using the classic resize process. Default: false. + // +kubebuilder:validation:Optional + Classic *bool `json:"classic,omitempty" tf:"classic,omitempty"` + + // The unique identifier for the cluster to resize. + // +kubebuilder:validation:Optional + ClusterIdentifier *string `json:"clusterIdentifier" tf:"cluster_identifier,omitempty"` + + // The new cluster type for the specified cluster. + // +kubebuilder:validation:Optional + ClusterType *string `json:"clusterType,omitempty" tf:"cluster_type,omitempty"` + + // The new node type for the nodes you are adding. + // +kubebuilder:validation:Optional + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // The new number of nodes for the cluster. 
+ // +kubebuilder:validation:Optional + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` +} + +type ResumeClusterInitParameters struct { + + // The identifier of the cluster to be resumed. + ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` +} + +type ResumeClusterObservation struct { + + // The identifier of the cluster to be resumed. + ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` +} + +type ResumeClusterParameters struct { + + // The identifier of the cluster to be resumed. + // +kubebuilder:validation:Optional + ClusterIdentifier *string `json:"clusterIdentifier" tf:"cluster_identifier,omitempty"` +} + +type ScheduledActionInitParameters struct { + + // The description of the scheduled action. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether to enable the scheduled action. Default is true . + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The end time in UTC when the schedule is active, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // The IAM role to assume to run the scheduled action. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + IAMRole *string `json:"iamRole,omitempty" tf:"iam_role,omitempty"` + + // Reference to a Role in iam to populate iamRole. + // +kubebuilder:validation:Optional + IAMRoleRef *v1.Reference `json:"iamRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRole. + // +kubebuilder:validation:Optional + IAMRoleSelector *v1.Selector `json:"iamRoleSelector,omitempty" tf:"-"` + + // The schedule of action. 
The schedule is defined format of "at expression" or "cron expression", for example at(2016-03-04T17:27:00) or cron(0 10 ? * MON *). See Scheduled Action for more information. + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The start time in UTC when the schedule is active, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Target action. Documented below. + TargetAction *TargetActionInitParameters `json:"targetAction,omitempty" tf:"target_action,omitempty"` +} + +type ScheduledActionObservation struct { + + // The description of the scheduled action. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether to enable the scheduled action. Default is true . + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The end time in UTC when the schedule is active, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // The IAM role to assume to run the scheduled action. + IAMRole *string `json:"iamRole,omitempty" tf:"iam_role,omitempty"` + + // The Redshift Scheduled Action name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The schedule of action. The schedule is defined format of "at expression" or "cron expression", for example at(2016-03-04T17:27:00) or cron(0 10 ? * MON *). See Scheduled Action for more information. + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The start time in UTC when the schedule is active, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Target action. Documented below. + TargetAction *TargetActionObservation `json:"targetAction,omitempty" tf:"target_action,omitempty"` +} + +type ScheduledActionParameters struct { + + // The description of the scheduled action. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether to enable the scheduled action. Default is true . + // +kubebuilder:validation:Optional + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The end time in UTC when the schedule is active, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + // +kubebuilder:validation:Optional + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // The IAM role to assume to run the scheduled action. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + IAMRole *string `json:"iamRole,omitempty" tf:"iam_role,omitempty"` + + // Reference to a Role in iam to populate iamRole. + // +kubebuilder:validation:Optional + IAMRoleRef *v1.Reference `json:"iamRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRole. + // +kubebuilder:validation:Optional + IAMRoleSelector *v1.Selector `json:"iamRoleSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The schedule of action. The schedule is defined format of "at expression" or "cron expression", for example at(2016-03-04T17:27:00) or cron(0 10 ? * MON *). See Scheduled Action for more information. + // +kubebuilder:validation:Optional + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The start time in UTC when the schedule is active, in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Target action. Documented below. 
+ // +kubebuilder:validation:Optional + TargetAction *TargetActionParameters `json:"targetAction,omitempty" tf:"target_action,omitempty"` +} + +type TargetActionInitParameters struct { + + // An action that runs a PauseCluster API operation. Documented below. + PauseCluster *PauseClusterInitParameters `json:"pauseCluster,omitempty" tf:"pause_cluster,omitempty"` + + // An action that runs a ResizeCluster API operation. Documented below. + ResizeCluster *ResizeClusterInitParameters `json:"resizeCluster,omitempty" tf:"resize_cluster,omitempty"` + + // An action that runs a ResumeCluster API operation. Documented below. + ResumeCluster *ResumeClusterInitParameters `json:"resumeCluster,omitempty" tf:"resume_cluster,omitempty"` +} + +type TargetActionObservation struct { + + // An action that runs a PauseCluster API operation. Documented below. + PauseCluster *PauseClusterObservation `json:"pauseCluster,omitempty" tf:"pause_cluster,omitempty"` + + // An action that runs a ResizeCluster API operation. Documented below. + ResizeCluster *ResizeClusterObservation `json:"resizeCluster,omitempty" tf:"resize_cluster,omitempty"` + + // An action that runs a ResumeCluster API operation. Documented below. + ResumeCluster *ResumeClusterObservation `json:"resumeCluster,omitempty" tf:"resume_cluster,omitempty"` +} + +type TargetActionParameters struct { + + // An action that runs a PauseCluster API operation. Documented below. + // +kubebuilder:validation:Optional + PauseCluster *PauseClusterParameters `json:"pauseCluster,omitempty" tf:"pause_cluster,omitempty"` + + // An action that runs a ResizeCluster API operation. Documented below. + // +kubebuilder:validation:Optional + ResizeCluster *ResizeClusterParameters `json:"resizeCluster,omitempty" tf:"resize_cluster,omitempty"` + + // An action that runs a ResumeCluster API operation. Documented below. 
+ // +kubebuilder:validation:Optional + ResumeCluster *ResumeClusterParameters `json:"resumeCluster,omitempty" tf:"resume_cluster,omitempty"` +} + +// ScheduledActionSpec defines the desired state of ScheduledAction +type ScheduledActionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScheduledActionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScheduledActionInitParameters `json:"initProvider,omitempty"` +} + +// ScheduledActionStatus defines the observed state of ScheduledAction. +type ScheduledActionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScheduledActionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ScheduledAction is the Schema for the ScheduledActions API. Provides a Redshift Scheduled Action resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ScheduledAction struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.schedule) || (has(self.initProvider) && has(self.initProvider.schedule))",message="spec.forProvider.schedule is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetAction) || (has(self.initProvider) && has(self.initProvider.targetAction))",message="spec.forProvider.targetAction is a required parameter" + Spec ScheduledActionSpec `json:"spec"` + Status ScheduledActionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduledActionList contains a list of ScheduledActions +type ScheduledActionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScheduledAction `json:"items"` +} + +// Repository type metadata. +var ( + ScheduledAction_Kind = "ScheduledAction" + ScheduledAction_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ScheduledAction_Kind}.String() + ScheduledAction_KindAPIVersion = ScheduledAction_Kind + "." 
+ CRDGroupVersion.String() + ScheduledAction_GroupVersionKind = CRDGroupVersion.WithKind(ScheduledAction_Kind) +) + +func init() { + SchemeBuilder.Register(&ScheduledAction{}, &ScheduledActionList{}) +} diff --git a/apis/resourcegroups/v1beta1/zz_generated.conversion_spokes.go b/apis/resourcegroups/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..6e5344f630 --- /dev/null +++ b/apis/resourcegroups/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Group to the hub type. +func (tr *Group) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Group type. 
+func (tr *Group) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/resourcegroups/v1beta2/zz_generated.conversion_hubs.go b/apis/resourcegroups/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..471e2253ef --- /dev/null +++ b/apis/resourcegroups/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Group) Hub() {} diff --git a/apis/resourcegroups/v1beta2/zz_generated.deepcopy.go b/apis/resourcegroups/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..339545bc94 --- /dev/null +++ b/apis/resourcegroups/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,531 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Group) DeepCopyInto(out *Group) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group. +func (in *Group) DeepCopy() *Group { + if in == nil { + return nil + } + out := new(Group) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Group) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupInitParameters) DeepCopyInto(out *GroupInitParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]ConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ResourceQuery != nil { + in, out := &in.ResourceQuery, &out.ResourceQuery + *out = new(ResourceQueryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupInitParameters. +func (in *GroupInitParameters) DeepCopy() *GroupInitParameters { + if in == nil { + return nil + } + out := new(GroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupList) DeepCopyInto(out *GroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Group, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList. +func (in *GroupList) DeepCopy() *GroupList { + if in == nil { + return nil + } + out := new(GroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *GroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupObservation) DeepCopyInto(out *GroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]ConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceQuery != nil { + in, out := &in.ResourceQuery, &out.ResourceQuery + *out = new(ResourceQueryObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupObservation. +func (in *GroupObservation) DeepCopy() *GroupObservation { + if in == nil { + return nil + } + out := new(GroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *GroupParameters) DeepCopyInto(out *GroupParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = make([]ConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ResourceQuery != nil { + in, out := &in.ResourceQuery, &out.ResourceQuery + *out = new(ResourceQueryParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupParameters. +func (in *GroupParameters) DeepCopy() *GroupParameters { + if in == nil { + return nil + } + out := new(GroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSpec) DeepCopyInto(out *GroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSpec. +func (in *GroupSpec) DeepCopy() *GroupSpec { + if in == nil { + return nil + } + out := new(GroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupStatus) DeepCopyInto(out *GroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupStatus. +func (in *GroupStatus) DeepCopy() *GroupStatus { + if in == nil { + return nil + } + out := new(GroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersInitParameters) DeepCopyInto(out *ParametersInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersInitParameters. +func (in *ParametersInitParameters) DeepCopy() *ParametersInitParameters { + if in == nil { + return nil + } + out := new(ParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersObservation) DeepCopyInto(out *ParametersObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersObservation. 
+func (in *ParametersObservation) DeepCopy() *ParametersObservation { + if in == nil { + return nil + } + out := new(ParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersParameters) DeepCopyInto(out *ParametersParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersParameters. +func (in *ParametersParameters) DeepCopy() *ParametersParameters { + if in == nil { + return nil + } + out := new(ParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQueryInitParameters) DeepCopyInto(out *ResourceQueryInitParameters) { + *out = *in + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQueryInitParameters. +func (in *ResourceQueryInitParameters) DeepCopy() *ResourceQueryInitParameters { + if in == nil { + return nil + } + out := new(ResourceQueryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceQueryObservation) DeepCopyInto(out *ResourceQueryObservation) { + *out = *in + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQueryObservation. +func (in *ResourceQueryObservation) DeepCopy() *ResourceQueryObservation { + if in == nil { + return nil + } + out := new(ResourceQueryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQueryParameters) DeepCopyInto(out *ResourceQueryParameters) { + *out = *in + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQueryParameters. +func (in *ResourceQueryParameters) DeepCopy() *ResourceQueryParameters { + if in == nil { + return nil + } + out := new(ResourceQueryParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/resourcegroups/v1beta2/zz_generated.managed.go b/apis/resourcegroups/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..93fa467759 --- /dev/null +++ b/apis/resourcegroups/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Group. +func (mg *Group) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Group. 
+func (mg *Group) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Group. +func (mg *Group) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Group. +func (mg *Group) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Group. +func (mg *Group) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Group. +func (mg *Group) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Group. +func (mg *Group) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Group. +func (mg *Group) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Group. +func (mg *Group) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Group. +func (mg *Group) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Group. +func (mg *Group) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Group. 
+func (mg *Group) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/resourcegroups/v1beta2/zz_generated.managedlist.go b/apis/resourcegroups/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..5274ced33e --- /dev/null +++ b/apis/resourcegroups/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this GroupList. +func (l *GroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/resourcegroups/v1beta2/zz_group_terraformed.go b/apis/resourcegroups/v1beta2/zz_group_terraformed.go new file mode 100755 index 0000000000..c1ac2840f1 --- /dev/null +++ b/apis/resourcegroups/v1beta2/zz_group_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Group +func (mg *Group) GetTerraformResourceType() string { + return "aws_resourcegroups_group" +} + +// GetConnectionDetailsMapping for this Group +func (tr *Group) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Group +func (tr *Group) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Group +func (tr *Group) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Group +func (tr *Group) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Group +func (tr *Group) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Group +func (tr *Group) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Group +func (tr *Group) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Group 
+func (tr *Group) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Group using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Group) LateInitialize(attrs []byte) (bool, error) { + params := &GroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Group) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/resourcegroups/v1beta2/zz_group_types.go b/apis/resourcegroups/v1beta2/zz_group_types.go new file mode 100755 index 0000000000..39928a451b --- /dev/null +++ b/apis/resourcegroups/v1beta2/zz_group_types.go @@ -0,0 +1,227 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationInitParameters struct { + + // A collection of parameters for this group configuration item. See below for details. + Parameters []ParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Specifies the type of group configuration item. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ConfigurationObservation struct { + + // A collection of parameters for this group configuration item. See below for details. + Parameters []ParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Specifies the type of group configuration item. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ConfigurationParameters struct { + + // A collection of parameters for this group configuration item. See below for details. + // +kubebuilder:validation:Optional + Parameters []ParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Specifies the type of group configuration item. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type GroupInitParameters struct { + + // A configuration associates the resource group with an AWS service and specifies how the service can interact with the resources in the group. See below for details. + Configuration []ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A description of the resource group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A resource_query block. Resource queries are documented below. + ResourceQuery *ResourceQueryInitParameters `json:"resourceQuery,omitempty" tf:"resource_query,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type GroupObservation struct { + + // The ARN assigned by AWS for this resource group. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A configuration associates the resource group with an AWS service and specifies how the service can interact with the resources in the group. See below for details. + Configuration []ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A description of the resource group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A resource_query block. Resource queries are documented below. + ResourceQuery *ResourceQueryObservation `json:"resourceQuery,omitempty" tf:"resource_query,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. 
+ // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type GroupParameters struct { + + // A configuration associates the resource group with an AWS service and specifies how the service can interact with the resources in the group. See below for details. + // +kubebuilder:validation:Optional + Configuration []ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // A description of the resource group. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A resource_query block. Resource queries are documented below. + // +kubebuilder:validation:Optional + ResourceQuery *ResourceQueryParameters `json:"resourceQuery,omitempty" tf:"resource_query,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ParametersInitParameters struct { + + // The name of the group configuration parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value or values to be used for the specified parameter. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ParametersObservation struct { + + // The name of the group configuration parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value or values to be used for the specified parameter. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ParametersParameters struct { + + // The name of the group configuration parameter. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value or values to be used for the specified parameter. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ResourceQueryInitParameters struct { + + // The resource query as a JSON string. + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // The type of the resource query. Defaults to TAG_FILTERS_1_0. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ResourceQueryObservation struct { + + // The resource query as a JSON string. + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // The type of the resource query. Defaults to TAG_FILTERS_1_0. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ResourceQueryParameters struct { + + // The resource query as a JSON string. + // +kubebuilder:validation:Optional + Query *string `json:"query" tf:"query,omitempty"` + + // The type of the resource query. Defaults to TAG_FILTERS_1_0. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// GroupSpec defines the desired state of Group +type GroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider GroupInitParameters `json:"initProvider,omitempty"` +} + +// GroupStatus defines the observed state of Group. +type GroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Group is the Schema for the Groups API. Provides a Resource Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Group struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec GroupSpec `json:"spec"` + Status GroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GroupList contains a list of Groups +type GroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Group `json:"items"` +} + +// Repository type metadata. +var ( + Group_Kind = "Group" + Group_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Group_Kind}.String() + Group_KindAPIVersion = Group_Kind + "." 
+ CRDGroupVersion.String() + Group_GroupVersionKind = CRDGroupVersion.WithKind(Group_Kind) +) + +func init() { + SchemeBuilder.Register(&Group{}, &GroupList{}) +} diff --git a/apis/resourcegroups/v1beta2/zz_groupversion_info.go b/apis/resourcegroups/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..1507f54022 --- /dev/null +++ b/apis/resourcegroups/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=resourcegroups.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "resourcegroups.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/route53/v1beta1/zz_generated.conversion_hubs.go b/apis/route53/v1beta1/zz_generated.conversion_hubs.go index c96e68a382..60e81952f4 100755 --- a/apis/route53/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/route53/v1beta1/zz_generated.conversion_hubs.go @@ -15,9 +15,6 @@ func (tr *HealthCheck) Hub() {} // Hub marks this type as a conversion hub. func (tr *HostedZoneDNSSEC) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Record) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *ResolverConfig) Hub() {} diff --git a/apis/route53/v1beta1/zz_generated.conversion_spokes.go b/apis/route53/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..dd0a9fa11f --- /dev/null +++ b/apis/route53/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Record to the hub type. +func (tr *Record) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Record type. 
+func (tr *Record) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/route53/v1beta1/zz_generated.resolvers.go b/apis/route53/v1beta1/zz_generated.resolvers.go index de0e74a9fe..25a30ccfc3 100644 --- a/apis/route53/v1beta1/zz_generated.resolvers.go +++ b/apis/route53/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *HealthCheck) ResolveReferences( // ResolveReferences of this HealthChe var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta1", "MetricAlarm", "MetricAlarmList") + m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta2", "MetricAlarm", "MetricAlarmList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -45,7 +45,7 @@ func (mg *HealthCheck) ResolveReferences( // ResolveReferences of this HealthChe mg.Spec.ForProvider.CloudwatchAlarmName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CloudwatchAlarmNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta1", "MetricAlarm", "MetricAlarmList") + m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta2", "MetricAlarm", "MetricAlarmList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/route53/v1beta1/zz_healthcheck_types.go b/apis/route53/v1beta1/zz_healthcheck_types.go index 0ac46e2844..b74e9c5bdc 100755 --- 
a/apis/route53/v1beta1/zz_healthcheck_types.go +++ b/apis/route53/v1beta1/zz_healthcheck_types.go @@ -23,7 +23,7 @@ type HealthCheckInitParameters struct { ChildHealthchecks []*string `json:"childHealthchecks,omitempty" tf:"child_healthchecks,omitempty"` // The name of the CloudWatch alarm. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta1.MetricAlarm + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta2.MetricAlarm CloudwatchAlarmName *string `json:"cloudwatchAlarmName,omitempty" tf:"cloudwatch_alarm_name,omitempty"` // Reference to a MetricAlarm in cloudwatch to populate cloudwatchAlarmName. @@ -184,7 +184,7 @@ type HealthCheckParameters struct { ChildHealthchecks []*string `json:"childHealthchecks,omitempty" tf:"child_healthchecks,omitempty"` // The name of the CloudWatch alarm. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta1.MetricAlarm + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta2.MetricAlarm // +kubebuilder:validation:Optional CloudwatchAlarmName *string `json:"cloudwatchAlarmName,omitempty" tf:"cloudwatch_alarm_name,omitempty"` diff --git a/apis/route53/v1beta2/zz_generated.conversion_hubs.go b/apis/route53/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..5d237d75fa --- /dev/null +++ b/apis/route53/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Record) Hub() {} diff --git a/apis/route53/v1beta2/zz_generated.deepcopy.go b/apis/route53/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a535a8ebf0 --- /dev/null +++ b/apis/route53/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1082 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasInitParameters) DeepCopyInto(out *AliasInitParameters) { + *out = *in + if in.EvaluateTargetHealth != nil { + in, out := &in.EvaluateTargetHealth, &out.EvaluateTargetHealth + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasInitParameters. +func (in *AliasInitParameters) DeepCopy() *AliasInitParameters { + if in == nil { + return nil + } + out := new(AliasInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AliasObservation) DeepCopyInto(out *AliasObservation) { + *out = *in + if in.EvaluateTargetHealth != nil { + in, out := &in.EvaluateTargetHealth, &out.EvaluateTargetHealth + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasObservation. +func (in *AliasObservation) DeepCopy() *AliasObservation { + if in == nil { + return nil + } + out := new(AliasObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AliasParameters) DeepCopyInto(out *AliasParameters) { + *out = *in + if in.EvaluateTargetHealth != nil { + in, out := &in.EvaluateTargetHealth, &out.EvaluateTargetHealth + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AliasParameters. +func (in *AliasParameters) DeepCopy() *AliasParameters { + if in == nil { + return nil + } + out := new(AliasParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CidrRoutingPolicyInitParameters) DeepCopyInto(out *CidrRoutingPolicyInitParameters) { + *out = *in + if in.CollectionID != nil { + in, out := &in.CollectionID, &out.CollectionID + *out = new(string) + **out = **in + } + if in.LocationName != nil { + in, out := &in.LocationName, &out.LocationName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CidrRoutingPolicyInitParameters. +func (in *CidrRoutingPolicyInitParameters) DeepCopy() *CidrRoutingPolicyInitParameters { + if in == nil { + return nil + } + out := new(CidrRoutingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CidrRoutingPolicyObservation) DeepCopyInto(out *CidrRoutingPolicyObservation) { + *out = *in + if in.CollectionID != nil { + in, out := &in.CollectionID, &out.CollectionID + *out = new(string) + **out = **in + } + if in.LocationName != nil { + in, out := &in.LocationName, &out.LocationName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CidrRoutingPolicyObservation. +func (in *CidrRoutingPolicyObservation) DeepCopy() *CidrRoutingPolicyObservation { + if in == nil { + return nil + } + out := new(CidrRoutingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CidrRoutingPolicyParameters) DeepCopyInto(out *CidrRoutingPolicyParameters) { + *out = *in + if in.CollectionID != nil { + in, out := &in.CollectionID, &out.CollectionID + *out = new(string) + **out = **in + } + if in.LocationName != nil { + in, out := &in.LocationName, &out.LocationName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CidrRoutingPolicyParameters. +func (in *CidrRoutingPolicyParameters) DeepCopy() *CidrRoutingPolicyParameters { + if in == nil { + return nil + } + out := new(CidrRoutingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoordinatesInitParameters) DeepCopyInto(out *CoordinatesInitParameters) { + *out = *in + if in.Latitude != nil { + in, out := &in.Latitude, &out.Latitude + *out = new(string) + **out = **in + } + if in.Longitude != nil { + in, out := &in.Longitude, &out.Longitude + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoordinatesInitParameters. +func (in *CoordinatesInitParameters) DeepCopy() *CoordinatesInitParameters { + if in == nil { + return nil + } + out := new(CoordinatesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoordinatesObservation) DeepCopyInto(out *CoordinatesObservation) { + *out = *in + if in.Latitude != nil { + in, out := &in.Latitude, &out.Latitude + *out = new(string) + **out = **in + } + if in.Longitude != nil { + in, out := &in.Longitude, &out.Longitude + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoordinatesObservation. 
+func (in *CoordinatesObservation) DeepCopy() *CoordinatesObservation { + if in == nil { + return nil + } + out := new(CoordinatesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoordinatesParameters) DeepCopyInto(out *CoordinatesParameters) { + *out = *in + if in.Latitude != nil { + in, out := &in.Latitude, &out.Latitude + *out = new(string) + **out = **in + } + if in.Longitude != nil { + in, out := &in.Longitude, &out.Longitude + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoordinatesParameters. +func (in *CoordinatesParameters) DeepCopy() *CoordinatesParameters { + if in == nil { + return nil + } + out := new(CoordinatesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverRoutingPolicyInitParameters) DeepCopyInto(out *FailoverRoutingPolicyInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverRoutingPolicyInitParameters. +func (in *FailoverRoutingPolicyInitParameters) DeepCopy() *FailoverRoutingPolicyInitParameters { + if in == nil { + return nil + } + out := new(FailoverRoutingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FailoverRoutingPolicyObservation) DeepCopyInto(out *FailoverRoutingPolicyObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverRoutingPolicyObservation. +func (in *FailoverRoutingPolicyObservation) DeepCopy() *FailoverRoutingPolicyObservation { + if in == nil { + return nil + } + out := new(FailoverRoutingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverRoutingPolicyParameters) DeepCopyInto(out *FailoverRoutingPolicyParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverRoutingPolicyParameters. +func (in *FailoverRoutingPolicyParameters) DeepCopy() *FailoverRoutingPolicyParameters { + if in == nil { + return nil + } + out := new(FailoverRoutingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeolocationRoutingPolicyInitParameters) DeepCopyInto(out *GeolocationRoutingPolicyInitParameters) { + *out = *in + if in.Continent != nil { + in, out := &in.Continent, &out.Continent + *out = new(string) + **out = **in + } + if in.Country != nil { + in, out := &in.Country, &out.Country + *out = new(string) + **out = **in + } + if in.Subdivision != nil { + in, out := &in.Subdivision, &out.Subdivision + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeolocationRoutingPolicyInitParameters. 
+func (in *GeolocationRoutingPolicyInitParameters) DeepCopy() *GeolocationRoutingPolicyInitParameters { + if in == nil { + return nil + } + out := new(GeolocationRoutingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeolocationRoutingPolicyObservation) DeepCopyInto(out *GeolocationRoutingPolicyObservation) { + *out = *in + if in.Continent != nil { + in, out := &in.Continent, &out.Continent + *out = new(string) + **out = **in + } + if in.Country != nil { + in, out := &in.Country, &out.Country + *out = new(string) + **out = **in + } + if in.Subdivision != nil { + in, out := &in.Subdivision, &out.Subdivision + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeolocationRoutingPolicyObservation. +func (in *GeolocationRoutingPolicyObservation) DeepCopy() *GeolocationRoutingPolicyObservation { + if in == nil { + return nil + } + out := new(GeolocationRoutingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeolocationRoutingPolicyParameters) DeepCopyInto(out *GeolocationRoutingPolicyParameters) { + *out = *in + if in.Continent != nil { + in, out := &in.Continent, &out.Continent + *out = new(string) + **out = **in + } + if in.Country != nil { + in, out := &in.Country, &out.Country + *out = new(string) + **out = **in + } + if in.Subdivision != nil { + in, out := &in.Subdivision, &out.Subdivision + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeolocationRoutingPolicyParameters. 
+func (in *GeolocationRoutingPolicyParameters) DeepCopy() *GeolocationRoutingPolicyParameters { + if in == nil { + return nil + } + out := new(GeolocationRoutingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeoproximityRoutingPolicyInitParameters) DeepCopyInto(out *GeoproximityRoutingPolicyInitParameters) { + *out = *in + if in.AwsRegion != nil { + in, out := &in.AwsRegion, &out.AwsRegion + *out = new(string) + **out = **in + } + if in.Bias != nil { + in, out := &in.Bias, &out.Bias + *out = new(float64) + **out = **in + } + if in.Coordinates != nil { + in, out := &in.Coordinates, &out.Coordinates + *out = make([]CoordinatesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LocalZoneGroup != nil { + in, out := &in.LocalZoneGroup, &out.LocalZoneGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoproximityRoutingPolicyInitParameters. +func (in *GeoproximityRoutingPolicyInitParameters) DeepCopy() *GeoproximityRoutingPolicyInitParameters { + if in == nil { + return nil + } + out := new(GeoproximityRoutingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GeoproximityRoutingPolicyObservation) DeepCopyInto(out *GeoproximityRoutingPolicyObservation) { + *out = *in + if in.AwsRegion != nil { + in, out := &in.AwsRegion, &out.AwsRegion + *out = new(string) + **out = **in + } + if in.Bias != nil { + in, out := &in.Bias, &out.Bias + *out = new(float64) + **out = **in + } + if in.Coordinates != nil { + in, out := &in.Coordinates, &out.Coordinates + *out = make([]CoordinatesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LocalZoneGroup != nil { + in, out := &in.LocalZoneGroup, &out.LocalZoneGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoproximityRoutingPolicyObservation. +func (in *GeoproximityRoutingPolicyObservation) DeepCopy() *GeoproximityRoutingPolicyObservation { + if in == nil { + return nil + } + out := new(GeoproximityRoutingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeoproximityRoutingPolicyParameters) DeepCopyInto(out *GeoproximityRoutingPolicyParameters) { + *out = *in + if in.AwsRegion != nil { + in, out := &in.AwsRegion, &out.AwsRegion + *out = new(string) + **out = **in + } + if in.Bias != nil { + in, out := &in.Bias, &out.Bias + *out = new(float64) + **out = **in + } + if in.Coordinates != nil { + in, out := &in.Coordinates, &out.Coordinates + *out = make([]CoordinatesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LocalZoneGroup != nil { + in, out := &in.LocalZoneGroup, &out.LocalZoneGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoproximityRoutingPolicyParameters. 
+func (in *GeoproximityRoutingPolicyParameters) DeepCopy() *GeoproximityRoutingPolicyParameters { + if in == nil { + return nil + } + out := new(GeoproximityRoutingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LatencyRoutingPolicyInitParameters) DeepCopyInto(out *LatencyRoutingPolicyInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LatencyRoutingPolicyInitParameters. +func (in *LatencyRoutingPolicyInitParameters) DeepCopy() *LatencyRoutingPolicyInitParameters { + if in == nil { + return nil + } + out := new(LatencyRoutingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LatencyRoutingPolicyObservation) DeepCopyInto(out *LatencyRoutingPolicyObservation) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LatencyRoutingPolicyObservation. +func (in *LatencyRoutingPolicyObservation) DeepCopy() *LatencyRoutingPolicyObservation { + if in == nil { + return nil + } + out := new(LatencyRoutingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LatencyRoutingPolicyParameters) DeepCopyInto(out *LatencyRoutingPolicyParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LatencyRoutingPolicyParameters. 
+func (in *LatencyRoutingPolicyParameters) DeepCopy() *LatencyRoutingPolicyParameters { + if in == nil { + return nil + } + out := new(LatencyRoutingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Record) DeepCopyInto(out *Record) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Record. +func (in *Record) DeepCopy() *Record { + if in == nil { + return nil + } + out := new(Record) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Record) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordInitParameters) DeepCopyInto(out *RecordInitParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(AliasInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AllowOverwrite != nil { + in, out := &in.AllowOverwrite, &out.AllowOverwrite + *out = new(bool) + **out = **in + } + if in.CidrRoutingPolicy != nil { + in, out := &in.CidrRoutingPolicy, &out.CidrRoutingPolicy + *out = new(CidrRoutingPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FailoverRoutingPolicy != nil { + in, out := &in.FailoverRoutingPolicy, &out.FailoverRoutingPolicy + *out = new(FailoverRoutingPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GeolocationRoutingPolicy != nil { + in, out := &in.GeolocationRoutingPolicy, &out.GeolocationRoutingPolicy + *out = new(GeolocationRoutingPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GeoproximityRoutingPolicy != nil { + in, out := &in.GeoproximityRoutingPolicy, &out.GeoproximityRoutingPolicy + *out = new(GeoproximityRoutingPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckID != nil { + in, out := &in.HealthCheckID, &out.HealthCheckID + *out = new(string) + **out = **in + } + if in.HealthCheckIDRef != nil { + in, out := &in.HealthCheckIDRef, &out.HealthCheckIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckIDSelector != nil { + in, out := &in.HealthCheckIDSelector, &out.HealthCheckIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LatencyRoutingPolicy != nil { + in, out := &in.LatencyRoutingPolicy, &out.LatencyRoutingPolicy + *out = new(LatencyRoutingPolicyInitParameters) + **out = **in + } + if in.MultivalueAnswerRoutingPolicy != nil { + in, out := &in.MultivalueAnswerRoutingPolicy, &out.MultivalueAnswerRoutingPolicy + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Records != nil { + in, out := 
&in.Records, &out.Records + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SetIdentifier != nil { + in, out := &in.SetIdentifier, &out.SetIdentifier + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WeightedRoutingPolicy != nil { + in, out := &in.WeightedRoutingPolicy, &out.WeightedRoutingPolicy + *out = new(WeightedRoutingPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } + if in.ZoneIDRef != nil { + in, out := &in.ZoneIDRef, &out.ZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ZoneIDSelector != nil { + in, out := &in.ZoneIDSelector, &out.ZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordInitParameters. +func (in *RecordInitParameters) DeepCopy() *RecordInitParameters { + if in == nil { + return nil + } + out := new(RecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordList) DeepCopyInto(out *RecordList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Record, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordList. 
+func (in *RecordList) DeepCopy() *RecordList { + if in == nil { + return nil + } + out := new(RecordList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RecordList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordObservation) DeepCopyInto(out *RecordObservation) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(AliasObservation) + (*in).DeepCopyInto(*out) + } + if in.AllowOverwrite != nil { + in, out := &in.AllowOverwrite, &out.AllowOverwrite + *out = new(bool) + **out = **in + } + if in.CidrRoutingPolicy != nil { + in, out := &in.CidrRoutingPolicy, &out.CidrRoutingPolicy + *out = new(CidrRoutingPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.FailoverRoutingPolicy != nil { + in, out := &in.FailoverRoutingPolicy, &out.FailoverRoutingPolicy + *out = new(FailoverRoutingPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.GeolocationRoutingPolicy != nil { + in, out := &in.GeolocationRoutingPolicy, &out.GeolocationRoutingPolicy + *out = new(GeolocationRoutingPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.GeoproximityRoutingPolicy != nil { + in, out := &in.GeoproximityRoutingPolicy, &out.GeoproximityRoutingPolicy + *out = new(GeoproximityRoutingPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckID != nil { + in, out := &in.HealthCheckID, &out.HealthCheckID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LatencyRoutingPolicy != nil { + in, out := &in.LatencyRoutingPolicy, &out.LatencyRoutingPolicy + *out = 
new(LatencyRoutingPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.MultivalueAnswerRoutingPolicy != nil { + in, out := &in.MultivalueAnswerRoutingPolicy, &out.MultivalueAnswerRoutingPolicy + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Records != nil { + in, out := &in.Records, &out.Records + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SetIdentifier != nil { + in, out := &in.SetIdentifier, &out.SetIdentifier + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WeightedRoutingPolicy != nil { + in, out := &in.WeightedRoutingPolicy, &out.WeightedRoutingPolicy + *out = new(WeightedRoutingPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordObservation. +func (in *RecordObservation) DeepCopy() *RecordObservation { + if in == nil { + return nil + } + out := new(RecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordParameters) DeepCopyInto(out *RecordParameters) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(AliasParameters) + (*in).DeepCopyInto(*out) + } + if in.AllowOverwrite != nil { + in, out := &in.AllowOverwrite, &out.AllowOverwrite + *out = new(bool) + **out = **in + } + if in.CidrRoutingPolicy != nil { + in, out := &in.CidrRoutingPolicy, &out.CidrRoutingPolicy + *out = new(CidrRoutingPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.FailoverRoutingPolicy != nil { + in, out := &in.FailoverRoutingPolicy, &out.FailoverRoutingPolicy + *out = new(FailoverRoutingPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.GeolocationRoutingPolicy != nil { + in, out := &in.GeolocationRoutingPolicy, &out.GeolocationRoutingPolicy + *out = new(GeolocationRoutingPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.GeoproximityRoutingPolicy != nil { + in, out := &in.GeoproximityRoutingPolicy, &out.GeoproximityRoutingPolicy + *out = new(GeoproximityRoutingPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckID != nil { + in, out := &in.HealthCheckID, &out.HealthCheckID + *out = new(string) + **out = **in + } + if in.HealthCheckIDRef != nil { + in, out := &in.HealthCheckIDRef, &out.HealthCheckIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckIDSelector != nil { + in, out := &in.HealthCheckIDSelector, &out.HealthCheckIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LatencyRoutingPolicy != nil { + in, out := &in.LatencyRoutingPolicy, &out.LatencyRoutingPolicy + *out = new(LatencyRoutingPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.MultivalueAnswerRoutingPolicy != nil { + in, out := &in.MultivalueAnswerRoutingPolicy, &out.MultivalueAnswerRoutingPolicy + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Records != nil { + in, out := &in.Records, 
&out.Records + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SetIdentifier != nil { + in, out := &in.SetIdentifier, &out.SetIdentifier + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WeightedRoutingPolicy != nil { + in, out := &in.WeightedRoutingPolicy, &out.WeightedRoutingPolicy + *out = new(WeightedRoutingPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } + if in.ZoneIDRef != nil { + in, out := &in.ZoneIDRef, &out.ZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ZoneIDSelector != nil { + in, out := &in.ZoneIDSelector, &out.ZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordParameters. +func (in *RecordParameters) DeepCopy() *RecordParameters { + if in == nil { + return nil + } + out := new(RecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordSpec) DeepCopyInto(out *RecordSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordSpec. 
+func (in *RecordSpec) DeepCopy() *RecordSpec { + if in == nil { + return nil + } + out := new(RecordSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordStatus) DeepCopyInto(out *RecordStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordStatus. +func (in *RecordStatus) DeepCopy() *RecordStatus { + if in == nil { + return nil + } + out := new(RecordStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedRoutingPolicyInitParameters) DeepCopyInto(out *WeightedRoutingPolicyInitParameters) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedRoutingPolicyInitParameters. +func (in *WeightedRoutingPolicyInitParameters) DeepCopy() *WeightedRoutingPolicyInitParameters { + if in == nil { + return nil + } + out := new(WeightedRoutingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedRoutingPolicyObservation) DeepCopyInto(out *WeightedRoutingPolicyObservation) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedRoutingPolicyObservation. 
+func (in *WeightedRoutingPolicyObservation) DeepCopy() *WeightedRoutingPolicyObservation { + if in == nil { + return nil + } + out := new(WeightedRoutingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedRoutingPolicyParameters) DeepCopyInto(out *WeightedRoutingPolicyParameters) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedRoutingPolicyParameters. +func (in *WeightedRoutingPolicyParameters) DeepCopy() *WeightedRoutingPolicyParameters { + if in == nil { + return nil + } + out := new(WeightedRoutingPolicyParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/route53/v1beta2/zz_generated.managed.go b/apis/route53/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..7bbf119ed1 --- /dev/null +++ b/apis/route53/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Record. +func (mg *Record) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Record. +func (mg *Record) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Record. +func (mg *Record) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Record. +func (mg *Record) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Record. 
+func (mg *Record) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Record. +func (mg *Record) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Record. +func (mg *Record) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Record. +func (mg *Record) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Record. +func (mg *Record) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Record. +func (mg *Record) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Record. +func (mg *Record) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Record. +func (mg *Record) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/route53/v1beta2/zz_generated.managedlist.go b/apis/route53/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..aa12640612 --- /dev/null +++ b/apis/route53/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this RecordList. 
+func (l *RecordList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/route53/v1beta2/zz_generated.resolvers.go b/apis/route53/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..93762aaa88 --- /dev/null +++ b/apis/route53/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Record. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Record) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("route53.aws.upbound.io", "v1beta1", "HealthCheck", "HealthCheckList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HealthCheckID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.HealthCheckIDRef, + Selector: mg.Spec.ForProvider.HealthCheckIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.HealthCheckID") + } + mg.Spec.ForProvider.HealthCheckID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.HealthCheckIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("route53.aws.upbound.io", "v1beta1", "Zone", "ZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ZoneID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ZoneIDRef, + Selector: mg.Spec.ForProvider.ZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ZoneID") + } + mg.Spec.ForProvider.ZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ZoneIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("route53.aws.upbound.io", "v1beta1", "HealthCheck", "HealthCheckList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.HealthCheckID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.HealthCheckIDRef, + Selector: mg.Spec.InitProvider.HealthCheckIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HealthCheckID") + } + mg.Spec.InitProvider.HealthCheckID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HealthCheckIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("route53.aws.upbound.io", "v1beta1", "Zone", "ZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ZoneID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ZoneIDRef, + Selector: mg.Spec.InitProvider.ZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ZoneID") + } + mg.Spec.InitProvider.ZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ZoneIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/route53/v1beta2/zz_groupversion_info.go b/apis/route53/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..6e1b94b43c --- /dev/null +++ b/apis/route53/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=route53.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "route53.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/route53/v1beta2/zz_record_terraformed.go b/apis/route53/v1beta2/zz_record_terraformed.go new file mode 100755 index 0000000000..d31cec1f81 --- /dev/null +++ b/apis/route53/v1beta2/zz_record_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Record +func (mg *Record) GetTerraformResourceType() string { + return "aws_route53_record" +} + +// GetConnectionDetailsMapping for this Record +func (tr *Record) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Record +func (tr *Record) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Record +func (tr *Record) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Record +func (tr *Record) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Record +func (tr *Record) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Record +func (tr *Record) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Record +func (tr *Record) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of 
this Record +func (tr *Record) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Record using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Record) LateInitialize(attrs []byte) (bool, error) { + params := &RecordParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Record) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/route53/v1beta2/zz_record_types.go b/apis/route53/v1beta2/zz_record_types.go new file mode 100755 index 0000000000..8247713610 --- /dev/null +++ b/apis/route53/v1beta2/zz_record_types.go @@ -0,0 +1,529 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AliasInitParameters struct { + + // Set to true if you want Route 53 to determine whether to respond to DNS queries using this resource record set by checking the health of the resource record set. Some resources have special requirements, see related part of documentation. + EvaluateTargetHealth *bool `json:"evaluateTargetHealth,omitempty" tf:"evaluate_target_health,omitempty"` + + // The name of the record. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the hosted zone to contain this record. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` +} + +type AliasObservation struct { + + // Set to true if you want Route 53 to determine whether to respond to DNS queries using this resource record set by checking the health of the resource record set. Some resources have special requirements, see related part of documentation. + EvaluateTargetHealth *bool `json:"evaluateTargetHealth,omitempty" tf:"evaluate_target_health,omitempty"` + + // The name of the record. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the hosted zone to contain this record. 
+ ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` +} + +type AliasParameters struct { + + // Set to true if you want Route 53 to determine whether to respond to DNS queries using this resource record set by checking the health of the resource record set. Some resources have special requirements, see related part of documentation. + // +kubebuilder:validation:Optional + EvaluateTargetHealth *bool `json:"evaluateTargetHealth" tf:"evaluate_target_health,omitempty"` + + // The name of the record. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the hosted zone to contain this record. + // +kubebuilder:validation:Optional + ZoneID *string `json:"zoneId" tf:"zone_id,omitempty"` +} + +type CidrRoutingPolicyInitParameters struct { + + // The CIDR collection ID. See the aws_route53_cidr_collection resource for more details. + CollectionID *string `json:"collectionId,omitempty" tf:"collection_id,omitempty"` + + // The CIDR collection location name. See the aws_route53_cidr_location resource for more details. A location_name with an asterisk "*" can be used to create a default CIDR record. collection_id is still required for default record. + LocationName *string `json:"locationName,omitempty" tf:"location_name,omitempty"` +} + +type CidrRoutingPolicyObservation struct { + + // The CIDR collection ID. See the aws_route53_cidr_collection resource for more details. + CollectionID *string `json:"collectionId,omitempty" tf:"collection_id,omitempty"` + + // The CIDR collection location name. See the aws_route53_cidr_location resource for more details. A location_name with an asterisk "*" can be used to create a default CIDR record. collection_id is still required for default record. + LocationName *string `json:"locationName,omitempty" tf:"location_name,omitempty"` +} + +type CidrRoutingPolicyParameters struct { + + // The CIDR collection ID. See the aws_route53_cidr_collection resource for more details. 
+ // +kubebuilder:validation:Optional + CollectionID *string `json:"collectionId" tf:"collection_id,omitempty"` + + // The CIDR collection location name. See the aws_route53_cidr_location resource for more details. A location_name with an asterisk "*" can be used to create a default CIDR record. collection_id is still required for default record. + // +kubebuilder:validation:Optional + LocationName *string `json:"locationName" tf:"location_name,omitempty"` +} + +type CoordinatesInitParameters struct { + Latitude *string `json:"latitude,omitempty" tf:"latitude,omitempty"` + + Longitude *string `json:"longitude,omitempty" tf:"longitude,omitempty"` +} + +type CoordinatesObservation struct { + Latitude *string `json:"latitude,omitempty" tf:"latitude,omitempty"` + + Longitude *string `json:"longitude,omitempty" tf:"longitude,omitempty"` +} + +type CoordinatesParameters struct { + + // +kubebuilder:validation:Optional + Latitude *string `json:"latitude" tf:"latitude,omitempty"` + + // +kubebuilder:validation:Optional + Longitude *string `json:"longitude" tf:"longitude,omitempty"` +} + +type FailoverRoutingPolicyInitParameters struct { + + // The record type. Valid values are A, AAAA, CAA, CNAME, DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FailoverRoutingPolicyObservation struct { + + // The record type. Valid values are A, AAAA, CAA, CNAME, DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FailoverRoutingPolicyParameters struct { + + // The record type. Valid values are A, AAAA, CAA, CNAME, DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type GeolocationRoutingPolicyInitParameters struct { + + // A two-letter continent code. See http://docs.aws.amazon.com/Route53/latest/APIReference/API_GetGeoLocation.html for code details. 
Either continent or country must be specified. + Continent *string `json:"continent,omitempty" tf:"continent,omitempty"` + + // A two-character country code or * to indicate a default resource record set. + Country *string `json:"country,omitempty" tf:"country,omitempty"` + + // A subdivision code for a country. + Subdivision *string `json:"subdivision,omitempty" tf:"subdivision,omitempty"` +} + +type GeolocationRoutingPolicyObservation struct { + + // A two-letter continent code. See http://docs.aws.amazon.com/Route53/latest/APIReference/API_GetGeoLocation.html for code details. Either continent or country must be specified. + Continent *string `json:"continent,omitempty" tf:"continent,omitempty"` + + // A two-character country code or * to indicate a default resource record set. + Country *string `json:"country,omitempty" tf:"country,omitempty"` + + // A subdivision code for a country. + Subdivision *string `json:"subdivision,omitempty" tf:"subdivision,omitempty"` +} + +type GeolocationRoutingPolicyParameters struct { + + // A two-letter continent code. See http://docs.aws.amazon.com/Route53/latest/APIReference/API_GetGeoLocation.html for code details. Either continent or country must be specified. + // +kubebuilder:validation:Optional + Continent *string `json:"continent,omitempty" tf:"continent,omitempty"` + + // A two-character country code or * to indicate a default resource record set. + // +kubebuilder:validation:Optional + Country *string `json:"country,omitempty" tf:"country,omitempty"` + + // A subdivision code for a country. + // +kubebuilder:validation:Optional + Subdivision *string `json:"subdivision,omitempty" tf:"subdivision,omitempty"` +} + +type GeoproximityRoutingPolicyInitParameters struct { + + // A AWS region where the resource is present. + AwsRegion *string `json:"awsRegion,omitempty" tf:"aws_region,omitempty"` + + // Route more traffic or less traffic to the resource by specifying a value ranges between -90 to 90. 
See https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy-geoproximity.html for bias details. + Bias *float64 `json:"bias,omitempty" tf:"bias,omitempty"` + + // Specify latitude and longitude for routing traffic to non-AWS resources. + Coordinates []CoordinatesInitParameters `json:"coordinates,omitempty" tf:"coordinates,omitempty"` + + // A AWS local zone group where the resource is present. See https://docs.aws.amazon.com/local-zones/latest/ug/available-local-zones.html for local zone group list. + LocalZoneGroup *string `json:"localZoneGroup,omitempty" tf:"local_zone_group,omitempty"` +} + +type GeoproximityRoutingPolicyObservation struct { + + // A AWS region where the resource is present. + AwsRegion *string `json:"awsRegion,omitempty" tf:"aws_region,omitempty"` + + // Route more traffic or less traffic to the resource by specifying a value ranges between -90 to 90. See https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy-geoproximity.html for bias details. + Bias *float64 `json:"bias,omitempty" tf:"bias,omitempty"` + + // Specify latitude and longitude for routing traffic to non-AWS resources. + Coordinates []CoordinatesObservation `json:"coordinates,omitempty" tf:"coordinates,omitempty"` + + // A AWS local zone group where the resource is present. See https://docs.aws.amazon.com/local-zones/latest/ug/available-local-zones.html for local zone group list. + LocalZoneGroup *string `json:"localZoneGroup,omitempty" tf:"local_zone_group,omitempty"` +} + +type GeoproximityRoutingPolicyParameters struct { + + // A AWS region where the resource is present. + // +kubebuilder:validation:Optional + AwsRegion *string `json:"awsRegion,omitempty" tf:"aws_region,omitempty"` + + // Route more traffic or less traffic to the resource by specifying a value ranges between -90 to 90. See https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy-geoproximity.html for bias details. 
+ // +kubebuilder:validation:Optional + Bias *float64 `json:"bias,omitempty" tf:"bias,omitempty"` + + // Specify latitude and longitude for routing traffic to non-AWS resources. + // +kubebuilder:validation:Optional + Coordinates []CoordinatesParameters `json:"coordinates,omitempty" tf:"coordinates,omitempty"` + + // A AWS local zone group where the resource is present. See https://docs.aws.amazon.com/local-zones/latest/ug/available-local-zones.html for local zone group list. + // +kubebuilder:validation:Optional + LocalZoneGroup *string `json:"localZoneGroup,omitempty" tf:"local_zone_group,omitempty"` +} + +type LatencyRoutingPolicyInitParameters struct { +} + +type LatencyRoutingPolicyObservation struct { + + // An AWS region from which to measure latency. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-latency + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type LatencyRoutingPolicyParameters struct { + + // An AWS region from which to measure latency. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-latency + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"region,omitempty"` +} + +type RecordInitParameters struct { + + // An alias block. Conflicts with ttl & records. + // Documented below. + Alias *AliasInitParameters `json:"alias,omitempty" tf:"alias,omitempty"` + + // false by default. This configuration is not recommended for most environments. + AllowOverwrite *bool `json:"allowOverwrite,omitempty" tf:"allow_overwrite,omitempty"` + + // A block indicating a routing policy based on the IP network ranges of requestors. Conflicts with any other routing policy. Documented below. + CidrRoutingPolicy *CidrRoutingPolicyInitParameters `json:"cidrRoutingPolicy,omitempty" tf:"cidr_routing_policy,omitempty"` + + // A block indicating the routing behavior when associated health check fails. 
Conflicts with any other routing policy. Documented below. + FailoverRoutingPolicy *FailoverRoutingPolicyInitParameters `json:"failoverRoutingPolicy,omitempty" tf:"failover_routing_policy,omitempty"` + + // A block indicating a routing policy based on the geolocation of the requestor. Conflicts with any other routing policy. Documented below. + GeolocationRoutingPolicy *GeolocationRoutingPolicyInitParameters `json:"geolocationRoutingPolicy,omitempty" tf:"geolocation_routing_policy,omitempty"` + + // A block indicating a routing policy based on the geoproximity of the requestor. Conflicts with any other routing policy. Documented below. + GeoproximityRoutingPolicy *GeoproximityRoutingPolicyInitParameters `json:"geoproximityRoutingPolicy,omitempty" tf:"geoproximity_routing_policy,omitempty"` + + // The health check the record should be associated with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/route53/v1beta1.HealthCheck + HealthCheckID *string `json:"healthCheckId,omitempty" tf:"health_check_id,omitempty"` + + // Reference to a HealthCheck in route53 to populate healthCheckId. + // +kubebuilder:validation:Optional + HealthCheckIDRef *v1.Reference `json:"healthCheckIdRef,omitempty" tf:"-"` + + // Selector for a HealthCheck in route53 to populate healthCheckId. + // +kubebuilder:validation:Optional + HealthCheckIDSelector *v1.Selector `json:"healthCheckIdSelector,omitempty" tf:"-"` + + // A block indicating a routing policy based on the latency between the requestor and an AWS region. Conflicts with any other routing policy. Documented below. + LatencyRoutingPolicy *LatencyRoutingPolicyInitParameters `json:"latencyRoutingPolicy,omitempty" tf:"latency_routing_policy,omitempty"` + + // Set to true to indicate a multivalue answer routing policy. Conflicts with any other routing policy. 
+ MultivalueAnswerRoutingPolicy *bool `json:"multivalueAnswerRoutingPolicy,omitempty" tf:"multivalue_answer_routing_policy,omitempty"` + + // The name of the record. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A string list of records.g., "first255characters\"\"morecharacters"). + // +listType=set + Records []*string `json:"records,omitempty" tf:"records,omitempty"` + + // Unique identifier to differentiate records with routing policies from one another. Required if using cidr_routing_policy, failover_routing_policy, geolocation_routing_policy,geoproximity_routing_policy, latency_routing_policy, multivalue_answer_routing_policy, or weighted_routing_policy. + SetIdentifier *string `json:"setIdentifier,omitempty" tf:"set_identifier,omitempty"` + + // The TTL of the record. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // The record type. Valid values are A, AAAA, CAA, CNAME, DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A block indicating a weighted routing policy. Conflicts with any other routing policy. Documented below. + WeightedRoutingPolicy *WeightedRoutingPolicyInitParameters `json:"weightedRoutingPolicy,omitempty" tf:"weighted_routing_policy,omitempty"` + + // The ID of the hosted zone to contain this record. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/route53/v1beta1.Zone + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + + // Reference to a Zone in route53 to populate zoneId. + // +kubebuilder:validation:Optional + ZoneIDRef *v1.Reference `json:"zoneIdRef,omitempty" tf:"-"` + + // Selector for a Zone in route53 to populate zoneId. + // +kubebuilder:validation:Optional + ZoneIDSelector *v1.Selector `json:"zoneIdSelector,omitempty" tf:"-"` +} + +type RecordObservation struct { + + // An alias block. Conflicts with ttl & records. + // Documented below. 
+ Alias *AliasObservation `json:"alias,omitempty" tf:"alias,omitempty"` + + // false by default. This configuration is not recommended for most environments. + AllowOverwrite *bool `json:"allowOverwrite,omitempty" tf:"allow_overwrite,omitempty"` + + // A block indicating a routing policy based on the IP network ranges of requestors. Conflicts with any other routing policy. Documented below. + CidrRoutingPolicy *CidrRoutingPolicyObservation `json:"cidrRoutingPolicy,omitempty" tf:"cidr_routing_policy,omitempty"` + + // A block indicating the routing behavior when associated health check fails. Conflicts with any other routing policy. Documented below. + FailoverRoutingPolicy *FailoverRoutingPolicyObservation `json:"failoverRoutingPolicy,omitempty" tf:"failover_routing_policy,omitempty"` + + // FQDN built using the zone domain and name. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // A block indicating a routing policy based on the geolocation of the requestor. Conflicts with any other routing policy. Documented below. + GeolocationRoutingPolicy *GeolocationRoutingPolicyObservation `json:"geolocationRoutingPolicy,omitempty" tf:"geolocation_routing_policy,omitempty"` + + // A block indicating a routing policy based on the geoproximity of the requestor. Conflicts with any other routing policy. Documented below. + GeoproximityRoutingPolicy *GeoproximityRoutingPolicyObservation `json:"geoproximityRoutingPolicy,omitempty" tf:"geoproximity_routing_policy,omitempty"` + + // The health check the record should be associated with. + HealthCheckID *string `json:"healthCheckId,omitempty" tf:"health_check_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A block indicating a routing policy based on the latency between the requestor and an AWS region. Conflicts with any other routing policy. Documented below. 
+ LatencyRoutingPolicy *LatencyRoutingPolicyObservation `json:"latencyRoutingPolicy,omitempty" tf:"latency_routing_policy,omitempty"` + + // Set to true to indicate a multivalue answer routing policy. Conflicts with any other routing policy. + MultivalueAnswerRoutingPolicy *bool `json:"multivalueAnswerRoutingPolicy,omitempty" tf:"multivalue_answer_routing_policy,omitempty"` + + // The name of the record. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A string list of records.g., "first255characters\"\"morecharacters"). + // +listType=set + Records []*string `json:"records,omitempty" tf:"records,omitempty"` + + // Unique identifier to differentiate records with routing policies from one another. Required if using cidr_routing_policy, failover_routing_policy, geolocation_routing_policy,geoproximity_routing_policy, latency_routing_policy, multivalue_answer_routing_policy, or weighted_routing_policy. + SetIdentifier *string `json:"setIdentifier,omitempty" tf:"set_identifier,omitempty"` + + // The TTL of the record. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // The record type. Valid values are A, AAAA, CAA, CNAME, DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A block indicating a weighted routing policy. Conflicts with any other routing policy. Documented below. + WeightedRoutingPolicy *WeightedRoutingPolicyObservation `json:"weightedRoutingPolicy,omitempty" tf:"weighted_routing_policy,omitempty"` + + // The ID of the hosted zone to contain this record. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` +} + +type RecordParameters struct { + + // An alias block. Conflicts with ttl & records. + // Documented below. + // +kubebuilder:validation:Optional + Alias *AliasParameters `json:"alias,omitempty" tf:"alias,omitempty"` + + // false by default. This configuration is not recommended for most environments. 
+ // +kubebuilder:validation:Optional + AllowOverwrite *bool `json:"allowOverwrite,omitempty" tf:"allow_overwrite,omitempty"` + + // A block indicating a routing policy based on the IP network ranges of requestors. Conflicts with any other routing policy. Documented below. + // +kubebuilder:validation:Optional + CidrRoutingPolicy *CidrRoutingPolicyParameters `json:"cidrRoutingPolicy,omitempty" tf:"cidr_routing_policy,omitempty"` + + // A block indicating the routing behavior when associated health check fails. Conflicts with any other routing policy. Documented below. + // +kubebuilder:validation:Optional + FailoverRoutingPolicy *FailoverRoutingPolicyParameters `json:"failoverRoutingPolicy,omitempty" tf:"failover_routing_policy,omitempty"` + + // A block indicating a routing policy based on the geolocation of the requestor. Conflicts with any other routing policy. Documented below. + // +kubebuilder:validation:Optional + GeolocationRoutingPolicy *GeolocationRoutingPolicyParameters `json:"geolocationRoutingPolicy,omitempty" tf:"geolocation_routing_policy,omitempty"` + + // A block indicating a routing policy based on the geoproximity of the requestor. Conflicts with any other routing policy. Documented below. + // +kubebuilder:validation:Optional + GeoproximityRoutingPolicy *GeoproximityRoutingPolicyParameters `json:"geoproximityRoutingPolicy,omitempty" tf:"geoproximity_routing_policy,omitempty"` + + // The health check the record should be associated with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/route53/v1beta1.HealthCheck + // +kubebuilder:validation:Optional + HealthCheckID *string `json:"healthCheckId,omitempty" tf:"health_check_id,omitempty"` + + // Reference to a HealthCheck in route53 to populate healthCheckId. + // +kubebuilder:validation:Optional + HealthCheckIDRef *v1.Reference `json:"healthCheckIdRef,omitempty" tf:"-"` + + // Selector for a HealthCheck in route53 to populate healthCheckId. 
+ // +kubebuilder:validation:Optional + HealthCheckIDSelector *v1.Selector `json:"healthCheckIdSelector,omitempty" tf:"-"` + + // A block indicating a routing policy based on the latency between the requestor and an AWS region. Conflicts with any other routing policy. Documented below. + // +kubebuilder:validation:Optional + LatencyRoutingPolicy *LatencyRoutingPolicyParameters `json:"latencyRoutingPolicy,omitempty" tf:"latency_routing_policy,omitempty"` + + // Set to true to indicate a multivalue answer routing policy. Conflicts with any other routing policy. + // +kubebuilder:validation:Optional + MultivalueAnswerRoutingPolicy *bool `json:"multivalueAnswerRoutingPolicy,omitempty" tf:"multivalue_answer_routing_policy,omitempty"` + + // The name of the record. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A string list of records.g., "first255characters\"\"morecharacters"). + // +kubebuilder:validation:Optional + // +listType=set + Records []*string `json:"records,omitempty" tf:"records,omitempty"` + + // An AWS region from which to measure latency. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-latency + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Unique identifier to differentiate records with routing policies from one another. Required if using cidr_routing_policy, failover_routing_policy, geolocation_routing_policy,geoproximity_routing_policy, latency_routing_policy, multivalue_answer_routing_policy, or weighted_routing_policy. + // +kubebuilder:validation:Optional + SetIdentifier *string `json:"setIdentifier,omitempty" tf:"set_identifier,omitempty"` + + // The TTL of the record. + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // The record type. 
Valid values are A, AAAA, CAA, CNAME, DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A block indicating a weighted routing policy. Conflicts with any other routing policy. Documented below. + // +kubebuilder:validation:Optional + WeightedRoutingPolicy *WeightedRoutingPolicyParameters `json:"weightedRoutingPolicy,omitempty" tf:"weighted_routing_policy,omitempty"` + + // The ID of the hosted zone to contain this record. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/route53/v1beta1.Zone + // +kubebuilder:validation:Optional + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` + + // Reference to a Zone in route53 to populate zoneId. + // +kubebuilder:validation:Optional + ZoneIDRef *v1.Reference `json:"zoneIdRef,omitempty" tf:"-"` + + // Selector for a Zone in route53 to populate zoneId. + // +kubebuilder:validation:Optional + ZoneIDSelector *v1.Selector `json:"zoneIdSelector,omitempty" tf:"-"` +} + +type WeightedRoutingPolicyInitParameters struct { + + // A numeric value indicating the relative weight of the record. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type WeightedRoutingPolicyObservation struct { + + // A numeric value indicating the relative weight of the record. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type WeightedRoutingPolicyParameters struct { + + // A numeric value indicating the relative weight of the record. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted. 
+ // +kubebuilder:validation:Optional + Weight *float64 `json:"weight" tf:"weight,omitempty"` +} + +// RecordSpec defines the desired state of Record +type RecordSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RecordParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RecordInitParameters `json:"initProvider,omitempty"` +} + +// RecordStatus defines the observed state of Record. +type RecordStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RecordObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Record is the Schema for the Records API. Provides a Route53 record resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Record struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec RecordSpec `json:"spec"` + Status RecordStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RecordList contains a list of Records +type RecordList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Record `json:"items"` +} + +// Repository type metadata. +var ( + Record_Kind = "Record" + Record_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Record_Kind}.String() + Record_KindAPIVersion = Record_Kind + "." 
+ CRDGroupVersion.String() + Record_GroupVersionKind = CRDGroupVersion.WithKind(Record_Kind) +) + +func init() { + SchemeBuilder.Register(&Record{}, &RecordList{}) +} diff --git a/apis/route53recoverycontrolconfig/v1beta1/zz_generated.conversion_hubs.go b/apis/route53recoverycontrolconfig/v1beta1/zz_generated.conversion_hubs.go index 773bed14bd..b0a68915d8 100755 --- a/apis/route53recoverycontrolconfig/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/route53recoverycontrolconfig/v1beta1/zz_generated.conversion_hubs.go @@ -14,6 +14,3 @@ func (tr *ControlPanel) Hub() {} // Hub marks this type as a conversion hub. func (tr *RoutingControl) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SafetyRule) Hub() {} diff --git a/apis/route53recoverycontrolconfig/v1beta1/zz_generated.conversion_spokes.go b/apis/route53recoverycontrolconfig/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..ffe63bfcfe --- /dev/null +++ b/apis/route53recoverycontrolconfig/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this SafetyRule to the hub type. +func (tr *SafetyRule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SafetyRule type. 
+func (tr *SafetyRule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/route53recoverycontrolconfig/v1beta2/zz_generated.conversion_hubs.go b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..819154a09b --- /dev/null +++ b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *SafetyRule) Hub() {} diff --git a/apis/route53recoverycontrolconfig/v1beta2/zz_generated.deepcopy.go b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f1dcd189ec --- /dev/null +++ b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,466 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleConfigInitParameters) DeepCopyInto(out *RuleConfigInitParameters) { + *out = *in + if in.Inverted != nil { + in, out := &in.Inverted, &out.Inverted + *out = new(bool) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleConfigInitParameters. +func (in *RuleConfigInitParameters) DeepCopy() *RuleConfigInitParameters { + if in == nil { + return nil + } + out := new(RuleConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleConfigObservation) DeepCopyInto(out *RuleConfigObservation) { + *out = *in + if in.Inverted != nil { + in, out := &in.Inverted, &out.Inverted + *out = new(bool) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleConfigObservation. +func (in *RuleConfigObservation) DeepCopy() *RuleConfigObservation { + if in == nil { + return nil + } + out := new(RuleConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleConfigParameters) DeepCopyInto(out *RuleConfigParameters) { + *out = *in + if in.Inverted != nil { + in, out := &in.Inverted, &out.Inverted + *out = new(bool) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleConfigParameters. +func (in *RuleConfigParameters) DeepCopy() *RuleConfigParameters { + if in == nil { + return nil + } + out := new(RuleConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SafetyRule) DeepCopyInto(out *SafetyRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafetyRule. +func (in *SafetyRule) DeepCopy() *SafetyRule { + if in == nil { + return nil + } + out := new(SafetyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SafetyRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SafetyRuleInitParameters) DeepCopyInto(out *SafetyRuleInitParameters) { + *out = *in + if in.AssertedControls != nil { + in, out := &in.AssertedControls, &out.AssertedControls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AssertedControlsRefs != nil { + in, out := &in.AssertedControlsRefs, &out.AssertedControlsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AssertedControlsSelector != nil { + in, out := &in.AssertedControlsSelector, &out.AssertedControlsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ControlPanelArn != nil { + in, out := &in.ControlPanelArn, &out.ControlPanelArn + *out = new(string) + **out = **in + } + if in.ControlPanelArnRef != nil { + in, out := &in.ControlPanelArnRef, &out.ControlPanelArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ControlPanelArnSelector != nil { + in, out := &in.ControlPanelArnSelector, &out.ControlPanelArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GatingControls != nil { + in, out := &in.GatingControls, &out.GatingControls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuleConfig != nil { + in, out := &in.RuleConfig, &out.RuleConfig + *out = new(RuleConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetControls != nil { + in, out := &in.TargetControls, &out.TargetControls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WaitPeriodMs != nil { + in, out := &in.WaitPeriodMs, &out.WaitPeriodMs 
+ *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafetyRuleInitParameters. +func (in *SafetyRuleInitParameters) DeepCopy() *SafetyRuleInitParameters { + if in == nil { + return nil + } + out := new(SafetyRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SafetyRuleList) DeepCopyInto(out *SafetyRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SafetyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafetyRuleList. +func (in *SafetyRuleList) DeepCopy() *SafetyRuleList { + if in == nil { + return nil + } + out := new(SafetyRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SafetyRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SafetyRuleObservation) DeepCopyInto(out *SafetyRuleObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AssertedControls != nil { + in, out := &in.AssertedControls, &out.AssertedControls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ControlPanelArn != nil { + in, out := &in.ControlPanelArn, &out.ControlPanelArn + *out = new(string) + **out = **in + } + if in.GatingControls != nil { + in, out := &in.GatingControls, &out.GatingControls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RuleConfig != nil { + in, out := &in.RuleConfig, &out.RuleConfig + *out = new(RuleConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TargetControls != nil { + in, out := &in.TargetControls, &out.TargetControls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WaitPeriodMs != nil { + in, out := &in.WaitPeriodMs, &out.WaitPeriodMs + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafetyRuleObservation. 
+func (in *SafetyRuleObservation) DeepCopy() *SafetyRuleObservation { + if in == nil { + return nil + } + out := new(SafetyRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SafetyRuleParameters) DeepCopyInto(out *SafetyRuleParameters) { + *out = *in + if in.AssertedControls != nil { + in, out := &in.AssertedControls, &out.AssertedControls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AssertedControlsRefs != nil { + in, out := &in.AssertedControlsRefs, &out.AssertedControlsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AssertedControlsSelector != nil { + in, out := &in.AssertedControlsSelector, &out.AssertedControlsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ControlPanelArn != nil { + in, out := &in.ControlPanelArn, &out.ControlPanelArn + *out = new(string) + **out = **in + } + if in.ControlPanelArnRef != nil { + in, out := &in.ControlPanelArnRef, &out.ControlPanelArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ControlPanelArnSelector != nil { + in, out := &in.ControlPanelArnSelector, &out.ControlPanelArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GatingControls != nil { + in, out := &in.GatingControls, &out.GatingControls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RuleConfig != nil { + in, out := &in.RuleConfig, &out.RuleConfig + *out 
= new(RuleConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetControls != nil { + in, out := &in.TargetControls, &out.TargetControls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WaitPeriodMs != nil { + in, out := &in.WaitPeriodMs, &out.WaitPeriodMs + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafetyRuleParameters. +func (in *SafetyRuleParameters) DeepCopy() *SafetyRuleParameters { + if in == nil { + return nil + } + out := new(SafetyRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SafetyRuleSpec) DeepCopyInto(out *SafetyRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafetyRuleSpec. +func (in *SafetyRuleSpec) DeepCopy() *SafetyRuleSpec { + if in == nil { + return nil + } + out := new(SafetyRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SafetyRuleStatus) DeepCopyInto(out *SafetyRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafetyRuleStatus. 
+func (in *SafetyRuleStatus) DeepCopy() *SafetyRuleStatus { + if in == nil { + return nil + } + out := new(SafetyRuleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/route53recoverycontrolconfig/v1beta2/zz_generated.managed.go b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..046b3924fc --- /dev/null +++ b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this SafetyRule. +func (mg *SafetyRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SafetyRule. +func (mg *SafetyRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SafetyRule. +func (mg *SafetyRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SafetyRule. +func (mg *SafetyRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SafetyRule. +func (mg *SafetyRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SafetyRule. +func (mg *SafetyRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SafetyRule. +func (mg *SafetyRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SafetyRule. 
+func (mg *SafetyRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SafetyRule. +func (mg *SafetyRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SafetyRule. +func (mg *SafetyRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SafetyRule. +func (mg *SafetyRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SafetyRule. +func (mg *SafetyRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/route53recoverycontrolconfig/v1beta2/zz_generated.managedlist.go b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..8643983a96 --- /dev/null +++ b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this SafetyRuleList. 
+func (l *SafetyRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/route53recoverycontrolconfig/v1beta2/zz_generated.resolvers.go b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..1be13a202e --- /dev/null +++ b/apis/route53recoverycontrolconfig/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,107 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *SafetyRule) ResolveReferences( // ResolveReferences of this SafetyRule. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("route53recoverycontrolconfig.aws.upbound.io", "v1beta1", "RoutingControl", "RoutingControlList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.AssertedControls), + Extract: common.TerraformID(), + References: mg.Spec.ForProvider.AssertedControlsRefs, + Selector: mg.Spec.ForProvider.AssertedControlsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AssertedControls") + } + mg.Spec.ForProvider.AssertedControls = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.AssertedControlsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("route53recoverycontrolconfig.aws.upbound.io", "v1beta1", "ControlPanel", "ControlPanelList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ControlPanelArn), + Extract: common.TerraformID(), + Reference: mg.Spec.ForProvider.ControlPanelArnRef, + Selector: mg.Spec.ForProvider.ControlPanelArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ControlPanelArn") + } + mg.Spec.ForProvider.ControlPanelArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ControlPanelArnRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("route53recoverycontrolconfig.aws.upbound.io", "v1beta1", "RoutingControl", "RoutingControlList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.AssertedControls), + Extract: common.TerraformID(), + References: mg.Spec.InitProvider.AssertedControlsRefs, + Selector: mg.Spec.InitProvider.AssertedControlsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AssertedControls") + } + mg.Spec.InitProvider.AssertedControls = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.AssertedControlsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("route53recoverycontrolconfig.aws.upbound.io", "v1beta1", "ControlPanel", "ControlPanelList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ControlPanelArn), + Extract: common.TerraformID(), + Reference: mg.Spec.InitProvider.ControlPanelArnRef, + Selector: mg.Spec.InitProvider.ControlPanelArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ControlPanelArn") + } + mg.Spec.InitProvider.ControlPanelArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ControlPanelArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/route53recoverycontrolconfig/v1beta2/zz_groupversion_info.go b/apis/route53recoverycontrolconfig/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..4e465738b6 --- /dev/null +++ 
b/apis/route53recoverycontrolconfig/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=route53recoverycontrolconfig.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "route53recoverycontrolconfig.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/route53recoverycontrolconfig/v1beta2/zz_safetyrule_terraformed.go b/apis/route53recoverycontrolconfig/v1beta2/zz_safetyrule_terraformed.go new file mode 100755 index 0000000000..61108e6eb4 --- /dev/null +++ b/apis/route53recoverycontrolconfig/v1beta2/zz_safetyrule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SafetyRule +func (mg *SafetyRule) GetTerraformResourceType() string { + return "aws_route53recoverycontrolconfig_safety_rule" +} + +// GetConnectionDetailsMapping for this SafetyRule +func (tr *SafetyRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SafetyRule +func (tr *SafetyRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SafetyRule +func (tr *SafetyRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SafetyRule +func (tr *SafetyRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SafetyRule +func (tr *SafetyRule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SafetyRule +func (tr *SafetyRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SafetyRule +func (tr *SafetyRule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SafetyRule +func (tr *SafetyRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SafetyRule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SafetyRule) LateInitialize(attrs []byte) (bool, error) { + params := &SafetyRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SafetyRule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/route53recoverycontrolconfig/v1beta2/zz_safetyrule_types.go b/apis/route53recoverycontrolconfig/v1beta2/zz_safetyrule_types.go new file mode 100755 index 0000000000..fd14197a00 --- /dev/null +++ b/apis/route53recoverycontrolconfig/v1beta2/zz_safetyrule_types.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RuleConfigInitParameters struct { + + // Logical negation of the rule. + Inverted *bool `json:"inverted,omitempty" tf:"inverted,omitempty"` + + // Number of controls that must be set when you specify an ATLEAST type rule. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // Rule type. Valid values are ATLEAST, AND, and OR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RuleConfigObservation struct { + + // Logical negation of the rule. + Inverted *bool `json:"inverted,omitempty" tf:"inverted,omitempty"` + + // Number of controls that must be set when you specify an ATLEAST type rule. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // Rule type. Valid values are ATLEAST, AND, and OR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RuleConfigParameters struct { + + // Logical negation of the rule. + // +kubebuilder:validation:Optional + Inverted *bool `json:"inverted" tf:"inverted,omitempty"` + + // Number of controls that must be set when you specify an ATLEAST type rule. 
+ // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` + + // Rule type. Valid values are ATLEAST, AND, and OR. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SafetyRuleInitParameters struct { + + // Routing controls that are part of transactions that are evaluated to determine if a request to change a routing control state is allowed. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/route53recoverycontrolconfig/v1beta1.RoutingControl + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + AssertedControls []*string `json:"assertedControls,omitempty" tf:"asserted_controls,omitempty"` + + // References to RoutingControl in route53recoverycontrolconfig to populate assertedControls. + // +kubebuilder:validation:Optional + AssertedControlsRefs []v1.Reference `json:"assertedControlsRefs,omitempty" tf:"-"` + + // Selector for a list of RoutingControl in route53recoverycontrolconfig to populate assertedControls. + // +kubebuilder:validation:Optional + AssertedControlsSelector *v1.Selector `json:"assertedControlsSelector,omitempty" tf:"-"` + + // ARN of the control panel in which this safety rule will reside. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/route53recoverycontrolconfig/v1beta1.ControlPanel + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + ControlPanelArn *string `json:"controlPanelArn,omitempty" tf:"control_panel_arn,omitempty"` + + // Reference to a ControlPanel in route53recoverycontrolconfig to populate controlPanelArn. + // +kubebuilder:validation:Optional + ControlPanelArnRef *v1.Reference `json:"controlPanelArnRef,omitempty" tf:"-"` + + // Selector for a ControlPanel in route53recoverycontrolconfig to populate controlPanelArn. 
+ // +kubebuilder:validation:Optional + ControlPanelArnSelector *v1.Selector `json:"controlPanelArnSelector,omitempty" tf:"-"` + + // Gating controls for the new gating rule. That is, routing controls that are evaluated by the rule configuration that you specify. + GatingControls []*string `json:"gatingControls,omitempty" tf:"gating_controls,omitempty"` + + // Name describing the safety rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration block for safety rule criteria. See below. + RuleConfig *RuleConfigInitParameters `json:"ruleConfig,omitempty" tf:"rule_config,omitempty"` + + // Routing controls that can only be set or unset if the specified rule_config evaluates to true for the specified gating_controls. + TargetControls []*string `json:"targetControls,omitempty" tf:"target_controls,omitempty"` + + // Evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. + WaitPeriodMs *float64 `json:"waitPeriodMs,omitempty" tf:"wait_period_ms,omitempty"` +} + +type SafetyRuleObservation struct { + + // ARN of the safety rule. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Routing controls that are part of transactions that are evaluated to determine if a request to change a routing control state is allowed. + AssertedControls []*string `json:"assertedControls,omitempty" tf:"asserted_controls,omitempty"` + + // ARN of the control panel in which this safety rule will reside. + ControlPanelArn *string `json:"controlPanelArn,omitempty" tf:"control_panel_arn,omitempty"` + + // Gating controls for the new gating rule. That is, routing controls that are evaluated by the rule configuration that you specify. + GatingControls []*string `json:"gatingControls,omitempty" tf:"gating_controls,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name describing the safety rule. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration block for safety rule criteria. See below. + RuleConfig *RuleConfigObservation `json:"ruleConfig,omitempty" tf:"rule_config,omitempty"` + + // Status of the safety rule. PENDING when it is being created/updated, PENDING_DELETION when it is being deleted, and DEPLOYED otherwise. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Routing controls that can only be set or unset if the specified rule_config evaluates to true for the specified gating_controls. + TargetControls []*string `json:"targetControls,omitempty" tf:"target_controls,omitempty"` + + // Evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. + WaitPeriodMs *float64 `json:"waitPeriodMs,omitempty" tf:"wait_period_ms,omitempty"` +} + +type SafetyRuleParameters struct { + + // Routing controls that are part of transactions that are evaluated to determine if a request to change a routing control state is allowed. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/route53recoverycontrolconfig/v1beta1.RoutingControl + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + // +kubebuilder:validation:Optional + AssertedControls []*string `json:"assertedControls,omitempty" tf:"asserted_controls,omitempty"` + + // References to RoutingControl in route53recoverycontrolconfig to populate assertedControls. + // +kubebuilder:validation:Optional + AssertedControlsRefs []v1.Reference `json:"assertedControlsRefs,omitempty" tf:"-"` + + // Selector for a list of RoutingControl in route53recoverycontrolconfig to populate assertedControls. + // +kubebuilder:validation:Optional + AssertedControlsSelector *v1.Selector `json:"assertedControlsSelector,omitempty" tf:"-"` + + // ARN of the control panel in which this safety rule will reside. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/route53recoverycontrolconfig/v1beta1.ControlPanel + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.TerraformID() + // +kubebuilder:validation:Optional + ControlPanelArn *string `json:"controlPanelArn,omitempty" tf:"control_panel_arn,omitempty"` + + // Reference to a ControlPanel in route53recoverycontrolconfig to populate controlPanelArn. + // +kubebuilder:validation:Optional + ControlPanelArnRef *v1.Reference `json:"controlPanelArnRef,omitempty" tf:"-"` + + // Selector for a ControlPanel in route53recoverycontrolconfig to populate controlPanelArn. + // +kubebuilder:validation:Optional + ControlPanelArnSelector *v1.Selector `json:"controlPanelArnSelector,omitempty" tf:"-"` + + // Gating controls for the new gating rule. That is, routing controls that are evaluated by the rule configuration that you specify. + // +kubebuilder:validation:Optional + GatingControls []*string `json:"gatingControls,omitempty" tf:"gating_controls,omitempty"` + + // Name describing the safety rule. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for safety rule criteria. See below. + // +kubebuilder:validation:Optional + RuleConfig *RuleConfigParameters `json:"ruleConfig,omitempty" tf:"rule_config,omitempty"` + + // Routing controls that can only be set or unset if the specified rule_config evaluates to true for the specified gating_controls. + // +kubebuilder:validation:Optional + TargetControls []*string `json:"targetControls,omitempty" tf:"target_controls,omitempty"` + + // Evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. 
+ // +kubebuilder:validation:Optional + WaitPeriodMs *float64 `json:"waitPeriodMs,omitempty" tf:"wait_period_ms,omitempty"` +} + +// SafetyRuleSpec defines the desired state of SafetyRule +type SafetyRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SafetyRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SafetyRuleInitParameters `json:"initProvider,omitempty"` +} + +// SafetyRuleStatus defines the observed state of SafetyRule. +type SafetyRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SafetyRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SafetyRule is the Schema for the SafetyRules API. 
Provides an AWS Route 53 Recovery Control Config Safety Rule +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SafetyRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ruleConfig) || (has(self.initProvider) && has(self.initProvider.ruleConfig))",message="spec.forProvider.ruleConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.waitPeriodMs) || (has(self.initProvider) && has(self.initProvider.waitPeriodMs))",message="spec.forProvider.waitPeriodMs is a required parameter" + Spec SafetyRuleSpec `json:"spec"` + Status SafetyRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SafetyRuleList contains a list of SafetyRules +type SafetyRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SafetyRule `json:"items"` +} + +// Repository type 
metadata. +var ( + SafetyRule_Kind = "SafetyRule" + SafetyRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SafetyRule_Kind}.String() + SafetyRule_KindAPIVersion = SafetyRule_Kind + "." + CRDGroupVersion.String() + SafetyRule_GroupVersionKind = CRDGroupVersion.WithKind(SafetyRule_Kind) +) + +func init() { + SchemeBuilder.Register(&SafetyRule{}, &SafetyRuleList{}) +} diff --git a/apis/route53recoveryreadiness/v1beta1/zz_generated.conversion_hubs.go b/apis/route53recoveryreadiness/v1beta1/zz_generated.conversion_hubs.go index 2f8dcc420d..b8c039c00d 100755 --- a/apis/route53recoveryreadiness/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/route53recoveryreadiness/v1beta1/zz_generated.conversion_hubs.go @@ -14,6 +14,3 @@ func (tr *ReadinessCheck) Hub() {} // Hub marks this type as a conversion hub. func (tr *RecoveryGroup) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ResourceSet) Hub() {} diff --git a/apis/route53recoveryreadiness/v1beta1/zz_generated.conversion_spokes.go b/apis/route53recoveryreadiness/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..0e675f61a5 --- /dev/null +++ b/apis/route53recoveryreadiness/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ResourceSet to the hub type. 
+func (tr *ResourceSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ResourceSet type. +func (tr *ResourceSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/route53recoveryreadiness/v1beta2/zz_generated.conversion_hubs.go b/apis/route53recoveryreadiness/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..5fd10a516b --- /dev/null +++ b/apis/route53recoveryreadiness/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ResourceSet) Hub() {} diff --git a/apis/route53recoveryreadiness/v1beta2/zz_generated.deepcopy.go b/apis/route53recoveryreadiness/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..d95c342083 --- /dev/null +++ b/apis/route53recoveryreadiness/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,731 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSTargetResourceInitParameters) DeepCopyInto(out *DNSTargetResourceInitParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.HostedZoneArn != nil { + in, out := &in.HostedZoneArn, &out.HostedZoneArn + *out = new(string) + **out = **in + } + if in.RecordSetID != nil { + in, out := &in.RecordSetID, &out.RecordSetID + *out = new(string) + **out = **in + } + if in.RecordType != nil { + in, out := &in.RecordType, &out.RecordType + *out = new(string) + **out = **in + } + if in.TargetResource != nil { + in, out := &in.TargetResource, &out.TargetResource + *out = new(TargetResourceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSTargetResourceInitParameters. +func (in *DNSTargetResourceInitParameters) DeepCopy() *DNSTargetResourceInitParameters { + if in == nil { + return nil + } + out := new(DNSTargetResourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSTargetResourceObservation) DeepCopyInto(out *DNSTargetResourceObservation) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.HostedZoneArn != nil { + in, out := &in.HostedZoneArn, &out.HostedZoneArn + *out = new(string) + **out = **in + } + if in.RecordSetID != nil { + in, out := &in.RecordSetID, &out.RecordSetID + *out = new(string) + **out = **in + } + if in.RecordType != nil { + in, out := &in.RecordType, &out.RecordType + *out = new(string) + **out = **in + } + if in.TargetResource != nil { + in, out := &in.TargetResource, &out.TargetResource + *out = new(TargetResourceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSTargetResourceObservation. +func (in *DNSTargetResourceObservation) DeepCopy() *DNSTargetResourceObservation { + if in == nil { + return nil + } + out := new(DNSTargetResourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSTargetResourceParameters) DeepCopyInto(out *DNSTargetResourceParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.HostedZoneArn != nil { + in, out := &in.HostedZoneArn, &out.HostedZoneArn + *out = new(string) + **out = **in + } + if in.RecordSetID != nil { + in, out := &in.RecordSetID, &out.RecordSetID + *out = new(string) + **out = **in + } + if in.RecordType != nil { + in, out := &in.RecordType, &out.RecordType + *out = new(string) + **out = **in + } + if in.TargetResource != nil { + in, out := &in.TargetResource, &out.TargetResource + *out = new(TargetResourceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSTargetResourceParameters. +func (in *DNSTargetResourceParameters) DeepCopy() *DNSTargetResourceParameters { + if in == nil { + return nil + } + out := new(DNSTargetResourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NlbResourceInitParameters) DeepCopyInto(out *NlbResourceInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NlbResourceInitParameters. +func (in *NlbResourceInitParameters) DeepCopy() *NlbResourceInitParameters { + if in == nil { + return nil + } + out := new(NlbResourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NlbResourceObservation) DeepCopyInto(out *NlbResourceObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NlbResourceObservation. +func (in *NlbResourceObservation) DeepCopy() *NlbResourceObservation { + if in == nil { + return nil + } + out := new(NlbResourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NlbResourceParameters) DeepCopyInto(out *NlbResourceParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NlbResourceParameters. +func (in *NlbResourceParameters) DeepCopy() *NlbResourceParameters { + if in == nil { + return nil + } + out := new(NlbResourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *R53ResourceInitParameters) DeepCopyInto(out *R53ResourceInitParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.RecordSetID != nil { + in, out := &in.RecordSetID, &out.RecordSetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new R53ResourceInitParameters. +func (in *R53ResourceInitParameters) DeepCopy() *R53ResourceInitParameters { + if in == nil { + return nil + } + out := new(R53ResourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *R53ResourceObservation) DeepCopyInto(out *R53ResourceObservation) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.RecordSetID != nil { + in, out := &in.RecordSetID, &out.RecordSetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new R53ResourceObservation. +func (in *R53ResourceObservation) DeepCopy() *R53ResourceObservation { + if in == nil { + return nil + } + out := new(R53ResourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *R53ResourceParameters) DeepCopyInto(out *R53ResourceParameters) { + *out = *in + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.RecordSetID != nil { + in, out := &in.RecordSetID, &out.RecordSetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new R53ResourceParameters. +func (in *R53ResourceParameters) DeepCopy() *R53ResourceParameters { + if in == nil { + return nil + } + out := new(R53ResourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSet) DeepCopyInto(out *ResourceSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSet. 
+func (in *ResourceSet) DeepCopy() *ResourceSet { + if in == nil { + return nil + } + out := new(ResourceSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSetInitParameters) DeepCopyInto(out *ResourceSetInitParameters) { + *out = *in + if in.ResourceSetType != nil { + in, out := &in.ResourceSetType, &out.ResourceSetType + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSetInitParameters. +func (in *ResourceSetInitParameters) DeepCopy() *ResourceSetInitParameters { + if in == nil { + return nil + } + out := new(ResourceSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceSetList) DeepCopyInto(out *ResourceSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSetList. +func (in *ResourceSetList) DeepCopy() *ResourceSetList { + if in == nil { + return nil + } + out := new(ResourceSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSetObservation) DeepCopyInto(out *ResourceSetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceSetType != nil { + in, out := &in.ResourceSetType, &out.ResourceSetType + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var 
outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSetObservation. +func (in *ResourceSetObservation) DeepCopy() *ResourceSetObservation { + if in == nil { + return nil + } + out := new(ResourceSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSetParameters) DeepCopyInto(out *ResourceSetParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ResourceSetType != nil { + in, out := &in.ResourceSetType, &out.ResourceSetType + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSetParameters. +func (in *ResourceSetParameters) DeepCopy() *ResourceSetParameters { + if in == nil { + return nil + } + out := new(ResourceSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceSetSpec) DeepCopyInto(out *ResourceSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSetSpec. +func (in *ResourceSetSpec) DeepCopy() *ResourceSetSpec { + if in == nil { + return nil + } + out := new(ResourceSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSetStatus) DeepCopyInto(out *ResourceSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSetStatus. +func (in *ResourceSetStatus) DeepCopy() *ResourceSetStatus { + if in == nil { + return nil + } + out := new(ResourceSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesInitParameters) DeepCopyInto(out *ResourcesInitParameters) { + *out = *in + if in.DNSTargetResource != nil { + in, out := &in.DNSTargetResource, &out.DNSTargetResource + *out = new(DNSTargetResourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReadinessScopes != nil { + in, out := &in.ReadinessScopes, &out.ReadinessScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesInitParameters. +func (in *ResourcesInitParameters) DeepCopy() *ResourcesInitParameters { + if in == nil { + return nil + } + out := new(ResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesObservation) DeepCopyInto(out *ResourcesObservation) { + *out = *in + if in.ComponentID != nil { + in, out := &in.ComponentID, &out.ComponentID + *out = new(string) + **out = **in + } + if in.DNSTargetResource != nil { + in, out := &in.DNSTargetResource, &out.DNSTargetResource + *out = new(DNSTargetResourceObservation) + (*in).DeepCopyInto(*out) + } + if in.ReadinessScopes != nil { + in, out := &in.ReadinessScopes, &out.ReadinessScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesObservation. +func (in *ResourcesObservation) DeepCopy() *ResourcesObservation { + if in == nil { + return nil + } + out := new(ResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesParameters) DeepCopyInto(out *ResourcesParameters) { + *out = *in + if in.DNSTargetResource != nil { + in, out := &in.DNSTargetResource, &out.DNSTargetResource + *out = new(DNSTargetResourceParameters) + (*in).DeepCopyInto(*out) + } + if in.ReadinessScopes != nil { + in, out := &in.ReadinessScopes, &out.ReadinessScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceArn != nil { + in, out := &in.ResourceArn, &out.ResourceArn + *out = new(string) + **out = **in + } + if in.ResourceArnRef != nil { + in, out := &in.ResourceArnRef, &out.ResourceArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceArnSelector != nil { + in, out := &in.ResourceArnSelector, &out.ResourceArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesParameters. +func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { + if in == nil { + return nil + } + out := new(ResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetResourceInitParameters) DeepCopyInto(out *TargetResourceInitParameters) { + *out = *in + if in.NlbResource != nil { + in, out := &in.NlbResource, &out.NlbResource + *out = new(NlbResourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.R53Resource != nil { + in, out := &in.R53Resource, &out.R53Resource + *out = new(R53ResourceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceInitParameters. 
+func (in *TargetResourceInitParameters) DeepCopy() *TargetResourceInitParameters { + if in == nil { + return nil + } + out := new(TargetResourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetResourceObservation) DeepCopyInto(out *TargetResourceObservation) { + *out = *in + if in.NlbResource != nil { + in, out := &in.NlbResource, &out.NlbResource + *out = new(NlbResourceObservation) + (*in).DeepCopyInto(*out) + } + if in.R53Resource != nil { + in, out := &in.R53Resource, &out.R53Resource + *out = new(R53ResourceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceObservation. +func (in *TargetResourceObservation) DeepCopy() *TargetResourceObservation { + if in == nil { + return nil + } + out := new(TargetResourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetResourceParameters) DeepCopyInto(out *TargetResourceParameters) { + *out = *in + if in.NlbResource != nil { + in, out := &in.NlbResource, &out.NlbResource + *out = new(NlbResourceParameters) + (*in).DeepCopyInto(*out) + } + if in.R53Resource != nil { + in, out := &in.R53Resource, &out.R53Resource + *out = new(R53ResourceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceParameters. 
+func (in *TargetResourceParameters) DeepCopy() *TargetResourceParameters { + if in == nil { + return nil + } + out := new(TargetResourceParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/route53recoveryreadiness/v1beta2/zz_generated.managed.go b/apis/route53recoveryreadiness/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..920dffdc1d --- /dev/null +++ b/apis/route53recoveryreadiness/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ResourceSet. +func (mg *ResourceSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ResourceSet. +func (mg *ResourceSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ResourceSet. +func (mg *ResourceSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ResourceSet. +func (mg *ResourceSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ResourceSet. +func (mg *ResourceSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ResourceSet. +func (mg *ResourceSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ResourceSet. +func (mg *ResourceSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ResourceSet. 
+func (mg *ResourceSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ResourceSet. +func (mg *ResourceSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ResourceSet. +func (mg *ResourceSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ResourceSet. +func (mg *ResourceSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ResourceSet. +func (mg *ResourceSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/route53recoveryreadiness/v1beta2/zz_generated.managedlist.go b/apis/route53recoveryreadiness/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..ccaef4d46e --- /dev/null +++ b/apis/route53recoveryreadiness/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ResourceSetList. 
+func (l *ResourceSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/route53recoveryreadiness/v1beta2/zz_generated.resolvers.go b/apis/route53recoveryreadiness/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..97a6d36756 --- /dev/null +++ b/apis/route53recoveryreadiness/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,73 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ResourceSet) ResolveReferences( // ResolveReferences of this ResourceSet. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Resources); i3++ { + { + m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta2", "MetricAlarm", "MetricAlarmList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Resources[i3].ResourceArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Resources[i3].ResourceArnRef, + Selector: mg.Spec.ForProvider.Resources[i3].ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Resources[i3].ResourceArn") + } + mg.Spec.ForProvider.Resources[i3].ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Resources[i3].ResourceArnRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Resources); i3++ { + { + m, l, err = apisresolver.GetManagedResource("cloudwatch.aws.upbound.io", "v1beta2", "MetricAlarm", "MetricAlarmList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Resources[i3].ResourceArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Resources[i3].ResourceArnRef, + Selector: mg.Spec.InitProvider.Resources[i3].ResourceArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Resources[i3].ResourceArn") + } + 
mg.Spec.InitProvider.Resources[i3].ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Resources[i3].ResourceArnRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/route53recoveryreadiness/v1beta2/zz_groupversion_info.go b/apis/route53recoveryreadiness/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..a390459050 --- /dev/null +++ b/apis/route53recoveryreadiness/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=route53recoveryreadiness.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "route53recoveryreadiness.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/route53recoveryreadiness/v1beta2/zz_resourceset_terraformed.go b/apis/route53recoveryreadiness/v1beta2/zz_resourceset_terraformed.go new file mode 100755 index 0000000000..323907417b --- /dev/null +++ b/apis/route53recoveryreadiness/v1beta2/zz_resourceset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ResourceSet +func (mg *ResourceSet) GetTerraformResourceType() string { + return "aws_route53recoveryreadiness_resource_set" +} + +// GetConnectionDetailsMapping for this ResourceSet +func (tr *ResourceSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ResourceSet +func (tr *ResourceSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ResourceSet +func (tr *ResourceSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ResourceSet +func (tr *ResourceSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ResourceSet +func (tr *ResourceSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ResourceSet +func (tr *ResourceSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ResourceSet +func (tr *ResourceSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + 
base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ResourceSet +func (tr *ResourceSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ResourceSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ResourceSet) LateInitialize(attrs []byte) (bool, error) { + params := &ResourceSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ResourceSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/route53recoveryreadiness/v1beta2/zz_resourceset_types.go b/apis/route53recoveryreadiness/v1beta2/zz_resourceset_types.go new file mode 100755 index 0000000000..0382796ccb --- /dev/null +++ b/apis/route53recoveryreadiness/v1beta2/zz_resourceset_types.go @@ -0,0 +1,330 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DNSTargetResourceInitParameters struct { + + // DNS Name that acts as the ingress point to a portion of application. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Hosted Zone ARN that contains the DNS record with the provided name of target resource. + HostedZoneArn *string `json:"hostedZoneArn,omitempty" tf:"hosted_zone_arn,omitempty"` + + // Route53 record set id to uniquely identify a record given a domain_name and a record_type. + RecordSetID *string `json:"recordSetId,omitempty" tf:"record_set_id,omitempty"` + + // Type of DNS Record of target resource. + RecordType *string `json:"recordType,omitempty" tf:"record_type,omitempty"` + + // Target resource the R53 record specified with the above params points to. + TargetResource *TargetResourceInitParameters `json:"targetResource,omitempty" tf:"target_resource,omitempty"` +} + +type DNSTargetResourceObservation struct { + + // DNS Name that acts as the ingress point to a portion of application. 
+ DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Hosted Zone ARN that contains the DNS record with the provided name of target resource. + HostedZoneArn *string `json:"hostedZoneArn,omitempty" tf:"hosted_zone_arn,omitempty"` + + // Route53 record set id to uniquely identify a record given a domain_name and a record_type. + RecordSetID *string `json:"recordSetId,omitempty" tf:"record_set_id,omitempty"` + + // Type of DNS Record of target resource. + RecordType *string `json:"recordType,omitempty" tf:"record_type,omitempty"` + + // Target resource the R53 record specified with the above params points to. + TargetResource *TargetResourceObservation `json:"targetResource,omitempty" tf:"target_resource,omitempty"` +} + +type DNSTargetResourceParameters struct { + + // DNS Name that acts as the ingress point to a portion of application. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // Hosted Zone ARN that contains the DNS record with the provided name of target resource. + // +kubebuilder:validation:Optional + HostedZoneArn *string `json:"hostedZoneArn,omitempty" tf:"hosted_zone_arn,omitempty"` + + // Route53 record set id to uniquely identify a record given a domain_name and a record_type. + // +kubebuilder:validation:Optional + RecordSetID *string `json:"recordSetId,omitempty" tf:"record_set_id,omitempty"` + + // Type of DNS Record of target resource. + // +kubebuilder:validation:Optional + RecordType *string `json:"recordType,omitempty" tf:"record_type,omitempty"` + + // Target resource the R53 record specified with the above params points to. + // +kubebuilder:validation:Optional + TargetResource *TargetResourceParameters `json:"targetResource,omitempty" tf:"target_resource,omitempty"` +} + +type NlbResourceInitParameters struct { + + // NLB resource ARN. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type NlbResourceObservation struct { + + // NLB resource ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type NlbResourceParameters struct { + + // NLB resource ARN. + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type R53ResourceInitParameters struct { + + // Domain name that is targeted. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Resource record set ID that is targeted. + RecordSetID *string `json:"recordSetId,omitempty" tf:"record_set_id,omitempty"` +} + +type R53ResourceObservation struct { + + // Domain name that is targeted. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Resource record set ID that is targeted. + RecordSetID *string `json:"recordSetId,omitempty" tf:"record_set_id,omitempty"` +} + +type R53ResourceParameters struct { + + // Domain name that is targeted. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Resource record set ID that is targeted. + // +kubebuilder:validation:Optional + RecordSetID *string `json:"recordSetId,omitempty" tf:"record_set_id,omitempty"` +} + +type ResourceSetInitParameters struct { + + // Type of the resources in the resource set. + ResourceSetType *string `json:"resourceSetType,omitempty" tf:"resource_set_type,omitempty"` + + // List of resources to add to this resource set. See below. + Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ResourceSetObservation struct { + + // ARN of the resource set + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of the resources in the resource set. 
+ ResourceSetType *string `json:"resourceSetType,omitempty" tf:"resource_set_type,omitempty"` + + // List of resources to add to this resource set. See below. + Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type ResourceSetParameters struct { + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Type of the resources in the resource set. + // +kubebuilder:validation:Optional + ResourceSetType *string `json:"resourceSetType,omitempty" tf:"resource_set_type,omitempty"` + + // List of resources to add to this resource set. See below. + // +kubebuilder:validation:Optional + Resources []ResourcesParameters `json:"resources,omitempty" tf:"resources,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ResourcesInitParameters struct { + + // Component for DNS/Routing Control Readiness Checks. + DNSTargetResource *DNSTargetResourceInitParameters `json:"dnsTargetResource,omitempty" tf:"dns_target_resource,omitempty"` + + // Recovery group ARN or cell ARN that contains this resource set. + ReadinessScopes []*string `json:"readinessScopes,omitempty" tf:"readiness_scopes,omitempty"` + + // ARN of the resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta2.MetricAlarm + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a MetricAlarm in cloudwatch to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a MetricAlarm in cloudwatch to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type ResourcesObservation struct { + + // Unique identified for DNS Target Resources, use for readiness checks. + ComponentID *string `json:"componentId,omitempty" tf:"component_id,omitempty"` + + // Component for DNS/Routing Control Readiness Checks. + DNSTargetResource *DNSTargetResourceObservation `json:"dnsTargetResource,omitempty" tf:"dns_target_resource,omitempty"` + + // Recovery group ARN or cell ARN that contains this resource set. + ReadinessScopes []*string `json:"readinessScopes,omitempty" tf:"readiness_scopes,omitempty"` + + // ARN of the resource. + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` +} + +type ResourcesParameters struct { + + // Component for DNS/Routing Control Readiness Checks. + // +kubebuilder:validation:Optional + DNSTargetResource *DNSTargetResourceParameters `json:"dnsTargetResource,omitempty" tf:"dns_target_resource,omitempty"` + + // Recovery group ARN or cell ARN that contains this resource set. + // +kubebuilder:validation:Optional + ReadinessScopes []*string `json:"readinessScopes,omitempty" tf:"readiness_scopes,omitempty"` + + // ARN of the resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cloudwatch/v1beta2.MetricAlarm + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` + + // Reference to a MetricAlarm in cloudwatch to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnRef *v1.Reference `json:"resourceArnRef,omitempty" tf:"-"` + + // Selector for a MetricAlarm in cloudwatch to populate resourceArn. + // +kubebuilder:validation:Optional + ResourceArnSelector *v1.Selector `json:"resourceArnSelector,omitempty" tf:"-"` +} + +type TargetResourceInitParameters struct { + + // NLB resource a DNS Target Resource points to. Required if r53_resource is not set. + NlbResource *NlbResourceInitParameters `json:"nlbResource,omitempty" tf:"nlb_resource,omitempty"` + + // Route53 resource a DNS Target Resource record points to. + R53Resource *R53ResourceInitParameters `json:"r53Resource,omitempty" tf:"r53_resource,omitempty"` +} + +type TargetResourceObservation struct { + + // NLB resource a DNS Target Resource points to. Required if r53_resource is not set. + NlbResource *NlbResourceObservation `json:"nlbResource,omitempty" tf:"nlb_resource,omitempty"` + + // Route53 resource a DNS Target Resource record points to. + R53Resource *R53ResourceObservation `json:"r53Resource,omitempty" tf:"r53_resource,omitempty"` +} + +type TargetResourceParameters struct { + + // NLB resource a DNS Target Resource points to. Required if r53_resource is not set. + // +kubebuilder:validation:Optional + NlbResource *NlbResourceParameters `json:"nlbResource,omitempty" tf:"nlb_resource,omitempty"` + + // Route53 resource a DNS Target Resource record points to. 
+ // +kubebuilder:validation:Optional + R53Resource *R53ResourceParameters `json:"r53Resource,omitempty" tf:"r53_resource,omitempty"` +} + +// ResourceSetSpec defines the desired state of ResourceSet +type ResourceSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResourceSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ResourceSetInitParameters `json:"initProvider,omitempty"` +} + +// ResourceSetStatus defines the observed state of ResourceSet. +type ResourceSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResourceSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ResourceSet is the Schema for the ResourceSets API. 
Provides an AWS Route 53 Recovery Readiness Resource Set +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ResourceSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resourceSetType) || (has(self.initProvider) && has(self.initProvider.resourceSetType))",message="spec.forProvider.resourceSetType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resources) || (has(self.initProvider) && has(self.initProvider.resources))",message="spec.forProvider.resources is a required parameter" + Spec ResourceSetSpec `json:"spec"` + Status ResourceSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourceSetList contains a list of ResourceSets +type ResourceSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourceSet `json:"items"` +} + +// Repository type metadata. +var ( + ResourceSet_Kind = "ResourceSet" + ResourceSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ResourceSet_Kind}.String() + ResourceSet_KindAPIVersion = ResourceSet_Kind + "." 
+ CRDGroupVersion.String() + ResourceSet_GroupVersionKind = CRDGroupVersion.WithKind(ResourceSet_Kind) +) + +func init() { + SchemeBuilder.Register(&ResourceSet{}, &ResourceSetList{}) +} diff --git a/apis/rum/v1beta1/zz_generated.conversion_hubs.go b/apis/rum/v1beta1/zz_generated.conversion_hubs.go index 369b53522e..771335b110 100755 --- a/apis/rum/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/rum/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *AppMonitor) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MetricsDestination) Hub() {} diff --git a/apis/rum/v1beta1/zz_generated.conversion_spokes.go b/apis/rum/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..789a0bb4ce --- /dev/null +++ b/apis/rum/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AppMonitor to the hub type. +func (tr *AppMonitor) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AppMonitor type. 
+func (tr *AppMonitor) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/rum/v1beta1/zz_generated.resolvers.go b/apis/rum/v1beta1/zz_generated.resolvers.go index a0dd9ffa06..782cead499 100644 --- a/apis/rum/v1beta1/zz_generated.resolvers.go +++ b/apis/rum/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *MetricsDestination) ResolveReferences( // ResolveReferences of this Me var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("rum.aws.upbound.io", "v1beta1", "AppMonitor", "AppMonitorList") + m, l, err = apisresolver.GetManagedResource("rum.aws.upbound.io", "v1beta2", "AppMonitor", "AppMonitorList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -64,7 +64,7 @@ func (mg *MetricsDestination) ResolveReferences( // ResolveReferences of this Me mg.Spec.ForProvider.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.IAMRoleArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("rum.aws.upbound.io", "v1beta1", "AppMonitor", "AppMonitorList") + m, l, err = apisresolver.GetManagedResource("rum.aws.upbound.io", "v1beta2", "AppMonitor", "AppMonitorList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/rum/v1beta1/zz_metricsdestination_types.go b/apis/rum/v1beta1/zz_metricsdestination_types.go index f81493e509..0651e5485d 100755 --- a/apis/rum/v1beta1/zz_metricsdestination_types.go +++ 
b/apis/rum/v1beta1/zz_metricsdestination_types.go @@ -16,7 +16,7 @@ import ( type MetricsDestinationInitParameters struct { // The name of the CloudWatch RUM app monitor that will send the metrics. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rum/v1beta1.AppMonitor + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rum/v1beta2.AppMonitor AppMonitorName *string `json:"appMonitorName,omitempty" tf:"app_monitor_name,omitempty"` // Reference to a AppMonitor in rum to populate appMonitorName. @@ -68,7 +68,7 @@ type MetricsDestinationObservation struct { type MetricsDestinationParameters struct { // The name of the CloudWatch RUM app monitor that will send the metrics. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rum/v1beta1.AppMonitor + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/rum/v1beta2.AppMonitor // +kubebuilder:validation:Optional AppMonitorName *string `json:"appMonitorName,omitempty" tf:"app_monitor_name,omitempty"` diff --git a/apis/rum/v1beta2/zz_appmonitor_terraformed.go b/apis/rum/v1beta2/zz_appmonitor_terraformed.go new file mode 100755 index 0000000000..90078edd53 --- /dev/null +++ b/apis/rum/v1beta2/zz_appmonitor_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AppMonitor +func (mg *AppMonitor) GetTerraformResourceType() string { + return "aws_rum_app_monitor" +} + +// GetConnectionDetailsMapping for this AppMonitor +func (tr *AppMonitor) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AppMonitor +func (tr *AppMonitor) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AppMonitor +func (tr *AppMonitor) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AppMonitor +func (tr *AppMonitor) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AppMonitor +func (tr *AppMonitor) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AppMonitor +func (tr *AppMonitor) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AppMonitor +func (tr *AppMonitor) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this AppMonitor
+func (tr *AppMonitor) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this AppMonitor using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *AppMonitor) LateInitialize(attrs []byte) (bool, error) {
+	params := &AppMonitorParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AppMonitor) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/rum/v1beta2/zz_appmonitor_types.go b/apis/rum/v1beta2/zz_appmonitor_types.go new file mode 100755 index 0000000000..307e80e1de --- /dev/null +++ b/apis/rum/v1beta2/zz_appmonitor_types.go @@ -0,0 +1,288 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AppMonitorConfigurationInitParameters struct { + + // If you set this to true, RUM web client sets two cookies, a session cookie and a user cookie. The cookies allow the RUM web client to collect data relating to the number of users an application has and the behavior of the application across a sequence of events. Cookies are stored in the top-level domain of the current page. + AllowCookies *bool `json:"allowCookies,omitempty" tf:"allow_cookies,omitempty"` + + // If you set this to true, RUM enables X-Ray tracing for the user sessions that RUM samples. RUM adds an X-Ray trace header to allowed HTTP requests. It also records an X-Ray segment for allowed HTTP requests. + EnableXray *bool `json:"enableXray,omitempty" tf:"enable_xray,omitempty"` + + // A list of URLs in your website or application to exclude from RUM data collection. + // +listType=set + ExcludedPages []*string `json:"excludedPages,omitempty" tf:"excluded_pages,omitempty"` + + // A list of pages in the CloudWatch RUM console that are to be displayed with a "favorite" icon. 
+ // +listType=set + FavoritePages []*string `json:"favoritePages,omitempty" tf:"favorite_pages,omitempty"` + + // The ARN of the guest IAM role that is attached to the Amazon Cognito identity pool that is used to authorize the sending of data to RUM. + GuestRoleArn *string `json:"guestRoleArn,omitempty" tf:"guest_role_arn,omitempty"` + + // The ID of the Amazon Cognito identity pool that is used to authorize the sending of data to RUM. + IdentityPoolID *string `json:"identityPoolId,omitempty" tf:"identity_pool_id,omitempty"` + + // If this app monitor is to collect data from only certain pages in your application, this structure lists those pages. + // +listType=set + IncludedPages []*string `json:"includedPages,omitempty" tf:"included_pages,omitempty"` + + // Specifies the percentage of user sessions to use for RUM data collection. Choosing a higher percentage gives you more data but also incurs more costs. The number you specify is the percentage of user sessions that will be used. Default value is 0.1. + SessionSampleRate *float64 `json:"sessionSampleRate,omitempty" tf:"session_sample_rate,omitempty"` + + // An array that lists the types of telemetry data that this app monitor is to collect. Valid values are errors, performance, and http. + // +listType=set + Telemetries []*string `json:"telemetries,omitempty" tf:"telemetries,omitempty"` +} + +type AppMonitorConfigurationObservation struct { + + // If you set this to true, RUM web client sets two cookies, a session cookie and a user cookie. The cookies allow the RUM web client to collect data relating to the number of users an application has and the behavior of the application across a sequence of events. Cookies are stored in the top-level domain of the current page. + AllowCookies *bool `json:"allowCookies,omitempty" tf:"allow_cookies,omitempty"` + + // If you set this to true, RUM enables X-Ray tracing for the user sessions that RUM samples. RUM adds an X-Ray trace header to allowed HTTP requests. 
It also records an X-Ray segment for allowed HTTP requests. + EnableXray *bool `json:"enableXray,omitempty" tf:"enable_xray,omitempty"` + + // A list of URLs in your website or application to exclude from RUM data collection. + // +listType=set + ExcludedPages []*string `json:"excludedPages,omitempty" tf:"excluded_pages,omitempty"` + + // A list of pages in the CloudWatch RUM console that are to be displayed with a "favorite" icon. + // +listType=set + FavoritePages []*string `json:"favoritePages,omitempty" tf:"favorite_pages,omitempty"` + + // The ARN of the guest IAM role that is attached to the Amazon Cognito identity pool that is used to authorize the sending of data to RUM. + GuestRoleArn *string `json:"guestRoleArn,omitempty" tf:"guest_role_arn,omitempty"` + + // The ID of the Amazon Cognito identity pool that is used to authorize the sending of data to RUM. + IdentityPoolID *string `json:"identityPoolId,omitempty" tf:"identity_pool_id,omitempty"` + + // If this app monitor is to collect data from only certain pages in your application, this structure lists those pages. + // +listType=set + IncludedPages []*string `json:"includedPages,omitempty" tf:"included_pages,omitempty"` + + // Specifies the percentage of user sessions to use for RUM data collection. Choosing a higher percentage gives you more data but also incurs more costs. The number you specify is the percentage of user sessions that will be used. Default value is 0.1. + SessionSampleRate *float64 `json:"sessionSampleRate,omitempty" tf:"session_sample_rate,omitempty"` + + // An array that lists the types of telemetry data that this app monitor is to collect. Valid values are errors, performance, and http. + // +listType=set + Telemetries []*string `json:"telemetries,omitempty" tf:"telemetries,omitempty"` +} + +type AppMonitorConfigurationParameters struct { + + // If you set this to true, RUM web client sets two cookies, a session cookie and a user cookie. 
The cookies allow the RUM web client to collect data relating to the number of users an application has and the behavior of the application across a sequence of events. Cookies are stored in the top-level domain of the current page. + // +kubebuilder:validation:Optional + AllowCookies *bool `json:"allowCookies,omitempty" tf:"allow_cookies,omitempty"` + + // If you set this to true, RUM enables X-Ray tracing for the user sessions that RUM samples. RUM adds an X-Ray trace header to allowed HTTP requests. It also records an X-Ray segment for allowed HTTP requests. + // +kubebuilder:validation:Optional + EnableXray *bool `json:"enableXray,omitempty" tf:"enable_xray,omitempty"` + + // A list of URLs in your website or application to exclude from RUM data collection. + // +kubebuilder:validation:Optional + // +listType=set + ExcludedPages []*string `json:"excludedPages,omitempty" tf:"excluded_pages,omitempty"` + + // A list of pages in the CloudWatch RUM console that are to be displayed with a "favorite" icon. + // +kubebuilder:validation:Optional + // +listType=set + FavoritePages []*string `json:"favoritePages,omitempty" tf:"favorite_pages,omitempty"` + + // The ARN of the guest IAM role that is attached to the Amazon Cognito identity pool that is used to authorize the sending of data to RUM. + // +kubebuilder:validation:Optional + GuestRoleArn *string `json:"guestRoleArn,omitempty" tf:"guest_role_arn,omitempty"` + + // The ID of the Amazon Cognito identity pool that is used to authorize the sending of data to RUM. + // +kubebuilder:validation:Optional + IdentityPoolID *string `json:"identityPoolId,omitempty" tf:"identity_pool_id,omitempty"` + + // If this app monitor is to collect data from only certain pages in your application, this structure lists those pages. 
+ // +kubebuilder:validation:Optional + // +listType=set + IncludedPages []*string `json:"includedPages,omitempty" tf:"included_pages,omitempty"` + + // Specifies the percentage of user sessions to use for RUM data collection. Choosing a higher percentage gives you more data but also incurs more costs. The number you specify is the percentage of user sessions that will be used. Default value is 0.1. + // +kubebuilder:validation:Optional + SessionSampleRate *float64 `json:"sessionSampleRate,omitempty" tf:"session_sample_rate,omitempty"` + + // An array that lists the types of telemetry data that this app monitor is to collect. Valid values are errors, performance, and http. + // +kubebuilder:validation:Optional + // +listType=set + Telemetries []*string `json:"telemetries,omitempty" tf:"telemetries,omitempty"` +} + +type AppMonitorInitParameters struct { + + // configuration data for the app monitor. See app_monitor_configuration below. + AppMonitorConfiguration *AppMonitorConfigurationInitParameters `json:"appMonitorConfiguration,omitempty" tf:"app_monitor_configuration,omitempty"` + + // Specifies whether this app monitor allows the web client to define and send custom events. If you omit this parameter, custom events are DISABLED. See custom_events below. + CustomEvents *CustomEventsInitParameters `json:"customEvents,omitempty" tf:"custom_events,omitempty"` + + // Data collected by RUM is kept by RUM for 30 days and then deleted. This parameter specifies whether RUM sends a copy of this telemetry data to Amazon CloudWatch Logs in your account. This enables you to keep the telemetry data for more than 30 days, but it does incur Amazon CloudWatch Logs charges. Default value is false. + CwLogEnabled *bool `json:"cwLogEnabled,omitempty" tf:"cw_log_enabled,omitempty"` + + // The top-level internet domain name for which your application has administrative authority. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AppMonitorObservation struct { + + // configuration data for the app monitor. See app_monitor_configuration below. + AppMonitorConfiguration *AppMonitorConfigurationObservation `json:"appMonitorConfiguration,omitempty" tf:"app_monitor_configuration,omitempty"` + + // The unique ID of the app monitor. Useful for JS templates. + AppMonitorID *string `json:"appMonitorId,omitempty" tf:"app_monitor_id,omitempty"` + + // The Amazon Resource Name (ARN) specifying the app monitor. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specifies whether this app monitor allows the web client to define and send custom events. If you omit this parameter, custom events are DISABLED. See custom_events below. + CustomEvents *CustomEventsObservation `json:"customEvents,omitempty" tf:"custom_events,omitempty"` + + // Data collected by RUM is kept by RUM for 30 days and then deleted. This parameter specifies whether RUM sends a copy of this telemetry data to Amazon CloudWatch Logs in your account. This enables you to keep the telemetry data for more than 30 days, but it does incur Amazon CloudWatch Logs charges. Default value is false. + CwLogEnabled *bool `json:"cwLogEnabled,omitempty" tf:"cw_log_enabled,omitempty"` + + // The name of the log group where the copies are stored. + CwLogGroup *string `json:"cwLogGroup,omitempty" tf:"cw_log_group,omitempty"` + + // The top-level internet domain name for which your application has administrative authority. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The CloudWatch RUM name as it is the identifier of a RUM. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type AppMonitorParameters struct { + + // configuration data for the app monitor. See app_monitor_configuration below. + // +kubebuilder:validation:Optional + AppMonitorConfiguration *AppMonitorConfigurationParameters `json:"appMonitorConfiguration,omitempty" tf:"app_monitor_configuration,omitempty"` + + // Specifies whether this app monitor allows the web client to define and send custom events. If you omit this parameter, custom events are DISABLED. See custom_events below. + // +kubebuilder:validation:Optional + CustomEvents *CustomEventsParameters `json:"customEvents,omitempty" tf:"custom_events,omitempty"` + + // Data collected by RUM is kept by RUM for 30 days and then deleted. This parameter specifies whether RUM sends a copy of this telemetry data to Amazon CloudWatch Logs in your account. This enables you to keep the telemetry data for more than 30 days, but it does incur Amazon CloudWatch Logs charges. Default value is false. + // +kubebuilder:validation:Optional + CwLogEnabled *bool `json:"cwLogEnabled,omitempty" tf:"cw_log_enabled,omitempty"` + + // The top-level internet domain name for which your application has administrative authority. + // +kubebuilder:validation:Optional + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CustomEventsInitParameters struct { + + // Specifies whether this app monitor allows the web client to define and send custom events. The default is for custom events to be DISABLED. Valid values are DISABLED and ENABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CustomEventsObservation struct { + + // Specifies whether this app monitor allows the web client to define and send custom events. The default is for custom events to be DISABLED. Valid values are DISABLED and ENABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CustomEventsParameters struct { + + // Specifies whether this app monitor allows the web client to define and send custom events. The default is for custom events to be DISABLED. Valid values are DISABLED and ENABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +// AppMonitorSpec defines the desired state of AppMonitor +type AppMonitorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AppMonitorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider AppMonitorInitParameters `json:"initProvider,omitempty"` +} + +// AppMonitorStatus defines the observed state of AppMonitor. +type AppMonitorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AppMonitorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AppMonitor is the Schema for the AppMonitors API. Provides a CloudWatch RUM App Monitor resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type AppMonitor struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.domain) || (has(self.initProvider) && has(self.initProvider.domain))",message="spec.forProvider.domain is a required parameter" + Spec AppMonitorSpec `json:"spec"` + Status AppMonitorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AppMonitorList contains a list of AppMonitors +type AppMonitorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AppMonitor `json:"items"` +} + +// Repository type metadata. +var ( + AppMonitor_Kind = "AppMonitor" + AppMonitor_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AppMonitor_Kind}.String() + AppMonitor_KindAPIVersion = AppMonitor_Kind + "." 
+ CRDGroupVersion.String() + AppMonitor_GroupVersionKind = CRDGroupVersion.WithKind(AppMonitor_Kind) +) + +func init() { + SchemeBuilder.Register(&AppMonitor{}, &AppMonitorList{}) +} diff --git a/apis/rum/v1beta2/zz_generated.conversion_hubs.go b/apis/rum/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..4cfccf17f1 --- /dev/null +++ b/apis/rum/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *AppMonitor) Hub() {} diff --git a/apis/rum/v1beta2/zz_generated.deepcopy.go b/apis/rum/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..2512762bbe --- /dev/null +++ b/apis/rum/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,613 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMonitor) DeepCopyInto(out *AppMonitor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMonitor. +func (in *AppMonitor) DeepCopy() *AppMonitor { + if in == nil { + return nil + } + out := new(AppMonitor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AppMonitor) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMonitorConfigurationInitParameters) DeepCopyInto(out *AppMonitorConfigurationInitParameters) { + *out = *in + if in.AllowCookies != nil { + in, out := &in.AllowCookies, &out.AllowCookies + *out = new(bool) + **out = **in + } + if in.EnableXray != nil { + in, out := &in.EnableXray, &out.EnableXray + *out = new(bool) + **out = **in + } + if in.ExcludedPages != nil { + in, out := &in.ExcludedPages, &out.ExcludedPages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FavoritePages != nil { + in, out := &in.FavoritePages, &out.FavoritePages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.GuestRoleArn != nil { + in, out := &in.GuestRoleArn, &out.GuestRoleArn + *out = new(string) + **out = **in + } + if in.IdentityPoolID != nil { + in, out := &in.IdentityPoolID, &out.IdentityPoolID + *out = new(string) + **out = **in + } + if in.IncludedPages != nil { + in, out := &in.IncludedPages, &out.IncludedPages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SessionSampleRate != nil { + in, out := &in.SessionSampleRate, &out.SessionSampleRate + *out = new(float64) + **out = **in + } + if in.Telemetries != nil { + in, out := &in.Telemetries, &out.Telemetries + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new AppMonitorConfigurationInitParameters. +func (in *AppMonitorConfigurationInitParameters) DeepCopy() *AppMonitorConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AppMonitorConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMonitorConfigurationObservation) DeepCopyInto(out *AppMonitorConfigurationObservation) { + *out = *in + if in.AllowCookies != nil { + in, out := &in.AllowCookies, &out.AllowCookies + *out = new(bool) + **out = **in + } + if in.EnableXray != nil { + in, out := &in.EnableXray, &out.EnableXray + *out = new(bool) + **out = **in + } + if in.ExcludedPages != nil { + in, out := &in.ExcludedPages, &out.ExcludedPages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FavoritePages != nil { + in, out := &in.FavoritePages, &out.FavoritePages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.GuestRoleArn != nil { + in, out := &in.GuestRoleArn, &out.GuestRoleArn + *out = new(string) + **out = **in + } + if in.IdentityPoolID != nil { + in, out := &in.IdentityPoolID, &out.IdentityPoolID + *out = new(string) + **out = **in + } + if in.IncludedPages != nil { + in, out := &in.IncludedPages, &out.IncludedPages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SessionSampleRate != nil { + in, out := &in.SessionSampleRate, &out.SessionSampleRate + *out = new(float64) + **out = **in + } + if in.Telemetries != nil { + in, out := &in.Telemetries, &out.Telemetries + *out = make([]*string, 
len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMonitorConfigurationObservation. +func (in *AppMonitorConfigurationObservation) DeepCopy() *AppMonitorConfigurationObservation { + if in == nil { + return nil + } + out := new(AppMonitorConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMonitorConfigurationParameters) DeepCopyInto(out *AppMonitorConfigurationParameters) { + *out = *in + if in.AllowCookies != nil { + in, out := &in.AllowCookies, &out.AllowCookies + *out = new(bool) + **out = **in + } + if in.EnableXray != nil { + in, out := &in.EnableXray, &out.EnableXray + *out = new(bool) + **out = **in + } + if in.ExcludedPages != nil { + in, out := &in.ExcludedPages, &out.ExcludedPages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FavoritePages != nil { + in, out := &in.FavoritePages, &out.FavoritePages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.GuestRoleArn != nil { + in, out := &in.GuestRoleArn, &out.GuestRoleArn + *out = new(string) + **out = **in + } + if in.IdentityPoolID != nil { + in, out := &in.IdentityPoolID, &out.IdentityPoolID + *out = new(string) + **out = **in + } + if in.IncludedPages != nil { + in, out := &in.IncludedPages, &out.IncludedPages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SessionSampleRate != nil { + in, out := &in.SessionSampleRate, 
&out.SessionSampleRate + *out = new(float64) + **out = **in + } + if in.Telemetries != nil { + in, out := &in.Telemetries, &out.Telemetries + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMonitorConfigurationParameters. +func (in *AppMonitorConfigurationParameters) DeepCopy() *AppMonitorConfigurationParameters { + if in == nil { + return nil + } + out := new(AppMonitorConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMonitorInitParameters) DeepCopyInto(out *AppMonitorInitParameters) { + *out = *in + if in.AppMonitorConfiguration != nil { + in, out := &in.AppMonitorConfiguration, &out.AppMonitorConfiguration + *out = new(AppMonitorConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomEvents != nil { + in, out := &in.CustomEvents, &out.CustomEvents + *out = new(CustomEventsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CwLogEnabled != nil { + in, out := &in.CwLogEnabled, &out.CwLogEnabled + *out = new(bool) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMonitorInitParameters. 
+func (in *AppMonitorInitParameters) DeepCopy() *AppMonitorInitParameters { + if in == nil { + return nil + } + out := new(AppMonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMonitorList) DeepCopyInto(out *AppMonitorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AppMonitor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMonitorList. +func (in *AppMonitorList) DeepCopy() *AppMonitorList { + if in == nil { + return nil + } + out := new(AppMonitorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppMonitorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppMonitorObservation) DeepCopyInto(out *AppMonitorObservation) { + *out = *in + if in.AppMonitorConfiguration != nil { + in, out := &in.AppMonitorConfiguration, &out.AppMonitorConfiguration + *out = new(AppMonitorConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.AppMonitorID != nil { + in, out := &in.AppMonitorID, &out.AppMonitorID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CustomEvents != nil { + in, out := &in.CustomEvents, &out.CustomEvents + *out = new(CustomEventsObservation) + (*in).DeepCopyInto(*out) + } + if in.CwLogEnabled != nil { + in, out := &in.CwLogEnabled, &out.CwLogEnabled + *out = new(bool) + **out = **in + } + if in.CwLogGroup != nil { + in, out := &in.CwLogGroup, &out.CwLogGroup + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMonitorObservation. 
+func (in *AppMonitorObservation) DeepCopy() *AppMonitorObservation { + if in == nil { + return nil + } + out := new(AppMonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMonitorParameters) DeepCopyInto(out *AppMonitorParameters) { + *out = *in + if in.AppMonitorConfiguration != nil { + in, out := &in.AppMonitorConfiguration, &out.AppMonitorConfiguration + *out = new(AppMonitorConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomEvents != nil { + in, out := &in.CustomEvents, &out.CustomEvents + *out = new(CustomEventsParameters) + (*in).DeepCopyInto(*out) + } + if in.CwLogEnabled != nil { + in, out := &in.CwLogEnabled, &out.CwLogEnabled + *out = new(bool) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMonitorParameters. +func (in *AppMonitorParameters) DeepCopy() *AppMonitorParameters { + if in == nil { + return nil + } + out := new(AppMonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppMonitorSpec) DeepCopyInto(out *AppMonitorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMonitorSpec. +func (in *AppMonitorSpec) DeepCopy() *AppMonitorSpec { + if in == nil { + return nil + } + out := new(AppMonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppMonitorStatus) DeepCopyInto(out *AppMonitorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppMonitorStatus. +func (in *AppMonitorStatus) DeepCopy() *AppMonitorStatus { + if in == nil { + return nil + } + out := new(AppMonitorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomEventsInitParameters) DeepCopyInto(out *CustomEventsInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomEventsInitParameters. +func (in *CustomEventsInitParameters) DeepCopy() *CustomEventsInitParameters { + if in == nil { + return nil + } + out := new(CustomEventsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomEventsObservation) DeepCopyInto(out *CustomEventsObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomEventsObservation. +func (in *CustomEventsObservation) DeepCopy() *CustomEventsObservation { + if in == nil { + return nil + } + out := new(CustomEventsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomEventsParameters) DeepCopyInto(out *CustomEventsParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomEventsParameters. +func (in *CustomEventsParameters) DeepCopy() *CustomEventsParameters { + if in == nil { + return nil + } + out := new(CustomEventsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/rum/v1beta2/zz_generated.managed.go b/apis/rum/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..e760a72bd5 --- /dev/null +++ b/apis/rum/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AppMonitor. +func (mg *AppMonitor) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AppMonitor. +func (mg *AppMonitor) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AppMonitor. 
+func (mg *AppMonitor) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AppMonitor. +func (mg *AppMonitor) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AppMonitor. +func (mg *AppMonitor) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AppMonitor. +func (mg *AppMonitor) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AppMonitor. +func (mg *AppMonitor) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AppMonitor. +func (mg *AppMonitor) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AppMonitor. +func (mg *AppMonitor) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AppMonitor. +func (mg *AppMonitor) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AppMonitor. +func (mg *AppMonitor) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AppMonitor. 
+func (mg *AppMonitor) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/rum/v1beta2/zz_generated.managedlist.go b/apis/rum/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..5703f2b3be --- /dev/null +++ b/apis/rum/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AppMonitorList. +func (l *AppMonitorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/rum/v1beta2/zz_groupversion_info.go b/apis/rum/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..f0a3d846b8 --- /dev/null +++ b/apis/rum/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=rum.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "rum.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/s3/v1beta1/zz_bucketaccelerateconfiguration_types.go b/apis/s3/v1beta1/zz_bucketaccelerateconfiguration_types.go index 6f8e3e1b57..a52ba11aea 100755 --- a/apis/s3/v1beta1/zz_bucketaccelerateconfiguration_types.go +++ b/apis/s3/v1beta1/zz_bucketaccelerateconfiguration_types.go @@ -16,7 +16,7 @@ import ( type BucketAccelerateConfigurationInitParameters struct { // Name of the bucket. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` @@ -53,7 +53,7 @@ type BucketAccelerateConfigurationObservation struct { type BucketAccelerateConfigurationParameters struct { // Name of the bucket. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` diff --git a/apis/s3/v1beta1/zz_bucketcorsconfiguration_types.go b/apis/s3/v1beta1/zz_bucketcorsconfiguration_types.go index d601158a79..b94bfb1cb5 100755 --- a/apis/s3/v1beta1/zz_bucketcorsconfiguration_types.go +++ b/apis/s3/v1beta1/zz_bucketcorsconfiguration_types.go @@ -97,7 +97,7 @@ type BucketCorsConfigurationCorsRuleParameters struct { type BucketCorsConfigurationInitParameters struct { // Name of the bucket. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` @@ -134,7 +134,7 @@ type BucketCorsConfigurationObservation struct { type BucketCorsConfigurationParameters struct { // Name of the bucket. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` diff --git a/apis/s3/v1beta1/zz_bucketnotification_types.go b/apis/s3/v1beta1/zz_bucketnotification_types.go index 0d00377b10..48b53ac2ec 100755 --- a/apis/s3/v1beta1/zz_bucketnotification_types.go +++ b/apis/s3/v1beta1/zz_bucketnotification_types.go @@ -16,7 +16,7 @@ import ( type BucketNotificationInitParameters struct { // Name of the bucket for notification configuration. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` @@ -65,7 +65,7 @@ type BucketNotificationObservation struct { type BucketNotificationParameters struct { // Name of the bucket for notification configuration. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` diff --git a/apis/s3/v1beta1/zz_bucketobject_types.go b/apis/s3/v1beta1/zz_bucketobject_types.go index f3054edc74..d1f58bfeda 100755 --- a/apis/s3/v1beta1/zz_bucketobject_types.go +++ b/apis/s3/v1beta1/zz_bucketobject_types.go @@ -19,7 +19,7 @@ type BucketObjectInitParameters struct { ACL *string `json:"acl,omitempty" tf:"acl,omitempty"` // Name of the bucket to put the file in. Alternatively, an S3 access point ARN can be specified. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` @@ -205,7 +205,7 @@ type BucketObjectParameters struct { ACL *string `json:"acl,omitempty" tf:"acl,omitempty"` // Name of the bucket to put the file in. Alternatively, an S3 access point ARN can be specified. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` diff --git a/apis/s3/v1beta1/zz_bucketpolicy_types.go b/apis/s3/v1beta1/zz_bucketpolicy_types.go index 5a0d1f41ad..ea91f76b98 100755 --- a/apis/s3/v1beta1/zz_bucketpolicy_types.go +++ b/apis/s3/v1beta1/zz_bucketpolicy_types.go @@ -16,7 +16,7 @@ import ( type BucketPolicyInitParameters struct { // Name of the bucket to which to apply the policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` @@ -46,7 +46,7 @@ type BucketPolicyObservation struct { type BucketPolicyParameters struct { // Name of the bucket to which to apply the policy. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` diff --git a/apis/s3/v1beta1/zz_bucketpublicaccessblock_types.go b/apis/s3/v1beta1/zz_bucketpublicaccessblock_types.go index e85857fe09..f2f70f3e0d 100755 --- a/apis/s3/v1beta1/zz_bucketpublicaccessblock_types.go +++ b/apis/s3/v1beta1/zz_bucketpublicaccessblock_types.go @@ -22,7 +22,7 @@ type BucketPublicAccessBlockInitParameters struct { BlockPublicPolicy *bool `json:"blockPublicPolicy,omitempty" tf:"block_public_policy,omitempty"` // S3 Bucket to which this Public Access Block configuration should be applied. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` @@ -73,7 +73,7 @@ type BucketPublicAccessBlockParameters struct { BlockPublicPolicy *bool `json:"blockPublicPolicy,omitempty" tf:"block_public_policy,omitempty"` // S3 Bucket to which this Public Access Block configuration should be applied. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` diff --git a/apis/s3/v1beta1/zz_bucketrequestpaymentconfiguration_types.go b/apis/s3/v1beta1/zz_bucketrequestpaymentconfiguration_types.go index eef0d5450a..b6e7738a9e 100755 --- a/apis/s3/v1beta1/zz_bucketrequestpaymentconfiguration_types.go +++ b/apis/s3/v1beta1/zz_bucketrequestpaymentconfiguration_types.go @@ -16,7 +16,7 @@ import ( type BucketRequestPaymentConfigurationInitParameters struct { // Name of the bucket. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` @@ -53,7 +53,7 @@ type BucketRequestPaymentConfigurationObservation struct { type BucketRequestPaymentConfigurationParameters struct { // Name of the bucket. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` diff --git a/apis/s3/v1beta1/zz_generated.conversion_hubs.go b/apis/s3/v1beta1/zz_generated.conversion_hubs.go index 264c325df3..9aac1c64fb 100755 --- a/apis/s3/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/s3/v1beta1/zz_generated.conversion_hubs.go @@ -6,71 +6,26 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Bucket) Hub() {} - // Hub marks this type as a conversion hub. func (tr *BucketAccelerateConfiguration) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *BucketACL) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BucketAnalyticsConfiguration) Hub() {} - // Hub marks this type as a conversion hub. func (tr *BucketCorsConfiguration) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *BucketIntelligentTieringConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BucketInventory) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BucketLifecycleConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BucketLogging) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BucketMetric) Hub() {} - // Hub marks this type as a conversion hub. func (tr *BucketNotification) Hub() {} // Hub marks this type as a conversion hub. func (tr *BucketObject) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *BucketObjectLockConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BucketOwnershipControls) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *BucketPolicy) Hub() {} // Hub marks this type as a conversion hub. func (tr *BucketPublicAccessBlock) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *BucketReplicationConfiguration) Hub() {} - // Hub marks this type as a conversion hub. func (tr *BucketRequestPaymentConfiguration) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *BucketServerSideEncryptionConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BucketVersioning) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BucketWebsiteConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Object) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ObjectCopy) Hub() {} diff --git a/apis/s3/v1beta1/zz_generated.conversion_spokes.go b/apis/s3/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..21889478da --- /dev/null +++ b/apis/s3/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,314 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Bucket to the hub type. +func (tr *Bucket) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Bucket type. 
+func (tr *Bucket) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketACL to the hub type. +func (tr *BucketACL) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketACL type. +func (tr *BucketACL) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketAnalyticsConfiguration to the hub type. +func (tr *BucketAnalyticsConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketAnalyticsConfiguration type. 
+func (tr *BucketAnalyticsConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketIntelligentTieringConfiguration to the hub type. +func (tr *BucketIntelligentTieringConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketIntelligentTieringConfiguration type. +func (tr *BucketIntelligentTieringConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketInventory to the hub type. 
+func (tr *BucketInventory) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketInventory type. +func (tr *BucketInventory) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketLifecycleConfiguration to the hub type. +func (tr *BucketLifecycleConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketLifecycleConfiguration type. 
+func (tr *BucketLifecycleConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketLogging to the hub type. +func (tr *BucketLogging) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketLogging type. +func (tr *BucketLogging) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketMetric to the hub type. +func (tr *BucketMetric) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketMetric type. 
+func (tr *BucketMetric) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketObjectLockConfiguration to the hub type. +func (tr *BucketObjectLockConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketObjectLockConfiguration type. +func (tr *BucketObjectLockConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketOwnershipControls to the hub type. 
+func (tr *BucketOwnershipControls) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketOwnershipControls type. +func (tr *BucketOwnershipControls) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketReplicationConfiguration to the hub type. +func (tr *BucketReplicationConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketReplicationConfiguration type. 
+func (tr *BucketReplicationConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketServerSideEncryptionConfiguration to the hub type. +func (tr *BucketServerSideEncryptionConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketServerSideEncryptionConfiguration type. +func (tr *BucketServerSideEncryptionConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketVersioning to the hub type. 
+func (tr *BucketVersioning) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketVersioning type. +func (tr *BucketVersioning) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BucketWebsiteConfiguration to the hub type. +func (tr *BucketWebsiteConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BucketWebsiteConfiguration type. 
+func (tr *BucketWebsiteConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Object to the hub type. +func (tr *Object) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Object type. +func (tr *Object) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/s3/v1beta1/zz_generated.resolvers.go b/apis/s3/v1beta1/zz_generated.resolvers.go index b16375ea5a..8f050deb89 100644 --- a/apis/s3/v1beta1/zz_generated.resolvers.go +++ b/apis/s3/v1beta1/zz_generated.resolvers.go @@ -76,7 +76,7 @@ func (mg *BucketAccelerateConfiguration) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -95,7 +95,7 @@ func (mg *BucketAccelerateConfiguration) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -231,7 +231,7 @@ func (mg *BucketCorsConfiguration) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -250,7 +250,7 @@ func (mg *BucketCorsConfiguration) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -659,7 +659,7 @@ func (mg *BucketNotification) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err 
= apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -721,7 +721,7 @@ func (mg *BucketNotification) ResolveReferences(ctx context.Context, c client.Re } { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -794,7 +794,7 @@ func (mg *BucketObject) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -832,7 +832,7 @@ func (mg *BucketObject) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -982,7 +982,7 @@ func (mg *BucketPolicy) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = 
apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1001,7 +1001,7 @@ func (mg *BucketPolicy) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1032,7 +1032,7 @@ func (mg *BucketPublicAccessBlock) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1051,7 +1051,7 @@ func (mg *BucketPublicAccessBlock) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1267,7 +1267,7 @@ func (mg *BucketRequestPaymentConfiguration) ResolveReferences(ctx context.Conte var rsp reference.ResolutionResponse var err 
error { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1286,7 +1286,7 @@ func (mg *BucketRequestPaymentConfiguration) ResolveReferences(ctx context.Conte mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/s3/v1beta2/zz_bucket_terraformed.go b/apis/s3/v1beta2/zz_bucket_terraformed.go new file mode 100755 index 0000000000..cbf6976853 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucket_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Bucket +func (mg *Bucket) GetTerraformResourceType() string { + return "aws_s3_bucket" +} + +// GetConnectionDetailsMapping for this Bucket +func (tr *Bucket) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Bucket +func (tr *Bucket) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Bucket +func (tr *Bucket) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Bucket +func (tr *Bucket) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Bucket +func (tr *Bucket) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Bucket +func (tr *Bucket) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Bucket +func (tr *Bucket) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this 
 Bucket +func (tr *Bucket) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Bucket using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Bucket) LateInitialize(attrs []byte) (bool, error) { + params := &BucketParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Bucket) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucket_types.go b/apis/s3/v1beta2/zz_bucket_types.go new file mode 100755 index 0000000000..0c7cf54074 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucket_types.go @@ -0,0 +1,649 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessControlTranslationInitParameters struct { +} + +type AccessControlTranslationObservation struct { + + // Specifies the replica ownership. For default and valid values, see PUT bucket replication in the Amazon S3 API Reference. The only valid value is Destination. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` +} + +type AccessControlTranslationParameters struct { +} + +type ApplyServerSideEncryptionByDefaultInitParameters struct { +} + +type ApplyServerSideEncryptionByDefaultObservation struct { + + // AWS KMS master key ID used for the SSE-KMS encryption. This can only be used when you set the value of sse_algorithm as aws:kms. The default aws/s3 AWS KMS master key is used if this element is absent while the sse_algorithm is aws:kms. + KMSMasterKeyID *string `json:"kmsMasterKeyId,omitempty" tf:"kms_master_key_id,omitempty"` + + // Server-side encryption algorithm to use. 
Valid values are AES256 and aws:kms + SseAlgorithm *string `json:"sseAlgorithm,omitempty" tf:"sse_algorithm,omitempty"` +} + +type ApplyServerSideEncryptionByDefaultParameters struct { +} + +type BucketInitParameters struct { + + // Boolean that indicates all objects (including any locked objects) should be deleted from the bucket when the bucket is destroyed so that the bucket can be destroyed without error. These objects are not recoverable. This only deletes objects when the bucket is destroyed, not when setting this parameter to true. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are true or false. This argument is not supported in all regions or partitions. + ObjectLockEnabled *bool `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketObservation struct { + + // The canned ACL to apply. Valid values are private, public-read, public-read-write, aws-exec-read, authenticated-read, and log-delivery-write. Defaults to private. Conflicts with grant. Use the resource aws_s3_bucket_acl instead. + ACL *string `json:"acl,omitempty" tf:"acl,omitempty"` + + // Sets the accelerate configuration of an existing bucket. Can be Enabled or Suspended. Cannot be used in cn-north-1 or us-gov-west-1. + // Use the resource aws_s3_bucket_accelerate_configuration instead. + AccelerationStatus *string `json:"accelerationStatus,omitempty" tf:"acceleration_status,omitempty"` + + // ARN of the bucket. Will be of format arn:aws:s3:::bucketname. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Bucket domain name. 
Will be of format bucketname.s3.amazonaws.com. + BucketDomainName *string `json:"bucketDomainName,omitempty" tf:"bucket_domain_name,omitempty"` + + // The bucket region-specific domain name. The bucket domain name including the region name. Please refer to the S3 endpoints reference for format. Note: AWS CloudFront allows specifying an S3 region-specific endpoint when creating an S3 origin. This will prevent redirect issues from CloudFront to the S3 Origin URL. For more information, see the Virtual Hosted-Style Requests for Other Regions section in the AWS S3 User Guide. + BucketRegionalDomainName *string `json:"bucketRegionalDomainName,omitempty" tf:"bucket_regional_domain_name,omitempty"` + + // Rule of Cross-Origin Resource Sharing. See CORS rule below for details. Use the resource aws_s3_bucket_cors_configuration instead. + CorsRule []CorsRuleObservation `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // Boolean that indicates all objects (including any locked objects) should be deleted from the bucket when the bucket is destroyed so that the bucket can be destroyed without error. These objects are not recoverable. This only deletes objects when the bucket is destroyed, not when setting this parameter to true. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // An ACL policy grant. See Grant below for details. Conflicts with acl. Use the resource aws_s3_bucket_acl instead. + Grant []GrantObservation `json:"grant,omitempty" tf:"grant,omitempty"` + + // Route 53 Hosted Zone ID for this bucket's region. + HostedZoneID *string `json:"hostedZoneId,omitempty" tf:"hosted_zone_id,omitempty"` + + // Name of the bucket. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration of object lifecycle management. See Lifecycle Rule below for details. 
+ // Use the resource aws_s3_bucket_lifecycle_configuration instead. + LifecycleRule []LifecycleRuleObservation `json:"lifecycleRule,omitempty" tf:"lifecycle_rule,omitempty"` + + // Configuration of S3 bucket logging parameters. See Logging below for details. + // Use the resource aws_s3_bucket_logging instead. + Logging *LoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"` + + // Configuration of S3 object locking. See Object Lock Configuration below for details. + // Use the object_lock_enabled parameter and the resource aws_s3_bucket_object_lock_configuration instead. + ObjectLockConfiguration *ObjectLockConfigurationObservation `json:"objectLockConfiguration,omitempty" tf:"object_lock_configuration,omitempty"` + + // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are true or false. This argument is not supported in all regions or partitions. + ObjectLockEnabled *bool `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // Valid bucket policy JSON document. In this case, please make sure you use the verbose/specific version of the policy. + // Use the resource aws_s3_bucket_policy instead. + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // AWS region this bucket resides in. + // Region is the region you'd like your resource to be created in. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Configuration of replication configuration. See Replication Configuration below for details. + // Use the resource aws_s3_bucket_replication_configuration instead. + ReplicationConfiguration *ReplicationConfigurationObservation `json:"replicationConfiguration,omitempty" tf:"replication_configuration,omitempty"` + + // Specifies who should bear the cost of Amazon S3 data transfer. + // Can be either BucketOwner or Requester. By default, the owner of the S3 bucket would incur the costs of any data transfer. 
+ // See Requester Pays Buckets developer guide for more information. + // Use the resource aws_s3_bucket_request_payment_configuration instead. + RequestPayer *string `json:"requestPayer,omitempty" tf:"request_payer,omitempty"` + + // Configuration of server-side encryption configuration. See Server Side Encryption Configuration below for details. + // Use the resource aws_s3_bucket_server_side_encryption_configuration instead. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfigurationObservation `json:"serverSideEncryptionConfiguration,omitempty" tf:"server_side_encryption_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Configuration of the S3 bucket versioning state. See Versioning below for details. Use the resource aws_s3_bucket_versioning instead. + Versioning *VersioningObservation `json:"versioning,omitempty" tf:"versioning,omitempty"` + + // Configuration of the S3 bucket website. See Website below for details. + // Use the resource aws_s3_bucket_website_configuration instead. + Website *WebsiteObservation `json:"website,omitempty" tf:"website,omitempty"` + + // (Deprecated) Domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. Use the resource aws_s3_bucket_website_configuration instead. + WebsiteDomain *string `json:"websiteDomain,omitempty" tf:"website_domain,omitempty"` + + // (Deprecated) Website endpoint, if the bucket is configured with a website. If not, this will be an empty string. Use the resource aws_s3_bucket_website_configuration instead. 
+ WebsiteEndpoint *string `json:"websiteEndpoint,omitempty" tf:"website_endpoint,omitempty"` +} + +type BucketParameters struct { + + // Boolean that indicates all objects (including any locked objects) should be deleted from the bucket when the bucket is destroyed so that the bucket can be destroyed without error. These objects are not recoverable. This only deletes objects when the bucket is destroyed, not when setting this parameter to true. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. + // +kubebuilder:validation:Optional + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are true or false. This argument is not supported in all regions or partitions. + // +kubebuilder:validation:Optional + ObjectLockEnabled *bool `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // AWS region this bucket resides in. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CorsRuleInitParameters struct { +} + +type CorsRuleObservation struct { + + // List of headers allowed. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // One or more HTTP methods that you allow the origin to execute. Can be GET, PUT, POST, DELETE or HEAD. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // One or more origins you want customers to be able to access the bucket from. 
+ AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object). + ExposeHeaders []*string `json:"exposeHeaders,omitempty" tf:"expose_headers,omitempty"` + + // Specifies time in seconds that browser can cache the response for a preflight request. + MaxAgeSeconds *float64 `json:"maxAgeSeconds,omitempty" tf:"max_age_seconds,omitempty"` +} + +type CorsRuleParameters struct { +} + +type DefaultRetentionInitParameters struct { +} + +type DefaultRetentionObservation struct { + + // Number of days that you want to specify for the default retention period. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Default Object Lock retention mode you want to apply to new objects placed in this bucket. Valid values are GOVERNANCE and COMPLIANCE. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Number of years that you want to specify for the default retention period. + Years *float64 `json:"years,omitempty" tf:"years,omitempty"` +} + +type DefaultRetentionParameters struct { +} + +type DestinationInitParameters struct { +} + +type DestinationObservation struct { + + // Specifies the overrides to use for object owners on replication (documented below). Must be used in conjunction with account_id owner override configuration. + AccessControlTranslation *AccessControlTranslationObservation `json:"accessControlTranslation,omitempty" tf:"access_control_translation,omitempty"` + + // Account ID to use for overriding the object owner on replication. Must be used in conjunction with access_control_translation override configuration. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. 
+ Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Enables replication metrics (documented below). + Metrics *MetricsObservation `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with + // sse_kms_encrypted_objects source selection criteria. + ReplicaKMSKeyID *string `json:"replicaKmsKeyId,omitempty" tf:"replica_kms_key_id,omitempty"` + + // Enables S3 Replication Time Control (S3 RTC) (documented below). + ReplicationTime *ReplicationTimeObservation `json:"replicationTime,omitempty" tf:"replication_time,omitempty"` + + // Specifies the Amazon S3 storage class to which you want the object to transition. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type DestinationParameters struct { +} + +type ExpirationInitParameters struct { +} + +type ExpirationObservation struct { + + // Specifies the date after which you want the corresponding action to take effect. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Specifies the number of days after object creation when the specific rule action takes effect. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers. This cannot be specified with Days or Date in a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `json:"expiredObjectDeleteMarker,omitempty" tf:"expired_object_delete_marker,omitempty"` +} + +type ExpirationParameters struct { +} + +type FilterInitParameters struct { +} + +type FilterObservation struct { + + // Object keyname prefix that identifies subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. 
+ Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // A map of tags that identifies subset of objects to which the rule applies. + // The rule applies only to objects having all the tags in its tagset. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FilterParameters struct { +} + +type GrantInitParameters struct { +} + +type GrantObservation struct { + + // Canonical user id to grant for. Used only when type is CanonicalUser. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of permissions to apply for grantee. Valid values are READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL. + // +listType=set + Permissions []*string `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // Type of grantee to apply for. Valid values are CanonicalUser and Group. AmazonCustomerByEmail is not supported. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Uri address to grant for. Used only when type is Group. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type GrantParameters struct { +} + +type LifecycleRuleInitParameters struct { +} + +type LifecycleRuleObservation struct { + + // Specifies the number of days after initiating a multipart upload when the multipart upload must be completed. + AbortIncompleteMultipartUploadDays *float64 `json:"abortIncompleteMultipartUploadDays,omitempty" tf:"abort_incomplete_multipart_upload_days,omitempty"` + + // Specifies lifecycle rule status. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies a period in the object's expire. See Expiration below for details. + Expiration *ExpirationObservation `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Unique identifier for the rule. Must be less than or equal to 255 characters in length. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies when noncurrent object versions expire. 
See Noncurrent Version Expiration below for details. + NoncurrentVersionExpiration *NoncurrentVersionExpirationObservation `json:"noncurrentVersionExpiration,omitempty" tf:"noncurrent_version_expiration,omitempty"` + + // Specifies when noncurrent object versions transitions. See Noncurrent Version Transition below for details. + NoncurrentVersionTransition []NoncurrentVersionTransitionObservation `json:"noncurrentVersionTransition,omitempty" tf:"noncurrent_version_transition,omitempty"` + + // Object key prefix identifying one or more objects to which the rule applies. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Specifies object tags key and value. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a period in the object's transitions. See Transition below for details. + Transition []TransitionObservation `json:"transition,omitempty" tf:"transition,omitempty"` +} + +type LifecycleRuleParameters struct { +} + +type LoggingInitParameters struct { +} + +type LoggingObservation struct { + + // Name of the bucket that will receive the log objects. + TargetBucket *string `json:"targetBucket,omitempty" tf:"target_bucket,omitempty"` + + // To specify a key prefix for log objects. + TargetPrefix *string `json:"targetPrefix,omitempty" tf:"target_prefix,omitempty"` +} + +type LoggingParameters struct { +} + +type MetricsInitParameters struct { +} + +type MetricsObservation struct { + + // Threshold within which objects are to be replicated. The only valid value is 15. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // Status of RTC. Either Enabled or Disabled. 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type MetricsParameters struct { +} + +type NoncurrentVersionExpirationInitParameters struct { +} + +type NoncurrentVersionExpirationObservation struct { + + // Specifies the number of days after object creation when the specific rule action takes effect. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type NoncurrentVersionExpirationParameters struct { +} + +type NoncurrentVersionTransitionInitParameters struct { +} + +type NoncurrentVersionTransitionObservation struct { + + // Specifies the number of days after object creation when the specific rule action takes effect. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies the Amazon S3 storage class to which you want the object to transition. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type NoncurrentVersionTransitionParameters struct { +} + +type ObjectLockConfigurationInitParameters struct { +} + +type ObjectLockConfigurationObservation struct { + + // Indicates whether this bucket has an Object Lock configuration enabled. Valid value is Enabled. Use the top-level argument object_lock_enabled instead. + ObjectLockEnabled *string `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // Object Lock rule in place for this bucket (documented below). + Rule *RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ObjectLockConfigurationParameters struct { +} + +type ReplicationConfigurationInitParameters struct { +} + +type ReplicationConfigurationObservation struct { + + // ARN of the IAM role for Amazon S3 to assume when replicating the objects. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Specifies the rules managing the replication (documented below). 
+ Rules []RulesObservation `json:"rules,omitempty" tf:"rules,omitempty"` +} + +type ReplicationConfigurationParameters struct { +} + +type ReplicationTimeInitParameters struct { +} + +type ReplicationTimeObservation struct { + + // Threshold within which objects are to be replicated. The only valid value is 15. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // Status of RTC. Either Enabled or Disabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ReplicationTimeParameters struct { +} + +type RuleInitParameters struct { +} + +type RuleObservation struct { + + // Default retention period that you want to apply to new objects placed in this bucket (documented below). + DefaultRetention *DefaultRetentionObservation `json:"defaultRetention,omitempty" tf:"default_retention,omitempty"` +} + +type RuleParameters struct { +} + +type RulesInitParameters struct { +} + +type RulesObservation struct { + + // Whether delete markers are replicated. The only valid value is Enabled. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when filter is used). + DeleteMarkerReplicationStatus *string `json:"deleteMarkerReplicationStatus,omitempty" tf:"delete_marker_replication_status,omitempty"` + + // Specifies the destination for the rule (documented below). + Destination *DestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Filter that identifies subset of objects to which the replication rule applies (documented below). + Filter *FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. Must be less than or equal to 255 characters in length. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. 
+ Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Priority associated with the rule. Priority should only be set if filter is configured. If not provided, defaults to 0. Priority must be unique between multiple rules. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Specifies special object selection criteria (documented below). + SourceSelectionCriteria *SourceSelectionCriteriaObservation `json:"sourceSelectionCriteria,omitempty" tf:"source_selection_criteria,omitempty"` + + // Status of the rule. Either Enabled or Disabled. The rule is ignored if status is not Enabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type RulesParameters struct { +} + +type ServerSideEncryptionConfigurationInitParameters struct { +} + +type ServerSideEncryptionConfigurationObservation struct { + + // Single object for server-side encryption by default configuration. (documented below) + Rule *ServerSideEncryptionConfigurationRuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ServerSideEncryptionConfigurationParameters struct { +} + +type ServerSideEncryptionConfigurationRuleInitParameters struct { +} + +type ServerSideEncryptionConfigurationRuleObservation struct { + + // Single object for setting server-side encryption by default. (documented below) + ApplyServerSideEncryptionByDefault *ApplyServerSideEncryptionByDefaultObservation `json:"applyServerSideEncryptionByDefault,omitempty" tf:"apply_server_side_encryption_by_default,omitempty"` + + // Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. + BucketKeyEnabled *bool `json:"bucketKeyEnabled,omitempty" tf:"bucket_key_enabled,omitempty"` +} + +type ServerSideEncryptionConfigurationRuleParameters struct { +} + +type SourceSelectionCriteriaInitParameters struct { +} + +type SourceSelectionCriteriaObservation struct { + + // Match SSE-KMS encrypted objects (documented below). 
If specified, replica_kms_key_id + // in destination must be specified as well. + SseKMSEncryptedObjects *SseKMSEncryptedObjectsObservation `json:"sseKmsEncryptedObjects,omitempty" tf:"sse_kms_encrypted_objects,omitempty"` +} + +type SourceSelectionCriteriaParameters struct { +} + +type SseKMSEncryptedObjectsInitParameters struct { +} + +type SseKMSEncryptedObjectsObservation struct { + + // Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SseKMSEncryptedObjectsParameters struct { +} + +type TransitionInitParameters struct { +} + +type TransitionObservation struct { + + // Specifies the date after which you want the corresponding action to take effect. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Specifies the number of days after object creation when the specific rule action takes effect. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Specifies the Amazon S3 storage class to which you want the object to transition. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type TransitionParameters struct { +} + +type VersioningInitParameters struct { +} + +type VersioningObservation struct { + + // Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Enable MFA delete for either Change the versioning state of your bucket or Permanently delete an object version. Default is false. 
This cannot be used to toggle this setting but is available to allow managed buckets to reflect the state in AWS + MfaDelete *bool `json:"mfaDelete,omitempty" tf:"mfa_delete,omitempty"` +} + +type VersioningParameters struct { +} + +type WebsiteInitParameters struct { +} + +type WebsiteObservation struct { + + // Absolute path to the document to return in case of a 4XX error. + ErrorDocument *string `json:"errorDocument,omitempty" tf:"error_document,omitempty"` + + // Amazon S3 returns this index document when requests are made to the root domain or any of the subfolders. + IndexDocument *string `json:"indexDocument,omitempty" tf:"index_document,omitempty"` + + // Hostname to redirect all website requests for this bucket to. Hostname can optionally be prefixed with a protocol (http:// or https://) to use when redirecting requests. The default is the protocol that is used in the original request. + RedirectAllRequestsTo *string `json:"redirectAllRequestsTo,omitempty" tf:"redirect_all_requests_to,omitempty"` + + // JSON array containing routing rules + // describing redirect behavior and when redirects are applied. + RoutingRules *string `json:"routingRules,omitempty" tf:"routing_rules,omitempty"` +} + +type WebsiteParameters struct { +} + +// BucketSpec defines the desired state of Bucket +type BucketSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketInitParameters `json:"initProvider,omitempty"` +} + +// BucketStatus defines the observed state of Bucket. +type BucketStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Bucket is the Schema for the Buckets API. Provides a S3 bucket resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Bucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec BucketSpec `json:"spec"` + Status BucketStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketList contains a list of Buckets +type BucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bucket `json:"items"` +} + +// Repository type metadata. +var ( + Bucket_Kind = "Bucket" + Bucket_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Bucket_Kind}.String() + Bucket_KindAPIVersion = Bucket_Kind + "." 
+ CRDGroupVersion.String() + Bucket_GroupVersionKind = CRDGroupVersion.WithKind(Bucket_Kind) +) + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketacl_terraformed.go b/apis/s3/v1beta2/zz_bucketacl_terraformed.go new file mode 100755 index 0000000000..2854c548ef --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketacl_terraformed.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketACL +func (mg *BucketACL) GetTerraformResourceType() string { + return "aws_s3_bucket_acl" +} + +// GetConnectionDetailsMapping for this BucketACL +func (tr *BucketACL) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketACL +func (tr *BucketACL) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketACL +func (tr *BucketACL) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketACL +func (tr *BucketACL) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketACL +func (tr *BucketACL) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketACL +func (tr *BucketACL) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketACL +func (tr *BucketACL) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketACL +func (tr *BucketACL) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketACL using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *BucketACL) LateInitialize(attrs []byte) (bool, error) { + params := &BucketACLParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("AccessControlPolicy")) + opts = append(opts, resource.WithNameFilter("ACL")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketACL) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketacl_types.go b/apis/s3/v1beta2/zz_bucketacl_types.go new file mode 100755 index 0000000000..8782b7ec81 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketacl_types.go @@ -0,0 +1,290 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessControlPolicyGrantInitParameters struct { + + // Configuration block for the person being granted permissions. See below. + Grantee *GranteeInitParameters `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Logging permissions assigned to the grantee for the bucket. Valid values: FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP. See What permissions can I grant? for more details about what each permission means in the context of buckets. + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type AccessControlPolicyGrantObservation struct { + + // Configuration block for the person being granted permissions. 
See below. + Grantee *GranteeObservation `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Logging permissions assigned to the grantee for the bucket. Valid values: FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP. See What permissions can I grant? for more details about what each permission means in the context of buckets. + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type AccessControlPolicyGrantParameters struct { + + // Configuration block for the person being granted permissions. See below. + // +kubebuilder:validation:Optional + Grantee *GranteeParameters `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Logging permissions assigned to the grantee for the bucket. Valid values: FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP. See What permissions can I grant? for more details about what each permission means in the context of buckets. + // +kubebuilder:validation:Optional + Permission *string `json:"permission" tf:"permission,omitempty"` +} + +type AccessControlPolicyInitParameters struct { + + // Set of grant configuration blocks. See below. + Grant []AccessControlPolicyGrantInitParameters `json:"grant,omitempty" tf:"grant,omitempty"` + + // Configuration block for the bucket owner's display name and ID. See below. + Owner *OwnerInitParameters `json:"owner,omitempty" tf:"owner,omitempty"` +} + +type AccessControlPolicyObservation struct { + + // Set of grant configuration blocks. See below. + Grant []AccessControlPolicyGrantObservation `json:"grant,omitempty" tf:"grant,omitempty"` + + // Configuration block for the bucket owner's display name and ID. See below. + Owner *OwnerObservation `json:"owner,omitempty" tf:"owner,omitempty"` +} + +type AccessControlPolicyParameters struct { + + // Set of grant configuration blocks. See below. 
+ // +kubebuilder:validation:Optional + Grant []AccessControlPolicyGrantParameters `json:"grant,omitempty" tf:"grant,omitempty"` + + // Configuration block for the bucket owner's display name and ID. See below. + // +kubebuilder:validation:Optional + Owner *OwnerParameters `json:"owner" tf:"owner,omitempty"` +} + +type BucketACLInitParameters struct { + + // Canned ACL to apply to the bucket. + ACL *string `json:"acl,omitempty" tf:"acl,omitempty"` + + // Configuration block that sets the ACL permissions for an object per grantee. See below. + AccessControlPolicy *AccessControlPolicyInitParameters `json:"accessControlPolicy,omitempty" tf:"access_control_policy,omitempty"` + + // Bucket to which to apply the ACL. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` +} + +type BucketACLObservation struct { + + // Canned ACL to apply to the bucket. + ACL *string `json:"acl,omitempty" tf:"acl,omitempty"` + + // Configuration block that sets the ACL permissions for an object per grantee. See below. + AccessControlPolicy *AccessControlPolicyObservation `json:"accessControlPolicy,omitempty" tf:"access_control_policy,omitempty"` + + // Bucket to which to apply the ACL. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Account ID of the expected bucket owner. 
+ ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // The bucket, expected_bucket_owner (if configured), and acl (if configured) separated by commas (,). + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type BucketACLParameters struct { + + // Canned ACL to apply to the bucket. + // +kubebuilder:validation:Optional + ACL *string `json:"acl,omitempty" tf:"acl,omitempty"` + + // Configuration block that sets the ACL permissions for an object per grantee. See below. + // +kubebuilder:validation:Optional + AccessControlPolicy *AccessControlPolicyParameters `json:"accessControlPolicy,omitempty" tf:"access_control_policy,omitempty"` + + // Bucket to which to apply the ACL. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. + // +kubebuilder:validation:Optional + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type GranteeInitParameters struct { + + // Email address of the grantee. See Regions and Endpoints for supported AWS regions where this argument can be specified. 
+ EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // ID of the owner. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of grantee. Valid values: CanonicalUser, AmazonCustomerByEmail, Group. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // URI of the grantee group. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type GranteeObservation struct { + + // Display name of the owner. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Email address of the grantee. See Regions and Endpoints for supported AWS regions where this argument can be specified. + EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // ID of the owner. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of grantee. Valid values: CanonicalUser, AmazonCustomerByEmail, Group. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // URI of the grantee group. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type GranteeParameters struct { + + // Email address of the grantee. See Regions and Endpoints for supported AWS regions where this argument can be specified. + // +kubebuilder:validation:Optional + EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // ID of the owner. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of grantee. Valid values: CanonicalUser, AmazonCustomerByEmail, Group. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // URI of the grantee group. + // +kubebuilder:validation:Optional + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type OwnerInitParameters struct { + + // Display name of the owner. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // ID of the owner. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type OwnerObservation struct { + + // Display name of the owner. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // ID of the owner. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type OwnerParameters struct { + + // Display name of the owner. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // ID of the owner. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` +} + +// BucketACLSpec defines the desired state of BucketACL +type BucketACLSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketACLParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketACLInitParameters `json:"initProvider,omitempty"` +} + +// BucketACLStatus defines the observed state of BucketACL. +type BucketACLStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketACLObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketACL is the Schema for the BucketACLs API. Provides an S3 bucket ACL resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketACL struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec BucketACLSpec `json:"spec"` + Status BucketACLStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketACLList contains a list of BucketACLs +type BucketACLList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketACL `json:"items"` +} + +// Repository type metadata. +var ( + BucketACL_Kind = "BucketACL" + BucketACL_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketACL_Kind}.String() + BucketACL_KindAPIVersion = BucketACL_Kind + "." + CRDGroupVersion.String() + BucketACL_GroupVersionKind = CRDGroupVersion.WithKind(BucketACL_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketACL{}, &BucketACLList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketanalyticsconfiguration_terraformed.go b/apis/s3/v1beta2/zz_bucketanalyticsconfiguration_terraformed.go new file mode 100755 index 0000000000..b204387078 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketanalyticsconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketAnalyticsConfiguration +func (mg *BucketAnalyticsConfiguration) GetTerraformResourceType() string { + return "aws_s3_bucket_analytics_configuration" +} + +// GetConnectionDetailsMapping for this BucketAnalyticsConfiguration +func (tr *BucketAnalyticsConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketAnalyticsConfiguration +func (tr *BucketAnalyticsConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketAnalyticsConfiguration +func (tr *BucketAnalyticsConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketAnalyticsConfiguration +func (tr *BucketAnalyticsConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketAnalyticsConfiguration +func (tr *BucketAnalyticsConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketAnalyticsConfiguration +func (tr *BucketAnalyticsConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketAnalyticsConfiguration +func (tr *BucketAnalyticsConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketAnalyticsConfiguration +func (tr *BucketAnalyticsConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketAnalyticsConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *BucketAnalyticsConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &BucketAnalyticsConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketAnalyticsConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketanalyticsconfiguration_types.go b/apis/s3/v1beta2/zz_bucketanalyticsconfiguration_types.go new file mode 100755 index 0000000000..2925b04398 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketanalyticsconfiguration_types.go @@ -0,0 +1,319 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketAnalyticsConfigurationFilterInitParameters struct { + + // Object prefix for filtering. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketAnalyticsConfigurationFilterObservation struct { + + // Object prefix for filtering. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketAnalyticsConfigurationFilterParameters struct { + + // Object prefix for filtering. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketAnalyticsConfigurationInitParameters struct { + + // Name of the bucket this analytics configuration is associated with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). + Filter *BucketAnalyticsConfigurationFilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier of the analytics configuration for the bucket. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration for the analytics data export (documented below). + StorageClassAnalysis *StorageClassAnalysisInitParameters `json:"storageClassAnalysis,omitempty" tf:"storage_class_analysis,omitempty"` +} + +type BucketAnalyticsConfigurationObservation struct { + + // Name of the bucket this analytics configuration is associated with. 
+ Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). + Filter *BucketAnalyticsConfigurationFilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Unique identifier of the analytics configuration for the bucket. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration for the analytics data export (documented below). + StorageClassAnalysis *StorageClassAnalysisObservation `json:"storageClassAnalysis,omitempty" tf:"storage_class_analysis,omitempty"` +} + +type BucketAnalyticsConfigurationParameters struct { + + // Name of the bucket this analytics configuration is associated with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). + // +kubebuilder:validation:Optional + Filter *BucketAnalyticsConfigurationFilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier of the analytics configuration for the bucket. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration for the analytics data export (documented below). + // +kubebuilder:validation:Optional + StorageClassAnalysis *StorageClassAnalysisParameters `json:"storageClassAnalysis,omitempty" tf:"storage_class_analysis,omitempty"` +} + +type DataExportDestinationInitParameters struct { + + // Analytics data export currently only supports an S3 bucket destination (documented below). + S3BucketDestination *S3BucketDestinationInitParameters `json:"s3BucketDestination,omitempty" tf:"s3_bucket_destination,omitempty"` +} + +type DataExportDestinationObservation struct { + + // Analytics data export currently only supports an S3 bucket destination (documented below). + S3BucketDestination *S3BucketDestinationObservation `json:"s3BucketDestination,omitempty" tf:"s3_bucket_destination,omitempty"` +} + +type DataExportDestinationParameters struct { + + // Analytics data export currently only supports an S3 bucket destination (documented below). + // +kubebuilder:validation:Optional + S3BucketDestination *S3BucketDestinationParameters `json:"s3BucketDestination" tf:"s3_bucket_destination,omitempty"` +} + +type DataExportInitParameters struct { + + // Specifies the destination for the exported analytics data (documented below). + Destination *DataExportDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Schema version of exported analytics data. Allowed values: V_1. Default value: V_1. + OutputSchemaVersion *string `json:"outputSchemaVersion,omitempty" tf:"output_schema_version,omitempty"` +} + +type DataExportObservation struct { + + // Specifies the destination for the exported analytics data (documented below). + Destination *DataExportDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Schema version of exported analytics data. Allowed values: V_1. Default value: V_1. 
+ OutputSchemaVersion *string `json:"outputSchemaVersion,omitempty" tf:"output_schema_version,omitempty"` +} + +type DataExportParameters struct { + + // Specifies the destination for the exported analytics data (documented below). + // +kubebuilder:validation:Optional + Destination *DataExportDestinationParameters `json:"destination" tf:"destination,omitempty"` + + // Schema version of exported analytics data. Allowed values: V_1. Default value: V_1. + // +kubebuilder:validation:Optional + OutputSchemaVersion *string `json:"outputSchemaVersion,omitempty" tf:"output_schema_version,omitempty"` +} + +type S3BucketDestinationInitParameters struct { + + // Account ID that owns the destination bucket. + BucketAccountID *string `json:"bucketAccountId,omitempty" tf:"bucket_account_id,omitempty"` + + // ARN of the destination bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Output format of exported analytics data. Allowed values: CSV. Default value: CSV. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // Object prefix for filtering. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3BucketDestinationObservation struct { + + // Account ID that owns the destination bucket. + BucketAccountID *string `json:"bucketAccountId,omitempty" tf:"bucket_account_id,omitempty"` + + // ARN of the destination bucket. 
+ BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Output format of exported analytics data. Allowed values: CSV. Default value: CSV. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // Object prefix for filtering. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3BucketDestinationParameters struct { + + // Account ID that owns the destination bucket. + // +kubebuilder:validation:Optional + BucketAccountID *string `json:"bucketAccountId,omitempty" tf:"bucket_account_id,omitempty"` + + // ARN of the destination bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Output format of exported analytics data. Allowed values: CSV. Default value: CSV. + // +kubebuilder:validation:Optional + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // Object prefix for filtering. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type StorageClassAnalysisInitParameters struct { + + // Data export configuration (documented below). + DataExport *DataExportInitParameters `json:"dataExport,omitempty" tf:"data_export,omitempty"` +} + +type StorageClassAnalysisObservation struct { + + // Data export configuration (documented below). 
+ DataExport *DataExportObservation `json:"dataExport,omitempty" tf:"data_export,omitempty"` +} + +type StorageClassAnalysisParameters struct { + + // Data export configuration (documented below). + // +kubebuilder:validation:Optional + DataExport *DataExportParameters `json:"dataExport" tf:"data_export,omitempty"` +} + +// BucketAnalyticsConfigurationSpec defines the desired state of BucketAnalyticsConfiguration +type BucketAnalyticsConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketAnalyticsConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketAnalyticsConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// BucketAnalyticsConfigurationStatus defines the observed state of BucketAnalyticsConfiguration. +type BucketAnalyticsConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketAnalyticsConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketAnalyticsConfiguration is the Schema for the BucketAnalyticsConfigurations API. Provides a S3 bucket analytics configuration resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketAnalyticsConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec BucketAnalyticsConfigurationSpec `json:"spec"` + Status BucketAnalyticsConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketAnalyticsConfigurationList contains a list of BucketAnalyticsConfigurations +type BucketAnalyticsConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketAnalyticsConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + BucketAnalyticsConfiguration_Kind = "BucketAnalyticsConfiguration" + BucketAnalyticsConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketAnalyticsConfiguration_Kind}.String() + BucketAnalyticsConfiguration_KindAPIVersion = BucketAnalyticsConfiguration_Kind + "." 
+ CRDGroupVersion.String() + BucketAnalyticsConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(BucketAnalyticsConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketAnalyticsConfiguration{}, &BucketAnalyticsConfigurationList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketintelligenttieringconfiguration_terraformed.go b/apis/s3/v1beta2/zz_bucketintelligenttieringconfiguration_terraformed.go new file mode 100755 index 0000000000..c1ecda1b63 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketintelligenttieringconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketIntelligentTieringConfiguration +func (mg *BucketIntelligentTieringConfiguration) GetTerraformResourceType() string { + return "aws_s3_bucket_intelligent_tiering_configuration" +} + +// GetConnectionDetailsMapping for this BucketIntelligentTieringConfiguration +func (tr *BucketIntelligentTieringConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketIntelligentTieringConfiguration +func (tr *BucketIntelligentTieringConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketIntelligentTieringConfiguration +func (tr *BucketIntelligentTieringConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID 
returns ID of underlying Terraform resource of this BucketIntelligentTieringConfiguration +func (tr *BucketIntelligentTieringConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketIntelligentTieringConfiguration +func (tr *BucketIntelligentTieringConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketIntelligentTieringConfiguration +func (tr *BucketIntelligentTieringConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketIntelligentTieringConfiguration +func (tr *BucketIntelligentTieringConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketIntelligentTieringConfiguration +func (tr *BucketIntelligentTieringConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketIntelligentTieringConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BucketIntelligentTieringConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &BucketIntelligentTieringConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketIntelligentTieringConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketintelligenttieringconfiguration_types.go b/apis/s3/v1beta2/zz_bucketintelligenttieringconfiguration_types.go new file mode 100755 index 0000000000..b8321b0028 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketintelligenttieringconfiguration_types.go @@ -0,0 +1,223 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketIntelligentTieringConfigurationFilterInitParameters struct { + + // Object key name prefix that identifies the subset of objects to which the configuration applies. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketIntelligentTieringConfigurationFilterObservation struct { + + // Object key name prefix that identifies the subset of objects to which the configuration applies. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketIntelligentTieringConfigurationFilterParameters struct { + + // Object key name prefix that identifies the subset of objects to which the configuration applies. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketIntelligentTieringConfigurationInitParameters struct { + + // Name of the bucket this intelligent tiering configuration is associated with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. 
+ // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Bucket filter. The configuration only includes objects that meet the filter's criteria (documented below). + Filter *BucketIntelligentTieringConfigurationFilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique name used to identify the S3 Intelligent-Tiering configuration for the bucket. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the status of the configuration. Valid values: Enabled, Disabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // S3 Intelligent-Tiering storage class tiers of the configuration (documented below). + Tiering []TieringInitParameters `json:"tiering,omitempty" tf:"tiering,omitempty"` +} + +type BucketIntelligentTieringConfigurationObservation struct { + + // Name of the bucket this intelligent tiering configuration is associated with. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Bucket filter. The configuration only includes objects that meet the filter's criteria (documented below). + Filter *BucketIntelligentTieringConfigurationFilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Unique name used to identify the S3 Intelligent-Tiering configuration for the bucket. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the status of the configuration. Valid values: Enabled, Disabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // S3 Intelligent-Tiering storage class tiers of the configuration (documented below). 
+ Tiering []TieringObservation `json:"tiering,omitempty" tf:"tiering,omitempty"` +} + +type BucketIntelligentTieringConfigurationParameters struct { + + // Name of the bucket this intelligent tiering configuration is associated with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Bucket filter. The configuration only includes objects that meet the filter's criteria (documented below). + // +kubebuilder:validation:Optional + Filter *BucketIntelligentTieringConfigurationFilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique name used to identify the S3 Intelligent-Tiering configuration for the bucket. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies the status of the configuration. Valid values: Enabled, Disabled. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // S3 Intelligent-Tiering storage class tiers of the configuration (documented below). + // +kubebuilder:validation:Optional + Tiering []TieringParameters `json:"tiering,omitempty" tf:"tiering,omitempty"` +} + +type TieringInitParameters struct { + + // S3 Intelligent-Tiering access tier. 
Valid values: ARCHIVE_ACCESS, DEEP_ARCHIVE_ACCESS. + AccessTier *string `json:"accessTier,omitempty" tf:"access_tier,omitempty"` + + // Number of consecutive days of no access after which an object will be eligible to be transitioned to the corresponding tier. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type TieringObservation struct { + + // S3 Intelligent-Tiering access tier. Valid values: ARCHIVE_ACCESS, DEEP_ARCHIVE_ACCESS. + AccessTier *string `json:"accessTier,omitempty" tf:"access_tier,omitempty"` + + // Number of consecutive days of no access after which an object will be eligible to be transitioned to the corresponding tier. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type TieringParameters struct { + + // S3 Intelligent-Tiering access tier. Valid values: ARCHIVE_ACCESS, DEEP_ARCHIVE_ACCESS. + // +kubebuilder:validation:Optional + AccessTier *string `json:"accessTier" tf:"access_tier,omitempty"` + + // Number of consecutive days of no access after which an object will be eligible to be transitioned to the corresponding tier. + // +kubebuilder:validation:Optional + Days *float64 `json:"days" tf:"days,omitempty"` +} + +// BucketIntelligentTieringConfigurationSpec defines the desired state of BucketIntelligentTieringConfiguration +type BucketIntelligentTieringConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketIntelligentTieringConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketIntelligentTieringConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// BucketIntelligentTieringConfigurationStatus defines the observed state of BucketIntelligentTieringConfiguration. +type BucketIntelligentTieringConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketIntelligentTieringConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketIntelligentTieringConfiguration is the Schema for the BucketIntelligentTieringConfigurations API. Provides an S3 Intelligent-Tiering configuration resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketIntelligentTieringConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.tiering) || (has(self.initProvider) && has(self.initProvider.tiering))",message="spec.forProvider.tiering is a required parameter" + Spec BucketIntelligentTieringConfigurationSpec `json:"spec"` + Status BucketIntelligentTieringConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketIntelligentTieringConfigurationList contains a list of BucketIntelligentTieringConfigurations +type BucketIntelligentTieringConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketIntelligentTieringConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + BucketIntelligentTieringConfiguration_Kind = "BucketIntelligentTieringConfiguration" + BucketIntelligentTieringConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketIntelligentTieringConfiguration_Kind}.String() + BucketIntelligentTieringConfiguration_KindAPIVersion = BucketIntelligentTieringConfiguration_Kind + "." + CRDGroupVersion.String() + BucketIntelligentTieringConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(BucketIntelligentTieringConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketIntelligentTieringConfiguration{}, &BucketIntelligentTieringConfigurationList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketinventory_terraformed.go b/apis/s3/v1beta2/zz_bucketinventory_terraformed.go new file mode 100755 index 0000000000..4041490598 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketinventory_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketInventory +func (mg *BucketInventory) GetTerraformResourceType() string { + return "aws_s3_bucket_inventory" +} + +// GetConnectionDetailsMapping for this BucketInventory +func (tr *BucketInventory) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketInventory +func (tr *BucketInventory) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketInventory +func (tr *BucketInventory) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketInventory +func (tr *BucketInventory) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketInventory +func (tr *BucketInventory) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketInventory +func (tr *BucketInventory) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketInventory +func (tr *BucketInventory) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this BucketInventory
+func (tr *BucketInventory) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this BucketInventory using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *BucketInventory) LateInitialize(attrs []byte) (bool, error) {
+	params := &BucketInventoryParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketInventory) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketinventory_types.go b/apis/s3/v1beta2/zz_bucketinventory_types.go new file mode 100755 index 0000000000..383c27e766 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketinventory_types.go @@ -0,0 +1,390 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketInventoryDestinationInitParameters struct { + + // Name of the source bucket that inventory lists the objects for. + Bucket *DestinationBucketInitParameters `json:"bucket,omitempty" tf:"bucket,omitempty"` +} + +type BucketInventoryDestinationObservation struct { + + // Name of the source bucket that inventory lists the objects for. + Bucket *DestinationBucketObservation `json:"bucket,omitempty" tf:"bucket,omitempty"` +} + +type BucketInventoryDestinationParameters struct { + + // Name of the source bucket that inventory lists the objects for. + // +kubebuilder:validation:Optional + Bucket *DestinationBucketParameters `json:"bucket" tf:"bucket,omitempty"` +} + +type BucketInventoryFilterInitParameters struct { + + // Prefix that an object must have to be included in the inventory results. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type BucketInventoryFilterObservation struct { + + // Prefix that an object must have to be included in the inventory results. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type BucketInventoryFilterParameters struct { + + // Prefix that an object must have to be included in the inventory results. 
+ // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type BucketInventoryInitParameters struct { + + // Name of the source bucket that inventory lists the objects for. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Contains information about where to publish the inventory results (documented below). + Destination *BucketInventoryDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Specifies whether the inventory is enabled or disabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria (documented below). + Filter *BucketInventoryFilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Object versions to include in the inventory list. Valid values: All, Current. + IncludedObjectVersions *string `json:"includedObjectVersions,omitempty" tf:"included_object_versions,omitempty"` + + // Unique identifier of the inventory configuration for the bucket. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // List of optional fields that are included in the inventory results. Please refer to the S3 documentation for more details. 
+ // +listType=set + OptionalFields []*string `json:"optionalFields,omitempty" tf:"optional_fields,omitempty"` + + // Specifies the schedule for generating inventory results (documented below). + Schedule *ScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type BucketInventoryObservation struct { + + // Name of the source bucket that inventory lists the objects for. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Contains information about where to publish the inventory results (documented below). + Destination *BucketInventoryDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Specifies whether the inventory is enabled or disabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria (documented below). + Filter *BucketInventoryFilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Object versions to include in the inventory list. Valid values: All, Current. + IncludedObjectVersions *string `json:"includedObjectVersions,omitempty" tf:"included_object_versions,omitempty"` + + // Unique identifier of the inventory configuration for the bucket. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // List of optional fields that are included in the inventory results. Please refer to the S3 documentation for more details. + // +listType=set + OptionalFields []*string `json:"optionalFields,omitempty" tf:"optional_fields,omitempty"` + + // Specifies the schedule for generating inventory results (documented below). + Schedule *ScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type BucketInventoryParameters struct { + + // Name of the source bucket that inventory lists the objects for. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Contains information about where to publish the inventory results (documented below). + // +kubebuilder:validation:Optional + Destination *BucketInventoryDestinationParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Specifies whether the inventory is enabled or disabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria (documented below). + // +kubebuilder:validation:Optional + Filter *BucketInventoryFilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Object versions to include in the inventory list. Valid values: All, Current. + // +kubebuilder:validation:Optional + IncludedObjectVersions *string `json:"includedObjectVersions,omitempty" tf:"included_object_versions,omitempty"` + + // Unique identifier of the inventory configuration for the bucket. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // List of optional fields that are included in the inventory results. Please refer to the S3 documentation for more details. 
+ // +kubebuilder:validation:Optional + // +listType=set + OptionalFields []*string `json:"optionalFields,omitempty" tf:"optional_fields,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies the schedule for generating inventory results (documented below). + // +kubebuilder:validation:Optional + Schedule *ScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type DestinationBucketInitParameters struct { + + // ID of the account that owns the destination bucket. Recommended to be set to prevent problems if the destination bucket ownership changes. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Amazon S3 bucket ARN of the destination. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Contains the type of server-side encryption to use to encrypt the inventory (documented below). + Encryption *EncryptionInitParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // Specifies the output format of the inventory results. Can be CSV, ORC or Parquet. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // Prefix that an object must have to be included in the inventory results. 
+ Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type DestinationBucketObservation struct { + + // ID of the account that owns the destination bucket. Recommended to be set to prevent problems if the destination bucket ownership changes. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Amazon S3 bucket ARN of the destination. + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Contains the type of server-side encryption to use to encrypt the inventory (documented below). + Encryption *EncryptionObservation `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // Specifies the output format of the inventory results. Can be CSV, ORC or Parquet. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // Prefix that an object must have to be included in the inventory results. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type DestinationBucketParameters struct { + + // ID of the account that owns the destination bucket. Recommended to be set to prevent problems if the destination bucket ownership changes. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Amazon S3 bucket ARN of the destination. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + BucketArn *string `json:"bucketArn,omitempty" tf:"bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate bucketArn. + // +kubebuilder:validation:Optional + BucketArnRef *v1.Reference `json:"bucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketArn. 
+ // +kubebuilder:validation:Optional + BucketArnSelector *v1.Selector `json:"bucketArnSelector,omitempty" tf:"-"` + + // Contains the type of server-side encryption to use to encrypt the inventory (documented below). + // +kubebuilder:validation:Optional + Encryption *EncryptionParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // Specifies the output format of the inventory results. Can be CSV, ORC or Parquet. + // +kubebuilder:validation:Optional + Format *string `json:"format" tf:"format,omitempty"` + + // Prefix that an object must have to be included in the inventory results. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type EncryptionInitParameters struct { + + // Specifies to use server-side encryption with AWS KMS-managed keys to encrypt the inventory file (documented below). + SseKMS *SseKMSInitParameters `json:"sseKms,omitempty" tf:"sse_kms,omitempty"` + + // Specifies to use server-side encryption with Amazon S3-managed keys (SSE-S3) to encrypt the inventory file. + SseS3 *SseS3InitParameters `json:"sseS3,omitempty" tf:"sse_s3,omitempty"` +} + +type EncryptionObservation struct { + + // Specifies to use server-side encryption with AWS KMS-managed keys to encrypt the inventory file (documented below). + SseKMS *SseKMSObservation `json:"sseKms,omitempty" tf:"sse_kms,omitempty"` + + // Specifies to use server-side encryption with Amazon S3-managed keys (SSE-S3) to encrypt the inventory file. + SseS3 *SseS3Parameters `json:"sseS3,omitempty" tf:"sse_s3,omitempty"` +} + +type EncryptionParameters struct { + + // Specifies to use server-side encryption with AWS KMS-managed keys to encrypt the inventory file (documented below). + // +kubebuilder:validation:Optional + SseKMS *SseKMSParameters `json:"sseKms,omitempty" tf:"sse_kms,omitempty"` + + // Specifies to use server-side encryption with Amazon S3-managed keys (SSE-S3) to encrypt the inventory file. 
+ // +kubebuilder:validation:Optional + SseS3 *SseS3Parameters `json:"sseS3,omitempty" tf:"sse_s3,omitempty"` +} + +type ScheduleInitParameters struct { + + // Specifies how frequently inventory results are produced. Valid values: Daily, Weekly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` +} + +type ScheduleObservation struct { + + // Specifies how frequently inventory results are produced. Valid values: Daily, Weekly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` +} + +type ScheduleParameters struct { + + // Specifies how frequently inventory results are produced. Valid values: Daily, Weekly. + // +kubebuilder:validation:Optional + Frequency *string `json:"frequency" tf:"frequency,omitempty"` +} + +type SseKMSInitParameters struct { + + // ARN of the KMS customer master key (CMK) used to encrypt the inventory file. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` +} + +type SseKMSObservation struct { + + // ARN of the KMS customer master key (CMK) used to encrypt the inventory file. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` +} + +type SseKMSParameters struct { + + // ARN of the KMS customer master key (CMK) used to encrypt the inventory file. + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId" tf:"key_id,omitempty"` +} + +type SseS3InitParameters struct { +} + +type SseS3Observation struct { +} + +type SseS3Parameters struct { +} + +// BucketInventorySpec defines the desired state of BucketInventory +type BucketInventorySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketInventoryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketInventoryInitParameters `json:"initProvider,omitempty"` +} + +// BucketInventoryStatus defines the observed state of BucketInventory. +type BucketInventoryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketInventoryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketInventory is the Schema for the BucketInventorys API. Provides a S3 bucket inventory configuration resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketInventory struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destination) || (has(self.initProvider) && has(self.initProvider.destination))",message="spec.forProvider.destination is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.includedObjectVersions) || (has(self.initProvider) && has(self.initProvider.includedObjectVersions))",message="spec.forProvider.includedObjectVersions is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.schedule) || (has(self.initProvider) && has(self.initProvider.schedule))",message="spec.forProvider.schedule is a required parameter" + Spec BucketInventorySpec `json:"spec"` + Status BucketInventoryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketInventoryList contains a list of BucketInventorys +type BucketInventoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketInventory `json:"items"` +} + +// Repository type metadata. +var ( + BucketInventory_Kind = "BucketInventory" + BucketInventory_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketInventory_Kind}.String() + BucketInventory_KindAPIVersion = BucketInventory_Kind + "." 
+ CRDGroupVersion.String() + BucketInventory_GroupVersionKind = CRDGroupVersion.WithKind(BucketInventory_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketInventory{}, &BucketInventoryList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_terraformed.go b/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_terraformed.go new file mode 100755 index 0000000000..9f9a42651a --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketLifecycleConfiguration +func (mg *BucketLifecycleConfiguration) GetTerraformResourceType() string { + return "aws_s3_bucket_lifecycle_configuration" +} + +// GetConnectionDetailsMapping for this BucketLifecycleConfiguration +func (tr *BucketLifecycleConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketLifecycleConfiguration +func (tr *BucketLifecycleConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketLifecycleConfiguration +func (tr *BucketLifecycleConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketLifecycleConfiguration +func (tr *BucketLifecycleConfiguration) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketLifecycleConfiguration +func (tr *BucketLifecycleConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketLifecycleConfiguration +func (tr *BucketLifecycleConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketLifecycleConfiguration +func (tr *BucketLifecycleConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketLifecycleConfiguration +func (tr *BucketLifecycleConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketLifecycleConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BucketLifecycleConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &BucketLifecycleConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketLifecycleConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_types.go b/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_types.go new file mode 100755 index 0000000000..4fdc3e6c13 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_types.go @@ -0,0 +1,546 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AbortIncompleteMultipartUploadInitParameters struct { + + // Number of days after which Amazon S3 aborts an incomplete multipart upload. 
+ DaysAfterInitiation *float64 `json:"daysAfterInitiation,omitempty" tf:"days_after_initiation,omitempty"` +} + +type AbortIncompleteMultipartUploadObservation struct { + + // Number of days after which Amazon S3 aborts an incomplete multipart upload. + DaysAfterInitiation *float64 `json:"daysAfterInitiation,omitempty" tf:"days_after_initiation,omitempty"` +} + +type AbortIncompleteMultipartUploadParameters struct { + + // Number of days after which Amazon S3 aborts an incomplete multipart upload. + // +kubebuilder:validation:Optional + DaysAfterInitiation *float64 `json:"daysAfterInitiation,omitempty" tf:"days_after_initiation,omitempty"` +} + +type AndInitParameters struct { + + // Minimum object size (in bytes) to which the rule applies. + ObjectSizeGreaterThan *float64 `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + // Maximum object size (in bytes) to which the rule applies. + ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. All of these tags must exist in the object's tag set in order for the rule to apply. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AndObservation struct { + + // Minimum object size (in bytes) to which the rule applies. + ObjectSizeGreaterThan *float64 `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + // Maximum object size (in bytes) to which the rule applies. + ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. 
Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. All of these tags must exist in the object's tag set in order for the rule to apply. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AndParameters struct { + + // Minimum object size (in bytes) to which the rule applies. + // +kubebuilder:validation:Optional + ObjectSizeGreaterThan *float64 `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + // Maximum object size (in bytes) to which the rule applies. + // +kubebuilder:validation:Optional + ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. All of these tags must exist in the object's tag set in order for the rule to apply. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketLifecycleConfigurationInitParameters struct { + + // Name of the source S3 bucket you want Amazon S3 to monitor. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. 
+ // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // List of configuration blocks describing the rules managing the replication. See below. + Rule []BucketLifecycleConfigurationRuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketLifecycleConfigurationObservation struct { + + // Name of the source S3 bucket you want Amazon S3 to monitor. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // and status) + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // List of configuration blocks describing the rules managing the replication. See below. + Rule []BucketLifecycleConfigurationRuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketLifecycleConfigurationParameters struct { + + // Name of the source S3 bucket you want Amazon S3 to monitor. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. 
+ // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error. + // +kubebuilder:validation:Optional + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // List of configuration blocks describing the rules managing the replication. See below. + // +kubebuilder:validation:Optional + Rule []BucketLifecycleConfigurationRuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketLifecycleConfigurationRuleInitParameters struct { + + // Configuration block that specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. See below. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUploadInitParameters `json:"abortIncompleteMultipartUpload,omitempty" tf:"abort_incomplete_multipart_upload,omitempty"` + + // Configuration block that specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. See below. + Expiration *RuleExpirationInitParameters `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Configuration block used to identify objects that a Lifecycle Rule applies to. See below. If not specified, the rule will default to using prefix. + Filter *RuleFilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. 
The value cannot be longer than 255 characters. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block that specifies when noncurrent object versions expire. See below. + NoncurrentVersionExpiration *RuleNoncurrentVersionExpirationInitParameters `json:"noncurrentVersionExpiration,omitempty" tf:"noncurrent_version_expiration,omitempty"` + + // Set of configuration blocks that specify the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. See below. + NoncurrentVersionTransition []RuleNoncurrentVersionTransitionInitParameters `json:"noncurrentVersionTransition,omitempty" tf:"noncurrent_version_transition,omitempty"` + + // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Whether the rule is currently being applied. Valid values: Enabled or Disabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Set of configuration blocks that specify when an Amazon S3 object transitions to a specified storage class. See below. + Transition []RuleTransitionInitParameters `json:"transition,omitempty" tf:"transition,omitempty"` +} + +type BucketLifecycleConfigurationRuleObservation struct { + + // Configuration block that specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. See below. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUploadObservation `json:"abortIncompleteMultipartUpload,omitempty" tf:"abort_incomplete_multipart_upload,omitempty"` + + // Configuration block that specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. See below. 
+ Expiration *RuleExpirationObservation `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Configuration block used to identify objects that a Lifecycle Rule applies to. See below. If not specified, the rule will default to using prefix. + Filter *RuleFilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block that specifies when noncurrent object versions expire. See below. + NoncurrentVersionExpiration *RuleNoncurrentVersionExpirationObservation `json:"noncurrentVersionExpiration,omitempty" tf:"noncurrent_version_expiration,omitempty"` + + // Set of configuration blocks that specify the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. See below. + NoncurrentVersionTransition []RuleNoncurrentVersionTransitionObservation `json:"noncurrentVersionTransition,omitempty" tf:"noncurrent_version_transition,omitempty"` + + // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Whether the rule is currently being applied. Valid values: Enabled or Disabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Set of configuration blocks that specify when an Amazon S3 object transitions to a specified storage class. See below. + Transition []RuleTransitionObservation `json:"transition,omitempty" tf:"transition,omitempty"` +} + +type BucketLifecycleConfigurationRuleParameters struct { + + // Configuration block that specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. See below. 
+ // +kubebuilder:validation:Optional + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUploadParameters `json:"abortIncompleteMultipartUpload,omitempty" tf:"abort_incomplete_multipart_upload,omitempty"` + + // Configuration block that specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. See below. + // +kubebuilder:validation:Optional + Expiration *RuleExpirationParameters `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Configuration block used to identify objects that a Lifecycle Rule applies to. See below. If not specified, the rule will default to using prefix. + // +kubebuilder:validation:Optional + Filter *RuleFilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Configuration block that specifies when noncurrent object versions expire. See below. + // +kubebuilder:validation:Optional + NoncurrentVersionExpiration *RuleNoncurrentVersionExpirationParameters `json:"noncurrentVersionExpiration,omitempty" tf:"noncurrent_version_expiration,omitempty"` + + // Set of configuration blocks that specify the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. See below. + // +kubebuilder:validation:Optional + NoncurrentVersionTransition []RuleNoncurrentVersionTransitionParameters `json:"noncurrentVersionTransition,omitempty" tf:"noncurrent_version_transition,omitempty"` + + // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Whether the rule is currently being applied. 
Valid values: Enabled or Disabled. + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` + + // Set of configuration blocks that specify when an Amazon S3 object transitions to a specified storage class. See below. + // +kubebuilder:validation:Optional + Transition []RuleTransitionParameters `json:"transition,omitempty" tf:"transition,omitempty"` +} + +type RuleExpirationInitParameters struct { + + // Date objects are transitioned to the specified storage class. The date value must be in RFC3339 full-date format e.g. 2023-08-22. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer. If both days and date are not specified, defaults to 0. Valid values depend on storage_class, see Transition objects using Amazon S3 Lifecycle for more details. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. + ExpiredObjectDeleteMarker *bool `json:"expiredObjectDeleteMarker,omitempty" tf:"expired_object_delete_marker,omitempty"` +} + +type RuleExpirationObservation struct { + + // Date objects are transitioned to the specified storage class. The date value must be in RFC3339 full-date format e.g. 2023-08-22. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer. If both days and date are not specified, defaults to 0. Valid values depend on storage_class, see Transition objects using Amazon S3 Lifecycle for more details. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. 
If set to true, the delete marker will be expired; if set to false the policy takes no action. + ExpiredObjectDeleteMarker *bool `json:"expiredObjectDeleteMarker,omitempty" tf:"expired_object_delete_marker,omitempty"` +} + +type RuleExpirationParameters struct { + + // Date objects are transitioned to the specified storage class. The date value must be in RFC3339 full-date format e.g. 2023-08-22. + // +kubebuilder:validation:Optional + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer. If both days and date are not specified, defaults to 0. Valid values depend on storage_class, see Transition objects using Amazon S3 Lifecycle for more details. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. + // +kubebuilder:validation:Optional + ExpiredObjectDeleteMarker *bool `json:"expiredObjectDeleteMarker,omitempty" tf:"expired_object_delete_marker,omitempty"` +} + +type RuleFilterInitParameters struct { + + // Configuration block used to apply a logical AND to two or more predicates. See below. The Lifecycle Rule will apply to any object matching all the predicates configured inside the and block. + And *AndInitParameters `json:"and,omitempty" tf:"and,omitempty"` + + // Minimum object size (in bytes) to which the rule applies. + ObjectSizeGreaterThan *string `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + // Maximum object size (in bytes) to which the rule applies. + ObjectSizeLessThan *string `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. 
Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Configuration block for specifying a tag key and value. See below. + Tag *TagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type RuleFilterObservation struct { + + // Configuration block used to apply a logical AND to two or more predicates. See below. The Lifecycle Rule will apply to any object matching all the predicates configured inside the and block. + And *AndObservation `json:"and,omitempty" tf:"and,omitempty"` + + // Minimum object size (in bytes) to which the rule applies. + ObjectSizeGreaterThan *string `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + // Maximum object size (in bytes) to which the rule applies. + ObjectSizeLessThan *string `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Configuration block for specifying a tag key and value. See below. + Tag *TagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type RuleFilterParameters struct { + + // Configuration block used to apply a logical AND to two or more predicates. See below. The Lifecycle Rule will apply to any object matching all the predicates configured inside the and block. + // +kubebuilder:validation:Optional + And *AndParameters `json:"and,omitempty" tf:"and,omitempty"` + + // Minimum object size (in bytes) to which the rule applies. 
+ // +kubebuilder:validation:Optional + ObjectSizeGreaterThan *string `json:"objectSizeGreaterThan,omitempty" tf:"object_size_greater_than,omitempty"` + + // Maximum object size (in bytes) to which the rule applies. + // +kubebuilder:validation:Optional + ObjectSizeLessThan *string `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` + + // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Configuration block for specifying a tag key and value. See below. + // +kubebuilder:validation:Optional + Tag *TagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type RuleNoncurrentVersionExpirationInitParameters struct { + + // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. + NewerNoncurrentVersions *string `json:"newerNoncurrentVersions,omitempty" tf:"newer_noncurrent_versions,omitempty"` + + // Number of days an object is noncurrent before Amazon S3 can perform the associated action. + NoncurrentDays *float64 `json:"noncurrentDays,omitempty" tf:"noncurrent_days,omitempty"` +} + +type RuleNoncurrentVersionExpirationObservation struct { + + // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. + NewerNoncurrentVersions *string `json:"newerNoncurrentVersions,omitempty" tf:"newer_noncurrent_versions,omitempty"` + + // Number of days an object is noncurrent before Amazon S3 can perform the associated action. + NoncurrentDays *float64 `json:"noncurrentDays,omitempty" tf:"noncurrent_days,omitempty"` +} + +type RuleNoncurrentVersionExpirationParameters struct { + + // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. 
+ // +kubebuilder:validation:Optional + NewerNoncurrentVersions *string `json:"newerNoncurrentVersions,omitempty" tf:"newer_noncurrent_versions,omitempty"` + + // Number of days an object is noncurrent before Amazon S3 can perform the associated action. + // +kubebuilder:validation:Optional + NoncurrentDays *float64 `json:"noncurrentDays,omitempty" tf:"noncurrent_days,omitempty"` +} + +type RuleNoncurrentVersionTransitionInitParameters struct { + + // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. + NewerNoncurrentVersions *string `json:"newerNoncurrentVersions,omitempty" tf:"newer_noncurrent_versions,omitempty"` + + // Number of days an object is noncurrent before Amazon S3 can perform the associated action. + NoncurrentDays *float64 `json:"noncurrentDays,omitempty" tf:"noncurrent_days,omitempty"` + + // Class of storage used to store the object. Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type RuleNoncurrentVersionTransitionObservation struct { + + // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. + NewerNoncurrentVersions *string `json:"newerNoncurrentVersions,omitempty" tf:"newer_noncurrent_versions,omitempty"` + + // Number of days an object is noncurrent before Amazon S3 can perform the associated action. + NoncurrentDays *float64 `json:"noncurrentDays,omitempty" tf:"noncurrent_days,omitempty"` + + // Class of storage used to store the object. Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type RuleNoncurrentVersionTransitionParameters struct { + + // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. 
+ // +kubebuilder:validation:Optional + NewerNoncurrentVersions *string `json:"newerNoncurrentVersions,omitempty" tf:"newer_noncurrent_versions,omitempty"` + + // Number of days an object is noncurrent before Amazon S3 can perform the associated action. + // +kubebuilder:validation:Optional + NoncurrentDays *float64 `json:"noncurrentDays,omitempty" tf:"noncurrent_days,omitempty"` + + // Class of storage used to store the object. Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR. + // +kubebuilder:validation:Optional + StorageClass *string `json:"storageClass" tf:"storage_class,omitempty"` +} + +type RuleTransitionInitParameters struct { + + // Date objects are transitioned to the specified storage class. The date value must be in RFC3339 full-date format e.g. 2023-08-22. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer. If both days and date are not specified, defaults to 0. Valid values depend on storage_class, see Transition objects using Amazon S3 Lifecycle for more details. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Class of storage used to store the object. Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type RuleTransitionObservation struct { + + // Date objects are transitioned to the specified storage class. The date value must be in RFC3339 full-date format e.g. 2023-08-22. + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer. If both days and date are not specified, defaults to 0. 
Valid values depend on storage_class, see Transition objects using Amazon S3 Lifecycle for more details. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Class of storage used to store the object. Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type RuleTransitionParameters struct { + + // Date objects are transitioned to the specified storage class. The date value must be in RFC3339 full-date format e.g. 2023-08-22. + // +kubebuilder:validation:Optional + Date *string `json:"date,omitempty" tf:"date,omitempty"` + + // Number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer. If both days and date are not specified, defaults to 0. Valid values depend on storage_class, see Transition objects using Amazon S3 Lifecycle for more details. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Class of storage used to store the object. Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR. + // +kubebuilder:validation:Optional + StorageClass *string `json:"storageClass" tf:"storage_class,omitempty"` +} + +type TagInitParameters struct { + + // Name of the object key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Value of the tag. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagObservation struct { + + // Name of the object key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Value of the tag. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagParameters struct { + + // Name of the object key. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Value of the tag. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +// BucketLifecycleConfigurationSpec defines the desired state of BucketLifecycleConfiguration +type BucketLifecycleConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketLifecycleConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketLifecycleConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// BucketLifecycleConfigurationStatus defines the observed state of BucketLifecycleConfiguration. +type BucketLifecycleConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketLifecycleConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketLifecycleConfiguration is the Schema for the BucketLifecycleConfigurations API. Provides a S3 bucket lifecycle configuration resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketLifecycleConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.rule) || (has(self.initProvider) && has(self.initProvider.rule))",message="spec.forProvider.rule is a required parameter" + Spec BucketLifecycleConfigurationSpec `json:"spec"` + Status BucketLifecycleConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketLifecycleConfigurationList contains a list of BucketLifecycleConfigurations +type BucketLifecycleConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketLifecycleConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + BucketLifecycleConfiguration_Kind = "BucketLifecycleConfiguration" + BucketLifecycleConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketLifecycleConfiguration_Kind}.String() + BucketLifecycleConfiguration_KindAPIVersion = BucketLifecycleConfiguration_Kind + "." 
+ CRDGroupVersion.String() + BucketLifecycleConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(BucketLifecycleConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketLifecycleConfiguration{}, &BucketLifecycleConfigurationList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketlogging_terraformed.go b/apis/s3/v1beta2/zz_bucketlogging_terraformed.go new file mode 100755 index 0000000000..3d05487717 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketlogging_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketLogging +func (mg *BucketLogging) GetTerraformResourceType() string { + return "aws_s3_bucket_logging" +} + +// GetConnectionDetailsMapping for this BucketLogging +func (tr *BucketLogging) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketLogging +func (tr *BucketLogging) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketLogging +func (tr *BucketLogging) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketLogging +func (tr *BucketLogging) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketLogging +func (tr *BucketLogging) GetParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketLogging +func (tr *BucketLogging) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketLogging +func (tr *BucketLogging) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketLogging +func (tr *BucketLogging) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketLogging using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *BucketLogging) LateInitialize(attrs []byte) (bool, error) { + params := &BucketLoggingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketLogging) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketlogging_types.go b/apis/s3/v1beta2/zz_bucketlogging_types.go new file mode 100755 index 0000000000..b2af14d5bb --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketlogging_types.go @@ -0,0 +1,328 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketLoggingInitParameters struct { + + // Name of the bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. 
+ ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Name of the bucket where you want Amazon S3 to store server access logs. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TargetBucket *string `json:"targetBucket,omitempty" tf:"target_bucket,omitempty"` + + // Reference to a Bucket in s3 to populate targetBucket. + // +kubebuilder:validation:Optional + TargetBucketRef *v1.Reference `json:"targetBucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate targetBucket. + // +kubebuilder:validation:Optional + TargetBucketSelector *v1.Selector `json:"targetBucketSelector,omitempty" tf:"-"` + + // Set of configuration blocks with information for granting permissions. See below. + TargetGrant []TargetGrantInitParameters `json:"targetGrant,omitempty" tf:"target_grant,omitempty"` + + // Amazon S3 key format for log objects. See below. + TargetObjectKeyFormat *TargetObjectKeyFormatInitParameters `json:"targetObjectKeyFormat,omitempty" tf:"target_object_key_format,omitempty"` + + // Prefix for all log object keys. + TargetPrefix *string `json:"targetPrefix,omitempty" tf:"target_prefix,omitempty"` +} + +type BucketLoggingObservation struct { + + // Name of the bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // The bucket or bucket and expected_bucket_owner separated by a comma (,) if the latter is provided. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the bucket where you want Amazon S3 to store server access logs. 
+ TargetBucket *string `json:"targetBucket,omitempty" tf:"target_bucket,omitempty"` + + // Set of configuration blocks with information for granting permissions. See below. + TargetGrant []TargetGrantObservation `json:"targetGrant,omitempty" tf:"target_grant,omitempty"` + + // Amazon S3 key format for log objects. See below. + TargetObjectKeyFormat *TargetObjectKeyFormatObservation `json:"targetObjectKeyFormat,omitempty" tf:"target_object_key_format,omitempty"` + + // Prefix for all log object keys. + TargetPrefix *string `json:"targetPrefix,omitempty" tf:"target_prefix,omitempty"` +} + +type BucketLoggingParameters struct { + + // Name of the bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. + // +kubebuilder:validation:Optional + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Name of the bucket where you want Amazon S3 to store server access logs. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TargetBucket *string `json:"targetBucket,omitempty" tf:"target_bucket,omitempty"` + + // Reference to a Bucket in s3 to populate targetBucket. + // +kubebuilder:validation:Optional + TargetBucketRef *v1.Reference `json:"targetBucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate targetBucket. + // +kubebuilder:validation:Optional + TargetBucketSelector *v1.Selector `json:"targetBucketSelector,omitempty" tf:"-"` + + // Set of configuration blocks with information for granting permissions. See below. + // +kubebuilder:validation:Optional + TargetGrant []TargetGrantParameters `json:"targetGrant,omitempty" tf:"target_grant,omitempty"` + + // Amazon S3 key format for log objects. See below. + // +kubebuilder:validation:Optional + TargetObjectKeyFormat *TargetObjectKeyFormatParameters `json:"targetObjectKeyFormat,omitempty" tf:"target_object_key_format,omitempty"` + + // Prefix for all log object keys. + // +kubebuilder:validation:Optional + TargetPrefix *string `json:"targetPrefix,omitempty" tf:"target_prefix,omitempty"` +} + +type PartitionedPrefixInitParameters struct { + + // Specifies the partition date source for the partitioned prefix. Valid values: EventTime, DeliveryTime. + PartitionDateSource *string `json:"partitionDateSource,omitempty" tf:"partition_date_source,omitempty"` +} + +type PartitionedPrefixObservation struct { + + // Specifies the partition date source for the partitioned prefix. Valid values: EventTime, DeliveryTime. + PartitionDateSource *string `json:"partitionDateSource,omitempty" tf:"partition_date_source,omitempty"` +} + +type PartitionedPrefixParameters struct { + + // Specifies the partition date source for the partitioned prefix. Valid values: EventTime, DeliveryTime. 
+ // +kubebuilder:validation:Optional + PartitionDateSource *string `json:"partitionDateSource" tf:"partition_date_source,omitempty"` +} + +type SimplePrefixInitParameters struct { +} + +type SimplePrefixObservation struct { +} + +type SimplePrefixParameters struct { +} + +type TargetGrantGranteeInitParameters struct { + + // Email address of the grantee. See Regions and Endpoints for supported AWS regions where this argument can be specified. + EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // Canonical user ID of the grantee. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of grantee. Valid values: CanonicalUser, AmazonCustomerByEmail, Group. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // URI of the grantee group. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type TargetGrantGranteeObservation struct { + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Email address of the grantee. See Regions and Endpoints for supported AWS regions where this argument can be specified. + EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // Canonical user ID of the grantee. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of grantee. Valid values: CanonicalUser, AmazonCustomerByEmail, Group. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // URI of the grantee group. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type TargetGrantGranteeParameters struct { + + // Email address of the grantee. See Regions and Endpoints for supported AWS regions where this argument can be specified. + // +kubebuilder:validation:Optional + EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // Canonical user ID of the grantee. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Type of grantee. 
Valid values: CanonicalUser, AmazonCustomerByEmail, Group. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // URI of the grantee group. + // +kubebuilder:validation:Optional + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type TargetGrantInitParameters struct { + + // Configuration block for the person being granted permissions. See below. + Grantee *TargetGrantGranteeInitParameters `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Logging permissions assigned to the grantee for the bucket. Valid values: FULL_CONTROL, READ, WRITE. + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type TargetGrantObservation struct { + + // Configuration block for the person being granted permissions. See below. + Grantee *TargetGrantGranteeObservation `json:"grantee,omitempty" tf:"grantee,omitempty"` + + // Logging permissions assigned to the grantee for the bucket. Valid values: FULL_CONTROL, READ, WRITE. + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type TargetGrantParameters struct { + + // Configuration block for the person being granted permissions. See below. + // +kubebuilder:validation:Optional + Grantee *TargetGrantGranteeParameters `json:"grantee" tf:"grantee,omitempty"` + + // Logging permissions assigned to the grantee for the bucket. Valid values: FULL_CONTROL, READ, WRITE. + // +kubebuilder:validation:Optional + Permission *string `json:"permission" tf:"permission,omitempty"` +} + +type TargetObjectKeyFormatInitParameters struct { + + // Partitioned S3 key for log objects. See below. + PartitionedPrefix *PartitionedPrefixInitParameters `json:"partitionedPrefix,omitempty" tf:"partitioned_prefix,omitempty"` + + // Use the simple format for S3 keys for log objects. To use, set simple_prefix {}. 
+ SimplePrefix *SimplePrefixInitParameters `json:"simplePrefix,omitempty" tf:"simple_prefix,omitempty"` +} + +type TargetObjectKeyFormatObservation struct { + + // Partitioned S3 key for log objects. See below. + PartitionedPrefix *PartitionedPrefixObservation `json:"partitionedPrefix,omitempty" tf:"partitioned_prefix,omitempty"` + + // Use the simple format for S3 keys for log objects. To use, set simple_prefix {}. + SimplePrefix *SimplePrefixParameters `json:"simplePrefix,omitempty" tf:"simple_prefix,omitempty"` +} + +type TargetObjectKeyFormatParameters struct { + + // Partitioned S3 key for log objects. See below. + // +kubebuilder:validation:Optional + PartitionedPrefix *PartitionedPrefixParameters `json:"partitionedPrefix,omitempty" tf:"partitioned_prefix,omitempty"` + + // Use the simple format for S3 keys for log objects. To use, set simple_prefix {}. + // +kubebuilder:validation:Optional + SimplePrefix *SimplePrefixParameters `json:"simplePrefix,omitempty" tf:"simple_prefix,omitempty"` +} + +// BucketLoggingSpec defines the desired state of BucketLogging +type BucketLoggingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketLoggingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider BucketLoggingInitParameters `json:"initProvider,omitempty"` +} + +// BucketLoggingStatus defines the observed state of BucketLogging. +type BucketLoggingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketLoggingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketLogging is the Schema for the BucketLoggings API. Provides an S3 bucket (server access) logging resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketLogging struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetPrefix) || (has(self.initProvider) && has(self.initProvider.targetPrefix))",message="spec.forProvider.targetPrefix is a required parameter" + Spec BucketLoggingSpec `json:"spec"` + Status BucketLoggingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketLoggingList contains a list of BucketLoggings +type BucketLoggingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketLogging `json:"items"` +} + +// Repository type metadata. 
+var ( + BucketLogging_Kind = "BucketLogging" + BucketLogging_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketLogging_Kind}.String() + BucketLogging_KindAPIVersion = BucketLogging_Kind + "." + CRDGroupVersion.String() + BucketLogging_GroupVersionKind = CRDGroupVersion.WithKind(BucketLogging_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketLogging{}, &BucketLoggingList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketmetric_terraformed.go b/apis/s3/v1beta2/zz_bucketmetric_terraformed.go new file mode 100755 index 0000000000..217945d457 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketmetric_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketMetric +func (mg *BucketMetric) GetTerraformResourceType() string { + return "aws_s3_bucket_metric" +} + +// GetConnectionDetailsMapping for this BucketMetric +func (tr *BucketMetric) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketMetric +func (tr *BucketMetric) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketMetric +func (tr *BucketMetric) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketMetric +func (tr *BucketMetric) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + 
return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketMetric +func (tr *BucketMetric) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketMetric +func (tr *BucketMetric) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketMetric +func (tr *BucketMetric) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketMetric +func (tr *BucketMetric) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketMetric using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BucketMetric) LateInitialize(attrs []byte) (bool, error) { + params := &BucketMetricParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketMetric) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketmetric_types.go b/apis/s3/v1beta2/zz_bucketmetric_types.go new file mode 100755 index 0000000000..3f4aa95a9e --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketmetric_types.go @@ -0,0 +1,203 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketMetricFilterInitParameters struct { + + // S3 Access Point ARN for filtering (singular). 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta2.AccessPoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + AccessPoint *string `json:"accessPoint,omitempty" tf:"access_point,omitempty"` + + // Reference to a AccessPoint in s3control to populate accessPoint. + // +kubebuilder:validation:Optional + AccessPointRef *v1.Reference `json:"accessPointRef,omitempty" tf:"-"` + + // Selector for a AccessPoint in s3control to populate accessPoint. + // +kubebuilder:validation:Optional + AccessPointSelector *v1.Selector `json:"accessPointSelector,omitempty" tf:"-"` + + // Object prefix for filtering (singular). + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketMetricFilterObservation struct { + + // S3 Access Point ARN for filtering (singular). + AccessPoint *string `json:"accessPoint,omitempty" tf:"access_point,omitempty"` + + // Object prefix for filtering (singular). + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketMetricFilterParameters struct { + + // S3 Access Point ARN for filtering (singular). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta2.AccessPoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + AccessPoint *string `json:"accessPoint,omitempty" tf:"access_point,omitempty"` + + // Reference to a AccessPoint in s3control to populate accessPoint. 
+ // +kubebuilder:validation:Optional + AccessPointRef *v1.Reference `json:"accessPointRef,omitempty" tf:"-"` + + // Selector for a AccessPoint in s3control to populate accessPoint. + // +kubebuilder:validation:Optional + AccessPointSelector *v1.Selector `json:"accessPointSelector,omitempty" tf:"-"` + + // Object prefix for filtering (singular). + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BucketMetricInitParameters struct { + + // Name of the bucket to put metric configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). + Filter *BucketMetricFilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier of the metrics configuration for the bucket. Must be less than or equal to 64 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BucketMetricObservation struct { + + // Name of the bucket to put metric configuration. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). 
+ Filter *BucketMetricFilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Unique identifier of the metrics configuration for the bucket. Must be less than or equal to 64 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BucketMetricParameters struct { + + // Name of the bucket to put metric configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). + // +kubebuilder:validation:Optional + Filter *BucketMetricFilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier of the metrics configuration for the bucket. Must be less than or equal to 64 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// BucketMetricSpec defines the desired state of BucketMetric +type BucketMetricSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketMetricParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketMetricInitParameters `json:"initProvider,omitempty"` +} + +// BucketMetricStatus defines the observed state of BucketMetric. +type BucketMetricStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketMetricObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketMetric is the Schema for the BucketMetrics API. Provides a S3 bucket metrics configuration resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketMetric struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec BucketMetricSpec `json:"spec"` + Status BucketMetricStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketMetricList contains a list of BucketMetrics +type BucketMetricList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketMetric `json:"items"` +} + +// Repository type metadata. +var ( + BucketMetric_Kind = "BucketMetric" + BucketMetric_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketMetric_Kind}.String() + BucketMetric_KindAPIVersion = BucketMetric_Kind + "." 
+ CRDGroupVersion.String() + BucketMetric_GroupVersionKind = CRDGroupVersion.WithKind(BucketMetric_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketMetric{}, &BucketMetricList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketobjectlockconfiguration_terraformed.go b/apis/s3/v1beta2/zz_bucketobjectlockconfiguration_terraformed.go new file mode 100755 index 0000000000..fe89298847 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketobjectlockconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketObjectLockConfiguration +func (mg *BucketObjectLockConfiguration) GetTerraformResourceType() string { + return "aws_s3_bucket_object_lock_configuration" +} + +// GetConnectionDetailsMapping for this BucketObjectLockConfiguration +func (tr *BucketObjectLockConfiguration) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"token": "tokenSecretRef"} +} + +// GetObservation of this BucketObjectLockConfiguration +func (tr *BucketObjectLockConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketObjectLockConfiguration +func (tr *BucketObjectLockConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketObjectLockConfiguration +func (tr *BucketObjectLockConfiguration) 
GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketObjectLockConfiguration +func (tr *BucketObjectLockConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketObjectLockConfiguration +func (tr *BucketObjectLockConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketObjectLockConfiguration +func (tr *BucketObjectLockConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketObjectLockConfiguration +func (tr *BucketObjectLockConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketObjectLockConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BucketObjectLockConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &BucketObjectLockConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketObjectLockConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketobjectlockconfiguration_types.go b/apis/s3/v1beta2/zz_bucketobjectlockconfiguration_types.go new file mode 100755 index 0000000000..424ce7606a --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketobjectlockconfiguration_types.go @@ -0,0 +1,218 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketObjectLockConfigurationInitParameters struct { + + // Name of the bucket. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Indicates whether this bucket has an Object Lock configuration enabled. Defaults to Enabled. Valid values: Enabled. + ObjectLockEnabled *string `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // Configuration block for specifying the Object Lock rule for the specified object. See below. + Rule *BucketObjectLockConfigurationRuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Token to allow Object Lock to be enabled for an existing bucket. You must contact AWS support for the bucket's "Object Lock token". + // The token is generated in the back-end when versioning is enabled on a bucket. For more details on versioning, see the aws_s3_bucket_versioning resource. + TokenSecretRef *v1.SecretKeySelector `json:"tokenSecretRef,omitempty" tf:"-"` +} + +type BucketObjectLockConfigurationObservation struct { + + // Name of the bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // The bucket or bucket and expected_bucket_owner separated by a comma (,) if the latter is provided. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Indicates whether this bucket has an Object Lock configuration enabled. Defaults to Enabled. Valid values: Enabled. + ObjectLockEnabled *string `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // Configuration block for specifying the Object Lock rule for the specified object. See below. + Rule *BucketObjectLockConfigurationRuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketObjectLockConfigurationParameters struct { + + // Name of the bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. + // +kubebuilder:validation:Optional + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Indicates whether this bucket has an Object Lock configuration enabled. Defaults to Enabled. Valid values: Enabled. + // +kubebuilder:validation:Optional + ObjectLockEnabled *string `json:"objectLockEnabled,omitempty" tf:"object_lock_enabled,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for specifying the Object Lock rule for the specified object. See below. 
+ // +kubebuilder:validation:Optional + Rule *BucketObjectLockConfigurationRuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Token to allow Object Lock to be enabled for an existing bucket. You must contact AWS support for the bucket's "Object Lock token". + // The token is generated in the back-end when versioning is enabled on a bucket. For more details on versioning, see the aws_s3_bucket_versioning resource. + // +kubebuilder:validation:Optional + TokenSecretRef *v1.SecretKeySelector `json:"tokenSecretRef,omitempty" tf:"-"` +} + +type BucketObjectLockConfigurationRuleInitParameters struct { + + // Configuration block for specifying the default Object Lock retention settings for new objects placed in the specified bucket. See below. + DefaultRetention *RuleDefaultRetentionInitParameters `json:"defaultRetention,omitempty" tf:"default_retention,omitempty"` +} + +type BucketObjectLockConfigurationRuleObservation struct { + + // Configuration block for specifying the default Object Lock retention settings for new objects placed in the specified bucket. See below. + DefaultRetention *RuleDefaultRetentionObservation `json:"defaultRetention,omitempty" tf:"default_retention,omitempty"` +} + +type BucketObjectLockConfigurationRuleParameters struct { + + // Configuration block for specifying the default Object Lock retention settings for new objects placed in the specified bucket. See below. + // +kubebuilder:validation:Optional + DefaultRetention *RuleDefaultRetentionParameters `json:"defaultRetention" tf:"default_retention,omitempty"` +} + +type RuleDefaultRetentionInitParameters struct { + + // Number of days that you want to specify for the default retention period. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Default Object Lock retention mode you want to apply to new objects placed in the specified bucket. Valid values: COMPLIANCE, GOVERNANCE. 
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Number of years that you want to specify for the default retention period. + Years *float64 `json:"years,omitempty" tf:"years,omitempty"` +} + +type RuleDefaultRetentionObservation struct { + + // Number of days that you want to specify for the default retention period. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Default Object Lock retention mode you want to apply to new objects placed in the specified bucket. Valid values: COMPLIANCE, GOVERNANCE. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Number of years that you want to specify for the default retention period. + Years *float64 `json:"years,omitempty" tf:"years,omitempty"` +} + +type RuleDefaultRetentionParameters struct { + + // Number of days that you want to specify for the default retention period. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Default Object Lock retention mode you want to apply to new objects placed in the specified bucket. Valid values: COMPLIANCE, GOVERNANCE. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Number of years that you want to specify for the default retention period. + // +kubebuilder:validation:Optional + Years *float64 `json:"years,omitempty" tf:"years,omitempty"` +} + +// BucketObjectLockConfigurationSpec defines the desired state of BucketObjectLockConfiguration +type BucketObjectLockConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketObjectLockConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketObjectLockConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// BucketObjectLockConfigurationStatus defines the observed state of BucketObjectLockConfiguration. +type BucketObjectLockConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketObjectLockConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketObjectLockConfiguration is the Schema for the BucketObjectLockConfigurations API. Provides an S3 bucket Object Lock configuration resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketObjectLockConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec BucketObjectLockConfigurationSpec `json:"spec"` + Status BucketObjectLockConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketObjectLockConfigurationList contains a list of BucketObjectLockConfigurations +type BucketObjectLockConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketObjectLockConfiguration 
`json:"items"` +} + +// Repository type metadata. +var ( + BucketObjectLockConfiguration_Kind = "BucketObjectLockConfiguration" + BucketObjectLockConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketObjectLockConfiguration_Kind}.String() + BucketObjectLockConfiguration_KindAPIVersion = BucketObjectLockConfiguration_Kind + "." + CRDGroupVersion.String() + BucketObjectLockConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(BucketObjectLockConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketObjectLockConfiguration{}, &BucketObjectLockConfigurationList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketownershipcontrols_terraformed.go b/apis/s3/v1beta2/zz_bucketownershipcontrols_terraformed.go new file mode 100755 index 0000000000..63a97c44d4 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketownershipcontrols_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketOwnershipControls +func (mg *BucketOwnershipControls) GetTerraformResourceType() string { + return "aws_s3_bucket_ownership_controls" +} + +// GetConnectionDetailsMapping for this BucketOwnershipControls +func (tr *BucketOwnershipControls) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketOwnershipControls +func (tr *BucketOwnershipControls) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketOwnershipControls +func (tr *BucketOwnershipControls) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketOwnershipControls +func (tr *BucketOwnershipControls) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketOwnershipControls +func (tr *BucketOwnershipControls) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketOwnershipControls +func (tr *BucketOwnershipControls) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
BucketOwnershipControls +func (tr *BucketOwnershipControls) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketOwnershipControls +func (tr *BucketOwnershipControls) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketOwnershipControls using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *BucketOwnershipControls) LateInitialize(attrs []byte) (bool, error) { + params := &BucketOwnershipControlsParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketOwnershipControls) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketownershipcontrols_types.go b/apis/s3/v1beta2/zz_bucketownershipcontrols_types.go new file mode 100755 index 0000000000..3370878f58 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketownershipcontrols_types.go @@ -0,0 +1,151 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketOwnershipControlsInitParameters struct { + + // Name of the bucket that you want to associate this access point with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. 
+ // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Configuration block(s) with Ownership Controls rules. Detailed below. + Rule *BucketOwnershipControlsRuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketOwnershipControlsObservation struct { + + // Name of the bucket that you want to associate this access point with. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // S3 Bucket name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block(s) with Ownership Controls rules. Detailed below. + Rule *BucketOwnershipControlsRuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketOwnershipControlsParameters struct { + + // Name of the bucket that you want to associate this access point with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block(s) with Ownership Controls rules. Detailed below. + // +kubebuilder:validation:Optional + Rule *BucketOwnershipControlsRuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketOwnershipControlsRuleInitParameters struct { + + // Object ownership. 
Valid values: BucketOwnerPreferred, ObjectWriter or BucketOwnerEnforced + ObjectOwnership *string `json:"objectOwnership,omitempty" tf:"object_ownership,omitempty"` +} + +type BucketOwnershipControlsRuleObservation struct { + + // Object ownership. Valid values: BucketOwnerPreferred, ObjectWriter or BucketOwnerEnforced + ObjectOwnership *string `json:"objectOwnership,omitempty" tf:"object_ownership,omitempty"` +} + +type BucketOwnershipControlsRuleParameters struct { + + // Object ownership. Valid values: BucketOwnerPreferred, ObjectWriter or BucketOwnerEnforced + // +kubebuilder:validation:Optional + ObjectOwnership *string `json:"objectOwnership" tf:"object_ownership,omitempty"` +} + +// BucketOwnershipControlsSpec defines the desired state of BucketOwnershipControls +type BucketOwnershipControlsSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketOwnershipControlsParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketOwnershipControlsInitParameters `json:"initProvider,omitempty"` +} + +// BucketOwnershipControlsStatus defines the observed state of BucketOwnershipControls. 
+type BucketOwnershipControlsStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketOwnershipControlsObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketOwnershipControls is the Schema for the BucketOwnershipControlss API. Manages S3 Bucket Ownership Controls. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketOwnershipControls struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.rule) || (has(self.initProvider) && has(self.initProvider.rule))",message="spec.forProvider.rule is a required parameter" + Spec BucketOwnershipControlsSpec `json:"spec"` + Status BucketOwnershipControlsStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketOwnershipControlsList contains a list of BucketOwnershipControlss +type BucketOwnershipControlsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketOwnershipControls `json:"items"` +} + +// Repository type metadata. 
+var ( + BucketOwnershipControls_Kind = "BucketOwnershipControls" + BucketOwnershipControls_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketOwnershipControls_Kind}.String() + BucketOwnershipControls_KindAPIVersion = BucketOwnershipControls_Kind + "." + CRDGroupVersion.String() + BucketOwnershipControls_GroupVersionKind = CRDGroupVersion.WithKind(BucketOwnershipControls_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketOwnershipControls{}, &BucketOwnershipControlsList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketreplicationconfiguration_terraformed.go b/apis/s3/v1beta2/zz_bucketreplicationconfiguration_terraformed.go new file mode 100755 index 0000000000..8e8eb7ec9b --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketreplicationconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketReplicationConfiguration +func (mg *BucketReplicationConfiguration) GetTerraformResourceType() string { + return "aws_s3_bucket_replication_configuration" +} + +// GetConnectionDetailsMapping for this BucketReplicationConfiguration +func (tr *BucketReplicationConfiguration) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"token": "tokenSecretRef"} +} + +// GetObservation of this BucketReplicationConfiguration +func (tr *BucketReplicationConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketReplicationConfiguration +func (tr *BucketReplicationConfiguration) 
SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketReplicationConfiguration +func (tr *BucketReplicationConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketReplicationConfiguration +func (tr *BucketReplicationConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketReplicationConfiguration +func (tr *BucketReplicationConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketReplicationConfiguration +func (tr *BucketReplicationConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketReplicationConfiguration +func (tr *BucketReplicationConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the 
initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketReplicationConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BucketReplicationConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &BucketReplicationConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketReplicationConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketreplicationconfiguration_types.go b/apis/s3/v1beta2/zz_bucketreplicationconfiguration_types.go new file mode 100755 index 0000000000..7ee779efcb --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketreplicationconfiguration_types.go @@ -0,0 +1,726 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketReplicationConfigurationInitParameters struct { + + // Name of the source S3 bucket you want Amazon S3 to monitor. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // ARN of the IAM role for Amazon S3 to assume when replicating the objects. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // List of configuration blocks describing the rules managing the replication. See below. + Rule []BucketReplicationConfigurationRuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Token to allow replication to be enabled on an Object Lock-enabled bucket. You must contact AWS support for the bucket's "Object Lock token". + // For more details, see Using S3 Object Lock with replication. 
+ TokenSecretRef *v1.SecretKeySelector `json:"tokenSecretRef,omitempty" tf:"-"` +} + +type BucketReplicationConfigurationObservation struct { + + // Name of the source S3 bucket you want Amazon S3 to monitor. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // S3 source bucket name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ARN of the IAM role for Amazon S3 to assume when replicating the objects. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // List of configuration blocks describing the rules managing the replication. See below. + Rule []BucketReplicationConfigurationRuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketReplicationConfigurationParameters struct { + + // Name of the source S3 bucket you want Amazon S3 to monitor. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // ARN of the IAM role for Amazon S3 to assume when replicating the objects. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // List of configuration blocks describing the rules managing the replication. See below. + // +kubebuilder:validation:Optional + Rule []BucketReplicationConfigurationRuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Token to allow replication to be enabled on an Object Lock-enabled bucket. You must contact AWS support for the bucket's "Object Lock token". + // For more details, see Using S3 Object Lock with replication. + // +kubebuilder:validation:Optional + TokenSecretRef *v1.SecretKeySelector `json:"tokenSecretRef,omitempty" tf:"-"` +} + +type BucketReplicationConfigurationRuleFilterInitParameters struct { + + // Configuration block for specifying rule filters. This element is required only if you specify more than one filter. See and below for more details. + And *FilterAndInitParameters `json:"and,omitempty" tf:"and,omitempty"` + + // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Configuration block for specifying a tag key and value. See below. 
+ Tag *FilterTagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BucketReplicationConfigurationRuleFilterObservation struct { + + // Configuration block for specifying rule filters. This element is required only if you specify more than one filter. See and below for more details. + And *FilterAndObservation `json:"and,omitempty" tf:"and,omitempty"` + + // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Configuration block for specifying a tag key and value. See below. + Tag *FilterTagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BucketReplicationConfigurationRuleFilterParameters struct { + + // Configuration block for specifying rule filters. This element is required only if you specify more than one filter. See and below for more details. + // +kubebuilder:validation:Optional + And *FilterAndParameters `json:"and,omitempty" tf:"and,omitempty"` + + // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string ("") if filter is not specified. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Configuration block for specifying a tag key and value. See below. + // +kubebuilder:validation:Optional + Tag *FilterTagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BucketReplicationConfigurationRuleInitParameters struct { + + // Whether delete markers are replicated. This argument is only valid with V2 replication configurations (i.e., when filter is used)documented below. 
+ DeleteMarkerReplication *DeleteMarkerReplicationInitParameters `json:"deleteMarkerReplication,omitempty" tf:"delete_marker_replication,omitempty"` + + // Specifies the destination for the rule. See below. + Destination *RuleDestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Replicate existing objects in the source bucket according to the rule configurations. See below. + ExistingObjectReplication *ExistingObjectReplicationInitParameters `json:"existingObjectReplication,omitempty" tf:"existing_object_replication,omitempty"` + + // Filter that identifies subset of objects to which the replication rule applies. See below. If not specified, the rule will default to using prefix. + Filter *BucketReplicationConfigurationRuleFilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. Must be less than or equal to 255 characters in length. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Priority associated with the rule. Priority should only be set if filter is configured. If not provided, defaults to 0. Priority must be unique between multiple rules. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Specifies special object selection criteria. See below. + SourceSelectionCriteria *RuleSourceSelectionCriteriaInitParameters `json:"sourceSelectionCriteria,omitempty" tf:"source_selection_criteria,omitempty"` + + // Status of the rule. Either "Enabled" or "Disabled". The rule is ignored if status is not "Enabled". 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type BucketReplicationConfigurationRuleObservation struct { + + // Whether delete markers are replicated. This argument is only valid with V2 replication configurations (i.e., when filter is used)documented below. + DeleteMarkerReplication *DeleteMarkerReplicationObservation `json:"deleteMarkerReplication,omitempty" tf:"delete_marker_replication,omitempty"` + + // Specifies the destination for the rule. See below. + Destination *RuleDestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Replicate existing objects in the source bucket according to the rule configurations. See below. + ExistingObjectReplication *ExistingObjectReplicationObservation `json:"existingObjectReplication,omitempty" tf:"existing_object_replication,omitempty"` + + // Filter that identifies subset of objects to which the replication rule applies. See below. If not specified, the rule will default to using prefix. + Filter *BucketReplicationConfigurationRuleFilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. Must be less than or equal to 255 characters in length. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Priority associated with the rule. Priority should only be set if filter is configured. If not provided, defaults to 0. Priority must be unique between multiple rules. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Specifies special object selection criteria. See below. 
+ SourceSelectionCriteria *RuleSourceSelectionCriteriaObservation `json:"sourceSelectionCriteria,omitempty" tf:"source_selection_criteria,omitempty"` + + // Status of the rule. Either "Enabled" or "Disabled". The rule is ignored if status is not "Enabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type BucketReplicationConfigurationRuleParameters struct { + + // Whether delete markers are replicated. This argument is only valid with V2 replication configurations (i.e., when filter is used)documented below. + // +kubebuilder:validation:Optional + DeleteMarkerReplication *DeleteMarkerReplicationParameters `json:"deleteMarkerReplication,omitempty" tf:"delete_marker_replication,omitempty"` + + // Specifies the destination for the rule. See below. + // +kubebuilder:validation:Optional + Destination *RuleDestinationParameters `json:"destination" tf:"destination,omitempty"` + + // Replicate existing objects in the source bucket according to the rule configurations. See below. + // +kubebuilder:validation:Optional + ExistingObjectReplication *ExistingObjectReplicationParameters `json:"existingObjectReplication,omitempty" tf:"existing_object_replication,omitempty"` + + // Filter that identifies subset of objects to which the replication rule applies. See below. If not specified, the rule will default to using prefix. + // +kubebuilder:validation:Optional + Filter *BucketReplicationConfigurationRuleFilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // Unique identifier for the rule. Must be less than or equal to 255 characters in length. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string ("") if filter is not specified. 
+ // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Priority associated with the rule. Priority should only be set if filter is configured. If not provided, defaults to 0. Priority must be unique between multiple rules. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Specifies special object selection criteria. See below. + // +kubebuilder:validation:Optional + SourceSelectionCriteria *RuleSourceSelectionCriteriaParameters `json:"sourceSelectionCriteria,omitempty" tf:"source_selection_criteria,omitempty"` + + // Status of the rule. Either "Enabled" or "Disabled". The rule is ignored if status is not "Enabled". + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +type DeleteMarkerReplicationInitParameters struct { + + // Whether delete markers should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type DeleteMarkerReplicationObservation struct { + + // Whether delete markers should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type DeleteMarkerReplicationParameters struct { + + // Whether delete markers should be replicated. Either "Enabled" or "Disabled". + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +type DestinationAccessControlTranslationInitParameters struct { + + // Specifies the replica ownership. For default and valid values, see PUT bucket replication in the Amazon S3 API Reference. Valid values: Destination. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` +} + +type DestinationAccessControlTranslationObservation struct { + + // Specifies the replica ownership. For default and valid values, see PUT bucket replication in the Amazon S3 API Reference. Valid values: Destination. 
+ Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` +} + +type DestinationAccessControlTranslationParameters struct { + + // Specifies the replica ownership. For default and valid values, see PUT bucket replication in the Amazon S3 API Reference. Valid values: Destination. + // +kubebuilder:validation:Optional + Owner *string `json:"owner" tf:"owner,omitempty"` +} + +type DestinationMetricsInitParameters struct { + + // Configuration block that specifies the time threshold for emitting the s3:Replication:OperationMissedThreshold event. See below. + EventThreshold *EventThresholdInitParameters `json:"eventThreshold,omitempty" tf:"event_threshold,omitempty"` + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type DestinationMetricsObservation struct { + + // Configuration block that specifies the time threshold for emitting the s3:Replication:OperationMissedThreshold event. See below. + EventThreshold *EventThresholdObservation `json:"eventThreshold,omitempty" tf:"event_threshold,omitempty"` + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type DestinationMetricsParameters struct { + + // Configuration block that specifies the time threshold for emitting the s3:Replication:OperationMissedThreshold event. See below. + // +kubebuilder:validation:Optional + EventThreshold *EventThresholdParameters `json:"eventThreshold,omitempty" tf:"event_threshold,omitempty"` + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +type DestinationReplicationTimeInitParameters struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Configuration block specifying the time by which replication should be complete for all objects and operations on objects. See below. + Time *TimeInitParameters `json:"time,omitempty" tf:"time,omitempty"` +} + +type DestinationReplicationTimeObservation struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Configuration block specifying the time by which replication should be complete for all objects and operations on objects. See below. + Time *TimeObservation `json:"time,omitempty" tf:"time,omitempty"` +} + +type DestinationReplicationTimeParameters struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` + + // Configuration block specifying the time by which replication should be complete for all objects and operations on objects. See below. + // +kubebuilder:validation:Optional + Time *TimeParameters `json:"time" tf:"time,omitempty"` +} + +type EncryptionConfigurationInitParameters struct { + + // ID (Key ARN or Alias ARN) of the customer managed AWS KMS key stored in AWS Key Management Service (KMS) for the destination bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + ReplicaKMSKeyID *string `json:"replicaKmsKeyId,omitempty" tf:"replica_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate replicaKmsKeyId. + // +kubebuilder:validation:Optional + ReplicaKMSKeyIDRef *v1.Reference `json:"replicaKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate replicaKmsKeyId. 
+ // +kubebuilder:validation:Optional + ReplicaKMSKeyIDSelector *v1.Selector `json:"replicaKmsKeyIdSelector,omitempty" tf:"-"` +} + +type EncryptionConfigurationObservation struct { + + // ID (Key ARN or Alias ARN) of the customer managed AWS KMS key stored in AWS Key Management Service (KMS) for the destination bucket. + ReplicaKMSKeyID *string `json:"replicaKmsKeyId,omitempty" tf:"replica_kms_key_id,omitempty"` +} + +type EncryptionConfigurationParameters struct { + + // ID (Key ARN or Alias ARN) of the customer managed AWS KMS key stored in AWS Key Management Service (KMS) for the destination bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + ReplicaKMSKeyID *string `json:"replicaKmsKeyId,omitempty" tf:"replica_kms_key_id,omitempty"` + + // Reference to a Key in kms to populate replicaKmsKeyId. + // +kubebuilder:validation:Optional + ReplicaKMSKeyIDRef *v1.Reference `json:"replicaKmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate replicaKmsKeyId. + // +kubebuilder:validation:Optional + ReplicaKMSKeyIDSelector *v1.Selector `json:"replicaKmsKeyIdSelector,omitempty" tf:"-"` +} + +type EventThresholdInitParameters struct { + + // Time in minutes. Valid values: 15. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type EventThresholdObservation struct { + + // Time in minutes. Valid values: 15. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type EventThresholdParameters struct { + + // Time in minutes. Valid values: 15. + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes" tf:"minutes,omitempty"` +} + +type ExistingObjectReplicationInitParameters struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ExistingObjectReplicationObservation struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ExistingObjectReplicationParameters struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +type FilterAndInitParameters struct { + + // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Map of tags (key and value pairs) that identifies a subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FilterAndObservation struct { + + // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string ("") if filter is not specified. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Map of tags (key and value pairs) that identifies a subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FilterAndParameters struct { + + // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string ("") if filter is not specified. 
+ // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Map of tags (key and value pairs) that identifies a subset of objects to which the rule applies. The rule applies only to objects having all the tags in its tagset. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FilterTagInitParameters struct { + + // Name of the object key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Value of the tag. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FilterTagObservation struct { + + // Name of the object key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Value of the tag. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FilterTagParameters struct { + + // Name of the object key. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Value of the tag. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ReplicaModificationsInitParameters struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ReplicaModificationsObservation struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ReplicaModificationsParameters struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +type RuleDestinationInitParameters struct { + + // Configuration block that specifies the overrides to use for object owners on replication. See below. 
Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with account owner override configuration. + AccessControlTranslation *DestinationAccessControlTranslationInitParameters `json:"accessControlTranslation,omitempty" tf:"access_control_translation,omitempty"` + + // Account ID to specify the replica ownership. Must be used in conjunction with access_control_translation override configuration. + Account *string `json:"account,omitempty" tf:"account,omitempty"` + + // ARN of the bucket where you want Amazon S3 to store the results. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Configuration block that provides information about encryption. See below. If source_selection_criteria is specified, you must specify this element. + EncryptionConfiguration *EncryptionConfigurationInitParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Configuration block that specifies replication metrics-related settings enabling replication metrics and events. See below. 
+ Metrics *DestinationMetricsInitParameters `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Configuration block that specifies S3 Replication Time Control (S3 RTC), including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated. See below. Replication Time Control must be used in conjunction with metrics. + ReplicationTime *DestinationReplicationTimeInitParameters `json:"replicationTime,omitempty" tf:"replication_time,omitempty"` + + // The storage class used to store the object. By default, Amazon S3 uses the storage class of the source object to create the object replica. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type RuleDestinationObservation struct { + + // Configuration block that specifies the overrides to use for object owners on replication. See below. Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with account owner override configuration. + AccessControlTranslation *DestinationAccessControlTranslationObservation `json:"accessControlTranslation,omitempty" tf:"access_control_translation,omitempty"` + + // Account ID to specify the replica ownership. Must be used in conjunction with access_control_translation override configuration. + Account *string `json:"account,omitempty" tf:"account,omitempty"` + + // ARN of the bucket where you want Amazon S3 to store the results. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Configuration block that provides information about encryption. See below. If source_selection_criteria is specified, you must specify this element. 
+ EncryptionConfiguration *EncryptionConfigurationObservation `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Configuration block that specifies replication metrics-related settings enabling replication metrics and events. See below. + Metrics *DestinationMetricsObservation `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Configuration block that specifies S3 Replication Time Control (S3 RTC), including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated. See below. Replication Time Control must be used in conjunction with metrics. + ReplicationTime *DestinationReplicationTimeObservation `json:"replicationTime,omitempty" tf:"replication_time,omitempty"` + + // The storage class used to store the object. By default, Amazon S3 uses the storage class of the source object to create the object replica. + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type RuleDestinationParameters struct { + + // Configuration block that specifies the overrides to use for object owners on replication. See below. Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object. Must be used in conjunction with account owner override configuration. + // +kubebuilder:validation:Optional + AccessControlTranslation *DestinationAccessControlTranslationParameters `json:"accessControlTranslation,omitempty" tf:"access_control_translation,omitempty"` + + // Account ID to specify the replica ownership. Must be used in conjunction with access_control_translation override configuration. 
+ // +kubebuilder:validation:Optional + Account *string `json:"account,omitempty" tf:"account,omitempty"` + + // ARN of the bucket where you want Amazon S3 to store the results. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Configuration block that provides information about encryption. See below. If source_selection_criteria is specified, you must specify this element. + // +kubebuilder:validation:Optional + EncryptionConfiguration *EncryptionConfigurationParameters `json:"encryptionConfiguration,omitempty" tf:"encryption_configuration,omitempty"` + + // Configuration block that specifies replication metrics-related settings enabling replication metrics and events. See below. + // +kubebuilder:validation:Optional + Metrics *DestinationMetricsParameters `json:"metrics,omitempty" tf:"metrics,omitempty"` + + // Configuration block that specifies S3 Replication Time Control (S3 RTC), including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated. See below. Replication Time Control must be used in conjunction with metrics. + // +kubebuilder:validation:Optional + ReplicationTime *DestinationReplicationTimeParameters `json:"replicationTime,omitempty" tf:"replication_time,omitempty"` + + // The storage class used to store the object. By default, Amazon S3 uses the storage class of the source object to create the object replica. 
+ // +kubebuilder:validation:Optional + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` +} + +type RuleSourceSelectionCriteriaInitParameters struct { + + // Configuration block that you can specify for selections for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas. + ReplicaModifications *ReplicaModificationsInitParameters `json:"replicaModifications,omitempty" tf:"replica_modifications,omitempty"` + + // Configuration block for filter information for the selection of Amazon S3 objects encrypted with AWS KMS. If specified, replica_kms_key_id in destination encryption_configuration must be specified as well. + SseKMSEncryptedObjects *SourceSelectionCriteriaSseKMSEncryptedObjectsInitParameters `json:"sseKmsEncryptedObjects,omitempty" tf:"sse_kms_encrypted_objects,omitempty"` +} + +type RuleSourceSelectionCriteriaObservation struct { + + // Configuration block that you can specify for selections for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas. + ReplicaModifications *ReplicaModificationsObservation `json:"replicaModifications,omitempty" tf:"replica_modifications,omitempty"` + + // Configuration block for filter information for the selection of Amazon S3 objects encrypted with AWS KMS. If specified, replica_kms_key_id in destination encryption_configuration must be specified as well. 
+ SseKMSEncryptedObjects *SourceSelectionCriteriaSseKMSEncryptedObjectsObservation `json:"sseKmsEncryptedObjects,omitempty" tf:"sse_kms_encrypted_objects,omitempty"` +} + +type RuleSourceSelectionCriteriaParameters struct { + + // Configuration block that you can specify for selections for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas. + // +kubebuilder:validation:Optional + ReplicaModifications *ReplicaModificationsParameters `json:"replicaModifications,omitempty" tf:"replica_modifications,omitempty"` + + // Configuration block for filter information for the selection of Amazon S3 objects encrypted with AWS KMS. If specified, replica_kms_key_id in destination encryption_configuration must be specified as well. + // +kubebuilder:validation:Optional + SseKMSEncryptedObjects *SourceSelectionCriteriaSseKMSEncryptedObjectsParameters `json:"sseKmsEncryptedObjects,omitempty" tf:"sse_kms_encrypted_objects,omitempty"` +} + +type SourceSelectionCriteriaSseKMSEncryptedObjectsInitParameters struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type SourceSelectionCriteriaSseKMSEncryptedObjectsObservation struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type SourceSelectionCriteriaSseKMSEncryptedObjectsParameters struct { + + // Whether the existing objects should be replicated. Either "Enabled" or "Disabled". + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +type TimeInitParameters struct { + + // Time in minutes. Valid values: 15. 
+ Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type TimeObservation struct { + + // Time in minutes. Valid values: 15. + Minutes *float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` +} + +type TimeParameters struct { + + // Time in minutes. Valid values: 15. + // +kubebuilder:validation:Optional + Minutes *float64 `json:"minutes" tf:"minutes,omitempty"` +} + +// BucketReplicationConfigurationSpec defines the desired state of BucketReplicationConfiguration +type BucketReplicationConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketReplicationConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketReplicationConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// BucketReplicationConfigurationStatus defines the observed state of BucketReplicationConfiguration. +type BucketReplicationConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketReplicationConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketReplicationConfiguration is the Schema for the BucketReplicationConfigurations API. Provides a S3 bucket replication configuration resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketReplicationConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.rule) || (has(self.initProvider) && has(self.initProvider.rule))",message="spec.forProvider.rule is a required parameter" + Spec BucketReplicationConfigurationSpec `json:"spec"` + Status BucketReplicationConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketReplicationConfigurationList contains a list of BucketReplicationConfigurations +type BucketReplicationConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketReplicationConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + BucketReplicationConfiguration_Kind = "BucketReplicationConfiguration" + BucketReplicationConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketReplicationConfiguration_Kind}.String() + BucketReplicationConfiguration_KindAPIVersion = BucketReplicationConfiguration_Kind + "." 
+ CRDGroupVersion.String() + BucketReplicationConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(BucketReplicationConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketReplicationConfiguration{}, &BucketReplicationConfigurationList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketserversideencryptionconfiguration_terraformed.go b/apis/s3/v1beta2/zz_bucketserversideencryptionconfiguration_terraformed.go new file mode 100755 index 0000000000..17a1790ea9 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketserversideencryptionconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketServerSideEncryptionConfiguration +func (mg *BucketServerSideEncryptionConfiguration) GetTerraformResourceType() string { + return "aws_s3_bucket_server_side_encryption_configuration" +} + +// GetConnectionDetailsMapping for this BucketServerSideEncryptionConfiguration +func (tr *BucketServerSideEncryptionConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketServerSideEncryptionConfiguration +func (tr *BucketServerSideEncryptionConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketServerSideEncryptionConfiguration +func (tr *BucketServerSideEncryptionConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketServerSideEncryptionConfiguration +func (tr *BucketServerSideEncryptionConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketServerSideEncryptionConfiguration +func (tr *BucketServerSideEncryptionConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketServerSideEncryptionConfiguration +func (tr *BucketServerSideEncryptionConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketServerSideEncryptionConfiguration +func (tr *BucketServerSideEncryptionConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketServerSideEncryptionConfiguration +func (tr *BucketServerSideEncryptionConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketServerSideEncryptionConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BucketServerSideEncryptionConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &BucketServerSideEncryptionConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketServerSideEncryptionConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketserversideencryptionconfiguration_types.go b/apis/s3/v1beta2/zz_bucketserversideencryptionconfiguration_types.go new file mode 100755 index 0000000000..021eb76160 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketserversideencryptionconfiguration_types.go @@ -0,0 +1,220 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketServerSideEncryptionConfigurationInitParameters struct { + + // ID (name) of the bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Set of server-side encryption configuration rules. See below. Currently, only a single rule is supported. + Rule []BucketServerSideEncryptionConfigurationRuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketServerSideEncryptionConfigurationObservation struct { + + // ID (name) of the bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // The bucket or bucket and expected_bucket_owner separated by a comma (,) if the latter is provided. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Set of server-side encryption configuration rules. See below. Currently, only a single rule is supported. 
+ Rule []BucketServerSideEncryptionConfigurationRuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketServerSideEncryptionConfigurationParameters struct { + + // ID (name) of the bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. + // +kubebuilder:validation:Optional + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Set of server-side encryption configuration rules. See below. Currently, only a single rule is supported. + // +kubebuilder:validation:Optional + Rule []BucketServerSideEncryptionConfigurationRuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type BucketServerSideEncryptionConfigurationRuleInitParameters struct { + + // Single object for setting server-side encryption by default. See below. + ApplyServerSideEncryptionByDefault *RuleApplyServerSideEncryptionByDefaultInitParameters `json:"applyServerSideEncryptionByDefault,omitempty" tf:"apply_server_side_encryption_by_default,omitempty"` + + // Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. 
+ BucketKeyEnabled *bool `json:"bucketKeyEnabled,omitempty" tf:"bucket_key_enabled,omitempty"` +} + +type BucketServerSideEncryptionConfigurationRuleObservation struct { + + // Single object for setting server-side encryption by default. See below. + ApplyServerSideEncryptionByDefault *RuleApplyServerSideEncryptionByDefaultObservation `json:"applyServerSideEncryptionByDefault,omitempty" tf:"apply_server_side_encryption_by_default,omitempty"` + + // Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. + BucketKeyEnabled *bool `json:"bucketKeyEnabled,omitempty" tf:"bucket_key_enabled,omitempty"` +} + +type BucketServerSideEncryptionConfigurationRuleParameters struct { + + // Single object for setting server-side encryption by default. See below. + // +kubebuilder:validation:Optional + ApplyServerSideEncryptionByDefault *RuleApplyServerSideEncryptionByDefaultParameters `json:"applyServerSideEncryptionByDefault,omitempty" tf:"apply_server_side_encryption_by_default,omitempty"` + + // Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. + // +kubebuilder:validation:Optional + BucketKeyEnabled *bool `json:"bucketKeyEnabled,omitempty" tf:"bucket_key_enabled,omitempty"` +} + +type RuleApplyServerSideEncryptionByDefaultInitParameters struct { + + // AWS KMS master key ID used for the SSE-KMS encryption. This can only be used when you set the value of sse_algorithm as aws:kms. The default aws/s3 AWS KMS master key is used if this element is absent while the sse_algorithm is aws:kms. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + KMSMasterKeyID *string `json:"kmsMasterKeyId,omitempty" tf:"kms_master_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsMasterKeyId. 
+ // +kubebuilder:validation:Optional + KMSMasterKeyIDRef *v1.Reference `json:"kmsMasterKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsMasterKeyId. + // +kubebuilder:validation:Optional + KMSMasterKeyIDSelector *v1.Selector `json:"kmsMasterKeyIdSelector,omitempty" tf:"-"` + + // Server-side encryption algorithm to use. Valid values are AES256, aws:kms, and aws:kms:dsse + SseAlgorithm *string `json:"sseAlgorithm,omitempty" tf:"sse_algorithm,omitempty"` +} + +type RuleApplyServerSideEncryptionByDefaultObservation struct { + + // AWS KMS master key ID used for the SSE-KMS encryption. This can only be used when you set the value of sse_algorithm as aws:kms. The default aws/s3 AWS KMS master key is used if this element is absent while the sse_algorithm is aws:kms. + KMSMasterKeyID *string `json:"kmsMasterKeyId,omitempty" tf:"kms_master_key_id,omitempty"` + + // Server-side encryption algorithm to use. Valid values are AES256, aws:kms, and aws:kms:dsse + SseAlgorithm *string `json:"sseAlgorithm,omitempty" tf:"sse_algorithm,omitempty"` +} + +type RuleApplyServerSideEncryptionByDefaultParameters struct { + + // AWS KMS master key ID used for the SSE-KMS encryption. This can only be used when you set the value of sse_algorithm as aws:kms. The default aws/s3 AWS KMS master key is used if this element is absent while the sse_algorithm is aws:kms. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + KMSMasterKeyID *string `json:"kmsMasterKeyId,omitempty" tf:"kms_master_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsMasterKeyId. + // +kubebuilder:validation:Optional + KMSMasterKeyIDRef *v1.Reference `json:"kmsMasterKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsMasterKeyId. 
+ // +kubebuilder:validation:Optional + KMSMasterKeyIDSelector *v1.Selector `json:"kmsMasterKeyIdSelector,omitempty" tf:"-"` + + // Server-side encryption algorithm to use. Valid values are AES256, aws:kms, and aws:kms:dsse + // +kubebuilder:validation:Optional + SseAlgorithm *string `json:"sseAlgorithm" tf:"sse_algorithm,omitempty"` +} + +// BucketServerSideEncryptionConfigurationSpec defines the desired state of BucketServerSideEncryptionConfiguration +type BucketServerSideEncryptionConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketServerSideEncryptionConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketServerSideEncryptionConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// BucketServerSideEncryptionConfigurationStatus defines the observed state of BucketServerSideEncryptionConfiguration. +type BucketServerSideEncryptionConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketServerSideEncryptionConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketServerSideEncryptionConfiguration is the Schema for the BucketServerSideEncryptionConfigurations API. Provides a S3 bucket server-side encryption configuration resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketServerSideEncryptionConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.rule) || (has(self.initProvider) && has(self.initProvider.rule))",message="spec.forProvider.rule is a required parameter" + Spec BucketServerSideEncryptionConfigurationSpec `json:"spec"` + Status BucketServerSideEncryptionConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketServerSideEncryptionConfigurationList contains a list of BucketServerSideEncryptionConfigurations +type BucketServerSideEncryptionConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketServerSideEncryptionConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + BucketServerSideEncryptionConfiguration_Kind = "BucketServerSideEncryptionConfiguration" + BucketServerSideEncryptionConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketServerSideEncryptionConfiguration_Kind}.String() + BucketServerSideEncryptionConfiguration_KindAPIVersion = BucketServerSideEncryptionConfiguration_Kind + "." 
+ CRDGroupVersion.String() + BucketServerSideEncryptionConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(BucketServerSideEncryptionConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketServerSideEncryptionConfiguration{}, &BucketServerSideEncryptionConfigurationList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketversioning_terraformed.go b/apis/s3/v1beta2/zz_bucketversioning_terraformed.go new file mode 100755 index 0000000000..d45416ae75 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketversioning_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketVersioning +func (mg *BucketVersioning) GetTerraformResourceType() string { + return "aws_s3_bucket_versioning" +} + +// GetConnectionDetailsMapping for this BucketVersioning +func (tr *BucketVersioning) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketVersioning +func (tr *BucketVersioning) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketVersioning +func (tr *BucketVersioning) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketVersioning +func (tr *BucketVersioning) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this BucketVersioning +func (tr *BucketVersioning) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketVersioning +func (tr *BucketVersioning) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketVersioning +func (tr *BucketVersioning) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketVersioning +func (tr *BucketVersioning) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketVersioning using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BucketVersioning) LateInitialize(attrs []byte) (bool, error) { + params := &BucketVersioningParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketVersioning) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketversioning_types.go b/apis/s3/v1beta2/zz_bucketversioning_types.go new file mode 100755 index 0000000000..f0c91ca2e0 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketversioning_types.go @@ -0,0 +1,181 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketVersioningInitParameters struct { + + // Name of the S3 bucket. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. + Mfa *string `json:"mfa,omitempty" tf:"mfa,omitempty"` + + // Configuration block for the versioning parameters. See below. + VersioningConfiguration *VersioningConfigurationInitParameters `json:"versioningConfiguration,omitempty" tf:"versioning_configuration,omitempty"` +} + +type BucketVersioningObservation struct { + + // Name of the S3 bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // The bucket or bucket and expected_bucket_owner separated by a comma (,) if the latter is provided. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. + Mfa *string `json:"mfa,omitempty" tf:"mfa,omitempty"` + + // Configuration block for the versioning parameters. See below. 
+ VersioningConfiguration *VersioningConfigurationObservation `json:"versioningConfiguration,omitempty" tf:"versioning_configuration,omitempty"` +} + +type BucketVersioningParameters struct { + + // Name of the S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Account ID of the expected bucket owner. + // +kubebuilder:validation:Optional + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. + // +kubebuilder:validation:Optional + Mfa *string `json:"mfa,omitempty" tf:"mfa,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block for the versioning parameters. See below. + // +kubebuilder:validation:Optional + VersioningConfiguration *VersioningConfigurationParameters `json:"versioningConfiguration,omitempty" tf:"versioning_configuration,omitempty"` +} + +type VersioningConfigurationInitParameters struct { + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. Valid values: Enabled or Disabled. + MfaDelete *string `json:"mfaDelete,omitempty" tf:"mfa_delete,omitempty"` + + // Versioning state of the bucket. 
Valid values: Enabled, Suspended, or Disabled. Disabled should only be used when creating or importing resources that correspond to unversioned S3 buckets. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type VersioningConfigurationObservation struct { + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. Valid values: Enabled or Disabled. + MfaDelete *string `json:"mfaDelete,omitempty" tf:"mfa_delete,omitempty"` + + // Versioning state of the bucket. Valid values: Enabled, Suspended, or Disabled. Disabled should only be used when creating or importing resources that correspond to unversioned S3 buckets. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type VersioningConfigurationParameters struct { + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. Valid values: Enabled or Disabled. + // +kubebuilder:validation:Optional + MfaDelete *string `json:"mfaDelete,omitempty" tf:"mfa_delete,omitempty"` + + // Versioning state of the bucket. Valid values: Enabled, Suspended, or Disabled. Disabled should only be used when creating or importing resources that correspond to unversioned S3 buckets. + // +kubebuilder:validation:Optional + Status *string `json:"status" tf:"status,omitempty"` +} + +// BucketVersioningSpec defines the desired state of BucketVersioning +type BucketVersioningSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketVersioningParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BucketVersioningInitParameters `json:"initProvider,omitempty"` +} + +// BucketVersioningStatus defines the observed state of BucketVersioning. +type BucketVersioningStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketVersioningObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketVersioning is the Schema for the BucketVersionings API. Provides an S3 bucket versioning resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketVersioning struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.versioningConfiguration) || (has(self.initProvider) && has(self.initProvider.versioningConfiguration))",message="spec.forProvider.versioningConfiguration is a required parameter" + Spec BucketVersioningSpec `json:"spec"` + Status BucketVersioningStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketVersioningList contains a list of BucketVersionings +type BucketVersioningList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta 
`json:"metadata,omitempty"` + Items []BucketVersioning `json:"items"` +} + +// Repository type metadata. +var ( + BucketVersioning_Kind = "BucketVersioning" + BucketVersioning_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketVersioning_Kind}.String() + BucketVersioning_KindAPIVersion = BucketVersioning_Kind + "." + CRDGroupVersion.String() + BucketVersioning_GroupVersionKind = CRDGroupVersion.WithKind(BucketVersioning_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketVersioning{}, &BucketVersioningList{}) +} diff --git a/apis/s3/v1beta2/zz_bucketwebsiteconfiguration_terraformed.go b/apis/s3/v1beta2/zz_bucketwebsiteconfiguration_terraformed.go new file mode 100755 index 0000000000..546a20624b --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketwebsiteconfiguration_terraformed.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BucketWebsiteConfiguration +func (mg *BucketWebsiteConfiguration) GetTerraformResourceType() string { + return "aws_s3_bucket_website_configuration" +} + +// GetConnectionDetailsMapping for this BucketWebsiteConfiguration +func (tr *BucketWebsiteConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BucketWebsiteConfiguration +func (tr *BucketWebsiteConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BucketWebsiteConfiguration +func (tr *BucketWebsiteConfiguration) SetObservation(obs map[string]any) error { + p, err 
:= json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BucketWebsiteConfiguration +func (tr *BucketWebsiteConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BucketWebsiteConfiguration +func (tr *BucketWebsiteConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BucketWebsiteConfiguration +func (tr *BucketWebsiteConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BucketWebsiteConfiguration +func (tr *BucketWebsiteConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BucketWebsiteConfiguration +func (tr *BucketWebsiteConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BucketWebsiteConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BucketWebsiteConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &BucketWebsiteConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("RoutingRule")) + opts = append(opts, resource.WithNameFilter("RoutingRules")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BucketWebsiteConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_bucketwebsiteconfiguration_types.go b/apis/s3/v1beta2/zz_bucketwebsiteconfiguration_types.go new file mode 100755 index 0000000000..e77bc3ec18 --- /dev/null +++ b/apis/s3/v1beta2/zz_bucketwebsiteconfiguration_types.go @@ -0,0 +1,380 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BucketWebsiteConfigurationInitParameters struct { + + // Name of the bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name of the error document for the website. See below. + ErrorDocument *ErrorDocumentInitParameters `json:"errorDocument,omitempty" tf:"error_document,omitempty"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Name of the index document for the website. See below. + IndexDocument *IndexDocumentInitParameters `json:"indexDocument,omitempty" tf:"index_document,omitempty"` + + // Redirect behavior for every request to this bucket's website endpoint. See below. Conflicts with error_document, index_document, and routing_rule. + RedirectAllRequestsTo *RedirectAllRequestsToInitParameters `json:"redirectAllRequestsTo,omitempty" tf:"redirect_all_requests_to,omitempty"` + + // List of rules that define when a redirect is applied and the redirect behavior. See below. + RoutingRule []RoutingRuleInitParameters `json:"routingRule,omitempty" tf:"routing_rule,omitempty"` + + // JSON array containing routing rules + // describing redirect behavior and when redirects are applied. 
Use this parameter when your routing rules contain empty String values ("") as seen in the example above. + RoutingRules *string `json:"routingRules,omitempty" tf:"routing_rules,omitempty"` +} + +type BucketWebsiteConfigurationObservation struct { + + // Name of the bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Name of the error document for the website. See below. + ErrorDocument *ErrorDocumentObservation `json:"errorDocument,omitempty" tf:"error_document,omitempty"` + + // Account ID of the expected bucket owner. + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // The bucket or bucket and expected_bucket_owner separated by a comma (,) if the latter is provided. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the index document for the website. See below. + IndexDocument *IndexDocumentObservation `json:"indexDocument,omitempty" tf:"index_document,omitempty"` + + // Redirect behavior for every request to this bucket's website endpoint. See below. Conflicts with error_document, index_document, and routing_rule. + RedirectAllRequestsTo *RedirectAllRequestsToObservation `json:"redirectAllRequestsTo,omitempty" tf:"redirect_all_requests_to,omitempty"` + + // List of rules that define when a redirect is applied and the redirect behavior. See below. + RoutingRule []RoutingRuleObservation `json:"routingRule,omitempty" tf:"routing_rule,omitempty"` + + // JSON array containing routing rules + // describing redirect behavior and when redirects are applied. Use this parameter when your routing rules contain empty String values ("") as seen in the example above. + RoutingRules *string `json:"routingRules,omitempty" tf:"routing_rules,omitempty"` + + // Domain of the website endpoint. This is used to create Route 53 alias records. + WebsiteDomain *string `json:"websiteDomain,omitempty" tf:"website_domain,omitempty"` + + // Website endpoint. 
+ WebsiteEndpoint *string `json:"websiteEndpoint,omitempty" tf:"website_endpoint,omitempty"` +} + +type BucketWebsiteConfigurationParameters struct { + + // Name of the bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name of the error document for the website. See below. + // +kubebuilder:validation:Optional + ErrorDocument *ErrorDocumentParameters `json:"errorDocument,omitempty" tf:"error_document,omitempty"` + + // Account ID of the expected bucket owner. + // +kubebuilder:validation:Optional + ExpectedBucketOwner *string `json:"expectedBucketOwner,omitempty" tf:"expected_bucket_owner,omitempty"` + + // Name of the index document for the website. See below. + // +kubebuilder:validation:Optional + IndexDocument *IndexDocumentParameters `json:"indexDocument,omitempty" tf:"index_document,omitempty"` + + // Redirect behavior for every request to this bucket's website endpoint. See below. Conflicts with error_document, index_document, and routing_rule. + // +kubebuilder:validation:Optional + RedirectAllRequestsTo *RedirectAllRequestsToParameters `json:"redirectAllRequestsTo,omitempty" tf:"redirect_all_requests_to,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // List of rules that define when a redirect is applied and the redirect behavior. 
See below. + // +kubebuilder:validation:Optional + RoutingRule []RoutingRuleParameters `json:"routingRule,omitempty" tf:"routing_rule,omitempty"` + + // JSON array containing routing rules + // describing redirect behavior and when redirects are applied. Use this parameter when your routing rules contain empty String values ("") as seen in the example above. + // +kubebuilder:validation:Optional + RoutingRules *string `json:"routingRules,omitempty" tf:"routing_rules,omitempty"` +} + +type ConditionInitParameters struct { + + // HTTP error code when the redirect is applied. If specified with key_prefix_equals, then both must be true for the redirect to be applied. + HTTPErrorCodeReturnedEquals *string `json:"httpErrorCodeReturnedEquals,omitempty" tf:"http_error_code_returned_equals,omitempty"` + + // Object key name prefix when the redirect is applied. If specified with http_error_code_returned_equals, then both must be true for the redirect to be applied. + KeyPrefixEquals *string `json:"keyPrefixEquals,omitempty" tf:"key_prefix_equals,omitempty"` +} + +type ConditionObservation struct { + + // HTTP error code when the redirect is applied. If specified with key_prefix_equals, then both must be true for the redirect to be applied. + HTTPErrorCodeReturnedEquals *string `json:"httpErrorCodeReturnedEquals,omitempty" tf:"http_error_code_returned_equals,omitempty"` + + // Object key name prefix when the redirect is applied. If specified with http_error_code_returned_equals, then both must be true for the redirect to be applied. + KeyPrefixEquals *string `json:"keyPrefixEquals,omitempty" tf:"key_prefix_equals,omitempty"` +} + +type ConditionParameters struct { + + // HTTP error code when the redirect is applied. If specified with key_prefix_equals, then both must be true for the redirect to be applied. 
+ // +kubebuilder:validation:Optional + HTTPErrorCodeReturnedEquals *string `json:"httpErrorCodeReturnedEquals,omitempty" tf:"http_error_code_returned_equals,omitempty"` + + // Object key name prefix when the redirect is applied. If specified with http_error_code_returned_equals, then both must be true for the redirect to be applied. + // +kubebuilder:validation:Optional + KeyPrefixEquals *string `json:"keyPrefixEquals,omitempty" tf:"key_prefix_equals,omitempty"` +} + +type ErrorDocumentInitParameters struct { + + // Object key name to use when a 4XX class error occurs. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type ErrorDocumentObservation struct { + + // Object key name to use when a 4XX class error occurs. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type ErrorDocumentParameters struct { + + // Object key name to use when a 4XX class error occurs. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` +} + +type IndexDocumentInitParameters struct { + + // Suffix that is appended to a request that is for a directory on the website endpoint. + // For example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html. + // The suffix must not be empty and must not include a slash character. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type IndexDocumentObservation struct { + + // Suffix that is appended to a request that is for a directory on the website endpoint. + // For example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html. + // The suffix must not be empty and must not include a slash character. 
+ Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type IndexDocumentParameters struct { + + // Suffix that is appended to a request that is for a directory on the website endpoint. + // For example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html. + // The suffix must not be empty and must not include a slash character. + // +kubebuilder:validation:Optional + Suffix *string `json:"suffix" tf:"suffix,omitempty"` +} + +type RedirectAllRequestsToInitParameters struct { + + // Name of the host where requests are redirected. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // Protocol to use when redirecting requests. The default is the protocol that is used in the original request. Valid values: http, https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type RedirectAllRequestsToObservation struct { + + // Name of the host where requests are redirected. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // Protocol to use when redirecting requests. The default is the protocol that is used in the original request. Valid values: http, https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type RedirectAllRequestsToParameters struct { + + // Name of the host where requests are redirected. + // +kubebuilder:validation:Optional + HostName *string `json:"hostName" tf:"host_name,omitempty"` + + // Protocol to use when redirecting requests. The default is the protocol that is used in the original request. Valid values: http, https. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type RedirectInitParameters struct { + + // HTTP redirect code to use on the response. 
+ HTTPRedirectCode *string `json:"httpRedirectCode,omitempty" tf:"http_redirect_code,omitempty"` + + // Name of the host where requests are redirected. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // Protocol to use when redirecting requests. The default is the protocol that is used in the original request. Valid values: http, https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with key_prefix_equals set to docs/ and in the redirect set replace_key_prefix_with to /documents. + ReplaceKeyPrefixWith *string `json:"replaceKeyPrefixWith,omitempty" tf:"replace_key_prefix_with,omitempty"` + + // Specific object key to use in the redirect request. For example, redirect request to error.html. + ReplaceKeyWith *string `json:"replaceKeyWith,omitempty" tf:"replace_key_with,omitempty"` +} + +type RedirectObservation struct { + + // HTTP redirect code to use on the response. + HTTPRedirectCode *string `json:"httpRedirectCode,omitempty" tf:"http_redirect_code,omitempty"` + + // Name of the host where requests are redirected. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // Protocol to use when redirecting requests. The default is the protocol that is used in the original request. Valid values: http, https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with key_prefix_equals set to docs/ and in the redirect set replace_key_prefix_with to /documents. 
+ ReplaceKeyPrefixWith *string `json:"replaceKeyPrefixWith,omitempty" tf:"replace_key_prefix_with,omitempty"` + + // Specific object key to use in the redirect request. For example, redirect request to error.html. + ReplaceKeyWith *string `json:"replaceKeyWith,omitempty" tf:"replace_key_with,omitempty"` +} + +type RedirectParameters struct { + + // HTTP redirect code to use on the response. + // +kubebuilder:validation:Optional + HTTPRedirectCode *string `json:"httpRedirectCode,omitempty" tf:"http_redirect_code,omitempty"` + + // Name of the host where requests are redirected. + // +kubebuilder:validation:Optional + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // Protocol to use when redirecting requests. The default is the protocol that is used in the original request. Valid values: http, https. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with key_prefix_equals set to docs/ and in the redirect set replace_key_prefix_with to /documents. + // +kubebuilder:validation:Optional + ReplaceKeyPrefixWith *string `json:"replaceKeyPrefixWith,omitempty" tf:"replace_key_prefix_with,omitempty"` + + // Specific object key to use in the redirect request. For example, redirect request to error.html. + // +kubebuilder:validation:Optional + ReplaceKeyWith *string `json:"replaceKeyWith,omitempty" tf:"replace_key_with,omitempty"` +} + +type RoutingRuleInitParameters struct { + + // Configuration block for describing a condition that must be met for the specified redirect to apply. See below. + Condition *ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Configuration block for redirect information. See below. 
+ Redirect *RedirectInitParameters `json:"redirect,omitempty" tf:"redirect,omitempty"` +} + +type RoutingRuleObservation struct { + + // Configuration block for describing a condition that must be met for the specified redirect to apply. See below. + Condition *ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + // Configuration block for redirect information. See below. + Redirect *RedirectObservation `json:"redirect,omitempty" tf:"redirect,omitempty"` +} + +type RoutingRuleParameters struct { + + // Configuration block for describing a condition that must be met for the specified redirect to apply. See below. + // +kubebuilder:validation:Optional + Condition *ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Configuration block for redirect information. See below. + // +kubebuilder:validation:Optional + Redirect *RedirectParameters `json:"redirect" tf:"redirect,omitempty"` +} + +// BucketWebsiteConfigurationSpec defines the desired state of BucketWebsiteConfiguration +type BucketWebsiteConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BucketWebsiteConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider BucketWebsiteConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// BucketWebsiteConfigurationStatus defines the observed state of BucketWebsiteConfiguration. +type BucketWebsiteConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BucketWebsiteConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BucketWebsiteConfiguration is the Schema for the BucketWebsiteConfigurations API. Provides an S3 bucket website configuration resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type BucketWebsiteConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec BucketWebsiteConfigurationSpec `json:"spec"` + Status BucketWebsiteConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketWebsiteConfigurationList contains a list of BucketWebsiteConfigurations +type BucketWebsiteConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BucketWebsiteConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + BucketWebsiteConfiguration_Kind = "BucketWebsiteConfiguration" + BucketWebsiteConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BucketWebsiteConfiguration_Kind}.String() + BucketWebsiteConfiguration_KindAPIVersion = BucketWebsiteConfiguration_Kind + "." 
+ CRDGroupVersion.String() + BucketWebsiteConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(BucketWebsiteConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&BucketWebsiteConfiguration{}, &BucketWebsiteConfigurationList{}) +} diff --git a/apis/s3/v1beta2/zz_generated.conversion_hubs.go b/apis/s3/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..f19971d090 --- /dev/null +++ b/apis/s3/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,52 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Bucket) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketACL) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketAnalyticsConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketIntelligentTieringConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketInventory) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketLifecycleConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketLogging) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketMetric) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketObjectLockConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketOwnershipControls) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketReplicationConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketServerSideEncryptionConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketVersioning) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BucketWebsiteConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *Object) Hub() {} diff --git a/apis/s3/v1beta2/zz_generated.deepcopy.go b/apis/s3/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a594765853 --- /dev/null +++ b/apis/s3/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,11069 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AbortIncompleteMultipartUploadInitParameters) DeepCopyInto(out *AbortIncompleteMultipartUploadInitParameters) { + *out = *in + if in.DaysAfterInitiation != nil { + in, out := &in.DaysAfterInitiation, &out.DaysAfterInitiation + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AbortIncompleteMultipartUploadInitParameters. +func (in *AbortIncompleteMultipartUploadInitParameters) DeepCopy() *AbortIncompleteMultipartUploadInitParameters { + if in == nil { + return nil + } + out := new(AbortIncompleteMultipartUploadInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AbortIncompleteMultipartUploadObservation) DeepCopyInto(out *AbortIncompleteMultipartUploadObservation) { + *out = *in + if in.DaysAfterInitiation != nil { + in, out := &in.DaysAfterInitiation, &out.DaysAfterInitiation + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AbortIncompleteMultipartUploadObservation. 
+func (in *AbortIncompleteMultipartUploadObservation) DeepCopy() *AbortIncompleteMultipartUploadObservation { + if in == nil { + return nil + } + out := new(AbortIncompleteMultipartUploadObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AbortIncompleteMultipartUploadParameters) DeepCopyInto(out *AbortIncompleteMultipartUploadParameters) { + *out = *in + if in.DaysAfterInitiation != nil { + in, out := &in.DaysAfterInitiation, &out.DaysAfterInitiation + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AbortIncompleteMultipartUploadParameters. +func (in *AbortIncompleteMultipartUploadParameters) DeepCopy() *AbortIncompleteMultipartUploadParameters { + if in == nil { + return nil + } + out := new(AbortIncompleteMultipartUploadParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlPolicyGrantInitParameters) DeepCopyInto(out *AccessControlPolicyGrantInitParameters) { + *out = *in + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(GranteeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlPolicyGrantInitParameters. +func (in *AccessControlPolicyGrantInitParameters) DeepCopy() *AccessControlPolicyGrantInitParameters { + if in == nil { + return nil + } + out := new(AccessControlPolicyGrantInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlPolicyGrantObservation) DeepCopyInto(out *AccessControlPolicyGrantObservation) { + *out = *in + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(GranteeObservation) + (*in).DeepCopyInto(*out) + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlPolicyGrantObservation. +func (in *AccessControlPolicyGrantObservation) DeepCopy() *AccessControlPolicyGrantObservation { + if in == nil { + return nil + } + out := new(AccessControlPolicyGrantObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlPolicyGrantParameters) DeepCopyInto(out *AccessControlPolicyGrantParameters) { + *out = *in + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(GranteeParameters) + (*in).DeepCopyInto(*out) + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlPolicyGrantParameters. +func (in *AccessControlPolicyGrantParameters) DeepCopy() *AccessControlPolicyGrantParameters { + if in == nil { + return nil + } + out := new(AccessControlPolicyGrantParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlPolicyInitParameters) DeepCopyInto(out *AccessControlPolicyInitParameters) { + *out = *in + if in.Grant != nil { + in, out := &in.Grant, &out.Grant + *out = make([]AccessControlPolicyGrantInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(OwnerInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlPolicyInitParameters. +func (in *AccessControlPolicyInitParameters) DeepCopy() *AccessControlPolicyInitParameters { + if in == nil { + return nil + } + out := new(AccessControlPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlPolicyObservation) DeepCopyInto(out *AccessControlPolicyObservation) { + *out = *in + if in.Grant != nil { + in, out := &in.Grant, &out.Grant + *out = make([]AccessControlPolicyGrantObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(OwnerObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlPolicyObservation. +func (in *AccessControlPolicyObservation) DeepCopy() *AccessControlPolicyObservation { + if in == nil { + return nil + } + out := new(AccessControlPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlPolicyParameters) DeepCopyInto(out *AccessControlPolicyParameters) { + *out = *in + if in.Grant != nil { + in, out := &in.Grant, &out.Grant + *out = make([]AccessControlPolicyGrantParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(OwnerParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlPolicyParameters. +func (in *AccessControlPolicyParameters) DeepCopy() *AccessControlPolicyParameters { + if in == nil { + return nil + } + out := new(AccessControlPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlTranslationInitParameters) DeepCopyInto(out *AccessControlTranslationInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlTranslationInitParameters. +func (in *AccessControlTranslationInitParameters) DeepCopy() *AccessControlTranslationInitParameters { + if in == nil { + return nil + } + out := new(AccessControlTranslationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlTranslationObservation) DeepCopyInto(out *AccessControlTranslationObservation) { + *out = *in + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlTranslationObservation. 
+func (in *AccessControlTranslationObservation) DeepCopy() *AccessControlTranslationObservation { + if in == nil { + return nil + } + out := new(AccessControlTranslationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlTranslationParameters) DeepCopyInto(out *AccessControlTranslationParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlTranslationParameters. +func (in *AccessControlTranslationParameters) DeepCopy() *AccessControlTranslationParameters { + if in == nil { + return nil + } + out := new(AccessControlTranslationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndInitParameters) DeepCopyInto(out *AndInitParameters) { + *out = *in + if in.ObjectSizeGreaterThan != nil { + in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan + *out = new(float64) + **out = **in + } + if in.ObjectSizeLessThan != nil { + in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndInitParameters. 
+func (in *AndInitParameters) DeepCopy() *AndInitParameters { + if in == nil { + return nil + } + out := new(AndInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AndObservation) DeepCopyInto(out *AndObservation) { + *out = *in + if in.ObjectSizeGreaterThan != nil { + in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan + *out = new(float64) + **out = **in + } + if in.ObjectSizeLessThan != nil { + in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndObservation. +func (in *AndObservation) DeepCopy() *AndObservation { + if in == nil { + return nil + } + out := new(AndObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AndParameters) DeepCopyInto(out *AndParameters) { + *out = *in + if in.ObjectSizeGreaterThan != nil { + in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan + *out = new(float64) + **out = **in + } + if in.ObjectSizeLessThan != nil { + in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan + *out = new(float64) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AndParameters. +func (in *AndParameters) DeepCopy() *AndParameters { + if in == nil { + return nil + } + out := new(AndParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyServerSideEncryptionByDefaultInitParameters) DeepCopyInto(out *ApplyServerSideEncryptionByDefaultInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyServerSideEncryptionByDefaultInitParameters. +func (in *ApplyServerSideEncryptionByDefaultInitParameters) DeepCopy() *ApplyServerSideEncryptionByDefaultInitParameters { + if in == nil { + return nil + } + out := new(ApplyServerSideEncryptionByDefaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplyServerSideEncryptionByDefaultObservation) DeepCopyInto(out *ApplyServerSideEncryptionByDefaultObservation) { + *out = *in + if in.KMSMasterKeyID != nil { + in, out := &in.KMSMasterKeyID, &out.KMSMasterKeyID + *out = new(string) + **out = **in + } + if in.SseAlgorithm != nil { + in, out := &in.SseAlgorithm, &out.SseAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyServerSideEncryptionByDefaultObservation. +func (in *ApplyServerSideEncryptionByDefaultObservation) DeepCopy() *ApplyServerSideEncryptionByDefaultObservation { + if in == nil { + return nil + } + out := new(ApplyServerSideEncryptionByDefaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyServerSideEncryptionByDefaultParameters) DeepCopyInto(out *ApplyServerSideEncryptionByDefaultParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyServerSideEncryptionByDefaultParameters. +func (in *ApplyServerSideEncryptionByDefaultParameters) DeepCopy() *ApplyServerSideEncryptionByDefaultParameters { + if in == nil { + return nil + } + out := new(ApplyServerSideEncryptionByDefaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bucket) DeepCopyInto(out *Bucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. 
+func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketACL) DeepCopyInto(out *BucketACL) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketACL. +func (in *BucketACL) DeepCopy() *BucketACL { + if in == nil { + return nil + } + out := new(BucketACL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketACL) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketACLInitParameters) DeepCopyInto(out *BucketACLInitParameters) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.AccessControlPolicy != nil { + in, out := &in.AccessControlPolicy, &out.AccessControlPolicy + *out = new(AccessControlPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketACLInitParameters. +func (in *BucketACLInitParameters) DeepCopy() *BucketACLInitParameters { + if in == nil { + return nil + } + out := new(BucketACLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketACLList) DeepCopyInto(out *BucketACLList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketACL, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketACLList. +func (in *BucketACLList) DeepCopy() *BucketACLList { + if in == nil { + return nil + } + out := new(BucketACLList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BucketACLList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketACLObservation) DeepCopyInto(out *BucketACLObservation) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.AccessControlPolicy != nil { + in, out := &in.AccessControlPolicy, &out.AccessControlPolicy + *out = new(AccessControlPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketACLObservation. +func (in *BucketACLObservation) DeepCopy() *BucketACLObservation { + if in == nil { + return nil + } + out := new(BucketACLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketACLParameters) DeepCopyInto(out *BucketACLParameters) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.AccessControlPolicy != nil { + in, out := &in.AccessControlPolicy, &out.AccessControlPolicy + *out = new(AccessControlPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketACLParameters. +func (in *BucketACLParameters) DeepCopy() *BucketACLParameters { + if in == nil { + return nil + } + out := new(BucketACLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketACLSpec) DeepCopyInto(out *BucketACLSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketACLSpec. +func (in *BucketACLSpec) DeepCopy() *BucketACLSpec { + if in == nil { + return nil + } + out := new(BucketACLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketACLStatus) DeepCopyInto(out *BucketACLStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketACLStatus. +func (in *BucketACLStatus) DeepCopy() *BucketACLStatus { + if in == nil { + return nil + } + out := new(BucketACLStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketAnalyticsConfiguration) DeepCopyInto(out *BucketAnalyticsConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfiguration. +func (in *BucketAnalyticsConfiguration) DeepCopy() *BucketAnalyticsConfiguration { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketAnalyticsConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketAnalyticsConfigurationFilterInitParameters) DeepCopyInto(out *BucketAnalyticsConfigurationFilterInitParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfigurationFilterInitParameters. +func (in *BucketAnalyticsConfigurationFilterInitParameters) DeepCopy() *BucketAnalyticsConfigurationFilterInitParameters { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfigurationFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketAnalyticsConfigurationFilterObservation) DeepCopyInto(out *BucketAnalyticsConfigurationFilterObservation) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfigurationFilterObservation. 
+func (in *BucketAnalyticsConfigurationFilterObservation) DeepCopy() *BucketAnalyticsConfigurationFilterObservation { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfigurationFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketAnalyticsConfigurationFilterParameters) DeepCopyInto(out *BucketAnalyticsConfigurationFilterParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfigurationFilterParameters. +func (in *BucketAnalyticsConfigurationFilterParameters) DeepCopy() *BucketAnalyticsConfigurationFilterParameters { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfigurationFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketAnalyticsConfigurationInitParameters) DeepCopyInto(out *BucketAnalyticsConfigurationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketAnalyticsConfigurationFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageClassAnalysis != nil { + in, out := &in.StorageClassAnalysis, &out.StorageClassAnalysis + *out = new(StorageClassAnalysisInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfigurationInitParameters. +func (in *BucketAnalyticsConfigurationInitParameters) DeepCopy() *BucketAnalyticsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketAnalyticsConfigurationList) DeepCopyInto(out *BucketAnalyticsConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketAnalyticsConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfigurationList. 
+func (in *BucketAnalyticsConfigurationList) DeepCopy() *BucketAnalyticsConfigurationList { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketAnalyticsConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketAnalyticsConfigurationObservation) DeepCopyInto(out *BucketAnalyticsConfigurationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketAnalyticsConfigurationFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageClassAnalysis != nil { + in, out := &in.StorageClassAnalysis, &out.StorageClassAnalysis + *out = new(StorageClassAnalysisObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfigurationObservation. +func (in *BucketAnalyticsConfigurationObservation) DeepCopy() *BucketAnalyticsConfigurationObservation { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketAnalyticsConfigurationParameters) DeepCopyInto(out *BucketAnalyticsConfigurationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketAnalyticsConfigurationFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StorageClassAnalysis != nil { + in, out := &in.StorageClassAnalysis, &out.StorageClassAnalysis + *out = new(StorageClassAnalysisParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfigurationParameters. +func (in *BucketAnalyticsConfigurationParameters) DeepCopy() *BucketAnalyticsConfigurationParameters { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketAnalyticsConfigurationSpec) DeepCopyInto(out *BucketAnalyticsConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfigurationSpec. 
+func (in *BucketAnalyticsConfigurationSpec) DeepCopy() *BucketAnalyticsConfigurationSpec { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketAnalyticsConfigurationStatus) DeepCopyInto(out *BucketAnalyticsConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAnalyticsConfigurationStatus. +func (in *BucketAnalyticsConfigurationStatus) DeepCopy() *BucketAnalyticsConfigurationStatus { + if in == nil { + return nil + } + out := new(BucketAnalyticsConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInitParameters) DeepCopyInto(out *BucketInitParameters) { + *out = *in + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.ObjectLockEnabled != nil { + in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInitParameters. 
+func (in *BucketInitParameters) DeepCopy() *BucketInitParameters { + if in == nil { + return nil + } + out := new(BucketInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketIntelligentTieringConfiguration) DeepCopyInto(out *BucketIntelligentTieringConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfiguration. +func (in *BucketIntelligentTieringConfiguration) DeepCopy() *BucketIntelligentTieringConfiguration { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketIntelligentTieringConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketIntelligentTieringConfigurationFilterInitParameters) DeepCopyInto(out *BucketIntelligentTieringConfigurationFilterInitParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfigurationFilterInitParameters. +func (in *BucketIntelligentTieringConfigurationFilterInitParameters) DeepCopy() *BucketIntelligentTieringConfigurationFilterInitParameters { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfigurationFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketIntelligentTieringConfigurationFilterObservation) DeepCopyInto(out *BucketIntelligentTieringConfigurationFilterObservation) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfigurationFilterObservation. 
+func (in *BucketIntelligentTieringConfigurationFilterObservation) DeepCopy() *BucketIntelligentTieringConfigurationFilterObservation { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfigurationFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketIntelligentTieringConfigurationFilterParameters) DeepCopyInto(out *BucketIntelligentTieringConfigurationFilterParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfigurationFilterParameters. +func (in *BucketIntelligentTieringConfigurationFilterParameters) DeepCopy() *BucketIntelligentTieringConfigurationFilterParameters { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfigurationFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketIntelligentTieringConfigurationInitParameters) DeepCopyInto(out *BucketIntelligentTieringConfigurationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketIntelligentTieringConfigurationFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tiering != nil { + in, out := &in.Tiering, &out.Tiering + *out = make([]TieringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfigurationInitParameters. +func (in *BucketIntelligentTieringConfigurationInitParameters) DeepCopy() *BucketIntelligentTieringConfigurationInitParameters { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketIntelligentTieringConfigurationList) DeepCopyInto(out *BucketIntelligentTieringConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketIntelligentTieringConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfigurationList. +func (in *BucketIntelligentTieringConfigurationList) DeepCopy() *BucketIntelligentTieringConfigurationList { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketIntelligentTieringConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketIntelligentTieringConfigurationObservation) DeepCopyInto(out *BucketIntelligentTieringConfigurationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketIntelligentTieringConfigurationFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tiering != nil { + in, out := &in.Tiering, &out.Tiering + *out = make([]TieringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfigurationObservation. +func (in *BucketIntelligentTieringConfigurationObservation) DeepCopy() *BucketIntelligentTieringConfigurationObservation { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketIntelligentTieringConfigurationParameters) DeepCopyInto(out *BucketIntelligentTieringConfigurationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketIntelligentTieringConfigurationFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tiering != nil { + in, out := &in.Tiering, &out.Tiering + *out = make([]TieringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfigurationParameters. +func (in *BucketIntelligentTieringConfigurationParameters) DeepCopy() *BucketIntelligentTieringConfigurationParameters { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketIntelligentTieringConfigurationSpec) DeepCopyInto(out *BucketIntelligentTieringConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfigurationSpec. +func (in *BucketIntelligentTieringConfigurationSpec) DeepCopy() *BucketIntelligentTieringConfigurationSpec { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketIntelligentTieringConfigurationStatus) DeepCopyInto(out *BucketIntelligentTieringConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketIntelligentTieringConfigurationStatus. +func (in *BucketIntelligentTieringConfigurationStatus) DeepCopy() *BucketIntelligentTieringConfigurationStatus { + if in == nil { + return nil + } + out := new(BucketIntelligentTieringConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventory) DeepCopyInto(out *BucketInventory) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventory. 
+func (in *BucketInventory) DeepCopy() *BucketInventory { + if in == nil { + return nil + } + out := new(BucketInventory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketInventory) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventoryDestinationInitParameters) DeepCopyInto(out *BucketInventoryDestinationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(DestinationBucketInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryDestinationInitParameters. +func (in *BucketInventoryDestinationInitParameters) DeepCopy() *BucketInventoryDestinationInitParameters { + if in == nil { + return nil + } + out := new(BucketInventoryDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventoryDestinationObservation) DeepCopyInto(out *BucketInventoryDestinationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(DestinationBucketObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryDestinationObservation. +func (in *BucketInventoryDestinationObservation) DeepCopy() *BucketInventoryDestinationObservation { + if in == nil { + return nil + } + out := new(BucketInventoryDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BucketInventoryDestinationParameters) DeepCopyInto(out *BucketInventoryDestinationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(DestinationBucketParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryDestinationParameters. +func (in *BucketInventoryDestinationParameters) DeepCopy() *BucketInventoryDestinationParameters { + if in == nil { + return nil + } + out := new(BucketInventoryDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventoryFilterInitParameters) DeepCopyInto(out *BucketInventoryFilterInitParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryFilterInitParameters. +func (in *BucketInventoryFilterInitParameters) DeepCopy() *BucketInventoryFilterInitParameters { + if in == nil { + return nil + } + out := new(BucketInventoryFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventoryFilterObservation) DeepCopyInto(out *BucketInventoryFilterObservation) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryFilterObservation. 
+func (in *BucketInventoryFilterObservation) DeepCopy() *BucketInventoryFilterObservation { + if in == nil { + return nil + } + out := new(BucketInventoryFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventoryFilterParameters) DeepCopyInto(out *BucketInventoryFilterParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryFilterParameters. +func (in *BucketInventoryFilterParameters) DeepCopy() *BucketInventoryFilterParameters { + if in == nil { + return nil + } + out := new(BucketInventoryFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventoryInitParameters) DeepCopyInto(out *BucketInventoryInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(BucketInventoryDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketInventoryFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IncludedObjectVersions != nil { + in, out := &in.IncludedObjectVersions, &out.IncludedObjectVersions + *out = 
new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OptionalFields != nil { + in, out := &in.OptionalFields, &out.OptionalFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryInitParameters. +func (in *BucketInventoryInitParameters) DeepCopy() *BucketInventoryInitParameters { + if in == nil { + return nil + } + out := new(BucketInventoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventoryList) DeepCopyInto(out *BucketInventoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketInventory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryList. +func (in *BucketInventoryList) DeepCopy() *BucketInventoryList { + if in == nil { + return nil + } + out := new(BucketInventoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketInventoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketInventoryObservation) DeepCopyInto(out *BucketInventoryObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(BucketInventoryDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketInventoryFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IncludedObjectVersions != nil { + in, out := &in.IncludedObjectVersions, &out.IncludedObjectVersions + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OptionalFields != nil { + in, out := &in.OptionalFields, &out.OptionalFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryObservation. +func (in *BucketInventoryObservation) DeepCopy() *BucketInventoryObservation { + if in == nil { + return nil + } + out := new(BucketInventoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketInventoryParameters) DeepCopyInto(out *BucketInventoryParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(BucketInventoryDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketInventoryFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.IncludedObjectVersions != nil { + in, out := &in.IncludedObjectVersions, &out.IncludedObjectVersions + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OptionalFields != nil { + in, out := &in.OptionalFields, &out.OptionalFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryParameters. 
+func (in *BucketInventoryParameters) DeepCopy() *BucketInventoryParameters { + if in == nil { + return nil + } + out := new(BucketInventoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventorySpec) DeepCopyInto(out *BucketInventorySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventorySpec. +func (in *BucketInventorySpec) DeepCopy() *BucketInventorySpec { + if in == nil { + return nil + } + out := new(BucketInventorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketInventoryStatus) DeepCopyInto(out *BucketInventoryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketInventoryStatus. +func (in *BucketInventoryStatus) DeepCopy() *BucketInventoryStatus { + if in == nil { + return nil + } + out := new(BucketInventoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLifecycleConfiguration) DeepCopyInto(out *BucketLifecycleConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLifecycleConfiguration. 
+func (in *BucketLifecycleConfiguration) DeepCopy() *BucketLifecycleConfiguration { + if in == nil { + return nil + } + out := new(BucketLifecycleConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketLifecycleConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLifecycleConfigurationInitParameters) DeepCopyInto(out *BucketLifecycleConfigurationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]BucketLifecycleConfigurationRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLifecycleConfigurationInitParameters. +func (in *BucketLifecycleConfigurationInitParameters) DeepCopy() *BucketLifecycleConfigurationInitParameters { + if in == nil { + return nil + } + out := new(BucketLifecycleConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketLifecycleConfigurationList) DeepCopyInto(out *BucketLifecycleConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketLifecycleConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLifecycleConfigurationList. +func (in *BucketLifecycleConfigurationList) DeepCopy() *BucketLifecycleConfigurationList { + if in == nil { + return nil + } + out := new(BucketLifecycleConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketLifecycleConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLifecycleConfigurationObservation) DeepCopyInto(out *BucketLifecycleConfigurationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]BucketLifecycleConfigurationRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLifecycleConfigurationObservation. 
+func (in *BucketLifecycleConfigurationObservation) DeepCopy() *BucketLifecycleConfigurationObservation { + if in == nil { + return nil + } + out := new(BucketLifecycleConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLifecycleConfigurationParameters) DeepCopyInto(out *BucketLifecycleConfigurationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]BucketLifecycleConfigurationRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLifecycleConfigurationParameters. +func (in *BucketLifecycleConfigurationParameters) DeepCopy() *BucketLifecycleConfigurationParameters { + if in == nil { + return nil + } + out := new(BucketLifecycleConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketLifecycleConfigurationRuleInitParameters) DeepCopyInto(out *BucketLifecycleConfigurationRuleInitParameters) { + *out = *in + if in.AbortIncompleteMultipartUpload != nil { + in, out := &in.AbortIncompleteMultipartUpload, &out.AbortIncompleteMultipartUpload + *out = new(AbortIncompleteMultipartUploadInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = new(RuleExpirationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(RuleFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.NoncurrentVersionExpiration != nil { + in, out := &in.NoncurrentVersionExpiration, &out.NoncurrentVersionExpiration + *out = new(RuleNoncurrentVersionExpirationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NoncurrentVersionTransition != nil { + in, out := &in.NoncurrentVersionTransition, &out.NoncurrentVersionTransition + *out = make([]RuleNoncurrentVersionTransitionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Transition != nil { + in, out := &in.Transition, &out.Transition + *out = make([]RuleTransitionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLifecycleConfigurationRuleInitParameters. 
+func (in *BucketLifecycleConfigurationRuleInitParameters) DeepCopy() *BucketLifecycleConfigurationRuleInitParameters { + if in == nil { + return nil + } + out := new(BucketLifecycleConfigurationRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLifecycleConfigurationRuleObservation) DeepCopyInto(out *BucketLifecycleConfigurationRuleObservation) { + *out = *in + if in.AbortIncompleteMultipartUpload != nil { + in, out := &in.AbortIncompleteMultipartUpload, &out.AbortIncompleteMultipartUpload + *out = new(AbortIncompleteMultipartUploadObservation) + (*in).DeepCopyInto(*out) + } + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = new(RuleExpirationObservation) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(RuleFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.NoncurrentVersionExpiration != nil { + in, out := &in.NoncurrentVersionExpiration, &out.NoncurrentVersionExpiration + *out = new(RuleNoncurrentVersionExpirationObservation) + (*in).DeepCopyInto(*out) + } + if in.NoncurrentVersionTransition != nil { + in, out := &in.NoncurrentVersionTransition, &out.NoncurrentVersionTransition + *out = make([]RuleNoncurrentVersionTransitionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Transition != nil { + in, out := &in.Transition, &out.Transition + *out = make([]RuleTransitionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new BucketLifecycleConfigurationRuleObservation. +func (in *BucketLifecycleConfigurationRuleObservation) DeepCopy() *BucketLifecycleConfigurationRuleObservation { + if in == nil { + return nil + } + out := new(BucketLifecycleConfigurationRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLifecycleConfigurationRuleParameters) DeepCopyInto(out *BucketLifecycleConfigurationRuleParameters) { + *out = *in + if in.AbortIncompleteMultipartUpload != nil { + in, out := &in.AbortIncompleteMultipartUpload, &out.AbortIncompleteMultipartUpload + *out = new(AbortIncompleteMultipartUploadParameters) + (*in).DeepCopyInto(*out) + } + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = new(RuleExpirationParameters) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(RuleFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.NoncurrentVersionExpiration != nil { + in, out := &in.NoncurrentVersionExpiration, &out.NoncurrentVersionExpiration + *out = new(RuleNoncurrentVersionExpirationParameters) + (*in).DeepCopyInto(*out) + } + if in.NoncurrentVersionTransition != nil { + in, out := &in.NoncurrentVersionTransition, &out.NoncurrentVersionTransition + *out = make([]RuleNoncurrentVersionTransitionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Transition != nil { + in, out := &in.Transition, &out.Transition + *out = make([]RuleTransitionParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLifecycleConfigurationRuleParameters. +func (in *BucketLifecycleConfigurationRuleParameters) DeepCopy() *BucketLifecycleConfigurationRuleParameters { + if in == nil { + return nil + } + out := new(BucketLifecycleConfigurationRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLifecycleConfigurationSpec) DeepCopyInto(out *BucketLifecycleConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLifecycleConfigurationSpec. +func (in *BucketLifecycleConfigurationSpec) DeepCopy() *BucketLifecycleConfigurationSpec { + if in == nil { + return nil + } + out := new(BucketLifecycleConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLifecycleConfigurationStatus) DeepCopyInto(out *BucketLifecycleConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLifecycleConfigurationStatus. +func (in *BucketLifecycleConfigurationStatus) DeepCopy() *BucketLifecycleConfigurationStatus { + if in == nil { + return nil + } + out := new(BucketLifecycleConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. +func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLogging) DeepCopyInto(out *BucketLogging) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLogging. +func (in *BucketLogging) DeepCopy() *BucketLogging { + if in == nil { + return nil + } + out := new(BucketLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketLogging) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketLoggingInitParameters) DeepCopyInto(out *BucketLoggingInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.TargetBucket != nil { + in, out := &in.TargetBucket, &out.TargetBucket + *out = new(string) + **out = **in + } + if in.TargetBucketRef != nil { + in, out := &in.TargetBucketRef, &out.TargetBucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetBucketSelector != nil { + in, out := &in.TargetBucketSelector, &out.TargetBucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetGrant != nil { + in, out := &in.TargetGrant, &out.TargetGrant + *out = make([]TargetGrantInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetObjectKeyFormat != nil { + in, out := &in.TargetObjectKeyFormat, &out.TargetObjectKeyFormat + *out = new(TargetObjectKeyFormatInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetPrefix != nil { + in, out := &in.TargetPrefix, &out.TargetPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLoggingInitParameters. +func (in *BucketLoggingInitParameters) DeepCopy() *BucketLoggingInitParameters { + if in == nil { + return nil + } + out := new(BucketLoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketLoggingList) DeepCopyInto(out *BucketLoggingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketLogging, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLoggingList. +func (in *BucketLoggingList) DeepCopy() *BucketLoggingList { + if in == nil { + return nil + } + out := new(BucketLoggingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketLoggingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLoggingObservation) DeepCopyInto(out *BucketLoggingObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.TargetBucket != nil { + in, out := &in.TargetBucket, &out.TargetBucket + *out = new(string) + **out = **in + } + if in.TargetGrant != nil { + in, out := &in.TargetGrant, &out.TargetGrant + *out = make([]TargetGrantObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetObjectKeyFormat != nil { + in, out := &in.TargetObjectKeyFormat, &out.TargetObjectKeyFormat + *out = new(TargetObjectKeyFormatObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetPrefix != nil { + in, out := &in.TargetPrefix, &out.TargetPrefix + *out = new(string) + **out = **in + 
} +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLoggingObservation. +func (in *BucketLoggingObservation) DeepCopy() *BucketLoggingObservation { + if in == nil { + return nil + } + out := new(BucketLoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLoggingParameters) DeepCopyInto(out *BucketLoggingParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.TargetBucket != nil { + in, out := &in.TargetBucket, &out.TargetBucket + *out = new(string) + **out = **in + } + if in.TargetBucketRef != nil { + in, out := &in.TargetBucketRef, &out.TargetBucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetBucketSelector != nil { + in, out := &in.TargetBucketSelector, &out.TargetBucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetGrant != nil { + in, out := &in.TargetGrant, &out.TargetGrant + *out = make([]TargetGrantParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetObjectKeyFormat != nil { + in, out := &in.TargetObjectKeyFormat, &out.TargetObjectKeyFormat + *out = new(TargetObjectKeyFormatParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetPrefix != nil { + in, out := &in.TargetPrefix, 
&out.TargetPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLoggingParameters. +func (in *BucketLoggingParameters) DeepCopy() *BucketLoggingParameters { + if in == nil { + return nil + } + out := new(BucketLoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLoggingSpec) DeepCopyInto(out *BucketLoggingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLoggingSpec. +func (in *BucketLoggingSpec) DeepCopy() *BucketLoggingSpec { + if in == nil { + return nil + } + out := new(BucketLoggingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLoggingStatus) DeepCopyInto(out *BucketLoggingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLoggingStatus. +func (in *BucketLoggingStatus) DeepCopy() *BucketLoggingStatus { + if in == nil { + return nil + } + out := new(BucketLoggingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketMetric) DeepCopyInto(out *BucketMetric) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetric. +func (in *BucketMetric) DeepCopy() *BucketMetric { + if in == nil { + return nil + } + out := new(BucketMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketMetric) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketMetricFilterInitParameters) DeepCopyInto(out *BucketMetricFilterInitParameters) { + *out = *in + if in.AccessPoint != nil { + in, out := &in.AccessPoint, &out.AccessPoint + *out = new(string) + **out = **in + } + if in.AccessPointRef != nil { + in, out := &in.AccessPointRef, &out.AccessPointRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccessPointSelector != nil { + in, out := &in.AccessPointSelector, &out.AccessPointSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetricFilterInitParameters. 
+func (in *BucketMetricFilterInitParameters) DeepCopy() *BucketMetricFilterInitParameters { + if in == nil { + return nil + } + out := new(BucketMetricFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketMetricFilterObservation) DeepCopyInto(out *BucketMetricFilterObservation) { + *out = *in + if in.AccessPoint != nil { + in, out := &in.AccessPoint, &out.AccessPoint + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetricFilterObservation. +func (in *BucketMetricFilterObservation) DeepCopy() *BucketMetricFilterObservation { + if in == nil { + return nil + } + out := new(BucketMetricFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketMetricFilterParameters) DeepCopyInto(out *BucketMetricFilterParameters) { + *out = *in + if in.AccessPoint != nil { + in, out := &in.AccessPoint, &out.AccessPoint + *out = new(string) + **out = **in + } + if in.AccessPointRef != nil { + in, out := &in.AccessPointRef, &out.AccessPointRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccessPointSelector != nil { + in, out := &in.AccessPointSelector, &out.AccessPointSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetricFilterParameters. +func (in *BucketMetricFilterParameters) DeepCopy() *BucketMetricFilterParameters { + if in == nil { + return nil + } + out := new(BucketMetricFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketMetricInitParameters) DeepCopyInto(out *BucketMetricInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketMetricFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetricInitParameters. +func (in *BucketMetricInitParameters) DeepCopy() *BucketMetricInitParameters { + if in == nil { + return nil + } + out := new(BucketMetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketMetricList) DeepCopyInto(out *BucketMetricList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketMetric, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetricList. +func (in *BucketMetricList) DeepCopy() *BucketMetricList { + if in == nil { + return nil + } + out := new(BucketMetricList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BucketMetricList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketMetricObservation) DeepCopyInto(out *BucketMetricObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketMetricFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetricObservation. +func (in *BucketMetricObservation) DeepCopy() *BucketMetricObservation { + if in == nil { + return nil + } + out := new(BucketMetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketMetricParameters) DeepCopyInto(out *BucketMetricParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketMetricFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetricParameters. +func (in *BucketMetricParameters) DeepCopy() *BucketMetricParameters { + if in == nil { + return nil + } + out := new(BucketMetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketMetricSpec) DeepCopyInto(out *BucketMetricSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetricSpec. +func (in *BucketMetricSpec) DeepCopy() *BucketMetricSpec { + if in == nil { + return nil + } + out := new(BucketMetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketMetricStatus) DeepCopyInto(out *BucketMetricStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketMetricStatus. +func (in *BucketMetricStatus) DeepCopy() *BucketMetricStatus { + if in == nil { + return nil + } + out := new(BucketMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketObjectLockConfiguration) DeepCopyInto(out *BucketObjectLockConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfiguration. +func (in *BucketObjectLockConfiguration) DeepCopy() *BucketObjectLockConfiguration { + if in == nil { + return nil + } + out := new(BucketObjectLockConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketObjectLockConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketObjectLockConfigurationInitParameters) DeepCopyInto(out *BucketObjectLockConfigurationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ObjectLockEnabled != nil { + in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(BucketObjectLockConfigurationRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TokenSecretRef != nil { + in, out := &in.TokenSecretRef, &out.TokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfigurationInitParameters. +func (in *BucketObjectLockConfigurationInitParameters) DeepCopy() *BucketObjectLockConfigurationInitParameters { + if in == nil { + return nil + } + out := new(BucketObjectLockConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketObjectLockConfigurationList) DeepCopyInto(out *BucketObjectLockConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketObjectLockConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfigurationList. +func (in *BucketObjectLockConfigurationList) DeepCopy() *BucketObjectLockConfigurationList { + if in == nil { + return nil + } + out := new(BucketObjectLockConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketObjectLockConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketObjectLockConfigurationObservation) DeepCopyInto(out *BucketObjectLockConfigurationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ObjectLockEnabled != nil { + in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(BucketObjectLockConfigurationRuleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfigurationObservation. 
+func (in *BucketObjectLockConfigurationObservation) DeepCopy() *BucketObjectLockConfigurationObservation { + if in == nil { + return nil + } + out := new(BucketObjectLockConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketObjectLockConfigurationParameters) DeepCopyInto(out *BucketObjectLockConfigurationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ObjectLockEnabled != nil { + in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(BucketObjectLockConfigurationRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.TokenSecretRef != nil { + in, out := &in.TokenSecretRef, &out.TokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfigurationParameters. +func (in *BucketObjectLockConfigurationParameters) DeepCopy() *BucketObjectLockConfigurationParameters { + if in == nil { + return nil + } + out := new(BucketObjectLockConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketObjectLockConfigurationRuleInitParameters) DeepCopyInto(out *BucketObjectLockConfigurationRuleInitParameters) { + *out = *in + if in.DefaultRetention != nil { + in, out := &in.DefaultRetention, &out.DefaultRetention + *out = new(RuleDefaultRetentionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfigurationRuleInitParameters. +func (in *BucketObjectLockConfigurationRuleInitParameters) DeepCopy() *BucketObjectLockConfigurationRuleInitParameters { + if in == nil { + return nil + } + out := new(BucketObjectLockConfigurationRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketObjectLockConfigurationRuleObservation) DeepCopyInto(out *BucketObjectLockConfigurationRuleObservation) { + *out = *in + if in.DefaultRetention != nil { + in, out := &in.DefaultRetention, &out.DefaultRetention + *out = new(RuleDefaultRetentionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfigurationRuleObservation. +func (in *BucketObjectLockConfigurationRuleObservation) DeepCopy() *BucketObjectLockConfigurationRuleObservation { + if in == nil { + return nil + } + out := new(BucketObjectLockConfigurationRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketObjectLockConfigurationRuleParameters) DeepCopyInto(out *BucketObjectLockConfigurationRuleParameters) { + *out = *in + if in.DefaultRetention != nil { + in, out := &in.DefaultRetention, &out.DefaultRetention + *out = new(RuleDefaultRetentionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfigurationRuleParameters. +func (in *BucketObjectLockConfigurationRuleParameters) DeepCopy() *BucketObjectLockConfigurationRuleParameters { + if in == nil { + return nil + } + out := new(BucketObjectLockConfigurationRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketObjectLockConfigurationSpec) DeepCopyInto(out *BucketObjectLockConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfigurationSpec. +func (in *BucketObjectLockConfigurationSpec) DeepCopy() *BucketObjectLockConfigurationSpec { + if in == nil { + return nil + } + out := new(BucketObjectLockConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketObjectLockConfigurationStatus) DeepCopyInto(out *BucketObjectLockConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObjectLockConfigurationStatus. 
+func (in *BucketObjectLockConfigurationStatus) DeepCopy() *BucketObjectLockConfigurationStatus { + if in == nil { + return nil + } + out := new(BucketObjectLockConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketObservation) DeepCopyInto(out *BucketObservation) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.AccelerationStatus != nil { + in, out := &in.AccelerationStatus, &out.AccelerationStatus + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BucketDomainName != nil { + in, out := &in.BucketDomainName, &out.BucketDomainName + *out = new(string) + **out = **in + } + if in.BucketRegionalDomainName != nil { + in, out := &in.BucketRegionalDomainName, &out.BucketRegionalDomainName + *out = new(string) + **out = **in + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]CorsRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Grant != nil { + in, out := &in.Grant, &out.Grant + *out = make([]GrantObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostedZoneID != nil { + in, out := &in.HostedZoneID, &out.HostedZoneID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LifecycleRule != nil { + in, out := &in.LifecycleRule, &out.LifecycleRule + *out = make([]LifecycleRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = 
new(LoggingObservation) + (*in).DeepCopyInto(*out) + } + if in.ObjectLockConfiguration != nil { + in, out := &in.ObjectLockConfiguration, &out.ObjectLockConfiguration + *out = new(ObjectLockConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ObjectLockEnabled != nil { + in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled + *out = new(bool) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReplicationConfiguration != nil { + in, out := &in.ReplicationConfiguration, &out.ReplicationConfiguration + *out = new(ReplicationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RequestPayer != nil { + in, out := &in.RequestPayer, &out.RequestPayer + *out = new(string) + **out = **in + } + if in.ServerSideEncryptionConfiguration != nil { + in, out := &in.ServerSideEncryptionConfiguration, &out.ServerSideEncryptionConfiguration + *out = new(ServerSideEncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Versioning != nil { + in, out := &in.Versioning, &out.Versioning + *out = new(VersioningObservation) + (*in).DeepCopyInto(*out) + } + if in.Website != nil { + in, out := &in.Website, &out.Website + *out = 
new(WebsiteObservation) + (*in).DeepCopyInto(*out) + } + if in.WebsiteDomain != nil { + in, out := &in.WebsiteDomain, &out.WebsiteDomain + *out = new(string) + **out = **in + } + if in.WebsiteEndpoint != nil { + in, out := &in.WebsiteEndpoint, &out.WebsiteEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketObservation. +func (in *BucketObservation) DeepCopy() *BucketObservation { + if in == nil { + return nil + } + out := new(BucketObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketOwnershipControls) DeepCopyInto(out *BucketOwnershipControls) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControls. +func (in *BucketOwnershipControls) DeepCopy() *BucketOwnershipControls { + if in == nil { + return nil + } + out := new(BucketOwnershipControls) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketOwnershipControls) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketOwnershipControlsInitParameters) DeepCopyInto(out *BucketOwnershipControlsInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(BucketOwnershipControlsRuleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControlsInitParameters. +func (in *BucketOwnershipControlsInitParameters) DeepCopy() *BucketOwnershipControlsInitParameters { + if in == nil { + return nil + } + out := new(BucketOwnershipControlsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketOwnershipControlsList) DeepCopyInto(out *BucketOwnershipControlsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketOwnershipControls, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControlsList. +func (in *BucketOwnershipControlsList) DeepCopy() *BucketOwnershipControlsList { + if in == nil { + return nil + } + out := new(BucketOwnershipControlsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BucketOwnershipControlsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketOwnershipControlsObservation) DeepCopyInto(out *BucketOwnershipControlsObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(BucketOwnershipControlsRuleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControlsObservation. +func (in *BucketOwnershipControlsObservation) DeepCopy() *BucketOwnershipControlsObservation { + if in == nil { + return nil + } + out := new(BucketOwnershipControlsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketOwnershipControlsParameters) DeepCopyInto(out *BucketOwnershipControlsParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(BucketOwnershipControlsRuleParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControlsParameters. +func (in *BucketOwnershipControlsParameters) DeepCopy() *BucketOwnershipControlsParameters { + if in == nil { + return nil + } + out := new(BucketOwnershipControlsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketOwnershipControlsRuleInitParameters) DeepCopyInto(out *BucketOwnershipControlsRuleInitParameters) { + *out = *in + if in.ObjectOwnership != nil { + in, out := &in.ObjectOwnership, &out.ObjectOwnership + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControlsRuleInitParameters. +func (in *BucketOwnershipControlsRuleInitParameters) DeepCopy() *BucketOwnershipControlsRuleInitParameters { + if in == nil { + return nil + } + out := new(BucketOwnershipControlsRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketOwnershipControlsRuleObservation) DeepCopyInto(out *BucketOwnershipControlsRuleObservation) { + *out = *in + if in.ObjectOwnership != nil { + in, out := &in.ObjectOwnership, &out.ObjectOwnership + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControlsRuleObservation. +func (in *BucketOwnershipControlsRuleObservation) DeepCopy() *BucketOwnershipControlsRuleObservation { + if in == nil { + return nil + } + out := new(BucketOwnershipControlsRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketOwnershipControlsRuleParameters) DeepCopyInto(out *BucketOwnershipControlsRuleParameters) { + *out = *in + if in.ObjectOwnership != nil { + in, out := &in.ObjectOwnership, &out.ObjectOwnership + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControlsRuleParameters. +func (in *BucketOwnershipControlsRuleParameters) DeepCopy() *BucketOwnershipControlsRuleParameters { + if in == nil { + return nil + } + out := new(BucketOwnershipControlsRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketOwnershipControlsSpec) DeepCopyInto(out *BucketOwnershipControlsSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControlsSpec. 
+func (in *BucketOwnershipControlsSpec) DeepCopy() *BucketOwnershipControlsSpec { + if in == nil { + return nil + } + out := new(BucketOwnershipControlsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketOwnershipControlsStatus) DeepCopyInto(out *BucketOwnershipControlsStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketOwnershipControlsStatus. +func (in *BucketOwnershipControlsStatus) DeepCopy() *BucketOwnershipControlsStatus { + if in == nil { + return nil + } + out := new(BucketOwnershipControlsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketParameters) DeepCopyInto(out *BucketParameters) { + *out = *in + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.ObjectLockEnabled != nil { + in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketParameters. 
+func (in *BucketParameters) DeepCopy() *BucketParameters { + if in == nil { + return nil + } + out := new(BucketParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfiguration) DeepCopyInto(out *BucketReplicationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfiguration. +func (in *BucketReplicationConfiguration) DeepCopy() *BucketReplicationConfiguration { + if in == nil { + return nil + } + out := new(BucketReplicationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketReplicationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketReplicationConfigurationInitParameters) DeepCopyInto(out *BucketReplicationConfigurationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.RoleRef != nil { + in, out := &in.RoleRef, &out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]BucketReplicationConfigurationRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TokenSecretRef != nil { + in, out := &in.TokenSecretRef, &out.TokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationInitParameters. +func (in *BucketReplicationConfigurationInitParameters) DeepCopy() *BucketReplicationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketReplicationConfigurationList) DeepCopyInto(out *BucketReplicationConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketReplicationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationList. +func (in *BucketReplicationConfigurationList) DeepCopy() *BucketReplicationConfigurationList { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketReplicationConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfigurationObservation) DeepCopyInto(out *BucketReplicationConfigurationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]BucketReplicationConfigurationRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationObservation. 
+func (in *BucketReplicationConfigurationObservation) DeepCopy() *BucketReplicationConfigurationObservation { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfigurationParameters) DeepCopyInto(out *BucketReplicationConfigurationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.RoleRef != nil { + in, out := &in.RoleRef, &out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]BucketReplicationConfigurationRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TokenSecretRef != nil { + in, out := &in.TokenSecretRef, &out.TokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationParameters. 
+func (in *BucketReplicationConfigurationParameters) DeepCopy() *BucketReplicationConfigurationParameters { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfigurationRuleFilterInitParameters) DeepCopyInto(out *BucketReplicationConfigurationRuleFilterInitParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = new(FilterAndInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(FilterTagInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationRuleFilterInitParameters. +func (in *BucketReplicationConfigurationRuleFilterInitParameters) DeepCopy() *BucketReplicationConfigurationRuleFilterInitParameters { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationRuleFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketReplicationConfigurationRuleFilterObservation) DeepCopyInto(out *BucketReplicationConfigurationRuleFilterObservation) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = new(FilterAndObservation) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(FilterTagObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationRuleFilterObservation. +func (in *BucketReplicationConfigurationRuleFilterObservation) DeepCopy() *BucketReplicationConfigurationRuleFilterObservation { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationRuleFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfigurationRuleFilterParameters) DeepCopyInto(out *BucketReplicationConfigurationRuleFilterParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = new(FilterAndParameters) + (*in).DeepCopyInto(*out) + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(FilterTagParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationRuleFilterParameters. 
+func (in *BucketReplicationConfigurationRuleFilterParameters) DeepCopy() *BucketReplicationConfigurationRuleFilterParameters { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationRuleFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfigurationRuleInitParameters) DeepCopyInto(out *BucketReplicationConfigurationRuleInitParameters) { + *out = *in + if in.DeleteMarkerReplication != nil { + in, out := &in.DeleteMarkerReplication, &out.DeleteMarkerReplication + *out = new(DeleteMarkerReplicationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(RuleDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExistingObjectReplication != nil { + in, out := &in.ExistingObjectReplication, &out.ExistingObjectReplication + *out = new(ExistingObjectReplicationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketReplicationConfigurationRuleFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SourceSelectionCriteria != nil { + in, out := &in.SourceSelectionCriteria, &out.SourceSelectionCriteria + *out = new(RuleSourceSelectionCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationRuleInitParameters. 
+func (in *BucketReplicationConfigurationRuleInitParameters) DeepCopy() *BucketReplicationConfigurationRuleInitParameters { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfigurationRuleObservation) DeepCopyInto(out *BucketReplicationConfigurationRuleObservation) { + *out = *in + if in.DeleteMarkerReplication != nil { + in, out := &in.DeleteMarkerReplication, &out.DeleteMarkerReplication + *out = new(DeleteMarkerReplicationObservation) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(RuleDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.ExistingObjectReplication != nil { + in, out := &in.ExistingObjectReplication, &out.ExistingObjectReplication + *out = new(ExistingObjectReplicationObservation) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketReplicationConfigurationRuleFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SourceSelectionCriteria != nil { + in, out := &in.SourceSelectionCriteria, &out.SourceSelectionCriteria + *out = new(RuleSourceSelectionCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationRuleObservation. 
+func (in *BucketReplicationConfigurationRuleObservation) DeepCopy() *BucketReplicationConfigurationRuleObservation { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfigurationRuleParameters) DeepCopyInto(out *BucketReplicationConfigurationRuleParameters) { + *out = *in + if in.DeleteMarkerReplication != nil { + in, out := &in.DeleteMarkerReplication, &out.DeleteMarkerReplication + *out = new(DeleteMarkerReplicationParameters) + (*in).DeepCopyInto(*out) + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(RuleDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.ExistingObjectReplication != nil { + in, out := &in.ExistingObjectReplication, &out.ExistingObjectReplication + *out = new(ExistingObjectReplicationParameters) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BucketReplicationConfigurationRuleFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SourceSelectionCriteria != nil { + in, out := &in.SourceSelectionCriteria, &out.SourceSelectionCriteria + *out = new(RuleSourceSelectionCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationRuleParameters. 
+func (in *BucketReplicationConfigurationRuleParameters) DeepCopy() *BucketReplicationConfigurationRuleParameters { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfigurationSpec) DeepCopyInto(out *BucketReplicationConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationSpec. +func (in *BucketReplicationConfigurationSpec) DeepCopy() *BucketReplicationConfigurationSpec { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketReplicationConfigurationStatus) DeepCopyInto(out *BucketReplicationConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketReplicationConfigurationStatus. +func (in *BucketReplicationConfigurationStatus) DeepCopy() *BucketReplicationConfigurationStatus { + if in == nil { + return nil + } + out := new(BucketReplicationConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketServerSideEncryptionConfiguration) DeepCopyInto(out *BucketServerSideEncryptionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfiguration. +func (in *BucketServerSideEncryptionConfiguration) DeepCopy() *BucketServerSideEncryptionConfiguration { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketServerSideEncryptionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketServerSideEncryptionConfigurationInitParameters) DeepCopyInto(out *BucketServerSideEncryptionConfigurationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]BucketServerSideEncryptionConfigurationRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfigurationInitParameters. +func (in *BucketServerSideEncryptionConfigurationInitParameters) DeepCopy() *BucketServerSideEncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketServerSideEncryptionConfigurationList) DeepCopyInto(out *BucketServerSideEncryptionConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketServerSideEncryptionConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfigurationList. 
+func (in *BucketServerSideEncryptionConfigurationList) DeepCopy() *BucketServerSideEncryptionConfigurationList { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketServerSideEncryptionConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketServerSideEncryptionConfigurationObservation) DeepCopyInto(out *BucketServerSideEncryptionConfigurationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]BucketServerSideEncryptionConfigurationRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfigurationObservation. +func (in *BucketServerSideEncryptionConfigurationObservation) DeepCopy() *BucketServerSideEncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketServerSideEncryptionConfigurationParameters) DeepCopyInto(out *BucketServerSideEncryptionConfigurationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]BucketServerSideEncryptionConfigurationRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfigurationParameters. +func (in *BucketServerSideEncryptionConfigurationParameters) DeepCopy() *BucketServerSideEncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketServerSideEncryptionConfigurationRuleInitParameters) DeepCopyInto(out *BucketServerSideEncryptionConfigurationRuleInitParameters) { + *out = *in + if in.ApplyServerSideEncryptionByDefault != nil { + in, out := &in.ApplyServerSideEncryptionByDefault, &out.ApplyServerSideEncryptionByDefault + *out = new(RuleApplyServerSideEncryptionByDefaultInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BucketKeyEnabled != nil { + in, out := &in.BucketKeyEnabled, &out.BucketKeyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfigurationRuleInitParameters. +func (in *BucketServerSideEncryptionConfigurationRuleInitParameters) DeepCopy() *BucketServerSideEncryptionConfigurationRuleInitParameters { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfigurationRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketServerSideEncryptionConfigurationRuleObservation) DeepCopyInto(out *BucketServerSideEncryptionConfigurationRuleObservation) { + *out = *in + if in.ApplyServerSideEncryptionByDefault != nil { + in, out := &in.ApplyServerSideEncryptionByDefault, &out.ApplyServerSideEncryptionByDefault + *out = new(RuleApplyServerSideEncryptionByDefaultObservation) + (*in).DeepCopyInto(*out) + } + if in.BucketKeyEnabled != nil { + in, out := &in.BucketKeyEnabled, &out.BucketKeyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfigurationRuleObservation. 
+func (in *BucketServerSideEncryptionConfigurationRuleObservation) DeepCopy() *BucketServerSideEncryptionConfigurationRuleObservation { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfigurationRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketServerSideEncryptionConfigurationRuleParameters) DeepCopyInto(out *BucketServerSideEncryptionConfigurationRuleParameters) { + *out = *in + if in.ApplyServerSideEncryptionByDefault != nil { + in, out := &in.ApplyServerSideEncryptionByDefault, &out.ApplyServerSideEncryptionByDefault + *out = new(RuleApplyServerSideEncryptionByDefaultParameters) + (*in).DeepCopyInto(*out) + } + if in.BucketKeyEnabled != nil { + in, out := &in.BucketKeyEnabled, &out.BucketKeyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfigurationRuleParameters. +func (in *BucketServerSideEncryptionConfigurationRuleParameters) DeepCopy() *BucketServerSideEncryptionConfigurationRuleParameters { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfigurationRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketServerSideEncryptionConfigurationSpec) DeepCopyInto(out *BucketServerSideEncryptionConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfigurationSpec. 
+func (in *BucketServerSideEncryptionConfigurationSpec) DeepCopy() *BucketServerSideEncryptionConfigurationSpec { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketServerSideEncryptionConfigurationStatus) DeepCopyInto(out *BucketServerSideEncryptionConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketServerSideEncryptionConfigurationStatus. +func (in *BucketServerSideEncryptionConfigurationStatus) DeepCopy() *BucketServerSideEncryptionConfigurationStatus { + if in == nil { + return nil + } + out := new(BucketServerSideEncryptionConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. +func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. 
+func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketVersioning) DeepCopyInto(out *BucketVersioning) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketVersioning. +func (in *BucketVersioning) DeepCopy() *BucketVersioning { + if in == nil { + return nil + } + out := new(BucketVersioning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketVersioning) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketVersioningInitParameters) DeepCopyInto(out *BucketVersioningInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.Mfa != nil { + in, out := &in.Mfa, &out.Mfa + *out = new(string) + **out = **in + } + if in.VersioningConfiguration != nil { + in, out := &in.VersioningConfiguration, &out.VersioningConfiguration + *out = new(VersioningConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketVersioningInitParameters. +func (in *BucketVersioningInitParameters) DeepCopy() *BucketVersioningInitParameters { + if in == nil { + return nil + } + out := new(BucketVersioningInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketVersioningList) DeepCopyInto(out *BucketVersioningList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketVersioning, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketVersioningList. 
+func (in *BucketVersioningList) DeepCopy() *BucketVersioningList { + if in == nil { + return nil + } + out := new(BucketVersioningList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketVersioningList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketVersioningObservation) DeepCopyInto(out *BucketVersioningObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Mfa != nil { + in, out := &in.Mfa, &out.Mfa + *out = new(string) + **out = **in + } + if in.VersioningConfiguration != nil { + in, out := &in.VersioningConfiguration, &out.VersioningConfiguration + *out = new(VersioningConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketVersioningObservation. +func (in *BucketVersioningObservation) DeepCopy() *BucketVersioningObservation { + if in == nil { + return nil + } + out := new(BucketVersioningObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketVersioningParameters) DeepCopyInto(out *BucketVersioningParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.Mfa != nil { + in, out := &in.Mfa, &out.Mfa + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.VersioningConfiguration != nil { + in, out := &in.VersioningConfiguration, &out.VersioningConfiguration + *out = new(VersioningConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketVersioningParameters. +func (in *BucketVersioningParameters) DeepCopy() *BucketVersioningParameters { + if in == nil { + return nil + } + out := new(BucketVersioningParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketVersioningSpec) DeepCopyInto(out *BucketVersioningSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketVersioningSpec. 
+func (in *BucketVersioningSpec) DeepCopy() *BucketVersioningSpec { + if in == nil { + return nil + } + out := new(BucketVersioningSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketVersioningStatus) DeepCopyInto(out *BucketVersioningStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketVersioningStatus. +func (in *BucketVersioningStatus) DeepCopy() *BucketVersioningStatus { + if in == nil { + return nil + } + out := new(BucketVersioningStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketWebsiteConfiguration) DeepCopyInto(out *BucketWebsiteConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketWebsiteConfiguration. +func (in *BucketWebsiteConfiguration) DeepCopy() *BucketWebsiteConfiguration { + if in == nil { + return nil + } + out := new(BucketWebsiteConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketWebsiteConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketWebsiteConfigurationInitParameters) DeepCopyInto(out *BucketWebsiteConfigurationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ErrorDocument != nil { + in, out := &in.ErrorDocument, &out.ErrorDocument + *out = new(ErrorDocumentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.IndexDocument != nil { + in, out := &in.IndexDocument, &out.IndexDocument + *out = new(IndexDocumentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RedirectAllRequestsTo != nil { + in, out := &in.RedirectAllRequestsTo, &out.RedirectAllRequestsTo + *out = new(RedirectAllRequestsToInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoutingRule != nil { + in, out := &in.RoutingRule, &out.RoutingRule + *out = make([]RoutingRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoutingRules != nil { + in, out := &in.RoutingRules, &out.RoutingRules + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketWebsiteConfigurationInitParameters. +func (in *BucketWebsiteConfigurationInitParameters) DeepCopy() *BucketWebsiteConfigurationInitParameters { + if in == nil { + return nil + } + out := new(BucketWebsiteConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketWebsiteConfigurationList) DeepCopyInto(out *BucketWebsiteConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BucketWebsiteConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketWebsiteConfigurationList. +func (in *BucketWebsiteConfigurationList) DeepCopy() *BucketWebsiteConfigurationList { + if in == nil { + return nil + } + out := new(BucketWebsiteConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketWebsiteConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketWebsiteConfigurationObservation) DeepCopyInto(out *BucketWebsiteConfigurationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ErrorDocument != nil { + in, out := &in.ErrorDocument, &out.ErrorDocument + *out = new(ErrorDocumentObservation) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexDocument != nil { + in, out := &in.IndexDocument, &out.IndexDocument + *out = new(IndexDocumentObservation) + (*in).DeepCopyInto(*out) + } + if in.RedirectAllRequestsTo != nil { + in, out := &in.RedirectAllRequestsTo, &out.RedirectAllRequestsTo + *out = new(RedirectAllRequestsToObservation) + (*in).DeepCopyInto(*out) + } + if in.RoutingRule != nil { + in, out := &in.RoutingRule, &out.RoutingRule + *out = make([]RoutingRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoutingRules != nil { + in, out := &in.RoutingRules, &out.RoutingRules + *out = new(string) + **out = **in + } + if in.WebsiteDomain != nil { + in, out := &in.WebsiteDomain, &out.WebsiteDomain + *out = new(string) + **out = **in + } + if in.WebsiteEndpoint != nil { + in, out := &in.WebsiteEndpoint, &out.WebsiteEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketWebsiteConfigurationObservation. +func (in *BucketWebsiteConfigurationObservation) DeepCopy() *BucketWebsiteConfigurationObservation { + if in == nil { + return nil + } + out := new(BucketWebsiteConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketWebsiteConfigurationParameters) DeepCopyInto(out *BucketWebsiteConfigurationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ErrorDocument != nil { + in, out := &in.ErrorDocument, &out.ErrorDocument + *out = new(ErrorDocumentParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpectedBucketOwner != nil { + in, out := &in.ExpectedBucketOwner, &out.ExpectedBucketOwner + *out = new(string) + **out = **in + } + if in.IndexDocument != nil { + in, out := &in.IndexDocument, &out.IndexDocument + *out = new(IndexDocumentParameters) + (*in).DeepCopyInto(*out) + } + if in.RedirectAllRequestsTo != nil { + in, out := &in.RedirectAllRequestsTo, &out.RedirectAllRequestsTo + *out = new(RedirectAllRequestsToParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoutingRule != nil { + in, out := &in.RoutingRule, &out.RoutingRule + *out = make([]RoutingRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoutingRules != nil { + in, out := &in.RoutingRules, &out.RoutingRules + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketWebsiteConfigurationParameters. +func (in *BucketWebsiteConfigurationParameters) DeepCopy() *BucketWebsiteConfigurationParameters { + if in == nil { + return nil + } + out := new(BucketWebsiteConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BucketWebsiteConfigurationSpec) DeepCopyInto(out *BucketWebsiteConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketWebsiteConfigurationSpec. +func (in *BucketWebsiteConfigurationSpec) DeepCopy() *BucketWebsiteConfigurationSpec { + if in == nil { + return nil + } + out := new(BucketWebsiteConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketWebsiteConfigurationStatus) DeepCopyInto(out *BucketWebsiteConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketWebsiteConfigurationStatus. +func (in *BucketWebsiteConfigurationStatus) DeepCopy() *BucketWebsiteConfigurationStatus { + if in == nil { + return nil + } + out := new(BucketWebsiteConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { + *out = *in + if in.HTTPErrorCodeReturnedEquals != nil { + in, out := &in.HTTPErrorCodeReturnedEquals, &out.HTTPErrorCodeReturnedEquals + *out = new(string) + **out = **in + } + if in.KeyPrefixEquals != nil { + in, out := &in.KeyPrefixEquals, &out.KeyPrefixEquals + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters. 
+func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) { + *out = *in + if in.HTTPErrorCodeReturnedEquals != nil { + in, out := &in.HTTPErrorCodeReturnedEquals, &out.HTTPErrorCodeReturnedEquals + *out = new(string) + **out = **in + } + if in.KeyPrefixEquals != nil { + in, out := &in.KeyPrefixEquals, &out.KeyPrefixEquals + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation. +func (in *ConditionObservation) DeepCopy() *ConditionObservation { + if in == nil { + return nil + } + out := new(ConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) { + *out = *in + if in.HTTPErrorCodeReturnedEquals != nil { + in, out := &in.HTTPErrorCodeReturnedEquals, &out.HTTPErrorCodeReturnedEquals + *out = new(string) + **out = **in + } + if in.KeyPrefixEquals != nil { + in, out := &in.KeyPrefixEquals, &out.KeyPrefixEquals + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters. +func (in *ConditionParameters) DeepCopy() *ConditionParameters { + if in == nil { + return nil + } + out := new(ConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsRuleInitParameters) DeepCopyInto(out *CorsRuleInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleInitParameters. +func (in *CorsRuleInitParameters) DeepCopy() *CorsRuleInitParameters { + if in == nil { + return nil + } + out := new(CorsRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsRuleObservation) DeepCopyInto(out *CorsRuleObservation) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeSeconds != nil { + in, out := &in.MaxAgeSeconds, &out.MaxAgeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleObservation. 
+func (in *CorsRuleObservation) DeepCopy() *CorsRuleObservation { + if in == nil { + return nil + } + out := new(CorsRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsRuleParameters) DeepCopyInto(out *CorsRuleParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleParameters. +func (in *CorsRuleParameters) DeepCopy() *CorsRuleParameters { + if in == nil { + return nil + } + out := new(CorsRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataExportDestinationInitParameters) DeepCopyInto(out *DataExportDestinationInitParameters) { + *out = *in + if in.S3BucketDestination != nil { + in, out := &in.S3BucketDestination, &out.S3BucketDestination + *out = new(S3BucketDestinationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataExportDestinationInitParameters. +func (in *DataExportDestinationInitParameters) DeepCopy() *DataExportDestinationInitParameters { + if in == nil { + return nil + } + out := new(DataExportDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataExportDestinationObservation) DeepCopyInto(out *DataExportDestinationObservation) { + *out = *in + if in.S3BucketDestination != nil { + in, out := &in.S3BucketDestination, &out.S3BucketDestination + *out = new(S3BucketDestinationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataExportDestinationObservation. 
+func (in *DataExportDestinationObservation) DeepCopy() *DataExportDestinationObservation { + if in == nil { + return nil + } + out := new(DataExportDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataExportDestinationParameters) DeepCopyInto(out *DataExportDestinationParameters) { + *out = *in + if in.S3BucketDestination != nil { + in, out := &in.S3BucketDestination, &out.S3BucketDestination + *out = new(S3BucketDestinationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataExportDestinationParameters. +func (in *DataExportDestinationParameters) DeepCopy() *DataExportDestinationParameters { + if in == nil { + return nil + } + out := new(DataExportDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataExportInitParameters) DeepCopyInto(out *DataExportInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DataExportDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputSchemaVersion != nil { + in, out := &in.OutputSchemaVersion, &out.OutputSchemaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataExportInitParameters. +func (in *DataExportInitParameters) DeepCopy() *DataExportInitParameters { + if in == nil { + return nil + } + out := new(DataExportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataExportObservation) DeepCopyInto(out *DataExportObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DataExportDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.OutputSchemaVersion != nil { + in, out := &in.OutputSchemaVersion, &out.OutputSchemaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataExportObservation. +func (in *DataExportObservation) DeepCopy() *DataExportObservation { + if in == nil { + return nil + } + out := new(DataExportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataExportParameters) DeepCopyInto(out *DataExportParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DataExportDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputSchemaVersion != nil { + in, out := &in.OutputSchemaVersion, &out.OutputSchemaVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataExportParameters. +func (in *DataExportParameters) DeepCopy() *DataExportParameters { + if in == nil { + return nil + } + out := new(DataExportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultRetentionInitParameters) DeepCopyInto(out *DefaultRetentionInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRetentionInitParameters. 
+func (in *DefaultRetentionInitParameters) DeepCopy() *DefaultRetentionInitParameters { + if in == nil { + return nil + } + out := new(DefaultRetentionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultRetentionObservation) DeepCopyInto(out *DefaultRetentionObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Years != nil { + in, out := &in.Years, &out.Years + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRetentionObservation. +func (in *DefaultRetentionObservation) DeepCopy() *DefaultRetentionObservation { + if in == nil { + return nil + } + out := new(DefaultRetentionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultRetentionParameters) DeepCopyInto(out *DefaultRetentionParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRetentionParameters. +func (in *DefaultRetentionParameters) DeepCopy() *DefaultRetentionParameters { + if in == nil { + return nil + } + out := new(DefaultRetentionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultTagsInitParameters) DeepCopyInto(out *DefaultTagsInitParameters) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultTagsInitParameters. +func (in *DefaultTagsInitParameters) DeepCopy() *DefaultTagsInitParameters { + if in == nil { + return nil + } + out := new(DefaultTagsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultTagsObservation) DeepCopyInto(out *DefaultTagsObservation) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultTagsObservation. +func (in *DefaultTagsObservation) DeepCopy() *DefaultTagsObservation { + if in == nil { + return nil + } + out := new(DefaultTagsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultTagsParameters) DeepCopyInto(out *DefaultTagsParameters) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultTagsParameters. +func (in *DefaultTagsParameters) DeepCopy() *DefaultTagsParameters { + if in == nil { + return nil + } + out := new(DefaultTagsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteMarkerReplicationInitParameters) DeepCopyInto(out *DeleteMarkerReplicationInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteMarkerReplicationInitParameters. +func (in *DeleteMarkerReplicationInitParameters) DeepCopy() *DeleteMarkerReplicationInitParameters { + if in == nil { + return nil + } + out := new(DeleteMarkerReplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteMarkerReplicationObservation) DeepCopyInto(out *DeleteMarkerReplicationObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteMarkerReplicationObservation. 
+func (in *DeleteMarkerReplicationObservation) DeepCopy() *DeleteMarkerReplicationObservation { + if in == nil { + return nil + } + out := new(DeleteMarkerReplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteMarkerReplicationParameters) DeepCopyInto(out *DeleteMarkerReplicationParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteMarkerReplicationParameters. +func (in *DeleteMarkerReplicationParameters) DeepCopy() *DeleteMarkerReplicationParameters { + if in == nil { + return nil + } + out := new(DeleteMarkerReplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationAccessControlTranslationInitParameters) DeepCopyInto(out *DestinationAccessControlTranslationInitParameters) { + *out = *in + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationAccessControlTranslationInitParameters. +func (in *DestinationAccessControlTranslationInitParameters) DeepCopy() *DestinationAccessControlTranslationInitParameters { + if in == nil { + return nil + } + out := new(DestinationAccessControlTranslationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationAccessControlTranslationObservation) DeepCopyInto(out *DestinationAccessControlTranslationObservation) { + *out = *in + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationAccessControlTranslationObservation. +func (in *DestinationAccessControlTranslationObservation) DeepCopy() *DestinationAccessControlTranslationObservation { + if in == nil { + return nil + } + out := new(DestinationAccessControlTranslationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationAccessControlTranslationParameters) DeepCopyInto(out *DestinationAccessControlTranslationParameters) { + *out = *in + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationAccessControlTranslationParameters. +func (in *DestinationAccessControlTranslationParameters) DeepCopy() *DestinationAccessControlTranslationParameters { + if in == nil { + return nil + } + out := new(DestinationAccessControlTranslationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationBucketInitParameters) DeepCopyInto(out *DestinationBucketInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationBucketInitParameters. +func (in *DestinationBucketInitParameters) DeepCopy() *DestinationBucketInitParameters { + if in == nil { + return nil + } + out := new(DestinationBucketInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationBucketObservation) DeepCopyInto(out *DestinationBucketObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationBucketObservation. +func (in *DestinationBucketObservation) DeepCopy() *DestinationBucketObservation { + if in == nil { + return nil + } + out := new(DestinationBucketObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationBucketParameters) DeepCopyInto(out *DestinationBucketParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationBucketParameters. +func (in *DestinationBucketParameters) DeepCopy() *DestinationBucketParameters { + if in == nil { + return nil + } + out := new(DestinationBucketParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationInitParameters) DeepCopyInto(out *DestinationInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationInitParameters. +func (in *DestinationInitParameters) DeepCopy() *DestinationInitParameters { + if in == nil { + return nil + } + out := new(DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationMetricsInitParameters) DeepCopyInto(out *DestinationMetricsInitParameters) { + *out = *in + if in.EventThreshold != nil { + in, out := &in.EventThreshold, &out.EventThreshold + *out = new(EventThresholdInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationMetricsInitParameters. +func (in *DestinationMetricsInitParameters) DeepCopy() *DestinationMetricsInitParameters { + if in == nil { + return nil + } + out := new(DestinationMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationMetricsObservation) DeepCopyInto(out *DestinationMetricsObservation) { + *out = *in + if in.EventThreshold != nil { + in, out := &in.EventThreshold, &out.EventThreshold + *out = new(EventThresholdObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationMetricsObservation. +func (in *DestinationMetricsObservation) DeepCopy() *DestinationMetricsObservation { + if in == nil { + return nil + } + out := new(DestinationMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationMetricsParameters) DeepCopyInto(out *DestinationMetricsParameters) { + *out = *in + if in.EventThreshold != nil { + in, out := &in.EventThreshold, &out.EventThreshold + *out = new(EventThresholdParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationMetricsParameters. +func (in *DestinationMetricsParameters) DeepCopy() *DestinationMetricsParameters { + if in == nil { + return nil + } + out := new(DestinationMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationObservation) DeepCopyInto(out *DestinationObservation) { + *out = *in + if in.AccessControlTranslation != nil { + in, out := &in.AccessControlTranslation, &out.AccessControlTranslation + *out = new(AccessControlTranslationObservation) + (*in).DeepCopyInto(*out) + } + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = new(MetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.ReplicaKMSKeyID != nil { + in, out := &in.ReplicaKMSKeyID, &out.ReplicaKMSKeyID + *out = new(string) + **out = **in + } + if in.ReplicationTime != nil { + in, out := &in.ReplicationTime, &out.ReplicationTime + *out = new(ReplicationTimeObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationObservation. 
+func (in *DestinationObservation) DeepCopy() *DestinationObservation { + if in == nil { + return nil + } + out := new(DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationParameters) DeepCopyInto(out *DestinationParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationParameters. +func (in *DestinationParameters) DeepCopy() *DestinationParameters { + if in == nil { + return nil + } + out := new(DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationReplicationTimeInitParameters) DeepCopyInto(out *DestinationReplicationTimeInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(TimeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationReplicationTimeInitParameters. +func (in *DestinationReplicationTimeInitParameters) DeepCopy() *DestinationReplicationTimeInitParameters { + if in == nil { + return nil + } + out := new(DestinationReplicationTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationReplicationTimeObservation) DeepCopyInto(out *DestinationReplicationTimeObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(TimeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationReplicationTimeObservation. +func (in *DestinationReplicationTimeObservation) DeepCopy() *DestinationReplicationTimeObservation { + if in == nil { + return nil + } + out := new(DestinationReplicationTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationReplicationTimeParameters) DeepCopyInto(out *DestinationReplicationTimeParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(TimeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationReplicationTimeParameters. +func (in *DestinationReplicationTimeParameters) DeepCopy() *DestinationReplicationTimeParameters { + if in == nil { + return nil + } + out := new(DestinationReplicationTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfigurationInitParameters) DeepCopyInto(out *EncryptionConfigurationInitParameters) { + *out = *in + if in.ReplicaKMSKeyID != nil { + in, out := &in.ReplicaKMSKeyID, &out.ReplicaKMSKeyID + *out = new(string) + **out = **in + } + if in.ReplicaKMSKeyIDRef != nil { + in, out := &in.ReplicaKMSKeyIDRef, &out.ReplicaKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ReplicaKMSKeyIDSelector != nil { + in, out := &in.ReplicaKMSKeyIDSelector, &out.ReplicaKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationInitParameters. +func (in *EncryptionConfigurationInitParameters) DeepCopy() *EncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigurationObservation) DeepCopyInto(out *EncryptionConfigurationObservation) { + *out = *in + if in.ReplicaKMSKeyID != nil { + in, out := &in.ReplicaKMSKeyID, &out.ReplicaKMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationObservation. +func (in *EncryptionConfigurationObservation) DeepCopy() *EncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfigurationParameters) DeepCopyInto(out *EncryptionConfigurationParameters) { + *out = *in + if in.ReplicaKMSKeyID != nil { + in, out := &in.ReplicaKMSKeyID, &out.ReplicaKMSKeyID + *out = new(string) + **out = **in + } + if in.ReplicaKMSKeyIDRef != nil { + in, out := &in.ReplicaKMSKeyIDRef, &out.ReplicaKMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ReplicaKMSKeyIDSelector != nil { + in, out := &in.ReplicaKMSKeyIDSelector, &out.ReplicaKMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigurationParameters. +func (in *EncryptionConfigurationParameters) DeepCopy() *EncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionInitParameters) DeepCopyInto(out *EncryptionInitParameters) { + *out = *in + if in.SseKMS != nil { + in, out := &in.SseKMS, &out.SseKMS + *out = new(SseKMSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SseS3 != nil { + in, out := &in.SseS3, &out.SseS3 + *out = new(SseS3InitParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInitParameters. +func (in *EncryptionInitParameters) DeepCopy() *EncryptionInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionObservation) DeepCopyInto(out *EncryptionObservation) { + *out = *in + if in.SseKMS != nil { + in, out := &in.SseKMS, &out.SseKMS + *out = new(SseKMSObservation) + (*in).DeepCopyInto(*out) + } + if in.SseS3 != nil { + in, out := &in.SseS3, &out.SseS3 + *out = new(SseS3Parameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionObservation. +func (in *EncryptionObservation) DeepCopy() *EncryptionObservation { + if in == nil { + return nil + } + out := new(EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionParameters) DeepCopyInto(out *EncryptionParameters) { + *out = *in + if in.SseKMS != nil { + in, out := &in.SseKMS, &out.SseKMS + *out = new(SseKMSParameters) + (*in).DeepCopyInto(*out) + } + if in.SseS3 != nil { + in, out := &in.SseS3, &out.SseS3 + *out = new(SseS3Parameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionParameters. +func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { + if in == nil { + return nil + } + out := new(EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorDocumentInitParameters) DeepCopyInto(out *ErrorDocumentInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorDocumentInitParameters. 
+func (in *ErrorDocumentInitParameters) DeepCopy() *ErrorDocumentInitParameters { + if in == nil { + return nil + } + out := new(ErrorDocumentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorDocumentObservation) DeepCopyInto(out *ErrorDocumentObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorDocumentObservation. +func (in *ErrorDocumentObservation) DeepCopy() *ErrorDocumentObservation { + if in == nil { + return nil + } + out := new(ErrorDocumentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorDocumentParameters) DeepCopyInto(out *ErrorDocumentParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorDocumentParameters. +func (in *ErrorDocumentParameters) DeepCopy() *ErrorDocumentParameters { + if in == nil { + return nil + } + out := new(ErrorDocumentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventThresholdInitParameters) DeepCopyInto(out *EventThresholdInitParameters) { + *out = *in + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventThresholdInitParameters. 
+func (in *EventThresholdInitParameters) DeepCopy() *EventThresholdInitParameters { + if in == nil { + return nil + } + out := new(EventThresholdInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventThresholdObservation) DeepCopyInto(out *EventThresholdObservation) { + *out = *in + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventThresholdObservation. +func (in *EventThresholdObservation) DeepCopy() *EventThresholdObservation { + if in == nil { + return nil + } + out := new(EventThresholdObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventThresholdParameters) DeepCopyInto(out *EventThresholdParameters) { + *out = *in + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventThresholdParameters. +func (in *EventThresholdParameters) DeepCopy() *EventThresholdParameters { + if in == nil { + return nil + } + out := new(EventThresholdParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExistingObjectReplicationInitParameters) DeepCopyInto(out *ExistingObjectReplicationInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExistingObjectReplicationInitParameters. 
+func (in *ExistingObjectReplicationInitParameters) DeepCopy() *ExistingObjectReplicationInitParameters { + if in == nil { + return nil + } + out := new(ExistingObjectReplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExistingObjectReplicationObservation) DeepCopyInto(out *ExistingObjectReplicationObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExistingObjectReplicationObservation. +func (in *ExistingObjectReplicationObservation) DeepCopy() *ExistingObjectReplicationObservation { + if in == nil { + return nil + } + out := new(ExistingObjectReplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExistingObjectReplicationParameters) DeepCopyInto(out *ExistingObjectReplicationParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExistingObjectReplicationParameters. +func (in *ExistingObjectReplicationParameters) DeepCopy() *ExistingObjectReplicationParameters { + if in == nil { + return nil + } + out := new(ExistingObjectReplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpirationInitParameters) DeepCopyInto(out *ExpirationInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirationInitParameters. 
+func (in *ExpirationInitParameters) DeepCopy() *ExpirationInitParameters { + if in == nil { + return nil + } + out := new(ExpirationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpirationObservation) DeepCopyInto(out *ExpirationObservation) { + *out = *in + if in.Date != nil { + in, out := &in.Date, &out.Date + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.ExpiredObjectDeleteMarker != nil { + in, out := &in.ExpiredObjectDeleteMarker, &out.ExpiredObjectDeleteMarker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirationObservation. +func (in *ExpirationObservation) DeepCopy() *ExpirationObservation { + if in == nil { + return nil + } + out := new(ExpirationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpirationParameters) DeepCopyInto(out *ExpirationParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirationParameters. +func (in *ExpirationParameters) DeepCopy() *ExpirationParameters { + if in == nil { + return nil + } + out := new(ExpirationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterAndInitParameters) DeepCopyInto(out *FilterAndInitParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterAndInitParameters. +func (in *FilterAndInitParameters) DeepCopy() *FilterAndInitParameters { + if in == nil { + return nil + } + out := new(FilterAndInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterAndObservation) DeepCopyInto(out *FilterAndObservation) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterAndObservation. +func (in *FilterAndObservation) DeepCopy() *FilterAndObservation { + if in == nil { + return nil + } + out := new(FilterAndObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterAndParameters) DeepCopyInto(out *FilterAndParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterAndParameters. +func (in *FilterAndParameters) DeepCopy() *FilterAndParameters { + if in == nil { + return nil + } + out := new(FilterAndParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. +func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. +func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterTagInitParameters) DeepCopyInto(out *FilterTagInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterTagInitParameters. 
+func (in *FilterTagInitParameters) DeepCopy() *FilterTagInitParameters { + if in == nil { + return nil + } + out := new(FilterTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterTagObservation) DeepCopyInto(out *FilterTagObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterTagObservation. +func (in *FilterTagObservation) DeepCopy() *FilterTagObservation { + if in == nil { + return nil + } + out := new(FilterTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterTagParameters) DeepCopyInto(out *FilterTagParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterTagParameters. +func (in *FilterTagParameters) DeepCopy() *FilterTagParameters { + if in == nil { + return nil + } + out := new(FilterTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrantInitParameters) DeepCopyInto(out *GrantInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantInitParameters. 
+func (in *GrantInitParameters) DeepCopy() *GrantInitParameters { + if in == nil { + return nil + } + out := new(GrantInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrantObservation) DeepCopyInto(out *GrantObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantObservation. +func (in *GrantObservation) DeepCopy() *GrantObservation { + if in == nil { + return nil + } + out := new(GrantObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrantParameters) DeepCopyInto(out *GrantParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantParameters. +func (in *GrantParameters) DeepCopy() *GrantParameters { + if in == nil { + return nil + } + out := new(GrantParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GranteeInitParameters) DeepCopyInto(out *GranteeInitParameters) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GranteeInitParameters. +func (in *GranteeInitParameters) DeepCopy() *GranteeInitParameters { + if in == nil { + return nil + } + out := new(GranteeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GranteeObservation) DeepCopyInto(out *GranteeObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GranteeObservation. +func (in *GranteeObservation) DeepCopy() *GranteeObservation { + if in == nil { + return nil + } + out := new(GranteeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GranteeParameters) DeepCopyInto(out *GranteeParameters) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GranteeParameters. +func (in *GranteeParameters) DeepCopy() *GranteeParameters { + if in == nil { + return nil + } + out := new(GranteeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexDocumentInitParameters) DeepCopyInto(out *IndexDocumentInitParameters) { + *out = *in + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexDocumentInitParameters. +func (in *IndexDocumentInitParameters) DeepCopy() *IndexDocumentInitParameters { + if in == nil { + return nil + } + out := new(IndexDocumentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexDocumentObservation) DeepCopyInto(out *IndexDocumentObservation) { + *out = *in + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexDocumentObservation. 
+func (in *IndexDocumentObservation) DeepCopy() *IndexDocumentObservation { + if in == nil { + return nil + } + out := new(IndexDocumentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexDocumentParameters) DeepCopyInto(out *IndexDocumentParameters) { + *out = *in + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexDocumentParameters. +func (in *IndexDocumentParameters) DeepCopy() *IndexDocumentParameters { + if in == nil { + return nil + } + out := new(IndexDocumentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleRuleInitParameters) DeepCopyInto(out *LifecycleRuleInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleRuleInitParameters. +func (in *LifecycleRuleInitParameters) DeepCopy() *LifecycleRuleInitParameters { + if in == nil { + return nil + } + out := new(LifecycleRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LifecycleRuleObservation) DeepCopyInto(out *LifecycleRuleObservation) { + *out = *in + if in.AbortIncompleteMultipartUploadDays != nil { + in, out := &in.AbortIncompleteMultipartUploadDays, &out.AbortIncompleteMultipartUploadDays + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = new(ExpirationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.NoncurrentVersionExpiration != nil { + in, out := &in.NoncurrentVersionExpiration, &out.NoncurrentVersionExpiration + *out = new(NoncurrentVersionExpirationObservation) + (*in).DeepCopyInto(*out) + } + if in.NoncurrentVersionTransition != nil { + in, out := &in.NoncurrentVersionTransition, &out.NoncurrentVersionTransition + *out = make([]NoncurrentVersionTransitionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Transition != nil { + in, out := &in.Transition, &out.Transition + *out = make([]TransitionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleRuleObservation. 
+func (in *LifecycleRuleObservation) DeepCopy() *LifecycleRuleObservation { + if in == nil { + return nil + } + out := new(LifecycleRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleRuleParameters) DeepCopyInto(out *LifecycleRuleParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleRuleParameters. +func (in *LifecycleRuleParameters) DeepCopy() *LifecycleRuleParameters { + if in == nil { + return nil + } + out := new(LifecycleRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingInitParameters) DeepCopyInto(out *LoggingInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInitParameters. +func (in *LoggingInitParameters) DeepCopy() *LoggingInitParameters { + if in == nil { + return nil + } + out := new(LoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingObservation) DeepCopyInto(out *LoggingObservation) { + *out = *in + if in.TargetBucket != nil { + in, out := &in.TargetBucket, &out.TargetBucket + *out = new(string) + **out = **in + } + if in.TargetPrefix != nil { + in, out := &in.TargetPrefix, &out.TargetPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingObservation. 
+func (in *LoggingObservation) DeepCopy() *LoggingObservation { + if in == nil { + return nil + } + out := new(LoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingParameters) DeepCopyInto(out *LoggingParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingParameters. +func (in *LoggingParameters) DeepCopy() *LoggingParameters { + if in == nil { + return nil + } + out := new(LoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsInitParameters) DeepCopyInto(out *MetricsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsInitParameters. +func (in *MetricsInitParameters) DeepCopy() *MetricsInitParameters { + if in == nil { + return nil + } + out := new(MetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsObservation) DeepCopyInto(out *MetricsObservation) { + *out = *in + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsObservation. +func (in *MetricsObservation) DeepCopy() *MetricsObservation { + if in == nil { + return nil + } + out := new(MetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsParameters) DeepCopyInto(out *MetricsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsParameters. +func (in *MetricsParameters) DeepCopy() *MetricsParameters { + if in == nil { + return nil + } + out := new(MetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoncurrentVersionExpirationInitParameters) DeepCopyInto(out *NoncurrentVersionExpirationInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionExpirationInitParameters. +func (in *NoncurrentVersionExpirationInitParameters) DeepCopy() *NoncurrentVersionExpirationInitParameters { + if in == nil { + return nil + } + out := new(NoncurrentVersionExpirationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoncurrentVersionExpirationObservation) DeepCopyInto(out *NoncurrentVersionExpirationObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionExpirationObservation. +func (in *NoncurrentVersionExpirationObservation) DeepCopy() *NoncurrentVersionExpirationObservation { + if in == nil { + return nil + } + out := new(NoncurrentVersionExpirationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NoncurrentVersionExpirationParameters) DeepCopyInto(out *NoncurrentVersionExpirationParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionExpirationParameters. +func (in *NoncurrentVersionExpirationParameters) DeepCopy() *NoncurrentVersionExpirationParameters { + if in == nil { + return nil + } + out := new(NoncurrentVersionExpirationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoncurrentVersionTransitionInitParameters) DeepCopyInto(out *NoncurrentVersionTransitionInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionTransitionInitParameters. +func (in *NoncurrentVersionTransitionInitParameters) DeepCopy() *NoncurrentVersionTransitionInitParameters { + if in == nil { + return nil + } + out := new(NoncurrentVersionTransitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoncurrentVersionTransitionObservation) DeepCopyInto(out *NoncurrentVersionTransitionObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionTransitionObservation. 
+func (in *NoncurrentVersionTransitionObservation) DeepCopy() *NoncurrentVersionTransitionObservation { + if in == nil { + return nil + } + out := new(NoncurrentVersionTransitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoncurrentVersionTransitionParameters) DeepCopyInto(out *NoncurrentVersionTransitionParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoncurrentVersionTransitionParameters. +func (in *NoncurrentVersionTransitionParameters) DeepCopy() *NoncurrentVersionTransitionParameters { + if in == nil { + return nil + } + out := new(NoncurrentVersionTransitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Object) DeepCopyInto(out *Object) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Object. +func (in *Object) DeepCopy() *Object { + if in == nil { + return nil + } + out := new(Object) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Object) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectInitParameters) DeepCopyInto(out *ObjectInitParameters) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketKeyEnabled != nil { + in, out := &in.BucketKeyEnabled, &out.BucketKeyEnabled + *out = new(bool) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CacheControl != nil { + in, out := &in.CacheControl, &out.CacheControl + *out = new(string) + **out = **in + } + if in.ChecksumAlgorithm != nil { + in, out := &in.ChecksumAlgorithm, &out.ChecksumAlgorithm + *out = new(string) + **out = **in + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentBase64 != nil { + in, out := &in.ContentBase64, &out.ContentBase64 + *out = new(string) + **out = **in + } + if in.ContentDisposition != nil { + in, out := &in.ContentDisposition, &out.ContentDisposition + *out = new(string) + **out = **in + } + if in.ContentEncoding != nil { + in, out := &in.ContentEncoding, &out.ContentEncoding + *out = new(string) + **out = **in + } + if in.ContentLanguage != nil { + in, out := &in.ContentLanguage, &out.ContentLanguage + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if 
in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ObjectLockLegalHoldStatus != nil { + in, out := &in.ObjectLockLegalHoldStatus, &out.ObjectLockLegalHoldStatus + *out = new(string) + **out = **in + } + if in.ObjectLockMode != nil { + in, out := &in.ObjectLockMode, &out.ObjectLockMode + *out = new(string) + **out = **in + } + if in.ObjectLockRetainUntilDate != nil { + in, out := &in.ObjectLockRetainUntilDate, &out.ObjectLockRetainUntilDate + *out = new(string) + **out = **in + } + if in.OverrideProvider != nil { + in, out := &in.OverrideProvider, &out.OverrideProvider + *out = new(OverrideProviderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SourceHash != nil { + in, out := &in.SourceHash, &out.SourceHash + *out = new(string) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WebsiteRedirect != nil { + in, out := &in.WebsiteRedirect, &out.WebsiteRedirect + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectInitParameters. +func (in *ObjectInitParameters) DeepCopy() *ObjectInitParameters { + if in == nil { + return nil + } + out := new(ObjectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectList) DeepCopyInto(out *ObjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Object, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectList. +func (in *ObjectList) DeepCopy() *ObjectList { + if in == nil { + return nil + } + out := new(ObjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLockConfigurationInitParameters) DeepCopyInto(out *ObjectLockConfigurationInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLockConfigurationInitParameters. 
+func (in *ObjectLockConfigurationInitParameters) DeepCopy() *ObjectLockConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ObjectLockConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLockConfigurationObservation) DeepCopyInto(out *ObjectLockConfigurationObservation) { + *out = *in + if in.ObjectLockEnabled != nil { + in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(RuleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLockConfigurationObservation. +func (in *ObjectLockConfigurationObservation) DeepCopy() *ObjectLockConfigurationObservation { + if in == nil { + return nil + } + out := new(ObjectLockConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLockConfigurationParameters) DeepCopyInto(out *ObjectLockConfigurationParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLockConfigurationParameters. +func (in *ObjectLockConfigurationParameters) DeepCopy() *ObjectLockConfigurationParameters { + if in == nil { + return nil + } + out := new(ObjectLockConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectObservation) DeepCopyInto(out *ObjectObservation) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketKeyEnabled != nil { + in, out := &in.BucketKeyEnabled, &out.BucketKeyEnabled + *out = new(bool) + **out = **in + } + if in.CacheControl != nil { + in, out := &in.CacheControl, &out.CacheControl + *out = new(string) + **out = **in + } + if in.ChecksumAlgorithm != nil { + in, out := &in.ChecksumAlgorithm, &out.ChecksumAlgorithm + *out = new(string) + **out = **in + } + if in.ChecksumCrc32 != nil { + in, out := &in.ChecksumCrc32, &out.ChecksumCrc32 + *out = new(string) + **out = **in + } + if in.ChecksumCrc32C != nil { + in, out := &in.ChecksumCrc32C, &out.ChecksumCrc32C + *out = new(string) + **out = **in + } + if in.ChecksumSha1 != nil { + in, out := &in.ChecksumSha1, &out.ChecksumSha1 + *out = new(string) + **out = **in + } + if in.ChecksumSha256 != nil { + in, out := &in.ChecksumSha256, &out.ChecksumSha256 + *out = new(string) + **out = **in + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentBase64 != nil { + in, out := &in.ContentBase64, &out.ContentBase64 + *out = new(string) + **out = **in + } + if in.ContentDisposition != nil { + in, out := &in.ContentDisposition, &out.ContentDisposition + *out = new(string) + **out = **in + } + if in.ContentEncoding != nil { + in, out := &in.ContentEncoding, &out.ContentEncoding + *out = new(string) + **out = **in + } + if in.ContentLanguage != nil { + in, out := &in.ContentLanguage, &out.ContentLanguage + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Etag != nil { + 
in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ObjectLockLegalHoldStatus != nil { + in, out := &in.ObjectLockLegalHoldStatus, &out.ObjectLockLegalHoldStatus + *out = new(string) + **out = **in + } + if in.ObjectLockMode != nil { + in, out := &in.ObjectLockMode, &out.ObjectLockMode + *out = new(string) + **out = **in + } + if in.ObjectLockRetainUntilDate != nil { + in, out := &in.ObjectLockRetainUntilDate, &out.ObjectLockRetainUntilDate + *out = new(string) + **out = **in + } + if in.OverrideProvider != nil { + in, out := &in.OverrideProvider, &out.OverrideProvider + *out = new(OverrideProviderObservation) + (*in).DeepCopyInto(*out) + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SourceHash != nil { + in, out := &in.SourceHash, &out.SourceHash + *out = new(string) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, 
val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } + if in.WebsiteRedirect != nil { + in, out := &in.WebsiteRedirect, &out.WebsiteRedirect + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectObservation. +func (in *ObjectObservation) DeepCopy() *ObjectObservation { + if in == nil { + return nil + } + out := new(ObjectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectParameters) DeepCopyInto(out *ObjectParameters) { + *out = *in + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketKeyEnabled != nil { + in, out := &in.BucketKeyEnabled, &out.BucketKeyEnabled + *out = new(bool) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CacheControl != nil { + in, out := &in.CacheControl, &out.CacheControl + *out = new(string) + **out = **in + } + if in.ChecksumAlgorithm != nil { + in, out := &in.ChecksumAlgorithm, &out.ChecksumAlgorithm + *out = new(string) + **out = **in + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.ContentBase64 != nil { + in, out := &in.ContentBase64, &out.ContentBase64 + *out = new(string) + **out = **in + } + if in.ContentDisposition != nil { + in, out := &in.ContentDisposition, &out.ContentDisposition + *out = new(string) + **out = **in + } + if in.ContentEncoding != nil { + in, out := &in.ContentEncoding, &out.ContentEncoding + *out = new(string) + **out = **in + } + if in.ContentLanguage != nil { + in, out := &in.ContentLanguage, &out.ContentLanguage + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef 
!= nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ObjectLockLegalHoldStatus != nil { + in, out := &in.ObjectLockLegalHoldStatus, &out.ObjectLockLegalHoldStatus + *out = new(string) + **out = **in + } + if in.ObjectLockMode != nil { + in, out := &in.ObjectLockMode, &out.ObjectLockMode + *out = new(string) + **out = **in + } + if in.ObjectLockRetainUntilDate != nil { + in, out := &in.ObjectLockRetainUntilDate, &out.ObjectLockRetainUntilDate + *out = new(string) + **out = **in + } + if in.OverrideProvider != nil { + in, out := &in.OverrideProvider, &out.OverrideProvider + *out = new(OverrideProviderParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServerSideEncryption != nil { + in, out := &in.ServerSideEncryption, &out.ServerSideEncryption + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } + if in.SourceHash != nil { + in, out := &in.SourceHash, &out.SourceHash + *out = new(string) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + 
var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WebsiteRedirect != nil { + in, out := &in.WebsiteRedirect, &out.WebsiteRedirect + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectParameters. +func (in *ObjectParameters) DeepCopy() *ObjectParameters { + if in == nil { + return nil + } + out := new(ObjectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectSpec) DeepCopyInto(out *ObjectSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectSpec. +func (in *ObjectSpec) DeepCopy() *ObjectSpec { + if in == nil { + return nil + } + out := new(ObjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStatus) DeepCopyInto(out *ObjectStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStatus. +func (in *ObjectStatus) DeepCopy() *ObjectStatus { + if in == nil { + return nil + } + out := new(ObjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverrideProviderInitParameters) DeepCopyInto(out *OverrideProviderInitParameters) { + *out = *in + if in.DefaultTags != nil { + in, out := &in.DefaultTags, &out.DefaultTags + *out = new(DefaultTagsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideProviderInitParameters. +func (in *OverrideProviderInitParameters) DeepCopy() *OverrideProviderInitParameters { + if in == nil { + return nil + } + out := new(OverrideProviderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideProviderObservation) DeepCopyInto(out *OverrideProviderObservation) { + *out = *in + if in.DefaultTags != nil { + in, out := &in.DefaultTags, &out.DefaultTags + *out = new(DefaultTagsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideProviderObservation. +func (in *OverrideProviderObservation) DeepCopy() *OverrideProviderObservation { + if in == nil { + return nil + } + out := new(OverrideProviderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideProviderParameters) DeepCopyInto(out *OverrideProviderParameters) { + *out = *in + if in.DefaultTags != nil { + in, out := &in.DefaultTags, &out.DefaultTags + *out = new(DefaultTagsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideProviderParameters. 
+func (in *OverrideProviderParameters) DeepCopy() *OverrideProviderParameters { + if in == nil { + return nil + } + out := new(OverrideProviderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OwnerInitParameters) DeepCopyInto(out *OwnerInitParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerInitParameters. +func (in *OwnerInitParameters) DeepCopy() *OwnerInitParameters { + if in == nil { + return nil + } + out := new(OwnerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OwnerObservation) DeepCopyInto(out *OwnerObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerObservation. +func (in *OwnerObservation) DeepCopy() *OwnerObservation { + if in == nil { + return nil + } + out := new(OwnerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OwnerParameters) DeepCopyInto(out *OwnerParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerParameters. +func (in *OwnerParameters) DeepCopy() *OwnerParameters { + if in == nil { + return nil + } + out := new(OwnerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionedPrefixInitParameters) DeepCopyInto(out *PartitionedPrefixInitParameters) { + *out = *in + if in.PartitionDateSource != nil { + in, out := &in.PartitionDateSource, &out.PartitionDateSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionedPrefixInitParameters. +func (in *PartitionedPrefixInitParameters) DeepCopy() *PartitionedPrefixInitParameters { + if in == nil { + return nil + } + out := new(PartitionedPrefixInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionedPrefixObservation) DeepCopyInto(out *PartitionedPrefixObservation) { + *out = *in + if in.PartitionDateSource != nil { + in, out := &in.PartitionDateSource, &out.PartitionDateSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionedPrefixObservation. 
+func (in *PartitionedPrefixObservation) DeepCopy() *PartitionedPrefixObservation { + if in == nil { + return nil + } + out := new(PartitionedPrefixObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionedPrefixParameters) DeepCopyInto(out *PartitionedPrefixParameters) { + *out = *in + if in.PartitionDateSource != nil { + in, out := &in.PartitionDateSource, &out.PartitionDateSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionedPrefixParameters. +func (in *PartitionedPrefixParameters) DeepCopy() *PartitionedPrefixParameters { + if in == nil { + return nil + } + out := new(PartitionedPrefixParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectAllRequestsToInitParameters) DeepCopyInto(out *RedirectAllRequestsToInitParameters) { + *out = *in + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectAllRequestsToInitParameters. +func (in *RedirectAllRequestsToInitParameters) DeepCopy() *RedirectAllRequestsToInitParameters { + if in == nil { + return nil + } + out := new(RedirectAllRequestsToInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedirectAllRequestsToObservation) DeepCopyInto(out *RedirectAllRequestsToObservation) { + *out = *in + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectAllRequestsToObservation. +func (in *RedirectAllRequestsToObservation) DeepCopy() *RedirectAllRequestsToObservation { + if in == nil { + return nil + } + out := new(RedirectAllRequestsToObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectAllRequestsToParameters) DeepCopyInto(out *RedirectAllRequestsToParameters) { + *out = *in + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectAllRequestsToParameters. +func (in *RedirectAllRequestsToParameters) DeepCopy() *RedirectAllRequestsToParameters { + if in == nil { + return nil + } + out := new(RedirectAllRequestsToParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedirectInitParameters) DeepCopyInto(out *RedirectInitParameters) { + *out = *in + if in.HTTPRedirectCode != nil { + in, out := &in.HTTPRedirectCode, &out.HTTPRedirectCode + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.ReplaceKeyPrefixWith != nil { + in, out := &in.ReplaceKeyPrefixWith, &out.ReplaceKeyPrefixWith + *out = new(string) + **out = **in + } + if in.ReplaceKeyWith != nil { + in, out := &in.ReplaceKeyWith, &out.ReplaceKeyWith + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectInitParameters. +func (in *RedirectInitParameters) DeepCopy() *RedirectInitParameters { + if in == nil { + return nil + } + out := new(RedirectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectObservation) DeepCopyInto(out *RedirectObservation) { + *out = *in + if in.HTTPRedirectCode != nil { + in, out := &in.HTTPRedirectCode, &out.HTTPRedirectCode + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.ReplaceKeyPrefixWith != nil { + in, out := &in.ReplaceKeyPrefixWith, &out.ReplaceKeyPrefixWith + *out = new(string) + **out = **in + } + if in.ReplaceKeyWith != nil { + in, out := &in.ReplaceKeyWith, &out.ReplaceKeyWith + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectObservation. 
+func (in *RedirectObservation) DeepCopy() *RedirectObservation { + if in == nil { + return nil + } + out := new(RedirectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectParameters) DeepCopyInto(out *RedirectParameters) { + *out = *in + if in.HTTPRedirectCode != nil { + in, out := &in.HTTPRedirectCode, &out.HTTPRedirectCode + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.ReplaceKeyPrefixWith != nil { + in, out := &in.ReplaceKeyPrefixWith, &out.ReplaceKeyPrefixWith + *out = new(string) + **out = **in + } + if in.ReplaceKeyWith != nil { + in, out := &in.ReplaceKeyWith, &out.ReplaceKeyWith + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectParameters. +func (in *RedirectParameters) DeepCopy() *RedirectParameters { + if in == nil { + return nil + } + out := new(RedirectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaModificationsInitParameters) DeepCopyInto(out *ReplicaModificationsInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaModificationsInitParameters. 
+func (in *ReplicaModificationsInitParameters) DeepCopy() *ReplicaModificationsInitParameters { + if in == nil { + return nil + } + out := new(ReplicaModificationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaModificationsObservation) DeepCopyInto(out *ReplicaModificationsObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaModificationsObservation. +func (in *ReplicaModificationsObservation) DeepCopy() *ReplicaModificationsObservation { + if in == nil { + return nil + } + out := new(ReplicaModificationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaModificationsParameters) DeepCopyInto(out *ReplicaModificationsParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaModificationsParameters. +func (in *ReplicaModificationsParameters) DeepCopy() *ReplicaModificationsParameters { + if in == nil { + return nil + } + out := new(ReplicaModificationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationInitParameters) DeepCopyInto(out *ReplicationConfigurationInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationInitParameters. 
+func (in *ReplicationConfigurationInitParameters) DeepCopy() *ReplicationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ReplicationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationObservation) DeepCopyInto(out *ReplicationConfigurationObservation) { + *out = *in + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationObservation. +func (in *ReplicationConfigurationObservation) DeepCopy() *ReplicationConfigurationObservation { + if in == nil { + return nil + } + out := new(ReplicationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationConfigurationParameters) DeepCopyInto(out *ReplicationConfigurationParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationConfigurationParameters. +func (in *ReplicationConfigurationParameters) DeepCopy() *ReplicationConfigurationParameters { + if in == nil { + return nil + } + out := new(ReplicationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationTimeInitParameters) DeepCopyInto(out *ReplicationTimeInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationTimeInitParameters. +func (in *ReplicationTimeInitParameters) DeepCopy() *ReplicationTimeInitParameters { + if in == nil { + return nil + } + out := new(ReplicationTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationTimeObservation) DeepCopyInto(out *ReplicationTimeObservation) { + *out = *in + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationTimeObservation. +func (in *ReplicationTimeObservation) DeepCopy() *ReplicationTimeObservation { + if in == nil { + return nil + } + out := new(ReplicationTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationTimeParameters) DeepCopyInto(out *ReplicationTimeParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationTimeParameters. +func (in *ReplicationTimeParameters) DeepCopy() *ReplicationTimeParameters { + if in == nil { + return nil + } + out := new(ReplicationTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingRuleInitParameters) DeepCopyInto(out *RoutingRuleInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(RedirectInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingRuleInitParameters. +func (in *RoutingRuleInitParameters) DeepCopy() *RoutingRuleInitParameters { + if in == nil { + return nil + } + out := new(RoutingRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingRuleObservation) DeepCopyInto(out *RoutingRuleObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(RedirectObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingRuleObservation. +func (in *RoutingRuleObservation) DeepCopy() *RoutingRuleObservation { + if in == nil { + return nil + } + out := new(RoutingRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingRuleParameters) DeepCopyInto(out *RoutingRuleParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(RedirectParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingRuleParameters. +func (in *RoutingRuleParameters) DeepCopy() *RoutingRuleParameters { + if in == nil { + return nil + } + out := new(RoutingRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleApplyServerSideEncryptionByDefaultInitParameters) DeepCopyInto(out *RuleApplyServerSideEncryptionByDefaultInitParameters) { + *out = *in + if in.KMSMasterKeyID != nil { + in, out := &in.KMSMasterKeyID, &out.KMSMasterKeyID + *out = new(string) + **out = **in + } + if in.KMSMasterKeyIDRef != nil { + in, out := &in.KMSMasterKeyIDRef, &out.KMSMasterKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSMasterKeyIDSelector != nil { + in, out := &in.KMSMasterKeyIDSelector, &out.KMSMasterKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SseAlgorithm != nil { + in, out := &in.SseAlgorithm, &out.SseAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleApplyServerSideEncryptionByDefaultInitParameters. 
+func (in *RuleApplyServerSideEncryptionByDefaultInitParameters) DeepCopy() *RuleApplyServerSideEncryptionByDefaultInitParameters { + if in == nil { + return nil + } + out := new(RuleApplyServerSideEncryptionByDefaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleApplyServerSideEncryptionByDefaultObservation) DeepCopyInto(out *RuleApplyServerSideEncryptionByDefaultObservation) { + *out = *in + if in.KMSMasterKeyID != nil { + in, out := &in.KMSMasterKeyID, &out.KMSMasterKeyID + *out = new(string) + **out = **in + } + if in.SseAlgorithm != nil { + in, out := &in.SseAlgorithm, &out.SseAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleApplyServerSideEncryptionByDefaultObservation. +func (in *RuleApplyServerSideEncryptionByDefaultObservation) DeepCopy() *RuleApplyServerSideEncryptionByDefaultObservation { + if in == nil { + return nil + } + out := new(RuleApplyServerSideEncryptionByDefaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleApplyServerSideEncryptionByDefaultParameters) DeepCopyInto(out *RuleApplyServerSideEncryptionByDefaultParameters) { + *out = *in + if in.KMSMasterKeyID != nil { + in, out := &in.KMSMasterKeyID, &out.KMSMasterKeyID + *out = new(string) + **out = **in + } + if in.KMSMasterKeyIDRef != nil { + in, out := &in.KMSMasterKeyIDRef, &out.KMSMasterKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSMasterKeyIDSelector != nil { + in, out := &in.KMSMasterKeyIDSelector, &out.KMSMasterKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SseAlgorithm != nil { + in, out := &in.SseAlgorithm, &out.SseAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleApplyServerSideEncryptionByDefaultParameters. +func (in *RuleApplyServerSideEncryptionByDefaultParameters) DeepCopy() *RuleApplyServerSideEncryptionByDefaultParameters { + if in == nil { + return nil + } + out := new(RuleApplyServerSideEncryptionByDefaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleDefaultRetentionInitParameters) DeepCopyInto(out *RuleDefaultRetentionInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Years != nil { + in, out := &in.Years, &out.Years + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleDefaultRetentionInitParameters. 
+func (in *RuleDefaultRetentionInitParameters) DeepCopy() *RuleDefaultRetentionInitParameters { + if in == nil { + return nil + } + out := new(RuleDefaultRetentionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleDefaultRetentionObservation) DeepCopyInto(out *RuleDefaultRetentionObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Years != nil { + in, out := &in.Years, &out.Years + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleDefaultRetentionObservation. +func (in *RuleDefaultRetentionObservation) DeepCopy() *RuleDefaultRetentionObservation { + if in == nil { + return nil + } + out := new(RuleDefaultRetentionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleDefaultRetentionParameters) DeepCopyInto(out *RuleDefaultRetentionParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Years != nil { + in, out := &in.Years, &out.Years + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleDefaultRetentionParameters. 
+func (in *RuleDefaultRetentionParameters) DeepCopy() *RuleDefaultRetentionParameters { + if in == nil { + return nil + } + out := new(RuleDefaultRetentionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleDestinationInitParameters) DeepCopyInto(out *RuleDestinationInitParameters) { + *out = *in + if in.AccessControlTranslation != nil { + in, out := &in.AccessControlTranslation, &out.AccessControlTranslation + *out = new(DestinationAccessControlTranslationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Account != nil { + in, out := &in.Account, &out.Account + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = new(DestinationMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReplicationTime != nil { + in, out := &in.ReplicationTime, &out.ReplicationTime + *out = new(DestinationReplicationTimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleDestinationInitParameters. 
+func (in *RuleDestinationInitParameters) DeepCopy() *RuleDestinationInitParameters { + if in == nil { + return nil + } + out := new(RuleDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleDestinationObservation) DeepCopyInto(out *RuleDestinationObservation) { + *out = *in + if in.AccessControlTranslation != nil { + in, out := &in.AccessControlTranslation, &out.AccessControlTranslation + *out = new(DestinationAccessControlTranslationObservation) + (*in).DeepCopyInto(*out) + } + if in.Account != nil { + in, out := &in.Account, &out.Account + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = new(DestinationMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.ReplicationTime != nil { + in, out := &in.ReplicationTime, &out.ReplicationTime + *out = new(DestinationReplicationTimeObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleDestinationObservation. +func (in *RuleDestinationObservation) DeepCopy() *RuleDestinationObservation { + if in == nil { + return nil + } + out := new(RuleDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleDestinationParameters) DeepCopyInto(out *RuleDestinationParameters) { + *out = *in + if in.AccessControlTranslation != nil { + in, out := &in.AccessControlTranslation, &out.AccessControlTranslation + *out = new(DestinationAccessControlTranslationParameters) + (*in).DeepCopyInto(*out) + } + if in.Account != nil { + in, out := &in.Account, &out.Account + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EncryptionConfiguration != nil { + in, out := &in.EncryptionConfiguration, &out.EncryptionConfiguration + *out = new(EncryptionConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = new(DestinationMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.ReplicationTime != nil { + in, out := &in.ReplicationTime, &out.ReplicationTime + *out = new(DestinationReplicationTimeParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleDestinationParameters. +func (in *RuleDestinationParameters) DeepCopy() *RuleDestinationParameters { + if in == nil { + return nil + } + out := new(RuleDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleExpirationInitParameters) DeepCopyInto(out *RuleExpirationInitParameters) { + *out = *in + if in.Date != nil { + in, out := &in.Date, &out.Date + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.ExpiredObjectDeleteMarker != nil { + in, out := &in.ExpiredObjectDeleteMarker, &out.ExpiredObjectDeleteMarker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleExpirationInitParameters. +func (in *RuleExpirationInitParameters) DeepCopy() *RuleExpirationInitParameters { + if in == nil { + return nil + } + out := new(RuleExpirationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleExpirationObservation) DeepCopyInto(out *RuleExpirationObservation) { + *out = *in + if in.Date != nil { + in, out := &in.Date, &out.Date + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.ExpiredObjectDeleteMarker != nil { + in, out := &in.ExpiredObjectDeleteMarker, &out.ExpiredObjectDeleteMarker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleExpirationObservation. +func (in *RuleExpirationObservation) DeepCopy() *RuleExpirationObservation { + if in == nil { + return nil + } + out := new(RuleExpirationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleExpirationParameters) DeepCopyInto(out *RuleExpirationParameters) { + *out = *in + if in.Date != nil { + in, out := &in.Date, &out.Date + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.ExpiredObjectDeleteMarker != nil { + in, out := &in.ExpiredObjectDeleteMarker, &out.ExpiredObjectDeleteMarker + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleExpirationParameters. +func (in *RuleExpirationParameters) DeepCopy() *RuleExpirationParameters { + if in == nil { + return nil + } + out := new(RuleExpirationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleFilterInitParameters) DeepCopyInto(out *RuleFilterInitParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = new(AndInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ObjectSizeGreaterThan != nil { + in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan + *out = new(string) + **out = **in + } + if in.ObjectSizeLessThan != nil { + in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleFilterInitParameters. +func (in *RuleFilterInitParameters) DeepCopy() *RuleFilterInitParameters { + if in == nil { + return nil + } + out := new(RuleFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RuleFilterObservation) DeepCopyInto(out *RuleFilterObservation) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = new(AndObservation) + (*in).DeepCopyInto(*out) + } + if in.ObjectSizeGreaterThan != nil { + in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan + *out = new(string) + **out = **in + } + if in.ObjectSizeLessThan != nil { + in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleFilterObservation. +func (in *RuleFilterObservation) DeepCopy() *RuleFilterObservation { + if in == nil { + return nil + } + out := new(RuleFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleFilterParameters) DeepCopyInto(out *RuleFilterParameters) { + *out = *in + if in.And != nil { + in, out := &in.And, &out.And + *out = new(AndParameters) + (*in).DeepCopyInto(*out) + } + if in.ObjectSizeGreaterThan != nil { + in, out := &in.ObjectSizeGreaterThan, &out.ObjectSizeGreaterThan + *out = new(string) + **out = **in + } + if in.ObjectSizeLessThan != nil { + in, out := &in.ObjectSizeLessThan, &out.ObjectSizeLessThan + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleFilterParameters. 
+func (in *RuleFilterParameters) DeepCopy() *RuleFilterParameters { + if in == nil { + return nil + } + out := new(RuleFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleInitParameters. +func (in *RuleInitParameters) DeepCopy() *RuleInitParameters { + if in == nil { + return nil + } + out := new(RuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleNoncurrentVersionExpirationInitParameters) DeepCopyInto(out *RuleNoncurrentVersionExpirationInitParameters) { + *out = *in + if in.NewerNoncurrentVersions != nil { + in, out := &in.NewerNoncurrentVersions, &out.NewerNoncurrentVersions + *out = new(string) + **out = **in + } + if in.NoncurrentDays != nil { + in, out := &in.NoncurrentDays, &out.NoncurrentDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleNoncurrentVersionExpirationInitParameters. +func (in *RuleNoncurrentVersionExpirationInitParameters) DeepCopy() *RuleNoncurrentVersionExpirationInitParameters { + if in == nil { + return nil + } + out := new(RuleNoncurrentVersionExpirationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleNoncurrentVersionExpirationObservation) DeepCopyInto(out *RuleNoncurrentVersionExpirationObservation) { + *out = *in + if in.NewerNoncurrentVersions != nil { + in, out := &in.NewerNoncurrentVersions, &out.NewerNoncurrentVersions + *out = new(string) + **out = **in + } + if in.NoncurrentDays != nil { + in, out := &in.NoncurrentDays, &out.NoncurrentDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleNoncurrentVersionExpirationObservation. +func (in *RuleNoncurrentVersionExpirationObservation) DeepCopy() *RuleNoncurrentVersionExpirationObservation { + if in == nil { + return nil + } + out := new(RuleNoncurrentVersionExpirationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleNoncurrentVersionExpirationParameters) DeepCopyInto(out *RuleNoncurrentVersionExpirationParameters) { + *out = *in + if in.NewerNoncurrentVersions != nil { + in, out := &in.NewerNoncurrentVersions, &out.NewerNoncurrentVersions + *out = new(string) + **out = **in + } + if in.NoncurrentDays != nil { + in, out := &in.NoncurrentDays, &out.NoncurrentDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleNoncurrentVersionExpirationParameters. +func (in *RuleNoncurrentVersionExpirationParameters) DeepCopy() *RuleNoncurrentVersionExpirationParameters { + if in == nil { + return nil + } + out := new(RuleNoncurrentVersionExpirationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleNoncurrentVersionTransitionInitParameters) DeepCopyInto(out *RuleNoncurrentVersionTransitionInitParameters) { + *out = *in + if in.NewerNoncurrentVersions != nil { + in, out := &in.NewerNoncurrentVersions, &out.NewerNoncurrentVersions + *out = new(string) + **out = **in + } + if in.NoncurrentDays != nil { + in, out := &in.NoncurrentDays, &out.NoncurrentDays + *out = new(float64) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleNoncurrentVersionTransitionInitParameters. +func (in *RuleNoncurrentVersionTransitionInitParameters) DeepCopy() *RuleNoncurrentVersionTransitionInitParameters { + if in == nil { + return nil + } + out := new(RuleNoncurrentVersionTransitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleNoncurrentVersionTransitionObservation) DeepCopyInto(out *RuleNoncurrentVersionTransitionObservation) { + *out = *in + if in.NewerNoncurrentVersions != nil { + in, out := &in.NewerNoncurrentVersions, &out.NewerNoncurrentVersions + *out = new(string) + **out = **in + } + if in.NoncurrentDays != nil { + in, out := &in.NoncurrentDays, &out.NoncurrentDays + *out = new(float64) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleNoncurrentVersionTransitionObservation. 
+func (in *RuleNoncurrentVersionTransitionObservation) DeepCopy() *RuleNoncurrentVersionTransitionObservation { + if in == nil { + return nil + } + out := new(RuleNoncurrentVersionTransitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleNoncurrentVersionTransitionParameters) DeepCopyInto(out *RuleNoncurrentVersionTransitionParameters) { + *out = *in + if in.NewerNoncurrentVersions != nil { + in, out := &in.NewerNoncurrentVersions, &out.NewerNoncurrentVersions + *out = new(string) + **out = **in + } + if in.NoncurrentDays != nil { + in, out := &in.NoncurrentDays, &out.NoncurrentDays + *out = new(float64) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleNoncurrentVersionTransitionParameters. +func (in *RuleNoncurrentVersionTransitionParameters) DeepCopy() *RuleNoncurrentVersionTransitionParameters { + if in == nil { + return nil + } + out := new(RuleNoncurrentVersionTransitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleObservation) DeepCopyInto(out *RuleObservation) { + *out = *in + if in.DefaultRetention != nil { + in, out := &in.DefaultRetention, &out.DefaultRetention + *out = new(DefaultRetentionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleObservation. +func (in *RuleObservation) DeepCopy() *RuleObservation { + if in == nil { + return nil + } + out := new(RuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RuleParameters) DeepCopyInto(out *RuleParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleParameters. +func (in *RuleParameters) DeepCopy() *RuleParameters { + if in == nil { + return nil + } + out := new(RuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleSourceSelectionCriteriaInitParameters) DeepCopyInto(out *RuleSourceSelectionCriteriaInitParameters) { + *out = *in + if in.ReplicaModifications != nil { + in, out := &in.ReplicaModifications, &out.ReplicaModifications + *out = new(ReplicaModificationsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SseKMSEncryptedObjects != nil { + in, out := &in.SseKMSEncryptedObjects, &out.SseKMSEncryptedObjects + *out = new(SourceSelectionCriteriaSseKMSEncryptedObjectsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleSourceSelectionCriteriaInitParameters. +func (in *RuleSourceSelectionCriteriaInitParameters) DeepCopy() *RuleSourceSelectionCriteriaInitParameters { + if in == nil { + return nil + } + out := new(RuleSourceSelectionCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleSourceSelectionCriteriaObservation) DeepCopyInto(out *RuleSourceSelectionCriteriaObservation) { + *out = *in + if in.ReplicaModifications != nil { + in, out := &in.ReplicaModifications, &out.ReplicaModifications + *out = new(ReplicaModificationsObservation) + (*in).DeepCopyInto(*out) + } + if in.SseKMSEncryptedObjects != nil { + in, out := &in.SseKMSEncryptedObjects, &out.SseKMSEncryptedObjects + *out = new(SourceSelectionCriteriaSseKMSEncryptedObjectsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleSourceSelectionCriteriaObservation. +func (in *RuleSourceSelectionCriteriaObservation) DeepCopy() *RuleSourceSelectionCriteriaObservation { + if in == nil { + return nil + } + out := new(RuleSourceSelectionCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleSourceSelectionCriteriaParameters) DeepCopyInto(out *RuleSourceSelectionCriteriaParameters) { + *out = *in + if in.ReplicaModifications != nil { + in, out := &in.ReplicaModifications, &out.ReplicaModifications + *out = new(ReplicaModificationsParameters) + (*in).DeepCopyInto(*out) + } + if in.SseKMSEncryptedObjects != nil { + in, out := &in.SseKMSEncryptedObjects, &out.SseKMSEncryptedObjects + *out = new(SourceSelectionCriteriaSseKMSEncryptedObjectsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleSourceSelectionCriteriaParameters. +func (in *RuleSourceSelectionCriteriaParameters) DeepCopy() *RuleSourceSelectionCriteriaParameters { + if in == nil { + return nil + } + out := new(RuleSourceSelectionCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleTransitionInitParameters) DeepCopyInto(out *RuleTransitionInitParameters) { + *out = *in + if in.Date != nil { + in, out := &in.Date, &out.Date + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleTransitionInitParameters. +func (in *RuleTransitionInitParameters) DeepCopy() *RuleTransitionInitParameters { + if in == nil { + return nil + } + out := new(RuleTransitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleTransitionObservation) DeepCopyInto(out *RuleTransitionObservation) { + *out = *in + if in.Date != nil { + in, out := &in.Date, &out.Date + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleTransitionObservation. +func (in *RuleTransitionObservation) DeepCopy() *RuleTransitionObservation { + if in == nil { + return nil + } + out := new(RuleTransitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleTransitionParameters) DeepCopyInto(out *RuleTransitionParameters) { + *out = *in + if in.Date != nil { + in, out := &in.Date, &out.Date + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleTransitionParameters. +func (in *RuleTransitionParameters) DeepCopy() *RuleTransitionParameters { + if in == nil { + return nil + } + out := new(RuleTransitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesInitParameters) DeepCopyInto(out *RulesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesInitParameters. +func (in *RulesInitParameters) DeepCopy() *RulesInitParameters { + if in == nil { + return nil + } + out := new(RulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesObservation) DeepCopyInto(out *RulesObservation) { + *out = *in + if in.DeleteMarkerReplicationStatus != nil { + in, out := &in.DeleteMarkerReplicationStatus, &out.DeleteMarkerReplicationStatus + *out = new(string) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.SourceSelectionCriteria != nil { + in, out := &in.SourceSelectionCriteria, &out.SourceSelectionCriteria + *out = new(SourceSelectionCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesObservation. +func (in *RulesObservation) DeepCopy() *RulesObservation { + if in == nil { + return nil + } + out := new(RulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesParameters) DeepCopyInto(out *RulesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesParameters. +func (in *RulesParameters) DeepCopy() *RulesParameters { + if in == nil { + return nil + } + out := new(RulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3BucketDestinationInitParameters) DeepCopyInto(out *S3BucketDestinationInitParameters) { + *out = *in + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BucketDestinationInitParameters. +func (in *S3BucketDestinationInitParameters) DeepCopy() *S3BucketDestinationInitParameters { + if in == nil { + return nil + } + out := new(S3BucketDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BucketDestinationObservation) DeepCopyInto(out *S3BucketDestinationObservation) { + *out = *in + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BucketDestinationObservation. 
+func (in *S3BucketDestinationObservation) DeepCopy() *S3BucketDestinationObservation { + if in == nil { + return nil + } + out := new(S3BucketDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BucketDestinationParameters) DeepCopyInto(out *S3BucketDestinationParameters) { + *out = *in + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } + if in.BucketArn != nil { + in, out := &in.BucketArn, &out.BucketArn + *out = new(string) + **out = **in + } + if in.BucketArnRef != nil { + in, out := &in.BucketArnRef, &out.BucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketArnSelector != nil { + in, out := &in.BucketArnSelector, &out.BucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BucketDestinationParameters. +func (in *S3BucketDestinationParameters) DeepCopy() *S3BucketDestinationParameters { + if in == nil { + return nil + } + out := new(S3BucketDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. 
+func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionConfigurationInitParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationInitParameters. 
+func (in *ServerSideEncryptionConfigurationInitParameters) DeepCopy() *ServerSideEncryptionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionConfigurationObservation) DeepCopyInto(out *ServerSideEncryptionConfigurationObservation) { + *out = *in + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(ServerSideEncryptionConfigurationRuleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationObservation. +func (in *ServerSideEncryptionConfigurationObservation) DeepCopy() *ServerSideEncryptionConfigurationObservation { + if in == nil { + return nil + } + out := new(ServerSideEncryptionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionConfigurationParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationParameters. +func (in *ServerSideEncryptionConfigurationParameters) DeepCopy() *ServerSideEncryptionConfigurationParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerSideEncryptionConfigurationRuleInitParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationRuleInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationRuleInitParameters. +func (in *ServerSideEncryptionConfigurationRuleInitParameters) DeepCopy() *ServerSideEncryptionConfigurationRuleInitParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionConfigurationRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionConfigurationRuleObservation) DeepCopyInto(out *ServerSideEncryptionConfigurationRuleObservation) { + *out = *in + if in.ApplyServerSideEncryptionByDefault != nil { + in, out := &in.ApplyServerSideEncryptionByDefault, &out.ApplyServerSideEncryptionByDefault + *out = new(ApplyServerSideEncryptionByDefaultObservation) + (*in).DeepCopyInto(*out) + } + if in.BucketKeyEnabled != nil { + in, out := &in.BucketKeyEnabled, &out.BucketKeyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationRuleObservation. +func (in *ServerSideEncryptionConfigurationRuleObservation) DeepCopy() *ServerSideEncryptionConfigurationRuleObservation { + if in == nil { + return nil + } + out := new(ServerSideEncryptionConfigurationRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSideEncryptionConfigurationRuleParameters) DeepCopyInto(out *ServerSideEncryptionConfigurationRuleParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideEncryptionConfigurationRuleParameters. 
+func (in *ServerSideEncryptionConfigurationRuleParameters) DeepCopy() *ServerSideEncryptionConfigurationRuleParameters { + if in == nil { + return nil + } + out := new(ServerSideEncryptionConfigurationRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimplePrefixInitParameters) DeepCopyInto(out *SimplePrefixInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimplePrefixInitParameters. +func (in *SimplePrefixInitParameters) DeepCopy() *SimplePrefixInitParameters { + if in == nil { + return nil + } + out := new(SimplePrefixInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimplePrefixObservation) DeepCopyInto(out *SimplePrefixObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimplePrefixObservation. +func (in *SimplePrefixObservation) DeepCopy() *SimplePrefixObservation { + if in == nil { + return nil + } + out := new(SimplePrefixObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimplePrefixParameters) DeepCopyInto(out *SimplePrefixParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimplePrefixParameters. +func (in *SimplePrefixParameters) DeepCopy() *SimplePrefixParameters { + if in == nil { + return nil + } + out := new(SimplePrefixParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceSelectionCriteriaInitParameters) DeepCopyInto(out *SourceSelectionCriteriaInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSelectionCriteriaInitParameters. +func (in *SourceSelectionCriteriaInitParameters) DeepCopy() *SourceSelectionCriteriaInitParameters { + if in == nil { + return nil + } + out := new(SourceSelectionCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSelectionCriteriaObservation) DeepCopyInto(out *SourceSelectionCriteriaObservation) { + *out = *in + if in.SseKMSEncryptedObjects != nil { + in, out := &in.SseKMSEncryptedObjects, &out.SseKMSEncryptedObjects + *out = new(SseKMSEncryptedObjectsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSelectionCriteriaObservation. +func (in *SourceSelectionCriteriaObservation) DeepCopy() *SourceSelectionCriteriaObservation { + if in == nil { + return nil + } + out := new(SourceSelectionCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSelectionCriteriaParameters) DeepCopyInto(out *SourceSelectionCriteriaParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSelectionCriteriaParameters. +func (in *SourceSelectionCriteriaParameters) DeepCopy() *SourceSelectionCriteriaParameters { + if in == nil { + return nil + } + out := new(SourceSelectionCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceSelectionCriteriaSseKMSEncryptedObjectsInitParameters) DeepCopyInto(out *SourceSelectionCriteriaSseKMSEncryptedObjectsInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSelectionCriteriaSseKMSEncryptedObjectsInitParameters. +func (in *SourceSelectionCriteriaSseKMSEncryptedObjectsInitParameters) DeepCopy() *SourceSelectionCriteriaSseKMSEncryptedObjectsInitParameters { + if in == nil { + return nil + } + out := new(SourceSelectionCriteriaSseKMSEncryptedObjectsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSelectionCriteriaSseKMSEncryptedObjectsObservation) DeepCopyInto(out *SourceSelectionCriteriaSseKMSEncryptedObjectsObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSelectionCriteriaSseKMSEncryptedObjectsObservation. +func (in *SourceSelectionCriteriaSseKMSEncryptedObjectsObservation) DeepCopy() *SourceSelectionCriteriaSseKMSEncryptedObjectsObservation { + if in == nil { + return nil + } + out := new(SourceSelectionCriteriaSseKMSEncryptedObjectsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceSelectionCriteriaSseKMSEncryptedObjectsParameters) DeepCopyInto(out *SourceSelectionCriteriaSseKMSEncryptedObjectsParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSelectionCriteriaSseKMSEncryptedObjectsParameters. +func (in *SourceSelectionCriteriaSseKMSEncryptedObjectsParameters) DeepCopy() *SourceSelectionCriteriaSseKMSEncryptedObjectsParameters { + if in == nil { + return nil + } + out := new(SourceSelectionCriteriaSseKMSEncryptedObjectsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseKMSEncryptedObjectsInitParameters) DeepCopyInto(out *SseKMSEncryptedObjectsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseKMSEncryptedObjectsInitParameters. +func (in *SseKMSEncryptedObjectsInitParameters) DeepCopy() *SseKMSEncryptedObjectsInitParameters { + if in == nil { + return nil + } + out := new(SseKMSEncryptedObjectsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseKMSEncryptedObjectsObservation) DeepCopyInto(out *SseKMSEncryptedObjectsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseKMSEncryptedObjectsObservation. 
+func (in *SseKMSEncryptedObjectsObservation) DeepCopy() *SseKMSEncryptedObjectsObservation { + if in == nil { + return nil + } + out := new(SseKMSEncryptedObjectsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseKMSEncryptedObjectsParameters) DeepCopyInto(out *SseKMSEncryptedObjectsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseKMSEncryptedObjectsParameters. +func (in *SseKMSEncryptedObjectsParameters) DeepCopy() *SseKMSEncryptedObjectsParameters { + if in == nil { + return nil + } + out := new(SseKMSEncryptedObjectsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseKMSInitParameters) DeepCopyInto(out *SseKMSInitParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseKMSInitParameters. +func (in *SseKMSInitParameters) DeepCopy() *SseKMSInitParameters { + if in == nil { + return nil + } + out := new(SseKMSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseKMSObservation) DeepCopyInto(out *SseKMSObservation) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseKMSObservation. 
+func (in *SseKMSObservation) DeepCopy() *SseKMSObservation { + if in == nil { + return nil + } + out := new(SseKMSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseKMSParameters) DeepCopyInto(out *SseKMSParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseKMSParameters. +func (in *SseKMSParameters) DeepCopy() *SseKMSParameters { + if in == nil { + return nil + } + out := new(SseKMSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseS3InitParameters) DeepCopyInto(out *SseS3InitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseS3InitParameters. +func (in *SseS3InitParameters) DeepCopy() *SseS3InitParameters { + if in == nil { + return nil + } + out := new(SseS3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseS3Observation) DeepCopyInto(out *SseS3Observation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseS3Observation. +func (in *SseS3Observation) DeepCopy() *SseS3Observation { + if in == nil { + return nil + } + out := new(SseS3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SseS3Parameters) DeepCopyInto(out *SseS3Parameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseS3Parameters. +func (in *SseS3Parameters) DeepCopy() *SseS3Parameters { + if in == nil { + return nil + } + out := new(SseS3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassAnalysisInitParameters) DeepCopyInto(out *StorageClassAnalysisInitParameters) { + *out = *in + if in.DataExport != nil { + in, out := &in.DataExport, &out.DataExport + *out = new(DataExportInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassAnalysisInitParameters. +func (in *StorageClassAnalysisInitParameters) DeepCopy() *StorageClassAnalysisInitParameters { + if in == nil { + return nil + } + out := new(StorageClassAnalysisInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassAnalysisObservation) DeepCopyInto(out *StorageClassAnalysisObservation) { + *out = *in + if in.DataExport != nil { + in, out := &in.DataExport, &out.DataExport + *out = new(DataExportObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassAnalysisObservation. +func (in *StorageClassAnalysisObservation) DeepCopy() *StorageClassAnalysisObservation { + if in == nil { + return nil + } + out := new(StorageClassAnalysisObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageClassAnalysisParameters) DeepCopyInto(out *StorageClassAnalysisParameters) { + *out = *in + if in.DataExport != nil { + in, out := &in.DataExport, &out.DataExport + *out = new(DataExportParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassAnalysisParameters. +func (in *StorageClassAnalysisParameters) DeepCopy() *StorageClassAnalysisParameters { + if in == nil { + return nil + } + out := new(StorageClassAnalysisParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagInitParameters) DeepCopyInto(out *TagInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagInitParameters. +func (in *TagInitParameters) DeepCopy() *TagInitParameters { + if in == nil { + return nil + } + out := new(TagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagObservation) DeepCopyInto(out *TagObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagObservation. +func (in *TagObservation) DeepCopy() *TagObservation { + if in == nil { + return nil + } + out := new(TagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TagParameters) DeepCopyInto(out *TagParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagParameters. +func (in *TagParameters) DeepCopy() *TagParameters { + if in == nil { + return nil + } + out := new(TagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGrantGranteeInitParameters) DeepCopyInto(out *TargetGrantGranteeInitParameters) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGrantGranteeInitParameters. +func (in *TargetGrantGranteeInitParameters) DeepCopy() *TargetGrantGranteeInitParameters { + if in == nil { + return nil + } + out := new(TargetGrantGranteeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGrantGranteeObservation) DeepCopyInto(out *TargetGrantGranteeObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGrantGranteeObservation. +func (in *TargetGrantGranteeObservation) DeepCopy() *TargetGrantGranteeObservation { + if in == nil { + return nil + } + out := new(TargetGrantGranteeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGrantGranteeParameters) DeepCopyInto(out *TargetGrantGranteeParameters) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGrantGranteeParameters. +func (in *TargetGrantGranteeParameters) DeepCopy() *TargetGrantGranteeParameters { + if in == nil { + return nil + } + out := new(TargetGrantGranteeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TargetGrantInitParameters) DeepCopyInto(out *TargetGrantInitParameters) { + *out = *in + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(TargetGrantGranteeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGrantInitParameters. +func (in *TargetGrantInitParameters) DeepCopy() *TargetGrantInitParameters { + if in == nil { + return nil + } + out := new(TargetGrantInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGrantObservation) DeepCopyInto(out *TargetGrantObservation) { + *out = *in + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(TargetGrantGranteeObservation) + (*in).DeepCopyInto(*out) + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGrantObservation. +func (in *TargetGrantObservation) DeepCopy() *TargetGrantObservation { + if in == nil { + return nil + } + out := new(TargetGrantObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGrantParameters) DeepCopyInto(out *TargetGrantParameters) { + *out = *in + if in.Grantee != nil { + in, out := &in.Grantee, &out.Grantee + *out = new(TargetGrantGranteeParameters) + (*in).DeepCopyInto(*out) + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGrantParameters. +func (in *TargetGrantParameters) DeepCopy() *TargetGrantParameters { + if in == nil { + return nil + } + out := new(TargetGrantParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetObjectKeyFormatInitParameters) DeepCopyInto(out *TargetObjectKeyFormatInitParameters) { + *out = *in + if in.PartitionedPrefix != nil { + in, out := &in.PartitionedPrefix, &out.PartitionedPrefix + *out = new(PartitionedPrefixInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SimplePrefix != nil { + in, out := &in.SimplePrefix, &out.SimplePrefix + *out = new(SimplePrefixInitParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObjectKeyFormatInitParameters. +func (in *TargetObjectKeyFormatInitParameters) DeepCopy() *TargetObjectKeyFormatInitParameters { + if in == nil { + return nil + } + out := new(TargetObjectKeyFormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetObjectKeyFormatObservation) DeepCopyInto(out *TargetObjectKeyFormatObservation) { + *out = *in + if in.PartitionedPrefix != nil { + in, out := &in.PartitionedPrefix, &out.PartitionedPrefix + *out = new(PartitionedPrefixObservation) + (*in).DeepCopyInto(*out) + } + if in.SimplePrefix != nil { + in, out := &in.SimplePrefix, &out.SimplePrefix + *out = new(SimplePrefixParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObjectKeyFormatObservation. +func (in *TargetObjectKeyFormatObservation) DeepCopy() *TargetObjectKeyFormatObservation { + if in == nil { + return nil + } + out := new(TargetObjectKeyFormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetObjectKeyFormatParameters) DeepCopyInto(out *TargetObjectKeyFormatParameters) { + *out = *in + if in.PartitionedPrefix != nil { + in, out := &in.PartitionedPrefix, &out.PartitionedPrefix + *out = new(PartitionedPrefixParameters) + (*in).DeepCopyInto(*out) + } + if in.SimplePrefix != nil { + in, out := &in.SimplePrefix, &out.SimplePrefix + *out = new(SimplePrefixParameters) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObjectKeyFormatParameters. +func (in *TargetObjectKeyFormatParameters) DeepCopy() *TargetObjectKeyFormatParameters { + if in == nil { + return nil + } + out := new(TargetObjectKeyFormatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TieringInitParameters) DeepCopyInto(out *TieringInitParameters) { + *out = *in + if in.AccessTier != nil { + in, out := &in.AccessTier, &out.AccessTier + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TieringInitParameters. +func (in *TieringInitParameters) DeepCopy() *TieringInitParameters { + if in == nil { + return nil + } + out := new(TieringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TieringObservation) DeepCopyInto(out *TieringObservation) { + *out = *in + if in.AccessTier != nil { + in, out := &in.AccessTier, &out.AccessTier + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TieringObservation. +func (in *TieringObservation) DeepCopy() *TieringObservation { + if in == nil { + return nil + } + out := new(TieringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TieringParameters) DeepCopyInto(out *TieringParameters) { + *out = *in + if in.AccessTier != nil { + in, out := &in.AccessTier, &out.AccessTier + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TieringParameters. 
+func (in *TieringParameters) DeepCopy() *TieringParameters { + if in == nil { + return nil + } + out := new(TieringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeInitParameters) DeepCopyInto(out *TimeInitParameters) { + *out = *in + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeInitParameters. +func (in *TimeInitParameters) DeepCopy() *TimeInitParameters { + if in == nil { + return nil + } + out := new(TimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeObservation) DeepCopyInto(out *TimeObservation) { + *out = *in + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeObservation. +func (in *TimeObservation) DeepCopy() *TimeObservation { + if in == nil { + return nil + } + out := new(TimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeParameters) DeepCopyInto(out *TimeParameters) { + *out = *in + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeParameters. +func (in *TimeParameters) DeepCopy() *TimeParameters { + if in == nil { + return nil + } + out := new(TimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TransitionInitParameters) DeepCopyInto(out *TransitionInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransitionInitParameters. +func (in *TransitionInitParameters) DeepCopy() *TransitionInitParameters { + if in == nil { + return nil + } + out := new(TransitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransitionObservation) DeepCopyInto(out *TransitionObservation) { + *out = *in + if in.Date != nil { + in, out := &in.Date, &out.Date + *out = new(string) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransitionObservation. +func (in *TransitionObservation) DeepCopy() *TransitionObservation { + if in == nil { + return nil + } + out := new(TransitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransitionParameters) DeepCopyInto(out *TransitionParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransitionParameters. +func (in *TransitionParameters) DeepCopy() *TransitionParameters { + if in == nil { + return nil + } + out := new(TransitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VersioningConfigurationInitParameters) DeepCopyInto(out *VersioningConfigurationInitParameters) { + *out = *in + if in.MfaDelete != nil { + in, out := &in.MfaDelete, &out.MfaDelete + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersioningConfigurationInitParameters. +func (in *VersioningConfigurationInitParameters) DeepCopy() *VersioningConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VersioningConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersioningConfigurationObservation) DeepCopyInto(out *VersioningConfigurationObservation) { + *out = *in + if in.MfaDelete != nil { + in, out := &in.MfaDelete, &out.MfaDelete + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersioningConfigurationObservation. +func (in *VersioningConfigurationObservation) DeepCopy() *VersioningConfigurationObservation { + if in == nil { + return nil + } + out := new(VersioningConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VersioningConfigurationParameters) DeepCopyInto(out *VersioningConfigurationParameters) { + *out = *in + if in.MfaDelete != nil { + in, out := &in.MfaDelete, &out.MfaDelete + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersioningConfigurationParameters. +func (in *VersioningConfigurationParameters) DeepCopy() *VersioningConfigurationParameters { + if in == nil { + return nil + } + out := new(VersioningConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersioningInitParameters) DeepCopyInto(out *VersioningInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersioningInitParameters. +func (in *VersioningInitParameters) DeepCopy() *VersioningInitParameters { + if in == nil { + return nil + } + out := new(VersioningInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersioningObservation) DeepCopyInto(out *VersioningObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MfaDelete != nil { + in, out := &in.MfaDelete, &out.MfaDelete + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersioningObservation. 
+func (in *VersioningObservation) DeepCopy() *VersioningObservation { + if in == nil { + return nil + } + out := new(VersioningObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersioningParameters) DeepCopyInto(out *VersioningParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersioningParameters. +func (in *VersioningParameters) DeepCopy() *VersioningParameters { + if in == nil { + return nil + } + out := new(VersioningParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebsiteInitParameters) DeepCopyInto(out *WebsiteInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebsiteInitParameters. +func (in *WebsiteInitParameters) DeepCopy() *WebsiteInitParameters { + if in == nil { + return nil + } + out := new(WebsiteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebsiteObservation) DeepCopyInto(out *WebsiteObservation) { + *out = *in + if in.ErrorDocument != nil { + in, out := &in.ErrorDocument, &out.ErrorDocument + *out = new(string) + **out = **in + } + if in.IndexDocument != nil { + in, out := &in.IndexDocument, &out.IndexDocument + *out = new(string) + **out = **in + } + if in.RedirectAllRequestsTo != nil { + in, out := &in.RedirectAllRequestsTo, &out.RedirectAllRequestsTo + *out = new(string) + **out = **in + } + if in.RoutingRules != nil { + in, out := &in.RoutingRules, &out.RoutingRules + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebsiteObservation. +func (in *WebsiteObservation) DeepCopy() *WebsiteObservation { + if in == nil { + return nil + } + out := new(WebsiteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebsiteParameters) DeepCopyInto(out *WebsiteParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebsiteParameters. +func (in *WebsiteParameters) DeepCopy() *WebsiteParameters { + if in == nil { + return nil + } + out := new(WebsiteParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/s3/v1beta2/zz_generated.managed.go b/apis/s3/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..1309ad91e5 --- /dev/null +++ b/apis/s3/v1beta2/zz_generated.managed.go @@ -0,0 +1,908 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Bucket. 
+func (mg *Bucket) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Bucket. +func (mg *Bucket) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Bucket. +func (mg *Bucket) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Bucket. +func (mg *Bucket) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Bucket. +func (mg *Bucket) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Bucket. +func (mg *Bucket) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Bucket. +func (mg *Bucket) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Bucket. +func (mg *Bucket) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Bucket. +func (mg *Bucket) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Bucket. +func (mg *Bucket) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Bucket. +func (mg *Bucket) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Bucket. +func (mg *Bucket) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketACL. 
+func (mg *BucketACL) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketACL. +func (mg *BucketACL) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketACL. +func (mg *BucketACL) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketACL. +func (mg *BucketACL) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketACL. +func (mg *BucketACL) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketACL. +func (mg *BucketACL) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketACL. +func (mg *BucketACL) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketACL. +func (mg *BucketACL) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketACL. +func (mg *BucketACL) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketACL. +func (mg *BucketACL) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketACL. +func (mg *BucketACL) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketACL. 
+func (mg *BucketACL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketAnalyticsConfiguration. 
+func (mg *BucketAnalyticsConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketAnalyticsConfiguration. +func (mg *BucketAnalyticsConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketIntelligentTieringConfiguration. 
+func (mg *BucketIntelligentTieringConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketInventory. +func (mg *BucketInventory) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketInventory. +func (mg *BucketInventory) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketInventory. +func (mg *BucketInventory) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketInventory. +func (mg *BucketInventory) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketInventory. 
+func (mg *BucketInventory) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketInventory. +func (mg *BucketInventory) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketInventory. +func (mg *BucketInventory) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketInventory. +func (mg *BucketInventory) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketInventory. +func (mg *BucketInventory) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketInventory. +func (mg *BucketInventory) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketInventory. +func (mg *BucketInventory) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketInventory. +func (mg *BucketInventory) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketLifecycleConfiguration. 
+func (mg *BucketLifecycleConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketLifecycleConfiguration. 
+func (mg *BucketLifecycleConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketLogging. +func (mg *BucketLogging) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketLogging. +func (mg *BucketLogging) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketLogging. +func (mg *BucketLogging) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketLogging. +func (mg *BucketLogging) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketLogging. +func (mg *BucketLogging) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketLogging. +func (mg *BucketLogging) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketLogging. +func (mg *BucketLogging) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketLogging. +func (mg *BucketLogging) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketLogging. +func (mg *BucketLogging) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketLogging. +func (mg *BucketLogging) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketLogging. 
+func (mg *BucketLogging) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketLogging. +func (mg *BucketLogging) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketMetric. +func (mg *BucketMetric) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketMetric. +func (mg *BucketMetric) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketMetric. +func (mg *BucketMetric) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketMetric. +func (mg *BucketMetric) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketMetric. +func (mg *BucketMetric) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketMetric. +func (mg *BucketMetric) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketMetric. +func (mg *BucketMetric) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketMetric. +func (mg *BucketMetric) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketMetric. +func (mg *BucketMetric) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketMetric. 
+func (mg *BucketMetric) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketMetric. +func (mg *BucketMetric) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketMetric. +func (mg *BucketMetric) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketObjectLockConfiguration. 
+func (mg *BucketObjectLockConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketObjectLockConfiguration. +func (mg *BucketObjectLockConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketOwnershipControls. 
+func (mg *BucketOwnershipControls) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketOwnershipControls. +func (mg *BucketOwnershipControls) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketReplicationConfiguration. 
+func (mg *BucketReplicationConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketReplicationConfiguration. +func (mg *BucketReplicationConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketReplicationConfiguration. 
+func (mg *BucketReplicationConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketServerSideEncryptionConfiguration. 
+func (mg *BucketServerSideEncryptionConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketVersioning. +func (mg *BucketVersioning) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketVersioning. +func (mg *BucketVersioning) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketVersioning. +func (mg *BucketVersioning) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketVersioning. +func (mg *BucketVersioning) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketVersioning. +func (mg *BucketVersioning) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketVersioning. 
+func (mg *BucketVersioning) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketVersioning. +func (mg *BucketVersioning) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketVersioning. +func (mg *BucketVersioning) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketVersioning. +func (mg *BucketVersioning) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketVersioning. +func (mg *BucketVersioning) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketVersioning. +func (mg *BucketVersioning) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketVersioning. +func (mg *BucketVersioning) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BucketWebsiteConfiguration. 
+func (mg *BucketWebsiteConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Object. +func (mg *Object) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Object. 
+func (mg *Object) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Object. +func (mg *Object) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Object. +func (mg *Object) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Object. +func (mg *Object) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Object. +func (mg *Object) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Object. +func (mg *Object) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Object. +func (mg *Object) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Object. +func (mg *Object) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Object. +func (mg *Object) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Object. +func (mg *Object) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Object. 
+func (mg *Object) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/s3/v1beta2/zz_generated.managedlist.go b/apis/s3/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..edd1801080 --- /dev/null +++ b/apis/s3/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,143 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this BucketACLList. +func (l *BucketACLList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketAnalyticsConfigurationList. +func (l *BucketAnalyticsConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketIntelligentTieringConfigurationList. +func (l *BucketIntelligentTieringConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketInventoryList. +func (l *BucketInventoryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketLifecycleConfigurationList. +func (l *BucketLifecycleConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketList. 
+func (l *BucketList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketLoggingList. +func (l *BucketLoggingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketMetricList. +func (l *BucketMetricList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketObjectLockConfigurationList. +func (l *BucketObjectLockConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketOwnershipControlsList. +func (l *BucketOwnershipControlsList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketReplicationConfigurationList. +func (l *BucketReplicationConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketServerSideEncryptionConfigurationList. +func (l *BucketServerSideEncryptionConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketVersioningList. +func (l *BucketVersioningList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BucketWebsiteConfigurationList. 
+func (l *BucketWebsiteConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ObjectList. +func (l *ObjectList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/s3/v1beta2/zz_generated.resolvers.go b/apis/s3/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..a2d2201566 --- /dev/null +++ b/apis/s3/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,1121 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *BucketACL) ResolveReferences( // ResolveReferences of this BucketACL. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BucketAnalyticsConfiguration. 
+func (mg *BucketAnalyticsConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.StorageClassAnalysis != nil { + if mg.Spec.ForProvider.StorageClassAnalysis.DataExport != nil { + if mg.Spec.ForProvider.StorageClassAnalysis.DataExport.Destination != nil { + if mg.Spec.ForProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArnRef, + Selector: 
mg.Spec.ForProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArn") + } + mg.Spec.ForProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArnRef = rsp.ResolvedReference + + } + } + } + } + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.StorageClassAnalysis != nil { + if mg.Spec.InitProvider.StorageClassAnalysis.DataExport != nil { + if mg.Spec.InitProvider.StorageClassAnalysis.DataExport.Destination != nil { + if mg.Spec.InitProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArnRef, + Selector: mg.Spec.InitProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArn") + } + mg.Spec.InitProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageClassAnalysis.DataExport.Destination.S3BucketDestination.BucketArnRef = rsp.ResolvedReference + + } + } + } + } + + return nil +} + +// ResolveReferences of this BucketIntelligentTieringConfiguration. +func (mg *BucketIntelligentTieringConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { 
+ m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BucketInventory. +func (mg *BucketInventory) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Destination != nil { + if mg.Spec.ForProvider.Destination.Bucket != nil { + { + m, l, err = 
apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Destination.Bucket.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Destination.Bucket.BucketArnRef, + Selector: mg.Spec.ForProvider.Destination.Bucket.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Destination.Bucket.BucketArn") + } + mg.Spec.ForProvider.Destination.Bucket.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Destination.Bucket.BucketArnRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Destination != nil { + if mg.Spec.InitProvider.Destination.Bucket != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Destination.Bucket.BucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Destination.Bucket.BucketArnRef, + Selector: mg.Spec.InitProvider.Destination.Bucket.BucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Destination.Bucket.BucketArn") + } + mg.Spec.InitProvider.Destination.Bucket.BucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Destination.Bucket.BucketArnRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this BucketLifecycleConfiguration. +func (mg *BucketLifecycleConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BucketLogging. +func (mg *BucketLogging) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.TargetBucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TargetBucketRef, + Selector: mg.Spec.ForProvider.TargetBucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TargetBucket") + } + mg.Spec.ForProvider.TargetBucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TargetBucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TargetBucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TargetBucketRef, + Selector: mg.Spec.InitProvider.TargetBucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TargetBucket") + } + mg.Spec.InitProvider.TargetBucket = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.TargetBucketRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BucketMetric. +func (mg *BucketMetric) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Filter != nil { + { + m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta2", "AccessPoint", "AccessPointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Filter.AccessPoint), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Filter.AccessPointRef, + Selector: mg.Spec.ForProvider.Filter.AccessPointSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Filter.AccessPoint") + } + mg.Spec.ForProvider.Filter.AccessPoint = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Filter.AccessPointRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Filter != nil { + { + m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta2", "AccessPoint", "AccessPointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Filter.AccessPoint), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Filter.AccessPointRef, + Selector: mg.Spec.InitProvider.Filter.AccessPointSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Filter.AccessPoint") + } + mg.Spec.InitProvider.Filter.AccessPoint = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Filter.AccessPointRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this BucketObjectLockConfiguration. 
+func (mg *BucketObjectLockConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BucketOwnershipControls. 
+func (mg *BucketOwnershipControls) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BucketReplicationConfiguration. 
+func (mg *BucketReplicationConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Role), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.RoleRef, + Selector: mg.Spec.ForProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Role") + } + mg.Spec.ForProvider.Role = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Rule); i3++ { + if mg.Spec.ForProvider.Rule[i3].Destination != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", 
"BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Rule[i3].Destination.Bucket), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Rule[i3].Destination.BucketRef, + Selector: mg.Spec.ForProvider.Rule[i3].Destination.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Rule[i3].Destination.Bucket") + } + mg.Spec.ForProvider.Rule[i3].Destination.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Rule[i3].Destination.BucketRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Rule); i3++ { + if mg.Spec.ForProvider.Rule[i3].Destination != nil { + if mg.Spec.ForProvider.Rule[i3].Destination.EncryptionConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyIDRef, + Selector: mg.Spec.ForProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyID") + } + mg.Spec.ForProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyIDRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Role), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.RoleRef, + Selector: mg.Spec.InitProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Role") + } + mg.Spec.InitProvider.Role = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Rule); i3++ { + if mg.Spec.InitProvider.Rule[i3].Destination != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Rule[i3].Destination.Bucket), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Rule[i3].Destination.BucketRef, + Selector: mg.Spec.InitProvider.Rule[i3].Destination.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Rule[i3].Destination.Bucket") + } + mg.Spec.InitProvider.Rule[i3].Destination.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Rule[i3].Destination.BucketRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Rule); i3++ { + if mg.Spec.InitProvider.Rule[i3].Destination != nil { + if mg.Spec.InitProvider.Rule[i3].Destination.EncryptionConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyIDRef, + Selector: mg.Spec.InitProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyID") + } + mg.Spec.InitProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Rule[i3].Destination.EncryptionConfiguration.ReplicaKMSKeyIDRef = rsp.ResolvedReference + + } + } + } + + return nil +} + +// 
ResolveReferences of this BucketServerSideEncryptionConfiguration. +func (mg *BucketServerSideEncryptionConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Rule); i3++ { + if mg.Spec.ForProvider.Rule[i3].ApplyServerSideEncryptionByDefault != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyID), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyIDRef, + Selector: mg.Spec.ForProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyID") + } + mg.Spec.ForProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Rule); i3++ { + if mg.Spec.InitProvider.Rule[i3].ApplyServerSideEncryptionByDefault != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyID), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyIDRef, + Selector: mg.Spec.InitProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyID") + } + mg.Spec.InitProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Rule[i3].ApplyServerSideEncryptionByDefault.KMSMasterKeyIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this BucketVersioning. +func (mg *BucketVersioning) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BucketWebsiteConfiguration. +func (mg *BucketWebsiteConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Object. +func (mg *Object) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil 
{ + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/s3/v1beta2/zz_groupversion_info.go b/apis/s3/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..3ba92451fb --- /dev/null +++ b/apis/s3/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=s3.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "s3.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/s3/v1beta2/zz_object_terraformed.go b/apis/s3/v1beta2/zz_object_terraformed.go new file mode 100755 index 0000000000..288cfaf227 --- /dev/null +++ b/apis/s3/v1beta2/zz_object_terraformed.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Object +func (mg *Object) GetTerraformResourceType() string { + return "aws_s3_object" +} + +// GetConnectionDetailsMapping for this Object +func (tr *Object) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Object +func (tr *Object) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Object +func (tr *Object) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Object +func (tr *Object) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Object +func (tr *Object) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Object +func (tr *Object) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Object +func (tr *Object) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this 
Object +func (tr *Object) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Object using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Object) LateInitialize(attrs []byte) (bool, error) { + params := &ObjectParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("Etag")) + opts = append(opts, resource.WithNameFilter("KMSKeyID")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Object) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3/v1beta2/zz_object_types.go b/apis/s3/v1beta2/zz_object_types.go new file mode 100755 index 0000000000..5b23d80517 --- /dev/null +++ b/apis/s3/v1beta2/zz_object_types.go @@ -0,0 +1,458 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DefaultTagsInitParameters struct { + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DefaultTagsObservation struct { + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DefaultTagsParameters struct { + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ObjectInitParameters struct { + + // Canned ACL to apply. Valid values are private, public-read, public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, and bucket-owner-full-control. + ACL *string `json:"acl,omitempty" tf:"acl,omitempty"` + + // Name of the bucket to put the file in. Alternatively, an S3 access point ARN can be specified. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. + BucketKeyEnabled *bool `json:"bucketKeyEnabled,omitempty" tf:"bucket_key_enabled,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Caching behavior along the request/reply chain Read w3c cache_control for further details. + CacheControl *string `json:"cacheControl,omitempty" tf:"cache_control,omitempty"` + + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the kms:Decrypt action. Valid values: CRC32, CRC32C, SHA1, SHA256. + ChecksumAlgorithm *string `json:"checksumAlgorithm,omitempty" tf:"checksum_algorithm,omitempty"` + + // Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the gzipbase64 function with small text strings. For larger objects, use source to stream the content from a disk file. + ContentBase64 *string `json:"contentBase64,omitempty" tf:"content_base64,omitempty"` + + // Presentational information for the object. Read w3c content_disposition for further information. 
+ ContentDisposition *string `json:"contentDisposition,omitempty" tf:"content_disposition,omitempty"` + + // Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read w3c content encoding for further information. + ContentEncoding *string `json:"contentEncoding,omitempty" tf:"content_encoding,omitempty"` + + // Language the content is in e.g., en-US or en-GB. + ContentLanguage *string `json:"contentLanguage,omitempty" tf:"content_language,omitempty"` + + // Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Triggers updates when the value changes.11.11.11 or earlier). This attribute is not compatible with KMS encryption, kms_key_id or server_side_encryption = "aws:kms", also if an object is larger than 16 MB, the AWS Management Console will upload or copy that object as a Multipart Upload, and therefore the ETag will not be an MD5 digest (see source_hash instead). + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // Whether to allow the object to be deleted by removing any legal hold on any object version. Default is false. This value should be set to true only if the bucket has S3 object lock enabled. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the aws_kms_key resource, use the arn attribute. If referencing the aws_kms_alias data source or resource, use the target_key_arn attribute. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Name of the object once it is in the bucket. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Map of keys/values to provision metadata (will be automatically prefixed by x-amz-meta-, note that only lowercase label are currently supported by the AWS Go API). + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Legal hold status that you want to apply to the specified object. Valid values are ON and OFF. + ObjectLockLegalHoldStatus *string `json:"objectLockLegalHoldStatus,omitempty" tf:"object_lock_legal_hold_status,omitempty"` + + // Object lock retention mode that you want to apply to this object. Valid values are GOVERNANCE and COMPLIANCE. + ObjectLockMode *string `json:"objectLockMode,omitempty" tf:"object_lock_mode,omitempty"` + + // Date and time, in RFC3339 format, when this object's object lock will expire. + ObjectLockRetainUntilDate *string `json:"objectLockRetainUntilDate,omitempty" tf:"object_lock_retain_until_date,omitempty"` + + // Override provider-level configuration options. See Override Provider below for more details. + OverrideProvider *OverrideProviderInitParameters `json:"overrideProvider,omitempty" tf:"override_provider,omitempty"` + + // Server-side encryption of the object in S3. Valid values are "AES256" and "aws:kms". 
+ ServerSideEncryption *string `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // Path to a file that will be read and uploaded as raw bytes for the object content. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // Triggers updates like etag but useful to address etag encryption limitations.11.12 or later). (The value is only stored in state and not saved by AWS.) + SourceHash *string `json:"sourceHash,omitempty" tf:"source_hash,omitempty"` + + // Storage Class for the object. Defaults to "STANDARD". + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Target URL for website redirect. + WebsiteRedirect *string `json:"websiteRedirect,omitempty" tf:"website_redirect,omitempty"` +} + +type ObjectObservation struct { + + // Canned ACL to apply. Valid values are private, public-read, public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, and bucket-owner-full-control. + ACL *string `json:"acl,omitempty" tf:"acl,omitempty"` + + // ARN of the object. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Name of the bucket to put the file in. Alternatively, an S3 access point ARN can be specified. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. + BucketKeyEnabled *bool `json:"bucketKeyEnabled,omitempty" tf:"bucket_key_enabled,omitempty"` + + // Caching behavior along the request/reply chain Read w3c cache_control for further details. + CacheControl *string `json:"cacheControl,omitempty" tf:"cache_control,omitempty"` + + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the kms:Decrypt action. 
Valid values: CRC32, CRC32C, SHA1, SHA256. + ChecksumAlgorithm *string `json:"checksumAlgorithm,omitempty" tf:"checksum_algorithm,omitempty"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. + ChecksumCrc32 *string `json:"checksumCrc32,omitempty" tf:"checksum_crc32,omitempty"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. + ChecksumCrc32C *string `json:"checksumCrc32C,omitempty" tf:"checksum_crc32c,omitempty"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. + ChecksumSha1 *string `json:"checksumSha1,omitempty" tf:"checksum_sha1,omitempty"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. + ChecksumSha256 *string `json:"checksumSha256,omitempty" tf:"checksum_sha256,omitempty"` + + // Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the gzipbase64 function with small text strings. For larger objects, use source to stream the content from a disk file. + ContentBase64 *string `json:"contentBase64,omitempty" tf:"content_base64,omitempty"` + + // Presentational information for the object. Read w3c content_disposition for further information. + ContentDisposition *string `json:"contentDisposition,omitempty" tf:"content_disposition,omitempty"` + + // Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read w3c content encoding for further information. + ContentEncoding *string `json:"contentEncoding,omitempty" tf:"content_encoding,omitempty"` + + // Language the content is in e.g., en-US or en-GB. 
+ ContentLanguage *string `json:"contentLanguage,omitempty" tf:"content_language,omitempty"` + + // Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Triggers updates when the value changes.11.11.11 or earlier). This attribute is not compatible with KMS encryption, kms_key_id or server_side_encryption = "aws:kms", also if an object is larger than 16 MB, the AWS Management Console will upload or copy that object as a Multipart Upload, and therefore the ETag will not be an MD5 digest (see source_hash instead). + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // Whether to allow the object to be deleted by removing any legal hold on any object version. Default is false. This value should be set to true only if the bucket has S3 object lock enabled. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the aws_kms_key resource, use the arn attribute. If referencing the aws_kms_alias data source or resource, use the target_key_arn attribute. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Name of the object once it is in the bucket. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Map of keys/values to provision metadata (will be automatically prefixed by x-amz-meta-, note that only lowercase label are currently supported by the AWS Go API). + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Legal hold status that you want to apply to the specified object. Valid values are ON and OFF. 
+ ObjectLockLegalHoldStatus *string `json:"objectLockLegalHoldStatus,omitempty" tf:"object_lock_legal_hold_status,omitempty"` + + // Object lock retention mode that you want to apply to this object. Valid values are GOVERNANCE and COMPLIANCE. + ObjectLockMode *string `json:"objectLockMode,omitempty" tf:"object_lock_mode,omitempty"` + + // Date and time, in RFC3339 format, when this object's object lock will expire. + ObjectLockRetainUntilDate *string `json:"objectLockRetainUntilDate,omitempty" tf:"object_lock_retain_until_date,omitempty"` + + // Override provider-level configuration options. See Override Provider below for more details. + OverrideProvider *OverrideProviderObservation `json:"overrideProvider,omitempty" tf:"override_provider,omitempty"` + + // Server-side encryption of the object in S3. Valid values are "AES256" and "aws:kms". + ServerSideEncryption *string `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // Path to a file that will be read and uploaded as raw bytes for the object content. + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // Triggers updates like etag but useful to address etag encryption limitations.11.12 or later). (The value is only stored in state and not saved by AWS.) + SourceHash *string `json:"sourceHash,omitempty" tf:"source_hash,omitempty"` + + // Storage Class for the object. Defaults to "STANDARD". + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Unique version ID value for the object, if bucket versioning is enabled. 
+ VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` + + // Target URL for website redirect. + WebsiteRedirect *string `json:"websiteRedirect,omitempty" tf:"website_redirect,omitempty"` +} + +type ObjectParameters struct { + + // Canned ACL to apply. Valid values are private, public-read, public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, and bucket-owner-full-control. + // +kubebuilder:validation:Optional + ACL *string `json:"acl,omitempty" tf:"acl,omitempty"` + + // Name of the bucket to put the file in. Alternatively, an S3 access point ARN can be specified. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. + // +kubebuilder:validation:Optional + BucketKeyEnabled *bool `json:"bucketKeyEnabled,omitempty" tf:"bucket_key_enabled,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Caching behavior along the request/reply chain Read w3c cache_control for further details. + // +kubebuilder:validation:Optional + CacheControl *string `json:"cacheControl,omitempty" tf:"cache_control,omitempty"` + + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the kms:Decrypt action. Valid values: CRC32, CRC32C, SHA1, SHA256. 
+ // +kubebuilder:validation:Optional + ChecksumAlgorithm *string `json:"checksumAlgorithm,omitempty" tf:"checksum_algorithm,omitempty"` + + // Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text. + // +kubebuilder:validation:Optional + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the gzipbase64 function with small text strings. For larger objects, use source to stream the content from a disk file. + // +kubebuilder:validation:Optional + ContentBase64 *string `json:"contentBase64,omitempty" tf:"content_base64,omitempty"` + + // Presentational information for the object. Read w3c content_disposition for further information. + // +kubebuilder:validation:Optional + ContentDisposition *string `json:"contentDisposition,omitempty" tf:"content_disposition,omitempty"` + + // Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read w3c content encoding for further information. + // +kubebuilder:validation:Optional + ContentEncoding *string `json:"contentEncoding,omitempty" tf:"content_encoding,omitempty"` + + // Language the content is in e.g., en-US or en-GB. + // +kubebuilder:validation:Optional + ContentLanguage *string `json:"contentLanguage,omitempty" tf:"content_language,omitempty"` + + // Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Triggers updates when the value changes.11.11.11 or earlier). 
This attribute is not compatible with KMS encryption, kms_key_id or server_side_encryption = "aws:kms", also if an object is larger than 16 MB, the AWS Management Console will upload or copy that object as a Multipart Upload, and therefore the ETag will not be an MD5 digest (see source_hash instead). + // +kubebuilder:validation:Optional + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // Whether to allow the object to be deleted by removing any legal hold on any object version. Default is false. This value should be set to true only if the bucket has S3 object lock enabled. + // +kubebuilder:validation:Optional + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the aws_kms_key resource, use the arn attribute. If referencing the aws_kms_alias data source or resource, use the target_key_arn attribute. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Name of the object once it is in the bucket. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Map of keys/values to provision metadata (will be automatically prefixed by x-amz-meta-, note that only lowercase label are currently supported by the AWS Go API). 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Legal hold status that you want to apply to the specified object. Valid values are ON and OFF. + // +kubebuilder:validation:Optional + ObjectLockLegalHoldStatus *string `json:"objectLockLegalHoldStatus,omitempty" tf:"object_lock_legal_hold_status,omitempty"` + + // Object lock retention mode that you want to apply to this object. Valid values are GOVERNANCE and COMPLIANCE. + // +kubebuilder:validation:Optional + ObjectLockMode *string `json:"objectLockMode,omitempty" tf:"object_lock_mode,omitempty"` + + // Date and time, in RFC3339 format, when this object's object lock will expire. + // +kubebuilder:validation:Optional + ObjectLockRetainUntilDate *string `json:"objectLockRetainUntilDate,omitempty" tf:"object_lock_retain_until_date,omitempty"` + + // Override provider-level configuration options. See Override Provider below for more details. + // +kubebuilder:validation:Optional + OverrideProvider *OverrideProviderParameters `json:"overrideProvider,omitempty" tf:"override_provider,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Server-side encryption of the object in S3. Valid values are "AES256" and "aws:kms". + // +kubebuilder:validation:Optional + ServerSideEncryption *string `json:"serverSideEncryption,omitempty" tf:"server_side_encryption,omitempty"` + + // Path to a file that will be read and uploaded as raw bytes for the object content. + // +kubebuilder:validation:Optional + Source *string `json:"source,omitempty" tf:"source,omitempty"` + + // Triggers updates like etag but useful to address etag encryption limitations.11.12 or later). (The value is only stored in state and not saved by AWS.) 
+ // +kubebuilder:validation:Optional + SourceHash *string `json:"sourceHash,omitempty" tf:"source_hash,omitempty"` + + // Storage Class for the object. Defaults to "STANDARD". + // +kubebuilder:validation:Optional + StorageClass *string `json:"storageClass,omitempty" tf:"storage_class,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Target URL for website redirect. + // +kubebuilder:validation:Optional + WebsiteRedirect *string `json:"websiteRedirect,omitempty" tf:"website_redirect,omitempty"` +} + +type OverrideProviderInitParameters struct { + + // Override the provider default_tags configuration block. + DefaultTags *DefaultTagsInitParameters `json:"defaultTags,omitempty" tf:"default_tags,omitempty"` +} + +type OverrideProviderObservation struct { + + // Override the provider default_tags configuration block. + DefaultTags *DefaultTagsObservation `json:"defaultTags,omitempty" tf:"default_tags,omitempty"` +} + +type OverrideProviderParameters struct { + + // Override the provider default_tags configuration block. + // +kubebuilder:validation:Optional + DefaultTags *DefaultTagsParameters `json:"defaultTags,omitempty" tf:"default_tags,omitempty"` +} + +// ObjectSpec defines the desired state of Object +type ObjectSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ObjectParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ObjectInitParameters `json:"initProvider,omitempty"` +} + +// ObjectStatus defines the observed state of Object. +type ObjectStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ObjectObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Object is the Schema for the Objects API. Provides an S3 object resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Object struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.key) || (has(self.initProvider) && has(self.initProvider.key))",message="spec.forProvider.key is a required parameter" + Spec ObjectSpec `json:"spec"` + Status ObjectStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ObjectList contains a list of Objects +type ObjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Object `json:"items"` +} + +// Repository type metadata. 
+var ( + Object_Kind = "Object" + Object_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Object_Kind}.String() + Object_KindAPIVersion = Object_Kind + "." + CRDGroupVersion.String() + Object_GroupVersionKind = CRDGroupVersion.WithKind(Object_Kind) +) + +func init() { + SchemeBuilder.Register(&Object{}, &ObjectList{}) +} diff --git a/apis/s3control/v1beta1/zz_accesspointpolicy_types.go b/apis/s3control/v1beta1/zz_accesspointpolicy_types.go index 657becc65d..888d043366 100755 --- a/apis/s3control/v1beta1/zz_accesspointpolicy_types.go +++ b/apis/s3control/v1beta1/zz_accesspointpolicy_types.go @@ -16,7 +16,7 @@ import ( type AccessPointPolicyInitParameters struct { // The ARN of the access point that you want to associate with the specified policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta1.AccessPoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta2.AccessPoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) AccessPointArn *string `json:"accessPointArn,omitempty" tf:"access_point_arn,omitempty"` @@ -50,7 +50,7 @@ type AccessPointPolicyObservation struct { type AccessPointPolicyParameters struct { // The ARN of the access point that you want to associate with the specified policy. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta1.AccessPoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta2.AccessPoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional AccessPointArn *string `json:"accessPointArn,omitempty" tf:"access_point_arn,omitempty"` diff --git a/apis/s3control/v1beta1/zz_generated.conversion_hubs.go b/apis/s3control/v1beta1/zz_generated.conversion_hubs.go index 06d964b69d..f65a9f8bbe 100755 --- a/apis/s3control/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/s3control/v1beta1/zz_generated.conversion_hubs.go @@ -6,26 +6,11 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *AccessPoint) Hub() {} - // Hub marks this type as a conversion hub. func (tr *AccessPointPolicy) Hub() {} // Hub marks this type as a conversion hub. func (tr *AccountPublicAccessBlock) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MultiRegionAccessPoint) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MultiRegionAccessPointPolicy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ObjectLambdaAccessPoint) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ObjectLambdaAccessPointPolicy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *StorageLensConfiguration) Hub() {} diff --git a/apis/s3control/v1beta1/zz_generated.conversion_spokes.go b/apis/s3control/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..126309f5f2 --- /dev/null +++ b/apis/s3control/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AccessPoint to the hub type. +func (tr *AccessPoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AccessPoint type. +func (tr *AccessPoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MultiRegionAccessPoint to the hub type. +func (tr *MultiRegionAccessPoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MultiRegionAccessPoint type. 
+func (tr *MultiRegionAccessPoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MultiRegionAccessPointPolicy to the hub type. +func (tr *MultiRegionAccessPointPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MultiRegionAccessPointPolicy type. +func (tr *MultiRegionAccessPointPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ObjectLambdaAccessPoint to the hub type. 
+func (tr *ObjectLambdaAccessPoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ObjectLambdaAccessPoint type. +func (tr *ObjectLambdaAccessPoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this StorageLensConfiguration to the hub type. +func (tr *StorageLensConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StorageLensConfiguration type. 
+func (tr *StorageLensConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/s3control/v1beta1/zz_generated.resolvers.go b/apis/s3control/v1beta1/zz_generated.resolvers.go index 6d0d0810f1..13e5e14a40 100644 --- a/apis/s3control/v1beta1/zz_generated.resolvers.go +++ b/apis/s3control/v1beta1/zz_generated.resolvers.go @@ -119,7 +119,7 @@ func (mg *AccessPointPolicy) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta1", "AccessPoint", "AccessPointList") + m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta2", "AccessPoint", "AccessPointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -138,7 +138,7 @@ func (mg *AccessPointPolicy) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.AccessPointArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AccessPointArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta1", "AccessPoint", "AccessPointList") + m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta2", "AccessPoint", "AccessPointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -314,7 +314,7 @@ func (mg *ObjectLambdaAccessPointPolicy) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err 
= apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta1", "ObjectLambdaAccessPoint", "ObjectLambdaAccessPointList") + m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta2", "ObjectLambdaAccessPoint", "ObjectLambdaAccessPointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -333,7 +333,7 @@ func (mg *ObjectLambdaAccessPointPolicy) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.Name = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta1", "ObjectLambdaAccessPoint", "ObjectLambdaAccessPointList") + m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta2", "ObjectLambdaAccessPoint", "ObjectLambdaAccessPointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/s3control/v1beta1/zz_objectlambdaaccesspointpolicy_types.go b/apis/s3control/v1beta1/zz_objectlambdaaccesspointpolicy_types.go index 09db772cf1..83e6f27998 100755 --- a/apis/s3control/v1beta1/zz_objectlambdaaccesspointpolicy_types.go +++ b/apis/s3control/v1beta1/zz_objectlambdaaccesspointpolicy_types.go @@ -19,7 +19,7 @@ type ObjectLambdaAccessPointPolicyInitParameters struct { AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` // The name of the Object Lambda Access Point. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta1.ObjectLambdaAccessPoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta2.ObjectLambdaAccessPoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) Name *string `json:"name,omitempty" tf:"name,omitempty"` @@ -60,7 +60,7 @@ type ObjectLambdaAccessPointPolicyParameters struct { AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` // The name of the Object Lambda Access Point. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta1.ObjectLambdaAccessPoint + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta2.ObjectLambdaAccessPoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) // +kubebuilder:validation:Optional Name *string `json:"name,omitempty" tf:"name,omitempty"` diff --git a/apis/s3control/v1beta2/zz_accesspoint_terraformed.go b/apis/s3control/v1beta2/zz_accesspoint_terraformed.go new file mode 100755 index 0000000000..a80a7f4a8a --- /dev/null +++ b/apis/s3control/v1beta2/zz_accesspoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AccessPoint +func (mg *AccessPoint) GetTerraformResourceType() string { + return "aws_s3_access_point" +} + +// GetConnectionDetailsMapping for this AccessPoint +func (tr *AccessPoint) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AccessPoint +func (tr *AccessPoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AccessPoint +func (tr *AccessPoint) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AccessPoint +func (tr *AccessPoint) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AccessPoint +func (tr *AccessPoint) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AccessPoint +func (tr *AccessPoint) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AccessPoint +func (tr *AccessPoint) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AccessPoint +func (tr *AccessPoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AccessPoint using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AccessPoint) LateInitialize(attrs []byte) (bool, error) { + params := &AccessPointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AccessPoint) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3control/v1beta2/zz_accesspoint_types.go b/apis/s3control/v1beta2/zz_accesspoint_types.go new file mode 100755 index 0000000000..42c6bdeb29 --- /dev/null +++ b/apis/s3control/v1beta2/zz_accesspoint_types.go @@ -0,0 +1,290 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessPointInitParameters struct { + + // AWS account ID for the owner of the bucket for which you want to create an access point. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Name of an AWS Partition S3 General Purpose Bucket or the ARN of S3 on Outposts Bucket that you want to associate this access point with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // AWS account ID associated with the S3 bucket associated with this access point. + BucketAccountID *string `json:"bucketAccountId,omitempty" tf:"bucket_account_id,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name you want to assign to this access point. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Valid JSON document that specifies the policy that you want to apply to this access point. Removing policy from your configuration or setting policy to null or an empty string (i.e., policy = "") will not delete the policy since it could have been set by aws_s3control_access_point_policy. To remove the policy, set it to "{}" (an empty JSON document). + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // Configuration block to manage the PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. Detailed below. + PublicAccessBlockConfiguration *PublicAccessBlockConfigurationInitParameters `json:"publicAccessBlockConfiguration,omitempty" tf:"public_access_block_configuration,omitempty"` + + // Configuration block to restrict access to this access point to requests from the specified Virtual Private Cloud (VPC). Required for S3 on Outposts. Detailed below. + VPCConfiguration *VPCConfigurationInitParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type AccessPointObservation struct { + + // AWS account ID for the owner of the bucket for which you want to create an access point. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Alias of the S3 Access Point. + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // ARN of the S3 Access Point. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Name of an AWS Partition S3 General Purpose Bucket or the ARN of S3 on Outposts Bucket that you want to associate this access point with. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // AWS account ID associated with the S3 bucket associated with this access point. 
+ BucketAccountID *string `json:"bucketAccountId,omitempty" tf:"bucket_account_id,omitempty"` + + // DNS domain name of the S3 Access Point in the format name-account_id.s3-accesspoint.region.amazonaws.com. + // Note: S3 access points only support secure access by HTTPS. HTTP isn't supported. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // VPC endpoints for the S3 Access Point. + // +mapType=granular + Endpoints map[string]*string `json:"endpoints,omitempty" tf:"endpoints,omitempty"` + + // Indicates whether this access point currently has a policy that allows public access. + HasPublicAccessPolicy *bool `json:"hasPublicAccessPolicy,omitempty" tf:"has_public_access_policy,omitempty"` + + // For Access Point of an AWS Partition S3 Bucket, the AWS account ID and access point name separated by a colon (:). For S3 on Outposts Bucket, the ARN of the Access Point. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name you want to assign to this access point. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Indicates whether this access point allows access from the public Internet. Values are VPC (the access point doesn't allow access from the public Internet) and Internet (the access point allows access from the public Internet, subject to the access point and bucket access policies). + NetworkOrigin *string `json:"networkOrigin,omitempty" tf:"network_origin,omitempty"` + + // Valid JSON document that specifies the policy that you want to apply to this access point. Removing policy from your configuration or setting policy to null or an empty string (i.e., policy = "") will not delete the policy since it could have been set by aws_s3control_access_point_policy. To remove the policy, set it to "{}" (an empty JSON document). + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // Configuration block to manage the PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. 
You can enable the configuration options in any combination. Detailed below. + PublicAccessBlockConfiguration *PublicAccessBlockConfigurationObservation `json:"publicAccessBlockConfiguration,omitempty" tf:"public_access_block_configuration,omitempty"` + + // Configuration block to restrict access to this access point to requests from the specified Virtual Private Cloud (VPC). Required for S3 on Outposts. Detailed below. + VPCConfiguration *VPCConfigurationObservation `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type AccessPointParameters struct { + + // AWS account ID for the owner of the bucket for which you want to create an access point. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Name of an AWS Partition S3 General Purpose Bucket or the ARN of S3 on Outposts Bucket that you want to associate this access point with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // AWS account ID associated with the S3 bucket associated with this access point. + // +kubebuilder:validation:Optional + BucketAccountID *string `json:"bucketAccountId,omitempty" tf:"bucket_account_id,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Name you want to assign to this access point. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Valid JSON document that specifies the policy that you want to apply to this access point. Removing policy from your configuration or setting policy to null or an empty string (i.e., policy = "") will not delete the policy since it could have been set by aws_s3control_access_point_policy. To remove the policy, set it to "{}" (an empty JSON document). + // +kubebuilder:validation:Optional + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // Configuration block to manage the PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. Detailed below. + // +kubebuilder:validation:Optional + PublicAccessBlockConfiguration *PublicAccessBlockConfigurationParameters `json:"publicAccessBlockConfiguration,omitempty" tf:"public_access_block_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration block to restrict access to this access point to requests from the specified Virtual Private Cloud (VPC). Required for S3 on Outposts. Detailed below. + // +kubebuilder:validation:Optional + VPCConfiguration *VPCConfigurationParameters `json:"vpcConfiguration,omitempty" tf:"vpc_configuration,omitempty"` +} + +type PublicAccessBlockConfigurationInitParameters struct { + + // Whether Amazon S3 should block public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect existing policies or ACLs. When set to true causes the following behavior: + BlockPublicAcls *bool `json:"blockPublicAcls,omitempty" tf:"block_public_acls,omitempty"` + + // Whether Amazon S3 should block public bucket policies for buckets in this account. Defaults to true. 
Enabling this setting does not affect existing bucket policies. When set to true causes Amazon S3 to: + BlockPublicPolicy *bool `json:"blockPublicPolicy,omitempty" tf:"block_public_policy,omitempty"` + + // Whether Amazon S3 should ignore public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. When set to true causes Amazon S3 to: + IgnorePublicAcls *bool `json:"ignorePublicAcls,omitempty" tf:"ignore_public_acls,omitempty"` + + // Whether Amazon S3 should restrict public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. When set to true: + RestrictPublicBuckets *bool `json:"restrictPublicBuckets,omitempty" tf:"restrict_public_buckets,omitempty"` +} + +type PublicAccessBlockConfigurationObservation struct { + + // Whether Amazon S3 should block public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect existing policies or ACLs. When set to true causes the following behavior: + BlockPublicAcls *bool `json:"blockPublicAcls,omitempty" tf:"block_public_acls,omitempty"` + + // Whether Amazon S3 should block public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect existing bucket policies. When set to true causes Amazon S3 to: + BlockPublicPolicy *bool `json:"blockPublicPolicy,omitempty" tf:"block_public_policy,omitempty"` + + // Whether Amazon S3 should ignore public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. 
When set to true causes Amazon S3 to: + IgnorePublicAcls *bool `json:"ignorePublicAcls,omitempty" tf:"ignore_public_acls,omitempty"` + + // Whether Amazon S3 should restrict public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. When set to true: + RestrictPublicBuckets *bool `json:"restrictPublicBuckets,omitempty" tf:"restrict_public_buckets,omitempty"` +} + +type PublicAccessBlockConfigurationParameters struct { + + // Whether Amazon S3 should block public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect existing policies or ACLs. When set to true causes the following behavior: + // +kubebuilder:validation:Optional + BlockPublicAcls *bool `json:"blockPublicAcls,omitempty" tf:"block_public_acls,omitempty"` + + // Whether Amazon S3 should block public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect existing bucket policies. When set to true causes Amazon S3 to: + // +kubebuilder:validation:Optional + BlockPublicPolicy *bool `json:"blockPublicPolicy,omitempty" tf:"block_public_policy,omitempty"` + + // Whether Amazon S3 should ignore public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. When set to true causes Amazon S3 to: + // +kubebuilder:validation:Optional + IgnorePublicAcls *bool `json:"ignorePublicAcls,omitempty" tf:"ignore_public_acls,omitempty"` + + // Whether Amazon S3 should restrict public bucket policies for buckets in this account. Defaults to true. 
Enabling this setting does not affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. When set to true: + // +kubebuilder:validation:Optional + RestrictPublicBuckets *bool `json:"restrictPublicBuckets,omitempty" tf:"restrict_public_buckets,omitempty"` +} + +type VPCConfigurationInitParameters struct { + + // This access point will only allow connections from the specified VPC ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type VPCConfigurationObservation struct { + + // This access point will only allow connections from the specified VPC ID. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type VPCConfigurationParameters struct { + + // This access point will only allow connections from the specified VPC ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. 
+ // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +// AccessPointSpec defines the desired state of AccessPoint +type AccessPointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccessPointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccessPointInitParameters `json:"initProvider,omitempty"` +} + +// AccessPointStatus defines the observed state of AccessPoint. +type AccessPointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccessPointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AccessPoint is the Schema for the AccessPoints API. Manages an S3 Access Point. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type AccessPoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec AccessPointSpec `json:"spec"` + Status AccessPointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccessPointList contains a list of AccessPoints +type AccessPointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AccessPoint `json:"items"` +} + +// Repository type metadata. +var ( + AccessPoint_Kind = "AccessPoint" + AccessPoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AccessPoint_Kind}.String() + AccessPoint_KindAPIVersion = AccessPoint_Kind + "." 
+ CRDGroupVersion.String() + AccessPoint_GroupVersionKind = CRDGroupVersion.WithKind(AccessPoint_Kind) +) + +func init() { + SchemeBuilder.Register(&AccessPoint{}, &AccessPointList{}) +} diff --git a/apis/s3control/v1beta2/zz_generated.conversion_hubs.go b/apis/s3control/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..b1278e067f --- /dev/null +++ b/apis/s3control/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *AccessPoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MultiRegionAccessPoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MultiRegionAccessPointPolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ObjectLambdaAccessPoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *StorageLensConfiguration) Hub() {} diff --git a/apis/s3control/v1beta2/zz_generated.deepcopy.go b/apis/s3control/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..60161e416a --- /dev/null +++ b/apis/s3control/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3967 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessPoint) DeepCopyInto(out *AccessPoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPoint. +func (in *AccessPoint) DeepCopy() *AccessPoint { + if in == nil { + return nil + } + out := new(AccessPoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccessPoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointInitParameters) DeepCopyInto(out *AccessPointInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PublicAccessBlockConfiguration != nil { + in, out := &in.PublicAccessBlockConfiguration, &out.PublicAccessBlockConfiguration + *out = new(PublicAccessBlockConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VPCConfiguration != nil { + in, out 
:= &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointInitParameters. +func (in *AccessPointInitParameters) DeepCopy() *AccessPointInitParameters { + if in == nil { + return nil + } + out := new(AccessPointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointList) DeepCopyInto(out *AccessPointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AccessPoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointList. +func (in *AccessPointList) DeepCopy() *AccessPointList { + if in == nil { + return nil + } + out := new(AccessPointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccessPointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessPointObservation) DeepCopyInto(out *AccessPointObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.HasPublicAccessPolicy != nil { + in, out := &in.HasPublicAccessPolicy, &out.HasPublicAccessPolicy + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkOrigin != nil { + in, out := &in.NetworkOrigin, &out.NetworkOrigin + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PublicAccessBlockConfiguration != nil { + in, out := &in.PublicAccessBlockConfiguration, &out.PublicAccessBlockConfiguration + *out = new(PublicAccessBlockConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationObservation) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointObservation. +func (in *AccessPointObservation) DeepCopy() *AccessPointObservation { + if in == nil { + return nil + } + out := new(AccessPointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointParameters) DeepCopyInto(out *AccessPointParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PublicAccessBlockConfiguration != nil { + in, out := &in.PublicAccessBlockConfiguration, &out.PublicAccessBlockConfiguration + *out = new(PublicAccessBlockConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.VPCConfiguration != nil { + in, out := &in.VPCConfiguration, &out.VPCConfiguration + *out = new(VPCConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointParameters. 
+func (in *AccessPointParameters) DeepCopy() *AccessPointParameters { + if in == nil { + return nil + } + out := new(AccessPointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointSpec) DeepCopyInto(out *AccessPointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointSpec. +func (in *AccessPointSpec) DeepCopy() *AccessPointSpec { + if in == nil { + return nil + } + out := new(AccessPointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPointStatus) DeepCopyInto(out *AccessPointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPointStatus. +func (in *AccessPointStatus) DeepCopy() *AccessPointStatus { + if in == nil { + return nil + } + out := new(AccessPointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLevelDetailedStatusCodeMetricsInitParameters) DeepCopyInto(out *AccountLevelDetailedStatusCodeMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLevelDetailedStatusCodeMetricsInitParameters. 
+func (in *AccountLevelDetailedStatusCodeMetricsInitParameters) DeepCopy() *AccountLevelDetailedStatusCodeMetricsInitParameters { + if in == nil { + return nil + } + out := new(AccountLevelDetailedStatusCodeMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLevelDetailedStatusCodeMetricsObservation) DeepCopyInto(out *AccountLevelDetailedStatusCodeMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLevelDetailedStatusCodeMetricsObservation. +func (in *AccountLevelDetailedStatusCodeMetricsObservation) DeepCopy() *AccountLevelDetailedStatusCodeMetricsObservation { + if in == nil { + return nil + } + out := new(AccountLevelDetailedStatusCodeMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLevelDetailedStatusCodeMetricsParameters) DeepCopyInto(out *AccountLevelDetailedStatusCodeMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLevelDetailedStatusCodeMetricsParameters. +func (in *AccountLevelDetailedStatusCodeMetricsParameters) DeepCopy() *AccountLevelDetailedStatusCodeMetricsParameters { + if in == nil { + return nil + } + out := new(AccountLevelDetailedStatusCodeMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountLevelInitParameters) DeepCopyInto(out *AccountLevelInitParameters) { + *out = *in + if in.ActivityMetrics != nil { + in, out := &in.ActivityMetrics, &out.ActivityMetrics + *out = new(ActivityMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedCostOptimizationMetrics != nil { + in, out := &in.AdvancedCostOptimizationMetrics, &out.AdvancedCostOptimizationMetrics + *out = new(AdvancedCostOptimizationMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedDataProtectionMetrics != nil { + in, out := &in.AdvancedDataProtectionMetrics, &out.AdvancedDataProtectionMetrics + *out = new(AdvancedDataProtectionMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BucketLevel != nil { + in, out := &in.BucketLevel, &out.BucketLevel + *out = new(BucketLevelInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedStatusCodeMetrics != nil { + in, out := &in.DetailedStatusCodeMetrics, &out.DetailedStatusCodeMetrics + *out = new(AccountLevelDetailedStatusCodeMetricsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLevelInitParameters. +func (in *AccountLevelInitParameters) DeepCopy() *AccountLevelInitParameters { + if in == nil { + return nil + } + out := new(AccountLevelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountLevelObservation) DeepCopyInto(out *AccountLevelObservation) { + *out = *in + if in.ActivityMetrics != nil { + in, out := &in.ActivityMetrics, &out.ActivityMetrics + *out = new(ActivityMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.AdvancedCostOptimizationMetrics != nil { + in, out := &in.AdvancedCostOptimizationMetrics, &out.AdvancedCostOptimizationMetrics + *out = new(AdvancedCostOptimizationMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.AdvancedDataProtectionMetrics != nil { + in, out := &in.AdvancedDataProtectionMetrics, &out.AdvancedDataProtectionMetrics + *out = new(AdvancedDataProtectionMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.BucketLevel != nil { + in, out := &in.BucketLevel, &out.BucketLevel + *out = new(BucketLevelObservation) + (*in).DeepCopyInto(*out) + } + if in.DetailedStatusCodeMetrics != nil { + in, out := &in.DetailedStatusCodeMetrics, &out.DetailedStatusCodeMetrics + *out = new(AccountLevelDetailedStatusCodeMetricsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLevelObservation. +func (in *AccountLevelObservation) DeepCopy() *AccountLevelObservation { + if in == nil { + return nil + } + out := new(AccountLevelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountLevelParameters) DeepCopyInto(out *AccountLevelParameters) { + *out = *in + if in.ActivityMetrics != nil { + in, out := &in.ActivityMetrics, &out.ActivityMetrics + *out = new(ActivityMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedCostOptimizationMetrics != nil { + in, out := &in.AdvancedCostOptimizationMetrics, &out.AdvancedCostOptimizationMetrics + *out = new(AdvancedCostOptimizationMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedDataProtectionMetrics != nil { + in, out := &in.AdvancedDataProtectionMetrics, &out.AdvancedDataProtectionMetrics + *out = new(AdvancedDataProtectionMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.BucketLevel != nil { + in, out := &in.BucketLevel, &out.BucketLevel + *out = new(BucketLevelParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedStatusCodeMetrics != nil { + in, out := &in.DetailedStatusCodeMetrics, &out.DetailedStatusCodeMetrics + *out = new(AccountLevelDetailedStatusCodeMetricsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLevelParameters. +func (in *AccountLevelParameters) DeepCopy() *AccountLevelParameters { + if in == nil { + return nil + } + out := new(AccountLevelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActivityMetricsInitParameters) DeepCopyInto(out *ActivityMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActivityMetricsInitParameters. 
+func (in *ActivityMetricsInitParameters) DeepCopy() *ActivityMetricsInitParameters { + if in == nil { + return nil + } + out := new(ActivityMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActivityMetricsObservation) DeepCopyInto(out *ActivityMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActivityMetricsObservation. +func (in *ActivityMetricsObservation) DeepCopy() *ActivityMetricsObservation { + if in == nil { + return nil + } + out := new(ActivityMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActivityMetricsParameters) DeepCopyInto(out *ActivityMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActivityMetricsParameters. +func (in *ActivityMetricsParameters) DeepCopy() *ActivityMetricsParameters { + if in == nil { + return nil + } + out := new(ActivityMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedCostOptimizationMetricsInitParameters) DeepCopyInto(out *AdvancedCostOptimizationMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedCostOptimizationMetricsInitParameters. 
+func (in *AdvancedCostOptimizationMetricsInitParameters) DeepCopy() *AdvancedCostOptimizationMetricsInitParameters { + if in == nil { + return nil + } + out := new(AdvancedCostOptimizationMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedCostOptimizationMetricsObservation) DeepCopyInto(out *AdvancedCostOptimizationMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedCostOptimizationMetricsObservation. +func (in *AdvancedCostOptimizationMetricsObservation) DeepCopy() *AdvancedCostOptimizationMetricsObservation { + if in == nil { + return nil + } + out := new(AdvancedCostOptimizationMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedCostOptimizationMetricsParameters) DeepCopyInto(out *AdvancedCostOptimizationMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedCostOptimizationMetricsParameters. +func (in *AdvancedCostOptimizationMetricsParameters) DeepCopy() *AdvancedCostOptimizationMetricsParameters { + if in == nil { + return nil + } + out := new(AdvancedCostOptimizationMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedDataProtectionMetricsInitParameters) DeepCopyInto(out *AdvancedDataProtectionMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedDataProtectionMetricsInitParameters. +func (in *AdvancedDataProtectionMetricsInitParameters) DeepCopy() *AdvancedDataProtectionMetricsInitParameters { + if in == nil { + return nil + } + out := new(AdvancedDataProtectionMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedDataProtectionMetricsObservation) DeepCopyInto(out *AdvancedDataProtectionMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedDataProtectionMetricsObservation. +func (in *AdvancedDataProtectionMetricsObservation) DeepCopy() *AdvancedDataProtectionMetricsObservation { + if in == nil { + return nil + } + out := new(AdvancedDataProtectionMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedDataProtectionMetricsParameters) DeepCopyInto(out *AdvancedDataProtectionMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedDataProtectionMetricsParameters. 
+func (in *AdvancedDataProtectionMetricsParameters) DeepCopy() *AdvancedDataProtectionMetricsParameters { + if in == nil { + return nil + } + out := new(AdvancedDataProtectionMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsLambdaInitParameters) DeepCopyInto(out *AwsLambdaInitParameters) { + *out = *in + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } + if in.FunctionArnRef != nil { + in, out := &in.FunctionArnRef, &out.FunctionArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionArnSelector != nil { + in, out := &in.FunctionArnSelector, &out.FunctionArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FunctionPayload != nil { + in, out := &in.FunctionPayload, &out.FunctionPayload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsLambdaInitParameters. +func (in *AwsLambdaInitParameters) DeepCopy() *AwsLambdaInitParameters { + if in == nil { + return nil + } + out := new(AwsLambdaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsLambdaObservation) DeepCopyInto(out *AwsLambdaObservation) { + *out = *in + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } + if in.FunctionPayload != nil { + in, out := &in.FunctionPayload, &out.FunctionPayload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsLambdaObservation. 
+func (in *AwsLambdaObservation) DeepCopy() *AwsLambdaObservation { + if in == nil { + return nil + } + out := new(AwsLambdaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsLambdaParameters) DeepCopyInto(out *AwsLambdaParameters) { + *out = *in + if in.FunctionArn != nil { + in, out := &in.FunctionArn, &out.FunctionArn + *out = new(string) + **out = **in + } + if in.FunctionArnRef != nil { + in, out := &in.FunctionArnRef, &out.FunctionArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionArnSelector != nil { + in, out := &in.FunctionArnSelector, &out.FunctionArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FunctionPayload != nil { + in, out := &in.FunctionPayload, &out.FunctionPayload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsLambdaParameters. +func (in *AwsLambdaParameters) DeepCopy() *AwsLambdaParameters { + if in == nil { + return nil + } + out := new(AwsLambdaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsOrgInitParameters) DeepCopyInto(out *AwsOrgInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsOrgInitParameters. +func (in *AwsOrgInitParameters) DeepCopy() *AwsOrgInitParameters { + if in == nil { + return nil + } + out := new(AwsOrgInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AwsOrgObservation) DeepCopyInto(out *AwsOrgObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsOrgObservation. +func (in *AwsOrgObservation) DeepCopy() *AwsOrgObservation { + if in == nil { + return nil + } + out := new(AwsOrgObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsOrgParameters) DeepCopyInto(out *AwsOrgParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsOrgParameters. +func (in *AwsOrgParameters) DeepCopy() *AwsOrgParameters { + if in == nil { + return nil + } + out := new(AwsOrgParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLevelActivityMetricsInitParameters) DeepCopyInto(out *BucketLevelActivityMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelActivityMetricsInitParameters. +func (in *BucketLevelActivityMetricsInitParameters) DeepCopy() *BucketLevelActivityMetricsInitParameters { + if in == nil { + return nil + } + out := new(BucketLevelActivityMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketLevelActivityMetricsObservation) DeepCopyInto(out *BucketLevelActivityMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelActivityMetricsObservation. +func (in *BucketLevelActivityMetricsObservation) DeepCopy() *BucketLevelActivityMetricsObservation { + if in == nil { + return nil + } + out := new(BucketLevelActivityMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLevelActivityMetricsParameters) DeepCopyInto(out *BucketLevelActivityMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelActivityMetricsParameters. +func (in *BucketLevelActivityMetricsParameters) DeepCopy() *BucketLevelActivityMetricsParameters { + if in == nil { + return nil + } + out := new(BucketLevelActivityMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLevelAdvancedCostOptimizationMetricsInitParameters) DeepCopyInto(out *BucketLevelAdvancedCostOptimizationMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelAdvancedCostOptimizationMetricsInitParameters. 
+func (in *BucketLevelAdvancedCostOptimizationMetricsInitParameters) DeepCopy() *BucketLevelAdvancedCostOptimizationMetricsInitParameters { + if in == nil { + return nil + } + out := new(BucketLevelAdvancedCostOptimizationMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLevelAdvancedCostOptimizationMetricsObservation) DeepCopyInto(out *BucketLevelAdvancedCostOptimizationMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelAdvancedCostOptimizationMetricsObservation. +func (in *BucketLevelAdvancedCostOptimizationMetricsObservation) DeepCopy() *BucketLevelAdvancedCostOptimizationMetricsObservation { + if in == nil { + return nil + } + out := new(BucketLevelAdvancedCostOptimizationMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLevelAdvancedCostOptimizationMetricsParameters) DeepCopyInto(out *BucketLevelAdvancedCostOptimizationMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelAdvancedCostOptimizationMetricsParameters. +func (in *BucketLevelAdvancedCostOptimizationMetricsParameters) DeepCopy() *BucketLevelAdvancedCostOptimizationMetricsParameters { + if in == nil { + return nil + } + out := new(BucketLevelAdvancedCostOptimizationMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketLevelAdvancedDataProtectionMetricsInitParameters) DeepCopyInto(out *BucketLevelAdvancedDataProtectionMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelAdvancedDataProtectionMetricsInitParameters. +func (in *BucketLevelAdvancedDataProtectionMetricsInitParameters) DeepCopy() *BucketLevelAdvancedDataProtectionMetricsInitParameters { + if in == nil { + return nil + } + out := new(BucketLevelAdvancedDataProtectionMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLevelAdvancedDataProtectionMetricsObservation) DeepCopyInto(out *BucketLevelAdvancedDataProtectionMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelAdvancedDataProtectionMetricsObservation. +func (in *BucketLevelAdvancedDataProtectionMetricsObservation) DeepCopy() *BucketLevelAdvancedDataProtectionMetricsObservation { + if in == nil { + return nil + } + out := new(BucketLevelAdvancedDataProtectionMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLevelAdvancedDataProtectionMetricsParameters) DeepCopyInto(out *BucketLevelAdvancedDataProtectionMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelAdvancedDataProtectionMetricsParameters. 
+func (in *BucketLevelAdvancedDataProtectionMetricsParameters) DeepCopy() *BucketLevelAdvancedDataProtectionMetricsParameters { + if in == nil { + return nil + } + out := new(BucketLevelAdvancedDataProtectionMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketLevelInitParameters) DeepCopyInto(out *BucketLevelInitParameters) { + *out = *in + if in.ActivityMetrics != nil { + in, out := &in.ActivityMetrics, &out.ActivityMetrics + *out = new(BucketLevelActivityMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedCostOptimizationMetrics != nil { + in, out := &in.AdvancedCostOptimizationMetrics, &out.AdvancedCostOptimizationMetrics + *out = new(BucketLevelAdvancedCostOptimizationMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedDataProtectionMetrics != nil { + in, out := &in.AdvancedDataProtectionMetrics, &out.AdvancedDataProtectionMetrics + *out = new(BucketLevelAdvancedDataProtectionMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedStatusCodeMetrics != nil { + in, out := &in.DetailedStatusCodeMetrics, &out.DetailedStatusCodeMetrics + *out = new(DetailedStatusCodeMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrefixLevel != nil { + in, out := &in.PrefixLevel, &out.PrefixLevel + *out = new(PrefixLevelInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelInitParameters. +func (in *BucketLevelInitParameters) DeepCopy() *BucketLevelInitParameters { + if in == nil { + return nil + } + out := new(BucketLevelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketLevelObservation) DeepCopyInto(out *BucketLevelObservation) { + *out = *in + if in.ActivityMetrics != nil { + in, out := &in.ActivityMetrics, &out.ActivityMetrics + *out = new(BucketLevelActivityMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.AdvancedCostOptimizationMetrics != nil { + in, out := &in.AdvancedCostOptimizationMetrics, &out.AdvancedCostOptimizationMetrics + *out = new(BucketLevelAdvancedCostOptimizationMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.AdvancedDataProtectionMetrics != nil { + in, out := &in.AdvancedDataProtectionMetrics, &out.AdvancedDataProtectionMetrics + *out = new(BucketLevelAdvancedDataProtectionMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.DetailedStatusCodeMetrics != nil { + in, out := &in.DetailedStatusCodeMetrics, &out.DetailedStatusCodeMetrics + *out = new(DetailedStatusCodeMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.PrefixLevel != nil { + in, out := &in.PrefixLevel, &out.PrefixLevel + *out = new(PrefixLevelObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelObservation. +func (in *BucketLevelObservation) DeepCopy() *BucketLevelObservation { + if in == nil { + return nil + } + out := new(BucketLevelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketLevelParameters) DeepCopyInto(out *BucketLevelParameters) { + *out = *in + if in.ActivityMetrics != nil { + in, out := &in.ActivityMetrics, &out.ActivityMetrics + *out = new(BucketLevelActivityMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedCostOptimizationMetrics != nil { + in, out := &in.AdvancedCostOptimizationMetrics, &out.AdvancedCostOptimizationMetrics + *out = new(BucketLevelAdvancedCostOptimizationMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedDataProtectionMetrics != nil { + in, out := &in.AdvancedDataProtectionMetrics, &out.AdvancedDataProtectionMetrics + *out = new(BucketLevelAdvancedDataProtectionMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedStatusCodeMetrics != nil { + in, out := &in.DetailedStatusCodeMetrics, &out.DetailedStatusCodeMetrics + *out = new(DetailedStatusCodeMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.PrefixLevel != nil { + in, out := &in.PrefixLevel, &out.PrefixLevel + *out = new(PrefixLevelParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketLevelParameters. +func (in *BucketLevelParameters) DeepCopy() *BucketLevelParameters { + if in == nil { + return nil + } + out := new(BucketLevelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudWatchMetricsInitParameters) DeepCopyInto(out *CloudWatchMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchMetricsInitParameters. 
+func (in *CloudWatchMetricsInitParameters) DeepCopy() *CloudWatchMetricsInitParameters { + if in == nil { + return nil + } + out := new(CloudWatchMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudWatchMetricsObservation) DeepCopyInto(out *CloudWatchMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchMetricsObservation. +func (in *CloudWatchMetricsObservation) DeepCopy() *CloudWatchMetricsObservation { + if in == nil { + return nil + } + out := new(CloudWatchMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudWatchMetricsParameters) DeepCopyInto(out *CloudWatchMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchMetricsParameters. +func (in *CloudWatchMetricsParameters) DeepCopy() *CloudWatchMetricsParameters { + if in == nil { + return nil + } + out := new(CloudWatchMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.AllowedFeatures != nil { + in, out := &in.AllowedFeatures, &out.AllowedFeatures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CloudWatchMetricsEnabled != nil { + in, out := &in.CloudWatchMetricsEnabled, &out.CloudWatchMetricsEnabled + *out = new(bool) + **out = **in + } + if in.SupportingAccessPoint != nil { + in, out := &in.SupportingAccessPoint, &out.SupportingAccessPoint + *out = new(string) + **out = **in + } + if in.SupportingAccessPointRef != nil { + in, out := &in.SupportingAccessPointRef, &out.SupportingAccessPointRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SupportingAccessPointSelector != nil { + in, out := &in.SupportingAccessPointSelector, &out.SupportingAccessPointSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TransformationConfiguration != nil { + in, out := &in.TransformationConfiguration, &out.TransformationConfiguration + *out = make([]TransformationConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.AllowedFeatures != nil { + in, out := &in.AllowedFeatures, &out.AllowedFeatures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CloudWatchMetricsEnabled != nil { + in, out := &in.CloudWatchMetricsEnabled, &out.CloudWatchMetricsEnabled + *out = new(bool) + **out = **in + } + if in.SupportingAccessPoint != nil { + in, out := &in.SupportingAccessPoint, &out.SupportingAccessPoint + *out = new(string) + **out = **in + } + if in.TransformationConfiguration != nil { + in, out := &in.TransformationConfiguration, &out.TransformationConfiguration + *out = make([]TransformationConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.AllowedFeatures != nil { + in, out := &in.AllowedFeatures, &out.AllowedFeatures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CloudWatchMetricsEnabled != nil { + in, out := &in.CloudWatchMetricsEnabled, &out.CloudWatchMetricsEnabled + *out = new(bool) + **out = **in + } + if in.SupportingAccessPoint != nil { + in, out := &in.SupportingAccessPoint, &out.SupportingAccessPoint + *out = new(string) + **out = **in + } + if in.SupportingAccessPointRef != nil { + in, out := &in.SupportingAccessPointRef, &out.SupportingAccessPointRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SupportingAccessPointSelector != nil { + in, out := &in.SupportingAccessPointSelector, &out.SupportingAccessPointSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TransformationConfiguration != nil { + in, out := &in.TransformationConfiguration, &out.TransformationConfiguration + *out = make([]TransformationConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentTransformationInitParameters) DeepCopyInto(out *ContentTransformationInitParameters) { + *out = *in + if in.AwsLambda != nil { + in, out := &in.AwsLambda, &out.AwsLambda + *out = new(AwsLambdaInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTransformationInitParameters. +func (in *ContentTransformationInitParameters) DeepCopy() *ContentTransformationInitParameters { + if in == nil { + return nil + } + out := new(ContentTransformationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTransformationObservation) DeepCopyInto(out *ContentTransformationObservation) { + *out = *in + if in.AwsLambda != nil { + in, out := &in.AwsLambda, &out.AwsLambda + *out = new(AwsLambdaObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTransformationObservation. +func (in *ContentTransformationObservation) DeepCopy() *ContentTransformationObservation { + if in == nil { + return nil + } + out := new(ContentTransformationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentTransformationParameters) DeepCopyInto(out *ContentTransformationParameters) { + *out = *in + if in.AwsLambda != nil { + in, out := &in.AwsLambda, &out.AwsLambda + *out = new(AwsLambdaParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentTransformationParameters. 
+func (in *ContentTransformationParameters) DeepCopy() *ContentTransformationParameters { + if in == nil { + return nil + } + out := new(ContentTransformationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataExportInitParameters) DeepCopyInto(out *DataExportInitParameters) { + *out = *in + if in.CloudWatchMetrics != nil { + in, out := &in.CloudWatchMetrics, &out.CloudWatchMetrics + *out = new(CloudWatchMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3BucketDestination != nil { + in, out := &in.S3BucketDestination, &out.S3BucketDestination + *out = new(S3BucketDestinationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataExportInitParameters. +func (in *DataExportInitParameters) DeepCopy() *DataExportInitParameters { + if in == nil { + return nil + } + out := new(DataExportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataExportObservation) DeepCopyInto(out *DataExportObservation) { + *out = *in + if in.CloudWatchMetrics != nil { + in, out := &in.CloudWatchMetrics, &out.CloudWatchMetrics + *out = new(CloudWatchMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.S3BucketDestination != nil { + in, out := &in.S3BucketDestination, &out.S3BucketDestination + *out = new(S3BucketDestinationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataExportObservation. 
+func (in *DataExportObservation) DeepCopy() *DataExportObservation { + if in == nil { + return nil + } + out := new(DataExportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataExportParameters) DeepCopyInto(out *DataExportParameters) { + *out = *in + if in.CloudWatchMetrics != nil { + in, out := &in.CloudWatchMetrics, &out.CloudWatchMetrics + *out = new(CloudWatchMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.S3BucketDestination != nil { + in, out := &in.S3BucketDestination, &out.S3BucketDestination + *out = new(S3BucketDestinationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataExportParameters. +func (in *DataExportParameters) DeepCopy() *DataExportParameters { + if in == nil { + return nil + } + out := new(DataExportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetailedStatusCodeMetricsInitParameters) DeepCopyInto(out *DetailedStatusCodeMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetailedStatusCodeMetricsInitParameters. +func (in *DetailedStatusCodeMetricsInitParameters) DeepCopy() *DetailedStatusCodeMetricsInitParameters { + if in == nil { + return nil + } + out := new(DetailedStatusCodeMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DetailedStatusCodeMetricsObservation) DeepCopyInto(out *DetailedStatusCodeMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetailedStatusCodeMetricsObservation. +func (in *DetailedStatusCodeMetricsObservation) DeepCopy() *DetailedStatusCodeMetricsObservation { + if in == nil { + return nil + } + out := new(DetailedStatusCodeMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetailedStatusCodeMetricsParameters) DeepCopyInto(out *DetailedStatusCodeMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetailedStatusCodeMetricsParameters. +func (in *DetailedStatusCodeMetricsParameters) DeepCopy() *DetailedStatusCodeMetricsParameters { + if in == nil { + return nil + } + out := new(DetailedStatusCodeMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetailsInitParameters) DeepCopyInto(out *DetailsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicAccessBlock != nil { + in, out := &in.PublicAccessBlock, &out.PublicAccessBlock + *out = new(PublicAccessBlockInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetailsInitParameters. 
+func (in *DetailsInitParameters) DeepCopy() *DetailsInitParameters { + if in == nil { + return nil + } + out := new(DetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetailsObservation) DeepCopyInto(out *DetailsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicAccessBlock != nil { + in, out := &in.PublicAccessBlock, &out.PublicAccessBlock + *out = new(PublicAccessBlockObservation) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = make([]RegionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetailsObservation. +func (in *DetailsObservation) DeepCopy() *DetailsObservation { + if in == nil { + return nil + } + out := new(DetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetailsParameters) DeepCopyInto(out *DetailsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicAccessBlock != nil { + in, out := &in.PublicAccessBlock, &out.PublicAccessBlock + *out = new(PublicAccessBlockParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = make([]RegionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetailsParameters. 
+func (in *DetailsParameters) DeepCopy() *DetailsParameters { + if in == nil { + return nil + } + out := new(DetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionInitParameters) DeepCopyInto(out *EncryptionInitParameters) { + *out = *in + if in.SseKMS != nil { + in, out := &in.SseKMS, &out.SseKMS + *out = new(SseKMSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SseS3 != nil { + in, out := &in.SseS3, &out.SseS3 + *out = make([]SseS3InitParameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInitParameters. +func (in *EncryptionInitParameters) DeepCopy() *EncryptionInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionObservation) DeepCopyInto(out *EncryptionObservation) { + *out = *in + if in.SseKMS != nil { + in, out := &in.SseKMS, &out.SseKMS + *out = new(SseKMSObservation) + (*in).DeepCopyInto(*out) + } + if in.SseS3 != nil { + in, out := &in.SseS3, &out.SseS3 + *out = make([]SseS3Parameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionObservation. +func (in *EncryptionObservation) DeepCopy() *EncryptionObservation { + if in == nil { + return nil + } + out := new(EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionParameters) DeepCopyInto(out *EncryptionParameters) { + *out = *in + if in.SseKMS != nil { + in, out := &in.SseKMS, &out.SseKMS + *out = new(SseKMSParameters) + (*in).DeepCopyInto(*out) + } + if in.SseS3 != nil { + in, out := &in.SseS3, &out.SseS3 + *out = make([]SseS3Parameters, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionParameters. +func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { + if in == nil { + return nil + } + out := new(EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludeInitParameters) DeepCopyInto(out *ExcludeInitParameters) { + *out = *in + if in.Buckets != nil { + in, out := &in.Buckets, &out.Buckets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludeInitParameters. +func (in *ExcludeInitParameters) DeepCopy() *ExcludeInitParameters { + if in == nil { + return nil + } + out := new(ExcludeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExcludeObservation) DeepCopyInto(out *ExcludeObservation) { + *out = *in + if in.Buckets != nil { + in, out := &in.Buckets, &out.Buckets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludeObservation. +func (in *ExcludeObservation) DeepCopy() *ExcludeObservation { + if in == nil { + return nil + } + out := new(ExcludeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludeParameters) DeepCopyInto(out *ExcludeParameters) { + *out = *in + if in.Buckets != nil { + in, out := &in.Buckets, &out.Buckets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludeParameters. +func (in *ExcludeParameters) DeepCopy() *ExcludeParameters { + if in == nil { + return nil + } + out := new(ExcludeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IncludeInitParameters) DeepCopyInto(out *IncludeInitParameters) { + *out = *in + if in.Buckets != nil { + in, out := &in.Buckets, &out.Buckets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeInitParameters. +func (in *IncludeInitParameters) DeepCopy() *IncludeInitParameters { + if in == nil { + return nil + } + out := new(IncludeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludeObservation) DeepCopyInto(out *IncludeObservation) { + *out = *in + if in.Buckets != nil { + in, out := &in.Buckets, &out.Buckets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeObservation. +func (in *IncludeObservation) DeepCopy() *IncludeObservation { + if in == nil { + return nil + } + out := new(IncludeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IncludeParameters) DeepCopyInto(out *IncludeParameters) { + *out = *in + if in.Buckets != nil { + in, out := &in.Buckets, &out.Buckets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludeParameters. +func (in *IncludeParameters) DeepCopy() *IncludeParameters { + if in == nil { + return nil + } + out := new(IncludeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPoint) DeepCopyInto(out *MultiRegionAccessPoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPoint. +func (in *MultiRegionAccessPoint) DeepCopy() *MultiRegionAccessPoint { + if in == nil { + return nil + } + out := new(MultiRegionAccessPoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiRegionAccessPoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiRegionAccessPointInitParameters) DeepCopyInto(out *MultiRegionAccessPointInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(DetailsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointInitParameters. +func (in *MultiRegionAccessPointInitParameters) DeepCopy() *MultiRegionAccessPointInitParameters { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointList) DeepCopyInto(out *MultiRegionAccessPointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MultiRegionAccessPoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointList. +func (in *MultiRegionAccessPointList) DeepCopy() *MultiRegionAccessPointList { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiRegionAccessPointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiRegionAccessPointObservation) DeepCopyInto(out *MultiRegionAccessPointObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(DetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointObservation. +func (in *MultiRegionAccessPointObservation) DeepCopy() *MultiRegionAccessPointObservation { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointParameters) DeepCopyInto(out *MultiRegionAccessPointParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(DetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointParameters. 
+func (in *MultiRegionAccessPointParameters) DeepCopy() *MultiRegionAccessPointParameters { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointPolicy) DeepCopyInto(out *MultiRegionAccessPointPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicy. +func (in *MultiRegionAccessPointPolicy) DeepCopy() *MultiRegionAccessPointPolicy { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiRegionAccessPointPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointPolicyDetailsInitParameters) DeepCopyInto(out *MultiRegionAccessPointPolicyDetailsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicyDetailsInitParameters. 
+func (in *MultiRegionAccessPointPolicyDetailsInitParameters) DeepCopy() *MultiRegionAccessPointPolicyDetailsInitParameters { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicyDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointPolicyDetailsObservation) DeepCopyInto(out *MultiRegionAccessPointPolicyDetailsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicyDetailsObservation. +func (in *MultiRegionAccessPointPolicyDetailsObservation) DeepCopy() *MultiRegionAccessPointPolicyDetailsObservation { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicyDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointPolicyDetailsParameters) DeepCopyInto(out *MultiRegionAccessPointPolicyDetailsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicyDetailsParameters. 
+func (in *MultiRegionAccessPointPolicyDetailsParameters) DeepCopy() *MultiRegionAccessPointPolicyDetailsParameters { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicyDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointPolicyInitParameters) DeepCopyInto(out *MultiRegionAccessPointPolicyInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(MultiRegionAccessPointPolicyDetailsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicyInitParameters. +func (in *MultiRegionAccessPointPolicyInitParameters) DeepCopy() *MultiRegionAccessPointPolicyInitParameters { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointPolicyList) DeepCopyInto(out *MultiRegionAccessPointPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MultiRegionAccessPointPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicyList. 
+func (in *MultiRegionAccessPointPolicyList) DeepCopy() *MultiRegionAccessPointPolicyList { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiRegionAccessPointPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointPolicyObservation) DeepCopyInto(out *MultiRegionAccessPointPolicyObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(MultiRegionAccessPointPolicyDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.Established != nil { + in, out := &in.Established, &out.Established + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Proposed != nil { + in, out := &in.Proposed, &out.Proposed + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicyObservation. +func (in *MultiRegionAccessPointPolicyObservation) DeepCopy() *MultiRegionAccessPointPolicyObservation { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiRegionAccessPointPolicyParameters) DeepCopyInto(out *MultiRegionAccessPointPolicyParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(MultiRegionAccessPointPolicyDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicyParameters. +func (in *MultiRegionAccessPointPolicyParameters) DeepCopy() *MultiRegionAccessPointPolicyParameters { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointPolicySpec) DeepCopyInto(out *MultiRegionAccessPointPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicySpec. +func (in *MultiRegionAccessPointPolicySpec) DeepCopy() *MultiRegionAccessPointPolicySpec { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiRegionAccessPointPolicyStatus) DeepCopyInto(out *MultiRegionAccessPointPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointPolicyStatus. +func (in *MultiRegionAccessPointPolicyStatus) DeepCopy() *MultiRegionAccessPointPolicyStatus { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointSpec) DeepCopyInto(out *MultiRegionAccessPointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointSpec. +func (in *MultiRegionAccessPointSpec) DeepCopy() *MultiRegionAccessPointSpec { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiRegionAccessPointStatus) DeepCopyInto(out *MultiRegionAccessPointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiRegionAccessPointStatus. +func (in *MultiRegionAccessPointStatus) DeepCopy() *MultiRegionAccessPointStatus { + if in == nil { + return nil + } + out := new(MultiRegionAccessPointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ObjectLambdaAccessPoint) DeepCopyInto(out *ObjectLambdaAccessPoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLambdaAccessPoint. +func (in *ObjectLambdaAccessPoint) DeepCopy() *ObjectLambdaAccessPoint { + if in == nil { + return nil + } + out := new(ObjectLambdaAccessPoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectLambdaAccessPoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLambdaAccessPointInitParameters) DeepCopyInto(out *ObjectLambdaAccessPointInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLambdaAccessPointInitParameters. +func (in *ObjectLambdaAccessPointInitParameters) DeepCopy() *ObjectLambdaAccessPointInitParameters { + if in == nil { + return nil + } + out := new(ObjectLambdaAccessPointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectLambdaAccessPointList) DeepCopyInto(out *ObjectLambdaAccessPointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ObjectLambdaAccessPoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLambdaAccessPointList. +func (in *ObjectLambdaAccessPointList) DeepCopy() *ObjectLambdaAccessPointList { + if in == nil { + return nil + } + out := new(ObjectLambdaAccessPointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectLambdaAccessPointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLambdaAccessPointObservation) DeepCopyInto(out *ObjectLambdaAccessPointObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLambdaAccessPointObservation. 
+func (in *ObjectLambdaAccessPointObservation) DeepCopy() *ObjectLambdaAccessPointObservation { + if in == nil { + return nil + } + out := new(ObjectLambdaAccessPointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLambdaAccessPointParameters) DeepCopyInto(out *ObjectLambdaAccessPointParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLambdaAccessPointParameters. +func (in *ObjectLambdaAccessPointParameters) DeepCopy() *ObjectLambdaAccessPointParameters { + if in == nil { + return nil + } + out := new(ObjectLambdaAccessPointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLambdaAccessPointSpec) DeepCopyInto(out *ObjectLambdaAccessPointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLambdaAccessPointSpec. 
+func (in *ObjectLambdaAccessPointSpec) DeepCopy() *ObjectLambdaAccessPointSpec { + if in == nil { + return nil + } + out := new(ObjectLambdaAccessPointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLambdaAccessPointStatus) DeepCopyInto(out *ObjectLambdaAccessPointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLambdaAccessPointStatus. +func (in *ObjectLambdaAccessPointStatus) DeepCopy() *ObjectLambdaAccessPointStatus { + if in == nil { + return nil + } + out := new(ObjectLambdaAccessPointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixLevelInitParameters) DeepCopyInto(out *PrefixLevelInitParameters) { + *out = *in + if in.StorageMetrics != nil { + in, out := &in.StorageMetrics, &out.StorageMetrics + *out = new(StorageMetricsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixLevelInitParameters. +func (in *PrefixLevelInitParameters) DeepCopy() *PrefixLevelInitParameters { + if in == nil { + return nil + } + out := new(PrefixLevelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrefixLevelObservation) DeepCopyInto(out *PrefixLevelObservation) { + *out = *in + if in.StorageMetrics != nil { + in, out := &in.StorageMetrics, &out.StorageMetrics + *out = new(StorageMetricsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixLevelObservation. +func (in *PrefixLevelObservation) DeepCopy() *PrefixLevelObservation { + if in == nil { + return nil + } + out := new(PrefixLevelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixLevelParameters) DeepCopyInto(out *PrefixLevelParameters) { + *out = *in + if in.StorageMetrics != nil { + in, out := &in.StorageMetrics, &out.StorageMetrics + *out = new(StorageMetricsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixLevelParameters. +func (in *PrefixLevelParameters) DeepCopy() *PrefixLevelParameters { + if in == nil { + return nil + } + out := new(PrefixLevelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublicAccessBlockConfigurationInitParameters) DeepCopyInto(out *PublicAccessBlockConfigurationInitParameters) { + *out = *in + if in.BlockPublicAcls != nil { + in, out := &in.BlockPublicAcls, &out.BlockPublicAcls + *out = new(bool) + **out = **in + } + if in.BlockPublicPolicy != nil { + in, out := &in.BlockPublicPolicy, &out.BlockPublicPolicy + *out = new(bool) + **out = **in + } + if in.IgnorePublicAcls != nil { + in, out := &in.IgnorePublicAcls, &out.IgnorePublicAcls + *out = new(bool) + **out = **in + } + if in.RestrictPublicBuckets != nil { + in, out := &in.RestrictPublicBuckets, &out.RestrictPublicBuckets + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicAccessBlockConfigurationInitParameters. +func (in *PublicAccessBlockConfigurationInitParameters) DeepCopy() *PublicAccessBlockConfigurationInitParameters { + if in == nil { + return nil + } + out := new(PublicAccessBlockConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicAccessBlockConfigurationObservation) DeepCopyInto(out *PublicAccessBlockConfigurationObservation) { + *out = *in + if in.BlockPublicAcls != nil { + in, out := &in.BlockPublicAcls, &out.BlockPublicAcls + *out = new(bool) + **out = **in + } + if in.BlockPublicPolicy != nil { + in, out := &in.BlockPublicPolicy, &out.BlockPublicPolicy + *out = new(bool) + **out = **in + } + if in.IgnorePublicAcls != nil { + in, out := &in.IgnorePublicAcls, &out.IgnorePublicAcls + *out = new(bool) + **out = **in + } + if in.RestrictPublicBuckets != nil { + in, out := &in.RestrictPublicBuckets, &out.RestrictPublicBuckets + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicAccessBlockConfigurationObservation. 
+func (in *PublicAccessBlockConfigurationObservation) DeepCopy() *PublicAccessBlockConfigurationObservation { + if in == nil { + return nil + } + out := new(PublicAccessBlockConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicAccessBlockConfigurationParameters) DeepCopyInto(out *PublicAccessBlockConfigurationParameters) { + *out = *in + if in.BlockPublicAcls != nil { + in, out := &in.BlockPublicAcls, &out.BlockPublicAcls + *out = new(bool) + **out = **in + } + if in.BlockPublicPolicy != nil { + in, out := &in.BlockPublicPolicy, &out.BlockPublicPolicy + *out = new(bool) + **out = **in + } + if in.IgnorePublicAcls != nil { + in, out := &in.IgnorePublicAcls, &out.IgnorePublicAcls + *out = new(bool) + **out = **in + } + if in.RestrictPublicBuckets != nil { + in, out := &in.RestrictPublicBuckets, &out.RestrictPublicBuckets + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicAccessBlockConfigurationParameters. +func (in *PublicAccessBlockConfigurationParameters) DeepCopy() *PublicAccessBlockConfigurationParameters { + if in == nil { + return nil + } + out := new(PublicAccessBlockConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublicAccessBlockInitParameters) DeepCopyInto(out *PublicAccessBlockInitParameters) { + *out = *in + if in.BlockPublicAcls != nil { + in, out := &in.BlockPublicAcls, &out.BlockPublicAcls + *out = new(bool) + **out = **in + } + if in.BlockPublicPolicy != nil { + in, out := &in.BlockPublicPolicy, &out.BlockPublicPolicy + *out = new(bool) + **out = **in + } + if in.IgnorePublicAcls != nil { + in, out := &in.IgnorePublicAcls, &out.IgnorePublicAcls + *out = new(bool) + **out = **in + } + if in.RestrictPublicBuckets != nil { + in, out := &in.RestrictPublicBuckets, &out.RestrictPublicBuckets + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicAccessBlockInitParameters. +func (in *PublicAccessBlockInitParameters) DeepCopy() *PublicAccessBlockInitParameters { + if in == nil { + return nil + } + out := new(PublicAccessBlockInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicAccessBlockObservation) DeepCopyInto(out *PublicAccessBlockObservation) { + *out = *in + if in.BlockPublicAcls != nil { + in, out := &in.BlockPublicAcls, &out.BlockPublicAcls + *out = new(bool) + **out = **in + } + if in.BlockPublicPolicy != nil { + in, out := &in.BlockPublicPolicy, &out.BlockPublicPolicy + *out = new(bool) + **out = **in + } + if in.IgnorePublicAcls != nil { + in, out := &in.IgnorePublicAcls, &out.IgnorePublicAcls + *out = new(bool) + **out = **in + } + if in.RestrictPublicBuckets != nil { + in, out := &in.RestrictPublicBuckets, &out.RestrictPublicBuckets + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicAccessBlockObservation. 
+func (in *PublicAccessBlockObservation) DeepCopy() *PublicAccessBlockObservation { + if in == nil { + return nil + } + out := new(PublicAccessBlockObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicAccessBlockParameters) DeepCopyInto(out *PublicAccessBlockParameters) { + *out = *in + if in.BlockPublicAcls != nil { + in, out := &in.BlockPublicAcls, &out.BlockPublicAcls + *out = new(bool) + **out = **in + } + if in.BlockPublicPolicy != nil { + in, out := &in.BlockPublicPolicy, &out.BlockPublicPolicy + *out = new(bool) + **out = **in + } + if in.IgnorePublicAcls != nil { + in, out := &in.IgnorePublicAcls, &out.IgnorePublicAcls + *out = new(bool) + **out = **in + } + if in.RestrictPublicBuckets != nil { + in, out := &in.RestrictPublicBuckets, &out.RestrictPublicBuckets + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicAccessBlockParameters. +func (in *PublicAccessBlockParameters) DeepCopy() *PublicAccessBlockParameters { + if in == nil { + return nil + } + out := new(PublicAccessBlockParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegionInitParameters) DeepCopyInto(out *RegionInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionInitParameters. +func (in *RegionInitParameters) DeepCopy() *RegionInitParameters { + if in == nil { + return nil + } + out := new(RegionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegionObservation) DeepCopyInto(out *RegionObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionObservation. +func (in *RegionObservation) DeepCopy() *RegionObservation { + if in == nil { + return nil + } + out := new(RegionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegionParameters) DeepCopyInto(out *RegionParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketAccountID != nil { + in, out := &in.BucketAccountID, &out.BucketAccountID + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionParameters. +func (in *RegionParameters) DeepCopy() *RegionParameters { + if in == nil { + return nil + } + out := new(RegionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BucketDestinationInitParameters) DeepCopyInto(out *S3BucketDestinationInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.OutputSchemaVersion != nil { + in, out := &in.OutputSchemaVersion, &out.OutputSchemaVersion + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = 
new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BucketDestinationInitParameters. +func (in *S3BucketDestinationInitParameters) DeepCopy() *S3BucketDestinationInitParameters { + if in == nil { + return nil + } + out := new(S3BucketDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BucketDestinationObservation) DeepCopyInto(out *S3BucketDestinationObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.OutputSchemaVersion != nil { + in, out := &in.OutputSchemaVersion, &out.OutputSchemaVersion + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BucketDestinationObservation. +func (in *S3BucketDestinationObservation) DeepCopy() *S3BucketDestinationObservation { + if in == nil { + return nil + } + out := new(S3BucketDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3BucketDestinationParameters) DeepCopyInto(out *S3BucketDestinationParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.OutputSchemaVersion != nil { + in, out := &in.OutputSchemaVersion, &out.OutputSchemaVersion + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BucketDestinationParameters. +func (in *S3BucketDestinationParameters) DeepCopy() *S3BucketDestinationParameters { + if in == nil { + return nil + } + out := new(S3BucketDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelectionCriteriaInitParameters) DeepCopyInto(out *SelectionCriteriaInitParameters) { + *out = *in + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } + if in.MaxDepth != nil { + in, out := &in.MaxDepth, &out.MaxDepth + *out = new(float64) + **out = **in + } + if in.MinStorageBytesPercentage != nil { + in, out := &in.MinStorageBytesPercentage, &out.MinStorageBytesPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectionCriteriaInitParameters. +func (in *SelectionCriteriaInitParameters) DeepCopy() *SelectionCriteriaInitParameters { + if in == nil { + return nil + } + out := new(SelectionCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectionCriteriaObservation) DeepCopyInto(out *SelectionCriteriaObservation) { + *out = *in + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } + if in.MaxDepth != nil { + in, out := &in.MaxDepth, &out.MaxDepth + *out = new(float64) + **out = **in + } + if in.MinStorageBytesPercentage != nil { + in, out := &in.MinStorageBytesPercentage, &out.MinStorageBytesPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectionCriteriaObservation. +func (in *SelectionCriteriaObservation) DeepCopy() *SelectionCriteriaObservation { + if in == nil { + return nil + } + out := new(SelectionCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelectionCriteriaParameters) DeepCopyInto(out *SelectionCriteriaParameters) { + *out = *in + if in.Delimiter != nil { + in, out := &in.Delimiter, &out.Delimiter + *out = new(string) + **out = **in + } + if in.MaxDepth != nil { + in, out := &in.MaxDepth, &out.MaxDepth + *out = new(float64) + **out = **in + } + if in.MinStorageBytesPercentage != nil { + in, out := &in.MinStorageBytesPercentage, &out.MinStorageBytesPercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectionCriteriaParameters. +func (in *SelectionCriteriaParameters) DeepCopy() *SelectionCriteriaParameters { + if in == nil { + return nil + } + out := new(SelectionCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseKMSInitParameters) DeepCopyInto(out *SseKMSInitParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseKMSInitParameters. +func (in *SseKMSInitParameters) DeepCopy() *SseKMSInitParameters { + if in == nil { + return nil + } + out := new(SseKMSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseKMSObservation) DeepCopyInto(out *SseKMSObservation) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseKMSObservation. 
+func (in *SseKMSObservation) DeepCopy() *SseKMSObservation { + if in == nil { + return nil + } + out := new(SseKMSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseKMSParameters) DeepCopyInto(out *SseKMSParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseKMSParameters. +func (in *SseKMSParameters) DeepCopy() *SseKMSParameters { + if in == nil { + return nil + } + out := new(SseKMSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseS3InitParameters) DeepCopyInto(out *SseS3InitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseS3InitParameters. +func (in *SseS3InitParameters) DeepCopy() *SseS3InitParameters { + if in == nil { + return nil + } + out := new(SseS3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SseS3Observation) DeepCopyInto(out *SseS3Observation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseS3Observation. +func (in *SseS3Observation) DeepCopy() *SseS3Observation { + if in == nil { + return nil + } + out := new(SseS3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SseS3Parameters) DeepCopyInto(out *SseS3Parameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SseS3Parameters. +func (in *SseS3Parameters) DeepCopy() *SseS3Parameters { + if in == nil { + return nil + } + out := new(SseS3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageLensConfiguration) DeepCopyInto(out *StorageLensConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLensConfiguration. +func (in *StorageLensConfiguration) DeepCopy() *StorageLensConfiguration { + if in == nil { + return nil + } + out := new(StorageLensConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageLensConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageLensConfigurationInitParameters) DeepCopyInto(out *StorageLensConfigurationInitParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.ConfigID != nil { + in, out := &in.ConfigID, &out.ConfigID + *out = new(string) + **out = **in + } + if in.StorageLensConfiguration != nil { + in, out := &in.StorageLensConfiguration, &out.StorageLensConfiguration + *out = new(StorageLensConfigurationStorageLensConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLensConfigurationInitParameters. +func (in *StorageLensConfigurationInitParameters) DeepCopy() *StorageLensConfigurationInitParameters { + if in == nil { + return nil + } + out := new(StorageLensConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageLensConfigurationList) DeepCopyInto(out *StorageLensConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageLensConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLensConfigurationList. 
+func (in *StorageLensConfigurationList) DeepCopy() *StorageLensConfigurationList { + if in == nil { + return nil + } + out := new(StorageLensConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageLensConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageLensConfigurationObservation) DeepCopyInto(out *StorageLensConfigurationObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ConfigID != nil { + in, out := &in.ConfigID, &out.ConfigID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.StorageLensConfiguration != nil { + in, out := &in.StorageLensConfiguration, &out.StorageLensConfiguration + *out = new(StorageLensConfigurationStorageLensConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new StorageLensConfigurationObservation. +func (in *StorageLensConfigurationObservation) DeepCopy() *StorageLensConfigurationObservation { + if in == nil { + return nil + } + out := new(StorageLensConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageLensConfigurationParameters) DeepCopyInto(out *StorageLensConfigurationParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.ConfigID != nil { + in, out := &in.ConfigID, &out.ConfigID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.StorageLensConfiguration != nil { + in, out := &in.StorageLensConfiguration, &out.StorageLensConfiguration + *out = new(StorageLensConfigurationStorageLensConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLensConfigurationParameters. +func (in *StorageLensConfigurationParameters) DeepCopy() *StorageLensConfigurationParameters { + if in == nil { + return nil + } + out := new(StorageLensConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageLensConfigurationSpec) DeepCopyInto(out *StorageLensConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLensConfigurationSpec. +func (in *StorageLensConfigurationSpec) DeepCopy() *StorageLensConfigurationSpec { + if in == nil { + return nil + } + out := new(StorageLensConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageLensConfigurationStatus) DeepCopyInto(out *StorageLensConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLensConfigurationStatus. +func (in *StorageLensConfigurationStatus) DeepCopy() *StorageLensConfigurationStatus { + if in == nil { + return nil + } + out := new(StorageLensConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageLensConfigurationStorageLensConfigurationInitParameters) DeepCopyInto(out *StorageLensConfigurationStorageLensConfigurationInitParameters) { + *out = *in + if in.AccountLevel != nil { + in, out := &in.AccountLevel, &out.AccountLevel + *out = new(AccountLevelInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AwsOrg != nil { + in, out := &in.AwsOrg, &out.AwsOrg + *out = new(AwsOrgInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DataExport != nil { + in, out := &in.DataExport, &out.DataExport + *out = new(DataExportInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = new(ExcludeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = new(IncludeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLensConfigurationStorageLensConfigurationInitParameters. +func (in *StorageLensConfigurationStorageLensConfigurationInitParameters) DeepCopy() *StorageLensConfigurationStorageLensConfigurationInitParameters { + if in == nil { + return nil + } + out := new(StorageLensConfigurationStorageLensConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageLensConfigurationStorageLensConfigurationObservation) DeepCopyInto(out *StorageLensConfigurationStorageLensConfigurationObservation) { + *out = *in + if in.AccountLevel != nil { + in, out := &in.AccountLevel, &out.AccountLevel + *out = new(AccountLevelObservation) + (*in).DeepCopyInto(*out) + } + if in.AwsOrg != nil { + in, out := &in.AwsOrg, &out.AwsOrg + *out = new(AwsOrgObservation) + (*in).DeepCopyInto(*out) + } + if in.DataExport != nil { + in, out := &in.DataExport, &out.DataExport + *out = new(DataExportObservation) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = new(ExcludeObservation) + (*in).DeepCopyInto(*out) + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = new(IncludeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLensConfigurationStorageLensConfigurationObservation. +func (in *StorageLensConfigurationStorageLensConfigurationObservation) DeepCopy() *StorageLensConfigurationStorageLensConfigurationObservation { + if in == nil { + return nil + } + out := new(StorageLensConfigurationStorageLensConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageLensConfigurationStorageLensConfigurationParameters) DeepCopyInto(out *StorageLensConfigurationStorageLensConfigurationParameters) { + *out = *in + if in.AccountLevel != nil { + in, out := &in.AccountLevel, &out.AccountLevel + *out = new(AccountLevelParameters) + (*in).DeepCopyInto(*out) + } + if in.AwsOrg != nil { + in, out := &in.AwsOrg, &out.AwsOrg + *out = new(AwsOrgParameters) + (*in).DeepCopyInto(*out) + } + if in.DataExport != nil { + in, out := &in.DataExport, &out.DataExport + *out = new(DataExportParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = new(ExcludeParameters) + (*in).DeepCopyInto(*out) + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = new(IncludeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLensConfigurationStorageLensConfigurationParameters. +func (in *StorageLensConfigurationStorageLensConfigurationParameters) DeepCopy() *StorageLensConfigurationStorageLensConfigurationParameters { + if in == nil { + return nil + } + out := new(StorageLensConfigurationStorageLensConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMetricsInitParameters) DeepCopyInto(out *StorageMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SelectionCriteria != nil { + in, out := &in.SelectionCriteria, &out.SelectionCriteria + *out = new(SelectionCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMetricsInitParameters. 
+func (in *StorageMetricsInitParameters) DeepCopy() *StorageMetricsInitParameters { + if in == nil { + return nil + } + out := new(StorageMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMetricsObservation) DeepCopyInto(out *StorageMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SelectionCriteria != nil { + in, out := &in.SelectionCriteria, &out.SelectionCriteria + *out = new(SelectionCriteriaObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMetricsObservation. +func (in *StorageMetricsObservation) DeepCopy() *StorageMetricsObservation { + if in == nil { + return nil + } + out := new(StorageMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMetricsParameters) DeepCopyInto(out *StorageMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.SelectionCriteria != nil { + in, out := &in.SelectionCriteria, &out.SelectionCriteria + *out = new(SelectionCriteriaParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMetricsParameters. +func (in *StorageMetricsParameters) DeepCopy() *StorageMetricsParameters { + if in == nil { + return nil + } + out := new(StorageMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransformationConfigurationInitParameters) DeepCopyInto(out *TransformationConfigurationInitParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContentTransformation != nil { + in, out := &in.ContentTransformation, &out.ContentTransformation + *out = new(ContentTransformationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationConfigurationInitParameters. +func (in *TransformationConfigurationInitParameters) DeepCopy() *TransformationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(TransformationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationConfigurationObservation) DeepCopyInto(out *TransformationConfigurationObservation) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContentTransformation != nil { + in, out := &in.ContentTransformation, &out.ContentTransformation + *out = new(ContentTransformationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationConfigurationObservation. 
+func (in *TransformationConfigurationObservation) DeepCopy() *TransformationConfigurationObservation { + if in == nil { + return nil + } + out := new(TransformationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationConfigurationParameters) DeepCopyInto(out *TransformationConfigurationParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContentTransformation != nil { + in, out := &in.ContentTransformation, &out.ContentTransformation + *out = new(ContentTransformationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationConfigurationParameters. +func (in *TransformationConfigurationParameters) DeepCopy() *TransformationConfigurationParameters { + if in == nil { + return nil + } + out := new(TransformationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationInitParameters) DeepCopyInto(out *VPCConfigurationInitParameters) { + *out = *in + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationInitParameters. 
+func (in *VPCConfigurationInitParameters) DeepCopy() *VPCConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationObservation) DeepCopyInto(out *VPCConfigurationObservation) { + *out = *in + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationObservation. +func (in *VPCConfigurationObservation) DeepCopy() *VPCConfigurationObservation { + if in == nil { + return nil + } + out := new(VPCConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigurationParameters) DeepCopyInto(out *VPCConfigurationParameters) { + *out = *in + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigurationParameters. 
+func (in *VPCConfigurationParameters) DeepCopy() *VPCConfigurationParameters { + if in == nil { + return nil + } + out := new(VPCConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/s3control/v1beta2/zz_generated.managed.go b/apis/s3control/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..ca0c6274f7 --- /dev/null +++ b/apis/s3control/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AccessPoint. +func (mg *AccessPoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AccessPoint. +func (mg *AccessPoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AccessPoint. +func (mg *AccessPoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AccessPoint. +func (mg *AccessPoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AccessPoint. +func (mg *AccessPoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AccessPoint. +func (mg *AccessPoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AccessPoint. +func (mg *AccessPoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AccessPoint. 
+func (mg *AccessPoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AccessPoint. +func (mg *AccessPoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AccessPoint. +func (mg *AccessPoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AccessPoint. +func (mg *AccessPoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AccessPoint. +func (mg *AccessPoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MultiRegionAccessPoint. 
+func (mg *MultiRegionAccessPoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MultiRegionAccessPoint. +func (mg *MultiRegionAccessPoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MultiRegionAccessPointPolicy. 
+func (mg *MultiRegionAccessPointPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MultiRegionAccessPointPolicy. +func (mg *MultiRegionAccessPointPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ObjectLambdaAccessPoint. 
+func (mg *ObjectLambdaAccessPoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ObjectLambdaAccessPoint. 
+func (mg *ObjectLambdaAccessPoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ObjectLambdaAccessPoint. +func (mg *ObjectLambdaAccessPoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StorageLensConfiguration. 
+func (mg *StorageLensConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StorageLensConfiguration. +func (mg *StorageLensConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/s3control/v1beta2/zz_generated.managedlist.go b/apis/s3control/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..296a96a99b --- /dev/null +++ b/apis/s3control/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccessPointList. +func (l *AccessPointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MultiRegionAccessPointList. +func (l *MultiRegionAccessPointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MultiRegionAccessPointPolicyList. 
+func (l *MultiRegionAccessPointPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ObjectLambdaAccessPointList. +func (l *ObjectLambdaAccessPointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StorageLensConfigurationList. +func (l *StorageLensConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/s3control/v1beta2/zz_generated.resolvers.go b/apis/s3control/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..a58389d201 --- /dev/null +++ b/apis/s3control/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,319 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *AccessPoint) ResolveReferences( // ResolveReferences of this AccessPoint. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCConfiguration.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VPCConfiguration.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCConfiguration.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCConfiguration.VPCID") + } + mg.Spec.ForProvider.VPCConfiguration.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCConfiguration.VPCIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.VPCConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCConfiguration.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VPCConfiguration.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCConfiguration.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCConfiguration.VPCID") + } + mg.Spec.InitProvider.VPCConfiguration.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCConfiguration.VPCIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this MultiRegionAccessPoint. 
+func (mg *MultiRegionAccessPoint) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Details != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Details.Region); i4++ { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Details.Region[i4].Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Details.Region[i4].BucketRef, + Selector: mg.Spec.ForProvider.Details.Region[i4].BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Details.Region[i4].Bucket") + } + mg.Spec.ForProvider.Details.Region[i4].Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Details.Region[i4].BucketRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this ObjectLambdaAccessPoint. 
+func (mg *ObjectLambdaAccessPoint) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta2", "AccessPoint", "AccessPointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Configuration.SupportingAccessPoint), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Configuration.SupportingAccessPointRef, + Selector: mg.Spec.ForProvider.Configuration.SupportingAccessPointSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Configuration.SupportingAccessPoint") + } + mg.Spec.ForProvider.Configuration.SupportingAccessPoint = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Configuration.SupportingAccessPointRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Configuration != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Configuration.TransformationConfiguration); i4++ { + if mg.Spec.ForProvider.Configuration.TransformationConfiguration[i4].ContentTransformation != nil { + if mg.Spec.ForProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArnRef, + Selector: mg.Spec.ForProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArn") + } + mg.Spec.ForProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArnRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.Configuration != nil { + { + m, l, err = apisresolver.GetManagedResource("s3control.aws.upbound.io", "v1beta2", "AccessPoint", "AccessPointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Configuration.SupportingAccessPoint), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Configuration.SupportingAccessPointRef, + Selector: mg.Spec.InitProvider.Configuration.SupportingAccessPointSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Configuration.SupportingAccessPoint") + } + mg.Spec.InitProvider.Configuration.SupportingAccessPoint = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Configuration.SupportingAccessPointRef = rsp.ResolvedReference + + } + if 
mg.Spec.InitProvider.Configuration != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Configuration.TransformationConfiguration); i4++ { + if mg.Spec.InitProvider.Configuration.TransformationConfiguration[i4].ContentTransformation != nil { + if mg.Spec.InitProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArnRef, + Selector: mg.Spec.InitProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArn") + } + mg.Spec.InitProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Configuration.TransformationConfiguration[i4].ContentTransformation.AwsLambda.FunctionArnRef = rsp.ResolvedReference + + } + } + } + } + + return nil +} + +// ResolveReferences of this StorageLensConfiguration. 
+func (mg *StorageLensConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.StorageLensConfiguration != nil { + if mg.Spec.ForProvider.StorageLensConfiguration.DataExport != nil { + if mg.Spec.ForProvider.StorageLensConfiguration.DataExport.S3BucketDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageLensConfiguration.DataExport.S3BucketDestination.Arn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.StorageLensConfiguration.DataExport.S3BucketDestination.ArnRef, + Selector: mg.Spec.ForProvider.StorageLensConfiguration.DataExport.S3BucketDestination.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageLensConfiguration.DataExport.S3BucketDestination.Arn") + } + mg.Spec.ForProvider.StorageLensConfiguration.DataExport.S3BucketDestination.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageLensConfiguration.DataExport.S3BucketDestination.ArnRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.StorageLensConfiguration != nil { + if mg.Spec.InitProvider.StorageLensConfiguration.DataExport != nil { + if mg.Spec.InitProvider.StorageLensConfiguration.DataExport.S3BucketDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed 
resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageLensConfiguration.DataExport.S3BucketDestination.Arn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.StorageLensConfiguration.DataExport.S3BucketDestination.ArnRef, + Selector: mg.Spec.InitProvider.StorageLensConfiguration.DataExport.S3BucketDestination.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageLensConfiguration.DataExport.S3BucketDestination.Arn") + } + mg.Spec.InitProvider.StorageLensConfiguration.DataExport.S3BucketDestination.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageLensConfiguration.DataExport.S3BucketDestination.ArnRef = rsp.ResolvedReference + + } + } + } + + return nil +} diff --git a/apis/s3control/v1beta2/zz_groupversion_info.go b/apis/s3control/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..449ceeef1a --- /dev/null +++ b/apis/s3control/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=s3control.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "s3control.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/s3control/v1beta2/zz_multiregionaccesspoint_terraformed.go b/apis/s3control/v1beta2/zz_multiregionaccesspoint_terraformed.go new file mode 100755 index 0000000000..19d5634f5c --- /dev/null +++ b/apis/s3control/v1beta2/zz_multiregionaccesspoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MultiRegionAccessPoint +func (mg *MultiRegionAccessPoint) GetTerraformResourceType() string { + return "aws_s3control_multi_region_access_point" +} + +// GetConnectionDetailsMapping for this MultiRegionAccessPoint +func (tr *MultiRegionAccessPoint) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MultiRegionAccessPoint +func (tr *MultiRegionAccessPoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MultiRegionAccessPoint +func (tr *MultiRegionAccessPoint) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + 
return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MultiRegionAccessPoint +func (tr *MultiRegionAccessPoint) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MultiRegionAccessPoint +func (tr *MultiRegionAccessPoint) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MultiRegionAccessPoint +func (tr *MultiRegionAccessPoint) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MultiRegionAccessPoint +func (tr *MultiRegionAccessPoint) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MultiRegionAccessPoint +func (tr *MultiRegionAccessPoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MultiRegionAccessPoint using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MultiRegionAccessPoint) LateInitialize(attrs []byte) (bool, error) { + params := &MultiRegionAccessPointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MultiRegionAccessPoint) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3control/v1beta2/zz_multiregionaccesspoint_types.go b/apis/s3control/v1beta2/zz_multiregionaccesspoint_types.go new file mode 100755 index 0000000000..1904da24a8 --- /dev/null +++ b/apis/s3control/v1beta2/zz_multiregionaccesspoint_types.go @@ -0,0 +1,262 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DetailsInitParameters struct { + + // The name of the Multi-Region Access Point. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration block to manage the PublicAccessBlock configuration that you want to apply to this Multi-Region Access Point. You can enable the configuration options in any combination. See Public Access Block Configuration below for more details. + PublicAccessBlock *PublicAccessBlockInitParameters `json:"publicAccessBlock,omitempty" tf:"public_access_block,omitempty"` +} + +type DetailsObservation struct { + + // The name of the Multi-Region Access Point. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration block to manage the PublicAccessBlock configuration that you want to apply to this Multi-Region Access Point. You can enable the configuration options in any combination. See Public Access Block Configuration below for more details. + PublicAccessBlock *PublicAccessBlockObservation `json:"publicAccessBlock,omitempty" tf:"public_access_block,omitempty"` + + // The Region configuration block to specify the bucket associated with the Multi-Region Access Point. See Region Configuration below for more details. + Region []RegionObservation `json:"region,omitempty" tf:"region,omitempty"` +} + +type DetailsParameters struct { + + // The name of the Multi-Region Access Point. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Configuration block to manage the PublicAccessBlock configuration that you want to apply to this Multi-Region Access Point. You can enable the configuration options in any combination. See Public Access Block Configuration below for more details. + // +kubebuilder:validation:Optional + PublicAccessBlock *PublicAccessBlockParameters `json:"publicAccessBlock,omitempty" tf:"public_access_block,omitempty"` + + // The Region configuration block to specify the bucket associated with the Multi-Region Access Point. See Region Configuration below for more details. 
+ // +kubebuilder:validation:Required + Region []RegionParameters `json:"region" tf:"region,omitempty"` +} + +type MultiRegionAccessPointInitParameters struct { + + // The AWS account ID for the owner of the buckets for which you want to create a Multi-Region Access Point. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // A configuration block containing details about the Multi-Region Access Point. See Details Configuration Block below for more details + Details *DetailsInitParameters `json:"details,omitempty" tf:"details,omitempty"` +} + +type MultiRegionAccessPointObservation struct { + + // The AWS account ID for the owner of the buckets for which you want to create a Multi-Region Access Point. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The alias for the Multi-Region Access Point. + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // Amazon Resource Name (ARN) of the Multi-Region Access Point. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A configuration block containing details about the Multi-Region Access Point. See Details Configuration Block below for more details + Details *DetailsObservation `json:"details,omitempty" tf:"details,omitempty"` + + // The DNS domain name of the S3 Multi-Region Access Point in the format alias.accesspoint.s3-global.amazonaws.com. For more information, see the documentation on Multi-Region Access Point Requests. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The AWS account ID and access point name separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The current status of the Multi-Region Access Point. One of: READY, INCONSISTENT_ACROSS_REGIONS, CREATING, PARTIALLY_CREATED, PARTIALLY_DELETED, DELETING. 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type MultiRegionAccessPointParameters struct { + + // The AWS account ID for the owner of the buckets for which you want to create a Multi-Region Access Point. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // A configuration block containing details about the Multi-Region Access Point. See Details Configuration Block below for more details + // +kubebuilder:validation:Optional + Details *DetailsParameters `json:"details,omitempty" tf:"details,omitempty"` + + // The Region configuration block to specify the bucket associated with the Multi-Region Access Point. See Region Configuration below for more details. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type PublicAccessBlockInitParameters struct { + + // Whether Amazon S3 should block public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect existing policies or ACLs. When set to true causes the following behavior: + BlockPublicAcls *bool `json:"blockPublicAcls,omitempty" tf:"block_public_acls,omitempty"` + + // Whether Amazon S3 should block public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect existing bucket policies. When set to true causes Amazon S3 to: + BlockPublicPolicy *bool `json:"blockPublicPolicy,omitempty" tf:"block_public_policy,omitempty"` + + // Whether Amazon S3 should ignore public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. 
When set to true causes Amazon S3 to: + IgnorePublicAcls *bool `json:"ignorePublicAcls,omitempty" tf:"ignore_public_acls,omitempty"` + + // Whether Amazon S3 should restrict public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. When set to true: + RestrictPublicBuckets *bool `json:"restrictPublicBuckets,omitempty" tf:"restrict_public_buckets,omitempty"` +} + +type PublicAccessBlockObservation struct { + + // Whether Amazon S3 should block public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect existing policies or ACLs. When set to true causes the following behavior: + BlockPublicAcls *bool `json:"blockPublicAcls,omitempty" tf:"block_public_acls,omitempty"` + + // Whether Amazon S3 should block public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect existing bucket policies. When set to true causes Amazon S3 to: + BlockPublicPolicy *bool `json:"blockPublicPolicy,omitempty" tf:"block_public_policy,omitempty"` + + // Whether Amazon S3 should ignore public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. When set to true causes Amazon S3 to: + IgnorePublicAcls *bool `json:"ignorePublicAcls,omitempty" tf:"ignore_public_acls,omitempty"` + + // Whether Amazon S3 should restrict public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. 
When set to true: + RestrictPublicBuckets *bool `json:"restrictPublicBuckets,omitempty" tf:"restrict_public_buckets,omitempty"` +} + +type PublicAccessBlockParameters struct { + + // Whether Amazon S3 should block public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect existing policies or ACLs. When set to true causes the following behavior: + // +kubebuilder:validation:Optional + BlockPublicAcls *bool `json:"blockPublicAcls,omitempty" tf:"block_public_acls,omitempty"` + + // Whether Amazon S3 should block public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect existing bucket policies. When set to true causes Amazon S3 to: + // +kubebuilder:validation:Optional + BlockPublicPolicy *bool `json:"blockPublicPolicy,omitempty" tf:"block_public_policy,omitempty"` + + // Whether Amazon S3 should ignore public ACLs for buckets in this account. Defaults to true. Enabling this setting does not affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. When set to true causes Amazon S3 to: + // +kubebuilder:validation:Optional + IgnorePublicAcls *bool `json:"ignorePublicAcls,omitempty" tf:"ignore_public_acls,omitempty"` + + // Whether Amazon S3 should restrict public bucket policies for buckets in this account. Defaults to true. Enabling this setting does not affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. When set to true: + // +kubebuilder:validation:Optional + RestrictPublicBuckets *bool `json:"restrictPublicBuckets,omitempty" tf:"restrict_public_buckets,omitempty"` +} + +type RegionInitParameters struct { + + // The name of the associated bucket for the Region. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The AWS account ID that owns the Amazon S3 bucket that's associated with this Multi-Region Access Point. + BucketAccountID *string `json:"bucketAccountId,omitempty" tf:"bucket_account_id,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` +} + +type RegionObservation struct { + + // The name of the associated bucket for the Region. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The AWS account ID that owns the Amazon S3 bucket that's associated with this Multi-Region Access Point. + BucketAccountID *string `json:"bucketAccountId,omitempty" tf:"bucket_account_id,omitempty"` + + // The Region configuration block to specify the bucket associated with the Multi-Region Access Point. See Region Configuration below for more details. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type RegionParameters struct { + + // The name of the associated bucket for the Region. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The AWS account ID that owns the Amazon S3 bucket that's associated with this Multi-Region Access Point. 
+ // +kubebuilder:validation:Optional + BucketAccountID *string `json:"bucketAccountId,omitempty" tf:"bucket_account_id,omitempty"` + + // Reference to a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` +} + +// MultiRegionAccessPointSpec defines the desired state of MultiRegionAccessPoint +type MultiRegionAccessPointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MultiRegionAccessPointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MultiRegionAccessPointInitParameters `json:"initProvider,omitempty"` +} + +// MultiRegionAccessPointStatus defines the observed state of MultiRegionAccessPoint. +type MultiRegionAccessPointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MultiRegionAccessPointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MultiRegionAccessPoint is the Schema for the MultiRegionAccessPoints API. Provides a resource to manage an S3 Multi-Region Access Point associated with specified buckets. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type MultiRegionAccessPoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.details) || (has(self.initProvider) && has(self.initProvider.details))",message="spec.forProvider.details is a required parameter" + Spec MultiRegionAccessPointSpec `json:"spec"` + Status MultiRegionAccessPointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MultiRegionAccessPointList contains a list of MultiRegionAccessPoints +type MultiRegionAccessPointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MultiRegionAccessPoint `json:"items"` +} + +// Repository type metadata. +var ( + MultiRegionAccessPoint_Kind = "MultiRegionAccessPoint" + MultiRegionAccessPoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MultiRegionAccessPoint_Kind}.String() + MultiRegionAccessPoint_KindAPIVersion = MultiRegionAccessPoint_Kind + "." 
+ CRDGroupVersion.String() + MultiRegionAccessPoint_GroupVersionKind = CRDGroupVersion.WithKind(MultiRegionAccessPoint_Kind) +) + +func init() { + SchemeBuilder.Register(&MultiRegionAccessPoint{}, &MultiRegionAccessPointList{}) +} diff --git a/apis/s3control/v1beta2/zz_multiregionaccesspointpolicy_terraformed.go b/apis/s3control/v1beta2/zz_multiregionaccesspointpolicy_terraformed.go new file mode 100755 index 0000000000..6027f32cf2 --- /dev/null +++ b/apis/s3control/v1beta2/zz_multiregionaccesspointpolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MultiRegionAccessPointPolicy +func (mg *MultiRegionAccessPointPolicy) GetTerraformResourceType() string { + return "aws_s3control_multi_region_access_point_policy" +} + +// GetConnectionDetailsMapping for this MultiRegionAccessPointPolicy +func (tr *MultiRegionAccessPointPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MultiRegionAccessPointPolicy +func (tr *MultiRegionAccessPointPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MultiRegionAccessPointPolicy +func (tr *MultiRegionAccessPointPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MultiRegionAccessPointPolicy +func (tr 
*MultiRegionAccessPointPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MultiRegionAccessPointPolicy +func (tr *MultiRegionAccessPointPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MultiRegionAccessPointPolicy +func (tr *MultiRegionAccessPointPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MultiRegionAccessPointPolicy +func (tr *MultiRegionAccessPointPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MultiRegionAccessPointPolicy +func (tr *MultiRegionAccessPointPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MultiRegionAccessPointPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MultiRegionAccessPointPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &MultiRegionAccessPointPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MultiRegionAccessPointPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3control/v1beta2/zz_multiregionaccesspointpolicy_types.go b/apis/s3control/v1beta2/zz_multiregionaccesspointpolicy_types.go new file mode 100755 index 0000000000..146ab3595d --- /dev/null +++ b/apis/s3control/v1beta2/zz_multiregionaccesspointpolicy_types.go @@ -0,0 +1,147 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MultiRegionAccessPointPolicyDetailsInitParameters struct { + + // The name of the Multi-Region Access Point. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A valid JSON document that specifies the policy that you want to associate with this Multi-Region Access Point. Once applied, the policy can be edited, but not deleted. For more information, see the documentation on Multi-Region Access Point Permissions. + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` +} + +type MultiRegionAccessPointPolicyDetailsObservation struct { + + // The name of the Multi-Region Access Point. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A valid JSON document that specifies the policy that you want to associate with this Multi-Region Access Point. Once applied, the policy can be edited, but not deleted. For more information, see the documentation on Multi-Region Access Point Permissions. + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` +} + +type MultiRegionAccessPointPolicyDetailsParameters struct { + + // The name of the Multi-Region Access Point. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A valid JSON document that specifies the policy that you want to associate with this Multi-Region Access Point. Once applied, the policy can be edited, but not deleted. For more information, see the documentation on Multi-Region Access Point Permissions. + // +kubebuilder:validation:Optional + Policy *string `json:"policy" tf:"policy,omitempty"` +} + +type MultiRegionAccessPointPolicyInitParameters struct { + + // The AWS account ID for the owner of the Multi-Region Access Point. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // A configuration block containing details about the policy for the Multi-Region Access Point. 
See Details Configuration Block below for more details + Details *MultiRegionAccessPointPolicyDetailsInitParameters `json:"details,omitempty" tf:"details,omitempty"` +} + +type MultiRegionAccessPointPolicyObservation struct { + + // The AWS account ID for the owner of the Multi-Region Access Point. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // A configuration block containing details about the policy for the Multi-Region Access Point. See Details Configuration Block below for more details + Details *MultiRegionAccessPointPolicyDetailsObservation `json:"details,omitempty" tf:"details,omitempty"` + + // The last established policy for the Multi-Region Access Point. + Established *string `json:"established,omitempty" tf:"established,omitempty"` + + // The AWS account ID and access point name separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The proposed policy for the Multi-Region Access Point. + Proposed *string `json:"proposed,omitempty" tf:"proposed,omitempty"` +} + +type MultiRegionAccessPointPolicyParameters struct { + + // The AWS account ID for the owner of the Multi-Region Access Point. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // A configuration block containing details about the policy for the Multi-Region Access Point. See Details Configuration Block below for more details + // +kubebuilder:validation:Optional + Details *MultiRegionAccessPointPolicyDetailsParameters `json:"details,omitempty" tf:"details,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// MultiRegionAccessPointPolicySpec defines the desired state of MultiRegionAccessPointPolicy +type MultiRegionAccessPointPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MultiRegionAccessPointPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MultiRegionAccessPointPolicyInitParameters `json:"initProvider,omitempty"` +} + +// MultiRegionAccessPointPolicyStatus defines the observed state of MultiRegionAccessPointPolicy. +type MultiRegionAccessPointPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MultiRegionAccessPointPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MultiRegionAccessPointPolicy is the Schema for the MultiRegionAccessPointPolicys API. Provides a resource to manage an S3 Multi-Region Access Point access control policy. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type MultiRegionAccessPointPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.details) || (has(self.initProvider) && has(self.initProvider.details))",message="spec.forProvider.details is a required parameter" + Spec MultiRegionAccessPointPolicySpec `json:"spec"` + Status MultiRegionAccessPointPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MultiRegionAccessPointPolicyList contains a list of MultiRegionAccessPointPolicys +type MultiRegionAccessPointPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MultiRegionAccessPointPolicy `json:"items"` +} + +// Repository type metadata. +var ( + MultiRegionAccessPointPolicy_Kind = "MultiRegionAccessPointPolicy" + MultiRegionAccessPointPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MultiRegionAccessPointPolicy_Kind}.String() + MultiRegionAccessPointPolicy_KindAPIVersion = MultiRegionAccessPointPolicy_Kind + "." 
+ CRDGroupVersion.String() + MultiRegionAccessPointPolicy_GroupVersionKind = CRDGroupVersion.WithKind(MultiRegionAccessPointPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&MultiRegionAccessPointPolicy{}, &MultiRegionAccessPointPolicyList{}) +} diff --git a/apis/s3control/v1beta2/zz_objectlambdaaccesspoint_terraformed.go b/apis/s3control/v1beta2/zz_objectlambdaaccesspoint_terraformed.go new file mode 100755 index 0000000000..605975658a --- /dev/null +++ b/apis/s3control/v1beta2/zz_objectlambdaaccesspoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ObjectLambdaAccessPoint +func (mg *ObjectLambdaAccessPoint) GetTerraformResourceType() string { + return "aws_s3control_object_lambda_access_point" +} + +// GetConnectionDetailsMapping for this ObjectLambdaAccessPoint +func (tr *ObjectLambdaAccessPoint) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ObjectLambdaAccessPoint +func (tr *ObjectLambdaAccessPoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ObjectLambdaAccessPoint +func (tr *ObjectLambdaAccessPoint) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ObjectLambdaAccessPoint +func (tr *ObjectLambdaAccessPoint) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ObjectLambdaAccessPoint +func (tr *ObjectLambdaAccessPoint) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ObjectLambdaAccessPoint +func (tr *ObjectLambdaAccessPoint) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ObjectLambdaAccessPoint +func (tr *ObjectLambdaAccessPoint) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ObjectLambdaAccessPoint +func (tr *ObjectLambdaAccessPoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ObjectLambdaAccessPoint using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ObjectLambdaAccessPoint) LateInitialize(attrs []byte) (bool, error) { + params := &ObjectLambdaAccessPointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ObjectLambdaAccessPoint) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3control/v1beta2/zz_objectlambdaaccesspoint_types.go b/apis/s3control/v1beta2/zz_objectlambdaaccesspoint_types.go new file mode 100755 index 0000000000..d338faf1fd --- /dev/null +++ b/apis/s3control/v1beta2/zz_objectlambdaaccesspoint_types.go @@ -0,0 +1,301 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AwsLambdaInitParameters struct { + + // The Amazon Resource Name (ARN) of the AWS Lambda function. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` + + // Reference to a Function in lambda to populate functionArn. + // +kubebuilder:validation:Optional + FunctionArnRef *v1.Reference `json:"functionArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate functionArn. + // +kubebuilder:validation:Optional + FunctionArnSelector *v1.Selector `json:"functionArnSelector,omitempty" tf:"-"` + + // Additional JSON that provides supplemental data to the Lambda function used to transform objects. + FunctionPayload *string `json:"functionPayload,omitempty" tf:"function_payload,omitempty"` +} + +type AwsLambdaObservation struct { + + // The Amazon Resource Name (ARN) of the AWS Lambda function. + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` + + // Additional JSON that provides supplemental data to the Lambda function used to transform objects. + FunctionPayload *string `json:"functionPayload,omitempty" tf:"function_payload,omitempty"` +} + +type AwsLambdaParameters struct { + + // The Amazon Resource Name (ARN) of the AWS Lambda function. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + FunctionArn *string `json:"functionArn,omitempty" tf:"function_arn,omitempty"` + + // Reference to a Function in lambda to populate functionArn. + // +kubebuilder:validation:Optional + FunctionArnRef *v1.Reference `json:"functionArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate functionArn. 
+ // +kubebuilder:validation:Optional + FunctionArnSelector *v1.Selector `json:"functionArnSelector,omitempty" tf:"-"` + + // Additional JSON that provides supplemental data to the Lambda function used to transform objects. + // +kubebuilder:validation:Optional + FunctionPayload *string `json:"functionPayload,omitempty" tf:"function_payload,omitempty"` +} + +type ConfigurationInitParameters struct { + + // Allowed features. Valid values: GetObject-Range, GetObject-PartNumber. + // +listType=set + AllowedFeatures []*string `json:"allowedFeatures,omitempty" tf:"allowed_features,omitempty"` + + // Whether or not the CloudWatch metrics configuration is enabled. + CloudWatchMetricsEnabled *bool `json:"cloudWatchMetricsEnabled,omitempty" tf:"cloud_watch_metrics_enabled,omitempty"` + + // Standard access point associated with the Object Lambda Access Point. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta2.AccessPoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + SupportingAccessPoint *string `json:"supportingAccessPoint,omitempty" tf:"supporting_access_point,omitempty"` + + // Reference to a AccessPoint in s3control to populate supportingAccessPoint. + // +kubebuilder:validation:Optional + SupportingAccessPointRef *v1.Reference `json:"supportingAccessPointRef,omitempty" tf:"-"` + + // Selector for a AccessPoint in s3control to populate supportingAccessPoint. + // +kubebuilder:validation:Optional + SupportingAccessPointSelector *v1.Selector `json:"supportingAccessPointSelector,omitempty" tf:"-"` + + // List of transformation configurations for the Object Lambda Access Point. See Transformation Configuration below for more details. 
+ TransformationConfiguration []TransformationConfigurationInitParameters `json:"transformationConfiguration,omitempty" tf:"transformation_configuration,omitempty"` +} + +type ConfigurationObservation struct { + + // Allowed features. Valid values: GetObject-Range, GetObject-PartNumber. + // +listType=set + AllowedFeatures []*string `json:"allowedFeatures,omitempty" tf:"allowed_features,omitempty"` + + // Whether or not the CloudWatch metrics configuration is enabled. + CloudWatchMetricsEnabled *bool `json:"cloudWatchMetricsEnabled,omitempty" tf:"cloud_watch_metrics_enabled,omitempty"` + + // Standard access point associated with the Object Lambda Access Point. + SupportingAccessPoint *string `json:"supportingAccessPoint,omitempty" tf:"supporting_access_point,omitempty"` + + // List of transformation configurations for the Object Lambda Access Point. See Transformation Configuration below for more details. + TransformationConfiguration []TransformationConfigurationObservation `json:"transformationConfiguration,omitempty" tf:"transformation_configuration,omitempty"` +} + +type ConfigurationParameters struct { + + // Allowed features. Valid values: GetObject-Range, GetObject-PartNumber. + // +kubebuilder:validation:Optional + // +listType=set + AllowedFeatures []*string `json:"allowedFeatures,omitempty" tf:"allowed_features,omitempty"` + + // Whether or not the CloudWatch metrics configuration is enabled. + // +kubebuilder:validation:Optional + CloudWatchMetricsEnabled *bool `json:"cloudWatchMetricsEnabled,omitempty" tf:"cloud_watch_metrics_enabled,omitempty"` + + // Standard access point associated with the Object Lambda Access Point. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3control/v1beta2.AccessPoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + SupportingAccessPoint *string `json:"supportingAccessPoint,omitempty" tf:"supporting_access_point,omitempty"` + + // Reference to a AccessPoint in s3control to populate supportingAccessPoint. + // +kubebuilder:validation:Optional + SupportingAccessPointRef *v1.Reference `json:"supportingAccessPointRef,omitempty" tf:"-"` + + // Selector for a AccessPoint in s3control to populate supportingAccessPoint. + // +kubebuilder:validation:Optional + SupportingAccessPointSelector *v1.Selector `json:"supportingAccessPointSelector,omitempty" tf:"-"` + + // List of transformation configurations for the Object Lambda Access Point. See Transformation Configuration below for more details. + // +kubebuilder:validation:Optional + TransformationConfiguration []TransformationConfigurationParameters `json:"transformationConfiguration" tf:"transformation_configuration,omitempty"` +} + +type ContentTransformationInitParameters struct { + + // Configuration for an AWS Lambda function. See AWS Lambda below for more details. + AwsLambda *AwsLambdaInitParameters `json:"awsLambda,omitempty" tf:"aws_lambda,omitempty"` +} + +type ContentTransformationObservation struct { + + // Configuration for an AWS Lambda function. See AWS Lambda below for more details. + AwsLambda *AwsLambdaObservation `json:"awsLambda,omitempty" tf:"aws_lambda,omitempty"` +} + +type ContentTransformationParameters struct { + + // Configuration for an AWS Lambda function. See AWS Lambda below for more details. 
+ // +kubebuilder:validation:Optional + AwsLambda *AwsLambdaParameters `json:"awsLambda" tf:"aws_lambda,omitempty"` +} + +type ObjectLambdaAccessPointInitParameters struct { + + // The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details. + Configuration *ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The name for this Object Lambda Access Point. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ObjectLambdaAccessPointObservation struct { + + // The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Alias for the S3 Object Lambda Access Point. + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // Amazon Resource Name (ARN) of the Object Lambda Access Point. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details. + Configuration *ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The AWS account ID and access point name separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name for this Object Lambda Access Point. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ObjectLambdaAccessPointParameters struct { + + // The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. 
+ // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details. + // +kubebuilder:validation:Optional + Configuration *ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The name for this Object Lambda Access Point. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type TransformationConfigurationInitParameters struct { + + // The actions of an Object Lambda Access Point configuration. Valid values: GetObject. + // +listType=set + Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` + + // The content transformation of an Object Lambda Access Point configuration. See Content Transformation below for more details. + ContentTransformation *ContentTransformationInitParameters `json:"contentTransformation,omitempty" tf:"content_transformation,omitempty"` +} + +type TransformationConfigurationObservation struct { + + // The actions of an Object Lambda Access Point configuration. Valid values: GetObject. + // +listType=set + Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` + + // The content transformation of an Object Lambda Access Point configuration. See Content Transformation below for more details. + ContentTransformation *ContentTransformationObservation `json:"contentTransformation,omitempty" tf:"content_transformation,omitempty"` +} + +type TransformationConfigurationParameters struct { + + // The actions of an Object Lambda Access Point configuration. Valid values: GetObject. 
+ // +kubebuilder:validation:Optional + // +listType=set + Actions []*string `json:"actions" tf:"actions,omitempty"` + + // The content transformation of an Object Lambda Access Point configuration. See Content Transformation below for more details. + // +kubebuilder:validation:Optional + ContentTransformation *ContentTransformationParameters `json:"contentTransformation" tf:"content_transformation,omitempty"` +} + +// ObjectLambdaAccessPointSpec defines the desired state of ObjectLambdaAccessPoint +type ObjectLambdaAccessPointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ObjectLambdaAccessPointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ObjectLambdaAccessPointInitParameters `json:"initProvider,omitempty"` +} + +// ObjectLambdaAccessPointStatus defines the observed state of ObjectLambdaAccessPoint. +type ObjectLambdaAccessPointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ObjectLambdaAccessPointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ObjectLambdaAccessPoint is the Schema for the ObjectLambdaAccessPoints API. Provides a resource to manage an S3 Object Lambda Access Point. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ObjectLambdaAccessPoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.configuration) || (has(self.initProvider) && has(self.initProvider.configuration))",message="spec.forProvider.configuration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ObjectLambdaAccessPointSpec `json:"spec"` + Status ObjectLambdaAccessPointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ObjectLambdaAccessPointList contains a list of ObjectLambdaAccessPoints +type ObjectLambdaAccessPointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ObjectLambdaAccessPoint `json:"items"` +} + +// Repository type metadata. +var ( + ObjectLambdaAccessPoint_Kind = "ObjectLambdaAccessPoint" + ObjectLambdaAccessPoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ObjectLambdaAccessPoint_Kind}.String() + ObjectLambdaAccessPoint_KindAPIVersion = ObjectLambdaAccessPoint_Kind + "." 
+ CRDGroupVersion.String() + ObjectLambdaAccessPoint_GroupVersionKind = CRDGroupVersion.WithKind(ObjectLambdaAccessPoint_Kind) +) + +func init() { + SchemeBuilder.Register(&ObjectLambdaAccessPoint{}, &ObjectLambdaAccessPointList{}) +} diff --git a/apis/s3control/v1beta2/zz_storagelensconfiguration_terraformed.go b/apis/s3control/v1beta2/zz_storagelensconfiguration_terraformed.go new file mode 100755 index 0000000000..99acd94a48 --- /dev/null +++ b/apis/s3control/v1beta2/zz_storagelensconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StorageLensConfiguration +func (mg *StorageLensConfiguration) GetTerraformResourceType() string { + return "aws_s3control_storage_lens_configuration" +} + +// GetConnectionDetailsMapping for this StorageLensConfiguration +func (tr *StorageLensConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this StorageLensConfiguration +func (tr *StorageLensConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StorageLensConfiguration +func (tr *StorageLensConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StorageLensConfiguration +func (tr *StorageLensConfiguration) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this StorageLensConfiguration +func (tr *StorageLensConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this StorageLensConfiguration +func (tr *StorageLensConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this StorageLensConfiguration +func (tr *StorageLensConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this StorageLensConfiguration +func (tr *StorageLensConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StorageLensConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StorageLensConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &StorageLensConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StorageLensConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/s3control/v1beta2/zz_storagelensconfiguration_types.go b/apis/s3control/v1beta2/zz_storagelensconfiguration_types.go new file mode 100755 index 0000000000..e885f434fa --- /dev/null +++ b/apis/s3control/v1beta2/zz_storagelensconfiguration_types.go @@ -0,0 +1,851 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountLevelDetailedStatusCodeMetricsInitParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AccountLevelDetailedStatusCodeMetricsObservation struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AccountLevelDetailedStatusCodeMetricsParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AccountLevelInitParameters struct { + + // S3 Storage Lens activity metrics. See Activity Metrics below for more details. + ActivityMetrics *ActivityMetricsInitParameters `json:"activityMetrics,omitempty" tf:"activity_metrics,omitempty"` + + // optimization metrics for S3 Storage Lens. See Advanced Cost-Optimization Metrics below for more details. + AdvancedCostOptimizationMetrics *AdvancedCostOptimizationMetricsInitParameters `json:"advancedCostOptimizationMetrics,omitempty" tf:"advanced_cost_optimization_metrics,omitempty"` + + // protection metrics for S3 Storage Lens. See Advanced Data-Protection Metrics below for more details. + AdvancedDataProtectionMetrics *AdvancedDataProtectionMetricsInitParameters `json:"advancedDataProtectionMetrics,omitempty" tf:"advanced_data_protection_metrics,omitempty"` + + // level configuration. See Bucket Level below for more details. + BucketLevel *BucketLevelInitParameters `json:"bucketLevel,omitempty" tf:"bucket_level,omitempty"` + + // Detailed status code metrics for S3 Storage Lens. See Detailed Status Code Metrics below for more details. + DetailedStatusCodeMetrics *AccountLevelDetailedStatusCodeMetricsInitParameters `json:"detailedStatusCodeMetrics,omitempty" tf:"detailed_status_code_metrics,omitempty"` +} + +type AccountLevelObservation struct { + + // S3 Storage Lens activity metrics. See Activity Metrics below for more details. 
+ ActivityMetrics *ActivityMetricsObservation `json:"activityMetrics,omitempty" tf:"activity_metrics,omitempty"` + + // optimization metrics for S3 Storage Lens. See Advanced Cost-Optimization Metrics below for more details. + AdvancedCostOptimizationMetrics *AdvancedCostOptimizationMetricsObservation `json:"advancedCostOptimizationMetrics,omitempty" tf:"advanced_cost_optimization_metrics,omitempty"` + + // protection metrics for S3 Storage Lens. See Advanced Data-Protection Metrics below for more details. + AdvancedDataProtectionMetrics *AdvancedDataProtectionMetricsObservation `json:"advancedDataProtectionMetrics,omitempty" tf:"advanced_data_protection_metrics,omitempty"` + + // level configuration. See Bucket Level below for more details. + BucketLevel *BucketLevelObservation `json:"bucketLevel,omitempty" tf:"bucket_level,omitempty"` + + // Detailed status code metrics for S3 Storage Lens. See Detailed Status Code Metrics below for more details. + DetailedStatusCodeMetrics *AccountLevelDetailedStatusCodeMetricsObservation `json:"detailedStatusCodeMetrics,omitempty" tf:"detailed_status_code_metrics,omitempty"` +} + +type AccountLevelParameters struct { + + // S3 Storage Lens activity metrics. See Activity Metrics below for more details. + // +kubebuilder:validation:Optional + ActivityMetrics *ActivityMetricsParameters `json:"activityMetrics,omitempty" tf:"activity_metrics,omitempty"` + + // optimization metrics for S3 Storage Lens. See Advanced Cost-Optimization Metrics below for more details. + // +kubebuilder:validation:Optional + AdvancedCostOptimizationMetrics *AdvancedCostOptimizationMetricsParameters `json:"advancedCostOptimizationMetrics,omitempty" tf:"advanced_cost_optimization_metrics,omitempty"` + + // protection metrics for S3 Storage Lens. See Advanced Data-Protection Metrics below for more details. 
+ // +kubebuilder:validation:Optional + AdvancedDataProtectionMetrics *AdvancedDataProtectionMetricsParameters `json:"advancedDataProtectionMetrics,omitempty" tf:"advanced_data_protection_metrics,omitempty"` + + // level configuration. See Bucket Level below for more details. + // +kubebuilder:validation:Optional + BucketLevel *BucketLevelParameters `json:"bucketLevel" tf:"bucket_level,omitempty"` + + // Detailed status code metrics for S3 Storage Lens. See Detailed Status Code Metrics below for more details. + // +kubebuilder:validation:Optional + DetailedStatusCodeMetrics *AccountLevelDetailedStatusCodeMetricsParameters `json:"detailedStatusCodeMetrics,omitempty" tf:"detailed_status_code_metrics,omitempty"` +} + +type ActivityMetricsInitParameters struct { + + // Whether the activity metrics are enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ActivityMetricsObservation struct { + + // Whether the activity metrics are enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ActivityMetricsParameters struct { + + // Whether the activity metrics are enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AdvancedCostOptimizationMetricsInitParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AdvancedCostOptimizationMetricsObservation struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AdvancedCostOptimizationMetricsParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AdvancedDataProtectionMetricsInitParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AdvancedDataProtectionMetricsObservation struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AdvancedDataProtectionMetricsParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AwsOrgInitParameters struct { + + // The Amazon Resource Name (ARN) of the bucket. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type AwsOrgObservation struct { + + // The Amazon Resource Name (ARN) of the bucket. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type AwsOrgParameters struct { + + // The Amazon Resource Name (ARN) of the bucket. + // +kubebuilder:validation:Optional + Arn *string `json:"arn" tf:"arn,omitempty"` +} + +type BucketLevelActivityMetricsInitParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BucketLevelActivityMetricsObservation struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BucketLevelActivityMetricsParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BucketLevelAdvancedCostOptimizationMetricsInitParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BucketLevelAdvancedCostOptimizationMetricsObservation struct { + + // Whether the S3 Storage Lens configuration is enabled. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BucketLevelAdvancedCostOptimizationMetricsParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BucketLevelAdvancedDataProtectionMetricsInitParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BucketLevelAdvancedDataProtectionMetricsObservation struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BucketLevelAdvancedDataProtectionMetricsParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type BucketLevelInitParameters struct { + + // S3 Storage Lens activity metrics. See Activity Metrics below for more details. + ActivityMetrics *BucketLevelActivityMetricsInitParameters `json:"activityMetrics,omitempty" tf:"activity_metrics,omitempty"` + + // optimization metrics for S3 Storage Lens. See Advanced Cost-Optimization Metrics below for more details. + AdvancedCostOptimizationMetrics *BucketLevelAdvancedCostOptimizationMetricsInitParameters `json:"advancedCostOptimizationMetrics,omitempty" tf:"advanced_cost_optimization_metrics,omitempty"` + + // protection metrics for S3 Storage Lens. See Advanced Data-Protection Metrics below for more details. + AdvancedDataProtectionMetrics *BucketLevelAdvancedDataProtectionMetricsInitParameters `json:"advancedDataProtectionMetrics,omitempty" tf:"advanced_data_protection_metrics,omitempty"` + + // Detailed status code metrics for S3 Storage Lens. See Detailed Status Code Metrics below for more details. 
+ DetailedStatusCodeMetrics *DetailedStatusCodeMetricsInitParameters `json:"detailedStatusCodeMetrics,omitempty" tf:"detailed_status_code_metrics,omitempty"` + + // level metrics for S3 Storage Lens. See Prefix Level below for more details. + PrefixLevel *PrefixLevelInitParameters `json:"prefixLevel,omitempty" tf:"prefix_level,omitempty"` +} + +type BucketLevelObservation struct { + + // S3 Storage Lens activity metrics. See Activity Metrics below for more details. + ActivityMetrics *BucketLevelActivityMetricsObservation `json:"activityMetrics,omitempty" tf:"activity_metrics,omitempty"` + + // optimization metrics for S3 Storage Lens. See Advanced Cost-Optimization Metrics below for more details. + AdvancedCostOptimizationMetrics *BucketLevelAdvancedCostOptimizationMetricsObservation `json:"advancedCostOptimizationMetrics,omitempty" tf:"advanced_cost_optimization_metrics,omitempty"` + + // protection metrics for S3 Storage Lens. See Advanced Data-Protection Metrics below for more details. + AdvancedDataProtectionMetrics *BucketLevelAdvancedDataProtectionMetricsObservation `json:"advancedDataProtectionMetrics,omitempty" tf:"advanced_data_protection_metrics,omitempty"` + + // Detailed status code metrics for S3 Storage Lens. See Detailed Status Code Metrics below for more details. + DetailedStatusCodeMetrics *DetailedStatusCodeMetricsObservation `json:"detailedStatusCodeMetrics,omitempty" tf:"detailed_status_code_metrics,omitempty"` + + // level metrics for S3 Storage Lens. See Prefix Level below for more details. + PrefixLevel *PrefixLevelObservation `json:"prefixLevel,omitempty" tf:"prefix_level,omitempty"` +} + +type BucketLevelParameters struct { + + // S3 Storage Lens activity metrics. See Activity Metrics below for more details. + // +kubebuilder:validation:Optional + ActivityMetrics *BucketLevelActivityMetricsParameters `json:"activityMetrics,omitempty" tf:"activity_metrics,omitempty"` + + // optimization metrics for S3 Storage Lens. 
See Advanced Cost-Optimization Metrics below for more details. + // +kubebuilder:validation:Optional + AdvancedCostOptimizationMetrics *BucketLevelAdvancedCostOptimizationMetricsParameters `json:"advancedCostOptimizationMetrics,omitempty" tf:"advanced_cost_optimization_metrics,omitempty"` + + // protection metrics for S3 Storage Lens. See Advanced Data-Protection Metrics below for more details. + // +kubebuilder:validation:Optional + AdvancedDataProtectionMetrics *BucketLevelAdvancedDataProtectionMetricsParameters `json:"advancedDataProtectionMetrics,omitempty" tf:"advanced_data_protection_metrics,omitempty"` + + // Detailed status code metrics for S3 Storage Lens. See Detailed Status Code Metrics below for more details. + // +kubebuilder:validation:Optional + DetailedStatusCodeMetrics *DetailedStatusCodeMetricsParameters `json:"detailedStatusCodeMetrics,omitempty" tf:"detailed_status_code_metrics,omitempty"` + + // level metrics for S3 Storage Lens. See Prefix Level below for more details. + // +kubebuilder:validation:Optional + PrefixLevel *PrefixLevelParameters `json:"prefixLevel,omitempty" tf:"prefix_level,omitempty"` +} + +type CloudWatchMetricsInitParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type CloudWatchMetricsObservation struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type CloudWatchMetricsParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type DataExportInitParameters struct { + + // Amazon CloudWatch publishing for S3 Storage Lens metrics. See Cloud Watch Metrics below for more details. 
+ CloudWatchMetrics *CloudWatchMetricsInitParameters `json:"cloudWatchMetrics,omitempty" tf:"cloud_watch_metrics,omitempty"` + + // The bucket where the S3 Storage Lens metrics export will be located. See S3 Bucket Destination below for more details. + S3BucketDestination *S3BucketDestinationInitParameters `json:"s3BucketDestination,omitempty" tf:"s3_bucket_destination,omitempty"` +} + +type DataExportObservation struct { + + // Amazon CloudWatch publishing for S3 Storage Lens metrics. See Cloud Watch Metrics below for more details. + CloudWatchMetrics *CloudWatchMetricsObservation `json:"cloudWatchMetrics,omitempty" tf:"cloud_watch_metrics,omitempty"` + + // The bucket where the S3 Storage Lens metrics export will be located. See S3 Bucket Destination below for more details. + S3BucketDestination *S3BucketDestinationObservation `json:"s3BucketDestination,omitempty" tf:"s3_bucket_destination,omitempty"` +} + +type DataExportParameters struct { + + // Amazon CloudWatch publishing for S3 Storage Lens metrics. See Cloud Watch Metrics below for more details. + // +kubebuilder:validation:Optional + CloudWatchMetrics *CloudWatchMetricsParameters `json:"cloudWatchMetrics,omitempty" tf:"cloud_watch_metrics,omitempty"` + + // The bucket where the S3 Storage Lens metrics export will be located. See S3 Bucket Destination below for more details. + // +kubebuilder:validation:Optional + S3BucketDestination *S3BucketDestinationParameters `json:"s3BucketDestination,omitempty" tf:"s3_bucket_destination,omitempty"` +} + +type DetailedStatusCodeMetricsInitParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type DetailedStatusCodeMetricsObservation struct { + + // Whether the S3 Storage Lens configuration is enabled. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type DetailedStatusCodeMetricsParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type EncryptionInitParameters struct { + + // KMS encryption. See SSE KMS below for more details. + SseKMS *SseKMSInitParameters `json:"sseKms,omitempty" tf:"sse_kms,omitempty"` + + // S3 encryption. An empty configuration block {} should be used. + SseS3 []SseS3InitParameters `json:"sseS3,omitempty" tf:"sse_s3,omitempty"` +} + +type EncryptionObservation struct { + + // KMS encryption. See SSE KMS below for more details. + SseKMS *SseKMSObservation `json:"sseKms,omitempty" tf:"sse_kms,omitempty"` + + // S3 encryption. An empty configuration block {} should be used. + SseS3 []SseS3Parameters `json:"sseS3,omitempty" tf:"sse_s3,omitempty"` +} + +type EncryptionParameters struct { + + // KMS encryption. See SSE KMS below for more details. + // +kubebuilder:validation:Optional + SseKMS *SseKMSParameters `json:"sseKms,omitempty" tf:"sse_kms,omitempty"` + + // S3 encryption. An empty configuration block {} should be used. + // +kubebuilder:validation:Optional + SseS3 []SseS3Parameters `json:"sseS3,omitempty" tf:"sse_s3,omitempty"` +} + +type ExcludeInitParameters struct { + + // List of S3 bucket ARNs. + // +listType=set + Buckets []*string `json:"buckets,omitempty" tf:"buckets,omitempty"` + + // List of AWS Regions. + // +listType=set + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` +} + +type ExcludeObservation struct { + + // List of S3 bucket ARNs. + // +listType=set + Buckets []*string `json:"buckets,omitempty" tf:"buckets,omitempty"` + + // List of AWS Regions. + // +listType=set + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` +} + +type ExcludeParameters struct { + + // List of S3 bucket ARNs. 
+ // +kubebuilder:validation:Optional + // +listType=set + Buckets []*string `json:"buckets,omitempty" tf:"buckets,omitempty"` + + // List of AWS Regions. + // +kubebuilder:validation:Optional + // +listType=set + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` +} + +type IncludeInitParameters struct { + + // List of S3 bucket ARNs. + // +listType=set + Buckets []*string `json:"buckets,omitempty" tf:"buckets,omitempty"` + + // List of AWS Regions. + // +listType=set + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` +} + +type IncludeObservation struct { + + // List of S3 bucket ARNs. + // +listType=set + Buckets []*string `json:"buckets,omitempty" tf:"buckets,omitempty"` + + // List of AWS Regions. + // +listType=set + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` +} + +type IncludeParameters struct { + + // List of S3 bucket ARNs. + // +kubebuilder:validation:Optional + // +listType=set + Buckets []*string `json:"buckets,omitempty" tf:"buckets,omitempty"` + + // List of AWS Regions. + // +kubebuilder:validation:Optional + // +listType=set + Regions []*string `json:"regions,omitempty" tf:"regions,omitempty"` +} + +type PrefixLevelInitParameters struct { + + // level storage metrics for S3 Storage Lens. See Prefix Level Storage Metrics below for more details. + StorageMetrics *StorageMetricsInitParameters `json:"storageMetrics,omitempty" tf:"storage_metrics,omitempty"` +} + +type PrefixLevelObservation struct { + + // level storage metrics for S3 Storage Lens. See Prefix Level Storage Metrics below for more details. + StorageMetrics *StorageMetricsObservation `json:"storageMetrics,omitempty" tf:"storage_metrics,omitempty"` +} + +type PrefixLevelParameters struct { + + // level storage metrics for S3 Storage Lens. See Prefix Level Storage Metrics below for more details. 
+ // +kubebuilder:validation:Optional + StorageMetrics *StorageMetricsParameters `json:"storageMetrics" tf:"storage_metrics,omitempty"` +} + +type S3BucketDestinationInitParameters struct { + + // The account ID of the owner of the S3 Storage Lens metrics export bucket. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The Amazon Resource Name (ARN) of the bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a Bucket in s3 to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Encryption of the metrics exports in this bucket. See Encryption below for more details. + Encryption *EncryptionInitParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The export format. Valid values: CSV, Parquet. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The schema version of the export file. Valid values: V_1. + OutputSchemaVersion *string `json:"outputSchemaVersion,omitempty" tf:"output_schema_version,omitempty"` + + // The prefix of the destination bucket where the metrics export will be delivered. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3BucketDestinationObservation struct { + + // The account ID of the owner of the S3 Storage Lens metrics export bucket. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The Amazon Resource Name (ARN) of the bucket. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Encryption of the metrics exports in this bucket. 
See Encryption below for more details. + Encryption *EncryptionObservation `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The export format. Valid values: CSV, Parquet. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The schema version of the export file. Valid values: V_1. + OutputSchemaVersion *string `json:"outputSchemaVersion,omitempty" tf:"output_schema_version,omitempty"` + + // The prefix of the destination bucket where the metrics export will be delivered. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3BucketDestinationParameters struct { + + // The account ID of the owner of the S3 Storage Lens metrics export bucket. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId" tf:"account_id,omitempty"` + + // The Amazon Resource Name (ARN) of the bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a Bucket in s3 to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Encryption of the metrics exports in this bucket. See Encryption below for more details. + // +kubebuilder:validation:Optional + Encryption *EncryptionParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The export format. Valid values: CSV, Parquet. + // +kubebuilder:validation:Optional + Format *string `json:"format" tf:"format,omitempty"` + + // The schema version of the export file. Valid values: V_1. 
+ // +kubebuilder:validation:Optional + OutputSchemaVersion *string `json:"outputSchemaVersion" tf:"output_schema_version,omitempty"` + + // The prefix of the destination bucket where the metrics export will be delivered. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type SelectionCriteriaInitParameters struct { + + // The delimiter of the selection criteria being used. + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` + + // The max depth of the selection criteria. + MaxDepth *float64 `json:"maxDepth,omitempty" tf:"max_depth,omitempty"` + + // The minimum number of storage bytes percentage whose metrics will be selected. + MinStorageBytesPercentage *float64 `json:"minStorageBytesPercentage,omitempty" tf:"min_storage_bytes_percentage,omitempty"` +} + +type SelectionCriteriaObservation struct { + + // The delimiter of the selection criteria being used. + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` + + // The max depth of the selection criteria. + MaxDepth *float64 `json:"maxDepth,omitempty" tf:"max_depth,omitempty"` + + // The minimum number of storage bytes percentage whose metrics will be selected. + MinStorageBytesPercentage *float64 `json:"minStorageBytesPercentage,omitempty" tf:"min_storage_bytes_percentage,omitempty"` +} + +type SelectionCriteriaParameters struct { + + // The delimiter of the selection criteria being used. + // +kubebuilder:validation:Optional + Delimiter *string `json:"delimiter,omitempty" tf:"delimiter,omitempty"` + + // The max depth of the selection criteria. + // +kubebuilder:validation:Optional + MaxDepth *float64 `json:"maxDepth,omitempty" tf:"max_depth,omitempty"` + + // The minimum number of storage bytes percentage whose metrics will be selected. 
+ // +kubebuilder:validation:Optional + MinStorageBytesPercentage *float64 `json:"minStorageBytesPercentage,omitempty" tf:"min_storage_bytes_percentage,omitempty"` +} + +type SseKMSInitParameters struct { + + // KMS key ARN. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` +} + +type SseKMSObservation struct { + + // KMS key ARN. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` +} + +type SseKMSParameters struct { + + // KMS key ARN. + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId" tf:"key_id,omitempty"` +} + +type SseS3InitParameters struct { +} + +type SseS3Observation struct { +} + +type SseS3Parameters struct { +} + +type StorageLensConfigurationInitParameters struct { + + // The AWS account ID for the S3 Storage Lens configuration. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The ID of the S3 Storage Lens configuration. + ConfigID *string `json:"configId,omitempty" tf:"config_id,omitempty"` + + // The S3 Storage Lens configuration. See Storage Lens Configuration below for more details. + StorageLensConfiguration *StorageLensConfigurationStorageLensConfigurationInitParameters `json:"storageLensConfiguration,omitempty" tf:"storage_lens_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StorageLensConfigurationObservation struct { + + // The AWS account ID for the S3 Storage Lens configuration. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Amazon Resource Name (ARN) of the S3 Storage Lens configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ID of the S3 Storage Lens configuration. + ConfigID *string `json:"configId,omitempty" tf:"config_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The S3 Storage Lens configuration. See Storage Lens Configuration below for more details. 
+ StorageLensConfiguration *StorageLensConfigurationStorageLensConfigurationObservation `json:"storageLensConfiguration,omitempty" tf:"storage_lens_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type StorageLensConfigurationParameters struct { + + // The AWS account ID for the S3 Storage Lens configuration. + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The ID of the S3 Storage Lens configuration. + // +kubebuilder:validation:Optional + ConfigID *string `json:"configId,omitempty" tf:"config_id,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The S3 Storage Lens configuration. See Storage Lens Configuration below for more details. + // +kubebuilder:validation:Optional + StorageLensConfiguration *StorageLensConfigurationStorageLensConfigurationParameters `json:"storageLensConfiguration,omitempty" tf:"storage_lens_configuration,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StorageLensConfigurationStorageLensConfigurationInitParameters struct { + + // level configurations of the S3 Storage Lens configuration. See Account Level below for more details. + AccountLevel *AccountLevelInitParameters `json:"accountLevel,omitempty" tf:"account_level,omitempty"` + + // The Amazon Web Services organization for the S3 Storage Lens configuration. See AWS Org below for more details. 
+ AwsOrg *AwsOrgInitParameters `json:"awsOrg,omitempty" tf:"aws_org,omitempty"` + + // Properties of S3 Storage Lens metrics export including the destination, schema and format. See Data Export below for more details. + DataExport *DataExportInitParameters `json:"dataExport,omitempty" tf:"data_export,omitempty"` + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // What is excluded in this configuration. Conflicts with include. See Exclude below for more details. + Exclude *ExcludeInitParameters `json:"exclude,omitempty" tf:"exclude,omitempty"` + + // What is included in this configuration. Conflicts with exclude. See Include below for more details. + Include *IncludeInitParameters `json:"include,omitempty" tf:"include,omitempty"` +} + +type StorageLensConfigurationStorageLensConfigurationObservation struct { + + // level configurations of the S3 Storage Lens configuration. See Account Level below for more details. + AccountLevel *AccountLevelObservation `json:"accountLevel,omitempty" tf:"account_level,omitempty"` + + // The Amazon Web Services organization for the S3 Storage Lens configuration. See AWS Org below for more details. + AwsOrg *AwsOrgObservation `json:"awsOrg,omitempty" tf:"aws_org,omitempty"` + + // Properties of S3 Storage Lens metrics export including the destination, schema and format. See Data Export below for more details. + DataExport *DataExportObservation `json:"dataExport,omitempty" tf:"data_export,omitempty"` + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // What is excluded in this configuration. Conflicts with include. See Exclude below for more details. + Exclude *ExcludeObservation `json:"exclude,omitempty" tf:"exclude,omitempty"` + + // What is included in this configuration. Conflicts with exclude. See Include below for more details. 
+ Include *IncludeObservation `json:"include,omitempty" tf:"include,omitempty"` +} + +type StorageLensConfigurationStorageLensConfigurationParameters struct { + + // level configurations of the S3 Storage Lens configuration. See Account Level below for more details. + // +kubebuilder:validation:Optional + AccountLevel *AccountLevelParameters `json:"accountLevel" tf:"account_level,omitempty"` + + // The Amazon Web Services organization for the S3 Storage Lens configuration. See AWS Org below for more details. + // +kubebuilder:validation:Optional + AwsOrg *AwsOrgParameters `json:"awsOrg,omitempty" tf:"aws_org,omitempty"` + + // Properties of S3 Storage Lens metrics export including the destination, schema and format. See Data Export below for more details. + // +kubebuilder:validation:Optional + DataExport *DataExportParameters `json:"dataExport,omitempty" tf:"data_export,omitempty"` + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // What is excluded in this configuration. Conflicts with include. See Exclude below for more details. + // +kubebuilder:validation:Optional + Exclude *ExcludeParameters `json:"exclude,omitempty" tf:"exclude,omitempty"` + + // What is included in this configuration. Conflicts with exclude. See Include below for more details. + // +kubebuilder:validation:Optional + Include *IncludeParameters `json:"include,omitempty" tf:"include,omitempty"` +} + +type StorageMetricsInitParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Selection criteria. See Selection Criteria below for more details. + SelectionCriteria *SelectionCriteriaInitParameters `json:"selectionCriteria,omitempty" tf:"selection_criteria,omitempty"` +} + +type StorageMetricsObservation struct { + + // Whether the S3 Storage Lens configuration is enabled. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Selection criteria. See Selection Criteria below for more details. + SelectionCriteria *SelectionCriteriaObservation `json:"selectionCriteria,omitempty" tf:"selection_criteria,omitempty"` +} + +type StorageMetricsParameters struct { + + // Whether the S3 Storage Lens configuration is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Selection criteria. See Selection Criteria below for more details. + // +kubebuilder:validation:Optional + SelectionCriteria *SelectionCriteriaParameters `json:"selectionCriteria,omitempty" tf:"selection_criteria,omitempty"` +} + +// StorageLensConfigurationSpec defines the desired state of StorageLensConfiguration +type StorageLensConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StorageLensConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StorageLensConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// StorageLensConfigurationStatus defines the observed state of StorageLensConfiguration. 
+type StorageLensConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StorageLensConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StorageLensConfiguration is the Schema for the StorageLensConfigurations API. Provides a resource to manage an S3 Storage Lens configuration. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type StorageLensConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.configId) || (has(self.initProvider) && has(self.initProvider.configId))",message="spec.forProvider.configId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageLensConfiguration) || (has(self.initProvider) && has(self.initProvider.storageLensConfiguration))",message="spec.forProvider.storageLensConfiguration is a required parameter" + Spec StorageLensConfigurationSpec `json:"spec"` + Status StorageLensConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StorageLensConfigurationList contains a list of StorageLensConfigurations +type StorageLensConfigurationList struct { + 
metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StorageLensConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + StorageLensConfiguration_Kind = "StorageLensConfiguration" + StorageLensConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StorageLensConfiguration_Kind}.String() + StorageLensConfiguration_KindAPIVersion = StorageLensConfiguration_Kind + "." + CRDGroupVersion.String() + StorageLensConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(StorageLensConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&StorageLensConfiguration{}, &StorageLensConfigurationList{}) +} diff --git a/apis/sagemaker/v1beta1/zz_generated.conversion_hubs.go b/apis/sagemaker/v1beta1/zz_generated.conversion_hubs.go index 5ae09a0e3b..305a184627 100755 --- a/apis/sagemaker/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/sagemaker/v1beta1/zz_generated.conversion_hubs.go @@ -6,68 +6,23 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *App) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *AppImageConfig) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *CodeRepository) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Device) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DeviceFleet) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Domain) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Endpoint) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *EndpointConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FeatureGroup) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Image) Hub() {} // Hub marks this type as a conversion hub. func (tr *ImageVersion) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Model) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *ModelPackageGroup) Hub() {} // Hub marks this type as a conversion hub. func (tr *ModelPackageGroupPolicy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *NotebookInstance) Hub() {} - // Hub marks this type as a conversion hub. func (tr *NotebookInstanceLifecycleConfiguration) Hub() {} // Hub marks this type as a conversion hub. func (tr *ServicecatalogPortfolioStatus) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Space) Hub() {} - // Hub marks this type as a conversion hub. func (tr *StudioLifecycleConfig) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *UserProfile) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Workforce) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Workteam) Hub() {} diff --git a/apis/sagemaker/v1beta1/zz_generated.conversion_spokes.go b/apis/sagemaker/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..0b024076bb --- /dev/null +++ b/apis/sagemaker/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,314 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this App to the hub type. +func (tr *App) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the App type. 
+func (tr *App) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this AppImageConfig to the hub type. +func (tr *AppImageConfig) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AppImageConfig type. +func (tr *AppImageConfig) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this CodeRepository to the hub type. +func (tr *CodeRepository) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CodeRepository type. 
+func (tr *CodeRepository) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Device to the hub type. +func (tr *Device) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Device type. +func (tr *Device) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DeviceFleet to the hub type. +func (tr *DeviceFleet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DeviceFleet type. 
+func (tr *DeviceFleet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Domain to the hub type. +func (tr *Domain) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Domain type. +func (tr *Domain) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Endpoint to the hub type. +func (tr *Endpoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Endpoint type. 
+func (tr *Endpoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this EndpointConfiguration to the hub type. +func (tr *EndpointConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EndpointConfiguration type. +func (tr *EndpointConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FeatureGroup to the hub type. +func (tr *FeatureGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FeatureGroup type. 
+func (tr *FeatureGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Model to the hub type. +func (tr *Model) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Model type. +func (tr *Model) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this NotebookInstance to the hub type. +func (tr *NotebookInstance) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the NotebookInstance type. 
+func (tr *NotebookInstance) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Space to the hub type. +func (tr *Space) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Space type. +func (tr *Space) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this UserProfile to the hub type. +func (tr *UserProfile) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the UserProfile type. 
+func (tr *UserProfile) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Workforce to the hub type. +func (tr *Workforce) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workforce type. +func (tr *Workforce) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Workteam to the hub type. +func (tr *Workteam) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workteam type. 
+func (tr *Workteam) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/sagemaker/v1beta2/zz_app_terraformed.go b/apis/sagemaker/v1beta2/zz_app_terraformed.go new file mode 100755 index 0000000000..f5a187bee9 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_app_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this App +func (mg *App) GetTerraformResourceType() string { + return "aws_sagemaker_app" +} + +// GetConnectionDetailsMapping for this App +func (tr *App) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this App +func (tr *App) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this App +func (tr *App) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this App +func (tr *App) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this App +func (tr 
*App) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this App +func (tr *App) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this App +func (tr *App) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this App +func (tr *App) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this App using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *App) LateInitialize(attrs []byte) (bool, error) { + params := &AppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *App) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_app_types.go b/apis/sagemaker/v1beta2/zz_app_types.go new file mode 100755 index 0000000000..14c9ac077e --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_app_types.go @@ -0,0 +1,272 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AppInitParameters struct { + + // The name of the app. + AppName *string `json:"appName,omitempty" tf:"app_name,omitempty"` + + // The type of app. Valid values are JupyterServer, KernelGateway, RStudioServerPro, RSessionGateway and TensorBoard. + AppType *string `json:"appType,omitempty" tf:"app_type,omitempty"` + + // The domain ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // Reference to a Domain in sagemaker to populate domainId. 
+ // +kubebuilder:validation:Optional + DomainIDRef *v1.Reference `json:"domainIdRef,omitempty" tf:"-"` + + // Selector for a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDSelector *v1.Selector `json:"domainIdSelector,omitempty" tf:"-"` + + // The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.See Resource Spec below. + ResourceSpec *ResourceSpecInitParameters `json:"resourceSpec,omitempty" tf:"resource_spec,omitempty"` + + // The name of the space. At least one of user_profile_name or space_name required. + SpaceName *string `json:"spaceName,omitempty" tf:"space_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The user profile name. At least one of user_profile_name or space_name required. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.UserProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("user_profile_name",false) + UserProfileName *string `json:"userProfileName,omitempty" tf:"user_profile_name,omitempty"` + + // Reference to a UserProfile in sagemaker to populate userProfileName. + // +kubebuilder:validation:Optional + UserProfileNameRef *v1.Reference `json:"userProfileNameRef,omitempty" tf:"-"` + + // Selector for a UserProfile in sagemaker to populate userProfileName. + // +kubebuilder:validation:Optional + UserProfileNameSelector *v1.Selector `json:"userProfileNameSelector,omitempty" tf:"-"` +} + +type AppObservation struct { + + // The name of the app. + AppName *string `json:"appName,omitempty" tf:"app_name,omitempty"` + + // The type of app. Valid values are JupyterServer, KernelGateway, RStudioServerPro, RSessionGateway and TensorBoard. 
+ AppType *string `json:"appType,omitempty" tf:"app_type,omitempty"` + + // The Amazon Resource Name (ARN) of the app. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The domain ID. + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // The Amazon Resource Name (ARN) of the app. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.See Resource Spec below. + ResourceSpec *ResourceSpecObservation `json:"resourceSpec,omitempty" tf:"resource_spec,omitempty"` + + // The name of the space. At least one of user_profile_name or space_name required. + SpaceName *string `json:"spaceName,omitempty" tf:"space_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The user profile name. At least one of user_profile_name or space_name required. + UserProfileName *string `json:"userProfileName,omitempty" tf:"user_profile_name,omitempty"` +} + +type AppParameters struct { + + // The name of the app. + // +kubebuilder:validation:Optional + AppName *string `json:"appName,omitempty" tf:"app_name,omitempty"` + + // The type of app. Valid values are JupyterServer, KernelGateway, RStudioServerPro, RSessionGateway and TensorBoard. + // +kubebuilder:validation:Optional + AppType *string `json:"appType,omitempty" tf:"app_type,omitempty"` + + // The domain ID. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // Reference to a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDRef *v1.Reference `json:"domainIdRef,omitempty" tf:"-"` + + // Selector for a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDSelector *v1.Selector `json:"domainIdSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.See Resource Spec below. + // +kubebuilder:validation:Optional + ResourceSpec *ResourceSpecParameters `json:"resourceSpec,omitempty" tf:"resource_spec,omitempty"` + + // The name of the space. At least one of user_profile_name or space_name required. + // +kubebuilder:validation:Optional + SpaceName *string `json:"spaceName,omitempty" tf:"space_name,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The user profile name. At least one of user_profile_name or space_name required. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.UserProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("user_profile_name",false) + // +kubebuilder:validation:Optional + UserProfileName *string `json:"userProfileName,omitempty" tf:"user_profile_name,omitempty"` + + // Reference to a UserProfile in sagemaker to populate userProfileName. + // +kubebuilder:validation:Optional + UserProfileNameRef *v1.Reference `json:"userProfileNameRef,omitempty" tf:"-"` + + // Selector for a UserProfile in sagemaker to populate userProfileName. + // +kubebuilder:validation:Optional + UserProfileNameSelector *v1.Selector `json:"userProfileNameSelector,omitempty" tf:"-"` +} + +type ResourceSpecInitParameters struct { + + // The instance type that the image version runs on. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type ResourceSpecObservation struct { + + // The instance type that the image version runs on. For valid values see SageMaker Instance Types. 
+ InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type ResourceSpecParameters struct { + + // The instance type that the image version runs on. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. 
+ // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +// AppSpec defines the desired state of App +type AppSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AppParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AppInitParameters `json:"initProvider,omitempty"` +} + +// AppStatus defines the observed state of App. +type AppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// App is the Schema for the Apps API. Provides a SageMaker App resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type App struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.appName) || (has(self.initProvider) && has(self.initProvider.appName))",message="spec.forProvider.appName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.appType) || (has(self.initProvider) && has(self.initProvider.appType))",message="spec.forProvider.appType is a required parameter" + Spec AppSpec `json:"spec"` + Status AppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AppList contains a list of Apps +type AppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []App `json:"items"` +} + +// Repository type metadata. +var ( + App_Kind = "App" + App_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: App_Kind}.String() + App_KindAPIVersion = App_Kind + "." 
+ CRDGroupVersion.String() + App_GroupVersionKind = CRDGroupVersion.WithKind(App_Kind) +) + +func init() { + SchemeBuilder.Register(&App{}, &AppList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_appimageconfig_terraformed.go b/apis/sagemaker/v1beta2/zz_appimageconfig_terraformed.go new file mode 100755 index 0000000000..26ec0068a3 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_appimageconfig_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AppImageConfig +func (mg *AppImageConfig) GetTerraformResourceType() string { + return "aws_sagemaker_app_image_config" +} + +// GetConnectionDetailsMapping for this AppImageConfig +func (tr *AppImageConfig) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AppImageConfig +func (tr *AppImageConfig) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AppImageConfig +func (tr *AppImageConfig) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AppImageConfig +func (tr *AppImageConfig) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AppImageConfig +func (tr *AppImageConfig) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AppImageConfig +func (tr *AppImageConfig) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AppImageConfig +func (tr *AppImageConfig) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AppImageConfig +func (tr *AppImageConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AppImageConfig using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *AppImageConfig) LateInitialize(attrs []byte) (bool, error) { + params := &AppImageConfigParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AppImageConfig) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_appimageconfig_types.go b/apis/sagemaker/v1beta2/zz_appimageconfig_types.go new file mode 100755 index 0000000000..fad8aa38ee --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_appimageconfig_types.go @@ -0,0 +1,458 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AppImageConfigInitParameters struct { + + // The CodeEditorAppImageConfig. You can only specify one image kernel in the AppImageConfig API. This kernel is shown to users before the image starts. After the image runs, all kernels are visible in Code Editor. See Code Editor App Image Config details below. + CodeEditorAppImageConfig *CodeEditorAppImageConfigInitParameters `json:"codeEditorAppImageConfig,omitempty" tf:"code_editor_app_image_config,omitempty"` + + // The JupyterLabAppImageConfig. You can only specify one image kernel in the AppImageConfig API. This kernel is shown to users before the image starts. After the image runs, all kernels are visible in JupyterLab. See Jupyter Lab Image Config details below. 
+ JupyterLabImageConfig *JupyterLabImageConfigInitParameters `json:"jupyterLabImageConfig,omitempty" tf:"jupyter_lab_image_config,omitempty"` + + // The configuration for the file system and kernels in a SageMaker image running as a KernelGateway app. See Kernel Gateway Image Config details below. + KernelGatewayImageConfig *KernelGatewayImageConfigInitParameters `json:"kernelGatewayImageConfig,omitempty" tf:"kernel_gateway_image_config,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AppImageConfigObservation struct { + + // The Amazon Resource Name (ARN) assigned by AWS to this App Image Config. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The CodeEditorAppImageConfig. You can only specify one image kernel in the AppImageConfig API. This kernel is shown to users before the image starts. After the image runs, all kernels are visible in Code Editor. See Code Editor App Image Config details below. + CodeEditorAppImageConfig *CodeEditorAppImageConfigObservation `json:"codeEditorAppImageConfig,omitempty" tf:"code_editor_app_image_config,omitempty"` + + // The name of the App Image Config. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The JupyterLabAppImageConfig. You can only specify one image kernel in the AppImageConfig API. This kernel is shown to users before the image starts. After the image runs, all kernels are visible in JupyterLab. See Jupyter Lab Image Config details below. + JupyterLabImageConfig *JupyterLabImageConfigObservation `json:"jupyterLabImageConfig,omitempty" tf:"jupyter_lab_image_config,omitempty"` + + // The configuration for the file system and kernels in a SageMaker image running as a KernelGateway app. See Kernel Gateway Image Config details below. 
+ KernelGatewayImageConfig *KernelGatewayImageConfigObservation `json:"kernelGatewayImageConfig,omitempty" tf:"kernel_gateway_image_config,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type AppImageConfigParameters struct { + + // The CodeEditorAppImageConfig. You can only specify one image kernel in the AppImageConfig API. This kernel is shown to users before the image starts. After the image runs, all kernels are visible in Code Editor. See Code Editor App Image Config details below. + // +kubebuilder:validation:Optional + CodeEditorAppImageConfig *CodeEditorAppImageConfigParameters `json:"codeEditorAppImageConfig,omitempty" tf:"code_editor_app_image_config,omitempty"` + + // The JupyterLabAppImageConfig. You can only specify one image kernel in the AppImageConfig API. This kernel is shown to users before the image starts. After the image runs, all kernels are visible in JupyterLab. See Jupyter Lab Image Config details below. + // +kubebuilder:validation:Optional + JupyterLabImageConfig *JupyterLabImageConfigParameters `json:"jupyterLabImageConfig,omitempty" tf:"jupyter_lab_image_config,omitempty"` + + // The configuration for the file system and kernels in a SageMaker image running as a KernelGateway app. See Kernel Gateway Image Config details below. + // +kubebuilder:validation:Optional + KernelGatewayImageConfig *KernelGatewayImageConfigParameters `json:"kernelGatewayImageConfig,omitempty" tf:"kernel_gateway_image_config,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CodeEditorAppImageConfigInitParameters struct { + + // The configuration used to run the application image container. See Container Config details below. + ContainerConfig *ContainerConfigInitParameters `json:"containerConfig,omitempty" tf:"container_config,omitempty"` + + // The URL where the Git repository is located. See File System Config details below. + FileSystemConfig *FileSystemConfigInitParameters `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` +} + +type CodeEditorAppImageConfigObservation struct { + + // The configuration used to run the application image container. See Container Config details below. + ContainerConfig *ContainerConfigObservation `json:"containerConfig,omitempty" tf:"container_config,omitempty"` + + // The URL where the Git repository is located. See File System Config details below. + FileSystemConfig *FileSystemConfigObservation `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` +} + +type CodeEditorAppImageConfigParameters struct { + + // The configuration used to run the application image container. See Container Config details below. + // +kubebuilder:validation:Optional + ContainerConfig *ContainerConfigParameters `json:"containerConfig,omitempty" tf:"container_config,omitempty"` + + // The URL where the Git repository is located. See File System Config details below. + // +kubebuilder:validation:Optional + FileSystemConfig *FileSystemConfigParameters `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` +} + +type ContainerConfigInitParameters struct { + + // The arguments for the container when you're running the application. 
+ ContainerArguments []*string `json:"containerArguments,omitempty" tf:"container_arguments,omitempty"` + + // The entrypoint used to run the application in the container. + ContainerEntrypoint []*string `json:"containerEntrypoint,omitempty" tf:"container_entrypoint,omitempty"` + + // The environment variables to set in the container. + // +mapType=granular + ContainerEnvironmentVariables map[string]*string `json:"containerEnvironmentVariables,omitempty" tf:"container_environment_variables,omitempty"` +} + +type ContainerConfigObservation struct { + + // The arguments for the container when you're running the application. + ContainerArguments []*string `json:"containerArguments,omitempty" tf:"container_arguments,omitempty"` + + // The entrypoint used to run the application in the container. + ContainerEntrypoint []*string `json:"containerEntrypoint,omitempty" tf:"container_entrypoint,omitempty"` + + // The environment variables to set in the container. + // +mapType=granular + ContainerEnvironmentVariables map[string]*string `json:"containerEnvironmentVariables,omitempty" tf:"container_environment_variables,omitempty"` +} + +type ContainerConfigParameters struct { + + // The arguments for the container when you're running the application. + // +kubebuilder:validation:Optional + ContainerArguments []*string `json:"containerArguments,omitempty" tf:"container_arguments,omitempty"` + + // The entrypoint used to run the application in the container. + // +kubebuilder:validation:Optional + ContainerEntrypoint []*string `json:"containerEntrypoint,omitempty" tf:"container_entrypoint,omitempty"` + + // The environment variables to set in the container. + // +kubebuilder:validation:Optional + // +mapType=granular + ContainerEnvironmentVariables map[string]*string `json:"containerEnvironmentVariables,omitempty" tf:"container_environment_variables,omitempty"` +} + +type FileSystemConfigInitParameters struct { + + // The default POSIX group ID (GID). 
If not specified, defaults to 100. Valid values are 0 and 100. + DefaultGID *float64 `json:"defaultGid,omitempty" tf:"default_gid,omitempty"` + + // The default POSIX user ID (UID). If not specified, defaults to 1000. Valid values are 0 and 1000. + DefaultUID *float64 `json:"defaultUid,omitempty" tf:"default_uid,omitempty"` + + // The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` +} + +type FileSystemConfigObservation struct { + + // The default POSIX group ID (GID). If not specified, defaults to 100. Valid values are 0 and 100. + DefaultGID *float64 `json:"defaultGid,omitempty" tf:"default_gid,omitempty"` + + // The default POSIX user ID (UID). If not specified, defaults to 1000. Valid values are 0 and 1000. + DefaultUID *float64 `json:"defaultUid,omitempty" tf:"default_uid,omitempty"` + + // The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` +} + +type FileSystemConfigParameters struct { + + // The default POSIX group ID (GID). If not specified, defaults to 100. Valid values are 0 and 100. + // +kubebuilder:validation:Optional + DefaultGID *float64 `json:"defaultGid,omitempty" tf:"default_gid,omitempty"` + + // The default POSIX user ID (UID). If not specified, defaults to 1000. Valid values are 0 and 1000. + // +kubebuilder:validation:Optional + DefaultUID *float64 `json:"defaultUid,omitempty" tf:"default_uid,omitempty"` + + // The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user. 
+ // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` +} + +type JupyterLabImageConfigContainerConfigInitParameters struct { + + // The arguments for the container when you're running the application. + ContainerArguments []*string `json:"containerArguments,omitempty" tf:"container_arguments,omitempty"` + + // The entrypoint used to run the application in the container. + ContainerEntrypoint []*string `json:"containerEntrypoint,omitempty" tf:"container_entrypoint,omitempty"` + + // The environment variables to set in the container. + // +mapType=granular + ContainerEnvironmentVariables map[string]*string `json:"containerEnvironmentVariables,omitempty" tf:"container_environment_variables,omitempty"` +} + +type JupyterLabImageConfigContainerConfigObservation struct { + + // The arguments for the container when you're running the application. + ContainerArguments []*string `json:"containerArguments,omitempty" tf:"container_arguments,omitempty"` + + // The entrypoint used to run the application in the container. + ContainerEntrypoint []*string `json:"containerEntrypoint,omitempty" tf:"container_entrypoint,omitempty"` + + // The environment variables to set in the container. + // +mapType=granular + ContainerEnvironmentVariables map[string]*string `json:"containerEnvironmentVariables,omitempty" tf:"container_environment_variables,omitempty"` +} + +type JupyterLabImageConfigContainerConfigParameters struct { + + // The arguments for the container when you're running the application. + // +kubebuilder:validation:Optional + ContainerArguments []*string `json:"containerArguments,omitempty" tf:"container_arguments,omitempty"` + + // The entrypoint used to run the application in the container. + // +kubebuilder:validation:Optional + ContainerEntrypoint []*string `json:"containerEntrypoint,omitempty" tf:"container_entrypoint,omitempty"` + + // The environment variables to set in the container. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + ContainerEnvironmentVariables map[string]*string `json:"containerEnvironmentVariables,omitempty" tf:"container_environment_variables,omitempty"` +} + +type JupyterLabImageConfigFileSystemConfigInitParameters struct { + + // The default POSIX group ID (GID). If not specified, defaults to 100. Valid values are 0 and 100. + DefaultGID *float64 `json:"defaultGid,omitempty" tf:"default_gid,omitempty"` + + // The default POSIX user ID (UID). If not specified, defaults to 1000. Valid values are 0 and 1000. + DefaultUID *float64 `json:"defaultUid,omitempty" tf:"default_uid,omitempty"` + + // The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` +} + +type JupyterLabImageConfigFileSystemConfigObservation struct { + + // The default POSIX group ID (GID). If not specified, defaults to 100. Valid values are 0 and 100. + DefaultGID *float64 `json:"defaultGid,omitempty" tf:"default_gid,omitempty"` + + // The default POSIX user ID (UID). If not specified, defaults to 1000. Valid values are 0 and 1000. + DefaultUID *float64 `json:"defaultUid,omitempty" tf:"default_uid,omitempty"` + + // The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` +} + +type JupyterLabImageConfigFileSystemConfigParameters struct { + + // The default POSIX group ID (GID). If not specified, defaults to 100. Valid values are 0 and 100. + // +kubebuilder:validation:Optional + DefaultGID *float64 `json:"defaultGid,omitempty" tf:"default_gid,omitempty"` + + // The default POSIX user ID (UID). If not specified, defaults to 1000. Valid values are 0 and 1000. 
+ // +kubebuilder:validation:Optional + DefaultUID *float64 `json:"defaultUid,omitempty" tf:"default_uid,omitempty"` + + // The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` +} + +type JupyterLabImageConfigInitParameters struct { + + // The configuration used to run the application image container. See Container Config details below. + ContainerConfig *JupyterLabImageConfigContainerConfigInitParameters `json:"containerConfig,omitempty" tf:"container_config,omitempty"` + + // The URL where the Git repository is located. See File System Config details below. + FileSystemConfig *JupyterLabImageConfigFileSystemConfigInitParameters `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` +} + +type JupyterLabImageConfigObservation struct { + + // The configuration used to run the application image container. See Container Config details below. + ContainerConfig *JupyterLabImageConfigContainerConfigObservation `json:"containerConfig,omitempty" tf:"container_config,omitempty"` + + // The URL where the Git repository is located. See File System Config details below. + FileSystemConfig *JupyterLabImageConfigFileSystemConfigObservation `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` +} + +type JupyterLabImageConfigParameters struct { + + // The configuration used to run the application image container. See Container Config details below. + // +kubebuilder:validation:Optional + ContainerConfig *JupyterLabImageConfigContainerConfigParameters `json:"containerConfig,omitempty" tf:"container_config,omitempty"` + + // The URL where the Git repository is located. See File System Config details below. 
+ // +kubebuilder:validation:Optional + FileSystemConfig *JupyterLabImageConfigFileSystemConfigParameters `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` +} + +type KernelGatewayImageConfigFileSystemConfigInitParameters struct { + + // The default POSIX group ID (GID). If not specified, defaults to 100. Valid values are 0 and 100. + DefaultGID *float64 `json:"defaultGid,omitempty" tf:"default_gid,omitempty"` + + // The default POSIX user ID (UID). If not specified, defaults to 1000. Valid values are 0 and 1000. + DefaultUID *float64 `json:"defaultUid,omitempty" tf:"default_uid,omitempty"` + + // The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` +} + +type KernelGatewayImageConfigFileSystemConfigObservation struct { + + // The default POSIX group ID (GID). If not specified, defaults to 100. Valid values are 0 and 100. + DefaultGID *float64 `json:"defaultGid,omitempty" tf:"default_gid,omitempty"` + + // The default POSIX user ID (UID). If not specified, defaults to 1000. Valid values are 0 and 1000. + DefaultUID *float64 `json:"defaultUid,omitempty" tf:"default_uid,omitempty"` + + // The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` +} + +type KernelGatewayImageConfigFileSystemConfigParameters struct { + + // The default POSIX group ID (GID). If not specified, defaults to 100. Valid values are 0 and 100. + // +kubebuilder:validation:Optional + DefaultGID *float64 `json:"defaultGid,omitempty" tf:"default_gid,omitempty"` + + // The default POSIX user ID (UID). If not specified, defaults to 1000. Valid values are 0 and 1000. 
+ // +kubebuilder:validation:Optional + DefaultUID *float64 `json:"defaultUid,omitempty" tf:"default_uid,omitempty"` + + // The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` +} + +type KernelGatewayImageConfigInitParameters struct { + + // The URL where the Git repository is located. See File System Config details below. + FileSystemConfig *KernelGatewayImageConfigFileSystemConfigInitParameters `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` + + // The default branch for the Git repository. See Kernel Spec details below. + KernelSpec []KernelSpecInitParameters `json:"kernelSpec,omitempty" tf:"kernel_spec,omitempty"` +} + +type KernelGatewayImageConfigObservation struct { + + // The URL where the Git repository is located. See File System Config details below. + FileSystemConfig *KernelGatewayImageConfigFileSystemConfigObservation `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` + + // The default branch for the Git repository. See Kernel Spec details below. + KernelSpec []KernelSpecObservation `json:"kernelSpec,omitempty" tf:"kernel_spec,omitempty"` +} + +type KernelGatewayImageConfigParameters struct { + + // The URL where the Git repository is located. See File System Config details below. + // +kubebuilder:validation:Optional + FileSystemConfig *KernelGatewayImageConfigFileSystemConfigParameters `json:"fileSystemConfig,omitempty" tf:"file_system_config,omitempty"` + + // The default branch for the Git repository. See Kernel Spec details below. + // +kubebuilder:validation:Optional + KernelSpec []KernelSpecParameters `json:"kernelSpec" tf:"kernel_spec,omitempty"` +} + +type KernelSpecInitParameters struct { + + // The display name of the kernel. 
+ DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The name of the kernel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type KernelSpecObservation struct { + + // The display name of the kernel. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The name of the kernel. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type KernelSpecParameters struct { + + // The display name of the kernel. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The name of the kernel. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +// AppImageConfigSpec defines the desired state of AppImageConfig +type AppImageConfigSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AppImageConfigParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AppImageConfigInitParameters `json:"initProvider,omitempty"` +} + +// AppImageConfigStatus defines the observed state of AppImageConfig. 
+type AppImageConfigStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AppImageConfigObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AppImageConfig is the Schema for the AppImageConfigs API. Provides a SageMaker App Image Config resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type AppImageConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AppImageConfigSpec `json:"spec"` + Status AppImageConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AppImageConfigList contains a list of AppImageConfigs +type AppImageConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AppImageConfig `json:"items"` +} + +// Repository type metadata. +var ( + AppImageConfig_Kind = "AppImageConfig" + AppImageConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AppImageConfig_Kind}.String() + AppImageConfig_KindAPIVersion = AppImageConfig_Kind + "." 
+ CRDGroupVersion.String() + AppImageConfig_GroupVersionKind = CRDGroupVersion.WithKind(AppImageConfig_Kind) +) + +func init() { + SchemeBuilder.Register(&AppImageConfig{}, &AppImageConfigList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_coderepository_terraformed.go b/apis/sagemaker/v1beta2/zz_coderepository_terraformed.go new file mode 100755 index 0000000000..636c68919e --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_coderepository_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CodeRepository +func (mg *CodeRepository) GetTerraformResourceType() string { + return "aws_sagemaker_code_repository" +} + +// GetConnectionDetailsMapping for this CodeRepository +func (tr *CodeRepository) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CodeRepository +func (tr *CodeRepository) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CodeRepository +func (tr *CodeRepository) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CodeRepository +func (tr *CodeRepository) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CodeRepository +func (tr *CodeRepository) GetParameters() (map[string]any, error) 
{ + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CodeRepository +func (tr *CodeRepository) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CodeRepository +func (tr *CodeRepository) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CodeRepository +func (tr *CodeRepository) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CodeRepository using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *CodeRepository) LateInitialize(attrs []byte) (bool, error) { + params := &CodeRepositoryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CodeRepository) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_coderepository_types.go b/apis/sagemaker/v1beta2/zz_coderepository_types.go new file mode 100755 index 0000000000..7d10d20d04 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_coderepository_types.go @@ -0,0 +1,181 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CodeRepositoryInitParameters struct { + + // Specifies details about the repository. see Git Config details below. + GitConfig *GitConfigInitParameters `json:"gitConfig,omitempty" tf:"git_config,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CodeRepositoryObservation struct { + + // The Amazon Resource Name (ARN) assigned by AWS to this Code Repository. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specifies details about the repository. see Git Config details below. + GitConfig *GitConfigObservation `json:"gitConfig,omitempty" tf:"git_config,omitempty"` + + // The name of the Code Repository. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type CodeRepositoryParameters struct { + + // Specifies details about the repository. see Git Config details below. + // +kubebuilder:validation:Optional + GitConfig *GitConfigParameters `json:"gitConfig,omitempty" tf:"git_config,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type GitConfigInitParameters struct { + + // The default branch for the Git repository. + Branch *string `json:"branch,omitempty" tf:"branch,omitempty"` + + // The URL where the Git repository is located. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` + + // The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format: {"username": UserName, "password": Password} + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` + + // Reference to a Secret in secretsmanager to populate secretArn. 
+ // +kubebuilder:validation:Optional + SecretArnRef *v1.Reference `json:"secretArnRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate secretArn. + // +kubebuilder:validation:Optional + SecretArnSelector *v1.Selector `json:"secretArnSelector,omitempty" tf:"-"` +} + +type GitConfigObservation struct { + + // The default branch for the Git repository. + Branch *string `json:"branch,omitempty" tf:"branch,omitempty"` + + // The URL where the Git repository is located. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` + + // The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format: {"username": UserName, "password": Password} + SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` +} + +type GitConfigParameters struct { + + // The default branch for the Git repository. + // +kubebuilder:validation:Optional + Branch *string `json:"branch,omitempty" tf:"branch,omitempty"` + + // The URL where the Git repository is located. + // +kubebuilder:validation:Optional + RepositoryURL *string `json:"repositoryUrl" tf:"repository_url,omitempty"` + + // The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format: {"username": UserName, "password": Password} + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` + + // Reference to a Secret in secretsmanager to populate secretArn. 
+ // +kubebuilder:validation:Optional + SecretArnRef *v1.Reference `json:"secretArnRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate secretArn. + // +kubebuilder:validation:Optional + SecretArnSelector *v1.Selector `json:"secretArnSelector,omitempty" tf:"-"` +} + +// CodeRepositorySpec defines the desired state of CodeRepository +type CodeRepositorySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CodeRepositoryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CodeRepositoryInitParameters `json:"initProvider,omitempty"` +} + +// CodeRepositoryStatus defines the observed state of CodeRepository. +type CodeRepositoryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CodeRepositoryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CodeRepository is the Schema for the CodeRepositorys API. Provides a SageMaker Code Repository resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CodeRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gitConfig) || (has(self.initProvider) && has(self.initProvider.gitConfig))",message="spec.forProvider.gitConfig is a required parameter" + Spec CodeRepositorySpec `json:"spec"` + Status CodeRepositoryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CodeRepositoryList contains a list of CodeRepositorys +type CodeRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CodeRepository `json:"items"` +} + +// Repository type metadata. +var ( + CodeRepository_Kind = "CodeRepository" + CodeRepository_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CodeRepository_Kind}.String() + CodeRepository_KindAPIVersion = CodeRepository_Kind + "." 
+ CRDGroupVersion.String() + CodeRepository_GroupVersionKind = CRDGroupVersion.WithKind(CodeRepository_Kind) +) + +func init() { + SchemeBuilder.Register(&CodeRepository{}, &CodeRepositoryList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_device_terraformed.go b/apis/sagemaker/v1beta2/zz_device_terraformed.go new file mode 100755 index 0000000000..d004aa97ee --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_device_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Device +func (mg *Device) GetTerraformResourceType() string { + return "aws_sagemaker_device" +} + +// GetConnectionDetailsMapping for this Device +func (tr *Device) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Device +func (tr *Device) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Device +func (tr *Device) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Device +func (tr *Device) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Device +func (tr *Device) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + 
return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Device +func (tr *Device) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Device +func (tr *Device) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Device +func (tr *Device) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Device using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Device) LateInitialize(attrs []byte) (bool, error) { + params := &DeviceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Device) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_device_types.go b/apis/sagemaker/v1beta2/zz_device_types.go new file mode 100755 index 0000000000..185c04443d --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_device_types.go @@ -0,0 +1,173 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DeviceDeviceInitParameters struct { + + // A description for the device. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the device. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Amazon Web Services Internet of Things (IoT) object name. + IotThingName *string `json:"iotThingName,omitempty" tf:"iot_thing_name,omitempty"` +} + +type DeviceDeviceObservation struct { + + // A description for the device. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the device. + DeviceName *string `json:"deviceName,omitempty" tf:"device_name,omitempty"` + + // Amazon Web Services Internet of Things (IoT) object name. 
+ IotThingName *string `json:"iotThingName,omitempty" tf:"iot_thing_name,omitempty"` +} + +type DeviceDeviceParameters struct { + + // A description for the device. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the device. + // +kubebuilder:validation:Optional + DeviceName *string `json:"deviceName" tf:"device_name,omitempty"` + + // Amazon Web Services Internet of Things (IoT) object name. + // +kubebuilder:validation:Optional + IotThingName *string `json:"iotThingName,omitempty" tf:"iot_thing_name,omitempty"` +} + +type DeviceInitParameters struct { + + // The device to register with SageMaker Edge Manager. See Device details below. + Device *DeviceDeviceInitParameters `json:"device,omitempty" tf:"device,omitempty"` + + // The name of the Device Fleet. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.DeviceFleet + DeviceFleetName *string `json:"deviceFleetName,omitempty" tf:"device_fleet_name,omitempty"` + + // Reference to a DeviceFleet in sagemaker to populate deviceFleetName. + // +kubebuilder:validation:Optional + DeviceFleetNameRef *v1.Reference `json:"deviceFleetNameRef,omitempty" tf:"-"` + + // Selector for a DeviceFleet in sagemaker to populate deviceFleetName. + // +kubebuilder:validation:Optional + DeviceFleetNameSelector *v1.Selector `json:"deviceFleetNameSelector,omitempty" tf:"-"` +} + +type DeviceObservation struct { + AgentVersion *string `json:"agentVersion,omitempty" tf:"agent_version,omitempty"` + + // The Amazon Resource Name (ARN) assigned by AWS to this Device. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The device to register with SageMaker Edge Manager. See Device details below. + Device *DeviceDeviceObservation `json:"device,omitempty" tf:"device,omitempty"` + + // The name of the Device Fleet. 
+ DeviceFleetName *string `json:"deviceFleetName,omitempty" tf:"device_fleet_name,omitempty"` + + // The id is constructed from device-fleet-name/device-name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type DeviceParameters struct { + + // The device to register with SageMaker Edge Manager. See Device details below. + // +kubebuilder:validation:Optional + Device *DeviceDeviceParameters `json:"device,omitempty" tf:"device,omitempty"` + + // The name of the Device Fleet. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.DeviceFleet + // +kubebuilder:validation:Optional + DeviceFleetName *string `json:"deviceFleetName,omitempty" tf:"device_fleet_name,omitempty"` + + // Reference to a DeviceFleet in sagemaker to populate deviceFleetName. + // +kubebuilder:validation:Optional + DeviceFleetNameRef *v1.Reference `json:"deviceFleetNameRef,omitempty" tf:"-"` + + // Selector for a DeviceFleet in sagemaker to populate deviceFleetName. + // +kubebuilder:validation:Optional + DeviceFleetNameSelector *v1.Selector `json:"deviceFleetNameSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// DeviceSpec defines the desired state of Device +type DeviceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DeviceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DeviceInitParameters `json:"initProvider,omitempty"` +} + +// DeviceStatus defines the observed state of Device. +type DeviceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DeviceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Device is the Schema for the Devices API. Provides a SageMaker Device resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Device struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.device) || (has(self.initProvider) && has(self.initProvider.device))",message="spec.forProvider.device is a required parameter" + Spec DeviceSpec `json:"spec"` + Status DeviceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeviceList contains a list of Devices +type DeviceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Device `json:"items"` +} + +// Repository type metadata. 
+var ( + Device_Kind = "Device" + Device_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Device_Kind}.String() + Device_KindAPIVersion = Device_Kind + "." + CRDGroupVersion.String() + Device_GroupVersionKind = CRDGroupVersion.WithKind(Device_Kind) +) + +func init() { + SchemeBuilder.Register(&Device{}, &DeviceList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_devicefleet_terraformed.go b/apis/sagemaker/v1beta2/zz_devicefleet_terraformed.go new file mode 100755 index 0000000000..266cf1da8f --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_devicefleet_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DeviceFleet +func (mg *DeviceFleet) GetTerraformResourceType() string { + return "aws_sagemaker_device_fleet" +} + +// GetConnectionDetailsMapping for this DeviceFleet +func (tr *DeviceFleet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DeviceFleet +func (tr *DeviceFleet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DeviceFleet +func (tr *DeviceFleet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DeviceFleet +func (tr *DeviceFleet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of 
this DeviceFleet +func (tr *DeviceFleet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DeviceFleet +func (tr *DeviceFleet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DeviceFleet +func (tr *DeviceFleet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DeviceFleet +func (tr *DeviceFleet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DeviceFleet using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *DeviceFleet) LateInitialize(attrs []byte) (bool, error) { + params := &DeviceFleetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DeviceFleet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_devicefleet_types.go b/apis/sagemaker/v1beta2/zz_devicefleet_types.go new file mode 100755 index 0000000000..076e6d7612 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_devicefleet_types.go @@ -0,0 +1,203 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DeviceFleetInitParameters struct { + + // A description of the fleet. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}". + EnableIotRoleAlias *bool `json:"enableIotRoleAlias,omitempty" tf:"enable_iot_role_alias,omitempty"` + + // Specifies details about the repository. see Output Config details below. 
+ OutputConfig *OutputConfigInitParameters `json:"outputConfig,omitempty" tf:"output_config,omitempty"` + + // The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DeviceFleetObservation struct { + + // The Amazon Resource Name (ARN) assigned by AWS to this Device Fleet. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A description of the fleet. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}". + EnableIotRoleAlias *bool `json:"enableIotRoleAlias,omitempty" tf:"enable_iot_role_alias,omitempty"` + + // The name of the Device Fleet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + IotRoleAlias *string `json:"iotRoleAlias,omitempty" tf:"iot_role_alias,omitempty"` + + // Specifies details about the repository. see Output Config details below. + OutputConfig *OutputConfigObservation `json:"outputConfig,omitempty" tf:"output_config,omitempty"` + + // The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT). 
+ RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type DeviceFleetParameters struct { + + // A description of the fleet. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}". + // +kubebuilder:validation:Optional + EnableIotRoleAlias *bool `json:"enableIotRoleAlias,omitempty" tf:"enable_iot_role_alias,omitempty"` + + // Specifies details about the repository. see Output Config details below. + // +kubebuilder:validation:Optional + OutputConfig *OutputConfigParameters `json:"outputConfig,omitempty" tf:"output_config,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. 
+ // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type OutputConfigInitParameters struct { + + // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The Amazon Simple Storage (S3) bucker URI. + S3OutputLocation *string `json:"s3OutputLocation,omitempty" tf:"s3_output_location,omitempty"` +} + +type OutputConfigObservation struct { + + // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The Amazon Simple Storage (S3) bucker URI. + S3OutputLocation *string `json:"s3OutputLocation,omitempty" tf:"s3_output_location,omitempty"` +} + +type OutputConfigParameters struct { + + // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The Amazon Simple Storage (S3) bucker URI. 
+ // +kubebuilder:validation:Optional + S3OutputLocation *string `json:"s3OutputLocation" tf:"s3_output_location,omitempty"` +} + +// DeviceFleetSpec defines the desired state of DeviceFleet +type DeviceFleetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DeviceFleetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DeviceFleetInitParameters `json:"initProvider,omitempty"` +} + +// DeviceFleetStatus defines the observed state of DeviceFleet. +type DeviceFleetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DeviceFleetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DeviceFleet is the Schema for the DeviceFleets API. Provides a SageMaker Device Fleet resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws},path=devicefleet +type DeviceFleet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.outputConfig) || (has(self.initProvider) && has(self.initProvider.outputConfig))",message="spec.forProvider.outputConfig is a required parameter" + Spec DeviceFleetSpec `json:"spec"` + Status DeviceFleetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeviceFleetList contains a list of DeviceFleets +type DeviceFleetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DeviceFleet `json:"items"` +} + +// Repository type metadata. +var ( + DeviceFleet_Kind = "DeviceFleet" + DeviceFleet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DeviceFleet_Kind}.String() + DeviceFleet_KindAPIVersion = DeviceFleet_Kind + "." 
+ CRDGroupVersion.String() + DeviceFleet_GroupVersionKind = CRDGroupVersion.WithKind(DeviceFleet_Kind) +) + +func init() { + SchemeBuilder.Register(&DeviceFleet{}, &DeviceFleetList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_domain_terraformed.go b/apis/sagemaker/v1beta2/zz_domain_terraformed.go new file mode 100755 index 0000000000..de49fc412c --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_domain_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Domain +func (mg *Domain) GetTerraformResourceType() string { + return "aws_sagemaker_domain" +} + +// GetConnectionDetailsMapping for this Domain +func (tr *Domain) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Domain +func (tr *Domain) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Domain +func (tr *Domain) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Domain +func (tr *Domain) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Domain +func (tr *Domain) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Domain +func (tr *Domain) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Domain +func (tr *Domain) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Domain +func (tr *Domain) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Domain using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Domain) LateInitialize(attrs []byte) (bool, error) { + params := &DomainParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Domain) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_domain_types.go b/apis/sagemaker/v1beta2/zz_domain_types.go new file mode 100755 index 0000000000..bc335916c9 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_domain_types.go @@ -0,0 +1,2162 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CanvasAppSettingsInitParameters struct { + + // The model deployment settings for the SageMaker Canvas application. See direct_deploy_settings Block below. + DirectDeploySettings *DirectDeploySettingsInitParameters `json:"directDeploySettings,omitempty" tf:"direct_deploy_settings,omitempty"` + + // The settings for connecting to an external data source with OAuth. See identity_provider_oauth_settings Block below. + IdentityProviderOauthSettings []IdentityProviderOauthSettingsInitParameters `json:"identityProviderOauthSettings,omitempty" tf:"identity_provider_oauth_settings,omitempty"` + + // The settings for document querying. See kendra_settings Block below. 
+ KendraSettings *KendraSettingsInitParameters `json:"kendraSettings,omitempty" tf:"kendra_settings,omitempty"` + + // The model registry settings for the SageMaker Canvas application. See model_register_settings Block below. + ModelRegisterSettings *ModelRegisterSettingsInitParameters `json:"modelRegisterSettings,omitempty" tf:"model_register_settings,omitempty"` + + // Time series forecast settings for the Canvas app. See time_series_forecasting_settings Block below. + TimeSeriesForecastingSettings *TimeSeriesForecastingSettingsInitParameters `json:"timeSeriesForecastingSettings,omitempty" tf:"time_series_forecasting_settings,omitempty"` + + // The workspace settings for the SageMaker Canvas application. See workspace_settings Block below. + WorkspaceSettings *WorkspaceSettingsInitParameters `json:"workspaceSettings,omitempty" tf:"workspace_settings,omitempty"` +} + +type CanvasAppSettingsObservation struct { + + // The model deployment settings for the SageMaker Canvas application. See direct_deploy_settings Block below. + DirectDeploySettings *DirectDeploySettingsObservation `json:"directDeploySettings,omitempty" tf:"direct_deploy_settings,omitempty"` + + // The settings for connecting to an external data source with OAuth. See identity_provider_oauth_settings Block below. + IdentityProviderOauthSettings []IdentityProviderOauthSettingsObservation `json:"identityProviderOauthSettings,omitempty" tf:"identity_provider_oauth_settings,omitempty"` + + // The settings for document querying. See kendra_settings Block below. + KendraSettings *KendraSettingsObservation `json:"kendraSettings,omitempty" tf:"kendra_settings,omitempty"` + + // The model registry settings for the SageMaker Canvas application. See model_register_settings Block below. + ModelRegisterSettings *ModelRegisterSettingsObservation `json:"modelRegisterSettings,omitempty" tf:"model_register_settings,omitempty"` + + // Time series forecast settings for the Canvas app. 
See time_series_forecasting_settings Block below. + TimeSeriesForecastingSettings *TimeSeriesForecastingSettingsObservation `json:"timeSeriesForecastingSettings,omitempty" tf:"time_series_forecasting_settings,omitempty"` + + // The workspace settings for the SageMaker Canvas application. See workspace_settings Block below. + WorkspaceSettings *WorkspaceSettingsObservation `json:"workspaceSettings,omitempty" tf:"workspace_settings,omitempty"` +} + +type CanvasAppSettingsParameters struct { + + // The model deployment settings for the SageMaker Canvas application. See direct_deploy_settings Block below. + // +kubebuilder:validation:Optional + DirectDeploySettings *DirectDeploySettingsParameters `json:"directDeploySettings,omitempty" tf:"direct_deploy_settings,omitempty"` + + // The settings for connecting to an external data source with OAuth. See identity_provider_oauth_settings Block below. + // +kubebuilder:validation:Optional + IdentityProviderOauthSettings []IdentityProviderOauthSettingsParameters `json:"identityProviderOauthSettings,omitempty" tf:"identity_provider_oauth_settings,omitempty"` + + // The settings for document querying. See kendra_settings Block below. + // +kubebuilder:validation:Optional + KendraSettings *KendraSettingsParameters `json:"kendraSettings,omitempty" tf:"kendra_settings,omitempty"` + + // The model registry settings for the SageMaker Canvas application. See model_register_settings Block below. + // +kubebuilder:validation:Optional + ModelRegisterSettings *ModelRegisterSettingsParameters `json:"modelRegisterSettings,omitempty" tf:"model_register_settings,omitempty"` + + // Time series forecast settings for the Canvas app. See time_series_forecasting_settings Block below. 
+ // +kubebuilder:validation:Optional + TimeSeriesForecastingSettings *TimeSeriesForecastingSettingsParameters `json:"timeSeriesForecastingSettings,omitempty" tf:"time_series_forecasting_settings,omitempty"` + + // The workspace settings for the SageMaker Canvas application. See workspace_settings Block below. + // +kubebuilder:validation:Optional + WorkspaceSettings *WorkspaceSettingsParameters `json:"workspaceSettings,omitempty" tf:"workspace_settings,omitempty"` +} + +type CodeEditorAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type CodeEditorAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. 
+ SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type CodeEditorAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type CodeEditorAppSettingsInitParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. 
+ DefaultResourceSpec *CodeEditorAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type CodeEditorAppSettingsObservation struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *CodeEditorAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type CodeEditorAppSettingsParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *CodeEditorAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type CustomFileSystemConfigInitParameters struct { + + // The default EBS storage settings for a private space. See efs_file_system_config Block below. + EFSFileSystemConfig *EFSFileSystemConfigInitParameters `json:"efsFileSystemConfig,omitempty" tf:"efs_file_system_config,omitempty"` +} + +type CustomFileSystemConfigObservation struct { + + // The default EBS storage settings for a private space. See efs_file_system_config Block below. 
+ EFSFileSystemConfig *EFSFileSystemConfigObservation `json:"efsFileSystemConfig,omitempty" tf:"efs_file_system_config,omitempty"` +} + +type CustomFileSystemConfigParameters struct { + + // The default EBS storage settings for a private space. See efs_file_system_config Block below. + // +kubebuilder:validation:Optional + EFSFileSystemConfig *EFSFileSystemConfigParameters `json:"efsFileSystemConfig,omitempty" tf:"efs_file_system_config,omitempty"` +} + +type CustomImageInitParameters struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type CustomImageObservation struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type CustomImageParameters struct { + + // The name of the App Image Config. + // +kubebuilder:validation:Optional + AppImageConfigName *string `json:"appImageConfigName" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + // +kubebuilder:validation:Optional + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type CustomPosixUserConfigInitParameters struct { + + // The POSIX group ID. 
+ GID *float64 `json:"gid,omitempty" tf:"gid,omitempty"` + + // The POSIX user ID. + UID *float64 `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type CustomPosixUserConfigObservation struct { + + // The POSIX group ID. + GID *float64 `json:"gid,omitempty" tf:"gid,omitempty"` + + // The POSIX user ID. + UID *float64 `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type CustomPosixUserConfigParameters struct { + + // The POSIX group ID. + // +kubebuilder:validation:Optional + GID *float64 `json:"gid" tf:"gid,omitempty"` + + // The POSIX user ID. + // +kubebuilder:validation:Optional + UID *float64 `json:"uid" tf:"uid,omitempty"` +} + +type DefaultEBSStorageSettingsInitParameters struct { + + // The default size of the EBS storage volume for a private space. + DefaultEBSVolumeSizeInGb *float64 `json:"defaultEbsVolumeSizeInGb,omitempty" tf:"default_ebs_volume_size_in_gb,omitempty"` + + // The maximum size of the EBS storage volume for a private space. + MaximumEBSVolumeSizeInGb *float64 `json:"maximumEbsVolumeSizeInGb,omitempty" tf:"maximum_ebs_volume_size_in_gb,omitempty"` +} + +type DefaultEBSStorageSettingsObservation struct { + + // The default size of the EBS storage volume for a private space. + DefaultEBSVolumeSizeInGb *float64 `json:"defaultEbsVolumeSizeInGb,omitempty" tf:"default_ebs_volume_size_in_gb,omitempty"` + + // The maximum size of the EBS storage volume for a private space. + MaximumEBSVolumeSizeInGb *float64 `json:"maximumEbsVolumeSizeInGb,omitempty" tf:"maximum_ebs_volume_size_in_gb,omitempty"` +} + +type DefaultEBSStorageSettingsParameters struct { + + // The default size of the EBS storage volume for a private space. + // +kubebuilder:validation:Optional + DefaultEBSVolumeSizeInGb *float64 `json:"defaultEbsVolumeSizeInGb" tf:"default_ebs_volume_size_in_gb,omitempty"` + + // The maximum size of the EBS storage volume for a private space. 
+ // +kubebuilder:validation:Optional + MaximumEBSVolumeSizeInGb *float64 `json:"maximumEbsVolumeSizeInGb" tf:"maximum_ebs_volume_size_in_gb,omitempty"` +} + +type DefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type DefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. 
+ SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type DefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type DefaultSpaceSettingsInitParameters struct { + + // The execution role for the space. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // The Jupyter server's app settings. See jupyter_server_app_settings Block below. + JupyterServerAppSettings *JupyterServerAppSettingsInitParameters `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See kernel_gateway_app_settings Block below. 
+ KernelGatewayAppSettings *KernelGatewayAppSettingsInitParameters `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // The security groups for the Amazon Virtual Private Cloud that the space uses for communication. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` +} + +type DefaultSpaceSettingsObservation struct { + + // The execution role for the space. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // The Jupyter server's app settings. See jupyter_server_app_settings Block below. + JupyterServerAppSettings *JupyterServerAppSettingsObservation `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See kernel_gateway_app_settings Block below. + KernelGatewayAppSettings *KernelGatewayAppSettingsObservation `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // The security groups for the Amazon Virtual Private Cloud that the space uses for communication. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` +} + +type DefaultSpaceSettingsParameters struct { + + // The execution role for the space. + // +kubebuilder:validation:Optional + ExecutionRole *string `json:"executionRole" tf:"execution_role,omitempty"` + + // The Jupyter server's app settings. See jupyter_server_app_settings Block below. + // +kubebuilder:validation:Optional + JupyterServerAppSettings *JupyterServerAppSettingsParameters `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See kernel_gateway_app_settings Block below. 
+ // +kubebuilder:validation:Optional + KernelGatewayAppSettings *KernelGatewayAppSettingsParameters `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // The security groups for the Amazon Virtual Private Cloud that the space uses for communication. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` +} + +type DefaultUserSettingsInitParameters struct { + + // The Canvas app settings. See canvas_app_settings Block below. + CanvasAppSettings *CanvasAppSettingsInitParameters `json:"canvasAppSettings,omitempty" tf:"canvas_app_settings,omitempty"` + + // The Code Editor application settings. See code_editor_app_settings Block below. + CodeEditorAppSettings *CodeEditorAppSettingsInitParameters `json:"codeEditorAppSettings,omitempty" tf:"code_editor_app_settings,omitempty"` + + // The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio. See custom_file_system_config Block below. + CustomFileSystemConfig []CustomFileSystemConfigInitParameters `json:"customFileSystemConfig,omitempty" tf:"custom_file_system_config,omitempty"` + + // Details about the POSIX identity that is used for file system operations. See custom_posix_user_config Block below. + CustomPosixUserConfig *CustomPosixUserConfigInitParameters `json:"customPosixUserConfig,omitempty" tf:"custom_posix_user_config,omitempty"` + + // The default experience that the user is directed to when accessing the domain. The supported values are: studio::: Indicates that Studio is the default experience. This value can only be passed if StudioWebPortal is set to ENABLED. app:JupyterServer:: Indicates that Studio Classic is the default experience. + DefaultLandingURI *string `json:"defaultLandingUri,omitempty" tf:"default_landing_uri,omitempty"` + + // The execution role ARN for the user. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // Reference to a Role in iam to populate executionRole. + // +kubebuilder:validation:Optional + ExecutionRoleRef *v1.Reference `json:"executionRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRole. + // +kubebuilder:validation:Optional + ExecutionRoleSelector *v1.Selector `json:"executionRoleSelector,omitempty" tf:"-"` + + // The settings for the JupyterLab application. See jupyter_lab_app_settings Block below. + JupyterLabAppSettings *JupyterLabAppSettingsInitParameters `json:"jupyterLabAppSettings,omitempty" tf:"jupyter_lab_app_settings,omitempty"` + + // The Jupyter server's app settings. See jupyter_server_app_settings Block below. + JupyterServerAppSettings *DefaultUserSettingsJupyterServerAppSettingsInitParameters `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See kernel_gateway_app_settings Block below. + KernelGatewayAppSettings *DefaultUserSettingsKernelGatewayAppSettingsInitParameters `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // The RSession app settings. See r_session_app_settings Block below. + RSessionAppSettings *RSessionAppSettingsInitParameters `json:"rSessionAppSettings,omitempty" tf:"r_session_app_settings,omitempty"` + + // A collection of settings that configure user interaction with the RStudioServerPro app. See r_studio_server_pro_app_settings Block below. 
+ RStudioServerProAppSettings *RStudioServerProAppSettingsInitParameters `json:"rStudioServerProAppSettings,omitempty" tf:"r_studio_server_pro_app_settings,omitempty"` + + // A list of security group IDs that will be attached to the user. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The sharing settings. See sharing_settings Block below. + SharingSettings *SharingSettingsInitParameters `json:"sharingSettings,omitempty" tf:"sharing_settings,omitempty"` + + // The storage settings for a private space. See space_storage_settings Block below. + SpaceStorageSettings *SpaceStorageSettingsInitParameters `json:"spaceStorageSettings,omitempty" tf:"space_storage_settings,omitempty"` + + // Whether the user can access Studio. If this value is set to DISABLED, the user cannot access Studio, even if that is the default experience for the domain. Valid values are ENABLED and DISABLED. + StudioWebPortal *string `json:"studioWebPortal,omitempty" tf:"studio_web_portal,omitempty"` + + // The TensorBoard app settings. See tensor_board_app_settings Block below. + TensorBoardAppSettings *TensorBoardAppSettingsInitParameters `json:"tensorBoardAppSettings,omitempty" tf:"tensor_board_app_settings,omitempty"` +} + +type DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryObservation struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryParameters struct { + + // The URL of the Git repository. 
+ // +kubebuilder:validation:Optional + RepositoryURL *string `json:"repositoryUrl" tf:"repository_url,omitempty"` +} + +type DefaultUserSettingsJupyterServerAppSettingsInitParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see code_repository Block below. + CodeRepository []DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *JupyterServerAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type DefaultUserSettingsJupyterServerAppSettingsObservation struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see code_repository Block below. + CodeRepository []DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryObservation `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *JupyterServerAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. 
+ // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type DefaultUserSettingsJupyterServerAppSettingsParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see code_repository Block below. + // +kubebuilder:validation:Optional + CodeRepository []DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *JupyterServerAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. 
+ SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. 
+ // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type DefaultUserSettingsKernelGatewayAppSettingsInitParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + CustomImage []KernelGatewayAppSettingsCustomImageInitParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type DefaultUserSettingsKernelGatewayAppSettingsObservation struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + CustomImage []KernelGatewayAppSettingsCustomImageObservation `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. 
+ DefaultResourceSpec *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type DefaultUserSettingsKernelGatewayAppSettingsParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + // +kubebuilder:validation:Optional + CustomImage []KernelGatewayAppSettingsCustomImageParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type DefaultUserSettingsObservation struct { + + // The Canvas app settings. See canvas_app_settings Block below. + CanvasAppSettings *CanvasAppSettingsObservation `json:"canvasAppSettings,omitempty" tf:"canvas_app_settings,omitempty"` + + // The Code Editor application settings. See code_editor_app_settings Block below. + CodeEditorAppSettings *CodeEditorAppSettingsObservation `json:"codeEditorAppSettings,omitempty" tf:"code_editor_app_settings,omitempty"` + + // The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio. See custom_file_system_config Block below. 
+ CustomFileSystemConfig []CustomFileSystemConfigObservation `json:"customFileSystemConfig,omitempty" tf:"custom_file_system_config,omitempty"` + + // Details about the POSIX identity that is used for file system operations. See custom_posix_user_config Block below. + CustomPosixUserConfig *CustomPosixUserConfigObservation `json:"customPosixUserConfig,omitempty" tf:"custom_posix_user_config,omitempty"` + + // The default experience that the user is directed to when accessing the domain. The supported values are: studio::: Indicates that Studio is the default experience. This value can only be passed if StudioWebPortal is set to ENABLED. app:JupyterServer:: Indicates that Studio Classic is the default experience. + DefaultLandingURI *string `json:"defaultLandingUri,omitempty" tf:"default_landing_uri,omitempty"` + + // The execution role ARN for the user. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // The settings for the JupyterLab application. See jupyter_lab_app_settings Block below. + JupyterLabAppSettings *JupyterLabAppSettingsObservation `json:"jupyterLabAppSettings,omitempty" tf:"jupyter_lab_app_settings,omitempty"` + + // The Jupyter server's app settings. See jupyter_server_app_settings Block below. + JupyterServerAppSettings *DefaultUserSettingsJupyterServerAppSettingsObservation `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See kernel_gateway_app_settings Block below. + KernelGatewayAppSettings *DefaultUserSettingsKernelGatewayAppSettingsObservation `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // The RSession app settings. See r_session_app_settings Block below. + RSessionAppSettings *RSessionAppSettingsObservation `json:"rSessionAppSettings,omitempty" tf:"r_session_app_settings,omitempty"` + + // A collection of settings that configure user interaction with the RStudioServerPro app. 
See r_studio_server_pro_app_settings Block below. + RStudioServerProAppSettings *RStudioServerProAppSettingsObservation `json:"rStudioServerProAppSettings,omitempty" tf:"r_studio_server_pro_app_settings,omitempty"` + + // A list of security group IDs that will be attached to the user. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The sharing settings. See sharing_settings Block below. + SharingSettings *SharingSettingsObservation `json:"sharingSettings,omitempty" tf:"sharing_settings,omitempty"` + + // The storage settings for a private space. See space_storage_settings Block below. + SpaceStorageSettings *SpaceStorageSettingsObservation `json:"spaceStorageSettings,omitempty" tf:"space_storage_settings,omitempty"` + + // Whether the user can access Studio. If this value is set to DISABLED, the user cannot access Studio, even if that is the default experience for the domain. Valid values are ENABLED and DISABLED. + StudioWebPortal *string `json:"studioWebPortal,omitempty" tf:"studio_web_portal,omitempty"` + + // The TensorBoard app settings. See tensor_board_app_settings Block below. + TensorBoardAppSettings *TensorBoardAppSettingsObservation `json:"tensorBoardAppSettings,omitempty" tf:"tensor_board_app_settings,omitempty"` +} + +type DefaultUserSettingsParameters struct { + + // The Canvas app settings. See canvas_app_settings Block below. + // +kubebuilder:validation:Optional + CanvasAppSettings *CanvasAppSettingsParameters `json:"canvasAppSettings,omitempty" tf:"canvas_app_settings,omitempty"` + + // The Code Editor application settings. See code_editor_app_settings Block below. + // +kubebuilder:validation:Optional + CodeEditorAppSettings *CodeEditorAppSettingsParameters `json:"codeEditorAppSettings,omitempty" tf:"code_editor_app_settings,omitempty"` + + // The settings for assigning a custom file system to a user profile. 
Permitted users can access this file system in Amazon SageMaker Studio. See custom_file_system_config Block below. + // +kubebuilder:validation:Optional + CustomFileSystemConfig []CustomFileSystemConfigParameters `json:"customFileSystemConfig,omitempty" tf:"custom_file_system_config,omitempty"` + + // Details about the POSIX identity that is used for file system operations. See custom_posix_user_config Block below. + // +kubebuilder:validation:Optional + CustomPosixUserConfig *CustomPosixUserConfigParameters `json:"customPosixUserConfig,omitempty" tf:"custom_posix_user_config,omitempty"` + + // The default experience that the user is directed to when accessing the domain. The supported values are: studio::: Indicates that Studio is the default experience. This value can only be passed if StudioWebPortal is set to ENABLED. app:JupyterServer:: Indicates that Studio Classic is the default experience. + // +kubebuilder:validation:Optional + DefaultLandingURI *string `json:"defaultLandingUri,omitempty" tf:"default_landing_uri,omitempty"` + + // The execution role ARN for the user. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // Reference to a Role in iam to populate executionRole. + // +kubebuilder:validation:Optional + ExecutionRoleRef *v1.Reference `json:"executionRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRole. + // +kubebuilder:validation:Optional + ExecutionRoleSelector *v1.Selector `json:"executionRoleSelector,omitempty" tf:"-"` + + // The settings for the JupyterLab application. See jupyter_lab_app_settings Block below. 
+ // +kubebuilder:validation:Optional + JupyterLabAppSettings *JupyterLabAppSettingsParameters `json:"jupyterLabAppSettings,omitempty" tf:"jupyter_lab_app_settings,omitempty"` + + // The Jupyter server's app settings. See jupyter_server_app_settings Block below. + // +kubebuilder:validation:Optional + JupyterServerAppSettings *DefaultUserSettingsJupyterServerAppSettingsParameters `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See kernel_gateway_app_settings Block below. + // +kubebuilder:validation:Optional + KernelGatewayAppSettings *DefaultUserSettingsKernelGatewayAppSettingsParameters `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // The RSession app settings. See r_session_app_settings Block below. + // +kubebuilder:validation:Optional + RSessionAppSettings *RSessionAppSettingsParameters `json:"rSessionAppSettings,omitempty" tf:"r_session_app_settings,omitempty"` + + // A collection of settings that configure user interaction with the RStudioServerPro app. See r_studio_server_pro_app_settings Block below. + // +kubebuilder:validation:Optional + RStudioServerProAppSettings *RStudioServerProAppSettingsParameters `json:"rStudioServerProAppSettings,omitempty" tf:"r_studio_server_pro_app_settings,omitempty"` + + // A list of security group IDs that will be attached to the user. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The sharing settings. See sharing_settings Block below. + // +kubebuilder:validation:Optional + SharingSettings *SharingSettingsParameters `json:"sharingSettings,omitempty" tf:"sharing_settings,omitempty"` + + // The storage settings for a private space. See space_storage_settings Block below. 
+ // +kubebuilder:validation:Optional + SpaceStorageSettings *SpaceStorageSettingsParameters `json:"spaceStorageSettings,omitempty" tf:"space_storage_settings,omitempty"` + + // Whether the user can access Studio. If this value is set to DISABLED, the user cannot access Studio, even if that is the default experience for the domain. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + StudioWebPortal *string `json:"studioWebPortal,omitempty" tf:"studio_web_portal,omitempty"` + + // The TensorBoard app settings. See tensor_board_app_settings Block below. + // +kubebuilder:validation:Optional + TensorBoardAppSettings *TensorBoardAppSettingsParameters `json:"tensorBoardAppSettings,omitempty" tf:"tensor_board_app_settings,omitempty"` +} + +type DirectDeploySettingsInitParameters struct { + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type DirectDeploySettingsObservation struct { + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type DirectDeploySettingsParameters struct { + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type DomainInitParameters struct { + + // Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. Valid values are PublicInternetOnly and VpcOnly. + AppNetworkAccessType *string `json:"appNetworkAccessType,omitempty" tf:"app_network_access_type,omitempty"` + + // The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Valid values are Service and Customer. 
+ AppSecurityGroupManagement *string `json:"appSecurityGroupManagement,omitempty" tf:"app_security_group_management,omitempty"` + + // The mode of authentication that members use to access the domain. Valid values are IAM and SSO. + AuthMode *string `json:"authMode,omitempty" tf:"auth_mode,omitempty"` + + // The default space settings. See default_space_settings Block below. + DefaultSpaceSettings *DefaultSpaceSettingsInitParameters `json:"defaultSpaceSettings,omitempty" tf:"default_space_settings,omitempty"` + + // The default user settings. See default_user_settings Block below. + DefaultUserSettings *DefaultUserSettingsInitParameters `json:"defaultUserSettings,omitempty" tf:"default_user_settings,omitempty"` + + // The domain name. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The domain settings. See domain_settings Block below. + DomainSettings *DomainSettingsInitParameters `json:"domainSettings,omitempty" tf:"domain_settings,omitempty"` + + // The AWS KMS customer managed CMK used to encrypt the EFS volume attached to the domain. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The retention policy for this domain, which specifies whether resources will be retained after the Domain is deleted. By default, all resources are retained. See retention_policy Block below. + RetentionPolicy *RetentionPolicyInitParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. 
+ // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The VPC subnets that Studio uses for communication. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type DomainObservation struct { + + // Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. Valid values are PublicInternetOnly and VpcOnly. + AppNetworkAccessType *string `json:"appNetworkAccessType,omitempty" tf:"app_network_access_type,omitempty"` + + // The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Valid values are Service and Customer. 
+ AppSecurityGroupManagement *string `json:"appSecurityGroupManagement,omitempty" tf:"app_security_group_management,omitempty"` + + // The Amazon Resource Name (ARN) assigned by AWS to this Domain. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The mode of authentication that members use to access the domain. Valid values are IAM and SSO. + AuthMode *string `json:"authMode,omitempty" tf:"auth_mode,omitempty"` + + // The default space settings. See default_space_settings Block below. + DefaultSpaceSettings *DefaultSpaceSettingsObservation `json:"defaultSpaceSettings,omitempty" tf:"default_space_settings,omitempty"` + + // The default user settings. See default_user_settings Block below. + DefaultUserSettings *DefaultUserSettingsObservation `json:"defaultUserSettings,omitempty" tf:"default_user_settings,omitempty"` + + // The domain name. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The domain settings. See domain_settings Block below. + DomainSettings *DomainSettingsObservation `json:"domainSettings,omitempty" tf:"domain_settings,omitempty"` + + // The ID of the Amazon Elastic File System (EFS) managed by this Domain. + HomeEFSFileSystemID *string `json:"homeEfsFileSystemId,omitempty" tf:"home_efs_file_system_id,omitempty"` + + // The ID of the Domain. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The AWS KMS customer managed CMK used to encrypt the EFS volume attached to the domain. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The retention policy for this domain, which specifies whether resources will be retained after the Domain is deleted. By default, all resources are retained. See retention_policy Block below. + RetentionPolicy *RetentionPolicyObservation `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // The ID of the security group that authorizes traffic between the RSessionGateway apps and the RStudioServerPro app. 
+ SecurityGroupIDForDomainBoundary *string `json:"securityGroupIdForDomainBoundary,omitempty" tf:"security_group_id_for_domain_boundary,omitempty"` + + // The ARN of the application managed by SageMaker in IAM Identity Center. This value is only returned for domains created after September 19, 2023. + SingleSignOnApplicationArn *string `json:"singleSignOnApplicationArn,omitempty" tf:"single_sign_on_application_arn,omitempty"` + + // The SSO managed application instance ID. + SingleSignOnManagedApplicationInstanceID *string `json:"singleSignOnManagedApplicationInstanceId,omitempty" tf:"single_sign_on_managed_application_instance_id,omitempty"` + + // The VPC subnets that Studio uses for communication. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The domain's URL. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type DomainParameters struct { + + // Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. Valid values are PublicInternetOnly and VpcOnly. + // +kubebuilder:validation:Optional + AppNetworkAccessType *string `json:"appNetworkAccessType,omitempty" tf:"app_network_access_type,omitempty"` + + // The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Valid values are Service and Customer. 
+ // +kubebuilder:validation:Optional + AppSecurityGroupManagement *string `json:"appSecurityGroupManagement,omitempty" tf:"app_security_group_management,omitempty"` + + // The mode of authentication that members use to access the domain. Valid values are IAM and SSO. + // +kubebuilder:validation:Optional + AuthMode *string `json:"authMode,omitempty" tf:"auth_mode,omitempty"` + + // The default space settings. See default_space_settings Block below. + // +kubebuilder:validation:Optional + DefaultSpaceSettings *DefaultSpaceSettingsParameters `json:"defaultSpaceSettings,omitempty" tf:"default_space_settings,omitempty"` + + // The default user settings. See default_user_settings Block below. + // +kubebuilder:validation:Optional + DefaultUserSettings *DefaultUserSettingsParameters `json:"defaultUserSettings,omitempty" tf:"default_user_settings,omitempty"` + + // The domain name. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The domain settings. See domain_settings Block below. + // +kubebuilder:validation:Optional + DomainSettings *DomainSettingsParameters `json:"domainSettings,omitempty" tf:"domain_settings,omitempty"` + + // The AWS KMS customer managed CMK used to encrypt the EFS volume attached to the domain. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The retention policy for this domain, which specifies whether resources will be retained after the Domain is deleted. By default, all resources are retained. See retention_policy Block below. + // +kubebuilder:validation:Optional + RetentionPolicy *RetentionPolicyParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The VPC subnets that Studio uses for communication. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. 
+ // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type DomainSettingsInitParameters struct { + + // The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key AWS Docs. Valid values are USER_PROFILE_NAME and DISABLED. + ExecutionRoleIdentityConfig *string `json:"executionRoleIdentityConfig,omitempty" tf:"execution_role_identity_config,omitempty"` + + // A collection of settings that configure the RStudioServerPro Domain-level app. see r_studio_server_pro_domain_settings Block below. + RStudioServerProDomainSettings *RStudioServerProDomainSettingsInitParameters `json:"rStudioServerProDomainSettings,omitempty" tf:"r_studio_server_pro_domain_settings,omitempty"` + + // The security groups for the Amazon Virtual Private Cloud that the Domain uses for communication between Domain-level apps and user apps. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` +} + +type DomainSettingsObservation struct { + + // The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key AWS Docs. Valid values are USER_PROFILE_NAME and DISABLED. + ExecutionRoleIdentityConfig *string `json:"executionRoleIdentityConfig,omitempty" tf:"execution_role_identity_config,omitempty"` + + // A collection of settings that configure the RStudioServerPro Domain-level app. see r_studio_server_pro_domain_settings Block below. + RStudioServerProDomainSettings *RStudioServerProDomainSettingsObservation `json:"rStudioServerProDomainSettings,omitempty" tf:"r_studio_server_pro_domain_settings,omitempty"` + + // The security groups for the Amazon Virtual Private Cloud that the Domain uses for communication between Domain-level apps and user apps. 
+ // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` +} + +type DomainSettingsParameters struct { + + // The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key AWS Docs. Valid values are USER_PROFILE_NAME and DISABLED. + // +kubebuilder:validation:Optional + ExecutionRoleIdentityConfig *string `json:"executionRoleIdentityConfig,omitempty" tf:"execution_role_identity_config,omitempty"` + + // A collection of settings that configure the RStudioServerPro Domain-level app. see r_studio_server_pro_domain_settings Block below. + // +kubebuilder:validation:Optional + RStudioServerProDomainSettings *RStudioServerProDomainSettingsParameters `json:"rStudioServerProDomainSettings,omitempty" tf:"r_studio_server_pro_domain_settings,omitempty"` + + // The security groups for the Amazon Virtual Private Cloud that the Domain uses for communication between Domain-level apps and user apps. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` +} + +type EFSFileSystemConfigInitParameters struct { + + // The ID of your Amazon EFS file system. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below. + FileSystemPath *string `json:"fileSystemPath,omitempty" tf:"file_system_path,omitempty"` +} + +type EFSFileSystemConfigObservation struct { + + // The ID of your Amazon EFS file system. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below. 
+ FileSystemPath *string `json:"fileSystemPath,omitempty" tf:"file_system_path,omitempty"` +} + +type EFSFileSystemConfigParameters struct { + + // The ID of your Amazon EFS file system. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId" tf:"file_system_id,omitempty"` + + // The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below. + // +kubebuilder:validation:Optional + FileSystemPath *string `json:"fileSystemPath" tf:"file_system_path,omitempty"` +} + +type IdentityProviderOauthSettingsInitParameters struct { + + // The name of the data source that you're connecting to. Canvas currently supports OAuth for Snowflake and Salesforce Data Cloud. Valid values are SalesforceGenie and Snowflake. + DataSourceName *string `json:"dataSourceName,omitempty" tf:"data_source_name,omitempty"` + + // The ARN of an Amazon Web Services Secrets Manager secret that stores the credentials from your identity provider, such as the client ID and secret, authorization URL, and token URL. + SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type IdentityProviderOauthSettingsObservation struct { + + // The name of the data source that you're connecting to. Canvas currently supports OAuth for Snowflake and Salesforce Data Cloud. Valid values are SalesforceGenie and Snowflake. + DataSourceName *string `json:"dataSourceName,omitempty" tf:"data_source_name,omitempty"` + + // The ARN of an Amazon Web Services Secrets Manager secret that stores the credentials from your identity provider, such as the client ID and secret, authorization URL, and token URL. 
+ SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type IdentityProviderOauthSettingsParameters struct { + + // The name of the data source that you're connecting to. Canvas currently supports OAuth for Snowflake and Salesforce Data Cloud. Valid values are SalesforceGenie and Snowflake. + // +kubebuilder:validation:Optional + DataSourceName *string `json:"dataSourceName,omitempty" tf:"data_source_name,omitempty"` + + // The ARN of an Amazon Web Services Secrets Manager secret that stores the credentials from your identity provider, such as the client ID and secret, authorization URL, and token URL. + // +kubebuilder:validation:Optional + SecretArn *string `json:"secretArn" tf:"secret_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type JupyterLabAppSettingsCodeRepositoryInitParameters struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type JupyterLabAppSettingsCodeRepositoryObservation struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type JupyterLabAppSettingsCodeRepositoryParameters struct { + + // The URL of the Git repository. + // +kubebuilder:validation:Optional + RepositoryURL *string `json:"repositoryUrl" tf:"repository_url,omitempty"` +} + +type JupyterLabAppSettingsCustomImageInitParameters struct { + + // The name of the App Image Config. 
+ AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type JupyterLabAppSettingsCustomImageObservation struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type JupyterLabAppSettingsCustomImageParameters struct { + + // The name of the App Image Config. + // +kubebuilder:validation:Optional + AppImageConfigName *string `json:"appImageConfigName" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + // +kubebuilder:validation:Optional + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type JupyterLabAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. 
+ SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type JupyterLabAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type JupyterLabAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. 
+ // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type JupyterLabAppSettingsInitParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see code_repository Block below. + CodeRepository []JupyterLabAppSettingsCodeRepositoryInitParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + CustomImage []JupyterLabAppSettingsCustomImageInitParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *JupyterLabAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. 
+ // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type JupyterLabAppSettingsObservation struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see code_repository Block below. + CodeRepository []JupyterLabAppSettingsCodeRepositoryObservation `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + CustomImage []JupyterLabAppSettingsCustomImageObservation `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *JupyterLabAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type JupyterLabAppSettingsParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see code_repository Block below. + // +kubebuilder:validation:Optional + CodeRepository []JupyterLabAppSettingsCodeRepositoryParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + // +kubebuilder:validation:Optional + CustomImage []JupyterLabAppSettingsCustomImageParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. 
see default_resource_spec Block below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *JupyterLabAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type JupyterServerAppSettingsCodeRepositoryInitParameters struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type JupyterServerAppSettingsCodeRepositoryObservation struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type JupyterServerAppSettingsCodeRepositoryParameters struct { + + // The URL of the Git repository. + // +kubebuilder:validation:Optional + RepositoryURL *string `json:"repositoryUrl" tf:"repository_url,omitempty"` +} + +type JupyterServerAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. 
+ SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type JupyterServerAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type JupyterServerAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. 
+ // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type JupyterServerAppSettingsInitParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see code_repository Block below. + CodeRepository []JupyterServerAppSettingsCodeRepositoryInitParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *DefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type JupyterServerAppSettingsObservation struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see code_repository Block below. + CodeRepository []JupyterServerAppSettingsCodeRepositoryObservation `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *DefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. 
+ // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type JupyterServerAppSettingsParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see code_repository Block below. + // +kubebuilder:validation:Optional + CodeRepository []JupyterServerAppSettingsCodeRepositoryParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *DefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type KendraSettingsInitParameters struct { + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type KendraSettingsObservation struct { + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type KendraSettingsParameters struct { + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type KernelGatewayAppSettingsCustomImageInitParameters struct { + + // The name of the App Image Config. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.AppImageConfig + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // Reference to a AppImageConfig in sagemaker to populate appImageConfigName. + // +kubebuilder:validation:Optional + AppImageConfigNameRef *v1.Reference `json:"appImageConfigNameRef,omitempty" tf:"-"` + + // Selector for a AppImageConfig in sagemaker to populate appImageConfigName. + // +kubebuilder:validation:Optional + AppImageConfigNameSelector *v1.Selector `json:"appImageConfigNameSelector,omitempty" tf:"-"` + + // The name of the Custom Image. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta1.ImageVersion + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("image_name",false) + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // Reference to a ImageVersion in sagemaker to populate imageName. + // +kubebuilder:validation:Optional + ImageNameRef *v1.Reference `json:"imageNameRef,omitempty" tf:"-"` + + // Selector for a ImageVersion in sagemaker to populate imageName. + // +kubebuilder:validation:Optional + ImageNameSelector *v1.Selector `json:"imageNameSelector,omitempty" tf:"-"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type KernelGatewayAppSettingsCustomImageObservation struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. 
+ ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type KernelGatewayAppSettingsCustomImageParameters struct { + + // The name of the App Image Config. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.AppImageConfig + // +kubebuilder:validation:Optional + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // Reference to a AppImageConfig in sagemaker to populate appImageConfigName. + // +kubebuilder:validation:Optional + AppImageConfigNameRef *v1.Reference `json:"appImageConfigNameRef,omitempty" tf:"-"` + + // Selector for a AppImageConfig in sagemaker to populate appImageConfigName. + // +kubebuilder:validation:Optional + AppImageConfigNameSelector *v1.Selector `json:"appImageConfigNameSelector,omitempty" tf:"-"` + + // The name of the Custom Image. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta1.ImageVersion + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("image_name",false) + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // Reference to a ImageVersion in sagemaker to populate imageName. + // +kubebuilder:validation:Optional + ImageNameRef *v1.Reference `json:"imageNameRef,omitempty" tf:"-"` + + // Selector for a ImageVersion in sagemaker to populate imageName. + // +kubebuilder:validation:Optional + ImageNameSelector *v1.Selector `json:"imageNameSelector,omitempty" tf:"-"` + + // The version number of the Custom Image. + // +kubebuilder:validation:Optional + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type KernelGatewayAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. 
For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type KernelGatewayAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type KernelGatewayAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. 
For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type KernelGatewayAppSettingsInitParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + CustomImage []CustomImageInitParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *KernelGatewayAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type KernelGatewayAppSettingsObservation struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. 
see custom_image Block below. + CustomImage []CustomImageObservation `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *KernelGatewayAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type KernelGatewayAppSettingsParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + // +kubebuilder:validation:Optional + CustomImage []CustomImageParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *KernelGatewayAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type ModelRegisterSettingsInitParameters struct { + + // The Amazon Resource Name (ARN) of the SageMaker model registry account. Required only to register model versions created by a different SageMaker Canvas AWS account than the AWS account in which SageMaker model registry is set up. 
+ CrossAccountModelRegisterRoleArn *string `json:"crossAccountModelRegisterRoleArn,omitempty" tf:"cross_account_model_register_role_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ModelRegisterSettingsObservation struct { + + // The Amazon Resource Name (ARN) of the SageMaker model registry account. Required only to register model versions created by a different SageMaker Canvas AWS account than the AWS account in which SageMaker model registry is set up. + CrossAccountModelRegisterRoleArn *string `json:"crossAccountModelRegisterRoleArn,omitempty" tf:"cross_account_model_register_role_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type ModelRegisterSettingsParameters struct { + + // The Amazon Resource Name (ARN) of the SageMaker model registry account. Required only to register model versions created by a different SageMaker Canvas AWS account than the AWS account in which SageMaker model registry is set up. + // +kubebuilder:validation:Optional + CrossAccountModelRegisterRoleArn *string `json:"crossAccountModelRegisterRoleArn,omitempty" tf:"cross_account_model_register_role_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type RSessionAppSettingsCustomImageInitParameters struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. 
+ ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type RSessionAppSettingsCustomImageObservation struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type RSessionAppSettingsCustomImageParameters struct { + + // The name of the App Image Config. + // +kubebuilder:validation:Optional + AppImageConfigName *string `json:"appImageConfigName" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + // +kubebuilder:validation:Optional + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type RSessionAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. 
+ SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type RSessionAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type RSessionAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. 
+ // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type RSessionAppSettingsInitParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + CustomImage []RSessionAppSettingsCustomImageInitParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *RSessionAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type RSessionAppSettingsObservation struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. + CustomImage []RSessionAppSettingsCustomImageObservation `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *RSessionAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type RSessionAppSettingsParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see custom_image Block below. 
+ // +kubebuilder:validation:Optional + CustomImage []RSessionAppSettingsCustomImageParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *RSessionAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type RStudioServerProAppSettingsInitParameters struct { + + // Indicates whether the current user has access to the RStudioServerPro app. Valid values are ENABLED and DISABLED. + AccessStatus *string `json:"accessStatus,omitempty" tf:"access_status,omitempty"` + + // The level of permissions that the user has within the RStudioServerPro app. This value defaults to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the user access to the RStudio Administrative Dashboard. Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + UserGroup *string `json:"userGroup,omitempty" tf:"user_group,omitempty"` +} + +type RStudioServerProAppSettingsObservation struct { + + // Indicates whether the current user has access to the RStudioServerPro app. Valid values are ENABLED and DISABLED. + AccessStatus *string `json:"accessStatus,omitempty" tf:"access_status,omitempty"` + + // The level of permissions that the user has within the RStudioServerPro app. This value defaults to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the user access to the RStudio Administrative Dashboard. Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + UserGroup *string `json:"userGroup,omitempty" tf:"user_group,omitempty"` +} + +type RStudioServerProAppSettingsParameters struct { + + // Indicates whether the current user has access to the RStudioServerPro app. Valid values are ENABLED and DISABLED. 
+ // +kubebuilder:validation:Optional + AccessStatus *string `json:"accessStatus,omitempty" tf:"access_status,omitempty"` + + // The level of permissions that the user has within the RStudioServerPro app. This value defaults to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the user access to the RStudio Administrative Dashboard. Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + // +kubebuilder:validation:Optional + UserGroup *string `json:"userGroup,omitempty" tf:"user_group,omitempty"` +} + +type RStudioServerProDomainSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type RStudioServerProDomainSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. 
+ LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type RStudioServerProDomainSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type RStudioServerProDomainSettingsInitParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. 
see default_resource_spec Block below. + DefaultResourceSpec *RStudioServerProDomainSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The ARN of the execution role for the RStudioServerPro Domain-level app. + DomainExecutionRoleArn *string `json:"domainExecutionRoleArn,omitempty" tf:"domain_execution_role_arn,omitempty"` + + // A URL pointing to an RStudio Connect server. + RStudioConnectURL *string `json:"rStudioConnectUrl,omitempty" tf:"r_studio_connect_url,omitempty"` + + // A URL pointing to an RStudio Package Manager server. + RStudioPackageManagerURL *string `json:"rStudioPackageManagerUrl,omitempty" tf:"r_studio_package_manager_url,omitempty"` +} + +type RStudioServerProDomainSettingsObservation struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *RStudioServerProDomainSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The ARN of the execution role for the RStudioServerPro Domain-level app. + DomainExecutionRoleArn *string `json:"domainExecutionRoleArn,omitempty" tf:"domain_execution_role_arn,omitempty"` + + // A URL pointing to an RStudio Connect server. + RStudioConnectURL *string `json:"rStudioConnectUrl,omitempty" tf:"r_studio_connect_url,omitempty"` + + // A URL pointing to an RStudio Package Manager server. + RStudioPackageManagerURL *string `json:"rStudioPackageManagerUrl,omitempty" tf:"r_studio_package_manager_url,omitempty"` +} + +type RStudioServerProDomainSettingsParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. 
+ // +kubebuilder:validation:Optional + DefaultResourceSpec *RStudioServerProDomainSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The ARN of the execution role for the RStudioServerPro Domain-level app. + // +kubebuilder:validation:Optional + DomainExecutionRoleArn *string `json:"domainExecutionRoleArn" tf:"domain_execution_role_arn,omitempty"` + + // A URL pointing to an RStudio Connect server. + // +kubebuilder:validation:Optional + RStudioConnectURL *string `json:"rStudioConnectUrl,omitempty" tf:"r_studio_connect_url,omitempty"` + + // A URL pointing to an RStudio Package Manager server. + // +kubebuilder:validation:Optional + RStudioPackageManagerURL *string `json:"rStudioPackageManagerUrl,omitempty" tf:"r_studio_package_manager_url,omitempty"` +} + +type RetentionPolicyInitParameters struct { + + // The retention policy for data stored on an Amazon Elastic File System (EFS) volume. Valid values are Retain or Delete. Default value is Retain. + HomeEFSFileSystem *string `json:"homeEfsFileSystem,omitempty" tf:"home_efs_file_system,omitempty"` +} + +type RetentionPolicyObservation struct { + + // The retention policy for data stored on an Amazon Elastic File System (EFS) volume. Valid values are Retain or Delete. Default value is Retain. + HomeEFSFileSystem *string `json:"homeEfsFileSystem,omitempty" tf:"home_efs_file_system,omitempty"` +} + +type RetentionPolicyParameters struct { + + // The retention policy for data stored on an Amazon Elastic File System (EFS) volume. Valid values are Retain or Delete. Default value is Retain. + // +kubebuilder:validation:Optional + HomeEFSFileSystem *string `json:"homeEfsFileSystem,omitempty" tf:"home_efs_file_system,omitempty"` +} + +type SharingSettingsInitParameters struct { + + // Whether to include the notebook cell output when sharing the notebook. The default is Disabled. Valid values are Allowed and Disabled. 
+ NotebookOutputOption *string `json:"notebookOutputOption,omitempty" tf:"notebook_output_option,omitempty"` + + // The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket. + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` + + // When notebook_output_option is Allowed, the Amazon S3 bucket used to save the notebook cell output. + S3OutputPath *string `json:"s3OutputPath,omitempty" tf:"s3_output_path,omitempty"` +} + +type SharingSettingsObservation struct { + + // Whether to include the notebook cell output when sharing the notebook. The default is Disabled. Valid values are Allowed and Disabled. + NotebookOutputOption *string `json:"notebookOutputOption,omitempty" tf:"notebook_output_option,omitempty"` + + // The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket. + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` + + // When notebook_output_option is Allowed, the Amazon S3 bucket used to save the notebook cell output. + S3OutputPath *string `json:"s3OutputPath,omitempty" tf:"s3_output_path,omitempty"` +} + +type SharingSettingsParameters struct { + + // Whether to include the notebook cell output when sharing the notebook. The default is Disabled. Valid values are Allowed and Disabled. + // +kubebuilder:validation:Optional + NotebookOutputOption *string `json:"notebookOutputOption,omitempty" tf:"notebook_output_option,omitempty"` + + // The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket. + // +kubebuilder:validation:Optional + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` + + // When notebook_output_option is Allowed, the Amazon S3 bucket used to save the notebook cell output. 
+ // +kubebuilder:validation:Optional + S3OutputPath *string `json:"s3OutputPath,omitempty" tf:"s3_output_path,omitempty"` +} + +type SpaceStorageSettingsInitParameters struct { + + // The default EBS storage settings for a private space. See default_ebs_storage_settings Block below. + DefaultEBSStorageSettings *DefaultEBSStorageSettingsInitParameters `json:"defaultEbsStorageSettings,omitempty" tf:"default_ebs_storage_settings,omitempty"` +} + +type SpaceStorageSettingsObservation struct { + + // The default EBS storage settings for a private space. See default_ebs_storage_settings Block below. + DefaultEBSStorageSettings *DefaultEBSStorageSettingsObservation `json:"defaultEbsStorageSettings,omitempty" tf:"default_ebs_storage_settings,omitempty"` +} + +type SpaceStorageSettingsParameters struct { + + // The default EBS storage settings for a private space. See default_ebs_storage_settings Block below. + // +kubebuilder:validation:Optional + DefaultEBSStorageSettings *DefaultEBSStorageSettingsParameters `json:"defaultEbsStorageSettings,omitempty" tf:"default_ebs_storage_settings,omitempty"` +} + +type TensorBoardAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. 
+ SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type TensorBoardAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type TensorBoardAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. 
+ // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type TensorBoardAppSettingsInitParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *TensorBoardAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type TensorBoardAppSettingsObservation struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + DefaultResourceSpec *TensorBoardAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type TensorBoardAppSettingsParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see default_resource_spec Block below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *TensorBoardAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type TimeSeriesForecastingSettingsInitParameters struct { + + // The IAM role that Canvas passes to Amazon Forecast for time series forecasting. By default, Canvas uses the execution role specified in the UserProfile that launches the Canvas app. If an execution role is not specified in the UserProfile, Canvas uses the execution role specified in the Domain that owns the UserProfile. 
To allow time series forecasting, this IAM role should have the AmazonSageMakerCanvasForecastAccess policy attached and forecast.amazonaws.com added in the trust relationship as a service principal. + AmazonForecastRoleArn *string `json:"amazonForecastRoleArn,omitempty" tf:"amazon_forecast_role_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type TimeSeriesForecastingSettingsObservation struct { + + // The IAM role that Canvas passes to Amazon Forecast for time series forecasting. By default, Canvas uses the execution role specified in the UserProfile that launches the Canvas app. If an execution role is not specified in the UserProfile, Canvas uses the execution role specified in the Domain that owns the UserProfile. To allow time series forecasting, this IAM role should have the AmazonSageMakerCanvasForecastAccess policy attached and forecast.amazonaws.com added in the trust relationship as a service principal. + AmazonForecastRoleArn *string `json:"amazonForecastRoleArn,omitempty" tf:"amazon_forecast_role_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type TimeSeriesForecastingSettingsParameters struct { + + // The IAM role that Canvas passes to Amazon Forecast for time series forecasting. By default, Canvas uses the execution role specified in the UserProfile that launches the Canvas app. If an execution role is not specified in the UserProfile, Canvas uses the execution role specified in the Domain that owns the UserProfile. To allow time series forecasting, this IAM role should have the AmazonSageMakerCanvasForecastAccess policy attached and forecast.amazonaws.com added in the trust relationship as a service principal. 
+ // +kubebuilder:validation:Optional + AmazonForecastRoleArn *string `json:"amazonForecastRoleArn,omitempty" tf:"amazon_forecast_role_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type WorkspaceSettingsInitParameters struct { + + // The Amazon S3 bucket used to store artifacts generated by Canvas. Updating the Amazon S3 location impacts existing configuration settings, and Canvas users no longer have access to their artifacts. Canvas users must log out and log back in to apply the new location. + S3ArtifactPath *string `json:"s3ArtifactPath,omitempty" tf:"s3_artifact_path,omitempty"` + + // The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket. + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` +} + +type WorkspaceSettingsObservation struct { + + // The Amazon S3 bucket used to store artifacts generated by Canvas. Updating the Amazon S3 location impacts existing configuration settings, and Canvas users no longer have access to their artifacts. Canvas users must log out and log back in to apply the new location. + S3ArtifactPath *string `json:"s3ArtifactPath,omitempty" tf:"s3_artifact_path,omitempty"` + + // The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket. + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` +} + +type WorkspaceSettingsParameters struct { + + // The Amazon S3 bucket used to store artifacts generated by Canvas. Updating the Amazon S3 location impacts existing configuration settings, and Canvas users no longer have access to their artifacts. 
Canvas users must log out and log back in to apply the new location. + // +kubebuilder:validation:Optional + S3ArtifactPath *string `json:"s3ArtifactPath,omitempty" tf:"s3_artifact_path,omitempty"` + + // The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket. + // +kubebuilder:validation:Optional + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` +} + +// DomainSpec defines the desired state of Domain +type DomainSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DomainParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DomainInitParameters `json:"initProvider,omitempty"` +} + +// DomainStatus defines the observed state of Domain. +type DomainStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DomainObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Domain is the Schema for the Domains API. Provides a SageMaker Domain resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Domain struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authMode) || (has(self.initProvider) && has(self.initProvider.authMode))",message="spec.forProvider.authMode is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultUserSettings) || (has(self.initProvider) && has(self.initProvider.defaultUserSettings))",message="spec.forProvider.defaultUserSettings is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.domainName) || (has(self.initProvider) && has(self.initProvider.domainName))",message="spec.forProvider.domainName is a required parameter" + Spec DomainSpec `json:"spec"` + Status DomainStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DomainList contains a list of Domains +type DomainList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Domain `json:"items"` +} + +// Repository type metadata. 
+var ( + Domain_Kind = "Domain" + Domain_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Domain_Kind}.String() + Domain_KindAPIVersion = Domain_Kind + "." + CRDGroupVersion.String() + Domain_GroupVersionKind = CRDGroupVersion.WithKind(Domain_Kind) +) + +func init() { + SchemeBuilder.Register(&Domain{}, &DomainList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_endpoint_terraformed.go b/apis/sagemaker/v1beta2/zz_endpoint_terraformed.go new file mode 100755 index 0000000000..684451521b --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_endpoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Endpoint +func (mg *Endpoint) GetTerraformResourceType() string { + return "aws_sagemaker_endpoint" +} + +// GetConnectionDetailsMapping for this Endpoint +func (tr *Endpoint) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Endpoint +func (tr *Endpoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Endpoint +func (tr *Endpoint) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Endpoint +func (tr *Endpoint) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Endpoint +func (tr *Endpoint) 
GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Endpoint +func (tr *Endpoint) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Endpoint +func (tr *Endpoint) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Endpoint +func (tr *Endpoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Endpoint using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Endpoint) LateInitialize(attrs []byte) (bool, error) { + params := &EndpointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Endpoint) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_endpoint_types.go b/apis/sagemaker/v1beta2/zz_endpoint_types.go new file mode 100755 index 0000000000..91edce1a17 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_endpoint_types.go @@ -0,0 +1,478 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AlarmsInitParameters struct { + + // The name of a CloudWatch alarm in your account. + AlarmName *string `json:"alarmName,omitempty" tf:"alarm_name,omitempty"` +} + +type AlarmsObservation struct { + + // The name of a CloudWatch alarm in your account. + AlarmName *string `json:"alarmName,omitempty" tf:"alarm_name,omitempty"` +} + +type AlarmsParameters struct { + + // The name of a CloudWatch alarm in your account. + // +kubebuilder:validation:Optional + AlarmName *string `json:"alarmName" tf:"alarm_name,omitempty"` +} + +type AutoRollbackConfigurationInitParameters struct { + + // List of CloudWatch alarms in your account that are configured to monitor metrics on an endpoint. 
If any alarms are tripped during a deployment, SageMaker rolls back the deployment. See Alarms. + Alarms []AlarmsInitParameters `json:"alarms,omitempty" tf:"alarms,omitempty"` +} + +type AutoRollbackConfigurationObservation struct { + + // List of CloudWatch alarms in your account that are configured to monitor metrics on an endpoint. If any alarms are tripped during a deployment, SageMaker rolls back the deployment. See Alarms. + Alarms []AlarmsObservation `json:"alarms,omitempty" tf:"alarms,omitempty"` +} + +type AutoRollbackConfigurationParameters struct { + + // List of CloudWatch alarms in your account that are configured to monitor metrics on an endpoint. If any alarms are tripped during a deployment, SageMaker rolls back the deployment. See Alarms. + // +kubebuilder:validation:Optional + Alarms []AlarmsParameters `json:"alarms,omitempty" tf:"alarms,omitempty"` +} + +type BlueGreenUpdatePolicyInitParameters struct { + + // Maximum execution timeout for the deployment. Note that the timeout value should be larger than the total waiting time specified in termination_wait_in_seconds and wait_interval_in_seconds. Valid values are between 600 and 14400. + MaximumExecutionTimeoutInSeconds *float64 `json:"maximumExecutionTimeoutInSeconds,omitempty" tf:"maximum_execution_timeout_in_seconds,omitempty"` + + // Additional waiting time in seconds after the completion of an endpoint deployment before terminating the old endpoint fleet. Default is 0. Valid values are between 0 and 3600. + TerminationWaitInSeconds *float64 `json:"terminationWaitInSeconds,omitempty" tf:"termination_wait_in_seconds,omitempty"` + + // Defines the traffic routing strategy to shift traffic from the old fleet to the new fleet during an endpoint deployment. See Traffic Routing Configuration. 
+ TrafficRoutingConfiguration *TrafficRoutingConfigurationInitParameters `json:"trafficRoutingConfiguration,omitempty" tf:"traffic_routing_configuration,omitempty"` +} + +type BlueGreenUpdatePolicyObservation struct { + + // Maximum execution timeout for the deployment. Note that the timeout value should be larger than the total waiting time specified in termination_wait_in_seconds and wait_interval_in_seconds. Valid values are between 600 and 14400. + MaximumExecutionTimeoutInSeconds *float64 `json:"maximumExecutionTimeoutInSeconds,omitempty" tf:"maximum_execution_timeout_in_seconds,omitempty"` + + // Additional waiting time in seconds after the completion of an endpoint deployment before terminating the old endpoint fleet. Default is 0. Valid values are between 0 and 3600. + TerminationWaitInSeconds *float64 `json:"terminationWaitInSeconds,omitempty" tf:"termination_wait_in_seconds,omitempty"` + + // Defines the traffic routing strategy to shift traffic from the old fleet to the new fleet during an endpoint deployment. See Traffic Routing Configuration. + TrafficRoutingConfiguration *TrafficRoutingConfigurationObservation `json:"trafficRoutingConfiguration,omitempty" tf:"traffic_routing_configuration,omitempty"` +} + +type BlueGreenUpdatePolicyParameters struct { + + // Maximum execution timeout for the deployment. Note that the timeout value should be larger than the total waiting time specified in termination_wait_in_seconds and wait_interval_in_seconds. Valid values are between 600 and 14400. + // +kubebuilder:validation:Optional + MaximumExecutionTimeoutInSeconds *float64 `json:"maximumExecutionTimeoutInSeconds,omitempty" tf:"maximum_execution_timeout_in_seconds,omitempty"` + + // Additional waiting time in seconds after the completion of an endpoint deployment before terminating the old endpoint fleet. Default is 0. Valid values are between 0 and 3600. 
+ // +kubebuilder:validation:Optional + TerminationWaitInSeconds *float64 `json:"terminationWaitInSeconds,omitempty" tf:"termination_wait_in_seconds,omitempty"` + + // Defines the traffic routing strategy to shift traffic from the old fleet to the new fleet during an endpoint deployment. See Traffic Routing Configuration. + // +kubebuilder:validation:Optional + TrafficRoutingConfiguration *TrafficRoutingConfigurationParameters `json:"trafficRoutingConfiguration" tf:"traffic_routing_configuration,omitempty"` +} + +type CanarySizeInitParameters struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type CanarySizeObservation struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type CanarySizeParameters struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type DeploymentConfigInitParameters struct { + + // Automatic rollback configuration for handling endpoint deployment failures and recovery. See Auto Rollback Configuration. + AutoRollbackConfiguration *AutoRollbackConfigurationInitParameters `json:"autoRollbackConfiguration,omitempty" tf:"auto_rollback_configuration,omitempty"` + + // Update policy for a blue/green deployment. 
If this update policy is specified, SageMaker creates a new fleet during the deployment while maintaining the old fleet. SageMaker flips traffic to the new fleet according to the specified traffic routing configuration. Only one update policy should be used in the deployment configuration. If no update policy is specified, SageMaker uses a blue/green deployment strategy with all at once traffic shifting by default. See Blue Green Update Config. + BlueGreenUpdatePolicy *BlueGreenUpdatePolicyInitParameters `json:"blueGreenUpdatePolicy,omitempty" tf:"blue_green_update_policy,omitempty"` + + // Specifies a rolling deployment strategy for updating a SageMaker endpoint. See Rolling Update Policy. + RollingUpdatePolicy *RollingUpdatePolicyInitParameters `json:"rollingUpdatePolicy,omitempty" tf:"rolling_update_policy,omitempty"` +} + +type DeploymentConfigObservation struct { + + // Automatic rollback configuration for handling endpoint deployment failures and recovery. See Auto Rollback Configuration. + AutoRollbackConfiguration *AutoRollbackConfigurationObservation `json:"autoRollbackConfiguration,omitempty" tf:"auto_rollback_configuration,omitempty"` + + // Update policy for a blue/green deployment. If this update policy is specified, SageMaker creates a new fleet during the deployment while maintaining the old fleet. SageMaker flips traffic to the new fleet according to the specified traffic routing configuration. Only one update policy should be used in the deployment configuration. If no update policy is specified, SageMaker uses a blue/green deployment strategy with all at once traffic shifting by default. See Blue Green Update Config. + BlueGreenUpdatePolicy *BlueGreenUpdatePolicyObservation `json:"blueGreenUpdatePolicy,omitempty" tf:"blue_green_update_policy,omitempty"` + + // Specifies a rolling deployment strategy for updating a SageMaker endpoint. See Rolling Update Policy. 
+ RollingUpdatePolicy *RollingUpdatePolicyObservation `json:"rollingUpdatePolicy,omitempty" tf:"rolling_update_policy,omitempty"` +} + +type DeploymentConfigParameters struct { + + // Automatic rollback configuration for handling endpoint deployment failures and recovery. See Auto Rollback Configuration. + // +kubebuilder:validation:Optional + AutoRollbackConfiguration *AutoRollbackConfigurationParameters `json:"autoRollbackConfiguration,omitempty" tf:"auto_rollback_configuration,omitempty"` + + // Update policy for a blue/green deployment. If this update policy is specified, SageMaker creates a new fleet during the deployment while maintaining the old fleet. SageMaker flips traffic to the new fleet according to the specified traffic routing configuration. Only one update policy should be used in the deployment configuration. If no update policy is specified, SageMaker uses a blue/green deployment strategy with all at once traffic shifting by default. See Blue Green Update Config. + // +kubebuilder:validation:Optional + BlueGreenUpdatePolicy *BlueGreenUpdatePolicyParameters `json:"blueGreenUpdatePolicy,omitempty" tf:"blue_green_update_policy,omitempty"` + + // Specifies a rolling deployment strategy for updating a SageMaker endpoint. See Rolling Update Policy. + // +kubebuilder:validation:Optional + RollingUpdatePolicy *RollingUpdatePolicyParameters `json:"rollingUpdatePolicy,omitempty" tf:"rolling_update_policy,omitempty"` +} + +type EndpointInitParameters struct { + + // The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations. See Deployment Config. + DeploymentConfig *DeploymentConfigInitParameters `json:"deploymentConfig,omitempty" tf:"deployment_config,omitempty"` + + // The name of the endpoint configuration to use. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.EndpointConfiguration + EndpointConfigName *string `json:"endpointConfigName,omitempty" tf:"endpoint_config_name,omitempty"` + + // Reference to a EndpointConfiguration in sagemaker to populate endpointConfigName. + // +kubebuilder:validation:Optional + EndpointConfigNameRef *v1.Reference `json:"endpointConfigNameRef,omitempty" tf:"-"` + + // Selector for a EndpointConfiguration in sagemaker to populate endpointConfigName. + // +kubebuilder:validation:Optional + EndpointConfigNameSelector *v1.Selector `json:"endpointConfigNameSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EndpointObservation struct { + + // The Amazon Resource Name (ARN) assigned by AWS to this endpoint. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations. See Deployment Config. + DeploymentConfig *DeploymentConfigObservation `json:"deploymentConfig,omitempty" tf:"deployment_config,omitempty"` + + // The name of the endpoint configuration to use. + EndpointConfigName *string `json:"endpointConfigName,omitempty" tf:"endpoint_config_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type EndpointParameters struct { + + // The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations. See Deployment Config. 
+ // +kubebuilder:validation:Optional + DeploymentConfig *DeploymentConfigParameters `json:"deploymentConfig,omitempty" tf:"deployment_config,omitempty"` + + // The name of the endpoint configuration to use. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.EndpointConfiguration + // +kubebuilder:validation:Optional + EndpointConfigName *string `json:"endpointConfigName,omitempty" tf:"endpoint_config_name,omitempty"` + + // Reference to a EndpointConfiguration in sagemaker to populate endpointConfigName. + // +kubebuilder:validation:Optional + EndpointConfigNameRef *v1.Reference `json:"endpointConfigNameRef,omitempty" tf:"-"` + + // Selector for a EndpointConfiguration in sagemaker to populate endpointConfigName. + // +kubebuilder:validation:Optional + EndpointConfigNameSelector *v1.Selector `json:"endpointConfigNameSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LinearStepSizeInitParameters struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type LinearStepSizeObservation struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type LinearStepSizeParameters struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type MaximumBatchSizeInitParameters struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type MaximumBatchSizeObservation struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type MaximumBatchSizeParameters struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type RollbackMaximumBatchSizeInitParameters struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type RollbackMaximumBatchSizeObservation struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type RollbackMaximumBatchSizeParameters struct { + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Defines the capacity size, either as a number of instances or a capacity percentage. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type RollingUpdatePolicyInitParameters struct { + + // Batch size for each rolling step to provision capacity and turn on traffic on the new endpoint fleet, and terminate capacity on the old endpoint fleet. Value must be between 5% to 50% of the variant's total instance count. See Maximum Batch Size. + MaximumBatchSize *MaximumBatchSizeInitParameters `json:"maximumBatchSize,omitempty" tf:"maximum_batch_size,omitempty"` + + // Maximum execution timeout for the deployment. Note that the timeout value should be larger than the total waiting time specified in termination_wait_in_seconds and wait_interval_in_seconds. Valid values are between 600 and 14400. + MaximumExecutionTimeoutInSeconds *float64 `json:"maximumExecutionTimeoutInSeconds,omitempty" tf:"maximum_execution_timeout_in_seconds,omitempty"` + + // Batch size for rollback to the old endpoint fleet. Each rolling step to provision capacity and turn on traffic on the old endpoint fleet, and terminate capacity on the new endpoint fleet. 
If this field is absent, the default value will be set to 100% of total capacity which means to bring up the whole capacity of the old fleet at once during rollback. See Rollback Maximum Batch Size. + RollbackMaximumBatchSize *RollbackMaximumBatchSizeInitParameters `json:"rollbackMaximumBatchSize,omitempty" tf:"rollback_maximum_batch_size,omitempty"` + + // The length of the baking period, during which SageMaker monitors alarms for each batch on the new fleet. Valid values are between 0 and 3600. + WaitIntervalInSeconds *float64 `json:"waitIntervalInSeconds,omitempty" tf:"wait_interval_in_seconds,omitempty"` +} + +type RollingUpdatePolicyObservation struct { + + // Batch size for each rolling step to provision capacity and turn on traffic on the new endpoint fleet, and terminate capacity on the old endpoint fleet. Value must be between 5% to 50% of the variant's total instance count. See Maximum Batch Size. + MaximumBatchSize *MaximumBatchSizeObservation `json:"maximumBatchSize,omitempty" tf:"maximum_batch_size,omitempty"` + + // Maximum execution timeout for the deployment. Note that the timeout value should be larger than the total waiting time specified in termination_wait_in_seconds and wait_interval_in_seconds. Valid values are between 600 and 14400. + MaximumExecutionTimeoutInSeconds *float64 `json:"maximumExecutionTimeoutInSeconds,omitempty" tf:"maximum_execution_timeout_in_seconds,omitempty"` + + // Batch size for rollback to the old endpoint fleet. Each rolling step to provision capacity and turn on traffic on the old endpoint fleet, and terminate capacity on the new endpoint fleet. If this field is absent, the default value will be set to 100% of total capacity which means to bring up the whole capacity of the old fleet at once during rollback. See Rollback Maximum Batch Size. 
+ RollbackMaximumBatchSize *RollbackMaximumBatchSizeObservation `json:"rollbackMaximumBatchSize,omitempty" tf:"rollback_maximum_batch_size,omitempty"` + + // The length of the baking period, during which SageMaker monitors alarms for each batch on the new fleet. Valid values are between 0 and 3600. + WaitIntervalInSeconds *float64 `json:"waitIntervalInSeconds,omitempty" tf:"wait_interval_in_seconds,omitempty"` +} + +type RollingUpdatePolicyParameters struct { + + // Batch size for each rolling step to provision capacity and turn on traffic on the new endpoint fleet, and terminate capacity on the old endpoint fleet. Value must be between 5% to 50% of the variant's total instance count. See Maximum Batch Size. + // +kubebuilder:validation:Optional + MaximumBatchSize *MaximumBatchSizeParameters `json:"maximumBatchSize" tf:"maximum_batch_size,omitempty"` + + // Maximum execution timeout for the deployment. Note that the timeout value should be larger than the total waiting time specified in termination_wait_in_seconds and wait_interval_in_seconds. Valid values are between 600 and 14400. + // +kubebuilder:validation:Optional + MaximumExecutionTimeoutInSeconds *float64 `json:"maximumExecutionTimeoutInSeconds,omitempty" tf:"maximum_execution_timeout_in_seconds,omitempty"` + + // Batch size for rollback to the old endpoint fleet. Each rolling step to provision capacity and turn on traffic on the old endpoint fleet, and terminate capacity on the new endpoint fleet. If this field is absent, the default value will be set to 100% of total capacity which means to bring up the whole capacity of the old fleet at once during rollback. See Rollback Maximum Batch Size. + // +kubebuilder:validation:Optional + RollbackMaximumBatchSize *RollbackMaximumBatchSizeParameters `json:"rollbackMaximumBatchSize,omitempty" tf:"rollback_maximum_batch_size,omitempty"` + + // The length of the baking period, during which SageMaker monitors alarms for each batch on the new fleet. 
Valid values are between 0 and 3600. + // +kubebuilder:validation:Optional + WaitIntervalInSeconds *float64 `json:"waitIntervalInSeconds" tf:"wait_interval_in_seconds,omitempty"` +} + +type TrafficRoutingConfigurationInitParameters struct { + + // Batch size for the first step to turn on traffic on the new endpoint fleet. Value must be less than or equal to 50% of the variant's total instance count. See Canary Size. + CanarySize *CanarySizeInitParameters `json:"canarySize,omitempty" tf:"canary_size,omitempty"` + + // Batch size for each step to turn on traffic on the new endpoint fleet. Value must be 10-50% of the variant's total instance count. See Linear Step Size. + LinearStepSize *LinearStepSizeInitParameters `json:"linearStepSize,omitempty" tf:"linear_step_size,omitempty"` + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The length of the baking period, during which SageMaker monitors alarms for each batch on the new fleet. Valid values are between 0 and 3600. + WaitIntervalInSeconds *float64 `json:"waitIntervalInSeconds,omitempty" tf:"wait_interval_in_seconds,omitempty"` +} + +type TrafficRoutingConfigurationObservation struct { + + // Batch size for the first step to turn on traffic on the new endpoint fleet. Value must be less than or equal to 50% of the variant's total instance count. See Canary Size. + CanarySize *CanarySizeObservation `json:"canarySize,omitempty" tf:"canary_size,omitempty"` + + // Batch size for each step to turn on traffic on the new endpoint fleet. Value must be 10-50% of the variant's total instance count. See Linear Step Size. + LinearStepSize *LinearStepSizeObservation `json:"linearStepSize,omitempty" tf:"linear_step_size,omitempty"` + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The length of the baking period, during which SageMaker monitors alarms for each batch on the new fleet. Valid values are between 0 and 3600. + WaitIntervalInSeconds *float64 `json:"waitIntervalInSeconds,omitempty" tf:"wait_interval_in_seconds,omitempty"` +} + +type TrafficRoutingConfigurationParameters struct { + + // Batch size for the first step to turn on traffic on the new endpoint fleet. Value must be less than or equal to 50% of the variant's total instance count. See Canary Size. + // +kubebuilder:validation:Optional + CanarySize *CanarySizeParameters `json:"canarySize,omitempty" tf:"canary_size,omitempty"` + + // Batch size for each step to turn on traffic on the new endpoint fleet. Value must be 10-50% of the variant's total instance count. See Linear Step Size. + // +kubebuilder:validation:Optional + LinearStepSize *LinearStepSizeParameters `json:"linearStepSize,omitempty" tf:"linear_step_size,omitempty"` + + // Traffic routing strategy type. Valid values are: ALL_AT_ONCE, CANARY, and LINEAR. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The length of the baking period, during which SageMaker monitors alarms for each batch on the new fleet. Valid values are between 0 and 3600. + // +kubebuilder:validation:Optional + WaitIntervalInSeconds *float64 `json:"waitIntervalInSeconds" tf:"wait_interval_in_seconds,omitempty"` +} + +// EndpointSpec defines the desired state of Endpoint +type EndpointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EndpointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EndpointInitParameters `json:"initProvider,omitempty"` +} + +// EndpointStatus defines the observed state of Endpoint. +type EndpointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EndpointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Endpoint is the Schema for the Endpoints API. Provides a SageMaker Endpoint resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Endpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec EndpointSpec `json:"spec"` + Status EndpointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EndpointList contains a list of Endpoints +type EndpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Endpoint `json:"items"` +} + +// Repository type metadata. +var ( + Endpoint_Kind = "Endpoint" + Endpoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Endpoint_Kind}.String() + Endpoint_KindAPIVersion = Endpoint_Kind + "." 
+ CRDGroupVersion.String() + Endpoint_GroupVersionKind = CRDGroupVersion.WithKind(Endpoint_Kind) +) + +func init() { + SchemeBuilder.Register(&Endpoint{}, &EndpointList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_endpointconfiguration_terraformed.go b/apis/sagemaker/v1beta2/zz_endpointconfiguration_terraformed.go new file mode 100755 index 0000000000..4c3da763dd --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_endpointconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EndpointConfiguration +func (mg *EndpointConfiguration) GetTerraformResourceType() string { + return "aws_sagemaker_endpoint_configuration" +} + +// GetConnectionDetailsMapping for this EndpointConfiguration +func (tr *EndpointConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this EndpointConfiguration +func (tr *EndpointConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EndpointConfiguration +func (tr *EndpointConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this EndpointConfiguration +func (tr *EndpointConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this 
EndpointConfiguration +func (tr *EndpointConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EndpointConfiguration +func (tr *EndpointConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EndpointConfiguration +func (tr *EndpointConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this EndpointConfiguration +func (tr *EndpointConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this EndpointConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *EndpointConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &EndpointConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EndpointConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_endpointconfiguration_types.go b/apis/sagemaker/v1beta2/zz_endpointconfiguration_types.go new file mode 100755 index 0000000000..243176ab74 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_endpointconfiguration_types.go @@ -0,0 +1,911 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AsyncInferenceConfigInitParameters struct { + + // Configures the behavior of the client used by Amazon SageMaker to interact with the model container during asynchronous inference. 
+ ClientConfig *ClientConfigInitParameters `json:"clientConfig,omitempty" tf:"client_config,omitempty"` + + // Specifies the configuration for asynchronous inference invocation outputs. + OutputConfig *AsyncInferenceConfigOutputConfigInitParameters `json:"outputConfig,omitempty" tf:"output_config,omitempty"` +} + +type AsyncInferenceConfigObservation struct { + + // Configures the behavior of the client used by Amazon SageMaker to interact with the model container during asynchronous inference. + ClientConfig *ClientConfigObservation `json:"clientConfig,omitempty" tf:"client_config,omitempty"` + + // Specifies the configuration for asynchronous inference invocation outputs. + OutputConfig *AsyncInferenceConfigOutputConfigObservation `json:"outputConfig,omitempty" tf:"output_config,omitempty"` +} + +type AsyncInferenceConfigOutputConfigInitParameters struct { + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the asynchronous inference output in Amazon S3. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Specifies the configuration for notifications of inference results for asynchronous inference. + NotificationConfig *NotificationConfigInitParameters `json:"notificationConfig,omitempty" tf:"notification_config,omitempty"` + + // The Amazon S3 location to upload failure inference responses to. + S3FailurePath *string `json:"s3FailurePath,omitempty" tf:"s3_failure_path,omitempty"` + + // The Amazon S3 location to upload inference responses to. + S3OutputPath *string `json:"s3OutputPath,omitempty" tf:"s3_output_path,omitempty"` +} + +type AsyncInferenceConfigOutputConfigObservation struct { + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the asynchronous inference output in Amazon S3. 
+ KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Specifies the configuration for notifications of inference results for asynchronous inference. + NotificationConfig *NotificationConfigObservation `json:"notificationConfig,omitempty" tf:"notification_config,omitempty"` + + // The Amazon S3 location to upload failure inference responses to. + S3FailurePath *string `json:"s3FailurePath,omitempty" tf:"s3_failure_path,omitempty"` + + // The Amazon S3 location to upload inference responses to. + S3OutputPath *string `json:"s3OutputPath,omitempty" tf:"s3_output_path,omitempty"` +} + +type AsyncInferenceConfigOutputConfigParameters struct { + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the asynchronous inference output in Amazon S3. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Specifies the configuration for notifications of inference results for asynchronous inference. + // +kubebuilder:validation:Optional + NotificationConfig *NotificationConfigParameters `json:"notificationConfig,omitempty" tf:"notification_config,omitempty"` + + // The Amazon S3 location to upload failure inference responses to. + // +kubebuilder:validation:Optional + S3FailurePath *string `json:"s3FailurePath,omitempty" tf:"s3_failure_path,omitempty"` + + // The Amazon S3 location to upload inference responses to. + // +kubebuilder:validation:Optional + S3OutputPath *string `json:"s3OutputPath" tf:"s3_output_path,omitempty"` +} + +type AsyncInferenceConfigParameters struct { + + // Configures the behavior of the client used by Amazon SageMaker to interact with the model container during asynchronous inference. 
+ // +kubebuilder:validation:Optional + ClientConfig *ClientConfigParameters `json:"clientConfig,omitempty" tf:"client_config,omitempty"` + + // Specifies the configuration for asynchronous inference invocation outputs. + // +kubebuilder:validation:Optional + OutputConfig *AsyncInferenceConfigOutputConfigParameters `json:"outputConfig" tf:"output_config,omitempty"` +} + +type CaptureContentTypeHeaderInitParameters struct { + + // The CSV content type headers to capture. + // +listType=set + CsvContentTypes []*string `json:"csvContentTypes,omitempty" tf:"csv_content_types,omitempty"` + + // The JSON content type headers to capture. + // +listType=set + JSONContentTypes []*string `json:"jsonContentTypes,omitempty" tf:"json_content_types,omitempty"` +} + +type CaptureContentTypeHeaderObservation struct { + + // The CSV content type headers to capture. + // +listType=set + CsvContentTypes []*string `json:"csvContentTypes,omitempty" tf:"csv_content_types,omitempty"` + + // The JSON content type headers to capture. + // +listType=set + JSONContentTypes []*string `json:"jsonContentTypes,omitempty" tf:"json_content_types,omitempty"` +} + +type CaptureContentTypeHeaderParameters struct { + + // The CSV content type headers to capture. + // +kubebuilder:validation:Optional + // +listType=set + CsvContentTypes []*string `json:"csvContentTypes,omitempty" tf:"csv_content_types,omitempty"` + + // The JSON content type headers to capture. + // +kubebuilder:validation:Optional + // +listType=set + JSONContentTypes []*string `json:"jsonContentTypes,omitempty" tf:"json_content_types,omitempty"` +} + +type CaptureOptionsInitParameters struct { + + // Specifies the data to be captured. Should be one of Input or Output. + CaptureMode *string `json:"captureMode,omitempty" tf:"capture_mode,omitempty"` +} + +type CaptureOptionsObservation struct { + + // Specifies the data to be captured. Should be one of Input or Output. 
+ CaptureMode *string `json:"captureMode,omitempty" tf:"capture_mode,omitempty"` +} + +type CaptureOptionsParameters struct { + + // Specifies the data to be captured. Should be one of Input or Output. + // +kubebuilder:validation:Optional + CaptureMode *string `json:"captureMode" tf:"capture_mode,omitempty"` +} + +type ClientConfigInitParameters struct { + + // The maximum number of concurrent requests sent by the SageMaker client to the model container. If no value is provided, Amazon SageMaker will choose an optimal value for you. + MaxConcurrentInvocationsPerInstance *float64 `json:"maxConcurrentInvocationsPerInstance,omitempty" tf:"max_concurrent_invocations_per_instance,omitempty"` +} + +type ClientConfigObservation struct { + + // The maximum number of concurrent requests sent by the SageMaker client to the model container. If no value is provided, Amazon SageMaker will choose an optimal value for you. + MaxConcurrentInvocationsPerInstance *float64 `json:"maxConcurrentInvocationsPerInstance,omitempty" tf:"max_concurrent_invocations_per_instance,omitempty"` +} + +type ClientConfigParameters struct { + + // The maximum number of concurrent requests sent by the SageMaker client to the model container. If no value is provided, Amazon SageMaker will choose an optimal value for you. + // +kubebuilder:validation:Optional + MaxConcurrentInvocationsPerInstance *float64 `json:"maxConcurrentInvocationsPerInstance,omitempty" tf:"max_concurrent_invocations_per_instance,omitempty"` +} + +type CoreDumpConfigInitParameters struct { + + // The Amazon S3 bucket to send the core dump to. + DestinationS3URI *string `json:"destinationS3Uri,omitempty" tf:"destination_s3_uri,omitempty"` + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. 
+ KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type CoreDumpConfigObservation struct { + + // The Amazon S3 bucket to send the core dump to. + DestinationS3URI *string `json:"destinationS3Uri,omitempty" tf:"destination_s3_uri,omitempty"` + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type CoreDumpConfigParameters struct { + + // The Amazon S3 bucket to send the core dump to. + // +kubebuilder:validation:Optional + DestinationS3URI *string `json:"destinationS3Uri" tf:"destination_s3_uri,omitempty"` + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type DataCaptureConfigInitParameters struct { + + // The content type headers to capture. Fields are documented below. + CaptureContentTypeHeader *CaptureContentTypeHeaderInitParameters `json:"captureContentTypeHeader,omitempty" tf:"capture_content_type_header,omitempty"` + + // Specifies what data to capture. Fields are documented below. + CaptureOptions []CaptureOptionsInitParameters `json:"captureOptions,omitempty" tf:"capture_options,omitempty"` + + // The URL for S3 location where the captured data is stored. + DestinationS3URI *string `json:"destinationS3Uri,omitempty" tf:"destination_s3_uri,omitempty"` + + // Flag to enable data capture. Defaults to false. + EnableCapture *bool `json:"enableCapture,omitempty" tf:"enable_capture,omitempty"` + + // Portion of data to capture. Should be between 0 and 100. 
+ InitialSamplingPercentage *float64 `json:"initialSamplingPercentage,omitempty" tf:"initial_sampling_percentage,omitempty"` + + // Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt the captured data on Amazon S3. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type DataCaptureConfigObservation struct { + + // The content type headers to capture. Fields are documented below. + CaptureContentTypeHeader *CaptureContentTypeHeaderObservation `json:"captureContentTypeHeader,omitempty" tf:"capture_content_type_header,omitempty"` + + // Specifies what data to capture. Fields are documented below. + CaptureOptions []CaptureOptionsObservation `json:"captureOptions,omitempty" tf:"capture_options,omitempty"` + + // The URL for S3 location where the captured data is stored. + DestinationS3URI *string `json:"destinationS3Uri,omitempty" tf:"destination_s3_uri,omitempty"` + + // Flag to enable data capture. Defaults to false. + EnableCapture *bool `json:"enableCapture,omitempty" tf:"enable_capture,omitempty"` + + // Portion of data to capture. Should be between 0 and 100. + InitialSamplingPercentage *float64 `json:"initialSamplingPercentage,omitempty" tf:"initial_sampling_percentage,omitempty"` + + // Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt the captured data on Amazon S3. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type DataCaptureConfigParameters struct { + + // The content type headers to capture. Fields are documented below. + // +kubebuilder:validation:Optional + CaptureContentTypeHeader *CaptureContentTypeHeaderParameters `json:"captureContentTypeHeader,omitempty" tf:"capture_content_type_header,omitempty"` + + // Specifies what data to capture. Fields are documented below. 
+ // +kubebuilder:validation:Optional + CaptureOptions []CaptureOptionsParameters `json:"captureOptions" tf:"capture_options,omitempty"` + + // The URL for S3 location where the captured data is stored. + // +kubebuilder:validation:Optional + DestinationS3URI *string `json:"destinationS3Uri" tf:"destination_s3_uri,omitempty"` + + // Flag to enable data capture. Defaults to false. + // +kubebuilder:validation:Optional + EnableCapture *bool `json:"enableCapture,omitempty" tf:"enable_capture,omitempty"` + + // Portion of data to capture. Should be between 0 and 100. + // +kubebuilder:validation:Optional + InitialSamplingPercentage *float64 `json:"initialSamplingPercentage" tf:"initial_sampling_percentage,omitempty"` + + // Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt the captured data on Amazon S3. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type EndpointConfigurationInitParameters struct { + + // Specifies configuration for how an endpoint performs asynchronous inference. + AsyncInferenceConfig *AsyncInferenceConfigInitParameters `json:"asyncInferenceConfig,omitempty" tf:"async_inference_config,omitempty"` + + // Specifies the parameters to capture input/output of SageMaker models endpoints. Fields are documented below. + DataCaptureConfig *DataCaptureConfigInitParameters `json:"dataCaptureConfig,omitempty" tf:"data_capture_config,omitempty"` + + // Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. 
+ // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // An list of ProductionVariant objects, one for each model that you want to host at this endpoint. Fields are documented below. + ProductionVariants []ProductionVariantsInitParameters `json:"productionVariants,omitempty" tf:"production_variants,omitempty"` + + // Array of ProductionVariant objects. There is one for each model that you want to host at this endpoint in shadow mode with production traffic replicated from the model specified on ProductionVariants. If you use this field, you can only specify one variant for ProductionVariants and one variant for ShadowProductionVariants. Fields are documented below. + ShadowProductionVariants []ShadowProductionVariantsInitParameters `json:"shadowProductionVariants,omitempty" tf:"shadow_production_variants,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EndpointConfigurationObservation struct { + + // The Amazon Resource Name (ARN) assigned by AWS to this endpoint configuration. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specifies configuration for how an endpoint performs asynchronous inference. + AsyncInferenceConfig *AsyncInferenceConfigObservation `json:"asyncInferenceConfig,omitempty" tf:"async_inference_config,omitempty"` + + // Specifies the parameters to capture input/output of SageMaker models endpoints. Fields are documented below. 
+ DataCaptureConfig *DataCaptureConfigObservation `json:"dataCaptureConfig,omitempty" tf:"data_capture_config,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // An list of ProductionVariant objects, one for each model that you want to host at this endpoint. Fields are documented below. + ProductionVariants []ProductionVariantsObservation `json:"productionVariants,omitempty" tf:"production_variants,omitempty"` + + // Array of ProductionVariant objects. There is one for each model that you want to host at this endpoint in shadow mode with production traffic replicated from the model specified on ProductionVariants. If you use this field, you can only specify one variant for ProductionVariants and one variant for ShadowProductionVariants. Fields are documented below. + ShadowProductionVariants []ShadowProductionVariantsObservation `json:"shadowProductionVariants,omitempty" tf:"shadow_production_variants,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type EndpointConfigurationParameters struct { + + // Specifies configuration for how an endpoint performs asynchronous inference. + // +kubebuilder:validation:Optional + AsyncInferenceConfig *AsyncInferenceConfigParameters `json:"asyncInferenceConfig,omitempty" tf:"async_inference_config,omitempty"` + + // Specifies the parameters to capture input/output of SageMaker models endpoints. 
Fields are documented below. + // +kubebuilder:validation:Optional + DataCaptureConfig *DataCaptureConfigParameters `json:"dataCaptureConfig,omitempty" tf:"data_capture_config,omitempty"` + + // Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // An list of ProductionVariant objects, one for each model that you want to host at this endpoint. Fields are documented below. + // +kubebuilder:validation:Optional + ProductionVariants []ProductionVariantsParameters `json:"productionVariants,omitempty" tf:"production_variants,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Array of ProductionVariant objects. There is one for each model that you want to host at this endpoint in shadow mode with production traffic replicated from the model specified on ProductionVariants. If you use this field, you can only specify one variant for ProductionVariants and one variant for ShadowProductionVariants. Fields are documented below. + // +kubebuilder:validation:Optional + ShadowProductionVariants []ShadowProductionVariantsParameters `json:"shadowProductionVariants,omitempty" tf:"shadow_production_variants,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type NotificationConfigInitParameters struct { + + // Amazon SNS topic to post a notification to when inference fails. If no topic is provided, no notification is sent on failure. + ErrorTopic *string `json:"errorTopic,omitempty" tf:"error_topic,omitempty"` + + // The Amazon SNS topics where you want the inference response to be included. Valid values are SUCCESS_NOTIFICATION_TOPIC and ERROR_NOTIFICATION_TOPIC. + // +listType=set + IncludeInferenceResponseIn []*string `json:"includeInferenceResponseIn,omitempty" tf:"include_inference_response_in,omitempty"` + + // Amazon SNS topic to post a notification to when inference completes successfully. If no topic is provided, no notification is sent on success. + SuccessTopic *string `json:"successTopic,omitempty" tf:"success_topic,omitempty"` +} + +type NotificationConfigObservation struct { + + // Amazon SNS topic to post a notification to when inference fails. If no topic is provided, no notification is sent on failure. + ErrorTopic *string `json:"errorTopic,omitempty" tf:"error_topic,omitempty"` + + // The Amazon SNS topics where you want the inference response to be included. Valid values are SUCCESS_NOTIFICATION_TOPIC and ERROR_NOTIFICATION_TOPIC. + // +listType=set + IncludeInferenceResponseIn []*string `json:"includeInferenceResponseIn,omitempty" tf:"include_inference_response_in,omitempty"` + + // Amazon SNS topic to post a notification to when inference completes successfully. If no topic is provided, no notification is sent on success. + SuccessTopic *string `json:"successTopic,omitempty" tf:"success_topic,omitempty"` +} + +type NotificationConfigParameters struct { + + // Amazon SNS topic to post a notification to when inference fails. If no topic is provided, no notification is sent on failure. 
+ // +kubebuilder:validation:Optional + ErrorTopic *string `json:"errorTopic,omitempty" tf:"error_topic,omitempty"` + + // The Amazon SNS topics where you want the inference response to be included. Valid values are SUCCESS_NOTIFICATION_TOPIC and ERROR_NOTIFICATION_TOPIC. + // +kubebuilder:validation:Optional + // +listType=set + IncludeInferenceResponseIn []*string `json:"includeInferenceResponseIn,omitempty" tf:"include_inference_response_in,omitempty"` + + // Amazon SNS topic to post a notification to when inference completes successfully. If no topic is provided, no notification is sent on success. + // +kubebuilder:validation:Optional + SuccessTopic *string `json:"successTopic,omitempty" tf:"success_topic,omitempty"` +} + +type ProductionVariantsInitParameters struct { + + // The size of the Elastic Inference (EI) instance to use for the production variant. + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` + + // The timeout value, in seconds, for your inference container to pass health check by SageMaker Hosting. For more information about health check, see How Your Container Should Respond to Health Check (Ping) Requests. Valid values between 60 and 3600. + ContainerStartupHealthCheckTimeoutInSeconds *float64 `json:"containerStartupHealthCheckTimeoutInSeconds,omitempty" tf:"container_startup_health_check_timeout_in_seconds,omitempty"` + + // Specifies configuration for a core dump from the model container when the process crashes. Fields are documented below. + CoreDumpConfig *CoreDumpConfigInitParameters `json:"coreDumpConfig,omitempty" tf:"core_dump_config,omitempty"` + + // You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoints. 
+ EnableSsmAccess *bool `json:"enableSsmAccess,omitempty" tf:"enable_ssm_access,omitempty"` + + // Initial number of instances used for auto-scaling. + InitialInstanceCount *float64 `json:"initialInstanceCount,omitempty" tf:"initial_instance_count,omitempty"` + + // Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to 1.0. + InitialVariantWeight *float64 `json:"initialVariantWeight,omitempty" tf:"initial_variant_weight,omitempty"` + + // The type of instance to start. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The timeout value, in seconds, to download and extract the model that you want to host from Amazon S3 to the individual inference instance associated with this production variant. Valid values between 60 and 3600. + ModelDataDownloadTimeoutInSeconds *float64 `json:"modelDataDownloadTimeoutInSeconds,omitempty" tf:"model_data_download_timeout_in_seconds,omitempty"` + + // The name of the model to use. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Model + ModelName *string `json:"modelName,omitempty" tf:"model_name,omitempty"` + + // Reference to a Model in sagemaker to populate modelName. + // +kubebuilder:validation:Optional + ModelNameRef *v1.Reference `json:"modelNameRef,omitempty" tf:"-"` + + // Selector for a Model in sagemaker to populate modelName. + // +kubebuilder:validation:Optional + ModelNameSelector *v1.Selector `json:"modelNameSelector,omitempty" tf:"-"` + + // Sets how the endpoint routes incoming traffic. See routing_config below. + RoutingConfig []RoutingConfigInitParameters `json:"routingConfig,omitempty" tf:"routing_config,omitempty"` + + // Specifies configuration for how an endpoint performs asynchronous inference. 
+ ServerlessConfig *ServerlessConfigInitParameters `json:"serverlessConfig,omitempty" tf:"serverless_config,omitempty"` + + // The name of the variant. + VariantName *string `json:"variantName,omitempty" tf:"variant_name,omitempty"` + + // The size, in GB, of the ML storage volume attached to individual inference instance associated with the production variant. Valid values between 1 and 512. + VolumeSizeInGb *float64 `json:"volumeSizeInGb,omitempty" tf:"volume_size_in_gb,omitempty"` +} + +type ProductionVariantsObservation struct { + + // The size of the Elastic Inference (EI) instance to use for the production variant. + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` + + // The timeout value, in seconds, for your inference container to pass health check by SageMaker Hosting. For more information about health check, see How Your Container Should Respond to Health Check (Ping) Requests. Valid values between 60 and 3600. + ContainerStartupHealthCheckTimeoutInSeconds *float64 `json:"containerStartupHealthCheckTimeoutInSeconds,omitempty" tf:"container_startup_health_check_timeout_in_seconds,omitempty"` + + // Specifies configuration for a core dump from the model container when the process crashes. Fields are documented below. + CoreDumpConfig *CoreDumpConfigObservation `json:"coreDumpConfig,omitempty" tf:"core_dump_config,omitempty"` + + // You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoints. + EnableSsmAccess *bool `json:"enableSsmAccess,omitempty" tf:"enable_ssm_access,omitempty"` + + // Initial number of instances used for auto-scaling. 
+ InitialInstanceCount *float64 `json:"initialInstanceCount,omitempty" tf:"initial_instance_count,omitempty"` + + // Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to 1.0. + InitialVariantWeight *float64 `json:"initialVariantWeight,omitempty" tf:"initial_variant_weight,omitempty"` + + // The type of instance to start. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The timeout value, in seconds, to download and extract the model that you want to host from Amazon S3 to the individual inference instance associated with this production variant. Valid values between 60 and 3600. + ModelDataDownloadTimeoutInSeconds *float64 `json:"modelDataDownloadTimeoutInSeconds,omitempty" tf:"model_data_download_timeout_in_seconds,omitempty"` + + // The name of the model to use. + ModelName *string `json:"modelName,omitempty" tf:"model_name,omitempty"` + + // Sets how the endpoint routes incoming traffic. See routing_config below. + RoutingConfig []RoutingConfigObservation `json:"routingConfig,omitempty" tf:"routing_config,omitempty"` + + // Specifies configuration for how an endpoint performs asynchronous inference. + ServerlessConfig *ServerlessConfigObservation `json:"serverlessConfig,omitempty" tf:"serverless_config,omitempty"` + + // The name of the variant. + VariantName *string `json:"variantName,omitempty" tf:"variant_name,omitempty"` + + // The size, in GB, of the ML storage volume attached to individual inference instance associated with the production variant. Valid values between 1 and 512. + VolumeSizeInGb *float64 `json:"volumeSizeInGb,omitempty" tf:"volume_size_in_gb,omitempty"` +} + +type ProductionVariantsParameters struct { + + // The size of the Elastic Inference (EI) instance to use for the production variant. 
+ // +kubebuilder:validation:Optional + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` + + // The timeout value, in seconds, for your inference container to pass health check by SageMaker Hosting. For more information about health check, see How Your Container Should Respond to Health Check (Ping) Requests. Valid values between 60 and 3600. + // +kubebuilder:validation:Optional + ContainerStartupHealthCheckTimeoutInSeconds *float64 `json:"containerStartupHealthCheckTimeoutInSeconds,omitempty" tf:"container_startup_health_check_timeout_in_seconds,omitempty"` + + // Specifies configuration for a core dump from the model container when the process crashes. Fields are documented below. + // +kubebuilder:validation:Optional + CoreDumpConfig *CoreDumpConfigParameters `json:"coreDumpConfig,omitempty" tf:"core_dump_config,omitempty"` + + // You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoints. + // +kubebuilder:validation:Optional + EnableSsmAccess *bool `json:"enableSsmAccess,omitempty" tf:"enable_ssm_access,omitempty"` + + // Initial number of instances used for auto-scaling. + // +kubebuilder:validation:Optional + InitialInstanceCount *float64 `json:"initialInstanceCount,omitempty" tf:"initial_instance_count,omitempty"` + + // Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to 1.0. + // +kubebuilder:validation:Optional + InitialVariantWeight *float64 `json:"initialVariantWeight,omitempty" tf:"initial_variant_weight,omitempty"` + + // The type of instance to start. 
+ // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The timeout value, in seconds, to download and extract the model that you want to host from Amazon S3 to the individual inference instance associated with this production variant. Valid values between 60 and 3600. + // +kubebuilder:validation:Optional + ModelDataDownloadTimeoutInSeconds *float64 `json:"modelDataDownloadTimeoutInSeconds,omitempty" tf:"model_data_download_timeout_in_seconds,omitempty"` + + // The name of the model to use. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Model + // +kubebuilder:validation:Optional + ModelName *string `json:"modelName,omitempty" tf:"model_name,omitempty"` + + // Reference to a Model in sagemaker to populate modelName. + // +kubebuilder:validation:Optional + ModelNameRef *v1.Reference `json:"modelNameRef,omitempty" tf:"-"` + + // Selector for a Model in sagemaker to populate modelName. + // +kubebuilder:validation:Optional + ModelNameSelector *v1.Selector `json:"modelNameSelector,omitempty" tf:"-"` + + // Sets how the endpoint routes incoming traffic. See routing_config below. + // +kubebuilder:validation:Optional + RoutingConfig []RoutingConfigParameters `json:"routingConfig,omitempty" tf:"routing_config,omitempty"` + + // Specifies configuration for how an endpoint performs asynchronous inference. + // +kubebuilder:validation:Optional + ServerlessConfig *ServerlessConfigParameters `json:"serverlessConfig,omitempty" tf:"serverless_config,omitempty"` + + // The name of the variant. + // +kubebuilder:validation:Optional + VariantName *string `json:"variantName,omitempty" tf:"variant_name,omitempty"` + + // The size, in GB, of the ML storage volume attached to individual inference instance associated with the production variant. Valid values between 1 and 512. 
+ // +kubebuilder:validation:Optional + VolumeSizeInGb *float64 `json:"volumeSizeInGb,omitempty" tf:"volume_size_in_gb,omitempty"` +} + +type RoutingConfigInitParameters struct { + + // Sets how the endpoint routes incoming traffic. Valid values are LEAST_OUTSTANDING_REQUESTS and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests to the specific instances that have more capacity to process them. RANDOM routes each request to a randomly chosen instance. + RoutingStrategy *string `json:"routingStrategy,omitempty" tf:"routing_strategy,omitempty"` +} + +type RoutingConfigObservation struct { + + // Sets how the endpoint routes incoming traffic. Valid values are LEAST_OUTSTANDING_REQUESTS and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests to the specific instances that have more capacity to process them. RANDOM routes each request to a randomly chosen instance. + RoutingStrategy *string `json:"routingStrategy,omitempty" tf:"routing_strategy,omitempty"` +} + +type RoutingConfigParameters struct { + + // Sets how the endpoint routes incoming traffic. Valid values are LEAST_OUTSTANDING_REQUESTS and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests to the specific instances that have more capacity to process them. RANDOM routes each request to a randomly chosen instance. + // +kubebuilder:validation:Optional + RoutingStrategy *string `json:"routingStrategy" tf:"routing_strategy,omitempty"` +} + +type ServerlessConfigInitParameters struct { + + // The maximum number of concurrent invocations your serverless endpoint can process. Valid values are between 1 and 200. + MaxConcurrency *float64 `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The memory size of your serverless endpoint. Valid values are in 1 GB increments: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB. 
+ MemorySizeInMb *float64 `json:"memorySizeInMb,omitempty" tf:"memory_size_in_mb,omitempty"` + + // The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to max_concurrency. Valid values are between 1 and 200. + ProvisionedConcurrency *float64 `json:"provisionedConcurrency,omitempty" tf:"provisioned_concurrency,omitempty"` +} + +type ServerlessConfigObservation struct { + + // The maximum number of concurrent invocations your serverless endpoint can process. Valid values are between 1 and 200. + MaxConcurrency *float64 `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The memory size of your serverless endpoint. Valid values are in 1 GB increments: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB. + MemorySizeInMb *float64 `json:"memorySizeInMb,omitempty" tf:"memory_size_in_mb,omitempty"` + + // The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to max_concurrency. Valid values are between 1 and 200. + ProvisionedConcurrency *float64 `json:"provisionedConcurrency,omitempty" tf:"provisioned_concurrency,omitempty"` +} + +type ServerlessConfigParameters struct { + + // The maximum number of concurrent invocations your serverless endpoint can process. Valid values are between 1 and 200. + // +kubebuilder:validation:Optional + MaxConcurrency *float64 `json:"maxConcurrency" tf:"max_concurrency,omitempty"` + + // The memory size of your serverless endpoint. Valid values are in 1 GB increments: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB. + // +kubebuilder:validation:Optional + MemorySizeInMb *float64 `json:"memorySizeInMb" tf:"memory_size_in_mb,omitempty"` + + // The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to max_concurrency. Valid values are between 1 and 200. 
+ // +kubebuilder:validation:Optional + ProvisionedConcurrency *float64 `json:"provisionedConcurrency,omitempty" tf:"provisioned_concurrency,omitempty"` +} + +type ShadowProductionVariantsCoreDumpConfigInitParameters struct { + + // The Amazon S3 bucket to send the core dump to. + DestinationS3URI *string `json:"destinationS3Uri,omitempty" tf:"destination_s3_uri,omitempty"` + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type ShadowProductionVariantsCoreDumpConfigObservation struct { + + // The Amazon S3 bucket to send the core dump to. + DestinationS3URI *string `json:"destinationS3Uri,omitempty" tf:"destination_s3_uri,omitempty"` + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type ShadowProductionVariantsCoreDumpConfigParameters struct { + + // The Amazon S3 bucket to send the core dump to. + // +kubebuilder:validation:Optional + DestinationS3URI *string `json:"destinationS3Uri" tf:"destination_s3_uri,omitempty"` + + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId" tf:"kms_key_id,omitempty"` +} + +type ShadowProductionVariantsInitParameters struct { + + // The size of the Elastic Inference (EI) instance to use for the production variant. + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` + + // The timeout value, in seconds, for your inference container to pass health check by SageMaker Hosting. 
For more information about health check, see How Your Container Should Respond to Health Check (Ping) Requests. Valid values between 60 and 3600. + ContainerStartupHealthCheckTimeoutInSeconds *float64 `json:"containerStartupHealthCheckTimeoutInSeconds,omitempty" tf:"container_startup_health_check_timeout_in_seconds,omitempty"` + + // Specifies configuration for a core dump from the model container when the process crashes. Fields are documented below. + CoreDumpConfig *ShadowProductionVariantsCoreDumpConfigInitParameters `json:"coreDumpConfig,omitempty" tf:"core_dump_config,omitempty"` + + // You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoints. + EnableSsmAccess *bool `json:"enableSsmAccess,omitempty" tf:"enable_ssm_access,omitempty"` + + // Initial number of instances used for auto-scaling. + InitialInstanceCount *float64 `json:"initialInstanceCount,omitempty" tf:"initial_instance_count,omitempty"` + + // Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to 1.0. + InitialVariantWeight *float64 `json:"initialVariantWeight,omitempty" tf:"initial_variant_weight,omitempty"` + + // The type of instance to start. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The timeout value, in seconds, to download and extract the model that you want to host from Amazon S3 to the individual inference instance associated with this production variant. Valid values between 60 and 3600. + ModelDataDownloadTimeoutInSeconds *float64 `json:"modelDataDownloadTimeoutInSeconds,omitempty" tf:"model_data_download_timeout_in_seconds,omitempty"` + + // The name of the model to use. 
+ ModelName *string `json:"modelName,omitempty" tf:"model_name,omitempty"` + + // Sets how the endpoint routes incoming traffic. See routing_config below. + RoutingConfig []ShadowProductionVariantsRoutingConfigInitParameters `json:"routingConfig,omitempty" tf:"routing_config,omitempty"` + + // Specifies configuration for how an endpoint performs asynchronous inference. + ServerlessConfig *ShadowProductionVariantsServerlessConfigInitParameters `json:"serverlessConfig,omitempty" tf:"serverless_config,omitempty"` + + // The name of the variant. + VariantName *string `json:"variantName,omitempty" tf:"variant_name,omitempty"` + + // The size, in GB, of the ML storage volume attached to individual inference instance associated with the production variant. Valid values between 1 and 512. + VolumeSizeInGb *float64 `json:"volumeSizeInGb,omitempty" tf:"volume_size_in_gb,omitempty"` +} + +type ShadowProductionVariantsObservation struct { + + // The size of the Elastic Inference (EI) instance to use for the production variant. + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` + + // The timeout value, in seconds, for your inference container to pass health check by SageMaker Hosting. For more information about health check, see How Your Container Should Respond to Health Check (Ping) Requests. Valid values between 60 and 3600. + ContainerStartupHealthCheckTimeoutInSeconds *float64 `json:"containerStartupHealthCheckTimeoutInSeconds,omitempty" tf:"container_startup_health_check_timeout_in_seconds,omitempty"` + + // Specifies configuration for a core dump from the model container when the process crashes. Fields are documented below. + CoreDumpConfig *ShadowProductionVariantsCoreDumpConfigObservation `json:"coreDumpConfig,omitempty" tf:"core_dump_config,omitempty"` + + // You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. 
By default, SSM access is disabled for all production variants behind an endpoints. + EnableSsmAccess *bool `json:"enableSsmAccess,omitempty" tf:"enable_ssm_access,omitempty"` + + // Initial number of instances used for auto-scaling. + InitialInstanceCount *float64 `json:"initialInstanceCount,omitempty" tf:"initial_instance_count,omitempty"` + + // Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to 1.0. + InitialVariantWeight *float64 `json:"initialVariantWeight,omitempty" tf:"initial_variant_weight,omitempty"` + + // The type of instance to start. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The timeout value, in seconds, to download and extract the model that you want to host from Amazon S3 to the individual inference instance associated with this production variant. Valid values between 60 and 3600. + ModelDataDownloadTimeoutInSeconds *float64 `json:"modelDataDownloadTimeoutInSeconds,omitempty" tf:"model_data_download_timeout_in_seconds,omitempty"` + + // The name of the model to use. + ModelName *string `json:"modelName,omitempty" tf:"model_name,omitempty"` + + // Sets how the endpoint routes incoming traffic. See routing_config below. + RoutingConfig []ShadowProductionVariantsRoutingConfigObservation `json:"routingConfig,omitempty" tf:"routing_config,omitempty"` + + // Specifies configuration for how an endpoint performs asynchronous inference. + ServerlessConfig *ShadowProductionVariantsServerlessConfigObservation `json:"serverlessConfig,omitempty" tf:"serverless_config,omitempty"` + + // The name of the variant. + VariantName *string `json:"variantName,omitempty" tf:"variant_name,omitempty"` + + // The size, in GB, of the ML storage volume attached to individual inference instance associated with the production variant. Valid values between 1 and 512. 
+ VolumeSizeInGb *float64 `json:"volumeSizeInGb,omitempty" tf:"volume_size_in_gb,omitempty"` +} + +type ShadowProductionVariantsParameters struct { + + // The size of the Elastic Inference (EI) instance to use for the production variant. + // +kubebuilder:validation:Optional + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` + + // The timeout value, in seconds, for your inference container to pass health check by SageMaker Hosting. For more information about health check, see How Your Container Should Respond to Health Check (Ping) Requests. Valid values between 60 and 3600. + // +kubebuilder:validation:Optional + ContainerStartupHealthCheckTimeoutInSeconds *float64 `json:"containerStartupHealthCheckTimeoutInSeconds,omitempty" tf:"container_startup_health_check_timeout_in_seconds,omitempty"` + + // Specifies configuration for a core dump from the model container when the process crashes. Fields are documented below. + // +kubebuilder:validation:Optional + CoreDumpConfig *ShadowProductionVariantsCoreDumpConfigParameters `json:"coreDumpConfig,omitempty" tf:"core_dump_config,omitempty"` + + // You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoints. + // +kubebuilder:validation:Optional + EnableSsmAccess *bool `json:"enableSsmAccess,omitempty" tf:"enable_ssm_access,omitempty"` + + // Initial number of instances used for auto-scaling. + // +kubebuilder:validation:Optional + InitialInstanceCount *float64 `json:"initialInstanceCount,omitempty" tf:"initial_instance_count,omitempty"` + + // Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to 1.0. 
+ // +kubebuilder:validation:Optional + InitialVariantWeight *float64 `json:"initialVariantWeight,omitempty" tf:"initial_variant_weight,omitempty"` + + // The type of instance to start. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The timeout value, in seconds, to download and extract the model that you want to host from Amazon S3 to the individual inference instance associated with this production variant. Valid values between 60 and 3600. + // +kubebuilder:validation:Optional + ModelDataDownloadTimeoutInSeconds *float64 `json:"modelDataDownloadTimeoutInSeconds,omitempty" tf:"model_data_download_timeout_in_seconds,omitempty"` + + // The name of the model to use. + // +kubebuilder:validation:Optional + ModelName *string `json:"modelName" tf:"model_name,omitempty"` + + // Sets how the endpoint routes incoming traffic. See routing_config below. + // +kubebuilder:validation:Optional + RoutingConfig []ShadowProductionVariantsRoutingConfigParameters `json:"routingConfig,omitempty" tf:"routing_config,omitempty"` + + // Specifies configuration for how an endpoint performs asynchronous inference. + // +kubebuilder:validation:Optional + ServerlessConfig *ShadowProductionVariantsServerlessConfigParameters `json:"serverlessConfig,omitempty" tf:"serverless_config,omitempty"` + + // The name of the variant. + // +kubebuilder:validation:Optional + VariantName *string `json:"variantName,omitempty" tf:"variant_name,omitempty"` + + // The size, in GB, of the ML storage volume attached to individual inference instance associated with the production variant. Valid values between 1 and 512. + // +kubebuilder:validation:Optional + VolumeSizeInGb *float64 `json:"volumeSizeInGb,omitempty" tf:"volume_size_in_gb,omitempty"` +} + +type ShadowProductionVariantsRoutingConfigInitParameters struct { + + // Sets how the endpoint routes incoming traffic. Valid values are LEAST_OUTSTANDING_REQUESTS and RANDOM. 
LEAST_OUTSTANDING_REQUESTS routes requests to the specific instances that have more capacity to process them. RANDOM routes each request to a randomly chosen instance. + RoutingStrategy *string `json:"routingStrategy,omitempty" tf:"routing_strategy,omitempty"` +} + +type ShadowProductionVariantsRoutingConfigObservation struct { + + // Sets how the endpoint routes incoming traffic. Valid values are LEAST_OUTSTANDING_REQUESTS and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests to the specific instances that have more capacity to process them. RANDOM routes each request to a randomly chosen instance. + RoutingStrategy *string `json:"routingStrategy,omitempty" tf:"routing_strategy,omitempty"` +} + +type ShadowProductionVariantsRoutingConfigParameters struct { + + // Sets how the endpoint routes incoming traffic. Valid values are LEAST_OUTSTANDING_REQUESTS and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests to the specific instances that have more capacity to process them. RANDOM routes each request to a randomly chosen instance. + // +kubebuilder:validation:Optional + RoutingStrategy *string `json:"routingStrategy" tf:"routing_strategy,omitempty"` +} + +type ShadowProductionVariantsServerlessConfigInitParameters struct { + + // The maximum number of concurrent invocations your serverless endpoint can process. Valid values are between 1 and 200. + MaxConcurrency *float64 `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The memory size of your serverless endpoint. Valid values are in 1 GB increments: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB. + MemorySizeInMb *float64 `json:"memorySizeInMb,omitempty" tf:"memory_size_in_mb,omitempty"` + + // The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to max_concurrency. Valid values are between 1 and 200. 
+ ProvisionedConcurrency *float64 `json:"provisionedConcurrency,omitempty" tf:"provisioned_concurrency,omitempty"` +} + +type ShadowProductionVariantsServerlessConfigObservation struct { + + // The maximum number of concurrent invocations your serverless endpoint can process. Valid values are between 1 and 200. + MaxConcurrency *float64 `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The memory size of your serverless endpoint. Valid values are in 1 GB increments: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB. + MemorySizeInMb *float64 `json:"memorySizeInMb,omitempty" tf:"memory_size_in_mb,omitempty"` + + // The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to max_concurrency. Valid values are between 1 and 200. + ProvisionedConcurrency *float64 `json:"provisionedConcurrency,omitempty" tf:"provisioned_concurrency,omitempty"` +} + +type ShadowProductionVariantsServerlessConfigParameters struct { + + // The maximum number of concurrent invocations your serverless endpoint can process. Valid values are between 1 and 200. + // +kubebuilder:validation:Optional + MaxConcurrency *float64 `json:"maxConcurrency" tf:"max_concurrency,omitempty"` + + // The memory size of your serverless endpoint. Valid values are in 1 GB increments: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB. + // +kubebuilder:validation:Optional + MemorySizeInMb *float64 `json:"memorySizeInMb" tf:"memory_size_in_mb,omitempty"` + + // The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to max_concurrency. Valid values are between 1 and 200. 
+ // +kubebuilder:validation:Optional + ProvisionedConcurrency *float64 `json:"provisionedConcurrency,omitempty" tf:"provisioned_concurrency,omitempty"` +} + +// EndpointConfigurationSpec defines the desired state of EndpointConfiguration +type EndpointConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EndpointConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EndpointConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// EndpointConfigurationStatus defines the observed state of EndpointConfiguration. +type EndpointConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EndpointConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EndpointConfiguration is the Schema for the EndpointConfigurations API. Provides a SageMaker Endpoint Configuration resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type EndpointConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.productionVariants) || (has(self.initProvider) && has(self.initProvider.productionVariants))",message="spec.forProvider.productionVariants is a required parameter" + Spec EndpointConfigurationSpec `json:"spec"` + Status EndpointConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EndpointConfigurationList contains a list of EndpointConfigurations +type EndpointConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EndpointConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + EndpointConfiguration_Kind = "EndpointConfiguration" + EndpointConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: EndpointConfiguration_Kind}.String() + EndpointConfiguration_KindAPIVersion = EndpointConfiguration_Kind + "." 
+ CRDGroupVersion.String() + EndpointConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(EndpointConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&EndpointConfiguration{}, &EndpointConfigurationList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_featuregroup_terraformed.go b/apis/sagemaker/v1beta2/zz_featuregroup_terraformed.go new file mode 100755 index 0000000000..b901701765 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_featuregroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FeatureGroup +func (mg *FeatureGroup) GetTerraformResourceType() string { + return "aws_sagemaker_feature_group" +} + +// GetConnectionDetailsMapping for this FeatureGroup +func (tr *FeatureGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FeatureGroup +func (tr *FeatureGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FeatureGroup +func (tr *FeatureGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FeatureGroup +func (tr *FeatureGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FeatureGroup +func (tr *FeatureGroup) GetParameters() (map[string]any, error) { + 
p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FeatureGroup +func (tr *FeatureGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FeatureGroup +func (tr *FeatureGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this FeatureGroup +func (tr *FeatureGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FeatureGroup using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *FeatureGroup) LateInitialize(attrs []byte) (bool, error) { + params := &FeatureGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FeatureGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_featuregroup_types.go b/apis/sagemaker/v1beta2/zz_featuregroup_types.go new file mode 100755 index 0000000000..eeb6957062 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_featuregroup_types.go @@ -0,0 +1,456 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataCatalogConfigInitParameters struct { + + // The name of the Glue table catalog. + Catalog *string `json:"catalog,omitempty" tf:"catalog,omitempty"` + + // The name of the Glue table database. + Database *string `json:"database,omitempty" tf:"database,omitempty"` + + // The name of the Glue table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type DataCatalogConfigObservation struct { + + // The name of the Glue table catalog. + Catalog *string `json:"catalog,omitempty" tf:"catalog,omitempty"` + + // The name of the Glue table database. + Database *string `json:"database,omitempty" tf:"database,omitempty"` + + // The name of the Glue table. 
+ TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type DataCatalogConfigParameters struct { + + // The name of the Glue table catalog. + // +kubebuilder:validation:Optional + Catalog *string `json:"catalog,omitempty" tf:"catalog,omitempty"` + + // The name of the Glue table database. + // +kubebuilder:validation:Optional + Database *string `json:"database,omitempty" tf:"database,omitempty"` + + // The name of the Glue table. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type FeatureDefinitionInitParameters struct { + + // The name of a feature. feature_name cannot be any of the following: is_deleted, write_time, api_invocation_time. + FeatureName *string `json:"featureName,omitempty" tf:"feature_name,omitempty"` + + // The value type of a feature. Valid values are Integral, Fractional, or String. + FeatureType *string `json:"featureType,omitempty" tf:"feature_type,omitempty"` +} + +type FeatureDefinitionObservation struct { + + // The name of a feature. feature_name cannot be any of the following: is_deleted, write_time, api_invocation_time. + FeatureName *string `json:"featureName,omitempty" tf:"feature_name,omitempty"` + + // The value type of a feature. Valid values are Integral, Fractional, or String. + FeatureType *string `json:"featureType,omitempty" tf:"feature_type,omitempty"` +} + +type FeatureDefinitionParameters struct { + + // The name of a feature. feature_name cannot be any of the following: is_deleted, write_time, api_invocation_time. + // +kubebuilder:validation:Optional + FeatureName *string `json:"featureName,omitempty" tf:"feature_name,omitempty"` + + // The value type of a feature. Valid values are Integral, Fractional, or String. + // +kubebuilder:validation:Optional + FeatureType *string `json:"featureType,omitempty" tf:"feature_type,omitempty"` +} + +type FeatureGroupInitParameters struct { + + // A free-form description of a Feature Group. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the feature that stores the EventTime of a Record in a Feature Group. + EventTimeFeatureName *string `json:"eventTimeFeatureName,omitempty" tf:"event_time_feature_name,omitempty"` + + // A list of Feature names and types. See Feature Definition Below. + FeatureDefinition []FeatureDefinitionInitParameters `json:"featureDefinition,omitempty" tf:"feature_definition,omitempty"` + + // The Offline Feature Store Configuration. See Offline Store Config Below. + OfflineStoreConfig *OfflineStoreConfigInitParameters `json:"offlineStoreConfig,omitempty" tf:"offline_store_config,omitempty"` + + // The Online Feature Store Configuration. See Online Store Config Below. + OnlineStoreConfig *OnlineStoreConfigInitParameters `json:"onlineStoreConfig,omitempty" tf:"online_store_config,omitempty"` + + // The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store. + RecordIdentifierFeatureName *string `json:"recordIdentifierFeatureName,omitempty" tf:"record_identifier_feature_name,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an offline_store_config is provided. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FeatureGroupObservation struct { + + // The Amazon Resource Name (ARN) assigned by AWS to this feature_group. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A free-form description of a Feature Group. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the feature that stores the EventTime of a Record in a Feature Group. + EventTimeFeatureName *string `json:"eventTimeFeatureName,omitempty" tf:"event_time_feature_name,omitempty"` + + // A list of Feature names and types. See Feature Definition Below. + FeatureDefinition []FeatureDefinitionObservation `json:"featureDefinition,omitempty" tf:"feature_definition,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Offline Feature Store Configuration. See Offline Store Config Below. + OfflineStoreConfig *OfflineStoreConfigObservation `json:"offlineStoreConfig,omitempty" tf:"offline_store_config,omitempty"` + + // The Online Feature Store Configuration. See Online Store Config Below. + OnlineStoreConfig *OnlineStoreConfigObservation `json:"onlineStoreConfig,omitempty" tf:"online_store_config,omitempty"` + + // The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store. + RecordIdentifierFeatureName *string `json:"recordIdentifierFeatureName,omitempty" tf:"record_identifier_feature_name,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an offline_store_config is provided. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type FeatureGroupParameters struct { + + // A free-form description of a Feature Group. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the feature that stores the EventTime of a Record in a Feature Group. + // +kubebuilder:validation:Optional + EventTimeFeatureName *string `json:"eventTimeFeatureName,omitempty" tf:"event_time_feature_name,omitempty"` + + // A list of Feature names and types. See Feature Definition Below. + // +kubebuilder:validation:Optional + FeatureDefinition []FeatureDefinitionParameters `json:"featureDefinition,omitempty" tf:"feature_definition,omitempty"` + + // The Offline Feature Store Configuration. See Offline Store Config Below. + // +kubebuilder:validation:Optional + OfflineStoreConfig *OfflineStoreConfigParameters `json:"offlineStoreConfig,omitempty" tf:"offline_store_config,omitempty"` + + // The Online Feature Store Configuration. See Online Store Config Below. + // +kubebuilder:validation:Optional + OnlineStoreConfig *OnlineStoreConfigParameters `json:"onlineStoreConfig,omitempty" tf:"online_store_config,omitempty"` + + // The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store. + // +kubebuilder:validation:Optional + RecordIdentifierFeatureName *string `json:"recordIdentifierFeatureName,omitempty" tf:"record_identifier_feature_name,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an offline_store_config is provided. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type OfflineStoreConfigInitParameters struct { + + // The meta data of the Glue table that is autogenerated when an OfflineStore is created. See Data Catalog Config Below. + DataCatalogConfig *DataCatalogConfigInitParameters `json:"dataCatalogConfig,omitempty" tf:"data_catalog_config,omitempty"` + + // Set to true to disable the automatic creation of an AWS Glue table when configuring an OfflineStore. + DisableGlueTableCreation *bool `json:"disableGlueTableCreation,omitempty" tf:"disable_glue_table_creation,omitempty"` + + // The Amazon Simple Storage (Amazon S3) location of OfflineStore. See S3 Storage Config Below. + S3StorageConfig *S3StorageConfigInitParameters `json:"s3StorageConfig,omitempty" tf:"s3_storage_config,omitempty"` + + // Format for the offline store table. Supported formats are Glue (Default) and Apache Iceberg (https://iceberg.apache.org/). 
+ TableFormat *string `json:"tableFormat,omitempty" tf:"table_format,omitempty"` +} + +type OfflineStoreConfigObservation struct { + + // The meta data of the Glue table that is autogenerated when an OfflineStore is created. See Data Catalog Config Below. + DataCatalogConfig *DataCatalogConfigObservation `json:"dataCatalogConfig,omitempty" tf:"data_catalog_config,omitempty"` + + // Set to true to disable the automatic creation of an AWS Glue table when configuring an OfflineStore. + DisableGlueTableCreation *bool `json:"disableGlueTableCreation,omitempty" tf:"disable_glue_table_creation,omitempty"` + + // The Amazon Simple Storage (Amazon S3) location of OfflineStore. See S3 Storage Config Below. + S3StorageConfig *S3StorageConfigObservation `json:"s3StorageConfig,omitempty" tf:"s3_storage_config,omitempty"` + + // Format for the offline store table. Supported formats are Glue (Default) and Apache Iceberg (https://iceberg.apache.org/). + TableFormat *string `json:"tableFormat,omitempty" tf:"table_format,omitempty"` +} + +type OfflineStoreConfigParameters struct { + + // The meta data of the Glue table that is autogenerated when an OfflineStore is created. See Data Catalog Config Below. + // +kubebuilder:validation:Optional + DataCatalogConfig *DataCatalogConfigParameters `json:"dataCatalogConfig,omitempty" tf:"data_catalog_config,omitempty"` + + // Set to true to disable the automatic creation of an AWS Glue table when configuring an OfflineStore. + // +kubebuilder:validation:Optional + DisableGlueTableCreation *bool `json:"disableGlueTableCreation,omitempty" tf:"disable_glue_table_creation,omitempty"` + + // The Amazon Simple Storage (Amazon S3) location of OfflineStore. See S3 Storage Config Below. + // +kubebuilder:validation:Optional + S3StorageConfig *S3StorageConfigParameters `json:"s3StorageConfig" tf:"s3_storage_config,omitempty"` + + // Format for the offline store table. Supported formats are Glue (Default) and Apache Iceberg (https://iceberg.apache.org/). 
+ // +kubebuilder:validation:Optional + TableFormat *string `json:"tableFormat,omitempty" tf:"table_format,omitempty"` +} + +type OnlineStoreConfigInitParameters struct { + + // Set to true to turn Online Store On. + EnableOnlineStore *bool `json:"enableOnlineStore,omitempty" tf:"enable_online_store,omitempty"` + + // Security config for at-rest encryption of your OnlineStore. See Security Config Below. + SecurityConfig *SecurityConfigInitParameters `json:"securityConfig,omitempty" tf:"security_config,omitempty"` + + // Option for different tiers of low latency storage for real-time data retrieval. Valid values are Standard, or InMemory. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Time to live duration, where the record is hard deleted after the expiration time is reached; ExpiresAt = EventTime + TtlDuration. See TTL Duration Below. + TTLDuration *TTLDurationInitParameters `json:"ttlDuration,omitempty" tf:"ttl_duration,omitempty"` +} + +type OnlineStoreConfigObservation struct { + + // Set to true to turn Online Store On. + EnableOnlineStore *bool `json:"enableOnlineStore,omitempty" tf:"enable_online_store,omitempty"` + + // Security config for at-rest encryption of your OnlineStore. See Security Config Below. + SecurityConfig *SecurityConfigObservation `json:"securityConfig,omitempty" tf:"security_config,omitempty"` + + // Option for different tiers of low latency storage for real-time data retrieval. Valid values are Standard, or InMemory. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Time to live duration, where the record is hard deleted after the expiration time is reached; ExpiresAt = EventTime + TtlDuration. See TTL Duration Below. 
+ TTLDuration *TTLDurationObservation `json:"ttlDuration,omitempty" tf:"ttl_duration,omitempty"` +} + +type OnlineStoreConfigParameters struct { + + // Set to true to turn Online Store On. + // +kubebuilder:validation:Optional + EnableOnlineStore *bool `json:"enableOnlineStore,omitempty" tf:"enable_online_store,omitempty"` + + // Security config for at-rest encryption of your OnlineStore. See Security Config Below. + // +kubebuilder:validation:Optional + SecurityConfig *SecurityConfigParameters `json:"securityConfig,omitempty" tf:"security_config,omitempty"` + + // Option for different tiers of low latency storage for real-time data retrieval. Valid values are Standard, or InMemory. + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // Time to live duration, where the record is hard deleted after the expiration time is reached; ExpiresAt = EventTime + TtlDuration. See TTL Duration Below. + // +kubebuilder:validation:Optional + TTLDuration *TTLDurationParameters `json:"ttlDuration,omitempty" tf:"ttl_duration,omitempty"` +} + +type S3StorageConfigInitParameters struct { + + // The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The S3 path where offline records are written. + ResolvedOutputS3URI *string `json:"resolvedOutputS3Uri,omitempty" tf:"resolved_output_s3_uri,omitempty"` + + // The S3 URI, or location in Amazon S3, of OfflineStore. + S3URI *string `json:"s3Uri,omitempty" tf:"s3_uri,omitempty"` +} + +type S3StorageConfigObservation struct { + + // The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location. 
+ KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The S3 path where offline records are written. + ResolvedOutputS3URI *string `json:"resolvedOutputS3Uri,omitempty" tf:"resolved_output_s3_uri,omitempty"` + + // The S3 URI, or location in Amazon S3, of OfflineStore. + S3URI *string `json:"s3Uri,omitempty" tf:"s3_uri,omitempty"` +} + +type S3StorageConfigParameters struct { + + // The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The S3 path where offline records are written. + // +kubebuilder:validation:Optional + ResolvedOutputS3URI *string `json:"resolvedOutputS3Uri,omitempty" tf:"resolved_output_s3_uri,omitempty"` + + // The S3 URI, or location in Amazon S3, of OfflineStore. + // +kubebuilder:validation:Optional + S3URI *string `json:"s3Uri" tf:"s3_uri,omitempty"` +} + +type SecurityConfigInitParameters struct { + + // The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type SecurityConfigObservation struct { + + // The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type SecurityConfigParameters struct { + + // The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` +} + +type TTLDurationInitParameters struct { + + // TtlDuration time unit. Valid values are Seconds, Minutes, Hours, Days, or Weeks. 
+ Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // TtlDuration time value. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TTLDurationObservation struct { + + // TtlDuration time unit. Valid values are Seconds, Minutes, Hours, Days, or Weeks. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // TtlDuration time value. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type TTLDurationParameters struct { + + // TtlDuration time unit. Valid values are Seconds, Minutes, Hours, Days, or Weeks. + // +kubebuilder:validation:Optional + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // TtlDuration time value. + // +kubebuilder:validation:Optional + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +// FeatureGroupSpec defines the desired state of FeatureGroup +type FeatureGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FeatureGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FeatureGroupInitParameters `json:"initProvider,omitempty"` +} + +// FeatureGroupStatus defines the observed state of FeatureGroup. 
+type FeatureGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FeatureGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FeatureGroup is the Schema for the FeatureGroups API. Provides a SageMaker Feature Group resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type FeatureGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.eventTimeFeatureName) || (has(self.initProvider) && has(self.initProvider.eventTimeFeatureName))",message="spec.forProvider.eventTimeFeatureName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.featureDefinition) || (has(self.initProvider) && has(self.initProvider.featureDefinition))",message="spec.forProvider.featureDefinition is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.recordIdentifierFeatureName) || (has(self.initProvider) && has(self.initProvider.recordIdentifierFeatureName))",message="spec.forProvider.recordIdentifierFeatureName is 
a required parameter" + Spec FeatureGroupSpec `json:"spec"` + Status FeatureGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FeatureGroupList contains a list of FeatureGroups +type FeatureGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FeatureGroup `json:"items"` +} + +// Repository type metadata. +var ( + FeatureGroup_Kind = "FeatureGroup" + FeatureGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FeatureGroup_Kind}.String() + FeatureGroup_KindAPIVersion = FeatureGroup_Kind + "." + CRDGroupVersion.String() + FeatureGroup_GroupVersionKind = CRDGroupVersion.WithKind(FeatureGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&FeatureGroup{}, &FeatureGroupList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_generated.conversion_hubs.go b/apis/sagemaker/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..298e872297 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,52 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *App) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *AppImageConfig) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *CodeRepository) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Device) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DeviceFleet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Domain) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Endpoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *EndpointConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FeatureGroup) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *Model) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *NotebookInstance) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Space) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *UserProfile) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Workforce) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Workteam) Hub() {} diff --git a/apis/sagemaker/v1beta2/zz_generated.deepcopy.go b/apis/sagemaker/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..24155b9026 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,20246 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlarmsInitParameters) DeepCopyInto(out *AlarmsInitParameters) { + *out = *in + if in.AlarmName != nil { + in, out := &in.AlarmName, &out.AlarmName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmsInitParameters. +func (in *AlarmsInitParameters) DeepCopy() *AlarmsInitParameters { + if in == nil { + return nil + } + out := new(AlarmsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlarmsObservation) DeepCopyInto(out *AlarmsObservation) { + *out = *in + if in.AlarmName != nil { + in, out := &in.AlarmName, &out.AlarmName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmsObservation. +func (in *AlarmsObservation) DeepCopy() *AlarmsObservation { + if in == nil { + return nil + } + out := new(AlarmsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlarmsParameters) DeepCopyInto(out *AlarmsParameters) { + *out = *in + if in.AlarmName != nil { + in, out := &in.AlarmName, &out.AlarmName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlarmsParameters. +func (in *AlarmsParameters) DeepCopy() *AlarmsParameters { + if in == nil { + return nil + } + out := new(AlarmsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *App) DeepCopyInto(out *App) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new App. +func (in *App) DeepCopy() *App { + if in == nil { + return nil + } + out := new(App) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *App) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppImageConfig) DeepCopyInto(out *AppImageConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppImageConfig. +func (in *AppImageConfig) DeepCopy() *AppImageConfig { + if in == nil { + return nil + } + out := new(AppImageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppImageConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppImageConfigInitParameters) DeepCopyInto(out *AppImageConfigInitParameters) { + *out = *in + if in.CodeEditorAppImageConfig != nil { + in, out := &in.CodeEditorAppImageConfig, &out.CodeEditorAppImageConfig + *out = new(CodeEditorAppImageConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JupyterLabImageConfig != nil { + in, out := &in.JupyterLabImageConfig, &out.JupyterLabImageConfig + *out = new(JupyterLabImageConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayImageConfig != nil { + in, out := &in.KernelGatewayImageConfig, &out.KernelGatewayImageConfig + *out = new(KernelGatewayImageConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppImageConfigInitParameters. 
+func (in *AppImageConfigInitParameters) DeepCopy() *AppImageConfigInitParameters { + if in == nil { + return nil + } + out := new(AppImageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppImageConfigList) DeepCopyInto(out *AppImageConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AppImageConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppImageConfigList. +func (in *AppImageConfigList) DeepCopy() *AppImageConfigList { + if in == nil { + return nil + } + out := new(AppImageConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppImageConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppImageConfigObservation) DeepCopyInto(out *AppImageConfigObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CodeEditorAppImageConfig != nil { + in, out := &in.CodeEditorAppImageConfig, &out.CodeEditorAppImageConfig + *out = new(CodeEditorAppImageConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.JupyterLabImageConfig != nil { + in, out := &in.JupyterLabImageConfig, &out.JupyterLabImageConfig + *out = new(JupyterLabImageConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayImageConfig != nil { + in, out := &in.KernelGatewayImageConfig, &out.KernelGatewayImageConfig + *out = new(KernelGatewayImageConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppImageConfigObservation. +func (in *AppImageConfigObservation) DeepCopy() *AppImageConfigObservation { + if in == nil { + return nil + } + out := new(AppImageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppImageConfigParameters) DeepCopyInto(out *AppImageConfigParameters) { + *out = *in + if in.CodeEditorAppImageConfig != nil { + in, out := &in.CodeEditorAppImageConfig, &out.CodeEditorAppImageConfig + *out = new(CodeEditorAppImageConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.JupyterLabImageConfig != nil { + in, out := &in.JupyterLabImageConfig, &out.JupyterLabImageConfig + *out = new(JupyterLabImageConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayImageConfig != nil { + in, out := &in.KernelGatewayImageConfig, &out.KernelGatewayImageConfig + *out = new(KernelGatewayImageConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppImageConfigParameters. +func (in *AppImageConfigParameters) DeepCopy() *AppImageConfigParameters { + if in == nil { + return nil + } + out := new(AppImageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppImageConfigSpec) DeepCopyInto(out *AppImageConfigSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppImageConfigSpec. 
+func (in *AppImageConfigSpec) DeepCopy() *AppImageConfigSpec { + if in == nil { + return nil + } + out := new(AppImageConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppImageConfigStatus) DeepCopyInto(out *AppImageConfigStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppImageConfigStatus. +func (in *AppImageConfigStatus) DeepCopy() *AppImageConfigStatus { + if in == nil { + return nil + } + out := new(AppImageConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppInitParameters) DeepCopyInto(out *AppInitParameters) { + *out = *in + if in.AppName != nil { + in, out := &in.AppName, &out.AppName + *out = new(string) + **out = **in + } + if in.AppType != nil { + in, out := &in.AppType, &out.AppType + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.DomainIDRef != nil { + in, out := &in.DomainIDRef, &out.DomainIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainIDSelector != nil { + in, out := &in.DomainIDSelector, &out.DomainIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceSpec != nil { + in, out := &in.ResourceSpec, &out.ResourceSpec + *out = new(ResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceName != nil { + in, out := &in.SpaceName, &out.SpaceName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserProfileName != nil { + in, out := &in.UserProfileName, &out.UserProfileName + *out = new(string) + **out = **in + } + if in.UserProfileNameRef != nil { + in, out := &in.UserProfileNameRef, &out.UserProfileNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserProfileNameSelector != nil { + in, out := &in.UserProfileNameSelector, &out.UserProfileNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppInitParameters. +func (in *AppInitParameters) DeepCopy() *AppInitParameters { + if in == nil { + return nil + } + out := new(AppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppList) DeepCopyInto(out *AppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]App, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppList. +func (in *AppList) DeepCopy() *AppList { + if in == nil { + return nil + } + out := new(AppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppObservation) DeepCopyInto(out *AppObservation) { + *out = *in + if in.AppName != nil { + in, out := &in.AppName, &out.AppName + *out = new(string) + **out = **in + } + if in.AppType != nil { + in, out := &in.AppType, &out.AppType + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceSpec != nil { + in, out := &in.ResourceSpec, &out.ResourceSpec + *out = new(ResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.SpaceName != nil { + in, out := &in.SpaceName, &out.SpaceName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserProfileName != nil { + in, out := &in.UserProfileName, &out.UserProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppObservation. +func (in *AppObservation) DeepCopy() *AppObservation { + if in == nil { + return nil + } + out := new(AppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppParameters) DeepCopyInto(out *AppParameters) { + *out = *in + if in.AppName != nil { + in, out := &in.AppName, &out.AppName + *out = new(string) + **out = **in + } + if in.AppType != nil { + in, out := &in.AppType, &out.AppType + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.DomainIDRef != nil { + in, out := &in.DomainIDRef, &out.DomainIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainIDSelector != nil { + in, out := &in.DomainIDSelector, &out.DomainIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ResourceSpec != nil { + in, out := &in.ResourceSpec, &out.ResourceSpec + *out = new(ResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceName != nil { + in, out := &in.SpaceName, &out.SpaceName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserProfileName != nil { + in, out := &in.UserProfileName, &out.UserProfileName + *out = new(string) + **out = **in + } + if in.UserProfileNameRef != nil { + in, out := &in.UserProfileNameRef, &out.UserProfileNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserProfileNameSelector != nil { + in, out := &in.UserProfileNameSelector, &out.UserProfileNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppParameters. 
+func (in *AppParameters) DeepCopy() *AppParameters { + if in == nil { + return nil + } + out := new(AppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppSpec) DeepCopyInto(out *AppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppSpec. +func (in *AppSpec) DeepCopy() *AppSpec { + if in == nil { + return nil + } + out := new(AppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppStatus) DeepCopyInto(out *AppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatus. +func (in *AppStatus) DeepCopy() *AppStatus { + if in == nil { + return nil + } + out := new(AppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsyncInferenceConfigInitParameters) DeepCopyInto(out *AsyncInferenceConfigInitParameters) { + *out = *in + if in.ClientConfig != nil { + in, out := &in.ClientConfig, &out.ClientConfig + *out = new(ClientConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputConfig != nil { + in, out := &in.OutputConfig, &out.OutputConfig + *out = new(AsyncInferenceConfigOutputConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInferenceConfigInitParameters. 
+func (in *AsyncInferenceConfigInitParameters) DeepCopy() *AsyncInferenceConfigInitParameters { + if in == nil { + return nil + } + out := new(AsyncInferenceConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsyncInferenceConfigObservation) DeepCopyInto(out *AsyncInferenceConfigObservation) { + *out = *in + if in.ClientConfig != nil { + in, out := &in.ClientConfig, &out.ClientConfig + *out = new(ClientConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.OutputConfig != nil { + in, out := &in.OutputConfig, &out.OutputConfig + *out = new(AsyncInferenceConfigOutputConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInferenceConfigObservation. +func (in *AsyncInferenceConfigObservation) DeepCopy() *AsyncInferenceConfigObservation { + if in == nil { + return nil + } + out := new(AsyncInferenceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsyncInferenceConfigOutputConfigInitParameters) DeepCopyInto(out *AsyncInferenceConfigOutputConfigInitParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.NotificationConfig != nil { + in, out := &in.NotificationConfig, &out.NotificationConfig + *out = new(NotificationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FailurePath != nil { + in, out := &in.S3FailurePath, &out.S3FailurePath + *out = new(string) + **out = **in + } + if in.S3OutputPath != nil { + in, out := &in.S3OutputPath, &out.S3OutputPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInferenceConfigOutputConfigInitParameters. +func (in *AsyncInferenceConfigOutputConfigInitParameters) DeepCopy() *AsyncInferenceConfigOutputConfigInitParameters { + if in == nil { + return nil + } + out := new(AsyncInferenceConfigOutputConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsyncInferenceConfigOutputConfigObservation) DeepCopyInto(out *AsyncInferenceConfigOutputConfigObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.NotificationConfig != nil { + in, out := &in.NotificationConfig, &out.NotificationConfig + *out = new(NotificationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.S3FailurePath != nil { + in, out := &in.S3FailurePath, &out.S3FailurePath + *out = new(string) + **out = **in + } + if in.S3OutputPath != nil { + in, out := &in.S3OutputPath, &out.S3OutputPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInferenceConfigOutputConfigObservation. 
+func (in *AsyncInferenceConfigOutputConfigObservation) DeepCopy() *AsyncInferenceConfigOutputConfigObservation { + if in == nil { + return nil + } + out := new(AsyncInferenceConfigOutputConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AsyncInferenceConfigOutputConfigParameters) DeepCopyInto(out *AsyncInferenceConfigOutputConfigParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.NotificationConfig != nil { + in, out := &in.NotificationConfig, &out.NotificationConfig + *out = new(NotificationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FailurePath != nil { + in, out := &in.S3FailurePath, &out.S3FailurePath + *out = new(string) + **out = **in + } + if in.S3OutputPath != nil { + in, out := &in.S3OutputPath, &out.S3OutputPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInferenceConfigOutputConfigParameters. +func (in *AsyncInferenceConfigOutputConfigParameters) DeepCopy() *AsyncInferenceConfigOutputConfigParameters { + if in == nil { + return nil + } + out := new(AsyncInferenceConfigOutputConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsyncInferenceConfigParameters) DeepCopyInto(out *AsyncInferenceConfigParameters) { + *out = *in + if in.ClientConfig != nil { + in, out := &in.ClientConfig, &out.ClientConfig + *out = new(ClientConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputConfig != nil { + in, out := &in.OutputConfig, &out.OutputConfig + *out = new(AsyncInferenceConfigOutputConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInferenceConfigParameters. +func (in *AsyncInferenceConfigParameters) DeepCopy() *AsyncInferenceConfigParameters { + if in == nil { + return nil + } + out := new(AsyncInferenceConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoRollbackConfigurationInitParameters) DeepCopyInto(out *AutoRollbackConfigurationInitParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]AlarmsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoRollbackConfigurationInitParameters. +func (in *AutoRollbackConfigurationInitParameters) DeepCopy() *AutoRollbackConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AutoRollbackConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoRollbackConfigurationObservation) DeepCopyInto(out *AutoRollbackConfigurationObservation) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]AlarmsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoRollbackConfigurationObservation. +func (in *AutoRollbackConfigurationObservation) DeepCopy() *AutoRollbackConfigurationObservation { + if in == nil { + return nil + } + out := new(AutoRollbackConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoRollbackConfigurationParameters) DeepCopyInto(out *AutoRollbackConfigurationParameters) { + *out = *in + if in.Alarms != nil { + in, out := &in.Alarms, &out.Alarms + *out = make([]AlarmsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoRollbackConfigurationParameters. +func (in *AutoRollbackConfigurationParameters) DeepCopy() *AutoRollbackConfigurationParameters { + if in == nil { + return nil + } + out := new(AutoRollbackConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlueGreenUpdatePolicyInitParameters) DeepCopyInto(out *BlueGreenUpdatePolicyInitParameters) { + *out = *in + if in.MaximumExecutionTimeoutInSeconds != nil { + in, out := &in.MaximumExecutionTimeoutInSeconds, &out.MaximumExecutionTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.TerminationWaitInSeconds != nil { + in, out := &in.TerminationWaitInSeconds, &out.TerminationWaitInSeconds + *out = new(float64) + **out = **in + } + if in.TrafficRoutingConfiguration != nil { + in, out := &in.TrafficRoutingConfiguration, &out.TrafficRoutingConfiguration + *out = new(TrafficRoutingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenUpdatePolicyInitParameters. +func (in *BlueGreenUpdatePolicyInitParameters) DeepCopy() *BlueGreenUpdatePolicyInitParameters { + if in == nil { + return nil + } + out := new(BlueGreenUpdatePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlueGreenUpdatePolicyObservation) DeepCopyInto(out *BlueGreenUpdatePolicyObservation) { + *out = *in + if in.MaximumExecutionTimeoutInSeconds != nil { + in, out := &in.MaximumExecutionTimeoutInSeconds, &out.MaximumExecutionTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.TerminationWaitInSeconds != nil { + in, out := &in.TerminationWaitInSeconds, &out.TerminationWaitInSeconds + *out = new(float64) + **out = **in + } + if in.TrafficRoutingConfiguration != nil { + in, out := &in.TrafficRoutingConfiguration, &out.TrafficRoutingConfiguration + *out = new(TrafficRoutingConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenUpdatePolicyObservation. 
+func (in *BlueGreenUpdatePolicyObservation) DeepCopy() *BlueGreenUpdatePolicyObservation { + if in == nil { + return nil + } + out := new(BlueGreenUpdatePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlueGreenUpdatePolicyParameters) DeepCopyInto(out *BlueGreenUpdatePolicyParameters) { + *out = *in + if in.MaximumExecutionTimeoutInSeconds != nil { + in, out := &in.MaximumExecutionTimeoutInSeconds, &out.MaximumExecutionTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.TerminationWaitInSeconds != nil { + in, out := &in.TerminationWaitInSeconds, &out.TerminationWaitInSeconds + *out = new(float64) + **out = **in + } + if in.TrafficRoutingConfiguration != nil { + in, out := &in.TrafficRoutingConfiguration, &out.TrafficRoutingConfiguration + *out = new(TrafficRoutingConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenUpdatePolicyParameters. +func (in *BlueGreenUpdatePolicyParameters) DeepCopy() *BlueGreenUpdatePolicyParameters { + if in == nil { + return nil + } + out := new(BlueGreenUpdatePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanarySizeInitParameters) DeepCopyInto(out *CanarySizeInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanarySizeInitParameters. 
+func (in *CanarySizeInitParameters) DeepCopy() *CanarySizeInitParameters { + if in == nil { + return nil + } + out := new(CanarySizeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanarySizeObservation) DeepCopyInto(out *CanarySizeObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanarySizeObservation. +func (in *CanarySizeObservation) DeepCopy() *CanarySizeObservation { + if in == nil { + return nil + } + out := new(CanarySizeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanarySizeParameters) DeepCopyInto(out *CanarySizeParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanarySizeParameters. +func (in *CanarySizeParameters) DeepCopy() *CanarySizeParameters { + if in == nil { + return nil + } + out := new(CanarySizeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanvasAppSettingsDirectDeploySettingsInitParameters) DeepCopyInto(out *CanvasAppSettingsDirectDeploySettingsInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsDirectDeploySettingsInitParameters. +func (in *CanvasAppSettingsDirectDeploySettingsInitParameters) DeepCopy() *CanvasAppSettingsDirectDeploySettingsInitParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsDirectDeploySettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsDirectDeploySettingsObservation) DeepCopyInto(out *CanvasAppSettingsDirectDeploySettingsObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsDirectDeploySettingsObservation. +func (in *CanvasAppSettingsDirectDeploySettingsObservation) DeepCopy() *CanvasAppSettingsDirectDeploySettingsObservation { + if in == nil { + return nil + } + out := new(CanvasAppSettingsDirectDeploySettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsDirectDeploySettingsParameters) DeepCopyInto(out *CanvasAppSettingsDirectDeploySettingsParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsDirectDeploySettingsParameters. 
+func (in *CanvasAppSettingsDirectDeploySettingsParameters) DeepCopy() *CanvasAppSettingsDirectDeploySettingsParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsDirectDeploySettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsIdentityProviderOauthSettingsInitParameters) DeepCopyInto(out *CanvasAppSettingsIdentityProviderOauthSettingsInitParameters) { + *out = *in + if in.DataSourceName != nil { + in, out := &in.DataSourceName, &out.DataSourceName + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsIdentityProviderOauthSettingsInitParameters. +func (in *CanvasAppSettingsIdentityProviderOauthSettingsInitParameters) DeepCopy() *CanvasAppSettingsIdentityProviderOauthSettingsInitParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsIdentityProviderOauthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanvasAppSettingsIdentityProviderOauthSettingsObservation) DeepCopyInto(out *CanvasAppSettingsIdentityProviderOauthSettingsObservation) { + *out = *in + if in.DataSourceName != nil { + in, out := &in.DataSourceName, &out.DataSourceName + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsIdentityProviderOauthSettingsObservation. +func (in *CanvasAppSettingsIdentityProviderOauthSettingsObservation) DeepCopy() *CanvasAppSettingsIdentityProviderOauthSettingsObservation { + if in == nil { + return nil + } + out := new(CanvasAppSettingsIdentityProviderOauthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsIdentityProviderOauthSettingsParameters) DeepCopyInto(out *CanvasAppSettingsIdentityProviderOauthSettingsParameters) { + *out = *in + if in.DataSourceName != nil { + in, out := &in.DataSourceName, &out.DataSourceName + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsIdentityProviderOauthSettingsParameters. 
+func (in *CanvasAppSettingsIdentityProviderOauthSettingsParameters) DeepCopy() *CanvasAppSettingsIdentityProviderOauthSettingsParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsIdentityProviderOauthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsInitParameters) DeepCopyInto(out *CanvasAppSettingsInitParameters) { + *out = *in + if in.DirectDeploySettings != nil { + in, out := &in.DirectDeploySettings, &out.DirectDeploySettings + *out = new(DirectDeploySettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IdentityProviderOauthSettings != nil { + in, out := &in.IdentityProviderOauthSettings, &out.IdentityProviderOauthSettings + *out = make([]IdentityProviderOauthSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KendraSettings != nil { + in, out := &in.KendraSettings, &out.KendraSettings + *out = new(KendraSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ModelRegisterSettings != nil { + in, out := &in.ModelRegisterSettings, &out.ModelRegisterSettings + *out = new(ModelRegisterSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeSeriesForecastingSettings != nil { + in, out := &in.TimeSeriesForecastingSettings, &out.TimeSeriesForecastingSettings + *out = new(TimeSeriesForecastingSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceSettings != nil { + in, out := &in.WorkspaceSettings, &out.WorkspaceSettings + *out = new(WorkspaceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsInitParameters. 
+func (in *CanvasAppSettingsInitParameters) DeepCopy() *CanvasAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsKendraSettingsInitParameters) DeepCopyInto(out *CanvasAppSettingsKendraSettingsInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsKendraSettingsInitParameters. +func (in *CanvasAppSettingsKendraSettingsInitParameters) DeepCopy() *CanvasAppSettingsKendraSettingsInitParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsKendraSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsKendraSettingsObservation) DeepCopyInto(out *CanvasAppSettingsKendraSettingsObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsKendraSettingsObservation. +func (in *CanvasAppSettingsKendraSettingsObservation) DeepCopy() *CanvasAppSettingsKendraSettingsObservation { + if in == nil { + return nil + } + out := new(CanvasAppSettingsKendraSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanvasAppSettingsKendraSettingsParameters) DeepCopyInto(out *CanvasAppSettingsKendraSettingsParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsKendraSettingsParameters. +func (in *CanvasAppSettingsKendraSettingsParameters) DeepCopy() *CanvasAppSettingsKendraSettingsParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsKendraSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsModelRegisterSettingsInitParameters) DeepCopyInto(out *CanvasAppSettingsModelRegisterSettingsInitParameters) { + *out = *in + if in.CrossAccountModelRegisterRoleArn != nil { + in, out := &in.CrossAccountModelRegisterRoleArn, &out.CrossAccountModelRegisterRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsModelRegisterSettingsInitParameters. +func (in *CanvasAppSettingsModelRegisterSettingsInitParameters) DeepCopy() *CanvasAppSettingsModelRegisterSettingsInitParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsModelRegisterSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanvasAppSettingsModelRegisterSettingsObservation) DeepCopyInto(out *CanvasAppSettingsModelRegisterSettingsObservation) { + *out = *in + if in.CrossAccountModelRegisterRoleArn != nil { + in, out := &in.CrossAccountModelRegisterRoleArn, &out.CrossAccountModelRegisterRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsModelRegisterSettingsObservation. +func (in *CanvasAppSettingsModelRegisterSettingsObservation) DeepCopy() *CanvasAppSettingsModelRegisterSettingsObservation { + if in == nil { + return nil + } + out := new(CanvasAppSettingsModelRegisterSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsModelRegisterSettingsParameters) DeepCopyInto(out *CanvasAppSettingsModelRegisterSettingsParameters) { + *out = *in + if in.CrossAccountModelRegisterRoleArn != nil { + in, out := &in.CrossAccountModelRegisterRoleArn, &out.CrossAccountModelRegisterRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsModelRegisterSettingsParameters. +func (in *CanvasAppSettingsModelRegisterSettingsParameters) DeepCopy() *CanvasAppSettingsModelRegisterSettingsParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsModelRegisterSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanvasAppSettingsObservation) DeepCopyInto(out *CanvasAppSettingsObservation) { + *out = *in + if in.DirectDeploySettings != nil { + in, out := &in.DirectDeploySettings, &out.DirectDeploySettings + *out = new(DirectDeploySettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.IdentityProviderOauthSettings != nil { + in, out := &in.IdentityProviderOauthSettings, &out.IdentityProviderOauthSettings + *out = make([]IdentityProviderOauthSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KendraSettings != nil { + in, out := &in.KendraSettings, &out.KendraSettings + *out = new(KendraSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ModelRegisterSettings != nil { + in, out := &in.ModelRegisterSettings, &out.ModelRegisterSettings + *out = new(ModelRegisterSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.TimeSeriesForecastingSettings != nil { + in, out := &in.TimeSeriesForecastingSettings, &out.TimeSeriesForecastingSettings + *out = new(TimeSeriesForecastingSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceSettings != nil { + in, out := &in.WorkspaceSettings, &out.WorkspaceSettings + *out = new(WorkspaceSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsObservation. +func (in *CanvasAppSettingsObservation) DeepCopy() *CanvasAppSettingsObservation { + if in == nil { + return nil + } + out := new(CanvasAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanvasAppSettingsParameters) DeepCopyInto(out *CanvasAppSettingsParameters) { + *out = *in + if in.DirectDeploySettings != nil { + in, out := &in.DirectDeploySettings, &out.DirectDeploySettings + *out = new(DirectDeploySettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.IdentityProviderOauthSettings != nil { + in, out := &in.IdentityProviderOauthSettings, &out.IdentityProviderOauthSettings + *out = make([]IdentityProviderOauthSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KendraSettings != nil { + in, out := &in.KendraSettings, &out.KendraSettings + *out = new(KendraSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ModelRegisterSettings != nil { + in, out := &in.ModelRegisterSettings, &out.ModelRegisterSettings + *out = new(ModelRegisterSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeSeriesForecastingSettings != nil { + in, out := &in.TimeSeriesForecastingSettings, &out.TimeSeriesForecastingSettings + *out = new(TimeSeriesForecastingSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceSettings != nil { + in, out := &in.WorkspaceSettings, &out.WorkspaceSettings + *out = new(WorkspaceSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsParameters. +func (in *CanvasAppSettingsParameters) DeepCopy() *CanvasAppSettingsParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanvasAppSettingsTimeSeriesForecastingSettingsInitParameters) DeepCopyInto(out *CanvasAppSettingsTimeSeriesForecastingSettingsInitParameters) { + *out = *in + if in.AmazonForecastRoleArn != nil { + in, out := &in.AmazonForecastRoleArn, &out.AmazonForecastRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsTimeSeriesForecastingSettingsInitParameters. +func (in *CanvasAppSettingsTimeSeriesForecastingSettingsInitParameters) DeepCopy() *CanvasAppSettingsTimeSeriesForecastingSettingsInitParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsTimeSeriesForecastingSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsTimeSeriesForecastingSettingsObservation) DeepCopyInto(out *CanvasAppSettingsTimeSeriesForecastingSettingsObservation) { + *out = *in + if in.AmazonForecastRoleArn != nil { + in, out := &in.AmazonForecastRoleArn, &out.AmazonForecastRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsTimeSeriesForecastingSettingsObservation. +func (in *CanvasAppSettingsTimeSeriesForecastingSettingsObservation) DeepCopy() *CanvasAppSettingsTimeSeriesForecastingSettingsObservation { + if in == nil { + return nil + } + out := new(CanvasAppSettingsTimeSeriesForecastingSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanvasAppSettingsTimeSeriesForecastingSettingsParameters) DeepCopyInto(out *CanvasAppSettingsTimeSeriesForecastingSettingsParameters) { + *out = *in + if in.AmazonForecastRoleArn != nil { + in, out := &in.AmazonForecastRoleArn, &out.AmazonForecastRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsTimeSeriesForecastingSettingsParameters. +func (in *CanvasAppSettingsTimeSeriesForecastingSettingsParameters) DeepCopy() *CanvasAppSettingsTimeSeriesForecastingSettingsParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsTimeSeriesForecastingSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsWorkspaceSettingsInitParameters) DeepCopyInto(out *CanvasAppSettingsWorkspaceSettingsInitParameters) { + *out = *in + if in.S3ArtifactPath != nil { + in, out := &in.S3ArtifactPath, &out.S3ArtifactPath + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsWorkspaceSettingsInitParameters. +func (in *CanvasAppSettingsWorkspaceSettingsInitParameters) DeepCopy() *CanvasAppSettingsWorkspaceSettingsInitParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsWorkspaceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanvasAppSettingsWorkspaceSettingsObservation) DeepCopyInto(out *CanvasAppSettingsWorkspaceSettingsObservation) { + *out = *in + if in.S3ArtifactPath != nil { + in, out := &in.S3ArtifactPath, &out.S3ArtifactPath + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsWorkspaceSettingsObservation. +func (in *CanvasAppSettingsWorkspaceSettingsObservation) DeepCopy() *CanvasAppSettingsWorkspaceSettingsObservation { + if in == nil { + return nil + } + out := new(CanvasAppSettingsWorkspaceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanvasAppSettingsWorkspaceSettingsParameters) DeepCopyInto(out *CanvasAppSettingsWorkspaceSettingsParameters) { + *out = *in + if in.S3ArtifactPath != nil { + in, out := &in.S3ArtifactPath, &out.S3ArtifactPath + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanvasAppSettingsWorkspaceSettingsParameters. +func (in *CanvasAppSettingsWorkspaceSettingsParameters) DeepCopy() *CanvasAppSettingsWorkspaceSettingsParameters { + if in == nil { + return nil + } + out := new(CanvasAppSettingsWorkspaceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptureContentTypeHeaderInitParameters) DeepCopyInto(out *CaptureContentTypeHeaderInitParameters) { + *out = *in + if in.CsvContentTypes != nil { + in, out := &in.CsvContentTypes, &out.CsvContentTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JSONContentTypes != nil { + in, out := &in.JSONContentTypes, &out.JSONContentTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureContentTypeHeaderInitParameters. +func (in *CaptureContentTypeHeaderInitParameters) DeepCopy() *CaptureContentTypeHeaderInitParameters { + if in == nil { + return nil + } + out := new(CaptureContentTypeHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureContentTypeHeaderObservation) DeepCopyInto(out *CaptureContentTypeHeaderObservation) { + *out = *in + if in.CsvContentTypes != nil { + in, out := &in.CsvContentTypes, &out.CsvContentTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JSONContentTypes != nil { + in, out := &in.JSONContentTypes, &out.JSONContentTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureContentTypeHeaderObservation. 
+func (in *CaptureContentTypeHeaderObservation) DeepCopy() *CaptureContentTypeHeaderObservation { + if in == nil { + return nil + } + out := new(CaptureContentTypeHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureContentTypeHeaderParameters) DeepCopyInto(out *CaptureContentTypeHeaderParameters) { + *out = *in + if in.CsvContentTypes != nil { + in, out := &in.CsvContentTypes, &out.CsvContentTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JSONContentTypes != nil { + in, out := &in.JSONContentTypes, &out.JSONContentTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureContentTypeHeaderParameters. +func (in *CaptureContentTypeHeaderParameters) DeepCopy() *CaptureContentTypeHeaderParameters { + if in == nil { + return nil + } + out := new(CaptureContentTypeHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureOptionsInitParameters) DeepCopyInto(out *CaptureOptionsInitParameters) { + *out = *in + if in.CaptureMode != nil { + in, out := &in.CaptureMode, &out.CaptureMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureOptionsInitParameters. 
+func (in *CaptureOptionsInitParameters) DeepCopy() *CaptureOptionsInitParameters { + if in == nil { + return nil + } + out := new(CaptureOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureOptionsObservation) DeepCopyInto(out *CaptureOptionsObservation) { + *out = *in + if in.CaptureMode != nil { + in, out := &in.CaptureMode, &out.CaptureMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureOptionsObservation. +func (in *CaptureOptionsObservation) DeepCopy() *CaptureOptionsObservation { + if in == nil { + return nil + } + out := new(CaptureOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureOptionsParameters) DeepCopyInto(out *CaptureOptionsParameters) { + *out = *in + if in.CaptureMode != nil { + in, out := &in.CaptureMode, &out.CaptureMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureOptionsParameters. +func (in *CaptureOptionsParameters) DeepCopy() *CaptureOptionsParameters { + if in == nil { + return nil + } + out := new(CaptureOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientConfigInitParameters) DeepCopyInto(out *ClientConfigInitParameters) { + *out = *in + if in.MaxConcurrentInvocationsPerInstance != nil { + in, out := &in.MaxConcurrentInvocationsPerInstance, &out.MaxConcurrentInvocationsPerInstance + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConfigInitParameters. +func (in *ClientConfigInitParameters) DeepCopy() *ClientConfigInitParameters { + if in == nil { + return nil + } + out := new(ClientConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConfigObservation) DeepCopyInto(out *ClientConfigObservation) { + *out = *in + if in.MaxConcurrentInvocationsPerInstance != nil { + in, out := &in.MaxConcurrentInvocationsPerInstance, &out.MaxConcurrentInvocationsPerInstance + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConfigObservation. +func (in *ClientConfigObservation) DeepCopy() *ClientConfigObservation { + if in == nil { + return nil + } + out := new(ClientConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConfigParameters) DeepCopyInto(out *ClientConfigParameters) { + *out = *in + if in.MaxConcurrentInvocationsPerInstance != nil { + in, out := &in.MaxConcurrentInvocationsPerInstance, &out.MaxConcurrentInvocationsPerInstance + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConfigParameters. 
+func (in *ClientConfigParameters) DeepCopy() *ClientConfigParameters { + if in == nil { + return nil + } + out := new(ClientConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeEditorAppImageConfigInitParameters) DeepCopyInto(out *CodeEditorAppImageConfigInitParameters) { + *out = *in + if in.ContainerConfig != nil { + in, out := &in.ContainerConfig, &out.ContainerConfig + *out = new(ContainerConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(FileSystemConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeEditorAppImageConfigInitParameters. +func (in *CodeEditorAppImageConfigInitParameters) DeepCopy() *CodeEditorAppImageConfigInitParameters { + if in == nil { + return nil + } + out := new(CodeEditorAppImageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeEditorAppImageConfigObservation) DeepCopyInto(out *CodeEditorAppImageConfigObservation) { + *out = *in + if in.ContainerConfig != nil { + in, out := &in.ContainerConfig, &out.ContainerConfig + *out = new(ContainerConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(FileSystemConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeEditorAppImageConfigObservation. 
+func (in *CodeEditorAppImageConfigObservation) DeepCopy() *CodeEditorAppImageConfigObservation { + if in == nil { + return nil + } + out := new(CodeEditorAppImageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeEditorAppImageConfigParameters) DeepCopyInto(out *CodeEditorAppImageConfigParameters) { + *out = *in + if in.ContainerConfig != nil { + in, out := &in.ContainerConfig, &out.ContainerConfig + *out = new(ContainerConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(FileSystemConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeEditorAppImageConfigParameters. +func (in *CodeEditorAppImageConfigParameters) DeepCopy() *CodeEditorAppImageConfigParameters { + if in == nil { + return nil + } + out := new(CodeEditorAppImageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeEditorAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *CodeEditorAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeEditorAppSettingsDefaultResourceSpecInitParameters. +func (in *CodeEditorAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *CodeEditorAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(CodeEditorAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeEditorAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *CodeEditorAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeEditorAppSettingsDefaultResourceSpecObservation. +func (in *CodeEditorAppSettingsDefaultResourceSpecObservation) DeepCopy() *CodeEditorAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(CodeEditorAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeEditorAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *CodeEditorAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeEditorAppSettingsDefaultResourceSpecParameters. +func (in *CodeEditorAppSettingsDefaultResourceSpecParameters) DeepCopy() *CodeEditorAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(CodeEditorAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeEditorAppSettingsInitParameters) DeepCopyInto(out *CodeEditorAppSettingsInitParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(CodeEditorAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeEditorAppSettingsInitParameters. +func (in *CodeEditorAppSettingsInitParameters) DeepCopy() *CodeEditorAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(CodeEditorAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeEditorAppSettingsObservation) DeepCopyInto(out *CodeEditorAppSettingsObservation) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(CodeEditorAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeEditorAppSettingsObservation. 
+func (in *CodeEditorAppSettingsObservation) DeepCopy() *CodeEditorAppSettingsObservation { + if in == nil { + return nil + } + out := new(CodeEditorAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeEditorAppSettingsParameters) DeepCopyInto(out *CodeEditorAppSettingsParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(CodeEditorAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeEditorAppSettingsParameters. +func (in *CodeEditorAppSettingsParameters) DeepCopy() *CodeEditorAppSettingsParameters { + if in == nil { + return nil + } + out := new(CodeEditorAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeRepository) DeepCopyInto(out *CodeRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepository. +func (in *CodeRepository) DeepCopy() *CodeRepository { + if in == nil { + return nil + } + out := new(CodeRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CodeRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeRepositoryInitParameters) DeepCopyInto(out *CodeRepositoryInitParameters) { + *out = *in + if in.GitConfig != nil { + in, out := &in.GitConfig, &out.GitConfig + *out = new(GitConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepositoryInitParameters. +func (in *CodeRepositoryInitParameters) DeepCopy() *CodeRepositoryInitParameters { + if in == nil { + return nil + } + out := new(CodeRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeRepositoryList) DeepCopyInto(out *CodeRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CodeRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepositoryList. +func (in *CodeRepositoryList) DeepCopy() *CodeRepositoryList { + if in == nil { + return nil + } + out := new(CodeRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CodeRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeRepositoryObservation) DeepCopyInto(out *CodeRepositoryObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.GitConfig != nil { + in, out := &in.GitConfig, &out.GitConfig + *out = new(GitConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepositoryObservation. +func (in *CodeRepositoryObservation) DeepCopy() *CodeRepositoryObservation { + if in == nil { + return nil + } + out := new(CodeRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeRepositoryParameters) DeepCopyInto(out *CodeRepositoryParameters) { + *out = *in + if in.GitConfig != nil { + in, out := &in.GitConfig, &out.GitConfig + *out = new(GitConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepositoryParameters. +func (in *CodeRepositoryParameters) DeepCopy() *CodeRepositoryParameters { + if in == nil { + return nil + } + out := new(CodeRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeRepositorySpec) DeepCopyInto(out *CodeRepositorySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepositorySpec. +func (in *CodeRepositorySpec) DeepCopy() *CodeRepositorySpec { + if in == nil { + return nil + } + out := new(CodeRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeRepositoryStatus) DeepCopyInto(out *CodeRepositoryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeRepositoryStatus. +func (in *CodeRepositoryStatus) DeepCopy() *CodeRepositoryStatus { + if in == nil { + return nil + } + out := new(CodeRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CognitoConfigInitParameters) DeepCopyInto(out *CognitoConfigInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientIDRef != nil { + in, out := &in.ClientIDRef, &out.ClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClientIDSelector != nil { + in, out := &in.ClientIDSelector, &out.ClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserPool != nil { + in, out := &in.UserPool, &out.UserPool + *out = new(string) + **out = **in + } + if in.UserPoolRef != nil { + in, out := &in.UserPoolRef, &out.UserPoolRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolSelector != nil { + in, out := &in.UserPoolSelector, &out.UserPoolSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoConfigInitParameters. +func (in *CognitoConfigInitParameters) DeepCopy() *CognitoConfigInitParameters { + if in == nil { + return nil + } + out := new(CognitoConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CognitoConfigObservation) DeepCopyInto(out *CognitoConfigObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.UserPool != nil { + in, out := &in.UserPool, &out.UserPool + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoConfigObservation. +func (in *CognitoConfigObservation) DeepCopy() *CognitoConfigObservation { + if in == nil { + return nil + } + out := new(CognitoConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CognitoConfigParameters) DeepCopyInto(out *CognitoConfigParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientIDRef != nil { + in, out := &in.ClientIDRef, &out.ClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClientIDSelector != nil { + in, out := &in.ClientIDSelector, &out.ClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserPool != nil { + in, out := &in.UserPool, &out.UserPool + *out = new(string) + **out = **in + } + if in.UserPoolRef != nil { + in, out := &in.UserPoolRef, &out.UserPoolRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolSelector != nil { + in, out := &in.UserPoolSelector, &out.UserPoolSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoConfigParameters. 
+func (in *CognitoConfigParameters) DeepCopy() *CognitoConfigParameters { + if in == nil { + return nil + } + out := new(CognitoConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CognitoMemberDefinitionInitParameters) DeepCopyInto(out *CognitoMemberDefinitionInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientIDRef != nil { + in, out := &in.ClientIDRef, &out.ClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClientIDSelector != nil { + in, out := &in.ClientIDSelector, &out.ClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserGroup != nil { + in, out := &in.UserGroup, &out.UserGroup + *out = new(string) + **out = **in + } + if in.UserGroupRef != nil { + in, out := &in.UserGroupRef, &out.UserGroupRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserGroupSelector != nil { + in, out := &in.UserGroupSelector, &out.UserGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserPool != nil { + in, out := &in.UserPool, &out.UserPool + *out = new(string) + **out = **in + } + if in.UserPoolRef != nil { + in, out := &in.UserPoolRef, &out.UserPoolRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolSelector != nil { + in, out := &in.UserPoolSelector, &out.UserPoolSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoMemberDefinitionInitParameters. 
+func (in *CognitoMemberDefinitionInitParameters) DeepCopy() *CognitoMemberDefinitionInitParameters { + if in == nil { + return nil + } + out := new(CognitoMemberDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CognitoMemberDefinitionObservation) DeepCopyInto(out *CognitoMemberDefinitionObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.UserGroup != nil { + in, out := &in.UserGroup, &out.UserGroup + *out = new(string) + **out = **in + } + if in.UserPool != nil { + in, out := &in.UserPool, &out.UserPool + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoMemberDefinitionObservation. +func (in *CognitoMemberDefinitionObservation) DeepCopy() *CognitoMemberDefinitionObservation { + if in == nil { + return nil + } + out := new(CognitoMemberDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CognitoMemberDefinitionParameters) DeepCopyInto(out *CognitoMemberDefinitionParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientIDRef != nil { + in, out := &in.ClientIDRef, &out.ClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClientIDSelector != nil { + in, out := &in.ClientIDSelector, &out.ClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserGroup != nil { + in, out := &in.UserGroup, &out.UserGroup + *out = new(string) + **out = **in + } + if in.UserGroupRef != nil { + in, out := &in.UserGroupRef, &out.UserGroupRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserGroupSelector != nil { + in, out := &in.UserGroupSelector, &out.UserGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserPool != nil { + in, out := &in.UserPool, &out.UserPool + *out = new(string) + **out = **in + } + if in.UserPoolRef != nil { + in, out := &in.UserPoolRef, &out.UserPoolRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserPoolSelector != nil { + in, out := &in.UserPoolSelector, &out.UserPoolSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CognitoMemberDefinitionParameters. +func (in *CognitoMemberDefinitionParameters) DeepCopy() *CognitoMemberDefinitionParameters { + if in == nil { + return nil + } + out := new(CognitoMemberDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerConfigInitParameters) DeepCopyInto(out *ContainerConfigInitParameters) { + *out = *in + if in.ContainerArguments != nil { + in, out := &in.ContainerArguments, &out.ContainerArguments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEntrypoint != nil { + in, out := &in.ContainerEntrypoint, &out.ContainerEntrypoint + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEnvironmentVariables != nil { + in, out := &in.ContainerEnvironmentVariables, &out.ContainerEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfigInitParameters. +func (in *ContainerConfigInitParameters) DeepCopy() *ContainerConfigInitParameters { + if in == nil { + return nil + } + out := new(ContainerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerConfigObservation) DeepCopyInto(out *ContainerConfigObservation) { + *out = *in + if in.ContainerArguments != nil { + in, out := &in.ContainerArguments, &out.ContainerArguments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEntrypoint != nil { + in, out := &in.ContainerEntrypoint, &out.ContainerEntrypoint + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEnvironmentVariables != nil { + in, out := &in.ContainerEnvironmentVariables, &out.ContainerEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfigObservation. +func (in *ContainerConfigObservation) DeepCopy() *ContainerConfigObservation { + if in == nil { + return nil + } + out := new(ContainerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerConfigParameters) DeepCopyInto(out *ContainerConfigParameters) { + *out = *in + if in.ContainerArguments != nil { + in, out := &in.ContainerArguments, &out.ContainerArguments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEntrypoint != nil { + in, out := &in.ContainerEntrypoint, &out.ContainerEntrypoint + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEnvironmentVariables != nil { + in, out := &in.ContainerEnvironmentVariables, &out.ContainerEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfigParameters. +func (in *ContainerConfigParameters) DeepCopy() *ContainerConfigParameters { + if in == nil { + return nil + } + out := new(ContainerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerInitParameters) DeepCopyInto(out *ContainerInitParameters) { + *out = *in + if in.ContainerHostname != nil { + in, out := &in.ContainerHostname, &out.ContainerHostname + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImageConfig != nil { + in, out := &in.ImageConfig, &out.ImageConfig + *out = new(ImageConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.ModelDataSource != nil { + in, out := &in.ModelDataSource, &out.ModelDataSource + *out = new(ModelDataSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ModelDataURL != nil { + in, out := &in.ModelDataURL, &out.ModelDataURL + *out = new(string) + **out = **in + } + if in.ModelPackageName != nil { + in, out := &in.ModelPackageName, &out.ModelPackageName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerInitParameters. +func (in *ContainerInitParameters) DeepCopy() *ContainerInitParameters { + if in == nil { + return nil + } + out := new(ContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerObservation) DeepCopyInto(out *ContainerObservation) { + *out = *in + if in.ContainerHostname != nil { + in, out := &in.ContainerHostname, &out.ContainerHostname + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImageConfig != nil { + in, out := &in.ImageConfig, &out.ImageConfig + *out = new(ImageConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.ModelDataSource != nil { + in, out := &in.ModelDataSource, &out.ModelDataSource + *out = new(ModelDataSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.ModelDataURL != nil { + in, out := &in.ModelDataURL, &out.ModelDataURL + *out = new(string) + **out = **in + } + if in.ModelPackageName != nil { + in, out := &in.ModelPackageName, &out.ModelPackageName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerObservation. +func (in *ContainerObservation) DeepCopy() *ContainerObservation { + if in == nil { + return nil + } + out := new(ContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerParameters) DeepCopyInto(out *ContainerParameters) { + *out = *in + if in.ContainerHostname != nil { + in, out := &in.ContainerHostname, &out.ContainerHostname + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImageConfig != nil { + in, out := &in.ImageConfig, &out.ImageConfig + *out = new(ImageConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.ModelDataSource != nil { + in, out := &in.ModelDataSource, &out.ModelDataSource + *out = new(ModelDataSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.ModelDataURL != nil { + in, out := &in.ModelDataURL, &out.ModelDataURL + *out = new(string) + **out = **in + } + if in.ModelPackageName != nil { + in, out := &in.ModelPackageName, &out.ModelPackageName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParameters. +func (in *ContainerParameters) DeepCopy() *ContainerParameters { + if in == nil { + return nil + } + out := new(ContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CoreDumpConfigInitParameters) DeepCopyInto(out *CoreDumpConfigInitParameters) { + *out = *in + if in.DestinationS3URI != nil { + in, out := &in.DestinationS3URI, &out.DestinationS3URI + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDumpConfigInitParameters. +func (in *CoreDumpConfigInitParameters) DeepCopy() *CoreDumpConfigInitParameters { + if in == nil { + return nil + } + out := new(CoreDumpConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreDumpConfigObservation) DeepCopyInto(out *CoreDumpConfigObservation) { + *out = *in + if in.DestinationS3URI != nil { + in, out := &in.DestinationS3URI, &out.DestinationS3URI + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDumpConfigObservation. +func (in *CoreDumpConfigObservation) DeepCopy() *CoreDumpConfigObservation { + if in == nil { + return nil + } + out := new(CoreDumpConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CoreDumpConfigParameters) DeepCopyInto(out *CoreDumpConfigParameters) { + *out = *in + if in.DestinationS3URI != nil { + in, out := &in.DestinationS3URI, &out.DestinationS3URI + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDumpConfigParameters. +func (in *CoreDumpConfigParameters) DeepCopy() *CoreDumpConfigParameters { + if in == nil { + return nil + } + out := new(CoreDumpConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFileSystemConfigEFSFileSystemConfigInitParameters) DeepCopyInto(out *CustomFileSystemConfigEFSFileSystemConfigInitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemPath != nil { + in, out := &in.FileSystemPath, &out.FileSystemPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFileSystemConfigEFSFileSystemConfigInitParameters. +func (in *CustomFileSystemConfigEFSFileSystemConfigInitParameters) DeepCopy() *CustomFileSystemConfigEFSFileSystemConfigInitParameters { + if in == nil { + return nil + } + out := new(CustomFileSystemConfigEFSFileSystemConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomFileSystemConfigEFSFileSystemConfigObservation) DeepCopyInto(out *CustomFileSystemConfigEFSFileSystemConfigObservation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemPath != nil { + in, out := &in.FileSystemPath, &out.FileSystemPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFileSystemConfigEFSFileSystemConfigObservation. +func (in *CustomFileSystemConfigEFSFileSystemConfigObservation) DeepCopy() *CustomFileSystemConfigEFSFileSystemConfigObservation { + if in == nil { + return nil + } + out := new(CustomFileSystemConfigEFSFileSystemConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFileSystemConfigEFSFileSystemConfigParameters) DeepCopyInto(out *CustomFileSystemConfigEFSFileSystemConfigParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemPath != nil { + in, out := &in.FileSystemPath, &out.FileSystemPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFileSystemConfigEFSFileSystemConfigParameters. +func (in *CustomFileSystemConfigEFSFileSystemConfigParameters) DeepCopy() *CustomFileSystemConfigEFSFileSystemConfigParameters { + if in == nil { + return nil + } + out := new(CustomFileSystemConfigEFSFileSystemConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomFileSystemConfigInitParameters) DeepCopyInto(out *CustomFileSystemConfigInitParameters) { + *out = *in + if in.EFSFileSystemConfig != nil { + in, out := &in.EFSFileSystemConfig, &out.EFSFileSystemConfig + *out = new(EFSFileSystemConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFileSystemConfigInitParameters. +func (in *CustomFileSystemConfigInitParameters) DeepCopy() *CustomFileSystemConfigInitParameters { + if in == nil { + return nil + } + out := new(CustomFileSystemConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFileSystemConfigObservation) DeepCopyInto(out *CustomFileSystemConfigObservation) { + *out = *in + if in.EFSFileSystemConfig != nil { + in, out := &in.EFSFileSystemConfig, &out.EFSFileSystemConfig + *out = new(EFSFileSystemConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFileSystemConfigObservation. +func (in *CustomFileSystemConfigObservation) DeepCopy() *CustomFileSystemConfigObservation { + if in == nil { + return nil + } + out := new(CustomFileSystemConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFileSystemConfigParameters) DeepCopyInto(out *CustomFileSystemConfigParameters) { + *out = *in + if in.EFSFileSystemConfig != nil { + in, out := &in.EFSFileSystemConfig, &out.EFSFileSystemConfig + *out = new(EFSFileSystemConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFileSystemConfigParameters. 
+func (in *CustomFileSystemConfigParameters) DeepCopy() *CustomFileSystemConfigParameters { + if in == nil { + return nil + } + out := new(CustomFileSystemConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFileSystemInitParameters) DeepCopyInto(out *CustomFileSystemInitParameters) { + *out = *in + if in.EFSFileSystem != nil { + in, out := &in.EFSFileSystem, &out.EFSFileSystem + *out = new(EFSFileSystemInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFileSystemInitParameters. +func (in *CustomFileSystemInitParameters) DeepCopy() *CustomFileSystemInitParameters { + if in == nil { + return nil + } + out := new(CustomFileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFileSystemObservation) DeepCopyInto(out *CustomFileSystemObservation) { + *out = *in + if in.EFSFileSystem != nil { + in, out := &in.EFSFileSystem, &out.EFSFileSystem + *out = new(EFSFileSystemObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFileSystemObservation. +func (in *CustomFileSystemObservation) DeepCopy() *CustomFileSystemObservation { + if in == nil { + return nil + } + out := new(CustomFileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomFileSystemParameters) DeepCopyInto(out *CustomFileSystemParameters) { + *out = *in + if in.EFSFileSystem != nil { + in, out := &in.EFSFileSystem, &out.EFSFileSystem + *out = new(EFSFileSystemParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFileSystemParameters. +func (in *CustomFileSystemParameters) DeepCopy() *CustomFileSystemParameters { + if in == nil { + return nil + } + out := new(CustomFileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomImageInitParameters) DeepCopyInto(out *CustomImageInitParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomImageInitParameters. +func (in *CustomImageInitParameters) DeepCopy() *CustomImageInitParameters { + if in == nil { + return nil + } + out := new(CustomImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomImageObservation) DeepCopyInto(out *CustomImageObservation) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomImageObservation. +func (in *CustomImageObservation) DeepCopy() *CustomImageObservation { + if in == nil { + return nil + } + out := new(CustomImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomImageParameters) DeepCopyInto(out *CustomImageParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomImageParameters. +func (in *CustomImageParameters) DeepCopy() *CustomImageParameters { + if in == nil { + return nil + } + out := new(CustomImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomPosixUserConfigInitParameters) DeepCopyInto(out *CustomPosixUserConfigInitParameters) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPosixUserConfigInitParameters. +func (in *CustomPosixUserConfigInitParameters) DeepCopy() *CustomPosixUserConfigInitParameters { + if in == nil { + return nil + } + out := new(CustomPosixUserConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPosixUserConfigObservation) DeepCopyInto(out *CustomPosixUserConfigObservation) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPosixUserConfigObservation. +func (in *CustomPosixUserConfigObservation) DeepCopy() *CustomPosixUserConfigObservation { + if in == nil { + return nil + } + out := new(CustomPosixUserConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomPosixUserConfigParameters) DeepCopyInto(out *CustomPosixUserConfigParameters) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPosixUserConfigParameters. 
+func (in *CustomPosixUserConfigParameters) DeepCopy() *CustomPosixUserConfigParameters { + if in == nil { + return nil + } + out := new(CustomPosixUserConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCaptureConfigInitParameters) DeepCopyInto(out *DataCaptureConfigInitParameters) { + *out = *in + if in.CaptureContentTypeHeader != nil { + in, out := &in.CaptureContentTypeHeader, &out.CaptureContentTypeHeader + *out = new(CaptureContentTypeHeaderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CaptureOptions != nil { + in, out := &in.CaptureOptions, &out.CaptureOptions + *out = make([]CaptureOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationS3URI != nil { + in, out := &in.DestinationS3URI, &out.DestinationS3URI + *out = new(string) + **out = **in + } + if in.EnableCapture != nil { + in, out := &in.EnableCapture, &out.EnableCapture + *out = new(bool) + **out = **in + } + if in.InitialSamplingPercentage != nil { + in, out := &in.InitialSamplingPercentage, &out.InitialSamplingPercentage + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCaptureConfigInitParameters. +func (in *DataCaptureConfigInitParameters) DeepCopy() *DataCaptureConfigInitParameters { + if in == nil { + return nil + } + out := new(DataCaptureConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataCaptureConfigObservation) DeepCopyInto(out *DataCaptureConfigObservation) { + *out = *in + if in.CaptureContentTypeHeader != nil { + in, out := &in.CaptureContentTypeHeader, &out.CaptureContentTypeHeader + *out = new(CaptureContentTypeHeaderObservation) + (*in).DeepCopyInto(*out) + } + if in.CaptureOptions != nil { + in, out := &in.CaptureOptions, &out.CaptureOptions + *out = make([]CaptureOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationS3URI != nil { + in, out := &in.DestinationS3URI, &out.DestinationS3URI + *out = new(string) + **out = **in + } + if in.EnableCapture != nil { + in, out := &in.EnableCapture, &out.EnableCapture + *out = new(bool) + **out = **in + } + if in.InitialSamplingPercentage != nil { + in, out := &in.InitialSamplingPercentage, &out.InitialSamplingPercentage + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCaptureConfigObservation. +func (in *DataCaptureConfigObservation) DeepCopy() *DataCaptureConfigObservation { + if in == nil { + return nil + } + out := new(DataCaptureConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataCaptureConfigParameters) DeepCopyInto(out *DataCaptureConfigParameters) { + *out = *in + if in.CaptureContentTypeHeader != nil { + in, out := &in.CaptureContentTypeHeader, &out.CaptureContentTypeHeader + *out = new(CaptureContentTypeHeaderParameters) + (*in).DeepCopyInto(*out) + } + if in.CaptureOptions != nil { + in, out := &in.CaptureOptions, &out.CaptureOptions + *out = make([]CaptureOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DestinationS3URI != nil { + in, out := &in.DestinationS3URI, &out.DestinationS3URI + *out = new(string) + **out = **in + } + if in.EnableCapture != nil { + in, out := &in.EnableCapture, &out.EnableCapture + *out = new(bool) + **out = **in + } + if in.InitialSamplingPercentage != nil { + in, out := &in.InitialSamplingPercentage, &out.InitialSamplingPercentage + *out = new(float64) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCaptureConfigParameters. +func (in *DataCaptureConfigParameters) DeepCopy() *DataCaptureConfigParameters { + if in == nil { + return nil + } + out := new(DataCaptureConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataCatalogConfigInitParameters) DeepCopyInto(out *DataCatalogConfigInitParameters) { + *out = *in + if in.Catalog != nil { + in, out := &in.Catalog, &out.Catalog + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogConfigInitParameters. +func (in *DataCatalogConfigInitParameters) DeepCopy() *DataCatalogConfigInitParameters { + if in == nil { + return nil + } + out := new(DataCatalogConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataCatalogConfigObservation) DeepCopyInto(out *DataCatalogConfigObservation) { + *out = *in + if in.Catalog != nil { + in, out := &in.Catalog, &out.Catalog + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogConfigObservation. +func (in *DataCatalogConfigObservation) DeepCopy() *DataCatalogConfigObservation { + if in == nil { + return nil + } + out := new(DataCatalogConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataCatalogConfigParameters) DeepCopyInto(out *DataCatalogConfigParameters) { + *out = *in + if in.Catalog != nil { + in, out := &in.Catalog, &out.Catalog + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCatalogConfigParameters. +func (in *DataCatalogConfigParameters) DeepCopy() *DataCatalogConfigParameters { + if in == nil { + return nil + } + out := new(DataCatalogConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultEBSStorageSettingsInitParameters) DeepCopyInto(out *DefaultEBSStorageSettingsInitParameters) { + *out = *in + if in.DefaultEBSVolumeSizeInGb != nil { + in, out := &in.DefaultEBSVolumeSizeInGb, &out.DefaultEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } + if in.MaximumEBSVolumeSizeInGb != nil { + in, out := &in.MaximumEBSVolumeSizeInGb, &out.MaximumEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultEBSStorageSettingsInitParameters. +func (in *DefaultEBSStorageSettingsInitParameters) DeepCopy() *DefaultEBSStorageSettingsInitParameters { + if in == nil { + return nil + } + out := new(DefaultEBSStorageSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultEBSStorageSettingsObservation) DeepCopyInto(out *DefaultEBSStorageSettingsObservation) { + *out = *in + if in.DefaultEBSVolumeSizeInGb != nil { + in, out := &in.DefaultEBSVolumeSizeInGb, &out.DefaultEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } + if in.MaximumEBSVolumeSizeInGb != nil { + in, out := &in.MaximumEBSVolumeSizeInGb, &out.MaximumEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultEBSStorageSettingsObservation. +func (in *DefaultEBSStorageSettingsObservation) DeepCopy() *DefaultEBSStorageSettingsObservation { + if in == nil { + return nil + } + out := new(DefaultEBSStorageSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultEBSStorageSettingsParameters) DeepCopyInto(out *DefaultEBSStorageSettingsParameters) { + *out = *in + if in.DefaultEBSVolumeSizeInGb != nil { + in, out := &in.DefaultEBSVolumeSizeInGb, &out.DefaultEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } + if in.MaximumEBSVolumeSizeInGb != nil { + in, out := &in.MaximumEBSVolumeSizeInGb, &out.MaximumEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultEBSStorageSettingsParameters. +func (in *DefaultEBSStorageSettingsParameters) DeepCopy() *DefaultEBSStorageSettingsParameters { + if in == nil { + return nil + } + out := new(DefaultEBSStorageSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultResourceSpecInitParameters) DeepCopyInto(out *DefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultResourceSpecInitParameters. +func (in *DefaultResourceSpecInitParameters) DeepCopy() *DefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(DefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultResourceSpecObservation) DeepCopyInto(out *DefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultResourceSpecObservation. +func (in *DefaultResourceSpecObservation) DeepCopy() *DefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(DefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultResourceSpecParameters) DeepCopyInto(out *DefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultResourceSpecParameters. +func (in *DefaultResourceSpecParameters) DeepCopy() *DefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(DefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultSpaceSettingsInitParameters) DeepCopyInto(out *DefaultSpaceSettingsInitParameters) { + *out = *in + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(JupyterServerAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(KernelGatewayAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSpaceSettingsInitParameters. +func (in *DefaultSpaceSettingsInitParameters) DeepCopy() *DefaultSpaceSettingsInitParameters { + if in == nil { + return nil + } + out := new(DefaultSpaceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultSpaceSettingsObservation) DeepCopyInto(out *DefaultSpaceSettingsObservation) { + *out = *in + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(JupyterServerAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(KernelGatewayAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSpaceSettingsObservation. +func (in *DefaultSpaceSettingsObservation) DeepCopy() *DefaultSpaceSettingsObservation { + if in == nil { + return nil + } + out := new(DefaultSpaceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultSpaceSettingsParameters) DeepCopyInto(out *DefaultSpaceSettingsParameters) { + *out = *in + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(JupyterServerAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(KernelGatewayAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSpaceSettingsParameters. +func (in *DefaultSpaceSettingsParameters) DeepCopy() *DefaultSpaceSettingsParameters { + if in == nil { + return nil + } + out := new(DefaultSpaceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsInitParameters) DeepCopyInto(out *DefaultUserSettingsInitParameters) { + *out = *in + if in.CanvasAppSettings != nil { + in, out := &in.CanvasAppSettings, &out.CanvasAppSettings + *out = new(CanvasAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CodeEditorAppSettings != nil { + in, out := &in.CodeEditorAppSettings, &out.CodeEditorAppSettings + *out = new(CodeEditorAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomFileSystemConfig != nil { + in, out := &in.CustomFileSystemConfig, &out.CustomFileSystemConfig + *out = make([]CustomFileSystemConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomPosixUserConfig != nil { + in, out := &in.CustomPosixUserConfig, &out.CustomPosixUserConfig + *out = new(CustomPosixUserConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultLandingURI != nil { + in, out := &in.DefaultLandingURI, &out.DefaultLandingURI + *out = new(string) + **out = **in + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.ExecutionRoleRef != nil { + in, out := &in.ExecutionRoleRef, &out.ExecutionRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleSelector != nil { + in, out := &in.ExecutionRoleSelector, &out.ExecutionRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.JupyterLabAppSettings != nil { + in, out := &in.JupyterLabAppSettings, &out.JupyterLabAppSettings + *out = new(JupyterLabAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(DefaultUserSettingsJupyterServerAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = 
new(DefaultUserSettingsKernelGatewayAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RSessionAppSettings != nil { + in, out := &in.RSessionAppSettings, &out.RSessionAppSettings + *out = new(RSessionAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RStudioServerProAppSettings != nil { + in, out := &in.RStudioServerProAppSettings, &out.RStudioServerProAppSettings + *out = new(RStudioServerProAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SharingSettings != nil { + in, out := &in.SharingSettings, &out.SharingSettings + *out = new(SharingSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceStorageSettings != nil { + in, out := &in.SpaceStorageSettings, &out.SpaceStorageSettings + *out = new(SpaceStorageSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StudioWebPortal != nil { + in, out := &in.StudioWebPortal, &out.StudioWebPortal + *out = new(string) + **out = **in + } + if in.TensorBoardAppSettings != nil { + in, out := &in.TensorBoardAppSettings, &out.TensorBoardAppSettings + *out = new(TensorBoardAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsInitParameters. +func (in *DefaultUserSettingsInitParameters) DeepCopy() *DefaultUserSettingsInitParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) DeepCopyInto(out *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters. +func (in *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) DeepCopy() *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryObservation) DeepCopyInto(out *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryObservation) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryObservation. +func (in *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryObservation) DeepCopy() *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryObservation { + if in == nil { + return nil + } + out := new(DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryParameters) DeepCopyInto(out *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryParameters. +func (in *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryParameters) DeepCopy() *DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultUserSettingsJupyterServerAppSettingsInitParameters) DeepCopyInto(out *DefaultUserSettingsJupyterServerAppSettingsInitParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(JupyterServerAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsJupyterServerAppSettingsInitParameters. 
+func (in *DefaultUserSettingsJupyterServerAppSettingsInitParameters) DeepCopy() *DefaultUserSettingsJupyterServerAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsJupyterServerAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultUserSettingsJupyterServerAppSettingsObservation) DeepCopyInto(out *DefaultUserSettingsJupyterServerAppSettingsObservation) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(JupyterServerAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsJupyterServerAppSettingsObservation. +func (in *DefaultUserSettingsJupyterServerAppSettingsObservation) DeepCopy() *DefaultUserSettingsJupyterServerAppSettingsObservation { + if in == nil { + return nil + } + out := new(DefaultUserSettingsJupyterServerAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsJupyterServerAppSettingsParameters) DeepCopyInto(out *DefaultUserSettingsJupyterServerAppSettingsParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]DefaultUserSettingsJupyterServerAppSettingsCodeRepositoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(JupyterServerAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsJupyterServerAppSettingsParameters. +func (in *DefaultUserSettingsJupyterServerAppSettingsParameters) DeepCopy() *DefaultUserSettingsJupyterServerAppSettingsParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsJupyterServerAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters. +func (in *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation. +func (in *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) DeepCopy() *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters. +func (in *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) DeepCopy() *DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsKernelGatewayAppSettingsInitParameters) DeepCopyInto(out *DefaultUserSettingsKernelGatewayAppSettingsInitParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]KernelGatewayAppSettingsCustomImageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsKernelGatewayAppSettingsInitParameters. +func (in *DefaultUserSettingsKernelGatewayAppSettingsInitParameters) DeepCopy() *DefaultUserSettingsKernelGatewayAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsKernelGatewayAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsKernelGatewayAppSettingsObservation) DeepCopyInto(out *DefaultUserSettingsKernelGatewayAppSettingsObservation) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]KernelGatewayAppSettingsCustomImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsKernelGatewayAppSettingsObservation. +func (in *DefaultUserSettingsKernelGatewayAppSettingsObservation) DeepCopy() *DefaultUserSettingsKernelGatewayAppSettingsObservation { + if in == nil { + return nil + } + out := new(DefaultUserSettingsKernelGatewayAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsKernelGatewayAppSettingsParameters) DeepCopyInto(out *DefaultUserSettingsKernelGatewayAppSettingsParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]KernelGatewayAppSettingsCustomImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(DefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsKernelGatewayAppSettingsParameters. +func (in *DefaultUserSettingsKernelGatewayAppSettingsParameters) DeepCopy() *DefaultUserSettingsKernelGatewayAppSettingsParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsKernelGatewayAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsObservation) DeepCopyInto(out *DefaultUserSettingsObservation) { + *out = *in + if in.CanvasAppSettings != nil { + in, out := &in.CanvasAppSettings, &out.CanvasAppSettings + *out = new(CanvasAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.CodeEditorAppSettings != nil { + in, out := &in.CodeEditorAppSettings, &out.CodeEditorAppSettings + *out = new(CodeEditorAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomFileSystemConfig != nil { + in, out := &in.CustomFileSystemConfig, &out.CustomFileSystemConfig + *out = make([]CustomFileSystemConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomPosixUserConfig != nil { + in, out := &in.CustomPosixUserConfig, &out.CustomPosixUserConfig + *out = new(CustomPosixUserConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultLandingURI != nil { + in, out := &in.DefaultLandingURI, &out.DefaultLandingURI + *out = new(string) + **out = **in + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.JupyterLabAppSettings != nil { + in, out := &in.JupyterLabAppSettings, &out.JupyterLabAppSettings + *out = new(JupyterLabAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(DefaultUserSettingsJupyterServerAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(DefaultUserSettingsKernelGatewayAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.RSessionAppSettings != nil { + in, out := &in.RSessionAppSettings, &out.RSessionAppSettings + *out = new(RSessionAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.RStudioServerProAppSettings != nil { + in, out := 
&in.RStudioServerProAppSettings, &out.RStudioServerProAppSettings + *out = new(RStudioServerProAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SharingSettings != nil { + in, out := &in.SharingSettings, &out.SharingSettings + *out = new(SharingSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SpaceStorageSettings != nil { + in, out := &in.SpaceStorageSettings, &out.SpaceStorageSettings + *out = new(SpaceStorageSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.StudioWebPortal != nil { + in, out := &in.StudioWebPortal, &out.StudioWebPortal + *out = new(string) + **out = **in + } + if in.TensorBoardAppSettings != nil { + in, out := &in.TensorBoardAppSettings, &out.TensorBoardAppSettings + *out = new(TensorBoardAppSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsObservation. +func (in *DefaultUserSettingsObservation) DeepCopy() *DefaultUserSettingsObservation { + if in == nil { + return nil + } + out := new(DefaultUserSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultUserSettingsParameters) DeepCopyInto(out *DefaultUserSettingsParameters) { + *out = *in + if in.CanvasAppSettings != nil { + in, out := &in.CanvasAppSettings, &out.CanvasAppSettings + *out = new(CanvasAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.CodeEditorAppSettings != nil { + in, out := &in.CodeEditorAppSettings, &out.CodeEditorAppSettings + *out = new(CodeEditorAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomFileSystemConfig != nil { + in, out := &in.CustomFileSystemConfig, &out.CustomFileSystemConfig + *out = make([]CustomFileSystemConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomPosixUserConfig != nil { + in, out := &in.CustomPosixUserConfig, &out.CustomPosixUserConfig + *out = new(CustomPosixUserConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultLandingURI != nil { + in, out := &in.DefaultLandingURI, &out.DefaultLandingURI + *out = new(string) + **out = **in + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.ExecutionRoleRef != nil { + in, out := &in.ExecutionRoleRef, &out.ExecutionRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleSelector != nil { + in, out := &in.ExecutionRoleSelector, &out.ExecutionRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.JupyterLabAppSettings != nil { + in, out := &in.JupyterLabAppSettings, &out.JupyterLabAppSettings + *out = new(JupyterLabAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(DefaultUserSettingsJupyterServerAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = 
new(DefaultUserSettingsKernelGatewayAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.RSessionAppSettings != nil { + in, out := &in.RSessionAppSettings, &out.RSessionAppSettings + *out = new(RSessionAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.RStudioServerProAppSettings != nil { + in, out := &in.RStudioServerProAppSettings, &out.RStudioServerProAppSettings + *out = new(RStudioServerProAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SharingSettings != nil { + in, out := &in.SharingSettings, &out.SharingSettings + *out = new(SharingSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceStorageSettings != nil { + in, out := &in.SpaceStorageSettings, &out.SpaceStorageSettings + *out = new(SpaceStorageSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.StudioWebPortal != nil { + in, out := &in.StudioWebPortal, &out.StudioWebPortal + *out = new(string) + **out = **in + } + if in.TensorBoardAppSettings != nil { + in, out := &in.TensorBoardAppSettings, &out.TensorBoardAppSettings + *out = new(TensorBoardAppSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultUserSettingsParameters. +func (in *DefaultUserSettingsParameters) DeepCopy() *DefaultUserSettingsParameters { + if in == nil { + return nil + } + out := new(DefaultUserSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentConfigInitParameters) DeepCopyInto(out *DeploymentConfigInitParameters) { + *out = *in + if in.AutoRollbackConfiguration != nil { + in, out := &in.AutoRollbackConfiguration, &out.AutoRollbackConfiguration + *out = new(AutoRollbackConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BlueGreenUpdatePolicy != nil { + in, out := &in.BlueGreenUpdatePolicy, &out.BlueGreenUpdatePolicy + *out = new(BlueGreenUpdatePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RollingUpdatePolicy != nil { + in, out := &in.RollingUpdatePolicy, &out.RollingUpdatePolicy + *out = new(RollingUpdatePolicyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigInitParameters. +func (in *DeploymentConfigInitParameters) DeepCopy() *DeploymentConfigInitParameters { + if in == nil { + return nil + } + out := new(DeploymentConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigObservation) DeepCopyInto(out *DeploymentConfigObservation) { + *out = *in + if in.AutoRollbackConfiguration != nil { + in, out := &in.AutoRollbackConfiguration, &out.AutoRollbackConfiguration + *out = new(AutoRollbackConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.BlueGreenUpdatePolicy != nil { + in, out := &in.BlueGreenUpdatePolicy, &out.BlueGreenUpdatePolicy + *out = new(BlueGreenUpdatePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.RollingUpdatePolicy != nil { + in, out := &in.RollingUpdatePolicy, &out.RollingUpdatePolicy + *out = new(RollingUpdatePolicyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigObservation. 
+func (in *DeploymentConfigObservation) DeepCopy() *DeploymentConfigObservation { + if in == nil { + return nil + } + out := new(DeploymentConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigParameters) DeepCopyInto(out *DeploymentConfigParameters) { + *out = *in + if in.AutoRollbackConfiguration != nil { + in, out := &in.AutoRollbackConfiguration, &out.AutoRollbackConfiguration + *out = new(AutoRollbackConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.BlueGreenUpdatePolicy != nil { + in, out := &in.BlueGreenUpdatePolicy, &out.BlueGreenUpdatePolicy + *out = new(BlueGreenUpdatePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.RollingUpdatePolicy != nil { + in, out := &in.RollingUpdatePolicy, &out.RollingUpdatePolicy + *out = new(RollingUpdatePolicyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigParameters. +func (in *DeploymentConfigParameters) DeepCopy() *DeploymentConfigParameters { + if in == nil { + return nil + } + out := new(DeploymentConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Device) DeepCopyInto(out *Device) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. +func (in *Device) DeepCopy() *Device { + if in == nil { + return nil + } + out := new(Device) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Device) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceDeviceInitParameters) DeepCopyInto(out *DeviceDeviceInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.IotThingName != nil { + in, out := &in.IotThingName, &out.IotThingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceDeviceInitParameters. +func (in *DeviceDeviceInitParameters) DeepCopy() *DeviceDeviceInitParameters { + if in == nil { + return nil + } + out := new(DeviceDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceDeviceObservation) DeepCopyInto(out *DeviceDeviceObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.IotThingName != nil { + in, out := &in.IotThingName, &out.IotThingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceDeviceObservation. +func (in *DeviceDeviceObservation) DeepCopy() *DeviceDeviceObservation { + if in == nil { + return nil + } + out := new(DeviceDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceDeviceParameters) DeepCopyInto(out *DeviceDeviceParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.IotThingName != nil { + in, out := &in.IotThingName, &out.IotThingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceDeviceParameters. +func (in *DeviceDeviceParameters) DeepCopy() *DeviceDeviceParameters { + if in == nil { + return nil + } + out := new(DeviceDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceFleet) DeepCopyInto(out *DeviceFleet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceFleet. +func (in *DeviceFleet) DeepCopy() *DeviceFleet { + if in == nil { + return nil + } + out := new(DeviceFleet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceFleet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceFleetInitParameters) DeepCopyInto(out *DeviceFleetInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EnableIotRoleAlias != nil { + in, out := &in.EnableIotRoleAlias, &out.EnableIotRoleAlias + *out = new(bool) + **out = **in + } + if in.OutputConfig != nil { + in, out := &in.OutputConfig, &out.OutputConfig + *out = new(OutputConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceFleetInitParameters. +func (in *DeviceFleetInitParameters) DeepCopy() *DeviceFleetInitParameters { + if in == nil { + return nil + } + out := new(DeviceFleetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceFleetList) DeepCopyInto(out *DeviceFleetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeviceFleet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceFleetList. +func (in *DeviceFleetList) DeepCopy() *DeviceFleetList { + if in == nil { + return nil + } + out := new(DeviceFleetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceFleetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceFleetObservation) DeepCopyInto(out *DeviceFleetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EnableIotRoleAlias != nil { + in, out := &in.EnableIotRoleAlias, &out.EnableIotRoleAlias + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IotRoleAlias != nil { + in, out := &in.IotRoleAlias, &out.IotRoleAlias + *out = new(string) + **out = **in + } + if in.OutputConfig != nil { + in, out := &in.OutputConfig, &out.OutputConfig + *out = new(OutputConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val 
== nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceFleetObservation. +func (in *DeviceFleetObservation) DeepCopy() *DeviceFleetObservation { + if in == nil { + return nil + } + out := new(DeviceFleetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceFleetParameters) DeepCopyInto(out *DeviceFleetParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EnableIotRoleAlias != nil { + in, out := &in.EnableIotRoleAlias, &out.EnableIotRoleAlias + *out = new(bool) + **out = **in + } + if in.OutputConfig != nil { + in, out := &in.OutputConfig, &out.OutputConfig + *out = new(OutputConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := 
range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceFleetParameters. +func (in *DeviceFleetParameters) DeepCopy() *DeviceFleetParameters { + if in == nil { + return nil + } + out := new(DeviceFleetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceFleetSpec) DeepCopyInto(out *DeviceFleetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceFleetSpec. +func (in *DeviceFleetSpec) DeepCopy() *DeviceFleetSpec { + if in == nil { + return nil + } + out := new(DeviceFleetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceFleetStatus) DeepCopyInto(out *DeviceFleetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceFleetStatus. +func (in *DeviceFleetStatus) DeepCopy() *DeviceFleetStatus { + if in == nil { + return nil + } + out := new(DeviceFleetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceInitParameters) DeepCopyInto(out *DeviceInitParameters) { + *out = *in + if in.Device != nil { + in, out := &in.Device, &out.Device + *out = new(DeviceDeviceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeviceFleetName != nil { + in, out := &in.DeviceFleetName, &out.DeviceFleetName + *out = new(string) + **out = **in + } + if in.DeviceFleetNameRef != nil { + in, out := &in.DeviceFleetNameRef, &out.DeviceFleetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeviceFleetNameSelector != nil { + in, out := &in.DeviceFleetNameSelector, &out.DeviceFleetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceInitParameters. +func (in *DeviceInitParameters) DeepCopy() *DeviceInitParameters { + if in == nil { + return nil + } + out := new(DeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceList) DeepCopyInto(out *DeviceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Device, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceList. +func (in *DeviceList) DeepCopy() *DeviceList { + if in == nil { + return nil + } + out := new(DeviceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceObservation) DeepCopyInto(out *DeviceObservation) { + *out = *in + if in.AgentVersion != nil { + in, out := &in.AgentVersion, &out.AgentVersion + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Device != nil { + in, out := &in.Device, &out.Device + *out = new(DeviceDeviceObservation) + (*in).DeepCopyInto(*out) + } + if in.DeviceFleetName != nil { + in, out := &in.DeviceFleetName, &out.DeviceFleetName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceObservation. +func (in *DeviceObservation) DeepCopy() *DeviceObservation { + if in == nil { + return nil + } + out := new(DeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceParameters) DeepCopyInto(out *DeviceParameters) { + *out = *in + if in.Device != nil { + in, out := &in.Device, &out.Device + *out = new(DeviceDeviceParameters) + (*in).DeepCopyInto(*out) + } + if in.DeviceFleetName != nil { + in, out := &in.DeviceFleetName, &out.DeviceFleetName + *out = new(string) + **out = **in + } + if in.DeviceFleetNameRef != nil { + in, out := &in.DeviceFleetNameRef, &out.DeviceFleetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeviceFleetNameSelector != nil { + in, out := &in.DeviceFleetNameSelector, &out.DeviceFleetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceParameters. 
+func (in *DeviceParameters) DeepCopy() *DeviceParameters { + if in == nil { + return nil + } + out := new(DeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceSpec) DeepCopyInto(out *DeviceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSpec. +func (in *DeviceSpec) DeepCopy() *DeviceSpec { + if in == nil { + return nil + } + out := new(DeviceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceStatus) DeepCopyInto(out *DeviceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceStatus. +func (in *DeviceStatus) DeepCopy() *DeviceStatus { + if in == nil { + return nil + } + out := new(DeviceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectDeploySettingsInitParameters) DeepCopyInto(out *DirectDeploySettingsInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectDeploySettingsInitParameters. 
+func (in *DirectDeploySettingsInitParameters) DeepCopy() *DirectDeploySettingsInitParameters { + if in == nil { + return nil + } + out := new(DirectDeploySettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectDeploySettingsObservation) DeepCopyInto(out *DirectDeploySettingsObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectDeploySettingsObservation. +func (in *DirectDeploySettingsObservation) DeepCopy() *DirectDeploySettingsObservation { + if in == nil { + return nil + } + out := new(DirectDeploySettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectDeploySettingsParameters) DeepCopyInto(out *DirectDeploySettingsParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectDeploySettingsParameters. +func (in *DirectDeploySettingsParameters) DeepCopy() *DirectDeploySettingsParameters { + if in == nil { + return nil + } + out := new(DirectDeploySettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Domain) DeepCopyInto(out *Domain) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Domain. 
+func (in *Domain) DeepCopy() *Domain { + if in == nil { + return nil + } + out := new(Domain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Domain) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainInitParameters) DeepCopyInto(out *DomainInitParameters) { + *out = *in + if in.AppNetworkAccessType != nil { + in, out := &in.AppNetworkAccessType, &out.AppNetworkAccessType + *out = new(string) + **out = **in + } + if in.AppSecurityGroupManagement != nil { + in, out := &in.AppSecurityGroupManagement, &out.AppSecurityGroupManagement + *out = new(string) + **out = **in + } + if in.AuthMode != nil { + in, out := &in.AuthMode, &out.AuthMode + *out = new(string) + **out = **in + } + if in.DefaultSpaceSettings != nil { + in, out := &in.DefaultSpaceSettings, &out.DefaultSpaceSettings + *out = new(DefaultSpaceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultUserSettings != nil { + in, out := &in.DefaultUserSettings, &out.DefaultUserSettings + *out = new(DefaultUserSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainSettings != nil { + in, out := &in.DomainSettings, &out.DomainSettings + *out = new(DomainSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainInitParameters. +func (in *DomainInitParameters) DeepCopy() *DomainInitParameters { + if in == nil { + return nil + } + out := new(DomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainList) DeepCopyInto(out *DomainList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Domain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainList. +func (in *DomainList) DeepCopy() *DomainList { + if in == nil { + return nil + } + out := new(DomainList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainObservation) DeepCopyInto(out *DomainObservation) { + *out = *in + if in.AppNetworkAccessType != nil { + in, out := &in.AppNetworkAccessType, &out.AppNetworkAccessType + *out = new(string) + **out = **in + } + if in.AppSecurityGroupManagement != nil { + in, out := &in.AppSecurityGroupManagement, &out.AppSecurityGroupManagement + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AuthMode != nil { + in, out := &in.AuthMode, &out.AuthMode + *out = new(string) + **out = **in + } + if in.DefaultSpaceSettings != nil { + in, out := &in.DefaultSpaceSettings, &out.DefaultSpaceSettings + *out = new(DefaultSpaceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultUserSettings != nil { + in, out := &in.DefaultUserSettings, &out.DefaultUserSettings + *out = new(DefaultUserSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainSettings != nil { + in, 
out := &in.DomainSettings, &out.DomainSettings + *out = new(DomainSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.HomeEFSFileSystemID != nil { + in, out := &in.HomeEFSFileSystemID, &out.HomeEFSFileSystemID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIDForDomainBoundary != nil { + in, out := &in.SecurityGroupIDForDomainBoundary, &out.SecurityGroupIDForDomainBoundary + *out = new(string) + **out = **in + } + if in.SingleSignOnApplicationArn != nil { + in, out := &in.SingleSignOnApplicationArn, &out.SingleSignOnApplicationArn + *out = new(string) + **out = **in + } + if in.SingleSignOnManagedApplicationInstanceID != nil { + in, out := &in.SingleSignOnManagedApplicationInstanceID, &out.SingleSignOnManagedApplicationInstanceID + *out = new(string) + **out = **in + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out 
= **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainObservation. +func (in *DomainObservation) DeepCopy() *DomainObservation { + if in == nil { + return nil + } + out := new(DomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainParameters) DeepCopyInto(out *DomainParameters) { + *out = *in + if in.AppNetworkAccessType != nil { + in, out := &in.AppNetworkAccessType, &out.AppNetworkAccessType + *out = new(string) + **out = **in + } + if in.AppSecurityGroupManagement != nil { + in, out := &in.AppSecurityGroupManagement, &out.AppSecurityGroupManagement + *out = new(string) + **out = **in + } + if in.AuthMode != nil { + in, out := &in.AuthMode, &out.AuthMode + *out = new(string) + **out = **in + } + if in.DefaultSpaceSettings != nil { + in, out := &in.DefaultSpaceSettings, &out.DefaultSpaceSettings + *out = new(DefaultSpaceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultUserSettings != nil { + in, out := &in.DefaultUserSettings, &out.DefaultUserSettings + *out = new(DefaultUserSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainSettings != nil { + in, out := &in.DomainSettings, &out.DomainSettings + *out = new(DomainSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainParameters. 
+func (in *DomainParameters) DeepCopy() *DomainParameters { + if in == nil { + return nil + } + out := new(DomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSettingsInitParameters) DeepCopyInto(out *DomainSettingsInitParameters) { + *out = *in + if in.ExecutionRoleIdentityConfig != nil { + in, out := &in.ExecutionRoleIdentityConfig, &out.ExecutionRoleIdentityConfig + *out = new(string) + **out = **in + } + if in.RStudioServerProDomainSettings != nil { + in, out := &in.RStudioServerProDomainSettings, &out.RStudioServerProDomainSettings + *out = new(RStudioServerProDomainSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSettingsInitParameters. +func (in *DomainSettingsInitParameters) DeepCopy() *DomainSettingsInitParameters { + if in == nil { + return nil + } + out := new(DomainSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSettingsObservation) DeepCopyInto(out *DomainSettingsObservation) { + *out = *in + if in.ExecutionRoleIdentityConfig != nil { + in, out := &in.ExecutionRoleIdentityConfig, &out.ExecutionRoleIdentityConfig + *out = new(string) + **out = **in + } + if in.RStudioServerProDomainSettings != nil { + in, out := &in.RStudioServerProDomainSettings, &out.RStudioServerProDomainSettings + *out = new(RStudioServerProDomainSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSettingsObservation. +func (in *DomainSettingsObservation) DeepCopy() *DomainSettingsObservation { + if in == nil { + return nil + } + out := new(DomainSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSettingsParameters) DeepCopyInto(out *DomainSettingsParameters) { + *out = *in + if in.ExecutionRoleIdentityConfig != nil { + in, out := &in.ExecutionRoleIdentityConfig, &out.ExecutionRoleIdentityConfig + *out = new(string) + **out = **in + } + if in.RStudioServerProDomainSettings != nil { + in, out := &in.RStudioServerProDomainSettings, &out.RStudioServerProDomainSettings + *out = new(RStudioServerProDomainSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSettingsParameters. +func (in *DomainSettingsParameters) DeepCopy() *DomainSettingsParameters { + if in == nil { + return nil + } + out := new(DomainSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSpec) DeepCopyInto(out *DomainSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSpec. +func (in *DomainSpec) DeepCopy() *DomainSpec { + if in == nil { + return nil + } + out := new(DomainSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainStatus) DeepCopyInto(out *DomainStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainStatus. +func (in *DomainStatus) DeepCopy() *DomainStatus { + if in == nil { + return nil + } + out := new(DomainStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSStorageSettingsInitParameters) DeepCopyInto(out *EBSStorageSettingsInitParameters) { + *out = *in + if in.EBSVolumeSizeInGb != nil { + in, out := &in.EBSVolumeSizeInGb, &out.EBSVolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSStorageSettingsInitParameters. +func (in *EBSStorageSettingsInitParameters) DeepCopy() *EBSStorageSettingsInitParameters { + if in == nil { + return nil + } + out := new(EBSStorageSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSStorageSettingsObservation) DeepCopyInto(out *EBSStorageSettingsObservation) { + *out = *in + if in.EBSVolumeSizeInGb != nil { + in, out := &in.EBSVolumeSizeInGb, &out.EBSVolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSStorageSettingsObservation. +func (in *EBSStorageSettingsObservation) DeepCopy() *EBSStorageSettingsObservation { + if in == nil { + return nil + } + out := new(EBSStorageSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBSStorageSettingsParameters) DeepCopyInto(out *EBSStorageSettingsParameters) { + *out = *in + if in.EBSVolumeSizeInGb != nil { + in, out := &in.EBSVolumeSizeInGb, &out.EBSVolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSStorageSettingsParameters. +func (in *EBSStorageSettingsParameters) DeepCopy() *EBSStorageSettingsParameters { + if in == nil { + return nil + } + out := new(EBSStorageSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EFSFileSystemConfigInitParameters) DeepCopyInto(out *EFSFileSystemConfigInitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemPath != nil { + in, out := &in.FileSystemPath, &out.FileSystemPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSFileSystemConfigInitParameters. +func (in *EFSFileSystemConfigInitParameters) DeepCopy() *EFSFileSystemConfigInitParameters { + if in == nil { + return nil + } + out := new(EFSFileSystemConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EFSFileSystemConfigObservation) DeepCopyInto(out *EFSFileSystemConfigObservation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemPath != nil { + in, out := &in.FileSystemPath, &out.FileSystemPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSFileSystemConfigObservation. 
+func (in *EFSFileSystemConfigObservation) DeepCopy() *EFSFileSystemConfigObservation { + if in == nil { + return nil + } + out := new(EFSFileSystemConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EFSFileSystemConfigParameters) DeepCopyInto(out *EFSFileSystemConfigParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.FileSystemPath != nil { + in, out := &in.FileSystemPath, &out.FileSystemPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSFileSystemConfigParameters. +func (in *EFSFileSystemConfigParameters) DeepCopy() *EFSFileSystemConfigParameters { + if in == nil { + return nil + } + out := new(EFSFileSystemConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EFSFileSystemInitParameters) DeepCopyInto(out *EFSFileSystemInitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSFileSystemInitParameters. +func (in *EFSFileSystemInitParameters) DeepCopy() *EFSFileSystemInitParameters { + if in == nil { + return nil + } + out := new(EFSFileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EFSFileSystemObservation) DeepCopyInto(out *EFSFileSystemObservation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSFileSystemObservation. +func (in *EFSFileSystemObservation) DeepCopy() *EFSFileSystemObservation { + if in == nil { + return nil + } + out := new(EFSFileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EFSFileSystemParameters) DeepCopyInto(out *EFSFileSystemParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSFileSystemParameters. +func (in *EFSFileSystemParameters) DeepCopy() *EFSFileSystemParameters { + if in == nil { + return nil + } + out := new(EFSFileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Endpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConfiguration) DeepCopyInto(out *EndpointConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfiguration. +func (in *EndpointConfiguration) DeepCopy() *EndpointConfiguration { + if in == nil { + return nil + } + out := new(EndpointConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointConfigurationInitParameters) DeepCopyInto(out *EndpointConfigurationInitParameters) { + *out = *in + if in.AsyncInferenceConfig != nil { + in, out := &in.AsyncInferenceConfig, &out.AsyncInferenceConfig + *out = new(AsyncInferenceConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DataCaptureConfig != nil { + in, out := &in.DataCaptureConfig, &out.DataCaptureConfig + *out = new(DataCaptureConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ProductionVariants != nil { + in, out := &in.ProductionVariants, &out.ProductionVariants + *out = make([]ProductionVariantsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShadowProductionVariants != nil { + in, out := &in.ShadowProductionVariants, &out.ShadowProductionVariants + *out = make([]ShadowProductionVariantsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigurationInitParameters. 
+func (in *EndpointConfigurationInitParameters) DeepCopy() *EndpointConfigurationInitParameters { + if in == nil { + return nil + } + out := new(EndpointConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConfigurationList) DeepCopyInto(out *EndpointConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EndpointConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigurationList. +func (in *EndpointConfigurationList) DeepCopy() *EndpointConfigurationList { + if in == nil { + return nil + } + out := new(EndpointConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointConfigurationObservation) DeepCopyInto(out *EndpointConfigurationObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AsyncInferenceConfig != nil { + in, out := &in.AsyncInferenceConfig, &out.AsyncInferenceConfig + *out = new(AsyncInferenceConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.DataCaptureConfig != nil { + in, out := &in.DataCaptureConfig, &out.DataCaptureConfig + *out = new(DataCaptureConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.ProductionVariants != nil { + in, out := &in.ProductionVariants, &out.ProductionVariants + *out = make([]ProductionVariantsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShadowProductionVariants != nil { + in, out := &in.ShadowProductionVariants, &out.ShadowProductionVariants + *out = make([]ShadowProductionVariantsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigurationObservation. 
+func (in *EndpointConfigurationObservation) DeepCopy() *EndpointConfigurationObservation { + if in == nil { + return nil + } + out := new(EndpointConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConfigurationParameters) DeepCopyInto(out *EndpointConfigurationParameters) { + *out = *in + if in.AsyncInferenceConfig != nil { + in, out := &in.AsyncInferenceConfig, &out.AsyncInferenceConfig + *out = new(AsyncInferenceConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.DataCaptureConfig != nil { + in, out := &in.DataCaptureConfig, &out.DataCaptureConfig + *out = new(DataCaptureConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ProductionVariants != nil { + in, out := &in.ProductionVariants, &out.ProductionVariants + *out = make([]ProductionVariantsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ShadowProductionVariants != nil { + in, out := &in.ShadowProductionVariants, &out.ShadowProductionVariants + *out = make([]ShadowProductionVariantsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal 
+ *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigurationParameters. +func (in *EndpointConfigurationParameters) DeepCopy() *EndpointConfigurationParameters { + if in == nil { + return nil + } + out := new(EndpointConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConfigurationSpec) DeepCopyInto(out *EndpointConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigurationSpec. +func (in *EndpointConfigurationSpec) DeepCopy() *EndpointConfigurationSpec { + if in == nil { + return nil + } + out := new(EndpointConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConfigurationStatus) DeepCopyInto(out *EndpointConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigurationStatus. +func (in *EndpointConfigurationStatus) DeepCopy() *EndpointConfigurationStatus { + if in == nil { + return nil + } + out := new(EndpointConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters) { + *out = *in + if in.DeploymentConfig != nil { + in, out := &in.DeploymentConfig, &out.DeploymentConfig + *out = new(DeploymentConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EndpointConfigName != nil { + in, out := &in.EndpointConfigName, &out.EndpointConfigName + *out = new(string) + **out = **in + } + if in.EndpointConfigNameRef != nil { + in, out := &in.EndpointConfigNameRef, &out.EndpointConfigNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EndpointConfigNameSelector != nil { + in, out := &in.EndpointConfigNameSelector, &out.EndpointConfigNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters. +func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters { + if in == nil { + return nil + } + out := new(EndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointList) DeepCopyInto(out *EndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointList. 
+func (in *EndpointList) DeepCopy() *EndpointList { + if in == nil { + return nil + } + out := new(EndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DeploymentConfig != nil { + in, out := &in.DeploymentConfig, &out.DeploymentConfig + *out = new(DeploymentConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.EndpointConfigName != nil { + in, out := &in.EndpointConfigName, &out.EndpointConfigName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation. 
+func (in *EndpointObservation) DeepCopy() *EndpointObservation { + if in == nil { + return nil + } + out := new(EndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters) { + *out = *in + if in.DeploymentConfig != nil { + in, out := &in.DeploymentConfig, &out.DeploymentConfig + *out = new(DeploymentConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.EndpointConfigName != nil { + in, out := &in.EndpointConfigName, &out.EndpointConfigName + *out = new(string) + **out = **in + } + if in.EndpointConfigNameRef != nil { + in, out := &in.EndpointConfigNameRef, &out.EndpointConfigNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EndpointConfigNameSelector != nil { + in, out := &in.EndpointConfigNameSelector, &out.EndpointConfigNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters. +func (in *EndpointParameters) DeepCopy() *EndpointParameters { + if in == nil { + return nil + } + out := new(EndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointSpec) DeepCopyInto(out *EndpointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSpec. +func (in *EndpointSpec) DeepCopy() *EndpointSpec { + if in == nil { + return nil + } + out := new(EndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus. +func (in *EndpointStatus) DeepCopy() *EndpointStatus { + if in == nil { + return nil + } + out := new(EndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureDefinitionInitParameters) DeepCopyInto(out *FeatureDefinitionInitParameters) { + *out = *in + if in.FeatureName != nil { + in, out := &in.FeatureName, &out.FeatureName + *out = new(string) + **out = **in + } + if in.FeatureType != nil { + in, out := &in.FeatureType, &out.FeatureType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureDefinitionInitParameters. +func (in *FeatureDefinitionInitParameters) DeepCopy() *FeatureDefinitionInitParameters { + if in == nil { + return nil + } + out := new(FeatureDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureDefinitionObservation) DeepCopyInto(out *FeatureDefinitionObservation) { + *out = *in + if in.FeatureName != nil { + in, out := &in.FeatureName, &out.FeatureName + *out = new(string) + **out = **in + } + if in.FeatureType != nil { + in, out := &in.FeatureType, &out.FeatureType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureDefinitionObservation. +func (in *FeatureDefinitionObservation) DeepCopy() *FeatureDefinitionObservation { + if in == nil { + return nil + } + out := new(FeatureDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureDefinitionParameters) DeepCopyInto(out *FeatureDefinitionParameters) { + *out = *in + if in.FeatureName != nil { + in, out := &in.FeatureName, &out.FeatureName + *out = new(string) + **out = **in + } + if in.FeatureType != nil { + in, out := &in.FeatureType, &out.FeatureType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureDefinitionParameters. +func (in *FeatureDefinitionParameters) DeepCopy() *FeatureDefinitionParameters { + if in == nil { + return nil + } + out := new(FeatureDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGroup) DeepCopyInto(out *FeatureGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGroup. 
+func (in *FeatureGroup) DeepCopy() *FeatureGroup { + if in == nil { + return nil + } + out := new(FeatureGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGroupInitParameters) DeepCopyInto(out *FeatureGroupInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EventTimeFeatureName != nil { + in, out := &in.EventTimeFeatureName, &out.EventTimeFeatureName + *out = new(string) + **out = **in + } + if in.FeatureDefinition != nil { + in, out := &in.FeatureDefinition, &out.FeatureDefinition + *out = make([]FeatureDefinitionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OfflineStoreConfig != nil { + in, out := &in.OfflineStoreConfig, &out.OfflineStoreConfig + *out = new(OfflineStoreConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OnlineStoreConfig != nil { + in, out := &in.OnlineStoreConfig, &out.OnlineStoreConfig + *out = new(OnlineStoreConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RecordIdentifierFeatureName != nil { + in, out := &in.RecordIdentifierFeatureName, &out.RecordIdentifierFeatureName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { 
+ in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGroupInitParameters. +func (in *FeatureGroupInitParameters) DeepCopy() *FeatureGroupInitParameters { + if in == nil { + return nil + } + out := new(FeatureGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGroupList) DeepCopyInto(out *FeatureGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FeatureGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGroupList. +func (in *FeatureGroupList) DeepCopy() *FeatureGroupList { + if in == nil { + return nil + } + out := new(FeatureGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureGroupObservation) DeepCopyInto(out *FeatureGroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EventTimeFeatureName != nil { + in, out := &in.EventTimeFeatureName, &out.EventTimeFeatureName + *out = new(string) + **out = **in + } + if in.FeatureDefinition != nil { + in, out := &in.FeatureDefinition, &out.FeatureDefinition + *out = make([]FeatureDefinitionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OfflineStoreConfig != nil { + in, out := &in.OfflineStoreConfig, &out.OfflineStoreConfig + *out = new(OfflineStoreConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.OnlineStoreConfig != nil { + in, out := &in.OnlineStoreConfig, &out.OnlineStoreConfig + *out = new(OnlineStoreConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.RecordIdentifierFeatureName != nil { + in, out := &in.RecordIdentifierFeatureName, &out.RecordIdentifierFeatureName + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGroupObservation. +func (in *FeatureGroupObservation) DeepCopy() *FeatureGroupObservation { + if in == nil { + return nil + } + out := new(FeatureGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGroupParameters) DeepCopyInto(out *FeatureGroupParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EventTimeFeatureName != nil { + in, out := &in.EventTimeFeatureName, &out.EventTimeFeatureName + *out = new(string) + **out = **in + } + if in.FeatureDefinition != nil { + in, out := &in.FeatureDefinition, &out.FeatureDefinition + *out = make([]FeatureDefinitionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OfflineStoreConfig != nil { + in, out := &in.OfflineStoreConfig, &out.OfflineStoreConfig + *out = new(OfflineStoreConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.OnlineStoreConfig != nil { + in, out := &in.OnlineStoreConfig, &out.OnlineStoreConfig + *out = new(OnlineStoreConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.RecordIdentifierFeatureName != nil { + in, out := &in.RecordIdentifierFeatureName, &out.RecordIdentifierFeatureName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags 
!= nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGroupParameters. +func (in *FeatureGroupParameters) DeepCopy() *FeatureGroupParameters { + if in == nil { + return nil + } + out := new(FeatureGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGroupSpec) DeepCopyInto(out *FeatureGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGroupSpec. +func (in *FeatureGroupSpec) DeepCopy() *FeatureGroupSpec { + if in == nil { + return nil + } + out := new(FeatureGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGroupStatus) DeepCopyInto(out *FeatureGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGroupStatus. +func (in *FeatureGroupStatus) DeepCopy() *FeatureGroupStatus { + if in == nil { + return nil + } + out := new(FeatureGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileSystemConfigInitParameters) DeepCopyInto(out *FileSystemConfigInitParameters) { + *out = *in + if in.DefaultGID != nil { + in, out := &in.DefaultGID, &out.DefaultGID + *out = new(float64) + **out = **in + } + if in.DefaultUID != nil { + in, out := &in.DefaultUID, &out.DefaultUID + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemConfigInitParameters. +func (in *FileSystemConfigInitParameters) DeepCopy() *FileSystemConfigInitParameters { + if in == nil { + return nil + } + out := new(FileSystemConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemConfigObservation) DeepCopyInto(out *FileSystemConfigObservation) { + *out = *in + if in.DefaultGID != nil { + in, out := &in.DefaultGID, &out.DefaultGID + *out = new(float64) + **out = **in + } + if in.DefaultUID != nil { + in, out := &in.DefaultUID, &out.DefaultUID + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemConfigObservation. +func (in *FileSystemConfigObservation) DeepCopy() *FileSystemConfigObservation { + if in == nil { + return nil + } + out := new(FileSystemConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileSystemConfigParameters) DeepCopyInto(out *FileSystemConfigParameters) { + *out = *in + if in.DefaultGID != nil { + in, out := &in.DefaultGID, &out.DefaultGID + *out = new(float64) + **out = **in + } + if in.DefaultUID != nil { + in, out := &in.DefaultUID, &out.DefaultUID + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemConfigParameters. +func (in *FileSystemConfigParameters) DeepCopy() *FileSystemConfigParameters { + if in == nil { + return nil + } + out := new(FileSystemConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitConfigInitParameters) DeepCopyInto(out *GitConfigInitParameters) { + *out = *in + if in.Branch != nil { + in, out := &in.Branch, &out.Branch + *out = new(string) + **out = **in + } + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.SecretArnRef != nil { + in, out := &in.SecretArnRef, &out.SecretArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SecretArnSelector != nil { + in, out := &in.SecretArnSelector, &out.SecretArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitConfigInitParameters. +func (in *GitConfigInitParameters) DeepCopy() *GitConfigInitParameters { + if in == nil { + return nil + } + out := new(GitConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *GitConfigObservation) DeepCopyInto(out *GitConfigObservation) { + *out = *in + if in.Branch != nil { + in, out := &in.Branch, &out.Branch + *out = new(string) + **out = **in + } + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitConfigObservation. +func (in *GitConfigObservation) DeepCopy() *GitConfigObservation { + if in == nil { + return nil + } + out := new(GitConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitConfigParameters) DeepCopyInto(out *GitConfigParameters) { + *out = *in + if in.Branch != nil { + in, out := &in.Branch, &out.Branch + *out = new(string) + **out = **in + } + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.SecretArnRef != nil { + in, out := &in.SecretArnRef, &out.SecretArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SecretArnSelector != nil { + in, out := &in.SecretArnSelector, &out.SecretArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitConfigParameters. +func (in *GitConfigParameters) DeepCopy() *GitConfigParameters { + if in == nil { + return nil + } + out := new(GitConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityProviderOauthSettingsInitParameters) DeepCopyInto(out *IdentityProviderOauthSettingsInitParameters) { + *out = *in + if in.DataSourceName != nil { + in, out := &in.DataSourceName, &out.DataSourceName + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderOauthSettingsInitParameters. +func (in *IdentityProviderOauthSettingsInitParameters) DeepCopy() *IdentityProviderOauthSettingsInitParameters { + if in == nil { + return nil + } + out := new(IdentityProviderOauthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderOauthSettingsObservation) DeepCopyInto(out *IdentityProviderOauthSettingsObservation) { + *out = *in + if in.DataSourceName != nil { + in, out := &in.DataSourceName, &out.DataSourceName + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderOauthSettingsObservation. +func (in *IdentityProviderOauthSettingsObservation) DeepCopy() *IdentityProviderOauthSettingsObservation { + if in == nil { + return nil + } + out := new(IdentityProviderOauthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityProviderOauthSettingsParameters) DeepCopyInto(out *IdentityProviderOauthSettingsParameters) { + *out = *in + if in.DataSourceName != nil { + in, out := &in.DataSourceName, &out.DataSourceName + *out = new(string) + **out = **in + } + if in.SecretArn != nil { + in, out := &in.SecretArn, &out.SecretArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderOauthSettingsParameters. +func (in *IdentityProviderOauthSettingsParameters) DeepCopy() *IdentityProviderOauthSettingsParameters { + if in == nil { + return nil + } + out := new(IdentityProviderOauthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfigInitParameters) DeepCopyInto(out *ImageConfigInitParameters) { + *out = *in + if in.RepositoryAccessMode != nil { + in, out := &in.RepositoryAccessMode, &out.RepositoryAccessMode + *out = new(string) + **out = **in + } + if in.RepositoryAuthConfig != nil { + in, out := &in.RepositoryAuthConfig, &out.RepositoryAuthConfig + *out = new(RepositoryAuthConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigInitParameters. +func (in *ImageConfigInitParameters) DeepCopy() *ImageConfigInitParameters { + if in == nil { + return nil + } + out := new(ImageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageConfigObservation) DeepCopyInto(out *ImageConfigObservation) { + *out = *in + if in.RepositoryAccessMode != nil { + in, out := &in.RepositoryAccessMode, &out.RepositoryAccessMode + *out = new(string) + **out = **in + } + if in.RepositoryAuthConfig != nil { + in, out := &in.RepositoryAuthConfig, &out.RepositoryAuthConfig + *out = new(RepositoryAuthConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigObservation. +func (in *ImageConfigObservation) DeepCopy() *ImageConfigObservation { + if in == nil { + return nil + } + out := new(ImageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfigParameters) DeepCopyInto(out *ImageConfigParameters) { + *out = *in + if in.RepositoryAccessMode != nil { + in, out := &in.RepositoryAccessMode, &out.RepositoryAccessMode + *out = new(string) + **out = **in + } + if in.RepositoryAuthConfig != nil { + in, out := &in.RepositoryAuthConfig, &out.RepositoryAuthConfig + *out = new(RepositoryAuthConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigParameters. +func (in *ImageConfigParameters) DeepCopy() *ImageConfigParameters { + if in == nil { + return nil + } + out := new(ImageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageConfigRepositoryAuthConfigInitParameters) DeepCopyInto(out *ImageConfigRepositoryAuthConfigInitParameters) { + *out = *in + if in.RepositoryCredentialsProviderArn != nil { + in, out := &in.RepositoryCredentialsProviderArn, &out.RepositoryCredentialsProviderArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigRepositoryAuthConfigInitParameters. +func (in *ImageConfigRepositoryAuthConfigInitParameters) DeepCopy() *ImageConfigRepositoryAuthConfigInitParameters { + if in == nil { + return nil + } + out := new(ImageConfigRepositoryAuthConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfigRepositoryAuthConfigObservation) DeepCopyInto(out *ImageConfigRepositoryAuthConfigObservation) { + *out = *in + if in.RepositoryCredentialsProviderArn != nil { + in, out := &in.RepositoryCredentialsProviderArn, &out.RepositoryCredentialsProviderArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigRepositoryAuthConfigObservation. +func (in *ImageConfigRepositoryAuthConfigObservation) DeepCopy() *ImageConfigRepositoryAuthConfigObservation { + if in == nil { + return nil + } + out := new(ImageConfigRepositoryAuthConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageConfigRepositoryAuthConfigParameters) DeepCopyInto(out *ImageConfigRepositoryAuthConfigParameters) { + *out = *in + if in.RepositoryCredentialsProviderArn != nil { + in, out := &in.RepositoryCredentialsProviderArn, &out.RepositoryCredentialsProviderArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfigRepositoryAuthConfigParameters. +func (in *ImageConfigRepositoryAuthConfigParameters) DeepCopy() *ImageConfigRepositoryAuthConfigParameters { + if in == nil { + return nil + } + out := new(ImageConfigRepositoryAuthConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InferenceExecutionConfigInitParameters) DeepCopyInto(out *InferenceExecutionConfigInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceExecutionConfigInitParameters. +func (in *InferenceExecutionConfigInitParameters) DeepCopy() *InferenceExecutionConfigInitParameters { + if in == nil { + return nil + } + out := new(InferenceExecutionConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InferenceExecutionConfigObservation) DeepCopyInto(out *InferenceExecutionConfigObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceExecutionConfigObservation. 
+func (in *InferenceExecutionConfigObservation) DeepCopy() *InferenceExecutionConfigObservation { + if in == nil { + return nil + } + out := new(InferenceExecutionConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InferenceExecutionConfigParameters) DeepCopyInto(out *InferenceExecutionConfigParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceExecutionConfigParameters. +func (in *InferenceExecutionConfigParameters) DeepCopy() *InferenceExecutionConfigParameters { + if in == nil { + return nil + } + out := new(InferenceExecutionConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMetadataServiceConfigurationInitParameters) DeepCopyInto(out *InstanceMetadataServiceConfigurationInitParameters) { + *out = *in + if in.MinimumInstanceMetadataServiceVersion != nil { + in, out := &in.MinimumInstanceMetadataServiceVersion, &out.MinimumInstanceMetadataServiceVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMetadataServiceConfigurationInitParameters. +func (in *InstanceMetadataServiceConfigurationInitParameters) DeepCopy() *InstanceMetadataServiceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(InstanceMetadataServiceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceMetadataServiceConfigurationObservation) DeepCopyInto(out *InstanceMetadataServiceConfigurationObservation) { + *out = *in + if in.MinimumInstanceMetadataServiceVersion != nil { + in, out := &in.MinimumInstanceMetadataServiceVersion, &out.MinimumInstanceMetadataServiceVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMetadataServiceConfigurationObservation. +func (in *InstanceMetadataServiceConfigurationObservation) DeepCopy() *InstanceMetadataServiceConfigurationObservation { + if in == nil { + return nil + } + out := new(InstanceMetadataServiceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMetadataServiceConfigurationParameters) DeepCopyInto(out *InstanceMetadataServiceConfigurationParameters) { + *out = *in + if in.MinimumInstanceMetadataServiceVersion != nil { + in, out := &in.MinimumInstanceMetadataServiceVersion, &out.MinimumInstanceMetadataServiceVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMetadataServiceConfigurationParameters. +func (in *InstanceMetadataServiceConfigurationParameters) DeepCopy() *InstanceMetadataServiceConfigurationParameters { + if in == nil { + return nil + } + out := new(InstanceMetadataServiceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabAppSettingsCodeRepositoryInitParameters) DeepCopyInto(out *JupyterLabAppSettingsCodeRepositoryInitParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsCodeRepositoryInitParameters. +func (in *JupyterLabAppSettingsCodeRepositoryInitParameters) DeepCopy() *JupyterLabAppSettingsCodeRepositoryInitParameters { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsCodeRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabAppSettingsCodeRepositoryObservation) DeepCopyInto(out *JupyterLabAppSettingsCodeRepositoryObservation) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsCodeRepositoryObservation. +func (in *JupyterLabAppSettingsCodeRepositoryObservation) DeepCopy() *JupyterLabAppSettingsCodeRepositoryObservation { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsCodeRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabAppSettingsCodeRepositoryParameters) DeepCopyInto(out *JupyterLabAppSettingsCodeRepositoryParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsCodeRepositoryParameters. 
+func (in *JupyterLabAppSettingsCodeRepositoryParameters) DeepCopy() *JupyterLabAppSettingsCodeRepositoryParameters { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsCodeRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabAppSettingsCustomImageInitParameters) DeepCopyInto(out *JupyterLabAppSettingsCustomImageInitParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsCustomImageInitParameters. +func (in *JupyterLabAppSettingsCustomImageInitParameters) DeepCopy() *JupyterLabAppSettingsCustomImageInitParameters { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsCustomImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabAppSettingsCustomImageObservation) DeepCopyInto(out *JupyterLabAppSettingsCustomImageObservation) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsCustomImageObservation. +func (in *JupyterLabAppSettingsCustomImageObservation) DeepCopy() *JupyterLabAppSettingsCustomImageObservation { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsCustomImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabAppSettingsCustomImageParameters) DeepCopyInto(out *JupyterLabAppSettingsCustomImageParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsCustomImageParameters. 
+func (in *JupyterLabAppSettingsCustomImageParameters) DeepCopy() *JupyterLabAppSettingsCustomImageParameters { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsCustomImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *JupyterLabAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsDefaultResourceSpecInitParameters. +func (in *JupyterLabAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *JupyterLabAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *JupyterLabAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsDefaultResourceSpecObservation. +func (in *JupyterLabAppSettingsDefaultResourceSpecObservation) DeepCopy() *JupyterLabAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *JupyterLabAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsDefaultResourceSpecParameters. +func (in *JupyterLabAppSettingsDefaultResourceSpecParameters) DeepCopy() *JupyterLabAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabAppSettingsInitParameters) DeepCopyInto(out *JupyterLabAppSettingsInitParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]JupyterLabAppSettingsCodeRepositoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]JupyterLabAppSettingsCustomImageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(JupyterLabAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsInitParameters. +func (in *JupyterLabAppSettingsInitParameters) DeepCopy() *JupyterLabAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabAppSettingsObservation) DeepCopyInto(out *JupyterLabAppSettingsObservation) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]JupyterLabAppSettingsCodeRepositoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]JupyterLabAppSettingsCustomImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(JupyterLabAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsObservation. +func (in *JupyterLabAppSettingsObservation) DeepCopy() *JupyterLabAppSettingsObservation { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabAppSettingsParameters) DeepCopyInto(out *JupyterLabAppSettingsParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]JupyterLabAppSettingsCodeRepositoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]JupyterLabAppSettingsCustomImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(JupyterLabAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabAppSettingsParameters. +func (in *JupyterLabAppSettingsParameters) DeepCopy() *JupyterLabAppSettingsParameters { + if in == nil { + return nil + } + out := new(JupyterLabAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabImageConfigContainerConfigInitParameters) DeepCopyInto(out *JupyterLabImageConfigContainerConfigInitParameters) { + *out = *in + if in.ContainerArguments != nil { + in, out := &in.ContainerArguments, &out.ContainerArguments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEntrypoint != nil { + in, out := &in.ContainerEntrypoint, &out.ContainerEntrypoint + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEnvironmentVariables != nil { + in, out := &in.ContainerEnvironmentVariables, &out.ContainerEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabImageConfigContainerConfigInitParameters. +func (in *JupyterLabImageConfigContainerConfigInitParameters) DeepCopy() *JupyterLabImageConfigContainerConfigInitParameters { + if in == nil { + return nil + } + out := new(JupyterLabImageConfigContainerConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabImageConfigContainerConfigObservation) DeepCopyInto(out *JupyterLabImageConfigContainerConfigObservation) { + *out = *in + if in.ContainerArguments != nil { + in, out := &in.ContainerArguments, &out.ContainerArguments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEntrypoint != nil { + in, out := &in.ContainerEntrypoint, &out.ContainerEntrypoint + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEnvironmentVariables != nil { + in, out := &in.ContainerEnvironmentVariables, &out.ContainerEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabImageConfigContainerConfigObservation. +func (in *JupyterLabImageConfigContainerConfigObservation) DeepCopy() *JupyterLabImageConfigContainerConfigObservation { + if in == nil { + return nil + } + out := new(JupyterLabImageConfigContainerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabImageConfigContainerConfigParameters) DeepCopyInto(out *JupyterLabImageConfigContainerConfigParameters) { + *out = *in + if in.ContainerArguments != nil { + in, out := &in.ContainerArguments, &out.ContainerArguments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEntrypoint != nil { + in, out := &in.ContainerEntrypoint, &out.ContainerEntrypoint + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContainerEnvironmentVariables != nil { + in, out := &in.ContainerEnvironmentVariables, &out.ContainerEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabImageConfigContainerConfigParameters. +func (in *JupyterLabImageConfigContainerConfigParameters) DeepCopy() *JupyterLabImageConfigContainerConfigParameters { + if in == nil { + return nil + } + out := new(JupyterLabImageConfigContainerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterLabImageConfigFileSystemConfigInitParameters) DeepCopyInto(out *JupyterLabImageConfigFileSystemConfigInitParameters) { + *out = *in + if in.DefaultGID != nil { + in, out := &in.DefaultGID, &out.DefaultGID + *out = new(float64) + **out = **in + } + if in.DefaultUID != nil { + in, out := &in.DefaultUID, &out.DefaultUID + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabImageConfigFileSystemConfigInitParameters. +func (in *JupyterLabImageConfigFileSystemConfigInitParameters) DeepCopy() *JupyterLabImageConfigFileSystemConfigInitParameters { + if in == nil { + return nil + } + out := new(JupyterLabImageConfigFileSystemConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabImageConfigFileSystemConfigObservation) DeepCopyInto(out *JupyterLabImageConfigFileSystemConfigObservation) { + *out = *in + if in.DefaultGID != nil { + in, out := &in.DefaultGID, &out.DefaultGID + *out = new(float64) + **out = **in + } + if in.DefaultUID != nil { + in, out := &in.DefaultUID, &out.DefaultUID + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabImageConfigFileSystemConfigObservation. 
+func (in *JupyterLabImageConfigFileSystemConfigObservation) DeepCopy() *JupyterLabImageConfigFileSystemConfigObservation { + if in == nil { + return nil + } + out := new(JupyterLabImageConfigFileSystemConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabImageConfigFileSystemConfigParameters) DeepCopyInto(out *JupyterLabImageConfigFileSystemConfigParameters) { + *out = *in + if in.DefaultGID != nil { + in, out := &in.DefaultGID, &out.DefaultGID + *out = new(float64) + **out = **in + } + if in.DefaultUID != nil { + in, out := &in.DefaultUID, &out.DefaultUID + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabImageConfigFileSystemConfigParameters. +func (in *JupyterLabImageConfigFileSystemConfigParameters) DeepCopy() *JupyterLabImageConfigFileSystemConfigParameters { + if in == nil { + return nil + } + out := new(JupyterLabImageConfigFileSystemConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabImageConfigInitParameters) DeepCopyInto(out *JupyterLabImageConfigInitParameters) { + *out = *in + if in.ContainerConfig != nil { + in, out := &in.ContainerConfig, &out.ContainerConfig + *out = new(JupyterLabImageConfigContainerConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(JupyterLabImageConfigFileSystemConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabImageConfigInitParameters. 
+func (in *JupyterLabImageConfigInitParameters) DeepCopy() *JupyterLabImageConfigInitParameters { + if in == nil { + return nil + } + out := new(JupyterLabImageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabImageConfigObservation) DeepCopyInto(out *JupyterLabImageConfigObservation) { + *out = *in + if in.ContainerConfig != nil { + in, out := &in.ContainerConfig, &out.ContainerConfig + *out = new(JupyterLabImageConfigContainerConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(JupyterLabImageConfigFileSystemConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabImageConfigObservation. +func (in *JupyterLabImageConfigObservation) DeepCopy() *JupyterLabImageConfigObservation { + if in == nil { + return nil + } + out := new(JupyterLabImageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterLabImageConfigParameters) DeepCopyInto(out *JupyterLabImageConfigParameters) { + *out = *in + if in.ContainerConfig != nil { + in, out := &in.ContainerConfig, &out.ContainerConfig + *out = new(JupyterLabImageConfigContainerConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(JupyterLabImageConfigFileSystemConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterLabImageConfigParameters. 
+func (in *JupyterLabImageConfigParameters) DeepCopy() *JupyterLabImageConfigParameters { + if in == nil { + return nil + } + out := new(JupyterLabImageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterServerAppSettingsCodeRepositoryInitParameters) DeepCopyInto(out *JupyterServerAppSettingsCodeRepositoryInitParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterServerAppSettingsCodeRepositoryInitParameters. +func (in *JupyterServerAppSettingsCodeRepositoryInitParameters) DeepCopy() *JupyterServerAppSettingsCodeRepositoryInitParameters { + if in == nil { + return nil + } + out := new(JupyterServerAppSettingsCodeRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterServerAppSettingsCodeRepositoryObservation) DeepCopyInto(out *JupyterServerAppSettingsCodeRepositoryObservation) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterServerAppSettingsCodeRepositoryObservation. +func (in *JupyterServerAppSettingsCodeRepositoryObservation) DeepCopy() *JupyterServerAppSettingsCodeRepositoryObservation { + if in == nil { + return nil + } + out := new(JupyterServerAppSettingsCodeRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterServerAppSettingsCodeRepositoryParameters) DeepCopyInto(out *JupyterServerAppSettingsCodeRepositoryParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterServerAppSettingsCodeRepositoryParameters. +func (in *JupyterServerAppSettingsCodeRepositoryParameters) DeepCopy() *JupyterServerAppSettingsCodeRepositoryParameters { + if in == nil { + return nil + } + out := new(JupyterServerAppSettingsCodeRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterServerAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *JupyterServerAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterServerAppSettingsDefaultResourceSpecInitParameters. 
+func (in *JupyterServerAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *JupyterServerAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(JupyterServerAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JupyterServerAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *JupyterServerAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterServerAppSettingsDefaultResourceSpecObservation. +func (in *JupyterServerAppSettingsDefaultResourceSpecObservation) DeepCopy() *JupyterServerAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(JupyterServerAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterServerAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *JupyterServerAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterServerAppSettingsDefaultResourceSpecParameters. +func (in *JupyterServerAppSettingsDefaultResourceSpecParameters) DeepCopy() *JupyterServerAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(JupyterServerAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterServerAppSettingsInitParameters) DeepCopyInto(out *JupyterServerAppSettingsInitParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]JupyterServerAppSettingsCodeRepositoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(DefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterServerAppSettingsInitParameters. +func (in *JupyterServerAppSettingsInitParameters) DeepCopy() *JupyterServerAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(JupyterServerAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterServerAppSettingsObservation) DeepCopyInto(out *JupyterServerAppSettingsObservation) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]JupyterServerAppSettingsCodeRepositoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(DefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterServerAppSettingsObservation. +func (in *JupyterServerAppSettingsObservation) DeepCopy() *JupyterServerAppSettingsObservation { + if in == nil { + return nil + } + out := new(JupyterServerAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JupyterServerAppSettingsParameters) DeepCopyInto(out *JupyterServerAppSettingsParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]JupyterServerAppSettingsCodeRepositoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(DefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JupyterServerAppSettingsParameters. +func (in *JupyterServerAppSettingsParameters) DeepCopy() *JupyterServerAppSettingsParameters { + if in == nil { + return nil + } + out := new(JupyterServerAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KendraSettingsInitParameters) DeepCopyInto(out *KendraSettingsInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KendraSettingsInitParameters. +func (in *KendraSettingsInitParameters) DeepCopy() *KendraSettingsInitParameters { + if in == nil { + return nil + } + out := new(KendraSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KendraSettingsObservation) DeepCopyInto(out *KendraSettingsObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KendraSettingsObservation. +func (in *KendraSettingsObservation) DeepCopy() *KendraSettingsObservation { + if in == nil { + return nil + } + out := new(KendraSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KendraSettingsParameters) DeepCopyInto(out *KendraSettingsParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KendraSettingsParameters. +func (in *KendraSettingsParameters) DeepCopy() *KendraSettingsParameters { + if in == nil { + return nil + } + out := new(KendraSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayAppSettingsCustomImageInitParameters) DeepCopyInto(out *KernelGatewayAppSettingsCustomImageInitParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.AppImageConfigNameRef != nil { + in, out := &in.AppImageConfigNameRef, &out.AppImageConfigNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppImageConfigNameSelector != nil { + in, out := &in.AppImageConfigNameSelector, &out.AppImageConfigNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageNameRef != nil { + in, out := &in.ImageNameRef, &out.ImageNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ImageNameSelector != nil { + in, out := &in.ImageNameSelector, &out.ImageNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayAppSettingsCustomImageInitParameters. +func (in *KernelGatewayAppSettingsCustomImageInitParameters) DeepCopy() *KernelGatewayAppSettingsCustomImageInitParameters { + if in == nil { + return nil + } + out := new(KernelGatewayAppSettingsCustomImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayAppSettingsCustomImageObservation) DeepCopyInto(out *KernelGatewayAppSettingsCustomImageObservation) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayAppSettingsCustomImageObservation. +func (in *KernelGatewayAppSettingsCustomImageObservation) DeepCopy() *KernelGatewayAppSettingsCustomImageObservation { + if in == nil { + return nil + } + out := new(KernelGatewayAppSettingsCustomImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayAppSettingsCustomImageParameters) DeepCopyInto(out *KernelGatewayAppSettingsCustomImageParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.AppImageConfigNameRef != nil { + in, out := &in.AppImageConfigNameRef, &out.AppImageConfigNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppImageConfigNameSelector != nil { + in, out := &in.AppImageConfigNameSelector, &out.AppImageConfigNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageNameRef != nil { + in, out := &in.ImageNameRef, &out.ImageNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ImageNameSelector != nil { + in, out := &in.ImageNameSelector, &out.ImageNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayAppSettingsCustomImageParameters. +func (in *KernelGatewayAppSettingsCustomImageParameters) DeepCopy() *KernelGatewayAppSettingsCustomImageParameters { + if in == nil { + return nil + } + out := new(KernelGatewayAppSettingsCustomImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *KernelGatewayAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayAppSettingsDefaultResourceSpecInitParameters. +func (in *KernelGatewayAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *KernelGatewayAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(KernelGatewayAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *KernelGatewayAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayAppSettingsDefaultResourceSpecObservation. +func (in *KernelGatewayAppSettingsDefaultResourceSpecObservation) DeepCopy() *KernelGatewayAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(KernelGatewayAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *KernelGatewayAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayAppSettingsDefaultResourceSpecParameters. +func (in *KernelGatewayAppSettingsDefaultResourceSpecParameters) DeepCopy() *KernelGatewayAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(KernelGatewayAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayAppSettingsInitParameters) DeepCopyInto(out *KernelGatewayAppSettingsInitParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]CustomImageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(KernelGatewayAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayAppSettingsInitParameters. +func (in *KernelGatewayAppSettingsInitParameters) DeepCopy() *KernelGatewayAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(KernelGatewayAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayAppSettingsObservation) DeepCopyInto(out *KernelGatewayAppSettingsObservation) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]CustomImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(KernelGatewayAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayAppSettingsObservation. +func (in *KernelGatewayAppSettingsObservation) DeepCopy() *KernelGatewayAppSettingsObservation { + if in == nil { + return nil + } + out := new(KernelGatewayAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayAppSettingsParameters) DeepCopyInto(out *KernelGatewayAppSettingsParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]CustomImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(KernelGatewayAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayAppSettingsParameters. +func (in *KernelGatewayAppSettingsParameters) DeepCopy() *KernelGatewayAppSettingsParameters { + if in == nil { + return nil + } + out := new(KernelGatewayAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KernelGatewayImageConfigFileSystemConfigInitParameters) DeepCopyInto(out *KernelGatewayImageConfigFileSystemConfigInitParameters) { + *out = *in + if in.DefaultGID != nil { + in, out := &in.DefaultGID, &out.DefaultGID + *out = new(float64) + **out = **in + } + if in.DefaultUID != nil { + in, out := &in.DefaultUID, &out.DefaultUID + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayImageConfigFileSystemConfigInitParameters. 
+func (in *KernelGatewayImageConfigFileSystemConfigInitParameters) DeepCopy() *KernelGatewayImageConfigFileSystemConfigInitParameters { + if in == nil { + return nil + } + out := new(KernelGatewayImageConfigFileSystemConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KernelGatewayImageConfigFileSystemConfigObservation) DeepCopyInto(out *KernelGatewayImageConfigFileSystemConfigObservation) { + *out = *in + if in.DefaultGID != nil { + in, out := &in.DefaultGID, &out.DefaultGID + *out = new(float64) + **out = **in + } + if in.DefaultUID != nil { + in, out := &in.DefaultUID, &out.DefaultUID + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayImageConfigFileSystemConfigObservation. +func (in *KernelGatewayImageConfigFileSystemConfigObservation) DeepCopy() *KernelGatewayImageConfigFileSystemConfigObservation { + if in == nil { + return nil + } + out := new(KernelGatewayImageConfigFileSystemConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelGatewayImageConfigFileSystemConfigParameters) DeepCopyInto(out *KernelGatewayImageConfigFileSystemConfigParameters) { + *out = *in + if in.DefaultGID != nil { + in, out := &in.DefaultGID, &out.DefaultGID + *out = new(float64) + **out = **in + } + if in.DefaultUID != nil { + in, out := &in.DefaultUID, &out.DefaultUID + *out = new(float64) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayImageConfigFileSystemConfigParameters. +func (in *KernelGatewayImageConfigFileSystemConfigParameters) DeepCopy() *KernelGatewayImageConfigFileSystemConfigParameters { + if in == nil { + return nil + } + out := new(KernelGatewayImageConfigFileSystemConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KernelGatewayImageConfigInitParameters) DeepCopyInto(out *KernelGatewayImageConfigInitParameters) { + *out = *in + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(KernelGatewayImageConfigFileSystemConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelSpec != nil { + in, out := &in.KernelSpec, &out.KernelSpec + *out = make([]KernelSpecInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayImageConfigInitParameters. +func (in *KernelGatewayImageConfigInitParameters) DeepCopy() *KernelGatewayImageConfigInitParameters { + if in == nil { + return nil + } + out := new(KernelGatewayImageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *KernelGatewayImageConfigObservation) DeepCopyInto(out *KernelGatewayImageConfigObservation) { + *out = *in + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(KernelGatewayImageConfigFileSystemConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.KernelSpec != nil { + in, out := &in.KernelSpec, &out.KernelSpec + *out = make([]KernelSpecObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayImageConfigObservation. +func (in *KernelGatewayImageConfigObservation) DeepCopy() *KernelGatewayImageConfigObservation { + if in == nil { + return nil + } + out := new(KernelGatewayImageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KernelGatewayImageConfigParameters) DeepCopyInto(out *KernelGatewayImageConfigParameters) { + *out = *in + if in.FileSystemConfig != nil { + in, out := &in.FileSystemConfig, &out.FileSystemConfig + *out = new(KernelGatewayImageConfigFileSystemConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelSpec != nil { + in, out := &in.KernelSpec, &out.KernelSpec + *out = make([]KernelSpecParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelGatewayImageConfigParameters. +func (in *KernelGatewayImageConfigParameters) DeepCopy() *KernelGatewayImageConfigParameters { + if in == nil { + return nil + } + out := new(KernelGatewayImageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelSpecInitParameters) DeepCopyInto(out *KernelSpecInitParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelSpecInitParameters. +func (in *KernelSpecInitParameters) DeepCopy() *KernelSpecInitParameters { + if in == nil { + return nil + } + out := new(KernelSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KernelSpecObservation) DeepCopyInto(out *KernelSpecObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelSpecObservation. +func (in *KernelSpecObservation) DeepCopy() *KernelSpecObservation { + if in == nil { + return nil + } + out := new(KernelSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KernelSpecParameters) DeepCopyInto(out *KernelSpecParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelSpecParameters. 
+func (in *KernelSpecParameters) DeepCopy() *KernelSpecParameters { + if in == nil { + return nil + } + out := new(KernelSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinearStepSizeInitParameters) DeepCopyInto(out *LinearStepSizeInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinearStepSizeInitParameters. +func (in *LinearStepSizeInitParameters) DeepCopy() *LinearStepSizeInitParameters { + if in == nil { + return nil + } + out := new(LinearStepSizeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinearStepSizeObservation) DeepCopyInto(out *LinearStepSizeObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinearStepSizeObservation. +func (in *LinearStepSizeObservation) DeepCopy() *LinearStepSizeObservation { + if in == nil { + return nil + } + out := new(LinearStepSizeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinearStepSizeParameters) DeepCopyInto(out *LinearStepSizeParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinearStepSizeParameters. +func (in *LinearStepSizeParameters) DeepCopy() *LinearStepSizeParameters { + if in == nil { + return nil + } + out := new(LinearStepSizeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaximumBatchSizeInitParameters) DeepCopyInto(out *MaximumBatchSizeInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaximumBatchSizeInitParameters. +func (in *MaximumBatchSizeInitParameters) DeepCopy() *MaximumBatchSizeInitParameters { + if in == nil { + return nil + } + out := new(MaximumBatchSizeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaximumBatchSizeObservation) DeepCopyInto(out *MaximumBatchSizeObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaximumBatchSizeObservation. 
+func (in *MaximumBatchSizeObservation) DeepCopy() *MaximumBatchSizeObservation { + if in == nil { + return nil + } + out := new(MaximumBatchSizeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaximumBatchSizeParameters) DeepCopyInto(out *MaximumBatchSizeParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaximumBatchSizeParameters. +func (in *MaximumBatchSizeParameters) DeepCopy() *MaximumBatchSizeParameters { + if in == nil { + return nil + } + out := new(MaximumBatchSizeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemberDefinitionInitParameters) DeepCopyInto(out *MemberDefinitionInitParameters) { + *out = *in + if in.CognitoMemberDefinition != nil { + in, out := &in.CognitoMemberDefinition, &out.CognitoMemberDefinition + *out = new(CognitoMemberDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OidcMemberDefinition != nil { + in, out := &in.OidcMemberDefinition, &out.OidcMemberDefinition + *out = new(OidcMemberDefinitionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberDefinitionInitParameters. +func (in *MemberDefinitionInitParameters) DeepCopy() *MemberDefinitionInitParameters { + if in == nil { + return nil + } + out := new(MemberDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MemberDefinitionObservation) DeepCopyInto(out *MemberDefinitionObservation) { + *out = *in + if in.CognitoMemberDefinition != nil { + in, out := &in.CognitoMemberDefinition, &out.CognitoMemberDefinition + *out = new(CognitoMemberDefinitionObservation) + (*in).DeepCopyInto(*out) + } + if in.OidcMemberDefinition != nil { + in, out := &in.OidcMemberDefinition, &out.OidcMemberDefinition + *out = new(OidcMemberDefinitionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberDefinitionObservation. +func (in *MemberDefinitionObservation) DeepCopy() *MemberDefinitionObservation { + if in == nil { + return nil + } + out := new(MemberDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemberDefinitionParameters) DeepCopyInto(out *MemberDefinitionParameters) { + *out = *in + if in.CognitoMemberDefinition != nil { + in, out := &in.CognitoMemberDefinition, &out.CognitoMemberDefinition + *out = new(CognitoMemberDefinitionParameters) + (*in).DeepCopyInto(*out) + } + if in.OidcMemberDefinition != nil { + in, out := &in.OidcMemberDefinition, &out.OidcMemberDefinition + *out = new(OidcMemberDefinitionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberDefinitionParameters. +func (in *MemberDefinitionParameters) DeepCopy() *MemberDefinitionParameters { + if in == nil { + return nil + } + out := new(MemberDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Model) DeepCopyInto(out *Model) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Model. +func (in *Model) DeepCopy() *Model { + if in == nil { + return nil + } + out := new(Model) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Model) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelDataSourceInitParameters) DeepCopyInto(out *ModelDataSourceInitParameters) { + *out = *in + if in.S3DataSource != nil { + in, out := &in.S3DataSource, &out.S3DataSource + *out = make([]S3DataSourceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelDataSourceInitParameters. +func (in *ModelDataSourceInitParameters) DeepCopy() *ModelDataSourceInitParameters { + if in == nil { + return nil + } + out := new(ModelDataSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelDataSourceObservation) DeepCopyInto(out *ModelDataSourceObservation) { + *out = *in + if in.S3DataSource != nil { + in, out := &in.S3DataSource, &out.S3DataSource + *out = make([]S3DataSourceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelDataSourceObservation. 
+func (in *ModelDataSourceObservation) DeepCopy() *ModelDataSourceObservation { + if in == nil { + return nil + } + out := new(ModelDataSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelDataSourceParameters) DeepCopyInto(out *ModelDataSourceParameters) { + *out = *in + if in.S3DataSource != nil { + in, out := &in.S3DataSource, &out.S3DataSource + *out = make([]S3DataSourceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelDataSourceParameters. +func (in *ModelDataSourceParameters) DeepCopy() *ModelDataSourceParameters { + if in == nil { + return nil + } + out := new(ModelDataSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelDataSourceS3DataSourceInitParameters) DeepCopyInto(out *ModelDataSourceS3DataSourceInitParameters) { + *out = *in + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.S3DataType != nil { + in, out := &in.S3DataType, &out.S3DataType + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelDataSourceS3DataSourceInitParameters. +func (in *ModelDataSourceS3DataSourceInitParameters) DeepCopy() *ModelDataSourceS3DataSourceInitParameters { + if in == nil { + return nil + } + out := new(ModelDataSourceS3DataSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModelDataSourceS3DataSourceObservation) DeepCopyInto(out *ModelDataSourceS3DataSourceObservation) { + *out = *in + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.S3DataType != nil { + in, out := &in.S3DataType, &out.S3DataType + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelDataSourceS3DataSourceObservation. +func (in *ModelDataSourceS3DataSourceObservation) DeepCopy() *ModelDataSourceS3DataSourceObservation { + if in == nil { + return nil + } + out := new(ModelDataSourceS3DataSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelDataSourceS3DataSourceParameters) DeepCopyInto(out *ModelDataSourceS3DataSourceParameters) { + *out = *in + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.S3DataType != nil { + in, out := &in.S3DataType, &out.S3DataType + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelDataSourceS3DataSourceParameters. +func (in *ModelDataSourceS3DataSourceParameters) DeepCopy() *ModelDataSourceS3DataSourceParameters { + if in == nil { + return nil + } + out := new(ModelDataSourceS3DataSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModelInitParameters) DeepCopyInto(out *ModelInitParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableNetworkIsolation != nil { + in, out := &in.EnableNetworkIsolation, &out.EnableNetworkIsolation + *out = new(bool) + **out = **in + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InferenceExecutionConfig != nil { + in, out := &in.InferenceExecutionConfig, &out.InferenceExecutionConfig + *out = new(InferenceExecutionConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrimaryContainer != nil { + in, out := &in.PrimaryContainer, &out.PrimaryContainer + *out = new(PrimaryContainerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelInitParameters. 
+func (in *ModelInitParameters) DeepCopy() *ModelInitParameters { + if in == nil { + return nil + } + out := new(ModelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelList) DeepCopyInto(out *ModelList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Model, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelList. +func (in *ModelList) DeepCopy() *ModelList { + if in == nil { + return nil + } + out := new(ModelList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ModelList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModelObservation) DeepCopyInto(out *ModelObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableNetworkIsolation != nil { + in, out := &in.EnableNetworkIsolation, &out.EnableNetworkIsolation + *out = new(bool) + **out = **in + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InferenceExecutionConfig != nil { + in, out := &in.InferenceExecutionConfig, &out.InferenceExecutionConfig + *out = new(InferenceExecutionConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.PrimaryContainer != nil { + in, out := &in.PrimaryContainer, &out.PrimaryContainer + *out = new(PrimaryContainerObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, &out.VPCConfig + *out = new(VPCConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelObservation. 
+func (in *ModelObservation) DeepCopy() *ModelObservation { + if in == nil { + return nil + } + out := new(ModelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelParameters) DeepCopyInto(out *ModelParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableNetworkIsolation != nil { + in, out := &in.EnableNetworkIsolation, &out.EnableNetworkIsolation + *out = new(bool) + **out = **in + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InferenceExecutionConfig != nil { + in, out := &in.InferenceExecutionConfig, &out.InferenceExecutionConfig + *out = new(InferenceExecutionConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.PrimaryContainer != nil { + in, out := &in.PrimaryContainer, &out.PrimaryContainer + *out = new(PrimaryContainerParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPCConfig != nil { + in, out := &in.VPCConfig, 
&out.VPCConfig + *out = new(VPCConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelParameters. +func (in *ModelParameters) DeepCopy() *ModelParameters { + if in == nil { + return nil + } + out := new(ModelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelRegisterSettingsInitParameters) DeepCopyInto(out *ModelRegisterSettingsInitParameters) { + *out = *in + if in.CrossAccountModelRegisterRoleArn != nil { + in, out := &in.CrossAccountModelRegisterRoleArn, &out.CrossAccountModelRegisterRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegisterSettingsInitParameters. +func (in *ModelRegisterSettingsInitParameters) DeepCopy() *ModelRegisterSettingsInitParameters { + if in == nil { + return nil + } + out := new(ModelRegisterSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelRegisterSettingsObservation) DeepCopyInto(out *ModelRegisterSettingsObservation) { + *out = *in + if in.CrossAccountModelRegisterRoleArn != nil { + in, out := &in.CrossAccountModelRegisterRoleArn, &out.CrossAccountModelRegisterRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegisterSettingsObservation. 
+func (in *ModelRegisterSettingsObservation) DeepCopy() *ModelRegisterSettingsObservation { + if in == nil { + return nil + } + out := new(ModelRegisterSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelRegisterSettingsParameters) DeepCopyInto(out *ModelRegisterSettingsParameters) { + *out = *in + if in.CrossAccountModelRegisterRoleArn != nil { + in, out := &in.CrossAccountModelRegisterRoleArn, &out.CrossAccountModelRegisterRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegisterSettingsParameters. +func (in *ModelRegisterSettingsParameters) DeepCopy() *ModelRegisterSettingsParameters { + if in == nil { + return nil + } + out := new(ModelRegisterSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelSpec) DeepCopyInto(out *ModelSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelSpec. +func (in *ModelSpec) DeepCopy() *ModelSpec { + if in == nil { + return nil + } + out := new(ModelSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModelStatus) DeepCopyInto(out *ModelStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelStatus. +func (in *ModelStatus) DeepCopy() *ModelStatus { + if in == nil { + return nil + } + out := new(ModelStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotebookInstance) DeepCopyInto(out *NotebookInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotebookInstance. +func (in *NotebookInstance) DeepCopy() *NotebookInstance { + if in == nil { + return nil + } + out := new(NotebookInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NotebookInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotebookInstanceInitParameters) DeepCopyInto(out *NotebookInstanceInitParameters) { + *out = *in + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AdditionalCodeRepositories != nil { + in, out := &in.AdditionalCodeRepositories, &out.AdditionalCodeRepositories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultCodeRepository != nil { + in, out := &in.DefaultCodeRepository, &out.DefaultCodeRepository + *out = new(string) + **out = **in + } + if in.DefaultCodeRepositoryRef != nil { + in, out := &in.DefaultCodeRepositoryRef, &out.DefaultCodeRepositoryRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultCodeRepositorySelector != nil { + in, out := &in.DefaultCodeRepositorySelector, &out.DefaultCodeRepositorySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DirectInternetAccess != nil { + in, out := &in.DirectInternetAccess, &out.DirectInternetAccess + *out = new(string) + **out = **in + } + if in.InstanceMetadataServiceConfiguration != nil { + in, out := &in.InstanceMetadataServiceConfiguration, &out.InstanceMetadataServiceConfiguration + *out = new(InstanceMetadataServiceConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + 
*out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigName != nil { + in, out := &in.LifecycleConfigName, &out.LifecycleConfigName + *out = new(string) + **out = **in + } + if in.PlatformIdentifier != nil { + in, out := &in.PlatformIdentifier, &out.PlatformIdentifier + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RootAccess != nil { + in, out := &in.RootAccess, &out.RootAccess + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
NotebookInstanceInitParameters. +func (in *NotebookInstanceInitParameters) DeepCopy() *NotebookInstanceInitParameters { + if in == nil { + return nil + } + out := new(NotebookInstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotebookInstanceList) DeepCopyInto(out *NotebookInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NotebookInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotebookInstanceList. +func (in *NotebookInstanceList) DeepCopy() *NotebookInstanceList { + if in == nil { + return nil + } + out := new(NotebookInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NotebookInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotebookInstanceObservation) DeepCopyInto(out *NotebookInstanceObservation) { + *out = *in + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AdditionalCodeRepositories != nil { + in, out := &in.AdditionalCodeRepositories, &out.AdditionalCodeRepositories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DefaultCodeRepository != nil { + in, out := &in.DefaultCodeRepository, &out.DefaultCodeRepository + *out = new(string) + **out = **in + } + if in.DirectInternetAccess != nil { + in, out := &in.DirectInternetAccess, &out.DirectInternetAccess + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceMetadataServiceConfiguration != nil { + in, out := &in.InstanceMetadataServiceConfiguration, &out.InstanceMetadataServiceConfiguration + *out = new(InstanceMetadataServiceConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.LifecycleConfigName != nil { + in, out := &in.LifecycleConfigName, &out.LifecycleConfigName + *out = new(string) + **out = **in + } + if in.NetworkInterfaceID != nil { + in, out := &in.NetworkInterfaceID, &out.NetworkInterfaceID + *out = new(string) + **out = **in + } + if in.PlatformIdentifier != nil { + in, out := &in.PlatformIdentifier, &out.PlatformIdentifier + *out = new(string) + **out = **in + } + 
if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RootAccess != nil { + in, out := &in.RootAccess, &out.RootAccess + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotebookInstanceObservation. +func (in *NotebookInstanceObservation) DeepCopy() *NotebookInstanceObservation { + if in == nil { + return nil + } + out := new(NotebookInstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotebookInstanceParameters) DeepCopyInto(out *NotebookInstanceParameters) { + *out = *in + if in.AcceleratorTypes != nil { + in, out := &in.AcceleratorTypes, &out.AcceleratorTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AdditionalCodeRepositories != nil { + in, out := &in.AdditionalCodeRepositories, &out.AdditionalCodeRepositories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultCodeRepository != nil { + in, out := &in.DefaultCodeRepository, &out.DefaultCodeRepository + *out = new(string) + **out = **in + } + if in.DefaultCodeRepositoryRef != nil { + in, out := &in.DefaultCodeRepositoryRef, &out.DefaultCodeRepositoryRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultCodeRepositorySelector != nil { + in, out := &in.DefaultCodeRepositorySelector, &out.DefaultCodeRepositorySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DirectInternetAccess != nil { + in, out := &in.DirectInternetAccess, &out.DirectInternetAccess + *out = new(string) + **out = **in + } + if in.InstanceMetadataServiceConfiguration != nil { + in, out := &in.InstanceMetadataServiceConfiguration, &out.InstanceMetadataServiceConfiguration + *out = new(InstanceMetadataServiceConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigName != nil { + in, out := &in.LifecycleConfigName, &out.LifecycleConfigName + *out = new(string) + **out = **in + } + if in.PlatformIdentifier != nil { + in, out := &in.PlatformIdentifier, &out.PlatformIdentifier + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RootAccess != nil { + in, out := &in.RootAccess, &out.RootAccess + *out = new(string) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(float64) + **out = **in + } +} + 
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotebookInstanceParameters. +func (in *NotebookInstanceParameters) DeepCopy() *NotebookInstanceParameters { + if in == nil { + return nil + } + out := new(NotebookInstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotebookInstanceSpec) DeepCopyInto(out *NotebookInstanceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotebookInstanceSpec. +func (in *NotebookInstanceSpec) DeepCopy() *NotebookInstanceSpec { + if in == nil { + return nil + } + out := new(NotebookInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotebookInstanceStatus) DeepCopyInto(out *NotebookInstanceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotebookInstanceStatus. +func (in *NotebookInstanceStatus) DeepCopy() *NotebookInstanceStatus { + if in == nil { + return nil + } + out := new(NotebookInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationConfigInitParameters) DeepCopyInto(out *NotificationConfigInitParameters) { + *out = *in + if in.ErrorTopic != nil { + in, out := &in.ErrorTopic, &out.ErrorTopic + *out = new(string) + **out = **in + } + if in.IncludeInferenceResponseIn != nil { + in, out := &in.IncludeInferenceResponseIn, &out.IncludeInferenceResponseIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SuccessTopic != nil { + in, out := &in.SuccessTopic, &out.SuccessTopic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationConfigInitParameters. +func (in *NotificationConfigInitParameters) DeepCopy() *NotificationConfigInitParameters { + if in == nil { + return nil + } + out := new(NotificationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationConfigObservation) DeepCopyInto(out *NotificationConfigObservation) { + *out = *in + if in.ErrorTopic != nil { + in, out := &in.ErrorTopic, &out.ErrorTopic + *out = new(string) + **out = **in + } + if in.IncludeInferenceResponseIn != nil { + in, out := &in.IncludeInferenceResponseIn, &out.IncludeInferenceResponseIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SuccessTopic != nil { + in, out := &in.SuccessTopic, &out.SuccessTopic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationConfigObservation. 
+func (in *NotificationConfigObservation) DeepCopy() *NotificationConfigObservation { + if in == nil { + return nil + } + out := new(NotificationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationConfigParameters) DeepCopyInto(out *NotificationConfigParameters) { + *out = *in + if in.ErrorTopic != nil { + in, out := &in.ErrorTopic, &out.ErrorTopic + *out = new(string) + **out = **in + } + if in.IncludeInferenceResponseIn != nil { + in, out := &in.IncludeInferenceResponseIn, &out.IncludeInferenceResponseIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SuccessTopic != nil { + in, out := &in.SuccessTopic, &out.SuccessTopic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationConfigParameters. +func (in *NotificationConfigParameters) DeepCopy() *NotificationConfigParameters { + if in == nil { + return nil + } + out := new(NotificationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationConfigurationInitParameters) DeepCopyInto(out *NotificationConfigurationInitParameters) { + *out = *in + if in.NotificationTopicArn != nil { + in, out := &in.NotificationTopicArn, &out.NotificationTopicArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationConfigurationInitParameters. 
+func (in *NotificationConfigurationInitParameters) DeepCopy() *NotificationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NotificationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationConfigurationObservation) DeepCopyInto(out *NotificationConfigurationObservation) { + *out = *in + if in.NotificationTopicArn != nil { + in, out := &in.NotificationTopicArn, &out.NotificationTopicArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationConfigurationObservation. +func (in *NotificationConfigurationObservation) DeepCopy() *NotificationConfigurationObservation { + if in == nil { + return nil + } + out := new(NotificationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationConfigurationParameters) DeepCopyInto(out *NotificationConfigurationParameters) { + *out = *in + if in.NotificationTopicArn != nil { + in, out := &in.NotificationTopicArn, &out.NotificationTopicArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationConfigurationParameters. +func (in *NotificationConfigurationParameters) DeepCopy() *NotificationConfigurationParameters { + if in == nil { + return nil + } + out := new(NotificationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OfflineStoreConfigInitParameters) DeepCopyInto(out *OfflineStoreConfigInitParameters) { + *out = *in + if in.DataCatalogConfig != nil { + in, out := &in.DataCatalogConfig, &out.DataCatalogConfig + *out = new(DataCatalogConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableGlueTableCreation != nil { + in, out := &in.DisableGlueTableCreation, &out.DisableGlueTableCreation + *out = new(bool) + **out = **in + } + if in.S3StorageConfig != nil { + in, out := &in.S3StorageConfig, &out.S3StorageConfig + *out = new(S3StorageConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TableFormat != nil { + in, out := &in.TableFormat, &out.TableFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineStoreConfigInitParameters. +func (in *OfflineStoreConfigInitParameters) DeepCopy() *OfflineStoreConfigInitParameters { + if in == nil { + return nil + } + out := new(OfflineStoreConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OfflineStoreConfigObservation) DeepCopyInto(out *OfflineStoreConfigObservation) { + *out = *in + if in.DataCatalogConfig != nil { + in, out := &in.DataCatalogConfig, &out.DataCatalogConfig + *out = new(DataCatalogConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.DisableGlueTableCreation != nil { + in, out := &in.DisableGlueTableCreation, &out.DisableGlueTableCreation + *out = new(bool) + **out = **in + } + if in.S3StorageConfig != nil { + in, out := &in.S3StorageConfig, &out.S3StorageConfig + *out = new(S3StorageConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.TableFormat != nil { + in, out := &in.TableFormat, &out.TableFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineStoreConfigObservation. +func (in *OfflineStoreConfigObservation) DeepCopy() *OfflineStoreConfigObservation { + if in == nil { + return nil + } + out := new(OfflineStoreConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OfflineStoreConfigParameters) DeepCopyInto(out *OfflineStoreConfigParameters) { + *out = *in + if in.DataCatalogConfig != nil { + in, out := &in.DataCatalogConfig, &out.DataCatalogConfig + *out = new(DataCatalogConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.DisableGlueTableCreation != nil { + in, out := &in.DisableGlueTableCreation, &out.DisableGlueTableCreation + *out = new(bool) + **out = **in + } + if in.S3StorageConfig != nil { + in, out := &in.S3StorageConfig, &out.S3StorageConfig + *out = new(S3StorageConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.TableFormat != nil { + in, out := &in.TableFormat, &out.TableFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineStoreConfigParameters. 
+func (in *OfflineStoreConfigParameters) DeepCopy() *OfflineStoreConfigParameters { + if in == nil { + return nil + } + out := new(OfflineStoreConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcConfigInitParameters) DeepCopyInto(out *OidcConfigInitParameters) { + *out = *in + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.JwksURI != nil { + in, out := &in.JwksURI, &out.JwksURI + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } + if in.UserInfoEndpoint != nil { + in, out := &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcConfigInitParameters. +func (in *OidcConfigInitParameters) DeepCopy() *OidcConfigInitParameters { + if in == nil { + return nil + } + out := new(OidcConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OidcConfigObservation) DeepCopyInto(out *OidcConfigObservation) { + *out = *in + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.JwksURI != nil { + in, out := &in.JwksURI, &out.JwksURI + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } + if in.UserInfoEndpoint != nil { + in, out := &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcConfigObservation. +func (in *OidcConfigObservation) DeepCopy() *OidcConfigObservation { + if in == nil { + return nil + } + out := new(OidcConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OidcConfigParameters) DeepCopyInto(out *OidcConfigParameters) { + *out = *in + if in.AuthorizationEndpoint != nil { + in, out := &in.AuthorizationEndpoint, &out.AuthorizationEndpoint + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.JwksURI != nil { + in, out := &in.JwksURI, &out.JwksURI + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } + if in.UserInfoEndpoint != nil { + in, out := &in.UserInfoEndpoint, &out.UserInfoEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcConfigParameters. +func (in *OidcConfigParameters) DeepCopy() *OidcConfigParameters { + if in == nil { + return nil + } + out := new(OidcConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcMemberDefinitionInitParameters) DeepCopyInto(out *OidcMemberDefinitionInitParameters) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcMemberDefinitionInitParameters. 
+func (in *OidcMemberDefinitionInitParameters) DeepCopy() *OidcMemberDefinitionInitParameters { + if in == nil { + return nil + } + out := new(OidcMemberDefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcMemberDefinitionObservation) DeepCopyInto(out *OidcMemberDefinitionObservation) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcMemberDefinitionObservation. +func (in *OidcMemberDefinitionObservation) DeepCopy() *OidcMemberDefinitionObservation { + if in == nil { + return nil + } + out := new(OidcMemberDefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcMemberDefinitionParameters) DeepCopyInto(out *OidcMemberDefinitionParameters) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcMemberDefinitionParameters. +func (in *OidcMemberDefinitionParameters) DeepCopy() *OidcMemberDefinitionParameters { + if in == nil { + return nil + } + out := new(OidcMemberDefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OnlineStoreConfigInitParameters) DeepCopyInto(out *OnlineStoreConfigInitParameters) { + *out = *in + if in.EnableOnlineStore != nil { + in, out := &in.EnableOnlineStore, &out.EnableOnlineStore + *out = new(bool) + **out = **in + } + if in.SecurityConfig != nil { + in, out := &in.SecurityConfig, &out.SecurityConfig + *out = new(SecurityConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.TTLDuration != nil { + in, out := &in.TTLDuration, &out.TTLDuration + *out = new(TTLDurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnlineStoreConfigInitParameters. +func (in *OnlineStoreConfigInitParameters) DeepCopy() *OnlineStoreConfigInitParameters { + if in == nil { + return nil + } + out := new(OnlineStoreConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnlineStoreConfigObservation) DeepCopyInto(out *OnlineStoreConfigObservation) { + *out = *in + if in.EnableOnlineStore != nil { + in, out := &in.EnableOnlineStore, &out.EnableOnlineStore + *out = new(bool) + **out = **in + } + if in.SecurityConfig != nil { + in, out := &in.SecurityConfig, &out.SecurityConfig + *out = new(SecurityConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.TTLDuration != nil { + in, out := &in.TTLDuration, &out.TTLDuration + *out = new(TTLDurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnlineStoreConfigObservation. 
+func (in *OnlineStoreConfigObservation) DeepCopy() *OnlineStoreConfigObservation { + if in == nil { + return nil + } + out := new(OnlineStoreConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnlineStoreConfigParameters) DeepCopyInto(out *OnlineStoreConfigParameters) { + *out = *in + if in.EnableOnlineStore != nil { + in, out := &in.EnableOnlineStore, &out.EnableOnlineStore + *out = new(bool) + **out = **in + } + if in.SecurityConfig != nil { + in, out := &in.SecurityConfig, &out.SecurityConfig + *out = new(SecurityConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.TTLDuration != nil { + in, out := &in.TTLDuration, &out.TTLDuration + *out = new(TTLDurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnlineStoreConfigParameters. +func (in *OnlineStoreConfigParameters) DeepCopy() *OnlineStoreConfigParameters { + if in == nil { + return nil + } + out := new(OnlineStoreConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputConfigInitParameters) DeepCopyInto(out *OutputConfigInitParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.S3OutputLocation != nil { + in, out := &in.S3OutputLocation, &out.S3OutputLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputConfigInitParameters. 
+func (in *OutputConfigInitParameters) DeepCopy() *OutputConfigInitParameters { + if in == nil { + return nil + } + out := new(OutputConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputConfigObservation) DeepCopyInto(out *OutputConfigObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.S3OutputLocation != nil { + in, out := &in.S3OutputLocation, &out.S3OutputLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputConfigObservation. +func (in *OutputConfigObservation) DeepCopy() *OutputConfigObservation { + if in == nil { + return nil + } + out := new(OutputConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputConfigParameters) DeepCopyInto(out *OutputConfigParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.S3OutputLocation != nil { + in, out := &in.S3OutputLocation, &out.S3OutputLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputConfigParameters. +func (in *OutputConfigParameters) DeepCopy() *OutputConfigParameters { + if in == nil { + return nil + } + out := new(OutputConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OwnershipSettingsInitParameters) DeepCopyInto(out *OwnershipSettingsInitParameters) { + *out = *in + if in.OwnerUserProfileName != nil { + in, out := &in.OwnerUserProfileName, &out.OwnerUserProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnershipSettingsInitParameters. +func (in *OwnershipSettingsInitParameters) DeepCopy() *OwnershipSettingsInitParameters { + if in == nil { + return nil + } + out := new(OwnershipSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OwnershipSettingsObservation) DeepCopyInto(out *OwnershipSettingsObservation) { + *out = *in + if in.OwnerUserProfileName != nil { + in, out := &in.OwnerUserProfileName, &out.OwnerUserProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnershipSettingsObservation. +func (in *OwnershipSettingsObservation) DeepCopy() *OwnershipSettingsObservation { + if in == nil { + return nil + } + out := new(OwnershipSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OwnershipSettingsParameters) DeepCopyInto(out *OwnershipSettingsParameters) { + *out = *in + if in.OwnerUserProfileName != nil { + in, out := &in.OwnerUserProfileName, &out.OwnerUserProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnershipSettingsParameters. 
+func (in *OwnershipSettingsParameters) DeepCopy() *OwnershipSettingsParameters { + if in == nil { + return nil + } + out := new(OwnershipSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryContainerImageConfigInitParameters) DeepCopyInto(out *PrimaryContainerImageConfigInitParameters) { + *out = *in + if in.RepositoryAccessMode != nil { + in, out := &in.RepositoryAccessMode, &out.RepositoryAccessMode + *out = new(string) + **out = **in + } + if in.RepositoryAuthConfig != nil { + in, out := &in.RepositoryAuthConfig, &out.RepositoryAuthConfig + *out = new(ImageConfigRepositoryAuthConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryContainerImageConfigInitParameters. +func (in *PrimaryContainerImageConfigInitParameters) DeepCopy() *PrimaryContainerImageConfigInitParameters { + if in == nil { + return nil + } + out := new(PrimaryContainerImageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryContainerImageConfigObservation) DeepCopyInto(out *PrimaryContainerImageConfigObservation) { + *out = *in + if in.RepositoryAccessMode != nil { + in, out := &in.RepositoryAccessMode, &out.RepositoryAccessMode + *out = new(string) + **out = **in + } + if in.RepositoryAuthConfig != nil { + in, out := &in.RepositoryAuthConfig, &out.RepositoryAuthConfig + *out = new(ImageConfigRepositoryAuthConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryContainerImageConfigObservation. 
+func (in *PrimaryContainerImageConfigObservation) DeepCopy() *PrimaryContainerImageConfigObservation { + if in == nil { + return nil + } + out := new(PrimaryContainerImageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryContainerImageConfigParameters) DeepCopyInto(out *PrimaryContainerImageConfigParameters) { + *out = *in + if in.RepositoryAccessMode != nil { + in, out := &in.RepositoryAccessMode, &out.RepositoryAccessMode + *out = new(string) + **out = **in + } + if in.RepositoryAuthConfig != nil { + in, out := &in.RepositoryAuthConfig, &out.RepositoryAuthConfig + *out = new(ImageConfigRepositoryAuthConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryContainerImageConfigParameters. +func (in *PrimaryContainerImageConfigParameters) DeepCopy() *PrimaryContainerImageConfigParameters { + if in == nil { + return nil + } + out := new(PrimaryContainerImageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrimaryContainerInitParameters) DeepCopyInto(out *PrimaryContainerInitParameters) { + *out = *in + if in.ContainerHostname != nil { + in, out := &in.ContainerHostname, &out.ContainerHostname + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImageConfig != nil { + in, out := &in.ImageConfig, &out.ImageConfig + *out = new(PrimaryContainerImageConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.ModelDataSource != nil { + in, out := &in.ModelDataSource, &out.ModelDataSource + *out = new(PrimaryContainerModelDataSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ModelDataURL != nil { + in, out := &in.ModelDataURL, &out.ModelDataURL + *out = new(string) + **out = **in + } + if in.ModelPackageName != nil { + in, out := &in.ModelPackageName, &out.ModelPackageName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryContainerInitParameters. +func (in *PrimaryContainerInitParameters) DeepCopy() *PrimaryContainerInitParameters { + if in == nil { + return nil + } + out := new(PrimaryContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrimaryContainerModelDataSourceInitParameters) DeepCopyInto(out *PrimaryContainerModelDataSourceInitParameters) { + *out = *in + if in.S3DataSource != nil { + in, out := &in.S3DataSource, &out.S3DataSource + *out = make([]ModelDataSourceS3DataSourceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryContainerModelDataSourceInitParameters. +func (in *PrimaryContainerModelDataSourceInitParameters) DeepCopy() *PrimaryContainerModelDataSourceInitParameters { + if in == nil { + return nil + } + out := new(PrimaryContainerModelDataSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryContainerModelDataSourceObservation) DeepCopyInto(out *PrimaryContainerModelDataSourceObservation) { + *out = *in + if in.S3DataSource != nil { + in, out := &in.S3DataSource, &out.S3DataSource + *out = make([]ModelDataSourceS3DataSourceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryContainerModelDataSourceObservation. +func (in *PrimaryContainerModelDataSourceObservation) DeepCopy() *PrimaryContainerModelDataSourceObservation { + if in == nil { + return nil + } + out := new(PrimaryContainerModelDataSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrimaryContainerModelDataSourceParameters) DeepCopyInto(out *PrimaryContainerModelDataSourceParameters) { + *out = *in + if in.S3DataSource != nil { + in, out := &in.S3DataSource, &out.S3DataSource + *out = make([]ModelDataSourceS3DataSourceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryContainerModelDataSourceParameters. +func (in *PrimaryContainerModelDataSourceParameters) DeepCopy() *PrimaryContainerModelDataSourceParameters { + if in == nil { + return nil + } + out := new(PrimaryContainerModelDataSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryContainerObservation) DeepCopyInto(out *PrimaryContainerObservation) { + *out = *in + if in.ContainerHostname != nil { + in, out := &in.ContainerHostname, &out.ContainerHostname + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImageConfig != nil { + in, out := &in.ImageConfig, &out.ImageConfig + *out = new(PrimaryContainerImageConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.ModelDataSource != nil { + in, out := &in.ModelDataSource, &out.ModelDataSource + *out = new(PrimaryContainerModelDataSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.ModelDataURL != nil { + in, out := 
&in.ModelDataURL, &out.ModelDataURL + *out = new(string) + **out = **in + } + if in.ModelPackageName != nil { + in, out := &in.ModelPackageName, &out.ModelPackageName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryContainerObservation. +func (in *PrimaryContainerObservation) DeepCopy() *PrimaryContainerObservation { + if in == nil { + return nil + } + out := new(PrimaryContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryContainerParameters) DeepCopyInto(out *PrimaryContainerParameters) { + *out = *in + if in.ContainerHostname != nil { + in, out := &in.ContainerHostname, &out.ContainerHostname + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.ImageConfig != nil { + in, out := &in.ImageConfig, &out.ImageConfig + *out = new(PrimaryContainerImageConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.ModelDataSource != nil { + in, out := &in.ModelDataSource, &out.ModelDataSource + *out = new(PrimaryContainerModelDataSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.ModelDataURL != nil { + in, out := &in.ModelDataURL, &out.ModelDataURL + *out = new(string) + **out = **in + } + if in.ModelPackageName != nil { + in, out := &in.ModelPackageName, &out.ModelPackageName + *out = new(string) + **out = **in + } 
+} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryContainerParameters. +func (in *PrimaryContainerParameters) DeepCopy() *PrimaryContainerParameters { + if in == nil { + return nil + } + out := new(PrimaryContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductionVariantsInitParameters) DeepCopyInto(out *ProductionVariantsInitParameters) { + *out = *in + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } + if in.ContainerStartupHealthCheckTimeoutInSeconds != nil { + in, out := &in.ContainerStartupHealthCheckTimeoutInSeconds, &out.ContainerStartupHealthCheckTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.CoreDumpConfig != nil { + in, out := &in.CoreDumpConfig, &out.CoreDumpConfig + *out = new(CoreDumpConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableSsmAccess != nil { + in, out := &in.EnableSsmAccess, &out.EnableSsmAccess + *out = new(bool) + **out = **in + } + if in.InitialInstanceCount != nil { + in, out := &in.InitialInstanceCount, &out.InitialInstanceCount + *out = new(float64) + **out = **in + } + if in.InitialVariantWeight != nil { + in, out := &in.InitialVariantWeight, &out.InitialVariantWeight + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.ModelDataDownloadTimeoutInSeconds != nil { + in, out := &in.ModelDataDownloadTimeoutInSeconds, &out.ModelDataDownloadTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ModelName != nil { + in, out := &in.ModelName, &out.ModelName + *out = new(string) + **out = **in + } + if in.ModelNameRef != nil { + in, out := &in.ModelNameRef, &out.ModelNameRef + *out = new(v1.Reference) + 
(*in).DeepCopyInto(*out) + } + if in.ModelNameSelector != nil { + in, out := &in.ModelNameSelector, &out.ModelNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoutingConfig != nil { + in, out := &in.RoutingConfig, &out.RoutingConfig + *out = make([]RoutingConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerlessConfig != nil { + in, out := &in.ServerlessConfig, &out.ServerlessConfig + *out = new(ServerlessConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VariantName != nil { + in, out := &in.VariantName, &out.VariantName + *out = new(string) + **out = **in + } + if in.VolumeSizeInGb != nil { + in, out := &in.VolumeSizeInGb, &out.VolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductionVariantsInitParameters. +func (in *ProductionVariantsInitParameters) DeepCopy() *ProductionVariantsInitParameters { + if in == nil { + return nil + } + out := new(ProductionVariantsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProductionVariantsObservation) DeepCopyInto(out *ProductionVariantsObservation) { + *out = *in + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } + if in.ContainerStartupHealthCheckTimeoutInSeconds != nil { + in, out := &in.ContainerStartupHealthCheckTimeoutInSeconds, &out.ContainerStartupHealthCheckTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.CoreDumpConfig != nil { + in, out := &in.CoreDumpConfig, &out.CoreDumpConfig + *out = new(CoreDumpConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.EnableSsmAccess != nil { + in, out := &in.EnableSsmAccess, &out.EnableSsmAccess + *out = new(bool) + **out = **in + } + if in.InitialInstanceCount != nil { + in, out := &in.InitialInstanceCount, &out.InitialInstanceCount + *out = new(float64) + **out = **in + } + if in.InitialVariantWeight != nil { + in, out := &in.InitialVariantWeight, &out.InitialVariantWeight + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.ModelDataDownloadTimeoutInSeconds != nil { + in, out := &in.ModelDataDownloadTimeoutInSeconds, &out.ModelDataDownloadTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ModelName != nil { + in, out := &in.ModelName, &out.ModelName + *out = new(string) + **out = **in + } + if in.RoutingConfig != nil { + in, out := &in.RoutingConfig, &out.RoutingConfig + *out = make([]RoutingConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerlessConfig != nil { + in, out := &in.ServerlessConfig, &out.ServerlessConfig + *out = new(ServerlessConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.VariantName != nil { + in, out := &in.VariantName, &out.VariantName + *out = new(string) + **out = **in + } + if in.VolumeSizeInGb != nil { + in, out := &in.VolumeSizeInGb, &out.VolumeSizeInGb + 
*out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductionVariantsObservation. +func (in *ProductionVariantsObservation) DeepCopy() *ProductionVariantsObservation { + if in == nil { + return nil + } + out := new(ProductionVariantsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductionVariantsParameters) DeepCopyInto(out *ProductionVariantsParameters) { + *out = *in + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } + if in.ContainerStartupHealthCheckTimeoutInSeconds != nil { + in, out := &in.ContainerStartupHealthCheckTimeoutInSeconds, &out.ContainerStartupHealthCheckTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.CoreDumpConfig != nil { + in, out := &in.CoreDumpConfig, &out.CoreDumpConfig + *out = new(CoreDumpConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableSsmAccess != nil { + in, out := &in.EnableSsmAccess, &out.EnableSsmAccess + *out = new(bool) + **out = **in + } + if in.InitialInstanceCount != nil { + in, out := &in.InitialInstanceCount, &out.InitialInstanceCount + *out = new(float64) + **out = **in + } + if in.InitialVariantWeight != nil { + in, out := &in.InitialVariantWeight, &out.InitialVariantWeight + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.ModelDataDownloadTimeoutInSeconds != nil { + in, out := &in.ModelDataDownloadTimeoutInSeconds, &out.ModelDataDownloadTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ModelName != nil { + in, out := &in.ModelName, &out.ModelName + *out = new(string) + **out = **in + } + if in.ModelNameRef != nil { + in, out := &in.ModelNameRef, &out.ModelNameRef + *out = 
new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ModelNameSelector != nil { + in, out := &in.ModelNameSelector, &out.ModelNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoutingConfig != nil { + in, out := &in.RoutingConfig, &out.RoutingConfig + *out = make([]RoutingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerlessConfig != nil { + in, out := &in.ServerlessConfig, &out.ServerlessConfig + *out = new(ServerlessConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.VariantName != nil { + in, out := &in.VariantName, &out.VariantName + *out = new(string) + **out = **in + } + if in.VolumeSizeInGb != nil { + in, out := &in.VolumeSizeInGb, &out.VolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductionVariantsParameters. +func (in *ProductionVariantsParameters) DeepCopy() *ProductionVariantsParameters { + if in == nil { + return nil + } + out := new(ProductionVariantsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RSessionAppSettingsCustomImageInitParameters) DeepCopyInto(out *RSessionAppSettingsCustomImageInitParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSessionAppSettingsCustomImageInitParameters. 
+func (in *RSessionAppSettingsCustomImageInitParameters) DeepCopy() *RSessionAppSettingsCustomImageInitParameters { + if in == nil { + return nil + } + out := new(RSessionAppSettingsCustomImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RSessionAppSettingsCustomImageObservation) DeepCopyInto(out *RSessionAppSettingsCustomImageObservation) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSessionAppSettingsCustomImageObservation. +func (in *RSessionAppSettingsCustomImageObservation) DeepCopy() *RSessionAppSettingsCustomImageObservation { + if in == nil { + return nil + } + out := new(RSessionAppSettingsCustomImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RSessionAppSettingsCustomImageParameters) DeepCopyInto(out *RSessionAppSettingsCustomImageParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSessionAppSettingsCustomImageParameters. +func (in *RSessionAppSettingsCustomImageParameters) DeepCopy() *RSessionAppSettingsCustomImageParameters { + if in == nil { + return nil + } + out := new(RSessionAppSettingsCustomImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RSessionAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *RSessionAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSessionAppSettingsDefaultResourceSpecInitParameters. +func (in *RSessionAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *RSessionAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(RSessionAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RSessionAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *RSessionAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSessionAppSettingsDefaultResourceSpecObservation. +func (in *RSessionAppSettingsDefaultResourceSpecObservation) DeepCopy() *RSessionAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(RSessionAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RSessionAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *RSessionAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSessionAppSettingsDefaultResourceSpecParameters. +func (in *RSessionAppSettingsDefaultResourceSpecParameters) DeepCopy() *RSessionAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(RSessionAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RSessionAppSettingsInitParameters) DeepCopyInto(out *RSessionAppSettingsInitParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]RSessionAppSettingsCustomImageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(RSessionAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSessionAppSettingsInitParameters. +func (in *RSessionAppSettingsInitParameters) DeepCopy() *RSessionAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(RSessionAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RSessionAppSettingsObservation) DeepCopyInto(out *RSessionAppSettingsObservation) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]RSessionAppSettingsCustomImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(RSessionAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSessionAppSettingsObservation. +func (in *RSessionAppSettingsObservation) DeepCopy() *RSessionAppSettingsObservation { + if in == nil { + return nil + } + out := new(RSessionAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RSessionAppSettingsParameters) DeepCopyInto(out *RSessionAppSettingsParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]RSessionAppSettingsCustomImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(RSessionAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RSessionAppSettingsParameters. +func (in *RSessionAppSettingsParameters) DeepCopy() *RSessionAppSettingsParameters { + if in == nil { + return nil + } + out := new(RSessionAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RStudioServerProAppSettingsInitParameters) DeepCopyInto(out *RStudioServerProAppSettingsInitParameters) { + *out = *in + if in.AccessStatus != nil { + in, out := &in.AccessStatus, &out.AccessStatus + *out = new(string) + **out = **in + } + if in.UserGroup != nil { + in, out := &in.UserGroup, &out.UserGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RStudioServerProAppSettingsInitParameters. +func (in *RStudioServerProAppSettingsInitParameters) DeepCopy() *RStudioServerProAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(RStudioServerProAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RStudioServerProAppSettingsObservation) DeepCopyInto(out *RStudioServerProAppSettingsObservation) { + *out = *in + if in.AccessStatus != nil { + in, out := &in.AccessStatus, &out.AccessStatus + *out = new(string) + **out = **in + } + if in.UserGroup != nil { + in, out := &in.UserGroup, &out.UserGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RStudioServerProAppSettingsObservation. +func (in *RStudioServerProAppSettingsObservation) DeepCopy() *RStudioServerProAppSettingsObservation { + if in == nil { + return nil + } + out := new(RStudioServerProAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RStudioServerProAppSettingsParameters) DeepCopyInto(out *RStudioServerProAppSettingsParameters) { + *out = *in + if in.AccessStatus != nil { + in, out := &in.AccessStatus, &out.AccessStatus + *out = new(string) + **out = **in + } + if in.UserGroup != nil { + in, out := &in.UserGroup, &out.UserGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RStudioServerProAppSettingsParameters. +func (in *RStudioServerProAppSettingsParameters) DeepCopy() *RStudioServerProAppSettingsParameters { + if in == nil { + return nil + } + out := new(RStudioServerProAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RStudioServerProDomainSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *RStudioServerProDomainSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RStudioServerProDomainSettingsDefaultResourceSpecInitParameters. +func (in *RStudioServerProDomainSettingsDefaultResourceSpecInitParameters) DeepCopy() *RStudioServerProDomainSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(RStudioServerProDomainSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RStudioServerProDomainSettingsDefaultResourceSpecObservation) DeepCopyInto(out *RStudioServerProDomainSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RStudioServerProDomainSettingsDefaultResourceSpecObservation. +func (in *RStudioServerProDomainSettingsDefaultResourceSpecObservation) DeepCopy() *RStudioServerProDomainSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(RStudioServerProDomainSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RStudioServerProDomainSettingsDefaultResourceSpecParameters) DeepCopyInto(out *RStudioServerProDomainSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RStudioServerProDomainSettingsDefaultResourceSpecParameters. +func (in *RStudioServerProDomainSettingsDefaultResourceSpecParameters) DeepCopy() *RStudioServerProDomainSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(RStudioServerProDomainSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RStudioServerProDomainSettingsInitParameters) DeepCopyInto(out *RStudioServerProDomainSettingsInitParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(RStudioServerProDomainSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainExecutionRoleArn != nil { + in, out := &in.DomainExecutionRoleArn, &out.DomainExecutionRoleArn + *out = new(string) + **out = **in + } + if in.RStudioConnectURL != nil { + in, out := &in.RStudioConnectURL, &out.RStudioConnectURL + *out = new(string) + **out = **in + } + if in.RStudioPackageManagerURL != nil { + in, out := &in.RStudioPackageManagerURL, &out.RStudioPackageManagerURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RStudioServerProDomainSettingsInitParameters. +func (in *RStudioServerProDomainSettingsInitParameters) DeepCopy() *RStudioServerProDomainSettingsInitParameters { + if in == nil { + return nil + } + out := new(RStudioServerProDomainSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RStudioServerProDomainSettingsObservation) DeepCopyInto(out *RStudioServerProDomainSettingsObservation) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(RStudioServerProDomainSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.DomainExecutionRoleArn != nil { + in, out := &in.DomainExecutionRoleArn, &out.DomainExecutionRoleArn + *out = new(string) + **out = **in + } + if in.RStudioConnectURL != nil { + in, out := &in.RStudioConnectURL, &out.RStudioConnectURL + *out = new(string) + **out = **in + } + if in.RStudioPackageManagerURL != nil { + in, out := &in.RStudioPackageManagerURL, &out.RStudioPackageManagerURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RStudioServerProDomainSettingsObservation. +func (in *RStudioServerProDomainSettingsObservation) DeepCopy() *RStudioServerProDomainSettingsObservation { + if in == nil { + return nil + } + out := new(RStudioServerProDomainSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RStudioServerProDomainSettingsParameters) DeepCopyInto(out *RStudioServerProDomainSettingsParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(RStudioServerProDomainSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.DomainExecutionRoleArn != nil { + in, out := &in.DomainExecutionRoleArn, &out.DomainExecutionRoleArn + *out = new(string) + **out = **in + } + if in.RStudioConnectURL != nil { + in, out := &in.RStudioConnectURL, &out.RStudioConnectURL + *out = new(string) + **out = **in + } + if in.RStudioPackageManagerURL != nil { + in, out := &in.RStudioPackageManagerURL, &out.RStudioPackageManagerURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RStudioServerProDomainSettingsParameters. +func (in *RStudioServerProDomainSettingsParameters) DeepCopy() *RStudioServerProDomainSettingsParameters { + if in == nil { + return nil + } + out := new(RStudioServerProDomainSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryAuthConfigInitParameters) DeepCopyInto(out *RepositoryAuthConfigInitParameters) { + *out = *in + if in.RepositoryCredentialsProviderArn != nil { + in, out := &in.RepositoryCredentialsProviderArn, &out.RepositoryCredentialsProviderArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryAuthConfigInitParameters. 
+func (in *RepositoryAuthConfigInitParameters) DeepCopy() *RepositoryAuthConfigInitParameters { + if in == nil { + return nil + } + out := new(RepositoryAuthConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryAuthConfigObservation) DeepCopyInto(out *RepositoryAuthConfigObservation) { + *out = *in + if in.RepositoryCredentialsProviderArn != nil { + in, out := &in.RepositoryCredentialsProviderArn, &out.RepositoryCredentialsProviderArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryAuthConfigObservation. +func (in *RepositoryAuthConfigObservation) DeepCopy() *RepositoryAuthConfigObservation { + if in == nil { + return nil + } + out := new(RepositoryAuthConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryAuthConfigParameters) DeepCopyInto(out *RepositoryAuthConfigParameters) { + *out = *in + if in.RepositoryCredentialsProviderArn != nil { + in, out := &in.RepositoryCredentialsProviderArn, &out.RepositoryCredentialsProviderArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryAuthConfigParameters. +func (in *RepositoryAuthConfigParameters) DeepCopy() *RepositoryAuthConfigParameters { + if in == nil { + return nil + } + out := new(RepositoryAuthConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceSpecInitParameters) DeepCopyInto(out *ResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpecInitParameters. +func (in *ResourceSpecInitParameters) DeepCopy() *ResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(ResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceSpecObservation) DeepCopyInto(out *ResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpecObservation. +func (in *ResourceSpecObservation) DeepCopy() *ResourceSpecObservation { + if in == nil { + return nil + } + out := new(ResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceSpecParameters) DeepCopyInto(out *ResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpecParameters. +func (in *ResourceSpecParameters) DeepCopy() *ResourceSpecParameters { + if in == nil { + return nil + } + out := new(ResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyInitParameters) DeepCopyInto(out *RetentionPolicyInitParameters) { + *out = *in + if in.HomeEFSFileSystem != nil { + in, out := &in.HomeEFSFileSystem, &out.HomeEFSFileSystem + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyInitParameters. +func (in *RetentionPolicyInitParameters) DeepCopy() *RetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(RetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionPolicyObservation) DeepCopyInto(out *RetentionPolicyObservation) { + *out = *in + if in.HomeEFSFileSystem != nil { + in, out := &in.HomeEFSFileSystem, &out.HomeEFSFileSystem + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyObservation. +func (in *RetentionPolicyObservation) DeepCopy() *RetentionPolicyObservation { + if in == nil { + return nil + } + out := new(RetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyParameters) DeepCopyInto(out *RetentionPolicyParameters) { + *out = *in + if in.HomeEFSFileSystem != nil { + in, out := &in.HomeEFSFileSystem, &out.HomeEFSFileSystem + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyParameters. +func (in *RetentionPolicyParameters) DeepCopy() *RetentionPolicyParameters { + if in == nil { + return nil + } + out := new(RetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackMaximumBatchSizeInitParameters) DeepCopyInto(out *RollbackMaximumBatchSizeInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackMaximumBatchSizeInitParameters. 
+func (in *RollbackMaximumBatchSizeInitParameters) DeepCopy() *RollbackMaximumBatchSizeInitParameters { + if in == nil { + return nil + } + out := new(RollbackMaximumBatchSizeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackMaximumBatchSizeObservation) DeepCopyInto(out *RollbackMaximumBatchSizeObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackMaximumBatchSizeObservation. +func (in *RollbackMaximumBatchSizeObservation) DeepCopy() *RollbackMaximumBatchSizeObservation { + if in == nil { + return nil + } + out := new(RollbackMaximumBatchSizeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackMaximumBatchSizeParameters) DeepCopyInto(out *RollbackMaximumBatchSizeParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackMaximumBatchSizeParameters. +func (in *RollbackMaximumBatchSizeParameters) DeepCopy() *RollbackMaximumBatchSizeParameters { + if in == nil { + return nil + } + out := new(RollbackMaximumBatchSizeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdatePolicyInitParameters) DeepCopyInto(out *RollingUpdatePolicyInitParameters) { + *out = *in + if in.MaximumBatchSize != nil { + in, out := &in.MaximumBatchSize, &out.MaximumBatchSize + *out = new(MaximumBatchSizeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaximumExecutionTimeoutInSeconds != nil { + in, out := &in.MaximumExecutionTimeoutInSeconds, &out.MaximumExecutionTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.RollbackMaximumBatchSize != nil { + in, out := &in.RollbackMaximumBatchSize, &out.RollbackMaximumBatchSize + *out = new(RollbackMaximumBatchSizeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WaitIntervalInSeconds != nil { + in, out := &in.WaitIntervalInSeconds, &out.WaitIntervalInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdatePolicyInitParameters. +func (in *RollingUpdatePolicyInitParameters) DeepCopy() *RollingUpdatePolicyInitParameters { + if in == nil { + return nil + } + out := new(RollingUpdatePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdatePolicyObservation) DeepCopyInto(out *RollingUpdatePolicyObservation) { + *out = *in + if in.MaximumBatchSize != nil { + in, out := &in.MaximumBatchSize, &out.MaximumBatchSize + *out = new(MaximumBatchSizeObservation) + (*in).DeepCopyInto(*out) + } + if in.MaximumExecutionTimeoutInSeconds != nil { + in, out := &in.MaximumExecutionTimeoutInSeconds, &out.MaximumExecutionTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.RollbackMaximumBatchSize != nil { + in, out := &in.RollbackMaximumBatchSize, &out.RollbackMaximumBatchSize + *out = new(RollbackMaximumBatchSizeObservation) + (*in).DeepCopyInto(*out) + } + if in.WaitIntervalInSeconds != nil { + in, out := &in.WaitIntervalInSeconds, &out.WaitIntervalInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdatePolicyObservation. +func (in *RollingUpdatePolicyObservation) DeepCopy() *RollingUpdatePolicyObservation { + if in == nil { + return nil + } + out := new(RollingUpdatePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdatePolicyParameters) DeepCopyInto(out *RollingUpdatePolicyParameters) { + *out = *in + if in.MaximumBatchSize != nil { + in, out := &in.MaximumBatchSize, &out.MaximumBatchSize + *out = new(MaximumBatchSizeParameters) + (*in).DeepCopyInto(*out) + } + if in.MaximumExecutionTimeoutInSeconds != nil { + in, out := &in.MaximumExecutionTimeoutInSeconds, &out.MaximumExecutionTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.RollbackMaximumBatchSize != nil { + in, out := &in.RollbackMaximumBatchSize, &out.RollbackMaximumBatchSize + *out = new(RollbackMaximumBatchSizeParameters) + (*in).DeepCopyInto(*out) + } + if in.WaitIntervalInSeconds != nil { + in, out := &in.WaitIntervalInSeconds, &out.WaitIntervalInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdatePolicyParameters. +func (in *RollingUpdatePolicyParameters) DeepCopy() *RollingUpdatePolicyParameters { + if in == nil { + return nil + } + out := new(RollingUpdatePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingConfigInitParameters) DeepCopyInto(out *RoutingConfigInitParameters) { + *out = *in + if in.RoutingStrategy != nil { + in, out := &in.RoutingStrategy, &out.RoutingStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfigInitParameters. +func (in *RoutingConfigInitParameters) DeepCopy() *RoutingConfigInitParameters { + if in == nil { + return nil + } + out := new(RoutingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingConfigObservation) DeepCopyInto(out *RoutingConfigObservation) { + *out = *in + if in.RoutingStrategy != nil { + in, out := &in.RoutingStrategy, &out.RoutingStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfigObservation. +func (in *RoutingConfigObservation) DeepCopy() *RoutingConfigObservation { + if in == nil { + return nil + } + out := new(RoutingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingConfigParameters) DeepCopyInto(out *RoutingConfigParameters) { + *out = *in + if in.RoutingStrategy != nil { + in, out := &in.RoutingStrategy, &out.RoutingStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfigParameters. +func (in *RoutingConfigParameters) DeepCopy() *RoutingConfigParameters { + if in == nil { + return nil + } + out := new(RoutingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DataSourceInitParameters) DeepCopyInto(out *S3DataSourceInitParameters) { + *out = *in + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.S3DataType != nil { + in, out := &in.S3DataType, &out.S3DataType + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DataSourceInitParameters. 
+func (in *S3DataSourceInitParameters) DeepCopy() *S3DataSourceInitParameters { + if in == nil { + return nil + } + out := new(S3DataSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DataSourceObservation) DeepCopyInto(out *S3DataSourceObservation) { + *out = *in + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.S3DataType != nil { + in, out := &in.S3DataType, &out.S3DataType + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DataSourceObservation. +func (in *S3DataSourceObservation) DeepCopy() *S3DataSourceObservation { + if in == nil { + return nil + } + out := new(S3DataSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DataSourceParameters) DeepCopyInto(out *S3DataSourceParameters) { + *out = *in + if in.CompressionType != nil { + in, out := &in.CompressionType, &out.CompressionType + *out = new(string) + **out = **in + } + if in.S3DataType != nil { + in, out := &in.S3DataType, &out.S3DataType + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DataSourceParameters. +func (in *S3DataSourceParameters) DeepCopy() *S3DataSourceParameters { + if in == nil { + return nil + } + out := new(S3DataSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *S3StorageConfigInitParameters) DeepCopyInto(out *S3StorageConfigInitParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.ResolvedOutputS3URI != nil { + in, out := &in.ResolvedOutputS3URI, &out.ResolvedOutputS3URI + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3StorageConfigInitParameters. +func (in *S3StorageConfigInitParameters) DeepCopy() *S3StorageConfigInitParameters { + if in == nil { + return nil + } + out := new(S3StorageConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3StorageConfigObservation) DeepCopyInto(out *S3StorageConfigObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.ResolvedOutputS3URI != nil { + in, out := &in.ResolvedOutputS3URI, &out.ResolvedOutputS3URI + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3StorageConfigObservation. +func (in *S3StorageConfigObservation) DeepCopy() *S3StorageConfigObservation { + if in == nil { + return nil + } + out := new(S3StorageConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3StorageConfigParameters) DeepCopyInto(out *S3StorageConfigParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.ResolvedOutputS3URI != nil { + in, out := &in.ResolvedOutputS3URI, &out.ResolvedOutputS3URI + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3StorageConfigParameters. +func (in *S3StorageConfigParameters) DeepCopy() *S3StorageConfigParameters { + if in == nil { + return nil + } + out := new(S3StorageConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigInitParameters) DeepCopyInto(out *SecurityConfigInitParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigInitParameters. +func (in *SecurityConfigInitParameters) DeepCopy() *SecurityConfigInitParameters { + if in == nil { + return nil + } + out := new(SecurityConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigObservation) DeepCopyInto(out *SecurityConfigObservation) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigObservation. 
+func (in *SecurityConfigObservation) DeepCopy() *SecurityConfigObservation { + if in == nil { + return nil + } + out := new(SecurityConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigParameters) DeepCopyInto(out *SecurityConfigParameters) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigParameters. +func (in *SecurityConfigParameters) DeepCopy() *SecurityConfigParameters { + if in == nil { + return nil + } + out := new(SecurityConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessConfigInitParameters) DeepCopyInto(out *ServerlessConfigInitParameters) { + *out = *in + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(float64) + **out = **in + } + if in.MemorySizeInMb != nil { + in, out := &in.MemorySizeInMb, &out.MemorySizeInMb + *out = new(float64) + **out = **in + } + if in.ProvisionedConcurrency != nil { + in, out := &in.ProvisionedConcurrency, &out.ProvisionedConcurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessConfigInitParameters. +func (in *ServerlessConfigInitParameters) DeepCopy() *ServerlessConfigInitParameters { + if in == nil { + return nil + } + out := new(ServerlessConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessConfigObservation) DeepCopyInto(out *ServerlessConfigObservation) { + *out = *in + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(float64) + **out = **in + } + if in.MemorySizeInMb != nil { + in, out := &in.MemorySizeInMb, &out.MemorySizeInMb + *out = new(float64) + **out = **in + } + if in.ProvisionedConcurrency != nil { + in, out := &in.ProvisionedConcurrency, &out.ProvisionedConcurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessConfigObservation. +func (in *ServerlessConfigObservation) DeepCopy() *ServerlessConfigObservation { + if in == nil { + return nil + } + out := new(ServerlessConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessConfigParameters) DeepCopyInto(out *ServerlessConfigParameters) { + *out = *in + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(float64) + **out = **in + } + if in.MemorySizeInMb != nil { + in, out := &in.MemorySizeInMb, &out.MemorySizeInMb + *out = new(float64) + **out = **in + } + if in.ProvisionedConcurrency != nil { + in, out := &in.ProvisionedConcurrency, &out.ProvisionedConcurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessConfigParameters. +func (in *ServerlessConfigParameters) DeepCopy() *ServerlessConfigParameters { + if in == nil { + return nil + } + out := new(ServerlessConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShadowProductionVariantsCoreDumpConfigInitParameters) DeepCopyInto(out *ShadowProductionVariantsCoreDumpConfigInitParameters) { + *out = *in + if in.DestinationS3URI != nil { + in, out := &in.DestinationS3URI, &out.DestinationS3URI + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsCoreDumpConfigInitParameters. +func (in *ShadowProductionVariantsCoreDumpConfigInitParameters) DeepCopy() *ShadowProductionVariantsCoreDumpConfigInitParameters { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsCoreDumpConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShadowProductionVariantsCoreDumpConfigObservation) DeepCopyInto(out *ShadowProductionVariantsCoreDumpConfigObservation) { + *out = *in + if in.DestinationS3URI != nil { + in, out := &in.DestinationS3URI, &out.DestinationS3URI + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsCoreDumpConfigObservation. +func (in *ShadowProductionVariantsCoreDumpConfigObservation) DeepCopy() *ShadowProductionVariantsCoreDumpConfigObservation { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsCoreDumpConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShadowProductionVariantsCoreDumpConfigParameters) DeepCopyInto(out *ShadowProductionVariantsCoreDumpConfigParameters) { + *out = *in + if in.DestinationS3URI != nil { + in, out := &in.DestinationS3URI, &out.DestinationS3URI + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsCoreDumpConfigParameters. +func (in *ShadowProductionVariantsCoreDumpConfigParameters) DeepCopy() *ShadowProductionVariantsCoreDumpConfigParameters { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsCoreDumpConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShadowProductionVariantsInitParameters) DeepCopyInto(out *ShadowProductionVariantsInitParameters) { + *out = *in + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } + if in.ContainerStartupHealthCheckTimeoutInSeconds != nil { + in, out := &in.ContainerStartupHealthCheckTimeoutInSeconds, &out.ContainerStartupHealthCheckTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.CoreDumpConfig != nil { + in, out := &in.CoreDumpConfig, &out.CoreDumpConfig + *out = new(ShadowProductionVariantsCoreDumpConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableSsmAccess != nil { + in, out := &in.EnableSsmAccess, &out.EnableSsmAccess + *out = new(bool) + **out = **in + } + if in.InitialInstanceCount != nil { + in, out := &in.InitialInstanceCount, &out.InitialInstanceCount + *out = new(float64) + **out = **in + } + if in.InitialVariantWeight != nil { + in, out := &in.InitialVariantWeight, &out.InitialVariantWeight + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + 
in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.ModelDataDownloadTimeoutInSeconds != nil { + in, out := &in.ModelDataDownloadTimeoutInSeconds, &out.ModelDataDownloadTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ModelName != nil { + in, out := &in.ModelName, &out.ModelName + *out = new(string) + **out = **in + } + if in.RoutingConfig != nil { + in, out := &in.RoutingConfig, &out.RoutingConfig + *out = make([]ShadowProductionVariantsRoutingConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerlessConfig != nil { + in, out := &in.ServerlessConfig, &out.ServerlessConfig + *out = new(ShadowProductionVariantsServerlessConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VariantName != nil { + in, out := &in.VariantName, &out.VariantName + *out = new(string) + **out = **in + } + if in.VolumeSizeInGb != nil { + in, out := &in.VolumeSizeInGb, &out.VolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsInitParameters. +func (in *ShadowProductionVariantsInitParameters) DeepCopy() *ShadowProductionVariantsInitParameters { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShadowProductionVariantsObservation) DeepCopyInto(out *ShadowProductionVariantsObservation) { + *out = *in + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } + if in.ContainerStartupHealthCheckTimeoutInSeconds != nil { + in, out := &in.ContainerStartupHealthCheckTimeoutInSeconds, &out.ContainerStartupHealthCheckTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.CoreDumpConfig != nil { + in, out := &in.CoreDumpConfig, &out.CoreDumpConfig + *out = new(ShadowProductionVariantsCoreDumpConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.EnableSsmAccess != nil { + in, out := &in.EnableSsmAccess, &out.EnableSsmAccess + *out = new(bool) + **out = **in + } + if in.InitialInstanceCount != nil { + in, out := &in.InitialInstanceCount, &out.InitialInstanceCount + *out = new(float64) + **out = **in + } + if in.InitialVariantWeight != nil { + in, out := &in.InitialVariantWeight, &out.InitialVariantWeight + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.ModelDataDownloadTimeoutInSeconds != nil { + in, out := &in.ModelDataDownloadTimeoutInSeconds, &out.ModelDataDownloadTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ModelName != nil { + in, out := &in.ModelName, &out.ModelName + *out = new(string) + **out = **in + } + if in.RoutingConfig != nil { + in, out := &in.RoutingConfig, &out.RoutingConfig + *out = make([]ShadowProductionVariantsRoutingConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerlessConfig != nil { + in, out := &in.ServerlessConfig, &out.ServerlessConfig + *out = new(ShadowProductionVariantsServerlessConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.VariantName != nil { + in, out := &in.VariantName, &out.VariantName + *out = new(string) + **out = **in + } + if 
in.VolumeSizeInGb != nil { + in, out := &in.VolumeSizeInGb, &out.VolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsObservation. +func (in *ShadowProductionVariantsObservation) DeepCopy() *ShadowProductionVariantsObservation { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShadowProductionVariantsParameters) DeepCopyInto(out *ShadowProductionVariantsParameters) { + *out = *in + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } + if in.ContainerStartupHealthCheckTimeoutInSeconds != nil { + in, out := &in.ContainerStartupHealthCheckTimeoutInSeconds, &out.ContainerStartupHealthCheckTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.CoreDumpConfig != nil { + in, out := &in.CoreDumpConfig, &out.CoreDumpConfig + *out = new(ShadowProductionVariantsCoreDumpConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.EnableSsmAccess != nil { + in, out := &in.EnableSsmAccess, &out.EnableSsmAccess + *out = new(bool) + **out = **in + } + if in.InitialInstanceCount != nil { + in, out := &in.InitialInstanceCount, &out.InitialInstanceCount + *out = new(float64) + **out = **in + } + if in.InitialVariantWeight != nil { + in, out := &in.InitialVariantWeight, &out.InitialVariantWeight + *out = new(float64) + **out = **in + } + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.ModelDataDownloadTimeoutInSeconds != nil { + in, out := &in.ModelDataDownloadTimeoutInSeconds, &out.ModelDataDownloadTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ModelName != nil { + in, out := &in.ModelName, 
&out.ModelName + *out = new(string) + **out = **in + } + if in.RoutingConfig != nil { + in, out := &in.RoutingConfig, &out.RoutingConfig + *out = make([]ShadowProductionVariantsRoutingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerlessConfig != nil { + in, out := &in.ServerlessConfig, &out.ServerlessConfig + *out = new(ShadowProductionVariantsServerlessConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.VariantName != nil { + in, out := &in.VariantName, &out.VariantName + *out = new(string) + **out = **in + } + if in.VolumeSizeInGb != nil { + in, out := &in.VolumeSizeInGb, &out.VolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsParameters. +func (in *ShadowProductionVariantsParameters) DeepCopy() *ShadowProductionVariantsParameters { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShadowProductionVariantsRoutingConfigInitParameters) DeepCopyInto(out *ShadowProductionVariantsRoutingConfigInitParameters) { + *out = *in + if in.RoutingStrategy != nil { + in, out := &in.RoutingStrategy, &out.RoutingStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsRoutingConfigInitParameters. +func (in *ShadowProductionVariantsRoutingConfigInitParameters) DeepCopy() *ShadowProductionVariantsRoutingConfigInitParameters { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsRoutingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShadowProductionVariantsRoutingConfigObservation) DeepCopyInto(out *ShadowProductionVariantsRoutingConfigObservation) { + *out = *in + if in.RoutingStrategy != nil { + in, out := &in.RoutingStrategy, &out.RoutingStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsRoutingConfigObservation. +func (in *ShadowProductionVariantsRoutingConfigObservation) DeepCopy() *ShadowProductionVariantsRoutingConfigObservation { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsRoutingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShadowProductionVariantsRoutingConfigParameters) DeepCopyInto(out *ShadowProductionVariantsRoutingConfigParameters) { + *out = *in + if in.RoutingStrategy != nil { + in, out := &in.RoutingStrategy, &out.RoutingStrategy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsRoutingConfigParameters. +func (in *ShadowProductionVariantsRoutingConfigParameters) DeepCopy() *ShadowProductionVariantsRoutingConfigParameters { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsRoutingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShadowProductionVariantsServerlessConfigInitParameters) DeepCopyInto(out *ShadowProductionVariantsServerlessConfigInitParameters) { + *out = *in + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(float64) + **out = **in + } + if in.MemorySizeInMb != nil { + in, out := &in.MemorySizeInMb, &out.MemorySizeInMb + *out = new(float64) + **out = **in + } + if in.ProvisionedConcurrency != nil { + in, out := &in.ProvisionedConcurrency, &out.ProvisionedConcurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsServerlessConfigInitParameters. +func (in *ShadowProductionVariantsServerlessConfigInitParameters) DeepCopy() *ShadowProductionVariantsServerlessConfigInitParameters { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsServerlessConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShadowProductionVariantsServerlessConfigObservation) DeepCopyInto(out *ShadowProductionVariantsServerlessConfigObservation) { + *out = *in + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(float64) + **out = **in + } + if in.MemorySizeInMb != nil { + in, out := &in.MemorySizeInMb, &out.MemorySizeInMb + *out = new(float64) + **out = **in + } + if in.ProvisionedConcurrency != nil { + in, out := &in.ProvisionedConcurrency, &out.ProvisionedConcurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsServerlessConfigObservation. 
+func (in *ShadowProductionVariantsServerlessConfigObservation) DeepCopy() *ShadowProductionVariantsServerlessConfigObservation { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsServerlessConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShadowProductionVariantsServerlessConfigParameters) DeepCopyInto(out *ShadowProductionVariantsServerlessConfigParameters) { + *out = *in + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(float64) + **out = **in + } + if in.MemorySizeInMb != nil { + in, out := &in.MemorySizeInMb, &out.MemorySizeInMb + *out = new(float64) + **out = **in + } + if in.ProvisionedConcurrency != nil { + in, out := &in.ProvisionedConcurrency, &out.ProvisionedConcurrency + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShadowProductionVariantsServerlessConfigParameters. +func (in *ShadowProductionVariantsServerlessConfigParameters) DeepCopy() *ShadowProductionVariantsServerlessConfigParameters { + if in == nil { + return nil + } + out := new(ShadowProductionVariantsServerlessConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharingSettingsInitParameters) DeepCopyInto(out *SharingSettingsInitParameters) { + *out = *in + if in.NotebookOutputOption != nil { + in, out := &in.NotebookOutputOption, &out.NotebookOutputOption + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } + if in.S3OutputPath != nil { + in, out := &in.S3OutputPath, &out.S3OutputPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingSettingsInitParameters. +func (in *SharingSettingsInitParameters) DeepCopy() *SharingSettingsInitParameters { + if in == nil { + return nil + } + out := new(SharingSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharingSettingsObservation) DeepCopyInto(out *SharingSettingsObservation) { + *out = *in + if in.NotebookOutputOption != nil { + in, out := &in.NotebookOutputOption, &out.NotebookOutputOption + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } + if in.S3OutputPath != nil { + in, out := &in.S3OutputPath, &out.S3OutputPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingSettingsObservation. +func (in *SharingSettingsObservation) DeepCopy() *SharingSettingsObservation { + if in == nil { + return nil + } + out := new(SharingSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharingSettingsParameters) DeepCopyInto(out *SharingSettingsParameters) { + *out = *in + if in.NotebookOutputOption != nil { + in, out := &in.NotebookOutputOption, &out.NotebookOutputOption + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } + if in.S3OutputPath != nil { + in, out := &in.S3OutputPath, &out.S3OutputPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingSettingsParameters. +func (in *SharingSettingsParameters) DeepCopy() *SharingSettingsParameters { + if in == nil { + return nil + } + out := new(SharingSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceIPConfigInitParameters) DeepCopyInto(out *SourceIPConfigInitParameters) { + *out = *in + if in.Cidrs != nil { + in, out := &in.Cidrs, &out.Cidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPConfigInitParameters. +func (in *SourceIPConfigInitParameters) DeepCopy() *SourceIPConfigInitParameters { + if in == nil { + return nil + } + out := new(SourceIPConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceIPConfigObservation) DeepCopyInto(out *SourceIPConfigObservation) { + *out = *in + if in.Cidrs != nil { + in, out := &in.Cidrs, &out.Cidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPConfigObservation. +func (in *SourceIPConfigObservation) DeepCopy() *SourceIPConfigObservation { + if in == nil { + return nil + } + out := new(SourceIPConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceIPConfigParameters) DeepCopyInto(out *SourceIPConfigParameters) { + *out = *in + if in.Cidrs != nil { + in, out := &in.Cidrs, &out.Cidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceIPConfigParameters. +func (in *SourceIPConfigParameters) DeepCopy() *SourceIPConfigParameters { + if in == nil { + return nil + } + out := new(SourceIPConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Space) DeepCopyInto(out *Space) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Space. 
+func (in *Space) DeepCopy() *Space { + if in == nil { + return nil + } + out := new(Space) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Space) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceInitParameters) DeepCopyInto(out *SpaceInitParameters) { + *out = *in + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.DomainIDRef != nil { + in, out := &in.DomainIDRef, &out.DomainIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainIDSelector != nil { + in, out := &in.DomainIDSelector, &out.DomainIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OwnershipSettings != nil { + in, out := &in.OwnershipSettings, &out.OwnershipSettings + *out = new(OwnershipSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceDisplayName != nil { + in, out := &in.SpaceDisplayName, &out.SpaceDisplayName + *out = new(string) + **out = **in + } + if in.SpaceName != nil { + in, out := &in.SpaceName, &out.SpaceName + *out = new(string) + **out = **in + } + if in.SpaceSettings != nil { + in, out := &in.SpaceSettings, &out.SpaceSettings + *out = new(SpaceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceSharingSettings != nil { + in, out := &in.SpaceSharingSettings, &out.SpaceSharingSettings + *out = new(SpaceSharingSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceInitParameters. +func (in *SpaceInitParameters) DeepCopy() *SpaceInitParameters { + if in == nil { + return nil + } + out := new(SpaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceList) DeepCopyInto(out *SpaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Space, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceList. +func (in *SpaceList) DeepCopy() *SpaceList { + if in == nil { + return nil + } + out := new(SpaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceObservation) DeepCopyInto(out *SpaceObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.HomeEFSFileSystemUID != nil { + in, out := &in.HomeEFSFileSystemUID, &out.HomeEFSFileSystemUID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OwnershipSettings != nil { + in, out := &in.OwnershipSettings, &out.OwnershipSettings + *out = new(OwnershipSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SpaceDisplayName != nil { + in, out := &in.SpaceDisplayName, &out.SpaceDisplayName + *out = new(string) + **out = **in + } + if in.SpaceName != nil { + in, out := &in.SpaceName, &out.SpaceName + *out = new(string) + **out = **in + } + if in.SpaceSettings != nil { + in, out := &in.SpaceSettings, &out.SpaceSettings + *out = new(SpaceSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SpaceSharingSettings != nil { + in, out := &in.SpaceSharingSettings, &out.SpaceSharingSettings + *out = new(SpaceSharingSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + 
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceObservation. +func (in *SpaceObservation) DeepCopy() *SpaceObservation { + if in == nil { + return nil + } + out := new(SpaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceParameters) DeepCopyInto(out *SpaceParameters) { + *out = *in + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.DomainIDRef != nil { + in, out := &in.DomainIDRef, &out.DomainIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainIDSelector != nil { + in, out := &in.DomainIDSelector, &out.DomainIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OwnershipSettings != nil { + in, out := &in.OwnershipSettings, &out.OwnershipSettings + *out = new(OwnershipSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SpaceDisplayName != nil { + in, out := &in.SpaceDisplayName, &out.SpaceDisplayName + *out = new(string) + **out = **in + } + if in.SpaceName != nil { + in, out := &in.SpaceName, &out.SpaceName + *out = new(string) + **out = **in + } + if in.SpaceSettings != nil { + in, out := &in.SpaceSettings, &out.SpaceSettings + *out = new(SpaceSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceSharingSettings != nil { + in, out := &in.SpaceSharingSettings, &out.SpaceSharingSettings + *out = new(SpaceSharingSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] 
= outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceParameters. +func (in *SpaceParameters) DeepCopy() *SpaceParameters { + if in == nil { + return nil + } + out := new(SpaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters. +func (in *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecObservation. +func (in *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) DeepCopy() *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecParameters. +func (in *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) DeepCopy() *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsCodeEditorAppSettingsInitParameters) DeepCopyInto(out *SpaceSettingsCodeEditorAppSettingsInitParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsCodeEditorAppSettingsInitParameters. 
+func (in *SpaceSettingsCodeEditorAppSettingsInitParameters) DeepCopy() *SpaceSettingsCodeEditorAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsCodeEditorAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsCodeEditorAppSettingsObservation) DeepCopyInto(out *SpaceSettingsCodeEditorAppSettingsObservation) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsCodeEditorAppSettingsObservation. +func (in *SpaceSettingsCodeEditorAppSettingsObservation) DeepCopy() *SpaceSettingsCodeEditorAppSettingsObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsCodeEditorAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsCodeEditorAppSettingsParameters) DeepCopyInto(out *SpaceSettingsCodeEditorAppSettingsParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsCodeEditorAppSettingsParameters. 
+func (in *SpaceSettingsCodeEditorAppSettingsParameters) DeepCopy() *SpaceSettingsCodeEditorAppSettingsParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsCodeEditorAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsInitParameters) DeepCopyInto(out *SpaceSettingsInitParameters) { + *out = *in + if in.AppType != nil { + in, out := &in.AppType, &out.AppType + *out = new(string) + **out = **in + } + if in.CodeEditorAppSettings != nil { + in, out := &in.CodeEditorAppSettings, &out.CodeEditorAppSettings + *out = new(SpaceSettingsCodeEditorAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomFileSystem != nil { + in, out := &in.CustomFileSystem, &out.CustomFileSystem + *out = make([]CustomFileSystemInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JupyterLabAppSettings != nil { + in, out := &in.JupyterLabAppSettings, &out.JupyterLabAppSettings + *out = new(SpaceSettingsJupyterLabAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(SpaceSettingsJupyterServerAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(SpaceSettingsKernelGatewayAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceStorageSettings != nil { + in, out := &in.SpaceStorageSettings, &out.SpaceStorageSettings + *out = new(SpaceSettingsSpaceStorageSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsInitParameters. 
+func (in *SpaceSettingsInitParameters) DeepCopy() *SpaceSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsJupyterLabAppSettingsCodeRepositoryInitParameters) DeepCopyInto(out *SpaceSettingsJupyterLabAppSettingsCodeRepositoryInitParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterLabAppSettingsCodeRepositoryInitParameters. +func (in *SpaceSettingsJupyterLabAppSettingsCodeRepositoryInitParameters) DeepCopy() *SpaceSettingsJupyterLabAppSettingsCodeRepositoryInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterLabAppSettingsCodeRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsJupyterLabAppSettingsCodeRepositoryObservation) DeepCopyInto(out *SpaceSettingsJupyterLabAppSettingsCodeRepositoryObservation) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterLabAppSettingsCodeRepositoryObservation. 
+func (in *SpaceSettingsJupyterLabAppSettingsCodeRepositoryObservation) DeepCopy() *SpaceSettingsJupyterLabAppSettingsCodeRepositoryObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterLabAppSettingsCodeRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsJupyterLabAppSettingsCodeRepositoryParameters) DeepCopyInto(out *SpaceSettingsJupyterLabAppSettingsCodeRepositoryParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterLabAppSettingsCodeRepositoryParameters. +func (in *SpaceSettingsJupyterLabAppSettingsCodeRepositoryParameters) DeepCopy() *SpaceSettingsJupyterLabAppSettingsCodeRepositoryParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterLabAppSettingsCodeRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters. +func (in *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecObservation. +func (in *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) DeepCopy() *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecParameters. +func (in *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) DeepCopy() *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsJupyterLabAppSettingsInitParameters) DeepCopyInto(out *SpaceSettingsJupyterLabAppSettingsInitParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]SpaceSettingsJupyterLabAppSettingsCodeRepositoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterLabAppSettingsInitParameters. +func (in *SpaceSettingsJupyterLabAppSettingsInitParameters) DeepCopy() *SpaceSettingsJupyterLabAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterLabAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsJupyterLabAppSettingsObservation) DeepCopyInto(out *SpaceSettingsJupyterLabAppSettingsObservation) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]SpaceSettingsJupyterLabAppSettingsCodeRepositoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterLabAppSettingsObservation. 
+func (in *SpaceSettingsJupyterLabAppSettingsObservation) DeepCopy() *SpaceSettingsJupyterLabAppSettingsObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterLabAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsJupyterLabAppSettingsParameters) DeepCopyInto(out *SpaceSettingsJupyterLabAppSettingsParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]SpaceSettingsJupyterLabAppSettingsCodeRepositoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterLabAppSettingsParameters. +func (in *SpaceSettingsJupyterLabAppSettingsParameters) DeepCopy() *SpaceSettingsJupyterLabAppSettingsParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterLabAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) DeepCopyInto(out *SpaceSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterServerAppSettingsCodeRepositoryInitParameters. 
+func (in *SpaceSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) DeepCopy() *SpaceSettingsJupyterServerAppSettingsCodeRepositoryInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsJupyterServerAppSettingsCodeRepositoryObservation) DeepCopyInto(out *SpaceSettingsJupyterServerAppSettingsCodeRepositoryObservation) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterServerAppSettingsCodeRepositoryObservation. +func (in *SpaceSettingsJupyterServerAppSettingsCodeRepositoryObservation) DeepCopy() *SpaceSettingsJupyterServerAppSettingsCodeRepositoryObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterServerAppSettingsCodeRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsJupyterServerAppSettingsCodeRepositoryParameters) DeepCopyInto(out *SpaceSettingsJupyterServerAppSettingsCodeRepositoryParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterServerAppSettingsCodeRepositoryParameters. 
+func (in *SpaceSettingsJupyterServerAppSettingsCodeRepositoryParameters) DeepCopy() *SpaceSettingsJupyterServerAppSettingsCodeRepositoryParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterServerAppSettingsCodeRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters. +func (in *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecObservation. +func (in *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) DeepCopy() *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecParameters. +func (in *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) DeepCopy() *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsJupyterServerAppSettingsInitParameters) DeepCopyInto(out *SpaceSettingsJupyterServerAppSettingsInitParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]SpaceSettingsJupyterServerAppSettingsCodeRepositoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterServerAppSettingsInitParameters. +func (in *SpaceSettingsJupyterServerAppSettingsInitParameters) DeepCopy() *SpaceSettingsJupyterServerAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterServerAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsJupyterServerAppSettingsObservation) DeepCopyInto(out *SpaceSettingsJupyterServerAppSettingsObservation) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]SpaceSettingsJupyterServerAppSettingsCodeRepositoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterServerAppSettingsObservation. +func (in *SpaceSettingsJupyterServerAppSettingsObservation) DeepCopy() *SpaceSettingsJupyterServerAppSettingsObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterServerAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsJupyterServerAppSettingsParameters) DeepCopyInto(out *SpaceSettingsJupyterServerAppSettingsParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]SpaceSettingsJupyterServerAppSettingsCodeRepositoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsJupyterServerAppSettingsParameters. +func (in *SpaceSettingsJupyterServerAppSettingsParameters) DeepCopy() *SpaceSettingsJupyterServerAppSettingsParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsJupyterServerAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsKernelGatewayAppSettingsCustomImageInitParameters) DeepCopyInto(out *SpaceSettingsKernelGatewayAppSettingsCustomImageInitParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsKernelGatewayAppSettingsCustomImageInitParameters. +func (in *SpaceSettingsKernelGatewayAppSettingsCustomImageInitParameters) DeepCopy() *SpaceSettingsKernelGatewayAppSettingsCustomImageInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsKernelGatewayAppSettingsCustomImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsKernelGatewayAppSettingsCustomImageObservation) DeepCopyInto(out *SpaceSettingsKernelGatewayAppSettingsCustomImageObservation) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsKernelGatewayAppSettingsCustomImageObservation. 
+func (in *SpaceSettingsKernelGatewayAppSettingsCustomImageObservation) DeepCopy() *SpaceSettingsKernelGatewayAppSettingsCustomImageObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsKernelGatewayAppSettingsCustomImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsKernelGatewayAppSettingsCustomImageParameters) DeepCopyInto(out *SpaceSettingsKernelGatewayAppSettingsCustomImageParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsKernelGatewayAppSettingsCustomImageParameters. +func (in *SpaceSettingsKernelGatewayAppSettingsCustomImageParameters) DeepCopy() *SpaceSettingsKernelGatewayAppSettingsCustomImageParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsKernelGatewayAppSettingsCustomImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters. +func (in *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation. +func (in *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) DeepCopy() *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters. +func (in *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) DeepCopy() *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsKernelGatewayAppSettingsInitParameters) DeepCopyInto(out *SpaceSettingsKernelGatewayAppSettingsInitParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]SpaceSettingsKernelGatewayAppSettingsCustomImageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsKernelGatewayAppSettingsInitParameters. +func (in *SpaceSettingsKernelGatewayAppSettingsInitParameters) DeepCopy() *SpaceSettingsKernelGatewayAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsKernelGatewayAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsKernelGatewayAppSettingsObservation) DeepCopyInto(out *SpaceSettingsKernelGatewayAppSettingsObservation) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]SpaceSettingsKernelGatewayAppSettingsCustomImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsKernelGatewayAppSettingsObservation. +func (in *SpaceSettingsKernelGatewayAppSettingsObservation) DeepCopy() *SpaceSettingsKernelGatewayAppSettingsObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsKernelGatewayAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsKernelGatewayAppSettingsParameters) DeepCopyInto(out *SpaceSettingsKernelGatewayAppSettingsParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]SpaceSettingsKernelGatewayAppSettingsCustomImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsKernelGatewayAppSettingsParameters. +func (in *SpaceSettingsKernelGatewayAppSettingsParameters) DeepCopy() *SpaceSettingsKernelGatewayAppSettingsParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsKernelGatewayAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsObservation) DeepCopyInto(out *SpaceSettingsObservation) { + *out = *in + if in.AppType != nil { + in, out := &in.AppType, &out.AppType + *out = new(string) + **out = **in + } + if in.CodeEditorAppSettings != nil { + in, out := &in.CodeEditorAppSettings, &out.CodeEditorAppSettings + *out = new(SpaceSettingsCodeEditorAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomFileSystem != nil { + in, out := &in.CustomFileSystem, &out.CustomFileSystem + *out = make([]CustomFileSystemObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JupyterLabAppSettings != nil { + in, out := &in.JupyterLabAppSettings, &out.JupyterLabAppSettings + *out = new(SpaceSettingsJupyterLabAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(SpaceSettingsJupyterServerAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(SpaceSettingsKernelGatewayAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SpaceStorageSettings != nil { + in, out := &in.SpaceStorageSettings, &out.SpaceStorageSettings + *out = new(SpaceSettingsSpaceStorageSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsObservation. +func (in *SpaceSettingsObservation) DeepCopy() *SpaceSettingsObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsParameters) DeepCopyInto(out *SpaceSettingsParameters) { + *out = *in + if in.AppType != nil { + in, out := &in.AppType, &out.AppType + *out = new(string) + **out = **in + } + if in.CodeEditorAppSettings != nil { + in, out := &in.CodeEditorAppSettings, &out.CodeEditorAppSettings + *out = new(SpaceSettingsCodeEditorAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomFileSystem != nil { + in, out := &in.CustomFileSystem, &out.CustomFileSystem + *out = make([]CustomFileSystemParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JupyterLabAppSettings != nil { + in, out := &in.JupyterLabAppSettings, &out.JupyterLabAppSettings + *out = new(SpaceSettingsJupyterLabAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(SpaceSettingsJupyterServerAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(SpaceSettingsKernelGatewayAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceStorageSettings != nil { + in, out := &in.SpaceStorageSettings, &out.SpaceStorageSettings + *out = new(SpaceSettingsSpaceStorageSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsParameters. +func (in *SpaceSettingsParameters) DeepCopy() *SpaceSettingsParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsSpaceStorageSettingsInitParameters) DeepCopyInto(out *SpaceSettingsSpaceStorageSettingsInitParameters) { + *out = *in + if in.EBSStorageSettings != nil { + in, out := &in.EBSStorageSettings, &out.EBSStorageSettings + *out = new(EBSStorageSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsSpaceStorageSettingsInitParameters. +func (in *SpaceSettingsSpaceStorageSettingsInitParameters) DeepCopy() *SpaceSettingsSpaceStorageSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsSpaceStorageSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSettingsSpaceStorageSettingsObservation) DeepCopyInto(out *SpaceSettingsSpaceStorageSettingsObservation) { + *out = *in + if in.EBSStorageSettings != nil { + in, out := &in.EBSStorageSettings, &out.EBSStorageSettings + *out = new(EBSStorageSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsSpaceStorageSettingsObservation. +func (in *SpaceSettingsSpaceStorageSettingsObservation) DeepCopy() *SpaceSettingsSpaceStorageSettingsObservation { + if in == nil { + return nil + } + out := new(SpaceSettingsSpaceStorageSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpaceSettingsSpaceStorageSettingsParameters) DeepCopyInto(out *SpaceSettingsSpaceStorageSettingsParameters) { + *out = *in + if in.EBSStorageSettings != nil { + in, out := &in.EBSStorageSettings, &out.EBSStorageSettings + *out = new(EBSStorageSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSettingsSpaceStorageSettingsParameters. +func (in *SpaceSettingsSpaceStorageSettingsParameters) DeepCopy() *SpaceSettingsSpaceStorageSettingsParameters { + if in == nil { + return nil + } + out := new(SpaceSettingsSpaceStorageSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSharingSettingsInitParameters) DeepCopyInto(out *SpaceSharingSettingsInitParameters) { + *out = *in + if in.SharingType != nil { + in, out := &in.SharingType, &out.SharingType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSharingSettingsInitParameters. +func (in *SpaceSharingSettingsInitParameters) DeepCopy() *SpaceSharingSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpaceSharingSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSharingSettingsObservation) DeepCopyInto(out *SpaceSharingSettingsObservation) { + *out = *in + if in.SharingType != nil { + in, out := &in.SharingType, &out.SharingType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSharingSettingsObservation. 
+func (in *SpaceSharingSettingsObservation) DeepCopy() *SpaceSharingSettingsObservation { + if in == nil { + return nil + } + out := new(SpaceSharingSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSharingSettingsParameters) DeepCopyInto(out *SpaceSharingSettingsParameters) { + *out = *in + if in.SharingType != nil { + in, out := &in.SharingType, &out.SharingType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSharingSettingsParameters. +func (in *SpaceSharingSettingsParameters) DeepCopy() *SpaceSharingSettingsParameters { + if in == nil { + return nil + } + out := new(SpaceSharingSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceSpec) DeepCopyInto(out *SpaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceSpec. +func (in *SpaceSpec) DeepCopy() *SpaceSpec { + if in == nil { + return nil + } + out := new(SpaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceStatus) DeepCopyInto(out *SpaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceStatus. 
+func (in *SpaceStatus) DeepCopy() *SpaceStatus { + if in == nil { + return nil + } + out := new(SpaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceStorageSettingsDefaultEBSStorageSettingsInitParameters) DeepCopyInto(out *SpaceStorageSettingsDefaultEBSStorageSettingsInitParameters) { + *out = *in + if in.DefaultEBSVolumeSizeInGb != nil { + in, out := &in.DefaultEBSVolumeSizeInGb, &out.DefaultEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } + if in.MaximumEBSVolumeSizeInGb != nil { + in, out := &in.MaximumEBSVolumeSizeInGb, &out.MaximumEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceStorageSettingsDefaultEBSStorageSettingsInitParameters. +func (in *SpaceStorageSettingsDefaultEBSStorageSettingsInitParameters) DeepCopy() *SpaceStorageSettingsDefaultEBSStorageSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpaceStorageSettingsDefaultEBSStorageSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceStorageSettingsDefaultEBSStorageSettingsObservation) DeepCopyInto(out *SpaceStorageSettingsDefaultEBSStorageSettingsObservation) { + *out = *in + if in.DefaultEBSVolumeSizeInGb != nil { + in, out := &in.DefaultEBSVolumeSizeInGb, &out.DefaultEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } + if in.MaximumEBSVolumeSizeInGb != nil { + in, out := &in.MaximumEBSVolumeSizeInGb, &out.MaximumEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceStorageSettingsDefaultEBSStorageSettingsObservation. 
+func (in *SpaceStorageSettingsDefaultEBSStorageSettingsObservation) DeepCopy() *SpaceStorageSettingsDefaultEBSStorageSettingsObservation { + if in == nil { + return nil + } + out := new(SpaceStorageSettingsDefaultEBSStorageSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceStorageSettingsDefaultEBSStorageSettingsParameters) DeepCopyInto(out *SpaceStorageSettingsDefaultEBSStorageSettingsParameters) { + *out = *in + if in.DefaultEBSVolumeSizeInGb != nil { + in, out := &in.DefaultEBSVolumeSizeInGb, &out.DefaultEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } + if in.MaximumEBSVolumeSizeInGb != nil { + in, out := &in.MaximumEBSVolumeSizeInGb, &out.MaximumEBSVolumeSizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceStorageSettingsDefaultEBSStorageSettingsParameters. +func (in *SpaceStorageSettingsDefaultEBSStorageSettingsParameters) DeepCopy() *SpaceStorageSettingsDefaultEBSStorageSettingsParameters { + if in == nil { + return nil + } + out := new(SpaceStorageSettingsDefaultEBSStorageSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceStorageSettingsInitParameters) DeepCopyInto(out *SpaceStorageSettingsInitParameters) { + *out = *in + if in.DefaultEBSStorageSettings != nil { + in, out := &in.DefaultEBSStorageSettings, &out.DefaultEBSStorageSettings + *out = new(DefaultEBSStorageSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceStorageSettingsInitParameters. 
+func (in *SpaceStorageSettingsInitParameters) DeepCopy() *SpaceStorageSettingsInitParameters { + if in == nil { + return nil + } + out := new(SpaceStorageSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceStorageSettingsObservation) DeepCopyInto(out *SpaceStorageSettingsObservation) { + *out = *in + if in.DefaultEBSStorageSettings != nil { + in, out := &in.DefaultEBSStorageSettings, &out.DefaultEBSStorageSettings + *out = new(DefaultEBSStorageSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceStorageSettingsObservation. +func (in *SpaceStorageSettingsObservation) DeepCopy() *SpaceStorageSettingsObservation { + if in == nil { + return nil + } + out := new(SpaceStorageSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpaceStorageSettingsParameters) DeepCopyInto(out *SpaceStorageSettingsParameters) { + *out = *in + if in.DefaultEBSStorageSettings != nil { + in, out := &in.DefaultEBSStorageSettings, &out.DefaultEBSStorageSettings + *out = new(DefaultEBSStorageSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpaceStorageSettingsParameters. +func (in *SpaceStorageSettingsParameters) DeepCopy() *SpaceStorageSettingsParameters { + if in == nil { + return nil + } + out := new(SpaceStorageSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TTLDurationInitParameters) DeepCopyInto(out *TTLDurationInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLDurationInitParameters. +func (in *TTLDurationInitParameters) DeepCopy() *TTLDurationInitParameters { + if in == nil { + return nil + } + out := new(TTLDurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLDurationObservation) DeepCopyInto(out *TTLDurationObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLDurationObservation. +func (in *TTLDurationObservation) DeepCopy() *TTLDurationObservation { + if in == nil { + return nil + } + out := new(TTLDurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLDurationParameters) DeepCopyInto(out *TTLDurationParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLDurationParameters. 
+func (in *TTLDurationParameters) DeepCopy() *TTLDurationParameters { + if in == nil { + return nil + } + out := new(TTLDurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TensorBoardAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *TensorBoardAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TensorBoardAppSettingsDefaultResourceSpecInitParameters. +func (in *TensorBoardAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *TensorBoardAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(TensorBoardAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TensorBoardAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *TensorBoardAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TensorBoardAppSettingsDefaultResourceSpecObservation. +func (in *TensorBoardAppSettingsDefaultResourceSpecObservation) DeepCopy() *TensorBoardAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(TensorBoardAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TensorBoardAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *TensorBoardAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TensorBoardAppSettingsDefaultResourceSpecParameters. +func (in *TensorBoardAppSettingsDefaultResourceSpecParameters) DeepCopy() *TensorBoardAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(TensorBoardAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TensorBoardAppSettingsInitParameters) DeepCopyInto(out *TensorBoardAppSettingsInitParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(TensorBoardAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TensorBoardAppSettingsInitParameters. 
+func (in *TensorBoardAppSettingsInitParameters) DeepCopy() *TensorBoardAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(TensorBoardAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TensorBoardAppSettingsObservation) DeepCopyInto(out *TensorBoardAppSettingsObservation) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(TensorBoardAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TensorBoardAppSettingsObservation. +func (in *TensorBoardAppSettingsObservation) DeepCopy() *TensorBoardAppSettingsObservation { + if in == nil { + return nil + } + out := new(TensorBoardAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TensorBoardAppSettingsParameters) DeepCopyInto(out *TensorBoardAppSettingsParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(TensorBoardAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TensorBoardAppSettingsParameters. +func (in *TensorBoardAppSettingsParameters) DeepCopy() *TensorBoardAppSettingsParameters { + if in == nil { + return nil + } + out := new(TensorBoardAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeSeriesForecastingSettingsInitParameters) DeepCopyInto(out *TimeSeriesForecastingSettingsInitParameters) { + *out = *in + if in.AmazonForecastRoleArn != nil { + in, out := &in.AmazonForecastRoleArn, &out.AmazonForecastRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeSeriesForecastingSettingsInitParameters. +func (in *TimeSeriesForecastingSettingsInitParameters) DeepCopy() *TimeSeriesForecastingSettingsInitParameters { + if in == nil { + return nil + } + out := new(TimeSeriesForecastingSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeSeriesForecastingSettingsObservation) DeepCopyInto(out *TimeSeriesForecastingSettingsObservation) { + *out = *in + if in.AmazonForecastRoleArn != nil { + in, out := &in.AmazonForecastRoleArn, &out.AmazonForecastRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeSeriesForecastingSettingsObservation. +func (in *TimeSeriesForecastingSettingsObservation) DeepCopy() *TimeSeriesForecastingSettingsObservation { + if in == nil { + return nil + } + out := new(TimeSeriesForecastingSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimeSeriesForecastingSettingsParameters) DeepCopyInto(out *TimeSeriesForecastingSettingsParameters) { + *out = *in + if in.AmazonForecastRoleArn != nil { + in, out := &in.AmazonForecastRoleArn, &out.AmazonForecastRoleArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeSeriesForecastingSettingsParameters. +func (in *TimeSeriesForecastingSettingsParameters) DeepCopy() *TimeSeriesForecastingSettingsParameters { + if in == nil { + return nil + } + out := new(TimeSeriesForecastingSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficRoutingConfigurationInitParameters) DeepCopyInto(out *TrafficRoutingConfigurationInitParameters) { + *out = *in + if in.CanarySize != nil { + in, out := &in.CanarySize, &out.CanarySize + *out = new(CanarySizeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LinearStepSize != nil { + in, out := &in.LinearStepSize, &out.LinearStepSize + *out = new(LinearStepSizeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WaitIntervalInSeconds != nil { + in, out := &in.WaitIntervalInSeconds, &out.WaitIntervalInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficRoutingConfigurationInitParameters. 
+func (in *TrafficRoutingConfigurationInitParameters) DeepCopy() *TrafficRoutingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(TrafficRoutingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficRoutingConfigurationObservation) DeepCopyInto(out *TrafficRoutingConfigurationObservation) { + *out = *in + if in.CanarySize != nil { + in, out := &in.CanarySize, &out.CanarySize + *out = new(CanarySizeObservation) + (*in).DeepCopyInto(*out) + } + if in.LinearStepSize != nil { + in, out := &in.LinearStepSize, &out.LinearStepSize + *out = new(LinearStepSizeObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WaitIntervalInSeconds != nil { + in, out := &in.WaitIntervalInSeconds, &out.WaitIntervalInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficRoutingConfigurationObservation. +func (in *TrafficRoutingConfigurationObservation) DeepCopy() *TrafficRoutingConfigurationObservation { + if in == nil { + return nil + } + out := new(TrafficRoutingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrafficRoutingConfigurationParameters) DeepCopyInto(out *TrafficRoutingConfigurationParameters) { + *out = *in + if in.CanarySize != nil { + in, out := &in.CanarySize, &out.CanarySize + *out = new(CanarySizeParameters) + (*in).DeepCopyInto(*out) + } + if in.LinearStepSize != nil { + in, out := &in.LinearStepSize, &out.LinearStepSize + *out = new(LinearStepSizeParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.WaitIntervalInSeconds != nil { + in, out := &in.WaitIntervalInSeconds, &out.WaitIntervalInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficRoutingConfigurationParameters. +func (in *TrafficRoutingConfigurationParameters) DeepCopy() *TrafficRoutingConfigurationParameters { + if in == nil { + return nil + } + out := new(TrafficRoutingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserProfile) DeepCopyInto(out *UserProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserProfile. +func (in *UserProfile) DeepCopy() *UserProfile { + if in == nil { + return nil + } + out := new(UserProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserProfileInitParameters) DeepCopyInto(out *UserProfileInitParameters) { + *out = *in + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.DomainIDRef != nil { + in, out := &in.DomainIDRef, &out.DomainIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainIDSelector != nil { + in, out := &in.DomainIDSelector, &out.DomainIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SingleSignOnUserIdentifier != nil { + in, out := &in.SingleSignOnUserIdentifier, &out.SingleSignOnUserIdentifier + *out = new(string) + **out = **in + } + if in.SingleSignOnUserValue != nil { + in, out := &in.SingleSignOnUserValue, &out.SingleSignOnUserValue + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserProfileName != nil { + in, out := &in.UserProfileName, &out.UserProfileName + *out = new(string) + **out = **in + } + if in.UserSettings != nil { + in, out := &in.UserSettings, &out.UserSettings + *out = new(UserSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserProfileInitParameters. +func (in *UserProfileInitParameters) DeepCopy() *UserProfileInitParameters { + if in == nil { + return nil + } + out := new(UserProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserProfileList) DeepCopyInto(out *UserProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]UserProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserProfileList. +func (in *UserProfileList) DeepCopy() *UserProfileList { + if in == nil { + return nil + } + out := new(UserProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserProfileObservation) DeepCopyInto(out *UserProfileObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.HomeEFSFileSystemUID != nil { + in, out := &in.HomeEFSFileSystemUID, &out.HomeEFSFileSystemUID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.SingleSignOnUserIdentifier != nil { + in, out := &in.SingleSignOnUserIdentifier, &out.SingleSignOnUserIdentifier + *out = new(string) + **out = **in + } + if in.SingleSignOnUserValue != nil { + in, out := &in.SingleSignOnUserValue, &out.SingleSignOnUserValue + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + 
in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserProfileName != nil { + in, out := &in.UserProfileName, &out.UserProfileName + *out = new(string) + **out = **in + } + if in.UserSettings != nil { + in, out := &in.UserSettings, &out.UserSettings + *out = new(UserSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserProfileObservation. +func (in *UserProfileObservation) DeepCopy() *UserProfileObservation { + if in == nil { + return nil + } + out := new(UserProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserProfileParameters) DeepCopyInto(out *UserProfileParameters) { + *out = *in + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.DomainIDRef != nil { + in, out := &in.DomainIDRef, &out.DomainIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DomainIDSelector != nil { + in, out := &in.DomainIDSelector, &out.DomainIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SingleSignOnUserIdentifier != nil { + in, out := &in.SingleSignOnUserIdentifier, &out.SingleSignOnUserIdentifier + *out = new(string) + **out = **in + } + if in.SingleSignOnUserValue != nil { + in, out := &in.SingleSignOnUserValue, &out.SingleSignOnUserValue + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserProfileName != nil { + in, out := &in.UserProfileName, &out.UserProfileName + *out = new(string) + **out = **in + } + if in.UserSettings != nil { + in, out := &in.UserSettings, &out.UserSettings + *out = new(UserSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserProfileParameters. +func (in *UserProfileParameters) DeepCopy() *UserProfileParameters { + if in == nil { + return nil + } + out := new(UserProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserProfileSpec) DeepCopyInto(out *UserProfileSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserProfileSpec. +func (in *UserProfileSpec) DeepCopy() *UserProfileSpec { + if in == nil { + return nil + } + out := new(UserProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserProfileStatus) DeepCopyInto(out *UserProfileStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserProfileStatus. +func (in *UserProfileStatus) DeepCopy() *UserProfileStatus { + if in == nil { + return nil + } + out := new(UserProfileStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCanvasAppSettingsInitParameters) DeepCopyInto(out *UserSettingsCanvasAppSettingsInitParameters) { + *out = *in + if in.DirectDeploySettings != nil { + in, out := &in.DirectDeploySettings, &out.DirectDeploySettings + *out = new(CanvasAppSettingsDirectDeploySettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IdentityProviderOauthSettings != nil { + in, out := &in.IdentityProviderOauthSettings, &out.IdentityProviderOauthSettings + *out = make([]CanvasAppSettingsIdentityProviderOauthSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KendraSettings != nil { + in, out := &in.KendraSettings, &out.KendraSettings + *out = new(CanvasAppSettingsKendraSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ModelRegisterSettings != nil { + in, out := &in.ModelRegisterSettings, &out.ModelRegisterSettings + *out = new(CanvasAppSettingsModelRegisterSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeSeriesForecastingSettings != nil { + in, out := &in.TimeSeriesForecastingSettings, &out.TimeSeriesForecastingSettings + *out = new(CanvasAppSettingsTimeSeriesForecastingSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceSettings != nil { + in, out := &in.WorkspaceSettings, &out.WorkspaceSettings + *out = new(CanvasAppSettingsWorkspaceSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCanvasAppSettingsInitParameters. +func (in *UserSettingsCanvasAppSettingsInitParameters) DeepCopy() *UserSettingsCanvasAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsCanvasAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCanvasAppSettingsObservation) DeepCopyInto(out *UserSettingsCanvasAppSettingsObservation) { + *out = *in + if in.DirectDeploySettings != nil { + in, out := &in.DirectDeploySettings, &out.DirectDeploySettings + *out = new(CanvasAppSettingsDirectDeploySettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.IdentityProviderOauthSettings != nil { + in, out := &in.IdentityProviderOauthSettings, &out.IdentityProviderOauthSettings + *out = make([]CanvasAppSettingsIdentityProviderOauthSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KendraSettings != nil { + in, out := &in.KendraSettings, &out.KendraSettings + *out = new(CanvasAppSettingsKendraSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ModelRegisterSettings != nil { + in, out := &in.ModelRegisterSettings, &out.ModelRegisterSettings + *out = new(CanvasAppSettingsModelRegisterSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.TimeSeriesForecastingSettings != nil { + in, out := &in.TimeSeriesForecastingSettings, &out.TimeSeriesForecastingSettings + *out = new(CanvasAppSettingsTimeSeriesForecastingSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceSettings != nil { + in, out := &in.WorkspaceSettings, &out.WorkspaceSettings + *out = new(CanvasAppSettingsWorkspaceSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCanvasAppSettingsObservation. +func (in *UserSettingsCanvasAppSettingsObservation) DeepCopy() *UserSettingsCanvasAppSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsCanvasAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCanvasAppSettingsParameters) DeepCopyInto(out *UserSettingsCanvasAppSettingsParameters) { + *out = *in + if in.DirectDeploySettings != nil { + in, out := &in.DirectDeploySettings, &out.DirectDeploySettings + *out = new(CanvasAppSettingsDirectDeploySettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.IdentityProviderOauthSettings != nil { + in, out := &in.IdentityProviderOauthSettings, &out.IdentityProviderOauthSettings + *out = make([]CanvasAppSettingsIdentityProviderOauthSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KendraSettings != nil { + in, out := &in.KendraSettings, &out.KendraSettings + *out = new(CanvasAppSettingsKendraSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ModelRegisterSettings != nil { + in, out := &in.ModelRegisterSettings, &out.ModelRegisterSettings + *out = new(CanvasAppSettingsModelRegisterSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeSeriesForecastingSettings != nil { + in, out := &in.TimeSeriesForecastingSettings, &out.TimeSeriesForecastingSettings + *out = new(CanvasAppSettingsTimeSeriesForecastingSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceSettings != nil { + in, out := &in.WorkspaceSettings, &out.WorkspaceSettings + *out = new(CanvasAppSettingsWorkspaceSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCanvasAppSettingsParameters. +func (in *UserSettingsCanvasAppSettingsParameters) DeepCopy() *UserSettingsCanvasAppSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsCanvasAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *UserSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters. +func (in *UserSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *UserSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *UserSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCodeEditorAppSettingsDefaultResourceSpecObservation. +func (in *UserSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) DeepCopy() *UserSettingsCodeEditorAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(UserSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *UserSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCodeEditorAppSettingsDefaultResourceSpecParameters. +func (in *UserSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) DeepCopy() *UserSettingsCodeEditorAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(UserSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCodeEditorAppSettingsInitParameters) DeepCopyInto(out *UserSettingsCodeEditorAppSettingsInitParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCodeEditorAppSettingsInitParameters. +func (in *UserSettingsCodeEditorAppSettingsInitParameters) DeepCopy() *UserSettingsCodeEditorAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsCodeEditorAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsCodeEditorAppSettingsObservation) DeepCopyInto(out *UserSettingsCodeEditorAppSettingsObservation) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsCodeEditorAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCodeEditorAppSettingsObservation. 
+func (in *UserSettingsCodeEditorAppSettingsObservation) DeepCopy() *UserSettingsCodeEditorAppSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsCodeEditorAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsCodeEditorAppSettingsParameters) DeepCopyInto(out *UserSettingsCodeEditorAppSettingsParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsCodeEditorAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCodeEditorAppSettingsParameters. +func (in *UserSettingsCodeEditorAppSettingsParameters) DeepCopy() *UserSettingsCodeEditorAppSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsCodeEditorAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCustomFileSystemConfigInitParameters) DeepCopyInto(out *UserSettingsCustomFileSystemConfigInitParameters) { + *out = *in + if in.EFSFileSystemConfig != nil { + in, out := &in.EFSFileSystemConfig, &out.EFSFileSystemConfig + *out = make([]CustomFileSystemConfigEFSFileSystemConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCustomFileSystemConfigInitParameters. +func (in *UserSettingsCustomFileSystemConfigInitParameters) DeepCopy() *UserSettingsCustomFileSystemConfigInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsCustomFileSystemConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsCustomFileSystemConfigObservation) DeepCopyInto(out *UserSettingsCustomFileSystemConfigObservation) { + *out = *in + if in.EFSFileSystemConfig != nil { + in, out := &in.EFSFileSystemConfig, &out.EFSFileSystemConfig + *out = make([]CustomFileSystemConfigEFSFileSystemConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCustomFileSystemConfigObservation. +func (in *UserSettingsCustomFileSystemConfigObservation) DeepCopy() *UserSettingsCustomFileSystemConfigObservation { + if in == nil { + return nil + } + out := new(UserSettingsCustomFileSystemConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCustomFileSystemConfigParameters) DeepCopyInto(out *UserSettingsCustomFileSystemConfigParameters) { + *out = *in + if in.EFSFileSystemConfig != nil { + in, out := &in.EFSFileSystemConfig, &out.EFSFileSystemConfig + *out = make([]CustomFileSystemConfigEFSFileSystemConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCustomFileSystemConfigParameters. +func (in *UserSettingsCustomFileSystemConfigParameters) DeepCopy() *UserSettingsCustomFileSystemConfigParameters { + if in == nil { + return nil + } + out := new(UserSettingsCustomFileSystemConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsCustomPosixUserConfigInitParameters) DeepCopyInto(out *UserSettingsCustomPosixUserConfigInitParameters) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCustomPosixUserConfigInitParameters. +func (in *UserSettingsCustomPosixUserConfigInitParameters) DeepCopy() *UserSettingsCustomPosixUserConfigInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsCustomPosixUserConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsCustomPosixUserConfigObservation) DeepCopyInto(out *UserSettingsCustomPosixUserConfigObservation) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCustomPosixUserConfigObservation. +func (in *UserSettingsCustomPosixUserConfigObservation) DeepCopy() *UserSettingsCustomPosixUserConfigObservation { + if in == nil { + return nil + } + out := new(UserSettingsCustomPosixUserConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsCustomPosixUserConfigParameters) DeepCopyInto(out *UserSettingsCustomPosixUserConfigParameters) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsCustomPosixUserConfigParameters. +func (in *UserSettingsCustomPosixUserConfigParameters) DeepCopy() *UserSettingsCustomPosixUserConfigParameters { + if in == nil { + return nil + } + out := new(UserSettingsCustomPosixUserConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsInitParameters) DeepCopyInto(out *UserSettingsInitParameters) { + *out = *in + if in.CanvasAppSettings != nil { + in, out := &in.CanvasAppSettings, &out.CanvasAppSettings + *out = new(UserSettingsCanvasAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CodeEditorAppSettings != nil { + in, out := &in.CodeEditorAppSettings, &out.CodeEditorAppSettings + *out = new(UserSettingsCodeEditorAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomFileSystemConfig != nil { + in, out := &in.CustomFileSystemConfig, &out.CustomFileSystemConfig + *out = make([]UserSettingsCustomFileSystemConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomPosixUserConfig != nil { + in, out := &in.CustomPosixUserConfig, &out.CustomPosixUserConfig + *out = new(UserSettingsCustomPosixUserConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultLandingURI != nil { + in, out := &in.DefaultLandingURI, &out.DefaultLandingURI + *out = new(string) + **out = **in + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.JupyterLabAppSettings != nil { + in, out := &in.JupyterLabAppSettings, &out.JupyterLabAppSettings + *out = new(UserSettingsJupyterLabAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(UserSettingsJupyterServerAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(UserSettingsKernelGatewayAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RSessionAppSettings != nil { + in, out := &in.RSessionAppSettings, &out.RSessionAppSettings + *out = new(UserSettingsRSessionAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + 
} + if in.RStudioServerProAppSettings != nil { + in, out := &in.RStudioServerProAppSettings, &out.RStudioServerProAppSettings + *out = new(UserSettingsRStudioServerProAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SharingSettings != nil { + in, out := &in.SharingSettings, &out.SharingSettings + *out = new(UserSettingsSharingSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceStorageSettings != nil { + in, out := &in.SpaceStorageSettings, &out.SpaceStorageSettings + *out = new(UserSettingsSpaceStorageSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StudioWebPortal != nil { + in, out := &in.StudioWebPortal, &out.StudioWebPortal + *out = new(string) + **out = **in + } + if in.TensorBoardAppSettings != nil { + in, out := &in.TensorBoardAppSettings, &out.TensorBoardAppSettings + *out = new(UserSettingsTensorBoardAppSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsInitParameters. +func (in *UserSettingsInitParameters) DeepCopy() *UserSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterLabAppSettingsCodeRepositoryInitParameters) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsCodeRepositoryInitParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsCodeRepositoryInitParameters. +func (in *UserSettingsJupyterLabAppSettingsCodeRepositoryInitParameters) DeepCopy() *UserSettingsJupyterLabAppSettingsCodeRepositoryInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsCodeRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsJupyterLabAppSettingsCodeRepositoryObservation) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsCodeRepositoryObservation) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsCodeRepositoryObservation. +func (in *UserSettingsJupyterLabAppSettingsCodeRepositoryObservation) DeepCopy() *UserSettingsJupyterLabAppSettingsCodeRepositoryObservation { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsCodeRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterLabAppSettingsCodeRepositoryParameters) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsCodeRepositoryParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsCodeRepositoryParameters. +func (in *UserSettingsJupyterLabAppSettingsCodeRepositoryParameters) DeepCopy() *UserSettingsJupyterLabAppSettingsCodeRepositoryParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsCodeRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsJupyterLabAppSettingsCustomImageInitParameters) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsCustomImageInitParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsCustomImageInitParameters. +func (in *UserSettingsJupyterLabAppSettingsCustomImageInitParameters) DeepCopy() *UserSettingsJupyterLabAppSettingsCustomImageInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsCustomImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterLabAppSettingsCustomImageObservation) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsCustomImageObservation) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsCustomImageObservation. +func (in *UserSettingsJupyterLabAppSettingsCustomImageObservation) DeepCopy() *UserSettingsJupyterLabAppSettingsCustomImageObservation { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsCustomImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsJupyterLabAppSettingsCustomImageParameters) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsCustomImageParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsCustomImageParameters. 
+func (in *UserSettingsJupyterLabAppSettingsCustomImageParameters) DeepCopy() *UserSettingsJupyterLabAppSettingsCustomImageParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsCustomImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters. +func (in *UserSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *UserSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsDefaultResourceSpecObservation. +func (in *UserSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) DeepCopy() *UserSettingsJupyterLabAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsDefaultResourceSpecParameters. +func (in *UserSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) DeepCopy() *UserSettingsJupyterLabAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterLabAppSettingsInitParameters) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsInitParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]UserSettingsJupyterLabAppSettingsCodeRepositoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]UserSettingsJupyterLabAppSettingsCustomImageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsInitParameters. +func (in *UserSettingsJupyterLabAppSettingsInitParameters) DeepCopy() *UserSettingsJupyterLabAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterLabAppSettingsObservation) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsObservation) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]UserSettingsJupyterLabAppSettingsCodeRepositoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]UserSettingsJupyterLabAppSettingsCustomImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsJupyterLabAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsObservation. +func (in *UserSettingsJupyterLabAppSettingsObservation) DeepCopy() *UserSettingsJupyterLabAppSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterLabAppSettingsParameters) DeepCopyInto(out *UserSettingsJupyterLabAppSettingsParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]UserSettingsJupyterLabAppSettingsCodeRepositoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]UserSettingsJupyterLabAppSettingsCustomImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsJupyterLabAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterLabAppSettingsParameters. +func (in *UserSettingsJupyterLabAppSettingsParameters) DeepCopy() *UserSettingsJupyterLabAppSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterLabAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) DeepCopyInto(out *UserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters. +func (in *UserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) DeepCopy() *UserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsJupyterServerAppSettingsCodeRepositoryObservation) DeepCopyInto(out *UserSettingsJupyterServerAppSettingsCodeRepositoryObservation) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterServerAppSettingsCodeRepositoryObservation. +func (in *UserSettingsJupyterServerAppSettingsCodeRepositoryObservation) DeepCopy() *UserSettingsJupyterServerAppSettingsCodeRepositoryObservation { + if in == nil { + return nil + } + out := new(UserSettingsJupyterServerAppSettingsCodeRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterServerAppSettingsCodeRepositoryParameters) DeepCopyInto(out *UserSettingsJupyterServerAppSettingsCodeRepositoryParameters) { + *out = *in + if in.RepositoryURL != nil { + in, out := &in.RepositoryURL, &out.RepositoryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterServerAppSettingsCodeRepositoryParameters. +func (in *UserSettingsJupyterServerAppSettingsCodeRepositoryParameters) DeepCopy() *UserSettingsJupyterServerAppSettingsCodeRepositoryParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterServerAppSettingsCodeRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *UserSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters. 
+func (in *UserSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *UserSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *UserSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterServerAppSettingsDefaultResourceSpecObservation. +func (in *UserSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) DeepCopy() *UserSettingsJupyterServerAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(UserSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *UserSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterServerAppSettingsDefaultResourceSpecParameters. +func (in *UserSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) DeepCopy() *UserSettingsJupyterServerAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterServerAppSettingsInitParameters) DeepCopyInto(out *UserSettingsJupyterServerAppSettingsInitParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]UserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterServerAppSettingsInitParameters. +func (in *UserSettingsJupyterServerAppSettingsInitParameters) DeepCopy() *UserSettingsJupyterServerAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterServerAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterServerAppSettingsObservation) DeepCopyInto(out *UserSettingsJupyterServerAppSettingsObservation) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]UserSettingsJupyterServerAppSettingsCodeRepositoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsJupyterServerAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterServerAppSettingsObservation. +func (in *UserSettingsJupyterServerAppSettingsObservation) DeepCopy() *UserSettingsJupyterServerAppSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsJupyterServerAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsJupyterServerAppSettingsParameters) DeepCopyInto(out *UserSettingsJupyterServerAppSettingsParameters) { + *out = *in + if in.CodeRepository != nil { + in, out := &in.CodeRepository, &out.CodeRepository + *out = make([]UserSettingsJupyterServerAppSettingsCodeRepositoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsJupyterServerAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsJupyterServerAppSettingsParameters. +func (in *UserSettingsJupyterServerAppSettingsParameters) DeepCopy() *UserSettingsJupyterServerAppSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsJupyterServerAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsKernelGatewayAppSettingsCustomImageInitParameters) DeepCopyInto(out *UserSettingsKernelGatewayAppSettingsCustomImageInitParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsKernelGatewayAppSettingsCustomImageInitParameters. +func (in *UserSettingsKernelGatewayAppSettingsCustomImageInitParameters) DeepCopy() *UserSettingsKernelGatewayAppSettingsCustomImageInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsKernelGatewayAppSettingsCustomImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsKernelGatewayAppSettingsCustomImageObservation) DeepCopyInto(out *UserSettingsKernelGatewayAppSettingsCustomImageObservation) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsKernelGatewayAppSettingsCustomImageObservation. 
+func (in *UserSettingsKernelGatewayAppSettingsCustomImageObservation) DeepCopy() *UserSettingsKernelGatewayAppSettingsCustomImageObservation { + if in == nil { + return nil + } + out := new(UserSettingsKernelGatewayAppSettingsCustomImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsKernelGatewayAppSettingsCustomImageParameters) DeepCopyInto(out *UserSettingsKernelGatewayAppSettingsCustomImageParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsKernelGatewayAppSettingsCustomImageParameters. +func (in *UserSettingsKernelGatewayAppSettingsCustomImageParameters) DeepCopy() *UserSettingsKernelGatewayAppSettingsCustomImageParameters { + if in == nil { + return nil + } + out := new(UserSettingsKernelGatewayAppSettingsCustomImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters. +func (in *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation. +func (in *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) DeepCopy() *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(UserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters. +func (in *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) DeepCopy() *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(UserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsKernelGatewayAppSettingsInitParameters) DeepCopyInto(out *UserSettingsKernelGatewayAppSettingsInitParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]UserSettingsKernelGatewayAppSettingsCustomImageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsKernelGatewayAppSettingsInitParameters. +func (in *UserSettingsKernelGatewayAppSettingsInitParameters) DeepCopy() *UserSettingsKernelGatewayAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsKernelGatewayAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsKernelGatewayAppSettingsObservation) DeepCopyInto(out *UserSettingsKernelGatewayAppSettingsObservation) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]UserSettingsKernelGatewayAppSettingsCustomImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsKernelGatewayAppSettingsObservation. +func (in *UserSettingsKernelGatewayAppSettingsObservation) DeepCopy() *UserSettingsKernelGatewayAppSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsKernelGatewayAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsKernelGatewayAppSettingsParameters) DeepCopyInto(out *UserSettingsKernelGatewayAppSettingsParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]UserSettingsKernelGatewayAppSettingsCustomImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } + if in.LifecycleConfigArns != nil { + in, out := &in.LifecycleConfigArns, &out.LifecycleConfigArns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsKernelGatewayAppSettingsParameters. +func (in *UserSettingsKernelGatewayAppSettingsParameters) DeepCopy() *UserSettingsKernelGatewayAppSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsKernelGatewayAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsObservation) DeepCopyInto(out *UserSettingsObservation) { + *out = *in + if in.CanvasAppSettings != nil { + in, out := &in.CanvasAppSettings, &out.CanvasAppSettings + *out = new(UserSettingsCanvasAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.CodeEditorAppSettings != nil { + in, out := &in.CodeEditorAppSettings, &out.CodeEditorAppSettings + *out = new(UserSettingsCodeEditorAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomFileSystemConfig != nil { + in, out := &in.CustomFileSystemConfig, &out.CustomFileSystemConfig + *out = make([]UserSettingsCustomFileSystemConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomPosixUserConfig != nil { + in, out := &in.CustomPosixUserConfig, &out.CustomPosixUserConfig + *out = new(UserSettingsCustomPosixUserConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultLandingURI != nil { + in, out := &in.DefaultLandingURI, &out.DefaultLandingURI + *out = new(string) + **out = **in + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.JupyterLabAppSettings != nil { + in, out := &in.JupyterLabAppSettings, &out.JupyterLabAppSettings + *out = new(UserSettingsJupyterLabAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(UserSettingsJupyterServerAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(UserSettingsKernelGatewayAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.RSessionAppSettings != nil { + in, out := &in.RSessionAppSettings, &out.RSessionAppSettings + *out = new(UserSettingsRSessionAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if 
in.RStudioServerProAppSettings != nil { + in, out := &in.RStudioServerProAppSettings, &out.RStudioServerProAppSettings + *out = new(UserSettingsRStudioServerProAppSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SharingSettings != nil { + in, out := &in.SharingSettings, &out.SharingSettings + *out = new(UserSettingsSharingSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SpaceStorageSettings != nil { + in, out := &in.SpaceStorageSettings, &out.SpaceStorageSettings + *out = new(UserSettingsSpaceStorageSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.StudioWebPortal != nil { + in, out := &in.StudioWebPortal, &out.StudioWebPortal + *out = new(string) + **out = **in + } + if in.TensorBoardAppSettings != nil { + in, out := &in.TensorBoardAppSettings, &out.TensorBoardAppSettings + *out = new(UserSettingsTensorBoardAppSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsObservation. +func (in *UserSettingsObservation) DeepCopy() *UserSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsParameters) DeepCopyInto(out *UserSettingsParameters) { + *out = *in + if in.CanvasAppSettings != nil { + in, out := &in.CanvasAppSettings, &out.CanvasAppSettings + *out = new(UserSettingsCanvasAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.CodeEditorAppSettings != nil { + in, out := &in.CodeEditorAppSettings, &out.CodeEditorAppSettings + *out = new(UserSettingsCodeEditorAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomFileSystemConfig != nil { + in, out := &in.CustomFileSystemConfig, &out.CustomFileSystemConfig + *out = make([]UserSettingsCustomFileSystemConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomPosixUserConfig != nil { + in, out := &in.CustomPosixUserConfig, &out.CustomPosixUserConfig + *out = new(UserSettingsCustomPosixUserConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultLandingURI != nil { + in, out := &in.DefaultLandingURI, &out.DefaultLandingURI + *out = new(string) + **out = **in + } + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.JupyterLabAppSettings != nil { + in, out := &in.JupyterLabAppSettings, &out.JupyterLabAppSettings + *out = new(UserSettingsJupyterLabAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.JupyterServerAppSettings != nil { + in, out := &in.JupyterServerAppSettings, &out.JupyterServerAppSettings + *out = new(UserSettingsJupyterServerAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.KernelGatewayAppSettings != nil { + in, out := &in.KernelGatewayAppSettings, &out.KernelGatewayAppSettings + *out = new(UserSettingsKernelGatewayAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.RSessionAppSettings != nil { + in, out := &in.RSessionAppSettings, &out.RSessionAppSettings + *out = new(UserSettingsRSessionAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.RStudioServerProAppSettings != 
nil { + in, out := &in.RStudioServerProAppSettings, &out.RStudioServerProAppSettings + *out = new(UserSettingsRStudioServerProAppSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SharingSettings != nil { + in, out := &in.SharingSettings, &out.SharingSettings + *out = new(UserSettingsSharingSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SpaceStorageSettings != nil { + in, out := &in.SpaceStorageSettings, &out.SpaceStorageSettings + *out = new(UserSettingsSpaceStorageSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.StudioWebPortal != nil { + in, out := &in.StudioWebPortal, &out.StudioWebPortal + *out = new(string) + **out = **in + } + if in.TensorBoardAppSettings != nil { + in, out := &in.TensorBoardAppSettings, &out.TensorBoardAppSettings + *out = new(UserSettingsTensorBoardAppSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsParameters. +func (in *UserSettingsParameters) DeepCopy() *UserSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsRSessionAppSettingsCustomImageInitParameters) DeepCopyInto(out *UserSettingsRSessionAppSettingsCustomImageInitParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRSessionAppSettingsCustomImageInitParameters. +func (in *UserSettingsRSessionAppSettingsCustomImageInitParameters) DeepCopy() *UserSettingsRSessionAppSettingsCustomImageInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsRSessionAppSettingsCustomImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsRSessionAppSettingsCustomImageObservation) DeepCopyInto(out *UserSettingsRSessionAppSettingsCustomImageObservation) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRSessionAppSettingsCustomImageObservation. 
+func (in *UserSettingsRSessionAppSettingsCustomImageObservation) DeepCopy() *UserSettingsRSessionAppSettingsCustomImageObservation { + if in == nil { + return nil + } + out := new(UserSettingsRSessionAppSettingsCustomImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsRSessionAppSettingsCustomImageParameters) DeepCopyInto(out *UserSettingsRSessionAppSettingsCustomImageParameters) { + *out = *in + if in.AppImageConfigName != nil { + in, out := &in.AppImageConfigName, &out.AppImageConfigName + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageVersionNumber != nil { + in, out := &in.ImageVersionNumber, &out.ImageVersionNumber + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRSessionAppSettingsCustomImageParameters. +func (in *UserSettingsRSessionAppSettingsCustomImageParameters) DeepCopy() *UserSettingsRSessionAppSettingsCustomImageParameters { + if in == nil { + return nil + } + out := new(UserSettingsRSessionAppSettingsCustomImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsRSessionAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *UserSettingsRSessionAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRSessionAppSettingsDefaultResourceSpecInitParameters. +func (in *UserSettingsRSessionAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *UserSettingsRSessionAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsRSessionAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsRSessionAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *UserSettingsRSessionAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRSessionAppSettingsDefaultResourceSpecObservation. +func (in *UserSettingsRSessionAppSettingsDefaultResourceSpecObservation) DeepCopy() *UserSettingsRSessionAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(UserSettingsRSessionAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsRSessionAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *UserSettingsRSessionAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRSessionAppSettingsDefaultResourceSpecParameters. +func (in *UserSettingsRSessionAppSettingsDefaultResourceSpecParameters) DeepCopy() *UserSettingsRSessionAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(UserSettingsRSessionAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsRSessionAppSettingsInitParameters) DeepCopyInto(out *UserSettingsRSessionAppSettingsInitParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]UserSettingsRSessionAppSettingsCustomImageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsRSessionAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRSessionAppSettingsInitParameters. +func (in *UserSettingsRSessionAppSettingsInitParameters) DeepCopy() *UserSettingsRSessionAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsRSessionAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsRSessionAppSettingsObservation) DeepCopyInto(out *UserSettingsRSessionAppSettingsObservation) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]UserSettingsRSessionAppSettingsCustomImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsRSessionAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRSessionAppSettingsObservation. 
+func (in *UserSettingsRSessionAppSettingsObservation) DeepCopy() *UserSettingsRSessionAppSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsRSessionAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsRSessionAppSettingsParameters) DeepCopyInto(out *UserSettingsRSessionAppSettingsParameters) { + *out = *in + if in.CustomImage != nil { + in, out := &in.CustomImage, &out.CustomImage + *out = make([]UserSettingsRSessionAppSettingsCustomImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsRSessionAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRSessionAppSettingsParameters. +func (in *UserSettingsRSessionAppSettingsParameters) DeepCopy() *UserSettingsRSessionAppSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsRSessionAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsRStudioServerProAppSettingsInitParameters) DeepCopyInto(out *UserSettingsRStudioServerProAppSettingsInitParameters) { + *out = *in + if in.AccessStatus != nil { + in, out := &in.AccessStatus, &out.AccessStatus + *out = new(string) + **out = **in + } + if in.UserGroup != nil { + in, out := &in.UserGroup, &out.UserGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRStudioServerProAppSettingsInitParameters. 
+func (in *UserSettingsRStudioServerProAppSettingsInitParameters) DeepCopy() *UserSettingsRStudioServerProAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsRStudioServerProAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsRStudioServerProAppSettingsObservation) DeepCopyInto(out *UserSettingsRStudioServerProAppSettingsObservation) { + *out = *in + if in.AccessStatus != nil { + in, out := &in.AccessStatus, &out.AccessStatus + *out = new(string) + **out = **in + } + if in.UserGroup != nil { + in, out := &in.UserGroup, &out.UserGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRStudioServerProAppSettingsObservation. +func (in *UserSettingsRStudioServerProAppSettingsObservation) DeepCopy() *UserSettingsRStudioServerProAppSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsRStudioServerProAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsRStudioServerProAppSettingsParameters) DeepCopyInto(out *UserSettingsRStudioServerProAppSettingsParameters) { + *out = *in + if in.AccessStatus != nil { + in, out := &in.AccessStatus, &out.AccessStatus + *out = new(string) + **out = **in + } + if in.UserGroup != nil { + in, out := &in.UserGroup, &out.UserGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsRStudioServerProAppSettingsParameters. 
+func (in *UserSettingsRStudioServerProAppSettingsParameters) DeepCopy() *UserSettingsRStudioServerProAppSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsRStudioServerProAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsSharingSettingsInitParameters) DeepCopyInto(out *UserSettingsSharingSettingsInitParameters) { + *out = *in + if in.NotebookOutputOption != nil { + in, out := &in.NotebookOutputOption, &out.NotebookOutputOption + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } + if in.S3OutputPath != nil { + in, out := &in.S3OutputPath, &out.S3OutputPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsSharingSettingsInitParameters. +func (in *UserSettingsSharingSettingsInitParameters) DeepCopy() *UserSettingsSharingSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsSharingSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsSharingSettingsObservation) DeepCopyInto(out *UserSettingsSharingSettingsObservation) { + *out = *in + if in.NotebookOutputOption != nil { + in, out := &in.NotebookOutputOption, &out.NotebookOutputOption + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } + if in.S3OutputPath != nil { + in, out := &in.S3OutputPath, &out.S3OutputPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsSharingSettingsObservation. +func (in *UserSettingsSharingSettingsObservation) DeepCopy() *UserSettingsSharingSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsSharingSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsSharingSettingsParameters) DeepCopyInto(out *UserSettingsSharingSettingsParameters) { + *out = *in + if in.NotebookOutputOption != nil { + in, out := &in.NotebookOutputOption, &out.NotebookOutputOption + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } + if in.S3OutputPath != nil { + in, out := &in.S3OutputPath, &out.S3OutputPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsSharingSettingsParameters. +func (in *UserSettingsSharingSettingsParameters) DeepCopy() *UserSettingsSharingSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsSharingSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsSpaceStorageSettingsInitParameters) DeepCopyInto(out *UserSettingsSpaceStorageSettingsInitParameters) { + *out = *in + if in.DefaultEBSStorageSettings != nil { + in, out := &in.DefaultEBSStorageSettings, &out.DefaultEBSStorageSettings + *out = new(SpaceStorageSettingsDefaultEBSStorageSettingsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsSpaceStorageSettingsInitParameters. +func (in *UserSettingsSpaceStorageSettingsInitParameters) DeepCopy() *UserSettingsSpaceStorageSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsSpaceStorageSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsSpaceStorageSettingsObservation) DeepCopyInto(out *UserSettingsSpaceStorageSettingsObservation) { + *out = *in + if in.DefaultEBSStorageSettings != nil { + in, out := &in.DefaultEBSStorageSettings, &out.DefaultEBSStorageSettings + *out = new(SpaceStorageSettingsDefaultEBSStorageSettingsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsSpaceStorageSettingsObservation. +func (in *UserSettingsSpaceStorageSettingsObservation) DeepCopy() *UserSettingsSpaceStorageSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsSpaceStorageSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsSpaceStorageSettingsParameters) DeepCopyInto(out *UserSettingsSpaceStorageSettingsParameters) { + *out = *in + if in.DefaultEBSStorageSettings != nil { + in, out := &in.DefaultEBSStorageSettings, &out.DefaultEBSStorageSettings + *out = new(SpaceStorageSettingsDefaultEBSStorageSettingsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsSpaceStorageSettingsParameters. +func (in *UserSettingsSpaceStorageSettingsParameters) DeepCopy() *UserSettingsSpaceStorageSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsSpaceStorageSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsTensorBoardAppSettingsDefaultResourceSpecInitParameters) DeepCopyInto(out *UserSettingsTensorBoardAppSettingsDefaultResourceSpecInitParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsTensorBoardAppSettingsDefaultResourceSpecInitParameters. 
+func (in *UserSettingsTensorBoardAppSettingsDefaultResourceSpecInitParameters) DeepCopy() *UserSettingsTensorBoardAppSettingsDefaultResourceSpecInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsTensorBoardAppSettingsDefaultResourceSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsTensorBoardAppSettingsDefaultResourceSpecObservation) DeepCopyInto(out *UserSettingsTensorBoardAppSettingsDefaultResourceSpecObservation) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsTensorBoardAppSettingsDefaultResourceSpecObservation. +func (in *UserSettingsTensorBoardAppSettingsDefaultResourceSpecObservation) DeepCopy() *UserSettingsTensorBoardAppSettingsDefaultResourceSpecObservation { + if in == nil { + return nil + } + out := new(UserSettingsTensorBoardAppSettingsDefaultResourceSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSettingsTensorBoardAppSettingsDefaultResourceSpecParameters) DeepCopyInto(out *UserSettingsTensorBoardAppSettingsDefaultResourceSpecParameters) { + *out = *in + if in.InstanceType != nil { + in, out := &in.InstanceType, &out.InstanceType + *out = new(string) + **out = **in + } + if in.LifecycleConfigArn != nil { + in, out := &in.LifecycleConfigArn, &out.LifecycleConfigArn + *out = new(string) + **out = **in + } + if in.SagemakerImageArn != nil { + in, out := &in.SagemakerImageArn, &out.SagemakerImageArn + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionAlias != nil { + in, out := &in.SagemakerImageVersionAlias, &out.SagemakerImageVersionAlias + *out = new(string) + **out = **in + } + if in.SagemakerImageVersionArn != nil { + in, out := &in.SagemakerImageVersionArn, &out.SagemakerImageVersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsTensorBoardAppSettingsDefaultResourceSpecParameters. +func (in *UserSettingsTensorBoardAppSettingsDefaultResourceSpecParameters) DeepCopy() *UserSettingsTensorBoardAppSettingsDefaultResourceSpecParameters { + if in == nil { + return nil + } + out := new(UserSettingsTensorBoardAppSettingsDefaultResourceSpecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsTensorBoardAppSettingsInitParameters) DeepCopyInto(out *UserSettingsTensorBoardAppSettingsInitParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsTensorBoardAppSettingsDefaultResourceSpecInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsTensorBoardAppSettingsInitParameters. 
+func (in *UserSettingsTensorBoardAppSettingsInitParameters) DeepCopy() *UserSettingsTensorBoardAppSettingsInitParameters { + if in == nil { + return nil + } + out := new(UserSettingsTensorBoardAppSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsTensorBoardAppSettingsObservation) DeepCopyInto(out *UserSettingsTensorBoardAppSettingsObservation) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsTensorBoardAppSettingsDefaultResourceSpecObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsTensorBoardAppSettingsObservation. +func (in *UserSettingsTensorBoardAppSettingsObservation) DeepCopy() *UserSettingsTensorBoardAppSettingsObservation { + if in == nil { + return nil + } + out := new(UserSettingsTensorBoardAppSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSettingsTensorBoardAppSettingsParameters) DeepCopyInto(out *UserSettingsTensorBoardAppSettingsParameters) { + *out = *in + if in.DefaultResourceSpec != nil { + in, out := &in.DefaultResourceSpec, &out.DefaultResourceSpec + *out = new(UserSettingsTensorBoardAppSettingsDefaultResourceSpecParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSettingsTensorBoardAppSettingsParameters. 
+func (in *UserSettingsTensorBoardAppSettingsParameters) DeepCopy() *UserSettingsTensorBoardAppSettingsParameters { + if in == nil { + return nil + } + out := new(UserSettingsTensorBoardAppSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigInitParameters) DeepCopyInto(out *VPCConfigInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigInitParameters. +func (in *VPCConfigInitParameters) DeepCopy() *VPCConfigInitParameters { + if in == nil { + return nil + } + out := new(VPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPCConfigObservation) DeepCopyInto(out *VPCConfigObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigObservation. +func (in *VPCConfigObservation) DeepCopy() *VPCConfigObservation { + if in == nil { + return nil + } + out := new(VPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCConfigParameters) DeepCopyInto(out *VPCConfigParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCConfigParameters. +func (in *VPCConfigParameters) DeepCopy() *VPCConfigParameters { + if in == nil { + return nil + } + out := new(VPCConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Workforce) DeepCopyInto(out *Workforce) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workforce. +func (in *Workforce) DeepCopy() *Workforce { + if in == nil { + return nil + } + out := new(Workforce) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workforce) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkforceInitParameters) DeepCopyInto(out *WorkforceInitParameters) { + *out = *in + if in.CognitoConfig != nil { + in, out := &in.CognitoConfig, &out.CognitoConfig + *out = new(CognitoConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OidcConfig != nil { + in, out := &in.OidcConfig, &out.OidcConfig + *out = new(OidcConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceIPConfig != nil { + in, out := &in.SourceIPConfig, &out.SourceIPConfig + *out = new(SourceIPConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkforceVPCConfig != nil { + in, out := &in.WorkforceVPCConfig, &out.WorkforceVPCConfig + *out = new(WorkforceVPCConfigInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkforceInitParameters. +func (in *WorkforceInitParameters) DeepCopy() *WorkforceInitParameters { + if in == nil { + return nil + } + out := new(WorkforceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkforceList) DeepCopyInto(out *WorkforceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workforce, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkforceList. +func (in *WorkforceList) DeepCopy() *WorkforceList { + if in == nil { + return nil + } + out := new(WorkforceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkforceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkforceObservation) DeepCopyInto(out *WorkforceObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CognitoConfig != nil { + in, out := &in.CognitoConfig, &out.CognitoConfig + *out = new(CognitoConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OidcConfig != nil { + in, out := &in.OidcConfig, &out.OidcConfig + *out = new(OidcConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SourceIPConfig != nil { + in, out := &in.SourceIPConfig, &out.SourceIPConfig + *out = new(SourceIPConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Subdomain != nil { + in, out := &in.Subdomain, &out.Subdomain + *out = new(string) + **out = **in + } + if in.WorkforceVPCConfig != nil { + in, out := &in.WorkforceVPCConfig, &out.WorkforceVPCConfig + *out = new(WorkforceVPCConfigObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new WorkforceObservation. +func (in *WorkforceObservation) DeepCopy() *WorkforceObservation { + if in == nil { + return nil + } + out := new(WorkforceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkforceParameters) DeepCopyInto(out *WorkforceParameters) { + *out = *in + if in.CognitoConfig != nil { + in, out := &in.CognitoConfig, &out.CognitoConfig + *out = new(CognitoConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.OidcConfig != nil { + in, out := &in.OidcConfig, &out.OidcConfig + *out = new(OidcConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SourceIPConfig != nil { + in, out := &in.SourceIPConfig, &out.SourceIPConfig + *out = new(SourceIPConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkforceVPCConfig != nil { + in, out := &in.WorkforceVPCConfig, &out.WorkforceVPCConfig + *out = new(WorkforceVPCConfigParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkforceParameters. +func (in *WorkforceParameters) DeepCopy() *WorkforceParameters { + if in == nil { + return nil + } + out := new(WorkforceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkforceSpec) DeepCopyInto(out *WorkforceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkforceSpec. 
+func (in *WorkforceSpec) DeepCopy() *WorkforceSpec { + if in == nil { + return nil + } + out := new(WorkforceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkforceStatus) DeepCopyInto(out *WorkforceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkforceStatus. +func (in *WorkforceStatus) DeepCopy() *WorkforceStatus { + if in == nil { + return nil + } + out := new(WorkforceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkforceVPCConfigInitParameters) DeepCopyInto(out *WorkforceVPCConfigInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkforceVPCConfigInitParameters. +func (in *WorkforceVPCConfigInitParameters) DeepCopy() *WorkforceVPCConfigInitParameters { + if in == nil { + return nil + } + out := new(WorkforceVPCConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkforceVPCConfigObservation) DeepCopyInto(out *WorkforceVPCConfigObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCEndpointID != nil { + in, out := &in.VPCEndpointID, &out.VPCEndpointID + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkforceVPCConfigObservation. +func (in *WorkforceVPCConfigObservation) DeepCopy() *WorkforceVPCConfigObservation { + if in == nil { + return nil + } + out := new(WorkforceVPCConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkforceVPCConfigParameters) DeepCopyInto(out *WorkforceVPCConfigParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkforceVPCConfigParameters. +func (in *WorkforceVPCConfigParameters) DeepCopy() *WorkforceVPCConfigParameters { + if in == nil { + return nil + } + out := new(WorkforceVPCConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceSettingsInitParameters) DeepCopyInto(out *WorkspaceSettingsInitParameters) { + *out = *in + if in.S3ArtifactPath != nil { + in, out := &in.S3ArtifactPath, &out.S3ArtifactPath + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSettingsInitParameters. +func (in *WorkspaceSettingsInitParameters) DeepCopy() *WorkspaceSettingsInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceSettingsObservation) DeepCopyInto(out *WorkspaceSettingsObservation) { + *out = *in + if in.S3ArtifactPath != nil { + in, out := &in.S3ArtifactPath, &out.S3ArtifactPath + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSettingsObservation. +func (in *WorkspaceSettingsObservation) DeepCopy() *WorkspaceSettingsObservation { + if in == nil { + return nil + } + out := new(WorkspaceSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceSettingsParameters) DeepCopyInto(out *WorkspaceSettingsParameters) { + *out = *in + if in.S3ArtifactPath != nil { + in, out := &in.S3ArtifactPath, &out.S3ArtifactPath + *out = new(string) + **out = **in + } + if in.S3KMSKeyID != nil { + in, out := &in.S3KMSKeyID, &out.S3KMSKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSettingsParameters. +func (in *WorkspaceSettingsParameters) DeepCopy() *WorkspaceSettingsParameters { + if in == nil { + return nil + } + out := new(WorkspaceSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workteam) DeepCopyInto(out *Workteam) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workteam. 
+func (in *Workteam) DeepCopy() *Workteam { + if in == nil { + return nil + } + out := new(Workteam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workteam) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkteamInitParameters) DeepCopyInto(out *WorkteamInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MemberDefinition != nil { + in, out := &in.MemberDefinition, &out.MemberDefinition + *out = make([]MemberDefinitionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotificationConfiguration != nil { + in, out := &in.NotificationConfiguration, &out.NotificationConfiguration + *out = new(NotificationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkforceName != nil { + in, out := &in.WorkforceName, &out.WorkforceName + *out = new(string) + **out = **in + } + if in.WorkforceNameRef != nil { + in, out := &in.WorkforceNameRef, &out.WorkforceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkforceNameSelector != nil { + in, out := &in.WorkforceNameSelector, &out.WorkforceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkteamInitParameters. 
+func (in *WorkteamInitParameters) DeepCopy() *WorkteamInitParameters { + if in == nil { + return nil + } + out := new(WorkteamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkteamList) DeepCopyInto(out *WorkteamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workteam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkteamList. +func (in *WorkteamList) DeepCopy() *WorkteamList { + if in == nil { + return nil + } + out := new(WorkteamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkteamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkteamObservation) DeepCopyInto(out *WorkteamObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MemberDefinition != nil { + in, out := &in.MemberDefinition, &out.MemberDefinition + *out = make([]MemberDefinitionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotificationConfiguration != nil { + in, out := &in.NotificationConfiguration, &out.NotificationConfiguration + *out = new(NotificationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Subdomain != nil { + in, out := &in.Subdomain, &out.Subdomain + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkforceName != nil { + in, out := &in.WorkforceName, &out.WorkforceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkteamObservation. 
+func (in *WorkteamObservation) DeepCopy() *WorkteamObservation { + if in == nil { + return nil + } + out := new(WorkteamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkteamParameters) DeepCopyInto(out *WorkteamParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MemberDefinition != nil { + in, out := &in.MemberDefinition, &out.MemberDefinition + *out = make([]MemberDefinitionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotificationConfiguration != nil { + in, out := &in.NotificationConfiguration, &out.NotificationConfiguration + *out = new(NotificationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkforceName != nil { + in, out := &in.WorkforceName, &out.WorkforceName + *out = new(string) + **out = **in + } + if in.WorkforceNameRef != nil { + in, out := &in.WorkforceNameRef, &out.WorkforceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkforceNameSelector != nil { + in, out := &in.WorkforceNameSelector, &out.WorkforceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkteamParameters. 
+func (in *WorkteamParameters) DeepCopy() *WorkteamParameters { + if in == nil { + return nil + } + out := new(WorkteamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkteamSpec) DeepCopyInto(out *WorkteamSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkteamSpec. +func (in *WorkteamSpec) DeepCopy() *WorkteamSpec { + if in == nil { + return nil + } + out := new(WorkteamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkteamStatus) DeepCopyInto(out *WorkteamStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkteamStatus. +func (in *WorkteamStatus) DeepCopy() *WorkteamStatus { + if in == nil { + return nil + } + out := new(WorkteamStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/sagemaker/v1beta2/zz_generated.managed.go b/apis/sagemaker/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..581455f1de --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_generated.managed.go @@ -0,0 +1,908 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this App. +func (mg *App) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this App. 
+func (mg *App) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this App. +func (mg *App) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this App. +func (mg *App) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this App. +func (mg *App) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this App. +func (mg *App) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this App. +func (mg *App) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this App. +func (mg *App) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this App. +func (mg *App) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this App. +func (mg *App) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this App. +func (mg *App) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this App. +func (mg *App) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this AppImageConfig. +func (mg *AppImageConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AppImageConfig. 
+func (mg *AppImageConfig) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AppImageConfig. +func (mg *AppImageConfig) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AppImageConfig. +func (mg *AppImageConfig) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AppImageConfig. +func (mg *AppImageConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AppImageConfig. +func (mg *AppImageConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AppImageConfig. +func (mg *AppImageConfig) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AppImageConfig. +func (mg *AppImageConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AppImageConfig. +func (mg *AppImageConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AppImageConfig. +func (mg *AppImageConfig) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AppImageConfig. +func (mg *AppImageConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AppImageConfig. +func (mg *AppImageConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CodeRepository. 
+func (mg *CodeRepository) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CodeRepository. +func (mg *CodeRepository) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CodeRepository. +func (mg *CodeRepository) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CodeRepository. +func (mg *CodeRepository) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CodeRepository. +func (mg *CodeRepository) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CodeRepository. +func (mg *CodeRepository) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CodeRepository. +func (mg *CodeRepository) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CodeRepository. +func (mg *CodeRepository) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CodeRepository. +func (mg *CodeRepository) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CodeRepository. +func (mg *CodeRepository) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CodeRepository. +func (mg *CodeRepository) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CodeRepository. 
+func (mg *CodeRepository) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Device. +func (mg *Device) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Device. +func (mg *Device) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Device. +func (mg *Device) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Device. +func (mg *Device) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Device. +func (mg *Device) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Device. +func (mg *Device) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Device. +func (mg *Device) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Device. +func (mg *Device) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Device. +func (mg *Device) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Device. +func (mg *Device) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Device. +func (mg *Device) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Device. 
+func (mg *Device) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DeviceFleet. +func (mg *DeviceFleet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DeviceFleet. +func (mg *DeviceFleet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DeviceFleet. +func (mg *DeviceFleet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DeviceFleet. +func (mg *DeviceFleet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DeviceFleet. +func (mg *DeviceFleet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DeviceFleet. +func (mg *DeviceFleet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DeviceFleet. +func (mg *DeviceFleet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DeviceFleet. +func (mg *DeviceFleet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DeviceFleet. +func (mg *DeviceFleet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DeviceFleet. +func (mg *DeviceFleet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DeviceFleet. 
+func (mg *DeviceFleet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DeviceFleet. +func (mg *DeviceFleet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Domain. +func (mg *Domain) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Domain. +func (mg *Domain) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Domain. +func (mg *Domain) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Domain. +func (mg *Domain) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Domain. +func (mg *Domain) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Domain. +func (mg *Domain) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Domain. +func (mg *Domain) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Domain. +func (mg *Domain) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Domain. +func (mg *Domain) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Domain. +func (mg *Domain) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Domain. 
+func (mg *Domain) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Domain. +func (mg *Domain) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Endpoint. +func (mg *Endpoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Endpoint. +func (mg *Endpoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Endpoint. +func (mg *Endpoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Endpoint. +func (mg *Endpoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Endpoint. +func (mg *Endpoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Endpoint. +func (mg *Endpoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Endpoint. +func (mg *Endpoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Endpoint. +func (mg *Endpoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Endpoint. +func (mg *Endpoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Endpoint. +func (mg *Endpoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Endpoint. 
+func (mg *Endpoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Endpoint. +func (mg *Endpoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this EndpointConfiguration. +func (mg *EndpointConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EndpointConfiguration. +func (mg *EndpointConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EndpointConfiguration. +func (mg *EndpointConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EndpointConfiguration. +func (mg *EndpointConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EndpointConfiguration. +func (mg *EndpointConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EndpointConfiguration. +func (mg *EndpointConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EndpointConfiguration. +func (mg *EndpointConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EndpointConfiguration. +func (mg *EndpointConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EndpointConfiguration. 
+func (mg *EndpointConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EndpointConfiguration. +func (mg *EndpointConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EndpointConfiguration. +func (mg *EndpointConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EndpointConfiguration. +func (mg *EndpointConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FeatureGroup. +func (mg *FeatureGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FeatureGroup. +func (mg *FeatureGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FeatureGroup. +func (mg *FeatureGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FeatureGroup. +func (mg *FeatureGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FeatureGroup. +func (mg *FeatureGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FeatureGroup. +func (mg *FeatureGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FeatureGroup. +func (mg *FeatureGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FeatureGroup. 
+func (mg *FeatureGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FeatureGroup. +func (mg *FeatureGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FeatureGroup. +func (mg *FeatureGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FeatureGroup. +func (mg *FeatureGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FeatureGroup. +func (mg *FeatureGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Model. +func (mg *Model) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Model. +func (mg *Model) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Model. +func (mg *Model) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Model. +func (mg *Model) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Model. +func (mg *Model) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Model. +func (mg *Model) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Model. +func (mg *Model) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Model. 
+func (mg *Model) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Model. +func (mg *Model) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Model. +func (mg *Model) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Model. +func (mg *Model) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Model. +func (mg *Model) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this NotebookInstance. +func (mg *NotebookInstance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this NotebookInstance. +func (mg *NotebookInstance) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this NotebookInstance. +func (mg *NotebookInstance) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this NotebookInstance. +func (mg *NotebookInstance) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this NotebookInstance. +func (mg *NotebookInstance) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this NotebookInstance. +func (mg *NotebookInstance) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this NotebookInstance. +func (mg *NotebookInstance) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this NotebookInstance. +func (mg *NotebookInstance) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this NotebookInstance. +func (mg *NotebookInstance) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this NotebookInstance. +func (mg *NotebookInstance) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this NotebookInstance. +func (mg *NotebookInstance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this NotebookInstance. +func (mg *NotebookInstance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Space. +func (mg *Space) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Space. +func (mg *Space) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Space. +func (mg *Space) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Space. +func (mg *Space) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Space. +func (mg *Space) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Space. +func (mg *Space) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Space. +func (mg *Space) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this Space. +func (mg *Space) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Space. +func (mg *Space) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Space. +func (mg *Space) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Space. +func (mg *Space) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Space. +func (mg *Space) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this UserProfile. +func (mg *UserProfile) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this UserProfile. +func (mg *UserProfile) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this UserProfile. +func (mg *UserProfile) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this UserProfile. +func (mg *UserProfile) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this UserProfile. +func (mg *UserProfile) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this UserProfile. +func (mg *UserProfile) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this UserProfile. +func (mg *UserProfile) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this UserProfile. +func (mg *UserProfile) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this UserProfile. +func (mg *UserProfile) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this UserProfile. +func (mg *UserProfile) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this UserProfile. +func (mg *UserProfile) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this UserProfile. +func (mg *UserProfile) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Workforce. +func (mg *Workforce) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workforce. +func (mg *Workforce) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workforce. +func (mg *Workforce) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workforce. +func (mg *Workforce) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workforce. +func (mg *Workforce) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workforce. +func (mg *Workforce) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workforce. 
+func (mg *Workforce) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workforce. +func (mg *Workforce) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workforce. +func (mg *Workforce) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workforce. +func (mg *Workforce) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workforce. +func (mg *Workforce) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workforce. +func (mg *Workforce) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Workteam. +func (mg *Workteam) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workteam. +func (mg *Workteam) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workteam. +func (mg *Workteam) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workteam. +func (mg *Workteam) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workteam. +func (mg *Workteam) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workteam. +func (mg *Workteam) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workteam. 
+func (mg *Workteam) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workteam. +func (mg *Workteam) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workteam. +func (mg *Workteam) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workteam. +func (mg *Workteam) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workteam. +func (mg *Workteam) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workteam. +func (mg *Workteam) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/sagemaker/v1beta2/zz_generated.managedlist.go b/apis/sagemaker/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..7e33a95968 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,143 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AppImageConfigList. +func (l *AppImageConfigList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this AppList. +func (l *AppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CodeRepositoryList. 
+func (l *CodeRepositoryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DeviceFleetList. +func (l *DeviceFleetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DeviceList. +func (l *DeviceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DomainList. +func (l *DomainList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this EndpointConfigurationList. +func (l *EndpointConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this EndpointList. +func (l *EndpointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FeatureGroupList. +func (l *FeatureGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ModelList. +func (l *ModelList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this NotebookInstanceList. +func (l *NotebookInstanceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpaceList. 
+func (l *SpaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this UserProfileList. +func (l *UserProfileList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WorkforceList. +func (l *WorkforceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WorkteamList. +func (l *WorkteamList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/sagemaker/v1beta2/zz_generated.resolvers.go b/apis/sagemaker/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..0f7bebadc8 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,1324 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this App. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *App) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DomainID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DomainIDRef, + Selector: mg.Spec.ForProvider.DomainIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DomainID") + } + mg.Spec.ForProvider.DomainID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DomainIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "UserProfile", "UserProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.UserProfileName), + Extract: resource.ExtractParamPath("user_profile_name", false), + Reference: mg.Spec.ForProvider.UserProfileNameRef, + Selector: mg.Spec.ForProvider.UserProfileNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.UserProfileName") + } + mg.Spec.ForProvider.UserProfileName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.UserProfileNameRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DomainID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DomainIDRef, + Selector: mg.Spec.InitProvider.DomainIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DomainID") + } + mg.Spec.InitProvider.DomainID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DomainIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "UserProfile", "UserProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.UserProfileName), + Extract: resource.ExtractParamPath("user_profile_name", false), + Reference: mg.Spec.InitProvider.UserProfileNameRef, + Selector: mg.Spec.InitProvider.UserProfileNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.UserProfileName") + } + mg.Spec.InitProvider.UserProfileName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.UserProfileNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this CodeRepository. 
+func (mg *CodeRepository) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.GitConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GitConfig.SecretArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.GitConfig.SecretArnRef, + Selector: mg.Spec.ForProvider.GitConfig.SecretArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.GitConfig.SecretArn") + } + mg.Spec.ForProvider.GitConfig.SecretArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.GitConfig.SecretArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.GitConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.GitConfig.SecretArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.GitConfig.SecretArnRef, + Selector: mg.Spec.InitProvider.GitConfig.SecretArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.GitConfig.SecretArn") + } + mg.Spec.InitProvider.GitConfig.SecretArn = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.GitConfig.SecretArnRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Device. +func (mg *Device) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "DeviceFleet", "DeviceFleetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DeviceFleetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DeviceFleetNameRef, + Selector: mg.Spec.ForProvider.DeviceFleetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DeviceFleetName") + } + mg.Spec.ForProvider.DeviceFleetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DeviceFleetNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "DeviceFleet", "DeviceFleetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DeviceFleetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DeviceFleetNameRef, + Selector: mg.Spec.InitProvider.DeviceFleetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DeviceFleetName") + } + mg.Spec.InitProvider.DeviceFleetName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.DeviceFleetNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DeviceFleet. +func (mg *DeviceFleet) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Domain. 
+func (mg *Domain) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.DefaultUserSettings != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultUserSettings.ExecutionRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.DefaultUserSettings.ExecutionRoleRef, + Selector: mg.Spec.ForProvider.DefaultUserSettings.ExecutionRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultUserSettings.ExecutionRole") + } + mg.Spec.ForProvider.DefaultUserSettings.ExecutionRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultUserSettings.ExecutionRoleRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.DefaultUserSettings != nil { + if mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage); i5++ { + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "AppImageConfig", "AppImageConfigList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigName), + Extract: 
reference.ExternalName(), + Reference: mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigNameRef, + Selector: mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigName") + } + mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigNameRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.DefaultUserSettings != nil { + if mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings != nil { + for i5 := 0; i5 < len(mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage); i5++ { + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta1", "ImageVersion", "ImageVersionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageName), + Extract: resource.ExtractParamPath("image_name", false), + Reference: mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageNameRef, + Selector: mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageName") + } + 
mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageNameRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetIDRefs, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetIds") + } + mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the 
reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VPCIDRef, + Selector: mg.Spec.ForProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCID") + } + mg.Spec.ForProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPCIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.DefaultUserSettings != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultUserSettings.ExecutionRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.DefaultUserSettings.ExecutionRoleRef, + Selector: mg.Spec.InitProvider.DefaultUserSettings.ExecutionRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultUserSettings.ExecutionRole") + } + mg.Spec.InitProvider.DefaultUserSettings.ExecutionRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultUserSettings.ExecutionRoleRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.DefaultUserSettings != nil { + if mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage); i5++ { + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "AppImageConfig", "AppImageConfigList") + if err != nil { + return errors.Wrap(err, 
"failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigNameRef, + Selector: mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigName") + } + mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].AppImageConfigNameRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.InitProvider.DefaultUserSettings != nil { + if mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings != nil { + for i5 := 0; i5 < len(mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage); i5++ { + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta1", "ImageVersion", "ImageVersionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageName), + Extract: resource.ExtractParamPath("image_name", false), + Reference: mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageNameRef, + Selector: 
mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageName") + } + mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultUserSettings.KernelGatewayAppSettings.CustomImage[i5].ImageNameRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetIDRefs, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetIds") + } + 
mg.Spec.InitProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetIDRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPCID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.VPCIDRef, + Selector: mg.Spec.InitProvider.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCID") + } + mg.Spec.InitProvider.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VPCIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Endpoint. It resolves spec.forProvider.endpointConfigName and spec.initProvider.endpointConfigName against sagemaker v1beta2 EndpointConfiguration resources, taking the referenced object's external name as the resolved value. +func (mg *Endpoint) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "EndpointConfiguration", "EndpointConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EndpointConfigName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EndpointConfigNameRef, + Selector: mg.Spec.ForProvider.EndpointConfigNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EndpointConfigName") + } + mg.Spec.ForProvider.EndpointConfigName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.EndpointConfigNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "EndpointConfiguration", "EndpointConfigurationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EndpointConfigName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EndpointConfigNameRef, + Selector: mg.Spec.InitProvider.EndpointConfigNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EndpointConfigName") + } + mg.Spec.InitProvider.EndpointConfigName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EndpointConfigNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this EndpointConfiguration. It resolves the KMSKeyArn reference against kms v1beta1 Key resources and the ModelName reference of each productionVariants entry against sagemaker v1beta2 Model resources, for both forProvider and initProvider.
+func (mg *EndpointConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyArn") + } + mg.Spec.ForProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyArnRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.ProductionVariants); i3++ { + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Model", "ModelList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ProductionVariants[i3].ModelName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ProductionVariants[i3].ModelNameRef, + Selector: mg.Spec.ForProvider.ProductionVariants[i3].ModelNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ProductionVariants[i3].ModelName") + } + mg.Spec.ForProvider.ProductionVariants[i3].ModelName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ProductionVariants[i3].ModelNameRef = 
rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyArn") + } + mg.Spec.InitProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyArnRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.ProductionVariants); i3++ { + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Model", "ModelList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ProductionVariants[i3].ModelName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ProductionVariants[i3].ModelNameRef, + Selector: mg.Spec.InitProvider.ProductionVariants[i3].ModelNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ProductionVariants[i3].ModelName") + } + mg.Spec.InitProvider.ProductionVariants[i3].ModelName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ProductionVariants[i3].ModelNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this FeatureGroup. It resolves the RoleArn reference (forProvider and initProvider) against iam v1beta1 Role resources, extracting the referenced role's ARN as the resolved value.
+func (mg *FeatureGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Model. It resolves the ExecutionRoleArn reference (forProvider and initProvider) against iam v1beta1 Role resources, extracting the referenced role's ARN as the resolved value.
+func (mg *Model) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ExecutionRoleArnRef, + Selector: mg.Spec.ForProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExecutionRoleArn") + } + mg.Spec.ForProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExecutionRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ExecutionRoleArnRef, + Selector: mg.Spec.InitProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExecutionRoleArn") + } + mg.Spec.InitProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExecutionRoleArnRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this NotebookInstance. It resolves the DefaultCodeRepository (sagemaker v1beta2 CodeRepository, external name), KMSKeyID (kms v1beta1 Key, external name), RoleArn (iam v1beta1 Role, ARN extractor) and SubnetID (ec2 v1beta1 Subnet, external name) references for both forProvider and initProvider.
+func (mg *NotebookInstance) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "CodeRepository", "CodeRepositoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultCodeRepository), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DefaultCodeRepositoryRef, + Selector: mg.Spec.ForProvider.DefaultCodeRepositorySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultCodeRepository") + } + mg.Spec.ForProvider.DefaultCodeRepository = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultCodeRepositoryRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyID") + } + mg.Spec.ForProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != 
nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "CodeRepository", "CodeRepositoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultCodeRepository), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DefaultCodeRepositoryRef, + Selector: mg.Spec.InitProvider.DefaultCodeRepositorySelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultCodeRepository") + } + mg.Spec.InitProvider.DefaultCodeRepository = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultCodeRepositoryRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyID") + } + mg.Spec.InitProvider.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get 
the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Space. It resolves the DomainID reference (forProvider and initProvider) against sagemaker v1beta2 Domain resources, extracting the referenced object's resource ID. +func (mg *Space) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DomainID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DomainIDRef, + Selector: mg.Spec.ForProvider.DomainIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DomainID") + } + mg.Spec.ForProvider.DomainID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DomainIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + 
CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DomainID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DomainIDRef, + Selector: mg.Spec.InitProvider.DomainIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DomainID") + } + mg.Spec.InitProvider.DomainID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DomainIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this UserProfile. It resolves the DomainID reference (forProvider and initProvider) against sagemaker v1beta2 Domain resources, extracting the referenced object's resource ID. +func (mg *UserProfile) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DomainID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DomainIDRef, + Selector: mg.Spec.ForProvider.DomainIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DomainID") + } + mg.Spec.ForProvider.DomainID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DomainIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Domain", "DomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DomainID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.DomainIDRef, + Selector: mg.Spec.InitProvider.DomainIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DomainID") + } + mg.Spec.InitProvider.DomainID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DomainIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Workforce. It resolves the CognitoConfig ClientID (cognitoidp v1beta1 UserPoolClient, resource ID) and UserPool (cognitoidp v1beta1 UserPoolDomain, "user_pool_id" parameter) references for both forProvider and initProvider, when cognitoConfig is set. +func (mg *Workforce) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.CognitoConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolClient", "UserPoolClientList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CognitoConfig.ClientID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CognitoConfig.ClientIDRef, + Selector: mg.Spec.ForProvider.CognitoConfig.ClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CognitoConfig.ClientID") + } + mg.Spec.ForProvider.CognitoConfig.ClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CognitoConfig.ClientIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.CognitoConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolDomain", "UserPoolDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.CognitoConfig.UserPool), + Extract: resource.ExtractParamPath("user_pool_id", false), + Reference: mg.Spec.ForProvider.CognitoConfig.UserPoolRef, + Selector: mg.Spec.ForProvider.CognitoConfig.UserPoolSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CognitoConfig.UserPool") + } + mg.Spec.ForProvider.CognitoConfig.UserPool = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CognitoConfig.UserPoolRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.CognitoConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolClient", "UserPoolClientList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CognitoConfig.ClientID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.CognitoConfig.ClientIDRef, + Selector: mg.Spec.InitProvider.CognitoConfig.ClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CognitoConfig.ClientID") + } + mg.Spec.InitProvider.CognitoConfig.ClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CognitoConfig.ClientIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.CognitoConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolDomain", "UserPoolDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CognitoConfig.UserPool), + Extract: 
resource.ExtractParamPath("user_pool_id", false), + Reference: mg.Spec.InitProvider.CognitoConfig.UserPoolRef, + Selector: mg.Spec.InitProvider.CognitoConfig.UserPoolSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CognitoConfig.UserPool") + } + mg.Spec.InitProvider.CognitoConfig.UserPool = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CognitoConfig.UserPoolRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Workteam. +func (mg *Workteam) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.MemberDefinition); i3++ { + if mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolClient", "UserPoolClientList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientIDRef, + Selector: mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientID") + } + mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientIDRef = 
rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.MemberDefinition); i3++ { + if mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserGroup", "UserGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroup), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroupRef, + Selector: mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroup") + } + mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroup = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroupRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.MemberDefinition); i3++ { + if mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolDomain", "UserPoolDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPool), + Extract: resource.ExtractParamPath("user_pool_id", false), + Reference: mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPoolRef, + 
Selector: mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPoolSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPool") + } + mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPool = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPoolRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Workforce", "WorkforceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WorkforceName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WorkforceNameRef, + Selector: mg.Spec.ForProvider.WorkforceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WorkforceName") + } + mg.Spec.ForProvider.WorkforceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WorkforceNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.MemberDefinition); i3++ { + if mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolClient", "UserPoolClientList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientIDRef, + Selector: mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientID") + } + mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.ClientIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.MemberDefinition); i3++ { + if mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition != nil { + { + m, l, err = apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserGroup", "UserGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroup), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroupRef, + Selector: mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroup") + } + mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroup = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserGroupRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.MemberDefinition); i3++ { + if mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition != nil { + { + m, l, err = 
apisresolver.GetManagedResource("cognitoidp.aws.upbound.io", "v1beta1", "UserPoolDomain", "UserPoolDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPool), + Extract: resource.ExtractParamPath("user_pool_id", false), + Reference: mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPoolRef, + Selector: mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPoolSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPool") + } + mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPool = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MemberDefinition[i3].CognitoMemberDefinition.UserPoolRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("sagemaker.aws.upbound.io", "v1beta2", "Workforce", "WorkforceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.WorkforceName), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.WorkforceNameRef, + Selector: mg.Spec.InitProvider.WorkforceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.WorkforceName") + } + mg.Spec.InitProvider.WorkforceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.WorkforceNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/sagemaker/v1beta2/zz_groupversion_info.go 
b/apis/sagemaker/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..7ea039e182 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=sagemaker.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "sagemaker.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/sagemaker/v1beta2/zz_model_terraformed.go b/apis/sagemaker/v1beta2/zz_model_terraformed.go new file mode 100755 index 0000000000..8bf9df3056 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_model_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+
+package v1beta2
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Model
+func (mg *Model) GetTerraformResourceType() string {
+	return "aws_sagemaker_model"
+}
+
+// GetConnectionDetailsMapping for this Model
+func (tr *Model) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this Model
+func (tr *Model) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Model
+func (tr *Model) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Model
+func (tr *Model) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Model
+func (tr *Model) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Model
+func (tr *Model) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Model
+func (tr *Model) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Model
+func (tr *Model) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Model using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Model) LateInitialize(attrs []byte) (bool, error) { + params := &ModelParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Model) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_model_types.go b/apis/sagemaker/v1beta2/zz_model_types.go new file mode 100755 index 0000000000..93796a5638 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_model_types.go @@ -0,0 +1,640 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContainerInitParameters struct { + + // The DNS host name for the container. + ContainerHostname *string `json:"containerHostname,omitempty" tf:"container_hostname,omitempty"` + + // Environment variables for the Docker container. + // A list of key value pairs. + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The registry path where the inference code image is stored in Amazon ECR. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config. + ImageConfig *ImageConfigInitParameters `json:"imageConfig,omitempty" tf:"image_config,omitempty"` + + // The container hosts value SingleModel/MultiModel. The default value is SingleModel. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The location of model data to deploy. Use this for uncompressed model deployment. 
For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide. + ModelDataSource *ModelDataSourceInitParameters `json:"modelDataSource,omitempty" tf:"model_data_source,omitempty"` + + // The URL for the S3 location where model artifacts are stored. + ModelDataURL *string `json:"modelDataUrl,omitempty" tf:"model_data_url,omitempty"` + + // The Amazon Resource Name (ARN) of the model package to use to create the model. + ModelPackageName *string `json:"modelPackageName,omitempty" tf:"model_package_name,omitempty"` +} + +type ContainerObservation struct { + + // The DNS host name for the container. + ContainerHostname *string `json:"containerHostname,omitempty" tf:"container_hostname,omitempty"` + + // Environment variables for the Docker container. + // A list of key value pairs. + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The registry path where the inference code image is stored in Amazon ECR. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config. + ImageConfig *ImageConfigObservation `json:"imageConfig,omitempty" tf:"image_config,omitempty"` + + // The container hosts value SingleModel/MultiModel. The default value is SingleModel. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide. 
+ ModelDataSource *ModelDataSourceObservation `json:"modelDataSource,omitempty" tf:"model_data_source,omitempty"` + + // The URL for the S3 location where model artifacts are stored. + ModelDataURL *string `json:"modelDataUrl,omitempty" tf:"model_data_url,omitempty"` + + // The Amazon Resource Name (ARN) of the model package to use to create the model. + ModelPackageName *string `json:"modelPackageName,omitempty" tf:"model_package_name,omitempty"` +} + +type ContainerParameters struct { + + // The DNS host name for the container. + // +kubebuilder:validation:Optional + ContainerHostname *string `json:"containerHostname,omitempty" tf:"container_hostname,omitempty"` + + // Environment variables for the Docker container. + // A list of key value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The registry path where the inference code image is stored in Amazon ECR. + // +kubebuilder:validation:Optional + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config. + // +kubebuilder:validation:Optional + ImageConfig *ImageConfigParameters `json:"imageConfig,omitempty" tf:"image_config,omitempty"` + + // The container hosts value SingleModel/MultiModel. The default value is SingleModel. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide. 
+ // +kubebuilder:validation:Optional + ModelDataSource *ModelDataSourceParameters `json:"modelDataSource,omitempty" tf:"model_data_source,omitempty"` + + // The URL for the S3 location where model artifacts are stored. + // +kubebuilder:validation:Optional + ModelDataURL *string `json:"modelDataUrl,omitempty" tf:"model_data_url,omitempty"` + + // The Amazon Resource Name (ARN) of the model package to use to create the model. + // +kubebuilder:validation:Optional + ModelPackageName *string `json:"modelPackageName,omitempty" tf:"model_package_name,omitempty"` +} + +type ImageConfigInitParameters struct { + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc. + RepositoryAccessMode *string `json:"repositoryAccessMode,omitempty" tf:"repository_access_mode,omitempty"` + + // Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config. + RepositoryAuthConfig *RepositoryAuthConfigInitParameters `json:"repositoryAuthConfig,omitempty" tf:"repository_auth_config,omitempty"` +} + +type ImageConfigObservation struct { + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc. + RepositoryAccessMode *string `json:"repositoryAccessMode,omitempty" tf:"repository_access_mode,omitempty"` + + // Specifies an authentication configuration for the private docker registry where your model image is hosted. 
Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config. + RepositoryAuthConfig *RepositoryAuthConfigObservation `json:"repositoryAuthConfig,omitempty" tf:"repository_auth_config,omitempty"` +} + +type ImageConfigParameters struct { + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc. + // +kubebuilder:validation:Optional + RepositoryAccessMode *string `json:"repositoryAccessMode" tf:"repository_access_mode,omitempty"` + + // Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config. + // +kubebuilder:validation:Optional + RepositoryAuthConfig *RepositoryAuthConfigParameters `json:"repositoryAuthConfig,omitempty" tf:"repository_auth_config,omitempty"` +} + +type ImageConfigRepositoryAuthConfigInitParameters struct { + + // The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide. + RepositoryCredentialsProviderArn *string `json:"repositoryCredentialsProviderArn,omitempty" tf:"repository_credentials_provider_arn,omitempty"` +} + +type ImageConfigRepositoryAuthConfigObservation struct { + + // The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. 
For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide. + RepositoryCredentialsProviderArn *string `json:"repositoryCredentialsProviderArn,omitempty" tf:"repository_credentials_provider_arn,omitempty"` +} + +type ImageConfigRepositoryAuthConfigParameters struct { + + // The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide. + // +kubebuilder:validation:Optional + RepositoryCredentialsProviderArn *string `json:"repositoryCredentialsProviderArn" tf:"repository_credentials_provider_arn,omitempty"` +} + +type InferenceExecutionConfigInitParameters struct { + + // The container hosts value SingleModel/MultiModel. The default value is SingleModel. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type InferenceExecutionConfigObservation struct { + + // The container hosts value SingleModel/MultiModel. The default value is SingleModel. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type InferenceExecutionConfigParameters struct { + + // The container hosts value SingleModel/MultiModel. The default value is SingleModel. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` +} + +type ModelDataSourceInitParameters struct { + + // The S3 location of model data to deploy. + S3DataSource []S3DataSourceInitParameters `json:"s3DataSource,omitempty" tf:"s3_data_source,omitempty"` +} + +type ModelDataSourceObservation struct { + + // The S3 location of model data to deploy. + S3DataSource []S3DataSourceObservation `json:"s3DataSource,omitempty" tf:"s3_data_source,omitempty"` +} + +type ModelDataSourceParameters struct { + + // The S3 location of model data to deploy. 
+ // +kubebuilder:validation:Optional + S3DataSource []S3DataSourceParameters `json:"s3DataSource" tf:"s3_data_source,omitempty"` +} + +type ModelDataSourceS3DataSourceInitParameters struct { + + // How the model data is prepared. Allowed values are: None and Gzip. + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + + // The type of model data to deploy. Allowed values are: S3Object and S3Prefix. + S3DataType *string `json:"s3DataType,omitempty" tf:"s3_data_type,omitempty"` + + // The S3 path of model data to deploy. + S3URI *string `json:"s3Uri,omitempty" tf:"s3_uri,omitempty"` +} + +type ModelDataSourceS3DataSourceObservation struct { + + // How the model data is prepared. Allowed values are: None and Gzip. + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + + // The type of model data to deploy. Allowed values are: S3Object and S3Prefix. + S3DataType *string `json:"s3DataType,omitempty" tf:"s3_data_type,omitempty"` + + // The S3 path of model data to deploy. + S3URI *string `json:"s3Uri,omitempty" tf:"s3_uri,omitempty"` +} + +type ModelDataSourceS3DataSourceParameters struct { + + // How the model data is prepared. Allowed values are: None and Gzip. + // +kubebuilder:validation:Optional + CompressionType *string `json:"compressionType" tf:"compression_type,omitempty"` + + // The type of model data to deploy. Allowed values are: S3Object and S3Prefix. + // +kubebuilder:validation:Optional + S3DataType *string `json:"s3DataType" tf:"s3_data_type,omitempty"` + + // The S3 path of model data to deploy. + // +kubebuilder:validation:Optional + S3URI *string `json:"s3Uri" tf:"s3_uri,omitempty"` +} + +type ModelInitParameters struct { + + // Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below. 
+ Container []ContainerInitParameters `json:"container,omitempty" tf:"container,omitempty"` + + // Isolates the model container. No inbound or outbound network calls can be made to or from the model container. + EnableNetworkIsolation *bool `json:"enableNetworkIsolation,omitempty" tf:"enable_network_isolation,omitempty"` + + // A role that SageMaker can assume to access model artifacts and docker images for deployment. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config. + InferenceExecutionConfig *InferenceExecutionConfigInitParameters `json:"inferenceExecutionConfig,omitempty" tf:"inference_execution_config,omitempty"` + + // The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below. + PrimaryContainer *PrimaryContainerInitParameters `json:"primaryContainer,omitempty" tf:"primary_container,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform. 
+ VPCConfig *VPCConfigInitParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ModelObservation struct { + + // The Amazon Resource Name (ARN) assigned by AWS to this model. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below. + Container []ContainerObservation `json:"container,omitempty" tf:"container,omitempty"` + + // Isolates the model container. No inbound or outbound network calls can be made to or from the model container. + EnableNetworkIsolation *bool `json:"enableNetworkIsolation,omitempty" tf:"enable_network_isolation,omitempty"` + + // A role that SageMaker can assume to access model artifacts and docker images for deployment. + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config. + InferenceExecutionConfig *InferenceExecutionConfigObservation `json:"inferenceExecutionConfig,omitempty" tf:"inference_execution_config,omitempty"` + + // The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below. + PrimaryContainer *PrimaryContainerObservation `json:"primaryContainer,omitempty" tf:"primary_container,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Specifies the VPC that you want your model to connect to. 
VpcConfig is used in hosting services and in batch transform. + VPCConfig *VPCConfigObservation `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type ModelParameters struct { + + // Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below. + // +kubebuilder:validation:Optional + Container []ContainerParameters `json:"container,omitempty" tf:"container,omitempty"` + + // Isolates the model container. No inbound or outbound network calls can be made to or from the model container. + // +kubebuilder:validation:Optional + EnableNetworkIsolation *bool `json:"enableNetworkIsolation,omitempty" tf:"enable_network_isolation,omitempty"` + + // A role that SageMaker can assume to access model artifacts and docker images for deployment. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config. + // +kubebuilder:validation:Optional + InferenceExecutionConfig *InferenceExecutionConfigParameters `json:"inferenceExecutionConfig,omitempty" tf:"inference_execution_config,omitempty"` + + // The primary docker image containing inference code that is used when the model is deployed for predictions. 
If not specified, the container argument is required. Fields are documented below. + // +kubebuilder:validation:Optional + PrimaryContainer *PrimaryContainerParameters `json:"primaryContainer,omitempty" tf:"primary_container,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform. + // +kubebuilder:validation:Optional + VPCConfig *VPCConfigParameters `json:"vpcConfig,omitempty" tf:"vpc_config,omitempty"` +} + +type PrimaryContainerImageConfigInitParameters struct { + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc. + RepositoryAccessMode *string `json:"repositoryAccessMode,omitempty" tf:"repository_access_mode,omitempty"` + + // Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config. + RepositoryAuthConfig *ImageConfigRepositoryAuthConfigInitParameters `json:"repositoryAuthConfig,omitempty" tf:"repository_auth_config,omitempty"` +} + +type PrimaryContainerImageConfigObservation struct { + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc. 
+ RepositoryAccessMode *string `json:"repositoryAccessMode,omitempty" tf:"repository_access_mode,omitempty"` + + // Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config. + RepositoryAuthConfig *ImageConfigRepositoryAuthConfigObservation `json:"repositoryAuthConfig,omitempty" tf:"repository_auth_config,omitempty"` +} + +type PrimaryContainerImageConfigParameters struct { + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc. + // +kubebuilder:validation:Optional + RepositoryAccessMode *string `json:"repositoryAccessMode" tf:"repository_access_mode,omitempty"` + + // Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config. + // +kubebuilder:validation:Optional + RepositoryAuthConfig *ImageConfigRepositoryAuthConfigParameters `json:"repositoryAuthConfig,omitempty" tf:"repository_auth_config,omitempty"` +} + +type PrimaryContainerInitParameters struct { + + // The DNS host name for the container. + ContainerHostname *string `json:"containerHostname,omitempty" tf:"container_hostname,omitempty"` + + // Environment variables for the Docker container. + // A list of key value pairs. + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The registry path where the inference code image is stored in Amazon ECR. 
+ Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config. + ImageConfig *PrimaryContainerImageConfigInitParameters `json:"imageConfig,omitempty" tf:"image_config,omitempty"` + + // The container hosts value SingleModel/MultiModel. The default value is SingleModel. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide. + ModelDataSource *PrimaryContainerModelDataSourceInitParameters `json:"modelDataSource,omitempty" tf:"model_data_source,omitempty"` + + // The URL for the S3 location where model artifacts are stored. + ModelDataURL *string `json:"modelDataUrl,omitempty" tf:"model_data_url,omitempty"` + + // The Amazon Resource Name (ARN) of the model package to use to create the model. + ModelPackageName *string `json:"modelPackageName,omitempty" tf:"model_package_name,omitempty"` +} + +type PrimaryContainerModelDataSourceInitParameters struct { + + // The S3 location of model data to deploy. + S3DataSource []ModelDataSourceS3DataSourceInitParameters `json:"s3DataSource,omitempty" tf:"s3_data_source,omitempty"` +} + +type PrimaryContainerModelDataSourceObservation struct { + + // The S3 location of model data to deploy. + S3DataSource []ModelDataSourceS3DataSourceObservation `json:"s3DataSource,omitempty" tf:"s3_data_source,omitempty"` +} + +type PrimaryContainerModelDataSourceParameters struct { + + // The S3 location of model data to deploy. 
+ // +kubebuilder:validation:Optional + S3DataSource []ModelDataSourceS3DataSourceParameters `json:"s3DataSource" tf:"s3_data_source,omitempty"` +} + +type PrimaryContainerObservation struct { + + // The DNS host name for the container. + ContainerHostname *string `json:"containerHostname,omitempty" tf:"container_hostname,omitempty"` + + // Environment variables for the Docker container. + // A list of key value pairs. + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The registry path where the inference code image is stored in Amazon ECR. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config. + ImageConfig *PrimaryContainerImageConfigObservation `json:"imageConfig,omitempty" tf:"image_config,omitempty"` + + // The container hosts value SingleModel/MultiModel. The default value is SingleModel. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide. + ModelDataSource *PrimaryContainerModelDataSourceObservation `json:"modelDataSource,omitempty" tf:"model_data_source,omitempty"` + + // The URL for the S3 location where model artifacts are stored. + ModelDataURL *string `json:"modelDataUrl,omitempty" tf:"model_data_url,omitempty"` + + // The Amazon Resource Name (ARN) of the model package to use to create the model. + ModelPackageName *string `json:"modelPackageName,omitempty" tf:"model_package_name,omitempty"` +} + +type PrimaryContainerParameters struct { + + // The DNS host name for the container. 
+ // +kubebuilder:validation:Optional + ContainerHostname *string `json:"containerHostname,omitempty" tf:"container_hostname,omitempty"` + + // Environment variables for the Docker container. + // A list of key value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The registry path where the inference code image is stored in Amazon ECR. + // +kubebuilder:validation:Optional + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config. + // +kubebuilder:validation:Optional + ImageConfig *PrimaryContainerImageConfigParameters `json:"imageConfig,omitempty" tf:"image_config,omitempty"` + + // The container hosts value SingleModel/MultiModel. The default value is SingleModel. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide. + // +kubebuilder:validation:Optional + ModelDataSource *PrimaryContainerModelDataSourceParameters `json:"modelDataSource,omitempty" tf:"model_data_source,omitempty"` + + // The URL for the S3 location where model artifacts are stored. + // +kubebuilder:validation:Optional + ModelDataURL *string `json:"modelDataUrl,omitempty" tf:"model_data_url,omitempty"` + + // The Amazon Resource Name (ARN) of the model package to use to create the model. 
+ // +kubebuilder:validation:Optional + ModelPackageName *string `json:"modelPackageName,omitempty" tf:"model_package_name,omitempty"` +} + +type RepositoryAuthConfigInitParameters struct { + + // The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide. + RepositoryCredentialsProviderArn *string `json:"repositoryCredentialsProviderArn,omitempty" tf:"repository_credentials_provider_arn,omitempty"` +} + +type RepositoryAuthConfigObservation struct { + + // The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide. + RepositoryCredentialsProviderArn *string `json:"repositoryCredentialsProviderArn,omitempty" tf:"repository_credentials_provider_arn,omitempty"` +} + +type RepositoryAuthConfigParameters struct { + + // The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide. + // +kubebuilder:validation:Optional + RepositoryCredentialsProviderArn *string `json:"repositoryCredentialsProviderArn" tf:"repository_credentials_provider_arn,omitempty"` +} + +type S3DataSourceInitParameters struct { + + // How the model data is prepared. Allowed values are: None and Gzip. + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + + // The type of model data to deploy. Allowed values are: S3Object and S3Prefix. 
+ S3DataType *string `json:"s3DataType,omitempty" tf:"s3_data_type,omitempty"` + + // The S3 path of model data to deploy. + S3URI *string `json:"s3Uri,omitempty" tf:"s3_uri,omitempty"` +} + +type S3DataSourceObservation struct { + + // How the model data is prepared. Allowed values are: None and Gzip. + CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"` + + // The type of model data to deploy. Allowed values are: S3Object and S3Prefix. + S3DataType *string `json:"s3DataType,omitempty" tf:"s3_data_type,omitempty"` + + // The S3 path of model data to deploy. + S3URI *string `json:"s3Uri,omitempty" tf:"s3_uri,omitempty"` +} + +type S3DataSourceParameters struct { + + // How the model data is prepared. Allowed values are: None and Gzip. + // +kubebuilder:validation:Optional + CompressionType *string `json:"compressionType" tf:"compression_type,omitempty"` + + // The type of model data to deploy. Allowed values are: S3Object and S3Prefix. + // +kubebuilder:validation:Optional + S3DataType *string `json:"s3DataType" tf:"s3_data_type,omitempty"` + + // The S3 path of model data to deploy. 
+ // +kubebuilder:validation:Optional + S3URI *string `json:"s3Uri" tf:"s3_uri,omitempty"` +} + +type VPCConfigInitParameters struct { + + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type VPCConfigObservation struct { + + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type VPCConfigParameters struct { + + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds" tf:"security_group_ids,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=set + Subnets []*string `json:"subnets" tf:"subnets,omitempty"` +} + +// ModelSpec defines the desired state of Model +type ModelSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ModelParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ModelInitParameters `json:"initProvider,omitempty"` +} + +// ModelStatus defines the observed state of Model. 
+type ModelStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ModelObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Model is the Schema for the Models API. Provides a SageMaker model resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Model struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ModelSpec `json:"spec"` + Status ModelStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ModelList contains a list of Models +type ModelList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Model `json:"items"` +} + +// Repository type metadata. +var ( + Model_Kind = "Model" + Model_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Model_Kind}.String() + Model_KindAPIVersion = Model_Kind + "." + CRDGroupVersion.String() + Model_GroupVersionKind = CRDGroupVersion.WithKind(Model_Kind) +) + +func init() { + SchemeBuilder.Register(&Model{}, &ModelList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_notebookinstance_terraformed.go b/apis/sagemaker/v1beta2/zz_notebookinstance_terraformed.go new file mode 100755 index 0000000000..a7231e417b --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_notebookinstance_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this NotebookInstance +func (mg *NotebookInstance) GetTerraformResourceType() string { + return "aws_sagemaker_notebook_instance" +} + +// GetConnectionDetailsMapping for this NotebookInstance +func (tr *NotebookInstance) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this NotebookInstance +func (tr *NotebookInstance) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this NotebookInstance +func (tr *NotebookInstance) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this NotebookInstance +func (tr *NotebookInstance) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this NotebookInstance +func (tr *NotebookInstance) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this NotebookInstance +func (tr *NotebookInstance) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this NotebookInstance +func (tr *NotebookInstance) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this NotebookInstance +func (tr *NotebookInstance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this NotebookInstance using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *NotebookInstance) LateInitialize(attrs []byte) (bool, error) { + params := &NotebookInstanceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *NotebookInstance) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_notebookinstance_types.go b/apis/sagemaker/v1beta2/zz_notebookinstance_types.go new file mode 100755 index 0000000000..8415def134 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_notebookinstance_types.go @@ -0,0 +1,363 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type InstanceMetadataServiceConfigurationInitParameters struct { + + // Indicates the minimum IMDS version that the notebook instance supports. When passed "1" is passed. This means that both IMDSv1 and IMDSv2 are supported. Valid values are 1 and 2. + MinimumInstanceMetadataServiceVersion *string `json:"minimumInstanceMetadataServiceVersion,omitempty" tf:"minimum_instance_metadata_service_version,omitempty"` +} + +type InstanceMetadataServiceConfigurationObservation struct { + + // Indicates the minimum IMDS version that the notebook instance supports. When passed "1" is passed. This means that both IMDSv1 and IMDSv2 are supported. Valid values are 1 and 2. + MinimumInstanceMetadataServiceVersion *string `json:"minimumInstanceMetadataServiceVersion,omitempty" tf:"minimum_instance_metadata_service_version,omitempty"` +} + +type InstanceMetadataServiceConfigurationParameters struct { + + // Indicates the minimum IMDS version that the notebook instance supports. When passed "1" is passed. This means that both IMDSv1 and IMDSv2 are supported. Valid values are 1 and 2. 
+ // +kubebuilder:validation:Optional + MinimumInstanceMetadataServiceVersion *string `json:"minimumInstanceMetadataServiceVersion,omitempty" tf:"minimum_instance_metadata_service_version,omitempty"` +} + +type NotebookInstanceInitParameters struct { + + // A list of Elastic Inference (EI) instance types to associate with this notebook instance. See Elastic Inference Accelerator for more details. Valid values: ml.eia1.medium, ml.eia1.large, ml.eia1.xlarge, ml.eia2.medium, ml.eia2.large, ml.eia2.xlarge. + // +listType=set + AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"` + + // An array of up to three Git repositories to associate with the notebook instance. + // These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. + // +listType=set + AdditionalCodeRepositories []*string `json:"additionalCodeRepositories,omitempty" tf:"additional_code_repositories,omitempty"` + + // The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.CodeRepository + DefaultCodeRepository *string `json:"defaultCodeRepository,omitempty" tf:"default_code_repository,omitempty"` + + // Reference to a CodeRepository in sagemaker to populate defaultCodeRepository. + // +kubebuilder:validation:Optional + DefaultCodeRepositoryRef *v1.Reference `json:"defaultCodeRepositoryRef,omitempty" tf:"-"` + + // Selector for a CodeRepository in sagemaker to populate defaultCodeRepository. 
+ // +kubebuilder:validation:Optional + DefaultCodeRepositorySelector *v1.Selector `json:"defaultCodeRepositorySelector,omitempty" tf:"-"` + + // Set to Disabled to disable internet access to notebook. Requires security_groups and subnet_id to be set. Supported values: Enabled (Default) or Disabled. If set to Disabled, the notebook instance will be able to access resources only in your VPC, and will not be able to connect to Amazon SageMaker training and endpoint services unless your configure a NAT Gateway in your VPC. + DirectInternetAccess *string `json:"directInternetAccess,omitempty" tf:"direct_internet_access,omitempty"` + + // Information on the IMDS configuration of the notebook instance. Conflicts with instance_metadata_service_configuration. see details below. + InstanceMetadataServiceConfiguration *InstanceMetadataServiceConfigurationInitParameters `json:"instanceMetadataServiceConfiguration,omitempty" tf:"instance_metadata_service_configuration,omitempty"` + + // The name of ML compute instance type. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` + + // The name of a lifecycle configuration to associate with the notebook instance. 
+ LifecycleConfigName *string `json:"lifecycleConfigName,omitempty" tf:"lifecycle_config_name,omitempty"` + + // The platform identifier of the notebook instance runtime environment. This value can be either notebook-al1-v1, notebook-al2-v1, or notebook-al2-v2, depending on which version of Amazon Linux you require. + PlatformIdentifier *string `json:"platformIdentifier,omitempty" tf:"platform_identifier,omitempty"` + + // The ARN of the IAM role to be used by the notebook instance which allows SageMaker to call other services on your behalf. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Whether root access is Enabled or Disabled for users of the notebook instance. The default value is Enabled. + RootAccess *string `json:"rootAccess,omitempty" tf:"root_access,omitempty"` + + // The associated security groups. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The VPC subnet ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` +} + +type NotebookInstanceObservation struct { + + // A list of Elastic Inference (EI) instance types to associate with this notebook instance. See Elastic Inference Accelerator for more details. Valid values: ml.eia1.medium, ml.eia1.large, ml.eia1.xlarge, ml.eia2.medium, ml.eia2.large, ml.eia2.xlarge. + // +listType=set + AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"` + + // An array of up to three Git repositories to associate with the notebook instance. + // These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. + // +listType=set + AdditionalCodeRepositories []*string `json:"additionalCodeRepositories,omitempty" tf:"additional_code_repositories,omitempty"` + + // The Amazon Resource Name (ARN) assigned by AWS to this notebook instance. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. + DefaultCodeRepository *string `json:"defaultCodeRepository,omitempty" tf:"default_code_repository,omitempty"` + + // Set to Disabled to disable internet access to notebook. Requires security_groups and subnet_id to be set. 
Supported values: Enabled (Default) or Disabled. If set to Disabled, the notebook instance will be able to access resources only in your VPC, and will not be able to connect to Amazon SageMaker training and endpoint services unless your configure a NAT Gateway in your VPC. + DirectInternetAccess *string `json:"directInternetAccess,omitempty" tf:"direct_internet_access,omitempty"` + + // The name of the notebook instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Information on the IMDS configuration of the notebook instance. Conflicts with instance_metadata_service_configuration. see details below. + InstanceMetadataServiceConfiguration *InstanceMetadataServiceConfigurationObservation `json:"instanceMetadataServiceConfiguration,omitempty" tf:"instance_metadata_service_configuration,omitempty"` + + // The name of ML compute instance type. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // The name of a lifecycle configuration to associate with the notebook instance. + LifecycleConfigName *string `json:"lifecycleConfigName,omitempty" tf:"lifecycle_config_name,omitempty"` + + // The network interface ID that Amazon SageMaker created at the time of creating the instance. Only available when setting subnet_id. + NetworkInterfaceID *string `json:"networkInterfaceId,omitempty" tf:"network_interface_id,omitempty"` + + // The platform identifier of the notebook instance runtime environment. This value can be either notebook-al1-v1, notebook-al2-v1, or notebook-al2-v2, depending on which version of Amazon Linux you require. 
+ PlatformIdentifier *string `json:"platformIdentifier,omitempty" tf:"platform_identifier,omitempty"` + + // The ARN of the IAM role to be used by the notebook instance which allows SageMaker to call other services on your behalf. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Whether root access is Enabled or Disabled for users of the notebook instance. The default value is Enabled. + RootAccess *string `json:"rootAccess,omitempty" tf:"root_access,omitempty"` + + // The associated security groups. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The VPC subnet ID. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The URL that you use to connect to the Jupyter notebook that is running in your notebook instance. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` +} + +type NotebookInstanceParameters struct { + + // A list of Elastic Inference (EI) instance types to associate with this notebook instance. See Elastic Inference Accelerator for more details. Valid values: ml.eia1.medium, ml.eia1.large, ml.eia1.xlarge, ml.eia2.medium, ml.eia2.large, ml.eia2.xlarge. 
+ // +kubebuilder:validation:Optional + // +listType=set + AcceleratorTypes []*string `json:"acceleratorTypes,omitempty" tf:"accelerator_types,omitempty"` + + // An array of up to three Git repositories to associate with the notebook instance. + // These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. + // +kubebuilder:validation:Optional + // +listType=set + AdditionalCodeRepositories []*string `json:"additionalCodeRepositories,omitempty" tf:"additional_code_repositories,omitempty"` + + // The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.CodeRepository + // +kubebuilder:validation:Optional + DefaultCodeRepository *string `json:"defaultCodeRepository,omitempty" tf:"default_code_repository,omitempty"` + + // Reference to a CodeRepository in sagemaker to populate defaultCodeRepository. + // +kubebuilder:validation:Optional + DefaultCodeRepositoryRef *v1.Reference `json:"defaultCodeRepositoryRef,omitempty" tf:"-"` + + // Selector for a CodeRepository in sagemaker to populate defaultCodeRepository. + // +kubebuilder:validation:Optional + DefaultCodeRepositorySelector *v1.Selector `json:"defaultCodeRepositorySelector,omitempty" tf:"-"` + + // Set to Disabled to disable internet access to notebook. Requires security_groups and subnet_id to be set. Supported values: Enabled (Default) or Disabled. 
If set to Disabled, the notebook instance will be able to access resources only in your VPC, and will not be able to connect to Amazon SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC.
+	// +kubebuilder:validation:Optional
+	DirectInternetAccess *string `json:"directInternetAccess,omitempty" tf:"direct_internet_access,omitempty"`
+
+	// Information on the IMDS configuration of the notebook instance. Conflicts with instance_metadata_service_configuration. see details below.
+	// +kubebuilder:validation:Optional
+	InstanceMetadataServiceConfiguration *InstanceMetadataServiceConfigurationParameters `json:"instanceMetadataServiceConfiguration,omitempty" tf:"instance_metadata_service_configuration,omitempty"`
+
+	// The name of ML compute instance type.
+	// +kubebuilder:validation:Optional
+	InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"`
+
+	// The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key
+	// +kubebuilder:validation:Optional
+	KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"`
+
+	// Reference to a Key in kms to populate kmsKeyId.
+	// +kubebuilder:validation:Optional
+	KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"`
+
+	// Selector for a Key in kms to populate kmsKeyId.
+	// +kubebuilder:validation:Optional
+	KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"`
+
+	// The name of a lifecycle configuration to associate with the notebook instance.
+	// +kubebuilder:validation:Optional
+	LifecycleConfigName *string `json:"lifecycleConfigName,omitempty" tf:"lifecycle_config_name,omitempty"`
+
+	// The platform identifier of the notebook instance runtime environment.
This value can be either notebook-al1-v1, notebook-al2-v1, or notebook-al2-v2, depending on which version of Amazon Linux you require. + // +kubebuilder:validation:Optional + PlatformIdentifier *string `json:"platformIdentifier,omitempty" tf:"platform_identifier,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The ARN of the IAM role to be used by the notebook instance which allows SageMaker to call other services on your behalf. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Whether root access is Enabled or Disabled for users of the notebook instance. The default value is Enabled. + // +kubebuilder:validation:Optional + RootAccess *string `json:"rootAccess,omitempty" tf:"root_access,omitempty"` + + // The associated security groups. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The VPC subnet ID. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in ec2 to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in ec2 to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. + // +kubebuilder:validation:Optional + VolumeSize *float64 `json:"volumeSize,omitempty" tf:"volume_size,omitempty"` +} + +// NotebookInstanceSpec defines the desired state of NotebookInstance +type NotebookInstanceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider NotebookInstanceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider NotebookInstanceInitParameters `json:"initProvider,omitempty"` +} + +// NotebookInstanceStatus defines the observed state of NotebookInstance. +type NotebookInstanceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider NotebookInstanceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NotebookInstance is the Schema for the NotebookInstances API. 
Provides a SageMaker Notebook Instance resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type NotebookInstance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instanceType) || (has(self.initProvider) && has(self.initProvider.instanceType))",message="spec.forProvider.instanceType is a required parameter" + Spec NotebookInstanceSpec `json:"spec"` + Status NotebookInstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NotebookInstanceList contains a list of NotebookInstances +type NotebookInstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NotebookInstance `json:"items"` +} + +// Repository type metadata. +var ( + NotebookInstance_Kind = "NotebookInstance" + NotebookInstance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: NotebookInstance_Kind}.String() + NotebookInstance_KindAPIVersion = NotebookInstance_Kind + "." 
+ CRDGroupVersion.String() + NotebookInstance_GroupVersionKind = CRDGroupVersion.WithKind(NotebookInstance_Kind) +) + +func init() { + SchemeBuilder.Register(&NotebookInstance{}, &NotebookInstanceList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_space_terraformed.go b/apis/sagemaker/v1beta2/zz_space_terraformed.go new file mode 100755 index 0000000000..87a97d4a03 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_space_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Space +func (mg *Space) GetTerraformResourceType() string { + return "aws_sagemaker_space" +} + +// GetConnectionDetailsMapping for this Space +func (tr *Space) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Space +func (tr *Space) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Space +func (tr *Space) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Space +func (tr *Space) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Space +func (tr *Space) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Space
+func (tr *Space) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Space
+func (tr *Space) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Space
+func (tr *Space) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Space using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Space) LateInitialize(attrs []byte) (bool, error) { + params := &SpaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Space) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_space_types.go b/apis/sagemaker/v1beta2/zz_space_types.go new file mode 100755 index 0000000000..5095ad3757 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_space_types.go @@ -0,0 +1,823 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomFileSystemInitParameters struct { + + // A custom file system in Amazon EFS. see EFS File System below. + EFSFileSystem *EFSFileSystemInitParameters `json:"efsFileSystem,omitempty" tf:"efs_file_system,omitempty"` +} + +type CustomFileSystemObservation struct { + + // A custom file system in Amazon EFS. see EFS File System below. + EFSFileSystem *EFSFileSystemObservation `json:"efsFileSystem,omitempty" tf:"efs_file_system,omitempty"` +} + +type CustomFileSystemParameters struct { + + // A custom file system in Amazon EFS. see EFS File System below. 
+ // +kubebuilder:validation:Optional + EFSFileSystem *EFSFileSystemParameters `json:"efsFileSystem" tf:"efs_file_system,omitempty"` +} + +type EBSStorageSettingsInitParameters struct { + EBSVolumeSizeInGb *float64 `json:"ebsVolumeSizeInGb,omitempty" tf:"ebs_volume_size_in_gb,omitempty"` +} + +type EBSStorageSettingsObservation struct { + EBSVolumeSizeInGb *float64 `json:"ebsVolumeSizeInGb,omitempty" tf:"ebs_volume_size_in_gb,omitempty"` +} + +type EBSStorageSettingsParameters struct { + + // +kubebuilder:validation:Optional + EBSVolumeSizeInGb *float64 `json:"ebsVolumeSizeInGb" tf:"ebs_volume_size_in_gb,omitempty"` +} + +type EFSFileSystemInitParameters struct { + + // The ID of your Amazon EFS file system. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` +} + +type EFSFileSystemObservation struct { + + // The ID of your Amazon EFS file system. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` +} + +type EFSFileSystemParameters struct { + + // The ID of your Amazon EFS file system. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId" tf:"file_system_id,omitempty"` +} + +type OwnershipSettingsInitParameters struct { + + // The user profile who is the owner of the private space. + OwnerUserProfileName *string `json:"ownerUserProfileName,omitempty" tf:"owner_user_profile_name,omitempty"` +} + +type OwnershipSettingsObservation struct { + + // The user profile who is the owner of the private space. + OwnerUserProfileName *string `json:"ownerUserProfileName,omitempty" tf:"owner_user_profile_name,omitempty"` +} + +type OwnershipSettingsParameters struct { + + // The user profile who is the owner of the private space. + // +kubebuilder:validation:Optional + OwnerUserProfileName *string `json:"ownerUserProfileName" tf:"owner_user_profile_name,omitempty"` +} + +type SpaceInitParameters struct { + + // The ID of the associated Domain. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // Reference to a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDRef *v1.Reference `json:"domainIdRef,omitempty" tf:"-"` + + // Selector for a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDSelector *v1.Selector `json:"domainIdSelector,omitempty" tf:"-"` + + // A collection of ownership settings. See Ownership Settings below. + OwnershipSettings *OwnershipSettingsInitParameters `json:"ownershipSettings,omitempty" tf:"ownership_settings,omitempty"` + + // The name of the space that appears in the SageMaker Studio UI. + SpaceDisplayName *string `json:"spaceDisplayName,omitempty" tf:"space_display_name,omitempty"` + + // The name of the space. + SpaceName *string `json:"spaceName,omitempty" tf:"space_name,omitempty"` + + // A collection of space settings. See Space Settings below. + SpaceSettings *SpaceSettingsInitParameters `json:"spaceSettings,omitempty" tf:"space_settings,omitempty"` + + // A collection of space sharing settings. See Space Sharing Settings below. + SpaceSharingSettings *SpaceSharingSettingsInitParameters `json:"spaceSharingSettings,omitempty" tf:"space_sharing_settings,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SpaceObservation struct { + + // The space's Amazon Resource Name (ARN). + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ID of the associated Domain. + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // The ID of the space's profile in the Amazon Elastic File System volume. 
+ HomeEFSFileSystemUID *string `json:"homeEfsFileSystemUid,omitempty" tf:"home_efs_file_system_uid,omitempty"` + + // The space's Amazon Resource Name (ARN). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A collection of ownership settings. See Ownership Settings below. + OwnershipSettings *OwnershipSettingsObservation `json:"ownershipSettings,omitempty" tf:"ownership_settings,omitempty"` + + // The name of the space that appears in the SageMaker Studio UI. + SpaceDisplayName *string `json:"spaceDisplayName,omitempty" tf:"space_display_name,omitempty"` + + // The name of the space. + SpaceName *string `json:"spaceName,omitempty" tf:"space_name,omitempty"` + + // A collection of space settings. See Space Settings below. + SpaceSettings *SpaceSettingsObservation `json:"spaceSettings,omitempty" tf:"space_settings,omitempty"` + + // A collection of space sharing settings. See Space Sharing Settings below. + SpaceSharingSettings *SpaceSharingSettingsObservation `json:"spaceSharingSettings,omitempty" tf:"space_sharing_settings,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Returns the URL of the space. If the space is created with Amazon Web Services IAM Identity Center (Successor to Amazon Web Services Single Sign-On) authentication, users can navigate to the URL after appending the respective redirect parameter for the application type to be federated through Amazon Web Services IAM Identity Center. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type SpaceParameters struct { + + // The ID of the associated Domain. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // Reference to a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDRef *v1.Reference `json:"domainIdRef,omitempty" tf:"-"` + + // Selector for a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDSelector *v1.Selector `json:"domainIdSelector,omitempty" tf:"-"` + + // A collection of ownership settings. See Ownership Settings below. + // +kubebuilder:validation:Optional + OwnershipSettings *OwnershipSettingsParameters `json:"ownershipSettings,omitempty" tf:"ownership_settings,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The name of the space that appears in the SageMaker Studio UI. + // +kubebuilder:validation:Optional + SpaceDisplayName *string `json:"spaceDisplayName,omitempty" tf:"space_display_name,omitempty"` + + // The name of the space. + // +kubebuilder:validation:Optional + SpaceName *string `json:"spaceName,omitempty" tf:"space_name,omitempty"` + + // A collection of space settings. See Space Settings below. + // +kubebuilder:validation:Optional + SpaceSettings *SpaceSettingsParameters `json:"spaceSettings,omitempty" tf:"space_settings,omitempty"` + + // A collection of space sharing settings. See Space Sharing Settings below. + // +kubebuilder:validation:Optional + SpaceSharingSettings *SpaceSharingSettingsParameters `json:"spaceSharingSettings,omitempty" tf:"space_sharing_settings,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecObservation struct { + + // The instance type. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. 
+ SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecParameters struct { + + // The instance type. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsCodeEditorAppSettingsInitParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type SpaceSettingsCodeEditorAppSettingsObservation struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. 
+ DefaultResourceSpec *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type SpaceSettingsCodeEditorAppSettingsParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *SpaceSettingsCodeEditorAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec" tf:"default_resource_spec,omitempty"` +} + +type SpaceSettingsInitParameters struct { + + // The type of app created within the space. + AppType *string `json:"appType,omitempty" tf:"app_type,omitempty"` + + // The Code Editor application settings. See Code Editor App Settings below. + CodeEditorAppSettings *SpaceSettingsCodeEditorAppSettingsInitParameters `json:"codeEditorAppSettings,omitempty" tf:"code_editor_app_settings,omitempty"` + + // A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. See Custom File System below. + CustomFileSystem []CustomFileSystemInitParameters `json:"customFileSystem,omitempty" tf:"custom_file_system,omitempty"` + + // The settings for the JupyterLab application. See Jupyter Lab App Settings below. + JupyterLabAppSettings *SpaceSettingsJupyterLabAppSettingsInitParameters `json:"jupyterLabAppSettings,omitempty" tf:"jupyter_lab_app_settings,omitempty"` + + // The Jupyter server's app settings. See Jupyter Server App Settings below. + JupyterServerAppSettings *SpaceSettingsJupyterServerAppSettingsInitParameters `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See Kernel Gateway App Settings below. 
+ KernelGatewayAppSettings *SpaceSettingsKernelGatewayAppSettingsInitParameters `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + SpaceStorageSettings *SpaceSettingsSpaceStorageSettingsInitParameters `json:"spaceStorageSettings,omitempty" tf:"space_storage_settings,omitempty"` +} + +type SpaceSettingsJupyterLabAppSettingsCodeRepositoryInitParameters struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type SpaceSettingsJupyterLabAppSettingsCodeRepositoryObservation struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type SpaceSettingsJupyterLabAppSettingsCodeRepositoryParameters struct { + + // The URL of the Git repository. + // +kubebuilder:validation:Optional + RepositoryURL *string `json:"repositoryUrl" tf:"repository_url,omitempty"` +} + +type SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. 
+ SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecObservation struct { + + // The instance type. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecParameters struct { + + // The instance type. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. 
+ // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsJupyterLabAppSettingsInitParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + CodeRepository []SpaceSettingsJupyterLabAppSettingsCodeRepositoryInitParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type SpaceSettingsJupyterLabAppSettingsObservation struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + CodeRepository []SpaceSettingsJupyterLabAppSettingsCodeRepositoryObservation `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type SpaceSettingsJupyterLabAppSettingsParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. 
+ // +kubebuilder:validation:Optional + CodeRepository []SpaceSettingsJupyterLabAppSettingsCodeRepositoryParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *SpaceSettingsJupyterLabAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec" tf:"default_resource_spec,omitempty"` +} + +type SpaceSettingsJupyterServerAppSettingsCodeRepositoryInitParameters struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type SpaceSettingsJupyterServerAppSettingsCodeRepositoryObservation struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type SpaceSettingsJupyterServerAppSettingsCodeRepositoryParameters struct { + + // The URL of the Git repository. + // +kubebuilder:validation:Optional + RepositoryURL *string `json:"repositoryUrl" tf:"repository_url,omitempty"` +} + +type SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. 
+ SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecObservation struct { + + // The instance type. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecParameters struct { + + // The instance type. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. 
+ // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsJupyterServerAppSettingsInitParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + CodeRepository []SpaceSettingsJupyterServerAppSettingsCodeRepositoryInitParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type SpaceSettingsJupyterServerAppSettingsObservation struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + CodeRepository []SpaceSettingsJupyterServerAppSettingsCodeRepositoryObservation `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. 
+ DefaultResourceSpec *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type SpaceSettingsJupyterServerAppSettingsParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + // +kubebuilder:validation:Optional + CodeRepository []SpaceSettingsJupyterServerAppSettingsCodeRepositoryParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *SpaceSettingsJupyterServerAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type SpaceSettingsKernelGatewayAppSettingsCustomImageInitParameters struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type SpaceSettingsKernelGatewayAppSettingsCustomImageObservation struct { + + // The name of the App Image Config. 
+ AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type SpaceSettingsKernelGatewayAppSettingsCustomImageParameters struct { + + // The name of the App Image Config. + // +kubebuilder:validation:Optional + AppImageConfigName *string `json:"appImageConfigName" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + // +kubebuilder:validation:Optional + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation struct { + + // The instance type. 
+ InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters struct { + + // The instance type. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. 
+ // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type SpaceSettingsKernelGatewayAppSettingsInitParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. + CustomImage []SpaceSettingsKernelGatewayAppSettingsCustomImageInitParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type SpaceSettingsKernelGatewayAppSettingsObservation struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. + CustomImage []SpaceSettingsKernelGatewayAppSettingsCustomImageObservation `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type SpaceSettingsKernelGatewayAppSettingsParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. 
see Custom Image below. + // +kubebuilder:validation:Optional + CustomImage []SpaceSettingsKernelGatewayAppSettingsCustomImageParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *SpaceSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type SpaceSettingsObservation struct { + + // The type of app created within the space. + AppType *string `json:"appType,omitempty" tf:"app_type,omitempty"` + + // The Code Editor application settings. See Code Editor App Settings below. + CodeEditorAppSettings *SpaceSettingsCodeEditorAppSettingsObservation `json:"codeEditorAppSettings,omitempty" tf:"code_editor_app_settings,omitempty"` + + // A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. See Custom File System below. + CustomFileSystem []CustomFileSystemObservation `json:"customFileSystem,omitempty" tf:"custom_file_system,omitempty"` + + // The settings for the JupyterLab application. See Jupyter Lab App Settings below. + JupyterLabAppSettings *SpaceSettingsJupyterLabAppSettingsObservation `json:"jupyterLabAppSettings,omitempty" tf:"jupyter_lab_app_settings,omitempty"` + + // The Jupyter server's app settings. See Jupyter Server App Settings below. + JupyterServerAppSettings *SpaceSettingsJupyterServerAppSettingsObservation `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See Kernel Gateway App Settings below. 
+ KernelGatewayAppSettings *SpaceSettingsKernelGatewayAppSettingsObservation `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + SpaceStorageSettings *SpaceSettingsSpaceStorageSettingsObservation `json:"spaceStorageSettings,omitempty" tf:"space_storage_settings,omitempty"` +} + +type SpaceSettingsParameters struct { + + // The type of app created within the space. + // +kubebuilder:validation:Optional + AppType *string `json:"appType,omitempty" tf:"app_type,omitempty"` + + // The Code Editor application settings. See Code Editor App Settings below. + // +kubebuilder:validation:Optional + CodeEditorAppSettings *SpaceSettingsCodeEditorAppSettingsParameters `json:"codeEditorAppSettings,omitempty" tf:"code_editor_app_settings,omitempty"` + + // A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. See Custom File System below. + // +kubebuilder:validation:Optional + CustomFileSystem []CustomFileSystemParameters `json:"customFileSystem,omitempty" tf:"custom_file_system,omitempty"` + + // The settings for the JupyterLab application. See Jupyter Lab App Settings below. + // +kubebuilder:validation:Optional + JupyterLabAppSettings *SpaceSettingsJupyterLabAppSettingsParameters `json:"jupyterLabAppSettings,omitempty" tf:"jupyter_lab_app_settings,omitempty"` + + // The Jupyter server's app settings. See Jupyter Server App Settings below. + // +kubebuilder:validation:Optional + JupyterServerAppSettings *SpaceSettingsJupyterServerAppSettingsParameters `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See Kernel Gateway App Settings below. 
+ // +kubebuilder:validation:Optional + KernelGatewayAppSettings *SpaceSettingsKernelGatewayAppSettingsParameters `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // +kubebuilder:validation:Optional + SpaceStorageSettings *SpaceSettingsSpaceStorageSettingsParameters `json:"spaceStorageSettings,omitempty" tf:"space_storage_settings,omitempty"` +} + +type SpaceSettingsSpaceStorageSettingsInitParameters struct { + EBSStorageSettings *EBSStorageSettingsInitParameters `json:"ebsStorageSettings,omitempty" tf:"ebs_storage_settings,omitempty"` +} + +type SpaceSettingsSpaceStorageSettingsObservation struct { + EBSStorageSettings *EBSStorageSettingsObservation `json:"ebsStorageSettings,omitempty" tf:"ebs_storage_settings,omitempty"` +} + +type SpaceSettingsSpaceStorageSettingsParameters struct { + + // +kubebuilder:validation:Optional + EBSStorageSettings *EBSStorageSettingsParameters `json:"ebsStorageSettings" tf:"ebs_storage_settings,omitempty"` +} + +type SpaceSharingSettingsInitParameters struct { + + // Specifies the sharing type of the space. Valid values are Private and Shared. + SharingType *string `json:"sharingType,omitempty" tf:"sharing_type,omitempty"` +} + +type SpaceSharingSettingsObservation struct { + + // Specifies the sharing type of the space. Valid values are Private and Shared. + SharingType *string `json:"sharingType,omitempty" tf:"sharing_type,omitempty"` +} + +type SpaceSharingSettingsParameters struct { + + // Specifies the sharing type of the space. Valid values are Private and Shared. + // +kubebuilder:validation:Optional + SharingType *string `json:"sharingType" tf:"sharing_type,omitempty"` +} + +// SpaceSpec defines the desired state of Space +type SpaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpaceInitParameters `json:"initProvider,omitempty"` +} + +// SpaceStatus defines the observed state of Space. +type SpaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Space is the Schema for the Spaces API. Provides a SageMaker Space resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Space struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spaceName) || (has(self.initProvider) && has(self.initProvider.spaceName))",message="spec.forProvider.spaceName is a required parameter" + Spec SpaceSpec `json:"spec"` + Status SpaceStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// SpaceList contains a list of Spaces +type SpaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Space `json:"items"` +} + +// Repository type metadata. +var ( + Space_Kind = "Space" + Space_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Space_Kind}.String() + Space_KindAPIVersion = Space_Kind + "." + CRDGroupVersion.String() + Space_GroupVersionKind = CRDGroupVersion.WithKind(Space_Kind) +) + +func init() { + SchemeBuilder.Register(&Space{}, &SpaceList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_userprofile_terraformed.go b/apis/sagemaker/v1beta2/zz_userprofile_terraformed.go new file mode 100755 index 0000000000..4e280001a8 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_userprofile_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this UserProfile +func (mg *UserProfile) GetTerraformResourceType() string { + return "aws_sagemaker_user_profile" +} + +// GetConnectionDetailsMapping for this UserProfile +func (tr *UserProfile) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this UserProfile +func (tr *UserProfile) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this UserProfile +func (tr *UserProfile) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) 
+} + +// GetID returns ID of underlying Terraform resource of this UserProfile +func (tr *UserProfile) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this UserProfile +func (tr *UserProfile) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this UserProfile +func (tr *UserProfile) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this UserProfile +func (tr *UserProfile) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this UserProfile +func (tr *UserProfile) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this UserProfile using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *UserProfile) LateInitialize(attrs []byte) (bool, error) { + params := &UserProfileParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *UserProfile) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_userprofile_types.go b/apis/sagemaker/v1beta2/zz_userprofile_types.go new file mode 100755 index 0000000000..2c8fe54852 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_userprofile_types.go @@ -0,0 +1,1508 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CanvasAppSettingsDirectDeploySettingsInitParameters struct { + + // Describes whether model deployment permissions are enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsDirectDeploySettingsObservation struct { + + // Describes whether model deployment permissions are enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsDirectDeploySettingsParameters struct { + + // Describes whether model deployment permissions are enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsIdentityProviderOauthSettingsInitParameters struct { + + // The name of the data source that you're connecting to. Canvas currently supports OAuth for Snowflake and Salesforce Data Cloud. Valid values are SalesforceGenie and Snowflake. + DataSourceName *string `json:"dataSourceName,omitempty" tf:"data_source_name,omitempty"` + + // The ARN of an Amazon Web Services Secrets Manager secret that stores the credentials from your identity provider, such as the client ID and secret, authorization URL, and token URL. + SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` + + // Describes whether OAuth for a data source is enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsIdentityProviderOauthSettingsObservation struct { + + // The name of the data source that you're connecting to. Canvas currently supports OAuth for Snowflake and Salesforce Data Cloud. Valid values are SalesforceGenie and Snowflake. 
+ DataSourceName *string `json:"dataSourceName,omitempty" tf:"data_source_name,omitempty"` + + // The ARN of an Amazon Web Services Secrets Manager secret that stores the credentials from your identity provider, such as the client ID and secret, authorization URL, and token URL. + SecretArn *string `json:"secretArn,omitempty" tf:"secret_arn,omitempty"` + + // Describes whether OAuth for a data source is enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsIdentityProviderOauthSettingsParameters struct { + + // The name of the data source that you're connecting to. Canvas currently supports OAuth for Snowflake and Salesforce Data Cloud. Valid values are SalesforceGenie and Snowflake. + // +kubebuilder:validation:Optional + DataSourceName *string `json:"dataSourceName,omitempty" tf:"data_source_name,omitempty"` + + // The ARN of an Amazon Web Services Secrets Manager secret that stores the credentials from your identity provider, such as the client ID and secret, authorization URL, and token URL. + // +kubebuilder:validation:Optional + SecretArn *string `json:"secretArn" tf:"secret_arn,omitempty"` + + // Describes whether OAuth for a data source is enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsKendraSettingsInitParameters struct { + + // Describes whether the document querying feature is enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsKendraSettingsObservation struct { + + // Describes whether the document querying feature is enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. 
+ Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsKendraSettingsParameters struct { + + // Describes whether the document querying feature is enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsModelRegisterSettingsInitParameters struct { + + // The Amazon Resource Name (ARN) of the SageMaker model registry account. Required only to register model versions created by a different SageMaker Canvas AWS account than the AWS account in which SageMaker model registry is set up. + CrossAccountModelRegisterRoleArn *string `json:"crossAccountModelRegisterRoleArn,omitempty" tf:"cross_account_model_register_role_arn,omitempty"` + + // Describes whether the integration to the model registry is enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsModelRegisterSettingsObservation struct { + + // The Amazon Resource Name (ARN) of the SageMaker model registry account. Required only to register model versions created by a different SageMaker Canvas AWS account than the AWS account in which SageMaker model registry is set up. + CrossAccountModelRegisterRoleArn *string `json:"crossAccountModelRegisterRoleArn,omitempty" tf:"cross_account_model_register_role_arn,omitempty"` + + // Describes whether the integration to the model registry is enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsModelRegisterSettingsParameters struct { + + // The Amazon Resource Name (ARN) of the SageMaker model registry account. 
Required only to register model versions created by a different SageMaker Canvas AWS account than the AWS account in which SageMaker model registry is set up. + // +kubebuilder:validation:Optional + CrossAccountModelRegisterRoleArn *string `json:"crossAccountModelRegisterRoleArn,omitempty" tf:"cross_account_model_register_role_arn,omitempty"` + + // Describes whether the integration to the model registry is enabled or disabled in the Canvas application. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsTimeSeriesForecastingSettingsInitParameters struct { + + // The IAM role that Canvas passes to Amazon Forecast for time series forecasting. By default, Canvas uses the execution role specified in the UserProfile that launches the Canvas app. If an execution role is not specified in the UserProfile, Canvas uses the execution role specified in the Domain that owns the UserProfile. To allow time series forecasting, this IAM role should have the AmazonSageMakerCanvasForecastAccess policy attached and forecast.amazonaws.com added in the trust relationship as a service principal. + AmazonForecastRoleArn *string `json:"amazonForecastRoleArn,omitempty" tf:"amazon_forecast_role_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsTimeSeriesForecastingSettingsObservation struct { + + // The IAM role that Canvas passes to Amazon Forecast for time series forecasting. By default, Canvas uses the execution role specified in the UserProfile that launches the Canvas app. If an execution role is not specified in the UserProfile, Canvas uses the execution role specified in the Domain that owns the UserProfile. 
To allow time series forecasting, this IAM role should have the AmazonSageMakerCanvasForecastAccess policy attached and forecast.amazonaws.com added in the trust relationship as a service principal. + AmazonForecastRoleArn *string `json:"amazonForecastRoleArn,omitempty" tf:"amazon_forecast_role_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsTimeSeriesForecastingSettingsParameters struct { + + // The IAM role that Canvas passes to Amazon Forecast for time series forecasting. By default, Canvas uses the execution role specified in the UserProfile that launches the Canvas app. If an execution role is not specified in the UserProfile, Canvas uses the execution role specified in the Domain that owns the UserProfile. To allow time series forecasting, this IAM role should have the AmazonSageMakerCanvasForecastAccess policy attached and forecast.amazonaws.com added in the trust relationship as a service principal. + // +kubebuilder:validation:Optional + AmazonForecastRoleArn *string `json:"amazonForecastRoleArn,omitempty" tf:"amazon_forecast_role_arn,omitempty"` + + // Describes whether time series forecasting is enabled or disabled in the Canvas app. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type CanvasAppSettingsWorkspaceSettingsInitParameters struct { + + // The Amazon S3 bucket used to store artifacts generated by Canvas. Updating the Amazon S3 location impacts existing configuration settings, and Canvas users no longer have access to their artifacts. Canvas users must log out and log back in to apply the new location. 
+ S3ArtifactPath *string `json:"s3ArtifactPath,omitempty" tf:"s3_artifact_path,omitempty"` + + // The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket. + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` +} + +type CanvasAppSettingsWorkspaceSettingsObservation struct { + + // The Amazon S3 bucket used to store artifacts generated by Canvas. Updating the Amazon S3 location impacts existing configuration settings, and Canvas users no longer have access to their artifacts. Canvas users must log out and log back in to apply the new location. + S3ArtifactPath *string `json:"s3ArtifactPath,omitempty" tf:"s3_artifact_path,omitempty"` + + // The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket. + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` +} + +type CanvasAppSettingsWorkspaceSettingsParameters struct { + + // The Amazon S3 bucket used to store artifacts generated by Canvas. Updating the Amazon S3 location impacts existing configuration settings, and Canvas users no longer have access to their artifacts. Canvas users must log out and log back in to apply the new location. + // +kubebuilder:validation:Optional + S3ArtifactPath *string `json:"s3ArtifactPath,omitempty" tf:"s3_artifact_path,omitempty"` + + // The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket. + // +kubebuilder:validation:Optional + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` +} + +type CustomFileSystemConfigEFSFileSystemConfigInitParameters struct { + + // The ID of your Amazon EFS file system. 
+ FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below. + FileSystemPath *string `json:"fileSystemPath,omitempty" tf:"file_system_path,omitempty"` +} + +type CustomFileSystemConfigEFSFileSystemConfigObservation struct { + + // The ID of your Amazon EFS file system. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below. + FileSystemPath *string `json:"fileSystemPath,omitempty" tf:"file_system_path,omitempty"` +} + +type CustomFileSystemConfigEFSFileSystemConfigParameters struct { + + // The ID of your Amazon EFS file system. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId" tf:"file_system_id,omitempty"` + + // The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below. + // +kubebuilder:validation:Optional + FileSystemPath *string `json:"fileSystemPath,omitempty" tf:"file_system_path,omitempty"` +} + +type SpaceStorageSettingsDefaultEBSStorageSettingsInitParameters struct { + + // The default size of the EBS storage volume for a private space. + DefaultEBSVolumeSizeInGb *float64 `json:"defaultEbsVolumeSizeInGb,omitempty" tf:"default_ebs_volume_size_in_gb,omitempty"` + + // The maximum size of the EBS storage volume for a private space. + MaximumEBSVolumeSizeInGb *float64 `json:"maximumEbsVolumeSizeInGb,omitempty" tf:"maximum_ebs_volume_size_in_gb,omitempty"` +} + +type SpaceStorageSettingsDefaultEBSStorageSettingsObservation struct { + + // The default size of the EBS storage volume for a private space. 
+ DefaultEBSVolumeSizeInGb *float64 `json:"defaultEbsVolumeSizeInGb,omitempty" tf:"default_ebs_volume_size_in_gb,omitempty"` + + // The maximum size of the EBS storage volume for a private space. + MaximumEBSVolumeSizeInGb *float64 `json:"maximumEbsVolumeSizeInGb,omitempty" tf:"maximum_ebs_volume_size_in_gb,omitempty"` +} + +type SpaceStorageSettingsDefaultEBSStorageSettingsParameters struct { + + // The default size of the EBS storage volume for a private space. + // +kubebuilder:validation:Optional + DefaultEBSVolumeSizeInGb *float64 `json:"defaultEbsVolumeSizeInGb" tf:"default_ebs_volume_size_in_gb,omitempty"` + + // The maximum size of the EBS storage volume for a private space. + // +kubebuilder:validation:Optional + MaximumEBSVolumeSizeInGb *float64 `json:"maximumEbsVolumeSizeInGb" tf:"maximum_ebs_volume_size_in_gb,omitempty"` +} + +type UserProfileInitParameters struct { + + // The ID of the associated Domain. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // Reference to a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDRef *v1.Reference `json:"domainIdRef,omitempty" tf:"-"` + + // Selector for a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDSelector *v1.Selector `json:"domainIdSelector,omitempty" tf:"-"` + + // A specifier for the type of value specified in single_sign_on_user_value. Currently, the only supported value is UserName. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified. 
+ SingleSignOnUserIdentifier *string `json:"singleSignOnUserIdentifier,omitempty" tf:"single_sign_on_user_identifier,omitempty"` + + // The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified. + SingleSignOnUserValue *string `json:"singleSignOnUserValue,omitempty" tf:"single_sign_on_user_value,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The name for the User Profile. + UserProfileName *string `json:"userProfileName,omitempty" tf:"user_profile_name,omitempty"` + + // The user settings. See User Settings below. + UserSettings *UserSettingsInitParameters `json:"userSettings,omitempty" tf:"user_settings,omitempty"` +} + +type UserProfileObservation struct { + + // The user profile Amazon Resource Name (ARN). + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ID of the associated Domain. + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // The ID of the user's profile in the Amazon Elastic File System (EFS) volume. + HomeEFSFileSystemUID *string `json:"homeEfsFileSystemUid,omitempty" tf:"home_efs_file_system_uid,omitempty"` + + // The user profile Amazon Resource Name (ARN). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A specifier for the type of value specified in single_sign_on_user_value. Currently, the only supported value is UserName. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified. + SingleSignOnUserIdentifier *string `json:"singleSignOnUserIdentifier,omitempty" tf:"single_sign_on_user_identifier,omitempty"` + + // The username of the associated AWS Single Sign-On User for this User Profile. 
If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified. + SingleSignOnUserValue *string `json:"singleSignOnUserValue,omitempty" tf:"single_sign_on_user_value,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The name for the User Profile. + UserProfileName *string `json:"userProfileName,omitempty" tf:"user_profile_name,omitempty"` + + // The user settings. See User Settings below. + UserSettings *UserSettingsObservation `json:"userSettings,omitempty" tf:"user_settings,omitempty"` +} + +type UserProfileParameters struct { + + // The ID of the associated Domain. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Domain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // Reference to a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDRef *v1.Reference `json:"domainIdRef,omitempty" tf:"-"` + + // Selector for a Domain in sagemaker to populate domainId. + // +kubebuilder:validation:Optional + DomainIDSelector *v1.Selector `json:"domainIdSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A specifier for the type of value specified in single_sign_on_user_value. 
Currently, the only supported value is UserName. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified. + // +kubebuilder:validation:Optional + SingleSignOnUserIdentifier *string `json:"singleSignOnUserIdentifier,omitempty" tf:"single_sign_on_user_identifier,omitempty"` + + // The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified. + // +kubebuilder:validation:Optional + SingleSignOnUserValue *string `json:"singleSignOnUserValue,omitempty" tf:"single_sign_on_user_value,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The name for the User Profile. + // +kubebuilder:validation:Optional + UserProfileName *string `json:"userProfileName,omitempty" tf:"user_profile_name,omitempty"` + + // The user settings. See User Settings below. + // +kubebuilder:validation:Optional + UserSettings *UserSettingsParameters `json:"userSettings,omitempty" tf:"user_settings,omitempty"` +} + +type UserSettingsCanvasAppSettingsInitParameters struct { + + // The model deployment settings for the SageMaker Canvas application. See Direct Deploy Settings below. + DirectDeploySettings *CanvasAppSettingsDirectDeploySettingsInitParameters `json:"directDeploySettings,omitempty" tf:"direct_deploy_settings,omitempty"` + + // The settings for connecting to an external data source with OAuth. See Identity Provider OAuth Settings below. + IdentityProviderOauthSettings []CanvasAppSettingsIdentityProviderOauthSettingsInitParameters `json:"identityProviderOauthSettings,omitempty" tf:"identity_provider_oauth_settings,omitempty"` + + // The settings for document querying. 
See Kendra Settings below. + KendraSettings *CanvasAppSettingsKendraSettingsInitParameters `json:"kendraSettings,omitempty" tf:"kendra_settings,omitempty"` + + // The model registry settings for the SageMaker Canvas application. See Model Register Settings below. + ModelRegisterSettings *CanvasAppSettingsModelRegisterSettingsInitParameters `json:"modelRegisterSettings,omitempty" tf:"model_register_settings,omitempty"` + + // Time series forecast settings for the Canvas app. See Time Series Forecasting Settings below. + TimeSeriesForecastingSettings *CanvasAppSettingsTimeSeriesForecastingSettingsInitParameters `json:"timeSeriesForecastingSettings,omitempty" tf:"time_series_forecasting_settings,omitempty"` + + // The workspace settings for the SageMaker Canvas application. See Workspace Settings below. + WorkspaceSettings *CanvasAppSettingsWorkspaceSettingsInitParameters `json:"workspaceSettings,omitempty" tf:"workspace_settings,omitempty"` +} + +type UserSettingsCanvasAppSettingsObservation struct { + + // The model deployment settings for the SageMaker Canvas application. See Direct Deploy Settings below. + DirectDeploySettings *CanvasAppSettingsDirectDeploySettingsObservation `json:"directDeploySettings,omitempty" tf:"direct_deploy_settings,omitempty"` + + // The settings for connecting to an external data source with OAuth. See Identity Provider OAuth Settings below. + IdentityProviderOauthSettings []CanvasAppSettingsIdentityProviderOauthSettingsObservation `json:"identityProviderOauthSettings,omitempty" tf:"identity_provider_oauth_settings,omitempty"` + + // The settings for document querying. See Kendra Settings below. + KendraSettings *CanvasAppSettingsKendraSettingsObservation `json:"kendraSettings,omitempty" tf:"kendra_settings,omitempty"` + + // The model registry settings for the SageMaker Canvas application. See Model Register Settings below. 
+ ModelRegisterSettings *CanvasAppSettingsModelRegisterSettingsObservation `json:"modelRegisterSettings,omitempty" tf:"model_register_settings,omitempty"` + + // Time series forecast settings for the Canvas app. See Time Series Forecasting Settings below. + TimeSeriesForecastingSettings *CanvasAppSettingsTimeSeriesForecastingSettingsObservation `json:"timeSeriesForecastingSettings,omitempty" tf:"time_series_forecasting_settings,omitempty"` + + // The workspace settings for the SageMaker Canvas application. See Workspace Settings below. + WorkspaceSettings *CanvasAppSettingsWorkspaceSettingsObservation `json:"workspaceSettings,omitempty" tf:"workspace_settings,omitempty"` +} + +type UserSettingsCanvasAppSettingsParameters struct { + + // The model deployment settings for the SageMaker Canvas application. See Direct Deploy Settings below. + // +kubebuilder:validation:Optional + DirectDeploySettings *CanvasAppSettingsDirectDeploySettingsParameters `json:"directDeploySettings,omitempty" tf:"direct_deploy_settings,omitempty"` + + // The settings for connecting to an external data source with OAuth. See Identity Provider OAuth Settings below. + // +kubebuilder:validation:Optional + IdentityProviderOauthSettings []CanvasAppSettingsIdentityProviderOauthSettingsParameters `json:"identityProviderOauthSettings,omitempty" tf:"identity_provider_oauth_settings,omitempty"` + + // The settings for document querying. See Kendra Settings below. + // +kubebuilder:validation:Optional + KendraSettings *CanvasAppSettingsKendraSettingsParameters `json:"kendraSettings,omitempty" tf:"kendra_settings,omitempty"` + + // The model registry settings for the SageMaker Canvas application. See Model Register Settings below. + // +kubebuilder:validation:Optional + ModelRegisterSettings *CanvasAppSettingsModelRegisterSettingsParameters `json:"modelRegisterSettings,omitempty" tf:"model_register_settings,omitempty"` + + // Time series forecast settings for the Canvas app. 
See Time Series Forecasting Settings below. + // +kubebuilder:validation:Optional + TimeSeriesForecastingSettings *CanvasAppSettingsTimeSeriesForecastingSettingsParameters `json:"timeSeriesForecastingSettings,omitempty" tf:"time_series_forecasting_settings,omitempty"` + + // The workspace settings for the SageMaker Canvas application. See Workspace Settings below. + // +kubebuilder:validation:Optional + WorkspaceSettings *CanvasAppSettingsWorkspaceSettingsParameters `json:"workspaceSettings,omitempty" tf:"workspace_settings,omitempty"` +} + +type UserSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsCodeEditorAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. 
+ LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsCodeEditorAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsCodeEditorAppSettingsInitParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. 
see Default Resource Spec below. + DefaultResourceSpec *UserSettingsCodeEditorAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsCodeEditorAppSettingsObservation struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsCodeEditorAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsCodeEditorAppSettingsParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *UserSettingsCodeEditorAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsCustomFileSystemConfigInitParameters struct { + + // The default EBS storage settings for a private space. See EFS File System Config below. 
+ EFSFileSystemConfig []CustomFileSystemConfigEFSFileSystemConfigInitParameters `json:"efsFileSystemConfig,omitempty" tf:"efs_file_system_config,omitempty"` +} + +type UserSettingsCustomFileSystemConfigObservation struct { + + // The default EBS storage settings for a private space. See EFS File System Config below. + EFSFileSystemConfig []CustomFileSystemConfigEFSFileSystemConfigObservation `json:"efsFileSystemConfig,omitempty" tf:"efs_file_system_config,omitempty"` +} + +type UserSettingsCustomFileSystemConfigParameters struct { + + // The default EBS storage settings for a private space. See EFS File System Config below. + // +kubebuilder:validation:Optional + EFSFileSystemConfig []CustomFileSystemConfigEFSFileSystemConfigParameters `json:"efsFileSystemConfig,omitempty" tf:"efs_file_system_config,omitempty"` +} + +type UserSettingsCustomPosixUserConfigInitParameters struct { + + // The POSIX group ID. + GID *float64 `json:"gid,omitempty" tf:"gid,omitempty"` + + // The POSIX user ID. + UID *float64 `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type UserSettingsCustomPosixUserConfigObservation struct { + + // The POSIX group ID. + GID *float64 `json:"gid,omitempty" tf:"gid,omitempty"` + + // The POSIX user ID. + UID *float64 `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type UserSettingsCustomPosixUserConfigParameters struct { + + // The POSIX group ID. + // +kubebuilder:validation:Optional + GID *float64 `json:"gid" tf:"gid,omitempty"` + + // The POSIX user ID. + // +kubebuilder:validation:Optional + UID *float64 `json:"uid" tf:"uid,omitempty"` +} + +type UserSettingsInitParameters struct { + + // The Canvas app settings. See Canvas App Settings below. + CanvasAppSettings *UserSettingsCanvasAppSettingsInitParameters `json:"canvasAppSettings,omitempty" tf:"canvas_app_settings,omitempty"` + + // The Code Editor application settings. See Code Editor App Settings below. 
+ CodeEditorAppSettings *UserSettingsCodeEditorAppSettingsInitParameters `json:"codeEditorAppSettings,omitempty" tf:"code_editor_app_settings,omitempty"` + + // The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio. See Custom File System Config below. + CustomFileSystemConfig []UserSettingsCustomFileSystemConfigInitParameters `json:"customFileSystemConfig,omitempty" tf:"custom_file_system_config,omitempty"` + + // Details about the POSIX identity that is used for file system operations. See Custom Posix User Config below. + CustomPosixUserConfig *UserSettingsCustomPosixUserConfigInitParameters `json:"customPosixUserConfig,omitempty" tf:"custom_posix_user_config,omitempty"` + + // The default experience that the user is directed to when accessing the domain. The supported values are: studio::: Indicates that Studio is the default experience. This value can only be passed if StudioWebPortal is set to ENABLED. app:JupyterServer:: Indicates that Studio Classic is the default experience. + DefaultLandingURI *string `json:"defaultLandingUri,omitempty" tf:"default_landing_uri,omitempty"` + + // The execution role ARN for the user. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // The settings for the JupyterLab application. See Jupyter Lab App Settings below. + JupyterLabAppSettings *UserSettingsJupyterLabAppSettingsInitParameters `json:"jupyterLabAppSettings,omitempty" tf:"jupyter_lab_app_settings,omitempty"` + + // The Jupyter server's app settings. See Jupyter Server App Settings below. + JupyterServerAppSettings *UserSettingsJupyterServerAppSettingsInitParameters `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See Kernel Gateway App Settings below. 
+ KernelGatewayAppSettings *UserSettingsKernelGatewayAppSettingsInitParameters `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // The RSession app settings. See RSession App Settings below. + RSessionAppSettings *UserSettingsRSessionAppSettingsInitParameters `json:"rSessionAppSettings,omitempty" tf:"r_session_app_settings,omitempty"` + + // A collection of settings that configure user interaction with the RStudioServerPro app. See RStudioServerProAppSettings below. + RStudioServerProAppSettings *UserSettingsRStudioServerProAppSettingsInitParameters `json:"rStudioServerProAppSettings,omitempty" tf:"r_studio_server_pro_app_settings,omitempty"` + + // A list of security group IDs that will be attached to the user. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The sharing settings. See Sharing Settings below. + SharingSettings *UserSettingsSharingSettingsInitParameters `json:"sharingSettings,omitempty" tf:"sharing_settings,omitempty"` + + // The storage settings for a private space. See Space Storage Settings below. + SpaceStorageSettings *UserSettingsSpaceStorageSettingsInitParameters `json:"spaceStorageSettings,omitempty" tf:"space_storage_settings,omitempty"` + + // Whether the user can access Studio. If this value is set to DISABLED, the user cannot access Studio, even if that is the default experience for the domain. Valid values are ENABLED and DISABLED. + StudioWebPortal *string `json:"studioWebPortal,omitempty" tf:"studio_web_portal,omitempty"` + + // The TensorBoard app settings. See TensorBoard App Settings below. + TensorBoardAppSettings *UserSettingsTensorBoardAppSettingsInitParameters `json:"tensorBoardAppSettings,omitempty" tf:"tensor_board_app_settings,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsCodeRepositoryInitParameters struct { + + // The URL of the Git repository. 
+ RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsCodeRepositoryObservation struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsCodeRepositoryParameters struct { + + // The URL of the Git repository. + // +kubebuilder:validation:Optional + RepositoryURL *string `json:"repositoryUrl" tf:"repository_url,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsCustomImageInitParameters struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsCustomImageObservation struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsCustomImageParameters struct { + + // The name of the App Image Config. + // +kubebuilder:validation:Optional + AppImageConfigName *string `json:"appImageConfigName" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName" tf:"image_name,omitempty"` + + // The version number of the Custom Image. 
+ // +kubebuilder:validation:Optional + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. 
+ SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsInitParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + CodeRepository []UserSettingsJupyterLabAppSettingsCodeRepositoryInitParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. 
+ CustomImage []UserSettingsJupyterLabAppSettingsCustomImageInitParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsJupyterLabAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsObservation struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + CodeRepository []UserSettingsJupyterLabAppSettingsCodeRepositoryObservation `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. + CustomImage []UserSettingsJupyterLabAppSettingsCustomImageObservation `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsJupyterLabAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsJupyterLabAppSettingsParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. 
+ // +kubebuilder:validation:Optional + CodeRepository []UserSettingsJupyterLabAppSettingsCodeRepositoryParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. + // +kubebuilder:validation:Optional + CustomImage []UserSettingsJupyterLabAppSettingsCustomImageParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *UserSettingsJupyterLabAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type UserSettingsJupyterServerAppSettingsCodeRepositoryObservation struct { + + // The URL of the Git repository. + RepositoryURL *string `json:"repositoryUrl,omitempty" tf:"repository_url,omitempty"` +} + +type UserSettingsJupyterServerAppSettingsCodeRepositoryParameters struct { + + // The URL of the Git repository. + // +kubebuilder:validation:Optional + RepositoryURL *string `json:"repositoryUrl" tf:"repository_url,omitempty"` +} + +type UserSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. 
+ InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsJupyterServerAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsJupyterServerAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. 
+ // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsJupyterServerAppSettingsInitParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + CodeRepository []UserSettingsJupyterServerAppSettingsCodeRepositoryInitParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsJupyterServerAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. 
+ // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsJupyterServerAppSettingsObservation struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + CodeRepository []UserSettingsJupyterServerAppSettingsCodeRepositoryObservation `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsJupyterServerAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsJupyterServerAppSettingsParameters struct { + + // A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see Code Repository below. + // +kubebuilder:validation:Optional + CodeRepository []UserSettingsJupyterServerAppSettingsCodeRepositoryParameters `json:"codeRepository,omitempty" tf:"code_repository,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *UserSettingsJupyterServerAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. 
+ // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsKernelGatewayAppSettingsCustomImageInitParameters struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type UserSettingsKernelGatewayAppSettingsCustomImageObservation struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type UserSettingsKernelGatewayAppSettingsCustomImageParameters struct { + + // The name of the App Image Config. + // +kubebuilder:validation:Optional + AppImageConfigName *string `json:"appImageConfigName" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + // +kubebuilder:validation:Optional + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type UserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. 
+ InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. 
+ // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsKernelGatewayAppSettingsInitParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. + CustomImage []UserSettingsKernelGatewayAppSettingsCustomImageInitParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsKernelGatewayAppSettingsObservation struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. 
see Custom Image below. + CustomImage []UserSettingsKernelGatewayAppSettingsCustomImageObservation `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsKernelGatewayAppSettingsParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. + // +kubebuilder:validation:Optional + CustomImage []UserSettingsKernelGatewayAppSettingsCustomImageParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *UserSettingsKernelGatewayAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configurations. + // +kubebuilder:validation:Optional + // +listType=set + LifecycleConfigArns []*string `json:"lifecycleConfigArns,omitempty" tf:"lifecycle_config_arns,omitempty"` +} + +type UserSettingsObservation struct { + + // The Canvas app settings. See Canvas App Settings below. + CanvasAppSettings *UserSettingsCanvasAppSettingsObservation `json:"canvasAppSettings,omitempty" tf:"canvas_app_settings,omitempty"` + + // The Code Editor application settings. See Code Editor App Settings below. 
+ CodeEditorAppSettings *UserSettingsCodeEditorAppSettingsObservation `json:"codeEditorAppSettings,omitempty" tf:"code_editor_app_settings,omitempty"` + + // The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio. See Custom File System Config below. + CustomFileSystemConfig []UserSettingsCustomFileSystemConfigObservation `json:"customFileSystemConfig,omitempty" tf:"custom_file_system_config,omitempty"` + + // Details about the POSIX identity that is used for file system operations. See Custom Posix User Config below. + CustomPosixUserConfig *UserSettingsCustomPosixUserConfigObservation `json:"customPosixUserConfig,omitempty" tf:"custom_posix_user_config,omitempty"` + + // The default experience that the user is directed to when accessing the domain. The supported values are: studio::: Indicates that Studio is the default experience. This value can only be passed if StudioWebPortal is set to ENABLED. app:JupyterServer:: Indicates that Studio Classic is the default experience. + DefaultLandingURI *string `json:"defaultLandingUri,omitempty" tf:"default_landing_uri,omitempty"` + + // The execution role ARN for the user. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // The settings for the JupyterLab application. See Jupyter Lab App Settings below. + JupyterLabAppSettings *UserSettingsJupyterLabAppSettingsObservation `json:"jupyterLabAppSettings,omitempty" tf:"jupyter_lab_app_settings,omitempty"` + + // The Jupyter server's app settings. See Jupyter Server App Settings below. + JupyterServerAppSettings *UserSettingsJupyterServerAppSettingsObservation `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See Kernel Gateway App Settings below. 
+ KernelGatewayAppSettings *UserSettingsKernelGatewayAppSettingsObservation `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // The RSession app settings. See RSession App Settings below. + RSessionAppSettings *UserSettingsRSessionAppSettingsObservation `json:"rSessionAppSettings,omitempty" tf:"r_session_app_settings,omitempty"` + + // A collection of settings that configure user interaction with the RStudioServerPro app. See RStudioServerProAppSettings below. + RStudioServerProAppSettings *UserSettingsRStudioServerProAppSettingsObservation `json:"rStudioServerProAppSettings,omitempty" tf:"r_studio_server_pro_app_settings,omitempty"` + + // A list of security group IDs that will be attached to the user. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The sharing settings. See Sharing Settings below. + SharingSettings *UserSettingsSharingSettingsObservation `json:"sharingSettings,omitempty" tf:"sharing_settings,omitempty"` + + // The storage settings for a private space. See Space Storage Settings below. + SpaceStorageSettings *UserSettingsSpaceStorageSettingsObservation `json:"spaceStorageSettings,omitempty" tf:"space_storage_settings,omitempty"` + + // Whether the user can access Studio. If this value is set to DISABLED, the user cannot access Studio, even if that is the default experience for the domain. Valid values are ENABLED and DISABLED. + StudioWebPortal *string `json:"studioWebPortal,omitempty" tf:"studio_web_portal,omitempty"` + + // The TensorBoard app settings. See TensorBoard App Settings below. + TensorBoardAppSettings *UserSettingsTensorBoardAppSettingsObservation `json:"tensorBoardAppSettings,omitempty" tf:"tensor_board_app_settings,omitempty"` +} + +type UserSettingsParameters struct { + + // The Canvas app settings. See Canvas App Settings below. 
+ // +kubebuilder:validation:Optional + CanvasAppSettings *UserSettingsCanvasAppSettingsParameters `json:"canvasAppSettings,omitempty" tf:"canvas_app_settings,omitempty"` + + // The Code Editor application settings. See Code Editor App Settings below. + // +kubebuilder:validation:Optional + CodeEditorAppSettings *UserSettingsCodeEditorAppSettingsParameters `json:"codeEditorAppSettings,omitempty" tf:"code_editor_app_settings,omitempty"` + + // The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio. See Custom File System Config below. + // +kubebuilder:validation:Optional + CustomFileSystemConfig []UserSettingsCustomFileSystemConfigParameters `json:"customFileSystemConfig,omitempty" tf:"custom_file_system_config,omitempty"` + + // Details about the POSIX identity that is used for file system operations. See Custom Posix User Config below. + // +kubebuilder:validation:Optional + CustomPosixUserConfig *UserSettingsCustomPosixUserConfigParameters `json:"customPosixUserConfig,omitempty" tf:"custom_posix_user_config,omitempty"` + + // The default experience that the user is directed to when accessing the domain. The supported values are: studio::: Indicates that Studio is the default experience. This value can only be passed if StudioWebPortal is set to ENABLED. app:JupyterServer:: Indicates that Studio Classic is the default experience. + // +kubebuilder:validation:Optional + DefaultLandingURI *string `json:"defaultLandingUri,omitempty" tf:"default_landing_uri,omitempty"` + + // The execution role ARN for the user. + // +kubebuilder:validation:Optional + ExecutionRole *string `json:"executionRole" tf:"execution_role,omitempty"` + + // The settings for the JupyterLab application. See Jupyter Lab App Settings below. 
+ // +kubebuilder:validation:Optional + JupyterLabAppSettings *UserSettingsJupyterLabAppSettingsParameters `json:"jupyterLabAppSettings,omitempty" tf:"jupyter_lab_app_settings,omitempty"` + + // The Jupyter server's app settings. See Jupyter Server App Settings below. + // +kubebuilder:validation:Optional + JupyterServerAppSettings *UserSettingsJupyterServerAppSettingsParameters `json:"jupyterServerAppSettings,omitempty" tf:"jupyter_server_app_settings,omitempty"` + + // The kernel gateway app settings. See Kernel Gateway App Settings below. + // +kubebuilder:validation:Optional + KernelGatewayAppSettings *UserSettingsKernelGatewayAppSettingsParameters `json:"kernelGatewayAppSettings,omitempty" tf:"kernel_gateway_app_settings,omitempty"` + + // The RSession app settings. See RSession App Settings below. + // +kubebuilder:validation:Optional + RSessionAppSettings *UserSettingsRSessionAppSettingsParameters `json:"rSessionAppSettings,omitempty" tf:"r_session_app_settings,omitempty"` + + // A collection of settings that configure user interaction with the RStudioServerPro app. See RStudioServerProAppSettings below. + // +kubebuilder:validation:Optional + RStudioServerProAppSettings *UserSettingsRStudioServerProAppSettingsParameters `json:"rStudioServerProAppSettings,omitempty" tf:"r_studio_server_pro_app_settings,omitempty"` + + // A list of security group IDs that will be attached to the user. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // The sharing settings. See Sharing Settings below. + // +kubebuilder:validation:Optional + SharingSettings *UserSettingsSharingSettingsParameters `json:"sharingSettings,omitempty" tf:"sharing_settings,omitempty"` + + // The storage settings for a private space. See Space Storage Settings below. 
+ // +kubebuilder:validation:Optional + SpaceStorageSettings *UserSettingsSpaceStorageSettingsParameters `json:"spaceStorageSettings,omitempty" tf:"space_storage_settings,omitempty"` + + // Whether the user can access Studio. If this value is set to DISABLED, the user cannot access Studio, even if that is the default experience for the domain. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + StudioWebPortal *string `json:"studioWebPortal,omitempty" tf:"studio_web_portal,omitempty"` + + // The TensorBoard app settings. See TensorBoard App Settings below. + // +kubebuilder:validation:Optional + TensorBoardAppSettings *UserSettingsTensorBoardAppSettingsParameters `json:"tensorBoardAppSettings,omitempty" tf:"tensor_board_app_settings,omitempty"` +} + +type UserSettingsRSessionAppSettingsCustomImageInitParameters struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type UserSettingsRSessionAppSettingsCustomImageObservation struct { + + // The name of the App Image Config. + AppImageConfigName *string `json:"appImageConfigName,omitempty" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type UserSettingsRSessionAppSettingsCustomImageParameters struct { + + // The name of the App Image Config. 
+ // +kubebuilder:validation:Optional + AppImageConfigName *string `json:"appImageConfigName" tf:"app_image_config_name,omitempty"` + + // The name of the Custom Image. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName" tf:"image_name,omitempty"` + + // The version number of the Custom Image. + // +kubebuilder:validation:Optional + ImageVersionNumber *float64 `json:"imageVersionNumber,omitempty" tf:"image_version_number,omitempty"` +} + +type UserSettingsRSessionAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsRSessionAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. 
+ LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsRSessionAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsRSessionAppSettingsInitParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. 
+ CustomImage []UserSettingsRSessionAppSettingsCustomImageInitParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsRSessionAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type UserSettingsRSessionAppSettingsObservation struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. + CustomImage []UserSettingsRSessionAppSettingsCustomImageObservation `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsRSessionAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type UserSettingsRSessionAppSettingsParameters struct { + + // A list of custom SageMaker images that are configured to run as a KernelGateway app. see Custom Image below. + // +kubebuilder:validation:Optional + CustomImage []UserSettingsRSessionAppSettingsCustomImageParameters `json:"customImage,omitempty" tf:"custom_image,omitempty"` + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + // +kubebuilder:validation:Optional + DefaultResourceSpec *UserSettingsRSessionAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type UserSettingsRStudioServerProAppSettingsInitParameters struct { + + // Indicates whether the current user has access to the RStudioServerPro app. Valid values are ENABLED and DISABLED. 
+ AccessStatus *string `json:"accessStatus,omitempty" tf:"access_status,omitempty"` + + // The level of permissions that the user has within the RStudioServerPro app. This value defaults to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the user access to the RStudio Administrative Dashboard. Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + UserGroup *string `json:"userGroup,omitempty" tf:"user_group,omitempty"` +} + +type UserSettingsRStudioServerProAppSettingsObservation struct { + + // Indicates whether the current user has access to the RStudioServerPro app. Valid values are ENABLED and DISABLED. + AccessStatus *string `json:"accessStatus,omitempty" tf:"access_status,omitempty"` + + // The level of permissions that the user has within the RStudioServerPro app. This value defaults to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the user access to the RStudio Administrative Dashboard. Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + UserGroup *string `json:"userGroup,omitempty" tf:"user_group,omitempty"` +} + +type UserSettingsRStudioServerProAppSettingsParameters struct { + + // Indicates whether the current user has access to the RStudioServerPro app. Valid values are ENABLED and DISABLED. + // +kubebuilder:validation:Optional + AccessStatus *string `json:"accessStatus,omitempty" tf:"access_status,omitempty"` + + // The level of permissions that the user has within the RStudioServerPro app. This value defaults to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the user access to the RStudio Administrative Dashboard. Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + // +kubebuilder:validation:Optional + UserGroup *string `json:"userGroup,omitempty" tf:"user_group,omitempty"` +} + +type UserSettingsSharingSettingsInitParameters struct { + + // Whether to include the notebook cell output when sharing the notebook. The default is Disabled. Valid values are Allowed and Disabled. 
+ NotebookOutputOption *string `json:"notebookOutputOption,omitempty" tf:"notebook_output_option,omitempty"` + + // When notebook_output_option is Allowed, the AWS Key Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket. + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` + + // When notebook_output_option is Allowed, the Amazon S3 bucket used to save the notebook cell output. + S3OutputPath *string `json:"s3OutputPath,omitempty" tf:"s3_output_path,omitempty"` +} + +type UserSettingsSharingSettingsObservation struct { + + // Whether to include the notebook cell output when sharing the notebook. The default is Disabled. Valid values are Allowed and Disabled. + NotebookOutputOption *string `json:"notebookOutputOption,omitempty" tf:"notebook_output_option,omitempty"` + + // When notebook_output_option is Allowed, the AWS Key Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket. + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` + + // When notebook_output_option is Allowed, the Amazon S3 bucket used to save the notebook cell output. + S3OutputPath *string `json:"s3OutputPath,omitempty" tf:"s3_output_path,omitempty"` +} + +type UserSettingsSharingSettingsParameters struct { + + // Whether to include the notebook cell output when sharing the notebook. The default is Disabled. Valid values are Allowed and Disabled. + // +kubebuilder:validation:Optional + NotebookOutputOption *string `json:"notebookOutputOption,omitempty" tf:"notebook_output_option,omitempty"` + + // When notebook_output_option is Allowed, the AWS Key Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket. 
+ // +kubebuilder:validation:Optional + S3KMSKeyID *string `json:"s3KmsKeyId,omitempty" tf:"s3_kms_key_id,omitempty"` + + // When notebook_output_option is Allowed, the Amazon S3 bucket used to save the notebook cell output. + // +kubebuilder:validation:Optional + S3OutputPath *string `json:"s3OutputPath,omitempty" tf:"s3_output_path,omitempty"` +} + +type UserSettingsSpaceStorageSettingsInitParameters struct { + + // The default EBS storage settings for a private space. See Default EBS Storage Settings below. + DefaultEBSStorageSettings *SpaceStorageSettingsDefaultEBSStorageSettingsInitParameters `json:"defaultEbsStorageSettings,omitempty" tf:"default_ebs_storage_settings,omitempty"` +} + +type UserSettingsSpaceStorageSettingsObservation struct { + + // The default EBS storage settings for a private space. See Default EBS Storage Settings below. + DefaultEBSStorageSettings *SpaceStorageSettingsDefaultEBSStorageSettingsObservation `json:"defaultEbsStorageSettings,omitempty" tf:"default_ebs_storage_settings,omitempty"` +} + +type UserSettingsSpaceStorageSettingsParameters struct { + + // The default EBS storage settings for a private space. See Default EBS Storage Settings below. + // +kubebuilder:validation:Optional + DefaultEBSStorageSettings *SpaceStorageSettingsDefaultEBSStorageSettingsParameters `json:"defaultEbsStorageSettings,omitempty" tf:"default_ebs_storage_settings,omitempty"` +} + +type UserSettingsTensorBoardAppSettingsDefaultResourceSpecInitParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. 
+ SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsTensorBoardAppSettingsDefaultResourceSpecObservation struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsTensorBoardAppSettingsDefaultResourceSpecParameters struct { + + // The instance type that the image version runs on.. For valid values see SageMaker Instance Types. + // +kubebuilder:validation:Optional + InstanceType *string `json:"instanceType,omitempty" tf:"instance_type,omitempty"` + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. 
+ // +kubebuilder:validation:Optional + LifecycleConfigArn *string `json:"lifecycleConfigArn,omitempty" tf:"lifecycle_config_arn,omitempty"` + + // The ARN of the SageMaker image that the image version belongs to. + // +kubebuilder:validation:Optional + SagemakerImageArn *string `json:"sagemakerImageArn,omitempty" tf:"sagemaker_image_arn,omitempty"` + + // The SageMaker Image Version Alias. + // +kubebuilder:validation:Optional + SagemakerImageVersionAlias *string `json:"sagemakerImageVersionAlias,omitempty" tf:"sagemaker_image_version_alias,omitempty"` + + // The ARN of the image version created on the instance. + // +kubebuilder:validation:Optional + SagemakerImageVersionArn *string `json:"sagemakerImageVersionArn,omitempty" tf:"sagemaker_image_version_arn,omitempty"` +} + +type UserSettingsTensorBoardAppSettingsInitParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsTensorBoardAppSettingsDefaultResourceSpecInitParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type UserSettingsTensorBoardAppSettingsObservation struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. + DefaultResourceSpec *UserSettingsTensorBoardAppSettingsDefaultResourceSpecObservation `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +type UserSettingsTensorBoardAppSettingsParameters struct { + + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see Default Resource Spec below. 
+ // +kubebuilder:validation:Optional + DefaultResourceSpec *UserSettingsTensorBoardAppSettingsDefaultResourceSpecParameters `json:"defaultResourceSpec,omitempty" tf:"default_resource_spec,omitempty"` +} + +// UserProfileSpec defines the desired state of UserProfile +type UserProfileSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserProfileParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UserProfileInitParameters `json:"initProvider,omitempty"` +} + +// UserProfileStatus defines the observed state of UserProfile. +type UserProfileStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserProfileObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// UserProfile is the Schema for the UserProfiles API. Provides a SageMaker User Profile resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type UserProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.userProfileName) || (has(self.initProvider) && has(self.initProvider.userProfileName))",message="spec.forProvider.userProfileName is a required parameter" + Spec UserProfileSpec `json:"spec"` + Status UserProfileStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserProfileList contains a list of UserProfiles +type UserProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UserProfile `json:"items"` +} + +// Repository type metadata. +var ( + UserProfile_Kind = "UserProfile" + UserProfile_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: UserProfile_Kind}.String() + UserProfile_KindAPIVersion = UserProfile_Kind + "." 
+ CRDGroupVersion.String() + UserProfile_GroupVersionKind = CRDGroupVersion.WithKind(UserProfile_Kind) +) + +func init() { + SchemeBuilder.Register(&UserProfile{}, &UserProfileList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_workforce_terraformed.go b/apis/sagemaker/v1beta2/zz_workforce_terraformed.go new file mode 100755 index 0000000000..78ce42ae17 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_workforce_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workforce +func (mg *Workforce) GetTerraformResourceType() string { + return "aws_sagemaker_workforce" +} + +// GetConnectionDetailsMapping for this Workforce +func (tr *Workforce) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"oidc_config[*].client_secret": "oidcConfig[*].clientSecretSecretRef"} +} + +// GetObservation of this Workforce +func (tr *Workforce) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workforce +func (tr *Workforce) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workforce +func (tr *Workforce) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workforce +func (tr *Workforce) GetParameters() (map[string]any, error) { + p, 
err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Workforce +func (tr *Workforce) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Workforce +func (tr *Workforce) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Workforce +func (tr *Workforce) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Workforce using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Workforce) LateInitialize(attrs []byte) (bool, error) { + params := &WorkforceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("SourceIPConfig")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workforce) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_workforce_types.go b/apis/sagemaker/v1beta2/zz_workforce_types.go new file mode 100755 index 0000000000..589db55431 --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_workforce_types.go @@ -0,0 +1,362 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CognitoConfigInitParameters struct { + + // The client ID for your Amazon Cognito user pool. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolClient + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Reference to a UserPoolClient in cognitoidp to populate clientId. + // +kubebuilder:validation:Optional + ClientIDRef *v1.Reference `json:"clientIdRef,omitempty" tf:"-"` + + // Selector for a UserPoolClient in cognitoidp to populate clientId. 
+ // +kubebuilder:validation:Optional + ClientIDSelector *v1.Selector `json:"clientIdSelector,omitempty" tf:"-"` + + // ID for your Amazon Cognito user pool. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("user_pool_id",false) + UserPool *string `json:"userPool,omitempty" tf:"user_pool,omitempty"` + + // Reference to a UserPoolDomain in cognitoidp to populate userPool. + // +kubebuilder:validation:Optional + UserPoolRef *v1.Reference `json:"userPoolRef,omitempty" tf:"-"` + + // Selector for a UserPoolDomain in cognitoidp to populate userPool. + // +kubebuilder:validation:Optional + UserPoolSelector *v1.Selector `json:"userPoolSelector,omitempty" tf:"-"` +} + +type CognitoConfigObservation struct { + + // The client ID for your Amazon Cognito user pool. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // ID for your Amazon Cognito user pool. + UserPool *string `json:"userPool,omitempty" tf:"user_pool,omitempty"` +} + +type CognitoConfigParameters struct { + + // The client ID for your Amazon Cognito user pool. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolClient + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Reference to a UserPoolClient in cognitoidp to populate clientId. + // +kubebuilder:validation:Optional + ClientIDRef *v1.Reference `json:"clientIdRef,omitempty" tf:"-"` + + // Selector for a UserPoolClient in cognitoidp to populate clientId. + // +kubebuilder:validation:Optional + ClientIDSelector *v1.Selector `json:"clientIdSelector,omitempty" tf:"-"` + + // ID for your Amazon Cognito user pool. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("user_pool_id",false) + // +kubebuilder:validation:Optional + UserPool *string `json:"userPool,omitempty" tf:"user_pool,omitempty"` + + // Reference to a UserPoolDomain in cognitoidp to populate userPool. + // +kubebuilder:validation:Optional + UserPoolRef *v1.Reference `json:"userPoolRef,omitempty" tf:"-"` + + // Selector for a UserPoolDomain in cognitoidp to populate userPool. + // +kubebuilder:validation:Optional + UserPoolSelector *v1.Selector `json:"userPoolSelector,omitempty" tf:"-"` +} + +type OidcConfigInitParameters struct { + + // The OIDC IdP authorization endpoint used to configure your private workforce. + AuthorizationEndpoint *string `json:"authorizationEndpoint,omitempty" tf:"authorization_endpoint,omitempty"` + + // The client ID for your Amazon Cognito user pool. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OIDC IdP client secret used to configure your private workforce. + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // The OIDC IdP issuer used to configure your private workforce. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. + JwksURI *string `json:"jwksUri,omitempty" tf:"jwks_uri,omitempty"` + + // The OIDC IdP logout endpoint used to configure your private workforce. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The OIDC IdP token endpoint used to configure your private workforce. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` + + // The OIDC IdP user information endpoint used to configure your private workforce. 
+ UserInfoEndpoint *string `json:"userInfoEndpoint,omitempty" tf:"user_info_endpoint,omitempty"` +} + +type OidcConfigObservation struct { + + // The OIDC IdP authorization endpoint used to configure your private workforce. + AuthorizationEndpoint *string `json:"authorizationEndpoint,omitempty" tf:"authorization_endpoint,omitempty"` + + // The client ID for your Amazon Cognito user pool. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OIDC IdP issuer used to configure your private workforce. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. + JwksURI *string `json:"jwksUri,omitempty" tf:"jwks_uri,omitempty"` + + // The OIDC IdP logout endpoint used to configure your private workforce. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The OIDC IdP token endpoint used to configure your private workforce. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` + + // The OIDC IdP user information endpoint used to configure your private workforce. + UserInfoEndpoint *string `json:"userInfoEndpoint,omitempty" tf:"user_info_endpoint,omitempty"` +} + +type OidcConfigParameters struct { + + // The OIDC IdP authorization endpoint used to configure your private workforce. + // +kubebuilder:validation:Optional + AuthorizationEndpoint *string `json:"authorizationEndpoint" tf:"authorization_endpoint,omitempty"` + + // The client ID for your Amazon Cognito user pool. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OIDC IdP client secret used to configure your private workforce. + // +kubebuilder:validation:Optional + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // The OIDC IdP issuer used to configure your private workforce. 
+ // +kubebuilder:validation:Optional + Issuer *string `json:"issuer" tf:"issuer,omitempty"` + + // The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. + // +kubebuilder:validation:Optional + JwksURI *string `json:"jwksUri" tf:"jwks_uri,omitempty"` + + // The OIDC IdP logout endpoint used to configure your private workforce. + // +kubebuilder:validation:Optional + LogoutEndpoint *string `json:"logoutEndpoint" tf:"logout_endpoint,omitempty"` + + // The OIDC IdP token endpoint used to configure your private workforce. + // +kubebuilder:validation:Optional + TokenEndpoint *string `json:"tokenEndpoint" tf:"token_endpoint,omitempty"` + + // The OIDC IdP user information endpoint used to configure your private workforce. + // +kubebuilder:validation:Optional + UserInfoEndpoint *string `json:"userInfoEndpoint" tf:"user_info_endpoint,omitempty"` +} + +type SourceIPConfigInitParameters struct { + + // A list of up to 10 CIDR values. + // +listType=set + Cidrs []*string `json:"cidrs,omitempty" tf:"cidrs,omitempty"` +} + +type SourceIPConfigObservation struct { + + // A list of up to 10 CIDR values. + // +listType=set + Cidrs []*string `json:"cidrs,omitempty" tf:"cidrs,omitempty"` +} + +type SourceIPConfigParameters struct { + + // A list of up to 10 CIDR values. + // +kubebuilder:validation:Optional + // +listType=set + Cidrs []*string `json:"cidrs" tf:"cidrs,omitempty"` +} + +type WorkforceInitParameters struct { + + // Use this parameter to configure an Amazon Cognito private workforce. A single Cognito workforce is created using and corresponds to a single Amazon Cognito user pool. Conflicts with oidc_config. see Cognito Config details below. + CognitoConfig *CognitoConfigInitParameters `json:"cognitoConfig,omitempty" tf:"cognito_config,omitempty"` + + // Use this parameter to configure a private workforce using your own OIDC Identity Provider. Conflicts with cognito_config. see OIDC Config details below. 
+ OidcConfig *OidcConfigInitParameters `json:"oidcConfig,omitempty" tf:"oidc_config,omitempty"` + + // A list of IP address ranges Used to create an allow list of IP addresses for a private workforce. By default, a workforce isn't restricted to specific IP addresses. see Source Ip Config details below. + SourceIPConfig *SourceIPConfigInitParameters `json:"sourceIpConfig,omitempty" tf:"source_ip_config,omitempty"` + + // configure a workforce using VPC. see Workforce VPC Config details below. + WorkforceVPCConfig *WorkforceVPCConfigInitParameters `json:"workforceVpcConfig,omitempty" tf:"workforce_vpc_config,omitempty"` +} + +type WorkforceObservation struct { + + // The Amazon Resource Name (ARN) assigned by AWS to this Workforce. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Use this parameter to configure an Amazon Cognito private workforce. A single Cognito workforce is created using and corresponds to a single Amazon Cognito user pool. Conflicts with oidc_config. see Cognito Config details below. + CognitoConfig *CognitoConfigObservation `json:"cognitoConfig,omitempty" tf:"cognito_config,omitempty"` + + // The name of the Workforce. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Use this parameter to configure a private workforce using your own OIDC Identity Provider. Conflicts with cognito_config. see OIDC Config details below. + OidcConfig *OidcConfigObservation `json:"oidcConfig,omitempty" tf:"oidc_config,omitempty"` + + // A list of IP address ranges Used to create an allow list of IP addresses for a private workforce. By default, a workforce isn't restricted to specific IP addresses. see Source Ip Config details below. + SourceIPConfig *SourceIPConfigObservation `json:"sourceIpConfig,omitempty" tf:"source_ip_config,omitempty"` + + // The subdomain for your OIDC Identity Provider. + Subdomain *string `json:"subdomain,omitempty" tf:"subdomain,omitempty"` + + // configure a workforce using VPC. 
see Workforce VPC Config details below. + WorkforceVPCConfig *WorkforceVPCConfigObservation `json:"workforceVpcConfig,omitempty" tf:"workforce_vpc_config,omitempty"` +} + +type WorkforceParameters struct { + + // Use this parameter to configure an Amazon Cognito private workforce. A single Cognito workforce is created using and corresponds to a single Amazon Cognito user pool. Conflicts with oidc_config. see Cognito Config details below. + // +kubebuilder:validation:Optional + CognitoConfig *CognitoConfigParameters `json:"cognitoConfig,omitempty" tf:"cognito_config,omitempty"` + + // Use this parameter to configure a private workforce using your own OIDC Identity Provider. Conflicts with cognito_config. see OIDC Config details below. + // +kubebuilder:validation:Optional + OidcConfig *OidcConfigParameters `json:"oidcConfig,omitempty" tf:"oidc_config,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A list of IP address ranges Used to create an allow list of IP addresses for a private workforce. By default, a workforce isn't restricted to specific IP addresses. see Source Ip Config details below. + // +kubebuilder:validation:Optional + SourceIPConfig *SourceIPConfigParameters `json:"sourceIpConfig,omitempty" tf:"source_ip_config,omitempty"` + + // configure a workforce using VPC. see Workforce VPC Config details below. + // +kubebuilder:validation:Optional + WorkforceVPCConfig *WorkforceVPCConfigParameters `json:"workforceVpcConfig,omitempty" tf:"workforce_vpc_config,omitempty"` +} + +type WorkforceVPCConfigInitParameters struct { + + // The VPC security group IDs. The security groups must be for the same VPC as specified in the subnet. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The ID of the subnets in the VPC that you want to connect. 
+ // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` + + // The ID of the VPC that the workforce uses for communication. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type WorkforceVPCConfigObservation struct { + + // The VPC security group IDs. The security groups must be for the same VPC as specified in the subnet. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The ID of the subnets in the VPC that you want to connect. + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` + + // The IDs for the VPC service endpoints of your VPC workforce. + VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` + + // The ID of the VPC that the workforce uses for communication. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type WorkforceVPCConfigParameters struct { + + // The VPC security group IDs. The security groups must be for the same VPC as specified in the subnet. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // The ID of the subnets in the VPC that you want to connect. + // +kubebuilder:validation:Optional + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` + + // The ID of the VPC that the workforce uses for communication. + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +// WorkforceSpec defines the desired state of Workforce +type WorkforceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkforceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WorkforceInitParameters `json:"initProvider,omitempty"` +} + +// WorkforceStatus defines the observed state of Workforce. +type WorkforceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkforceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workforce is the Schema for the Workforces API. Provides a SageMaker Workforce resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Workforce struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec WorkforceSpec `json:"spec"` + Status WorkforceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkforceList contains a list of Workforces +type WorkforceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workforce `json:"items"` +} + +// Repository type metadata. 
+var ( + Workforce_Kind = "Workforce" + Workforce_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workforce_Kind}.String() + Workforce_KindAPIVersion = Workforce_Kind + "." + CRDGroupVersion.String() + Workforce_GroupVersionKind = CRDGroupVersion.WithKind(Workforce_Kind) +) + +func init() { + SchemeBuilder.Register(&Workforce{}, &WorkforceList{}) +} diff --git a/apis/sagemaker/v1beta2/zz_workteam_terraformed.go b/apis/sagemaker/v1beta2/zz_workteam_terraformed.go new file mode 100755 index 0000000000..6e7d2536ab --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_workteam_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workteam +func (mg *Workteam) GetTerraformResourceType() string { + return "aws_sagemaker_workteam" +} + +// GetConnectionDetailsMapping for this Workteam +func (tr *Workteam) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Workteam +func (tr *Workteam) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workteam +func (tr *Workteam) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workteam +func (tr *Workteam) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workteam 
+func (tr *Workteam) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Workteam
+func (tr *Workteam) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Workteam
+func (tr *Workteam) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Workteam
+func (tr *Workteam) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Workteam using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Workteam) LateInitialize(attrs []byte) (bool, error) { + params := &WorkteamParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workteam) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sagemaker/v1beta2/zz_workteam_types.go b/apis/sagemaker/v1beta2/zz_workteam_types.go new file mode 100755 index 0000000000..5217c6608b --- /dev/null +++ b/apis/sagemaker/v1beta2/zz_workteam_types.go @@ -0,0 +1,345 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CognitoMemberDefinitionInitParameters struct { + + // An identifier for an application client. You must create the app client ID using Amazon Cognito. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolClient + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Reference to a UserPoolClient in cognitoidp to populate clientId. + // +kubebuilder:validation:Optional + ClientIDRef *v1.Reference `json:"clientIdRef,omitempty" tf:"-"` + + // Selector for a UserPoolClient in cognitoidp to populate clientId. 
+ // +kubebuilder:validation:Optional + ClientIDSelector *v1.Selector `json:"clientIdSelector,omitempty" tf:"-"` + + // An identifier for a user group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + UserGroup *string `json:"userGroup,omitempty" tf:"user_group,omitempty"` + + // Reference to a UserGroup in cognitoidp to populate userGroup. + // +kubebuilder:validation:Optional + UserGroupRef *v1.Reference `json:"userGroupRef,omitempty" tf:"-"` + + // Selector for a UserGroup in cognitoidp to populate userGroup. + // +kubebuilder:validation:Optional + UserGroupSelector *v1.Selector `json:"userGroupSelector,omitempty" tf:"-"` + + // An identifier for a user pool. The user pool must be in the same region as the service that you are calling. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("user_pool_id",false) + UserPool *string `json:"userPool,omitempty" tf:"user_pool,omitempty"` + + // Reference to a UserPoolDomain in cognitoidp to populate userPool. + // +kubebuilder:validation:Optional + UserPoolRef *v1.Reference `json:"userPoolRef,omitempty" tf:"-"` + + // Selector for a UserPoolDomain in cognitoidp to populate userPool. + // +kubebuilder:validation:Optional + UserPoolSelector *v1.Selector `json:"userPoolSelector,omitempty" tf:"-"` +} + +type CognitoMemberDefinitionObservation struct { + + // An identifier for an application client. You must create the app client ID using Amazon Cognito. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // An identifier for a user group. + UserGroup *string `json:"userGroup,omitempty" tf:"user_group,omitempty"` + + // An identifier for a user pool. 
The user pool must be in the same region as the service that you are calling. + UserPool *string `json:"userPool,omitempty" tf:"user_pool,omitempty"` +} + +type CognitoMemberDefinitionParameters struct { + + // An identifier for an application client. You must create the app client ID using Amazon Cognito. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolClient + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Reference to a UserPoolClient in cognitoidp to populate clientId. + // +kubebuilder:validation:Optional + ClientIDRef *v1.Reference `json:"clientIdRef,omitempty" tf:"-"` + + // Selector for a UserPoolClient in cognitoidp to populate clientId. + // +kubebuilder:validation:Optional + ClientIDSelector *v1.Selector `json:"clientIdSelector,omitempty" tf:"-"` + + // An identifier for a user group. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + UserGroup *string `json:"userGroup,omitempty" tf:"user_group,omitempty"` + + // Reference to a UserGroup in cognitoidp to populate userGroup. + // +kubebuilder:validation:Optional + UserGroupRef *v1.Reference `json:"userGroupRef,omitempty" tf:"-"` + + // Selector for a UserGroup in cognitoidp to populate userGroup. + // +kubebuilder:validation:Optional + UserGroupSelector *v1.Selector `json:"userGroupSelector,omitempty" tf:"-"` + + // An identifier for a user pool. The user pool must be in the same region as the service that you are calling. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/cognitoidp/v1beta1.UserPoolDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("user_pool_id",false) + // +kubebuilder:validation:Optional + UserPool *string `json:"userPool,omitempty" tf:"user_pool,omitempty"` + + // Reference to a UserPoolDomain in cognitoidp to populate userPool. + // +kubebuilder:validation:Optional + UserPoolRef *v1.Reference `json:"userPoolRef,omitempty" tf:"-"` + + // Selector for a UserPoolDomain in cognitoidp to populate userPool. + // +kubebuilder:validation:Optional + UserPoolSelector *v1.Selector `json:"userPoolSelector,omitempty" tf:"-"` +} + +type MemberDefinitionInitParameters struct { + + // The Amazon Cognito user group that is part of the work team. See Cognito Member Definition details below. + CognitoMemberDefinition *CognitoMemberDefinitionInitParameters `json:"cognitoMemberDefinition,omitempty" tf:"cognito_member_definition,omitempty"` + + // A list user groups that exist in your OIDC Identity Provider (IdP). One to ten groups can be used to create a single private work team. See Cognito Member Definition details below. + OidcMemberDefinition *OidcMemberDefinitionInitParameters `json:"oidcMemberDefinition,omitempty" tf:"oidc_member_definition,omitempty"` +} + +type MemberDefinitionObservation struct { + + // The Amazon Cognito user group that is part of the work team. See Cognito Member Definition details below. + CognitoMemberDefinition *CognitoMemberDefinitionObservation `json:"cognitoMemberDefinition,omitempty" tf:"cognito_member_definition,omitempty"` + + // A list user groups that exist in your OIDC Identity Provider (IdP). One to ten groups can be used to create a single private work team. See Cognito Member Definition details below. 
+ OidcMemberDefinition *OidcMemberDefinitionObservation `json:"oidcMemberDefinition,omitempty" tf:"oidc_member_definition,omitempty"` +} + +type MemberDefinitionParameters struct { + + // The Amazon Cognito user group that is part of the work team. See Cognito Member Definition details below. + // +kubebuilder:validation:Optional + CognitoMemberDefinition *CognitoMemberDefinitionParameters `json:"cognitoMemberDefinition,omitempty" tf:"cognito_member_definition,omitempty"` + + // A list user groups that exist in your OIDC Identity Provider (IdP). One to ten groups can be used to create a single private work team. See Cognito Member Definition details below. + // +kubebuilder:validation:Optional + OidcMemberDefinition *OidcMemberDefinitionParameters `json:"oidcMemberDefinition,omitempty" tf:"oidc_member_definition,omitempty"` +} + +type NotificationConfigurationInitParameters struct { + + // The ARN for the SNS topic to which notifications should be published. + NotificationTopicArn *string `json:"notificationTopicArn,omitempty" tf:"notification_topic_arn,omitempty"` +} + +type NotificationConfigurationObservation struct { + + // The ARN for the SNS topic to which notifications should be published. + NotificationTopicArn *string `json:"notificationTopicArn,omitempty" tf:"notification_topic_arn,omitempty"` +} + +type NotificationConfigurationParameters struct { + + // The ARN for the SNS topic to which notifications should be published. + // +kubebuilder:validation:Optional + NotificationTopicArn *string `json:"notificationTopicArn,omitempty" tf:"notification_topic_arn,omitempty"` +} + +type OidcMemberDefinitionInitParameters struct { + + // A list of comma separated strings that identifies user groups in your OIDC IdP. Each user group is made up of a group of private workers. 
+ // +listType=set + Groups []*string `json:"groups,omitempty" tf:"groups,omitempty"` +} + +type OidcMemberDefinitionObservation struct { + + // A list of comma separated strings that identifies user groups in your OIDC IdP. Each user group is made up of a group of private workers. + // +listType=set + Groups []*string `json:"groups,omitempty" tf:"groups,omitempty"` +} + +type OidcMemberDefinitionParameters struct { + + // A list of comma separated strings that identifies user groups in your OIDC IdP. Each user group is made up of a group of private workers. + // +kubebuilder:validation:Optional + // +listType=set + Groups []*string `json:"groups" tf:"groups,omitempty"` +} + +type WorkteamInitParameters struct { + + // A description of the work team. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use cognito_member_definition. For workforces created using your own OIDC identity provider (IdP) use oidc_member_definition. Do not provide input for both of these parameters in a single request. see Member Definition details below. + MemberDefinition []MemberDefinitionInitParameters `json:"memberDefinition,omitempty" tf:"member_definition,omitempty"` + + // Configures notification of workers regarding available or expiring work items. see Notification Configuration details below. + NotificationConfiguration *NotificationConfigurationInitParameters `json:"notificationConfiguration,omitempty" tf:"notification_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The name of the Workteam (must be unique). 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Workforce + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + WorkforceName *string `json:"workforceName,omitempty" tf:"workforce_name,omitempty"` + + // Reference to a Workforce in sagemaker to populate workforceName. + // +kubebuilder:validation:Optional + WorkforceNameRef *v1.Reference `json:"workforceNameRef,omitempty" tf:"-"` + + // Selector for a Workforce in sagemaker to populate workforceName. + // +kubebuilder:validation:Optional + WorkforceNameSelector *v1.Selector `json:"workforceNameSelector,omitempty" tf:"-"` +} + +type WorkteamObservation struct { + + // The Amazon Resource Name (ARN) assigned by AWS to this Workteam. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A description of the work team. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the Workteam. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use cognito_member_definition. For workforces created using your own OIDC identity provider (IdP) use oidc_member_definition. Do not provide input for both of these parameters in a single request. see Member Definition details below. + MemberDefinition []MemberDefinitionObservation `json:"memberDefinition,omitempty" tf:"member_definition,omitempty"` + + // Configures notification of workers regarding available or expiring work items. see Notification Configuration details below. + NotificationConfiguration *NotificationConfigurationObservation `json:"notificationConfiguration,omitempty" tf:"notification_configuration,omitempty"` + + // The subdomain for your OIDC Identity Provider. 
+ Subdomain *string `json:"subdomain,omitempty" tf:"subdomain,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The name of the Workteam (must be unique). + WorkforceName *string `json:"workforceName,omitempty" tf:"workforce_name,omitempty"` +} + +type WorkteamParameters struct { + + // A description of the work team. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use cognito_member_definition. For workforces created using your own OIDC identity provider (IdP) use oidc_member_definition. Do not provide input for both of these parameters in a single request. see Member Definition details below. + // +kubebuilder:validation:Optional + MemberDefinition []MemberDefinitionParameters `json:"memberDefinition,omitempty" tf:"member_definition,omitempty"` + + // Configures notification of workers regarding available or expiring work items. see Notification Configuration details below. + // +kubebuilder:validation:Optional + NotificationConfiguration *NotificationConfigurationParameters `json:"notificationConfiguration,omitempty" tf:"notification_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The name of the Workteam (must be unique). + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sagemaker/v1beta2.Workforce + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + WorkforceName *string `json:"workforceName,omitempty" tf:"workforce_name,omitempty"` + + // Reference to a Workforce in sagemaker to populate workforceName. + // +kubebuilder:validation:Optional + WorkforceNameRef *v1.Reference `json:"workforceNameRef,omitempty" tf:"-"` + + // Selector for a Workforce in sagemaker to populate workforceName. + // +kubebuilder:validation:Optional + WorkforceNameSelector *v1.Selector `json:"workforceNameSelector,omitempty" tf:"-"` +} + +// WorkteamSpec defines the desired state of Workteam +type WorkteamSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkteamParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WorkteamInitParameters `json:"initProvider,omitempty"` +} + +// WorkteamStatus defines the observed state of Workteam. 
+type WorkteamStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkteamObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workteam is the Schema for the Workteams API. Provides a SageMaker Workteam resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Workteam struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.description) || (has(self.initProvider) && has(self.initProvider.description))",message="spec.forProvider.description is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.memberDefinition) || (has(self.initProvider) && has(self.initProvider.memberDefinition))",message="spec.forProvider.memberDefinition is a required parameter" + Spec WorkteamSpec `json:"spec"` + Status WorkteamStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkteamList contains a list of Workteams +type WorkteamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workteam `json:"items"` +} + +// Repository type metadata. 
+var ( + Workteam_Kind = "Workteam" + Workteam_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workteam_Kind}.String() + Workteam_KindAPIVersion = Workteam_Kind + "." + CRDGroupVersion.String() + Workteam_GroupVersionKind = CRDGroupVersion.WithKind(Workteam_Kind) +) + +func init() { + SchemeBuilder.Register(&Workteam{}, &WorkteamList{}) +} diff --git a/apis/scheduler/v1beta1/zz_generated.conversion_hubs.go b/apis/scheduler/v1beta1/zz_generated.conversion_hubs.go index 4e69dd9a89..cd733fb66e 100755 --- a/apis/scheduler/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/scheduler/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Schedule) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ScheduleGroup) Hub() {} diff --git a/apis/scheduler/v1beta1/zz_generated.conversion_spokes.go b/apis/scheduler/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..d235698ff4 --- /dev/null +++ b/apis/scheduler/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Schedule to the hub type. +func (tr *Schedule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Schedule type. 
+func (tr *Schedule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/scheduler/v1beta2/zz_generated.conversion_hubs.go b/apis/scheduler/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..1c9fc4967c --- /dev/null +++ b/apis/scheduler/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Schedule) Hub() {} diff --git a/apis/scheduler/v1beta2/zz_generated.deepcopy.go b/apis/scheduler/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8d97f894a7 --- /dev/null +++ b/apis/scheduler/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1806 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityProviderStrategyInitParameters) DeepCopyInto(out *CapacityProviderStrategyInitParameters) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(float64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyInitParameters. +func (in *CapacityProviderStrategyInitParameters) DeepCopy() *CapacityProviderStrategyInitParameters { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityProviderStrategyObservation) DeepCopyInto(out *CapacityProviderStrategyObservation) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(float64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyObservation. +func (in *CapacityProviderStrategyObservation) DeepCopy() *CapacityProviderStrategyObservation { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityProviderStrategyParameters) DeepCopyInto(out *CapacityProviderStrategyParameters) { + *out = *in + if in.Base != nil { + in, out := &in.Base, &out.Base + *out = new(float64) + **out = **in + } + if in.CapacityProvider != nil { + in, out := &in.CapacityProvider, &out.CapacityProvider + *out = new(string) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityProviderStrategyParameters. +func (in *CapacityProviderStrategyParameters) DeepCopy() *CapacityProviderStrategyParameters { + if in == nil { + return nil + } + out := new(CapacityProviderStrategyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterConfigInitParameters) DeepCopyInto(out *DeadLetterConfigInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfigInitParameters. +func (in *DeadLetterConfigInitParameters) DeepCopy() *DeadLetterConfigInitParameters { + if in == nil { + return nil + } + out := new(DeadLetterConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterConfigObservation) DeepCopyInto(out *DeadLetterConfigObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfigObservation. 
+func (in *DeadLetterConfigObservation) DeepCopy() *DeadLetterConfigObservation { + if in == nil { + return nil + } + out := new(DeadLetterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterConfigParameters) DeepCopyInto(out *DeadLetterConfigParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterConfigParameters. +func (in *DeadLetterConfigParameters) DeepCopy() *DeadLetterConfigParameters { + if in == nil { + return nil + } + out := new(DeadLetterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsParametersInitParameters) DeepCopyInto(out *EcsParametersInitParameters) { + *out = *in + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]CapacityProviderStrategyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableEcsManagedTags != nil { + in, out := &in.EnableEcsManagedTags, &out.EnableEcsManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PlacementConstraints != 
nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]PlacementConstraintsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementStrategy != nil { + in, out := &in.PlacementStrategy, &out.PlacementStrategy + *out = make([]PlacementStrategyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.ReferenceID != nil { + in, out := &in.ReferenceID, &out.ReferenceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskCount != nil { + in, out := &in.TaskCount, &out.TaskCount + *out = new(float64) + **out = **in + } + if in.TaskDefinitionArn != nil { + in, out := &in.TaskDefinitionArn, &out.TaskDefinitionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsParametersInitParameters. +func (in *EcsParametersInitParameters) DeepCopy() *EcsParametersInitParameters { + if in == nil { + return nil + } + out := new(EcsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EcsParametersObservation) DeepCopyInto(out *EcsParametersObservation) { + *out = *in + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]CapacityProviderStrategyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableEcsManagedTags != nil { + in, out := &in.EnableEcsManagedTags, &out.EnableEcsManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]PlacementConstraintsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementStrategy != nil { + in, out := &in.PlacementStrategy, &out.PlacementStrategy + *out = make([]PlacementStrategyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.ReferenceID != nil { + in, out := &in.ReferenceID, &out.ReferenceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskCount != nil { + in, out := &in.TaskCount, &out.TaskCount + *out = new(float64) + **out = **in + } + if in.TaskDefinitionArn != nil { + in, out := &in.TaskDefinitionArn, &out.TaskDefinitionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsParametersObservation. +func (in *EcsParametersObservation) DeepCopy() *EcsParametersObservation { + if in == nil { + return nil + } + out := new(EcsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EcsParametersParameters) DeepCopyInto(out *EcsParametersParameters) { + *out = *in + if in.CapacityProviderStrategy != nil { + in, out := &in.CapacityProviderStrategy, &out.CapacityProviderStrategy + *out = make([]CapacityProviderStrategyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableEcsManagedTags != nil { + in, out := &in.EnableEcsManagedTags, &out.EnableEcsManagedTags + *out = new(bool) + **out = **in + } + if in.EnableExecuteCommand != nil { + in, out := &in.EnableExecuteCommand, &out.EnableExecuteCommand + *out = new(bool) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(string) + **out = **in + } + if in.LaunchType != nil { + in, out := &in.LaunchType, &out.LaunchType + *out = new(string) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]PlacementConstraintsParameters, len(*in)) + for 
i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementStrategy != nil { + in, out := &in.PlacementStrategy, &out.PlacementStrategy + *out = make([]PlacementStrategyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformVersion != nil { + in, out := &in.PlatformVersion, &out.PlatformVersion + *out = new(string) + **out = **in + } + if in.PropagateTags != nil { + in, out := &in.PropagateTags, &out.PropagateTags + *out = new(string) + **out = **in + } + if in.ReferenceID != nil { + in, out := &in.ReferenceID, &out.ReferenceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskCount != nil { + in, out := &in.TaskCount, &out.TaskCount + *out = new(float64) + **out = **in + } + if in.TaskDefinitionArn != nil { + in, out := &in.TaskDefinitionArn, &out.TaskDefinitionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EcsParametersParameters. +func (in *EcsParametersParameters) DeepCopy() *EcsParametersParameters { + if in == nil { + return nil + } + out := new(EcsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventbridgeParametersInitParameters) DeepCopyInto(out *EventbridgeParametersInitParameters) { + *out = *in + if in.DetailType != nil { + in, out := &in.DetailType, &out.DetailType + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventbridgeParametersInitParameters. +func (in *EventbridgeParametersInitParameters) DeepCopy() *EventbridgeParametersInitParameters { + if in == nil { + return nil + } + out := new(EventbridgeParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventbridgeParametersObservation) DeepCopyInto(out *EventbridgeParametersObservation) { + *out = *in + if in.DetailType != nil { + in, out := &in.DetailType, &out.DetailType + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventbridgeParametersObservation. +func (in *EventbridgeParametersObservation) DeepCopy() *EventbridgeParametersObservation { + if in == nil { + return nil + } + out := new(EventbridgeParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventbridgeParametersParameters) DeepCopyInto(out *EventbridgeParametersParameters) { + *out = *in + if in.DetailType != nil { + in, out := &in.DetailType, &out.DetailType + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventbridgeParametersParameters. +func (in *EventbridgeParametersParameters) DeepCopy() *EventbridgeParametersParameters { + if in == nil { + return nil + } + out := new(EventbridgeParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleTimeWindowInitParameters) DeepCopyInto(out *FlexibleTimeWindowInitParameters) { + *out = *in + if in.MaximumWindowInMinutes != nil { + in, out := &in.MaximumWindowInMinutes, &out.MaximumWindowInMinutes + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleTimeWindowInitParameters. +func (in *FlexibleTimeWindowInitParameters) DeepCopy() *FlexibleTimeWindowInitParameters { + if in == nil { + return nil + } + out := new(FlexibleTimeWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlexibleTimeWindowObservation) DeepCopyInto(out *FlexibleTimeWindowObservation) { + *out = *in + if in.MaximumWindowInMinutes != nil { + in, out := &in.MaximumWindowInMinutes, &out.MaximumWindowInMinutes + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleTimeWindowObservation. +func (in *FlexibleTimeWindowObservation) DeepCopy() *FlexibleTimeWindowObservation { + if in == nil { + return nil + } + out := new(FlexibleTimeWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleTimeWindowParameters) DeepCopyInto(out *FlexibleTimeWindowParameters) { + *out = *in + if in.MaximumWindowInMinutes != nil { + in, out := &in.MaximumWindowInMinutes, &out.MaximumWindowInMinutes + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleTimeWindowParameters. +func (in *FlexibleTimeWindowParameters) DeepCopy() *FlexibleTimeWindowParameters { + if in == nil { + return nil + } + out := new(FlexibleTimeWindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisParametersInitParameters) DeepCopyInto(out *KinesisParametersInitParameters) { + *out = *in + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisParametersInitParameters. 
+func (in *KinesisParametersInitParameters) DeepCopy() *KinesisParametersInitParameters { + if in == nil { + return nil + } + out := new(KinesisParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisParametersObservation) DeepCopyInto(out *KinesisParametersObservation) { + *out = *in + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisParametersObservation. +func (in *KinesisParametersObservation) DeepCopy() *KinesisParametersObservation { + if in == nil { + return nil + } + out := new(KinesisParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisParametersParameters) DeepCopyInto(out *KinesisParametersParameters) { + *out = *in + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisParametersParameters. +func (in *KinesisParametersParameters) DeepCopy() *KinesisParametersParameters { + if in == nil { + return nil + } + out := new(KinesisParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationInitParameters) DeepCopyInto(out *NetworkConfigurationInitParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationInitParameters. +func (in *NetworkConfigurationInitParameters) DeepCopy() *NetworkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationObservation) DeepCopyInto(out *NetworkConfigurationObservation) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationObservation. +func (in *NetworkConfigurationObservation) DeepCopy() *NetworkConfigurationObservation { + if in == nil { + return nil + } + out := new(NetworkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfigurationParameters) DeepCopyInto(out *NetworkConfigurationParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationParameters. 
+func (in *NetworkConfigurationParameters) DeepCopy() *NetworkConfigurationParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineParameterInitParameters) DeepCopyInto(out *PipelineParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameterInitParameters. +func (in *PipelineParameterInitParameters) DeepCopy() *PipelineParameterInitParameters { + if in == nil { + return nil + } + out := new(PipelineParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineParameterObservation) DeepCopyInto(out *PipelineParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameterObservation. +func (in *PipelineParameterObservation) DeepCopy() *PipelineParameterObservation { + if in == nil { + return nil + } + out := new(PipelineParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineParameterParameters) DeepCopyInto(out *PipelineParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameterParameters. +func (in *PipelineParameterParameters) DeepCopy() *PipelineParameterParameters { + if in == nil { + return nil + } + out := new(PipelineParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementConstraintsInitParameters) DeepCopyInto(out *PlacementConstraintsInitParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraintsInitParameters. +func (in *PlacementConstraintsInitParameters) DeepCopy() *PlacementConstraintsInitParameters { + if in == nil { + return nil + } + out := new(PlacementConstraintsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementConstraintsObservation) DeepCopyInto(out *PlacementConstraintsObservation) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraintsObservation. 
+func (in *PlacementConstraintsObservation) DeepCopy() *PlacementConstraintsObservation { + if in == nil { + return nil + } + out := new(PlacementConstraintsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementConstraintsParameters) DeepCopyInto(out *PlacementConstraintsParameters) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementConstraintsParameters. +func (in *PlacementConstraintsParameters) DeepCopy() *PlacementConstraintsParameters { + if in == nil { + return nil + } + out := new(PlacementConstraintsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementStrategyInitParameters) DeepCopyInto(out *PlacementStrategyInitParameters) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementStrategyInitParameters. +func (in *PlacementStrategyInitParameters) DeepCopy() *PlacementStrategyInitParameters { + if in == nil { + return nil + } + out := new(PlacementStrategyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementStrategyObservation) DeepCopyInto(out *PlacementStrategyObservation) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementStrategyObservation. +func (in *PlacementStrategyObservation) DeepCopy() *PlacementStrategyObservation { + if in == nil { + return nil + } + out := new(PlacementStrategyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementStrategyParameters) DeepCopyInto(out *PlacementStrategyParameters) { + *out = *in + if in.Field != nil { + in, out := &in.Field, &out.Field + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementStrategyParameters. +func (in *PlacementStrategyParameters) DeepCopy() *PlacementStrategyParameters { + if in == nil { + return nil + } + out := new(PlacementStrategyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyInitParameters) DeepCopyInto(out *RetryPolicyInitParameters) { + *out = *in + if in.MaximumEventAgeInSeconds != nil { + in, out := &in.MaximumEventAgeInSeconds, &out.MaximumEventAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyInitParameters. 
+func (in *RetryPolicyInitParameters) DeepCopy() *RetryPolicyInitParameters { + if in == nil { + return nil + } + out := new(RetryPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyObservation) DeepCopyInto(out *RetryPolicyObservation) { + *out = *in + if in.MaximumEventAgeInSeconds != nil { + in, out := &in.MaximumEventAgeInSeconds, &out.MaximumEventAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyObservation. +func (in *RetryPolicyObservation) DeepCopy() *RetryPolicyObservation { + if in == nil { + return nil + } + out := new(RetryPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyParameters) DeepCopyInto(out *RetryPolicyParameters) { + *out = *in + if in.MaximumEventAgeInSeconds != nil { + in, out := &in.MaximumEventAgeInSeconds, &out.MaximumEventAgeInSeconds + *out = new(float64) + **out = **in + } + if in.MaximumRetryAttempts != nil { + in, out := &in.MaximumRetryAttempts, &out.MaximumRetryAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyParameters. +func (in *RetryPolicyParameters) DeepCopy() *RetryPolicyParameters { + if in == nil { + return nil + } + out := new(RetryPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SagemakerPipelineParametersInitParameters) DeepCopyInto(out *SagemakerPipelineParametersInitParameters) { + *out = *in + if in.PipelineParameter != nil { + in, out := &in.PipelineParameter, &out.PipelineParameter + *out = make([]PipelineParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SagemakerPipelineParametersInitParameters. +func (in *SagemakerPipelineParametersInitParameters) DeepCopy() *SagemakerPipelineParametersInitParameters { + if in == nil { + return nil + } + out := new(SagemakerPipelineParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SagemakerPipelineParametersObservation) DeepCopyInto(out *SagemakerPipelineParametersObservation) { + *out = *in + if in.PipelineParameter != nil { + in, out := &in.PipelineParameter, &out.PipelineParameter + *out = make([]PipelineParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SagemakerPipelineParametersObservation. +func (in *SagemakerPipelineParametersObservation) DeepCopy() *SagemakerPipelineParametersObservation { + if in == nil { + return nil + } + out := new(SagemakerPipelineParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SagemakerPipelineParametersParameters) DeepCopyInto(out *SagemakerPipelineParametersParameters) { + *out = *in + if in.PipelineParameter != nil { + in, out := &in.PipelineParameter, &out.PipelineParameter + *out = make([]PipelineParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SagemakerPipelineParametersParameters. +func (in *SagemakerPipelineParametersParameters) DeepCopy() *SagemakerPipelineParametersParameters { + if in == nil { + return nil + } + out := new(SagemakerPipelineParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Schedule) DeepCopyInto(out *Schedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule. +func (in *Schedule) DeepCopy() *Schedule { + if in == nil { + return nil + } + out := new(Schedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Schedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.FlexibleTimeWindow != nil { + in, out := &in.FlexibleTimeWindow, &out.FlexibleTimeWindow + *out = new(FlexibleTimeWindowInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.ScheduleExpressionTimezone != nil { + in, out := &in.ScheduleExpressionTimezone, &out.ScheduleExpressionTimezone + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. 
+func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleList) DeepCopyInto(out *ScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Schedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleList. +func (in *ScheduleList) DeepCopy() *ScheduleList { + if in == nil { + return nil + } + out := new(ScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.FlexibleTimeWindow != nil { + in, out := &in.FlexibleTimeWindow, &out.FlexibleTimeWindow + *out = new(FlexibleTimeWindowObservation) + (*in).DeepCopyInto(*out) + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.ScheduleExpressionTimezone != nil { + in, out := &in.ScheduleExpressionTimezone, &out.ScheduleExpressionTimezone + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.FlexibleTimeWindow != nil { + in, out := &in.FlexibleTimeWindow, &out.FlexibleTimeWindow + *out = new(FlexibleTimeWindowParameters) + (*in).DeepCopyInto(*out) + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.KMSKeyArnRef != nil { + in, out := &in.KMSKeyArnRef, &out.KMSKeyArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArnSelector != nil { + in, out := &in.KMSKeyArnSelector, &out.KMSKeyArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.ScheduleExpressionTimezone != nil { + in, out := &in.ScheduleExpressionTimezone, &out.ScheduleExpressionTimezone + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(TargetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. 
+func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleSpec) DeepCopyInto(out *ScheduleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleSpec. +func (in *ScheduleSpec) DeepCopy() *ScheduleSpec { + if in == nil { + return nil + } + out := new(ScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleStatus. +func (in *ScheduleStatus) DeepCopy() *ScheduleStatus { + if in == nil { + return nil + } + out := new(ScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqsParametersInitParameters) DeepCopyInto(out *SqsParametersInitParameters) { + *out = *in + if in.MessageGroupID != nil { + in, out := &in.MessageGroupID, &out.MessageGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqsParametersInitParameters. 
+func (in *SqsParametersInitParameters) DeepCopy() *SqsParametersInitParameters { + if in == nil { + return nil + } + out := new(SqsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqsParametersObservation) DeepCopyInto(out *SqsParametersObservation) { + *out = *in + if in.MessageGroupID != nil { + in, out := &in.MessageGroupID, &out.MessageGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqsParametersObservation. +func (in *SqsParametersObservation) DeepCopy() *SqsParametersObservation { + if in == nil { + return nil + } + out := new(SqsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SqsParametersParameters) DeepCopyInto(out *SqsParametersParameters) { + *out = *in + if in.MessageGroupID != nil { + in, out := &in.MessageGroupID, &out.MessageGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SqsParametersParameters. +func (in *SqsParametersParameters) DeepCopy() *SqsParametersParameters { + if in == nil { + return nil + } + out := new(SqsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetInitParameters) DeepCopyInto(out *TargetInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EcsParameters != nil { + in, out := &in.EcsParameters, &out.EcsParameters + *out = new(EcsParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventbridgeParameters != nil { + in, out := &in.EventbridgeParameters, &out.EventbridgeParameters + *out = new(EventbridgeParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(string) + **out = **in + } + if in.KinesisParameters != nil { + in, out := &in.KinesisParameters, &out.KinesisParameters + *out = new(KinesisParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SagemakerPipelineParameters != nil { + in, out := &in.SagemakerPipelineParameters, &out.SagemakerPipelineParameters + *out = new(SagemakerPipelineParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SqsParameters != nil { + in, out := 
&in.SqsParameters, &out.SqsParameters + *out = new(SqsParametersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetInitParameters. +func (in *TargetInitParameters) DeepCopy() *TargetInitParameters { + if in == nil { + return nil + } + out := new(TargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetObservation) DeepCopyInto(out *TargetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.EcsParameters != nil { + in, out := &in.EcsParameters, &out.EcsParameters + *out = new(EcsParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.EventbridgeParameters != nil { + in, out := &in.EventbridgeParameters, &out.EventbridgeParameters + *out = new(EventbridgeParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(string) + **out = **in + } + if in.KinesisParameters != nil { + in, out := &in.KinesisParameters, &out.KinesisParameters + *out = new(KinesisParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.SagemakerPipelineParameters != nil { + in, out := &in.SagemakerPipelineParameters, &out.SagemakerPipelineParameters + *out = new(SagemakerPipelineParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.SqsParameters != nil { + in, out := &in.SqsParameters, &out.SqsParameters 
+ *out = new(SqsParametersObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetObservation. +func (in *TargetObservation) DeepCopy() *TargetObservation { + if in == nil { + return nil + } + out := new(TargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetParameters) DeepCopyInto(out *TargetParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ArnRef != nil { + in, out := &in.ArnRef, &out.ArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ArnSelector != nil { + in, out := &in.ArnSelector, &out.ArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DeadLetterConfig != nil { + in, out := &in.DeadLetterConfig, &out.DeadLetterConfig + *out = new(DeadLetterConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.EcsParameters != nil { + in, out := &in.EcsParameters, &out.EcsParameters + *out = new(EcsParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.EventbridgeParameters != nil { + in, out := &in.EventbridgeParameters, &out.EventbridgeParameters + *out = new(EventbridgeParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(string) + **out = **in + } + if in.KinesisParameters != nil { + in, out := &in.KinesisParameters, &out.KinesisParameters + *out = new(KinesisParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = 
new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SagemakerPipelineParameters != nil { + in, out := &in.SagemakerPipelineParameters, &out.SagemakerPipelineParameters + *out = new(SagemakerPipelineParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.SqsParameters != nil { + in, out := &in.SqsParameters, &out.SqsParameters + *out = new(SqsParametersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetParameters. +func (in *TargetParameters) DeepCopy() *TargetParameters { + if in == nil { + return nil + } + out := new(TargetParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/scheduler/v1beta2/zz_generated.managed.go b/apis/scheduler/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..eddb234f33 --- /dev/null +++ b/apis/scheduler/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Schedule. +func (mg *Schedule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Schedule. +func (mg *Schedule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Schedule. +func (mg *Schedule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Schedule. +func (mg *Schedule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Schedule. 
+func (mg *Schedule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Schedule. +func (mg *Schedule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Schedule. +func (mg *Schedule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Schedule. +func (mg *Schedule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Schedule. +func (mg *Schedule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Schedule. +func (mg *Schedule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Schedule. +func (mg *Schedule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Schedule. +func (mg *Schedule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/scheduler/v1beta2/zz_generated.managedlist.go b/apis/scheduler/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..460e0bc0c2 --- /dev/null +++ b/apis/scheduler/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ScheduleList. 
+func (l *ScheduleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/scheduler/v1beta2/zz_generated.resolvers.go b/apis/scheduler/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..ded747d19c --- /dev/null +++ b/apis/scheduler/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,154 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Schedule. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Schedule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyArnRef, + Selector: mg.Spec.ForProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKeyArn") + } + mg.Spec.ForProvider.KMSKeyArn = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Target != nil { + { + m, l, err = apisresolver.GetManagedResource("sqs.aws.upbound.io", "v1beta1", "Queue", "QueueList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Target.Arn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Target.ArnRef, + Selector: mg.Spec.ForProvider.Target.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Target.Arn") + } + mg.Spec.ForProvider.Target.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Target.ArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Target != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Target.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Target.RoleArnRef, + Selector: mg.Spec.ForProvider.Target.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Target.RoleArn") + } + mg.Spec.ForProvider.Target.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Target.RoleArnRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed 
resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKeyArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyArnRef, + Selector: mg.Spec.InitProvider.KMSKeyArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKeyArn") + } + mg.Spec.InitProvider.KMSKeyArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Target != nil { + { + m, l, err = apisresolver.GetManagedResource("sqs.aws.upbound.io", "v1beta1", "Queue", "QueueList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Target.Arn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Target.ArnRef, + Selector: mg.Spec.InitProvider.Target.ArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Target.Arn") + } + mg.Spec.InitProvider.Target.Arn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Target.ArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Target != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Target.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Target.RoleArnRef, + Selector: 
mg.Spec.InitProvider.Target.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Target.RoleArn") + } + mg.Spec.InitProvider.Target.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Target.RoleArnRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/scheduler/v1beta2/zz_groupversion_info.go b/apis/scheduler/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..f473e86098 --- /dev/null +++ b/apis/scheduler/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=scheduler.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "scheduler.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/scheduler/v1beta2/zz_schedule_terraformed.go b/apis/scheduler/v1beta2/zz_schedule_terraformed.go new file mode 100755 index 0000000000..3fb7313c40 --- /dev/null +++ b/apis/scheduler/v1beta2/zz_schedule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Schedule +func (mg *Schedule) GetTerraformResourceType() string { + return "aws_scheduler_schedule" +} + +// GetConnectionDetailsMapping for this Schedule +func (tr *Schedule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Schedule +func (tr *Schedule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Schedule +func (tr *Schedule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Schedule +func (tr *Schedule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Schedule +func (tr *Schedule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Schedule +func (tr *Schedule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Schedule +func (tr *Schedule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, 
&base) +} + +// GetInitParameters of this Schedule +func (tr *Schedule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Schedule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Schedule) LateInitialize(attrs []byte) (bool, error) { + params := &ScheduleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Schedule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/scheduler/v1beta2/zz_schedule_types.go b/apis/scheduler/v1beta2/zz_schedule_types.go new file mode 100755 index 0000000000..b3c191ffdd --- /dev/null +++ b/apis/scheduler/v1beta2/zz_schedule_types.go @@ -0,0 +1,860 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CapacityProviderStrategyInitParameters struct { + + // How many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. Ranges from 0 (default) to 100000. + Base *float64 `json:"base,omitempty" tf:"base,omitempty"` + + // Short name of the capacity provider. + CapacityProvider *string `json:"capacityProvider,omitempty" tf:"capacity_provider,omitempty"` + + // Designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied. Ranges from from 0 to 1000. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type CapacityProviderStrategyObservation struct { + + // How many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. Ranges from 0 (default) to 100000. + Base *float64 `json:"base,omitempty" tf:"base,omitempty"` + + // Short name of the capacity provider. 
+ CapacityProvider *string `json:"capacityProvider,omitempty" tf:"capacity_provider,omitempty"` + + // Designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied. Ranges from from 0 to 1000. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type CapacityProviderStrategyParameters struct { + + // How many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. Ranges from 0 (default) to 100000. + // +kubebuilder:validation:Optional + Base *float64 `json:"base,omitempty" tf:"base,omitempty"` + + // Short name of the capacity provider. + // +kubebuilder:validation:Optional + CapacityProvider *string `json:"capacityProvider" tf:"capacity_provider,omitempty"` + + // Designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied. Ranges from from 0 to 1000. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type DeadLetterConfigInitParameters struct { + + // ARN of the target of this schedule, such as a SQS queue or ECS cluster. For universal targets, this is a Service ARN specific to the target service. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type DeadLetterConfigObservation struct { + + // ARN of the target of this schedule, such as a SQS queue or ECS cluster. For universal targets, this is a Service ARN specific to the target service. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` +} + +type DeadLetterConfigParameters struct { + + // ARN of the target of this schedule, such as a SQS queue or ECS cluster. 
For universal targets, this is a Service ARN specific to the target service. + // +kubebuilder:validation:Optional + Arn *string `json:"arn" tf:"arn,omitempty"` +} + +type EcsParametersInitParameters struct { + + // Up to 6 capacity provider strategies to use for the task. Detailed below. + CapacityProviderStrategy []CapacityProviderStrategyInitParameters `json:"capacityProviderStrategy,omitempty" tf:"capacity_provider_strategy,omitempty"` + + // Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon ECS Developer Guide. + EnableEcsManagedTags *bool `json:"enableEcsManagedTags,omitempty" tf:"enable_ecs_managed_tags,omitempty"` + + // Specifies whether to enable the execute command functionality for the containers in this task. + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty" tf:"enable_execute_command,omitempty"` + + // Specifies an ECS task group for the task. At most 255 characters. + Group *string `json:"group,omitempty" tf:"group,omitempty"` + + // Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. One of: EC2, FARGATE, EXTERNAL. + LaunchType *string `json:"launchType,omitempty" tf:"launch_type,omitempty"` + + // Configures the networking associated with the task. Detailed below. + NetworkConfiguration *NetworkConfigurationInitParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // A set of up to 10 placement constraints to use for the task. Detailed below. + PlacementConstraints []PlacementConstraintsInitParameters `json:"placementConstraints,omitempty" tf:"placement_constraints,omitempty"` + + // A set of up to 5 placement strategies. Detailed below. 
+ PlacementStrategy []PlacementStrategyInitParameters `json:"placementStrategy,omitempty" tf:"placement_strategy,omitempty"` + + // Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. + PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // Specifies whether to propagate the tags from the task definition to the task. One of: TASK_DEFINITION. + PropagateTags *string `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // Reference ID to use for the task. + ReferenceID *string `json:"referenceId,omitempty" tf:"reference_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The number of tasks to create. Ranges from 1 (default) to 10. + TaskCount *float64 `json:"taskCount,omitempty" tf:"task_count,omitempty"` + + // ARN of the task definition to use. + TaskDefinitionArn *string `json:"taskDefinitionArn,omitempty" tf:"task_definition_arn,omitempty"` +} + +type EcsParametersObservation struct { + + // Up to 6 capacity provider strategies to use for the task. Detailed below. + CapacityProviderStrategy []CapacityProviderStrategyObservation `json:"capacityProviderStrategy,omitempty" tf:"capacity_provider_strategy,omitempty"` + + // Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon ECS Developer Guide. + EnableEcsManagedTags *bool `json:"enableEcsManagedTags,omitempty" tf:"enable_ecs_managed_tags,omitempty"` + + // Specifies whether to enable the execute command functionality for the containers in this task. + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty" tf:"enable_execute_command,omitempty"` + + // Specifies an ECS task group for the task. At most 255 characters. 
+ Group *string `json:"group,omitempty" tf:"group,omitempty"` + + // Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. One of: EC2, FARGATE, EXTERNAL. + LaunchType *string `json:"launchType,omitempty" tf:"launch_type,omitempty"` + + // Configures the networking associated with the task. Detailed below. + NetworkConfiguration *NetworkConfigurationObservation `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // A set of up to 10 placement constraints to use for the task. Detailed below. + PlacementConstraints []PlacementConstraintsObservation `json:"placementConstraints,omitempty" tf:"placement_constraints,omitempty"` + + // A set of up to 5 placement strategies. Detailed below. + PlacementStrategy []PlacementStrategyObservation `json:"placementStrategy,omitempty" tf:"placement_strategy,omitempty"` + + // Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. + PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // Specifies whether to propagate the tags from the task definition to the task. One of: TASK_DEFINITION. + PropagateTags *string `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // Reference ID to use for the task. + ReferenceID *string `json:"referenceId,omitempty" tf:"reference_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The number of tasks to create. Ranges from 1 (default) to 10. + TaskCount *float64 `json:"taskCount,omitempty" tf:"task_count,omitempty"` + + // ARN of the task definition to use. 
+ TaskDefinitionArn *string `json:"taskDefinitionArn,omitempty" tf:"task_definition_arn,omitempty"` +} + +type EcsParametersParameters struct { + + // Up to 6 capacity provider strategies to use for the task. Detailed below. + // +kubebuilder:validation:Optional + CapacityProviderStrategy []CapacityProviderStrategyParameters `json:"capacityProviderStrategy,omitempty" tf:"capacity_provider_strategy,omitempty"` + + // Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon ECS Developer Guide. + // +kubebuilder:validation:Optional + EnableEcsManagedTags *bool `json:"enableEcsManagedTags,omitempty" tf:"enable_ecs_managed_tags,omitempty"` + + // Specifies whether to enable the execute command functionality for the containers in this task. + // +kubebuilder:validation:Optional + EnableExecuteCommand *bool `json:"enableExecuteCommand,omitempty" tf:"enable_execute_command,omitempty"` + + // Specifies an ECS task group for the task. At most 255 characters. + // +kubebuilder:validation:Optional + Group *string `json:"group,omitempty" tf:"group,omitempty"` + + // Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. One of: EC2, FARGATE, EXTERNAL. + // +kubebuilder:validation:Optional + LaunchType *string `json:"launchType,omitempty" tf:"launch_type,omitempty"` + + // Configures the networking associated with the task. Detailed below. + // +kubebuilder:validation:Optional + NetworkConfiguration *NetworkConfigurationParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // A set of up to 10 placement constraints to use for the task. Detailed below. 
+ // +kubebuilder:validation:Optional + PlacementConstraints []PlacementConstraintsParameters `json:"placementConstraints,omitempty" tf:"placement_constraints,omitempty"` + + // A set of up to 5 placement strategies. Detailed below. + // +kubebuilder:validation:Optional + PlacementStrategy []PlacementStrategyParameters `json:"placementStrategy,omitempty" tf:"placement_strategy,omitempty"` + + // Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. + // +kubebuilder:validation:Optional + PlatformVersion *string `json:"platformVersion,omitempty" tf:"platform_version,omitempty"` + + // Specifies whether to propagate the tags from the task definition to the task. One of: TASK_DEFINITION. + // +kubebuilder:validation:Optional + PropagateTags *string `json:"propagateTags,omitempty" tf:"propagate_tags,omitempty"` + + // Reference ID to use for the task. + // +kubebuilder:validation:Optional + ReferenceID *string `json:"referenceId,omitempty" tf:"reference_id,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The number of tasks to create. Ranges from 1 (default) to 10. + // +kubebuilder:validation:Optional + TaskCount *float64 `json:"taskCount,omitempty" tf:"task_count,omitempty"` + + // ARN of the task definition to use. + // +kubebuilder:validation:Optional + TaskDefinitionArn *string `json:"taskDefinitionArn" tf:"task_definition_arn,omitempty"` +} + +type EventbridgeParametersInitParameters struct { + + // Free-form string used to decide what fields to expect in the event detail. Up to 128 characters. + DetailType *string `json:"detailType,omitempty" tf:"detail_type,omitempty"` + + // Source of the event. 
+ Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type EventbridgeParametersObservation struct { + + // Free-form string used to decide what fields to expect in the event detail. Up to 128 characters. + DetailType *string `json:"detailType,omitempty" tf:"detail_type,omitempty"` + + // Source of the event. + Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type EventbridgeParametersParameters struct { + + // Free-form string used to decide what fields to expect in the event detail. Up to 128 characters. + // +kubebuilder:validation:Optional + DetailType *string `json:"detailType" tf:"detail_type,omitempty"` + + // Source of the event. + // +kubebuilder:validation:Optional + Source *string `json:"source" tf:"source,omitempty"` +} + +type FlexibleTimeWindowInitParameters struct { + + // Maximum time window during which a schedule can be invoked. Ranges from 1 to 1440 minutes. + MaximumWindowInMinutes *float64 `json:"maximumWindowInMinutes,omitempty" tf:"maximum_window_in_minutes,omitempty"` + + // Determines whether the schedule is invoked within a flexible time window. One of: OFF, FLEXIBLE. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type FlexibleTimeWindowObservation struct { + + // Maximum time window during which a schedule can be invoked. Ranges from 1 to 1440 minutes. + MaximumWindowInMinutes *float64 `json:"maximumWindowInMinutes,omitempty" tf:"maximum_window_in_minutes,omitempty"` + + // Determines whether the schedule is invoked within a flexible time window. One of: OFF, FLEXIBLE. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type FlexibleTimeWindowParameters struct { + + // Maximum time window during which a schedule can be invoked. Ranges from 1 to 1440 minutes. 
+ // +kubebuilder:validation:Optional + MaximumWindowInMinutes *float64 `json:"maximumWindowInMinutes,omitempty" tf:"maximum_window_in_minutes,omitempty"` + + // Determines whether the schedule is invoked within a flexible time window. One of: OFF, FLEXIBLE. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` +} + +type KinesisParametersInitParameters struct { + + // Specifies the shard to which EventBridge Scheduler sends the event. Up to 256 characters. + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` +} + +type KinesisParametersObservation struct { + + // Specifies the shard to which EventBridge Scheduler sends the event. Up to 256 characters. + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` +} + +type KinesisParametersParameters struct { + + // Specifies the shard to which EventBridge Scheduler sends the event. Up to 256 characters. + // +kubebuilder:validation:Optional + PartitionKey *string `json:"partitionKey" tf:"partition_key,omitempty"` +} + +type NetworkConfigurationInitParameters struct { + + // Specifies whether the task's elastic network interface receives a public IP address. This attribute is a boolean type, where true maps to ENABLED and false to DISABLED. You can specify true only when the launch_type is set to FARGATE. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Set of 1 to 5 Security Group ID-s to be associated with the task. These security groups must all be in the same VPC. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Set of 1 to 16 subnets to be associated with the task. These subnets must all be in the same VPC. 
+ // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type NetworkConfigurationObservation struct { + + // Specifies whether the task's elastic network interface receives a public IP address. This attribute is a boolean type, where true maps to ENABLED and false to DISABLED. You can specify true only when the launch_type is set to FARGATE. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Set of 1 to 5 Security Group ID-s to be associated with the task. These security groups must all be in the same VPC. + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Set of 1 to 16 subnets to be associated with the task. These subnets must all be in the same VPC. + // +listType=set + Subnets []*string `json:"subnets,omitempty" tf:"subnets,omitempty"` +} + +type NetworkConfigurationParameters struct { + + // Specifies whether the task's elastic network interface receives a public IP address. This attribute is a boolean type, where true maps to ENABLED and false to DISABLED. You can specify true only when the launch_type is set to FARGATE. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Set of 1 to 5 Security Group ID-s to be associated with the task. These security groups must all be in the same VPC. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroups []*string `json:"securityGroups,omitempty" tf:"security_groups,omitempty"` + + // Set of 1 to 16 subnets to be associated with the task. These subnets must all be in the same VPC. + // +kubebuilder:validation:Optional + // +listType=set + Subnets []*string `json:"subnets" tf:"subnets,omitempty"` +} + +type PipelineParameterInitParameters struct { + + // Name of parameter to start execution of a SageMaker Model Building Pipeline. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of parameter to start execution of a SageMaker Model Building Pipeline. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PipelineParameterObservation struct { + + // Name of parameter to start execution of a SageMaker Model Building Pipeline. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Value of parameter to start execution of a SageMaker Model Building Pipeline. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PipelineParameterParameters struct { + + // Name of parameter to start execution of a SageMaker Model Building Pipeline. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Value of parameter to start execution of a SageMaker Model Building Pipeline. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type PlacementConstraintsInitParameters struct { + + // A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster query language in the Amazon ECS Developer Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // The type of placement strategy. One of: random, spread, binpack. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PlacementConstraintsObservation struct { + + // A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster query language in the Amazon ECS Developer Guide. + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // The type of placement strategy. One of: random, spread, binpack. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PlacementConstraintsParameters struct { + + // A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster query language in the Amazon ECS Developer Guide. + // +kubebuilder:validation:Optional + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + // The type of placement strategy. One of: random, spread, binpack. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type PlacementStrategyInitParameters struct { + + // The field to apply the placement strategy against. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // The type of placement strategy. One of: random, spread, binpack. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PlacementStrategyObservation struct { + + // The field to apply the placement strategy against. + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // The type of placement strategy. One of: random, spread, binpack. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PlacementStrategyParameters struct { + + // The field to apply the placement strategy against. + // +kubebuilder:validation:Optional + Field *string `json:"field,omitempty" tf:"field,omitempty"` + + // The type of placement strategy. One of: random, spread, binpack. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RetryPolicyInitParameters struct { + + // Maximum amount of time, in seconds, to continue to make retry attempts. Ranges from 60 to 86400 (default). + MaximumEventAgeInSeconds *float64 `json:"maximumEventAgeInSeconds,omitempty" tf:"maximum_event_age_in_seconds,omitempty"` + + // Maximum number of retry attempts to make before the request fails. Ranges from 0 to 185 (default). 
+ MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` +} + +type RetryPolicyObservation struct { + + // Maximum amount of time, in seconds, to continue to make retry attempts. Ranges from 60 to 86400 (default). + MaximumEventAgeInSeconds *float64 `json:"maximumEventAgeInSeconds,omitempty" tf:"maximum_event_age_in_seconds,omitempty"` + + // Maximum number of retry attempts to make before the request fails. Ranges from 0 to 185 (default). + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` +} + +type RetryPolicyParameters struct { + + // Maximum amount of time, in seconds, to continue to make retry attempts. Ranges from 60 to 86400 (default). + // +kubebuilder:validation:Optional + MaximumEventAgeInSeconds *float64 `json:"maximumEventAgeInSeconds,omitempty" tf:"maximum_event_age_in_seconds,omitempty"` + + // Maximum number of retry attempts to make before the request fails. Ranges from 0 to 185 (default). + // +kubebuilder:validation:Optional + MaximumRetryAttempts *float64 `json:"maximumRetryAttempts,omitempty" tf:"maximum_retry_attempts,omitempty"` +} + +type SagemakerPipelineParametersInitParameters struct { + + // Set of up to 200 parameter names and values to use when executing the SageMaker Model Building Pipeline. Detailed below. + PipelineParameter []PipelineParameterInitParameters `json:"pipelineParameter,omitempty" tf:"pipeline_parameter,omitempty"` +} + +type SagemakerPipelineParametersObservation struct { + + // Set of up to 200 parameter names and values to use when executing the SageMaker Model Building Pipeline. Detailed below. + PipelineParameter []PipelineParameterObservation `json:"pipelineParameter,omitempty" tf:"pipeline_parameter,omitempty"` +} + +type SagemakerPipelineParametersParameters struct { + + // Set of up to 200 parameter names and values to use when executing the SageMaker Model Building Pipeline. Detailed below. 
+ // +kubebuilder:validation:Optional + PipelineParameter []PipelineParameterParameters `json:"pipelineParameter,omitempty" tf:"pipeline_parameter,omitempty"` +} + +type ScheduleInitParameters struct { + + // Brief description of the schedule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The date, in UTC, before which the schedule can invoke its target. Depending on the schedule's recurrence expression, invocations might stop on, or before, the end date you specify. EventBridge Scheduler ignores the end date for one-time schedules. Example: 2030-01-01T01:00:00Z. + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // Configures a time window during which EventBridge Scheduler invokes the schedule. Detailed below. + FlexibleTimeWindow *FlexibleTimeWindowInitParameters `json:"flexibleTimeWindow,omitempty" tf:"flexible_time_window,omitempty"` + + // Name of the schedule group to associate with this schedule. When omitted, the default schedule group is used. + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` + + // ARN for the customer managed KMS key that EventBridge Scheduler will use to encrypt and decrypt your data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // Name of the schedule. Conflicts with name_prefix. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Defines when the schedule runs. Read more in Schedule types on EventBridge Scheduler. 
+ ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // Timezone in which the scheduling expression is evaluated. Defaults to UTC. Example: Australia/Sydney. + ScheduleExpressionTimezone *string `json:"scheduleExpressionTimezone,omitempty" tf:"schedule_expression_timezone,omitempty"` + + // The date, in UTC, after which the schedule can begin invoking its target. Depending on the schedule's recurrence expression, invocations might occur on, or after, the start date you specify. EventBridge Scheduler ignores the start date for one-time schedules. Example: 2030-01-01T01:00:00Z. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` + + // Specifies whether the schedule is enabled or disabled. One of: ENABLED (default), DISABLED. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Configures the target of the schedule. Detailed below. + Target *TargetInitParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type ScheduleObservation struct { + + // ARN of the SQS queue specified as the destination for the dead-letter queue. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Brief description of the schedule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The date, in UTC, before which the schedule can invoke its target. Depending on the schedule's recurrence expression, invocations might stop on, or before, the end date you specify. EventBridge Scheduler ignores the end date for one-time schedules. Example: 2030-01-01T01:00:00Z. + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // Configures a time window during which EventBridge Scheduler invokes the schedule. Detailed below. + FlexibleTimeWindow *FlexibleTimeWindowObservation `json:"flexibleTimeWindow,omitempty" tf:"flexible_time_window,omitempty"` + + // Name of the schedule group to associate with this schedule. 
When omitted, the default schedule group is used. + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` + + // Name of the schedule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ARN for the customer managed KMS key that EventBridge Scheduler will use to encrypt and decrypt your data. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Name of the schedule. Conflicts with name_prefix. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Defines when the schedule runs. Read more in Schedule types on EventBridge Scheduler. + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // Timezone in which the scheduling expression is evaluated. Defaults to UTC. Example: Australia/Sydney. + ScheduleExpressionTimezone *string `json:"scheduleExpressionTimezone,omitempty" tf:"schedule_expression_timezone,omitempty"` + + // The date, in UTC, after which the schedule can begin invoking its target. Depending on the schedule's recurrence expression, invocations might occur on, or after, the start date you specify. EventBridge Scheduler ignores the start date for one-time schedules. Example: 2030-01-01T01:00:00Z. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` + + // Specifies whether the schedule is enabled or disabled. One of: ENABLED (default), DISABLED. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Configures the target of the schedule. Detailed below. + Target *TargetObservation `json:"target,omitempty" tf:"target,omitempty"` +} + +type ScheduleParameters struct { + + // Brief description of the schedule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The date, in UTC, before which the schedule can invoke its target. 
Depending on the schedule's recurrence expression, invocations might stop on, or before, the end date you specify. EventBridge Scheduler ignores the end date for one-time schedules. Example: 2030-01-01T01:00:00Z. + // +kubebuilder:validation:Optional + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // Configures a time window during which EventBridge Scheduler invokes the schedule. Detailed below. + // +kubebuilder:validation:Optional + FlexibleTimeWindow *FlexibleTimeWindowParameters `json:"flexibleTimeWindow,omitempty" tf:"flexible_time_window,omitempty"` + + // Name of the schedule group to associate with this schedule. When omitted, the default schedule group is used. + // +kubebuilder:validation:Optional + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` + + // ARN for the customer managed KMS key that EventBridge Scheduler will use to encrypt and decrypt your data. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Reference to a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyArn. + // +kubebuilder:validation:Optional + KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"` + + // Name of the schedule. Conflicts with name_prefix. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Defines when the schedule runs. Read more in Schedule types on EventBridge Scheduler. 
+ // +kubebuilder:validation:Optional + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // Timezone in which the scheduling expression is evaluated. Defaults to UTC. Example: Australia/Sydney. + // +kubebuilder:validation:Optional + ScheduleExpressionTimezone *string `json:"scheduleExpressionTimezone,omitempty" tf:"schedule_expression_timezone,omitempty"` + + // The date, in UTC, after which the schedule can begin invoking its target. Depending on the schedule's recurrence expression, invocations might occur on, or after, the start date you specify. EventBridge Scheduler ignores the start date for one-time schedules. Example: 2030-01-01T01:00:00Z. + // +kubebuilder:validation:Optional + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` + + // Specifies whether the schedule is enabled or disabled. One of: ENABLED (default), DISABLED. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Configures the target of the schedule. Detailed below. + // +kubebuilder:validation:Optional + Target *TargetParameters `json:"target,omitempty" tf:"target,omitempty"` +} + +type SqsParametersInitParameters struct { + + // FIFO message group ID to use as the target. + MessageGroupID *string `json:"messageGroupId,omitempty" tf:"message_group_id,omitempty"` +} + +type SqsParametersObservation struct { + + // FIFO message group ID to use as the target. + MessageGroupID *string `json:"messageGroupId,omitempty" tf:"message_group_id,omitempty"` +} + +type SqsParametersParameters struct { + + // FIFO message group ID to use as the target. + // +kubebuilder:validation:Optional + MessageGroupID *string `json:"messageGroupId,omitempty" tf:"message_group_id,omitempty"` +} + +type TargetInitParameters struct { + + // ARN of the target of this schedule, such as a SQS queue or ECS cluster. For universal targets, this is a Service ARN specific to the target service. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sqs/v1beta1.Queue + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a Queue in sqs to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a Queue in sqs to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Information about an Amazon SQS queue that EventBridge Scheduler uses as a dead-letter queue for your schedule. If specified, EventBridge Scheduler delivers failed events that could not be successfully delivered to a target to the queue. Detailed below. + DeadLetterConfig *DeadLetterConfigInitParameters `json:"deadLetterConfig,omitempty" tf:"dead_letter_config,omitempty"` + + // Templated target type for the Amazon ECS RunTask API operation. Detailed below. + EcsParameters *EcsParametersInitParameters `json:"ecsParameters,omitempty" tf:"ecs_parameters,omitempty"` + + // Templated target type for the EventBridge PutEvents API operation. Detailed below. + EventbridgeParameters *EventbridgeParametersInitParameters `json:"eventbridgeParameters,omitempty" tf:"eventbridge_parameters,omitempty"` + + // Text, or well-formed JSON, passed to the target. Read more in Universal target. + Input *string `json:"input,omitempty" tf:"input,omitempty"` + + // Templated target type for the Amazon Kinesis PutRecord API operation. Detailed below. + KinesisParameters *KinesisParametersInitParameters `json:"kinesisParameters,omitempty" tf:"kinesis_parameters,omitempty"` + + // Information about the retry policy settings. Detailed below. 
+ RetryPolicy *RetryPolicyInitParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // ARN of the IAM role that EventBridge Scheduler will use for this target when the schedule is invoked. Read more in Set up the execution role. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Templated target type for the Amazon SageMaker StartPipelineExecution API operation. Detailed below. + SagemakerPipelineParameters *SagemakerPipelineParametersInitParameters `json:"sagemakerPipelineParameters,omitempty" tf:"sagemaker_pipeline_parameters,omitempty"` + + // The templated target type for the Amazon SQS SendMessage API operation. Detailed below. + SqsParameters *SqsParametersInitParameters `json:"sqsParameters,omitempty" tf:"sqs_parameters,omitempty"` +} + +type TargetObservation struct { + + // ARN of the target of this schedule, such as a SQS queue or ECS cluster. For universal targets, this is a Service ARN specific to the target service. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Information about an Amazon SQS queue that EventBridge Scheduler uses as a dead-letter queue for your schedule. If specified, EventBridge Scheduler delivers failed events that could not be successfully delivered to a target to the queue. Detailed below. 
+ DeadLetterConfig *DeadLetterConfigObservation `json:"deadLetterConfig,omitempty" tf:"dead_letter_config,omitempty"` + + // Templated target type for the Amazon ECS RunTask API operation. Detailed below. + EcsParameters *EcsParametersObservation `json:"ecsParameters,omitempty" tf:"ecs_parameters,omitempty"` + + // Templated target type for the EventBridge PutEvents API operation. Detailed below. + EventbridgeParameters *EventbridgeParametersObservation `json:"eventbridgeParameters,omitempty" tf:"eventbridge_parameters,omitempty"` + + // Text, or well-formed JSON, passed to the target. Read more in Universal target. + Input *string `json:"input,omitempty" tf:"input,omitempty"` + + // Templated target type for the Amazon Kinesis PutRecord API operation. Detailed below. + KinesisParameters *KinesisParametersObservation `json:"kinesisParameters,omitempty" tf:"kinesis_parameters,omitempty"` + + // Information about the retry policy settings. Detailed below. + RetryPolicy *RetryPolicyObservation `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // ARN of the IAM role that EventBridge Scheduler will use for this target when the schedule is invoked. Read more in Set up the execution role. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Templated target type for the Amazon SageMaker StartPipelineExecution API operation. Detailed below. + SagemakerPipelineParameters *SagemakerPipelineParametersObservation `json:"sagemakerPipelineParameters,omitempty" tf:"sagemaker_pipeline_parameters,omitempty"` + + // The templated target type for the Amazon SQS SendMessage API operation. Detailed below. + SqsParameters *SqsParametersObservation `json:"sqsParameters,omitempty" tf:"sqs_parameters,omitempty"` +} + +type TargetParameters struct { + + // ARN of the target of this schedule, such as a SQS queue or ECS cluster. For universal targets, this is a Service ARN specific to the target service. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sqs/v1beta1.Queue + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Reference to a Queue in sqs to populate arn. + // +kubebuilder:validation:Optional + ArnRef *v1.Reference `json:"arnRef,omitempty" tf:"-"` + + // Selector for a Queue in sqs to populate arn. + // +kubebuilder:validation:Optional + ArnSelector *v1.Selector `json:"arnSelector,omitempty" tf:"-"` + + // Information about an Amazon SQS queue that EventBridge Scheduler uses as a dead-letter queue for your schedule. If specified, EventBridge Scheduler delivers failed events that could not be successfully delivered to a target to the queue. Detailed below. + // +kubebuilder:validation:Optional + DeadLetterConfig *DeadLetterConfigParameters `json:"deadLetterConfig,omitempty" tf:"dead_letter_config,omitempty"` + + // Templated target type for the Amazon ECS RunTask API operation. Detailed below. + // +kubebuilder:validation:Optional + EcsParameters *EcsParametersParameters `json:"ecsParameters,omitempty" tf:"ecs_parameters,omitempty"` + + // Templated target type for the EventBridge PutEvents API operation. Detailed below. + // +kubebuilder:validation:Optional + EventbridgeParameters *EventbridgeParametersParameters `json:"eventbridgeParameters,omitempty" tf:"eventbridge_parameters,omitempty"` + + // Text, or well-formed JSON, passed to the target. Read more in Universal target. + // +kubebuilder:validation:Optional + Input *string `json:"input,omitempty" tf:"input,omitempty"` + + // Templated target type for the Amazon Kinesis PutRecord API operation. Detailed below. 
+ // +kubebuilder:validation:Optional + KinesisParameters *KinesisParametersParameters `json:"kinesisParameters,omitempty" tf:"kinesis_parameters,omitempty"` + + // Information about the retry policy settings. Detailed below. + // +kubebuilder:validation:Optional + RetryPolicy *RetryPolicyParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // ARN of the IAM role that EventBridge Scheduler will use for this target when the schedule is invoked. Read more in Set up the execution role. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Templated target type for the Amazon SageMaker StartPipelineExecution API operation. Detailed below. + // +kubebuilder:validation:Optional + SagemakerPipelineParameters *SagemakerPipelineParametersParameters `json:"sagemakerPipelineParameters,omitempty" tf:"sagemaker_pipeline_parameters,omitempty"` + + // The templated target type for the Amazon SQS SendMessage API operation. Detailed below. + // +kubebuilder:validation:Optional + SqsParameters *SqsParametersParameters `json:"sqsParameters,omitempty" tf:"sqs_parameters,omitempty"` +} + +// ScheduleSpec defines the desired state of Schedule +type ScheduleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScheduleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScheduleInitParameters `json:"initProvider,omitempty"` +} + +// ScheduleStatus defines the observed state of Schedule. +type ScheduleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScheduleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Schedule is the Schema for the Schedules API. Provides an EventBridge Scheduler Schedule resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Schedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.flexibleTimeWindow) || (has(self.initProvider) && has(self.initProvider.flexibleTimeWindow))",message="spec.forProvider.flexibleTimeWindow is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scheduleExpression) || (has(self.initProvider) && has(self.initProvider.scheduleExpression))",message="spec.forProvider.scheduleExpression is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.target) || (has(self.initProvider) && has(self.initProvider.target))",message="spec.forProvider.target is a required parameter" + Spec ScheduleSpec `json:"spec"` + Status ScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduleList contains a list of Schedules +type ScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Schedule `json:"items"` +} + +// Repository type metadata. +var ( + Schedule_Kind = "Schedule" + Schedule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Schedule_Kind}.String() + Schedule_KindAPIVersion = Schedule_Kind + "." + CRDGroupVersion.String() + Schedule_GroupVersionKind = CRDGroupVersion.WithKind(Schedule_Kind) +) + +func init() { + SchemeBuilder.Register(&Schedule{}, &ScheduleList{}) +} diff --git a/apis/secretsmanager/v1beta1/zz_generated.conversion_hubs.go b/apis/secretsmanager/v1beta1/zz_generated.conversion_hubs.go index 45b0bc9254..0243902255 100755 --- a/apis/secretsmanager/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/secretsmanager/v1beta1/zz_generated.conversion_hubs.go @@ -12,8 +12,5 @@ func (tr *Secret) Hub() {} // Hub marks this type as a conversion hub. func (tr *SecretPolicy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SecretRotation) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *SecretVersion) Hub() {} diff --git a/apis/secretsmanager/v1beta1/zz_generated.conversion_spokes.go b/apis/secretsmanager/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..8ad6dfd348 --- /dev/null +++ b/apis/secretsmanager/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this SecretRotation to the hub type. +func (tr *SecretRotation) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SecretRotation type. 
+func (tr *SecretRotation) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/secretsmanager/v1beta2/zz_generated.conversion_hubs.go b/apis/secretsmanager/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..758daf2226 --- /dev/null +++ b/apis/secretsmanager/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *SecretRotation) Hub() {} diff --git a/apis/secretsmanager/v1beta2/zz_generated.deepcopy.go b/apis/secretsmanager/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..4d75e623e8 --- /dev/null +++ b/apis/secretsmanager/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,358 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RotationRulesInitParameters) DeepCopyInto(out *RotationRulesInitParameters) { + *out = *in + if in.AutomaticallyAfterDays != nil { + in, out := &in.AutomaticallyAfterDays, &out.AutomaticallyAfterDays + *out = new(float64) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RotationRulesInitParameters. +func (in *RotationRulesInitParameters) DeepCopy() *RotationRulesInitParameters { + if in == nil { + return nil + } + out := new(RotationRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RotationRulesObservation) DeepCopyInto(out *RotationRulesObservation) { + *out = *in + if in.AutomaticallyAfterDays != nil { + in, out := &in.AutomaticallyAfterDays, &out.AutomaticallyAfterDays + *out = new(float64) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RotationRulesObservation. +func (in *RotationRulesObservation) DeepCopy() *RotationRulesObservation { + if in == nil { + return nil + } + out := new(RotationRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RotationRulesParameters) DeepCopyInto(out *RotationRulesParameters) { + *out = *in + if in.AutomaticallyAfterDays != nil { + in, out := &in.AutomaticallyAfterDays, &out.AutomaticallyAfterDays + *out = new(float64) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RotationRulesParameters. +func (in *RotationRulesParameters) DeepCopy() *RotationRulesParameters { + if in == nil { + return nil + } + out := new(RotationRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretRotation) DeepCopyInto(out *SecretRotation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretRotation. +func (in *SecretRotation) DeepCopy() *SecretRotation { + if in == nil { + return nil + } + out := new(SecretRotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretRotation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretRotationInitParameters) DeepCopyInto(out *SecretRotationInitParameters) { + *out = *in + if in.RotateImmediately != nil { + in, out := &in.RotateImmediately, &out.RotateImmediately + *out = new(bool) + **out = **in + } + if in.RotationLambdaArn != nil { + in, out := &in.RotationLambdaArn, &out.RotationLambdaArn + *out = new(string) + **out = **in + } + if in.RotationLambdaArnRef != nil { + in, out := &in.RotationLambdaArnRef, &out.RotationLambdaArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RotationLambdaArnSelector != nil { + in, out := &in.RotationLambdaArnSelector, &out.RotationLambdaArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RotationRules != nil { + in, out := &in.RotationRules, &out.RotationRules + *out = new(RotationRulesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } + if in.SecretIDRef != nil { + in, out := &in.SecretIDRef, &out.SecretIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SecretIDSelector != nil { + in, out := &in.SecretIDSelector, &out.SecretIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretRotationInitParameters. +func (in *SecretRotationInitParameters) DeepCopy() *SecretRotationInitParameters { + if in == nil { + return nil + } + out := new(SecretRotationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretRotationList) DeepCopyInto(out *SecretRotationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecretRotation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretRotationList. +func (in *SecretRotationList) DeepCopy() *SecretRotationList { + if in == nil { + return nil + } + out := new(SecretRotationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretRotationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretRotationObservation) DeepCopyInto(out *SecretRotationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RotateImmediately != nil { + in, out := &in.RotateImmediately, &out.RotateImmediately + *out = new(bool) + **out = **in + } + if in.RotationEnabled != nil { + in, out := &in.RotationEnabled, &out.RotationEnabled + *out = new(bool) + **out = **in + } + if in.RotationLambdaArn != nil { + in, out := &in.RotationLambdaArn, &out.RotationLambdaArn + *out = new(string) + **out = **in + } + if in.RotationRules != nil { + in, out := &in.RotationRules, &out.RotationRules + *out = new(RotationRulesObservation) + (*in).DeepCopyInto(*out) + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretRotationObservation. 
+func (in *SecretRotationObservation) DeepCopy() *SecretRotationObservation { + if in == nil { + return nil + } + out := new(SecretRotationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretRotationParameters) DeepCopyInto(out *SecretRotationParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RotateImmediately != nil { + in, out := &in.RotateImmediately, &out.RotateImmediately + *out = new(bool) + **out = **in + } + if in.RotationLambdaArn != nil { + in, out := &in.RotationLambdaArn, &out.RotationLambdaArn + *out = new(string) + **out = **in + } + if in.RotationLambdaArnRef != nil { + in, out := &in.RotationLambdaArnRef, &out.RotationLambdaArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RotationLambdaArnSelector != nil { + in, out := &in.RotationLambdaArnSelector, &out.RotationLambdaArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RotationRules != nil { + in, out := &in.RotationRules, &out.RotationRules + *out = new(RotationRulesParameters) + (*in).DeepCopyInto(*out) + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } + if in.SecretIDRef != nil { + in, out := &in.SecretIDRef, &out.SecretIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SecretIDSelector != nil { + in, out := &in.SecretIDSelector, &out.SecretIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretRotationParameters. 
+func (in *SecretRotationParameters) DeepCopy() *SecretRotationParameters { + if in == nil { + return nil + } + out := new(SecretRotationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretRotationSpec) DeepCopyInto(out *SecretRotationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretRotationSpec. +func (in *SecretRotationSpec) DeepCopy() *SecretRotationSpec { + if in == nil { + return nil + } + out := new(SecretRotationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretRotationStatus) DeepCopyInto(out *SecretRotationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretRotationStatus. +func (in *SecretRotationStatus) DeepCopy() *SecretRotationStatus { + if in == nil { + return nil + } + out := new(SecretRotationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/secretsmanager/v1beta2/zz_generated.managed.go b/apis/secretsmanager/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..6d130234ef --- /dev/null +++ b/apis/secretsmanager/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this SecretRotation. 
+func (mg *SecretRotation) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SecretRotation. +func (mg *SecretRotation) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SecretRotation. +func (mg *SecretRotation) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SecretRotation. +func (mg *SecretRotation) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SecretRotation. +func (mg *SecretRotation) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SecretRotation. +func (mg *SecretRotation) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SecretRotation. +func (mg *SecretRotation) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SecretRotation. +func (mg *SecretRotation) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SecretRotation. +func (mg *SecretRotation) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SecretRotation. +func (mg *SecretRotation) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SecretRotation. +func (mg *SecretRotation) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SecretRotation. 
+func (mg *SecretRotation) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/secretsmanager/v1beta2/zz_generated.managedlist.go b/apis/secretsmanager/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..bcfd7f63e6 --- /dev/null +++ b/apis/secretsmanager/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this SecretRotationList. +func (l *SecretRotationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/secretsmanager/v1beta2/zz_generated.resolvers.go b/apis/secretsmanager/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..c55a149d35 --- /dev/null +++ b/apis/secretsmanager/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *SecretRotation) ResolveReferences( // ResolveReferences of this SecretRotation. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RotationLambdaArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.RotationLambdaArnRef, + Selector: mg.Spec.ForProvider.RotationLambdaArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RotationLambdaArn") + } + mg.Spec.ForProvider.RotationLambdaArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RotationLambdaArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SecretID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SecretIDRef, + Selector: mg.Spec.ForProvider.SecretIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecretID") + } + mg.Spec.ForProvider.SecretID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SecretIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed 
to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RotationLambdaArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.RotationLambdaArnRef, + Selector: mg.Spec.InitProvider.RotationLambdaArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RotationLambdaArn") + } + mg.Spec.InitProvider.RotationLambdaArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RotationLambdaArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SecretID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SecretIDRef, + Selector: mg.Spec.InitProvider.SecretIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecretID") + } + mg.Spec.InitProvider.SecretID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SecretIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/secretsmanager/v1beta2/zz_groupversion_info.go b/apis/secretsmanager/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..2657962654 --- /dev/null +++ b/apis/secretsmanager/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=secretsmanager.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "secretsmanager.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/secretsmanager/v1beta2/zz_secretrotation_terraformed.go b/apis/secretsmanager/v1beta2/zz_secretrotation_terraformed.go new file mode 100755 index 0000000000..88602f8ee5 --- /dev/null +++ b/apis/secretsmanager/v1beta2/zz_secretrotation_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SecretRotation +func (mg *SecretRotation) GetTerraformResourceType() string { + return "aws_secretsmanager_secret_rotation" +} + +// GetConnectionDetailsMapping for this SecretRotation +func (tr *SecretRotation) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SecretRotation +func (tr *SecretRotation) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SecretRotation +func (tr *SecretRotation) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SecretRotation +func (tr *SecretRotation) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SecretRotation +func (tr *SecretRotation) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SecretRotation +func (tr *SecretRotation) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SecretRotation +func (tr *SecretRotation) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + 
if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SecretRotation +func (tr *SecretRotation) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SecretRotation using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SecretRotation) LateInitialize(attrs []byte) (bool, error) { + params := &SecretRotationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SecretRotation) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/secretsmanager/v1beta2/zz_secretrotation_types.go b/apis/secretsmanager/v1beta2/zz_secretrotation_types.go new file mode 100755 index 0000000000..98cc9ed7a5 --- /dev/null +++ b/apis/secretsmanager/v1beta2/zz_secretrotation_types.go @@ -0,0 +1,214 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RotationRulesInitParameters struct { + + // Specifies the number of days between automatic scheduled rotations of the secret. Either automatically_after_days or schedule_expression must be specified. + AutomaticallyAfterDays *float64 `json:"automaticallyAfterDays,omitempty" tf:"automatically_after_days,omitempty"` + + // - The length of the rotation window in hours. For example, 3h for a three hour window. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // A cron() or rate() expression that defines the schedule for rotating your secret. Either automatically_after_days or schedule_expression must be specified. + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` +} + +type RotationRulesObservation struct { + + // Specifies the number of days between automatic scheduled rotations of the secret. Either automatically_after_days or schedule_expression must be specified. + AutomaticallyAfterDays *float64 `json:"automaticallyAfterDays,omitempty" tf:"automatically_after_days,omitempty"` + + // - The length of the rotation window in hours. For example, 3h for a three hour window. 
+ Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // A cron() or rate() expression that defines the schedule for rotating your secret. Either automatically_after_days or schedule_expression must be specified. + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` +} + +type RotationRulesParameters struct { + + // Specifies the number of days between automatic scheduled rotations of the secret. Either automatically_after_days or schedule_expression must be specified. + // +kubebuilder:validation:Optional + AutomaticallyAfterDays *float64 `json:"automaticallyAfterDays,omitempty" tf:"automatically_after_days,omitempty"` + + // - The length of the rotation window in hours. For example, 3h for a three hour window. + // +kubebuilder:validation:Optional + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // A cron() or rate() expression that defines the schedule for rotating your secret. Either automatically_after_days or schedule_expression must be specified. + // +kubebuilder:validation:Optional + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` +} + +type SecretRotationInitParameters struct { + + // Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in rotation_rules. For secrets that use a Lambda rotation function to rotate, if you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) of the Lambda rotation function. The test creates an AWSPENDING version of the secret and then removes it. Defaults to true. + RotateImmediately *bool `json:"rotateImmediately,omitempty" tf:"rotate_immediately,omitempty"` + + // Specifies the ARN of the Lambda function that can rotate the secret. 
Must be supplied if the secret is not managed by AWS. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + RotationLambdaArn *string `json:"rotationLambdaArn,omitempty" tf:"rotation_lambda_arn,omitempty"` + + // Reference to a Function in lambda to populate rotationLambdaArn. + // +kubebuilder:validation:Optional + RotationLambdaArnRef *v1.Reference `json:"rotationLambdaArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate rotationLambdaArn. + // +kubebuilder:validation:Optional + RotationLambdaArnSelector *v1.Selector `json:"rotationLambdaArnSelector,omitempty" tf:"-"` + + // A structure that defines the rotation configuration for this secret. Defined below. + RotationRules *RotationRulesInitParameters `json:"rotationRules,omitempty" tf:"rotation_rules,omitempty"` + + // Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"` + + // Reference to a Secret in secretsmanager to populate secretId. + // +kubebuilder:validation:Optional + SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate secretId. + // +kubebuilder:validation:Optional + SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"` +} + +type SecretRotationObservation struct { + + // Amazon Resource Name (ARN) of the secret. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in rotation_rules. For secrets that use a Lambda rotation function to rotate, if you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) of the Lambda rotation function. The test creates an AWSPENDING version of the secret and then removes it. Defaults to true. + RotateImmediately *bool `json:"rotateImmediately,omitempty" tf:"rotate_immediately,omitempty"` + + // Specifies whether automatic rotation is enabled for this secret. + RotationEnabled *bool `json:"rotationEnabled,omitempty" tf:"rotation_enabled,omitempty"` + + // Specifies the ARN of the Lambda function that can rotate the secret. Must be supplied if the secret is not managed by AWS. + RotationLambdaArn *string `json:"rotationLambdaArn,omitempty" tf:"rotation_lambda_arn,omitempty"` + + // A structure that defines the rotation configuration for this secret. Defined below. + RotationRules *RotationRulesObservation `json:"rotationRules,omitempty" tf:"rotation_rules,omitempty"` + + // Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist. + SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"` +} + +type SecretRotationParameters struct { + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in rotation_rules. 
For secrets that use a Lambda rotation function to rotate, if you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) of the Lambda rotation function. The test creates an AWSPENDING version of the secret and then removes it. Defaults to true. + // +kubebuilder:validation:Optional + RotateImmediately *bool `json:"rotateImmediately,omitempty" tf:"rotate_immediately,omitempty"` + + // Specifies the ARN of the Lambda function that can rotate the secret. Must be supplied if the secret is not managed by AWS. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + RotationLambdaArn *string `json:"rotationLambdaArn,omitempty" tf:"rotation_lambda_arn,omitempty"` + + // Reference to a Function in lambda to populate rotationLambdaArn. + // +kubebuilder:validation:Optional + RotationLambdaArnRef *v1.Reference `json:"rotationLambdaArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate rotationLambdaArn. + // +kubebuilder:validation:Optional + RotationLambdaArnSelector *v1.Selector `json:"rotationLambdaArnSelector,omitempty" tf:"-"` + + // A structure that defines the rotation configuration for this secret. Defined below. + // +kubebuilder:validation:Optional + RotationRules *RotationRulesParameters `json:"rotationRules,omitempty" tf:"rotation_rules,omitempty"` + + // Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"` + + // Reference to a Secret in secretsmanager to populate secretId. + // +kubebuilder:validation:Optional + SecretIDRef *v1.Reference `json:"secretIdRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate secretId. + // +kubebuilder:validation:Optional + SecretIDSelector *v1.Selector `json:"secretIdSelector,omitempty" tf:"-"` +} + +// SecretRotationSpec defines the desired state of SecretRotation +type SecretRotationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SecretRotationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SecretRotationInitParameters `json:"initProvider,omitempty"` +} + +// SecretRotationStatus defines the observed state of SecretRotation. +type SecretRotationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SecretRotationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SecretRotation is the Schema for the SecretRotations API. 
Provides a resource to manage AWS Secrets Manager secret rotation +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SecretRotation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.rotationRules) || (has(self.initProvider) && has(self.initProvider.rotationRules))",message="spec.forProvider.rotationRules is a required parameter" + Spec SecretRotationSpec `json:"spec"` + Status SecretRotationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SecretRotationList contains a list of SecretRotations +type SecretRotationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SecretRotation `json:"items"` +} + +// Repository type metadata. +var ( + SecretRotation_Kind = "SecretRotation" + SecretRotation_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SecretRotation_Kind}.String() + SecretRotation_KindAPIVersion = SecretRotation_Kind + "." 
+ CRDGroupVersion.String() + SecretRotation_GroupVersionKind = CRDGroupVersion.WithKind(SecretRotation_Kind) +) + +func init() { + SchemeBuilder.Register(&SecretRotation{}, &SecretRotationList{}) +} diff --git a/apis/securityhub/v1beta1/zz_generated.conversion_hubs.go b/apis/securityhub/v1beta1/zz_generated.conversion_hubs.go index 8cf05da1a9..aa525b8281 100755 --- a/apis/securityhub/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/securityhub/v1beta1/zz_generated.conversion_hubs.go @@ -15,9 +15,6 @@ func (tr *ActionTarget) Hub() {} // Hub marks this type as a conversion hub. func (tr *FindingAggregator) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Insight) Hub() {} - // Hub marks this type as a conversion hub. func (tr *InviteAccepter) Hub() {} diff --git a/apis/securityhub/v1beta1/zz_generated.conversion_spokes.go b/apis/securityhub/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..aec9546b4e --- /dev/null +++ b/apis/securityhub/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Insight to the hub type. +func (tr *Insight) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Insight type. 
+func (tr *Insight) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/securityhub/v1beta2/zz_generated.conversion_hubs.go b/apis/securityhub/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..e1b24b02de --- /dev/null +++ b/apis/securityhub/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Insight) Hub() {} diff --git a/apis/securityhub/v1beta2/zz_generated.deepcopy.go b/apis/securityhub/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..2c0b098c24 --- /dev/null +++ b/apis/securityhub/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,9770 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsAccountIDInitParameters) DeepCopyInto(out *AwsAccountIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsAccountIDInitParameters. 
+func (in *AwsAccountIDInitParameters) DeepCopy() *AwsAccountIDInitParameters { + if in == nil { + return nil + } + out := new(AwsAccountIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsAccountIDObservation) DeepCopyInto(out *AwsAccountIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsAccountIDObservation. +func (in *AwsAccountIDObservation) DeepCopy() *AwsAccountIDObservation { + if in == nil { + return nil + } + out := new(AwsAccountIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsAccountIDParameters) DeepCopyInto(out *AwsAccountIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsAccountIDParameters. +func (in *AwsAccountIDParameters) DeepCopy() *AwsAccountIDParameters { + if in == nil { + return nil + } + out := new(AwsAccountIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompanyNameInitParameters) DeepCopyInto(out *CompanyNameInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompanyNameInitParameters. +func (in *CompanyNameInitParameters) DeepCopy() *CompanyNameInitParameters { + if in == nil { + return nil + } + out := new(CompanyNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompanyNameObservation) DeepCopyInto(out *CompanyNameObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompanyNameObservation. +func (in *CompanyNameObservation) DeepCopy() *CompanyNameObservation { + if in == nil { + return nil + } + out := new(CompanyNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompanyNameParameters) DeepCopyInto(out *CompanyNameParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompanyNameParameters. 
+func (in *CompanyNameParameters) DeepCopy() *CompanyNameParameters { + if in == nil { + return nil + } + out := new(CompanyNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComplianceStatusInitParameters) DeepCopyInto(out *ComplianceStatusInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComplianceStatusInitParameters. +func (in *ComplianceStatusInitParameters) DeepCopy() *ComplianceStatusInitParameters { + if in == nil { + return nil + } + out := new(ComplianceStatusInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComplianceStatusObservation) DeepCopyInto(out *ComplianceStatusObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComplianceStatusObservation. +func (in *ComplianceStatusObservation) DeepCopy() *ComplianceStatusObservation { + if in == nil { + return nil + } + out := new(ComplianceStatusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComplianceStatusParameters) DeepCopyInto(out *ComplianceStatusParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComplianceStatusParameters. +func (in *ComplianceStatusParameters) DeepCopy() *ComplianceStatusParameters { + if in == nil { + return nil + } + out := new(ComplianceStatusParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfidenceInitParameters) DeepCopyInto(out *ConfidenceInitParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidenceInitParameters. +func (in *ConfidenceInitParameters) DeepCopy() *ConfidenceInitParameters { + if in == nil { + return nil + } + out := new(ConfidenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfidenceObservation) DeepCopyInto(out *ConfidenceObservation) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidenceObservation. +func (in *ConfidenceObservation) DeepCopy() *ConfidenceObservation { + if in == nil { + return nil + } + out := new(ConfidenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfidenceParameters) DeepCopyInto(out *ConfidenceParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidenceParameters. +func (in *ConfidenceParameters) DeepCopy() *ConfidenceParameters { + if in == nil { + return nil + } + out := new(ConfidenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CreatedAtInitParameters) DeepCopyInto(out *CreatedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(DateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreatedAtInitParameters. +func (in *CreatedAtInitParameters) DeepCopy() *CreatedAtInitParameters { + if in == nil { + return nil + } + out := new(CreatedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreatedAtObservation) DeepCopyInto(out *CreatedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(DateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreatedAtObservation. +func (in *CreatedAtObservation) DeepCopy() *CreatedAtObservation { + if in == nil { + return nil + } + out := new(CreatedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CreatedAtParameters) DeepCopyInto(out *CreatedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(DateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreatedAtParameters. +func (in *CreatedAtParameters) DeepCopy() *CreatedAtParameters { + if in == nil { + return nil + } + out := new(CreatedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CriticalityInitParameters) DeepCopyInto(out *CriticalityInitParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriticalityInitParameters. +func (in *CriticalityInitParameters) DeepCopy() *CriticalityInitParameters { + if in == nil { + return nil + } + out := new(CriticalityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CriticalityObservation) DeepCopyInto(out *CriticalityObservation) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriticalityObservation. +func (in *CriticalityObservation) DeepCopy() *CriticalityObservation { + if in == nil { + return nil + } + out := new(CriticalityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CriticalityParameters) DeepCopyInto(out *CriticalityParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriticalityParameters. +func (in *CriticalityParameters) DeepCopy() *CriticalityParameters { + if in == nil { + return nil + } + out := new(CriticalityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DateRangeInitParameters) DeepCopyInto(out *DateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DateRangeInitParameters. 
+func (in *DateRangeInitParameters) DeepCopy() *DateRangeInitParameters { + if in == nil { + return nil + } + out := new(DateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DateRangeObservation) DeepCopyInto(out *DateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DateRangeObservation. +func (in *DateRangeObservation) DeepCopy() *DateRangeObservation { + if in == nil { + return nil + } + out := new(DateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DateRangeParameters) DeepCopyInto(out *DateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DateRangeParameters. +func (in *DateRangeParameters) DeepCopy() *DateRangeParameters { + if in == nil { + return nil + } + out := new(DateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DescriptionInitParameters) DeepCopyInto(out *DescriptionInitParameters) {
	// Shallow-copy value fields; non-nil pointer fields are replaced below.
	*out = *in
	if in.Comparison != nil {
		in, out := &in.Comparison, &out.Comparison
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DescriptionInitParameters.
func (in *DescriptionInitParameters) DeepCopy() *DescriptionInitParameters {
	if in == nil {
		return nil
	}
	out := new(DescriptionInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DescriptionObservation) DeepCopyInto(out *DescriptionObservation) {
	// Shallow-copy value fields; non-nil pointer fields are replaced below.
	*out = *in
	if in.Comparison != nil {
		in, out := &in.Comparison, &out.Comparison
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DescriptionObservation.
func (in *DescriptionObservation) DeepCopy() *DescriptionObservation {
	if in == nil {
		return nil
	}
	out := new(DescriptionObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DescriptionParameters) DeepCopyInto(out *DescriptionParameters) {
	// Shallow-copy value fields; non-nil pointer fields are replaced below.
	*out = *in
	if in.Comparison != nil {
		in, out := &in.Comparison, &out.Comparison
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DescriptionParameters.
func (in *DescriptionParameters) DeepCopy() *DescriptionParameters {
	if in == nil {
		return nil
	}
	out := new(DescriptionParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FiltersInitParameters) DeepCopyInto(out *FiltersInitParameters) {
	// Shallow-copy every field first; each non-nil slice field below is
	// then replaced with a freshly allocated, element-wise deep copy so
	// the result shares no backing arrays with the receiver. Each stanza
	// shadows in/out with pointers to the corresponding field.
	*out = *in
	if in.AwsAccountID != nil {
		in, out := &in.AwsAccountID, &out.AwsAccountID
		*out = make([]AwsAccountIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CompanyName != nil {
		in, out := &in.CompanyName, &out.CompanyName
		*out = make([]CompanyNameInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ComplianceStatus != nil {
		in, out := &in.ComplianceStatus, &out.ComplianceStatus
		*out = make([]ComplianceStatusInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Confidence != nil {
		in, out := &in.Confidence, &out.Confidence
		*out = make([]ConfidenceInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CreatedAt != nil {
		in, out := &in.CreatedAt, &out.CreatedAt
		*out = make([]CreatedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Criticality != nil {
		in, out := &in.Criticality, &out.Criticality
		*out = make([]CriticalityInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = make([]DescriptionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FindingProviderFieldsConfidence != nil {
		in, out := &in.FindingProviderFieldsConfidence, &out.FindingProviderFieldsConfidence
		*out = make([]FindingProviderFieldsConfidenceInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FindingProviderFieldsCriticality != nil {
		in, out := &in.FindingProviderFieldsCriticality, &out.FindingProviderFieldsCriticality
		*out = make([]FindingProviderFieldsCriticalityInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FindingProviderFieldsRelatedFindingsID != nil {
		in, out := &in.FindingProviderFieldsRelatedFindingsID, &out.FindingProviderFieldsRelatedFindingsID
		*out = make([]FindingProviderFieldsRelatedFindingsIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FindingProviderFieldsRelatedFindingsProductArn != nil {
		in, out := &in.FindingProviderFieldsRelatedFindingsProductArn, &out.FindingProviderFieldsRelatedFindingsProductArn
		*out = make([]FindingProviderFieldsRelatedFindingsProductArnInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FindingProviderFieldsSeverityLabel != nil {
		in, out := &in.FindingProviderFieldsSeverityLabel, &out.FindingProviderFieldsSeverityLabel
		*out = make([]FindingProviderFieldsSeverityLabelInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FindingProviderFieldsSeverityOriginal != nil {
		in, out := &in.FindingProviderFieldsSeverityOriginal, &out.FindingProviderFieldsSeverityOriginal
		*out = make([]FindingProviderFieldsSeverityOriginalInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FindingProviderFieldsTypes != nil {
		in, out := &in.FindingProviderFieldsTypes, &out.FindingProviderFieldsTypes
		*out = make([]FindingProviderFieldsTypesInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FirstObservedAt != nil {
		in, out := &in.FirstObservedAt, &out.FirstObservedAt
		*out = make([]FirstObservedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.GeneratorID != nil {
		in, out := &in.GeneratorID, &out.GeneratorID
		*out = make([]GeneratorIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = make([]IDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Keyword != nil {
		in, out := &in.Keyword, &out.Keyword
		*out = make([]KeywordInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.LastObservedAt != nil {
		in, out := &in.LastObservedAt, &out.LastObservedAt
		*out = make([]LastObservedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.MalwareName != nil {
		in, out := &in.MalwareName, &out.MalwareName
		*out = make([]MalwareNameInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.MalwarePath != nil {
		in, out := &in.MalwarePath, &out.MalwarePath
		*out = make([]MalwarePathInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.MalwareState != nil {
		in, out := &in.MalwareState, &out.MalwareState
		*out = make([]MalwareStateInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.MalwareType != nil {
		in, out := &in.MalwareType, &out.MalwareType
		*out = make([]MalwareTypeInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkDestinationDomain != nil {
		in, out := &in.NetworkDestinationDomain, &out.NetworkDestinationDomain
		*out = make([]NetworkDestinationDomainInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkDestinationIPv4 != nil {
		in, out := &in.NetworkDestinationIPv4, &out.NetworkDestinationIPv4
		*out = make([]NetworkDestinationIPv4InitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkDestinationIPv6 != nil {
		in, out := &in.NetworkDestinationIPv6, &out.NetworkDestinationIPv6
		*out = make([]NetworkDestinationIPv6InitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkDestinationPort != nil {
		in, out := &in.NetworkDestinationPort, &out.NetworkDestinationPort
		*out = make([]NetworkDestinationPortInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkDirection != nil {
		in, out := &in.NetworkDirection, &out.NetworkDirection
		*out = make([]NetworkDirectionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkProtocol != nil {
		in, out := &in.NetworkProtocol, &out.NetworkProtocol
		*out = make([]NetworkProtocolInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkSourceDomain != nil {
		in, out := &in.NetworkSourceDomain, &out.NetworkSourceDomain
		*out = make([]NetworkSourceDomainInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkSourceIPv4 != nil {
		in, out := &in.NetworkSourceIPv4, &out.NetworkSourceIPv4
		*out = make([]NetworkSourceIPv4InitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkSourceIPv6 != nil {
		in, out := &in.NetworkSourceIPv6, &out.NetworkSourceIPv6
		*out = make([]NetworkSourceIPv6InitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkSourceMac != nil {
		in, out := &in.NetworkSourceMac, &out.NetworkSourceMac
		*out = make([]NetworkSourceMacInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NetworkSourcePort != nil {
		in, out := &in.NetworkSourcePort, &out.NetworkSourcePort
		*out = make([]NetworkSourcePortInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NoteText != nil {
		in, out := &in.NoteText, &out.NoteText
		*out = make([]NoteTextInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NoteUpdatedAt != nil {
		in, out := &in.NoteUpdatedAt, &out.NoteUpdatedAt
		*out = make([]NoteUpdatedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NoteUpdatedBy != nil {
		in, out := &in.NoteUpdatedBy, &out.NoteUpdatedBy
		*out = make([]NoteUpdatedByInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ProcessLaunchedAt != nil {
		in, out := &in.ProcessLaunchedAt, &out.ProcessLaunchedAt
		*out = make([]ProcessLaunchedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ProcessName != nil {
		in, out := &in.ProcessName, &out.ProcessName
		*out = make([]ProcessNameInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ProcessParentPid != nil {
		in, out := &in.ProcessParentPid, &out.ProcessParentPid
		*out = make([]ProcessParentPidInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ProcessPath != nil {
		in, out := &in.ProcessPath, &out.ProcessPath
		*out = make([]ProcessPathInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ProcessPid != nil {
		in, out := &in.ProcessPid, &out.ProcessPid
		*out = make([]ProcessPidInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ProcessTerminatedAt != nil {
		in, out := &in.ProcessTerminatedAt, &out.ProcessTerminatedAt
		*out = make([]ProcessTerminatedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ProductArn != nil {
		in, out := &in.ProductArn, &out.ProductArn
		*out = make([]ProductArnInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ProductFields != nil {
		in, out := &in.ProductFields, &out.ProductFields
		*out = make([]ProductFieldsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ProductName != nil {
		in, out := &in.ProductName, &out.ProductName
		*out = make([]ProductNameInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RecommendationText != nil {
		in, out := &in.RecommendationText, &out.RecommendationText
		*out = make([]RecommendationTextInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RecordState != nil {
		in, out := &in.RecordState, &out.RecordState
		*out = make([]RecordStateInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RelatedFindingsID != nil {
		in, out := &in.RelatedFindingsID, &out.RelatedFindingsID
		*out = make([]RelatedFindingsIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RelatedFindingsProductArn != nil {
		in, out := &in.RelatedFindingsProductArn, &out.RelatedFindingsProductArn
		*out = make([]RelatedFindingsProductArnInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsEC2InstanceIAMInstanceProfileArn != nil {
		in, out := &in.ResourceAwsEC2InstanceIAMInstanceProfileArn, &out.ResourceAwsEC2InstanceIAMInstanceProfileArn
		*out = make([]ResourceAwsEC2InstanceIAMInstanceProfileArnInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsEC2InstanceIPv4Addresses != nil {
		in, out := &in.ResourceAwsEC2InstanceIPv4Addresses, &out.ResourceAwsEC2InstanceIPv4Addresses
		*out = make([]ResourceAwsEC2InstanceIPv4AddressesInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsEC2InstanceIPv6Addresses != nil {
		in, out := &in.ResourceAwsEC2InstanceIPv6Addresses, &out.ResourceAwsEC2InstanceIPv6Addresses
		*out = make([]ResourceAwsEC2InstanceIPv6AddressesInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsEC2InstanceImageID != nil {
		in, out := &in.ResourceAwsEC2InstanceImageID, &out.ResourceAwsEC2InstanceImageID
		*out = make([]ResourceAwsEC2InstanceImageIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsEC2InstanceKeyName != nil {
		in, out := &in.ResourceAwsEC2InstanceKeyName, &out.ResourceAwsEC2InstanceKeyName
		*out = make([]ResourceAwsEC2InstanceKeyNameInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsEC2InstanceLaunchedAt != nil {
		in, out := &in.ResourceAwsEC2InstanceLaunchedAt, &out.ResourceAwsEC2InstanceLaunchedAt
		*out = make([]ResourceAwsEC2InstanceLaunchedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsEC2InstanceSubnetID != nil {
		in, out := &in.ResourceAwsEC2InstanceSubnetID, &out.ResourceAwsEC2InstanceSubnetID
		*out = make([]ResourceAwsEC2InstanceSubnetIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsEC2InstanceType != nil {
		in, out := &in.ResourceAwsEC2InstanceType, &out.ResourceAwsEC2InstanceType
		*out = make([]ResourceAwsEC2InstanceTypeInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsEC2InstanceVPCID != nil {
		in, out := &in.ResourceAwsEC2InstanceVPCID, &out.ResourceAwsEC2InstanceVPCID
		*out = make([]ResourceAwsEC2InstanceVPCIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsIAMAccessKeyCreatedAt != nil {
		in, out := &in.ResourceAwsIAMAccessKeyCreatedAt, &out.ResourceAwsIAMAccessKeyCreatedAt
		*out = make([]ResourceAwsIAMAccessKeyCreatedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsIAMAccessKeyStatus != nil {
		in, out := &in.ResourceAwsIAMAccessKeyStatus, &out.ResourceAwsIAMAccessKeyStatus
		*out = make([]ResourceAwsIAMAccessKeyStatusInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsIAMAccessKeyUserName != nil {
		in, out := &in.ResourceAwsIAMAccessKeyUserName, &out.ResourceAwsIAMAccessKeyUserName
		*out = make([]ResourceAwsIAMAccessKeyUserNameInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsS3BucketOwnerID != nil {
		in, out := &in.ResourceAwsS3BucketOwnerID, &out.ResourceAwsS3BucketOwnerID
		*out = make([]ResourceAwsS3BucketOwnerIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceAwsS3BucketOwnerName != nil {
		in, out := &in.ResourceAwsS3BucketOwnerName, &out.ResourceAwsS3BucketOwnerName
		*out = make([]ResourceAwsS3BucketOwnerNameInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceContainerImageID != nil {
		in, out := &in.ResourceContainerImageID, &out.ResourceContainerImageID
		*out = make([]ResourceContainerImageIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceContainerImageName != nil {
		in, out := &in.ResourceContainerImageName, &out.ResourceContainerImageName
		*out = make([]ResourceContainerImageNameInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceContainerLaunchedAt != nil {
		in, out := &in.ResourceContainerLaunchedAt, &out.ResourceContainerLaunchedAt
		*out = make([]ResourceContainerLaunchedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceContainerName != nil {
		in, out := &in.ResourceContainerName, &out.ResourceContainerName
		*out = make([]ResourceContainerNameInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceDetailsOther != nil {
		in, out := &in.ResourceDetailsOther, &out.ResourceDetailsOther
		*out = make([]ResourceDetailsOtherInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceID != nil {
		in, out := &in.ResourceID, &out.ResourceID
		*out = make([]ResourceIDInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourcePartition != nil {
		in, out := &in.ResourcePartition, &out.ResourcePartition
		*out = make([]ResourcePartitionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceRegion != nil {
		in, out := &in.ResourceRegion, &out.ResourceRegion
		*out = make([]ResourceRegionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceTags != nil {
		in, out := &in.ResourceTags, &out.ResourceTags
		*out = make([]ResourceTagsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceType != nil {
		in, out := &in.ResourceType, &out.ResourceType
		*out = make([]ResourceTypeInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SeverityLabel != nil {
		in, out := &in.SeverityLabel, &out.SeverityLabel
		*out = make([]SeverityLabelInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SourceURL != nil {
		in, out := &in.SourceURL, &out.SourceURL
		*out = make([]SourceURLInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ThreatIntelIndicatorCategory != nil {
		in, out := &in.ThreatIntelIndicatorCategory, &out.ThreatIntelIndicatorCategory
		*out = make([]ThreatIntelIndicatorCategoryInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ThreatIntelIndicatorLastObservedAt != nil {
		in, out := &in.ThreatIntelIndicatorLastObservedAt, &out.ThreatIntelIndicatorLastObservedAt
		*out = make([]ThreatIntelIndicatorLastObservedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ThreatIntelIndicatorSource != nil {
		in, out := &in.ThreatIntelIndicatorSource, &out.ThreatIntelIndicatorSource
		*out = make([]ThreatIntelIndicatorSourceInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ThreatIntelIndicatorSourceURL != nil {
		in, out := &in.ThreatIntelIndicatorSourceURL, &out.ThreatIntelIndicatorSourceURL
		*out = make([]ThreatIntelIndicatorSourceURLInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ThreatIntelIndicatorType != nil {
		in, out := &in.ThreatIntelIndicatorType, &out.ThreatIntelIndicatorType
		*out = make([]ThreatIntelIndicatorTypeInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ThreatIntelIndicatorValue != nil {
		in, out := &in.ThreatIntelIndicatorValue, &out.ThreatIntelIndicatorValue
		*out = make([]ThreatIntelIndicatorValueInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Title != nil {
		in, out := &in.Title, &out.Title
		*out = make([]TitleInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = make([]TypeInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.UpdatedAt != nil {
		in, out := &in.UpdatedAt, &out.UpdatedAt
		*out = make([]UpdatedAtInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.UserDefinedValues != nil {
		in, out := &in.UserDefinedValues, &out.UserDefinedValues
		*out = make([]UserDefinedValuesInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.VerificationState != nil {
		in, out := &in.VerificationState, &out.VerificationState
		*out = make([]VerificationStateInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.WorkflowStatus != nil {
		in, out := &in.WorkflowStatus, &out.WorkflowStatus
		*out = make([]WorkflowStatusInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FiltersInitParameters.
func (in *FiltersInitParameters) DeepCopy() *FiltersInitParameters {
	if in == nil {
		return nil
	}
	out := new(FiltersInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FiltersObservation) DeepCopyInto(out *FiltersObservation) { + *out = *in + if in.AwsAccountID != nil { + in, out := &in.AwsAccountID, &out.AwsAccountID + *out = make([]AwsAccountIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CompanyName != nil { + in, out := &in.CompanyName, &out.CompanyName + *out = make([]CompanyNameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ComplianceStatus != nil { + in, out := &in.ComplianceStatus, &out.ComplianceStatus + *out = make([]ComplianceStatusObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Confidence != nil { + in, out := &in.Confidence, &out.Confidence + *out = make([]ConfidenceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = make([]CreatedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Criticality != nil { + in, out := &in.Criticality, &out.Criticality + *out = make([]CriticalityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = make([]DescriptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsConfidence != nil { + in, out := &in.FindingProviderFieldsConfidence, &out.FindingProviderFieldsConfidence + *out = make([]FindingProviderFieldsConfidenceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsCriticality != nil { + in, out := &in.FindingProviderFieldsCriticality, &out.FindingProviderFieldsCriticality + *out = make([]FindingProviderFieldsCriticalityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + 
if in.FindingProviderFieldsRelatedFindingsID != nil { + in, out := &in.FindingProviderFieldsRelatedFindingsID, &out.FindingProviderFieldsRelatedFindingsID + *out = make([]FindingProviderFieldsRelatedFindingsIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsRelatedFindingsProductArn != nil { + in, out := &in.FindingProviderFieldsRelatedFindingsProductArn, &out.FindingProviderFieldsRelatedFindingsProductArn + *out = make([]FindingProviderFieldsRelatedFindingsProductArnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsSeverityLabel != nil { + in, out := &in.FindingProviderFieldsSeverityLabel, &out.FindingProviderFieldsSeverityLabel + *out = make([]FindingProviderFieldsSeverityLabelObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsSeverityOriginal != nil { + in, out := &in.FindingProviderFieldsSeverityOriginal, &out.FindingProviderFieldsSeverityOriginal + *out = make([]FindingProviderFieldsSeverityOriginalObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsTypes != nil { + in, out := &in.FindingProviderFieldsTypes, &out.FindingProviderFieldsTypes + *out = make([]FindingProviderFieldsTypesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FirstObservedAt != nil { + in, out := &in.FirstObservedAt, &out.FirstObservedAt + *out = make([]FirstObservedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeneratorID != nil { + in, out := &in.GeneratorID, &out.GeneratorID + *out = make([]GeneratorIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]IDObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Keyword != nil { + in, out := &in.Keyword, &out.Keyword + *out = make([]KeywordObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastObservedAt != nil { + in, out := &in.LastObservedAt, &out.LastObservedAt + *out = make([]LastObservedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MalwareName != nil { + in, out := &in.MalwareName, &out.MalwareName + *out = make([]MalwareNameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MalwarePath != nil { + in, out := &in.MalwarePath, &out.MalwarePath + *out = make([]MalwarePathObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MalwareState != nil { + in, out := &in.MalwareState, &out.MalwareState + *out = make([]MalwareStateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MalwareType != nil { + in, out := &in.MalwareType, &out.MalwareType + *out = make([]MalwareTypeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDestinationDomain != nil { + in, out := &in.NetworkDestinationDomain, &out.NetworkDestinationDomain + *out = make([]NetworkDestinationDomainObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDestinationIPv4 != nil { + in, out := &in.NetworkDestinationIPv4, &out.NetworkDestinationIPv4 + *out = make([]NetworkDestinationIPv4Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDestinationIPv6 != nil { + in, out := &in.NetworkDestinationIPv6, &out.NetworkDestinationIPv6 + *out = make([]NetworkDestinationIPv6Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDestinationPort != nil { + in, out := &in.NetworkDestinationPort, 
&out.NetworkDestinationPort + *out = make([]NetworkDestinationPortObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDirection != nil { + in, out := &in.NetworkDirection, &out.NetworkDirection + *out = make([]NetworkDirectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkProtocol != nil { + in, out := &in.NetworkProtocol, &out.NetworkProtocol + *out = make([]NetworkProtocolObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourceDomain != nil { + in, out := &in.NetworkSourceDomain, &out.NetworkSourceDomain + *out = make([]NetworkSourceDomainObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourceIPv4 != nil { + in, out := &in.NetworkSourceIPv4, &out.NetworkSourceIPv4 + *out = make([]NetworkSourceIPv4Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourceIPv6 != nil { + in, out := &in.NetworkSourceIPv6, &out.NetworkSourceIPv6 + *out = make([]NetworkSourceIPv6Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourceMac != nil { + in, out := &in.NetworkSourceMac, &out.NetworkSourceMac + *out = make([]NetworkSourceMacObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourcePort != nil { + in, out := &in.NetworkSourcePort, &out.NetworkSourcePort + *out = make([]NetworkSourcePortObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoteText != nil { + in, out := &in.NoteText, &out.NoteText + *out = make([]NoteTextObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoteUpdatedAt != nil { + in, out := &in.NoteUpdatedAt, &out.NoteUpdatedAt + *out = make([]NoteUpdatedAtObservation, len(*in)) + for i := range 
*in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoteUpdatedBy != nil { + in, out := &in.NoteUpdatedBy, &out.NoteUpdatedBy + *out = make([]NoteUpdatedByObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessLaunchedAt != nil { + in, out := &in.ProcessLaunchedAt, &out.ProcessLaunchedAt + *out = make([]ProcessLaunchedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessName != nil { + in, out := &in.ProcessName, &out.ProcessName + *out = make([]ProcessNameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessParentPid != nil { + in, out := &in.ProcessParentPid, &out.ProcessParentPid + *out = make([]ProcessParentPidObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessPath != nil { + in, out := &in.ProcessPath, &out.ProcessPath + *out = make([]ProcessPathObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessPid != nil { + in, out := &in.ProcessPid, &out.ProcessPid + *out = make([]ProcessPidObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessTerminatedAt != nil { + in, out := &in.ProcessTerminatedAt, &out.ProcessTerminatedAt + *out = make([]ProcessTerminatedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProductArn != nil { + in, out := &in.ProductArn, &out.ProductArn + *out = make([]ProductArnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProductFields != nil { + in, out := &in.ProductFields, &out.ProductFields + *out = make([]ProductFieldsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProductName != nil { + in, out := &in.ProductName, &out.ProductName + *out = make([]ProductNameObservation, len(*in)) + 
for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecommendationText != nil { + in, out := &in.RecommendationText, &out.RecommendationText + *out = make([]RecommendationTextObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordState != nil { + in, out := &in.RecordState, &out.RecordState + *out = make([]RecordStateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelatedFindingsID != nil { + in, out := &in.RelatedFindingsID, &out.RelatedFindingsID + *out = make([]RelatedFindingsIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelatedFindingsProductArn != nil { + in, out := &in.RelatedFindingsProductArn, &out.RelatedFindingsProductArn + *out = make([]RelatedFindingsProductArnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceIAMInstanceProfileArn != nil { + in, out := &in.ResourceAwsEC2InstanceIAMInstanceProfileArn, &out.ResourceAwsEC2InstanceIAMInstanceProfileArn + *out = make([]ResourceAwsEC2InstanceIAMInstanceProfileArnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceIPv4Addresses != nil { + in, out := &in.ResourceAwsEC2InstanceIPv4Addresses, &out.ResourceAwsEC2InstanceIPv4Addresses + *out = make([]ResourceAwsEC2InstanceIPv4AddressesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceIPv6Addresses != nil { + in, out := &in.ResourceAwsEC2InstanceIPv6Addresses, &out.ResourceAwsEC2InstanceIPv6Addresses + *out = make([]ResourceAwsEC2InstanceIPv6AddressesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceImageID != nil { + in, out := &in.ResourceAwsEC2InstanceImageID, &out.ResourceAwsEC2InstanceImageID + *out = 
make([]ResourceAwsEC2InstanceImageIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceKeyName != nil { + in, out := &in.ResourceAwsEC2InstanceKeyName, &out.ResourceAwsEC2InstanceKeyName + *out = make([]ResourceAwsEC2InstanceKeyNameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceLaunchedAt != nil { + in, out := &in.ResourceAwsEC2InstanceLaunchedAt, &out.ResourceAwsEC2InstanceLaunchedAt + *out = make([]ResourceAwsEC2InstanceLaunchedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceSubnetID != nil { + in, out := &in.ResourceAwsEC2InstanceSubnetID, &out.ResourceAwsEC2InstanceSubnetID + *out = make([]ResourceAwsEC2InstanceSubnetIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceType != nil { + in, out := &in.ResourceAwsEC2InstanceType, &out.ResourceAwsEC2InstanceType + *out = make([]ResourceAwsEC2InstanceTypeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceVPCID != nil { + in, out := &in.ResourceAwsEC2InstanceVPCID, &out.ResourceAwsEC2InstanceVPCID + *out = make([]ResourceAwsEC2InstanceVPCIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsIAMAccessKeyCreatedAt != nil { + in, out := &in.ResourceAwsIAMAccessKeyCreatedAt, &out.ResourceAwsIAMAccessKeyCreatedAt + *out = make([]ResourceAwsIAMAccessKeyCreatedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsIAMAccessKeyStatus != nil { + in, out := &in.ResourceAwsIAMAccessKeyStatus, &out.ResourceAwsIAMAccessKeyStatus + *out = make([]ResourceAwsIAMAccessKeyStatusObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.ResourceAwsIAMAccessKeyUserName != nil { + in, out := &in.ResourceAwsIAMAccessKeyUserName, &out.ResourceAwsIAMAccessKeyUserName + *out = make([]ResourceAwsIAMAccessKeyUserNameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsS3BucketOwnerID != nil { + in, out := &in.ResourceAwsS3BucketOwnerID, &out.ResourceAwsS3BucketOwnerID + *out = make([]ResourceAwsS3BucketOwnerIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsS3BucketOwnerName != nil { + in, out := &in.ResourceAwsS3BucketOwnerName, &out.ResourceAwsS3BucketOwnerName + *out = make([]ResourceAwsS3BucketOwnerNameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceContainerImageID != nil { + in, out := &in.ResourceContainerImageID, &out.ResourceContainerImageID + *out = make([]ResourceContainerImageIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceContainerImageName != nil { + in, out := &in.ResourceContainerImageName, &out.ResourceContainerImageName + *out = make([]ResourceContainerImageNameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceContainerLaunchedAt != nil { + in, out := &in.ResourceContainerLaunchedAt, &out.ResourceContainerLaunchedAt + *out = make([]ResourceContainerLaunchedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceContainerName != nil { + in, out := &in.ResourceContainerName, &out.ResourceContainerName + *out = make([]ResourceContainerNameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceDetailsOther != nil { + in, out := &in.ResourceDetailsOther, &out.ResourceDetailsOther + *out = make([]ResourceDetailsOtherObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } 
+ if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = make([]ResourceIDObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourcePartition != nil { + in, out := &in.ResourcePartition, &out.ResourcePartition + *out = make([]ResourcePartitionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRegion != nil { + in, out := &in.ResourceRegion, &out.ResourceRegion + *out = make([]ResourceRegionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]ResourceTagsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = make([]ResourceTypeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SeverityLabel != nil { + in, out := &in.SeverityLabel, &out.SeverityLabel + *out = make([]SeverityLabelObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceURL != nil { + in, out := &in.SourceURL, &out.SourceURL + *out = make([]SourceURLObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorCategory != nil { + in, out := &in.ThreatIntelIndicatorCategory, &out.ThreatIntelIndicatorCategory + *out = make([]ThreatIntelIndicatorCategoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorLastObservedAt != nil { + in, out := &in.ThreatIntelIndicatorLastObservedAt, &out.ThreatIntelIndicatorLastObservedAt + *out = make([]ThreatIntelIndicatorLastObservedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorSource != nil { + in, out := 
&in.ThreatIntelIndicatorSource, &out.ThreatIntelIndicatorSource + *out = make([]ThreatIntelIndicatorSourceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorSourceURL != nil { + in, out := &in.ThreatIntelIndicatorSourceURL, &out.ThreatIntelIndicatorSourceURL + *out = make([]ThreatIntelIndicatorSourceURLObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorType != nil { + in, out := &in.ThreatIntelIndicatorType, &out.ThreatIntelIndicatorType + *out = make([]ThreatIntelIndicatorTypeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorValue != nil { + in, out := &in.ThreatIntelIndicatorValue, &out.ThreatIntelIndicatorValue + *out = make([]ThreatIntelIndicatorValueObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = make([]TitleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = make([]TypeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = make([]UpdatedAtObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UserDefinedValues != nil { + in, out := &in.UserDefinedValues, &out.UserDefinedValues + *out = make([]UserDefinedValuesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VerificationState != nil { + in, out := &in.VerificationState, &out.VerificationState + *out = make([]VerificationStateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkflowStatus != nil { + in, out := &in.WorkflowStatus, &out.WorkflowStatus + *out 
= make([]WorkflowStatusObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FiltersObservation. +func (in *FiltersObservation) DeepCopy() *FiltersObservation { + if in == nil { + return nil + } + out := new(FiltersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FiltersParameters) DeepCopyInto(out *FiltersParameters) { + *out = *in + if in.AwsAccountID != nil { + in, out := &in.AwsAccountID, &out.AwsAccountID + *out = make([]AwsAccountIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CompanyName != nil { + in, out := &in.CompanyName, &out.CompanyName + *out = make([]CompanyNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ComplianceStatus != nil { + in, out := &in.ComplianceStatus, &out.ComplianceStatus + *out = make([]ComplianceStatusParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Confidence != nil { + in, out := &in.Confidence, &out.Confidence + *out = make([]ConfidenceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = make([]CreatedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Criticality != nil { + in, out := &in.Criticality, &out.Criticality + *out = make([]CriticalityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = make([]DescriptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsConfidence != nil 
{ + in, out := &in.FindingProviderFieldsConfidence, &out.FindingProviderFieldsConfidence + *out = make([]FindingProviderFieldsConfidenceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsCriticality != nil { + in, out := &in.FindingProviderFieldsCriticality, &out.FindingProviderFieldsCriticality + *out = make([]FindingProviderFieldsCriticalityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsRelatedFindingsID != nil { + in, out := &in.FindingProviderFieldsRelatedFindingsID, &out.FindingProviderFieldsRelatedFindingsID + *out = make([]FindingProviderFieldsRelatedFindingsIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsRelatedFindingsProductArn != nil { + in, out := &in.FindingProviderFieldsRelatedFindingsProductArn, &out.FindingProviderFieldsRelatedFindingsProductArn + *out = make([]FindingProviderFieldsRelatedFindingsProductArnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsSeverityLabel != nil { + in, out := &in.FindingProviderFieldsSeverityLabel, &out.FindingProviderFieldsSeverityLabel + *out = make([]FindingProviderFieldsSeverityLabelParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsSeverityOriginal != nil { + in, out := &in.FindingProviderFieldsSeverityOriginal, &out.FindingProviderFieldsSeverityOriginal + *out = make([]FindingProviderFieldsSeverityOriginalParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FindingProviderFieldsTypes != nil { + in, out := &in.FindingProviderFieldsTypes, &out.FindingProviderFieldsTypes + *out = make([]FindingProviderFieldsTypesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FirstObservedAt != nil { + 
in, out := &in.FirstObservedAt, &out.FirstObservedAt + *out = make([]FirstObservedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeneratorID != nil { + in, out := &in.GeneratorID, &out.GeneratorID + *out = make([]GeneratorIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]IDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Keyword != nil { + in, out := &in.Keyword, &out.Keyword + *out = make([]KeywordParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastObservedAt != nil { + in, out := &in.LastObservedAt, &out.LastObservedAt + *out = make([]LastObservedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MalwareName != nil { + in, out := &in.MalwareName, &out.MalwareName + *out = make([]MalwareNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MalwarePath != nil { + in, out := &in.MalwarePath, &out.MalwarePath + *out = make([]MalwarePathParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MalwareState != nil { + in, out := &in.MalwareState, &out.MalwareState + *out = make([]MalwareStateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MalwareType != nil { + in, out := &in.MalwareType, &out.MalwareType + *out = make([]MalwareTypeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDestinationDomain != nil { + in, out := &in.NetworkDestinationDomain, &out.NetworkDestinationDomain + *out = make([]NetworkDestinationDomainParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDestinationIPv4 != nil { + in, out := &in.NetworkDestinationIPv4, 
&out.NetworkDestinationIPv4 + *out = make([]NetworkDestinationIPv4Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDestinationIPv6 != nil { + in, out := &in.NetworkDestinationIPv6, &out.NetworkDestinationIPv6 + *out = make([]NetworkDestinationIPv6Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDestinationPort != nil { + in, out := &in.NetworkDestinationPort, &out.NetworkDestinationPort + *out = make([]NetworkDestinationPortParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkDirection != nil { + in, out := &in.NetworkDirection, &out.NetworkDirection + *out = make([]NetworkDirectionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkProtocol != nil { + in, out := &in.NetworkProtocol, &out.NetworkProtocol + *out = make([]NetworkProtocolParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourceDomain != nil { + in, out := &in.NetworkSourceDomain, &out.NetworkSourceDomain + *out = make([]NetworkSourceDomainParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourceIPv4 != nil { + in, out := &in.NetworkSourceIPv4, &out.NetworkSourceIPv4 + *out = make([]NetworkSourceIPv4Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourceIPv6 != nil { + in, out := &in.NetworkSourceIPv6, &out.NetworkSourceIPv6 + *out = make([]NetworkSourceIPv6Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourceMac != nil { + in, out := &in.NetworkSourceMac, &out.NetworkSourceMac + *out = make([]NetworkSourceMacParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkSourcePort != nil { + in, out := &in.NetworkSourcePort, 
&out.NetworkSourcePort + *out = make([]NetworkSourcePortParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoteText != nil { + in, out := &in.NoteText, &out.NoteText + *out = make([]NoteTextParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoteUpdatedAt != nil { + in, out := &in.NoteUpdatedAt, &out.NoteUpdatedAt + *out = make([]NoteUpdatedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoteUpdatedBy != nil { + in, out := &in.NoteUpdatedBy, &out.NoteUpdatedBy + *out = make([]NoteUpdatedByParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessLaunchedAt != nil { + in, out := &in.ProcessLaunchedAt, &out.ProcessLaunchedAt + *out = make([]ProcessLaunchedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessName != nil { + in, out := &in.ProcessName, &out.ProcessName + *out = make([]ProcessNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessParentPid != nil { + in, out := &in.ProcessParentPid, &out.ProcessParentPid + *out = make([]ProcessParentPidParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessPath != nil { + in, out := &in.ProcessPath, &out.ProcessPath + *out = make([]ProcessPathParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessPid != nil { + in, out := &in.ProcessPid, &out.ProcessPid + *out = make([]ProcessPidParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProcessTerminatedAt != nil { + in, out := &in.ProcessTerminatedAt, &out.ProcessTerminatedAt + *out = make([]ProcessTerminatedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProductArn != nil { + in, out := 
&in.ProductArn, &out.ProductArn + *out = make([]ProductArnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProductFields != nil { + in, out := &in.ProductFields, &out.ProductFields + *out = make([]ProductFieldsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProductName != nil { + in, out := &in.ProductName, &out.ProductName + *out = make([]ProductNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecommendationText != nil { + in, out := &in.RecommendationText, &out.RecommendationText + *out = make([]RecommendationTextParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecordState != nil { + in, out := &in.RecordState, &out.RecordState + *out = make([]RecordStateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelatedFindingsID != nil { + in, out := &in.RelatedFindingsID, &out.RelatedFindingsID + *out = make([]RelatedFindingsIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelatedFindingsProductArn != nil { + in, out := &in.RelatedFindingsProductArn, &out.RelatedFindingsProductArn + *out = make([]RelatedFindingsProductArnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceIAMInstanceProfileArn != nil { + in, out := &in.ResourceAwsEC2InstanceIAMInstanceProfileArn, &out.ResourceAwsEC2InstanceIAMInstanceProfileArn + *out = make([]ResourceAwsEC2InstanceIAMInstanceProfileArnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceIPv4Addresses != nil { + in, out := &in.ResourceAwsEC2InstanceIPv4Addresses, &out.ResourceAwsEC2InstanceIPv4Addresses + *out = make([]ResourceAwsEC2InstanceIPv4AddressesParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceIPv6Addresses != nil { + in, out := &in.ResourceAwsEC2InstanceIPv6Addresses, &out.ResourceAwsEC2InstanceIPv6Addresses + *out = make([]ResourceAwsEC2InstanceIPv6AddressesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceImageID != nil { + in, out := &in.ResourceAwsEC2InstanceImageID, &out.ResourceAwsEC2InstanceImageID + *out = make([]ResourceAwsEC2InstanceImageIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceKeyName != nil { + in, out := &in.ResourceAwsEC2InstanceKeyName, &out.ResourceAwsEC2InstanceKeyName + *out = make([]ResourceAwsEC2InstanceKeyNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceLaunchedAt != nil { + in, out := &in.ResourceAwsEC2InstanceLaunchedAt, &out.ResourceAwsEC2InstanceLaunchedAt + *out = make([]ResourceAwsEC2InstanceLaunchedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceSubnetID != nil { + in, out := &in.ResourceAwsEC2InstanceSubnetID, &out.ResourceAwsEC2InstanceSubnetID + *out = make([]ResourceAwsEC2InstanceSubnetIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceType != nil { + in, out := &in.ResourceAwsEC2InstanceType, &out.ResourceAwsEC2InstanceType + *out = make([]ResourceAwsEC2InstanceTypeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsEC2InstanceVPCID != nil { + in, out := &in.ResourceAwsEC2InstanceVPCID, &out.ResourceAwsEC2InstanceVPCID + *out = make([]ResourceAwsEC2InstanceVPCIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsIAMAccessKeyCreatedAt != nil { + in, out := 
&in.ResourceAwsIAMAccessKeyCreatedAt, &out.ResourceAwsIAMAccessKeyCreatedAt + *out = make([]ResourceAwsIAMAccessKeyCreatedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsIAMAccessKeyStatus != nil { + in, out := &in.ResourceAwsIAMAccessKeyStatus, &out.ResourceAwsIAMAccessKeyStatus + *out = make([]ResourceAwsIAMAccessKeyStatusParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsIAMAccessKeyUserName != nil { + in, out := &in.ResourceAwsIAMAccessKeyUserName, &out.ResourceAwsIAMAccessKeyUserName + *out = make([]ResourceAwsIAMAccessKeyUserNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsS3BucketOwnerID != nil { + in, out := &in.ResourceAwsS3BucketOwnerID, &out.ResourceAwsS3BucketOwnerID + *out = make([]ResourceAwsS3BucketOwnerIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceAwsS3BucketOwnerName != nil { + in, out := &in.ResourceAwsS3BucketOwnerName, &out.ResourceAwsS3BucketOwnerName + *out = make([]ResourceAwsS3BucketOwnerNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceContainerImageID != nil { + in, out := &in.ResourceContainerImageID, &out.ResourceContainerImageID + *out = make([]ResourceContainerImageIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceContainerImageName != nil { + in, out := &in.ResourceContainerImageName, &out.ResourceContainerImageName + *out = make([]ResourceContainerImageNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceContainerLaunchedAt != nil { + in, out := &in.ResourceContainerLaunchedAt, &out.ResourceContainerLaunchedAt + *out = make([]ResourceContainerLaunchedAtParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceContainerName != nil { + in, out := &in.ResourceContainerName, &out.ResourceContainerName + *out = make([]ResourceContainerNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceDetailsOther != nil { + in, out := &in.ResourceDetailsOther, &out.ResourceDetailsOther + *out = make([]ResourceDetailsOtherParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = make([]ResourceIDParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourcePartition != nil { + in, out := &in.ResourcePartition, &out.ResourcePartition + *out = make([]ResourcePartitionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRegion != nil { + in, out := &in.ResourceRegion, &out.ResourceRegion + *out = make([]ResourceRegionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]ResourceTagsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = make([]ResourceTypeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SeverityLabel != nil { + in, out := &in.SeverityLabel, &out.SeverityLabel + *out = make([]SeverityLabelParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceURL != nil { + in, out := &in.SourceURL, &out.SourceURL + *out = make([]SourceURLParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorCategory != nil { + in, out := &in.ThreatIntelIndicatorCategory, 
&out.ThreatIntelIndicatorCategory + *out = make([]ThreatIntelIndicatorCategoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorLastObservedAt != nil { + in, out := &in.ThreatIntelIndicatorLastObservedAt, &out.ThreatIntelIndicatorLastObservedAt + *out = make([]ThreatIntelIndicatorLastObservedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorSource != nil { + in, out := &in.ThreatIntelIndicatorSource, &out.ThreatIntelIndicatorSource + *out = make([]ThreatIntelIndicatorSourceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorSourceURL != nil { + in, out := &in.ThreatIntelIndicatorSourceURL, &out.ThreatIntelIndicatorSourceURL + *out = make([]ThreatIntelIndicatorSourceURLParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorType != nil { + in, out := &in.ThreatIntelIndicatorType, &out.ThreatIntelIndicatorType + *out = make([]ThreatIntelIndicatorTypeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ThreatIntelIndicatorValue != nil { + in, out := &in.ThreatIntelIndicatorValue, &out.ThreatIntelIndicatorValue + *out = make([]ThreatIntelIndicatorValueParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = make([]TitleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = make([]TypeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = make([]UpdatedAtParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.UserDefinedValues != nil { + in, out := &in.UserDefinedValues, &out.UserDefinedValues + *out = make([]UserDefinedValuesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VerificationState != nil { + in, out := &in.VerificationState, &out.VerificationState + *out = make([]VerificationStateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkflowStatus != nil { + in, out := &in.WorkflowStatus, &out.WorkflowStatus + *out = make([]WorkflowStatusParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FiltersParameters. +func (in *FiltersParameters) DeepCopy() *FiltersParameters { + if in == nil { + return nil + } + out := new(FiltersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsConfidenceInitParameters) DeepCopyInto(out *FindingProviderFieldsConfidenceInitParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsConfidenceInitParameters. +func (in *FindingProviderFieldsConfidenceInitParameters) DeepCopy() *FindingProviderFieldsConfidenceInitParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsConfidenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsConfidenceObservation) DeepCopyInto(out *FindingProviderFieldsConfidenceObservation) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsConfidenceObservation. +func (in *FindingProviderFieldsConfidenceObservation) DeepCopy() *FindingProviderFieldsConfidenceObservation { + if in == nil { + return nil + } + out := new(FindingProviderFieldsConfidenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsConfidenceParameters) DeepCopyInto(out *FindingProviderFieldsConfidenceParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsConfidenceParameters. +func (in *FindingProviderFieldsConfidenceParameters) DeepCopy() *FindingProviderFieldsConfidenceParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsConfidenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsCriticalityInitParameters) DeepCopyInto(out *FindingProviderFieldsCriticalityInitParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsCriticalityInitParameters. +func (in *FindingProviderFieldsCriticalityInitParameters) DeepCopy() *FindingProviderFieldsCriticalityInitParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsCriticalityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsCriticalityObservation) DeepCopyInto(out *FindingProviderFieldsCriticalityObservation) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsCriticalityObservation. +func (in *FindingProviderFieldsCriticalityObservation) DeepCopy() *FindingProviderFieldsCriticalityObservation { + if in == nil { + return nil + } + out := new(FindingProviderFieldsCriticalityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsCriticalityParameters) DeepCopyInto(out *FindingProviderFieldsCriticalityParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsCriticalityParameters. +func (in *FindingProviderFieldsCriticalityParameters) DeepCopy() *FindingProviderFieldsCriticalityParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsCriticalityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsRelatedFindingsIDInitParameters) DeepCopyInto(out *FindingProviderFieldsRelatedFindingsIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsRelatedFindingsIDInitParameters. +func (in *FindingProviderFieldsRelatedFindingsIDInitParameters) DeepCopy() *FindingProviderFieldsRelatedFindingsIDInitParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsRelatedFindingsIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsRelatedFindingsIDObservation) DeepCopyInto(out *FindingProviderFieldsRelatedFindingsIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsRelatedFindingsIDObservation. +func (in *FindingProviderFieldsRelatedFindingsIDObservation) DeepCopy() *FindingProviderFieldsRelatedFindingsIDObservation { + if in == nil { + return nil + } + out := new(FindingProviderFieldsRelatedFindingsIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsRelatedFindingsIDParameters) DeepCopyInto(out *FindingProviderFieldsRelatedFindingsIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsRelatedFindingsIDParameters. +func (in *FindingProviderFieldsRelatedFindingsIDParameters) DeepCopy() *FindingProviderFieldsRelatedFindingsIDParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsRelatedFindingsIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsRelatedFindingsProductArnInitParameters) DeepCopyInto(out *FindingProviderFieldsRelatedFindingsProductArnInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsRelatedFindingsProductArnInitParameters. +func (in *FindingProviderFieldsRelatedFindingsProductArnInitParameters) DeepCopy() *FindingProviderFieldsRelatedFindingsProductArnInitParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsRelatedFindingsProductArnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsRelatedFindingsProductArnObservation) DeepCopyInto(out *FindingProviderFieldsRelatedFindingsProductArnObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsRelatedFindingsProductArnObservation. +func (in *FindingProviderFieldsRelatedFindingsProductArnObservation) DeepCopy() *FindingProviderFieldsRelatedFindingsProductArnObservation { + if in == nil { + return nil + } + out := new(FindingProviderFieldsRelatedFindingsProductArnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsRelatedFindingsProductArnParameters) DeepCopyInto(out *FindingProviderFieldsRelatedFindingsProductArnParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsRelatedFindingsProductArnParameters. +func (in *FindingProviderFieldsRelatedFindingsProductArnParameters) DeepCopy() *FindingProviderFieldsRelatedFindingsProductArnParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsRelatedFindingsProductArnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsSeverityLabelInitParameters) DeepCopyInto(out *FindingProviderFieldsSeverityLabelInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsSeverityLabelInitParameters. +func (in *FindingProviderFieldsSeverityLabelInitParameters) DeepCopy() *FindingProviderFieldsSeverityLabelInitParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsSeverityLabelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsSeverityLabelObservation) DeepCopyInto(out *FindingProviderFieldsSeverityLabelObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsSeverityLabelObservation. +func (in *FindingProviderFieldsSeverityLabelObservation) DeepCopy() *FindingProviderFieldsSeverityLabelObservation { + if in == nil { + return nil + } + out := new(FindingProviderFieldsSeverityLabelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsSeverityLabelParameters) DeepCopyInto(out *FindingProviderFieldsSeverityLabelParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsSeverityLabelParameters. +func (in *FindingProviderFieldsSeverityLabelParameters) DeepCopy() *FindingProviderFieldsSeverityLabelParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsSeverityLabelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsSeverityOriginalInitParameters) DeepCopyInto(out *FindingProviderFieldsSeverityOriginalInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsSeverityOriginalInitParameters. +func (in *FindingProviderFieldsSeverityOriginalInitParameters) DeepCopy() *FindingProviderFieldsSeverityOriginalInitParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsSeverityOriginalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsSeverityOriginalObservation) DeepCopyInto(out *FindingProviderFieldsSeverityOriginalObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsSeverityOriginalObservation. +func (in *FindingProviderFieldsSeverityOriginalObservation) DeepCopy() *FindingProviderFieldsSeverityOriginalObservation { + if in == nil { + return nil + } + out := new(FindingProviderFieldsSeverityOriginalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsSeverityOriginalParameters) DeepCopyInto(out *FindingProviderFieldsSeverityOriginalParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsSeverityOriginalParameters. +func (in *FindingProviderFieldsSeverityOriginalParameters) DeepCopy() *FindingProviderFieldsSeverityOriginalParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsSeverityOriginalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsTypesInitParameters) DeepCopyInto(out *FindingProviderFieldsTypesInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsTypesInitParameters. +func (in *FindingProviderFieldsTypesInitParameters) DeepCopy() *FindingProviderFieldsTypesInitParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsTypesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FindingProviderFieldsTypesObservation) DeepCopyInto(out *FindingProviderFieldsTypesObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsTypesObservation. +func (in *FindingProviderFieldsTypesObservation) DeepCopy() *FindingProviderFieldsTypesObservation { + if in == nil { + return nil + } + out := new(FindingProviderFieldsTypesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FindingProviderFieldsTypesParameters) DeepCopyInto(out *FindingProviderFieldsTypesParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindingProviderFieldsTypesParameters. +func (in *FindingProviderFieldsTypesParameters) DeepCopy() *FindingProviderFieldsTypesParameters { + if in == nil { + return nil + } + out := new(FindingProviderFieldsTypesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirstObservedAtDateRangeInitParameters) DeepCopyInto(out *FirstObservedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirstObservedAtDateRangeInitParameters. +func (in *FirstObservedAtDateRangeInitParameters) DeepCopy() *FirstObservedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(FirstObservedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirstObservedAtDateRangeObservation) DeepCopyInto(out *FirstObservedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirstObservedAtDateRangeObservation. +func (in *FirstObservedAtDateRangeObservation) DeepCopy() *FirstObservedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(FirstObservedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirstObservedAtDateRangeParameters) DeepCopyInto(out *FirstObservedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirstObservedAtDateRangeParameters. +func (in *FirstObservedAtDateRangeParameters) DeepCopy() *FirstObservedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(FirstObservedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirstObservedAtInitParameters) DeepCopyInto(out *FirstObservedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(FirstObservedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirstObservedAtInitParameters. +func (in *FirstObservedAtInitParameters) DeepCopy() *FirstObservedAtInitParameters { + if in == nil { + return nil + } + out := new(FirstObservedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirstObservedAtObservation) DeepCopyInto(out *FirstObservedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(FirstObservedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirstObservedAtObservation. +func (in *FirstObservedAtObservation) DeepCopy() *FirstObservedAtObservation { + if in == nil { + return nil + } + out := new(FirstObservedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirstObservedAtParameters) DeepCopyInto(out *FirstObservedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(FirstObservedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirstObservedAtParameters. +func (in *FirstObservedAtParameters) DeepCopy() *FirstObservedAtParameters { + if in == nil { + return nil + } + out := new(FirstObservedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GeneratorIDInitParameters) DeepCopyInto(out *GeneratorIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeneratorIDInitParameters. +func (in *GeneratorIDInitParameters) DeepCopy() *GeneratorIDInitParameters { + if in == nil { + return nil + } + out := new(GeneratorIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeneratorIDObservation) DeepCopyInto(out *GeneratorIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeneratorIDObservation. +func (in *GeneratorIDObservation) DeepCopy() *GeneratorIDObservation { + if in == nil { + return nil + } + out := new(GeneratorIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeneratorIDParameters) DeepCopyInto(out *GeneratorIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeneratorIDParameters. 
+func (in *GeneratorIDParameters) DeepCopy() *GeneratorIDParameters { + if in == nil { + return nil + } + out := new(GeneratorIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IDInitParameters) DeepCopyInto(out *IDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDInitParameters. +func (in *IDInitParameters) DeepCopy() *IDInitParameters { + if in == nil { + return nil + } + out := new(IDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IDObservation) DeepCopyInto(out *IDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDObservation. +func (in *IDObservation) DeepCopy() *IDObservation { + if in == nil { + return nil + } + out := new(IDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IDParameters) DeepCopyInto(out *IDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDParameters. +func (in *IDParameters) DeepCopy() *IDParameters { + if in == nil { + return nil + } + out := new(IDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Insight) DeepCopyInto(out *Insight) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Insight. +func (in *Insight) DeepCopy() *Insight { + if in == nil { + return nil + } + out := new(Insight) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Insight) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InsightInitParameters) DeepCopyInto(out *InsightInitParameters) { + *out = *in + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = new(FiltersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GroupByAttribute != nil { + in, out := &in.GroupByAttribute, &out.GroupByAttribute + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightInitParameters. +func (in *InsightInitParameters) DeepCopy() *InsightInitParameters { + if in == nil { + return nil + } + out := new(InsightInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightList) DeepCopyInto(out *InsightList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Insight, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightList. +func (in *InsightList) DeepCopy() *InsightList { + if in == nil { + return nil + } + out := new(InsightList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InsightObservation) DeepCopyInto(out *InsightObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = new(FiltersObservation) + (*in).DeepCopyInto(*out) + } + if in.GroupByAttribute != nil { + in, out := &in.GroupByAttribute, &out.GroupByAttribute + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightObservation. +func (in *InsightObservation) DeepCopy() *InsightObservation { + if in == nil { + return nil + } + out := new(InsightObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightParameters) DeepCopyInto(out *InsightParameters) { + *out = *in + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = new(FiltersParameters) + (*in).DeepCopyInto(*out) + } + if in.GroupByAttribute != nil { + in, out := &in.GroupByAttribute, &out.GroupByAttribute + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightParameters. +func (in *InsightParameters) DeepCopy() *InsightParameters { + if in == nil { + return nil + } + out := new(InsightParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InsightSpec) DeepCopyInto(out *InsightSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightSpec. +func (in *InsightSpec) DeepCopy() *InsightSpec { + if in == nil { + return nil + } + out := new(InsightSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightStatus) DeepCopyInto(out *InsightStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightStatus. +func (in *InsightStatus) DeepCopy() *InsightStatus { + if in == nil { + return nil + } + out := new(InsightStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeywordInitParameters) DeepCopyInto(out *KeywordInitParameters) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeywordInitParameters. +func (in *KeywordInitParameters) DeepCopy() *KeywordInitParameters { + if in == nil { + return nil + } + out := new(KeywordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeywordObservation) DeepCopyInto(out *KeywordObservation) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeywordObservation. +func (in *KeywordObservation) DeepCopy() *KeywordObservation { + if in == nil { + return nil + } + out := new(KeywordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeywordParameters) DeepCopyInto(out *KeywordParameters) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeywordParameters. +func (in *KeywordParameters) DeepCopy() *KeywordParameters { + if in == nil { + return nil + } + out := new(KeywordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastObservedAtDateRangeInitParameters) DeepCopyInto(out *LastObservedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastObservedAtDateRangeInitParameters. +func (in *LastObservedAtDateRangeInitParameters) DeepCopy() *LastObservedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(LastObservedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LastObservedAtDateRangeObservation) DeepCopyInto(out *LastObservedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastObservedAtDateRangeObservation. +func (in *LastObservedAtDateRangeObservation) DeepCopy() *LastObservedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(LastObservedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastObservedAtDateRangeParameters) DeepCopyInto(out *LastObservedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastObservedAtDateRangeParameters. +func (in *LastObservedAtDateRangeParameters) DeepCopy() *LastObservedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(LastObservedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LastObservedAtInitParameters) DeepCopyInto(out *LastObservedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(LastObservedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastObservedAtInitParameters. +func (in *LastObservedAtInitParameters) DeepCopy() *LastObservedAtInitParameters { + if in == nil { + return nil + } + out := new(LastObservedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastObservedAtObservation) DeepCopyInto(out *LastObservedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(LastObservedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastObservedAtObservation. +func (in *LastObservedAtObservation) DeepCopy() *LastObservedAtObservation { + if in == nil { + return nil + } + out := new(LastObservedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LastObservedAtParameters) DeepCopyInto(out *LastObservedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(LastObservedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastObservedAtParameters. +func (in *LastObservedAtParameters) DeepCopy() *LastObservedAtParameters { + if in == nil { + return nil + } + out := new(LastObservedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwareNameInitParameters) DeepCopyInto(out *MalwareNameInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareNameInitParameters. +func (in *MalwareNameInitParameters) DeepCopy() *MalwareNameInitParameters { + if in == nil { + return nil + } + out := new(MalwareNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MalwareNameObservation) DeepCopyInto(out *MalwareNameObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareNameObservation. +func (in *MalwareNameObservation) DeepCopy() *MalwareNameObservation { + if in == nil { + return nil + } + out := new(MalwareNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwareNameParameters) DeepCopyInto(out *MalwareNameParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareNameParameters. +func (in *MalwareNameParameters) DeepCopy() *MalwareNameParameters { + if in == nil { + return nil + } + out := new(MalwareNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwarePathInitParameters) DeepCopyInto(out *MalwarePathInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwarePathInitParameters. 
+func (in *MalwarePathInitParameters) DeepCopy() *MalwarePathInitParameters { + if in == nil { + return nil + } + out := new(MalwarePathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwarePathObservation) DeepCopyInto(out *MalwarePathObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwarePathObservation. +func (in *MalwarePathObservation) DeepCopy() *MalwarePathObservation { + if in == nil { + return nil + } + out := new(MalwarePathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwarePathParameters) DeepCopyInto(out *MalwarePathParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwarePathParameters. +func (in *MalwarePathParameters) DeepCopy() *MalwarePathParameters { + if in == nil { + return nil + } + out := new(MalwarePathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MalwareStateInitParameters) DeepCopyInto(out *MalwareStateInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareStateInitParameters. +func (in *MalwareStateInitParameters) DeepCopy() *MalwareStateInitParameters { + if in == nil { + return nil + } + out := new(MalwareStateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwareStateObservation) DeepCopyInto(out *MalwareStateObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareStateObservation. +func (in *MalwareStateObservation) DeepCopy() *MalwareStateObservation { + if in == nil { + return nil + } + out := new(MalwareStateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwareStateParameters) DeepCopyInto(out *MalwareStateParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareStateParameters. 
+func (in *MalwareStateParameters) DeepCopy() *MalwareStateParameters { + if in == nil { + return nil + } + out := new(MalwareStateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwareTypeInitParameters) DeepCopyInto(out *MalwareTypeInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareTypeInitParameters. +func (in *MalwareTypeInitParameters) DeepCopy() *MalwareTypeInitParameters { + if in == nil { + return nil + } + out := new(MalwareTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MalwareTypeObservation) DeepCopyInto(out *MalwareTypeObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareTypeObservation. +func (in *MalwareTypeObservation) DeepCopy() *MalwareTypeObservation { + if in == nil { + return nil + } + out := new(MalwareTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MalwareTypeParameters) DeepCopyInto(out *MalwareTypeParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MalwareTypeParameters. +func (in *MalwareTypeParameters) DeepCopy() *MalwareTypeParameters { + if in == nil { + return nil + } + out := new(MalwareTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDestinationDomainInitParameters) DeepCopyInto(out *NetworkDestinationDomainInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationDomainInitParameters. +func (in *NetworkDestinationDomainInitParameters) DeepCopy() *NetworkDestinationDomainInitParameters { + if in == nil { + return nil + } + out := new(NetworkDestinationDomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDestinationDomainObservation) DeepCopyInto(out *NetworkDestinationDomainObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationDomainObservation. 
+func (in *NetworkDestinationDomainObservation) DeepCopy() *NetworkDestinationDomainObservation { + if in == nil { + return nil + } + out := new(NetworkDestinationDomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDestinationDomainParameters) DeepCopyInto(out *NetworkDestinationDomainParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationDomainParameters. +func (in *NetworkDestinationDomainParameters) DeepCopy() *NetworkDestinationDomainParameters { + if in == nil { + return nil + } + out := new(NetworkDestinationDomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDestinationIPv4InitParameters) DeepCopyInto(out *NetworkDestinationIPv4InitParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationIPv4InitParameters. +func (in *NetworkDestinationIPv4InitParameters) DeepCopy() *NetworkDestinationIPv4InitParameters { + if in == nil { + return nil + } + out := new(NetworkDestinationIPv4InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkDestinationIPv4Observation) DeepCopyInto(out *NetworkDestinationIPv4Observation) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationIPv4Observation. +func (in *NetworkDestinationIPv4Observation) DeepCopy() *NetworkDestinationIPv4Observation { + if in == nil { + return nil + } + out := new(NetworkDestinationIPv4Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDestinationIPv4Parameters) DeepCopyInto(out *NetworkDestinationIPv4Parameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationIPv4Parameters. +func (in *NetworkDestinationIPv4Parameters) DeepCopy() *NetworkDestinationIPv4Parameters { + if in == nil { + return nil + } + out := new(NetworkDestinationIPv4Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDestinationIPv6InitParameters) DeepCopyInto(out *NetworkDestinationIPv6InitParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationIPv6InitParameters. 
+func (in *NetworkDestinationIPv6InitParameters) DeepCopy() *NetworkDestinationIPv6InitParameters { + if in == nil { + return nil + } + out := new(NetworkDestinationIPv6InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDestinationIPv6Observation) DeepCopyInto(out *NetworkDestinationIPv6Observation) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationIPv6Observation. +func (in *NetworkDestinationIPv6Observation) DeepCopy() *NetworkDestinationIPv6Observation { + if in == nil { + return nil + } + out := new(NetworkDestinationIPv6Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDestinationIPv6Parameters) DeepCopyInto(out *NetworkDestinationIPv6Parameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationIPv6Parameters. +func (in *NetworkDestinationIPv6Parameters) DeepCopy() *NetworkDestinationIPv6Parameters { + if in == nil { + return nil + } + out := new(NetworkDestinationIPv6Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkDestinationPortInitParameters) DeepCopyInto(out *NetworkDestinationPortInitParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationPortInitParameters. +func (in *NetworkDestinationPortInitParameters) DeepCopy() *NetworkDestinationPortInitParameters { + if in == nil { + return nil + } + out := new(NetworkDestinationPortInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDestinationPortObservation) DeepCopyInto(out *NetworkDestinationPortObservation) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationPortObservation. +func (in *NetworkDestinationPortObservation) DeepCopy() *NetworkDestinationPortObservation { + if in == nil { + return nil + } + out := new(NetworkDestinationPortObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkDestinationPortParameters) DeepCopyInto(out *NetworkDestinationPortParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDestinationPortParameters. +func (in *NetworkDestinationPortParameters) DeepCopy() *NetworkDestinationPortParameters { + if in == nil { + return nil + } + out := new(NetworkDestinationPortParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDirectionInitParameters) DeepCopyInto(out *NetworkDirectionInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDirectionInitParameters. +func (in *NetworkDirectionInitParameters) DeepCopy() *NetworkDirectionInitParameters { + if in == nil { + return nil + } + out := new(NetworkDirectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkDirectionObservation) DeepCopyInto(out *NetworkDirectionObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDirectionObservation. +func (in *NetworkDirectionObservation) DeepCopy() *NetworkDirectionObservation { + if in == nil { + return nil + } + out := new(NetworkDirectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDirectionParameters) DeepCopyInto(out *NetworkDirectionParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDirectionParameters. +func (in *NetworkDirectionParameters) DeepCopy() *NetworkDirectionParameters { + if in == nil { + return nil + } + out := new(NetworkDirectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkProtocolInitParameters) DeepCopyInto(out *NetworkProtocolInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkProtocolInitParameters. 
+func (in *NetworkProtocolInitParameters) DeepCopy() *NetworkProtocolInitParameters { + if in == nil { + return nil + } + out := new(NetworkProtocolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkProtocolObservation) DeepCopyInto(out *NetworkProtocolObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkProtocolObservation. +func (in *NetworkProtocolObservation) DeepCopy() *NetworkProtocolObservation { + if in == nil { + return nil + } + out := new(NetworkProtocolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkProtocolParameters) DeepCopyInto(out *NetworkProtocolParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkProtocolParameters. +func (in *NetworkProtocolParameters) DeepCopy() *NetworkProtocolParameters { + if in == nil { + return nil + } + out := new(NetworkProtocolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkSourceDomainInitParameters) DeepCopyInto(out *NetworkSourceDomainInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceDomainInitParameters. +func (in *NetworkSourceDomainInitParameters) DeepCopy() *NetworkSourceDomainInitParameters { + if in == nil { + return nil + } + out := new(NetworkSourceDomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceDomainObservation) DeepCopyInto(out *NetworkSourceDomainObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceDomainObservation. +func (in *NetworkSourceDomainObservation) DeepCopy() *NetworkSourceDomainObservation { + if in == nil { + return nil + } + out := new(NetworkSourceDomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceDomainParameters) DeepCopyInto(out *NetworkSourceDomainParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceDomainParameters. 
+func (in *NetworkSourceDomainParameters) DeepCopy() *NetworkSourceDomainParameters { + if in == nil { + return nil + } + out := new(NetworkSourceDomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceIPv4InitParameters) DeepCopyInto(out *NetworkSourceIPv4InitParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceIPv4InitParameters. +func (in *NetworkSourceIPv4InitParameters) DeepCopy() *NetworkSourceIPv4InitParameters { + if in == nil { + return nil + } + out := new(NetworkSourceIPv4InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceIPv4Observation) DeepCopyInto(out *NetworkSourceIPv4Observation) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceIPv4Observation. +func (in *NetworkSourceIPv4Observation) DeepCopy() *NetworkSourceIPv4Observation { + if in == nil { + return nil + } + out := new(NetworkSourceIPv4Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceIPv4Parameters) DeepCopyInto(out *NetworkSourceIPv4Parameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceIPv4Parameters. 
+func (in *NetworkSourceIPv4Parameters) DeepCopy() *NetworkSourceIPv4Parameters { + if in == nil { + return nil + } + out := new(NetworkSourceIPv4Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceIPv6InitParameters) DeepCopyInto(out *NetworkSourceIPv6InitParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceIPv6InitParameters. +func (in *NetworkSourceIPv6InitParameters) DeepCopy() *NetworkSourceIPv6InitParameters { + if in == nil { + return nil + } + out := new(NetworkSourceIPv6InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceIPv6Observation) DeepCopyInto(out *NetworkSourceIPv6Observation) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceIPv6Observation. +func (in *NetworkSourceIPv6Observation) DeepCopy() *NetworkSourceIPv6Observation { + if in == nil { + return nil + } + out := new(NetworkSourceIPv6Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceIPv6Parameters) DeepCopyInto(out *NetworkSourceIPv6Parameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceIPv6Parameters. 
+func (in *NetworkSourceIPv6Parameters) DeepCopy() *NetworkSourceIPv6Parameters { + if in == nil { + return nil + } + out := new(NetworkSourceIPv6Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceMacInitParameters) DeepCopyInto(out *NetworkSourceMacInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceMacInitParameters. +func (in *NetworkSourceMacInitParameters) DeepCopy() *NetworkSourceMacInitParameters { + if in == nil { + return nil + } + out := new(NetworkSourceMacInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourceMacObservation) DeepCopyInto(out *NetworkSourceMacObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceMacObservation. +func (in *NetworkSourceMacObservation) DeepCopy() *NetworkSourceMacObservation { + if in == nil { + return nil + } + out := new(NetworkSourceMacObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkSourceMacParameters) DeepCopyInto(out *NetworkSourceMacParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourceMacParameters. +func (in *NetworkSourceMacParameters) DeepCopy() *NetworkSourceMacParameters { + if in == nil { + return nil + } + out := new(NetworkSourceMacParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourcePortInitParameters) DeepCopyInto(out *NetworkSourcePortInitParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourcePortInitParameters. +func (in *NetworkSourcePortInitParameters) DeepCopy() *NetworkSourcePortInitParameters { + if in == nil { + return nil + } + out := new(NetworkSourcePortInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkSourcePortObservation) DeepCopyInto(out *NetworkSourcePortObservation) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourcePortObservation. +func (in *NetworkSourcePortObservation) DeepCopy() *NetworkSourcePortObservation { + if in == nil { + return nil + } + out := new(NetworkSourcePortObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSourcePortParameters) DeepCopyInto(out *NetworkSourcePortParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSourcePortParameters. +func (in *NetworkSourcePortParameters) DeepCopy() *NetworkSourcePortParameters { + if in == nil { + return nil + } + out := new(NetworkSourcePortParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NoteTextInitParameters) DeepCopyInto(out *NoteTextInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteTextInitParameters. +func (in *NoteTextInitParameters) DeepCopy() *NoteTextInitParameters { + if in == nil { + return nil + } + out := new(NoteTextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoteTextObservation) DeepCopyInto(out *NoteTextObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteTextObservation. +func (in *NoteTextObservation) DeepCopy() *NoteTextObservation { + if in == nil { + return nil + } + out := new(NoteTextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoteTextParameters) DeepCopyInto(out *NoteTextParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteTextParameters. 
+func (in *NoteTextParameters) DeepCopy() *NoteTextParameters { + if in == nil { + return nil + } + out := new(NoteTextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoteUpdatedAtDateRangeInitParameters) DeepCopyInto(out *NoteUpdatedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteUpdatedAtDateRangeInitParameters. +func (in *NoteUpdatedAtDateRangeInitParameters) DeepCopy() *NoteUpdatedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(NoteUpdatedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoteUpdatedAtDateRangeObservation) DeepCopyInto(out *NoteUpdatedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteUpdatedAtDateRangeObservation. +func (in *NoteUpdatedAtDateRangeObservation) DeepCopy() *NoteUpdatedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(NoteUpdatedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NoteUpdatedAtDateRangeParameters) DeepCopyInto(out *NoteUpdatedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteUpdatedAtDateRangeParameters. +func (in *NoteUpdatedAtDateRangeParameters) DeepCopy() *NoteUpdatedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(NoteUpdatedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoteUpdatedAtInitParameters) DeepCopyInto(out *NoteUpdatedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(NoteUpdatedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteUpdatedAtInitParameters. +func (in *NoteUpdatedAtInitParameters) DeepCopy() *NoteUpdatedAtInitParameters { + if in == nil { + return nil + } + out := new(NoteUpdatedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NoteUpdatedAtObservation) DeepCopyInto(out *NoteUpdatedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(NoteUpdatedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteUpdatedAtObservation. +func (in *NoteUpdatedAtObservation) DeepCopy() *NoteUpdatedAtObservation { + if in == nil { + return nil + } + out := new(NoteUpdatedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoteUpdatedAtParameters) DeepCopyInto(out *NoteUpdatedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(NoteUpdatedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteUpdatedAtParameters. +func (in *NoteUpdatedAtParameters) DeepCopy() *NoteUpdatedAtParameters { + if in == nil { + return nil + } + out := new(NoteUpdatedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NoteUpdatedByInitParameters) DeepCopyInto(out *NoteUpdatedByInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteUpdatedByInitParameters. +func (in *NoteUpdatedByInitParameters) DeepCopy() *NoteUpdatedByInitParameters { + if in == nil { + return nil + } + out := new(NoteUpdatedByInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoteUpdatedByObservation) DeepCopyInto(out *NoteUpdatedByObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteUpdatedByObservation. +func (in *NoteUpdatedByObservation) DeepCopy() *NoteUpdatedByObservation { + if in == nil { + return nil + } + out := new(NoteUpdatedByObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoteUpdatedByParameters) DeepCopyInto(out *NoteUpdatedByParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoteUpdatedByParameters. 
+func (in *NoteUpdatedByParameters) DeepCopy() *NoteUpdatedByParameters { + if in == nil { + return nil + } + out := new(NoteUpdatedByParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessLaunchedAtDateRangeInitParameters) DeepCopyInto(out *ProcessLaunchedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessLaunchedAtDateRangeInitParameters. +func (in *ProcessLaunchedAtDateRangeInitParameters) DeepCopy() *ProcessLaunchedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(ProcessLaunchedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessLaunchedAtDateRangeObservation) DeepCopyInto(out *ProcessLaunchedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessLaunchedAtDateRangeObservation. +func (in *ProcessLaunchedAtDateRangeObservation) DeepCopy() *ProcessLaunchedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(ProcessLaunchedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessLaunchedAtDateRangeParameters) DeepCopyInto(out *ProcessLaunchedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessLaunchedAtDateRangeParameters. +func (in *ProcessLaunchedAtDateRangeParameters) DeepCopy() *ProcessLaunchedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(ProcessLaunchedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessLaunchedAtInitParameters) DeepCopyInto(out *ProcessLaunchedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ProcessLaunchedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessLaunchedAtInitParameters. +func (in *ProcessLaunchedAtInitParameters) DeepCopy() *ProcessLaunchedAtInitParameters { + if in == nil { + return nil + } + out := new(ProcessLaunchedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessLaunchedAtObservation) DeepCopyInto(out *ProcessLaunchedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ProcessLaunchedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessLaunchedAtObservation. +func (in *ProcessLaunchedAtObservation) DeepCopy() *ProcessLaunchedAtObservation { + if in == nil { + return nil + } + out := new(ProcessLaunchedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessLaunchedAtParameters) DeepCopyInto(out *ProcessLaunchedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ProcessLaunchedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessLaunchedAtParameters. +func (in *ProcessLaunchedAtParameters) DeepCopy() *ProcessLaunchedAtParameters { + if in == nil { + return nil + } + out := new(ProcessLaunchedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessNameInitParameters) DeepCopyInto(out *ProcessNameInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessNameInitParameters. +func (in *ProcessNameInitParameters) DeepCopy() *ProcessNameInitParameters { + if in == nil { + return nil + } + out := new(ProcessNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessNameObservation) DeepCopyInto(out *ProcessNameObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessNameObservation. +func (in *ProcessNameObservation) DeepCopy() *ProcessNameObservation { + if in == nil { + return nil + } + out := new(ProcessNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessNameParameters) DeepCopyInto(out *ProcessNameParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessNameParameters. 
+func (in *ProcessNameParameters) DeepCopy() *ProcessNameParameters { + if in == nil { + return nil + } + out := new(ProcessNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessParentPidInitParameters) DeepCopyInto(out *ProcessParentPidInitParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessParentPidInitParameters. +func (in *ProcessParentPidInitParameters) DeepCopy() *ProcessParentPidInitParameters { + if in == nil { + return nil + } + out := new(ProcessParentPidInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessParentPidObservation) DeepCopyInto(out *ProcessParentPidObservation) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessParentPidObservation. +func (in *ProcessParentPidObservation) DeepCopy() *ProcessParentPidObservation { + if in == nil { + return nil + } + out := new(ProcessParentPidObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessParentPidParameters) DeepCopyInto(out *ProcessParentPidParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessParentPidParameters. +func (in *ProcessParentPidParameters) DeepCopy() *ProcessParentPidParameters { + if in == nil { + return nil + } + out := new(ProcessParentPidParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessPathInitParameters) DeepCopyInto(out *ProcessPathInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessPathInitParameters. +func (in *ProcessPathInitParameters) DeepCopy() *ProcessPathInitParameters { + if in == nil { + return nil + } + out := new(ProcessPathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessPathObservation) DeepCopyInto(out *ProcessPathObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessPathObservation. 
+func (in *ProcessPathObservation) DeepCopy() *ProcessPathObservation { + if in == nil { + return nil + } + out := new(ProcessPathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessPathParameters) DeepCopyInto(out *ProcessPathParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessPathParameters. +func (in *ProcessPathParameters) DeepCopy() *ProcessPathParameters { + if in == nil { + return nil + } + out := new(ProcessPathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessPidInitParameters) DeepCopyInto(out *ProcessPidInitParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessPidInitParameters. +func (in *ProcessPidInitParameters) DeepCopy() *ProcessPidInitParameters { + if in == nil { + return nil + } + out := new(ProcessPidInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessPidObservation) DeepCopyInto(out *ProcessPidObservation) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessPidObservation. +func (in *ProcessPidObservation) DeepCopy() *ProcessPidObservation { + if in == nil { + return nil + } + out := new(ProcessPidObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessPidParameters) DeepCopyInto(out *ProcessPidParameters) { + *out = *in + if in.Eq != nil { + in, out := &in.Eq, &out.Eq + *out = new(string) + **out = **in + } + if in.Gte != nil { + in, out := &in.Gte, &out.Gte + *out = new(string) + **out = **in + } + if in.Lte != nil { + in, out := &in.Lte, &out.Lte + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessPidParameters. +func (in *ProcessPidParameters) DeepCopy() *ProcessPidParameters { + if in == nil { + return nil + } + out := new(ProcessPidParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessTerminatedAtDateRangeInitParameters) DeepCopyInto(out *ProcessTerminatedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessTerminatedAtDateRangeInitParameters. +func (in *ProcessTerminatedAtDateRangeInitParameters) DeepCopy() *ProcessTerminatedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(ProcessTerminatedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessTerminatedAtDateRangeObservation) DeepCopyInto(out *ProcessTerminatedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessTerminatedAtDateRangeObservation. +func (in *ProcessTerminatedAtDateRangeObservation) DeepCopy() *ProcessTerminatedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(ProcessTerminatedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessTerminatedAtDateRangeParameters) DeepCopyInto(out *ProcessTerminatedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessTerminatedAtDateRangeParameters. +func (in *ProcessTerminatedAtDateRangeParameters) DeepCopy() *ProcessTerminatedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(ProcessTerminatedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessTerminatedAtInitParameters) DeepCopyInto(out *ProcessTerminatedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ProcessTerminatedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessTerminatedAtInitParameters. +func (in *ProcessTerminatedAtInitParameters) DeepCopy() *ProcessTerminatedAtInitParameters { + if in == nil { + return nil + } + out := new(ProcessTerminatedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProcessTerminatedAtObservation) DeepCopyInto(out *ProcessTerminatedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ProcessTerminatedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessTerminatedAtObservation. +func (in *ProcessTerminatedAtObservation) DeepCopy() *ProcessTerminatedAtObservation { + if in == nil { + return nil + } + out := new(ProcessTerminatedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProcessTerminatedAtParameters) DeepCopyInto(out *ProcessTerminatedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ProcessTerminatedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessTerminatedAtParameters. +func (in *ProcessTerminatedAtParameters) DeepCopy() *ProcessTerminatedAtParameters { + if in == nil { + return nil + } + out := new(ProcessTerminatedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProductArnInitParameters) DeepCopyInto(out *ProductArnInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductArnInitParameters. +func (in *ProductArnInitParameters) DeepCopy() *ProductArnInitParameters { + if in == nil { + return nil + } + out := new(ProductArnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductArnObservation) DeepCopyInto(out *ProductArnObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductArnObservation. +func (in *ProductArnObservation) DeepCopy() *ProductArnObservation { + if in == nil { + return nil + } + out := new(ProductArnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductArnParameters) DeepCopyInto(out *ProductArnParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductArnParameters. 
+func (in *ProductArnParameters) DeepCopy() *ProductArnParameters { + if in == nil { + return nil + } + out := new(ProductArnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductFieldsInitParameters) DeepCopyInto(out *ProductFieldsInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductFieldsInitParameters. +func (in *ProductFieldsInitParameters) DeepCopy() *ProductFieldsInitParameters { + if in == nil { + return nil + } + out := new(ProductFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductFieldsObservation) DeepCopyInto(out *ProductFieldsObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductFieldsObservation. +func (in *ProductFieldsObservation) DeepCopy() *ProductFieldsObservation { + if in == nil { + return nil + } + out := new(ProductFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProductFieldsParameters) DeepCopyInto(out *ProductFieldsParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductFieldsParameters. +func (in *ProductFieldsParameters) DeepCopy() *ProductFieldsParameters { + if in == nil { + return nil + } + out := new(ProductFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductNameInitParameters) DeepCopyInto(out *ProductNameInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductNameInitParameters. +func (in *ProductNameInitParameters) DeepCopy() *ProductNameInitParameters { + if in == nil { + return nil + } + out := new(ProductNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductNameObservation) DeepCopyInto(out *ProductNameObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductNameObservation. 
+func (in *ProductNameObservation) DeepCopy() *ProductNameObservation { + if in == nil { + return nil + } + out := new(ProductNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductNameParameters) DeepCopyInto(out *ProductNameParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductNameParameters. +func (in *ProductNameParameters) DeepCopy() *ProductNameParameters { + if in == nil { + return nil + } + out := new(ProductNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendationTextInitParameters) DeepCopyInto(out *RecommendationTextInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendationTextInitParameters. +func (in *RecommendationTextInitParameters) DeepCopy() *RecommendationTextInitParameters { + if in == nil { + return nil + } + out := new(RecommendationTextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecommendationTextObservation) DeepCopyInto(out *RecommendationTextObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendationTextObservation. +func (in *RecommendationTextObservation) DeepCopy() *RecommendationTextObservation { + if in == nil { + return nil + } + out := new(RecommendationTextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendationTextParameters) DeepCopyInto(out *RecommendationTextParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendationTextParameters. +func (in *RecommendationTextParameters) DeepCopy() *RecommendationTextParameters { + if in == nil { + return nil + } + out := new(RecommendationTextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordStateInitParameters) DeepCopyInto(out *RecordStateInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordStateInitParameters. 
+func (in *RecordStateInitParameters) DeepCopy() *RecordStateInitParameters { + if in == nil { + return nil + } + out := new(RecordStateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordStateObservation) DeepCopyInto(out *RecordStateObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordStateObservation. +func (in *RecordStateObservation) DeepCopy() *RecordStateObservation { + if in == nil { + return nil + } + out := new(RecordStateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordStateParameters) DeepCopyInto(out *RecordStateParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordStateParameters. +func (in *RecordStateParameters) DeepCopy() *RecordStateParameters { + if in == nil { + return nil + } + out := new(RecordStateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RelatedFindingsIDInitParameters) DeepCopyInto(out *RelatedFindingsIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedFindingsIDInitParameters. +func (in *RelatedFindingsIDInitParameters) DeepCopy() *RelatedFindingsIDInitParameters { + if in == nil { + return nil + } + out := new(RelatedFindingsIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelatedFindingsIDObservation) DeepCopyInto(out *RelatedFindingsIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedFindingsIDObservation. +func (in *RelatedFindingsIDObservation) DeepCopy() *RelatedFindingsIDObservation { + if in == nil { + return nil + } + out := new(RelatedFindingsIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelatedFindingsIDParameters) DeepCopyInto(out *RelatedFindingsIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedFindingsIDParameters. 
+func (in *RelatedFindingsIDParameters) DeepCopy() *RelatedFindingsIDParameters { + if in == nil { + return nil + } + out := new(RelatedFindingsIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelatedFindingsProductArnInitParameters) DeepCopyInto(out *RelatedFindingsProductArnInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedFindingsProductArnInitParameters. +func (in *RelatedFindingsProductArnInitParameters) DeepCopy() *RelatedFindingsProductArnInitParameters { + if in == nil { + return nil + } + out := new(RelatedFindingsProductArnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelatedFindingsProductArnObservation) DeepCopyInto(out *RelatedFindingsProductArnObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedFindingsProductArnObservation. +func (in *RelatedFindingsProductArnObservation) DeepCopy() *RelatedFindingsProductArnObservation { + if in == nil { + return nil + } + out := new(RelatedFindingsProductArnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RelatedFindingsProductArnParameters) DeepCopyInto(out *RelatedFindingsProductArnParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedFindingsProductArnParameters. +func (in *RelatedFindingsProductArnParameters) DeepCopy() *RelatedFindingsProductArnParameters { + if in == nil { + return nil + } + out := new(RelatedFindingsProductArnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceIAMInstanceProfileArnInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceIAMInstanceProfileArnInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceIAMInstanceProfileArnInitParameters. +func (in *ResourceAwsEC2InstanceIAMInstanceProfileArnInitParameters) DeepCopy() *ResourceAwsEC2InstanceIAMInstanceProfileArnInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceIAMInstanceProfileArnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceIAMInstanceProfileArnObservation) DeepCopyInto(out *ResourceAwsEC2InstanceIAMInstanceProfileArnObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceIAMInstanceProfileArnObservation. +func (in *ResourceAwsEC2InstanceIAMInstanceProfileArnObservation) DeepCopy() *ResourceAwsEC2InstanceIAMInstanceProfileArnObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceIAMInstanceProfileArnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceIAMInstanceProfileArnParameters) DeepCopyInto(out *ResourceAwsEC2InstanceIAMInstanceProfileArnParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceIAMInstanceProfileArnParameters. +func (in *ResourceAwsEC2InstanceIAMInstanceProfileArnParameters) DeepCopy() *ResourceAwsEC2InstanceIAMInstanceProfileArnParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceIAMInstanceProfileArnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceIPv4AddressesInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceIPv4AddressesInitParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceIPv4AddressesInitParameters. +func (in *ResourceAwsEC2InstanceIPv4AddressesInitParameters) DeepCopy() *ResourceAwsEC2InstanceIPv4AddressesInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceIPv4AddressesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceIPv4AddressesObservation) DeepCopyInto(out *ResourceAwsEC2InstanceIPv4AddressesObservation) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceIPv4AddressesObservation. +func (in *ResourceAwsEC2InstanceIPv4AddressesObservation) DeepCopy() *ResourceAwsEC2InstanceIPv4AddressesObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceIPv4AddressesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceIPv4AddressesParameters) DeepCopyInto(out *ResourceAwsEC2InstanceIPv4AddressesParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceIPv4AddressesParameters. 
+func (in *ResourceAwsEC2InstanceIPv4AddressesParameters) DeepCopy() *ResourceAwsEC2InstanceIPv4AddressesParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceIPv4AddressesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceIPv6AddressesInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceIPv6AddressesInitParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceIPv6AddressesInitParameters. +func (in *ResourceAwsEC2InstanceIPv6AddressesInitParameters) DeepCopy() *ResourceAwsEC2InstanceIPv6AddressesInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceIPv6AddressesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceIPv6AddressesObservation) DeepCopyInto(out *ResourceAwsEC2InstanceIPv6AddressesObservation) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceIPv6AddressesObservation. +func (in *ResourceAwsEC2InstanceIPv6AddressesObservation) DeepCopy() *ResourceAwsEC2InstanceIPv6AddressesObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceIPv6AddressesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceIPv6AddressesParameters) DeepCopyInto(out *ResourceAwsEC2InstanceIPv6AddressesParameters) { + *out = *in + if in.Cidr != nil { + in, out := &in.Cidr, &out.Cidr + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceIPv6AddressesParameters. +func (in *ResourceAwsEC2InstanceIPv6AddressesParameters) DeepCopy() *ResourceAwsEC2InstanceIPv6AddressesParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceIPv6AddressesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceImageIDInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceImageIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceImageIDInitParameters. +func (in *ResourceAwsEC2InstanceImageIDInitParameters) DeepCopy() *ResourceAwsEC2InstanceImageIDInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceImageIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceImageIDObservation) DeepCopyInto(out *ResourceAwsEC2InstanceImageIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceImageIDObservation. +func (in *ResourceAwsEC2InstanceImageIDObservation) DeepCopy() *ResourceAwsEC2InstanceImageIDObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceImageIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceImageIDParameters) DeepCopyInto(out *ResourceAwsEC2InstanceImageIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceImageIDParameters. +func (in *ResourceAwsEC2InstanceImageIDParameters) DeepCopy() *ResourceAwsEC2InstanceImageIDParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceImageIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceKeyNameInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceKeyNameInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceKeyNameInitParameters. +func (in *ResourceAwsEC2InstanceKeyNameInitParameters) DeepCopy() *ResourceAwsEC2InstanceKeyNameInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceKeyNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceKeyNameObservation) DeepCopyInto(out *ResourceAwsEC2InstanceKeyNameObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceKeyNameObservation. +func (in *ResourceAwsEC2InstanceKeyNameObservation) DeepCopy() *ResourceAwsEC2InstanceKeyNameObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceKeyNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceKeyNameParameters) DeepCopyInto(out *ResourceAwsEC2InstanceKeyNameParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceKeyNameParameters. +func (in *ResourceAwsEC2InstanceKeyNameParameters) DeepCopy() *ResourceAwsEC2InstanceKeyNameParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceKeyNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceLaunchedAtDateRangeInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceLaunchedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceLaunchedAtDateRangeInitParameters. +func (in *ResourceAwsEC2InstanceLaunchedAtDateRangeInitParameters) DeepCopy() *ResourceAwsEC2InstanceLaunchedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceLaunchedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceLaunchedAtDateRangeObservation) DeepCopyInto(out *ResourceAwsEC2InstanceLaunchedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceLaunchedAtDateRangeObservation. +func (in *ResourceAwsEC2InstanceLaunchedAtDateRangeObservation) DeepCopy() *ResourceAwsEC2InstanceLaunchedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceLaunchedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceLaunchedAtDateRangeParameters) DeepCopyInto(out *ResourceAwsEC2InstanceLaunchedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceLaunchedAtDateRangeParameters. +func (in *ResourceAwsEC2InstanceLaunchedAtDateRangeParameters) DeepCopy() *ResourceAwsEC2InstanceLaunchedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceLaunchedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceLaunchedAtInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceLaunchedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ResourceAwsEC2InstanceLaunchedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceLaunchedAtInitParameters. +func (in *ResourceAwsEC2InstanceLaunchedAtInitParameters) DeepCopy() *ResourceAwsEC2InstanceLaunchedAtInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceLaunchedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceLaunchedAtObservation) DeepCopyInto(out *ResourceAwsEC2InstanceLaunchedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ResourceAwsEC2InstanceLaunchedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceLaunchedAtObservation. +func (in *ResourceAwsEC2InstanceLaunchedAtObservation) DeepCopy() *ResourceAwsEC2InstanceLaunchedAtObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceLaunchedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ResourceAwsEC2InstanceLaunchedAtParameters) DeepCopyInto(out *ResourceAwsEC2InstanceLaunchedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ResourceAwsEC2InstanceLaunchedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceLaunchedAtParameters. +func (in *ResourceAwsEC2InstanceLaunchedAtParameters) DeepCopy() *ResourceAwsEC2InstanceLaunchedAtParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceLaunchedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceSubnetIDInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceSubnetIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceSubnetIDInitParameters. +func (in *ResourceAwsEC2InstanceSubnetIDInitParameters) DeepCopy() *ResourceAwsEC2InstanceSubnetIDInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceSubnetIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceSubnetIDObservation) DeepCopyInto(out *ResourceAwsEC2InstanceSubnetIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceSubnetIDObservation. +func (in *ResourceAwsEC2InstanceSubnetIDObservation) DeepCopy() *ResourceAwsEC2InstanceSubnetIDObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceSubnetIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceSubnetIDParameters) DeepCopyInto(out *ResourceAwsEC2InstanceSubnetIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceSubnetIDParameters. +func (in *ResourceAwsEC2InstanceSubnetIDParameters) DeepCopy() *ResourceAwsEC2InstanceSubnetIDParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceSubnetIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceTypeInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceTypeInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceTypeInitParameters. +func (in *ResourceAwsEC2InstanceTypeInitParameters) DeepCopy() *ResourceAwsEC2InstanceTypeInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceTypeObservation) DeepCopyInto(out *ResourceAwsEC2InstanceTypeObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceTypeObservation. +func (in *ResourceAwsEC2InstanceTypeObservation) DeepCopy() *ResourceAwsEC2InstanceTypeObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceTypeParameters) DeepCopyInto(out *ResourceAwsEC2InstanceTypeParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceTypeParameters. +func (in *ResourceAwsEC2InstanceTypeParameters) DeepCopy() *ResourceAwsEC2InstanceTypeParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceVPCIDInitParameters) DeepCopyInto(out *ResourceAwsEC2InstanceVPCIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceVPCIDInitParameters. +func (in *ResourceAwsEC2InstanceVPCIDInitParameters) DeepCopy() *ResourceAwsEC2InstanceVPCIDInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceVPCIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsEC2InstanceVPCIDObservation) DeepCopyInto(out *ResourceAwsEC2InstanceVPCIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceVPCIDObservation. +func (in *ResourceAwsEC2InstanceVPCIDObservation) DeepCopy() *ResourceAwsEC2InstanceVPCIDObservation { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceVPCIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsEC2InstanceVPCIDParameters) DeepCopyInto(out *ResourceAwsEC2InstanceVPCIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsEC2InstanceVPCIDParameters. +func (in *ResourceAwsEC2InstanceVPCIDParameters) DeepCopy() *ResourceAwsEC2InstanceVPCIDParameters { + if in == nil { + return nil + } + out := new(ResourceAwsEC2InstanceVPCIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsIAMAccessKeyCreatedAtDateRangeInitParameters) DeepCopyInto(out *ResourceAwsIAMAccessKeyCreatedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyCreatedAtDateRangeInitParameters. +func (in *ResourceAwsIAMAccessKeyCreatedAtDateRangeInitParameters) DeepCopy() *ResourceAwsIAMAccessKeyCreatedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyCreatedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsIAMAccessKeyCreatedAtDateRangeObservation) DeepCopyInto(out *ResourceAwsIAMAccessKeyCreatedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyCreatedAtDateRangeObservation. +func (in *ResourceAwsIAMAccessKeyCreatedAtDateRangeObservation) DeepCopy() *ResourceAwsIAMAccessKeyCreatedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyCreatedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsIAMAccessKeyCreatedAtDateRangeParameters) DeepCopyInto(out *ResourceAwsIAMAccessKeyCreatedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyCreatedAtDateRangeParameters. +func (in *ResourceAwsIAMAccessKeyCreatedAtDateRangeParameters) DeepCopy() *ResourceAwsIAMAccessKeyCreatedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyCreatedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsIAMAccessKeyCreatedAtInitParameters) DeepCopyInto(out *ResourceAwsIAMAccessKeyCreatedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ResourceAwsIAMAccessKeyCreatedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyCreatedAtInitParameters. +func (in *ResourceAwsIAMAccessKeyCreatedAtInitParameters) DeepCopy() *ResourceAwsIAMAccessKeyCreatedAtInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyCreatedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsIAMAccessKeyCreatedAtObservation) DeepCopyInto(out *ResourceAwsIAMAccessKeyCreatedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ResourceAwsIAMAccessKeyCreatedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyCreatedAtObservation. +func (in *ResourceAwsIAMAccessKeyCreatedAtObservation) DeepCopy() *ResourceAwsIAMAccessKeyCreatedAtObservation { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyCreatedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsIAMAccessKeyCreatedAtParameters) DeepCopyInto(out *ResourceAwsIAMAccessKeyCreatedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ResourceAwsIAMAccessKeyCreatedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyCreatedAtParameters. +func (in *ResourceAwsIAMAccessKeyCreatedAtParameters) DeepCopy() *ResourceAwsIAMAccessKeyCreatedAtParameters { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyCreatedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsIAMAccessKeyStatusInitParameters) DeepCopyInto(out *ResourceAwsIAMAccessKeyStatusInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyStatusInitParameters. +func (in *ResourceAwsIAMAccessKeyStatusInitParameters) DeepCopy() *ResourceAwsIAMAccessKeyStatusInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyStatusInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsIAMAccessKeyStatusObservation) DeepCopyInto(out *ResourceAwsIAMAccessKeyStatusObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyStatusObservation. +func (in *ResourceAwsIAMAccessKeyStatusObservation) DeepCopy() *ResourceAwsIAMAccessKeyStatusObservation { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyStatusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsIAMAccessKeyStatusParameters) DeepCopyInto(out *ResourceAwsIAMAccessKeyStatusParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyStatusParameters. +func (in *ResourceAwsIAMAccessKeyStatusParameters) DeepCopy() *ResourceAwsIAMAccessKeyStatusParameters { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyStatusParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsIAMAccessKeyUserNameInitParameters) DeepCopyInto(out *ResourceAwsIAMAccessKeyUserNameInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyUserNameInitParameters. +func (in *ResourceAwsIAMAccessKeyUserNameInitParameters) DeepCopy() *ResourceAwsIAMAccessKeyUserNameInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyUserNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsIAMAccessKeyUserNameObservation) DeepCopyInto(out *ResourceAwsIAMAccessKeyUserNameObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyUserNameObservation. +func (in *ResourceAwsIAMAccessKeyUserNameObservation) DeepCopy() *ResourceAwsIAMAccessKeyUserNameObservation { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyUserNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsIAMAccessKeyUserNameParameters) DeepCopyInto(out *ResourceAwsIAMAccessKeyUserNameParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsIAMAccessKeyUserNameParameters. +func (in *ResourceAwsIAMAccessKeyUserNameParameters) DeepCopy() *ResourceAwsIAMAccessKeyUserNameParameters { + if in == nil { + return nil + } + out := new(ResourceAwsIAMAccessKeyUserNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsS3BucketOwnerIDInitParameters) DeepCopyInto(out *ResourceAwsS3BucketOwnerIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsS3BucketOwnerIDInitParameters. +func (in *ResourceAwsS3BucketOwnerIDInitParameters) DeepCopy() *ResourceAwsS3BucketOwnerIDInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsS3BucketOwnerIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsS3BucketOwnerIDObservation) DeepCopyInto(out *ResourceAwsS3BucketOwnerIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsS3BucketOwnerIDObservation. +func (in *ResourceAwsS3BucketOwnerIDObservation) DeepCopy() *ResourceAwsS3BucketOwnerIDObservation { + if in == nil { + return nil + } + out := new(ResourceAwsS3BucketOwnerIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsS3BucketOwnerIDParameters) DeepCopyInto(out *ResourceAwsS3BucketOwnerIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsS3BucketOwnerIDParameters. +func (in *ResourceAwsS3BucketOwnerIDParameters) DeepCopy() *ResourceAwsS3BucketOwnerIDParameters { + if in == nil { + return nil + } + out := new(ResourceAwsS3BucketOwnerIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsS3BucketOwnerNameInitParameters) DeepCopyInto(out *ResourceAwsS3BucketOwnerNameInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsS3BucketOwnerNameInitParameters. +func (in *ResourceAwsS3BucketOwnerNameInitParameters) DeepCopy() *ResourceAwsS3BucketOwnerNameInitParameters { + if in == nil { + return nil + } + out := new(ResourceAwsS3BucketOwnerNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceAwsS3BucketOwnerNameObservation) DeepCopyInto(out *ResourceAwsS3BucketOwnerNameObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsS3BucketOwnerNameObservation. +func (in *ResourceAwsS3BucketOwnerNameObservation) DeepCopy() *ResourceAwsS3BucketOwnerNameObservation { + if in == nil { + return nil + } + out := new(ResourceAwsS3BucketOwnerNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAwsS3BucketOwnerNameParameters) DeepCopyInto(out *ResourceAwsS3BucketOwnerNameParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAwsS3BucketOwnerNameParameters. +func (in *ResourceAwsS3BucketOwnerNameParameters) DeepCopy() *ResourceAwsS3BucketOwnerNameParameters { + if in == nil { + return nil + } + out := new(ResourceAwsS3BucketOwnerNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceContainerImageIDInitParameters) DeepCopyInto(out *ResourceContainerImageIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerImageIDInitParameters. +func (in *ResourceContainerImageIDInitParameters) DeepCopy() *ResourceContainerImageIDInitParameters { + if in == nil { + return nil + } + out := new(ResourceContainerImageIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceContainerImageIDObservation) DeepCopyInto(out *ResourceContainerImageIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerImageIDObservation. +func (in *ResourceContainerImageIDObservation) DeepCopy() *ResourceContainerImageIDObservation { + if in == nil { + return nil + } + out := new(ResourceContainerImageIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceContainerImageIDParameters) DeepCopyInto(out *ResourceContainerImageIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerImageIDParameters. +func (in *ResourceContainerImageIDParameters) DeepCopy() *ResourceContainerImageIDParameters { + if in == nil { + return nil + } + out := new(ResourceContainerImageIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceContainerImageNameInitParameters) DeepCopyInto(out *ResourceContainerImageNameInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerImageNameInitParameters. +func (in *ResourceContainerImageNameInitParameters) DeepCopy() *ResourceContainerImageNameInitParameters { + if in == nil { + return nil + } + out := new(ResourceContainerImageNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceContainerImageNameObservation) DeepCopyInto(out *ResourceContainerImageNameObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerImageNameObservation. +func (in *ResourceContainerImageNameObservation) DeepCopy() *ResourceContainerImageNameObservation { + if in == nil { + return nil + } + out := new(ResourceContainerImageNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceContainerImageNameParameters) DeepCopyInto(out *ResourceContainerImageNameParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerImageNameParameters. +func (in *ResourceContainerImageNameParameters) DeepCopy() *ResourceContainerImageNameParameters { + if in == nil { + return nil + } + out := new(ResourceContainerImageNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceContainerLaunchedAtDateRangeInitParameters) DeepCopyInto(out *ResourceContainerLaunchedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerLaunchedAtDateRangeInitParameters. +func (in *ResourceContainerLaunchedAtDateRangeInitParameters) DeepCopy() *ResourceContainerLaunchedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(ResourceContainerLaunchedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceContainerLaunchedAtDateRangeObservation) DeepCopyInto(out *ResourceContainerLaunchedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerLaunchedAtDateRangeObservation. +func (in *ResourceContainerLaunchedAtDateRangeObservation) DeepCopy() *ResourceContainerLaunchedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(ResourceContainerLaunchedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceContainerLaunchedAtDateRangeParameters) DeepCopyInto(out *ResourceContainerLaunchedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerLaunchedAtDateRangeParameters. +func (in *ResourceContainerLaunchedAtDateRangeParameters) DeepCopy() *ResourceContainerLaunchedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(ResourceContainerLaunchedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceContainerLaunchedAtInitParameters) DeepCopyInto(out *ResourceContainerLaunchedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ResourceContainerLaunchedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerLaunchedAtInitParameters. +func (in *ResourceContainerLaunchedAtInitParameters) DeepCopy() *ResourceContainerLaunchedAtInitParameters { + if in == nil { + return nil + } + out := new(ResourceContainerLaunchedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceContainerLaunchedAtObservation) DeepCopyInto(out *ResourceContainerLaunchedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ResourceContainerLaunchedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerLaunchedAtObservation. +func (in *ResourceContainerLaunchedAtObservation) DeepCopy() *ResourceContainerLaunchedAtObservation { + if in == nil { + return nil + } + out := new(ResourceContainerLaunchedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceContainerLaunchedAtParameters) DeepCopyInto(out *ResourceContainerLaunchedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ResourceContainerLaunchedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerLaunchedAtParameters. +func (in *ResourceContainerLaunchedAtParameters) DeepCopy() *ResourceContainerLaunchedAtParameters { + if in == nil { + return nil + } + out := new(ResourceContainerLaunchedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceContainerNameInitParameters) DeepCopyInto(out *ResourceContainerNameInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerNameInitParameters. +func (in *ResourceContainerNameInitParameters) DeepCopy() *ResourceContainerNameInitParameters { + if in == nil { + return nil + } + out := new(ResourceContainerNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceContainerNameObservation) DeepCopyInto(out *ResourceContainerNameObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerNameObservation. +func (in *ResourceContainerNameObservation) DeepCopy() *ResourceContainerNameObservation { + if in == nil { + return nil + } + out := new(ResourceContainerNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceContainerNameParameters) DeepCopyInto(out *ResourceContainerNameParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceContainerNameParameters. +func (in *ResourceContainerNameParameters) DeepCopy() *ResourceContainerNameParameters { + if in == nil { + return nil + } + out := new(ResourceContainerNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDetailsOtherInitParameters) DeepCopyInto(out *ResourceDetailsOtherInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDetailsOtherInitParameters. +func (in *ResourceDetailsOtherInitParameters) DeepCopy() *ResourceDetailsOtherInitParameters { + if in == nil { + return nil + } + out := new(ResourceDetailsOtherInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDetailsOtherObservation) DeepCopyInto(out *ResourceDetailsOtherObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDetailsOtherObservation. +func (in *ResourceDetailsOtherObservation) DeepCopy() *ResourceDetailsOtherObservation { + if in == nil { + return nil + } + out := new(ResourceDetailsOtherObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDetailsOtherParameters) DeepCopyInto(out *ResourceDetailsOtherParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDetailsOtherParameters. +func (in *ResourceDetailsOtherParameters) DeepCopy() *ResourceDetailsOtherParameters { + if in == nil { + return nil + } + out := new(ResourceDetailsOtherParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceIDInitParameters) DeepCopyInto(out *ResourceIDInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceIDInitParameters. +func (in *ResourceIDInitParameters) DeepCopy() *ResourceIDInitParameters { + if in == nil { + return nil + } + out := new(ResourceIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceIDObservation) DeepCopyInto(out *ResourceIDObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceIDObservation. +func (in *ResourceIDObservation) DeepCopy() *ResourceIDObservation { + if in == nil { + return nil + } + out := new(ResourceIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceIDParameters) DeepCopyInto(out *ResourceIDParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceIDParameters. 
+func (in *ResourceIDParameters) DeepCopy() *ResourceIDParameters { + if in == nil { + return nil + } + out := new(ResourceIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePartitionInitParameters) DeepCopyInto(out *ResourcePartitionInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePartitionInitParameters. +func (in *ResourcePartitionInitParameters) DeepCopy() *ResourcePartitionInitParameters { + if in == nil { + return nil + } + out := new(ResourcePartitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePartitionObservation) DeepCopyInto(out *ResourcePartitionObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePartitionObservation. +func (in *ResourcePartitionObservation) DeepCopy() *ResourcePartitionObservation { + if in == nil { + return nil + } + out := new(ResourcePartitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePartitionParameters) DeepCopyInto(out *ResourcePartitionParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePartitionParameters. +func (in *ResourcePartitionParameters) DeepCopy() *ResourcePartitionParameters { + if in == nil { + return nil + } + out := new(ResourcePartitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRegionInitParameters) DeepCopyInto(out *ResourceRegionInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRegionInitParameters. +func (in *ResourceRegionInitParameters) DeepCopy() *ResourceRegionInitParameters { + if in == nil { + return nil + } + out := new(ResourceRegionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRegionObservation) DeepCopyInto(out *ResourceRegionObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRegionObservation. 
+func (in *ResourceRegionObservation) DeepCopy() *ResourceRegionObservation { + if in == nil { + return nil + } + out := new(ResourceRegionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRegionParameters) DeepCopyInto(out *ResourceRegionParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRegionParameters. +func (in *ResourceRegionParameters) DeepCopy() *ResourceRegionParameters { + if in == nil { + return nil + } + out := new(ResourceRegionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTagsInitParameters) DeepCopyInto(out *ResourceTagsInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTagsInitParameters. +func (in *ResourceTagsInitParameters) DeepCopy() *ResourceTagsInitParameters { + if in == nil { + return nil + } + out := new(ResourceTagsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceTagsObservation) DeepCopyInto(out *ResourceTagsObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTagsObservation. +func (in *ResourceTagsObservation) DeepCopy() *ResourceTagsObservation { + if in == nil { + return nil + } + out := new(ResourceTagsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTagsParameters) DeepCopyInto(out *ResourceTagsParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTagsParameters. +func (in *ResourceTagsParameters) DeepCopy() *ResourceTagsParameters { + if in == nil { + return nil + } + out := new(ResourceTagsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceTypeInitParameters) DeepCopyInto(out *ResourceTypeInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTypeInitParameters. +func (in *ResourceTypeInitParameters) DeepCopy() *ResourceTypeInitParameters { + if in == nil { + return nil + } + out := new(ResourceTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTypeObservation) DeepCopyInto(out *ResourceTypeObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTypeObservation. +func (in *ResourceTypeObservation) DeepCopy() *ResourceTypeObservation { + if in == nil { + return nil + } + out := new(ResourceTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTypeParameters) DeepCopyInto(out *ResourceTypeParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTypeParameters. 
+func (in *ResourceTypeParameters) DeepCopy() *ResourceTypeParameters { + if in == nil { + return nil + } + out := new(ResourceTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeverityLabelInitParameters) DeepCopyInto(out *SeverityLabelInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeverityLabelInitParameters. +func (in *SeverityLabelInitParameters) DeepCopy() *SeverityLabelInitParameters { + if in == nil { + return nil + } + out := new(SeverityLabelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeverityLabelObservation) DeepCopyInto(out *SeverityLabelObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeverityLabelObservation. +func (in *SeverityLabelObservation) DeepCopy() *SeverityLabelObservation { + if in == nil { + return nil + } + out := new(SeverityLabelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeverityLabelParameters) DeepCopyInto(out *SeverityLabelParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeverityLabelParameters. +func (in *SeverityLabelParameters) DeepCopy() *SeverityLabelParameters { + if in == nil { + return nil + } + out := new(SeverityLabelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceURLInitParameters) DeepCopyInto(out *SourceURLInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceURLInitParameters. +func (in *SourceURLInitParameters) DeepCopy() *SourceURLInitParameters { + if in == nil { + return nil + } + out := new(SourceURLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceURLObservation) DeepCopyInto(out *SourceURLObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceURLObservation. 
+func (in *SourceURLObservation) DeepCopy() *SourceURLObservation { + if in == nil { + return nil + } + out := new(SourceURLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceURLParameters) DeepCopyInto(out *SourceURLParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceURLParameters. +func (in *SourceURLParameters) DeepCopy() *SourceURLParameters { + if in == nil { + return nil + } + out := new(SourceURLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorCategoryInitParameters) DeepCopyInto(out *ThreatIntelIndicatorCategoryInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorCategoryInitParameters. +func (in *ThreatIntelIndicatorCategoryInitParameters) DeepCopy() *ThreatIntelIndicatorCategoryInitParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorCategoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatIntelIndicatorCategoryObservation) DeepCopyInto(out *ThreatIntelIndicatorCategoryObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorCategoryObservation. +func (in *ThreatIntelIndicatorCategoryObservation) DeepCopy() *ThreatIntelIndicatorCategoryObservation { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorCategoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorCategoryParameters) DeepCopyInto(out *ThreatIntelIndicatorCategoryParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorCategoryParameters. +func (in *ThreatIntelIndicatorCategoryParameters) DeepCopy() *ThreatIntelIndicatorCategoryParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorCategoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatIntelIndicatorLastObservedAtDateRangeInitParameters) DeepCopyInto(out *ThreatIntelIndicatorLastObservedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorLastObservedAtDateRangeInitParameters. +func (in *ThreatIntelIndicatorLastObservedAtDateRangeInitParameters) DeepCopy() *ThreatIntelIndicatorLastObservedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorLastObservedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorLastObservedAtDateRangeObservation) DeepCopyInto(out *ThreatIntelIndicatorLastObservedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorLastObservedAtDateRangeObservation. +func (in *ThreatIntelIndicatorLastObservedAtDateRangeObservation) DeepCopy() *ThreatIntelIndicatorLastObservedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorLastObservedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatIntelIndicatorLastObservedAtDateRangeParameters) DeepCopyInto(out *ThreatIntelIndicatorLastObservedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorLastObservedAtDateRangeParameters. +func (in *ThreatIntelIndicatorLastObservedAtDateRangeParameters) DeepCopy() *ThreatIntelIndicatorLastObservedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorLastObservedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorLastObservedAtInitParameters) DeepCopyInto(out *ThreatIntelIndicatorLastObservedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ThreatIntelIndicatorLastObservedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorLastObservedAtInitParameters. +func (in *ThreatIntelIndicatorLastObservedAtInitParameters) DeepCopy() *ThreatIntelIndicatorLastObservedAtInitParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorLastObservedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatIntelIndicatorLastObservedAtObservation) DeepCopyInto(out *ThreatIntelIndicatorLastObservedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ThreatIntelIndicatorLastObservedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorLastObservedAtObservation. +func (in *ThreatIntelIndicatorLastObservedAtObservation) DeepCopy() *ThreatIntelIndicatorLastObservedAtObservation { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorLastObservedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorLastObservedAtParameters) DeepCopyInto(out *ThreatIntelIndicatorLastObservedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(ThreatIntelIndicatorLastObservedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorLastObservedAtParameters. +func (in *ThreatIntelIndicatorLastObservedAtParameters) DeepCopy() *ThreatIntelIndicatorLastObservedAtParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorLastObservedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ThreatIntelIndicatorSourceInitParameters) DeepCopyInto(out *ThreatIntelIndicatorSourceInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorSourceInitParameters. +func (in *ThreatIntelIndicatorSourceInitParameters) DeepCopy() *ThreatIntelIndicatorSourceInitParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorSourceObservation) DeepCopyInto(out *ThreatIntelIndicatorSourceObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorSourceObservation. +func (in *ThreatIntelIndicatorSourceObservation) DeepCopy() *ThreatIntelIndicatorSourceObservation { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatIntelIndicatorSourceParameters) DeepCopyInto(out *ThreatIntelIndicatorSourceParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorSourceParameters. +func (in *ThreatIntelIndicatorSourceParameters) DeepCopy() *ThreatIntelIndicatorSourceParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorSourceURLInitParameters) DeepCopyInto(out *ThreatIntelIndicatorSourceURLInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorSourceURLInitParameters. +func (in *ThreatIntelIndicatorSourceURLInitParameters) DeepCopy() *ThreatIntelIndicatorSourceURLInitParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorSourceURLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatIntelIndicatorSourceURLObservation) DeepCopyInto(out *ThreatIntelIndicatorSourceURLObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorSourceURLObservation. +func (in *ThreatIntelIndicatorSourceURLObservation) DeepCopy() *ThreatIntelIndicatorSourceURLObservation { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorSourceURLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorSourceURLParameters) DeepCopyInto(out *ThreatIntelIndicatorSourceURLParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorSourceURLParameters. +func (in *ThreatIntelIndicatorSourceURLParameters) DeepCopy() *ThreatIntelIndicatorSourceURLParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorSourceURLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatIntelIndicatorTypeInitParameters) DeepCopyInto(out *ThreatIntelIndicatorTypeInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorTypeInitParameters. +func (in *ThreatIntelIndicatorTypeInitParameters) DeepCopy() *ThreatIntelIndicatorTypeInitParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorTypeObservation) DeepCopyInto(out *ThreatIntelIndicatorTypeObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorTypeObservation. +func (in *ThreatIntelIndicatorTypeObservation) DeepCopy() *ThreatIntelIndicatorTypeObservation { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatIntelIndicatorTypeParameters) DeepCopyInto(out *ThreatIntelIndicatorTypeParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorTypeParameters. +func (in *ThreatIntelIndicatorTypeParameters) DeepCopy() *ThreatIntelIndicatorTypeParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorValueInitParameters) DeepCopyInto(out *ThreatIntelIndicatorValueInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorValueInitParameters. +func (in *ThreatIntelIndicatorValueInitParameters) DeepCopy() *ThreatIntelIndicatorValueInitParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatIntelIndicatorValueObservation) DeepCopyInto(out *ThreatIntelIndicatorValueObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorValueObservation. +func (in *ThreatIntelIndicatorValueObservation) DeepCopy() *ThreatIntelIndicatorValueObservation { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatIntelIndicatorValueParameters) DeepCopyInto(out *ThreatIntelIndicatorValueParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelIndicatorValueParameters. +func (in *ThreatIntelIndicatorValueParameters) DeepCopy() *ThreatIntelIndicatorValueParameters { + if in == nil { + return nil + } + out := new(ThreatIntelIndicatorValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TitleInitParameters) DeepCopyInto(out *TitleInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TitleInitParameters. +func (in *TitleInitParameters) DeepCopy() *TitleInitParameters { + if in == nil { + return nil + } + out := new(TitleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TitleObservation) DeepCopyInto(out *TitleObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TitleObservation. +func (in *TitleObservation) DeepCopy() *TitleObservation { + if in == nil { + return nil + } + out := new(TitleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TitleParameters) DeepCopyInto(out *TitleParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TitleParameters. 
+func (in *TitleParameters) DeepCopy() *TitleParameters { + if in == nil { + return nil + } + out := new(TitleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeInitParameters) DeepCopyInto(out *TypeInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeInitParameters. +func (in *TypeInitParameters) DeepCopy() *TypeInitParameters { + if in == nil { + return nil + } + out := new(TypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeObservation) DeepCopyInto(out *TypeObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeObservation. +func (in *TypeObservation) DeepCopy() *TypeObservation { + if in == nil { + return nil + } + out := new(TypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TypeParameters) DeepCopyInto(out *TypeParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeParameters. +func (in *TypeParameters) DeepCopy() *TypeParameters { + if in == nil { + return nil + } + out := new(TypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatedAtDateRangeInitParameters) DeepCopyInto(out *UpdatedAtDateRangeInitParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatedAtDateRangeInitParameters. +func (in *UpdatedAtDateRangeInitParameters) DeepCopy() *UpdatedAtDateRangeInitParameters { + if in == nil { + return nil + } + out := new(UpdatedAtDateRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatedAtDateRangeObservation) DeepCopyInto(out *UpdatedAtDateRangeObservation) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatedAtDateRangeObservation. 
+func (in *UpdatedAtDateRangeObservation) DeepCopy() *UpdatedAtDateRangeObservation { + if in == nil { + return nil + } + out := new(UpdatedAtDateRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatedAtDateRangeParameters) DeepCopyInto(out *UpdatedAtDateRangeParameters) { + *out = *in + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatedAtDateRangeParameters. +func (in *UpdatedAtDateRangeParameters) DeepCopy() *UpdatedAtDateRangeParameters { + if in == nil { + return nil + } + out := new(UpdatedAtDateRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatedAtInitParameters) DeepCopyInto(out *UpdatedAtInitParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(UpdatedAtDateRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatedAtInitParameters. +func (in *UpdatedAtInitParameters) DeepCopy() *UpdatedAtInitParameters { + if in == nil { + return nil + } + out := new(UpdatedAtInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdatedAtObservation) DeepCopyInto(out *UpdatedAtObservation) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(UpdatedAtDateRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatedAtObservation. +func (in *UpdatedAtObservation) DeepCopy() *UpdatedAtObservation { + if in == nil { + return nil + } + out := new(UpdatedAtObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatedAtParameters) DeepCopyInto(out *UpdatedAtParameters) { + *out = *in + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(UpdatedAtDateRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatedAtParameters. +func (in *UpdatedAtParameters) DeepCopy() *UpdatedAtParameters { + if in == nil { + return nil + } + out := new(UpdatedAtParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserDefinedValuesInitParameters) DeepCopyInto(out *UserDefinedValuesInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserDefinedValuesInitParameters. +func (in *UserDefinedValuesInitParameters) DeepCopy() *UserDefinedValuesInitParameters { + if in == nil { + return nil + } + out := new(UserDefinedValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserDefinedValuesObservation) DeepCopyInto(out *UserDefinedValuesObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserDefinedValuesObservation. +func (in *UserDefinedValuesObservation) DeepCopy() *UserDefinedValuesObservation { + if in == nil { + return nil + } + out := new(UserDefinedValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserDefinedValuesParameters) DeepCopyInto(out *UserDefinedValuesParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserDefinedValuesParameters. +func (in *UserDefinedValuesParameters) DeepCopy() *UserDefinedValuesParameters { + if in == nil { + return nil + } + out := new(UserDefinedValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerificationStateInitParameters) DeepCopyInto(out *VerificationStateInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationStateInitParameters. +func (in *VerificationStateInitParameters) DeepCopy() *VerificationStateInitParameters { + if in == nil { + return nil + } + out := new(VerificationStateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VerificationStateObservation) DeepCopyInto(out *VerificationStateObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationStateObservation. +func (in *VerificationStateObservation) DeepCopy() *VerificationStateObservation { + if in == nil { + return nil + } + out := new(VerificationStateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerificationStateParameters) DeepCopyInto(out *VerificationStateParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationStateParameters. +func (in *VerificationStateParameters) DeepCopy() *VerificationStateParameters { + if in == nil { + return nil + } + out := new(VerificationStateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowStatusInitParameters) DeepCopyInto(out *WorkflowStatusInitParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatusInitParameters. 
+func (in *WorkflowStatusInitParameters) DeepCopy() *WorkflowStatusInitParameters { + if in == nil { + return nil + } + out := new(WorkflowStatusInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowStatusObservation) DeepCopyInto(out *WorkflowStatusObservation) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatusObservation. +func (in *WorkflowStatusObservation) DeepCopy() *WorkflowStatusObservation { + if in == nil { + return nil + } + out := new(WorkflowStatusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowStatusParameters) DeepCopyInto(out *WorkflowStatusParameters) { + *out = *in + if in.Comparison != nil { + in, out := &in.Comparison, &out.Comparison + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatusParameters. 
+func (in *WorkflowStatusParameters) DeepCopy() *WorkflowStatusParameters { + if in == nil { + return nil + } + out := new(WorkflowStatusParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/securityhub/v1beta2/zz_generated.managed.go b/apis/securityhub/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..80a5f84455 --- /dev/null +++ b/apis/securityhub/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Insight. +func (mg *Insight) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Insight. +func (mg *Insight) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Insight. +func (mg *Insight) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Insight. +func (mg *Insight) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Insight. +func (mg *Insight) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Insight. +func (mg *Insight) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Insight. +func (mg *Insight) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Insight. +func (mg *Insight) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Insight. 
+func (mg *Insight) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Insight. +func (mg *Insight) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Insight. +func (mg *Insight) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Insight. +func (mg *Insight) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/securityhub/v1beta2/zz_generated.managedlist.go b/apis/securityhub/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..522da84ecd --- /dev/null +++ b/apis/securityhub/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this InsightList. +func (l *InsightList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/securityhub/v1beta2/zz_groupversion_info.go b/apis/securityhub/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..38b6de02e3 --- /dev/null +++ b/apis/securityhub/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=securityhub.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "securityhub.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/securityhub/v1beta2/zz_insight_terraformed.go b/apis/securityhub/v1beta2/zz_insight_terraformed.go new file mode 100755 index 0000000000..899d98d5c6 --- /dev/null +++ b/apis/securityhub/v1beta2/zz_insight_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Insight +func (mg *Insight) GetTerraformResourceType() string { + return "aws_securityhub_insight" +} + +// GetConnectionDetailsMapping for this Insight +func (tr *Insight) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Insight +func (tr *Insight) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Insight +func (tr *Insight) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Insight +func (tr *Insight) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Insight +func (tr *Insight) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Insight +func (tr *Insight) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Insight +func (tr *Insight) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetInitParameters of this Insight +func (tr *Insight) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Insight using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Insight) LateInitialize(attrs []byte) (bool, error) { + params := &InsightParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Insight) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/securityhub/v1beta2/zz_insight_types.go b/apis/securityhub/v1beta2/zz_insight_types.go new file mode 100755 index 0000000000..761306bfec --- /dev/null +++ b/apis/securityhub/v1beta2/zz_insight_types.go @@ -0,0 +1,4047 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AwsAccountIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type AwsAccountIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type AwsAccountIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type CompanyNameInitParameters struct { + + // The condition to apply to a string value when querying for findings. 
Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CompanyNameObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CompanyNameParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ComplianceStatusInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ComplianceStatusObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ComplianceStatusParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ConfidenceInitParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type ConfidenceObservation struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type ConfidenceParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type CreatedAtInitParameters struct { + + // A configuration block of the date range for the date filter. 
See date_range below for more details. + DateRange *DateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type CreatedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *DateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type CreatedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + // +kubebuilder:validation:Optional + DateRange *DateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type CriticalityInitParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. 
+ Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type CriticalityObservation struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type CriticalityParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type DateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type DateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type DateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type DescriptionInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DescriptionObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DescriptionParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FiltersInitParameters struct { + + // AWS account ID that a finding is generated in. See String_Filter below for more details. + AwsAccountID []AwsAccountIDInitParameters `json:"awsAccountId,omitempty" tf:"aws_account_id,omitempty"` + + // The name of the findings provider (company) that owns the solution (product) that generates findings. See String_Filter below for more details. 
+ CompanyName []CompanyNameInitParameters `json:"companyName,omitempty" tf:"company_name,omitempty"` + + // Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS AWS Foundations. Contains security standard-related finding details. See String Filter below for more details. + ComplianceStatus []ComplianceStatusInitParameters `json:"complianceStatus,omitempty" tf:"compliance_status,omitempty"` + + // A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. See Number Filter below for more details. + Confidence []ConfidenceInitParameters `json:"confidence,omitempty" tf:"confidence,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider captured the potential security issue that a finding captured. See Date Filter below for more details. + CreatedAt []CreatedAtInitParameters `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. See Number Filter below for more details. + Criticality []CriticalityInitParameters `json:"criticality,omitempty" tf:"criticality,omitempty"` + + // A finding's description. See String Filter below for more details. + Description []DescriptionInitParameters `json:"description,omitempty" tf:"description,omitempty"` + + // The finding provider value for the finding confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. 
Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. See Number Filter below for more details. + FindingProviderFieldsConfidence []FindingProviderFieldsConfidenceInitParameters `json:"findingProviderFieldsConfidence,omitempty" tf:"finding_provider_fields_confidence,omitempty"` + + // The finding provider value for the level of importance assigned to the resources associated with the findings. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. See Number Filter below for more details. + FindingProviderFieldsCriticality []FindingProviderFieldsCriticalityInitParameters `json:"findingProviderFieldsCriticality,omitempty" tf:"finding_provider_fields_criticality,omitempty"` + + // The finding identifier of a related finding that is identified by the finding provider. See String Filter below for more details. + FindingProviderFieldsRelatedFindingsID []FindingProviderFieldsRelatedFindingsIDInitParameters `json:"findingProviderFieldsRelatedFindingsId,omitempty" tf:"finding_provider_fields_related_findings_id,omitempty"` + + // The ARN of the solution that generated a related finding that is identified by the finding provider. See String Filter below for more details. + FindingProviderFieldsRelatedFindingsProductArn []FindingProviderFieldsRelatedFindingsProductArnInitParameters `json:"findingProviderFieldsRelatedFindingsProductArn,omitempty" tf:"finding_provider_fields_related_findings_product_arn,omitempty"` + + // The finding provider value for the severity label. See String Filter below for more details. + FindingProviderFieldsSeverityLabel []FindingProviderFieldsSeverityLabelInitParameters `json:"findingProviderFieldsSeverityLabel,omitempty" tf:"finding_provider_fields_severity_label,omitempty"` + + // The finding provider's original value for the severity. See String Filter below for more details. 
+ FindingProviderFieldsSeverityOriginal []FindingProviderFieldsSeverityOriginalInitParameters `json:"findingProviderFieldsSeverityOriginal,omitempty" tf:"finding_provider_fields_severity_original,omitempty"` + + // One or more finding types that the finding provider assigned to the finding. Uses the format of namespace/category/classifier that classify a finding. Valid namespace values include: Software and Configuration Checks, TTPs, Effects, Unusual Behaviors, and Sensitive Data Identifications. See String Filter below for more details. + FindingProviderFieldsTypes []FindingProviderFieldsTypesInitParameters `json:"findingProviderFieldsTypes,omitempty" tf:"finding_provider_fields_types,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider first observed the potential security issue that a finding captured. See Date Filter below for more details. + FirstObservedAt []FirstObservedAtInitParameters `json:"firstObservedAt,omitempty" tf:"first_observed_at,omitempty"` + + // The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. See String Filter below for more details. + GeneratorID []GeneratorIDInitParameters `json:"generatorId,omitempty" tf:"generator_id,omitempty"` + + // The security findings provider-specific identifier for a finding. See String Filter below for more details. + ID []IDInitParameters `json:"id,omitempty" tf:"id,omitempty"` + + // A keyword for a finding. See Keyword Filter below for more details. + Keyword []KeywordInitParameters `json:"keyword,omitempty" tf:"keyword,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider most recently observed the potential security issue that a finding captured. See Date Filter below for more details. + LastObservedAt []LastObservedAtInitParameters `json:"lastObservedAt,omitempty" tf:"last_observed_at,omitempty"` + + // The name of the malware that was observed. 
See String Filter below for more details. + MalwareName []MalwareNameInitParameters `json:"malwareName,omitempty" tf:"malware_name,omitempty"` + + // The filesystem path of the malware that was observed. See String Filter below for more details. + MalwarePath []MalwarePathInitParameters `json:"malwarePath,omitempty" tf:"malware_path,omitempty"` + + // The state of the malware that was observed. See String Filter below for more details. + MalwareState []MalwareStateInitParameters `json:"malwareState,omitempty" tf:"malware_state,omitempty"` + + // The type of the malware that was observed. See String Filter below for more details. + MalwareType []MalwareTypeInitParameters `json:"malwareType,omitempty" tf:"malware_type,omitempty"` + + // The destination domain of network-related information about a finding. See String Filter below for more details. + NetworkDestinationDomain []NetworkDestinationDomainInitParameters `json:"networkDestinationDomain,omitempty" tf:"network_destination_domain,omitempty"` + + // The destination IPv4 address of network-related information about a finding. See Ip Filter below for more details. + NetworkDestinationIPv4 []NetworkDestinationIPv4InitParameters `json:"networkDestinationIpv4,omitempty" tf:"network_destination_ipv4,omitempty"` + + // The destination IPv6 address of network-related information about a finding. See Ip Filter below for more details. + NetworkDestinationIPv6 []NetworkDestinationIPv6InitParameters `json:"networkDestinationIpv6,omitempty" tf:"network_destination_ipv6,omitempty"` + + // The destination port of network-related information about a finding. See Number Filter below for more details. + NetworkDestinationPort []NetworkDestinationPortInitParameters `json:"networkDestinationPort,omitempty" tf:"network_destination_port,omitempty"` + + // Indicates the direction of network traffic associated with a finding. See String Filter below for more details. 
+ NetworkDirection []NetworkDirectionInitParameters `json:"networkDirection,omitempty" tf:"network_direction,omitempty"` + + // The protocol of network-related information about a finding. See String Filter below for more details. + NetworkProtocol []NetworkProtocolInitParameters `json:"networkProtocol,omitempty" tf:"network_protocol,omitempty"` + + // The source domain of network-related information about a finding. See String Filter below for more details. + NetworkSourceDomain []NetworkSourceDomainInitParameters `json:"networkSourceDomain,omitempty" tf:"network_source_domain,omitempty"` + + // The source IPv4 address of network-related information about a finding. See Ip Filter below for more details. + NetworkSourceIPv4 []NetworkSourceIPv4InitParameters `json:"networkSourceIpv4,omitempty" tf:"network_source_ipv4,omitempty"` + + // The source IPv6 address of network-related information about a finding. See Ip Filter below for more details. + NetworkSourceIPv6 []NetworkSourceIPv6InitParameters `json:"networkSourceIpv6,omitempty" tf:"network_source_ipv6,omitempty"` + + // The source media access control (MAC) address of network-related information about a finding. See String Filter below for more details. + NetworkSourceMac []NetworkSourceMacInitParameters `json:"networkSourceMac,omitempty" tf:"network_source_mac,omitempty"` + + // The source port of network-related information about a finding. See Number Filter below for more details. + NetworkSourcePort []NetworkSourcePortInitParameters `json:"networkSourcePort,omitempty" tf:"network_source_port,omitempty"` + + // The text of a note. See String Filter below for more details. + NoteText []NoteTextInitParameters `json:"noteText,omitempty" tf:"note_text,omitempty"` + + // The timestamp of when the note was updated. See Date Filter below for more details. + NoteUpdatedAt []NoteUpdatedAtInitParameters `json:"noteUpdatedAt,omitempty" tf:"note_updated_at,omitempty"` + + // The principal that created a note. 
See String Filter below for more details. + NoteUpdatedBy []NoteUpdatedByInitParameters `json:"noteUpdatedBy,omitempty" tf:"note_updated_by,omitempty"` + + // The date/time that the process was launched. See Date Filter below for more details. + ProcessLaunchedAt []ProcessLaunchedAtInitParameters `json:"processLaunchedAt,omitempty" tf:"process_launched_at,omitempty"` + + // The name of the process. See String Filter below for more details. + ProcessName []ProcessNameInitParameters `json:"processName,omitempty" tf:"process_name,omitempty"` + + // The parent process ID. See Number Filter below for more details. + ProcessParentPid []ProcessParentPidInitParameters `json:"processParentPid,omitempty" tf:"process_parent_pid,omitempty"` + + // The path to the process executable. See String Filter below for more details. + ProcessPath []ProcessPathInitParameters `json:"processPath,omitempty" tf:"process_path,omitempty"` + + // The process ID. See Number Filter below for more details. + ProcessPid []ProcessPidInitParameters `json:"processPid,omitempty" tf:"process_pid,omitempty"` + + // The date/time that the process was terminated. See Date Filter below for more details. + ProcessTerminatedAt []ProcessTerminatedAtInitParameters `json:"processTerminatedAt,omitempty" tf:"process_terminated_at,omitempty"` + + // The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub. See String Filter below for more details. + ProductArn []ProductArnInitParameters `json:"productArn,omitempty" tf:"product_arn,omitempty"` + + // A data type where security-findings providers can include additional solution-specific details that aren't part of the defined AwsSecurityFinding format. See Map Filter below for more details. 
+ ProductFields []ProductFieldsInitParameters `json:"productFields,omitempty" tf:"product_fields,omitempty"` + + // The name of the solution (product) that generates findings. See String Filter below for more details. + ProductName []ProductNameInitParameters `json:"productName,omitempty" tf:"product_name,omitempty"` + + // The recommendation of what to do about the issue described in a finding. See String Filter below for more details. + RecommendationText []RecommendationTextInitParameters `json:"recommendationText,omitempty" tf:"recommendation_text,omitempty"` + + // The updated record state for the finding. See String Filter below for more details. + RecordState []RecordStateInitParameters `json:"recordState,omitempty" tf:"record_state,omitempty"` + + // The solution-generated identifier for a related finding. See String Filter below for more details. + RelatedFindingsID []RelatedFindingsIDInitParameters `json:"relatedFindingsId,omitempty" tf:"related_findings_id,omitempty"` + + // The ARN of the solution that generated a related finding. See String Filter below for more details. + RelatedFindingsProductArn []RelatedFindingsProductArnInitParameters `json:"relatedFindingsProductArn,omitempty" tf:"related_findings_product_arn,omitempty"` + + // The IAM profile ARN of the instance. See String Filter below for more details. + ResourceAwsEC2InstanceIAMInstanceProfileArn []ResourceAwsEC2InstanceIAMInstanceProfileArnInitParameters `json:"resourceAwsEc2InstanceIamInstanceProfileArn,omitempty" tf:"resource_aws_ec2_instance_iam_instance_profile_arn,omitempty"` + + // The IPv4 addresses associated with the instance. See Ip Filter below for more details. + ResourceAwsEC2InstanceIPv4Addresses []ResourceAwsEC2InstanceIPv4AddressesInitParameters `json:"resourceAwsEc2InstanceIpv4Addresses,omitempty" tf:"resource_aws_ec2_instance_ipv4_addresses,omitempty"` + + // The IPv6 addresses associated with the instance. See Ip Filter below for more details. 
+ ResourceAwsEC2InstanceIPv6Addresses []ResourceAwsEC2InstanceIPv6AddressesInitParameters `json:"resourceAwsEc2InstanceIpv6Addresses,omitempty" tf:"resource_aws_ec2_instance_ipv6_addresses,omitempty"` + + // The Amazon Machine Image (AMI) ID of the instance. See String Filter below for more details. + ResourceAwsEC2InstanceImageID []ResourceAwsEC2InstanceImageIDInitParameters `json:"resourceAwsEc2InstanceImageId,omitempty" tf:"resource_aws_ec2_instance_image_id,omitempty"` + + // The key name associated with the instance. See String Filter below for more details. + ResourceAwsEC2InstanceKeyName []ResourceAwsEC2InstanceKeyNameInitParameters `json:"resourceAwsEc2InstanceKeyName,omitempty" tf:"resource_aws_ec2_instance_key_name,omitempty"` + + // The date and time the instance was launched. See Date Filter below for more details. + ResourceAwsEC2InstanceLaunchedAt []ResourceAwsEC2InstanceLaunchedAtInitParameters `json:"resourceAwsEc2InstanceLaunchedAt,omitempty" tf:"resource_aws_ec2_instance_launched_at,omitempty"` + + // The identifier of the subnet that the instance was launched in. See String Filter below for more details. + ResourceAwsEC2InstanceSubnetID []ResourceAwsEC2InstanceSubnetIDInitParameters `json:"resourceAwsEc2InstanceSubnetId,omitempty" tf:"resource_aws_ec2_instance_subnet_id,omitempty"` + + // The instance type of the instance. See String Filter below for more details. + ResourceAwsEC2InstanceType []ResourceAwsEC2InstanceTypeInitParameters `json:"resourceAwsEc2InstanceType,omitempty" tf:"resource_aws_ec2_instance_type,omitempty"` + + // The identifier of the VPC that the instance was launched in. See String Filter below for more details. + ResourceAwsEC2InstanceVPCID []ResourceAwsEC2InstanceVPCIDInitParameters `json:"resourceAwsEc2InstanceVpcId,omitempty" tf:"resource_aws_ec2_instance_vpc_id,omitempty"` + + // The creation date/time of the IAM access key related to a finding. See Date Filter below for more details. 
+ ResourceAwsIAMAccessKeyCreatedAt []ResourceAwsIAMAccessKeyCreatedAtInitParameters `json:"resourceAwsIamAccessKeyCreatedAt,omitempty" tf:"resource_aws_iam_access_key_created_at,omitempty"` + + // The status of the IAM access key related to a finding. See String Filter below for more details. + ResourceAwsIAMAccessKeyStatus []ResourceAwsIAMAccessKeyStatusInitParameters `json:"resourceAwsIamAccessKeyStatus,omitempty" tf:"resource_aws_iam_access_key_status,omitempty"` + + // The user associated with the IAM access key related to a finding. See String Filter below for more details. + ResourceAwsIAMAccessKeyUserName []ResourceAwsIAMAccessKeyUserNameInitParameters `json:"resourceAwsIamAccessKeyUserName,omitempty" tf:"resource_aws_iam_access_key_user_name,omitempty"` + + // The canonical user ID of the owner of the S3 bucket. See String Filter below for more details. + ResourceAwsS3BucketOwnerID []ResourceAwsS3BucketOwnerIDInitParameters `json:"resourceAwsS3BucketOwnerId,omitempty" tf:"resource_aws_s3_bucket_owner_id,omitempty"` + + // The display name of the owner of the S3 bucket. See String Filter below for more details. + ResourceAwsS3BucketOwnerName []ResourceAwsS3BucketOwnerNameInitParameters `json:"resourceAwsS3BucketOwnerName,omitempty" tf:"resource_aws_s3_bucket_owner_name,omitempty"` + + // The identifier of the image related to a finding. See String Filter below for more details. + ResourceContainerImageID []ResourceContainerImageIDInitParameters `json:"resourceContainerImageId,omitempty" tf:"resource_container_image_id,omitempty"` + + // The name of the image related to a finding. See String Filter below for more details. + ResourceContainerImageName []ResourceContainerImageNameInitParameters `json:"resourceContainerImageName,omitempty" tf:"resource_container_image_name,omitempty"` + + // The date/time that the container was started. See Date Filter below for more details. 
+ ResourceContainerLaunchedAt []ResourceContainerLaunchedAtInitParameters `json:"resourceContainerLaunchedAt,omitempty" tf:"resource_container_launched_at,omitempty"` + + // The name of the container related to a finding. See String Filter below for more details. + ResourceContainerName []ResourceContainerNameInitParameters `json:"resourceContainerName,omitempty" tf:"resource_container_name,omitempty"` + + // The details of a resource that doesn't have a specific subfield for the resource type defined. See Map Filter below for more details. + ResourceDetailsOther []ResourceDetailsOtherInitParameters `json:"resourceDetailsOther,omitempty" tf:"resource_details_other,omitempty"` + + // The canonical identifier for the given resource type. See String Filter below for more details. + ResourceID []ResourceIDInitParameters `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // The canonical AWS partition name that the Region is assigned to. See String Filter below for more details. + ResourcePartition []ResourcePartitionInitParameters `json:"resourcePartition,omitempty" tf:"resource_partition,omitempty"` + + // The canonical AWS external Region name where this resource is located. See String Filter below for more details. + ResourceRegion []ResourceRegionInitParameters `json:"resourceRegion,omitempty" tf:"resource_region,omitempty"` + + // A list of AWS tags associated with a resource at the time the finding was processed. See Map Filter below for more details. + ResourceTags []ResourceTagsInitParameters `json:"resourceTags,omitempty" tf:"resource_tags,omitempty"` + + // Specifies the type of the resource that details are provided for. See String Filter below for more details. + ResourceType []ResourceTypeInitParameters `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // The label of a finding's severity. See String Filter below for more details. 
+ SeverityLabel []SeverityLabelInitParameters `json:"severityLabel,omitempty" tf:"severity_label,omitempty"` + + // A URL that links to a page about the current finding in the security-findings provider's solution. See String Filter below for more details. + SourceURL []SourceURLInitParameters `json:"sourceUrl,omitempty" tf:"source_url,omitempty"` + + // The category of a threat intelligence indicator. See String Filter below for more details. + ThreatIntelIndicatorCategory []ThreatIntelIndicatorCategoryInitParameters `json:"threatIntelIndicatorCategory,omitempty" tf:"threat_intel_indicator_category,omitempty"` + + // The date/time of the last observation of a threat intelligence indicator. See Date Filter below for more details. + ThreatIntelIndicatorLastObservedAt []ThreatIntelIndicatorLastObservedAtInitParameters `json:"threatIntelIndicatorLastObservedAt,omitempty" tf:"threat_intel_indicator_last_observed_at,omitempty"` + + // The source of the threat intelligence. See String Filter below for more details. + ThreatIntelIndicatorSource []ThreatIntelIndicatorSourceInitParameters `json:"threatIntelIndicatorSource,omitempty" tf:"threat_intel_indicator_source,omitempty"` + + // The URL for more details from the source of the threat intelligence. See String Filter below for more details. + ThreatIntelIndicatorSourceURL []ThreatIntelIndicatorSourceURLInitParameters `json:"threatIntelIndicatorSourceUrl,omitempty" tf:"threat_intel_indicator_source_url,omitempty"` + + // The type of a threat intelligence indicator. See String Filter below for more details. + ThreatIntelIndicatorType []ThreatIntelIndicatorTypeInitParameters `json:"threatIntelIndicatorType,omitempty" tf:"threat_intel_indicator_type,omitempty"` + + // The value of a threat intelligence indicator. See String Filter below for more details. 
+ ThreatIntelIndicatorValue []ThreatIntelIndicatorValueInitParameters `json:"threatIntelIndicatorValue,omitempty" tf:"threat_intel_indicator_value,omitempty"` + + // A finding's title. See String Filter below for more details. + Title []TitleInitParameters `json:"title,omitempty" tf:"title,omitempty"` + + // A finding type in the format of namespace/category/classifier that classifies a finding. See String Filter below for more details. + Type []TypeInitParameters `json:"type,omitempty" tf:"type,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider last updated the finding record. See Date Filter below for more details. + UpdatedAt []UpdatedAtInitParameters `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` + + // A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding. See Map Filter below for more details. + UserDefinedValues []UserDefinedValuesInitParameters `json:"userDefinedValues,omitempty" tf:"user_defined_values,omitempty"` + + // The veracity of a finding. See String Filter below for more details. + VerificationState []VerificationStateInitParameters `json:"verificationState,omitempty" tf:"verification_state,omitempty"` + + // The status of the investigation into a finding. See Workflow Status Filter below for more details. + WorkflowStatus []WorkflowStatusInitParameters `json:"workflowStatus,omitempty" tf:"workflow_status,omitempty"` +} + +type FiltersObservation struct { + + // AWS account ID that a finding is generated in. See String_Filter below for more details. + AwsAccountID []AwsAccountIDObservation `json:"awsAccountId,omitempty" tf:"aws_account_id,omitempty"` + + // The name of the findings provider (company) that owns the solution (product) that generates findings. See String_Filter below for more details. 
+ CompanyName []CompanyNameObservation `json:"companyName,omitempty" tf:"company_name,omitempty"` + + // Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS AWS Foundations. Contains security standard-related finding details. See String Filter below for more details. + ComplianceStatus []ComplianceStatusObservation `json:"complianceStatus,omitempty" tf:"compliance_status,omitempty"` + + // A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. See Number Filter below for more details. + Confidence []ConfidenceObservation `json:"confidence,omitempty" tf:"confidence,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider captured the potential security issue that a finding captured. See Date Filter below for more details. + CreatedAt []CreatedAtObservation `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. See Number Filter below for more details. + Criticality []CriticalityObservation `json:"criticality,omitempty" tf:"criticality,omitempty"` + + // A finding's description. See String Filter below for more details. + Description []DescriptionObservation `json:"description,omitempty" tf:"description,omitempty"` + + // The finding provider value for the finding confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. 
Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. See Number Filter below for more details. + FindingProviderFieldsConfidence []FindingProviderFieldsConfidenceObservation `json:"findingProviderFieldsConfidence,omitempty" tf:"finding_provider_fields_confidence,omitempty"` + + // The finding provider value for the level of importance assigned to the resources associated with the findings. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. See Number Filter below for more details. + FindingProviderFieldsCriticality []FindingProviderFieldsCriticalityObservation `json:"findingProviderFieldsCriticality,omitempty" tf:"finding_provider_fields_criticality,omitempty"` + + // The finding identifier of a related finding that is identified by the finding provider. See String Filter below for more details. + FindingProviderFieldsRelatedFindingsID []FindingProviderFieldsRelatedFindingsIDObservation `json:"findingProviderFieldsRelatedFindingsId,omitempty" tf:"finding_provider_fields_related_findings_id,omitempty"` + + // The ARN of the solution that generated a related finding that is identified by the finding provider. See String Filter below for more details. + FindingProviderFieldsRelatedFindingsProductArn []FindingProviderFieldsRelatedFindingsProductArnObservation `json:"findingProviderFieldsRelatedFindingsProductArn,omitempty" tf:"finding_provider_fields_related_findings_product_arn,omitempty"` + + // The finding provider value for the severity label. See String Filter below for more details. + FindingProviderFieldsSeverityLabel []FindingProviderFieldsSeverityLabelObservation `json:"findingProviderFieldsSeverityLabel,omitempty" tf:"finding_provider_fields_severity_label,omitempty"` + + // The finding provider's original value for the severity. See String Filter below for more details. 
+ FindingProviderFieldsSeverityOriginal []FindingProviderFieldsSeverityOriginalObservation `json:"findingProviderFieldsSeverityOriginal,omitempty" tf:"finding_provider_fields_severity_original,omitempty"` + + // One or more finding types that the finding provider assigned to the finding. Uses the format of namespace/category/classifier that classify a finding. Valid namespace values include: Software and Configuration Checks, TTPs, Effects, Unusual Behaviors, and Sensitive Data Identifications. See String Filter below for more details. + FindingProviderFieldsTypes []FindingProviderFieldsTypesObservation `json:"findingProviderFieldsTypes,omitempty" tf:"finding_provider_fields_types,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider first observed the potential security issue that a finding captured. See Date Filter below for more details. + FirstObservedAt []FirstObservedAtObservation `json:"firstObservedAt,omitempty" tf:"first_observed_at,omitempty"` + + // The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. See String Filter below for more details. + GeneratorID []GeneratorIDObservation `json:"generatorId,omitempty" tf:"generator_id,omitempty"` + + // The security findings provider-specific identifier for a finding. See String Filter below for more details. + ID []IDObservation `json:"id,omitempty" tf:"id,omitempty"` + + // A keyword for a finding. See Keyword Filter below for more details. + Keyword []KeywordObservation `json:"keyword,omitempty" tf:"keyword,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider most recently observed the potential security issue that a finding captured. See Date Filter below for more details. + LastObservedAt []LastObservedAtObservation `json:"lastObservedAt,omitempty" tf:"last_observed_at,omitempty"` + + // The name of the malware that was observed. 
See String Filter below for more details. + MalwareName []MalwareNameObservation `json:"malwareName,omitempty" tf:"malware_name,omitempty"` + + // The filesystem path of the malware that was observed. See String Filter below for more details. + MalwarePath []MalwarePathObservation `json:"malwarePath,omitempty" tf:"malware_path,omitempty"` + + // The state of the malware that was observed. See String Filter below for more details. + MalwareState []MalwareStateObservation `json:"malwareState,omitempty" tf:"malware_state,omitempty"` + + // The type of the malware that was observed. See String Filter below for more details. + MalwareType []MalwareTypeObservation `json:"malwareType,omitempty" tf:"malware_type,omitempty"` + + // The destination domain of network-related information about a finding. See String Filter below for more details. + NetworkDestinationDomain []NetworkDestinationDomainObservation `json:"networkDestinationDomain,omitempty" tf:"network_destination_domain,omitempty"` + + // The destination IPv4 address of network-related information about a finding. See Ip Filter below for more details. + NetworkDestinationIPv4 []NetworkDestinationIPv4Observation `json:"networkDestinationIpv4,omitempty" tf:"network_destination_ipv4,omitempty"` + + // The destination IPv6 address of network-related information about a finding. See Ip Filter below for more details. + NetworkDestinationIPv6 []NetworkDestinationIPv6Observation `json:"networkDestinationIpv6,omitempty" tf:"network_destination_ipv6,omitempty"` + + // The destination port of network-related information about a finding. See Number Filter below for more details. + NetworkDestinationPort []NetworkDestinationPortObservation `json:"networkDestinationPort,omitempty" tf:"network_destination_port,omitempty"` + + // Indicates the direction of network traffic associated with a finding. See String Filter below for more details. 
+ NetworkDirection []NetworkDirectionObservation `json:"networkDirection,omitempty" tf:"network_direction,omitempty"` + + // The protocol of network-related information about a finding. See String Filter below for more details. + NetworkProtocol []NetworkProtocolObservation `json:"networkProtocol,omitempty" tf:"network_protocol,omitempty"` + + // The source domain of network-related information about a finding. See String Filter below for more details. + NetworkSourceDomain []NetworkSourceDomainObservation `json:"networkSourceDomain,omitempty" tf:"network_source_domain,omitempty"` + + // The source IPv4 address of network-related information about a finding. See Ip Filter below for more details. + NetworkSourceIPv4 []NetworkSourceIPv4Observation `json:"networkSourceIpv4,omitempty" tf:"network_source_ipv4,omitempty"` + + // The source IPv6 address of network-related information about a finding. See Ip Filter below for more details. + NetworkSourceIPv6 []NetworkSourceIPv6Observation `json:"networkSourceIpv6,omitempty" tf:"network_source_ipv6,omitempty"` + + // The source media access control (MAC) address of network-related information about a finding. See String Filter below for more details. + NetworkSourceMac []NetworkSourceMacObservation `json:"networkSourceMac,omitempty" tf:"network_source_mac,omitempty"` + + // The source port of network-related information about a finding. See Number Filter below for more details. + NetworkSourcePort []NetworkSourcePortObservation `json:"networkSourcePort,omitempty" tf:"network_source_port,omitempty"` + + // The text of a note. See String Filter below for more details. + NoteText []NoteTextObservation `json:"noteText,omitempty" tf:"note_text,omitempty"` + + // The timestamp of when the note was updated. See Date Filter below for more details. + NoteUpdatedAt []NoteUpdatedAtObservation `json:"noteUpdatedAt,omitempty" tf:"note_updated_at,omitempty"` + + // The principal that created a note. 
See String Filter below for more details. + NoteUpdatedBy []NoteUpdatedByObservation `json:"noteUpdatedBy,omitempty" tf:"note_updated_by,omitempty"` + + // The date/time that the process was launched. See Date Filter below for more details. + ProcessLaunchedAt []ProcessLaunchedAtObservation `json:"processLaunchedAt,omitempty" tf:"process_launched_at,omitempty"` + + // The name of the process. See String Filter below for more details. + ProcessName []ProcessNameObservation `json:"processName,omitempty" tf:"process_name,omitempty"` + + // The parent process ID. See Number Filter below for more details. + ProcessParentPid []ProcessParentPidObservation `json:"processParentPid,omitempty" tf:"process_parent_pid,omitempty"` + + // The path to the process executable. See String Filter below for more details. + ProcessPath []ProcessPathObservation `json:"processPath,omitempty" tf:"process_path,omitempty"` + + // The process ID. See Number Filter below for more details. + ProcessPid []ProcessPidObservation `json:"processPid,omitempty" tf:"process_pid,omitempty"` + + // The date/time that the process was terminated. See Date Filter below for more details. + ProcessTerminatedAt []ProcessTerminatedAtObservation `json:"processTerminatedAt,omitempty" tf:"process_terminated_at,omitempty"` + + // The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub. See String Filter below for more details. + ProductArn []ProductArnObservation `json:"productArn,omitempty" tf:"product_arn,omitempty"` + + // A data type where security-findings providers can include additional solution-specific details that aren't part of the defined AwsSecurityFinding format. See Map Filter below for more details. 
+ ProductFields []ProductFieldsObservation `json:"productFields,omitempty" tf:"product_fields,omitempty"` + + // The name of the solution (product) that generates findings. See String Filter below for more details. + ProductName []ProductNameObservation `json:"productName,omitempty" tf:"product_name,omitempty"` + + // The recommendation of what to do about the issue described in a finding. See String Filter below for more details. + RecommendationText []RecommendationTextObservation `json:"recommendationText,omitempty" tf:"recommendation_text,omitempty"` + + // The updated record state for the finding. See String Filter below for more details. + RecordState []RecordStateObservation `json:"recordState,omitempty" tf:"record_state,omitempty"` + + // The solution-generated identifier for a related finding. See String Filter below for more details. + RelatedFindingsID []RelatedFindingsIDObservation `json:"relatedFindingsId,omitempty" tf:"related_findings_id,omitempty"` + + // The ARN of the solution that generated a related finding. See String Filter below for more details. + RelatedFindingsProductArn []RelatedFindingsProductArnObservation `json:"relatedFindingsProductArn,omitempty" tf:"related_findings_product_arn,omitempty"` + + // The IAM profile ARN of the instance. See String Filter below for more details. + ResourceAwsEC2InstanceIAMInstanceProfileArn []ResourceAwsEC2InstanceIAMInstanceProfileArnObservation `json:"resourceAwsEc2InstanceIamInstanceProfileArn,omitempty" tf:"resource_aws_ec2_instance_iam_instance_profile_arn,omitempty"` + + // The IPv4 addresses associated with the instance. See Ip Filter below for more details. + ResourceAwsEC2InstanceIPv4Addresses []ResourceAwsEC2InstanceIPv4AddressesObservation `json:"resourceAwsEc2InstanceIpv4Addresses,omitempty" tf:"resource_aws_ec2_instance_ipv4_addresses,omitempty"` + + // The IPv6 addresses associated with the instance. See Ip Filter below for more details. 
+ ResourceAwsEC2InstanceIPv6Addresses []ResourceAwsEC2InstanceIPv6AddressesObservation `json:"resourceAwsEc2InstanceIpv6Addresses,omitempty" tf:"resource_aws_ec2_instance_ipv6_addresses,omitempty"` + + // The Amazon Machine Image (AMI) ID of the instance. See String Filter below for more details. + ResourceAwsEC2InstanceImageID []ResourceAwsEC2InstanceImageIDObservation `json:"resourceAwsEc2InstanceImageId,omitempty" tf:"resource_aws_ec2_instance_image_id,omitempty"` + + // The key name associated with the instance. See String Filter below for more details. + ResourceAwsEC2InstanceKeyName []ResourceAwsEC2InstanceKeyNameObservation `json:"resourceAwsEc2InstanceKeyName,omitempty" tf:"resource_aws_ec2_instance_key_name,omitempty"` + + // The date and time the instance was launched. See Date Filter below for more details. + ResourceAwsEC2InstanceLaunchedAt []ResourceAwsEC2InstanceLaunchedAtObservation `json:"resourceAwsEc2InstanceLaunchedAt,omitempty" tf:"resource_aws_ec2_instance_launched_at,omitempty"` + + // The identifier of the subnet that the instance was launched in. See String Filter below for more details. + ResourceAwsEC2InstanceSubnetID []ResourceAwsEC2InstanceSubnetIDObservation `json:"resourceAwsEc2InstanceSubnetId,omitempty" tf:"resource_aws_ec2_instance_subnet_id,omitempty"` + + // The instance type of the instance. See String Filter below for more details. + ResourceAwsEC2InstanceType []ResourceAwsEC2InstanceTypeObservation `json:"resourceAwsEc2InstanceType,omitempty" tf:"resource_aws_ec2_instance_type,omitempty"` + + // The identifier of the VPC that the instance was launched in. See String Filter below for more details. + ResourceAwsEC2InstanceVPCID []ResourceAwsEC2InstanceVPCIDObservation `json:"resourceAwsEc2InstanceVpcId,omitempty" tf:"resource_aws_ec2_instance_vpc_id,omitempty"` + + // The creation date/time of the IAM access key related to a finding. See Date Filter below for more details. 
+ ResourceAwsIAMAccessKeyCreatedAt []ResourceAwsIAMAccessKeyCreatedAtObservation `json:"resourceAwsIamAccessKeyCreatedAt,omitempty" tf:"resource_aws_iam_access_key_created_at,omitempty"` + + // The status of the IAM access key related to a finding. See String Filter below for more details. + ResourceAwsIAMAccessKeyStatus []ResourceAwsIAMAccessKeyStatusObservation `json:"resourceAwsIamAccessKeyStatus,omitempty" tf:"resource_aws_iam_access_key_status,omitempty"` + + // The user associated with the IAM access key related to a finding. See String Filter below for more details. + ResourceAwsIAMAccessKeyUserName []ResourceAwsIAMAccessKeyUserNameObservation `json:"resourceAwsIamAccessKeyUserName,omitempty" tf:"resource_aws_iam_access_key_user_name,omitempty"` + + // The canonical user ID of the owner of the S3 bucket. See String Filter below for more details. + ResourceAwsS3BucketOwnerID []ResourceAwsS3BucketOwnerIDObservation `json:"resourceAwsS3BucketOwnerId,omitempty" tf:"resource_aws_s3_bucket_owner_id,omitempty"` + + // The display name of the owner of the S3 bucket. See String Filter below for more details. + ResourceAwsS3BucketOwnerName []ResourceAwsS3BucketOwnerNameObservation `json:"resourceAwsS3BucketOwnerName,omitempty" tf:"resource_aws_s3_bucket_owner_name,omitempty"` + + // The identifier of the image related to a finding. See String Filter below for more details. + ResourceContainerImageID []ResourceContainerImageIDObservation `json:"resourceContainerImageId,omitempty" tf:"resource_container_image_id,omitempty"` + + // The name of the image related to a finding. See String Filter below for more details. + ResourceContainerImageName []ResourceContainerImageNameObservation `json:"resourceContainerImageName,omitempty" tf:"resource_container_image_name,omitempty"` + + // The date/time that the container was started. See Date Filter below for more details. 
+ ResourceContainerLaunchedAt []ResourceContainerLaunchedAtObservation `json:"resourceContainerLaunchedAt,omitempty" tf:"resource_container_launched_at,omitempty"` + + // The name of the container related to a finding. See String Filter below for more details. + ResourceContainerName []ResourceContainerNameObservation `json:"resourceContainerName,omitempty" tf:"resource_container_name,omitempty"` + + // The details of a resource that doesn't have a specific subfield for the resource type defined. See Map Filter below for more details. + ResourceDetailsOther []ResourceDetailsOtherObservation `json:"resourceDetailsOther,omitempty" tf:"resource_details_other,omitempty"` + + // The canonical identifier for the given resource type. See String Filter below for more details. + ResourceID []ResourceIDObservation `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // The canonical AWS partition name that the Region is assigned to. See String Filter below for more details. + ResourcePartition []ResourcePartitionObservation `json:"resourcePartition,omitempty" tf:"resource_partition,omitempty"` + + // The canonical AWS external Region name where this resource is located. See String Filter below for more details. + ResourceRegion []ResourceRegionObservation `json:"resourceRegion,omitempty" tf:"resource_region,omitempty"` + + // A list of AWS tags associated with a resource at the time the finding was processed. See Map Filter below for more details. + ResourceTags []ResourceTagsObservation `json:"resourceTags,omitempty" tf:"resource_tags,omitempty"` + + // Specifies the type of the resource that details are provided for. See String Filter below for more details. + ResourceType []ResourceTypeObservation `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // The label of a finding's severity. See String Filter below for more details. 
+ SeverityLabel []SeverityLabelObservation `json:"severityLabel,omitempty" tf:"severity_label,omitempty"` + + // A URL that links to a page about the current finding in the security-findings provider's solution. See String Filter below for more details. + SourceURL []SourceURLObservation `json:"sourceUrl,omitempty" tf:"source_url,omitempty"` + + // The category of a threat intelligence indicator. See String Filter below for more details. + ThreatIntelIndicatorCategory []ThreatIntelIndicatorCategoryObservation `json:"threatIntelIndicatorCategory,omitempty" tf:"threat_intel_indicator_category,omitempty"` + + // The date/time of the last observation of a threat intelligence indicator. See Date Filter below for more details. + ThreatIntelIndicatorLastObservedAt []ThreatIntelIndicatorLastObservedAtObservation `json:"threatIntelIndicatorLastObservedAt,omitempty" tf:"threat_intel_indicator_last_observed_at,omitempty"` + + // The source of the threat intelligence. See String Filter below for more details. + ThreatIntelIndicatorSource []ThreatIntelIndicatorSourceObservation `json:"threatIntelIndicatorSource,omitempty" tf:"threat_intel_indicator_source,omitempty"` + + // The URL for more details from the source of the threat intelligence. See String Filter below for more details. + ThreatIntelIndicatorSourceURL []ThreatIntelIndicatorSourceURLObservation `json:"threatIntelIndicatorSourceUrl,omitempty" tf:"threat_intel_indicator_source_url,omitempty"` + + // The type of a threat intelligence indicator. See String Filter below for more details. + ThreatIntelIndicatorType []ThreatIntelIndicatorTypeObservation `json:"threatIntelIndicatorType,omitempty" tf:"threat_intel_indicator_type,omitempty"` + + // The value of a threat intelligence indicator. See String Filter below for more details. + ThreatIntelIndicatorValue []ThreatIntelIndicatorValueObservation `json:"threatIntelIndicatorValue,omitempty" tf:"threat_intel_indicator_value,omitempty"` + + // A finding's title. 
See String Filter below for more details. + Title []TitleObservation `json:"title,omitempty" tf:"title,omitempty"` + + // A finding type in the format of namespace/category/classifier that classifies a finding. See String Filter below for more details. + Type []TypeObservation `json:"type,omitempty" tf:"type,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider last updated the finding record. See Date Filter below for more details. + UpdatedAt []UpdatedAtObservation `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` + + // A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding. See Map Filter below for more details. + UserDefinedValues []UserDefinedValuesObservation `json:"userDefinedValues,omitempty" tf:"user_defined_values,omitempty"` + + // The veracity of a finding. See String Filter below for more details. + VerificationState []VerificationStateObservation `json:"verificationState,omitempty" tf:"verification_state,omitempty"` + + // The status of the investigation into a finding. See Workflow Status Filter below for more details. + WorkflowStatus []WorkflowStatusObservation `json:"workflowStatus,omitempty" tf:"workflow_status,omitempty"` +} + +type FiltersParameters struct { + + // AWS account ID that a finding is generated in. See String_Filter below for more details. + // +kubebuilder:validation:Optional + AwsAccountID []AwsAccountIDParameters `json:"awsAccountId,omitempty" tf:"aws_account_id,omitempty"` + + // The name of the findings provider (company) that owns the solution (product) that generates findings. See String_Filter below for more details. + // +kubebuilder:validation:Optional + CompanyName []CompanyNameParameters `json:"companyName,omitempty" tf:"company_name,omitempty"` + + // Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS AWS Foundations. 
Contains security standard-related finding details. See String Filter below for more details. + // +kubebuilder:validation:Optional + ComplianceStatus []ComplianceStatusParameters `json:"complianceStatus,omitempty" tf:"compliance_status,omitempty"` + + // A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. See Number Filter below for more details. + // +kubebuilder:validation:Optional + Confidence []ConfidenceParameters `json:"confidence,omitempty" tf:"confidence,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider captured the potential security issue that a finding captured. See Date Filter below for more details. + // +kubebuilder:validation:Optional + CreatedAt []CreatedAtParameters `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. See Number Filter below for more details. + // +kubebuilder:validation:Optional + Criticality []CriticalityParameters `json:"criticality,omitempty" tf:"criticality,omitempty"` + + // A finding's description. See String Filter below for more details. + // +kubebuilder:validation:Optional + Description []DescriptionParameters `json:"description,omitempty" tf:"description,omitempty"` + + // The finding provider value for the finding confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. 
See Number Filter below for more details. + // +kubebuilder:validation:Optional + FindingProviderFieldsConfidence []FindingProviderFieldsConfidenceParameters `json:"findingProviderFieldsConfidence,omitempty" tf:"finding_provider_fields_confidence,omitempty"` + + // The finding provider value for the level of importance assigned to the resources associated with the findings. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. See Number Filter below for more details. + // +kubebuilder:validation:Optional + FindingProviderFieldsCriticality []FindingProviderFieldsCriticalityParameters `json:"findingProviderFieldsCriticality,omitempty" tf:"finding_provider_fields_criticality,omitempty"` + + // The finding identifier of a related finding that is identified by the finding provider. See String Filter below for more details. + // +kubebuilder:validation:Optional + FindingProviderFieldsRelatedFindingsID []FindingProviderFieldsRelatedFindingsIDParameters `json:"findingProviderFieldsRelatedFindingsId,omitempty" tf:"finding_provider_fields_related_findings_id,omitempty"` + + // The ARN of the solution that generated a related finding that is identified by the finding provider. See String Filter below for more details. + // +kubebuilder:validation:Optional + FindingProviderFieldsRelatedFindingsProductArn []FindingProviderFieldsRelatedFindingsProductArnParameters `json:"findingProviderFieldsRelatedFindingsProductArn,omitempty" tf:"finding_provider_fields_related_findings_product_arn,omitempty"` + + // The finding provider value for the severity label. See String Filter below for more details. + // +kubebuilder:validation:Optional + FindingProviderFieldsSeverityLabel []FindingProviderFieldsSeverityLabelParameters `json:"findingProviderFieldsSeverityLabel,omitempty" tf:"finding_provider_fields_severity_label,omitempty"` + + // The finding provider's original value for the severity. 
See String Filter below for more details. + // +kubebuilder:validation:Optional + FindingProviderFieldsSeverityOriginal []FindingProviderFieldsSeverityOriginalParameters `json:"findingProviderFieldsSeverityOriginal,omitempty" tf:"finding_provider_fields_severity_original,omitempty"` + + // One or more finding types that the finding provider assigned to the finding. Uses the format of namespace/category/classifier that classify a finding. Valid namespace values include: Software and Configuration Checks, TTPs, Effects, Unusual Behaviors, and Sensitive Data Identifications. See String Filter below for more details. + // +kubebuilder:validation:Optional + FindingProviderFieldsTypes []FindingProviderFieldsTypesParameters `json:"findingProviderFieldsTypes,omitempty" tf:"finding_provider_fields_types,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider first observed the potential security issue that a finding captured. See Date Filter below for more details. + // +kubebuilder:validation:Optional + FirstObservedAt []FirstObservedAtParameters `json:"firstObservedAt,omitempty" tf:"first_observed_at,omitempty"` + + // The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + GeneratorID []GeneratorIDParameters `json:"generatorId,omitempty" tf:"generator_id,omitempty"` + + // The security findings provider-specific identifier for a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + ID []IDParameters `json:"id,omitempty" tf:"id,omitempty"` + + // A keyword for a finding. See Keyword Filter below for more details. 
+ // +kubebuilder:validation:Optional + Keyword []KeywordParameters `json:"keyword,omitempty" tf:"keyword,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider most recently observed the potential security issue that a finding captured. See Date Filter below for more details. + // +kubebuilder:validation:Optional + LastObservedAt []LastObservedAtParameters `json:"lastObservedAt,omitempty" tf:"last_observed_at,omitempty"` + + // The name of the malware that was observed. See String Filter below for more details. + // +kubebuilder:validation:Optional + MalwareName []MalwareNameParameters `json:"malwareName,omitempty" tf:"malware_name,omitempty"` + + // The filesystem path of the malware that was observed. See String Filter below for more details. + // +kubebuilder:validation:Optional + MalwarePath []MalwarePathParameters `json:"malwarePath,omitempty" tf:"malware_path,omitempty"` + + // The state of the malware that was observed. See String Filter below for more details. + // +kubebuilder:validation:Optional + MalwareState []MalwareStateParameters `json:"malwareState,omitempty" tf:"malware_state,omitempty"` + + // The type of the malware that was observed. See String Filter below for more details. + // +kubebuilder:validation:Optional + MalwareType []MalwareTypeParameters `json:"malwareType,omitempty" tf:"malware_type,omitempty"` + + // The destination domain of network-related information about a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + NetworkDestinationDomain []NetworkDestinationDomainParameters `json:"networkDestinationDomain,omitempty" tf:"network_destination_domain,omitempty"` + + // The destination IPv4 address of network-related information about a finding. See Ip Filter below for more details. 
+ // +kubebuilder:validation:Optional + NetworkDestinationIPv4 []NetworkDestinationIPv4Parameters `json:"networkDestinationIpv4,omitempty" tf:"network_destination_ipv4,omitempty"` + + // The destination IPv6 address of network-related information about a finding. See Ip Filter below for more details. + // +kubebuilder:validation:Optional + NetworkDestinationIPv6 []NetworkDestinationIPv6Parameters `json:"networkDestinationIpv6,omitempty" tf:"network_destination_ipv6,omitempty"` + + // The destination port of network-related information about a finding. See Number Filter below for more details. + // +kubebuilder:validation:Optional + NetworkDestinationPort []NetworkDestinationPortParameters `json:"networkDestinationPort,omitempty" tf:"network_destination_port,omitempty"` + + // Indicates the direction of network traffic associated with a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + NetworkDirection []NetworkDirectionParameters `json:"networkDirection,omitempty" tf:"network_direction,omitempty"` + + // The protocol of network-related information about a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + NetworkProtocol []NetworkProtocolParameters `json:"networkProtocol,omitempty" tf:"network_protocol,omitempty"` + + // The source domain of network-related information about a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + NetworkSourceDomain []NetworkSourceDomainParameters `json:"networkSourceDomain,omitempty" tf:"network_source_domain,omitempty"` + + // The source IPv4 address of network-related information about a finding. See Ip Filter below for more details. + // +kubebuilder:validation:Optional + NetworkSourceIPv4 []NetworkSourceIPv4Parameters `json:"networkSourceIpv4,omitempty" tf:"network_source_ipv4,omitempty"` + + // The source IPv6 address of network-related information about a finding. See Ip Filter below for more details. 
+ // +kubebuilder:validation:Optional + NetworkSourceIPv6 []NetworkSourceIPv6Parameters `json:"networkSourceIpv6,omitempty" tf:"network_source_ipv6,omitempty"` + + // The source media access control (MAC) address of network-related information about a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + NetworkSourceMac []NetworkSourceMacParameters `json:"networkSourceMac,omitempty" tf:"network_source_mac,omitempty"` + + // The source port of network-related information about a finding. See Number Filter below for more details. + // +kubebuilder:validation:Optional + NetworkSourcePort []NetworkSourcePortParameters `json:"networkSourcePort,omitempty" tf:"network_source_port,omitempty"` + + // The text of a note. See String Filter below for more details. + // +kubebuilder:validation:Optional + NoteText []NoteTextParameters `json:"noteText,omitempty" tf:"note_text,omitempty"` + + // The timestamp of when the note was updated. See Date Filter below for more details. + // +kubebuilder:validation:Optional + NoteUpdatedAt []NoteUpdatedAtParameters `json:"noteUpdatedAt,omitempty" tf:"note_updated_at,omitempty"` + + // The principal that created a note. See String Filter below for more details. + // +kubebuilder:validation:Optional + NoteUpdatedBy []NoteUpdatedByParameters `json:"noteUpdatedBy,omitempty" tf:"note_updated_by,omitempty"` + + // The date/time that the process was launched. See Date Filter below for more details. + // +kubebuilder:validation:Optional + ProcessLaunchedAt []ProcessLaunchedAtParameters `json:"processLaunchedAt,omitempty" tf:"process_launched_at,omitempty"` + + // The name of the process. See String Filter below for more details. + // +kubebuilder:validation:Optional + ProcessName []ProcessNameParameters `json:"processName,omitempty" tf:"process_name,omitempty"` + + // The parent process ID. See Number Filter below for more details. 
+ // +kubebuilder:validation:Optional + ProcessParentPid []ProcessParentPidParameters `json:"processParentPid,omitempty" tf:"process_parent_pid,omitempty"` + + // The path to the process executable. See String Filter below for more details. + // +kubebuilder:validation:Optional + ProcessPath []ProcessPathParameters `json:"processPath,omitempty" tf:"process_path,omitempty"` + + // The process ID. See Number Filter below for more details. + // +kubebuilder:validation:Optional + ProcessPid []ProcessPidParameters `json:"processPid,omitempty" tf:"process_pid,omitempty"` + + // The date/time that the process was terminated. See Date Filter below for more details. + // +kubebuilder:validation:Optional + ProcessTerminatedAt []ProcessTerminatedAtParameters `json:"processTerminatedAt,omitempty" tf:"process_terminated_at,omitempty"` + + // The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub. See String Filter below for more details. + // +kubebuilder:validation:Optional + ProductArn []ProductArnParameters `json:"productArn,omitempty" tf:"product_arn,omitempty"` + + // A data type where security-findings providers can include additional solution-specific details that aren't part of the defined AwsSecurityFinding format. See Map Filter below for more details. + // +kubebuilder:validation:Optional + ProductFields []ProductFieldsParameters `json:"productFields,omitempty" tf:"product_fields,omitempty"` + + // The name of the solution (product) that generates findings. See String Filter below for more details. + // +kubebuilder:validation:Optional + ProductName []ProductNameParameters `json:"productName,omitempty" tf:"product_name,omitempty"` + + // The recommendation of what to do about the issue described in a finding. See String Filter below for more details. 
+ // +kubebuilder:validation:Optional + RecommendationText []RecommendationTextParameters `json:"recommendationText,omitempty" tf:"recommendation_text,omitempty"` + + // The updated record state for the finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + RecordState []RecordStateParameters `json:"recordState,omitempty" tf:"record_state,omitempty"` + + // The solution-generated identifier for a related finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + RelatedFindingsID []RelatedFindingsIDParameters `json:"relatedFindingsId,omitempty" tf:"related_findings_id,omitempty"` + + // The ARN of the solution that generated a related finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + RelatedFindingsProductArn []RelatedFindingsProductArnParameters `json:"relatedFindingsProductArn,omitempty" tf:"related_findings_product_arn,omitempty"` + + // The IAM profile ARN of the instance. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsEC2InstanceIAMInstanceProfileArn []ResourceAwsEC2InstanceIAMInstanceProfileArnParameters `json:"resourceAwsEc2InstanceIamInstanceProfileArn,omitempty" tf:"resource_aws_ec2_instance_iam_instance_profile_arn,omitempty"` + + // The IPv4 addresses associated with the instance. See Ip Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsEC2InstanceIPv4Addresses []ResourceAwsEC2InstanceIPv4AddressesParameters `json:"resourceAwsEc2InstanceIpv4Addresses,omitempty" tf:"resource_aws_ec2_instance_ipv4_addresses,omitempty"` + + // The IPv6 addresses associated with the instance. See Ip Filter below for more details. 
+ // +kubebuilder:validation:Optional + ResourceAwsEC2InstanceIPv6Addresses []ResourceAwsEC2InstanceIPv6AddressesParameters `json:"resourceAwsEc2InstanceIpv6Addresses,omitempty" tf:"resource_aws_ec2_instance_ipv6_addresses,omitempty"` + + // The Amazon Machine Image (AMI) ID of the instance. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsEC2InstanceImageID []ResourceAwsEC2InstanceImageIDParameters `json:"resourceAwsEc2InstanceImageId,omitempty" tf:"resource_aws_ec2_instance_image_id,omitempty"` + + // The key name associated with the instance. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsEC2InstanceKeyName []ResourceAwsEC2InstanceKeyNameParameters `json:"resourceAwsEc2InstanceKeyName,omitempty" tf:"resource_aws_ec2_instance_key_name,omitempty"` + + // The date and time the instance was launched. See Date Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsEC2InstanceLaunchedAt []ResourceAwsEC2InstanceLaunchedAtParameters `json:"resourceAwsEc2InstanceLaunchedAt,omitempty" tf:"resource_aws_ec2_instance_launched_at,omitempty"` + + // The identifier of the subnet that the instance was launched in. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsEC2InstanceSubnetID []ResourceAwsEC2InstanceSubnetIDParameters `json:"resourceAwsEc2InstanceSubnetId,omitempty" tf:"resource_aws_ec2_instance_subnet_id,omitempty"` + + // The instance type of the instance. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsEC2InstanceType []ResourceAwsEC2InstanceTypeParameters `json:"resourceAwsEc2InstanceType,omitempty" tf:"resource_aws_ec2_instance_type,omitempty"` + + // The identifier of the VPC that the instance was launched in. See String Filter below for more details. 
+ // +kubebuilder:validation:Optional + ResourceAwsEC2InstanceVPCID []ResourceAwsEC2InstanceVPCIDParameters `json:"resourceAwsEc2InstanceVpcId,omitempty" tf:"resource_aws_ec2_instance_vpc_id,omitempty"` + + // The creation date/time of the IAM access key related to a finding. See Date Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsIAMAccessKeyCreatedAt []ResourceAwsIAMAccessKeyCreatedAtParameters `json:"resourceAwsIamAccessKeyCreatedAt,omitempty" tf:"resource_aws_iam_access_key_created_at,omitempty"` + + // The status of the IAM access key related to a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsIAMAccessKeyStatus []ResourceAwsIAMAccessKeyStatusParameters `json:"resourceAwsIamAccessKeyStatus,omitempty" tf:"resource_aws_iam_access_key_status,omitempty"` + + // The user associated with the IAM access key related to a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsIAMAccessKeyUserName []ResourceAwsIAMAccessKeyUserNameParameters `json:"resourceAwsIamAccessKeyUserName,omitempty" tf:"resource_aws_iam_access_key_user_name,omitempty"` + + // The canonical user ID of the owner of the S3 bucket. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsS3BucketOwnerID []ResourceAwsS3BucketOwnerIDParameters `json:"resourceAwsS3BucketOwnerId,omitempty" tf:"resource_aws_s3_bucket_owner_id,omitempty"` + + // The display name of the owner of the S3 bucket. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceAwsS3BucketOwnerName []ResourceAwsS3BucketOwnerNameParameters `json:"resourceAwsS3BucketOwnerName,omitempty" tf:"resource_aws_s3_bucket_owner_name,omitempty"` + + // The identifier of the image related to a finding. See String Filter below for more details. 
+ // +kubebuilder:validation:Optional + ResourceContainerImageID []ResourceContainerImageIDParameters `json:"resourceContainerImageId,omitempty" tf:"resource_container_image_id,omitempty"` + + // The name of the image related to a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceContainerImageName []ResourceContainerImageNameParameters `json:"resourceContainerImageName,omitempty" tf:"resource_container_image_name,omitempty"` + + // The date/time that the container was started. See Date Filter below for more details. + // +kubebuilder:validation:Optional + ResourceContainerLaunchedAt []ResourceContainerLaunchedAtParameters `json:"resourceContainerLaunchedAt,omitempty" tf:"resource_container_launched_at,omitempty"` + + // The name of the container related to a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceContainerName []ResourceContainerNameParameters `json:"resourceContainerName,omitempty" tf:"resource_container_name,omitempty"` + + // The details of a resource that doesn't have a specific subfield for the resource type defined. See Map Filter below for more details. + // +kubebuilder:validation:Optional + ResourceDetailsOther []ResourceDetailsOtherParameters `json:"resourceDetailsOther,omitempty" tf:"resource_details_other,omitempty"` + + // The canonical identifier for the given resource type. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceID []ResourceIDParameters `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // The canonical AWS partition name that the Region is assigned to. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourcePartition []ResourcePartitionParameters `json:"resourcePartition,omitempty" tf:"resource_partition,omitempty"` + + // The canonical AWS external Region name where this resource is located. See String Filter below for more details. 
+ // +kubebuilder:validation:Optional + ResourceRegion []ResourceRegionParameters `json:"resourceRegion,omitempty" tf:"resource_region,omitempty"` + + // A list of AWS tags associated with a resource at the time the finding was processed. See Map Filter below for more details. + // +kubebuilder:validation:Optional + ResourceTags []ResourceTagsParameters `json:"resourceTags,omitempty" tf:"resource_tags,omitempty"` + + // Specifies the type of the resource that details are provided for. See String Filter below for more details. + // +kubebuilder:validation:Optional + ResourceType []ResourceTypeParameters `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // The label of a finding's severity. See String Filter below for more details. + // +kubebuilder:validation:Optional + SeverityLabel []SeverityLabelParameters `json:"severityLabel,omitempty" tf:"severity_label,omitempty"` + + // A URL that links to a page about the current finding in the security-findings provider's solution. See String Filter below for more details. + // +kubebuilder:validation:Optional + SourceURL []SourceURLParameters `json:"sourceUrl,omitempty" tf:"source_url,omitempty"` + + // The category of a threat intelligence indicator. See String Filter below for more details. + // +kubebuilder:validation:Optional + ThreatIntelIndicatorCategory []ThreatIntelIndicatorCategoryParameters `json:"threatIntelIndicatorCategory,omitempty" tf:"threat_intel_indicator_category,omitempty"` + + // The date/time of the last observation of a threat intelligence indicator. See Date Filter below for more details. + // +kubebuilder:validation:Optional + ThreatIntelIndicatorLastObservedAt []ThreatIntelIndicatorLastObservedAtParameters `json:"threatIntelIndicatorLastObservedAt,omitempty" tf:"threat_intel_indicator_last_observed_at,omitempty"` + + // The source of the threat intelligence. See String Filter below for more details. 
+ // +kubebuilder:validation:Optional + ThreatIntelIndicatorSource []ThreatIntelIndicatorSourceParameters `json:"threatIntelIndicatorSource,omitempty" tf:"threat_intel_indicator_source,omitempty"` + + // The URL for more details from the source of the threat intelligence. See String Filter below for more details. + // +kubebuilder:validation:Optional + ThreatIntelIndicatorSourceURL []ThreatIntelIndicatorSourceURLParameters `json:"threatIntelIndicatorSourceUrl,omitempty" tf:"threat_intel_indicator_source_url,omitempty"` + + // The type of a threat intelligence indicator. See String Filter below for more details. + // +kubebuilder:validation:Optional + ThreatIntelIndicatorType []ThreatIntelIndicatorTypeParameters `json:"threatIntelIndicatorType,omitempty" tf:"threat_intel_indicator_type,omitempty"` + + // The value of a threat intelligence indicator. See String Filter below for more details. + // +kubebuilder:validation:Optional + ThreatIntelIndicatorValue []ThreatIntelIndicatorValueParameters `json:"threatIntelIndicatorValue,omitempty" tf:"threat_intel_indicator_value,omitempty"` + + // A finding's title. See String Filter below for more details. + // +kubebuilder:validation:Optional + Title []TitleParameters `json:"title,omitempty" tf:"title,omitempty"` + + // A finding type in the format of namespace/category/classifier that classifies a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + Type []TypeParameters `json:"type,omitempty" tf:"type,omitempty"` + + // An ISO8601-formatted timestamp that indicates when the security-findings provider last updated the finding record. See Date Filter below for more details. + // +kubebuilder:validation:Optional + UpdatedAt []UpdatedAtParameters `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` + + // A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding. See Map Filter below for more details. 
+ // +kubebuilder:validation:Optional + UserDefinedValues []UserDefinedValuesParameters `json:"userDefinedValues,omitempty" tf:"user_defined_values,omitempty"` + + // The veracity of a finding. See String Filter below for more details. + // +kubebuilder:validation:Optional + VerificationState []VerificationStateParameters `json:"verificationState,omitempty" tf:"verification_state,omitempty"` + + // The status of the investigation into a finding. See Workflow Status Filter below for more details. + // +kubebuilder:validation:Optional + WorkflowStatus []WorkflowStatusParameters `json:"workflowStatus,omitempty" tf:"workflow_status,omitempty"` +} + +type FindingProviderFieldsConfidenceInitParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type FindingProviderFieldsConfidenceObservation struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type FindingProviderFieldsConfidenceParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. 
+ // +kubebuilder:validation:Optional + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type FindingProviderFieldsCriticalityInitParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type FindingProviderFieldsCriticalityObservation struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type FindingProviderFieldsCriticalityParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. 
+ // +kubebuilder:validation:Optional + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type FindingProviderFieldsRelatedFindingsIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsRelatedFindingsIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsRelatedFindingsIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FindingProviderFieldsRelatedFindingsProductArnInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsRelatedFindingsProductArnObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsRelatedFindingsProductArnParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FindingProviderFieldsSeverityLabelInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsSeverityLabelObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsSeverityLabelParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FindingProviderFieldsSeverityOriginalInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsSeverityOriginalObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsSeverityOriginalParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FindingProviderFieldsTypesInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsTypesObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FindingProviderFieldsTypesParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FirstObservedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type FirstObservedAtDateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type FirstObservedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type FirstObservedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *FirstObservedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. 
+ Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type FirstObservedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *FirstObservedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type FirstObservedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + // +kubebuilder:validation:Optional + DateRange *FirstObservedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type GeneratorIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GeneratorIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GeneratorIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type IDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type IDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type IDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type InsightInitParameters struct { + + // A configuration block including one or more (up to 10 distinct) attributes used to filter the findings included in the insight. The insight only includes findings that match criteria defined in the filters. See filters below for more details. 
+ Filters *FiltersInitParameters `json:"filters,omitempty" tf:"filters,omitempty"` + + // The attribute used to group the findings for the insight e.g., if an insight is grouped by ResourceId, then the insight produces a list of resource identifiers. + GroupByAttribute *string `json:"groupByAttribute,omitempty" tf:"group_by_attribute,omitempty"` + + // The name of the custom insight. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type InsightObservation struct { + + // ARN of the insight. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A configuration block including one or more (up to 10 distinct) attributes used to filter the findings included in the insight. The insight only includes findings that match criteria defined in the filters. See filters below for more details. + Filters *FiltersObservation `json:"filters,omitempty" tf:"filters,omitempty"` + + // The attribute used to group the findings for the insight e.g., if an insight is grouped by ResourceId, then the insight produces a list of resource identifiers. + GroupByAttribute *string `json:"groupByAttribute,omitempty" tf:"group_by_attribute,omitempty"` + + // ARN of the insight. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the custom insight. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type InsightParameters struct { + + // A configuration block including one or more (up to 10 distinct) attributes used to filter the findings included in the insight. The insight only includes findings that match criteria defined in the filters. See filters below for more details. + // +kubebuilder:validation:Optional + Filters *FiltersParameters `json:"filters,omitempty" tf:"filters,omitempty"` + + // The attribute used to group the findings for the insight e.g., if an insight is grouped by ResourceId, then the insight produces a list of resource identifiers. 
+ // +kubebuilder:validation:Optional + GroupByAttribute *string `json:"groupByAttribute,omitempty" tf:"group_by_attribute,omitempty"` + + // The name of the custom insight. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type KeywordInitParameters struct { + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type KeywordObservation struct { + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type KeywordParameters struct { + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type LastObservedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type LastObservedAtDateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type LastObservedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type LastObservedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. 
+ DateRange *LastObservedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type LastObservedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *LastObservedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type LastObservedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + // +kubebuilder:validation:Optional + DateRange *LastObservedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type MalwareNameInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MalwareNameObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MalwareNameParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MalwarePathInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MalwarePathObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MalwarePathParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MalwareStateInitParameters struct { + + // The condition to apply to a string value when querying for findings. 
Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MalwareStateObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MalwareStateParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type MalwareTypeInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MalwareTypeObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MalwareTypeParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type NetworkDestinationDomainInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkDestinationDomainObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkDestinationDomainParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type NetworkDestinationIPv4InitParameters struct { + + // A finding's CIDR value. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type NetworkDestinationIPv4Observation struct { + + // A finding's CIDR value. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type NetworkDestinationIPv4Parameters struct { + + // A finding's CIDR value. + // +kubebuilder:validation:Optional + Cidr *string `json:"cidr" tf:"cidr,omitempty"` +} + +type NetworkDestinationIPv6InitParameters struct { + + // A finding's CIDR value. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type NetworkDestinationIPv6Observation struct { + + // A finding's CIDR value. 
+ Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type NetworkDestinationIPv6Parameters struct { + + // A finding's CIDR value. + // +kubebuilder:validation:Optional + Cidr *string `json:"cidr" tf:"cidr,omitempty"` +} + +type NetworkDestinationPortInitParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type NetworkDestinationPortObservation struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type NetworkDestinationPortParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. 
+ // +kubebuilder:validation:Optional + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type NetworkDirectionInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkDirectionObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkDirectionParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type NetworkProtocolInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkProtocolObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkProtocolParameters struct { + + // The condition to apply to a string value when querying for findings. 
Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type NetworkSourceDomainInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkSourceDomainObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkSourceDomainParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type NetworkSourceIPv4InitParameters struct { + + // A finding's CIDR value. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type NetworkSourceIPv4Observation struct { + + // A finding's CIDR value. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type NetworkSourceIPv4Parameters struct { + + // A finding's CIDR value. + // +kubebuilder:validation:Optional + Cidr *string `json:"cidr" tf:"cidr,omitempty"` +} + +type NetworkSourceIPv6InitParameters struct { + + // A finding's CIDR value. 
+ Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type NetworkSourceIPv6Observation struct { + + // A finding's CIDR value. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type NetworkSourceIPv6Parameters struct { + + // A finding's CIDR value. + // +kubebuilder:validation:Optional + Cidr *string `json:"cidr" tf:"cidr,omitempty"` +} + +type NetworkSourceMacInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkSourceMacObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NetworkSourceMacParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type NetworkSourcePortInitParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. 
+ Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type NetworkSourcePortObservation struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type NetworkSourcePortParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type NoteTextInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NoteTextObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NoteTextParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type NoteUpdatedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NoteUpdatedAtDateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NoteUpdatedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type NoteUpdatedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *NoteUpdatedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. 
+ Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type NoteUpdatedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *NoteUpdatedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type NoteUpdatedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + // +kubebuilder:validation:Optional + DateRange *NoteUpdatedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type NoteUpdatedByInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NoteUpdatedByObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type NoteUpdatedByParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ProcessLaunchedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProcessLaunchedAtDateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProcessLaunchedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type ProcessLaunchedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ProcessLaunchedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. 
+ Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ProcessLaunchedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ProcessLaunchedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ProcessLaunchedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + // +kubebuilder:validation:Optional + DateRange *ProcessLaunchedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ProcessNameInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProcessNameObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProcessNameParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ProcessParentPidInitParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type ProcessParentPidObservation struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type ProcessParentPidParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. 
+ // +kubebuilder:validation:Optional + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type ProcessPathInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProcessPathObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProcessPathParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ProcessPidInitParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. 
+ Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type ProcessPidObservation struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type ProcessPidParameters struct { + + // The equal-to condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Eq *string `json:"eq,omitempty" tf:"eq,omitempty"` + + // The greater-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Gte *string `json:"gte,omitempty" tf:"gte,omitempty"` + + // The less-than-equal condition to be applied to a single field when querying for findings, provided as a String. + // +kubebuilder:validation:Optional + Lte *string `json:"lte,omitempty" tf:"lte,omitempty"` +} + +type ProcessTerminatedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProcessTerminatedAtDateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProcessTerminatedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. 
+ // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type ProcessTerminatedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ProcessTerminatedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ProcessTerminatedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ProcessTerminatedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ProcessTerminatedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + // +kubebuilder:validation:Optional + DateRange *ProcessTerminatedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. 
+ // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ProductArnInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProductArnObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProductArnParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ProductFieldsInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProductFieldsObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // The key of the map filter. 
For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProductFieldsParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ProductNameInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProductNameObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ProductNameParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type RecommendationTextInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RecommendationTextObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RecommendationTextParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type RecordStateInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RecordStateObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RecordStateParameters struct { + + // The condition to apply to a string value when querying for findings. 
Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type RelatedFindingsIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RelatedFindingsIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RelatedFindingsIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type RelatedFindingsProductArnInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RelatedFindingsProductArnObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RelatedFindingsProductArnParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceIAMInstanceProfileArnInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceIAMInstanceProfileArnObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceIAMInstanceProfileArnParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceIPv4AddressesInitParameters struct { + + // A finding's CIDR value. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type ResourceAwsEC2InstanceIPv4AddressesObservation struct { + + // A finding's CIDR value. 
+ Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type ResourceAwsEC2InstanceIPv4AddressesParameters struct { + + // A finding's CIDR value. + // +kubebuilder:validation:Optional + Cidr *string `json:"cidr" tf:"cidr,omitempty"` +} + +type ResourceAwsEC2InstanceIPv6AddressesInitParameters struct { + + // A finding's CIDR value. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type ResourceAwsEC2InstanceIPv6AddressesObservation struct { + + // A finding's CIDR value. + Cidr *string `json:"cidr,omitempty" tf:"cidr,omitempty"` +} + +type ResourceAwsEC2InstanceIPv6AddressesParameters struct { + + // A finding's CIDR value. + // +kubebuilder:validation:Optional + Cidr *string `json:"cidr" tf:"cidr,omitempty"` +} + +type ResourceAwsEC2InstanceImageIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceImageIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceImageIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceKeyNameInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceKeyNameObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceKeyNameParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceLaunchedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceLaunchedAtDateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceLaunchedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. 
+ // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceLaunchedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ResourceAwsEC2InstanceLaunchedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ResourceAwsEC2InstanceLaunchedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ResourceAwsEC2InstanceLaunchedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ResourceAwsEC2InstanceLaunchedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + // +kubebuilder:validation:Optional + DateRange *ResourceAwsEC2InstanceLaunchedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. 
Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ResourceAwsEC2InstanceSubnetIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceSubnetIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceSubnetIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceTypeInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceTypeObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceTypeParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceVPCIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceVPCIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsEC2InstanceVPCIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsIAMAccessKeyCreatedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsIAMAccessKeyCreatedAtDateRangeObservation struct { + + // A date range unit for the date filter. 
Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsIAMAccessKeyCreatedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsIAMAccessKeyCreatedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ResourceAwsIAMAccessKeyCreatedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ResourceAwsIAMAccessKeyCreatedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ResourceAwsIAMAccessKeyCreatedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ResourceAwsIAMAccessKeyCreatedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. 
+ // +kubebuilder:validation:Optional + DateRange *ResourceAwsIAMAccessKeyCreatedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ResourceAwsIAMAccessKeyStatusInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsIAMAccessKeyStatusObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsIAMAccessKeyStatusParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsIAMAccessKeyUserNameInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsIAMAccessKeyUserNameObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsIAMAccessKeyUserNameParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsS3BucketOwnerIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsS3BucketOwnerIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsS3BucketOwnerIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceAwsS3BucketOwnerNameInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsS3BucketOwnerNameObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceAwsS3BucketOwnerNameParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceContainerImageIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceContainerImageIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceContainerImageIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceContainerImageNameInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceContainerImageNameObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceContainerImageNameParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceContainerLaunchedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceContainerLaunchedAtDateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. 
+ Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceContainerLaunchedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type ResourceContainerLaunchedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ResourceContainerLaunchedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ResourceContainerLaunchedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ResourceContainerLaunchedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ResourceContainerLaunchedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. 
+ // +kubebuilder:validation:Optional + DateRange *ResourceContainerLaunchedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ResourceContainerNameInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceContainerNameObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceContainerNameParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceDetailsOtherInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. 
For UserDefinedFields, Key is the name of the field. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceDetailsOtherObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceDetailsOtherParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceIDInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceIDObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. 
+ Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceIDParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourcePartitionInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourcePartitionObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourcePartitionParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceRegionInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceRegionObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceRegionParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceTagsInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceTagsObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceTagsParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceTypeInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceTypeObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceTypeParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type SeverityLabelInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. 
+ Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SeverityLabelObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SeverityLabelParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type SourceURLInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SourceURLObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SourceURLParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorCategoryInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorCategoryObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorCategoryParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorLastObservedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorLastObservedAtDateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorLastObservedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. 
+ // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorLastObservedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ThreatIntelIndicatorLastObservedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ThreatIntelIndicatorLastObservedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *ThreatIntelIndicatorLastObservedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ThreatIntelIndicatorLastObservedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + // +kubebuilder:validation:Optional + DateRange *ThreatIntelIndicatorLastObservedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. 
Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type ThreatIntelIndicatorSourceInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorSourceObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorSourceParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorSourceURLInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorSourceURLObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorSourceURLParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorTypeInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorTypeObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorTypeParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorValueInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorValueObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ThreatIntelIndicatorValueParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TitleInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TitleObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TitleParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TypeInitParameters struct { + + // The condition to apply to a string value when querying for findings. 
Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TypeObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TypeParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type UpdatedAtDateRangeInitParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type UpdatedAtDateRangeObservation struct { + + // A date range unit for the date filter. Valid values: DAYS. + Unit *string `json:"unit,omitempty" tf:"unit,omitempty"` + + // A value for the keyword. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type UpdatedAtDateRangeParameters struct { + + // A date range unit for the date filter. Valid values: DAYS. + // +kubebuilder:validation:Optional + Unit *string `json:"unit" tf:"unit,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type UpdatedAtInitParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. 
+ DateRange *UpdatedAtDateRangeInitParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type UpdatedAtObservation struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + DateRange *UpdatedAtDateRangeObservation `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type UpdatedAtParameters struct { + + // A configuration block of the date range for the date filter. See date_range below for more details. + // +kubebuilder:validation:Optional + DateRange *UpdatedAtDateRangeParameters `json:"dateRange,omitempty" tf:"date_range,omitempty"` + + // An end date for the date filter. Required with start if date_range is not specified. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // A start date for the date filter. Required with end if date_range is not specified. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type UserDefinedValuesInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. 
For UserDefinedFields, Key is the name of the field. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type UserDefinedValuesObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type UserDefinedValuesParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // The key of the map filter. For example, for ResourceTags, Key identifies the name of the tag. For UserDefinedFields, Key is the name of the field. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type VerificationStateInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type VerificationStateObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. 
+ Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type VerificationStateParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type WorkflowStatusInitParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type WorkflowStatusObservation struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + Comparison *string `json:"comparison,omitempty" tf:"comparison,omitempty"` + + // A value for the keyword. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type WorkflowStatusParameters struct { + + // The condition to apply to a string value when querying for findings. Valid values include: EQUALS and NOT_EQUALS. + // +kubebuilder:validation:Optional + Comparison *string `json:"comparison" tf:"comparison,omitempty"` + + // A value for the keyword. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +// InsightSpec defines the desired state of Insight +type InsightSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InsightParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InsightInitParameters `json:"initProvider,omitempty"` +} + +// InsightStatus defines the observed state of Insight. +type InsightStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InsightObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Insight is the Schema for the Insights API. Provides a Security Hub custom insight resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Insight struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.filters) || (has(self.initProvider) && has(self.initProvider.filters))",message="spec.forProvider.filters is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.groupByAttribute) || (has(self.initProvider) && has(self.initProvider.groupByAttribute))",message="spec.forProvider.groupByAttribute is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec InsightSpec `json:"spec"` + Status InsightStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InsightList contains a list of Insights +type InsightList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Insight `json:"items"` +} + +// Repository type metadata. +var ( + Insight_Kind = "Insight" + Insight_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Insight_Kind}.String() + Insight_KindAPIVersion = Insight_Kind + "." + CRDGroupVersion.String() + Insight_GroupVersionKind = CRDGroupVersion.WithKind(Insight_Kind) +) + +func init() { + SchemeBuilder.Register(&Insight{}, &InsightList{}) +} diff --git a/apis/servicecatalog/v1beta1/zz_budgetresourceassociation_types.go b/apis/servicecatalog/v1beta1/zz_budgetresourceassociation_types.go index db966e6434..e7e98bc275 100755 --- a/apis/servicecatalog/v1beta1/zz_budgetresourceassociation_types.go +++ b/apis/servicecatalog/v1beta1/zz_budgetresourceassociation_types.go @@ -16,7 +16,7 @@ import ( type BudgetResourceAssociationInitParameters struct { // Budget name. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/budgets/v1beta1.Budget + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/budgets/v1beta2.Budget BudgetName *string `json:"budgetName,omitempty" tf:"budget_name,omitempty"` // Reference to a Budget in budgets to populate budgetName. @@ -28,7 +28,7 @@ type BudgetResourceAssociationInitParameters struct { BudgetNameSelector *v1.Selector `json:"budgetNameSelector,omitempty" tf:"-"` // Resource identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` // Reference to a Product in servicecatalog to populate resourceId. @@ -55,7 +55,7 @@ type BudgetResourceAssociationObservation struct { type BudgetResourceAssociationParameters struct { // Budget name. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/budgets/v1beta1.Budget + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/budgets/v1beta2.Budget // +kubebuilder:validation:Optional BudgetName *string `json:"budgetName,omitempty" tf:"budget_name,omitempty"` @@ -73,7 +73,7 @@ type BudgetResourceAssociationParameters struct { Region *string `json:"region" tf:"-"` // Resource identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product // +kubebuilder:validation:Optional ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` diff --git a/apis/servicecatalog/v1beta1/zz_constraint_types.go b/apis/servicecatalog/v1beta1/zz_constraint_types.go index 7ba4fc1913..9edbf3eba1 100755 --- a/apis/servicecatalog/v1beta1/zz_constraint_types.go +++ b/apis/servicecatalog/v1beta1/zz_constraint_types.go @@ -38,7 +38,7 @@ type ConstraintInitParameters struct { PortfolioIDSelector *v1.Selector `json:"portfolioIdSelector,omitempty" tf:"-"` // Product identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ProductID *string `json:"productId,omitempty" tf:"product_id,omitempty"` @@ -112,7 +112,7 @@ type ConstraintParameters struct { PortfolioIDSelector *v1.Selector `json:"portfolioIdSelector,omitempty" tf:"-"` // Product identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ProductID *string `json:"productId,omitempty" tf:"product_id,omitempty"` diff --git a/apis/servicecatalog/v1beta1/zz_generated.conversion_hubs.go b/apis/servicecatalog/v1beta1/zz_generated.conversion_hubs.go index 1761e88231..25cc0be0de 100755 --- a/apis/servicecatalog/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/servicecatalog/v1beta1/zz_generated.conversion_hubs.go @@ -21,18 +21,12 @@ func (tr *PortfolioShare) Hub() {} // Hub marks this type as a conversion hub. func (tr *PrincipalPortfolioAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Product) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ProductPortfolioAssociation) Hub() {} // Hub marks this type as a conversion hub. func (tr *ProvisioningArtifact) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ServiceAction) Hub() {} - // Hub marks this type as a conversion hub. func (tr *TagOption) Hub() {} diff --git a/apis/servicecatalog/v1beta1/zz_generated.conversion_spokes.go b/apis/servicecatalog/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..c19bea9501 --- /dev/null +++ b/apis/servicecatalog/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Product to the hub type. +func (tr *Product) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Product type. +func (tr *Product) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ServiceAction to the hub type. +func (tr *ServiceAction) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ServiceAction type. 
+func (tr *ServiceAction) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/servicecatalog/v1beta1/zz_generated.resolvers.go b/apis/servicecatalog/v1beta1/zz_generated.resolvers.go index ee34d577e9..9bc23ed823 100644 --- a/apis/servicecatalog/v1beta1/zz_generated.resolvers.go +++ b/apis/servicecatalog/v1beta1/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *BudgetResourceAssociation) ResolveReferences( // ResolveReferences of var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("budgets.aws.upbound.io", "v1beta1", "Budget", "BudgetList") + m, l, err = apisresolver.GetManagedResource("budgets.aws.upbound.io", "v1beta2", "Budget", "BudgetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -46,7 +46,7 @@ func (mg *BudgetResourceAssociation) ResolveReferences( // ResolveReferences of mg.Spec.ForProvider.BudgetName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BudgetNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", "ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -65,7 +65,7 @@ func (mg *BudgetResourceAssociation) ResolveReferences( // ResolveReferences of mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) 
mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("budgets.aws.upbound.io", "v1beta1", "Budget", "BudgetList") + m, l, err = apisresolver.GetManagedResource("budgets.aws.upbound.io", "v1beta2", "Budget", "BudgetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -84,7 +84,7 @@ func (mg *BudgetResourceAssociation) ResolveReferences( // ResolveReferences of mg.Spec.InitProvider.BudgetName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.BudgetNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", "ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -134,7 +134,7 @@ func (mg *Constraint) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.PortfolioID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PortfolioIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", "ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -172,7 +172,7 @@ func (mg *Constraint) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.InitProvider.PortfolioID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.PortfolioIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = 
apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", "ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -360,7 +360,7 @@ func (mg *ProductPortfolioAssociation) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.PortfolioID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PortfolioIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", "ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -398,7 +398,7 @@ func (mg *ProductPortfolioAssociation) ResolveReferences(ctx context.Context, c mg.Spec.InitProvider.PortfolioID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.PortfolioIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", "ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -429,7 +429,7 @@ func (mg *ProvisioningArtifact) ResolveReferences(ctx context.Context, c client. 
var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", "ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -448,7 +448,7 @@ func (mg *ProvisioningArtifact) ResolveReferences(ctx context.Context, c client. mg.Spec.ForProvider.ProductID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ProductIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", "ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -479,7 +479,7 @@ func (mg *TagOptionResourceAssociation) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", "ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -517,7 +517,7 @@ func (mg *TagOptionResourceAssociation) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.TagOptionID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TagOptionIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta1", "Product", "ProductList") + m, l, err = apisresolver.GetManagedResource("servicecatalog.aws.upbound.io", "v1beta2", "Product", 
"ProductList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/servicecatalog/v1beta1/zz_productportfolioassociation_types.go b/apis/servicecatalog/v1beta1/zz_productportfolioassociation_types.go index e5d9a87845..891ec4d86f 100755 --- a/apis/servicecatalog/v1beta1/zz_productportfolioassociation_types.go +++ b/apis/servicecatalog/v1beta1/zz_productportfolioassociation_types.go @@ -31,7 +31,7 @@ type ProductPortfolioAssociationInitParameters struct { PortfolioIDSelector *v1.Selector `json:"portfolioIdSelector,omitempty" tf:"-"` // Product identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product ProductID *string `json:"productId,omitempty" tf:"product_id,omitempty"` // Reference to a Product in servicecatalog to populate productId. @@ -84,7 +84,7 @@ type ProductPortfolioAssociationParameters struct { PortfolioIDSelector *v1.Selector `json:"portfolioIdSelector,omitempty" tf:"-"` // Product identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product // +kubebuilder:validation:Optional ProductID *string `json:"productId,omitempty" tf:"product_id,omitempty"` diff --git a/apis/servicecatalog/v1beta1/zz_provisioningartifact_types.go b/apis/servicecatalog/v1beta1/zz_provisioningartifact_types.go index 44e4462cb6..2a92b0d67e 100755 --- a/apis/servicecatalog/v1beta1/zz_provisioningartifact_types.go +++ b/apis/servicecatalog/v1beta1/zz_provisioningartifact_types.go @@ -34,7 +34,7 @@ type ProvisioningArtifactInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // Identifier of the product. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ProductID *string `json:"productId,omitempty" tf:"product_id,omitempty"` @@ -125,7 +125,7 @@ type ProvisioningArtifactParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // Identifier of the product. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ProductID *string `json:"productId,omitempty" tf:"product_id,omitempty"` diff --git a/apis/servicecatalog/v1beta1/zz_tagoptionresourceassociation_types.go b/apis/servicecatalog/v1beta1/zz_tagoptionresourceassociation_types.go index b821789e9f..fab09beff7 100755 --- a/apis/servicecatalog/v1beta1/zz_tagoptionresourceassociation_types.go +++ b/apis/servicecatalog/v1beta1/zz_tagoptionresourceassociation_types.go @@ -16,7 +16,7 @@ import ( type TagOptionResourceAssociationInitParameters struct { // Resource identifier. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` // Reference to a Product in servicecatalog to populate resourceId. @@ -72,7 +72,7 @@ type TagOptionResourceAssociationParameters struct { Region *string `json:"region" tf:"-"` // Resource identifier. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta1.Product + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicecatalog/v1beta2.Product // +kubebuilder:validation:Optional ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` diff --git a/apis/servicecatalog/v1beta2/zz_generated.conversion_hubs.go b/apis/servicecatalog/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..120384c19b --- /dev/null +++ b/apis/servicecatalog/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Product) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ServiceAction) Hub() {} diff --git a/apis/servicecatalog/v1beta2/zz_generated.deepcopy.go b/apis/servicecatalog/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..5432bb18c9 --- /dev/null +++ b/apis/servicecatalog/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,860 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefinitionInitParameters) DeepCopyInto(out *DefinitionInitParameters) { + *out = *in + if in.AssumeRole != nil { + in, out := &in.AssumeRole, &out.AssumeRole + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefinitionInitParameters. +func (in *DefinitionInitParameters) DeepCopy() *DefinitionInitParameters { + if in == nil { + return nil + } + out := new(DefinitionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefinitionObservation) DeepCopyInto(out *DefinitionObservation) { + *out = *in + if in.AssumeRole != nil { + in, out := &in.AssumeRole, &out.AssumeRole + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefinitionObservation. 
+func (in *DefinitionObservation) DeepCopy() *DefinitionObservation { + if in == nil { + return nil + } + out := new(DefinitionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefinitionParameters) DeepCopyInto(out *DefinitionParameters) { + *out = *in + if in.AssumeRole != nil { + in, out := &in.AssumeRole, &out.AssumeRole + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefinitionParameters. +func (in *DefinitionParameters) DeepCopy() *DefinitionParameters { + if in == nil { + return nil + } + out := new(DefinitionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Product) DeepCopyInto(out *Product) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Product. +func (in *Product) DeepCopy() *Product { + if in == nil { + return nil + } + out := new(Product) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Product) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductInitParameters) DeepCopyInto(out *ProductInitParameters) { + *out = *in + if in.AcceptLanguage != nil { + in, out := &in.AcceptLanguage, &out.AcceptLanguage + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Distributor != nil { + in, out := &in.Distributor, &out.Distributor + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.ProvisioningArtifactParameters != nil { + in, out := &in.ProvisioningArtifactParameters, &out.ProvisioningArtifactParameters + *out = new(ProvisioningArtifactParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SupportDescription != nil { + in, out := &in.SupportDescription, &out.SupportDescription + *out = new(string) + **out = **in + } + if in.SupportEmail != nil { + in, out := &in.SupportEmail, &out.SupportEmail + *out = new(string) + **out = **in + } + if in.SupportURL != nil { + in, out := &in.SupportURL, &out.SupportURL + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
ProductInitParameters. +func (in *ProductInitParameters) DeepCopy() *ProductInitParameters { + if in == nil { + return nil + } + out := new(ProductInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductList) DeepCopyInto(out *ProductList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Product, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductList. +func (in *ProductList) DeepCopy() *ProductList { + if in == nil { + return nil + } + out := new(ProductList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProductList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProductObservation) DeepCopyInto(out *ProductObservation) { + *out = *in + if in.AcceptLanguage != nil { + in, out := &in.AcceptLanguage, &out.AcceptLanguage + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedTime != nil { + in, out := &in.CreatedTime, &out.CreatedTime + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Distributor != nil { + in, out := &in.Distributor, &out.Distributor + *out = new(string) + **out = **in + } + if in.HasDefaultPath != nil { + in, out := &in.HasDefaultPath, &out.HasDefaultPath + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.ProvisioningArtifactParameters != nil { + in, out := &in.ProvisioningArtifactParameters, &out.ProvisioningArtifactParameters + *out = new(ProvisioningArtifactParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.SupportDescription != nil { + in, out := &in.SupportDescription, &out.SupportDescription + *out = new(string) + **out = **in + } + if in.SupportEmail != nil { + in, out := &in.SupportEmail, &out.SupportEmail + *out = new(string) + **out = **in + } + if in.SupportURL != nil { + in, out := &in.SupportURL, &out.SupportURL + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + 
*out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductObservation. +func (in *ProductObservation) DeepCopy() *ProductObservation { + if in == nil { + return nil + } + out := new(ProductObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductParameters) DeepCopyInto(out *ProductParameters) { + *out = *in + if in.AcceptLanguage != nil { + in, out := &in.AcceptLanguage, &out.AcceptLanguage + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Distributor != nil { + in, out := &in.Distributor, &out.Distributor + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(string) + **out = **in + } + if in.ProvisioningArtifactParameters != nil { + in, out := &in.ProvisioningArtifactParameters, &out.ProvisioningArtifactParameters + *out = new(ProvisioningArtifactParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SupportDescription != nil { + in, out := &in.SupportDescription, &out.SupportDescription + *out = new(string) + **out = **in + } + if 
in.SupportEmail != nil { + in, out := &in.SupportEmail, &out.SupportEmail + *out = new(string) + **out = **in + } + if in.SupportURL != nil { + in, out := &in.SupportURL, &out.SupportURL + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductParameters. +func (in *ProductParameters) DeepCopy() *ProductParameters { + if in == nil { + return nil + } + out := new(ProductParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductSpec) DeepCopyInto(out *ProductSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductSpec. +func (in *ProductSpec) DeepCopy() *ProductSpec { + if in == nil { + return nil + } + out := new(ProductSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProductStatus) DeepCopyInto(out *ProductStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProductStatus. 
+func (in *ProductStatus) DeepCopy() *ProductStatus { + if in == nil { + return nil + } + out := new(ProductStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisioningArtifactParametersInitParameters) DeepCopyInto(out *ProvisioningArtifactParametersInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableTemplateValidation != nil { + in, out := &in.DisableTemplateValidation, &out.DisableTemplateValidation + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TemplatePhysicalID != nil { + in, out := &in.TemplatePhysicalID, &out.TemplatePhysicalID + *out = new(string) + **out = **in + } + if in.TemplateURL != nil { + in, out := &in.TemplateURL, &out.TemplateURL + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningArtifactParametersInitParameters. +func (in *ProvisioningArtifactParametersInitParameters) DeepCopy() *ProvisioningArtifactParametersInitParameters { + if in == nil { + return nil + } + out := new(ProvisioningArtifactParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProvisioningArtifactParametersObservation) DeepCopyInto(out *ProvisioningArtifactParametersObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableTemplateValidation != nil { + in, out := &in.DisableTemplateValidation, &out.DisableTemplateValidation + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TemplatePhysicalID != nil { + in, out := &in.TemplatePhysicalID, &out.TemplatePhysicalID + *out = new(string) + **out = **in + } + if in.TemplateURL != nil { + in, out := &in.TemplateURL, &out.TemplateURL + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningArtifactParametersObservation. +func (in *ProvisioningArtifactParametersObservation) DeepCopy() *ProvisioningArtifactParametersObservation { + if in == nil { + return nil + } + out := new(ProvisioningArtifactParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProvisioningArtifactParametersParameters) DeepCopyInto(out *ProvisioningArtifactParametersParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisableTemplateValidation != nil { + in, out := &in.DisableTemplateValidation, &out.DisableTemplateValidation + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TemplatePhysicalID != nil { + in, out := &in.TemplatePhysicalID, &out.TemplatePhysicalID + *out = new(string) + **out = **in + } + if in.TemplateURL != nil { + in, out := &in.TemplateURL, &out.TemplateURL + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningArtifactParametersParameters. +func (in *ProvisioningArtifactParametersParameters) DeepCopy() *ProvisioningArtifactParametersParameters { + if in == nil { + return nil + } + out := new(ProvisioningArtifactParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAction) DeepCopyInto(out *ServiceAction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAction. +func (in *ServiceAction) DeepCopy() *ServiceAction { + if in == nil { + return nil + } + out := new(ServiceAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ServiceAction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceActionInitParameters) DeepCopyInto(out *ServiceActionInitParameters) { + *out = *in + if in.AcceptLanguage != nil { + in, out := &in.AcceptLanguage, &out.AcceptLanguage + *out = new(string) + **out = **in + } + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = new(DefinitionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceActionInitParameters. +func (in *ServiceActionInitParameters) DeepCopy() *ServiceActionInitParameters { + if in == nil { + return nil + } + out := new(ServiceActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceActionList) DeepCopyInto(out *ServiceActionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceActionList. +func (in *ServiceActionList) DeepCopy() *ServiceActionList { + if in == nil { + return nil + } + out := new(ServiceActionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ServiceActionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceActionObservation) DeepCopyInto(out *ServiceActionObservation) { + *out = *in + if in.AcceptLanguage != nil { + in, out := &in.AcceptLanguage, &out.AcceptLanguage + *out = new(string) + **out = **in + } + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = new(DefinitionObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceActionObservation. +func (in *ServiceActionObservation) DeepCopy() *ServiceActionObservation { + if in == nil { + return nil + } + out := new(ServiceActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceActionParameters) DeepCopyInto(out *ServiceActionParameters) { + *out = *in + if in.AcceptLanguage != nil { + in, out := &in.AcceptLanguage, &out.AcceptLanguage + *out = new(string) + **out = **in + } + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = new(DefinitionParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceActionParameters. +func (in *ServiceActionParameters) DeepCopy() *ServiceActionParameters { + if in == nil { + return nil + } + out := new(ServiceActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceActionSpec) DeepCopyInto(out *ServiceActionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceActionSpec. +func (in *ServiceActionSpec) DeepCopy() *ServiceActionSpec { + if in == nil { + return nil + } + out := new(ServiceActionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceActionStatus) DeepCopyInto(out *ServiceActionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceActionStatus. +func (in *ServiceActionStatus) DeepCopy() *ServiceActionStatus { + if in == nil { + return nil + } + out := new(ServiceActionStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/servicecatalog/v1beta2/zz_generated.managed.go b/apis/servicecatalog/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..0db16bd0c5 --- /dev/null +++ b/apis/servicecatalog/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Product. +func (mg *Product) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Product. +func (mg *Product) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Product. +func (mg *Product) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Product. +func (mg *Product) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Product. +func (mg *Product) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Product. +func (mg *Product) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Product. 
+func (mg *Product) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Product. +func (mg *Product) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Product. +func (mg *Product) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Product. +func (mg *Product) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Product. +func (mg *Product) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Product. +func (mg *Product) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ServiceAction. +func (mg *ServiceAction) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ServiceAction. +func (mg *ServiceAction) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ServiceAction. +func (mg *ServiceAction) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ServiceAction. +func (mg *ServiceAction) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ServiceAction. +func (mg *ServiceAction) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ServiceAction. 
+func (mg *ServiceAction) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ServiceAction. +func (mg *ServiceAction) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ServiceAction. +func (mg *ServiceAction) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ServiceAction. +func (mg *ServiceAction) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ServiceAction. +func (mg *ServiceAction) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ServiceAction. +func (mg *ServiceAction) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ServiceAction. +func (mg *ServiceAction) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/servicecatalog/v1beta2/zz_generated.managedlist.go b/apis/servicecatalog/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..168a7243f8 --- /dev/null +++ b/apis/servicecatalog/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ProductList. +func (l *ProductList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ServiceActionList. 
+func (l *ServiceActionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/servicecatalog/v1beta2/zz_groupversion_info.go b/apis/servicecatalog/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..9a1bd42468 --- /dev/null +++ b/apis/servicecatalog/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=servicecatalog.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "servicecatalog.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/servicecatalog/v1beta2/zz_product_terraformed.go b/apis/servicecatalog/v1beta2/zz_product_terraformed.go new file mode 100755 index 0000000000..20a84c5a4b --- /dev/null +++ b/apis/servicecatalog/v1beta2/zz_product_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Product +func (mg *Product) GetTerraformResourceType() string { + return "aws_servicecatalog_product" +} + +// GetConnectionDetailsMapping for this Product +func (tr *Product) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Product +func (tr *Product) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Product +func (tr *Product) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Product +func (tr *Product) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Product +func (tr *Product) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Product +func (tr *Product) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Product +func (tr *Product) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + 
+// GetInitParameters of this Product +func (tr *Product) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Product using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Product) LateInitialize(attrs []byte) (bool, error) { + params := &ProductParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Product) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/servicecatalog/v1beta2/zz_product_types.go b/apis/servicecatalog/v1beta2/zz_product_types.go new file mode 100755 index 0000000000..31ca3c756d --- /dev/null +++ b/apis/servicecatalog/v1beta2/zz_product_types.go @@ -0,0 +1,293 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ProductInitParameters struct { + + // Language code. Valid values: en (English), jp (Japanese), zh (Chinese). Default value is en. + AcceptLanguage *string `json:"acceptLanguage,omitempty" tf:"accept_language,omitempty"` + + // Description of the product. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Distributor (i.e., vendor) of the product. + Distributor *string `json:"distributor,omitempty" tf:"distributor,omitempty"` + + // Name of the product. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Owner of the product. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Configuration block for provisioning artifact (i.e., version) parameters. Detailed below. + ProvisioningArtifactParameters *ProvisioningArtifactParametersInitParameters `json:"provisioningArtifactParameters,omitempty" tf:"provisioning_artifact_parameters,omitempty"` + + // Support information about the product. + SupportDescription *string `json:"supportDescription,omitempty" tf:"support_description,omitempty"` + + // Contact email for product support. 
+ SupportEmail *string `json:"supportEmail,omitempty" tf:"support_email,omitempty"` + + // Contact URL for product support. + SupportURL *string `json:"supportUrl,omitempty" tf:"support_url,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Type of product. See AWS Docs for valid list of values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProductObservation struct { + + // Language code. Valid values: en (English), jp (Japanese), zh (Chinese). Default value is en. + AcceptLanguage *string `json:"acceptLanguage,omitempty" tf:"accept_language,omitempty"` + + // ARN of the product. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Time when the product was created. + CreatedTime *string `json:"createdTime,omitempty" tf:"created_time,omitempty"` + + // Description of the product. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Distributor (i.e., vendor) of the product. + Distributor *string `json:"distributor,omitempty" tf:"distributor,omitempty"` + + // Whether the product has a default path. If the product does not have a default path, call ListLaunchPaths to disambiguate between paths. Otherwise, ListLaunchPaths is not required, and the output of ProductViewSummary can be used directly with DescribeProvisioningParameters. + HasDefaultPath *bool `json:"hasDefaultPath,omitempty" tf:"has_default_path,omitempty"` + + // Product ID. For example, prod-dnigbtea24ste. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the product. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Owner of the product. + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Configuration block for provisioning artifact (i.e., version) parameters. Detailed below. 
+ ProvisioningArtifactParameters *ProvisioningArtifactParametersObservation `json:"provisioningArtifactParameters,omitempty" tf:"provisioning_artifact_parameters,omitempty"` + + // Status of the product. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Support information about the product. + SupportDescription *string `json:"supportDescription,omitempty" tf:"support_description,omitempty"` + + // Contact email for product support. + SupportEmail *string `json:"supportEmail,omitempty" tf:"support_email,omitempty"` + + // Contact URL for product support. + SupportURL *string `json:"supportUrl,omitempty" tf:"support_url,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Type of product. See AWS Docs for valid list of values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProductParameters struct { + + // Language code. Valid values: en (English), jp (Japanese), zh (Chinese). Default value is en. + // +kubebuilder:validation:Optional + AcceptLanguage *string `json:"acceptLanguage,omitempty" tf:"accept_language,omitempty"` + + // Description of the product. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Distributor (i.e., vendor) of the product. + // +kubebuilder:validation:Optional + Distributor *string `json:"distributor,omitempty" tf:"distributor,omitempty"` + + // Name of the product. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Owner of the product. 
+ // +kubebuilder:validation:Optional + Owner *string `json:"owner,omitempty" tf:"owner,omitempty"` + + // Configuration block for provisioning artifact (i.e., version) parameters. Detailed below. + // +kubebuilder:validation:Optional + ProvisioningArtifactParameters *ProvisioningArtifactParametersParameters `json:"provisioningArtifactParameters,omitempty" tf:"provisioning_artifact_parameters,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Support information about the product. + // +kubebuilder:validation:Optional + SupportDescription *string `json:"supportDescription,omitempty" tf:"support_description,omitempty"` + + // Contact email for product support. + // +kubebuilder:validation:Optional + SupportEmail *string `json:"supportEmail,omitempty" tf:"support_email,omitempty"` + + // Contact URL for product support. + // +kubebuilder:validation:Optional + SupportURL *string `json:"supportUrl,omitempty" tf:"support_url,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Type of product. See AWS Docs for valid list of values. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProvisioningArtifactParametersInitParameters struct { + + // Description of the provisioning artifact (i.e., version), including how it differs from the previous provisioning artifact. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether AWS Service Catalog stops validating the specified provisioning artifact template even if it is invalid. 
+ DisableTemplateValidation *bool `json:"disableTemplateValidation,omitempty" tf:"disable_template_validation,omitempty"` + + // Name of the provisioning artifact (for example, v1, v2beta). No spaces are allowed. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template source as the physical ID of the resource that contains the template. Currently only supports CloudFormation stack ARN. Specify the physical ID as arn:[partition]:cloudformation:[region]:[account ID]:stack/[stack name]/[resource ID]. + TemplatePhysicalID *string `json:"templatePhysicalId,omitempty" tf:"template_physical_id,omitempty"` + + // Template source as URL of the CloudFormation template in Amazon S3. + TemplateURL *string `json:"templateUrl,omitempty" tf:"template_url,omitempty"` + + // Type of provisioning artifact. See AWS Docs for valid list of values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProvisioningArtifactParametersObservation struct { + + // Description of the provisioning artifact (i.e., version), including how it differs from the previous provisioning artifact. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether AWS Service Catalog stops validating the specified provisioning artifact template even if it is invalid. + DisableTemplateValidation *bool `json:"disableTemplateValidation,omitempty" tf:"disable_template_validation,omitempty"` + + // Name of the provisioning artifact (for example, v1, v2beta). No spaces are allowed. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template source as the physical ID of the resource that contains the template. Currently only supports CloudFormation stack ARN. Specify the physical ID as arn:[partition]:cloudformation:[region]:[account ID]:stack/[stack name]/[resource ID]. 
+ TemplatePhysicalID *string `json:"templatePhysicalId,omitempty" tf:"template_physical_id,omitempty"` + + // Template source as URL of the CloudFormation template in Amazon S3. + TemplateURL *string `json:"templateUrl,omitempty" tf:"template_url,omitempty"` + + // Type of provisioning artifact. See AWS Docs for valid list of values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ProvisioningArtifactParametersParameters struct { + + // Description of the provisioning artifact (i.e., version), including how it differs from the previous provisioning artifact. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether AWS Service Catalog stops validating the specified provisioning artifact template even if it is invalid. + // +kubebuilder:validation:Optional + DisableTemplateValidation *bool `json:"disableTemplateValidation,omitempty" tf:"disable_template_validation,omitempty"` + + // Name of the provisioning artifact (for example, v1, v2beta). No spaces are allowed. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Template source as the physical ID of the resource that contains the template. Currently only supports CloudFormation stack ARN. Specify the physical ID as arn:[partition]:cloudformation:[region]:[account ID]:stack/[stack name]/[resource ID]. + // +kubebuilder:validation:Optional + TemplatePhysicalID *string `json:"templatePhysicalId,omitempty" tf:"template_physical_id,omitempty"` + + // Template source as URL of the CloudFormation template in Amazon S3. + // +kubebuilder:validation:Optional + TemplateURL *string `json:"templateUrl,omitempty" tf:"template_url,omitempty"` + + // Type of provisioning artifact. See AWS Docs for valid list of values. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// ProductSpec defines the desired state of Product +type ProductSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProductParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProductInitParameters `json:"initProvider,omitempty"` +} + +// ProductStatus defines the observed state of Product. +type ProductStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProductObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Product is the Schema for the Products API. 
Manages a Service Catalog Product +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Product struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.owner) || (has(self.initProvider) && has(self.initProvider.owner))",message="spec.forProvider.owner is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.provisioningArtifactParameters) || (has(self.initProvider) && has(self.initProvider.provisioningArtifactParameters))",message="spec.forProvider.provisioningArtifactParameters is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec ProductSpec `json:"spec"` + 
Status ProductStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProductList contains a list of Products +type ProductList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Product `json:"items"` +} + +// Repository type metadata. +var ( + Product_Kind = "Product" + Product_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Product_Kind}.String() + Product_KindAPIVersion = Product_Kind + "." + CRDGroupVersion.String() + Product_GroupVersionKind = CRDGroupVersion.WithKind(Product_Kind) +) + +func init() { + SchemeBuilder.Register(&Product{}, &ProductList{}) +} diff --git a/apis/servicecatalog/v1beta2/zz_serviceaction_terraformed.go b/apis/servicecatalog/v1beta2/zz_serviceaction_terraformed.go new file mode 100755 index 0000000000..b8b9977569 --- /dev/null +++ b/apis/servicecatalog/v1beta2/zz_serviceaction_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServiceAction +func (mg *ServiceAction) GetTerraformResourceType() string { + return "aws_servicecatalog_service_action" +} + +// GetConnectionDetailsMapping for this ServiceAction +func (tr *ServiceAction) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ServiceAction +func (tr *ServiceAction) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServiceAction +func (tr *ServiceAction) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServiceAction +func (tr *ServiceAction) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ServiceAction +func (tr *ServiceAction) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServiceAction +func (tr *ServiceAction) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServiceAction +func (tr *ServiceAction) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + 
return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServiceAction +func (tr *ServiceAction) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServiceAction using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ServiceAction) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceActionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServiceAction) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/servicecatalog/v1beta2/zz_serviceaction_types.go b/apis/servicecatalog/v1beta2/zz_serviceaction_types.go new file mode 100755 index 0000000000..0e6594d3a2 --- /dev/null +++ b/apis/servicecatalog/v1beta2/zz_serviceaction_types.go @@ -0,0 +1,192 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DefinitionInitParameters struct { + + // ARN of the role that performs the self-service actions on your behalf. For example, arn:aws:iam::12345678910:role/ActionRole. To reuse the provisioned product launch role, set to LAUNCH_ROLE. + AssumeRole *string `json:"assumeRole,omitempty" tf:"assume_role,omitempty"` + + // Name of the SSM document. For example, AWS-RestartEC2Instance. If you are using a shared SSM document, you must provide the ARN instead of the name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // List of parameters in JSON format. For example: [{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}] or [{\"Name\":\"InstanceId\",\"Type\":\"TEXT_VALUE\"}]. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Service action definition type. Valid value is SSM_AUTOMATION. Default is SSM_AUTOMATION. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // SSM document version. For example, 1. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type DefinitionObservation struct { + + // ARN of the role that performs the self-service actions on your behalf. 
For example, arn:aws:iam::12345678910:role/ActionRole. To reuse the provisioned product launch role, set to LAUNCH_ROLE. + AssumeRole *string `json:"assumeRole,omitempty" tf:"assume_role,omitempty"` + + // Name of the SSM document. For example, AWS-RestartEC2Instance. If you are using a shared SSM document, you must provide the ARN instead of the name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // List of parameters in JSON format. For example: [{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}] or [{\"Name\":\"InstanceId\",\"Type\":\"TEXT_VALUE\"}]. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Service action definition type. Valid value is SSM_AUTOMATION. Default is SSM_AUTOMATION. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // SSM document version. For example, 1. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type DefinitionParameters struct { + + // ARN of the role that performs the self-service actions on your behalf. For example, arn:aws:iam::12345678910:role/ActionRole. To reuse the provisioned product launch role, set to LAUNCH_ROLE. + // +kubebuilder:validation:Optional + AssumeRole *string `json:"assumeRole,omitempty" tf:"assume_role,omitempty"` + + // Name of the SSM document. For example, AWS-RestartEC2Instance. If you are using a shared SSM document, you must provide the ARN instead of the name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // List of parameters in JSON format. For example: [{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}] or [{\"Name\":\"InstanceId\",\"Type\":\"TEXT_VALUE\"}]. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Service action definition type. Valid value is SSM_AUTOMATION. Default is SSM_AUTOMATION. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // SSM document version. For example, 1. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type ServiceActionInitParameters struct { + + // Language code. Valid values are en (English), jp (Japanese), and zh (Chinese). Default is en. + AcceptLanguage *string `json:"acceptLanguage,omitempty" tf:"accept_language,omitempty"` + + // Self-service action definition configuration block. Detailed below. + Definition *DefinitionInitParameters `json:"definition,omitempty" tf:"definition,omitempty"` + + // Self-service action description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Self-service action name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ServiceActionObservation struct { + + // Language code. Valid values are en (English), jp (Japanese), and zh (Chinese). Default is en. + AcceptLanguage *string `json:"acceptLanguage,omitempty" tf:"accept_language,omitempty"` + + // Self-service action definition configuration block. Detailed below. + Definition *DefinitionObservation `json:"definition,omitempty" tf:"definition,omitempty"` + + // Self-service action description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Identifier of the service action. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Self-service action name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ServiceActionParameters struct { + + // Language code. Valid values are en (English), jp (Japanese), and zh (Chinese). Default is en. + // +kubebuilder:validation:Optional + AcceptLanguage *string `json:"acceptLanguage,omitempty" tf:"accept_language,omitempty"` + + // Self-service action definition configuration block. Detailed below. 
+ // +kubebuilder:validation:Optional + Definition *DefinitionParameters `json:"definition,omitempty" tf:"definition,omitempty"` + + // Self-service action description. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Self-service action name. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +// ServiceActionSpec defines the desired state of ServiceAction +type ServiceActionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceActionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceActionInitParameters `json:"initProvider,omitempty"` +} + +// ServiceActionStatus defines the observed state of ServiceAction. +type ServiceActionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceActionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ServiceAction is the Schema for the ServiceActions API. 
Manages a Service Catalog Service Action +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ServiceAction struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.definition) || (has(self.initProvider) && has(self.initProvider.definition))",message="spec.forProvider.definition is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ServiceActionSpec `json:"spec"` + Status ServiceActionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceActionList contains a list of ServiceActions +type ServiceActionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceAction `json:"items"` +} + +// Repository type metadata. +var ( + ServiceAction_Kind = "ServiceAction" + ServiceAction_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServiceAction_Kind}.String() + ServiceAction_KindAPIVersion = ServiceAction_Kind + "." 
+ CRDGroupVersion.String() + ServiceAction_GroupVersionKind = CRDGroupVersion.WithKind(ServiceAction_Kind) +) + +func init() { + SchemeBuilder.Register(&ServiceAction{}, &ServiceActionList{}) +} diff --git a/apis/servicediscovery/v1beta1/zz_generated.conversion_hubs.go b/apis/servicediscovery/v1beta1/zz_generated.conversion_hubs.go index 5ed1d8a955..0e7da31e89 100755 --- a/apis/servicediscovery/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/servicediscovery/v1beta1/zz_generated.conversion_hubs.go @@ -14,6 +14,3 @@ func (tr *PrivateDNSNamespace) Hub() {} // Hub marks this type as a conversion hub. func (tr *PublicDNSNamespace) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Service) Hub() {} diff --git a/apis/servicediscovery/v1beta1/zz_generated.conversion_spokes.go b/apis/servicediscovery/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..80f7574769 --- /dev/null +++ b/apis/servicediscovery/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Service to the hub type. +func (tr *Service) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Service type. 
+func (tr *Service) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/servicediscovery/v1beta2/zz_generated.conversion_hubs.go b/apis/servicediscovery/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..aa1fa71e29 --- /dev/null +++ b/apis/servicediscovery/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Service) Hub() {} diff --git a/apis/servicediscovery/v1beta2/zz_generated.deepcopy.go b/apis/servicediscovery/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8da872f10e --- /dev/null +++ b/apis/servicediscovery/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,693 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSConfigInitParameters) DeepCopyInto(out *DNSConfigInitParameters) { + *out = *in + if in.DNSRecords != nil { + in, out := &in.DNSRecords, &out.DNSRecords + *out = make([]DNSRecordsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NamespaceID != nil { + in, out := &in.NamespaceID, &out.NamespaceID + *out = new(string) + **out = **in + } + if in.NamespaceIDRef != nil { + in, out := &in.NamespaceIDRef, &out.NamespaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NamespaceIDSelector != nil { + in, out := &in.NamespaceIDSelector, &out.NamespaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoutingPolicy != nil { + in, out := &in.RoutingPolicy, &out.RoutingPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfigInitParameters. +func (in *DNSConfigInitParameters) DeepCopy() *DNSConfigInitParameters { + if in == nil { + return nil + } + out := new(DNSConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSConfigObservation) DeepCopyInto(out *DNSConfigObservation) { + *out = *in + if in.DNSRecords != nil { + in, out := &in.DNSRecords, &out.DNSRecords + *out = make([]DNSRecordsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NamespaceID != nil { + in, out := &in.NamespaceID, &out.NamespaceID + *out = new(string) + **out = **in + } + if in.RoutingPolicy != nil { + in, out := &in.RoutingPolicy, &out.RoutingPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfigObservation. 
+func (in *DNSConfigObservation) DeepCopy() *DNSConfigObservation { + if in == nil { + return nil + } + out := new(DNSConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSConfigParameters) DeepCopyInto(out *DNSConfigParameters) { + *out = *in + if in.DNSRecords != nil { + in, out := &in.DNSRecords, &out.DNSRecords + *out = make([]DNSRecordsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NamespaceID != nil { + in, out := &in.NamespaceID, &out.NamespaceID + *out = new(string) + **out = **in + } + if in.NamespaceIDRef != nil { + in, out := &in.NamespaceIDRef, &out.NamespaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NamespaceIDSelector != nil { + in, out := &in.NamespaceIDSelector, &out.NamespaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoutingPolicy != nil { + in, out := &in.RoutingPolicy, &out.RoutingPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfigParameters. +func (in *DNSConfigParameters) DeepCopy() *DNSConfigParameters { + if in == nil { + return nil + } + out := new(DNSConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSRecordsInitParameters) DeepCopyInto(out *DNSRecordsInitParameters) { + *out = *in + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSRecordsInitParameters. 
+func (in *DNSRecordsInitParameters) DeepCopy() *DNSRecordsInitParameters { + if in == nil { + return nil + } + out := new(DNSRecordsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSRecordsObservation) DeepCopyInto(out *DNSRecordsObservation) { + *out = *in + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSRecordsObservation. +func (in *DNSRecordsObservation) DeepCopy() *DNSRecordsObservation { + if in == nil { + return nil + } + out := new(DNSRecordsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSRecordsParameters) DeepCopyInto(out *DNSRecordsParameters) { + *out = *in + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSRecordsParameters. +func (in *DNSRecordsParameters) DeepCopy() *DNSRecordsParameters { + if in == nil { + return nil + } + out := new(DNSRecordsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckConfigInitParameters) DeepCopyInto(out *HealthCheckConfigInitParameters) { + *out = *in + if in.FailureThreshold != nil { + in, out := &in.FailureThreshold, &out.FailureThreshold + *out = new(float64) + **out = **in + } + if in.ResourcePath != nil { + in, out := &in.ResourcePath, &out.ResourcePath + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckConfigInitParameters. +func (in *HealthCheckConfigInitParameters) DeepCopy() *HealthCheckConfigInitParameters { + if in == nil { + return nil + } + out := new(HealthCheckConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheckConfigObservation) DeepCopyInto(out *HealthCheckConfigObservation) { + *out = *in + if in.FailureThreshold != nil { + in, out := &in.FailureThreshold, &out.FailureThreshold + *out = new(float64) + **out = **in + } + if in.ResourcePath != nil { + in, out := &in.ResourcePath, &out.ResourcePath + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckConfigObservation. +func (in *HealthCheckConfigObservation) DeepCopy() *HealthCheckConfigObservation { + if in == nil { + return nil + } + out := new(HealthCheckConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthCheckConfigParameters) DeepCopyInto(out *HealthCheckConfigParameters) { + *out = *in + if in.FailureThreshold != nil { + in, out := &in.FailureThreshold, &out.FailureThreshold + *out = new(float64) + **out = **in + } + if in.ResourcePath != nil { + in, out := &in.ResourcePath, &out.ResourcePath + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckConfigParameters. +func (in *HealthCheckConfigParameters) DeepCopy() *HealthCheckConfigParameters { + if in == nil { + return nil + } + out := new(HealthCheckConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheckCustomConfigInitParameters) DeepCopyInto(out *HealthCheckCustomConfigInitParameters) { + *out = *in + if in.FailureThreshold != nil { + in, out := &in.FailureThreshold, &out.FailureThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckCustomConfigInitParameters. +func (in *HealthCheckCustomConfigInitParameters) DeepCopy() *HealthCheckCustomConfigInitParameters { + if in == nil { + return nil + } + out := new(HealthCheckCustomConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheckCustomConfigObservation) DeepCopyInto(out *HealthCheckCustomConfigObservation) { + *out = *in + if in.FailureThreshold != nil { + in, out := &in.FailureThreshold, &out.FailureThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckCustomConfigObservation. 
+func (in *HealthCheckCustomConfigObservation) DeepCopy() *HealthCheckCustomConfigObservation { + if in == nil { + return nil + } + out := new(HealthCheckCustomConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthCheckCustomConfigParameters) DeepCopyInto(out *HealthCheckCustomConfigParameters) { + *out = *in + if in.FailureThreshold != nil { + in, out := &in.FailureThreshold, &out.FailureThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckCustomConfigParameters. +func (in *HealthCheckCustomConfigParameters) DeepCopy() *HealthCheckCustomConfigParameters { + if in == nil { + return nil + } + out := new(HealthCheckCustomConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceInitParameters) DeepCopyInto(out *ServiceInitParameters) { + *out = *in + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(DNSConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.HealthCheckConfig != nil { + in, out := &in.HealthCheckConfig, &out.HealthCheckConfig + *out = new(HealthCheckConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckCustomConfig != nil { + in, out := &in.HealthCheckCustomConfig, &out.HealthCheckCustomConfig + *out = new(HealthCheckCustomConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NamespaceID != nil { + in, out := &in.NamespaceID, &out.NamespaceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceInitParameters. +func (in *ServiceInitParameters) DeepCopy() *ServiceInitParameters { + if in == nil { + return nil + } + out := new(ServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceObservation) DeepCopyInto(out *ServiceObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(DNSConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.HealthCheckConfig != nil { + in, out := &in.HealthCheckConfig, &out.HealthCheckConfig + *out = new(HealthCheckConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckCustomConfig != nil { + in, out := &in.HealthCheckCustomConfig, &out.HealthCheckCustomConfig + *out = new(HealthCheckCustomConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + 
} + if in.NamespaceID != nil { + in, out := &in.NamespaceID, &out.NamespaceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceObservation. +func (in *ServiceObservation) DeepCopy() *ServiceObservation { + if in == nil { + return nil + } + out := new(ServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceParameters) DeepCopyInto(out *ServiceParameters) { + *out = *in + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(DNSConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.HealthCheckConfig != nil { + in, out := &in.HealthCheckConfig, &out.HealthCheckConfig + *out = new(HealthCheckConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.HealthCheckCustomConfig != nil { + in, out := &in.HealthCheckCustomConfig, &out.HealthCheckCustomConfig + *out = new(HealthCheckCustomConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NamespaceID != nil { + in, out := &in.NamespaceID, &out.NamespaceID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceParameters. +func (in *ServiceParameters) DeepCopy() *ServiceParameters { + if in == nil { + return nil + } + out := new(ServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/servicediscovery/v1beta2/zz_generated.managed.go b/apis/servicediscovery/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..841889e279 --- /dev/null +++ b/apis/servicediscovery/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Service. +func (mg *Service) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Service. +func (mg *Service) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Service. 
+func (mg *Service) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Service. +func (mg *Service) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Service. +func (mg *Service) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Service. +func (mg *Service) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Service. +func (mg *Service) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Service. +func (mg *Service) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Service. +func (mg *Service) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Service. +func (mg *Service) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Service. +func (mg *Service) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Service. 
+func (mg *Service) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/servicediscovery/v1beta2/zz_generated.managedlist.go b/apis/servicediscovery/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..d9baadeb70 --- /dev/null +++ b/apis/servicediscovery/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ServiceList. +func (l *ServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/servicediscovery/v1beta2/zz_generated.resolvers.go b/apis/servicediscovery/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..8f5b8868b9 --- /dev/null +++ b/apis/servicediscovery/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Service. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Service) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.DNSConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("servicediscovery.aws.upbound.io", "v1beta1", "PrivateDNSNamespace", "PrivateDNSNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DNSConfig.NamespaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DNSConfig.NamespaceIDRef, + Selector: mg.Spec.ForProvider.DNSConfig.NamespaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DNSConfig.NamespaceID") + } + mg.Spec.ForProvider.DNSConfig.NamespaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DNSConfig.NamespaceIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.DNSConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("servicediscovery.aws.upbound.io", "v1beta1", "PrivateDNSNamespace", "PrivateDNSNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DNSConfig.NamespaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DNSConfig.NamespaceIDRef, + Selector: mg.Spec.InitProvider.DNSConfig.NamespaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.DNSConfig.NamespaceID") + } + mg.Spec.InitProvider.DNSConfig.NamespaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DNSConfig.NamespaceIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/servicediscovery/v1beta2/zz_groupversion_info.go b/apis/servicediscovery/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..b7986a7a7a --- /dev/null +++ b/apis/servicediscovery/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=servicediscovery.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "servicediscovery.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/servicediscovery/v1beta2/zz_service_terraformed.go b/apis/servicediscovery/v1beta2/zz_service_terraformed.go new file mode 100755 index 0000000000..5ec292b44d --- /dev/null +++ b/apis/servicediscovery/v1beta2/zz_service_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Service +func (mg *Service) GetTerraformResourceType() string { + return "aws_service_discovery_service" +} + +// GetConnectionDetailsMapping for this Service +func (tr *Service) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Service +func (tr *Service) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Service +func (tr *Service) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Service +func (tr *Service) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Service +func (tr *Service) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Service +func (tr *Service) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Service +func (tr *Service) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} 
+
+// GetMergedParameters of this Service
+func (tr *Service) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Service using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Service) LateInitialize(attrs []byte) (bool, error) {
+	params := &ServiceParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Service) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/servicediscovery/v1beta2/zz_service_types.go b/apis/servicediscovery/v1beta2/zz_service_types.go new file mode 100755 index 0000000000..0c28583ce6 --- /dev/null +++ b/apis/servicediscovery/v1beta2/zz_service_types.go @@ -0,0 +1,338 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DNSConfigInitParameters struct { + + // An array that contains one DnsRecord object for each resource record set. + DNSRecords []DNSRecordsInitParameters `json:"dnsRecords,omitempty" tf:"dns_records,omitempty"` + + // The ID of the namespace to use for DNS configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicediscovery/v1beta1.PrivateDNSNamespace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` + + // Reference to a PrivateDNSNamespace in servicediscovery to populate namespaceId. + // +kubebuilder:validation:Optional + NamespaceIDRef *v1.Reference `json:"namespaceIdRef,omitempty" tf:"-"` + + // Selector for a PrivateDNSNamespace in servicediscovery to populate namespaceId. + // +kubebuilder:validation:Optional + NamespaceIDSelector *v1.Selector `json:"namespaceIdSelector,omitempty" tf:"-"` + + // The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. 
Valid Values: MULTIVALUE, WEIGHTED + RoutingPolicy *string `json:"routingPolicy,omitempty" tf:"routing_policy,omitempty"` +} + +type DNSConfigObservation struct { + + // An array that contains one DnsRecord object for each resource record set. + DNSRecords []DNSRecordsObservation `json:"dnsRecords,omitempty" tf:"dns_records,omitempty"` + + // The ID of the namespace to use for DNS configuration. + NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` + + // The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED + RoutingPolicy *string `json:"routingPolicy,omitempty" tf:"routing_policy,omitempty"` +} + +type DNSConfigParameters struct { + + // An array that contains one DnsRecord object for each resource record set. + // +kubebuilder:validation:Optional + DNSRecords []DNSRecordsParameters `json:"dnsRecords" tf:"dns_records,omitempty"` + + // The ID of the namespace to use for DNS configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/servicediscovery/v1beta1.PrivateDNSNamespace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` + + // Reference to a PrivateDNSNamespace in servicediscovery to populate namespaceId. + // +kubebuilder:validation:Optional + NamespaceIDRef *v1.Reference `json:"namespaceIdRef,omitempty" tf:"-"` + + // Selector for a PrivateDNSNamespace in servicediscovery to populate namespaceId. + // +kubebuilder:validation:Optional + NamespaceIDSelector *v1.Selector `json:"namespaceIdSelector,omitempty" tf:"-"` + + // The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. 
Valid Values: MULTIVALUE, WEIGHTED + // +kubebuilder:validation:Optional + RoutingPolicy *string `json:"routingPolicy,omitempty" tf:"routing_policy,omitempty"` +} + +type DNSRecordsInitParameters struct { + + // The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DNSRecordsObservation struct { + + // The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DNSRecordsParameters struct { + + // The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set. + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl" tf:"ttl,omitempty"` + + // The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type HealthCheckConfigInitParameters struct { + + // The number of consecutive health checks. Maximum value of 10. + FailureThreshold *float64 `json:"failureThreshold,omitempty" tf:"failure_threshold,omitempty"` + + // The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /. 
+ ResourcePath *string `json:"resourcePath,omitempty" tf:"resource_path,omitempty"` + + // The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type HealthCheckConfigObservation struct { + + // The number of consecutive health checks. Maximum value of 10. + FailureThreshold *float64 `json:"failureThreshold,omitempty" tf:"failure_threshold,omitempty"` + + // The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /. + ResourcePath *string `json:"resourcePath,omitempty" tf:"resource_path,omitempty"` + + // The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type HealthCheckConfigParameters struct { + + // The number of consecutive health checks. Maximum value of 10. + // +kubebuilder:validation:Optional + FailureThreshold *float64 `json:"failureThreshold,omitempty" tf:"failure_threshold,omitempty"` + + // The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /. + // +kubebuilder:validation:Optional + ResourcePath *string `json:"resourcePath,omitempty" tf:"resource_path,omitempty"` + + // The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. 
Valid Values: HTTP, HTTPS, TCP + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type HealthCheckCustomConfigInitParameters struct { + + // The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. + FailureThreshold *float64 `json:"failureThreshold,omitempty" tf:"failure_threshold,omitempty"` +} + +type HealthCheckCustomConfigObservation struct { + + // The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. + FailureThreshold *float64 `json:"failureThreshold,omitempty" tf:"failure_threshold,omitempty"` +} + +type HealthCheckCustomConfigParameters struct { + + // The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. + // +kubebuilder:validation:Optional + FailureThreshold *float64 `json:"failureThreshold,omitempty" tf:"failure_threshold,omitempty"` +} + +type ServiceInitParameters struct { + + // A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. + DNSConfig *DNSConfigInitParameters `json:"dnsConfig,omitempty" tf:"dns_config,omitempty"` + + // The description of the service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // A complex type that contains settings for an optional health check. Only for Public DNS namespaces. 
+ HealthCheckConfig *HealthCheckConfigInitParameters `json:"healthCheckConfig,omitempty" tf:"health_check_config,omitempty"` + + // A complex type that contains settings for ECS managed health checks. + HealthCheckCustomConfig *HealthCheckCustomConfigInitParameters `json:"healthCheckCustomConfig,omitempty" tf:"health_check_custom_config,omitempty"` + + // The name of the service. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the namespace that you want to use to create the service. + NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // If present, specifies that the service instances are only discoverable using the DiscoverInstances API operation. No DNS records is registered for the service instances. The only valid value is HTTP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ServiceObservation struct { + + // The ARN of the service. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. + DNSConfig *DNSConfigObservation `json:"dnsConfig,omitempty" tf:"dns_config,omitempty"` + + // The description of the service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // A complex type that contains settings for an optional health check. Only for Public DNS namespaces. 
+ HealthCheckConfig *HealthCheckConfigObservation `json:"healthCheckConfig,omitempty" tf:"health_check_config,omitempty"` + + // A complex type that contains settings for ECS managed health checks. + HealthCheckCustomConfig *HealthCheckCustomConfigObservation `json:"healthCheckCustomConfig,omitempty" tf:"health_check_custom_config,omitempty"` + + // The ID of the service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the service. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the namespace that you want to use to create the service. + NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // If present, specifies that the service instances are only discoverable using the DiscoverInstances API operation. No DNS records is registered for the service instances. The only valid value is HTTP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ServiceParameters struct { + + // A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. + // +kubebuilder:validation:Optional + DNSConfig *DNSConfigParameters `json:"dnsConfig,omitempty" tf:"dns_config,omitempty"` + + // The description of the service. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. 
+ // +kubebuilder:validation:Optional + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // A complex type that contains settings for an optional health check. Only for Public DNS namespaces. + // +kubebuilder:validation:Optional + HealthCheckConfig *HealthCheckConfigParameters `json:"healthCheckConfig,omitempty" tf:"health_check_config,omitempty"` + + // A complex type that contains settings for ECS managed health checks. + // +kubebuilder:validation:Optional + HealthCheckCustomConfig *HealthCheckCustomConfigParameters `json:"healthCheckCustomConfig,omitempty" tf:"health_check_custom_config,omitempty"` + + // The name of the service. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the namespace that you want to use to create the service. + // +kubebuilder:validation:Optional + NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // If present, specifies that the service instances are only discoverable using the DiscoverInstances API operation. No DNS records is registered for the service instances. The only valid value is HTTP. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// ServiceSpec defines the desired state of Service +type ServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceInitParameters `json:"initProvider,omitempty"` +} + +// ServiceStatus defines the observed state of Service. +type ServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Service is the Schema for the Services API. Provides a Service Discovery Service resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Service struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ServiceSpec `json:"spec"` + Status ServiceStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceList contains a list of Services +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Service `json:"items"` +} + +// Repository type metadata. +var ( + Service_Kind = "Service" + Service_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Service_Kind}.String() + Service_KindAPIVersion = Service_Kind + "." + CRDGroupVersion.String() + Service_GroupVersionKind = CRDGroupVersion.WithKind(Service_Kind) +) + +func init() { + SchemeBuilder.Register(&Service{}, &ServiceList{}) +} diff --git a/apis/ses/v1beta1/zz_generated.conversion_hubs.go b/apis/ses/v1beta1/zz_generated.conversion_hubs.go index 00363b9a26..bfa8dd48fd 100755 --- a/apis/ses/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/ses/v1beta1/zz_generated.conversion_hubs.go @@ -9,9 +9,6 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *ActiveReceiptRuleSet) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ConfigurationSet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DomainDKIM) Hub() {} @@ -24,9 +21,6 @@ func (tr *DomainMailFrom) Hub() {} // Hub marks this type as a conversion hub. func (tr *EmailIdentity) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *EventDestination) Hub() {} - // Hub marks this type as a conversion hub. func (tr *IdentityNotificationTopic) Hub() {} diff --git a/apis/ses/v1beta1/zz_generated.conversion_spokes.go b/apis/ses/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..7d17b3f268 --- /dev/null +++ b/apis/ses/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ConfigurationSet to the hub type. +func (tr *ConfigurationSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ConfigurationSet type. +func (tr *ConfigurationSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this EventDestination to the hub type. +func (tr *EventDestination) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EventDestination type. 
+func (tr *EventDestination) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/ses/v1beta2/zz_configurationset_terraformed.go b/apis/ses/v1beta2/zz_configurationset_terraformed.go new file mode 100755 index 0000000000..d933f8d1eb --- /dev/null +++ b/apis/ses/v1beta2/zz_configurationset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ConfigurationSet +func (mg *ConfigurationSet) GetTerraformResourceType() string { + return "aws_ses_configuration_set" +} + +// GetConnectionDetailsMapping for this ConfigurationSet +func (tr *ConfigurationSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ConfigurationSet +func (tr *ConfigurationSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ConfigurationSet +func (tr *ConfigurationSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ConfigurationSet +func (tr 
*ConfigurationSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ConfigurationSet +func (tr *ConfigurationSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ConfigurationSet +func (tr *ConfigurationSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ConfigurationSet +func (tr *ConfigurationSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ConfigurationSet +func (tr *ConfigurationSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ConfigurationSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ConfigurationSet) LateInitialize(attrs []byte) (bool, error) { + params := &ConfigurationSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ConfigurationSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ses/v1beta2/zz_configurationset_types.go b/apis/ses/v1beta2/zz_configurationset_types.go new file mode 100755 index 0000000000..d98cdde661 --- /dev/null +++ b/apis/ses/v1beta2/zz_configurationset_types.go @@ -0,0 +1,175 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationSetInitParameters struct { + + // Whether messages that use the configuration set are required to use TLS. See below. 
+ DeliveryOptions *DeliveryOptionsInitParameters `json:"deliveryOptions,omitempty" tf:"delivery_options,omitempty"` + + // Whether or not Amazon SES publishes reputation metrics for the configuration set, such as bounce and complaint rates, to Amazon CloudWatch. The default value is false. + ReputationMetricsEnabled *bool `json:"reputationMetricsEnabled,omitempty" tf:"reputation_metrics_enabled,omitempty"` + + // Whether email sending is enabled or disabled for the configuration set. The default value is true. + SendingEnabled *bool `json:"sendingEnabled,omitempty" tf:"sending_enabled,omitempty"` + + // Domain that is used to redirect email recipients to an Amazon SES-operated domain. See below. NOTE: This functionality is best effort. + TrackingOptions *TrackingOptionsInitParameters `json:"trackingOptions,omitempty" tf:"tracking_options,omitempty"` +} + +type ConfigurationSetObservation struct { + + // SES configuration set ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Whether messages that use the configuration set are required to use TLS. See below. + DeliveryOptions *DeliveryOptionsObservation `json:"deliveryOptions,omitempty" tf:"delivery_options,omitempty"` + + // SES configuration set name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Date and time at which the reputation metrics for the configuration set were last reset. Resetting these metrics is known as a fresh start. + LastFreshStart *string `json:"lastFreshStart,omitempty" tf:"last_fresh_start,omitempty"` + + // Whether or not Amazon SES publishes reputation metrics for the configuration set, such as bounce and complaint rates, to Amazon CloudWatch. The default value is false. + ReputationMetricsEnabled *bool `json:"reputationMetricsEnabled,omitempty" tf:"reputation_metrics_enabled,omitempty"` + + // Whether email sending is enabled or disabled for the configuration set. The default value is true. 
+ SendingEnabled *bool `json:"sendingEnabled,omitempty" tf:"sending_enabled,omitempty"` + + // Domain that is used to redirect email recipients to an Amazon SES-operated domain. See below. NOTE: This functionality is best effort. + TrackingOptions *TrackingOptionsObservation `json:"trackingOptions,omitempty" tf:"tracking_options,omitempty"` +} + +type ConfigurationSetParameters struct { + + // Whether messages that use the configuration set are required to use TLS. See below. + // +kubebuilder:validation:Optional + DeliveryOptions *DeliveryOptionsParameters `json:"deliveryOptions,omitempty" tf:"delivery_options,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Whether or not Amazon SES publishes reputation metrics for the configuration set, such as bounce and complaint rates, to Amazon CloudWatch. The default value is false. + // +kubebuilder:validation:Optional + ReputationMetricsEnabled *bool `json:"reputationMetricsEnabled,omitempty" tf:"reputation_metrics_enabled,omitempty"` + + // Whether email sending is enabled or disabled for the configuration set. The default value is true. + // +kubebuilder:validation:Optional + SendingEnabled *bool `json:"sendingEnabled,omitempty" tf:"sending_enabled,omitempty"` + + // Domain that is used to redirect email recipients to an Amazon SES-operated domain. See below. NOTE: This functionality is best effort. + // +kubebuilder:validation:Optional + TrackingOptions *TrackingOptionsParameters `json:"trackingOptions,omitempty" tf:"tracking_options,omitempty"` +} + +type DeliveryOptionsInitParameters struct { + + // Whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. 
If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established. Valid values: Require or Optional. Defaults to Optional. + TLSPolicy *string `json:"tlsPolicy,omitempty" tf:"tls_policy,omitempty"` +} + +type DeliveryOptionsObservation struct { + + // Whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established. Valid values: Require or Optional. Defaults to Optional. + TLSPolicy *string `json:"tlsPolicy,omitempty" tf:"tls_policy,omitempty"` +} + +type DeliveryOptionsParameters struct { + + // Whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established. Valid values: Require or Optional. Defaults to Optional. + // +kubebuilder:validation:Optional + TLSPolicy *string `json:"tlsPolicy,omitempty" tf:"tls_policy,omitempty"` +} + +type TrackingOptionsInitParameters struct { + + // Custom subdomain that is used to redirect email recipients to the Amazon SES event tracking domain. + CustomRedirectDomain *string `json:"customRedirectDomain,omitempty" tf:"custom_redirect_domain,omitempty"` +} + +type TrackingOptionsObservation struct { + + // Custom subdomain that is used to redirect email recipients to the Amazon SES event tracking domain. + CustomRedirectDomain *string `json:"customRedirectDomain,omitempty" tf:"custom_redirect_domain,omitempty"` +} + +type TrackingOptionsParameters struct { + + // Custom subdomain that is used to redirect email recipients to the Amazon SES event tracking domain. 
+ // +kubebuilder:validation:Optional + CustomRedirectDomain *string `json:"customRedirectDomain,omitempty" tf:"custom_redirect_domain,omitempty"` +} + +// ConfigurationSetSpec defines the desired state of ConfigurationSet +type ConfigurationSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigurationSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConfigurationSetInitParameters `json:"initProvider,omitempty"` +} + +// ConfigurationSetStatus defines the observed state of ConfigurationSet. +type ConfigurationSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigurationSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ConfigurationSet is the Schema for the ConfigurationSets API. 
Provides an SES configuration set +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ConfigurationSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ConfigurationSetSpec `json:"spec"` + Status ConfigurationSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigurationSetList contains a list of ConfigurationSets +type ConfigurationSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ConfigurationSet `json:"items"` +} + +// Repository type metadata. +var ( + ConfigurationSet_Kind = "ConfigurationSet" + ConfigurationSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ConfigurationSet_Kind}.String() + ConfigurationSet_KindAPIVersion = ConfigurationSet_Kind + "." + CRDGroupVersion.String() + ConfigurationSet_GroupVersionKind = CRDGroupVersion.WithKind(ConfigurationSet_Kind) +) + +func init() { + SchemeBuilder.Register(&ConfigurationSet{}, &ConfigurationSetList{}) +} diff --git a/apis/ses/v1beta2/zz_eventdestination_terraformed.go b/apis/ses/v1beta2/zz_eventdestination_terraformed.go new file mode 100755 index 0000000000..0fae9fa35a --- /dev/null +++ b/apis/ses/v1beta2/zz_eventdestination_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EventDestination +func (mg *EventDestination) GetTerraformResourceType() string { + return "aws_ses_event_destination" +} + +// GetConnectionDetailsMapping for this EventDestination +func (tr *EventDestination) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this EventDestination +func (tr *EventDestination) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EventDestination +func (tr *EventDestination) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this EventDestination +func (tr *EventDestination) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this EventDestination +func (tr *EventDestination) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EventDestination +func (tr *EventDestination) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EventDestination +func (tr *EventDestination) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this EventDestination +func (tr *EventDestination) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this EventDestination using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *EventDestination) LateInitialize(attrs []byte) (bool, error) { + params := &EventDestinationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EventDestination) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ses/v1beta2/zz_eventdestination_types.go b/apis/ses/v1beta2/zz_eventdestination_types.go new file mode 100755 index 0000000000..0441f056c8 --- /dev/null +++ b/apis/ses/v1beta2/zz_eventdestination_types.go @@ -0,0 +1,323 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudwatchDestinationInitParameters struct { + + // The default value for the event + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // The name for the dimension + DimensionName *string `json:"dimensionName,omitempty" tf:"dimension_name,omitempty"` + + // The source for the value. May be any of "messageTag", "emailHeader" or "linkTag". + ValueSource *string `json:"valueSource,omitempty" tf:"value_source,omitempty"` +} + +type CloudwatchDestinationObservation struct { + + // The default value for the event + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // The name for the dimension + DimensionName *string `json:"dimensionName,omitempty" tf:"dimension_name,omitempty"` + + // The source for the value. May be any of "messageTag", "emailHeader" or "linkTag". 
	ValueSource *string `json:"valueSource,omitempty" tf:"value_source,omitempty"`
}

type CloudwatchDestinationParameters struct {

	// The default value for the event
	// +kubebuilder:validation:Optional
	DefaultValue *string `json:"defaultValue" tf:"default_value,omitempty"`

	// The name for the dimension
	// +kubebuilder:validation:Optional
	DimensionName *string `json:"dimensionName" tf:"dimension_name,omitempty"`

	// The source for the value. May be any of "messageTag", "emailHeader" or "linkTag".
	// +kubebuilder:validation:Optional
	ValueSource *string `json:"valueSource" tf:"value_source,omitempty"`
}

// NOTE(review): generated by upjet from the Terraform schema — the
// +kubebuilder/+crossplane marker comments and struct tags drive CRD and
// reference-resolver generation; regenerate instead of hand-editing.
type EventDestinationInitParameters struct {

	// CloudWatch destination for the events
	CloudwatchDestination []CloudwatchDestinationInitParameters `json:"cloudwatchDestination,omitempty" tf:"cloudwatch_destination,omitempty"`

	// The name of the configuration set
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ses/v1beta2.ConfigurationSet
	ConfigurationSetName *string `json:"configurationSetName,omitempty" tf:"configuration_set_name,omitempty"`

	// Reference to a ConfigurationSet in ses to populate configurationSetName.
	// +kubebuilder:validation:Optional
	ConfigurationSetNameRef *v1.Reference `json:"configurationSetNameRef,omitempty" tf:"-"`

	// Selector for a ConfigurationSet in ses to populate configurationSetName.
	// +kubebuilder:validation:Optional
	ConfigurationSetNameSelector *v1.Selector `json:"configurationSetNameSelector,omitempty" tf:"-"`

	// If true, the event destination will be enabled
	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`

	// Send the events to a kinesis firehose destination
	KinesisDestination *KinesisDestinationInitParameters `json:"kinesisDestination,omitempty" tf:"kinesis_destination,omitempty"`

	// A list of matching types. May be any of "send", "reject", "bounce", "complaint", "delivery", "open", "click", or "renderingFailure".
	// +listType=set
	MatchingTypes []*string `json:"matchingTypes,omitempty" tf:"matching_types,omitempty"`

	// Send the events to an SNS Topic destination
	SnsDestination *SnsDestinationInitParameters `json:"snsDestination,omitempty" tf:"sns_destination,omitempty"`
}

type EventDestinationObservation struct {

	// The SES event destination ARN.
	Arn *string `json:"arn,omitempty" tf:"arn,omitempty"`

	// CloudWatch destination for the events
	CloudwatchDestination []CloudwatchDestinationObservation `json:"cloudwatchDestination,omitempty" tf:"cloudwatch_destination,omitempty"`

	// The name of the configuration set
	ConfigurationSetName *string `json:"configurationSetName,omitempty" tf:"configuration_set_name,omitempty"`

	// If true, the event destination will be enabled
	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`

	// The SES event destination name.
	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// Send the events to a kinesis firehose destination
	KinesisDestination *KinesisDestinationObservation `json:"kinesisDestination,omitempty" tf:"kinesis_destination,omitempty"`

	// A list of matching types. May be any of "send", "reject", "bounce", "complaint", "delivery", "open", "click", or "renderingFailure".
	// +listType=set
	MatchingTypes []*string `json:"matchingTypes,omitempty" tf:"matching_types,omitempty"`

	// Send the events to an SNS Topic destination
	SnsDestination *SnsDestinationObservation `json:"snsDestination,omitempty" tf:"sns_destination,omitempty"`
}

type EventDestinationParameters struct {

	// CloudWatch destination for the events
	// +kubebuilder:validation:Optional
	CloudwatchDestination []CloudwatchDestinationParameters `json:"cloudwatchDestination,omitempty" tf:"cloudwatch_destination,omitempty"`

	// The name of the configuration set
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ses/v1beta2.ConfigurationSet
	// +kubebuilder:validation:Optional
	ConfigurationSetName *string `json:"configurationSetName,omitempty" tf:"configuration_set_name,omitempty"`

	// Reference to a ConfigurationSet in ses to populate configurationSetName.
	// +kubebuilder:validation:Optional
	ConfigurationSetNameRef *v1.Reference `json:"configurationSetNameRef,omitempty" tf:"-"`

	// Selector for a ConfigurationSet in ses to populate configurationSetName.
	// +kubebuilder:validation:Optional
	ConfigurationSetNameSelector *v1.Selector `json:"configurationSetNameSelector,omitempty" tf:"-"`

	// If true, the event destination will be enabled
	// +kubebuilder:validation:Optional
	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`

	// Send the events to a kinesis firehose destination
	// +kubebuilder:validation:Optional
	KinesisDestination *KinesisDestinationParameters `json:"kinesisDestination,omitempty" tf:"kinesis_destination,omitempty"`

	// A list of matching types. May be any of "send", "reject", "bounce", "complaint", "delivery", "open", "click", or "renderingFailure".
	// +kubebuilder:validation:Optional
	// +listType=set
	MatchingTypes []*string `json:"matchingTypes,omitempty" tf:"matching_types,omitempty"`

	// Region is the region you'd like your resource to be created in.
	// The tf:"-" tag excludes it from the rendered Terraform configuration.
	// +upjet:crd:field:TFTag=-
	// +kubebuilder:validation:Required
	Region *string `json:"region" tf:"-"`

	// Send the events to an SNS Topic destination
	// +kubebuilder:validation:Optional
	SnsDestination *SnsDestinationParameters `json:"snsDestination,omitempty" tf:"sns_destination,omitempty"`
}

type KinesisDestinationInitParameters struct {

	// The ARN of the role that has permissions to access the Kinesis Stream
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true)
	RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"`

	// Reference to a Role in iam to populate roleArn.
	// +kubebuilder:validation:Optional
	RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"`

	// Selector for a Role in iam to populate roleArn.
	// +kubebuilder:validation:Optional
	RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"`

	// The ARN of the Kinesis Stream
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false)
	StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"`

	// Reference to a DeliveryStream in firehose to populate streamArn.
	// +kubebuilder:validation:Optional
	StreamArnRef *v1.Reference `json:"streamArnRef,omitempty" tf:"-"`

	// Selector for a DeliveryStream in firehose to populate streamArn.
	// +kubebuilder:validation:Optional
	StreamArnSelector *v1.Selector `json:"streamArnSelector,omitempty" tf:"-"`
}

type KinesisDestinationObservation struct {

	// The ARN of the role that has permissions to access the Kinesis Stream
	RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"`

	// The ARN of the Kinesis Stream
	StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"`
}

type KinesisDestinationParameters struct {

	// The ARN of the role that has permissions to access the Kinesis Stream
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true)
	// +kubebuilder:validation:Optional
	RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"`

	// Reference to a Role in iam to populate roleArn.
	// +kubebuilder:validation:Optional
	RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"`

	// Selector for a Role in iam to populate roleArn.
	// +kubebuilder:validation:Optional
	RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"`

	// The ARN of the Kinesis Stream
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false)
	// +kubebuilder:validation:Optional
	StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"`

	// Reference to a DeliveryStream in firehose to populate streamArn.
	// +kubebuilder:validation:Optional
	StreamArnRef *v1.Reference `json:"streamArnRef,omitempty" tf:"-"`

	// Selector for a DeliveryStream in firehose to populate streamArn.
	// +kubebuilder:validation:Optional
	StreamArnSelector *v1.Selector `json:"streamArnSelector,omitempty" tf:"-"`
}

type SnsDestinationInitParameters struct {

	// The ARN of the SNS topic
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true)
	TopicArn *string `json:"topicArn,omitempty" tf:"topic_arn,omitempty"`

	// Reference to a Topic in sns to populate topicArn.
	// +kubebuilder:validation:Optional
	TopicArnRef *v1.Reference `json:"topicArnRef,omitempty" tf:"-"`

	// Selector for a Topic in sns to populate topicArn.
	// +kubebuilder:validation:Optional
	TopicArnSelector *v1.Selector `json:"topicArnSelector,omitempty" tf:"-"`
}

type SnsDestinationObservation struct {

	// The ARN of the SNS topic
	TopicArn *string `json:"topicArn,omitempty" tf:"topic_arn,omitempty"`
}

type SnsDestinationParameters struct {

	// The ARN of the SNS topic
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic
	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true)
	// +kubebuilder:validation:Optional
	TopicArn *string `json:"topicArn,omitempty" tf:"topic_arn,omitempty"`

	// Reference to a Topic in sns to populate topicArn.
	// +kubebuilder:validation:Optional
	TopicArnRef *v1.Reference `json:"topicArnRef,omitempty" tf:"-"`

	// Selector for a Topic in sns to populate topicArn.
	// +kubebuilder:validation:Optional
	TopicArnSelector *v1.Selector `json:"topicArnSelector,omitempty" tf:"-"`
}

// EventDestinationSpec defines the desired state of EventDestination
type EventDestinationSpec struct {
	v1.ResourceSpec `json:",inline"`
	ForProvider     EventDestinationParameters `json:"forProvider"`
	// THIS IS A BETA FIELD. It will be honored
	// unless the Management Policies feature flag is disabled.
	// InitProvider holds the same fields as ForProvider, with the exception
	// of Identifier and other resource reference fields. The fields that are
	// in InitProvider are merged into ForProvider when the resource is created.
	// The same fields are also added to the terraform ignore_changes hook, to
	// avoid updating them after creation. This is useful for fields that are
	// required on creation, but we do not desire to update them after creation,
	// for example because of an external controller is managing them, like an
	// autoscaler.
	InitProvider EventDestinationInitParameters `json:"initProvider,omitempty"`
}

// EventDestinationStatus defines the observed state of EventDestination.
type EventDestinationStatus struct {
	v1.ResourceStatus `json:",inline"`
	AtProvider        EventDestinationObservation `json:"atProvider,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// EventDestination is the Schema for the EventDestinations API. Provides an SES event destination
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
type EventDestination struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// The CEL rule enforces matchingTypes only when the resource is actually
	// created/updated by Crossplane (management policies include *, Create or Update).
	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.matchingTypes) || (has(self.initProvider) && has(self.initProvider.matchingTypes))",message="spec.forProvider.matchingTypes is a required parameter"
	Spec   EventDestinationSpec   `json:"spec"`
	Status EventDestinationStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// EventDestinationList contains a list of EventDestinations
type EventDestinationList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []EventDestination `json:"items"`
}

// Repository type metadata.
var (
	EventDestination_Kind             = "EventDestination"
	EventDestination_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: EventDestination_Kind}.String()
	EventDestination_KindAPIVersion   = EventDestination_Kind + "." + CRDGroupVersion.String()
	EventDestination_GroupVersionKind = CRDGroupVersion.WithKind(EventDestination_Kind)
)

func init() {
	SchemeBuilder.Register(&EventDestination{}, &EventDestinationList{})
}

// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta2

// Hub marks this type as a conversion hub.
func (tr *ConfigurationSet) Hub() {}

// Hub marks this type as a conversion hub.
func (tr *EventDestination) Hub() {}

//go:build !ignore_autogenerated

// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by controller-gen. DO NOT EDIT.

package v1beta2

import (
	"github.com/crossplane/crossplane-runtime/apis/common/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): controller-gen generated deepcopy code — regenerate rather
// than hand-edit.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudwatchDestinationInitParameters) DeepCopyInto(out *CloudwatchDestinationInitParameters) {
	*out = *in
	if in.DefaultValue != nil {
		in, out := &in.DefaultValue, &out.DefaultValue
		*out = new(string)
		**out = **in
	}
	if in.DimensionName != nil {
		in, out := &in.DimensionName, &out.DimensionName
		*out = new(string)
		**out = **in
	}
	if in.ValueSource != nil {
		in, out := &in.ValueSource, &out.ValueSource
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchDestinationInitParameters.
func (in *CloudwatchDestinationInitParameters) DeepCopy() *CloudwatchDestinationInitParameters {
	if in == nil {
		return nil
	}
	out := new(CloudwatchDestinationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudwatchDestinationObservation) DeepCopyInto(out *CloudwatchDestinationObservation) {
	*out = *in
	if in.DefaultValue != nil {
		in, out := &in.DefaultValue, &out.DefaultValue
		*out = new(string)
		**out = **in
	}
	if in.DimensionName != nil {
		in, out := &in.DimensionName, &out.DimensionName
		*out = new(string)
		**out = **in
	}
	if in.ValueSource != nil {
		in, out := &in.ValueSource, &out.ValueSource
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchDestinationObservation.
func (in *CloudwatchDestinationObservation) DeepCopy() *CloudwatchDestinationObservation {
	if in == nil {
		return nil
	}
	out := new(CloudwatchDestinationObservation)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudwatchDestinationParameters) DeepCopyInto(out *CloudwatchDestinationParameters) {
	*out = *in
	if in.DefaultValue != nil {
		in, out := &in.DefaultValue, &out.DefaultValue
		*out = new(string)
		**out = **in
	}
	if in.DimensionName != nil {
		in, out := &in.DimensionName, &out.DimensionName
		*out = new(string)
		**out = **in
	}
	if in.ValueSource != nil {
		in, out := &in.ValueSource, &out.ValueSource
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchDestinationParameters.
func (in *CloudwatchDestinationParameters) DeepCopy() *CloudwatchDestinationParameters {
	if in == nil {
		return nil
	}
	out := new(CloudwatchDestinationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigurationSet) DeepCopyInto(out *ConfigurationSet) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSet.
func (in *ConfigurationSet) DeepCopy() *ConfigurationSet {
	if in == nil {
		return nil
	}
	out := new(ConfigurationSet)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// Returns a typed-nil-free nil when the receiver is nil.
func (in *ConfigurationSet) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigurationSetInitParameters) DeepCopyInto(out *ConfigurationSetInitParameters) {
	*out = *in
	if in.DeliveryOptions != nil {
		in, out := &in.DeliveryOptions, &out.DeliveryOptions
		*out = new(DeliveryOptionsInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ReputationMetricsEnabled != nil {
		in, out := &in.ReputationMetricsEnabled, &out.ReputationMetricsEnabled
		*out = new(bool)
		**out = **in
	}
	if in.SendingEnabled != nil {
		in, out := &in.SendingEnabled, &out.SendingEnabled
		*out = new(bool)
		**out = **in
	}
	if in.TrackingOptions != nil {
		in, out := &in.TrackingOptions, &out.TrackingOptions
		*out = new(TrackingOptionsInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetInitParameters.
func (in *ConfigurationSetInitParameters) DeepCopy() *ConfigurationSetInitParameters {
	if in == nil {
		return nil
	}
	out := new(ConfigurationSetInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigurationSetList) DeepCopyInto(out *ConfigurationSetList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ConfigurationSet, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetList.
func (in *ConfigurationSetList) DeepCopy() *ConfigurationSetList {
	if in == nil {
		return nil
	}
	out := new(ConfigurationSetList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ConfigurationSetList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigurationSetObservation) DeepCopyInto(out *ConfigurationSetObservation) {
	*out = *in
	if in.Arn != nil {
		in, out := &in.Arn, &out.Arn
		*out = new(string)
		**out = **in
	}
	if in.DeliveryOptions != nil {
		in, out := &in.DeliveryOptions, &out.DeliveryOptions
		*out = new(DeliveryOptionsObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.LastFreshStart != nil {
		in, out := &in.LastFreshStart, &out.LastFreshStart
		*out = new(string)
		**out = **in
	}
	if in.ReputationMetricsEnabled != nil {
		in, out := &in.ReputationMetricsEnabled, &out.ReputationMetricsEnabled
		*out = new(bool)
		**out = **in
	}
	if in.SendingEnabled != nil {
		in, out := &in.SendingEnabled, &out.SendingEnabled
		*out = new(bool)
		**out = **in
	}
	if in.TrackingOptions != nil {
		in, out := &in.TrackingOptions, &out.TrackingOptions
		*out = new(TrackingOptionsObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetObservation.
func (in *ConfigurationSetObservation) DeepCopy() *ConfigurationSetObservation {
	if in == nil {
		return nil
	}
	out := new(ConfigurationSetObservation)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigurationSetParameters) DeepCopyInto(out *ConfigurationSetParameters) {
	*out = *in
	if in.DeliveryOptions != nil {
		in, out := &in.DeliveryOptions, &out.DeliveryOptions
		*out = new(DeliveryOptionsParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Region != nil {
		in, out := &in.Region, &out.Region
		*out = new(string)
		**out = **in
	}
	if in.ReputationMetricsEnabled != nil {
		in, out := &in.ReputationMetricsEnabled, &out.ReputationMetricsEnabled
		*out = new(bool)
		**out = **in
	}
	if in.SendingEnabled != nil {
		in, out := &in.SendingEnabled, &out.SendingEnabled
		*out = new(bool)
		**out = **in
	}
	if in.TrackingOptions != nil {
		in, out := &in.TrackingOptions, &out.TrackingOptions
		*out = new(TrackingOptionsParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetParameters.
func (in *ConfigurationSetParameters) DeepCopy() *ConfigurationSetParameters {
	if in == nil {
		return nil
	}
	out := new(ConfigurationSetParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigurationSetSpec) DeepCopyInto(out *ConfigurationSetSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetSpec.
func (in *ConfigurationSetSpec) DeepCopy() *ConfigurationSetSpec {
	if in == nil {
		return nil
	}
	out := new(ConfigurationSetSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigurationSetStatus) DeepCopyInto(out *ConfigurationSetStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetStatus.
func (in *ConfigurationSetStatus) DeepCopy() *ConfigurationSetStatus {
	if in == nil {
		return nil
	}
	out := new(ConfigurationSetStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryOptionsInitParameters) DeepCopyInto(out *DeliveryOptionsInitParameters) {
	*out = *in
	if in.TLSPolicy != nil {
		in, out := &in.TLSPolicy, &out.TLSPolicy
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryOptionsInitParameters.
func (in *DeliveryOptionsInitParameters) DeepCopy() *DeliveryOptionsInitParameters {
	if in == nil {
		return nil
	}
	out := new(DeliveryOptionsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryOptionsObservation) DeepCopyInto(out *DeliveryOptionsObservation) {
	*out = *in
	if in.TLSPolicy != nil {
		in, out := &in.TLSPolicy, &out.TLSPolicy
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryOptionsObservation.
func (in *DeliveryOptionsObservation) DeepCopy() *DeliveryOptionsObservation {
	if in == nil {
		return nil
	}
	out := new(DeliveryOptionsObservation)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeliveryOptionsParameters) DeepCopyInto(out *DeliveryOptionsParameters) {
	*out = *in
	if in.TLSPolicy != nil {
		in, out := &in.TLSPolicy, &out.TLSPolicy
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryOptionsParameters.
func (in *DeliveryOptionsParameters) DeepCopy() *DeliveryOptionsParameters {
	if in == nil {
		return nil
	}
	out := new(DeliveryOptionsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EventDestination) DeepCopyInto(out *EventDestination) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestination.
func (in *EventDestination) DeepCopy() *EventDestination {
	if in == nil {
		return nil
	}
	out := new(EventDestination)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EventDestination) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EventDestinationInitParameters) DeepCopyInto(out *EventDestinationInitParameters) {
	*out = *in
	if in.CloudwatchDestination != nil {
		in, out := &in.CloudwatchDestination, &out.CloudwatchDestination
		*out = make([]CloudwatchDestinationInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ConfigurationSetName != nil {
		in, out := &in.ConfigurationSetName, &out.ConfigurationSetName
		*out = new(string)
		**out = **in
	}
	if in.ConfigurationSetNameRef != nil {
		in, out := &in.ConfigurationSetNameRef, &out.ConfigurationSetNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ConfigurationSetNameSelector != nil {
		in, out := &in.ConfigurationSetNameSelector, &out.ConfigurationSetNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.KinesisDestination != nil {
		in, out := &in.KinesisDestination, &out.KinesisDestination
		*out = new(KinesisDestinationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.MatchingTypes != nil {
		in, out := &in.MatchingTypes, &out.MatchingTypes
		*out = make([]*string, len(*in))
		// Elements are *string, so each non-nil entry gets its own copy.
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.SnsDestination != nil {
		in, out := &in.SnsDestination, &out.SnsDestination
		*out = new(SnsDestinationInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestinationInitParameters.
func (in *EventDestinationInitParameters) DeepCopy() *EventDestinationInitParameters {
	if in == nil {
		return nil
	}
	out := new(EventDestinationInitParameters)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EventDestinationList) DeepCopyInto(out *EventDestinationList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]EventDestination, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestinationList.
func (in *EventDestinationList) DeepCopy() *EventDestinationList {
	if in == nil {
		return nil
	}
	out := new(EventDestinationList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EventDestinationList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EventDestinationObservation) DeepCopyInto(out *EventDestinationObservation) {
	*out = *in
	if in.Arn != nil {
		in, out := &in.Arn, &out.Arn
		*out = new(string)
		**out = **in
	}
	if in.CloudwatchDestination != nil {
		in, out := &in.CloudwatchDestination, &out.CloudwatchDestination
		*out = make([]CloudwatchDestinationObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ConfigurationSetName != nil {
		in, out := &in.ConfigurationSetName, &out.ConfigurationSetName
		*out = new(string)
		**out = **in
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.KinesisDestination != nil {
		in, out := &in.KinesisDestination, &out.KinesisDestination
		*out = new(KinesisDestinationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.MatchingTypes != nil {
		in, out := &in.MatchingTypes, &out.MatchingTypes
+ *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnsDestination != nil { + in, out := &in.SnsDestination, &out.SnsDestination + *out = new(SnsDestinationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestinationObservation. +func (in *EventDestinationObservation) DeepCopy() *EventDestinationObservation { + if in == nil { + return nil + } + out := new(EventDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventDestinationParameters) DeepCopyInto(out *EventDestinationParameters) { + *out = *in + if in.CloudwatchDestination != nil { + in, out := &in.CloudwatchDestination, &out.CloudwatchDestination + *out = make([]CloudwatchDestinationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConfigurationSetName != nil { + in, out := &in.ConfigurationSetName, &out.ConfigurationSetName + *out = new(string) + **out = **in + } + if in.ConfigurationSetNameRef != nil { + in, out := &in.ConfigurationSetNameRef, &out.ConfigurationSetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationSetNameSelector != nil { + in, out := &in.ConfigurationSetNameSelector, &out.ConfigurationSetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KinesisDestination != nil { + in, out := &in.KinesisDestination, &out.KinesisDestination + *out = new(KinesisDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.MatchingTypes != nil { + in, out := &in.MatchingTypes, &out.MatchingTypes + *out = make([]*string, len(*in)) + for i := range *in { 
+ if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SnsDestination != nil { + in, out := &in.SnsDestination, &out.SnsDestination + *out = new(SnsDestinationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestinationParameters. +func (in *EventDestinationParameters) DeepCopy() *EventDestinationParameters { + if in == nil { + return nil + } + out := new(EventDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventDestinationSpec) DeepCopyInto(out *EventDestinationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestinationSpec. +func (in *EventDestinationSpec) DeepCopy() *EventDestinationSpec { + if in == nil { + return nil + } + out := new(EventDestinationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventDestinationStatus) DeepCopyInto(out *EventDestinationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestinationStatus. 
+func (in *EventDestinationStatus) DeepCopy() *EventDestinationStatus { + if in == nil { + return nil + } + out := new(EventDestinationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisDestinationInitParameters) DeepCopyInto(out *KinesisDestinationInitParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamArnRef != nil { + in, out := &in.StreamArnRef, &out.StreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamArnSelector != nil { + in, out := &in.StreamArnSelector, &out.StreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisDestinationInitParameters. +func (in *KinesisDestinationInitParameters) DeepCopy() *KinesisDestinationInitParameters { + if in == nil { + return nil + } + out := new(KinesisDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisDestinationObservation) DeepCopyInto(out *KinesisDestinationObservation) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisDestinationObservation. +func (in *KinesisDestinationObservation) DeepCopy() *KinesisDestinationObservation { + if in == nil { + return nil + } + out := new(KinesisDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisDestinationParameters) DeepCopyInto(out *KinesisDestinationParameters) { + *out = *in + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamArn != nil { + in, out := &in.StreamArn, &out.StreamArn + *out = new(string) + **out = **in + } + if in.StreamArnRef != nil { + in, out := &in.StreamArnRef, &out.StreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamArnSelector != nil { + in, out := &in.StreamArnSelector, &out.StreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisDestinationParameters. 
+func (in *KinesisDestinationParameters) DeepCopy() *KinesisDestinationParameters { + if in == nil { + return nil + } + out := new(KinesisDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnsDestinationInitParameters) DeepCopyInto(out *SnsDestinationInitParameters) { + *out = *in + if in.TopicArn != nil { + in, out := &in.TopicArn, &out.TopicArn + *out = new(string) + **out = **in + } + if in.TopicArnRef != nil { + in, out := &in.TopicArnRef, &out.TopicArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TopicArnSelector != nil { + in, out := &in.TopicArnSelector, &out.TopicArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnsDestinationInitParameters. +func (in *SnsDestinationInitParameters) DeepCopy() *SnsDestinationInitParameters { + if in == nil { + return nil + } + out := new(SnsDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnsDestinationObservation) DeepCopyInto(out *SnsDestinationObservation) { + *out = *in + if in.TopicArn != nil { + in, out := &in.TopicArn, &out.TopicArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnsDestinationObservation. +func (in *SnsDestinationObservation) DeepCopy() *SnsDestinationObservation { + if in == nil { + return nil + } + out := new(SnsDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnsDestinationParameters) DeepCopyInto(out *SnsDestinationParameters) { + *out = *in + if in.TopicArn != nil { + in, out := &in.TopicArn, &out.TopicArn + *out = new(string) + **out = **in + } + if in.TopicArnRef != nil { + in, out := &in.TopicArnRef, &out.TopicArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TopicArnSelector != nil { + in, out := &in.TopicArnSelector, &out.TopicArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnsDestinationParameters. +func (in *SnsDestinationParameters) DeepCopy() *SnsDestinationParameters { + if in == nil { + return nil + } + out := new(SnsDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackingOptionsInitParameters) DeepCopyInto(out *TrackingOptionsInitParameters) { + *out = *in + if in.CustomRedirectDomain != nil { + in, out := &in.CustomRedirectDomain, &out.CustomRedirectDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackingOptionsInitParameters. +func (in *TrackingOptionsInitParameters) DeepCopy() *TrackingOptionsInitParameters { + if in == nil { + return nil + } + out := new(TrackingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackingOptionsObservation) DeepCopyInto(out *TrackingOptionsObservation) { + *out = *in + if in.CustomRedirectDomain != nil { + in, out := &in.CustomRedirectDomain, &out.CustomRedirectDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackingOptionsObservation. 
+func (in *TrackingOptionsObservation) DeepCopy() *TrackingOptionsObservation { + if in == nil { + return nil + } + out := new(TrackingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackingOptionsParameters) DeepCopyInto(out *TrackingOptionsParameters) { + *out = *in + if in.CustomRedirectDomain != nil { + in, out := &in.CustomRedirectDomain, &out.CustomRedirectDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackingOptionsParameters. +func (in *TrackingOptionsParameters) DeepCopy() *TrackingOptionsParameters { + if in == nil { + return nil + } + out := new(TrackingOptionsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ses/v1beta2/zz_generated.managed.go b/apis/ses/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..38d2322d79 --- /dev/null +++ b/apis/ses/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ConfigurationSet. +func (mg *ConfigurationSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ConfigurationSet. +func (mg *ConfigurationSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ConfigurationSet. +func (mg *ConfigurationSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ConfigurationSet. 
+func (mg *ConfigurationSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ConfigurationSet. +func (mg *ConfigurationSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ConfigurationSet. +func (mg *ConfigurationSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ConfigurationSet. +func (mg *ConfigurationSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ConfigurationSet. +func (mg *ConfigurationSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ConfigurationSet. +func (mg *ConfigurationSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ConfigurationSet. +func (mg *ConfigurationSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ConfigurationSet. +func (mg *ConfigurationSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ConfigurationSet. +func (mg *ConfigurationSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this EventDestination. +func (mg *EventDestination) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EventDestination. +func (mg *EventDestination) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EventDestination. 
+func (mg *EventDestination) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EventDestination. +func (mg *EventDestination) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EventDestination. +func (mg *EventDestination) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EventDestination. +func (mg *EventDestination) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EventDestination. +func (mg *EventDestination) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EventDestination. +func (mg *EventDestination) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EventDestination. +func (mg *EventDestination) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EventDestination. +func (mg *EventDestination) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EventDestination. +func (mg *EventDestination) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EventDestination. 
+func (mg *EventDestination) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ses/v1beta2/zz_generated.managedlist.go b/apis/ses/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..e0d2cf2b19 --- /dev/null +++ b/apis/ses/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConfigurationSetList. +func (l *ConfigurationSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this EventDestinationList. +func (l *EventDestinationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ses/v1beta2/zz_generated.resolvers.go b/apis/ses/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..1693e7e167 --- /dev/null +++ b/apis/ses/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,195 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *EventDestination) ResolveReferences( // ResolveReferences of this EventDestination. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ses.aws.upbound.io", "v1beta2", "ConfigurationSet", "ConfigurationSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ConfigurationSetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ConfigurationSetNameRef, + Selector: mg.Spec.ForProvider.ConfigurationSetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ConfigurationSetName") + } + mg.Spec.ForProvider.ConfigurationSetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ConfigurationSetNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.KinesisDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KinesisDestination.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.KinesisDestination.RoleArnRef, + Selector: mg.Spec.ForProvider.KinesisDestination.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KinesisDestination.RoleArn") + } + mg.Spec.ForProvider.KinesisDestination.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KinesisDestination.RoleArnRef = 
rsp.ResolvedReference + + } + if mg.Spec.ForProvider.KinesisDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KinesisDestination.StreamArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.ForProvider.KinesisDestination.StreamArnRef, + Selector: mg.Spec.ForProvider.KinesisDestination.StreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KinesisDestination.StreamArn") + } + mg.Spec.ForProvider.KinesisDestination.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KinesisDestination.StreamArnRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.SnsDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SnsDestination.TopicArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.SnsDestination.TopicArnRef, + Selector: mg.Spec.ForProvider.SnsDestination.TopicArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SnsDestination.TopicArn") + } + mg.Spec.ForProvider.SnsDestination.TopicArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SnsDestination.TopicArnRef = rsp.ResolvedReference + + } + { + m, l, err = 
apisresolver.GetManagedResource("ses.aws.upbound.io", "v1beta2", "ConfigurationSet", "ConfigurationSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ConfigurationSetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ConfigurationSetNameRef, + Selector: mg.Spec.InitProvider.ConfigurationSetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ConfigurationSetName") + } + mg.Spec.InitProvider.ConfigurationSetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ConfigurationSetNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.KinesisDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KinesisDestination.RoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.KinesisDestination.RoleArnRef, + Selector: mg.Spec.InitProvider.KinesisDestination.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KinesisDestination.RoleArn") + } + mg.Spec.InitProvider.KinesisDestination.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KinesisDestination.RoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.KinesisDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != 
nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KinesisDestination.StreamArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.InitProvider.KinesisDestination.StreamArnRef, + Selector: mg.Spec.InitProvider.KinesisDestination.StreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KinesisDestination.StreamArn") + } + mg.Spec.InitProvider.KinesisDestination.StreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KinesisDestination.StreamArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.SnsDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SnsDestination.TopicArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.SnsDestination.TopicArnRef, + Selector: mg.Spec.InitProvider.SnsDestination.TopicArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SnsDestination.TopicArn") + } + mg.Spec.InitProvider.SnsDestination.TopicArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SnsDestination.TopicArnRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/ses/v1beta2/zz_groupversion_info.go b/apis/ses/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..fd86a061c2 --- /dev/null +++ b/apis/ses/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// 
SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ses.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "ses.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/sesv2/v1beta1/zz_generated.conversion_hubs.go b/apis/sesv2/v1beta1/zz_generated.conversion_hubs.go index fac2273d36..7e3e8c18f2 100755 --- a/apis/sesv2/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/sesv2/v1beta1/zz_generated.conversion_hubs.go @@ -6,18 +6,9 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *ConfigurationSet) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ConfigurationSetEventDestination) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DedicatedIPPool) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *EmailIdentity) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *EmailIdentityFeedbackAttributes) Hub() {} diff --git a/apis/sesv2/v1beta1/zz_generated.conversion_spokes.go b/apis/sesv2/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..f1d3b7ba48 --- /dev/null +++ b/apis/sesv2/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ConfigurationSet to the hub type. +func (tr *ConfigurationSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ConfigurationSet type. +func (tr *ConfigurationSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ConfigurationSetEventDestination to the hub type. 
+func (tr *ConfigurationSetEventDestination) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ConfigurationSetEventDestination type. +func (tr *ConfigurationSetEventDestination) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this EmailIdentity to the hub type. +func (tr *EmailIdentity) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EmailIdentity type. 
+func (tr *EmailIdentity) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/sesv2/v1beta2/zz_configurationset_terraformed.go b/apis/sesv2/v1beta2/zz_configurationset_terraformed.go new file mode 100755 index 0000000000..71b763b77f --- /dev/null +++ b/apis/sesv2/v1beta2/zz_configurationset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ConfigurationSet +func (mg *ConfigurationSet) GetTerraformResourceType() string { + return "aws_sesv2_configuration_set" +} + +// GetConnectionDetailsMapping for this ConfigurationSet +func (tr *ConfigurationSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ConfigurationSet +func (tr *ConfigurationSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ConfigurationSet +func (tr *ConfigurationSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ConfigurationSet +func (tr 
*ConfigurationSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ConfigurationSet +func (tr *ConfigurationSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ConfigurationSet +func (tr *ConfigurationSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ConfigurationSet +func (tr *ConfigurationSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ConfigurationSet +func (tr *ConfigurationSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ConfigurationSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ConfigurationSet) LateInitialize(attrs []byte) (bool, error) { + params := &ConfigurationSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ConfigurationSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sesv2/v1beta2/zz_configurationset_types.go b/apis/sesv2/v1beta2/zz_configurationset_types.go new file mode 100755 index 0000000000..b599de832f --- /dev/null +++ b/apis/sesv2/v1beta2/zz_configurationset_types.go @@ -0,0 +1,344 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationSetInitParameters struct { + + // An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. 
+ DeliveryOptions *DeliveryOptionsInitParameters `json:"deliveryOptions,omitempty" tf:"delivery_options,omitempty"` + + // An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. + ReputationOptions *ReputationOptionsInitParameters `json:"reputationOptions,omitempty" tf:"reputation_options,omitempty"` + + // An object that defines whether or not Amazon SES can send email that you send using the configuration set. + SendingOptions *SendingOptionsInitParameters `json:"sendingOptions,omitempty" tf:"sending_options,omitempty"` + + // An object that contains information about the suppression list preferences for your account. + SuppressionOptions *SuppressionOptionsInitParameters `json:"suppressionOptions,omitempty" tf:"suppression_options,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // An object that defines the open and click tracking options for emails that you send using the configuration set. + TrackingOptions *TrackingOptionsInitParameters `json:"trackingOptions,omitempty" tf:"tracking_options,omitempty"` + + // An object that defines the VDM settings that apply to emails that you send using the configuration set. + VdmOptions *VdmOptionsInitParameters `json:"vdmOptions,omitempty" tf:"vdm_options,omitempty"` +} + +type ConfigurationSetObservation struct { + + // ARN of the Configuration Set. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. + DeliveryOptions *DeliveryOptionsObservation `json:"deliveryOptions,omitempty" tf:"delivery_options,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. 
+ ReputationOptions *ReputationOptionsObservation `json:"reputationOptions,omitempty" tf:"reputation_options,omitempty"` + + // An object that defines whether or not Amazon SES can send email that you send using the configuration set. + SendingOptions *SendingOptionsObservation `json:"sendingOptions,omitempty" tf:"sending_options,omitempty"` + + // An object that contains information about the suppression list preferences for your account. + SuppressionOptions *SuppressionOptionsObservation `json:"suppressionOptions,omitempty" tf:"suppression_options,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // An object that defines the open and click tracking options for emails that you send using the configuration set. + TrackingOptions *TrackingOptionsObservation `json:"trackingOptions,omitempty" tf:"tracking_options,omitempty"` + + // An object that defines the VDM settings that apply to emails that you send using the configuration set. + VdmOptions *VdmOptionsObservation `json:"vdmOptions,omitempty" tf:"vdm_options,omitempty"` +} + +type ConfigurationSetParameters struct { + + // An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. + // +kubebuilder:validation:Optional + DeliveryOptions *DeliveryOptionsParameters `json:"deliveryOptions,omitempty" tf:"delivery_options,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. 
+ // +kubebuilder:validation:Optional + ReputationOptions *ReputationOptionsParameters `json:"reputationOptions,omitempty" tf:"reputation_options,omitempty"` + + // An object that defines whether or not Amazon SES can send email that you send using the configuration set. + // +kubebuilder:validation:Optional + SendingOptions *SendingOptionsParameters `json:"sendingOptions,omitempty" tf:"sending_options,omitempty"` + + // An object that contains information about the suppression list preferences for your account. + // +kubebuilder:validation:Optional + SuppressionOptions *SuppressionOptionsParameters `json:"suppressionOptions,omitempty" tf:"suppression_options,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // An object that defines the open and click tracking options for emails that you send using the configuration set. + // +kubebuilder:validation:Optional + TrackingOptions *TrackingOptionsParameters `json:"trackingOptions,omitempty" tf:"tracking_options,omitempty"` + + // An object that defines the VDM settings that apply to emails that you send using the configuration set. + // +kubebuilder:validation:Optional + VdmOptions *VdmOptionsParameters `json:"vdmOptions,omitempty" tf:"vdm_options,omitempty"` +} + +type DashboardOptionsInitParameters struct { + + // Specifies the status of your VDM engagement metrics collection. Valid values: ENABLED, DISABLED. + EngagementMetrics *string `json:"engagementMetrics,omitempty" tf:"engagement_metrics,omitempty"` +} + +type DashboardOptionsObservation struct { + + // Specifies the status of your VDM engagement metrics collection. Valid values: ENABLED, DISABLED. + EngagementMetrics *string `json:"engagementMetrics,omitempty" tf:"engagement_metrics,omitempty"` +} + +type DashboardOptionsParameters struct { + + // Specifies the status of your VDM engagement metrics collection. 
Valid values: ENABLED, DISABLED. + // +kubebuilder:validation:Optional + EngagementMetrics *string `json:"engagementMetrics,omitempty" tf:"engagement_metrics,omitempty"` +} + +type DeliveryOptionsInitParameters struct { + + // The name of the dedicated IP pool to associate with the configuration set. + SendingPoolName *string `json:"sendingPoolName,omitempty" tf:"sending_pool_name,omitempty"` + + // Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Valid values: REQUIRE, OPTIONAL. + TLSPolicy *string `json:"tlsPolicy,omitempty" tf:"tls_policy,omitempty"` +} + +type DeliveryOptionsObservation struct { + + // The name of the dedicated IP pool to associate with the configuration set. + SendingPoolName *string `json:"sendingPoolName,omitempty" tf:"sending_pool_name,omitempty"` + + // Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Valid values: REQUIRE, OPTIONAL. + TLSPolicy *string `json:"tlsPolicy,omitempty" tf:"tls_policy,omitempty"` +} + +type DeliveryOptionsParameters struct { + + // The name of the dedicated IP pool to associate with the configuration set. + // +kubebuilder:validation:Optional + SendingPoolName *string `json:"sendingPoolName,omitempty" tf:"sending_pool_name,omitempty"` + + // Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Valid values: REQUIRE, OPTIONAL. + // +kubebuilder:validation:Optional + TLSPolicy *string `json:"tlsPolicy,omitempty" tf:"tls_policy,omitempty"` +} + +type GuardianOptionsInitParameters struct { + + // Specifies the status of your VDM optimized shared delivery. Valid values: ENABLED, DISABLED. + OptimizedSharedDelivery *string `json:"optimizedSharedDelivery,omitempty" tf:"optimized_shared_delivery,omitempty"` +} + +type GuardianOptionsObservation struct { + + // Specifies the status of your VDM optimized shared delivery. 
Valid values: ENABLED, DISABLED. + OptimizedSharedDelivery *string `json:"optimizedSharedDelivery,omitempty" tf:"optimized_shared_delivery,omitempty"` +} + +type GuardianOptionsParameters struct { + + // Specifies the status of your VDM optimized shared delivery. Valid values: ENABLED, DISABLED. + // +kubebuilder:validation:Optional + OptimizedSharedDelivery *string `json:"optimizedSharedDelivery,omitempty" tf:"optimized_shared_delivery,omitempty"` +} + +type ReputationOptionsInitParameters struct { + + // If true, tracking of reputation metrics is enabled for the configuration set. If false, tracking of reputation metrics is disabled for the configuration set. + ReputationMetricsEnabled *bool `json:"reputationMetricsEnabled,omitempty" tf:"reputation_metrics_enabled,omitempty"` +} + +type ReputationOptionsObservation struct { + + // The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start. + LastFreshStart *string `json:"lastFreshStart,omitempty" tf:"last_fresh_start,omitempty"` + + // If true, tracking of reputation metrics is enabled for the configuration set. If false, tracking of reputation metrics is disabled for the configuration set. + ReputationMetricsEnabled *bool `json:"reputationMetricsEnabled,omitempty" tf:"reputation_metrics_enabled,omitempty"` +} + +type ReputationOptionsParameters struct { + + // If true, tracking of reputation metrics is enabled for the configuration set. If false, tracking of reputation metrics is disabled for the configuration set. + // +kubebuilder:validation:Optional + ReputationMetricsEnabled *bool `json:"reputationMetricsEnabled,omitempty" tf:"reputation_metrics_enabled,omitempty"` +} + +type SendingOptionsInitParameters struct { + + // If true, email sending is enabled for the configuration set. If false, email sending is disabled for the configuration set. 
+ SendingEnabled *bool `json:"sendingEnabled,omitempty" tf:"sending_enabled,omitempty"` +} + +type SendingOptionsObservation struct { + + // If true, email sending is enabled for the configuration set. If false, email sending is disabled for the configuration set. + SendingEnabled *bool `json:"sendingEnabled,omitempty" tf:"sending_enabled,omitempty"` +} + +type SendingOptionsParameters struct { + + // If true, email sending is enabled for the configuration set. If false, email sending is disabled for the configuration set. + // +kubebuilder:validation:Optional + SendingEnabled *bool `json:"sendingEnabled,omitempty" tf:"sending_enabled,omitempty"` +} + +type SuppressionOptionsInitParameters struct { + + // A list that contains the reasons that email addresses are automatically added to the suppression list for your account. Valid values: BOUNCE, COMPLAINT. + SuppressedReasons []*string `json:"suppressedReasons,omitempty" tf:"suppressed_reasons,omitempty"` +} + +type SuppressionOptionsObservation struct { + + // A list that contains the reasons that email addresses are automatically added to the suppression list for your account. Valid values: BOUNCE, COMPLAINT. + SuppressedReasons []*string `json:"suppressedReasons,omitempty" tf:"suppressed_reasons,omitempty"` +} + +type SuppressionOptionsParameters struct { + + // A list that contains the reasons that email addresses are automatically added to the suppression list for your account. Valid values: BOUNCE, COMPLAINT. + // +kubebuilder:validation:Optional + SuppressedReasons []*string `json:"suppressedReasons,omitempty" tf:"suppressed_reasons,omitempty"` +} + +type TrackingOptionsInitParameters struct { + + // The domain to use for tracking open and click events. + CustomRedirectDomain *string `json:"customRedirectDomain,omitempty" tf:"custom_redirect_domain,omitempty"` +} + +type TrackingOptionsObservation struct { + + // The domain to use for tracking open and click events. 
+ CustomRedirectDomain *string `json:"customRedirectDomain,omitempty" tf:"custom_redirect_domain,omitempty"` +} + +type TrackingOptionsParameters struct { + + // The domain to use for tracking open and click events. + // +kubebuilder:validation:Optional + CustomRedirectDomain *string `json:"customRedirectDomain" tf:"custom_redirect_domain,omitempty"` +} + +type VdmOptionsInitParameters struct { + + // Specifies additional settings for your VDM configuration as applicable to the Dashboard. + DashboardOptions *DashboardOptionsInitParameters `json:"dashboardOptions,omitempty" tf:"dashboard_options,omitempty"` + + // Specifies additional settings for your VDM configuration as applicable to the Guardian. + GuardianOptions *GuardianOptionsInitParameters `json:"guardianOptions,omitempty" tf:"guardian_options,omitempty"` +} + +type VdmOptionsObservation struct { + + // Specifies additional settings for your VDM configuration as applicable to the Dashboard. + DashboardOptions *DashboardOptionsObservation `json:"dashboardOptions,omitempty" tf:"dashboard_options,omitempty"` + + // Specifies additional settings for your VDM configuration as applicable to the Guardian. + GuardianOptions *GuardianOptionsObservation `json:"guardianOptions,omitempty" tf:"guardian_options,omitempty"` +} + +type VdmOptionsParameters struct { + + // Specifies additional settings for your VDM configuration as applicable to the Dashboard. + // +kubebuilder:validation:Optional + DashboardOptions *DashboardOptionsParameters `json:"dashboardOptions,omitempty" tf:"dashboard_options,omitempty"` + + // Specifies additional settings for your VDM configuration as applicable to the Guardian. 
+ // +kubebuilder:validation:Optional + GuardianOptions *GuardianOptionsParameters `json:"guardianOptions,omitempty" tf:"guardian_options,omitempty"` +} + +// ConfigurationSetSpec defines the desired state of ConfigurationSet +type ConfigurationSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigurationSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConfigurationSetInitParameters `json:"initProvider,omitempty"` +} + +// ConfigurationSetStatus defines the observed state of ConfigurationSet. +type ConfigurationSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigurationSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ConfigurationSet is the Schema for the ConfigurationSets API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ConfigurationSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ConfigurationSetSpec `json:"spec"` + Status ConfigurationSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigurationSetList contains a list of ConfigurationSets +type ConfigurationSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ConfigurationSet `json:"items"` +} + +// Repository type metadata. +var ( + ConfigurationSet_Kind = "ConfigurationSet" + ConfigurationSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ConfigurationSet_Kind}.String() + ConfigurationSet_KindAPIVersion = ConfigurationSet_Kind + "." + CRDGroupVersion.String() + ConfigurationSet_GroupVersionKind = CRDGroupVersion.WithKind(ConfigurationSet_Kind) +) + +func init() { + SchemeBuilder.Register(&ConfigurationSet{}, &ConfigurationSetList{}) +} diff --git a/apis/sesv2/v1beta2/zz_configurationseteventdestination_terraformed.go b/apis/sesv2/v1beta2/zz_configurationseteventdestination_terraformed.go new file mode 100755 index 0000000000..26a1e46501 --- /dev/null +++ b/apis/sesv2/v1beta2/zz_configurationseteventdestination_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ConfigurationSetEventDestination +func (mg *ConfigurationSetEventDestination) GetTerraformResourceType() string { + return "aws_sesv2_configuration_set_event_destination" +} + +// GetConnectionDetailsMapping for this ConfigurationSetEventDestination +func (tr *ConfigurationSetEventDestination) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ConfigurationSetEventDestination +func (tr *ConfigurationSetEventDestination) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ConfigurationSetEventDestination +func (tr *ConfigurationSetEventDestination) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ConfigurationSetEventDestination +func (tr *ConfigurationSetEventDestination) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ConfigurationSetEventDestination +func (tr *ConfigurationSetEventDestination) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ConfigurationSetEventDestination +func (tr *ConfigurationSetEventDestination) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != 
nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ConfigurationSetEventDestination +func (tr *ConfigurationSetEventDestination) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ConfigurationSetEventDestination +func (tr *ConfigurationSetEventDestination) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ConfigurationSetEventDestination using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ConfigurationSetEventDestination) LateInitialize(attrs []byte) (bool, error) { + params := &ConfigurationSetEventDestinationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ConfigurationSetEventDestination) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sesv2/v1beta2/zz_configurationseteventdestination_types.go b/apis/sesv2/v1beta2/zz_configurationseteventdestination_types.go new file mode 100755 index 0000000000..cfc9c87e0e --- /dev/null +++ b/apis/sesv2/v1beta2/zz_configurationseteventdestination_types.go @@ -0,0 +1,411 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudWatchDestinationInitParameters struct { + + // An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch. See dimension_configuration below. + DimensionConfiguration []DimensionConfigurationInitParameters `json:"dimensionConfiguration,omitempty" tf:"dimension_configuration,omitempty"` +} + +type CloudWatchDestinationObservation struct { + + // An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch. See dimension_configuration below. 
+	DimensionConfiguration []DimensionConfigurationObservation `json:"dimensionConfiguration,omitempty" tf:"dimension_configuration,omitempty"`
+}
+
+type CloudWatchDestinationParameters struct {
+
+	// An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch. See dimension_configuration below.
+	// +kubebuilder:validation:Optional
+	DimensionConfiguration []DimensionConfigurationParameters `json:"dimensionConfiguration" tf:"dimension_configuration,omitempty"`
+}
+
+type ConfigurationSetEventDestinationInitParameters struct {
+
+	// The name of the configuration set.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sesv2/v1beta2.ConfigurationSet
+	ConfigurationSetName *string `json:"configurationSetName,omitempty" tf:"configuration_set_name,omitempty"`
+
+	// Reference to a ConfigurationSet in sesv2 to populate configurationSetName.
+	// +kubebuilder:validation:Optional
+	ConfigurationSetNameRef *v1.Reference `json:"configurationSetNameRef,omitempty" tf:"-"`
+
+	// Selector for a ConfigurationSet in sesv2 to populate configurationSetName.
+	// +kubebuilder:validation:Optional
+	ConfigurationSetNameSelector *v1.Selector `json:"configurationSetNameSelector,omitempty" tf:"-"`
+
+	// An object that defines the event destination. See event_destination below.
+	EventDestination *EventDestinationInitParameters `json:"eventDestination,omitempty" tf:"event_destination,omitempty"`
+
+	// A name that identifies the event destination within the configuration set.
+	EventDestinationName *string `json:"eventDestinationName,omitempty" tf:"event_destination_name,omitempty"`
+}
+
+type ConfigurationSetEventDestinationObservation struct {
+
+	// The name of the configuration set.
+	ConfigurationSetName *string `json:"configurationSetName,omitempty" tf:"configuration_set_name,omitempty"`
+
+	// An object that defines the event destination. See event_destination below.
+	EventDestination *EventDestinationObservation `json:"eventDestination,omitempty" tf:"event_destination,omitempty"`
+
+	// A name that identifies the event destination within the configuration set.
+	EventDestinationName *string `json:"eventDestinationName,omitempty" tf:"event_destination_name,omitempty"`
+
+	// A pipe-delimited string combining configuration_set_name and event_destination_name.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+}
+
+type ConfigurationSetEventDestinationParameters struct {
+
+	// The name of the configuration set.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sesv2/v1beta2.ConfigurationSet
+	// +kubebuilder:validation:Optional
+	ConfigurationSetName *string `json:"configurationSetName,omitempty" tf:"configuration_set_name,omitempty"`
+
+	// Reference to a ConfigurationSet in sesv2 to populate configurationSetName.
+	// +kubebuilder:validation:Optional
+	ConfigurationSetNameRef *v1.Reference `json:"configurationSetNameRef,omitempty" tf:"-"`
+
+	// Selector for a ConfigurationSet in sesv2 to populate configurationSetName.
+	// +kubebuilder:validation:Optional
+	ConfigurationSetNameSelector *v1.Selector `json:"configurationSetNameSelector,omitempty" tf:"-"`
+
+	// An object that defines the event destination. See event_destination below.
+	// +kubebuilder:validation:Optional
+	EventDestination *EventDestinationParameters `json:"eventDestination,omitempty" tf:"event_destination,omitempty"`
+
+	// A name that identifies the event destination within the configuration set.
+	// +kubebuilder:validation:Optional
+	EventDestinationName *string `json:"eventDestinationName,omitempty" tf:"event_destination_name,omitempty"`
+
+	// Region is the region you'd like your resource to be created in.
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type DimensionConfigurationInitParameters struct { + + // The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. + DefaultDimensionValue *string `json:"defaultDimensionValue,omitempty" tf:"default_dimension_value,omitempty"` + + // The name of an Amazon CloudWatch dimension associated with an email sending metric. + DimensionName *string `json:"dimensionName,omitempty" tf:"dimension_name,omitempty"` + + // The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. Valid values: MESSAGE_TAG, EMAIL_HEADER, LINK_TAG. + DimensionValueSource *string `json:"dimensionValueSource,omitempty" tf:"dimension_value_source,omitempty"` +} + +type DimensionConfigurationObservation struct { + + // The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. + DefaultDimensionValue *string `json:"defaultDimensionValue,omitempty" tf:"default_dimension_value,omitempty"` + + // The name of an Amazon CloudWatch dimension associated with an email sending metric. + DimensionName *string `json:"dimensionName,omitempty" tf:"dimension_name,omitempty"` + + // The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. Valid values: MESSAGE_TAG, EMAIL_HEADER, LINK_TAG. + DimensionValueSource *string `json:"dimensionValueSource,omitempty" tf:"dimension_value_source,omitempty"` +} + +type DimensionConfigurationParameters struct { + + // The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. 
+ // +kubebuilder:validation:Optional + DefaultDimensionValue *string `json:"defaultDimensionValue" tf:"default_dimension_value,omitempty"` + + // The name of an Amazon CloudWatch dimension associated with an email sending metric. + // +kubebuilder:validation:Optional + DimensionName *string `json:"dimensionName" tf:"dimension_name,omitempty"` + + // The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. Valid values: MESSAGE_TAG, EMAIL_HEADER, LINK_TAG. + // +kubebuilder:validation:Optional + DimensionValueSource *string `json:"dimensionValueSource" tf:"dimension_value_source,omitempty"` +} + +type EventDestinationInitParameters struct { + + // An object that defines an Amazon CloudWatch destination for email events. See cloud_watch_destination below + CloudWatchDestination *CloudWatchDestinationInitParameters `json:"cloudWatchDestination,omitempty" tf:"cloud_watch_destination,omitempty"` + + // When the event destination is enabled, the specified event types are sent to the destinations. Default: false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // An object that defines an Amazon Kinesis Data Firehose destination for email events. See kinesis_firehose_destination below. + KinesisFirehoseDestination *KinesisFirehoseDestinationInitParameters `json:"kinesisFirehoseDestination,omitempty" tf:"kinesis_firehose_destination,omitempty"` + + // - An array that specifies which events the Amazon SES API v2 should send to the destinations. Valid values: SEND, REJECT, BOUNCE, COMPLAINT, DELIVERY, OPEN, CLICK, RENDERING_FAILURE, DELIVERY_DELAY, SUBSCRIPTION. + MatchingEventTypes []*string `json:"matchingEventTypes,omitempty" tf:"matching_event_types,omitempty"` + + // An object that defines an Amazon Pinpoint project destination for email events. See pinpoint_destination below. 
+ PinpointDestination *PinpointDestinationInitParameters `json:"pinpointDestination,omitempty" tf:"pinpoint_destination,omitempty"` + + // An object that defines an Amazon SNS destination for email events. See sns_destination below. + SnsDestination *SnsDestinationInitParameters `json:"snsDestination,omitempty" tf:"sns_destination,omitempty"` +} + +type EventDestinationObservation struct { + + // An object that defines an Amazon CloudWatch destination for email events. See cloud_watch_destination below + CloudWatchDestination *CloudWatchDestinationObservation `json:"cloudWatchDestination,omitempty" tf:"cloud_watch_destination,omitempty"` + + // When the event destination is enabled, the specified event types are sent to the destinations. Default: false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // An object that defines an Amazon Kinesis Data Firehose destination for email events. See kinesis_firehose_destination below. + KinesisFirehoseDestination *KinesisFirehoseDestinationObservation `json:"kinesisFirehoseDestination,omitempty" tf:"kinesis_firehose_destination,omitempty"` + + // - An array that specifies which events the Amazon SES API v2 should send to the destinations. Valid values: SEND, REJECT, BOUNCE, COMPLAINT, DELIVERY, OPEN, CLICK, RENDERING_FAILURE, DELIVERY_DELAY, SUBSCRIPTION. + MatchingEventTypes []*string `json:"matchingEventTypes,omitempty" tf:"matching_event_types,omitempty"` + + // An object that defines an Amazon Pinpoint project destination for email events. See pinpoint_destination below. + PinpointDestination *PinpointDestinationObservation `json:"pinpointDestination,omitempty" tf:"pinpoint_destination,omitempty"` + + // An object that defines an Amazon SNS destination for email events. See sns_destination below. 
+ SnsDestination *SnsDestinationObservation `json:"snsDestination,omitempty" tf:"sns_destination,omitempty"` +} + +type EventDestinationParameters struct { + + // An object that defines an Amazon CloudWatch destination for email events. See cloud_watch_destination below + // +kubebuilder:validation:Optional + CloudWatchDestination *CloudWatchDestinationParameters `json:"cloudWatchDestination,omitempty" tf:"cloud_watch_destination,omitempty"` + + // When the event destination is enabled, the specified event types are sent to the destinations. Default: false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // An object that defines an Amazon Kinesis Data Firehose destination for email events. See kinesis_firehose_destination below. + // +kubebuilder:validation:Optional + KinesisFirehoseDestination *KinesisFirehoseDestinationParameters `json:"kinesisFirehoseDestination,omitempty" tf:"kinesis_firehose_destination,omitempty"` + + // - An array that specifies which events the Amazon SES API v2 should send to the destinations. Valid values: SEND, REJECT, BOUNCE, COMPLAINT, DELIVERY, OPEN, CLICK, RENDERING_FAILURE, DELIVERY_DELAY, SUBSCRIPTION. + // +kubebuilder:validation:Optional + MatchingEventTypes []*string `json:"matchingEventTypes" tf:"matching_event_types,omitempty"` + + // An object that defines an Amazon Pinpoint project destination for email events. See pinpoint_destination below. + // +kubebuilder:validation:Optional + PinpointDestination *PinpointDestinationParameters `json:"pinpointDestination,omitempty" tf:"pinpoint_destination,omitempty"` + + // An object that defines an Amazon SNS destination for email events. See sns_destination below. 
+ // +kubebuilder:validation:Optional + SnsDestination *SnsDestinationParameters `json:"snsDestination,omitempty" tf:"sns_destination,omitempty"` +} + +type KinesisFirehoseDestinationInitParameters struct { + + // The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that the Amazon SES API v2 sends email events to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + DeliveryStreamArn *string `json:"deliveryStreamArn,omitempty" tf:"delivery_stream_arn,omitempty"` + + // Reference to a DeliveryStream in firehose to populate deliveryStreamArn. + // +kubebuilder:validation:Optional + DeliveryStreamArnRef *v1.Reference `json:"deliveryStreamArnRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate deliveryStreamArn. + // +kubebuilder:validation:Optional + DeliveryStreamArnSelector *v1.Selector `json:"deliveryStreamArnSelector,omitempty" tf:"-"` + + // The Amazon Resource Name (ARN) of the IAM role that the Amazon SES API v2 uses to send email events to the Amazon Kinesis Data Firehose stream. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. 
+ // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` +} + +type KinesisFirehoseDestinationObservation struct { + + // The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that the Amazon SES API v2 sends email events to. + DeliveryStreamArn *string `json:"deliveryStreamArn,omitempty" tf:"delivery_stream_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role that the Amazon SES API v2 uses to send email events to the Amazon Kinesis Data Firehose stream. + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` +} + +type KinesisFirehoseDestinationParameters struct { + + // The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that the Amazon SES API v2 sends email events to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + // +kubebuilder:validation:Optional + DeliveryStreamArn *string `json:"deliveryStreamArn,omitempty" tf:"delivery_stream_arn,omitempty"` + + // Reference to a DeliveryStream in firehose to populate deliveryStreamArn. + // +kubebuilder:validation:Optional + DeliveryStreamArnRef *v1.Reference `json:"deliveryStreamArnRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate deliveryStreamArn. + // +kubebuilder:validation:Optional + DeliveryStreamArnSelector *v1.Selector `json:"deliveryStreamArnSelector,omitempty" tf:"-"` + + // The Amazon Resource Name (ARN) of the IAM role that the Amazon SES API v2 uses to send email events to the Amazon Kinesis Data Firehose stream. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + IAMRoleArn *string `json:"iamRoleArn,omitempty" tf:"iam_role_arn,omitempty"` + + // Reference to a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnRef *v1.Reference `json:"iamRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate iamRoleArn. + // +kubebuilder:validation:Optional + IAMRoleArnSelector *v1.Selector `json:"iamRoleArnSelector,omitempty" tf:"-"` +} + +type PinpointDestinationInitParameters struct { + + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta2.App + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ApplicationArn *string `json:"applicationArn,omitempty" tf:"application_arn,omitempty"` + + // Reference to a App in pinpoint to populate applicationArn. + // +kubebuilder:validation:Optional + ApplicationArnRef *v1.Reference `json:"applicationArnRef,omitempty" tf:"-"` + + // Selector for a App in pinpoint to populate applicationArn. 
+ // +kubebuilder:validation:Optional + ApplicationArnSelector *v1.Selector `json:"applicationArnSelector,omitempty" tf:"-"` +} + +type PinpointDestinationObservation struct { + ApplicationArn *string `json:"applicationArn,omitempty" tf:"application_arn,omitempty"` +} + +type PinpointDestinationParameters struct { + + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/pinpoint/v1beta2.App + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ApplicationArn *string `json:"applicationArn,omitempty" tf:"application_arn,omitempty"` + + // Reference to a App in pinpoint to populate applicationArn. + // +kubebuilder:validation:Optional + ApplicationArnRef *v1.Reference `json:"applicationArnRef,omitempty" tf:"-"` + + // Selector for a App in pinpoint to populate applicationArn. + // +kubebuilder:validation:Optional + ApplicationArnSelector *v1.Selector `json:"applicationArnSelector,omitempty" tf:"-"` +} + +type SnsDestinationInitParameters struct { + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to publish email events to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + TopicArn *string `json:"topicArn,omitempty" tf:"topic_arn,omitempty"` + + // Reference to a Topic in sns to populate topicArn. + // +kubebuilder:validation:Optional + TopicArnRef *v1.Reference `json:"topicArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate topicArn. + // +kubebuilder:validation:Optional + TopicArnSelector *v1.Selector `json:"topicArnSelector,omitempty" tf:"-"` +} + +type SnsDestinationObservation struct { + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to publish email events to. 
+ TopicArn *string `json:"topicArn,omitempty" tf:"topic_arn,omitempty"` +} + +type SnsDestinationParameters struct { + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to publish email events to. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + TopicArn *string `json:"topicArn,omitempty" tf:"topic_arn,omitempty"` + + // Reference to a Topic in sns to populate topicArn. + // +kubebuilder:validation:Optional + TopicArnRef *v1.Reference `json:"topicArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate topicArn. + // +kubebuilder:validation:Optional + TopicArnSelector *v1.Selector `json:"topicArnSelector,omitempty" tf:"-"` +} + +// ConfigurationSetEventDestinationSpec defines the desired state of ConfigurationSetEventDestination +type ConfigurationSetEventDestinationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigurationSetEventDestinationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ConfigurationSetEventDestinationInitParameters `json:"initProvider,omitempty"` +} + +// ConfigurationSetEventDestinationStatus defines the observed state of ConfigurationSetEventDestination. +type ConfigurationSetEventDestinationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigurationSetEventDestinationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ConfigurationSetEventDestination is the Schema for the ConfigurationSetEventDestinations API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ConfigurationSetEventDestination struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.eventDestination) || (has(self.initProvider) && has(self.initProvider.eventDestination))",message="spec.forProvider.eventDestination is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.eventDestinationName) || (has(self.initProvider) && has(self.initProvider.eventDestinationName))",message="spec.forProvider.eventDestinationName is a required parameter" + Spec ConfigurationSetEventDestinationSpec `json:"spec"` + Status 
ConfigurationSetEventDestinationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigurationSetEventDestinationList contains a list of ConfigurationSetEventDestinations +type ConfigurationSetEventDestinationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ConfigurationSetEventDestination `json:"items"` +} + +// Repository type metadata. +var ( + ConfigurationSetEventDestination_Kind = "ConfigurationSetEventDestination" + ConfigurationSetEventDestination_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ConfigurationSetEventDestination_Kind}.String() + ConfigurationSetEventDestination_KindAPIVersion = ConfigurationSetEventDestination_Kind + "." + CRDGroupVersion.String() + ConfigurationSetEventDestination_GroupVersionKind = CRDGroupVersion.WithKind(ConfigurationSetEventDestination_Kind) +) + +func init() { + SchemeBuilder.Register(&ConfigurationSetEventDestination{}, &ConfigurationSetEventDestinationList{}) +} diff --git a/apis/sesv2/v1beta2/zz_emailidentity_terraformed.go b/apis/sesv2/v1beta2/zz_emailidentity_terraformed.go new file mode 100755 index 0000000000..1873372b5b --- /dev/null +++ b/apis/sesv2/v1beta2/zz_emailidentity_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EmailIdentity +func (mg *EmailIdentity) GetTerraformResourceType() string { + return "aws_sesv2_email_identity" +} + +// GetConnectionDetailsMapping for this EmailIdentity +func (tr *EmailIdentity) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"dkim_signing_attributes[*].domain_signing_private_key": "dkimSigningAttributes[*].domainSigningPrivateKeySecretRef"} +} + +// GetObservation of this EmailIdentity +func (tr *EmailIdentity) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EmailIdentity +func (tr *EmailIdentity) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this EmailIdentity +func (tr *EmailIdentity) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this EmailIdentity +func (tr *EmailIdentity) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EmailIdentity +func (tr *EmailIdentity) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EmailIdentity +func (tr 
*EmailIdentity) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this EmailIdentity
+func (tr *EmailIdentity) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this EmailIdentity using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *EmailIdentity) LateInitialize(attrs []byte) (bool, error) {
+	params := &EmailIdentityParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EmailIdentity) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sesv2/v1beta2/zz_emailidentity_types.go b/apis/sesv2/v1beta2/zz_emailidentity_types.go new file mode 100755 index 0000000000..1f059e978f --- /dev/null +++ b/apis/sesv2/v1beta2/zz_emailidentity_types.go @@ -0,0 +1,205 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DKIMSigningAttributesInitParameters struct { + + // [Bring Your Own DKIM] A private key that's used to generate a DKIM signature. The private key must use 1024 or 2048-bit RSA encryption, and must be encoded using base64 encoding. + DomainSigningPrivateKeySecretRef *v1.SecretKeySelector `json:"domainSigningPrivateKeySecretRef,omitempty" tf:"-"` + + // [Bring Your Own DKIM] A string that's used to identify a public key in the DNS configuration for a domain. + DomainSigningSelector *string `json:"domainSigningSelector,omitempty" tf:"domain_signing_selector,omitempty"` + + // [Easy DKIM] The key length of the future DKIM key pair to be generated. This can be changed at most once per day. Valid values: RSA_1024_BIT, RSA_2048_BIT. + NextSigningKeyLength *string `json:"nextSigningKeyLength,omitempty" tf:"next_signing_key_length,omitempty"` +} + +type DKIMSigningAttributesObservation struct { + + // [Easy DKIM] The key length of the DKIM key pair in use. + CurrentSigningKeyLength *string `json:"currentSigningKeyLength,omitempty" tf:"current_signing_key_length,omitempty"` + + // [Bring Your Own DKIM] A string that's used to identify a public key in the DNS configuration for a domain. 
+ DomainSigningSelector *string `json:"domainSigningSelector,omitempty" tf:"domain_signing_selector,omitempty"` + + // [Easy DKIM] The last time a key pair was generated for this identity. + LastKeyGenerationTimestamp *string `json:"lastKeyGenerationTimestamp,omitempty" tf:"last_key_generation_timestamp,omitempty"` + + // [Easy DKIM] The key length of the future DKIM key pair to be generated. This can be changed at most once per day. Valid values: RSA_1024_BIT, RSA_2048_BIT. + NextSigningKeyLength *string `json:"nextSigningKeyLength,omitempty" tf:"next_signing_key_length,omitempty"` + + // A string that indicates how DKIM was configured for the identity. AWS_SES indicates that DKIM was configured for the identity by using Easy DKIM. EXTERNAL indicates that DKIM was configured for the identity by using Bring Your Own DKIM (BYODKIM). + SigningAttributesOrigin *string `json:"signingAttributesOrigin,omitempty" tf:"signing_attributes_origin,omitempty"` + + // Describes whether or not Amazon SES has successfully located the DKIM records in the DNS records for the domain. See the AWS SES API v2 Reference for supported statuses. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key. + Tokens []*string `json:"tokens,omitempty" tf:"tokens,omitempty"` +} + +type DKIMSigningAttributesParameters struct { + + // [Bring Your Own DKIM] A private key that's used to generate a DKIM signature. 
The private key must use 1024 or 2048-bit RSA encryption, and must be encoded using base64 encoding. + // +kubebuilder:validation:Optional + DomainSigningPrivateKeySecretRef *v1.SecretKeySelector `json:"domainSigningPrivateKeySecretRef,omitempty" tf:"-"` + + // [Bring Your Own DKIM] A string that's used to identify a public key in the DNS configuration for a domain. + // +kubebuilder:validation:Optional + DomainSigningSelector *string `json:"domainSigningSelector,omitempty" tf:"domain_signing_selector,omitempty"` + + // [Easy DKIM] The key length of the future DKIM key pair to be generated. This can be changed at most once per day. Valid values: RSA_1024_BIT, RSA_2048_BIT. + // +kubebuilder:validation:Optional + NextSigningKeyLength *string `json:"nextSigningKeyLength,omitempty" tf:"next_signing_key_length,omitempty"` +} + +type EmailIdentityInitParameters struct { + + // The configuration set to use by default when sending from this identity. Note that any configuration set defined in the email sending request takes precedence. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sesv2/v1beta2.ConfigurationSet + ConfigurationSetName *string `json:"configurationSetName,omitempty" tf:"configuration_set_name,omitempty"` + + // Reference to a ConfigurationSet in sesv2 to populate configurationSetName. + // +kubebuilder:validation:Optional + ConfigurationSetNameRef *v1.Reference `json:"configurationSetNameRef,omitempty" tf:"-"` + + // Selector for a ConfigurationSet in sesv2 to populate configurationSetName. + // +kubebuilder:validation:Optional + ConfigurationSetNameSelector *v1.Selector `json:"configurationSetNameSelector,omitempty" tf:"-"` + + // The configuration of the DKIM authentication settings for an email domain identity. + DKIMSigningAttributes *DKIMSigningAttributesInitParameters `json:"dkimSigningAttributes,omitempty" tf:"dkim_signing_attributes,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EmailIdentityObservation struct { + + // ARN of the Email Identity. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The configuration set to use by default when sending from this identity. Note that any configuration set defined in the email sending request takes precedence. + ConfigurationSetName *string `json:"configurationSetName,omitempty" tf:"configuration_set_name,omitempty"` + + // The configuration of the DKIM authentication settings for an email domain identity. + DKIMSigningAttributes *DKIMSigningAttributesObservation `json:"dkimSigningAttributes,omitempty" tf:"dkim_signing_attributes,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The email identity type. Valid values: EMAIL_ADDRESS, DOMAIN. + IdentityType *string `json:"identityType,omitempty" tf:"identity_type,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Specifies whether or not the identity is verified. + VerifiedForSendingStatus *bool `json:"verifiedForSendingStatus,omitempty" tf:"verified_for_sending_status,omitempty"` +} + +type EmailIdentityParameters struct { + + // The configuration set to use by default when sending from this identity. Note that any configuration set defined in the email sending request takes precedence. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sesv2/v1beta2.ConfigurationSet + // +kubebuilder:validation:Optional + ConfigurationSetName *string `json:"configurationSetName,omitempty" tf:"configuration_set_name,omitempty"` + + // Reference to a ConfigurationSet in sesv2 to populate configurationSetName. + // +kubebuilder:validation:Optional + ConfigurationSetNameRef *v1.Reference `json:"configurationSetNameRef,omitempty" tf:"-"` + + // Selector for a ConfigurationSet in sesv2 to populate configurationSetName. + // +kubebuilder:validation:Optional + ConfigurationSetNameSelector *v1.Selector `json:"configurationSetNameSelector,omitempty" tf:"-"` + + // The configuration of the DKIM authentication settings for an email domain identity. + // +kubebuilder:validation:Optional + DKIMSigningAttributes *DKIMSigningAttributesParameters `json:"dkimSigningAttributes,omitempty" tf:"dkim_signing_attributes,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// EmailIdentitySpec defines the desired state of EmailIdentity +type EmailIdentitySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EmailIdentityParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EmailIdentityInitParameters `json:"initProvider,omitempty"` +} + +// EmailIdentityStatus defines the observed state of EmailIdentity. +type EmailIdentityStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EmailIdentityObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EmailIdentity is the Schema for the EmailIdentitys API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type EmailIdentity struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec EmailIdentitySpec `json:"spec"` + Status EmailIdentityStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EmailIdentityList contains a list of EmailIdentitys +type EmailIdentityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EmailIdentity `json:"items"` +} + +// Repository type metadata. +var ( + EmailIdentity_Kind = "EmailIdentity" + EmailIdentity_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: EmailIdentity_Kind}.String() + EmailIdentity_KindAPIVersion = EmailIdentity_Kind + "." 
+ CRDGroupVersion.String() + EmailIdentity_GroupVersionKind = CRDGroupVersion.WithKind(EmailIdentity_Kind) +) + +func init() { + SchemeBuilder.Register(&EmailIdentity{}, &EmailIdentityList{}) +} diff --git a/apis/sesv2/v1beta2/zz_generated.conversion_hubs.go b/apis/sesv2/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..8ebe95f2ed --- /dev/null +++ b/apis/sesv2/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ConfigurationSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ConfigurationSetEventDestination) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *EmailIdentity) Hub() {} diff --git a/apis/sesv2/v1beta2/zz_generated.deepcopy.go b/apis/sesv2/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..1b7e403c5a --- /dev/null +++ b/apis/sesv2/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2047 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudWatchDestinationInitParameters) DeepCopyInto(out *CloudWatchDestinationInitParameters) { + *out = *in + if in.DimensionConfiguration != nil { + in, out := &in.DimensionConfiguration, &out.DimensionConfiguration + *out = make([]DimensionConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchDestinationInitParameters. +func (in *CloudWatchDestinationInitParameters) DeepCopy() *CloudWatchDestinationInitParameters { + if in == nil { + return nil + } + out := new(CloudWatchDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudWatchDestinationObservation) DeepCopyInto(out *CloudWatchDestinationObservation) { + *out = *in + if in.DimensionConfiguration != nil { + in, out := &in.DimensionConfiguration, &out.DimensionConfiguration + *out = make([]DimensionConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchDestinationObservation. +func (in *CloudWatchDestinationObservation) DeepCopy() *CloudWatchDestinationObservation { + if in == nil { + return nil + } + out := new(CloudWatchDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudWatchDestinationParameters) DeepCopyInto(out *CloudWatchDestinationParameters) { + *out = *in + if in.DimensionConfiguration != nil { + in, out := &in.DimensionConfiguration, &out.DimensionConfiguration + *out = make([]DimensionConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchDestinationParameters. +func (in *CloudWatchDestinationParameters) DeepCopy() *CloudWatchDestinationParameters { + if in == nil { + return nil + } + out := new(CloudWatchDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationSet) DeepCopyInto(out *ConfigurationSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSet. +func (in *ConfigurationSet) DeepCopy() *ConfigurationSet { + if in == nil { + return nil + } + out := new(ConfigurationSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigurationSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationSetEventDestination) DeepCopyInto(out *ConfigurationSetEventDestination) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetEventDestination. +func (in *ConfigurationSetEventDestination) DeepCopy() *ConfigurationSetEventDestination { + if in == nil { + return nil + } + out := new(ConfigurationSetEventDestination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigurationSetEventDestination) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationSetEventDestinationInitParameters) DeepCopyInto(out *ConfigurationSetEventDestinationInitParameters) { + *out = *in + if in.ConfigurationSetName != nil { + in, out := &in.ConfigurationSetName, &out.ConfigurationSetName + *out = new(string) + **out = **in + } + if in.ConfigurationSetNameRef != nil { + in, out := &in.ConfigurationSetNameRef, &out.ConfigurationSetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationSetNameSelector != nil { + in, out := &in.ConfigurationSetNameSelector, &out.ConfigurationSetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventDestination != nil { + in, out := &in.EventDestination, &out.EventDestination + *out = new(EventDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventDestinationName != nil { + in, out := &in.EventDestinationName, &out.EventDestinationName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new ConfigurationSetEventDestinationInitParameters. +func (in *ConfigurationSetEventDestinationInitParameters) DeepCopy() *ConfigurationSetEventDestinationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationSetEventDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationSetEventDestinationList) DeepCopyInto(out *ConfigurationSetEventDestinationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigurationSetEventDestination, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetEventDestinationList. +func (in *ConfigurationSetEventDestinationList) DeepCopy() *ConfigurationSetEventDestinationList { + if in == nil { + return nil + } + out := new(ConfigurationSetEventDestinationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigurationSetEventDestinationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationSetEventDestinationObservation) DeepCopyInto(out *ConfigurationSetEventDestinationObservation) { + *out = *in + if in.ConfigurationSetName != nil { + in, out := &in.ConfigurationSetName, &out.ConfigurationSetName + *out = new(string) + **out = **in + } + if in.EventDestination != nil { + in, out := &in.EventDestination, &out.EventDestination + *out = new(EventDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.EventDestinationName != nil { + in, out := &in.EventDestinationName, &out.EventDestinationName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetEventDestinationObservation. +func (in *ConfigurationSetEventDestinationObservation) DeepCopy() *ConfigurationSetEventDestinationObservation { + if in == nil { + return nil + } + out := new(ConfigurationSetEventDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationSetEventDestinationParameters) DeepCopyInto(out *ConfigurationSetEventDestinationParameters) { + *out = *in + if in.ConfigurationSetName != nil { + in, out := &in.ConfigurationSetName, &out.ConfigurationSetName + *out = new(string) + **out = **in + } + if in.ConfigurationSetNameRef != nil { + in, out := &in.ConfigurationSetNameRef, &out.ConfigurationSetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationSetNameSelector != nil { + in, out := &in.ConfigurationSetNameSelector, &out.ConfigurationSetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventDestination != nil { + in, out := &in.EventDestination, &out.EventDestination + *out = new(EventDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.EventDestinationName != nil { + in, out := &in.EventDestinationName, &out.EventDestinationName + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetEventDestinationParameters. +func (in *ConfigurationSetEventDestinationParameters) DeepCopy() *ConfigurationSetEventDestinationParameters { + if in == nil { + return nil + } + out := new(ConfigurationSetEventDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationSetEventDestinationSpec) DeepCopyInto(out *ConfigurationSetEventDestinationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetEventDestinationSpec. 
+func (in *ConfigurationSetEventDestinationSpec) DeepCopy() *ConfigurationSetEventDestinationSpec { + if in == nil { + return nil + } + out := new(ConfigurationSetEventDestinationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationSetEventDestinationStatus) DeepCopyInto(out *ConfigurationSetEventDestinationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetEventDestinationStatus. +func (in *ConfigurationSetEventDestinationStatus) DeepCopy() *ConfigurationSetEventDestinationStatus { + if in == nil { + return nil + } + out := new(ConfigurationSetEventDestinationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationSetInitParameters) DeepCopyInto(out *ConfigurationSetInitParameters) { + *out = *in + if in.DeliveryOptions != nil { + in, out := &in.DeliveryOptions, &out.DeliveryOptions + *out = new(DeliveryOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReputationOptions != nil { + in, out := &in.ReputationOptions, &out.ReputationOptions + *out = new(ReputationOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SendingOptions != nil { + in, out := &in.SendingOptions, &out.SendingOptions + *out = new(SendingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SuppressionOptions != nil { + in, out := &in.SuppressionOptions, &out.SuppressionOptions + *out = new(SuppressionOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrackingOptions != nil { + in, out := &in.TrackingOptions, &out.TrackingOptions + *out = new(TrackingOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VdmOptions != nil { + in, out := &in.VdmOptions, &out.VdmOptions + *out = new(VdmOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetInitParameters. +func (in *ConfigurationSetInitParameters) DeepCopy() *ConfigurationSetInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationSetList) DeepCopyInto(out *ConfigurationSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigurationSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetList. +func (in *ConfigurationSetList) DeepCopy() *ConfigurationSetList { + if in == nil { + return nil + } + out := new(ConfigurationSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigurationSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationSetObservation) DeepCopyInto(out *ConfigurationSetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DeliveryOptions != nil { + in, out := &in.DeliveryOptions, &out.DeliveryOptions + *out = new(DeliveryOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ReputationOptions != nil { + in, out := &in.ReputationOptions, &out.ReputationOptions + *out = new(ReputationOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.SendingOptions != nil { + in, out := &in.SendingOptions, &out.SendingOptions + *out = new(SendingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.SuppressionOptions != nil { + in, out := &in.SuppressionOptions, &out.SuppressionOptions + *out = new(SuppressionOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrackingOptions != nil { + in, out := &in.TrackingOptions, &out.TrackingOptions + *out = new(TrackingOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.VdmOptions != nil { + in, out := &in.VdmOptions, &out.VdmOptions + *out = new(VdmOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetObservation. +func (in *ConfigurationSetObservation) DeepCopy() *ConfigurationSetObservation { + if in == nil { + return nil + } + out := new(ConfigurationSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationSetParameters) DeepCopyInto(out *ConfigurationSetParameters) { + *out = *in + if in.DeliveryOptions != nil { + in, out := &in.DeliveryOptions, &out.DeliveryOptions + *out = new(DeliveryOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ReputationOptions != nil { + in, out := &in.ReputationOptions, &out.ReputationOptions + *out = new(ReputationOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.SendingOptions != nil { + in, out := &in.SendingOptions, &out.SendingOptions + *out = new(SendingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.SuppressionOptions != nil { + in, out := &in.SuppressionOptions, &out.SuppressionOptions + *out = new(SuppressionOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrackingOptions != nil { + in, out := &in.TrackingOptions, &out.TrackingOptions + *out = new(TrackingOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.VdmOptions != nil { + in, out := &in.VdmOptions, &out.VdmOptions + *out = new(VdmOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetParameters. +func (in *ConfigurationSetParameters) DeepCopy() *ConfigurationSetParameters { + if in == nil { + return nil + } + out := new(ConfigurationSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationSetSpec) DeepCopyInto(out *ConfigurationSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetSpec. +func (in *ConfigurationSetSpec) DeepCopy() *ConfigurationSetSpec { + if in == nil { + return nil + } + out := new(ConfigurationSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationSetStatus) DeepCopyInto(out *ConfigurationSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSetStatus. +func (in *ConfigurationSetStatus) DeepCopy() *ConfigurationSetStatus { + if in == nil { + return nil + } + out := new(ConfigurationSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DKIMSigningAttributesInitParameters) DeepCopyInto(out *DKIMSigningAttributesInitParameters) { + *out = *in + if in.DomainSigningPrivateKeySecretRef != nil { + in, out := &in.DomainSigningPrivateKeySecretRef, &out.DomainSigningPrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DomainSigningSelector != nil { + in, out := &in.DomainSigningSelector, &out.DomainSigningSelector + *out = new(string) + **out = **in + } + if in.NextSigningKeyLength != nil { + in, out := &in.NextSigningKeyLength, &out.NextSigningKeyLength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DKIMSigningAttributesInitParameters. 
+func (in *DKIMSigningAttributesInitParameters) DeepCopy() *DKIMSigningAttributesInitParameters { + if in == nil { + return nil + } + out := new(DKIMSigningAttributesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DKIMSigningAttributesObservation) DeepCopyInto(out *DKIMSigningAttributesObservation) { + *out = *in + if in.CurrentSigningKeyLength != nil { + in, out := &in.CurrentSigningKeyLength, &out.CurrentSigningKeyLength + *out = new(string) + **out = **in + } + if in.DomainSigningSelector != nil { + in, out := &in.DomainSigningSelector, &out.DomainSigningSelector + *out = new(string) + **out = **in + } + if in.LastKeyGenerationTimestamp != nil { + in, out := &in.LastKeyGenerationTimestamp, &out.LastKeyGenerationTimestamp + *out = new(string) + **out = **in + } + if in.NextSigningKeyLength != nil { + in, out := &in.NextSigningKeyLength, &out.NextSigningKeyLength + *out = new(string) + **out = **in + } + if in.SigningAttributesOrigin != nil { + in, out := &in.SigningAttributesOrigin, &out.SigningAttributesOrigin + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tokens != nil { + in, out := &in.Tokens, &out.Tokens + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DKIMSigningAttributesObservation. +func (in *DKIMSigningAttributesObservation) DeepCopy() *DKIMSigningAttributesObservation { + if in == nil { + return nil + } + out := new(DKIMSigningAttributesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DKIMSigningAttributesParameters) DeepCopyInto(out *DKIMSigningAttributesParameters) { + *out = *in + if in.DomainSigningPrivateKeySecretRef != nil { + in, out := &in.DomainSigningPrivateKeySecretRef, &out.DomainSigningPrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DomainSigningSelector != nil { + in, out := &in.DomainSigningSelector, &out.DomainSigningSelector + *out = new(string) + **out = **in + } + if in.NextSigningKeyLength != nil { + in, out := &in.NextSigningKeyLength, &out.NextSigningKeyLength + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DKIMSigningAttributesParameters. +func (in *DKIMSigningAttributesParameters) DeepCopy() *DKIMSigningAttributesParameters { + if in == nil { + return nil + } + out := new(DKIMSigningAttributesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardOptionsInitParameters) DeepCopyInto(out *DashboardOptionsInitParameters) { + *out = *in + if in.EngagementMetrics != nil { + in, out := &in.EngagementMetrics, &out.EngagementMetrics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardOptionsInitParameters. +func (in *DashboardOptionsInitParameters) DeepCopy() *DashboardOptionsInitParameters { + if in == nil { + return nil + } + out := new(DashboardOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DashboardOptionsObservation) DeepCopyInto(out *DashboardOptionsObservation) { + *out = *in + if in.EngagementMetrics != nil { + in, out := &in.EngagementMetrics, &out.EngagementMetrics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardOptionsObservation. +func (in *DashboardOptionsObservation) DeepCopy() *DashboardOptionsObservation { + if in == nil { + return nil + } + out := new(DashboardOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DashboardOptionsParameters) DeepCopyInto(out *DashboardOptionsParameters) { + *out = *in + if in.EngagementMetrics != nil { + in, out := &in.EngagementMetrics, &out.EngagementMetrics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardOptionsParameters. +func (in *DashboardOptionsParameters) DeepCopy() *DashboardOptionsParameters { + if in == nil { + return nil + } + out := new(DashboardOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryOptionsInitParameters) DeepCopyInto(out *DeliveryOptionsInitParameters) { + *out = *in + if in.SendingPoolName != nil { + in, out := &in.SendingPoolName, &out.SendingPoolName + *out = new(string) + **out = **in + } + if in.TLSPolicy != nil { + in, out := &in.TLSPolicy, &out.TLSPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryOptionsInitParameters. 
+func (in *DeliveryOptionsInitParameters) DeepCopy() *DeliveryOptionsInitParameters { + if in == nil { + return nil + } + out := new(DeliveryOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryOptionsObservation) DeepCopyInto(out *DeliveryOptionsObservation) { + *out = *in + if in.SendingPoolName != nil { + in, out := &in.SendingPoolName, &out.SendingPoolName + *out = new(string) + **out = **in + } + if in.TLSPolicy != nil { + in, out := &in.TLSPolicy, &out.TLSPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryOptionsObservation. +func (in *DeliveryOptionsObservation) DeepCopy() *DeliveryOptionsObservation { + if in == nil { + return nil + } + out := new(DeliveryOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryOptionsParameters) DeepCopyInto(out *DeliveryOptionsParameters) { + *out = *in + if in.SendingPoolName != nil { + in, out := &in.SendingPoolName, &out.SendingPoolName + *out = new(string) + **out = **in + } + if in.TLSPolicy != nil { + in, out := &in.TLSPolicy, &out.TLSPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryOptionsParameters. +func (in *DeliveryOptionsParameters) DeepCopy() *DeliveryOptionsParameters { + if in == nil { + return nil + } + out := new(DeliveryOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DimensionConfigurationInitParameters) DeepCopyInto(out *DimensionConfigurationInitParameters) { + *out = *in + if in.DefaultDimensionValue != nil { + in, out := &in.DefaultDimensionValue, &out.DefaultDimensionValue + *out = new(string) + **out = **in + } + if in.DimensionName != nil { + in, out := &in.DimensionName, &out.DimensionName + *out = new(string) + **out = **in + } + if in.DimensionValueSource != nil { + in, out := &in.DimensionValueSource, &out.DimensionValueSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionConfigurationInitParameters. +func (in *DimensionConfigurationInitParameters) DeepCopy() *DimensionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(DimensionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionConfigurationObservation) DeepCopyInto(out *DimensionConfigurationObservation) { + *out = *in + if in.DefaultDimensionValue != nil { + in, out := &in.DefaultDimensionValue, &out.DefaultDimensionValue + *out = new(string) + **out = **in + } + if in.DimensionName != nil { + in, out := &in.DimensionName, &out.DimensionName + *out = new(string) + **out = **in + } + if in.DimensionValueSource != nil { + in, out := &in.DimensionValueSource, &out.DimensionValueSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionConfigurationObservation. +func (in *DimensionConfigurationObservation) DeepCopy() *DimensionConfigurationObservation { + if in == nil { + return nil + } + out := new(DimensionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DimensionConfigurationParameters) DeepCopyInto(out *DimensionConfigurationParameters) { + *out = *in + if in.DefaultDimensionValue != nil { + in, out := &in.DefaultDimensionValue, &out.DefaultDimensionValue + *out = new(string) + **out = **in + } + if in.DimensionName != nil { + in, out := &in.DimensionName, &out.DimensionName + *out = new(string) + **out = **in + } + if in.DimensionValueSource != nil { + in, out := &in.DimensionValueSource, &out.DimensionValueSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionConfigurationParameters. +func (in *DimensionConfigurationParameters) DeepCopy() *DimensionConfigurationParameters { + if in == nil { + return nil + } + out := new(DimensionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailIdentity) DeepCopyInto(out *EmailIdentity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailIdentity. +func (in *EmailIdentity) DeepCopy() *EmailIdentity { + if in == nil { + return nil + } + out := new(EmailIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EmailIdentity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EmailIdentityInitParameters) DeepCopyInto(out *EmailIdentityInitParameters) { + *out = *in + if in.ConfigurationSetName != nil { + in, out := &in.ConfigurationSetName, &out.ConfigurationSetName + *out = new(string) + **out = **in + } + if in.ConfigurationSetNameRef != nil { + in, out := &in.ConfigurationSetNameRef, &out.ConfigurationSetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationSetNameSelector != nil { + in, out := &in.ConfigurationSetNameSelector, &out.ConfigurationSetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DKIMSigningAttributes != nil { + in, out := &in.DKIMSigningAttributes, &out.DKIMSigningAttributes + *out = new(DKIMSigningAttributesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailIdentityInitParameters. +func (in *EmailIdentityInitParameters) DeepCopy() *EmailIdentityInitParameters { + if in == nil { + return nil + } + out := new(EmailIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailIdentityList) DeepCopyInto(out *EmailIdentityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EmailIdentity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailIdentityList. 
+func (in *EmailIdentityList) DeepCopy() *EmailIdentityList { + if in == nil { + return nil + } + out := new(EmailIdentityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EmailIdentityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailIdentityObservation) DeepCopyInto(out *EmailIdentityObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ConfigurationSetName != nil { + in, out := &in.ConfigurationSetName, &out.ConfigurationSetName + *out = new(string) + **out = **in + } + if in.DKIMSigningAttributes != nil { + in, out := &in.DKIMSigningAttributes, &out.DKIMSigningAttributes + *out = new(DKIMSigningAttributesObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IdentityType != nil { + in, out := &in.IdentityType, &out.IdentityType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VerifiedForSendingStatus != nil { + in, out := &in.VerifiedForSendingStatus, 
&out.VerifiedForSendingStatus + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailIdentityObservation. +func (in *EmailIdentityObservation) DeepCopy() *EmailIdentityObservation { + if in == nil { + return nil + } + out := new(EmailIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailIdentityParameters) DeepCopyInto(out *EmailIdentityParameters) { + *out = *in + if in.ConfigurationSetName != nil { + in, out := &in.ConfigurationSetName, &out.ConfigurationSetName + *out = new(string) + **out = **in + } + if in.ConfigurationSetNameRef != nil { + in, out := &in.ConfigurationSetNameRef, &out.ConfigurationSetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationSetNameSelector != nil { + in, out := &in.ConfigurationSetNameSelector, &out.ConfigurationSetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DKIMSigningAttributes != nil { + in, out := &in.DKIMSigningAttributes, &out.DKIMSigningAttributes + *out = new(DKIMSigningAttributesParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailIdentityParameters. 
+func (in *EmailIdentityParameters) DeepCopy() *EmailIdentityParameters { + if in == nil { + return nil + } + out := new(EmailIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailIdentitySpec) DeepCopyInto(out *EmailIdentitySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailIdentitySpec. +func (in *EmailIdentitySpec) DeepCopy() *EmailIdentitySpec { + if in == nil { + return nil + } + out := new(EmailIdentitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailIdentityStatus) DeepCopyInto(out *EmailIdentityStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailIdentityStatus. +func (in *EmailIdentityStatus) DeepCopy() *EmailIdentityStatus { + if in == nil { + return nil + } + out := new(EmailIdentityStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventDestinationInitParameters) DeepCopyInto(out *EventDestinationInitParameters) { + *out = *in + if in.CloudWatchDestination != nil { + in, out := &in.CloudWatchDestination, &out.CloudWatchDestination + *out = new(CloudWatchDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KinesisFirehoseDestination != nil { + in, out := &in.KinesisFirehoseDestination, &out.KinesisFirehoseDestination + *out = new(KinesisFirehoseDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MatchingEventTypes != nil { + in, out := &in.MatchingEventTypes, &out.MatchingEventTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PinpointDestination != nil { + in, out := &in.PinpointDestination, &out.PinpointDestination + *out = new(PinpointDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SnsDestination != nil { + in, out := &in.SnsDestination, &out.SnsDestination + *out = new(SnsDestinationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestinationInitParameters. +func (in *EventDestinationInitParameters) DeepCopy() *EventDestinationInitParameters { + if in == nil { + return nil + } + out := new(EventDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventDestinationObservation) DeepCopyInto(out *EventDestinationObservation) { + *out = *in + if in.CloudWatchDestination != nil { + in, out := &in.CloudWatchDestination, &out.CloudWatchDestination + *out = new(CloudWatchDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KinesisFirehoseDestination != nil { + in, out := &in.KinesisFirehoseDestination, &out.KinesisFirehoseDestination + *out = new(KinesisFirehoseDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.MatchingEventTypes != nil { + in, out := &in.MatchingEventTypes, &out.MatchingEventTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PinpointDestination != nil { + in, out := &in.PinpointDestination, &out.PinpointDestination + *out = new(PinpointDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.SnsDestination != nil { + in, out := &in.SnsDestination, &out.SnsDestination + *out = new(SnsDestinationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestinationObservation. +func (in *EventDestinationObservation) DeepCopy() *EventDestinationObservation { + if in == nil { + return nil + } + out := new(EventDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventDestinationParameters) DeepCopyInto(out *EventDestinationParameters) { + *out = *in + if in.CloudWatchDestination != nil { + in, out := &in.CloudWatchDestination, &out.CloudWatchDestination + *out = new(CloudWatchDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KinesisFirehoseDestination != nil { + in, out := &in.KinesisFirehoseDestination, &out.KinesisFirehoseDestination + *out = new(KinesisFirehoseDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.MatchingEventTypes != nil { + in, out := &in.MatchingEventTypes, &out.MatchingEventTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PinpointDestination != nil { + in, out := &in.PinpointDestination, &out.PinpointDestination + *out = new(PinpointDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.SnsDestination != nil { + in, out := &in.SnsDestination, &out.SnsDestination + *out = new(SnsDestinationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDestinationParameters. +func (in *EventDestinationParameters) DeepCopy() *EventDestinationParameters { + if in == nil { + return nil + } + out := new(EventDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GuardianOptionsInitParameters) DeepCopyInto(out *GuardianOptionsInitParameters) { + *out = *in + if in.OptimizedSharedDelivery != nil { + in, out := &in.OptimizedSharedDelivery, &out.OptimizedSharedDelivery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuardianOptionsInitParameters. 
+func (in *GuardianOptionsInitParameters) DeepCopy() *GuardianOptionsInitParameters { + if in == nil { + return nil + } + out := new(GuardianOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GuardianOptionsObservation) DeepCopyInto(out *GuardianOptionsObservation) { + *out = *in + if in.OptimizedSharedDelivery != nil { + in, out := &in.OptimizedSharedDelivery, &out.OptimizedSharedDelivery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuardianOptionsObservation. +func (in *GuardianOptionsObservation) DeepCopy() *GuardianOptionsObservation { + if in == nil { + return nil + } + out := new(GuardianOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GuardianOptionsParameters) DeepCopyInto(out *GuardianOptionsParameters) { + *out = *in + if in.OptimizedSharedDelivery != nil { + in, out := &in.OptimizedSharedDelivery, &out.OptimizedSharedDelivery + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuardianOptionsParameters. +func (in *GuardianOptionsParameters) DeepCopy() *GuardianOptionsParameters { + if in == nil { + return nil + } + out := new(GuardianOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KinesisFirehoseDestinationInitParameters) DeepCopyInto(out *KinesisFirehoseDestinationInitParameters) { + *out = *in + if in.DeliveryStreamArn != nil { + in, out := &in.DeliveryStreamArn, &out.DeliveryStreamArn + *out = new(string) + **out = **in + } + if in.DeliveryStreamArnRef != nil { + in, out := &in.DeliveryStreamArnRef, &out.DeliveryStreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeliveryStreamArnSelector != nil { + in, out := &in.DeliveryStreamArnSelector, &out.DeliveryStreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseDestinationInitParameters. +func (in *KinesisFirehoseDestinationInitParameters) DeepCopy() *KinesisFirehoseDestinationInitParameters { + if in == nil { + return nil + } + out := new(KinesisFirehoseDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisFirehoseDestinationObservation) DeepCopyInto(out *KinesisFirehoseDestinationObservation) { + *out = *in + if in.DeliveryStreamArn != nil { + in, out := &in.DeliveryStreamArn, &out.DeliveryStreamArn + *out = new(string) + **out = **in + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseDestinationObservation. 
+func (in *KinesisFirehoseDestinationObservation) DeepCopy() *KinesisFirehoseDestinationObservation { + if in == nil { + return nil + } + out := new(KinesisFirehoseDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KinesisFirehoseDestinationParameters) DeepCopyInto(out *KinesisFirehoseDestinationParameters) { + *out = *in + if in.DeliveryStreamArn != nil { + in, out := &in.DeliveryStreamArn, &out.DeliveryStreamArn + *out = new(string) + **out = **in + } + if in.DeliveryStreamArnRef != nil { + in, out := &in.DeliveryStreamArnRef, &out.DeliveryStreamArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeliveryStreamArnSelector != nil { + in, out := &in.DeliveryStreamArnSelector, &out.DeliveryStreamArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArn != nil { + in, out := &in.IAMRoleArn, &out.IAMRoleArn + *out = new(string) + **out = **in + } + if in.IAMRoleArnRef != nil { + in, out := &in.IAMRoleArnRef, &out.IAMRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IAMRoleArnSelector != nil { + in, out := &in.IAMRoleArnSelector, &out.IAMRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseDestinationParameters. +func (in *KinesisFirehoseDestinationParameters) DeepCopy() *KinesisFirehoseDestinationParameters { + if in == nil { + return nil + } + out := new(KinesisFirehoseDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PinpointDestinationInitParameters) DeepCopyInto(out *PinpointDestinationInitParameters) { + *out = *in + if in.ApplicationArn != nil { + in, out := &in.ApplicationArn, &out.ApplicationArn + *out = new(string) + **out = **in + } + if in.ApplicationArnRef != nil { + in, out := &in.ApplicationArnRef, &out.ApplicationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ApplicationArnSelector != nil { + in, out := &in.ApplicationArnSelector, &out.ApplicationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PinpointDestinationInitParameters. +func (in *PinpointDestinationInitParameters) DeepCopy() *PinpointDestinationInitParameters { + if in == nil { + return nil + } + out := new(PinpointDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PinpointDestinationObservation) DeepCopyInto(out *PinpointDestinationObservation) { + *out = *in + if in.ApplicationArn != nil { + in, out := &in.ApplicationArn, &out.ApplicationArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PinpointDestinationObservation. +func (in *PinpointDestinationObservation) DeepCopy() *PinpointDestinationObservation { + if in == nil { + return nil + } + out := new(PinpointDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PinpointDestinationParameters) DeepCopyInto(out *PinpointDestinationParameters) { + *out = *in + if in.ApplicationArn != nil { + in, out := &in.ApplicationArn, &out.ApplicationArn + *out = new(string) + **out = **in + } + if in.ApplicationArnRef != nil { + in, out := &in.ApplicationArnRef, &out.ApplicationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ApplicationArnSelector != nil { + in, out := &in.ApplicationArnSelector, &out.ApplicationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PinpointDestinationParameters. +func (in *PinpointDestinationParameters) DeepCopy() *PinpointDestinationParameters { + if in == nil { + return nil + } + out := new(PinpointDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReputationOptionsInitParameters) DeepCopyInto(out *ReputationOptionsInitParameters) { + *out = *in + if in.ReputationMetricsEnabled != nil { + in, out := &in.ReputationMetricsEnabled, &out.ReputationMetricsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReputationOptionsInitParameters. +func (in *ReputationOptionsInitParameters) DeepCopy() *ReputationOptionsInitParameters { + if in == nil { + return nil + } + out := new(ReputationOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReputationOptionsObservation) DeepCopyInto(out *ReputationOptionsObservation) { + *out = *in + if in.LastFreshStart != nil { + in, out := &in.LastFreshStart, &out.LastFreshStart + *out = new(string) + **out = **in + } + if in.ReputationMetricsEnabled != nil { + in, out := &in.ReputationMetricsEnabled, &out.ReputationMetricsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReputationOptionsObservation. +func (in *ReputationOptionsObservation) DeepCopy() *ReputationOptionsObservation { + if in == nil { + return nil + } + out := new(ReputationOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReputationOptionsParameters) DeepCopyInto(out *ReputationOptionsParameters) { + *out = *in + if in.ReputationMetricsEnabled != nil { + in, out := &in.ReputationMetricsEnabled, &out.ReputationMetricsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReputationOptionsParameters. +func (in *ReputationOptionsParameters) DeepCopy() *ReputationOptionsParameters { + if in == nil { + return nil + } + out := new(ReputationOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SendingOptionsInitParameters) DeepCopyInto(out *SendingOptionsInitParameters) { + *out = *in + if in.SendingEnabled != nil { + in, out := &in.SendingEnabled, &out.SendingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SendingOptionsInitParameters. 
+func (in *SendingOptionsInitParameters) DeepCopy() *SendingOptionsInitParameters { + if in == nil { + return nil + } + out := new(SendingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SendingOptionsObservation) DeepCopyInto(out *SendingOptionsObservation) { + *out = *in + if in.SendingEnabled != nil { + in, out := &in.SendingEnabled, &out.SendingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SendingOptionsObservation. +func (in *SendingOptionsObservation) DeepCopy() *SendingOptionsObservation { + if in == nil { + return nil + } + out := new(SendingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SendingOptionsParameters) DeepCopyInto(out *SendingOptionsParameters) { + *out = *in + if in.SendingEnabled != nil { + in, out := &in.SendingEnabled, &out.SendingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SendingOptionsParameters. +func (in *SendingOptionsParameters) DeepCopy() *SendingOptionsParameters { + if in == nil { + return nil + } + out := new(SendingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnsDestinationInitParameters) DeepCopyInto(out *SnsDestinationInitParameters) { + *out = *in + if in.TopicArn != nil { + in, out := &in.TopicArn, &out.TopicArn + *out = new(string) + **out = **in + } + if in.TopicArnRef != nil { + in, out := &in.TopicArnRef, &out.TopicArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TopicArnSelector != nil { + in, out := &in.TopicArnSelector, &out.TopicArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnsDestinationInitParameters. +func (in *SnsDestinationInitParameters) DeepCopy() *SnsDestinationInitParameters { + if in == nil { + return nil + } + out := new(SnsDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnsDestinationObservation) DeepCopyInto(out *SnsDestinationObservation) { + *out = *in + if in.TopicArn != nil { + in, out := &in.TopicArn, &out.TopicArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnsDestinationObservation. +func (in *SnsDestinationObservation) DeepCopy() *SnsDestinationObservation { + if in == nil { + return nil + } + out := new(SnsDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnsDestinationParameters) DeepCopyInto(out *SnsDestinationParameters) { + *out = *in + if in.TopicArn != nil { + in, out := &in.TopicArn, &out.TopicArn + *out = new(string) + **out = **in + } + if in.TopicArnRef != nil { + in, out := &in.TopicArnRef, &out.TopicArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TopicArnSelector != nil { + in, out := &in.TopicArnSelector, &out.TopicArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnsDestinationParameters. +func (in *SnsDestinationParameters) DeepCopy() *SnsDestinationParameters { + if in == nil { + return nil + } + out := new(SnsDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuppressionOptionsInitParameters) DeepCopyInto(out *SuppressionOptionsInitParameters) { + *out = *in + if in.SuppressedReasons != nil { + in, out := &in.SuppressedReasons, &out.SuppressedReasons + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuppressionOptionsInitParameters. +func (in *SuppressionOptionsInitParameters) DeepCopy() *SuppressionOptionsInitParameters { + if in == nil { + return nil + } + out := new(SuppressionOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SuppressionOptionsObservation) DeepCopyInto(out *SuppressionOptionsObservation) { + *out = *in + if in.SuppressedReasons != nil { + in, out := &in.SuppressedReasons, &out.SuppressedReasons + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuppressionOptionsObservation. +func (in *SuppressionOptionsObservation) DeepCopy() *SuppressionOptionsObservation { + if in == nil { + return nil + } + out := new(SuppressionOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuppressionOptionsParameters) DeepCopyInto(out *SuppressionOptionsParameters) { + *out = *in + if in.SuppressedReasons != nil { + in, out := &in.SuppressedReasons, &out.SuppressedReasons + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuppressionOptionsParameters. +func (in *SuppressionOptionsParameters) DeepCopy() *SuppressionOptionsParameters { + if in == nil { + return nil + } + out := new(SuppressionOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackingOptionsInitParameters) DeepCopyInto(out *TrackingOptionsInitParameters) { + *out = *in + if in.CustomRedirectDomain != nil { + in, out := &in.CustomRedirectDomain, &out.CustomRedirectDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackingOptionsInitParameters. 
+func (in *TrackingOptionsInitParameters) DeepCopy() *TrackingOptionsInitParameters { + if in == nil { + return nil + } + out := new(TrackingOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackingOptionsObservation) DeepCopyInto(out *TrackingOptionsObservation) { + *out = *in + if in.CustomRedirectDomain != nil { + in, out := &in.CustomRedirectDomain, &out.CustomRedirectDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackingOptionsObservation. +func (in *TrackingOptionsObservation) DeepCopy() *TrackingOptionsObservation { + if in == nil { + return nil + } + out := new(TrackingOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackingOptionsParameters) DeepCopyInto(out *TrackingOptionsParameters) { + *out = *in + if in.CustomRedirectDomain != nil { + in, out := &in.CustomRedirectDomain, &out.CustomRedirectDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackingOptionsParameters. +func (in *TrackingOptionsParameters) DeepCopy() *TrackingOptionsParameters { + if in == nil { + return nil + } + out := new(TrackingOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VdmOptionsInitParameters) DeepCopyInto(out *VdmOptionsInitParameters) { + *out = *in + if in.DashboardOptions != nil { + in, out := &in.DashboardOptions, &out.DashboardOptions + *out = new(DashboardOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GuardianOptions != nil { + in, out := &in.GuardianOptions, &out.GuardianOptions + *out = new(GuardianOptionsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VdmOptionsInitParameters. +func (in *VdmOptionsInitParameters) DeepCopy() *VdmOptionsInitParameters { + if in == nil { + return nil + } + out := new(VdmOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VdmOptionsObservation) DeepCopyInto(out *VdmOptionsObservation) { + *out = *in + if in.DashboardOptions != nil { + in, out := &in.DashboardOptions, &out.DashboardOptions + *out = new(DashboardOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.GuardianOptions != nil { + in, out := &in.GuardianOptions, &out.GuardianOptions + *out = new(GuardianOptionsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VdmOptionsObservation. +func (in *VdmOptionsObservation) DeepCopy() *VdmOptionsObservation { + if in == nil { + return nil + } + out := new(VdmOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VdmOptionsParameters) DeepCopyInto(out *VdmOptionsParameters) { + *out = *in + if in.DashboardOptions != nil { + in, out := &in.DashboardOptions, &out.DashboardOptions + *out = new(DashboardOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.GuardianOptions != nil { + in, out := &in.GuardianOptions, &out.GuardianOptions + *out = new(GuardianOptionsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VdmOptionsParameters. +func (in *VdmOptionsParameters) DeepCopy() *VdmOptionsParameters { + if in == nil { + return nil + } + out := new(VdmOptionsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/sesv2/v1beta2/zz_generated.managed.go b/apis/sesv2/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..386f3e8612 --- /dev/null +++ b/apis/sesv2/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ConfigurationSet. +func (mg *ConfigurationSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ConfigurationSet. +func (mg *ConfigurationSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ConfigurationSet. +func (mg *ConfigurationSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ConfigurationSet. +func (mg *ConfigurationSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ConfigurationSet. 
+func (mg *ConfigurationSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ConfigurationSet. +func (mg *ConfigurationSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ConfigurationSet. +func (mg *ConfigurationSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ConfigurationSet. +func (mg *ConfigurationSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ConfigurationSet. +func (mg *ConfigurationSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ConfigurationSet. +func (mg *ConfigurationSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ConfigurationSet. +func (mg *ConfigurationSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ConfigurationSet. +func (mg *ConfigurationSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ConfigurationSetEventDestination. 
+func (mg *ConfigurationSetEventDestination) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ConfigurationSetEventDestination. +func (mg *ConfigurationSetEventDestination) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ConfigurationSetEventDestination. 
+func (mg *ConfigurationSetEventDestination) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this EmailIdentity. +func (mg *EmailIdentity) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EmailIdentity. +func (mg *EmailIdentity) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EmailIdentity. +func (mg *EmailIdentity) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EmailIdentity. +func (mg *EmailIdentity) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EmailIdentity. +func (mg *EmailIdentity) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EmailIdentity. +func (mg *EmailIdentity) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EmailIdentity. +func (mg *EmailIdentity) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EmailIdentity. +func (mg *EmailIdentity) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EmailIdentity. +func (mg *EmailIdentity) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EmailIdentity. +func (mg *EmailIdentity) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EmailIdentity. 
+func (mg *EmailIdentity) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EmailIdentity. +func (mg *EmailIdentity) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/sesv2/v1beta2/zz_generated.managedlist.go b/apis/sesv2/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..14e21067ac --- /dev/null +++ b/apis/sesv2/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConfigurationSetEventDestinationList. +func (l *ConfigurationSetEventDestinationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ConfigurationSetList. +func (l *ConfigurationSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this EmailIdentityList. +func (l *EmailIdentityList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/sesv2/v1beta2/zz_generated.resolvers.go b/apis/sesv2/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..86bfc1f3d8 --- /dev/null +++ b/apis/sesv2/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,303 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ConfigurationSetEventDestination) ResolveReferences( // ResolveReferences of this ConfigurationSetEventDestination. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sesv2.aws.upbound.io", "v1beta2", "ConfigurationSet", "ConfigurationSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ConfigurationSetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ConfigurationSetNameRef, + Selector: mg.Spec.ForProvider.ConfigurationSetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ConfigurationSetName") + } + mg.Spec.ForProvider.ConfigurationSetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ConfigurationSetNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.EventDestination != nil { + if mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = 
r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArnRef, + Selector: mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArn") + } + mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.EventDestination != nil { + if mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArnRef, + Selector: mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArn") + } + mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.EventDestination != nil { + if mg.Spec.ForProvider.EventDestination.PinpointDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta2", "App", "AppList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventDestination.PinpointDestination.ApplicationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.EventDestination.PinpointDestination.ApplicationArnRef, + Selector: mg.Spec.ForProvider.EventDestination.PinpointDestination.ApplicationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventDestination.PinpointDestination.ApplicationArn") + } + mg.Spec.ForProvider.EventDestination.PinpointDestination.ApplicationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventDestination.PinpointDestination.ApplicationArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.EventDestination != nil { + if mg.Spec.ForProvider.EventDestination.SnsDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventDestination.SnsDestination.TopicArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.EventDestination.SnsDestination.TopicArnRef, + Selector: 
mg.Spec.ForProvider.EventDestination.SnsDestination.TopicArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventDestination.SnsDestination.TopicArn") + } + mg.Spec.ForProvider.EventDestination.SnsDestination.TopicArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventDestination.SnsDestination.TopicArnRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("sesv2.aws.upbound.io", "v1beta2", "ConfigurationSet", "ConfigurationSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ConfigurationSetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ConfigurationSetNameRef, + Selector: mg.Spec.InitProvider.ConfigurationSetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ConfigurationSetName") + } + mg.Spec.InitProvider.ConfigurationSetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ConfigurationSetNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.EventDestination != nil { + if mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArn), + Extract: resource.ExtractParamPath("arn", false), + Reference: 
mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArnRef, + Selector: mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArn") + } + mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.DeliveryStreamArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.EventDestination != nil { + if mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArnRef, + Selector: mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArn") + } + mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventDestination.KinesisFirehoseDestination.IAMRoleArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.EventDestination != nil { + if mg.Spec.InitProvider.EventDestination.PinpointDestination != nil { + { + m, l, err = 
apisresolver.GetManagedResource("pinpoint.aws.upbound.io", "v1beta2", "App", "AppList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventDestination.PinpointDestination.ApplicationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.EventDestination.PinpointDestination.ApplicationArnRef, + Selector: mg.Spec.InitProvider.EventDestination.PinpointDestination.ApplicationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventDestination.PinpointDestination.ApplicationArn") + } + mg.Spec.InitProvider.EventDestination.PinpointDestination.ApplicationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventDestination.PinpointDestination.ApplicationArnRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.EventDestination != nil { + if mg.Spec.InitProvider.EventDestination.SnsDestination != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventDestination.SnsDestination.TopicArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.EventDestination.SnsDestination.TopicArnRef, + Selector: mg.Spec.InitProvider.EventDestination.SnsDestination.TopicArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventDestination.SnsDestination.TopicArn") + } + 
mg.Spec.InitProvider.EventDestination.SnsDestination.TopicArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventDestination.SnsDestination.TopicArnRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this EmailIdentity. +func (mg *EmailIdentity) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sesv2.aws.upbound.io", "v1beta2", "ConfigurationSet", "ConfigurationSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ConfigurationSetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ConfigurationSetNameRef, + Selector: mg.Spec.ForProvider.ConfigurationSetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ConfigurationSetName") + } + mg.Spec.ForProvider.ConfigurationSetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ConfigurationSetNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sesv2.aws.upbound.io", "v1beta2", "ConfigurationSet", "ConfigurationSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ConfigurationSetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ConfigurationSetNameRef, + Selector: mg.Spec.InitProvider.ConfigurationSetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } 
+ if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ConfigurationSetName") + } + mg.Spec.InitProvider.ConfigurationSetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ConfigurationSetNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/sesv2/v1beta2/zz_groupversion_info.go b/apis/sesv2/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..27211cdc97 --- /dev/null +++ b/apis/sesv2/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=sesv2.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "sesv2.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/sfn/v1beta1/zz_generated.conversion_hubs.go b/apis/sfn/v1beta1/zz_generated.conversion_hubs.go index 410b568fd6..fdf7d927c3 100755 --- a/apis/sfn/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/sfn/v1beta1/zz_generated.conversion_hubs.go @@ -8,6 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Activity) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *StateMachine) Hub() {} diff --git a/apis/sfn/v1beta1/zz_generated.conversion_spokes.go b/apis/sfn/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..051116978d --- /dev/null +++ b/apis/sfn/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this StateMachine to the hub type. +func (tr *StateMachine) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StateMachine type. 
+func (tr *StateMachine) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/sfn/v1beta2/zz_generated.conversion_hubs.go b/apis/sfn/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..a9aa251f5a --- /dev/null +++ b/apis/sfn/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *StateMachine) Hub() {} diff --git a/apis/sfn/v1beta2/zz_generated.deepcopy.go b/apis/sfn/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..22d4f974e7 --- /dev/null +++ b/apis/sfn/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,522 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingConfigurationInitParameters) DeepCopyInto(out *LoggingConfigurationInitParameters) { + *out = *in + if in.IncludeExecutionData != nil { + in, out := &in.IncludeExecutionData, &out.IncludeExecutionData + *out = new(bool) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationInitParameters. +func (in *LoggingConfigurationInitParameters) DeepCopy() *LoggingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationObservation) DeepCopyInto(out *LoggingConfigurationObservation) { + *out = *in + if in.IncludeExecutionData != nil { + in, out := &in.IncludeExecutionData, &out.IncludeExecutionData + *out = new(bool) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationObservation. +func (in *LoggingConfigurationObservation) DeepCopy() *LoggingConfigurationObservation { + if in == nil { + return nil + } + out := new(LoggingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingConfigurationParameters) DeepCopyInto(out *LoggingConfigurationParameters) { + *out = *in + if in.IncludeExecutionData != nil { + in, out := &in.IncludeExecutionData, &out.IncludeExecutionData + *out = new(bool) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationParameters. +func (in *LoggingConfigurationParameters) DeepCopy() *LoggingConfigurationParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StateMachine) DeepCopyInto(out *StateMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachine. +func (in *StateMachine) DeepCopy() *StateMachine { + if in == nil { + return nil + } + out := new(StateMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StateMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StateMachineInitParameters) DeepCopyInto(out *StateMachineInitParameters) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = new(string) + **out = **in + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Publish != nil { + in, out := &in.Publish, &out.Publish + *out = new(bool) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TracingConfiguration != nil { + in, out := &in.TracingConfiguration, &out.TracingConfiguration + *out = new(TracingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachineInitParameters. +func (in *StateMachineInitParameters) DeepCopy() *StateMachineInitParameters { + if in == nil { + return nil + } + out := new(StateMachineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StateMachineList) DeepCopyInto(out *StateMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StateMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachineList. +func (in *StateMachineList) DeepCopy() *StateMachineList { + if in == nil { + return nil + } + out := new(StateMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StateMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StateMachineObservation) DeepCopyInto(out *StateMachineObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreationDate != nil { + in, out := &in.CreationDate, &out.CreationDate + *out = new(string) + **out = **in + } + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Publish != nil { + in, out := &in.Publish, &out.Publish + *out = new(bool) + **out = **in + } + if in.RevisionID != nil { + in, out := &in.RevisionID, &out.RevisionID + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + 
in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.StateMachineVersionArn != nil { + in, out := &in.StateMachineVersionArn, &out.StateMachineVersionArn + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TracingConfiguration != nil { + in, out := &in.TracingConfiguration, &out.TracingConfiguration + *out = new(TracingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.VersionDescription != nil { + in, out := &in.VersionDescription, &out.VersionDescription + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachineObservation. +func (in *StateMachineObservation) DeepCopy() *StateMachineObservation { + if in == nil { + return nil + } + out := new(StateMachineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StateMachineParameters) DeepCopyInto(out *StateMachineParameters) { + *out = *in + if in.Definition != nil { + in, out := &in.Definition, &out.Definition + *out = new(string) + **out = **in + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Publish != nil { + in, out := &in.Publish, &out.Publish + *out = new(bool) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RoleArn != nil { + in, out := &in.RoleArn, &out.RoleArn + *out = new(string) + **out = **in + } + if in.RoleArnRef != nil { + in, out := &in.RoleArnRef, &out.RoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleArnSelector != nil { + in, out := &in.RoleArnSelector, &out.RoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TracingConfiguration != nil { + in, out := &in.TracingConfiguration, &out.TracingConfiguration + *out = new(TracingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachineParameters. +func (in *StateMachineParameters) DeepCopy() *StateMachineParameters { + if in == nil { + return nil + } + out := new(StateMachineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StateMachineSpec) DeepCopyInto(out *StateMachineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachineSpec. +func (in *StateMachineSpec) DeepCopy() *StateMachineSpec { + if in == nil { + return nil + } + out := new(StateMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StateMachineStatus) DeepCopyInto(out *StateMachineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateMachineStatus. +func (in *StateMachineStatus) DeepCopy() *StateMachineStatus { + if in == nil { + return nil + } + out := new(StateMachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TracingConfigurationInitParameters) DeepCopyInto(out *TracingConfigurationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfigurationInitParameters. +func (in *TracingConfigurationInitParameters) DeepCopy() *TracingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(TracingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TracingConfigurationObservation) DeepCopyInto(out *TracingConfigurationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfigurationObservation. +func (in *TracingConfigurationObservation) DeepCopy() *TracingConfigurationObservation { + if in == nil { + return nil + } + out := new(TracingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TracingConfigurationParameters) DeepCopyInto(out *TracingConfigurationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfigurationParameters. +func (in *TracingConfigurationParameters) DeepCopy() *TracingConfigurationParameters { + if in == nil { + return nil + } + out := new(TracingConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/sfn/v1beta2/zz_generated.managed.go b/apis/sfn/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..baaa3676f0 --- /dev/null +++ b/apis/sfn/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this StateMachine. +func (mg *StateMachine) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StateMachine. 
+func (mg *StateMachine) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StateMachine. +func (mg *StateMachine) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StateMachine. +func (mg *StateMachine) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StateMachine. +func (mg *StateMachine) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StateMachine. +func (mg *StateMachine) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StateMachine. +func (mg *StateMachine) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StateMachine. +func (mg *StateMachine) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StateMachine. +func (mg *StateMachine) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StateMachine. +func (mg *StateMachine) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StateMachine. +func (mg *StateMachine) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StateMachine. 
+func (mg *StateMachine) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/sfn/v1beta2/zz_generated.managedlist.go b/apis/sfn/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..12eda3dbd4 --- /dev/null +++ b/apis/sfn/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this StateMachineList. +func (l *StateMachineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/sfn/v1beta2/zz_generated.resolvers.go b/apis/sfn/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..433366bb88 --- /dev/null +++ b/apis/sfn/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *StateMachine) ResolveReferences( // ResolveReferences of this StateMachine. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleArnRef, + Selector: mg.Spec.ForProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RoleArn") + } + mg.Spec.ForProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleArnRef, + Selector: mg.Spec.InitProvider.RoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RoleArn") + } + mg.Spec.InitProvider.RoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/sfn/v1beta2/zz_groupversion_info.go b/apis/sfn/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..68880f3d19 --- /dev/null +++ b/apis/sfn/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 
The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=sfn.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "sfn.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/sfn/v1beta2/zz_statemachine_terraformed.go b/apis/sfn/v1beta2/zz_statemachine_terraformed.go new file mode 100755 index 0000000000..a49324e645 --- /dev/null +++ b/apis/sfn/v1beta2/zz_statemachine_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StateMachine +func (mg *StateMachine) GetTerraformResourceType() string { + return "aws_sfn_state_machine" +} + +// GetConnectionDetailsMapping for this StateMachine +func (tr *StateMachine) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this StateMachine +func (tr *StateMachine) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StateMachine +func (tr *StateMachine) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StateMachine +func (tr *StateMachine) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this StateMachine +func (tr *StateMachine) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this StateMachine +func (tr *StateMachine) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this StateMachine +func (tr *StateMachine) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base 
:= map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this StateMachine +func (tr *StateMachine) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StateMachine using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StateMachine) LateInitialize(attrs []byte) (bool, error) { + params := &StateMachineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StateMachine) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sfn/v1beta2/zz_statemachine_types.go b/apis/sfn/v1beta2/zz_statemachine_types.go new file mode 100755 index 0000000000..a064508798 --- /dev/null +++ b/apis/sfn/v1beta2/zz_statemachine_types.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LoggingConfigurationInitParameters struct { + + // Determines whether execution data is included in your log. When set to false, data is excluded. + IncludeExecutionData *bool `json:"includeExecutionData,omitempty" tf:"include_execution_data,omitempty"` + + // Defines which category of execution history events are logged. Valid values: ALL, ERROR, FATAL, OFF + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // Amazon Resource Name (ARN) of a CloudWatch log group. Make sure the State Machine has the correct IAM policies for logging. The ARN must end with :* + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` +} + +type LoggingConfigurationObservation struct { + + // Determines whether execution data is included in your log. When set to false, data is excluded. + IncludeExecutionData *bool `json:"includeExecutionData,omitempty" tf:"include_execution_data,omitempty"` + + // Defines which category of execution history events are logged. Valid values: ALL, ERROR, FATAL, OFF + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // Amazon Resource Name (ARN) of a CloudWatch log group. 
Make sure the State Machine has the correct IAM policies for logging. The ARN must end with :* + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` +} + +type LoggingConfigurationParameters struct { + + // Determines whether execution data is included in your log. When set to false, data is excluded. + // +kubebuilder:validation:Optional + IncludeExecutionData *bool `json:"includeExecutionData,omitempty" tf:"include_execution_data,omitempty"` + + // Defines which category of execution history events are logged. Valid values: ALL, ERROR, FATAL, OFF + // +kubebuilder:validation:Optional + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // Amazon Resource Name (ARN) of a CloudWatch log group. Make sure the State Machine has the correct IAM policies for logging. The ARN must end with :* + // +kubebuilder:validation:Optional + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` +} + +type StateMachineInitParameters struct { + + // The Amazon States Language definition of the state machine. + Definition *string `json:"definition,omitempty" tf:"definition,omitempty"` + + // Defines what execution history events are logged and where they are logged. The logging_configuration parameter is only valid when type is set to EXPRESS. Defaults to OFF. For more information see Logging Express Workflows and Log Levels in the AWS Step Functions User Guide. + LoggingConfiguration *LoggingConfigurationInitParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // Set to true to publish a version of the state machine during creation. Default: false. + Publish *bool `json:"publish,omitempty" tf:"publish,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role to use for this state machine. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Selects whether AWS X-Ray tracing is enabled. + TracingConfiguration *TracingConfigurationInitParameters `json:"tracingConfiguration,omitempty" tf:"tracing_configuration,omitempty"` + + // Determines whether a Standard or Express state machine is created. The default is STANDARD. You cannot update the type of a state machine once it has been created. Valid values: STANDARD, EXPRESS. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StateMachineObservation struct { + + // The ARN of the state machine. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The date the state machine was created. + CreationDate *string `json:"creationDate,omitempty" tf:"creation_date,omitempty"` + + // The Amazon States Language definition of the state machine. + Definition *string `json:"definition,omitempty" tf:"definition,omitempty"` + + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ARN of the state machine. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Defines what execution history events are logged and where they are logged. The logging_configuration parameter is only valid when type is set to EXPRESS. Defaults to OFF. 
For more information see Logging Express Workflows and Log Levels in the AWS Step Functions User Guide. + LoggingConfiguration *LoggingConfigurationObservation `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // Set to true to publish a version of the state machine during creation. Default: false. + Publish *bool `json:"publish,omitempty" tf:"publish,omitempty"` + + // The ARN of the state machine. + RevisionID *string `json:"revisionId,omitempty" tf:"revision_id,omitempty"` + + // The Amazon Resource Name (ARN) of the IAM role to use for this state machine. + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // The ARN of the state machine version. + StateMachineVersionArn *string `json:"stateMachineVersionArn,omitempty" tf:"state_machine_version_arn,omitempty"` + + // The current status of the state machine. Either ACTIVE or DELETING. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Selects whether AWS X-Ray tracing is enabled. + TracingConfiguration *TracingConfigurationObservation `json:"tracingConfiguration,omitempty" tf:"tracing_configuration,omitempty"` + + // Determines whether a Standard or Express state machine is created. The default is STANDARD. You cannot update the type of a state machine once it has been created. Valid values: STANDARD, EXPRESS. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + VersionDescription *string `json:"versionDescription,omitempty" tf:"version_description,omitempty"` +} + +type StateMachineParameters struct { + + // The Amazon States Language definition of the state machine. 
+ // +kubebuilder:validation:Optional + Definition *string `json:"definition,omitempty" tf:"definition,omitempty"` + + // Defines what execution history events are logged and where they are logged. The logging_configuration parameter is only valid when type is set to EXPRESS. Defaults to OFF. For more information see Logging Express Workflows and Log Levels in the AWS Step Functions User Guide. + // +kubebuilder:validation:Optional + LoggingConfiguration *LoggingConfigurationParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // Set to true to publish a version of the state machine during creation. Default: false. + // +kubebuilder:validation:Optional + Publish *bool `json:"publish,omitempty" tf:"publish,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The Amazon Resource Name (ARN) of the IAM role to use for this state machine. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + RoleArn *string `json:"roleArn,omitempty" tf:"role_arn,omitempty"` + + // Reference to a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnRef *v1.Reference `json:"roleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate roleArn. + // +kubebuilder:validation:Optional + RoleArnSelector *v1.Selector `json:"roleArnSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Selects whether AWS X-Ray tracing is enabled. 
+ // +kubebuilder:validation:Optional + TracingConfiguration *TracingConfigurationParameters `json:"tracingConfiguration,omitempty" tf:"tracing_configuration,omitempty"` + + // Determines whether a Standard or Express state machine is created. The default is STANDARD. You cannot update the type of a state machine once it has been created. Valid values: STANDARD, EXPRESS. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TracingConfigurationInitParameters struct { + + // When set to true, AWS X-Ray tracing is enabled. Make sure the State Machine has the correct IAM policies for logging. See the AWS Step Functions Developer Guide for details. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type TracingConfigurationObservation struct { + + // When set to true, AWS X-Ray tracing is enabled. Make sure the State Machine has the correct IAM policies for logging. See the AWS Step Functions Developer Guide for details. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type TracingConfigurationParameters struct { + + // When set to true, AWS X-Ray tracing is enabled. Make sure the State Machine has the correct IAM policies for logging. See the AWS Step Functions Developer Guide for details. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +// StateMachineSpec defines the desired state of StateMachine +type StateMachineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StateMachineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StateMachineInitParameters `json:"initProvider,omitempty"` +} + +// StateMachineStatus defines the observed state of StateMachine. +type StateMachineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StateMachineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StateMachine is the Schema for the StateMachines API. Provides a Step Function State Machine resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type StateMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.definition) || (has(self.initProvider) && has(self.initProvider.definition))",message="spec.forProvider.definition is a required parameter" + Spec StateMachineSpec `json:"spec"` + Status StateMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StateMachineList contains a list of StateMachines +type StateMachineList struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StateMachine `json:"items"` +} + +// Repository type metadata. +var ( + StateMachine_Kind = "StateMachine" + StateMachine_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StateMachine_Kind}.String() + StateMachine_KindAPIVersion = StateMachine_Kind + "." + CRDGroupVersion.String() + StateMachine_GroupVersionKind = CRDGroupVersion.WithKind(StateMachine_Kind) +) + +func init() { + SchemeBuilder.Register(&StateMachine{}, &StateMachineList{}) +} diff --git a/apis/signer/v1beta1/zz_generated.conversion_hubs.go b/apis/signer/v1beta1/zz_generated.conversion_hubs.go index 7304b93c60..4bb7c824e5 100755 --- a/apis/signer/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/signer/v1beta1/zz_generated.conversion_hubs.go @@ -6,11 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *SigningJob) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SigningProfile) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SigningProfilePermission) Hub() {} diff --git a/apis/signer/v1beta1/zz_generated.conversion_spokes.go b/apis/signer/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..3b2befa8cc --- /dev/null +++ b/apis/signer/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this SigningJob to the hub type. 
+func (tr *SigningJob) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SigningJob type. +func (tr *SigningJob) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SigningProfile to the hub type. +func (tr *SigningProfile) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SigningProfile type. 
+func (tr *SigningProfile) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/signer/v1beta1/zz_generated.resolvers.go b/apis/signer/v1beta1/zz_generated.resolvers.go index d941c6c9aa..39c3293242 100644 --- a/apis/signer/v1beta1/zz_generated.resolvers.go +++ b/apis/signer/v1beta1/zz_generated.resolvers.go @@ -9,9 +9,10 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" apisresolver "github.com/upbound/provider-aws/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -75,7 +76,7 @@ func (mg *SigningProfilePermission) ResolveReferences(ctx context.Context, c cli var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta1", "SigningProfile", "SigningProfileList") + m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta2", "SigningProfile", "SigningProfileList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -94,7 +95,7 @@ func (mg *SigningProfilePermission) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.ProfileName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ProfileNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta1", "SigningProfile", 
"SigningProfileList") + m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta2", "SigningProfile", "SigningProfileList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -113,7 +114,7 @@ func (mg *SigningProfilePermission) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.ProfileVersion = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ProfileVersionRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta1", "SigningProfile", "SigningProfileList") + m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta2", "SigningProfile", "SigningProfileList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/signer/v1beta1/zz_signingprofilepermission_types.go b/apis/signer/v1beta1/zz_signingprofilepermission_types.go index 6a04740d39..f8bf4962ac 100755 --- a/apis/signer/v1beta1/zz_signingprofilepermission_types.go +++ b/apis/signer/v1beta1/zz_signingprofilepermission_types.go @@ -22,7 +22,7 @@ type SigningProfilePermissionInitParameters struct { Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` // The signing profile version that a permission applies to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta1.SigningProfile + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta2.SigningProfile // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("version",true) ProfileVersion *string `json:"profileVersion,omitempty" tf:"profile_version,omitempty"` @@ -72,7 +72,7 @@ type SigningProfilePermissionParameters struct { Principal *string `json:"principal,omitempty" tf:"principal,omitempty"` // Name of the signing profile to add the cross-account permissions. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta1.SigningProfile + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta2.SigningProfile // +kubebuilder:validation:Optional ProfileName *string `json:"profileName,omitempty" tf:"profile_name,omitempty"` @@ -85,7 +85,7 @@ type SigningProfilePermissionParameters struct { ProfileNameSelector *v1.Selector `json:"profileNameSelector,omitempty" tf:"-"` // The signing profile version that a permission applies to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta1.SigningProfile + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta2.SigningProfile // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("version",true) // +kubebuilder:validation:Optional ProfileVersion *string `json:"profileVersion,omitempty" tf:"profile_version,omitempty"` diff --git a/apis/signer/v1beta2/zz_generated.conversion_hubs.go b/apis/signer/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2ee0f37107 --- /dev/null +++ b/apis/signer/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *SigningJob) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SigningProfile) Hub() {} diff --git a/apis/signer/v1beta2/zz_generated.deepcopy.go b/apis/signer/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..90affcd97b --- /dev/null +++ b/apis/signer/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1254 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationInitParameters) DeepCopyInto(out *DestinationInitParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3InitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationInitParameters. +func (in *DestinationInitParameters) DeepCopy() *DestinationInitParameters { + if in == nil { + return nil + } + out := new(DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationObservation) DeepCopyInto(out *DestinationObservation) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Observation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationObservation. +func (in *DestinationObservation) DeepCopy() *DestinationObservation { + if in == nil { + return nil + } + out := new(DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationParameters) DeepCopyInto(out *DestinationParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Parameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationParameters. +func (in *DestinationParameters) DeepCopy() *DestinationParameters { + if in == nil { + return nil + } + out := new(DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RevocationRecordInitParameters) DeepCopyInto(out *RevocationRecordInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevocationRecordInitParameters. +func (in *RevocationRecordInitParameters) DeepCopy() *RevocationRecordInitParameters { + if in == nil { + return nil + } + out := new(RevocationRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RevocationRecordObservation) DeepCopyInto(out *RevocationRecordObservation) { + *out = *in + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = new(string) + **out = **in + } + if in.RevokedAt != nil { + in, out := &in.RevokedAt, &out.RevokedAt + *out = new(string) + **out = **in + } + if in.RevokedBy != nil { + in, out := &in.RevokedBy, &out.RevokedBy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevocationRecordObservation. +func (in *RevocationRecordObservation) DeepCopy() *RevocationRecordObservation { + if in == nil { + return nil + } + out := new(RevocationRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RevocationRecordParameters) DeepCopyInto(out *RevocationRecordParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevocationRecordParameters. +func (in *RevocationRecordParameters) DeepCopy() *RevocationRecordParameters { + if in == nil { + return nil + } + out := new(RevocationRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3InitParameters) DeepCopyInto(out *S3InitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InitParameters. +func (in *S3InitParameters) DeepCopy() *S3InitParameters { + if in == nil { + return nil + } + out := new(S3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Observation) DeepCopyInto(out *S3Observation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Observation. +func (in *S3Observation) DeepCopy() *S3Observation { + if in == nil { + return nil + } + out := new(S3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Parameters) DeepCopyInto(out *S3Parameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Parameters. +func (in *S3Parameters) DeepCopy() *S3Parameters { + if in == nil { + return nil + } + out := new(S3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignatureValidityPeriodInitParameters) DeepCopyInto(out *SignatureValidityPeriodInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureValidityPeriodInitParameters. +func (in *SignatureValidityPeriodInitParameters) DeepCopy() *SignatureValidityPeriodInitParameters { + if in == nil { + return nil + } + out := new(SignatureValidityPeriodInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureValidityPeriodObservation) DeepCopyInto(out *SignatureValidityPeriodObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureValidityPeriodObservation. +func (in *SignatureValidityPeriodObservation) DeepCopy() *SignatureValidityPeriodObservation { + if in == nil { + return nil + } + out := new(SignatureValidityPeriodObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignatureValidityPeriodParameters) DeepCopyInto(out *SignatureValidityPeriodParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureValidityPeriodParameters. +func (in *SignatureValidityPeriodParameters) DeepCopy() *SignatureValidityPeriodParameters { + if in == nil { + return nil + } + out := new(SignatureValidityPeriodParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignedObjectInitParameters) DeepCopyInto(out *SignedObjectInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignedObjectInitParameters. +func (in *SignedObjectInitParameters) DeepCopy() *SignedObjectInitParameters { + if in == nil { + return nil + } + out := new(SignedObjectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignedObjectObservation) DeepCopyInto(out *SignedObjectObservation) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = make([]SignedObjectS3Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignedObjectObservation. +func (in *SignedObjectObservation) DeepCopy() *SignedObjectObservation { + if in == nil { + return nil + } + out := new(SignedObjectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignedObjectParameters) DeepCopyInto(out *SignedObjectParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignedObjectParameters. +func (in *SignedObjectParameters) DeepCopy() *SignedObjectParameters { + if in == nil { + return nil + } + out := new(SignedObjectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignedObjectS3InitParameters) DeepCopyInto(out *SignedObjectS3InitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignedObjectS3InitParameters. +func (in *SignedObjectS3InitParameters) DeepCopy() *SignedObjectS3InitParameters { + if in == nil { + return nil + } + out := new(SignedObjectS3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignedObjectS3Observation) DeepCopyInto(out *SignedObjectS3Observation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignedObjectS3Observation. +func (in *SignedObjectS3Observation) DeepCopy() *SignedObjectS3Observation { + if in == nil { + return nil + } + out := new(SignedObjectS3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignedObjectS3Parameters) DeepCopyInto(out *SignedObjectS3Parameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignedObjectS3Parameters. 
+func (in *SignedObjectS3Parameters) DeepCopy() *SignedObjectS3Parameters { + if in == nil { + return nil + } + out := new(SignedObjectS3Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningJob) DeepCopyInto(out *SigningJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningJob. +func (in *SigningJob) DeepCopy() *SigningJob { + if in == nil { + return nil + } + out := new(SigningJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SigningJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SigningJobInitParameters) DeepCopyInto(out *SigningJobInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IgnoreSigningJobFailure != nil { + in, out := &in.IgnoreSigningJobFailure, &out.IgnoreSigningJobFailure + *out = new(bool) + **out = **in + } + if in.ProfileName != nil { + in, out := &in.ProfileName, &out.ProfileName + *out = new(string) + **out = **in + } + if in.ProfileNameRef != nil { + in, out := &in.ProfileNameRef, &out.ProfileNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProfileNameSelector != nil { + in, out := &in.ProfileNameSelector, &out.ProfileNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningJobInitParameters. +func (in *SigningJobInitParameters) DeepCopy() *SigningJobInitParameters { + if in == nil { + return nil + } + out := new(SigningJobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningJobList) DeepCopyInto(out *SigningJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SigningJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningJobList. 
+func (in *SigningJobList) DeepCopy() *SigningJobList { + if in == nil { + return nil + } + out := new(SigningJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SigningJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningJobObservation) DeepCopyInto(out *SigningJobObservation) { + *out = *in + if in.CompletedAt != nil { + in, out := &in.CompletedAt, &out.CompletedAt + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IgnoreSigningJobFailure != nil { + in, out := &in.IgnoreSigningJobFailure, &out.IgnoreSigningJobFailure + *out = new(bool) + **out = **in + } + if in.JobID != nil { + in, out := &in.JobID, &out.JobID + *out = new(string) + **out = **in + } + if in.JobInvoker != nil { + in, out := &in.JobInvoker, &out.JobInvoker + *out = new(string) + **out = **in + } + if in.JobOwner != nil { + in, out := &in.JobOwner, &out.JobOwner + *out = new(string) + **out = **in + } + if in.PlatformDisplayName != nil { + in, out := &in.PlatformDisplayName, &out.PlatformDisplayName + *out = new(string) + **out = **in + } + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.ProfileName != nil { + in, out := &in.ProfileName, &out.ProfileName + *out = new(string) + **out = **in + } + if in.ProfileVersion != nil { + in, out := &in.ProfileVersion, &out.ProfileVersion + *out = 
new(string) + **out = **in + } + if in.RequestedBy != nil { + in, out := &in.RequestedBy, &out.RequestedBy + *out = new(string) + **out = **in + } + if in.RevocationRecord != nil { + in, out := &in.RevocationRecord, &out.RevocationRecord + *out = make([]RevocationRecordObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SignatureExpiresAt != nil { + in, out := &in.SignatureExpiresAt, &out.SignatureExpiresAt + *out = new(string) + **out = **in + } + if in.SignedObject != nil { + in, out := &in.SignedObject, &out.SignedObject + *out = make([]SignedObjectObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.StatusReason != nil { + in, out := &in.StatusReason, &out.StatusReason + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningJobObservation. +func (in *SigningJobObservation) DeepCopy() *SigningJobObservation { + if in == nil { + return nil + } + out := new(SigningJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SigningJobParameters) DeepCopyInto(out *SigningJobParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.IgnoreSigningJobFailure != nil { + in, out := &in.IgnoreSigningJobFailure, &out.IgnoreSigningJobFailure + *out = new(bool) + **out = **in + } + if in.ProfileName != nil { + in, out := &in.ProfileName, &out.ProfileName + *out = new(string) + **out = **in + } + if in.ProfileNameRef != nil { + in, out := &in.ProfileNameRef, &out.ProfileNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProfileNameSelector != nil { + in, out := &in.ProfileNameSelector, &out.ProfileNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningJobParameters. +func (in *SigningJobParameters) DeepCopy() *SigningJobParameters { + if in == nil { + return nil + } + out := new(SigningJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningJobSpec) DeepCopyInto(out *SigningJobSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningJobSpec. 
+func (in *SigningJobSpec) DeepCopy() *SigningJobSpec { + if in == nil { + return nil + } + out := new(SigningJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningJobStatus) DeepCopyInto(out *SigningJobStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningJobStatus. +func (in *SigningJobStatus) DeepCopy() *SigningJobStatus { + if in == nil { + return nil + } + out := new(SigningJobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningMaterialInitParameters) DeepCopyInto(out *SigningMaterialInitParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningMaterialInitParameters. +func (in *SigningMaterialInitParameters) DeepCopy() *SigningMaterialInitParameters { + if in == nil { + return nil + } + out := new(SigningMaterialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningMaterialObservation) DeepCopyInto(out *SigningMaterialObservation) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningMaterialObservation. 
+func (in *SigningMaterialObservation) DeepCopy() *SigningMaterialObservation { + if in == nil { + return nil + } + out := new(SigningMaterialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningMaterialParameters) DeepCopyInto(out *SigningMaterialParameters) { + *out = *in + if in.CertificateArn != nil { + in, out := &in.CertificateArn, &out.CertificateArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningMaterialParameters. +func (in *SigningMaterialParameters) DeepCopy() *SigningMaterialParameters { + if in == nil { + return nil + } + out := new(SigningMaterialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningProfile) DeepCopyInto(out *SigningProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfile. +func (in *SigningProfile) DeepCopy() *SigningProfile { + if in == nil { + return nil + } + out := new(SigningProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SigningProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SigningProfileInitParameters) DeepCopyInto(out *SigningProfileInitParameters) { + *out = *in + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.SignatureValidityPeriod != nil { + in, out := &in.SignatureValidityPeriod, &out.SignatureValidityPeriod + *out = new(SignatureValidityPeriodInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SigningMaterial != nil { + in, out := &in.SigningMaterial, &out.SigningMaterial + *out = new(SigningMaterialInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfileInitParameters. +func (in *SigningProfileInitParameters) DeepCopy() *SigningProfileInitParameters { + if in == nil { + return nil + } + out := new(SigningProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningProfileList) DeepCopyInto(out *SigningProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SigningProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfileList. 
+func (in *SigningProfileList) DeepCopy() *SigningProfileList { + if in == nil { + return nil + } + out := new(SigningProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SigningProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningProfileObservation) DeepCopyInto(out *SigningProfileObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PlatformDisplayName != nil { + in, out := &in.PlatformDisplayName, &out.PlatformDisplayName + *out = new(string) + **out = **in + } + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.RevocationRecord != nil { + in, out := &in.RevocationRecord, &out.RevocationRecord + *out = make([]SigningProfileRevocationRecordObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SignatureValidityPeriod != nil { + in, out := &in.SignatureValidityPeriod, &out.SignatureValidityPeriod + *out = new(SignatureValidityPeriodObservation) + (*in).DeepCopyInto(*out) + } + if in.SigningMaterial != nil { + in, out := &in.SigningMaterial, &out.SigningMaterial + *out = new(SigningMaterialObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + 
*out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionArn != nil { + in, out := &in.VersionArn, &out.VersionArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfileObservation. +func (in *SigningProfileObservation) DeepCopy() *SigningProfileObservation { + if in == nil { + return nil + } + out := new(SigningProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SigningProfileParameters) DeepCopyInto(out *SigningProfileParameters) { + *out = *in + if in.PlatformID != nil { + in, out := &in.PlatformID, &out.PlatformID + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SignatureValidityPeriod != nil { + in, out := &in.SignatureValidityPeriod, &out.SignatureValidityPeriod + *out = new(SignatureValidityPeriodParameters) + (*in).DeepCopyInto(*out) + } + if in.SigningMaterial != nil { + in, out := &in.SigningMaterial, &out.SigningMaterial + *out = new(SigningMaterialParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfileParameters. +func (in *SigningProfileParameters) DeepCopy() *SigningProfileParameters { + if in == nil { + return nil + } + out := new(SigningProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningProfileRevocationRecordInitParameters) DeepCopyInto(out *SigningProfileRevocationRecordInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfileRevocationRecordInitParameters. 
+func (in *SigningProfileRevocationRecordInitParameters) DeepCopy() *SigningProfileRevocationRecordInitParameters { + if in == nil { + return nil + } + out := new(SigningProfileRevocationRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningProfileRevocationRecordObservation) DeepCopyInto(out *SigningProfileRevocationRecordObservation) { + *out = *in + if in.RevocationEffectiveFrom != nil { + in, out := &in.RevocationEffectiveFrom, &out.RevocationEffectiveFrom + *out = new(string) + **out = **in + } + if in.RevokedAt != nil { + in, out := &in.RevokedAt, &out.RevokedAt + *out = new(string) + **out = **in + } + if in.RevokedBy != nil { + in, out := &in.RevokedBy, &out.RevokedBy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfileRevocationRecordObservation. +func (in *SigningProfileRevocationRecordObservation) DeepCopy() *SigningProfileRevocationRecordObservation { + if in == nil { + return nil + } + out := new(SigningProfileRevocationRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningProfileRevocationRecordParameters) DeepCopyInto(out *SigningProfileRevocationRecordParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfileRevocationRecordParameters. +func (in *SigningProfileRevocationRecordParameters) DeepCopy() *SigningProfileRevocationRecordParameters { + if in == nil { + return nil + } + out := new(SigningProfileRevocationRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SigningProfileSpec) DeepCopyInto(out *SigningProfileSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfileSpec. +func (in *SigningProfileSpec) DeepCopy() *SigningProfileSpec { + if in == nil { + return nil + } + out := new(SigningProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SigningProfileStatus) DeepCopyInto(out *SigningProfileStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningProfileStatus. +func (in *SigningProfileStatus) DeepCopy() *SigningProfileStatus { + if in == nil { + return nil + } + out := new(SigningProfileStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceInitParameters) DeepCopyInto(out *SourceInitParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(SourceS3InitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceInitParameters. +func (in *SourceInitParameters) DeepCopy() *SourceInitParameters { + if in == nil { + return nil + } + out := new(SourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceObservation) DeepCopyInto(out *SourceObservation) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(SourceS3Observation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceObservation. +func (in *SourceObservation) DeepCopy() *SourceObservation { + if in == nil { + return nil + } + out := new(SourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceParameters) DeepCopyInto(out *SourceParameters) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(SourceS3Parameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceParameters. +func (in *SourceParameters) DeepCopy() *SourceParameters { + if in == nil { + return nil + } + out := new(SourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceS3InitParameters) DeepCopyInto(out *SourceS3InitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceS3InitParameters. +func (in *SourceS3InitParameters) DeepCopy() *SourceS3InitParameters { + if in == nil { + return nil + } + out := new(SourceS3InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceS3Observation) DeepCopyInto(out *SourceS3Observation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceS3Observation. +func (in *SourceS3Observation) DeepCopy() *SourceS3Observation { + if in == nil { + return nil + } + out := new(SourceS3Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceS3Parameters) DeepCopyInto(out *SourceS3Parameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceS3Parameters. +func (in *SourceS3Parameters) DeepCopy() *SourceS3Parameters { + if in == nil { + return nil + } + out := new(SourceS3Parameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/signer/v1beta2/zz_generated.managed.go b/apis/signer/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..e75db645f6 --- /dev/null +++ b/apis/signer/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this SigningJob. 
+func (mg *SigningJob) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SigningJob. +func (mg *SigningJob) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SigningJob. +func (mg *SigningJob) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SigningJob. +func (mg *SigningJob) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SigningJob. +func (mg *SigningJob) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SigningJob. +func (mg *SigningJob) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SigningJob. +func (mg *SigningJob) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SigningJob. +func (mg *SigningJob) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SigningJob. +func (mg *SigningJob) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SigningJob. +func (mg *SigningJob) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SigningJob. +func (mg *SigningJob) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SigningJob. 
+func (mg *SigningJob) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SigningProfile. +func (mg *SigningProfile) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SigningProfile. +func (mg *SigningProfile) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SigningProfile. +func (mg *SigningProfile) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SigningProfile. +func (mg *SigningProfile) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SigningProfile. +func (mg *SigningProfile) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SigningProfile. +func (mg *SigningProfile) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SigningProfile. +func (mg *SigningProfile) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SigningProfile. +func (mg *SigningProfile) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SigningProfile. +func (mg *SigningProfile) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SigningProfile. +func (mg *SigningProfile) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SigningProfile. 
+func (mg *SigningProfile) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SigningProfile. +func (mg *SigningProfile) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/signer/v1beta2/zz_generated.managedlist.go b/apis/signer/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..be7762cec7 --- /dev/null +++ b/apis/signer/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this SigningJobList. +func (l *SigningJobList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SigningProfileList. +func (l *SigningProfileList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/signer/v1beta2/zz_generated.resolvers.go b/apis/signer/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..3387a1883c --- /dev/null +++ b/apis/signer/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,66 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + errors "github.com/pkg/errors" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *SigningJob) ResolveReferences( // ResolveReferences of this SigningJob. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta2", "SigningProfile", "SigningProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ProfileName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ProfileNameRef, + Selector: mg.Spec.ForProvider.ProfileNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ProfileName") + } + mg.Spec.ForProvider.ProfileName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ProfileNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("signer.aws.upbound.io", "v1beta2", "SigningProfile", "SigningProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ProfileName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ProfileNameRef, + Selector: mg.Spec.InitProvider.ProfileNameSelector, + To: reference.To{List: l, 
Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ProfileName") + } + mg.Spec.InitProvider.ProfileName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ProfileNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/signer/v1beta2/zz_groupversion_info.go b/apis/signer/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..7dc44f2b03 --- /dev/null +++ b/apis/signer/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=signer.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "signer.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/signer/v1beta2/zz_signingjob_terraformed.go b/apis/signer/v1beta2/zz_signingjob_terraformed.go new file mode 100755 index 0000000000..2c45d79175 --- /dev/null +++ b/apis/signer/v1beta2/zz_signingjob_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SigningJob +func (mg *SigningJob) GetTerraformResourceType() string { + return "aws_signer_signing_job" +} + +// GetConnectionDetailsMapping for this SigningJob +func (tr *SigningJob) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SigningJob +func (tr *SigningJob) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SigningJob +func (tr *SigningJob) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SigningJob +func (tr *SigningJob) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SigningJob +func (tr *SigningJob) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SigningJob +func (tr *SigningJob) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SigningJob +func (tr *SigningJob) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SigningJob +func (tr *SigningJob) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SigningJob using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SigningJob) LateInitialize(attrs []byte) (bool, error) { + params := &SigningJobParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SigningJob) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/signer/v1beta2/zz_signingjob_types.go b/apis/signer/v1beta2/zz_signingjob_types.go new file mode 100755 index 0000000000..2bc0a10382 --- /dev/null +++ b/apis/signer/v1beta2/zz_signingjob_types.go @@ -0,0 +1,339 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DestinationInitParameters struct { + + // A configuration block describing the S3 Source object: See S3 Source below for details. + S3 *S3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type DestinationObservation struct { + + // A configuration block describing the S3 Source object: See S3 Source below for details. + S3 *S3Observation `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type DestinationParameters struct { + + // A configuration block describing the S3 Source object: See S3 Source below for details. + // +kubebuilder:validation:Optional + S3 *S3Parameters `json:"s3" tf:"s3,omitempty"` +} + +type RevocationRecordInitParameters struct { +} + +type RevocationRecordObservation struct { + Reason *string `json:"reason,omitempty" tf:"reason,omitempty"` + + RevokedAt *string `json:"revokedAt,omitempty" tf:"revoked_at,omitempty"` + + RevokedBy *string `json:"revokedBy,omitempty" tf:"revoked_by,omitempty"` +} + +type RevocationRecordParameters struct { +} + +type S3InitParameters struct { + + // Name of the S3 bucket. 
+ Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3Observation struct { + + // Name of the S3 bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type S3Parameters struct { + + // Name of the S3 bucket. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // An Amazon S3 object key prefix that you can use to limit signed objects keys to begin with the specified prefix. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` +} + +type SignedObjectInitParameters struct { +} + +type SignedObjectObservation struct { + + // A configuration block describing the S3 Source object: See S3 Source below for details. + S3 []SignedObjectS3Observation `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type SignedObjectParameters struct { +} + +type SignedObjectS3InitParameters struct { +} + +type SignedObjectS3Observation struct { + + // Name of the S3 bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Key name of the object that contains your unsigned code. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type SignedObjectS3Parameters struct { +} + +type SigningJobInitParameters struct { + + // The S3 bucket in which to save your signed object. See Destination below for details. + Destination *DestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Set this argument to true to ignore signing job failures and retrieve failed status and reason. Default false. 
+ IgnoreSigningJobFailure *bool `json:"ignoreSigningJobFailure,omitempty" tf:"ignore_signing_job_failure,omitempty"` + + // The name of the profile to initiate the signing operation. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta2.SigningProfile + ProfileName *string `json:"profileName,omitempty" tf:"profile_name,omitempty"` + + // Reference to a SigningProfile in signer to populate profileName. + // +kubebuilder:validation:Optional + ProfileNameRef *v1.Reference `json:"profileNameRef,omitempty" tf:"-"` + + // Selector for a SigningProfile in signer to populate profileName. + // +kubebuilder:validation:Optional + ProfileNameSelector *v1.Selector `json:"profileNameSelector,omitempty" tf:"-"` + + // The S3 bucket that contains the object to sign. See Source below for details. + Source *SourceInitParameters `json:"source,omitempty" tf:"source,omitempty"` +} + +type SigningJobObservation struct { + + // Date and time in RFC3339 format that the signing job was completed. + CompletedAt *string `json:"completedAt,omitempty" tf:"completed_at,omitempty"` + + // Date and time in RFC3339 format that the signing job was created. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The S3 bucket in which to save your signed object. See Destination below for details. + Destination *DestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Set this argument to true to ignore signing job failures and retrieve failed status and reason. Default false. + IgnoreSigningJobFailure *bool `json:"ignoreSigningJobFailure,omitempty" tf:"ignore_signing_job_failure,omitempty"` + + // The ID of the signing job on output. + JobID *string `json:"jobId,omitempty" tf:"job_id,omitempty"` + + // The IAM entity that initiated the signing job. 
+ JobInvoker *string `json:"jobInvoker,omitempty" tf:"job_invoker,omitempty"` + + // The AWS account ID of the job owner. + JobOwner *string `json:"jobOwner,omitempty" tf:"job_owner,omitempty"` + + // A human-readable name for the signing platform associated with the signing job. + PlatformDisplayName *string `json:"platformDisplayName,omitempty" tf:"platform_display_name,omitempty"` + + // The platform to which your signed code image will be distributed. + PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` + + // The name of the profile to initiate the signing operation. + ProfileName *string `json:"profileName,omitempty" tf:"profile_name,omitempty"` + + // The version of the signing profile used to initiate the signing job. + ProfileVersion *string `json:"profileVersion,omitempty" tf:"profile_version,omitempty"` + + // The IAM principal that requested the signing job. + RequestedBy *string `json:"requestedBy,omitempty" tf:"requested_by,omitempty"` + + // A revocation record if the signature generated by the signing job has been revoked. Contains a timestamp and the ID of the IAM entity that revoked the signature. + RevocationRecord []RevocationRecordObservation `json:"revocationRecord,omitempty" tf:"revocation_record,omitempty"` + + // The time when the signature of a signing job expires. + SignatureExpiresAt *string `json:"signatureExpiresAt,omitempty" tf:"signature_expires_at,omitempty"` + + // Name of the S3 bucket where the signed code image is saved by code signing. + SignedObject []SignedObjectObservation `json:"signedObject,omitempty" tf:"signed_object,omitempty"` + + // The S3 bucket that contains the object to sign. See Source below for details. + Source *SourceObservation `json:"source,omitempty" tf:"source,omitempty"` + + // Status of the signing job. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // String value that contains the status reason. 
+ StatusReason *string `json:"statusReason,omitempty" tf:"status_reason,omitempty"` +} + +type SigningJobParameters struct { + + // The S3 bucket in which to save your signed object. See Destination below for details. + // +kubebuilder:validation:Optional + Destination *DestinationParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Set this argument to true to ignore signing job failures and retrieve failed status and reason. Default false. + // +kubebuilder:validation:Optional + IgnoreSigningJobFailure *bool `json:"ignoreSigningJobFailure,omitempty" tf:"ignore_signing_job_failure,omitempty"` + + // The name of the profile to initiate the signing operation. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/signer/v1beta2.SigningProfile + // +kubebuilder:validation:Optional + ProfileName *string `json:"profileName,omitempty" tf:"profile_name,omitempty"` + + // Reference to a SigningProfile in signer to populate profileName. + // +kubebuilder:validation:Optional + ProfileNameRef *v1.Reference `json:"profileNameRef,omitempty" tf:"-"` + + // Selector for a SigningProfile in signer to populate profileName. + // +kubebuilder:validation:Optional + ProfileNameSelector *v1.Selector `json:"profileNameSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The S3 bucket that contains the object to sign. See Source below for details. + // +kubebuilder:validation:Optional + Source *SourceParameters `json:"source,omitempty" tf:"source,omitempty"` +} + +type SourceInitParameters struct { + + // A configuration block describing the S3 Source object: See S3 Source below for details. 
+ S3 *SourceS3InitParameters `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type SourceObservation struct { + + // A configuration block describing the S3 Source object: See S3 Source below for details. + S3 *SourceS3Observation `json:"s3,omitempty" tf:"s3,omitempty"` +} + +type SourceParameters struct { + + // A configuration block describing the S3 Source object: See S3 Source below for details. + // +kubebuilder:validation:Optional + S3 *SourceS3Parameters `json:"s3" tf:"s3,omitempty"` +} + +type SourceS3InitParameters struct { + + // Name of the S3 bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Key name of the object that contains your unsigned code. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Version of your source image in your version enabled S3 bucket. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type SourceS3Observation struct { + + // Name of the S3 bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Key name of the object that contains your unsigned code. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Version of your source image in your version enabled S3 bucket. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type SourceS3Parameters struct { + + // Name of the S3 bucket. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // Key name of the object that contains your unsigned code. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Version of your source image in your version enabled S3 bucket. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +// SigningJobSpec defines the desired state of SigningJob +type SigningJobSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SigningJobParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SigningJobInitParameters `json:"initProvider,omitempty"` +} + +// SigningJobStatus defines the observed state of SigningJob. +type SigningJobStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SigningJobObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SigningJob is the Schema for the SigningJobs API. Creates a Signer Signing Job. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SigningJob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destination) || (has(self.initProvider) && has(self.initProvider.destination))",message="spec.forProvider.destination is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.source) || (has(self.initProvider) && has(self.initProvider.source))",message="spec.forProvider.source is a required parameter" + Spec SigningJobSpec `json:"spec"` + Status SigningJobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SigningJobList contains a list of SigningJobs +type SigningJobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SigningJob `json:"items"` +} + +// Repository type metadata. +var ( + SigningJob_Kind = "SigningJob" + SigningJob_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SigningJob_Kind}.String() + SigningJob_KindAPIVersion = SigningJob_Kind + "." 
+ CRDGroupVersion.String() + SigningJob_GroupVersionKind = CRDGroupVersion.WithKind(SigningJob_Kind) +) + +func init() { + SchemeBuilder.Register(&SigningJob{}, &SigningJobList{}) +} diff --git a/apis/signer/v1beta2/zz_signingprofile_terraformed.go b/apis/signer/v1beta2/zz_signingprofile_terraformed.go new file mode 100755 index 0000000000..bf16336d0e --- /dev/null +++ b/apis/signer/v1beta2/zz_signingprofile_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SigningProfile +func (mg *SigningProfile) GetTerraformResourceType() string { + return "aws_signer_signing_profile" +} + +// GetConnectionDetailsMapping for this SigningProfile +func (tr *SigningProfile) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SigningProfile +func (tr *SigningProfile) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SigningProfile +func (tr *SigningProfile) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SigningProfile +func (tr *SigningProfile) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SigningProfile +func (tr *SigningProfile) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this SigningProfile
+func (tr *SigningProfile) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SigningProfile
+func (tr *SigningProfile) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SigningProfile
+func (tr *SigningProfile) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SigningProfile using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *SigningProfile) LateInitialize(attrs []byte) (bool, error) { + params := &SigningProfileParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SigningProfile) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/signer/v1beta2/zz_signingprofile_types.go b/apis/signer/v1beta2/zz_signingprofile_types.go new file mode 100755 index 0000000000..9dfec9d550 --- /dev/null +++ b/apis/signer/v1beta2/zz_signingprofile_types.go @@ -0,0 +1,222 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SignatureValidityPeriodInitParameters struct { + + // The time unit for signature validity. Valid values: DAYS, MONTHS, YEARS. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The numerical value of the time unit for signature validity. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type SignatureValidityPeriodObservation struct { + + // The time unit for signature validity. Valid values: DAYS, MONTHS, YEARS. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The numerical value of the time unit for signature validity. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type SignatureValidityPeriodParameters struct { + + // The time unit for signature validity. Valid values: DAYS, MONTHS, YEARS. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The numerical value of the time unit for signature validity. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type SigningMaterialInitParameters struct { + + // The Amazon Resource Name (ARN) of the certificates that is used to sign your code. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` +} + +type SigningMaterialObservation struct { + + // The Amazon Resource Name (ARN) of the certificates that is used to sign your code. + CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"` +} + +type SigningMaterialParameters struct { + + // The Amazon Resource Name (ARN) of the certificates that is used to sign your code. + // +kubebuilder:validation:Optional + CertificateArn *string `json:"certificateArn" tf:"certificate_arn,omitempty"` +} + +type SigningProfileInitParameters struct { + + // The ID of the platform that is used by the target signing profile. + PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` + + // The validity period for a signing job. See signature_validity_period Block below for details. + SignatureValidityPeriod *SignatureValidityPeriodInitParameters `json:"signatureValidityPeriod,omitempty" tf:"signature_validity_period,omitempty"` + + // The AWS Certificate Manager certificate that will be used to sign code with the new signing profile. See signing_material Block below for details. + SigningMaterial *SigningMaterialInitParameters `json:"signingMaterial,omitempty" tf:"signing_material,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SigningProfileObservation struct { + + // The Amazon Resource Name (ARN) for the signing profile. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A human-readable name for the signing platform associated with the signing profile. + PlatformDisplayName *string `json:"platformDisplayName,omitempty" tf:"platform_display_name,omitempty"` + + // The ID of the platform that is used by the target signing profile. + PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` + + // Revocation information for a signing profile. See revocation_record Block below for details. + RevocationRecord []SigningProfileRevocationRecordObservation `json:"revocationRecord,omitempty" tf:"revocation_record,omitempty"` + + // The validity period for a signing job. See signature_validity_period Block below for details. + SignatureValidityPeriod *SignatureValidityPeriodObservation `json:"signatureValidityPeriod,omitempty" tf:"signature_validity_period,omitempty"` + + // The AWS Certificate Manager certificate that will be used to sign code with the new signing profile. See signing_material Block below for details. + SigningMaterial *SigningMaterialObservation `json:"signingMaterial,omitempty" tf:"signing_material,omitempty"` + + // The status of the target signing profile. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The current version of the signing profile. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The signing profile ARN, including the profile version. + VersionArn *string `json:"versionArn,omitempty" tf:"version_arn,omitempty"` +} + +type SigningProfileParameters struct { + + // The ID of the platform that is used by the target signing profile. + // +kubebuilder:validation:Optional + PlatformID *string `json:"platformId,omitempty" tf:"platform_id,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The validity period for a signing job. See signature_validity_period Block below for details. + // +kubebuilder:validation:Optional + SignatureValidityPeriod *SignatureValidityPeriodParameters `json:"signatureValidityPeriod,omitempty" tf:"signature_validity_period,omitempty"` + + // The AWS Certificate Manager certificate that will be used to sign code with the new signing profile. See signing_material Block below for details. + // +kubebuilder:validation:Optional + SigningMaterial *SigningMaterialParameters `json:"signingMaterial,omitempty" tf:"signing_material,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SigningProfileRevocationRecordInitParameters struct { +} + +type SigningProfileRevocationRecordObservation struct { + + // The time when revocation becomes effective. + RevocationEffectiveFrom *string `json:"revocationEffectiveFrom,omitempty" tf:"revocation_effective_from,omitempty"` + + // The time when the signing profile was revoked. + RevokedAt *string `json:"revokedAt,omitempty" tf:"revoked_at,omitempty"` + + // The identity of the revoker. 
+ RevokedBy *string `json:"revokedBy,omitempty" tf:"revoked_by,omitempty"` +} + +type SigningProfileRevocationRecordParameters struct { +} + +// SigningProfileSpec defines the desired state of SigningProfile +type SigningProfileSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SigningProfileParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SigningProfileInitParameters `json:"initProvider,omitempty"` +} + +// SigningProfileStatus defines the observed state of SigningProfile. +type SigningProfileStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SigningProfileObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SigningProfile is the Schema for the SigningProfiles API. Creates a Signer Signing Profile. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SigningProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.platformId) || (has(self.initProvider) && has(self.initProvider.platformId))",message="spec.forProvider.platformId is a required parameter" + Spec SigningProfileSpec `json:"spec"` + Status SigningProfileStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SigningProfileList contains a list of SigningProfiles +type SigningProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SigningProfile `json:"items"` +} + +// Repository type metadata. +var ( + SigningProfile_Kind = "SigningProfile" + SigningProfile_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SigningProfile_Kind}.String() + SigningProfile_KindAPIVersion = SigningProfile_Kind + "." 
+ CRDGroupVersion.String() + SigningProfile_GroupVersionKind = CRDGroupVersion.WithKind(SigningProfile_Kind) +) + +func init() { + SchemeBuilder.Register(&SigningProfile{}, &SigningProfileList{}) +} diff --git a/apis/ssm/v1beta1/zz_generated.conversion_hubs.go b/apis/ssm/v1beta1/zz_generated.conversion_hubs.go index c5d33edc6b..5d4d83089a 100755 --- a/apis/ssm/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/ssm/v1beta1/zz_generated.conversion_hubs.go @@ -9,9 +9,6 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Activation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Association) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DefaultPatchBaseline) Hub() {} @@ -24,9 +21,6 @@ func (tr *MaintenanceWindow) Hub() {} // Hub marks this type as a conversion hub. func (tr *MaintenanceWindowTarget) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MaintenanceWindowTask) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Parameter) Hub() {} @@ -36,8 +30,5 @@ func (tr *PatchBaseline) Hub() {} // Hub marks this type as a conversion hub. func (tr *PatchGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ResourceDataSync) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ServiceSetting) Hub() {} diff --git a/apis/ssm/v1beta1/zz_generated.conversion_spokes.go b/apis/ssm/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..b99df003ba --- /dev/null +++ b/apis/ssm/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Association to the hub type. +func (tr *Association) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Association type. +func (tr *Association) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MaintenanceWindowTask to the hub type. +func (tr *MaintenanceWindowTask) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MaintenanceWindowTask type. 
+func (tr *MaintenanceWindowTask) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ResourceDataSync to the hub type. +func (tr *ResourceDataSync) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ResourceDataSync type. +func (tr *ResourceDataSync) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/ssm/v1beta2/zz_association_terraformed.go b/apis/ssm/v1beta2/zz_association_terraformed.go new file mode 100755 index 0000000000..87471ebd55 --- /dev/null +++ b/apis/ssm/v1beta2/zz_association_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Association +func (mg *Association) GetTerraformResourceType() string { + return "aws_ssm_association" +} + +// GetConnectionDetailsMapping for this Association +func (tr *Association) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Association +func (tr *Association) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Association +func (tr *Association) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Association +func (tr *Association) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Association +func (tr *Association) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Association +func (tr *Association) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Association +func (tr *Association) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Association +func (tr *Association) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Association using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Association) LateInitialize(attrs []byte) (bool, error) { + params := &AssociationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Association) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/ssm/v1beta2/zz_association_types.go b/apis/ssm/v1beta2/zz_association_types.go new file mode 100755 index 0000000000..ae34b399b7 --- /dev/null +++ b/apis/ssm/v1beta2/zz_association_types.go @@ -0,0 +1,335 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AssociationInitParameters struct { + + // By default, when you create a new or update associations, the system runs it immediately and then according to the schedule you specified. Enable this option if you do not want an association to run immediately after you create or update it. This parameter is not supported for rate expressions. Default: false. + ApplyOnlyAtCronInterval *bool `json:"applyOnlyAtCronInterval,omitempty" tf:"apply_only_at_cron_interval,omitempty"` + + // The descriptive name for the association. + AssociationName *string `json:"associationName,omitempty" tf:"association_name,omitempty"` + + // Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls. This should be set to the SSM document parameter that will define how your automation will branch out. + AutomationTargetParameterName *string `json:"automationTargetParameterName,omitempty" tf:"automation_target_parameter_name,omitempty"` + + // The compliance severity for the association. 
Can be one of the following: UNSPECIFIED, LOW, MEDIUM, HIGH or CRITICAL + ComplianceSeverity *string `json:"complianceSeverity,omitempty" tf:"compliance_severity,omitempty"` + + // The document version you want to associate with the target(s). Can be a specific version or the default version. + DocumentVersion *string `json:"documentVersion,omitempty" tf:"document_version,omitempty"` + + // The instance ID to apply an SSM document to. Use targets with key InstanceIds for document schema versions 2.0 and above. Use the targets attribute instead. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. + MaxConcurrency *string `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify a number, for example 10, or a percentage of the target set, for example 10%. If you specify a threshold of 3, the stop command is sent when the fourth error is returned. If you specify a threshold of 10% for 50 associations, the stop command is sent when the sixth error is returned. + MaxErrors *string `json:"maxErrors,omitempty" tf:"max_errors,omitempty"` + + // The name of the SSM document to apply. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ssm/v1beta1.Document + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Document in ssm to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Document in ssm to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // An output location block. Output Location is documented below. 
+ OutputLocation *OutputLocationInitParameters `json:"outputLocation,omitempty" tf:"output_location,omitempty"` + + // A block of arbitrary string parameters to pass to the SSM document. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A cron or rate expression that specifies when the association runs. + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // The mode for generating association compliance. You can specify AUTO or MANUAL. + SyncCompliance *string `json:"syncCompliance,omitempty" tf:"sync_compliance,omitempty"` + + // A block containing the targets of the SSM association. Targets are documented below. AWS currently supports a maximum of 5 targets. + Targets []TargetsInitParameters `json:"targets,omitempty" tf:"targets,omitempty"` + + // The number of seconds to wait for the association status to be Success. If Success status is not reached within the given time, create operation will fail. + WaitForSuccessTimeoutSeconds *float64 `json:"waitForSuccessTimeoutSeconds,omitempty" tf:"wait_for_success_timeout_seconds,omitempty"` +} + +type AssociationObservation struct { + + // By default, when you create a new or update associations, the system runs it immediately and then according to the schedule you specified. Enable this option if you do not want an association to run immediately after you create or update it. This parameter is not supported for rate expressions. Default: false. + ApplyOnlyAtCronInterval *bool `json:"applyOnlyAtCronInterval,omitempty" tf:"apply_only_at_cron_interval,omitempty"` + + // The ARN of the SSM association + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ID of the SSM association. + AssociationID *string `json:"associationId,omitempty" tf:"association_id,omitempty"` + + // The descriptive name for the association.
+ AssociationName *string `json:"associationName,omitempty" tf:"association_name,omitempty"` + + // Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls. This should be set to the SSM document parameter that will define how your automation will branch out. + AutomationTargetParameterName *string `json:"automationTargetParameterName,omitempty" tf:"automation_target_parameter_name,omitempty"` + + // The compliance severity for the association. Can be one of the following: UNSPECIFIED, LOW, MEDIUM, HIGH or CRITICAL + ComplianceSeverity *string `json:"complianceSeverity,omitempty" tf:"compliance_severity,omitempty"` + + // The document version you want to associate with the target(s). Can be a specific version or the default version. + DocumentVersion *string `json:"documentVersion,omitempty" tf:"document_version,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The instance ID to apply an SSM document to. Use targets with key InstanceIds for document schema versions 2.0 and above. Use the targets attribute instead. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. + MaxConcurrency *string `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify a number, for example 10, or a percentage of the target set, for example 10%. If you specify a threshold of 3, the stop command is sent when the fourth error is returned. If you specify a threshold of 10% for 50 associations, the stop command is sent when the sixth error is returned. 
+ MaxErrors *string `json:"maxErrors,omitempty" tf:"max_errors,omitempty"` + + // The name of the SSM document to apply. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An output location block. Output Location is documented below. + OutputLocation *OutputLocationObservation `json:"outputLocation,omitempty" tf:"output_location,omitempty"` + + // A block of arbitrary string parameters to pass to the SSM document. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A cron or rate expression that specifies when the association runs. + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // The mode for generating association compliance. You can specify AUTO or MANUAL. + SyncCompliance *string `json:"syncCompliance,omitempty" tf:"sync_compliance,omitempty"` + + // A block containing the targets of the SSM association. Targets are documented below. AWS currently supports a maximum of 5 targets. + Targets []TargetsObservation `json:"targets,omitempty" tf:"targets,omitempty"` + + // The number of seconds to wait for the association status to be Success. If Success status is not reached within the given time, create operation will fail. + WaitForSuccessTimeoutSeconds *float64 `json:"waitForSuccessTimeoutSeconds,omitempty" tf:"wait_for_success_timeout_seconds,omitempty"` +} + +type AssociationParameters struct { + + // By default, when you create a new or update associations, the system runs it immediately and then according to the schedule you specified. Enable this option if you do not want an association to run immediately after you create or update it. This parameter is not supported for rate expressions. Default: false. + // +kubebuilder:validation:Optional + ApplyOnlyAtCronInterval *bool `json:"applyOnlyAtCronInterval,omitempty" tf:"apply_only_at_cron_interval,omitempty"` + + // The descriptive name for the association.
+ // +kubebuilder:validation:Optional + AssociationName *string `json:"associationName,omitempty" tf:"association_name,omitempty"` + + // Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls. This should be set to the SSM document parameter that will define how your automation will branch out. + // +kubebuilder:validation:Optional + AutomationTargetParameterName *string `json:"automationTargetParameterName,omitempty" tf:"automation_target_parameter_name,omitempty"` + + // The compliance severity for the association. Can be one of the following: UNSPECIFIED, LOW, MEDIUM, HIGH or CRITICAL + // +kubebuilder:validation:Optional + ComplianceSeverity *string `json:"complianceSeverity,omitempty" tf:"compliance_severity,omitempty"` + + // The document version you want to associate with the target(s). Can be a specific version or the default version. + // +kubebuilder:validation:Optional + DocumentVersion *string `json:"documentVersion,omitempty" tf:"document_version,omitempty"` + + // The instance ID to apply an SSM document to. Use targets with key InstanceIds for document schema versions 2.0 and above. Use the targets attribute instead. + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. + // +kubebuilder:validation:Optional + MaxConcurrency *string `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify a number, for example 10, or a percentage of the target set, for example 10%. If you specify a threshold of 3, the stop command is sent when the fourth error is returned. 
If you specify a threshold of 10% for 50 associations, the stop command is sent when the sixth error is returned. + // +kubebuilder:validation:Optional + MaxErrors *string `json:"maxErrors,omitempty" tf:"max_errors,omitempty"` + + // The name of the SSM document to apply. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ssm/v1beta1.Document + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Document in ssm to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Document in ssm to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // An output location block. Output Location is documented below. + // +kubebuilder:validation:Optional + OutputLocation *OutputLocationParameters `json:"outputLocation,omitempty" tf:"output_location,omitempty"` + + // A block of arbitrary string parameters to pass to the SSM document. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // A cron or rate expression that specifies when the association runs. + // +kubebuilder:validation:Optional + ScheduleExpression *string `json:"scheduleExpression,omitempty" tf:"schedule_expression,omitempty"` + + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // +kubebuilder:validation:Optional + SyncCompliance *string `json:"syncCompliance,omitempty" tf:"sync_compliance,omitempty"` + + // A block containing the targets of the SSM association. Targets are documented below. AWS currently supports a maximum of 5 targets. 
+ // +kubebuilder:validation:Optional + Targets []TargetsParameters `json:"targets,omitempty" tf:"targets,omitempty"` + + // The number of seconds to wait for the association status to be Success. If Success status is not reached within the given time, create operation will fail. + // +kubebuilder:validation:Optional + WaitForSuccessTimeoutSeconds *float64 `json:"waitForSuccessTimeoutSeconds,omitempty" tf:"wait_for_success_timeout_seconds,omitempty"` +} + +type OutputLocationInitParameters struct { + + // The S3 bucket name. + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // The S3 bucket prefix. Results stored in the root if not configured. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` + + // The S3 bucket region. + S3Region *string `json:"s3Region,omitempty" tf:"s3_region,omitempty"` +} + +type OutputLocationObservation struct { + + // The S3 bucket name. + S3BucketName *string `json:"s3BucketName,omitempty" tf:"s3_bucket_name,omitempty"` + + // The S3 bucket prefix. Results stored in the root if not configured. + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` + + // The S3 bucket region. + S3Region *string `json:"s3Region,omitempty" tf:"s3_region,omitempty"` +} + +type OutputLocationParameters struct { + + // The S3 bucket name. + // +kubebuilder:validation:Optional + S3BucketName *string `json:"s3BucketName" tf:"s3_bucket_name,omitempty"` + + // The S3 bucket prefix. Results stored in the root if not configured. + // +kubebuilder:validation:Optional + S3KeyPrefix *string `json:"s3KeyPrefix,omitempty" tf:"s3_key_prefix,omitempty"` + + // The S3 bucket region. + // +kubebuilder:validation:Optional + S3Region *string `json:"s3Region,omitempty" tf:"s3_region,omitempty"` +} + +type TargetsInitParameters struct { + + // Either InstanceIds or tag:Tag Name to specify an EC2 tag.
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A list of instance IDs or tag values. AWS currently limits this list size to one value. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TargetsObservation struct { + + // Either InstanceIds or tag:Tag Name to specify an EC2 tag. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // A list of instance IDs or tag values. AWS currently limits this list size to one value. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TargetsParameters struct { + + // Either InstanceIds or tag:Tag Name to specify an EC2 tag. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // A list of instance IDs or tag values. AWS currently limits this list size to one value. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +// AssociationSpec defines the desired state of Association +type AssociationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AssociationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AssociationInitParameters `json:"initProvider,omitempty"` +} + +// AssociationStatus defines the observed state of Association. 
+type AssociationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AssociationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Association is the Schema for the Associations API. Associates an SSM Document to an instance or EC2 tag. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Association struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AssociationSpec `json:"spec"` + Status AssociationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AssociationList contains a list of Associations +type AssociationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Association `json:"items"` +} + +// Repository type metadata. +var ( + Association_Kind = "Association" + Association_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Association_Kind}.String() + Association_KindAPIVersion = Association_Kind + "." 
+ CRDGroupVersion.String() + Association_GroupVersionKind = CRDGroupVersion.WithKind(Association_Kind) +) + +func init() { + SchemeBuilder.Register(&Association{}, &AssociationList{}) +} diff --git a/apis/ssm/v1beta2/zz_generated.conversion_hubs.go b/apis/ssm/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2261ad6399 --- /dev/null +++ b/apis/ssm/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Association) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MaintenanceWindowTask) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ResourceDataSync) Hub() {} diff --git a/apis/ssm/v1beta2/zz_generated.deepcopy.go b/apis/ssm/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..286689c822 --- /dev/null +++ b/apis/ssm/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2433 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Association) DeepCopyInto(out *Association) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Association. 
+func (in *Association) DeepCopy() *Association { + if in == nil { + return nil + } + out := new(Association) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Association) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssociationInitParameters) DeepCopyInto(out *AssociationInitParameters) { + *out = *in + if in.ApplyOnlyAtCronInterval != nil { + in, out := &in.ApplyOnlyAtCronInterval, &out.ApplyOnlyAtCronInterval + *out = new(bool) + **out = **in + } + if in.AssociationName != nil { + in, out := &in.AssociationName, &out.AssociationName + *out = new(string) + **out = **in + } + if in.AutomationTargetParameterName != nil { + in, out := &in.AutomationTargetParameterName, &out.AutomationTargetParameterName + *out = new(string) + **out = **in + } + if in.ComplianceSeverity != nil { + in, out := &in.ComplianceSeverity, &out.ComplianceSeverity + *out = new(string) + **out = **in + } + if in.DocumentVersion != nil { + in, out := &in.DocumentVersion, &out.DocumentVersion + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(string) + **out = **in + } + if in.MaxErrors != nil { + in, out := &in.MaxErrors, &out.MaxErrors + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.OutputLocation != nil { + in, out := &in.OutputLocation, &out.OutputLocation + *out = new(OutputLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.SyncCompliance != nil { + in, out := &in.SyncCompliance, &out.SyncCompliance + *out = new(string) + **out = **in + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]TargetsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WaitForSuccessTimeoutSeconds != nil { + in, out := &in.WaitForSuccessTimeoutSeconds, &out.WaitForSuccessTimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociationInitParameters. +func (in *AssociationInitParameters) DeepCopy() *AssociationInitParameters { + if in == nil { + return nil + } + out := new(AssociationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssociationList) DeepCopyInto(out *AssociationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Association, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociationList. 
+func (in *AssociationList) DeepCopy() *AssociationList { + if in == nil { + return nil + } + out := new(AssociationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AssociationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssociationObservation) DeepCopyInto(out *AssociationObservation) { + *out = *in + if in.ApplyOnlyAtCronInterval != nil { + in, out := &in.ApplyOnlyAtCronInterval, &out.ApplyOnlyAtCronInterval + *out = new(bool) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.AssociationID != nil { + in, out := &in.AssociationID, &out.AssociationID + *out = new(string) + **out = **in + } + if in.AssociationName != nil { + in, out := &in.AssociationName, &out.AssociationName + *out = new(string) + **out = **in + } + if in.AutomationTargetParameterName != nil { + in, out := &in.AutomationTargetParameterName, &out.AutomationTargetParameterName + *out = new(string) + **out = **in + } + if in.ComplianceSeverity != nil { + in, out := &in.ComplianceSeverity, &out.ComplianceSeverity + *out = new(string) + **out = **in + } + if in.DocumentVersion != nil { + in, out := &in.DocumentVersion, &out.DocumentVersion + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(string) + **out = **in + } + if in.MaxErrors != nil { + in, out := &in.MaxErrors, &out.MaxErrors + *out = new(string) + **out = **in + } + if in.Name != nil { 
+ in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutputLocation != nil { + in, out := &in.OutputLocation, &out.OutputLocation + *out = new(OutputLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.SyncCompliance != nil { + in, out := &in.SyncCompliance, &out.SyncCompliance + *out = new(string) + **out = **in + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]TargetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WaitForSuccessTimeoutSeconds != nil { + in, out := &in.WaitForSuccessTimeoutSeconds, &out.WaitForSuccessTimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociationObservation. +func (in *AssociationObservation) DeepCopy() *AssociationObservation { + if in == nil { + return nil + } + out := new(AssociationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AssociationParameters) DeepCopyInto(out *AssociationParameters) { + *out = *in + if in.ApplyOnlyAtCronInterval != nil { + in, out := &in.ApplyOnlyAtCronInterval, &out.ApplyOnlyAtCronInterval + *out = new(bool) + **out = **in + } + if in.AssociationName != nil { + in, out := &in.AssociationName, &out.AssociationName + *out = new(string) + **out = **in + } + if in.AutomationTargetParameterName != nil { + in, out := &in.AutomationTargetParameterName, &out.AutomationTargetParameterName + *out = new(string) + **out = **in + } + if in.ComplianceSeverity != nil { + in, out := &in.ComplianceSeverity, &out.ComplianceSeverity + *out = new(string) + **out = **in + } + if in.DocumentVersion != nil { + in, out := &in.DocumentVersion, &out.DocumentVersion + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(string) + **out = **in + } + if in.MaxErrors != nil { + in, out := &in.MaxErrors, &out.MaxErrors + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OutputLocation != nil { + in, out := &in.OutputLocation, &out.OutputLocation + *out = new(OutputLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + 
if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ScheduleExpression != nil { + in, out := &in.ScheduleExpression, &out.ScheduleExpression + *out = new(string) + **out = **in + } + if in.SyncCompliance != nil { + in, out := &in.SyncCompliance, &out.SyncCompliance + *out = new(string) + **out = **in + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]TargetsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WaitForSuccessTimeoutSeconds != nil { + in, out := &in.WaitForSuccessTimeoutSeconds, &out.WaitForSuccessTimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociationParameters. +func (in *AssociationParameters) DeepCopy() *AssociationParameters { + if in == nil { + return nil + } + out := new(AssociationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssociationSpec) DeepCopyInto(out *AssociationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociationSpec. +func (in *AssociationSpec) DeepCopy() *AssociationSpec { + if in == nil { + return nil + } + out := new(AssociationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AssociationStatus) DeepCopyInto(out *AssociationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociationStatus. +func (in *AssociationStatus) DeepCopy() *AssociationStatus { + if in == nil { + return nil + } + out := new(AssociationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomationParametersInitParameters) DeepCopyInto(out *AutomationParametersInitParameters) { + *out = *in + if in.DocumentVersion != nil { + in, out := &in.DocumentVersion, &out.DocumentVersion + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomationParametersInitParameters. +func (in *AutomationParametersInitParameters) DeepCopy() *AutomationParametersInitParameters { + if in == nil { + return nil + } + out := new(AutomationParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomationParametersObservation) DeepCopyInto(out *AutomationParametersObservation) { + *out = *in + if in.DocumentVersion != nil { + in, out := &in.DocumentVersion, &out.DocumentVersion + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomationParametersObservation. +func (in *AutomationParametersObservation) DeepCopy() *AutomationParametersObservation { + if in == nil { + return nil + } + out := new(AutomationParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomationParametersParameters) DeepCopyInto(out *AutomationParametersParameters) { + *out = *in + if in.DocumentVersion != nil { + in, out := &in.DocumentVersion, &out.DocumentVersion + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomationParametersParameters. +func (in *AutomationParametersParameters) DeepCopy() *AutomationParametersParameters { + if in == nil { + return nil + } + out := new(AutomationParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchConfigInitParameters) DeepCopyInto(out *CloudwatchConfigInitParameters) { + *out = *in + if in.CloudwatchLogGroupName != nil { + in, out := &in.CloudwatchLogGroupName, &out.CloudwatchLogGroupName + *out = new(string) + **out = **in + } + if in.CloudwatchOutputEnabled != nil { + in, out := &in.CloudwatchOutputEnabled, &out.CloudwatchOutputEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchConfigInitParameters. +func (in *CloudwatchConfigInitParameters) DeepCopy() *CloudwatchConfigInitParameters { + if in == nil { + return nil + } + out := new(CloudwatchConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudwatchConfigObservation) DeepCopyInto(out *CloudwatchConfigObservation) { + *out = *in + if in.CloudwatchLogGroupName != nil { + in, out := &in.CloudwatchLogGroupName, &out.CloudwatchLogGroupName + *out = new(string) + **out = **in + } + if in.CloudwatchOutputEnabled != nil { + in, out := &in.CloudwatchOutputEnabled, &out.CloudwatchOutputEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchConfigObservation. +func (in *CloudwatchConfigObservation) DeepCopy() *CloudwatchConfigObservation { + if in == nil { + return nil + } + out := new(CloudwatchConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudwatchConfigParameters) DeepCopyInto(out *CloudwatchConfigParameters) { + *out = *in + if in.CloudwatchLogGroupName != nil { + in, out := &in.CloudwatchLogGroupName, &out.CloudwatchLogGroupName + *out = new(string) + **out = **in + } + if in.CloudwatchOutputEnabled != nil { + in, out := &in.CloudwatchOutputEnabled, &out.CloudwatchOutputEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudwatchConfigParameters. +func (in *CloudwatchConfigParameters) DeepCopy() *CloudwatchConfigParameters { + if in == nil { + return nil + } + out := new(CloudwatchConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaParametersInitParameters) DeepCopyInto(out *LambdaParametersInitParameters) { + *out = *in + if in.ClientContext != nil { + in, out := &in.ClientContext, &out.ClientContext + *out = new(string) + **out = **in + } + if in.PayloadSecretRef != nil { + in, out := &in.PayloadSecretRef, &out.PayloadSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Qualifier != nil { + in, out := &in.Qualifier, &out.Qualifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaParametersInitParameters. +func (in *LambdaParametersInitParameters) DeepCopy() *LambdaParametersInitParameters { + if in == nil { + return nil + } + out := new(LambdaParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LambdaParametersObservation) DeepCopyInto(out *LambdaParametersObservation) { + *out = *in + if in.ClientContext != nil { + in, out := &in.ClientContext, &out.ClientContext + *out = new(string) + **out = **in + } + if in.Qualifier != nil { + in, out := &in.Qualifier, &out.Qualifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaParametersObservation. +func (in *LambdaParametersObservation) DeepCopy() *LambdaParametersObservation { + if in == nil { + return nil + } + out := new(LambdaParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LambdaParametersParameters) DeepCopyInto(out *LambdaParametersParameters) { + *out = *in + if in.ClientContext != nil { + in, out := &in.ClientContext, &out.ClientContext + *out = new(string) + **out = **in + } + if in.PayloadSecretRef != nil { + in, out := &in.PayloadSecretRef, &out.PayloadSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Qualifier != nil { + in, out := &in.Qualifier, &out.Qualifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LambdaParametersParameters. +func (in *LambdaParametersParameters) DeepCopy() *LambdaParametersParameters { + if in == nil { + return nil + } + out := new(LambdaParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceWindowTask) DeepCopyInto(out *MaintenanceWindowTask) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowTask. +func (in *MaintenanceWindowTask) DeepCopy() *MaintenanceWindowTask { + if in == nil { + return nil + } + out := new(MaintenanceWindowTask) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MaintenanceWindowTask) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowTaskInitParameters) DeepCopyInto(out *MaintenanceWindowTaskInitParameters) { + *out = *in + if in.CutoffBehavior != nil { + in, out := &in.CutoffBehavior, &out.CutoffBehavior + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(string) + **out = **in + } + if in.MaxErrors != nil { + in, out := &in.MaxErrors, &out.MaxErrors + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]MaintenanceWindowTaskTargetsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TaskArn != nil { + in, out := &in.TaskArn, &out.TaskArn + *out = new(string) + **out = **in + } + if in.TaskArnRef != nil { + in, out := &in.TaskArnRef, &out.TaskArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TaskArnSelector != nil { + in, out := &in.TaskArnSelector, &out.TaskArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TaskInvocationParameters != nil { + in, out := &in.TaskInvocationParameters, &out.TaskInvocationParameters + *out = new(TaskInvocationParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TaskType != nil { + in, out := &in.TaskType, &out.TaskType + *out = new(string) + **out = **in + } + if in.WindowID != nil { + in, out := &in.WindowID, &out.WindowID + *out = new(string) + **out = **in + } + if in.WindowIDRef != nil { + in, out := &in.WindowIDRef, &out.WindowIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WindowIDSelector != nil { + in, out := &in.WindowIDSelector, &out.WindowIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowTaskInitParameters. +func (in *MaintenanceWindowTaskInitParameters) DeepCopy() *MaintenanceWindowTaskInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowTaskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceWindowTaskList) DeepCopyInto(out *MaintenanceWindowTaskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MaintenanceWindowTask, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowTaskList. +func (in *MaintenanceWindowTaskList) DeepCopy() *MaintenanceWindowTaskList { + if in == nil { + return nil + } + out := new(MaintenanceWindowTaskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MaintenanceWindowTaskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceWindowTaskObservation) DeepCopyInto(out *MaintenanceWindowTaskObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CutoffBehavior != nil { + in, out := &in.CutoffBehavior, &out.CutoffBehavior + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(string) + **out = **in + } + if in.MaxErrors != nil { + in, out := &in.MaxErrors, &out.MaxErrors + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]MaintenanceWindowTaskTargetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TaskArn != nil { + in, out := &in.TaskArn, &out.TaskArn + *out = new(string) + **out = **in + } + if in.TaskInvocationParameters != nil { + in, out := &in.TaskInvocationParameters, &out.TaskInvocationParameters + *out = new(TaskInvocationParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.TaskType != nil { + in, out := &in.TaskType, &out.TaskType + *out = new(string) + **out = **in + } + if in.WindowID != nil { + in, out := &in.WindowID, &out.WindowID + *out = new(string) + **out = **in + } + if in.WindowTaskID != nil { + in, out := &in.WindowTaskID, &out.WindowTaskID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new MaintenanceWindowTaskObservation. +func (in *MaintenanceWindowTaskObservation) DeepCopy() *MaintenanceWindowTaskObservation { + if in == nil { + return nil + } + out := new(MaintenanceWindowTaskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowTaskParameters) DeepCopyInto(out *MaintenanceWindowTaskParameters) { + *out = *in + if in.CutoffBehavior != nil { + in, out := &in.CutoffBehavior, &out.CutoffBehavior + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(string) + **out = **in + } + if in.MaxErrors != nil { + in, out := &in.MaxErrors, &out.MaxErrors + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]MaintenanceWindowTaskTargetsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TaskArn != nil { + in, out := &in.TaskArn, &out.TaskArn + *out = 
new(string) + **out = **in + } + if in.TaskArnRef != nil { + in, out := &in.TaskArnRef, &out.TaskArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TaskArnSelector != nil { + in, out := &in.TaskArnSelector, &out.TaskArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TaskInvocationParameters != nil { + in, out := &in.TaskInvocationParameters, &out.TaskInvocationParameters + *out = new(TaskInvocationParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.TaskType != nil { + in, out := &in.TaskType, &out.TaskType + *out = new(string) + **out = **in + } + if in.WindowID != nil { + in, out := &in.WindowID, &out.WindowID + *out = new(string) + **out = **in + } + if in.WindowIDRef != nil { + in, out := &in.WindowIDRef, &out.WindowIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WindowIDSelector != nil { + in, out := &in.WindowIDSelector, &out.WindowIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowTaskParameters. +func (in *MaintenanceWindowTaskParameters) DeepCopy() *MaintenanceWindowTaskParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowTaskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowTaskSpec) DeepCopyInto(out *MaintenanceWindowTaskSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowTaskSpec. 
+func (in *MaintenanceWindowTaskSpec) DeepCopy() *MaintenanceWindowTaskSpec { + if in == nil { + return nil + } + out := new(MaintenanceWindowTaskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowTaskStatus) DeepCopyInto(out *MaintenanceWindowTaskStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowTaskStatus. +func (in *MaintenanceWindowTaskStatus) DeepCopy() *MaintenanceWindowTaskStatus { + if in == nil { + return nil + } + out := new(MaintenanceWindowTaskStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowTaskTargetsInitParameters) DeepCopyInto(out *MaintenanceWindowTaskTargetsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowTaskTargetsInitParameters. +func (in *MaintenanceWindowTaskTargetsInitParameters) DeepCopy() *MaintenanceWindowTaskTargetsInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowTaskTargetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceWindowTaskTargetsObservation) DeepCopyInto(out *MaintenanceWindowTaskTargetsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowTaskTargetsObservation. +func (in *MaintenanceWindowTaskTargetsObservation) DeepCopy() *MaintenanceWindowTaskTargetsObservation { + if in == nil { + return nil + } + out := new(MaintenanceWindowTaskTargetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowTaskTargetsParameters) DeepCopyInto(out *MaintenanceWindowTaskTargetsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowTaskTargetsParameters. +func (in *MaintenanceWindowTaskTargetsParameters) DeepCopy() *MaintenanceWindowTaskTargetsParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowTaskTargetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationConfigInitParameters) DeepCopyInto(out *NotificationConfigInitParameters) { + *out = *in + if in.NotificationArn != nil { + in, out := &in.NotificationArn, &out.NotificationArn + *out = new(string) + **out = **in + } + if in.NotificationArnRef != nil { + in, out := &in.NotificationArnRef, &out.NotificationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NotificationArnSelector != nil { + in, out := &in.NotificationArnSelector, &out.NotificationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NotificationEvents != nil { + in, out := &in.NotificationEvents, &out.NotificationEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NotificationType != nil { + in, out := &in.NotificationType, &out.NotificationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationConfigInitParameters. +func (in *NotificationConfigInitParameters) DeepCopy() *NotificationConfigInitParameters { + if in == nil { + return nil + } + out := new(NotificationConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationConfigObservation) DeepCopyInto(out *NotificationConfigObservation) { + *out = *in + if in.NotificationArn != nil { + in, out := &in.NotificationArn, &out.NotificationArn + *out = new(string) + **out = **in + } + if in.NotificationEvents != nil { + in, out := &in.NotificationEvents, &out.NotificationEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NotificationType != nil { + in, out := &in.NotificationType, &out.NotificationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationConfigObservation. +func (in *NotificationConfigObservation) DeepCopy() *NotificationConfigObservation { + if in == nil { + return nil + } + out := new(NotificationConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationConfigParameters) DeepCopyInto(out *NotificationConfigParameters) { + *out = *in + if in.NotificationArn != nil { + in, out := &in.NotificationArn, &out.NotificationArn + *out = new(string) + **out = **in + } + if in.NotificationArnRef != nil { + in, out := &in.NotificationArnRef, &out.NotificationArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NotificationArnSelector != nil { + in, out := &in.NotificationArnSelector, &out.NotificationArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NotificationEvents != nil { + in, out := &in.NotificationEvents, &out.NotificationEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NotificationType != nil { + in, out := &in.NotificationType, &out.NotificationType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationConfigParameters. +func (in *NotificationConfigParameters) DeepCopy() *NotificationConfigParameters { + if in == nil { + return nil + } + out := new(NotificationConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputLocationInitParameters) DeepCopyInto(out *OutputLocationInitParameters) { + *out = *in + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } + if in.S3Region != nil { + in, out := &in.S3Region, &out.S3Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputLocationInitParameters. 
+func (in *OutputLocationInitParameters) DeepCopy() *OutputLocationInitParameters { + if in == nil { + return nil + } + out := new(OutputLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputLocationObservation) DeepCopyInto(out *OutputLocationObservation) { + *out = *in + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } + if in.S3Region != nil { + in, out := &in.S3Region, &out.S3Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputLocationObservation. +func (in *OutputLocationObservation) DeepCopy() *OutputLocationObservation { + if in == nil { + return nil + } + out := new(OutputLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputLocationParameters) DeepCopyInto(out *OutputLocationParameters) { + *out = *in + if in.S3BucketName != nil { + in, out := &in.S3BucketName, &out.S3BucketName + *out = new(string) + **out = **in + } + if in.S3KeyPrefix != nil { + in, out := &in.S3KeyPrefix, &out.S3KeyPrefix + *out = new(string) + **out = **in + } + if in.S3Region != nil { + in, out := &in.S3Region, &out.S3Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputLocationParameters. 
+func (in *OutputLocationParameters) DeepCopy() *OutputLocationParameters { + if in == nil { + return nil + } + out := new(OutputLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterInitParameters) DeepCopyInto(out *ParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterInitParameters. +func (in *ParameterInitParameters) DeepCopy() *ParameterInitParameters { + if in == nil { + return nil + } + out := new(ParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterObservation) DeepCopyInto(out *ParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterObservation. +func (in *ParameterObservation) DeepCopy() *ParameterObservation { + if in == nil { + return nil + } + out := new(ParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParameterParameters) DeepCopyInto(out *ParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterParameters. +func (in *ParameterParameters) DeepCopy() *ParameterParameters { + if in == nil { + return nil + } + out := new(ParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDataSync) DeepCopyInto(out *ResourceDataSync) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDataSync. +func (in *ResourceDataSync) DeepCopy() *ResourceDataSync { + if in == nil { + return nil + } + out := new(ResourceDataSync) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceDataSync) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDataSyncInitParameters) DeepCopyInto(out *ResourceDataSyncInitParameters) { + *out = *in + if in.S3Destination != nil { + in, out := &in.S3Destination, &out.S3Destination + *out = new(S3DestinationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDataSyncInitParameters. +func (in *ResourceDataSyncInitParameters) DeepCopy() *ResourceDataSyncInitParameters { + if in == nil { + return nil + } + out := new(ResourceDataSyncInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDataSyncList) DeepCopyInto(out *ResourceDataSyncList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceDataSync, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDataSyncList. +func (in *ResourceDataSyncList) DeepCopy() *ResourceDataSyncList { + if in == nil { + return nil + } + out := new(ResourceDataSyncList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceDataSyncList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDataSyncObservation) DeepCopyInto(out *ResourceDataSyncObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.S3Destination != nil { + in, out := &in.S3Destination, &out.S3Destination + *out = new(S3DestinationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDataSyncObservation. +func (in *ResourceDataSyncObservation) DeepCopy() *ResourceDataSyncObservation { + if in == nil { + return nil + } + out := new(ResourceDataSyncObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDataSyncParameters) DeepCopyInto(out *ResourceDataSyncParameters) { + *out = *in + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.S3Destination != nil { + in, out := &in.S3Destination, &out.S3Destination + *out = new(S3DestinationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDataSyncParameters. +func (in *ResourceDataSyncParameters) DeepCopy() *ResourceDataSyncParameters { + if in == nil { + return nil + } + out := new(ResourceDataSyncParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDataSyncSpec) DeepCopyInto(out *ResourceDataSyncSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDataSyncSpec. 
+func (in *ResourceDataSyncSpec) DeepCopy() *ResourceDataSyncSpec { + if in == nil { + return nil + } + out := new(ResourceDataSyncSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDataSyncStatus) DeepCopyInto(out *ResourceDataSyncStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDataSyncStatus. +func (in *ResourceDataSyncStatus) DeepCopy() *ResourceDataSyncStatus { + if in == nil { + return nil + } + out := new(ResourceDataSyncStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunCommandParametersInitParameters) DeepCopyInto(out *RunCommandParametersInitParameters) { + *out = *in + if in.CloudwatchConfig != nil { + in, out := &in.CloudwatchConfig, &out.CloudwatchConfig + *out = new(CloudwatchConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.DocumentHash != nil { + in, out := &in.DocumentHash, &out.DocumentHash + *out = new(string) + **out = **in + } + if in.DocumentHashType != nil { + in, out := &in.DocumentHashType, &out.DocumentHashType + *out = new(string) + **out = **in + } + if in.DocumentVersion != nil { + in, out := &in.DocumentVersion, &out.DocumentVersion + *out = new(string) + **out = **in + } + if in.NotificationConfig != nil { + in, out := &in.NotificationConfig, &out.NotificationConfig + *out = new(NotificationConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputS3Bucket != nil { + in, out := &in.OutputS3Bucket, &out.OutputS3Bucket + *out = new(string) + **out = **in + } + if in.OutputS3BucketRef != 
nil { + in, out := &in.OutputS3BucketRef, &out.OutputS3BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OutputS3BucketSelector != nil { + in, out := &in.OutputS3BucketSelector, &out.OutputS3BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OutputS3KeyPrefix != nil { + in, out := &in.OutputS3KeyPrefix, &out.OutputS3KeyPrefix + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]RunCommandParametersParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunCommandParametersInitParameters. +func (in *RunCommandParametersInitParameters) DeepCopy() *RunCommandParametersInitParameters { + if in == nil { + return nil + } + out := new(RunCommandParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunCommandParametersObservation) DeepCopyInto(out *RunCommandParametersObservation) { + *out = *in + if in.CloudwatchConfig != nil { + in, out := &in.CloudwatchConfig, &out.CloudwatchConfig + *out = new(CloudwatchConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.DocumentHash != nil { + in, out := &in.DocumentHash, &out.DocumentHash + *out = new(string) + **out = **in + } + if in.DocumentHashType != nil { + in, out := &in.DocumentHashType, &out.DocumentHashType + *out = new(string) + **out = **in + } + if in.DocumentVersion != nil { + in, out := &in.DocumentVersion, &out.DocumentVersion + *out = new(string) + **out = **in + } + if in.NotificationConfig != nil { + in, out := &in.NotificationConfig, &out.NotificationConfig + *out = new(NotificationConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.OutputS3Bucket != nil { + in, out := &in.OutputS3Bucket, &out.OutputS3Bucket + *out = new(string) + **out = **in + } + if in.OutputS3KeyPrefix != nil { + in, out := &in.OutputS3KeyPrefix, &out.OutputS3KeyPrefix + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]RunCommandParametersParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunCommandParametersObservation. 
+func (in *RunCommandParametersObservation) DeepCopy() *RunCommandParametersObservation { + if in == nil { + return nil + } + out := new(RunCommandParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunCommandParametersParameterInitParameters) DeepCopyInto(out *RunCommandParametersParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunCommandParametersParameterInitParameters. +func (in *RunCommandParametersParameterInitParameters) DeepCopy() *RunCommandParametersParameterInitParameters { + if in == nil { + return nil + } + out := new(RunCommandParametersParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunCommandParametersParameterObservation) DeepCopyInto(out *RunCommandParametersParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunCommandParametersParameterObservation. 
+func (in *RunCommandParametersParameterObservation) DeepCopy() *RunCommandParametersParameterObservation { + if in == nil { + return nil + } + out := new(RunCommandParametersParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunCommandParametersParameterParameters) DeepCopyInto(out *RunCommandParametersParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunCommandParametersParameterParameters. +func (in *RunCommandParametersParameterParameters) DeepCopy() *RunCommandParametersParameterParameters { + if in == nil { + return nil + } + out := new(RunCommandParametersParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunCommandParametersParameters) DeepCopyInto(out *RunCommandParametersParameters) { + *out = *in + if in.CloudwatchConfig != nil { + in, out := &in.CloudwatchConfig, &out.CloudwatchConfig + *out = new(CloudwatchConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Comment != nil { + in, out := &in.Comment, &out.Comment + *out = new(string) + **out = **in + } + if in.DocumentHash != nil { + in, out := &in.DocumentHash, &out.DocumentHash + *out = new(string) + **out = **in + } + if in.DocumentHashType != nil { + in, out := &in.DocumentHashType, &out.DocumentHashType + *out = new(string) + **out = **in + } + if in.DocumentVersion != nil { + in, out := &in.DocumentVersion, &out.DocumentVersion + *out = new(string) + **out = **in + } + if in.NotificationConfig != nil { + in, out := &in.NotificationConfig, &out.NotificationConfig + *out = new(NotificationConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputS3Bucket != nil { + in, out := &in.OutputS3Bucket, &out.OutputS3Bucket + *out = new(string) + **out = **in + } + if in.OutputS3BucketRef != nil { + in, out := &in.OutputS3BucketRef, &out.OutputS3BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OutputS3BucketSelector != nil { + in, out := &in.OutputS3BucketSelector, &out.OutputS3BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OutputS3KeyPrefix != nil { + in, out := &in.OutputS3KeyPrefix, &out.OutputS3KeyPrefix + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]RunCommandParametersParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.ServiceRoleArnRef != nil { + in, out := &in.ServiceRoleArnRef, &out.ServiceRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.ServiceRoleArnSelector != nil { + in, out := &in.ServiceRoleArnSelector, &out.ServiceRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunCommandParametersParameters. +func (in *RunCommandParametersParameters) DeepCopy() *RunCommandParametersParameters { + if in == nil { + return nil + } + out := new(RunCommandParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3DestinationInitParameters) DeepCopyInto(out *S3DestinationInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.SyncFormat != nil { + in, out := &in.SyncFormat, &out.SyncFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationInitParameters. +func (in *S3DestinationInitParameters) DeepCopy() *S3DestinationInitParameters { + if in == nil { + return nil + } + out := new(S3DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *S3DestinationObservation) DeepCopyInto(out *S3DestinationObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SyncFormat != nil { + in, out := &in.SyncFormat, &out.SyncFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationObservation. +func (in *S3DestinationObservation) DeepCopy() *S3DestinationObservation { + if in == nil { + return nil + } + out := new(S3DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3DestinationParameters) DeepCopyInto(out *S3DestinationParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.BucketNameRef != nil { + in, out := &in.BucketNameRef, &out.BucketNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketNameSelector != nil { + in, out := &in.BucketNameSelector, &out.BucketNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyArn != nil { + in, out := &in.KMSKeyArn, &out.KMSKeyArn + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RegionRef != nil { + in, out := &in.RegionRef, &out.RegionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegionSelector != nil { + in, out := &in.RegionSelector, &out.RegionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SyncFormat != nil { + in, out := &in.SyncFormat, &out.SyncFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3DestinationParameters. +func (in *S3DestinationParameters) DeepCopy() *S3DestinationParameters { + if in == nil { + return nil + } + out := new(S3DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepFunctionsParametersInitParameters) DeepCopyInto(out *StepFunctionsParametersInitParameters) { + *out = *in + if in.InputSecretRef != nil { + in, out := &in.InputSecretRef, &out.InputSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepFunctionsParametersInitParameters. +func (in *StepFunctionsParametersInitParameters) DeepCopy() *StepFunctionsParametersInitParameters { + if in == nil { + return nil + } + out := new(StepFunctionsParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepFunctionsParametersObservation) DeepCopyInto(out *StepFunctionsParametersObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepFunctionsParametersObservation. +func (in *StepFunctionsParametersObservation) DeepCopy() *StepFunctionsParametersObservation { + if in == nil { + return nil + } + out := new(StepFunctionsParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepFunctionsParametersParameters) DeepCopyInto(out *StepFunctionsParametersParameters) { + *out = *in + if in.InputSecretRef != nil { + in, out := &in.InputSecretRef, &out.InputSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepFunctionsParametersParameters. 
+func (in *StepFunctionsParametersParameters) DeepCopy() *StepFunctionsParametersParameters { + if in == nil { + return nil + } + out := new(StepFunctionsParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetsInitParameters) DeepCopyInto(out *TargetsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetsInitParameters. +func (in *TargetsInitParameters) DeepCopy() *TargetsInitParameters { + if in == nil { + return nil + } + out := new(TargetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetsObservation) DeepCopyInto(out *TargetsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetsObservation. +func (in *TargetsObservation) DeepCopy() *TargetsObservation { + if in == nil { + return nil + } + out := new(TargetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetsParameters) DeepCopyInto(out *TargetsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetsParameters. +func (in *TargetsParameters) DeepCopy() *TargetsParameters { + if in == nil { + return nil + } + out := new(TargetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskInvocationParametersInitParameters) DeepCopyInto(out *TaskInvocationParametersInitParameters) { + *out = *in + if in.AutomationParameters != nil { + in, out := &in.AutomationParameters, &out.AutomationParameters + *out = new(AutomationParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaParameters != nil { + in, out := &in.LambdaParameters, &out.LambdaParameters + *out = new(LambdaParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RunCommandParameters != nil { + in, out := &in.RunCommandParameters, &out.RunCommandParameters + *out = new(RunCommandParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StepFunctionsParameters != nil { + in, out := &in.StepFunctionsParameters, &out.StepFunctionsParameters + *out = new(StepFunctionsParametersInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskInvocationParametersInitParameters. 
+func (in *TaskInvocationParametersInitParameters) DeepCopy() *TaskInvocationParametersInitParameters { + if in == nil { + return nil + } + out := new(TaskInvocationParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskInvocationParametersObservation) DeepCopyInto(out *TaskInvocationParametersObservation) { + *out = *in + if in.AutomationParameters != nil { + in, out := &in.AutomationParameters, &out.AutomationParameters + *out = new(AutomationParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.LambdaParameters != nil { + in, out := &in.LambdaParameters, &out.LambdaParameters + *out = new(LambdaParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.RunCommandParameters != nil { + in, out := &in.RunCommandParameters, &out.RunCommandParameters + *out = new(RunCommandParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.StepFunctionsParameters != nil { + in, out := &in.StepFunctionsParameters, &out.StepFunctionsParameters + *out = new(StepFunctionsParametersObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskInvocationParametersObservation. +func (in *TaskInvocationParametersObservation) DeepCopy() *TaskInvocationParametersObservation { + if in == nil { + return nil + } + out := new(TaskInvocationParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskInvocationParametersParameters) DeepCopyInto(out *TaskInvocationParametersParameters) { + *out = *in + if in.AutomationParameters != nil { + in, out := &in.AutomationParameters, &out.AutomationParameters + *out = new(AutomationParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.LambdaParameters != nil { + in, out := &in.LambdaParameters, &out.LambdaParameters + *out = new(LambdaParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.RunCommandParameters != nil { + in, out := &in.RunCommandParameters, &out.RunCommandParameters + *out = new(RunCommandParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.StepFunctionsParameters != nil { + in, out := &in.StepFunctionsParameters, &out.StepFunctionsParameters + *out = new(StepFunctionsParametersParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskInvocationParametersParameters. +func (in *TaskInvocationParametersParameters) DeepCopy() *TaskInvocationParametersParameters { + if in == nil { + return nil + } + out := new(TaskInvocationParametersParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ssm/v1beta2/zz_generated.managed.go b/apis/ssm/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..075a711038 --- /dev/null +++ b/apis/ssm/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Association. +func (mg *Association) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Association. +func (mg *Association) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Association. 
+func (mg *Association) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Association. +func (mg *Association) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Association. +func (mg *Association) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Association. +func (mg *Association) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Association. +func (mg *Association) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Association. +func (mg *Association) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Association. +func (mg *Association) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Association. +func (mg *Association) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Association. +func (mg *Association) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Association. +func (mg *Association) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MaintenanceWindowTask. 
+func (mg *MaintenanceWindowTask) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MaintenanceWindowTask. 
+func (mg *MaintenanceWindowTask) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ResourceDataSync. +func (mg *ResourceDataSync) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ResourceDataSync. +func (mg *ResourceDataSync) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ResourceDataSync. +func (mg *ResourceDataSync) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ResourceDataSync. +func (mg *ResourceDataSync) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ResourceDataSync. +func (mg *ResourceDataSync) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ResourceDataSync. +func (mg *ResourceDataSync) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ResourceDataSync. +func (mg *ResourceDataSync) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ResourceDataSync. +func (mg *ResourceDataSync) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ResourceDataSync. +func (mg *ResourceDataSync) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ResourceDataSync. +func (mg *ResourceDataSync) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ResourceDataSync. 
+func (mg *ResourceDataSync) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ResourceDataSync. +func (mg *ResourceDataSync) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ssm/v1beta2/zz_generated.managedlist.go b/apis/ssm/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..d3fed3513a --- /dev/null +++ b/apis/ssm/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AssociationList. +func (l *AssociationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MaintenanceWindowTaskList. +func (l *MaintenanceWindowTaskList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ResourceDataSyncList. +func (l *ResourceDataSyncList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ssm/v1beta2/zz_generated.resolvers.go b/apis/ssm/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..1168160d34 --- /dev/null +++ b/apis/ssm/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,413 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Association) ResolveReferences( // ResolveReferences of this Association. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ssm.aws.upbound.io", "v1beta1", "Document", "DocumentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NameRef, + Selector: mg.Spec.ForProvider.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Name") + } + mg.Spec.ForProvider.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ssm.aws.upbound.io", "v1beta1", "Document", "DocumentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.NameRef, + Selector: mg.Spec.InitProvider.NameSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Name") + } + mg.Spec.InitProvider.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MaintenanceWindowTask. +func (mg *MaintenanceWindowTask) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ServiceRoleArnRef, + Selector: mg.Spec.ForProvider.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceRoleArn") + } + mg.Spec.ForProvider.ServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TaskArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.TaskArnRef, + Selector: mg.Spec.ForProvider.TaskArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.TaskArn") + } + mg.Spec.ForProvider.TaskArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TaskArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.TaskInvocationParameters != nil { + if mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters != nil { + if mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArnRef, + Selector: mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArn") + } + mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArnRef = rsp.ResolvedReference + + } + } + } + if mg.Spec.ForProvider.TaskInvocationParameters != nil { + if mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.OutputS3Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.OutputS3BucketRef, + Selector: mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.OutputS3BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.OutputS3Bucket") + } + mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.OutputS3Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.OutputS3BucketRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.TaskInvocationParameters != nil { + if mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArnRef, + Selector: mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArn") + } + mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArnRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("ssm.aws.upbound.io", "v1beta1", "MaintenanceWindow", "MaintenanceWindowList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WindowID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WindowIDRef, + Selector: mg.Spec.ForProvider.WindowIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WindowID") + } + mg.Spec.ForProvider.WindowID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WindowIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.ServiceRoleArnRef, + Selector: mg.Spec.InitProvider.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceRoleArn") + } + mg.Spec.InitProvider.ServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = 
r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TaskArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.TaskArnRef, + Selector: mg.Spec.InitProvider.TaskArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TaskArn") + } + mg.Spec.InitProvider.TaskArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TaskArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.TaskInvocationParameters != nil { + if mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters != nil { + if mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("sns.aws.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArnRef, + Selector: mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArn") + } + mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.NotificationConfig.NotificationArnRef = rsp.ResolvedReference + + } + } + } + 
if mg.Spec.InitProvider.TaskInvocationParameters != nil { + if mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.OutputS3Bucket), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.OutputS3BucketRef, + Selector: mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.OutputS3BucketSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.OutputS3Bucket") + } + mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.OutputS3Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.OutputS3BucketRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.TaskInvocationParameters != nil { + if mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArnRef, + Selector: 
mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArn") + } + mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TaskInvocationParameters.RunCommandParameters.ServiceRoleArnRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("ssm.aws.upbound.io", "v1beta1", "MaintenanceWindow", "MaintenanceWindowList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.WindowID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.WindowIDRef, + Selector: mg.Spec.InitProvider.WindowIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.WindowID") + } + mg.Spec.InitProvider.WindowID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.WindowIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ResourceDataSync. 
+func (mg *ResourceDataSync) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.S3Destination != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.S3Destination.BucketName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.S3Destination.BucketNameRef, + Selector: mg.Spec.ForProvider.S3Destination.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.S3Destination.BucketName") + } + mg.Spec.ForProvider.S3Destination.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.S3Destination.BucketNameRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.S3Destination != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.S3Destination.Region), + Extract: resource.ExtractParamPath("region", false), + Reference: mg.Spec.ForProvider.S3Destination.RegionRef, + Selector: mg.Spec.ForProvider.S3Destination.RegionSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.S3Destination.Region") + } + mg.Spec.ForProvider.S3Destination.Region = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.S3Destination.RegionRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.S3Destination != nil { + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta2", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.S3Destination.BucketName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.S3Destination.BucketNameRef, + Selector: mg.Spec.InitProvider.S3Destination.BucketNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.S3Destination.BucketName") + } + mg.Spec.InitProvider.S3Destination.BucketName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.S3Destination.BucketNameRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/ssm/v1beta2/zz_groupversion_info.go b/apis/ssm/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..a8ffe72e04 --- /dev/null +++ b/apis/ssm/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=ssm.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "ssm.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ssm/v1beta2/zz_maintenancewindowtask_terraformed.go b/apis/ssm/v1beta2/zz_maintenancewindowtask_terraformed.go new file mode 100755 index 0000000000..f6d2420dd8 --- /dev/null +++ b/apis/ssm/v1beta2/zz_maintenancewindowtask_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MaintenanceWindowTask +func (mg *MaintenanceWindowTask) GetTerraformResourceType() string { + return "aws_ssm_maintenance_window_task" +} + +// GetConnectionDetailsMapping for this MaintenanceWindowTask +func (tr *MaintenanceWindowTask) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"task_invocation_parameters[*].lambda_parameters[*].payload": "taskInvocationParameters[*].lambdaParameters[*].payloadSecretRef", "task_invocation_parameters[*].step_functions_parameters[*].input": "taskInvocationParameters[*].stepFunctionsParameters[*].inputSecretRef"} +} + +// GetObservation of this MaintenanceWindowTask +func (tr *MaintenanceWindowTask) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MaintenanceWindowTask +func (tr *MaintenanceWindowTask) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MaintenanceWindowTask +func (tr *MaintenanceWindowTask) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MaintenanceWindowTask +func (tr *MaintenanceWindowTask) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MaintenanceWindowTask +func (tr *MaintenanceWindowTask) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MaintenanceWindowTask +func (tr *MaintenanceWindowTask) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MaintenanceWindowTask +func (tr *MaintenanceWindowTask) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): 
mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MaintenanceWindowTask using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MaintenanceWindowTask) LateInitialize(attrs []byte) (bool, error) { + params := &MaintenanceWindowTaskParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MaintenanceWindowTask) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ssm/v1beta2/zz_maintenancewindowtask_types.go b/apis/ssm/v1beta2/zz_maintenancewindowtask_types.go new file mode 100755 index 0000000000..5aa805d67e --- /dev/null +++ b/apis/ssm/v1beta2/zz_maintenancewindowtask_types.go @@ -0,0 +1,747 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutomationParametersInitParameters struct { + + // The version of an Automation document to use during task execution. + DocumentVersion *string `json:"documentVersion,omitempty" tf:"document_version,omitempty"` + + // The parameters for the RUN_COMMAND task execution. Documented below. + Parameter []ParameterInitParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` +} + +type AutomationParametersObservation struct { + + // The version of an Automation document to use during task execution. + DocumentVersion *string `json:"documentVersion,omitempty" tf:"document_version,omitempty"` + + // The parameters for the RUN_COMMAND task execution. Documented below. + Parameter []ParameterObservation `json:"parameter,omitempty" tf:"parameter,omitempty"` +} + +type AutomationParametersParameters struct { + + // The version of an Automation document to use during task execution. + // +kubebuilder:validation:Optional + DocumentVersion *string `json:"documentVersion,omitempty" tf:"document_version,omitempty"` + + // The parameters for the RUN_COMMAND task execution. Documented below. + // +kubebuilder:validation:Optional + Parameter []ParameterParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` +} + +type CloudwatchConfigInitParameters struct { + + // The name of the CloudWatch log group where you want to send command output. If you don't specify a group name, Systems Manager automatically creates a log group for you. The log group uses the following naming format: aws/ssm/SystemsManagerDocumentName. + CloudwatchLogGroupName *string `json:"cloudwatchLogGroupName,omitempty" tf:"cloudwatch_log_group_name,omitempty"` + + // Enables Systems Manager to send command output to CloudWatch Logs. 
+ CloudwatchOutputEnabled *bool `json:"cloudwatchOutputEnabled,omitempty" tf:"cloudwatch_output_enabled,omitempty"` +} + +type CloudwatchConfigObservation struct { + + // The name of the CloudWatch log group where you want to send command output. If you don't specify a group name, Systems Manager automatically creates a log group for you. The log group uses the following naming format: aws/ssm/SystemsManagerDocumentName. + CloudwatchLogGroupName *string `json:"cloudwatchLogGroupName,omitempty" tf:"cloudwatch_log_group_name,omitempty"` + + // Enables Systems Manager to send command output to CloudWatch Logs. + CloudwatchOutputEnabled *bool `json:"cloudwatchOutputEnabled,omitempty" tf:"cloudwatch_output_enabled,omitempty"` +} + +type CloudwatchConfigParameters struct { + + // The name of the CloudWatch log group where you want to send command output. If you don't specify a group name, Systems Manager automatically creates a log group for you. The log group uses the following naming format: aws/ssm/SystemsManagerDocumentName. + // +kubebuilder:validation:Optional + CloudwatchLogGroupName *string `json:"cloudwatchLogGroupName,omitempty" tf:"cloudwatch_log_group_name,omitempty"` + + // Enables Systems Manager to send command output to CloudWatch Logs. + // +kubebuilder:validation:Optional + CloudwatchOutputEnabled *bool `json:"cloudwatchOutputEnabled,omitempty" tf:"cloudwatch_output_enabled,omitempty"` +} + +type LambdaParametersInitParameters struct { + + // Pass client-specific information to the Lambda function that you are invoking. + ClientContext *string `json:"clientContext,omitempty" tf:"client_context,omitempty"` + + // JSON to provide to your Lambda function as input. + PayloadSecretRef *v1.SecretKeySelector `json:"payloadSecretRef,omitempty" tf:"-"` + + // Specify a Lambda function version or alias name. 
+ Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` +} + +type LambdaParametersObservation struct { + + // Pass client-specific information to the Lambda function that you are invoking. + ClientContext *string `json:"clientContext,omitempty" tf:"client_context,omitempty"` + + // Specify a Lambda function version or alias name. + Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` +} + +type LambdaParametersParameters struct { + + // Pass client-specific information to the Lambda function that you are invoking. + // +kubebuilder:validation:Optional + ClientContext *string `json:"clientContext,omitempty" tf:"client_context,omitempty"` + + // JSON to provide to your Lambda function as input. + // +kubebuilder:validation:Optional + PayloadSecretRef *v1.SecretKeySelector `json:"payloadSecretRef,omitempty" tf:"-"` + + // Specify a Lambda function version or alias name. + // +kubebuilder:validation:Optional + Qualifier *string `json:"qualifier,omitempty" tf:"qualifier,omitempty"` +} + +type MaintenanceWindowTaskInitParameters struct { + + // Indicates whether tasks should continue to run after the cutoff time specified in the maintenance windows is reached. Valid values are CONTINUE_TASK and CANCEL_TASK. + CutoffBehavior *string `json:"cutoffBehavior,omitempty" tf:"cutoff_behavior,omitempty"` + + // The description of the maintenance window task. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The maximum number of targets this task can be run for in parallel. + MaxConcurrency *string `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The maximum number of errors allowed before this task stops being scheduled. + MaxErrors *string `json:"maxErrors,omitempty" tf:"max_errors,omitempty"` + + // The name of the maintenance window task. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority of the task in the Maintenance Window, the lower the number the higher the priority. Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The role that should be assumed when executing the task. If a role is not provided, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created for you. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // The targets (either instances or window target ids). Instances are specified using Key=InstanceIds,Values=instanceid1,instanceid2. Window target ids are specified using Key=WindowTargetIds,Values=window target id1, window target id2. + Targets []MaintenanceWindowTaskTargetsInitParameters `json:"targets,omitempty" tf:"targets,omitempty"` + + // The ARN of the task to execute. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + TaskArn *string `json:"taskArn,omitempty" tf:"task_arn,omitempty"` + + // Reference to a Function in lambda to populate taskArn. 
+ // +kubebuilder:validation:Optional + TaskArnRef *v1.Reference `json:"taskArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate taskArn. + // +kubebuilder:validation:Optional + TaskArnSelector *v1.Selector `json:"taskArnSelector,omitempty" tf:"-"` + + // Configuration block with parameters for task execution. + TaskInvocationParameters *TaskInvocationParametersInitParameters `json:"taskInvocationParameters,omitempty" tf:"task_invocation_parameters,omitempty"` + + // The type of task being registered. Valid values: AUTOMATION, LAMBDA, RUN_COMMAND or STEP_FUNCTIONS. + TaskType *string `json:"taskType,omitempty" tf:"task_type,omitempty"` + + // The Id of the maintenance window to register the task with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ssm/v1beta1.MaintenanceWindow + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + WindowID *string `json:"windowId,omitempty" tf:"window_id,omitempty"` + + // Reference to a MaintenanceWindow in ssm to populate windowId. + // +kubebuilder:validation:Optional + WindowIDRef *v1.Reference `json:"windowIdRef,omitempty" tf:"-"` + + // Selector for a MaintenanceWindow in ssm to populate windowId. + // +kubebuilder:validation:Optional + WindowIDSelector *v1.Selector `json:"windowIdSelector,omitempty" tf:"-"` +} + +type MaintenanceWindowTaskObservation struct { + + // The ARN of the maintenance window task. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Indicates whether tasks should continue to run after the cutoff time specified in the maintenance windows is reached. Valid values are CONTINUE_TASK and CANCEL_TASK. + CutoffBehavior *string `json:"cutoffBehavior,omitempty" tf:"cutoff_behavior,omitempty"` + + // The description of the maintenance window task. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the maintenance window task. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The maximum number of targets this task can be run for in parallel. + MaxConcurrency *string `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The maximum number of errors allowed before this task stops being scheduled. + MaxErrors *string `json:"maxErrors,omitempty" tf:"max_errors,omitempty"` + + // The name of the maintenance window task. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority of the task in the Maintenance Window, the lower the number the higher the priority. Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The role that should be assumed when executing the task. If a role is not provided, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created for you. + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // The targets (either instances or window target ids). Instances are specified using Key=InstanceIds,Values=instanceid1,instanceid2. Window target ids are specified using Key=WindowTargetIds,Values=window target id1, window target id2. + Targets []MaintenanceWindowTaskTargetsObservation `json:"targets,omitempty" tf:"targets,omitempty"` + + // The ARN of the task to execute. + TaskArn *string `json:"taskArn,omitempty" tf:"task_arn,omitempty"` + + // Configuration block with parameters for task execution. + TaskInvocationParameters *TaskInvocationParametersObservation `json:"taskInvocationParameters,omitempty" tf:"task_invocation_parameters,omitempty"` + + // The type of task being registered. Valid values: AUTOMATION, LAMBDA, RUN_COMMAND or STEP_FUNCTIONS. 
+ TaskType *string `json:"taskType,omitempty" tf:"task_type,omitempty"` + + // The Id of the maintenance window to register the task with. + WindowID *string `json:"windowId,omitempty" tf:"window_id,omitempty"` + + // The ID of the maintenance window task. + WindowTaskID *string `json:"windowTaskId,omitempty" tf:"window_task_id,omitempty"` +} + +type MaintenanceWindowTaskParameters struct { + + // Indicates whether tasks should continue to run after the cutoff time specified in the maintenance windows is reached. Valid values are CONTINUE_TASK and CANCEL_TASK. + // +kubebuilder:validation:Optional + CutoffBehavior *string `json:"cutoffBehavior,omitempty" tf:"cutoff_behavior,omitempty"` + + // The description of the maintenance window task. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The maximum number of targets this task can be run for in parallel. + // +kubebuilder:validation:Optional + MaxConcurrency *string `json:"maxConcurrency,omitempty" tf:"max_concurrency,omitempty"` + + // The maximum number of errors allowed before this task stops being scheduled. + // +kubebuilder:validation:Optional + MaxErrors *string `json:"maxErrors,omitempty" tf:"max_errors,omitempty"` + + // The name of the maintenance window task. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority of the task in the Maintenance Window, the lower the number the higher the priority. Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The role that should be assumed when executing the task. 
If a role is not provided, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created for you. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // The targets (either instances or window target ids). Instances are specified using Key=InstanceIds,Values=instanceid1,instanceid2. Window target ids are specified using Key=WindowTargetIds,Values=window target id1, window target id2. + // +kubebuilder:validation:Optional + Targets []MaintenanceWindowTaskTargetsParameters `json:"targets,omitempty" tf:"targets,omitempty"` + + // The ARN of the task to execute. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + TaskArn *string `json:"taskArn,omitempty" tf:"task_arn,omitempty"` + + // Reference to a Function in lambda to populate taskArn. + // +kubebuilder:validation:Optional + TaskArnRef *v1.Reference `json:"taskArnRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate taskArn. 
+ // +kubebuilder:validation:Optional + TaskArnSelector *v1.Selector `json:"taskArnSelector,omitempty" tf:"-"` + + // Configuration block with parameters for task execution. + // +kubebuilder:validation:Optional + TaskInvocationParameters *TaskInvocationParametersParameters `json:"taskInvocationParameters,omitempty" tf:"task_invocation_parameters,omitempty"` + + // The type of task being registered. Valid values: AUTOMATION, LAMBDA, RUN_COMMAND or STEP_FUNCTIONS. + // +kubebuilder:validation:Optional + TaskType *string `json:"taskType,omitempty" tf:"task_type,omitempty"` + + // The Id of the maintenance window to register the task with. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ssm/v1beta1.MaintenanceWindow + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + WindowID *string `json:"windowId,omitempty" tf:"window_id,omitempty"` + + // Reference to a MaintenanceWindow in ssm to populate windowId. + // +kubebuilder:validation:Optional + WindowIDRef *v1.Reference `json:"windowIdRef,omitempty" tf:"-"` + + // Selector for a MaintenanceWindow in ssm to populate windowId. + // +kubebuilder:validation:Optional + WindowIDSelector *v1.Selector `json:"windowIdSelector,omitempty" tf:"-"` +} + +type MaintenanceWindowTaskTargetsInitParameters struct { + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The array of strings. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MaintenanceWindowTaskTargetsObservation struct { + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The array of strings. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MaintenanceWindowTaskTargetsParameters struct { + + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The array of strings. 
+ // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type NotificationConfigInitParameters struct { + + // An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + NotificationArn *string `json:"notificationArn,omitempty" tf:"notification_arn,omitempty"` + + // Reference to a Topic in sns to populate notificationArn. + // +kubebuilder:validation:Optional + NotificationArnRef *v1.Reference `json:"notificationArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate notificationArn. + // +kubebuilder:validation:Optional + NotificationArnSelector *v1.Selector `json:"notificationArnSelector,omitempty" tf:"-"` + + // The different events for which you can receive notifications. Valid values: All, InProgress, Success, TimedOut, Cancelled, and Failed + NotificationEvents []*string `json:"notificationEvents,omitempty" tf:"notification_events,omitempty"` + + // When specified with Command, receive notification when the status of a command changes. When specified with Invocation, for commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes. Valid values: Command and Invocation + NotificationType *string `json:"notificationType,omitempty" tf:"notification_type,omitempty"` +} + +type NotificationConfigObservation struct { + + // An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic. + NotificationArn *string `json:"notificationArn,omitempty" tf:"notification_arn,omitempty"` + + // The different events for which you can receive notifications. 
Valid values: All, InProgress, Success, TimedOut, Cancelled, and Failed + NotificationEvents []*string `json:"notificationEvents,omitempty" tf:"notification_events,omitempty"` + + // When specified with Command, receive notification when the status of a command changes. When specified with Invocation, for commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes. Valid values: Command and Invocation + NotificationType *string `json:"notificationType,omitempty" tf:"notification_type,omitempty"` +} + +type NotificationConfigParameters struct { + + // An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/sns/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + NotificationArn *string `json:"notificationArn,omitempty" tf:"notification_arn,omitempty"` + + // Reference to a Topic in sns to populate notificationArn. + // +kubebuilder:validation:Optional + NotificationArnRef *v1.Reference `json:"notificationArnRef,omitempty" tf:"-"` + + // Selector for a Topic in sns to populate notificationArn. + // +kubebuilder:validation:Optional + NotificationArnSelector *v1.Selector `json:"notificationArnSelector,omitempty" tf:"-"` + + // The different events for which you can receive notifications. Valid values: All, InProgress, Success, TimedOut, Cancelled, and Failed + // +kubebuilder:validation:Optional + NotificationEvents []*string `json:"notificationEvents,omitempty" tf:"notification_events,omitempty"` + + // When specified with Command, receive notification when the status of a command changes. 
When specified with Invocation, for commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes. Valid values: Command and Invocation + // +kubebuilder:validation:Optional + NotificationType *string `json:"notificationType,omitempty" tf:"notification_type,omitempty"` +} + +type ParameterInitParameters struct { + + // The name of the maintenance window task. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The array of strings. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ParameterObservation struct { + + // The name of the maintenance window task. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The array of strings. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ParameterParameters struct { + + // The name of the maintenance window task. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The array of strings. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type RunCommandParametersInitParameters struct { + + // Configuration options for sending command output to CloudWatch Logs. Documented below. + CloudwatchConfig *CloudwatchConfigInitParameters `json:"cloudwatchConfig,omitempty" tf:"cloudwatch_config,omitempty"` + + // Information about the command(s) to execute. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated. + DocumentHash *string `json:"documentHash,omitempty" tf:"document_hash,omitempty"` + + // SHA-256 or SHA-1. SHA-1 hashes have been deprecated. Valid values: Sha256 and Sha1 + DocumentHashType *string `json:"documentHashType,omitempty" tf:"document_hash_type,omitempty"` + + // The version of an Automation document to use during task execution. 
+ DocumentVersion *string `json:"documentVersion,omitempty" tf:"document_version,omitempty"` + + // Configurations for sending notifications about command status changes on a per-instance basis. Documented below. + NotificationConfig *NotificationConfigInitParameters `json:"notificationConfig,omitempty" tf:"notification_config,omitempty"` + + // The name of the Amazon S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + OutputS3Bucket *string `json:"outputS3Bucket,omitempty" tf:"output_s3_bucket,omitempty"` + + // Reference to a Bucket in s3 to populate outputS3Bucket. + // +kubebuilder:validation:Optional + OutputS3BucketRef *v1.Reference `json:"outputS3BucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate outputS3Bucket. + // +kubebuilder:validation:Optional + OutputS3BucketSelector *v1.Selector `json:"outputS3BucketSelector,omitempty" tf:"-"` + + // The Amazon S3 bucket subfolder. + OutputS3KeyPrefix *string `json:"outputS3KeyPrefix,omitempty" tf:"output_s3_key_prefix,omitempty"` + + // The parameters for the RUN_COMMAND task execution. Documented below. + Parameter []RunCommandParametersParameterInitParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // The role that should be assumed when executing the task. If a role is not provided, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created for you. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. 
+ // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // If this time is reached and the command has not already started executing, it doesn't run. + TimeoutSeconds *float64 `json:"timeoutSeconds,omitempty" tf:"timeout_seconds,omitempty"` +} + +type RunCommandParametersObservation struct { + + // Configuration options for sending command output to CloudWatch Logs. Documented below. + CloudwatchConfig *CloudwatchConfigObservation `json:"cloudwatchConfig,omitempty" tf:"cloudwatch_config,omitempty"` + + // Information about the command(s) to execute. + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated. + DocumentHash *string `json:"documentHash,omitempty" tf:"document_hash,omitempty"` + + // SHA-256 or SHA-1. SHA-1 hashes have been deprecated. Valid values: Sha256 and Sha1 + DocumentHashType *string `json:"documentHashType,omitempty" tf:"document_hash_type,omitempty"` + + // The version of an Automation document to use during task execution. + DocumentVersion *string `json:"documentVersion,omitempty" tf:"document_version,omitempty"` + + // Configurations for sending notifications about command status changes on a per-instance basis. Documented below. + NotificationConfig *NotificationConfigObservation `json:"notificationConfig,omitempty" tf:"notification_config,omitempty"` + + // The name of the Amazon S3 bucket. + OutputS3Bucket *string `json:"outputS3Bucket,omitempty" tf:"output_s3_bucket,omitempty"` + + // The Amazon S3 bucket subfolder. 
+ OutputS3KeyPrefix *string `json:"outputS3KeyPrefix,omitempty" tf:"output_s3_key_prefix,omitempty"` + + // The parameters for the RUN_COMMAND task execution. Documented below. + Parameter []RunCommandParametersParameterObservation `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // The role that should be assumed when executing the task. If a role is not provided, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created for you. + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // If this time is reached and the command has not already started executing, it doesn't run. + TimeoutSeconds *float64 `json:"timeoutSeconds,omitempty" tf:"timeout_seconds,omitempty"` +} + +type RunCommandParametersParameterInitParameters struct { + + // The name of the maintenance window task. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The array of strings. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type RunCommandParametersParameterObservation struct { + + // The name of the maintenance window task. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The array of strings. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type RunCommandParametersParameterParameters struct { + + // The name of the maintenance window task. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The array of strings. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type RunCommandParametersParameters struct { + + // Configuration options for sending command output to CloudWatch Logs. Documented below. 
+ // +kubebuilder:validation:Optional + CloudwatchConfig *CloudwatchConfigParameters `json:"cloudwatchConfig,omitempty" tf:"cloudwatch_config,omitempty"` + + // Information about the command(s) to execute. + // +kubebuilder:validation:Optional + Comment *string `json:"comment,omitempty" tf:"comment,omitempty"` + + // The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated. + // +kubebuilder:validation:Optional + DocumentHash *string `json:"documentHash,omitempty" tf:"document_hash,omitempty"` + + // SHA-256 or SHA-1. SHA-1 hashes have been deprecated. Valid values: Sha256 and Sha1 + // +kubebuilder:validation:Optional + DocumentHashType *string `json:"documentHashType,omitempty" tf:"document_hash_type,omitempty"` + + // The version of an Automation document to use during task execution. + // +kubebuilder:validation:Optional + DocumentVersion *string `json:"documentVersion,omitempty" tf:"document_version,omitempty"` + + // Configurations for sending notifications about command status changes on a per-instance basis. Documented below. + // +kubebuilder:validation:Optional + NotificationConfig *NotificationConfigParameters `json:"notificationConfig,omitempty" tf:"notification_config,omitempty"` + + // The name of the Amazon S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + OutputS3Bucket *string `json:"outputS3Bucket,omitempty" tf:"output_s3_bucket,omitempty"` + + // Reference to a Bucket in s3 to populate outputS3Bucket. + // +kubebuilder:validation:Optional + OutputS3BucketRef *v1.Reference `json:"outputS3BucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate outputS3Bucket. 
+ // +kubebuilder:validation:Optional + OutputS3BucketSelector *v1.Selector `json:"outputS3BucketSelector,omitempty" tf:"-"` + + // The Amazon S3 bucket subfolder. + // +kubebuilder:validation:Optional + OutputS3KeyPrefix *string `json:"outputS3KeyPrefix,omitempty" tf:"output_s3_key_prefix,omitempty"` + + // The parameters for the RUN_COMMAND task execution. Documented below. + // +kubebuilder:validation:Optional + Parameter []RunCommandParametersParameterParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // The role that should be assumed when executing the task. If a role is not provided, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created for you. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // Reference to a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnRef *v1.Reference `json:"serviceRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate serviceRoleArn. + // +kubebuilder:validation:Optional + ServiceRoleArnSelector *v1.Selector `json:"serviceRoleArnSelector,omitempty" tf:"-"` + + // If this time is reached and the command has not already started executing, it doesn't run. + // +kubebuilder:validation:Optional + TimeoutSeconds *float64 `json:"timeoutSeconds,omitempty" tf:"timeout_seconds,omitempty"` +} + +type StepFunctionsParametersInitParameters struct { + + // The inputs for the STEP_FUNCTION task. + InputSecretRef *v1.SecretKeySelector `json:"inputSecretRef,omitempty" tf:"-"` + + // The name of the maintenance window task. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StepFunctionsParametersObservation struct { + + // The name of the maintenance window task. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StepFunctionsParametersParameters struct { + + // The inputs for the STEP_FUNCTION task. + // +kubebuilder:validation:Optional + InputSecretRef *v1.SecretKeySelector `json:"inputSecretRef,omitempty" tf:"-"` + + // The name of the maintenance window task. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TaskInvocationParametersInitParameters struct { + + // The parameters for an AUTOMATION task type. Documented below. + AutomationParameters *AutomationParametersInitParameters `json:"automationParameters,omitempty" tf:"automation_parameters,omitempty"` + + // The parameters for a LAMBDA task type. Documented below. + LambdaParameters *LambdaParametersInitParameters `json:"lambdaParameters,omitempty" tf:"lambda_parameters,omitempty"` + + // The parameters for a RUN_COMMAND task type. Documented below. + RunCommandParameters *RunCommandParametersInitParameters `json:"runCommandParameters,omitempty" tf:"run_command_parameters,omitempty"` + + // The parameters for a STEP_FUNCTIONS task type. Documented below. + StepFunctionsParameters *StepFunctionsParametersInitParameters `json:"stepFunctionsParameters,omitempty" tf:"step_functions_parameters,omitempty"` +} + +type TaskInvocationParametersObservation struct { + + // The parameters for an AUTOMATION task type. Documented below. + AutomationParameters *AutomationParametersObservation `json:"automationParameters,omitempty" tf:"automation_parameters,omitempty"` + + // The parameters for a LAMBDA task type. Documented below. + LambdaParameters *LambdaParametersObservation `json:"lambdaParameters,omitempty" tf:"lambda_parameters,omitempty"` + + // The parameters for a RUN_COMMAND task type. Documented below. 
+ RunCommandParameters *RunCommandParametersObservation `json:"runCommandParameters,omitempty" tf:"run_command_parameters,omitempty"` + + // The parameters for a STEP_FUNCTIONS task type. Documented below. + StepFunctionsParameters *StepFunctionsParametersObservation `json:"stepFunctionsParameters,omitempty" tf:"step_functions_parameters,omitempty"` +} + +type TaskInvocationParametersParameters struct { + + // The parameters for an AUTOMATION task type. Documented below. + // +kubebuilder:validation:Optional + AutomationParameters *AutomationParametersParameters `json:"automationParameters,omitempty" tf:"automation_parameters,omitempty"` + + // The parameters for a LAMBDA task type. Documented below. + // +kubebuilder:validation:Optional + LambdaParameters *LambdaParametersParameters `json:"lambdaParameters,omitempty" tf:"lambda_parameters,omitempty"` + + // The parameters for a RUN_COMMAND task type. Documented below. + // +kubebuilder:validation:Optional + RunCommandParameters *RunCommandParametersParameters `json:"runCommandParameters,omitempty" tf:"run_command_parameters,omitempty"` + + // The parameters for a STEP_FUNCTIONS task type. Documented below. + // +kubebuilder:validation:Optional + StepFunctionsParameters *StepFunctionsParametersParameters `json:"stepFunctionsParameters,omitempty" tf:"step_functions_parameters,omitempty"` +} + +// MaintenanceWindowTaskSpec defines the desired state of MaintenanceWindowTask +type MaintenanceWindowTaskSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MaintenanceWindowTaskParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MaintenanceWindowTaskInitParameters `json:"initProvider,omitempty"` +} + +// MaintenanceWindowTaskStatus defines the observed state of MaintenanceWindowTask. +type MaintenanceWindowTaskStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MaintenanceWindowTaskObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MaintenanceWindowTask is the Schema for the MaintenanceWindowTasks API. Provides an SSM Maintenance Window Task resource +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type MaintenanceWindowTask struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.taskType) || (has(self.initProvider) && has(self.initProvider.taskType))",message="spec.forProvider.taskType is a required parameter" + Spec MaintenanceWindowTaskSpec `json:"spec"` + Status MaintenanceWindowTaskStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MaintenanceWindowTaskList 
contains a list of MaintenanceWindowTasks +type MaintenanceWindowTaskList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MaintenanceWindowTask `json:"items"` +} + +// Repository type metadata. +var ( + MaintenanceWindowTask_Kind = "MaintenanceWindowTask" + MaintenanceWindowTask_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MaintenanceWindowTask_Kind}.String() + MaintenanceWindowTask_KindAPIVersion = MaintenanceWindowTask_Kind + "." + CRDGroupVersion.String() + MaintenanceWindowTask_GroupVersionKind = CRDGroupVersion.WithKind(MaintenanceWindowTask_Kind) +) + +func init() { + SchemeBuilder.Register(&MaintenanceWindowTask{}, &MaintenanceWindowTaskList{}) +} diff --git a/apis/ssm/v1beta2/zz_resourcedatasync_terraformed.go b/apis/ssm/v1beta2/zz_resourcedatasync_terraformed.go new file mode 100755 index 0000000000..d78bb15c92 --- /dev/null +++ b/apis/ssm/v1beta2/zz_resourcedatasync_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ResourceDataSync +func (mg *ResourceDataSync) GetTerraformResourceType() string { + return "aws_ssm_resource_data_sync" +} + +// GetConnectionDetailsMapping for this ResourceDataSync +func (tr *ResourceDataSync) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ResourceDataSync +func (tr *ResourceDataSync) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ResourceDataSync +func (tr *ResourceDataSync) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ResourceDataSync +func (tr *ResourceDataSync) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ResourceDataSync +func (tr *ResourceDataSync) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ResourceDataSync +func (tr *ResourceDataSync) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ResourceDataSync +func (tr *ResourceDataSync) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ResourceDataSync
+func (tr *ResourceDataSync) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this ResourceDataSync using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *ResourceDataSync) LateInitialize(attrs []byte) (bool, error) {
+	params := &ResourceDataSyncParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ResourceDataSync) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ssm/v1beta2/zz_resourcedatasync_types.go b/apis/ssm/v1beta2/zz_resourcedatasync_types.go new file mode 100755 index 0000000000..3de5197912 --- /dev/null +++ b/apis/ssm/v1beta2/zz_resourcedatasync_types.go @@ -0,0 +1,185 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ResourceDataSyncInitParameters struct { + + // Amazon S3 configuration details for the sync. + S3Destination *S3DestinationInitParameters `json:"s3Destination,omitempty" tf:"s3_destination,omitempty"` +} + +type ResourceDataSyncObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Amazon S3 configuration details for the sync. + S3Destination *S3DestinationObservation `json:"s3Destination,omitempty" tf:"s3_destination,omitempty"` +} + +type ResourceDataSyncParameters struct { + + // Region with the bucket targeted by the Resource Data Sync. + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Amazon S3 configuration details for the sync. + // +kubebuilder:validation:Optional + S3Destination *S3DestinationParameters `json:"s3Destination,omitempty" tf:"s3_destination,omitempty"` +} + +type S3DestinationInitParameters struct { + + // Name of S3 bucket where the aggregated data is stored. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // ARN of an encryption key for a destination in Amazon S3. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Prefix for the bucket. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // A supported sync format. Only JsonSerDe is currently supported. Defaults to JsonSerDe. + SyncFormat *string `json:"syncFormat,omitempty" tf:"sync_format,omitempty"` +} + +type S3DestinationObservation struct { + + // Name of S3 bucket where the aggregated data is stored. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // ARN of an encryption key for a destination in Amazon S3. + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Prefix for the bucket. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Region with the bucket targeted by the Resource Data Sync. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // A supported sync format. Only JsonSerDe is currently supported. Defaults to JsonSerDe. + SyncFormat *string `json:"syncFormat,omitempty" tf:"sync_format,omitempty"` +} + +type S3DestinationParameters struct { + + // Name of S3 bucket where the aggregated data is stored. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Reference to a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameRef *v1.Reference `json:"bucketNameRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate bucketName. + // +kubebuilder:validation:Optional + BucketNameSelector *v1.Selector `json:"bucketNameSelector,omitempty" tf:"-"` + + // ARN of an encryption key for a destination in Amazon S3. + // +kubebuilder:validation:Optional + KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"` + + // Prefix for the bucket. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Region with the bucket targeted by the Resource Data Sync. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta2.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("region",false) + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Reference to a Bucket in s3 to populate region. + // +kubebuilder:validation:Optional + RegionRef *v1.Reference `json:"regionRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate region. + // +kubebuilder:validation:Optional + RegionSelector *v1.Selector `json:"regionSelector,omitempty" tf:"-"` + + // A supported sync format. Only JsonSerDe is currently supported. Defaults to JsonSerDe. 
+ // +kubebuilder:validation:Optional + SyncFormat *string `json:"syncFormat,omitempty" tf:"sync_format,omitempty"` +} + +// ResourceDataSyncSpec defines the desired state of ResourceDataSync +type ResourceDataSyncSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResourceDataSyncParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ResourceDataSyncInitParameters `json:"initProvider,omitempty"` +} + +// ResourceDataSyncStatus defines the observed state of ResourceDataSync. +type ResourceDataSyncStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResourceDataSyncObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ResourceDataSync is the Schema for the ResourceDataSyncs API. Provides a SSM resource data sync. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ResourceDataSync struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.s3Destination) || (has(self.initProvider) && has(self.initProvider.s3Destination))",message="spec.forProvider.s3Destination is a required parameter" + Spec ResourceDataSyncSpec `json:"spec"` + Status ResourceDataSyncStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourceDataSyncList contains a list of ResourceDataSyncs +type ResourceDataSyncList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourceDataSync `json:"items"` +} + +// Repository type metadata. +var ( + ResourceDataSync_Kind = "ResourceDataSync" + ResourceDataSync_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ResourceDataSync_Kind}.String() + ResourceDataSync_KindAPIVersion = ResourceDataSync_Kind + "." 
+ CRDGroupVersion.String() + ResourceDataSync_GroupVersionKind = CRDGroupVersion.WithKind(ResourceDataSync_Kind) +) + +func init() { + SchemeBuilder.Register(&ResourceDataSync{}, &ResourceDataSyncList{}) +} diff --git a/apis/ssoadmin/v1beta1/zz_generated.conversion_hubs.go b/apis/ssoadmin/v1beta1/zz_generated.conversion_hubs.go index b86396d58f..eac9a2ec07 100755 --- a/apis/ssoadmin/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/ssoadmin/v1beta1/zz_generated.conversion_hubs.go @@ -9,18 +9,12 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *AccountAssignment) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *CustomerManagedPolicyAttachment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *InstanceAccessControlAttributes) Hub() {} // Hub marks this type as a conversion hub. func (tr *ManagedPolicyAttachment) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *PermissionsBoundaryAttachment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *PermissionSet) Hub() {} diff --git a/apis/ssoadmin/v1beta1/zz_generated.conversion_spokes.go b/apis/ssoadmin/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..067057d6b4 --- /dev/null +++ b/apis/ssoadmin/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this CustomerManagedPolicyAttachment to the hub type. 
+func (tr *CustomerManagedPolicyAttachment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CustomerManagedPolicyAttachment type. +func (tr *CustomerManagedPolicyAttachment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this PermissionsBoundaryAttachment to the hub type. +func (tr *PermissionsBoundaryAttachment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the PermissionsBoundaryAttachment type. 
+func (tr *PermissionsBoundaryAttachment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/ssoadmin/v1beta2/zz_customermanagedpolicyattachment_terraformed.go b/apis/ssoadmin/v1beta2/zz_customermanagedpolicyattachment_terraformed.go new file mode 100755 index 0000000000..39a2d2c2a5 --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_customermanagedpolicyattachment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CustomerManagedPolicyAttachment +func (mg *CustomerManagedPolicyAttachment) GetTerraformResourceType() string { + return "aws_ssoadmin_customer_managed_policy_attachment" +} + +// GetConnectionDetailsMapping for this CustomerManagedPolicyAttachment +func (tr *CustomerManagedPolicyAttachment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CustomerManagedPolicyAttachment +func (tr *CustomerManagedPolicyAttachment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CustomerManagedPolicyAttachment +func (tr *CustomerManagedPolicyAttachment) SetObservation(obs map[string]any) error { + p, err := 
json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this CustomerManagedPolicyAttachment
+func (tr *CustomerManagedPolicyAttachment) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this CustomerManagedPolicyAttachment
+func (tr *CustomerManagedPolicyAttachment) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this CustomerManagedPolicyAttachment
+func (tr *CustomerManagedPolicyAttachment) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this CustomerManagedPolicyAttachment
+func (tr *CustomerManagedPolicyAttachment) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this CustomerManagedPolicyAttachment
+func (tr *CustomerManagedPolicyAttachment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CustomerManagedPolicyAttachment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CustomerManagedPolicyAttachment) LateInitialize(attrs []byte) (bool, error) { + params := &CustomerManagedPolicyAttachmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CustomerManagedPolicyAttachment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ssoadmin/v1beta2/zz_customermanagedpolicyattachment_types.go b/apis/ssoadmin/v1beta2/zz_customermanagedpolicyattachment_types.go new file mode 100755 index 0000000000..e2f2edbe13 --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_customermanagedpolicyattachment_types.go @@ -0,0 +1,177 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomerManagedPolicyAttachmentInitParameters struct { + + // Specifies the name and path of a customer managed policy. See below. + CustomerManagedPolicyReference *CustomerManagedPolicyReferenceInitParameters `json:"customerManagedPolicyReference,omitempty" tf:"customer_managed_policy_reference,omitempty"` +} + +type CustomerManagedPolicyAttachmentObservation struct { + + // Specifies the name and path of a customer managed policy. See below. + CustomerManagedPolicyReference *CustomerManagedPolicyReferenceObservation `json:"customerManagedPolicyReference,omitempty" tf:"customer_managed_policy_reference,omitempty"` + + // Policy Name, Policy Path, Permission Set Amazon Resource Name (ARN), and SSO Instance ARN, each separated by a comma (,). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. + InstanceArn *string `json:"instanceArn,omitempty" tf:"instance_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the Permission Set. + PermissionSetArn *string `json:"permissionSetArn,omitempty" tf:"permission_set_arn,omitempty"` +} + +type CustomerManagedPolicyAttachmentParameters struct { + + // Specifies the name and path of a customer managed policy. See below. + // +kubebuilder:validation:Optional + CustomerManagedPolicyReference *CustomerManagedPolicyReferenceParameters `json:"customerManagedPolicyReference,omitempty" tf:"customer_managed_policy_reference,omitempty"` + + // The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. + // +kubebuilder:validation:Required + InstanceArn *string `json:"instanceArn" tf:"instance_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the Permission Set. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ssoadmin/v1beta1.PermissionSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + PermissionSetArn *string `json:"permissionSetArn,omitempty" tf:"permission_set_arn,omitempty"` + + // Reference to a PermissionSet in ssoadmin to populate permissionSetArn. + // +kubebuilder:validation:Optional + PermissionSetArnRef *v1.Reference `json:"permissionSetArnRef,omitempty" tf:"-"` + + // Selector for a PermissionSet in ssoadmin to populate permissionSetArn. + // +kubebuilder:validation:Optional + PermissionSetArnSelector *v1.Selector `json:"permissionSetArnSelector,omitempty" tf:"-"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type CustomerManagedPolicyReferenceInitParameters struct { + + // Name of the customer managed IAM Policy to be attached. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Policy + // +crossplane:generate:reference:refFieldName=PolicyNameRef + // +crossplane:generate:reference:selectorFieldName=PolicyNameSelector + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path to the IAM policy to be attached. The default is /. See IAM Identifiers for more information. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Reference to a Policy in iam to populate name. + // +kubebuilder:validation:Optional + PolicyNameRef *v1.Reference `json:"policyNameRef,omitempty" tf:"-"` + + // Selector for a Policy in iam to populate name. + // +kubebuilder:validation:Optional + PolicyNameSelector *v1.Selector `json:"policyNameSelector,omitempty" tf:"-"` +} + +type CustomerManagedPolicyReferenceObservation struct { + + // Name of the customer managed IAM Policy to be attached. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path to the IAM policy to be attached. The default is /. See IAM Identifiers for more information. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type CustomerManagedPolicyReferenceParameters struct { + + // Name of the customer managed IAM Policy to be attached. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Policy + // +crossplane:generate:reference:refFieldName=PolicyNameRef + // +crossplane:generate:reference:selectorFieldName=PolicyNameSelector + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path to the IAM policy to be attached. The default is /. See IAM Identifiers for more information. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Reference to a Policy in iam to populate name. + // +kubebuilder:validation:Optional + PolicyNameRef *v1.Reference `json:"policyNameRef,omitempty" tf:"-"` + + // Selector for a Policy in iam to populate name. + // +kubebuilder:validation:Optional + PolicyNameSelector *v1.Selector `json:"policyNameSelector,omitempty" tf:"-"` +} + +// CustomerManagedPolicyAttachmentSpec defines the desired state of CustomerManagedPolicyAttachment +type CustomerManagedPolicyAttachmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CustomerManagedPolicyAttachmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CustomerManagedPolicyAttachmentInitParameters `json:"initProvider,omitempty"` +} + +// CustomerManagedPolicyAttachmentStatus defines the observed state of CustomerManagedPolicyAttachment. +type CustomerManagedPolicyAttachmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CustomerManagedPolicyAttachmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CustomerManagedPolicyAttachment is the Schema for the CustomerManagedPolicyAttachments API. Manages a customer managed policy for a Single Sign-On (SSO) Permission Set +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type CustomerManagedPolicyAttachment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.customerManagedPolicyReference) || (has(self.initProvider) && has(self.initProvider.customerManagedPolicyReference))",message="spec.forProvider.customerManagedPolicyReference is a required parameter" + Spec CustomerManagedPolicyAttachmentSpec `json:"spec"` + Status CustomerManagedPolicyAttachmentStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CustomerManagedPolicyAttachmentList contains a list of CustomerManagedPolicyAttachments +type CustomerManagedPolicyAttachmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CustomerManagedPolicyAttachment `json:"items"` +} + +// Repository type metadata. +var ( + CustomerManagedPolicyAttachment_Kind = "CustomerManagedPolicyAttachment" + CustomerManagedPolicyAttachment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CustomerManagedPolicyAttachment_Kind}.String() + CustomerManagedPolicyAttachment_KindAPIVersion = CustomerManagedPolicyAttachment_Kind + "." + CRDGroupVersion.String() + CustomerManagedPolicyAttachment_GroupVersionKind = CRDGroupVersion.WithKind(CustomerManagedPolicyAttachment_Kind) +) + +func init() { + SchemeBuilder.Register(&CustomerManagedPolicyAttachment{}, &CustomerManagedPolicyAttachmentList{}) +} diff --git a/apis/ssoadmin/v1beta2/zz_generated.conversion_hubs.go b/apis/ssoadmin/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..fe9e43d02b --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *CustomerManagedPolicyAttachment) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *PermissionsBoundaryAttachment) Hub() {} diff --git a/apis/ssoadmin/v1beta2/zz_generated.deepcopy.go b/apis/ssoadmin/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..3f021d3f83 --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,667 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedPolicyAttachment) DeepCopyInto(out *CustomerManagedPolicyAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyAttachment. +func (in *CustomerManagedPolicyAttachment) DeepCopy() *CustomerManagedPolicyAttachment { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomerManagedPolicyAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedPolicyAttachmentInitParameters) DeepCopyInto(out *CustomerManagedPolicyAttachmentInitParameters) { + *out = *in + if in.CustomerManagedPolicyReference != nil { + in, out := &in.CustomerManagedPolicyReference, &out.CustomerManagedPolicyReference + *out = new(CustomerManagedPolicyReferenceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyAttachmentInitParameters. +func (in *CustomerManagedPolicyAttachmentInitParameters) DeepCopy() *CustomerManagedPolicyAttachmentInitParameters { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyAttachmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedPolicyAttachmentList) DeepCopyInto(out *CustomerManagedPolicyAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomerManagedPolicyAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyAttachmentList. +func (in *CustomerManagedPolicyAttachmentList) DeepCopy() *CustomerManagedPolicyAttachmentList { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomerManagedPolicyAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedPolicyAttachmentObservation) DeepCopyInto(out *CustomerManagedPolicyAttachmentObservation) { + *out = *in + if in.CustomerManagedPolicyReference != nil { + in, out := &in.CustomerManagedPolicyReference, &out.CustomerManagedPolicyReference + *out = new(CustomerManagedPolicyReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceArn != nil { + in, out := &in.InstanceArn, &out.InstanceArn + *out = new(string) + **out = **in + } + if in.PermissionSetArn != nil { + in, out := &in.PermissionSetArn, &out.PermissionSetArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyAttachmentObservation. +func (in *CustomerManagedPolicyAttachmentObservation) DeepCopy() *CustomerManagedPolicyAttachmentObservation { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyAttachmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedPolicyAttachmentParameters) DeepCopyInto(out *CustomerManagedPolicyAttachmentParameters) { + *out = *in + if in.CustomerManagedPolicyReference != nil { + in, out := &in.CustomerManagedPolicyReference, &out.CustomerManagedPolicyReference + *out = new(CustomerManagedPolicyReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.InstanceArn != nil { + in, out := &in.InstanceArn, &out.InstanceArn + *out = new(string) + **out = **in + } + if in.PermissionSetArn != nil { + in, out := &in.PermissionSetArn, &out.PermissionSetArn + *out = new(string) + **out = **in + } + if in.PermissionSetArnRef != nil { + in, out := &in.PermissionSetArnRef, &out.PermissionSetArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PermissionSetArnSelector != nil { + in, out := &in.PermissionSetArnSelector, &out.PermissionSetArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyAttachmentParameters. +func (in *CustomerManagedPolicyAttachmentParameters) DeepCopy() *CustomerManagedPolicyAttachmentParameters { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyAttachmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedPolicyAttachmentSpec) DeepCopyInto(out *CustomerManagedPolicyAttachmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyAttachmentSpec. 
+func (in *CustomerManagedPolicyAttachmentSpec) DeepCopy() *CustomerManagedPolicyAttachmentSpec { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedPolicyAttachmentStatus) DeepCopyInto(out *CustomerManagedPolicyAttachmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyAttachmentStatus. +func (in *CustomerManagedPolicyAttachmentStatus) DeepCopy() *CustomerManagedPolicyAttachmentStatus { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedPolicyReferenceInitParameters) DeepCopyInto(out *CustomerManagedPolicyReferenceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PolicyNameRef != nil { + in, out := &in.PolicyNameRef, &out.PolicyNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyNameSelector != nil { + in, out := &in.PolicyNameSelector, &out.PolicyNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyReferenceInitParameters. 
+func (in *CustomerManagedPolicyReferenceInitParameters) DeepCopy() *CustomerManagedPolicyReferenceInitParameters { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedPolicyReferenceObservation) DeepCopyInto(out *CustomerManagedPolicyReferenceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyReferenceObservation. +func (in *CustomerManagedPolicyReferenceObservation) DeepCopy() *CustomerManagedPolicyReferenceObservation { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedPolicyReferenceParameters) DeepCopyInto(out *CustomerManagedPolicyReferenceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PolicyNameRef != nil { + in, out := &in.PolicyNameRef, &out.PolicyNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyNameSelector != nil { + in, out := &in.PolicyNameSelector, &out.PolicyNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedPolicyReferenceParameters. 
+func (in *CustomerManagedPolicyReferenceParameters) DeepCopy() *CustomerManagedPolicyReferenceParameters { + if in == nil { + return nil + } + out := new(CustomerManagedPolicyReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryAttachment) DeepCopyInto(out *PermissionsBoundaryAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryAttachment. +func (in *PermissionsBoundaryAttachment) DeepCopy() *PermissionsBoundaryAttachment { + if in == nil { + return nil + } + out := new(PermissionsBoundaryAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PermissionsBoundaryAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryAttachmentInitParameters) DeepCopyInto(out *PermissionsBoundaryAttachmentInitParameters) { + *out = *in + if in.PermissionsBoundary != nil { + in, out := &in.PermissionsBoundary, &out.PermissionsBoundary + *out = new(PermissionsBoundaryInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryAttachmentInitParameters. 
+func (in *PermissionsBoundaryAttachmentInitParameters) DeepCopy() *PermissionsBoundaryAttachmentInitParameters { + if in == nil { + return nil + } + out := new(PermissionsBoundaryAttachmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryAttachmentList) DeepCopyInto(out *PermissionsBoundaryAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PermissionsBoundaryAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryAttachmentList. +func (in *PermissionsBoundaryAttachmentList) DeepCopy() *PermissionsBoundaryAttachmentList { + if in == nil { + return nil + } + out := new(PermissionsBoundaryAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PermissionsBoundaryAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PermissionsBoundaryAttachmentObservation) DeepCopyInto(out *PermissionsBoundaryAttachmentObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceArn != nil { + in, out := &in.InstanceArn, &out.InstanceArn + *out = new(string) + **out = **in + } + if in.PermissionSetArn != nil { + in, out := &in.PermissionSetArn, &out.PermissionSetArn + *out = new(string) + **out = **in + } + if in.PermissionsBoundary != nil { + in, out := &in.PermissionsBoundary, &out.PermissionsBoundary + *out = new(PermissionsBoundaryObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryAttachmentObservation. +func (in *PermissionsBoundaryAttachmentObservation) DeepCopy() *PermissionsBoundaryAttachmentObservation { + if in == nil { + return nil + } + out := new(PermissionsBoundaryAttachmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PermissionsBoundaryAttachmentParameters) DeepCopyInto(out *PermissionsBoundaryAttachmentParameters) { + *out = *in + if in.InstanceArn != nil { + in, out := &in.InstanceArn, &out.InstanceArn + *out = new(string) + **out = **in + } + if in.PermissionSetArn != nil { + in, out := &in.PermissionSetArn, &out.PermissionSetArn + *out = new(string) + **out = **in + } + if in.PermissionSetArnRef != nil { + in, out := &in.PermissionSetArnRef, &out.PermissionSetArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PermissionSetArnSelector != nil { + in, out := &in.PermissionSetArnSelector, &out.PermissionSetArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PermissionsBoundary != nil { + in, out := &in.PermissionsBoundary, &out.PermissionsBoundary + *out = new(PermissionsBoundaryParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryAttachmentParameters. +func (in *PermissionsBoundaryAttachmentParameters) DeepCopy() *PermissionsBoundaryAttachmentParameters { + if in == nil { + return nil + } + out := new(PermissionsBoundaryAttachmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryAttachmentSpec) DeepCopyInto(out *PermissionsBoundaryAttachmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryAttachmentSpec. 
+func (in *PermissionsBoundaryAttachmentSpec) DeepCopy() *PermissionsBoundaryAttachmentSpec { + if in == nil { + return nil + } + out := new(PermissionsBoundaryAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryAttachmentStatus) DeepCopyInto(out *PermissionsBoundaryAttachmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryAttachmentStatus. +func (in *PermissionsBoundaryAttachmentStatus) DeepCopy() *PermissionsBoundaryAttachmentStatus { + if in == nil { + return nil + } + out := new(PermissionsBoundaryAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryCustomerManagedPolicyReferenceInitParameters) DeepCopyInto(out *PermissionsBoundaryCustomerManagedPolicyReferenceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryCustomerManagedPolicyReferenceInitParameters. 
+func (in *PermissionsBoundaryCustomerManagedPolicyReferenceInitParameters) DeepCopy() *PermissionsBoundaryCustomerManagedPolicyReferenceInitParameters { + if in == nil { + return nil + } + out := new(PermissionsBoundaryCustomerManagedPolicyReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryCustomerManagedPolicyReferenceObservation) DeepCopyInto(out *PermissionsBoundaryCustomerManagedPolicyReferenceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryCustomerManagedPolicyReferenceObservation. +func (in *PermissionsBoundaryCustomerManagedPolicyReferenceObservation) DeepCopy() *PermissionsBoundaryCustomerManagedPolicyReferenceObservation { + if in == nil { + return nil + } + out := new(PermissionsBoundaryCustomerManagedPolicyReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PermissionsBoundaryCustomerManagedPolicyReferenceParameters) DeepCopyInto(out *PermissionsBoundaryCustomerManagedPolicyReferenceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryCustomerManagedPolicyReferenceParameters. +func (in *PermissionsBoundaryCustomerManagedPolicyReferenceParameters) DeepCopy() *PermissionsBoundaryCustomerManagedPolicyReferenceParameters { + if in == nil { + return nil + } + out := new(PermissionsBoundaryCustomerManagedPolicyReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryInitParameters) DeepCopyInto(out *PermissionsBoundaryInitParameters) { + *out = *in + if in.CustomerManagedPolicyReference != nil { + in, out := &in.CustomerManagedPolicyReference, &out.CustomerManagedPolicyReference + *out = new(PermissionsBoundaryCustomerManagedPolicyReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagedPolicyArn != nil { + in, out := &in.ManagedPolicyArn, &out.ManagedPolicyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryInitParameters. 
+func (in *PermissionsBoundaryInitParameters) DeepCopy() *PermissionsBoundaryInitParameters { + if in == nil { + return nil + } + out := new(PermissionsBoundaryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryObservation) DeepCopyInto(out *PermissionsBoundaryObservation) { + *out = *in + if in.CustomerManagedPolicyReference != nil { + in, out := &in.CustomerManagedPolicyReference, &out.CustomerManagedPolicyReference + *out = new(PermissionsBoundaryCustomerManagedPolicyReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.ManagedPolicyArn != nil { + in, out := &in.ManagedPolicyArn, &out.ManagedPolicyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryObservation. +func (in *PermissionsBoundaryObservation) DeepCopy() *PermissionsBoundaryObservation { + if in == nil { + return nil + } + out := new(PermissionsBoundaryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsBoundaryParameters) DeepCopyInto(out *PermissionsBoundaryParameters) { + *out = *in + if in.CustomerManagedPolicyReference != nil { + in, out := &in.CustomerManagedPolicyReference, &out.CustomerManagedPolicyReference + *out = new(PermissionsBoundaryCustomerManagedPolicyReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagedPolicyArn != nil { + in, out := &in.ManagedPolicyArn, &out.ManagedPolicyArn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsBoundaryParameters. 
+func (in *PermissionsBoundaryParameters) DeepCopy() *PermissionsBoundaryParameters { + if in == nil { + return nil + } + out := new(PermissionsBoundaryParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/ssoadmin/v1beta2/zz_generated.managed.go b/apis/ssoadmin/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..ee71c1cbac --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CustomerManagedPolicyAttachment. 
+func (mg *CustomerManagedPolicyAttachment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CustomerManagedPolicyAttachment. +func (mg *CustomerManagedPolicyAttachment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PermissionsBoundaryAttachment. 
+func (mg *PermissionsBoundaryAttachment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PermissionsBoundaryAttachment. +func (mg *PermissionsBoundaryAttachment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PermissionsBoundaryAttachment. 
+func (mg *PermissionsBoundaryAttachment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ssoadmin/v1beta2/zz_generated.managedlist.go b/apis/ssoadmin/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..39ebba6c45 --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CustomerManagedPolicyAttachmentList. +func (l *CustomerManagedPolicyAttachmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PermissionsBoundaryAttachmentList. +func (l *PermissionsBoundaryAttachmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/ssoadmin/v1beta2/zz_generated.resolvers.go b/apis/ssoadmin/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..9e0b47700c --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,170 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *CustomerManagedPolicyAttachment) ResolveReferences( // ResolveReferences of this CustomerManagedPolicyAttachment. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.CustomerManagedPolicyReference != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Policy", "PolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomerManagedPolicyReference.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CustomerManagedPolicyReference.PolicyNameRef, + Selector: mg.Spec.ForProvider.CustomerManagedPolicyReference.PolicyNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomerManagedPolicyReference.Name") + } + mg.Spec.ForProvider.CustomerManagedPolicyReference.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CustomerManagedPolicyReference.PolicyNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ssoadmin.aws.upbound.io", "v1beta1", "PermissionSet", "PermissionSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PermissionSetArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.PermissionSetArnRef, + Selector: mg.Spec.ForProvider.PermissionSetArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PermissionSetArn") + } + mg.Spec.ForProvider.PermissionSetArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PermissionSetArnRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.CustomerManagedPolicyReference != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Policy", "PolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CustomerManagedPolicyReference.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CustomerManagedPolicyReference.PolicyNameRef, + Selector: mg.Spec.InitProvider.CustomerManagedPolicyReference.PolicyNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomerManagedPolicyReference.Name") + } + mg.Spec.InitProvider.CustomerManagedPolicyReference.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CustomerManagedPolicyReference.PolicyNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this PermissionsBoundaryAttachment. 
+func (mg *PermissionsBoundaryAttachment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ssoadmin.aws.upbound.io", "v1beta1", "PermissionSet", "PermissionSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PermissionSetArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.PermissionSetArnRef, + Selector: mg.Spec.ForProvider.PermissionSetArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PermissionSetArn") + } + mg.Spec.ForProvider.PermissionSetArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PermissionSetArnRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.PermissionsBoundary != nil { + if mg.Spec.ForProvider.PermissionsBoundary.CustomerManagedPolicyReference != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Policy", "PolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PermissionsBoundary.CustomerManagedPolicyReference.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.PermissionsBoundary.CustomerManagedPolicyReference.NameRef, + Selector: mg.Spec.ForProvider.PermissionsBoundary.CustomerManagedPolicyReference.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.PermissionsBoundary.CustomerManagedPolicyReference.Name") + } + mg.Spec.ForProvider.PermissionsBoundary.CustomerManagedPolicyReference.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PermissionsBoundary.CustomerManagedPolicyReference.NameRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.PermissionsBoundary != nil { + if mg.Spec.InitProvider.PermissionsBoundary.CustomerManagedPolicyReference != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Policy", "PolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PermissionsBoundary.CustomerManagedPolicyReference.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.PermissionsBoundary.CustomerManagedPolicyReference.NameRef, + Selector: mg.Spec.InitProvider.PermissionsBoundary.CustomerManagedPolicyReference.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PermissionsBoundary.CustomerManagedPolicyReference.Name") + } + mg.Spec.InitProvider.PermissionsBoundary.CustomerManagedPolicyReference.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PermissionsBoundary.CustomerManagedPolicyReference.NameRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/ssoadmin/v1beta2/zz_groupversion_info.go b/apis/ssoadmin/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..38c20078ad --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=ssoadmin.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "ssoadmin.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/ssoadmin/v1beta2/zz_permissionsboundaryattachment_terraformed.go b/apis/ssoadmin/v1beta2/zz_permissionsboundaryattachment_terraformed.go new file mode 100755 index 0000000000..5d9cdb87b2 --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_permissionsboundaryattachment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PermissionsBoundaryAttachment +func (mg *PermissionsBoundaryAttachment) GetTerraformResourceType() string { + return "aws_ssoadmin_permissions_boundary_attachment" +} + +// GetConnectionDetailsMapping for this PermissionsBoundaryAttachment +func (tr *PermissionsBoundaryAttachment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PermissionsBoundaryAttachment +func (tr *PermissionsBoundaryAttachment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PermissionsBoundaryAttachment +func (tr *PermissionsBoundaryAttachment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PermissionsBoundaryAttachment +func (tr *PermissionsBoundaryAttachment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PermissionsBoundaryAttachment +func (tr *PermissionsBoundaryAttachment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PermissionsBoundaryAttachment +func (tr *PermissionsBoundaryAttachment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PermissionsBoundaryAttachment +func (tr *PermissionsBoundaryAttachment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this PermissionsBoundaryAttachment +func (tr *PermissionsBoundaryAttachment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PermissionsBoundaryAttachment using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *PermissionsBoundaryAttachment) LateInitialize(attrs []byte) (bool, error) { + params := &PermissionsBoundaryAttachmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PermissionsBoundaryAttachment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/ssoadmin/v1beta2/zz_permissionsboundaryattachment_types.go b/apis/ssoadmin/v1beta2/zz_permissionsboundaryattachment_types.go new file mode 100755 index 0000000000..42d08da959 --- /dev/null +++ b/apis/ssoadmin/v1beta2/zz_permissionsboundaryattachment_types.go @@ -0,0 +1,202 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PermissionsBoundaryAttachmentInitParameters struct { + + // The permissions boundary policy. See below. + PermissionsBoundary *PermissionsBoundaryInitParameters `json:"permissionsBoundary,omitempty" tf:"permissions_boundary,omitempty"` +} + +type PermissionsBoundaryAttachmentObservation struct { + + // Permission Set Amazon Resource Name (ARN) and SSO Instance ARN, separated by a comma (,). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. 
+ InstanceArn *string `json:"instanceArn,omitempty" tf:"instance_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the Permission Set. + PermissionSetArn *string `json:"permissionSetArn,omitempty" tf:"permission_set_arn,omitempty"` + + // The permissions boundary policy. See below. + PermissionsBoundary *PermissionsBoundaryObservation `json:"permissionsBoundary,omitempty" tf:"permissions_boundary,omitempty"` +} + +type PermissionsBoundaryAttachmentParameters struct { + + // The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. + // +kubebuilder:validation:Required + InstanceArn *string `json:"instanceArn" tf:"instance_arn,omitempty"` + + // The Amazon Resource Name (ARN) of the Permission Set. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ssoadmin/v1beta1.PermissionSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + PermissionSetArn *string `json:"permissionSetArn,omitempty" tf:"permission_set_arn,omitempty"` + + // Reference to a PermissionSet in ssoadmin to populate permissionSetArn. + // +kubebuilder:validation:Optional + PermissionSetArnRef *v1.Reference `json:"permissionSetArnRef,omitempty" tf:"-"` + + // Selector for a PermissionSet in ssoadmin to populate permissionSetArn. + // +kubebuilder:validation:Optional + PermissionSetArnSelector *v1.Selector `json:"permissionSetArnSelector,omitempty" tf:"-"` + + // The permissions boundary policy. See below. + // +kubebuilder:validation:Optional + PermissionsBoundary *PermissionsBoundaryParameters `json:"permissionsBoundary,omitempty" tf:"permissions_boundary,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type PermissionsBoundaryCustomerManagedPolicyReferenceInitParameters struct { + + // Name of the customer managed IAM Policy to be attached. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Policy + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Policy in iam to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Policy in iam to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // The path to the IAM policy to be attached. The default is /. See IAM Identifiers for more information. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type PermissionsBoundaryCustomerManagedPolicyReferenceObservation struct { + + // Name of the customer managed IAM Policy to be attached. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path to the IAM policy to be attached. The default is /. See IAM Identifiers for more information. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type PermissionsBoundaryCustomerManagedPolicyReferenceParameters struct { + + // Name of the customer managed IAM Policy to be attached. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Policy + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Policy in iam to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Policy in iam to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // The path to the IAM policy to be attached. The default is /. 
See IAM Identifiers for more information. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type PermissionsBoundaryInitParameters struct { + + // Specifies the name and path of a customer managed policy. See below. + CustomerManagedPolicyReference *PermissionsBoundaryCustomerManagedPolicyReferenceInitParameters `json:"customerManagedPolicyReference,omitempty" tf:"customer_managed_policy_reference,omitempty"` + + // AWS-managed IAM policy ARN to use as the permissions boundary. + ManagedPolicyArn *string `json:"managedPolicyArn,omitempty" tf:"managed_policy_arn,omitempty"` +} + +type PermissionsBoundaryObservation struct { + + // Specifies the name and path of a customer managed policy. See below. + CustomerManagedPolicyReference *PermissionsBoundaryCustomerManagedPolicyReferenceObservation `json:"customerManagedPolicyReference,omitempty" tf:"customer_managed_policy_reference,omitempty"` + + // AWS-managed IAM policy ARN to use as the permissions boundary. + ManagedPolicyArn *string `json:"managedPolicyArn,omitempty" tf:"managed_policy_arn,omitempty"` +} + +type PermissionsBoundaryParameters struct { + + // Specifies the name and path of a customer managed policy. See below. + // +kubebuilder:validation:Optional + CustomerManagedPolicyReference *PermissionsBoundaryCustomerManagedPolicyReferenceParameters `json:"customerManagedPolicyReference,omitempty" tf:"customer_managed_policy_reference,omitempty"` + + // AWS-managed IAM policy ARN to use as the permissions boundary. + // +kubebuilder:validation:Optional + ManagedPolicyArn *string `json:"managedPolicyArn,omitempty" tf:"managed_policy_arn,omitempty"` +} + +// PermissionsBoundaryAttachmentSpec defines the desired state of PermissionsBoundaryAttachment +type PermissionsBoundaryAttachmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PermissionsBoundaryAttachmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PermissionsBoundaryAttachmentInitParameters `json:"initProvider,omitempty"` +} + +// PermissionsBoundaryAttachmentStatus defines the observed state of PermissionsBoundaryAttachment. +type PermissionsBoundaryAttachmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PermissionsBoundaryAttachmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// PermissionsBoundaryAttachment is the Schema for the PermissionsBoundaryAttachments API. Attaches a permissions boundary policy to a Single Sign-On (SSO) Permission Set resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type PermissionsBoundaryAttachment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.permissionsBoundary) || (has(self.initProvider) && has(self.initProvider.permissionsBoundary))",message="spec.forProvider.permissionsBoundary is a required parameter" + Spec PermissionsBoundaryAttachmentSpec `json:"spec"` + Status PermissionsBoundaryAttachmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PermissionsBoundaryAttachmentList contains a list of PermissionsBoundaryAttachments +type PermissionsBoundaryAttachmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PermissionsBoundaryAttachment `json:"items"` +} + +// Repository type metadata. +var ( + PermissionsBoundaryAttachment_Kind = "PermissionsBoundaryAttachment" + PermissionsBoundaryAttachment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PermissionsBoundaryAttachment_Kind}.String() + PermissionsBoundaryAttachment_KindAPIVersion = PermissionsBoundaryAttachment_Kind + "." 
+ CRDGroupVersion.String() + PermissionsBoundaryAttachment_GroupVersionKind = CRDGroupVersion.WithKind(PermissionsBoundaryAttachment_Kind) +) + +func init() { + SchemeBuilder.Register(&PermissionsBoundaryAttachment{}, &PermissionsBoundaryAttachmentList{}) +} diff --git a/apis/timestreamwrite/v1beta1/zz_generated.conversion_hubs.go b/apis/timestreamwrite/v1beta1/zz_generated.conversion_hubs.go index 9df800bf6d..ff4480fc73 100755 --- a/apis/timestreamwrite/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/timestreamwrite/v1beta1/zz_generated.conversion_hubs.go @@ -8,6 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Database) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Table) Hub() {} diff --git a/apis/timestreamwrite/v1beta1/zz_generated.conversion_spokes.go b/apis/timestreamwrite/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..5062339cc1 --- /dev/null +++ b/apis/timestreamwrite/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Table to the hub type. +func (tr *Table) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Table type. 
+func (tr *Table) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/timestreamwrite/v1beta2/zz_generated.conversion_hubs.go b/apis/timestreamwrite/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..0ed428e742 --- /dev/null +++ b/apis/timestreamwrite/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Table) Hub() {} diff --git a/apis/timestreamwrite/v1beta2/zz_generated.deepcopy.go b/apis/timestreamwrite/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a2eef5fb8a --- /dev/null +++ b/apis/timestreamwrite/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,772 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompositePartitionKeyInitParameters) DeepCopyInto(out *CompositePartitionKeyInitParameters) { + *out = *in + if in.EnforcementInRecord != nil { + in, out := &in.EnforcementInRecord, &out.EnforcementInRecord + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositePartitionKeyInitParameters. +func (in *CompositePartitionKeyInitParameters) DeepCopy() *CompositePartitionKeyInitParameters { + if in == nil { + return nil + } + out := new(CompositePartitionKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositePartitionKeyObservation) DeepCopyInto(out *CompositePartitionKeyObservation) { + *out = *in + if in.EnforcementInRecord != nil { + in, out := &in.EnforcementInRecord, &out.EnforcementInRecord + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositePartitionKeyObservation. +func (in *CompositePartitionKeyObservation) DeepCopy() *CompositePartitionKeyObservation { + if in == nil { + return nil + } + out := new(CompositePartitionKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompositePartitionKeyParameters) DeepCopyInto(out *CompositePartitionKeyParameters) { + *out = *in + if in.EnforcementInRecord != nil { + in, out := &in.EnforcementInRecord, &out.EnforcementInRecord + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositePartitionKeyParameters. +func (in *CompositePartitionKeyParameters) DeepCopy() *CompositePartitionKeyParameters { + if in == nil { + return nil + } + out := new(CompositePartitionKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MagneticStoreRejectedDataLocationInitParameters) DeepCopyInto(out *MagneticStoreRejectedDataLocationInitParameters) { + *out = *in + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MagneticStoreRejectedDataLocationInitParameters. +func (in *MagneticStoreRejectedDataLocationInitParameters) DeepCopy() *MagneticStoreRejectedDataLocationInitParameters { + if in == nil { + return nil + } + out := new(MagneticStoreRejectedDataLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MagneticStoreRejectedDataLocationObservation) DeepCopyInto(out *MagneticStoreRejectedDataLocationObservation) { + *out = *in + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3ConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MagneticStoreRejectedDataLocationObservation. +func (in *MagneticStoreRejectedDataLocationObservation) DeepCopy() *MagneticStoreRejectedDataLocationObservation { + if in == nil { + return nil + } + out := new(MagneticStoreRejectedDataLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MagneticStoreRejectedDataLocationParameters) DeepCopyInto(out *MagneticStoreRejectedDataLocationParameters) { + *out = *in + if in.S3Configuration != nil { + in, out := &in.S3Configuration, &out.S3Configuration + *out = new(S3ConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MagneticStoreRejectedDataLocationParameters. +func (in *MagneticStoreRejectedDataLocationParameters) DeepCopy() *MagneticStoreRejectedDataLocationParameters { + if in == nil { + return nil + } + out := new(MagneticStoreRejectedDataLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MagneticStoreWritePropertiesInitParameters) DeepCopyInto(out *MagneticStoreWritePropertiesInitParameters) { + *out = *in + if in.EnableMagneticStoreWrites != nil { + in, out := &in.EnableMagneticStoreWrites, &out.EnableMagneticStoreWrites + *out = new(bool) + **out = **in + } + if in.MagneticStoreRejectedDataLocation != nil { + in, out := &in.MagneticStoreRejectedDataLocation, &out.MagneticStoreRejectedDataLocation + *out = new(MagneticStoreRejectedDataLocationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MagneticStoreWritePropertiesInitParameters. +func (in *MagneticStoreWritePropertiesInitParameters) DeepCopy() *MagneticStoreWritePropertiesInitParameters { + if in == nil { + return nil + } + out := new(MagneticStoreWritePropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MagneticStoreWritePropertiesObservation) DeepCopyInto(out *MagneticStoreWritePropertiesObservation) { + *out = *in + if in.EnableMagneticStoreWrites != nil { + in, out := &in.EnableMagneticStoreWrites, &out.EnableMagneticStoreWrites + *out = new(bool) + **out = **in + } + if in.MagneticStoreRejectedDataLocation != nil { + in, out := &in.MagneticStoreRejectedDataLocation, &out.MagneticStoreRejectedDataLocation + *out = new(MagneticStoreRejectedDataLocationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MagneticStoreWritePropertiesObservation. 
+func (in *MagneticStoreWritePropertiesObservation) DeepCopy() *MagneticStoreWritePropertiesObservation { + if in == nil { + return nil + } + out := new(MagneticStoreWritePropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MagneticStoreWritePropertiesParameters) DeepCopyInto(out *MagneticStoreWritePropertiesParameters) { + *out = *in + if in.EnableMagneticStoreWrites != nil { + in, out := &in.EnableMagneticStoreWrites, &out.EnableMagneticStoreWrites + *out = new(bool) + **out = **in + } + if in.MagneticStoreRejectedDataLocation != nil { + in, out := &in.MagneticStoreRejectedDataLocation, &out.MagneticStoreRejectedDataLocation + *out = new(MagneticStoreRejectedDataLocationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MagneticStoreWritePropertiesParameters. +func (in *MagneticStoreWritePropertiesParameters) DeepCopy() *MagneticStoreWritePropertiesParameters { + if in == nil { + return nil + } + out := new(MagneticStoreWritePropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPropertiesInitParameters) DeepCopyInto(out *RetentionPropertiesInitParameters) { + *out = *in + if in.MagneticStoreRetentionPeriodInDays != nil { + in, out := &in.MagneticStoreRetentionPeriodInDays, &out.MagneticStoreRetentionPeriodInDays + *out = new(float64) + **out = **in + } + if in.MemoryStoreRetentionPeriodInHours != nil { + in, out := &in.MemoryStoreRetentionPeriodInHours, &out.MemoryStoreRetentionPeriodInHours + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPropertiesInitParameters. 
+func (in *RetentionPropertiesInitParameters) DeepCopy() *RetentionPropertiesInitParameters { + if in == nil { + return nil + } + out := new(RetentionPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPropertiesObservation) DeepCopyInto(out *RetentionPropertiesObservation) { + *out = *in + if in.MagneticStoreRetentionPeriodInDays != nil { + in, out := &in.MagneticStoreRetentionPeriodInDays, &out.MagneticStoreRetentionPeriodInDays + *out = new(float64) + **out = **in + } + if in.MemoryStoreRetentionPeriodInHours != nil { + in, out := &in.MemoryStoreRetentionPeriodInHours, &out.MemoryStoreRetentionPeriodInHours + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPropertiesObservation. +func (in *RetentionPropertiesObservation) DeepCopy() *RetentionPropertiesObservation { + if in == nil { + return nil + } + out := new(RetentionPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPropertiesParameters) DeepCopyInto(out *RetentionPropertiesParameters) { + *out = *in + if in.MagneticStoreRetentionPeriodInDays != nil { + in, out := &in.MagneticStoreRetentionPeriodInDays, &out.MagneticStoreRetentionPeriodInDays + *out = new(float64) + **out = **in + } + if in.MemoryStoreRetentionPeriodInHours != nil { + in, out := &in.MemoryStoreRetentionPeriodInHours, &out.MemoryStoreRetentionPeriodInHours + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPropertiesParameters. 
+func (in *RetentionPropertiesParameters) DeepCopy() *RetentionPropertiesParameters { + if in == nil { + return nil + } + out := new(RetentionPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigurationInitParameters) DeepCopyInto(out *S3ConfigurationInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.EncryptionOption != nil { + in, out := &in.EncryptionOption, &out.EncryptionOption + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.ObjectKeyPrefix != nil { + in, out := &in.ObjectKeyPrefix, &out.ObjectKeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationInitParameters. +func (in *S3ConfigurationInitParameters) DeepCopy() *S3ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3ConfigurationObservation) DeepCopyInto(out *S3ConfigurationObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.EncryptionOption != nil { + in, out := &in.EncryptionOption, &out.EncryptionOption + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.ObjectKeyPrefix != nil { + in, out := &in.ObjectKeyPrefix, &out.ObjectKeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationObservation. +func (in *S3ConfigurationObservation) DeepCopy() *S3ConfigurationObservation { + if in == nil { + return nil + } + out := new(S3ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3ConfigurationParameters) DeepCopyInto(out *S3ConfigurationParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.EncryptionOption != nil { + in, out := &in.EncryptionOption, &out.EncryptionOption + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.ObjectKeyPrefix != nil { + in, out := &in.ObjectKeyPrefix, &out.ObjectKeyPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ConfigurationParameters. +func (in *S3ConfigurationParameters) DeepCopy() *S3ConfigurationParameters { + if in == nil { + return nil + } + out := new(S3ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SchemaInitParameters) DeepCopyInto(out *SchemaInitParameters) { + *out = *in + if in.CompositePartitionKey != nil { + in, out := &in.CompositePartitionKey, &out.CompositePartitionKey + *out = new(CompositePartitionKeyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaInitParameters. +func (in *SchemaInitParameters) DeepCopy() *SchemaInitParameters { + if in == nil { + return nil + } + out := new(SchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaObservation) DeepCopyInto(out *SchemaObservation) { + *out = *in + if in.CompositePartitionKey != nil { + in, out := &in.CompositePartitionKey, &out.CompositePartitionKey + *out = new(CompositePartitionKeyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaObservation. +func (in *SchemaObservation) DeepCopy() *SchemaObservation { + if in == nil { + return nil + } + out := new(SchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaParameters) DeepCopyInto(out *SchemaParameters) { + *out = *in + if in.CompositePartitionKey != nil { + in, out := &in.CompositePartitionKey, &out.CompositePartitionKey + *out = new(CompositePartitionKeyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaParameters. 
+func (in *SchemaParameters) DeepCopy() *SchemaParameters { + if in == nil { + return nil + } + out := new(SchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { + *out = *in + if in.MagneticStoreWriteProperties != nil { + in, out := &in.MagneticStoreWriteProperties, &out.MagneticStoreWriteProperties + *out = new(MagneticStoreWritePropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionProperties != nil { + in, out := &in.RetentionProperties, &out.RetentionProperties + *out = new(RetentionPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(SchemaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableInitParameters. +func (in *TableInitParameters) DeepCopy() *TableInitParameters { + if in == nil { + return nil + } + out := new(TableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableList) DeepCopyInto(out *TableList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Table, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableList. +func (in *TableList) DeepCopy() *TableList { + if in == nil { + return nil + } + out := new(TableList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TableList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableObservation) DeepCopyInto(out *TableObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MagneticStoreWriteProperties != nil { + in, out := &in.MagneticStoreWriteProperties, &out.MagneticStoreWriteProperties + *out = new(MagneticStoreWritePropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionProperties != nil { + in, out := &in.RetentionProperties, &out.RetentionProperties + *out = new(RetentionPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(SchemaObservation) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
TableObservation. +func (in *TableObservation) DeepCopy() *TableObservation { + if in == nil { + return nil + } + out := new(TableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableParameters) DeepCopyInto(out *TableParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.MagneticStoreWriteProperties != nil { + in, out := &in.MagneticStoreWriteProperties, &out.MagneticStoreWriteProperties + *out = new(MagneticStoreWritePropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RetentionProperties != nil { + in, out := &in.RetentionProperties, &out.RetentionProperties + *out = new(RetentionPropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(SchemaParameters) + (*in).DeepCopyInto(*out) + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableParameters. 
+func (in *TableParameters) DeepCopy() *TableParameters { + if in == nil { + return nil + } + out := new(TableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableSpec) DeepCopyInto(out *TableSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSpec. +func (in *TableSpec) DeepCopy() *TableSpec { + if in == nil { + return nil + } + out := new(TableSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableStatus) DeepCopyInto(out *TableStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableStatus. +func (in *TableStatus) DeepCopy() *TableStatus { + if in == nil { + return nil + } + out := new(TableStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/timestreamwrite/v1beta2/zz_generated.managed.go b/apis/timestreamwrite/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..25acdb1a58 --- /dev/null +++ b/apis/timestreamwrite/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Table. +func (mg *Table) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Table. 
+func (mg *Table) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Table. +func (mg *Table) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Table. +func (mg *Table) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Table. +func (mg *Table) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Table. +func (mg *Table) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Table. +func (mg *Table) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Table. +func (mg *Table) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Table. +func (mg *Table) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Table. +func (mg *Table) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Table. +func (mg *Table) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Table. 
+func (mg *Table) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/timestreamwrite/v1beta2/zz_generated.managedlist.go b/apis/timestreamwrite/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..bfa82abb5e --- /dev/null +++ b/apis/timestreamwrite/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this TableList. +func (l *TableList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/timestreamwrite/v1beta2/zz_generated.resolvers.go b/apis/timestreamwrite/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..ed389b5c4b --- /dev/null +++ b/apis/timestreamwrite/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Table. 
+ apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Table) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("timestreamwrite.aws.upbound.io", "v1beta1", "Database", "DatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DatabaseNameRef, + Selector: mg.Spec.ForProvider.DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DatabaseName") + } + mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/timestreamwrite/v1beta2/zz_groupversion_info.go b/apis/timestreamwrite/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..e2cb3460fb --- /dev/null +++ b/apis/timestreamwrite/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=timestreamwrite.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "timestreamwrite.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/timestreamwrite/v1beta2/zz_table_terraformed.go b/apis/timestreamwrite/v1beta2/zz_table_terraformed.go new file mode 100755 index 0000000000..41b83e8857 --- /dev/null +++ b/apis/timestreamwrite/v1beta2/zz_table_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Table +func (mg *Table) GetTerraformResourceType() string { + return "aws_timestreamwrite_table" +} + +// GetConnectionDetailsMapping for this Table +func (tr *Table) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Table +func (tr *Table) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Table +func (tr *Table) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Table +func (tr *Table) GetID() 
string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Table +func (tr *Table) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Table +func (tr *Table) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Table +func (tr *Table) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Table +func (tr *Table) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Table using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Table) LateInitialize(attrs []byte) (bool, error) { + params := &TableParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Table) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/timestreamwrite/v1beta2/zz_table_types.go b/apis/timestreamwrite/v1beta2/zz_table_types.go new file mode 100755 index 0000000000..6bd541f8f2 --- /dev/null +++ b/apis/timestreamwrite/v1beta2/zz_table_types.go @@ -0,0 +1,348 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CompositePartitionKeyInitParameters struct { + + // The level of enforcement for the specification of a dimension key in ingested records. Valid values: REQUIRED, OPTIONAL. + EnforcementInRecord *string `json:"enforcementInRecord,omitempty" tf:"enforcement_in_record,omitempty"` + + // The name of the attribute used for a dimension key. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of the partition key. Valid values: DIMENSION, MEASURE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type CompositePartitionKeyObservation struct { + + // The level of enforcement for the specification of a dimension key in ingested records. Valid values: REQUIRED, OPTIONAL. + EnforcementInRecord *string `json:"enforcementInRecord,omitempty" tf:"enforcement_in_record,omitempty"` + + // The name of the attribute used for a dimension key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of the partition key. Valid values: DIMENSION, MEASURE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type CompositePartitionKeyParameters struct { + + // The level of enforcement for the specification of a dimension key in ingested records. Valid values: REQUIRED, OPTIONAL. + // +kubebuilder:validation:Optional + EnforcementInRecord *string `json:"enforcementInRecord,omitempty" tf:"enforcement_in_record,omitempty"` + + // The name of the attribute used for a dimension key. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of the partition key. Valid values: DIMENSION, MEASURE. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type MagneticStoreRejectedDataLocationInitParameters struct { + + // Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. + S3Configuration *S3ConfigurationInitParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` +} + +type MagneticStoreRejectedDataLocationObservation struct { + + // Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. 
+ S3Configuration *S3ConfigurationObservation `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` +} + +type MagneticStoreRejectedDataLocationParameters struct { + + // Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. + // +kubebuilder:validation:Optional + S3Configuration *S3ConfigurationParameters `json:"s3Configuration,omitempty" tf:"s3_configuration,omitempty"` +} + +type MagneticStoreWritePropertiesInitParameters struct { + + // A flag to enable magnetic store writes. + EnableMagneticStoreWrites *bool `json:"enableMagneticStoreWrites,omitempty" tf:"enable_magnetic_store_writes,omitempty"` + + // The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. + MagneticStoreRejectedDataLocation *MagneticStoreRejectedDataLocationInitParameters `json:"magneticStoreRejectedDataLocation,omitempty" tf:"magnetic_store_rejected_data_location,omitempty"` +} + +type MagneticStoreWritePropertiesObservation struct { + + // A flag to enable magnetic store writes. + EnableMagneticStoreWrites *bool `json:"enableMagneticStoreWrites,omitempty" tf:"enable_magnetic_store_writes,omitempty"` + + // The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. + MagneticStoreRejectedDataLocation *MagneticStoreRejectedDataLocationObservation `json:"magneticStoreRejectedDataLocation,omitempty" tf:"magnetic_store_rejected_data_location,omitempty"` +} + +type MagneticStoreWritePropertiesParameters struct { + + // A flag to enable magnetic store writes. 
+ // +kubebuilder:validation:Optional + EnableMagneticStoreWrites *bool `json:"enableMagneticStoreWrites,omitempty" tf:"enable_magnetic_store_writes,omitempty"` + + // The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. + // +kubebuilder:validation:Optional + MagneticStoreRejectedDataLocation *MagneticStoreRejectedDataLocationParameters `json:"magneticStoreRejectedDataLocation,omitempty" tf:"magnetic_store_rejected_data_location,omitempty"` +} + +type RetentionPropertiesInitParameters struct { + + // The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. + MagneticStoreRetentionPeriodInDays *float64 `json:"magneticStoreRetentionPeriodInDays,omitempty" tf:"magnetic_store_retention_period_in_days,omitempty"` + + // The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. + MemoryStoreRetentionPeriodInHours *float64 `json:"memoryStoreRetentionPeriodInHours,omitempty" tf:"memory_store_retention_period_in_hours,omitempty"` +} + +type RetentionPropertiesObservation struct { + + // The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. + MagneticStoreRetentionPeriodInDays *float64 `json:"magneticStoreRetentionPeriodInDays,omitempty" tf:"magnetic_store_retention_period_in_days,omitempty"` + + // The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. + MemoryStoreRetentionPeriodInHours *float64 `json:"memoryStoreRetentionPeriodInHours,omitempty" tf:"memory_store_retention_period_in_hours,omitempty"` +} + +type RetentionPropertiesParameters struct { + + // The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. 
+ // +kubebuilder:validation:Optional + MagneticStoreRetentionPeriodInDays *float64 `json:"magneticStoreRetentionPeriodInDays" tf:"magnetic_store_retention_period_in_days,omitempty"` + + // The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. + // +kubebuilder:validation:Optional + MemoryStoreRetentionPeriodInHours *float64 `json:"memoryStoreRetentionPeriodInHours" tf:"memory_store_retention_period_in_hours,omitempty"` +} + +type S3ConfigurationInitParameters struct { + + // Bucket name of the customer S3 bucket. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are SSE_KMS and SSE_S3. + EncryptionOption *string `json:"encryptionOption,omitempty" tf:"encryption_option,omitempty"` + + // KMS key arn for the customer s3 location when encrypting with a KMS managed key. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Object key prefix for the customer S3 location. + ObjectKeyPrefix *string `json:"objectKeyPrefix,omitempty" tf:"object_key_prefix,omitempty"` +} + +type S3ConfigurationObservation struct { + + // Bucket name of the customer S3 bucket. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are SSE_KMS and SSE_S3. + EncryptionOption *string `json:"encryptionOption,omitempty" tf:"encryption_option,omitempty"` + + // KMS key arn for the customer s3 location when encrypting with a KMS managed key. + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Object key prefix for the customer S3 location. 
+ ObjectKeyPrefix *string `json:"objectKeyPrefix,omitempty" tf:"object_key_prefix,omitempty"` +} + +type S3ConfigurationParameters struct { + + // Bucket name of the customer S3 bucket. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are SSE_KMS and SSE_S3. + // +kubebuilder:validation:Optional + EncryptionOption *string `json:"encryptionOption,omitempty" tf:"encryption_option,omitempty"` + + // KMS key arn for the customer s3 location when encrypting with a KMS managed key. + // +kubebuilder:validation:Optional + KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Object key prefix for the customer S3 location. + // +kubebuilder:validation:Optional + ObjectKeyPrefix *string `json:"objectKeyPrefix,omitempty" tf:"object_key_prefix,omitempty"` +} + +type SchemaInitParameters struct { + + // A non-empty list of partition keys defining the attributes used to partition the table data. The order of the list determines the partition hierarchy. The name and type of each partition key as well as the partition key order cannot be changed after the table is created. However, the enforcement level of each partition key can be changed. See Composite Partition Key below for more details. + CompositePartitionKey *CompositePartitionKeyInitParameters `json:"compositePartitionKey,omitempty" tf:"composite_partition_key,omitempty"` +} + +type SchemaObservation struct { + + // A non-empty list of partition keys defining the attributes used to partition the table data. The order of the list determines the partition hierarchy. The name and type of each partition key as well as the partition key order cannot be changed after the table is created. However, the enforcement level of each partition key can be changed. 
See Composite Partition Key below for more details. + CompositePartitionKey *CompositePartitionKeyObservation `json:"compositePartitionKey,omitempty" tf:"composite_partition_key,omitempty"` +} + +type SchemaParameters struct { + + // A non-empty list of partition keys defining the attributes used to partition the table data. The order of the list determines the partition hierarchy. The name and type of each partition key as well as the partition key order cannot be changed after the table is created. However, the enforcement level of each partition key can be changed. See Composite Partition Key below for more details. + // +kubebuilder:validation:Optional + CompositePartitionKey *CompositePartitionKeyParameters `json:"compositePartitionKey,omitempty" tf:"composite_partition_key,omitempty"` +} + +type TableInitParameters struct { + + // Contains properties to set on the table when enabling magnetic store writes. See Magnetic Store Write Properties below for more details. + MagneticStoreWriteProperties *MagneticStoreWritePropertiesInitParameters `json:"magneticStoreWriteProperties,omitempty" tf:"magnetic_store_write_properties,omitempty"` + + // The retention duration for the memory store and magnetic store. See Retention Properties below for more details. If not provided, magnetic_store_retention_period_in_days default to 73000 and memory_store_retention_period_in_hours defaults to 6. + RetentionProperties *RetentionPropertiesInitParameters `json:"retentionProperties,omitempty" tf:"retention_properties,omitempty"` + + // The schema of the table. See Schema below for more details. + Schema *SchemaInitParameters `json:"schema,omitempty" tf:"schema,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TableObservation struct { + + // The ARN that uniquely identifies this table. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // – The name of the Timestream database. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The table_name and database_name separated by a colon (:). + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Contains properties to set on the table when enabling magnetic store writes. See Magnetic Store Write Properties below for more details. + MagneticStoreWriteProperties *MagneticStoreWritePropertiesObservation `json:"magneticStoreWriteProperties,omitempty" tf:"magnetic_store_write_properties,omitempty"` + + // The retention duration for the memory store and magnetic store. See Retention Properties below for more details. If not provided, magnetic_store_retention_period_in_days default to 73000 and memory_store_retention_period_in_hours defaults to 6. + RetentionProperties *RetentionPropertiesObservation `json:"retentionProperties,omitempty" tf:"retention_properties,omitempty"` + + // The schema of the table. See Schema below for more details. + Schema *SchemaObservation `json:"schema,omitempty" tf:"schema,omitempty"` + + // The name of the Timestream table. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type TableParameters struct { + + // – The name of the Timestream database. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/timestreamwrite/v1beta1.Database + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a Database in timestreamwrite to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a Database in timestreamwrite to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // Contains properties to set on the table when enabling magnetic store writes. See Magnetic Store Write Properties below for more details. + // +kubebuilder:validation:Optional + MagneticStoreWriteProperties *MagneticStoreWritePropertiesParameters `json:"magneticStoreWriteProperties,omitempty" tf:"magnetic_store_write_properties,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The retention duration for the memory store and magnetic store. See Retention Properties below for more details. If not provided, magnetic_store_retention_period_in_days default to 73000 and memory_store_retention_period_in_hours defaults to 6. + // +kubebuilder:validation:Optional + RetentionProperties *RetentionPropertiesParameters `json:"retentionProperties,omitempty" tf:"retention_properties,omitempty"` + + // The schema of the table. See Schema below for more details. + // +kubebuilder:validation:Optional + Schema *SchemaParameters `json:"schema,omitempty" tf:"schema,omitempty"` + + // The name of the Timestream table. + // +kubebuilder:validation:Required + TableName *string `json:"tableName" tf:"table_name,omitempty"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// TableSpec defines the desired state of Table +type TableSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TableParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TableInitParameters `json:"initProvider,omitempty"` +} + +// TableStatus defines the observed state of Table. +type TableStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TableObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Table is the Schema for the Tables API. Provides a Timestream table resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Table struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec TableSpec `json:"spec"` + Status TableStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TableList contains a list of Tables +type TableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Table `json:"items"` +} + +// Repository type metadata. +var ( + Table_Kind = "Table" + Table_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Table_Kind}.String() + Table_KindAPIVersion = Table_Kind + "." + CRDGroupVersion.String() + Table_GroupVersionKind = CRDGroupVersion.WithKind(Table_Kind) +) + +func init() { + SchemeBuilder.Register(&Table{}, &TableList{}) +} diff --git a/apis/transcribe/v1beta1/zz_generated.conversion_hubs.go b/apis/transcribe/v1beta1/zz_generated.conversion_hubs.go index 0491331481..38a90abf22 100755 --- a/apis/transcribe/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/transcribe/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *LanguageModel) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *Vocabulary) Hub() {} diff --git a/apis/transcribe/v1beta1/zz_generated.conversion_spokes.go b/apis/transcribe/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..cb9e528309 --- /dev/null +++ b/apis/transcribe/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this LanguageModel to the hub type. +func (tr *LanguageModel) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LanguageModel type. 
+func (tr *LanguageModel) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/transcribe/v1beta2/zz_generated.conversion_hubs.go b/apis/transcribe/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..2a2e1c9c3a --- /dev/null +++ b/apis/transcribe/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *LanguageModel) Hub() {} diff --git a/apis/transcribe/v1beta2/zz_generated.deepcopy.go b/apis/transcribe/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c5733e5f33 --- /dev/null +++ b/apis/transcribe/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,387 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputDataConfigInitParameters) DeepCopyInto(out *InputDataConfigInitParameters) { + *out = *in + if in.DataAccessRoleArn != nil { + in, out := &in.DataAccessRoleArn, &out.DataAccessRoleArn + *out = new(string) + **out = **in + } + if in.DataAccessRoleArnRef != nil { + in, out := &in.DataAccessRoleArnRef, &out.DataAccessRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataAccessRoleArnSelector != nil { + in, out := &in.DataAccessRoleArnSelector, &out.DataAccessRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } + if in.TuningDataS3URI != nil { + in, out := &in.TuningDataS3URI, &out.TuningDataS3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputDataConfigInitParameters. +func (in *InputDataConfigInitParameters) DeepCopy() *InputDataConfigInitParameters { + if in == nil { + return nil + } + out := new(InputDataConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputDataConfigObservation) DeepCopyInto(out *InputDataConfigObservation) { + *out = *in + if in.DataAccessRoleArn != nil { + in, out := &in.DataAccessRoleArn, &out.DataAccessRoleArn + *out = new(string) + **out = **in + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } + if in.TuningDataS3URI != nil { + in, out := &in.TuningDataS3URI, &out.TuningDataS3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputDataConfigObservation. 
+func (in *InputDataConfigObservation) DeepCopy() *InputDataConfigObservation { + if in == nil { + return nil + } + out := new(InputDataConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputDataConfigParameters) DeepCopyInto(out *InputDataConfigParameters) { + *out = *in + if in.DataAccessRoleArn != nil { + in, out := &in.DataAccessRoleArn, &out.DataAccessRoleArn + *out = new(string) + **out = **in + } + if in.DataAccessRoleArnRef != nil { + in, out := &in.DataAccessRoleArnRef, &out.DataAccessRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataAccessRoleArnSelector != nil { + in, out := &in.DataAccessRoleArnSelector, &out.DataAccessRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.S3URI != nil { + in, out := &in.S3URI, &out.S3URI + *out = new(string) + **out = **in + } + if in.TuningDataS3URI != nil { + in, out := &in.TuningDataS3URI, &out.TuningDataS3URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputDataConfigParameters. +func (in *InputDataConfigParameters) DeepCopy() *InputDataConfigParameters { + if in == nil { + return nil + } + out := new(InputDataConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LanguageModel) DeepCopyInto(out *LanguageModel) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LanguageModel. 
+func (in *LanguageModel) DeepCopy() *LanguageModel { + if in == nil { + return nil + } + out := new(LanguageModel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LanguageModel) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LanguageModelInitParameters) DeepCopyInto(out *LanguageModelInitParameters) { + *out = *in + if in.BaseModelName != nil { + in, out := &in.BaseModelName, &out.BaseModelName + *out = new(string) + **out = **in + } + if in.InputDataConfig != nil { + in, out := &in.InputDataConfig, &out.InputDataConfig + *out = new(InputDataConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LanguageModelInitParameters. +func (in *LanguageModelInitParameters) DeepCopy() *LanguageModelInitParameters { + if in == nil { + return nil + } + out := new(LanguageModelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LanguageModelList) DeepCopyInto(out *LanguageModelList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LanguageModel, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LanguageModelList. +func (in *LanguageModelList) DeepCopy() *LanguageModelList { + if in == nil { + return nil + } + out := new(LanguageModelList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LanguageModelList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LanguageModelObservation) DeepCopyInto(out *LanguageModelObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.BaseModelName != nil { + in, out := &in.BaseModelName, &out.BaseModelName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InputDataConfig != nil { + in, out := &in.InputDataConfig, &out.InputDataConfig + *out = new(InputDataConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := 
&in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LanguageModelObservation. +func (in *LanguageModelObservation) DeepCopy() *LanguageModelObservation { + if in == nil { + return nil + } + out := new(LanguageModelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LanguageModelParameters) DeepCopyInto(out *LanguageModelParameters) { + *out = *in + if in.BaseModelName != nil { + in, out := &in.BaseModelName, &out.BaseModelName + *out = new(string) + **out = **in + } + if in.InputDataConfig != nil { + in, out := &in.InputDataConfig, &out.InputDataConfig + *out = new(InputDataConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.LanguageCode != nil { + in, out := &in.LanguageCode, &out.LanguageCode + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LanguageModelParameters. 
+func (in *LanguageModelParameters) DeepCopy() *LanguageModelParameters { + if in == nil { + return nil + } + out := new(LanguageModelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LanguageModelSpec) DeepCopyInto(out *LanguageModelSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LanguageModelSpec. +func (in *LanguageModelSpec) DeepCopy() *LanguageModelSpec { + if in == nil { + return nil + } + out := new(LanguageModelSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LanguageModelStatus) DeepCopyInto(out *LanguageModelStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LanguageModelStatus. +func (in *LanguageModelStatus) DeepCopy() *LanguageModelStatus { + if in == nil { + return nil + } + out := new(LanguageModelStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/transcribe/v1beta2/zz_generated.managed.go b/apis/transcribe/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..0bb16b275c --- /dev/null +++ b/apis/transcribe/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this LanguageModel. 
+func (mg *LanguageModel) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LanguageModel. +func (mg *LanguageModel) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LanguageModel. +func (mg *LanguageModel) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LanguageModel. +func (mg *LanguageModel) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LanguageModel. +func (mg *LanguageModel) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LanguageModel. +func (mg *LanguageModel) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LanguageModel. +func (mg *LanguageModel) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LanguageModel. +func (mg *LanguageModel) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LanguageModel. +func (mg *LanguageModel) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LanguageModel. +func (mg *LanguageModel) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LanguageModel. +func (mg *LanguageModel) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LanguageModel. 
+func (mg *LanguageModel) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/transcribe/v1beta2/zz_generated.managedlist.go b/apis/transcribe/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..000c972a21 --- /dev/null +++ b/apis/transcribe/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this LanguageModelList. +func (l *LanguageModelList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/transcribe/v1beta2/zz_generated.resolvers.go b/apis/transcribe/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..bd9eb3a241 --- /dev/null +++ b/apis/transcribe/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,73 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *LanguageModel) ResolveReferences( // ResolveReferences of this LanguageModel. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.InputDataConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InputDataConfig.DataAccessRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.InputDataConfig.DataAccessRoleArnRef, + Selector: mg.Spec.ForProvider.InputDataConfig.DataAccessRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InputDataConfig.DataAccessRoleArn") + } + mg.Spec.ForProvider.InputDataConfig.DataAccessRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InputDataConfig.DataAccessRoleArnRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.InputDataConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InputDataConfig.DataAccessRoleArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.InputDataConfig.DataAccessRoleArnRef, + Selector: mg.Spec.InitProvider.InputDataConfig.DataAccessRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InputDataConfig.DataAccessRoleArn") + } + 
mg.Spec.InitProvider.InputDataConfig.DataAccessRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InputDataConfig.DataAccessRoleArnRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/transcribe/v1beta2/zz_groupversion_info.go b/apis/transcribe/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..69395a73d2 --- /dev/null +++ b/apis/transcribe/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=transcribe.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "transcribe.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/transcribe/v1beta2/zz_languagemodel_terraformed.go b/apis/transcribe/v1beta2/zz_languagemodel_terraformed.go new file mode 100755 index 0000000000..1d3de2d293 --- /dev/null +++ b/apis/transcribe/v1beta2/zz_languagemodel_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LanguageModel +func (mg *LanguageModel) GetTerraformResourceType() string { + return "aws_transcribe_language_model" +} + +// GetConnectionDetailsMapping for this LanguageModel +func (tr *LanguageModel) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LanguageModel +func (tr *LanguageModel) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LanguageModel +func (tr *LanguageModel) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LanguageModel +func (tr *LanguageModel) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LanguageModel +func (tr *LanguageModel) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LanguageModel +func (tr *LanguageModel) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LanguageModel +func (tr *LanguageModel) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + 
return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this LanguageModel
+func (tr *LanguageModel) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this LanguageModel using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *LanguageModel) LateInitialize(attrs []byte) (bool, error) {
+	params := &LanguageModelParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LanguageModel) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/transcribe/v1beta2/zz_languagemodel_types.go b/apis/transcribe/v1beta2/zz_languagemodel_types.go new file mode 100755 index 0000000000..4b49dcc027 --- /dev/null +++ b/apis/transcribe/v1beta2/zz_languagemodel_types.go @@ -0,0 +1,202 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type InputDataConfigInitParameters struct { + + // IAM role with access to S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + DataAccessRoleArn *string `json:"dataAccessRoleArn,omitempty" tf:"data_access_role_arn,omitempty"` + + // Reference to a Role in iam to populate dataAccessRoleArn. + // +kubebuilder:validation:Optional + DataAccessRoleArnRef *v1.Reference `json:"dataAccessRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate dataAccessRoleArn. + // +kubebuilder:validation:Optional + DataAccessRoleArnSelector *v1.Selector `json:"dataAccessRoleArnSelector,omitempty" tf:"-"` + + // S3 URI where training data is located. + S3URI *string `json:"s3Uri,omitempty" tf:"s3_uri,omitempty"` + + // S3 URI where tuning data is located. + TuningDataS3URI *string `json:"tuningDataS3Uri,omitempty" tf:"tuning_data_s3_uri,omitempty"` +} + +type InputDataConfigObservation struct { + + // IAM role with access to S3 bucket. 
+ DataAccessRoleArn *string `json:"dataAccessRoleArn,omitempty" tf:"data_access_role_arn,omitempty"` + + // S3 URI where training data is located. + S3URI *string `json:"s3Uri,omitempty" tf:"s3_uri,omitempty"` + + // S3 URI where tuning data is located. + TuningDataS3URI *string `json:"tuningDataS3Uri,omitempty" tf:"tuning_data_s3_uri,omitempty"` +} + +type InputDataConfigParameters struct { + + // IAM role with access to S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + DataAccessRoleArn *string `json:"dataAccessRoleArn,omitempty" tf:"data_access_role_arn,omitempty"` + + // Reference to a Role in iam to populate dataAccessRoleArn. + // +kubebuilder:validation:Optional + DataAccessRoleArnRef *v1.Reference `json:"dataAccessRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate dataAccessRoleArn. + // +kubebuilder:validation:Optional + DataAccessRoleArnSelector *v1.Selector `json:"dataAccessRoleArnSelector,omitempty" tf:"-"` + + // S3 URI where training data is located. + // +kubebuilder:validation:Optional + S3URI *string `json:"s3Uri" tf:"s3_uri,omitempty"` + + // S3 URI where tuning data is located. + // +kubebuilder:validation:Optional + TuningDataS3URI *string `json:"tuningDataS3Uri,omitempty" tf:"tuning_data_s3_uri,omitempty"` +} + +type LanguageModelInitParameters struct { + + // Name of reference base model. + BaseModelName *string `json:"baseModelName,omitempty" tf:"base_model_name,omitempty"` + + // The input data config for the LanguageModel. See Input Data Config for more details. + InputDataConfig *InputDataConfigInitParameters `json:"inputDataConfig,omitempty" tf:"input_data_config,omitempty"` + + // The language code you selected for your language model. Refer to the supported languages page for accepted codes. 
+ LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LanguageModelObservation struct { + + // ARN of the LanguageModel. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Name of reference base model. + BaseModelName *string `json:"baseModelName,omitempty" tf:"base_model_name,omitempty"` + + // LanguageModel name. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The input data config for the LanguageModel. See Input Data Config for more details. + InputDataConfig *InputDataConfigObservation `json:"inputDataConfig,omitempty" tf:"input_data_config,omitempty"` + + // The language code you selected for your language model. Refer to the supported languages page for accepted codes. + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type LanguageModelParameters struct { + + // Name of reference base model. + // +kubebuilder:validation:Optional + BaseModelName *string `json:"baseModelName,omitempty" tf:"base_model_name,omitempty"` + + // The input data config for the LanguageModel. See Input Data Config for more details. + // +kubebuilder:validation:Optional + InputDataConfig *InputDataConfigParameters `json:"inputDataConfig,omitempty" tf:"input_data_config,omitempty"` + + // The language code you selected for your language model. Refer to the supported languages page for accepted codes. + // +kubebuilder:validation:Optional + LanguageCode *string `json:"languageCode,omitempty" tf:"language_code,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// LanguageModelSpec defines the desired state of LanguageModel +type LanguageModelSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LanguageModelParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LanguageModelInitParameters `json:"initProvider,omitempty"` +} + +// LanguageModelStatus defines the observed state of LanguageModel. +type LanguageModelStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LanguageModelObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LanguageModel is the Schema for the LanguageModels API. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type LanguageModel struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.baseModelName) || (has(self.initProvider) && has(self.initProvider.baseModelName))",message="spec.forProvider.baseModelName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.inputDataConfig) || (has(self.initProvider) && has(self.initProvider.inputDataConfig))",message="spec.forProvider.inputDataConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.languageCode) || (has(self.initProvider) && has(self.initProvider.languageCode))",message="spec.forProvider.languageCode is a required parameter" + Spec LanguageModelSpec `json:"spec"` + Status LanguageModelStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LanguageModelList contains a list of LanguageModels +type LanguageModelList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LanguageModel `json:"items"` +} + +// Repository 
type metadata. +var ( + LanguageModel_Kind = "LanguageModel" + LanguageModel_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LanguageModel_Kind}.String() + LanguageModel_KindAPIVersion = LanguageModel_Kind + "." + CRDGroupVersion.String() + LanguageModel_GroupVersionKind = CRDGroupVersion.WithKind(LanguageModel_Kind) +) + +func init() { + SchemeBuilder.Register(&LanguageModel{}, &LanguageModelList{}) +} diff --git a/apis/transfer/v1beta1/zz_generated.conversion_hubs.go b/apis/transfer/v1beta1/zz_generated.conversion_hubs.go index 2ef92cedd7..162bdd8b30 100755 --- a/apis/transfer/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/transfer/v1beta1/zz_generated.conversion_hubs.go @@ -6,20 +6,8 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Connector) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Server) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SSHKey) Hub() {} // Hub marks this type as a conversion hub. func (tr *Tag) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *User) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Workflow) Hub() {} diff --git a/apis/transfer/v1beta1/zz_generated.conversion_spokes.go b/apis/transfer/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..833fae5ff5 --- /dev/null +++ b/apis/transfer/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Server to the hub type. 
+func (tr *Server) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Server type. +func (tr *Server) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this User to the hub type. +func (tr *User) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the User type. +func (tr *User) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Workflow to the hub type. 
+func (tr *Workflow) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workflow type. +func (tr *Workflow) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/transfer/v1beta1/zz_generated.resolvers.go b/apis/transfer/v1beta1/zz_generated.resolvers.go index ab0bdcebb7..6f733847f9 100644 --- a/apis/transfer/v1beta1/zz_generated.resolvers.go +++ b/apis/transfer/v1beta1/zz_generated.resolvers.go @@ -120,7 +120,7 @@ func (mg *SSHKey) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -139,7 +139,7 @@ func (mg *SSHKey) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta1", "User", "UserList") + m, l, 
err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta2", "User", "UserList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -158,7 +158,7 @@ func (mg *SSHKey) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.UserName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.UserNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -177,7 +177,7 @@ func (mg *SSHKey) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.InitProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ServerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta1", "User", "UserList") + m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta2", "User", "UserList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -376,7 +376,7 @@ func (mg *Tag) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -395,7 +395,7 @@ func (mg *Tag) ResolveReferences(ctx context.Context, c client.Reader) error { 
mg.Spec.ForProvider.ResourceArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceArnRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/transfer/v1beta1/zz_sshkey_types.go b/apis/transfer/v1beta1/zz_sshkey_types.go index 2766f5dae5..3d6e8078cf 100755 --- a/apis/transfer/v1beta1/zz_sshkey_types.go +++ b/apis/transfer/v1beta1/zz_sshkey_types.go @@ -19,7 +19,7 @@ type SSHKeyInitParameters struct { Body *string `json:"body,omitempty" tf:"body,omitempty"` // (Requirement) The Server ID of the Transfer Server (e.g., s-12345678) - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` @@ -32,7 +32,7 @@ type SSHKeyInitParameters struct { ServerIDSelector *v1.Selector `json:"serverIdSelector,omitempty" tf:"-"` // (Requirement) The name of the user account that is assigned to one or more servers. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta1.User + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta2.User UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` // Reference to a User in transfer to populate userName. 
@@ -70,7 +70,7 @@ type SSHKeyParameters struct { Region *string `json:"region" tf:"-"` // (Requirement) The Server ID of the Transfer Server (e.g., s-12345678) - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` @@ -84,7 +84,7 @@ type SSHKeyParameters struct { ServerIDSelector *v1.Selector `json:"serverIdSelector,omitempty" tf:"-"` // (Requirement) The name of the user account that is assigned to one or more servers. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta1.User + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta2.User // +kubebuilder:validation:Optional UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` diff --git a/apis/transfer/v1beta1/zz_tag_types.go b/apis/transfer/v1beta1/zz_tag_types.go index 06f385625e..16e8699b90 100755 --- a/apis/transfer/v1beta1/zz_tag_types.go +++ b/apis/transfer/v1beta1/zz_tag_types.go @@ -19,7 +19,7 @@ type TagInitParameters struct { Key *string `json:"key,omitempty" tf:"key,omitempty"` // Amazon Resource Name (ARN) of the Transfer Family resource to tag. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` @@ -62,7 +62,7 @@ type TagParameters struct { Region *string `json:"region" tf:"-"` // Amazon Resource Name (ARN) of the Transfer Family resource to tag. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) // +kubebuilder:validation:Optional ResourceArn *string `json:"resourceArn,omitempty" tf:"resource_arn,omitempty"` diff --git a/apis/transfer/v1beta2/zz_generated.conversion_hubs.go b/apis/transfer/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..af012d5586 --- /dev/null +++ b/apis/transfer/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Server) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *User) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *Workflow) Hub() {} diff --git a/apis/transfer/v1beta2/zz_generated.deepcopy.go b/apis/transfer/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..eb4a746558 --- /dev/null +++ b/apis/transfer/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,4409 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters. +func (in *CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters) DeepCopy() *CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters { + if in == nil { + return nil + } + out := new(CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CopyStepDetailsDestinationFileLocationEFSFileLocationObservation) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationEFSFileLocationObservation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsDestinationFileLocationEFSFileLocationObservation. +func (in *CopyStepDetailsDestinationFileLocationEFSFileLocationObservation) DeepCopy() *CopyStepDetailsDestinationFileLocationEFSFileLocationObservation { + if in == nil { + return nil + } + out := new(CopyStepDetailsDestinationFileLocationEFSFileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyStepDetailsDestinationFileLocationEFSFileLocationParameters) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationEFSFileLocationParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsDestinationFileLocationEFSFileLocationParameters. +func (in *CopyStepDetailsDestinationFileLocationEFSFileLocationParameters) DeepCopy() *CopyStepDetailsDestinationFileLocationEFSFileLocationParameters { + if in == nil { + return nil + } + out := new(CopyStepDetailsDestinationFileLocationEFSFileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CopyStepDetailsDestinationFileLocationInitParameters) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationInitParameters) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(CopyStepDetailsDestinationFileLocationS3FileLocationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsDestinationFileLocationInitParameters. +func (in *CopyStepDetailsDestinationFileLocationInitParameters) DeepCopy() *CopyStepDetailsDestinationFileLocationInitParameters { + if in == nil { + return nil + } + out := new(CopyStepDetailsDestinationFileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyStepDetailsDestinationFileLocationObservation) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationObservation) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(CopyStepDetailsDestinationFileLocationEFSFileLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(CopyStepDetailsDestinationFileLocationS3FileLocationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsDestinationFileLocationObservation. 
+func (in *CopyStepDetailsDestinationFileLocationObservation) DeepCopy() *CopyStepDetailsDestinationFileLocationObservation { + if in == nil { + return nil + } + out := new(CopyStepDetailsDestinationFileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyStepDetailsDestinationFileLocationParameters) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationParameters) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(CopyStepDetailsDestinationFileLocationEFSFileLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(CopyStepDetailsDestinationFileLocationS3FileLocationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsDestinationFileLocationParameters. +func (in *CopyStepDetailsDestinationFileLocationParameters) DeepCopy() *CopyStepDetailsDestinationFileLocationParameters { + if in == nil { + return nil + } + out := new(CopyStepDetailsDestinationFileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyStepDetailsDestinationFileLocationS3FileLocationInitParameters) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationS3FileLocationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsDestinationFileLocationS3FileLocationInitParameters. 
+func (in *CopyStepDetailsDestinationFileLocationS3FileLocationInitParameters) DeepCopy() *CopyStepDetailsDestinationFileLocationS3FileLocationInitParameters { + if in == nil { + return nil + } + out := new(CopyStepDetailsDestinationFileLocationS3FileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyStepDetailsDestinationFileLocationS3FileLocationObservation) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationS3FileLocationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsDestinationFileLocationS3FileLocationObservation. +func (in *CopyStepDetailsDestinationFileLocationS3FileLocationObservation) DeepCopy() *CopyStepDetailsDestinationFileLocationS3FileLocationObservation { + if in == nil { + return nil + } + out := new(CopyStepDetailsDestinationFileLocationS3FileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyStepDetailsDestinationFileLocationS3FileLocationParameters) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationS3FileLocationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsDestinationFileLocationS3FileLocationParameters. 
+func (in *CopyStepDetailsDestinationFileLocationS3FileLocationParameters) DeepCopy() *CopyStepDetailsDestinationFileLocationS3FileLocationParameters { + if in == nil { + return nil + } + out := new(CopyStepDetailsDestinationFileLocationS3FileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyStepDetailsInitParameters) DeepCopyInto(out *CopyStepDetailsInitParameters) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(DestinationFileLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsInitParameters. +func (in *CopyStepDetailsInitParameters) DeepCopy() *CopyStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(CopyStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CopyStepDetailsObservation) DeepCopyInto(out *CopyStepDetailsObservation) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(DestinationFileLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsObservation. +func (in *CopyStepDetailsObservation) DeepCopy() *CopyStepDetailsObservation { + if in == nil { + return nil + } + out := new(CopyStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyStepDetailsParameters) DeepCopyInto(out *CopyStepDetailsParameters) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(DestinationFileLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyStepDetailsParameters. 
+func (in *CopyStepDetailsParameters) DeepCopy() *CopyStepDetailsParameters { + if in == nil { + return nil + } + out := new(CopyStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomStepDetailsInitParameters) DeepCopyInto(out *CustomStepDetailsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomStepDetailsInitParameters. +func (in *CustomStepDetailsInitParameters) DeepCopy() *CustomStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(CustomStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomStepDetailsObservation) DeepCopyInto(out *CustomStepDetailsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomStepDetailsObservation. +func (in *CustomStepDetailsObservation) DeepCopy() *CustomStepDetailsObservation { + if in == nil { + return nil + } + out := new(CustomStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomStepDetailsParameters) DeepCopyInto(out *CustomStepDetailsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomStepDetailsParameters. +func (in *CustomStepDetailsParameters) DeepCopy() *CustomStepDetailsParameters { + if in == nil { + return nil + } + out := new(CustomStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DecryptStepDetailsDestinationFileLocationEFSFileLocationInitParameters) DeepCopyInto(out *DecryptStepDetailsDestinationFileLocationEFSFileLocationInitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsDestinationFileLocationEFSFileLocationInitParameters. +func (in *DecryptStepDetailsDestinationFileLocationEFSFileLocationInitParameters) DeepCopy() *DecryptStepDetailsDestinationFileLocationEFSFileLocationInitParameters { + if in == nil { + return nil + } + out := new(DecryptStepDetailsDestinationFileLocationEFSFileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DecryptStepDetailsDestinationFileLocationEFSFileLocationObservation) DeepCopyInto(out *DecryptStepDetailsDestinationFileLocationEFSFileLocationObservation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsDestinationFileLocationEFSFileLocationObservation. 
+func (in *DecryptStepDetailsDestinationFileLocationEFSFileLocationObservation) DeepCopy() *DecryptStepDetailsDestinationFileLocationEFSFileLocationObservation { + if in == nil { + return nil + } + out := new(DecryptStepDetailsDestinationFileLocationEFSFileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DecryptStepDetailsDestinationFileLocationEFSFileLocationParameters) DeepCopyInto(out *DecryptStepDetailsDestinationFileLocationEFSFileLocationParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsDestinationFileLocationEFSFileLocationParameters. +func (in *DecryptStepDetailsDestinationFileLocationEFSFileLocationParameters) DeepCopy() *DecryptStepDetailsDestinationFileLocationEFSFileLocationParameters { + if in == nil { + return nil + } + out := new(DecryptStepDetailsDestinationFileLocationEFSFileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DecryptStepDetailsDestinationFileLocationInitParameters) DeepCopyInto(out *DecryptStepDetailsDestinationFileLocationInitParameters) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(DestinationFileLocationEFSFileLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(DestinationFileLocationS3FileLocationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsDestinationFileLocationInitParameters. +func (in *DecryptStepDetailsDestinationFileLocationInitParameters) DeepCopy() *DecryptStepDetailsDestinationFileLocationInitParameters { + if in == nil { + return nil + } + out := new(DecryptStepDetailsDestinationFileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DecryptStepDetailsDestinationFileLocationObservation) DeepCopyInto(out *DecryptStepDetailsDestinationFileLocationObservation) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(DestinationFileLocationEFSFileLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(DestinationFileLocationS3FileLocationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsDestinationFileLocationObservation. 
+func (in *DecryptStepDetailsDestinationFileLocationObservation) DeepCopy() *DecryptStepDetailsDestinationFileLocationObservation { + if in == nil { + return nil + } + out := new(DecryptStepDetailsDestinationFileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DecryptStepDetailsDestinationFileLocationParameters) DeepCopyInto(out *DecryptStepDetailsDestinationFileLocationParameters) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(DestinationFileLocationEFSFileLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(DestinationFileLocationS3FileLocationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsDestinationFileLocationParameters. +func (in *DecryptStepDetailsDestinationFileLocationParameters) DeepCopy() *DecryptStepDetailsDestinationFileLocationParameters { + if in == nil { + return nil + } + out := new(DecryptStepDetailsDestinationFileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DecryptStepDetailsDestinationFileLocationS3FileLocationInitParameters) DeepCopyInto(out *DecryptStepDetailsDestinationFileLocationS3FileLocationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsDestinationFileLocationS3FileLocationInitParameters. 
+func (in *DecryptStepDetailsDestinationFileLocationS3FileLocationInitParameters) DeepCopy() *DecryptStepDetailsDestinationFileLocationS3FileLocationInitParameters { + if in == nil { + return nil + } + out := new(DecryptStepDetailsDestinationFileLocationS3FileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DecryptStepDetailsDestinationFileLocationS3FileLocationObservation) DeepCopyInto(out *DecryptStepDetailsDestinationFileLocationS3FileLocationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsDestinationFileLocationS3FileLocationObservation. +func (in *DecryptStepDetailsDestinationFileLocationS3FileLocationObservation) DeepCopy() *DecryptStepDetailsDestinationFileLocationS3FileLocationObservation { + if in == nil { + return nil + } + out := new(DecryptStepDetailsDestinationFileLocationS3FileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DecryptStepDetailsDestinationFileLocationS3FileLocationParameters) DeepCopyInto(out *DecryptStepDetailsDestinationFileLocationS3FileLocationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsDestinationFileLocationS3FileLocationParameters. 
+func (in *DecryptStepDetailsDestinationFileLocationS3FileLocationParameters) DeepCopy() *DecryptStepDetailsDestinationFileLocationS3FileLocationParameters { + if in == nil { + return nil + } + out := new(DecryptStepDetailsDestinationFileLocationS3FileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DecryptStepDetailsInitParameters) DeepCopyInto(out *DecryptStepDetailsInitParameters) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(DecryptStepDetailsDestinationFileLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsInitParameters. +func (in *DecryptStepDetailsInitParameters) DeepCopy() *DecryptStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(DecryptStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DecryptStepDetailsObservation) DeepCopyInto(out *DecryptStepDetailsObservation) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(DecryptStepDetailsDestinationFileLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsObservation. +func (in *DecryptStepDetailsObservation) DeepCopy() *DecryptStepDetailsObservation { + if in == nil { + return nil + } + out := new(DecryptStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DecryptStepDetailsParameters) DeepCopyInto(out *DecryptStepDetailsParameters) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(DecryptStepDetailsDestinationFileLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecryptStepDetailsParameters. +func (in *DecryptStepDetailsParameters) DeepCopy() *DecryptStepDetailsParameters { + if in == nil { + return nil + } + out := new(DecryptStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteStepDetailsInitParameters) DeepCopyInto(out *DeleteStepDetailsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteStepDetailsInitParameters. +func (in *DeleteStepDetailsInitParameters) DeepCopy() *DeleteStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(DeleteStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DeleteStepDetailsObservation) DeepCopyInto(out *DeleteStepDetailsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteStepDetailsObservation. +func (in *DeleteStepDetailsObservation) DeepCopy() *DeleteStepDetailsObservation { + if in == nil { + return nil + } + out := new(DeleteStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteStepDetailsParameters) DeepCopyInto(out *DeleteStepDetailsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteStepDetailsParameters. +func (in *DeleteStepDetailsParameters) DeepCopy() *DeleteStepDetailsParameters { + if in == nil { + return nil + } + out := new(DeleteStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationFileLocationEFSFileLocationInitParameters) DeepCopyInto(out *DestinationFileLocationEFSFileLocationInitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFileLocationEFSFileLocationInitParameters. +func (in *DestinationFileLocationEFSFileLocationInitParameters) DeepCopy() *DestinationFileLocationEFSFileLocationInitParameters { + if in == nil { + return nil + } + out := new(DestinationFileLocationEFSFileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationFileLocationEFSFileLocationObservation) DeepCopyInto(out *DestinationFileLocationEFSFileLocationObservation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFileLocationEFSFileLocationObservation. +func (in *DestinationFileLocationEFSFileLocationObservation) DeepCopy() *DestinationFileLocationEFSFileLocationObservation { + if in == nil { + return nil + } + out := new(DestinationFileLocationEFSFileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationFileLocationEFSFileLocationParameters) DeepCopyInto(out *DestinationFileLocationEFSFileLocationParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFileLocationEFSFileLocationParameters. +func (in *DestinationFileLocationEFSFileLocationParameters) DeepCopy() *DestinationFileLocationEFSFileLocationParameters { + if in == nil { + return nil + } + out := new(DestinationFileLocationEFSFileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationFileLocationInitParameters) DeepCopyInto(out *DestinationFileLocationInitParameters) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(EFSFileLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(S3FileLocationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFileLocationInitParameters. +func (in *DestinationFileLocationInitParameters) DeepCopy() *DestinationFileLocationInitParameters { + if in == nil { + return nil + } + out := new(DestinationFileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationFileLocationObservation) DeepCopyInto(out *DestinationFileLocationObservation) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(EFSFileLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(S3FileLocationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFileLocationObservation. +func (in *DestinationFileLocationObservation) DeepCopy() *DestinationFileLocationObservation { + if in == nil { + return nil + } + out := new(DestinationFileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationFileLocationParameters) DeepCopyInto(out *DestinationFileLocationParameters) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(EFSFileLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(S3FileLocationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFileLocationParameters. +func (in *DestinationFileLocationParameters) DeepCopy() *DestinationFileLocationParameters { + if in == nil { + return nil + } + out := new(DestinationFileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationFileLocationS3FileLocationInitParameters) DeepCopyInto(out *DestinationFileLocationS3FileLocationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFileLocationS3FileLocationInitParameters. +func (in *DestinationFileLocationS3FileLocationInitParameters) DeepCopy() *DestinationFileLocationS3FileLocationInitParameters { + if in == nil { + return nil + } + out := new(DestinationFileLocationS3FileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationFileLocationS3FileLocationObservation) DeepCopyInto(out *DestinationFileLocationS3FileLocationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFileLocationS3FileLocationObservation. +func (in *DestinationFileLocationS3FileLocationObservation) DeepCopy() *DestinationFileLocationS3FileLocationObservation { + if in == nil { + return nil + } + out := new(DestinationFileLocationS3FileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationFileLocationS3FileLocationParameters) DeepCopyInto(out *DestinationFileLocationS3FileLocationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationFileLocationS3FileLocationParameters. +func (in *DestinationFileLocationS3FileLocationParameters) DeepCopy() *DestinationFileLocationS3FileLocationParameters { + if in == nil { + return nil + } + out := new(DestinationFileLocationS3FileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EFSFileLocationInitParameters) DeepCopyInto(out *EFSFileLocationInitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSFileLocationInitParameters. +func (in *EFSFileLocationInitParameters) DeepCopy() *EFSFileLocationInitParameters { + if in == nil { + return nil + } + out := new(EFSFileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EFSFileLocationObservation) DeepCopyInto(out *EFSFileLocationObservation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSFileLocationObservation. +func (in *EFSFileLocationObservation) DeepCopy() *EFSFileLocationObservation { + if in == nil { + return nil + } + out := new(EFSFileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EFSFileLocationParameters) DeepCopyInto(out *EFSFileLocationParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSFileLocationParameters. +func (in *EFSFileLocationParameters) DeepCopy() *EFSFileLocationParameters { + if in == nil { + return nil + } + out := new(EFSFileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointDetailsInitParameters) DeepCopyInto(out *EndpointDetailsInitParameters) { + *out = *in + if in.AddressAllocationIds != nil { + in, out := &in.AddressAllocationIds, &out.AddressAllocationIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCEndpointID != nil { + in, out := &in.VPCEndpointID, &out.VPCEndpointID + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointDetailsInitParameters. +func (in *EndpointDetailsInitParameters) DeepCopy() *EndpointDetailsInitParameters { + if in == nil { + return nil + } + out := new(EndpointDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointDetailsObservation) DeepCopyInto(out *EndpointDetailsObservation) { + *out = *in + if in.AddressAllocationIds != nil { + in, out := &in.AddressAllocationIds, &out.AddressAllocationIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCEndpointID != nil { + in, out := &in.VPCEndpointID, &out.VPCEndpointID + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointDetailsObservation. +func (in *EndpointDetailsObservation) DeepCopy() *EndpointDetailsObservation { + if in == nil { + return nil + } + out := new(EndpointDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointDetailsParameters) DeepCopyInto(out *EndpointDetailsParameters) { + *out = *in + if in.AddressAllocationIds != nil { + in, out := &in.AddressAllocationIds, &out.AddressAllocationIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPCEndpointID != nil { + in, out := &in.VPCEndpointID, &out.VPCEndpointID + *out = new(string) + **out = **in + } + if in.VPCID != nil { + in, out := &in.VPCID, &out.VPCID + *out = new(string) + **out = **in + } + if in.VPCIDRef != nil { + in, out := &in.VPCIDRef, &out.VPCIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPCIDSelector != nil { + in, out := &in.VPCIDSelector, &out.VPCIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointDetailsParameters. +func (in *EndpointDetailsParameters) DeepCopy() *EndpointDetailsParameters { + if in == nil { + return nil + } + out := new(EndpointDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HomeDirectoryMappingsInitParameters) DeepCopyInto(out *HomeDirectoryMappingsInitParameters) { + *out = *in + if in.Entry != nil { + in, out := &in.Entry, &out.Entry + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HomeDirectoryMappingsInitParameters. +func (in *HomeDirectoryMappingsInitParameters) DeepCopy() *HomeDirectoryMappingsInitParameters { + if in == nil { + return nil + } + out := new(HomeDirectoryMappingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HomeDirectoryMappingsObservation) DeepCopyInto(out *HomeDirectoryMappingsObservation) { + *out = *in + if in.Entry != nil { + in, out := &in.Entry, &out.Entry + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HomeDirectoryMappingsObservation. +func (in *HomeDirectoryMappingsObservation) DeepCopy() *HomeDirectoryMappingsObservation { + if in == nil { + return nil + } + out := new(HomeDirectoryMappingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HomeDirectoryMappingsParameters) DeepCopyInto(out *HomeDirectoryMappingsParameters) { + *out = *in + if in.Entry != nil { + in, out := &in.Entry, &out.Entry + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HomeDirectoryMappingsParameters. 
+func (in *HomeDirectoryMappingsParameters) DeepCopy() *HomeDirectoryMappingsParameters { + if in == nil { + return nil + } + out := new(HomeDirectoryMappingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnExceptionStepsInitParameters) DeepCopyInto(out *OnExceptionStepsInitParameters) { + *out = *in + if in.CopyStepDetails != nil { + in, out := &in.CopyStepDetails, &out.CopyStepDetails + *out = new(CopyStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomStepDetails != nil { + in, out := &in.CustomStepDetails, &out.CustomStepDetails + *out = new(CustomStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DecryptStepDetails != nil { + in, out := &in.DecryptStepDetails, &out.DecryptStepDetails + *out = new(DecryptStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeleteStepDetails != nil { + in, out := &in.DeleteStepDetails, &out.DeleteStepDetails + *out = new(DeleteStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TagStepDetails != nil { + in, out := &in.TagStepDetails, &out.TagStepDetails + *out = new(TagStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnExceptionStepsInitParameters. +func (in *OnExceptionStepsInitParameters) DeepCopy() *OnExceptionStepsInitParameters { + if in == nil { + return nil + } + out := new(OnExceptionStepsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OnExceptionStepsObservation) DeepCopyInto(out *OnExceptionStepsObservation) { + *out = *in + if in.CopyStepDetails != nil { + in, out := &in.CopyStepDetails, &out.CopyStepDetails + *out = new(CopyStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomStepDetails != nil { + in, out := &in.CustomStepDetails, &out.CustomStepDetails + *out = new(CustomStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.DecryptStepDetails != nil { + in, out := &in.DecryptStepDetails, &out.DecryptStepDetails + *out = new(DecryptStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.DeleteStepDetails != nil { + in, out := &in.DeleteStepDetails, &out.DeleteStepDetails + *out = new(DeleteStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.TagStepDetails != nil { + in, out := &in.TagStepDetails, &out.TagStepDetails + *out = new(TagStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnExceptionStepsObservation. +func (in *OnExceptionStepsObservation) DeepCopy() *OnExceptionStepsObservation { + if in == nil { + return nil + } + out := new(OnExceptionStepsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OnExceptionStepsParameters) DeepCopyInto(out *OnExceptionStepsParameters) { + *out = *in + if in.CopyStepDetails != nil { + in, out := &in.CopyStepDetails, &out.CopyStepDetails + *out = new(CopyStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomStepDetails != nil { + in, out := &in.CustomStepDetails, &out.CustomStepDetails + *out = new(CustomStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.DecryptStepDetails != nil { + in, out := &in.DecryptStepDetails, &out.DecryptStepDetails + *out = new(DecryptStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.DeleteStepDetails != nil { + in, out := &in.DeleteStepDetails, &out.DeleteStepDetails + *out = new(DeleteStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.TagStepDetails != nil { + in, out := &in.TagStepDetails, &out.TagStepDetails + *out = new(TagStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnExceptionStepsParameters. +func (in *OnExceptionStepsParameters) DeepCopy() *OnExceptionStepsParameters { + if in == nil { + return nil + } + out := new(OnExceptionStepsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnPartialUploadInitParameters) DeepCopyInto(out *OnPartialUploadInitParameters) { + *out = *in + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.WorkflowID != nil { + in, out := &in.WorkflowID, &out.WorkflowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPartialUploadInitParameters. 
+func (in *OnPartialUploadInitParameters) DeepCopy() *OnPartialUploadInitParameters { + if in == nil { + return nil + } + out := new(OnPartialUploadInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnPartialUploadObservation) DeepCopyInto(out *OnPartialUploadObservation) { + *out = *in + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.WorkflowID != nil { + in, out := &in.WorkflowID, &out.WorkflowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPartialUploadObservation. +func (in *OnPartialUploadObservation) DeepCopy() *OnPartialUploadObservation { + if in == nil { + return nil + } + out := new(OnPartialUploadObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnPartialUploadParameters) DeepCopyInto(out *OnPartialUploadParameters) { + *out = *in + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.WorkflowID != nil { + in, out := &in.WorkflowID, &out.WorkflowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPartialUploadParameters. +func (in *OnPartialUploadParameters) DeepCopy() *OnPartialUploadParameters { + if in == nil { + return nil + } + out := new(OnPartialUploadParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OnUploadInitParameters) DeepCopyInto(out *OnUploadInitParameters) { + *out = *in + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.WorkflowID != nil { + in, out := &in.WorkflowID, &out.WorkflowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnUploadInitParameters. +func (in *OnUploadInitParameters) DeepCopy() *OnUploadInitParameters { + if in == nil { + return nil + } + out := new(OnUploadInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnUploadObservation) DeepCopyInto(out *OnUploadObservation) { + *out = *in + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.WorkflowID != nil { + in, out := &in.WorkflowID, &out.WorkflowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnUploadObservation. +func (in *OnUploadObservation) DeepCopy() *OnUploadObservation { + if in == nil { + return nil + } + out := new(OnUploadObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnUploadParameters) DeepCopyInto(out *OnUploadParameters) { + *out = *in + if in.ExecutionRole != nil { + in, out := &in.ExecutionRole, &out.ExecutionRole + *out = new(string) + **out = **in + } + if in.WorkflowID != nil { + in, out := &in.WorkflowID, &out.WorkflowID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnUploadParameters. 
+func (in *OnUploadParameters) DeepCopy() *OnUploadParameters { + if in == nil { + return nil + } + out := new(OnUploadParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PosixProfileInitParameters) DeepCopyInto(out *PosixProfileInitParameters) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.SecondaryGids != nil { + in, out := &in.SecondaryGids, &out.SecondaryGids + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PosixProfileInitParameters. +func (in *PosixProfileInitParameters) DeepCopy() *PosixProfileInitParameters { + if in == nil { + return nil + } + out := new(PosixProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PosixProfileObservation) DeepCopyInto(out *PosixProfileObservation) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.SecondaryGids != nil { + in, out := &in.SecondaryGids, &out.SecondaryGids + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PosixProfileObservation. 
+func (in *PosixProfileObservation) DeepCopy() *PosixProfileObservation { + if in == nil { + return nil + } + out := new(PosixProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PosixProfileParameters) DeepCopyInto(out *PosixProfileParameters) { + *out = *in + if in.GID != nil { + in, out := &in.GID, &out.GID + *out = new(float64) + **out = **in + } + if in.SecondaryGids != nil { + in, out := &in.SecondaryGids, &out.SecondaryGids + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PosixProfileParameters. +func (in *PosixProfileParameters) DeepCopy() *PosixProfileParameters { + if in == nil { + return nil + } + out := new(PosixProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtocolDetailsInitParameters) DeepCopyInto(out *ProtocolDetailsInitParameters) { + *out = *in + if in.As2Transports != nil { + in, out := &in.As2Transports, &out.As2Transports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PassiveIP != nil { + in, out := &in.PassiveIP, &out.PassiveIP + *out = new(string) + **out = **in + } + if in.SetStatOption != nil { + in, out := &in.SetStatOption, &out.SetStatOption + *out = new(string) + **out = **in + } + if in.TLSSessionResumptionMode != nil { + in, out := &in.TLSSessionResumptionMode, &out.TLSSessionResumptionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtocolDetailsInitParameters. +func (in *ProtocolDetailsInitParameters) DeepCopy() *ProtocolDetailsInitParameters { + if in == nil { + return nil + } + out := new(ProtocolDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtocolDetailsObservation) DeepCopyInto(out *ProtocolDetailsObservation) { + *out = *in + if in.As2Transports != nil { + in, out := &in.As2Transports, &out.As2Transports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PassiveIP != nil { + in, out := &in.PassiveIP, &out.PassiveIP + *out = new(string) + **out = **in + } + if in.SetStatOption != nil { + in, out := &in.SetStatOption, &out.SetStatOption + *out = new(string) + **out = **in + } + if in.TLSSessionResumptionMode != nil { + in, out := &in.TLSSessionResumptionMode, &out.TLSSessionResumptionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtocolDetailsObservation. +func (in *ProtocolDetailsObservation) DeepCopy() *ProtocolDetailsObservation { + if in == nil { + return nil + } + out := new(ProtocolDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtocolDetailsParameters) DeepCopyInto(out *ProtocolDetailsParameters) { + *out = *in + if in.As2Transports != nil { + in, out := &in.As2Transports, &out.As2Transports + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PassiveIP != nil { + in, out := &in.PassiveIP, &out.PassiveIP + *out = new(string) + **out = **in + } + if in.SetStatOption != nil { + in, out := &in.SetStatOption, &out.SetStatOption + *out = new(string) + **out = **in + } + if in.TLSSessionResumptionMode != nil { + in, out := &in.TLSSessionResumptionMode, &out.TLSSessionResumptionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtocolDetailsParameters. +func (in *ProtocolDetailsParameters) DeepCopy() *ProtocolDetailsParameters { + if in == nil { + return nil + } + out := new(ProtocolDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3FileLocationInitParameters) DeepCopyInto(out *S3FileLocationInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3FileLocationInitParameters. +func (in *S3FileLocationInitParameters) DeepCopy() *S3FileLocationInitParameters { + if in == nil { + return nil + } + out := new(S3FileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3FileLocationObservation) DeepCopyInto(out *S3FileLocationObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3FileLocationObservation. +func (in *S3FileLocationObservation) DeepCopy() *S3FileLocationObservation { + if in == nil { + return nil + } + out := new(S3FileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3FileLocationParameters) DeepCopyInto(out *S3FileLocationParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3FileLocationParameters. +func (in *S3FileLocationParameters) DeepCopy() *S3FileLocationParameters { + if in == nil { + return nil + } + out := new(S3FileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3StorageOptionsInitParameters) DeepCopyInto(out *S3StorageOptionsInitParameters) { + *out = *in + if in.DirectoryListingOptimization != nil { + in, out := &in.DirectoryListingOptimization, &out.DirectoryListingOptimization + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3StorageOptionsInitParameters. 
+func (in *S3StorageOptionsInitParameters) DeepCopy() *S3StorageOptionsInitParameters { + if in == nil { + return nil + } + out := new(S3StorageOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3StorageOptionsObservation) DeepCopyInto(out *S3StorageOptionsObservation) { + *out = *in + if in.DirectoryListingOptimization != nil { + in, out := &in.DirectoryListingOptimization, &out.DirectoryListingOptimization + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3StorageOptionsObservation. +func (in *S3StorageOptionsObservation) DeepCopy() *S3StorageOptionsObservation { + if in == nil { + return nil + } + out := new(S3StorageOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3StorageOptionsParameters) DeepCopyInto(out *S3StorageOptionsParameters) { + *out = *in + if in.DirectoryListingOptimization != nil { + in, out := &in.DirectoryListingOptimization, &out.DirectoryListingOptimization + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3StorageOptionsParameters. +func (in *S3StorageOptionsParameters) DeepCopy() *S3StorageOptionsParameters { + if in == nil { + return nil + } + out := new(S3StorageOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Server) DeepCopyInto(out *Server) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Server) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerInitParameters) DeepCopyInto(out *ServerInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.CertificateRef != nil { + in, out := &in.CertificateRef, &out.CertificateRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateSelector != nil { + in, out := &in.CertificateSelector, &out.CertificateSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DirectoryID != nil { + in, out := &in.DirectoryID, &out.DirectoryID + *out = new(string) + **out = **in + } + if in.DirectoryIDRef != nil { + in, out := &in.DirectoryIDRef, &out.DirectoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DirectoryIDSelector != nil { + in, out := &in.DirectoryIDSelector, &out.DirectoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.EndpointDetails != nil { + in, out := &in.EndpointDetails, &out.EndpointDetails + *out = new(EndpointDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EndpointType 
!= nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = new(string) + **out = **in + } + if in.HostKeySecretRef != nil { + in, out := &in.HostKeySecretRef, &out.HostKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.IdentityProviderType != nil { + in, out := &in.IdentityProviderType, &out.IdentityProviderType + *out = new(string) + **out = **in + } + if in.InvocationRole != nil { + in, out := &in.InvocationRole, &out.InvocationRole + *out = new(string) + **out = **in + } + if in.LoggingRole != nil { + in, out := &in.LoggingRole, &out.LoggingRole + *out = new(string) + **out = **in + } + if in.LoggingRoleRef != nil { + in, out := &in.LoggingRoleRef, &out.LoggingRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LoggingRoleSelector != nil { + in, out := &in.LoggingRoleSelector, &out.LoggingRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PostAuthenticationLoginBannerSecretRef != nil { + in, out := &in.PostAuthenticationLoginBannerSecretRef, &out.PostAuthenticationLoginBannerSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PreAuthenticationLoginBannerSecretRef != nil { + in, out := &in.PreAuthenticationLoginBannerSecretRef, &out.PreAuthenticationLoginBannerSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ProtocolDetails != nil { + in, out := &in.ProtocolDetails, &out.ProtocolDetails + *out = new(ProtocolDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.S3StorageOptions != nil 
{ + in, out := &in.S3StorageOptions, &out.S3StorageOptions + *out = new(S3StorageOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityPolicyName != nil { + in, out := &in.SecurityPolicyName, &out.SecurityPolicyName + *out = new(string) + **out = **in + } + if in.SftpAuthenticationMethods != nil { + in, out := &in.SftpAuthenticationMethods, &out.SftpAuthenticationMethods + *out = new(string) + **out = **in + } + if in.StructuredLogDestinations != nil { + in, out := &in.StructuredLogDestinations, &out.StructuredLogDestinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.WorkflowDetails != nil { + in, out := &in.WorkflowDetails, &out.WorkflowDetails + *out = new(WorkflowDetailsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerInitParameters. +func (in *ServerInitParameters) DeepCopy() *ServerInitParameters { + if in == nil { + return nil + } + out := new(ServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerList) DeepCopyInto(out *ServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerList. +func (in *ServerList) DeepCopy() *ServerList { + if in == nil { + return nil + } + out := new(ServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerObservation) DeepCopyInto(out *ServerObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.DirectoryID != nil { + in, out := &in.DirectoryID, &out.DirectoryID + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EndpointDetails != nil { + in, out := &in.EndpointDetails, &out.EndpointDetails + *out = new(EndpointDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = 
new(string) + **out = **in + } + if in.HostKeyFingerprint != nil { + in, out := &in.HostKeyFingerprint, &out.HostKeyFingerprint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IdentityProviderType != nil { + in, out := &in.IdentityProviderType, &out.IdentityProviderType + *out = new(string) + **out = **in + } + if in.InvocationRole != nil { + in, out := &in.InvocationRole, &out.InvocationRole + *out = new(string) + **out = **in + } + if in.LoggingRole != nil { + in, out := &in.LoggingRole, &out.LoggingRole + *out = new(string) + **out = **in + } + if in.ProtocolDetails != nil { + in, out := &in.ProtocolDetails, &out.ProtocolDetails + *out = new(ProtocolDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.S3StorageOptions != nil { + in, out := &in.S3StorageOptions, &out.S3StorageOptions + *out = new(S3StorageOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.SecurityPolicyName != nil { + in, out := &in.SecurityPolicyName, &out.SecurityPolicyName + *out = new(string) + **out = **in + } + if in.SftpAuthenticationMethods != nil { + in, out := &in.SftpAuthenticationMethods, &out.SftpAuthenticationMethods + *out = new(string) + **out = **in + } + if in.StructuredLogDestinations != nil { + in, out := &in.StructuredLogDestinations, &out.StructuredLogDestinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + 
in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.WorkflowDetails != nil { + in, out := &in.WorkflowDetails, &out.WorkflowDetails + *out = new(WorkflowDetailsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerObservation. +func (in *ServerObservation) DeepCopy() *ServerObservation { + if in == nil { + return nil + } + out := new(ServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerParameters) DeepCopyInto(out *ServerParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.CertificateRef != nil { + in, out := &in.CertificateRef, &out.CertificateRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateSelector != nil { + in, out := &in.CertificateSelector, &out.CertificateSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DirectoryID != nil { + in, out := &in.DirectoryID, &out.DirectoryID + *out = new(string) + **out = **in + } + if in.DirectoryIDRef != nil { + in, out := &in.DirectoryIDRef, &out.DirectoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DirectoryIDSelector != nil { + in, out := &in.DirectoryIDSelector, &out.DirectoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.EndpointDetails != nil { + in, out := &in.EndpointDetails, &out.EndpointDetails + *out = new(EndpointDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.ForceDestroy != nil { + in, out := &in.ForceDestroy, &out.ForceDestroy + *out = new(bool) + **out = **in + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = new(string) + **out = **in + } + if in.HostKeySecretRef != nil { + in, out := &in.HostKeySecretRef, &out.HostKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.IdentityProviderType != nil { + in, out := &in.IdentityProviderType, &out.IdentityProviderType + *out = new(string) + **out = **in + } + if in.InvocationRole != nil { + in, out := &in.InvocationRole, &out.InvocationRole + *out = new(string) + **out = **in + } + if in.LoggingRole != nil { + in, out := &in.LoggingRole, 
&out.LoggingRole + *out = new(string) + **out = **in + } + if in.LoggingRoleRef != nil { + in, out := &in.LoggingRoleRef, &out.LoggingRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LoggingRoleSelector != nil { + in, out := &in.LoggingRoleSelector, &out.LoggingRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PostAuthenticationLoginBannerSecretRef != nil { + in, out := &in.PostAuthenticationLoginBannerSecretRef, &out.PostAuthenticationLoginBannerSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PreAuthenticationLoginBannerSecretRef != nil { + in, out := &in.PreAuthenticationLoginBannerSecretRef, &out.PreAuthenticationLoginBannerSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ProtocolDetails != nil { + in, out := &in.ProtocolDetails, &out.ProtocolDetails + *out = new(ProtocolDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.S3StorageOptions != nil { + in, out := &in.S3StorageOptions, &out.S3StorageOptions + *out = new(S3StorageOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityPolicyName != nil { + in, out := &in.SecurityPolicyName, &out.SecurityPolicyName + *out = new(string) + **out = **in + } + if in.SftpAuthenticationMethods != nil { + in, out := &in.SftpAuthenticationMethods, &out.SftpAuthenticationMethods + *out = new(string) + **out = **in + } + if in.StructuredLogDestinations != nil { + in, out := &in.StructuredLogDestinations, &out.StructuredLogDestinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + 
} + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.WorkflowDetails != nil { + in, out := &in.WorkflowDetails, &out.WorkflowDetails + *out = new(WorkflowDetailsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerParameters. +func (in *ServerParameters) DeepCopy() *ServerParameters { + if in == nil { + return nil + } + out := new(ServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerSpec) DeepCopyInto(out *ServerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSpec. +func (in *ServerSpec) DeepCopy() *ServerSpec { + if in == nil { + return nil + } + out := new(ServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatus) DeepCopyInto(out *ServerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatus. 
+func (in *ServerStatus) DeepCopy() *ServerStatus { + if in == nil { + return nil + } + out := new(ServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepsCopyStepDetailsInitParameters) DeepCopyInto(out *StepsCopyStepDetailsInitParameters) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(CopyStepDetailsDestinationFileLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsCopyStepDetailsInitParameters. +func (in *StepsCopyStepDetailsInitParameters) DeepCopy() *StepsCopyStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(StepsCopyStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsCopyStepDetailsObservation) DeepCopyInto(out *StepsCopyStepDetailsObservation) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(CopyStepDetailsDestinationFileLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsCopyStepDetailsObservation. +func (in *StepsCopyStepDetailsObservation) DeepCopy() *StepsCopyStepDetailsObservation { + if in == nil { + return nil + } + out := new(StepsCopyStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepsCopyStepDetailsParameters) DeepCopyInto(out *StepsCopyStepDetailsParameters) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(CopyStepDetailsDestinationFileLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsCopyStepDetailsParameters. 
+func (in *StepsCopyStepDetailsParameters) DeepCopy() *StepsCopyStepDetailsParameters { + if in == nil { + return nil + } + out := new(StepsCopyStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepsCustomStepDetailsInitParameters) DeepCopyInto(out *StepsCustomStepDetailsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetSelector != nil { + in, out := &in.TargetSelector, &out.TargetSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsCustomStepDetailsInitParameters. +func (in *StepsCustomStepDetailsInitParameters) DeepCopy() *StepsCustomStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(StepsCustomStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsCustomStepDetailsObservation) DeepCopyInto(out *StepsCustomStepDetailsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsCustomStepDetailsObservation. +func (in *StepsCustomStepDetailsObservation) DeepCopy() *StepsCustomStepDetailsObservation { + if in == nil { + return nil + } + out := new(StepsCustomStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsCustomStepDetailsParameters) DeepCopyInto(out *StepsCustomStepDetailsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetSelector != nil { + in, out := &in.TargetSelector, &out.TargetSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsCustomStepDetailsParameters. +func (in *StepsCustomStepDetailsParameters) DeepCopy() *StepsCustomStepDetailsParameters { + if in == nil { + return nil + } + out := new(StepsCustomStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsDecryptStepDetailsDestinationFileLocationInitParameters) DeepCopyInto(out *StepsDecryptStepDetailsDestinationFileLocationInitParameters) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(DecryptStepDetailsDestinationFileLocationEFSFileLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(DecryptStepDetailsDestinationFileLocationS3FileLocationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsDecryptStepDetailsDestinationFileLocationInitParameters. +func (in *StepsDecryptStepDetailsDestinationFileLocationInitParameters) DeepCopy() *StepsDecryptStepDetailsDestinationFileLocationInitParameters { + if in == nil { + return nil + } + out := new(StepsDecryptStepDetailsDestinationFileLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepsDecryptStepDetailsDestinationFileLocationObservation) DeepCopyInto(out *StepsDecryptStepDetailsDestinationFileLocationObservation) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(DecryptStepDetailsDestinationFileLocationEFSFileLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(DecryptStepDetailsDestinationFileLocationS3FileLocationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsDecryptStepDetailsDestinationFileLocationObservation. 
+func (in *StepsDecryptStepDetailsDestinationFileLocationObservation) DeepCopy() *StepsDecryptStepDetailsDestinationFileLocationObservation { + if in == nil { + return nil + } + out := new(StepsDecryptStepDetailsDestinationFileLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepsDecryptStepDetailsDestinationFileLocationParameters) DeepCopyInto(out *StepsDecryptStepDetailsDestinationFileLocationParameters) { + *out = *in + if in.EFSFileLocation != nil { + in, out := &in.EFSFileLocation, &out.EFSFileLocation + *out = new(DecryptStepDetailsDestinationFileLocationEFSFileLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.S3FileLocation != nil { + in, out := &in.S3FileLocation, &out.S3FileLocation + *out = new(DecryptStepDetailsDestinationFileLocationS3FileLocationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsDecryptStepDetailsDestinationFileLocationParameters. +func (in *StepsDecryptStepDetailsDestinationFileLocationParameters) DeepCopy() *StepsDecryptStepDetailsDestinationFileLocationParameters { + if in == nil { + return nil + } + out := new(StepsDecryptStepDetailsDestinationFileLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsDecryptStepDetailsInitParameters) DeepCopyInto(out *StepsDecryptStepDetailsInitParameters) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(StepsDecryptStepDetailsDestinationFileLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsDecryptStepDetailsInitParameters. +func (in *StepsDecryptStepDetailsInitParameters) DeepCopy() *StepsDecryptStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(StepsDecryptStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsDecryptStepDetailsObservation) DeepCopyInto(out *StepsDecryptStepDetailsObservation) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(StepsDecryptStepDetailsDestinationFileLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsDecryptStepDetailsObservation. +func (in *StepsDecryptStepDetailsObservation) DeepCopy() *StepsDecryptStepDetailsObservation { + if in == nil { + return nil + } + out := new(StepsDecryptStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsDecryptStepDetailsParameters) DeepCopyInto(out *StepsDecryptStepDetailsParameters) { + *out = *in + if in.DestinationFileLocation != nil { + in, out := &in.DestinationFileLocation, &out.DestinationFileLocation + *out = new(StepsDecryptStepDetailsDestinationFileLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OverwriteExisting != nil { + in, out := &in.OverwriteExisting, &out.OverwriteExisting + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsDecryptStepDetailsParameters. +func (in *StepsDecryptStepDetailsParameters) DeepCopy() *StepsDecryptStepDetailsParameters { + if in == nil { + return nil + } + out := new(StepsDecryptStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepsDeleteStepDetailsInitParameters) DeepCopyInto(out *StepsDeleteStepDetailsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsDeleteStepDetailsInitParameters. 
+func (in *StepsDeleteStepDetailsInitParameters) DeepCopy() *StepsDeleteStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(StepsDeleteStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepsDeleteStepDetailsObservation) DeepCopyInto(out *StepsDeleteStepDetailsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsDeleteStepDetailsObservation. +func (in *StepsDeleteStepDetailsObservation) DeepCopy() *StepsDeleteStepDetailsObservation { + if in == nil { + return nil + } + out := new(StepsDeleteStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepsDeleteStepDetailsParameters) DeepCopyInto(out *StepsDeleteStepDetailsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsDeleteStepDetailsParameters. +func (in *StepsDeleteStepDetailsParameters) DeepCopy() *StepsDeleteStepDetailsParameters { + if in == nil { + return nil + } + out := new(StepsDeleteStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsInitParameters) DeepCopyInto(out *StepsInitParameters) { + *out = *in + if in.CopyStepDetails != nil { + in, out := &in.CopyStepDetails, &out.CopyStepDetails + *out = new(StepsCopyStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomStepDetails != nil { + in, out := &in.CustomStepDetails, &out.CustomStepDetails + *out = new(StepsCustomStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DecryptStepDetails != nil { + in, out := &in.DecryptStepDetails, &out.DecryptStepDetails + *out = new(StepsDecryptStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeleteStepDetails != nil { + in, out := &in.DeleteStepDetails, &out.DeleteStepDetails + *out = new(StepsDeleteStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TagStepDetails != nil { + in, out := &in.TagStepDetails, &out.TagStepDetails + *out = new(StepsTagStepDetailsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsInitParameters. +func (in *StepsInitParameters) DeepCopy() *StepsInitParameters { + if in == nil { + return nil + } + out := new(StepsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsObservation) DeepCopyInto(out *StepsObservation) { + *out = *in + if in.CopyStepDetails != nil { + in, out := &in.CopyStepDetails, &out.CopyStepDetails + *out = new(StepsCopyStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomStepDetails != nil { + in, out := &in.CustomStepDetails, &out.CustomStepDetails + *out = new(StepsCustomStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.DecryptStepDetails != nil { + in, out := &in.DecryptStepDetails, &out.DecryptStepDetails + *out = new(StepsDecryptStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.DeleteStepDetails != nil { + in, out := &in.DeleteStepDetails, &out.DeleteStepDetails + *out = new(StepsDeleteStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.TagStepDetails != nil { + in, out := &in.TagStepDetails, &out.TagStepDetails + *out = new(StepsTagStepDetailsObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsObservation. +func (in *StepsObservation) DeepCopy() *StepsObservation { + if in == nil { + return nil + } + out := new(StepsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsParameters) DeepCopyInto(out *StepsParameters) { + *out = *in + if in.CopyStepDetails != nil { + in, out := &in.CopyStepDetails, &out.CopyStepDetails + *out = new(StepsCopyStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomStepDetails != nil { + in, out := &in.CustomStepDetails, &out.CustomStepDetails + *out = new(StepsCustomStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.DecryptStepDetails != nil { + in, out := &in.DecryptStepDetails, &out.DecryptStepDetails + *out = new(StepsDecryptStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.DeleteStepDetails != nil { + in, out := &in.DeleteStepDetails, &out.DeleteStepDetails + *out = new(StepsDeleteStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.TagStepDetails != nil { + in, out := &in.TagStepDetails, &out.TagStepDetails + *out = new(StepsTagStepDetailsParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsParameters. +func (in *StepsParameters) DeepCopy() *StepsParameters { + if in == nil { + return nil + } + out := new(StepsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsTagStepDetailsInitParameters) DeepCopyInto(out *StepsTagStepDetailsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagStepDetailsTagsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsTagStepDetailsInitParameters. +func (in *StepsTagStepDetailsInitParameters) DeepCopy() *StepsTagStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(StepsTagStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepsTagStepDetailsObservation) DeepCopyInto(out *StepsTagStepDetailsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagStepDetailsTagsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsTagStepDetailsObservation. +func (in *StepsTagStepDetailsObservation) DeepCopy() *StepsTagStepDetailsObservation { + if in == nil { + return nil + } + out := new(StepsTagStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepsTagStepDetailsParameters) DeepCopyInto(out *StepsTagStepDetailsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagStepDetailsTagsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepsTagStepDetailsParameters. +func (in *StepsTagStepDetailsParameters) DeepCopy() *StepsTagStepDetailsParameters { + if in == nil { + return nil + } + out := new(StepsTagStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagStepDetailsInitParameters) DeepCopyInto(out *TagStepDetailsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagStepDetailsInitParameters. +func (in *TagStepDetailsInitParameters) DeepCopy() *TagStepDetailsInitParameters { + if in == nil { + return nil + } + out := new(TagStepDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TagStepDetailsObservation) DeepCopyInto(out *TagStepDetailsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagStepDetailsObservation. +func (in *TagStepDetailsObservation) DeepCopy() *TagStepDetailsObservation { + if in == nil { + return nil + } + out := new(TagStepDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagStepDetailsParameters) DeepCopyInto(out *TagStepDetailsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SourceFileLocation != nil { + in, out := &in.SourceFileLocation, &out.SourceFileLocation + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagStepDetailsParameters. +func (in *TagStepDetailsParameters) DeepCopy() *TagStepDetailsParameters { + if in == nil { + return nil + } + out := new(TagStepDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TagStepDetailsTagsInitParameters) DeepCopyInto(out *TagStepDetailsTagsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagStepDetailsTagsInitParameters. +func (in *TagStepDetailsTagsInitParameters) DeepCopy() *TagStepDetailsTagsInitParameters { + if in == nil { + return nil + } + out := new(TagStepDetailsTagsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagStepDetailsTagsObservation) DeepCopyInto(out *TagStepDetailsTagsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagStepDetailsTagsObservation. +func (in *TagStepDetailsTagsObservation) DeepCopy() *TagStepDetailsTagsObservation { + if in == nil { + return nil + } + out := new(TagStepDetailsTagsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagStepDetailsTagsParameters) DeepCopyInto(out *TagStepDetailsTagsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagStepDetailsTagsParameters. 
+func (in *TagStepDetailsTagsParameters) DeepCopy() *TagStepDetailsTagsParameters { + if in == nil { + return nil + } + out := new(TagStepDetailsTagsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagsInitParameters) DeepCopyInto(out *TagsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagsInitParameters. +func (in *TagsInitParameters) DeepCopy() *TagsInitParameters { + if in == nil { + return nil + } + out := new(TagsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagsObservation) DeepCopyInto(out *TagsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagsObservation. +func (in *TagsObservation) DeepCopy() *TagsObservation { + if in == nil { + return nil + } + out := new(TagsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TagsParameters) DeepCopyInto(out *TagsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagsParameters. +func (in *TagsParameters) DeepCopy() *TagsParameters { + if in == nil { + return nil + } + out := new(TagsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserInitParameters) DeepCopyInto(out *UserInitParameters) { + *out = *in + if in.HomeDirectory != nil { + in, out := &in.HomeDirectory, &out.HomeDirectory + *out = new(string) + **out = **in + } + if in.HomeDirectoryMappings != nil { + in, out := &in.HomeDirectoryMappings, &out.HomeDirectoryMappings + *out = make([]HomeDirectoryMappingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HomeDirectoryType != nil { + in, out := &in.HomeDirectoryType, &out.HomeDirectoryType + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PosixProfile != nil { + in, out := &in.PosixProfile, &out.PosixProfile + *out = new(PosixProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.RoleRef != nil { + in, out := &in.RoleRef, &out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServerID != nil { + in, out := &in.ServerID, &out.ServerID + *out = new(string) + **out = **in + } + if in.ServerIDRef != nil { + in, out := &in.ServerIDRef, &out.ServerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServerIDSelector != nil { + in, out := &in.ServerIDSelector, &out.ServerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
UserInitParameters. +func (in *UserInitParameters) DeepCopy() *UserInitParameters { + if in == nil { + return nil + } + out := new(UserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserObservation) DeepCopyInto(out *UserObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.HomeDirectory != nil { + in, out := &in.HomeDirectory, &out.HomeDirectory + *out = new(string) + **out = **in + } + if in.HomeDirectoryMappings != nil { + in, out := &in.HomeDirectoryMappings, &out.HomeDirectoryMappings + *out = make([]HomeDirectoryMappingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HomeDirectoryType != nil { + in, out := &in.HomeDirectoryType, &out.HomeDirectoryType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PosixProfile != nil { + in, out := &in.PosixProfile, &out.PosixProfile + *out = new(PosixProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.ServerID != nil { + in, out := &in.ServerID, &out.ServerID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. 
+func (in *UserObservation) DeepCopy() *UserObservation { + if in == nil { + return nil + } + out := new(UserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserParameters) DeepCopyInto(out *UserParameters) { + *out = *in + if in.HomeDirectory != nil { + in, out := &in.HomeDirectory, &out.HomeDirectory + *out = new(string) + **out = **in + } + if in.HomeDirectoryMappings != nil { + in, out := &in.HomeDirectoryMappings, &out.HomeDirectoryMappings + *out = make([]HomeDirectoryMappingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HomeDirectoryType != nil { + in, out := &in.HomeDirectoryType, &out.HomeDirectoryType + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } + if in.PosixProfile != nil { + in, out := &in.PosixProfile, &out.PosixProfile + *out = new(PosixProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.RoleRef != nil { + in, out := &in.RoleRef, &out.RoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RoleSelector != nil { + in, out := &in.RoleSelector, &out.RoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServerID != nil { + in, out := &in.ServerID, &out.ServerID + *out = new(string) + **out = **in + } + if in.ServerIDRef != nil { + in, out := &in.ServerIDRef, &out.ServerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServerIDSelector != nil { + in, out := &in.ServerIDSelector, &out.ServerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { + if in == nil { + return nil + } + out := new(UserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSpec) DeepCopyInto(out *UserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. +func (in *UserSpec) DeepCopy() *UserSpec { + if in == nil { + return nil + } + out := new(UserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserStatus) DeepCopyInto(out *UserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. +func (in *UserStatus) DeepCopy() *UserStatus { + if in == nil { + return nil + } + out := new(UserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Workflow) DeepCopyInto(out *Workflow) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workflow. +func (in *Workflow) DeepCopy() *Workflow { + if in == nil { + return nil + } + out := new(Workflow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workflow) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowDetailsInitParameters) DeepCopyInto(out *WorkflowDetailsInitParameters) { + *out = *in + if in.OnPartialUpload != nil { + in, out := &in.OnPartialUpload, &out.OnPartialUpload + *out = new(OnPartialUploadInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OnUpload != nil { + in, out := &in.OnUpload, &out.OnUpload + *out = new(OnUploadInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowDetailsInitParameters. +func (in *WorkflowDetailsInitParameters) DeepCopy() *WorkflowDetailsInitParameters { + if in == nil { + return nil + } + out := new(WorkflowDetailsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowDetailsObservation) DeepCopyInto(out *WorkflowDetailsObservation) { + *out = *in + if in.OnPartialUpload != nil { + in, out := &in.OnPartialUpload, &out.OnPartialUpload + *out = new(OnPartialUploadObservation) + (*in).DeepCopyInto(*out) + } + if in.OnUpload != nil { + in, out := &in.OnUpload, &out.OnUpload + *out = new(OnUploadObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowDetailsObservation. +func (in *WorkflowDetailsObservation) DeepCopy() *WorkflowDetailsObservation { + if in == nil { + return nil + } + out := new(WorkflowDetailsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowDetailsParameters) DeepCopyInto(out *WorkflowDetailsParameters) { + *out = *in + if in.OnPartialUpload != nil { + in, out := &in.OnPartialUpload, &out.OnPartialUpload + *out = new(OnPartialUploadParameters) + (*in).DeepCopyInto(*out) + } + if in.OnUpload != nil { + in, out := &in.OnUpload, &out.OnUpload + *out = new(OnUploadParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowDetailsParameters. +func (in *WorkflowDetailsParameters) DeepCopy() *WorkflowDetailsParameters { + if in == nil { + return nil + } + out := new(WorkflowDetailsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowInitParameters) DeepCopyInto(out *WorkflowInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.OnExceptionSteps != nil { + in, out := &in.OnExceptionSteps, &out.OnExceptionSteps + *out = make([]OnExceptionStepsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]StepsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowInitParameters. +func (in *WorkflowInitParameters) DeepCopy() *WorkflowInitParameters { + if in == nil { + return nil + } + out := new(WorkflowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowList) DeepCopyInto(out *WorkflowList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workflow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowList. 
+func (in *WorkflowList) DeepCopy() *WorkflowList { + if in == nil { + return nil + } + out := new(WorkflowList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowObservation) DeepCopyInto(out *WorkflowObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OnExceptionSteps != nil { + in, out := &in.OnExceptionSteps, &out.OnExceptionSteps + *out = make([]OnExceptionStepsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]StepsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new WorkflowObservation. +func (in *WorkflowObservation) DeepCopy() *WorkflowObservation { + if in == nil { + return nil + } + out := new(WorkflowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowParameters) DeepCopyInto(out *WorkflowParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.OnExceptionSteps != nil { + in, out := &in.OnExceptionSteps, &out.OnExceptionSteps + *out = make([]OnExceptionStepsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]StepsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowParameters. +func (in *WorkflowParameters) DeepCopy() *WorkflowParameters { + if in == nil { + return nil + } + out := new(WorkflowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowSpec. +func (in *WorkflowSpec) DeepCopy() *WorkflowSpec { + if in == nil { + return nil + } + out := new(WorkflowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatus. +func (in *WorkflowStatus) DeepCopy() *WorkflowStatus { + if in == nil { + return nil + } + out := new(WorkflowStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/transfer/v1beta2/zz_generated.managed.go b/apis/transfer/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..5573ba3c50 --- /dev/null +++ b/apis/transfer/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Server. +func (mg *Server) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Server. +func (mg *Server) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Server. 
+func (mg *Server) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Server. +func (mg *Server) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Server. +func (mg *Server) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Server. +func (mg *Server) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Server. +func (mg *Server) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Server. +func (mg *Server) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Server. +func (mg *Server) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Server. +func (mg *Server) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Server. +func (mg *Server) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Server. +func (mg *Server) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this User. +func (mg *User) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this User. +func (mg *User) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this User. 
+func (mg *User) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this User. +func (mg *User) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this User. +func (mg *User) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this User. +func (mg *User) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this User. +func (mg *User) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this User. +func (mg *User) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this User. +func (mg *User) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this User. +func (mg *User) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this User. +func (mg *User) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this User. +func (mg *User) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Workflow. +func (mg *Workflow) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workflow. +func (mg *Workflow) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workflow. 
+func (mg *Workflow) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workflow. +func (mg *Workflow) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workflow. +func (mg *Workflow) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workflow. +func (mg *Workflow) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workflow. +func (mg *Workflow) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workflow. +func (mg *Workflow) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workflow. +func (mg *Workflow) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workflow. +func (mg *Workflow) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workflow. +func (mg *Workflow) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workflow. 
+func (mg *Workflow) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/transfer/v1beta2/zz_generated.managedlist.go b/apis/transfer/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..363443357a --- /dev/null +++ b/apis/transfer/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ServerList. +func (l *ServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this UserList. +func (l *UserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WorkflowList. +func (l *WorkflowList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/transfer/v1beta2/zz_generated.resolvers.go b/apis/transfer/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..875c2839e7 --- /dev/null +++ b/apis/transfer/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,335 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Server. + apisresolver "github.com/upbound/provider-aws/internal/apis" +) + +func (mg *Server) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Certificate), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.CertificateRef, + Selector: mg.Spec.ForProvider.CertificateSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Certificate") + } + mg.Spec.ForProvider.Certificate = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CertificateRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DirectoryID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.ForProvider.DirectoryIDRef, + Selector: mg.Spec.ForProvider.DirectoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DirectoryID") + } + mg.Spec.ForProvider.DirectoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DirectoryIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.EndpointDetails != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EndpointDetails.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.EndpointDetails.VPCIDRef, + Selector: mg.Spec.ForProvider.EndpointDetails.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EndpointDetails.VPCID") + } + mg.Spec.ForProvider.EndpointDetails.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EndpointDetails.VPCIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoggingRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.LoggingRoleRef, + Selector: mg.Spec.ForProvider.LoggingRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoggingRole") + } + mg.Spec.ForProvider.LoggingRole = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.LoggingRoleRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("acm.aws.upbound.io", "v1beta2", "Certificate", "CertificateList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Certificate), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.CertificateRef, + Selector: mg.Spec.InitProvider.CertificateSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Certificate") + } + mg.Spec.InitProvider.Certificate = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CertificateRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DirectoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DirectoryIDRef, + Selector: mg.Spec.InitProvider.DirectoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DirectoryID") + } + mg.Spec.InitProvider.DirectoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DirectoryIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.EndpointDetails != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "VPC", "VPCList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = 
r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EndpointDetails.VPCID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.EndpointDetails.VPCIDRef, + Selector: mg.Spec.InitProvider.EndpointDetails.VPCIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EndpointDetails.VPCID") + } + mg.Spec.InitProvider.EndpointDetails.VPCID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EndpointDetails.VPCIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoggingRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.LoggingRoleRef, + Selector: mg.Spec.InitProvider.LoggingRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LoggingRole") + } + mg.Spec.InitProvider.LoggingRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LoggingRoleRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this User. 
+func (mg *User) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Role), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.RoleRef, + Selector: mg.Spec.ForProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Role") + } + mg.Spec.ForProvider.Role = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RoleRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta2", "Server", "ServerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServerID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServerIDRef, + Selector: mg.Spec.ForProvider.ServerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServerID") + } + mg.Spec.ForProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServerIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + 
rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Role), + Extract: common.ARNExtractor(), + Reference: mg.Spec.InitProvider.RoleRef, + Selector: mg.Spec.InitProvider.RoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Role") + } + mg.Spec.InitProvider.Role = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RoleRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("transfer.aws.upbound.io", "v1beta2", "Server", "ServerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServerID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServerIDRef, + Selector: mg.Spec.InitProvider.ServerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServerID") + } + mg.Spec.InitProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServerIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Workflow. 
+func (mg *Workflow) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Steps); i3++ { + if mg.Spec.ForProvider.Steps[i3].CustomStepDetails != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Steps[i3].CustomStepDetails.Target), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.Steps[i3].CustomStepDetails.TargetRef, + Selector: mg.Spec.ForProvider.Steps[i3].CustomStepDetails.TargetSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Steps[i3].CustomStepDetails.Target") + } + mg.Spec.ForProvider.Steps[i3].CustomStepDetails.Target = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Steps[i3].CustomStepDetails.TargetRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Steps); i3++ { + if mg.Spec.InitProvider.Steps[i3].CustomStepDetails != nil { + { + m, l, err = apisresolver.GetManagedResource("lambda.aws.upbound.io", "v1beta2", "Function", "FunctionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Steps[i3].CustomStepDetails.Target), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.Steps[i3].CustomStepDetails.TargetRef, + Selector: 
mg.Spec.InitProvider.Steps[i3].CustomStepDetails.TargetSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Steps[i3].CustomStepDetails.Target") + } + mg.Spec.InitProvider.Steps[i3].CustomStepDetails.Target = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Steps[i3].CustomStepDetails.TargetRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/transfer/v1beta2/zz_groupversion_info.go b/apis/transfer/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..8d1b658704 --- /dev/null +++ b/apis/transfer/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=transfer.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "transfer.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/transfer/v1beta2/zz_server_terraformed.go b/apis/transfer/v1beta2/zz_server_terraformed.go new file mode 100755 index 0000000000..a591c65c21 --- /dev/null +++ b/apis/transfer/v1beta2/zz_server_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Server +func (mg *Server) GetTerraformResourceType() string { + return "aws_transfer_server" +} + +// GetConnectionDetailsMapping for this Server +func (tr *Server) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"host_key": "hostKeySecretRef", "post_authentication_login_banner": "postAuthenticationLoginBannerSecretRef", "pre_authentication_login_banner": "preAuthenticationLoginBannerSecretRef"} +} + +// GetObservation of this Server +func (tr *Server) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Server +func (tr *Server) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Server +func (tr *Server) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Server +func (tr *Server) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Server +func (tr *Server) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Server +func (tr *Server) GetInitParameters() (map[string]any, error) { + 
p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Server +func (tr *Server) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Server using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Server) LateInitialize(attrs []byte) (bool, error) { + params := &ServerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Server) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/transfer/v1beta2/zz_server_types.go b/apis/transfer/v1beta2/zz_server_types.go new file mode 100755 index 0000000000..279c201f3f --- /dev/null +++ b/apis/transfer/v1beta2/zz_server_types.go @@ -0,0 +1,633 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EndpointDetailsInitParameters struct { + + // A list of address allocation IDs that are required to attach an Elastic IP address to your SFTP server's endpoint. This property can only be used when endpoint_type is set to VPC. + // +listType=set + AddressAllocationIds []*string `json:"addressAllocationIds,omitempty" tf:"address_allocation_ids,omitempty"` + + // A list of security groups IDs that are available to attach to your server's endpoint. If no security groups are specified, the VPC's default security groups are automatically assigned to your endpoint. This property can only be used when endpoint_type is set to VPC. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs that are required to host your SFTP server endpoint in your VPC. This property can only be used when endpoint_type is set to VPC. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The ID of the VPC endpoint. 
This property can only be used when endpoint_type is set to VPC_ENDPOINT + VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` + + // The VPC ID of the virtual private cloud in which the SFTP server's endpoint will be hosted. This property can only be used when endpoint_type is set to VPC. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type EndpointDetailsObservation struct { + + // A list of address allocation IDs that are required to attach an Elastic IP address to your SFTP server's endpoint. This property can only be used when endpoint_type is set to VPC. + // +listType=set + AddressAllocationIds []*string `json:"addressAllocationIds,omitempty" tf:"address_allocation_ids,omitempty"` + + // A list of security groups IDs that are available to attach to your server's endpoint. If no security groups are specified, the VPC's default security groups are automatically assigned to your endpoint. This property can only be used when endpoint_type is set to VPC. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs that are required to host your SFTP server endpoint in your VPC. This property can only be used when endpoint_type is set to VPC. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The ID of the VPC endpoint. 
This property can only be used when endpoint_type is set to VPC_ENDPOINT + VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` + + // The VPC ID of the virtual private cloud in which the SFTP server's endpoint will be hosted. This property can only be used when endpoint_type is set to VPC. + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` +} + +type EndpointDetailsParameters struct { + + // A list of address allocation IDs that are required to attach an Elastic IP address to your SFTP server's endpoint. This property can only be used when endpoint_type is set to VPC. + // +kubebuilder:validation:Optional + // +listType=set + AddressAllocationIds []*string `json:"addressAllocationIds,omitempty" tf:"address_allocation_ids,omitempty"` + + // A list of security groups IDs that are available to attach to your server's endpoint. If no security groups are specified, the VPC's default security groups are automatically assigned to your endpoint. This property can only be used when endpoint_type is set to VPC. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // A list of subnet IDs that are required to host your SFTP server endpoint in your VPC. This property can only be used when endpoint_type is set to VPC. + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // The ID of the VPC endpoint. This property can only be used when endpoint_type is set to VPC_ENDPOINT + // +kubebuilder:validation:Optional + VPCEndpointID *string `json:"vpcEndpointId,omitempty" tf:"vpc_endpoint_id,omitempty"` + + // The VPC ID of the virtual private cloud in which the SFTP server's endpoint will be hosted. This property can only be used when endpoint_type is set to VPC. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.VPC + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VPCID *string `json:"vpcId,omitempty" tf:"vpc_id,omitempty"` + + // Reference to a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDRef *v1.Reference `json:"vpcIdRef,omitempty" tf:"-"` + + // Selector for a VPC in ec2 to populate vpcId. + // +kubebuilder:validation:Optional + VPCIDSelector *v1.Selector `json:"vpcIdSelector,omitempty" tf:"-"` +} + +type OnPartialUploadInitParameters struct { + + // Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // A unique identifier for the workflow. + WorkflowID *string `json:"workflowId,omitempty" tf:"workflow_id,omitempty"` +} + +type OnPartialUploadObservation struct { + + // Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // A unique identifier for the workflow. + WorkflowID *string `json:"workflowId,omitempty" tf:"workflow_id,omitempty"` +} + +type OnPartialUploadParameters struct { + + // Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources. + // +kubebuilder:validation:Optional + ExecutionRole *string `json:"executionRole" tf:"execution_role,omitempty"` + + // A unique identifier for the workflow. 
+ // +kubebuilder:validation:Optional + WorkflowID *string `json:"workflowId" tf:"workflow_id,omitempty"` +} + +type OnUploadInitParameters struct { + + // Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // A unique identifier for the workflow. + WorkflowID *string `json:"workflowId,omitempty" tf:"workflow_id,omitempty"` +} + +type OnUploadObservation struct { + + // Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources. + ExecutionRole *string `json:"executionRole,omitempty" tf:"execution_role,omitempty"` + + // A unique identifier for the workflow. + WorkflowID *string `json:"workflowId,omitempty" tf:"workflow_id,omitempty"` +} + +type OnUploadParameters struct { + + // Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources. + // +kubebuilder:validation:Optional + ExecutionRole *string `json:"executionRole" tf:"execution_role,omitempty"` + + // A unique identifier for the workflow. + // +kubebuilder:validation:Optional + WorkflowID *string `json:"workflowId" tf:"workflow_id,omitempty"` +} + +type ProtocolDetailsInitParameters struct { + + // Indicates the transport method for the AS2 messages. Currently, only HTTP is supported. + // +listType=set + As2Transports []*string `json:"as2Transports,omitempty" tf:"as2_transports,omitempty"` + + // Indicates passive mode, for FTP and FTPS protocols. Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer. 
+ PassiveIP *string `json:"passiveIp,omitempty" tf:"passive_ip,omitempty"` + + // Use to ignore the error that is generated when the client attempts to use SETSTAT on a file you are uploading to an S3 bucket. Valid values: DEFAULT, ENABLE_NO_OP. + SetStatOption *string `json:"setStatOption,omitempty" tf:"set_stat_option,omitempty"` + + // A property used with Transfer Family servers that use the FTPS protocol. Provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. Valid values: DISABLED, ENABLED, ENFORCED. + TLSSessionResumptionMode *string `json:"tlsSessionResumptionMode,omitempty" tf:"tls_session_resumption_mode,omitempty"` +} + +type ProtocolDetailsObservation struct { + + // Indicates the transport method for the AS2 messages. Currently, only HTTP is supported. + // +listType=set + As2Transports []*string `json:"as2Transports,omitempty" tf:"as2_transports,omitempty"` + + // Indicates passive mode, for FTP and FTPS protocols. Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer. + PassiveIP *string `json:"passiveIp,omitempty" tf:"passive_ip,omitempty"` + + // Use to ignore the error that is generated when the client attempts to use SETSTAT on a file you are uploading to an S3 bucket. Valid values: DEFAULT, ENABLE_NO_OP. + SetStatOption *string `json:"setStatOption,omitempty" tf:"set_stat_option,omitempty"` + + // A property used with Transfer Family servers that use the FTPS protocol. Provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. Valid values: DISABLED, ENABLED, ENFORCED. + TLSSessionResumptionMode *string `json:"tlsSessionResumptionMode,omitempty" tf:"tls_session_resumption_mode,omitempty"` +} + +type ProtocolDetailsParameters struct { + + // Indicates the transport method for the AS2 messages. Currently, only HTTP is supported. 
+ // +kubebuilder:validation:Optional + // +listType=set + As2Transports []*string `json:"as2Transports,omitempty" tf:"as2_transports,omitempty"` + + // Indicates passive mode, for FTP and FTPS protocols. Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer. + // +kubebuilder:validation:Optional + PassiveIP *string `json:"passiveIp,omitempty" tf:"passive_ip,omitempty"` + + // Use to ignore the error that is generated when the client attempts to use SETSTAT on a file you are uploading to an S3 bucket. Valid values: DEFAULT, ENABLE_NO_OP. + // +kubebuilder:validation:Optional + SetStatOption *string `json:"setStatOption,omitempty" tf:"set_stat_option,omitempty"` + + // A property used with Transfer Family servers that use the FTPS protocol. Provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. Valid values: DISABLED, ENABLED, ENFORCED. + // +kubebuilder:validation:Optional + TLSSessionResumptionMode *string `json:"tlsSessionResumptionMode,omitempty" tf:"tls_session_resumption_mode,omitempty"` +} + +type S3StorageOptionsInitParameters struct { + + // Specifies whether or not performance for your Amazon S3 directories is optimized. Valid values are DISABLED, ENABLED. + DirectoryListingOptimization *string `json:"directoryListingOptimization,omitempty" tf:"directory_listing_optimization,omitempty"` +} + +type S3StorageOptionsObservation struct { + + // Specifies whether or not performance for your Amazon S3 directories is optimized. Valid values are DISABLED, ENABLED. + DirectoryListingOptimization *string `json:"directoryListingOptimization,omitempty" tf:"directory_listing_optimization,omitempty"` +} + +type S3StorageOptionsParameters struct { + + // Specifies whether or not performance for your Amazon S3 directories is optimized. Valid values are DISABLED, ENABLED. 
+ // +kubebuilder:validation:Optional + DirectoryListingOptimization *string `json:"directoryListingOptimization,omitempty" tf:"directory_listing_optimization,omitempty"` +} + +type ServerInitParameters struct { + + // The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. This is required when protocols is set to FTPS + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Certificate *string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Reference to a Certificate in acm to populate certificate. + // +kubebuilder:validation:Optional + CertificateRef *v1.Reference `json:"certificateRef,omitempty" tf:"-"` + + // Selector for a Certificate in acm to populate certificate. + // +kubebuilder:validation:Optional + CertificateSelector *v1.Selector `json:"certificateSelector,omitempty" tf:"-"` + + // The directory service ID of the directory service you want to connect to with an identity_provider_type of AWS_DIRECTORY_SERVICE. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` + + // Reference to a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDRef *v1.Reference `json:"directoryIdRef,omitempty" tf:"-"` + + // Selector for a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDSelector *v1.Selector `json:"directoryIdSelector,omitempty" tf:"-"` + + // The domain of the storage system that is used for file transfers. Valid values are: S3 and EFS. The default value is S3. 
+ Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. See endpoint_details block below for details. + EndpointDetails *EndpointDetailsInitParameters `json:"endpointDetails,omitempty" tf:"endpoint_details,omitempty"` + + // The type of endpoint that you want your SFTP server connect to. If you connect to a VPC (or VPC_ENDPOINT), your SFTP server isn't accessible over the public internet. If you want to connect your SFTP server via public internet, set PUBLIC. Defaults to PUBLIC. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // A boolean that indicates all users associated with the server should be deleted so that the Server can be destroyed without error. The default value is false. This option only applies to servers configured with a SERVICE_MANAGED identity_provider_type. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // The ARN for a lambda function to use for the Identity provider. + Function *string `json:"function,omitempty" tf:"function,omitempty"` + + // RSA, ECDSA, or ED25519 private key (e.g., as generated by the ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key, ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key or ssh-keygen -t ed25519 -N "" -f my-new-server-key commands). + HostKeySecretRef *v1.SecretKeySelector `json:"hostKeySecretRef,omitempty" tf:"-"` + + // The mode of authentication enabled for this service. The default value is SERVICE_MANAGED, which allows you to store and access SFTP user credentials within the service. API_GATEWAY indicates that user authentication requires a call to an API Gateway endpoint URL provided by you to integrate an identity provider of your choice. 
Using AWS_DIRECTORY_SERVICE will allow for authentication against AWS Managed Active Directory or Microsoft Active Directory in your on-premises environment, or in AWS using AD Connectors. Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. If you choose this value, you must specify the ARN for the lambda function in the function argument. + IdentityProviderType *string `json:"identityProviderType,omitempty" tf:"identity_provider_type,omitempty"` + + // Amazon Resource Name (ARN) of the IAM role used to authenticate the user account with an identity_provider_type of API_GATEWAY. + InvocationRole *string `json:"invocationRole,omitempty" tf:"invocation_role,omitempty"` + + // Amazon Resource Name (ARN) of an IAM role that allows the service to write your SFTP users’ activity to your Amazon CloudWatch logs for monitoring and auditing purposes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + LoggingRole *string `json:"loggingRole,omitempty" tf:"logging_role,omitempty"` + + // Reference to a Role in iam to populate loggingRole. + // +kubebuilder:validation:Optional + LoggingRoleRef *v1.Reference `json:"loggingRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate loggingRole. + // +kubebuilder:validation:Optional + LoggingRoleSelector *v1.Selector `json:"loggingRoleSelector,omitempty" tf:"-"` + + // Specify a string to display when users connect to a server. This string is displayed after the user authenticates. The SFTP protocol does not support post-authentication display banners. + PostAuthenticationLoginBannerSecretRef *v1.SecretKeySelector `json:"postAuthenticationLoginBannerSecretRef,omitempty" tf:"-"` + + // Specify a string to display when users connect to a server. This string is displayed before the user authenticates. 
+ PreAuthenticationLoginBannerSecretRef *v1.SecretKeySelector `json:"preAuthenticationLoginBannerSecretRef,omitempty" tf:"-"` + + // The protocol settings that are configured for your server. See protocol_details block below for details. + ProtocolDetails *ProtocolDetailsInitParameters `json:"protocolDetails,omitempty" tf:"protocol_details,omitempty"` + + // Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. This defaults to SFTP . The available protocols are: + // +listType=set + Protocols []*string `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default. See s3_storage_options block below for details. + S3StorageOptions *S3StorageOptionsInitParameters `json:"s3StorageOptions,omitempty" tf:"s3_storage_options,omitempty"` + + // Specifies the name of the security policy that is attached to the server. Default value is: TransferSecurityPolicy-2018-11. The available values are: + SecurityPolicyName *string `json:"securityPolicyName,omitempty" tf:"security_policy_name,omitempty"` + + // For SFTP-enabled servers, and for custom identity providers only. Valid values are PASSWORD, PUBLIC_KEY, PUBLIC_KEY_OR_PASSWORD and PUBLIC_KEY_AND_PASSWORD. Default value is: PUBLIC_KEY_OR_PASSWORD. + SftpAuthenticationMethods *string `json:"sftpAuthenticationMethods,omitempty" tf:"sftp_authentication_methods,omitempty"` + + // A set of ARNs of destinations that will receive structured logs from the transfer server such as CloudWatch Log Group ARNs. If provided this enables the transfer server to emit structured logs to the specified locations. 
+ // This is a set of arns of destinations that will receive structured logs from the transfer server + // +listType=set + StructuredLogDestinations []*string `json:"structuredLogDestinations,omitempty" tf:"structured_log_destinations,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // - URL of the service endpoint used to authenticate users with an identity_provider_type of API_GATEWAY. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Specifies the workflow details. See workflow_details block below for details. + WorkflowDetails *WorkflowDetailsInitParameters `json:"workflowDetails,omitempty" tf:"workflow_details,omitempty"` +} + +type ServerObservation struct { + + // Amazon Resource Name (ARN) of Transfer Server + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. This is required when protocols is set to FTPS + Certificate *string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The directory service ID of the directory service you want to connect to with an identity_provider_type of AWS_DIRECTORY_SERVICE. + DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` + + // The domain of the storage system that is used for file transfers. Valid values are: S3 and EFS. The default value is S3. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The endpoint of the Transfer Server (e.g., s-12345678.server.transfer.REGION.amazonaws.com) + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. See endpoint_details block below for details. 
+ EndpointDetails *EndpointDetailsObservation `json:"endpointDetails,omitempty" tf:"endpoint_details,omitempty"` + + // The type of endpoint that you want your SFTP server connect to. If you connect to a VPC (or VPC_ENDPOINT), your SFTP server isn't accessible over the public internet. If you want to connect your SFTP server via public internet, set PUBLIC. Defaults to PUBLIC. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // A boolean that indicates all users associated with the server should be deleted so that the Server can be destroyed without error. The default value is false. This option only applies to servers configured with a SERVICE_MANAGED identity_provider_type. + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // The ARN for a lambda function to use for the Identity provider. + Function *string `json:"function,omitempty" tf:"function,omitempty"` + + // This value contains the message-digest algorithm (MD5) hash of the server's host key. This value is equivalent to the output of the ssh-keygen -l -E md5 -f my-new-server-key command. + HostKeyFingerprint *string `json:"hostKeyFingerprint,omitempty" tf:"host_key_fingerprint,omitempty"` + + // The Server ID of the Transfer Server (e.g., s-12345678) + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The mode of authentication enabled for this service. The default value is SERVICE_MANAGED, which allows you to store and access SFTP user credentials within the service. API_GATEWAY indicates that user authentication requires a call to an API Gateway endpoint URL provided by you to integrate an identity provider of your choice. Using AWS_DIRECTORY_SERVICE will allow for authentication against AWS Managed Active Directory or Microsoft Active Directory in your on-premises environment, or in AWS using AD Connectors. Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. 
If you choose this value, you must specify the ARN for the lambda function in the function argument. + IdentityProviderType *string `json:"identityProviderType,omitempty" tf:"identity_provider_type,omitempty"` + + // Amazon Resource Name (ARN) of the IAM role used to authenticate the user account with an identity_provider_type of API_GATEWAY. + InvocationRole *string `json:"invocationRole,omitempty" tf:"invocation_role,omitempty"` + + // Amazon Resource Name (ARN) of an IAM role that allows the service to write your SFTP users’ activity to your Amazon CloudWatch logs for monitoring and auditing purposes. + LoggingRole *string `json:"loggingRole,omitempty" tf:"logging_role,omitempty"` + + // The protocol settings that are configured for your server. See protocol_details block below for details. + ProtocolDetails *ProtocolDetailsObservation `json:"protocolDetails,omitempty" tf:"protocol_details,omitempty"` + + // Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. This defaults to SFTP . The available protocols are: + // +listType=set + Protocols []*string `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default. See s3_storage_options block below for details. + S3StorageOptions *S3StorageOptionsObservation `json:"s3StorageOptions,omitempty" tf:"s3_storage_options,omitempty"` + + // Specifies the name of the security policy that is attached to the server. Default value is: TransferSecurityPolicy-2018-11. The available values are: + SecurityPolicyName *string `json:"securityPolicyName,omitempty" tf:"security_policy_name,omitempty"` + + // For SFTP-enabled servers, and for custom identity providers only. Valid values are PASSWORD, PUBLIC_KEY, PUBLIC_KEY_OR_PASSWORD and PUBLIC_KEY_AND_PASSWORD. Default value is: PUBLIC_KEY_OR_PASSWORD. 
+ SftpAuthenticationMethods *string `json:"sftpAuthenticationMethods,omitempty" tf:"sftp_authentication_methods,omitempty"` + + // A set of ARNs of destinations that will receive structured logs from the transfer server such as CloudWatch Log Group ARNs. If provided this enables the transfer server to emit structured logs to the specified locations. + // This is a set of arns of destinations that will receive structured logs from the transfer server + // +listType=set + StructuredLogDestinations []*string `json:"structuredLogDestinations,omitempty" tf:"structured_log_destinations,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // - URL of the service endpoint used to authenticate users with an identity_provider_type of API_GATEWAY. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Specifies the workflow details. See workflow_details block below for details. + WorkflowDetails *WorkflowDetailsObservation `json:"workflowDetails,omitempty" tf:"workflow_details,omitempty"` +} + +type ServerParameters struct { + + // The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. This is required when protocols is set to FTPS + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acm/v1beta2.Certificate + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Certificate *string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Reference to a Certificate in acm to populate certificate. 
+ // +kubebuilder:validation:Optional + CertificateRef *v1.Reference `json:"certificateRef,omitempty" tf:"-"` + + // Selector for a Certificate in acm to populate certificate. + // +kubebuilder:validation:Optional + CertificateSelector *v1.Selector `json:"certificateSelector,omitempty" tf:"-"` + + // The directory service ID of the directory service you want to connect to with an identity_provider_type of AWS_DIRECTORY_SERVICE. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` + + // Reference to a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDRef *v1.Reference `json:"directoryIdRef,omitempty" tf:"-"` + + // Selector for a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDSelector *v1.Selector `json:"directoryIdSelector,omitempty" tf:"-"` + + // The domain of the storage system that is used for file transfers. Valid values are: S3 and EFS. The default value is S3. + // +kubebuilder:validation:Optional + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. See endpoint_details block below for details. + // +kubebuilder:validation:Optional + EndpointDetails *EndpointDetailsParameters `json:"endpointDetails,omitempty" tf:"endpoint_details,omitempty"` + + // The type of endpoint that you want your SFTP server connect to. If you connect to a VPC (or VPC_ENDPOINT), your SFTP server isn't accessible over the public internet. If you want to connect your SFTP server via public internet, set PUBLIC. Defaults to PUBLIC. 
+ // +kubebuilder:validation:Optional + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // A boolean that indicates all users associated with the server should be deleted so that the Server can be destroyed without error. The default value is false. This option only applies to servers configured with a SERVICE_MANAGED identity_provider_type. + // +kubebuilder:validation:Optional + ForceDestroy *bool `json:"forceDestroy,omitempty" tf:"force_destroy,omitempty"` + + // The ARN for a lambda function to use for the Identity provider. + // +kubebuilder:validation:Optional + Function *string `json:"function,omitempty" tf:"function,omitempty"` + + // RSA, ECDSA, or ED25519 private key (e.g., as generated by the ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key, ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key or ssh-keygen -t ed25519 -N "" -f my-new-server-key commands). + // +kubebuilder:validation:Optional + HostKeySecretRef *v1.SecretKeySelector `json:"hostKeySecretRef,omitempty" tf:"-"` + + // The mode of authentication enabled for this service. The default value is SERVICE_MANAGED, which allows you to store and access SFTP user credentials within the service. API_GATEWAY indicates that user authentication requires a call to an API Gateway endpoint URL provided by you to integrate an identity provider of your choice. Using AWS_DIRECTORY_SERVICE will allow for authentication against AWS Managed Active Directory or Microsoft Active Directory in your on-premises environment, or in AWS using AD Connectors. Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. If you choose this value, you must specify the ARN for the lambda function in the function argument. 
+ // +kubebuilder:validation:Optional + IdentityProviderType *string `json:"identityProviderType,omitempty" tf:"identity_provider_type,omitempty"` + + // Amazon Resource Name (ARN) of the IAM role used to authenticate the user account with an identity_provider_type of API_GATEWAY. + // +kubebuilder:validation:Optional + InvocationRole *string `json:"invocationRole,omitempty" tf:"invocation_role,omitempty"` + + // Amazon Resource Name (ARN) of an IAM role that allows the service to write your SFTP users’ activity to your Amazon CloudWatch logs for monitoring and auditing purposes. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + LoggingRole *string `json:"loggingRole,omitempty" tf:"logging_role,omitempty"` + + // Reference to a Role in iam to populate loggingRole. + // +kubebuilder:validation:Optional + LoggingRoleRef *v1.Reference `json:"loggingRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate loggingRole. + // +kubebuilder:validation:Optional + LoggingRoleSelector *v1.Selector `json:"loggingRoleSelector,omitempty" tf:"-"` + + // Specify a string to display when users connect to a server. This string is displayed after the user authenticates. The SFTP protocol does not support post-authentication display banners. + // +kubebuilder:validation:Optional + PostAuthenticationLoginBannerSecretRef *v1.SecretKeySelector `json:"postAuthenticationLoginBannerSecretRef,omitempty" tf:"-"` + + // Specify a string to display when users connect to a server. This string is displayed before the user authenticates. + // +kubebuilder:validation:Optional + PreAuthenticationLoginBannerSecretRef *v1.SecretKeySelector `json:"preAuthenticationLoginBannerSecretRef,omitempty" tf:"-"` + + // The protocol settings that are configured for your server. 
See protocol_details block below for details. + // +kubebuilder:validation:Optional + ProtocolDetails *ProtocolDetailsParameters `json:"protocolDetails,omitempty" tf:"protocol_details,omitempty"` + + // Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. This defaults to SFTP . The available protocols are: + // +kubebuilder:validation:Optional + // +listType=set + Protocols []*string `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default. See s3_storage_options block below for details. + // +kubebuilder:validation:Optional + S3StorageOptions *S3StorageOptionsParameters `json:"s3StorageOptions,omitempty" tf:"s3_storage_options,omitempty"` + + // Specifies the name of the security policy that is attached to the server. Default value is: TransferSecurityPolicy-2018-11. The available values are: + // +kubebuilder:validation:Optional + SecurityPolicyName *string `json:"securityPolicyName,omitempty" tf:"security_policy_name,omitempty"` + + // For SFTP-enabled servers, and for custom identity providers only. Valid values are PASSWORD, PUBLIC_KEY, PUBLIC_KEY_OR_PASSWORD and PUBLIC_KEY_AND_PASSWORD. Default value is: PUBLIC_KEY_OR_PASSWORD. + // +kubebuilder:validation:Optional + SftpAuthenticationMethods *string `json:"sftpAuthenticationMethods,omitempty" tf:"sftp_authentication_methods,omitempty"` + + // A set of ARNs of destinations that will receive structured logs from the transfer server such as CloudWatch Log Group ARNs. If provided this enables the transfer server to emit structured logs to the specified locations. 
+ // This is a set of arns of destinations that will receive structured logs from the transfer server + // +kubebuilder:validation:Optional + // +listType=set + StructuredLogDestinations []*string `json:"structuredLogDestinations,omitempty" tf:"structured_log_destinations,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // - URL of the service endpoint used to authenticate users with an identity_provider_type of API_GATEWAY. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Specifies the workflow details. See workflow_details block below for details. + // +kubebuilder:validation:Optional + WorkflowDetails *WorkflowDetailsParameters `json:"workflowDetails,omitempty" tf:"workflow_details,omitempty"` +} + +type WorkflowDetailsInitParameters struct { + + // A trigger that starts a workflow if a file is only partially uploaded. See Workflow Detail below. See on_partial_upload block below for details. + OnPartialUpload *OnPartialUploadInitParameters `json:"onPartialUpload,omitempty" tf:"on_partial_upload,omitempty"` + + // A trigger that starts a workflow: the workflow begins to execute after a file is uploaded. See on_upload block below for details. + OnUpload *OnUploadInitParameters `json:"onUpload,omitempty" tf:"on_upload,omitempty"` +} + +type WorkflowDetailsObservation struct { + + // A trigger that starts a workflow if a file is only partially uploaded. See Workflow Detail below. See on_partial_upload block below for details. + OnPartialUpload *OnPartialUploadObservation `json:"onPartialUpload,omitempty" tf:"on_partial_upload,omitempty"` + + // A trigger that starts a workflow: the workflow begins to execute after a file is uploaded. See on_upload block below for details. 
+ OnUpload *OnUploadObservation `json:"onUpload,omitempty" tf:"on_upload,omitempty"` +} + +type WorkflowDetailsParameters struct { + + // A trigger that starts a workflow if a file is only partially uploaded. See Workflow Detail below. See on_partial_upload block below for details. + // +kubebuilder:validation:Optional + OnPartialUpload *OnPartialUploadParameters `json:"onPartialUpload,omitempty" tf:"on_partial_upload,omitempty"` + + // A trigger that starts a workflow: the workflow begins to execute after a file is uploaded. See on_upload block below for details. + // +kubebuilder:validation:Optional + OnUpload *OnUploadParameters `json:"onUpload,omitempty" tf:"on_upload,omitempty"` +} + +// ServerSpec defines the desired state of Server +type ServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServerInitParameters `json:"initProvider,omitempty"` +} + +// ServerStatus defines the observed state of Server. +type ServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Server is the Schema for the Servers API. Provides a AWS Transfer Server resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Server struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ServerSpec `json:"spec"` + Status ServerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServerList contains a list of Servers +type ServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Server `json:"items"` +} + +// Repository type metadata. +var ( + Server_Kind = "Server" + Server_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Server_Kind}.String() + Server_KindAPIVersion = Server_Kind + "." + CRDGroupVersion.String() + Server_GroupVersionKind = CRDGroupVersion.WithKind(Server_Kind) +) + +func init() { + SchemeBuilder.Register(&Server{}, &ServerList{}) +} diff --git a/apis/transfer/v1beta2/zz_user_terraformed.go b/apis/transfer/v1beta2/zz_user_terraformed.go new file mode 100755 index 0000000000..193d72da91 --- /dev/null +++ b/apis/transfer/v1beta2/zz_user_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this User +func (mg *User) GetTerraformResourceType() string { + return "aws_transfer_user" +} + +// GetConnectionDetailsMapping for this User +func (tr *User) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this User +func (tr *User) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this User +func (tr *User) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this User +func (tr *User) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this User +func (tr *User) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this User +func (tr *User) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this User +func (tr *User) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this User +func (tr *User) 
GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this User using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *User) LateInitialize(attrs []byte) (bool, error) { + params := &UserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *User) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/transfer/v1beta2/zz_user_types.go b/apis/transfer/v1beta2/zz_user_types.go new file mode 100755 index 0000000000..c50306f5a6 --- /dev/null +++ b/apis/transfer/v1beta2/zz_user_types.go @@ -0,0 +1,289 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HomeDirectoryMappingsInitParameters struct { + + // Represents an entry and a target. + Entry *string `json:"entry,omitempty" tf:"entry,omitempty"` + + // Represents the map target. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type HomeDirectoryMappingsObservation struct { + + // Represents an entry and a target. + Entry *string `json:"entry,omitempty" tf:"entry,omitempty"` + + // Represents the map target. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type HomeDirectoryMappingsParameters struct { + + // Represents an entry and a target. + // +kubebuilder:validation:Optional + Entry *string `json:"entry" tf:"entry,omitempty"` + + // Represents the map target. + // +kubebuilder:validation:Optional + Target *string `json:"target" tf:"target,omitempty"` +} + +type PosixProfileInitParameters struct { + + // The POSIX group ID used for all EFS operations by this user. + GID *float64 `json:"gid,omitempty" tf:"gid,omitempty"` + + // The secondary POSIX group IDs used for all EFS operations by this user. 
+ // +listType=set + SecondaryGids []*float64 `json:"secondaryGids,omitempty" tf:"secondary_gids,omitempty"` + + // The POSIX user ID used for all EFS operations by this user. + UID *float64 `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type PosixProfileObservation struct { + + // The POSIX group ID used for all EFS operations by this user. + GID *float64 `json:"gid,omitempty" tf:"gid,omitempty"` + + // The secondary POSIX group IDs used for all EFS operations by this user. + // +listType=set + SecondaryGids []*float64 `json:"secondaryGids,omitempty" tf:"secondary_gids,omitempty"` + + // The POSIX user ID used for all EFS operations by this user. + UID *float64 `json:"uid,omitempty" tf:"uid,omitempty"` +} + +type PosixProfileParameters struct { + + // The POSIX group ID used for all EFS operations by this user. + // +kubebuilder:validation:Optional + GID *float64 `json:"gid" tf:"gid,omitempty"` + + // The secondary POSIX group IDs used for all EFS operations by this user. + // +kubebuilder:validation:Optional + // +listType=set + SecondaryGids []*float64 `json:"secondaryGids,omitempty" tf:"secondary_gids,omitempty"` + + // The POSIX user ID used for all EFS operations by this user. + // +kubebuilder:validation:Optional + UID *float64 `json:"uid" tf:"uid,omitempty"` +} + +type UserInitParameters struct { + + // The landing directory (folder) for a user when they log in to the server using their SFTP client. It should begin with a /. The first item in the path is the name of the home bucket (accessible as ${Transfer:HomeBucket} in the policy) and the rest is the home directory (accessible as ${Transfer:HomeDirectory} in the policy). For example, /example-bucket-1234/username would set the home bucket to example-bucket-1234 and the home directory to username. 
+ HomeDirectory *string `json:"homeDirectory,omitempty" tf:"home_directory,omitempty"` + + // Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. See Home Directory Mappings below. + HomeDirectoryMappings []HomeDirectoryMappingsInitParameters `json:"homeDirectoryMappings,omitempty" tf:"home_directory_mappings,omitempty"` + + // The type of landing directory (folder) you mapped for your users' home directory. Valid values are PATH and LOGICAL. + HomeDirectoryType *string `json:"homeDirectoryType,omitempty" tf:"home_directory_type,omitempty"` + + // An IAM JSON policy document that scopes down user access to portions of their Amazon S3 bucket. IAM variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. These are evaluated on-the-fly when navigating the bucket. + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // Specifies the full POSIX identity, including user ID (Uid), group ID (Gid), and any secondary groups IDs (SecondaryGids), that controls your users' access to your Amazon EFS file systems. See Posix Profile below. + PosixProfile *PosixProfileInitParameters `json:"posixProfile,omitempty" tf:"posix_profile,omitempty"` + + // Amazon Resource Name (ARN) of an IAM role that allows the service to control your user’s access to your Amazon S3 bucket. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. 
+ // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // The Server ID of the Transfer Server (e.g., s-12345678) + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta2.Server + ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` + + // Reference to a Server in transfer to populate serverId. + // +kubebuilder:validation:Optional + ServerIDRef *v1.Reference `json:"serverIdRef,omitempty" tf:"-"` + + // Selector for a Server in transfer to populate serverId. + // +kubebuilder:validation:Optional + ServerIDSelector *v1.Selector `json:"serverIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type UserObservation struct { + + // Amazon Resource Name (ARN) of Transfer User + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The landing directory (folder) for a user when they log in to the server using their SFTP client. It should begin with a /. The first item in the path is the name of the home bucket (accessible as ${Transfer:HomeBucket} in the policy) and the rest is the home directory (accessible as ${Transfer:HomeDirectory} in the policy). For example, /example-bucket-1234/username would set the home bucket to example-bucket-1234 and the home directory to username. + HomeDirectory *string `json:"homeDirectory,omitempty" tf:"home_directory,omitempty"` + + // Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. See Home Directory Mappings below. + HomeDirectoryMappings []HomeDirectoryMappingsObservation `json:"homeDirectoryMappings,omitempty" tf:"home_directory_mappings,omitempty"` + + // The type of landing directory (folder) you mapped for your users' home directory. Valid values are PATH and LOGICAL. 
+ HomeDirectoryType *string `json:"homeDirectoryType,omitempty" tf:"home_directory_type,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An IAM JSON policy document that scopes down user access to portions of their Amazon S3 bucket. IAM variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. These are evaluated on-the-fly when navigating the bucket. + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // Specifies the full POSIX identity, including user ID (Uid), group ID (Gid), and any secondary groups IDs (SecondaryGids), that controls your users' access to your Amazon EFS file systems. See Posix Profile below. + PosixProfile *PosixProfileObservation `json:"posixProfile,omitempty" tf:"posix_profile,omitempty"` + + // Amazon Resource Name (ARN) of an IAM role that allows the service to control your user’s access to your Amazon S3 bucket. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The Server ID of the Transfer Server (e.g., s-12345678) + ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type UserParameters struct { + + // The landing directory (folder) for a user when they log in to the server using their SFTP client. It should begin with a /. The first item in the path is the name of the home bucket (accessible as ${Transfer:HomeBucket} in the policy) and the rest is the home directory (accessible as ${Transfer:HomeDirectory} in the policy). 
For example, /example-bucket-1234/username would set the home bucket to example-bucket-1234 and the home directory to username. + // +kubebuilder:validation:Optional + HomeDirectory *string `json:"homeDirectory,omitempty" tf:"home_directory,omitempty"` + + // Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. See Home Directory Mappings below. + // +kubebuilder:validation:Optional + HomeDirectoryMappings []HomeDirectoryMappingsParameters `json:"homeDirectoryMappings,omitempty" tf:"home_directory_mappings,omitempty"` + + // The type of landing directory (folder) you mapped for your users' home directory. Valid values are PATH and LOGICAL. + // +kubebuilder:validation:Optional + HomeDirectoryType *string `json:"homeDirectoryType,omitempty" tf:"home_directory_type,omitempty"` + + // An IAM JSON policy document that scopes down user access to portions of their Amazon S3 bucket. IAM variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. These are evaluated on-the-fly when navigating the bucket. + // +kubebuilder:validation:Optional + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` + + // Specifies the full POSIX identity, including user ID (Uid), group ID (Gid), and any secondary groups IDs (SecondaryGids), that controls your users' access to your Amazon EFS file systems. See Posix Profile below. + // +kubebuilder:validation:Optional + PosixProfile *PosixProfileParameters `json:"posixProfile,omitempty" tf:"posix_profile,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Amazon Resource Name (ARN) of an IAM role that allows the service to control your user’s access to your Amazon S3 bucket. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // Reference to a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleRef *v1.Reference `json:"roleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate role. + // +kubebuilder:validation:Optional + RoleSelector *v1.Selector `json:"roleSelector,omitempty" tf:"-"` + + // The Server ID of the Transfer Server (e.g., s-12345678) + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/transfer/v1beta2.Server + // +kubebuilder:validation:Optional + ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` + + // Reference to a Server in transfer to populate serverId. + // +kubebuilder:validation:Optional + ServerIDRef *v1.Reference `json:"serverIdRef,omitempty" tf:"-"` + + // Selector for a Server in transfer to populate serverId. + // +kubebuilder:validation:Optional + ServerIDSelector *v1.Selector `json:"serverIdSelector,omitempty" tf:"-"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// UserSpec defines the desired state of User +type UserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider UserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider UserInitParameters `json:"initProvider,omitempty"` +} + +// UserStatus defines the observed state of User. +type UserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider UserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// User is the Schema for the Users API. Provides a AWS Transfer User resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type User struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec UserSpec `json:"spec"` + Status UserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UserList contains a list of Users +type UserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []User `json:"items"` +} + +// Repository type metadata. +var ( + User_Kind = "User" + User_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: User_Kind}.String() + User_KindAPIVersion = User_Kind + "." 
+ CRDGroupVersion.String() + User_GroupVersionKind = CRDGroupVersion.WithKind(User_Kind) +) + +func init() { + SchemeBuilder.Register(&User{}, &UserList{}) +} diff --git a/apis/transfer/v1beta2/zz_workflow_terraformed.go b/apis/transfer/v1beta2/zz_workflow_terraformed.go new file mode 100755 index 0000000000..8b6f8af3af --- /dev/null +++ b/apis/transfer/v1beta2/zz_workflow_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workflow +func (mg *Workflow) GetTerraformResourceType() string { + return "aws_transfer_workflow" +} + +// GetConnectionDetailsMapping for this Workflow +func (tr *Workflow) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Workflow +func (tr *Workflow) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workflow +func (tr *Workflow) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workflow +func (tr *Workflow) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workflow +func (tr *Workflow) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Workflow +func (tr *Workflow) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Workflow +func (tr *Workflow) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Workflow +func (tr *Workflow) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Workflow using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Workflow) LateInitialize(attrs []byte) (bool, error) { + params := &WorkflowParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workflow) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/transfer/v1beta2/zz_workflow_types.go b/apis/transfer/v1beta2/zz_workflow_types.go new file mode 100755 index 0000000000..991d5c0481 --- /dev/null +++ b/apis/transfer/v1beta2/zz_workflow_types.go @@ -0,0 +1,1156 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters struct { + + // The ID of the file system, assigned by Amazon EFS. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type CopyStepDetailsDestinationFileLocationEFSFileLocationObservation struct { + + // The ID of the file system, assigned by Amazon EFS. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type CopyStepDetailsDestinationFileLocationEFSFileLocationParameters struct { + + // The ID of the file system, assigned by Amazon EFS. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type CopyStepDetailsDestinationFileLocationInitParameters struct { + + // Specifies the details for the EFS file being copied. + EFSFileLocation *CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. + S3FileLocation *CopyStepDetailsDestinationFileLocationS3FileLocationInitParameters `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type CopyStepDetailsDestinationFileLocationObservation struct { + + // Specifies the details for the EFS file being copied. + EFSFileLocation *CopyStepDetailsDestinationFileLocationEFSFileLocationObservation `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. + S3FileLocation *CopyStepDetailsDestinationFileLocationS3FileLocationObservation `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type CopyStepDetailsDestinationFileLocationParameters struct { + + // Specifies the details for the EFS file being copied. + // +kubebuilder:validation:Optional + EFSFileLocation *CopyStepDetailsDestinationFileLocationEFSFileLocationParameters `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. 
+ // +kubebuilder:validation:Optional + S3FileLocation *CopyStepDetailsDestinationFileLocationS3FileLocationParameters `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type CopyStepDetailsDestinationFileLocationS3FileLocationInitParameters struct { + + // Specifies the S3 bucket for the customer input file. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type CopyStepDetailsDestinationFileLocationS3FileLocationObservation struct { + + // Specifies the S3 bucket for the customer input file. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type CopyStepDetailsDestinationFileLocationS3FileLocationParameters struct { + + // Specifies the S3 bucket for the customer input file. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type CopyStepDetailsInitParameters struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + DestinationFileLocation *DestinationFileLocationInitParameters `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. 
+ OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type CopyStepDetailsObservation struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + DestinationFileLocation *DestinationFileLocationObservation `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type CopyStepDetailsParameters struct { + + // Specifies the location for the file being copied. 
Use ${Transfer:username} in this field to parametrize the destination prefix by username. + // +kubebuilder:validation:Optional + DestinationFileLocation *DestinationFileLocationParameters `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + // +kubebuilder:validation:Optional + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type CustomStepDetailsInitParameters struct { + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. 
+ SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // The ARN for the lambda function that is being called. + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Timeout, in seconds, for the step. + TimeoutSeconds *float64 `json:"timeoutSeconds,omitempty" tf:"timeout_seconds,omitempty"` +} + +type CustomStepDetailsObservation struct { + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // The ARN for the lambda function that is being called. + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Timeout, in seconds, for the step. + TimeoutSeconds *float64 `json:"timeoutSeconds,omitempty" tf:"timeout_seconds,omitempty"` +} + +type CustomStepDetailsParameters struct { + + // The name of the step, used as an identifier. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. 
+ // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // The ARN for the lambda function that is being called. + // +kubebuilder:validation:Optional + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Timeout, in seconds, for the step. + // +kubebuilder:validation:Optional + TimeoutSeconds *float64 `json:"timeoutSeconds,omitempty" tf:"timeout_seconds,omitempty"` +} + +type DecryptStepDetailsDestinationFileLocationEFSFileLocationInitParameters struct { + + // The ID of the file system, assigned by Amazon EFS. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DecryptStepDetailsDestinationFileLocationEFSFileLocationObservation struct { + + // The ID of the file system, assigned by Amazon EFS. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DecryptStepDetailsDestinationFileLocationEFSFileLocationParameters struct { + + // The ID of the file system, assigned by Amazon EFS. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DecryptStepDetailsDestinationFileLocationInitParameters struct { + + // Specifies the details for the EFS file being copied. + EFSFileLocation *DestinationFileLocationEFSFileLocationInitParameters `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. 
+ S3FileLocation *DestinationFileLocationS3FileLocationInitParameters `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type DecryptStepDetailsDestinationFileLocationObservation struct { + + // Specifies the details for the EFS file being copied. + EFSFileLocation *DestinationFileLocationEFSFileLocationObservation `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. + S3FileLocation *DestinationFileLocationS3FileLocationObservation `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type DecryptStepDetailsDestinationFileLocationParameters struct { + + // Specifies the details for the EFS file being copied. + // +kubebuilder:validation:Optional + EFSFileLocation *DestinationFileLocationEFSFileLocationParameters `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. + // +kubebuilder:validation:Optional + S3FileLocation *DestinationFileLocationS3FileLocationParameters `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type DecryptStepDetailsDestinationFileLocationS3FileLocationInitParameters struct { + + // Specifies the S3 bucket for the customer input file. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type DecryptStepDetailsDestinationFileLocationS3FileLocationObservation struct { + + // Specifies the S3 bucket for the customer input file. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type DecryptStepDetailsDestinationFileLocationS3FileLocationParameters struct { + + // Specifies the S3 bucket for the customer input file. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type DecryptStepDetailsInitParameters struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + DestinationFileLocation *DecryptStepDetailsDestinationFileLocationInitParameters `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DecryptStepDetailsObservation struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + DestinationFileLocation *DecryptStepDetailsDestinationFileLocationObservation `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DecryptStepDetailsParameters struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + // +kubebuilder:validation:Optional + DestinationFileLocation *DecryptStepDetailsDestinationFileLocationParameters `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + // +kubebuilder:validation:Optional + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type DeleteStepDetailsInitParameters struct { + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type DeleteStepDetailsObservation struct { + + // The name of the step, used as an identifier. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type DeleteStepDetailsParameters struct { + + // The name of the step, used as an identifier. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type DestinationFileLocationEFSFileLocationInitParameters struct { + + // The ID of the file system, assigned by Amazon EFS. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DestinationFileLocationEFSFileLocationObservation struct { + + // The ID of the file system, assigned by Amazon EFS. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DestinationFileLocationEFSFileLocationParameters struct { + + // The ID of the file system, assigned by Amazon EFS. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DestinationFileLocationInitParameters struct { + + // Specifies the details for the EFS file being copied. + EFSFileLocation *EFSFileLocationInitParameters `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. + S3FileLocation *S3FileLocationInitParameters `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type DestinationFileLocationObservation struct { + + // Specifies the details for the EFS file being copied. + EFSFileLocation *EFSFileLocationObservation `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. + S3FileLocation *S3FileLocationObservation `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type DestinationFileLocationParameters struct { + + // Specifies the details for the EFS file being copied. + // +kubebuilder:validation:Optional + EFSFileLocation *EFSFileLocationParameters `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. + // +kubebuilder:validation:Optional + S3FileLocation *S3FileLocationParameters `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type DestinationFileLocationS3FileLocationInitParameters struct { + + // Specifies the S3 bucket for the customer input file. 
+ Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type DestinationFileLocationS3FileLocationObservation struct { + + // Specifies the S3 bucket for the customer input file. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type DestinationFileLocationS3FileLocationParameters struct { + + // Specifies the S3 bucket for the customer input file. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type EFSFileLocationInitParameters struct { + + // The ID of the file system, assigned by Amazon EFS. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type EFSFileLocationObservation struct { + + // The ID of the file system, assigned by Amazon EFS. + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type EFSFileLocationParameters struct { + + // The ID of the file system, assigned by Amazon EFS. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"fileSystemId,omitempty" tf:"file_system_id,omitempty"` + + // The pathname for the folder being used by a workflow. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type OnExceptionStepsInitParameters struct { + + // Details for a step that performs a file copy. See Copy Step Details below. + CopyStepDetails *CopyStepDetailsInitParameters `json:"copyStepDetails,omitempty" tf:"copy_step_details,omitempty"` + + // Details for a step that invokes a lambda function. + CustomStepDetails *CustomStepDetailsInitParameters `json:"customStepDetails,omitempty" tf:"custom_step_details,omitempty"` + + // Details for a step that decrypts the file. + DecryptStepDetails *DecryptStepDetailsInitParameters `json:"decryptStepDetails,omitempty" tf:"decrypt_step_details,omitempty"` + + // Details for a step that deletes the file. + DeleteStepDetails *DeleteStepDetailsInitParameters `json:"deleteStepDetails,omitempty" tf:"delete_step_details,omitempty"` + + // Details for a step that creates one or more tags. + TagStepDetails *TagStepDetailsInitParameters `json:"tagStepDetails,omitempty" tf:"tag_step_details,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OnExceptionStepsObservation struct { + + // Details for a step that performs a file copy. See Copy Step Details below. + CopyStepDetails *CopyStepDetailsObservation `json:"copyStepDetails,omitempty" tf:"copy_step_details,omitempty"` + + // Details for a step that invokes a lambda function. + CustomStepDetails *CustomStepDetailsObservation `json:"customStepDetails,omitempty" tf:"custom_step_details,omitempty"` + + // Details for a step that decrypts the file. + DecryptStepDetails *DecryptStepDetailsObservation `json:"decryptStepDetails,omitempty" tf:"decrypt_step_details,omitempty"` + + // Details for a step that deletes the file. 
+ DeleteStepDetails *DeleteStepDetailsObservation `json:"deleteStepDetails,omitempty" tf:"delete_step_details,omitempty"` + + // Details for a step that creates one or more tags. + TagStepDetails *TagStepDetailsObservation `json:"tagStepDetails,omitempty" tf:"tag_step_details,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OnExceptionStepsParameters struct { + + // Details for a step that performs a file copy. See Copy Step Details below. + // +kubebuilder:validation:Optional + CopyStepDetails *CopyStepDetailsParameters `json:"copyStepDetails,omitempty" tf:"copy_step_details,omitempty"` + + // Details for a step that invokes a lambda function. + // +kubebuilder:validation:Optional + CustomStepDetails *CustomStepDetailsParameters `json:"customStepDetails,omitempty" tf:"custom_step_details,omitempty"` + + // Details for a step that decrypts the file. + // +kubebuilder:validation:Optional + DecryptStepDetails *DecryptStepDetailsParameters `json:"decryptStepDetails,omitempty" tf:"decrypt_step_details,omitempty"` + + // Details for a step that deletes the file. + // +kubebuilder:validation:Optional + DeleteStepDetails *DeleteStepDetailsParameters `json:"deleteStepDetails,omitempty" tf:"delete_step_details,omitempty"` + + // Details for a step that creates one or more tags. + // +kubebuilder:validation:Optional + TagStepDetails *TagStepDetailsParameters `json:"tagStepDetails,omitempty" tf:"tag_step_details,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type S3FileLocationInitParameters struct { + + // Specifies the S3 bucket for the customer input file. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. 
You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type S3FileLocationObservation struct { + + // Specifies the S3 bucket for the customer input file. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type S3FileLocationParameters struct { + + // Specifies the S3 bucket for the customer input file. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type StepsCopyStepDetailsInitParameters struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + DestinationFileLocation *CopyStepDetailsDestinationFileLocationInitParameters `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. 
Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type StepsCopyStepDetailsObservation struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + DestinationFileLocation *CopyStepDetailsDestinationFileLocationObservation `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type StepsCopyStepDetailsParameters struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + // +kubebuilder:validation:Optional + DestinationFileLocation *CopyStepDetailsDestinationFileLocationParameters `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + // +kubebuilder:validation:Optional + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type StepsCustomStepDetailsInitParameters struct { + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // The ARN for the lambda function that is being called. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Reference to a Function in lambda to populate target. + // +kubebuilder:validation:Optional + TargetRef *v1.Reference `json:"targetRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate target. + // +kubebuilder:validation:Optional + TargetSelector *v1.Selector `json:"targetSelector,omitempty" tf:"-"` + + // Timeout, in seconds, for the step. + TimeoutSeconds *float64 `json:"timeoutSeconds,omitempty" tf:"timeout_seconds,omitempty"` +} + +type StepsCustomStepDetailsObservation struct { + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // The ARN for the lambda function that is being called. + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Timeout, in seconds, for the step. + TimeoutSeconds *float64 `json:"timeoutSeconds,omitempty" tf:"timeout_seconds,omitempty"` +} + +type StepsCustomStepDetailsParameters struct { + + // The name of the step, used as an identifier. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // The ARN for the lambda function that is being called. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/lambda/v1beta2.Function + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + Target *string `json:"target,omitempty" tf:"target,omitempty"` + + // Reference to a Function in lambda to populate target. + // +kubebuilder:validation:Optional + TargetRef *v1.Reference `json:"targetRef,omitempty" tf:"-"` + + // Selector for a Function in lambda to populate target. + // +kubebuilder:validation:Optional + TargetSelector *v1.Selector `json:"targetSelector,omitempty" tf:"-"` + + // Timeout, in seconds, for the step. + // +kubebuilder:validation:Optional + TimeoutSeconds *float64 `json:"timeoutSeconds,omitempty" tf:"timeout_seconds,omitempty"` +} + +type StepsDecryptStepDetailsDestinationFileLocationInitParameters struct { + + // Specifies the details for the EFS file being copied. + EFSFileLocation *DecryptStepDetailsDestinationFileLocationEFSFileLocationInitParameters `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. 
+ S3FileLocation *DecryptStepDetailsDestinationFileLocationS3FileLocationInitParameters `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type StepsDecryptStepDetailsDestinationFileLocationObservation struct { + + // Specifies the details for the EFS file being copied. + EFSFileLocation *DecryptStepDetailsDestinationFileLocationEFSFileLocationObservation `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. + S3FileLocation *DecryptStepDetailsDestinationFileLocationS3FileLocationObservation `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type StepsDecryptStepDetailsDestinationFileLocationParameters struct { + + // Specifies the details for the EFS file being copied. + // +kubebuilder:validation:Optional + EFSFileLocation *DecryptStepDetailsDestinationFileLocationEFSFileLocationParameters `json:"efsFileLocation,omitempty" tf:"efs_file_location,omitempty"` + + // Specifies the details for the S3 file being copied. + // +kubebuilder:validation:Optional + S3FileLocation *DecryptStepDetailsDestinationFileLocationS3FileLocationParameters `json:"s3FileLocation,omitempty" tf:"s3_file_location,omitempty"` +} + +type StepsDecryptStepDetailsInitParameters struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + DestinationFileLocation *StepsDecryptStepDetailsDestinationFileLocationInitParameters `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. 
+ OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StepsDecryptStepDetailsObservation struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + DestinationFileLocation *StepsDecryptStepDetailsDestinationFileLocationObservation `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. 
+ SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StepsDecryptStepDetailsParameters struct { + + // Specifies the location for the file being copied. Use ${Transfer:username} in this field to parametrize the destination prefix by username. + // +kubebuilder:validation:Optional + DestinationFileLocation *StepsDecryptStepDetailsDestinationFileLocationParameters `json:"destinationFileLocation,omitempty" tf:"destination_file_location,omitempty"` + + // The name of the step, used as an identifier. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE. Valid values are TRUE and FALSE. + // +kubebuilder:validation:Optional + OverwriteExisting *string `json:"overwriteExisting,omitempty" tf:"overwrite_existing,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type StepsDeleteStepDetailsInitParameters struct { + + // The name of the step, used as an identifier. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type StepsDeleteStepDetailsObservation struct { + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type StepsDeleteStepDetailsParameters struct { + + // The name of the step, used as an identifier. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. 
+ // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` +} + +type StepsInitParameters struct { + + // Details for a step that performs a file copy. See Copy Step Details below. + CopyStepDetails *StepsCopyStepDetailsInitParameters `json:"copyStepDetails,omitempty" tf:"copy_step_details,omitempty"` + + // Details for a step that invokes a lambda function. + CustomStepDetails *StepsCustomStepDetailsInitParameters `json:"customStepDetails,omitempty" tf:"custom_step_details,omitempty"` + + // Details for a step that decrypts the file. + DecryptStepDetails *StepsDecryptStepDetailsInitParameters `json:"decryptStepDetails,omitempty" tf:"decrypt_step_details,omitempty"` + + // Details for a step that deletes the file. + DeleteStepDetails *StepsDeleteStepDetailsInitParameters `json:"deleteStepDetails,omitempty" tf:"delete_step_details,omitempty"` + + // Details for a step that creates one or more tags. + TagStepDetails *StepsTagStepDetailsInitParameters `json:"tagStepDetails,omitempty" tf:"tag_step_details,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StepsObservation struct { + + // Details for a step that performs a file copy. See Copy Step Details below. + CopyStepDetails *StepsCopyStepDetailsObservation `json:"copyStepDetails,omitempty" tf:"copy_step_details,omitempty"` + + // Details for a step that invokes a lambda function. + CustomStepDetails *StepsCustomStepDetailsObservation `json:"customStepDetails,omitempty" tf:"custom_step_details,omitempty"` + + // Details for a step that decrypts the file. + DecryptStepDetails *StepsDecryptStepDetailsObservation `json:"decryptStepDetails,omitempty" tf:"decrypt_step_details,omitempty"` + + // Details for a step that deletes the file. 
+ DeleteStepDetails *StepsDeleteStepDetailsObservation `json:"deleteStepDetails,omitempty" tf:"delete_step_details,omitempty"` + + // Details for a step that creates one or more tags. + TagStepDetails *StepsTagStepDetailsObservation `json:"tagStepDetails,omitempty" tf:"tag_step_details,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StepsParameters struct { + + // Details for a step that performs a file copy. See Copy Step Details below. + // +kubebuilder:validation:Optional + CopyStepDetails *StepsCopyStepDetailsParameters `json:"copyStepDetails,omitempty" tf:"copy_step_details,omitempty"` + + // Details for a step that invokes a lambda function. + // +kubebuilder:validation:Optional + CustomStepDetails *StepsCustomStepDetailsParameters `json:"customStepDetails,omitempty" tf:"custom_step_details,omitempty"` + + // Details for a step that decrypts the file. + // +kubebuilder:validation:Optional + DecryptStepDetails *StepsDecryptStepDetailsParameters `json:"decryptStepDetails,omitempty" tf:"decrypt_step_details,omitempty"` + + // Details for a step that deletes the file. + // +kubebuilder:validation:Optional + DeleteStepDetails *StepsDeleteStepDetailsParameters `json:"deleteStepDetails,omitempty" tf:"delete_step_details,omitempty"` + + // Details for a step that creates one or more tags. + // +kubebuilder:validation:Optional + TagStepDetails *StepsTagStepDetailsParameters `json:"tagStepDetails,omitempty" tf:"tag_step_details,omitempty"` + + // One of the following step types are supported. COPY, CUSTOM, DECRYPT, DELETE, and TAG. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type StepsTagStepDetailsInitParameters struct { + + // The name of the step, used as an identifier. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // Key-value map of resource tags. + Tags []TagStepDetailsTagsInitParameters `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StepsTagStepDetailsObservation struct { + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // Key-value map of resource tags. + Tags []TagStepDetailsTagsObservation `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StepsTagStepDetailsParameters struct { + + // The name of the step, used as an identifier. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. 
In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + Tags []TagStepDetailsTagsParameters `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TagStepDetailsInitParameters struct { + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // Key-value map of resource tags. + Tags []TagsInitParameters `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TagStepDetailsObservation struct { + + // The name of the step, used as an identifier. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. 
+ SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // Key-value map of resource tags. + Tags []TagsObservation `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TagStepDetailsParameters struct { + + // The name of the step, used as an identifier. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow. Enter ${previous.file} to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value. Enter ${original.file} to use the originally-uploaded file location as input for this step. + // +kubebuilder:validation:Optional + SourceFileLocation *string `json:"sourceFileLocation,omitempty" tf:"source_file_location,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + Tags []TagsParameters `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TagStepDetailsTagsInitParameters struct { + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value that corresponds to the key. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagStepDetailsTagsObservation struct { + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value that corresponds to the key. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagStepDetailsTagsParameters struct { + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. 
+ // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The value that corresponds to the key. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TagsInitParameters struct { + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value that corresponds to the key. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagsObservation struct { + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value that corresponds to the key. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TagsParameters struct { + + // The name assigned to the file when it was created in S3. You use the object key to retrieve the object. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // The value that corresponds to the key. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type WorkflowInitParameters struct { + + // A textual description for the workflow. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the steps (actions) to take if errors are encountered during execution of the workflow. See Workflow Steps below. + OnExceptionSteps []OnExceptionStepsInitParameters `json:"onExceptionSteps,omitempty" tf:"on_exception_steps,omitempty"` + + // Specifies the details for the steps that are in the specified workflow. See Workflow Steps below. + Steps []StepsInitParameters `json:"steps,omitempty" tf:"steps,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WorkflowObservation struct { + + // The Workflow ARN. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // A textual description for the workflow. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Workflow id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the steps (actions) to take if errors are encountered during execution of the workflow. See Workflow Steps below. + OnExceptionSteps []OnExceptionStepsObservation `json:"onExceptionSteps,omitempty" tf:"on_exception_steps,omitempty"` + + // Specifies the details for the steps that are in the specified workflow. See Workflow Steps below. + Steps []StepsObservation `json:"steps,omitempty" tf:"steps,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type WorkflowParameters struct { + + // A textual description for the workflow. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the steps (actions) to take if errors are encountered during execution of the workflow. See Workflow Steps below. + // +kubebuilder:validation:Optional + OnExceptionSteps []OnExceptionStepsParameters `json:"onExceptionSteps,omitempty" tf:"on_exception_steps,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies the details for the steps that are in the specified workflow. See Workflow Steps below. 
+ // +kubebuilder:validation:Optional + Steps []StepsParameters `json:"steps,omitempty" tf:"steps,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WorkflowSpec defines the desired state of Workflow +type WorkflowSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkflowParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WorkflowInitParameters `json:"initProvider,omitempty"` +} + +// WorkflowStatus defines the observed state of Workflow. +type WorkflowStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkflowObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workflow is the Schema for the Workflows API. Provides a AWS Transfer Workflow resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Workflow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.steps) || (has(self.initProvider) && has(self.initProvider.steps))",message="spec.forProvider.steps is a required parameter" + Spec WorkflowSpec `json:"spec"` + Status WorkflowStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkflowList contains a list of Workflows +type WorkflowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workflow `json:"items"` +} + +// Repository type metadata. +var ( + Workflow_Kind = "Workflow" + Workflow_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workflow_Kind}.String() + Workflow_KindAPIVersion = Workflow_Kind + "." + CRDGroupVersion.String() + Workflow_GroupVersionKind = CRDGroupVersion.WithKind(Workflow_Kind) +) + +func init() { + SchemeBuilder.Register(&Workflow{}, &WorkflowList{}) +} diff --git a/apis/waf/v1beta1/zz_generated.conversion_hubs.go b/apis/waf/v1beta1/zz_generated.conversion_hubs.go index 868ba95efa..dbe0307623 100755 --- a/apis/waf/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/waf/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. 
-func (tr *ByteMatchSet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *GeoMatchSet) Hub() {} @@ -18,23 +15,8 @@ func (tr *IPSet) Hub() {} // Hub marks this type as a conversion hub. func (tr *RateBasedRule) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *RegexMatchSet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *RegexPatternSet) Hub() {} // Hub marks this type as a conversion hub. func (tr *Rule) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SizeConstraintSet) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SQLInjectionMatchSet) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WebACL) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *XSSMatchSet) Hub() {} diff --git a/apis/waf/v1beta1/zz_generated.conversion_spokes.go b/apis/waf/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..d24ff6e935 --- /dev/null +++ b/apis/waf/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ByteMatchSet to the hub type. +func (tr *ByteMatchSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ByteMatchSet type. 
+func (tr *ByteMatchSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this RegexMatchSet to the hub type. +func (tr *RegexMatchSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RegexMatchSet type. +func (tr *RegexMatchSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SizeConstraintSet to the hub type. +func (tr *SizeConstraintSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SizeConstraintSet type. 
+func (tr *SizeConstraintSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SQLInjectionMatchSet to the hub type. +func (tr *SQLInjectionMatchSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SQLInjectionMatchSet type. +func (tr *SQLInjectionMatchSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WebACL to the hub type. +func (tr *WebACL) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WebACL type. 
+func (tr *WebACL) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this XSSMatchSet to the hub type. +func (tr *XSSMatchSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the XSSMatchSet type. +func (tr *XSSMatchSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/waf/v1beta2/zz_bytematchset_terraformed.go b/apis/waf/v1beta2/zz_bytematchset_terraformed.go new file mode 100755 index 0000000000..d9d59a4ba1 --- /dev/null +++ b/apis/waf/v1beta2/zz_bytematchset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ByteMatchSet +func (mg *ByteMatchSet) GetTerraformResourceType() string { + return "aws_waf_byte_match_set" +} + +// GetConnectionDetailsMapping for this ByteMatchSet +func (tr *ByteMatchSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ByteMatchSet +func (tr *ByteMatchSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ByteMatchSet +func (tr *ByteMatchSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ByteMatchSet +func (tr *ByteMatchSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ByteMatchSet +func (tr *ByteMatchSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ByteMatchSet +func (tr *ByteMatchSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ByteMatchSet +func (tr *ByteMatchSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base 
:= map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ByteMatchSet +func (tr *ByteMatchSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ByteMatchSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ByteMatchSet) LateInitialize(attrs []byte) (bool, error) { + params := &ByteMatchSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ByteMatchSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/waf/v1beta2/zz_bytematchset_types.go b/apis/waf/v1beta2/zz_bytematchset_types.go new file mode 100755 index 0000000000..2a47bc9e75 --- /dev/null +++ b/apis/waf/v1beta2/zz_bytematchset_types.go @@ -0,0 +1,238 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ByteMatchSetInitParameters struct { + + // Specifies the bytes (typically a string that corresponds + // with ASCII characters) that you want to search for in web requests, + // the location in requests that you want to search, and other settings. + ByteMatchTuples []ByteMatchTuplesInitParameters `json:"byteMatchTuples,omitempty" tf:"byte_match_tuples,omitempty"` + + // The name or description of the Byte Match Set. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ByteMatchSetObservation struct { + + // Specifies the bytes (typically a string that corresponds + // with ASCII characters) that you want to search for in web requests, + // the location in requests that you want to search, and other settings. + ByteMatchTuples []ByteMatchTuplesObservation `json:"byteMatchTuples,omitempty" tf:"byte_match_tuples,omitempty"` + + // The ID of the WAF Byte Match Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name or description of the Byte Match Set. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ByteMatchSetParameters struct { + + // Specifies the bytes (typically a string that corresponds + // with ASCII characters) that you want to search for in web requests, + // the location in requests that you want to search, and other settings. + // +kubebuilder:validation:Optional + ByteMatchTuples []ByteMatchTuplesParameters `json:"byteMatchTuples,omitempty" tf:"byte_match_tuples,omitempty"` + + // The name or description of the Byte Match Set. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type ByteMatchTuplesInitParameters struct { + + // The part of a web request that you want to search, such as a specified header or a query string. + FieldToMatch *FieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Within the portion of a web request that you want to search + // (for example, in the query string, if any), specify where you want to search. + // e.g., CONTAINS, CONTAINS_WORD or EXACTLY. + // See docs + // for all supported values. + PositionalConstraint *string `json:"positionalConstraint,omitempty" tf:"positional_constraint,omitempty"` + + // The value that you want to search for within the field specified by field_to_match, e.g., badrefer1. + // See docs + // for all supported values. + TargetString *string `json:"targetString,omitempty" tf:"target_string,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. 
+ TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type ByteMatchTuplesObservation struct { + + // The part of a web request that you want to search, such as a specified header or a query string. + FieldToMatch *FieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Within the portion of a web request that you want to search + // (for example, in the query string, if any), specify where you want to search. + // e.g., CONTAINS, CONTAINS_WORD or EXACTLY. + // See docs + // for all supported values. + PositionalConstraint *string `json:"positionalConstraint,omitempty" tf:"positional_constraint,omitempty"` + + // The value that you want to search for within the field specified by field_to_match, e.g., badrefer1. + // See docs + // for all supported values. + TargetString *string `json:"targetString,omitempty" tf:"target_string,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type ByteMatchTuplesParameters struct { + + // The part of a web request that you want to search, such as a specified header or a query string. + // +kubebuilder:validation:Optional + FieldToMatch *FieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // Within the portion of a web request that you want to search + // (for example, in the query string, if any), specify where you want to search. + // e.g., CONTAINS, CONTAINS_WORD or EXACTLY. + // See docs + // for all supported values. 
+ // +kubebuilder:validation:Optional + PositionalConstraint *string `json:"positionalConstraint" tf:"positional_constraint,omitempty"` + + // The value that you want to search for within the field specified by field_to_match, e.g., badrefer1. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + TargetString *string `json:"targetString,omitempty" tf:"target_string,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +type FieldToMatchInitParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FieldToMatchObservation struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FieldToMatchParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// ByteMatchSetSpec defines the desired state of ByteMatchSet +type ByteMatchSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ByteMatchSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ByteMatchSetInitParameters `json:"initProvider,omitempty"` +} + +// ByteMatchSetStatus defines the observed state of ByteMatchSet. +type ByteMatchSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ByteMatchSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ByteMatchSet is the Schema for the ByteMatchSets API. Provides a AWS WAF Byte Match Set resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ByteMatchSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ByteMatchSetSpec `json:"spec"` + Status ByteMatchSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ByteMatchSetList contains a list of ByteMatchSets +type ByteMatchSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ByteMatchSet `json:"items"` +} + +// Repository type metadata. +var ( + ByteMatchSet_Kind = "ByteMatchSet" + ByteMatchSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ByteMatchSet_Kind}.String() + ByteMatchSet_KindAPIVersion = ByteMatchSet_Kind + "." 
+ CRDGroupVersion.String() + ByteMatchSet_GroupVersionKind = CRDGroupVersion.WithKind(ByteMatchSet_Kind) +) + +func init() { + SchemeBuilder.Register(&ByteMatchSet{}, &ByteMatchSetList{}) +} diff --git a/apis/waf/v1beta2/zz_generated.conversion_hubs.go b/apis/waf/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..7f9898eef5 --- /dev/null +++ b/apis/waf/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ByteMatchSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RegexMatchSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SizeConstraintSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SQLInjectionMatchSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WebACL) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *XSSMatchSet) Hub() {} diff --git a/apis/waf/v1beta2/zz_generated.deepcopy.go b/apis/waf/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..ef8d5b1e26 --- /dev/null +++ b/apis/waf/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2654 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. +func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ByteMatchSet) DeepCopyInto(out *ByteMatchSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSet. +func (in *ByteMatchSet) DeepCopy() *ByteMatchSet { + if in == nil { + return nil + } + out := new(ByteMatchSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ByteMatchSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchSetInitParameters) DeepCopyInto(out *ByteMatchSetInitParameters) { + *out = *in + if in.ByteMatchTuples != nil { + in, out := &in.ByteMatchTuples, &out.ByteMatchTuples + *out = make([]ByteMatchTuplesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetInitParameters. +func (in *ByteMatchSetInitParameters) DeepCopy() *ByteMatchSetInitParameters { + if in == nil { + return nil + } + out := new(ByteMatchSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ByteMatchSetList) DeepCopyInto(out *ByteMatchSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ByteMatchSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetList. +func (in *ByteMatchSetList) DeepCopy() *ByteMatchSetList { + if in == nil { + return nil + } + out := new(ByteMatchSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ByteMatchSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchSetObservation) DeepCopyInto(out *ByteMatchSetObservation) { + *out = *in + if in.ByteMatchTuples != nil { + in, out := &in.ByteMatchTuples, &out.ByteMatchTuples + *out = make([]ByteMatchTuplesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetObservation. +func (in *ByteMatchSetObservation) DeepCopy() *ByteMatchSetObservation { + if in == nil { + return nil + } + out := new(ByteMatchSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ByteMatchSetParameters) DeepCopyInto(out *ByteMatchSetParameters) { + *out = *in + if in.ByteMatchTuples != nil { + in, out := &in.ByteMatchTuples, &out.ByteMatchTuples + *out = make([]ByteMatchTuplesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetParameters. +func (in *ByteMatchSetParameters) DeepCopy() *ByteMatchSetParameters { + if in == nil { + return nil + } + out := new(ByteMatchSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchSetSpec) DeepCopyInto(out *ByteMatchSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetSpec. +func (in *ByteMatchSetSpec) DeepCopy() *ByteMatchSetSpec { + if in == nil { + return nil + } + out := new(ByteMatchSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchSetStatus) DeepCopyInto(out *ByteMatchSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetStatus. 
+func (in *ByteMatchSetStatus) DeepCopy() *ByteMatchSetStatus { + if in == nil { + return nil + } + out := new(ByteMatchSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchTuplesInitParameters) DeepCopyInto(out *ByteMatchTuplesInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(FieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PositionalConstraint != nil { + in, out := &in.PositionalConstraint, &out.PositionalConstraint + *out = new(string) + **out = **in + } + if in.TargetString != nil { + in, out := &in.TargetString, &out.TargetString + *out = new(string) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchTuplesInitParameters. +func (in *ByteMatchTuplesInitParameters) DeepCopy() *ByteMatchTuplesInitParameters { + if in == nil { + return nil + } + out := new(ByteMatchTuplesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ByteMatchTuplesObservation) DeepCopyInto(out *ByteMatchTuplesObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(FieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.PositionalConstraint != nil { + in, out := &in.PositionalConstraint, &out.PositionalConstraint + *out = new(string) + **out = **in + } + if in.TargetString != nil { + in, out := &in.TargetString, &out.TargetString + *out = new(string) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchTuplesObservation. +func (in *ByteMatchTuplesObservation) DeepCopy() *ByteMatchTuplesObservation { + if in == nil { + return nil + } + out := new(ByteMatchTuplesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchTuplesParameters) DeepCopyInto(out *ByteMatchTuplesParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(FieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.PositionalConstraint != nil { + in, out := &in.PositionalConstraint, &out.PositionalConstraint + *out = new(string) + **out = **in + } + if in.TargetString != nil { + in, out := &in.TargetString, &out.TargetString + *out = new(string) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchTuplesParameters. 
+func (in *ByteMatchTuplesParameters) DeepCopy() *ByteMatchTuplesParameters { + if in == nil { + return nil + } + out := new(ByteMatchTuplesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultActionInitParameters) DeepCopyInto(out *DefaultActionInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultActionInitParameters. +func (in *DefaultActionInitParameters) DeepCopy() *DefaultActionInitParameters { + if in == nil { + return nil + } + out := new(DefaultActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultActionObservation) DeepCopyInto(out *DefaultActionObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultActionObservation. +func (in *DefaultActionObservation) DeepCopy() *DefaultActionObservation { + if in == nil { + return nil + } + out := new(DefaultActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultActionParameters) DeepCopyInto(out *DefaultActionParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultActionParameters. 
+func (in *DefaultActionParameters) DeepCopy() *DefaultActionParameters { + if in == nil { + return nil + } + out := new(DefaultActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldToMatchInitParameters) DeepCopyInto(out *FieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldToMatchInitParameters. +func (in *FieldToMatchInitParameters) DeepCopy() *FieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(FieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldToMatchObservation) DeepCopyInto(out *FieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldToMatchObservation. +func (in *FieldToMatchObservation) DeepCopy() *FieldToMatchObservation { + if in == nil { + return nil + } + out := new(FieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FieldToMatchParameters) DeepCopyInto(out *FieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldToMatchParameters. +func (in *FieldToMatchParameters) DeepCopy() *FieldToMatchParameters { + if in == nil { + return nil + } + out := new(FieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationInitParameters) DeepCopyInto(out *LoggingConfigurationInitParameters) { + *out = *in + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.LogDestinationRef != nil { + in, out := &in.LogDestinationRef, &out.LogDestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogDestinationSelector != nil { + in, out := &in.LogDestinationSelector, &out.LogDestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RedactedFields != nil { + in, out := &in.RedactedFields, &out.RedactedFields + *out = new(RedactedFieldsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationInitParameters. +func (in *LoggingConfigurationInitParameters) DeepCopy() *LoggingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingConfigurationObservation) DeepCopyInto(out *LoggingConfigurationObservation) { + *out = *in + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.RedactedFields != nil { + in, out := &in.RedactedFields, &out.RedactedFields + *out = new(RedactedFieldsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationObservation. +func (in *LoggingConfigurationObservation) DeepCopy() *LoggingConfigurationObservation { + if in == nil { + return nil + } + out := new(LoggingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationParameters) DeepCopyInto(out *LoggingConfigurationParameters) { + *out = *in + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.LogDestinationRef != nil { + in, out := &in.LogDestinationRef, &out.LogDestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogDestinationSelector != nil { + in, out := &in.LogDestinationSelector, &out.LogDestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RedactedFields != nil { + in, out := &in.RedactedFields, &out.RedactedFields + *out = new(RedactedFieldsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationParameters. +func (in *LoggingConfigurationParameters) DeepCopy() *LoggingConfigurationParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *OverrideActionInitParameters) DeepCopyInto(out *OverrideActionInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideActionInitParameters. +func (in *OverrideActionInitParameters) DeepCopy() *OverrideActionInitParameters { + if in == nil { + return nil + } + out := new(OverrideActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideActionObservation) DeepCopyInto(out *OverrideActionObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideActionObservation. +func (in *OverrideActionObservation) DeepCopy() *OverrideActionObservation { + if in == nil { + return nil + } + out := new(OverrideActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideActionParameters) DeepCopyInto(out *OverrideActionParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideActionParameters. +func (in *OverrideActionParameters) DeepCopy() *OverrideActionParameters { + if in == nil { + return nil + } + out := new(OverrideActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedactedFieldsFieldToMatchInitParameters) DeepCopyInto(out *RedactedFieldsFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsFieldToMatchInitParameters. +func (in *RedactedFieldsFieldToMatchInitParameters) DeepCopy() *RedactedFieldsFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(RedactedFieldsFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedactedFieldsFieldToMatchObservation) DeepCopyInto(out *RedactedFieldsFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsFieldToMatchObservation. +func (in *RedactedFieldsFieldToMatchObservation) DeepCopy() *RedactedFieldsFieldToMatchObservation { + if in == nil { + return nil + } + out := new(RedactedFieldsFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedactedFieldsFieldToMatchParameters) DeepCopyInto(out *RedactedFieldsFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsFieldToMatchParameters. +func (in *RedactedFieldsFieldToMatchParameters) DeepCopy() *RedactedFieldsFieldToMatchParameters { + if in == nil { + return nil + } + out := new(RedactedFieldsFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedactedFieldsInitParameters) DeepCopyInto(out *RedactedFieldsInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = make([]RedactedFieldsFieldToMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsInitParameters. +func (in *RedactedFieldsInitParameters) DeepCopy() *RedactedFieldsInitParameters { + if in == nil { + return nil + } + out := new(RedactedFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedactedFieldsObservation) DeepCopyInto(out *RedactedFieldsObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = make([]RedactedFieldsFieldToMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsObservation. 
+func (in *RedactedFieldsObservation) DeepCopy() *RedactedFieldsObservation { + if in == nil { + return nil + } + out := new(RedactedFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedactedFieldsParameters) DeepCopyInto(out *RedactedFieldsParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = make([]RedactedFieldsFieldToMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsParameters. +func (in *RedactedFieldsParameters) DeepCopy() *RedactedFieldsParameters { + if in == nil { + return nil + } + out := new(RedactedFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchSet) DeepCopyInto(out *RegexMatchSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSet. +func (in *RegexMatchSet) DeepCopy() *RegexMatchSet { + if in == nil { + return nil + } + out := new(RegexMatchSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RegexMatchSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegexMatchSetInitParameters) DeepCopyInto(out *RegexMatchSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegexMatchTuple != nil { + in, out := &in.RegexMatchTuple, &out.RegexMatchTuple + *out = make([]RegexMatchTupleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetInitParameters. +func (in *RegexMatchSetInitParameters) DeepCopy() *RegexMatchSetInitParameters { + if in == nil { + return nil + } + out := new(RegexMatchSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchSetList) DeepCopyInto(out *RegexMatchSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RegexMatchSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetList. +func (in *RegexMatchSetList) DeepCopy() *RegexMatchSetList { + if in == nil { + return nil + } + out := new(RegexMatchSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RegexMatchSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegexMatchSetObservation) DeepCopyInto(out *RegexMatchSetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegexMatchTuple != nil { + in, out := &in.RegexMatchTuple, &out.RegexMatchTuple + *out = make([]RegexMatchTupleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetObservation. +func (in *RegexMatchSetObservation) DeepCopy() *RegexMatchSetObservation { + if in == nil { + return nil + } + out := new(RegexMatchSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchSetParameters) DeepCopyInto(out *RegexMatchSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegexMatchTuple != nil { + in, out := &in.RegexMatchTuple, &out.RegexMatchTuple + *out = make([]RegexMatchTupleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetParameters. +func (in *RegexMatchSetParameters) DeepCopy() *RegexMatchSetParameters { + if in == nil { + return nil + } + out := new(RegexMatchSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegexMatchSetSpec) DeepCopyInto(out *RegexMatchSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetSpec. +func (in *RegexMatchSetSpec) DeepCopy() *RegexMatchSetSpec { + if in == nil { + return nil + } + out := new(RegexMatchSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchSetStatus) DeepCopyInto(out *RegexMatchSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetStatus. +func (in *RegexMatchSetStatus) DeepCopy() *RegexMatchSetStatus { + if in == nil { + return nil + } + out := new(RegexMatchSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleFieldToMatchInitParameters) DeepCopyInto(out *RegexMatchTupleFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleFieldToMatchInitParameters. 
+func (in *RegexMatchTupleFieldToMatchInitParameters) DeepCopy() *RegexMatchTupleFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(RegexMatchTupleFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleFieldToMatchObservation) DeepCopyInto(out *RegexMatchTupleFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleFieldToMatchObservation. +func (in *RegexMatchTupleFieldToMatchObservation) DeepCopy() *RegexMatchTupleFieldToMatchObservation { + if in == nil { + return nil + } + out := new(RegexMatchTupleFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleFieldToMatchParameters) DeepCopyInto(out *RegexMatchTupleFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleFieldToMatchParameters. +func (in *RegexMatchTupleFieldToMatchParameters) DeepCopy() *RegexMatchTupleFieldToMatchParameters { + if in == nil { + return nil + } + out := new(RegexMatchTupleFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegexMatchTupleInitParameters) DeepCopyInto(out *RegexMatchTupleInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(RegexMatchTupleFieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetID != nil { + in, out := &in.RegexPatternSetID, &out.RegexPatternSetID + *out = new(string) + **out = **in + } + if in.RegexPatternSetIDRef != nil { + in, out := &in.RegexPatternSetIDRef, &out.RegexPatternSetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetIDSelector != nil { + in, out := &in.RegexPatternSetIDSelector, &out.RegexPatternSetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleInitParameters. +func (in *RegexMatchTupleInitParameters) DeepCopy() *RegexMatchTupleInitParameters { + if in == nil { + return nil + } + out := new(RegexMatchTupleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleObservation) DeepCopyInto(out *RegexMatchTupleObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(RegexMatchTupleFieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetID != nil { + in, out := &in.RegexPatternSetID, &out.RegexPatternSetID + *out = new(string) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleObservation. 
+func (in *RegexMatchTupleObservation) DeepCopy() *RegexMatchTupleObservation { + if in == nil { + return nil + } + out := new(RegexMatchTupleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleParameters) DeepCopyInto(out *RegexMatchTupleParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(RegexMatchTupleFieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetID != nil { + in, out := &in.RegexPatternSetID, &out.RegexPatternSetID + *out = new(string) + **out = **in + } + if in.RegexPatternSetIDRef != nil { + in, out := &in.RegexPatternSetIDRef, &out.RegexPatternSetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetIDSelector != nil { + in, out := &in.RegexPatternSetIDSelector, &out.RegexPatternSetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleParameters. +func (in *RegexMatchTupleParameters) DeepCopy() *RegexMatchTupleParameters { + if in == nil { + return nil + } + out := new(RegexMatchTupleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesInitParameters) DeepCopyInto(out *RulesInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OverrideAction != nil { + in, out := &in.OverrideAction, &out.OverrideAction + *out = new(OverrideActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RuleID != nil { + in, out := &in.RuleID, &out.RuleID + *out = new(string) + **out = **in + } + if in.RuleIDRef != nil { + in, out := &in.RuleIDRef, &out.RuleIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RuleIDSelector != nil { + in, out := &in.RuleIDSelector, &out.RuleIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesInitParameters. +func (in *RulesInitParameters) DeepCopy() *RulesInitParameters { + if in == nil { + return nil + } + out := new(RulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesObservation) DeepCopyInto(out *RulesObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionObservation) + (*in).DeepCopyInto(*out) + } + if in.OverrideAction != nil { + in, out := &in.OverrideAction, &out.OverrideAction + *out = new(OverrideActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RuleID != nil { + in, out := &in.RuleID, &out.RuleID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesObservation. +func (in *RulesObservation) DeepCopy() *RulesObservation { + if in == nil { + return nil + } + out := new(RulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesParameters) DeepCopyInto(out *RulesParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionParameters) + (*in).DeepCopyInto(*out) + } + if in.OverrideAction != nil { + in, out := &in.OverrideAction, &out.OverrideAction + *out = new(OverrideActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RuleID != nil { + in, out := &in.RuleID, &out.RuleID + *out = new(string) + **out = **in + } + if in.RuleIDRef != nil { + in, out := &in.RuleIDRef, &out.RuleIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RuleIDSelector != nil { + in, out := &in.RuleIDSelector, &out.RuleIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesParameters. +func (in *RulesParameters) DeepCopy() *RulesParameters { + if in == nil { + return nil + } + out := new(RulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSet) DeepCopyInto(out *SQLInjectionMatchSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSet. +func (in *SQLInjectionMatchSet) DeepCopy() *SQLInjectionMatchSet { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SQLInjectionMatchSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetInitParameters) DeepCopyInto(out *SQLInjectionMatchSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLInjectionMatchTuples != nil { + in, out := &in.SQLInjectionMatchTuples, &out.SQLInjectionMatchTuples + *out = make([]SQLInjectionMatchTuplesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetInitParameters. +func (in *SQLInjectionMatchSetInitParameters) DeepCopy() *SQLInjectionMatchSetInitParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetList) DeepCopyInto(out *SQLInjectionMatchSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SQLInjectionMatchSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetList. +func (in *SQLInjectionMatchSetList) DeepCopy() *SQLInjectionMatchSetList { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SQLInjectionMatchSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetObservation) DeepCopyInto(out *SQLInjectionMatchSetObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLInjectionMatchTuples != nil { + in, out := &in.SQLInjectionMatchTuples, &out.SQLInjectionMatchTuples + *out = make([]SQLInjectionMatchTuplesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetObservation. +func (in *SQLInjectionMatchSetObservation) DeepCopy() *SQLInjectionMatchSetObservation { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetParameters) DeepCopyInto(out *SQLInjectionMatchSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SQLInjectionMatchTuples != nil { + in, out := &in.SQLInjectionMatchTuples, &out.SQLInjectionMatchTuples + *out = make([]SQLInjectionMatchTuplesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetParameters. 
+func (in *SQLInjectionMatchSetParameters) DeepCopy() *SQLInjectionMatchSetParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetSpec) DeepCopyInto(out *SQLInjectionMatchSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetSpec. +func (in *SQLInjectionMatchSetSpec) DeepCopy() *SQLInjectionMatchSetSpec { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetStatus) DeepCopyInto(out *SQLInjectionMatchSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetStatus. +func (in *SQLInjectionMatchSetStatus) DeepCopy() *SQLInjectionMatchSetStatus { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLInjectionMatchTuplesFieldToMatchInitParameters) DeepCopyInto(out *SQLInjectionMatchTuplesFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTuplesFieldToMatchInitParameters. +func (in *SQLInjectionMatchTuplesFieldToMatchInitParameters) DeepCopy() *SQLInjectionMatchTuplesFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTuplesFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchTuplesFieldToMatchObservation) DeepCopyInto(out *SQLInjectionMatchTuplesFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTuplesFieldToMatchObservation. +func (in *SQLInjectionMatchTuplesFieldToMatchObservation) DeepCopy() *SQLInjectionMatchTuplesFieldToMatchObservation { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTuplesFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLInjectionMatchTuplesFieldToMatchParameters) DeepCopyInto(out *SQLInjectionMatchTuplesFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTuplesFieldToMatchParameters. +func (in *SQLInjectionMatchTuplesFieldToMatchParameters) DeepCopy() *SQLInjectionMatchTuplesFieldToMatchParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTuplesFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchTuplesInitParameters) DeepCopyInto(out *SQLInjectionMatchTuplesInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SQLInjectionMatchTuplesFieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTuplesInitParameters. +func (in *SQLInjectionMatchTuplesInitParameters) DeepCopy() *SQLInjectionMatchTuplesInitParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTuplesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLInjectionMatchTuplesObservation) DeepCopyInto(out *SQLInjectionMatchTuplesObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SQLInjectionMatchTuplesFieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTuplesObservation. +func (in *SQLInjectionMatchTuplesObservation) DeepCopy() *SQLInjectionMatchTuplesObservation { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTuplesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchTuplesParameters) DeepCopyInto(out *SQLInjectionMatchTuplesParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SQLInjectionMatchTuplesFieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTuplesParameters. +func (in *SQLInjectionMatchTuplesParameters) DeepCopy() *SQLInjectionMatchTuplesParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTuplesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SizeConstraintSet) DeepCopyInto(out *SizeConstraintSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSet. +func (in *SizeConstraintSet) DeepCopy() *SizeConstraintSet { + if in == nil { + return nil + } + out := new(SizeConstraintSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SizeConstraintSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintSetInitParameters) DeepCopyInto(out *SizeConstraintSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SizeConstraints != nil { + in, out := &in.SizeConstraints, &out.SizeConstraints + *out = make([]SizeConstraintsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetInitParameters. +func (in *SizeConstraintSetInitParameters) DeepCopy() *SizeConstraintSetInitParameters { + if in == nil { + return nil + } + out := new(SizeConstraintSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SizeConstraintSetList) DeepCopyInto(out *SizeConstraintSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SizeConstraintSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetList. +func (in *SizeConstraintSetList) DeepCopy() *SizeConstraintSetList { + if in == nil { + return nil + } + out := new(SizeConstraintSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SizeConstraintSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintSetObservation) DeepCopyInto(out *SizeConstraintSetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SizeConstraints != nil { + in, out := &in.SizeConstraints, &out.SizeConstraints + *out = make([]SizeConstraintsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetObservation. 
+func (in *SizeConstraintSetObservation) DeepCopy() *SizeConstraintSetObservation { + if in == nil { + return nil + } + out := new(SizeConstraintSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintSetParameters) DeepCopyInto(out *SizeConstraintSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SizeConstraints != nil { + in, out := &in.SizeConstraints, &out.SizeConstraints + *out = make([]SizeConstraintsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetParameters. +func (in *SizeConstraintSetParameters) DeepCopy() *SizeConstraintSetParameters { + if in == nil { + return nil + } + out := new(SizeConstraintSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintSetSpec) DeepCopyInto(out *SizeConstraintSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetSpec. +func (in *SizeConstraintSetSpec) DeepCopy() *SizeConstraintSetSpec { + if in == nil { + return nil + } + out := new(SizeConstraintSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SizeConstraintSetStatus) DeepCopyInto(out *SizeConstraintSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetStatus. +func (in *SizeConstraintSetStatus) DeepCopy() *SizeConstraintSetStatus { + if in == nil { + return nil + } + out := new(SizeConstraintSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsFieldToMatchInitParameters) DeepCopyInto(out *SizeConstraintsFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsFieldToMatchInitParameters. +func (in *SizeConstraintsFieldToMatchInitParameters) DeepCopy() *SizeConstraintsFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(SizeConstraintsFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsFieldToMatchObservation) DeepCopyInto(out *SizeConstraintsFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsFieldToMatchObservation. 
+func (in *SizeConstraintsFieldToMatchObservation) DeepCopy() *SizeConstraintsFieldToMatchObservation { + if in == nil { + return nil + } + out := new(SizeConstraintsFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsFieldToMatchParameters) DeepCopyInto(out *SizeConstraintsFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsFieldToMatchParameters. +func (in *SizeConstraintsFieldToMatchParameters) DeepCopy() *SizeConstraintsFieldToMatchParameters { + if in == nil { + return nil + } + out := new(SizeConstraintsFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsInitParameters) DeepCopyInto(out *SizeConstraintsInitParameters) { + *out = *in + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SizeConstraintsFieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsInitParameters. 
+func (in *SizeConstraintsInitParameters) DeepCopy() *SizeConstraintsInitParameters { + if in == nil { + return nil + } + out := new(SizeConstraintsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsObservation) DeepCopyInto(out *SizeConstraintsObservation) { + *out = *in + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SizeConstraintsFieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsObservation. +func (in *SizeConstraintsObservation) DeepCopy() *SizeConstraintsObservation { + if in == nil { + return nil + } + out := new(SizeConstraintsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SizeConstraintsParameters) DeepCopyInto(out *SizeConstraintsParameters) { + *out = *in + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SizeConstraintsFieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsParameters. +func (in *SizeConstraintsParameters) DeepCopy() *SizeConstraintsParameters { + if in == nil { + return nil + } + out := new(SizeConstraintsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACL) DeepCopyInto(out *WebACL) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACL. +func (in *WebACL) DeepCopy() *WebACL { + if in == nil { + return nil + } + out := new(WebACL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebACL) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebACLInitParameters) DeepCopyInto(out *WebACLInitParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(DefaultActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLInitParameters. +func (in *WebACLInitParameters) DeepCopy() *WebACLInitParameters { + if in == nil { + return nil + } + out := new(WebACLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLList) DeepCopyInto(out *WebACLList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WebACL, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLList. 
+func (in *WebACLList) DeepCopy() *WebACLList { + if in == nil { + return nil + } + out := new(WebACLList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebACLList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLObservation) DeepCopyInto(out *WebACLObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(DefaultActionObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else 
{ + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLObservation. +func (in *WebACLObservation) DeepCopy() *WebACLObservation { + if in == nil { + return nil + } + out := new(WebACLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLParameters) DeepCopyInto(out *WebACLParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(DefaultActionParameters) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLParameters. 
+func (in *WebACLParameters) DeepCopy() *WebACLParameters { + if in == nil { + return nil + } + out := new(WebACLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLSpec) DeepCopyInto(out *WebACLSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLSpec. +func (in *WebACLSpec) DeepCopy() *WebACLSpec { + if in == nil { + return nil + } + out := new(WebACLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLStatus) DeepCopyInto(out *WebACLStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLStatus. +func (in *WebACLStatus) DeepCopy() *WebACLStatus { + if in == nil { + return nil + } + out := new(WebACLStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSet) DeepCopyInto(out *XSSMatchSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSet. 
+func (in *XSSMatchSet) DeepCopy() *XSSMatchSet { + if in == nil { + return nil + } + out := new(XSSMatchSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *XSSMatchSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetInitParameters) DeepCopyInto(out *XSSMatchSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.XSSMatchTuples != nil { + in, out := &in.XSSMatchTuples, &out.XSSMatchTuples + *out = make([]XSSMatchTuplesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetInitParameters. +func (in *XSSMatchSetInitParameters) DeepCopy() *XSSMatchSetInitParameters { + if in == nil { + return nil + } + out := new(XSSMatchSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetList) DeepCopyInto(out *XSSMatchSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]XSSMatchSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetList. 
+func (in *XSSMatchSetList) DeepCopy() *XSSMatchSetList { + if in == nil { + return nil + } + out := new(XSSMatchSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *XSSMatchSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetObservation) DeepCopyInto(out *XSSMatchSetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.XSSMatchTuples != nil { + in, out := &in.XSSMatchTuples, &out.XSSMatchTuples + *out = make([]XSSMatchTuplesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetObservation. +func (in *XSSMatchSetObservation) DeepCopy() *XSSMatchSetObservation { + if in == nil { + return nil + } + out := new(XSSMatchSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *XSSMatchSetParameters) DeepCopyInto(out *XSSMatchSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.XSSMatchTuples != nil { + in, out := &in.XSSMatchTuples, &out.XSSMatchTuples + *out = make([]XSSMatchTuplesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetParameters. +func (in *XSSMatchSetParameters) DeepCopy() *XSSMatchSetParameters { + if in == nil { + return nil + } + out := new(XSSMatchSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetSpec) DeepCopyInto(out *XSSMatchSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetSpec. +func (in *XSSMatchSetSpec) DeepCopy() *XSSMatchSetSpec { + if in == nil { + return nil + } + out := new(XSSMatchSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetStatus) DeepCopyInto(out *XSSMatchSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetStatus. 
+func (in *XSSMatchSetStatus) DeepCopy() *XSSMatchSetStatus { + if in == nil { + return nil + } + out := new(XSSMatchSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchTuplesFieldToMatchInitParameters) DeepCopyInto(out *XSSMatchTuplesFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTuplesFieldToMatchInitParameters. +func (in *XSSMatchTuplesFieldToMatchInitParameters) DeepCopy() *XSSMatchTuplesFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(XSSMatchTuplesFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchTuplesFieldToMatchObservation) DeepCopyInto(out *XSSMatchTuplesFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTuplesFieldToMatchObservation. +func (in *XSSMatchTuplesFieldToMatchObservation) DeepCopy() *XSSMatchTuplesFieldToMatchObservation { + if in == nil { + return nil + } + out := new(XSSMatchTuplesFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *XSSMatchTuplesFieldToMatchParameters) DeepCopyInto(out *XSSMatchTuplesFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTuplesFieldToMatchParameters. +func (in *XSSMatchTuplesFieldToMatchParameters) DeepCopy() *XSSMatchTuplesFieldToMatchParameters { + if in == nil { + return nil + } + out := new(XSSMatchTuplesFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchTuplesInitParameters) DeepCopyInto(out *XSSMatchTuplesInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(XSSMatchTuplesFieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTuplesInitParameters. +func (in *XSSMatchTuplesInitParameters) DeepCopy() *XSSMatchTuplesInitParameters { + if in == nil { + return nil + } + out := new(XSSMatchTuplesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *XSSMatchTuplesObservation) DeepCopyInto(out *XSSMatchTuplesObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(XSSMatchTuplesFieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTuplesObservation. +func (in *XSSMatchTuplesObservation) DeepCopy() *XSSMatchTuplesObservation { + if in == nil { + return nil + } + out := new(XSSMatchTuplesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchTuplesParameters) DeepCopyInto(out *XSSMatchTuplesParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(XSSMatchTuplesFieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTuplesParameters. +func (in *XSSMatchTuplesParameters) DeepCopy() *XSSMatchTuplesParameters { + if in == nil { + return nil + } + out := new(XSSMatchTuplesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/waf/v1beta2/zz_generated.managed.go b/apis/waf/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..fbce536036 --- /dev/null +++ b/apis/waf/v1beta2/zz_generated.managed.go @@ -0,0 +1,368 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ByteMatchSet. +func (mg *ByteMatchSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ByteMatchSet. +func (mg *ByteMatchSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ByteMatchSet. +func (mg *ByteMatchSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ByteMatchSet. +func (mg *ByteMatchSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ByteMatchSet. +func (mg *ByteMatchSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ByteMatchSet. +func (mg *ByteMatchSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ByteMatchSet. +func (mg *ByteMatchSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ByteMatchSet. +func (mg *ByteMatchSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ByteMatchSet. +func (mg *ByteMatchSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ByteMatchSet. +func (mg *ByteMatchSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ByteMatchSet. 
+func (mg *ByteMatchSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ByteMatchSet. +func (mg *ByteMatchSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this RegexMatchSet. +func (mg *RegexMatchSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RegexMatchSet. +func (mg *RegexMatchSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RegexMatchSet. +func (mg *RegexMatchSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RegexMatchSet. +func (mg *RegexMatchSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RegexMatchSet. +func (mg *RegexMatchSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RegexMatchSet. +func (mg *RegexMatchSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RegexMatchSet. +func (mg *RegexMatchSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RegexMatchSet. +func (mg *RegexMatchSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RegexMatchSet. +func (mg *RegexMatchSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RegexMatchSet. 
+func (mg *RegexMatchSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RegexMatchSet. +func (mg *RegexMatchSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RegexMatchSet. +func (mg *RegexMatchSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SQLInjectionMatchSet. 
+func (mg *SQLInjectionMatchSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SizeConstraintSet. 
+func (mg *SizeConstraintSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WebACL. +func (mg *WebACL) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WebACL. +func (mg *WebACL) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WebACL. +func (mg *WebACL) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WebACL. +func (mg *WebACL) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WebACL. 
+func (mg *WebACL) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WebACL. +func (mg *WebACL) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WebACL. +func (mg *WebACL) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WebACL. +func (mg *WebACL) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WebACL. +func (mg *WebACL) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WebACL. +func (mg *WebACL) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WebACL. +func (mg *WebACL) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WebACL. +func (mg *WebACL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this XSSMatchSet. +func (mg *XSSMatchSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this XSSMatchSet. +func (mg *XSSMatchSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this XSSMatchSet. +func (mg *XSSMatchSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this XSSMatchSet. +func (mg *XSSMatchSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this XSSMatchSet. 
+func (mg *XSSMatchSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this XSSMatchSet. +func (mg *XSSMatchSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this XSSMatchSet. +func (mg *XSSMatchSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this XSSMatchSet. +func (mg *XSSMatchSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this XSSMatchSet. +func (mg *XSSMatchSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this XSSMatchSet. +func (mg *XSSMatchSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this XSSMatchSet. +func (mg *XSSMatchSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this XSSMatchSet. +func (mg *XSSMatchSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/waf/v1beta2/zz_generated.managedlist.go b/apis/waf/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..8d78477c53 --- /dev/null +++ b/apis/waf/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,62 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ByteMatchSetList. 
+func (l *ByteMatchSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RegexMatchSetList. +func (l *RegexMatchSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SQLInjectionMatchSetList. +func (l *SQLInjectionMatchSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SizeConstraintSetList. +func (l *SizeConstraintSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WebACLList. +func (l *WebACLList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this XSSMatchSetList. +func (l *XSSMatchSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/waf/v1beta2/zz_generated.resolvers.go b/apis/waf/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..f92ad98487 --- /dev/null +++ b/apis/waf/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,170 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *RegexMatchSet) ResolveReferences( // ResolveReferences of this RegexMatchSet. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.RegexMatchTuple); i3++ { + { + m, l, err = apisresolver.GetManagedResource("waf.aws.upbound.io", "v1beta1", "RegexPatternSet", "RegexPatternSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetIDRef, + Selector: mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetID") + } + mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.RegexMatchTuple); i3++ { + { + m, l, err = apisresolver.GetManagedResource("waf.aws.upbound.io", "v1beta1", "RegexPatternSet", "RegexPatternSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetIDRef, + Selector: mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetID") + } + mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this WebACL. +func (mg *WebACL) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.LoggingConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoggingConfiguration.LogDestination), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.ForProvider.LoggingConfiguration.LogDestinationRef, + Selector: mg.Spec.ForProvider.LoggingConfiguration.LogDestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoggingConfiguration.LogDestination") + } + mg.Spec.ForProvider.LoggingConfiguration.LogDestination = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LoggingConfiguration.LogDestinationRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Rules); i3++ { + { + m, l, err = apisresolver.GetManagedResource("waf.aws.upbound.io", "v1beta1", "Rule", "RuleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Rules[i3].RuleID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Rules[i3].RuleIDRef, + Selector: mg.Spec.ForProvider.Rules[i3].RuleIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Rules[i3].RuleID") + } + mg.Spec.ForProvider.Rules[i3].RuleID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Rules[i3].RuleIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LoggingConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoggingConfiguration.LogDestination), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.InitProvider.LoggingConfiguration.LogDestinationRef, + Selector: mg.Spec.InitProvider.LoggingConfiguration.LogDestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LoggingConfiguration.LogDestination") + } + mg.Spec.InitProvider.LoggingConfiguration.LogDestination = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.LoggingConfiguration.LogDestinationRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Rules); i3++ { + { + m, l, err = apisresolver.GetManagedResource("waf.aws.upbound.io", "v1beta1", "Rule", "RuleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Rules[i3].RuleID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Rules[i3].RuleIDRef, + Selector: mg.Spec.InitProvider.Rules[i3].RuleIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Rules[i3].RuleID") + } + mg.Spec.InitProvider.Rules[i3].RuleID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Rules[i3].RuleIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/waf/v1beta2/zz_groupversion_info.go b/apis/waf/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..f25d6827fa --- /dev/null +++ b/apis/waf/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=waf.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "waf.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/waf/v1beta2/zz_regexmatchset_terraformed.go b/apis/waf/v1beta2/zz_regexmatchset_terraformed.go new file mode 100755 index 0000000000..e224214fb3 --- /dev/null +++ b/apis/waf/v1beta2/zz_regexmatchset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RegexMatchSet +func (mg *RegexMatchSet) GetTerraformResourceType() string { + return "aws_waf_regex_match_set" +} + +// GetConnectionDetailsMapping for this RegexMatchSet +func (tr *RegexMatchSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RegexMatchSet +func (tr *RegexMatchSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RegexMatchSet +func (tr *RegexMatchSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of 
this RegexMatchSet +func (tr *RegexMatchSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RegexMatchSet +func (tr *RegexMatchSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RegexMatchSet +func (tr *RegexMatchSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RegexMatchSet +func (tr *RegexMatchSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this RegexMatchSet +func (tr *RegexMatchSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RegexMatchSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *RegexMatchSet) LateInitialize(attrs []byte) (bool, error) { + params := &RegexMatchSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RegexMatchSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/waf/v1beta2/zz_regexmatchset_types.go b/apis/waf/v1beta2/zz_regexmatchset_types.go new file mode 100755 index 0000000000..0cb326394b --- /dev/null +++ b/apis/waf/v1beta2/zz_regexmatchset_types.go @@ -0,0 +1,224 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RegexMatchSetInitParameters struct { + + // The name or description of the Regex Match Set. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. See below. 
+ RegexMatchTuple []RegexMatchTupleInitParameters `json:"regexMatchTuple,omitempty" tf:"regex_match_tuple,omitempty"` +} + +type RegexMatchSetObservation struct { + + // Amazon Resource Name (ARN) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ID of the WAF Regex Match Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name or description of the Regex Match Set. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. See below. + RegexMatchTuple []RegexMatchTupleObservation `json:"regexMatchTuple,omitempty" tf:"regex_match_tuple,omitempty"` +} + +type RegexMatchSetParameters struct { + + // The name or description of the Regex Match Set. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. See below. + // +kubebuilder:validation:Optional + RegexMatchTuple []RegexMatchTupleParameters `json:"regexMatchTuple,omitempty" tf:"regex_match_tuple,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type RegexMatchTupleFieldToMatchInitParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RegexMatchTupleFieldToMatchObservation struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RegexMatchTupleFieldToMatchParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RegexMatchTupleInitParameters struct { + + // The part of a web request that you want to search, such as a specified header or a query string. + FieldToMatch *RegexMatchTupleFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // The ID of a Regex Pattern Set. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/waf/v1beta1.RegexPatternSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RegexPatternSetID *string `json:"regexPatternSetId,omitempty" tf:"regex_pattern_set_id,omitempty"` + + // Reference to a RegexPatternSet in waf to populate regexPatternSetId. 
+ // +kubebuilder:validation:Optional + RegexPatternSetIDRef *v1.Reference `json:"regexPatternSetIdRef,omitempty" tf:"-"` + + // Selector for a RegexPatternSet in waf to populate regexPatternSetId. + // +kubebuilder:validation:Optional + RegexPatternSetIDSelector *v1.Selector `json:"regexPatternSetIdSelector,omitempty" tf:"-"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type RegexMatchTupleObservation struct { + + // The part of a web request that you want to search, such as a specified header or a query string. + FieldToMatch *RegexMatchTupleFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // The ID of a Regex Pattern Set. + RegexPatternSetID *string `json:"regexPatternSetId,omitempty" tf:"regex_pattern_set_id,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type RegexMatchTupleParameters struct { + + // The part of a web request that you want to search, such as a specified header or a query string. + // +kubebuilder:validation:Optional + FieldToMatch *RegexMatchTupleFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // The ID of a Regex Pattern Set. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/waf/v1beta1.RegexPatternSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RegexPatternSetID *string `json:"regexPatternSetId,omitempty" tf:"regex_pattern_set_id,omitempty"` + + // Reference to a RegexPatternSet in waf to populate regexPatternSetId. + // +kubebuilder:validation:Optional + RegexPatternSetIDRef *v1.Reference `json:"regexPatternSetIdRef,omitempty" tf:"-"` + + // Selector for a RegexPatternSet in waf to populate regexPatternSetId. + // +kubebuilder:validation:Optional + RegexPatternSetIDSelector *v1.Selector `json:"regexPatternSetIdSelector,omitempty" tf:"-"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +// RegexMatchSetSpec defines the desired state of RegexMatchSet +type RegexMatchSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RegexMatchSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider RegexMatchSetInitParameters `json:"initProvider,omitempty"` +} + +// RegexMatchSetStatus defines the observed state of RegexMatchSet. +type RegexMatchSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RegexMatchSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RegexMatchSet is the Schema for the RegexMatchSets API. Provides a AWS WAF Regex Match Set resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type RegexMatchSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec RegexMatchSetSpec `json:"spec"` + Status RegexMatchSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RegexMatchSetList contains a list of RegexMatchSets +type RegexMatchSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RegexMatchSet `json:"items"` +} + +// Repository type metadata. +var ( + RegexMatchSet_Kind = "RegexMatchSet" + RegexMatchSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RegexMatchSet_Kind}.String() + RegexMatchSet_KindAPIVersion = RegexMatchSet_Kind + "." 
+ CRDGroupVersion.String() + RegexMatchSet_GroupVersionKind = CRDGroupVersion.WithKind(RegexMatchSet_Kind) +) + +func init() { + SchemeBuilder.Register(&RegexMatchSet{}, &RegexMatchSetList{}) +} diff --git a/apis/waf/v1beta2/zz_sizeconstraintset_terraformed.go b/apis/waf/v1beta2/zz_sizeconstraintset_terraformed.go new file mode 100755 index 0000000000..a701dcfdb0 --- /dev/null +++ b/apis/waf/v1beta2/zz_sizeconstraintset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SizeConstraintSet +func (mg *SizeConstraintSet) GetTerraformResourceType() string { + return "aws_waf_size_constraint_set" +} + +// GetConnectionDetailsMapping for this SizeConstraintSet +func (tr *SizeConstraintSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SizeConstraintSet +func (tr *SizeConstraintSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SizeConstraintSet +func (tr *SizeConstraintSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SizeConstraintSet +func (tr *SizeConstraintSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SizeConstraintSet +func (tr *SizeConstraintSet) GetParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SizeConstraintSet +func (tr *SizeConstraintSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SizeConstraintSet +func (tr *SizeConstraintSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SizeConstraintSet +func (tr *SizeConstraintSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SizeConstraintSet using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SizeConstraintSet) LateInitialize(attrs []byte) (bool, error) { + params := &SizeConstraintSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SizeConstraintSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/waf/v1beta2/zz_sizeconstraintset_types.go b/apis/waf/v1beta2/zz_sizeconstraintset_types.go new file mode 100755 index 0000000000..a000a7b380 --- /dev/null +++ b/apis/waf/v1beta2/zz_sizeconstraintset_types.go @@ -0,0 +1,196 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SizeConstraintSetInitParameters struct { + + // Name or description of the Size Constraint Set. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Parts of web requests that you want to inspect the size of. + SizeConstraints []SizeConstraintsInitParameters `json:"sizeConstraints,omitempty" tf:"size_constraints,omitempty"` +} + +type SizeConstraintSetObservation struct { + + // Amazon Resource Name (ARN). + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // ID of the WAF Size Constraint Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name or description of the Size Constraint Set. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Parts of web requests that you want to inspect the size of. + SizeConstraints []SizeConstraintsObservation `json:"sizeConstraints,omitempty" tf:"size_constraints,omitempty"` +} + +type SizeConstraintSetParameters struct { + + // Name or description of the Size Constraint Set. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Parts of web requests that you want to inspect the size of. + // +kubebuilder:validation:Optional + SizeConstraints []SizeConstraintsParameters `json:"sizeConstraints,omitempty" tf:"size_constraints,omitempty"` +} + +type SizeConstraintsFieldToMatchInitParameters struct { + + // When the type is HEADER, specify the name of the header that you want to search using the data field, for example, User-Agent or Referer. If the type is any other value, you can omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // Part of the web request that you want AWS WAF to search for a specified string. For example, HEADER, METHOD, or BODY. See the docs for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SizeConstraintsFieldToMatchObservation struct { + + // When the type is HEADER, specify the name of the header that you want to search using the data field, for example, User-Agent or Referer. If the type is any other value, you can omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // Part of the web request that you want AWS WAF to search for a specified string. For example, HEADER, METHOD, or BODY. See the docs for all supported values. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SizeConstraintsFieldToMatchParameters struct { + + // When the type is HEADER, specify the name of the header that you want to search using the data field, for example, User-Agent or Referer. If the type is any other value, you can omit this field. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // Part of the web request that you want AWS WAF to search for a specified string. For example, HEADER, METHOD, or BODY. See the docs for all supported values. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SizeConstraintsInitParameters struct { + + // Type of comparison you want to perform, such as EQ, NE, LT, or GT. Please refer to the documentation for a complete list of supported values. + ComparisonOperator *string `json:"comparisonOperator,omitempty" tf:"comparison_operator,omitempty"` + + // Parameter that specifies where in a web request to look for the size constraint. + FieldToMatch *SizeConstraintsFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Size in bytes that you want to compare against the size of the specified field_to_match. Valid values for size are between 0 and 21474836480 bytes (0 and 20 GB). + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // Parameter is used to eliminate unusual formatting that attackers may use in web requests to bypass AWS WAF. When a transformation is specified, AWS WAF performs the transformation on the field_to_match before inspecting the request for a match. Some examples of supported transformations are CMD_LINE, HTML_ENTITY_DECODE, and NONE. You can find a complete list of supported values in the AWS WAF API Reference. + // Note: If you choose BODY as the type, you must also choose NONE because CloudFront only forwards the first 8192 bytes for inspection. 
+ TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type SizeConstraintsObservation struct { + + // Type of comparison you want to perform, such as EQ, NE, LT, or GT. Please refer to the documentation for a complete list of supported values. + ComparisonOperator *string `json:"comparisonOperator,omitempty" tf:"comparison_operator,omitempty"` + + // Parameter that specifies where in a web request to look for the size constraint. + FieldToMatch *SizeConstraintsFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Size in bytes that you want to compare against the size of the specified field_to_match. Valid values for size are between 0 and 21474836480 bytes (0 and 20 GB). + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // Parameter is used to eliminate unusual formatting that attackers may use in web requests to bypass AWS WAF. When a transformation is specified, AWS WAF performs the transformation on the field_to_match before inspecting the request for a match. Some examples of supported transformations are CMD_LINE, HTML_ENTITY_DECODE, and NONE. You can find a complete list of supported values in the AWS WAF API Reference. + // Note: If you choose BODY as the type, you must also choose NONE because CloudFront only forwards the first 8192 bytes for inspection. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type SizeConstraintsParameters struct { + + // Type of comparison you want to perform, such as EQ, NE, LT, or GT. Please refer to the documentation for a complete list of supported values. + // +kubebuilder:validation:Optional + ComparisonOperator *string `json:"comparisonOperator" tf:"comparison_operator,omitempty"` + + // Parameter that specifies where in a web request to look for the size constraint. 
+ // +kubebuilder:validation:Optional + FieldToMatch *SizeConstraintsFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // Size in bytes that you want to compare against the size of the specified field_to_match. Valid values for size are between 0 and 21474836480 bytes (0 and 20 GB). + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // Parameter is used to eliminate unusual formatting that attackers may use in web requests to bypass AWS WAF. When a transformation is specified, AWS WAF performs the transformation on the field_to_match before inspecting the request for a match. Some examples of supported transformations are CMD_LINE, HTML_ENTITY_DECODE, and NONE. You can find a complete list of supported values in the AWS WAF API Reference. + // Note: If you choose BODY as the type, you must also choose NONE because CloudFront only forwards the first 8192 bytes for inspection. + // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +// SizeConstraintSetSpec defines the desired state of SizeConstraintSet +type SizeConstraintSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SizeConstraintSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider SizeConstraintSetInitParameters `json:"initProvider,omitempty"` +} + +// SizeConstraintSetStatus defines the observed state of SizeConstraintSet. +type SizeConstraintSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SizeConstraintSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SizeConstraintSet is the Schema for the SizeConstraintSets API. The +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SizeConstraintSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec SizeConstraintSetSpec `json:"spec"` + Status SizeConstraintSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SizeConstraintSetList contains a list of SizeConstraintSets +type SizeConstraintSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SizeConstraintSet `json:"items"` +} + +// Repository type metadata. 
+var ( + SizeConstraintSet_Kind = "SizeConstraintSet" + SizeConstraintSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SizeConstraintSet_Kind}.String() + SizeConstraintSet_KindAPIVersion = SizeConstraintSet_Kind + "." + CRDGroupVersion.String() + SizeConstraintSet_GroupVersionKind = CRDGroupVersion.WithKind(SizeConstraintSet_Kind) +) + +func init() { + SchemeBuilder.Register(&SizeConstraintSet{}, &SizeConstraintSetList{}) +} diff --git a/apis/waf/v1beta2/zz_sqlinjectionmatchset_terraformed.go b/apis/waf/v1beta2/zz_sqlinjectionmatchset_terraformed.go new file mode 100755 index 0000000000..b3d51e104f --- /dev/null +++ b/apis/waf/v1beta2/zz_sqlinjectionmatchset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SQLInjectionMatchSet +func (mg *SQLInjectionMatchSet) GetTerraformResourceType() string { + return "aws_waf_sql_injection_match_set" +} + +// GetConnectionDetailsMapping for this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying 
Terraform resource of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SQLInjectionMatchSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SQLInjectionMatchSet) LateInitialize(attrs []byte) (bool, error) { + params := &SQLInjectionMatchSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SQLInjectionMatchSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/waf/v1beta2/zz_sqlinjectionmatchset_types.go b/apis/waf/v1beta2/zz_sqlinjectionmatchset_types.go new file mode 100755 index 0000000000..d5cdcd45fa --- /dev/null +++ b/apis/waf/v1beta2/zz_sqlinjectionmatchset_types.go @@ -0,0 +1,194 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SQLInjectionMatchSetInitParameters struct { + + // The name or description of the SQL Injection Match Set. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. + SQLInjectionMatchTuples []SQLInjectionMatchTuplesInitParameters `json:"sqlInjectionMatchTuples,omitempty" tf:"sql_injection_match_tuples,omitempty"` +} + +type SQLInjectionMatchSetObservation struct { + + // The ID of the WAF SQL Injection Match Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name or description of the SQL Injection Match Set. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. + SQLInjectionMatchTuples []SQLInjectionMatchTuplesObservation `json:"sqlInjectionMatchTuples,omitempty" tf:"sql_injection_match_tuples,omitempty"` +} + +type SQLInjectionMatchSetParameters struct { + + // The name or description of the SQL Injection Match Set. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. + // +kubebuilder:validation:Optional + SQLInjectionMatchTuples []SQLInjectionMatchTuplesParameters `json:"sqlInjectionMatchTuples,omitempty" tf:"sql_injection_match_tuples,omitempty"` +} + +type SQLInjectionMatchTuplesFieldToMatchInitParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. 
+ Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SQLInjectionMatchTuplesFieldToMatchObservation struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SQLInjectionMatchTuplesFieldToMatchParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SQLInjectionMatchTuplesInitParameters struct { + + // Specifies where in a web request to look for snippets of malicious SQL code. + FieldToMatch *SQLInjectionMatchTuplesFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. 
+ // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type SQLInjectionMatchTuplesObservation struct { + + // Specifies where in a web request to look for snippets of malicious SQL code. + FieldToMatch *SQLInjectionMatchTuplesFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type SQLInjectionMatchTuplesParameters struct { + + // Specifies where in a web request to look for snippets of malicious SQL code. + // +kubebuilder:validation:Optional + FieldToMatch *SQLInjectionMatchTuplesFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +// SQLInjectionMatchSetSpec defines the desired state of SQLInjectionMatchSet +type SQLInjectionMatchSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SQLInjectionMatchSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SQLInjectionMatchSetInitParameters `json:"initProvider,omitempty"` +} + +// SQLInjectionMatchSetStatus defines the observed state of SQLInjectionMatchSet. +type SQLInjectionMatchSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SQLInjectionMatchSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SQLInjectionMatchSet is the Schema for the SQLInjectionMatchSets API. Provides a AWS WAF SQL Injection Match Set resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SQLInjectionMatchSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec SQLInjectionMatchSetSpec `json:"spec"` + Status SQLInjectionMatchSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SQLInjectionMatchSetList contains a list of SQLInjectionMatchSets +type SQLInjectionMatchSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SQLInjectionMatchSet `json:"items"` +} + +// Repository type metadata. +var ( + SQLInjectionMatchSet_Kind = "SQLInjectionMatchSet" + SQLInjectionMatchSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SQLInjectionMatchSet_Kind}.String() + SQLInjectionMatchSet_KindAPIVersion = SQLInjectionMatchSet_Kind + "." 
+ CRDGroupVersion.String() + SQLInjectionMatchSet_GroupVersionKind = CRDGroupVersion.WithKind(SQLInjectionMatchSet_Kind) +) + +func init() { + SchemeBuilder.Register(&SQLInjectionMatchSet{}, &SQLInjectionMatchSetList{}) +} diff --git a/apis/waf/v1beta2/zz_webacl_terraformed.go b/apis/waf/v1beta2/zz_webacl_terraformed.go new file mode 100755 index 0000000000..86daaa3a26 --- /dev/null +++ b/apis/waf/v1beta2/zz_webacl_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WebACL +func (mg *WebACL) GetTerraformResourceType() string { + return "aws_waf_web_acl" +} + +// GetConnectionDetailsMapping for this WebACL +func (tr *WebACL) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this WebACL +func (tr *WebACL) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WebACL +func (tr *WebACL) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WebACL +func (tr *WebACL) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WebACL +func (tr *WebACL) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + 
return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WebACL +func (tr *WebACL) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WebACL +func (tr *WebACL) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WebACL +func (tr *WebACL) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WebACL using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *WebACL) LateInitialize(attrs []byte) (bool, error) { + params := &WebACLParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WebACL) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/waf/v1beta2/zz_webacl_types.go b/apis/waf/v1beta2/zz_webacl_types.go new file mode 100755 index 0000000000..63a0657b41 --- /dev/null +++ b/apis/waf/v1beta2/zz_webacl_types.go @@ -0,0 +1,403 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // valid values are: BLOCK, ALLOW, or COUNT + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ActionObservation struct { + + // valid values are: BLOCK, ALLOW, or COUNT + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ActionParameters struct { + + // valid values are: BLOCK, ALLOW, or COUNT + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type DefaultActionInitParameters struct { + + // Specifies how you want AWS WAF to respond to requests that don't match the criteria in any of the rules. 
+ // e.g., ALLOW or BLOCK + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DefaultActionObservation struct { + + // Specifies how you want AWS WAF to respond to requests that don't match the criteria in any of the rules. + // e.g., ALLOW or BLOCK + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DefaultActionParameters struct { + + // Specifies how you want AWS WAF to respond to requests that don't match the criteria in any of the rules. + // e.g., ALLOW or BLOCK + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LoggingConfigurationInitParameters struct { + + // Amazon Resource Name (ARN) of Kinesis Firehose Delivery Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Reference to a DeliveryStream in firehose to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationRef *v1.Reference `json:"logDestinationRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationSelector *v1.Selector `json:"logDestinationSelector,omitempty" tf:"-"` + + // Configuration block containing parts of the request that you want redacted from the logs. Detailed below. + RedactedFields *RedactedFieldsInitParameters `json:"redactedFields,omitempty" tf:"redacted_fields,omitempty"` +} + +type LoggingConfigurationObservation struct { + + // Amazon Resource Name (ARN) of Kinesis Firehose Delivery Stream + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Configuration block containing parts of the request that you want redacted from the logs. Detailed below. 
+ RedactedFields *RedactedFieldsObservation `json:"redactedFields,omitempty" tf:"redacted_fields,omitempty"` +} + +type LoggingConfigurationParameters struct { + + // Amazon Resource Name (ARN) of Kinesis Firehose Delivery Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + // +kubebuilder:validation:Optional + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Reference to a DeliveryStream in firehose to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationRef *v1.Reference `json:"logDestinationRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationSelector *v1.Selector `json:"logDestinationSelector,omitempty" tf:"-"` + + // Configuration block containing parts of the request that you want redacted from the logs. Detailed below. + // +kubebuilder:validation:Optional + RedactedFields *RedactedFieldsParameters `json:"redactedFields,omitempty" tf:"redacted_fields,omitempty"` +} + +type OverrideActionInitParameters struct { + + // valid values are: BLOCK, ALLOW, or COUNT + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OverrideActionObservation struct { + + // valid values are: BLOCK, ALLOW, or COUNT + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OverrideActionParameters struct { + + // valid values are: BLOCK, ALLOW, or COUNT + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RedactedFieldsFieldToMatchInitParameters struct { + + // When the value of type is HEADER, enter the name of the header that you want the WAF to search, for example, User-Agent or Referer. If the value of type is any other value, omit data. 
+ Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // valid values are: BLOCK, ALLOW, or COUNT + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RedactedFieldsFieldToMatchObservation struct { + + // When the value of type is HEADER, enter the name of the header that you want the WAF to search, for example, User-Agent or Referer. If the value of type is any other value, omit data. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // valid values are: BLOCK, ALLOW, or COUNT + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RedactedFieldsFieldToMatchParameters struct { + + // When the value of type is HEADER, enter the name of the header that you want the WAF to search, for example, User-Agent or Referer. If the value of type is any other value, omit data. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // valid values are: BLOCK, ALLOW, or COUNT + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RedactedFieldsInitParameters struct { + + // Set of configuration blocks for fields to redact. Detailed below. + FieldToMatch []RedactedFieldsFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` +} + +type RedactedFieldsObservation struct { + + // Set of configuration blocks for fields to redact. Detailed below. + FieldToMatch []RedactedFieldsFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` +} + +type RedactedFieldsParameters struct { + + // Set of configuration blocks for fields to redact. Detailed below. + // +kubebuilder:validation:Optional + FieldToMatch []RedactedFieldsFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` +} + +type RulesInitParameters struct { + + // The action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Not used if type is GROUP. 
+ Action *ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Override the action that a group requests CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Only used if type is GROUP. + OverrideAction *OverrideActionInitParameters `json:"overrideAction,omitempty" tf:"override_action,omitempty"` + + // Specifies the order in which the rules in a WebACL are evaluated. + // Rules with a lower value are evaluated before rules with a higher value. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // ID of the associated WAF (Global) rule (e.g., aws_waf_rule). WAF (Regional) rules cannot be used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/waf/v1beta1.Rule + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RuleID *string `json:"ruleId,omitempty" tf:"rule_id,omitempty"` + + // Reference to a Rule in waf to populate ruleId. + // +kubebuilder:validation:Optional + RuleIDRef *v1.Reference `json:"ruleIdRef,omitempty" tf:"-"` + + // Selector for a Rule in waf to populate ruleId. + // +kubebuilder:validation:Optional + RuleIDSelector *v1.Selector `json:"ruleIdSelector,omitempty" tf:"-"` + + // The rule type, either REGULAR, as defined by Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, as defined by RuleGroup. The default is REGULAR. If you add a RATE_BASED rule, you need to set type as RATE_BASED. If you add a GROUP rule, you need to set type as GROUP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RulesObservation struct { + + // The action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Not used if type is GROUP. + Action *ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Override the action that a group requests CloudFront or AWS WAF takes when a web request matches the conditions in the rule. 
Only used if type is GROUP. + OverrideAction *OverrideActionObservation `json:"overrideAction,omitempty" tf:"override_action,omitempty"` + + // Specifies the order in which the rules in a WebACL are evaluated. + // Rules with a lower value are evaluated before rules with a higher value. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // ID of the associated WAF (Global) rule (e.g., aws_waf_rule). WAF (Regional) rules cannot be used. + RuleID *string `json:"ruleId,omitempty" tf:"rule_id,omitempty"` + + // The rule type, either REGULAR, as defined by Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, as defined by RuleGroup. The default is REGULAR. If you add a RATE_BASED rule, you need to set type as RATE_BASED. If you add a GROUP rule, you need to set type as GROUP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RulesParameters struct { + + // The action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Not used if type is GROUP. + // +kubebuilder:validation:Optional + Action *ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Override the action that a group requests CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Only used if type is GROUP. + // +kubebuilder:validation:Optional + OverrideAction *OverrideActionParameters `json:"overrideAction,omitempty" tf:"override_action,omitempty"` + + // Specifies the order in which the rules in a WebACL are evaluated. + // Rules with a lower value are evaluated before rules with a higher value. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority" tf:"priority,omitempty"` + + // ID of the associated WAF (Global) rule (e.g., aws_waf_rule). WAF (Regional) rules cannot be used. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/waf/v1beta1.Rule + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RuleID *string `json:"ruleId,omitempty" tf:"rule_id,omitempty"` + + // Reference to a Rule in waf to populate ruleId. + // +kubebuilder:validation:Optional + RuleIDRef *v1.Reference `json:"ruleIdRef,omitempty" tf:"-"` + + // Selector for a Rule in waf to populate ruleId. + // +kubebuilder:validation:Optional + RuleIDSelector *v1.Selector `json:"ruleIdSelector,omitempty" tf:"-"` + + // The rule type, either REGULAR, as defined by Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, as defined by RuleGroup. The default is REGULAR. If you add a RATE_BASED rule, you need to set type as RATE_BASED. If you add a GROUP rule, you need to set type as GROUP. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WebACLInitParameters struct { + + // Configuration block with action that you want AWS WAF to take when a request doesn't match the criteria in any of the rules that are associated with the web ACL. Detailed below. + DefaultAction *DefaultActionInitParameters `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // Configuration block to enable WAF logging. Detailed below. + LoggingConfiguration *LoggingConfigurationInitParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // The name or description for the Amazon CloudWatch metric of this web ACL. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The name or description of the web ACL. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration blocks containing rules to associate with the web ACL and the settings for each rule. Detailed below. 
+ Rules []RulesInitParameters `json:"rules,omitempty" tf:"rules,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WebACLObservation struct { + + // The ARN of the WAF WebACL. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Configuration block with action that you want AWS WAF to take when a request doesn't match the criteria in any of the rules that are associated with the web ACL. Detailed below. + DefaultAction *DefaultActionObservation `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // The ID of the WAF WebACL. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block to enable WAF logging. Detailed below. + LoggingConfiguration *LoggingConfigurationObservation `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // The name or description for the Amazon CloudWatch metric of this web ACL. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The name or description of the web ACL. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Configuration blocks containing rules to associate with the web ACL and the settings for each rule. Detailed below. + Rules []RulesObservation `json:"rules,omitempty" tf:"rules,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type WebACLParameters struct { + + // Configuration block with action that you want AWS WAF to take when a request doesn't match the criteria in any of the rules that are associated with the web ACL. Detailed below. 
+ // +kubebuilder:validation:Optional + DefaultAction *DefaultActionParameters `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // Configuration block to enable WAF logging. Detailed below. + // +kubebuilder:validation:Optional + LoggingConfiguration *LoggingConfigurationParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // The name or description for the Amazon CloudWatch metric of this web ACL. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The name or description of the web ACL. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Configuration blocks containing rules to associate with the web ACL and the settings for each rule. Detailed below. + // +kubebuilder:validation:Optional + Rules []RulesParameters `json:"rules,omitempty" tf:"rules,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WebACLSpec defines the desired state of WebACL +type WebACLSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WebACLParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WebACLInitParameters `json:"initProvider,omitempty"` +} + +// WebACLStatus defines the observed state of WebACL. +type WebACLStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WebACLObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WebACL is the Schema for the WebACLs API. Provides a AWS WAF web access control group (ACL) resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type WebACL struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultAction) || (has(self.initProvider) && has(self.initProvider.defaultAction))",message="spec.forProvider.defaultAction is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.metricName) || (has(self.initProvider) && has(self.initProvider.metricName))",message="spec.forProvider.metricName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec WebACLSpec `json:"spec"` + Status WebACLStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WebACLList contains a list of WebACLs +type WebACLList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WebACL `json:"items"` +} + +// Repository type metadata. +var ( + WebACL_Kind = "WebACL" + WebACL_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WebACL_Kind}.String() + WebACL_KindAPIVersion = WebACL_Kind + "." + CRDGroupVersion.String() + WebACL_GroupVersionKind = CRDGroupVersion.WithKind(WebACL_Kind) +) + +func init() { + SchemeBuilder.Register(&WebACL{}, &WebACLList{}) +} diff --git a/apis/waf/v1beta2/zz_xssmatchset_terraformed.go b/apis/waf/v1beta2/zz_xssmatchset_terraformed.go new file mode 100755 index 0000000000..c9d441d8da --- /dev/null +++ b/apis/waf/v1beta2/zz_xssmatchset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this XSSMatchSet +func (mg *XSSMatchSet) GetTerraformResourceType() string { + return "aws_waf_xss_match_set" +} + +// GetConnectionDetailsMapping for this XSSMatchSet +func (tr *XSSMatchSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this XSSMatchSet +func (tr *XSSMatchSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this XSSMatchSet +func (tr *XSSMatchSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this XSSMatchSet +func (tr *XSSMatchSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this XSSMatchSet +func (tr *XSSMatchSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this XSSMatchSet +func (tr *XSSMatchSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this XSSMatchSet +func (tr *XSSMatchSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this XSSMatchSet +func (tr *XSSMatchSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this XSSMatchSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *XSSMatchSet) LateInitialize(attrs []byte) (bool, error) { + params := &XSSMatchSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *XSSMatchSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/waf/v1beta2/zz_xssmatchset_types.go b/apis/waf/v1beta2/zz_xssmatchset_types.go new file mode 100755 index 0000000000..6769591173 --- /dev/null +++ b/apis/waf/v1beta2/zz_xssmatchset_types.go @@ -0,0 +1,197 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type XSSMatchSetInitParameters struct { + + // The name or description of the SizeConstraintSet. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parts of web requests that you want to inspect for cross-site scripting attacks. + XSSMatchTuples []XSSMatchTuplesInitParameters `json:"xssMatchTuples,omitempty" tf:"xss_match_tuples,omitempty"` +} + +type XSSMatchSetObservation struct { + + // Amazon Resource Name (ARN) + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ID of the WAF XssMatchSet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name or description of the SizeConstraintSet. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parts of web requests that you want to inspect for cross-site scripting attacks. + XSSMatchTuples []XSSMatchTuplesObservation `json:"xssMatchTuples,omitempty" tf:"xss_match_tuples,omitempty"` +} + +type XSSMatchSetParameters struct { + + // The name or description of the SizeConstraintSet. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. 
+ // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The parts of web requests that you want to inspect for cross-site scripting attacks. + // +kubebuilder:validation:Optional + XSSMatchTuples []XSSMatchTuplesParameters `json:"xssMatchTuples,omitempty" tf:"xss_match_tuples,omitempty"` +} + +type XSSMatchTuplesFieldToMatchInitParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type XSSMatchTuplesFieldToMatchObservation struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type XSSMatchTuplesFieldToMatchParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type XSSMatchTuplesInitParameters struct { + + // Specifies where in a web request to look for cross-site scripting attacks. + FieldToMatch *XSSMatchTuplesFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type XSSMatchTuplesObservation struct { + + // Specifies where in a web request to look for cross-site scripting attacks. + FieldToMatch *XSSMatchTuplesFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type XSSMatchTuplesParameters struct { + + // Specifies where in a web request to look for cross-site scripting attacks. + // +kubebuilder:validation:Optional + FieldToMatch *XSSMatchTuplesFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. 
+ // If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +// XSSMatchSetSpec defines the desired state of XSSMatchSet +type XSSMatchSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider XSSMatchSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider XSSMatchSetInitParameters `json:"initProvider,omitempty"` +} + +// XSSMatchSetStatus defines the observed state of XSSMatchSet. +type XSSMatchSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider XSSMatchSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// XSSMatchSet is the Schema for the XSSMatchSets API. Provides a AWS WAF XssMatchSet resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type XSSMatchSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec XSSMatchSetSpec `json:"spec"` + Status XSSMatchSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// XSSMatchSetList contains a list of XSSMatchSets +type XSSMatchSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []XSSMatchSet `json:"items"` +} + +// Repository type metadata. +var ( + XSSMatchSet_Kind = "XSSMatchSet" + XSSMatchSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: XSSMatchSet_Kind}.String() + XSSMatchSet_KindAPIVersion = XSSMatchSet_Kind + "." 
+ CRDGroupVersion.String() + XSSMatchSet_GroupVersionKind = CRDGroupVersion.WithKind(XSSMatchSet_Kind) +) + +func init() { + SchemeBuilder.Register(&XSSMatchSet{}, &XSSMatchSetList{}) +} diff --git a/apis/wafregional/v1beta1/zz_generated.conversion_hubs.go b/apis/wafregional/v1beta1/zz_generated.conversion_hubs.go index 868ba95efa..dbe0307623 100755 --- a/apis/wafregional/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/wafregional/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *ByteMatchSet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *GeoMatchSet) Hub() {} @@ -18,23 +15,8 @@ func (tr *IPSet) Hub() {} // Hub marks this type as a conversion hub. func (tr *RateBasedRule) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *RegexMatchSet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *RegexPatternSet) Hub() {} // Hub marks this type as a conversion hub. func (tr *Rule) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SizeConstraintSet) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SQLInjectionMatchSet) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WebACL) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *XSSMatchSet) Hub() {} diff --git a/apis/wafregional/v1beta1/zz_generated.conversion_spokes.go b/apis/wafregional/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..d24ff6e935 --- /dev/null +++ b/apis/wafregional/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ByteMatchSet to the hub type. +func (tr *ByteMatchSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ByteMatchSet type. +func (tr *ByteMatchSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this RegexMatchSet to the hub type. +func (tr *RegexMatchSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RegexMatchSet type. 
+func (tr *RegexMatchSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SizeConstraintSet to the hub type. +func (tr *SizeConstraintSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SizeConstraintSet type. +func (tr *SizeConstraintSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SQLInjectionMatchSet to the hub type. +func (tr *SQLInjectionMatchSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SQLInjectionMatchSet type. 
+func (tr *SQLInjectionMatchSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WebACL to the hub type. +func (tr *WebACL) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WebACL type. +func (tr *WebACL) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this XSSMatchSet to the hub type. +func (tr *XSSMatchSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the XSSMatchSet type. 
+func (tr *XSSMatchSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/wafregional/v1beta2/zz_bytematchset_terraformed.go b/apis/wafregional/v1beta2/zz_bytematchset_terraformed.go new file mode 100755 index 0000000000..7135916461 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_bytematchset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ByteMatchSet +func (mg *ByteMatchSet) GetTerraformResourceType() string { + return "aws_wafregional_byte_match_set" +} + +// GetConnectionDetailsMapping for this ByteMatchSet +func (tr *ByteMatchSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ByteMatchSet +func (tr *ByteMatchSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ByteMatchSet +func (tr *ByteMatchSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ByteMatchSet +func (tr *ByteMatchSet) GetID() string 
{ + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ByteMatchSet +func (tr *ByteMatchSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ByteMatchSet +func (tr *ByteMatchSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ByteMatchSet +func (tr *ByteMatchSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ByteMatchSet +func (tr *ByteMatchSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ByteMatchSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ByteMatchSet) LateInitialize(attrs []byte) (bool, error) { + params := &ByteMatchSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ByteMatchSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/wafregional/v1beta2/zz_bytematchset_types.go b/apis/wafregional/v1beta2/zz_bytematchset_types.go new file mode 100755 index 0000000000..5a429dc084 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_bytematchset_types.go @@ -0,0 +1,190 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ByteMatchSetInitParameters struct { + + // Settings for the ByteMatchSet, such as the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests. ByteMatchTuple documented below. 
+ ByteMatchTuples []ByteMatchTuplesInitParameters `json:"byteMatchTuples,omitempty" tf:"byte_match_tuples,omitempty"` + + // The name or description of the ByteMatchSet. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ByteMatchSetObservation struct { + + // Settings for the ByteMatchSet, such as the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests. ByteMatchTuple documented below. + ByteMatchTuples []ByteMatchTuplesObservation `json:"byteMatchTuples,omitempty" tf:"byte_match_tuples,omitempty"` + + // The ID of the WAF ByteMatchSet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name or description of the ByteMatchSet. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ByteMatchSetParameters struct { + + // Settings for the ByteMatchSet, such as the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests. ByteMatchTuple documented below. + // +kubebuilder:validation:Optional + ByteMatchTuples []ByteMatchTuplesParameters `json:"byteMatchTuples,omitempty" tf:"byte_match_tuples,omitempty"` + + // The name or description of the ByteMatchSet. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type ByteMatchTuplesInitParameters struct { + + // Settings for the ByteMatchTuple. FieldToMatch documented below. + FieldToMatch *FieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Within the portion of a web request that you want to search. + PositionalConstraint *string `json:"positionalConstraint,omitempty" tf:"positional_constraint,omitempty"` + + // The value that you want AWS WAF to search for. 
The maximum length of the value is 50 bytes. + TargetString *string `json:"targetString,omitempty" tf:"target_string,omitempty"` + + // The formatting way for web request. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type ByteMatchTuplesObservation struct { + + // Settings for the ByteMatchTuple. FieldToMatch documented below. + FieldToMatch *FieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Within the portion of a web request that you want to search. + PositionalConstraint *string `json:"positionalConstraint,omitempty" tf:"positional_constraint,omitempty"` + + // The value that you want AWS WAF to search for. The maximum length of the value is 50 bytes. + TargetString *string `json:"targetString,omitempty" tf:"target_string,omitempty"` + + // The formatting way for web request. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type ByteMatchTuplesParameters struct { + + // Settings for the ByteMatchTuple. FieldToMatch documented below. + // +kubebuilder:validation:Optional + FieldToMatch *FieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // Within the portion of a web request that you want to search. + // +kubebuilder:validation:Optional + PositionalConstraint *string `json:"positionalConstraint" tf:"positional_constraint,omitempty"` + + // The value that you want AWS WAF to search for. The maximum length of the value is 50 bytes. + // +kubebuilder:validation:Optional + TargetString *string `json:"targetString,omitempty" tf:"target_string,omitempty"` + + // The formatting way for web request. 
+ // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +type FieldToMatchInitParameters struct { + + // When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. If the value of Type is any other value, omit Data. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FieldToMatchObservation struct { + + // When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. If the value of Type is any other value, omit Data. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FieldToMatchParameters struct { + + // When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. If the value of Type is any other value, omit Data. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// ByteMatchSetSpec defines the desired state of ByteMatchSet +type ByteMatchSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ByteMatchSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ByteMatchSetInitParameters `json:"initProvider,omitempty"` +} + +// ByteMatchSetStatus defines the observed state of ByteMatchSet. +type ByteMatchSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ByteMatchSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ByteMatchSet is the Schema for the ByteMatchSets API. Provides a AWS WAF Regional ByteMatchSet resource for use with ALB. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type ByteMatchSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ByteMatchSetSpec `json:"spec"` + Status ByteMatchSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// 
ByteMatchSetList contains a list of ByteMatchSets +type ByteMatchSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ByteMatchSet `json:"items"` +} + +// Repository type metadata. +var ( + ByteMatchSet_Kind = "ByteMatchSet" + ByteMatchSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ByteMatchSet_Kind}.String() + ByteMatchSet_KindAPIVersion = ByteMatchSet_Kind + "." + CRDGroupVersion.String() + ByteMatchSet_GroupVersionKind = CRDGroupVersion.WithKind(ByteMatchSet_Kind) +) + +func init() { + SchemeBuilder.Register(&ByteMatchSet{}, &ByteMatchSetList{}) +} diff --git a/apis/wafregional/v1beta2/zz_generated.conversion_hubs.go b/apis/wafregional/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..7f9898eef5 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ByteMatchSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RegexMatchSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SizeConstraintSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SQLInjectionMatchSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WebACL) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *XSSMatchSet) Hub() {} diff --git a/apis/wafregional/v1beta2/zz_generated.deepcopy.go b/apis/wafregional/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8f205be624 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2644 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. +func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ByteMatchSet) DeepCopyInto(out *ByteMatchSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSet. +func (in *ByteMatchSet) DeepCopy() *ByteMatchSet { + if in == nil { + return nil + } + out := new(ByteMatchSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ByteMatchSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchSetInitParameters) DeepCopyInto(out *ByteMatchSetInitParameters) { + *out = *in + if in.ByteMatchTuples != nil { + in, out := &in.ByteMatchTuples, &out.ByteMatchTuples + *out = make([]ByteMatchTuplesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetInitParameters. +func (in *ByteMatchSetInitParameters) DeepCopy() *ByteMatchSetInitParameters { + if in == nil { + return nil + } + out := new(ByteMatchSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ByteMatchSetList) DeepCopyInto(out *ByteMatchSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ByteMatchSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetList. +func (in *ByteMatchSetList) DeepCopy() *ByteMatchSetList { + if in == nil { + return nil + } + out := new(ByteMatchSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ByteMatchSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchSetObservation) DeepCopyInto(out *ByteMatchSetObservation) { + *out = *in + if in.ByteMatchTuples != nil { + in, out := &in.ByteMatchTuples, &out.ByteMatchTuples + *out = make([]ByteMatchTuplesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetObservation. +func (in *ByteMatchSetObservation) DeepCopy() *ByteMatchSetObservation { + if in == nil { + return nil + } + out := new(ByteMatchSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ByteMatchSetParameters) DeepCopyInto(out *ByteMatchSetParameters) { + *out = *in + if in.ByteMatchTuples != nil { + in, out := &in.ByteMatchTuples, &out.ByteMatchTuples + *out = make([]ByteMatchTuplesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetParameters. +func (in *ByteMatchSetParameters) DeepCopy() *ByteMatchSetParameters { + if in == nil { + return nil + } + out := new(ByteMatchSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchSetSpec) DeepCopyInto(out *ByteMatchSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetSpec. +func (in *ByteMatchSetSpec) DeepCopy() *ByteMatchSetSpec { + if in == nil { + return nil + } + out := new(ByteMatchSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchSetStatus) DeepCopyInto(out *ByteMatchSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchSetStatus. 
+func (in *ByteMatchSetStatus) DeepCopy() *ByteMatchSetStatus { + if in == nil { + return nil + } + out := new(ByteMatchSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchTuplesInitParameters) DeepCopyInto(out *ByteMatchTuplesInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(FieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PositionalConstraint != nil { + in, out := &in.PositionalConstraint, &out.PositionalConstraint + *out = new(string) + **out = **in + } + if in.TargetString != nil { + in, out := &in.TargetString, &out.TargetString + *out = new(string) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchTuplesInitParameters. +func (in *ByteMatchTuplesInitParameters) DeepCopy() *ByteMatchTuplesInitParameters { + if in == nil { + return nil + } + out := new(ByteMatchTuplesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ByteMatchTuplesObservation) DeepCopyInto(out *ByteMatchTuplesObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(FieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.PositionalConstraint != nil { + in, out := &in.PositionalConstraint, &out.PositionalConstraint + *out = new(string) + **out = **in + } + if in.TargetString != nil { + in, out := &in.TargetString, &out.TargetString + *out = new(string) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchTuplesObservation. +func (in *ByteMatchTuplesObservation) DeepCopy() *ByteMatchTuplesObservation { + if in == nil { + return nil + } + out := new(ByteMatchTuplesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ByteMatchTuplesParameters) DeepCopyInto(out *ByteMatchTuplesParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(FieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.PositionalConstraint != nil { + in, out := &in.PositionalConstraint, &out.PositionalConstraint + *out = new(string) + **out = **in + } + if in.TargetString != nil { + in, out := &in.TargetString, &out.TargetString + *out = new(string) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByteMatchTuplesParameters. 
+func (in *ByteMatchTuplesParameters) DeepCopy() *ByteMatchTuplesParameters { + if in == nil { + return nil + } + out := new(ByteMatchTuplesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultActionInitParameters) DeepCopyInto(out *DefaultActionInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultActionInitParameters. +func (in *DefaultActionInitParameters) DeepCopy() *DefaultActionInitParameters { + if in == nil { + return nil + } + out := new(DefaultActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultActionObservation) DeepCopyInto(out *DefaultActionObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultActionObservation. +func (in *DefaultActionObservation) DeepCopy() *DefaultActionObservation { + if in == nil { + return nil + } + out := new(DefaultActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultActionParameters) DeepCopyInto(out *DefaultActionParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultActionParameters. 
+func (in *DefaultActionParameters) DeepCopy() *DefaultActionParameters { + if in == nil { + return nil + } + out := new(DefaultActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldToMatchInitParameters) DeepCopyInto(out *FieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldToMatchInitParameters. +func (in *FieldToMatchInitParameters) DeepCopy() *FieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(FieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldToMatchObservation) DeepCopyInto(out *FieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldToMatchObservation. +func (in *FieldToMatchObservation) DeepCopy() *FieldToMatchObservation { + if in == nil { + return nil + } + out := new(FieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FieldToMatchParameters) DeepCopyInto(out *FieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldToMatchParameters. +func (in *FieldToMatchParameters) DeepCopy() *FieldToMatchParameters { + if in == nil { + return nil + } + out := new(FieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationInitParameters) DeepCopyInto(out *LoggingConfigurationInitParameters) { + *out = *in + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.LogDestinationRef != nil { + in, out := &in.LogDestinationRef, &out.LogDestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogDestinationSelector != nil { + in, out := &in.LogDestinationSelector, &out.LogDestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RedactedFields != nil { + in, out := &in.RedactedFields, &out.RedactedFields + *out = new(RedactedFieldsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationInitParameters. +func (in *LoggingConfigurationInitParameters) DeepCopy() *LoggingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingConfigurationObservation) DeepCopyInto(out *LoggingConfigurationObservation) { + *out = *in + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.RedactedFields != nil { + in, out := &in.RedactedFields, &out.RedactedFields + *out = new(RedactedFieldsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationObservation. +func (in *LoggingConfigurationObservation) DeepCopy() *LoggingConfigurationObservation { + if in == nil { + return nil + } + out := new(LoggingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationParameters) DeepCopyInto(out *LoggingConfigurationParameters) { + *out = *in + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.LogDestinationRef != nil { + in, out := &in.LogDestinationRef, &out.LogDestinationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogDestinationSelector != nil { + in, out := &in.LogDestinationSelector, &out.LogDestinationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RedactedFields != nil { + in, out := &in.RedactedFields, &out.RedactedFields + *out = new(RedactedFieldsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationParameters. +func (in *LoggingConfigurationParameters) DeepCopy() *LoggingConfigurationParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *OverrideActionInitParameters) DeepCopyInto(out *OverrideActionInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideActionInitParameters. +func (in *OverrideActionInitParameters) DeepCopy() *OverrideActionInitParameters { + if in == nil { + return nil + } + out := new(OverrideActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideActionObservation) DeepCopyInto(out *OverrideActionObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideActionObservation. +func (in *OverrideActionObservation) DeepCopy() *OverrideActionObservation { + if in == nil { + return nil + } + out := new(OverrideActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideActionParameters) DeepCopyInto(out *OverrideActionParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideActionParameters. +func (in *OverrideActionParameters) DeepCopy() *OverrideActionParameters { + if in == nil { + return nil + } + out := new(OverrideActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedactedFieldsFieldToMatchInitParameters) DeepCopyInto(out *RedactedFieldsFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsFieldToMatchInitParameters. +func (in *RedactedFieldsFieldToMatchInitParameters) DeepCopy() *RedactedFieldsFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(RedactedFieldsFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedactedFieldsFieldToMatchObservation) DeepCopyInto(out *RedactedFieldsFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsFieldToMatchObservation. +func (in *RedactedFieldsFieldToMatchObservation) DeepCopy() *RedactedFieldsFieldToMatchObservation { + if in == nil { + return nil + } + out := new(RedactedFieldsFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedactedFieldsFieldToMatchParameters) DeepCopyInto(out *RedactedFieldsFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsFieldToMatchParameters. +func (in *RedactedFieldsFieldToMatchParameters) DeepCopy() *RedactedFieldsFieldToMatchParameters { + if in == nil { + return nil + } + out := new(RedactedFieldsFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedactedFieldsInitParameters) DeepCopyInto(out *RedactedFieldsInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = make([]RedactedFieldsFieldToMatchInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsInitParameters. +func (in *RedactedFieldsInitParameters) DeepCopy() *RedactedFieldsInitParameters { + if in == nil { + return nil + } + out := new(RedactedFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedactedFieldsObservation) DeepCopyInto(out *RedactedFieldsObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = make([]RedactedFieldsFieldToMatchObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsObservation. 
+func (in *RedactedFieldsObservation) DeepCopy() *RedactedFieldsObservation { + if in == nil { + return nil + } + out := new(RedactedFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedactedFieldsParameters) DeepCopyInto(out *RedactedFieldsParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = make([]RedactedFieldsFieldToMatchParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedactedFieldsParameters. +func (in *RedactedFieldsParameters) DeepCopy() *RedactedFieldsParameters { + if in == nil { + return nil + } + out := new(RedactedFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchSet) DeepCopyInto(out *RegexMatchSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSet. +func (in *RegexMatchSet) DeepCopy() *RegexMatchSet { + if in == nil { + return nil + } + out := new(RegexMatchSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RegexMatchSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegexMatchSetInitParameters) DeepCopyInto(out *RegexMatchSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegexMatchTuple != nil { + in, out := &in.RegexMatchTuple, &out.RegexMatchTuple + *out = make([]RegexMatchTupleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetInitParameters. +func (in *RegexMatchSetInitParameters) DeepCopy() *RegexMatchSetInitParameters { + if in == nil { + return nil + } + out := new(RegexMatchSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchSetList) DeepCopyInto(out *RegexMatchSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RegexMatchSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetList. +func (in *RegexMatchSetList) DeepCopy() *RegexMatchSetList { + if in == nil { + return nil + } + out := new(RegexMatchSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RegexMatchSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegexMatchSetObservation) DeepCopyInto(out *RegexMatchSetObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegexMatchTuple != nil { + in, out := &in.RegexMatchTuple, &out.RegexMatchTuple + *out = make([]RegexMatchTupleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetObservation. +func (in *RegexMatchSetObservation) DeepCopy() *RegexMatchSetObservation { + if in == nil { + return nil + } + out := new(RegexMatchSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchSetParameters) DeepCopyInto(out *RegexMatchSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegexMatchTuple != nil { + in, out := &in.RegexMatchTuple, &out.RegexMatchTuple + *out = make([]RegexMatchTupleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetParameters. +func (in *RegexMatchSetParameters) DeepCopy() *RegexMatchSetParameters { + if in == nil { + return nil + } + out := new(RegexMatchSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegexMatchSetSpec) DeepCopyInto(out *RegexMatchSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetSpec. +func (in *RegexMatchSetSpec) DeepCopy() *RegexMatchSetSpec { + if in == nil { + return nil + } + out := new(RegexMatchSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchSetStatus) DeepCopyInto(out *RegexMatchSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchSetStatus. +func (in *RegexMatchSetStatus) DeepCopy() *RegexMatchSetStatus { + if in == nil { + return nil + } + out := new(RegexMatchSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleFieldToMatchInitParameters) DeepCopyInto(out *RegexMatchTupleFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleFieldToMatchInitParameters. 
+func (in *RegexMatchTupleFieldToMatchInitParameters) DeepCopy() *RegexMatchTupleFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(RegexMatchTupleFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleFieldToMatchObservation) DeepCopyInto(out *RegexMatchTupleFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleFieldToMatchObservation. +func (in *RegexMatchTupleFieldToMatchObservation) DeepCopy() *RegexMatchTupleFieldToMatchObservation { + if in == nil { + return nil + } + out := new(RegexMatchTupleFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleFieldToMatchParameters) DeepCopyInto(out *RegexMatchTupleFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleFieldToMatchParameters. +func (in *RegexMatchTupleFieldToMatchParameters) DeepCopy() *RegexMatchTupleFieldToMatchParameters { + if in == nil { + return nil + } + out := new(RegexMatchTupleFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegexMatchTupleInitParameters) DeepCopyInto(out *RegexMatchTupleInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(RegexMatchTupleFieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetID != nil { + in, out := &in.RegexPatternSetID, &out.RegexPatternSetID + *out = new(string) + **out = **in + } + if in.RegexPatternSetIDRef != nil { + in, out := &in.RegexPatternSetIDRef, &out.RegexPatternSetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetIDSelector != nil { + in, out := &in.RegexPatternSetIDSelector, &out.RegexPatternSetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleInitParameters. +func (in *RegexMatchTupleInitParameters) DeepCopy() *RegexMatchTupleInitParameters { + if in == nil { + return nil + } + out := new(RegexMatchTupleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleObservation) DeepCopyInto(out *RegexMatchTupleObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(RegexMatchTupleFieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetID != nil { + in, out := &in.RegexPatternSetID, &out.RegexPatternSetID + *out = new(string) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleObservation. 
+func (in *RegexMatchTupleObservation) DeepCopy() *RegexMatchTupleObservation { + if in == nil { + return nil + } + out := new(RegexMatchTupleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexMatchTupleParameters) DeepCopyInto(out *RegexMatchTupleParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(RegexMatchTupleFieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetID != nil { + in, out := &in.RegexPatternSetID, &out.RegexPatternSetID + *out = new(string) + **out = **in + } + if in.RegexPatternSetIDRef != nil { + in, out := &in.RegexPatternSetIDRef, &out.RegexPatternSetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegexPatternSetIDSelector != nil { + in, out := &in.RegexPatternSetIDSelector, &out.RegexPatternSetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexMatchTupleParameters. +func (in *RegexMatchTupleParameters) DeepCopy() *RegexMatchTupleParameters { + if in == nil { + return nil + } + out := new(RegexMatchTupleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OverrideAction != nil { + in, out := &in.OverrideAction, &out.OverrideAction + *out = new(OverrideActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RuleID != nil { + in, out := &in.RuleID, &out.RuleID + *out = new(string) + **out = **in + } + if in.RuleIDRef != nil { + in, out := &in.RuleIDRef, &out.RuleIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RuleIDSelector != nil { + in, out := &in.RuleIDSelector, &out.RuleIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleInitParameters. +func (in *RuleInitParameters) DeepCopy() *RuleInitParameters { + if in == nil { + return nil + } + out := new(RuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleObservation) DeepCopyInto(out *RuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionObservation) + (*in).DeepCopyInto(*out) + } + if in.OverrideAction != nil { + in, out := &in.OverrideAction, &out.OverrideAction + *out = new(OverrideActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RuleID != nil { + in, out := &in.RuleID, &out.RuleID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleObservation. +func (in *RuleObservation) DeepCopy() *RuleObservation { + if in == nil { + return nil + } + out := new(RuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleParameters) DeepCopyInto(out *RuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionParameters) + (*in).DeepCopyInto(*out) + } + if in.OverrideAction != nil { + in, out := &in.OverrideAction, &out.OverrideAction + *out = new(OverrideActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RuleID != nil { + in, out := &in.RuleID, &out.RuleID + *out = new(string) + **out = **in + } + if in.RuleIDRef != nil { + in, out := &in.RuleIDRef, &out.RuleIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RuleIDSelector != nil { + in, out := &in.RuleIDSelector, &out.RuleIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleParameters. +func (in *RuleParameters) DeepCopy() *RuleParameters { + if in == nil { + return nil + } + out := new(RuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSet) DeepCopyInto(out *SQLInjectionMatchSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSet. +func (in *SQLInjectionMatchSet) DeepCopy() *SQLInjectionMatchSet { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SQLInjectionMatchSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetInitParameters) DeepCopyInto(out *SQLInjectionMatchSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLInjectionMatchTuple != nil { + in, out := &in.SQLInjectionMatchTuple, &out.SQLInjectionMatchTuple + *out = make([]SQLInjectionMatchTupleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetInitParameters. +func (in *SQLInjectionMatchSetInitParameters) DeepCopy() *SQLInjectionMatchSetInitParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetList) DeepCopyInto(out *SQLInjectionMatchSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SQLInjectionMatchSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetList. +func (in *SQLInjectionMatchSetList) DeepCopy() *SQLInjectionMatchSetList { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SQLInjectionMatchSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetObservation) DeepCopyInto(out *SQLInjectionMatchSetObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SQLInjectionMatchTuple != nil { + in, out := &in.SQLInjectionMatchTuple, &out.SQLInjectionMatchTuple + *out = make([]SQLInjectionMatchTupleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetObservation. +func (in *SQLInjectionMatchSetObservation) DeepCopy() *SQLInjectionMatchSetObservation { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetParameters) DeepCopyInto(out *SQLInjectionMatchSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SQLInjectionMatchTuple != nil { + in, out := &in.SQLInjectionMatchTuple, &out.SQLInjectionMatchTuple + *out = make([]SQLInjectionMatchTupleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetParameters. 
+func (in *SQLInjectionMatchSetParameters) DeepCopy() *SQLInjectionMatchSetParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetSpec) DeepCopyInto(out *SQLInjectionMatchSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetSpec. +func (in *SQLInjectionMatchSetSpec) DeepCopy() *SQLInjectionMatchSetSpec { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchSetStatus) DeepCopyInto(out *SQLInjectionMatchSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchSetStatus. +func (in *SQLInjectionMatchSetStatus) DeepCopy() *SQLInjectionMatchSetStatus { + if in == nil { + return nil + } + out := new(SQLInjectionMatchSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLInjectionMatchTupleFieldToMatchInitParameters) DeepCopyInto(out *SQLInjectionMatchTupleFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTupleFieldToMatchInitParameters. +func (in *SQLInjectionMatchTupleFieldToMatchInitParameters) DeepCopy() *SQLInjectionMatchTupleFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTupleFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchTupleFieldToMatchObservation) DeepCopyInto(out *SQLInjectionMatchTupleFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTupleFieldToMatchObservation. +func (in *SQLInjectionMatchTupleFieldToMatchObservation) DeepCopy() *SQLInjectionMatchTupleFieldToMatchObservation { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTupleFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLInjectionMatchTupleFieldToMatchParameters) DeepCopyInto(out *SQLInjectionMatchTupleFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTupleFieldToMatchParameters. +func (in *SQLInjectionMatchTupleFieldToMatchParameters) DeepCopy() *SQLInjectionMatchTupleFieldToMatchParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTupleFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchTupleInitParameters) DeepCopyInto(out *SQLInjectionMatchTupleInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SQLInjectionMatchTupleFieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTupleInitParameters. +func (in *SQLInjectionMatchTupleInitParameters) DeepCopy() *SQLInjectionMatchTupleInitParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTupleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLInjectionMatchTupleObservation) DeepCopyInto(out *SQLInjectionMatchTupleObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SQLInjectionMatchTupleFieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTupleObservation. +func (in *SQLInjectionMatchTupleObservation) DeepCopy() *SQLInjectionMatchTupleObservation { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTupleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLInjectionMatchTupleParameters) DeepCopyInto(out *SQLInjectionMatchTupleParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SQLInjectionMatchTupleFieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLInjectionMatchTupleParameters. +func (in *SQLInjectionMatchTupleParameters) DeepCopy() *SQLInjectionMatchTupleParameters { + if in == nil { + return nil + } + out := new(SQLInjectionMatchTupleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SizeConstraintSet) DeepCopyInto(out *SizeConstraintSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSet. +func (in *SizeConstraintSet) DeepCopy() *SizeConstraintSet { + if in == nil { + return nil + } + out := new(SizeConstraintSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SizeConstraintSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintSetInitParameters) DeepCopyInto(out *SizeConstraintSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SizeConstraints != nil { + in, out := &in.SizeConstraints, &out.SizeConstraints + *out = make([]SizeConstraintsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetInitParameters. +func (in *SizeConstraintSetInitParameters) DeepCopy() *SizeConstraintSetInitParameters { + if in == nil { + return nil + } + out := new(SizeConstraintSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SizeConstraintSetList) DeepCopyInto(out *SizeConstraintSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SizeConstraintSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetList. +func (in *SizeConstraintSetList) DeepCopy() *SizeConstraintSetList { + if in == nil { + return nil + } + out := new(SizeConstraintSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SizeConstraintSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintSetObservation) DeepCopyInto(out *SizeConstraintSetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SizeConstraints != nil { + in, out := &in.SizeConstraints, &out.SizeConstraints + *out = make([]SizeConstraintsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetObservation. 
+func (in *SizeConstraintSetObservation) DeepCopy() *SizeConstraintSetObservation { + if in == nil { + return nil + } + out := new(SizeConstraintSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintSetParameters) DeepCopyInto(out *SizeConstraintSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SizeConstraints != nil { + in, out := &in.SizeConstraints, &out.SizeConstraints + *out = make([]SizeConstraintsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetParameters. +func (in *SizeConstraintSetParameters) DeepCopy() *SizeConstraintSetParameters { + if in == nil { + return nil + } + out := new(SizeConstraintSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintSetSpec) DeepCopyInto(out *SizeConstraintSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetSpec. +func (in *SizeConstraintSetSpec) DeepCopy() *SizeConstraintSetSpec { + if in == nil { + return nil + } + out := new(SizeConstraintSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SizeConstraintSetStatus) DeepCopyInto(out *SizeConstraintSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintSetStatus. +func (in *SizeConstraintSetStatus) DeepCopy() *SizeConstraintSetStatus { + if in == nil { + return nil + } + out := new(SizeConstraintSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsFieldToMatchInitParameters) DeepCopyInto(out *SizeConstraintsFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsFieldToMatchInitParameters. +func (in *SizeConstraintsFieldToMatchInitParameters) DeepCopy() *SizeConstraintsFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(SizeConstraintsFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsFieldToMatchObservation) DeepCopyInto(out *SizeConstraintsFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsFieldToMatchObservation. 
+func (in *SizeConstraintsFieldToMatchObservation) DeepCopy() *SizeConstraintsFieldToMatchObservation { + if in == nil { + return nil + } + out := new(SizeConstraintsFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsFieldToMatchParameters) DeepCopyInto(out *SizeConstraintsFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsFieldToMatchParameters. +func (in *SizeConstraintsFieldToMatchParameters) DeepCopy() *SizeConstraintsFieldToMatchParameters { + if in == nil { + return nil + } + out := new(SizeConstraintsFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsInitParameters) DeepCopyInto(out *SizeConstraintsInitParameters) { + *out = *in + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SizeConstraintsFieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsInitParameters. 
+func (in *SizeConstraintsInitParameters) DeepCopy() *SizeConstraintsInitParameters { + if in == nil { + return nil + } + out := new(SizeConstraintsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SizeConstraintsObservation) DeepCopyInto(out *SizeConstraintsObservation) { + *out = *in + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SizeConstraintsFieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsObservation. +func (in *SizeConstraintsObservation) DeepCopy() *SizeConstraintsObservation { + if in == nil { + return nil + } + out := new(SizeConstraintsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SizeConstraintsParameters) DeepCopyInto(out *SizeConstraintsParameters) { + *out = *in + if in.ComparisonOperator != nil { + in, out := &in.ComparisonOperator, &out.ComparisonOperator + *out = new(string) + **out = **in + } + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(SizeConstraintsFieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SizeConstraintsParameters. +func (in *SizeConstraintsParameters) DeepCopy() *SizeConstraintsParameters { + if in == nil { + return nil + } + out := new(SizeConstraintsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACL) DeepCopyInto(out *WebACL) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACL. +func (in *WebACL) DeepCopy() *WebACL { + if in == nil { + return nil + } + out := new(WebACL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebACL) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebACLInitParameters) DeepCopyInto(out *WebACLInitParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(DefaultActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLInitParameters. +func (in *WebACLInitParameters) DeepCopy() *WebACLInitParameters { + if in == nil { + return nil + } + out := new(WebACLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLList) DeepCopyInto(out *WebACLList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WebACL, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLList. 
+func (in *WebACLList) DeepCopy() *WebACLList { + if in == nil { + return nil + } + out := new(WebACLList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebACLList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLObservation) DeepCopyInto(out *WebACLObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(DefaultActionObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLObservation. +func (in *WebACLObservation) DeepCopy() *WebACLObservation { + if in == nil { + return nil + } + out := new(WebACLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLParameters) DeepCopyInto(out *WebACLParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(DefaultActionParameters) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = new(LoggingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLParameters. 
+func (in *WebACLParameters) DeepCopy() *WebACLParameters { + if in == nil { + return nil + } + out := new(WebACLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLSpec) DeepCopyInto(out *WebACLSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLSpec. +func (in *WebACLSpec) DeepCopy() *WebACLSpec { + if in == nil { + return nil + } + out := new(WebACLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebACLStatus) DeepCopyInto(out *WebACLStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebACLStatus. +func (in *WebACLStatus) DeepCopy() *WebACLStatus { + if in == nil { + return nil + } + out := new(WebACLStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSet) DeepCopyInto(out *XSSMatchSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSet. 
+func (in *XSSMatchSet) DeepCopy() *XSSMatchSet { + if in == nil { + return nil + } + out := new(XSSMatchSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *XSSMatchSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetInitParameters) DeepCopyInto(out *XSSMatchSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.XSSMatchTuple != nil { + in, out := &in.XSSMatchTuple, &out.XSSMatchTuple + *out = make([]XSSMatchTupleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetInitParameters. +func (in *XSSMatchSetInitParameters) DeepCopy() *XSSMatchSetInitParameters { + if in == nil { + return nil + } + out := new(XSSMatchSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetList) DeepCopyInto(out *XSSMatchSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]XSSMatchSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetList. 
+func (in *XSSMatchSetList) DeepCopy() *XSSMatchSetList { + if in == nil { + return nil + } + out := new(XSSMatchSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *XSSMatchSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetObservation) DeepCopyInto(out *XSSMatchSetObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.XSSMatchTuple != nil { + in, out := &in.XSSMatchTuple, &out.XSSMatchTuple + *out = make([]XSSMatchTupleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetObservation. +func (in *XSSMatchSetObservation) DeepCopy() *XSSMatchSetObservation { + if in == nil { + return nil + } + out := new(XSSMatchSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *XSSMatchSetParameters) DeepCopyInto(out *XSSMatchSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.XSSMatchTuple != nil { + in, out := &in.XSSMatchTuple, &out.XSSMatchTuple + *out = make([]XSSMatchTupleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetParameters. +func (in *XSSMatchSetParameters) DeepCopy() *XSSMatchSetParameters { + if in == nil { + return nil + } + out := new(XSSMatchSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetSpec) DeepCopyInto(out *XSSMatchSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetSpec. +func (in *XSSMatchSetSpec) DeepCopy() *XSSMatchSetSpec { + if in == nil { + return nil + } + out := new(XSSMatchSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchSetStatus) DeepCopyInto(out *XSSMatchSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchSetStatus. 
+func (in *XSSMatchSetStatus) DeepCopy() *XSSMatchSetStatus { + if in == nil { + return nil + } + out := new(XSSMatchSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchTupleFieldToMatchInitParameters) DeepCopyInto(out *XSSMatchTupleFieldToMatchInitParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTupleFieldToMatchInitParameters. +func (in *XSSMatchTupleFieldToMatchInitParameters) DeepCopy() *XSSMatchTupleFieldToMatchInitParameters { + if in == nil { + return nil + } + out := new(XSSMatchTupleFieldToMatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchTupleFieldToMatchObservation) DeepCopyInto(out *XSSMatchTupleFieldToMatchObservation) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTupleFieldToMatchObservation. +func (in *XSSMatchTupleFieldToMatchObservation) DeepCopy() *XSSMatchTupleFieldToMatchObservation { + if in == nil { + return nil + } + out := new(XSSMatchTupleFieldToMatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *XSSMatchTupleFieldToMatchParameters) DeepCopyInto(out *XSSMatchTupleFieldToMatchParameters) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTupleFieldToMatchParameters. +func (in *XSSMatchTupleFieldToMatchParameters) DeepCopy() *XSSMatchTupleFieldToMatchParameters { + if in == nil { + return nil + } + out := new(XSSMatchTupleFieldToMatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchTupleInitParameters) DeepCopyInto(out *XSSMatchTupleInitParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(XSSMatchTupleFieldToMatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTupleInitParameters. +func (in *XSSMatchTupleInitParameters) DeepCopy() *XSSMatchTupleInitParameters { + if in == nil { + return nil + } + out := new(XSSMatchTupleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *XSSMatchTupleObservation) DeepCopyInto(out *XSSMatchTupleObservation) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(XSSMatchTupleFieldToMatchObservation) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTupleObservation. +func (in *XSSMatchTupleObservation) DeepCopy() *XSSMatchTupleObservation { + if in == nil { + return nil + } + out := new(XSSMatchTupleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *XSSMatchTupleParameters) DeepCopyInto(out *XSSMatchTupleParameters) { + *out = *in + if in.FieldToMatch != nil { + in, out := &in.FieldToMatch, &out.FieldToMatch + *out = new(XSSMatchTupleFieldToMatchParameters) + (*in).DeepCopyInto(*out) + } + if in.TextTransformation != nil { + in, out := &in.TextTransformation, &out.TextTransformation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XSSMatchTupleParameters. +func (in *XSSMatchTupleParameters) DeepCopy() *XSSMatchTupleParameters { + if in == nil { + return nil + } + out := new(XSSMatchTupleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/wafregional/v1beta2/zz_generated.managed.go b/apis/wafregional/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..fbce536036 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_generated.managed.go @@ -0,0 +1,368 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ByteMatchSet. +func (mg *ByteMatchSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ByteMatchSet. +func (mg *ByteMatchSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ByteMatchSet. +func (mg *ByteMatchSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ByteMatchSet. +func (mg *ByteMatchSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ByteMatchSet. +func (mg *ByteMatchSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ByteMatchSet. +func (mg *ByteMatchSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ByteMatchSet. +func (mg *ByteMatchSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ByteMatchSet. +func (mg *ByteMatchSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ByteMatchSet. +func (mg *ByteMatchSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ByteMatchSet. +func (mg *ByteMatchSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ByteMatchSet. 
+func (mg *ByteMatchSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ByteMatchSet. +func (mg *ByteMatchSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this RegexMatchSet. +func (mg *RegexMatchSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RegexMatchSet. +func (mg *RegexMatchSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RegexMatchSet. +func (mg *RegexMatchSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RegexMatchSet. +func (mg *RegexMatchSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RegexMatchSet. +func (mg *RegexMatchSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RegexMatchSet. +func (mg *RegexMatchSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RegexMatchSet. +func (mg *RegexMatchSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RegexMatchSet. +func (mg *RegexMatchSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RegexMatchSet. +func (mg *RegexMatchSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RegexMatchSet. 
+func (mg *RegexMatchSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RegexMatchSet. +func (mg *RegexMatchSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RegexMatchSet. +func (mg *RegexMatchSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SQLInjectionMatchSet. 
+func (mg *SQLInjectionMatchSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SQLInjectionMatchSet. +func (mg *SQLInjectionMatchSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SizeConstraintSet. +func (mg *SizeConstraintSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SizeConstraintSet. 
+func (mg *SizeConstraintSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SizeConstraintSet. +func (mg *SizeConstraintSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WebACL. +func (mg *WebACL) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WebACL. +func (mg *WebACL) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WebACL. +func (mg *WebACL) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WebACL. +func (mg *WebACL) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WebACL. 
+func (mg *WebACL) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WebACL. +func (mg *WebACL) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WebACL. +func (mg *WebACL) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WebACL. +func (mg *WebACL) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WebACL. +func (mg *WebACL) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WebACL. +func (mg *WebACL) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WebACL. +func (mg *WebACL) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WebACL. +func (mg *WebACL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this XSSMatchSet. +func (mg *XSSMatchSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this XSSMatchSet. +func (mg *XSSMatchSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this XSSMatchSet. +func (mg *XSSMatchSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this XSSMatchSet. +func (mg *XSSMatchSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this XSSMatchSet. 
+func (mg *XSSMatchSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this XSSMatchSet. +func (mg *XSSMatchSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this XSSMatchSet. +func (mg *XSSMatchSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this XSSMatchSet. +func (mg *XSSMatchSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this XSSMatchSet. +func (mg *XSSMatchSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this XSSMatchSet. +func (mg *XSSMatchSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this XSSMatchSet. +func (mg *XSSMatchSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this XSSMatchSet. +func (mg *XSSMatchSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/wafregional/v1beta2/zz_generated.managedlist.go b/apis/wafregional/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..8d78477c53 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,62 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ByteMatchSetList. 
+func (l *ByteMatchSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RegexMatchSetList. +func (l *RegexMatchSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SQLInjectionMatchSetList. +func (l *SQLInjectionMatchSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SizeConstraintSetList. +func (l *SizeConstraintSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WebACLList. +func (l *WebACLList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this XSSMatchSetList. +func (l *XSSMatchSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/wafregional/v1beta2/zz_generated.resolvers.go b/apis/wafregional/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..1f6d25539e --- /dev/null +++ b/apis/wafregional/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,170 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *RegexMatchSet) ResolveReferences( // ResolveReferences of this RegexMatchSet. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.RegexMatchTuple); i3++ { + { + m, l, err = apisresolver.GetManagedResource("wafregional.aws.upbound.io", "v1beta1", "RegexPatternSet", "RegexPatternSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetIDRef, + Selector: mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetID") + } + mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RegexMatchTuple[i3].RegexPatternSetIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.RegexMatchTuple); i3++ { + { + m, l, err = apisresolver.GetManagedResource("wafregional.aws.upbound.io", "v1beta1", "RegexPatternSet", "RegexPatternSetList") + if err != nil { + return errors.Wrap(err, "failed to get the 
reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetIDRef, + Selector: mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetID") + } + mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RegexMatchTuple[i3].RegexPatternSetIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this WebACL. +func (mg *WebACL) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.LoggingConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LoggingConfiguration.LogDestination), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.ForProvider.LoggingConfiguration.LogDestinationRef, + Selector: mg.Spec.ForProvider.LoggingConfiguration.LogDestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LoggingConfiguration.LogDestination") + } + mg.Spec.ForProvider.LoggingConfiguration.LogDestination = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LoggingConfiguration.LogDestinationRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Rule); i3++ { + { + m, l, err = apisresolver.GetManagedResource("wafregional.aws.upbound.io", "v1beta1", "Rule", "RuleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Rule[i3].RuleID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Rule[i3].RuleIDRef, + Selector: mg.Spec.ForProvider.Rule[i3].RuleIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Rule[i3].RuleID") + } + mg.Spec.ForProvider.Rule[i3].RuleID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Rule[i3].RuleIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LoggingConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("firehose.aws.upbound.io", "v1beta2", "DeliveryStream", "DeliveryStreamList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LoggingConfiguration.LogDestination), + Extract: resource.ExtractParamPath("arn", false), + Reference: mg.Spec.InitProvider.LoggingConfiguration.LogDestinationRef, + Selector: mg.Spec.InitProvider.LoggingConfiguration.LogDestinationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LoggingConfiguration.LogDestination") + } + mg.Spec.InitProvider.LoggingConfiguration.LogDestination = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.LoggingConfiguration.LogDestinationRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Rule); i3++ { + { + m, l, err = apisresolver.GetManagedResource("wafregional.aws.upbound.io", "v1beta1", "Rule", "RuleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Rule[i3].RuleID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Rule[i3].RuleIDRef, + Selector: mg.Spec.InitProvider.Rule[i3].RuleIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Rule[i3].RuleID") + } + mg.Spec.InitProvider.Rule[i3].RuleID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Rule[i3].RuleIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/wafregional/v1beta2/zz_groupversion_info.go b/apis/wafregional/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..4eef521a87 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=wafregional.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "wafregional.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/wafregional/v1beta2/zz_regexmatchset_terraformed.go b/apis/wafregional/v1beta2/zz_regexmatchset_terraformed.go new file mode 100755 index 0000000000..107dd677dc --- /dev/null +++ b/apis/wafregional/v1beta2/zz_regexmatchset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RegexMatchSet +func (mg *RegexMatchSet) GetTerraformResourceType() string { + return "aws_wafregional_regex_match_set" +} + +// GetConnectionDetailsMapping for this RegexMatchSet +func (tr *RegexMatchSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RegexMatchSet +func (tr *RegexMatchSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RegexMatchSet +func (tr *RegexMatchSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns 
ID of underlying Terraform resource of this RegexMatchSet +func (tr *RegexMatchSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RegexMatchSet +func (tr *RegexMatchSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RegexMatchSet +func (tr *RegexMatchSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RegexMatchSet +func (tr *RegexMatchSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this RegexMatchSet +func (tr *RegexMatchSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RegexMatchSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *RegexMatchSet) LateInitialize(attrs []byte) (bool, error) { + params := &RegexMatchSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RegexMatchSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/wafregional/v1beta2/zz_regexmatchset_types.go b/apis/wafregional/v1beta2/zz_regexmatchset_types.go new file mode 100755 index 0000000000..818742990a --- /dev/null +++ b/apis/wafregional/v1beta2/zz_regexmatchset_types.go @@ -0,0 +1,221 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RegexMatchSetInitParameters struct { + + // The name or description of the Regex Match Set. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. See below. + RegexMatchTuple []RegexMatchTupleInitParameters `json:"regexMatchTuple,omitempty" tf:"regex_match_tuple,omitempty"` +} + +type RegexMatchSetObservation struct { + + // The ID of the WAF Regional Regex Match Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name or description of the Regex Match Set. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. See below. + RegexMatchTuple []RegexMatchTupleObservation `json:"regexMatchTuple,omitempty" tf:"regex_match_tuple,omitempty"` +} + +type RegexMatchSetParameters struct { + + // The name or description of the Regex Match Set. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. See below. + // +kubebuilder:validation:Optional + RegexMatchTuple []RegexMatchTupleParameters `json:"regexMatchTuple,omitempty" tf:"regex_match_tuple,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` +} + +type RegexMatchTupleFieldToMatchInitParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. 
+ // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RegexMatchTupleFieldToMatchObservation struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RegexMatchTupleFieldToMatchParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RegexMatchTupleInitParameters struct { + + // The part of a web request that you want to search, such as a specified header or a query string. + FieldToMatch *RegexMatchTupleFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // The ID of a Regex Pattern Set. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/wafregional/v1beta1.RegexPatternSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RegexPatternSetID *string `json:"regexPatternSetId,omitempty" tf:"regex_pattern_set_id,omitempty"` + + // Reference to a RegexPatternSet in wafregional to populate regexPatternSetId. 
+ // +kubebuilder:validation:Optional + RegexPatternSetIDRef *v1.Reference `json:"regexPatternSetIdRef,omitempty" tf:"-"` + + // Selector for a RegexPatternSet in wafregional to populate regexPatternSetId. + // +kubebuilder:validation:Optional + RegexPatternSetIDSelector *v1.Selector `json:"regexPatternSetIdSelector,omitempty" tf:"-"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type RegexMatchTupleObservation struct { + + // The part of a web request that you want to search, such as a specified header or a query string. + FieldToMatch *RegexMatchTupleFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // The ID of a Regex Pattern Set. + RegexPatternSetID *string `json:"regexPatternSetId,omitempty" tf:"regex_pattern_set_id,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type RegexMatchTupleParameters struct { + + // The part of a web request that you want to search, such as a specified header or a query string. + // +kubebuilder:validation:Optional + FieldToMatch *RegexMatchTupleFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // The ID of a Regex Pattern Set. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/wafregional/v1beta1.RegexPatternSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RegexPatternSetID *string `json:"regexPatternSetId,omitempty" tf:"regex_pattern_set_id,omitempty"` + + // Reference to a RegexPatternSet in wafregional to populate regexPatternSetId. + // +kubebuilder:validation:Optional + RegexPatternSetIDRef *v1.Reference `json:"regexPatternSetIdRef,omitempty" tf:"-"` + + // Selector for a RegexPatternSet in wafregional to populate regexPatternSetId. + // +kubebuilder:validation:Optional + RegexPatternSetIDSelector *v1.Selector `json:"regexPatternSetIdSelector,omitempty" tf:"-"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +// RegexMatchSetSpec defines the desired state of RegexMatchSet +type RegexMatchSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RegexMatchSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider RegexMatchSetInitParameters `json:"initProvider,omitempty"` +} + +// RegexMatchSetStatus defines the observed state of RegexMatchSet. +type RegexMatchSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RegexMatchSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RegexMatchSet is the Schema for the RegexMatchSets API. Provides a AWS WAF Regional Regex Match Set resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type RegexMatchSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec RegexMatchSetSpec `json:"spec"` + Status RegexMatchSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RegexMatchSetList contains a list of RegexMatchSets +type RegexMatchSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RegexMatchSet `json:"items"` +} + +// Repository type metadata. +var ( + RegexMatchSet_Kind = "RegexMatchSet" + RegexMatchSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RegexMatchSet_Kind}.String() + RegexMatchSet_KindAPIVersion = RegexMatchSet_Kind + "." 
+ CRDGroupVersion.String() + RegexMatchSet_GroupVersionKind = CRDGroupVersion.WithKind(RegexMatchSet_Kind) +) + +func init() { + SchemeBuilder.Register(&RegexMatchSet{}, &RegexMatchSetList{}) +} diff --git a/apis/wafregional/v1beta2/zz_sizeconstraintset_terraformed.go b/apis/wafregional/v1beta2/zz_sizeconstraintset_terraformed.go new file mode 100755 index 0000000000..da7245ad55 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_sizeconstraintset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SizeConstraintSet +func (mg *SizeConstraintSet) GetTerraformResourceType() string { + return "aws_wafregional_size_constraint_set" +} + +// GetConnectionDetailsMapping for this SizeConstraintSet +func (tr *SizeConstraintSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SizeConstraintSet +func (tr *SizeConstraintSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SizeConstraintSet +func (tr *SizeConstraintSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SizeConstraintSet +func (tr *SizeConstraintSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SizeConstraintSet +func (tr 
*SizeConstraintSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SizeConstraintSet +func (tr *SizeConstraintSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SizeConstraintSet +func (tr *SizeConstraintSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SizeConstraintSet +func (tr *SizeConstraintSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SizeConstraintSet using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *SizeConstraintSet) LateInitialize(attrs []byte) (bool, error) { + params := &SizeConstraintSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SizeConstraintSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/wafregional/v1beta2/zz_sizeconstraintset_types.go b/apis/wafregional/v1beta2/zz_sizeconstraintset_types.go new file mode 100755 index 0000000000..c029ca0060 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_sizeconstraintset_types.go @@ -0,0 +1,227 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SizeConstraintSetInitParameters struct { + + // The name or description of the Size Constraint Set. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the parts of web requests that you want to inspect the size of. + SizeConstraints []SizeConstraintsInitParameters `json:"sizeConstraints,omitempty" tf:"size_constraints,omitempty"` +} + +type SizeConstraintSetObservation struct { + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The ID of the WAF Size Constraint Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name or description of the Size Constraint Set. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the parts of web requests that you want to inspect the size of. + SizeConstraints []SizeConstraintsObservation `json:"sizeConstraints,omitempty" tf:"size_constraints,omitempty"` +} + +type SizeConstraintSetParameters struct { + + // The name or description of the Size Constraint Set. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Specifies the parts of web requests that you want to inspect the size of. + // +kubebuilder:validation:Optional + SizeConstraints []SizeConstraintsParameters `json:"sizeConstraints,omitempty" tf:"size_constraints,omitempty"` +} + +type SizeConstraintsFieldToMatchInitParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SizeConstraintsFieldToMatchObservation struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SizeConstraintsFieldToMatchParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SizeConstraintsInitParameters struct { + + // The type of comparison you want to perform. + // e.g., EQ, NE, LT, GT. + // See docs for all supported values. + ComparisonOperator *string `json:"comparisonOperator,omitempty" tf:"comparison_operator,omitempty"` + + // Specifies where in a web request to look for the size constraint. + FieldToMatch *SizeConstraintsFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // The size in bytes that you want to compare against the size of the specified field_to_match. + // Valid values are between 0 - 21474836480 bytes (0 - 20 GB). + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + // Note: if you choose BODY as type, you must choose NONE because CloudFront forwards only the first 8192 bytes for inspection. 
+ TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type SizeConstraintsObservation struct { + + // The type of comparison you want to perform. + // e.g., EQ, NE, LT, GT. + // See docs for all supported values. + ComparisonOperator *string `json:"comparisonOperator,omitempty" tf:"comparison_operator,omitempty"` + + // Specifies where in a web request to look for the size constraint. + FieldToMatch *SizeConstraintsFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // The size in bytes that you want to compare against the size of the specified field_to_match. + // Valid values are between 0 - 21474836480 bytes (0 - 20 GB). + Size *float64 `json:"size,omitempty" tf:"size,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + // Note: if you choose BODY as type, you must choose NONE because CloudFront forwards only the first 8192 bytes for inspection. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type SizeConstraintsParameters struct { + + // The type of comparison you want to perform. + // e.g., EQ, NE, LT, GT. + // See docs for all supported values. + // +kubebuilder:validation:Optional + ComparisonOperator *string `json:"comparisonOperator" tf:"comparison_operator,omitempty"` + + // Specifies where in a web request to look for the size constraint. + // +kubebuilder:validation:Optional + FieldToMatch *SizeConstraintsFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // The size in bytes that you want to compare against the size of the specified field_to_match. 
+ // Valid values are between 0 - 21474836480 bytes (0 - 20 GB). + // +kubebuilder:validation:Optional + Size *float64 `json:"size" tf:"size,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + // Note: if you choose BODY as type, you must choose NONE because CloudFront forwards only the first 8192 bytes for inspection. + // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +// SizeConstraintSetSpec defines the desired state of SizeConstraintSet +type SizeConstraintSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SizeConstraintSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SizeConstraintSetInitParameters `json:"initProvider,omitempty"` +} + +// SizeConstraintSetStatus defines the observed state of SizeConstraintSet. 
+type SizeConstraintSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SizeConstraintSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SizeConstraintSet is the Schema for the SizeConstraintSets API. Provides an AWS WAF Regional Size Constraint Set resource for use with ALB. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SizeConstraintSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec SizeConstraintSetSpec `json:"spec"` + Status SizeConstraintSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SizeConstraintSetList contains a list of SizeConstraintSets +type SizeConstraintSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SizeConstraintSet `json:"items"` +} + +// Repository type metadata. +var ( + SizeConstraintSet_Kind = "SizeConstraintSet" + SizeConstraintSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SizeConstraintSet_Kind}.String() + SizeConstraintSet_KindAPIVersion = SizeConstraintSet_Kind + "." 
+ CRDGroupVersion.String() + SizeConstraintSet_GroupVersionKind = CRDGroupVersion.WithKind(SizeConstraintSet_Kind) +) + +func init() { + SchemeBuilder.Register(&SizeConstraintSet{}, &SizeConstraintSetList{}) +} diff --git a/apis/wafregional/v1beta2/zz_sqlinjectionmatchset_terraformed.go b/apis/wafregional/v1beta2/zz_sqlinjectionmatchset_terraformed.go new file mode 100755 index 0000000000..00cfb7620c --- /dev/null +++ b/apis/wafregional/v1beta2/zz_sqlinjectionmatchset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SQLInjectionMatchSet +func (mg *SQLInjectionMatchSet) GetTerraformResourceType() string { + return "aws_wafregional_sql_injection_match_set" +} + +// GetConnectionDetailsMapping for this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + 
+// GetParameters of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SQLInjectionMatchSet +func (tr *SQLInjectionMatchSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SQLInjectionMatchSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SQLInjectionMatchSet) LateInitialize(attrs []byte) (bool, error) { + params := &SQLInjectionMatchSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SQLInjectionMatchSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/wafregional/v1beta2/zz_sqlinjectionmatchset_types.go b/apis/wafregional/v1beta2/zz_sqlinjectionmatchset_types.go new file mode 100755 index 0000000000..49f2a4da09 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_sqlinjectionmatchset_types.go @@ -0,0 +1,194 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SQLInjectionMatchSetInitParameters struct { + + // The name or description of the SizeConstraintSet. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. + SQLInjectionMatchTuple []SQLInjectionMatchTupleInitParameters `json:"sqlInjectionMatchTuple,omitempty" tf:"sql_injection_match_tuple,omitempty"` +} + +type SQLInjectionMatchSetObservation struct { + + // The ID of the WAF SqlInjectionMatchSet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name or description of the SizeConstraintSet. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. + SQLInjectionMatchTuple []SQLInjectionMatchTupleObservation `json:"sqlInjectionMatchTuple,omitempty" tf:"sql_injection_match_tuple,omitempty"` +} + +type SQLInjectionMatchSetParameters struct { + + // The name or description of the SizeConstraintSet. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. + // +kubebuilder:validation:Optional + SQLInjectionMatchTuple []SQLInjectionMatchTupleParameters `json:"sqlInjectionMatchTuple,omitempty" tf:"sql_injection_match_tuple,omitempty"` +} + +type SQLInjectionMatchTupleFieldToMatchInitParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. 
+ Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SQLInjectionMatchTupleFieldToMatchObservation struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SQLInjectionMatchTupleFieldToMatchParameters struct { + + // When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + // If type is any other value, omit this field. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified string. + // e.g., HEADER, METHOD or BODY. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SQLInjectionMatchTupleInitParameters struct { + + // Specifies where in a web request to look for snippets of malicious SQL code. + FieldToMatch *SQLInjectionMatchTupleFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. 
+ // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type SQLInjectionMatchTupleObservation struct { + + // Specifies where in a web request to look for snippets of malicious SQL code. + FieldToMatch *SQLInjectionMatchTupleFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type SQLInjectionMatchTupleParameters struct { + + // Specifies where in a web request to look for snippets of malicious SQL code. + // +kubebuilder:validation:Optional + FieldToMatch *SQLInjectionMatchTupleFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + // If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + // e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + // See docs + // for all supported values. + // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +// SQLInjectionMatchSetSpec defines the desired state of SQLInjectionMatchSet +type SQLInjectionMatchSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SQLInjectionMatchSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SQLInjectionMatchSetInitParameters `json:"initProvider,omitempty"` +} + +// SQLInjectionMatchSetStatus defines the observed state of SQLInjectionMatchSet. +type SQLInjectionMatchSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SQLInjectionMatchSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SQLInjectionMatchSet is the Schema for the SQLInjectionMatchSets API. Provides a AWS WAF Regional SqlInjectionMatchSet resource for use with ALB. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type SQLInjectionMatchSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec SQLInjectionMatchSetSpec `json:"spec"` + Status SQLInjectionMatchSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SQLInjectionMatchSetList contains a list of SQLInjectionMatchSets +type SQLInjectionMatchSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SQLInjectionMatchSet `json:"items"` +} + +// Repository type metadata. +var ( + SQLInjectionMatchSet_Kind = "SQLInjectionMatchSet" + SQLInjectionMatchSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SQLInjectionMatchSet_Kind}.String() + SQLInjectionMatchSet_KindAPIVersion = SQLInjectionMatchSet_Kind + "." 
+ CRDGroupVersion.String() + SQLInjectionMatchSet_GroupVersionKind = CRDGroupVersion.WithKind(SQLInjectionMatchSet_Kind) +) + +func init() { + SchemeBuilder.Register(&SQLInjectionMatchSet{}, &SQLInjectionMatchSetList{}) +} diff --git a/apis/wafregional/v1beta2/zz_webacl_terraformed.go b/apis/wafregional/v1beta2/zz_webacl_terraformed.go new file mode 100755 index 0000000000..047a1c8b22 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_webacl_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WebACL +func (mg *WebACL) GetTerraformResourceType() string { + return "aws_wafregional_web_acl" +} + +// GetConnectionDetailsMapping for this WebACL +func (tr *WebACL) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this WebACL +func (tr *WebACL) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WebACL +func (tr *WebACL) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WebACL +func (tr *WebACL) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WebACL +func (tr *WebACL) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + 
} + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WebACL +func (tr *WebACL) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WebACL +func (tr *WebACL) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WebACL +func (tr *WebACL) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WebACL using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *WebACL) LateInitialize(attrs []byte) (bool, error) { + params := &WebACLParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WebACL) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/wafregional/v1beta2/zz_webacl_types.go b/apis/wafregional/v1beta2/zz_webacl_types.go new file mode 100755 index 0000000000..91aaf6e625 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_webacl_types.go @@ -0,0 +1,400 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a rule. Valid values for action are ALLOW, BLOCK or COUNT. Valid values for override_action are COUNT and NONE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ActionObservation struct { + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a rule. Valid values for action are ALLOW, BLOCK or COUNT. Valid values for override_action are COUNT and NONE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ActionParameters struct { + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a rule. 
Valid values for action are ALLOW, BLOCK or COUNT. Valid values for override_action are COUNT and NONE. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type DefaultActionInitParameters struct { + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a ruleE.g., ALLOW, BLOCK or COUNT + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DefaultActionObservation struct { + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a ruleE.g., ALLOW, BLOCK or COUNT + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DefaultActionParameters struct { + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a ruleE.g., ALLOW, BLOCK or COUNT + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LoggingConfigurationInitParameters struct { + + // Amazon Resource Name (ARN) of Kinesis Firehose Delivery Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Reference to a DeliveryStream in firehose to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationRef *v1.Reference `json:"logDestinationRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationSelector *v1.Selector `json:"logDestinationSelector,omitempty" tf:"-"` + + // Configuration block containing parts of the request that you want redacted from the logs. Detailed below. 
+ RedactedFields *RedactedFieldsInitParameters `json:"redactedFields,omitempty" tf:"redacted_fields,omitempty"` +} + +type LoggingConfigurationObservation struct { + + // Amazon Resource Name (ARN) of Kinesis Firehose Delivery Stream + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Configuration block containing parts of the request that you want redacted from the logs. Detailed below. + RedactedFields *RedactedFieldsObservation `json:"redactedFields,omitempty" tf:"redacted_fields,omitempty"` +} + +type LoggingConfigurationParameters struct { + + // Amazon Resource Name (ARN) of Kinesis Firehose Delivery Stream + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/firehose/v1beta2.DeliveryStream + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",false) + // +kubebuilder:validation:Optional + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Reference to a DeliveryStream in firehose to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationRef *v1.Reference `json:"logDestinationRef,omitempty" tf:"-"` + + // Selector for a DeliveryStream in firehose to populate logDestination. + // +kubebuilder:validation:Optional + LogDestinationSelector *v1.Selector `json:"logDestinationSelector,omitempty" tf:"-"` + + // Configuration block containing parts of the request that you want redacted from the logs. Detailed below. + // +kubebuilder:validation:Optional + RedactedFields *RedactedFieldsParameters `json:"redactedFields,omitempty" tf:"redacted_fields,omitempty"` +} + +type OverrideActionInitParameters struct { + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a rule. Valid values for action are ALLOW, BLOCK or COUNT. Valid values for override_action are COUNT and NONE. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OverrideActionObservation struct { + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a rule. Valid values for action are ALLOW, BLOCK or COUNT. Valid values for override_action are COUNT and NONE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OverrideActionParameters struct { + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a rule. Valid values for action are ALLOW, BLOCK or COUNT. Valid values for override_action are COUNT and NONE. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RedactedFieldsFieldToMatchInitParameters struct { + + // When the value of type is HEADER, enter the name of the header that you want the WAF to search, for example, User-Agent or Referer. If the value of type is any other value, omit data. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a rule. Valid values for action are ALLOW, BLOCK or COUNT. Valid values for override_action are COUNT and NONE. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RedactedFieldsFieldToMatchObservation struct { + + // When the value of type is HEADER, enter the name of the header that you want the WAF to search, for example, User-Agent or Referer. If the value of type is any other value, omit data. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // Specifies how you want AWS WAF Regional to respond to requests that match the settings in a rule. Valid values for action are ALLOW, BLOCK or COUNT. Valid values for override_action are COUNT and NONE. 
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type RedactedFieldsFieldToMatchParameters struct {
+
+	// When the value of type is HEADER, enter the name of the header that you want the WAF to search, for example, User-Agent or Referer. If the value of type is any other value, omit data.
+	// +kubebuilder:validation:Optional
+	Data *string `json:"data,omitempty" tf:"data,omitempty"`
+
+	// The part of the web request that you want AWS WAF to search and redact from the logs, for example HEADER or METHOD.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
+}
+
+type RedactedFieldsInitParameters struct {
+
+	// Set of configuration blocks for fields to redact. Detailed below.
+	FieldToMatch []RedactedFieldsFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"`
+}
+
+type RedactedFieldsObservation struct {
+
+	// Set of configuration blocks for fields to redact. Detailed below.
+	FieldToMatch []RedactedFieldsFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"`
+}
+
+type RedactedFieldsParameters struct {
+
+	// Set of configuration blocks for fields to redact. Detailed below.
+	// +kubebuilder:validation:Optional
+	FieldToMatch []RedactedFieldsFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"`
+}
+
+type RuleInitParameters struct {
+
+	// Configuration block of the action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Not used if type is GROUP. Detailed below.
+	Action *ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"`
+
+	// Configuration block of the override the action that a group requests CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Only used if type is GROUP. Detailed below.
+ OverrideAction *OverrideActionInitParameters `json:"overrideAction,omitempty" tf:"override_action,omitempty"` + + // Specifies the order in which the rules in a WebACL are evaluated. + // Rules with a lower value are evaluated before rules with a higher value. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // ID of the associated WAF (Regional) rule (e.g., aws_wafregional_rule). WAF (Global) rules cannot be used. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/wafregional/v1beta1.Rule + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RuleID *string `json:"ruleId,omitempty" tf:"rule_id,omitempty"` + + // Reference to a Rule in wafregional to populate ruleId. + // +kubebuilder:validation:Optional + RuleIDRef *v1.Reference `json:"ruleIdRef,omitempty" tf:"-"` + + // Selector for a Rule in wafregional to populate ruleId. + // +kubebuilder:validation:Optional + RuleIDSelector *v1.Selector `json:"ruleIdSelector,omitempty" tf:"-"` + + // The rule type, either REGULAR, as defined by Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, as defined by RuleGroup. The default is REGULAR. If you add a RATE_BASED rule, you need to set type as RATE_BASED. If you add a GROUP rule, you need to set type as GROUP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RuleObservation struct { + + // Configuration block of the action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Not used if type is GROUP. Detailed below. + Action *ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Configuration block of the override the action that a group requests CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Only used if type is GROUP. Detailed below. 
+ OverrideAction *OverrideActionObservation `json:"overrideAction,omitempty" tf:"override_action,omitempty"` + + // Specifies the order in which the rules in a WebACL are evaluated. + // Rules with a lower value are evaluated before rules with a higher value. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // ID of the associated WAF (Regional) rule (e.g., aws_wafregional_rule). WAF (Global) rules cannot be used. + RuleID *string `json:"ruleId,omitempty" tf:"rule_id,omitempty"` + + // The rule type, either REGULAR, as defined by Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, as defined by RuleGroup. The default is REGULAR. If you add a RATE_BASED rule, you need to set type as RATE_BASED. If you add a GROUP rule, you need to set type as GROUP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RuleParameters struct { + + // Configuration block of the action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Not used if type is GROUP. Detailed below. + // +kubebuilder:validation:Optional + Action *ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Configuration block of the override the action that a group requests CloudFront or AWS WAF takes when a web request matches the conditions in the rule. Only used if type is GROUP. Detailed below. + // +kubebuilder:validation:Optional + OverrideAction *OverrideActionParameters `json:"overrideAction,omitempty" tf:"override_action,omitempty"` + + // Specifies the order in which the rules in a WebACL are evaluated. + // Rules with a lower value are evaluated before rules with a higher value. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority" tf:"priority,omitempty"` + + // ID of the associated WAF (Regional) rule (e.g., aws_wafregional_rule). WAF (Global) rules cannot be used. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/wafregional/v1beta1.Rule + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RuleID *string `json:"ruleId,omitempty" tf:"rule_id,omitempty"` + + // Reference to a Rule in wafregional to populate ruleId. + // +kubebuilder:validation:Optional + RuleIDRef *v1.Reference `json:"ruleIdRef,omitempty" tf:"-"` + + // Selector for a Rule in wafregional to populate ruleId. + // +kubebuilder:validation:Optional + RuleIDSelector *v1.Selector `json:"ruleIdSelector,omitempty" tf:"-"` + + // The rule type, either REGULAR, as defined by Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, as defined by RuleGroup. The default is REGULAR. If you add a RATE_BASED rule, you need to set type as RATE_BASED. If you add a GROUP rule, you need to set type as GROUP. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WebACLInitParameters struct { + + // The action that you want AWS WAF Regional to take when a request doesn't match the criteria in any of the rules that are associated with the web ACL. + DefaultAction *DefaultActionInitParameters `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // Configuration block to enable WAF logging. Detailed below. + LoggingConfiguration *LoggingConfigurationInitParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // The name or description for the Amazon CloudWatch metric of this web ACL. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The name or description of the web ACL. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Set of configuration blocks containing rules for the web ACL. Detailed below. + Rule []RuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WebACLObservation struct { + + // Amazon Resource Name (ARN) of the WAF Regional WebACL. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The action that you want AWS WAF Regional to take when a request doesn't match the criteria in any of the rules that are associated with the web ACL. + DefaultAction *DefaultActionObservation `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // The ID of the WAF Regional WebACL. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration block to enable WAF logging. Detailed below. + LoggingConfiguration *LoggingConfigurationObservation `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // The name or description for the Amazon CloudWatch metric of this web ACL. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The name or description of the web ACL. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Set of configuration blocks containing rules for the web ACL. Detailed below. + Rule []RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type WebACLParameters struct { + + // The action that you want AWS WAF Regional to take when a request doesn't match the criteria in any of the rules that are associated with the web ACL. + // +kubebuilder:validation:Optional + DefaultAction *DefaultActionParameters `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // Configuration block to enable WAF logging. Detailed below. 
+ // +kubebuilder:validation:Optional + LoggingConfiguration *LoggingConfigurationParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // The name or description for the Amazon CloudWatch metric of this web ACL. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The name or description of the web ACL. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Set of configuration blocks containing rules for the web ACL. Detailed below. + // +kubebuilder:validation:Optional + Rule []RuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WebACLSpec defines the desired state of WebACL +type WebACLSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WebACLParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider WebACLInitParameters `json:"initProvider,omitempty"` +} + +// WebACLStatus defines the observed state of WebACL. +type WebACLStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WebACLObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WebACL is the Schema for the WebACLs API. Provides a AWS WAF Regional web access control group (ACL) resource for use with ALB. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type WebACL struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultAction) || (has(self.initProvider) && has(self.initProvider.defaultAction))",message="spec.forProvider.defaultAction is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.metricName) || (has(self.initProvider) && has(self.initProvider.metricName))",message="spec.forProvider.metricName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && 
has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec WebACLSpec `json:"spec"` + Status WebACLStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WebACLList contains a list of WebACLs +type WebACLList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WebACL `json:"items"` +} + +// Repository type metadata. +var ( + WebACL_Kind = "WebACL" + WebACL_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WebACL_Kind}.String() + WebACL_KindAPIVersion = WebACL_Kind + "." + CRDGroupVersion.String() + WebACL_GroupVersionKind = CRDGroupVersion.WithKind(WebACL_Kind) +) + +func init() { + SchemeBuilder.Register(&WebACL{}, &WebACLList{}) +} diff --git a/apis/wafregional/v1beta2/zz_xssmatchset_terraformed.go b/apis/wafregional/v1beta2/zz_xssmatchset_terraformed.go new file mode 100755 index 0000000000..88fa6d9276 --- /dev/null +++ b/apis/wafregional/v1beta2/zz_xssmatchset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this XSSMatchSet +func (mg *XSSMatchSet) GetTerraformResourceType() string { + return "aws_wafregional_xss_match_set" +} + +// GetConnectionDetailsMapping for this XSSMatchSet +func (tr *XSSMatchSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this XSSMatchSet +func (tr *XSSMatchSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this XSSMatchSet +func (tr *XSSMatchSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this XSSMatchSet +func (tr *XSSMatchSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this XSSMatchSet +func (tr *XSSMatchSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this XSSMatchSet +func (tr *XSSMatchSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this XSSMatchSet +func (tr *XSSMatchSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this XSSMatchSet
+func (tr *XSSMatchSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this XSSMatchSet using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *XSSMatchSet) LateInitialize(attrs []byte) (bool, error) {
+	params := &XSSMatchSetParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *XSSMatchSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/wafregional/v1beta2/zz_xssmatchset_types.go b/apis/wafregional/v1beta2/zz_xssmatchset_types.go new file mode 100755 index 0000000000..05db0365dc --- /dev/null +++ b/apis/wafregional/v1beta2/zz_xssmatchset_types.go @@ -0,0 +1,170 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type XSSMatchSetInitParameters struct { + + // The name of the set + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parts of web requests that you want to inspect for cross-site scripting attacks. + XSSMatchTuple []XSSMatchTupleInitParameters `json:"xssMatchTuple,omitempty" tf:"xss_match_tuple,omitempty"` +} + +type XSSMatchSetObservation struct { + + // The ID of the Regional WAF XSS Match Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the set + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parts of web requests that you want to inspect for cross-site scripting attacks. + XSSMatchTuple []XSSMatchTupleObservation `json:"xssMatchTuple,omitempty" tf:"xss_match_tuple,omitempty"` +} + +type XSSMatchSetParameters struct { + + // The name of the set + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The parts of web requests that you want to inspect for cross-site scripting attacks. 
+ // +kubebuilder:validation:Optional + XSSMatchTuple []XSSMatchTupleParameters `json:"xssMatchTuple,omitempty" tf:"xss_match_tuple,omitempty"` +} + +type XSSMatchTupleFieldToMatchInitParameters struct { + + // When the value of type is HEADER, enter the name of the header that you want the WAF to search, for example, User-Agent or Referer. If the value of type is any other value, omit data. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified stringE.g., HEADER or METHOD + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type XSSMatchTupleFieldToMatchObservation struct { + + // When the value of type is HEADER, enter the name of the header that you want the WAF to search, for example, User-Agent or Referer. If the value of type is any other value, omit data. + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified stringE.g., HEADER or METHOD + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type XSSMatchTupleFieldToMatchParameters struct { + + // When the value of type is HEADER, enter the name of the header that you want the WAF to search, for example, User-Agent or Referer. If the value of type is any other value, omit data. + // +kubebuilder:validation:Optional + Data *string `json:"data,omitempty" tf:"data,omitempty"` + + // The part of the web request that you want AWS WAF to search for a specified stringE.g., HEADER or METHOD + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type XSSMatchTupleInitParameters struct { + + // Specifies where in a web request to look for cross-site scripting attacks. 
+ FieldToMatch *XSSMatchTupleFieldToMatchInitParameters `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type XSSMatchTupleObservation struct { + + // Specifies where in a web request to look for cross-site scripting attacks. + FieldToMatch *XSSMatchTupleFieldToMatchObservation `json:"fieldToMatch,omitempty" tf:"field_to_match,omitempty"` + + // Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks. + TextTransformation *string `json:"textTransformation,omitempty" tf:"text_transformation,omitempty"` +} + +type XSSMatchTupleParameters struct { + + // Specifies where in a web request to look for cross-site scripting attacks. + // +kubebuilder:validation:Optional + FieldToMatch *XSSMatchTupleFieldToMatchParameters `json:"fieldToMatch" tf:"field_to_match,omitempty"` + + // Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks. + // +kubebuilder:validation:Optional + TextTransformation *string `json:"textTransformation" tf:"text_transformation,omitempty"` +} + +// XSSMatchSetSpec defines the desired state of XSSMatchSet +type XSSMatchSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider XSSMatchSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider XSSMatchSetInitParameters `json:"initProvider,omitempty"` +} + +// XSSMatchSetStatus defines the observed state of XSSMatchSet. +type XSSMatchSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider XSSMatchSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// XSSMatchSet is the Schema for the XSSMatchSets API. Provides an AWS WAF Regional XSS Match Set resource for use with ALB. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type XSSMatchSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec XSSMatchSetSpec `json:"spec"` + Status XSSMatchSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// XSSMatchSetList contains a list of XSSMatchSets +type XSSMatchSetList struct { + metav1.TypeMeta `json:",inline"` + 
metav1.ListMeta `json:"metadata,omitempty"` + Items []XSSMatchSet `json:"items"` +} + +// Repository type metadata. +var ( + XSSMatchSet_Kind = "XSSMatchSet" + XSSMatchSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: XSSMatchSet_Kind}.String() + XSSMatchSet_KindAPIVersion = XSSMatchSet_Kind + "." + CRDGroupVersion.String() + XSSMatchSet_GroupVersionKind = CRDGroupVersion.WithKind(XSSMatchSet_Kind) +) + +func init() { + SchemeBuilder.Register(&XSSMatchSet{}, &XSSMatchSetList{}) +} diff --git a/apis/workspaces/v1beta1/zz_generated.conversion_hubs.go b/apis/workspaces/v1beta1/zz_generated.conversion_hubs.go index c7aa524f66..4579c639a0 100755 --- a/apis/workspaces/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/workspaces/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Directory) Hub() {} - // Hub marks this type as a conversion hub. func (tr *IPGroup) Hub() {} diff --git a/apis/workspaces/v1beta1/zz_generated.conversion_spokes.go b/apis/workspaces/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..8813472983 --- /dev/null +++ b/apis/workspaces/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Directory to the hub type. 
+func (tr *Directory) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Directory type. +func (tr *Directory) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/workspaces/v1beta2/zz_directory_terraformed.go b/apis/workspaces/v1beta2/zz_directory_terraformed.go new file mode 100755 index 0000000000..40b12b5997 --- /dev/null +++ b/apis/workspaces/v1beta2/zz_directory_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Directory +func (mg *Directory) GetTerraformResourceType() string { + return "aws_workspaces_directory" +} + +// GetConnectionDetailsMapping for this Directory +func (tr *Directory) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Directory +func (tr *Directory) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Directory +func (tr *Directory) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Directory +func (tr *Directory) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Directory +func (tr *Directory) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Directory +func (tr *Directory) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Directory +func (tr *Directory) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Directory
+func (tr *Directory) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Directory using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Directory) LateInitialize(attrs []byte) (bool, error) {
+	params := &DirectoryParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Directory) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/workspaces/v1beta2/zz_directory_types.go b/apis/workspaces/v1beta2/zz_directory_types.go new file mode 100755 index 0000000000..f387757ff4 --- /dev/null +++ b/apis/workspaces/v1beta2/zz_directory_types.go @@ -0,0 +1,468 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DirectoryInitParameters struct { + + // The directory identifier for registration in WorkSpaces service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` + + // Reference to a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDRef *v1.Reference `json:"directoryIdRef,omitempty" tf:"-"` + + // Selector for a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDSelector *v1.Selector `json:"directoryIdSelector,omitempty" tf:"-"` + + // The identifiers of the IP access control groups associated with the directory. + // +listType=set + IPGroupIds []*string `json:"ipGroupIds,omitempty" tf:"ip_group_ids,omitempty"` + + // service capabilities. Defined below. + SelfServicePermissions *SelfServicePermissionsInitParameters `json:"selfServicePermissions,omitempty" tf:"self_service_permissions,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. 
+ // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The identifiers of the subnets where the directory resides. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // – Specifies which devices and operating systems users can use to access their WorkSpaces. Defined below. + WorkspaceAccessProperties *WorkspaceAccessPropertiesInitParameters `json:"workspaceAccessProperties,omitempty" tf:"workspace_access_properties,omitempty"` + + // – Default properties that are used for creating WorkSpaces. Defined below. + WorkspaceCreationProperties *WorkspaceCreationPropertiesInitParameters `json:"workspaceCreationProperties,omitempty" tf:"workspace_creation_properties,omitempty"` +} + +type DirectoryObservation struct { + + // The directory alias. + Alias *string `json:"alias,omitempty" tf:"alias,omitempty"` + + // The user name for the service account. + CustomerUserName *string `json:"customerUserName,omitempty" tf:"customer_user_name,omitempty"` + + // The IP addresses of the DNS servers for the directory. + // +listType=set + DNSIPAddresses []*string `json:"dnsIpAddresses,omitempty" tf:"dns_ip_addresses,omitempty"` + + // The directory identifier for registration in WorkSpaces service. + DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` + + // The name of the directory. 
+ DirectoryName *string `json:"directoryName,omitempty" tf:"directory_name,omitempty"` + + // The directory type. + DirectoryType *string `json:"directoryType,omitempty" tf:"directory_type,omitempty"` + + // The identifier of the IAM role. This is the role that allows Amazon WorkSpaces to make calls to other services, such as Amazon EC2, on your behalf. + IAMRoleID *string `json:"iamRoleId,omitempty" tf:"iam_role_id,omitempty"` + + // The WorkSpaces directory identifier. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The identifiers of the IP access control groups associated with the directory. + // +listType=set + IPGroupIds []*string `json:"ipGroupIds,omitempty" tf:"ip_group_ids,omitempty"` + + // The registration code for the directory. This is the code that users enter in their Amazon WorkSpaces client application to connect to the directory. + RegistrationCode *string `json:"registrationCode,omitempty" tf:"registration_code,omitempty"` + + // service capabilities. Defined below. + SelfServicePermissions *SelfServicePermissionsObservation `json:"selfServicePermissions,omitempty" tf:"self_service_permissions,omitempty"` + + // The identifiers of the subnets where the directory resides. + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // – Specifies which devices and operating systems users can use to access their WorkSpaces. Defined below. 
+ WorkspaceAccessProperties *WorkspaceAccessPropertiesObservation `json:"workspaceAccessProperties,omitempty" tf:"workspace_access_properties,omitempty"` + + // – Default properties that are used for creating WorkSpaces. Defined below. + WorkspaceCreationProperties *WorkspaceCreationPropertiesObservation `json:"workspaceCreationProperties,omitempty" tf:"workspace_creation_properties,omitempty"` + + // The identifier of the security group that is assigned to new WorkSpaces. + WorkspaceSecurityGroupID *string `json:"workspaceSecurityGroupId,omitempty" tf:"workspace_security_group_id,omitempty"` +} + +type DirectoryParameters struct { + + // The directory identifier for registration in WorkSpaces service. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ds/v1beta2.Directory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DirectoryID *string `json:"directoryId,omitempty" tf:"directory_id,omitempty"` + + // Reference to a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDRef *v1.Reference `json:"directoryIdRef,omitempty" tf:"-"` + + // Selector for a Directory in ds to populate directoryId. + // +kubebuilder:validation:Optional + DirectoryIDSelector *v1.Selector `json:"directoryIdSelector,omitempty" tf:"-"` + + // The identifiers of the IP access control groups associated with the directory. + // +kubebuilder:validation:Optional + // +listType=set + IPGroupIds []*string `json:"ipGroupIds,omitempty" tf:"ip_group_ids,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // service capabilities. Defined below. 
+ // +kubebuilder:validation:Optional + SelfServicePermissions *SelfServicePermissionsParameters `json:"selfServicePermissions,omitempty" tf:"self_service_permissions,omitempty"` + + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The identifiers of the subnets where the directory resides. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector + // +kubebuilder:validation:Optional + // +listType=set + SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // – Specifies which devices and operating systems users can use to access their WorkSpaces. Defined below. + // +kubebuilder:validation:Optional + WorkspaceAccessProperties *WorkspaceAccessPropertiesParameters `json:"workspaceAccessProperties,omitempty" tf:"workspace_access_properties,omitempty"` + + // – Default properties that are used for creating WorkSpaces. Defined below. + // +kubebuilder:validation:Optional + WorkspaceCreationProperties *WorkspaceCreationPropertiesParameters `json:"workspaceCreationProperties,omitempty" tf:"workspace_creation_properties,omitempty"` +} + +type SelfServicePermissionsInitParameters struct { + + // – Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. Default false. 
+ ChangeComputeType *bool `json:"changeComputeType,omitempty" tf:"change_compute_type,omitempty"` + + // – Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. Default false. + IncreaseVolumeSize *bool `json:"increaseVolumeSize,omitempty" tf:"increase_volume_size,omitempty"` + + // – Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. Default false. + RebuildWorkspace *bool `json:"rebuildWorkspace,omitempty" tf:"rebuild_workspace,omitempty"` + + // – Whether WorkSpaces directory users can restart their workspace. Default true. + RestartWorkspace *bool `json:"restartWorkspace,omitempty" tf:"restart_workspace,omitempty"` + + // – Whether WorkSpaces directory users can switch the running mode of their workspace. Default false. + SwitchRunningMode *bool `json:"switchRunningMode,omitempty" tf:"switch_running_mode,omitempty"` +} + +type SelfServicePermissionsObservation struct { + + // – Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. Default false. + ChangeComputeType *bool `json:"changeComputeType,omitempty" tf:"change_compute_type,omitempty"` + + // – Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. Default false. + IncreaseVolumeSize *bool `json:"increaseVolumeSize,omitempty" tf:"increase_volume_size,omitempty"` + + // – Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. Default false. + RebuildWorkspace *bool `json:"rebuildWorkspace,omitempty" tf:"rebuild_workspace,omitempty"` + + // – Whether WorkSpaces directory users can restart their workspace. Default true. + RestartWorkspace *bool `json:"restartWorkspace,omitempty" tf:"restart_workspace,omitempty"` + + // – Whether WorkSpaces directory users can switch the running mode of their workspace. Default false. 
+ SwitchRunningMode *bool `json:"switchRunningMode,omitempty" tf:"switch_running_mode,omitempty"` +} + +type SelfServicePermissionsParameters struct { + + // – Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. Default false. + // +kubebuilder:validation:Optional + ChangeComputeType *bool `json:"changeComputeType,omitempty" tf:"change_compute_type,omitempty"` + + // – Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. Default false. + // +kubebuilder:validation:Optional + IncreaseVolumeSize *bool `json:"increaseVolumeSize,omitempty" tf:"increase_volume_size,omitempty"` + + // – Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. Default false. + // +kubebuilder:validation:Optional + RebuildWorkspace *bool `json:"rebuildWorkspace,omitempty" tf:"rebuild_workspace,omitempty"` + + // – Whether WorkSpaces directory users can restart their workspace. Default true. + // +kubebuilder:validation:Optional + RestartWorkspace *bool `json:"restartWorkspace,omitempty" tf:"restart_workspace,omitempty"` + + // – Whether WorkSpaces directory users can switch the running mode of their workspace. Default false. + // +kubebuilder:validation:Optional + SwitchRunningMode *bool `json:"switchRunningMode,omitempty" tf:"switch_running_mode,omitempty"` +} + +type WorkspaceAccessPropertiesInitParameters struct { + + // – Indicates whether users can use Android devices to access their WorkSpaces. + DeviceTypeAndroid *string `json:"deviceTypeAndroid,omitempty" tf:"device_type_android,omitempty"` + + // – Indicates whether users can use Chromebooks to access their WorkSpaces. + DeviceTypeChromeos *string `json:"deviceTypeChromeos,omitempty" tf:"device_type_chromeos,omitempty"` + + // – Indicates whether users can use iOS devices to access their WorkSpaces. 
+ DeviceTypeIos *string `json:"deviceTypeIos,omitempty" tf:"device_type_ios,omitempty"` + + // – Indicates whether users can use Linux clients to access their WorkSpaces. + DeviceTypeLinux *string `json:"deviceTypeLinux,omitempty" tf:"device_type_linux,omitempty"` + + // – Indicates whether users can use macOS clients to access their WorkSpaces. + DeviceTypeOsx *string `json:"deviceTypeOsx,omitempty" tf:"device_type_osx,omitempty"` + + // – Indicates whether users can access their WorkSpaces through a web browser. + DeviceTypeWeb *string `json:"deviceTypeWeb,omitempty" tf:"device_type_web,omitempty"` + + // – Indicates whether users can use Windows clients to access their WorkSpaces. + DeviceTypeWindows *string `json:"deviceTypeWindows,omitempty" tf:"device_type_windows,omitempty"` + + // – Indicates whether users can use zero client devices to access their WorkSpaces. + DeviceTypeZeroclient *string `json:"deviceTypeZeroclient,omitempty" tf:"device_type_zeroclient,omitempty"` +} + +type WorkspaceAccessPropertiesObservation struct { + + // – Indicates whether users can use Android devices to access their WorkSpaces. + DeviceTypeAndroid *string `json:"deviceTypeAndroid,omitempty" tf:"device_type_android,omitempty"` + + // – Indicates whether users can use Chromebooks to access their WorkSpaces. + DeviceTypeChromeos *string `json:"deviceTypeChromeos,omitempty" tf:"device_type_chromeos,omitempty"` + + // – Indicates whether users can use iOS devices to access their WorkSpaces. + DeviceTypeIos *string `json:"deviceTypeIos,omitempty" tf:"device_type_ios,omitempty"` + + // – Indicates whether users can use Linux clients to access their WorkSpaces. + DeviceTypeLinux *string `json:"deviceTypeLinux,omitempty" tf:"device_type_linux,omitempty"` + + // – Indicates whether users can use macOS clients to access their WorkSpaces. 
+ DeviceTypeOsx *string `json:"deviceTypeOsx,omitempty" tf:"device_type_osx,omitempty"` + + // – Indicates whether users can access their WorkSpaces through a web browser. + DeviceTypeWeb *string `json:"deviceTypeWeb,omitempty" tf:"device_type_web,omitempty"` + + // – Indicates whether users can use Windows clients to access their WorkSpaces. + DeviceTypeWindows *string `json:"deviceTypeWindows,omitempty" tf:"device_type_windows,omitempty"` + + // – Indicates whether users can use zero client devices to access their WorkSpaces. + DeviceTypeZeroclient *string `json:"deviceTypeZeroclient,omitempty" tf:"device_type_zeroclient,omitempty"` +} + +type WorkspaceAccessPropertiesParameters struct { + + // – Indicates whether users can use Android devices to access their WorkSpaces. + // +kubebuilder:validation:Optional + DeviceTypeAndroid *string `json:"deviceTypeAndroid,omitempty" tf:"device_type_android,omitempty"` + + // – Indicates whether users can use Chromebooks to access their WorkSpaces. + // +kubebuilder:validation:Optional + DeviceTypeChromeos *string `json:"deviceTypeChromeos,omitempty" tf:"device_type_chromeos,omitempty"` + + // – Indicates whether users can use iOS devices to access their WorkSpaces. + // +kubebuilder:validation:Optional + DeviceTypeIos *string `json:"deviceTypeIos,omitempty" tf:"device_type_ios,omitempty"` + + // – Indicates whether users can use Linux clients to access their WorkSpaces. + // +kubebuilder:validation:Optional + DeviceTypeLinux *string `json:"deviceTypeLinux,omitempty" tf:"device_type_linux,omitempty"` + + // – Indicates whether users can use macOS clients to access their WorkSpaces. + // +kubebuilder:validation:Optional + DeviceTypeOsx *string `json:"deviceTypeOsx,omitempty" tf:"device_type_osx,omitempty"` + + // – Indicates whether users can access their WorkSpaces through a web browser. 
+ // +kubebuilder:validation:Optional + DeviceTypeWeb *string `json:"deviceTypeWeb,omitempty" tf:"device_type_web,omitempty"` + + // – Indicates whether users can use Windows clients to access their WorkSpaces. + // +kubebuilder:validation:Optional + DeviceTypeWindows *string `json:"deviceTypeWindows,omitempty" tf:"device_type_windows,omitempty"` + + // – Indicates whether users can use zero client devices to access their WorkSpaces. + // +kubebuilder:validation:Optional + DeviceTypeZeroclient *string `json:"deviceTypeZeroclient,omitempty" tf:"device_type_zeroclient,omitempty"` +} + +type WorkspaceCreationPropertiesInitParameters struct { + + // – The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + CustomSecurityGroupID *string `json:"customSecurityGroupId,omitempty" tf:"custom_security_group_id,omitempty"` + + // Reference to a SecurityGroup in ec2 to populate customSecurityGroupId. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRef *v1.Reference `json:"customSecurityGroupIdRef,omitempty" tf:"-"` + + // Selector for a SecurityGroup in ec2 to populate customSecurityGroupId. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // – The default organizational unit (OU) for your WorkSpace directories. Should conform "OU=,DC=,...,DC=" pattern. + DefaultOu *string `json:"defaultOu,omitempty" tf:"default_ou,omitempty"` + + // – Indicates whether internet access is enabled for your WorkSpaces. + EnableInternetAccess *bool `json:"enableInternetAccess,omitempty" tf:"enable_internet_access,omitempty"` + + // – Indicates whether maintenance mode is enabled for your WorkSpaces. 
For more information, see WorkSpace Maintenance.. + EnableMaintenanceMode *bool `json:"enableMaintenanceMode,omitempty" tf:"enable_maintenance_mode,omitempty"` + + // – Indicates whether users are local administrators of their WorkSpaces. + UserEnabledAsLocalAdministrator *bool `json:"userEnabledAsLocalAdministrator,omitempty" tf:"user_enabled_as_local_administrator,omitempty"` +} + +type WorkspaceCreationPropertiesObservation struct { + + // – The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. + CustomSecurityGroupID *string `json:"customSecurityGroupId,omitempty" tf:"custom_security_group_id,omitempty"` + + // – The default organizational unit (OU) for your WorkSpace directories. Should conform "OU=,DC=,...,DC=" pattern. + DefaultOu *string `json:"defaultOu,omitempty" tf:"default_ou,omitempty"` + + // – Indicates whether internet access is enabled for your WorkSpaces. + EnableInternetAccess *bool `json:"enableInternetAccess,omitempty" tf:"enable_internet_access,omitempty"` + + // – Indicates whether maintenance mode is enabled for your WorkSpaces. For more information, see WorkSpace Maintenance.. + EnableMaintenanceMode *bool `json:"enableMaintenanceMode,omitempty" tf:"enable_maintenance_mode,omitempty"` + + // – Indicates whether users are local administrators of their WorkSpaces. + UserEnabledAsLocalAdministrator *bool `json:"userEnabledAsLocalAdministrator,omitempty" tf:"user_enabled_as_local_administrator,omitempty"` +} + +type WorkspaceCreationPropertiesParameters struct { + + // – The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CustomSecurityGroupID *string `json:"customSecurityGroupId,omitempty" tf:"custom_security_group_id,omitempty"` + + // Reference to a SecurityGroup in ec2 to populate customSecurityGroupId. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDRef *v1.Reference `json:"customSecurityGroupIdRef,omitempty" tf:"-"` + + // Selector for a SecurityGroup in ec2 to populate customSecurityGroupId. + // +kubebuilder:validation:Optional + CustomSecurityGroupIDSelector *v1.Selector `json:"customSecurityGroupIdSelector,omitempty" tf:"-"` + + // – The default organizational unit (OU) for your WorkSpace directories. Should conform "OU=,DC=,...,DC=" pattern. + // +kubebuilder:validation:Optional + DefaultOu *string `json:"defaultOu,omitempty" tf:"default_ou,omitempty"` + + // – Indicates whether internet access is enabled for your WorkSpaces. + // +kubebuilder:validation:Optional + EnableInternetAccess *bool `json:"enableInternetAccess,omitempty" tf:"enable_internet_access,omitempty"` + + // – Indicates whether maintenance mode is enabled for your WorkSpaces. For more information, see WorkSpace Maintenance.. + // +kubebuilder:validation:Optional + EnableMaintenanceMode *bool `json:"enableMaintenanceMode,omitempty" tf:"enable_maintenance_mode,omitempty"` + + // – Indicates whether users are local administrators of their WorkSpaces. + // +kubebuilder:validation:Optional + UserEnabledAsLocalAdministrator *bool `json:"userEnabledAsLocalAdministrator,omitempty" tf:"user_enabled_as_local_administrator,omitempty"` +} + +// DirectorySpec defines the desired state of Directory +type DirectorySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DirectoryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DirectoryInitParameters `json:"initProvider,omitempty"` +} + +// DirectoryStatus defines the observed state of Directory. +type DirectoryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DirectoryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Directory is the Schema for the Directorys API. Provides a WorkSpaces directory in AWS WorkSpaces Service. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Directory struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DirectorySpec `json:"spec"` + Status DirectoryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DirectoryList contains a list of Directorys +type DirectoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Directory `json:"items"` +} + +// Repository type metadata. +var ( + Directory_Kind = "Directory" + Directory_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Directory_Kind}.String() + Directory_KindAPIVersion = Directory_Kind + "." + CRDGroupVersion.String() + Directory_GroupVersionKind = CRDGroupVersion.WithKind(Directory_Kind) +) + +func init() { + SchemeBuilder.Register(&Directory{}, &DirectoryList{}) +} diff --git a/apis/workspaces/v1beta2/zz_generated.conversion_hubs.go b/apis/workspaces/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..4e320edf96 --- /dev/null +++ b/apis/workspaces/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Directory) Hub() {} diff --git a/apis/workspaces/v1beta2/zz_generated.deepcopy.go b/apis/workspaces/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..1daa2e4ddf --- /dev/null +++ b/apis/workspaces/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,868 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Directory) DeepCopyInto(out *Directory) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Directory. +func (in *Directory) DeepCopy() *Directory { + if in == nil { + return nil + } + out := new(Directory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Directory) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryInitParameters) DeepCopyInto(out *DirectoryInitParameters) { + *out = *in + if in.DirectoryID != nil { + in, out := &in.DirectoryID, &out.DirectoryID + *out = new(string) + **out = **in + } + if in.DirectoryIDRef != nil { + in, out := &in.DirectoryIDRef, &out.DirectoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DirectoryIDSelector != nil { + in, out := &in.DirectoryIDSelector, &out.DirectoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IPGroupIds != nil { + in, out := &in.IPGroupIds, &out.IPGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SelfServicePermissions != nil { + in, out := &in.SelfServicePermissions, &out.SelfServicePermissions + *out = new(SelfServicePermissionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceAccessProperties != nil { + in, out := &in.WorkspaceAccessProperties, &out.WorkspaceAccessProperties + *out = new(WorkspaceAccessPropertiesInitParameters) 
+ (*in).DeepCopyInto(*out) + } + if in.WorkspaceCreationProperties != nil { + in, out := &in.WorkspaceCreationProperties, &out.WorkspaceCreationProperties + *out = new(WorkspaceCreationPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryInitParameters. +func (in *DirectoryInitParameters) DeepCopy() *DirectoryInitParameters { + if in == nil { + return nil + } + out := new(DirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryList) DeepCopyInto(out *DirectoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Directory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryList. +func (in *DirectoryList) DeepCopy() *DirectoryList { + if in == nil { + return nil + } + out := new(DirectoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DirectoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryObservation) DeepCopyInto(out *DirectoryObservation) { + *out = *in + if in.Alias != nil { + in, out := &in.Alias, &out.Alias + *out = new(string) + **out = **in + } + if in.CustomerUserName != nil { + in, out := &in.CustomerUserName, &out.CustomerUserName + *out = new(string) + **out = **in + } + if in.DNSIPAddresses != nil { + in, out := &in.DNSIPAddresses, &out.DNSIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DirectoryID != nil { + in, out := &in.DirectoryID, &out.DirectoryID + *out = new(string) + **out = **in + } + if in.DirectoryName != nil { + in, out := &in.DirectoryName, &out.DirectoryName + *out = new(string) + **out = **in + } + if in.DirectoryType != nil { + in, out := &in.DirectoryType, &out.DirectoryType + *out = new(string) + **out = **in + } + if in.IAMRoleID != nil { + in, out := &in.IAMRoleID, &out.IAMRoleID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPGroupIds != nil { + in, out := &in.IPGroupIds, &out.IPGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RegistrationCode != nil { + in, out := &in.RegistrationCode, &out.RegistrationCode + *out = new(string) + **out = **in + } + if in.SelfServicePermissions != nil { + in, out := &in.SelfServicePermissions, &out.SelfServicePermissions + *out = new(SelfServicePermissionsObservation) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceAccessProperties != nil { + in, out := &in.WorkspaceAccessProperties, &out.WorkspaceAccessProperties + *out = new(WorkspaceAccessPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceCreationProperties != nil { + in, out := &in.WorkspaceCreationProperties, &out.WorkspaceCreationProperties + *out = new(WorkspaceCreationPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceSecurityGroupID != nil { + in, out := &in.WorkspaceSecurityGroupID, &out.WorkspaceSecurityGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryObservation. +func (in *DirectoryObservation) DeepCopy() *DirectoryObservation { + if in == nil { + return nil + } + out := new(DirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryParameters) DeepCopyInto(out *DirectoryParameters) { + *out = *in + if in.DirectoryID != nil { + in, out := &in.DirectoryID, &out.DirectoryID + *out = new(string) + **out = **in + } + if in.DirectoryIDRef != nil { + in, out := &in.DirectoryIDRef, &out.DirectoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DirectoryIDSelector != nil { + in, out := &in.DirectoryIDSelector, &out.DirectoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IPGroupIds != nil { + in, out := &in.IPGroupIds, &out.IPGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SelfServicePermissions != nil { + in, out := &in.SelfServicePermissions, &out.SelfServicePermissions + *out = new(SelfServicePermissionsParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceAccessProperties != nil { + in, out := 
&in.WorkspaceAccessProperties, &out.WorkspaceAccessProperties + *out = new(WorkspaceAccessPropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceCreationProperties != nil { + in, out := &in.WorkspaceCreationProperties, &out.WorkspaceCreationProperties + *out = new(WorkspaceCreationPropertiesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryParameters. +func (in *DirectoryParameters) DeepCopy() *DirectoryParameters { + if in == nil { + return nil + } + out := new(DirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectorySpec) DeepCopyInto(out *DirectorySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectorySpec. +func (in *DirectorySpec) DeepCopy() *DirectorySpec { + if in == nil { + return nil + } + out := new(DirectorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryStatus) DeepCopyInto(out *DirectoryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryStatus. +func (in *DirectoryStatus) DeepCopy() *DirectoryStatus { + if in == nil { + return nil + } + out := new(DirectoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfServicePermissionsInitParameters) DeepCopyInto(out *SelfServicePermissionsInitParameters) { + *out = *in + if in.ChangeComputeType != nil { + in, out := &in.ChangeComputeType, &out.ChangeComputeType + *out = new(bool) + **out = **in + } + if in.IncreaseVolumeSize != nil { + in, out := &in.IncreaseVolumeSize, &out.IncreaseVolumeSize + *out = new(bool) + **out = **in + } + if in.RebuildWorkspace != nil { + in, out := &in.RebuildWorkspace, &out.RebuildWorkspace + *out = new(bool) + **out = **in + } + if in.RestartWorkspace != nil { + in, out := &in.RestartWorkspace, &out.RestartWorkspace + *out = new(bool) + **out = **in + } + if in.SwitchRunningMode != nil { + in, out := &in.SwitchRunningMode, &out.SwitchRunningMode + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfServicePermissionsInitParameters. +func (in *SelfServicePermissionsInitParameters) DeepCopy() *SelfServicePermissionsInitParameters { + if in == nil { + return nil + } + out := new(SelfServicePermissionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfServicePermissionsObservation) DeepCopyInto(out *SelfServicePermissionsObservation) { + *out = *in + if in.ChangeComputeType != nil { + in, out := &in.ChangeComputeType, &out.ChangeComputeType + *out = new(bool) + **out = **in + } + if in.IncreaseVolumeSize != nil { + in, out := &in.IncreaseVolumeSize, &out.IncreaseVolumeSize + *out = new(bool) + **out = **in + } + if in.RebuildWorkspace != nil { + in, out := &in.RebuildWorkspace, &out.RebuildWorkspace + *out = new(bool) + **out = **in + } + if in.RestartWorkspace != nil { + in, out := &in.RestartWorkspace, &out.RestartWorkspace + *out = new(bool) + **out = **in + } + if in.SwitchRunningMode != nil { + in, out := &in.SwitchRunningMode, &out.SwitchRunningMode + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfServicePermissionsObservation. +func (in *SelfServicePermissionsObservation) DeepCopy() *SelfServicePermissionsObservation { + if in == nil { + return nil + } + out := new(SelfServicePermissionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfServicePermissionsParameters) DeepCopyInto(out *SelfServicePermissionsParameters) { + *out = *in + if in.ChangeComputeType != nil { + in, out := &in.ChangeComputeType, &out.ChangeComputeType + *out = new(bool) + **out = **in + } + if in.IncreaseVolumeSize != nil { + in, out := &in.IncreaseVolumeSize, &out.IncreaseVolumeSize + *out = new(bool) + **out = **in + } + if in.RebuildWorkspace != nil { + in, out := &in.RebuildWorkspace, &out.RebuildWorkspace + *out = new(bool) + **out = **in + } + if in.RestartWorkspace != nil { + in, out := &in.RestartWorkspace, &out.RestartWorkspace + *out = new(bool) + **out = **in + } + if in.SwitchRunningMode != nil { + in, out := &in.SwitchRunningMode, &out.SwitchRunningMode + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfServicePermissionsParameters. +func (in *SelfServicePermissionsParameters) DeepCopy() *SelfServicePermissionsParameters { + if in == nil { + return nil + } + out := new(SelfServicePermissionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceAccessPropertiesInitParameters) DeepCopyInto(out *WorkspaceAccessPropertiesInitParameters) { + *out = *in + if in.DeviceTypeAndroid != nil { + in, out := &in.DeviceTypeAndroid, &out.DeviceTypeAndroid + *out = new(string) + **out = **in + } + if in.DeviceTypeChromeos != nil { + in, out := &in.DeviceTypeChromeos, &out.DeviceTypeChromeos + *out = new(string) + **out = **in + } + if in.DeviceTypeIos != nil { + in, out := &in.DeviceTypeIos, &out.DeviceTypeIos + *out = new(string) + **out = **in + } + if in.DeviceTypeLinux != nil { + in, out := &in.DeviceTypeLinux, &out.DeviceTypeLinux + *out = new(string) + **out = **in + } + if in.DeviceTypeOsx != nil { + in, out := &in.DeviceTypeOsx, &out.DeviceTypeOsx + *out = new(string) + **out = **in + } + if in.DeviceTypeWeb != nil { + in, out := &in.DeviceTypeWeb, &out.DeviceTypeWeb + *out = new(string) + **out = **in + } + if in.DeviceTypeWindows != nil { + in, out := &in.DeviceTypeWindows, &out.DeviceTypeWindows + *out = new(string) + **out = **in + } + if in.DeviceTypeZeroclient != nil { + in, out := &in.DeviceTypeZeroclient, &out.DeviceTypeZeroclient + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceAccessPropertiesInitParameters. +func (in *WorkspaceAccessPropertiesInitParameters) DeepCopy() *WorkspaceAccessPropertiesInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceAccessPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceAccessPropertiesObservation) DeepCopyInto(out *WorkspaceAccessPropertiesObservation) { + *out = *in + if in.DeviceTypeAndroid != nil { + in, out := &in.DeviceTypeAndroid, &out.DeviceTypeAndroid + *out = new(string) + **out = **in + } + if in.DeviceTypeChromeos != nil { + in, out := &in.DeviceTypeChromeos, &out.DeviceTypeChromeos + *out = new(string) + **out = **in + } + if in.DeviceTypeIos != nil { + in, out := &in.DeviceTypeIos, &out.DeviceTypeIos + *out = new(string) + **out = **in + } + if in.DeviceTypeLinux != nil { + in, out := &in.DeviceTypeLinux, &out.DeviceTypeLinux + *out = new(string) + **out = **in + } + if in.DeviceTypeOsx != nil { + in, out := &in.DeviceTypeOsx, &out.DeviceTypeOsx + *out = new(string) + **out = **in + } + if in.DeviceTypeWeb != nil { + in, out := &in.DeviceTypeWeb, &out.DeviceTypeWeb + *out = new(string) + **out = **in + } + if in.DeviceTypeWindows != nil { + in, out := &in.DeviceTypeWindows, &out.DeviceTypeWindows + *out = new(string) + **out = **in + } + if in.DeviceTypeZeroclient != nil { + in, out := &in.DeviceTypeZeroclient, &out.DeviceTypeZeroclient + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceAccessPropertiesObservation. +func (in *WorkspaceAccessPropertiesObservation) DeepCopy() *WorkspaceAccessPropertiesObservation { + if in == nil { + return nil + } + out := new(WorkspaceAccessPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceAccessPropertiesParameters) DeepCopyInto(out *WorkspaceAccessPropertiesParameters) { + *out = *in + if in.DeviceTypeAndroid != nil { + in, out := &in.DeviceTypeAndroid, &out.DeviceTypeAndroid + *out = new(string) + **out = **in + } + if in.DeviceTypeChromeos != nil { + in, out := &in.DeviceTypeChromeos, &out.DeviceTypeChromeos + *out = new(string) + **out = **in + } + if in.DeviceTypeIos != nil { + in, out := &in.DeviceTypeIos, &out.DeviceTypeIos + *out = new(string) + **out = **in + } + if in.DeviceTypeLinux != nil { + in, out := &in.DeviceTypeLinux, &out.DeviceTypeLinux + *out = new(string) + **out = **in + } + if in.DeviceTypeOsx != nil { + in, out := &in.DeviceTypeOsx, &out.DeviceTypeOsx + *out = new(string) + **out = **in + } + if in.DeviceTypeWeb != nil { + in, out := &in.DeviceTypeWeb, &out.DeviceTypeWeb + *out = new(string) + **out = **in + } + if in.DeviceTypeWindows != nil { + in, out := &in.DeviceTypeWindows, &out.DeviceTypeWindows + *out = new(string) + **out = **in + } + if in.DeviceTypeZeroclient != nil { + in, out := &in.DeviceTypeZeroclient, &out.DeviceTypeZeroclient + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceAccessPropertiesParameters. +func (in *WorkspaceAccessPropertiesParameters) DeepCopy() *WorkspaceAccessPropertiesParameters { + if in == nil { + return nil + } + out := new(WorkspaceAccessPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceCreationPropertiesInitParameters) DeepCopyInto(out *WorkspaceCreationPropertiesInitParameters) { + *out = *in + if in.CustomSecurityGroupID != nil { + in, out := &in.CustomSecurityGroupID, &out.CustomSecurityGroupID + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRef != nil { + in, out := &in.CustomSecurityGroupIDRef, &out.CustomSecurityGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DefaultOu != nil { + in, out := &in.DefaultOu, &out.DefaultOu + *out = new(string) + **out = **in + } + if in.EnableInternetAccess != nil { + in, out := &in.EnableInternetAccess, &out.EnableInternetAccess + *out = new(bool) + **out = **in + } + if in.EnableMaintenanceMode != nil { + in, out := &in.EnableMaintenanceMode, &out.EnableMaintenanceMode + *out = new(bool) + **out = **in + } + if in.UserEnabledAsLocalAdministrator != nil { + in, out := &in.UserEnabledAsLocalAdministrator, &out.UserEnabledAsLocalAdministrator + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceCreationPropertiesInitParameters. +func (in *WorkspaceCreationPropertiesInitParameters) DeepCopy() *WorkspaceCreationPropertiesInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceCreationPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceCreationPropertiesObservation) DeepCopyInto(out *WorkspaceCreationPropertiesObservation) { + *out = *in + if in.CustomSecurityGroupID != nil { + in, out := &in.CustomSecurityGroupID, &out.CustomSecurityGroupID + *out = new(string) + **out = **in + } + if in.DefaultOu != nil { + in, out := &in.DefaultOu, &out.DefaultOu + *out = new(string) + **out = **in + } + if in.EnableInternetAccess != nil { + in, out := &in.EnableInternetAccess, &out.EnableInternetAccess + *out = new(bool) + **out = **in + } + if in.EnableMaintenanceMode != nil { + in, out := &in.EnableMaintenanceMode, &out.EnableMaintenanceMode + *out = new(bool) + **out = **in + } + if in.UserEnabledAsLocalAdministrator != nil { + in, out := &in.UserEnabledAsLocalAdministrator, &out.UserEnabledAsLocalAdministrator + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceCreationPropertiesObservation. +func (in *WorkspaceCreationPropertiesObservation) DeepCopy() *WorkspaceCreationPropertiesObservation { + if in == nil { + return nil + } + out := new(WorkspaceCreationPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceCreationPropertiesParameters) DeepCopyInto(out *WorkspaceCreationPropertiesParameters) { + *out = *in + if in.CustomSecurityGroupID != nil { + in, out := &in.CustomSecurityGroupID, &out.CustomSecurityGroupID + *out = new(string) + **out = **in + } + if in.CustomSecurityGroupIDRef != nil { + in, out := &in.CustomSecurityGroupIDRef, &out.CustomSecurityGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CustomSecurityGroupIDSelector != nil { + in, out := &in.CustomSecurityGroupIDSelector, &out.CustomSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DefaultOu != nil { + in, out := &in.DefaultOu, &out.DefaultOu + *out = new(string) + **out = **in + } + if in.EnableInternetAccess != nil { + in, out := &in.EnableInternetAccess, &out.EnableInternetAccess + *out = new(bool) + **out = **in + } + if in.EnableMaintenanceMode != nil { + in, out := &in.EnableMaintenanceMode, &out.EnableMaintenanceMode + *out = new(bool) + **out = **in + } + if in.UserEnabledAsLocalAdministrator != nil { + in, out := &in.UserEnabledAsLocalAdministrator, &out.UserEnabledAsLocalAdministrator + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceCreationPropertiesParameters. +func (in *WorkspaceCreationPropertiesParameters) DeepCopy() *WorkspaceCreationPropertiesParameters { + if in == nil { + return nil + } + out := new(WorkspaceCreationPropertiesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/workspaces/v1beta2/zz_generated.managed.go b/apis/workspaces/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..f47466a2af --- /dev/null +++ b/apis/workspaces/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Directory. +func (mg *Directory) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Directory. +func (mg *Directory) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Directory. +func (mg *Directory) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Directory. +func (mg *Directory) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Directory. +func (mg *Directory) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Directory. +func (mg *Directory) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Directory. +func (mg *Directory) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Directory. +func (mg *Directory) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Directory. +func (mg *Directory) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Directory. +func (mg *Directory) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Directory. +func (mg *Directory) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Directory. 
+func (mg *Directory) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/workspaces/v1beta2/zz_generated.managedlist.go b/apis/workspaces/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..ec7b758d0c --- /dev/null +++ b/apis/workspaces/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DirectoryList. +func (l *DirectoryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/workspaces/v1beta2/zz_generated.resolvers.go b/apis/workspaces/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..53dd61359b --- /dev/null +++ b/apis/workspaces/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,150 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Directory) ResolveReferences( // ResolveReferences of this Directory. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DirectoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DirectoryIDRef, + Selector: mg.Spec.ForProvider.DirectoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DirectoryID") + } + mg.Spec.ForProvider.DirectoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DirectoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.SubnetIDRefs, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetIds") + } + mg.Spec.ForProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.SubnetIDRefs = mrsp.ResolvedReferences + + if mg.Spec.ForProvider.WorkspaceCreationProperties != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", 
"SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WorkspaceCreationProperties.CustomSecurityGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WorkspaceCreationProperties.CustomSecurityGroupIDRef, + Selector: mg.Spec.ForProvider.WorkspaceCreationProperties.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WorkspaceCreationProperties.CustomSecurityGroupID") + } + mg.Spec.ForProvider.WorkspaceCreationProperties.CustomSecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WorkspaceCreationProperties.CustomSecurityGroupIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("ds.aws.upbound.io", "v1beta2", "Directory", "DirectoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DirectoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DirectoryIDRef, + Selector: mg.Spec.InitProvider.DirectoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DirectoryID") + } + mg.Spec.InitProvider.DirectoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DirectoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.SubnetIDRefs, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetIds") + } + mg.Spec.InitProvider.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.SubnetIDRefs = mrsp.ResolvedReferences + + if mg.Spec.InitProvider.WorkspaceCreationProperties != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.WorkspaceCreationProperties.CustomSecurityGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.WorkspaceCreationProperties.CustomSecurityGroupIDRef, + Selector: mg.Spec.InitProvider.WorkspaceCreationProperties.CustomSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.WorkspaceCreationProperties.CustomSecurityGroupID") + } + mg.Spec.InitProvider.WorkspaceCreationProperties.CustomSecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.WorkspaceCreationProperties.CustomSecurityGroupIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/workspaces/v1beta2/zz_groupversion_info.go b/apis/workspaces/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..77201580a3 --- /dev/null +++ b/apis/workspaces/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane 
Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=workspaces.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "workspaces.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/xray/v1beta1/zz_generated.conversion_hubs.go b/apis/xray/v1beta1/zz_generated.conversion_hubs.go index ccfb0596f0..16d4982c41 100755 --- a/apis/xray/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/xray/v1beta1/zz_generated.conversion_hubs.go @@ -9,8 +9,5 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *EncryptionConfig) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Group) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SamplingRule) Hub() {} diff --git a/apis/xray/v1beta1/zz_generated.conversion_spokes.go b/apis/xray/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 0000000000..6e5344f630 --- /dev/null +++ b/apis/xray/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Group to the hub type. +func (tr *Group) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Group type. +func (tr *Group) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/xray/v1beta2/zz_generated.conversion_hubs.go b/apis/xray/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..471e2253ef --- /dev/null +++ b/apis/xray/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Group) Hub() {} diff --git a/apis/xray/v1beta2/zz_generated.deepcopy.go b/apis/xray/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..86c3d9e9bb --- /dev/null +++ b/apis/xray/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,351 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Group) DeepCopyInto(out *Group) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group. +func (in *Group) DeepCopy() *Group { + if in == nil { + return nil + } + out := new(Group) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Group) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupInitParameters) DeepCopyInto(out *GroupInitParameters) { + *out = *in + if in.FilterExpression != nil { + in, out := &in.FilterExpression, &out.FilterExpression + *out = new(string) + **out = **in + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } + if in.InsightsConfiguration != nil { + in, out := &in.InsightsConfiguration, &out.InsightsConfiguration + *out = new(InsightsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupInitParameters. +func (in *GroupInitParameters) DeepCopy() *GroupInitParameters { + if in == nil { + return nil + } + out := new(GroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupList) DeepCopyInto(out *GroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Group, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList. +func (in *GroupList) DeepCopy() *GroupList { + if in == nil { + return nil + } + out := new(GroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *GroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupObservation) DeepCopyInto(out *GroupObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.FilterExpression != nil { + in, out := &in.FilterExpression, &out.FilterExpression + *out = new(string) + **out = **in + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InsightsConfiguration != nil { + in, out := &in.InsightsConfiguration, &out.InsightsConfiguration + *out = new(InsightsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupObservation. +func (in *GroupObservation) DeepCopy() *GroupObservation { + if in == nil { + return nil + } + out := new(GroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupParameters) DeepCopyInto(out *GroupParameters) { + *out = *in + if in.FilterExpression != nil { + in, out := &in.FilterExpression, &out.FilterExpression + *out = new(string) + **out = **in + } + if in.GroupName != nil { + in, out := &in.GroupName, &out.GroupName + *out = new(string) + **out = **in + } + if in.InsightsConfiguration != nil { + in, out := &in.InsightsConfiguration, &out.InsightsConfiguration + *out = new(InsightsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupParameters. +func (in *GroupParameters) DeepCopy() *GroupParameters { + if in == nil { + return nil + } + out := new(GroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSpec) DeepCopyInto(out *GroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSpec. +func (in *GroupSpec) DeepCopy() *GroupSpec { + if in == nil { + return nil + } + out := new(GroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupStatus) DeepCopyInto(out *GroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupStatus. +func (in *GroupStatus) DeepCopy() *GroupStatus { + if in == nil { + return nil + } + out := new(GroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsConfigurationInitParameters) DeepCopyInto(out *InsightsConfigurationInitParameters) { + *out = *in + if in.InsightsEnabled != nil { + in, out := &in.InsightsEnabled, &out.InsightsEnabled + *out = new(bool) + **out = **in + } + if in.NotificationsEnabled != nil { + in, out := &in.NotificationsEnabled, &out.NotificationsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsConfigurationInitParameters. +func (in *InsightsConfigurationInitParameters) DeepCopy() *InsightsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(InsightsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsConfigurationObservation) DeepCopyInto(out *InsightsConfigurationObservation) { + *out = *in + if in.InsightsEnabled != nil { + in, out := &in.InsightsEnabled, &out.InsightsEnabled + *out = new(bool) + **out = **in + } + if in.NotificationsEnabled != nil { + in, out := &in.NotificationsEnabled, &out.NotificationsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsConfigurationObservation. 
+func (in *InsightsConfigurationObservation) DeepCopy() *InsightsConfigurationObservation { + if in == nil { + return nil + } + out := new(InsightsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsConfigurationParameters) DeepCopyInto(out *InsightsConfigurationParameters) { + *out = *in + if in.InsightsEnabled != nil { + in, out := &in.InsightsEnabled, &out.InsightsEnabled + *out = new(bool) + **out = **in + } + if in.NotificationsEnabled != nil { + in, out := &in.NotificationsEnabled, &out.NotificationsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsConfigurationParameters. +func (in *InsightsConfigurationParameters) DeepCopy() *InsightsConfigurationParameters { + if in == nil { + return nil + } + out := new(InsightsConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/xray/v1beta2/zz_generated.managed.go b/apis/xray/v1beta2/zz_generated.managed.go new file mode 100644 index 0000000000..93fa467759 --- /dev/null +++ b/apis/xray/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Group. +func (mg *Group) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Group. +func (mg *Group) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Group. +func (mg *Group) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Group. 
+func (mg *Group) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Group. +func (mg *Group) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Group. +func (mg *Group) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Group. +func (mg *Group) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Group. +func (mg *Group) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Group. +func (mg *Group) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Group. +func (mg *Group) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Group. +func (mg *Group) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Group. +func (mg *Group) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/xray/v1beta2/zz_generated.managedlist.go b/apis/xray/v1beta2/zz_generated.managedlist.go new file mode 100644 index 0000000000..5274ced33e --- /dev/null +++ b/apis/xray/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this GroupList. 
+func (l *GroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/xray/v1beta2/zz_group_terraformed.go b/apis/xray/v1beta2/zz_group_terraformed.go new file mode 100755 index 0000000000..eae3424e16 --- /dev/null +++ b/apis/xray/v1beta2/zz_group_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Group +func (mg *Group) GetTerraformResourceType() string { + return "aws_xray_group" +} + +// GetConnectionDetailsMapping for this Group +func (tr *Group) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Group +func (tr *Group) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Group +func (tr *Group) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Group +func (tr *Group) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Group +func (tr *Group) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
SetParameters for this Group +func (tr *Group) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Group +func (tr *Group) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Group +func (tr *Group) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Group using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Group) LateInitialize(attrs []byte) (bool, error) { + params := &GroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Group) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/xray/v1beta2/zz_group_types.go b/apis/xray/v1beta2/zz_group_types.go new file mode 100755 index 0000000000..9dd0a2f6af --- /dev/null +++ b/apis/xray/v1beta2/zz_group_types.go @@ -0,0 +1,172 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GroupInitParameters struct { + + // The filter expression defining criteria by which to group traces. more info can be found in official docs. + FilterExpression *string `json:"filterExpression,omitempty" tf:"filter_expression,omitempty"` + + // The name of the group. + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` + + // Configuration options for enabling insights. + InsightsConfiguration *InsightsConfigurationInitParameters `json:"insightsConfiguration,omitempty" tf:"insights_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type GroupObservation struct { + + // The ARN of the Group. 
+ Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The filter expression defining criteria by which to group traces. more info can be found in official docs. + FilterExpression *string `json:"filterExpression,omitempty" tf:"filter_expression,omitempty"` + + // The name of the group. + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` + + // The ARN of the Group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Configuration options for enabling insights. + InsightsConfiguration *InsightsConfigurationObservation `json:"insightsConfiguration,omitempty" tf:"insights_configuration,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` +} + +type GroupParameters struct { + + // The filter expression defining criteria by which to group traces. more info can be found in official docs. + // +kubebuilder:validation:Optional + FilterExpression *string `json:"filterExpression,omitempty" tf:"filter_expression,omitempty"` + + // The name of the group. + // +kubebuilder:validation:Optional + GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` + + // Configuration options for enabling insights. + // +kubebuilder:validation:Optional + InsightsConfiguration *InsightsConfigurationParameters `json:"insightsConfiguration,omitempty" tf:"insights_configuration,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Key-value map of resource tags. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type InsightsConfigurationInitParameters struct { + + // Specifies whether insights are enabled. + InsightsEnabled *bool `json:"insightsEnabled,omitempty" tf:"insights_enabled,omitempty"` + + // Specifies whether insight notifications are enabled. + NotificationsEnabled *bool `json:"notificationsEnabled,omitempty" tf:"notifications_enabled,omitempty"` +} + +type InsightsConfigurationObservation struct { + + // Specifies whether insights are enabled. + InsightsEnabled *bool `json:"insightsEnabled,omitempty" tf:"insights_enabled,omitempty"` + + // Specifies whether insight notifications are enabled. + NotificationsEnabled *bool `json:"notificationsEnabled,omitempty" tf:"notifications_enabled,omitempty"` +} + +type InsightsConfigurationParameters struct { + + // Specifies whether insights are enabled. + // +kubebuilder:validation:Optional + InsightsEnabled *bool `json:"insightsEnabled" tf:"insights_enabled,omitempty"` + + // Specifies whether insight notifications are enabled. + // +kubebuilder:validation:Optional + NotificationsEnabled *bool `json:"notificationsEnabled,omitempty" tf:"notifications_enabled,omitempty"` +} + +// GroupSpec defines the desired state of Group +type GroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GroupInitParameters `json:"initProvider,omitempty"` +} + +// GroupStatus defines the observed state of Group. +type GroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Group is the Schema for the Groups API. Creates and manages an AWS XRay Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Group struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.filterExpression) || (has(self.initProvider) && has(self.initProvider.filterExpression))",message="spec.forProvider.filterExpression is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.groupName) || (has(self.initProvider) && has(self.initProvider.groupName))",message="spec.forProvider.groupName is a required parameter" + Spec GroupSpec `json:"spec"` + Status GroupStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// GroupList contains a list of Groups +type GroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Group `json:"items"` +} + +// Repository type metadata. +var ( + Group_Kind = "Group" + Group_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Group_Kind}.String() + Group_KindAPIVersion = Group_Kind + "." + CRDGroupVersion.String() + Group_GroupVersionKind = CRDGroupVersion.WithKind(Group_Kind) +) + +func init() { + SchemeBuilder.Register(&Group{}, &GroupList{}) +} diff --git a/apis/xray/v1beta2/zz_groupversion_info.go b/apis/xray/v1beta2/zz_groupversion_info.go new file mode 100755 index 0000000000..70055c3a2d --- /dev/null +++ b/apis/xray/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=xray.aws.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "xray.aws.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/zz_register.go b/apis/zz_register.go index 5f1163f003..1740f7a0e4 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -11,350 +11,586 @@ import ( "k8s.io/apimachinery/pkg/runtime" v1beta1 "github.com/upbound/provider-aws/apis/accessanalyzer/v1beta1" + v1beta2 "github.com/upbound/provider-aws/apis/accessanalyzer/v1beta2" v1beta1account "github.com/upbound/provider-aws/apis/account/v1beta1" v1beta1acm "github.com/upbound/provider-aws/apis/acm/v1beta1" + v1beta2acm "github.com/upbound/provider-aws/apis/acm/v1beta2" v1beta1acmpca "github.com/upbound/provider-aws/apis/acmpca/v1beta1" + v1beta2acmpca "github.com/upbound/provider-aws/apis/acmpca/v1beta2" v1beta1amp "github.com/upbound/provider-aws/apis/amp/v1beta1" + v1beta2amp "github.com/upbound/provider-aws/apis/amp/v1beta2" v1beta1amplify "github.com/upbound/provider-aws/apis/amplify/v1beta1" + v1beta2amplify "github.com/upbound/provider-aws/apis/amplify/v1beta2" v1beta1apigateway "github.com/upbound/provider-aws/apis/apigateway/v1beta1" + v1beta2apigateway "github.com/upbound/provider-aws/apis/apigateway/v1beta2" v1beta1apigatewayv2 "github.com/upbound/provider-aws/apis/apigatewayv2/v1beta1" + v1beta2apigatewayv2 "github.com/upbound/provider-aws/apis/apigatewayv2/v1beta2" v1beta1appautoscaling "github.com/upbound/provider-aws/apis/appautoscaling/v1beta1" + v1beta2appautoscaling "github.com/upbound/provider-aws/apis/appautoscaling/v1beta2" v1beta1appconfig "github.com/upbound/provider-aws/apis/appconfig/v1beta1" v1beta1appflow "github.com/upbound/provider-aws/apis/appflow/v1beta1" + v1beta2appflow "github.com/upbound/provider-aws/apis/appflow/v1beta2" v1beta1appintegrations "github.com/upbound/provider-aws/apis/appintegrations/v1beta1" + v1beta2appintegrations "github.com/upbound/provider-aws/apis/appintegrations/v1beta2" v1beta1applicationinsights "github.com/upbound/provider-aws/apis/applicationinsights/v1beta1" v1beta1appmesh 
"github.com/upbound/provider-aws/apis/appmesh/v1beta1" + v1beta2appmesh "github.com/upbound/provider-aws/apis/appmesh/v1beta2" v1beta1apprunner "github.com/upbound/provider-aws/apis/apprunner/v1beta1" + v1beta2apprunner "github.com/upbound/provider-aws/apis/apprunner/v1beta2" v1beta1appstream "github.com/upbound/provider-aws/apis/appstream/v1beta1" + v1beta2appstream "github.com/upbound/provider-aws/apis/appstream/v1beta2" v1beta1appsync "github.com/upbound/provider-aws/apis/appsync/v1beta1" + v1beta2appsync "github.com/upbound/provider-aws/apis/appsync/v1beta2" v1beta1athena "github.com/upbound/provider-aws/apis/athena/v1beta1" + v1beta2athena "github.com/upbound/provider-aws/apis/athena/v1beta2" v1beta1autoscaling "github.com/upbound/provider-aws/apis/autoscaling/v1beta1" - v1beta2 "github.com/upbound/provider-aws/apis/autoscaling/v1beta2" + v1beta2autoscaling "github.com/upbound/provider-aws/apis/autoscaling/v1beta2" + v1beta3 "github.com/upbound/provider-aws/apis/autoscaling/v1beta3" v1beta1autoscalingplans "github.com/upbound/provider-aws/apis/autoscalingplans/v1beta1" + v1beta2autoscalingplans "github.com/upbound/provider-aws/apis/autoscalingplans/v1beta2" v1beta1backup "github.com/upbound/provider-aws/apis/backup/v1beta1" + v1beta2backup "github.com/upbound/provider-aws/apis/backup/v1beta2" v1beta1batch "github.com/upbound/provider-aws/apis/batch/v1beta1" + v1beta2batch "github.com/upbound/provider-aws/apis/batch/v1beta2" v1beta1budgets "github.com/upbound/provider-aws/apis/budgets/v1beta1" + v1beta2budgets "github.com/upbound/provider-aws/apis/budgets/v1beta2" v1beta1ce "github.com/upbound/provider-aws/apis/ce/v1beta1" v1beta1chime "github.com/upbound/provider-aws/apis/chime/v1beta1" + v1beta2chime "github.com/upbound/provider-aws/apis/chime/v1beta2" v1beta1cloud9 "github.com/upbound/provider-aws/apis/cloud9/v1beta1" v1beta1cloudcontrol "github.com/upbound/provider-aws/apis/cloudcontrol/v1beta1" v1beta1cloudformation 
"github.com/upbound/provider-aws/apis/cloudformation/v1beta1" + v1beta2cloudformation "github.com/upbound/provider-aws/apis/cloudformation/v1beta2" v1beta1cloudfront "github.com/upbound/provider-aws/apis/cloudfront/v1beta1" + v1beta2cloudfront "github.com/upbound/provider-aws/apis/cloudfront/v1beta2" v1beta1cloudsearch "github.com/upbound/provider-aws/apis/cloudsearch/v1beta1" + v1beta2cloudsearch "github.com/upbound/provider-aws/apis/cloudsearch/v1beta2" v1beta1cloudtrail "github.com/upbound/provider-aws/apis/cloudtrail/v1beta1" v1beta1cloudwatch "github.com/upbound/provider-aws/apis/cloudwatch/v1beta1" + v1beta2cloudwatch "github.com/upbound/provider-aws/apis/cloudwatch/v1beta2" v1beta1cloudwatchevents "github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta1" + v1beta2cloudwatchevents "github.com/upbound/provider-aws/apis/cloudwatchevents/v1beta2" v1beta1cloudwatchlogs "github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta1" + v1beta2cloudwatchlogs "github.com/upbound/provider-aws/apis/cloudwatchlogs/v1beta2" v1beta1codecommit "github.com/upbound/provider-aws/apis/codecommit/v1beta1" v1beta1codeguruprofiler "github.com/upbound/provider-aws/apis/codeguruprofiler/v1beta1" v1beta1codepipeline "github.com/upbound/provider-aws/apis/codepipeline/v1beta1" + v1beta2codepipeline "github.com/upbound/provider-aws/apis/codepipeline/v1beta2" v1beta1codestarconnections "github.com/upbound/provider-aws/apis/codestarconnections/v1beta1" + v1beta2codestarconnections "github.com/upbound/provider-aws/apis/codestarconnections/v1beta2" v1beta1codestarnotifications "github.com/upbound/provider-aws/apis/codestarnotifications/v1beta1" v1beta1cognitoidentity "github.com/upbound/provider-aws/apis/cognitoidentity/v1beta1" v1beta1cognitoidp "github.com/upbound/provider-aws/apis/cognitoidp/v1beta1" + v1beta2cognitoidp "github.com/upbound/provider-aws/apis/cognitoidp/v1beta2" v1beta1configservice "github.com/upbound/provider-aws/apis/configservice/v1beta1" + v1beta2configservice 
"github.com/upbound/provider-aws/apis/configservice/v1beta2" v1beta1connect "github.com/upbound/provider-aws/apis/connect/v1beta1" v1beta2connect "github.com/upbound/provider-aws/apis/connect/v1beta2" + v1beta3connect "github.com/upbound/provider-aws/apis/connect/v1beta3" v1beta1cur "github.com/upbound/provider-aws/apis/cur/v1beta1" v1beta1dataexchange "github.com/upbound/provider-aws/apis/dataexchange/v1beta1" v1beta1datapipeline "github.com/upbound/provider-aws/apis/datapipeline/v1beta1" v1beta1datasync "github.com/upbound/provider-aws/apis/datasync/v1beta1" + v1beta2datasync "github.com/upbound/provider-aws/apis/datasync/v1beta2" v1beta1dax "github.com/upbound/provider-aws/apis/dax/v1beta1" + v1beta2dax "github.com/upbound/provider-aws/apis/dax/v1beta2" v1beta1deploy "github.com/upbound/provider-aws/apis/deploy/v1beta1" + v1beta2deploy "github.com/upbound/provider-aws/apis/deploy/v1beta2" v1beta1detective "github.com/upbound/provider-aws/apis/detective/v1beta1" v1beta1devicefarm "github.com/upbound/provider-aws/apis/devicefarm/v1beta1" + v1beta2devicefarm "github.com/upbound/provider-aws/apis/devicefarm/v1beta2" v1beta1directconnect "github.com/upbound/provider-aws/apis/directconnect/v1beta1" v1beta1dlm "github.com/upbound/provider-aws/apis/dlm/v1beta1" + v1beta2dlm "github.com/upbound/provider-aws/apis/dlm/v1beta2" v1beta1dms "github.com/upbound/provider-aws/apis/dms/v1beta1" + v1beta2dms "github.com/upbound/provider-aws/apis/dms/v1beta2" v1beta1docdb "github.com/upbound/provider-aws/apis/docdb/v1beta1" v1beta1ds "github.com/upbound/provider-aws/apis/ds/v1beta1" + v1beta2ds "github.com/upbound/provider-aws/apis/ds/v1beta2" v1beta1dynamodb "github.com/upbound/provider-aws/apis/dynamodb/v1beta1" + v1beta2dynamodb "github.com/upbound/provider-aws/apis/dynamodb/v1beta2" v1beta1ec2 "github.com/upbound/provider-aws/apis/ec2/v1beta1" v1beta2ec2 "github.com/upbound/provider-aws/apis/ec2/v1beta2" v1beta1ecr "github.com/upbound/provider-aws/apis/ecr/v1beta1" + v1beta2ecr 
"github.com/upbound/provider-aws/apis/ecr/v1beta2" v1beta1ecrpublic "github.com/upbound/provider-aws/apis/ecrpublic/v1beta1" + v1beta2ecrpublic "github.com/upbound/provider-aws/apis/ecrpublic/v1beta2" v1beta1ecs "github.com/upbound/provider-aws/apis/ecs/v1beta1" + v1beta2ecs "github.com/upbound/provider-aws/apis/ecs/v1beta2" v1beta1efs "github.com/upbound/provider-aws/apis/efs/v1beta1" + v1beta2efs "github.com/upbound/provider-aws/apis/efs/v1beta2" v1beta1eks "github.com/upbound/provider-aws/apis/eks/v1beta1" + v1beta2eks "github.com/upbound/provider-aws/apis/eks/v1beta2" v1beta1elasticache "github.com/upbound/provider-aws/apis/elasticache/v1beta1" v1beta2elasticache "github.com/upbound/provider-aws/apis/elasticache/v1beta2" v1beta1elasticbeanstalk "github.com/upbound/provider-aws/apis/elasticbeanstalk/v1beta1" + v1beta2elasticbeanstalk "github.com/upbound/provider-aws/apis/elasticbeanstalk/v1beta2" v1beta1elasticsearch "github.com/upbound/provider-aws/apis/elasticsearch/v1beta1" + v1beta2elasticsearch "github.com/upbound/provider-aws/apis/elasticsearch/v1beta2" v1beta1elastictranscoder "github.com/upbound/provider-aws/apis/elastictranscoder/v1beta1" + v1beta2elastictranscoder "github.com/upbound/provider-aws/apis/elastictranscoder/v1beta2" v1beta1elb "github.com/upbound/provider-aws/apis/elb/v1beta1" + v1beta2elb "github.com/upbound/provider-aws/apis/elb/v1beta2" v1beta1elbv2 "github.com/upbound/provider-aws/apis/elbv2/v1beta1" + v1beta2elbv2 "github.com/upbound/provider-aws/apis/elbv2/v1beta2" v1beta1emr "github.com/upbound/provider-aws/apis/emr/v1beta1" v1beta1emrserverless "github.com/upbound/provider-aws/apis/emrserverless/v1beta1" + v1beta2emrserverless "github.com/upbound/provider-aws/apis/emrserverless/v1beta2" v1beta1evidently "github.com/upbound/provider-aws/apis/evidently/v1beta1" + v1beta2evidently "github.com/upbound/provider-aws/apis/evidently/v1beta2" v1beta1firehose "github.com/upbound/provider-aws/apis/firehose/v1beta1" + v1beta2firehose 
"github.com/upbound/provider-aws/apis/firehose/v1beta2" v1beta1fis "github.com/upbound/provider-aws/apis/fis/v1beta1" + v1beta2fis "github.com/upbound/provider-aws/apis/fis/v1beta2" v1beta1fsx "github.com/upbound/provider-aws/apis/fsx/v1beta1" + v1beta2fsx "github.com/upbound/provider-aws/apis/fsx/v1beta2" v1beta1gamelift "github.com/upbound/provider-aws/apis/gamelift/v1beta1" + v1beta2gamelift "github.com/upbound/provider-aws/apis/gamelift/v1beta2" v1beta1glacier "github.com/upbound/provider-aws/apis/glacier/v1beta1" + v1beta2glacier "github.com/upbound/provider-aws/apis/glacier/v1beta2" v1beta1globalaccelerator "github.com/upbound/provider-aws/apis/globalaccelerator/v1beta1" + v1beta2globalaccelerator "github.com/upbound/provider-aws/apis/globalaccelerator/v1beta2" v1beta1glue "github.com/upbound/provider-aws/apis/glue/v1beta1" + v1beta2glue "github.com/upbound/provider-aws/apis/glue/v1beta2" v1beta1grafana "github.com/upbound/provider-aws/apis/grafana/v1beta1" + v1beta2grafana "github.com/upbound/provider-aws/apis/grafana/v1beta2" v1beta1guardduty "github.com/upbound/provider-aws/apis/guardduty/v1beta1" + v1beta2guardduty "github.com/upbound/provider-aws/apis/guardduty/v1beta2" v1beta1iam "github.com/upbound/provider-aws/apis/iam/v1beta1" v1beta1identitystore "github.com/upbound/provider-aws/apis/identitystore/v1beta1" + v1beta2identitystore "github.com/upbound/provider-aws/apis/identitystore/v1beta2" v1beta1imagebuilder "github.com/upbound/provider-aws/apis/imagebuilder/v1beta1" + v1beta2imagebuilder "github.com/upbound/provider-aws/apis/imagebuilder/v1beta2" v1beta1inspector "github.com/upbound/provider-aws/apis/inspector/v1beta1" v1beta1inspector2 "github.com/upbound/provider-aws/apis/inspector2/v1beta1" v1beta1iot "github.com/upbound/provider-aws/apis/iot/v1beta1" + v1beta2iot "github.com/upbound/provider-aws/apis/iot/v1beta2" v1beta1ivs "github.com/upbound/provider-aws/apis/ivs/v1beta1" + v1beta2ivs "github.com/upbound/provider-aws/apis/ivs/v1beta2" 
v1beta1kafka "github.com/upbound/provider-aws/apis/kafka/v1beta1" v1beta2kafka "github.com/upbound/provider-aws/apis/kafka/v1beta2" + v1beta3kafka "github.com/upbound/provider-aws/apis/kafka/v1beta3" v1beta1kafkaconnect "github.com/upbound/provider-aws/apis/kafkaconnect/v1beta1" + v1beta2kafkaconnect "github.com/upbound/provider-aws/apis/kafkaconnect/v1beta2" v1beta1kendra "github.com/upbound/provider-aws/apis/kendra/v1beta1" + v1beta2kendra "github.com/upbound/provider-aws/apis/kendra/v1beta2" v1beta1keyspaces "github.com/upbound/provider-aws/apis/keyspaces/v1beta1" + v1beta2keyspaces "github.com/upbound/provider-aws/apis/keyspaces/v1beta2" v1beta1kinesis "github.com/upbound/provider-aws/apis/kinesis/v1beta1" + v1beta2kinesis "github.com/upbound/provider-aws/apis/kinesis/v1beta2" v1beta1kinesisanalytics "github.com/upbound/provider-aws/apis/kinesisanalytics/v1beta1" + v1beta2kinesisanalytics "github.com/upbound/provider-aws/apis/kinesisanalytics/v1beta2" v1beta1kinesisanalyticsv2 "github.com/upbound/provider-aws/apis/kinesisanalyticsv2/v1beta1" + v1beta2kinesisanalyticsv2 "github.com/upbound/provider-aws/apis/kinesisanalyticsv2/v1beta2" v1beta1kinesisvideo "github.com/upbound/provider-aws/apis/kinesisvideo/v1beta1" v1beta1kms "github.com/upbound/provider-aws/apis/kms/v1beta1" v1beta1lakeformation "github.com/upbound/provider-aws/apis/lakeformation/v1beta1" + v1beta2lakeformation "github.com/upbound/provider-aws/apis/lakeformation/v1beta2" v1beta1lambda "github.com/upbound/provider-aws/apis/lambda/v1beta1" + v1beta2lambda "github.com/upbound/provider-aws/apis/lambda/v1beta2" v1beta1lexmodels "github.com/upbound/provider-aws/apis/lexmodels/v1beta1" + v1beta2lexmodels "github.com/upbound/provider-aws/apis/lexmodels/v1beta2" v1beta1licensemanager "github.com/upbound/provider-aws/apis/licensemanager/v1beta1" v1beta1lightsail "github.com/upbound/provider-aws/apis/lightsail/v1beta1" + v1beta2lightsail "github.com/upbound/provider-aws/apis/lightsail/v1beta2" 
v1beta1location "github.com/upbound/provider-aws/apis/location/v1beta1" + v1beta2location "github.com/upbound/provider-aws/apis/location/v1beta2" v1beta1macie2 "github.com/upbound/provider-aws/apis/macie2/v1beta1" + v1beta2macie2 "github.com/upbound/provider-aws/apis/macie2/v1beta2" v1beta1mediaconvert "github.com/upbound/provider-aws/apis/mediaconvert/v1beta1" + v1beta2mediaconvert "github.com/upbound/provider-aws/apis/mediaconvert/v1beta2" v1beta1medialive "github.com/upbound/provider-aws/apis/medialive/v1beta1" + v1beta2medialive "github.com/upbound/provider-aws/apis/medialive/v1beta2" v1beta1mediapackage "github.com/upbound/provider-aws/apis/mediapackage/v1beta1" v1beta1mediastore "github.com/upbound/provider-aws/apis/mediastore/v1beta1" v1beta1memorydb "github.com/upbound/provider-aws/apis/memorydb/v1beta1" + v1beta2memorydb "github.com/upbound/provider-aws/apis/memorydb/v1beta2" v1alpha1 "github.com/upbound/provider-aws/apis/mq/v1alpha1" v1beta1mq "github.com/upbound/provider-aws/apis/mq/v1beta1" + v1beta2mq "github.com/upbound/provider-aws/apis/mq/v1beta2" v1beta1neptune "github.com/upbound/provider-aws/apis/neptune/v1beta1" + v1beta2neptune "github.com/upbound/provider-aws/apis/neptune/v1beta2" v1beta1networkfirewall "github.com/upbound/provider-aws/apis/networkfirewall/v1beta1" + v1beta2networkfirewall "github.com/upbound/provider-aws/apis/networkfirewall/v1beta2" v1beta1networkmanager "github.com/upbound/provider-aws/apis/networkmanager/v1beta1" + v1beta2networkmanager "github.com/upbound/provider-aws/apis/networkmanager/v1beta2" v1beta1opensearch "github.com/upbound/provider-aws/apis/opensearch/v1beta1" + v1beta2opensearch "github.com/upbound/provider-aws/apis/opensearch/v1beta2" v1beta1opensearchserverless "github.com/upbound/provider-aws/apis/opensearchserverless/v1beta1" + v1beta2opensearchserverless "github.com/upbound/provider-aws/apis/opensearchserverless/v1beta2" v1beta1opsworks "github.com/upbound/provider-aws/apis/opsworks/v1beta1" + 
v1beta2opsworks "github.com/upbound/provider-aws/apis/opsworks/v1beta2" v1beta1organizations "github.com/upbound/provider-aws/apis/organizations/v1beta1" v1beta1pinpoint "github.com/upbound/provider-aws/apis/pinpoint/v1beta1" + v1beta2pinpoint "github.com/upbound/provider-aws/apis/pinpoint/v1beta2" v1beta1qldb "github.com/upbound/provider-aws/apis/qldb/v1beta1" + v1beta2qldb "github.com/upbound/provider-aws/apis/qldb/v1beta2" v1beta1quicksight "github.com/upbound/provider-aws/apis/quicksight/v1beta1" v1beta1ram "github.com/upbound/provider-aws/apis/ram/v1beta1" v1beta1rds "github.com/upbound/provider-aws/apis/rds/v1beta1" v1beta2rds "github.com/upbound/provider-aws/apis/rds/v1beta2" + v1beta3rds "github.com/upbound/provider-aws/apis/rds/v1beta3" v1beta1redshift "github.com/upbound/provider-aws/apis/redshift/v1beta1" + v1beta2redshift "github.com/upbound/provider-aws/apis/redshift/v1beta2" v1beta1redshiftserverless "github.com/upbound/provider-aws/apis/redshiftserverless/v1beta1" v1beta1resourcegroups "github.com/upbound/provider-aws/apis/resourcegroups/v1beta1" + v1beta2resourcegroups "github.com/upbound/provider-aws/apis/resourcegroups/v1beta2" v1beta1rolesanywhere "github.com/upbound/provider-aws/apis/rolesanywhere/v1beta1" v1beta1route53 "github.com/upbound/provider-aws/apis/route53/v1beta1" + v1beta2route53 "github.com/upbound/provider-aws/apis/route53/v1beta2" v1beta1route53recoverycontrolconfig "github.com/upbound/provider-aws/apis/route53recoverycontrolconfig/v1beta1" + v1beta2route53recoverycontrolconfig "github.com/upbound/provider-aws/apis/route53recoverycontrolconfig/v1beta2" v1beta1route53recoveryreadiness "github.com/upbound/provider-aws/apis/route53recoveryreadiness/v1beta1" + v1beta2route53recoveryreadiness "github.com/upbound/provider-aws/apis/route53recoveryreadiness/v1beta2" v1beta1route53resolver "github.com/upbound/provider-aws/apis/route53resolver/v1beta1" v1beta1rum "github.com/upbound/provider-aws/apis/rum/v1beta1" + v1beta2rum 
"github.com/upbound/provider-aws/apis/rum/v1beta2" v1beta1s3 "github.com/upbound/provider-aws/apis/s3/v1beta1" + v1beta2s3 "github.com/upbound/provider-aws/apis/s3/v1beta2" v1beta1s3control "github.com/upbound/provider-aws/apis/s3control/v1beta1" + v1beta2s3control "github.com/upbound/provider-aws/apis/s3control/v1beta2" v1beta1sagemaker "github.com/upbound/provider-aws/apis/sagemaker/v1beta1" + v1beta2sagemaker "github.com/upbound/provider-aws/apis/sagemaker/v1beta2" v1beta1scheduler "github.com/upbound/provider-aws/apis/scheduler/v1beta1" + v1beta2scheduler "github.com/upbound/provider-aws/apis/scheduler/v1beta2" v1beta1schemas "github.com/upbound/provider-aws/apis/schemas/v1beta1" v1beta1secretsmanager "github.com/upbound/provider-aws/apis/secretsmanager/v1beta1" + v1beta2secretsmanager "github.com/upbound/provider-aws/apis/secretsmanager/v1beta2" v1beta1securityhub "github.com/upbound/provider-aws/apis/securityhub/v1beta1" + v1beta2securityhub "github.com/upbound/provider-aws/apis/securityhub/v1beta2" v1beta1serverlessrepo "github.com/upbound/provider-aws/apis/serverlessrepo/v1beta1" v1beta1servicecatalog "github.com/upbound/provider-aws/apis/servicecatalog/v1beta1" + v1beta2servicecatalog "github.com/upbound/provider-aws/apis/servicecatalog/v1beta2" v1beta1servicediscovery "github.com/upbound/provider-aws/apis/servicediscovery/v1beta1" + v1beta2servicediscovery "github.com/upbound/provider-aws/apis/servicediscovery/v1beta2" v1beta1servicequotas "github.com/upbound/provider-aws/apis/servicequotas/v1beta1" v1beta1ses "github.com/upbound/provider-aws/apis/ses/v1beta1" + v1beta2ses "github.com/upbound/provider-aws/apis/ses/v1beta2" v1beta1sesv2 "github.com/upbound/provider-aws/apis/sesv2/v1beta1" + v1beta2sesv2 "github.com/upbound/provider-aws/apis/sesv2/v1beta2" v1beta1sfn "github.com/upbound/provider-aws/apis/sfn/v1beta1" + v1beta2sfn "github.com/upbound/provider-aws/apis/sfn/v1beta2" v1beta1signer "github.com/upbound/provider-aws/apis/signer/v1beta1" + 
v1beta2signer "github.com/upbound/provider-aws/apis/signer/v1beta2" v1beta1simpledb "github.com/upbound/provider-aws/apis/simpledb/v1beta1" v1beta1sns "github.com/upbound/provider-aws/apis/sns/v1beta1" v1beta1sqs "github.com/upbound/provider-aws/apis/sqs/v1beta1" v1beta1ssm "github.com/upbound/provider-aws/apis/ssm/v1beta1" + v1beta2ssm "github.com/upbound/provider-aws/apis/ssm/v1beta2" v1beta1ssoadmin "github.com/upbound/provider-aws/apis/ssoadmin/v1beta1" + v1beta2ssoadmin "github.com/upbound/provider-aws/apis/ssoadmin/v1beta2" v1beta1swf "github.com/upbound/provider-aws/apis/swf/v1beta1" v1beta1timestreamwrite "github.com/upbound/provider-aws/apis/timestreamwrite/v1beta1" + v1beta2timestreamwrite "github.com/upbound/provider-aws/apis/timestreamwrite/v1beta2" v1beta1transcribe "github.com/upbound/provider-aws/apis/transcribe/v1beta1" + v1beta2transcribe "github.com/upbound/provider-aws/apis/transcribe/v1beta2" v1beta1transfer "github.com/upbound/provider-aws/apis/transfer/v1beta1" + v1beta2transfer "github.com/upbound/provider-aws/apis/transfer/v1beta2" v1alpha1apis "github.com/upbound/provider-aws/apis/v1alpha1" v1beta1apis "github.com/upbound/provider-aws/apis/v1beta1" v1beta1vpc "github.com/upbound/provider-aws/apis/vpc/v1beta1" v1beta1waf "github.com/upbound/provider-aws/apis/waf/v1beta1" + v1beta2waf "github.com/upbound/provider-aws/apis/waf/v1beta2" v1beta1wafregional "github.com/upbound/provider-aws/apis/wafregional/v1beta1" + v1beta2wafregional "github.com/upbound/provider-aws/apis/wafregional/v1beta2" v1beta1wafv2 "github.com/upbound/provider-aws/apis/wafv2/v1beta1" v1beta1workspaces "github.com/upbound/provider-aws/apis/workspaces/v1beta1" + v1beta2workspaces "github.com/upbound/provider-aws/apis/workspaces/v1beta2" v1beta1xray "github.com/upbound/provider-aws/apis/xray/v1beta1" + v1beta2xray "github.com/upbound/provider-aws/apis/xray/v1beta2" ) func init() { // Register the types with the Scheme so the components can map objects to GroupVersionKinds 
and back AddToSchemes = append(AddToSchemes, v1beta1.SchemeBuilder.AddToScheme, + v1beta2.SchemeBuilder.AddToScheme, v1beta1account.SchemeBuilder.AddToScheme, v1beta1acm.SchemeBuilder.AddToScheme, + v1beta2acm.SchemeBuilder.AddToScheme, v1beta1acmpca.SchemeBuilder.AddToScheme, + v1beta2acmpca.SchemeBuilder.AddToScheme, v1beta1amp.SchemeBuilder.AddToScheme, + v1beta2amp.SchemeBuilder.AddToScheme, v1beta1amplify.SchemeBuilder.AddToScheme, + v1beta2amplify.SchemeBuilder.AddToScheme, v1beta1apigateway.SchemeBuilder.AddToScheme, + v1beta2apigateway.SchemeBuilder.AddToScheme, v1beta1apigatewayv2.SchemeBuilder.AddToScheme, + v1beta2apigatewayv2.SchemeBuilder.AddToScheme, v1beta1appautoscaling.SchemeBuilder.AddToScheme, + v1beta2appautoscaling.SchemeBuilder.AddToScheme, v1beta1appconfig.SchemeBuilder.AddToScheme, v1beta1appflow.SchemeBuilder.AddToScheme, + v1beta2appflow.SchemeBuilder.AddToScheme, v1beta1appintegrations.SchemeBuilder.AddToScheme, + v1beta2appintegrations.SchemeBuilder.AddToScheme, v1beta1applicationinsights.SchemeBuilder.AddToScheme, v1beta1appmesh.SchemeBuilder.AddToScheme, + v1beta2appmesh.SchemeBuilder.AddToScheme, v1beta1apprunner.SchemeBuilder.AddToScheme, + v1beta2apprunner.SchemeBuilder.AddToScheme, v1beta1appstream.SchemeBuilder.AddToScheme, + v1beta2appstream.SchemeBuilder.AddToScheme, v1beta1appsync.SchemeBuilder.AddToScheme, + v1beta2appsync.SchemeBuilder.AddToScheme, v1beta1athena.SchemeBuilder.AddToScheme, + v1beta2athena.SchemeBuilder.AddToScheme, v1beta1autoscaling.SchemeBuilder.AddToScheme, - v1beta2.SchemeBuilder.AddToScheme, + v1beta2autoscaling.SchemeBuilder.AddToScheme, + v1beta3.SchemeBuilder.AddToScheme, v1beta1autoscalingplans.SchemeBuilder.AddToScheme, + v1beta2autoscalingplans.SchemeBuilder.AddToScheme, v1beta1backup.SchemeBuilder.AddToScheme, + v1beta2backup.SchemeBuilder.AddToScheme, v1beta1batch.SchemeBuilder.AddToScheme, + v1beta2batch.SchemeBuilder.AddToScheme, v1beta1budgets.SchemeBuilder.AddToScheme, + 
v1beta2budgets.SchemeBuilder.AddToScheme, v1beta1ce.SchemeBuilder.AddToScheme, v1beta1chime.SchemeBuilder.AddToScheme, + v1beta2chime.SchemeBuilder.AddToScheme, v1beta1cloud9.SchemeBuilder.AddToScheme, v1beta1cloudcontrol.SchemeBuilder.AddToScheme, v1beta1cloudformation.SchemeBuilder.AddToScheme, + v1beta2cloudformation.SchemeBuilder.AddToScheme, v1beta1cloudfront.SchemeBuilder.AddToScheme, + v1beta2cloudfront.SchemeBuilder.AddToScheme, v1beta1cloudsearch.SchemeBuilder.AddToScheme, + v1beta2cloudsearch.SchemeBuilder.AddToScheme, v1beta1cloudtrail.SchemeBuilder.AddToScheme, v1beta1cloudwatch.SchemeBuilder.AddToScheme, + v1beta2cloudwatch.SchemeBuilder.AddToScheme, v1beta1cloudwatchevents.SchemeBuilder.AddToScheme, + v1beta2cloudwatchevents.SchemeBuilder.AddToScheme, v1beta1cloudwatchlogs.SchemeBuilder.AddToScheme, + v1beta2cloudwatchlogs.SchemeBuilder.AddToScheme, v1beta1codecommit.SchemeBuilder.AddToScheme, v1beta1codeguruprofiler.SchemeBuilder.AddToScheme, v1beta1codepipeline.SchemeBuilder.AddToScheme, + v1beta2codepipeline.SchemeBuilder.AddToScheme, v1beta1codestarconnections.SchemeBuilder.AddToScheme, + v1beta2codestarconnections.SchemeBuilder.AddToScheme, v1beta1codestarnotifications.SchemeBuilder.AddToScheme, v1beta1cognitoidentity.SchemeBuilder.AddToScheme, v1beta1cognitoidp.SchemeBuilder.AddToScheme, + v1beta2cognitoidp.SchemeBuilder.AddToScheme, v1beta1configservice.SchemeBuilder.AddToScheme, + v1beta2configservice.SchemeBuilder.AddToScheme, v1beta1connect.SchemeBuilder.AddToScheme, v1beta2connect.SchemeBuilder.AddToScheme, + v1beta3connect.SchemeBuilder.AddToScheme, v1beta1cur.SchemeBuilder.AddToScheme, v1beta1dataexchange.SchemeBuilder.AddToScheme, v1beta1datapipeline.SchemeBuilder.AddToScheme, v1beta1datasync.SchemeBuilder.AddToScheme, + v1beta2datasync.SchemeBuilder.AddToScheme, v1beta1dax.SchemeBuilder.AddToScheme, + v1beta2dax.SchemeBuilder.AddToScheme, v1beta1deploy.SchemeBuilder.AddToScheme, + v1beta2deploy.SchemeBuilder.AddToScheme, 
v1beta1detective.SchemeBuilder.AddToScheme, v1beta1devicefarm.SchemeBuilder.AddToScheme, + v1beta2devicefarm.SchemeBuilder.AddToScheme, v1beta1directconnect.SchemeBuilder.AddToScheme, v1beta1dlm.SchemeBuilder.AddToScheme, + v1beta2dlm.SchemeBuilder.AddToScheme, v1beta1dms.SchemeBuilder.AddToScheme, + v1beta2dms.SchemeBuilder.AddToScheme, v1beta1docdb.SchemeBuilder.AddToScheme, v1beta1ds.SchemeBuilder.AddToScheme, + v1beta2ds.SchemeBuilder.AddToScheme, v1beta1dynamodb.SchemeBuilder.AddToScheme, + v1beta2dynamodb.SchemeBuilder.AddToScheme, v1beta1ec2.SchemeBuilder.AddToScheme, v1beta2ec2.SchemeBuilder.AddToScheme, v1beta1ecr.SchemeBuilder.AddToScheme, + v1beta2ecr.SchemeBuilder.AddToScheme, v1beta1ecrpublic.SchemeBuilder.AddToScheme, + v1beta2ecrpublic.SchemeBuilder.AddToScheme, v1beta1ecs.SchemeBuilder.AddToScheme, + v1beta2ecs.SchemeBuilder.AddToScheme, v1beta1efs.SchemeBuilder.AddToScheme, + v1beta2efs.SchemeBuilder.AddToScheme, v1beta1eks.SchemeBuilder.AddToScheme, + v1beta2eks.SchemeBuilder.AddToScheme, v1beta1elasticache.SchemeBuilder.AddToScheme, v1beta2elasticache.SchemeBuilder.AddToScheme, v1beta1elasticbeanstalk.SchemeBuilder.AddToScheme, + v1beta2elasticbeanstalk.SchemeBuilder.AddToScheme, v1beta1elasticsearch.SchemeBuilder.AddToScheme, + v1beta2elasticsearch.SchemeBuilder.AddToScheme, v1beta1elastictranscoder.SchemeBuilder.AddToScheme, + v1beta2elastictranscoder.SchemeBuilder.AddToScheme, v1beta1elb.SchemeBuilder.AddToScheme, + v1beta2elb.SchemeBuilder.AddToScheme, v1beta1elbv2.SchemeBuilder.AddToScheme, + v1beta2elbv2.SchemeBuilder.AddToScheme, v1beta1emr.SchemeBuilder.AddToScheme, v1beta1emrserverless.SchemeBuilder.AddToScheme, + v1beta2emrserverless.SchemeBuilder.AddToScheme, v1beta1evidently.SchemeBuilder.AddToScheme, + v1beta2evidently.SchemeBuilder.AddToScheme, v1beta1firehose.SchemeBuilder.AddToScheme, + v1beta2firehose.SchemeBuilder.AddToScheme, v1beta1fis.SchemeBuilder.AddToScheme, + v1beta2fis.SchemeBuilder.AddToScheme, 
v1beta1fsx.SchemeBuilder.AddToScheme, + v1beta2fsx.SchemeBuilder.AddToScheme, v1beta1gamelift.SchemeBuilder.AddToScheme, + v1beta2gamelift.SchemeBuilder.AddToScheme, v1beta1glacier.SchemeBuilder.AddToScheme, + v1beta2glacier.SchemeBuilder.AddToScheme, v1beta1globalaccelerator.SchemeBuilder.AddToScheme, + v1beta2globalaccelerator.SchemeBuilder.AddToScheme, v1beta1glue.SchemeBuilder.AddToScheme, + v1beta2glue.SchemeBuilder.AddToScheme, v1beta1grafana.SchemeBuilder.AddToScheme, + v1beta2grafana.SchemeBuilder.AddToScheme, v1beta1guardduty.SchemeBuilder.AddToScheme, + v1beta2guardduty.SchemeBuilder.AddToScheme, v1beta1iam.SchemeBuilder.AddToScheme, v1beta1identitystore.SchemeBuilder.AddToScheme, + v1beta2identitystore.SchemeBuilder.AddToScheme, v1beta1imagebuilder.SchemeBuilder.AddToScheme, + v1beta2imagebuilder.SchemeBuilder.AddToScheme, v1beta1inspector.SchemeBuilder.AddToScheme, v1beta1inspector2.SchemeBuilder.AddToScheme, v1beta1iot.SchemeBuilder.AddToScheme, + v1beta2iot.SchemeBuilder.AddToScheme, v1beta1ivs.SchemeBuilder.AddToScheme, + v1beta2ivs.SchemeBuilder.AddToScheme, v1beta1kafka.SchemeBuilder.AddToScheme, v1beta2kafka.SchemeBuilder.AddToScheme, + v1beta3kafka.SchemeBuilder.AddToScheme, v1beta1kafkaconnect.SchemeBuilder.AddToScheme, + v1beta2kafkaconnect.SchemeBuilder.AddToScheme, v1beta1kendra.SchemeBuilder.AddToScheme, + v1beta2kendra.SchemeBuilder.AddToScheme, v1beta1keyspaces.SchemeBuilder.AddToScheme, + v1beta2keyspaces.SchemeBuilder.AddToScheme, v1beta1kinesis.SchemeBuilder.AddToScheme, + v1beta2kinesis.SchemeBuilder.AddToScheme, v1beta1kinesisanalytics.SchemeBuilder.AddToScheme, + v1beta2kinesisanalytics.SchemeBuilder.AddToScheme, v1beta1kinesisanalyticsv2.SchemeBuilder.AddToScheme, + v1beta2kinesisanalyticsv2.SchemeBuilder.AddToScheme, v1beta1kinesisvideo.SchemeBuilder.AddToScheme, v1beta1kms.SchemeBuilder.AddToScheme, v1beta1lakeformation.SchemeBuilder.AddToScheme, + v1beta2lakeformation.SchemeBuilder.AddToScheme, 
v1beta1lambda.SchemeBuilder.AddToScheme, + v1beta2lambda.SchemeBuilder.AddToScheme, v1beta1lexmodels.SchemeBuilder.AddToScheme, + v1beta2lexmodels.SchemeBuilder.AddToScheme, v1beta1licensemanager.SchemeBuilder.AddToScheme, v1beta1lightsail.SchemeBuilder.AddToScheme, + v1beta2lightsail.SchemeBuilder.AddToScheme, v1beta1location.SchemeBuilder.AddToScheme, + v1beta2location.SchemeBuilder.AddToScheme, v1beta1macie2.SchemeBuilder.AddToScheme, + v1beta2macie2.SchemeBuilder.AddToScheme, v1beta1mediaconvert.SchemeBuilder.AddToScheme, + v1beta2mediaconvert.SchemeBuilder.AddToScheme, v1beta1medialive.SchemeBuilder.AddToScheme, + v1beta2medialive.SchemeBuilder.AddToScheme, v1beta1mediapackage.SchemeBuilder.AddToScheme, v1beta1mediastore.SchemeBuilder.AddToScheme, v1beta1memorydb.SchemeBuilder.AddToScheme, + v1beta2memorydb.SchemeBuilder.AddToScheme, v1alpha1.SchemeBuilder.AddToScheme, v1beta1mq.SchemeBuilder.AddToScheme, + v1beta2mq.SchemeBuilder.AddToScheme, v1beta1neptune.SchemeBuilder.AddToScheme, + v1beta2neptune.SchemeBuilder.AddToScheme, v1beta1networkfirewall.SchemeBuilder.AddToScheme, + v1beta2networkfirewall.SchemeBuilder.AddToScheme, v1beta1networkmanager.SchemeBuilder.AddToScheme, + v1beta2networkmanager.SchemeBuilder.AddToScheme, v1beta1opensearch.SchemeBuilder.AddToScheme, + v1beta2opensearch.SchemeBuilder.AddToScheme, v1beta1opensearchserverless.SchemeBuilder.AddToScheme, + v1beta2opensearchserverless.SchemeBuilder.AddToScheme, v1beta1opsworks.SchemeBuilder.AddToScheme, + v1beta2opsworks.SchemeBuilder.AddToScheme, v1beta1organizations.SchemeBuilder.AddToScheme, v1beta1pinpoint.SchemeBuilder.AddToScheme, + v1beta2pinpoint.SchemeBuilder.AddToScheme, v1beta1qldb.SchemeBuilder.AddToScheme, + v1beta2qldb.SchemeBuilder.AddToScheme, v1beta1quicksight.SchemeBuilder.AddToScheme, v1beta1ram.SchemeBuilder.AddToScheme, v1beta1rds.SchemeBuilder.AddToScheme, v1beta2rds.SchemeBuilder.AddToScheme, + v1beta3rds.SchemeBuilder.AddToScheme, 
v1beta1redshift.SchemeBuilder.AddToScheme, + v1beta2redshift.SchemeBuilder.AddToScheme, v1beta1redshiftserverless.SchemeBuilder.AddToScheme, v1beta1resourcegroups.SchemeBuilder.AddToScheme, + v1beta2resourcegroups.SchemeBuilder.AddToScheme, v1beta1rolesanywhere.SchemeBuilder.AddToScheme, v1beta1route53.SchemeBuilder.AddToScheme, + v1beta2route53.SchemeBuilder.AddToScheme, v1beta1route53recoverycontrolconfig.SchemeBuilder.AddToScheme, + v1beta2route53recoverycontrolconfig.SchemeBuilder.AddToScheme, v1beta1route53recoveryreadiness.SchemeBuilder.AddToScheme, + v1beta2route53recoveryreadiness.SchemeBuilder.AddToScheme, v1beta1route53resolver.SchemeBuilder.AddToScheme, v1beta1rum.SchemeBuilder.AddToScheme, + v1beta2rum.SchemeBuilder.AddToScheme, v1beta1s3.SchemeBuilder.AddToScheme, + v1beta2s3.SchemeBuilder.AddToScheme, v1beta1s3control.SchemeBuilder.AddToScheme, + v1beta2s3control.SchemeBuilder.AddToScheme, v1beta1sagemaker.SchemeBuilder.AddToScheme, + v1beta2sagemaker.SchemeBuilder.AddToScheme, v1beta1scheduler.SchemeBuilder.AddToScheme, + v1beta2scheduler.SchemeBuilder.AddToScheme, v1beta1schemas.SchemeBuilder.AddToScheme, v1beta1secretsmanager.SchemeBuilder.AddToScheme, + v1beta2secretsmanager.SchemeBuilder.AddToScheme, v1beta1securityhub.SchemeBuilder.AddToScheme, + v1beta2securityhub.SchemeBuilder.AddToScheme, v1beta1serverlessrepo.SchemeBuilder.AddToScheme, v1beta1servicecatalog.SchemeBuilder.AddToScheme, + v1beta2servicecatalog.SchemeBuilder.AddToScheme, v1beta1servicediscovery.SchemeBuilder.AddToScheme, + v1beta2servicediscovery.SchemeBuilder.AddToScheme, v1beta1servicequotas.SchemeBuilder.AddToScheme, v1beta1ses.SchemeBuilder.AddToScheme, + v1beta2ses.SchemeBuilder.AddToScheme, v1beta1sesv2.SchemeBuilder.AddToScheme, + v1beta2sesv2.SchemeBuilder.AddToScheme, v1beta1sfn.SchemeBuilder.AddToScheme, + v1beta2sfn.SchemeBuilder.AddToScheme, v1beta1signer.SchemeBuilder.AddToScheme, + v1beta2signer.SchemeBuilder.AddToScheme, v1beta1simpledb.SchemeBuilder.AddToScheme, 
v1beta1sns.SchemeBuilder.AddToScheme, v1beta1sqs.SchemeBuilder.AddToScheme, v1beta1ssm.SchemeBuilder.AddToScheme, + v1beta2ssm.SchemeBuilder.AddToScheme, v1beta1ssoadmin.SchemeBuilder.AddToScheme, + v1beta2ssoadmin.SchemeBuilder.AddToScheme, v1beta1swf.SchemeBuilder.AddToScheme, v1beta1timestreamwrite.SchemeBuilder.AddToScheme, + v1beta2timestreamwrite.SchemeBuilder.AddToScheme, v1beta1transcribe.SchemeBuilder.AddToScheme, + v1beta2transcribe.SchemeBuilder.AddToScheme, v1beta1transfer.SchemeBuilder.AddToScheme, + v1beta2transfer.SchemeBuilder.AddToScheme, v1alpha1apis.SchemeBuilder.AddToScheme, v1beta1apis.SchemeBuilder.AddToScheme, v1beta1vpc.SchemeBuilder.AddToScheme, v1beta1waf.SchemeBuilder.AddToScheme, + v1beta2waf.SchemeBuilder.AddToScheme, v1beta1wafregional.SchemeBuilder.AddToScheme, + v1beta2wafregional.SchemeBuilder.AddToScheme, v1beta1wafv2.SchemeBuilder.AddToScheme, v1beta1workspaces.SchemeBuilder.AddToScheme, + v1beta2workspaces.SchemeBuilder.AddToScheme, v1beta1xray.SchemeBuilder.AddToScheme, + v1beta2xray.SchemeBuilder.AddToScheme, ) } diff --git a/examples-generated/accessanalyzer/v1beta2/analyzer.yaml b/examples-generated/accessanalyzer/v1beta2/analyzer.yaml new file mode 100644 index 0000000000..df59ecf686 --- /dev/null +++ b/examples-generated/accessanalyzer/v1beta2/analyzer.yaml @@ -0,0 +1,11 @@ +apiVersion: accessanalyzer.aws.upbound.io/v1beta2 +kind: Analyzer +metadata: + annotations: + meta.upbound.io/example-id: accessanalyzer/v1beta2/analyzer + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/acm/v1beta1/certificatevalidation.yaml b/examples-generated/acm/v1beta1/certificatevalidation.yaml index ac5715b26f..41a1548738 100644 --- a/examples-generated/acm/v1beta1/certificatevalidation.yaml +++ b/examples-generated/acm/v1beta1/certificatevalidation.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: acm.aws.upbound.io/v1beta1 +apiVersion: 
acm.aws.upbound.io/v1beta2 kind: Certificate metadata: annotations: @@ -32,7 +32,7 @@ spec: --- -apiVersion: elbv2.aws.upbound.io/v1beta1 +apiVersion: elbv2.aws.upbound.io/v1beta2 kind: LBListener metadata: annotations: @@ -47,7 +47,7 @@ spec: --- -apiVersion: route53.aws.upbound.io/v1beta1 +apiVersion: route53.aws.upbound.io/v1beta2 kind: Record metadata: annotations: diff --git a/examples-generated/acm/v1beta2/certificate.yaml b/examples-generated/acm/v1beta2/certificate.yaml new file mode 100644 index 0000000000..96ac112326 --- /dev/null +++ b/examples-generated/acm/v1beta2/certificate.yaml @@ -0,0 +1,15 @@ +apiVersion: acm.aws.upbound.io/v1beta2 +kind: Certificate +metadata: + annotations: + meta.upbound.io/example-id: acm/v1beta2/certificate + labels: + testing.upbound.io/example-name: cert + name: cert +spec: + forProvider: + domainName: example.com + region: us-west-1 + tags: + Environment: test + validationMethod: DNS diff --git a/examples-generated/acmpca/v1beta1/certificateauthoritycertificate.yaml b/examples-generated/acmpca/v1beta1/certificateauthoritycertificate.yaml index bc3cdf222b..5f8e1cd4a8 100644 --- a/examples-generated/acmpca/v1beta1/certificateauthoritycertificate.yaml +++ b/examples-generated/acmpca/v1beta1/certificateauthoritycertificate.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: acmpca.aws.upbound.io/v1beta1 +apiVersion: acmpca.aws.upbound.io/v1beta2 kind: Certificate metadata: annotations: @@ -49,7 +49,7 @@ spec: --- -apiVersion: acmpca.aws.upbound.io/v1beta1 +apiVersion: acmpca.aws.upbound.io/v1beta2 kind: CertificateAuthority metadata: annotations: diff --git a/examples-generated/acmpca/v1beta1/permission.yaml b/examples-generated/acmpca/v1beta1/permission.yaml index 60f4637340..b0c9af26b2 100644 --- a/examples-generated/acmpca/v1beta1/permission.yaml +++ b/examples-generated/acmpca/v1beta1/permission.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: acmpca.aws.upbound.io/v1beta1 +apiVersion: acmpca.aws.upbound.io/v1beta2 kind: 
CertificateAuthority metadata: annotations: diff --git a/examples-generated/acmpca/v1beta2/certificate.yaml b/examples-generated/acmpca/v1beta2/certificate.yaml new file mode 100644 index 0000000000..53059614c4 --- /dev/null +++ b/examples-generated/acmpca/v1beta2/certificate.yaml @@ -0,0 +1,42 @@ +apiVersion: acmpca.aws.upbound.io/v1beta2 +kind: Certificate +metadata: + annotations: + meta.upbound.io/example-id: acmpca/v1beta2/certificate + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + certificateAuthorityArnSelector: + matchLabels: + testing.upbound.io/example-name: example + certificateSigningRequestSecretRef: + key: attribute.cert_request_pem + name: example-cert-request + namespace: upbound-system + region: us-west-1 + signingAlgorithm: SHA256WITHRSA + validity: + - type: YEARS + value: 1 + +--- + +apiVersion: acmpca.aws.upbound.io/v1beta2 +kind: CertificateAuthority +metadata: + annotations: + meta.upbound.io/example-id: acmpca/v1beta2/certificate + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + certificateAuthorityConfiguration: + - keyAlgorithm: RSA_4096 + signingAlgorithm: SHA512WITHRSA + subject: + - commonName: example.com + permanentDeletionTimeInDays: 7 + region: us-west-1 diff --git a/examples-generated/acmpca/v1beta2/certificateauthority.yaml b/examples-generated/acmpca/v1beta2/certificateauthority.yaml new file mode 100644 index 0000000000..92f31f1b7c --- /dev/null +++ b/examples-generated/acmpca/v1beta2/certificateauthority.yaml @@ -0,0 +1,17 @@ +apiVersion: acmpca.aws.upbound.io/v1beta2 +kind: CertificateAuthority +metadata: + annotations: + meta.upbound.io/example-id: acmpca/v1beta2/certificateauthority + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + certificateAuthorityConfiguration: + - keyAlgorithm: RSA_4096 + signingAlgorithm: SHA512WITHRSA + subject: + - commonName: example.com + permanentDeletionTimeInDays: 7 
+ region: us-west-1 diff --git a/examples-generated/amp/v1beta1/alertmanagerdefinition.yaml b/examples-generated/amp/v1beta1/alertmanagerdefinition.yaml index 1dc4b19f77..f611a26cfc 100644 --- a/examples-generated/amp/v1beta1/alertmanagerdefinition.yaml +++ b/examples-generated/amp/v1beta1/alertmanagerdefinition.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: amp.aws.upbound.io/v1beta1 +apiVersion: amp.aws.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/amp/v1beta1/rulegroupnamespace.yaml b/examples-generated/amp/v1beta1/rulegroupnamespace.yaml index e9c219a33f..3e1c38753f 100644 --- a/examples-generated/amp/v1beta1/rulegroupnamespace.yaml +++ b/examples-generated/amp/v1beta1/rulegroupnamespace.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: amp.aws.upbound.io/v1beta1 +apiVersion: amp.aws.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/amp/v1beta2/workspace.yaml b/examples-generated/amp/v1beta2/workspace.yaml new file mode 100644 index 0000000000..47920d5717 --- /dev/null +++ b/examples-generated/amp/v1beta2/workspace.yaml @@ -0,0 +1,14 @@ +apiVersion: amp.aws.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: amp/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + alias: example + region: us-west-1 + tags: + Environment: production diff --git a/examples-generated/amplify/v1beta1/backendenvironment.yaml b/examples-generated/amplify/v1beta1/backendenvironment.yaml index c9baf23c32..83080ef62a 100644 --- a/examples-generated/amplify/v1beta1/backendenvironment.yaml +++ b/examples-generated/amplify/v1beta1/backendenvironment.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: amplify.aws.upbound.io/v1beta1 +apiVersion: amplify.aws.upbound.io/v1beta2 kind: App metadata: annotations: diff --git a/examples-generated/amplify/v1beta1/branch.yaml b/examples-generated/amplify/v1beta1/branch.yaml index 
3a2b01f677..97cb4acdb9 100644 --- a/examples-generated/amplify/v1beta1/branch.yaml +++ b/examples-generated/amplify/v1beta1/branch.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: amplify.aws.upbound.io/v1beta1 +apiVersion: amplify.aws.upbound.io/v1beta2 kind: App metadata: annotations: diff --git a/examples-generated/amplify/v1beta1/webhook.yaml b/examples-generated/amplify/v1beta1/webhook.yaml index 0c3d95af3f..89fc7f7814 100644 --- a/examples-generated/amplify/v1beta1/webhook.yaml +++ b/examples-generated/amplify/v1beta1/webhook.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: amplify.aws.upbound.io/v1beta1 +apiVersion: amplify.aws.upbound.io/v1beta2 kind: App metadata: annotations: diff --git a/examples-generated/amplify/v1beta2/app.yaml b/examples-generated/amplify/v1beta2/app.yaml new file mode 100644 index 0000000000..b23c5885d1 --- /dev/null +++ b/examples-generated/amplify/v1beta2/app.yaml @@ -0,0 +1,36 @@ +apiVersion: amplify.aws.upbound.io/v1beta2 +kind: App +metadata: + annotations: + meta.upbound.io/example-id: amplify/v1beta2/app + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + buildSpec: | + version: 0.1 + frontend: + phases: + preBuild: + commands: + - yarn install + build: + commands: + - yarn run build + artifacts: + baseDirectory: build + files: + - '**/*' + cache: + paths: + - node_modules/**/* + customRule: + - source: /<*> + status: "404" + target: /index.html + environmentVariables: + ENV: test + name: example + region: us-west-1 + repository: https://github.com/example/app diff --git a/examples-generated/apigateway/v1beta1/authorizer.yaml b/examples-generated/apigateway/v1beta1/authorizer.yaml index e696800725..b384bc9ca0 100644 --- a/examples-generated/apigateway/v1beta1/authorizer.yaml +++ b/examples-generated/apigateway/v1beta1/authorizer.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: 
@@ -83,7 +83,7 @@ spec: --- -apiVersion: lambda.aws.upbound.io/v1beta1 +apiVersion: lambda.aws.upbound.io/v1beta2 kind: Function metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/basepathmapping.yaml b/examples-generated/apigateway/v1beta1/basepathmapping.yaml index d61adddf92..578dcb7ca0 100644 --- a/examples-generated/apigateway/v1beta1/basepathmapping.yaml +++ b/examples-generated/apigateway/v1beta1/basepathmapping.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: DomainName metadata: annotations: @@ -43,7 +43,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: Stage metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/deployment.yaml b/examples-generated/apigateway/v1beta1/deployment.yaml index c1d6520e2b..79c6ab2fc9 100644 --- a/examples-generated/apigateway/v1beta1/deployment.yaml +++ b/examples-generated/apigateway/v1beta1/deployment.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: @@ -52,7 +52,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: Stage metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/documentationversion.yaml b/examples-generated/apigateway/v1beta1/documentationversion.yaml index a729c47a38..b6bdb01b3e 100644 --- a/examples-generated/apigateway/v1beta1/documentationversion.yaml +++ b/examples-generated/apigateway/v1beta1/documentationversion.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: DocumentationPart metadata: annotations: @@ -37,7 +37,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: 
annotations: diff --git a/examples-generated/apigateway/v1beta1/gatewayresponse.yaml b/examples-generated/apigateway/v1beta1/gatewayresponse.yaml index 26161c4a0e..f7a920d2cc 100644 --- a/examples-generated/apigateway/v1beta1/gatewayresponse.yaml +++ b/examples-generated/apigateway/v1beta1/gatewayresponse.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/integrationresponse.yaml b/examples-generated/apigateway/v1beta1/integrationresponse.yaml index 1567c812ef..60ef165576 100644 --- a/examples-generated/apigateway/v1beta1/integrationresponse.yaml +++ b/examples-generated/apigateway/v1beta1/integrationresponse.yaml @@ -31,7 +31,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: Integration metadata: annotations: @@ -122,7 +122,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/method.yaml b/examples-generated/apigateway/v1beta1/method.yaml index d800c14aa8..7ea54b29f8 100644 --- a/examples-generated/apigateway/v1beta1/method.yaml +++ b/examples-generated/apigateway/v1beta1/method.yaml @@ -41,7 +41,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/methodresponse.yaml b/examples-generated/apigateway/v1beta1/methodresponse.yaml index b876186b66..d0c39dd45c 100644 --- a/examples-generated/apigateway/v1beta1/methodresponse.yaml +++ b/examples-generated/apigateway/v1beta1/methodresponse.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: Integration metadata: annotations: @@ -89,7 
+89,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/model.yaml b/examples-generated/apigateway/v1beta1/model.yaml index 80102c2e3d..9fa7be830a 100644 --- a/examples-generated/apigateway/v1beta1/model.yaml +++ b/examples-generated/apigateway/v1beta1/model.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/resource.yaml b/examples-generated/apigateway/v1beta1/resource.yaml index f965caaa37..3809a2d6c5 100644 --- a/examples-generated/apigateway/v1beta1/resource.yaml +++ b/examples-generated/apigateway/v1beta1/resource.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/restapipolicy.yaml b/examples-generated/apigateway/v1beta1/restapipolicy.yaml index d511442755..b5f6303070 100644 --- a/examples-generated/apigateway/v1beta1/restapipolicy.yaml +++ b/examples-generated/apigateway/v1beta1/restapipolicy.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/usageplankey.yaml b/examples-generated/apigateway/v1beta1/usageplankey.yaml index ecc9bc89e9..700f2fc3ce 100644 --- a/examples-generated/apigateway/v1beta1/usageplankey.yaml +++ b/examples-generated/apigateway/v1beta1/usageplankey.yaml @@ -34,7 +34,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: apigateway.aws.upbound.io/v1beta2 kind: RestAPI metadata: annotations: @@ -49,7 +49,7 @@ spec: --- -apiVersion: apigateway.aws.upbound.io/v1beta1 +apiVersion: 
apigateway.aws.upbound.io/v1beta2 kind: UsagePlan metadata: annotations: diff --git a/examples-generated/apigateway/v1beta1/vpclink.yaml b/examples-generated/apigateway/v1beta1/vpclink.yaml index d2af8fbf01..6eaed56134 100644 --- a/examples-generated/apigateway/v1beta1/vpclink.yaml +++ b/examples-generated/apigateway/v1beta1/vpclink.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: elbv2.aws.upbound.io/v1beta1 +apiVersion: elbv2.aws.upbound.io/v1beta2 kind: LB metadata: annotations: diff --git a/examples-generated/apigateway/v1beta2/documentationpart.yaml b/examples-generated/apigateway/v1beta2/documentationpart.yaml new file mode 100644 index 0000000000..027fe5b44b --- /dev/null +++ b/examples-generated/apigateway/v1beta2/documentationpart.yaml @@ -0,0 +1,34 @@ +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: DocumentationPart +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/documentationpart + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: + - method: GET + path: /example + type: METHOD + properties: '{"description":"Example description"}' + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: RestAPI +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/documentationpart + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example_api + region: us-west-1 diff --git a/examples-generated/apigateway/v1beta2/domainname.yaml b/examples-generated/apigateway/v1beta2/domainname.yaml new file mode 100644 index 0000000000..92776a0749 --- /dev/null +++ b/examples-generated/apigateway/v1beta2/domainname.yaml @@ -0,0 +1,38 @@ +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: DomainName +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/domainname + labels: + testing.upbound.io/example-name: 
example + name: example +spec: + forProvider: + certificateArnSelector: + matchLabels: + testing.upbound.io/example-name: example + domainName: api.example.com + region: us-west-1 + +--- + +apiVersion: route53.aws.upbound.io/v1beta2 +kind: Record +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/domainname + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + alias: + - evaluateTargetHealth: true + name: ${aws_api_gateway_domain_name.example.cloudfront_domain_name} + zoneId: ${aws_api_gateway_domain_name.example.cloudfront_zone_id} + name: api.example.com + region: us-west-1 + type: A + zoneIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/apigateway/v1beta2/integration.yaml b/examples-generated/apigateway/v1beta2/integration.yaml new file mode 100644 index 0000000000..adaf7088b6 --- /dev/null +++ b/examples-generated/apigateway/v1beta2/integration.yaml @@ -0,0 +1,91 @@ +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: Integration +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/integration + labels: + testing.upbound.io/example-name: MyDemoIntegration + name: mydemointegration +spec: + forProvider: + cacheKeyParameters: + - method.request.path.param + cacheNamespace: foobar + httpMethodSelector: + matchLabels: + testing.upbound.io/example-name: MyDemoMethod + region: us-west-1 + requestParameters: + integration.request.header.X-Authorization: '''static''' + requestTemplates: + application/xml: | + { + "body" : $input.json('$') + } + resourceIdSelector: + matchLabels: + testing.upbound.io/example-name: MyDemoResource + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: MyDemoAPI + timeoutMilliseconds: 29000 + type: MOCK + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta1 +kind: Method +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/integration + labels: + 
testing.upbound.io/example-name: MyDemoMethod + name: mydemomethod +spec: + forProvider: + authorization: NONE + httpMethod: GET + region: us-west-1 + resourceIdSelector: + matchLabels: + testing.upbound.io/example-name: MyDemoResource + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: MyDemoAPI + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta1 +kind: Resource +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/integration + labels: + testing.upbound.io/example-name: MyDemoResource + name: mydemoresource +spec: + forProvider: + parentIdSelector: + matchLabels: + testing.upbound.io/example-name: MyDemoAPI + pathPart: mydemoresource + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: MyDemoAPI + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: RestAPI +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/integration + labels: + testing.upbound.io/example-name: MyDemoAPI + name: mydemoapi +spec: + forProvider: + description: This is my API for demonstration purposes + name: MyDemoAPI + region: us-west-1 diff --git a/examples-generated/apigateway/v1beta2/methodsettings.yaml b/examples-generated/apigateway/v1beta2/methodsettings.yaml new file mode 100644 index 0000000000..8b81abf83f --- /dev/null +++ b/examples-generated/apigateway/v1beta2/methodsettings.yaml @@ -0,0 +1,96 @@ +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: MethodSettings +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/methodsettings + labels: + testing.upbound.io/example-name: all + name: all +spec: + forProvider: + methodPath: '*/*' + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + settings: + - loggingLevel: ERROR + metricsEnabled: true + stageNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta1 +kind: Deployment +metadata: + 
annotations: + meta.upbound.io/example-id: apigateway/v1beta2/methodsettings + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + triggers: + redeployment: ${sha1(jsonencode(aws_api_gateway_rest_api.example.body))} + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: RestAPI +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/methodsettings + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + body: |- + ${jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + })} + name: example + region: us-west-1 + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: Stage +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/methodsettings + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + deploymentIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + stageName: example diff --git a/examples-generated/apigateway/v1beta2/restapi.yaml b/examples-generated/apigateway/v1beta2/restapi.yaml new file mode 100644 index 0000000000..29f9d8c56d --- /dev/null +++ b/examples-generated/apigateway/v1beta2/restapi.yaml @@ -0,0 +1,75 @@ +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: RestAPI +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/restapi + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + body: |- + ${jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + 
version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + })} + endpointConfiguration: + - types: + - REGIONAL + name: example + region: us-west-1 + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta1 +kind: Deployment +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/restapi + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + triggers: + redeployment: ${sha1(jsonencode(aws_api_gateway_rest_api.example.body))} + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: Stage +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/restapi + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + deploymentIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + stageName: example diff --git a/examples-generated/apigateway/v1beta2/stage.yaml b/examples-generated/apigateway/v1beta2/stage.yaml new file mode 100644 index 0000000000..7001a7bbfe --- /dev/null +++ b/examples-generated/apigateway/v1beta2/stage.yaml @@ -0,0 +1,96 @@ +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: Stage +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/stage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + deploymentIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + stageName: example + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta1 +kind: Deployment +metadata: + annotations: + 
meta.upbound.io/example-id: apigateway/v1beta2/stage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + triggers: + redeployment: ${sha1(jsonencode(aws_api_gateway_rest_api.example.body))} + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: MethodSettings +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/stage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + methodPath: '*/*' + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + settings: + - loggingLevel: INFO + metricsEnabled: true + stageNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: RestAPI +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/stage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + body: |- + ${jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + })} + name: example + region: us-west-1 diff --git a/examples-generated/apigateway/v1beta2/usageplan.yaml b/examples-generated/apigateway/v1beta2/usageplan.yaml new file mode 100644 index 0000000000..ff627e63d8 --- /dev/null +++ b/examples-generated/apigateway/v1beta2/usageplan.yaml @@ -0,0 +1,130 @@ +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: UsagePlan +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/usageplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiStages: + - apiIdSelector: + matchLabels: + 
testing.upbound.io/example-name: example + stageSelector: + matchLabels: + testing.upbound.io/example-name: development + - apiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + stageSelector: + matchLabels: + testing.upbound.io/example-name: production + description: my description + name: my-usage-plan + productCode: MYCODE + quotaSettings: + - limit: 20 + offset: 2 + period: WEEK + region: us-west-1 + throttleSettings: + - burstLimit: 5 + rateLimit: 10 + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta1 +kind: Deployment +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/usageplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + triggers: + redeployment: ${sha1(jsonencode(aws_api_gateway_rest_api.example.body))} + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: RestAPI +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/usageplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + body: |- + ${jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + })} + name: example + region: us-west-1 + +--- + +apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: Stage +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/usageplan + labels: + testing.upbound.io/example-name: development + name: development +spec: + forProvider: + deploymentIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + stageName: development + +--- + 
+apiVersion: apigateway.aws.upbound.io/v1beta2 +kind: Stage +metadata: + annotations: + meta.upbound.io/example-id: apigateway/v1beta2/usageplan + labels: + testing.upbound.io/example-name: production + name: production +spec: + forProvider: + deploymentIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + restApiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + stageName: production diff --git a/examples-generated/apigatewayv2/v1beta1/route.yaml b/examples-generated/apigatewayv2/v1beta1/route.yaml index 2d3710410a..d7161d6e45 100644 --- a/examples-generated/apigatewayv2/v1beta1/route.yaml +++ b/examples-generated/apigatewayv2/v1beta1/route.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: apigatewayv2.aws.upbound.io/v1beta1 +apiVersion: apigatewayv2.aws.upbound.io/v1beta2 kind: API metadata: annotations: diff --git a/examples-generated/apigatewayv2/v1beta2/api.yaml b/examples-generated/apigatewayv2/v1beta2/api.yaml new file mode 100644 index 0000000000..592bfce8bf --- /dev/null +++ b/examples-generated/apigatewayv2/v1beta2/api.yaml @@ -0,0 +1,14 @@ +apiVersion: apigatewayv2.aws.upbound.io/v1beta2 +kind: API +metadata: + annotations: + meta.upbound.io/example-id: apigatewayv2/v1beta2/api + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example-websocket-api + protocolType: WEBSOCKET + region: us-west-1 + routeSelectionExpression: $request.body.action diff --git a/examples-generated/apigatewayv2/v1beta2/authorizer.yaml b/examples-generated/apigatewayv2/v1beta2/authorizer.yaml new file mode 100644 index 0000000000..3924f3718d --- /dev/null +++ b/examples-generated/apigatewayv2/v1beta2/authorizer.yaml @@ -0,0 +1,21 @@ +apiVersion: apigatewayv2.aws.upbound.io/v1beta2 +kind: Authorizer +metadata: + annotations: + meta.upbound.io/example-id: apigatewayv2/v1beta2/authorizer + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
apiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + authorizerType: REQUEST + authorizerUriSelector: + matchLabels: + testing.upbound.io/example-name: example + identitySources: + - route.request.header.Auth + name: example-authorizer + region: us-west-1 diff --git a/examples-generated/apigatewayv2/v1beta2/domainname.yaml b/examples-generated/apigatewayv2/v1beta2/domainname.yaml new file mode 100644 index 0000000000..8b852daf3f --- /dev/null +++ b/examples-generated/apigatewayv2/v1beta2/domainname.yaml @@ -0,0 +1,17 @@ +apiVersion: apigatewayv2.aws.upbound.io/v1beta2 +kind: DomainName +metadata: + annotations: + meta.upbound.io/example-id: apigatewayv2/v1beta2/domainname + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + domainNameConfiguration: + - certificateArnSelector: + matchLabels: + testing.upbound.io/example-name: example + endpointType: REGIONAL + securityPolicy: TLS_1_2 + region: us-west-1 diff --git a/examples-generated/apigatewayv2/v1beta2/integration.yaml b/examples-generated/apigatewayv2/v1beta2/integration.yaml new file mode 100644 index 0000000000..1098e41a98 --- /dev/null +++ b/examples-generated/apigatewayv2/v1beta2/integration.yaml @@ -0,0 +1,15 @@ +apiVersion: apigatewayv2.aws.upbound.io/v1beta2 +kind: Integration +metadata: + annotations: + meta.upbound.io/example-id: apigatewayv2/v1beta2/integration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + integrationType: MOCK + region: us-west-1 diff --git a/examples-generated/apigatewayv2/v1beta2/stage.yaml b/examples-generated/apigatewayv2/v1beta2/stage.yaml new file mode 100644 index 0000000000..08e9a6e59f --- /dev/null +++ b/examples-generated/apigatewayv2/v1beta2/stage.yaml @@ -0,0 +1,14 @@ +apiVersion: apigatewayv2.aws.upbound.io/v1beta2 +kind: Stage +metadata: + annotations: + meta.upbound.io/example-id: 
apigatewayv2/v1beta2/stage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/appautoscaling/v1beta2/policy.yaml b/examples-generated/appautoscaling/v1beta2/policy.yaml new file mode 100644 index 0000000000..80854f2635 --- /dev/null +++ b/examples-generated/appautoscaling/v1beta2/policy.yaml @@ -0,0 +1,44 @@ +apiVersion: appautoscaling.aws.upbound.io/v1beta2 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: appautoscaling/v1beta2/policy + labels: + testing.upbound.io/example-name: dynamodb_table_read_policy + name: dynamodb-table-read-policy +spec: + forProvider: + policyType: TargetTrackingScaling + region: us-west-1 + resourceIdSelector: + matchLabels: + testing.upbound.io/example-name: dynamodb_table_read_target + scalableDimensionSelector: + matchLabels: + testing.upbound.io/example-name: dynamodb_table_read_target + serviceNamespaceSelector: + matchLabels: + testing.upbound.io/example-name: dynamodb_table_read_target + targetTrackingScalingPolicyConfiguration: + - predefinedMetricSpecification: + - predefinedMetricType: DynamoDBReadCapacityUtilization + targetValue: 70 + +--- + +apiVersion: appautoscaling.aws.upbound.io/v1beta1 +kind: Target +metadata: + annotations: + meta.upbound.io/example-id: appautoscaling/v1beta2/policy + labels: + testing.upbound.io/example-name: dynamodb_table_read_target + name: dynamodb-table-read-target +spec: + forProvider: + maxCapacity: 100 + minCapacity: 5 + region: us-west-1 + resourceId: table/tableName + scalableDimension: dynamodb:table:ReadCapacityUnits + serviceNamespace: dynamodb diff --git a/examples-generated/appautoscaling/v1beta2/scheduledaction.yaml b/examples-generated/appautoscaling/v1beta2/scheduledaction.yaml new file mode 100644 index 0000000000..b8fb98d75a --- /dev/null +++ 
b/examples-generated/appautoscaling/v1beta2/scheduledaction.yaml @@ -0,0 +1,44 @@ +apiVersion: appautoscaling.aws.upbound.io/v1beta2 +kind: ScheduledAction +metadata: + annotations: + meta.upbound.io/example-id: appautoscaling/v1beta2/scheduledaction + labels: + testing.upbound.io/example-name: dynamodb + name: dynamodb +spec: + forProvider: + name: dynamodb + region: us-west-1 + resourceIdSelector: + matchLabels: + testing.upbound.io/example-name: dynamodb + scalableDimensionSelector: + matchLabels: + testing.upbound.io/example-name: dynamodb + scalableTargetAction: + - maxCapacity: 200 + minCapacity: 1 + schedule: at(2006-01-02T15:04:05) + serviceNamespaceSelector: + matchLabels: + testing.upbound.io/example-name: dynamodb + +--- + +apiVersion: appautoscaling.aws.upbound.io/v1beta1 +kind: Target +metadata: + annotations: + meta.upbound.io/example-id: appautoscaling/v1beta2/scheduledaction + labels: + testing.upbound.io/example-name: dynamodb + name: dynamodb +spec: + forProvider: + maxCapacity: 100 + minCapacity: 5 + region: us-west-1 + resourceId: table/tableName + scalableDimension: dynamodb:table:ReadCapacityUnits + serviceNamespace: dynamodb diff --git a/examples-generated/appflow/v1beta2/flow.yaml b/examples-generated/appflow/v1beta2/flow.yaml new file mode 100644 index 0000000000..8aa27a3291 --- /dev/null +++ b/examples-generated/appflow/v1beta2/flow.yaml @@ -0,0 +1,123 @@ +apiVersion: appflow.aws.upbound.io/v1beta2 +kind: Flow +metadata: + annotations: + meta.upbound.io/example-id: appflow/v1beta2/flow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + destinationFlowConfig: + - connectorType: S3 + destinationConnectorProperties: + - s3: + - bucketNameSelector: + matchLabels: + testing.upbound.io/example-name: example_destination + s3OutputFormatConfig: + - prefixConfig: + - prefixType: PATH + region: us-west-1 + sourceFlowConfig: + - connectorType: S3 + sourceConnectorProperties: + - s3: + - bucketNameSelector: + 
matchLabels: + testing.upbound.io/example-name: example_source + bucketPrefix: example + task: + - connectorOperator: + - s3: NO_OP + destinationField: exampleField + sourceFields: + - exampleField + taskType: Map + triggerConfig: + - triggerType: OnDemand + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: appflow/v1beta2/flow + labels: + testing.upbound.io/example-name: example_destination + name: example-destination +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: appflow/v1beta2/flow + labels: + testing.upbound.io/example-name: example_source + name: example-source +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta1 +kind: BucketPolicy +metadata: + annotations: + meta.upbound.io/example-id: appflow/v1beta2/flow + labels: + testing.upbound.io/example-name: example_destination + name: example-destination +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example_destination + policy: ${data.aws_iam_policy_document.example_destination.json} + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta1 +kind: BucketPolicy +metadata: + annotations: + meta.upbound.io/example-id: appflow/v1beta2/flow + labels: + testing.upbound.io/example-name: example_source + name: example-source +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example_source + policy: ${data.aws_iam_policy_document.example_source.json} + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Object +metadata: + annotations: + meta.upbound.io/example-id: appflow/v1beta2/flow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
bucketSelector: + matchLabels: + testing.upbound.io/example-name: example_source + key: example_source.csv + region: us-west-1 + source: example_source.csv diff --git a/examples-generated/appintegrations/v1beta2/eventintegration.yaml b/examples-generated/appintegrations/v1beta2/eventintegration.yaml new file mode 100644 index 0000000000..ff2f851a73 --- /dev/null +++ b/examples-generated/appintegrations/v1beta2/eventintegration.yaml @@ -0,0 +1,17 @@ +apiVersion: appintegrations.aws.upbound.io/v1beta2 +kind: EventIntegration +metadata: + annotations: + meta.upbound.io/example-id: appintegrations/v1beta2/eventintegration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: Example Description + eventFilter: + - source: aws.partner/examplepartner.com + eventbridgeBus: default + region: us-west-1 + tags: + Name: Example Event Integration diff --git a/examples-generated/applicationinsights/v1beta1/application.yaml b/examples-generated/applicationinsights/v1beta1/application.yaml index 0afa9537f7..0aec74080f 100644 --- a/examples-generated/applicationinsights/v1beta1/application.yaml +++ b/examples-generated/applicationinsights/v1beta1/application.yaml @@ -12,7 +12,7 @@ spec: --- -apiVersion: resourcegroups.aws.upbound.io/v1beta1 +apiVersion: resourcegroups.aws.upbound.io/v1beta2 kind: Group metadata: annotations: diff --git a/examples-generated/appmesh/v1beta2/gatewayroute.yaml b/examples-generated/appmesh/v1beta2/gatewayroute.yaml new file mode 100644 index 0000000000..ac1d59433f --- /dev/null +++ b/examples-generated/appmesh/v1beta2/gatewayroute.yaml @@ -0,0 +1,28 @@ +apiVersion: appmesh.aws.upbound.io/v1beta2 +kind: GatewayRoute +metadata: + annotations: + meta.upbound.io/example-id: appmesh/v1beta2/gatewayroute + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + meshName: example-service-mesh + name: example-gateway-route + region: us-west-1 + spec: + - httpRoute: + - 
action: + - target: + - virtualService: + - virtualServiceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + match: + - prefix: / + tags: + Environment: test + virtualGatewayNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/appmesh/v1beta2/mesh.yaml b/examples-generated/appmesh/v1beta2/mesh.yaml new file mode 100644 index 0000000000..6398b72682 --- /dev/null +++ b/examples-generated/appmesh/v1beta2/mesh.yaml @@ -0,0 +1,11 @@ +apiVersion: appmesh.aws.upbound.io/v1beta2 +kind: Mesh +metadata: + annotations: + meta.upbound.io/example-id: appmesh/v1beta2/mesh + labels: + testing.upbound.io/example-name: simple + name: simple +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/appmesh/v1beta2/route.yaml b/examples-generated/appmesh/v1beta2/route.yaml new file mode 100644 index 0000000000..73fb1a34f7 --- /dev/null +++ b/examples-generated/appmesh/v1beta2/route.yaml @@ -0,0 +1,32 @@ +apiVersion: appmesh.aws.upbound.io/v1beta2 +kind: Route +metadata: + annotations: + meta.upbound.io/example-id: appmesh/v1beta2/route + labels: + testing.upbound.io/example-name: serviceb + name: serviceb +spec: + forProvider: + meshNameSelector: + matchLabels: + testing.upbound.io/example-name: simple + name: serviceB-route + region: us-west-1 + spec: + - httpRoute: + - action: + - weightedTarget: + - virtualNodeSelector: + matchLabels: + testing.upbound.io/example-name: serviceb1 + weight: 90 + - virtualNodeSelector: + matchLabels: + testing.upbound.io/example-name: serviceb2 + weight: 10 + match: + - prefix: / + virtualRouterNameSelector: + matchLabels: + testing.upbound.io/example-name: serviceb diff --git a/examples-generated/appmesh/v1beta2/virtualgateway.yaml b/examples-generated/appmesh/v1beta2/virtualgateway.yaml new file mode 100644 index 0000000000..463372dece --- /dev/null +++ b/examples-generated/appmesh/v1beta2/virtualgateway.yaml @@ -0,0 +1,20 @@ +apiVersion: 
appmesh.aws.upbound.io/v1beta2 +kind: VirtualGateway +metadata: + annotations: + meta.upbound.io/example-id: appmesh/v1beta2/virtualgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + meshName: example-service-mesh + name: example-virtual-gateway + region: us-west-1 + spec: + - listener: + - portMapping: + - port: 8080 + protocol: http + tags: + Environment: test diff --git a/examples-generated/appmesh/v1beta2/virtualnode.yaml b/examples-generated/appmesh/v1beta2/virtualnode.yaml new file mode 100644 index 0000000000..145795093e --- /dev/null +++ b/examples-generated/appmesh/v1beta2/virtualnode.yaml @@ -0,0 +1,26 @@ +apiVersion: appmesh.aws.upbound.io/v1beta2 +kind: VirtualNode +metadata: + annotations: + meta.upbound.io/example-id: appmesh/v1beta2/virtualnode + labels: + testing.upbound.io/example-name: serviceb1 + name: serviceb1 +spec: + forProvider: + meshNameSelector: + matchLabels: + testing.upbound.io/example-name: simple + name: serviceBv1 + region: us-west-1 + spec: + - backend: + - virtualService: + - virtualServiceName: servicea.simpleapp.local + listener: + - portMapping: + - port: 8080 + protocol: http + serviceDiscovery: + - dns: + - hostname: serviceb.simpleapp.local diff --git a/examples-generated/appmesh/v1beta2/virtualrouter.yaml b/examples-generated/appmesh/v1beta2/virtualrouter.yaml new file mode 100644 index 0000000000..bd9d20bd03 --- /dev/null +++ b/examples-generated/appmesh/v1beta2/virtualrouter.yaml @@ -0,0 +1,20 @@ +apiVersion: appmesh.aws.upbound.io/v1beta2 +kind: VirtualRouter +metadata: + annotations: + meta.upbound.io/example-id: appmesh/v1beta2/virtualrouter + labels: + testing.upbound.io/example-name: serviceb + name: serviceb +spec: + forProvider: + meshNameSelector: + matchLabels: + testing.upbound.io/example-name: simple + name: serviceB + region: us-west-1 + spec: + - listener: + - portMapping: + - port: 8080 + protocol: http diff --git 
a/examples-generated/appmesh/v1beta2/virtualservice.yaml b/examples-generated/appmesh/v1beta2/virtualservice.yaml new file mode 100644 index 0000000000..06b491e7ec --- /dev/null +++ b/examples-generated/appmesh/v1beta2/virtualservice.yaml @@ -0,0 +1,21 @@ +apiVersion: appmesh.aws.upbound.io/v1beta2 +kind: VirtualService +metadata: + annotations: + meta.upbound.io/example-id: appmesh/v1beta2/virtualservice + labels: + testing.upbound.io/example-name: servicea + name: servicea +spec: + forProvider: + meshNameSelector: + matchLabels: + testing.upbound.io/example-name: simple + name: servicea.simpleapp.local + region: us-west-1 + spec: + - provider: + - virtualNode: + - virtualNodeNameSelector: + matchLabels: + testing.upbound.io/example-name: serviceb1 diff --git a/examples-generated/apprunner/v1beta2/observabilityconfiguration.yaml b/examples-generated/apprunner/v1beta2/observabilityconfiguration.yaml new file mode 100644 index 0000000000..34ef83d733 --- /dev/null +++ b/examples-generated/apprunner/v1beta2/observabilityconfiguration.yaml @@ -0,0 +1,16 @@ +apiVersion: apprunner.aws.upbound.io/v1beta2 +kind: ObservabilityConfiguration +metadata: + annotations: + meta.upbound.io/example-id: apprunner/v1beta2/observabilityconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + observabilityConfigurationName: example + region: us-west-1 + tags: + Name: example-apprunner-observability-configuration + traceConfiguration: + - vendor: AWSXRAY diff --git a/examples-generated/apprunner/v1beta2/service.yaml b/examples-generated/apprunner/v1beta2/service.yaml new file mode 100644 index 0000000000..8c82b45552 --- /dev/null +++ b/examples-generated/apprunner/v1beta2/service.yaml @@ -0,0 +1,37 @@ +apiVersion: apprunner.aws.upbound.io/v1beta2 +kind: Service +metadata: + annotations: + meta.upbound.io/example-id: apprunner/v1beta2/service + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
networkConfiguration: + - egressConfiguration: + - egressType: VPC + vpcConnectorArnSelector: + matchLabels: + testing.upbound.io/example-name: connector + region: us-west-1 + serviceName: example + sourceConfiguration: + - authenticationConfiguration: + - connectionArnSelector: + matchLabels: + testing.upbound.io/example-name: example + codeRepository: + - codeConfiguration: + - codeConfigurationValues: + - buildCommand: python setup.py develop + port: "8000" + runtime: PYTHON_3 + startCommand: python runapp.py + configurationSource: API + repositoryUrl: https://github.com/example/my-example-python-app + sourceCodeVersion: + - type: BRANCH + value: main + tags: + Name: example-apprunner-service diff --git a/examples-generated/appstream/v1beta1/fleetstackassociation.yaml b/examples-generated/appstream/v1beta1/fleetstackassociation.yaml index 8ff4ad7623..46b8ca64e7 100644 --- a/examples-generated/appstream/v1beta1/fleetstackassociation.yaml +++ b/examples-generated/appstream/v1beta1/fleetstackassociation.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: appstream.aws.upbound.io/v1beta1 +apiVersion: appstream.aws.upbound.io/v1beta2 kind: Fleet metadata: annotations: @@ -37,7 +37,7 @@ spec: --- -apiVersion: appstream.aws.upbound.io/v1beta1 +apiVersion: appstream.aws.upbound.io/v1beta2 kind: Stack metadata: annotations: diff --git a/examples-generated/appstream/v1beta1/userstackassociation.yaml b/examples-generated/appstream/v1beta1/userstackassociation.yaml index ea970f0445..f3877a955b 100644 --- a/examples-generated/appstream/v1beta1/userstackassociation.yaml +++ b/examples-generated/appstream/v1beta1/userstackassociation.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: appstream.aws.upbound.io/v1beta1 +apiVersion: appstream.aws.upbound.io/v1beta2 kind: Stack metadata: annotations: diff --git a/examples-generated/appstream/v1beta2/directoryconfig.yaml b/examples-generated/appstream/v1beta2/directoryconfig.yaml new file mode 100644 index 0000000000..419166f5a4 --- /dev/null 
+++ b/examples-generated/appstream/v1beta2/directoryconfig.yaml @@ -0,0 +1,20 @@ +apiVersion: appstream.aws.upbound.io/v1beta2 +kind: DirectoryConfig +metadata: + annotations: + meta.upbound.io/example-id: appstream/v1beta2/directoryconfig + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + directoryName: NAME OF DIRECTORY + organizationalUnitDistinguishedNames: + - DISTINGUISHED NAME + region: us-west-1 + serviceAccountCredentials: + - accountName: NAME OF ACCOUNT + accountPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system diff --git a/examples-generated/appstream/v1beta2/fleet.yaml b/examples-generated/appstream/v1beta2/fleet.yaml new file mode 100644 index 0000000000..aa48997fc8 --- /dev/null +++ b/examples-generated/appstream/v1beta2/fleet.yaml @@ -0,0 +1,27 @@ +apiVersion: appstream.aws.upbound.io/v1beta2 +kind: Fleet +metadata: + annotations: + meta.upbound.io/example-id: appstream/v1beta2/fleet + labels: + testing.upbound.io/example-name: test_fleet + name: test-fleet +spec: + forProvider: + computeCapacity: + - desiredInstances: 1 + description: test fleet + displayName: test-fleet + enableDefaultInternetAccess: false + fleetType: ON_DEMAND + idleDisconnectTimeoutInSeconds: 60 + imageName: Amazon-AppStream2-Sample-Image-03-11-2023 + instanceType: stream.standard.large + maxUserDurationInSeconds: 600 + name: test-fleet + region: us-west-1 + tags: + TagName: tag-value + vpcConfig: + - subnetIdRefs: + - name: example diff --git a/examples-generated/appstream/v1beta2/imagebuilder.yaml b/examples-generated/appstream/v1beta2/imagebuilder.yaml new file mode 100644 index 0000000000..cdcc620d58 --- /dev/null +++ b/examples-generated/appstream/v1beta2/imagebuilder.yaml @@ -0,0 +1,20 @@ +apiVersion: appstream.aws.upbound.io/v1beta2 +kind: ImageBuilder +metadata: + annotations: + meta.upbound.io/example-id: appstream/v1beta2/imagebuilder + labels: + testing.upbound.io/example-name: 
test_fleet + name: test-fleet +spec: + forProvider: + description: Description of a ImageBuilder + displayName: Display name of a ImageBuilder + enableDefaultInternetAccess: false + instanceType: stream.standard.large + region: us-west-1 + tags: + Name: Example Image Builder + vpcConfig: + - subnetIdRefs: + - name: example diff --git a/examples-generated/appstream/v1beta2/stack.yaml b/examples-generated/appstream/v1beta2/stack.yaml new file mode 100644 index 0000000000..526e29f2be --- /dev/null +++ b/examples-generated/appstream/v1beta2/stack.yaml @@ -0,0 +1,38 @@ +apiVersion: appstream.aws.upbound.io/v1beta2 +kind: Stack +metadata: + annotations: + meta.upbound.io/example-id: appstream/v1beta2/stack + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationSettings: + - enabled: true + settingsGroup: SettingsGroup + description: stack description + displayName: stack display name + feedbackUrl: http://your-domain/feedback + name: stack name + redirectUrl: http://your-domain/redirect + region: us-west-1 + storageConnectors: + - connectorType: HOMEFOLDERS + tags: + TagName: TagValue + userSettings: + - action: CLIPBOARD_COPY_FROM_LOCAL_DEVICE + permission: ENABLED + - action: CLIPBOARD_COPY_TO_LOCAL_DEVICE + permission: ENABLED + - action: DOMAIN_PASSWORD_SIGNIN + permission: ENABLED + - action: DOMAIN_SMART_CARD_SIGNIN + permission: DISABLED + - action: FILE_DOWNLOAD + permission: ENABLED + - action: FILE_UPLOAD + permission: ENABLED + - action: PRINTING_TO_LOCAL_DEVICE + permission: ENABLED diff --git a/examples-generated/appsync/v1beta1/apicache.yaml b/examples-generated/appsync/v1beta1/apicache.yaml index 1d0a90e6bb..dc981517c3 100644 --- a/examples-generated/appsync/v1beta1/apicache.yaml +++ b/examples-generated/appsync/v1beta1/apicache.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: appsync.aws.upbound.io/v1beta1 +apiVersion: appsync.aws.upbound.io/v1beta2 kind: GraphQLAPI metadata: annotations: diff --git 
a/examples-generated/appsync/v1beta1/apikey.yaml b/examples-generated/appsync/v1beta1/apikey.yaml index 004a003873..72129ff4af 100644 --- a/examples-generated/appsync/v1beta1/apikey.yaml +++ b/examples-generated/appsync/v1beta1/apikey.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: appsync.aws.upbound.io/v1beta1 +apiVersion: appsync.aws.upbound.io/v1beta2 kind: GraphQLAPI metadata: annotations: diff --git a/examples-generated/appsync/v1beta2/datasource.yaml b/examples-generated/appsync/v1beta2/datasource.yaml new file mode 100644 index 0000000000..14e5ce4bdf --- /dev/null +++ b/examples-generated/appsync/v1beta2/datasource.yaml @@ -0,0 +1,89 @@ +apiVersion: appsync.aws.upbound.io/v1beta2 +kind: Datasource +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/datasource + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + dynamodbConfig: + - tableNameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + serviceRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + type: AMAZON_DYNAMODB + +--- + +apiVersion: appsync.aws.upbound.io/v1beta2 +kind: GraphQLAPI +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/datasource + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + authenticationType: API_KEY + name: tf_appsync_example + region: us-west-1 + +--- + +apiVersion: dynamodb.aws.upbound.io/v1beta2 +kind: Table +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/datasource + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + attribute: + - name: UserId + type: S + hashKey: UserId + readCapacity: 1 + region: us-west-1 + writeCapacity: 1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: 
appsync/v1beta2/datasource + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/datasource + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + policy: ${data.aws_iam_policy_document.example.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/appsync/v1beta2/function.yaml b/examples-generated/appsync/v1beta2/function.yaml new file mode 100644 index 0000000000..38facefeb7 --- /dev/null +++ b/examples-generated/appsync/v1beta2/function.yaml @@ -0,0 +1,87 @@ +apiVersion: appsync.aws.upbound.io/v1beta2 +kind: Function +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/function + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + dataSourceSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + region: us-west-1 + requestMappingTemplate: | + { + "version": "2018-05-29", + "method": "GET", + "resourcePath": "/", + "params":{ + "headers": $utils.http.copyheaders($ctx.request.headers) + } + } + responseMappingTemplate: | + #if($ctx.result.statusCode == 200) + $ctx.result.body + #else + $utils.appendError($ctx.result.body, $ctx.result.statusCode) + #end + +--- + +apiVersion: appsync.aws.upbound.io/v1beta2 +kind: Datasource +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/function + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiIdSelector: + matchLabels: + testing.upbound.io/example-name: example + httpConfig: + - endpoint: http://example.com + region: us-west-1 + type: HTTP + +--- + +apiVersion: 
appsync.aws.upbound.io/v1beta2 +kind: GraphQLAPI +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/function + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + authenticationType: API_KEY + name: example + region: us-west-1 + schema: | + type Mutation { + putPost(id: ID!, title: String!): Post + } + + type Post { + id: ID! + title: String! + } + + type Query { + singlePost(id: ID!): Post + } + + schema { + query: Query + mutation: Mutation + } diff --git a/examples-generated/appsync/v1beta2/graphqlapi.yaml b/examples-generated/appsync/v1beta2/graphqlapi.yaml new file mode 100644 index 0000000000..6e9bf89677 --- /dev/null +++ b/examples-generated/appsync/v1beta2/graphqlapi.yaml @@ -0,0 +1,13 @@ +apiVersion: appsync.aws.upbound.io/v1beta2 +kind: GraphQLAPI +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/graphqlapi + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + authenticationType: API_KEY + name: example + region: us-west-1 diff --git a/examples-generated/appsync/v1beta2/resolver.yaml b/examples-generated/appsync/v1beta2/resolver.yaml new file mode 100644 index 0000000000..ceb33e866c --- /dev/null +++ b/examples-generated/appsync/v1beta2/resolver.yaml @@ -0,0 +1,78 @@ +apiVersion: appsync.aws.upbound.io/v1beta2 +kind: Resolver +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/resolver + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + apiIdSelector: + matchLabels: + testing.upbound.io/example-name: test + cachingConfig: + - cachingKeys: + - $context.identity.sub + - $context.arguments.id + ttl: 60 + dataSourceSelector: + matchLabels: + testing.upbound.io/example-name: test + field: singlePost + region: us-west-1 + requestTemplate: | + { + "version": "2018-05-29", + "method": "GET", + "resourcePath": "/", + "params":{ + "headers": $utils.http.copyheaders($ctx.request.headers) + } + 
} + responseTemplate: | + #if($ctx.result.statusCode == 200) + $ctx.result.body + #else + $utils.appendError($ctx.result.body, $ctx.result.statusCode) + #end + type: Query + +--- + +apiVersion: appsync.aws.upbound.io/v1beta2 +kind: Datasource +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/resolver + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + apiIdSelector: + matchLabels: + testing.upbound.io/example-name: test + httpConfig: + - endpoint: http://example.com + region: us-west-1 + type: HTTP + +--- + +apiVersion: appsync.aws.upbound.io/v1beta2 +kind: GraphQLAPI +metadata: + annotations: + meta.upbound.io/example-id: appsync/v1beta2/resolver + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + authenticationType: API_KEY + name: tf-example + region: us-west-1 + schema: "type Mutation {\n\tputPost(id: ID!, title: String!): Post\n}\n\ntype + Post {\n\tid: ID!\n\ttitle: String!\n}\n\ntype Query {\n\tsinglePost(id: ID!): + Post\n}\n\nschema {\n\tquery: Query\n\tmutation: Mutation\n}\n" diff --git a/examples-generated/athena/v1beta1/namedquery.yaml b/examples-generated/athena/v1beta1/namedquery.yaml index 8d71cf4b3f..495021d558 100644 --- a/examples-generated/athena/v1beta1/namedquery.yaml +++ b/examples-generated/athena/v1beta1/namedquery.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: athena.aws.upbound.io/v1beta1 +apiVersion: athena.aws.upbound.io/v1beta2 kind: Database metadata: annotations: @@ -37,7 +37,7 @@ spec: --- -apiVersion: athena.aws.upbound.io/v1beta1 +apiVersion: athena.aws.upbound.io/v1beta2 kind: Workgroup metadata: annotations: @@ -74,7 +74,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git a/examples-generated/athena/v1beta2/database.yaml b/examples-generated/athena/v1beta2/database.yaml new file mode 100644 index 0000000000..3c83c2fdf4 --- /dev/null +++ 
b/examples-generated/athena/v1beta2/database.yaml @@ -0,0 +1,29 @@ +apiVersion: athena.aws.upbound.io/v1beta2 +kind: Database +metadata: + annotations: + meta.upbound.io/example-id: athena/v1beta2/database + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: athena/v1beta2/database + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/athena/v1beta2/workgroup.yaml b/examples-generated/athena/v1beta2/workgroup.yaml new file mode 100644 index 0000000000..eef4c3a213 --- /dev/null +++ b/examples-generated/athena/v1beta2/workgroup.yaml @@ -0,0 +1,21 @@ +apiVersion: athena.aws.upbound.io/v1beta2 +kind: Workgroup +metadata: + annotations: + meta.upbound.io/example-id: athena/v1beta2/workgroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + configuration: + - enforceWorkgroupConfiguration: true + publishCloudwatchMetricsEnabled: true + resultConfiguration: + - encryptionConfiguration: + - encryptionOption: SSE_KMS + kmsKeyArnSelector: + matchLabels: + testing.upbound.io/example-name: example + outputLocation: s3://${aws_s3_bucket.example.bucket}/output/ + region: us-west-1 diff --git a/examples-generated/autoscaling/v1beta1/lifecyclehook.yaml b/examples-generated/autoscaling/v1beta1/lifecyclehook.yaml index 1fac6dc808..b45b864687 100644 --- a/examples-generated/autoscaling/v1beta1/lifecyclehook.yaml +++ b/examples-generated/autoscaling/v1beta1/lifecyclehook.yaml @@ -26,7 +26,7 @@ spec: --- -apiVersion: autoscaling.aws.upbound.io/v1beta2 +apiVersion: autoscaling.aws.upbound.io/v1beta3 kind: AutoscalingGroup metadata: annotations: diff --git 
a/examples-generated/autoscaling/v1beta1/notification.yaml b/examples-generated/autoscaling/v1beta1/notification.yaml index c0ccbd2614..deeb5140a2 100644 --- a/examples-generated/autoscaling/v1beta1/notification.yaml +++ b/examples-generated/autoscaling/v1beta1/notification.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: autoscaling.aws.upbound.io/v1beta2 +apiVersion: autoscaling.aws.upbound.io/v1beta3 kind: AutoscalingGroup metadata: annotations: @@ -37,7 +37,7 @@ spec: --- -apiVersion: autoscaling.aws.upbound.io/v1beta2 +apiVersion: autoscaling.aws.upbound.io/v1beta3 kind: AutoscalingGroup metadata: annotations: diff --git a/examples-generated/autoscaling/v1beta1/schedule.yaml b/examples-generated/autoscaling/v1beta1/schedule.yaml index 29891fb816..99dbbdaafd 100644 --- a/examples-generated/autoscaling/v1beta1/schedule.yaml +++ b/examples-generated/autoscaling/v1beta1/schedule.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: autoscaling.aws.upbound.io/v1beta2 +apiVersion: autoscaling.aws.upbound.io/v1beta3 kind: AutoscalingGroup metadata: annotations: diff --git a/examples-generated/autoscaling/v1beta2/grouptag.yaml b/examples-generated/autoscaling/v1beta2/grouptag.yaml new file mode 100644 index 0000000000..51a7afeafe --- /dev/null +++ b/examples-generated/autoscaling/v1beta2/grouptag.yaml @@ -0,0 +1,41 @@ +apiVersion: autoscaling.aws.upbound.io/v1beta2 +kind: GroupTag +metadata: + annotations: + meta.upbound.io/example-id: autoscaling/v1beta2/grouptag + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + autoscalingGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + for_each: |- + ${toset( + [for asg in flatten( + [for resources in aws_eks_node_group.example.resources : resources.autoscaling_groups] + ) : asg.name] + )} + region: us-west-1 + tag: + - key: k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType + propagateAtLaunch: false + value: SPOT + +--- + +apiVersion: 
eks.aws.upbound.io/v1beta2 +kind: NodeGroup +metadata: + annotations: + meta.upbound.io/example-id: autoscaling/v1beta2/grouptag + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterNameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/autoscaling/v1beta2/launchconfiguration.yaml b/examples-generated/autoscaling/v1beta2/launchconfiguration.yaml new file mode 100644 index 0000000000..d6b1f36ee2 --- /dev/null +++ b/examples-generated/autoscaling/v1beta2/launchconfiguration.yaml @@ -0,0 +1,13 @@ +apiVersion: autoscaling.aws.upbound.io/v1beta2 +kind: LaunchConfiguration +metadata: + annotations: + meta.upbound.io/example-id: autoscaling/v1beta2/launchconfiguration + labels: + testing.upbound.io/example-name: as_conf + name: as-conf +spec: + forProvider: + imageId: ${data.aws_ami.ubuntu.id} + instanceType: t2.micro + region: us-west-1 diff --git a/examples-generated/autoscaling/v1beta2/policy.yaml b/examples-generated/autoscaling/v1beta2/policy.yaml new file mode 100644 index 0000000000..ad248d9cf0 --- /dev/null +++ b/examples-generated/autoscaling/v1beta2/policy.yaml @@ -0,0 +1,41 @@ +apiVersion: autoscaling.aws.upbound.io/v1beta2 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: autoscaling/v1beta2/policy + labels: + testing.upbound.io/example-name: bat + name: bat +spec: + forProvider: + adjustmentType: ChangeInCapacity + autoscalingGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: bar + cooldown: 300 + region: us-west-1 + scalingAdjustment: 4 + +--- + +apiVersion: autoscaling.aws.upbound.io/v1beta3 +kind: AutoscalingGroup +metadata: + annotations: + meta.upbound.io/example-id: autoscaling/v1beta2/policy + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + availabilityZones: + - us-east-1a + forceDelete: true + healthCheckGracePeriod: 300 + healthCheckType: ELB + 
launchConfigurationSelector: + matchLabels: + testing.upbound.io/example-name: foo + maxSize: 5 + minSize: 2 + region: us-west-1 diff --git a/examples-generated/autoscaling/v1beta3/autoscalinggroup.yaml b/examples-generated/autoscaling/v1beta3/autoscalinggroup.yaml new file mode 100644 index 0000000000..8007b33f4a --- /dev/null +++ b/examples-generated/autoscaling/v1beta3/autoscalinggroup.yaml @@ -0,0 +1,64 @@ +apiVersion: autoscaling.aws.upbound.io/v1beta3 +kind: AutoscalingGroup +metadata: + annotations: + meta.upbound.io/example-id: autoscaling/v1beta3/autoscalinggroup + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + desiredCapacity: 4 + forceDelete: true + healthCheckGracePeriod: 300 + healthCheckType: ELB + initialLifecycleHook: + - defaultResult: CONTINUE + heartbeatTimeout: 2000 + lifecycleTransition: autoscaling:EC2_INSTANCE_LAUNCHING + name: foobar + notificationMetadata: |- + ${jsonencode({ + foo = "bar" + })} + notificationTargetArn: arn:aws:sqs:us-east-1:444455556666:queue1* + roleArn: arn:aws:iam::123456789012:role/S3Access + instanceMaintenancePolicy: + - maxHealthyPercentage: 120 + minHealthyPercentage: 90 + launchConfigurationSelector: + matchLabels: + testing.upbound.io/example-name: foobar + maxSize: 5 + minSize: 2 + placementGroupSelector: + matchLabels: + testing.upbound.io/example-name: test + region: us-west-1 + tag: + - key: foo + propagateAtLaunch: true + value: bar + - key: lorem + propagateAtLaunch: false + value: ipsum + timeouts: + - delete: 15m + vpcZoneIdentifierRefs: + - name: example1 + - name: example2 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: PlacementGroup +metadata: + annotations: + meta.upbound.io/example-id: autoscaling/v1beta3/autoscalinggroup + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + region: us-west-1 + strategy: cluster diff --git a/examples-generated/autoscalingplans/v1beta2/scalingplan.yaml 
b/examples-generated/autoscalingplans/v1beta2/scalingplan.yaml new file mode 100644 index 0000000000..b10d7b450a --- /dev/null +++ b/examples-generated/autoscalingplans/v1beta2/scalingplan.yaml @@ -0,0 +1,52 @@ +apiVersion: autoscalingplans.aws.upbound.io/v1beta2 +kind: ScalingPlan +metadata: + annotations: + meta.upbound.io/example-id: autoscalingplans/v1beta2/scalingplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationSource: + - tagFilter: + - key: application + values: + - example + name: example-dynamic-cost-optimization + region: us-west-1 + scalingInstruction: + - maxCapacity: 3 + minCapacity: 0 + resourceId: ${format("autoScalingGroup/%s", aws_autoscaling_group.example.name)} + scalableDimension: autoscaling:autoScalingGroup:DesiredCapacity + serviceNamespace: autoscaling + targetTrackingConfiguration: + - predefinedScalingMetricSpecification: + - predefinedScalingMetricType: ASGAverageCPUUtilization + targetValue: 70 + +--- + +apiVersion: autoscaling.aws.upbound.io/v1beta3 +kind: AutoscalingGroup +metadata: + annotations: + meta.upbound.io/example-id: autoscalingplans/v1beta2/scalingplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + availabilityZones: + - ${data.aws_availability_zones.available.names[0]} + launchConfigurationSelector: + matchLabels: + testing.upbound.io/example-name: example + maxSize: 3 + minSize: 0 + region: us-west-1 + tags: + - key: application + propagate_at_launch: true + value: example diff --git a/examples-generated/backup/v1beta2/framework.yaml b/examples-generated/backup/v1beta2/framework.yaml new file mode 100644 index 0000000000..acd9468d07 --- /dev/null +++ b/examples-generated/backup/v1beta2/framework.yaml @@ -0,0 +1,52 @@ +apiVersion: backup.aws.upbound.io/v1beta2 +kind: Framework +metadata: + annotations: + meta.upbound.io/example-id: backup/v1beta2/framework + labels: + testing.upbound.io/example-name: Example + name: 
example +spec: + forProvider: + control: + - inputParameter: + - name: requiredRetentionDays + value: "35" + name: BACKUP_RECOVERY_POINT_MINIMUM_RETENTION_CHECK + - inputParameter: + - name: requiredFrequencyUnit + value: hours + - name: requiredRetentionDays + value: "35" + - name: requiredFrequencyValue + value: "1" + name: BACKUP_PLAN_MIN_FREQUENCY_AND_MIN_RETENTION_CHECK + - name: BACKUP_RECOVERY_POINT_ENCRYPTED + - name: BACKUP_RESOURCES_PROTECTED_BY_BACKUP_PLAN + scope: + - complianceResourceTypes: + - EBS + - name: BACKUP_RECOVERY_POINT_MANUAL_DELETION_DISABLED + - inputParameter: + - name: maxRetentionDays + value: "100" + - name: minRetentionDays + value: "1" + name: BACKUP_RESOURCES_PROTECTED_BY_BACKUP_VAULT_LOCK + scope: + - complianceResourceTypes: + - EBS + - inputParameter: + - name: recoveryPointAgeUnit + value: days + - name: recoveryPointAgeValue + value: "1" + name: BACKUP_LAST_RECOVERY_POINT_CREATED + scope: + - complianceResourceTypes: + - EBS + description: this is an example framework + name: exampleFramework + region: us-west-1 + tags: + Name: Example Framework diff --git a/examples-generated/backup/v1beta2/plan.yaml b/examples-generated/backup/v1beta2/plan.yaml new file mode 100644 index 0000000000..e6e30ab602 --- /dev/null +++ b/examples-generated/backup/v1beta2/plan.yaml @@ -0,0 +1,24 @@ +apiVersion: backup.aws.upbound.io/v1beta2 +kind: Plan +metadata: + annotations: + meta.upbound.io/example-id: backup/v1beta2/plan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + advancedBackupSetting: + - backupOptions: + WindowsVSS: enabled + resourceType: EC2 + name: tf_example_backup_plan + region: us-west-1 + rule: + - lifecycle: + - deleteAfter: 14 + ruleName: tf_example_backup_rule + schedule: cron(0 12 * * ? 
*) + targetVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: test diff --git a/examples-generated/backup/v1beta2/reportplan.yaml b/examples-generated/backup/v1beta2/reportplan.yaml new file mode 100644 index 0000000000..b5ff488284 --- /dev/null +++ b/examples-generated/backup/v1beta2/reportplan.yaml @@ -0,0 +1,22 @@ +apiVersion: backup.aws.upbound.io/v1beta2 +kind: ReportPlan +metadata: + annotations: + meta.upbound.io/example-id: backup/v1beta2/reportplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: example description + name: example_name + region: us-west-1 + reportDeliveryChannel: + - formats: + - CSV + - JSON + s3BucketName: example-bucket-name + reportSetting: + - reportTemplate: RESTORE_JOB_REPORT + tags: + Name: Example Report Plan diff --git a/examples-generated/batch/v1beta2/jobdefinition.yaml b/examples-generated/batch/v1beta2/jobdefinition.yaml new file mode 100644 index 0000000000..0159ba3436 --- /dev/null +++ b/examples-generated/batch/v1beta2/jobdefinition.yaml @@ -0,0 +1,61 @@ +apiVersion: batch.aws.upbound.io/v1beta2 +kind: JobDefinition +metadata: + annotations: + meta.upbound.io/example-id: batch/v1beta2/jobdefinition + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + containerProperties: |- + ${jsonencode({ + command = ["ls", "-la"], + image = "busybox" + + resourceRequirements = [ + { + type = "VCPU" + value = "0.25" + }, + { + type = "MEMORY" + value = "512" + } + ] + + volumes = [ + { + host = { + sourcePath = "/tmp" + } + name = "tmp" + } + ] + + environment = [ + { + name = "VARNAME" + value = "VARVAL" + } + ] + + mountPoints = [ + { + sourceVolume = "tmp" + containerPath = "/tmp" + readOnly = false + } + ] + + ulimits = [ + { + hardLimit = 1024 + name = "nofile" + softLimit = 1024 + } + ] + })} + name: tf_test_batch_job_definition + region: us-west-1 + type: container diff --git 
a/examples-generated/batch/v1beta2/schedulingpolicy.yaml b/examples-generated/batch/v1beta2/schedulingpolicy.yaml new file mode 100644 index 0000000000..b7463d558b --- /dev/null +++ b/examples-generated/batch/v1beta2/schedulingpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: batch.aws.upbound.io/v1beta2 +kind: SchedulingPolicy +metadata: + annotations: + meta.upbound.io/example-id: batch/v1beta2/schedulingpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + fairSharePolicy: + - computeReservation: 1 + shareDecaySeconds: 3600 + shareDistribution: + - shareIdentifier: A1* + weightFactor: 0.1 + - shareIdentifier: A2 + weightFactor: 0.2 + region: us-west-1 + tags: + Name: Example Batch Scheduling Policy diff --git a/examples-generated/budgets/v1beta2/budget.yaml b/examples-generated/budgets/v1beta2/budget.yaml new file mode 100644 index 0000000000..998e898f30 --- /dev/null +++ b/examples-generated/budgets/v1beta2/budget.yaml @@ -0,0 +1,31 @@ +apiVersion: budgets.aws.upbound.io/v1beta2 +kind: Budget +metadata: + annotations: + meta.upbound.io/example-id: budgets/v1beta2/budget + labels: + testing.upbound.io/example-name: ec2 + name: ec2 +spec: + forProvider: + budgetType: COST + costFilter: + - name: Service + values: + - Amazon Elastic Compute Cloud - Compute + limitAmount: "1200" + limitUnit: USD + notification: + - comparisonOperator: GREATER_THAN + notificationType: FORECASTED + subscriberEmailAddresses: + - test@example.com + threshold: 100 + thresholdType: PERCENTAGE + region: us-west-1 + tags: + Tag1: Value1 + Tag2: Value2 + timePeriodEnd: 2087-06-15_00:00 + timePeriodStart: 2017-07-01_00:00 + timeUnit: MONTHLY diff --git a/examples-generated/budgets/v1beta2/budgetaction.yaml b/examples-generated/budgets/v1beta2/budgetaction.yaml new file mode 100644 index 0000000000..d36ca43610 --- /dev/null +++ b/examples-generated/budgets/v1beta2/budgetaction.yaml @@ -0,0 +1,84 @@ +apiVersion: budgets.aws.upbound.io/v1beta2 +kind: 
BudgetAction +metadata: + annotations: + meta.upbound.io/example-id: budgets/v1beta2/budgetaction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + actionThreshold: + - actionThresholdType: ABSOLUTE_VALUE + actionThresholdValue: 100 + actionType: APPLY_IAM_POLICY + approvalModel: AUTOMATIC + budgetNameSelector: + matchLabels: + testing.upbound.io/example-name: example + definition: + - iamActionDefinition: + - policyArnSelector: + matchLabels: + testing.upbound.io/example-name: example + roles: + - ${aws_iam_role.example.name} + executionRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + notificationType: ACTUAL + region: us-west-1 + subscriber: + - address: example@example.example + subscriptionType: EMAIL + tags: + Tag1: Value1 + Tag2: Value2 + +--- + +apiVersion: budgets.aws.upbound.io/v1beta2 +kind: Budget +metadata: + annotations: + meta.upbound.io/example-id: budgets/v1beta2/budgetaction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + budgetType: USAGE + limitAmount: "10.0" + limitUnit: dollars + region: us-west-1 + timePeriodStart: 2006-01-02_15:04 + timeUnit: MONTHLY + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: budgets/v1beta2/budgetaction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: My example policy + policy: ${data.aws_iam_policy_document.example.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: budgets/v1beta2/budgetaction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} diff --git a/examples-generated/chime/v1beta2/voiceconnectorstreaming.yaml b/examples-generated/chime/v1beta2/voiceconnectorstreaming.yaml new file mode 100644 
index 0000000000..8b4e6f7595 --- /dev/null +++ b/examples-generated/chime/v1beta2/voiceconnectorstreaming.yaml @@ -0,0 +1,33 @@ +apiVersion: chime.aws.upbound.io/v1beta2 +kind: VoiceConnectorStreaming +metadata: + annotations: + meta.upbound.io/example-id: chime/v1beta2/voiceconnectorstreaming + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + dataRetention: 7 + disabled: false + region: us-west-1 + streamingNotificationTargets: + - SQS + voiceConnectorIdSelector: + matchLabels: + testing.upbound.io/example-name: default + +--- + +apiVersion: chime.aws.upbound.io/v1beta1 +kind: VoiceConnector +metadata: + annotations: + meta.upbound.io/example-id: chime/v1beta2/voiceconnectorstreaming + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + region: us-west-1 + requireEncryption: true diff --git a/examples-generated/cloudformation/v1beta2/stackset.yaml b/examples-generated/cloudformation/v1beta2/stackset.yaml new file mode 100644 index 0000000000..0f31aed3ec --- /dev/null +++ b/examples-generated/cloudformation/v1beta2/stackset.yaml @@ -0,0 +1,73 @@ +apiVersion: cloudformation.aws.upbound.io/v1beta2 +kind: StackSet +metadata: + annotations: + meta.upbound.io/example-id: cloudformation/v1beta2/stackset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administrationRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: AWSCloudFormationStackSetAdministrationRole + parameters: + VPCCidr: 10.0.0.0/16 + region: us-west-1 + templateBody: |- + ${jsonencode({ + Parameters = { + VPCCidr = { + Type = "String" + Default = "10.0.0.0/16" + Description = "Enter the CIDR block for the VPC. Default is 10.0.0.0/16." 
+ } + } + Resources = { + myVpc = { + Type = "AWS::EC2::VPC" + Properties = { + CidrBlock = { + Ref = "VPCCidr" + } + Tags = [ + { + Key = "Name" + Value = "Primary_CF_VPC" + } + ] + } + } + } + })} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: cloudformation/v1beta2/stackset + labels: + testing.upbound.io/example-name: AWSCloudFormationStackSetAdministrationRole + name: awscloudformationstacksetadministrationrole +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.AWSCloudFormationStackSetAdministrationRole_assume_role_policy.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + annotations: + meta.upbound.io/example-id: cloudformation/v1beta2/stackset + labels: + testing.upbound.io/example-name: AWSCloudFormationStackSetAdministrationRole_ExecutionPolicy + name: awscloudformationstacksetadministrationrole-executionpolicy +spec: + forProvider: + policy: ${data.aws_iam_policy_document.AWSCloudFormationStackSetAdministrationRole_ExecutionPolicy.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: AWSCloudFormationStackSetAdministrationRole diff --git a/examples-generated/cloudformation/v1beta2/stacksetinstance.yaml b/examples-generated/cloudformation/v1beta2/stacksetinstance.yaml new file mode 100644 index 0000000000..c835dd11c5 --- /dev/null +++ b/examples-generated/cloudformation/v1beta2/stacksetinstance.yaml @@ -0,0 +1,15 @@ +apiVersion: cloudformation.aws.upbound.io/v1beta2 +kind: StackSetInstance +metadata: + annotations: + meta.upbound.io/example-id: cloudformation/v1beta2/stacksetinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountId: "123456789012" + region: us-west-1 + stackSetNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/cloudfront/v1beta2/cachepolicy.yaml 
b/examples-generated/cloudfront/v1beta2/cachepolicy.yaml new file mode 100644 index 0000000000..c2a5b7e2fc --- /dev/null +++ b/examples-generated/cloudfront/v1beta2/cachepolicy.yaml @@ -0,0 +1,32 @@ +apiVersion: cloudfront.aws.upbound.io/v1beta2 +kind: CachePolicy +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/cachepolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + comment: test comment + defaultTtl: 50 + maxTtl: 100 + minTtl: 1 + name: example-policy + parametersInCacheKeyAndForwardedToOrigin: + - cookiesConfig: + - cookieBehavior: whitelist + cookies: + - items: + - example + headersConfig: + - headerBehavior: whitelist + headers: + - items: + - example + queryStringsConfig: + - queryStringBehavior: whitelist + queryStrings: + - items: + - example + region: us-west-1 diff --git a/examples-generated/cloudfront/v1beta2/distribution.yaml b/examples-generated/cloudfront/v1beta2/distribution.yaml new file mode 100644 index 0000000000..f84033bc71 --- /dev/null +++ b/examples-generated/cloudfront/v1beta2/distribution.yaml @@ -0,0 +1,137 @@ +apiVersion: cloudfront.aws.upbound.io/v1beta2 +kind: Distribution +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/distribution + labels: + testing.upbound.io/example-name: s3_distribution + name: s3-distribution +spec: + forProvider: + aliases: + - mysite.example.com + - yoursite.example.com + comment: Some comment + defaultCacheBehavior: + - allowedMethods: + - DELETE + - GET + - HEAD + - OPTIONS + - PATCH + - POST + - PUT + cachedMethods: + - GET + - HEAD + defaultTtl: 3600 + forwardedValues: + - cookies: + - forward: none + queryString: false + maxTtl: 86400 + minTtl: 0 + targetOriginId: ${local.s3_origin_id} + viewerProtocolPolicy: allow-all + defaultRootObject: index.html + enabled: true + isIpv6Enabled: true + loggingConfig: + - bucket: mylogs.s3.amazonaws.com + includeCookies: false + prefix: myprefix + orderedCacheBehavior: 
+ - allowedMethods: + - GET + - HEAD + - OPTIONS + cachedMethods: + - GET + - HEAD + - OPTIONS + compress: true + defaultTtl: 86400 + forwardedValues: + - cookies: + - forward: none + headers: + - Origin + queryString: false + maxTtl: 31536000 + minTtl: 0 + pathPattern: /content/immutable/* + targetOriginId: ${local.s3_origin_id} + viewerProtocolPolicy: redirect-to-https + - allowedMethods: + - GET + - HEAD + - OPTIONS + cachedMethods: + - GET + - HEAD + compress: true + defaultTtl: 3600 + forwardedValues: + - cookies: + - forward: none + queryString: false + maxTtl: 86400 + minTtl: 0 + pathPattern: /content/* + targetOriginId: ${local.s3_origin_id} + viewerProtocolPolicy: redirect-to-https + origin: + - domainName: ${aws_s3_bucket.b.bucket_regional_domain_name} + originAccessControlIdSelector: + matchLabels: + testing.upbound.io/example-name: default + originId: ${local.s3_origin_id} + priceClass: PriceClass_200 + region: us-west-1 + restrictions: + - geoRestriction: + - locations: + - US + - CA + - GB + - DE + restrictionType: whitelist + tags: + Environment: production + viewerCertificate: + - cloudfrontDefaultCertificate: true + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: cloudfront/v1beta2/distribution + labels: + testing.upbound.io/example-name: b + name: b +spec: + forProvider: + region: us-west-1 + tags: + Name: My bucket + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketACL +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/distribution + labels: + testing.upbound.io/example-name: b_acl + name: b-acl +spec: + forProvider: + acl: private + bucketSelector: + matchLabels: + testing.upbound.io/example-name: b + region: us-west-1 diff --git a/examples-generated/cloudfront/v1beta2/fieldlevelencryptionconfig.yaml b/examples-generated/cloudfront/v1beta2/fieldlevelencryptionconfig.yaml new file mode 
100644 index 0000000000..1931683951 --- /dev/null +++ b/examples-generated/cloudfront/v1beta2/fieldlevelencryptionconfig.yaml @@ -0,0 +1,26 @@ +apiVersion: cloudfront.aws.upbound.io/v1beta2 +kind: FieldLevelEncryptionConfig +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/fieldlevelencryptionconfig + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + comment: test comment + contentTypeProfileConfig: + - contentTypeProfiles: + - items: + - contentType: application/x-www-form-urlencoded + format: URLEncoded + forwardWhenContentTypeIsUnknown: true + queryArgProfileConfig: + - forwardWhenQueryArgProfileIsUnknown: true + queryArgProfiles: + - items: + - profileIdSelector: + matchLabels: + testing.upbound.io/example-name: test + queryArg: Arg1 + region: us-west-1 diff --git a/examples-generated/cloudfront/v1beta2/fieldlevelencryptionprofile.yaml b/examples-generated/cloudfront/v1beta2/fieldlevelencryptionprofile.yaml new file mode 100644 index 0000000000..e4001c67f9 --- /dev/null +++ b/examples-generated/cloudfront/v1beta2/fieldlevelencryptionprofile.yaml @@ -0,0 +1,42 @@ +apiVersion: cloudfront.aws.upbound.io/v1beta2 +kind: FieldLevelEncryptionProfile +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/fieldlevelencryptionprofile + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + comment: test comment + encryptionEntities: + - items: + - fieldPatterns: + - items: + - DateOfBirth + providerId: test provider + publicKeyIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: test profile + region: us-west-1 + +--- + +apiVersion: cloudfront.aws.upbound.io/v1beta1 +kind: PublicKey +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/fieldlevelencryptionprofile + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + comment: test public key + encodedKeySecretRef: + key: 
attribute.public_key.pem + name: example-secret + namespace: upbound-system + name: test_key + region: us-west-1 diff --git a/examples-generated/cloudfront/v1beta2/monitoringsubscription.yaml b/examples-generated/cloudfront/v1beta2/monitoringsubscription.yaml new file mode 100644 index 0000000000..59e97daf86 --- /dev/null +++ b/examples-generated/cloudfront/v1beta2/monitoringsubscription.yaml @@ -0,0 +1,17 @@ +apiVersion: cloudfront.aws.upbound.io/v1beta2 +kind: MonitoringSubscription +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/monitoringsubscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + distributionIdSelector: + matchLabels: + testing.upbound.io/example-name: example + monitoringSubscription: + - realtimeMetricsSubscriptionConfig: + - realtimeMetricsSubscriptionStatus: Enabled + region: us-west-1 diff --git a/examples-generated/cloudfront/v1beta2/originrequestpolicy.yaml b/examples-generated/cloudfront/v1beta2/originrequestpolicy.yaml new file mode 100644 index 0000000000..4a9cb9ff2c --- /dev/null +++ b/examples-generated/cloudfront/v1beta2/originrequestpolicy.yaml @@ -0,0 +1,27 @@ +apiVersion: cloudfront.aws.upbound.io/v1beta2 +kind: OriginRequestPolicy +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/originrequestpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + comment: example comment + cookiesConfig: + - cookieBehavior: whitelist + cookies: + - items: + - example + headersConfig: + - headerBehavior: whitelist + headers: + - items: + - example + queryStringsConfig: + - queryStringBehavior: whitelist + queryStrings: + - items: + - example + region: us-west-1 diff --git a/examples-generated/cloudfront/v1beta2/realtimelogconfig.yaml b/examples-generated/cloudfront/v1beta2/realtimelogconfig.yaml new file mode 100644 index 0000000000..685649702c --- /dev/null +++ 
b/examples-generated/cloudfront/v1beta2/realtimelogconfig.yaml @@ -0,0 +1,56 @@ +apiVersion: cloudfront.aws.upbound.io/v1beta2 +kind: RealtimeLogConfig +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/realtimelogconfig + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + endpoint: + - kinesisStreamConfig: + - roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + streamArnSelector: + matchLabels: + testing.upbound.io/example-name: example + streamType: Kinesis + fields: + - timestamp + - c-ip + name: example + region: us-west-1 + samplingRate: 75 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/realtimelogconfig + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/realtimelogconfig + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + policy: ${data.aws_iam_policy_document.example.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/cloudfront/v1beta2/responseheaderspolicy.yaml b/examples-generated/cloudfront/v1beta2/responseheaderspolicy.yaml new file mode 100644 index 0000000000..05f4d8a26b --- /dev/null +++ b/examples-generated/cloudfront/v1beta2/responseheaderspolicy.yaml @@ -0,0 +1,25 @@ +apiVersion: cloudfront.aws.upbound.io/v1beta2 +kind: ResponseHeadersPolicy +metadata: + annotations: + meta.upbound.io/example-id: cloudfront/v1beta2/responseheaderspolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + comment: test comment + corsConfig: + - accessControlAllowCredentials: true + 
accessControlAllowHeaders: + - items: + - test + accessControlAllowMethods: + - items: + - GET + accessControlAllowOrigins: + - items: + - test.example.comtest + originOverride: true + name: example-policy + region: us-west-1 diff --git a/examples-generated/cloudsearch/v1beta1/domainserviceaccesspolicy.yaml b/examples-generated/cloudsearch/v1beta1/domainserviceaccesspolicy.yaml index 07940ca35e..0f6e1de197 100644 --- a/examples-generated/cloudsearch/v1beta1/domainserviceaccesspolicy.yaml +++ b/examples-generated/cloudsearch/v1beta1/domainserviceaccesspolicy.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: cloudsearch.aws.upbound.io/v1beta1 +apiVersion: cloudsearch.aws.upbound.io/v1beta2 kind: Domain metadata: annotations: diff --git a/examples-generated/cloudsearch/v1beta2/domain.yaml b/examples-generated/cloudsearch/v1beta2/domain.yaml new file mode 100644 index 0000000000..b2cf78e328 --- /dev/null +++ b/examples-generated/cloudsearch/v1beta2/domain.yaml @@ -0,0 +1,28 @@ +apiVersion: cloudsearch.aws.upbound.io/v1beta2 +kind: Domain +metadata: + annotations: + meta.upbound.io/example-id: cloudsearch/v1beta2/domain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + indexField: + - analysisScheme: _en_default_ + highlight: false + name: headline + return: true + search: true + sort: true + type: text + - facet: true + name: price + return: true + search: true + sort: true + sourceFields: headline + type: double + region: us-west-1 + scalingParameters: + - desiredInstanceType: search.medium diff --git a/examples-generated/cloudtrail/v1beta1/trail.yaml b/examples-generated/cloudtrail/v1beta1/trail.yaml index f88439d4f3..3b10550579 100644 --- a/examples-generated/cloudtrail/v1beta1/trail.yaml +++ b/examples-generated/cloudtrail/v1beta1/trail.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git 
a/examples-generated/cloudwatch/v1beta1/metricstream.yaml b/examples-generated/cloudwatch/v1beta1/metricstream.yaml index dbc4a3e33e..e67acd529c 100644 --- a/examples-generated/cloudwatch/v1beta1/metricstream.yaml +++ b/examples-generated/cloudwatch/v1beta1/metricstream.yaml @@ -89,7 +89,7 @@ spec: --- -apiVersion: firehose.aws.upbound.io/v1beta1 +apiVersion: firehose.aws.upbound.io/v1beta2 kind: DeliveryStream metadata: annotations: @@ -112,7 +112,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: @@ -127,7 +127,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: BucketACL metadata: annotations: diff --git a/examples-generated/cloudwatch/v1beta2/compositealarm.yaml b/examples-generated/cloudwatch/v1beta2/compositealarm.yaml new file mode 100644 index 0000000000..e24a8208ac --- /dev/null +++ b/examples-generated/cloudwatch/v1beta2/compositealarm.yaml @@ -0,0 +1,25 @@ +apiVersion: cloudwatch.aws.upbound.io/v1beta2 +kind: CompositeAlarm +metadata: + annotations: + meta.upbound.io/example-id: cloudwatch/v1beta2/compositealarm + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + actionsSuppressor: + - alarm: suppressor-alarm + extensionPeriod: 10 + waitPeriod: 20 + alarmActionsSelector: + matchLabels: + testing.upbound.io/example-name: example + alarmDescription: This is a composite alarm! 
+ alarmRule: | + ALARM(${aws_cloudwatch_metric_alarm.alpha.alarm_name}) OR + ALARM(${aws_cloudwatch_metric_alarm.bravo.alarm_name}) + okActionsSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/cloudwatch/v1beta2/metricalarm.yaml b/examples-generated/cloudwatch/v1beta2/metricalarm.yaml new file mode 100644 index 0000000000..c4bb16dbb7 --- /dev/null +++ b/examples-generated/cloudwatch/v1beta2/metricalarm.yaml @@ -0,0 +1,20 @@ +apiVersion: cloudwatch.aws.upbound.io/v1beta2 +kind: MetricAlarm +metadata: + annotations: + meta.upbound.io/example-id: cloudwatch/v1beta2/metricalarm + labels: + testing.upbound.io/example-name: foobar + name: foobar +spec: + forProvider: + alarmDescription: This metric monitors ec2 cpu utilization + comparisonOperator: GreaterThanOrEqualToThreshold + evaluationPeriods: 2 + insufficientDataActions: [] + metricName: CPUUtilization + namespace: AWS/EC2 + period: 120 + region: us-west-1 + statistic: Average + threshold: 80 diff --git a/examples-generated/cloudwatchevents/v1beta1/rule.yaml b/examples-generated/cloudwatchevents/v1beta1/rule.yaml index b37efaf5b1..6d23cd9ddf 100644 --- a/examples-generated/cloudwatchevents/v1beta1/rule.yaml +++ b/examples-generated/cloudwatchevents/v1beta1/rule.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: cloudwatchevents.aws.upbound.io/v1beta1 +apiVersion: cloudwatchevents.aws.upbound.io/v1beta2 kind: Target metadata: annotations: diff --git a/examples-generated/cloudwatchevents/v1beta2/connection.yaml b/examples-generated/cloudwatchevents/v1beta2/connection.yaml new file mode 100644 index 0000000000..30fd7fab9c --- /dev/null +++ b/examples-generated/cloudwatchevents/v1beta2/connection.yaml @@ -0,0 +1,20 @@ +apiVersion: cloudwatchevents.aws.upbound.io/v1beta2 +kind: Connection +metadata: + annotations: + meta.upbound.io/example-id: cloudwatchevents/v1beta2/connection + labels: + testing.upbound.io/example-name: test + name: test +spec: + 
forProvider: + authParameters: + - apiKey: + - key: x-signature + valueSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + authorizationType: API_KEY + description: A connection description + region: us-west-1 diff --git a/examples-generated/cloudwatchevents/v1beta2/permission.yaml b/examples-generated/cloudwatchevents/v1beta2/permission.yaml new file mode 100644 index 0000000000..6786a640cc --- /dev/null +++ b/examples-generated/cloudwatchevents/v1beta2/permission.yaml @@ -0,0 +1,13 @@ +apiVersion: cloudwatchevents.aws.upbound.io/v1beta2 +kind: Permission +metadata: + annotations: + meta.upbound.io/example-id: cloudwatchevents/v1beta2/permission + labels: + testing.upbound.io/example-name: DevAccountAccess + name: devaccountaccess +spec: + forProvider: + principal: "123456789012" + region: us-west-1 + statementId: DevAccountAccess diff --git a/examples-generated/cloudwatchevents/v1beta2/target.yaml b/examples-generated/cloudwatchevents/v1beta2/target.yaml new file mode 100644 index 0000000000..e49a785155 --- /dev/null +++ b/examples-generated/cloudwatchevents/v1beta2/target.yaml @@ -0,0 +1,66 @@ +apiVersion: cloudwatchevents.aws.upbound.io/v1beta2 +kind: Target +metadata: + annotations: + meta.upbound.io/example-id: cloudwatchevents/v1beta2/target + labels: + testing.upbound.io/example-name: yada + name: yada +spec: + forProvider: + arn: ${aws_kinesis_stream.test_stream.arn} + region: us-west-1 + ruleSelector: + matchLabels: + testing.upbound.io/example-name: console + runCommandTargets: + - key: tag:Name + values: + - FooBar + - key: InstanceIds + values: + - i-162058cd308bffec2 + targetId: Yada + +--- + +apiVersion: cloudwatchevents.aws.upbound.io/v1beta1 +kind: Rule +metadata: + annotations: + meta.upbound.io/example-id: cloudwatchevents/v1beta2/target + labels: + testing.upbound.io/example-name: console + name: console +spec: + forProvider: + description: Capture all EC2 scaling events + eventPattern: |- + ${jsonencode({ + source 
= [ + "aws.autoscaling" + ] + + detail-type = [ + "EC2 Instance Launch Successful", + "EC2 Instance Terminate Successful", + "EC2 Instance Launch Unsuccessful", + "EC2 Instance Terminate Unsuccessful" + ] + })} + region: us-west-1 + +--- + +apiVersion: kinesis.aws.upbound.io/v1beta2 +kind: Stream +metadata: + annotations: + meta.upbound.io/example-id: cloudwatchevents/v1beta2/target + labels: + testing.upbound.io/example-name: test_stream + name: test-stream +spec: + forProvider: + region: us-west-1 + shardCount: 1 diff --git a/examples-generated/cloudwatchlogs/v1beta2/metricfilter.yaml b/examples-generated/cloudwatchlogs/v1beta2/metricfilter.yaml new file mode 100644 index 0000000000..25b6b2df23 --- /dev/null +++ b/examples-generated/cloudwatchlogs/v1beta2/metricfilter.yaml @@ -0,0 +1,33 @@ +apiVersion: cloudwatchlogs.aws.upbound.io/v1beta2 +kind: MetricFilter +metadata: + annotations: + meta.upbound.io/example-id: cloudwatchlogs/v1beta2/metricfilter + labels: + testing.upbound.io/example-name: yada + name: yada +spec: + forProvider: + logGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: dada + metricTransformation: + - name: EventCount + namespace: YourNamespace + value: "1" + pattern: "" + region: us-west-1 + +--- + +apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 +kind: Group +metadata: + annotations: + meta.upbound.io/example-id: cloudwatchlogs/v1beta2/metricfilter + labels: + testing.upbound.io/example-name: dada + name: dada +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/codepipeline/v1beta2/codepipeline.yaml b/examples-generated/codepipeline/v1beta2/codepipeline.yaml new file mode 100644 index 0000000000..33ff104cd8 --- /dev/null +++ b/examples-generated/codepipeline/v1beta2/codepipeline.yaml @@ -0,0 +1,147 @@ +apiVersion: codepipeline.aws.upbound.io/v1beta2 +kind: Codepipeline +metadata: + annotations: + meta.upbound.io/example-id: codepipeline/v1beta2/codepipeline + labels: + testing.upbound.io/example-name: 
codepipeline + name: codepipeline +spec: + forProvider: + artifactStore: + - encryptionKey: + - id: ${data.aws_kms_alias.s3kmskey.arn} + type: KMS + locationSelector: + matchLabels: + testing.upbound.io/example-name: codepipeline_bucket + type: S3 + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: codepipeline_role + stage: + - action: + - category: Source + configuration: + BranchName: main + ConnectionArn: ${aws_codestarconnections_connection.example.arn} + FullRepositoryId: my-organization/example + name: Source + outputArtifacts: + - source_output + owner: AWS + provider: CodeStarSourceConnection + version: "1" + name: Source + - action: + - category: Build + configuration: + ProjectName: test + inputArtifacts: + - source_output + name: Build + outputArtifacts: + - build_output + owner: AWS + provider: CodeBuild + version: "1" + name: Build + - action: + - category: Deploy + configuration: + ActionMode: REPLACE_ON_FAILURE + Capabilities: CAPABILITY_AUTO_EXPAND,CAPABILITY_IAM + OutputFileName: CreateStackOutput.json + StackName: MyStack + TemplatePath: build_output::sam-templated.yaml + inputArtifacts: + - build_output + name: Deploy + owner: AWS + provider: CloudFormation + version: "1" + name: Deploy + +--- + +apiVersion: codestarconnections.aws.upbound.io/v1beta1 +kind: Connection +metadata: + annotations: + meta.upbound.io/example-id: codepipeline/v1beta2/codepipeline + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example-connection + providerType: GitHub + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: codepipeline/v1beta2/codepipeline + labels: + testing.upbound.io/example-name: codepipeline_role + name: codepipeline-role +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy 
+metadata: + annotations: + meta.upbound.io/example-id: codepipeline/v1beta2/codepipeline + labels: + testing.upbound.io/example-name: codepipeline_policy + name: codepipeline-policy +spec: + forProvider: + policy: ${data.aws_iam_policy_document.codepipeline_policy.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: codepipeline_role + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: codepipeline/v1beta2/codepipeline + labels: + testing.upbound.io/example-name: codepipeline_bucket + name: codepipeline-bucket +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta1 +kind: BucketPublicAccessBlock +metadata: + annotations: + meta.upbound.io/example-id: codepipeline/v1beta2/codepipeline + labels: + testing.upbound.io/example-name: codepipeline_bucket_pab + name: codepipeline-bucket-pab +spec: + forProvider: + blockPublicAcls: true + blockPublicPolicy: true + bucketSelector: + matchLabels: + testing.upbound.io/example-name: codepipeline_bucket + ignorePublicAcls: true + region: us-west-1 + restrictPublicBuckets: true diff --git a/examples-generated/codepipeline/v1beta2/customactiontype.yaml b/examples-generated/codepipeline/v1beta2/customactiontype.yaml new file mode 100644 index 0000000000..840b1275c3 --- /dev/null +++ b/examples-generated/codepipeline/v1beta2/customactiontype.yaml @@ -0,0 +1,20 @@ +apiVersion: codepipeline.aws.upbound.io/v1beta2 +kind: CustomActionType +metadata: + annotations: + meta.upbound.io/example-id: codepipeline/v1beta2/customactiontype + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + category: Build + inputArtifactDetails: + - maximumCount: 1 + minimumCount: 0 + outputArtifactDetails: + - maximumCount: 1 + minimumCount: 0 + providerName: example + region: us-west-1 + version: "1" diff --git 
a/examples-generated/codepipeline/v1beta2/webhook.yaml b/examples-generated/codepipeline/v1beta2/webhook.yaml new file mode 100644 index 0000000000..cdac26bb1e --- /dev/null +++ b/examples-generated/codepipeline/v1beta2/webhook.yaml @@ -0,0 +1,74 @@ +apiVersion: codepipeline.aws.upbound.io/v1beta2 +kind: Webhook +metadata: + annotations: + meta.upbound.io/example-id: codepipeline/v1beta2/webhook + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + authentication: GITHUB_HMAC + authenticationConfiguration: + - secretTokenSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + filter: + - jsonPath: $.ref + matchEquals: refs/heads/{Branch} + region: us-west-1 + targetAction: Source + targetPipelineSelector: + matchLabels: + testing.upbound.io/example-name: bar + +--- + +apiVersion: codepipeline.aws.upbound.io/v1beta2 +kind: Codepipeline +metadata: + annotations: + meta.upbound.io/example-id: codepipeline/v1beta2/webhook + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + artifactStore: + - encryptionKey: + - id: ${data.aws_kms_alias.s3kmskey.arn} + type: KMS + locationSelector: + matchLabels: + testing.upbound.io/example-name: bar + type: S3 + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: bar + stage: + - action: + - category: Source + configuration: + Branch: master + Owner: my-organization + Repo: test + name: Source + outputArtifacts: + - test + owner: ThirdParty + provider: GitHub + version: "1" + name: Source + - action: + - category: Build + configuration: + ProjectName: test + inputArtifacts: + - test + name: Build + owner: AWS + provider: CodeBuild + version: "1" + name: Build diff --git a/examples-generated/codestarconnections/v1beta1/connection.yaml b/examples-generated/codestarconnections/v1beta1/connection.yaml index 8128a97d22..10a4217d4b 100644 --- a/examples-generated/codestarconnections/v1beta1/connection.yaml +++ 
b/examples-generated/codestarconnections/v1beta1/connection.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: codepipeline.aws.upbound.io/v1beta1 +apiVersion: codepipeline.aws.upbound.io/v1beta2 kind: Codepipeline metadata: annotations: diff --git a/examples-generated/codestarconnections/v1beta2/host.yaml b/examples-generated/codestarconnections/v1beta2/host.yaml new file mode 100644 index 0000000000..357c91d2f3 --- /dev/null +++ b/examples-generated/codestarconnections/v1beta2/host.yaml @@ -0,0 +1,14 @@ +apiVersion: codestarconnections.aws.upbound.io/v1beta2 +kind: Host +metadata: + annotations: + meta.upbound.io/example-id: codestarconnections/v1beta2/host + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example-host + providerEndpoint: https://example.com + providerType: GitHubEnterpriseServer + region: us-west-1 diff --git a/examples-generated/cognitoidentity/v1beta1/cognitoidentitypoolproviderprincipaltag.yaml b/examples-generated/cognitoidentity/v1beta1/cognitoidentitypoolproviderprincipaltag.yaml index 5a12150459..f0a8189698 100644 --- a/examples-generated/cognitoidentity/v1beta1/cognitoidentitypoolproviderprincipaltag.yaml +++ b/examples-generated/cognitoidentity/v1beta1/cognitoidentitypoolproviderprincipaltag.yaml @@ -43,7 +43,7 @@ spec: --- -apiVersion: cognitoidp.aws.upbound.io/v1beta1 +apiVersion: cognitoidp.aws.upbound.io/v1beta2 kind: UserPool metadata: annotations: diff --git a/examples-generated/cognitoidp/v1beta1/identityprovider.yaml b/examples-generated/cognitoidp/v1beta1/identityprovider.yaml index 13e6b6018a..00700038d4 100644 --- a/examples-generated/cognitoidp/v1beta1/identityprovider.yaml +++ b/examples-generated/cognitoidp/v1beta1/identityprovider.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: cognitoidp.aws.upbound.io/v1beta1 +apiVersion: cognitoidp.aws.upbound.io/v1beta2 kind: UserPool metadata: annotations: diff --git a/examples-generated/cognitoidp/v1beta1/resourceserver.yaml 
b/examples-generated/cognitoidp/v1beta1/resourceserver.yaml index 8e9a06c278..6ee2b65af4 100644 --- a/examples-generated/cognitoidp/v1beta1/resourceserver.yaml +++ b/examples-generated/cognitoidp/v1beta1/resourceserver.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: cognitoidp.aws.upbound.io/v1beta1 +apiVersion: cognitoidp.aws.upbound.io/v1beta2 kind: UserPool metadata: annotations: diff --git a/examples-generated/cognitoidp/v1beta1/user.yaml b/examples-generated/cognitoidp/v1beta1/user.yaml index 9e5d5a3ce5..d02792751e 100644 --- a/examples-generated/cognitoidp/v1beta1/user.yaml +++ b/examples-generated/cognitoidp/v1beta1/user.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: cognitoidp.aws.upbound.io/v1beta1 +apiVersion: cognitoidp.aws.upbound.io/v1beta2 kind: UserPool metadata: annotations: diff --git a/examples-generated/cognitoidp/v1beta1/usergroup.yaml b/examples-generated/cognitoidp/v1beta1/usergroup.yaml index fcf95fb279..cfcbaab68a 100644 --- a/examples-generated/cognitoidp/v1beta1/usergroup.yaml +++ b/examples-generated/cognitoidp/v1beta1/usergroup.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: cognitoidp.aws.upbound.io/v1beta1 +apiVersion: cognitoidp.aws.upbound.io/v1beta2 kind: UserPool metadata: annotations: diff --git a/examples-generated/cognitoidp/v1beta1/useringroup.yaml b/examples-generated/cognitoidp/v1beta1/useringroup.yaml index 324da642b6..371bcd7947 100644 --- a/examples-generated/cognitoidp/v1beta1/useringroup.yaml +++ b/examples-generated/cognitoidp/v1beta1/useringroup.yaml @@ -56,7 +56,7 @@ spec: --- -apiVersion: cognitoidp.aws.upbound.io/v1beta1 +apiVersion: cognitoidp.aws.upbound.io/v1beta2 kind: UserPool metadata: annotations: diff --git a/examples-generated/cognitoidp/v1beta1/userpoolclient.yaml b/examples-generated/cognitoidp/v1beta1/userpoolclient.yaml index c1e16781f0..44a3d27a1f 100644 --- a/examples-generated/cognitoidp/v1beta1/userpoolclient.yaml +++ b/examples-generated/cognitoidp/v1beta1/userpoolclient.yaml @@ -16,7 +16,7 @@ spec: --- 
-apiVersion: cognitoidp.aws.upbound.io/v1beta1 +apiVersion: cognitoidp.aws.upbound.io/v1beta2 kind: UserPool metadata: annotations: diff --git a/examples-generated/cognitoidp/v1beta1/userpooldomain.yaml b/examples-generated/cognitoidp/v1beta1/userpooldomain.yaml index 7aacfb78d4..004a9a6415 100644 --- a/examples-generated/cognitoidp/v1beta1/userpooldomain.yaml +++ b/examples-generated/cognitoidp/v1beta1/userpooldomain.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: cognitoidp.aws.upbound.io/v1beta1 +apiVersion: cognitoidp.aws.upbound.io/v1beta2 kind: UserPool metadata: annotations: diff --git a/examples-generated/cognitoidp/v1beta1/userpooluicustomization.yaml b/examples-generated/cognitoidp/v1beta1/userpooluicustomization.yaml index 779ddea8ea..9321b0e154 100644 --- a/examples-generated/cognitoidp/v1beta1/userpooluicustomization.yaml +++ b/examples-generated/cognitoidp/v1beta1/userpooluicustomization.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: cognitoidp.aws.upbound.io/v1beta1 +apiVersion: cognitoidp.aws.upbound.io/v1beta2 kind: UserPool metadata: annotations: diff --git a/examples-generated/cognitoidp/v1beta2/riskconfiguration.yaml b/examples-generated/cognitoidp/v1beta2/riskconfiguration.yaml new file mode 100644 index 0000000000..da2c9c0c36 --- /dev/null +++ b/examples-generated/cognitoidp/v1beta2/riskconfiguration.yaml @@ -0,0 +1,17 @@ +apiVersion: cognitoidp.aws.upbound.io/v1beta2 +kind: RiskConfiguration +metadata: + annotations: + meta.upbound.io/example-id: cognitoidp/v1beta2/riskconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + riskExceptionConfiguration: + - blockedIpRangeList: + - 10.10.10.10/32 + userPoolIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/cognitoidp/v1beta2/userpool.yaml b/examples-generated/cognitoidp/v1beta2/userpool.yaml new file mode 100644 index 0000000000..2a82c6bc92 --- /dev/null +++ 
b/examples-generated/cognitoidp/v1beta2/userpool.yaml @@ -0,0 +1,12 @@ +apiVersion: cognitoidp.aws.upbound.io/v1beta2 +kind: UserPool +metadata: + annotations: + meta.upbound.io/example-id: cognitoidp/v1beta2/userpool + labels: + testing.upbound.io/example-name: pool + name: pool +spec: + forProvider: + name: mypool + region: us-west-1 diff --git a/examples-generated/configservice/v1beta1/awsconfigurationrecorderstatus.yaml b/examples-generated/configservice/v1beta1/awsconfigurationrecorderstatus.yaml index d033a08008..4a39b06f79 100644 --- a/examples-generated/configservice/v1beta1/awsconfigurationrecorderstatus.yaml +++ b/examples-generated/configservice/v1beta1/awsconfigurationrecorderstatus.yaml @@ -13,7 +13,7 @@ spec: --- -apiVersion: configservice.aws.upbound.io/v1beta1 +apiVersion: configservice.aws.upbound.io/v1beta2 kind: ConfigurationRecorder metadata: annotations: @@ -30,7 +30,7 @@ spec: --- -apiVersion: configservice.aws.upbound.io/v1beta1 +apiVersion: configservice.aws.upbound.io/v1beta2 kind: DeliveryChannel metadata: annotations: @@ -97,7 +97,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git a/examples-generated/configservice/v1beta2/configrule.yaml b/examples-generated/configservice/v1beta2/configrule.yaml new file mode 100644 index 0000000000..be019c1da5 --- /dev/null +++ b/examples-generated/configservice/v1beta2/configrule.yaml @@ -0,0 +1,64 @@ +apiVersion: configservice.aws.upbound.io/v1beta2 +kind: ConfigRule +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/configrule + labels: + testing.upbound.io/example-name: r + name: r +spec: + forProvider: + region: us-west-1 + source: + - owner: AWS + sourceIdentifierSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: configservice.aws.upbound.io/v1beta2 +kind: ConfigurationRecorder +metadata: + annotations: + meta.upbound.io/example-id: 
configservice/v1beta2/configrule + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: r + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/configrule + labels: + testing.upbound.io/example-name: r + name: r +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/configrule + labels: + testing.upbound.io/example-name: p + name: p +spec: + forProvider: + policy: ${data.aws_iam_policy_document.p.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: r diff --git a/examples-generated/configservice/v1beta2/configurationaggregator.yaml b/examples-generated/configservice/v1beta2/configurationaggregator.yaml new file mode 100644 index 0000000000..1b854eb5ed --- /dev/null +++ b/examples-generated/configservice/v1beta2/configurationaggregator.yaml @@ -0,0 +1,16 @@ +apiVersion: configservice.aws.upbound.io/v1beta2 +kind: ConfigurationAggregator +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/configurationaggregator + labels: + testing.upbound.io/example-name: account + name: account +spec: + forProvider: + accountAggregationSource: + - accountIds: + - "123456789012" + regions: + - us-west-2 + region: us-west-1 diff --git a/examples-generated/configservice/v1beta2/configurationrecorder.yaml b/examples-generated/configservice/v1beta2/configurationrecorder.yaml new file mode 100644 index 0000000000..710197728a --- /dev/null +++ b/examples-generated/configservice/v1beta2/configurationrecorder.yaml @@ -0,0 +1,28 @@ +apiVersion: configservice.aws.upbound.io/v1beta2 +kind: ConfigurationRecorder +metadata: + annotations: + meta.upbound.io/example-id: 
configservice/v1beta2/configurationrecorder + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: r + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/configurationrecorder + labels: + testing.upbound.io/example-name: r + name: r +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} diff --git a/examples-generated/configservice/v1beta2/deliverychannel.yaml b/examples-generated/configservice/v1beta2/deliverychannel.yaml new file mode 100644 index 0000000000..1a084341ac --- /dev/null +++ b/examples-generated/configservice/v1beta2/deliverychannel.yaml @@ -0,0 +1,78 @@ +apiVersion: configservice.aws.upbound.io/v1beta2 +kind: DeliveryChannel +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/deliverychannel + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + region: us-west-1 + s3BucketNameSelector: + matchLabels: + testing.upbound.io/example-name: b + +--- + +apiVersion: configservice.aws.upbound.io/v1beta2 +kind: ConfigurationRecorder +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/deliverychannel + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: r + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/deliverychannel + labels: + testing.upbound.io/example-name: r + name: r +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/deliverychannel + labels: + 
testing.upbound.io/example-name: p + name: p +spec: + forProvider: + policy: ${data.aws_iam_policy_document.p.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: r + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: configservice/v1beta2/deliverychannel + labels: + testing.upbound.io/example-name: b + name: b +spec: + forProvider: + forceDestroy: true + region: us-west-1 diff --git a/examples-generated/configservice/v1beta2/remediationconfiguration.yaml b/examples-generated/configservice/v1beta2/remediationconfiguration.yaml new file mode 100644 index 0000000000..4216ba1d66 --- /dev/null +++ b/examples-generated/configservice/v1beta2/remediationconfiguration.yaml @@ -0,0 +1,48 @@ +apiVersion: configservice.aws.upbound.io/v1beta2 +kind: RemediationConfiguration +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/remediationconfiguration + labels: + testing.upbound.io/example-name: this + name: this +spec: + forProvider: + automatic: true + executionControls: + - ssmControls: + - concurrentExecutionRatePercentage: 25 + errorPercentage: 20 + maximumAutomaticAttempts: 10 + parameter: + - name: AutomationAssumeRole + staticValue: arn:aws:iam::875924563244:role/security_config + - name: BucketName + resourceValue: RESOURCE_ID + - name: SSEAlgorithm + staticValue: AES256 + region: us-west-1 + resourceType: AWS::S3::Bucket + retryAttemptSeconds: 600 + targetId: AWS-EnableS3BucketEncryption + targetType: SSM_DOCUMENT + targetVersion: "1" + +--- + +apiVersion: configservice.aws.upbound.io/v1beta2 +kind: ConfigRule +metadata: + annotations: + meta.upbound.io/example-id: configservice/v1beta2/remediationconfiguration + labels: + testing.upbound.io/example-name: this + name: this +spec: + forProvider: + region: us-west-1 + source: + - owner: AWS + sourceIdentifierSelector: + matchLabels: + 
testing.upbound.io/example-name: example diff --git a/examples-generated/connect/v1beta2/botassociation.yaml b/examples-generated/connect/v1beta2/botassociation.yaml new file mode 100644 index 0000000000..757774b409 --- /dev/null +++ b/examples-generated/connect/v1beta2/botassociation.yaml @@ -0,0 +1,19 @@ +apiVersion: connect.aws.upbound.io/v1beta2 +kind: BotAssociation +metadata: + annotations: + meta.upbound.io/example-id: connect/v1beta2/botassociation + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + instanceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + lexBot: + - lexRegion: us-west-2 + nameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/connect/v1beta2/instancestorageconfig.yaml b/examples-generated/connect/v1beta2/instancestorageconfig.yaml new file mode 100644 index 0000000000..f3e09620a6 --- /dev/null +++ b/examples-generated/connect/v1beta2/instancestorageconfig.yaml @@ -0,0 +1,21 @@ +apiVersion: connect.aws.upbound.io/v1beta2 +kind: InstanceStorageConfig +metadata: + annotations: + meta.upbound.io/example-id: connect/v1beta2/instancestorageconfig + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + instanceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + resourceType: CONTACT_TRACE_RECORDS + storageConfig: + - kinesisFirehoseConfig: + - firehoseArnSelector: + matchLabels: + testing.upbound.io/example-name: example + storageType: KINESIS_FIREHOSE diff --git a/examples-generated/connect/v1beta2/quickconnect.yaml b/examples-generated/connect/v1beta2/quickconnect.yaml new file mode 100644 index 0000000000..54e253521b --- /dev/null +++ b/examples-generated/connect/v1beta2/quickconnect.yaml @@ -0,0 +1,22 @@ +apiVersion: connect.aws.upbound.io/v1beta2 +kind: QuickConnect +metadata: + annotations: + meta.upbound.io/example-id: 
connect/v1beta2/quickconnect + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + description: quick connect phone number + instanceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: Example Name + quickConnectConfig: + - phoneConfig: + - phoneNumber: "+12345678912" + quickConnectType: PHONE_NUMBER + region: us-west-1 + tags: + Name: Example Quick Connect diff --git a/examples-generated/connect/v1beta2/user.yaml b/examples-generated/connect/v1beta2/user.yaml new file mode 100644 index 0000000000..154840c3c4 --- /dev/null +++ b/examples-generated/connect/v1beta2/user.yaml @@ -0,0 +1,30 @@ +apiVersion: connect.aws.upbound.io/v1beta2 +kind: User +metadata: + annotations: + meta.upbound.io/example-id: connect/v1beta2/user + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identityInfo: + - firstName: example + lastName: example2 + instanceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + phoneConfig: + - afterContactWorkTimeLimit: 0 + phoneType: SOFT_PHONE + region: us-west-1 + routingProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + securityProfileIds: + - ${aws_connect_security_profile.example.security_profile_id} diff --git a/examples-generated/connect/v1beta2/userhierarchystructure.yaml b/examples-generated/connect/v1beta2/userhierarchystructure.yaml new file mode 100644 index 0000000000..1a29520f93 --- /dev/null +++ b/examples-generated/connect/v1beta2/userhierarchystructure.yaml @@ -0,0 +1,17 @@ +apiVersion: connect.aws.upbound.io/v1beta2 +kind: UserHierarchyStructure +metadata: + annotations: + meta.upbound.io/example-id: connect/v1beta2/userhierarchystructure + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + hierarchyStructure: + - levelOne: + - name: levelone + 
instanceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/connect/v1beta3/hoursofoperation.yaml b/examples-generated/connect/v1beta3/hoursofoperation.yaml new file mode 100644 index 0000000000..f573d73c86 --- /dev/null +++ b/examples-generated/connect/v1beta3/hoursofoperation.yaml @@ -0,0 +1,34 @@ +apiVersion: connect.aws.upbound.io/v1beta3 +kind: HoursOfOperation +metadata: + annotations: + meta.upbound.io/example-id: connect/v1beta3/hoursofoperation + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + config: + - day: MONDAY + endTime: + - hours: 23 + minutes: 8 + startTime: + - hours: 8 + minutes: 0 + - day: TUESDAY + endTime: + - hours: 21 + minutes: 0 + startTime: + - hours: 9 + minutes: 0 + description: Monday office hours + instanceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: Office Hours + region: us-west-1 + tags: + Name: Example Hours of Operation + timeZone: EST diff --git a/examples-generated/connect/v1beta3/queue.yaml b/examples-generated/connect/v1beta3/queue.yaml new file mode 100644 index 0000000000..3c45ffc425 --- /dev/null +++ b/examples-generated/connect/v1beta3/queue.yaml @@ -0,0 +1,21 @@ +apiVersion: connect.aws.upbound.io/v1beta3 +kind: Queue +metadata: + annotations: + meta.upbound.io/example-id: connect/v1beta3/queue + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + description: Example Description + hoursOfOperationIdSelector: + matchLabels: + testing.upbound.io/example-name: example + instanceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: Example Name + region: us-west-1 + tags: + Name: Example Queue diff --git a/examples-generated/datasync/v1beta2/locations3.yaml b/examples-generated/datasync/v1beta2/locations3.yaml new file mode 100644 index 0000000000..5a6a0a63da --- /dev/null +++ b/examples-generated/datasync/v1beta2/locations3.yaml 
@@ -0,0 +1,19 @@ +apiVersion: datasync.aws.upbound.io/v1beta2 +kind: LocationS3 +metadata: + annotations: + meta.upbound.io/example-id: datasync/v1beta2/locations3 + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + s3BucketArnSelector: + matchLabels: + testing.upbound.io/example-name: example + s3Config: + - bucketAccessRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + subdirectory: /example/prefix diff --git a/examples-generated/datasync/v1beta2/task.yaml b/examples-generated/datasync/v1beta2/task.yaml new file mode 100644 index 0000000000..f50a0a2614 --- /dev/null +++ b/examples-generated/datasync/v1beta2/task.yaml @@ -0,0 +1,20 @@ +apiVersion: datasync.aws.upbound.io/v1beta2 +kind: Task +metadata: + annotations: + meta.upbound.io/example-id: datasync/v1beta2/task + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + destinationLocationArnSelector: + matchLabels: + testing.upbound.io/example-name: destination + name: example + options: + - bytesPerSecond: -1 + region: us-west-1 + sourceLocationArnSelector: + matchLabels: + testing.upbound.io/example-name: source diff --git a/examples-generated/dax/v1beta2/cluster.yaml b/examples-generated/dax/v1beta2/cluster.yaml new file mode 100644 index 0000000000..cfb933c431 --- /dev/null +++ b/examples-generated/dax/v1beta2/cluster.yaml @@ -0,0 +1,16 @@ +apiVersion: dax.aws.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: dax/v1beta2/cluster + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + iamRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: aws_iam_role + nodeType: dax.r4.large + region: us-west-1 + replicationFactor: 1 diff --git a/examples-generated/deploy/v1beta2/deploymentconfig.yaml b/examples-generated/deploy/v1beta2/deploymentconfig.yaml new file mode 100644 index 0000000000..d4b4e0c538 --- 
/dev/null +++ b/examples-generated/deploy/v1beta2/deploymentconfig.yaml @@ -0,0 +1,54 @@ +apiVersion: deploy.aws.upbound.io/v1beta2 +kind: DeploymentConfig +metadata: + annotations: + meta.upbound.io/example-id: deploy/v1beta2/deploymentconfig + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + minimumHealthyHosts: + - type: HOST_COUNT + value: 2 + region: us-west-1 + +--- + +apiVersion: deploy.aws.upbound.io/v1beta2 +kind: DeploymentGroup +metadata: + annotations: + meta.upbound.io/example-id: deploy/v1beta2/deploymentconfig + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + alarmConfiguration: + - alarms: + - my-alarm-name + enabled: true + appNameSelector: + matchLabels: + testing.upbound.io/example-name: foo_app + autoRollbackConfiguration: + - enabled: true + events: + - DEPLOYMENT_FAILURE + deploymentConfigName: ${aws_codedeploy_deployment_config.foo.id} + ec2TagFilter: + - key: filterkey + type: KEY_AND_VALUE + value: filtervalue + region: us-west-1 + serviceRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: foo_role + triggerConfiguration: + - triggerEvents: + - DeploymentFailure + triggerName: foo-trigger + triggerTargetArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/deploy/v1beta2/deploymentgroup.yaml b/examples-generated/deploy/v1beta2/deploymentgroup.yaml new file mode 100644 index 0000000000..a845c27d1d --- /dev/null +++ b/examples-generated/deploy/v1beta2/deploymentgroup.yaml @@ -0,0 +1,102 @@ +apiVersion: deploy.aws.upbound.io/v1beta2 +kind: DeploymentGroup +metadata: + annotations: + meta.upbound.io/example-id: deploy/v1beta2/deploymentgroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + alarmConfiguration: + - alarms: + - my-alarm-name + enabled: true + appNameSelector: + matchLabels: + testing.upbound.io/example-name: example + autoRollbackConfiguration: + - enabled: 
true + events: + - DEPLOYMENT_FAILURE + ec2TagSet: + - ec2TagFilter: + - key: filterkey1 + type: KEY_AND_VALUE + value: filtervalue + - key: filterkey2 + type: KEY_AND_VALUE + value: filtervalue + outdatedInstancesStrategy: UPDATE + region: us-west-1 + serviceRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + triggerConfiguration: + - triggerEvents: + - DeploymentFailure + triggerName: example-trigger + triggerTargetArnSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: deploy.aws.upbound.io/v1beta1 +kind: App +metadata: + annotations: + meta.upbound.io/example-id: deploy/v1beta2/deploymentgroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: deploy/v1beta2/deploymentgroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicyAttachment +metadata: + annotations: + meta.upbound.io/example-id: deploy/v1beta2/deploymentgroup + labels: + testing.upbound.io/example-name: AWSCodeDeployRole + name: awscodedeployrole +spec: + forProvider: + policyArnSelector: + matchLabels: + testing.upbound.io/example-name: example + roleSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: sns.aws.upbound.io/v1beta1 +kind: Topic +metadata: + annotations: + meta.upbound.io/example-id: deploy/v1beta2/deploymentgroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/devicefarm/v1beta2/testgridproject.yaml b/examples-generated/devicefarm/v1beta2/testgridproject.yaml new file mode 100644 index 0000000000..56abcc2d1f --- /dev/null +++ 
b/examples-generated/devicefarm/v1beta2/testgridproject.yaml @@ -0,0 +1,22 @@ +apiVersion: devicefarm.aws.upbound.io/v1beta2 +kind: TestGridProject +metadata: + annotations: + meta.upbound.io/example-id: devicefarm/v1beta2/testgridproject + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example + region: us-west-1 + vpcConfig: + - securityGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example[*] + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example[*] + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/dlm/v1beta2/lifecyclepolicy.yaml b/examples-generated/dlm/v1beta2/lifecyclepolicy.yaml new file mode 100644 index 0000000000..d6ee0b6a8c --- /dev/null +++ b/examples-generated/dlm/v1beta2/lifecyclepolicy.yaml @@ -0,0 +1,64 @@ +apiVersion: dlm.aws.upbound.io/v1beta2 +kind: LifecyclePolicy +metadata: + annotations: + meta.upbound.io/example-id: dlm/v1beta2/lifecyclepolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: example DLM lifecycle policy + executionRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: dlm_lifecycle_role + policyDetails: + - resourceTypes: + - VOLUME + schedule: + - copyTags: false + createRule: + - interval: 24 + intervalUnit: HOURS + times: + - "23:45" + name: 2 weeks of daily snapshots + retainRule: + - count: 14 + tagsToAdd: + SnapshotCreator: DLM + targetTags: + Snapshot: "true" + region: us-west-1 + state: ENABLED + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: dlm/v1beta2/lifecyclepolicy + labels: + testing.upbound.io/example-name: dlm_lifecycle_role + name: dlm-lifecycle-role +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + 
annotations: + meta.upbound.io/example-id: dlm/v1beta2/lifecyclepolicy + labels: + testing.upbound.io/example-name: dlm_lifecycle + name: dlm-lifecycle +spec: + forProvider: + policy: ${data.aws_iam_policy_document.dlm_lifecycle.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: dlm_lifecycle_role diff --git a/examples-generated/dms/v1beta2/endpoint.yaml b/examples-generated/dms/v1beta2/endpoint.yaml new file mode 100644 index 0000000000..7f5e0687a9 --- /dev/null +++ b/examples-generated/dms/v1beta2/endpoint.yaml @@ -0,0 +1,29 @@ +apiVersion: dms.aws.upbound.io/v1beta2 +kind: Endpoint +metadata: + annotations: + meta.upbound.io/example-id: dms/v1beta2/endpoint + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + certificateArn: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 + databaseName: test + endpointType: source + engineName: aurora + extraConnectionAttributes: "" + kmsKeyArnSelector: + matchLabels: + testing.upbound.io/example-name: example + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + port: 3306 + region: us-west-1 + serverName: test + sslMode: none + tags: + Name: test + username: test diff --git a/examples-generated/ds/v1beta2/directory.yaml b/examples-generated/ds/v1beta2/directory.yaml new file mode 100644 index 0000000000..5d051e64bf --- /dev/null +++ b/examples-generated/ds/v1beta2/directory.yaml @@ -0,0 +1,79 @@ +apiVersion: ds.aws.upbound.io/v1beta2 +kind: Directory +metadata: + annotations: + meta.upbound.io/example-id: ds/v1beta2/directory + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + name: corp.notexample.com + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + region: us-west-1 + size: Small + tags: + Project: foo + vpcSettings: + - subnetIdsRefs: + - name: foo + - name: bar + vpcIdSelector: + matchLabels: + 
testing.upbound.io/example-name: main + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: ds/v1beta2/directory + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + availabilityZone: us-west-2b + cidrBlock: 10.0.2.0/24 + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: main + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: ds/v1beta2/directory + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + availabilityZone: us-west-2a + cidrBlock: 10.0.1.0/24 + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: main + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: ds/v1beta2/directory + labels: + testing.upbound.io/example-name: main + name: main +spec: + forProvider: + cidrBlock: 10.0.0.0/16 + region: us-west-1 diff --git a/examples-generated/ds/v1beta2/shareddirectory.yaml b/examples-generated/ds/v1beta2/shareddirectory.yaml new file mode 100644 index 0000000000..81da828147 --- /dev/null +++ b/examples-generated/ds/v1beta2/shareddirectory.yaml @@ -0,0 +1,48 @@ +apiVersion: ds.aws.upbound.io/v1beta2 +kind: SharedDirectory +metadata: + annotations: + meta.upbound.io/example-id: ds/v1beta2/shareddirectory + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + directoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + notesSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + region: us-west-1 + target: + - id: ${data.aws_caller_identity.receiver.account_id} + +--- + +apiVersion: ds.aws.upbound.io/v1beta2 +kind: Directory +metadata: + annotations: + meta.upbound.io/example-id: ds/v1beta2/shareddirectory + labels: + testing.upbound.io/example-name: example + 
name: example +spec: + forProvider: + edition: Standard + name: tf-example + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + region: us-west-1 + type: MicrosoftAD + vpcSettings: + - subnetIdsSelector: + matchLabels: + testing.upbound.io/example-name: example[*] + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/dynamodb/v1beta1/globaltable.yaml b/examples-generated/dynamodb/v1beta1/globaltable.yaml index 0d57e7b212..6441b4737a 100644 --- a/examples-generated/dynamodb/v1beta1/globaltable.yaml +++ b/examples-generated/dynamodb/v1beta1/globaltable.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: dynamodb.aws.upbound.io/v1beta1 +apiVersion: dynamodb.aws.upbound.io/v1beta2 kind: Table metadata: annotations: @@ -39,7 +39,7 @@ spec: --- -apiVersion: dynamodb.aws.upbound.io/v1beta1 +apiVersion: dynamodb.aws.upbound.io/v1beta2 kind: Table metadata: annotations: diff --git a/examples-generated/dynamodb/v1beta1/kinesisstreamingdestination.yaml b/examples-generated/dynamodb/v1beta1/kinesisstreamingdestination.yaml index 58ece4ea1e..71cfb816cc 100644 --- a/examples-generated/dynamodb/v1beta1/kinesisstreamingdestination.yaml +++ b/examples-generated/dynamodb/v1beta1/kinesisstreamingdestination.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: dynamodb.aws.upbound.io/v1beta1 +apiVersion: dynamodb.aws.upbound.io/v1beta2 kind: Table metadata: annotations: @@ -36,7 +36,7 @@ spec: --- -apiVersion: kinesis.aws.upbound.io/v1beta1 +apiVersion: kinesis.aws.upbound.io/v1beta2 kind: Stream metadata: annotations: diff --git a/examples-generated/dynamodb/v1beta1/tableitem.yaml b/examples-generated/dynamodb/v1beta1/tableitem.yaml index be3f96005c..798e84c0d7 100644 --- a/examples-generated/dynamodb/v1beta1/tableitem.yaml +++ b/examples-generated/dynamodb/v1beta1/tableitem.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: dynamodb.aws.upbound.io/v1beta1 +apiVersion: dynamodb.aws.upbound.io/v1beta2 kind: 
Table metadata: annotations: diff --git a/examples-generated/dynamodb/v1beta1/tablereplica.yaml b/examples-generated/dynamodb/v1beta1/tablereplica.yaml index a57569ca7e..7a2ecf134f 100644 --- a/examples-generated/dynamodb/v1beta1/tablereplica.yaml +++ b/examples-generated/dynamodb/v1beta1/tablereplica.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: dynamodb.aws.upbound.io/v1beta1 +apiVersion: dynamodb.aws.upbound.io/v1beta2 kind: Table metadata: annotations: diff --git a/examples-generated/dynamodb/v1beta1/tag.yaml b/examples-generated/dynamodb/v1beta1/tag.yaml index 6a28592997..076ea38b5f 100644 --- a/examples-generated/dynamodb/v1beta1/tag.yaml +++ b/examples-generated/dynamodb/v1beta1/tag.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: dynamodb.aws.upbound.io/v1beta1 +apiVersion: dynamodb.aws.upbound.io/v1beta2 kind: Table metadata: annotations: diff --git a/examples-generated/dynamodb/v1beta2/table.yaml b/examples-generated/dynamodb/v1beta2/table.yaml new file mode 100644 index 0000000000..01635ab9bf --- /dev/null +++ b/examples-generated/dynamodb/v1beta2/table.yaml @@ -0,0 +1,38 @@ +apiVersion: dynamodb.aws.upbound.io/v1beta2 +kind: Table +metadata: + annotations: + meta.upbound.io/example-id: dynamodb/v1beta2/table + labels: + testing.upbound.io/example-name: basic-dynamodb-table + name: basic-dynamodb-table +spec: + forProvider: + attribute: + - name: UserId + type: S + - name: GameTitle + type: S + - name: TopScore + type: "N" + billingMode: PROVISIONED + globalSecondaryIndex: + - hashKey: GameTitle + name: GameTitleIndex + nonKeyAttributes: + - UserId + projectionType: INCLUDE + rangeKey: TopScore + readCapacity: 10 + writeCapacity: 10 + hashKey: UserId + rangeKey: GameTitle + readCapacity: 20 + region: us-west-1 + tags: + Environment: production + Name: dynamodb-table-1 + ttl: + - attributeName: TimeToExist + enabled: false + writeCapacity: 20 diff --git a/examples-generated/ec2/v1beta1/eipassociation.yaml b/examples-generated/ec2/v1beta1/eipassociation.yaml 
index 8816850fe2..56d9838c41 100644 --- a/examples-generated/ec2/v1beta1/eipassociation.yaml +++ b/examples-generated/ec2/v1beta1/eipassociation.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/ec2/v1beta1/instancestate.yaml b/examples-generated/ec2/v1beta1/instancestate.yaml index d82ce647df..797f816e6f 100644 --- a/examples-generated/ec2/v1beta1/instancestate.yaml +++ b/examples-generated/ec2/v1beta1/instancestate.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/ec2/v1beta1/networkinterfacesgattachment.yaml b/examples-generated/ec2/v1beta1/networkinterfacesgattachment.yaml index 367180139f..0f8fb44793 100644 --- a/examples-generated/ec2/v1beta1/networkinterfacesgattachment.yaml +++ b/examples-generated/ec2/v1beta1/networkinterfacesgattachment.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/ec2/v1beta1/spotdatafeedsubscription.yaml b/examples-generated/ec2/v1beta1/spotdatafeedsubscription.yaml index 71ea034204..8c5e2f8064 100644 --- a/examples-generated/ec2/v1beta1/spotdatafeedsubscription.yaml +++ b/examples-generated/ec2/v1beta1/spotdatafeedsubscription.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git a/examples-generated/ec2/v1beta1/tag.yaml b/examples-generated/ec2/v1beta1/tag.yaml index 6ea564d7f0..db752a82a3 100644 --- a/examples-generated/ec2/v1beta1/tag.yaml +++ b/examples-generated/ec2/v1beta1/tag.yaml @@ -46,7 +46,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: VPNConnection metadata: annotations: 
diff --git a/examples-generated/ec2/v1beta1/transitgatewaymulticastdomain.yaml b/examples-generated/ec2/v1beta1/transitgatewaymulticastdomain.yaml index 7a53e9985f..769e2a0d26 100644 --- a/examples-generated/ec2/v1beta1/transitgatewaymulticastdomain.yaml +++ b/examples-generated/ec2/v1beta1/transitgatewaymulticastdomain.yaml @@ -210,7 +210,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: @@ -229,7 +229,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: @@ -248,7 +248,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/ec2/v1beta1/volumeattachment.yaml b/examples-generated/ec2/v1beta1/volumeattachment.yaml index 313e7c4838..5b9f9adb4a 100644 --- a/examples-generated/ec2/v1beta1/volumeattachment.yaml +++ b/examples-generated/ec2/v1beta1/volumeattachment.yaml @@ -35,7 +35,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/ec2/v1beta1/vpcipampoolcidrallocation.yaml b/examples-generated/ec2/v1beta1/vpcipampoolcidrallocation.yaml index 3e97eb04dc..8bbb9fd4d6 100644 --- a/examples-generated/ec2/v1beta1/vpcipampoolcidrallocation.yaml +++ b/examples-generated/ec2/v1beta1/vpcipampoolcidrallocation.yaml @@ -51,7 +51,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: VPCIpamPoolCidr metadata: annotations: diff --git a/examples-generated/ec2/v1beta1/vpnconnectionroute.yaml b/examples-generated/ec2/v1beta1/vpnconnectionroute.yaml index 77f0be5bab..f016662a79 100644 --- a/examples-generated/ec2/v1beta1/vpnconnectionroute.yaml +++ b/examples-generated/ec2/v1beta1/vpnconnectionroute.yaml @@ -48,7 +48,7 @@ spec: --- -apiVersion: 
ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: VPNConnection metadata: annotations: diff --git a/examples-generated/ec2/v1beta2/ebssnapshotimport.yaml b/examples-generated/ec2/v1beta2/ebssnapshotimport.yaml new file mode 100644 index 0000000000..e40ef76fbc --- /dev/null +++ b/examples-generated/ec2/v1beta2/ebssnapshotimport.yaml @@ -0,0 +1,19 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: EBSSnapshotImport +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/ebssnapshotimport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + diskContainer: + - format: VHD + userBucket: + - s3Bucket: disk-images + s3Key: source.vhd + region: us-west-1 + roleName: disk-image-import + tags: + Name: HelloWorld diff --git a/examples-generated/ec2/v1beta2/flowlog.yaml b/examples-generated/ec2/v1beta2/flowlog.yaml new file mode 100644 index 0000000000..6a8506f4a4 --- /dev/null +++ b/examples-generated/ec2/v1beta2/flowlog.yaml @@ -0,0 +1,66 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: FlowLog +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/flowlog + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + iamRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + logDestinationSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + trafficType: ALL + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 +kind: Group +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/flowlog + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/flowlog + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/flowlog + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + policy: ${data.aws_iam_policy_document.example.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/ec2/v1beta2/instance.yaml b/examples-generated/ec2/v1beta2/instance.yaml new file mode 100644 index 0000000000..3782529cb7 --- /dev/null +++ b/examples-generated/ec2/v1beta2/instance.yaml @@ -0,0 +1,15 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: Instance +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/instance + labels: + testing.upbound.io/example-name: web + name: web +spec: + forProvider: + ami: ${data.aws_ami.ubuntu.id} + instanceType: t3.micro + region: us-west-1 + tags: + Name: HelloWorld diff --git a/examples-generated/ec2/v1beta2/launchtemplate.yaml b/examples-generated/ec2/v1beta2/launchtemplate.yaml new file mode 100644 index 0000000000..2c6d4c1f2a --- /dev/null +++ b/examples-generated/ec2/v1beta2/launchtemplate.yaml @@ -0,0 +1,62 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: LaunchTemplate +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/launchtemplate + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + blockDeviceMappings: + - deviceName: /dev/sdf + ebs: + - volumeSize: 20 + capacityReservationSpecification: + - capacityReservationPreference: open + cpuOptions: + - coreCount: 4 + threadsPerCore: 2 + creditSpecification: + - cpuCredits: standard + disableApiStop: true + disableApiTermination: true + ebsOptimized: true + elasticGpuSpecifications: + - type: test + elasticInferenceAccelerator: + - type: eia1.medium + iamInstanceProfile: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: 
example + imageId: ami-test + instanceInitiatedShutdownBehavior: terminate + instanceMarketOptions: + - marketType: spot + instanceType: t2.micro + kernelId: test + keyName: test + licenseSpecification: + - licenseConfigurationArn: arn:aws:license-manager:eu-west-1:123456789012:license-configuration:lic-0123456789abcdef0123456789abcdef + metadataOptions: + - httpEndpoint: enabled + httpPutResponseHopLimit: 1 + httpTokens: required + instanceMetadataTags: enabled + monitoring: + - enabled: true + name: foo + networkInterfaces: + - associatePublicIpAddress: true + placement: + - availabilityZone: us-west-2a + ramDiskId: test + region: us-west-1 + tagSpecifications: + - resourceType: instance + tags: + Name: test + userData: ${filebase64("${path.module}/example.sh")} + vpcSecurityGroupIdRefs: + - name: example diff --git a/examples-generated/ec2/v1beta2/spotfleetrequest.yaml b/examples-generated/ec2/v1beta2/spotfleetrequest.yaml new file mode 100644 index 0000000000..6ce5a4709f --- /dev/null +++ b/examples-generated/ec2/v1beta2/spotfleetrequest.yaml @@ -0,0 +1,39 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: SpotFleetRequest +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/spotfleetrequest + labels: + testing.upbound.io/example-name: cheap_compute + name: cheap-compute +spec: + forProvider: + allocationStrategy: diversified + iamFleetRole: arn:aws:iam::12345678:role/spot-fleet + launchSpecification: + - ami: ami-1234 + iamInstanceProfileArnSelector: + matchLabels: + testing.upbound.io/example-name: example + instanceType: m4.10xlarge + placementTenancy: dedicated + spotPrice: "2.793" + - ami: ami-5678 + availabilityZone: us-west-1a + iamInstanceProfileArnSelector: + matchLabels: + testing.upbound.io/example-name: example + instanceType: m4.4xlarge + keyName: my-key + rootBlockDevice: + - volumeSize: "300" + volumeType: gp2 + spotPrice: "1.117" + subnetId: subnet-1234 + tags: + Name: spot-fleet-example + weightedCapacity: 35 + region: us-west-1 + 
spotPrice: "0.03" + targetCapacity: 6 + validUntil: "2019-11-04T20:44:20Z" diff --git a/examples-generated/ec2/v1beta2/spotinstancerequest.yaml b/examples-generated/ec2/v1beta2/spotinstancerequest.yaml new file mode 100644 index 0000000000..676f48a0f5 --- /dev/null +++ b/examples-generated/ec2/v1beta2/spotinstancerequest.yaml @@ -0,0 +1,16 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: SpotInstanceRequest +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/spotinstancerequest + labels: + testing.upbound.io/example-name: cheap_worker + name: cheap-worker +spec: + forProvider: + ami: ami-1234 + instanceType: c4.xlarge + region: us-west-1 + spotPrice: "0.03" + tags: + Name: CheapWorker diff --git a/examples-generated/ec2/v1beta2/trafficmirrorfilterrule.yaml b/examples-generated/ec2/v1beta2/trafficmirrorfilterrule.yaml new file mode 100644 index 0000000000..29c9077dd0 --- /dev/null +++ b/examples-generated/ec2/v1beta2/trafficmirrorfilterrule.yaml @@ -0,0 +1,37 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: TrafficMirrorFilterRule +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/trafficmirrorfilterrule + labels: + testing.upbound.io/example-name: ruleout + name: ruleout +spec: + forProvider: + description: test rule + destinationCidrBlock: 10.0.0.0/8 + region: us-west-1 + ruleAction: accept + ruleNumber: 1 + sourceCidrBlock: 10.0.0.0/8 + trafficDirection: egress + trafficMirrorFilterIdSelector: + matchLabels: + testing.upbound.io/example-name: filter + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: TrafficMirrorFilter +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/trafficmirrorfilterrule + labels: + testing.upbound.io/example-name: filter + name: filter +spec: + forProvider: + description: traffic mirror filter - terraform example + networkServices: + - amazon-dns + region: us-west-1 diff --git a/examples-generated/ec2/v1beta2/vpcendpoint.yaml b/examples-generated/ec2/v1beta2/vpcendpoint.yaml new file 
mode 100644 index 0000000000..41e25d7af4 --- /dev/null +++ b/examples-generated/ec2/v1beta2/vpcendpoint.yaml @@ -0,0 +1,17 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: VPCEndpoint +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcendpoint + labels: + testing.upbound.io/example-name: s3 + name: s3 +spec: + forProvider: + region: us-west-1 + serviceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/ec2/v1beta2/vpcipampoolcidr.yaml b/examples-generated/ec2/v1beta2/vpcipampoolcidr.yaml new file mode 100644 index 0000000000..498d38cea9 --- /dev/null +++ b/examples-generated/ec2/v1beta2/vpcipampoolcidr.yaml @@ -0,0 +1,50 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: VPCIpamPoolCidr +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcipampoolcidr + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cidr: 172.20.0.0/16 + ipamPoolIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPCIpam +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcipampoolcidr + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + operatingRegions: + - regionName: ${data.aws_region.current.name} + region: us-west-1 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPCIpamPool +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcipampoolcidr + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressFamily: ipv4 + ipamScopeIdSelector: + matchLabels: + testing.upbound.io/example-name: example + locale: ${data.aws_region.current.name} + region: us-west-1 diff --git a/examples-generated/ec2/v1beta2/vpcpeeringconnection.yaml 
b/examples-generated/ec2/v1beta2/vpcpeeringconnection.yaml new file mode 100644 index 0000000000..1d15141372 --- /dev/null +++ b/examples-generated/ec2/v1beta2/vpcpeeringconnection.yaml @@ -0,0 +1,18 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: VPCPeeringConnection +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcpeeringconnection + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + peerOwnerId: ${var.peer_owner_id} + peerVpcIdSelector: + matchLabels: + testing.upbound.io/example-name: bar + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: foo diff --git a/examples-generated/ec2/v1beta2/vpcpeeringconnectionaccepter.yaml b/examples-generated/ec2/v1beta2/vpcpeeringconnectionaccepter.yaml new file mode 100644 index 0000000000..3badec75c9 --- /dev/null +++ b/examples-generated/ec2/v1beta2/vpcpeeringconnectionaccepter.yaml @@ -0,0 +1,74 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: VPCPeeringConnectionAccepter +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcpeeringconnectionaccepter + labels: + testing.upbound.io/example-name: peer + name: peer +spec: + forProvider: + autoAccept: true + provider: ${aws.peer} + region: us-west-1 + tags: + Side: Accepter + vpcPeeringConnectionIdSelector: + matchLabels: + testing.upbound.io/example-name: peer + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcpeeringconnectionaccepter + labels: + testing.upbound.io/example-name: main + name: main +spec: + forProvider: + cidrBlock: 10.0.0.0/16 + region: us-west-1 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcpeeringconnectionaccepter + labels: + testing.upbound.io/example-name: peer + name: peer +spec: + forProvider: + cidrBlock: 10.1.0.0/16 + provider: ${aws.peer} + region: us-west-1 + +--- + +apiVersion: 
ec2.aws.upbound.io/v1beta2 +kind: VPCPeeringConnection +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcpeeringconnectionaccepter + labels: + testing.upbound.io/example-name: peer + name: peer +spec: + forProvider: + autoAccept: false + peerOwnerId: ${data.aws_caller_identity.peer.account_id} + peerRegion: us-west-2 + peerVpcIdSelector: + matchLabels: + testing.upbound.io/example-name: peer + region: us-west-1 + tags: + Side: Requester + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/ec2/v1beta2/vpcpeeringconnectionoptions.yaml b/examples-generated/ec2/v1beta2/vpcpeeringconnectionoptions.yaml new file mode 100644 index 0000000000..5e5d6278e0 --- /dev/null +++ b/examples-generated/ec2/v1beta2/vpcpeeringconnectionoptions.yaml @@ -0,0 +1,67 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: VPCPeeringConnectionOptions +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcpeeringconnectionoptions + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + accepter: + - allowRemoteVpcDnsResolution: true + region: us-west-1 + vpcPeeringConnectionIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcpeeringconnectionoptions + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + cidrBlock: 10.1.0.0/16 + region: us-west-1 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcpeeringconnectionoptions + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + cidrBlock: 10.0.0.0/16 + region: us-west-1 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: VPCPeeringConnection +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpcpeeringconnectionoptions + labels: + 
testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + autoAccept: true + peerVpcIdSelector: + matchLabels: + testing.upbound.io/example-name: bar + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: foo diff --git a/examples-generated/ec2/v1beta2/vpnconnection.yaml b/examples-generated/ec2/v1beta2/vpnconnection.yaml new file mode 100644 index 0000000000..53afe4a5cc --- /dev/null +++ b/examples-generated/ec2/v1beta2/vpnconnection.yaml @@ -0,0 +1,51 @@ +apiVersion: ec2.aws.upbound.io/v1beta2 +kind: VPNConnection +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpnconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + customerGatewayIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + transitGatewayIdSelector: + matchLabels: + testing.upbound.io/example-name: example + typeSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: CustomerGateway +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpnconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bgpAsn: 65000 + ipAddress: 172.0.0.1 + region: us-west-1 + type: ipsec.1 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: TransitGateway +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta2/vpnconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/ecr/v1beta1/lifecyclepolicy.yaml b/examples-generated/ecr/v1beta1/lifecyclepolicy.yaml index 61b9217513..73b1ad29da 100644 --- a/examples-generated/ecr/v1beta1/lifecyclepolicy.yaml +++ b/examples-generated/ecr/v1beta1/lifecyclepolicy.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: ecr.aws.upbound.io/v1beta1 +apiVersion: ecr.aws.upbound.io/v1beta2 kind: Repository metadata: 
annotations: diff --git a/examples-generated/ecr/v1beta1/repositorypolicy.yaml b/examples-generated/ecr/v1beta1/repositorypolicy.yaml index a9a129cce8..4e9ba75168 100644 --- a/examples-generated/ecr/v1beta1/repositorypolicy.yaml +++ b/examples-generated/ecr/v1beta1/repositorypolicy.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: ecr.aws.upbound.io/v1beta1 +apiVersion: ecr.aws.upbound.io/v1beta2 kind: Repository metadata: annotations: diff --git a/examples-generated/ecr/v1beta2/replicationconfiguration.yaml b/examples-generated/ecr/v1beta2/replicationconfiguration.yaml new file mode 100644 index 0000000000..43be3e200b --- /dev/null +++ b/examples-generated/ecr/v1beta2/replicationconfiguration.yaml @@ -0,0 +1,16 @@ +apiVersion: ecr.aws.upbound.io/v1beta2 +kind: ReplicationConfiguration +metadata: + annotations: + meta.upbound.io/example-id: ecr/v1beta2/replicationconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + replicationConfiguration: + - rule: + - destination: + - region: ${data.aws_regions.example.names[0]} + registryId: ${data.aws_caller_identity.current.account_id} diff --git a/examples-generated/ecr/v1beta2/repository.yaml b/examples-generated/ecr/v1beta2/repository.yaml new file mode 100644 index 0000000000..c195da524d --- /dev/null +++ b/examples-generated/ecr/v1beta2/repository.yaml @@ -0,0 +1,14 @@ +apiVersion: ecr.aws.upbound.io/v1beta2 +kind: Repository +metadata: + annotations: + meta.upbound.io/example-id: ecr/v1beta2/repository + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + imageScanningConfiguration: + - scanOnPush: true + imageTagMutability: MUTABLE + region: us-west-1 diff --git a/examples-generated/ecrpublic/v1beta1/repositorypolicy.yaml b/examples-generated/ecrpublic/v1beta1/repositorypolicy.yaml index 6f4c161a5e..57f4666d45 100644 --- a/examples-generated/ecrpublic/v1beta1/repositorypolicy.yaml +++ 
b/examples-generated/ecrpublic/v1beta1/repositorypolicy.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: ecrpublic.aws.upbound.io/v1beta1 +apiVersion: ecrpublic.aws.upbound.io/v1beta2 kind: Repository metadata: annotations: diff --git a/examples-generated/ecrpublic/v1beta2/repository.yaml b/examples-generated/ecrpublic/v1beta2/repository.yaml new file mode 100644 index 0000000000..aecd84e23d --- /dev/null +++ b/examples-generated/ecrpublic/v1beta2/repository.yaml @@ -0,0 +1,23 @@ +apiVersion: ecrpublic.aws.upbound.io/v1beta2 +kind: Repository +metadata: + annotations: + meta.upbound.io/example-id: ecrpublic/v1beta2/repository + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + catalogData: + - aboutText: About Text + architectures: + - ARM + description: Description + logoImageBlob: ${filebase64(image.png)} + operatingSystems: + - Linux + usageText: Usage Text + provider: ${aws.us_east_1} + region: us-west-1 + tags: + env: production diff --git a/examples-generated/ecs/v1beta1/clustercapacityproviders.yaml b/examples-generated/ecs/v1beta1/clustercapacityproviders.yaml index 0cd4327c0c..e0df223ab2 100644 --- a/examples-generated/ecs/v1beta1/clustercapacityproviders.yaml +++ b/examples-generated/ecs/v1beta1/clustercapacityproviders.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: ecs.aws.upbound.io/v1beta1 +apiVersion: ecs.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/ecs/v1beta2/capacityprovider.yaml b/examples-generated/ecs/v1beta2/capacityprovider.yaml new file mode 100644 index 0000000000..53f6acd040 --- /dev/null +++ b/examples-generated/ecs/v1beta2/capacityprovider.yaml @@ -0,0 +1,39 @@ +apiVersion: ecs.aws.upbound.io/v1beta2 +kind: CapacityProvider +metadata: + annotations: + meta.upbound.io/example-id: ecs/v1beta2/capacityprovider + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + autoScalingGroupProvider: + - autoScalingGroupArnSelector: + 
matchLabels: + testing.upbound.io/example-name: test + managedScaling: + - maximumScalingStepSize: 1000 + minimumScalingStepSize: 1 + status: ENABLED + targetCapacity: 10 + managedTerminationProtection: ENABLED + region: us-west-1 + +--- + +apiVersion: autoscaling.aws.upbound.io/v1beta3 +kind: AutoscalingGroup +metadata: + annotations: + meta.upbound.io/example-id: ecs/v1beta2/capacityprovider + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + region: us-west-1 + tag: + - key: AmazonECSManaged + propagateAtLaunch: true + value: true diff --git a/examples-generated/ecs/v1beta2/cluster.yaml b/examples-generated/ecs/v1beta2/cluster.yaml new file mode 100644 index 0000000000..11e4cfbc8f --- /dev/null +++ b/examples-generated/ecs/v1beta2/cluster.yaml @@ -0,0 +1,14 @@ +apiVersion: ecs.aws.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: ecs/v1beta2/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + region: us-west-1 + setting: + - name: containerInsights + value: enabled diff --git a/examples-generated/ecs/v1beta2/service.yaml b/examples-generated/ecs/v1beta2/service.yaml new file mode 100644 index 0000000000..18bfd3696a --- /dev/null +++ b/examples-generated/ecs/v1beta2/service.yaml @@ -0,0 +1,33 @@ +apiVersion: ecs.aws.upbound.io/v1beta2 +kind: Service +metadata: + annotations: + meta.upbound.io/example-id: ecs/v1beta2/service + labels: + testing.upbound.io/example-name: mongo + name: mongo +spec: + forProvider: + clusterSelector: + matchLabels: + testing.upbound.io/example-name: foo + desiredCount: 3 + iamRoleSelector: + matchLabels: + testing.upbound.io/example-name: foo + loadBalancer: + - containerName: mongo + containerPort: 8080 + targetGroupArnSelector: + matchLabels: + testing.upbound.io/example-name: foo + orderedPlacementStrategy: + - field: cpu + type: binpack + placementConstraints: + - expression: attribute:ecs.availability-zone in 
[us-west-2a, us-west-2b] + type: memberOf + region: us-west-1 + taskDefinitionSelector: + matchLabels: + testing.upbound.io/example-name: mongo diff --git a/examples-generated/ecs/v1beta2/taskdefinition.yaml b/examples-generated/ecs/v1beta2/taskdefinition.yaml new file mode 100644 index 0000000000..89cea12aa9 --- /dev/null +++ b/examples-generated/ecs/v1beta2/taskdefinition.yaml @@ -0,0 +1,47 @@ +apiVersion: ecs.aws.upbound.io/v1beta2 +kind: TaskDefinition +metadata: + annotations: + meta.upbound.io/example-id: ecs/v1beta2/taskdefinition + labels: + testing.upbound.io/example-name: service + name: service +spec: + forProvider: + containerDefinitions: |- + ${jsonencode([ + { + name = "first" + image = "service-first" + cpu = 10 + memory = 512 + essential = true + portMappings = [ + { + containerPort = 80 + hostPort = 80 + } + ] + }, + { + name = "second" + image = "service-second" + cpu = 10 + memory = 256 + essential = true + portMappings = [ + { + containerPort = 443 + hostPort = 443 + } + ] + } + ])} + family: service + placementConstraints: + - expression: attribute:ecs.availability-zone in [us-west-2a, us-west-2b] + type: memberOf + region: us-west-1 + volume: + - hostPath: /ecs/service-storage + name: service-storage diff --git a/examples-generated/efs/v1beta1/filesystempolicy.yaml b/examples-generated/efs/v1beta1/filesystempolicy.yaml index 71c0987c36..08a0c46e0c 100644 --- a/examples-generated/efs/v1beta1/filesystempolicy.yaml +++ b/examples-generated/efs/v1beta1/filesystempolicy.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: efs.aws.upbound.io/v1beta1 +apiVersion: efs.aws.upbound.io/v1beta2 kind: FileSystem metadata: annotations: diff --git a/examples-generated/efs/v1beta2/accesspoint.yaml b/examples-generated/efs/v1beta2/accesspoint.yaml new file mode 100644 index 0000000000..26c1e813bf --- /dev/null +++ b/examples-generated/efs/v1beta2/accesspoint.yaml @@ -0,0 +1,14 @@ +apiVersion: efs.aws.upbound.io/v1beta2 +kind: AccessPoint +metadata: + annotations: + 
meta.upbound.io/example-id: efs/v1beta2/accesspoint + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + fileSystemIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + region: us-west-1 diff --git a/examples-generated/efs/v1beta2/backuppolicy.yaml b/examples-generated/efs/v1beta2/backuppolicy.yaml new file mode 100644 index 0000000000..fc519f9135 --- /dev/null +++ b/examples-generated/efs/v1beta2/backuppolicy.yaml @@ -0,0 +1,31 @@ +apiVersion: efs.aws.upbound.io/v1beta2 +kind: BackupPolicy +metadata: + annotations: + meta.upbound.io/example-id: efs/v1beta2/backuppolicy + labels: + testing.upbound.io/example-name: policy + name: policy +spec: + forProvider: + backupPolicy: + - status: ENABLED + fileSystemIdSelector: + matchLabels: + testing.upbound.io/example-name: fs + region: us-west-1 + +--- + +apiVersion: efs.aws.upbound.io/v1beta2 +kind: FileSystem +metadata: + annotations: + meta.upbound.io/example-id: efs/v1beta2/backuppolicy + labels: + testing.upbound.io/example-name: fs + name: fs +spec: + forProvider: + creationToken: my-product + region: us-west-1 diff --git a/examples-generated/efs/v1beta2/filesystem.yaml b/examples-generated/efs/v1beta2/filesystem.yaml new file mode 100644 index 0000000000..f91dcf3a9a --- /dev/null +++ b/examples-generated/efs/v1beta2/filesystem.yaml @@ -0,0 +1,14 @@ +apiVersion: efs.aws.upbound.io/v1beta2 +kind: FileSystem +metadata: + annotations: + meta.upbound.io/example-id: efs/v1beta2/filesystem + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + creationToken: my-product + region: us-west-1 + tags: + Name: MyProduct diff --git a/examples-generated/efs/v1beta2/replicationconfiguration.yaml b/examples-generated/efs/v1beta2/replicationconfiguration.yaml new file mode 100644 index 0000000000..579f118a12 --- /dev/null +++ b/examples-generated/efs/v1beta2/replicationconfiguration.yaml @@ -0,0 +1,30 @@ +apiVersion: efs.aws.upbound.io/v1beta2 +kind: 
ReplicationConfiguration +metadata: + annotations: + meta.upbound.io/example-id: efs/v1beta2/replicationconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + destination: + - region: us-west-2 + region: us-west-1 + sourceFileSystemIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: efs.aws.upbound.io/v1beta2 +kind: FileSystem +metadata: + annotations: + meta.upbound.io/example-id: efs/v1beta2/replicationconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/eks/v1beta2/cluster.yaml b/examples-generated/eks/v1beta2/cluster.yaml new file mode 100644 index 0000000000..a3ea4f96fc --- /dev/null +++ b/examples-generated/eks/v1beta2/cluster.yaml @@ -0,0 +1,18 @@ +apiVersion: eks.aws.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: eks/v1beta2/cluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + vpcConfig: + - subnetIdRefs: + - name: example1 + - name: example2 diff --git a/examples-generated/eks/v1beta2/identityproviderconfig.yaml b/examples-generated/eks/v1beta2/identityproviderconfig.yaml new file mode 100644 index 0000000000..e49055ec43 --- /dev/null +++ b/examples-generated/eks/v1beta2/identityproviderconfig.yaml @@ -0,0 +1,17 @@ +apiVersion: eks.aws.upbound.io/v1beta2 +kind: IdentityProviderConfig +metadata: + annotations: + meta.upbound.io/example-id: eks/v1beta2/identityproviderconfig + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterNameSelector: + matchLabels: + testing.upbound.io/example-name: example + oidc: + - clientId: your client_id + issuerUrl: your issuer_url + region: us-west-1 diff --git 
a/examples-generated/eks/v1beta2/nodegroup.yaml b/examples-generated/eks/v1beta2/nodegroup.yaml new file mode 100644 index 0000000000..471963b081 --- /dev/null +++ b/examples-generated/eks/v1beta2/nodegroup.yaml @@ -0,0 +1,26 @@ +apiVersion: eks.aws.upbound.io/v1beta2 +kind: NodeGroup +metadata: + annotations: + meta.upbound.io/example-id: eks/v1beta2/nodegroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterNameSelector: + matchLabels: + testing.upbound.io/example-name: example + nodeRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + scalingConfig: + - desiredSize: 1 + maxSize: 2 + minSize: 1 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example[*] + updateConfig: + - maxUnavailable: 1 diff --git a/examples-generated/elasticache/v1beta1/usergroup.yaml b/examples-generated/elasticache/v1beta1/usergroup.yaml index f0c697959b..373951c599 100644 --- a/examples-generated/elasticache/v1beta1/usergroup.yaml +++ b/examples-generated/elasticache/v1beta1/usergroup.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: elasticache.aws.upbound.io/v1beta1 +apiVersion: elasticache.aws.upbound.io/v1beta2 kind: User metadata: annotations: diff --git a/examples-generated/elasticache/v1beta2/user.yaml b/examples-generated/elasticache/v1beta2/user.yaml new file mode 100644 index 0000000000..554155154b --- /dev/null +++ b/examples-generated/elasticache/v1beta2/user.yaml @@ -0,0 +1,20 @@ +apiVersion: elasticache.aws.upbound.io/v1beta2 +kind: User +metadata: + annotations: + meta.upbound.io/example-id: elasticache/v1beta2/user + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + accessString: on ~app::* -@all +@read +@hash +@bitmap +@geo -setbit -bitfield + -hset -hsetnx -hmset -hincrby -hincrbyfloat -hdel -bitop -geoadd -georadius + -georadiusbymember + engine: REDIS + passwordsSecretRef: + - key: example-key + name: example-secret + 
namespace: upbound-system + region: us-west-1 + userName: testUserName diff --git a/examples-generated/elasticbeanstalk/v1beta1/applicationversion.yaml b/examples-generated/elasticbeanstalk/v1beta1/applicationversion.yaml index 2ef99f9332..9b0dd967f7 100644 --- a/examples-generated/elasticbeanstalk/v1beta1/applicationversion.yaml +++ b/examples-generated/elasticbeanstalk/v1beta1/applicationversion.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: elasticbeanstalk.aws.upbound.io/v1beta1 +apiVersion: elasticbeanstalk.aws.upbound.io/v1beta2 kind: Application metadata: annotations: @@ -35,7 +35,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: @@ -50,7 +50,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Object metadata: annotations: diff --git a/examples-generated/elasticbeanstalk/v1beta1/configurationtemplate.yaml b/examples-generated/elasticbeanstalk/v1beta1/configurationtemplate.yaml index 92fbc70e17..fdd376c23d 100644 --- a/examples-generated/elasticbeanstalk/v1beta1/configurationtemplate.yaml +++ b/examples-generated/elasticbeanstalk/v1beta1/configurationtemplate.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: elasticbeanstalk.aws.upbound.io/v1beta1 +apiVersion: elasticbeanstalk.aws.upbound.io/v1beta2 kind: Application metadata: annotations: diff --git a/examples-generated/elasticbeanstalk/v1beta2/application.yaml b/examples-generated/elasticbeanstalk/v1beta2/application.yaml new file mode 100644 index 0000000000..bbf82599ca --- /dev/null +++ b/examples-generated/elasticbeanstalk/v1beta2/application.yaml @@ -0,0 +1,18 @@ +apiVersion: elasticbeanstalk.aws.upbound.io/v1beta2 +kind: Application +metadata: + annotations: + meta.upbound.io/example-id: elasticbeanstalk/v1beta2/application + labels: + testing.upbound.io/example-name: tftest + name: tftest +spec: + forProvider: + appversionLifecycle: + - deleteSourceFromS3: true + maxCount: 128 + 
serviceRoleSelector: + matchLabels: + testing.upbound.io/example-name: beanstalk_service + description: tf-test-desc + region: us-west-1 diff --git a/examples-generated/elasticsearch/v1beta1/domainpolicy.yaml b/examples-generated/elasticsearch/v1beta1/domainpolicy.yaml index 065ada8de6..60f07cc90b 100644 --- a/examples-generated/elasticsearch/v1beta1/domainpolicy.yaml +++ b/examples-generated/elasticsearch/v1beta1/domainpolicy.yaml @@ -30,7 +30,7 @@ spec: --- -apiVersion: elasticsearch.aws.upbound.io/v1beta1 +apiVersion: elasticsearch.aws.upbound.io/v1beta2 kind: Domain metadata: annotations: diff --git a/examples-generated/elasticsearch/v1beta2/domain.yaml b/examples-generated/elasticsearch/v1beta2/domain.yaml new file mode 100644 index 0000000000..d4badb1946 --- /dev/null +++ b/examples-generated/elasticsearch/v1beta2/domain.yaml @@ -0,0 +1,16 @@ +apiVersion: elasticsearch.aws.upbound.io/v1beta2 +kind: Domain +metadata: + annotations: + meta.upbound.io/example-id: elasticsearch/v1beta2/domain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterConfig: + - instanceType: r4.large.elasticsearch + elasticsearchVersion: "7.10" + region: us-west-1 + tags: + Domain: TestDomain diff --git a/examples-generated/elasticsearch/v1beta2/domainsamloptions.yaml b/examples-generated/elasticsearch/v1beta2/domainsamloptions.yaml new file mode 100644 index 0000000000..024a49a228 --- /dev/null +++ b/examples-generated/elasticsearch/v1beta2/domainsamloptions.yaml @@ -0,0 +1,37 @@ +apiVersion: elasticsearch.aws.upbound.io/v1beta2 +kind: DomainSAMLOptions +metadata: + annotations: + meta.upbound.io/example-id: elasticsearch/v1beta2/domainsamloptions + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + samlOptions: + - enabled: true + idp: + - entityId: https://example.com + metadataContent: ${file("./saml-metadata.xml")} + +--- + +apiVersion: elasticsearch.aws.upbound.io/v1beta2 
+kind: Domain +metadata: + annotations: + meta.upbound.io/example-id: elasticsearch/v1beta2/domainsamloptions + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterConfig: + - instanceType: r4.large.elasticsearch + elasticsearchVersion: "1.5" + region: us-west-1 + snapshotOptions: + - automatedSnapshotStartHour: 23 + tags: + Domain: TestDomain diff --git a/examples-generated/elastictranscoder/v1beta2/pipeline.yaml b/examples-generated/elastictranscoder/v1beta2/pipeline.yaml new file mode 100644 index 0000000000..ee97e6f09f --- /dev/null +++ b/examples-generated/elastictranscoder/v1beta2/pipeline.yaml @@ -0,0 +1,28 @@ +apiVersion: elastictranscoder.aws.upbound.io/v1beta2 +kind: Pipeline +metadata: + annotations: + meta.upbound.io/example-id: elastictranscoder/v1beta2/pipeline + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + contentConfig: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: content_bucket + storageClass: Standard + inputBucketSelector: + matchLabels: + testing.upbound.io/example-name: input_bucket + name: aws_elastictranscoder_pipeline_tf_test_ + region: us-west-1 + roleSelector: + matchLabels: + testing.upbound.io/example-name: test_role + thumbnailConfig: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: thumb_bucket + storageClass: Standard diff --git a/examples-generated/elastictranscoder/v1beta2/preset.yaml b/examples-generated/elastictranscoder/v1beta2/preset.yaml new file mode 100644 index 0000000000..1b612f5061 --- /dev/null +++ b/examples-generated/elastictranscoder/v1beta2/preset.yaml @@ -0,0 +1,58 @@ +apiVersion: elastictranscoder.aws.upbound.io/v1beta2 +kind: Preset +metadata: + annotations: + meta.upbound.io/example-id: elastictranscoder/v1beta2/preset + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + audio: + - audioPackingMode: SingleTrack + bitRate: 96 + channels: 2 + codec: AAC + 
sampleRate: 44100 + audioCodecOptions: + - profile: AAC-LC + container: mp4 + description: Sample Preset + name: sample_preset + region: us-west-1 + thumbnails: + - format: png + interval: 120 + maxHeight: auto + maxWidth: auto + paddingPolicy: Pad + sizingPolicy: Fit + video: + - bitRate: "1600" + codec: H.264 + displayAspectRatio: "16:9" + fixedGop: "false" + frameRate: auto + keyframesMaxDist: 240 + maxFrameRate: "60" + maxHeight: auto + maxWidth: auto + paddingPolicy: Pad + sizingPolicy: Fit + videoCodecOptions: + ColorSpaceConversionMode: None + InterlacedMode: Progressive + Level: "2.2" + MaxReferenceFrames: 3 + Profile: main + videoWatermarks: + - horizontalAlign: Right + horizontalOffset: 10px + id: Terraform Test + maxHeight: 20% + maxWidth: 20% + opacity: "55.5" + sizingPolicy: ShrinkToFit + target: Content + verticalAlign: Bottom + verticalOffset: 10px diff --git a/examples-generated/elb/v1beta1/appcookiestickinesspolicy.yaml b/examples-generated/elb/v1beta1/appcookiestickinesspolicy.yaml index 22926c90c4..2ec2f9d886 100644 --- a/examples-generated/elb/v1beta1/appcookiestickinesspolicy.yaml +++ b/examples-generated/elb/v1beta1/appcookiestickinesspolicy.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: elb.aws.upbound.io/v1beta1 +apiVersion: elb.aws.upbound.io/v1beta2 kind: ELB metadata: annotations: diff --git a/examples-generated/elb/v1beta1/backendserverpolicy.yaml b/examples-generated/elb/v1beta1/backendserverpolicy.yaml index de5c175487..a99985d205 100644 --- a/examples-generated/elb/v1beta1/backendserverpolicy.yaml +++ b/examples-generated/elb/v1beta1/backendserverpolicy.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: elb.aws.upbound.io/v1beta1 +apiVersion: elb.aws.upbound.io/v1beta2 kind: ELB metadata: annotations: diff --git a/examples-generated/elb/v1beta1/lbcookiestickinesspolicy.yaml b/examples-generated/elb/v1beta1/lbcookiestickinesspolicy.yaml index 612f893774..67f63ee72f 100644 --- a/examples-generated/elb/v1beta1/lbcookiestickinesspolicy.yaml +++ 
b/examples-generated/elb/v1beta1/lbcookiestickinesspolicy.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: elb.aws.upbound.io/v1beta1 +apiVersion: elb.aws.upbound.io/v1beta2 kind: ELB metadata: annotations: diff --git a/examples-generated/elb/v1beta1/lbsslnegotiationpolicy.yaml b/examples-generated/elb/v1beta1/lbsslnegotiationpolicy.yaml index 8fd128a346..1d7345049a 100644 --- a/examples-generated/elb/v1beta1/lbsslnegotiationpolicy.yaml +++ b/examples-generated/elb/v1beta1/lbsslnegotiationpolicy.yaml @@ -32,7 +32,7 @@ spec: --- -apiVersion: elb.aws.upbound.io/v1beta1 +apiVersion: elb.aws.upbound.io/v1beta2 kind: ELB metadata: annotations: diff --git a/examples-generated/elb/v1beta1/listenerpolicy.yaml b/examples-generated/elb/v1beta1/listenerpolicy.yaml index 4307af20b0..eca0681175 100644 --- a/examples-generated/elb/v1beta1/listenerpolicy.yaml +++ b/examples-generated/elb/v1beta1/listenerpolicy.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: elb.aws.upbound.io/v1beta1 +apiVersion: elb.aws.upbound.io/v1beta2 kind: ELB metadata: annotations: diff --git a/examples-generated/elb/v1beta1/policy.yaml b/examples-generated/elb/v1beta1/policy.yaml index 9b4c1f6ca9..f052303e0b 100644 --- a/examples-generated/elb/v1beta1/policy.yaml +++ b/examples-generated/elb/v1beta1/policy.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: elb.aws.upbound.io/v1beta1 +apiVersion: elb.aws.upbound.io/v1beta2 kind: ELB metadata: annotations: diff --git a/examples-generated/elb/v1beta1/proxyprotocolpolicy.yaml b/examples-generated/elb/v1beta1/proxyprotocolpolicy.yaml index d969b175c0..197ef85e6e 100644 --- a/examples-generated/elb/v1beta1/proxyprotocolpolicy.yaml +++ b/examples-generated/elb/v1beta1/proxyprotocolpolicy.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: elb.aws.upbound.io/v1beta1 +apiVersion: elb.aws.upbound.io/v1beta2 kind: ELB metadata: annotations: diff --git a/examples-generated/elb/v1beta2/elb.yaml b/examples-generated/elb/v1beta2/elb.yaml new file mode 100644 index 
0000000000..33981c090d --- /dev/null +++ b/examples-generated/elb/v1beta2/elb.yaml @@ -0,0 +1,43 @@ +apiVersion: elb.aws.upbound.io/v1beta2 +kind: ELB +metadata: + annotations: + meta.upbound.io/example-id: elb/v1beta2/elb + labels: + testing.upbound.io/example-name: bar + name: bar +spec: + forProvider: + accessLogs: + - bucket: foo + bucketPrefix: bar + interval: 60 + availabilityZones: + - us-west-2a + - us-west-2b + - us-west-2c + connectionDraining: true + connectionDrainingTimeout: 400 + crossZoneLoadBalancing: true + healthCheck: + - healthyThreshold: 2 + interval: 30 + target: HTTP:8000/ + timeout: 3 + unhealthyThreshold: 2 + idleTimeout: 400 + instancesRefs: + - name: foo + listener: + - instancePort: 8000 + instanceProtocol: http + lbPort: 80 + lbProtocol: http + - instancePort: 8000 + instanceProtocol: http + lbPort: 443 + lbProtocol: https + sslCertificateId: arn:aws:iam::123456789012:server-certificate/certName + region: us-west-1 + tags: + Name: foobar-terraform-elb diff --git a/examples-generated/elbv2/v1beta1/lblistenercertificate.yaml b/examples-generated/elbv2/v1beta1/lblistenercertificate.yaml index f0c0ec05f9..3fa7f0ba61 100644 --- a/examples-generated/elbv2/v1beta1/lblistenercertificate.yaml +++ b/examples-generated/elbv2/v1beta1/lblistenercertificate.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: acm.aws.upbound.io/v1beta1 +apiVersion: acm.aws.upbound.io/v1beta2 kind: Certificate metadata: annotations: @@ -32,7 +32,7 @@ spec: --- -apiVersion: elbv2.aws.upbound.io/v1beta1 +apiVersion: elbv2.aws.upbound.io/v1beta2 kind: LB metadata: annotations: @@ -46,7 +46,7 @@ spec: --- -apiVersion: elbv2.aws.upbound.io/v1beta1 +apiVersion: elbv2.aws.upbound.io/v1beta2 kind: LBListener metadata: annotations: diff --git a/examples-generated/elbv2/v1beta1/lbtargetgroupattachment.yaml b/examples-generated/elbv2/v1beta1/lbtargetgroupattachment.yaml index 90d254fe94..292343c4a2 100644 --- a/examples-generated/elbv2/v1beta1/lbtargetgroupattachment.yaml +++ 
b/examples-generated/elbv2/v1beta1/lbtargetgroupattachment.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: @@ -31,7 +31,7 @@ spec: --- -apiVersion: elbv2.aws.upbound.io/v1beta1 +apiVersion: elbv2.aws.upbound.io/v1beta2 kind: LBTargetGroup metadata: annotations: diff --git a/examples-generated/elbv2/v1beta2/lb.yaml b/examples-generated/elbv2/v1beta2/lb.yaml new file mode 100644 index 0000000000..d111b49ae0 --- /dev/null +++ b/examples-generated/elbv2/v1beta2/lb.yaml @@ -0,0 +1,28 @@ +apiVersion: elbv2.aws.upbound.io/v1beta2 +kind: LB +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lb + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + accessLogs: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: lb_logs + enabled: true + prefix: test-lb + enableDeletionProtection: true + internal: false + loadBalancerType: application + name: test-lb-tf + region: us-west-1 + securityGroupRefs: + - name: lb_sg + subnetSelector: + matchLabels: + testing.upbound.io/example-name: 'public : subnet' + tags: + Environment: production diff --git a/examples-generated/elbv2/v1beta2/lblistener.yaml b/examples-generated/elbv2/v1beta2/lblistener.yaml new file mode 100644 index 0000000000..1b9b3e7be2 --- /dev/null +++ b/examples-generated/elbv2/v1beta2/lblistener.yaml @@ -0,0 +1,51 @@ +apiVersion: elbv2.aws.upbound.io/v1beta2 +kind: LBListener +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lblistener + labels: + testing.upbound.io/example-name: front_end + name: front-end +spec: + forProvider: + certificateArn: arn:aws:iam::187416307283:server-certificate/test_cert_rab3wuqwgja25ct3n4jdj2tzu4 + defaultAction: + - targetGroupArnSelector: + matchLabels: + testing.upbound.io/example-name: front_end + type: forward + loadBalancerArnSelector: + matchLabels: + testing.upbound.io/example-name: front_end + 
port: "443" + protocol: HTTPS + region: us-west-1 + sslPolicy: ELBSecurityPolicy-2016-08 + +--- + +apiVersion: elbv2.aws.upbound.io/v1beta2 +kind: LB +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lblistener + labels: + testing.upbound.io/example-name: front_end + name: front-end +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: elbv2.aws.upbound.io/v1beta2 +kind: LBTargetGroup +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lblistener + labels: + testing.upbound.io/example-name: front_end + name: front-end +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/elbv2/v1beta2/lblistenerrule.yaml b/examples-generated/elbv2/v1beta2/lblistenerrule.yaml new file mode 100644 index 0000000000..dbea1ff10d --- /dev/null +++ b/examples-generated/elbv2/v1beta2/lblistenerrule.yaml @@ -0,0 +1,97 @@ +apiVersion: elbv2.aws.upbound.io/v1beta2 +kind: LBListenerRule +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lblistenerrule + labels: + testing.upbound.io/example-name: static + name: static +spec: + forProvider: + action: + - targetGroupArnSelector: + matchLabels: + testing.upbound.io/example-name: static + type: forward + condition: + - pathPattern: + - values: + - /static/* + - hostHeader: + - values: + - example.com + listenerArnSelector: + matchLabels: + testing.upbound.io/example-name: front_end + priority: 100 + region: us-west-1 + +--- + +apiVersion: cognitoidp.aws.upbound.io/v1beta2 +kind: UserPool +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lblistenerrule + labels: + testing.upbound.io/example-name: pool + name: pool +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: cognitoidp.aws.upbound.io/v1beta1 +kind: UserPoolClient +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lblistenerrule + labels: + testing.upbound.io/example-name: client + name: client +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: 
cognitoidp.aws.upbound.io/v1beta1 +kind: UserPoolDomain +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lblistenerrule + labels: + testing.upbound.io/example-name: domain + name: domain +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: elbv2.aws.upbound.io/v1beta2 +kind: LB +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lblistenerrule + labels: + testing.upbound.io/example-name: front_end + name: front-end +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: elbv2.aws.upbound.io/v1beta2 +kind: LBListener +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lblistenerrule + labels: + testing.upbound.io/example-name: front_end + name: front-end +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/elbv2/v1beta2/lbtargetgroup.yaml b/examples-generated/elbv2/v1beta2/lbtargetgroup.yaml new file mode 100644 index 0000000000..b6e0beb308 --- /dev/null +++ b/examples-generated/elbv2/v1beta2/lbtargetgroup.yaml @@ -0,0 +1,32 @@ +apiVersion: elbv2.aws.upbound.io/v1beta2 +kind: LBTargetGroup +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lbtargetgroup + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + name: tf-example-lb-tg + port: 80 + protocol: HTTP + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: main + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: elbv2/v1beta2/lbtargetgroup + labels: + testing.upbound.io/example-name: main + name: main +spec: + forProvider: + cidrBlock: 10.0.0.0/16 + region: us-west-1 diff --git a/examples-generated/emrserverless/v1beta2/application.yaml b/examples-generated/emrserverless/v1beta2/application.yaml new file mode 100644 index 0000000000..3c034e8487 --- /dev/null +++ b/examples-generated/emrserverless/v1beta2/application.yaml @@ -0,0 +1,14 @@ +apiVersion: 
emrserverless.aws.upbound.io/v1beta2 +kind: Application +metadata: + annotations: + meta.upbound.io/example-id: emrserverless/v1beta2/application + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example + region: us-west-1 + releaseLabel: emr-6.6.0 + type: hive diff --git a/examples-generated/evidently/v1beta2/feature.yaml b/examples-generated/evidently/v1beta2/feature.yaml new file mode 100644 index 0000000000..99f0b38f7e --- /dev/null +++ b/examples-generated/evidently/v1beta2/feature.yaml @@ -0,0 +1,21 @@ +apiVersion: evidently.aws.upbound.io/v1beta2 +kind: Feature +metadata: + annotations: + meta.upbound.io/example-id: evidently/v1beta2/feature + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: example description + projectSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + tags: + Key1: example Feature + variations: + - name: Variation1 + value: + - stringValue: example diff --git a/examples-generated/evidently/v1beta2/project.yaml b/examples-generated/evidently/v1beta2/project.yaml new file mode 100644 index 0000000000..77fe767af6 --- /dev/null +++ b/examples-generated/evidently/v1beta2/project.yaml @@ -0,0 +1,15 @@ +apiVersion: evidently.aws.upbound.io/v1beta2 +kind: Project +metadata: + annotations: + meta.upbound.io/example-id: evidently/v1beta2/project + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: Example Description + name: Example + region: us-west-1 + tags: + Key1: example Project diff --git a/examples-generated/firehose/v1beta2/deliverystream.yaml b/examples-generated/firehose/v1beta2/deliverystream.yaml new file mode 100644 index 0000000000..ebedcfe270 --- /dev/null +++ b/examples-generated/firehose/v1beta2/deliverystream.yaml @@ -0,0 +1,108 @@ +apiVersion: firehose.aws.upbound.io/v1beta2 +kind: DeliveryStream +metadata: + annotations: + 
meta.upbound.io/example-id: firehose/v1beta2/deliverystream + labels: + testing.upbound.io/example-name: extended_s3_stream + name: extended-s3-stream +spec: + forProvider: + destination: extended_s3 + extendedS3Configuration: + - bucketArnSelector: + matchLabels: + testing.upbound.io/example-name: bucket + processingConfiguration: + - enabled: "true" + processors: + - parameters: + - parameterName: LambdaArn + parameterValue: ${aws_lambda_function.lambda_processor.arn}:$LATEST + type: Lambda + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: firehose_role + name: terraform-kinesis-firehose-extended-s3-test-stream + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: firehose/v1beta2/deliverystream + labels: + testing.upbound.io/example-name: firehose_role + name: firehose-role +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.firehose_assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: firehose/v1beta2/deliverystream + labels: + testing.upbound.io/example-name: lambda_iam + name: lambda-iam +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.lambda_assume_role.json} + +--- + +apiVersion: lambda.aws.upbound.io/v1beta2 +kind: Function +metadata: + annotations: + meta.upbound.io/example-id: firehose/v1beta2/deliverystream + labels: + testing.upbound.io/example-name: lambda_processor + name: lambda-processor +spec: + forProvider: + filename: lambda.zip + handler: exports.handler + region: us-west-1 + roleSelector: + matchLabels: + testing.upbound.io/example-name: lambda_iam + runtime: nodejs16.x + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: firehose/v1beta2/deliverystream + labels: + testing.upbound.io/example-name: 
bucket + name: bucket +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketACL +metadata: + annotations: + meta.upbound.io/example-id: firehose/v1beta2/deliverystream + labels: + testing.upbound.io/example-name: bucket_acl + name: bucket-acl +spec: + forProvider: + acl: private + bucketSelector: + matchLabels: + testing.upbound.io/example-name: bucket + region: us-west-1 diff --git a/examples-generated/fis/v1beta2/experimenttemplate.yaml b/examples-generated/fis/v1beta2/experimenttemplate.yaml new file mode 100644 index 0000000000..1005c465bd --- /dev/null +++ b/examples-generated/fis/v1beta2/experimenttemplate.yaml @@ -0,0 +1,30 @@ +apiVersion: fis.aws.upbound.io/v1beta2 +kind: ExperimentTemplate +metadata: + annotations: + meta.upbound.io/example-id: fis/v1beta2/experimenttemplate + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + action: + - actionId: aws:ec2:terminate-instances + name: example-action + target: + - key: Instances + value: example-target + description: example + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + stopCondition: + - source: none + target: + - name: example-target + resourceTag: + - key: env + value: example + resourceType: aws:ec2:instance + selectionMode: COUNT(1) diff --git a/examples-generated/fsx/v1beta1/backup.yaml b/examples-generated/fsx/v1beta1/backup.yaml index 86cccbf816..35c3c32427 100644 --- a/examples-generated/fsx/v1beta1/backup.yaml +++ b/examples-generated/fsx/v1beta1/backup.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: fsx.aws.upbound.io/v1beta1 +apiVersion: fsx.aws.upbound.io/v1beta2 kind: LustreFileSystem metadata: annotations: diff --git a/examples-generated/fsx/v1beta2/datarepositoryassociation.yaml b/examples-generated/fsx/v1beta2/datarepositoryassociation.yaml new file mode 100644 index 0000000000..776c9b5ad5 --- /dev/null +++ 
b/examples-generated/fsx/v1beta2/datarepositoryassociation.yaml @@ -0,0 +1,79 @@ +apiVersion: fsx.aws.upbound.io/v1beta2 +kind: DataRepositoryAssociation +metadata: + annotations: + meta.upbound.io/example-id: fsx/v1beta2/datarepositoryassociation + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataRepositoryPath: s3://${aws_s3_bucket.example.id} + fileSystemIdSelector: + matchLabels: + testing.upbound.io/example-name: example + fileSystemPath: /my-bucket + region: us-west-1 + s3: + - autoExportPolicy: + - events: + - NEW + - CHANGED + - DELETED + autoImportPolicy: + - events: + - NEW + - CHANGED + - DELETED + +--- + +apiVersion: fsx.aws.upbound.io/v1beta2 +kind: LustreFileSystem +metadata: + annotations: + meta.upbound.io/example-id: fsx/v1beta2/datarepositoryassociation + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + deploymentType: PERSISTENT_2 + perUnitStorageThroughput: 125 + region: us-west-1 + storageCapacity: 1200 + subnetIdRefs: + - name: example + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: fsx/v1beta2/datarepositoryassociation + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketACL +metadata: + annotations: + meta.upbound.io/example-id: fsx/v1beta2/datarepositoryassociation + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + acl: private + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/fsx/v1beta2/lustrefilesystem.yaml b/examples-generated/fsx/v1beta2/lustrefilesystem.yaml new file mode 100644 index 0000000000..715c55ace6 --- /dev/null +++ b/examples-generated/fsx/v1beta2/lustrefilesystem.yaml @@ -0,0 
+1,15 @@ +apiVersion: fsx.aws.upbound.io/v1beta2 +kind: LustreFileSystem +metadata: + annotations: + meta.upbound.io/example-id: fsx/v1beta2/lustrefilesystem + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + importPath: s3://${aws_s3_bucket.example.bucket} + region: us-west-1 + storageCapacity: 1200 + subnetIdRefs: + - name: example diff --git a/examples-generated/fsx/v1beta2/ontapfilesystem.yaml b/examples-generated/fsx/v1beta2/ontapfilesystem.yaml new file mode 100644 index 0000000000..5154c54aa3 --- /dev/null +++ b/examples-generated/fsx/v1beta2/ontapfilesystem.yaml @@ -0,0 +1,20 @@ +apiVersion: fsx.aws.upbound.io/v1beta2 +kind: OntapFileSystem +metadata: + annotations: + meta.upbound.io/example-id: fsx/v1beta2/ontapfilesystem + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + deploymentType: MULTI_AZ_1 + preferredSubnetIdSelector: + matchLabels: + testing.upbound.io/example-name: test1 + region: us-west-1 + storageCapacity: 1024 + subnetIdRefs: + - name: test1 + - name: test2 + throughputCapacity: 512 diff --git a/examples-generated/fsx/v1beta2/ontapstoragevirtualmachine.yaml b/examples-generated/fsx/v1beta2/ontapstoragevirtualmachine.yaml new file mode 100644 index 0000000000..773e763644 --- /dev/null +++ b/examples-generated/fsx/v1beta2/ontapstoragevirtualmachine.yaml @@ -0,0 +1,15 @@ +apiVersion: fsx.aws.upbound.io/v1beta2 +kind: OntapStorageVirtualMachine +metadata: + annotations: + meta.upbound.io/example-id: fsx/v1beta2/ontapstoragevirtualmachine + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + fileSystemIdSelector: + matchLabels: + testing.upbound.io/example-name: test + name: test + region: us-west-1 diff --git a/examples-generated/fsx/v1beta2/windowsfilesystem.yaml b/examples-generated/fsx/v1beta2/windowsfilesystem.yaml new file mode 100644 index 0000000000..4a7bed3898 --- /dev/null +++ 
b/examples-generated/fsx/v1beta2/windowsfilesystem.yaml @@ -0,0 +1,21 @@ +apiVersion: fsx.aws.upbound.io/v1beta2 +kind: WindowsFileSystem +metadata: + annotations: + meta.upbound.io/example-id: fsx/v1beta2/windowsfilesystem + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + activeDirectoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + kmsKeyIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + storageCapacity: 300 + subnetIdRefs: + - name: example + throughputCapacity: 1024 diff --git a/examples-generated/gamelift/v1beta2/alias.yaml b/examples-generated/gamelift/v1beta2/alias.yaml new file mode 100644 index 0000000000..23705bc22b --- /dev/null +++ b/examples-generated/gamelift/v1beta2/alias.yaml @@ -0,0 +1,16 @@ +apiVersion: gamelift.aws.upbound.io/v1beta2 +kind: Alias +metadata: + annotations: + meta.upbound.io/example-id: gamelift/v1beta2/alias + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: Example Description + name: example-alias + region: us-west-1 + routingStrategy: + - message: Example Message + type: TERMINAL diff --git a/examples-generated/gamelift/v1beta2/build.yaml b/examples-generated/gamelift/v1beta2/build.yaml new file mode 100644 index 0000000000..35bf1f3392 --- /dev/null +++ b/examples-generated/gamelift/v1beta2/build.yaml @@ -0,0 +1,23 @@ +apiVersion: gamelift.aws.upbound.io/v1beta2 +kind: Build +metadata: + annotations: + meta.upbound.io/example-id: gamelift/v1beta2/build + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + name: example-build + operatingSystem: WINDOWS_2012 + region: us-west-1 + storageLocation: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: test + keySelector: + matchLabels: + testing.upbound.io/example-name: test + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: test diff --git 
a/examples-generated/gamelift/v1beta2/fleet.yaml b/examples-generated/gamelift/v1beta2/fleet.yaml new file mode 100644 index 0000000000..4452db41c7 --- /dev/null +++ b/examples-generated/gamelift/v1beta2/fleet.yaml @@ -0,0 +1,21 @@ +apiVersion: gamelift.aws.upbound.io/v1beta2 +kind: Fleet +metadata: + annotations: + meta.upbound.io/example-id: gamelift/v1beta2/fleet + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + buildIdSelector: + matchLabels: + testing.upbound.io/example-name: example + ec2InstanceType: t2.micro + fleetType: ON_DEMAND + name: example-fleet-name + region: us-west-1 + runtimeConfiguration: + - serverProcess: + - concurrentExecutions: 1 + launchPath: C:\game\GomokuServer.exe diff --git a/examples-generated/gamelift/v1beta2/script.yaml b/examples-generated/gamelift/v1beta2/script.yaml new file mode 100644 index 0000000000..ae32aea47d --- /dev/null +++ b/examples-generated/gamelift/v1beta2/script.yaml @@ -0,0 +1,22 @@ +apiVersion: gamelift.aws.upbound.io/v1beta2 +kind: Script +metadata: + annotations: + meta.upbound.io/example-id: gamelift/v1beta2/script + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example-script + region: us-west-1 + storageLocation: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + keySelector: + matchLabels: + testing.upbound.io/example-name: example + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/glacier/v1beta1/vaultlock.yaml b/examples-generated/glacier/v1beta1/vaultlock.yaml index 8ad4905801..330776bc52 100644 --- a/examples-generated/glacier/v1beta1/vaultlock.yaml +++ b/examples-generated/glacier/v1beta1/vaultlock.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: glacier.aws.upbound.io/v1beta1 +apiVersion: glacier.aws.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/glacier/v1beta2/vault.yaml 
b/examples-generated/glacier/v1beta2/vault.yaml new file mode 100644 index 0000000000..4d3316b8f7 --- /dev/null +++ b/examples-generated/glacier/v1beta2/vault.yaml @@ -0,0 +1,35 @@ +apiVersion: glacier.aws.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: glacier/v1beta2/vault + labels: + testing.upbound.io/example-name: my_archive + name: my-archive +spec: + forProvider: + accessPolicy: ${data.aws_iam_policy_document.my_archive.json} + notification: + - events: + - ArchiveRetrievalCompleted + - InventoryRetrievalCompleted + snsTopicSelector: + matchLabels: + testing.upbound.io/example-name: aws_sns_topic + region: us-west-1 + tags: + Test: MyArchive + +--- + +apiVersion: sns.aws.upbound.io/v1beta1 +kind: Topic +metadata: + annotations: + meta.upbound.io/example-id: glacier/v1beta2/vault + labels: + testing.upbound.io/example-name: aws_sns_topic + name: aws-sns-topic +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/globalaccelerator/v1beta1/listener.yaml b/examples-generated/globalaccelerator/v1beta1/listener.yaml index e3c7492e0e..d0af1eac32 100644 --- a/examples-generated/globalaccelerator/v1beta1/listener.yaml +++ b/examples-generated/globalaccelerator/v1beta1/listener.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: globalaccelerator.aws.upbound.io/v1beta1 +apiVersion: globalaccelerator.aws.upbound.io/v1beta2 kind: Accelerator metadata: annotations: diff --git a/examples-generated/globalaccelerator/v1beta2/accelerator.yaml b/examples-generated/globalaccelerator/v1beta2/accelerator.yaml new file mode 100644 index 0000000000..580bde0a4e --- /dev/null +++ b/examples-generated/globalaccelerator/v1beta2/accelerator.yaml @@ -0,0 +1,20 @@ +apiVersion: globalaccelerator.aws.upbound.io/v1beta2 +kind: Accelerator +metadata: + annotations: + meta.upbound.io/example-id: globalaccelerator/v1beta2/accelerator + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + attributes: + - 
flowLogsEnabled: true + flowLogsS3Bucket: example-bucket + flowLogsS3Prefix: flow-logs/ + enabled: true + ipAddressType: IPV4 + ipAddresses: + - 1.2.3.4 + name: Example + region: us-west-1 diff --git a/examples-generated/glue/v1beta1/userdefinedfunction.yaml b/examples-generated/glue/v1beta1/userdefinedfunction.yaml index f224145152..34e89ad355 100644 --- a/examples-generated/glue/v1beta1/userdefinedfunction.yaml +++ b/examples-generated/glue/v1beta1/userdefinedfunction.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: glue.aws.upbound.io/v1beta1 +apiVersion: glue.aws.upbound.io/v1beta2 kind: CatalogDatabase metadata: annotations: diff --git a/examples-generated/glue/v1beta1/workflow.yaml b/examples-generated/glue/v1beta1/workflow.yaml index e47e60e2ff..b3bba4fd3f 100644 --- a/examples-generated/glue/v1beta1/workflow.yaml +++ b/examples-generated/glue/v1beta1/workflow.yaml @@ -12,7 +12,7 @@ spec: --- -apiVersion: glue.aws.upbound.io/v1beta1 +apiVersion: glue.aws.upbound.io/v1beta2 kind: Trigger metadata: annotations: @@ -38,7 +38,7 @@ spec: --- -apiVersion: glue.aws.upbound.io/v1beta1 +apiVersion: glue.aws.upbound.io/v1beta2 kind: Trigger metadata: annotations: diff --git a/examples-generated/glue/v1beta2/catalogdatabase.yaml b/examples-generated/glue/v1beta2/catalogdatabase.yaml new file mode 100644 index 0000000000..69f3b0d404 --- /dev/null +++ b/examples-generated/glue/v1beta2/catalogdatabase.yaml @@ -0,0 +1,11 @@ +apiVersion: glue.aws.upbound.io/v1beta2 +kind: CatalogDatabase +metadata: + annotations: + meta.upbound.io/example-id: glue/v1beta2/catalogdatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/glue/v1beta2/catalogtable.yaml b/examples-generated/glue/v1beta2/catalogtable.yaml new file mode 100644 index 0000000000..22a37d867c --- /dev/null +++ b/examples-generated/glue/v1beta2/catalogtable.yaml @@ -0,0 +1,14 @@ +apiVersion: glue.aws.upbound.io/v1beta2 +kind: 
CatalogTable +metadata: + annotations: + meta.upbound.io/example-id: glue/v1beta2/catalogtable + labels: + testing.upbound.io/example-name: aws_glue_catalog_table + name: aws-glue-catalog-table +spec: + forProvider: + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/glue/v1beta2/classifier.yaml b/examples-generated/glue/v1beta2/classifier.yaml new file mode 100644 index 0000000000..d0f53ade8d --- /dev/null +++ b/examples-generated/glue/v1beta2/classifier.yaml @@ -0,0 +1,20 @@ +apiVersion: glue.aws.upbound.io/v1beta2 +kind: Classifier +metadata: + annotations: + meta.upbound.io/example-id: glue/v1beta2/classifier + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + csvClassifier: + - allowSingleColumn: false + containsHeader: PRESENT + delimiter: ',' + disableValueTrimming: false + header: + - example1 + - example2 + quoteSymbol: '''' + region: us-west-1 diff --git a/examples-generated/glue/v1beta2/connection.yaml b/examples-generated/glue/v1beta2/connection.yaml new file mode 100644 index 0000000000..470d47286d --- /dev/null +++ b/examples-generated/glue/v1beta2/connection.yaml @@ -0,0 +1,15 @@ +apiVersion: glue.aws.upbound.io/v1beta2 +kind: Connection +metadata: + annotations: + meta.upbound.io/example-id: glue/v1beta2/connection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + connectionPropertiesSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + region: us-west-1 diff --git a/examples-generated/glue/v1beta2/crawler.yaml b/examples-generated/glue/v1beta2/crawler.yaml new file mode 100644 index 0000000000..56a487f84b --- /dev/null +++ b/examples-generated/glue/v1beta2/crawler.yaml @@ -0,0 +1,19 @@ +apiVersion: glue.aws.upbound.io/v1beta2 +kind: Crawler +metadata: + annotations: + meta.upbound.io/example-id: glue/v1beta2/crawler + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: example + dynamodbTarget: + - path: table-name + region: us-west-1 + roleSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/glue/v1beta2/datacatalogencryptionsettings.yaml b/examples-generated/glue/v1beta2/datacatalogencryptionsettings.yaml new file mode 100644 index 0000000000..4f9f766559 --- /dev/null +++ b/examples-generated/glue/v1beta2/datacatalogencryptionsettings.yaml @@ -0,0 +1,23 @@ +apiVersion: glue.aws.upbound.io/v1beta2 +kind: DataCatalogEncryptionSettings +metadata: + annotations: + meta.upbound.io/example-id: glue/v1beta2/datacatalogencryptionsettings + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataCatalogEncryptionSettings: + - connectionPasswordEncryption: + - awsKmsKeyIdSelector: + matchLabels: + testing.upbound.io/example-name: test + returnConnectionPasswordEncrypted: true + encryptionAtRest: + - catalogEncryptionMode: SSE-KMS + catalogEncryptionServiceRole: ${aws_iam.role.test.arn} + sseAwsKmsKeyIdSelector: + matchLabels: + testing.upbound.io/example-name: test + region: us-west-1 diff --git a/examples-generated/glue/v1beta2/job.yaml b/examples-generated/glue/v1beta2/job.yaml new file mode 100644 index 0000000000..d5b16f37dc --- /dev/null +++ b/examples-generated/glue/v1beta2/job.yaml @@ -0,0 +1,16 @@ +apiVersion: glue.aws.upbound.io/v1beta2 +kind: Job +metadata: + annotations: + meta.upbound.io/example-id: glue/v1beta2/job + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + command: + - scriptLocation: s3://${aws_s3_bucket.example.bucket}/example.py + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/glue/v1beta2/securityconfiguration.yaml 
b/examples-generated/glue/v1beta2/securityconfiguration.yaml new file mode 100644 index 0000000000..eb0f1c970b --- /dev/null +++ b/examples-generated/glue/v1beta2/securityconfiguration.yaml @@ -0,0 +1,21 @@ +apiVersion: glue.aws.upbound.io/v1beta2 +kind: SecurityConfiguration +metadata: + annotations: + meta.upbound.io/example-id: glue/v1beta2/securityconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + encryptionConfiguration: + - cloudwatchEncryption: + - cloudwatchEncryptionMode: DISABLED + jobBookmarksEncryption: + - jobBookmarksEncryptionMode: DISABLED + s3Encryption: + - kmsKeyArnSelector: + matchLabels: + testing.upbound.io/example-name: aws_kms_key + s3EncryptionMode: SSE-KMS + region: us-west-1 diff --git a/examples-generated/glue/v1beta2/trigger.yaml b/examples-generated/glue/v1beta2/trigger.yaml new file mode 100644 index 0000000000..e4d68add84 --- /dev/null +++ b/examples-generated/glue/v1beta2/trigger.yaml @@ -0,0 +1,22 @@ +apiVersion: glue.aws.upbound.io/v1beta2 +kind: Trigger +metadata: + annotations: + meta.upbound.io/example-id: glue/v1beta2/trigger + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + actions: + - jobNameSelector: + matchLabels: + testing.upbound.io/example-name: example1 + predicate: + - conditions: + - jobNameSelector: + matchLabels: + testing.upbound.io/example-name: example2 + state: SUCCEEDED + region: us-west-1 + type: CONDITIONAL diff --git a/examples-generated/grafana/v1beta1/licenseassociation.yaml b/examples-generated/grafana/v1beta1/licenseassociation.yaml index a4782b1dee..716bfc407d 100644 --- a/examples-generated/grafana/v1beta1/licenseassociation.yaml +++ b/examples-generated/grafana/v1beta1/licenseassociation.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: grafana.aws.upbound.io/v1beta1 +apiVersion: grafana.aws.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git 
a/examples-generated/grafana/v1beta1/roleassociation.yaml b/examples-generated/grafana/v1beta1/roleassociation.yaml index 76cfeff0d7..d693c92904 100644 --- a/examples-generated/grafana/v1beta1/roleassociation.yaml +++ b/examples-generated/grafana/v1beta1/roleassociation.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: grafana.aws.upbound.io/v1beta1 +apiVersion: grafana.aws.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/grafana/v1beta1/workspacesamlconfiguration.yaml b/examples-generated/grafana/v1beta1/workspacesamlconfiguration.yaml index f5e99291ba..b9a94b6def 100644 --- a/examples-generated/grafana/v1beta1/workspacesamlconfiguration.yaml +++ b/examples-generated/grafana/v1beta1/workspacesamlconfiguration.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: grafana.aws.upbound.io/v1beta1 +apiVersion: grafana.aws.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/grafana/v1beta2/workspace.yaml b/examples-generated/grafana/v1beta2/workspace.yaml new file mode 100644 index 0000000000..1ea17bcffb --- /dev/null +++ b/examples-generated/grafana/v1beta2/workspace.yaml @@ -0,0 +1,45 @@ +apiVersion: grafana.aws.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: grafana/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountAccessType: CURRENT_ACCOUNT + authenticationProviders: + - SAML + permissionType: SERVICE_MANAGED + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: assume + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: grafana/v1beta2/workspace + labels: + testing.upbound.io/example-name: assume + name: assume +spec: + forProvider: + assumeRolePolicy: |- + ${jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Sid = "" + Principal = { + 
Service = "grafana.amazonaws.com" + } + }, + ] + })} diff --git a/examples-generated/guardduty/v1beta1/member.yaml b/examples-generated/guardduty/v1beta1/member.yaml index 2715449e82..d3bc9152d5 100644 --- a/examples-generated/guardduty/v1beta1/member.yaml +++ b/examples-generated/guardduty/v1beta1/member.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: guardduty.aws.upbound.io/v1beta1 +apiVersion: guardduty.aws.upbound.io/v1beta2 kind: Detector metadata: annotations: @@ -37,7 +37,7 @@ spec: --- -apiVersion: guardduty.aws.upbound.io/v1beta1 +apiVersion: guardduty.aws.upbound.io/v1beta2 kind: Detector metadata: annotations: diff --git a/examples-generated/guardduty/v1beta2/detector.yaml b/examples-generated/guardduty/v1beta2/detector.yaml new file mode 100644 index 0000000000..4ce95216d1 --- /dev/null +++ b/examples-generated/guardduty/v1beta2/detector.yaml @@ -0,0 +1,22 @@ +apiVersion: guardduty.aws.upbound.io/v1beta2 +kind: Detector +metadata: + annotations: + meta.upbound.io/example-id: guardduty/v1beta2/detector + labels: + testing.upbound.io/example-name: MyDetector + name: mydetector +spec: + forProvider: + datasources: + - kubernetes: + - auditLogs: + - enable: false + malwareProtection: + - scanEc2InstanceWithFindings: + - ebsVolumes: + - enable: true + s3Logs: + - enable: true + enable: true + region: us-west-1 diff --git a/examples-generated/guardduty/v1beta2/filter.yaml b/examples-generated/guardduty/v1beta2/filter.yaml new file mode 100644 index 0000000000..60d042116a --- /dev/null +++ b/examples-generated/guardduty/v1beta2/filter.yaml @@ -0,0 +1,30 @@ +apiVersion: guardduty.aws.upbound.io/v1beta2 +kind: Filter +metadata: + annotations: + meta.upbound.io/example-id: guardduty/v1beta2/filter + labels: + testing.upbound.io/example-name: MyFilter + name: myfilter +spec: + forProvider: + action: ARCHIVE + detectorIdSelector: + matchLabels: + testing.upbound.io/example-name: example + findingCriteria: + - criterion: + - equals: + - eu-west-1 + field: region + - 
field: service.additionalInfo.threatListName + notEquals: + - some-threat + - another-threat + - field: updatedAt + greaterThan: "2020-01-01T00:00:00Z" + lessThan: "2020-02-01T00:00:00Z" + - field: severity + greaterThanOrEqual: "4" + rank: 1 + region: us-west-1 diff --git a/examples-generated/identitystore/v1beta1/groupmembership.yaml b/examples-generated/identitystore/v1beta1/groupmembership.yaml index 18779aaf48..3d508c9e97 100644 --- a/examples-generated/identitystore/v1beta1/groupmembership.yaml +++ b/examples-generated/identitystore/v1beta1/groupmembership.yaml @@ -36,7 +36,7 @@ spec: --- -apiVersion: identitystore.aws.upbound.io/v1beta1 +apiVersion: identitystore.aws.upbound.io/v1beta2 kind: User metadata: annotations: diff --git a/examples-generated/identitystore/v1beta2/user.yaml b/examples-generated/identitystore/v1beta2/user.yaml new file mode 100644 index 0000000000..4d59a738bb --- /dev/null +++ b/examples-generated/identitystore/v1beta2/user.yaml @@ -0,0 +1,19 @@ +apiVersion: identitystore.aws.upbound.io/v1beta2 +kind: User +metadata: + annotations: + meta.upbound.io/example-id: identitystore/v1beta2/user + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + displayName: John Doe + emails: + - value: john@example.com + identityStoreId: ${tolist(data.aws_ssoadmin_instances.example.identity_store_ids)[0]} + name: + - familyName: Doe + givenName: John + region: us-west-1 + userName: johndoe diff --git a/examples-generated/imagebuilder/v1beta2/containerrecipe.yaml b/examples-generated/imagebuilder/v1beta2/containerrecipe.yaml new file mode 100644 index 0000000000..f6ccfa0862 --- /dev/null +++ b/examples-generated/imagebuilder/v1beta2/containerrecipe.yaml @@ -0,0 +1,33 @@ +apiVersion: imagebuilder.aws.upbound.io/v1beta2 +kind: ContainerRecipe +metadata: + annotations: + meta.upbound.io/example-id: imagebuilder/v1beta2/containerrecipe + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + component: + - componentArnSelector: + matchLabels: + testing.upbound.io/example-name: example + parameter: + - name: Parameter1 + value: Value1 + - name: Parameter2 + value: Value2 + containerType: DOCKER + dockerfileTemplateData: | + FROM {{{ imagebuilder:parentImage }}} + {{{ imagebuilder:environments }}} + {{{ imagebuilder:components }}} + name: example + parentImage: arn:aws:imagebuilder:eu-central-1:aws:image/amazon-linux-x86-latest/x.x.x + region: us-west-1 + targetRepository: + - repositoryNameSelector: + matchLabels: + testing.upbound.io/example-name: example + service: ECR + version: 1.0.0 diff --git a/examples-generated/imagebuilder/v1beta2/distributionconfiguration.yaml b/examples-generated/imagebuilder/v1beta2/distributionconfiguration.yaml new file mode 100644 index 0000000000..fe2eab892e --- /dev/null +++ b/examples-generated/imagebuilder/v1beta2/distributionconfiguration.yaml @@ -0,0 +1,23 @@ +apiVersion: imagebuilder.aws.upbound.io/v1beta2 +kind: DistributionConfiguration +metadata: + annotations: + meta.upbound.io/example-id: imagebuilder/v1beta2/distributionconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + distribution: + - amiDistributionConfiguration: + - amiTags: + CostCenter: IT + launchPermission: + - userIds: + - "123456789012" + name: example-{{ imagebuilder:buildDate }} + launchTemplateConfiguration: + - launchTemplateId: lt-0aaa1bcde2ff3456 + region: us-east-1 + name: example + region: us-west-1 diff --git a/examples-generated/imagebuilder/v1beta2/image.yaml b/examples-generated/imagebuilder/v1beta2/image.yaml new file mode 100644 index 0000000000..1b422bc115 --- /dev/null +++ b/examples-generated/imagebuilder/v1beta2/image.yaml @@ -0,0 +1,20 @@ +apiVersion: imagebuilder.aws.upbound.io/v1beta2 +kind: Image +metadata: + annotations: + meta.upbound.io/example-id: imagebuilder/v1beta2/image + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + distributionConfigurationArnSelector: + matchLabels: + testing.upbound.io/example-name: example + imageRecipeArnSelector: + matchLabels: + testing.upbound.io/example-name: example + infrastructureConfigurationArnSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/imagebuilder/v1beta2/imagepipeline.yaml b/examples-generated/imagebuilder/v1beta2/imagepipeline.yaml new file mode 100644 index 0000000000..1ebea4392a --- /dev/null +++ b/examples-generated/imagebuilder/v1beta2/imagepipeline.yaml @@ -0,0 +1,20 @@ +apiVersion: imagebuilder.aws.upbound.io/v1beta2 +kind: ImagePipeline +metadata: + annotations: + meta.upbound.io/example-id: imagebuilder/v1beta2/imagepipeline + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + imageRecipeArnSelector: + matchLabels: + testing.upbound.io/example-name: example + infrastructureConfigurationArnSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + region: us-west-1 + schedule: + - scheduleExpression: cron(0 0 * * ? 
*) diff --git a/examples-generated/imagebuilder/v1beta2/imagerecipe.yaml b/examples-generated/imagebuilder/v1beta2/imagerecipe.yaml new file mode 100644 index 0000000000..1dcd3c2fa9 --- /dev/null +++ b/examples-generated/imagebuilder/v1beta2/imagerecipe.yaml @@ -0,0 +1,29 @@ +apiVersion: imagebuilder.aws.upbound.io/v1beta2 +kind: ImageRecipe +metadata: + annotations: + meta.upbound.io/example-id: imagebuilder/v1beta2/imagerecipe + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + blockDeviceMapping: + - deviceName: /dev/xvdb + ebs: + - deleteOnTermination: true + volumeSize: 100 + volumeType: gp2 + component: + - componentArnSelector: + matchLabels: + testing.upbound.io/example-name: example + parameter: + - name: Parameter1 + value: Value1 + - name: Parameter2 + value: Value2 + name: example + parentImage: arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x + region: us-west-1 + version: 1.0.0 diff --git a/examples-generated/imagebuilder/v1beta2/infrastructureconfiguration.yaml b/examples-generated/imagebuilder/v1beta2/infrastructureconfiguration.yaml new file mode 100644 index 0000000000..d7be5f30c2 --- /dev/null +++ b/examples-generated/imagebuilder/v1beta2/infrastructureconfiguration.yaml @@ -0,0 +1,39 @@ +apiVersion: imagebuilder.aws.upbound.io/v1beta2 +kind: InfrastructureConfiguration +metadata: + annotations: + meta.upbound.io/example-id: imagebuilder/v1beta2/infrastructureconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: example description + instanceProfileNameSelector: + matchLabels: + testing.upbound.io/example-name: example + instanceTypes: + - t2.nano + - t3.micro + keyPairSelector: + matchLabels: + testing.upbound.io/example-name: example + logging: + - s3Logs: + - s3BucketNameSelector: + matchLabels: + testing.upbound.io/example-name: example + s3KeyPrefix: logs + name: 
example + region: us-west-1 + securityGroupIdRefs: + - name: example + snsTopicArnSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: main + tags: + foo: bar + terminateInstanceOnFailure: true diff --git a/examples-generated/iot/v1beta2/indexingconfiguration.yaml b/examples-generated/iot/v1beta2/indexingconfiguration.yaml new file mode 100644 index 0000000000..1c0b0da192 --- /dev/null +++ b/examples-generated/iot/v1beta2/indexingconfiguration.yaml @@ -0,0 +1,28 @@ +apiVersion: iot.aws.upbound.io/v1beta2 +kind: IndexingConfiguration +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/indexingconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + thingIndexingConfiguration: + - customField: + - name: shadow.desired.power + type: Boolean + - name: attributes.version + type: Number + - name: shadow.name.thing1shadow.desired.DefaultDesired + type: String + - name: deviceDefender.securityProfile1.NUMBER_VALUE_BEHAVIOR.lastViolationValue.number + type: Number + deviceDefenderIndexingMode: VIOLATIONS + filter: + - namedShadowNames: + - thing1shadow + namedShadowIndexingMode: "ON" + thingConnectivityIndexingMode: STATUS + thingIndexingMode: REGISTRY_AND_SHADOW diff --git a/examples-generated/iot/v1beta2/provisioningtemplate.yaml b/examples-generated/iot/v1beta2/provisioningtemplate.yaml new file mode 100644 index 0000000000..dd0bf800d0 --- /dev/null +++ b/examples-generated/iot/v1beta2/provisioningtemplate.yaml @@ -0,0 +1,88 @@ +apiVersion: iot.aws.upbound.io/v1beta2 +kind: ProvisioningTemplate +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/provisioningtemplate + labels: + testing.upbound.io/example-name: fleet + name: fleet +spec: + forProvider: + description: My provisioning template + enabled: true + provisioningRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: 
iot_fleet_provisioning + region: us-west-1 + templateBody: |- + ${jsonencode({ + Parameters = { + SerialNumber = { Type = "String" } + } + + Resources = { + certificate = { + Properties = { + CertificateId = { Ref = "AWS::IoT::Certificate::Id" } + Status = "Active" + } + Type = "AWS::IoT::Certificate" + } + + policy = { + Properties = { + PolicyName = aws_iot_policy.device_policy.name + } + Type = "AWS::IoT::Policy" + } + } + })} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/provisioningtemplate + labels: + testing.upbound.io/example-name: iot_fleet_provisioning + name: iot-fleet-provisioning +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.iot_assume_role_policy.json} + path: /service-role/ + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicyAttachment +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/provisioningtemplate + labels: + testing.upbound.io/example-name: iot_fleet_provisioning_registration + name: iot-fleet-provisioning-registration +spec: + forProvider: + policyArnSelector: + matchLabels: + testing.upbound.io/example-name: example + roleSelector: + matchLabels: + testing.upbound.io/example-name: iot_fleet_provisioning + +--- + +apiVersion: iot.aws.upbound.io/v1beta1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/provisioningtemplate + labels: + testing.upbound.io/example-name: device_policy + name: device-policy +spec: + forProvider: + policy: ${data.aws_iam_policy_document.device_policy.json} + region: us-west-1 diff --git a/examples-generated/iot/v1beta2/thinggroup.yaml b/examples-generated/iot/v1beta2/thinggroup.yaml new file mode 100644 index 0000000000..1ee078e1c0 --- /dev/null +++ b/examples-generated/iot/v1beta2/thinggroup.yaml @@ -0,0 +1,11 @@ +apiVersion: iot.aws.upbound.io/v1beta2 +kind: ThingGroup +metadata: + annotations: + meta.upbound.io/example-id: 
iot/v1beta2/thinggroup + labels: + testing.upbound.io/example-name: parent + name: parent +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/iot/v1beta2/thingtype.yaml b/examples-generated/iot/v1beta2/thingtype.yaml new file mode 100644 index 0000000000..3047381ccc --- /dev/null +++ b/examples-generated/iot/v1beta2/thingtype.yaml @@ -0,0 +1,12 @@ +apiVersion: iot.aws.upbound.io/v1beta2 +kind: ThingType +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/thingtype + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + name: my_iot_thing + region: us-west-1 diff --git a/examples-generated/iot/v1beta2/topicrule.yaml b/examples-generated/iot/v1beta2/topicrule.yaml new file mode 100644 index 0000000000..da7493a39c --- /dev/null +++ b/examples-generated/iot/v1beta2/topicrule.yaml @@ -0,0 +1,91 @@ +apiVersion: iot.aws.upbound.io/v1beta2 +kind: TopicRule +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/topicrule + labels: + testing.upbound.io/example-name: rule + name: rule +spec: + forProvider: + description: Example rule + enabled: true + errorAction: + - sns: + - messageFormat: RAW + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: role + targetArnSelector: + matchLabels: + testing.upbound.io/example-name: myerrortopic + region: us-west-1 + sns: + - messageFormat: RAW + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: role + targetArnSelector: + matchLabels: + testing.upbound.io/example-name: mytopic + sql: SELECT * FROM 'topic/test' + sqlVersion: "2016-03-23" + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/topicrule + labels: + testing.upbound.io/example-name: myrole + name: myrole +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + annotations: + 
meta.upbound.io/example-id: iot/v1beta2/topicrule + labels: + testing.upbound.io/example-name: mypolicy + name: mypolicy +spec: + forProvider: + policy: ${data.aws_iam_policy_document.mypolicy.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: myrole + +--- + +apiVersion: sns.aws.upbound.io/v1beta1 +kind: Topic +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/topicrule + labels: + testing.upbound.io/example-name: myerrortopic + name: myerrortopic +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: sns.aws.upbound.io/v1beta1 +kind: Topic +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/topicrule + labels: + testing.upbound.io/example-name: mytopic + name: mytopic +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/iot/v1beta2/topicruledestination.yaml b/examples-generated/iot/v1beta2/topicruledestination.yaml new file mode 100644 index 0000000000..7db3d9bb92 --- /dev/null +++ b/examples-generated/iot/v1beta2/topicruledestination.yaml @@ -0,0 +1,23 @@ +apiVersion: iot.aws.upbound.io/v1beta2 +kind: TopicRuleDestination +metadata: + annotations: + meta.upbound.io/example-id: iot/v1beta2/topicruledestination + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + vpcConfiguration: + - roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + securityGroupRefs: + - name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example[*] + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/ivs/v1beta2/recordingconfiguration.yaml b/examples-generated/ivs/v1beta2/recordingconfiguration.yaml new file mode 100644 index 0000000000..18f43a78cf --- /dev/null +++ b/examples-generated/ivs/v1beta2/recordingconfiguration.yaml @@ -0,0 +1,15 @@ +apiVersion: ivs.aws.upbound.io/v1beta2 +kind: RecordingConfiguration +metadata: + annotations: + 
meta.upbound.io/example-id: ivs/v1beta2/recordingconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + destinationConfiguration: + - s3: + - bucketName: ivs-stream-archive + name: recording_configuration-1 + region: us-west-1 diff --git a/examples-generated/kafka/v1beta1/scramsecretassociation.yaml b/examples-generated/kafka/v1beta1/scramsecretassociation.yaml index c21705380c..24c9875d9b 100644 --- a/examples-generated/kafka/v1beta1/scramsecretassociation.yaml +++ b/examples-generated/kafka/v1beta1/scramsecretassociation.yaml @@ -32,7 +32,7 @@ spec: --- -apiVersion: kafka.aws.upbound.io/v1beta2 +apiVersion: kafka.aws.upbound.io/v1beta3 kind: Cluster metadata: annotations: diff --git a/examples-generated/kafka/v1beta2/serverlesscluster.yaml b/examples-generated/kafka/v1beta2/serverlesscluster.yaml new file mode 100644 index 0000000000..f32d6e933b --- /dev/null +++ b/examples-generated/kafka/v1beta2/serverlesscluster.yaml @@ -0,0 +1,22 @@ +apiVersion: kafka.aws.upbound.io/v1beta2 +kind: ServerlessCluster +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta2/serverlesscluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clientAuthentication: + - sasl: + - iam: + - enabled: true + clusterName: Example + region: us-west-1 + vpcConfig: + - securityGroupIdRefs: + - name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example[*] diff --git a/examples-generated/kafka/v1beta3/cluster.yaml b/examples-generated/kafka/v1beta3/cluster.yaml new file mode 100644 index 0000000000..bf3a9a8a1d --- /dev/null +++ b/examples-generated/kafka/v1beta3/cluster.yaml @@ -0,0 +1,245 @@ +apiVersion: kafka.aws.upbound.io/v1beta3 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + brokerNodeGroupInfo: + - 
clientSubnetsRefs: + - name: subnet_az1 + - name: subnet_az2 + - name: subnet_az3 + instanceType: kafka.m5.large + securityGroupsRefs: + - name: sg + storageInfo: + - ebsStorageInfo: + - volumeSize: 1000 + clusterName: example + encryptionInfo: + - encryptionAtRestKmsKeyArnSelector: + matchLabels: + testing.upbound.io/example-name: kms + kafkaVersion: 3.2.0 + loggingInfo: + - brokerLogs: + - cloudwatchLogs: + - enabled: true + logGroupSelector: + matchLabels: + testing.upbound.io/example-name: test + firehose: + - deliveryStreamSelector: + matchLabels: + testing.upbound.io/example-name: test_stream + enabled: true + s3: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: bucket + enabled: true + prefix: logs/msk- + numberOfBrokerNodes: 3 + openMonitoring: + - prometheus: + - jmxExporter: + - enabledInBroker: true + nodeExporter: + - enabledInBroker: true + region: us-west-1 + tags: + foo: bar + +--- + +apiVersion: cloudwatchlogs.aws.upbound.io/v1beta1 +kind: Group +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: firehose_role + name: firehose-role +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: firehose.aws.upbound.io/v1beta2 +kind: DeliveryStream +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: test_stream + name: test-stream +spec: + forProvider: + destination: extended_s3 + extendedS3Configuration: + - bucketArnSelector: + matchLabels: + testing.upbound.io/example-name: bucket + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: firehose_role + name: 
terraform-kinesis-firehose-msk-broker-logs-stream + region: us-west-1 + tags: + LogDeliveryEnabled: placeholder + +--- + +apiVersion: kms.aws.upbound.io/v1beta1 +kind: Key +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: kms + name: kms +spec: + forProvider: + description: example + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: bucket + name: bucket +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketACL +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: bucket_acl + name: bucket-acl +spec: + forProvider: + acl: private + bucketSelector: + matchLabels: + testing.upbound.io/example-name: bucket + region: us-west-1 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: sg + name: sg +spec: + forProvider: + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: vpc + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: subnet_az1 + name: subnet-az1 +spec: + forProvider: + availabilityZone: ${data.aws_availability_zones.azs.names[0]} + cidrBlock: 192.168.0.0/24 + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: vpc + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: subnet_az2 + name: subnet-az2 +spec: + 
forProvider: + availabilityZone: ${data.aws_availability_zones.azs.names[1]} + cidrBlock: 192.168.1.0/24 + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: vpc + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: subnet_az3 + name: subnet-az3 +spec: + forProvider: + availabilityZone: ${data.aws_availability_zones.azs.names[2]} + cidrBlock: 192.168.2.0/24 + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: vpc + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: kafka/v1beta3/cluster + labels: + testing.upbound.io/example-name: vpc + name: vpc +spec: + forProvider: + cidrBlock: 192.168.0.0/22 + region: us-west-1 diff --git a/examples-generated/kafkaconnect/v1beta2/connector.yaml b/examples-generated/kafkaconnect/v1beta2/connector.yaml new file mode 100644 index 0000000000..d03a4d0edf --- /dev/null +++ b/examples-generated/kafkaconnect/v1beta2/connector.yaml @@ -0,0 +1,49 @@ +apiVersion: kafkaconnect.aws.upbound.io/v1beta2 +kind: Connector +metadata: + annotations: + meta.upbound.io/example-id: kafkaconnect/v1beta2/connector + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: + - autoscaling: + - maxWorkerCount: 2 + mcuCount: 1 + minWorkerCount: 1 + scaleInPolicy: + - cpuUtilizationPercentage: 20 + scaleOutPolicy: + - cpuUtilizationPercentage: 80 + connectorConfiguration: + connector.class: com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector + tasks.max: "1" + topics: example + kafkaCluster: + - apacheKafkaCluster: + - bootstrapServers: ${aws_msk_cluster.example.bootstrap_brokers_tls} + vpc: + - securityGroupRefs: + - name: example + subnetRefs: + - name: example1 + - name: example2 + - name: example3 + 
kafkaClusterClientAuthentication: + - authenticationType: NONE + kafkaClusterEncryptionInTransit: + - encryptionType: TLS + kafkaconnectVersion: 2.7.1 + name: example + plugin: + - customPlugin: + - arnSelector: + matchLabels: + testing.upbound.io/example-name: example + revision: ${aws_mskconnect_custom_plugin.example.latest_revision} + region: us-west-1 + serviceExecutionRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/kafkaconnect/v1beta2/customplugin.yaml b/examples-generated/kafkaconnect/v1beta2/customplugin.yaml new file mode 100644 index 0000000000..858e4e54d7 --- /dev/null +++ b/examples-generated/kafkaconnect/v1beta2/customplugin.yaml @@ -0,0 +1,55 @@ +apiVersion: kafkaconnect.aws.upbound.io/v1beta2 +kind: CustomPlugin +metadata: + annotations: + meta.upbound.io/example-id: kafkaconnect/v1beta2/customplugin + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + contentType: ZIP + location: + - s3: + - bucketArnSelector: + matchLabels: + testing.upbound.io/example-name: example + fileKeySelector: + matchLabels: + testing.upbound.io/example-name: example + name: debezium-example + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: kafkaconnect/v1beta2/customplugin + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Object +metadata: + annotations: + meta.upbound.io/example-id: kafkaconnect/v1beta2/customplugin + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + key: debezium.zip + region: us-west-1 + source: debezium.zip diff --git a/examples-generated/kendra/v1beta2/datasource.yaml 
b/examples-generated/kendra/v1beta2/datasource.yaml new file mode 100644 index 0000000000..e467fbd287 --- /dev/null +++ b/examples-generated/kendra/v1beta2/datasource.yaml @@ -0,0 +1,20 @@ +apiVersion: kendra.aws.upbound.io/v1beta2 +kind: DataSource +metadata: + annotations: + meta.upbound.io/example-id: kendra/v1beta2/datasource + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: example + indexIdSelector: + matchLabels: + testing.upbound.io/example-name: example + languageCode: en + name: example + region: us-west-1 + tags: + hello: world + type: CUSTOM diff --git a/examples-generated/kendra/v1beta2/experience.yaml b/examples-generated/kendra/v1beta2/experience.yaml new file mode 100644 index 0000000000..31d6bf6ff1 --- /dev/null +++ b/examples-generated/kendra/v1beta2/experience.yaml @@ -0,0 +1,26 @@ +apiVersion: kendra.aws.upbound.io/v1beta2 +kind: Experience +metadata: + annotations: + meta.upbound.io/example-id: kendra/v1beta2/experience + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + configuration: + - contentSourceConfiguration: + - directPutContent: true + faqIds: + - ${aws_kendra_faq.example.faq_id} + userIdentityConfiguration: + - identityAttributeName: 12345ec453-1546651e-79c4-4554-91fa-00b43ccfa245 + description: My Kendra Experience + indexIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/kendra/v1beta2/index.yaml b/examples-generated/kendra/v1beta2/index.yaml new file mode 100644 index 0000000000..3bdc716ae2 --- /dev/null +++ b/examples-generated/kendra/v1beta2/index.yaml @@ -0,0 +1,19 @@ +apiVersion: kendra.aws.upbound.io/v1beta2 +kind: Index +metadata: + annotations: + meta.upbound.io/example-id: kendra/v1beta2/index + labels: + testing.upbound.io/example-name: example + name: example 
+spec: + forProvider: + description: example + edition: DEVELOPER_EDITION + name: example + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: this + tags: + Key1: Value1 diff --git a/examples-generated/kendra/v1beta2/querysuggestionsblocklist.yaml b/examples-generated/kendra/v1beta2/querysuggestionsblocklist.yaml new file mode 100644 index 0000000000..3b87f63203 --- /dev/null +++ b/examples-generated/kendra/v1beta2/querysuggestionsblocklist.yaml @@ -0,0 +1,25 @@ +apiVersion: kendra.aws.upbound.io/v1beta2 +kind: QuerySuggestionsBlockList +metadata: + annotations: + meta.upbound.io/example-id: kendra/v1beta2/querysuggestionsblocklist + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + indexIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: Example + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceS3Path: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + key: example/suggestions.txt + tags: + Name: Example Kendra Index diff --git a/examples-generated/kendra/v1beta2/thesaurus.yaml b/examples-generated/kendra/v1beta2/thesaurus.yaml new file mode 100644 index 0000000000..4f584b60ec --- /dev/null +++ b/examples-generated/kendra/v1beta2/thesaurus.yaml @@ -0,0 +1,27 @@ +apiVersion: kendra.aws.upbound.io/v1beta2 +kind: Thesaurus +metadata: + annotations: + meta.upbound.io/example-id: kendra/v1beta2/thesaurus + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + indexIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: Example + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceS3Path: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + keySelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Name: Example Kendra Thesaurus 
diff --git a/examples-generated/keyspaces/v1beta2/table.yaml b/examples-generated/keyspaces/v1beta2/table.yaml new file mode 100644 index 0000000000..f13331cdd0 --- /dev/null +++ b/examples-generated/keyspaces/v1beta2/table.yaml @@ -0,0 +1,21 @@ +apiVersion: keyspaces.aws.upbound.io/v1beta2 +kind: Table +metadata: + annotations: + meta.upbound.io/example-id: keyspaces/v1beta2/table + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + keyspaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + schemaDefinition: + - column: + - name: Message + type: ASCII + partitionKey: + - name: Message + tableName: my_table diff --git a/examples-generated/kinesis/v1beta1/streamconsumer.yaml b/examples-generated/kinesis/v1beta1/streamconsumer.yaml index fe0f773520..8001c53e65 100644 --- a/examples-generated/kinesis/v1beta1/streamconsumer.yaml +++ b/examples-generated/kinesis/v1beta1/streamconsumer.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: kinesis.aws.upbound.io/v1beta1 +apiVersion: kinesis.aws.upbound.io/v1beta2 kind: Stream metadata: annotations: diff --git a/examples-generated/kinesis/v1beta2/stream.yaml b/examples-generated/kinesis/v1beta2/stream.yaml new file mode 100644 index 0000000000..83a183635d --- /dev/null +++ b/examples-generated/kinesis/v1beta2/stream.yaml @@ -0,0 +1,20 @@ +apiVersion: kinesis.aws.upbound.io/v1beta2 +kind: Stream +metadata: + annotations: + meta.upbound.io/example-id: kinesis/v1beta2/stream + labels: + testing.upbound.io/example-name: test_stream + name: test-stream +spec: + forProvider: + region: us-west-1 + retentionPeriod: 48 + shardCount: 1 + shardLevelMetrics: + - IncomingBytes + - OutgoingBytes + streamModeDetails: + - streamMode: PROVISIONED + tags: + Environment: test diff --git a/examples-generated/kinesisanalytics/v1beta2/application.yaml b/examples-generated/kinesisanalytics/v1beta2/application.yaml new file mode 100644 index 0000000000..b48c50e451 --- 
/dev/null +++ b/examples-generated/kinesisanalytics/v1beta2/application.yaml @@ -0,0 +1,47 @@ +apiVersion: kinesisanalytics.aws.upbound.io/v1beta2 +kind: Application +metadata: + annotations: + meta.upbound.io/example-id: kinesisanalytics/v1beta2/application + labels: + testing.upbound.io/example-name: test_application + name: test-application +spec: + forProvider: + inputs: + - kinesisStream: + - resourceArnSelector: + matchLabels: + testing.upbound.io/example-name: test_stream + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: test + namePrefix: test_prefix + parallelism: + - count: 1 + schema: + - recordColumns: + - mapping: $.test + name: test + sqlType: VARCHAR(8) + recordEncoding: UTF-8 + recordFormat: + - mappingParameters: + - json: + - recordRowPath: $ + region: us-west-1 + +--- + +apiVersion: kinesis.aws.upbound.io/v1beta2 +kind: Stream +metadata: + annotations: + meta.upbound.io/example-id: kinesisanalytics/v1beta2/application + labels: + testing.upbound.io/example-name: test_stream + name: test-stream +spec: + forProvider: + region: us-west-1 + shardCount: 1 diff --git a/examples-generated/kinesisanalyticsv2/v1beta2/application.yaml b/examples-generated/kinesisanalyticsv2/v1beta2/application.yaml new file mode 100644 index 0000000000..0e83f91cd1 --- /dev/null +++ b/examples-generated/kinesisanalyticsv2/v1beta2/application.yaml @@ -0,0 +1,83 @@ +apiVersion: kinesisanalyticsv2.aws.upbound.io/v1beta2 +kind: Application +metadata: + annotations: + meta.upbound.io/example-id: kinesisanalyticsv2/v1beta2/application + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationConfiguration: + - applicationCodeConfiguration: + - codeContent: + - s3ContentLocation: + - bucketArnSelector: + matchLabels: + testing.upbound.io/example-name: example + fileKeySelector: + matchLabels: + testing.upbound.io/example-name: example + codeContentType: ZIPFILE + environmentProperties: + - propertyGroup: + - 
propertyGroupId: PROPERTY-GROUP-1 + propertyMap: + Key1: Value1 + - propertyGroupId: PROPERTY-GROUP-2 + propertyMap: + KeyA: ValueA + KeyB: ValueB + flinkApplicationConfiguration: + - checkpointConfiguration: + - configurationType: DEFAULT + monitoringConfiguration: + - configurationType: CUSTOM + logLevel: DEBUG + metricsLevel: TASK + parallelismConfiguration: + - autoScalingEnabled: true + configurationType: CUSTOM + parallelism: 10 + parallelismPerKpu: 4 + region: us-west-1 + runtimeEnvironment: FLINK-1_8 + serviceExecutionRoleSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Environment: test + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: kinesisanalyticsv2/v1beta2/application + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Object +metadata: + annotations: + meta.upbound.io/example-id: kinesisanalyticsv2/v1beta2/application + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + key: example-flink-application + region: us-west-1 + source: flink-app.jar diff --git a/examples-generated/lakeformation/v1beta2/permissions.yaml b/examples-generated/lakeformation/v1beta2/permissions.yaml new file mode 100644 index 0000000000..f5e64e9b06 --- /dev/null +++ b/examples-generated/lakeformation/v1beta2/permissions.yaml @@ -0,0 +1,56 @@ +apiVersion: lakeformation.aws.upbound.io/v1beta2 +kind: Permissions +metadata: + annotations: + meta.upbound.io/example-id: lakeformation/v1beta2/permissions + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + permissions: + - SELECT + principal: arn:aws:iam:us-east-1:123456789012:user/SanHolo + region: us-west-1 + 
tableWithColumns: + - columnNames: + - event + databaseName: ${aws_glue_catalog_table.example.database_name} + nameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: glue.aws.upbound.io/v1beta2 +kind: CatalogDatabase +metadata: + annotations: + meta.upbound.io/example-id: lakeformation/v1beta2/permissions + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: glue.aws.upbound.io/v1beta2 +kind: CatalogTable +metadata: + annotations: + meta.upbound.io/example-id: lakeformation/v1beta2/permissions + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: test + region: us-west-1 + storageDescriptor: + - columns: + - name: event + type: string diff --git a/examples-generated/lambda/v1beta1/permission.yaml b/examples-generated/lambda/v1beta1/permission.yaml index 52e510a29a..b17762ca12 100644 --- a/examples-generated/lambda/v1beta1/permission.yaml +++ b/examples-generated/lambda/v1beta1/permission.yaml @@ -49,7 +49,7 @@ spec: --- -apiVersion: lambda.aws.upbound.io/v1beta1 +apiVersion: lambda.aws.upbound.io/v1beta2 kind: Alias metadata: annotations: @@ -68,7 +68,7 @@ spec: --- -apiVersion: lambda.aws.upbound.io/v1beta1 +apiVersion: lambda.aws.upbound.io/v1beta2 kind: Function metadata: annotations: diff --git a/examples-generated/lambda/v1beta2/alias.yaml b/examples-generated/lambda/v1beta2/alias.yaml new file mode 100644 index 0000000000..232df67eaf --- /dev/null +++ b/examples-generated/lambda/v1beta2/alias.yaml @@ -0,0 +1,19 @@ +apiVersion: lambda.aws.upbound.io/v1beta2 +kind: Alias +metadata: + annotations: + meta.upbound.io/example-id: lambda/v1beta2/alias + labels: + testing.upbound.io/example-name: test_lambda_alias + name: test-lambda-alias +spec: + forProvider: + description: a sample description + functionNameSelector: + matchLabels: + 
testing.upbound.io/example-name: lambda_function_test + functionVersion: "1" + region: us-west-1 + routingConfig: + - additionalVersionWeights: + "2": 0.5 diff --git a/examples-generated/lambda/v1beta2/codesigningconfig.yaml b/examples-generated/lambda/v1beta2/codesigningconfig.yaml new file mode 100644 index 0000000000..6311cca8c9 --- /dev/null +++ b/examples-generated/lambda/v1beta2/codesigningconfig.yaml @@ -0,0 +1,18 @@ +apiVersion: lambda.aws.upbound.io/v1beta2 +kind: CodeSigningConfig +metadata: + annotations: + meta.upbound.io/example-id: lambda/v1beta2/codesigningconfig + labels: + testing.upbound.io/example-name: new_csc + name: new-csc +spec: + forProvider: + allowedPublishers: + - signingProfileVersionArnsRefs: + - name: example1 + - name: example2 + description: My awesome code signing config. + policies: + - untrustedArtifactOnDeployment: Warn + region: us-west-1 diff --git a/examples-generated/lambda/v1beta2/eventsourcemapping.yaml b/examples-generated/lambda/v1beta2/eventsourcemapping.yaml new file mode 100644 index 0000000000..e912607cf2 --- /dev/null +++ b/examples-generated/lambda/v1beta2/eventsourcemapping.yaml @@ -0,0 +1,16 @@ +apiVersion: lambda.aws.upbound.io/v1beta2 +kind: EventSourceMapping +metadata: + annotations: + meta.upbound.io/example-id: lambda/v1beta2/eventsourcemapping + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + eventSourceArn: ${aws_dynamodb_table.example.stream_arn} + functionNameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + startingPosition: LATEST diff --git a/examples-generated/lambda/v1beta2/function.yaml b/examples-generated/lambda/v1beta2/function.yaml new file mode 100644 index 0000000000..bf83eef99b --- /dev/null +++ b/examples-generated/lambda/v1beta2/function.yaml @@ -0,0 +1,35 @@ +apiVersion: lambda.aws.upbound.io/v1beta2 +kind: Function +metadata: + annotations: + meta.upbound.io/example-id: lambda/v1beta2/function + labels: + 
testing.upbound.io/example-name: test_lambda + name: test-lambda +spec: + forProvider: + environment: + - variables: + foo: bar + filename: lambda_function_payload.zip + handler: index.test + region: us-west-1 + roleSelector: + matchLabels: + testing.upbound.io/example-name: iam_for_lambda + runtime: nodejs18.x + sourceCodeHash: ${data.archive_file.lambda.output_base64sha256} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: lambda/v1beta2/function + labels: + testing.upbound.io/example-name: iam_for_lambda + name: iam-for-lambda +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} diff --git a/examples-generated/lambda/v1beta2/functioneventinvokeconfig.yaml b/examples-generated/lambda/v1beta2/functioneventinvokeconfig.yaml new file mode 100644 index 0000000000..c7be8b5a19 --- /dev/null +++ b/examples-generated/lambda/v1beta2/functioneventinvokeconfig.yaml @@ -0,0 +1,21 @@ +apiVersion: lambda.aws.upbound.io/v1beta2 +kind: FunctionEventInvokeConfig +metadata: + annotations: + meta.upbound.io/example-id: lambda/v1beta2/functioneventinvokeconfig + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + destinationConfig: + - onFailure: + - destinationSelector: + matchLabels: + testing.upbound.io/example-name: example + onSuccess: + - destinationSelector: + matchLabels: + testing.upbound.io/example-name: example + functionName: ${aws_lambda_alias.example.function_name} + region: us-west-1 diff --git a/examples-generated/lambda/v1beta2/functionurl.yaml b/examples-generated/lambda/v1beta2/functionurl.yaml new file mode 100644 index 0000000000..53901518af --- /dev/null +++ b/examples-generated/lambda/v1beta2/functionurl.yaml @@ -0,0 +1,15 @@ +apiVersion: lambda.aws.upbound.io/v1beta2 +kind: FunctionURL +metadata: + annotations: + meta.upbound.io/example-id: lambda/v1beta2/functionurl + labels: + testing.upbound.io/example-name: 
test_latest + name: test-latest +spec: + forProvider: + authorizationType: NONE + functionNameSelector: + matchLabels: + testing.upbound.io/example-name: test + region: us-west-1 diff --git a/examples-generated/lexmodels/v1beta2/bot.yaml b/examples-generated/lexmodels/v1beta2/bot.yaml new file mode 100644 index 0000000000..feb8ba407e --- /dev/null +++ b/examples-generated/lexmodels/v1beta2/bot.yaml @@ -0,0 +1,30 @@ +apiVersion: lexmodels.aws.upbound.io/v1beta2 +kind: Bot +metadata: + annotations: + meta.upbound.io/example-id: lexmodels/v1beta2/bot + labels: + testing.upbound.io/example-name: order_flowers_bot + name: order-flowers-bot +spec: + forProvider: + abortStatement: + - message: + - content: Sorry, I am not able to assist at this time + contentType: PlainText + childDirected: false + clarificationPrompt: + - maxAttempts: 2 + message: + - content: I didn't understand you, what would you like to do? + contentType: PlainText + createVersion: false + description: Bot to order flowers on the behalf of a user + idleSessionTtlInSeconds: 600 + intent: + - intentName: OrderFlowers + intentVersion: "1" + locale: en-US + processBehavior: BUILD + region: us-west-1 + voiceId: Salli diff --git a/examples-generated/lexmodels/v1beta2/botalias.yaml b/examples-generated/lexmodels/v1beta2/botalias.yaml new file mode 100644 index 0000000000..ba90eac6ee --- /dev/null +++ b/examples-generated/lexmodels/v1beta2/botalias.yaml @@ -0,0 +1,14 @@ +apiVersion: lexmodels.aws.upbound.io/v1beta2 +kind: BotAlias +metadata: + annotations: + meta.upbound.io/example-id: lexmodels/v1beta2/botalias + labels: + testing.upbound.io/example-name: order_flowers_prod + name: order-flowers-prod +spec: + forProvider: + botName: OrderFlowers + botVersion: "1" + description: Production Version of the OrderFlowers Bot. 
+ region: us-west-1 diff --git a/examples-generated/lexmodels/v1beta2/intent.yaml b/examples-generated/lexmodels/v1beta2/intent.yaml new file mode 100644 index 0000000000..1b73826b54 --- /dev/null +++ b/examples-generated/lexmodels/v1beta2/intent.yaml @@ -0,0 +1,68 @@ +apiVersion: lexmodels.aws.upbound.io/v1beta2 +kind: Intent +metadata: + annotations: + meta.upbound.io/example-id: lexmodels/v1beta2/intent + labels: + testing.upbound.io/example-name: order_flowers_intent + name: order-flowers-intent +spec: + forProvider: + confirmationPrompt: + - maxAttempts: 2 + message: + - content: Okay, your {FlowerType} will be ready for pickup by {PickupTime} + on {PickupDate}. Does this sound okay? + contentType: PlainText + createVersion: false + description: Intent to order a bouquet of flowers for pick up + fulfillmentActivity: + - type: ReturnIntent + region: us-west-1 + rejectionStatement: + - message: + - content: Okay, I will not place your order. + contentType: PlainText + sampleUtterances: + - I would like to order some flowers + - I would like to pick up flowers + slot: + - description: The type of flowers to pick up + name: FlowerType + priority: 1 + sampleUtterances: + - I would like to order {FlowerType} + slotConstraint: Required + slotType: FlowerTypes + slotTypeVersion: $$LATEST + valueElicitationPrompt: + - maxAttempts: 2 + message: + - content: What type of flowers would you like to order? + contentType: PlainText + - description: The date to pick up the flowers + name: PickupDate + priority: 2 + sampleUtterances: + - I would like to order {FlowerType} + slotConstraint: Required + slotType: AMAZON.DATE + slotTypeVersion: $$LATEST + valueElicitationPrompt: + - maxAttempts: 2 + message: + - content: What day do you want the {FlowerType} to be picked up? 
+ contentType: PlainText + - description: The time to pick up the flowers + name: PickupTime + priority: 3 + sampleUtterances: + - I would like to order {FlowerType} + slotConstraint: Required + slotType: AMAZON.TIME + slotTypeVersion: $$LATEST + valueElicitationPrompt: + - maxAttempts: 2 + message: + - content: Pick up the {FlowerType} at what time on {PickupDate}? + contentType: PlainText diff --git a/examples-generated/licensemanager/v1beta1/association.yaml b/examples-generated/licensemanager/v1beta1/association.yaml index 09f1807de1..6c5ed74d3d 100644 --- a/examples-generated/licensemanager/v1beta1/association.yaml +++ b/examples-generated/licensemanager/v1beta1/association.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/lightsail/v1beta1/diskattachment.yaml b/examples-generated/lightsail/v1beta1/diskattachment.yaml index caee314fde..9c422a701e 100644 --- a/examples-generated/lightsail/v1beta1/diskattachment.yaml +++ b/examples-generated/lightsail/v1beta1/diskattachment.yaml @@ -35,7 +35,7 @@ spec: --- -apiVersion: lightsail.aws.upbound.io/v1beta1 +apiVersion: lightsail.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/lightsail/v1beta1/instancepublicports.yaml b/examples-generated/lightsail/v1beta1/instancepublicports.yaml index 2586413183..63cafbec8a 100644 --- a/examples-generated/lightsail/v1beta1/instancepublicports.yaml +++ b/examples-generated/lightsail/v1beta1/instancepublicports.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: lightsail.aws.upbound.io/v1beta1 +apiVersion: lightsail.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/lightsail/v1beta1/lbattachment.yaml b/examples-generated/lightsail/v1beta1/lbattachment.yaml index abf4d693aa..ba565b6681 100644 --- a/examples-generated/lightsail/v1beta1/lbattachment.yaml +++ 
b/examples-generated/lightsail/v1beta1/lbattachment.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: lightsail.aws.upbound.io/v1beta1 +apiVersion: lightsail.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/lightsail/v1beta1/staticipattachment.yaml b/examples-generated/lightsail/v1beta1/staticipattachment.yaml index 7bfc12a2a4..5df9793ac3 100644 --- a/examples-generated/lightsail/v1beta1/staticipattachment.yaml +++ b/examples-generated/lightsail/v1beta1/staticipattachment.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: lightsail.aws.upbound.io/v1beta1 +apiVersion: lightsail.aws.upbound.io/v1beta2 kind: Instance metadata: annotations: diff --git a/examples-generated/lightsail/v1beta2/containerservice.yaml b/examples-generated/lightsail/v1beta2/containerservice.yaml new file mode 100644 index 0000000000..2754137f20 --- /dev/null +++ b/examples-generated/lightsail/v1beta2/containerservice.yaml @@ -0,0 +1,17 @@ +apiVersion: lightsail.aws.upbound.io/v1beta2 +kind: ContainerService +metadata: + annotations: + meta.upbound.io/example-id: lightsail/v1beta2/containerservice + labels: + testing.upbound.io/example-name: my_container_service + name: my-container-service +spec: + forProvider: + isDisabled: false + power: nano + region: us-west-1 + scale: 1 + tags: + foo1: bar1 + foo2: "" diff --git a/examples-generated/lightsail/v1beta2/instance.yaml b/examples-generated/lightsail/v1beta2/instance.yaml new file mode 100644 index 0000000000..e2d8487628 --- /dev/null +++ b/examples-generated/lightsail/v1beta2/instance.yaml @@ -0,0 +1,17 @@ +apiVersion: lightsail.aws.upbound.io/v1beta2 +kind: Instance +metadata: + annotations: + meta.upbound.io/example-id: lightsail/v1beta2/instance + labels: + testing.upbound.io/example-name: gitlab_test + name: gitlab-test +spec: + forProvider: + availabilityZone: us-east-1b + blueprintId: amazon_linux_2 + bundleId: nano_3_0 + keyPairName: some_key_name + region: us-west-1 + tags: + foo: bar diff --git 
a/examples-generated/location/v1beta2/placeindex.yaml b/examples-generated/location/v1beta2/placeindex.yaml new file mode 100644 index 0000000000..47aa8237e2 --- /dev/null +++ b/examples-generated/location/v1beta2/placeindex.yaml @@ -0,0 +1,12 @@ +apiVersion: location.aws.upbound.io/v1beta2 +kind: PlaceIndex +metadata: + annotations: + meta.upbound.io/example-id: location/v1beta2/placeindex + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataSource: Here + region: us-west-1 diff --git a/examples-generated/macie2/v1beta2/classificationjob.yaml b/examples-generated/macie2/v1beta2/classificationjob.yaml new file mode 100644 index 0000000000..9a2775e32d --- /dev/null +++ b/examples-generated/macie2/v1beta2/classificationjob.yaml @@ -0,0 +1,32 @@ +apiVersion: macie2.aws.upbound.io/v1beta2 +kind: ClassificationJob +metadata: + annotations: + meta.upbound.io/example-id: macie2/v1beta2/classificationjob + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + jobType: ONE_TIME + name: NAME OF THE CLASSIFICATION JOB + region: us-west-1 + s3JobDefinition: + - bucketDefinitions: + - accountId: ACCOUNT ID + buckets: + - S3 BUCKET NAME + +--- + +apiVersion: macie2.aws.upbound.io/v1beta1 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: macie2/v1beta2/classificationjob + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/macie2/v1beta2/findingsfilter.yaml b/examples-generated/macie2/v1beta2/findingsfilter.yaml new file mode 100644 index 0000000000..7efc2e3d85 --- /dev/null +++ b/examples-generated/macie2/v1beta2/findingsfilter.yaml @@ -0,0 +1,34 @@ +apiVersion: macie2.aws.upbound.io/v1beta2 +kind: FindingsFilter +metadata: + annotations: + meta.upbound.io/example-id: macie2/v1beta2/findingsfilter + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + action: ARCHIVE + 
description: DESCRIPTION + findingCriteria: + - criterion: + - eq: + - ${data.aws_region.current.name} + field: region + name: NAME OF THE FINDINGS FILTER + position: 1 + region: us-west-1 + +--- + +apiVersion: macie2.aws.upbound.io/v1beta1 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: macie2/v1beta2/findingsfilter + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/mediaconvert/v1beta2/queue.yaml b/examples-generated/mediaconvert/v1beta2/queue.yaml new file mode 100644 index 0000000000..5be659856a --- /dev/null +++ b/examples-generated/mediaconvert/v1beta2/queue.yaml @@ -0,0 +1,11 @@ +apiVersion: mediaconvert.aws.upbound.io/v1beta2 +kind: Queue +metadata: + annotations: + meta.upbound.io/example-id: mediaconvert/v1beta2/queue + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/medialive/v1beta2/channel.yaml b/examples-generated/medialive/v1beta2/channel.yaml new file mode 100644 index 0000000000..36a4d70e51 --- /dev/null +++ b/examples-generated/medialive/v1beta2/channel.yaml @@ -0,0 +1,57 @@ +apiVersion: medialive.aws.upbound.io/v1beta2 +kind: Channel +metadata: + annotations: + meta.upbound.io/example-id: medialive/v1beta2/channel + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + channelClass: STANDARD + destinations: + - id: destination + settings: + - url: s3://${aws_s3_bucket.main.id}/test1 + - url: s3://${aws_s3_bucket.main2.id}/test2 + encoderSettings: + - audioDescriptions: + - audioSelectorName: example audio selector + name: audio-selector + outputGroups: + - outputGroupSettings: + - archiveGroupSettings: + - destination: + - destinationRefId: destination + outputs: + - audioDescriptionNames: + - audio-selector + outputName: example-name + outputSettings: + - archiveOutputSettings: + - containerSettings: + - 
m2tsSettings: + - audioBufferModel: ATSC + bufferModel: MULTIPLEX + rateMode: CBR + extension: m2ts + nameModifier: _1 + videoDescriptionName: example-video + timecodeConfig: + - source: EMBEDDED + videoDescriptions: + - name: example-video + inputAttachments: + - inputAttachmentName: example-input + inputIdSelector: + matchLabels: + testing.upbound.io/example-name: example + inputSpecification: + - codec: AVC + inputResolution: HD + maximumBitrate: MAX_20_MBPS + name: example-channel + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/medialive/v1beta2/input.yaml b/examples-generated/medialive/v1beta2/input.yaml new file mode 100644 index 0000000000..999a163fd4 --- /dev/null +++ b/examples-generated/medialive/v1beta2/input.yaml @@ -0,0 +1,35 @@ +apiVersion: medialive.aws.upbound.io/v1beta2 +kind: Input +metadata: + annotations: + meta.upbound.io/example-id: medialive/v1beta2/input + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + inputSecurityGroups: + - ${aws_medialive_input_security_group.example.id} + name: example-input + region: us-west-1 + tags: + ENVIRONMENT: prod + type: UDP_PUSH + +--- + +apiVersion: medialive.aws.upbound.io/v1beta1 +kind: InputSecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: medialive/v1beta2/input + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + tags: + ENVIRONMENT: prod + whitelistRules: + - cidr: 10.0.0.8/32 diff --git a/examples-generated/medialive/v1beta2/multiplex.yaml b/examples-generated/medialive/v1beta2/multiplex.yaml new file mode 100644 index 0000000000..0171d08d89 --- /dev/null +++ b/examples-generated/medialive/v1beta2/multiplex.yaml @@ -0,0 +1,23 @@ +apiVersion: medialive.aws.upbound.io/v1beta2 +kind: Multiplex +metadata: + annotations: + meta.upbound.io/example-id: medialive/v1beta2/multiplex + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + availabilityZones: + - ${data.aws_availability_zones.available.names[0]} + - ${data.aws_availability_zones.available.names[1]} + multiplexSettings: + - maximumVideoBufferDelayMilliseconds: 1000 + transportStreamBitrate: 1000000 + transportStreamId: 1 + transportStreamReservedBitrate: 1 + name: example-multiplex-changed + region: us-west-1 + startMultiplex: true + tags: + tag1: value1 diff --git a/examples-generated/memorydb/v1beta2/user.yaml b/examples-generated/memorydb/v1beta2/user.yaml new file mode 100644 index 0000000000..849a19325c --- /dev/null +++ b/examples-generated/memorydb/v1beta2/user.yaml @@ -0,0 +1,18 @@ +apiVersion: memorydb.aws.upbound.io/v1beta2 +kind: User +metadata: + annotations: + meta.upbound.io/example-id: memorydb/v1beta2/user + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accessString: on ~* &* +@all + authenticationMode: + - passwordsSecretRef: + - key: example-key + name: example-secret + namespace: upbound-system + type: password + region: us-west-1 diff --git a/examples-generated/mq/v1beta2/broker.yaml b/examples-generated/mq/v1beta2/broker.yaml new file mode 100644 index 0000000000..793281483f --- /dev/null +++ b/examples-generated/mq/v1beta2/broker.yaml @@ -0,0 +1,28 @@ +apiVersion: mq.aws.upbound.io/v1beta2 +kind: Broker +metadata: + annotations: + meta.upbound.io/example-id: mq/v1beta2/broker + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + brokerName: example + configuration: + - idSelector: + matchLabels: + testing.upbound.io/example-name: test + revision: ${aws_mq_configuration.test.latest_revision} + engineType: ActiveMQ + engineVersion: 5.17.6 + hostInstanceType: mq.t2.micro + region: us-west-1 + securityGroupRefs: + - name: test + user: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: ExampleUser diff 
--git a/examples-generated/neptune/v1beta1/clusterinstance.yaml b/examples-generated/neptune/v1beta1/clusterinstance.yaml index f8e407bfaa..eae7194df6 100644 --- a/examples-generated/neptune/v1beta1/clusterinstance.yaml +++ b/examples-generated/neptune/v1beta1/clusterinstance.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: neptune.aws.upbound.io/v1beta1 +apiVersion: neptune.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/neptune/v1beta1/eventsubscription.yaml b/examples-generated/neptune/v1beta1/eventsubscription.yaml index e80b36860f..00fb4bb3fe 100644 --- a/examples-generated/neptune/v1beta1/eventsubscription.yaml +++ b/examples-generated/neptune/v1beta1/eventsubscription.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: neptune.aws.upbound.io/v1beta1 +apiVersion: neptune.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/neptune/v1beta1/globalcluster.yaml b/examples-generated/neptune/v1beta1/globalcluster.yaml index bad6386b91..61022aee19 100644 --- a/examples-generated/neptune/v1beta1/globalcluster.yaml +++ b/examples-generated/neptune/v1beta1/globalcluster.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: neptune.aws.upbound.io/v1beta1 +apiVersion: neptune.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: @@ -35,7 +35,7 @@ spec: --- -apiVersion: neptune.aws.upbound.io/v1beta1 +apiVersion: neptune.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/neptune/v1beta2/cluster.yaml b/examples-generated/neptune/v1beta2/cluster.yaml new file mode 100644 index 0000000000..79bff394e1 --- /dev/null +++ b/examples-generated/neptune/v1beta2/cluster.yaml @@ -0,0 +1,17 @@ +apiVersion: neptune.aws.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: neptune/v1beta2/cluster + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + applyImmediately: true + backupRetentionPeriod: 5 + 
engine: neptune + iamDatabaseAuthenticationEnabled: true + preferredBackupWindow: 07:00-09:00 + region: us-west-1 + skipFinalSnapshot: true diff --git a/examples-generated/networkfirewall/v1beta2/firewall.yaml b/examples-generated/networkfirewall/v1beta2/firewall.yaml new file mode 100644 index 0000000000..16f34b2907 --- /dev/null +++ b/examples-generated/networkfirewall/v1beta2/firewall.yaml @@ -0,0 +1,29 @@ +apiVersion: networkfirewall.aws.upbound.io/v1beta2 +kind: Firewall +metadata: + annotations: + meta.upbound.io/example-id: networkfirewall/v1beta2/firewall + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + firewallPolicyArnSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + region: us-west-1 + subnetMapping: + - subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Tag1: Value1 + Tag2: Value2 + timeouts: + - create: 40m + delete: 1h + update: 50m + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/networkfirewall/v1beta2/firewallpolicy.yaml b/examples-generated/networkfirewall/v1beta2/firewallpolicy.yaml new file mode 100644 index 0000000000..d3eae4e550 --- /dev/null +++ b/examples-generated/networkfirewall/v1beta2/firewallpolicy.yaml @@ -0,0 +1,25 @@ +apiVersion: networkfirewall.aws.upbound.io/v1beta2 +kind: FirewallPolicy +metadata: + annotations: + meta.upbound.io/example-id: networkfirewall/v1beta2/firewallpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + firewallPolicy: + - statelessDefaultActions: + - aws:pass + statelessFragmentDefaultActions: + - aws:drop + statelessRuleGroupReference: + - priority: 1 + resourceArnSelector: + matchLabels: + testing.upbound.io/example-name: example + tlsInspectionConfigurationArn: arn:aws:network-firewall:REGION:ACCT:tls-configuration/example + region: us-west-1 + tags: + Tag1: Value1 + Tag2: Value2 diff 
--git a/examples-generated/networkfirewall/v1beta2/loggingconfiguration.yaml b/examples-generated/networkfirewall/v1beta2/loggingconfiguration.yaml new file mode 100644 index 0000000000..19fef596ee --- /dev/null +++ b/examples-generated/networkfirewall/v1beta2/loggingconfiguration.yaml @@ -0,0 +1,21 @@ +apiVersion: networkfirewall.aws.upbound.io/v1beta2 +kind: LoggingConfiguration +metadata: + annotations: + meta.upbound.io/example-id: networkfirewall/v1beta2/loggingconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + firewallArnSelector: + matchLabels: + testing.upbound.io/example-name: example + loggingConfiguration: + - logDestinationConfig: + - logDestination: + bucketName: ${aws_s3_bucket.example.bucket} + prefix: /example + logDestinationType: S3 + logType: FLOW + region: us-west-1 diff --git a/examples-generated/networkfirewall/v1beta2/rulegroup.yaml b/examples-generated/networkfirewall/v1beta2/rulegroup.yaml new file mode 100644 index 0000000000..477c8bec16 --- /dev/null +++ b/examples-generated/networkfirewall/v1beta2/rulegroup.yaml @@ -0,0 +1,25 @@ +apiVersion: networkfirewall.aws.upbound.io/v1beta2 +kind: RuleGroup +metadata: + annotations: + meta.upbound.io/example-id: networkfirewall/v1beta2/rulegroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 100 + name: example + region: us-west-1 + ruleGroup: + - rulesSource: + - rulesSourceList: + - generatedRulesType: DENYLIST + targetTypes: + - HTTP_HOST + targets: + - test.example.com + tags: + Tag1: Value1 + Tag2: Value2 + type: STATEFUL diff --git a/examples-generated/networkmanager/v1beta1/customergatewayassociation.yaml b/examples-generated/networkmanager/v1beta1/customergatewayassociation.yaml index 211f6901b3..e6407182b7 100644 --- a/examples-generated/networkmanager/v1beta1/customergatewayassociation.yaml +++ b/examples-generated/networkmanager/v1beta1/customergatewayassociation.yaml @@ -52,7 
+52,7 @@ spec: --- -apiVersion: networkmanager.aws.upbound.io/v1beta1 +apiVersion: networkmanager.aws.upbound.io/v1beta2 kind: Device metadata: annotations: @@ -87,7 +87,7 @@ spec: --- -apiVersion: networkmanager.aws.upbound.io/v1beta1 +apiVersion: networkmanager.aws.upbound.io/v1beta2 kind: Site metadata: annotations: @@ -124,7 +124,7 @@ spec: --- -apiVersion: ec2.aws.upbound.io/v1beta1 +apiVersion: ec2.aws.upbound.io/v1beta2 kind: VPNConnection metadata: annotations: diff --git a/examples-generated/networkmanager/v1beta2/connectattachment.yaml b/examples-generated/networkmanager/v1beta2/connectattachment.yaml new file mode 100644 index 0000000000..8d3918f59f --- /dev/null +++ b/examples-generated/networkmanager/v1beta2/connectattachment.yaml @@ -0,0 +1,45 @@ +apiVersion: networkmanager.aws.upbound.io/v1beta2 +kind: ConnectAttachment +metadata: + annotations: + meta.upbound.io/example-id: networkmanager/v1beta2/connectattachment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + coreNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + edgeLocationSelector: + matchLabels: + testing.upbound.io/example-name: example + options: + - protocol: GRE + region: us-west-1 + transportAttachmentIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: networkmanager.aws.upbound.io/v1beta2 +kind: VPCAttachment +metadata: + annotations: + meta.upbound.io/example-id: networkmanager/v1beta2/connectattachment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + coreNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + subnetArnsSelector: + matchLabels: + testing.upbound.io/example-name: example[*] + vpcArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/networkmanager/v1beta2/device.yaml b/examples-generated/networkmanager/v1beta2/device.yaml new 
file mode 100644 index 0000000000..f023e3655c --- /dev/null +++ b/examples-generated/networkmanager/v1beta2/device.yaml @@ -0,0 +1,17 @@ +apiVersion: networkmanager.aws.upbound.io/v1beta2 +kind: Device +metadata: + annotations: + meta.upbound.io/example-id: networkmanager/v1beta2/device + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + globalNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + siteIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/networkmanager/v1beta2/link.yaml b/examples-generated/networkmanager/v1beta2/link.yaml new file mode 100644 index 0000000000..a78a20501c --- /dev/null +++ b/examples-generated/networkmanager/v1beta2/link.yaml @@ -0,0 +1,21 @@ +apiVersion: networkmanager.aws.upbound.io/v1beta2 +kind: Link +metadata: + annotations: + meta.upbound.io/example-id: networkmanager/v1beta2/link + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bandwidth: + - downloadSpeed: 50 + uploadSpeed: 10 + globalNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + providerName: MegaCorp + region: us-west-1 + siteIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/networkmanager/v1beta2/site.yaml b/examples-generated/networkmanager/v1beta2/site.yaml new file mode 100644 index 0000000000..0627a473e9 --- /dev/null +++ b/examples-generated/networkmanager/v1beta2/site.yaml @@ -0,0 +1,28 @@ +apiVersion: networkmanager.aws.upbound.io/v1beta2 +kind: Site +metadata: + annotations: + meta.upbound.io/example-id: networkmanager/v1beta2/site + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + globalNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + +--- + +apiVersion: networkmanager.aws.upbound.io/v1beta1 +kind: GlobalNetwork 
+metadata: + annotations: + meta.upbound.io/example-id: networkmanager/v1beta2/site + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/networkmanager/v1beta2/vpcattachment.yaml b/examples-generated/networkmanager/v1beta2/vpcattachment.yaml new file mode 100644 index 0000000000..373ec71439 --- /dev/null +++ b/examples-generated/networkmanager/v1beta2/vpcattachment.yaml @@ -0,0 +1,19 @@ +apiVersion: networkmanager.aws.upbound.io/v1beta2 +kind: VPCAttachment +metadata: + annotations: + meta.upbound.io/example-id: networkmanager/v1beta2/vpcattachment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + coreNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + subnetArnsRefs: + - name: example + vpcArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/opensearch/v1beta1/domainpolicy.yaml b/examples-generated/opensearch/v1beta1/domainpolicy.yaml index 05c2af91f0..dee98f7e3f 100644 --- a/examples-generated/opensearch/v1beta1/domainpolicy.yaml +++ b/examples-generated/opensearch/v1beta1/domainpolicy.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: opensearch.aws.upbound.io/v1beta1 +apiVersion: opensearch.aws.upbound.io/v1beta2 kind: Domain metadata: annotations: diff --git a/examples-generated/opensearch/v1beta2/domain.yaml b/examples-generated/opensearch/v1beta2/domain.yaml new file mode 100644 index 0000000000..f18e1c1aff --- /dev/null +++ b/examples-generated/opensearch/v1beta2/domain.yaml @@ -0,0 +1,17 @@ +apiVersion: opensearch.aws.upbound.io/v1beta2 +kind: Domain +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1beta2/domain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterConfig: + - instanceType: r4.large.search + domainName: example + engineVersion: Elasticsearch_7.10 + 
region: us-west-1 + tags: + Domain: TestDomain diff --git a/examples-generated/opensearch/v1beta2/domainsamloptions.yaml b/examples-generated/opensearch/v1beta2/domainsamloptions.yaml new file mode 100644 index 0000000000..577de57a42 --- /dev/null +++ b/examples-generated/opensearch/v1beta2/domainsamloptions.yaml @@ -0,0 +1,41 @@ +apiVersion: opensearch.aws.upbound.io/v1beta2 +kind: DomainSAMLOptions +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1beta2/domainsamloptions + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + domainNameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + samlOptions: + - enabled: true + idp: + - entityId: https://example.com + metadataContent: ${file("./saml-metadata.xml")} + +--- + +apiVersion: opensearch.aws.upbound.io/v1beta2 +kind: Domain +metadata: + annotations: + meta.upbound.io/example-id: opensearch/v1beta2/domainsamloptions + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterConfig: + - instanceType: r4.large.search + domainName: example + engineVersion: OpenSearch_1.1 + region: us-west-1 + snapshotOptions: + - automatedSnapshotStartHour: 23 + tags: + Domain: TestDomain diff --git a/examples-generated/opensearchserverless/v1beta2/securityconfig.yaml b/examples-generated/opensearchserverless/v1beta2/securityconfig.yaml new file mode 100644 index 0000000000..b07d2e1100 --- /dev/null +++ b/examples-generated/opensearchserverless/v1beta2/securityconfig.yaml @@ -0,0 +1,12 @@ +apiVersion: opensearchserverless.aws.upbound.io/v1beta2 +kind: SecurityConfig +metadata: + annotations: + meta.upbound.io/example-id: opensearchserverless/v1beta2/securityconfig + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + type: saml diff --git a/examples-generated/opsworks/v1beta2/customlayer.yaml 
b/examples-generated/opsworks/v1beta2/customlayer.yaml new file mode 100644 index 0000000000..61193a9c87 --- /dev/null +++ b/examples-generated/opsworks/v1beta2/customlayer.yaml @@ -0,0 +1,15 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: CustomLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/customlayer + labels: + testing.upbound.io/example-name: custlayer + name: custlayer +spec: + forProvider: + name: My Awesome Custom Layer + shortName: awesome + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/opsworks/v1beta2/ecsclusterlayer.yaml b/examples-generated/opsworks/v1beta2/ecsclusterlayer.yaml new file mode 100644 index 0000000000..4637bf6c9f --- /dev/null +++ b/examples-generated/opsworks/v1beta2/ecsclusterlayer.yaml @@ -0,0 +1,16 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: EcsClusterLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/ecsclusterlayer + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ecsClusterArnSelector: + matchLabels: + testing.upbound.io/example-name: example + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/opsworks/v1beta2/ganglialayer.yaml b/examples-generated/opsworks/v1beta2/ganglialayer.yaml new file mode 100644 index 0000000000..88e1330588 --- /dev/null +++ b/examples-generated/opsworks/v1beta2/ganglialayer.yaml @@ -0,0 +1,14 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: GangliaLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/ganglialayer + labels: + testing.upbound.io/example-name: monitor + name: monitor +spec: + forProvider: + password: foobarbaz + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/opsworks/v1beta2/haproxylayer.yaml b/examples-generated/opsworks/v1beta2/haproxylayer.yaml new file mode 100644 index 
0000000000..0bf2d5f2b2 --- /dev/null +++ b/examples-generated/opsworks/v1beta2/haproxylayer.yaml @@ -0,0 +1,14 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: HAProxyLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/haproxylayer + labels: + testing.upbound.io/example-name: lb + name: lb +spec: + forProvider: + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main + statsPassword: foobarbaz diff --git a/examples-generated/opsworks/v1beta2/javaapplayer.yaml b/examples-generated/opsworks/v1beta2/javaapplayer.yaml new file mode 100644 index 0000000000..91c7938321 --- /dev/null +++ b/examples-generated/opsworks/v1beta2/javaapplayer.yaml @@ -0,0 +1,13 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: JavaAppLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/javaapplayer + labels: + testing.upbound.io/example-name: app + name: app +spec: + forProvider: + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/opsworks/v1beta2/memcachedlayer.yaml b/examples-generated/opsworks/v1beta2/memcachedlayer.yaml new file mode 100644 index 0000000000..a4c5640242 --- /dev/null +++ b/examples-generated/opsworks/v1beta2/memcachedlayer.yaml @@ -0,0 +1,13 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: MemcachedLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/memcachedlayer + labels: + testing.upbound.io/example-name: cache + name: cache +spec: + forProvider: + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/opsworks/v1beta2/mysqllayer.yaml b/examples-generated/opsworks/v1beta2/mysqllayer.yaml new file mode 100644 index 0000000000..fbeee7404f --- /dev/null +++ b/examples-generated/opsworks/v1beta2/mysqllayer.yaml @@ -0,0 +1,13 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: MySQLLayer +metadata: + annotations: + meta.upbound.io/example-id: 
opsworks/v1beta2/mysqllayer + labels: + testing.upbound.io/example-name: db + name: db +spec: + forProvider: + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/opsworks/v1beta2/nodejsapplayer.yaml b/examples-generated/opsworks/v1beta2/nodejsapplayer.yaml new file mode 100644 index 0000000000..1e3c636701 --- /dev/null +++ b/examples-generated/opsworks/v1beta2/nodejsapplayer.yaml @@ -0,0 +1,13 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: NodeJSAppLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/nodejsapplayer + labels: + testing.upbound.io/example-name: app + name: app +spec: + forProvider: + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/opsworks/v1beta2/phpapplayer.yaml b/examples-generated/opsworks/v1beta2/phpapplayer.yaml new file mode 100644 index 0000000000..e498887fb0 --- /dev/null +++ b/examples-generated/opsworks/v1beta2/phpapplayer.yaml @@ -0,0 +1,13 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: PHPAppLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/phpapplayer + labels: + testing.upbound.io/example-name: app + name: app +spec: + forProvider: + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/opsworks/v1beta2/railsapplayer.yaml b/examples-generated/opsworks/v1beta2/railsapplayer.yaml new file mode 100644 index 0000000000..99086f593b --- /dev/null +++ b/examples-generated/opsworks/v1beta2/railsapplayer.yaml @@ -0,0 +1,13 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: RailsAppLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/railsapplayer + labels: + testing.upbound.io/example-name: app + name: app +spec: + forProvider: + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/opsworks/v1beta2/stack.yaml 
b/examples-generated/opsworks/v1beta2/stack.yaml new file mode 100644 index 0000000000..6ad2c8e3b0 --- /dev/null +++ b/examples-generated/opsworks/v1beta2/stack.yaml @@ -0,0 +1,26 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: Stack +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/stack + labels: + testing.upbound.io/example-name: main + name: main +spec: + forProvider: + customJson: | + { + "foobar": { + "version": "1.0.0" + } + } + defaultInstanceProfileArnSelector: + matchLabels: + testing.upbound.io/example-name: opsworks + name: awesome-stack + region: us-west-1 + serviceRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: opsworks + tags: + Name: foobar-terraform-stack diff --git a/examples-generated/opsworks/v1beta2/staticweblayer.yaml b/examples-generated/opsworks/v1beta2/staticweblayer.yaml new file mode 100644 index 0000000000..22ffce3d09 --- /dev/null +++ b/examples-generated/opsworks/v1beta2/staticweblayer.yaml @@ -0,0 +1,13 @@ +apiVersion: opsworks.aws.upbound.io/v1beta2 +kind: StaticWebLayer +metadata: + annotations: + meta.upbound.io/example-id: opsworks/v1beta2/staticweblayer + labels: + testing.upbound.io/example-name: web + name: web +spec: + forProvider: + stackIdSelector: + matchLabels: + testing.upbound.io/example-name: main diff --git a/examples-generated/pinpoint/v1beta1/smschannel.yaml b/examples-generated/pinpoint/v1beta1/smschannel.yaml index ac187c4c08..faa4bde2e2 100644 --- a/examples-generated/pinpoint/v1beta1/smschannel.yaml +++ b/examples-generated/pinpoint/v1beta1/smschannel.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: pinpoint.aws.upbound.io/v1beta1 +apiVersion: pinpoint.aws.upbound.io/v1beta2 kind: App metadata: annotations: diff --git a/examples-generated/pinpoint/v1beta2/app.yaml b/examples-generated/pinpoint/v1beta2/app.yaml new file mode 100644 index 0000000000..d0c3a115ff --- /dev/null +++ b/examples-generated/pinpoint/v1beta2/app.yaml @@ -0,0 +1,17 @@ +apiVersion: 
pinpoint.aws.upbound.io/v1beta2 +kind: App +metadata: + annotations: + meta.upbound.io/example-id: pinpoint/v1beta2/app + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + limits: + - maximumDuration: 600 + name: test-app + quietTime: + - end: "06:00" + start: "00:00" + region: us-west-1 diff --git a/examples-generated/qldb/v1beta2/stream.yaml b/examples-generated/qldb/v1beta2/stream.yaml new file mode 100644 index 0000000000..e48ecd73d4 --- /dev/null +++ b/examples-generated/qldb/v1beta2/stream.yaml @@ -0,0 +1,26 @@ +apiVersion: qldb.aws.upbound.io/v1beta2 +kind: Stream +metadata: + annotations: + meta.upbound.io/example-id: qldb/v1beta2/stream + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + inclusiveStartTime: "2021-01-01T00:00:00Z" + kinesisConfiguration: + - aggregationEnabled: false + streamArnSelector: + matchLabels: + testing.upbound.io/example-name: example + ledgerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + streamName: sample-ledger-stream + tags: + example: tag diff --git a/examples-generated/rds/v1beta1/clusteractivitystream.yaml b/examples-generated/rds/v1beta1/clusteractivitystream.yaml index fea71e6381..9f75fd082b 100644 --- a/examples-generated/rds/v1beta1/clusteractivitystream.yaml +++ b/examples-generated/rds/v1beta1/clusteractivitystream.yaml @@ -34,7 +34,7 @@ spec: --- -apiVersion: rds.aws.upbound.io/v1beta1 +apiVersion: rds.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/rds/v1beta1/clusterendpoint.yaml b/examples-generated/rds/v1beta1/clusterendpoint.yaml index b5bc5df5f6..28f089c483 100644 --- a/examples-generated/rds/v1beta1/clusterendpoint.yaml +++ b/examples-generated/rds/v1beta1/clusterendpoint.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: rds.aws.upbound.io/v1beta1 +apiVersion: 
rds.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/rds/v1beta1/clusterinstance.yaml b/examples-generated/rds/v1beta1/clusterinstance.yaml index 697320de61..9fc153978e 100644 --- a/examples-generated/rds/v1beta1/clusterinstance.yaml +++ b/examples-generated/rds/v1beta1/clusterinstance.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: rds.aws.upbound.io/v1beta1 +apiVersion: rds.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/rds/v1beta1/dbsnapshotcopy.yaml b/examples-generated/rds/v1beta1/dbsnapshotcopy.yaml index 9e4381cfd8..23e818fa12 100644 --- a/examples-generated/rds/v1beta1/dbsnapshotcopy.yaml +++ b/examples-generated/rds/v1beta1/dbsnapshotcopy.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: rds.aws.upbound.io/v1beta2 +apiVersion: rds.aws.upbound.io/v1beta3 kind: Instance metadata: annotations: diff --git a/examples-generated/rds/v1beta1/eventsubscription.yaml b/examples-generated/rds/v1beta1/eventsubscription.yaml index 83dd095295..591e6dfded 100644 --- a/examples-generated/rds/v1beta1/eventsubscription.yaml +++ b/examples-generated/rds/v1beta1/eventsubscription.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: rds.aws.upbound.io/v1beta2 +apiVersion: rds.aws.upbound.io/v1beta3 kind: Instance metadata: annotations: diff --git a/examples-generated/rds/v1beta1/globalcluster.yaml b/examples-generated/rds/v1beta1/globalcluster.yaml index 4517dca7fb..07137d02bb 100644 --- a/examples-generated/rds/v1beta1/globalcluster.yaml +++ b/examples-generated/rds/v1beta1/globalcluster.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: rds.aws.upbound.io/v1beta1 +apiVersion: rds.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: @@ -42,7 +42,7 @@ spec: --- -apiVersion: rds.aws.upbound.io/v1beta1 +apiVersion: rds.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/rds/v1beta1/proxytarget.yaml b/examples-generated/rds/v1beta1/proxytarget.yaml index 
940f4c3bde..95185c3165 100644 --- a/examples-generated/rds/v1beta1/proxytarget.yaml +++ b/examples-generated/rds/v1beta1/proxytarget.yaml @@ -54,7 +54,7 @@ spec: --- -apiVersion: rds.aws.upbound.io/v1beta1 +apiVersion: rds.aws.upbound.io/v1beta2 kind: ProxyDefaultTargetGroup metadata: annotations: diff --git a/examples-generated/rds/v1beta1/snapshot.yaml b/examples-generated/rds/v1beta1/snapshot.yaml index 208c3825ad..a1f326cb45 100644 --- a/examples-generated/rds/v1beta1/snapshot.yaml +++ b/examples-generated/rds/v1beta1/snapshot.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: rds.aws.upbound.io/v1beta2 +apiVersion: rds.aws.upbound.io/v1beta3 kind: Instance metadata: annotations: diff --git a/examples-generated/rds/v1beta2/cluster.yaml b/examples-generated/rds/v1beta2/cluster.yaml new file mode 100644 index 0000000000..919bedbc95 --- /dev/null +++ b/examples-generated/rds/v1beta2/cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: rds.aws.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: rds/v1beta2/cluster + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + availabilityZones: + - us-west-2a + - us-west-2b + - us-west-2c + backupRetentionPeriod: 5 + databaseName: mydb + engine: aurora-mysql + engineVersion: 5.7.mysql_aurora.2.03.2 + masterPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + masterUsername: foo + preferredBackupWindow: 07:00-09:00 + region: us-west-1 diff --git a/examples-generated/rds/v1beta2/proxydefaulttargetgroup.yaml b/examples-generated/rds/v1beta2/proxydefaulttargetgroup.yaml new file mode 100644 index 0000000000..72ebf71a87 --- /dev/null +++ b/examples-generated/rds/v1beta2/proxydefaulttargetgroup.yaml @@ -0,0 +1,56 @@ +apiVersion: rds.aws.upbound.io/v1beta2 +kind: ProxyDefaultTargetGroup +metadata: + annotations: + meta.upbound.io/example-id: rds/v1beta2/proxydefaulttargetgroup + labels: + testing.upbound.io/example-name: example + 
name: example +spec: + forProvider: + connectionPoolConfig: + - connectionBorrowTimeout: 120 + initQuery: SET x=1, y=2 + maxConnectionsPercent: 100 + maxIdleConnectionsPercent: 50 + sessionPinningFilters: + - EXCLUDE_VARIABLE_SETS + dbProxyNameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + +--- + +apiVersion: rds.aws.upbound.io/v1beta1 +kind: Proxy +metadata: + annotations: + meta.upbound.io/example-id: rds/v1beta2/proxydefaulttargetgroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + auth: + - authScheme: SECRETS + description: example + iamAuth: DISABLED + secretArnSelector: + matchLabels: + testing.upbound.io/example-name: example + debugLogging: false + engineFamily: MYSQL + idleClientTimeout: 1800 + region: us-west-1 + requireTls: true + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Key: value + Name: example + vpcSecurityGroupIdRefs: + - name: example + vpcSubnetIds: + - ${aws_subnet.example.id} diff --git a/examples-generated/rds/v1beta3/instance.yaml b/examples-generated/rds/v1beta3/instance.yaml new file mode 100644 index 0000000000..199733844f --- /dev/null +++ b/examples-generated/rds/v1beta3/instance.yaml @@ -0,0 +1,25 @@ +apiVersion: rds.aws.upbound.io/v1beta3 +kind: Instance +metadata: + annotations: + meta.upbound.io/example-id: rds/v1beta3/instance + labels: + testing.upbound.io/example-name: default + name: default +spec: + forProvider: + allocatedStorage: 10 + dbName: mydb + engine: mysql + engineVersion: "8.0" + instanceClass: db.t3.micro + parameterGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + region: us-west-1 + skipFinalSnapshot: true + username: foo diff --git a/examples-generated/redshift/v1beta1/eventsubscription.yaml b/examples-generated/redshift/v1beta1/eventsubscription.yaml index 
0ebdd4a190..769594f026 100644 --- a/examples-generated/redshift/v1beta1/eventsubscription.yaml +++ b/examples-generated/redshift/v1beta1/eventsubscription.yaml @@ -26,7 +26,7 @@ spec: --- -apiVersion: redshift.aws.upbound.io/v1beta1 +apiVersion: redshift.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/redshift/v1beta1/snapshotcopygrant.yaml b/examples-generated/redshift/v1beta1/snapshotcopygrant.yaml index aedd563324..837163aa0f 100644 --- a/examples-generated/redshift/v1beta1/snapshotcopygrant.yaml +++ b/examples-generated/redshift/v1beta1/snapshotcopygrant.yaml @@ -13,7 +13,7 @@ spec: --- -apiVersion: redshift.aws.upbound.io/v1beta1 +apiVersion: redshift.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/redshift/v1beta1/snapshotscheduleassociation.yaml b/examples-generated/redshift/v1beta1/snapshotscheduleassociation.yaml index 16caff6dac..783c520772 100644 --- a/examples-generated/redshift/v1beta1/snapshotscheduleassociation.yaml +++ b/examples-generated/redshift/v1beta1/snapshotscheduleassociation.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: redshift.aws.upbound.io/v1beta1 +apiVersion: redshift.aws.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/redshift/v1beta2/cluster.yaml b/examples-generated/redshift/v1beta2/cluster.yaml new file mode 100644 index 0000000000..1e1974bb73 --- /dev/null +++ b/examples-generated/redshift/v1beta2/cluster.yaml @@ -0,0 +1,19 @@ +apiVersion: redshift.aws.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: redshift/v1beta2/cluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterType: single-node + databaseName: mydb + masterPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + masterUsername: exampleuser + nodeType: dc1.large + region: us-west-1 diff --git 
a/examples-generated/redshift/v1beta2/scheduledaction.yaml b/examples-generated/redshift/v1beta2/scheduledaction.yaml new file mode 100644 index 0000000000..d415f732cc --- /dev/null +++ b/examples-generated/redshift/v1beta2/scheduledaction.yaml @@ -0,0 +1,65 @@ +apiVersion: redshift.aws.upbound.io/v1beta2 +kind: ScheduledAction +metadata: + annotations: + meta.upbound.io/example-id: redshift/v1beta2/scheduledaction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + iamRoleSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + schedule: cron(00 23 * * ? *) + targetAction: + - pauseCluster: + - clusterIdentifier: tf-redshift001 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: redshift/v1beta2/scheduledaction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + policy: ${data.aws_iam_policy_document.example.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: redshift/v1beta2/scheduledaction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicyAttachment +metadata: + annotations: + meta.upbound.io/example-id: redshift/v1beta2/scheduledaction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + policyArnSelector: + matchLabels: + testing.upbound.io/example-name: example + roleSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/resourcegroups/v1beta2/group.yaml b/examples-generated/resourcegroups/v1beta2/group.yaml new file mode 100644 index 0000000000..85f03d5b4c --- /dev/null +++ b/examples-generated/resourcegroups/v1beta2/group.yaml @@ -0,0 +1,24 @@ 
+apiVersion: resourcegroups.aws.upbound.io/v1beta2 +kind: Group +metadata: + annotations: + meta.upbound.io/example-id: resourcegroups/v1beta2/group + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + region: us-west-1 + resourceQuery: + - query: | + { + "ResourceTypeFilters": [ + "AWS::EC2::Instance" + ], + "TagFilters": [ + { + "Key": "Stage", + "Values": ["Test"] + } + ] + } diff --git a/examples-generated/route53/v1beta2/record.yaml b/examples-generated/route53/v1beta2/record.yaml new file mode 100644 index 0000000000..0a4cb8a7b7 --- /dev/null +++ b/examples-generated/route53/v1beta2/record.yaml @@ -0,0 +1,19 @@ +apiVersion: route53.aws.upbound.io/v1beta2 +kind: Record +metadata: + annotations: + meta.upbound.io/example-id: route53/v1beta2/record + labels: + testing.upbound.io/example-name: www + name: www +spec: + forProvider: + name: www.example.com + records: + - ${aws_eip.lb.public_ip} + region: us-west-1 + ttl: 300 + type: A + zoneIdSelector: + matchLabels: + testing.upbound.io/example-name: primary diff --git a/examples-generated/route53recoverycontrolconfig/v1beta2/safetyrule.yaml b/examples-generated/route53recoverycontrolconfig/v1beta2/safetyrule.yaml new file mode 100644 index 0000000000..10880d2021 --- /dev/null +++ b/examples-generated/route53recoverycontrolconfig/v1beta2/safetyrule.yaml @@ -0,0 +1,22 @@ +apiVersion: route53recoverycontrolconfig.aws.upbound.io/v1beta2 +kind: SafetyRule +metadata: + annotations: + meta.upbound.io/example-id: route53recoverycontrolconfig/v1beta2/safetyrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assertedControlsRefs: + - name: example + controlPanelArnSelector: + matchLabels: + testing.upbound.io/example-name: example + name: daisyguttridge + region: us-west-1 + ruleConfig: + - inverted: false + threshold: 1 + type: ATLEAST + waitPeriodMs: 5000 diff --git a/examples-generated/route53recoveryreadiness/v1beta2/resourceset.yaml 
b/examples-generated/route53recoveryreadiness/v1beta2/resourceset.yaml new file mode 100644 index 0000000000..48dded388e --- /dev/null +++ b/examples-generated/route53recoveryreadiness/v1beta2/resourceset.yaml @@ -0,0 +1,16 @@ +apiVersion: route53recoveryreadiness.aws.upbound.io/v1beta2 +kind: ResourceSet +metadata: + annotations: + meta.upbound.io/example-id: route53recoveryreadiness/v1beta2/resourceset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + resourceSetType: AWS::CloudWatch::Alarm + resources: + - resourceArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/rum/v1beta2/appmonitor.yaml b/examples-generated/rum/v1beta2/appmonitor.yaml new file mode 100644 index 0000000000..6b51ebb467 --- /dev/null +++ b/examples-generated/rum/v1beta2/appmonitor.yaml @@ -0,0 +1,12 @@ +apiVersion: rum.aws.upbound.io/v1beta2 +kind: AppMonitor +metadata: + annotations: + meta.upbound.io/example-id: rum/v1beta2/appmonitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + domain: localhost + region: us-west-1 diff --git a/examples-generated/s3/v1beta1/bucketaccelerateconfiguration.yaml b/examples-generated/s3/v1beta1/bucketaccelerateconfiguration.yaml index 051ef1ddb0..ef5240978a 100644 --- a/examples-generated/s3/v1beta1/bucketaccelerateconfiguration.yaml +++ b/examples-generated/s3/v1beta1/bucketaccelerateconfiguration.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git a/examples-generated/s3/v1beta1/bucketcorsconfiguration.yaml b/examples-generated/s3/v1beta1/bucketcorsconfiguration.yaml index bd6a3fd889..6444548150 100644 --- a/examples-generated/s3/v1beta1/bucketcorsconfiguration.yaml +++ b/examples-generated/s3/v1beta1/bucketcorsconfiguration.yaml @@ -30,7 +30,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 
+apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git a/examples-generated/s3/v1beta1/bucketnotification.yaml b/examples-generated/s3/v1beta1/bucketnotification.yaml index e562f019fa..ee2944202c 100644 --- a/examples-generated/s3/v1beta1/bucketnotification.yaml +++ b/examples-generated/s3/v1beta1/bucketnotification.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git a/examples-generated/s3/v1beta1/bucketpolicy.yaml b/examples-generated/s3/v1beta1/bucketpolicy.yaml index 6a202a80ba..5c38e7c1b6 100644 --- a/examples-generated/s3/v1beta1/bucketpolicy.yaml +++ b/examples-generated/s3/v1beta1/bucketpolicy.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git a/examples-generated/s3/v1beta1/bucketpublicaccessblock.yaml b/examples-generated/s3/v1beta1/bucketpublicaccessblock.yaml index d1d94492ed..c0a8a805bf 100644 --- a/examples-generated/s3/v1beta1/bucketpublicaccessblock.yaml +++ b/examples-generated/s3/v1beta1/bucketpublicaccessblock.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git a/examples-generated/s3/v1beta2/bucket.yaml b/examples-generated/s3/v1beta2/bucket.yaml new file mode 100644 index 0000000000..7c3112f91a --- /dev/null +++ b/examples-generated/s3/v1beta2/bucket.yaml @@ -0,0 +1,15 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucket + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + tags: + Environment: Dev + Name: My bucket diff --git a/examples-generated/s3/v1beta2/bucketacl.yaml 
b/examples-generated/s3/v1beta2/bucketacl.yaml new file mode 100644 index 0000000000..f689ac6fc6 --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketacl.yaml @@ -0,0 +1,49 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketACL +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + acl: private + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketOwnershipControls +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + rule: + - objectOwnership: BucketOwnerPreferred diff --git a/examples-generated/s3/v1beta2/bucketanalyticsconfiguration.yaml b/examples-generated/s3/v1beta2/bucketanalyticsconfiguration.yaml new file mode 100644 index 0000000000..d586ae81ad --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketanalyticsconfiguration.yaml @@ -0,0 +1,52 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketAnalyticsConfiguration +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketanalyticsconfiguration + labels: + testing.upbound.io/example-name: example-entire-bucket + name: example-entire-bucket +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + name: EntireBucket + region: us-west-1 + storageClassAnalysis: + - dataExport: + - destination: + - s3BucketDestination: + - 
bucketArnSelector: + matchLabels: + testing.upbound.io/example-name: analytics + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketanalyticsconfiguration + labels: + testing.upbound.io/example-name: analytics + name: analytics +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketanalyticsconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/s3/v1beta2/bucketintelligenttieringconfiguration.yaml b/examples-generated/s3/v1beta2/bucketintelligenttieringconfiguration.yaml new file mode 100644 index 0000000000..cdb173f7fd --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketintelligenttieringconfiguration.yaml @@ -0,0 +1,35 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketIntelligentTieringConfiguration +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketintelligenttieringconfiguration + labels: + testing.upbound.io/example-name: example-entire-bucket + name: example-entire-bucket +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + name: EntireBucket + region: us-west-1 + tiering: + - accessTier: DEEP_ARCHIVE_ACCESS + days: 180 + - accessTier: ARCHIVE_ACCESS + days: 125 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketintelligenttieringconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/s3/v1beta2/bucketinventory.yaml 
b/examples-generated/s3/v1beta2/bucketinventory.yaml new file mode 100644 index 0000000000..e244bf2885 --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketinventory.yaml @@ -0,0 +1,54 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketInventory +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketinventory + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: test + destination: + - bucket: + - bucketArnSelector: + matchLabels: + testing.upbound.io/example-name: inventory + format: ORC + includedObjectVersions: All + name: EntireBucketDaily + region: us-west-1 + schedule: + - frequency: Daily + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketinventory + labels: + testing.upbound.io/example-name: inventory + name: inventory +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketinventory + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/s3/v1beta2/bucketlifecycleconfiguration.yaml b/examples-generated/s3/v1beta2/bucketlifecycleconfiguration.yaml new file mode 100644 index 0000000000..9eeae54baa --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketlifecycleconfiguration.yaml @@ -0,0 +1,17 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketLifecycleConfiguration +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketlifecycleconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: bucket + region: us-west-1 
+ rule: + - id: rule-1 + status: Enabled diff --git a/examples-generated/s3/v1beta2/bucketlogging.yaml b/examples-generated/s3/v1beta2/bucketlogging.yaml new file mode 100644 index 0000000000..1d53d4c840 --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketlogging.yaml @@ -0,0 +1,84 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketLogging +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketlogging + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + targetBucketSelector: + matchLabels: + testing.upbound.io/example-name: log_bucket + targetPrefix: log/ + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketlogging + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketlogging + labels: + testing.upbound.io/example-name: log_bucket + name: log-bucket +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketACL +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketlogging + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + acl: private + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketACL +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketlogging + labels: + testing.upbound.io/example-name: log_bucket_acl + name: log-bucket-acl +spec: + forProvider: + acl: log-delivery-write + bucketSelector: + 
matchLabels: + testing.upbound.io/example-name: log_bucket + region: us-west-1 diff --git a/examples-generated/s3/v1beta2/bucketmetric.yaml b/examples-generated/s3/v1beta2/bucketmetric.yaml new file mode 100644 index 0000000000..085f6b8ecc --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketmetric.yaml @@ -0,0 +1,30 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketMetric +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketmetric + labels: + testing.upbound.io/example-name: example-entire-bucket + name: example-entire-bucket +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + name: EntireBucket + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketmetric + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/s3/v1beta2/bucketobjectlockconfiguration.yaml b/examples-generated/s3/v1beta2/bucketobjectlockconfiguration.yaml new file mode 100644 index 0000000000..f7f1c378f6 --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketobjectlockconfiguration.yaml @@ -0,0 +1,52 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketObjectLockConfiguration +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketobjectlockconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + rule: + - defaultRetention: + - days: 5 + mode: COMPLIANCE + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketobjectlockconfiguration + labels: + testing.upbound.io/example-name: example + 
name: example +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketVersioning +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketobjectlockconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + versioningConfiguration: + - status: Enabled diff --git a/examples-generated/s3/v1beta2/bucketownershipcontrols.yaml b/examples-generated/s3/v1beta2/bucketownershipcontrols.yaml new file mode 100644 index 0000000000..5953b28464 --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketownershipcontrols.yaml @@ -0,0 +1,31 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketOwnershipControls +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketownershipcontrols + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + rule: + - objectOwnership: BucketOwnerPreferred + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketownershipcontrols + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/s3/v1beta2/bucketreplicationconfiguration.yaml b/examples-generated/s3/v1beta2/bucketreplicationconfiguration.yaml new file mode 100644 index 0000000000..f41c2a9720 --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketreplicationconfiguration.yaml @@ -0,0 +1,164 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketReplicationConfiguration +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketreplicationconfiguration + labels: + testing.upbound.io/example-name: replication + name: 
replication +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: source + provider: ${aws.central} + region: us-west-1 + roleSelector: + matchLabels: + testing.upbound.io/example-name: replication + rule: + - destination: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: destination + storageClass: STANDARD + filter: + - prefix: foo + id: foobar + status: Enabled + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketreplicationconfiguration + labels: + testing.upbound.io/example-name: replication + name: replication +spec: + forProvider: + policy: ${data.aws_iam_policy_document.replication.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketreplicationconfiguration + labels: + testing.upbound.io/example-name: replication + name: replication +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicyAttachment +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketreplicationconfiguration + labels: + testing.upbound.io/example-name: replication + name: replication +spec: + forProvider: + policyArnSelector: + matchLabels: + testing.upbound.io/example-name: replication + roleSelector: + matchLabels: + testing.upbound.io/example-name: replication + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketreplicationconfiguration + labels: + testing.upbound.io/example-name: destination + name: destination +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + 
meta.upbound.io/example-id: s3/v1beta2/bucketreplicationconfiguration + labels: + testing.upbound.io/example-name: source + name: source +spec: + forProvider: + provider: ${aws.central} + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketACL +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketreplicationconfiguration + labels: + testing.upbound.io/example-name: source_bucket_acl + name: source-bucket-acl +spec: + forProvider: + acl: private + bucketSelector: + matchLabels: + testing.upbound.io/example-name: source + provider: ${aws.central} + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketVersioning +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketreplicationconfiguration + labels: + testing.upbound.io/example-name: destination + name: destination +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: destination + region: us-west-1 + versioningConfiguration: + - status: Enabled + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketVersioning +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketreplicationconfiguration + labels: + testing.upbound.io/example-name: source + name: source +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: source + provider: ${aws.central} + region: us-west-1 + versioningConfiguration: + - status: Enabled diff --git a/examples-generated/s3/v1beta2/bucketserversideencryptionconfiguration.yaml b/examples-generated/s3/v1beta2/bucketserversideencryptionconfiguration.yaml new file mode 100644 index 0000000000..719c0b1890 --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketserversideencryptionconfiguration.yaml @@ -0,0 +1,51 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketServerSideEncryptionConfiguration +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketserversideencryptionconfiguration + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: mybucket + region: us-west-1 + rule: + - applyServerSideEncryptionByDefault: + - kmsMasterKeyIdSelector: + matchLabels: + testing.upbound.io/example-name: mykey + sseAlgorithm: aws:kms + +--- + +apiVersion: kms.aws.upbound.io/v1beta1 +kind: Key +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketserversideencryptionconfiguration + labels: + testing.upbound.io/example-name: mykey + name: mykey +spec: + forProvider: + deletionWindowInDays: 10 + description: This key is used to encrypt bucket objects + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketserversideencryptionconfiguration + labels: + testing.upbound.io/example-name: mybucket + name: mybucket +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/s3/v1beta2/bucketversioning.yaml b/examples-generated/s3/v1beta2/bucketversioning.yaml new file mode 100644 index 0000000000..4b8390c388 --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketversioning.yaml @@ -0,0 +1,49 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketVersioning +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketversioning + labels: + testing.upbound.io/example-name: versioning_example + name: versioning-example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + versioningConfiguration: + - status: Enabled + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3/v1beta2/bucketversioning + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: 
us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketACL +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketversioning + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + acl: private + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/s3/v1beta2/bucketwebsiteconfiguration.yaml b/examples-generated/s3/v1beta2/bucketwebsiteconfiguration.yaml new file mode 100644 index 0000000000..28b0cf56e0 --- /dev/null +++ b/examples-generated/s3/v1beta2/bucketwebsiteconfiguration.yaml @@ -0,0 +1,23 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: BucketWebsiteConfiguration +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/bucketwebsiteconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + errorDocument: + - key: error.html + indexDocument: + - suffix: index.html + region: us-west-1 + routingRule: + - condition: + - keyPrefixEquals: docs/ + redirect: + - replaceKeyPrefixWith: documents/ diff --git a/examples-generated/s3/v1beta2/object.yaml b/examples-generated/s3/v1beta2/object.yaml new file mode 100644 index 0000000000..96d8426a2d --- /dev/null +++ b/examples-generated/s3/v1beta2/object.yaml @@ -0,0 +1,17 @@ +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Object +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta2/object + labels: + testing.upbound.io/example-name: object + name: object +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + etag: ${filemd5("path/to/file")} + key: new_object_key + region: us-west-1 + source: path/to/file diff --git a/examples-generated/s3control/v1beta1/accesspointpolicy.yaml b/examples-generated/s3control/v1beta1/accesspointpolicy.yaml index a9d8303a64..5745bdd594 100644 --- 
a/examples-generated/s3control/v1beta1/accesspointpolicy.yaml +++ b/examples-generated/s3control/v1beta1/accesspointpolicy.yaml @@ -27,7 +27,7 @@ spec: --- -apiVersion: s3control.aws.upbound.io/v1beta1 +apiVersion: s3control.aws.upbound.io/v1beta2 kind: AccessPoint metadata: annotations: @@ -50,7 +50,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: diff --git a/examples-generated/s3control/v1beta1/objectlambdaaccesspointpolicy.yaml b/examples-generated/s3control/v1beta1/objectlambdaaccesspointpolicy.yaml index 5b169efe95..8971ff45e1 100644 --- a/examples-generated/s3control/v1beta1/objectlambdaaccesspointpolicy.yaml +++ b/examples-generated/s3control/v1beta1/objectlambdaaccesspointpolicy.yaml @@ -27,7 +27,7 @@ spec: --- -apiVersion: s3control.aws.upbound.io/v1beta1 +apiVersion: s3control.aws.upbound.io/v1beta2 kind: AccessPoint metadata: annotations: @@ -45,7 +45,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: @@ -60,7 +60,7 @@ spec: --- -apiVersion: s3control.aws.upbound.io/v1beta1 +apiVersion: s3control.aws.upbound.io/v1beta2 kind: ObjectLambdaAccessPoint metadata: annotations: diff --git a/examples-generated/s3control/v1beta2/accesspoint.yaml b/examples-generated/s3control/v1beta2/accesspoint.yaml new file mode 100644 index 0000000000..fbcceaa08c --- /dev/null +++ b/examples-generated/s3control/v1beta2/accesspoint.yaml @@ -0,0 +1,30 @@ +apiVersion: s3control.aws.upbound.io/v1beta2 +kind: AccessPoint +metadata: + annotations: + meta.upbound.io/example-id: s3control/v1beta2/accesspoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: 
${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3control/v1beta2/accesspoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/s3control/v1beta2/multiregionaccesspoint.yaml b/examples-generated/s3control/v1beta2/multiregionaccesspoint.yaml new file mode 100644 index 0000000000..e02c9724c5 --- /dev/null +++ b/examples-generated/s3control/v1beta2/multiregionaccesspoint.yaml @@ -0,0 +1,52 @@ +apiVersion: s3control.aws.upbound.io/v1beta2 +kind: MultiRegionAccessPoint +metadata: + annotations: + meta.upbound.io/example-id: s3control/v1beta2/multiregionaccesspoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + details: + - name: example + region: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: foo_bucket + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: bar_bucket + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3control/v1beta2/multiregionaccesspoint + labels: + testing.upbound.io/example-name: bar_bucket + name: bar-bucket +spec: + forProvider: + provider: ${aws.secondary_region} + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3control/v1beta2/multiregionaccesspoint + labels: + testing.upbound.io/example-name: foo_bucket + name: foo-bucket +spec: + forProvider: + provider: ${aws.primary_region} + region: us-west-1 diff --git a/examples-generated/s3control/v1beta2/multiregionaccesspointpolicy.yaml b/examples-generated/s3control/v1beta2/multiregionaccesspointpolicy.yaml new file mode 100644 index 0000000000..f57f05b86e --- /dev/null +++ 
b/examples-generated/s3control/v1beta2/multiregionaccesspointpolicy.yaml @@ -0,0 +1,64 @@ +apiVersion: s3control.aws.upbound.io/v1beta2 +kind: MultiRegionAccessPointPolicy +metadata: + annotations: + meta.upbound.io/example-id: s3control/v1beta2/multiregionaccesspointpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + details: + - name: ${element(split(":", aws_s3control_multi_region_access_point.example.id), + 1)} + policy: |- + ${jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Sid" : "Example", + "Effect" : "Allow", + "Principal" : { + "AWS" : data.aws_caller_identity.current.account_id + }, + "Action" : ["s3:GetObject", "s3:PutObject"], + "Resource" : "arn:${data.aws_partition.current.partition}:s3::${data.aws_caller_identity.current.account_id}:accesspoint/${aws_s3control_multi_region_access_point.example.alias}/object/*" + } + ] + })} + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3control/v1beta2/multiregionaccesspointpolicy + labels: + testing.upbound.io/example-name: foo_bucket + name: foo-bucket +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3control.aws.upbound.io/v1beta2 +kind: MultiRegionAccessPoint +metadata: + annotations: + meta.upbound.io/example-id: s3control/v1beta2/multiregionaccesspointpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + details: + - name: example + region: + - bucketSelector: + matchLabels: + testing.upbound.io/example-name: foo_bucket + region: us-west-1 diff --git a/examples-generated/s3control/v1beta2/objectlambdaaccesspoint.yaml b/examples-generated/s3control/v1beta2/objectlambdaaccesspoint.yaml new file mode 100644 index 0000000000..d4dcc2a295 --- /dev/null +++ b/examples-generated/s3control/v1beta2/objectlambdaaccesspoint.yaml @@ -0,0 +1,57 @@ 
+apiVersion: s3control.aws.upbound.io/v1beta2 +kind: ObjectLambdaAccessPoint +metadata: + annotations: + meta.upbound.io/example-id: s3control/v1beta2/objectlambdaaccesspoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + configuration: + - supportingAccessPointSelector: + matchLabels: + testing.upbound.io/example-name: example + transformationConfiguration: + - actions: + - GetObject + contentTransformation: + - awsLambda: + - functionArnSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + region: us-west-1 + +--- + +apiVersion: s3control.aws.upbound.io/v1beta2 +kind: AccessPoint +metadata: + annotations: + meta.upbound.io/example-id: s3control/v1beta2/objectlambdaaccesspoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: s3control/v1beta2/objectlambdaaccesspoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/s3control/v1beta2/storagelensconfiguration.yaml b/examples-generated/s3control/v1beta2/storagelensconfiguration.yaml new file mode 100644 index 0000000000..168beb4c53 --- /dev/null +++ b/examples-generated/s3control/v1beta2/storagelensconfiguration.yaml @@ -0,0 +1,39 @@ +apiVersion: s3control.aws.upbound.io/v1beta2 +kind: StorageLensConfiguration +metadata: + annotations: + meta.upbound.io/example-id: s3control/v1beta2/storagelensconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + configId: example-1 + region: us-west-1 + storageLensConfiguration: + - accountLevel: + - activityMetrics: + - enabled: true + 
bucketLevel: + - activityMetrics: + - enabled: true + dataExport: + - cloudWatchMetrics: + - enabled: true + s3BucketDestination: + - accountId: ${data.aws_caller_identity.current.account_id} + arnSelector: + matchLabels: + testing.upbound.io/example-name: target + encryption: + - sseS3: + - {} + format: CSV + outputSchemaVersion: V_1 + enabled: true + exclude: + - buckets: + - ${aws_s3_bucket.b1.arn} + - ${aws_s3_bucket.b2.arn} + regions: + - us-east-2 diff --git a/examples-generated/sagemaker/v1beta2/app.yaml b/examples-generated/sagemaker/v1beta2/app.yaml new file mode 100644 index 0000000000..1068a2ec68 --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/app.yaml @@ -0,0 +1,19 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: App +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/app + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + appName: example + appType: JupyterServer + domainIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + userProfileNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/sagemaker/v1beta2/appimageconfig.yaml b/examples-generated/sagemaker/v1beta2/appimageconfig.yaml new file mode 100644 index 0000000000..1e352676db --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/appimageconfig.yaml @@ -0,0 +1,14 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: AppImageConfig +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/appimageconfig + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + kernelGatewayImageConfig: + - kernelSpec: + - name: example + region: us-west-1 diff --git a/examples-generated/sagemaker/v1beta2/coderepository.yaml b/examples-generated/sagemaker/v1beta2/coderepository.yaml new file mode 100644 index 0000000000..d3b3dd5f66 --- /dev/null +++ 
b/examples-generated/sagemaker/v1beta2/coderepository.yaml @@ -0,0 +1,13 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: CodeRepository +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/coderepository + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + gitConfig: + - repositoryUrl: https://github.com/hashicorp/terraform-provider-aws.git + region: us-west-1 diff --git a/examples-generated/sagemaker/v1beta2/device.yaml b/examples-generated/sagemaker/v1beta2/device.yaml new file mode 100644 index 0000000000..1b33f4dd90 --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/device.yaml @@ -0,0 +1,16 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: Device +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/device + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + device: + - deviceName: example + deviceFleetNameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 diff --git a/examples-generated/sagemaker/v1beta2/devicefleet.yaml b/examples-generated/sagemaker/v1beta2/devicefleet.yaml new file mode 100644 index 0000000000..5801aa43de --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/devicefleet.yaml @@ -0,0 +1,16 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: DeviceFleet +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/devicefleet + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + outputConfig: + - s3OutputLocation: s3://${aws_s3_bucket.example.bucket}/prefix/ + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: test diff --git a/examples-generated/sagemaker/v1beta2/domain.yaml b/examples-generated/sagemaker/v1beta2/domain.yaml new file mode 100644 index 0000000000..a2a3085321 --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/domain.yaml @@ -0,0 +1,37 @@ +apiVersion: 
sagemaker.aws.upbound.io/v1beta2 +kind: Domain +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/domain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + authMode: IAM + defaultUserSettings: + - executionRoleSelector: + matchLabels: + testing.upbound.io/example-name: example + domainName: example + region: us-west-1 + subnetIdRefs: + - name: example + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/domain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.example.json} + path: / diff --git a/examples-generated/sagemaker/v1beta2/endpoint.yaml b/examples-generated/sagemaker/v1beta2/endpoint.yaml new file mode 100644 index 0000000000..f16399e4e2 --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/endpoint.yaml @@ -0,0 +1,16 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: Endpoint +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/endpoint + labels: + testing.upbound.io/example-name: e + name: e +spec: + forProvider: + endpointConfigNameSelector: + matchLabels: + testing.upbound.io/example-name: ec + region: us-west-1 + tags: + Name: foo diff --git a/examples-generated/sagemaker/v1beta2/endpointconfiguration.yaml b/examples-generated/sagemaker/v1beta2/endpointconfiguration.yaml new file mode 100644 index 0000000000..bbe43d524e --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/endpointconfiguration.yaml @@ -0,0 +1,20 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: EndpointConfiguration +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/endpointconfiguration + labels: + testing.upbound.io/example-name: ec + name: ec +spec: + forProvider: + productionVariants: + - initialInstanceCount: 1 
+ instanceType: ml.t2.medium + modelNameSelector: + matchLabels: + testing.upbound.io/example-name: m + variantName: variant-1 + region: us-west-1 + tags: + Name: foo diff --git a/examples-generated/sagemaker/v1beta2/featuregroup.yaml b/examples-generated/sagemaker/v1beta2/featuregroup.yaml new file mode 100644 index 0000000000..d982d68db1 --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/featuregroup.yaml @@ -0,0 +1,21 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: FeatureGroup +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/featuregroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + eventTimeFeatureName: example + featureDefinition: + - featureName: example + featureType: String + onlineStoreConfig: + - enableOnlineStore: true + recordIdentifierFeatureName: example + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: test diff --git a/examples-generated/sagemaker/v1beta2/model.yaml b/examples-generated/sagemaker/v1beta2/model.yaml new file mode 100644 index 0000000000..1282e3a90d --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/model.yaml @@ -0,0 +1,30 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: Model +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/model + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + executionRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + primaryContainer: + - image: ${data.aws_sagemaker_prebuilt_ecr_image.test.registry_path} + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/model + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} diff --git 
a/examples-generated/sagemaker/v1beta2/notebookinstance.yaml b/examples-generated/sagemaker/v1beta2/notebookinstance.yaml new file mode 100644 index 0000000000..160c9cfd2e --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/notebookinstance.yaml @@ -0,0 +1,17 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: NotebookInstance +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/notebookinstance + labels: + testing.upbound.io/example-name: ni + name: ni +spec: + forProvider: + instanceType: ml.t2.medium + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: role + tags: + Name: foo diff --git a/examples-generated/sagemaker/v1beta2/space.yaml b/examples-generated/sagemaker/v1beta2/space.yaml new file mode 100644 index 0000000000..1b5112a832 --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/space.yaml @@ -0,0 +1,15 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: Space +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/space + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + domainIdSelector: + matchLabels: + testing.upbound.io/example-name: test + region: us-west-1 + spaceName: example diff --git a/examples-generated/sagemaker/v1beta2/userprofile.yaml b/examples-generated/sagemaker/v1beta2/userprofile.yaml new file mode 100644 index 0000000000..a818aae954 --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/userprofile.yaml @@ -0,0 +1,15 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: UserProfile +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/userprofile + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + domainIdSelector: + matchLabels: + testing.upbound.io/example-name: test + region: us-west-1 + userProfileName: example diff --git a/examples-generated/sagemaker/v1beta2/workforce.yaml b/examples-generated/sagemaker/v1beta2/workforce.yaml new 
file mode 100644 index 0000000000..af20a726a7 --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/workforce.yaml @@ -0,0 +1,70 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: Workforce +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/workforce + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cognitoConfig: + - clientIdSelector: + matchLabels: + testing.upbound.io/example-name: example + userPoolSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + +--- + +apiVersion: cognitoidp.aws.upbound.io/v1beta2 +kind: UserPool +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/workforce + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example + region: us-west-1 + +--- + +apiVersion: cognitoidp.aws.upbound.io/v1beta1 +kind: UserPoolClient +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/workforce + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + generateSecret: true + name: example + region: us-west-1 + userPoolIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: cognitoidp.aws.upbound.io/v1beta1 +kind: UserPoolDomain +metadata: + annotations: + meta.upbound.io/example-id: sagemaker/v1beta2/workforce + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + domain: example + region: us-west-1 + userPoolIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/sagemaker/v1beta2/workteam.yaml b/examples-generated/sagemaker/v1beta2/workteam.yaml new file mode 100644 index 0000000000..175f575e90 --- /dev/null +++ b/examples-generated/sagemaker/v1beta2/workteam.yaml @@ -0,0 +1,26 @@ +apiVersion: sagemaker.aws.upbound.io/v1beta2 +kind: Workteam +metadata: + annotations: + meta.upbound.io/example-id: 
sagemaker/v1beta2/workteam + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: example + memberDefinition: + - cognitoMemberDefinition: + - clientIdSelector: + matchLabels: + testing.upbound.io/example-name: example + userGroupSelector: + matchLabels: + testing.upbound.io/example-name: example + userPoolSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + workforceNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/scheduler/v1beta2/schedule.yaml b/examples-generated/scheduler/v1beta2/schedule.yaml new file mode 100644 index 0000000000..8fa7bfc734 --- /dev/null +++ b/examples-generated/scheduler/v1beta2/schedule.yaml @@ -0,0 +1,23 @@ +apiVersion: scheduler.aws.upbound.io/v1beta2 +kind: Schedule +metadata: + annotations: + meta.upbound.io/example-id: scheduler/v1beta2/schedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + flexibleTimeWindow: + - mode: "OFF" + groupName: default + name: my-schedule + region: us-west-1 + scheduleExpression: rate(1 hours) + target: + - arnSelector: + matchLabels: + testing.upbound.io/example-name: example + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/secretsmanager/v1beta2/secretrotation.yaml b/examples-generated/secretsmanager/v1beta2/secretrotation.yaml new file mode 100644 index 0000000000..b273ebc70d --- /dev/null +++ b/examples-generated/secretsmanager/v1beta2/secretrotation.yaml @@ -0,0 +1,19 @@ +apiVersion: secretsmanager.aws.upbound.io/v1beta2 +kind: SecretRotation +metadata: + annotations: + meta.upbound.io/example-id: secretsmanager/v1beta2/secretrotation + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + rotationLambdaArnSelector: + matchLabels: + testing.upbound.io/example-name: example + rotationRules: + - 
automaticallyAfterDays: 30 + secretIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/securityhub/v1beta2/insight.yaml b/examples-generated/securityhub/v1beta2/insight.yaml new file mode 100644 index 0000000000..cc8f868576 --- /dev/null +++ b/examples-generated/securityhub/v1beta2/insight.yaml @@ -0,0 +1,33 @@ +apiVersion: securityhub.aws.upbound.io/v1beta2 +kind: Insight +metadata: + annotations: + meta.upbound.io/example-id: securityhub/v1beta2/insight + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + filters: + - awsAccountId: + - comparison: EQUALS + value: "1234567890" + - comparison: EQUALS + value: "09876543210" + groupByAttribute: AwsAccountId + name: example-insight + region: us-west-1 + +--- + +apiVersion: securityhub.aws.upbound.io/v1beta1 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: securityhub/v1beta2/insight + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/servicecatalog/v1beta2/product.yaml b/examples-generated/servicecatalog/v1beta2/product.yaml new file mode 100644 index 0000000000..f150fecb0f --- /dev/null +++ b/examples-generated/servicecatalog/v1beta2/product.yaml @@ -0,0 +1,18 @@ +apiVersion: servicecatalog.aws.upbound.io/v1beta2 +kind: Product +metadata: + annotations: + meta.upbound.io/example-id: servicecatalog/v1beta2/product + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example + owner: example-owner + provisioningArtifactParameters: + - templateUrl: https://s3.amazonaws.com/cf-templates-ozkq9d3hgiq2-us-east-1/temp1.json + region: us-west-1 + tags: + foo: bar + type: CLOUD_FORMATION_TEMPLATE diff --git a/examples-generated/servicecatalog/v1beta2/serviceaction.yaml b/examples-generated/servicecatalog/v1beta2/serviceaction.yaml new file mode 100644 index 0000000000..8acff7832d 
--- /dev/null +++ b/examples-generated/servicecatalog/v1beta2/serviceaction.yaml @@ -0,0 +1,15 @@ +apiVersion: servicecatalog.aws.upbound.io/v1beta2 +kind: ServiceAction +metadata: + annotations: + meta.upbound.io/example-id: servicecatalog/v1beta2/serviceaction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + definition: + - name: AWS-RestartEC2Instance + description: Motor generator unit + name: MGU + region: us-west-1 diff --git a/examples-generated/servicediscovery/v1beta2/service.yaml b/examples-generated/servicediscovery/v1beta2/service.yaml new file mode 100644 index 0000000000..f6d5fdef70 --- /dev/null +++ b/examples-generated/servicediscovery/v1beta2/service.yaml @@ -0,0 +1,58 @@ +apiVersion: servicediscovery.aws.upbound.io/v1beta2 +kind: Service +metadata: + annotations: + meta.upbound.io/example-id: servicediscovery/v1beta2/service + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dnsConfig: + - dnsRecords: + - ttl: 10 + type: A + namespaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + routingPolicy: MULTIVALUE + healthCheckCustomConfig: + - failureThreshold: 1 + name: example + region: us-west-1 + +--- + +apiVersion: servicediscovery.aws.upbound.io/v1beta1 +kind: PrivateDNSNamespace +metadata: + annotations: + meta.upbound.io/example-id: servicediscovery/v1beta2/service + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: example + name: example.terraform.local + region: us-west-1 + vpcSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: servicediscovery/v1beta2/service + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cidrBlock: 10.0.0.0/16 + enableDnsHostnames: true + enableDnsSupport: true + region: us-west-1 diff 
--git a/examples-generated/ses/v1beta1/domaindkim.yaml b/examples-generated/ses/v1beta1/domaindkim.yaml index b6a8999bc7..246b82b767 100644 --- a/examples-generated/ses/v1beta1/domaindkim.yaml +++ b/examples-generated/ses/v1beta1/domaindkim.yaml @@ -12,7 +12,7 @@ spec: --- -apiVersion: route53.aws.upbound.io/v1beta1 +apiVersion: route53.aws.upbound.io/v1beta2 kind: Record metadata: annotations: diff --git a/examples-generated/ses/v1beta1/domainmailfrom.yaml b/examples-generated/ses/v1beta1/domainmailfrom.yaml index 6751ef040f..8412b17102 100644 --- a/examples-generated/ses/v1beta1/domainmailfrom.yaml +++ b/examples-generated/ses/v1beta1/domainmailfrom.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: route53.aws.upbound.io/v1beta1 +apiVersion: route53.aws.upbound.io/v1beta2 kind: Record metadata: annotations: @@ -38,7 +38,7 @@ spec: --- -apiVersion: route53.aws.upbound.io/v1beta1 +apiVersion: route53.aws.upbound.io/v1beta2 kind: Record metadata: annotations: diff --git a/examples-generated/ses/v1beta2/configurationset.yaml b/examples-generated/ses/v1beta2/configurationset.yaml new file mode 100644 index 0000000000..56d1b6bac4 --- /dev/null +++ b/examples-generated/ses/v1beta2/configurationset.yaml @@ -0,0 +1,11 @@ +apiVersion: ses.aws.upbound.io/v1beta2 +kind: ConfigurationSet +metadata: + annotations: + meta.upbound.io/example-id: ses/v1beta2/configurationset + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/ses/v1beta2/eventdestination.yaml b/examples-generated/ses/v1beta2/eventdestination.yaml new file mode 100644 index 0000000000..8c726e1744 --- /dev/null +++ b/examples-generated/ses/v1beta2/eventdestination.yaml @@ -0,0 +1,22 @@ +apiVersion: ses.aws.upbound.io/v1beta2 +kind: EventDestination +metadata: + annotations: + meta.upbound.io/example-id: ses/v1beta2/eventdestination + labels: + testing.upbound.io/example-name: cloudwatch + name: cloudwatch +spec: + forProvider: + 
cloudwatchDestination: + - defaultValue: default + dimensionName: dimension + valueSource: emailHeader + configurationSetNameSelector: + matchLabels: + testing.upbound.io/example-name: example + enabled: true + matchingTypes: + - bounce + - send + region: us-west-1 diff --git a/examples-generated/sesv2/v1beta1/emailidentityfeedbackattributes.yaml b/examples-generated/sesv2/v1beta1/emailidentityfeedbackattributes.yaml index 6783d7f6ec..312992d51d 100644 --- a/examples-generated/sesv2/v1beta1/emailidentityfeedbackattributes.yaml +++ b/examples-generated/sesv2/v1beta1/emailidentityfeedbackattributes.yaml @@ -13,7 +13,7 @@ spec: --- -apiVersion: sesv2.aws.upbound.io/v1beta1 +apiVersion: sesv2.aws.upbound.io/v1beta2 kind: EmailIdentity metadata: annotations: diff --git a/examples-generated/sesv2/v1beta1/emailidentitymailfromattributes.yaml b/examples-generated/sesv2/v1beta1/emailidentitymailfromattributes.yaml index 46acc2da0f..d8a61ba9f3 100644 --- a/examples-generated/sesv2/v1beta1/emailidentitymailfromattributes.yaml +++ b/examples-generated/sesv2/v1beta1/emailidentitymailfromattributes.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: sesv2.aws.upbound.io/v1beta1 +apiVersion: sesv2.aws.upbound.io/v1beta2 kind: EmailIdentity metadata: annotations: diff --git a/examples-generated/sesv2/v1beta2/configurationset.yaml b/examples-generated/sesv2/v1beta2/configurationset.yaml new file mode 100644 index 0000000000..52d086c946 --- /dev/null +++ b/examples-generated/sesv2/v1beta2/configurationset.yaml @@ -0,0 +1,23 @@ +apiVersion: sesv2.aws.upbound.io/v1beta2 +kind: ConfigurationSet +metadata: + annotations: + meta.upbound.io/example-id: sesv2/v1beta2/configurationset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + deliveryOptions: + - tlsPolicy: REQUIRE + region: us-west-1 + reputationOptions: + - reputationMetricsEnabled: false + sendingOptions: + - sendingEnabled: true + suppressionOptions: + - suppressedReasons: + - BOUNCE + - 
COMPLAINT + trackingOptions: + - customRedirectDomain: example.com diff --git a/examples-generated/sesv2/v1beta2/configurationseteventdestination.yaml b/examples-generated/sesv2/v1beta2/configurationseteventdestination.yaml new file mode 100644 index 0000000000..1dc336851b --- /dev/null +++ b/examples-generated/sesv2/v1beta2/configurationseteventdestination.yaml @@ -0,0 +1,38 @@ +apiVersion: sesv2.aws.upbound.io/v1beta2 +kind: ConfigurationSetEventDestination +metadata: + annotations: + meta.upbound.io/example-id: sesv2/v1beta2/configurationseteventdestination + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + configurationSetNameSelector: + matchLabels: + testing.upbound.io/example-name: example + eventDestination: + - cloudWatchDestination: + - dimensionConfiguration: + - defaultDimensionValue: example + dimensionName: example + dimensionValueSource: MESSAGE_TAG + enabled: true + matchingEventTypes: + - SEND + eventDestinationName: example + region: us-west-1 + +--- + +apiVersion: sesv2.aws.upbound.io/v1beta2 +kind: ConfigurationSet +metadata: + annotations: + meta.upbound.io/example-id: sesv2/v1beta2/configurationseteventdestination + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/sesv2/v1beta2/emailidentity.yaml b/examples-generated/sesv2/v1beta2/emailidentity.yaml new file mode 100644 index 0000000000..aa073983b6 --- /dev/null +++ b/examples-generated/sesv2/v1beta2/emailidentity.yaml @@ -0,0 +1,11 @@ +apiVersion: sesv2.aws.upbound.io/v1beta2 +kind: EmailIdentity +metadata: + annotations: + meta.upbound.io/example-id: sesv2/v1beta2/emailidentity + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 diff --git a/examples-generated/sfn/v1beta2/statemachine.yaml b/examples-generated/sfn/v1beta2/statemachine.yaml new file mode 100644 index 0000000000..a73f457178 --- /dev/null 
+++ b/examples-generated/sfn/v1beta2/statemachine.yaml @@ -0,0 +1,26 @@ +apiVersion: sfn.aws.upbound.io/v1beta2 +kind: StateMachine +metadata: + annotations: + meta.upbound.io/example-id: sfn/v1beta2/statemachine + labels: + testing.upbound.io/example-name: sfn_state_machine + name: sfn-state-machine +spec: + forProvider: + definition: | + { + "Comment": "A Hello World example of the Amazon States Language using an AWS Lambda Function", + "StartAt": "HelloWorld", + "States": { + "HelloWorld": { + "Type": "Task", + "Resource": "${aws_lambda_function.lambda.arn}", + "End": true + } + } + } + region: us-west-1 + roleArnSelector: + matchLabels: + testing.upbound.io/example-name: iam_for_sfn diff --git a/examples-generated/signer/v1beta1/signingprofilepermission.yaml b/examples-generated/signer/v1beta1/signingprofilepermission.yaml index 3dce81d8f1..356802bcd8 100644 --- a/examples-generated/signer/v1beta1/signingprofilepermission.yaml +++ b/examples-generated/signer/v1beta1/signingprofilepermission.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: signer.aws.upbound.io/v1beta1 +apiVersion: signer.aws.upbound.io/v1beta2 kind: SigningProfile metadata: annotations: diff --git a/examples-generated/signer/v1beta2/signingjob.yaml b/examples-generated/signer/v1beta2/signingjob.yaml new file mode 100644 index 0000000000..4d45f9ca02 --- /dev/null +++ b/examples-generated/signer/v1beta2/signingjob.yaml @@ -0,0 +1,39 @@ +apiVersion: signer.aws.upbound.io/v1beta2 +kind: SigningJob +metadata: + annotations: + meta.upbound.io/example-id: signer/v1beta2/signingjob + labels: + testing.upbound.io/example-name: build_signing_job + name: build-signing-job +spec: + forProvider: + destination: + - s3: + - bucket: s3-bucket-name + prefix: signed/ + ignoreSigningJobFailure: true + profileNameSelector: + matchLabels: + testing.upbound.io/example-name: test_sp + region: us-west-1 + source: + - s3: + - bucket: s3-bucket-name + key: object-to-be-signed.zip + version: jADjFYYYEXAMPLETszPjOmCMFDzd9dN1 
+ +--- + +apiVersion: signer.aws.upbound.io/v1beta2 +kind: SigningProfile +metadata: + annotations: + meta.upbound.io/example-id: signer/v1beta2/signingjob + labels: + testing.upbound.io/example-name: test_sp + name: test-sp +spec: + forProvider: + platformId: AWSLambda-SHA384-ECDSA + region: us-west-1 diff --git a/examples-generated/signer/v1beta2/signingprofile.yaml b/examples-generated/signer/v1beta2/signingprofile.yaml new file mode 100644 index 0000000000..ee6ad6ea71 --- /dev/null +++ b/examples-generated/signer/v1beta2/signingprofile.yaml @@ -0,0 +1,12 @@ +apiVersion: signer.aws.upbound.io/v1beta2 +kind: SigningProfile +metadata: + annotations: + meta.upbound.io/example-id: signer/v1beta2/signingprofile + labels: + testing.upbound.io/example-name: test_sp + name: test-sp +spec: + forProvider: + platformId: AWSLambda-SHA384-ECDSA + region: us-west-1 diff --git a/examples-generated/ssm/v1beta2/association.yaml b/examples-generated/ssm/v1beta2/association.yaml new file mode 100644 index 0000000000..f876548644 --- /dev/null +++ b/examples-generated/ssm/v1beta2/association.yaml @@ -0,0 +1,18 @@ +apiVersion: ssm.aws.upbound.io/v1beta2 +kind: Association +metadata: + annotations: + meta.upbound.io/example-id: ssm/v1beta2/association + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + nameSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + targets: + - key: InstanceIds + values: + - ${aws_instance.example.id} diff --git a/examples-generated/ssm/v1beta2/maintenancewindowtask.yaml b/examples-generated/ssm/v1beta2/maintenancewindowtask.yaml new file mode 100644 index 0000000000..238f308361 --- /dev/null +++ b/examples-generated/ssm/v1beta2/maintenancewindowtask.yaml @@ -0,0 +1,32 @@ +apiVersion: ssm.aws.upbound.io/v1beta2 +kind: MaintenanceWindowTask +metadata: + annotations: + meta.upbound.io/example-id: ssm/v1beta2/maintenancewindowtask + labels: + testing.upbound.io/example-name: example + 
name: example +spec: + forProvider: + maxConcurrency: 2 + maxErrors: 1 + priority: 1 + region: us-west-1 + targets: + - key: InstanceIds + values: + - ${aws_instance.example.id} + taskArnSelector: + matchLabels: + testing.upbound.io/example-name: example + taskInvocationParameters: + - automationParameters: + - documentVersion: $LATEST + parameter: + - name: InstanceId + values: + - ${aws_instance.example.id} + taskType: AUTOMATION + windowIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/ssm/v1beta2/resourcedatasync.yaml b/examples-generated/ssm/v1beta2/resourcedatasync.yaml new file mode 100644 index 0000000000..8cb2e23ef2 --- /dev/null +++ b/examples-generated/ssm/v1beta2/resourcedatasync.yaml @@ -0,0 +1,51 @@ +apiVersion: ssm.aws.upbound.io/v1beta2 +kind: ResourceDataSync +metadata: + annotations: + meta.upbound.io/example-id: ssm/v1beta2/resourcedatasync + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + region: us-west-1 + s3Destination: + - bucketNameSelector: + matchLabels: + testing.upbound.io/example-name: hoge + regionSelector: + matchLabels: + testing.upbound.io/example-name: hoge + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: ssm/v1beta2/resourcedatasync + labels: + testing.upbound.io/example-name: hoge + name: hoge +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta1 +kind: BucketPolicy +metadata: + annotations: + meta.upbound.io/example-id: ssm/v1beta2/resourcedatasync + labels: + testing.upbound.io/example-name: hoge + name: hoge +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: hoge + policy: ${data.aws_iam_policy_document.hoge.json} + region: us-west-1 diff --git a/examples-generated/ssoadmin/v1beta2/customermanagedpolicyattachment.yaml 
b/examples-generated/ssoadmin/v1beta2/customermanagedpolicyattachment.yaml new file mode 100644 index 0000000000..a4f7ec9f8e --- /dev/null +++ b/examples-generated/ssoadmin/v1beta2/customermanagedpolicyattachment.yaml @@ -0,0 +1,63 @@ +apiVersion: ssoadmin.aws.upbound.io/v1beta2 +kind: CustomerManagedPolicyAttachment +metadata: + annotations: + meta.upbound.io/example-id: ssoadmin/v1beta2/customermanagedpolicyattachment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + customerManagedPolicyReference: + - path: / + policyNameSelector: + matchLabels: + testing.upbound.io/example-name: example + instanceArn: ${aws_ssoadmin_permission_set.example.instance_arn} + permissionSetArnSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: ssoadmin/v1beta2/customermanagedpolicyattachment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: My test policy + policy: |- + ${jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "ec2:Describe*", + ] + Effect = "Allow" + Resource = "*" + }, + ] + })} + +--- + +apiVersion: ssoadmin.aws.upbound.io/v1beta1 +kind: PermissionSet +metadata: + annotations: + meta.upbound.io/example-id: ssoadmin/v1beta2/customermanagedpolicyattachment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + instanceArn: ${tolist(data.aws_ssoadmin_instances.example.arns)[0]} + name: Example + region: us-west-1 diff --git a/examples-generated/ssoadmin/v1beta2/permissionsboundaryattachment.yaml b/examples-generated/ssoadmin/v1beta2/permissionsboundaryattachment.yaml new file mode 100644 index 0000000000..b06a488eec --- /dev/null +++ b/examples-generated/ssoadmin/v1beta2/permissionsboundaryattachment.yaml @@ -0,0 +1,64 @@ +apiVersion: 
ssoadmin.aws.upbound.io/v1beta2 +kind: PermissionsBoundaryAttachment +metadata: + annotations: + meta.upbound.io/example-id: ssoadmin/v1beta2/permissionsboundaryattachment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + instanceArn: ${aws_ssoadmin_permission_set.example.instance_arn} + permissionSetArnSelector: + matchLabels: + testing.upbound.io/example-name: example + permissionsBoundary: + - customerManagedPolicyReference: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: example + path: / + region: us-west-1 + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: ssoadmin/v1beta2/permissionsboundaryattachment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: My test policy + policy: |- + ${jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "ec2:Describe*", + ] + Effect = "Allow" + Resource = "*" + }, + ] + })} + +--- + +apiVersion: ssoadmin.aws.upbound.io/v1beta1 +kind: PermissionSet +metadata: + annotations: + meta.upbound.io/example-id: ssoadmin/v1beta2/permissionsboundaryattachment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + instanceArn: ${tolist(data.aws_ssoadmin_instances.example.arns)[0]} + name: Example + region: us-west-1 diff --git a/examples-generated/timestreamwrite/v1beta2/table.yaml b/examples-generated/timestreamwrite/v1beta2/table.yaml new file mode 100644 index 0000000000..c959e841ad --- /dev/null +++ b/examples-generated/timestreamwrite/v1beta2/table.yaml @@ -0,0 +1,15 @@ +apiVersion: timestreamwrite.aws.upbound.io/v1beta2 +kind: Table +metadata: + annotations: + meta.upbound.io/example-id: timestreamwrite/v1beta2/table + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: example + 
region: us-west-1 + tableName: example diff --git a/examples-generated/transcribe/v1beta1/vocabulary.yaml b/examples-generated/transcribe/v1beta1/vocabulary.yaml index 09783dd01a..31de69c4f4 100644 --- a/examples-generated/transcribe/v1beta1/vocabulary.yaml +++ b/examples-generated/transcribe/v1beta1/vocabulary.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Bucket metadata: annotations: @@ -33,7 +33,7 @@ spec: --- -apiVersion: s3.aws.upbound.io/v1beta1 +apiVersion: s3.aws.upbound.io/v1beta2 kind: Object metadata: annotations: diff --git a/examples-generated/transcribe/v1beta2/languagemodel.yaml b/examples-generated/transcribe/v1beta2/languagemodel.yaml new file mode 100644 index 0000000000..5163d5e90c --- /dev/null +++ b/examples-generated/transcribe/v1beta2/languagemodel.yaml @@ -0,0 +1,99 @@ +apiVersion: transcribe.aws.upbound.io/v1beta2 +kind: LanguageModel +metadata: + annotations: + meta.upbound.io/example-id: transcribe/v1beta2/languagemodel + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + baseModelName: NarrowBand + inputDataConfig: + - dataAccessRoleArnSelector: + matchLabels: + testing.upbound.io/example-name: example + s3Uri: s3://${aws_s3_bucket.example.id}/transcribe/ + languageCode: en-US + region: us-west-1 + tags: + ENVIRONMENT: development + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: transcribe/v1beta2/languagemodel + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.example.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + annotations: + meta.upbound.io/example-id: transcribe/v1beta2/languagemodel + labels: + testing.upbound.io/example-name: test_policy + name: test-policy +spec: + forProvider: + policy: |- + ${jsonencode({ + Version = 
"2012-10-17" + Statement = [ + { + Action = [ + "s3:GetObject", + "s3:ListBucket", + ] + Effect = "Allow" + Resource = ["*"] + }, + ] + })} + roleSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Bucket +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: transcribe/v1beta2/languagemodel + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + forceDestroy: true + region: us-west-1 + +--- + +apiVersion: s3.aws.upbound.io/v1beta2 +kind: Object +metadata: + annotations: + meta.upbound.io/example-id: transcribe/v1beta2/languagemodel + labels: + testing.upbound.io/example-name: object + name: object +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + key: transcribe/test1.txt + region: us-west-1 + source: test1.txt diff --git a/examples-generated/transfer/v1beta1/sshkey.yaml b/examples-generated/transfer/v1beta1/sshkey.yaml index 366ef993f0..bc990352de 100644 --- a/examples-generated/transfer/v1beta1/sshkey.yaml +++ b/examples-generated/transfer/v1beta1/sshkey.yaml @@ -50,7 +50,7 @@ spec: --- -apiVersion: transfer.aws.upbound.io/v1beta1 +apiVersion: transfer.aws.upbound.io/v1beta2 kind: Server metadata: annotations: @@ -67,7 +67,7 @@ spec: --- -apiVersion: transfer.aws.upbound.io/v1beta1 +apiVersion: transfer.aws.upbound.io/v1beta2 kind: User metadata: annotations: diff --git a/examples-generated/transfer/v1beta1/tag.yaml b/examples-generated/transfer/v1beta1/tag.yaml index b74f806c51..b42988431d 100644 --- a/examples-generated/transfer/v1beta1/tag.yaml +++ b/examples-generated/transfer/v1beta1/tag.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: transfer.aws.upbound.io/v1beta1 +apiVersion: transfer.aws.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/transfer/v1beta2/server.yaml 
b/examples-generated/transfer/v1beta2/server.yaml new file mode 100644 index 0000000000..1f07f84ab5 --- /dev/null +++ b/examples-generated/transfer/v1beta2/server.yaml @@ -0,0 +1,13 @@ +apiVersion: transfer.aws.upbound.io/v1beta2 +kind: Server +metadata: + annotations: + meta.upbound.io/example-id: transfer/v1beta2/server + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + tags: + Name: Example diff --git a/examples-generated/transfer/v1beta2/user.yaml b/examples-generated/transfer/v1beta2/user.yaml new file mode 100644 index 0000000000..73f886526c --- /dev/null +++ b/examples-generated/transfer/v1beta2/user.yaml @@ -0,0 +1,69 @@ +apiVersion: transfer.aws.upbound.io/v1beta2 +kind: User +metadata: + annotations: + meta.upbound.io/example-id: transfer/v1beta2/user + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + homeDirectoryMappings: + - entry: /test.pdf + target: /bucket3/test-path/tftestuser.pdf + homeDirectoryType: LOGICAL + region: us-west-1 + roleSelector: + matchLabels: + testing.upbound.io/example-name: foo + serverIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: transfer/v1beta2/user + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.assume_role.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicy +metadata: + annotations: + meta.upbound.io/example-id: transfer/v1beta2/user + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + policy: ${data.aws_iam_policy_document.foo.json} + roleSelector: + matchLabels: + testing.upbound.io/example-name: foo + +--- + +apiVersion: transfer.aws.upbound.io/v1beta2 +kind: Server +metadata: + annotations: + meta.upbound.io/example-id: transfer/v1beta2/user + labels: + 
testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + identityProviderType: SERVICE_MANAGED + region: us-west-1 + tags: + NAME: tf-acc-test-transfer-server diff --git a/examples-generated/transfer/v1beta2/workflow.yaml b/examples-generated/transfer/v1beta2/workflow.yaml new file mode 100644 index 0000000000..fc7b435ef9 --- /dev/null +++ b/examples-generated/transfer/v1beta2/workflow.yaml @@ -0,0 +1,16 @@ +apiVersion: transfer.aws.upbound.io/v1beta2 +kind: Workflow +metadata: + annotations: + meta.upbound.io/example-id: transfer/v1beta2/workflow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + steps: + - deleteStepDetails: + - name: example + sourceFileLocation: ${original.file} + type: DELETE diff --git a/examples-generated/waf/v1beta2/bytematchset.yaml b/examples-generated/waf/v1beta2/bytematchset.yaml new file mode 100644 index 0000000000..350f8a8e19 --- /dev/null +++ b/examples-generated/waf/v1beta2/bytematchset.yaml @@ -0,0 +1,19 @@ +apiVersion: waf.aws.upbound.io/v1beta2 +kind: ByteMatchSet +metadata: + annotations: + meta.upbound.io/example-id: waf/v1beta2/bytematchset + labels: + testing.upbound.io/example-name: byte_set + name: byte-set +spec: + forProvider: + byteMatchTuples: + - fieldToMatch: + - data: referer + type: HEADER + positionalConstraint: CONTAINS + targetString: badrefer1 + textTransformation: NONE + name: tf_waf_byte_match_set + region: us-west-1 diff --git a/examples-generated/waf/v1beta2/regexmatchset.yaml b/examples-generated/waf/v1beta2/regexmatchset.yaml new file mode 100644 index 0000000000..2391c91ebc --- /dev/null +++ b/examples-generated/waf/v1beta2/regexmatchset.yaml @@ -0,0 +1,38 @@ +apiVersion: waf.aws.upbound.io/v1beta2 +kind: RegexMatchSet +metadata: + annotations: + meta.upbound.io/example-id: waf/v1beta2/regexmatchset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example + regexMatchTuple: + - 
fieldToMatch: + - data: User-Agent + type: HEADER + regexPatternSetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + textTransformation: NONE + region: us-west-1 + +--- + +apiVersion: waf.aws.upbound.io/v1beta1 +kind: RegexPatternSet +metadata: + annotations: + meta.upbound.io/example-id: waf/v1beta2/regexmatchset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example + regexPatternStrings: + - one + - two + region: us-west-1 diff --git a/examples-generated/waf/v1beta2/sizeconstraintset.yaml b/examples-generated/waf/v1beta2/sizeconstraintset.yaml new file mode 100644 index 0000000000..ef74fdd8f2 --- /dev/null +++ b/examples-generated/waf/v1beta2/sizeconstraintset.yaml @@ -0,0 +1,18 @@ +apiVersion: waf.aws.upbound.io/v1beta2 +kind: SizeConstraintSet +metadata: + annotations: + meta.upbound.io/example-id: waf/v1beta2/sizeconstraintset + labels: + testing.upbound.io/example-name: size_constraint_set + name: size-constraint-set +spec: + forProvider: + name: tfsize_constraints + region: us-west-1 + sizeConstraints: + - comparisonOperator: EQ + fieldToMatch: + - type: BODY + size: "4096" + textTransformation: NONE diff --git a/examples-generated/waf/v1beta2/sqlinjectionmatchset.yaml b/examples-generated/waf/v1beta2/sqlinjectionmatchset.yaml new file mode 100644 index 0000000000..2cf6262134 --- /dev/null +++ b/examples-generated/waf/v1beta2/sqlinjectionmatchset.yaml @@ -0,0 +1,16 @@ +apiVersion: waf.aws.upbound.io/v1beta2 +kind: SQLInjectionMatchSet +metadata: + annotations: + meta.upbound.io/example-id: waf/v1beta2/sqlinjectionmatchset + labels: + testing.upbound.io/example-name: sql_injection_match_set + name: sql-injection-match-set +spec: + forProvider: + name: tf-sql_injection_match_set + region: us-west-1 + sqlInjectionMatchTuples: + - fieldToMatch: + - type: QUERY_STRING + textTransformation: URL_DECODE diff --git a/examples-generated/waf/v1beta2/webacl.yaml 
b/examples-generated/waf/v1beta2/webacl.yaml new file mode 100644 index 0000000000..009c90ed2d --- /dev/null +++ b/examples-generated/waf/v1beta2/webacl.yaml @@ -0,0 +1,63 @@ +apiVersion: waf.aws.upbound.io/v1beta2 +kind: WebACL +metadata: + annotations: + meta.upbound.io/example-id: waf/v1beta2/webacl + labels: + testing.upbound.io/example-name: waf_acl + name: waf-acl +spec: + forProvider: + defaultAction: + - type: ALLOW + metricName: tfWebACL + name: tfWebACL + region: us-west-1 + rules: + - action: + - type: BLOCK + priority: 1 + ruleIdSelector: + matchLabels: + testing.upbound.io/example-name: wafrule + type: REGULAR + +--- + +apiVersion: waf.aws.upbound.io/v1beta1 +kind: IPSet +metadata: + annotations: + meta.upbound.io/example-id: waf/v1beta2/webacl + labels: + testing.upbound.io/example-name: ipset + name: ipset +spec: + forProvider: + ipSetDescriptors: + - type: IPV4 + value: 192.0.7.0/24 + name: tfIPSet + region: us-west-1 + +--- + +apiVersion: waf.aws.upbound.io/v1beta1 +kind: Rule +metadata: + annotations: + meta.upbound.io/example-id: waf/v1beta2/webacl + labels: + testing.upbound.io/example-name: wafrule + name: wafrule +spec: + forProvider: + metricName: tfWAFRule + name: tfWAFRule + predicates: + - dataIdSelector: + matchLabels: + testing.upbound.io/example-name: ipset + negated: false + type: IPMatch + region: us-west-1 diff --git a/examples-generated/waf/v1beta2/xssmatchset.yaml b/examples-generated/waf/v1beta2/xssmatchset.yaml new file mode 100644 index 0000000000..71b5104a2c --- /dev/null +++ b/examples-generated/waf/v1beta2/xssmatchset.yaml @@ -0,0 +1,19 @@ +apiVersion: waf.aws.upbound.io/v1beta2 +kind: XSSMatchSet +metadata: + annotations: + meta.upbound.io/example-id: waf/v1beta2/xssmatchset + labels: + testing.upbound.io/example-name: xss_match_set + name: xss-match-set +spec: + forProvider: + name: xss_match_set + region: us-west-1 + xssMatchTuples: + - fieldToMatch: + - type: URI + textTransformation: NONE + - fieldToMatch: + - type: 
QUERY_STRING + textTransformation: NONE diff --git a/examples-generated/wafregional/v1beta2/bytematchset.yaml b/examples-generated/wafregional/v1beta2/bytematchset.yaml new file mode 100644 index 0000000000..c0f31183bc --- /dev/null +++ b/examples-generated/wafregional/v1beta2/bytematchset.yaml @@ -0,0 +1,19 @@ +apiVersion: wafregional.aws.upbound.io/v1beta2 +kind: ByteMatchSet +metadata: + annotations: + meta.upbound.io/example-id: wafregional/v1beta2/bytematchset + labels: + testing.upbound.io/example-name: byte_set + name: byte-set +spec: + forProvider: + byteMatchTuples: + - fieldToMatch: + - data: referer + type: HEADER + positionalConstraint: CONTAINS + targetString: badrefer1 + textTransformation: NONE + name: tf_waf_byte_match_set + region: us-west-1 diff --git a/examples-generated/wafregional/v1beta2/regexmatchset.yaml b/examples-generated/wafregional/v1beta2/regexmatchset.yaml new file mode 100644 index 0000000000..8d4bb74f15 --- /dev/null +++ b/examples-generated/wafregional/v1beta2/regexmatchset.yaml @@ -0,0 +1,38 @@ +apiVersion: wafregional.aws.upbound.io/v1beta2 +kind: RegexMatchSet +metadata: + annotations: + meta.upbound.io/example-id: wafregional/v1beta2/regexmatchset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example + regexMatchTuple: + - fieldToMatch: + - data: User-Agent + type: HEADER + regexPatternSetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + textTransformation: NONE + region: us-west-1 + +--- + +apiVersion: wafregional.aws.upbound.io/v1beta1 +kind: RegexPatternSet +metadata: + annotations: + meta.upbound.io/example-id: wafregional/v1beta2/regexmatchset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example + regexPatternStrings: + - one + - two + region: us-west-1 diff --git a/examples-generated/wafregional/v1beta2/sizeconstraintset.yaml b/examples-generated/wafregional/v1beta2/sizeconstraintset.yaml new 
file mode 100644 index 0000000000..37e0896aed --- /dev/null +++ b/examples-generated/wafregional/v1beta2/sizeconstraintset.yaml @@ -0,0 +1,18 @@ +apiVersion: wafregional.aws.upbound.io/v1beta2 +kind: SizeConstraintSet +metadata: + annotations: + meta.upbound.io/example-id: wafregional/v1beta2/sizeconstraintset + labels: + testing.upbound.io/example-name: size_constraint_set + name: size-constraint-set +spec: + forProvider: + name: tfsize_constraints + region: us-west-1 + sizeConstraints: + - comparisonOperator: EQ + fieldToMatch: + - type: BODY + size: "4096" + textTransformation: NONE diff --git a/examples-generated/wafregional/v1beta2/sqlinjectionmatchset.yaml b/examples-generated/wafregional/v1beta2/sqlinjectionmatchset.yaml new file mode 100644 index 0000000000..f580249504 --- /dev/null +++ b/examples-generated/wafregional/v1beta2/sqlinjectionmatchset.yaml @@ -0,0 +1,16 @@ +apiVersion: wafregional.aws.upbound.io/v1beta2 +kind: SQLInjectionMatchSet +metadata: + annotations: + meta.upbound.io/example-id: wafregional/v1beta2/sqlinjectionmatchset + labels: + testing.upbound.io/example-name: sql_injection_match_set + name: sql-injection-match-set +spec: + forProvider: + name: tf-sql_injection_match_set + region: us-west-1 + sqlInjectionMatchTuple: + - fieldToMatch: + - type: QUERY_STRING + textTransformation: URL_DECODE diff --git a/examples-generated/wafregional/v1beta2/webacl.yaml b/examples-generated/wafregional/v1beta2/webacl.yaml new file mode 100644 index 0000000000..cd24df989e --- /dev/null +++ b/examples-generated/wafregional/v1beta2/webacl.yaml @@ -0,0 +1,63 @@ +apiVersion: wafregional.aws.upbound.io/v1beta2 +kind: WebACL +metadata: + annotations: + meta.upbound.io/example-id: wafregional/v1beta2/webacl + labels: + testing.upbound.io/example-name: wafacl + name: wafacl +spec: + forProvider: + defaultAction: + - type: ALLOW + metricName: tfWebACL + name: tfWebACL + region: us-west-1 + rule: + - action: + - type: BLOCK + priority: 1 + ruleIdSelector: + 
matchLabels: + testing.upbound.io/example-name: wafrule + type: REGULAR + +--- + +apiVersion: wafregional.aws.upbound.io/v1beta1 +kind: IPSet +metadata: + annotations: + meta.upbound.io/example-id: wafregional/v1beta2/webacl + labels: + testing.upbound.io/example-name: ipset + name: ipset +spec: + forProvider: + ipSetDescriptor: + - type: IPV4 + value: 192.0.7.0/24 + name: tfIPSet + region: us-west-1 + +--- + +apiVersion: wafregional.aws.upbound.io/v1beta1 +kind: Rule +metadata: + annotations: + meta.upbound.io/example-id: wafregional/v1beta2/webacl + labels: + testing.upbound.io/example-name: wafrule + name: wafrule +spec: + forProvider: + metricName: tfWAFRule + name: tfWAFRule + predicate: + - dataIdSelector: + matchLabels: + testing.upbound.io/example-name: ipset + negated: false + type: IPMatch + region: us-west-1 diff --git a/examples-generated/wafregional/v1beta2/xssmatchset.yaml b/examples-generated/wafregional/v1beta2/xssmatchset.yaml new file mode 100644 index 0000000000..afc48f23af --- /dev/null +++ b/examples-generated/wafregional/v1beta2/xssmatchset.yaml @@ -0,0 +1,19 @@ +apiVersion: wafregional.aws.upbound.io/v1beta2 +kind: XSSMatchSet +metadata: + annotations: + meta.upbound.io/example-id: wafregional/v1beta2/xssmatchset + labels: + testing.upbound.io/example-name: xss_match_set + name: xss-match-set +spec: + forProvider: + name: xss_match_set + region: us-west-1 + xssMatchTuple: + - fieldToMatch: + - type: URI + textTransformation: NONE + - fieldToMatch: + - type: QUERY_STRING + textTransformation: NONE diff --git a/examples-generated/workspaces/v1beta2/directory.yaml b/examples-generated/workspaces/v1beta2/directory.yaml new file mode 100644 index 0000000000..b8237c165f --- /dev/null +++ b/examples-generated/workspaces/v1beta2/directory.yaml @@ -0,0 +1,212 @@ +apiVersion: workspaces.aws.upbound.io/v1beta2 +kind: Directory +metadata: + annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + directoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + selfServicePermissions: + - changeComputeType: true + increaseVolumeSize: true + rebuildWorkspace: true + restartWorkspace: true + switchRunningMode: true + subnetIdRefs: + - name: example_c + - name: example_d + tags: + Example: true + workspaceAccessProperties: + - deviceTypeAndroid: ALLOW + deviceTypeChromeos: ALLOW + deviceTypeIos: ALLOW + deviceTypeLinux: DENY + deviceTypeOsx: ALLOW + deviceTypeWeb: DENY + deviceTypeWindows: DENY + deviceTypeZeroclient: DENY + workspaceCreationProperties: + - customSecurityGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + defaultOu: OU=AWS,DC=Workgroup,DC=Example,DC=com + enableInternetAccess: true + enableMaintenanceMode: true + userEnabledAsLocalAdministrator: true + +--- + +apiVersion: ds.aws.upbound.io/v1beta2 +kind: Directory +metadata: + annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: corp.example.com + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + region: us-west-1 + size: Small + vpcSettings: + - subnetIdsRefs: + - name: example_a + - name: example_b + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Role +metadata: + annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + testing.upbound.io/example-name: workspaces_default + name: workspaces-default +spec: + forProvider: + assumeRolePolicy: ${data.aws_iam_policy_document.workspaces.json} + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicyAttachment +metadata: + annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + testing.upbound.io/example-name: 
workspaces_default_self_service_access + name: workspaces-default-self-service-access +spec: + forProvider: + policyArnSelector: + matchLabels: + testing.upbound.io/example-name: example + roleSelector: + matchLabels: + testing.upbound.io/example-name: workspaces_default + +--- + +apiVersion: iam.aws.upbound.io/v1beta1 +kind: RolePolicyAttachment +metadata: + annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + testing.upbound.io/example-name: workspaces_default_service_access + name: workspaces-default-service-access +spec: + forProvider: + policyArnSelector: + matchLabels: + testing.upbound.io/example-name: example + roleSelector: + matchLabels: + testing.upbound.io/example-name: workspaces_default + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + testing.upbound.io/example-name: example_a + name: example-a +spec: + forProvider: + availabilityZone: us-east-1a + cidrBlock: 10.0.0.0/24 + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + testing.upbound.io/example-name: example_b + name: example-b +spec: + forProvider: + availabilityZone: us-east-1b + cidrBlock: 10.0.1.0/24 + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + testing.upbound.io/example-name: example_c + name: example-c +spec: + forProvider: + availabilityZone: us-east-1c + cidrBlock: 10.0.2.0/24 + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + 
annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + testing.upbound.io/example-name: example_d + name: example-d +spec: + forProvider: + availabilityZone: us-east-1d + cidrBlock: 10.0.3.0/24 + region: us-west-1 + vpcIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: workspaces/v1beta2/directory + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cidrBlock: 10.0.0.0/16 + region: us-west-1 diff --git a/examples-generated/xray/v1beta2/group.yaml b/examples-generated/xray/v1beta2/group.yaml new file mode 100644 index 0000000000..3d9f321c28 --- /dev/null +++ b/examples-generated/xray/v1beta2/group.yaml @@ -0,0 +1,16 @@ +apiVersion: xray.aws.upbound.io/v1beta2 +kind: Group +metadata: + annotations: + meta.upbound.io/example-id: xray/v1beta2/group + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + filterExpression: responsetime > 5 + groupName: example + insightsConfiguration: + - insightsEnabled: true + notificationsEnabled: true + region: us-west-1 diff --git a/package/crds/accessanalyzer.aws.upbound.io_analyzers.yaml b/package/crds/accessanalyzer.aws.upbound.io_analyzers.yaml index 89953037b5..c6291b4084 100644 --- a/package/crds/accessanalyzer.aws.upbound.io_analyzers.yaml +++ b/package/crds/accessanalyzer.aws.upbound.io_analyzers.yaml @@ -431,3 +431,404 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + 
description: Analyzer is the Schema for the Analyzers API. Manages an Access + Analyzer Analyzer + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AnalyzerSpec defines the desired state of Analyzer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configuration: + description: A block that specifies the configuration of the analyzer. + Documented below + properties: + unusedAccess: + description: A block that specifies the configuration of an + unused access analyzer for an AWS organization or account. 
+ Documented below + properties: + unusedAccessAge: + description: The specified access age in days for which + to generate findings for unused access. + type: number + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: Type of Analyzer. Valid values are ACCOUNT, ORGANIZATION, + ACCOUNT_UNUSED_ACCESS , ORGANIZATION_UNUSED_ACCESS. Defaults + to ACCOUNT. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configuration: + description: A block that specifies the configuration of the analyzer. + Documented below + properties: + unusedAccess: + description: A block that specifies the configuration of an + unused access analyzer for an AWS organization or account. + Documented below + properties: + unusedAccessAge: + description: The specified access age in days for which + to generate findings for unused access. + type: number + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: Type of Analyzer. 
Valid values are ACCOUNT, ORGANIZATION, + ACCOUNT_UNUSED_ACCESS , ORGANIZATION_UNUSED_ACCESS. Defaults + to ACCOUNT. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AnalyzerStatus defines the observed state of Analyzer. + properties: + atProvider: + properties: + arn: + description: ARN of the Analyzer. + type: string + configuration: + description: A block that specifies the configuration of the analyzer. 
+ Documented below + properties: + unusedAccess: + description: A block that specifies the configuration of an + unused access analyzer for an AWS organization or account. + Documented below + properties: + unusedAccessAge: + description: The specified access age in days for which + to generate findings for unused access. + type: number + type: object + type: object + id: + description: Analyzer name. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: + description: Type of Analyzer. Valid values are ACCOUNT, ORGANIZATION, + ACCOUNT_UNUSED_ACCESS , ORGANIZATION_UNUSED_ACCESS. Defaults + to ACCOUNT. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/acm.aws.upbound.io_certificates.yaml b/package/crds/acm.aws.upbound.io_certificates.yaml index 155b897775..ba31ca751b 100644 --- a/package/crds/acm.aws.upbound.io_certificates.yaml +++ b/package/crds/acm.aws.upbound.io_certificates.yaml @@ -813,3 +813,640 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Certificate is the Schema for the Certificates API. Requests + and manages a certificate from Amazon Certificate Manager (ACM). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CertificateSpec defines the desired state of Certificate + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificateAuthorityArn: + description: ARN of an ACM PCA + type: string + certificateBody: + description: Certificate's PEM-formatted public key + type: string + certificateChain: + description: Certificate's PEM-formatted chain + type: string + domainName: + description: Domain name for which the certificate should be issued + type: string + earlyRenewalDuration: + description: |- + Amount of time to start automatic renewal process before expiration. + Has no effect if less than 60 days. + Represented by either + a subset of RFC 3339 duration supporting years, months, and days (e.g., P90D), + or a string such as 2160h. 
+ type: string + keyAlgorithm: + description: Specifies the algorithm of the public and private + key pair that your Amazon issued certificate uses to encrypt + data. See ACM Certificate characteristics for more details. + type: string + options: + description: Configuration block used to set certificate options. + Detailed below. + properties: + certificateTransparencyLoggingPreference: + description: Whether certificate details should be added to + a certificate transparency log. Valid values are ENABLED + or DISABLED. See https://docs.aws.amazon.com/acm/latest/userguide/acm-concepts.html#concept-transparency + for more details. + type: string + type: object + privateKeySecretRef: + description: Certificate's PEM-formatted private key + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + subjectAlternativeNames: + description: Set of domains that should be SANs in the issued + certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + validationMethod: + description: Which method to use for validation. DNS or EMAIL + are valid. + type: string + validationOption: + description: Configuration block used to specify information about + the initial validation of each domain name. Detailed below. + items: + properties: + domainName: + description: Fully qualified domain name (FQDN) in the certificate. + type: string + validationDomain: + description: Domain name that you want ACM to use to send + you validation emails. This domain name is the suffix + of the email addresses that you want ACM to use. 
This + must be the same as the domain_name value or a superdomain + of the domain_name value. For example, if you request + a certificate for "testing.example.com", you can specify + "example.com" for this value. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + certificateAuthorityArn: + description: ARN of an ACM PCA + type: string + certificateBody: + description: Certificate's PEM-formatted public key + type: string + certificateChain: + description: Certificate's PEM-formatted chain + type: string + domainName: + description: Domain name for which the certificate should be issued + type: string + earlyRenewalDuration: + description: |- + Amount of time to start automatic renewal process before expiration. + Has no effect if less than 60 days. + Represented by either + a subset of RFC 3339 duration supporting years, months, and days (e.g., P90D), + or a string such as 2160h. + type: string + keyAlgorithm: + description: Specifies the algorithm of the public and private + key pair that your Amazon issued certificate uses to encrypt + data. See ACM Certificate characteristics for more details. + type: string + options: + description: Configuration block used to set certificate options. + Detailed below. 
+ properties: + certificateTransparencyLoggingPreference: + description: Whether certificate details should be added to + a certificate transparency log. Valid values are ENABLED + or DISABLED. See https://docs.aws.amazon.com/acm/latest/userguide/acm-concepts.html#concept-transparency + for more details. + type: string + type: object + privateKeySecretRef: + description: Certificate's PEM-formatted private key + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + subjectAlternativeNames: + description: Set of domains that should be SANs in the issued + certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + validationMethod: + description: Which method to use for validation. DNS or EMAIL + are valid. + type: string + validationOption: + description: Configuration block used to specify information about + the initial validation of each domain name. Detailed below. + items: + properties: + domainName: + description: Fully qualified domain name (FQDN) in the certificate. + type: string + validationDomain: + description: Domain name that you want ACM to use to send + you validation emails. This domain name is the suffix + of the email addresses that you want ACM to use. This + must be the same as the domain_name value or a superdomain + of the domain_name value. For example, if you request + a certificate for "testing.example.com", you can specify + "example.com" for this value. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: CertificateStatus defines the observed state of Certificate. 
+ properties: + atProvider: + properties: + arn: + description: ARN of the certificate + type: string + certificateAuthorityArn: + description: ARN of an ACM PCA + type: string + certificateBody: + description: Certificate's PEM-formatted public key + type: string + certificateChain: + description: Certificate's PEM-formatted chain + type: string + domainName: + description: Domain name for which the certificate should be issued + type: string + domainValidationOptions: + description: |- + Set of domain validation objects which can be used to complete certificate validation. + Can have more than one element, e.g., if SANs are defined. + Only set if DNS-validation was used. + items: + properties: + domainName: + description: Fully qualified domain name (FQDN) in the certificate. + type: string + resourceRecordName: + description: The name of the DNS record to create to validate + the certificate + type: string + resourceRecordType: + description: The type of DNS record to create + type: string + resourceRecordValue: + description: The value the DNS record needs to have + type: string + type: object + type: array + earlyRenewalDuration: + description: |- + Amount of time to start automatic renewal process before expiration. + Has no effect if less than 60 days. + Represented by either + a subset of RFC 3339 duration supporting years, months, and days (e.g., P90D), + or a string such as 2160h. + type: string + id: + description: ARN of the certificate + type: string + keyAlgorithm: + description: Specifies the algorithm of the public and private + key pair that your Amazon issued certificate uses to encrypt + data. See ACM Certificate characteristics for more details. + type: string + notAfter: + description: Expiration date and time of the certificate. + type: string + notBefore: + description: Start of the validity period of the certificate. + type: string + options: + description: Configuration block used to set certificate options. + Detailed below. 
+ properties: + certificateTransparencyLoggingPreference: + description: Whether certificate details should be added to + a certificate transparency log. Valid values are ENABLED + or DISABLED. See https://docs.aws.amazon.com/acm/latest/userguide/acm-concepts.html#concept-transparency + for more details. + type: string + type: object + pendingRenewal: + description: true if a Private certificate eligible for managed + renewal is within the early_renewal_duration period. + type: boolean + renewalEligibility: + description: Whether the certificate is eligible for managed renewal. + type: string + renewalSummary: + description: Contains information about the status of ACM's managed + renewal for the certificate. + items: + properties: + renewalStatus: + description: The status of ACM's managed renewal of the + certificate + type: string + renewalStatusReason: + description: The reason that a renewal request was unsuccessful + or is pending + type: string + updatedAt: + type: string + type: object + type: array + status: + description: Status of the certificate. + type: string + subjectAlternativeNames: + description: Set of domains that should be SANs in the issued + certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: + description: Source of the certificate. + type: string + validationEmails: + description: List of addresses that received a validation email. + Only set if EMAIL validation was used. + items: + type: string + type: array + validationMethod: + description: Which method to use for validation. DNS or EMAIL + are valid. 
+ type: string + validationOption: + description: Configuration block used to specify information about + the initial validation of each domain name. Detailed below. + items: + properties: + domainName: + description: Fully qualified domain name (FQDN) in the certificate. + type: string + validationDomain: + description: Domain name that you want ACM to use to send + you validation emails. This domain name is the suffix + of the email addresses that you want ACM to use. This + must be the same as the domain_name value or a superdomain + of the domain_name value. For example, if you request + a certificate for "testing.example.com", you can specify + "example.com" for this value. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/acmpca.aws.upbound.io_certificateauthorities.yaml b/package/crds/acmpca.aws.upbound.io_certificateauthorities.yaml index 161118414b..f375aac0db 100644 --- a/package/crds/acmpca.aws.upbound.io_certificateauthorities.yaml +++ b/package/crds/acmpca.aws.upbound.io_certificateauthorities.yaml @@ -972,3 +972,915 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CertificateAuthority is the Schema for the CertificateAuthoritys + API. Provides a resource to manage AWS Certificate Manager Private Certificate + Authorities + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CertificateAuthoritySpec defines the desired state of CertificateAuthority + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificateAuthorityConfiguration: + description: Nested argument containing algorithms and certificate + subject information. Defined below. + properties: + keyAlgorithm: + description: Type of the public key algorithm and size, in + bits, of the key pair that your key pair creates when it + issues a certificate. Valid values can be found in the ACM + PCA Documentation. + type: string + signingAlgorithm: + description: Name of the algorithm your private CA uses to + sign certificate requests. Valid values can be found in + the ACM PCA Documentation. + type: string + subject: + description: Nested argument that contains X.500 distinguished + name information. At least one nested attribute must be + specified. + properties: + commonName: + description: Fully qualified domain name (FQDN) associated + with the certificate subject. Must be less than or equal + to 64 characters in length. 
+ type: string + country: + description: Two digit code that specifies the country + in which the certificate subject located. Must be less + than or equal to 2 characters in length. + type: string + distinguishedNameQualifier: + description: Disambiguating information for the certificate + subject. Must be less than or equal to 64 characters + in length. + type: string + generationQualifier: + description: Typically a qualifier appended to the name + of an individual. Examples include Jr. for junior, Sr. + for senior, and III for third. Must be less than or + equal to 3 characters in length. + type: string + givenName: + description: First name. Must be less than or equal to + 16 characters in length. + type: string + initials: + description: Concatenation that typically contains the + first letter of the given_name, the first letter of + the middle name if one exists, and the first letter + of the surname. Must be less than or equal to 5 characters + in length. + type: string + locality: + description: Locality (such as a city or town) in which + the certificate subject is located. Must be less than + or equal to 128 characters in length. + type: string + organization: + description: Legal name of the organization with which + the certificate subject is affiliated. Must be less + than or equal to 64 characters in length. + type: string + organizationalUnit: + description: Subdivision or unit of the organization (such + as sales or finance) with which the certificate subject + is affiliated. Must be less than or equal to 64 characters + in length. + type: string + pseudonym: + description: Typically a shortened version of a longer + given_name. For example, Jonathan is often shortened + to John. Elizabeth is often shortened to Beth, Liz, + or Eliza. Must be less than or equal to 128 characters + in length. + type: string + state: + description: State in which the subject of the certificate + is located. Must be less than or equal to 128 characters + in length. 
+ type: string + surname: + description: Family name. In the US and the UK for example, + the surname of an individual is ordered last. In Asian + cultures the surname is typically ordered first. Must + be less than or equal to 40 characters in length. + type: string + title: + description: Title such as Mr. or Ms. which is pre-pended + to the name to refer formally to the certificate subject. + Must be less than or equal to 64 characters in length. + type: string + type: object + type: object + enabled: + description: Whether the certificate authority is enabled or disabled. + Defaults to true. Can only be disabled if the CA is in an ACTIVE + state. + type: boolean + keyStorageSecurityStandard: + description: 'Cryptographic key management compliance standard + used for handling CA keys. Defaults to FIPS_140_2_LEVEL_3_OR_HIGHER. + Valid values: FIPS_140_2_LEVEL_3_OR_HIGHER and FIPS_140_2_LEVEL_2_OR_HIGHER. + Supported standard for each region can be found in the Storage + and security compliance of AWS Private CA private keys Documentation.' + type: string + permanentDeletionTimeInDays: + description: Number of days to make a CA restorable after it has + been deleted, must be between 7 to 30 days, with default to + 30 days. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + revocationConfiguration: + description: Nested argument containing revocation configuration. + Defined below. + properties: + crlConfiguration: + description: Nested argument containing configuration of the + certificate revocation list (CRL), if any, maintained by + the certificate authority. Defined below. + properties: + customCname: + description: Name inserted into the certificate CRL Distribution + Points extension that enables the use of an alias for + the CRL distribution point. Use this value if you don't + want the name of your S3 bucket to be public. Must be + less than or equal to 253 characters in length. 
+ type: string + enabled: + description: Boolean value that specifies whether certificate + revocation lists (CRLs) are enabled. Defaults to false. + type: boolean + expirationInDays: + description: Number of days until a certificate expires. + Must be between 1 and 5000. + type: number + s3BucketName: + description: Name of the S3 bucket that contains the CRL. + If you do not provide a value for the custom_cname argument, + the name of your S3 bucket is placed into the CRL Distribution + Points extension of the issued certificate. You must + specify a bucket policy that allows ACM PCA to write + the CRL to your bucket. Must be between 3 and 255 characters + in length. + type: string + s3ObjectAcl: + description: Determines whether the CRL will be publicly + readable or privately held in the CRL Amazon S3 bucket. + Defaults to PUBLIC_READ. + type: string + type: object + ocspConfiguration: + description: |- + Nested argument containing configuration of + the custom OCSP responder endpoint. Defined below. + properties: + enabled: + description: Boolean value that specifies whether a custom + OCSP responder is enabled. + type: boolean + ocspCustomCname: + description: 'CNAME specifying a customized OCSP domain. + Note: The value of the CNAME must not include a protocol + prefix such as "http://" or "https://".' + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: 'Type of the certificate authority. Defaults to SUBORDINATE. + Valid values: ROOT and SUBORDINATE.' + type: string + usageMode: + description: 'Specifies whether the CA issues general-purpose + certificates that typically require a revocation mechanism, + or short-lived certificates that may optionally omit revocation + because they expire quickly. Short-lived certificate validity + is limited to seven days. Defaults to GENERAL_PURPOSE. 
Valid + values: GENERAL_PURPOSE and SHORT_LIVED_CERTIFICATE.' + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + certificateAuthorityConfiguration: + description: Nested argument containing algorithms and certificate + subject information. Defined below. + properties: + keyAlgorithm: + description: Type of the public key algorithm and size, in + bits, of the key pair that your key pair creates when it + issues a certificate. Valid values can be found in the ACM + PCA Documentation. + type: string + signingAlgorithm: + description: Name of the algorithm your private CA uses to + sign certificate requests. Valid values can be found in + the ACM PCA Documentation. + type: string + subject: + description: Nested argument that contains X.500 distinguished + name information. At least one nested attribute must be + specified. + properties: + commonName: + description: Fully qualified domain name (FQDN) associated + with the certificate subject. Must be less than or equal + to 64 characters in length. + type: string + country: + description: Two digit code that specifies the country + in which the certificate subject located. Must be less + than or equal to 2 characters in length. + type: string + distinguishedNameQualifier: + description: Disambiguating information for the certificate + subject. 
Must be less than or equal to 64 characters + in length. + type: string + generationQualifier: + description: Typically a qualifier appended to the name + of an individual. Examples include Jr. for junior, Sr. + for senior, and III for third. Must be less than or + equal to 3 characters in length. + type: string + givenName: + description: First name. Must be less than or equal to + 16 characters in length. + type: string + initials: + description: Concatenation that typically contains the + first letter of the given_name, the first letter of + the middle name if one exists, and the first letter + of the surname. Must be less than or equal to 5 characters + in length. + type: string + locality: + description: Locality (such as a city or town) in which + the certificate subject is located. Must be less than + or equal to 128 characters in length. + type: string + organization: + description: Legal name of the organization with which + the certificate subject is affiliated. Must be less + than or equal to 64 characters in length. + type: string + organizationalUnit: + description: Subdivision or unit of the organization (such + as sales or finance) with which the certificate subject + is affiliated. Must be less than or equal to 64 characters + in length. + type: string + pseudonym: + description: Typically a shortened version of a longer + given_name. For example, Jonathan is often shortened + to John. Elizabeth is often shortened to Beth, Liz, + or Eliza. Must be less than or equal to 128 characters + in length. + type: string + state: + description: State in which the subject of the certificate + is located. Must be less than or equal to 128 characters + in length. + type: string + surname: + description: Family name. In the US and the UK for example, + the surname of an individual is ordered last. In Asian + cultures the surname is typically ordered first. Must + be less than or equal to 40 characters in length. 
+ type: string + title: + description: Title such as Mr. or Ms. which is pre-pended + to the name to refer formally to the certificate subject. + Must be less than or equal to 64 characters in length. + type: string + type: object + type: object + enabled: + description: Whether the certificate authority is enabled or disabled. + Defaults to true. Can only be disabled if the CA is in an ACTIVE + state. + type: boolean + keyStorageSecurityStandard: + description: 'Cryptographic key management compliance standard + used for handling CA keys. Defaults to FIPS_140_2_LEVEL_3_OR_HIGHER. + Valid values: FIPS_140_2_LEVEL_3_OR_HIGHER and FIPS_140_2_LEVEL_2_OR_HIGHER. + Supported standard for each region can be found in the Storage + and security compliance of AWS Private CA private keys Documentation.' + type: string + permanentDeletionTimeInDays: + description: Number of days to make a CA restorable after it has + been deleted, must be between 7 to 30 days, with default to + 30 days. + type: number + revocationConfiguration: + description: Nested argument containing revocation configuration. + Defined below. + properties: + crlConfiguration: + description: Nested argument containing configuration of the + certificate revocation list (CRL), if any, maintained by + the certificate authority. Defined below. + properties: + customCname: + description: Name inserted into the certificate CRL Distribution + Points extension that enables the use of an alias for + the CRL distribution point. Use this value if you don't + want the name of your S3 bucket to be public. Must be + less than or equal to 253 characters in length. + type: string + enabled: + description: Boolean value that specifies whether certificate + revocation lists (CRLs) are enabled. Defaults to false. + type: boolean + expirationInDays: + description: Number of days until a certificate expires. + Must be between 1 and 5000. + type: number + s3BucketName: + description: Name of the S3 bucket that contains the CRL. 
+ If you do not provide a value for the custom_cname argument, + the name of your S3 bucket is placed into the CRL Distribution + Points extension of the issued certificate. You must + specify a bucket policy that allows ACM PCA to write + the CRL to your bucket. Must be between 3 and 255 characters + in length. + type: string + s3ObjectAcl: + description: Determines whether the CRL will be publicly + readable or privately held in the CRL Amazon S3 bucket. + Defaults to PUBLIC_READ. + type: string + type: object + ocspConfiguration: + description: |- + Nested argument containing configuration of + the custom OCSP responder endpoint. Defined below. + properties: + enabled: + description: Boolean value that specifies whether a custom + OCSP responder is enabled. + type: boolean + ocspCustomCname: + description: 'CNAME specifying a customized OCSP domain. + Note: The value of the CNAME must not include a protocol + prefix such as "http://" or "https://".' + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: 'Type of the certificate authority. Defaults to SUBORDINATE. + Valid values: ROOT and SUBORDINATE.' + type: string + usageMode: + description: 'Specifies whether the CA issues general-purpose + certificates that typically require a revocation mechanism, + or short-lived certificates that may optionally omit revocation + because they expire quickly. Short-lived certificate validity + is limited to seven days. Defaults to GENERAL_PURPOSE. Valid + values: GENERAL_PURPOSE and SHORT_LIVED_CERTIFICATE.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.certificateAuthorityConfiguration is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.certificateAuthorityConfiguration) + || (has(self.initProvider) && has(self.initProvider.certificateAuthorityConfiguration))' + status: + description: CertificateAuthorityStatus defines the observed state of + CertificateAuthority. + properties: + atProvider: + properties: + arn: + description: ARN of the certificate authority. 
+ type: string + certificate: + description: Base64-encoded certificate authority (CA) certificate. + Only available after the certificate authority certificate has + been imported. + type: string + certificateAuthorityConfiguration: + description: Nested argument containing algorithms and certificate + subject information. Defined below. + properties: + keyAlgorithm: + description: Type of the public key algorithm and size, in + bits, of the key pair that your key pair creates when it + issues a certificate. Valid values can be found in the ACM + PCA Documentation. + type: string + signingAlgorithm: + description: Name of the algorithm your private CA uses to + sign certificate requests. Valid values can be found in + the ACM PCA Documentation. + type: string + subject: + description: Nested argument that contains X.500 distinguished + name information. At least one nested attribute must be + specified. + properties: + commonName: + description: Fully qualified domain name (FQDN) associated + with the certificate subject. Must be less than or equal + to 64 characters in length. + type: string + country: + description: Two digit code that specifies the country + in which the certificate subject located. Must be less + than or equal to 2 characters in length. + type: string + distinguishedNameQualifier: + description: Disambiguating information for the certificate + subject. Must be less than or equal to 64 characters + in length. + type: string + generationQualifier: + description: Typically a qualifier appended to the name + of an individual. Examples include Jr. for junior, Sr. + for senior, and III for third. Must be less than or + equal to 3 characters in length. + type: string + givenName: + description: First name. Must be less than or equal to + 16 characters in length. 
+ type: string + initials: + description: Concatenation that typically contains the + first letter of the given_name, the first letter of + the middle name if one exists, and the first letter + of the surname. Must be less than or equal to 5 characters + in length. + type: string + locality: + description: Locality (such as a city or town) in which + the certificate subject is located. Must be less than + or equal to 128 characters in length. + type: string + organization: + description: Legal name of the organization with which + the certificate subject is affiliated. Must be less + than or equal to 64 characters in length. + type: string + organizationalUnit: + description: Subdivision or unit of the organization (such + as sales or finance) with which the certificate subject + is affiliated. Must be less than or equal to 64 characters + in length. + type: string + pseudonym: + description: Typically a shortened version of a longer + given_name. For example, Jonathan is often shortened + to John. Elizabeth is often shortened to Beth, Liz, + or Eliza. Must be less than or equal to 128 characters + in length. + type: string + state: + description: State in which the subject of the certificate + is located. Must be less than or equal to 128 characters + in length. + type: string + surname: + description: Family name. In the US and the UK for example, + the surname of an individual is ordered last. In Asian + cultures the surname is typically ordered first. Must + be less than or equal to 40 characters in length. + type: string + title: + description: Title such as Mr. or Ms. which is pre-pended + to the name to refer formally to the certificate subject. + Must be less than or equal to 64 characters in length. + type: string + type: object + type: object + certificateChain: + description: Base64-encoded certificate chain that includes any + intermediate certificates and chains up to root on-premises + certificate that you used to sign your private CA certificate. 
+ The chain does not include your private CA certificate. Only + available after the certificate authority certificate has been + imported. + type: string + certificateSigningRequest: + description: The base64 PEM-encoded certificate signing request + (CSR) for your private CA certificate. + type: string + enabled: + description: Whether the certificate authority is enabled or disabled. + Defaults to true. Can only be disabled if the CA is in an ACTIVE + state. + type: boolean + id: + description: ARN of the certificate authority. + type: string + keyStorageSecurityStandard: + description: 'Cryptographic key management compliance standard + used for handling CA keys. Defaults to FIPS_140_2_LEVEL_3_OR_HIGHER. + Valid values: FIPS_140_2_LEVEL_3_OR_HIGHER and FIPS_140_2_LEVEL_2_OR_HIGHER. + Supported standard for each region can be found in the Storage + and security compliance of AWS Private CA private keys Documentation.' + type: string + notAfter: + description: Date and time after which the certificate authority + is not valid. Only available after the certificate authority + certificate has been imported. + type: string + notBefore: + description: Date and time before which the certificate authority + is not valid. Only available after the certificate authority + certificate has been imported. + type: string + permanentDeletionTimeInDays: + description: Number of days to make a CA restorable after it has + been deleted, must be between 7 to 30 days, with default to + 30 days. + type: number + revocationConfiguration: + description: Nested argument containing revocation configuration. + Defined below. + properties: + crlConfiguration: + description: Nested argument containing configuration of the + certificate revocation list (CRL), if any, maintained by + the certificate authority. Defined below. 
+ properties: + customCname: + description: Name inserted into the certificate CRL Distribution + Points extension that enables the use of an alias for + the CRL distribution point. Use this value if you don't + want the name of your S3 bucket to be public. Must be + less than or equal to 253 characters in length. + type: string + enabled: + description: Boolean value that specifies whether certificate + revocation lists (CRLs) are enabled. Defaults to false. + type: boolean + expirationInDays: + description: Number of days until a certificate expires. + Must be between 1 and 5000. + type: number + s3BucketName: + description: Name of the S3 bucket that contains the CRL. + If you do not provide a value for the custom_cname argument, + the name of your S3 bucket is placed into the CRL Distribution + Points extension of the issued certificate. You must + specify a bucket policy that allows ACM PCA to write + the CRL to your bucket. Must be between 3 and 255 characters + in length. + type: string + s3ObjectAcl: + description: Determines whether the CRL will be publicly + readable or privately held in the CRL Amazon S3 bucket. + Defaults to PUBLIC_READ. + type: string + type: object + ocspConfiguration: + description: |- + Nested argument containing configuration of + the custom OCSP responder endpoint. Defined below. + properties: + enabled: + description: Boolean value that specifies whether a custom + OCSP responder is enabled. + type: boolean + ocspCustomCname: + description: 'CNAME specifying a customized OCSP domain. + Note: The value of the CNAME must not include a protocol + prefix such as "http://" or "https://".' + type: string + type: object + type: object + serial: + description: Serial number of the certificate authority. Only + available after the certificate authority certificate has been + imported. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: + description: 'Type of the certificate authority. Defaults to SUBORDINATE. + Valid values: ROOT and SUBORDINATE.' + type: string + usageMode: + description: 'Specifies whether the CA issues general-purpose + certificates that typically require a revocation mechanism, + or short-lived certificates that may optionally omit revocation + because they expire quickly. Short-lived certificate validity + is limited to seven days. Defaults to GENERAL_PURPOSE. Valid + values: GENERAL_PURPOSE and SHORT_LIVED_CERTIFICATE.' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/acmpca.aws.upbound.io_certificates.yaml b/package/crds/acmpca.aws.upbound.io_certificates.yaml index 28cc725599..b031d89508 100644 --- a/package/crds/acmpca.aws.upbound.io_certificates.yaml +++ b/package/crds/acmpca.aws.upbound.io_certificates.yaml @@ -644,3 +644,623 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Certificate is the Schema for the Certificates API. Provides + a resource to issue a certificate using AWS Certificate Manager Private + Certificate Authority (ACM PCA) + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CertificateSpec defines the desired state of Certificate + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiPassthrough: + description: Specifies X.509 certificate information to be included + in the issued certificate. To use with API Passthrough templates + type: string + certificateAuthorityArn: + description: ARN of the certificate authority. + type: string + certificateAuthorityArnRef: + description: Reference to a CertificateAuthority in acmpca to + populate certificateAuthorityArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateAuthorityArnSelector: + description: Selector for a CertificateAuthority in acmpca to + populate certificateAuthorityArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + certificateSigningRequestSecretRef: + description: Certificate Signing Request in PEM format. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + signingAlgorithm: + description: 'Algorithm to use to sign certificate requests. Valid + values: SHA256WITHRSA, SHA256WITHECDSA, SHA384WITHRSA, SHA384WITHECDSA, + SHA512WITHRSA, SHA512WITHECDSA.' + type: string + templateArn: + description: |- + Template to use when issuing a certificate. + See ACM PCA Documentation for more information. + type: string + validity: + description: Configures end of the validity period for the certificate. + See validity block below. + properties: + type: + description: 'Determines how value is interpreted. Valid values: + DAYS, MONTHS, YEARS, ABSOLUTE, END_DATE.' + type: string + value: + description: If type is DAYS, MONTHS, or YEARS, the relative + time until the certificate expires. If type is ABSOLUTE, + the date in seconds since the Unix epoch. If type is END_DATE, + the date in RFC 3339 format. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + apiPassthrough: + description: Specifies X.509 certificate information to be included + in the issued certificate. To use with API Passthrough templates + type: string + certificateAuthorityArn: + description: ARN of the certificate authority. + type: string + certificateAuthorityArnRef: + description: Reference to a CertificateAuthority in acmpca to + populate certificateAuthorityArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateAuthorityArnSelector: + description: Selector for a CertificateAuthority in acmpca to + populate certificateAuthorityArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + certificateSigningRequestSecretRef: + description: Certificate Signing Request in PEM format. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + signingAlgorithm: + description: 'Algorithm to use to sign certificate requests. Valid + values: SHA256WITHRSA, SHA256WITHECDSA, SHA384WITHRSA, SHA384WITHECDSA, + SHA512WITHRSA, SHA512WITHECDSA.' + type: string + templateArn: + description: |- + Template to use when issuing a certificate. + See ACM PCA Documentation for more information. + type: string + validity: + description: Configures end of the validity period for the certificate. + See validity block below. + properties: + type: + description: 'Determines how value is interpreted. Valid values: + DAYS, MONTHS, YEARS, ABSOLUTE, END_DATE.' + type: string + value: + description: If type is DAYS, MONTHS, or YEARS, the relative + time until the certificate expires. If type is ABSOLUTE, + the date in seconds since the Unix epoch. If type is END_DATE, + the date in RFC 3339 format. + type: string + type: object + required: + - certificateSigningRequestSecretRef + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.certificateSigningRequestSecretRef is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.certificateSigningRequestSecretRef)' + - message: spec.forProvider.signingAlgorithm is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.signingAlgorithm) + || (has(self.initProvider) && has(self.initProvider.signingAlgorithm))' + - message: spec.forProvider.validity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.validity) + || (has(self.initProvider) && has(self.initProvider.validity))' + status: + description: CertificateStatus defines the observed state of Certificate. + properties: + atProvider: + properties: + apiPassthrough: + description: Specifies X.509 certificate information to be included + in the issued certificate. To use with API Passthrough templates + type: string + arn: + description: ARN of the certificate. + type: string + certificate: + description: PEM-encoded certificate value. + type: string + certificateAuthorityArn: + description: ARN of the certificate authority. + type: string + certificateChain: + description: PEM-encoded certificate chain that includes any intermediate + certificates and chains up to root CA. + type: string + id: + type: string + signingAlgorithm: + description: 'Algorithm to use to sign certificate requests. Valid + values: SHA256WITHRSA, SHA256WITHECDSA, SHA384WITHRSA, SHA384WITHECDSA, + SHA512WITHRSA, SHA512WITHECDSA.' 
+ type: string + templateArn: + description: |- + Template to use when issuing a certificate. + See ACM PCA Documentation for more information. + type: string + validity: + description: Configures end of the validity period for the certificate. + See validity block below. + properties: + type: + description: 'Determines how value is interpreted. Valid values: + DAYS, MONTHS, YEARS, ABSOLUTE, END_DATE.' + type: string + value: + description: If type is DAYS, MONTHS, or YEARS, the relative + time until the certificate expires. If type is ABSOLUTE, + the date in seconds since the Unix epoch. If type is END_DATE, + the date in RFC 3339 format. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/amp.aws.upbound.io_workspaces.yaml b/package/crds/amp.aws.upbound.io_workspaces.yaml index da7719d29e..9e2549adb8 100644 --- a/package/crds/amp.aws.upbound.io_workspaces.yaml +++ b/package/crds/amp.aws.upbound.io_workspaces.yaml @@ -574,3 +574,553 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workspace is the Schema for the Workspaces API. Manages an Amazon + Managed Service for Prometheus (AMP) Workspace + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkspaceSpec defines the desired state of Workspace + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + alias: + description: The alias of the prometheus workspace. See more in + AWS Docs. + type: string + kmsKeyArn: + description: The ARN for the KMS encryption key. If this argument + is not provided, then the AWS owned encryption key will be used + to encrypt the data in the workspace. See more in AWS Docs + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + loggingConfiguration: + description: Logging configuration for the workspace. See Logging + Configuration below for details. + properties: + logGroupArn: + description: The ARN of the CloudWatch log group to which + the vended log data will be published. This log group must + exist. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + alias: + description: The alias of the prometheus workspace. See more in + AWS Docs. + type: string + kmsKeyArn: + description: The ARN for the KMS encryption key. If this argument + is not provided, then the AWS owned encryption key will be used + to encrypt the data in the workspace. See more in AWS Docs + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + loggingConfiguration: + description: Logging configuration for the workspace. See Logging + Configuration below for details. + properties: + logGroupArn: + description: The ARN of the CloudWatch log group to which + the vended log data will be published. This log group must + exist. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: WorkspaceStatus defines the observed state of Workspace. + properties: + atProvider: + properties: + alias: + description: The alias of the prometheus workspace. See more in + AWS Docs. + type: string + arn: + description: Amazon Resource Name (ARN) of the workspace. + type: string + id: + description: Identifier of the workspace + type: string + kmsKeyArn: + description: The ARN for the KMS encryption key. 
If this argument + is not provided, then the AWS owned encryption key will be used + to encrypt the data in the workspace. See more in AWS Docs + type: string + loggingConfiguration: + description: Logging configuration for the workspace. See Logging + Configuration below for details. + properties: + logGroupArn: + description: The ARN of the CloudWatch log group to which + the vended log data will be published. This log group must + exist. + type: string + type: object + prometheusEndpoint: + description: Prometheus endpoint available for this workspace. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/amplify.aws.upbound.io_apps.yaml b/package/crds/amplify.aws.upbound.io_apps.yaml index 75c1097d13..8196063ca9 100644 --- a/package/crds/amplify.aws.upbound.io_apps.yaml +++ b/package/crds/amplify.aws.upbound.io_apps.yaml @@ -1040,3 +1040,1019 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: App is the Schema for the Apps API. Provides an Amplify App resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AppSpec defines the desired state of App + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessTokenSecretRef: + description: Personal access token for a third-party source control + system for an Amplify app. This token must have write access + to the relevant repo to create a webhook and a read-only deploy + key for the Amplify project. The token is not stored, so after + applying this attribute can be removed and the setup token deleted. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + autoBranchCreationConfig: + description: Automated branch creation configuration for an Amplify + app. 
An auto_branch_creation_config block is documented below. + properties: + basicAuthCredentialsSecretRef: + description: Basic authorization credentials for the autocreated + branch. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + buildSpec: + description: Build specification (build spec) for the autocreated + branch. + type: string + enableAutoBuild: + description: Enables auto building for the autocreated branch. + type: boolean + enableBasicAuth: + description: Enables basic authorization for the autocreated + branch. + type: boolean + enablePerformanceMode: + description: Enables performance mode for the branch. + type: boolean + enablePullRequestPreview: + description: Enables pull request previews for the autocreated + branch. + type: boolean + environmentVariables: + additionalProperties: + type: string + description: Environment variables for the autocreated branch. + type: object + x-kubernetes-map-type: granular + framework: + description: Framework for the autocreated branch. + type: string + pullRequestEnvironmentName: + description: Amplify environment name for the pull request. + type: string + stage: + description: 'Describes the current stage for the autocreated + branch. Valid values: PRODUCTION, BETA, DEVELOPMENT, EXPERIMENTAL, + PULL_REQUEST.' + type: string + type: object + autoBranchCreationPatterns: + description: Automated branch creation glob patterns for an Amplify + app. + items: + type: string + type: array + x-kubernetes-list-type: set + basicAuthCredentialsSecretRef: + description: Credentials for basic authorization for an Amplify + app. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + buildSpec: + description: The build specification (build spec) for an Amplify + app. + type: string + customHeaders: + description: The custom HTTP headers for an Amplify app. + type: string + customRule: + description: Custom rewrite and redirect rules for an Amplify + app. A custom_rule block is documented below. + items: + properties: + condition: + description: Condition for a URL rewrite or redirect rule, + such as a country code. + type: string + source: + description: Source pattern for a URL rewrite or redirect + rule. + type: string + status: + description: 'Status code for a URL rewrite or redirect + rule. Valid values: 200, 301, 302, 404, 404-200.' + type: string + target: + description: Target pattern for a URL rewrite or redirect + rule. + type: string + type: object + type: array + description: + description: Description for an Amplify app. + type: string + enableAutoBranchCreation: + description: Enables automated branch creation for an Amplify + app. + type: boolean + enableBasicAuth: + description: Enables basic authorization for an Amplify app. This + will apply to all branches that are part of this app. + type: boolean + enableBranchAutoBuild: + description: Enables auto-building of branches for the Amplify + App. + type: boolean + enableBranchAutoDeletion: + description: Automatically disconnects a branch in the Amplify + Console when you delete a branch from your Git repository. + type: boolean + environmentVariables: + additionalProperties: + type: string + description: Environment variables map for an Amplify app. + type: object + x-kubernetes-map-type: granular + iamServiceRoleArn: + description: AWS Identity and Access Management (IAM) service + role for an Amplify app. + type: string + iamServiceRoleArnRef: + description: Reference to a Role in iam to populate iamServiceRoleArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamServiceRoleArnSelector: + description: Selector for a Role in iam to populate iamServiceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name for an Amplify app. + type: string + oauthTokenSecretRef: + description: OAuth token for a third-party source control system + for an Amplify app. The OAuth token is used to create a webhook + and a read-only deploy key. The OAuth token is not stored. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + platform: + description: 'Platform or framework for an Amplify app. Valid + values: WEB, WEB_COMPUTE. Default value: WEB.' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + repository: + description: Repository for an Amplify app. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessTokenSecretRef: + description: Personal access token for a third-party source control + system for an Amplify app. 
This token must have write access + to the relevant repo to create a webhook and a read-only deploy + key for the Amplify project. The token is not stored, so after + applying this attribute can be removed and the setup token deleted. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + autoBranchCreationConfig: + description: Automated branch creation configuration for an Amplify + app. An auto_branch_creation_config block is documented below. + properties: + basicAuthCredentialsSecretRef: + description: Basic authorization credentials for the autocreated + branch. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + buildSpec: + description: Build specification (build spec) for the autocreated + branch. + type: string + enableAutoBuild: + description: Enables auto building for the autocreated branch. + type: boolean + enableBasicAuth: + description: Enables basic authorization for the autocreated + branch. + type: boolean + enablePerformanceMode: + description: Enables performance mode for the branch. + type: boolean + enablePullRequestPreview: + description: Enables pull request previews for the autocreated + branch. + type: boolean + environmentVariables: + additionalProperties: + type: string + description: Environment variables for the autocreated branch. + type: object + x-kubernetes-map-type: granular + framework: + description: Framework for the autocreated branch. + type: string + pullRequestEnvironmentName: + description: Amplify environment name for the pull request. 
+ type: string + stage: + description: 'Describes the current stage for the autocreated + branch. Valid values: PRODUCTION, BETA, DEVELOPMENT, EXPERIMENTAL, + PULL_REQUEST.' + type: string + type: object + autoBranchCreationPatterns: + description: Automated branch creation glob patterns for an Amplify + app. + items: + type: string + type: array + x-kubernetes-list-type: set + basicAuthCredentialsSecretRef: + description: Credentials for basic authorization for an Amplify + app. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + buildSpec: + description: The build specification (build spec) for an Amplify + app. + type: string + customHeaders: + description: The custom HTTP headers for an Amplify app. + type: string + customRule: + description: Custom rewrite and redirect rules for an Amplify + app. A custom_rule block is documented below. + items: + properties: + condition: + description: Condition for a URL rewrite or redirect rule, + such as a country code. + type: string + source: + description: Source pattern for a URL rewrite or redirect + rule. + type: string + status: + description: 'Status code for a URL rewrite or redirect + rule. Valid values: 200, 301, 302, 404, 404-200.' + type: string + target: + description: Target pattern for a URL rewrite or redirect + rule. + type: string + type: object + type: array + description: + description: Description for an Amplify app. + type: string + enableAutoBranchCreation: + description: Enables automated branch creation for an Amplify + app. + type: boolean + enableBasicAuth: + description: Enables basic authorization for an Amplify app. This + will apply to all branches that are part of this app. + type: boolean + enableBranchAutoBuild: + description: Enables auto-building of branches for the Amplify + App. 
+ type: boolean + enableBranchAutoDeletion: + description: Automatically disconnects a branch in the Amplify + Console when you delete a branch from your Git repository. + type: boolean + environmentVariables: + additionalProperties: + type: string + description: Environment variables map for an Amplify app. + type: object + x-kubernetes-map-type: granular + iamServiceRoleArn: + description: AWS Identity and Access Management (IAM) service + role for an Amplify app. + type: string + iamServiceRoleArnRef: + description: Reference to a Role in iam to populate iamServiceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamServiceRoleArnSelector: + description: Selector for a Role in iam to populate iamServiceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name for an Amplify app. + type: string + oauthTokenSecretRef: + description: OAuth token for a third-party source control system + for an Amplify app. The OAuth token is used to create a webhook + and a read-only deploy key. The OAuth token is not stored. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + platform: + description: 'Platform or framework for an Amplify app. Valid + values: WEB, WEB_COMPUTE. Default value: WEB.' + type: string + repository: + description: Repository for an Amplify app. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: AppStatus defines the observed state of App. + properties: + atProvider: + properties: + arn: + description: ARN of the Amplify app. + type: string + autoBranchCreationConfig: + description: Automated branch creation configuration for an Amplify + app. 
An auto_branch_creation_config block is documented below. + properties: + buildSpec: + description: Build specification (build spec) for the autocreated + branch. + type: string + enableAutoBuild: + description: Enables auto building for the autocreated branch. + type: boolean + enableBasicAuth: + description: Enables basic authorization for the autocreated + branch. + type: boolean + enablePerformanceMode: + description: Enables performance mode for the branch. + type: boolean + enablePullRequestPreview: + description: Enables pull request previews for the autocreated + branch. + type: boolean + environmentVariables: + additionalProperties: + type: string + description: Environment variables for the autocreated branch. + type: object + x-kubernetes-map-type: granular + framework: + description: Framework for the autocreated branch. + type: string + pullRequestEnvironmentName: + description: Amplify environment name for the pull request. + type: string + stage: + description: 'Describes the current stage for the autocreated + branch. Valid values: PRODUCTION, BETA, DEVELOPMENT, EXPERIMENTAL, + PULL_REQUEST.' + type: string + type: object + autoBranchCreationPatterns: + description: Automated branch creation glob patterns for an Amplify + app. + items: + type: string + type: array + x-kubernetes-list-type: set + buildSpec: + description: The build specification (build spec) for an Amplify + app. + type: string + customHeaders: + description: The custom HTTP headers for an Amplify app. + type: string + customRule: + description: Custom rewrite and redirect rules for an Amplify + app. A custom_rule block is documented below. + items: + properties: + condition: + description: Condition for a URL rewrite or redirect rule, + such as a country code. + type: string + source: + description: Source pattern for a URL rewrite or redirect + rule. + type: string + status: + description: 'Status code for a URL rewrite or redirect + rule. 
Valid values: 200, 301, 302, 404, 404-200.' + type: string + target: + description: Target pattern for a URL rewrite or redirect + rule. + type: string + type: object + type: array + defaultDomain: + description: Default domain for the Amplify app. + type: string + description: + description: Description for an Amplify app. + type: string + enableAutoBranchCreation: + description: Enables automated branch creation for an Amplify + app. + type: boolean + enableBasicAuth: + description: Enables basic authorization for an Amplify app. This + will apply to all branches that are part of this app. + type: boolean + enableBranchAutoBuild: + description: Enables auto-building of branches for the Amplify + App. + type: boolean + enableBranchAutoDeletion: + description: Automatically disconnects a branch in the Amplify + Console when you delete a branch from your Git repository. + type: boolean + environmentVariables: + additionalProperties: + type: string + description: Environment variables map for an Amplify app. + type: object + x-kubernetes-map-type: granular + iamServiceRoleArn: + description: AWS Identity and Access Management (IAM) service + role for an Amplify app. + type: string + id: + description: Unique ID of the Amplify app. + type: string + name: + description: Name for an Amplify app. + type: string + platform: + description: 'Platform or framework for an Amplify app. Valid + values: WEB, WEB_COMPUTE. Default value: WEB.' + type: string + productionBranch: + description: Describes the information about a production branch + for an Amplify app. A production_branch block is documented + below. + items: + properties: + branchName: + description: Branch name for the production branch. + type: string + lastDeployTime: + description: Last deploy time of the production branch. + type: string + status: + description: Status of the production branch. + type: string + thumbnailUrl: + description: Thumbnail URL for the production branch. 
+ type: string + type: object + type: array + repository: + description: Repository for an Amplify app. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigateway.aws.upbound.io_documentationparts.yaml b/package/crds/apigateway.aws.upbound.io_documentationparts.yaml index bead22f421..bcec561760 100644 --- a/package/crds/apigateway.aws.upbound.io_documentationparts.yaml +++ b/package/crds/apigateway.aws.upbound.io_documentationparts.yaml @@ -601,3 +601,580 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DocumentationPart is the Schema for the DocumentationParts API. + Provides a settings of an API Gateway Documentation Part. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DocumentationPartSpec defines the desired state of DocumentationPart + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + location: + description: Location of the targeted API entity of the to-be-created + documentation part. See below. + properties: + method: + description: HTTP verb of a method. The default value is * + for any method. + type: string + name: + description: Name of the targeted API entity. + type: string + path: + description: URL path of the target. The default value is + / for the root resource. + type: string + statusCode: + description: HTTP status code of a response. The default value + is * for any status code. + type: string + type: + description: Type of API entity to which the documentation + content appliesE.g., API, METHOD or REQUEST_BODY + type: string + type: object + properties: + description: 'Content map of API-specific key-value pairs describing + the targeted API entity. The map must be encoded as a JSON string, + e.g., "{ "description": "The API does ..." }". 
Only Swagger-compliant + key-value pairs can be exported and, hence, published.' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + restApiId: + description: ID of the associated Rest API + type: string + restApiIdRef: + description: Reference to a RestAPI in apigateway to populate + restApiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + restApiIdSelector: + description: Selector for a RestAPI in apigateway to populate + restApiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + location: + description: Location of the targeted API entity of the to-be-created + documentation part. See below. + properties: + method: + description: HTTP verb of a method. The default value is * + for any method. + type: string + name: + description: Name of the targeted API entity. + type: string + path: + description: URL path of the target. The default value is + / for the root resource. + type: string + statusCode: + description: HTTP status code of a response. The default value + is * for any status code. 
+ type: string + type: + description: Type of API entity to which the documentation + content appliesE.g., API, METHOD or REQUEST_BODY + type: string + type: object + properties: + description: 'Content map of API-specific key-value pairs describing + the targeted API entity. The map must be encoded as a JSON string, + e.g., "{ "description": "The API does ..." }". Only Swagger-compliant + key-value pairs can be exported and, hence, published.' + type: string + restApiId: + description: ID of the associated Rest API + type: string + restApiIdRef: + description: Reference to a RestAPI in apigateway to populate + restApiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + restApiIdSelector: + description: Selector for a RestAPI in apigateway to populate + restApiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.properties is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.properties) + || (has(self.initProvider) && has(self.initProvider.properties))' + status: + description: DocumentationPartStatus defines the observed state of DocumentationPart. + properties: + atProvider: + properties: + documentationPartId: + description: The DocumentationPart identifier, generated by API + Gateway when the documentation part is created. + type: string + id: + description: Unique ID of the Documentation Part + type: string + location: + description: Location of the targeted API entity of the to-be-created + documentation part. See below. + properties: + method: + description: HTTP verb of a method. The default value is * + for any method. + type: string + name: + description: Name of the targeted API entity. + type: string + path: + description: URL path of the target. The default value is + / for the root resource. 
+ type: string + statusCode: + description: HTTP status code of a response. The default value + is * for any status code. + type: string + type: + description: Type of API entity to which the documentation + content appliesE.g., API, METHOD or REQUEST_BODY + type: string + type: object + properties: + description: 'Content map of API-specific key-value pairs describing + the targeted API entity. The map must be encoded as a JSON string, + e.g., "{ "description": "The API does ..." }". Only Swagger-compliant + key-value pairs can be exported and, hence, published.' + type: string + restApiId: + description: ID of the associated Rest API + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigateway.aws.upbound.io_domainnames.yaml b/package/crds/apigateway.aws.upbound.io_domainnames.yaml index 519a4f2060..c89403b805 100644 --- a/package/crds/apigateway.aws.upbound.io_domainnames.yaml +++ b/package/crds/apigateway.aws.upbound.io_domainnames.yaml @@ -994,3 +994,967 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DomainName is the Schema for the DomainNames API. Registers a + custom domain name for use with AWS API Gateway. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DomainNameSpec defines the desired state of DomainName + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificateArn: + description: ARN for an AWS-managed certificate. AWS Certificate + Manager is the only supported source. Used when an edge-optimized + domain name is desired. Conflicts with certificate_name, certificate_body, + certificate_chain, certificate_private_key, regional_certificate_arn, + and regional_certificate_name. + type: string + certificateArnRef: + description: Reference to a CertificateValidation in acm to populate + certificateArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateArnSelector: + description: Selector for a CertificateValidation in acm to populate + certificateArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + certificateBody: + description: Certificate issued for the domain name being registered, + in PEM format. Only valid for EDGE endpoint configuration type. + Conflicts with certificate_arn, regional_certificate_arn, and + regional_certificate_name. 
+ type: string + certificateChain: + description: Certificate for the CA that issued the certificate, + along with any intermediate CA certificates required to create + an unbroken chain to a certificate trusted by the intended API + clients. Only valid for EDGE endpoint configuration type. Conflicts + with certificate_arn, regional_certificate_arn, and regional_certificate_name. + type: string + certificateName: + description: Unique name to use when registering this certificate + as an IAM server certificate. Conflicts with certificate_arn, + regional_certificate_arn, and regional_certificate_name. Required + if certificate_arn is not set. + type: string + certificatePrivateKeySecretRef: + description: Private key associated with the domain certificate + given in certificate_body. Only valid for EDGE endpoint configuration + type. Conflicts with certificate_arn, regional_certificate_arn, + and regional_certificate_name. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + domainName: + description: Fully-qualified domain name to register. + type: string + endpointConfiguration: + description: Configuration block defining API endpoint information + including type. See below. + properties: + types: + description: 'List of endpoint types. This resource currently + only supports managing a single value. Valid values: EDGE + or REGIONAL. If unspecified, defaults to EDGE. Must be declared + as REGIONAL in non-Commercial partitions. Refer to the documentation + for more information on the difference between edge-optimized + and regional APIs.' + items: + type: string + type: array + type: object + mutualTlsAuthentication: + description: Mutual TLS authentication configuration for the domain + name. See below. 
+ properties: + truststoreUri: + description: Amazon S3 URL that specifies the truststore for + mutual TLS authentication, for example, s3://bucket-name/key-name. + The truststore can contain certificates from public or private + certificate authorities. To update the truststore, upload + a new version to S3, and then update your custom domain + name to use the new version. + type: string + truststoreVersion: + description: Version of the S3 object that contains the truststore. + To specify a version, you must have versioning enabled for + the S3 bucket. + type: string + type: object + ownershipVerificationCertificateArn: + description: ARN of the AWS-issued certificate used to validate + custom domain ownership (when certificate_arn is issued via + an ACM Private CA or mutual_tls_authentication is configured + with an ACM-imported certificate.) + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + regionalCertificateArn: + description: ARN for an AWS-managed certificate. AWS Certificate + Manager is the only supported source. Used when a regional domain + name is desired. Conflicts with certificate_arn, certificate_name, + certificate_body, certificate_chain, and certificate_private_key. + type: string + regionalCertificateArnRef: + description: Reference to a CertificateValidation in acm to populate + regionalCertificateArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + regionalCertificateArnSelector: + description: Selector for a CertificateValidation in acm to populate + regionalCertificateArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + regionalCertificateName: + description: User-friendly name of the certificate that will be + used by regional endpoint for this domain name. Conflicts with + certificate_arn, certificate_name, certificate_body, certificate_chain, + and certificate_private_key. 
+ type: string + securityPolicy: + description: Transport Layer Security (TLS) version + cipher suite + for this DomainName. Valid values are TLS_1_0 and TLS_1_2. Must + be configured to perform drift detection. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + certificateArn: + description: ARN for an AWS-managed certificate. AWS Certificate + Manager is the only supported source. Used when an edge-optimized + domain name is desired. Conflicts with certificate_name, certificate_body, + certificate_chain, certificate_private_key, regional_certificate_arn, + and regional_certificate_name. + type: string + certificateArnRef: + description: Reference to a CertificateValidation in acm to populate + certificateArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateArnSelector: + description: Selector for a CertificateValidation in acm to populate + certificateArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + certificateBody: + description: Certificate issued for the domain name being registered, + in PEM format. Only valid for EDGE endpoint configuration type. + Conflicts with certificate_arn, regional_certificate_arn, and + regional_certificate_name. 
+ type: string + certificateChain: + description: Certificate for the CA that issued the certificate, + along with any intermediate CA certificates required to create + an unbroken chain to a certificate trusted by the intended API + clients. Only valid for EDGE endpoint configuration type. Conflicts + with certificate_arn, regional_certificate_arn, and regional_certificate_name. + type: string + certificateName: + description: Unique name to use when registering this certificate + as an IAM server certificate. Conflicts with certificate_arn, + regional_certificate_arn, and regional_certificate_name. Required + if certificate_arn is not set. + type: string + certificatePrivateKeySecretRef: + description: Private key associated with the domain certificate + given in certificate_body. Only valid for EDGE endpoint configuration + type. Conflicts with certificate_arn, regional_certificate_arn, + and regional_certificate_name. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + domainName: + description: Fully-qualified domain name to register. + type: string + endpointConfiguration: + description: Configuration block defining API endpoint information + including type. See below. + properties: + types: + description: 'List of endpoint types. This resource currently + only supports managing a single value. Valid values: EDGE + or REGIONAL. If unspecified, defaults to EDGE. Must be declared + as REGIONAL in non-Commercial partitions. Refer to the documentation + for more information on the difference between edge-optimized + and regional APIs.' + items: + type: string + type: array + type: object + mutualTlsAuthentication: + description: Mutual TLS authentication configuration for the domain + name. See below. 
+ properties: + truststoreUri: + description: Amazon S3 URL that specifies the truststore for + mutual TLS authentication, for example, s3://bucket-name/key-name. + The truststore can contain certificates from public or private + certificate authorities. To update the truststore, upload + a new version to S3, and then update your custom domain + name to use the new version. + type: string + truststoreVersion: + description: Version of the S3 object that contains the truststore. + To specify a version, you must have versioning enabled for + the S3 bucket. + type: string + type: object + ownershipVerificationCertificateArn: + description: ARN of the AWS-issued certificate used to validate + custom domain ownership (when certificate_arn is issued via + an ACM Private CA or mutual_tls_authentication is configured + with an ACM-imported certificate.) + type: string + regionalCertificateArn: + description: ARN for an AWS-managed certificate. AWS Certificate + Manager is the only supported source. Used when a regional domain + name is desired. Conflicts with certificate_arn, certificate_name, + certificate_body, certificate_chain, and certificate_private_key. + type: string + regionalCertificateArnRef: + description: Reference to a CertificateValidation in acm to populate + regionalCertificateArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + regionalCertificateArnSelector: + description: Selector for a CertificateValidation in acm to populate + regionalCertificateArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + regionalCertificateName: + description: User-friendly name of the certificate that will be + used by regional endpoint for this domain name. Conflicts with + certificate_arn, certificate_name, certificate_body, certificate_chain, + and certificate_private_key. + type: string + securityPolicy: + description: Transport Layer Security (TLS) version + cipher suite + for this DomainName. Valid values are TLS_1_0 and TLS_1_2. 
Must + be configured to perform drift detection. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.domainName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.domainName) + || (has(self.initProvider) && has(self.initProvider.domainName))' + status: + description: DomainNameStatus defines the observed state of DomainName. + properties: + atProvider: + properties: + arn: + description: ARN of domain name. + type: string + certificateArn: + description: ARN for an AWS-managed certificate. AWS Certificate + Manager is the only supported source. Used when an edge-optimized + domain name is desired. Conflicts with certificate_name, certificate_body, + certificate_chain, certificate_private_key, regional_certificate_arn, + and regional_certificate_name. + type: string + certificateBody: + description: Certificate issued for the domain name being registered, + in PEM format. Only valid for EDGE endpoint configuration type. + Conflicts with certificate_arn, regional_certificate_arn, and + regional_certificate_name. + type: string + certificateChain: + description: Certificate for the CA that issued the certificate, + along with any intermediate CA certificates required to create + an unbroken chain to a certificate trusted by the intended API + clients. Only valid for EDGE endpoint configuration type. Conflicts + with certificate_arn, regional_certificate_arn, and regional_certificate_name. + type: string + certificateName: + description: Unique name to use when registering this certificate + as an IAM server certificate. Conflicts with certificate_arn, + regional_certificate_arn, and regional_certificate_name. Required + if certificate_arn is not set. + type: string + certificateUploadDate: + description: Upload date associated with the domain certificate. 
+ type: string + cloudfrontDomainName: + description: Hostname created by Cloudfront to represent the distribution + that implements this domain name mapping. + type: string + cloudfrontZoneId: + description: For convenience, the hosted zone ID (Z2FDTNDATAQYW2) + that can be used to create a Route53 alias record for the distribution. + type: string + domainName: + description: Fully-qualified domain name to register. + type: string + endpointConfiguration: + description: Configuration block defining API endpoint information + including type. See below. + properties: + types: + description: 'List of endpoint types. This resource currently + only supports managing a single value. Valid values: EDGE + or REGIONAL. If unspecified, defaults to EDGE. Must be declared + as REGIONAL in non-Commercial partitions. Refer to the documentation + for more information on the difference between edge-optimized + and regional APIs.' + items: + type: string + type: array + type: object + id: + description: Internal identifier assigned to this domain name + by API Gateway. + type: string + mutualTlsAuthentication: + description: Mutual TLS authentication configuration for the domain + name. See below. + properties: + truststoreUri: + description: Amazon S3 URL that specifies the truststore for + mutual TLS authentication, for example, s3://bucket-name/key-name. + The truststore can contain certificates from public or private + certificate authorities. To update the truststore, upload + a new version to S3, and then update your custom domain + name to use the new version. + type: string + truststoreVersion: + description: Version of the S3 object that contains the truststore. + To specify a version, you must have versioning enabled for + the S3 bucket. 
+ type: string + type: object + ownershipVerificationCertificateArn: + description: ARN of the AWS-issued certificate used to validate + custom domain ownership (when certificate_arn is issued via + an ACM Private CA or mutual_tls_authentication is configured + with an ACM-imported certificate.) + type: string + regionalCertificateArn: + description: ARN for an AWS-managed certificate. AWS Certificate + Manager is the only supported source. Used when a regional domain + name is desired. Conflicts with certificate_arn, certificate_name, + certificate_body, certificate_chain, and certificate_private_key. + type: string + regionalCertificateName: + description: User-friendly name of the certificate that will be + used by regional endpoint for this domain name. Conflicts with + certificate_arn, certificate_name, certificate_body, certificate_chain, + and certificate_private_key. + type: string + regionalDomainName: + description: Hostname for the custom domain's regional endpoint. + type: string + regionalZoneId: + description: Hosted zone ID that can be used to create a Route53 + alias record for the regional endpoint. + type: string + securityPolicy: + description: Transport Layer Security (TLS) version + cipher suite + for this DomainName. Valid values are TLS_1_0 and TLS_1_2. Must + be configured to perform drift detection. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigateway.aws.upbound.io_integrations.yaml b/package/crds/apigateway.aws.upbound.io_integrations.yaml index 0b84040df5..55b2d9408c 100644 --- a/package/crds/apigateway.aws.upbound.io_integrations.yaml +++ b/package/crds/apigateway.aws.upbound.io_integrations.yaml @@ -1414,3 +1414,1390 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Integration is the Schema for the Integrations API. Provides + an HTTP Method Integration for an API Gateway Integration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IntegrationSpec defines the desired state of Integration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cacheKeyParameters: + description: List of cache key parameters for the integration. + items: + type: string + type: array + x-kubernetes-list-type: set + cacheNamespace: + description: Integration's cache namespace. + type: string + connectionId: + description: ID of the VpcLink used for the integration. Required + if connection_type is VPC_LINK + type: string + connectionIdRef: + description: Reference to a VPCLink in apigateway to populate + connectionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionIdSelector: + description: Selector for a VPCLink in apigateway to populate + connectionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectionType: + description: Integration input's connectionType. Valid values + are INTERNET (default for connections through the public routable + internet), and VPC_LINK (for private connections between API + Gateway and a network load balancer in a VPC). 
+ type: string + contentHandling: + description: How to handle request payload content type conversions. + Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT. + If this property is not defined, the request payload will be + passed through from the method request to integration request + without modification, provided that the passthroughBehaviors + is configured to support payload pass-through. + type: string + credentials: + description: Credentials required for the integration. For AWS + integrations, 2 options are available. To specify an IAM Role + for Amazon API Gateway to assume, use the role's ARN. To require + that the caller's identity be passed through from the request, + specify the string arn:aws:iam::\*:user/\*. + type: string + httpMethod: + description: |- + HTTP method (GET, POST, PUT, DELETE, HEAD, OPTION, ANY) + when calling the associated resource. + type: string + httpMethodRef: + description: Reference to a Method in apigateway to populate httpMethod. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + httpMethodSelector: + description: Selector for a Method in apigateway to populate httpMethod. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + integrationHttpMethod: + description: |- + Integration HTTP method + (GET, POST, PUT, DELETE, HEAD, OPTIONs, ANY, PATCH) specifying how API Gateway will interact with the back end. + Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + Not all methods are compatible with all AWS integrations. + e.g., Lambda function can only be invoked via POST. + type: string + passthroughBehavior: + description: Integration passthrough behavior (WHEN_NO_MATCH, + WHEN_NO_TEMPLATES, NEVER). Required if request_templates is + used. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + requestParameters: + additionalProperties: + type: string + description: |- + Map of request query string parameters and headers that should be passed to the backend responder. 
+ For example: request_parameters = { "integration.request.header.X-Some-Other-Header" = "method.request.header.X-Some-Header" } + type: object + x-kubernetes-map-type: granular + requestTemplates: + additionalProperties: + type: string + description: Map of the integration's request templates. + type: object + x-kubernetes-map-type: granular + resourceId: + description: API resource ID. + type: string + resourceIdRef: + description: Reference to a Resource in apigateway to populate + resourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceIdSelector: + description: Selector for a Resource in apigateway to populate + resourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restApiId: + description: ID of the associated REST API. + type: string + restApiIdRef: + description: Reference to a RestAPI in apigateway to populate + restApiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + restApiIdSelector: + description: Selector for a RestAPI in apigateway to populate + restApiId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeoutMilliseconds: + description: Custom timeout between 50 and 29,000 milliseconds. + The default value is 29,000 milliseconds. + type: number + tlsConfig: + description: TLS configuration. See below. + properties: + insecureSkipVerification: + description: Whether or not API Gateway skips verification + that the certificate for an integration endpoint is issued + by a supported certificate authority. This isn’t recommended, + but it enables you to use certificates that are signed by + private certificate authorities, or certificates that are + self-signed. If enabled, API Gateway still performs basic + certificate validation, which includes checking the certificate's + expiration date, hostname, and presence of a root certificate + authority. Supported only for HTTP and HTTP_PROXY integrations. 
+ type: boolean + type: object + type: + description: Integration input's type. Valid values are HTTP (for + HTTP backends), MOCK (not calling any real backend), AWS (for + AWS services), AWS_PROXY (for Lambda proxy integration) and + HTTP_PROXY (for HTTP proxy integration). An HTTP or HTTP_PROXY + integration with a connection_type of VPC_LINK is referred to + as a private integration and uses a VpcLink to connect API Gateway + to a network load balancer of a VPC. + type: string + uri: + description: |- + Input's URI. Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + For HTTP integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification . For AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{subdomain.service|service}:{path|action}/{service_api}. region, subdomain and service are used to determine the right endpoint. + e.g., arn:aws:apigateway:eu-west-1:lambda:path/2015-03-31/functions/arn:aws:lambda:eu-west-1:012345678901:function:my-func/invocations. For private integrations, the URI parameter is not used for routing requests to your endpoint, but is used for setting the Host header and for certificate validation. + type: string + uriRef: + description: Reference to a Function in lambda to populate uri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + uriSelector: + description: Selector for a Function in lambda to populate uri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cacheKeyParameters: + description: List of cache key parameters for the integration. + items: + type: string + type: array + x-kubernetes-list-type: set + cacheNamespace: + description: Integration's cache namespace. + type: string + connectionId: + description: ID of the VpcLink used for the integration. Required + if connection_type is VPC_LINK + type: string + connectionIdRef: + description: Reference to a VPCLink in apigateway to populate + connectionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionIdSelector: + description: Selector for a VPCLink in apigateway to populate + connectionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectionType: + description: Integration input's connectionType. Valid values + are INTERNET (default for connections through the public routable + internet), and VPC_LINK (for private connections between API + Gateway and a network load balancer in a VPC). + type: string + contentHandling: + description: How to handle request payload content type conversions. + Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT. + If this property is not defined, the request payload will be + passed through from the method request to integration request + without modification, provided that the passthroughBehaviors + is configured to support payload pass-through. + type: string + credentials: + description: Credentials required for the integration. For AWS + integrations, 2 options are available. To specify an IAM Role + for Amazon API Gateway to assume, use the role's ARN. To require + that the caller's identity be passed through from the request, + specify the string arn:aws:iam::\*:user/\*. + type: string + httpMethod: + description: |- + HTTP method (GET, POST, PUT, DELETE, HEAD, OPTION, ANY) + when calling the associated resource. 
+ type: string + httpMethodRef: + description: Reference to a Method in apigateway to populate httpMethod. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + httpMethodSelector: + description: Selector for a Method in apigateway to populate httpMethod. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + integrationHttpMethod: + description: |- + Integration HTTP method + (GET, POST, PUT, DELETE, HEAD, OPTIONs, ANY, PATCH) specifying how API Gateway will interact with the back end. + Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + Not all methods are compatible with all AWS integrations. + e.g., Lambda function can only be invoked via POST. + type: string + passthroughBehavior: + description: Integration passthrough behavior (WHEN_NO_MATCH, + WHEN_NO_TEMPLATES, NEVER). Required if request_templates is + used. + type: string + requestParameters: + additionalProperties: + type: string + description: |- + Map of request query string parameters and headers that should be passed to the backend responder. + For example: request_parameters = { "integration.request.header.X-Some-Other-Header" = "method.request.header.X-Some-Header" } + type: object + x-kubernetes-map-type: granular + requestTemplates: + additionalProperties: + type: string + description: Map of the integration's request templates. + type: object + x-kubernetes-map-type: granular + resourceId: + description: API resource ID. + type: string + resourceIdRef: + description: Reference to a Resource in apigateway to populate + resourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceIdSelector: + description: Selector for a Resource in apigateway to populate + resourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restApiId: + description: ID of the associated REST API. + type: string + restApiIdRef: + description: Reference to a RestAPI in apigateway to populate + restApiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + restApiIdSelector: + description: Selector for a RestAPI in apigateway to populate + restApiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeoutMilliseconds: + description: Custom timeout between 50 and 29,000 milliseconds. + The default value is 29,000 milliseconds. + type: number + tlsConfig: + description: TLS configuration. See below. + properties: + insecureSkipVerification: + description: Whether or not API Gateway skips verification + that the certificate for an integration endpoint is issued + by a supported certificate authority. This isn’t recommended, + but it enables you to use certificates that are signed by + private certificate authorities, or certificates that are + self-signed. If enabled, API Gateway still performs basic + certificate validation, which includes checking the certificate's + expiration date, hostname, and presence of a root certificate + authority. Supported only for HTTP and HTTP_PROXY integrations. + type: boolean + type: object + type: + description: Integration input's type. Valid values are HTTP (for + HTTP backends), MOCK (not calling any real backend), AWS (for + AWS services), AWS_PROXY (for Lambda proxy integration) and + HTTP_PROXY (for HTTP proxy integration). An HTTP or HTTP_PROXY + integration with a connection_type of VPC_LINK is referred to + as a private integration and uses a VpcLink to connect API Gateway + to a network load balancer of a VPC. + type: string + uri: + description: |- + Input's URI. Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + For HTTP integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification . For AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{subdomain.service|service}:{path|action}/{service_api}. region, subdomain and service are used to determine the right endpoint. + e.g., arn:aws:apigateway:eu-west-1:lambda:path/2015-03-31/functions/arn:aws:lambda:eu-west-1:012345678901:function:my-func/invocations. 
For private integrations, the URI parameter is not used for routing requests to your endpoint, but is used for setting the Host header and for certificate validation. + type: string + uriRef: + description: Reference to a Function in lambda to populate uri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + uriSelector: + description: Selector for a Function in lambda to populate uri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: IntegrationStatus defines the observed state of Integration. + properties: + atProvider: + properties: + cacheKeyParameters: + description: List of cache key parameters for the integration. + items: + type: string + type: array + x-kubernetes-list-type: set + cacheNamespace: + description: Integration's cache namespace. + type: string + connectionId: + description: ID of the VpcLink used for the integration. Required + if connection_type is VPC_LINK + type: string + connectionType: + description: Integration input's connectionType. Valid values + are INTERNET (default for connections through the public routable + internet), and VPC_LINK (for private connections between API + Gateway and a network load balancer in a VPC). + type: string + contentHandling: + description: How to handle request payload content type conversions. + Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT. + If this property is not defined, the request payload will be + passed through from the method request to integration request + without modification, provided that the passthroughBehaviors + is configured to support payload pass-through. + type: string + credentials: + description: Credentials required for the integration. For AWS + integrations, 2 options are available. To specify an IAM Role + for Amazon API Gateway to assume, use the role's ARN. To require + that the caller's identity be passed through from the request, + specify the string arn:aws:iam::\*:user/\*. 
+ type: string + httpMethod: + description: |- + HTTP method (GET, POST, PUT, DELETE, HEAD, OPTION, ANY) + when calling the associated resource. + type: string + id: + type: string + integrationHttpMethod: + description: |- + Integration HTTP method + (GET, POST, PUT, DELETE, HEAD, OPTIONs, ANY, PATCH) specifying how API Gateway will interact with the back end. + Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + Not all methods are compatible with all AWS integrations. + e.g., Lambda function can only be invoked via POST. + type: string + passthroughBehavior: + description: Integration passthrough behavior (WHEN_NO_MATCH, + WHEN_NO_TEMPLATES, NEVER). Required if request_templates is + used. + type: string + requestParameters: + additionalProperties: + type: string + description: |- + Map of request query string parameters and headers that should be passed to the backend responder. + For example: request_parameters = { "integration.request.header.X-Some-Other-Header" = "method.request.header.X-Some-Header" } + type: object + x-kubernetes-map-type: granular + requestTemplates: + additionalProperties: + type: string + description: Map of the integration's request templates. + type: object + x-kubernetes-map-type: granular + resourceId: + description: API resource ID. + type: string + restApiId: + description: ID of the associated REST API. + type: string + timeoutMilliseconds: + description: Custom timeout between 50 and 29,000 milliseconds. + The default value is 29,000 milliseconds. + type: number + tlsConfig: + description: TLS configuration. See below. + properties: + insecureSkipVerification: + description: Whether or not API Gateway skips verification + that the certificate for an integration endpoint is issued + by a supported certificate authority. This isn’t recommended, + but it enables you to use certificates that are signed by + private certificate authorities, or certificates that are + self-signed. 
If enabled, API Gateway still performs basic + certificate validation, which includes checking the certificate's + expiration date, hostname, and presence of a root certificate + authority. Supported only for HTTP and HTTP_PROXY integrations. + type: boolean + type: object + type: + description: Integration input's type. Valid values are HTTP (for + HTTP backends), MOCK (not calling any real backend), AWS (for + AWS services), AWS_PROXY (for Lambda proxy integration) and + HTTP_PROXY (for HTTP proxy integration). An HTTP or HTTP_PROXY + integration with a connection_type of VPC_LINK is referred to + as a private integration and uses a VpcLink to connect API Gateway + to a network load balancer of a VPC. + type: string + uri: + description: |- + Input's URI. Required if type is AWS, AWS_PROXY, HTTP or HTTP_PROXY. + For HTTP integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification . For AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{subdomain.service|service}:{path|action}/{service_api}. region, subdomain and service are used to determine the right endpoint. + e.g., arn:aws:apigateway:eu-west-1:lambda:path/2015-03-31/functions/arn:aws:lambda:eu-west-1:012345678901:function:my-func/invocations. For private integrations, the URI parameter is not used for routing requests to your endpoint, but is used for setting the Host header and for certificate validation. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigateway.aws.upbound.io_methodsettings.yaml b/package/crds/apigateway.aws.upbound.io_methodsettings.yaml index 65eab737f8..a6f58adb42 100644 --- a/package/crds/apigateway.aws.upbound.io_methodsettings.yaml +++ b/package/crds/apigateway.aws.upbound.io_methodsettings.yaml @@ -828,3 +828,804 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MethodSettings is the Schema for the MethodSettingss API. Manages + API Gateway Stage Method Settings + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MethodSettingsSpec defines the desired state of MethodSettings + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + methodPath: + description: Method path defined as {resource_path}/{http_method} + for an individual method override, or */* for overriding all + methods in the stage. Ensure to trim any leading forward slashes + in the path (e.g., trimprefix(aws_api_gateway_resource.example.path, + "/")). + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + restApiId: + description: ID of the REST API + type: string + restApiIdRef: + description: Reference to a RestAPI in apigateway to populate + restApiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + restApiIdSelector: + description: Selector for a RestAPI in apigateway to populate + restApiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + settings: + description: Settings block, see below. + properties: + cacheDataEncrypted: + description: Whether the cached responses are encrypted. + type: boolean + cacheTtlInSeconds: + description: Time to live (TTL), in seconds, for cached responses. + The higher the TTL, the longer the response will be cached. 
+ type: number + cachingEnabled: + description: Whether responses should be cached and returned + for requests. A cache cluster must be enabled on the stage + for responses to be cached. + type: boolean + dataTraceEnabled: + description: Whether data trace logging is enabled for this + method, which effects the log entries pushed to Amazon CloudWatch + Logs. + type: boolean + loggingLevel: + description: Logging level for this method, which effects + the log entries pushed to Amazon CloudWatch Logs. The available + levels are OFF, ERROR, and INFO. + type: string + metricsEnabled: + description: Whether Amazon CloudWatch metrics are enabled + for this method. + type: boolean + requireAuthorizationForCacheControl: + description: Whether authorization is required for a cache + invalidation request. + type: boolean + throttlingBurstLimit: + description: 'Throttling burst limit. Default: -1 (throttling + disabled).' + type: number + throttlingRateLimit: + description: 'Throttling rate limit. Default: -1 (throttling + disabled).' + type: number + unauthorizedCacheControlHeaderStrategy: + description: How to handle unauthorized requests for cache + invalidation. The available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, + SUCCEED_WITHOUT_RESPONSE_HEADER. + type: string + type: object + stageName: + description: Name of the stage + type: string + stageNameRef: + description: Reference to a Stage in apigateway to populate stageName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stageNameSelector: + description: Selector for a Stage in apigateway to populate stageName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + methodPath: + description: Method path defined as {resource_path}/{http_method} + for an individual method override, or */* for overriding all + methods in the stage. Ensure to trim any leading forward slashes + in the path (e.g., trimprefix(aws_api_gateway_resource.example.path, + "/")). + type: string + restApiId: + description: ID of the REST API + type: string + restApiIdRef: + description: Reference to a RestAPI in apigateway to populate + restApiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + restApiIdSelector: + description: Selector for a RestAPI in apigateway to populate + restApiId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + settings: + description: Settings block, see below. + properties: + cacheDataEncrypted: + description: Whether the cached responses are encrypted. + type: boolean + cacheTtlInSeconds: + description: Time to live (TTL), in seconds, for cached responses. + The higher the TTL, the longer the response will be cached. + type: number + cachingEnabled: + description: Whether responses should be cached and returned + for requests. A cache cluster must be enabled on the stage + for responses to be cached. + type: boolean + dataTraceEnabled: + description: Whether data trace logging is enabled for this + method, which effects the log entries pushed to Amazon CloudWatch + Logs. + type: boolean + loggingLevel: + description: Logging level for this method, which effects + the log entries pushed to Amazon CloudWatch Logs. The available + levels are OFF, ERROR, and INFO. 
+ type: string + metricsEnabled: + description: Whether Amazon CloudWatch metrics are enabled + for this method. + type: boolean + requireAuthorizationForCacheControl: + description: Whether authorization is required for a cache + invalidation request. + type: boolean + throttlingBurstLimit: + description: 'Throttling burst limit. Default: -1 (throttling + disabled).' + type: number + throttlingRateLimit: + description: 'Throttling rate limit. Default: -1 (throttling + disabled).' + type: number + unauthorizedCacheControlHeaderStrategy: + description: How to handle unauthorized requests for cache + invalidation. The available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, + SUCCEED_WITHOUT_RESPONSE_HEADER. + type: string + type: object + stageName: + description: Name of the stage + type: string + stageNameRef: + description: Reference to a Stage in apigateway to populate stageName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stageNameSelector: + description: Selector for a Stage in apigateway to populate stageName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.methodPath is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.methodPath) + || (has(self.initProvider) && has(self.initProvider.methodPath))' + - message: spec.forProvider.settings is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.settings) + || (has(self.initProvider) && has(self.initProvider.settings))' + status: + description: MethodSettingsStatus defines the observed state of MethodSettings. + properties: + atProvider: + properties: + id: + type: string + methodPath: + description: Method path defined as {resource_path}/{http_method} + for an individual method override, or */* for overriding all + methods in the stage. 
Ensure to trim any leading forward slashes + in the path (e.g., trimprefix(aws_api_gateway_resource.example.path, + "/")). + type: string + restApiId: + description: ID of the REST API + type: string + settings: + description: Settings block, see below. + properties: + cacheDataEncrypted: + description: Whether the cached responses are encrypted. + type: boolean + cacheTtlInSeconds: + description: Time to live (TTL), in seconds, for cached responses. + The higher the TTL, the longer the response will be cached. + type: number + cachingEnabled: + description: Whether responses should be cached and returned + for requests. A cache cluster must be enabled on the stage + for responses to be cached. + type: boolean + dataTraceEnabled: + description: Whether data trace logging is enabled for this + method, which effects the log entries pushed to Amazon CloudWatch + Logs. + type: boolean + loggingLevel: + description: Logging level for this method, which effects + the log entries pushed to Amazon CloudWatch Logs. The available + levels are OFF, ERROR, and INFO. + type: string + metricsEnabled: + description: Whether Amazon CloudWatch metrics are enabled + for this method. + type: boolean + requireAuthorizationForCacheControl: + description: Whether authorization is required for a cache + invalidation request. + type: boolean + throttlingBurstLimit: + description: 'Throttling burst limit. Default: -1 (throttling + disabled).' + type: number + throttlingRateLimit: + description: 'Throttling rate limit. Default: -1 (throttling + disabled).' + type: number + unauthorizedCacheControlHeaderStrategy: + description: How to handle unauthorized requests for cache + invalidation. The available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, + SUCCEED_WITHOUT_RESPONSE_HEADER. + type: string + type: object + stageName: + description: Name of the stage + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigateway.aws.upbound.io_restapis.yaml b/package/crds/apigateway.aws.upbound.io_restapis.yaml index 670ca6ae32..f8dd5da972 100644 --- a/package/crds/apigateway.aws.upbound.io_restapis.yaml +++ b/package/crds/apigateway.aws.upbound.io_restapis.yaml @@ -751,3 +751,724 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RestAPI is the Schema for the RestAPIs API. Manages an API Gateway + REST API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RestAPISpec defines the desired state of RestAPI + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiKeySource: + description: Source of the API key for requests. Valid values + are HEADER (default) and AUTHORIZER. If importing an OpenAPI + specification via the body argument, this corresponds to the + x-amazon-apigateway-api-key-source extension. If the argument + value is provided and is different than the OpenAPI value, the + argument value will override the OpenAPI value. + type: string + binaryMediaTypes: + description: List of binary media types supported by the REST + API. By default, the REST API supports only UTF-8-encoded text + payloads. If importing an OpenAPI specification via the body + argument, this corresponds to the x-amazon-apigateway-binary-media-types + extension. If the argument value is provided and is different + than the OpenAPI value, the argument value will override the + OpenAPI value. + items: + type: string + type: array + body: + description: OpenAPI specification that defines the set of routes + and integrations to create as part of the REST API. 
This configuration, + and any updates to it, will replace all REST API configuration + except values overridden in this resource configuration and + other resource updates applied after this resource but before + any aws_api_gateway_deployment creation. More information about + REST API OpenAPI support can be found in the API Gateway Developer + Guide. + type: string + description: + description: Description of the REST API. If importing an OpenAPI + specification via the body argument, this corresponds to the + info.description field. If the argument value is provided and + is different than the OpenAPI value, the argument value will + override the OpenAPI value. + type: string + disableExecuteApiEndpoint: + description: Whether clients can invoke your API by using the + default execute-api endpoint. By default, clients can invoke + your API with the default https://{api_id}.execute-api.{region}.amazonaws.com + endpoint. To require that clients use a custom domain name to + invoke your API, disable the default endpoint. Defaults to false. + If importing an OpenAPI specification via the body argument, + this corresponds to the x-amazon-apigateway-endpoint-configuration + extension disableExecuteApiEndpoint property. If the argument + value is true and is different than the OpenAPI value, the argument + value will override the OpenAPI value. + type: boolean + endpointConfiguration: + description: Configuration block defining API endpoint configuration + including endpoint type. Defined below. + properties: + types: + description: 'List of endpoint types. This resource currently + only supports managing a single value. Valid values: EDGE, + REGIONAL or PRIVATE. If unspecified, defaults to EDGE. If + set to PRIVATE recommend to set put_rest_api_mode = merge + to not cause the endpoints and associated Route53 records + to be deleted. Refer to the documentation for more information + on the difference between edge-optimized and regional APIs.' 
+ items: + type: string + type: array + vpcEndpointIds: + description: Set of VPC Endpoint identifiers. It is only supported + for PRIVATE endpoint type. If importing an OpenAPI specification + via the body argument, this corresponds to the x-amazon-apigateway-endpoint-configuration + extension vpcEndpointIds property. If the argument value + is provided and is different than the OpenAPI value, the + argument value will override the OpenAPI value. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + failOnWarnings: + description: Whether warnings while API Gateway is creating or + updating the resource should return an error or not. Defaults + to false + type: boolean + minimumCompressionSize: + description: Minimum response size to compress for the REST API. + String containing an integer value between -1 and 10485760 (10MB). + -1 will disable an existing compression configuration, and all + other values will enable compression with the configured size. + New resources can simply omit this argument to disable compression, + rather than setting the value to -1. If importing an OpenAPI + specification via the body argument, this corresponds to the + x-amazon-apigateway-minimum-compression-size extension. If the + argument value is provided and is different than the OpenAPI + value, the argument value will override the OpenAPI value. + type: string + name: + description: Name of the REST API. If importing an OpenAPI specification + via the body argument, this corresponds to the info.title field. + If the argument value is different than the OpenAPI value, the + argument value will override the OpenAPI value. + type: string + parameters: + additionalProperties: + type: string + description: Map of customizations for importing the specification + in the body argument. For example, to exclude DocumentationParts + from an imported API, set ignore equal to documentation. 
Additional + documentation, including other parameters such as basepath, + can be found in the API Gateway Developer Guide. + type: object + x-kubernetes-map-type: granular + putRestApiMode: + description: Mode of the PutRestApi operation when importing an + OpenAPI specification via the body argument (create or update + operation). Valid values are merge and overwrite. If unspecificed, + defaults to overwrite (for backwards compatibility). This corresponds + to the x-amazon-apigateway-put-integration-method extension. + If the argument value is provided and is different than the + OpenAPI value, the argument value will override the OpenAPI + value. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + apiKeySource: + description: Source of the API key for requests. Valid values + are HEADER (default) and AUTHORIZER. If importing an OpenAPI + specification via the body argument, this corresponds to the + x-amazon-apigateway-api-key-source extension. 
If the argument + value is provided and is different than the OpenAPI value, the + argument value will override the OpenAPI value. + type: string + binaryMediaTypes: + description: List of binary media types supported by the REST + API. By default, the REST API supports only UTF-8-encoded text + payloads. If importing an OpenAPI specification via the body + argument, this corresponds to the x-amazon-apigateway-binary-media-types + extension. If the argument value is provided and is different + than the OpenAPI value, the argument value will override the + OpenAPI value. + items: + type: string + type: array + body: + description: OpenAPI specification that defines the set of routes + and integrations to create as part of the REST API. This configuration, + and any updates to it, will replace all REST API configuration + except values overridden in this resource configuration and + other resource updates applied after this resource but before + any aws_api_gateway_deployment creation. More information about + REST API OpenAPI support can be found in the API Gateway Developer + Guide. + type: string + description: + description: Description of the REST API. If importing an OpenAPI + specification via the body argument, this corresponds to the + info.description field. If the argument value is provided and + is different than the OpenAPI value, the argument value will + override the OpenAPI value. + type: string + disableExecuteApiEndpoint: + description: Whether clients can invoke your API by using the + default execute-api endpoint. By default, clients can invoke + your API with the default https://{api_id}.execute-api.{region}.amazonaws.com + endpoint. To require that clients use a custom domain name to + invoke your API, disable the default endpoint. Defaults to false. + If importing an OpenAPI specification via the body argument, + this corresponds to the x-amazon-apigateway-endpoint-configuration + extension disableExecuteApiEndpoint property. 
If the argument + value is true and is different than the OpenAPI value, the argument + value will override the OpenAPI value. + type: boolean + endpointConfiguration: + description: Configuration block defining API endpoint configuration + including endpoint type. Defined below. + properties: + types: + description: 'List of endpoint types. This resource currently + only supports managing a single value. Valid values: EDGE, + REGIONAL or PRIVATE. If unspecified, defaults to EDGE. If + set to PRIVATE recommend to set put_rest_api_mode = merge + to not cause the endpoints and associated Route53 records + to be deleted. Refer to the documentation for more information + on the difference between edge-optimized and regional APIs.' + items: + type: string + type: array + vpcEndpointIds: + description: Set of VPC Endpoint identifiers. It is only supported + for PRIVATE endpoint type. If importing an OpenAPI specification + via the body argument, this corresponds to the x-amazon-apigateway-endpoint-configuration + extension vpcEndpointIds property. If the argument value + is provided and is different than the OpenAPI value, the + argument value will override the OpenAPI value. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + failOnWarnings: + description: Whether warnings while API Gateway is creating or + updating the resource should return an error or not. Defaults + to false + type: boolean + minimumCompressionSize: + description: Minimum response size to compress for the REST API. + String containing an integer value between -1 and 10485760 (10MB). + -1 will disable an existing compression configuration, and all + other values will enable compression with the configured size. + New resources can simply omit this argument to disable compression, + rather than setting the value to -1. If importing an OpenAPI + specification via the body argument, this corresponds to the + x-amazon-apigateway-minimum-compression-size extension. 
If the + argument value is provided and is different than the OpenAPI + value, the argument value will override the OpenAPI value. + type: string + name: + description: Name of the REST API. If importing an OpenAPI specification + via the body argument, this corresponds to the info.title field. + If the argument value is different than the OpenAPI value, the + argument value will override the OpenAPI value. + type: string + parameters: + additionalProperties: + type: string + description: Map of customizations for importing the specification + in the body argument. For example, to exclude DocumentationParts + from an imported API, set ignore equal to documentation. Additional + documentation, including other parameters such as basepath, + can be found in the API Gateway Developer Guide. + type: object + x-kubernetes-map-type: granular + putRestApiMode: + description: Mode of the PutRestApi operation when importing an + OpenAPI specification via the body argument (create or update + operation). Valid values are merge and overwrite. If unspecificed, + defaults to overwrite (for backwards compatibility). This corresponds + to the x-amazon-apigateway-put-integration-method extension. + If the argument value is provided and is different than the + OpenAPI value, the argument value will override the OpenAPI + value. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: RestAPIStatus defines the observed state of RestAPI. + properties: + atProvider: + properties: + apiKeySource: + description: Source of the API key for requests. Valid values + are HEADER (default) and AUTHORIZER. 
If importing an OpenAPI + specification via the body argument, this corresponds to the + x-amazon-apigateway-api-key-source extension. If the argument + value is provided and is different than the OpenAPI value, the + argument value will override the OpenAPI value. + type: string + arn: + description: ARN + type: string + binaryMediaTypes: + description: List of binary media types supported by the REST + API. By default, the REST API supports only UTF-8-encoded text + payloads. If importing an OpenAPI specification via the body + argument, this corresponds to the x-amazon-apigateway-binary-media-types + extension. If the argument value is provided and is different + than the OpenAPI value, the argument value will override the + OpenAPI value. + items: + type: string + type: array + body: + description: OpenAPI specification that defines the set of routes + and integrations to create as part of the REST API. This configuration, + and any updates to it, will replace all REST API configuration + except values overridden in this resource configuration and + other resource updates applied after this resource but before + any aws_api_gateway_deployment creation. More information about + REST API OpenAPI support can be found in the API Gateway Developer + Guide. + type: string + createdDate: + description: Creation date of the REST API + type: string + description: + description: Description of the REST API. If importing an OpenAPI + specification via the body argument, this corresponds to the + info.description field. If the argument value is provided and + is different than the OpenAPI value, the argument value will + override the OpenAPI value. + type: string + disableExecuteApiEndpoint: + description: Whether clients can invoke your API by using the + default execute-api endpoint. By default, clients can invoke + your API with the default https://{api_id}.execute-api.{region}.amazonaws.com + endpoint. 
To require that clients use a custom domain name to + invoke your API, disable the default endpoint. Defaults to false. + If importing an OpenAPI specification via the body argument, + this corresponds to the x-amazon-apigateway-endpoint-configuration + extension disableExecuteApiEndpoint property. If the argument + value is true and is different than the OpenAPI value, the argument + value will override the OpenAPI value. + type: boolean + endpointConfiguration: + description: Configuration block defining API endpoint configuration + including endpoint type. Defined below. + properties: + types: + description: 'List of endpoint types. This resource currently + only supports managing a single value. Valid values: EDGE, + REGIONAL or PRIVATE. If unspecified, defaults to EDGE. If + set to PRIVATE recommend to set put_rest_api_mode = merge + to not cause the endpoints and associated Route53 records + to be deleted. Refer to the documentation for more information + on the difference between edge-optimized and regional APIs.' + items: + type: string + type: array + vpcEndpointIds: + description: Set of VPC Endpoint identifiers. It is only supported + for PRIVATE endpoint type. If importing an OpenAPI specification + via the body argument, this corresponds to the x-amazon-apigateway-endpoint-configuration + extension vpcEndpointIds property. If the argument value + is provided and is different than the OpenAPI value, the + argument value will override the OpenAPI value. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + executionArn: + description: |- + Execution ARN part to be used in lambda_permission's source_arn + when allowing API Gateway to invoke a Lambda function, + e.g., arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j, which can be concatenated with allowed stage, method and resource path. 
+ type: string + failOnWarnings: + description: Whether warnings while API Gateway is creating or + updating the resource should return an error or not. Defaults + to false + type: boolean + id: + description: ID of the REST API + type: string + minimumCompressionSize: + description: Minimum response size to compress for the REST API. + String containing an integer value between -1 and 10485760 (10MB). + -1 will disable an existing compression configuration, and all + other values will enable compression with the configured size. + New resources can simply omit this argument to disable compression, + rather than setting the value to -1. If importing an OpenAPI + specification via the body argument, this corresponds to the + x-amazon-apigateway-minimum-compression-size extension. If the + argument value is provided and is different than the OpenAPI + value, the argument value will override the OpenAPI value. + type: string + name: + description: Name of the REST API. If importing an OpenAPI specification + via the body argument, this corresponds to the info.title field. + If the argument value is different than the OpenAPI value, the + argument value will override the OpenAPI value. + type: string + parameters: + additionalProperties: + type: string + description: Map of customizations for importing the specification + in the body argument. For example, to exclude DocumentationParts + from an imported API, set ignore equal to documentation. Additional + documentation, including other parameters such as basepath, + can be found in the API Gateway Developer Guide. + type: object + x-kubernetes-map-type: granular + policy: + description: JSON formatted policy document that controls access + to the API Gateway. We recommend using the aws_api_gateway_rest_api_policy + resource instead. If importing an OpenAPI specification via + the body argument, this corresponds to the x-amazon-apigateway-policy + extension. 
If the argument value is provided and is different + than the OpenAPI value, the argument value will override the + OpenAPI value. + type: string + putRestApiMode: + description: Mode of the PutRestApi operation when importing an + OpenAPI specification via the body argument (create or update + operation). Valid values are merge and overwrite. If unspecificed, + defaults to overwrite (for backwards compatibility). This corresponds + to the x-amazon-apigateway-put-integration-method extension. + If the argument value is provided and is different than the + OpenAPI value, the argument value will override the OpenAPI + value. + type: string + rootResourceId: + description: Resource ID of the REST API's root + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigateway.aws.upbound.io_stages.yaml b/package/crds/apigateway.aws.upbound.io_stages.yaml index 9be0500ed3..fa333d6926 100644 --- a/package/crds/apigateway.aws.upbound.io_stages.yaml +++ b/package/crds/apigateway.aws.upbound.io_stages.yaml @@ -913,3 +913,886 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Stage is the Schema for the Stages API. Manages an API Gateway + Stage. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StageSpec defines the desired state of Stage + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessLogSettings: + description: Enables access logs for the API stage. See Access + Log Settings below. + properties: + destinationArn: + description: ARN of the CloudWatch Logs log group or Kinesis + Data Firehose delivery stream to receive access logs. If + you specify a Kinesis Data Firehose delivery stream, the + stream name must begin with amazon-apigateway-. Automatically + removes trailing :* if present. + type: string + format: + description: |- + Formatting and values recorded in the logs. 
+ For more information on configuring the log format rules visit the AWS documentation + type: string + type: object + cacheClusterEnabled: + description: Whether a cache cluster is enabled for the stage + type: boolean + cacheClusterSize: + description: Size of the cache cluster for the stage, if enabled. + Allowed values include 0.5, 1.6, 6.1, 13.5, 28.4, 58.2, 118 + and 237. + type: string + canarySettings: + description: Configuration settings of a canary deployment. See + Canary Settings below. + properties: + percentTraffic: + description: Percent 0.0 - 100.0 of traffic to divert to the + canary deployment. + type: number + stageVariableOverrides: + additionalProperties: + type: string + description: Map of overridden stage variables (including + new variables) for the canary deployment. + type: object + x-kubernetes-map-type: granular + useStageCache: + description: Whether the canary deployment uses the stage + cache. Defaults to false. + type: boolean + type: object + clientCertificateId: + description: Identifier of a client certificate for the stage. + type: string + deploymentId: + description: ID of the deployment that the stage points to + type: string + deploymentIdRef: + description: Reference to a Deployment in apigateway to populate + deploymentId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deploymentIdSelector: + description: Selector for a Deployment in apigateway to populate + deploymentId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Description of the stage. + type: string + documentationVersion: + description: Version of the associated API documentation + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + restApiId: + description: ID of the associated REST API + type: string + restApiIdRef: + description: Reference to a RestAPI in apigateway to populate + restApiId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + restApiIdSelector: + description: Selector for a RestAPI in apigateway to populate + restApiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stageName: + description: Name of the stage + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + variables: + additionalProperties: + type: string + description: Map that defines the stage variables + type: object + x-kubernetes-map-type: granular + xrayTracingEnabled: + description: Whether active tracing with X-ray is enabled. Defaults + to false. + type: boolean + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessLogSettings: + description: Enables access logs for the API stage. See Access + Log Settings below. + properties: + destinationArn: + description: ARN of the CloudWatch Logs log group or Kinesis + Data Firehose delivery stream to receive access logs. If + you specify a Kinesis Data Firehose delivery stream, the + stream name must begin with amazon-apigateway-. Automatically + removes trailing :* if present. + type: string + format: + description: |- + Formatting and values recorded in the logs. 
+ For more information on configuring the log format rules visit the AWS documentation + type: string + type: object + cacheClusterEnabled: + description: Whether a cache cluster is enabled for the stage + type: boolean + cacheClusterSize: + description: Size of the cache cluster for the stage, if enabled. + Allowed values include 0.5, 1.6, 6.1, 13.5, 28.4, 58.2, 118 + and 237. + type: string + canarySettings: + description: Configuration settings of a canary deployment. See + Canary Settings below. + properties: + percentTraffic: + description: Percent 0.0 - 100.0 of traffic to divert to the + canary deployment. + type: number + stageVariableOverrides: + additionalProperties: + type: string + description: Map of overridden stage variables (including + new variables) for the canary deployment. + type: object + x-kubernetes-map-type: granular + useStageCache: + description: Whether the canary deployment uses the stage + cache. Defaults to false. + type: boolean + type: object + clientCertificateId: + description: Identifier of a client certificate for the stage. + type: string + deploymentId: + description: ID of the deployment that the stage points to + type: string + deploymentIdRef: + description: Reference to a Deployment in apigateway to populate + deploymentId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deploymentIdSelector: + description: Selector for a Deployment in apigateway to populate + deploymentId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Description of the stage. + type: string + documentationVersion: + description: Version of the associated API documentation + type: string + restApiId: + description: ID of the associated REST API + type: string + restApiIdRef: + description: Reference to a RestAPI in apigateway to populate + restApiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + restApiIdSelector: + description: Selector for a RestAPI in apigateway to populate + restApiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stageName: + description: Name of the stage + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + variables: + additionalProperties: + type: string + description: Map that defines the stage variables + type: object + x-kubernetes-map-type: granular + xrayTracingEnabled: + description: Whether active tracing with X-ray is enabled. Defaults + to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.stageName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.stageName) + || (has(self.initProvider) && has(self.initProvider.stageName))' + status: + description: StageStatus defines the observed state of Stage. + properties: + atProvider: + properties: + accessLogSettings: + description: Enables access logs for the API stage. See Access + Log Settings below. + properties: + destinationArn: + description: ARN of the CloudWatch Logs log group or Kinesis + Data Firehose delivery stream to receive access logs. If + you specify a Kinesis Data Firehose delivery stream, the + stream name must begin with amazon-apigateway-. Automatically + removes trailing :* if present. + type: string + format: + description: |- + Formatting and values recorded in the logs. + For more information on configuring the log format rules visit the AWS documentation + type: string + type: object + arn: + description: ARN + type: string + cacheClusterEnabled: + description: Whether a cache cluster is enabled for the stage + type: boolean + cacheClusterSize: + description: Size of the cache cluster for the stage, if enabled. + Allowed values include 0.5, 1.6, 6.1, 13.5, 28.4, 58.2, 118 + and 237. + type: string + canarySettings: + description: Configuration settings of a canary deployment. See + Canary Settings below. + properties: + percentTraffic: + description: Percent 0.0 - 100.0 of traffic to divert to the + canary deployment. + type: number + stageVariableOverrides: + additionalProperties: + type: string + description: Map of overridden stage variables (including + new variables) for the canary deployment. 
+ type: object + x-kubernetes-map-type: granular + useStageCache: + description: Whether the canary deployment uses the stage + cache. Defaults to false. + type: boolean + type: object + clientCertificateId: + description: Identifier of a client certificate for the stage. + type: string + deploymentId: + description: ID of the deployment that the stage points to + type: string + description: + description: Description of the stage. + type: string + documentationVersion: + description: Version of the associated API documentation + type: string + executionArn: + description: |- + Execution ARN to be used in lambda_permission's source_arn + when allowing API Gateway to invoke a Lambda function, + e.g., arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j/prod + type: string + id: + description: ID of the stage + type: string + invokeUrl: + description: |- + URL to invoke the API pointing to the stage, + e.g., https://z4675bid1j.execute-api.eu-west-2.amazonaws.com/prod + type: string + restApiId: + description: ID of the associated REST API + type: string + stageName: + description: Name of the stage + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + variables: + additionalProperties: + type: string + description: Map that defines the stage variables + type: object + x-kubernetes-map-type: granular + webAclArn: + description: ARN of the WebAcl associated with the Stage. + type: string + xrayTracingEnabled: + description: Whether active tracing with X-ray is enabled. Defaults + to false. + type: boolean + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigateway.aws.upbound.io_usageplans.yaml b/package/crds/apigateway.aws.upbound.io_usageplans.yaml index 85ccafc194..5d19035faf 100644 --- a/package/crds/apigateway.aws.upbound.io_usageplans.yaml +++ b/package/crds/apigateway.aws.upbound.io_usageplans.yaml @@ -896,3 +896,869 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: UsagePlan is the Schema for the UsagePlans API. Provides an API + Gateway Usage Plan. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UsagePlanSpec defines the desired state of UsagePlan + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiStages: + description: Associated API stages of the usage plan. + items: + properties: + apiId: + description: API Id of the associated API stage in a usage + plan. + type: string + apiIdRef: + description: Reference to a RestAPI in apigateway to populate + apiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a RestAPI in apigateway to populate + apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stage: + description: API stage name of the associated API stage + in a usage plan. + type: string + stageRef: + description: Reference to a Stage in apigateway to populate + stage. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stageSelector: + description: Selector for a Stage in apigateway to populate + stage. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + throttle: + description: The throttling limits of the usage plan. + items: + properties: + burstLimit: + description: The API request burst limit, the maximum + rate limit over a time ranging from one to a few + seconds, depending upon whether the underlying token + bucket is at its full capacity. 
+                                type: number
+                              path:
+                                description: Method to apply the throttle settings
+                                  for. Specify the path and method, for example /test/GET.
+                                type: string
+                              rateLimit:
+                                description: The API request steady-state rate limit.
+                                type: number
+                            type: object
+                          type: array
+                      type: object
+                    type: array
+                  description:
+                    description: Description of a usage plan.
+                    type: string
+                  name:
+                    description: Name of the usage plan.
+                    type: string
+                  productCode:
+                    description: AWS Marketplace product identifier to associate with
+                      the usage plan as a SaaS product on AWS Marketplace.
+                    type: string
+                  quotaSettings:
+                    description: The quota settings of the usage plan.
+                    properties:
+                      limit:
+                        description: Maximum number of requests that can be made in
+                          a given time period.
+                        type: number
+                      offset:
+                        description: Number of requests subtracted from the given
+                          limit in the initial time period.
+                        type: number
+                      period:
+                        description: Time period in which the limit applies. Valid
+                          values are "DAY", "WEEK" or "MONTH".
+                        type: string
+                    type: object
+                  region:
+                    description: Region is the region you'd like your resource to
+                      be created in.
+                    type: string
+                  tags:
+                    additionalProperties:
+                      type: string
+                    description: Key-value map of resource tags.
+                    type: object
+                    x-kubernetes-map-type: granular
+                  throttleSettings:
+                    description: The throttling limits of the usage plan.
+                    properties:
+                      burstLimit:
+                        description: The API request burst limit, the maximum rate
+                          limit over a time ranging from one to a few seconds, depending
+                          upon whether the underlying token bucket is at its full
+                          capacity.
+                        type: number
+                      rateLimit:
+                        description: The API request steady-state rate limit.
+                        type: number
+                    type: object
+                required:
+                - region
+                type: object
+              initProvider:
+                description: |-
+                  THIS IS A BETA FIELD. It will be honored
+                  unless the Management Policies feature flag is disabled.
+                  InitProvider holds the same fields as ForProvider, with the exception
+                  of Identifier and other resource reference fields. The fields that are
+                  in InitProvider are merged into ForProvider when the resource is created.
+                  The same fields are also added to the terraform ignore_changes hook, to
+                  avoid updating them after creation. This is useful for fields that are
+                  required on creation, but we do not desire to update them after creation,
+                  for example because of an external controller is managing them, like an
+                  autoscaler.
+                properties:
+                  apiStages:
+                    description: Associated API stages of the usage plan.
+                    items:
+                      properties:
+                        apiId:
+                          description: API Id of the associated API stage in a usage
+                            plan.
+                          type: string
+                        apiIdRef:
+                          description: Reference to a RestAPI in apigateway to populate
+                            apiId.
+                          properties:
+                            name:
+                              description: Name of the referenced object.
+                              type: string
+                            policy:
+                              description: Policies for referencing.
+                              properties:
+                                resolution:
+                                  default: Required
+                                  description: |-
+                                    Resolution specifies whether resolution of this reference is required.
+                                    The default is 'Required', which means the reconcile will fail if the
+                                    reference cannot be resolved. 'Optional' means this reference will be
+                                    a no-op if it cannot be resolved.
+                                  enum:
+                                  - Required
+                                  - Optional
+                                  type: string
+                                resolve:
+                                  description: |-
+                                    Resolve specifies when this reference should be resolved. The default
+                                    is 'IfNotPresent', which will attempt to resolve the reference only when
+                                    the corresponding field is not present. Use 'Always' to resolve the
+                                    reference on every reconcile.
+                                  enum:
+                                  - Always
+                                  - IfNotPresent
+                                  type: string
+                              type: object
+                            required:
+                            - name
+                          type: object
+                        apiIdSelector:
+                          description: Selector for a RestAPI in apigateway to populate
+                            apiId.
+                          properties:
+                            matchControllerRef:
+                              description: |-
+                                MatchControllerRef ensures an object with the same controller reference
+                                as the selecting object is selected.
+                              type: boolean
+                            matchLabels:
+                              additionalProperties:
+                                type: string
+                              description: MatchLabels ensures an object with matching
+                                labels is selected.
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stage: + description: API stage name of the associated API stage + in a usage plan. + type: string + stageRef: + description: Reference to a Stage in apigateway to populate + stage. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stageSelector: + description: Selector for a Stage in apigateway to populate + stage. 
+                          properties:
+                            matchControllerRef:
+                              description: |-
+                                MatchControllerRef ensures an object with the same controller reference
+                                as the selecting object is selected.
+                              type: boolean
+                            matchLabels:
+                              additionalProperties:
+                                type: string
+                              description: MatchLabels ensures an object with matching
+                                labels is selected.
+                              type: object
+                            policy:
+                              description: Policies for selection.
+                              properties:
+                                resolution:
+                                  default: Required
+                                  description: |-
+                                    Resolution specifies whether resolution of this reference is required.
+                                    The default is 'Required', which means the reconcile will fail if the
+                                    reference cannot be resolved. 'Optional' means this reference will be
+                                    a no-op if it cannot be resolved.
+                                  enum:
+                                  - Required
+                                  - Optional
+                                  type: string
+                                resolve:
+                                  description: |-
+                                    Resolve specifies when this reference should be resolved. The default
+                                    is 'IfNotPresent', which will attempt to resolve the reference only when
+                                    the corresponding field is not present. Use 'Always' to resolve the
+                                    reference on every reconcile.
+                                  enum:
+                                  - Always
+                                  - IfNotPresent
+                                  type: string
+                              type: object
+                          type: object
+                        throttle:
+                          description: The throttling limits of the usage plan.
+                          items:
+                            properties:
+                              burstLimit:
+                                description: The API request burst limit, the maximum
+                                  rate limit over a time ranging from one to a few
+                                  seconds, depending upon whether the underlying token
+                                  bucket is at its full capacity.
+                                type: number
+                              path:
+                                description: Method to apply the throttle settings
+                                  for. Specify the path and method, for example /test/GET.
+                                type: string
+                              rateLimit:
+                                description: The API request steady-state rate limit.
+                                type: number
+                            type: object
+                          type: array
+                      type: object
+                    type: array
+                  description:
+                    description: Description of a usage plan.
+                    type: string
+                  name:
+                    description: Name of the usage plan.
+                    type: string
+                  productCode:
+                    description: AWS Marketplace product identifier to associate with
+                      the usage plan as a SaaS product on AWS Marketplace.
+ type: string + quotaSettings: + description: The quota settings of the usage plan. + properties: + limit: + description: Maximum number of requests that can be made in + a given time period. + type: number + offset: + description: Number of requests subtracted from the given + limit in the initial time period. + type: number + period: + description: Time period in which the limit applies. Valid + values are "DAY", "WEEK" or "MONTH". + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + throttleSettings: + description: The throttling limits of the usage plan. + properties: + burstLimit: + description: The API request burst limit, the maximum rate + limit over a time ranging from one to a few seconds, depending + upon whether the underlying token bucket is at its full + capacity. + type: number + rateLimit: + description: The API request steady-state rate limit. + type: number + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: UsagePlanStatus defines the observed state of UsagePlan. + properties: + atProvider: + properties: + apiStages: + description: Associated API stages of the usage plan. + items: + properties: + apiId: + description: API Id of the associated API stage in a usage + plan. + type: string + stage: + description: API stage name of the associated API stage + in a usage plan. + type: string + throttle: + description: The throttling limits of the usage plan. + items: + properties: + burstLimit: + description: The API request burst limit, the maximum + rate limit over a time ranging from one to a few + seconds, depending upon whether the underlying token + bucket is at its full capacity. 
+ type: number + path: + description: Method to apply the throttle settings + for. Specfiy the path and method, for example /test/GET. + type: string + rateLimit: + description: The API request steady-state rate limit. + type: number + type: object + type: array + type: object + type: array + arn: + description: ARN + type: string + description: + description: Description of a usage plan. + type: string + id: + description: ID of the API resource + type: string + name: + description: Name of the usage plan. + type: string + productCode: + description: AWS Marketplace product identifier to associate with + the usage plan as a SaaS product on AWS Marketplace. + type: string + quotaSettings: + description: The quota settings of the usage plan. + properties: + limit: + description: Maximum number of requests that can be made in + a given time period. + type: number + offset: + description: Number of requests subtracted from the given + limit in the initial time period. + type: number + period: + description: Time period in which the limit applies. Valid + values are "DAY", "WEEK" or "MONTH". + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + throttleSettings: + description: The throttling limits of the usage plan. + properties: + burstLimit: + description: The API request burst limit, the maximum rate + limit over a time ranging from one to a few seconds, depending + upon whether the underlying token bucket is at its full + capacity. + type: number + rateLimit: + description: The API request steady-state rate limit. + type: number + type: object + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigatewayv2.aws.upbound.io_apis.yaml b/package/crds/apigatewayv2.aws.upbound.io_apis.yaml index f04240b464..8092973177 100644 --- a/package/crds/apigatewayv2.aws.upbound.io_apis.yaml +++ b/package/crds/apigatewayv2.aws.upbound.io_apis.yaml @@ -664,3 +664,643 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: API is the Schema for the APIs API. Manages an Amazon API Gateway + Version 2 API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: APISpec defines the desired state of API + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiKeySelectionExpression: + description: |- + An API key selection expression. + Valid values: $context.authorizer.usageIdentifierKey, $request.header.x-api-key. Defaults to $request.header.x-api-key. + Applicable for WebSocket APIs. + type: string + body: + description: An OpenAPI specification that defines the set of + routes and integrations to create as part of the HTTP APIs. + Supported only for HTTP APIs. + type: string + corsConfiguration: + description: Cross-origin resource sharing (CORS) configuration. + Applicable for HTTP APIs. + properties: + allowCredentials: + description: Whether credentials are included in the CORS + request. + type: boolean + allowHeaders: + description: Set of allowed HTTP headers. + items: + type: string + type: array + x-kubernetes-list-type: set + allowMethods: + description: Set of allowed HTTP methods. + items: + type: string + type: array + x-kubernetes-list-type: set + allowOrigins: + description: Set of allowed origins. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + exposeHeaders: + description: Set of exposed HTTP headers. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAge: + description: Number of seconds that the browser should cache + preflight request results. + type: number + type: object + credentialsArn: + description: Part of quick create. Specifies any credentials required + for the integration. Applicable for HTTP APIs. + type: string + description: + description: Description of the API. Must be less than or equal + to 1024 characters in length. + type: string + disableExecuteApiEndpoint: + description: |- + Whether clients can invoke the API by using the default execute-api endpoint. + By default, clients can invoke the API with the default {api_id}.execute-api.{region}.amazonaws.com endpoint. + To require that clients use a custom domain name to invoke the API, disable the default endpoint. + type: boolean + failOnWarnings: + description: Whether warnings should return an error while API + Gateway is creating or updating the resource using an OpenAPI + specification. Defaults to false. Applicable for HTTP APIs. + type: boolean + name: + description: Name of the API. Must be less than or equal to 128 + characters in length. + type: string + protocolType: + description: 'API protocol. Valid values: HTTP, WEBSOCKET.' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + routeKey: + description: Part of quick create. Specifies any route key. Applicable + for HTTP APIs. + type: string + routeSelectionExpression: + description: |- + The route selection expression for the API. + Defaults to $request.method $request.path. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + target: + description: |- + Part of quick create. 
Quick create produces an API with an integration, a default catch-all route, and a default stage which is configured to automatically deploy changes. + For HTTP integrations, specify a fully qualified URL. For Lambda integrations, specify a function ARN. + The type of the integration will be HTTP_PROXY or AWS_PROXY, respectively. Applicable for HTTP APIs. + type: string + version: + description: Version identifier for the API. Must be between 1 + and 64 characters in length. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + apiKeySelectionExpression: + description: |- + An API key selection expression. + Valid values: $context.authorizer.usageIdentifierKey, $request.header.x-api-key. Defaults to $request.header.x-api-key. + Applicable for WebSocket APIs. + type: string + body: + description: An OpenAPI specification that defines the set of + routes and integrations to create as part of the HTTP APIs. + Supported only for HTTP APIs. + type: string + corsConfiguration: + description: Cross-origin resource sharing (CORS) configuration. + Applicable for HTTP APIs. + properties: + allowCredentials: + description: Whether credentials are included in the CORS + request. + type: boolean + allowHeaders: + description: Set of allowed HTTP headers. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + allowMethods: + description: Set of allowed HTTP methods. + items: + type: string + type: array + x-kubernetes-list-type: set + allowOrigins: + description: Set of allowed origins. + items: + type: string + type: array + x-kubernetes-list-type: set + exposeHeaders: + description: Set of exposed HTTP headers. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAge: + description: Number of seconds that the browser should cache + preflight request results. + type: number + type: object + credentialsArn: + description: Part of quick create. Specifies any credentials required + for the integration. Applicable for HTTP APIs. + type: string + description: + description: Description of the API. Must be less than or equal + to 1024 characters in length. + type: string + disableExecuteApiEndpoint: + description: |- + Whether clients can invoke the API by using the default execute-api endpoint. + By default, clients can invoke the API with the default {api_id}.execute-api.{region}.amazonaws.com endpoint. + To require that clients use a custom domain name to invoke the API, disable the default endpoint. + type: boolean + failOnWarnings: + description: Whether warnings should return an error while API + Gateway is creating or updating the resource using an OpenAPI + specification. Defaults to false. Applicable for HTTP APIs. + type: boolean + name: + description: Name of the API. Must be less than or equal to 128 + characters in length. + type: string + protocolType: + description: 'API protocol. Valid values: HTTP, WEBSOCKET.' + type: string + routeKey: + description: Part of quick create. Specifies any route key. Applicable + for HTTP APIs. + type: string + routeSelectionExpression: + description: |- + The route selection expression for the API. + Defaults to $request.method $request.path. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + target: + description: |- + Part of quick create. Quick create produces an API with an integration, a default catch-all route, and a default stage which is configured to automatically deploy changes. + For HTTP integrations, specify a fully qualified URL. For Lambda integrations, specify a function ARN. + The type of the integration will be HTTP_PROXY or AWS_PROXY, respectively. Applicable for HTTP APIs. + type: string + version: + description: Version identifier for the API. Must be between 1 + and 64 characters in length. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.protocolType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.protocolType) + || (has(self.initProvider) && has(self.initProvider.protocolType))' + status: + description: APIStatus defines the observed state of API. + properties: + atProvider: + properties: + apiEndpoint: + description: URI of the API, of the form https://{api-id}.execute-api.{region}.amazonaws.com + for HTTP APIs and wss://{api-id}.execute-api.{region}.amazonaws.com + for WebSocket APIs. + type: string + apiKeySelectionExpression: + description: |- + An API key selection expression. + Valid values: $context.authorizer.usageIdentifierKey, $request.header.x-api-key. Defaults to $request.header.x-api-key. + Applicable for WebSocket APIs. + type: string + arn: + description: ARN of the API. + type: string + body: + description: An OpenAPI specification that defines the set of + routes and integrations to create as part of the HTTP APIs. + Supported only for HTTP APIs. 
+ type: string + corsConfiguration: + description: Cross-origin resource sharing (CORS) configuration. + Applicable for HTTP APIs. + properties: + allowCredentials: + description: Whether credentials are included in the CORS + request. + type: boolean + allowHeaders: + description: Set of allowed HTTP headers. + items: + type: string + type: array + x-kubernetes-list-type: set + allowMethods: + description: Set of allowed HTTP methods. + items: + type: string + type: array + x-kubernetes-list-type: set + allowOrigins: + description: Set of allowed origins. + items: + type: string + type: array + x-kubernetes-list-type: set + exposeHeaders: + description: Set of exposed HTTP headers. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAge: + description: Number of seconds that the browser should cache + preflight request results. + type: number + type: object + credentialsArn: + description: Part of quick create. Specifies any credentials required + for the integration. Applicable for HTTP APIs. + type: string + description: + description: Description of the API. Must be less than or equal + to 1024 characters in length. + type: string + disableExecuteApiEndpoint: + description: |- + Whether clients can invoke the API by using the default execute-api endpoint. + By default, clients can invoke the API with the default {api_id}.execute-api.{region}.amazonaws.com endpoint. + To require that clients use a custom domain name to invoke the API, disable the default endpoint. + type: boolean + executionArn: + description: |- + ARN prefix to be used in an aws_lambda_permission's source_arn attribute + or in an aws_iam_policy to authorize access to the @connections API. + See the Amazon API Gateway Developer Guide for details. + type: string + failOnWarnings: + description: Whether warnings should return an error while API + Gateway is creating or updating the resource using an OpenAPI + specification. Defaults to false. Applicable for HTTP APIs. 
+ type: boolean + id: + description: API identifier. + type: string + name: + description: Name of the API. Must be less than or equal to 128 + characters in length. + type: string + protocolType: + description: 'API protocol. Valid values: HTTP, WEBSOCKET.' + type: string + routeKey: + description: Part of quick create. Specifies any route key. Applicable + for HTTP APIs. + type: string + routeSelectionExpression: + description: |- + The route selection expression for the API. + Defaults to $request.method $request.path. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + target: + description: |- + Part of quick create. Quick create produces an API with an integration, a default catch-all route, and a default stage which is configured to automatically deploy changes. + For HTTP integrations, specify a fully qualified URL. For Lambda integrations, specify a function ARN. + The type of the integration will be HTTP_PROXY or AWS_PROXY, respectively. Applicable for HTTP APIs. + type: string + version: + description: Version identifier for the API. Must be between 1 + and 64 characters in length. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigatewayv2.aws.upbound.io_authorizers.yaml b/package/crds/apigatewayv2.aws.upbound.io_authorizers.yaml index 38fa1737ec..aaf4215065 100644 --- a/package/crds/apigatewayv2.aws.upbound.io_authorizers.yaml +++ b/package/crds/apigatewayv2.aws.upbound.io_authorizers.yaml @@ -846,3 +846,825 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Authorizer is the Schema for the Authorizers API. Manages an + Amazon API Gateway Version 2 authorizer. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AuthorizerSpec defines the desired state of Authorizer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiId: + description: API identifier. + type: string + apiIdRef: + description: Reference to a API in apigatewayv2 to populate apiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a API in apigatewayv2 to populate apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + authorizerCredentialsArn: + description: |- + Required credentials as an IAM role for API Gateway to invoke the authorizer. + Supported only for REQUEST authorizers. + type: string + authorizerPayloadFormatVersion: + description: |- + Format of the payload sent to an HTTP API Lambda authorizer. Required for HTTP API Lambda authorizers. + Valid values: 1.0, 2.0. + type: string + authorizerResultTtlInSeconds: + description: |- + Time to live (TTL) for cached authorizer results, in seconds. If it equals 0, authorization caching is disabled. + If it is greater than 0, API Gateway caches authorizer responses. The maximum value is 3600, or 1 hour. Defaults to 300. 
+ Supported only for HTTP API Lambda authorizers. + type: number + authorizerType: + description: |- + Authorizer type. Valid values: JWT, REQUEST. + Specify REQUEST for a Lambda function using incoming request parameters. + For HTTP APIs, specify JWT to use JSON Web Tokens. + type: string + authorizerUri: + description: |- + Authorizer's Uniform Resource Identifier (URI). + For REQUEST authorizers this must be a well-formed Lambda function URI, such as the invoke_arn attribute of the aws_lambda_function resource. + Supported only for REQUEST authorizers. Must be between 1 and 2048 characters in length. + type: string + authorizerUriRef: + description: Reference to a Function in lambda to populate authorizerUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + authorizerUriSelector: + description: Selector for a Function in lambda to populate authorizerUri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enableSimpleResponses: + description: |- + Whether a Lambda authorizer returns a response in a simple format. If enabled, the Lambda authorizer can return a boolean value instead of an IAM policy. + Supported only for HTTP APIs. + type: boolean + identitySources: + description: |- + Identity sources for which authorization is requested. + For REQUEST authorizers the value is a list of one or more mapping expressions of the specified request parameters. + For JWT authorizers the single entry specifies where to extract the JSON Web Token (JWT) from inbound requests. + items: + type: string + type: array + x-kubernetes-list-type: set + jwtConfiguration: + description: |- + Configuration of a JWT authorizer. Required for the JWT authorizer type. + Supported only for HTTP APIs. + properties: + audience: + description: List of the intended recipients of the JWT. A + valid JWT must provide an aud that matches at least one + entry in this list. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + issuer: + description: Base domain of the identity provider that issues + JSON Web Tokens, such as the endpoint attribute of the aws_cognito_user_pool + resource. + type: string + type: object + name: + description: Name of the authorizer. Must be between 1 and 128 + characters in length. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + apiId: + description: API identifier. + type: string + apiIdRef: + description: Reference to a API in apigatewayv2 to populate apiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a API in apigatewayv2 to populate apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + authorizerCredentialsArn: + description: |- + Required credentials as an IAM role for API Gateway to invoke the authorizer. + Supported only for REQUEST authorizers. + type: string + authorizerPayloadFormatVersion: + description: |- + Format of the payload sent to an HTTP API Lambda authorizer. Required for HTTP API Lambda authorizers. + Valid values: 1.0, 2.0. + type: string + authorizerResultTtlInSeconds: + description: |- + Time to live (TTL) for cached authorizer results, in seconds. 
If it equals 0, authorization caching is disabled. + If it is greater than 0, API Gateway caches authorizer responses. The maximum value is 3600, or 1 hour. Defaults to 300. + Supported only for HTTP API Lambda authorizers. + type: number + authorizerType: + description: |- + Authorizer type. Valid values: JWT, REQUEST. + Specify REQUEST for a Lambda function using incoming request parameters. + For HTTP APIs, specify JWT to use JSON Web Tokens. + type: string + authorizerUri: + description: |- + Authorizer's Uniform Resource Identifier (URI). + For REQUEST authorizers this must be a well-formed Lambda function URI, such as the invoke_arn attribute of the aws_lambda_function resource. + Supported only for REQUEST authorizers. Must be between 1 and 2048 characters in length. + type: string + authorizerUriRef: + description: Reference to a Function in lambda to populate authorizerUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + authorizerUriSelector: + description: Selector for a Function in lambda to populate authorizerUri. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enableSimpleResponses: + description: |- + Whether a Lambda authorizer returns a response in a simple format. If enabled, the Lambda authorizer can return a boolean value instead of an IAM policy. + Supported only for HTTP APIs. + type: boolean + identitySources: + description: |- + Identity sources for which authorization is requested. + For REQUEST authorizers the value is a list of one or more mapping expressions of the specified request parameters. + For JWT authorizers the single entry specifies where to extract the JSON Web Token (JWT) from inbound requests. + items: + type: string + type: array + x-kubernetes-list-type: set + jwtConfiguration: + description: |- + Configuration of a JWT authorizer. Required for the JWT authorizer type. + Supported only for HTTP APIs. + properties: + audience: + description: List of the intended recipients of the JWT. 
A + valid JWT must provide an aud that matches at least one + entry in this list. + items: + type: string + type: array + x-kubernetes-list-type: set + issuer: + description: Base domain of the identity provider that issues + JSON Web Tokens, such as the endpoint attribute of the aws_cognito_user_pool + resource. + type: string + type: object + name: + description: Name of the authorizer. Must be between 1 and 128 + characters in length. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.authorizerType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authorizerType) + || (has(self.initProvider) && has(self.initProvider.authorizerType))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: AuthorizerStatus defines the observed state of Authorizer. + properties: + atProvider: + properties: + apiId: + description: API identifier. + type: string + authorizerCredentialsArn: + description: |- + Required credentials as an IAM role for API Gateway to invoke the authorizer. + Supported only for REQUEST authorizers. + type: string + authorizerPayloadFormatVersion: + description: |- + Format of the payload sent to an HTTP API Lambda authorizer. Required for HTTP API Lambda authorizers. + Valid values: 1.0, 2.0. + type: string + authorizerResultTtlInSeconds: + description: |- + Time to live (TTL) for cached authorizer results, in seconds. If it equals 0, authorization caching is disabled. + If it is greater than 0, API Gateway caches authorizer responses. The maximum value is 3600, or 1 hour. Defaults to 300. + Supported only for HTTP API Lambda authorizers. + type: number + authorizerType: + description: |- + Authorizer type. Valid values: JWT, REQUEST. + Specify REQUEST for a Lambda function using incoming request parameters. + For HTTP APIs, specify JWT to use JSON Web Tokens. + type: string + authorizerUri: + description: |- + Authorizer's Uniform Resource Identifier (URI). 
+ For REQUEST authorizers this must be a well-formed Lambda function URI, such as the invoke_arn attribute of the aws_lambda_function resource. + Supported only for REQUEST authorizers. Must be between 1 and 2048 characters in length. + type: string + enableSimpleResponses: + description: |- + Whether a Lambda authorizer returns a response in a simple format. If enabled, the Lambda authorizer can return a boolean value instead of an IAM policy. + Supported only for HTTP APIs. + type: boolean + id: + description: Authorizer identifier. + type: string + identitySources: + description: |- + Identity sources for which authorization is requested. + For REQUEST authorizers the value is a list of one or more mapping expressions of the specified request parameters. + For JWT authorizers the single entry specifies where to extract the JSON Web Token (JWT) from inbound requests. + items: + type: string + type: array + x-kubernetes-list-type: set + jwtConfiguration: + description: |- + Configuration of a JWT authorizer. Required for the JWT authorizer type. + Supported only for HTTP APIs. + properties: + audience: + description: List of the intended recipients of the JWT. A + valid JWT must provide an aud that matches at least one + entry in this list. + items: + type: string + type: array + x-kubernetes-list-type: set + issuer: + description: Base domain of the identity provider that issues + JSON Web Tokens, such as the endpoint attribute of the aws_cognito_user_pool + resource. + type: string + type: object + name: + description: Name of the authorizer. Must be between 1 and 128 + characters in length. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigatewayv2.aws.upbound.io_domainnames.yaml b/package/crds/apigatewayv2.aws.upbound.io_domainnames.yaml index 9e555fe733..23cd44b0f2 100644 --- a/package/crds/apigatewayv2.aws.upbound.io_domainnames.yaml +++ b/package/crds/apigatewayv2.aws.upbound.io_domainnames.yaml @@ -664,3 +664,634 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DomainName is the Schema for the DomainNames API. Manages an + Amazon API Gateway Version 2 domain name. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DomainNameSpec defines the desired state of DomainName + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + domainNameConfiguration: + description: Domain name configuration. See below. + properties: + certificateArn: + description: ARN of an AWS-managed certificate that will be + used by the endpoint for the domain name. AWS Certificate + Manager is the only supported source. Use the aws_acm_certificate + resource to configure an ACM certificate. + type: string + certificateArnRef: + description: Reference to a Certificate in acm to populate + certificateArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateArnSelector: + description: Selector for a Certificate in acm to populate + certificateArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + endpointType: + description: 'Endpoint type. Valid values: REGIONAL.' + type: string + ownershipVerificationCertificateArn: + description: ARN of the AWS-issued certificate used to validate + custom domain ownership (when certificate_arn is issued + via an ACM Private CA or mutual_tls_authentication is configured + with an ACM-imported certificate.) + type: string + securityPolicy: + description: 'Transport Layer Security (TLS) version of the + security policy for the domain name. 
Valid values: TLS_1_2.' + type: string + type: object + mutualTlsAuthentication: + description: Mutual TLS authentication configuration for the domain + name. + properties: + truststoreUri: + description: Amazon S3 URL that specifies the truststore for + mutual TLS authentication, for example, s3://bucket-name/key-name. + The truststore can contain certificates from public or private + certificate authorities. To update the truststore, upload + a new version to S3, and then update your custom domain + name to use the new version. + type: string + truststoreVersion: + description: Version of the S3 object that contains the truststore. + To specify a version, you must have versioning enabled for + the S3 bucket. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + domainNameConfiguration: + description: Domain name configuration. See below. + properties: + certificateArn: + description: ARN of an AWS-managed certificate that will be + used by the endpoint for the domain name. AWS Certificate + Manager is the only supported source. 
Use the aws_acm_certificate + resource to configure an ACM certificate. + type: string + certificateArnRef: + description: Reference to a Certificate in acm to populate + certificateArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateArnSelector: + description: Selector for a Certificate in acm to populate + certificateArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + endpointType: + description: 'Endpoint type. Valid values: REGIONAL.' + type: string + ownershipVerificationCertificateArn: + description: ARN of the AWS-issued certificate used to validate + custom domain ownership (when certificate_arn is issued + via an ACM Private CA or mutual_tls_authentication is configured + with an ACM-imported certificate.) + type: string + securityPolicy: + description: 'Transport Layer Security (TLS) version of the + security policy for the domain name. Valid values: TLS_1_2.' + type: string + type: object + mutualTlsAuthentication: + description: Mutual TLS authentication configuration for the domain + name. + properties: + truststoreUri: + description: Amazon S3 URL that specifies the truststore for + mutual TLS authentication, for example, s3://bucket-name/key-name. + The truststore can contain certificates from public or private + certificate authorities. To update the truststore, upload + a new version to S3, and then update your custom domain + name to use the new version. + type: string + truststoreVersion: + description: Version of the S3 object that contains the truststore. + To specify a version, you must have versioning enabled for + the S3 bucket. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.domainNameConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.domainNameConfiguration) + || (has(self.initProvider) && has(self.initProvider.domainNameConfiguration))' + status: + description: DomainNameStatus defines the observed state of DomainName. + properties: + atProvider: + properties: + apiMappingSelectionExpression: + description: API mapping selection expression for the domain name. 
+ type: string + arn: + description: ARN of the domain name. + type: string + domainNameConfiguration: + description: Domain name configuration. See below. + properties: + certificateArn: + description: ARN of an AWS-managed certificate that will be + used by the endpoint for the domain name. AWS Certificate + Manager is the only supported source. Use the aws_acm_certificate + resource to configure an ACM certificate. + type: string + endpointType: + description: 'Endpoint type. Valid values: REGIONAL.' + type: string + hostedZoneId: + description: (Computed) Amazon Route 53 Hosted Zone ID of + the endpoint. + type: string + ownershipVerificationCertificateArn: + description: ARN of the AWS-issued certificate used to validate + custom domain ownership (when certificate_arn is issued + via an ACM Private CA or mutual_tls_authentication is configured + with an ACM-imported certificate.) + type: string + securityPolicy: + description: 'Transport Layer Security (TLS) version of the + security policy for the domain name. Valid values: TLS_1_2.' + type: string + targetDomainName: + description: (Computed) Target domain name. + type: string + type: object + id: + description: Domain name identifier. + type: string + mutualTlsAuthentication: + description: Mutual TLS authentication configuration for the domain + name. + properties: + truststoreUri: + description: Amazon S3 URL that specifies the truststore for + mutual TLS authentication, for example, s3://bucket-name/key-name. + The truststore can contain certificates from public or private + certificate authorities. To update the truststore, upload + a new version to S3, and then update your custom domain + name to use the new version. + type: string + truststoreVersion: + description: Version of the S3 object that contains the truststore. + To specify a version, you must have versioning enabled for + the S3 bucket. 
+ type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigatewayv2.aws.upbound.io_integrations.yaml b/package/crds/apigatewayv2.aws.upbound.io_integrations.yaml index 227cca083a..9b1fbb007e 100644 --- a/package/crds/apigatewayv2.aws.upbound.io_integrations.yaml +++ b/package/crds/apigatewayv2.aws.upbound.io_integrations.yaml @@ -1266,3 +1266,1245 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Integration is the Schema for the Integrations API. Manages an + Amazon API Gateway Version 2 integration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IntegrationSpec defines the desired state of Integration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiId: + description: API identifier. + type: string + apiIdRef: + description: Reference to a API in apigatewayv2 to populate apiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a API in apigatewayv2 to populate apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectionId: + description: ID of the VPC link for a private integration. Supported + only for HTTP APIs. Must be between 1 and 1024 characters in + length. + type: string + connectionIdRef: + description: Reference to a VPCLink in apigatewayv2 to populate + connectionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionIdSelector: + description: Selector for a VPCLink in apigatewayv2 to populate + connectionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectionType: + description: 'Type of the network connection to the integration + endpoint. Valid values: INTERNET, VPC_LINK. Default is INTERNET.' + type: string + contentHandlingStrategy: + description: 'How to handle response payload content type conversions. 
+ Valid values: CONVERT_TO_BINARY, CONVERT_TO_TEXT. Supported + only for WebSocket APIs.' + type: string + credentialsArn: + description: Credentials required for the integration, if any. + type: string + credentialsArnRef: + description: Reference to a Role in iam to populate credentialsArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + credentialsArnSelector: + description: Selector for a Role in iam to populate credentialsArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Description of the integration. + type: string + integrationMethod: + description: Integration's HTTP method. Must be specified if integration_type + is not MOCK. + type: string + integrationSubtype: + description: AWS service action to invoke. Supported only for + HTTP APIs when integration_type is AWS_PROXY. See the AWS service + integration reference documentation for supported values. Must + be between 1 and 128 characters in length. + type: string + integrationType: + description: |- + Integration type of an integration. + Valid values: AWS (supported only for WebSocket APIs), AWS_PROXY, HTTP (supported only for WebSocket APIs), HTTP_PROXY, MOCK (supported only for WebSocket APIs). For an HTTP API private integration, use HTTP_PROXY. + type: string + integrationUri: + description: |- + URI of the Lambda function for a Lambda proxy integration, when integration_type is AWS_PROXY. + For an HTTP integration, specify a fully-qualified URL. For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. + type: string + integrationUriRef: + description: Reference to a Function in lambda to populate integrationUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + integrationUriSelector: + description: Selector for a Function in lambda to populate integrationUri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + passthroughBehavior: + description: |- + Pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the request_templates attribute. + Valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, NEVER. Default is WHEN_NO_MATCH. Supported only for WebSocket APIs. + type: string + payloadFormatVersion: + description: 'The format of the payload sent to an integration. + Valid values: 1.0, 2.0. Default is 1.0.' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + requestParameters: + additionalProperties: + type: string + description: |- + For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. + For HTTP APIs with a specified integration_subtype, a key-value map specifying parameters that are passed to AWS_PROXY integrations. + For HTTP APIs without a specified integration_subtype, a key-value map specifying how to transform HTTP requests before sending them to the backend. + See the Amazon API Gateway Developer Guide for details. + type: object + x-kubernetes-map-type: granular + requestTemplates: + additionalProperties: + type: string + description: Map of Velocity templates that are applied on the + request payload based on the value of the Content-Type header + sent by the client. Supported only for WebSocket APIs. + type: object + x-kubernetes-map-type: granular + responseParameters: + description: Mappings to transform the HTTP response from a backend + integration before returning the response to clients. Supported + only for HTTP APIs. + items: + properties: + mappings: + additionalProperties: + type: string + description: |- + Key-value map. The key of this map identifies the location of the request parameter to change, and how to change it. 
The corresponding value specifies the new data for the parameter. + See the Amazon API Gateway Developer Guide for details. + type: object + x-kubernetes-map-type: granular + statusCode: + description: HTTP status code in the range 200-599. + type: string + type: object + type: array + templateSelectionExpression: + description: The template selection expression for the integration. + type: string + timeoutMilliseconds: + description: |- + Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. + The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs. + type: number + tlsConfig: + description: TLS configuration for a private integration. Supported + only for HTTP APIs. + properties: + serverNameToVerify: + description: If you specify a server name, API Gateway uses + it to verify the hostname on the integration's certificate. + The server name is also included in the TLS handshake to + support Server Name Indication (SNI) or virtual hosting. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + apiId: + description: API identifier. + type: string + apiIdRef: + description: Reference to a API in apigatewayv2 to populate apiId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a API in apigatewayv2 to populate apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectionId: + description: ID of the VPC link for a private integration. Supported + only for HTTP APIs. Must be between 1 and 1024 characters in + length. + type: string + connectionIdRef: + description: Reference to a VPCLink in apigatewayv2 to populate + connectionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionIdSelector: + description: Selector for a VPCLink in apigatewayv2 to populate + connectionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectionType: + description: 'Type of the network connection to the integration + endpoint. Valid values: INTERNET, VPC_LINK. Default is INTERNET.' + type: string + contentHandlingStrategy: + description: 'How to handle response payload content type conversions. + Valid values: CONVERT_TO_BINARY, CONVERT_TO_TEXT. Supported + only for WebSocket APIs.' + type: string + credentialsArn: + description: Credentials required for the integration, if any. + type: string + credentialsArnRef: + description: Reference to a Role in iam to populate credentialsArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + credentialsArnSelector: + description: Selector for a Role in iam to populate credentialsArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Description of the integration. + type: string + integrationMethod: + description: Integration's HTTP method. Must be specified if integration_type + is not MOCK. + type: string + integrationSubtype: + description: AWS service action to invoke. Supported only for + HTTP APIs when integration_type is AWS_PROXY. See the AWS service + integration reference documentation for supported values. Must + be between 1 and 128 characters in length. + type: string + integrationType: + description: |- + Integration type of an integration. 
+ Valid values: AWS (supported only for WebSocket APIs), AWS_PROXY, HTTP (supported only for WebSocket APIs), HTTP_PROXY, MOCK (supported only for WebSocket APIs). For an HTTP API private integration, use HTTP_PROXY. + type: string + integrationUri: + description: |- + URI of the Lambda function for a Lambda proxy integration, when integration_type is AWS_PROXY. + For an HTTP integration, specify a fully-qualified URL. For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. + type: string + integrationUriRef: + description: Reference to a Function in lambda to populate integrationUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + integrationUriSelector: + description: Selector for a Function in lambda to populate integrationUri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + passthroughBehavior: + description: |- + Pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the request_templates attribute. + Valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, NEVER. Default is WHEN_NO_MATCH. Supported only for WebSocket APIs. + type: string + payloadFormatVersion: + description: 'The format of the payload sent to an integration. + Valid values: 1.0, 2.0. Default is 1.0.' + type: string + requestParameters: + additionalProperties: + type: string + description: |- + For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. + For HTTP APIs with a specified integration_subtype, a key-value map specifying parameters that are passed to AWS_PROXY integrations. + For HTTP APIs without a specified integration_subtype, a key-value map specifying how to transform HTTP requests before sending them to the backend. + See the Amazon API Gateway Developer Guide for details. 
+ type: object + x-kubernetes-map-type: granular + requestTemplates: + additionalProperties: + type: string + description: Map of Velocity templates that are applied on the + request payload based on the value of the Content-Type header + sent by the client. Supported only for WebSocket APIs. + type: object + x-kubernetes-map-type: granular + responseParameters: + description: Mappings to transform the HTTP response from a backend + integration before returning the response to clients. Supported + only for HTTP APIs. + items: + properties: + mappings: + additionalProperties: + type: string + description: |- + Key-value map. The key of this map identifies the location of the request parameter to change, and how to change it. The corresponding value specifies the new data for the parameter. + See the Amazon API Gateway Developer Guide for details. + type: object + x-kubernetes-map-type: granular + statusCode: + description: HTTP status code in the range 200-599. + type: string + type: object + type: array + templateSelectionExpression: + description: The template selection expression for the integration. + type: string + timeoutMilliseconds: + description: |- + Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. + The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs. + type: number + tlsConfig: + description: TLS configuration for a private integration. Supported + only for HTTP APIs. + properties: + serverNameToVerify: + description: If you specify a server name, API Gateway uses + it to verify the hostname on the integration's certificate. + The server name is also included in the TLS handshake to + support Server Name Indication (SNI) or virtual hosting. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.integrationType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.integrationType) + || (has(self.initProvider) && has(self.initProvider.integrationType))' + status: + description: IntegrationStatus defines the observed state of Integration. + properties: + atProvider: + properties: + apiId: + description: API identifier. + type: string + connectionId: + description: ID of the VPC link for a private integration. Supported + only for HTTP APIs. 
Must be between 1 and 1024 characters in + length. + type: string + connectionType: + description: 'Type of the network connection to the integration + endpoint. Valid values: INTERNET, VPC_LINK. Default is INTERNET.' + type: string + contentHandlingStrategy: + description: 'How to handle response payload content type conversions. + Valid values: CONVERT_TO_BINARY, CONVERT_TO_TEXT. Supported + only for WebSocket APIs.' + type: string + credentialsArn: + description: Credentials required for the integration, if any. + type: string + description: + description: Description of the integration. + type: string + id: + description: Integration identifier. + type: string + integrationMethod: + description: Integration's HTTP method. Must be specified if integration_type + is not MOCK. + type: string + integrationResponseSelectionExpression: + description: The integration response selection expression for + the integration. + type: string + integrationSubtype: + description: AWS service action to invoke. Supported only for + HTTP APIs when integration_type is AWS_PROXY. See the AWS service + integration reference documentation for supported values. Must + be between 1 and 128 characters in length. + type: string + integrationType: + description: |- + Integration type of an integration. + Valid values: AWS (supported only for WebSocket APIs), AWS_PROXY, HTTP (supported only for WebSocket APIs), HTTP_PROXY, MOCK (supported only for WebSocket APIs). For an HTTP API private integration, use HTTP_PROXY. + type: string + integrationUri: + description: |- + URI of the Lambda function for a Lambda proxy integration, when integration_type is AWS_PROXY. + For an HTTP integration, specify a fully-qualified URL. For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. 
+ type: string + passthroughBehavior: + description: |- + Pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the request_templates attribute. + Valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, NEVER. Default is WHEN_NO_MATCH. Supported only for WebSocket APIs. + type: string + payloadFormatVersion: + description: 'The format of the payload sent to an integration. + Valid values: 1.0, 2.0. Default is 1.0.' + type: string + requestParameters: + additionalProperties: + type: string + description: |- + For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. + For HTTP APIs with a specified integration_subtype, a key-value map specifying parameters that are passed to AWS_PROXY integrations. + For HTTP APIs without a specified integration_subtype, a key-value map specifying how to transform HTTP requests before sending them to the backend. + See the Amazon API Gateway Developer Guide for details. + type: object + x-kubernetes-map-type: granular + requestTemplates: + additionalProperties: + type: string + description: Map of Velocity templates that are applied on the + request payload based on the value of the Content-Type header + sent by the client. Supported only for WebSocket APIs. + type: object + x-kubernetes-map-type: granular + responseParameters: + description: Mappings to transform the HTTP response from a backend + integration before returning the response to clients. Supported + only for HTTP APIs. + items: + properties: + mappings: + additionalProperties: + type: string + description: |- + Key-value map. The key of this map identifies the location of the request parameter to change, and how to change it. The corresponding value specifies the new data for the parameter. + See the Amazon API Gateway Developer Guide for details. 
+ type: object + x-kubernetes-map-type: granular + statusCode: + description: HTTP status code in the range 200-599. + type: string + type: object + type: array + templateSelectionExpression: + description: The template selection expression for the integration. + type: string + timeoutMilliseconds: + description: |- + Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. + The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs. + type: number + tlsConfig: + description: TLS configuration for a private integration. Supported + only for HTTP APIs. + properties: + serverNameToVerify: + description: If you specify a server name, API Gateway uses + it to verify the hostname on the integration's certificate. + The server name is also included in the TLS handshake to + support Server Name Indication (SNI) or virtual hosting. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apigatewayv2.aws.upbound.io_stages.yaml b/package/crds/apigatewayv2.aws.upbound.io_stages.yaml index bbfdca428d..0f63c9ed86 100644 --- a/package/crds/apigatewayv2.aws.upbound.io_stages.yaml +++ b/package/crds/apigatewayv2.aws.upbound.io_stages.yaml @@ -961,3 +961,934 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Stage is the Schema for the Stages API. Manages an Amazon API + Gateway Version 2 stage. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StageSpec defines the desired state of Stage + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessLogSettings: + description: |- + Settings for logging access in this stage. + Use the aws_api_gateway_account resource to configure permissions for CloudWatch Logging. + properties: + destinationArn: + description: ARN of the CloudWatch Logs log group to receive + access logs. Any trailing :* is trimmed from the ARN. + type: string + format: + description: Single line format of the access logs of data. + Refer to log settings for HTTP or Websocket. + type: string + type: object + apiId: + description: API identifier. + type: string + apiIdRef: + description: Reference to a API in apigatewayv2 to populate apiId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a API in apigatewayv2 to populate apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoDeploy: + description: Whether updates to an API automatically trigger a + new deployment. Defaults to false. Applicable for HTTP APIs. + type: boolean + clientCertificateId: + description: |- + Identifier of a client certificate for the stage. Use the aws_api_gateway_client_certificate resource to configure a client certificate. + Supported only for WebSocket APIs. + type: string + defaultRouteSettings: + description: Default route settings for the stage. + properties: + dataTraceEnabled: + description: |- + Whether data trace logging is enabled for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + Defaults to false. Supported only for WebSocket APIs. + type: boolean + detailedMetricsEnabled: + description: Whether detailed metrics are enabled for the + default route. Defaults to false. + type: boolean + loggingLevel: + description: |- + Logging level for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + type: string + throttlingBurstLimit: + description: Throttling burst limit for the default route. + type: number + throttlingRateLimit: + description: Throttling rate limit for the default route. + type: number + type: object + deploymentId: + description: Deployment identifier of the stage. Use the aws_apigatewayv2_deployment + resource to configure a deployment. + type: string + deploymentIdRef: + description: Reference to a Deployment in apigatewayv2 to populate + deploymentId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deploymentIdSelector: + description: Selector for a Deployment in apigatewayv2 to populate + deploymentId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Description for the stage. Must be less than or equal + to 1024 characters in length. 
+ type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + routeSettings: + description: Route settings for the stage. + items: + properties: + dataTraceEnabled: + description: |- + Whether data trace logging is enabled for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + Defaults to false. Supported only for WebSocket APIs. + type: boolean + detailedMetricsEnabled: + description: Whether detailed metrics are enabled for the + route. Defaults to false. + type: boolean + loggingLevel: + description: |- + Logging level for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + type: string + routeKey: + description: Route key. + type: string + throttlingBurstLimit: + description: Throttling burst limit for the route. + type: number + throttlingRateLimit: + description: Throttling rate limit for the route. + type: number + type: object + type: array + stageVariables: + additionalProperties: + type: string + description: Map that defines the stage variables for the stage. + type: object + x-kubernetes-map-type: granular + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessLogSettings: + description: |- + Settings for logging access in this stage. + Use the aws_api_gateway_account resource to configure permissions for CloudWatch Logging. + properties: + destinationArn: + description: ARN of the CloudWatch Logs log group to receive + access logs. Any trailing :* is trimmed from the ARN. + type: string + format: + description: Single line format of the access logs of data. + Refer to log settings for HTTP or Websocket. + type: string + type: object + apiId: + description: API identifier. + type: string + apiIdRef: + description: Reference to a API in apigatewayv2 to populate apiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a API in apigatewayv2 to populate apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoDeploy: + description: Whether updates to an API automatically trigger a + new deployment. Defaults to false. Applicable for HTTP APIs. + type: boolean + clientCertificateId: + description: |- + Identifier of a client certificate for the stage. Use the aws_api_gateway_client_certificate resource to configure a client certificate. + Supported only for WebSocket APIs. + type: string + defaultRouteSettings: + description: Default route settings for the stage. + properties: + dataTraceEnabled: + description: |- + Whether data trace logging is enabled for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + Defaults to false. Supported only for WebSocket APIs. + type: boolean + detailedMetricsEnabled: + description: Whether detailed metrics are enabled for the + default route. Defaults to false. + type: boolean + loggingLevel: + description: |- + Logging level for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + Valid values: ERROR, INFO, OFF. Defaults to OFF. 
Supported only for WebSocket APIs. + type: string + throttlingBurstLimit: + description: Throttling burst limit for the default route. + type: number + throttlingRateLimit: + description: Throttling rate limit for the default route. + type: number + type: object + deploymentId: + description: Deployment identifier of the stage. Use the aws_apigatewayv2_deployment + resource to configure a deployment. + type: string + deploymentIdRef: + description: Reference to a Deployment in apigatewayv2 to populate + deploymentId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deploymentIdSelector: + description: Selector for a Deployment in apigatewayv2 to populate + deploymentId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Description for the stage. Must be less than or equal + to 1024 characters in length. + type: string + routeSettings: + description: Route settings for the stage. + items: + properties: + dataTraceEnabled: + description: |- + Whether data trace logging is enabled for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + Defaults to false. Supported only for WebSocket APIs. + type: boolean + detailedMetricsEnabled: + description: Whether detailed metrics are enabled for the + route. Defaults to false. + type: boolean + loggingLevel: + description: |- + Logging level for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + type: string + routeKey: + description: Route key. + type: string + throttlingBurstLimit: + description: Throttling burst limit for the route. + type: number + throttlingRateLimit: + description: Throttling rate limit for the route. + type: number + type: object + type: array + stageVariables: + additionalProperties: + type: string + description: Map that defines the stage variables for the stage. 
+ type: object + x-kubernetes-map-type: granular + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: StageStatus defines the observed state of Stage. 
+ properties: + atProvider: + properties: + accessLogSettings: + description: |- + Settings for logging access in this stage. + Use the aws_api_gateway_account resource to configure permissions for CloudWatch Logging. + properties: + destinationArn: + description: ARN of the CloudWatch Logs log group to receive + access logs. Any trailing :* is trimmed from the ARN. + type: string + format: + description: Single line format of the access logs of data. + Refer to log settings for HTTP or Websocket. + type: string + type: object + apiId: + description: API identifier. + type: string + arn: + description: ARN of the stage. + type: string + autoDeploy: + description: Whether updates to an API automatically trigger a + new deployment. Defaults to false. Applicable for HTTP APIs. + type: boolean + clientCertificateId: + description: |- + Identifier of a client certificate for the stage. Use the aws_api_gateway_client_certificate resource to configure a client certificate. + Supported only for WebSocket APIs. + type: string + defaultRouteSettings: + description: Default route settings for the stage. + properties: + dataTraceEnabled: + description: |- + Whether data trace logging is enabled for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + Defaults to false. Supported only for WebSocket APIs. + type: boolean + detailedMetricsEnabled: + description: Whether detailed metrics are enabled for the + default route. Defaults to false. + type: boolean + loggingLevel: + description: |- + Logging level for the default route. Affects the log entries pushed to Amazon CloudWatch Logs. + Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + type: string + throttlingBurstLimit: + description: Throttling burst limit for the default route. + type: number + throttlingRateLimit: + description: Throttling rate limit for the default route. 
+ type: number + type: object + deploymentId: + description: Deployment identifier of the stage. Use the aws_apigatewayv2_deployment + resource to configure a deployment. + type: string + description: + description: Description for the stage. Must be less than or equal + to 1024 characters in length. + type: string + executionArn: + description: |- + ARN prefix to be used in an aws_lambda_permission's source_arn attribute. + For WebSocket APIs this attribute can additionally be used in an aws_iam_policy to authorize access to the @connections API. + See the Amazon API Gateway Developer Guide for details. + type: string + id: + description: Stage identifier. + type: string + invokeUrl: + description: |- + URL to invoke the API pointing to the stage, + e.g., wss://z4675bid1j.execute-api.eu-west-2.amazonaws.com/example-stage, or https://z4675bid1j.execute-api.eu-west-2.amazonaws.com/ + type: string + routeSettings: + description: Route settings for the stage. + items: + properties: + dataTraceEnabled: + description: |- + Whether data trace logging is enabled for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + Defaults to false. Supported only for WebSocket APIs. + type: boolean + detailedMetricsEnabled: + description: Whether detailed metrics are enabled for the + route. Defaults to false. + type: boolean + loggingLevel: + description: |- + Logging level for the route. Affects the log entries pushed to Amazon CloudWatch Logs. + Valid values: ERROR, INFO, OFF. Defaults to OFF. Supported only for WebSocket APIs. + type: string + routeKey: + description: Route key. + type: string + throttlingBurstLimit: + description: Throttling burst limit for the route. + type: number + throttlingRateLimit: + description: Throttling rate limit for the route. + type: number + type: object + type: array + stageVariables: + additionalProperties: + type: string + description: Map that defines the stage variables for the stage. 
+ type: object + x-kubernetes-map-type: granular + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appautoscaling.aws.upbound.io_policies.yaml b/package/crds/appautoscaling.aws.upbound.io_policies.yaml index a6d7c6d59c..f55365c25b 100644 --- a/package/crds/appautoscaling.aws.upbound.io_policies.yaml +++ b/package/crds/appautoscaling.aws.upbound.io_policies.yaml @@ -1231,3 +1231,1162 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Policy is the Schema for the Policys API. Provides an Application + AutoScaling Policy resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PolicySpec defines the desired state of Policy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + policyType: + description: Policy type. Valid values are StepScaling and TargetTrackingScaling. + Defaults to StepScaling. Certain services only support only + one policy type. For more information see the Target Tracking + Scaling Policies and Step Scaling Policies documentation. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resourceId: + description: 'Resource type and unique identifier string for the + resource associated with the scaling policy. Documentation can + be found in the ResourceId parameter at: AWS Application Auto + Scaling API Reference' + type: string + resourceIdRef: + description: Reference to a Target in appautoscaling to populate + resourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceIdSelector: + description: Selector for a Target in appautoscaling to populate + resourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scalableDimension: + description: 'Scalable dimension of the scalable target. 
Documentation + can be found in the ScalableDimension parameter at: AWS Application + Auto Scaling API Reference' + type: string + scalableDimensionRef: + description: Reference to a Target in appautoscaling to populate + scalableDimension. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + scalableDimensionSelector: + description: Selector for a Target in appautoscaling to populate + scalableDimension. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceNamespace: + description: 'AWS service namespace of the scalable target. Documentation + can be found in the ServiceNamespace parameter at: AWS Application + Auto Scaling API Reference' + type: string + serviceNamespaceRef: + description: Reference to a Target in appautoscaling to populate + serviceNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceNamespaceSelector: + description: Selector for a Target in appautoscaling to populate + serviceNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stepScalingPolicyConfiguration: + description: Step scaling policy configuration, requires policy_type + = "StepScaling" (default). See supported fields below. + properties: + adjustmentType: + description: Whether the adjustment is an absolute number + or a percentage of the current capacity. Valid values are + ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + type: string + cooldown: + description: Amount of time, in seconds, after a scaling activity + completes and before the next scaling activity can start. + type: number + metricAggregationType: + description: Aggregation type for the policy's metrics. Valid + values are "Minimum", "Maximum", and "Average". Without + a value, AWS will treat the aggregation type as "Average". + type: string + minAdjustmentMagnitude: + description: Minimum number to adjust your scalable dimension + as a result of a scaling activity. If the adjustment type + is PercentChangeInCapacity, the scaling policy changes the + scalable dimension of the scalable target by this amount. 
+ type: number + stepAdjustment: + description: 'Set of adjustments that manage scaling. These + have the following structure:' + items: + properties: + metricIntervalLowerBound: + description: Lower bound for the difference between + the alarm threshold and the CloudWatch metric. Without + a value, AWS will treat this bound as negative infinity. + type: string + metricIntervalUpperBound: + description: Upper bound for the difference between + the alarm threshold and the CloudWatch metric. Without + a value, AWS will treat this bound as infinity. The + upper bound must be greater than the lower bound. + type: string + scalingAdjustment: + description: Number of members by which to scale, when + the adjustment bounds are breached. A positive value + scales up. A negative value scales down. + type: number + type: object + type: array + type: object + targetTrackingScalingPolicyConfiguration: + description: Target tracking policy, requires policy_type = "TargetTrackingScaling". + See supported fields below. + properties: + customizedMetricSpecification: + description: 'Custom CloudWatch metric. Documentation can + be found at: AWS Customized Metric Specification. See supported + fields below.' + properties: + dimensions: + description: Configuration block(s) with the dimensions + of the metric if the metric was published with dimensions. + Detailed below. + items: + properties: + name: + description: Name of the dimension. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + metrics: + description: Metrics to include, as a metric data query. + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression or + metric_stat, but not both. + type: string + id: + description: Short name for the metric used in target + tracking scaling policy. 
+ type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch metric + to be used in target tracking scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the CloudWatch + metric to return, including the metric name, + namespace, and dimensions. + properties: + dimensions: + description: Configuration block(s) with + the dimensions of the metric if the metric + was published with dimensions. Detailed + below. + items: + properties: + name: + description: Name of the dimension. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether to return + the timestamps and raw data values of this metric, + the default is true + type: boolean + type: object + type: array + namespace: + description: Namespace of the metric. + type: string + statistic: + description: 'Statistic of the metric. Valid values: Average, + Minimum, Maximum, SampleCount, and Sum.' + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableScaleIn: + description: Whether scale in by the target tracking policy + is disabled. If the value is true, scale in is disabled + and the target tracking policy won't remove capacity from + the scalable resource. Otherwise, scale in is enabled and + the target tracking policy can remove capacity from the + scalable resource. The default value is false. + type: boolean + predefinedMetricSpecification: + description: Predefined metric. 
See supported fields below. + properties: + predefinedMetricType: + description: Metric type. + type: string + resourceLabel: + description: 'Reserved for future use if the predefined_metric_type + is not ALBRequestCountPerTarget. If the predefined_metric_type + is ALBRequestCountPerTarget, you must specify this argument. + Documentation can be found at: AWS Predefined Scaling + Metric Specification. Must be less than or equal to + 1023 characters in length.' + type: string + type: object + scaleInCooldown: + description: Amount of time, in seconds, after a scale in + activity completes before another scale in activity can + start. + type: number + scaleOutCooldown: + description: Amount of time, in seconds, after a scale out + activity completes before another scale out activity can + start. + type: number + targetValue: + description: Target value for the metric. + type: number + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + policyType: + description: Policy type. Valid values are StepScaling and TargetTrackingScaling. + Defaults to StepScaling. Certain services only support only + one policy type. For more information see the Target Tracking + Scaling Policies and Step Scaling Policies documentation. 
+ type: string + stepScalingPolicyConfiguration: + description: Step scaling policy configuration, requires policy_type + = "StepScaling" (default). See supported fields below. + properties: + adjustmentType: + description: Whether the adjustment is an absolute number + or a percentage of the current capacity. Valid values are + ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + type: string + cooldown: + description: Amount of time, in seconds, after a scaling activity + completes and before the next scaling activity can start. + type: number + metricAggregationType: + description: Aggregation type for the policy's metrics. Valid + values are "Minimum", "Maximum", and "Average". Without + a value, AWS will treat the aggregation type as "Average". + type: string + minAdjustmentMagnitude: + description: Minimum number to adjust your scalable dimension + as a result of a scaling activity. If the adjustment type + is PercentChangeInCapacity, the scaling policy changes the + scalable dimension of the scalable target by this amount. + type: number + stepAdjustment: + description: 'Set of adjustments that manage scaling. These + have the following structure:' + items: + properties: + metricIntervalLowerBound: + description: Lower bound for the difference between + the alarm threshold and the CloudWatch metric. Without + a value, AWS will treat this bound as negative infinity. + type: string + metricIntervalUpperBound: + description: Upper bound for the difference between + the alarm threshold and the CloudWatch metric. Without + a value, AWS will treat this bound as infinity. The + upper bound must be greater than the lower bound. + type: string + scalingAdjustment: + description: Number of members by which to scale, when + the adjustment bounds are breached. A positive value + scales up. A negative value scales down. 
+ type: number + type: object + type: array + type: object + targetTrackingScalingPolicyConfiguration: + description: Target tracking policy, requires policy_type = "TargetTrackingScaling". + See supported fields below. + properties: + customizedMetricSpecification: + description: 'Custom CloudWatch metric. Documentation can + be found at: AWS Customized Metric Specification. See supported + fields below.' + properties: + dimensions: + description: Configuration block(s) with the dimensions + of the metric if the metric was published with dimensions. + Detailed below. + items: + properties: + name: + description: Name of the dimension. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + metrics: + description: Metrics to include, as a metric data query. + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression or + metric_stat, but not both. + type: string + id: + description: Short name for the metric used in target + tracking scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch metric + to be used in target tracking scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the CloudWatch + metric to return, including the metric name, + namespace, and dimensions. + properties: + dimensions: + description: Configuration block(s) with + the dimensions of the metric if the metric + was published with dimensions. Detailed + below. + items: + properties: + name: + description: Name of the dimension. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. 
+ type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether to return + the timestamps and raw data values of this metric, + the default is true + type: boolean + type: object + type: array + namespace: + description: Namespace of the metric. + type: string + statistic: + description: 'Statistic of the metric. Valid values: Average, + Minimum, Maximum, SampleCount, and Sum.' + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableScaleIn: + description: Whether scale in by the target tracking policy + is disabled. If the value is true, scale in is disabled + and the target tracking policy won't remove capacity from + the scalable resource. Otherwise, scale in is enabled and + the target tracking policy can remove capacity from the + scalable resource. The default value is false. + type: boolean + predefinedMetricSpecification: + description: Predefined metric. See supported fields below. + properties: + predefinedMetricType: + description: Metric type. + type: string + resourceLabel: + description: 'Reserved for future use if the predefined_metric_type + is not ALBRequestCountPerTarget. If the predefined_metric_type + is ALBRequestCountPerTarget, you must specify this argument. + Documentation can be found at: AWS Predefined Scaling + Metric Specification. Must be less than or equal to + 1023 characters in length.' + type: string + type: object + scaleInCooldown: + description: Amount of time, in seconds, after a scale in + activity completes before another scale in activity can + start. + type: number + scaleOutCooldown: + description: Amount of time, in seconds, after a scale out + activity completes before another scale out activity can + start. 
+ type: number + targetValue: + description: Target value for the metric. + type: number + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: PolicyStatus defines the observed state of Policy. + properties: + atProvider: + properties: + alarmArns: + description: List of CloudWatch alarm ARNs associated with the + scaling policy. + items: + type: string + type: array + arn: + description: ARN assigned by AWS to the scaling policy. 
+ type: string + id: + description: Short name for the metric used in target tracking + scaling policy. + type: string + policyType: + description: Policy type. Valid values are StepScaling and TargetTrackingScaling. + Defaults to StepScaling. Certain services only support only + one policy type. For more information see the Target Tracking + Scaling Policies and Step Scaling Policies documentation. + type: string + resourceId: + description: 'Resource type and unique identifier string for the + resource associated with the scaling policy. Documentation can + be found in the ResourceId parameter at: AWS Application Auto + Scaling API Reference' + type: string + scalableDimension: + description: 'Scalable dimension of the scalable target. Documentation + can be found in the ScalableDimension parameter at: AWS Application + Auto Scaling API Reference' + type: string + serviceNamespace: + description: 'AWS service namespace of the scalable target. Documentation + can be found in the ServiceNamespace parameter at: AWS Application + Auto Scaling API Reference' + type: string + stepScalingPolicyConfiguration: + description: Step scaling policy configuration, requires policy_type + = "StepScaling" (default). See supported fields below. + properties: + adjustmentType: + description: Whether the adjustment is an absolute number + or a percentage of the current capacity. Valid values are + ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + type: string + cooldown: + description: Amount of time, in seconds, after a scaling activity + completes and before the next scaling activity can start. + type: number + metricAggregationType: + description: Aggregation type for the policy's metrics. Valid + values are "Minimum", "Maximum", and "Average". Without + a value, AWS will treat the aggregation type as "Average". + type: string + minAdjustmentMagnitude: + description: Minimum number to adjust your scalable dimension + as a result of a scaling activity. 
If the adjustment type + is PercentChangeInCapacity, the scaling policy changes the + scalable dimension of the scalable target by this amount. + type: number + stepAdjustment: + description: 'Set of adjustments that manage scaling. These + have the following structure:' + items: + properties: + metricIntervalLowerBound: + description: Lower bound for the difference between + the alarm threshold and the CloudWatch metric. Without + a value, AWS will treat this bound as negative infinity. + type: string + metricIntervalUpperBound: + description: Upper bound for the difference between + the alarm threshold and the CloudWatch metric. Without + a value, AWS will treat this bound as infinity. The + upper bound must be greater than the lower bound. + type: string + scalingAdjustment: + description: Number of members by which to scale, when + the adjustment bounds are breached. A positive value + scales up. A negative value scales down. + type: number + type: object + type: array + type: object + targetTrackingScalingPolicyConfiguration: + description: Target tracking policy, requires policy_type = "TargetTrackingScaling". + See supported fields below. + properties: + customizedMetricSpecification: + description: 'Custom CloudWatch metric. Documentation can + be found at: AWS Customized Metric Specification. See supported + fields below.' + properties: + dimensions: + description: Configuration block(s) with the dimensions + of the metric if the metric was published with dimensions. + Detailed below. + items: + properties: + name: + description: Name of the dimension. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + metrics: + description: Metrics to include, as a metric data query. + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression or + metric_stat, but not both. 
+ type: string + id: + description: Short name for the metric used in target + tracking scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch metric + to be used in target tracking scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the CloudWatch + metric to return, including the metric name, + namespace, and dimensions. + properties: + dimensions: + description: Configuration block(s) with + the dimensions of the metric if the metric + was published with dimensions. Detailed + below. + items: + properties: + name: + description: Name of the dimension. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether to return + the timestamps and raw data values of this metric, + the default is true + type: boolean + type: object + type: array + namespace: + description: Namespace of the metric. + type: string + statistic: + description: 'Statistic of the metric. Valid values: Average, + Minimum, Maximum, SampleCount, and Sum.' + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableScaleIn: + description: Whether scale in by the target tracking policy + is disabled. If the value is true, scale in is disabled + and the target tracking policy won't remove capacity from + the scalable resource. Otherwise, scale in is enabled and + the target tracking policy can remove capacity from the + scalable resource. 
The default value is false. + type: boolean + predefinedMetricSpecification: + description: Predefined metric. See supported fields below. + properties: + predefinedMetricType: + description: Metric type. + type: string + resourceLabel: + description: 'Reserved for future use if the predefined_metric_type + is not ALBRequestCountPerTarget. If the predefined_metric_type + is ALBRequestCountPerTarget, you must specify this argument. + Documentation can be found at: AWS Predefined Scaling + Metric Specification. Must be less than or equal to + 1023 characters in length.' + type: string + type: object + scaleInCooldown: + description: Amount of time, in seconds, after a scale in + activity completes before another scale in activity can + start. + type: number + scaleOutCooldown: + description: Amount of time, in seconds, after a scale out + activity completes before another scale out activity can + start. + type: number + targetValue: + description: Target value for the metric. + type: number + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appautoscaling.aws.upbound.io_scheduledactions.yaml b/package/crds/appautoscaling.aws.upbound.io_scheduledactions.yaml index 05fb79c8a7..cb8d058746 100644 --- a/package/crds/appautoscaling.aws.upbound.io_scheduledactions.yaml +++ b/package/crds/appautoscaling.aws.upbound.io_scheduledactions.yaml @@ -979,3 +979,958 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ScheduledAction is the Schema for the ScheduledActions API. Provides + an Application AutoScaling ScheduledAction resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScheduledActionSpec defines the desired state of ScheduledAction + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + endTime: + description: Date and time for the scheduled action to end in + RFC 3339 format. The timezone is not affected by the setting + of timezone. + type: string + name: + description: Name of the scheduled action. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resourceId: + description: 'Identifier of the resource associated with the scheduled + action. Documentation can be found in the ResourceId parameter + at: AWS Application Auto Scaling API Reference' + type: string + resourceIdRef: + description: Reference to a Target in appautoscaling to populate + resourceId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceIdSelector: + description: Selector for a Target in appautoscaling to populate + resourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scalableDimension: + description: 'Scalable dimension. Documentation can be found in + the ScalableDimension parameter at: AWS Application Auto Scaling + API Reference Example: ecs:service:DesiredCount' + type: string + scalableDimensionRef: + description: Reference to a Target in appautoscaling to populate + scalableDimension. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + scalableDimensionSelector: + description: Selector for a Target in appautoscaling to populate + scalableDimension. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scalableTargetAction: + description: New minimum and maximum capacity. You can set both + values or just one. See below + properties: + maxCapacity: + description: Maximum capacity. At least one of max_capacity + or min_capacity must be set. + type: string + minCapacity: + description: Minimum capacity. At least one of min_capacity + or max_capacity must be set. + type: string + type: object + schedule: + description: 'Schedule for this action. The following formats + are supported: At expressions - at(yyyy-mm-ddThh:mm:ss), Rate + expressions - rate(valueunit), Cron expressions - cron(fields). + Times for at expressions and cron expressions are evaluated + using the time zone configured in timezone. Documentation can + be found in the Timezone parameter at: AWS Application Auto + Scaling API Reference' + type: string + serviceNamespace: + description: 'Namespace of the AWS service. Documentation can + be found in the ServiceNamespace parameter at: AWS Application + Auto Scaling API Reference Example: ecs' + type: string + serviceNamespaceRef: + description: Reference to a Target in appautoscaling to populate + serviceNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceNamespaceSelector: + description: Selector for a Target in appautoscaling to populate + serviceNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + startTime: + description: Date and time for the scheduled action to start in + RFC 3339 format. The timezone is not affected by the setting + of timezone. + type: string + timezone: + description: Time zone used when setting a scheduled action by + using an at or cron expression. Does not affect timezone for + start_time and end_time. Valid values are the canonical names + of the IANA time zones supported by Joda-Time, such as Etc/GMT+9 + or Pacific/Tahiti. Default is UTC. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + endTime: + description: Date and time for the scheduled action to end in + RFC 3339 format. The timezone is not affected by the setting + of timezone. + type: string + name: + description: Name of the scheduled action. + type: string + resourceId: + description: 'Identifier of the resource associated with the scheduled + action. Documentation can be found in the ResourceId parameter + at: AWS Application Auto Scaling API Reference' + type: string + resourceIdRef: + description: Reference to a Target in appautoscaling to populate + resourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceIdSelector: + description: Selector for a Target in appautoscaling to populate + resourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scalableDimension: + description: 'Scalable dimension. Documentation can be found in + the ScalableDimension parameter at: AWS Application Auto Scaling + API Reference Example: ecs:service:DesiredCount' + type: string + scalableDimensionRef: + description: Reference to a Target in appautoscaling to populate + scalableDimension. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + scalableDimensionSelector: + description: Selector for a Target in appautoscaling to populate + scalableDimension. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scalableTargetAction: + description: New minimum and maximum capacity. You can set both + values or just one. See below + properties: + maxCapacity: + description: Maximum capacity. At least one of max_capacity + or min_capacity must be set. + type: string + minCapacity: + description: Minimum capacity. At least one of min_capacity + or max_capacity must be set. + type: string + type: object + schedule: + description: 'Schedule for this action. The following formats + are supported: At expressions - at(yyyy-mm-ddThh:mm:ss), Rate + expressions - rate(valueunit), Cron expressions - cron(fields). + Times for at expressions and cron expressions are evaluated + using the time zone configured in timezone. Documentation can + be found in the Timezone parameter at: AWS Application Auto + Scaling API Reference' + type: string + serviceNamespace: + description: 'Namespace of the AWS service. Documentation can + be found in the ServiceNamespace parameter at: AWS Application + Auto Scaling API Reference Example: ecs' + type: string + serviceNamespaceRef: + description: Reference to a Target in appautoscaling to populate + serviceNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceNamespaceSelector: + description: Selector for a Target in appautoscaling to populate + serviceNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + startTime: + description: Date and time for the scheduled action to start in + RFC 3339 format. The timezone is not affected by the setting + of timezone. + type: string + timezone: + description: Time zone used when setting a scheduled action by + using an at or cron expression. Does not affect timezone for + start_time and end_time. Valid values are the canonical names + of the IANA time zones supported by Joda-Time, such as Etc/GMT+9 + or Pacific/Tahiti. Default is UTC. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.scalableTargetAction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scalableTargetAction) + || (has(self.initProvider) && has(self.initProvider.scalableTargetAction))' + - message: spec.forProvider.schedule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.schedule) + || (has(self.initProvider) && has(self.initProvider.schedule))' + status: + description: ScheduledActionStatus defines the observed state of ScheduledAction. + properties: + atProvider: + properties: + arn: + description: ARN of the scheduled action. + type: string + endTime: + description: Date and time for the scheduled action to end in + RFC 3339 format. The timezone is not affected by the setting + of timezone. + type: string + id: + type: string + name: + description: Name of the scheduled action. + type: string + resourceId: + description: 'Identifier of the resource associated with the scheduled + action. Documentation can be found in the ResourceId parameter + at: AWS Application Auto Scaling API Reference' + type: string + scalableDimension: + description: 'Scalable dimension. 
Documentation can be found in + the ScalableDimension parameter at: AWS Application Auto Scaling + API Reference Example: ecs:service:DesiredCount' + type: string + scalableTargetAction: + description: New minimum and maximum capacity. You can set both + values or just one. See below + properties: + maxCapacity: + description: Maximum capacity. At least one of max_capacity + or min_capacity must be set. + type: string + minCapacity: + description: Minimum capacity. At least one of min_capacity + or max_capacity must be set. + type: string + type: object + schedule: + description: 'Schedule for this action. The following formats + are supported: At expressions - at(yyyy-mm-ddThh:mm:ss), Rate + expressions - rate(valueunit), Cron expressions - cron(fields). + Times for at expressions and cron expressions are evaluated + using the time zone configured in timezone. Documentation can + be found in the Timezone parameter at: AWS Application Auto + Scaling API Reference' + type: string + serviceNamespace: + description: 'Namespace of the AWS service. Documentation can + be found in the ServiceNamespace parameter at: AWS Application + Auto Scaling API Reference Example: ecs' + type: string + startTime: + description: Date and time for the scheduled action to start in + RFC 3339 format. The timezone is not affected by the setting + of timezone. + type: string + timezone: + description: Time zone used when setting a scheduled action by + using an at or cron expression. Does not affect timezone for + start_time and end_time. Valid values are the canonical names + of the IANA time zones supported by Joda-Time, such as Etc/GMT+9 + or Pacific/Tahiti. Default is UTC. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appflow.aws.upbound.io_flows.yaml b/package/crds/appflow.aws.upbound.io_flows.yaml index 1c55dc3909..2e0b23a226 100644 --- a/package/crds/appflow.aws.upbound.io_flows.yaml +++ b/package/crds/appflow.aws.upbound.io_flows.yaml @@ -3949,3 +3949,3460 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Flow is the Schema for the Flows API. Provides an AppFlow Flow + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FlowSpec defines the desired state of Flow + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the flow you want to create. + type: string + destinationFlowConfig: + description: A Destination Flow Config that controls how Amazon + AppFlow places data in the destination connector. + items: + properties: + apiVersion: + description: API version that the destination connector + uses. + type: string + connectorProfileName: + description: Name of the connector profile. This name must + be unique for each connector profile in the AWS account. + type: string + connectorType: + description: Type of connector, such as Salesforce, Amplitude, + and so on. Valid values are Salesforce, Singular, Slack, + Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, + Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, + Amplitude, Veeva, EventBridge, LookoutMetrics, Upsolver, + Honeycode, CustomerProfiles, SAPOData, and CustomConnector. + type: string + destinationConnectorProperties: + description: This stores the information that is required + to query a particular connector. 
See Destination Connector + Properties for more information. + properties: + customConnector: + description: Properties that are required to query the + custom Connector. See Custom Connector Destination + Properties for more details. + properties: + customProperties: + additionalProperties: + type: string + description: Custom properties that are specific + to the connector when it's used as a destination + in the flow. Maximum of 50 items. + type: object + x-kubernetes-map-type: granular + entityName: + description: Entity specified in the custom connector + as a destination in the flow. + type: string + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + customerProfiles: + description: Properties that are required to query Amazon + Connect Customer Profiles. See Customer Profiles Destination + Properties for more details. + properties: + domainName: + description: Unique name of the Amazon Connect Customer + Profiles domain. 
+ type: string + objectTypeName: + description: Object specified in the Amazon Connect + Customer Profiles flow destination. + type: string + type: object + eventBridge: + description: Properties that are required to query Amazon + EventBridge. See Generic Destination Properties for + more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + object: + description: Object specified in the flow destination. + type: string + type: object + honeycode: + description: Properties that are required to query Amazon + Honeycode. See Generic Destination Properties for + more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + object: + description: Object specified in the flow destination. 
+ type: string + type: object + lookoutMetrics: + type: object + marketo: + description: Properties that are required to query Marketo. + See Generic Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + object: + description: Object specified in the flow destination. + type: string + type: object + redshift: + description: Properties that are required to query Amazon + Redshift. See Redshift Destination Properties for + more details. + properties: + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + intermediateBucketName: + description: Intermediate bucket that Amazon AppFlow + uses when moving data into Amazon Redshift. 
+ type: string + object: + description: Object specified in the flow destination. + type: string + type: object + s3: + description: Properties that are required to query Amazon + S3. See S3 Destination Properties for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which Amazon + AppFlow places the transferred data. + type: string + bucketNameRef: + description: Reference to a BucketPolicy in s3 to + populate bucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a BucketPolicy in s3 to + populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + s3OutputFormatConfig: + description: Configuration that determines how Amazon + AppFlow should format the flow output data when + Amazon S3 is used as the destination. See S3 Output + Format Config for more details. + properties: + aggregationConfig: + description: Aggregation settings that you can + use to customize the output format of your + flow data. See Aggregation Config for more + details. + properties: + aggregationType: + description: Whether Amazon AppFlow aggregates + the flow records into a single file, or + leave them unaggregated. Valid values + are None and SingleFile. + type: string + targetFileSize: + description: The desired file size, in MB, + for each output file that Amazon AppFlow + writes to the flow destination. Integer + value. + type: number + type: object + fileType: + description: File type that Amazon AppFlow places + in the Amazon S3 bucket. Valid values are + CSV, JSON, and PARQUET. + type: string + prefixConfig: + description: Determines the prefix that Amazon + AppFlow applies to the folder name in the + Amazon S3 bucket. You can name folders according + to the flow frequency and date. See Prefix + Config for more details. 
+ properties: + prefixFormat: + description: Determines the level of granularity + that's included in the prefix. Valid values + are YEAR, MONTH, DAY, HOUR, and MINUTE. + type: string + prefixType: + description: Determines the format of the + prefix, and whether it applies to the + file name, file path, or both. Valid values + are FILENAME, PATH, and PATH_AND_FILENAME. + type: string + type: object + preserveSourceDataTyping: + description: Whether the data types from the + source system need to be preserved (Only valid + for Parquet file type) + type: boolean + type: object + type: object + salesforce: + description: Properties that are required to query Salesforce. + See Salesforce Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + object: + description: Object specified in the flow destination. + type: string + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + sapoData: + description: Properties that are required to query SAPOData. 
+ See SAPOData Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + objectPath: + description: Object path specified in the SAPOData + flow destination. + type: string + successResponseHandlingConfig: + description: Determines how Amazon AppFlow handles + the success response that it gets from the connector + after placing data. See Success Response Handling + Config for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + type: object + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + snowflake: + description: Properties that are required to query Snowflake. + See Snowflake Destination Properties for more details. + properties: + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. 
+ type: string + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + intermediateBucketName: + description: Intermediate bucket that Amazon AppFlow + uses when moving data into Amazon Redshift. + type: string + object: + description: Object specified in the flow destination. + type: string + type: object + upsolver: + description: Properties that are required to query Upsolver. + See Upsolver Destination Properties for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which Amazon + AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + s3OutputFormatConfig: + description: Configuration that determines how Amazon + AppFlow should format the flow output data when + Amazon S3 is used as the destination. See S3 Output + Format Config for more details. + properties: + aggregationConfig: + description: Aggregation settings that you can + use to customize the output format of your + flow data. See Aggregation Config for more + details. + properties: + aggregationType: + description: Whether Amazon AppFlow aggregates + the flow records into a single file, or + leave them unaggregated. Valid values + are None and SingleFile. 
+ type: string + type: object + fileType: + description: File type that Amazon AppFlow places + in the Amazon S3 bucket. Valid values are + CSV, JSON, and PARQUET. + type: string + prefixConfig: + description: Determines the prefix that Amazon + AppFlow applies to the folder name in the + Amazon S3 bucket. You can name folders according + to the flow frequency and date. See Prefix + Config for more details. + properties: + prefixFormat: + description: Determines the level of granularity + that's included in the prefix. Valid values + are YEAR, MONTH, DAY, HOUR, and MINUTE. + type: string + prefixType: + description: Determines the format of the + prefix, and whether it applies to the + file name, file path, or both. Valid values + are FILENAME, PATH, and PATH_AND_FILENAME. + type: string + type: object + type: object + type: object + zendesk: + description: Properties that are required to query Zendesk. + See Zendesk Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + object: + description: Object specified in the flow destination. 
+ type: string + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + type: object + type: object + type: array + kmsArn: + description: ARN (Amazon Resource Name) of the Key Management + Service (KMS) key you provide for encryption. This is required + if you do not want to use the Amazon AppFlow-managed KMS key. + If you don't provide anything here, Amazon AppFlow uses the + Amazon AppFlow-managed KMS key. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + sourceFlowConfig: + description: The Source Flow Config that controls how Amazon AppFlow + retrieves data from the source connector. + properties: + apiVersion: + description: API version that the destination connector uses. + type: string + connectorProfileName: + description: Name of the connector profile. This name must + be unique for each connector profile in the AWS account. + type: string + connectorType: + description: Type of connector, such as Salesforce, Amplitude, + and so on. Valid values are Salesforce, Singular, Slack, + Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, + Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, Amplitude, + Veeva, EventBridge, LookoutMetrics, Upsolver, Honeycode, + CustomerProfiles, SAPOData, and CustomConnector. + type: string + incrementalPullConfig: + description: Defines the configuration for a scheduled incremental + data pull. If a valid configuration is provided, the fields + specified in the configuration are used when querying for + the incremental data pull. See Incremental Pull Config for + more details. + properties: + datetimeTypeFieldName: + description: Field that specifies the date time or timestamp + field as the criteria to use when importing incremental + records from the source. 
+ type: string + type: object + sourceConnectorProperties: + description: Information that is required to query a particular + source connector. See Source Connector Properties for details. + properties: + amplitude: + description: Information that is required for querying + Amplitude. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + customConnector: + description: Properties that are required to query the + custom Connector. See Custom Connector Destination Properties + for more details. + properties: + customProperties: + additionalProperties: + type: string + description: Custom properties that are specific to + the connector when it's used as a destination in + the flow. Maximum of 50 items. + type: object + x-kubernetes-map-type: granular + entityName: + description: Entity specified in the custom connector + as a destination in the flow. + type: string + type: object + datadog: + description: Information that is required for querying + Datadog. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + dynatrace: + description: Operation to be performed on the provided + Dynatrace source fields. Valid values are PROJECTION, + BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, + SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, + VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, + and NO_OP. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + googleAnalytics: + description: Operation to be performed on the provided + Google Analytics source fields. Valid values are PROJECTION + and BETWEEN. + properties: + object: + description: Object specified in the flow destination. 
+ type: string + type: object + inforNexus: + description: Information that is required for querying + Infor Nexus. See Generic Source Properties for more + details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + marketo: + description: Properties that are required to query Marketo. + See Generic Destination Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + s3: + description: Properties that are required to query Amazon + S3. See S3 Destination Properties for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which Amazon + AppFlow places the transferred data. + type: string + bucketNameRef: + description: Reference to a BucketPolicy in s3 to + populate bucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a BucketPolicy in s3 to + populate bucketName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bucketPrefix: + description: Object key for the bucket in which Amazon + AppFlow places the destination files. + type: string + s3InputFormatConfig: + description: When you use Amazon S3 as the source, + the configuration format that you provide the flow + input data. See S3 Input Format Config for details. + properties: + s3InputFileType: + description: File type that Amazon AppFlow gets + from your Amazon S3 bucket. Valid values are + CSV and JSON. + type: string + type: object + type: object + salesforce: + description: Properties that are required to query Salesforce. + See Salesforce Destination Properties for more details. + properties: + enableDynamicFieldUpdate: + description: Flag that enables dynamic fetching of + new (recently added) fields in the Salesforce objects + while running a flow. 
+ type: boolean + includeDeletedRecords: + description: Whether Amazon AppFlow includes deleted + files in the flow run. + type: boolean + object: + description: Object specified in the flow destination. + type: string + type: object + sapoData: + description: Properties that are required to query SAPOData. + See SAPOData Destination Properties for more details. + properties: + objectPath: + description: Object path specified in the SAPOData + flow destination. + type: string + type: object + serviceNow: + description: Information that is required for querying + ServiceNow. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + singular: + description: Information that is required for querying + Singular. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + slack: + description: Information that is required for querying + Slack. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + trendmicro: + description: Operation to be performed on the provided + Trend Micro source fields. Valid values are PROJECTION, + EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, + MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, + VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, + and NO_OP. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + veeva: + description: Information that is required for querying + Veeva. See Veeva Source Properties for more details. + properties: + documentType: + description: Document type specified in the Veeva + document extract flow. 
+ type: string + includeAllVersions: + description: Boolean value to include All Versions + of files in Veeva document extract flow. + type: boolean + includeRenditions: + description: Boolean value to include file renditions + in Veeva document extract flow. + type: boolean + includeSourceFiles: + description: Boolean value to include source files + in Veeva document extract flow. + type: boolean + object: + description: Object specified in the flow destination. + type: string + type: object + zendesk: + description: Properties that are required to query Zendesk. + See Zendesk Destination Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + task: + description: A Task that Amazon AppFlow performs while transferring + the data in the flow run. + items: + properties: + connectorOperator: + description: Operation to be performed on the provided source + fields. See Connector Operator for details. + items: + properties: + amplitude: + description: Information that is required for querying + Amplitude. See Generic Source Properties for more + details. + type: string + customConnector: + description: Properties that are required to query + the custom Connector. See Custom Connector Destination + Properties for more details. + type: string + datadog: + description: Information that is required for querying + Datadog. See Generic Source Properties for more + details. + type: string + dynatrace: + description: Operation to be performed on the provided + Dynatrace source fields. Valid values are PROJECTION, + BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, + SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, + VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, + VALIDATE_NUMERIC, and NO_OP. 
+ type: string + googleAnalytics: + description: Operation to be performed on the provided + Google Analytics source fields. Valid values are + PROJECTION and BETWEEN. + type: string + inforNexus: + description: Information that is required for querying + Infor Nexus. See Generic Source Properties for more + details. + type: string + marketo: + description: Properties that are required to query + Marketo. See Generic Destination Properties for + more details. + type: string + s3: + description: Properties that are required to query + Amazon S3. See S3 Destination Properties for more + details. + type: string + salesforce: + description: Properties that are required to query + Salesforce. See Salesforce Destination Properties + for more details. + type: string + sapoData: + description: Properties that are required to query + SAPOData. See SAPOData Destination Properties for + more details. + type: string + serviceNow: + description: Information that is required for querying + ServiceNow. See Generic Source Properties for more + details. + type: string + singular: + description: Information that is required for querying + Singular. See Generic Source Properties for more + details. + type: string + slack: + description: Information that is required for querying + Slack. See Generic Source Properties for more details. + type: string + trendmicro: + description: Operation to be performed on the provided + Trend Micro source fields. Valid values are PROJECTION, + EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, + MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, + VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, + and NO_OP. + type: string + veeva: + description: Information that is required for querying + Veeva. See Veeva Source Properties for more details. + type: string + zendesk: + description: Properties that are required to query + Zendesk. See Zendesk Destination Properties for + more details. 
+ type: string + type: object + type: array + destinationField: + description: Field in a destination connector, or a field + value against which Amazon AppFlow validates a source + field. + type: string + sourceFields: + description: Source fields to which a particular task is + applied. + items: + type: string + type: array + taskProperties: + additionalProperties: + type: string + description: Map used to store task-related information. + The execution service looks for particular information + based on the TaskType. Valid keys are VALUE, VALUES, DATA_TYPE, + UPPER_BOUND, LOWER_BOUND, SOURCE_DATA_TYPE, DESTINATION_DATA_TYPE, + VALIDATION_ACTION, MASK_VALUE, MASK_LENGTH, TRUNCATE_LENGTH, + MATH_OPERATION_FIELDS_ORDER, CONCAT_FORMAT, SUBFIELD_CATEGORY_MAP, + and EXCLUDE_SOURCE_FIELDS_LIST. + type: object + x-kubernetes-map-type: granular + taskType: + description: Particular task implementation that Amazon + AppFlow performs. Valid values are Arithmetic, Filter, + Map, Map_all, Mask, Merge, Passthrough, Truncate, and + Validate. + type: string + type: object + type: array + triggerConfig: + description: A Trigger that determine how and when the flow runs. + properties: + triggerProperties: + description: Configuration details of a schedule-triggered + flow as defined by the user. Currently, these settings only + apply to the Scheduled trigger type. See Scheduled Trigger + Properties for details. + properties: + scheduled: + properties: + dataPullMode: + description: Whether a scheduled flow has an incremental + data transfer or a complete data transfer for each + flow run. Valid values are Incremental and Complete. + type: string + firstExecutionFrom: + description: Date range for the records to import + from the connector in the first flow run. Must be + a valid RFC3339 timestamp. + type: string + scheduleEndTime: + description: Scheduled end time for a schedule-triggered + flow. Must be a valid RFC3339 timestamp. 
+ type: string + scheduleExpression: + description: Scheduling expression that determines + the rate at which the schedule will run, for example + rate(5minutes). + type: string + scheduleOffset: + description: Optional offset that is added to the + time interval for a schedule-triggered flow. Maximum + value of 36000. + type: number + scheduleStartTime: + description: Scheduled start time for a schedule-triggered + flow. Must be a valid RFC3339 timestamp. + type: string + timezone: + description: Time zone used when referring to the + date and time of a scheduled-triggered flow, such + as America/New_York. + type: string + type: object + type: object + triggerType: + description: Type of flow trigger. Valid values are Scheduled, + Event, and OnDemand. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the flow you want to create. + type: string + destinationFlowConfig: + description: A Destination Flow Config that controls how Amazon + AppFlow places data in the destination connector. + items: + properties: + apiVersion: + description: API version that the destination connector + uses. + type: string + connectorProfileName: + description: Name of the connector profile. 
This name must + be unique for each connector profile in the AWS account. + type: string + connectorType: + description: Type of connector, such as Salesforce, Amplitude, + and so on. Valid values are Salesforce, Singular, Slack, + Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, + Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, + Amplitude, Veeva, EventBridge, LookoutMetrics, Upsolver, + Honeycode, CustomerProfiles, SAPOData, and CustomConnector. + type: string + destinationConnectorProperties: + description: This stores the information that is required + to query a particular connector. See Destination Connector + Properties for more information. + properties: + customConnector: + description: Properties that are required to query the + custom Connector. See Custom Connector Destination + Properties for more details. + properties: + customProperties: + additionalProperties: + type: string + description: Custom properties that are specific + to the connector when it's used as a destination + in the flow. Maximum of 50 items. + type: object + x-kubernetes-map-type: granular + entityName: + description: Entity specified in the custom connector + as a destination in the flow. + type: string + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. 
+ type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + customerProfiles: + description: Properties that are required to query Amazon + Connect Customer Profiles. See Customer Profiles Destination + Properties for more details. + properties: + domainName: + description: Unique name of the Amazon Connect Customer + Profiles domain. + type: string + objectTypeName: + description: Object specified in the Amazon Connect + Customer Profiles flow destination. + type: string + type: object + eventBridge: + description: Properties that are required to query Amazon + EventBridge. See Generic Destination Properties for + more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + object: + description: Object specified in the flow destination. + type: string + type: object + honeycode: + description: Properties that are required to query Amazon + Honeycode. See Generic Destination Properties for + more details. 
+ properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + object: + description: Object specified in the flow destination. + type: string + type: object + lookoutMetrics: + type: object + marketo: + description: Properties that are required to query Marketo. + See Generic Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + object: + description: Object specified in the flow destination. + type: string + type: object + redshift: + description: Properties that are required to query Amazon + Redshift. See Redshift Destination Properties for + more details. + properties: + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. 
+ type: string + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + intermediateBucketName: + description: Intermediate bucket that Amazon AppFlow + uses when moving data into Amazon Redshift. + type: string + object: + description: Object specified in the flow destination. + type: string + type: object + s3: + description: Properties that are required to query Amazon + S3. See S3 Destination Properties for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which Amazon + AppFlow places the transferred data. + type: string + bucketNameRef: + description: Reference to a BucketPolicy in s3 to + populate bucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a BucketPolicy in s3 to + populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + s3OutputFormatConfig: + description: Configuration that determines how Amazon + AppFlow should format the flow output data when + Amazon S3 is used as the destination. See S3 Output + Format Config for more details. + properties: + aggregationConfig: + description: Aggregation settings that you can + use to customize the output format of your + flow data. See Aggregation Config for more + details. 
+ properties: + aggregationType: + description: Whether Amazon AppFlow aggregates + the flow records into a single file, or + leave them unaggregated. Valid values + are None and SingleFile. + type: string + targetFileSize: + description: The desired file size, in MB, + for each output file that Amazon AppFlow + writes to the flow destination. Integer + value. + type: number + type: object + fileType: + description: File type that Amazon AppFlow places + in the Amazon S3 bucket. Valid values are + CSV, JSON, and PARQUET. + type: string + prefixConfig: + description: Determines the prefix that Amazon + AppFlow applies to the folder name in the + Amazon S3 bucket. You can name folders according + to the flow frequency and date. See Prefix + Config for more details. + properties: + prefixFormat: + description: Determines the level of granularity + that's included in the prefix. Valid values + are YEAR, MONTH, DAY, HOUR, and MINUTE. + type: string + prefixType: + description: Determines the format of the + prefix, and whether it applies to the + file name, file path, or both. Valid values + are FILENAME, PATH, and PATH_AND_FILENAME. + type: string + type: object + preserveSourceDataTyping: + description: Whether the data types from the + source system need to be preserved (Only valid + for Parquet file type) + type: boolean + type: object + type: object + salesforce: + description: Properties that are required to query Salesforce. + See Salesforce Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. 
+ type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + object: + description: Object specified in the flow destination. + type: string + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + sapoData: + description: Properties that are required to query SAPOData. + See SAPOData Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + objectPath: + description: Object path specified in the SAPOData + flow destination. + type: string + successResponseHandlingConfig: + description: Determines how Amazon AppFlow handles + the success response that it gets from the connector + after placing data. 
See Success Response Handling + Config for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + type: object + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + snowflake: + description: Properties that are required to query Snowflake. + See Snowflake Destination Properties for more details. + properties: + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + intermediateBucketName: + description: Intermediate bucket that Amazon AppFlow + uses when moving data into Amazon Redshift. + type: string + object: + description: Object specified in the flow destination. + type: string + type: object + upsolver: + description: Properties that are required to query Upsolver. + See Upsolver Destination Properties for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which Amazon + AppFlow places the transferred data. 
+ type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + s3OutputFormatConfig: + description: Configuration that determines how Amazon + AppFlow should format the flow output data when + Amazon S3 is used as the destination. See S3 Output + Format Config for more details. + properties: + aggregationConfig: + description: Aggregation settings that you can + use to customize the output format of your + flow data. See Aggregation Config for more + details. + properties: + aggregationType: + description: Whether Amazon AppFlow aggregates + the flow records into a single file, or + leave them unaggregated. Valid values + are None and SingleFile. + type: string + type: object + fileType: + description: File type that Amazon AppFlow places + in the Amazon S3 bucket. Valid values are + CSV, JSON, and PARQUET. + type: string + prefixConfig: + description: Determines the prefix that Amazon + AppFlow applies to the folder name in the + Amazon S3 bucket. You can name folders according + to the flow frequency and date. See Prefix + Config for more details. + properties: + prefixFormat: + description: Determines the level of granularity + that's included in the prefix. Valid values + are YEAR, MONTH, DAY, HOUR, and MINUTE. + type: string + prefixType: + description: Determines the format of the + prefix, and whether it applies to the + file name, file path, or both. Valid values + are FILENAME, PATH, and PATH_AND_FILENAME. + type: string + type: object + type: object + type: object + zendesk: + description: Properties that are required to query Zendesk. + See Zendesk Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. 
+ properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + object: + description: Object specified in the flow destination. + type: string + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + type: object + type: object + type: array + kmsArn: + description: ARN (Amazon Resource Name) of the Key Management + Service (KMS) key you provide for encryption. This is required + if you do not want to use the Amazon AppFlow-managed KMS key. + If you don't provide anything here, Amazon AppFlow uses the + Amazon AppFlow-managed KMS key. + type: string + sourceFlowConfig: + description: The Source Flow Config that controls how Amazon AppFlow + retrieves data from the source connector. + properties: + apiVersion: + description: API version that the destination connector uses. + type: string + connectorProfileName: + description: Name of the connector profile. This name must + be unique for each connector profile in the AWS account. + type: string + connectorType: + description: Type of connector, such as Salesforce, Amplitude, + and so on. 
Valid values are Salesforce, Singular, Slack, + Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, + Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, Amplitude, + Veeva, EventBridge, LookoutMetrics, Upsolver, Honeycode, + CustomerProfiles, SAPOData, and CustomConnector. + type: string + incrementalPullConfig: + description: Defines the configuration for a scheduled incremental + data pull. If a valid configuration is provided, the fields + specified in the configuration are used when querying for + the incremental data pull. See Incremental Pull Config for + more details. + properties: + datetimeTypeFieldName: + description: Field that specifies the date time or timestamp + field as the criteria to use when importing incremental + records from the source. + type: string + type: object + sourceConnectorProperties: + description: Information that is required to query a particular + source connector. See Source Connector Properties for details. + properties: + amplitude: + description: Information that is required for querying + Amplitude. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + customConnector: + description: Properties that are required to query the + custom Connector. See Custom Connector Destination Properties + for more details. + properties: + customProperties: + additionalProperties: + type: string + description: Custom properties that are specific to + the connector when it's used as a destination in + the flow. Maximum of 50 items. + type: object + x-kubernetes-map-type: granular + entityName: + description: Entity specified in the custom connector + as a destination in the flow. + type: string + type: object + datadog: + description: Information that is required for querying + Datadog. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. 
+ type: string + type: object + dynatrace: + description: Operation to be performed on the provided + Dynatrace source fields. Valid values are PROJECTION, + BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, + SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, + VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, + and NO_OP. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + googleAnalytics: + description: Operation to be performed on the provided + Google Analytics source fields. Valid values are PROJECTION + and BETWEEN. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + inforNexus: + description: Information that is required for querying + Infor Nexus. See Generic Source Properties for more + details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + marketo: + description: Properties that are required to query Marketo. + See Generic Destination Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + s3: + description: Properties that are required to query Amazon + S3. See S3 Destination Properties for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which Amazon + AppFlow places the transferred data. + type: string + bucketNameRef: + description: Reference to a BucketPolicy in s3 to + populate bucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a BucketPolicy in s3 to + populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bucketPrefix: + description: Object key for the bucket in which Amazon + AppFlow places the destination files. + type: string + s3InputFormatConfig: + description: When you use Amazon S3 as the source, + the configuration format that you provide the flow + input data. 
See S3 Input Format Config for details. + properties: + s3InputFileType: + description: File type that Amazon AppFlow gets + from your Amazon S3 bucket. Valid values are + CSV and JSON. + type: string + type: object + type: object + salesforce: + description: Properties that are required to query Salesforce. + See Salesforce Destination Properties for more details. + properties: + enableDynamicFieldUpdate: + description: Flag that enables dynamic fetching of + new (recently added) fields in the Salesforce objects + while running a flow. + type: boolean + includeDeletedRecords: + description: Whether Amazon AppFlow includes deleted + files in the flow run. + type: boolean + object: + description: Object specified in the flow destination. + type: string + type: object + sapoData: + description: Properties that are required to query SAPOData. + See SAPOData Destination Properties for more details. + properties: + objectPath: + description: Object path specified in the SAPOData + flow destination. + type: string + type: object + serviceNow: + description: Information that is required for querying + ServiceNow. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + singular: + description: Information that is required for querying + Singular. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + slack: + description: Information that is required for querying + Slack. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + trendmicro: + description: Operation to be performed on the provided + Trend Micro source fields. 
Valid values are PROJECTION, + EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, + MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, + VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, + and NO_OP. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + veeva: + description: Information that is required for querying + Veeva. See Veeva Source Properties for more details. + properties: + documentType: + description: Document type specified in the Veeva + document extract flow. + type: string + includeAllVersions: + description: Boolean value to include All Versions + of files in Veeva document extract flow. + type: boolean + includeRenditions: + description: Boolean value to include file renditions + in Veeva document extract flow. + type: boolean + includeSourceFiles: + description: Boolean value to include source files + in Veeva document extract flow. + type: boolean + object: + description: Object specified in the flow destination. + type: string + type: object + zendesk: + description: Properties that are required to query Zendesk. + See Zendesk Destination Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + task: + description: A Task that Amazon AppFlow performs while transferring + the data in the flow run. + items: + properties: + connectorOperator: + description: Operation to be performed on the provided source + fields. See Connector Operator for details. + items: + properties: + amplitude: + description: Information that is required for querying + Amplitude. See Generic Source Properties for more + details. + type: string + customConnector: + description: Properties that are required to query + the custom Connector. 
See Custom Connector Destination + Properties for more details. + type: string + datadog: + description: Information that is required for querying + Datadog. See Generic Source Properties for more + details. + type: string + dynatrace: + description: Operation to be performed on the provided + Dynatrace source fields. Valid values are PROJECTION, + BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, + SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, + VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, + VALIDATE_NUMERIC, and NO_OP. + type: string + googleAnalytics: + description: Operation to be performed on the provided + Google Analytics source fields. Valid values are + PROJECTION and BETWEEN. + type: string + inforNexus: + description: Information that is required for querying + Infor Nexus. See Generic Source Properties for more + details. + type: string + marketo: + description: Properties that are required to query + Marketo. See Generic Destination Properties for + more details. + type: string + s3: + description: Properties that are required to query + Amazon S3. See S3 Destination Properties for more + details. + type: string + salesforce: + description: Properties that are required to query + Salesforce. See Salesforce Destination Properties + for more details. + type: string + sapoData: + description: Properties that are required to query + SAPOData. See SAPOData Destination Properties for + more details. + type: string + serviceNow: + description: Information that is required for querying + ServiceNow. See Generic Source Properties for more + details. + type: string + singular: + description: Information that is required for querying + Singular. See Generic Source Properties for more + details. + type: string + slack: + description: Information that is required for querying + Slack. See Generic Source Properties for more details. 
+ type: string + trendmicro: + description: Operation to be performed on the provided + Trend Micro source fields. Valid values are PROJECTION, + EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, + MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, + VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, + and NO_OP. + type: string + veeva: + description: Information that is required for querying + Veeva. See Veeva Source Properties for more details. + type: string + zendesk: + description: Properties that are required to query + Zendesk. See Zendesk Destination Properties for + more details. + type: string + type: object + type: array + destinationField: + description: Field in a destination connector, or a field + value against which Amazon AppFlow validates a source + field. + type: string + sourceFields: + description: Source fields to which a particular task is + applied. + items: + type: string + type: array + taskProperties: + additionalProperties: + type: string + description: Map used to store task-related information. + The execution service looks for particular information + based on the TaskType. Valid keys are VALUE, VALUES, DATA_TYPE, + UPPER_BOUND, LOWER_BOUND, SOURCE_DATA_TYPE, DESTINATION_DATA_TYPE, + VALIDATION_ACTION, MASK_VALUE, MASK_LENGTH, TRUNCATE_LENGTH, + MATH_OPERATION_FIELDS_ORDER, CONCAT_FORMAT, SUBFIELD_CATEGORY_MAP, + and EXCLUDE_SOURCE_FIELDS_LIST. + type: object + x-kubernetes-map-type: granular + taskType: + description: Particular task implementation that Amazon + AppFlow performs. Valid values are Arithmetic, Filter, + Map, Map_all, Mask, Merge, Passthrough, Truncate, and + Validate. + type: string + type: object + type: array + triggerConfig: + description: A Trigger that determine how and when the flow runs. + properties: + triggerProperties: + description: Configuration details of a schedule-triggered + flow as defined by the user. Currently, these settings only + apply to the Scheduled trigger type. 
See Scheduled Trigger + Properties for details. + properties: + scheduled: + properties: + dataPullMode: + description: Whether a scheduled flow has an incremental + data transfer or a complete data transfer for each + flow run. Valid values are Incremental and Complete. + type: string + firstExecutionFrom: + description: Date range for the records to import + from the connector in the first flow run. Must be + a valid RFC3339 timestamp. + type: string + scheduleEndTime: + description: Scheduled end time for a schedule-triggered + flow. Must be a valid RFC3339 timestamp. + type: string + scheduleExpression: + description: Scheduling expression that determines + the rate at which the schedule will run, for example + rate(5minutes). + type: string + scheduleOffset: + description: Optional offset that is added to the + time interval for a schedule-triggered flow. Maximum + value of 36000. + type: number + scheduleStartTime: + description: Scheduled start time for a schedule-triggered + flow. Must be a valid RFC3339 timestamp. + type: string + timezone: + description: Time zone used when referring to the + date and time of a scheduled-triggered flow, such + as America/New_York. + type: string + type: object + type: object + triggerType: + description: Type of flow trigger. Valid values are Scheduled, + Event, and OnDemand. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.destinationFlowConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destinationFlowConfig) + || (has(self.initProvider) && has(self.initProvider.destinationFlowConfig))' + - message: spec.forProvider.sourceFlowConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sourceFlowConfig) + || (has(self.initProvider) && has(self.initProvider.sourceFlowConfig))' + - message: spec.forProvider.task is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.task) + || (has(self.initProvider) && has(self.initProvider.task))' + - message: spec.forProvider.triggerConfig is a required parameter + rule: '!(''*'' in 
self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.triggerConfig) + || (has(self.initProvider) && has(self.initProvider.triggerConfig))' + status: + description: FlowStatus defines the observed state of Flow. + properties: + atProvider: + properties: + arn: + description: Flow's ARN. + type: string + description: + description: Description of the flow you want to create. + type: string + destinationFlowConfig: + description: A Destination Flow Config that controls how Amazon + AppFlow places data in the destination connector. + items: + properties: + apiVersion: + description: API version that the destination connector + uses. + type: string + connectorProfileName: + description: Name of the connector profile. This name must + be unique for each connector profile in the AWS account. + type: string + connectorType: + description: Type of connector, such as Salesforce, Amplitude, + and so on. Valid values are Salesforce, Singular, Slack, + Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, + Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, + Amplitude, Veeva, EventBridge, LookoutMetrics, Upsolver, + Honeycode, CustomerProfiles, SAPOData, and CustomConnector. + type: string + destinationConnectorProperties: + description: This stores the information that is required + to query a particular connector. See Destination Connector + Properties for more information. + properties: + customConnector: + description: Properties that are required to query the + custom Connector. See Custom Connector Destination + Properties for more details. + properties: + customProperties: + additionalProperties: + type: string + description: Custom properties that are specific + to the connector when it's used as a destination + in the flow. Maximum of 50 items. 
+ type: object + x-kubernetes-map-type: granular + entityName: + description: Entity specified in the custom connector + as a destination in the flow. + type: string + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + customerProfiles: + description: Properties that are required to query Amazon + Connect Customer Profiles. See Customer Profiles Destination + Properties for more details. + properties: + domainName: + description: Unique name of the Amazon Connect Customer + Profiles domain. + type: string + objectTypeName: + description: Object specified in the Amazon Connect + Customer Profiles flow destination. + type: string + type: object + eventBridge: + description: Properties that are required to query Amazon + EventBridge. See Generic Destination Properties for + more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. 
+ properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + object: + description: Object specified in the flow destination. + type: string + type: object + honeycode: + description: Properties that are required to query Amazon + Honeycode. See Generic Destination Properties for + more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + object: + description: Object specified in the flow destination. + type: string + type: object + lookoutMetrics: + type: object + marketo: + description: Properties that are required to query Marketo. + See Generic Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. 
+ type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + object: + description: Object specified in the flow destination. + type: string + type: object + redshift: + description: Properties that are required to query Amazon + Redshift. See Redshift Destination Properties for + more details. + properties: + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + intermediateBucketName: + description: Intermediate bucket that Amazon AppFlow + uses when moving data into Amazon Redshift. + type: string + object: + description: Object specified in the flow destination. + type: string + type: object + s3: + description: Properties that are required to query Amazon + S3. See S3 Destination Properties for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which Amazon + AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. 
+ type: string + s3OutputFormatConfig: + description: Configuration that determines how Amazon + AppFlow should format the flow output data when + Amazon S3 is used as the destination. See S3 Output + Format Config for more details. + properties: + aggregationConfig: + description: Aggregation settings that you can + use to customize the output format of your + flow data. See Aggregation Config for more + details. + properties: + aggregationType: + description: Whether Amazon AppFlow aggregates + the flow records into a single file, or + leave them unaggregated. Valid values + are None and SingleFile. + type: string + targetFileSize: + description: The desired file size, in MB, + for each output file that Amazon AppFlow + writes to the flow destination. Integer + value. + type: number + type: object + fileType: + description: File type that Amazon AppFlow places + in the Amazon S3 bucket. Valid values are + CSV, JSON, and PARQUET. + type: string + prefixConfig: + description: Determines the prefix that Amazon + AppFlow applies to the folder name in the + Amazon S3 bucket. You can name folders according + to the flow frequency and date. See Prefix + Config for more details. + properties: + prefixFormat: + description: Determines the level of granularity + that's included in the prefix. Valid values + are YEAR, MONTH, DAY, HOUR, and MINUTE. + type: string + prefixType: + description: Determines the format of the + prefix, and whether it applies to the + file name, file path, or both. Valid values + are FILENAME, PATH, and PATH_AND_FILENAME. + type: string + type: object + preserveSourceDataTyping: + description: Whether the data types from the + source system need to be preserved (Only valid + for Parquet file type) + type: boolean + type: object + type: object + salesforce: + description: Properties that are required to query Salesforce. + See Salesforce Destination Properties for more details. 
+ properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + object: + description: Object specified in the flow destination. + type: string + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + sapoData: + description: Properties that are required to query SAPOData. + See SAPOData Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. 
+ type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + objectPath: + description: Object path specified in the SAPOData + flow destination. + type: string + successResponseHandlingConfig: + description: Determines how Amazon AppFlow handles + the success response that it gets from the connector + after placing data. See Success Response Handling + Config for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + type: object + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + snowflake: + description: Properties that are required to query Snowflake. + See Snowflake Destination Properties for more details. + properties: + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. 
+ type: boolean + type: object + intermediateBucketName: + description: Intermediate bucket that Amazon AppFlow + uses when moving data into Amazon Redshift. + type: string + object: + description: Object specified in the flow destination. + type: string + type: object + upsolver: + description: Properties that are required to query Upsolver. + See Upsolver Destination Properties for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which Amazon + AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + s3OutputFormatConfig: + description: Configuration that determines how Amazon + AppFlow should format the flow output data when + Amazon S3 is used as the destination. See S3 Output + Format Config for more details. + properties: + aggregationConfig: + description: Aggregation settings that you can + use to customize the output format of your + flow data. See Aggregation Config for more + details. + properties: + aggregationType: + description: Whether Amazon AppFlow aggregates + the flow records into a single file, or + leave them unaggregated. Valid values + are None and SingleFile. + type: string + type: object + fileType: + description: File type that Amazon AppFlow places + in the Amazon S3 bucket. Valid values are + CSV, JSON, and PARQUET. + type: string + prefixConfig: + description: Determines the prefix that Amazon + AppFlow applies to the folder name in the + Amazon S3 bucket. You can name folders according + to the flow frequency and date. See Prefix + Config for more details. + properties: + prefixFormat: + description: Determines the level of granularity + that's included in the prefix. Valid values + are YEAR, MONTH, DAY, HOUR, and MINUTE. + type: string + prefixType: + description: Determines the format of the + prefix, and whether it applies to the + file name, file path, or both. 
Valid values + are FILENAME, PATH, and PATH_AND_FILENAME. + type: string + type: object + type: object + type: object + zendesk: + description: Properties that are required to query Zendesk. + See Zendesk Destination Properties for more details. + properties: + errorHandlingConfig: + description: Settings that determine how Amazon + AppFlow handles an error when placing data in + the destination. See Error Handling Config for + more details. + properties: + bucketName: + description: Amazon S3 bucket name in which + Amazon AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which + Amazon AppFlow places the destination files. + type: string + failOnFirstDestinationError: + description: If the flow should fail after the + first instance of a failure when attempting + to place data in the destination. + type: boolean + type: object + idFieldNames: + description: Name of the field that Amazon AppFlow + uses as an ID when performing a write operation + such as update, delete, or upsert. + items: + type: string + type: array + object: + description: Object specified in the flow destination. + type: string + writeOperationType: + description: Type of write operation to be performed + in the custom connector when it's used as destination. + Valid values are INSERT, UPSERT, UPDATE, and DELETE. + type: string + type: object + type: object + type: object + type: array + flowStatus: + description: The current status of the flow. + type: string + id: + type: string + kmsArn: + description: ARN (Amazon Resource Name) of the Key Management + Service (KMS) key you provide for encryption. This is required + if you do not want to use the Amazon AppFlow-managed KMS key. + If you don't provide anything here, Amazon AppFlow uses the + Amazon AppFlow-managed KMS key. + type: string + sourceFlowConfig: + description: The Source Flow Config that controls how Amazon AppFlow + retrieves data from the source connector. 
+ properties: + apiVersion: + description: API version that the destination connector uses. + type: string + connectorProfileName: + description: Name of the connector profile. This name must + be unique for each connector profile in the AWS account. + type: string + connectorType: + description: Type of connector, such as Salesforce, Amplitude, + and so on. Valid values are Salesforce, Singular, Slack, + Redshift, S3, Marketo, Googleanalytics, Zendesk, Servicenow, + Datadog, Trendmicro, Snowflake, Dynatrace, Infornexus, Amplitude, + Veeva, EventBridge, LookoutMetrics, Upsolver, Honeycode, + CustomerProfiles, SAPOData, and CustomConnector. + type: string + incrementalPullConfig: + description: Defines the configuration for a scheduled incremental + data pull. If a valid configuration is provided, the fields + specified in the configuration are used when querying for + the incremental data pull. See Incremental Pull Config for + more details. + properties: + datetimeTypeFieldName: + description: Field that specifies the date time or timestamp + field as the criteria to use when importing incremental + records from the source. + type: string + type: object + sourceConnectorProperties: + description: Information that is required to query a particular + source connector. See Source Connector Properties for details. + properties: + amplitude: + description: Information that is required for querying + Amplitude. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + customConnector: + description: Properties that are required to query the + custom Connector. See Custom Connector Destination Properties + for more details. + properties: + customProperties: + additionalProperties: + type: string + description: Custom properties that are specific to + the connector when it's used as a destination in + the flow. Maximum of 50 items. 
+ type: object + x-kubernetes-map-type: granular + entityName: + description: Entity specified in the custom connector + as a destination in the flow. + type: string + type: object + datadog: + description: Information that is required for querying + Datadog. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + dynatrace: + description: Operation to be performed on the provided + Dynatrace source fields. Valid values are PROJECTION, + BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, + SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, + VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, + and NO_OP. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + googleAnalytics: + description: Operation to be performed on the provided + Google Analytics source fields. Valid values are PROJECTION + and BETWEEN. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + inforNexus: + description: Information that is required for querying + Infor Nexus. See Generic Source Properties for more + details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + marketo: + description: Properties that are required to query Marketo. + See Generic Destination Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + s3: + description: Properties that are required to query Amazon + S3. See S3 Destination Properties for more details. + properties: + bucketName: + description: Amazon S3 bucket name in which Amazon + AppFlow places the transferred data. + type: string + bucketPrefix: + description: Object key for the bucket in which Amazon + AppFlow places the destination files. 
+ type: string + s3InputFormatConfig: + description: When you use Amazon S3 as the source, + the configuration format that you provide the flow + input data. See S3 Input Format Config for details. + properties: + s3InputFileType: + description: File type that Amazon AppFlow gets + from your Amazon S3 bucket. Valid values are + CSV and JSON. + type: string + type: object + type: object + salesforce: + description: Properties that are required to query Salesforce. + See Salesforce Destination Properties for more details. + properties: + enableDynamicFieldUpdate: + description: Flag that enables dynamic fetching of + new (recently added) fields in the Salesforce objects + while running a flow. + type: boolean + includeDeletedRecords: + description: Whether Amazon AppFlow includes deleted + files in the flow run. + type: boolean + object: + description: Object specified in the flow destination. + type: string + type: object + sapoData: + description: Properties that are required to query SAPOData. + See SAPOData Destination Properties for more details. + properties: + objectPath: + description: Object path specified in the SAPOData + flow destination. + type: string + type: object + serviceNow: + description: Information that is required for querying + ServiceNow. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + singular: + description: Information that is required for querying + Singular. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + slack: + description: Information that is required for querying + Slack. See Generic Source Properties for more details. + properties: + object: + description: Object specified in the flow destination. 
+ type: string + type: object + trendmicro: + description: Operation to be performed on the provided + Trend Micro source fields. Valid values are PROJECTION, + EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, + MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, + VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, + and NO_OP. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + veeva: + description: Information that is required for querying + Veeva. See Veeva Source Properties for more details. + properties: + documentType: + description: Document type specified in the Veeva + document extract flow. + type: string + includeAllVersions: + description: Boolean value to include All Versions + of files in Veeva document extract flow. + type: boolean + includeRenditions: + description: Boolean value to include file renditions + in Veeva document extract flow. + type: boolean + includeSourceFiles: + description: Boolean value to include source files + in Veeva document extract flow. + type: boolean + object: + description: Object specified in the flow destination. + type: string + type: object + zendesk: + description: Properties that are required to query Zendesk. + See Zendesk Destination Properties for more details. + properties: + object: + description: Object specified in the flow destination. + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + task: + description: A Task that Amazon AppFlow performs while transferring + the data in the flow run. 
+ items: + properties: + connectorOperator: + description: Operation to be performed on the provided source + fields. See Connector Operator for details. + items: + properties: + amplitude: + description: Information that is required for querying + Amplitude. See Generic Source Properties for more + details. + type: string + customConnector: + description: Properties that are required to query + the custom Connector. See Custom Connector Destination + Properties for more details. + type: string + datadog: + description: Information that is required for querying + Datadog. See Generic Source Properties for more + details. + type: string + dynatrace: + description: Operation to be performed on the provided + Dynatrace source fields. Valid values are PROJECTION, + BETWEEN, EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, + SUBTRACTION, MASK_ALL, MASK_FIRST_N, MASK_LAST_N, + VALIDATE_NON_NULL, VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, + VALIDATE_NUMERIC, and NO_OP. + type: string + googleAnalytics: + description: Operation to be performed on the provided + Google Analytics source fields. Valid values are + PROJECTION and BETWEEN. + type: string + inforNexus: + description: Information that is required for querying + Infor Nexus. See Generic Source Properties for more + details. + type: string + marketo: + description: Properties that are required to query + Marketo. See Generic Destination Properties for + more details. + type: string + s3: + description: Properties that are required to query + Amazon S3. See S3 Destination Properties for more + details. + type: string + salesforce: + description: Properties that are required to query + Salesforce. See Salesforce Destination Properties + for more details. + type: string + sapoData: + description: Properties that are required to query + SAPOData. See SAPOData Destination Properties for + more details. + type: string + serviceNow: + description: Information that is required for querying + ServiceNow. 
See Generic Source Properties for more + details. + type: string + singular: + description: Information that is required for querying + Singular. See Generic Source Properties for more + details. + type: string + slack: + description: Information that is required for querying + Slack. See Generic Source Properties for more details. + type: string + trendmicro: + description: Operation to be performed on the provided + Trend Micro source fields. Valid values are PROJECTION, + EQUAL_TO, ADDITION, MULTIPLICATION, DIVISION, SUBTRACTION, + MASK_ALL, MASK_FIRST_N, MASK_LAST_N, VALIDATE_NON_NULL, + VALIDATE_NON_ZERO, VALIDATE_NON_NEGATIVE, VALIDATE_NUMERIC, + and NO_OP. + type: string + veeva: + description: Information that is required for querying + Veeva. See Veeva Source Properties for more details. + type: string + zendesk: + description: Properties that are required to query + Zendesk. See Zendesk Destination Properties for + more details. + type: string + type: object + type: array + destinationField: + description: Field in a destination connector, or a field + value against which Amazon AppFlow validates a source + field. + type: string + sourceFields: + description: Source fields to which a particular task is + applied. + items: + type: string + type: array + taskProperties: + additionalProperties: + type: string + description: Map used to store task-related information. + The execution service looks for particular information + based on the TaskType. Valid keys are VALUE, VALUES, DATA_TYPE, + UPPER_BOUND, LOWER_BOUND, SOURCE_DATA_TYPE, DESTINATION_DATA_TYPE, + VALIDATION_ACTION, MASK_VALUE, MASK_LENGTH, TRUNCATE_LENGTH, + MATH_OPERATION_FIELDS_ORDER, CONCAT_FORMAT, SUBFIELD_CATEGORY_MAP, + and EXCLUDE_SOURCE_FIELDS_LIST. + type: object + x-kubernetes-map-type: granular + taskType: + description: Particular task implementation that Amazon + AppFlow performs. Valid values are Arithmetic, Filter, + Map, Map_all, Mask, Merge, Passthrough, Truncate, and + Validate. 
+ type: string + type: object + type: array + triggerConfig: + description: A Trigger that determine how and when the flow runs. + properties: + triggerProperties: + description: Configuration details of a schedule-triggered + flow as defined by the user. Currently, these settings only + apply to the Scheduled trigger type. See Scheduled Trigger + Properties for details. + properties: + scheduled: + properties: + dataPullMode: + description: Whether a scheduled flow has an incremental + data transfer or a complete data transfer for each + flow run. Valid values are Incremental and Complete. + type: string + firstExecutionFrom: + description: Date range for the records to import + from the connector in the first flow run. Must be + a valid RFC3339 timestamp. + type: string + scheduleEndTime: + description: Scheduled end time for a schedule-triggered + flow. Must be a valid RFC3339 timestamp. + type: string + scheduleExpression: + description: Scheduling expression that determines + the rate at which the schedule will run, for example + rate(5minutes). + type: string + scheduleOffset: + description: Optional offset that is added to the + time interval for a schedule-triggered flow. Maximum + value of 36000. + type: number + scheduleStartTime: + description: Scheduled start time for a schedule-triggered + flow. Must be a valid RFC3339 timestamp. + type: string + timezone: + description: Time zone used when referring to the + date and time of a scheduled-triggered flow, such + as America/New_York. + type: string + type: object + type: object + triggerType: + description: Type of flow trigger. Valid values are Scheduled, + Event, and OnDemand. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appintegrations.aws.upbound.io_eventintegrations.yaml b/package/crds/appintegrations.aws.upbound.io_eventintegrations.yaml index b2b95ff9ba..b4c7909097 100644 --- a/package/crds/appintegrations.aws.upbound.io_eventintegrations.yaml +++ b/package/crds/appintegrations.aws.upbound.io_eventintegrations.yaml @@ -417,3 +417,396 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EventIntegration is the Schema for the EventIntegrations API. + Provides details about a specific Amazon AppIntegrations Event Integration + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EventIntegrationSpec defines the desired state of EventIntegration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the Event Integration. + type: string + eventFilter: + description: Block that defines the configuration information + for the event filter. The Event Filter block is documented below. + properties: + source: + description: Source of the events. + type: string + type: object + eventbridgeBus: + description: EventBridge bus. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the Event Integration. + type: string + eventFilter: + description: Block that defines the configuration information + for the event filter. The Event Filter block is documented below. + properties: + source: + description: Source of the events. + type: string + type: object + eventbridgeBus: + description: EventBridge bus. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.eventFilter is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.eventFilter) + || (has(self.initProvider) && has(self.initProvider.eventFilter))' + - message: spec.forProvider.eventbridgeBus is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.eventbridgeBus) + || (has(self.initProvider) && has(self.initProvider.eventbridgeBus))' + status: + description: EventIntegrationStatus defines the observed state of EventIntegration. + properties: + atProvider: + properties: + arn: + description: ARN of the Event Integration. + type: string + description: + description: Description of the Event Integration. + type: string + eventFilter: + description: Block that defines the configuration information + for the event filter. The Event Filter block is documented below. + properties: + source: + description: Source of the events. + type: string + type: object + eventbridgeBus: + description: EventBridge bus. + type: string + id: + description: Identifier of the Event Integration which is the + name of the Event Integration. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appmesh.aws.upbound.io_gatewayroutes.yaml b/package/crds/appmesh.aws.upbound.io_gatewayroutes.yaml index 9f973e835d..0f48cf99f5 100644 --- a/package/crds/appmesh.aws.upbound.io_gatewayroutes.yaml +++ b/package/crds/appmesh.aws.upbound.io_gatewayroutes.yaml @@ -2195,3 +2195,1864 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GatewayRoute is the Schema for the GatewayRoutes API. Provides + an AWS App Mesh gateway route resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GatewayRouteSpec defines the desired state of GatewayRoute + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + meshName: + description: Name of the service mesh in which to create the gateway + route. Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the gateway route. Must be between + 1 and 255 characters in length. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + spec: + description: Gateway route specification to apply. + properties: + grpcRoute: + description: Specification of a gRPC gateway route. + properties: + action: + description: Action to take if a match is determined. + properties: + target: + description: Target that traffic is routed to when + a request matches the gateway route. + properties: + port: + description: The port number that corresponds + to the target for Virtual Service provider port. 
+ This is required when the provider (router or + node) of the Virtual Service has multiple listeners. + type: number + virtualService: + description: Virtual service gateway route target. + properties: + virtualServiceName: + description: Name of the virtual service that + traffic is routed to. Must be between 1 + and 255 characters in length. + type: string + type: object + type: object + type: object + match: + description: Criteria for determining a request match. + properties: + port: + description: The port number that corresponds to the + target for Virtual Service provider port. This is + required when the provider (router or node) of the + Virtual Service has multiple listeners. + type: number + serviceName: + description: Fully qualified domain name for the service + to match from the request. + type: string + type: object + type: object + http2Route: + description: Specification of an HTTP/2 gateway route. + properties: + action: + description: Action to take if a match is determined. + properties: + rewrite: + description: Gateway route action to rewrite. + properties: + hostname: + description: Host name to rewrite. + properties: + defaultTargetHostname: + description: 'Default target host name to + write to. Valid values: ENABLED, DISABLED.' + type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + prefix: + description: Specified beginning characters to + rewrite. + properties: + defaultPrefix: + description: 'Default prefix used to replace + the incoming route prefix when rewritten. + Valid values: ENABLED, DISABLED.' + type: string + value: + description: Value used to replace the incoming + route prefix when rewritten. + type: string + type: object + type: object + target: + description: Target that traffic is routed to when + a request matches the gateway route. 
+ properties: + port: + description: The port number that corresponds + to the target for Virtual Service provider port. + This is required when the provider (router or + node) of the Virtual Service has multiple listeners. + type: number + virtualService: + description: Virtual service gateway route target. + properties: + virtualServiceName: + description: Name of the virtual service that + traffic is routed to. Must be between 1 + and 255 characters in length. + type: string + type: object + type: object + type: object + match: + description: Criteria for determining a request match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match method and value. Default is + false. + type: boolean + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + prefix: + description: Specified beginning characters + to rewrite. + type: string + range: + description: Object that specifies the range + of numbers that the header value sent + by the client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Header value sent by the client + must include the specified characters. + type: string + suffix: + description: Header value sent by the client + must end with the specified characters. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + hostname: + description: Host name to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + suffix: + description: Header value sent by the client must + end with the specified characters. 
+ type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + regex: + description: Header value sent by the client must + include the specified characters. + type: string + type: object + port: + description: The port number that corresponds to the + target for Virtual Service provider port. This is + required when the provider (router or node) of the + Virtual Service has multiple listeners. + type: number + prefix: + description: Specified beginning characters to rewrite. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + type: object + type: object + httpRoute: + description: Specification of an HTTP gateway route. + properties: + action: + description: Action to take if a match is determined. + properties: + rewrite: + description: Gateway route action to rewrite. + properties: + hostname: + description: Host name to rewrite. + properties: + defaultTargetHostname: + description: 'Default target host name to + write to. Valid values: ENABLED, DISABLED.' + type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + prefix: + description: Specified beginning characters to + rewrite. + properties: + defaultPrefix: + description: 'Default prefix used to replace + the incoming route prefix when rewritten. + Valid values: ENABLED, DISABLED.' + type: string + value: + description: Value used to replace the incoming + route prefix when rewritten. 
+ type: string + type: object + type: object + target: + description: Target that traffic is routed to when + a request matches the gateway route. + properties: + port: + description: The port number that corresponds + to the target for Virtual Service provider port. + This is required when the provider (router or + node) of the Virtual Service has multiple listeners. + type: number + virtualService: + description: Virtual service gateway route target. + properties: + virtualServiceName: + description: Name of the virtual service that + traffic is routed to. Must be between 1 + and 255 characters in length. + type: string + virtualServiceNameRef: + description: Reference to a VirtualService + in appmesh to populate virtualServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualServiceNameSelector: + description: Selector for a VirtualService + in appmesh to populate virtualServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + type: object + match: + description: Criteria for determining a request match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match method and value. Default is + false. + type: boolean + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + prefix: + description: Specified beginning characters + to rewrite. + type: string + range: + description: Object that specifies the range + of numbers that the header value sent + by the client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Header value sent by the client + must include the specified characters. 
+ type: string + suffix: + description: Header value sent by the client + must end with the specified characters. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + hostname: + description: Host name to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + suffix: + description: Header value sent by the client must + end with the specified characters. + type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + regex: + description: Header value sent by the client must + include the specified characters. + type: string + type: object + port: + description: The port number that corresponds to the + target for Virtual Service provider port. This is + required when the provider (router or node) of the + Virtual Service has multiple listeners. + type: number + prefix: + description: Specified beginning characters to rewrite. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + type: object + type: object + priority: + description: Priority for the gateway route, between 0 and + 1000. + type: number + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + virtualGatewayName: + description: Name of the virtual gateway to associate the gateway + route with. Must be between 1 and 255 characters in length. 
+ type: string + virtualGatewayNameRef: + description: Reference to a VirtualGateway in appmesh to populate + virtualGatewayName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualGatewayNameSelector: + description: Selector for a VirtualGateway in appmesh to populate + virtualGatewayName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + meshName: + description: Name of the service mesh in which to create the gateway + route. Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the gateway route. Must be between + 1 and 255 characters in length. + type: string + spec: + description: Gateway route specification to apply. + properties: + grpcRoute: + description: Specification of a gRPC gateway route. + properties: + action: + description: Action to take if a match is determined. + properties: + target: + description: Target that traffic is routed to when + a request matches the gateway route. + properties: + port: + description: The port number that corresponds + to the target for Virtual Service provider port. + This is required when the provider (router or + node) of the Virtual Service has multiple listeners. 
+ type: number + virtualService: + description: Virtual service gateway route target. + properties: + virtualServiceName: + description: Name of the virtual service that + traffic is routed to. Must be between 1 + and 255 characters in length. + type: string + type: object + type: object + type: object + match: + description: Criteria for determining a request match. + properties: + port: + description: The port number that corresponds to the + target for Virtual Service provider port. This is + required when the provider (router or node) of the + Virtual Service has multiple listeners. + type: number + serviceName: + description: Fully qualified domain name for the service + to match from the request. + type: string + type: object + type: object + http2Route: + description: Specification of an HTTP/2 gateway route. + properties: + action: + description: Action to take if a match is determined. + properties: + rewrite: + description: Gateway route action to rewrite. + properties: + hostname: + description: Host name to rewrite. + properties: + defaultTargetHostname: + description: 'Default target host name to + write to. Valid values: ENABLED, DISABLED.' + type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + prefix: + description: Specified beginning characters to + rewrite. + properties: + defaultPrefix: + description: 'Default prefix used to replace + the incoming route prefix when rewritten. + Valid values: ENABLED, DISABLED.' + type: string + value: + description: Value used to replace the incoming + route prefix when rewritten. + type: string + type: object + type: object + target: + description: Target that traffic is routed to when + a request matches the gateway route. + properties: + port: + description: The port number that corresponds + to the target for Virtual Service provider port. 
+ This is required when the provider (router or + node) of the Virtual Service has multiple listeners. + type: number + virtualService: + description: Virtual service gateway route target. + properties: + virtualServiceName: + description: Name of the virtual service that + traffic is routed to. Must be between 1 + and 255 characters in length. + type: string + type: object + type: object + type: object + match: + description: Criteria for determining a request match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match method and value. Default is + false. + type: boolean + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + prefix: + description: Specified beginning characters + to rewrite. + type: string + range: + description: Object that specifies the range + of numbers that the header value sent + by the client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Header value sent by the client + must include the specified characters. + type: string + suffix: + description: Header value sent by the client + must end with the specified characters. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + hostname: + description: Host name to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + suffix: + description: Header value sent by the client must + end with the specified characters. + type: string + type: object + path: + description: Exact path to rewrite. 
+ properties: + exact: + description: Value used to replace matched path. + type: string + regex: + description: Header value sent by the client must + include the specified characters. + type: string + type: object + port: + description: The port number that corresponds to the + target for Virtual Service provider port. This is + required when the provider (router or node) of the + Virtual Service has multiple listeners. + type: number + prefix: + description: Specified beginning characters to rewrite. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + type: object + type: object + httpRoute: + description: Specification of an HTTP gateway route. + properties: + action: + description: Action to take if a match is determined. + properties: + rewrite: + description: Gateway route action to rewrite. + properties: + hostname: + description: Host name to rewrite. + properties: + defaultTargetHostname: + description: 'Default target host name to + write to. Valid values: ENABLED, DISABLED.' + type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + prefix: + description: Specified beginning characters to + rewrite. + properties: + defaultPrefix: + description: 'Default prefix used to replace + the incoming route prefix when rewritten. + Valid values: ENABLED, DISABLED.' + type: string + value: + description: Value used to replace the incoming + route prefix when rewritten. 
+ type: string + type: object + type: object + target: + description: Target that traffic is routed to when + a request matches the gateway route. + properties: + port: + description: The port number that corresponds + to the target for Virtual Service provider port. + This is required when the provider (router or + node) of the Virtual Service has multiple listeners. + type: number + virtualService: + description: Virtual service gateway route target. + properties: + virtualServiceName: + description: Name of the virtual service that + traffic is routed to. Must be between 1 + and 255 characters in length. + type: string + virtualServiceNameRef: + description: Reference to a VirtualService + in appmesh to populate virtualServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualServiceNameSelector: + description: Selector for a VirtualService + in appmesh to populate virtualServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + type: object + match: + description: Criteria for determining a request match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match method and value. Default is + false. + type: boolean + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + prefix: + description: Specified beginning characters + to rewrite. + type: string + range: + description: Object that specifies the range + of numbers that the header value sent + by the client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Header value sent by the client + must include the specified characters. 
+ type: string + suffix: + description: Header value sent by the client + must end with the specified characters. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + hostname: + description: Host name to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + suffix: + description: Header value sent by the client must + end with the specified characters. + type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + regex: + description: Header value sent by the client must + include the specified characters. + type: string + type: object + port: + description: The port number that corresponds to the + target for Virtual Service provider port. This is + required when the provider (router or node) of the + Virtual Service has multiple listeners. + type: number + prefix: + description: Specified beginning characters to rewrite. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + type: object + type: object + priority: + description: Priority for the gateway route, between 0 and + 1000. + type: number + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + virtualGatewayName: + description: Name of the virtual gateway to associate the gateway + route with. Must be between 1 and 255 characters in length. 
+ type: string + virtualGatewayNameRef: + description: Reference to a VirtualGateway in appmesh to populate + virtualGatewayName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualGatewayNameSelector: + description: Selector for a VirtualGateway in appmesh to populate + virtualGatewayName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.meshName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.meshName) + || (has(self.initProvider) && has(self.initProvider.meshName))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.spec is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.spec) + || (has(self.initProvider) && has(self.initProvider.spec))' + status: + description: GatewayRouteStatus defines the observed state of GatewayRoute. + properties: + atProvider: + properties: + arn: + description: ARN of the gateway route. + type: string + createdDate: + description: Creation date of the gateway route. + type: string + id: + description: ID of the gateway route. + type: string + lastUpdatedDate: + description: Last update date of the gateway route. + type: string + meshName: + description: Name of the service mesh in which to create the gateway + route. Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the gateway route. Must be between + 1 and 255 characters in length. + type: string + resourceOwner: + description: Resource owner's AWS account ID. + type: string + spec: + description: Gateway route specification to apply. 
+ properties: + grpcRoute: + description: Specification of a gRPC gateway route. + properties: + action: + description: Action to take if a match is determined. + properties: + target: + description: Target that traffic is routed to when + a request matches the gateway route. + properties: + port: + description: The port number that corresponds + to the target for Virtual Service provider port. + This is required when the provider (router or + node) of the Virtual Service has multiple listeners. + type: number + virtualService: + description: Virtual service gateway route target. + properties: + virtualServiceName: + description: Name of the virtual service that + traffic is routed to. Must be between 1 + and 255 characters in length. + type: string + type: object + type: object + type: object + match: + description: Criteria for determining a request match. + properties: + port: + description: The port number that corresponds to the + target for Virtual Service provider port. This is + required when the provider (router or node) of the + Virtual Service has multiple listeners. + type: number + serviceName: + description: Fully qualified domain name for the service + to match from the request. + type: string + type: object + type: object + http2Route: + description: Specification of an HTTP/2 gateway route. + properties: + action: + description: Action to take if a match is determined. + properties: + rewrite: + description: Gateway route action to rewrite. + properties: + hostname: + description: Host name to rewrite. + properties: + defaultTargetHostname: + description: 'Default target host name to + write to. Valid values: ENABLED, DISABLED.' + type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + prefix: + description: Specified beginning characters to + rewrite. 
+ properties: + defaultPrefix: + description: 'Default prefix used to replace + the incoming route prefix when rewritten. + Valid values: ENABLED, DISABLED.' + type: string + value: + description: Value used to replace the incoming + route prefix when rewritten. + type: string + type: object + type: object + target: + description: Target that traffic is routed to when + a request matches the gateway route. + properties: + port: + description: The port number that corresponds + to the target for Virtual Service provider port. + This is required when the provider (router or + node) of the Virtual Service has multiple listeners. + type: number + virtualService: + description: Virtual service gateway route target. + properties: + virtualServiceName: + description: Name of the virtual service that + traffic is routed to. Must be between 1 + and 255 characters in length. + type: string + type: object + type: object + type: object + match: + description: Criteria for determining a request match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match method and value. Default is + false. + type: boolean + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + prefix: + description: Specified beginning characters + to rewrite. + type: string + range: + description: Object that specifies the range + of numbers that the header value sent + by the client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Header value sent by the client + must include the specified characters. + type: string + suffix: + description: Header value sent by the client + must end with the specified characters. 
+ type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + hostname: + description: Host name to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + suffix: + description: Header value sent by the client must + end with the specified characters. + type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + regex: + description: Header value sent by the client must + include the specified characters. + type: string + type: object + port: + description: The port number that corresponds to the + target for Virtual Service provider port. This is + required when the provider (router or node) of the + Virtual Service has multiple listeners. + type: number + prefix: + description: Specified beginning characters to rewrite. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + type: object + type: object + httpRoute: + description: Specification of an HTTP gateway route. + properties: + action: + description: Action to take if a match is determined. + properties: + rewrite: + description: Gateway route action to rewrite. + properties: + hostname: + description: Host name to rewrite. + properties: + defaultTargetHostname: + description: 'Default target host name to + write to. Valid values: ENABLED, DISABLED.' + type: string + type: object + path: + description: Exact path to rewrite. 
+ properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + prefix: + description: Specified beginning characters to + rewrite. + properties: + defaultPrefix: + description: 'Default prefix used to replace + the incoming route prefix when rewritten. + Valid values: ENABLED, DISABLED.' + type: string + value: + description: Value used to replace the incoming + route prefix when rewritten. + type: string + type: object + type: object + target: + description: Target that traffic is routed to when + a request matches the gateway route. + properties: + port: + description: The port number that corresponds + to the target for Virtual Service provider port. + This is required when the provider (router or + node) of the Virtual Service has multiple listeners. + type: number + virtualService: + description: Virtual service gateway route target. + properties: + virtualServiceName: + description: Name of the virtual service that + traffic is routed to. Must be between 1 + and 255 characters in length. + type: string + type: object + type: object + type: object + match: + description: Criteria for determining a request match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match method and value. Default is + false. + type: boolean + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + prefix: + description: Specified beginning characters + to rewrite. + type: string + range: + description: Object that specifies the range + of numbers that the header value sent + by the client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. 
+ type: number + type: object + regex: + description: Header value sent by the client + must include the specified characters. + type: string + suffix: + description: Header value sent by the client + must end with the specified characters. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + hostname: + description: Host name to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + suffix: + description: Header value sent by the client must + end with the specified characters. + type: string + type: object + path: + description: Exact path to rewrite. + properties: + exact: + description: Value used to replace matched path. + type: string + regex: + description: Header value sent by the client must + include the specified characters. + type: string + type: object + port: + description: The port number that corresponds to the + target for Virtual Service provider port. This is + required when the provider (router or node) of the + Virtual Service has multiple listeners. + type: number + prefix: + description: Specified beginning characters to rewrite. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining a request + match. + properties: + exact: + description: Value used to replace matched + path. + type: string + type: object + name: + description: Name to use for the gateway route. + Must be between 1 and 255 characters in length. + type: string + type: object + type: array + type: object + type: object + priority: + description: Priority for the gateway route, between 0 and + 1000. + type: number + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + virtualGatewayName: + description: Name of the virtual gateway to associate the gateway + route with. Must be between 1 and 255 characters in length. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appmesh.aws.upbound.io_meshes.yaml b/package/crds/appmesh.aws.upbound.io_meshes.yaml index e4f862222b..c27a6cba7d 100644 --- a/package/crds/appmesh.aws.upbound.io_meshes.yaml +++ b/package/crds/appmesh.aws.upbound.io_meshes.yaml @@ -455,3 +455,422 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Mesh is the Schema for the Meshs API. Provides an AWS App Mesh + service mesh resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MeshSpec defines the desired state of Mesh + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + region: + description: Region is the region you'd like your resource to + be created in. + type: string + spec: + description: Service mesh specification to apply. + properties: + egressFilter: + description: Egress filter rules for the service mesh. + properties: + type: + description: Egress filter type. By default, the type + is DROP_ALL. Valid values are ALLOW_ALL and DROP_ALL. + type: string + type: object + serviceDiscovery: + description: The service discovery information for the service + mesh. + properties: + ipPreference: + description: The IP version to use to control traffic + within the mesh. Valid values are IPv6_PREFERRED, IPv4_PREFERRED, + IPv4_ONLY, and IPv6_ONLY. + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + spec: + description: Service mesh specification to apply. + properties: + egressFilter: + description: Egress filter rules for the service mesh. + properties: + type: + description: Egress filter type. By default, the type + is DROP_ALL. Valid values are ALLOW_ALL and DROP_ALL. + type: string + type: object + serviceDiscovery: + description: The service discovery information for the service + mesh. + properties: + ipPreference: + description: The IP version to use to control traffic + within the mesh. Valid values are IPv6_PREFERRED, IPv4_PREFERRED, + IPv4_ONLY, and IPv6_ONLY. + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MeshStatus defines the observed state of Mesh. + properties: + atProvider: + properties: + arn: + description: ARN of the service mesh. + type: string + createdDate: + description: Creation date of the service mesh. + type: string + id: + description: ID of the service mesh. + type: string + lastUpdatedDate: + description: Last update date of the service mesh. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. + type: string + resourceOwner: + description: Resource owner's AWS account ID. + type: string + spec: + description: Service mesh specification to apply. + properties: + egressFilter: + description: Egress filter rules for the service mesh. + properties: + type: + description: Egress filter type. By default, the type + is DROP_ALL. Valid values are ALLOW_ALL and DROP_ALL. + type: string + type: object + serviceDiscovery: + description: The service discovery information for the service + mesh. 
+ properties: + ipPreference: + description: The IP version to use to control traffic + within the mesh. Valid values are IPv6_PREFERRED, IPv4_PREFERRED, + IPv4_ONLY, and IPv6_ONLY. + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appmesh.aws.upbound.io_routes.yaml b/package/crds/appmesh.aws.upbound.io_routes.yaml index 9f92264936..687d045dc9 100644 --- a/package/crds/appmesh.aws.upbound.io_routes.yaml +++ b/package/crds/appmesh.aws.upbound.io_routes.yaml @@ -3307,3 +3307,2928 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Route is the Schema for the Routes API. Provides an AWS App Mesh + route resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RouteSpec defines the desired state of Route + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + meshName: + description: Name of the service mesh in which to create the route. + Must be between 1 and 255 characters in length. + type: string + meshNameRef: + description: Reference to a Mesh in appmesh to populate meshName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + meshNameSelector: + description: Selector for a Mesh in appmesh to populate meshName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the route. Must be between 1 and + 255 characters in length. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + spec: + description: Route specification to apply. + properties: + grpcRoute: + description: GRPC routing information for the route. + properties: + action: + description: Action to take if a match is determined. 
+ properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + metadata: + description: Data to match from the gRPC request. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match criteria. Default is false. + type: boolean + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + prefix: + description: Value sent by the client must + begin with the specified characters. Must + be between 1 and 255 characters in length. + type: string + range: + description: Object that specifies the range + of numbers that the value sent by the + client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Value sent by the client must + include the specified characters. Must + be between 1 and 255 characters in length. + type: string + suffix: + description: Value sent by the client must + end with the specified characters. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. 
Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + methodName: + description: Method name to match from the request. + If you specify a name, you must also specify a service_name. + type: string + port: + description: The port number to match from the request. + type: number + prefix: + description: Value sent by the client must begin with + the specified characters. Must be between 1 and + 255 characters in length. + type: string + serviceName: + description: Fully qualified domain name for the service + to match from the request. + type: string + type: object + retryPolicy: + description: Retry policy. + properties: + grpcRetryEvents: + description: |- + List of gRPC retry events. + Valid values: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. + items: + type: string + type: array + x-kubernetes-list-type: set + httpRetryEvents: + description: |- + List of HTTP retry events. + Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + items: + type: string + type: array + x-kubernetes-list-type: set + maxRetries: + description: Maximum number of retries. + type: number + perRetryTimeout: + description: Per-retry timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + tcpRetryEvents: + description: List of TCP retry events. The only valid + value is connection-error. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. 
Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + http2Route: + description: HTTP/2 routing information for the route. + properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match criteria. Default is false. + type: boolean + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + prefix: + description: Value sent by the client must + begin with the specified characters. Must + be between 1 and 255 characters in length. 
+ type: string + range: + description: Object that specifies the range + of numbers that the value sent by the + client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Value sent by the client must + include the specified characters. Must + be between 1 and 255 characters in length. + type: string + suffix: + description: Value sent by the client must + end with the specified characters. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + method: + description: 'Client request header method to match + on. Valid values: GET, HEAD, POST, PUT, DELETE, + CONNECT, OPTIONS, TRACE, PATCH.' + type: string + path: + description: Client request path to match on. + properties: + exact: + description: Value sent by the client must match + the specified value exactly. Must be between + 1 and 255 characters in length. + type: string + regex: + description: Value sent by the client must include + the specified characters. Must be between 1 + and 255 characters in length. + type: string + type: object + port: + description: The port number to match from the request. + type: number + prefix: + description: Value sent by the client must begin with + the specified characters. Must be between 1 and + 255 characters in length. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. 
Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + scheme: + description: 'Client request header scheme to match + on. Valid values: http, https.' + type: string + type: object + retryPolicy: + description: Retry policy. + properties: + httpRetryEvents: + description: |- + List of HTTP retry events. + Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + items: + type: string + type: array + x-kubernetes-list-type: set + maxRetries: + description: Maximum number of retries. + type: number + perRetryTimeout: + description: Per-retry timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + tcpRetryEvents: + description: List of TCP retry events. The only valid + value is connection-error. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + httpRoute: + description: HTTP routing information for the route. + properties: + action: + description: Action to take if a match is determined. 
+ properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + virtualNodeRef: + description: Reference to a VirtualNode in appmesh + to populate virtualNode. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNodeSelector: + description: Selector for a VirtualNode in appmesh + to populate virtualNode. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match criteria. Default is false. + type: boolean + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + prefix: + description: Value sent by the client must + begin with the specified characters. Must + be between 1 and 255 characters in length. + type: string + range: + description: Object that specifies the range + of numbers that the value sent by the + client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Required) Start of the + range. + type: number + type: object + regex: + description: Value sent by the client must + include the specified characters. 
Must + be between 1 and 255 characters in length. + type: string + suffix: + description: Value sent by the client must + end with the specified characters. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + method: + description: 'Client request header method to match + on. Valid values: GET, HEAD, POST, PUT, DELETE, + CONNECT, OPTIONS, TRACE, PATCH.' + type: string + path: + description: Client request path to match on. + properties: + exact: + description: Value sent by the client must match + the specified value exactly. Must be between + 1 and 255 characters in length. + type: string + regex: + description: Value sent by the client must include + the specified characters. Must be between 1 + and 255 characters in length. + type: string + type: object + port: + description: The port number to match from the request. + type: number + prefix: + description: Value sent by the client must begin with + the specified characters. Must be between 1 and + 255 characters in length. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + scheme: + description: 'Client request header scheme to match + on. Valid values: http, https.' + type: string + type: object + retryPolicy: + description: Retry policy. + properties: + httpRetryEvents: + description: |- + List of HTTP retry events. 
+ Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + items: + type: string + type: array + x-kubernetes-list-type: set + maxRetries: + description: Maximum number of retries. + type: number + perRetryTimeout: + description: Per-retry timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + tcpRetryEvents: + description: List of TCP retry events. The only valid + value is connection-error. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + priority: + description: |- + Priority for the route, between 0 and 1000. + Routes are matched based on the specified value, where 0 is the highest priority. + type: number + tcpRoute: + description: TCP routing information for the route. + properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. 
+ items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + virtualNodeRef: + description: Reference to a VirtualNode in appmesh + to populate virtualNode. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNodeSelector: + description: Selector for a VirtualNode in appmesh + to populate virtualNode. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + port: + description: The port number to match from the request. + type: number + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + virtualRouterName: + description: Name of the virtual router in which to create the + route. Must be between 1 and 255 characters in length. + type: string + virtualRouterNameRef: + description: Reference to a VirtualRouter in appmesh to populate + virtualRouterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualRouterNameSelector: + description: Selector for a VirtualRouter in appmesh to populate + virtualRouterName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + meshName: + description: Name of the service mesh in which to create the route. + Must be between 1 and 255 characters in length. + type: string + meshNameRef: + description: Reference to a Mesh in appmesh to populate meshName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + meshNameSelector: + description: Selector for a Mesh in appmesh to populate meshName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the route. Must be between 1 and + 255 characters in length. + type: string + spec: + description: Route specification to apply. + properties: + grpcRoute: + description: GRPC routing information for the route. + properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + weight: + description: Relative weight of the weighted + target. 
An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + metadata: + description: Data to match from the gRPC request. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match criteria. Default is false. + type: boolean + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + prefix: + description: Value sent by the client must + begin with the specified characters. Must + be between 1 and 255 characters in length. + type: string + range: + description: Object that specifies the range + of numbers that the value sent by the + client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Required) Start of the + range. + type: number + type: object + regex: + description: Value sent by the client must + include the specified characters. Must + be between 1 and 255 characters in length. + type: string + suffix: + description: Value sent by the client must + end with the specified characters. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + methodName: + description: Method name to match from the request. + If you specify a name, you must also specify a service_name. + type: string + port: + description: The port number to match from the request. + type: number + prefix: + description: Value sent by the client must begin with + the specified characters. Must be between 1 and + 255 characters in length. 
+ type: string + serviceName: + description: Fully qualified domain name for the service + to match from the request. + type: string + type: object + retryPolicy: + description: Retry policy. + properties: + grpcRetryEvents: + description: |- + List of gRPC retry events. + Valid values: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. + items: + type: string + type: array + x-kubernetes-list-type: set + httpRetryEvents: + description: |- + List of HTTP retry events. + Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + items: + type: string + type: array + x-kubernetes-list-type: set + maxRetries: + description: Maximum number of retries. + type: number + perRetryTimeout: + description: Per-retry timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + tcpRetryEvents: + description: List of TCP retry events. The only valid + value is connection-error. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + http2Route: + description: HTTP/2 routing information for the route. 
+ properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match criteria. Default is false. + type: boolean + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + prefix: + description: Value sent by the client must + begin with the specified characters. Must + be between 1 and 255 characters in length. + type: string + range: + description: Object that specifies the range + of numbers that the value sent by the + client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Required) Start of the + range. + type: number + type: object + regex: + description: Value sent by the client must + include the specified characters. Must + be between 1 and 255 characters in length. + type: string + suffix: + description: Value sent by the client must + end with the specified characters. Must + be between 1 and 255 characters in length. 
+ type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + method: + description: 'Client request header method to match + on. Valid values: GET, HEAD, POST, PUT, DELETE, + CONNECT, OPTIONS, TRACE, PATCH.' + type: string + path: + description: Client request path to match on. + properties: + exact: + description: Value sent by the client must match + the specified value exactly. Must be between + 1 and 255 characters in length. + type: string + regex: + description: Value sent by the client must include + the specified characters. Must be between 1 + and 255 characters in length. + type: string + type: object + port: + description: The port number to match from the request. + type: number + prefix: + description: Value sent by the client must begin with + the specified characters. Must be between 1 and + 255 characters in length. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + scheme: + description: 'Client request header scheme to match + on. Valid values: http, https.' + type: string + type: object + retryPolicy: + description: Retry policy. + properties: + httpRetryEvents: + description: |- + List of HTTP retry events. + Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). 
+ items: + type: string + type: array + x-kubernetes-list-type: set + maxRetries: + description: Maximum number of retries. + type: number + perRetryTimeout: + description: Per-retry timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + tcpRetryEvents: + description: List of TCP retry events. The only valid + value is connection-error. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + httpRoute: + description: HTTP routing information for the route. + properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + virtualNodeRef: + description: Reference to a VirtualNode in appmesh + to populate virtualNode. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNodeSelector: + description: Selector for a VirtualNode in appmesh + to populate virtualNode. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match criteria. Default is false. + type: boolean + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + prefix: + description: Value sent by the client must + begin with the specified characters. Must + be between 1 and 255 characters in length. + type: string + range: + description: Object that specifies the range + of numbers that the value sent by the + client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Required) Start of the + range. + type: number + type: object + regex: + description: Value sent by the client must + include the specified characters. Must + be between 1 and 255 characters in length. + type: string + suffix: + description: Value sent by the client must + end with the specified characters. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + method: + description: 'Client request header method to match + on. Valid values: GET, HEAD, POST, PUT, DELETE, + CONNECT, OPTIONS, TRACE, PATCH.' + type: string + path: + description: Client request path to match on. 
+ properties: + exact: + description: Value sent by the client must match + the specified value exactly. Must be between + 1 and 255 characters in length. + type: string + regex: + description: Value sent by the client must include + the specified characters. Must be between 1 + and 255 characters in length. + type: string + type: object + port: + description: The port number to match from the request. + type: number + prefix: + description: Value sent by the client must begin with + the specified characters. Must be between 1 and + 255 characters in length. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + scheme: + description: 'Client request header scheme to match + on. Valid values: http, https.' + type: string + type: object + retryPolicy: + description: Retry policy. + properties: + httpRetryEvents: + description: |- + List of HTTP retry events. + Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + items: + type: string + type: array + x-kubernetes-list-type: set + maxRetries: + description: Maximum number of retries. + type: number + perRetryTimeout: + description: Per-retry timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. 
+ type: number + type: object + tcpRetryEvents: + description: List of TCP retry events. The only valid + value is connection-error. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + priority: + description: |- + Priority for the route, between 0 and 1000. + Routes are matched based on the specified value, where 0 is the highest priority. + type: number + tcpRoute: + description: TCP routing information for the route. + properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + virtualNodeRef: + description: Reference to a VirtualNode in appmesh + to populate virtualNode. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNodeSelector: + description: Selector for a VirtualNode in appmesh + to populate virtualNode. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + port: + description: The port number to match from the request. + type: number + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + virtualRouterName: + description: Name of the virtual router in which to create the + route. Must be between 1 and 255 characters in length. + type: string + virtualRouterNameRef: + description: Reference to a VirtualRouter in appmesh to populate + virtualRouterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualRouterNameSelector: + description: Selector for a VirtualRouter in appmesh to populate + virtualRouterName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.spec is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.spec) + || (has(self.initProvider) && has(self.initProvider.spec))' + status: + description: RouteStatus defines the observed state of Route. + properties: + atProvider: + properties: + arn: + description: ARN of the route. + type: string + createdDate: + description: Creation date of the route. + type: string + id: + description: ID of the route. + type: string + lastUpdatedDate: + description: Last update date of the route. + type: string + meshName: + description: Name of the service mesh in which to create the route. 
+ Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the route. Must be between 1 and + 255 characters in length. + type: string + resourceOwner: + description: Resource owner's AWS account ID. + type: string + spec: + description: Route specification to apply. + properties: + grpcRoute: + description: GRPC routing information for the route. + properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + metadata: + description: Data to match from the gRPC request. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match criteria. Default is false. + type: boolean + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + prefix: + description: Value sent by the client must + begin with the specified characters. Must + be between 1 and 255 characters in length. 
+ type: string + range: + description: Object that specifies the range + of numbers that the value sent by the + client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Value sent by the client must + include the specified characters. Must + be between 1 and 255 characters in length. + type: string + suffix: + description: Value sent by the client must + end with the specified characters. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + methodName: + description: Method name to match from the request. + If you specify a name, you must also specify a service_name. + type: string + port: + description: The port number to match from the request. + type: number + prefix: + description: Value sent by the client must begin with + the specified characters. Must be between 1 and + 255 characters in length. + type: string + serviceName: + description: Fully qualified domain name for the service + to match from the request. + type: string + type: object + retryPolicy: + description: Retry policy. + properties: + grpcRetryEvents: + description: |- + List of gRPC retry events. + Valid values: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. + items: + type: string + type: array + x-kubernetes-list-type: set + httpRetryEvents: + description: |- + List of HTTP retry events. + Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + items: + type: string + type: array + x-kubernetes-list-type: set + maxRetries: + description: Maximum number of retries. 
+ type: number + perRetryTimeout: + description: Per-retry timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + tcpRetryEvents: + description: List of TCP retry events. The only valid + value is connection-error. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + http2Route: + description: HTTP/2 routing information for the route. + properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. 
+ properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match criteria. Default is false. + type: boolean + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + prefix: + description: Value sent by the client must + begin with the specified characters. Must + be between 1 and 255 characters in length. + type: string + range: + description: Object that specifies the range + of numbers that the value sent by the + client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Value sent by the client must + include the specified characters. Must + be between 1 and 255 characters in length. + type: string + suffix: + description: Value sent by the client must + end with the specified characters. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + method: + description: 'Client request header method to match + on. Valid values: GET, HEAD, POST, PUT, DELETE, + CONNECT, OPTIONS, TRACE, PATCH.' + type: string + path: + description: Client request path to match on. + properties: + exact: + description: Value sent by the client must match + the specified value exactly. Must be between + 1 and 255 characters in length. + type: string + regex: + description: Value sent by the client must include + the specified characters. Must be between 1 + and 255 characters in length. 
+ type: string + type: object + port: + description: The port number to match from the request. + type: number + prefix: + description: Value sent by the client must begin with + the specified characters. Must be between 1 and + 255 characters in length. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + scheme: + description: 'Client request header scheme to match + on. Valid values: http, https.' + type: string + type: object + retryPolicy: + description: Retry policy. + properties: + httpRetryEvents: + description: |- + List of HTTP retry events. + Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + items: + type: string + type: array + x-kubernetes-list-type: set + maxRetries: + description: Maximum number of retries. + type: number + perRetryTimeout: + description: Per-retry timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + tcpRetryEvents: + description: List of TCP retry events. The only valid + value is connection-error. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. 
An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + httpRoute: + description: HTTP routing information for the route. + properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + header: + description: Client request headers to match on. + items: + properties: + invert: + description: If true, the match is on the opposite + of the match criteria. Default is false. + type: boolean + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + prefix: + description: Value sent by the client must + begin with the specified characters. Must + be between 1 and 255 characters in length. 
+ type: string + range: + description: Object that specifies the range + of numbers that the value sent by the + client must be included in. + properties: + end: + description: End of the range. + type: number + start: + description: (Requited) Start of the + range. + type: number + type: object + regex: + description: Value sent by the client must + include the specified characters. Must + be between 1 and 255 characters in length. + type: string + suffix: + description: Value sent by the client must + end with the specified characters. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + method: + description: 'Client request header method to match + on. Valid values: GET, HEAD, POST, PUT, DELETE, + CONNECT, OPTIONS, TRACE, PATCH.' + type: string + path: + description: Client request path to match on. + properties: + exact: + description: Value sent by the client must match + the specified value exactly. Must be between + 1 and 255 characters in length. + type: string + regex: + description: Value sent by the client must include + the specified characters. Must be between 1 + and 255 characters in length. + type: string + type: object + port: + description: The port number to match from the request. + type: number + prefix: + description: Value sent by the client must begin with + the specified characters. Must be between 1 and + 255 characters in length. + type: string + queryParameter: + description: Client request query parameters to match + on. + items: + properties: + match: + description: Criteria for determining an gRPC + request match. + properties: + exact: + description: Value sent by the client must + match the specified value exactly. Must + be between 1 and 255 characters in length. + type: string + type: object + name: + description: Name to use for the route. 
Must + be between 1 and 255 characters in length. + type: string + type: object + type: array + scheme: + description: 'Client request header scheme to match + on. Valid values: http, https.' + type: string + type: object + retryPolicy: + description: Retry policy. + properties: + httpRetryEvents: + description: |- + List of HTTP retry events. + Valid values: client-error (HTTP status code 409), gateway-error (HTTP status codes 502, 503, and 504), server-error (HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511), stream-error (retry on refused stream). + items: + type: string + type: array + x-kubernetes-list-type: set + maxRetries: + description: Maximum number of retries. + type: number + perRetryTimeout: + description: Per-retry timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + tcpRetryEvents: + description: List of TCP retry events. The only valid + value is connection-error. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + priority: + description: |- + Priority for the route, between 0 and 1000. + Routes are matched based on the specified value, where 0 is the highest priority. 
+ type: number + tcpRoute: + description: TCP routing information for the route. + properties: + action: + description: Action to take if a match is determined. + properties: + weightedTarget: + description: |- + Targets that traffic is routed to when a request matches the route. + You can specify one or more targets and their relative weights with which to distribute traffic. + items: + properties: + port: + description: The port number to match from the + request. + type: number + virtualNode: + description: Virtual node to associate with + the weighted target. Must be between 1 and + 255 characters in length. + type: string + weight: + description: Relative weight of the weighted + target. An integer between 0 and 100. + type: number + type: object + type: array + type: object + match: + description: Criteria for determining an gRPC request + match. + properties: + port: + description: The port number to match from the request. + type: number + type: object + timeout: + description: Types of timeouts. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be idle. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: Number of time units. Minimum value + of 0. + type: number + type: object + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + virtualRouterName: + description: Name of the virtual router in which to create the + route. Must be between 1 and 255 characters in length. + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appmesh.aws.upbound.io_virtualgateways.yaml b/package/crds/appmesh.aws.upbound.io_virtualgateways.yaml index 15affda9cf..7d0b23d505 100644 --- a/package/crds/appmesh.aws.upbound.io_virtualgateways.yaml +++ b/package/crds/appmesh.aws.upbound.io_virtualgateways.yaml @@ -1905,3 +1905,1585 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualGateway is the Schema for the VirtualGateways API. Provides + an AWS App Mesh virtual gateway resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualGatewaySpec defines the desired state of VirtualGateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + meshName: + description: Name of the service mesh in which to create the virtual + gateway. Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual gateway. Must be between + 1 and 255 characters in length. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + spec: + description: Virtual gateway specification to apply. + properties: + backendDefaults: + description: Defaults for backends. + properties: + clientPolicy: + description: Default client policy for virtual gateway + backends. + properties: + tls: + description: Transport Layer Security (TLS) client + policy. + properties: + certificate: + description: Virtual gateway's client's Transport + Layer Security (TLS) certificate. + properties: + file: + description: Local file certificate. 
+ properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + enforce: + description: Whether the policy is enforced. Default + is true. + type: boolean + ports: + description: One or more ports that the policy + is enforced for. + items: + type: number + type: array + x-kubernetes-list-type: set + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a virtual gateway's + listener's Transport Layer Security (TLS) + validation context. + properties: + match: + description: Criteria for determining + a SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + acm: + description: TLS validation context trust + for an AWS Certificate Manager (ACM) + certificate. + properties: + certificateAuthorityArns: + description: One or more ACM ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for + the certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. 
+ properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: object + listener: + description: Listeners that the mesh endpoint is expected + to receive inbound traffic from. You can specify one listener. + items: + properties: + connectionPool: + description: Connection pool information for the listener. + properties: + grpc: + description: Connection pool information for gRPC + listeners. + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + http: + description: Connection pool information for HTTP + listeners. + properties: + maxConnections: + description: Maximum number of outbound TCP + connections Envoy can establish concurrently + with all hosts in upstream cluster. Minimum + value of 1. + type: number + maxPendingRequests: + description: Number of overflowing requests + after max_connections Envoy will queue to + upstream cluster. Minimum value of 1. + type: number + type: object + http2: + description: Connection pool information for HTTP2 + listeners. + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + type: object + healthCheck: + description: Health check information for the listener. + properties: + healthyThreshold: + description: Number of consecutive successful health + checks that must occur before declaring listener + healthy. + type: number + intervalMillis: + description: Time period in milliseconds between + each health check execution. 
+ type: number + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs to + standard out. Must be between 1 and 255 characters + in length. + type: string + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + timeoutMillis: + description: Amount of time to wait when receiving + a response from the health check, in milliseconds. + type: number + unhealthyThreshold: + description: Number of consecutive failed health + checks that must occur before declaring a virtual + gateway unhealthy. + type: number + type: object + portMapping: + description: Port mapping information for the listener. + properties: + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + type: object + tls: + description: Transport Layer Security (TLS) client policy. + properties: + certificate: + description: Virtual gateway's client's Transport + Layer Security (TLS) certificate. + properties: + acm: + description: TLS validation context trust for + an AWS Certificate Manager (ACM) certificate. + properties: + certificateArn: + description: ARN for the certificate. + type: string + certificateArnRef: + description: Reference to a Certificate + in acm to populate certificateArn. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateArnSelector: + description: Selector for a Certificate + in acm to populate certificateArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. 
+ type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret requested + from the Secret Discovery Service provider + representing Transport Layer Security + (TLS) materials like a certificate or + certificate chain. + type: string + type: object + type: object + mode: + description: 'Listener''s TLS mode. Valid values: + DISABLED, PERMISSIVE, STRICT.' + type: string + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a virtual gateway's listener's + Transport Layer Security (TLS) validation + context. + properties: + match: + description: Criteria for determining a + SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: array + logging: + description: Inbound and outbound access logging information + for the virtual gateway. + properties: + accessLog: + description: Access log configuration for a virtual gateway. + properties: + file: + description: Local file certificate. + properties: + format: + description: The specified format for the logs. + properties: + json: + description: The logging format for JSON. 
+ items: + properties: + key: + description: The specified key for the + JSON. Must be between 1 and 100 characters + in length. + type: string + value: + description: The specified value for + the JSON. Must be between 1 and 100 + characters in length. + type: string + type: object + type: array + text: + description: The logging format for text. + Must be between 1 and 1000 characters in + length. + type: string + type: object + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs + to standard out. Must be between 1 and 255 characters + in length. + type: string + type: object + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + meshName: + description: Name of the service mesh in which to create the virtual + gateway. Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual gateway. Must be between + 1 and 255 characters in length. 
+ type: string + spec: + description: Virtual gateway specification to apply. + properties: + backendDefaults: + description: Defaults for backends. + properties: + clientPolicy: + description: Default client policy for virtual gateway + backends. + properties: + tls: + description: Transport Layer Security (TLS) client + policy. + properties: + certificate: + description: Virtual gateway's client's Transport + Layer Security (TLS) certificate. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + enforce: + description: Whether the policy is enforced. Default + is true. + type: boolean + ports: + description: One or more ports that the policy + is enforced for. + items: + type: number + type: array + x-kubernetes-list-type: set + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a virtual gateway's + listener's Transport Layer Security (TLS) + validation context. + properties: + match: + description: Criteria for determining + a SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. 
+ properties: + acm: + description: TLS validation context trust + for an AWS Certificate Manager (ACM) + certificate. + properties: + certificateAuthorityArns: + description: One or more ACM ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for + the certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: object + listener: + description: Listeners that the mesh endpoint is expected + to receive inbound traffic from. You can specify one listener. + items: + properties: + connectionPool: + description: Connection pool information for the listener. + properties: + grpc: + description: Connection pool information for gRPC + listeners. + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + http: + description: Connection pool information for HTTP + listeners. + properties: + maxConnections: + description: Maximum number of outbound TCP + connections Envoy can establish concurrently + with all hosts in upstream cluster. Minimum + value of 1. + type: number + maxPendingRequests: + description: Number of overflowing requests + after max_connections Envoy will queue to + upstream cluster. Minimum value of 1. + type: number + type: object + http2: + description: Connection pool information for HTTP2 + listeners. 
+ properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + type: object + healthCheck: + description: Health check information for the listener. + properties: + healthyThreshold: + description: Number of consecutive successful health + checks that must occur before declaring listener + healthy. + type: number + intervalMillis: + description: Time period in milliseconds between + each health check execution. + type: number + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs to + standard out. Must be between 1 and 255 characters + in length. + type: string + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + timeoutMillis: + description: Amount of time to wait when receiving + a response from the health check, in milliseconds. + type: number + unhealthyThreshold: + description: Number of consecutive failed health + checks that must occur before declaring a virtual + gateway unhealthy. + type: number + type: object + portMapping: + description: Port mapping information for the listener. + properties: + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + type: object + tls: + description: Transport Layer Security (TLS) client policy. + properties: + certificate: + description: Virtual gateway's client's Transport + Layer Security (TLS) certificate. + properties: + acm: + description: TLS validation context trust for + an AWS Certificate Manager (ACM) certificate. + properties: + certificateArn: + description: ARN for the certificate. 
+ type: string + certificateArnRef: + description: Reference to a Certificate + in acm to populate certificateArn. + properties: + name: + description: Name of the referenced + object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateArnSelector: + description: Selector for a Certificate + in acm to populate certificateArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an + object with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret requested + from the Secret Discovery Service provider + representing Transport Layer Security + (TLS) materials like a certificate or + certificate chain. + type: string + type: object + type: object + mode: + description: 'Listener''s TLS mode. Valid values: + DISABLED, PERMISSIVE, STRICT.' + type: string + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a virtual gateway's listener's + Transport Layer Security (TLS) validation + context. + properties: + match: + description: Criteria for determining a + SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. 
+ properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: array + logging: + description: Inbound and outbound access logging information + for the virtual gateway. + properties: + accessLog: + description: Access log configuration for a virtual gateway. + properties: + file: + description: Local file certificate. + properties: + format: + description: The specified format for the logs. + properties: + json: + description: The logging format for JSON. + items: + properties: + key: + description: The specified key for the + JSON. Must be between 1 and 100 characters + in length. + type: string + value: + description: The specified value for + the JSON. Must be between 1 and 100 + characters in length. + type: string + type: object + type: array + text: + description: The logging format for text. + Must be between 1 and 1000 characters in + length. + type: string + type: object + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs + to standard out. Must be between 1 and 255 characters + in length. + type: string + type: object + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.meshName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.meshName) + || (has(self.initProvider) && has(self.initProvider.meshName))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.spec is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.spec) + || (has(self.initProvider) && has(self.initProvider.spec))' + status: + description: VirtualGatewayStatus defines the observed state of VirtualGateway. + properties: + atProvider: + properties: + arn: + description: ARN of the virtual gateway. + type: string + createdDate: + description: Creation date of the virtual gateway. + type: string + id: + description: ID of the virtual gateway. + type: string + lastUpdatedDate: + description: Last update date of the virtual gateway. + type: string + meshName: + description: Name of the service mesh in which to create the virtual + gateway. Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual gateway. Must be between + 1 and 255 characters in length. + type: string + resourceOwner: + description: Resource owner's AWS account ID. 
+ type: string + spec: + description: Virtual gateway specification to apply. + properties: + backendDefaults: + description: Defaults for backends. + properties: + clientPolicy: + description: Default client policy for virtual gateway + backends. + properties: + tls: + description: Transport Layer Security (TLS) client + policy. + properties: + certificate: + description: Virtual gateway's client's Transport + Layer Security (TLS) certificate. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + enforce: + description: Whether the policy is enforced. Default + is true. + type: boolean + ports: + description: One or more ports that the policy + is enforced for. + items: + type: number + type: array + x-kubernetes-list-type: set + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a virtual gateway's + listener's Transport Layer Security (TLS) + validation context. + properties: + match: + description: Criteria for determining + a SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. 
+ properties: + acm: + description: TLS validation context trust + for an AWS Certificate Manager (ACM) + certificate. + properties: + certificateAuthorityArns: + description: One or more ACM ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for + the certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: object + listener: + description: Listeners that the mesh endpoint is expected + to receive inbound traffic from. You can specify one listener. + items: + properties: + connectionPool: + description: Connection pool information for the listener. + properties: + grpc: + description: Connection pool information for gRPC + listeners. + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + http: + description: Connection pool information for HTTP + listeners. + properties: + maxConnections: + description: Maximum number of outbound TCP + connections Envoy can establish concurrently + with all hosts in upstream cluster. Minimum + value of 1. + type: number + maxPendingRequests: + description: Number of overflowing requests + after max_connections Envoy will queue to + upstream cluster. Minimum value of 1. + type: number + type: object + http2: + description: Connection pool information for HTTP2 + listeners. 
+ properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + type: object + healthCheck: + description: Health check information for the listener. + properties: + healthyThreshold: + description: Number of consecutive successful health + checks that must occur before declaring listener + healthy. + type: number + intervalMillis: + description: Time period in milliseconds between + each health check execution. + type: number + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs to + standard out. Must be between 1 and 255 characters + in length. + type: string + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + timeoutMillis: + description: Amount of time to wait when receiving + a response from the health check, in milliseconds. + type: number + unhealthyThreshold: + description: Number of consecutive failed health + checks that must occur before declaring a virtual + gateway unhealthy. + type: number + type: object + portMapping: + description: Port mapping information for the listener. + properties: + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + type: object + tls: + description: Transport Layer Security (TLS) client policy. + properties: + certificate: + description: Virtual gateway's client's Transport + Layer Security (TLS) certificate. + properties: + acm: + description: TLS validation context trust for + an AWS Certificate Manager (ACM) certificate. + properties: + certificateArn: + description: ARN for the certificate. 
+ type: string + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret requested + from the Secret Discovery Service provider + representing Transport Layer Security + (TLS) materials like a certificate or + certificate chain. + type: string + type: object + type: object + mode: + description: 'Listener''s TLS mode. Valid values: + DISABLED, PERMISSIVE, STRICT.' + type: string + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a virtual gateway's listener's + Transport Layer Security (TLS) validation + context. + properties: + match: + description: Criteria for determining a + SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: array + logging: + description: Inbound and outbound access logging information + for the virtual gateway. 
+ properties: + accessLog: + description: Access log configuration for a virtual gateway. + properties: + file: + description: Local file certificate. + properties: + format: + description: The specified format for the logs. + properties: + json: + description: The logging format for JSON. + items: + properties: + key: + description: The specified key for the + JSON. Must be between 1 and 100 characters + in length. + type: string + value: + description: The specified value for + the JSON. Must be between 1 and 100 + characters in length. + type: string + type: object + type: array + text: + description: The logging format for text. + Must be between 1 and 1000 characters in + length. + type: string + type: object + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs + to standard out. Must be between 1 and 255 characters + in length. + type: string + type: object + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appmesh.aws.upbound.io_virtualnodes.yaml b/package/crds/appmesh.aws.upbound.io_virtualnodes.yaml index 5773d7a197..bdc8fadf4a 100644 --- a/package/crds/appmesh.aws.upbound.io_virtualnodes.yaml +++ b/package/crds/appmesh.aws.upbound.io_virtualnodes.yaml @@ -3344,3 +3344,2774 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualNode is the Schema for the VirtualNodes API. 
Provides + an AWS App Mesh virtual node resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualNodeSpec defines the desired state of VirtualNode + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + meshName: + description: Name of the service mesh in which to create the virtual + node. Must be between 1 and 255 characters in length. + type: string + meshNameRef: + description: Reference to a Mesh in appmesh to populate meshName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + meshNameSelector: + description: Selector for a Mesh in appmesh to populate meshName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual node. Must be between + 1 and 255 characters in length. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + spec: + description: Virtual node specification to apply. + properties: + backend: + description: Backends to which the virtual node is expected + to send outbound traffic. + items: + properties: + virtualService: + description: Virtual service to use as a backend for + a virtual node. + properties: + clientPolicy: + description: Client policy for the backend. + properties: + tls: + description: Transport Layer Security (TLS) + client policy. + properties: + certificate: + description: Virtual node's client's Transport + Layer Security (TLS) certificate. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for + the certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the + mesh endpoint that the proxy is + running on. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret + secret requested from the Secret + Discovery Service provider representing + Transport Layer Security (TLS) + materials like a certificate or + certificate chain. + type: string + type: object + type: object + enforce: + description: Whether the policy is enforced. + Default is true. + type: boolean + ports: + description: One or more ports that the + policy is enforced for. + items: + type: number + type: array + x-kubernetes-list-type: set + validation: + description: TLS validation context. 
+ properties: + subjectAlternativeNames: + description: SANs for a TLS validation + context. + properties: + match: + description: Criteria for determining + a SAN's match. + properties: + exact: + description: Values sent must + match the specified values + exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context + trust. + properties: + acm: + description: TLS validation context + trust for an AWS Certificate Manager + (ACM) certificate. + properties: + certificateAuthorityArns: + description: One or more ACM + ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain + for the certificate. + type: string + type: object + sds: + description: A Secret Discovery + Service certificate. + properties: + secretName: + description: Name of the secret + secret requested from the + Secret Discovery Service provider + representing Transport Layer + Security (TLS) materials like + a certificate or certificate + chain. + type: string + type: object + type: object + type: object + type: object + type: object + virtualServiceName: + description: Name of the virtual service that is + acting as a virtual node backend. Must be between + 1 and 255 characters in length. + type: string + type: object + type: object + type: array + backendDefaults: + description: Defaults for backends. + properties: + clientPolicy: + description: Client policy for the backend. + properties: + tls: + description: Transport Layer Security (TLS) client + policy. + properties: + certificate: + description: Virtual node's client's Transport + Layer Security (TLS) certificate. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. 
+ type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + enforce: + description: Whether the policy is enforced. Default + is true. + type: boolean + ports: + description: One or more ports that the policy + is enforced for. + items: + type: number + type: array + x-kubernetes-list-type: set + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a TLS validation context. + properties: + match: + description: Criteria for determining + a SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + acm: + description: TLS validation context trust + for an AWS Certificate Manager (ACM) + certificate. + properties: + certificateAuthorityArns: + description: One or more ACM ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for + the certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. 
+ type: string + type: object + type: object + type: object + type: object + type: object + type: object + listener: + description: Listeners from which the virtual node is expected + to receive inbound traffic. + items: + properties: + connectionPool: + description: Connection pool information for the listener. + properties: + grpc: + description: Connection pool information for gRPC + listeners. + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + http: + description: Connection pool information for HTTP + listeners. + items: + properties: + maxConnections: + description: Maximum number of outbound TCP + connections Envoy can establish concurrently + with all hosts in upstream cluster. Minimum + value of 1. + type: number + maxPendingRequests: + description: Number of overflowing requests + after max_connections Envoy will queue to + upstream cluster. Minimum value of 1. + type: number + type: object + type: array + http2: + description: Connection pool information for HTTP2 + listeners. + items: + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + type: array + tcp: + description: Connection pool information for TCP + listeners. + items: + properties: + maxConnections: + description: Maximum number of outbound TCP + connections Envoy can establish concurrently + with all hosts in upstream cluster. Minimum + value of 1. + type: number + type: object + type: array + type: object + healthCheck: + description: Health check information for the listener. + properties: + healthyThreshold: + description: Number of consecutive successful health + checks that must occur before declaring listener + healthy. 
+ type: number + intervalMillis: + description: Time period in milliseconds between + each health check execution. + type: number + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs to + standard out. Must be between 1 and 255 characters + in length. + type: string + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + timeoutMillis: + description: Amount of time to wait when receiving + a response from the health check, in milliseconds. + type: number + unhealthyThreshold: + description: Number of consecutive failed health + checks that must occur before declaring a virtual + node unhealthy. + type: number + type: object + outlierDetection: + description: Outlier detection information for the listener. + properties: + baseEjectionDuration: + description: Base amount of time for which a host + is ejected. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: The specified value for the JSON. + Must be between 1 and 100 characters in length. + type: number + type: object + interval: + description: Time interval between ejection sweep + analysis. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: The specified value for the JSON. + Must be between 1 and 100 characters in length. + type: number + type: object + maxEjectionPercent: + description: |- + Maximum percentage of hosts in load balancing pool for upstream service that can be ejected. Will eject at least one host regardless of the value. + Minimum value of 0. Maximum value of 100. + type: number + maxServerErrors: + description: Number of consecutive 5xx errors required + for ejection. Minimum value of 1. 
+ type: number + type: object + portMapping: + description: Port mapping information for the listener. + properties: + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + type: object + timeout: + description: Timeouts for different protocols. + properties: + grpc: + description: Connection pool information for gRPC + listeners. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + http: + description: Connection pool information for HTTP + listeners. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + http2: + description: Connection pool information for HTTP2 + listeners. + properties: + idle: + description: Idle timeout. 
An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + tcp: + description: Connection pool information for TCP + listeners. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + type: object + tls: + description: Transport Layer Security (TLS) client policy. + properties: + certificate: + description: Virtual node's client's Transport Layer + Security (TLS) certificate. + properties: + acm: + description: TLS validation context trust for + an AWS Certificate Manager (ACM) certificate. + properties: + certificateArn: + description: ARN for the certificate. + type: string + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. 
+ properties: + secretName: + description: Name of the secret secret requested + from the Secret Discovery Service provider + representing Transport Layer Security + (TLS) materials like a certificate or + certificate chain. + type: string + type: object + type: object + mode: + description: 'Listener''s TLS mode. Valid values: + DISABLED, PERMISSIVE, STRICT.' + type: string + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a TLS validation context. + properties: + match: + description: Criteria for determining a + SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: array + logging: + description: Inbound and outbound access logging information + for the virtual node. + properties: + accessLog: + description: Access log configuration for a virtual node. + properties: + file: + description: Local file certificate. + properties: + format: + description: The specified format for the logs. + properties: + json: + description: The logging format for JSON. + items: + properties: + key: + description: The specified key for the + JSON. Must be between 1 and 100 characters + in length. 
+ type: string + value: + description: The specified value for + the JSON. Must be between 1 and 100 + characters in length. + type: string + type: object + type: array + text: + description: The logging format for text. + Must be between 1 and 1000 characters in + length. + type: string + type: object + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs + to standard out. Must be between 1 and 255 characters + in length. + type: string + type: object + type: object + type: object + serviceDiscovery: + description: Service discovery information for the virtual + node. + properties: + awsCloudMap: + description: Any AWS Cloud Map information for the virtual + node. + properties: + attributes: + additionalProperties: + type: string + description: String map that contains attributes with + values that you can use to filter instances by any + custom attribute that you specified when you registered + the instance. Only instances that match all of the + specified key/value pairs will be returned. + type: object + x-kubernetes-map-type: granular + namespaceName: + description: |- + Name of the AWS Cloud Map namespace to use. + Use the aws_service_discovery_http_namespace resource to configure a Cloud Map namespace. Must be between 1 and 1024 characters in length. + type: string + namespaceNameRef: + description: Reference to a HTTPNamespace in servicediscovery + to populate namespaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + namespaceNameSelector: + description: Selector for a HTTPNamespace in servicediscovery + to populate namespaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceName: + description: attribute of the dns object to hostname. + type: string + type: object + dns: + description: DNS service name for the virtual node. + properties: + hostname: + description: DNS host name for your virtual node. + type: string + ipPreference: + description: 'The preferred IP version that this virtual + node uses. 
Valid values: IPv6_PREFERRED, IPv4_PREFERRED, + IPv4_ONLY, IPv6_ONLY.' + type: string + responseType: + description: 'The DNS response type for the virtual + node. Valid values: LOADBALANCER, ENDPOINTS.' + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + meshName: + description: Name of the service mesh in which to create the virtual + node. Must be between 1 and 255 characters in length. + type: string + meshNameRef: + description: Reference to a Mesh in appmesh to populate meshName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + meshNameSelector: + description: Selector for a Mesh in appmesh to populate meshName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual node. Must be between + 1 and 255 characters in length. + type: string + spec: + description: Virtual node specification to apply. + properties: + backend: + description: Backends to which the virtual node is expected + to send outbound traffic. 
+ items: + properties: + virtualService: + description: Virtual service to use as a backend for + a virtual node. + properties: + clientPolicy: + description: Client policy for the backend. + properties: + tls: + description: Transport Layer Security (TLS) + client policy. + properties: + certificate: + description: Virtual node's client's Transport + Layer Security (TLS) certificate. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for + the certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the + mesh endpoint that the proxy is + running on. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret + secret requested from the Secret + Discovery Service provider representing + Transport Layer Security (TLS) + materials like a certificate or + certificate chain. + type: string + type: object + type: object + enforce: + description: Whether the policy is enforced. + Default is true. + type: boolean + ports: + description: One or more ports that the + policy is enforced for. + items: + type: number + type: array + x-kubernetes-list-type: set + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a TLS validation + context. + properties: + match: + description: Criteria for determining + a SAN's match. + properties: + exact: + description: Values sent must + match the specified values + exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context + trust. + properties: + acm: + description: TLS validation context + trust for an AWS Certificate Manager + (ACM) certificate. + properties: + certificateAuthorityArns: + description: One or more ACM + ARNs. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain + for the certificate. + type: string + type: object + sds: + description: A Secret Discovery + Service certificate. + properties: + secretName: + description: Name of the secret + secret requested from the + Secret Discovery Service provider + representing Transport Layer + Security (TLS) materials like + a certificate or certificate + chain. + type: string + type: object + type: object + type: object + type: object + type: object + virtualServiceName: + description: Name of the virtual service that is + acting as a virtual node backend. Must be between + 1 and 255 characters in length. + type: string + type: object + type: object + type: array + backendDefaults: + description: Defaults for backends. + properties: + clientPolicy: + description: Client policy for the backend. + properties: + tls: + description: Transport Layer Security (TLS) client + policy. + properties: + certificate: + description: Virtual node's client's Transport + Layer Security (TLS) certificate. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + enforce: + description: Whether the policy is enforced. Default + is true. 
+ type: boolean + ports: + description: One or more ports that the policy + is enforced for. + items: + type: number + type: array + x-kubernetes-list-type: set + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a TLS validation context. + properties: + match: + description: Criteria for determining + a SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + acm: + description: TLS validation context trust + for an AWS Certificate Manager (ACM) + certificate. + properties: + certificateAuthorityArns: + description: One or more ACM ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for + the certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: object + listener: + description: Listeners from which the virtual node is expected + to receive inbound traffic. + items: + properties: + connectionPool: + description: Connection pool information for the listener. + properties: + grpc: + description: Connection pool information for gRPC + listeners. + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. 
+ type: number + type: object + http: + description: Connection pool information for HTTP + listeners. + items: + properties: + maxConnections: + description: Maximum number of outbound TCP + connections Envoy can establish concurrently + with all hosts in upstream cluster. Minimum + value of 1. + type: number + maxPendingRequests: + description: Number of overflowing requests + after max_connections Envoy will queue to + upstream cluster. Minimum value of 1. + type: number + type: object + type: array + http2: + description: Connection pool information for HTTP2 + listeners. + items: + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + type: array + tcp: + description: Connection pool information for TCP + listeners. + items: + properties: + maxConnections: + description: Maximum number of outbound TCP + connections Envoy can establish concurrently + with all hosts in upstream cluster. Minimum + value of 1. + type: number + type: object + type: array + type: object + healthCheck: + description: Health check information for the listener. + properties: + healthyThreshold: + description: Number of consecutive successful health + checks that must occur before declaring listener + healthy. + type: number + intervalMillis: + description: Time period in milliseconds between + each health check execution. + type: number + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs to + standard out. Must be between 1 and 255 characters + in length. + type: string + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + timeoutMillis: + description: Amount of time to wait when receiving + a response from the health check, in milliseconds. 
+ type: number + unhealthyThreshold: + description: Number of consecutive failed health + checks that must occur before declaring a virtual + node unhealthy. + type: number + type: object + outlierDetection: + description: Outlier detection information for the listener. + properties: + baseEjectionDuration: + description: Base amount of time for which a host + is ejected. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: The specified value for the JSON. + Must be between 1 and 100 characters in length. + type: number + type: object + interval: + description: Time interval between ejection sweep + analysis. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: The specified value for the JSON. + Must be between 1 and 100 characters in length. + type: number + type: object + maxEjectionPercent: + description: |- + Maximum percentage of hosts in load balancing pool for upstream service that can be ejected. Will eject at least one host regardless of the value. + Minimum value of 0. Maximum value of 100. + type: number + maxServerErrors: + description: Number of consecutive 5xx errors required + for ejection. Minimum value of 1. + type: number + type: object + portMapping: + description: Port mapping information for the listener. + properties: + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + type: object + timeout: + description: Timeouts for different protocols. + properties: + grpc: + description: Connection pool information for gRPC + listeners. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' 
+ type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + http: + description: Connection pool information for HTTP + listeners. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + http2: + description: Connection pool information for HTTP2 + listeners. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + tcp: + description: Connection pool information for TCP + listeners. 
+ properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + type: object + tls: + description: Transport Layer Security (TLS) client policy. + properties: + certificate: + description: Virtual node's client's Transport Layer + Security (TLS) certificate. + properties: + acm: + description: TLS validation context trust for + an AWS Certificate Manager (ACM) certificate. + properties: + certificateArn: + description: ARN for the certificate. + type: string + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret requested + from the Secret Discovery Service provider + representing Transport Layer Security + (TLS) materials like a certificate or + certificate chain. + type: string + type: object + type: object + mode: + description: 'Listener''s TLS mode. Valid values: + DISABLED, PERMISSIVE, STRICT.' + type: string + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a TLS validation context. + properties: + match: + description: Criteria for determining a + SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: array + logging: + description: Inbound and outbound access logging information + for the virtual node. + properties: + accessLog: + description: Access log configuration for a virtual node. + properties: + file: + description: Local file certificate. + properties: + format: + description: The specified format for the logs. + properties: + json: + description: The logging format for JSON. + items: + properties: + key: + description: The specified key for the + JSON. Must be between 1 and 100 characters + in length. + type: string + value: + description: The specified value for + the JSON. Must be between 1 and 100 + characters in length. + type: string + type: object + type: array + text: + description: The logging format for text. + Must be between 1 and 1000 characters in + length. + type: string + type: object + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs + to standard out. Must be between 1 and 255 characters + in length. + type: string + type: object + type: object + type: object + serviceDiscovery: + description: Service discovery information for the virtual + node. + properties: + awsCloudMap: + description: Any AWS Cloud Map information for the virtual + node. 
+ properties: + attributes: + additionalProperties: + type: string + description: String map that contains attributes with + values that you can use to filter instances by any + custom attribute that you specified when you registered + the instance. Only instances that match all of the + specified key/value pairs will be returned. + type: object + x-kubernetes-map-type: granular + namespaceName: + description: |- + Name of the AWS Cloud Map namespace to use. + Use the aws_service_discovery_http_namespace resource to configure a Cloud Map namespace. Must be between 1 and 1024 characters in length. + type: string + namespaceNameRef: + description: Reference to a HTTPNamespace in servicediscovery + to populate namespaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + namespaceNameSelector: + description: Selector for a HTTPNamespace in servicediscovery + to populate namespaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceName: + description: attribute of the dns object to hostname. + type: string + type: object + dns: + description: DNS service name for the virtual node. + properties: + hostname: + description: DNS host name for your virtual node. + type: string + ipPreference: + description: 'The preferred IP version that this virtual + node uses. Valid values: IPv6_PREFERRED, IPv4_PREFERRED, + IPv4_ONLY, IPv6_ONLY.' + type: string + responseType: + description: 'The DNS response type for the virtual + node. Valid values: LOADBALANCER, ENDPOINTS.' + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.spec is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.spec) + || (has(self.initProvider) && has(self.initProvider.spec))' + status: + description: VirtualNodeStatus defines the observed state of VirtualNode. + properties: + atProvider: + properties: + arn: + description: ARN of the virtual node. + type: string + createdDate: + description: Creation date of the virtual node. + type: string + id: + description: ID of the virtual node. + type: string + lastUpdatedDate: + description: Last update date of the virtual node. + type: string + meshName: + description: Name of the service mesh in which to create the virtual + node. Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual node. Must be between + 1 and 255 characters in length. + type: string + resourceOwner: + description: Resource owner's AWS account ID. + type: string + spec: + description: Virtual node specification to apply. + properties: + backend: + description: Backends to which the virtual node is expected + to send outbound traffic. + items: + properties: + virtualService: + description: Virtual service to use as a backend for + a virtual node. + properties: + clientPolicy: + description: Client policy for the backend. 
+ properties: + tls: + description: Transport Layer Security (TLS) + client policy. + properties: + certificate: + description: Virtual node's client's Transport + Layer Security (TLS) certificate. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for + the certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the + mesh endpoint that the proxy is + running on. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret + secret requested from the Secret + Discovery Service provider representing + Transport Layer Security (TLS) + materials like a certificate or + certificate chain. + type: string + type: object + type: object + enforce: + description: Whether the policy is enforced. + Default is true. + type: boolean + ports: + description: One or more ports that the + policy is enforced for. + items: + type: number + type: array + x-kubernetes-list-type: set + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a TLS validation + context. + properties: + match: + description: Criteria for determining + a SAN's match. + properties: + exact: + description: Values sent must + match the specified values + exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context + trust. + properties: + acm: + description: TLS validation context + trust for an AWS Certificate Manager + (ACM) certificate. + properties: + certificateAuthorityArns: + description: One or more ACM + ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + file: + description: Local file certificate. 
+ properties: + certificateChain: + description: Certificate chain + for the certificate. + type: string + type: object + sds: + description: A Secret Discovery + Service certificate. + properties: + secretName: + description: Name of the secret + secret requested from the + Secret Discovery Service provider + representing Transport Layer + Security (TLS) materials like + a certificate or certificate + chain. + type: string + type: object + type: object + type: object + type: object + type: object + virtualServiceName: + description: Name of the virtual service that is + acting as a virtual node backend. Must be between + 1 and 255 characters in length. + type: string + type: object + type: object + type: array + backendDefaults: + description: Defaults for backends. + properties: + clientPolicy: + description: Client policy for the backend. + properties: + tls: + description: Transport Layer Security (TLS) client + policy. + properties: + certificate: + description: Virtual node's client's Transport + Layer Security (TLS) certificate. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + enforce: + description: Whether the policy is enforced. Default + is true. + type: boolean + ports: + description: One or more ports that the policy + is enforced for. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a TLS validation context. + properties: + match: + description: Criteria for determining + a SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + acm: + description: TLS validation context trust + for an AWS Certificate Manager (ACM) + certificate. + properties: + certificateAuthorityArns: + description: One or more ACM ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for + the certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: object + listener: + description: Listeners from which the virtual node is expected + to receive inbound traffic. + items: + properties: + connectionPool: + description: Connection pool information for the listener. + properties: + grpc: + description: Connection pool information for gRPC + listeners. + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + http: + description: Connection pool information for HTTP + listeners. 
+ items: + properties: + maxConnections: + description: Maximum number of outbound TCP + connections Envoy can establish concurrently + with all hosts in upstream cluster. Minimum + value of 1. + type: number + maxPendingRequests: + description: Number of overflowing requests + after max_connections Envoy will queue to + upstream cluster. Minimum value of 1. + type: number + type: object + type: array + http2: + description: Connection pool information for HTTP2 + listeners. + items: + properties: + maxRequests: + description: Maximum number of inflight requests + Envoy can concurrently support across hosts + in upstream cluster. Minimum value of 1. + type: number + type: object + type: array + tcp: + description: Connection pool information for TCP + listeners. + items: + properties: + maxConnections: + description: Maximum number of outbound TCP + connections Envoy can establish concurrently + with all hosts in upstream cluster. Minimum + value of 1. + type: number + type: object + type: array + type: object + healthCheck: + description: Health check information for the listener. + properties: + healthyThreshold: + description: Number of consecutive successful health + checks that must occur before declaring listener + healthy. + type: number + intervalMillis: + description: Time period in milliseconds between + each health check execution. + type: number + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs to + standard out. Must be between 1 and 255 characters + in length. + type: string + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + timeoutMillis: + description: Amount of time to wait when receiving + a response from the health check, in milliseconds. 
+ type: number + unhealthyThreshold: + description: Number of consecutive failed health + checks that must occur before declaring a virtual + node unhealthy. + type: number + type: object + outlierDetection: + description: Outlier detection information for the listener. + properties: + baseEjectionDuration: + description: Base amount of time for which a host + is ejected. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: The specified value for the JSON. + Must be between 1 and 100 characters in length. + type: number + type: object + interval: + description: Time interval between ejection sweep + analysis. + properties: + unit: + description: 'Unit of time. Valid values: ms, + s.' + type: string + value: + description: The specified value for the JSON. + Must be between 1 and 100 characters in length. + type: number + type: object + maxEjectionPercent: + description: |- + Maximum percentage of hosts in load balancing pool for upstream service that can be ejected. Will eject at least one host regardless of the value. + Minimum value of 0. Maximum value of 100. + type: number + maxServerErrors: + description: Number of consecutive 5xx errors required + for ejection. Minimum value of 1. + type: number + type: object + portMapping: + description: Port mapping information for the listener. + properties: + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http, http2, tcp and grpc. + type: string + type: object + timeout: + description: Timeouts for different protocols. + properties: + grpc: + description: Connection pool information for gRPC + listeners. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' 
+ type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + http: + description: Connection pool information for HTTP + listeners. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + http2: + description: Connection pool information for HTTP2 + listeners. + properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + perRequest: + description: Per request timeout. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + tcp: + description: Connection pool information for TCP + listeners. 
+ properties: + idle: + description: Idle timeout. An idle timeout bounds + the amount of time that a connection may be + idle. + properties: + unit: + description: 'Unit of time. Valid values: + ms, s.' + type: string + value: + description: The specified value for the + JSON. Must be between 1 and 100 characters + in length. + type: number + type: object + type: object + type: object + tls: + description: Transport Layer Security (TLS) client policy. + properties: + certificate: + description: Virtual node's client's Transport Layer + Security (TLS) certificate. + properties: + acm: + description: TLS validation context trust for + an AWS Certificate Manager (ACM) certificate. + properties: + certificateArn: + description: ARN for the certificate. + type: string + type: object + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the certificate. + type: string + privateKey: + description: Private key for a certificate + stored on the file system of the mesh + endpoint that the proxy is running on. + type: string + type: object + sds: + description: A Secret Discovery Service certificate. + properties: + secretName: + description: Name of the secret secret requested + from the Secret Discovery Service provider + representing Transport Layer Security + (TLS) materials like a certificate or + certificate chain. + type: string + type: object + type: object + mode: + description: 'Listener''s TLS mode. Valid values: + DISABLED, PERMISSIVE, STRICT.' + type: string + validation: + description: TLS validation context. + properties: + subjectAlternativeNames: + description: SANs for a TLS validation context. + properties: + match: + description: Criteria for determining a + SAN's match. + properties: + exact: + description: Values sent must match + the specified values exactly. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + trust: + description: TLS validation context trust. + properties: + file: + description: Local file certificate. + properties: + certificateChain: + description: Certificate chain for the + certificate. + type: string + type: object + sds: + description: A Secret Discovery Service + certificate. + properties: + secretName: + description: Name of the secret secret + requested from the Secret Discovery + Service provider representing Transport + Layer Security (TLS) materials like + a certificate or certificate chain. + type: string + type: object + type: object + type: object + type: object + type: object + type: array + logging: + description: Inbound and outbound access logging information + for the virtual node. + properties: + accessLog: + description: Access log configuration for a virtual node. + properties: + file: + description: Local file certificate. + properties: + format: + description: The specified format for the logs. + properties: + json: + description: The logging format for JSON. + items: + properties: + key: + description: The specified key for the + JSON. Must be between 1 and 100 characters + in length. + type: string + value: + description: The specified value for + the JSON. Must be between 1 and 100 + characters in length. + type: string + type: object + type: array + text: + description: The logging format for text. + Must be between 1 and 1000 characters in + length. + type: string + type: object + path: + description: File path to write access logs to. + You can use /dev/stdout to send access logs + to standard out. Must be between 1 and 255 characters + in length. + type: string + type: object + type: object + type: object + serviceDiscovery: + description: Service discovery information for the virtual + node. + properties: + awsCloudMap: + description: Any AWS Cloud Map information for the virtual + node. 
+ properties: + attributes: + additionalProperties: + type: string + description: String map that contains attributes with + values that you can use to filter instances by any + custom attribute that you specified when you registered + the instance. Only instances that match all of the + specified key/value pairs will be returned. + type: object + x-kubernetes-map-type: granular + namespaceName: + description: |- + Name of the AWS Cloud Map namespace to use. + Use the aws_service_discovery_http_namespace resource to configure a Cloud Map namespace. Must be between 1 and 1024 characters in length. + type: string + serviceName: + description: attribute of the dns object to hostname. + type: string + type: object + dns: + description: DNS service name for the virtual node. + properties: + hostname: + description: DNS host name for your virtual node. + type: string + ipPreference: + description: 'The preferred IP version that this virtual + node uses. Valid values: IPv6_PREFERRED, IPv4_PREFERRED, + IPv4_ONLY, IPv6_ONLY.' + type: string + responseType: + description: 'The DNS response type for the virtual + node. Valid values: LOADBALANCER, ENDPOINTS.' + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appmesh.aws.upbound.io_virtualrouters.yaml b/package/crds/appmesh.aws.upbound.io_virtualrouters.yaml index 199bae2aab..2a4ea295ff 100644 --- a/package/crds/appmesh.aws.upbound.io_virtualrouters.yaml +++ b/package/crds/appmesh.aws.upbound.io_virtualrouters.yaml @@ -636,3 +636,609 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualRouter is the Schema for the VirtualRouters API. Provides + an AWS App Mesh virtual router resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualRouterSpec defines the desired state of VirtualRouter + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + meshName: + description: Name of the service mesh in which to create the virtual + router. Must be between 1 and 255 characters in length. + type: string + meshNameRef: + description: Reference to a Mesh in appmesh to populate meshName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + meshNameSelector: + description: Selector for a Mesh in appmesh to populate meshName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual router. Must be between + 1 and 255 characters in length. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + spec: + description: Virtual router specification to apply. + properties: + listener: + description: configuration block to the spec argument. + items: + properties: + portMapping: + description: Port mapping information for the listener. + properties: + port: + description: Port used for the port mapping. 
+ type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http,http2, tcp and grpc. + type: string + type: object + type: object + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + meshName: + description: Name of the service mesh in which to create the virtual + router. Must be between 1 and 255 characters in length. + type: string + meshNameRef: + description: Reference to a Mesh in appmesh to populate meshName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + meshNameSelector: + description: Selector for a Mesh in appmesh to populate meshName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual router. Must be between + 1 and 255 characters in length. + type: string + spec: + description: Virtual router specification to apply. + properties: + listener: + description: configuration block to the spec argument. 
+ items: + properties: + portMapping: + description: Port mapping information for the listener. + properties: + port: + description: Port used for the port mapping. + type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http,http2, tcp and grpc. + type: string + type: object + type: object + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.spec is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.spec) + || (has(self.initProvider) && has(self.initProvider.spec))' + status: + description: VirtualRouterStatus defines the observed state of VirtualRouter. + properties: + atProvider: + properties: + arn: + description: ARN of the virtual router. + type: string + createdDate: + description: Creation date of the virtual router. + type: string + id: + description: ID of the virtual router. + type: string + lastUpdatedDate: + description: Last update date of the virtual router. + type: string + meshName: + description: Name of the service mesh in which to create the virtual + router. Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual router. Must be between + 1 and 255 characters in length. + type: string + resourceOwner: + description: Resource owner's AWS account ID. + type: string + spec: + description: Virtual router specification to apply. + properties: + listener: + description: configuration block to the spec argument. + items: + properties: + portMapping: + description: Port mapping information for the listener. + properties: + port: + description: Port used for the port mapping. 
+ type: number + protocol: + description: Protocol used for the port mapping. + Valid values are http,http2, tcp and grpc. + type: string + type: object + type: object + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appmesh.aws.upbound.io_virtualservices.yaml b/package/crds/appmesh.aws.upbound.io_virtualservices.yaml index ac522f87b3..9137a978c5 100644 --- a/package/crds/appmesh.aws.upbound.io_virtualservices.yaml +++ b/package/crds/appmesh.aws.upbound.io_virtualservices.yaml @@ -979,3 +979,937 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualService is the Schema for the VirtualServices API. Provides + an AWS App Mesh virtual service resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualServiceSpec defines the desired state of VirtualService + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + meshName: + description: Name of the service mesh in which to create the virtual + service. Must be between 1 and 255 characters in length. + type: string + meshNameRef: + description: Reference to a Mesh in appmesh to populate meshName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + meshNameSelector: + description: Selector for a Mesh in appmesh to populate meshName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual service. Must be between + 1 and 255 characters in length. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + spec: + description: Virtual service specification to apply. + properties: + provider: + description: App Mesh object that is acting as the provider + for a virtual service. You can specify a single virtual + node or virtual router. 
+ properties: + virtualNode: + description: Virtual node associated with a virtual service. + properties: + virtualNodeName: + description: Name of the virtual node that is acting + as a service provider. Must be between 1 and 255 + characters in length. + type: string + virtualNodeNameRef: + description: Reference to a VirtualNode in appmesh + to populate virtualNodeName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNodeNameSelector: + description: Selector for a VirtualNode in appmesh + to populate virtualNodeName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + virtualRouter: + description: Virtual router associated with a virtual + service. + properties: + virtualRouterName: + description: Name of the virtual router that is acting + as a service provider. Must be between 1 and 255 + characters in length. + type: string + virtualRouterNameRef: + description: Reference to a VirtualRouter in appmesh + to populate virtualRouterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualRouterNameSelector: + description: Selector for a VirtualRouter in appmesh + to populate virtualRouterName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + meshName: + description: Name of the service mesh in which to create the virtual + service. Must be between 1 and 255 characters in length. + type: string + meshNameRef: + description: Reference to a Mesh in appmesh to populate meshName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + meshNameSelector: + description: Selector for a Mesh in appmesh to populate meshName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual service. Must be between + 1 and 255 characters in length. + type: string + spec: + description: Virtual service specification to apply. + properties: + provider: + description: App Mesh object that is acting as the provider + for a virtual service. You can specify a single virtual + node or virtual router. + properties: + virtualNode: + description: Virtual node associated with a virtual service. + properties: + virtualNodeName: + description: Name of the virtual node that is acting + as a service provider. Must be between 1 and 255 + characters in length. + type: string + virtualNodeNameRef: + description: Reference to a VirtualNode in appmesh + to populate virtualNodeName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNodeNameSelector: + description: Selector for a VirtualNode in appmesh + to populate virtualNodeName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + virtualRouter: + description: Virtual router associated with a virtual + service. + properties: + virtualRouterName: + description: Name of the virtual router that is acting + as a service provider. Must be between 1 and 255 + characters in length. 
+ type: string + virtualRouterNameRef: + description: Reference to a VirtualRouter in appmesh + to populate virtualRouterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualRouterNameSelector: + description: Selector for a VirtualRouter in appmesh + to populate virtualRouterName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.spec is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.spec) + || (has(self.initProvider) && has(self.initProvider.spec))' + status: + description: VirtualServiceStatus defines the observed state of VirtualService. + properties: + atProvider: + properties: + arn: + description: ARN of the virtual service. + type: string + createdDate: + description: Creation date of the virtual service. + type: string + id: + description: ID of the virtual service. + type: string + lastUpdatedDate: + description: Last update date of the virtual service. + type: string + meshName: + description: Name of the service mesh in which to create the virtual + service. Must be between 1 and 255 characters in length. + type: string + meshOwner: + description: AWS account ID of the service mesh's owner. Defaults + to the account ID the AWS provider is currently connected to. + type: string + name: + description: Name to use for the virtual service. Must be between + 1 and 255 characters in length. + type: string + resourceOwner: + description: Resource owner's AWS account ID. + type: string + spec: + description: Virtual service specification to apply. + properties: + provider: + description: App Mesh object that is acting as the provider + for a virtual service. You can specify a single virtual + node or virtual router. + properties: + virtualNode: + description: Virtual node associated with a virtual service. 
+ properties: + virtualNodeName: + description: Name of the virtual node that is acting + as a service provider. Must be between 1 and 255 + characters in length. + type: string + type: object + virtualRouter: + description: Virtual router associated with a virtual + service. + properties: + virtualRouterName: + description: Name of the virtual router that is acting + as a service provider. Must be between 1 and 255 + characters in length. + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apprunner.aws.upbound.io_observabilityconfigurations.yaml b/package/crds/apprunner.aws.upbound.io_observabilityconfigurations.yaml index 679b463603..8efc80efc6 100644 --- a/package/crds/apprunner.aws.upbound.io_observabilityconfigurations.yaml +++ b/package/crds/apprunner.aws.upbound.io_observabilityconfigurations.yaml @@ -427,3 +427,406 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ObservabilityConfiguration is the Schema for the ObservabilityConfigurations + API. Manages an App Runner Observability Configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObservabilityConfigurationSpec defines the desired state + of ObservabilityConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + observabilityConfigurationName: + description: Name of the observability configuration. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + traceConfiguration: + description: Configuration of the tracing feature within this + observability configuration. If you don't specify it, App Runner + doesn't enable tracing. See Trace Configuration below for more + details. 
+ properties: + vendor: + description: 'Implementation provider chosen for tracing App + Runner services. Valid values: AWSXRAY.' + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + observabilityConfigurationName: + description: Name of the observability configuration. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + traceConfiguration: + description: Configuration of the tracing feature within this + observability configuration. If you don't specify it, App Runner + doesn't enable tracing. See Trace Configuration below for more + details. + properties: + vendor: + description: 'Implementation provider chosen for tracing App + Runner services. Valid values: AWSXRAY.' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.observabilityConfigurationName is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.observabilityConfigurationName) + || (has(self.initProvider) && has(self.initProvider.observabilityConfigurationName))' + status: + description: ObservabilityConfigurationStatus defines the observed state + of ObservabilityConfiguration. + properties: + atProvider: + properties: + arn: + description: ARN of this observability configuration. 
+ type: string + id: + type: string + latest: + description: Whether the observability configuration has the highest + observability_configuration_revision among all configurations + that share the same observability_configuration_name. + type: boolean + observabilityConfigurationName: + description: Name of the observability configuration. + type: string + observabilityConfigurationRevision: + description: The revision of this observability configuration. + type: number + status: + description: Current state of the observability configuration. + An INACTIVE configuration revision has been deleted and can't + be used. It is permanently removed some time after deletion. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + traceConfiguration: + description: Configuration of the tracing feature within this + observability configuration. If you don't specify it, App Runner + doesn't enable tracing. See Trace Configuration below for more + details. + properties: + vendor: + description: 'Implementation provider chosen for tracing App + Runner services. Valid values: AWSXRAY.' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apprunner.aws.upbound.io_services.yaml b/package/crds/apprunner.aws.upbound.io_services.yaml index 8620418d84..442b56e877 100644 --- a/package/crds/apprunner.aws.upbound.io_services.yaml +++ b/package/crds/apprunner.aws.upbound.io_services.yaml @@ -1887,3 +1887,1740 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Service is the Schema for the Services API. Manages an App Runner + Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceSpec defines the desired state of Service + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoScalingConfigurationArn: + description: ARN of an App Runner automatic scaling configuration + resource that you want to associate with your service. If not + provided, App Runner associates the latest revision of a default + auto scaling configuration. + type: string + encryptionConfiguration: + description: (Forces new resource) An optional custom encryption + key that App Runner uses to encrypt the copy of your source + repository that it maintains and your service logs. By default, + App Runner uses an AWS managed CMK. See Encryption Configuration + below for more details. + properties: + kmsKey: + description: ARN of the KMS key used for encryption. + type: string + type: object + healthCheckConfiguration: + description: Settings of the health check that AWS App Runner + performs to monitor the health of your service. See Health Check + Configuration below for more details. + properties: + healthyThreshold: + description: Number of consecutive checks that must succeed + before App Runner decides that the service is healthy. 
Defaults + to 1. Minimum value of 1. Maximum value of 20. + type: number + interval: + description: Time interval, in seconds, between health checks. + Defaults to 5. Minimum value of 1. Maximum value of 20. + type: number + path: + description: URL to send requests to for health checks. Defaults + to /. Minimum length of 0. Maximum length of 51200. + type: string + protocol: + description: 'IP protocol that App Runner uses to perform + health checks for your service. Valid values: TCP, HTTP. + Defaults to TCP. If you set protocol to HTTP, App Runner + sends health check requests to the HTTP path specified by + path.' + type: string + timeout: + description: Time, in seconds, to wait for a health check + response before deciding it failed. Defaults to 2. Minimum + value of 1. Maximum value of 20. + type: number + unhealthyThreshold: + description: Number of consecutive checks that must fail before + App Runner decides that the service is unhealthy. Defaults + to 5. Minimum value of 1. Maximum value of 20. + type: number + type: object + instanceConfiguration: + description: The runtime configuration of instances (scaling units) + of the App Runner service. See Instance Configuration below + for more details. + properties: + cpu: + description: 'Number of CPU units reserved for each instance + of your App Runner service represented as a String. Defaults + to 1024. Valid values: 256|512|1024|2048|4096|(0.25|0.5|1|2|4) + vCPU.' + type: string + instanceRoleArn: + description: ARN of an IAM role that provides permissions + to your App Runner service. These are permissions that your + code needs when it calls any AWS APIs. + type: string + memory: + description: 'Amount of memory, in MB or GB, reserved for + each instance of your App Runner service. Defaults to 2048. + Valid values: 512|1024|2048|3072|4096|6144|8192|10240|12288|(0.5|1|2|3|4|6|8|10|12) + GB.' 
+ type: string + type: object + networkConfiguration: + description: Configuration settings related to network traffic + of the web application that the App Runner service runs. See + Network Configuration below for more details. + properties: + egressConfiguration: + description: Network configuration settings for outbound message + traffic. See Egress Configuration below for more details. + properties: + egressType: + description: 'The type of egress configuration. Valid + values are: DEFAULT and VPC.' + type: string + vpcConnectorArn: + description: The Amazon Resource Name (ARN) of the App + Runner VPC connector that you want to associate with + your App Runner service. Only valid when EgressType + = VPC. + type: string + vpcConnectorArnRef: + description: Reference to a VPCConnector in apprunner + to populate vpcConnectorArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcConnectorArnSelector: + description: Selector for a VPCConnector in apprunner + to populate vpcConnectorArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + ingressConfiguration: + description: Network configuration settings for inbound network + traffic. See Ingress Configuration below for more details. + properties: + isPubliclyAccessible: + description: Specifies whether your App Runner service + is publicly accessible. To make the service publicly + accessible set it to True. To make the service privately + accessible, from only within an Amazon VPC set it to + False. + type: boolean + type: object + ipAddressType: + description: 'App Runner provides you with the option to choose + between Internet Protocol version 4 (IPv4) and dual stack + (IPv4 and IPv6) for your incoming public network configuration. + Valid values: IPV4, DUAL_STACK. Default: IPV4.' + type: string + type: object + observabilityConfiguration: + description: The observability configuration of your service. 
+ See Observability Configuration below for more details. + properties: + observabilityConfigurationArn: + description: ARN of the observability configuration that is + associated with the service. Specified only when observability_enabled + is true. + type: string + observabilityConfigurationArnRef: + description: Reference to a ObservabilityConfiguration in + apprunner to populate observabilityConfigurationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + observabilityConfigurationArnSelector: + description: Selector for a ObservabilityConfiguration in + apprunner to populate observabilityConfigurationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + observabilityEnabled: + description: When true, an observability configuration resource + is associated with the service. + type: boolean + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + serviceName: + description: (Forces new resource) Name of the service. + type: string + sourceConfiguration: + description: The source to deploy to the App Runner service. Can + be a code or an image repository. See Source Configuration below + for more details. + properties: + authenticationConfiguration: + description: Describes resources needed to authenticate access + to some source repositories. See Authentication Configuration + below for more details. + properties: + accessRoleArn: + description: ARN of the IAM role that grants the App Runner + service access to a source repository. Required for + ECR image repositories (but not for ECR Public) + type: string + connectionArn: + description: ARN of the App Runner connection that enables + the App Runner service to connect to a source repository. + Required for GitHub code repositories. + type: string + connectionArnRef: + description: Reference to a Connection in apprunner to + populate connectionArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionArnSelector: + description: Selector for a Connection in apprunner to + populate connectionArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + autoDeploymentsEnabled: + description: Whether continuous integration from the source + repository is enabled for the App Runner service. If set + to true, each repository change (source code commit or new + image version) starts a deployment. Defaults to true. + type: boolean + codeRepository: + description: Description of a source code repository. See + Code Repository below for more details. + properties: + codeConfiguration: + description: Configuration for building and running the + service from a source code repository. See Code Configuration + below for more details. + properties: + codeConfigurationValues: + description: Basic configuration for building and + running the App Runner service. Use this parameter + to quickly launch an App Runner service without + providing an apprunner.yaml file in the source code + repository (or ignoring the file if it exists). + See Code Configuration Values below for more details. + properties: + buildCommand: + description: Command App Runner runs to build + your application. + type: string + port: + description: Port that your application listens + to in the container. Defaults to "8080". + type: string + runtime: + description: 'Runtime environment type for building + and running an App Runner service. Represents + a programming language runtime. Valid values: + PYTHON_3, NODEJS_12, NODEJS_14, NODEJS_16, CORRETTO_8, + CORRETTO_11, GO_1, DOTNET_6, PHP_81, RUBY_31.' + type: string + runtimeEnvironmentSecrets: + additionalProperties: + type: string + description: Secrets and parameters available + to your service as environment variables. A + map of key/value pairs, where the key is the + desired name of the Secret in the environment + (i.e. 
it does not have to match the name of + the secret in Secrets Manager or SSM Parameter + Store), and the value is the ARN of the secret + from AWS Secrets Manager or the ARN of the parameter + in AWS SSM Parameter Store. + type: object + x-kubernetes-map-type: granular + runtimeEnvironmentVariables: + additionalProperties: + type: string + description: Environment variables available to + your running App Runner service. A map of key/value + pairs. Keys with a prefix of AWSAPPRUNNER are + reserved for system use and aren't valid. + type: object + x-kubernetes-map-type: granular + startCommand: + description: Command App Runner runs to start + the application in the source image. If specified, + this command overrides the Docker image’s default + start command. + type: string + type: object + configurationSource: + description: 'Source of the App Runner configuration. + Valid values: REPOSITORY, API. Values are interpreted + as follows:' + type: string + type: object + repositoryUrl: + description: Location of the repository that contains + the source code. + type: string + sourceCodeVersion: + description: Version that should be used within the source + code repository. See Source Code Version below for more + details. + properties: + type: + description: 'Type of version identifier. For a git-based + repository, branches represent versions. Valid values: + BRANCH.' + type: string + value: + description: Source code version. For a git-based + repository, a branch name maps to a specific version. + App Runner uses the most recent commit to the branch. + type: string + type: object + sourceDirectory: + description: The path of the directory that stores source + code and configuration files. The build and start commands + also execute from here. The path is absolute from root + and, if not specified, defaults to the repository root. + type: string + type: object + imageRepository: + description: Description of a source image repository. 
See + Image Repository below for more details. + properties: + imageConfiguration: + description: Configuration for running the identified + image. See Image Configuration below for more details. + properties: + port: + description: Port that your application listens to + in the container. Defaults to "8080". + type: string + runtimeEnvironmentSecrets: + additionalProperties: + type: string + description: Secrets and parameters available to your + service as environment variables. A map of key/value + pairs, where the key is the desired name of the + Secret in the environment (i.e. it does not have + to match the name of the secret in Secrets Manager + or SSM Parameter Store), and the value is the ARN + of the secret from AWS Secrets Manager or the ARN + of the parameter in AWS SSM Parameter Store. + type: object + x-kubernetes-map-type: granular + runtimeEnvironmentVariables: + additionalProperties: + type: string + description: Environment variables available to your + running App Runner service. A map of key/value pairs. + Keys with a prefix of AWSAPPRUNNER are reserved + for system use and aren't valid. + type: object + x-kubernetes-map-type: granular + startCommand: + description: Command App Runner runs to start the + application in the source image. If specified, this + command overrides the Docker image’s default start + command. + type: string + type: object + imageIdentifier: + description: |- + Identifier of an image. For an image in Amazon Elastic Container Registry (Amazon ECR), this is an image name. For the + image name format, see Pulling an image in the Amazon ECR User Guide. + type: string + imageRepositoryType: + description: 'Type of the image repository. This reflects + the repository provider and whether the repository is + private or public. Valid values: ECR , ECR_PUBLIC.' + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoScalingConfigurationArn: + description: ARN of an App Runner automatic scaling configuration + resource that you want to associate with your service. If not + provided, App Runner associates the latest revision of a default + auto scaling configuration. + type: string + encryptionConfiguration: + description: (Forces new resource) An optional custom encryption + key that App Runner uses to encrypt the copy of your source + repository that it maintains and your service logs. By default, + App Runner uses an AWS managed CMK. See Encryption Configuration + below for more details. + properties: + kmsKey: + description: ARN of the KMS key used for encryption. + type: string + type: object + healthCheckConfiguration: + description: Settings of the health check that AWS App Runner + performs to monitor the health of your service. See Health Check + Configuration below for more details. + properties: + healthyThreshold: + description: Number of consecutive checks that must succeed + before App Runner decides that the service is healthy. Defaults + to 1. Minimum value of 1. Maximum value of 20. + type: number + interval: + description: Time interval, in seconds, between health checks. + Defaults to 5. 
Minimum value of 1. Maximum value of 20. + type: number + path: + description: URL to send requests to for health checks. Defaults + to /. Minimum length of 0. Maximum length of 51200. + type: string + protocol: + description: 'IP protocol that App Runner uses to perform + health checks for your service. Valid values: TCP, HTTP. + Defaults to TCP. If you set protocol to HTTP, App Runner + sends health check requests to the HTTP path specified by + path.' + type: string + timeout: + description: Time, in seconds, to wait for a health check + response before deciding it failed. Defaults to 2. Minimum + value of 1. Maximum value of 20. + type: number + unhealthyThreshold: + description: Number of consecutive checks that must fail before + App Runner decides that the service is unhealthy. Defaults + to 5. Minimum value of 1. Maximum value of 20. + type: number + type: object + instanceConfiguration: + description: The runtime configuration of instances (scaling units) + of the App Runner service. See Instance Configuration below + for more details. + properties: + cpu: + description: 'Number of CPU units reserved for each instance + of your App Runner service represented as a String. Defaults + to 1024. Valid values: 256|512|1024|2048|4096|(0.25|0.5|1|2|4) + vCPU.' + type: string + instanceRoleArn: + description: ARN of an IAM role that provides permissions + to your App Runner service. These are permissions that your + code needs when it calls any AWS APIs. + type: string + memory: + description: 'Amount of memory, in MB or GB, reserved for + each instance of your App Runner service. Defaults to 2048. + Valid values: 512|1024|2048|3072|4096|6144|8192|10240|12288|(0.5|1|2|3|4|6|8|10|12) + GB.' + type: string + type: object + networkConfiguration: + description: Configuration settings related to network traffic + of the web application that the App Runner service runs. See + Network Configuration below for more details. 
+ properties: + egressConfiguration: + description: Network configuration settings for outbound message + traffic. See Egress Configuration below for more details. + properties: + egressType: + description: 'The type of egress configuration. Valid + values are: DEFAULT and VPC.' + type: string + vpcConnectorArn: + description: The Amazon Resource Name (ARN) of the App + Runner VPC connector that you want to associate with + your App Runner service. Only valid when EgressType + = VPC. + type: string + vpcConnectorArnRef: + description: Reference to a VPCConnector in apprunner + to populate vpcConnectorArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcConnectorArnSelector: + description: Selector for a VPCConnector in apprunner + to populate vpcConnectorArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + ingressConfiguration: + description: Network configuration settings for inbound network + traffic. See Ingress Configuration below for more details. + properties: + isPubliclyAccessible: + description: Specifies whether your App Runner service + is publicly accessible. To make the service publicly + accessible set it to True. To make the service privately + accessible, from only within an Amazon VPC set it to + False. + type: boolean + type: object + ipAddressType: + description: 'App Runner provides you with the option to choose + between Internet Protocol version 4 (IPv4) and dual stack + (IPv4 and IPv6) for your incoming public network configuration. + Valid values: IPV4, DUAL_STACK. Default: IPV4.' + type: string + type: object + observabilityConfiguration: + description: The observability configuration of your service. + See Observability Configuration below for more details. + properties: + observabilityConfigurationArn: + description: ARN of the observability configuration that is + associated with the service. Specified only when observability_enabled + is true. + type: string + observabilityConfigurationArnRef: + description: Reference to a ObservabilityConfiguration in + apprunner to populate observabilityConfigurationArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + observabilityConfigurationArnSelector: + description: Selector for a ObservabilityConfiguration in + apprunner to populate observabilityConfigurationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + observabilityEnabled: + description: When true, an observability configuration resource + is associated with the service. + type: boolean + type: object + serviceName: + description: (Forces new resource) Name of the service. + type: string + sourceConfiguration: + description: The source to deploy to the App Runner service. Can + be a code or an image repository. See Source Configuration below + for more details. + properties: + authenticationConfiguration: + description: Describes resources needed to authenticate access + to some source repositories. See Authentication Configuration + below for more details. + properties: + accessRoleArn: + description: ARN of the IAM role that grants the App Runner + service access to a source repository. Required for + ECR image repositories (but not for ECR Public) + type: string + connectionArn: + description: ARN of the App Runner connection that enables + the App Runner service to connect to a source repository. + Required for GitHub code repositories. + type: string + connectionArnRef: + description: Reference to a Connection in apprunner to + populate connectionArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionArnSelector: + description: Selector for a Connection in apprunner to + populate connectionArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + autoDeploymentsEnabled: + description: Whether continuous integration from the source + repository is enabled for the App Runner service. If set + to true, each repository change (source code commit or new + image version) starts a deployment. Defaults to true. + type: boolean + codeRepository: + description: Description of a source code repository. See + Code Repository below for more details. 
+ properties: + codeConfiguration: + description: Configuration for building and running the + service from a source code repository. See Code Configuration + below for more details. + properties: + codeConfigurationValues: + description: Basic configuration for building and + running the App Runner service. Use this parameter + to quickly launch an App Runner service without + providing an apprunner.yaml file in the source code + repository (or ignoring the file if it exists). + See Code Configuration Values below for more details. + properties: + buildCommand: + description: Command App Runner runs to build + your application. + type: string + port: + description: Port that your application listens + to in the container. Defaults to "8080". + type: string + runtime: + description: 'Runtime environment type for building + and running an App Runner service. Represents + a programming language runtime. Valid values: + PYTHON_3, NODEJS_12, NODEJS_14, NODEJS_16, CORRETTO_8, + CORRETTO_11, GO_1, DOTNET_6, PHP_81, RUBY_31.' + type: string + runtimeEnvironmentSecrets: + additionalProperties: + type: string + description: Secrets and parameters available + to your service as environment variables. A + map of key/value pairs, where the key is the + desired name of the Secret in the environment + (i.e. it does not have to match the name of + the secret in Secrets Manager or SSM Parameter + Store), and the value is the ARN of the secret + from AWS Secrets Manager or the ARN of the parameter + in AWS SSM Parameter Store. + type: object + x-kubernetes-map-type: granular + runtimeEnvironmentVariables: + additionalProperties: + type: string + description: Environment variables available to + your running App Runner service. A map of key/value + pairs. Keys with a prefix of AWSAPPRUNNER are + reserved for system use and aren't valid. 
+ type: object + x-kubernetes-map-type: granular + startCommand: + description: Command App Runner runs to start + the application in the source image. If specified, + this command overrides the Docker image’s default + start command. + type: string + type: object + configurationSource: + description: 'Source of the App Runner configuration. + Valid values: REPOSITORY, API. Values are interpreted + as follows:' + type: string + type: object + repositoryUrl: + description: Location of the repository that contains + the source code. + type: string + sourceCodeVersion: + description: Version that should be used within the source + code repository. See Source Code Version below for more + details. + properties: + type: + description: 'Type of version identifier. For a git-based + repository, branches represent versions. Valid values: + BRANCH.' + type: string + value: + description: Source code version. For a git-based + repository, a branch name maps to a specific version. + App Runner uses the most recent commit to the branch. + type: string + type: object + sourceDirectory: + description: The path of the directory that stores source + code and configuration files. The build and start commands + also execute from here. The path is absolute from root + and, if not specified, defaults to the repository root. + type: string + type: object + imageRepository: + description: Description of a source image repository. See + Image Repository below for more details. + properties: + imageConfiguration: + description: Configuration for running the identified + image. See Image Configuration below for more details. + properties: + port: + description: Port that your application listens to + in the container. Defaults to "8080". + type: string + runtimeEnvironmentSecrets: + additionalProperties: + type: string + description: Secrets and parameters available to your + service as environment variables. 
A map of key/value + pairs, where the key is the desired name of the + Secret in the environment (i.e. it does not have + to match the name of the secret in Secrets Manager + or SSM Parameter Store), and the value is the ARN + of the secret from AWS Secrets Manager or the ARN + of the parameter in AWS SSM Parameter Store. + type: object + x-kubernetes-map-type: granular + runtimeEnvironmentVariables: + additionalProperties: + type: string + description: Environment variables available to your + running App Runner service. A map of key/value pairs. + Keys with a prefix of AWSAPPRUNNER are reserved + for system use and aren't valid. + type: object + x-kubernetes-map-type: granular + startCommand: + description: Command App Runner runs to start the + application in the source image. If specified, this + command overrides the Docker image’s default start + command. + type: string + type: object + imageIdentifier: + description: |- + Identifier of an image. For an image in Amazon Elastic Container Registry (Amazon ECR), this is an image name. For the + image name format, see Pulling an image in the Amazon ECR User Guide. + type: string + imageRepositoryType: + description: 'Type of the image repository. This reflects + the repository provider and whether the repository is + private or public. Valid values: ECR , ECR_PUBLIC.' + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.serviceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceName) + || (has(self.initProvider) && has(self.initProvider.serviceName))' + - message: spec.forProvider.sourceConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sourceConfiguration) + || (has(self.initProvider) && has(self.initProvider.sourceConfiguration))' + status: + description: ServiceStatus defines the observed state of Service. + properties: + atProvider: + properties: + arn: + description: ARN of the App Runner service. + type: string + autoScalingConfigurationArn: + description: ARN of an App Runner automatic scaling configuration + resource that you want to associate with your service. If not + provided, App Runner associates the latest revision of a default + auto scaling configuration. + type: string + encryptionConfiguration: + description: (Forces new resource) An optional custom encryption + key that App Runner uses to encrypt the copy of your source + repository that it maintains and your service logs. By default, + App Runner uses an AWS managed CMK. See Encryption Configuration + below for more details. + properties: + kmsKey: + description: ARN of the KMS key used for encryption. + type: string + type: object + healthCheckConfiguration: + description: Settings of the health check that AWS App Runner + performs to monitor the health of your service. See Health Check + Configuration below for more details. 
+ properties: + healthyThreshold: + description: Number of consecutive checks that must succeed + before App Runner decides that the service is healthy. Defaults + to 1. Minimum value of 1. Maximum value of 20. + type: number + interval: + description: Time interval, in seconds, between health checks. + Defaults to 5. Minimum value of 1. Maximum value of 20. + type: number + path: + description: URL to send requests to for health checks. Defaults + to /. Minimum length of 0. Maximum length of 51200. + type: string + protocol: + description: 'IP protocol that App Runner uses to perform + health checks for your service. Valid values: TCP, HTTP. + Defaults to TCP. If you set protocol to HTTP, App Runner + sends health check requests to the HTTP path specified by + path.' + type: string + timeout: + description: Time, in seconds, to wait for a health check + response before deciding it failed. Defaults to 2. Minimum + value of 1. Maximum value of 20. + type: number + unhealthyThreshold: + description: Number of consecutive checks that must fail before + App Runner decides that the service is unhealthy. Defaults + to 5. Minimum value of 1. Maximum value of 20. + type: number + type: object + id: + type: string + instanceConfiguration: + description: The runtime configuration of instances (scaling units) + of the App Runner service. See Instance Configuration below + for more details. + properties: + cpu: + description: 'Number of CPU units reserved for each instance + of your App Runner service represented as a String. Defaults + to 1024. Valid values: 256|512|1024|2048|4096|(0.25|0.5|1|2|4) + vCPU.' + type: string + instanceRoleArn: + description: ARN of an IAM role that provides permissions + to your App Runner service. These are permissions that your + code needs when it calls any AWS APIs. + type: string + memory: + description: 'Amount of memory, in MB or GB, reserved for + each instance of your App Runner service. Defaults to 2048. 
+ Valid values: 512|1024|2048|3072|4096|6144|8192|10240|12288|(0.5|1|2|3|4|6|8|10|12) + GB.' + type: string + type: object + networkConfiguration: + description: Configuration settings related to network traffic + of the web application that the App Runner service runs. See + Network Configuration below for more details. + properties: + egressConfiguration: + description: Network configuration settings for outbound message + traffic. See Egress Configuration below for more details. + properties: + egressType: + description: 'The type of egress configuration. Valid + values are: DEFAULT and VPC.' + type: string + vpcConnectorArn: + description: The Amazon Resource Name (ARN) of the App + Runner VPC connector that you want to associate with + your App Runner service. Only valid when EgressType + = VPC. + type: string + type: object + ingressConfiguration: + description: Network configuration settings for inbound network + traffic. See Ingress Configuration below for more details. + properties: + isPubliclyAccessible: + description: Specifies whether your App Runner service + is publicly accessible. To make the service publicly + accessible set it to True. To make the service privately + accessible, from only within an Amazon VPC set it to + False. + type: boolean + type: object + ipAddressType: + description: 'App Runner provides you with the option to choose + between Internet Protocol version 4 (IPv4) and dual stack + (IPv4 and IPv6) for your incoming public network configuration. + Valid values: IPV4, DUAL_STACK. Default: IPV4.' + type: string + type: object + observabilityConfiguration: + description: The observability configuration of your service. + See Observability Configuration below for more details. + properties: + observabilityConfigurationArn: + description: ARN of the observability configuration that is + associated with the service. Specified only when observability_enabled + is true. 
+ type: string + observabilityEnabled: + description: When true, an observability configuration resource + is associated with the service. + type: boolean + type: object + serviceId: + description: An alphanumeric ID that App Runner generated for + this service. Unique within the AWS Region. + type: string + serviceName: + description: (Forces new resource) Name of the service. + type: string + serviceUrl: + description: Subdomain URL that App Runner generated for this + service. You can use this URL to access your service web application. + type: string + sourceConfiguration: + description: The source to deploy to the App Runner service. Can + be a code or an image repository. See Source Configuration below + for more details. + properties: + authenticationConfiguration: + description: Describes resources needed to authenticate access + to some source repositories. See Authentication Configuration + below for more details. + properties: + accessRoleArn: + description: ARN of the IAM role that grants the App Runner + service access to a source repository. Required for + ECR image repositories (but not for ECR Public) + type: string + connectionArn: + description: ARN of the App Runner connection that enables + the App Runner service to connect to a source repository. + Required for GitHub code repositories. + type: string + type: object + autoDeploymentsEnabled: + description: Whether continuous integration from the source + repository is enabled for the App Runner service. If set + to true, each repository change (source code commit or new + image version) starts a deployment. Defaults to true. + type: boolean + codeRepository: + description: Description of a source code repository. See + Code Repository below for more details. + properties: + codeConfiguration: + description: Configuration for building and running the + service from a source code repository. See Code Configuration + below for more details. 
+ properties: + codeConfigurationValues: + description: Basic configuration for building and + running the App Runner service. Use this parameter + to quickly launch an App Runner service without + providing an apprunner.yaml file in the source code + repository (or ignoring the file if it exists). + See Code Configuration Values below for more details. + properties: + buildCommand: + description: Command App Runner runs to build + your application. + type: string + port: + description: Port that your application listens + to in the container. Defaults to "8080". + type: string + runtime: + description: 'Runtime environment type for building + and running an App Runner service. Represents + a programming language runtime. Valid values: + PYTHON_3, NODEJS_12, NODEJS_14, NODEJS_16, CORRETTO_8, + CORRETTO_11, GO_1, DOTNET_6, PHP_81, RUBY_31.' + type: string + runtimeEnvironmentSecrets: + additionalProperties: + type: string + description: Secrets and parameters available + to your service as environment variables. A + map of key/value pairs, where the key is the + desired name of the Secret in the environment + (i.e. it does not have to match the name of + the secret in Secrets Manager or SSM Parameter + Store), and the value is the ARN of the secret + from AWS Secrets Manager or the ARN of the parameter + in AWS SSM Parameter Store. + type: object + x-kubernetes-map-type: granular + runtimeEnvironmentVariables: + additionalProperties: + type: string + description: Environment variables available to + your running App Runner service. A map of key/value + pairs. Keys with a prefix of AWSAPPRUNNER are + reserved for system use and aren't valid. + type: object + x-kubernetes-map-type: granular + startCommand: + description: Command App Runner runs to start + the application in the source image. If specified, + this command overrides the Docker image’s default + start command. 
+ type: string + type: object + configurationSource: + description: 'Source of the App Runner configuration. + Valid values: REPOSITORY, API. Values are interpreted + as follows:' + type: string + type: object + repositoryUrl: + description: Location of the repository that contains + the source code. + type: string + sourceCodeVersion: + description: Version that should be used within the source + code repository. See Source Code Version below for more + details. + properties: + type: + description: 'Type of version identifier. For a git-based + repository, branches represent versions. Valid values: + BRANCH.' + type: string + value: + description: Source code version. For a git-based + repository, a branch name maps to a specific version. + App Runner uses the most recent commit to the branch. + type: string + type: object + sourceDirectory: + description: The path of the directory that stores source + code and configuration files. The build and start commands + also execute from here. The path is absolute from root + and, if not specified, defaults to the repository root. + type: string + type: object + imageRepository: + description: Description of a source image repository. See + Image Repository below for more details. + properties: + imageConfiguration: + description: Configuration for running the identified + image. See Image Configuration below for more details. + properties: + port: + description: Port that your application listens to + in the container. Defaults to "8080". + type: string + runtimeEnvironmentSecrets: + additionalProperties: + type: string + description: Secrets and parameters available to your + service as environment variables. A map of key/value + pairs, where the key is the desired name of the + Secret in the environment (i.e. 
it does not have + to match the name of the secret in Secrets Manager + or SSM Parameter Store), and the value is the ARN + of the secret from AWS Secrets Manager or the ARN + of the parameter in AWS SSM Parameter Store. + type: object + x-kubernetes-map-type: granular + runtimeEnvironmentVariables: + additionalProperties: + type: string + description: Environment variables available to your + running App Runner service. A map of key/value pairs. + Keys with a prefix of AWSAPPRUNNER are reserved + for system use and aren't valid. + type: object + x-kubernetes-map-type: granular + startCommand: + description: Command App Runner runs to start the + application in the source image. If specified, this + command overrides the Docker image’s default start + command. + type: string + type: object + imageIdentifier: + description: |- + Identifier of an image. For an image in Amazon Elastic Container Registry (Amazon ECR), this is an image name. For the + image name format, see Pulling an image in the Amazon ECR User Guide. + type: string + imageRepositoryType: + description: 'Type of the image repository. This reflects + the repository provider and whether the repository is + private or public. Valid values: ECR , ECR_PUBLIC.' + type: string + type: object + type: object + status: + description: Current state of the App Runner service. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appstream.aws.upbound.io_directoryconfigs.yaml b/package/crds/appstream.aws.upbound.io_directoryconfigs.yaml index fb3f2ae45b..703407f18a 100644 --- a/package/crds/appstream.aws.upbound.io_directoryconfigs.yaml +++ b/package/crds/appstream.aws.upbound.io_directoryconfigs.yaml @@ -464,3 +464,443 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DirectoryConfig is the Schema for the DirectoryConfigs API. Provides + an AppStream Directory Config + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DirectoryConfigSpec defines the desired state of DirectoryConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + directoryName: + description: Fully qualified name of the directory. + type: string + organizationalUnitDistinguishedNames: + description: Distinguished names of the organizational units for + computer accounts. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: Region is the region you'd like your resource to + be created in. + type: string + serviceAccountCredentials: + description: Configuration block for the name of the directory + and organizational unit (OU) to use to join the directory config + to a Microsoft Active Directory domain. See service_account_credentials + below. + properties: + accountName: + description: 'User name of the account. This account must + have the following privileges: create computer objects, + join computers to the domain, and change/reset the password + on descendant computer objects for the organizational units + specified.' + type: string + accountPasswordSecretRef: + description: Password for the account. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + directoryName: + description: Fully qualified name of the directory. + type: string + organizationalUnitDistinguishedNames: + description: Distinguished names of the organizational units for + computer accounts. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountCredentials: + description: Configuration block for the name of the directory + and organizational unit (OU) to use to join the directory config + to a Microsoft Active Directory domain. See service_account_credentials + below. + properties: + accountName: + description: 'User name of the account. This account must + have the following privileges: create computer objects, + join computers to the domain, and change/reset the password + on descendant computer objects for the organizational units + specified.' + type: string + accountPasswordSecretRef: + description: Password for the account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - accountPasswordSecretRef + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.directoryName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.directoryName) + || (has(self.initProvider) && has(self.initProvider.directoryName))' + - message: spec.forProvider.organizationalUnitDistinguishedNames is a + required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.organizationalUnitDistinguishedNames) + || (has(self.initProvider) && has(self.initProvider.organizationalUnitDistinguishedNames))' + - message: spec.forProvider.serviceAccountCredentials is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceAccountCredentials) + || (has(self.initProvider) && has(self.initProvider.serviceAccountCredentials))' + status: + description: DirectoryConfigStatus defines the observed state of DirectoryConfig. + properties: + atProvider: + properties: + createdTime: + description: Date and time, in UTC and extended RFC 3339 format, + when the directory config was created. + type: string + directoryName: + description: Fully qualified name of the directory. + type: string + id: + description: Unique identifier (ID) of the appstream directory + config. + type: string + organizationalUnitDistinguishedNames: + description: Distinguished names of the organizational units for + computer accounts. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountCredentials: + description: Configuration block for the name of the directory + and organizational unit (OU) to use to join the directory config + to a Microsoft Active Directory domain. See service_account_credentials + below. + properties: + accountName: + description: 'User name of the account. This account must + have the following privileges: create computer objects, + join computers to the domain, and change/reset the password + on descendant computer objects for the organizational units + specified.' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appstream.aws.upbound.io_fleet.yaml b/package/crds/appstream.aws.upbound.io_fleet.yaml index 68f360615c..ba1bb51fed 100644 --- a/package/crds/appstream.aws.upbound.io_fleet.yaml +++ b/package/crds/appstream.aws.upbound.io_fleet.yaml @@ -1002,3 +1002,969 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Fleet is the Schema for the Fleets API. Provides an AppStream + fleet + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FleetSpec defines the desired state of Fleet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + computeCapacity: + description: Configuration block for the desired capacity of the + fleet. See below. + properties: + desiredInstances: + description: Desired number of streaming instances. + type: number + desiredSessions: + description: Desired number of user sessions for a multi-session + fleet. This is not allowed for single-session fleets. + type: number + type: object + description: + description: Description to display. + type: string + disconnectTimeoutInSeconds: + description: Amount of time that a streaming session remains active + after users disconnect. + type: number + displayName: + description: Human-readable friendly name for the AppStream fleet. + type: string + domainJoinInfo: + description: Configuration block for the name of the directory + and organizational unit (OU) to use to join the fleet to a Microsoft + Active Directory domain. See below. + properties: + directoryName: + description: Fully qualified name of the directory (for example, + corp.example.com). 
+ type: string + organizationalUnitDistinguishedName: + description: Distinguished name of the organizational unit + for computer accounts. + type: string + type: object + enableDefaultInternetAccess: + description: Enables or disables default internet access for the + fleet. + type: boolean + fleetType: + description: 'Fleet type. Valid values are: ON_DEMAND, ALWAYS_ON' + type: string + iamRoleArn: + description: ARN of the IAM role to apply to the fleet. + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + idleDisconnectTimeoutInSeconds: + description: Amount of time that users can be idle (inactive) + before they are disconnected from their streaming session and + the disconnect_timeout_in_seconds time interval begins. Defaults + to 60 seconds. + type: number + imageArn: + description: ARN of the public, private, or shared image to use. + type: string + imageName: + description: Name of the image used to create the fleet. + type: string + instanceType: + description: Instance type to use when launching fleet instances. + type: string + maxSessionsPerInstance: + description: The maximum number of user sessions on an instance. + This only applies to multi-session fleets. + type: number + maxUserDurationInSeconds: + description: Maximum amount of time that a streaming session can + remain active, in seconds. + type: number + name: + description: Unique name for the fleet. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + streamView: + description: AppStream 2.0 view that is displayed to your users + when they stream from the fleet. When APP is specified, only + the windows of applications opened by users display. 
When DESKTOP + is specified, the standard desktop that is provided by the operating + system displays. If not specified, defaults to APP. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: Configuration block for the VPC configuration for + the image builder. See below. + properties: + securityGroupIds: + description: Identifiers of the security groups for the fleet + or image builder. + items: + type: string + type: array + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: Identifiers of the subnets to which a network + interface is attached from the fleet instance or image builder + instance. + items: + type: string + type: array + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + computeCapacity: + description: Configuration block for the desired capacity of the + fleet. See below. 
+ properties: + desiredInstances: + description: Desired number of streaming instances. + type: number + desiredSessions: + description: Desired number of user sessions for a multi-session + fleet. This is not allowed for single-session fleets. + type: number + type: object + description: + description: Description to display. + type: string + disconnectTimeoutInSeconds: + description: Amount of time that a streaming session remains active + after users disconnect. + type: number + displayName: + description: Human-readable friendly name for the AppStream fleet. + type: string + domainJoinInfo: + description: Configuration block for the name of the directory + and organizational unit (OU) to use to join the fleet to a Microsoft + Active Directory domain. See below. + properties: + directoryName: + description: Fully qualified name of the directory (for example, + corp.example.com). + type: string + organizationalUnitDistinguishedName: + description: Distinguished name of the organizational unit + for computer accounts. + type: string + type: object + enableDefaultInternetAccess: + description: Enables or disables default internet access for the + fleet. + type: boolean + fleetType: + description: 'Fleet type. Valid values are: ON_DEMAND, ALWAYS_ON' + type: string + iamRoleArn: + description: ARN of the IAM role to apply to the fleet. + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + idleDisconnectTimeoutInSeconds: + description: Amount of time that users can be idle (inactive) + before they are disconnected from their streaming session and + the disconnect_timeout_in_seconds time interval begins. Defaults + to 60 seconds. + type: number + imageArn: + description: ARN of the public, private, or shared image to use. 
+ type: string + imageName: + description: Name of the image used to create the fleet. + type: string + instanceType: + description: Instance type to use when launching fleet instances. + type: string + maxSessionsPerInstance: + description: The maximum number of user sessions on an instance. + This only applies to multi-session fleets. + type: number + maxUserDurationInSeconds: + description: Maximum amount of time that a streaming session can + remain active, in seconds. + type: number + name: + description: Unique name for the fleet. + type: string + streamView: + description: AppStream 2.0 view that is displayed to your users + when they stream from the fleet. When APP is specified, only + the windows of applications opened by users display. When DESKTOP + is specified, the standard desktop that is provided by the operating + system displays. If not specified, defaults to APP. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: Configuration block for the VPC configuration for + the image builder. See below. + properties: + securityGroupIds: + description: Identifiers of the security groups for the fleet + or image builder. + items: + type: string + type: array + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: Identifiers of the subnets to which a network + interface is attached from the fleet instance or image builder + instance. + items: + type: string + type: array + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.computeCapacity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.computeCapacity) + || (has(self.initProvider) && has(self.initProvider.computeCapacity))' + - message: spec.forProvider.instanceType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instanceType) + || (has(self.initProvider) && has(self.initProvider.instanceType))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: FleetStatus defines the observed state of Fleet. + properties: + atProvider: + properties: + arn: + description: ARN of the appstream fleet. + type: string + computeCapacity: + description: Configuration block for the desired capacity of the + fleet. See below. + properties: + available: + description: Number of currently available instances that + can be used to stream sessions. + type: number + desiredInstances: + description: Desired number of streaming instances. + type: number + desiredSessions: + description: Desired number of user sessions for a multi-session + fleet. This is not allowed for single-session fleets. + type: number + inUse: + description: Number of instances in use for streaming. + type: number + running: + description: Total number of simultaneous streaming instances + that are running. 
+ type: number + type: object + createdTime: + description: Date and time, in UTC and extended RFC 3339 format, + when the fleet was created. + type: string + description: + description: Description to display. + type: string + disconnectTimeoutInSeconds: + description: Amount of time that a streaming session remains active + after users disconnect. + type: number + displayName: + description: Human-readable friendly name for the AppStream fleet. + type: string + domainJoinInfo: + description: Configuration block for the name of the directory + and organizational unit (OU) to use to join the fleet to a Microsoft + Active Directory domain. See below. + properties: + directoryName: + description: Fully qualified name of the directory (for example, + corp.example.com). + type: string + organizationalUnitDistinguishedName: + description: Distinguished name of the organizational unit + for computer accounts. + type: string + type: object + enableDefaultInternetAccess: + description: Enables or disables default internet access for the + fleet. + type: boolean + fleetType: + description: 'Fleet type. Valid values are: ON_DEMAND, ALWAYS_ON' + type: string + iamRoleArn: + description: ARN of the IAM role to apply to the fleet. + type: string + id: + description: Unique identifier (ID) of the appstream fleet. + type: string + idleDisconnectTimeoutInSeconds: + description: Amount of time that users can be idle (inactive) + before they are disconnected from their streaming session and + the disconnect_timeout_in_seconds time interval begins. Defaults + to 60 seconds. + type: number + imageArn: + description: ARN of the public, private, or shared image to use. + type: string + imageName: + description: Name of the image used to create the fleet. + type: string + instanceType: + description: Instance type to use when launching fleet instances. + type: string + maxSessionsPerInstance: + description: The maximum number of user sessions on an instance. 
+ This only applies to multi-session fleets. + type: number + maxUserDurationInSeconds: + description: Maximum amount of time that a streaming session can + remain active, in seconds. + type: number + name: + description: Unique name for the fleet. + type: string + state: + description: State of the fleet. Can be STARTING, RUNNING, STOPPING + or STOPPED + type: string + streamView: + description: AppStream 2.0 view that is displayed to your users + when they stream from the fleet. When APP is specified, only + the windows of applications opened by users display. When DESKTOP + is specified, the standard desktop that is provided by the operating + system displays. If not specified, defaults to APP. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: Configuration block for the VPC configuration for + the image builder. See below. + properties: + securityGroupIds: + description: Identifiers of the security groups for the fleet + or image builder. + items: + type: string + type: array + subnetIds: + description: Identifiers of the subnets to which a network + interface is attached from the fleet instance or image builder + instance. + items: + type: string + type: array + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appstream.aws.upbound.io_imagebuilders.yaml b/package/crds/appstream.aws.upbound.io_imagebuilders.yaml index ebdbdcae02..07b69853f7 100644 --- a/package/crds/appstream.aws.upbound.io_imagebuilders.yaml +++ b/package/crds/appstream.aws.upbound.io_imagebuilders.yaml @@ -904,3 +904,877 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ImageBuilder is the Schema for the ImageBuilders API. Provides + an AppStream image builder + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ImageBuilderSpec defines the desired state of ImageBuilder + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessEndpoint: + description: Set of interface VPC endpoint (interface endpoint) + objects. Maximum of 4. See below. + items: + properties: + endpointType: + description: Type of interface endpoint. For valid values, + refer to the AWS documentation. + type: string + vpceId: + description: Identifier (ID) of the interface VPC endpoint. + type: string + type: object + type: array + appstreamAgentVersion: + description: Version of the AppStream 2.0 agent to use for this + image builder. + type: string + description: + description: Description to display. + type: string + displayName: + description: Human-readable friendly name for the AppStream image + builder. + type: string + domainJoinInfo: + description: Configuration block for the name of the directory + and organizational unit (OU) to use to join the image builder + to a Microsoft Active Directory domain. See below. + properties: + directoryName: + description: Fully qualified name of the directory (for example, + corp.example.com). 
+ type: string + organizationalUnitDistinguishedName: + description: Distinguished name of the organizational unit + for computer accounts. + type: string + type: object + enableDefaultInternetAccess: + description: Enables or disables default internet access for the + image builder. + type: boolean + iamRoleArn: + description: ARN of the IAM role to apply to the image builder. + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageArn: + description: ARN of the public, private, or shared image to use. + type: string + instanceType: + description: Instance type to use when launching the image builder. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: Configuration block for the VPC configuration for + the image builder. See below. + properties: + securityGroupIds: + description: Identifiers of the security groups for the image + builder or image builder. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: Identifier of the subnet to which a network interface + is attached from the image builder instance. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessEndpoint: + description: Set of interface VPC endpoint (interface endpoint) + objects. Maximum of 4. See below. + items: + properties: + endpointType: + description: Type of interface endpoint. For valid values, + refer to the AWS documentation. + type: string + vpceId: + description: Identifier (ID) of the interface VPC endpoint. + type: string + type: object + type: array + appstreamAgentVersion: + description: Version of the AppStream 2.0 agent to use for this + image builder. + type: string + description: + description: Description to display. + type: string + displayName: + description: Human-readable friendly name for the AppStream image + builder. + type: string + domainJoinInfo: + description: Configuration block for the name of the directory + and organizational unit (OU) to use to join the image builder + to a Microsoft Active Directory domain. See below. + properties: + directoryName: + description: Fully qualified name of the directory (for example, + corp.example.com). + type: string + organizationalUnitDistinguishedName: + description: Distinguished name of the organizational unit + for computer accounts. + type: string + type: object + enableDefaultInternetAccess: + description: Enables or disables default internet access for the + image builder. 
+ type: boolean + iamRoleArn: + description: ARN of the IAM role to apply to the image builder. + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageArn: + description: ARN of the public, private, or shared image to use. + type: string + instanceType: + description: Instance type to use when launching the image builder. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: Configuration block for the VPC configuration for + the image builder. See below. + properties: + securityGroupIds: + description: Identifiers of the security groups for the image + builder or image builder. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: Identifier of the subnet to which a network interface + is attached from the image builder instance. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.instanceType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instanceType) + || (has(self.initProvider) && has(self.initProvider.instanceType))' + status: + description: ImageBuilderStatus defines the observed state of ImageBuilder. + properties: + atProvider: + properties: + accessEndpoint: + description: Set of interface VPC endpoint (interface endpoint) + objects. Maximum of 4. See below. 
+ items: + properties: + endpointType: + description: Type of interface endpoint. For valid values, + refer to the AWS documentation. + type: string + vpceId: + description: Identifier (ID) of the interface VPC endpoint. + type: string + type: object + type: array + appstreamAgentVersion: + description: Version of the AppStream 2.0 agent to use for this + image builder. + type: string + arn: + description: ARN of the appstream image builder. + type: string + createdTime: + description: Date and time, in UTC and extended RFC 3339 format, + when the image builder was created. + type: string + description: + description: Description to display. + type: string + displayName: + description: Human-readable friendly name for the AppStream image + builder. + type: string + domainJoinInfo: + description: Configuration block for the name of the directory + and organizational unit (OU) to use to join the image builder + to a Microsoft Active Directory domain. See below. + properties: + directoryName: + description: Fully qualified name of the directory (for example, + corp.example.com). + type: string + organizationalUnitDistinguishedName: + description: Distinguished name of the organizational unit + for computer accounts. + type: string + type: object + enableDefaultInternetAccess: + description: Enables or disables default internet access for the + image builder. + type: boolean + iamRoleArn: + description: ARN of the IAM role to apply to the image builder. + type: string + id: + description: Name of the image builder. + type: string + imageArn: + description: ARN of the public, private, or shared image to use. + type: string + imageName: + description: Name of the image used to create the image builder. + type: string + instanceType: + description: Instance type to use when launching the image builder. + type: string + state: + description: State of the image builder. For valid values, refer + to the AWS documentation. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: Configuration block for the VPC configuration for + the image builder. See below. + properties: + securityGroupIds: + description: Identifiers of the security groups for the image + builder or image builder. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: Identifier of the subnet to which a network interface + is attached from the image builder instance. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appstream.aws.upbound.io_stacks.yaml b/package/crds/appstream.aws.upbound.io_stacks.yaml index 0b315a2ba7..27440673ec 100644 --- a/package/crds/appstream.aws.upbound.io_stacks.yaml +++ b/package/crds/appstream.aws.upbound.io_stacks.yaml @@ -702,3 +702,675 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Stack is the Schema for the Stacks API. Provides an AppStream + stack + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StackSpec defines the desired state of Stack + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessEndpoints: + description: |- + Set of configuration blocks defining the interface VPC endpoints. Users of the stack can connect to AppStream 2.0 only through the specified endpoints. + See access_endpoints below. + items: + properties: + endpointType: + description: |- + Type of the interface endpoint. + See the AccessEndpoint AWS API documentation for valid values. + type: string + vpceId: + description: ID of the VPC in which the interface endpoint + is used. + type: string + type: object + type: array + applicationSettings: + description: |- + Settings for application settings persistence. + See application_settings below. + properties: + enabled: + description: Whether application settings should be persisted. + type: boolean + settingsGroup: + description: |- + Name of the settings group. + Required when enabled is true. + Can be up to 100 characters. 
+ type: string + type: object + description: + description: Description for the AppStream stack. + type: string + displayName: + description: Stack name to display. + type: string + embedHostDomains: + description: Domains where AppStream 2.0 streaming sessions can + be embedded in an iframe. You must approve the domains that + you want to host embedded AppStream 2.0 streaming sessions. + items: + type: string + type: array + x-kubernetes-list-type: set + feedbackUrl: + description: URL that users are redirected to after they click + the Send Feedback link. If no URL is specified, no Send Feedback + link is displayed. . + type: string + name: + description: Unique name for the AppStream stack. + type: string + redirectUrl: + description: URL that users are redirected to after their streaming + session ends. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + storageConnectors: + description: |- + Configuration block for the storage connectors to enable. + See storage_connectors below. + items: + properties: + connectorType: + description: |- + Type of storage connector. + Valid values are HOMEFOLDERS, GOOGLE_DRIVE, or ONE_DRIVE. + type: string + domains: + description: Names of the domains for the account. + items: + type: string + type: array + resourceIdentifier: + description: ARN of the storage connector. + type: string + type: object + type: array + streamingExperienceSettings: + description: |- + The streaming protocol you want your stack to prefer. This can be UDP or TCP. Currently, UDP is only supported in the Windows native client. + See streaming_experience_settings below. + properties: + preferredProtocol: + description: |- + The preferred protocol that you want to use while streaming your application. + Valid values are TCP and UDP. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + userSettings: + description: |- + Configuration block for the actions that are enabled or disabled for users during their streaming sessions. If not provided, these settings are configured automatically by AWS. + See user_settings below. + items: + properties: + action: + description: |- + Action that is enabled or disabled. + Valid values are CLIPBOARD_COPY_FROM_LOCAL_DEVICE, CLIPBOARD_COPY_TO_LOCAL_DEVICE, FILE_UPLOAD, FILE_DOWNLOAD, PRINTING_TO_LOCAL_DEVICE, DOMAIN_PASSWORD_SIGNIN, or DOMAIN_SMART_CARD_SIGNIN. + type: string + permission: + description: |- + Whether the action is enabled or disabled. + Valid values are ENABLED or DISABLED. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessEndpoints: + description: |- + Set of configuration blocks defining the interface VPC endpoints. Users of the stack can connect to AppStream 2.0 only through the specified endpoints. + See access_endpoints below. + items: + properties: + endpointType: + description: |- + Type of the interface endpoint. + See the AccessEndpoint AWS API documentation for valid values. + type: string + vpceId: + description: ID of the VPC in which the interface endpoint + is used. 
+ type: string + type: object + type: array + applicationSettings: + description: |- + Settings for application settings persistence. + See application_settings below. + properties: + enabled: + description: Whether application settings should be persisted. + type: boolean + settingsGroup: + description: |- + Name of the settings group. + Required when enabled is true. + Can be up to 100 characters. + type: string + type: object + description: + description: Description for the AppStream stack. + type: string + displayName: + description: Stack name to display. + type: string + embedHostDomains: + description: Domains where AppStream 2.0 streaming sessions can + be embedded in an iframe. You must approve the domains that + you want to host embedded AppStream 2.0 streaming sessions. + items: + type: string + type: array + x-kubernetes-list-type: set + feedbackUrl: + description: URL that users are redirected to after they click + the Send Feedback link. If no URL is specified, no Send Feedback + link is displayed. . + type: string + name: + description: Unique name for the AppStream stack. + type: string + redirectUrl: + description: URL that users are redirected to after their streaming + session ends. + type: string + storageConnectors: + description: |- + Configuration block for the storage connectors to enable. + See storage_connectors below. + items: + properties: + connectorType: + description: |- + Type of storage connector. + Valid values are HOMEFOLDERS, GOOGLE_DRIVE, or ONE_DRIVE. + type: string + domains: + description: Names of the domains for the account. + items: + type: string + type: array + resourceIdentifier: + description: ARN of the storage connector. + type: string + type: object + type: array + streamingExperienceSettings: + description: |- + The streaming protocol you want your stack to prefer. This can be UDP or TCP. Currently, UDP is only supported in the Windows native client. + See streaming_experience_settings below. 
+ properties: + preferredProtocol: + description: |- + The preferred protocol that you want to use while streaming your application. + Valid values are TCP and UDP. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userSettings: + description: |- + Configuration block for the actions that are enabled or disabled for users during their streaming sessions. If not provided, these settings are configured automatically by AWS. + See user_settings below. + items: + properties: + action: + description: |- + Action that is enabled or disabled. + Valid values are CLIPBOARD_COPY_FROM_LOCAL_DEVICE, CLIPBOARD_COPY_TO_LOCAL_DEVICE, FILE_UPLOAD, FILE_DOWNLOAD, PRINTING_TO_LOCAL_DEVICE, DOMAIN_PASSWORD_SIGNIN, or DOMAIN_SMART_CARD_SIGNIN. + type: string + permission: + description: |- + Whether the action is enabled or disabled. + Valid values are ENABLED or DISABLED. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: StackStatus defines the observed state of Stack. + properties: + atProvider: + properties: + accessEndpoints: + description: |- + Set of configuration blocks defining the interface VPC endpoints. Users of the stack can connect to AppStream 2.0 only through the specified endpoints. + See access_endpoints below. + items: + properties: + endpointType: + description: |- + Type of the interface endpoint. + See the AccessEndpoint AWS API documentation for valid values. + type: string + vpceId: + description: ID of the VPC in which the interface endpoint + is used. + type: string + type: object + type: array + applicationSettings: + description: |- + Settings for application settings persistence. 
+ See application_settings below. + properties: + enabled: + description: Whether application settings should be persisted. + type: boolean + settingsGroup: + description: |- + Name of the settings group. + Required when enabled is true. + Can be up to 100 characters. + type: string + type: object + arn: + description: ARN of the appstream stack. + type: string + createdTime: + description: Date and time, in UTC and extended RFC 3339 format, + when the stack was created. + type: string + description: + description: Description for the AppStream stack. + type: string + displayName: + description: Stack name to display. + type: string + embedHostDomains: + description: Domains where AppStream 2.0 streaming sessions can + be embedded in an iframe. You must approve the domains that + you want to host embedded AppStream 2.0 streaming sessions. + items: + type: string + type: array + x-kubernetes-list-type: set + feedbackUrl: + description: URL that users are redirected to after they click + the Send Feedback link. If no URL is specified, no Send Feedback + link is displayed. . + type: string + id: + description: Unique ID of the appstream stack. + type: string + name: + description: Unique name for the AppStream stack. + type: string + redirectUrl: + description: URL that users are redirected to after their streaming + session ends. + type: string + storageConnectors: + description: |- + Configuration block for the storage connectors to enable. + See storage_connectors below. + items: + properties: + connectorType: + description: |- + Type of storage connector. + Valid values are HOMEFOLDERS, GOOGLE_DRIVE, or ONE_DRIVE. + type: string + domains: + description: Names of the domains for the account. + items: + type: string + type: array + resourceIdentifier: + description: ARN of the storage connector. + type: string + type: object + type: array + streamingExperienceSettings: + description: |- + The streaming protocol you want your stack to prefer. This can be UDP or TCP. 
Currently, UDP is only supported in the Windows native client. + See streaming_experience_settings below. + properties: + preferredProtocol: + description: |- + The preferred protocol that you want to use while streaming your application. + Valid values are TCP and UDP. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + userSettings: + description: |- + Configuration block for the actions that are enabled or disabled for users during their streaming sessions. If not provided, these settings are configured automatically by AWS. + See user_settings below. + items: + properties: + action: + description: |- + Action that is enabled or disabled. + Valid values are CLIPBOARD_COPY_FROM_LOCAL_DEVICE, CLIPBOARD_COPY_TO_LOCAL_DEVICE, FILE_UPLOAD, FILE_DOWNLOAD, PRINTING_TO_LOCAL_DEVICE, DOMAIN_PASSWORD_SIGNIN, or DOMAIN_SMART_CARD_SIGNIN. + type: string + permission: + description: |- + Whether the action is enabled or disabled. + Valid values are ENABLED or DISABLED. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appsync.aws.upbound.io_datasources.yaml b/package/crds/appsync.aws.upbound.io_datasources.yaml index d07533c2a9..69df67b8cf 100644 --- a/package/crds/appsync.aws.upbound.io_datasources.yaml +++ b/package/crds/appsync.aws.upbound.io_datasources.yaml @@ -1200,3 +1200,1119 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Datasource is the Schema for the Datasources API. Provides an + AppSync Data Source. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DatasourceSpec defines the desired state of Datasource + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiId: + description: API ID for the GraphQL API for the data source. + type: string + apiIdRef: + description: Reference to a GraphQLAPI in appsync to populate + apiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a GraphQLAPI in appsync to populate + apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Description of the data source. + type: string + dynamodbConfig: + description: DynamoDB settings. 
See DynamoDB Config + properties: + deltaSyncConfig: + description: The DeltaSyncConfig for a versioned data source. + See Delta Sync Config + properties: + baseTableTtl: + description: The number of minutes that an Item is stored + in the data source. + type: number + deltaSyncTableName: + description: The table name. + type: string + deltaSyncTableTtl: + description: The number of minutes that a Delta Sync log + entry is stored in the Delta Sync table. + type: number + type: object + region: + description: AWS region of the DynamoDB table. Defaults to + current region. + type: string + tableName: + description: Name of the DynamoDB table. + type: string + tableNameRef: + description: Reference to a Table in dynamodb to populate + tableName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + tableNameSelector: + description: Selector for a Table in dynamodb to populate + tableName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + useCallerCredentials: + description: Set to true to use Amazon Cognito credentials + with this data source. + type: boolean + versioned: + description: Detects Conflict Detection and Resolution with + this data source. + type: boolean + type: object + elasticsearchConfig: + description: Amazon Elasticsearch settings. See ElasticSearch + Config + properties: + endpoint: + description: HTTP endpoint of the Elasticsearch domain. + type: string + region: + description: AWS region of the DynamoDB table. Defaults to + current region. + type: string + type: object + eventBridgeConfig: + description: AWS EventBridge settings. See Event Bridge Config + properties: + eventBusArn: + description: ARN for the EventBridge bus. + type: string + type: object + httpConfig: + description: HTTP settings. See HTTP Config + properties: + authorizationConfig: + description: Authorization configuration in case the HTTP + endpoint requires authorization. See Authorization Config. 
+ properties: + authorizationType: + description: Authorization type that the HTTP endpoint + requires. Default values is AWS_IAM. + type: string + awsIamConfig: + description: Identity and Access Management (IAM) settings. + See AWS IAM Config. + properties: + signingRegion: + description: Signing Amazon Web Services Region for + IAM authorization. + type: string + signingServiceName: + description: Signing service name for IAM authorization. + type: string + type: object + type: object + endpoint: + description: HTTP endpoint of the Elasticsearch domain. + type: string + type: object + lambdaConfig: + description: AWS Lambda settings. See Lambda Config + properties: + functionArn: + description: ARN for the Lambda function. + type: string + type: object + opensearchserviceConfig: + description: Amazon OpenSearch Service settings. See OpenSearch + Service Config + properties: + endpoint: + description: HTTP endpoint of the Elasticsearch domain. + type: string + region: + description: AWS region of the DynamoDB table. Defaults to + current region. + type: string + type: object + region: + description: |- + AWS region of the DynamoDB table. Defaults to current region. + Region is the region you'd like your resource to be created in. + type: string + relationalDatabaseConfig: + description: AWS RDS settings. See Relational Database Config + properties: + httpEndpointConfig: + description: Amazon RDS HTTP endpoint configuration. See HTTP + Endpoint Config. + properties: + awsSecretStoreArn: + description: AWS secret store ARN for database credentials. + type: string + databaseName: + description: Logical database name. + type: string + dbClusterIdentifier: + description: Amazon RDS cluster identifier. + type: string + region: + description: AWS region of the DynamoDB table. Defaults + to current region. + type: string + schema: + description: Logical schema name. + type: string + type: object + sourceType: + description: 'Source type for the relational database. 
Valid + values: RDS_HTTP_ENDPOINT.' + type: string + type: object + serviceRoleArn: + description: IAM service role ARN for the data source. Required + if type is specified as AWS_LAMBDA, AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, + AMAZON_EVENTBRIDGE, or AMAZON_OPENSEARCH_SERVICE. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: 'Type of the Data Source. Valid values: AWS_LAMBDA, + AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, HTTP, NONE, RELATIONAL_DATABASE, + AMAZON_EVENTBRIDGE, AMAZON_OPENSEARCH_SERVICE.' + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the data source. + type: string + dynamodbConfig: + description: DynamoDB settings. See DynamoDB Config + properties: + deltaSyncConfig: + description: The DeltaSyncConfig for a versioned data source. + See Delta Sync Config + properties: + baseTableTtl: + description: The number of minutes that an Item is stored + in the data source. + type: number + deltaSyncTableName: + description: The table name. 
+ type: string + deltaSyncTableTtl: + description: The number of minutes that a Delta Sync log + entry is stored in the Delta Sync table. + type: number + type: object + tableName: + description: Name of the DynamoDB table. + type: string + tableNameRef: + description: Reference to a Table in dynamodb to populate + tableName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + tableNameSelector: + description: Selector for a Table in dynamodb to populate + tableName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + useCallerCredentials: + description: Set to true to use Amazon Cognito credentials + with this data source. + type: boolean + versioned: + description: Detects Conflict Detection and Resolution with + this data source. + type: boolean + type: object + elasticsearchConfig: + description: Amazon Elasticsearch settings. See ElasticSearch + Config + properties: + endpoint: + description: HTTP endpoint of the Elasticsearch domain. + type: string + type: object + eventBridgeConfig: + description: AWS EventBridge settings. See Event Bridge Config + properties: + eventBusArn: + description: ARN for the EventBridge bus. + type: string + type: object + httpConfig: + description: HTTP settings. See HTTP Config + properties: + authorizationConfig: + description: Authorization configuration in case the HTTP + endpoint requires authorization. See Authorization Config. + properties: + authorizationType: + description: Authorization type that the HTTP endpoint + requires. Default values is AWS_IAM. + type: string + awsIamConfig: + description: Identity and Access Management (IAM) settings. + See AWS IAM Config. + properties: + signingRegion: + description: Signing Amazon Web Services Region for + IAM authorization. + type: string + signingServiceName: + description: Signing service name for IAM authorization. + type: string + type: object + type: object + endpoint: + description: HTTP endpoint of the Elasticsearch domain. + type: string + type: object + lambdaConfig: + description: AWS Lambda settings. 
See Lambda Config + properties: + functionArn: + description: ARN for the Lambda function. + type: string + type: object + opensearchserviceConfig: + description: Amazon OpenSearch Service settings. See OpenSearch + Service Config + properties: + endpoint: + description: HTTP endpoint of the Elasticsearch domain. + type: string + type: object + relationalDatabaseConfig: + description: AWS RDS settings. See Relational Database Config + properties: + httpEndpointConfig: + description: Amazon RDS HTTP endpoint configuration. See HTTP + Endpoint Config. + properties: + awsSecretStoreArn: + description: AWS secret store ARN for database credentials. + type: string + databaseName: + description: Logical database name. + type: string + dbClusterIdentifier: + description: Amazon RDS cluster identifier. + type: string + schema: + description: Logical schema name. + type: string + type: object + sourceType: + description: 'Source type for the relational database. Valid + values: RDS_HTTP_ENDPOINT.' + type: string + type: object + serviceRoleArn: + description: IAM service role ARN for the data source. Required + if type is specified as AWS_LAMBDA, AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, + AMAZON_EVENTBRIDGE, or AMAZON_OPENSEARCH_SERVICE. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: 'Type of the Data Source. Valid values: AWS_LAMBDA, + AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, HTTP, NONE, RELATIONAL_DATABASE, + AMAZON_EVENTBRIDGE, AMAZON_OPENSEARCH_SERVICE.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: DatasourceStatus defines the observed state of Datasource. + properties: + atProvider: + properties: + apiId: + description: API ID for the GraphQL API for the data source. + type: string + arn: + description: ARN + type: string + description: + description: Description of the data source. 
+ type: string + dynamodbConfig: + description: DynamoDB settings. See DynamoDB Config + properties: + deltaSyncConfig: + description: The DeltaSyncConfig for a versioned data source. + See Delta Sync Config + properties: + baseTableTtl: + description: The number of minutes that an Item is stored + in the data source. + type: number + deltaSyncTableName: + description: The table name. + type: string + deltaSyncTableTtl: + description: The number of minutes that a Delta Sync log + entry is stored in the Delta Sync table. + type: number + type: object + region: + description: AWS region of the DynamoDB table. Defaults to + current region. + type: string + tableName: + description: Name of the DynamoDB table. + type: string + useCallerCredentials: + description: Set to true to use Amazon Cognito credentials + with this data source. + type: boolean + versioned: + description: Detects Conflict Detection and Resolution with + this data source. + type: boolean + type: object + elasticsearchConfig: + description: Amazon Elasticsearch settings. See ElasticSearch + Config + properties: + endpoint: + description: HTTP endpoint of the Elasticsearch domain. + type: string + region: + description: AWS region of the DynamoDB table. Defaults to + current region. + type: string + type: object + eventBridgeConfig: + description: AWS EventBridge settings. See Event Bridge Config + properties: + eventBusArn: + description: ARN for the EventBridge bus. + type: string + type: object + httpConfig: + description: HTTP settings. See HTTP Config + properties: + authorizationConfig: + description: Authorization configuration in case the HTTP + endpoint requires authorization. See Authorization Config. + properties: + authorizationType: + description: Authorization type that the HTTP endpoint + requires. Default values is AWS_IAM. + type: string + awsIamConfig: + description: Identity and Access Management (IAM) settings. + See AWS IAM Config. 
+ properties: + signingRegion: + description: Signing Amazon Web Services Region for + IAM authorization. + type: string + signingServiceName: + description: Signing service name for IAM authorization. + type: string + type: object + type: object + endpoint: + description: HTTP endpoint of the Elasticsearch domain. + type: string + type: object + id: + type: string + lambdaConfig: + description: AWS Lambda settings. See Lambda Config + properties: + functionArn: + description: ARN for the Lambda function. + type: string + type: object + opensearchserviceConfig: + description: Amazon OpenSearch Service settings. See OpenSearch + Service Config + properties: + endpoint: + description: HTTP endpoint of the Elasticsearch domain. + type: string + region: + description: AWS region of the DynamoDB table. Defaults to + current region. + type: string + type: object + relationalDatabaseConfig: + description: AWS RDS settings. See Relational Database Config + properties: + httpEndpointConfig: + description: Amazon RDS HTTP endpoint configuration. See HTTP + Endpoint Config. + properties: + awsSecretStoreArn: + description: AWS secret store ARN for database credentials. + type: string + databaseName: + description: Logical database name. + type: string + dbClusterIdentifier: + description: Amazon RDS cluster identifier. + type: string + region: + description: AWS region of the DynamoDB table. Defaults + to current region. + type: string + schema: + description: Logical schema name. + type: string + type: object + sourceType: + description: 'Source type for the relational database. Valid + values: RDS_HTTP_ENDPOINT.' + type: string + type: object + serviceRoleArn: + description: IAM service role ARN for the data source. Required + if type is specified as AWS_LAMBDA, AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, + AMAZON_EVENTBRIDGE, or AMAZON_OPENSEARCH_SERVICE. + type: string + type: + description: 'Type of the Data Source. 
Valid values: AWS_LAMBDA, + AMAZON_DYNAMODB, AMAZON_ELASTICSEARCH, HTTP, NONE, RELATIONAL_DATABASE, + AMAZON_EVENTBRIDGE, AMAZON_OPENSEARCH_SERVICE.' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appsync.aws.upbound.io_functions.yaml b/package/crds/appsync.aws.upbound.io_functions.yaml index 9428554790..a5fa66865e 100644 --- a/package/crds/appsync.aws.upbound.io_functions.yaml +++ b/package/crds/appsync.aws.upbound.io_functions.yaml @@ -883,3 +883,850 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Function is the Schema for the Functions API. Provides an AppSync + Function. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionSpec defines the desired state of Function + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiId: + description: ID of the associated AppSync API. + type: string + apiIdRef: + description: Reference to a GraphQLAPI in appsync to populate + apiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a GraphQLAPI in appsync to populate + apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + code: + description: The function code that contains the request and response + functions. When code is used, the runtime is required. The runtime + value must be APPSYNC_JS. + type: string + dataSource: + description: Function data source name. + type: string + dataSourceRef: + description: Reference to a Datasource in appsync to populate + dataSource. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataSourceSelector: + description: Selector for a Datasource in appsync to populate + dataSource. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Function description. + type: string + functionVersion: + description: Version of the request mapping template. 
Currently + the supported value is 2018-05-29. Does not apply when specifying + code. + type: string + maxBatchSize: + description: Maximum batching size for a resolver. Valid values + are between 0 and 2000. + type: number + name: + description: Function name. The function name does not have to + be unique. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + requestMappingTemplate: + description: Function request mapping template. Functions support + only the 2018-05-29 version of the request mapping template. + type: string + responseMappingTemplate: + description: Function response mapping template. + type: string + runtime: + description: Describes a runtime used by an AWS AppSync pipeline + resolver or AWS AppSync function. Specifies the name and version + of the runtime to use. Note that if a runtime is specified, + code must also be specified. See Runtime. + properties: + name: + description: Function name. The function name does not have + to be unique. + type: string + runtimeVersion: + description: The version of the runtime to use. Currently, + the only allowed version is 1.0.0. + type: string + type: object + syncConfig: + description: Describes a Sync configuration for a resolver. See + Sync Config. + properties: + conflictDetection: + description: Conflict Detection strategy to use. Valid values + are NONE and VERSION. + type: string + conflictHandler: + description: Conflict Resolution strategy to perform in the + event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, + AUTOMERGE, and LAMBDA. + type: string + lambdaConflictHandlerConfig: + description: Lambda Conflict Handler Config when configuring + LAMBDA as the Conflict Handler. See Lambda Conflict Handler + Config. + properties: + lambdaConflictHandlerArn: + description: ARN for the Lambda function to use as the + Conflict Handler. 
+ type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + apiId: + description: ID of the associated AppSync API. + type: string + apiIdRef: + description: Reference to a GraphQLAPI in appsync to populate + apiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a GraphQLAPI in appsync to populate + apiId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + code: + description: The function code that contains the request and response + functions. When code is used, the runtime is required. The runtime + value must be APPSYNC_JS. + type: string + dataSource: + description: Function data source name. + type: string + dataSourceRef: + description: Reference to a Datasource in appsync to populate + dataSource. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataSourceSelector: + description: Selector for a Datasource in appsync to populate + dataSource. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Function description. + type: string + functionVersion: + description: Version of the request mapping template. Currently + the supported value is 2018-05-29. Does not apply when specifying + code. + type: string + maxBatchSize: + description: Maximum batching size for a resolver. Valid values + are between 0 and 2000. 
+ type: number + name: + description: Function name. The function name does not have to + be unique. + type: string + requestMappingTemplate: + description: Function request mapping template. Functions support + only the 2018-05-29 version of the request mapping template. + type: string + responseMappingTemplate: + description: Function response mapping template. + type: string + runtime: + description: Describes a runtime used by an AWS AppSync pipeline + resolver or AWS AppSync function. Specifies the name and version + of the runtime to use. Note that if a runtime is specified, + code must also be specified. See Runtime. + properties: + name: + description: Function name. The function name does not have + to be unique. + type: string + runtimeVersion: + description: The version of the runtime to use. Currently, + the only allowed version is 1.0.0. + type: string + type: object + syncConfig: + description: Describes a Sync configuration for a resolver. See + Sync Config. + properties: + conflictDetection: + description: Conflict Detection strategy to use. Valid values + are NONE and VERSION. + type: string + conflictHandler: + description: Conflict Resolution strategy to perform in the + event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, + AUTOMERGE, and LAMBDA. + type: string + lambdaConflictHandlerConfig: + description: Lambda Conflict Handler Config when configuring + LAMBDA as the Conflict Handler. See Lambda Conflict Handler + Config. + properties: + lambdaConflictHandlerArn: + description: ARN for the Lambda function to use as the + Conflict Handler. + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: FunctionStatus defines the observed state of Function. + properties: + atProvider: + properties: + apiId: + description: ID of the associated AppSync API. + type: string + arn: + description: ARN of the Function object. 
+ type: string + code: + description: The function code that contains the request and response + functions. When code is used, the runtime is required. The runtime + value must be APPSYNC_JS. + type: string + dataSource: + description: Function data source name. + type: string + description: + description: Function description. + type: string + functionId: + description: Unique ID representing the Function object. + type: string + functionVersion: + description: Version of the request mapping template. Currently + the supported value is 2018-05-29. Does not apply when specifying + code. + type: string + id: + description: API Function ID (Formatted as ApiId-FunctionId) + type: string + maxBatchSize: + description: Maximum batching size for a resolver. Valid values + are between 0 and 2000. + type: number + name: + description: Function name. The function name does not have to + be unique. + type: string + requestMappingTemplate: + description: Function request mapping template. Functions support + only the 2018-05-29 version of the request mapping template. + type: string + responseMappingTemplate: + description: Function response mapping template. + type: string + runtime: + description: Describes a runtime used by an AWS AppSync pipeline + resolver or AWS AppSync function. Specifies the name and version + of the runtime to use. Note that if a runtime is specified, + code must also be specified. See Runtime. + properties: + name: + description: Function name. The function name does not have + to be unique. + type: string + runtimeVersion: + description: The version of the runtime to use. Currently, + the only allowed version is 1.0.0. + type: string + type: object + syncConfig: + description: Describes a Sync configuration for a resolver. See + Sync Config. + properties: + conflictDetection: + description: Conflict Detection strategy to use. Valid values + are NONE and VERSION. 
+ type: string + conflictHandler: + description: Conflict Resolution strategy to perform in the + event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, + AUTOMERGE, and LAMBDA. + type: string + lambdaConflictHandlerConfig: + description: Lambda Conflict Handler Config when configuring + LAMBDA as the Conflict Handler. See Lambda Conflict Handler + Config. + properties: + lambdaConflictHandlerArn: + description: ARN for the Lambda function to use as the + Conflict Handler. + type: string + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appsync.aws.upbound.io_graphqlapis.yaml b/package/crds/appsync.aws.upbound.io_graphqlapis.yaml index d201de8f34..f1d0ffe245 100644 --- a/package/crds/appsync.aws.upbound.io_graphqlapis.yaml +++ b/package/crds/appsync.aws.upbound.io_graphqlapis.yaml @@ -1344,3 +1344,1278 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GraphQLAPI is the Schema for the GraphQLAPIs API. Provides an + AppSync GraphQL API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GraphQLAPISpec defines the desired state of GraphQLAPI + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalAuthenticationProvider: + description: One or more additional authentication providers for + the GraphqlApi. Defined below. + items: + properties: + authenticationType: + description: 'Authentication type. Valid values: API_KEY, + AWS_IAM, AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA' + type: string + lambdaAuthorizerConfig: + description: Nested argument containing Lambda authorizer + configuration. Defined below. + properties: + authorizerResultTtlInSeconds: + description: Number of seconds a response should be + cached for. The default is 5 minutes (300 seconds). + The Lambda function can override this by returning + a ttlOverride key in its response. A value of 0 disables + caching of responses. Minimum value of 0. Maximum + value of 3600. + type: number + authorizerUri: + description: 'ARN of the Lambda function to be called + for authorization. Note: This Lambda function must + have a resource-based policy assigned to it, to allow + lambda:InvokeFunction from service principal appsync.amazonaws.com.' 
+ type: string + identityValidationExpression: + description: Regular expression for validation of tokens + before the Lambda function is called. + type: string + type: object + openidConnectConfig: + description: Nested argument containing OpenID Connect configuration. + Defined below. + properties: + authTtl: + description: Number of milliseconds a token is valid + after being authenticated. + type: number + clientId: + description: Client identifier of the Relying party + at the OpenID identity provider. This identifier is + typically obtained when the Relying party is registered + with the OpenID identity provider. You can specify + a regular expression so the AWS AppSync can validate + against multiple client identifiers at a time. + type: string + iatTtl: + description: Number of milliseconds a token is valid + after being issued to a user. + type: number + issuer: + description: Issuer for the OpenID Connect configuration. + The issuer returned by discovery MUST exactly match + the value of iss in the ID Token. + type: string + type: object + userPoolConfig: + description: Amazon Cognito User Pool configuration. Defined + below. + properties: + appIdClientRegex: + description: Regular expression for validating the incoming + Amazon Cognito User Pool app client ID. + type: string + awsRegion: + description: AWS region in which the user pool was created. + type: string + userPoolId: + description: User pool ID. + type: string + type: object + type: object + type: array + authenticationType: + description: 'Authentication type. Valid values: API_KEY, AWS_IAM, + AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA' + type: string + introspectionConfig: + description: Sets the value of the GraphQL API to enable (ENABLED) + or disable (DISABLED) introspection. If no value is provided, + the introspection configuration will be set to ENABLED by default. 
+ This field will produce an error if the operation attempts to + use the introspection feature while this field is disabled. + For more information about introspection, see GraphQL introspection. + type: string + lambdaAuthorizerConfig: + description: Nested argument containing Lambda authorizer configuration. + Defined below. + properties: + authorizerResultTtlInSeconds: + description: Number of seconds a response should be cached + for. The default is 5 minutes (300 seconds). The Lambda + function can override this by returning a ttlOverride key + in its response. A value of 0 disables caching of responses. + Minimum value of 0. Maximum value of 3600. + type: number + authorizerUri: + description: 'ARN of the Lambda function to be called for + authorization. Note: This Lambda function must have a resource-based + policy assigned to it, to allow lambda:InvokeFunction from + service principal appsync.amazonaws.com.' + type: string + identityValidationExpression: + description: Regular expression for validation of tokens before + the Lambda function is called. + type: string + type: object + logConfig: + description: Nested argument containing logging configuration. + Defined below. + properties: + cloudwatchLogsRoleArn: + description: Amazon Resource Name of the service role that + AWS AppSync will assume to publish to Amazon CloudWatch + logs in your account. + type: string + cloudwatchLogsRoleArnRef: + description: Reference to a Role in iam to populate cloudwatchLogsRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudwatchLogsRoleArnSelector: + description: Selector for a Role in iam to populate cloudwatchLogsRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + excludeVerboseContent: + description: 'Set to TRUE to exclude sections that contain + information such as headers, context, and evaluated mapping + templates, regardless of logging level. Valid values: true, + false. Default value: false' + type: boolean + fieldLogLevel: + description: 'Field logging level. Valid values: ALL, ERROR, + NONE.' 
+ type: string + type: object + name: + description: User-supplied name for the GraphqlApi. + type: string + openidConnectConfig: + description: Nested argument containing OpenID Connect configuration. + Defined below. + properties: + authTtl: + description: Number of milliseconds a token is valid after + being authenticated. + type: number + clientId: + description: Client identifier of the Relying party at the + OpenID identity provider. This identifier is typically obtained + when the Relying party is registered with the OpenID identity + provider. You can specify a regular expression so the AWS + AppSync can validate against multiple client identifiers + at a time. + type: string + iatTtl: + description: Number of milliseconds a token is valid after + being issued to a user. + type: number + issuer: + description: Issuer for the OpenID Connect configuration. + The issuer returned by discovery MUST exactly match the + value of iss in the ID Token. + type: string + type: object + queryDepthLimit: + description: The maximum depth a query can have in a single request. + Depth refers to the amount of nested levels allowed in the body + of query. The default value is 0 (or unspecified), which indicates + there's no depth limit. If you set a limit, it can be between + 1 and 75 nested levels. This field will produce a limit error + if the operation falls out of bounds. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resolverCountLimit: + description: The maximum number of resolvers that can be invoked + in a single request. The default value is 0 (or unspecified), + which will set the limit to 10000. When specified, the limit + value can be between 1 and 10000. This field will produce a + limit error if the operation falls out of bounds. + type: number + schema: + description: Schema definition, in GraphQL schema language format. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userPoolConfig: + description: Amazon Cognito User Pool configuration. Defined below. + properties: + appIdClientRegex: + description: Regular expression for validating the incoming + Amazon Cognito User Pool app client ID. + type: string + awsRegion: + description: AWS region in which the user pool was created. + type: string + defaultAction: + description: 'Action that you want your GraphQL API to take + when a request that uses Amazon Cognito User Pool authentication + doesn''t match the Amazon Cognito User Pool configuration. + Valid: ALLOW and DENY' + type: string + userPoolId: + description: User pool ID. + type: string + userPoolIdRef: + description: Reference to a UserPool in cognitoidp to populate + userPoolId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolIdSelector: + description: Selector for a UserPool in cognitoidp to populate + userPoolId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + visibility: + description: Sets the value of the GraphQL API to public (GLOBAL) + or private (PRIVATE). If no value is provided, the visibility + will be set to GLOBAL by default. This value cannot be changed + once the API has been created. + type: string + xrayEnabled: + description: Whether tracing with X-ray is enabled. Defaults to + false. + type: boolean + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalAuthenticationProvider: + description: One or more additional authentication providers for + the GraphqlApi. Defined below. + items: + properties: + authenticationType: + description: 'Authentication type. Valid values: API_KEY, + AWS_IAM, AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA' + type: string + lambdaAuthorizerConfig: + description: Nested argument containing Lambda authorizer + configuration. Defined below. + properties: + authorizerResultTtlInSeconds: + description: Number of seconds a response should be + cached for. The default is 5 minutes (300 seconds). + The Lambda function can override this by returning + a ttlOverride key in its response. A value of 0 disables + caching of responses. Minimum value of 0. Maximum + value of 3600. + type: number + authorizerUri: + description: 'ARN of the Lambda function to be called + for authorization. Note: This Lambda function must + have a resource-based policy assigned to it, to allow + lambda:InvokeFunction from service principal appsync.amazonaws.com.' + type: string + identityValidationExpression: + description: Regular expression for validation of tokens + before the Lambda function is called. + type: string + type: object + openidConnectConfig: + description: Nested argument containing OpenID Connect configuration. + Defined below. + properties: + authTtl: + description: Number of milliseconds a token is valid + after being authenticated. + type: number + clientId: + description: Client identifier of the Relying party + at the OpenID identity provider. This identifier is + typically obtained when the Relying party is registered + with the OpenID identity provider. 
You can specify + a regular expression so the AWS AppSync can validate + against multiple client identifiers at a time. + type: string + iatTtl: + description: Number of milliseconds a token is valid + after being issued to a user. + type: number + issuer: + description: Issuer for the OpenID Connect configuration. + The issuer returned by discovery MUST exactly match + the value of iss in the ID Token. + type: string + type: object + userPoolConfig: + description: Amazon Cognito User Pool configuration. Defined + below. + properties: + appIdClientRegex: + description: Regular expression for validating the incoming + Amazon Cognito User Pool app client ID. + type: string + awsRegion: + description: AWS region in which the user pool was created. + type: string + userPoolId: + description: User pool ID. + type: string + type: object + type: object + type: array + authenticationType: + description: 'Authentication type. Valid values: API_KEY, AWS_IAM, + AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA' + type: string + introspectionConfig: + description: Sets the value of the GraphQL API to enable (ENABLED) + or disable (DISABLED) introspection. If no value is provided, + the introspection configuration will be set to ENABLED by default. + This field will produce an error if the operation attempts to + use the introspection feature while this field is disabled. + For more information about introspection, see GraphQL introspection. + type: string + lambdaAuthorizerConfig: + description: Nested argument containing Lambda authorizer configuration. + Defined below. + properties: + authorizerResultTtlInSeconds: + description: Number of seconds a response should be cached + for. The default is 5 minutes (300 seconds). The Lambda + function can override this by returning a ttlOverride key + in its response. A value of 0 disables caching of responses. + Minimum value of 0. Maximum value of 3600. 
+ type: number + authorizerUri: + description: 'ARN of the Lambda function to be called for + authorization. Note: This Lambda function must have a resource-based + policy assigned to it, to allow lambda:InvokeFunction from + service principal appsync.amazonaws.com.' + type: string + identityValidationExpression: + description: Regular expression for validation of tokens before + the Lambda function is called. + type: string + type: object + logConfig: + description: Nested argument containing logging configuration. + Defined below. + properties: + cloudwatchLogsRoleArn: + description: Amazon Resource Name of the service role that + AWS AppSync will assume to publish to Amazon CloudWatch + logs in your account. + type: string + cloudwatchLogsRoleArnRef: + description: Reference to a Role in iam to populate cloudwatchLogsRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudwatchLogsRoleArnSelector: + description: Selector for a Role in iam to populate cloudwatchLogsRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + excludeVerboseContent: + description: 'Set to TRUE to exclude sections that contain + information such as headers, context, and evaluated mapping + templates, regardless of logging level. Valid values: true, + false. Default value: false' + type: boolean + fieldLogLevel: + description: 'Field logging level. Valid values: ALL, ERROR, + NONE.' + type: string + type: object + name: + description: User-supplied name for the GraphqlApi. + type: string + openidConnectConfig: + description: Nested argument containing OpenID Connect configuration. + Defined below. + properties: + authTtl: + description: Number of milliseconds a token is valid after + being authenticated. + type: number + clientId: + description: Client identifier of the Relying party at the + OpenID identity provider. This identifier is typically obtained + when the Relying party is registered with the OpenID identity + provider. You can specify a regular expression so the AWS + AppSync can validate against multiple client identifiers + at a time. 
+ type: string + iatTtl: + description: Number of milliseconds a token is valid after + being issued to a user. + type: number + issuer: + description: Issuer for the OpenID Connect configuration. + The issuer returned by discovery MUST exactly match the + value of iss in the ID Token. + type: string + type: object + queryDepthLimit: + description: The maximum depth a query can have in a single request. + Depth refers to the amount of nested levels allowed in the body + of query. The default value is 0 (or unspecified), which indicates + there's no depth limit. If you set a limit, it can be between + 1 and 75 nested levels. This field will produce a limit error + if the operation falls out of bounds. + type: number + resolverCountLimit: + description: The maximum number of resolvers that can be invoked + in a single request. The default value is 0 (or unspecified), + which will set the limit to 10000. When specified, the limit + value can be between 1 and 10000. This field will produce a + limit error if the operation falls out of bounds. + type: number + schema: + description: Schema definition, in GraphQL schema language format. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userPoolConfig: + description: Amazon Cognito User Pool configuration. Defined below. + properties: + appIdClientRegex: + description: Regular expression for validating the incoming + Amazon Cognito User Pool app client ID. + type: string + awsRegion: + description: AWS region in which the user pool was created. + type: string + defaultAction: + description: 'Action that you want your GraphQL API to take + when a request that uses Amazon Cognito User Pool authentication + doesn''t match the Amazon Cognito User Pool configuration. + Valid: ALLOW and DENY' + type: string + userPoolId: + description: User pool ID. 
+ type: string + userPoolIdRef: + description: Reference to a UserPool in cognitoidp to populate + userPoolId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolIdSelector: + description: Selector for a UserPool in cognitoidp to populate + userPoolId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + visibility: + description: Sets the value of the GraphQL API to public (GLOBAL) + or private (PRIVATE). If no value is provided, the visibility + will be set to GLOBAL by default. This value cannot be changed + once the API has been created. + type: string + xrayEnabled: + description: Whether tracing with X-ray is enabled. Defaults to + false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.authenticationType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authenticationType) + || (has(self.initProvider) && has(self.initProvider.authenticationType))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: GraphQLAPIStatus defines the observed state of GraphQLAPI. + properties: + atProvider: + properties: + additionalAuthenticationProvider: + description: One or more additional authentication providers for + the GraphqlApi. Defined below. + items: + properties: + authenticationType: + description: 'Authentication type. Valid values: API_KEY, + AWS_IAM, AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA' + type: string + lambdaAuthorizerConfig: + description: Nested argument containing Lambda authorizer + configuration. Defined below. + properties: + authorizerResultTtlInSeconds: + description: Number of seconds a response should be + cached for. The default is 5 minutes (300 seconds). 
+ The Lambda function can override this by returning + a ttlOverride key in its response. A value of 0 disables + caching of responses. Minimum value of 0. Maximum + value of 3600. + type: number + authorizerUri: + description: 'ARN of the Lambda function to be called + for authorization. Note: This Lambda function must + have a resource-based policy assigned to it, to allow + lambda:InvokeFunction from service principal appsync.amazonaws.com.' + type: string + identityValidationExpression: + description: Regular expression for validation of tokens + before the Lambda function is called. + type: string + type: object + openidConnectConfig: + description: Nested argument containing OpenID Connect configuration. + Defined below. + properties: + authTtl: + description: Number of milliseconds a token is valid + after being authenticated. + type: number + clientId: + description: Client identifier of the Relying party + at the OpenID identity provider. This identifier is + typically obtained when the Relying party is registered + with the OpenID identity provider. You can specify + a regular expression so the AWS AppSync can validate + against multiple client identifiers at a time. + type: string + iatTtl: + description: Number of milliseconds a token is valid + after being issued to a user. + type: number + issuer: + description: Issuer for the OpenID Connect configuration. + The issuer returned by discovery MUST exactly match + the value of iss in the ID Token. + type: string + type: object + userPoolConfig: + description: Amazon Cognito User Pool configuration. Defined + below. + properties: + appIdClientRegex: + description: Regular expression for validating the incoming + Amazon Cognito User Pool app client ID. + type: string + awsRegion: + description: AWS region in which the user pool was created. + type: string + userPoolId: + description: User pool ID. 
+ type: string + type: object + type: object + type: array + arn: + description: ARN + type: string + authenticationType: + description: 'Authentication type. Valid values: API_KEY, AWS_IAM, + AMAZON_COGNITO_USER_POOLS, OPENID_CONNECT, AWS_LAMBDA' + type: string + id: + description: API ID + type: string + introspectionConfig: + description: Sets the value of the GraphQL API to enable (ENABLED) + or disable (DISABLED) introspection. If no value is provided, + the introspection configuration will be set to ENABLED by default. + This field will produce an error if the operation attempts to + use the introspection feature while this field is disabled. + For more information about introspection, see GraphQL introspection. + type: string + lambdaAuthorizerConfig: + description: Nested argument containing Lambda authorizer configuration. + Defined below. + properties: + authorizerResultTtlInSeconds: + description: Number of seconds a response should be cached + for. The default is 5 minutes (300 seconds). The Lambda + function can override this by returning a ttlOverride key + in its response. A value of 0 disables caching of responses. + Minimum value of 0. Maximum value of 3600. + type: number + authorizerUri: + description: 'ARN of the Lambda function to be called for + authorization. Note: This Lambda function must have a resource-based + policy assigned to it, to allow lambda:InvokeFunction from + service principal appsync.amazonaws.com.' + type: string + identityValidationExpression: + description: Regular expression for validation of tokens before + the Lambda function is called. + type: string + type: object + logConfig: + description: Nested argument containing logging configuration. + Defined below. + properties: + cloudwatchLogsRoleArn: + description: Amazon Resource Name of the service role that + AWS AppSync will assume to publish to Amazon CloudWatch + logs in your account. 
+ type: string + excludeVerboseContent: + description: 'Set to TRUE to exclude sections that contain + information such as headers, context, and evaluated mapping + templates, regardless of logging level. Valid values: true, + false. Default value: false' + type: boolean + fieldLogLevel: + description: 'Field logging level. Valid values: ALL, ERROR, + NONE.' + type: string + type: object + name: + description: User-supplied name for the GraphqlApi. + type: string + openidConnectConfig: + description: Nested argument containing OpenID Connect configuration. + Defined below. + properties: + authTtl: + description: Number of milliseconds a token is valid after + being authenticated. + type: number + clientId: + description: Client identifier of the Relying party at the + OpenID identity provider. This identifier is typically obtained + when the Relying party is registered with the OpenID identity + provider. You can specify a regular expression so the AWS + AppSync can validate against multiple client identifiers + at a time. + type: string + iatTtl: + description: Number of milliseconds a token is valid after + being issued to a user. + type: number + issuer: + description: Issuer for the OpenID Connect configuration. + The issuer returned by discovery MUST exactly match the + value of iss in the ID Token. + type: string + type: object + queryDepthLimit: + description: The maximum depth a query can have in a single request. + Depth refers to the amount of nested levels allowed in the body + of query. The default value is 0 (or unspecified), which indicates + there's no depth limit. If you set a limit, it can be between + 1 and 75 nested levels. This field will produce a limit error + if the operation falls out of bounds. + type: number + resolverCountLimit: + description: The maximum number of resolvers that can be invoked + in a single request. The default value is 0 (or unspecified), + which will set the limit to 10000. 
When specified, the limit + value can be between 1 and 10000. This field will produce a + limit error if the operation falls out of bounds. + type: number + schema: + description: Schema definition, in GraphQL schema language format. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + uris: + additionalProperties: + type: string + description: Map of URIs associated with the APIE.g., uris["GRAPHQL"] + = https://ID.appsync-api.REGION.amazonaws.com/graphql + type: object + x-kubernetes-map-type: granular + userPoolConfig: + description: Amazon Cognito User Pool configuration. Defined below. + properties: + appIdClientRegex: + description: Regular expression for validating the incoming + Amazon Cognito User Pool app client ID. + type: string + awsRegion: + description: AWS region in which the user pool was created. + type: string + defaultAction: + description: 'Action that you want your GraphQL API to take + when a request that uses Amazon Cognito User Pool authentication + doesn''t match the Amazon Cognito User Pool configuration. + Valid: ALLOW and DENY' + type: string + userPoolId: + description: User pool ID. + type: string + type: object + visibility: + description: Sets the value of the GraphQL API to public (GLOBAL) + or private (PRIVATE). If no value is provided, the visibility + will be set to GLOBAL by default. This value cannot be changed + once the API has been created. + type: string + xrayEnabled: + description: Whether tracing with X-ray is enabled. Defaults to + false. + type: boolean + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appsync.aws.upbound.io_resolvers.yaml b/package/crds/appsync.aws.upbound.io_resolvers.yaml index 1fb9846c38..26838cf938 100644 --- a/package/crds/appsync.aws.upbound.io_resolvers.yaml +++ b/package/crds/appsync.aws.upbound.io_resolvers.yaml @@ -888,3 +888,840 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Resolver is the Schema for the Resolvers API. Provides an AppSync + Resolver. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResolverSpec defines the desired state of Resolver + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiId: + description: API ID for the GraphQL API. + type: string + apiIdRef: + description: Reference to a GraphQLAPI in appsync to populate + apiId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiIdSelector: + description: Selector for a GraphQLAPI in appsync to populate + apiId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cachingConfig: + description: The Caching Config. See Caching Config. + properties: + cachingKeys: + description: The caching keys for a resolver that has caching + activated. Valid values are entries from the $context.arguments, + $context.source, and $context.identity maps. + items: + type: string + type: array + x-kubernetes-list-type: set + ttl: + description: The TTL in seconds for a resolver that has caching + activated. Valid values are between 1 and 3600 seconds. + type: number + type: object + code: + description: The function code that contains the request and response + functions. When code is used, the runtime is required. The runtime + value must be APPSYNC_JS. 
+ type: string + dataSource: + description: Data source name. + type: string + dataSourceRef: + description: Reference to a Datasource in appsync to populate + dataSource. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataSourceSelector: + description: Selector for a Datasource in appsync to populate + dataSource. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + field: + description: Field name from the schema defined in the GraphQL + API. + type: string + kind: + description: Resolver type. Valid values are UNIT and PIPELINE. + type: string + maxBatchSize: + description: Maximum batching size for a resolver. Valid values + are between 0 and 2000. + type: number + pipelineConfig: + description: The caching configuration for the resolver. See Pipeline + Config. + properties: + functions: + description: A list of Function objects. + items: + type: string + type: array + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + requestTemplate: + description: Request mapping template for UNIT resolver or 'before + mapping template' for PIPELINE resolver. Required for non-Lambda + resolvers. + type: string + responseTemplate: + description: Response mapping template for UNIT resolver or 'after + mapping template' for PIPELINE resolver. Required for non-Lambda + resolvers. + type: string + runtime: + description: Describes a runtime used by an AWS AppSync pipeline + resolver or AWS AppSync function. Specifies the name and version + of the runtime to use. Note that if a runtime is specified, + code must also be specified. See Runtime. + properties: + name: + description: The name of the runtime to use. Currently, the + only allowed value is APPSYNC_JS. + type: string + runtimeVersion: + description: The version of the runtime to use. Currently, + the only allowed version is 1.0.0. + type: string + type: object + syncConfig: + description: Describes a Sync configuration for a resolver. See + Sync Config. + properties: + conflictDetection: + description: Conflict Detection strategy to use. 
Valid values + are NONE and VERSION. + type: string + conflictHandler: + description: Conflict Resolution strategy to perform in the + event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, + AUTOMERGE, and LAMBDA. + type: string + lambdaConflictHandlerConfig: + description: Lambda Conflict Handler Config when configuring + LAMBDA as the Conflict Handler. See Lambda Conflict Handler + Config. + properties: + lambdaConflictHandlerArn: + description: ARN for the Lambda function to use as the + Conflict Handler. + type: string + type: object + type: object + type: + description: Type name from the schema defined in the GraphQL + API. + type: string + required: + - field + - region + - type + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cachingConfig: + description: The Caching Config. See Caching Config. + properties: + cachingKeys: + description: The caching keys for a resolver that has caching + activated. Valid values are entries from the $context.arguments, + $context.source, and $context.identity maps. + items: + type: string + type: array + x-kubernetes-list-type: set + ttl: + description: The TTL in seconds for a resolver that has caching + activated. Valid values are between 1 and 3600 seconds. 
+ type: number + type: object + code: + description: The function code that contains the request and response + functions. When code is used, the runtime is required. The runtime + value must be APPSYNC_JS. + type: string + dataSource: + description: Data source name. + type: string + dataSourceRef: + description: Reference to a Datasource in appsync to populate + dataSource. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataSourceSelector: + description: Selector for a Datasource in appsync to populate + dataSource. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kind: + description: Resolver type. Valid values are UNIT and PIPELINE. + type: string + maxBatchSize: + description: Maximum batching size for a resolver. Valid values + are between 0 and 2000. + type: number + pipelineConfig: + description: The caching configuration for the resolver. See Pipeline + Config. + properties: + functions: + description: A list of Function objects. + items: + type: string + type: array + type: object + requestTemplate: + description: Request mapping template for UNIT resolver or 'before + mapping template' for PIPELINE resolver. Required for non-Lambda + resolvers. + type: string + responseTemplate: + description: Response mapping template for UNIT resolver or 'after + mapping template' for PIPELINE resolver. Required for non-Lambda + resolvers. + type: string + runtime: + description: Describes a runtime used by an AWS AppSync pipeline + resolver or AWS AppSync function. Specifies the name and version + of the runtime to use. Note that if a runtime is specified, + code must also be specified. See Runtime. + properties: + name: + description: The name of the runtime to use. Currently, the + only allowed value is APPSYNC_JS. + type: string + runtimeVersion: + description: The version of the runtime to use. Currently, + the only allowed version is 1.0.0. + type: string + type: object + syncConfig: + description: Describes a Sync configuration for a resolver. See + Sync Config. + properties: + conflictDetection: + description: Conflict Detection strategy to use. 
Valid values + are NONE and VERSION. + type: string + conflictHandler: + description: Conflict Resolution strategy to perform in the + event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, + AUTOMERGE, and LAMBDA. + type: string + lambdaConflictHandlerConfig: + description: Lambda Conflict Handler Config when configuring + LAMBDA as the Conflict Handler. See Lambda Conflict Handler + Config. + properties: + lambdaConflictHandlerArn: + description: ARN for the Lambda function to use as the + Conflict Handler. + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ResolverStatus defines the observed state of Resolver. + properties: + atProvider: + properties: + apiId: + description: API ID for the GraphQL API. + type: string + arn: + description: ARN + type: string + cachingConfig: + description: The Caching Config. See Caching Config. + properties: + cachingKeys: + description: The caching keys for a resolver that has caching + activated. Valid values are entries from the $context.arguments, + $context.source, and $context.identity maps. + items: + type: string + type: array + x-kubernetes-list-type: set + ttl: + description: The TTL in seconds for a resolver that has caching + activated. Valid values are between 1 and 3600 seconds. + type: number + type: object + code: + description: The function code that contains the request and response + functions. When code is used, the runtime is required. The runtime + value must be APPSYNC_JS. + type: string + dataSource: + description: Data source name. + type: string + field: + description: Field name from the schema defined in the GraphQL + API. + type: string + id: + type: string + kind: + description: Resolver type. Valid values are UNIT and PIPELINE. + type: string + maxBatchSize: + description: Maximum batching size for a resolver. Valid values + are between 0 and 2000. + type: number + pipelineConfig: + description: The caching configuration for the resolver. See Pipeline + Config. + properties: + functions: + description: A list of Function objects. + items: + type: string + type: array + type: object + requestTemplate: + description: Request mapping template for UNIT resolver or 'before + mapping template' for PIPELINE resolver. Required for non-Lambda + resolvers. + type: string + responseTemplate: + description: Response mapping template for UNIT resolver or 'after + mapping template' for PIPELINE resolver. 
Required for non-Lambda + resolvers. + type: string + runtime: + description: Describes a runtime used by an AWS AppSync pipeline + resolver or AWS AppSync function. Specifies the name and version + of the runtime to use. Note that if a runtime is specified, + code must also be specified. See Runtime. + properties: + name: + description: The name of the runtime to use. Currently, the + only allowed value is APPSYNC_JS. + type: string + runtimeVersion: + description: The version of the runtime to use. Currently, + the only allowed version is 1.0.0. + type: string + type: object + syncConfig: + description: Describes a Sync configuration for a resolver. See + Sync Config. + properties: + conflictDetection: + description: Conflict Detection strategy to use. Valid values + are NONE and VERSION. + type: string + conflictHandler: + description: Conflict Resolution strategy to perform in the + event of a conflict. Valid values are NONE, OPTIMISTIC_CONCURRENCY, + AUTOMERGE, and LAMBDA. + type: string + lambdaConflictHandlerConfig: + description: Lambda Conflict Handler Config when configuring + LAMBDA as the Conflict Handler. See Lambda Conflict Handler + Config. + properties: + lambdaConflictHandlerArn: + description: ARN for the Lambda function to use as the + Conflict Handler. + type: string + type: object + type: object + type: + description: Type name from the schema defined in the GraphQL + API. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/athena.aws.upbound.io_databases.yaml b/package/crds/athena.aws.upbound.io_databases.yaml index 37b999a78b..c06d51ce87 100644 --- a/package/crds/athena.aws.upbound.io_databases.yaml +++ b/package/crds/athena.aws.upbound.io_databases.yaml @@ -626,3 +626,599 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Database is the Schema for the Databases API. Provides an Athena + database. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DatabaseSpec defines the desired state of Database + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + aclConfiguration: + description: That an Amazon S3 canned ACL should be set to control + ownership of stored query results. See ACL Configuration below. + properties: + s3AclOption: + description: Amazon S3 canned ACL that Athena should specify + when storing query results. Valid value is BUCKET_OWNER_FULL_CONTROL. + type: string + type: object + bucket: + description: Name of S3 bucket to save the results of the query + execution. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + comment: + description: Description of the database. + type: string + encryptionConfiguration: + description: Encryption key block AWS Athena uses to decrypt the + data in S3, such as an AWS Key Management Service (AWS KMS) + key. See Encryption Configuration below. 
+ properties: + encryptionOption: + description: Type of key; one of SSE_S3, SSE_KMS, CSE_KMS + type: string + kmsKey: + description: KMS key ARN or ID; required for key types SSE_KMS + and CSE_KMS. + type: string + type: object + expectedBucketOwner: + description: AWS account ID that you expect to be the owner of + the Amazon S3 bucket. + type: string + forceDestroy: + description: Boolean that indicates all tables should be deleted + from the database so that the database can be destroyed without + error. The tables are not recoverable. + type: boolean + properties: + additionalProperties: + type: string + description: Key-value map of custom metadata properties for the + database definition. + type: object + x-kubernetes-map-type: granular + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + aclConfiguration: + description: That an Amazon S3 canned ACL should be set to control + ownership of stored query results. See ACL Configuration below. + properties: + s3AclOption: + description: Amazon S3 canned ACL that Athena should specify + when storing query results. Valid value is BUCKET_OWNER_FULL_CONTROL. 
+ type: string + type: object + bucket: + description: Name of S3 bucket to save the results of the query + execution. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + comment: + description: Description of the database. + type: string + encryptionConfiguration: + description: Encryption key block AWS Athena uses to decrypt the + data in S3, such as an AWS Key Management Service (AWS KMS) + key. See Encryption Configuration below. + properties: + encryptionOption: + description: Type of key; one of SSE_S3, SSE_KMS, CSE_KMS + type: string + kmsKey: + description: KMS key ARN or ID; required for key types SSE_KMS + and CSE_KMS. + type: string + type: object + expectedBucketOwner: + description: AWS account ID that you expect to be the owner of + the Amazon S3 bucket. + type: string + forceDestroy: + description: Boolean that indicates all tables should be deleted + from the database so that the database can be destroyed without + error. The tables are not recoverable. + type: boolean + properties: + additionalProperties: + type: string + description: Key-value map of custom metadata properties for the + database definition. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DatabaseStatus defines the observed state of Database. + properties: + atProvider: + properties: + aclConfiguration: + description: That an Amazon S3 canned ACL should be set to control + ownership of stored query results. See ACL Configuration below. + properties: + s3AclOption: + description: Amazon S3 canned ACL that Athena should specify + when storing query results. Valid value is BUCKET_OWNER_FULL_CONTROL. + type: string + type: object + bucket: + description: Name of S3 bucket to save the results of the query + execution. + type: string + comment: + description: Description of the database. + type: string + encryptionConfiguration: + description: Encryption key block AWS Athena uses to decrypt the + data in S3, such as an AWS Key Management Service (AWS KMS) + key. See Encryption Configuration below. + properties: + encryptionOption: + description: Type of key; one of SSE_S3, SSE_KMS, CSE_KMS + type: string + kmsKey: + description: KMS key ARN or ID; required for key types SSE_KMS + and CSE_KMS. 
+ type: string + type: object + expectedBucketOwner: + description: AWS account ID that you expect to be the owner of + the Amazon S3 bucket. + type: string + forceDestroy: + description: Boolean that indicates all tables should be deleted + from the database so that the database can be destroyed without + error. The tables are not recoverable. + type: boolean + id: + description: Database name + type: string + properties: + additionalProperties: + type: string + description: Key-value map of custom metadata properties for the + database definition. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/athena.aws.upbound.io_workgroups.yaml b/package/crds/athena.aws.upbound.io_workgroups.yaml index ae573578dc..7ba01028d7 100644 --- a/package/crds/athena.aws.upbound.io_workgroups.yaml +++ b/package/crds/athena.aws.upbound.io_workgroups.yaml @@ -852,3 +852,798 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workgroup is the Schema for the Workgroups API. Manages an Athena + Workgroup. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkgroupSpec defines the desired state of Workgroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configuration: + description: Configuration block with various settings for the + workgroup. Documented below. + properties: + bytesScannedCutoffPerQuery: + description: Integer for the upper data usage limit (cutoff) + for the amount of bytes a single query in a workgroup is + allowed to scan. Must be at least 10485760. + type: number + enforceWorkgroupConfiguration: + description: Boolean whether the settings for the workgroup + override client-side settings. For more information, see + Workgroup Settings Override Client-Side Settings. Defaults + to true. + type: boolean + engineVersion: + description: Configuration block for the Athena Engine Versioning. + For more information, see Athena Engine Versioning. See + Engine Version below. + properties: + selectedEngineVersion: + description: Requested engine version. Defaults to AUTO. + type: string + type: object + executionRole: + description: Role used in a notebook session for accessing + the user's resources. 
+ type: string + publishCloudwatchMetricsEnabled: + description: Boolean whether Amazon CloudWatch metrics are + enabled for the workgroup. Defaults to true. + type: boolean + requesterPaysEnabled: + description: If set to true , allows members assigned to a + workgroup to reference Amazon S3 Requester Pays buckets + in queries. If set to false , workgroup members cannot query + data from Requester Pays buckets, and queries that retrieve + data from Requester Pays buckets cause an error. The default + is false . For more information about Requester Pays buckets, + see Requester Pays Buckets in the Amazon Simple Storage + Service Developer Guide. + type: boolean + resultConfiguration: + description: Configuration block with result settings. See + Result Configuration below. + properties: + aclConfiguration: + description: That an Amazon S3 canned ACL should be set + to control ownership of stored query results. See ACL + Configuration below. + properties: + s3AclOption: + description: Amazon S3 canned ACL that Athena should + specify when storing query results. Valid value + is BUCKET_OWNER_FULL_CONTROL. + type: string + type: object + encryptionConfiguration: + description: Configuration block with encryption settings. + See Encryption Configuration below. + properties: + encryptionOption: + description: Whether Amazon S3 server-side encryption + with Amazon S3-managed keys (SSE_S3), server-side + encryption with KMS-managed keys (SSE_KMS), or client-side + encryption with KMS-managed keys (CSE_KMS) is used. + If a query runs in a workgroup and the workgroup + overrides client-side settings, then the workgroup's + setting for encryption is used. It specifies whether + query results must be encrypted, for all queries + that run in this workgroup. + type: string + kmsKeyArn: + description: For SSE_KMS and CSE_KMS, this is the + KMS key ARN. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate + kmsKeyArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate + kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + expectedBucketOwner: + description: AWS account ID that you expect to be the + owner of the Amazon S3 bucket. + type: string + outputLocation: + description: Location in Amazon S3 where your query results + are stored, such as s3://path/to/query/bucket/. For + more information, see Queries and Query Result Files. + type: string + type: object + type: object + description: + description: Description of the workgroup. + type: string + forceDestroy: + description: Option to delete the workgroup and its contents even + if the workgroup contains any named queries. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + state: + description: State of the workgroup. Valid values are DISABLED + or ENABLED. Defaults to ENABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configuration: + description: Configuration block with various settings for the + workgroup. Documented below. 
+ properties: + bytesScannedCutoffPerQuery: + description: Integer for the upper data usage limit (cutoff) + for the amount of bytes a single query in a workgroup is + allowed to scan. Must be at least 10485760. + type: number + enforceWorkgroupConfiguration: + description: Boolean whether the settings for the workgroup + override client-side settings. For more information, see + Workgroup Settings Override Client-Side Settings. Defaults + to true. + type: boolean + engineVersion: + description: Configuration block for the Athena Engine Versioning. + For more information, see Athena Engine Versioning. See + Engine Version below. + properties: + selectedEngineVersion: + description: Requested engine version. Defaults to AUTO. + type: string + type: object + executionRole: + description: Role used in a notebook session for accessing + the user's resources. + type: string + publishCloudwatchMetricsEnabled: + description: Boolean whether Amazon CloudWatch metrics are + enabled for the workgroup. Defaults to true. + type: boolean + requesterPaysEnabled: + description: If set to true , allows members assigned to a + workgroup to reference Amazon S3 Requester Pays buckets + in queries. If set to false , workgroup members cannot query + data from Requester Pays buckets, and queries that retrieve + data from Requester Pays buckets cause an error. The default + is false . For more information about Requester Pays buckets, + see Requester Pays Buckets in the Amazon Simple Storage + Service Developer Guide. + type: boolean + resultConfiguration: + description: Configuration block with result settings. See + Result Configuration below. + properties: + aclConfiguration: + description: That an Amazon S3 canned ACL should be set + to control ownership of stored query results. See ACL + Configuration below. + properties: + s3AclOption: + description: Amazon S3 canned ACL that Athena should + specify when storing query results. Valid value + is BUCKET_OWNER_FULL_CONTROL. 
+ type: string + type: object + encryptionConfiguration: + description: Configuration block with encryption settings. + See Encryption Configuration below. + properties: + encryptionOption: + description: Whether Amazon S3 server-side encryption + with Amazon S3-managed keys (SSE_S3), server-side + encryption with KMS-managed keys (SSE_KMS), or client-side + encryption with KMS-managed keys (CSE_KMS) is used. + If a query runs in a workgroup and the workgroup + overrides client-side settings, then the workgroup's + setting for encryption is used. It specifies whether + query results must be encrypted, for all queries + that run in this workgroup. + type: string + kmsKeyArn: + description: For SSE_KMS and CSE_KMS, this is the + KMS key ARN. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate + kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate + kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + expectedBucketOwner: + description: AWS account ID that you expect to be the + owner of the Amazon S3 bucket. + type: string + outputLocation: + description: Location in Amazon S3 where your query results + are stored, such as s3://path/to/query/bucket/. For + more information, see Queries and Query Result Files. + type: string + type: object + type: object + description: + description: Description of the workgroup. + type: string + forceDestroy: + description: Option to delete the workgroup and its contents even + if the workgroup contains any named queries. + type: boolean + state: + description: State of the workgroup. Valid values are DISABLED + or ENABLED. Defaults to ENABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: WorkgroupStatus defines the observed state of Workgroup. + properties: + atProvider: + properties: + arn: + description: ARN of the workgroup + type: string + configuration: + description: Configuration block with various settings for the + workgroup. Documented below. + properties: + bytesScannedCutoffPerQuery: + description: Integer for the upper data usage limit (cutoff) + for the amount of bytes a single query in a workgroup is + allowed to scan. Must be at least 10485760. 
+ type: number + enforceWorkgroupConfiguration: + description: Boolean whether the settings for the workgroup + override client-side settings. For more information, see + Workgroup Settings Override Client-Side Settings. Defaults + to true. + type: boolean + engineVersion: + description: Configuration block for the Athena Engine Versioning. + For more information, see Athena Engine Versioning. See + Engine Version below. + properties: + effectiveEngineVersion: + description: The engine version on which the query runs. + If selected_engine_version is set to AUTO, the effective + engine version is chosen by Athena. + type: string + selectedEngineVersion: + description: Requested engine version. Defaults to AUTO. + type: string + type: object + executionRole: + description: Role used in a notebook session for accessing + the user's resources. + type: string + publishCloudwatchMetricsEnabled: + description: Boolean whether Amazon CloudWatch metrics are + enabled for the workgroup. Defaults to true. + type: boolean + requesterPaysEnabled: + description: If set to true , allows members assigned to a + workgroup to reference Amazon S3 Requester Pays buckets + in queries. If set to false , workgroup members cannot query + data from Requester Pays buckets, and queries that retrieve + data from Requester Pays buckets cause an error. The default + is false . For more information about Requester Pays buckets, + see Requester Pays Buckets in the Amazon Simple Storage + Service Developer Guide. + type: boolean + resultConfiguration: + description: Configuration block with result settings. See + Result Configuration below. + properties: + aclConfiguration: + description: That an Amazon S3 canned ACL should be set + to control ownership of stored query results. See ACL + Configuration below. + properties: + s3AclOption: + description: Amazon S3 canned ACL that Athena should + specify when storing query results. Valid value + is BUCKET_OWNER_FULL_CONTROL. 
+ type: string + type: object + encryptionConfiguration: + description: Configuration block with encryption settings. + See Encryption Configuration below. + properties: + encryptionOption: + description: Whether Amazon S3 server-side encryption + with Amazon S3-managed keys (SSE_S3), server-side + encryption with KMS-managed keys (SSE_KMS), or client-side + encryption with KMS-managed keys (CSE_KMS) is used. + If a query runs in a workgroup and the workgroup + overrides client-side settings, then the workgroup's + setting for encryption is used. It specifies whether + query results must be encrypted, for all queries + that run in this workgroup. + type: string + kmsKeyArn: + description: For SSE_KMS and CSE_KMS, this is the + KMS key ARN. + type: string + type: object + expectedBucketOwner: + description: AWS account ID that you expect to be the + owner of the Amazon S3 bucket. + type: string + outputLocation: + description: Location in Amazon S3 where your query results + are stored, such as s3://path/to/query/bucket/. For + more information, see Queries and Query Result Files. + type: string + type: object + type: object + description: + description: Description of the workgroup. + type: string + forceDestroy: + description: Option to delete the workgroup and its contents even + if the workgroup contains any named queries. + type: boolean + id: + description: Workgroup name + type: string + state: + description: State of the workgroup. Valid values are DISABLED + or ENABLED. Defaults to ENABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/autoscaling.aws.upbound.io_autoscalinggroups.yaml b/package/crds/autoscaling.aws.upbound.io_autoscalinggroups.yaml index 6dbbf3c19b..af53297259 100644 --- a/package/crds/autoscaling.aws.upbound.io_autoscalinggroups.yaml +++ b/package/crds/autoscaling.aws.upbound.io_autoscalinggroups.yaml @@ -7361,3 +7361,3601 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta3 + schema: + openAPIV3Schema: + description: AutoscalingGroup is the Schema for the AutoscalingGroups API. + Provides an Auto Scaling Group resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AutoscalingGroupSpec defines the desired state of AutoscalingGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + availabilityZones: + description: A list of Availability Zones where instances in the + Auto Scaling group can be created. Used for launching into the + default VPC subnet in each Availability Zone when not using + the vpc_zone_identifier attribute, or for attaching a network + interface when an existing network interface ID is specified + in a launch template. Conflicts with vpc_zone_identifier. + items: + type: string + type: array + x-kubernetes-list-type: set + capacityRebalance: + description: Whether capacity rebalance is enabled. Otherwise, + capacity rebalance is disabled. + type: boolean + context: + description: Reserved. + type: string + defaultCooldown: + description: Amount of time, in seconds, after a scaling activity + completes before another scaling activity can start. + type: number + defaultInstanceWarmup: + description: Amount of time, in seconds, until a newly launched + instance can contribute to the Amazon CloudWatch metrics. 
This + delay lets an instance finish initializing before Amazon EC2 + Auto Scaling aggregates instance metrics, resulting in more + reliable usage data. Set this value equal to the amount of time + that it takes for resource consumption to become stable after + an instance reaches the InService state. (See Set the default + instance warmup for an Auto Scaling group) + type: number + desiredCapacity: + description: |- + Number of Amazon EC2 instances that + should be running in the group. (See also Waiting for + Capacity below.) + type: number + desiredCapacityType: + description: 'The unit of measurement for the value specified + for desired_capacity. Supported for attribute-based instance + type selection only. Valid values: "units", "vcpu", "memory-mib".' + type: string + enabledMetrics: + description: List of metrics to collect. The allowed values are + defined by the underlying AWS API. + items: + type: string + type: array + x-kubernetes-list-type: set + forceDelete: + description: |- + Allows deleting the Auto Scaling Group without waiting + for all instances in the pool to terminate. You can force an Auto Scaling Group to delete + even if it's in the process of scaling a resource. This bypasses that + behavior and potentially leaves resources dangling. + type: boolean + forceDeleteWarmPool: + description: Allows deleting the Auto Scaling Group without waiting + for all instances in the warm pool to terminate. + type: boolean + healthCheckGracePeriod: + description: Time (in seconds) after instance comes into service + before checking health. + type: number + healthCheckType: + description: '"EC2" or "ELB". Controls how health checking is + done.' + type: string + ignoreFailedScalingActivities: + description: Whether to ignore failed Auto Scaling scaling activities + while waiting for capacity. The default is false -- failed scaling + activities cause errors to be returned. 
+ type: boolean + initialLifecycleHook: + description: |- + One or more + Lifecycle Hooks + to attach to the Auto Scaling Group before instances are launched. The + syntax is exactly the same as the separate + aws_autoscaling_lifecycle_hook + resource, without the autoscaling_group_name attribute. Please note that this will only work when creating + a new Auto Scaling Group. For all other use-cases, please use aws_autoscaling_lifecycle_hook resource. + items: + properties: + defaultResult: + type: string + heartbeatTimeout: + type: number + lifecycleTransition: + type: string + name: + description: Name of the Auto Scaling Group. Conflicts with + name_prefix. + type: string + notificationMetadata: + type: string + notificationTargetArn: + description: ARN for this Auto Scaling Group + type: string + roleArn: + description: ARN for this Auto Scaling Group + type: string + type: object + type: array + instanceMaintenancePolicy: + description: If this block is configured, add a instance maintenance + policy to the specified Auto Scaling group. Defined below. + properties: + maxHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that can be in service and healthy, or pending, to support + your workload when an instance refresh is in place, as a + percentage of the desired capacity of the Auto Scaling group. + Values must be between 100 and 200, defaults to 100. + type: number + minHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that must remain healthy during an instance refresh to allow + the operation to continue, as a percentage of the desired + capacity of the Auto Scaling group. Defaults to 90. + type: number + type: object + instanceRefresh: + description: |- + If this block is configured, start an + Instance Refresh + when this Auto Scaling Group is updated. Defined below. + properties: + preferences: + description: Override default parameters for Instance Refresh. 
+ properties: + alarmSpecification: + description: Alarm Specification for Instance Refresh. + properties: + alarms: + description: List of Cloudwatch alarms. If any of + these alarms goes into ALARM state, Instance Refresh + is failed. + items: + type: string + type: array + type: object + autoRollback: + description: Automatically rollback if instance refresh + fails. Defaults to false. This option may only be set + to true when specifying a launch_template or mixed_instances_policy. + type: boolean + checkpointDelay: + description: Number of seconds to wait after a checkpoint. + Defaults to 3600. + type: string + checkpointPercentages: + description: List of percentages for each checkpoint. + Values must be unique and in ascending order. To replace + all instances, the final number must be 100. + items: + type: number + type: array + instanceWarmup: + description: Number of seconds until a newly launched + instance is configured and ready to use. Default behavior + is to use the Auto Scaling Group's health check grace + period. + type: string + maxHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that can be in service and healthy, or pending, to support + your workload when an instance refresh is in place, + as a percentage of the desired capacity of the Auto + Scaling group. Values must be between 100 and 200, defaults + to 100. + type: number + minHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that must remain healthy during an instance refresh + to allow the operation to continue, as a percentage + of the desired capacity of the Auto Scaling group. Defaults + to 90. + type: number + scaleInProtectedInstances: + description: Behavior when encountering instances protected + from scale in are found. Available behaviors are Refresh, + Ignore, and Wait. Default is Ignore. + type: string + skipMatching: + description: Replace instances that already have your + desired configuration. 
Defaults to false. + type: boolean + standbyInstances: + description: Behavior when encountering instances in the + Standby state in are found. Available behaviors are + Terminate, Ignore, and Wait. Default is Ignore. + type: string + type: object + strategy: + description: Strategy to use for instance refresh. The only + allowed value is Rolling. See StartInstanceRefresh Action + for more information. + type: string + triggers: + description: Set of additional property names that will trigger + an Instance Refresh. A refresh will always be triggered + by a change in any of launch_configuration, launch_template, + or mixed_instances_policy. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + launchConfiguration: + description: Name of the launch configuration to use. + type: string + launchConfigurationRef: + description: Reference to a LaunchConfiguration in autoscaling + to populate launchConfiguration. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + launchConfigurationSelector: + description: Selector for a LaunchConfiguration in autoscaling + to populate launchConfiguration. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + launchTemplate: + description: Nested argument with Launch template specification + to use to launch instances. See Launch Template below for more + details. + properties: + id: + description: ID of the launch template. Conflicts with name. + type: string + idRef: + description: Reference to a LaunchTemplate in ec2 to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a LaunchTemplate in ec2 to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name of the launch template. Conflicts with id. + type: string + version: + description: 'Template version. Can be version number, $Latest, + or $Default. (Default: $Default).' 
+ type: string + type: object + maxInstanceLifetime: + description: Maximum amount of time, in seconds, that an instance + can be in service, values must be either equal to 0 or between + 86400 and 31536000 seconds. + type: number + maxSize: + description: Maximum size of the Auto Scaling Group. + type: number + metricsGranularity: + description: Granularity to associate with the metrics to collect. + The only valid value is 1Minute. Default is 1Minute. + type: string + minElbCapacity: + description: |- + Updates will not wait on ELB instance number changes. + (See also Waiting for Capacity below.) + type: number + minSize: + description: |- + Minimum size of the Auto Scaling Group. + (See also Waiting for Capacity below.) + type: number + mixedInstancesPolicy: + description: Configuration block containing settings to define + launch targets for Auto Scaling groups. See Mixed Instances + Policy below for more details. + properties: + instancesDistribution: + description: Nested argument containing settings on how to + mix on-demand and Spot instances in the Auto Scaling group. + Defined below. + properties: + onDemandAllocationStrategy: + description: 'Strategy to use when launching on-demand + instances. Valid values: prioritized, lowest-price. + Default: prioritized.' + type: string + onDemandBaseCapacity: + description: 'Absolute minimum amount of desired capacity + that must be fulfilled by on-demand instances. Default: + 0.' + type: number + onDemandPercentageAboveBaseCapacity: + description: 'Percentage split between on-demand and Spot + instances above the base on-demand capacity. Default: + 100.' + type: number + spotAllocationStrategy: + description: 'How to allocate capacity across the Spot + pools. Valid values: lowest-price, capacity-optimized, + capacity-optimized-prioritized, and price-capacity-optimized. + Default: lowest-price.' + type: string + spotInstancePools: + description: 'Number of Spot pools per availability zone + to allocate capacity. 
EC2 Auto Scaling selects the cheapest + Spot pools and evenly allocates Spot capacity across + the number of Spot pools that you specify. Only available + with spot_allocation_strategy set to lowest-price. Otherwise + it must be set to 0, if it has been defined before. + Default: 2.' + type: number + spotMaxPrice: + description: 'Maximum price per unit hour that the user + is willing to pay for the Spot instances. Default: an + empty string which means the on-demand price.' + type: string + type: object + launchTemplate: + description: Nested argument containing launch template settings + along with the overrides to specify multiple instance types + and weights. Defined below. + properties: + launchTemplateSpecification: + description: Nested argument defines the Launch Template. + Defined below. + properties: + launchTemplateId: + description: ID of the launch template. Conflicts + with launch_template_name. + type: string + launchTemplateIdRef: + description: Reference to a LaunchTemplate in ec2 + to populate launchTemplateId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + launchTemplateIdSelector: + description: Selector for a LaunchTemplate in ec2 + to populate launchTemplateId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + launchTemplateName: + description: Name of the launch template. Conflicts + with launch_template_id. + type: string + version: + description: 'Template version. Can be version number, + $Latest, or $Default. (Default: $Default).' + type: string + type: object + override: + description: List of nested arguments provides the ability + to specify multiple instance types. This will override + the same parameter in the launch template. For on-demand + instances, Auto Scaling considers the order of preference + of instance types to launch based on the order specified + in the overrides list. Defined below. 
+ items: + properties: + instanceRequirements: + description: Override the instance type in the Launch + Template with instance types that satisfy the + requirements. + properties: + acceleratorCount: + description: Block describing the minimum and + maximum number of accelerators (GPUs, FPGAs, + or AWS Inferentia chips). Default is no minimum + or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorManufacturers: + description: List of accelerator manufacturer + names. Default is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorNames: + description: List of accelerator names. Default + is any acclerator. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorTotalMemoryMib: + description: Block describing the minimum and + maximum total memory of the accelerators. + Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorTypes: + description: List of accelerator types. Default + is any accelerator type. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedInstanceTypes: + description: 'List of instance types to apply + your specified attributes against. All other + instance types are ignored, even if they match + your specified attributes. You can use strings + with one or more wild cards, represented by + an asterisk (*), to allow an instance type, + size, or generation. The following are examples: + m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, + if you specify c5*, you are allowing the entire + C5 instance family, which includes all C5a + and C5n instance types. If you specify m5a.*, + you are allowing all the M5a instance types, + but not the M5n instance types. Maximum of + 400 entries in the list; each entry is limited + to 30 characters. 
Default is all instance + types.' + items: + type: string + type: array + x-kubernetes-list-type: set + bareMetal: + description: Indicate whether bare metal instace + types should be included, excluded, or required. + Default is excluded. + type: string + baselineEbsBandwidthMbps: + description: Block describing the minimum and + maximum baseline EBS bandwidth, in Mbps. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + burstablePerformance: + description: Indicate whether burstable performance + instance types should be included, excluded, + or required. Default is excluded. + type: string + cpuManufacturers: + description: List of CPU manufacturer names. + Default is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + excludedInstanceTypes: + description: 'List of instance types to exclude. + You can use strings with one or more wild + cards, represented by an asterisk (*), to + exclude an instance type, size, or generation. + The following are examples: m5.8xlarge, c5*.*, + m5a.*, r*, *3*. For example, if you specify + c5*, you are excluding the entire C5 instance + family, which includes all C5a and C5n instance + types. If you specify m5a.*, you are excluding + all the M5a instance types, but not the M5n + instance types. Maximum of 400 entries in + the list; each entry is limited to 30 characters. + Default is no excluded instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + instanceGenerations: + description: List of instance generation names. + Default is any generation. + items: + type: string + type: array + x-kubernetes-list-type: set + localStorage: + description: Indicate whether instance types + with local storage volumes are included, excluded, + or required. Default is included. + type: string + localStorageTypes: + description: List of local storage type names. 
+ Default any storage type. + items: + type: string + type: array + x-kubernetes-list-type: set + memoryGibPerVcpu: + description: Block describing the minimum and + maximum amount of memory (GiB) per vCPU. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + memoryMib: + description: Block describing the minimum and + maximum amount of memory (MiB). Default is + no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkBandwidthGbps: + description: Block describing the minimum and + maximum amount of network bandwidth, in gigabits + per second (Gbps). Default is no minimum or + maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkInterfaceCount: + description: Block describing the minimum and + maximum number of network interfaces. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + onDemandMaxPricePercentageOverLowestPrice: + description: Price protection threshold for + On-Demand Instances. This is the maximum you’ll + pay for an On-Demand Instance, expressed as + a percentage higher than the cheapest M, C, + or R instance type with your specified attributes. + When Amazon EC2 Auto Scaling selects instance + types with your attributes, we will exclude + instance types whose price is higher than + your threshold. The parameter accepts an integer, + which Amazon EC2 Auto Scaling interprets as + a percentage. To turn off price protection, + specify a high value, such as 999999. Default + is 20. + type: number + requireHibernateSupport: + description: Indicate whether instance types + must support On-Demand Instance Hibernation, + either true or false. Default is false. 
+ type: boolean + spotMaxPricePercentageOverLowestPrice: + description: Price protection threshold for + Spot Instances. This is the maximum you’ll + pay for a Spot Instance, expressed as a percentage + higher than the cheapest M, C, or R instance + type with your specified attributes. When + Amazon EC2 Auto Scaling selects instance types + with your attributes, we will exclude instance + types whose price is higher than your threshold. + The parameter accepts an integer, which Amazon + EC2 Auto Scaling interprets as a percentage. + To turn off price protection, specify a high + value, such as 999999. Default is 100. + type: number + totalLocalStorageGb: + description: Block describing the minimum and + maximum total local storage (GB). Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + vcpuCount: + description: Block describing the minimum and + maximum number of vCPUs. Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + type: object + instanceType: + description: Override the instance type in the Launch + Template. + type: string + launchTemplateSpecification: + description: Nested argument defines the Launch + Template. Defined below. + properties: + launchTemplateId: + description: ID of the launch template. Conflicts + with launch_template_name. + type: string + launchTemplateIdRef: + description: Reference to a LaunchTemplate in + ec2 to populate launchTemplateId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + launchTemplateIdSelector: + description: Selector for a LaunchTemplate in + ec2 to populate launchTemplateId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + launchTemplateName: + description: Name of the launch template. Conflicts + with launch_template_id. + type: string + version: + description: 'Template version. Can be version + number, $Latest, or $Default. (Default: $Default).' 
+ type: string + type: object + weightedCapacity: + description: Number of capacity units, which gives + the instance type a proportional weight to other + instance types. + type: string + type: object + type: array + type: object + type: object + placementGroup: + description: Name of the placement group into which you'll launch + your instances, if any. + type: string + placementGroupRef: + description: Reference to a PlacementGroup in ec2 to populate + placementGroup. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + placementGroupSelector: + description: Selector for a PlacementGroup in ec2 to populate + placementGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + protectFromScaleIn: + description: |- + Whether newly launched instances + are automatically protected from termination by Amazon EC2 Auto Scaling when + scaling in. For more information about preventing instances from terminating + on scale in, see Using instance scale-in protection + in the Amazon EC2 Auto Scaling User Guide. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + serviceLinkedRoleArn: + description: ARN of the service-linked role that the ASG will + use to call other AWS services + type: string + serviceLinkedRoleArnRef: + description: Reference to a Role in iam to populate serviceLinkedRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceLinkedRoleArnSelector: + description: Selector for a Role in iam to populate serviceLinkedRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + suspendedProcesses: + description: |- + List of processes to suspend for the Auto Scaling Group. The allowed values are Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions, AddToLoadBalancer, InstanceRefresh. + Note that if you suspend either the Launch or Terminate process types, it can prevent your Auto Scaling Group from functioning properly. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tag: + description: Configuration block(s) containing resource tags. + See Tag below for more details. + items: + properties: + key: + description: Key + type: string + propagateAtLaunch: + description: |- + Enables propagation of the tag to + Amazon EC2 instances launched via this ASG + type: boolean + value: + description: Value + type: string + type: object + type: array + terminationPolicies: + description: List of policies to decide how the instances in the + Auto Scaling Group should be terminated. The allowed values + are OldestInstance, NewestInstance, OldestLaunchConfiguration, + ClosestToNextInstanceHour, OldestLaunchTemplate, AllocationStrategy, + Default. Additionally, the ARN of a Lambda function can be specified + for custom termination policies. + items: + type: string + type: array + trafficSource: + description: Attaches one or more traffic sources to the specified + Auto Scaling group. + items: + properties: + identifier: + description: Identifies the traffic source. For Application + Load Balancers, Gateway Load Balancers, Network Load Balancers, + and VPC Lattice, this will be the Amazon Resource Name + (ARN) for a target group in this account and Region. For + Classic Load Balancers, this will be the name of the Classic + Load Balancer in this account and Region. + type: string + type: + description: |- + Provides additional context for the value of Identifier. + The following lists the valid values: + elb if identifier is the name of a Classic Load Balancer. + elbv2 if identifier is the ARN of an Application Load Balancer, Gateway Load Balancer, or Network Load Balancer target group. + vpc-lattice if identifier is the ARN of a VPC Lattice target group. + type: string + type: object + type: array + vpcZoneIdentifier: + description: List of subnet IDs to launch resources in. Subnets + automatically determine which availability zones the group will + reside. 
Conflicts with availability_zones. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcZoneIdentifierRefs: + description: References to Subnet in ec2 to populate vpcZoneIdentifier. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcZoneIdentifierSelector: + description: Selector for a list of Subnet in ec2 to populate + vpcZoneIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + waitForCapacityTimeout: + description: |- + (See also Waiting + for Capacity below. + type: string + waitForElbCapacity: + description: |- + (Takes + precedence over min_elb_capacity behavior.) + (See also Waiting for Capacity below.) + type: number + warmPool: + description: |- + If this block is configured, add a Warm Pool + to the specified Auto Scaling group. Defined below + properties: + instanceReusePolicy: + description: Whether instances in the Auto Scaling group can + be returned to the warm pool on scale in. The default is + to terminate instances in the Auto Scaling group when the + group scales in. + properties: + reuseOnScaleIn: + description: Whether instances in the Auto Scaling group + can be returned to the warm pool on scale in. + type: boolean + type: object + maxGroupPreparedCapacity: + description: Total maximum number of instances that are allowed + to be in the warm pool or in any state except Terminated + for the Auto Scaling group. + type: number + minSize: + description: |- + Minimum size of the Auto Scaling Group. + (See also Waiting for Capacity below.) + type: number + poolState: + description: 'Sets the instance state to transition to after + the lifecycle hooks finish. Valid values are: Stopped (default), + Running or Hibernated.' + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + availabilityZones: + description: A list of Availability Zones where instances in the + Auto Scaling group can be created. Used for launching into the + default VPC subnet in each Availability Zone when not using + the vpc_zone_identifier attribute, or for attaching a network + interface when an existing network interface ID is specified + in a launch template. Conflicts with vpc_zone_identifier. + items: + type: string + type: array + x-kubernetes-list-type: set + capacityRebalance: + description: Whether capacity rebalance is enabled. Otherwise, + capacity rebalance is disabled. + type: boolean + context: + description: Reserved. + type: string + defaultCooldown: + description: Amount of time, in seconds, after a scaling activity + completes before another scaling activity can start. + type: number + defaultInstanceWarmup: + description: Amount of time, in seconds, until a newly launched + instance can contribute to the Amazon CloudWatch metrics. This + delay lets an instance finish initializing before Amazon EC2 + Auto Scaling aggregates instance metrics, resulting in more + reliable usage data. Set this value equal to the amount of time + that it takes for resource consumption to become stable after + an instance reaches the InService state. 
(See Set the default + instance warmup for an Auto Scaling group) + type: number + desiredCapacity: + description: |- + Number of Amazon EC2 instances that + should be running in the group. (See also Waiting for + Capacity below.) + type: number + desiredCapacityType: + description: 'The unit of measurement for the value specified + for desired_capacity. Supported for attribute-based instance + type selection only. Valid values: "units", "vcpu", "memory-mib".' + type: string + enabledMetrics: + description: List of metrics to collect. The allowed values are + defined by the underlying AWS API. + items: + type: string + type: array + x-kubernetes-list-type: set + forceDelete: + description: |- + Allows deleting the Auto Scaling Group without waiting + for all instances in the pool to terminate. You can force an Auto Scaling Group to delete + even if it's in the process of scaling a resource. This bypasses that + behavior and potentially leaves resources dangling. + type: boolean + forceDeleteWarmPool: + description: Allows deleting the Auto Scaling Group without waiting + for all instances in the warm pool to terminate. + type: boolean + healthCheckGracePeriod: + description: Time (in seconds) after instance comes into service + before checking health. + type: number + healthCheckType: + description: '"EC2" or "ELB". Controls how health checking is + done.' + type: string + ignoreFailedScalingActivities: + description: Whether to ignore failed Auto Scaling scaling activities + while waiting for capacity. The default is false -- failed scaling + activities cause errors to be returned. + type: boolean + initialLifecycleHook: + description: |- + One or more + Lifecycle Hooks + to attach to the Auto Scaling Group before instances are launched. The + syntax is exactly the same as the separate + aws_autoscaling_lifecycle_hook + resource, without the autoscaling_group_name attribute. Please note that this will only work when creating + a new Auto Scaling Group. 
For all other use-cases, please use aws_autoscaling_lifecycle_hook resource. + items: + properties: + defaultResult: + type: string + heartbeatTimeout: + type: number + lifecycleTransition: + type: string + name: + description: Name of the Auto Scaling Group. Conflicts with + name_prefix. + type: string + notificationMetadata: + type: string + notificationTargetArn: + description: ARN for this Auto Scaling Group + type: string + roleArn: + description: ARN for this Auto Scaling Group + type: string + type: object + type: array + instanceMaintenancePolicy: + description: If this block is configured, add a instance maintenance + policy to the specified Auto Scaling group. Defined below. + properties: + maxHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that can be in service and healthy, or pending, to support + your workload when an instance refresh is in place, as a + percentage of the desired capacity of the Auto Scaling group. + Values must be between 100 and 200, defaults to 100. + type: number + minHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that must remain healthy during an instance refresh to allow + the operation to continue, as a percentage of the desired + capacity of the Auto Scaling group. Defaults to 90. + type: number + type: object + instanceRefresh: + description: |- + If this block is configured, start an + Instance Refresh + when this Auto Scaling Group is updated. Defined below. + properties: + preferences: + description: Override default parameters for Instance Refresh. + properties: + alarmSpecification: + description: Alarm Specification for Instance Refresh. + properties: + alarms: + description: List of Cloudwatch alarms. If any of + these alarms goes into ALARM state, Instance Refresh + is failed. + items: + type: string + type: array + type: object + autoRollback: + description: Automatically rollback if instance refresh + fails. Defaults to false. 
This option may only be set + to true when specifying a launch_template or mixed_instances_policy. + type: boolean + checkpointDelay: + description: Number of seconds to wait after a checkpoint. + Defaults to 3600. + type: string + checkpointPercentages: + description: List of percentages for each checkpoint. + Values must be unique and in ascending order. To replace + all instances, the final number must be 100. + items: + type: number + type: array + instanceWarmup: + description: Number of seconds until a newly launched + instance is configured and ready to use. Default behavior + is to use the Auto Scaling Group's health check grace + period. + type: string + maxHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that can be in service and healthy, or pending, to support + your workload when an instance refresh is in place, + as a percentage of the desired capacity of the Auto + Scaling group. Values must be between 100 and 200, defaults + to 100. + type: number + minHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that must remain healthy during an instance refresh + to allow the operation to continue, as a percentage + of the desired capacity of the Auto Scaling group. Defaults + to 90. + type: number + scaleInProtectedInstances: + description: Behavior when encountering instances protected + from scale in are found. Available behaviors are Refresh, + Ignore, and Wait. Default is Ignore. + type: string + skipMatching: + description: Replace instances that already have your + desired configuration. Defaults to false. + type: boolean + standbyInstances: + description: Behavior when encountering instances in the + Standby state in are found. Available behaviors are + Terminate, Ignore, and Wait. Default is Ignore. + type: string + type: object + strategy: + description: Strategy to use for instance refresh. The only + allowed value is Rolling. See StartInstanceRefresh Action + for more information. 
+ type: string + triggers: + description: Set of additional property names that will trigger + an Instance Refresh. A refresh will always be triggered + by a change in any of launch_configuration, launch_template, + or mixed_instances_policy. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + launchConfiguration: + description: Name of the launch configuration to use. + type: string + launchConfigurationRef: + description: Reference to a LaunchConfiguration in autoscaling + to populate launchConfiguration. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + launchConfigurationSelector: + description: Selector for a LaunchConfiguration in autoscaling + to populate launchConfiguration. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + launchTemplate: + description: Nested argument with Launch template specification + to use to launch instances. See Launch Template below for more + details. + properties: + id: + description: ID of the launch template. Conflicts with name. + type: string + idRef: + description: Reference to a LaunchTemplate in ec2 to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a LaunchTemplate in ec2 to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name of the launch template. Conflicts with id. + type: string + version: + description: 'Template version. Can be version number, $Latest, + or $Default. (Default: $Default).' + type: string + type: object + maxInstanceLifetime: + description: Maximum amount of time, in seconds, that an instance + can be in service, values must be either equal to 0 or between + 86400 and 31536000 seconds. + type: number + maxSize: + description: Maximum size of the Auto Scaling Group. + type: number + metricsGranularity: + description: Granularity to associate with the metrics to collect. + The only valid value is 1Minute. Default is 1Minute. 
+ type: string + minElbCapacity: + description: |- + Updates will not wait on ELB instance number changes. + (See also Waiting for Capacity below.) + type: number + minSize: + description: |- + Minimum size of the Auto Scaling Group. + (See also Waiting for Capacity below.) + type: number + mixedInstancesPolicy: + description: Configuration block containing settings to define + launch targets for Auto Scaling groups. See Mixed Instances + Policy below for more details. + properties: + instancesDistribution: + description: Nested argument containing settings on how to + mix on-demand and Spot instances in the Auto Scaling group. + Defined below. + properties: + onDemandAllocationStrategy: + description: 'Strategy to use when launching on-demand + instances. Valid values: prioritized, lowest-price. + Default: prioritized.' + type: string + onDemandBaseCapacity: + description: 'Absolute minimum amount of desired capacity + that must be fulfilled by on-demand instances. Default: + 0.' + type: number + onDemandPercentageAboveBaseCapacity: + description: 'Percentage split between on-demand and Spot + instances above the base on-demand capacity. Default: + 100.' + type: number + spotAllocationStrategy: + description: 'How to allocate capacity across the Spot + pools. Valid values: lowest-price, capacity-optimized, + capacity-optimized-prioritized, and price-capacity-optimized. + Default: lowest-price.' + type: string + spotInstancePools: + description: 'Number of Spot pools per availability zone + to allocate capacity. EC2 Auto Scaling selects the cheapest + Spot pools and evenly allocates Spot capacity across + the number of Spot pools that you specify. Only available + with spot_allocation_strategy set to lowest-price. Otherwise + it must be set to 0, if it has been defined before. + Default: 2.' + type: number + spotMaxPrice: + description: 'Maximum price per unit hour that the user + is willing to pay for the Spot instances. 
Default: an + empty string which means the on-demand price.' + type: string + type: object + launchTemplate: + description: Nested argument containing launch template settings + along with the overrides to specify multiple instance types + and weights. Defined below. + properties: + launchTemplateSpecification: + description: Nested argument defines the Launch Template. + Defined below. + properties: + launchTemplateId: + description: ID of the launch template. Conflicts + with launch_template_name. + type: string + launchTemplateIdRef: + description: Reference to a LaunchTemplate in ec2 + to populate launchTemplateId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + launchTemplateIdSelector: + description: Selector for a LaunchTemplate in ec2 + to populate launchTemplateId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + launchTemplateName: + description: Name of the launch template. Conflicts + with launch_template_id. + type: string + version: + description: 'Template version. Can be version number, + $Latest, or $Default. (Default: $Default).' + type: string + type: object + override: + description: List of nested arguments provides the ability + to specify multiple instance types. This will override + the same parameter in the launch template. For on-demand + instances, Auto Scaling considers the order of preference + of instance types to launch based on the order specified + in the overrides list. Defined below. + items: + properties: + instanceRequirements: + description: Override the instance type in the Launch + Template with instance types that satisfy the + requirements. + properties: + acceleratorCount: + description: Block describing the minimum and + maximum number of accelerators (GPUs, FPGAs, + or AWS Inferentia chips). Default is no minimum + or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorManufacturers: + description: List of accelerator manufacturer + names. Default is any manufacturer. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorNames: + description: List of accelerator names. Default + is any acclerator. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorTotalMemoryMib: + description: Block describing the minimum and + maximum total memory of the accelerators. + Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorTypes: + description: List of accelerator types. Default + is any accelerator type. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedInstanceTypes: + description: 'List of instance types to apply + your specified attributes against. All other + instance types are ignored, even if they match + your specified attributes. You can use strings + with one or more wild cards, represented by + an asterisk (*), to allow an instance type, + size, or generation. The following are examples: + m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, + if you specify c5*, you are allowing the entire + C5 instance family, which includes all C5a + and C5n instance types. If you specify m5a.*, + you are allowing all the M5a instance types, + but not the M5n instance types. Maximum of + 400 entries in the list; each entry is limited + to 30 characters. Default is all instance + types.' + items: + type: string + type: array + x-kubernetes-list-type: set + bareMetal: + description: Indicate whether bare metal instace + types should be included, excluded, or required. + Default is excluded. + type: string + baselineEbsBandwidthMbps: + description: Block describing the minimum and + maximum baseline EBS bandwidth, in Mbps. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. 
+ type: number + type: object + burstablePerformance: + description: Indicate whether burstable performance + instance types should be included, excluded, + or required. Default is excluded. + type: string + cpuManufacturers: + description: List of CPU manufacturer names. + Default is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + excludedInstanceTypes: + description: 'List of instance types to exclude. + You can use strings with one or more wild + cards, represented by an asterisk (*), to + exclude an instance type, size, or generation. + The following are examples: m5.8xlarge, c5*.*, + m5a.*, r*, *3*. For example, if you specify + c5*, you are excluding the entire C5 instance + family, which includes all C5a and C5n instance + types. If you specify m5a.*, you are excluding + all the M5a instance types, but not the M5n + instance types. Maximum of 400 entries in + the list; each entry is limited to 30 characters. + Default is no excluded instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + instanceGenerations: + description: List of instance generation names. + Default is any generation. + items: + type: string + type: array + x-kubernetes-list-type: set + localStorage: + description: Indicate whether instance types + with local storage volumes are included, excluded, + or required. Default is included. + type: string + localStorageTypes: + description: List of local storage type names. + Default any storage type. + items: + type: string + type: array + x-kubernetes-list-type: set + memoryGibPerVcpu: + description: Block describing the minimum and + maximum amount of memory (GiB) per vCPU. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + memoryMib: + description: Block describing the minimum and + maximum amount of memory (MiB). Default is + no maximum. 
+ properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkBandwidthGbps: + description: Block describing the minimum and + maximum amount of network bandwidth, in gigabits + per second (Gbps). Default is no minimum or + maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkInterfaceCount: + description: Block describing the minimum and + maximum number of network interfaces. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + onDemandMaxPricePercentageOverLowestPrice: + description: Price protection threshold for + On-Demand Instances. This is the maximum you’ll + pay for an On-Demand Instance, expressed as + a percentage higher than the cheapest M, C, + or R instance type with your specified attributes. + When Amazon EC2 Auto Scaling selects instance + types with your attributes, we will exclude + instance types whose price is higher than + your threshold. The parameter accepts an integer, + which Amazon EC2 Auto Scaling interprets as + a percentage. To turn off price protection, + specify a high value, such as 999999. Default + is 20. + type: number + requireHibernateSupport: + description: Indicate whether instance types + must support On-Demand Instance Hibernation, + either true or false. Default is false. + type: boolean + spotMaxPricePercentageOverLowestPrice: + description: Price protection threshold for + Spot Instances. This is the maximum you’ll + pay for a Spot Instance, expressed as a percentage + higher than the cheapest M, C, or R instance + type with your specified attributes. When + Amazon EC2 Auto Scaling selects instance types + with your attributes, we will exclude instance + types whose price is higher than your threshold. 
+ The parameter accepts an integer, which Amazon + EC2 Auto Scaling interprets as a percentage. + To turn off price protection, specify a high + value, such as 999999. Default is 100. + type: number + totalLocalStorageGb: + description: Block describing the minimum and + maximum total local storage (GB). Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + vcpuCount: + description: Block describing the minimum and + maximum number of vCPUs. Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + type: object + instanceType: + description: Override the instance type in the Launch + Template. + type: string + launchTemplateSpecification: + description: Nested argument defines the Launch + Template. Defined below. + properties: + launchTemplateId: + description: ID of the launch template. Conflicts + with launch_template_name. + type: string + launchTemplateIdRef: + description: Reference to a LaunchTemplate in + ec2 to populate launchTemplateId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + launchTemplateIdSelector: + description: Selector for a LaunchTemplate in + ec2 to populate launchTemplateId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + launchTemplateName: + description: Name of the launch template. Conflicts + with launch_template_id. + type: string + version: + description: 'Template version. Can be version + number, $Latest, or $Default. (Default: $Default).' + type: string + type: object + weightedCapacity: + description: Number of capacity units, which gives + the instance type a proportional weight to other + instance types. + type: string + type: object + type: array + type: object + type: object + placementGroup: + description: Name of the placement group into which you'll launch + your instances, if any. 
+ type: string + placementGroupRef: + description: Reference to a PlacementGroup in ec2 to populate + placementGroup. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + placementGroupSelector: + description: Selector for a PlacementGroup in ec2 to populate + placementGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + protectFromScaleIn: + description: |- + Whether newly launched instances + are automatically protected from termination by Amazon EC2 Auto Scaling when + scaling in. For more information about preventing instances from terminating + on scale in, see Using instance scale-in protection + in the Amazon EC2 Auto Scaling User Guide. + type: boolean + serviceLinkedRoleArn: + description: ARN of the service-linked role that the ASG will + use to call other AWS services + type: string + serviceLinkedRoleArnRef: + description: Reference to a Role in iam to populate serviceLinkedRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceLinkedRoleArnSelector: + description: Selector for a Role in iam to populate serviceLinkedRoleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + suspendedProcesses: + description: |- + List of processes to suspend for the Auto Scaling Group. The allowed values are Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions, AddToLoadBalancer, InstanceRefresh. + Note that if you suspend either the Launch or Terminate process types, it can prevent your Auto Scaling Group from functioning properly. + items: + type: string + type: array + x-kubernetes-list-type: set + tag: + description: Configuration block(s) containing resource tags. + See Tag below for more details. 
+ items: + properties: + key: + description: Key + type: string + propagateAtLaunch: + description: |- + Enables propagation of the tag to + Amazon EC2 instances launched via this ASG + type: boolean + value: + description: Value + type: string + type: object + type: array + terminationPolicies: + description: List of policies to decide how the instances in the + Auto Scaling Group should be terminated. The allowed values + are OldestInstance, NewestInstance, OldestLaunchConfiguration, + ClosestToNextInstanceHour, OldestLaunchTemplate, AllocationStrategy, + Default. Additionally, the ARN of a Lambda function can be specified + for custom termination policies. + items: + type: string + type: array + trafficSource: + description: Attaches one or more traffic sources to the specified + Auto Scaling group. + items: + properties: + identifier: + description: Identifies the traffic source. For Application + Load Balancers, Gateway Load Balancers, Network Load Balancers, + and VPC Lattice, this will be the Amazon Resource Name + (ARN) for a target group in this account and Region. For + Classic Load Balancers, this will be the name of the Classic + Load Balancer in this account and Region. + type: string + type: + description: |- + Provides additional context for the value of Identifier. + The following lists the valid values: + elb if identifier is the name of a Classic Load Balancer. + elbv2 if identifier is the ARN of an Application Load Balancer, Gateway Load Balancer, or Network Load Balancer target group. + vpc-lattice if identifier is the ARN of a VPC Lattice target group. + type: string + type: object + type: array + vpcZoneIdentifier: + description: List of subnet IDs to launch resources in. Subnets + automatically determine which availability zones the group will + reside. Conflicts with availability_zones. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + vpcZoneIdentifierRefs: + description: References to Subnet in ec2 to populate vpcZoneIdentifier. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcZoneIdentifierSelector: + description: Selector for a list of Subnet in ec2 to populate + vpcZoneIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + waitForCapacityTimeout: + description: |- + (See also Waiting + for Capacity below. + type: string + waitForElbCapacity: + description: |- + (Takes + precedence over min_elb_capacity behavior.) + (See also Waiting for Capacity below.) + type: number + warmPool: + description: |- + If this block is configured, add a Warm Pool + to the specified Auto Scaling group. Defined below + properties: + instanceReusePolicy: + description: Whether instances in the Auto Scaling group can + be returned to the warm pool on scale in. The default is + to terminate instances in the Auto Scaling group when the + group scales in. + properties: + reuseOnScaleIn: + description: Whether instances in the Auto Scaling group + can be returned to the warm pool on scale in. + type: boolean + type: object + maxGroupPreparedCapacity: + description: Total maximum number of instances that are allowed + to be in the warm pool or in any state except Terminated + for the Auto Scaling group. + type: number + minSize: + description: |- + Minimum size of the Auto Scaling Group. + (See also Waiting for Capacity below.) + type: number + poolState: + description: 'Sets the instance state to transition to after + the lifecycle hooks finish. Valid values are: Stopped (default), + Running or Hibernated.' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.maxSize is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.maxSize) + || (has(self.initProvider) && has(self.initProvider.maxSize))' + - message: spec.forProvider.minSize is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.minSize) + || (has(self.initProvider) && has(self.initProvider.minSize))' + status: + description: AutoscalingGroupStatus defines the observed state of AutoscalingGroup. + properties: + atProvider: + properties: + arn: + description: ARN for this Auto Scaling Group + type: string + availabilityZones: + description: A list of Availability Zones where instances in the + Auto Scaling group can be created. Used for launching into the + default VPC subnet in each Availability Zone when not using + the vpc_zone_identifier attribute, or for attaching a network + interface when an existing network interface ID is specified + in a launch template. Conflicts with vpc_zone_identifier. + items: + type: string + type: array + x-kubernetes-list-type: set + capacityRebalance: + description: Whether capacity rebalance is enabled. Otherwise, + capacity rebalance is disabled. + type: boolean + context: + description: Reserved. + type: string + defaultCooldown: + description: Amount of time, in seconds, after a scaling activity + completes before another scaling activity can start. + type: number + defaultInstanceWarmup: + description: Amount of time, in seconds, until a newly launched + instance can contribute to the Amazon CloudWatch metrics. 
This + delay lets an instance finish initializing before Amazon EC2 + Auto Scaling aggregates instance metrics, resulting in more + reliable usage data. Set this value equal to the amount of time + that it takes for resource consumption to become stable after + an instance reaches the InService state. (See Set the default + instance warmup for an Auto Scaling group) + type: number + desiredCapacity: + description: |- + Number of Amazon EC2 instances that + should be running in the group. (See also Waiting for + Capacity below.) + type: number + desiredCapacityType: + description: 'The unit of measurement for the value specified + for desired_capacity. Supported for attribute-based instance + type selection only. Valid values: "units", "vcpu", "memory-mib".' + type: string + enabledMetrics: + description: List of metrics to collect. The allowed values are + defined by the underlying AWS API. + items: + type: string + type: array + x-kubernetes-list-type: set + forceDelete: + description: |- + Allows deleting the Auto Scaling Group without waiting + for all instances in the pool to terminate. You can force an Auto Scaling Group to delete + even if it's in the process of scaling a resource. This bypasses that + behavior and potentially leaves resources dangling. + type: boolean + forceDeleteWarmPool: + description: Allows deleting the Auto Scaling Group without waiting + for all instances in the warm pool to terminate. + type: boolean + healthCheckGracePeriod: + description: Time (in seconds) after instance comes into service + before checking health. + type: number + healthCheckType: + description: '"EC2" or "ELB". Controls how health checking is + done.' + type: string + id: + description: Auto Scaling Group id. + type: string + ignoreFailedScalingActivities: + description: Whether to ignore failed Auto Scaling scaling activities + while waiting for capacity. The default is false -- failed scaling + activities cause errors to be returned. 
+ type: boolean + initialLifecycleHook: + description: |- + One or more + Lifecycle Hooks + to attach to the Auto Scaling Group before instances are launched. The + syntax is exactly the same as the separate + aws_autoscaling_lifecycle_hook + resource, without the autoscaling_group_name attribute. Please note that this will only work when creating + a new Auto Scaling Group. For all other use-cases, please use aws_autoscaling_lifecycle_hook resource. + items: + properties: + defaultResult: + type: string + heartbeatTimeout: + type: number + lifecycleTransition: + type: string + name: + description: Name of the Auto Scaling Group. Conflicts with + name_prefix. + type: string + notificationMetadata: + type: string + notificationTargetArn: + description: ARN for this Auto Scaling Group + type: string + roleArn: + description: ARN for this Auto Scaling Group + type: string + type: object + type: array + instanceMaintenancePolicy: + description: If this block is configured, add a instance maintenance + policy to the specified Auto Scaling group. Defined below. + properties: + maxHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that can be in service and healthy, or pending, to support + your workload when an instance refresh is in place, as a + percentage of the desired capacity of the Auto Scaling group. + Values must be between 100 and 200, defaults to 100. + type: number + minHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that must remain healthy during an instance refresh to allow + the operation to continue, as a percentage of the desired + capacity of the Auto Scaling group. Defaults to 90. + type: number + type: object + instanceRefresh: + description: |- + If this block is configured, start an + Instance Refresh + when this Auto Scaling Group is updated. Defined below. + properties: + preferences: + description: Override default parameters for Instance Refresh. 
+ properties: + alarmSpecification: + description: Alarm Specification for Instance Refresh. + properties: + alarms: + description: List of Cloudwatch alarms. If any of + these alarms goes into ALARM state, Instance Refresh + is failed. + items: + type: string + type: array + type: object + autoRollback: + description: Automatically rollback if instance refresh + fails. Defaults to false. This option may only be set + to true when specifying a launch_template or mixed_instances_policy. + type: boolean + checkpointDelay: + description: Number of seconds to wait after a checkpoint. + Defaults to 3600. + type: string + checkpointPercentages: + description: List of percentages for each checkpoint. + Values must be unique and in ascending order. To replace + all instances, the final number must be 100. + items: + type: number + type: array + instanceWarmup: + description: Number of seconds until a newly launched + instance is configured and ready to use. Default behavior + is to use the Auto Scaling Group's health check grace + period. + type: string + maxHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that can be in service and healthy, or pending, to support + your workload when an instance refresh is in place, + as a percentage of the desired capacity of the Auto + Scaling group. Values must be between 100 and 200, defaults + to 100. + type: number + minHealthyPercentage: + description: Amount of capacity in the Auto Scaling group + that must remain healthy during an instance refresh + to allow the operation to continue, as a percentage + of the desired capacity of the Auto Scaling group. Defaults + to 90. + type: number + scaleInProtectedInstances: + description: Behavior when encountering instances protected + from scale in are found. Available behaviors are Refresh, + Ignore, and Wait. Default is Ignore. + type: string + skipMatching: + description: Replace instances that already have your + desired configuration. 
Defaults to false. + type: boolean + standbyInstances: + description: Behavior when encountering instances in the + Standby state in are found. Available behaviors are + Terminate, Ignore, and Wait. Default is Ignore. + type: string + type: object + strategy: + description: Strategy to use for instance refresh. The only + allowed value is Rolling. See StartInstanceRefresh Action + for more information. + type: string + triggers: + description: Set of additional property names that will trigger + an Instance Refresh. A refresh will always be triggered + by a change in any of launch_configuration, launch_template, + or mixed_instances_policy. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + launchConfiguration: + description: Name of the launch configuration to use. + type: string + launchTemplate: + description: Nested argument with Launch template specification + to use to launch instances. See Launch Template below for more + details. + properties: + id: + description: ID of the launch template. Conflicts with name. + type: string + name: + description: Name of the launch template. Conflicts with id. + type: string + version: + description: 'Template version. Can be version number, $Latest, + or $Default. (Default: $Default).' + type: string + type: object + loadBalancers: + description: |- + List of elastic load balancer names to add to the autoscaling + group names. Only valid for classic load balancers. For ALBs, use target_group_arns instead. To remove all load balancer attachments an empty list should be specified. + items: + type: string + type: array + x-kubernetes-list-type: set + maxInstanceLifetime: + description: Maximum amount of time, in seconds, that an instance + can be in service, values must be either equal to 0 or between + 86400 and 31536000 seconds. + type: number + maxSize: + description: Maximum size of the Auto Scaling Group. 
+ type: number + metricsGranularity: + description: Granularity to associate with the metrics to collect. + The only valid value is 1Minute. Default is 1Minute. + type: string + minElbCapacity: + description: |- + Updates will not wait on ELB instance number changes. + (See also Waiting for Capacity below.) + type: number + minSize: + description: |- + Minimum size of the Auto Scaling Group. + (See also Waiting for Capacity below.) + type: number + mixedInstancesPolicy: + description: Configuration block containing settings to define + launch targets for Auto Scaling groups. See Mixed Instances + Policy below for more details. + properties: + instancesDistribution: + description: Nested argument containing settings on how to + mix on-demand and Spot instances in the Auto Scaling group. + Defined below. + properties: + onDemandAllocationStrategy: + description: 'Strategy to use when launching on-demand + instances. Valid values: prioritized, lowest-price. + Default: prioritized.' + type: string + onDemandBaseCapacity: + description: 'Absolute minimum amount of desired capacity + that must be fulfilled by on-demand instances. Default: + 0.' + type: number + onDemandPercentageAboveBaseCapacity: + description: 'Percentage split between on-demand and Spot + instances above the base on-demand capacity. Default: + 100.' + type: number + spotAllocationStrategy: + description: 'How to allocate capacity across the Spot + pools. Valid values: lowest-price, capacity-optimized, + capacity-optimized-prioritized, and price-capacity-optimized. + Default: lowest-price.' + type: string + spotInstancePools: + description: 'Number of Spot pools per availability zone + to allocate capacity. EC2 Auto Scaling selects the cheapest + Spot pools and evenly allocates Spot capacity across + the number of Spot pools that you specify. Only available + with spot_allocation_strategy set to lowest-price. Otherwise + it must be set to 0, if it has been defined before. + Default: 2.' 
+ type: number + spotMaxPrice: + description: 'Maximum price per unit hour that the user + is willing to pay for the Spot instances. Default: an + empty string which means the on-demand price.' + type: string + type: object + launchTemplate: + description: Nested argument containing launch template settings + along with the overrides to specify multiple instance types + and weights. Defined below. + properties: + launchTemplateSpecification: + description: Nested argument defines the Launch Template. + Defined below. + properties: + launchTemplateId: + description: ID of the launch template. Conflicts + with launch_template_name. + type: string + launchTemplateName: + description: Name of the launch template. Conflicts + with launch_template_id. + type: string + version: + description: 'Template version. Can be version number, + $Latest, or $Default. (Default: $Default).' + type: string + type: object + override: + description: List of nested arguments provides the ability + to specify multiple instance types. This will override + the same parameter in the launch template. For on-demand + instances, Auto Scaling considers the order of preference + of instance types to launch based on the order specified + in the overrides list. Defined below. + items: + properties: + instanceRequirements: + description: Override the instance type in the Launch + Template with instance types that satisfy the + requirements. + properties: + acceleratorCount: + description: Block describing the minimum and + maximum number of accelerators (GPUs, FPGAs, + or AWS Inferentia chips). Default is no minimum + or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorManufacturers: + description: List of accelerator manufacturer + names. Default is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorNames: + description: List of accelerator names. 
Default
+                                            is any accelerator.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: set
+                                          acceleratorTotalMemoryMib:
+                                            description: Block describing the minimum and
+                                              maximum total memory of the accelerators.
+                                              Default is no minimum or maximum.
+                                            properties:
+                                              max:
+                                                description: Maximum.
+                                                type: number
+                                              min:
+                                                description: Minimum.
+                                                type: number
+                                            type: object
+                                          acceleratorTypes:
+                                            description: List of accelerator types. Default
+                                              is any accelerator type.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: set
+                                          allowedInstanceTypes:
+                                            description: 'List of instance types to apply
+                                              your specified attributes against. All other
+                                              instance types are ignored, even if they match
+                                              your specified attributes. You can use strings
+                                              with one or more wild cards, represented by
+                                              an asterisk (*), to allow an instance type,
+                                              size, or generation. The following are examples:
+                                              m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example,
+                                              if you specify c5*, you are allowing the entire
+                                              C5 instance family, which includes all C5a
+                                              and C5n instance types. If you specify m5a.*,
+                                              you are allowing all the M5a instance types,
+                                              but not the M5n instance types. Maximum of
+                                              400 entries in the list; each entry is limited
+                                              to 30 characters. Default is all instance
+                                              types.'
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: set
+                                          bareMetal:
+                                            description: Indicate whether bare metal instance
+                                              types should be included, excluded, or required.
+                                              Default is excluded.
+                                            type: string
+                                          baselineEbsBandwidthMbps:
+                                            description: Block describing the minimum and
+                                              maximum baseline EBS bandwidth, in Mbps. Default
+                                              is no minimum or maximum.
+                                            properties:
+                                              max:
+                                                description: Maximum.
+                                                type: number
+                                              min:
+                                                description: Minimum.
+                                                type: number
+                                            type: object
+                                          burstablePerformance:
+                                            description: Indicate whether burstable performance
+                                              instance types should be included, excluded,
+                                              or required. 
+ type: string + cpuManufacturers: + description: List of CPU manufacturer names. + Default is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + excludedInstanceTypes: + description: 'List of instance types to exclude. + You can use strings with one or more wild + cards, represented by an asterisk (*), to + exclude an instance type, size, or generation. + The following are examples: m5.8xlarge, c5*.*, + m5a.*, r*, *3*. For example, if you specify + c5*, you are excluding the entire C5 instance + family, which includes all C5a and C5n instance + types. If you specify m5a.*, you are excluding + all the M5a instance types, but not the M5n + instance types. Maximum of 400 entries in + the list; each entry is limited to 30 characters. + Default is no excluded instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + instanceGenerations: + description: List of instance generation names. + Default is any generation. + items: + type: string + type: array + x-kubernetes-list-type: set + localStorage: + description: Indicate whether instance types + with local storage volumes are included, excluded, + or required. Default is included. + type: string + localStorageTypes: + description: List of local storage type names. + Default any storage type. + items: + type: string + type: array + x-kubernetes-list-type: set + memoryGibPerVcpu: + description: Block describing the minimum and + maximum amount of memory (GiB) per vCPU. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + memoryMib: + description: Block describing the minimum and + maximum amount of memory (MiB). Default is + no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. 
+ type: number + type: object + networkBandwidthGbps: + description: Block describing the minimum and + maximum amount of network bandwidth, in gigabits + per second (Gbps). Default is no minimum or + maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkInterfaceCount: + description: Block describing the minimum and + maximum number of network interfaces. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + onDemandMaxPricePercentageOverLowestPrice: + description: Price protection threshold for + On-Demand Instances. This is the maximum you’ll + pay for an On-Demand Instance, expressed as + a percentage higher than the cheapest M, C, + or R instance type with your specified attributes. + When Amazon EC2 Auto Scaling selects instance + types with your attributes, we will exclude + instance types whose price is higher than + your threshold. The parameter accepts an integer, + which Amazon EC2 Auto Scaling interprets as + a percentage. To turn off price protection, + specify a high value, such as 999999. Default + is 20. + type: number + requireHibernateSupport: + description: Indicate whether instance types + must support On-Demand Instance Hibernation, + either true or false. Default is false. + type: boolean + spotMaxPricePercentageOverLowestPrice: + description: Price protection threshold for + Spot Instances. This is the maximum you’ll + pay for a Spot Instance, expressed as a percentage + higher than the cheapest M, C, or R instance + type with your specified attributes. When + Amazon EC2 Auto Scaling selects instance types + with your attributes, we will exclude instance + types whose price is higher than your threshold. + The parameter accepts an integer, which Amazon + EC2 Auto Scaling interprets as a percentage. 
+ To turn off price protection, specify a high + value, such as 999999. Default is 100. + type: number + totalLocalStorageGb: + description: Block describing the minimum and + maximum total local storage (GB). Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + vcpuCount: + description: Block describing the minimum and + maximum number of vCPUs. Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + type: object + instanceType: + description: Override the instance type in the Launch + Template. + type: string + launchTemplateSpecification: + description: Nested argument defines the Launch + Template. Defined below. + properties: + launchTemplateId: + description: ID of the launch template. Conflicts + with launch_template_name. + type: string + launchTemplateName: + description: Name of the launch template. Conflicts + with launch_template_id. + type: string + version: + description: 'Template version. Can be version + number, $Latest, or $Default. (Default: $Default).' + type: string + type: object + weightedCapacity: + description: Number of capacity units, which gives + the instance type a proportional weight to other + instance types. + type: string + type: object + type: array + type: object + type: object + placementGroup: + description: Name of the placement group into which you'll launch + your instances, if any. + type: string + predictedCapacity: + description: Predicted capacity of the group. + type: number + protectFromScaleIn: + description: |- + Whether newly launched instances + are automatically protected from termination by Amazon EC2 Auto Scaling when + scaling in. For more information about preventing instances from terminating + on scale in, see Using instance scale-in protection + in the Amazon EC2 Auto Scaling User Guide. 
+ type: boolean + serviceLinkedRoleArn: + description: ARN of the service-linked role that the ASG will + use to call other AWS services + type: string + suspendedProcesses: + description: |- + List of processes to suspend for the Auto Scaling Group. The allowed values are Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions, AddToLoadBalancer, InstanceRefresh. + Note that if you suspend either the Launch or Terminate process types, it can prevent your Auto Scaling Group from functioning properly. + items: + type: string + type: array + x-kubernetes-list-type: set + tag: + description: Configuration block(s) containing resource tags. + See Tag below for more details. + items: + properties: + key: + description: Key + type: string + propagateAtLaunch: + description: |- + Enables propagation of the tag to + Amazon EC2 instances launched via this ASG + type: boolean + value: + description: Value + type: string + type: object + type: array + targetGroupArns: + description: Set of aws_alb_target_group ARNs, for use with Application + or Network Load Balancing. To remove all target group attachments + an empty list should be specified. + items: + type: string + type: array + x-kubernetes-list-type: set + terminationPolicies: + description: List of policies to decide how the instances in the + Auto Scaling Group should be terminated. The allowed values + are OldestInstance, NewestInstance, OldestLaunchConfiguration, + ClosestToNextInstanceHour, OldestLaunchTemplate, AllocationStrategy, + Default. Additionally, the ARN of a Lambda function can be specified + for custom termination policies. + items: + type: string + type: array + trafficSource: + description: Attaches one or more traffic sources to the specified + Auto Scaling group. + items: + properties: + identifier: + description: Identifies the traffic source. 
For Application + Load Balancers, Gateway Load Balancers, Network Load Balancers, + and VPC Lattice, this will be the Amazon Resource Name + (ARN) for a target group in this account and Region. For + Classic Load Balancers, this will be the name of the Classic + Load Balancer in this account and Region. + type: string + type: + description: |- + Provides additional context for the value of Identifier. + The following lists the valid values: + elb if identifier is the name of a Classic Load Balancer. + elbv2 if identifier is the ARN of an Application Load Balancer, Gateway Load Balancer, or Network Load Balancer target group. + vpc-lattice if identifier is the ARN of a VPC Lattice target group. + type: string + type: object + type: array + vpcZoneIdentifier: + description: List of subnet IDs to launch resources in. Subnets + automatically determine which availability zones the group will + reside. Conflicts with availability_zones. + items: + type: string + type: array + x-kubernetes-list-type: set + waitForCapacityTimeout: + description: |- + (See also Waiting + for Capacity below. + type: string + waitForElbCapacity: + description: |- + (Takes + precedence over min_elb_capacity behavior.) + (See also Waiting for Capacity below.) + type: number + warmPool: + description: |- + If this block is configured, add a Warm Pool + to the specified Auto Scaling group. Defined below + properties: + instanceReusePolicy: + description: Whether instances in the Auto Scaling group can + be returned to the warm pool on scale in. The default is + to terminate instances in the Auto Scaling group when the + group scales in. + properties: + reuseOnScaleIn: + description: Whether instances in the Auto Scaling group + can be returned to the warm pool on scale in. + type: boolean + type: object + maxGroupPreparedCapacity: + description: Total maximum number of instances that are allowed + to be in the warm pool or in any state except Terminated + for the Auto Scaling group. 
+ type: number + minSize: + description: |- + Minimum size of the Auto Scaling Group. + (See also Waiting for Capacity below.) + type: number + poolState: + description: 'Sets the instance state to transition to after + the lifecycle hooks finish. Valid values are: Stopped (default), + Running or Hibernated.' + type: string + type: object + warmPoolSize: + description: Current size of the warm pool. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/autoscaling.aws.upbound.io_grouptags.yaml b/package/crds/autoscaling.aws.upbound.io_grouptags.yaml index f6ffd21516..960cd443c1 100644 --- a/package/crds/autoscaling.aws.upbound.io_grouptags.yaml +++ b/package/crds/autoscaling.aws.upbound.io_grouptags.yaml @@ -545,3 +545,524 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GroupTag is the Schema for the GroupTags API. Manages an individual + Autoscaling Group tag + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GroupTagSpec defines the desired state of GroupTag + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoscalingGroupName: + description: Name of the Autoscaling Group to apply the tag to. + type: string + autoscalingGroupNameRef: + description: Reference to a AutoscalingGroup in autoscaling to + populate autoscalingGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + autoscalingGroupNameSelector: + description: Selector for a AutoscalingGroup in autoscaling to + populate autoscalingGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tag: + description: Tag to create. The tag block is documented below. + properties: + key: + description: Tag name. + type: string + propagateAtLaunch: + description: Whether to propagate the tags to instances launched + by the ASG. + type: boolean + value: + description: Tag value. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoscalingGroupName: + description: Name of the Autoscaling Group to apply the tag to. + type: string + autoscalingGroupNameRef: + description: Reference to a AutoscalingGroup in autoscaling to + populate autoscalingGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + autoscalingGroupNameSelector: + description: Selector for a AutoscalingGroup in autoscaling to + populate autoscalingGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tag: + description: Tag to create. The tag block is documented below. + properties: + key: + description: Tag name. + type: string + propagateAtLaunch: + description: Whether to propagate the tags to instances launched + by the ASG. + type: boolean + value: + description: Tag value. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.tag is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tag) + || (has(self.initProvider) && has(self.initProvider.tag))' + status: + description: GroupTagStatus defines the observed state of GroupTag. + properties: + atProvider: + properties: + autoscalingGroupName: + description: Name of the Autoscaling Group to apply the tag to. + type: string + id: + description: ASG name and key, separated by a comma (,) + type: string + tag: + description: Tag to create. The tag block is documented below. + properties: + key: + description: Tag name. + type: string + propagateAtLaunch: + description: Whether to propagate the tags to instances launched + by the ASG. + type: boolean + value: + description: Tag value. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/autoscaling.aws.upbound.io_launchconfigurations.yaml b/package/crds/autoscaling.aws.upbound.io_launchconfigurations.yaml index 62adbe3f8f..51be037e7b 100644 --- a/package/crds/autoscaling.aws.upbound.io_launchconfigurations.yaml +++ b/package/crds/autoscaling.aws.upbound.io_launchconfigurations.yaml @@ -821,3 +821,791 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LaunchConfiguration is the Schema for the LaunchConfigurations + API. Provides a resource to create a new launch configuration, used for + autoscaling groups. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LaunchConfigurationSpec defines the desired state of LaunchConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + associatePublicIpAddress: + description: Associate a public ip address with an instance in + a VPC. + type: boolean + ebsBlockDevice: + description: Additional EBS block devices to attach to the instance. + See Block Devices below for details. + items: + properties: + deleteOnTermination: + description: |- + Whether the volume should be destroyed + on instance termination (Default: true). + type: boolean + deviceName: + description: The name of the device to mount. + type: string + encrypted: + description: Whether the volume should be encrypted or not. + Defaults to false. + type: boolean + iops: + description: |- + The amount of provisioned + IOPS. + This must be set with a volume_type of "io1". + type: number + noDevice: + description: Whether the device in the block device mapping + of the AMI is suppressed. + type: boolean + snapshotId: + description: The Snapshot ID to mount. + type: string + throughput: + description: The throughput (MiBps) to provision for a gp3 + volume. 
+ type: number + volumeSize: + description: The size of the volume in gigabytes. + type: number + volumeType: + description: The type of volume. Can be standard, gp2, gp3, + st1, sc1 or io1. + type: string + type: object + type: array + ebsOptimized: + description: If true, the launched EC2 instance will be EBS-optimized. + type: boolean + enableMonitoring: + description: Enables/disables detailed monitoring. This is enabled + by default. + type: boolean + ephemeralBlockDevice: + description: Customize Ephemeral (also known as "Instance Store") + volumes on the instance. See Block Devices below for details. + items: + properties: + deviceName: + description: The name of the block device to mount on the + instance. + type: string + noDevice: + description: Whether the device in the block device mapping + of the AMI is suppressed. + type: boolean + virtualName: + description: The Instance Store Device Name. + type: string + type: object + type: array + iamInstanceProfile: + description: The name attribute of the IAM instance profile to + associate with launched instances. + type: string + imageId: + description: The EC2 image ID to launch. + type: string + instanceType: + description: The size of instance to launch. + type: string + keyName: + description: The key name that should be used for the instance. + type: string + metadataOptions: + description: The metadata options for the instance. + properties: + httpEndpoint: + description: 'The state of the metadata service: enabled, + disabled.' + type: string + httpPutResponseHopLimit: + description: The desired HTTP PUT response hop limit for instance + metadata requests. + type: number + httpTokens: + description: 'If session tokens are required: optional, required.' + type: string + type: object + placementTenancy: + description: The tenancy of the instance. Valid values are default + or dedicated, see AWS's Create Launch Configuration for more + details. 
+ type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rootBlockDevice: + description: Customize details about the root block device of + the instance. See Block Devices below for details. + properties: + deleteOnTermination: + description: Whether the volume should be destroyed on instance + termination. Defaults to true. + type: boolean + encrypted: + description: Whether the volume should be encrypted or not. + Defaults to false. + type: boolean + iops: + description: The amount of provisioned IOPS. This must be + set with a volume_type of io1. + type: number + throughput: + description: The throughput (MiBps) to provision for a gp3 + volume. + type: number + volumeSize: + description: The size of the volume in gigabytes. + type: number + volumeType: + description: The type of volume. Can be standard, gp2, gp3, + st1, sc1 or io1. + type: string + type: object + securityGroups: + description: A list of associated security group IDS. + items: + type: string + type: array + x-kubernetes-list-type: set + spotPrice: + description: The maximum price to use for reserving spot instances. + type: string + userData: + description: The user data to provide when launching the instance. + Do not pass gzip-compressed data via this argument; see user_data_base64 + instead. + type: string + userDataBase64: + description: Can be used instead of user_data to pass base64-encoded + binary data directly. Use this instead of user_data whenever + the value is not a valid UTF-8 string. For example, gzip-encoded + user data must be base64-encoded and passed via this argument + to avoid corruption. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + associatePublicIpAddress: + description: Associate a public ip address with an instance in + a VPC. + type: boolean + ebsBlockDevice: + description: Additional EBS block devices to attach to the instance. + See Block Devices below for details. + items: + properties: + deleteOnTermination: + description: |- + Whether the volume should be destroyed + on instance termination (Default: true). + type: boolean + deviceName: + description: The name of the device to mount. + type: string + encrypted: + description: Whether the volume should be encrypted or not. + Defaults to false. + type: boolean + iops: + description: |- + The amount of provisioned + IOPS. + This must be set with a volume_type of "io1". + type: number + noDevice: + description: Whether the device in the block device mapping + of the AMI is suppressed. + type: boolean + snapshotId: + description: The Snapshot ID to mount. + type: string + throughput: + description: The throughput (MiBps) to provision for a gp3 + volume. + type: number + volumeSize: + description: The size of the volume in gigabytes. + type: number + volumeType: + description: The type of volume. Can be standard, gp2, gp3, + st1, sc1 or io1. + type: string + type: object + type: array + ebsOptimized: + description: If true, the launched EC2 instance will be EBS-optimized. + type: boolean + enableMonitoring: + description: Enables/disables detailed monitoring. This is enabled + by default. + type: boolean + ephemeralBlockDevice: + description: Customize Ephemeral (also known as "Instance Store") + volumes on the instance. 
See Block Devices below for details. + items: + properties: + deviceName: + description: The name of the block device to mount on the + instance. + type: string + noDevice: + description: Whether the device in the block device mapping + of the AMI is suppressed. + type: boolean + virtualName: + description: The Instance Store Device Name. + type: string + type: object + type: array + iamInstanceProfile: + description: The name attribute of the IAM instance profile to + associate with launched instances. + type: string + imageId: + description: The EC2 image ID to launch. + type: string + instanceType: + description: The size of instance to launch. + type: string + keyName: + description: The key name that should be used for the instance. + type: string + metadataOptions: + description: The metadata options for the instance. + properties: + httpEndpoint: + description: 'The state of the metadata service: enabled, + disabled.' + type: string + httpPutResponseHopLimit: + description: The desired HTTP PUT response hop limit for instance + metadata requests. + type: number + httpTokens: + description: 'If session tokens are required: optional, required.' + type: string + type: object + placementTenancy: + description: The tenancy of the instance. Valid values are default + or dedicated, see AWS's Create Launch Configuration for more + details. + type: string + rootBlockDevice: + description: Customize details about the root block device of + the instance. See Block Devices below for details. + properties: + deleteOnTermination: + description: Whether the volume should be destroyed on instance + termination. Defaults to true. + type: boolean + encrypted: + description: Whether the volume should be encrypted or not. + Defaults to false. + type: boolean + iops: + description: The amount of provisioned IOPS. This must be + set with a volume_type of io1. + type: number + throughput: + description: The throughput (MiBps) to provision for a gp3 + volume. 
+ type: number + volumeSize: + description: The size of the volume in gigabytes. + type: number + volumeType: + description: The type of volume. Can be standard, gp2, gp3, + st1, sc1 or io1. + type: string + type: object + securityGroups: + description: A list of associated security group IDS. + items: + type: string + type: array + x-kubernetes-list-type: set + spotPrice: + description: The maximum price to use for reserving spot instances. + type: string + userData: + description: The user data to provide when launching the instance. + Do not pass gzip-compressed data via this argument; see user_data_base64 + instead. + type: string + userDataBase64: + description: Can be used instead of user_data to pass base64-encoded + binary data directly. Use this instead of user_data whenever + the value is not a valid UTF-8 string. For example, gzip-encoded + user data must be base64-encoded and passed via this argument + to avoid corruption. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.imageId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.imageId) + || (has(self.initProvider) && has(self.initProvider.imageId))' + - message: spec.forProvider.instanceType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instanceType) + || (has(self.initProvider) && has(self.initProvider.instanceType))' + status: + description: LaunchConfigurationStatus defines the observed state of LaunchConfiguration. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name of the launch configuration. + type: string + associatePublicIpAddress: + description: Associate a public ip address with an instance in + a VPC. + type: boolean + ebsBlockDevice: + description: Additional EBS block devices to attach to the instance. + See Block Devices below for details. + items: + properties: + deleteOnTermination: + description: |- + Whether the volume should be destroyed + on instance termination (Default: true). + type: boolean + deviceName: + description: The name of the device to mount. 
+ type: string + encrypted: + description: Whether the volume should be encrypted or not. + Defaults to false. + type: boolean + iops: + description: |- + The amount of provisioned + IOPS. + This must be set with a volume_type of "io1". + type: number + noDevice: + description: Whether the device in the block device mapping + of the AMI is suppressed. + type: boolean + snapshotId: + description: The Snapshot ID to mount. + type: string + throughput: + description: The throughput (MiBps) to provision for a gp3 + volume. + type: number + volumeSize: + description: The size of the volume in gigabytes. + type: number + volumeType: + description: The type of volume. Can be standard, gp2, gp3, + st1, sc1 or io1. + type: string + type: object + type: array + ebsOptimized: + description: If true, the launched EC2 instance will be EBS-optimized. + type: boolean + enableMonitoring: + description: Enables/disables detailed monitoring. This is enabled + by default. + type: boolean + ephemeralBlockDevice: + description: Customize Ephemeral (also known as "Instance Store") + volumes on the instance. See Block Devices below for details. + items: + properties: + deviceName: + description: The name of the block device to mount on the + instance. + type: string + noDevice: + description: Whether the device in the block device mapping + of the AMI is suppressed. + type: boolean + virtualName: + description: The Instance Store Device Name. + type: string + type: object + type: array + iamInstanceProfile: + description: The name attribute of the IAM instance profile to + associate with launched instances. + type: string + id: + description: The ID of the launch configuration. + type: string + imageId: + description: The EC2 image ID to launch. + type: string + instanceType: + description: The size of instance to launch. + type: string + keyName: + description: The key name that should be used for the instance. 
+ type: string + metadataOptions: + description: The metadata options for the instance. + properties: + httpEndpoint: + description: 'The state of the metadata service: enabled, + disabled.' + type: string + httpPutResponseHopLimit: + description: The desired HTTP PUT response hop limit for instance + metadata requests. + type: number + httpTokens: + description: 'If session tokens are required: optional, required.' + type: string + type: object + placementTenancy: + description: The tenancy of the instance. Valid values are default + or dedicated, see AWS's Create Launch Configuration for more + details. + type: string + rootBlockDevice: + description: Customize details about the root block device of + the instance. See Block Devices below for details. + properties: + deleteOnTermination: + description: Whether the volume should be destroyed on instance + termination. Defaults to true. + type: boolean + encrypted: + description: Whether the volume should be encrypted or not. + Defaults to false. + type: boolean + iops: + description: The amount of provisioned IOPS. This must be + set with a volume_type of io1. + type: number + throughput: + description: The throughput (MiBps) to provision for a gp3 + volume. + type: number + volumeSize: + description: The size of the volume in gigabytes. + type: number + volumeType: + description: The type of volume. Can be standard, gp2, gp3, + st1, sc1 or io1. + type: string + type: object + securityGroups: + description: A list of associated security group IDS. + items: + type: string + type: array + x-kubernetes-list-type: set + spotPrice: + description: The maximum price to use for reserving spot instances. + type: string + userData: + description: The user data to provide when launching the instance. + Do not pass gzip-compressed data via this argument; see user_data_base64 + instead. + type: string + userDataBase64: + description: Can be used instead of user_data to pass base64-encoded + binary data directly. 
Use this instead of user_data whenever + the value is not a valid UTF-8 string. For example, gzip-encoded + user data must be base64-encoded and passed via this argument + to avoid corruption. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/autoscaling.aws.upbound.io_policies.yaml b/package/crds/autoscaling.aws.upbound.io_policies.yaml index 9997e33d35..6c39bc4ea2 100644 --- a/package/crds/autoscaling.aws.upbound.io_policies.yaml +++ b/package/crds/autoscaling.aws.upbound.io_policies.yaml @@ -2094,3 +2094,1872 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Policy is the Schema for the Policys API. Provides an AutoScaling + Scaling Group resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PolicySpec defines the desired state of Policy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + adjustmentType: + description: Whether the adjustment is an absolute number or a + percentage of the current capacity. Valid values are ChangeInCapacity, + ExactCapacity, and PercentChangeInCapacity. + type: string + autoscalingGroupName: + description: Name of the autoscaling group. + type: string + autoscalingGroupNameRef: + description: Reference to a AutoscalingGroup in autoscaling to + populate autoscalingGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + autoscalingGroupNameSelector: + description: Selector for a AutoscalingGroup in autoscaling to + populate autoscalingGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cooldown: + description: Amount of time, in seconds, after a scaling activity + completes and before the next scaling activity can start. + type: number + enabled: + description: 'Whether the scaling policy is enabled or disabled. + Default: true.' + type: boolean + estimatedInstanceWarmup: + description: Estimated time, in seconds, until a newly launched + instance will contribute CloudWatch metrics. 
Without a value, + AWS will default to the group's specified cooldown period. + type: number + metricAggregationType: + description: Aggregation type for the policy's metrics. Valid + values are "Minimum", "Maximum", and "Average". Without a value, + AWS will treat the aggregation type as "Average". + type: string + minAdjustmentMagnitude: + description: Minimum value to scale by when adjustment_type is + set to PercentChangeInCapacity. + type: number + policyType: + description: Policy type, either "SimpleScaling", "StepScaling", + "TargetTrackingScaling", or "PredictiveScaling". If this value + isn't provided, AWS will default to "SimpleScaling." + type: string + predictiveScalingConfiguration: + description: Predictive scaling policy configuration to use with + Amazon EC2 Auto Scaling. + properties: + maxCapacityBreachBehavior: + description: Defines the behavior that should be applied if + the forecast capacity approaches or exceeds the maximum + capacity of the Auto Scaling group. Valid values are HonorMaxCapacity + or IncreaseMaxCapacity. Default is HonorMaxCapacity. + type: string + maxCapacityBuffer: + description: Size of the capacity buffer to use when the forecast + capacity is close to or exceeds the maximum capacity. Valid + range is 0 to 100. If set to 0, Amazon EC2 Auto Scaling + may scale capacity higher than the maximum capacity to equal + but not exceed forecast capacity. + type: string + metricSpecification: + description: This structure includes the metrics and target + utilization to use for predictive scaling. + properties: + customizedCapacityMetricSpecification: + description: Customized capacity metric specification. + The field is only valid when you use customized_load_metric_specification + properties: + metricDataQueries: + description: List of up to 10 structures that defines + custom capacity metric in predictive scaling policy + items: + properties: + expression: + description: Math expression used on the returned + metric. 
You must specify either expression + or metric_stat, but not both. + type: string + id: + description: Short name for the metric used + in predictive scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch + metric to be used in predictive scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the + CloudWatch metric to return, including + the metric name, namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to + return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether + to return the timestamps and raw data values + of this metric, the default is true + type: boolean + type: object + type: array + type: object + customizedLoadMetricSpecification: + description: Customized load metric specification. + properties: + metricDataQueries: + description: List of up to 10 structures that defines + custom load metric in predictive scaling policy + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression + or metric_stat, but not both. + type: string + id: + description: Short name for the metric used + in predictive scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. 
+ type: string + metricStat: + description: Structure that defines CloudWatch + metric to be used in predictive scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the + CloudWatch metric to return, including + the metric name, namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to + return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether + to return the timestamps and raw data values + of this metric, the default is true + type: boolean + type: object + type: array + type: object + customizedScalingMetricSpecification: + description: Customized scaling metric specification. + properties: + metricDataQueries: + description: List of up to 10 structures that defines + custom scaling metric in predictive scaling policy + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression + or metric_stat, but not both. + type: string + id: + description: Short name for the metric used + in predictive scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch + metric to be used in predictive scaling policy. + You must specify either expression or metric_stat, + but not both. 
+ properties: + metric: + description: Structure that defines the + CloudWatch metric to return, including + the metric name, namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to + return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether + to return the timestamps and raw data values + of this metric, the default is true + type: boolean + type: object + type: array + type: object + predefinedLoadMetricSpecification: + description: Predefined load metric specification. + properties: + predefinedMetricType: + description: Metric type. Valid values are ASGTotalCPUUtilization, + ASGTotalNetworkIn, ASGTotalNetworkOut, or ALBTargetGroupRequestCount. + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which + to determine the request count served by your Auto + Scaling group. You create the resource label by + appending the final portion of the load balancer + ARN and the final portion of the target group ARN + into a single value, separated by a forward slash + (/). Refer to PredefinedMetricSpecification for + more information. + type: string + type: object + predefinedMetricPairSpecification: + description: Metric pair specification from which Amazon + EC2 Auto Scaling determines the appropriate scaling + metric and load metric to use. + properties: + predefinedMetricType: + description: 'Which metrics to use. 
There are two + different types of metrics for each metric type: + one is a load metric and one is a scaling metric. + For example, if the metric type is ASGCPUUtilization, + the Auto Scaling group''s total CPU metric is used + as the load metric, and the average CPU metric is + used for the scaling metric. Valid values are ASGCPUUtilization, + ASGNetworkIn, ASGNetworkOut, or ALBRequestCount.' + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which + to determine the request count served by your Auto + Scaling group. You create the resource label by + appending the final portion of the load balancer + ARN and the final portion of the target group ARN + into a single value, separated by a forward slash + (/). Refer to PredefinedMetricSpecification for + more information. + type: string + type: object + predefinedScalingMetricSpecification: + description: Predefined scaling metric specification. + properties: + predefinedMetricType: + description: Describes a scaling metric for a predictive + scaling policy. Valid values are ASGAverageCPUUtilization, + ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which + to determine the request count served by your Auto + Scaling group. You create the resource label by + appending the final portion of the load balancer + ARN and the final portion of the target group ARN + into a single value, separated by a forward slash + (/). Refer to PredefinedMetricSpecification for + more information. + type: string + type: object + targetValue: + description: Target value for the metric. + type: number + type: object + mode: + description: Predictive scaling mode. Valid values are ForecastAndScale + and ForecastOnly. Default is ForecastOnly. 
+ type: string + schedulingBufferTime: + description: Amount of time, in seconds, by which the instance + launch time can be advanced. Minimum is 0. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + scalingAdjustment: + description: Number of instances by which to scale. adjustment_type + determines the interpretation of this number (e.g., as an absolute + number or as a percentage of the existing Auto Scaling group + size). A positive increment adds to the current capacity and + a negative value removes from the current capacity. + type: number + stepAdjustment: + description: |- + Set of adjustments that manage + group scaling. These have the following structure: + items: + properties: + metricIntervalLowerBound: + description: |- + Lower bound for the + difference between the alarm threshold and the CloudWatch metric. + Without a value, AWS will treat this bound as negative infinity. + type: string + metricIntervalUpperBound: + description: |- + Upper bound for the + difference between the alarm threshold and the CloudWatch metric. + Without a value, AWS will treat this bound as positive infinity. The upper bound + must be greater than the lower bound. + type: string + scalingAdjustment: + description: Number of instances by which to scale. adjustment_type + determines the interpretation of this number (e.g., as + an absolute number or as a percentage of the existing + Auto Scaling group size). A positive increment adds to + the current capacity and a negative value removes from + the current capacity. + type: number + type: object + type: array + targetTrackingConfiguration: + description: 'Target tracking policy. These have the following + structure:' + properties: + customizedMetricSpecification: + description: Customized metric. Conflicts with predefined_metric_specification. + properties: + metricDimension: + description: Dimensions of the metric. 
+ items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + metrics: + description: Metrics to include, as a metric data query. + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression or + metric_stat, but not both. + type: string + id: + description: Short name for the metric used in predictive + scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch metric + to be used in predictive scaling policy. You must + specify either expression or metric_stat, but + not both. + properties: + metric: + description: Structure that defines the CloudWatch + metric to return, including the metric name, + namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether to return + the timestamps and raw data values of this metric, + the default is true + type: boolean + type: object + type: array + namespace: + description: Namespace of the metric. + type: string + statistic: + description: Statistic of the metric. + type: string + unit: + description: Unit of the metric. 
+ type: string + type: object + disableScaleIn: + description: Whether scale in by the target tracking policy + is disabled. + type: boolean + predefinedMetricSpecification: + description: Predefined metric. Conflicts with customized_metric_specification. + properties: + predefinedMetricType: + description: Describes a scaling metric for a predictive + scaling policy. Valid values are ASGAverageCPUUtilization, + ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which to + determine the request count served by your Auto Scaling + group. You create the resource label by appending the + final portion of the load balancer ARN and the final + portion of the target group ARN into a single value, + separated by a forward slash (/). Refer to PredefinedMetricSpecification + for more information. + type: string + type: object + targetValue: + description: Target value for the metric. + type: number + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + adjustmentType: + description: Whether the adjustment is an absolute number or a + percentage of the current capacity. Valid values are ChangeInCapacity, + ExactCapacity, and PercentChangeInCapacity. 
+ type: string + cooldown: + description: Amount of time, in seconds, after a scaling activity + completes and before the next scaling activity can start. + type: number + enabled: + description: 'Whether the scaling policy is enabled or disabled. + Default: true.' + type: boolean + estimatedInstanceWarmup: + description: Estimated time, in seconds, until a newly launched + instance will contribute CloudWatch metrics. Without a value, + AWS will default to the group's specified cooldown period. + type: number + metricAggregationType: + description: Aggregation type for the policy's metrics. Valid + values are "Minimum", "Maximum", and "Average". Without a value, + AWS will treat the aggregation type as "Average". + type: string + minAdjustmentMagnitude: + description: Minimum value to scale by when adjustment_type is + set to PercentChangeInCapacity. + type: number + policyType: + description: Policy type, either "SimpleScaling", "StepScaling", + "TargetTrackingScaling", or "PredictiveScaling". If this value + isn't provided, AWS will default to "SimpleScaling." + type: string + predictiveScalingConfiguration: + description: Predictive scaling policy configuration to use with + Amazon EC2 Auto Scaling. + properties: + maxCapacityBreachBehavior: + description: Defines the behavior that should be applied if + the forecast capacity approaches or exceeds the maximum + capacity of the Auto Scaling group. Valid values are HonorMaxCapacity + or IncreaseMaxCapacity. Default is HonorMaxCapacity. + type: string + maxCapacityBuffer: + description: Size of the capacity buffer to use when the forecast + capacity is close to or exceeds the maximum capacity. Valid + range is 0 to 100. If set to 0, Amazon EC2 Auto Scaling + may scale capacity higher than the maximum capacity to equal + but not exceed forecast capacity. + type: string + metricSpecification: + description: This structure includes the metrics and target + utilization to use for predictive scaling. 
+ properties: + customizedCapacityMetricSpecification: + description: Customized capacity metric specification. + The field is only valid when you use customized_load_metric_specification + properties: + metricDataQueries: + description: List of up to 10 structures that defines + custom capacity metric in predictive scaling policy + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression + or metric_stat, but not both. + type: string + id: + description: Short name for the metric used + in predictive scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch + metric to be used in predictive scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the + CloudWatch metric to return, including + the metric name, namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to + return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether + to return the timestamps and raw data values + of this metric, the default is true + type: boolean + type: object + type: array + type: object + customizedLoadMetricSpecification: + description: Customized load metric specification. 
+ properties: + metricDataQueries: + description: List of up to 10 structures that defines + custom load metric in predictive scaling policy + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression + or metric_stat, but not both. + type: string + id: + description: Short name for the metric used + in predictive scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch + metric to be used in predictive scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the + CloudWatch metric to return, including + the metric name, namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to + return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether + to return the timestamps and raw data values + of this metric, the default is true + type: boolean + type: object + type: array + type: object + customizedScalingMetricSpecification: + description: Customized scaling metric specification. + properties: + metricDataQueries: + description: List of up to 10 structures that defines + custom scaling metric in predictive scaling policy + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression + or metric_stat, but not both. 
+ type: string + id: + description: Short name for the metric used + in predictive scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch + metric to be used in predictive scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the + CloudWatch metric to return, including + the metric name, namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to + return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether + to return the timestamps and raw data values + of this metric, the default is true + type: boolean + type: object + type: array + type: object + predefinedLoadMetricSpecification: + description: Predefined load metric specification. + properties: + predefinedMetricType: + description: Metric type. Valid values are ASGTotalCPUUtilization, + ASGTotalNetworkIn, ASGTotalNetworkOut, or ALBTargetGroupRequestCount. + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which + to determine the request count served by your Auto + Scaling group. You create the resource label by + appending the final portion of the load balancer + ARN and the final portion of the target group ARN + into a single value, separated by a forward slash + (/). 
Refer to PredefinedMetricSpecification for + more information. + type: string + type: object + predefinedMetricPairSpecification: + description: Metric pair specification from which Amazon + EC2 Auto Scaling determines the appropriate scaling + metric and load metric to use. + properties: + predefinedMetricType: + description: 'Which metrics to use. There are two + different types of metrics for each metric type: + one is a load metric and one is a scaling metric. + For example, if the metric type is ASGCPUUtilization, + the Auto Scaling group''s total CPU metric is used + as the load metric, and the average CPU metric is + used for the scaling metric. Valid values are ASGCPUUtilization, + ASGNetworkIn, ASGNetworkOut, or ALBRequestCount.' + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which + to determine the request count served by your Auto + Scaling group. You create the resource label by + appending the final portion of the load balancer + ARN and the final portion of the target group ARN + into a single value, separated by a forward slash + (/). Refer to PredefinedMetricSpecification for + more information. + type: string + type: object + predefinedScalingMetricSpecification: + description: Predefined scaling metric specification. + properties: + predefinedMetricType: + description: Describes a scaling metric for a predictive + scaling policy. Valid values are ASGAverageCPUUtilization, + ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which + to determine the request count served by your Auto + Scaling group. You create the resource label by + appending the final portion of the load balancer + ARN and the final portion of the target group ARN + into a single value, separated by a forward slash + (/). 
Refer to PredefinedMetricSpecification for + more information. + type: string + type: object + targetValue: + description: Target value for the metric. + type: number + type: object + mode: + description: Predictive scaling mode. Valid values are ForecastAndScale + and ForecastOnly. Default is ForecastOnly. + type: string + schedulingBufferTime: + description: Amount of time, in seconds, by which the instance + launch time can be advanced. Minimum is 0. + type: string + type: object + scalingAdjustment: + description: Number of instances by which to scale. adjustment_type + determines the interpretation of this number (e.g., as an absolute + number or as a percentage of the existing Auto Scaling group + size). A positive increment adds to the current capacity and + a negative value removes from the current capacity. + type: number + stepAdjustment: + description: |- + Set of adjustments that manage + group scaling. These have the following structure: + items: + properties: + metricIntervalLowerBound: + description: |- + Lower bound for the + difference between the alarm threshold and the CloudWatch metric. + Without a value, AWS will treat this bound as negative infinity. + type: string + metricIntervalUpperBound: + description: |- + Upper bound for the + difference between the alarm threshold and the CloudWatch metric. + Without a value, AWS will treat this bound as positive infinity. The upper bound + must be greater than the lower bound. + type: string + scalingAdjustment: + description: Number of instances by which to scale. adjustment_type + determines the interpretation of this number (e.g., as + an absolute number or as a percentage of the existing + Auto Scaling group size). A positive increment adds to + the current capacity and a negative value removes from + the current capacity. + type: number + type: object + type: array + targetTrackingConfiguration: + description: 'Target tracking policy. 
These have the following + structure:' + properties: + customizedMetricSpecification: + description: Customized metric. Conflicts with predefined_metric_specification. + properties: + metricDimension: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + metrics: + description: Metrics to include, as a metric data query. + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression or + metric_stat, but not both. + type: string + id: + description: Short name for the metric used in predictive + scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch metric + to be used in predictive scaling policy. You must + specify either expression or metric_stat, but + not both. + properties: + metric: + description: Structure that defines the CloudWatch + metric to return, including the metric name, + namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to return. + type: string + unit: + description: Unit of the metric. 
+ type: string + type: object + returnData: + description: Boolean that indicates whether to return + the timestamps and raw data values of this metric, + the default is true + type: boolean + type: object + type: array + namespace: + description: Namespace of the metric. + type: string + statistic: + description: Statistic of the metric. + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableScaleIn: + description: Whether scale in by the target tracking policy + is disabled. + type: boolean + predefinedMetricSpecification: + description: Predefined metric. Conflicts with customized_metric_specification. + properties: + predefinedMetricType: + description: Describes a scaling metric for a predictive + scaling policy. Valid values are ASGAverageCPUUtilization, + ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which to + determine the request count served by your Auto Scaling + group. You create the resource label by appending the + final portion of the load balancer ARN and the final + portion of the target group ARN into a single value, + separated by a forward slash (/). Refer to PredefinedMetricSpecification + for more information. + type: string + type: object + targetValue: + description: Target value for the metric. + type: number + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: PolicyStatus defines the observed state of Policy. + properties: + atProvider: + properties: + adjustmentType: + description: Whether the adjustment is an absolute number or a + percentage of the current capacity. Valid values are ChangeInCapacity, + ExactCapacity, and PercentChangeInCapacity. + type: string + arn: + description: ARN assigned by AWS to the scaling policy. + type: string + autoscalingGroupName: + description: Name of the autoscaling group. + type: string + cooldown: + description: Amount of time, in seconds, after a scaling activity + completes and before the next scaling activity can start. 
+ type: number + enabled: + description: 'Whether the scaling policy is enabled or disabled. + Default: true.' + type: boolean + estimatedInstanceWarmup: + description: Estimated time, in seconds, until a newly launched + instance will contribute CloudWatch metrics. Without a value, + AWS will default to the group's specified cooldown period. + type: number + id: + description: Short name for the metric used in predictive scaling + policy. + type: string + metricAggregationType: + description: Aggregation type for the policy's metrics. Valid + values are "Minimum", "Maximum", and "Average". Without a value, + AWS will treat the aggregation type as "Average". + type: string + minAdjustmentMagnitude: + description: Minimum value to scale by when adjustment_type is + set to PercentChangeInCapacity. + type: number + policyType: + description: Policy type, either "SimpleScaling", "StepScaling", + "TargetTrackingScaling", or "PredictiveScaling". If this value + isn't provided, AWS will default to "SimpleScaling." + type: string + predictiveScalingConfiguration: + description: Predictive scaling policy configuration to use with + Amazon EC2 Auto Scaling. + properties: + maxCapacityBreachBehavior: + description: Defines the behavior that should be applied if + the forecast capacity approaches or exceeds the maximum + capacity of the Auto Scaling group. Valid values are HonorMaxCapacity + or IncreaseMaxCapacity. Default is HonorMaxCapacity. + type: string + maxCapacityBuffer: + description: Size of the capacity buffer to use when the forecast + capacity is close to or exceeds the maximum capacity. Valid + range is 0 to 100. If set to 0, Amazon EC2 Auto Scaling + may scale capacity higher than the maximum capacity to equal + but not exceed forecast capacity. + type: string + metricSpecification: + description: This structure includes the metrics and target + utilization to use for predictive scaling. 
+ properties: + customizedCapacityMetricSpecification: + description: Customized capacity metric specification. + The field is only valid when you use customized_load_metric_specification + properties: + metricDataQueries: + description: List of up to 10 structures that defines + custom capacity metric in predictive scaling policy + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression + or metric_stat, but not both. + type: string + id: + description: Short name for the metric used + in predictive scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch + metric to be used in predictive scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the + CloudWatch metric to return, including + the metric name, namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to + return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether + to return the timestamps and raw data values + of this metric, the default is true + type: boolean + type: object + type: array + type: object + customizedLoadMetricSpecification: + description: Customized load metric specification. 
+ properties: + metricDataQueries: + description: List of up to 10 structures that defines + custom load metric in predictive scaling policy + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression + or metric_stat, but not both. + type: string + id: + description: Short name for the metric used + in predictive scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch + metric to be used in predictive scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the + CloudWatch metric to return, including + the metric name, namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to + return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether + to return the timestamps and raw data values + of this metric, the default is true + type: boolean + type: object + type: array + type: object + customizedScalingMetricSpecification: + description: Customized scaling metric specification. + properties: + metricDataQueries: + description: List of up to 10 structures that defines + custom scaling metric in predictive scaling policy + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression + or metric_stat, but not both. 
+ type: string + id: + description: Short name for the metric used + in predictive scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch + metric to be used in predictive scaling policy. + You must specify either expression or metric_stat, + but not both. + properties: + metric: + description: Structure that defines the + CloudWatch metric to return, including + the metric name, namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to + return. + type: string + unit: + description: Unit of the metric. + type: string + type: object + returnData: + description: Boolean that indicates whether + to return the timestamps and raw data values + of this metric, the default is true + type: boolean + type: object + type: array + type: object + predefinedLoadMetricSpecification: + description: Predefined load metric specification. + properties: + predefinedMetricType: + description: Metric type. Valid values are ASGTotalCPUUtilization, + ASGTotalNetworkIn, ASGTotalNetworkOut, or ALBTargetGroupRequestCount. + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which + to determine the request count served by your Auto + Scaling group. You create the resource label by + appending the final portion of the load balancer + ARN and the final portion of the target group ARN + into a single value, separated by a forward slash + (/). 
Refer to PredefinedMetricSpecification for + more information. + type: string + type: object + predefinedMetricPairSpecification: + description: Metric pair specification from which Amazon + EC2 Auto Scaling determines the appropriate scaling + metric and load metric to use. + properties: + predefinedMetricType: + description: 'Which metrics to use. There are two + different types of metrics for each metric type: + one is a load metric and one is a scaling metric. + For example, if the metric type is ASGCPUUtilization, + the Auto Scaling group''s total CPU metric is used + as the load metric, and the average CPU metric is + used for the scaling metric. Valid values are ASGCPUUtilization, + ASGNetworkIn, ASGNetworkOut, or ALBRequestCount.' + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which + to determine the request count served by your Auto + Scaling group. You create the resource label by + appending the final portion of the load balancer + ARN and the final portion of the target group ARN + into a single value, separated by a forward slash + (/). Refer to PredefinedMetricSpecification for + more information. + type: string + type: object + predefinedScalingMetricSpecification: + description: Predefined scaling metric specification. + properties: + predefinedMetricType: + description: Describes a scaling metric for a predictive + scaling policy. Valid values are ASGAverageCPUUtilization, + ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which + to determine the request count served by your Auto + Scaling group. You create the resource label by + appending the final portion of the load balancer + ARN and the final portion of the target group ARN + into a single value, separated by a forward slash + (/). 
Refer to PredefinedMetricSpecification for + more information. + type: string + type: object + targetValue: + description: Target value for the metric. + type: number + type: object + mode: + description: Predictive scaling mode. Valid values are ForecastAndScale + and ForecastOnly. Default is ForecastOnly. + type: string + schedulingBufferTime: + description: Amount of time, in seconds, by which the instance + launch time can be advanced. Minimum is 0. + type: string + type: object + scalingAdjustment: + description: Number of instances by which to scale. adjustment_type + determines the interpretation of this number (e.g., as an absolute + number or as a percentage of the existing Auto Scaling group + size). A positive increment adds to the current capacity and + a negative value removes from the current capacity. + type: number + stepAdjustment: + description: |- + Set of adjustments that manage + group scaling. These have the following structure: + items: + properties: + metricIntervalLowerBound: + description: |- + Lower bound for the + difference between the alarm threshold and the CloudWatch metric. + Without a value, AWS will treat this bound as negative infinity. + type: string + metricIntervalUpperBound: + description: |- + Upper bound for the + difference between the alarm threshold and the CloudWatch metric. + Without a value, AWS will treat this bound as positive infinity. The upper bound + must be greater than the lower bound. + type: string + scalingAdjustment: + description: Number of instances by which to scale. adjustment_type + determines the interpretation of this number (e.g., as + an absolute number or as a percentage of the existing + Auto Scaling group size). A positive increment adds to + the current capacity and a negative value removes from + the current capacity. + type: number + type: object + type: array + targetTrackingConfiguration: + description: 'Target tracking policy. 
These have the following + structure:' + properties: + customizedMetricSpecification: + description: Customized metric. Conflicts with predefined_metric_specification. + properties: + metricDimension: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + metrics: + description: Metrics to include, as a metric data query. + items: + properties: + expression: + description: Math expression used on the returned + metric. You must specify either expression or + metric_stat, but not both. + type: string + id: + description: Short name for the metric used in predictive + scaling policy. + type: string + label: + description: Human-readable label for this metric + or expression. + type: string + metricStat: + description: Structure that defines CloudWatch metric + to be used in predictive scaling policy. You must + specify either expression or metric_stat, but + not both. + properties: + metric: + description: Structure that defines the CloudWatch + metric to return, including the metric name, + namespace, and dimensions. + properties: + dimensions: + description: Dimensions of the metric. + items: + properties: + name: + description: Name of the policy. + type: string + value: + description: Value of the dimension. + type: string + type: object + type: array + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + type: object + stat: + description: Statistic of the metrics to return. + type: string + unit: + description: Unit of the metric. 
+ type: string + type: object + returnData: + description: Boolean that indicates whether to return + the timestamps and raw data values of this metric, + the default is true + type: boolean + type: object + type: array + namespace: + description: Namespace of the metric. + type: string + statistic: + description: Statistic of the metric. + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableScaleIn: + description: Whether scale in by the target tracking policy + is disabled. + type: boolean + predefinedMetricSpecification: + description: Predefined metric. Conflicts with customized_metric_specification. + properties: + predefinedMetricType: + description: Describes a scaling metric for a predictive + scaling policy. Valid values are ASGAverageCPUUtilization, + ASGAverageNetworkIn, ASGAverageNetworkOut, or ALBRequestCountPerTarget. + type: string + resourceLabel: + description: Label that uniquely identifies a specific + Application Load Balancer target group from which to + determine the request count served by your Auto Scaling + group. You create the resource label by appending the + final portion of the load balancer ARN and the final + portion of the target group ARN into a single value, + separated by a forward slash (/). Refer to PredefinedMetricSpecification + for more information. + type: string + type: object + targetValue: + description: Target value for the metric. + type: number + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/autoscalingplans.aws.upbound.io_scalingplans.yaml b/package/crds/autoscalingplans.aws.upbound.io_scalingplans.yaml index 59bc70a9ff..b04445d3ee 100644 --- a/package/crds/autoscalingplans.aws.upbound.io_scalingplans.yaml +++ b/package/crds/autoscalingplans.aws.upbound.io_scalingplans.yaml @@ -975,3 +975,930 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ScalingPlan is the Schema for the ScalingPlans API. Manages an + AWS Auto Scaling scaling plan. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScalingPlanSpec defines the desired state of ScalingPlan + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + applicationSource: + description: CloudFormation stack or set of tags. You can create + one scaling plan per application source. + properties: + cloudformationStackArn: + description: ARN of a AWS CloudFormation stack. + type: string + tagFilter: + description: Set of tags. + items: + properties: + key: + description: Tag key. + type: string + values: + description: Tag values. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + name: + description: Name of the scaling plan. Names cannot contain vertical + bars, colons, or forward slashes. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + scalingInstruction: + description: Scaling instructions. More details can be found in + the AWS Auto Scaling API Reference. + items: + properties: + customizedLoadMetricSpecification: + description: |- + Customized load metric to use for predictive scaling. 
You must specify either customized_load_metric_specification or predefined_load_metric_specification when configuring predictive scaling. + More details can be found in the AWS Auto Scaling API Reference. + properties: + dimensions: + additionalProperties: + type: string + description: Dimensions of the metric. + type: object + x-kubernetes-map-type: granular + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + statistic: + description: Statistic of the metric. Currently, the + value must always be Sum. + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableDynamicScaling: + description: Boolean controlling whether dynamic scaling + by AWS Auto Scaling is disabled. Defaults to false. + type: boolean + maxCapacity: + description: Maximum capacity of the resource. The exception + to this upper limit is if you specify a non-default setting + for predictive_scaling_max_capacity_behavior. + type: number + minCapacity: + description: Minimum capacity of the resource. + type: number + predefinedLoadMetricSpecification: + description: |- + Predefined load metric to use for predictive scaling. You must specify either predefined_load_metric_specification or customized_load_metric_specification when configuring predictive scaling. + More details can be found in the AWS Auto Scaling API Reference. + properties: + predefinedLoadMetricType: + description: 'Metric type. Valid values: ALBTargetGroupRequestCount, + ASGTotalCPUUtilization, ASGTotalNetworkIn, ASGTotalNetworkOut.' + type: string + resourceLabel: + description: Identifies the resource associated with + the metric type. + type: string + type: object + predictiveScalingMaxCapacityBehavior: + description: |- + Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity specified for the resource. 
+ Valid values: SetForecastCapacityToMaxCapacity, SetMaxCapacityAboveForecastCapacity, SetMaxCapacityToForecastCapacity. + type: string + predictiveScalingMaxCapacityBuffer: + description: Size of the capacity buffer to use when the + forecast capacity is close to or exceeds the maximum capacity. + type: number + predictiveScalingMode: + description: 'Predictive scaling mode. Valid values: ForecastAndScale, + ForecastOnly.' + type: string + resourceId: + description: ID of the resource. This string consists of + the resource type and unique identifier. + type: string + scalableDimension: + description: 'Scalable dimension associated with the resource. + Valid values: autoscaling:autoScalingGroup:DesiredCapacity, + dynamodb:index:ReadCapacityUnits, dynamodb:index:WriteCapacityUnits, + dynamodb:table:ReadCapacityUnits, dynamodb:table:WriteCapacityUnits, + ecs:service:DesiredCount, ec2:spot-fleet-request:TargetCapacity, + rds:cluster:ReadReplicaCount.' + type: string + scalingPolicyUpdateBehavior: + description: 'Controls whether a resource''s externally + created scaling policies are kept or replaced. Valid values: + KeepExternalPolicies, ReplaceExternalPolicies. Defaults + to KeepExternalPolicies.' + type: string + scheduledActionBufferTime: + description: Amount of time, in seconds, to buffer the run + time of scheduled scaling actions when scaling out. + type: number + serviceNamespace: + description: 'Namespace of the AWS service. Valid values: + autoscaling, dynamodb, ecs, ec2, rds.' + type: string + targetTrackingConfiguration: + description: |- + Structure that defines new target tracking configurations. Each of these structures includes a specific scaling metric and a target value for the metric, along with various parameters to use with dynamic scaling. + More details can be found in the AWS Auto Scaling API Reference. + items: + properties: + customizedScalingMetricSpecification: + description: |- + Customized metric. 
You can specify either customized_scaling_metric_specification or predefined_scaling_metric_specification. + More details can be found in the AWS Auto Scaling API Reference. + properties: + dimensions: + additionalProperties: + type: string + description: Dimensions of the metric. + type: object + x-kubernetes-map-type: granular + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + statistic: + description: Statistic of the metric. Currently, + the value must always be Sum. + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableScaleIn: + description: Boolean indicating whether scale in by + the target tracking scaling policy is disabled. + Defaults to false. + type: boolean + estimatedInstanceWarmup: + description: |- + Estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. + This value is used only if the resource is an Auto Scaling group. + type: number + predefinedScalingMetricSpecification: + description: |- + Predefined metric. You can specify either predefined_scaling_metric_specification or customized_scaling_metric_specification. + More details can be found in the AWS Auto Scaling API Reference. + properties: + predefinedScalingMetricType: + description: 'Metric type. Valid values: ALBRequestCountPerTarget, + ASGAverageCPUUtilization, ASGAverageNetworkIn, + ASGAverageNetworkOut, DynamoDBReadCapacityUtilization, + DynamoDBWriteCapacityUtilization, ECSServiceAverageCPUUtilization, + ECSServiceAverageMemoryUtilization, EC2SpotFleetRequestAverageCPUUtilization, + EC2SpotFleetRequestAverageNetworkIn, EC2SpotFleetRequestAverageNetworkOut, + RDSReaderAverageCPUUtilization, RDSReaderAverageDatabaseConnections.' + type: string + resourceLabel: + description: Identifies the resource associated + with the metric type. 
+ type: string + type: object + scaleInCooldown: + description: |- + Amount of time, in seconds, after a scale in activity completes before another scale in activity can start. + This value is not used if the scalable resource is an Auto Scaling group. + type: number + scaleOutCooldown: + description: |- + Amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start. + This value is not used if the scalable resource is an Auto Scaling group. + type: number + targetValue: + description: Target value for the metric. + type: number + type: object + type: array + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + applicationSource: + description: CloudFormation stack or set of tags. You can create + one scaling plan per application source. + properties: + cloudformationStackArn: + description: ARN of a AWS CloudFormation stack. + type: string + tagFilter: + description: Set of tags. + items: + properties: + key: + description: Tag key. + type: string + values: + description: Tag values. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + name: + description: Name of the scaling plan. Names cannot contain vertical + bars, colons, or forward slashes. 
+ type: string + scalingInstruction: + description: Scaling instructions. More details can be found in + the AWS Auto Scaling API Reference. + items: + properties: + customizedLoadMetricSpecification: + description: |- + Customized load metric to use for predictive scaling. You must specify either customized_load_metric_specification or predefined_load_metric_specification when configuring predictive scaling. + More details can be found in the AWS Auto Scaling API Reference. + properties: + dimensions: + additionalProperties: + type: string + description: Dimensions of the metric. + type: object + x-kubernetes-map-type: granular + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + statistic: + description: Statistic of the metric. Currently, the + value must always be Sum. + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableDynamicScaling: + description: Boolean controlling whether dynamic scaling + by AWS Auto Scaling is disabled. Defaults to false. + type: boolean + maxCapacity: + description: Maximum capacity of the resource. The exception + to this upper limit is if you specify a non-default setting + for predictive_scaling_max_capacity_behavior. + type: number + minCapacity: + description: Minimum capacity of the resource. + type: number + predefinedLoadMetricSpecification: + description: |- + Predefined load metric to use for predictive scaling. You must specify either predefined_load_metric_specification or customized_load_metric_specification when configuring predictive scaling. + More details can be found in the AWS Auto Scaling API Reference. + properties: + predefinedLoadMetricType: + description: 'Metric type. Valid values: ALBTargetGroupRequestCount, + ASGTotalCPUUtilization, ASGTotalNetworkIn, ASGTotalNetworkOut.' + type: string + resourceLabel: + description: Identifies the resource associated with + the metric type. 
+ type: string + type: object + predictiveScalingMaxCapacityBehavior: + description: |- + Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity specified for the resource. + Valid values: SetForecastCapacityToMaxCapacity, SetMaxCapacityAboveForecastCapacity, SetMaxCapacityToForecastCapacity. + type: string + predictiveScalingMaxCapacityBuffer: + description: Size of the capacity buffer to use when the + forecast capacity is close to or exceeds the maximum capacity. + type: number + predictiveScalingMode: + description: 'Predictive scaling mode. Valid values: ForecastAndScale, + ForecastOnly.' + type: string + resourceId: + description: ID of the resource. This string consists of + the resource type and unique identifier. + type: string + scalableDimension: + description: 'Scalable dimension associated with the resource. + Valid values: autoscaling:autoScalingGroup:DesiredCapacity, + dynamodb:index:ReadCapacityUnits, dynamodb:index:WriteCapacityUnits, + dynamodb:table:ReadCapacityUnits, dynamodb:table:WriteCapacityUnits, + ecs:service:DesiredCount, ec2:spot-fleet-request:TargetCapacity, + rds:cluster:ReadReplicaCount.' + type: string + scalingPolicyUpdateBehavior: + description: 'Controls whether a resource''s externally + created scaling policies are kept or replaced. Valid values: + KeepExternalPolicies, ReplaceExternalPolicies. Defaults + to KeepExternalPolicies.' + type: string + scheduledActionBufferTime: + description: Amount of time, in seconds, to buffer the run + time of scheduled scaling actions when scaling out. + type: number + serviceNamespace: + description: 'Namespace of the AWS service. Valid values: + autoscaling, dynamodb, ecs, ec2, rds.' + type: string + targetTrackingConfiguration: + description: |- + Structure that defines new target tracking configurations. 
Each of these structures includes a specific scaling metric and a target value for the metric, along with various parameters to use with dynamic scaling. + More details can be found in the AWS Auto Scaling API Reference. + items: + properties: + customizedScalingMetricSpecification: + description: |- + Customized metric. You can specify either customized_scaling_metric_specification or predefined_scaling_metric_specification. + More details can be found in the AWS Auto Scaling API Reference. + properties: + dimensions: + additionalProperties: + type: string + description: Dimensions of the metric. + type: object + x-kubernetes-map-type: granular + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + statistic: + description: Statistic of the metric. Currently, + the value must always be Sum. + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableScaleIn: + description: Boolean indicating whether scale in by + the target tracking scaling policy is disabled. + Defaults to false. + type: boolean + estimatedInstanceWarmup: + description: |- + Estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. + This value is used only if the resource is an Auto Scaling group. + type: number + predefinedScalingMetricSpecification: + description: |- + Predefined metric. You can specify either predefined_scaling_metric_specification or customized_scaling_metric_specification. + More details can be found in the AWS Auto Scaling API Reference. + properties: + predefinedScalingMetricType: + description: 'Metric type. 
Valid values: ALBRequestCountPerTarget, + ASGAverageCPUUtilization, ASGAverageNetworkIn, + ASGAverageNetworkOut, DynamoDBReadCapacityUtilization, + DynamoDBWriteCapacityUtilization, ECSServiceAverageCPUUtilization, + ECSServiceAverageMemoryUtilization, EC2SpotFleetRequestAverageCPUUtilization, + EC2SpotFleetRequestAverageNetworkIn, EC2SpotFleetRequestAverageNetworkOut, + RDSReaderAverageCPUUtilization, RDSReaderAverageDatabaseConnections.' + type: string + resourceLabel: + description: Identifies the resource associated + with the metric type. + type: string + type: object + scaleInCooldown: + description: |- + Amount of time, in seconds, after a scale in activity completes before another scale in activity can start. + This value is not used if the scalable resource is an Auto Scaling group. + type: number + scaleOutCooldown: + description: |- + Amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start. + This value is not used if the scalable resource is an Auto Scaling group. + type: number + targetValue: + description: Target value for the metric. + type: number + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.applicationSource is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.applicationSource) + || (has(self.initProvider) && has(self.initProvider.applicationSource))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.scalingInstruction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scalingInstruction) + || (has(self.initProvider) && has(self.initProvider.scalingInstruction))' + status: + description: ScalingPlanStatus defines the observed state of ScalingPlan. 
+ properties: + atProvider: + properties: + applicationSource: + description: CloudFormation stack or set of tags. You can create + one scaling plan per application source. + properties: + cloudformationStackArn: + description: ARN of a AWS CloudFormation stack. + type: string + tagFilter: + description: Set of tags. + items: + properties: + key: + description: Tag key. + type: string + values: + description: Tag values. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + id: + description: Scaling plan identifier. + type: string + name: + description: Name of the scaling plan. Names cannot contain vertical + bars, colons, or forward slashes. + type: string + scalingInstruction: + description: Scaling instructions. More details can be found in + the AWS Auto Scaling API Reference. + items: + properties: + customizedLoadMetricSpecification: + description: |- + Customized load metric to use for predictive scaling. You must specify either customized_load_metric_specification or predefined_load_metric_specification when configuring predictive scaling. + More details can be found in the AWS Auto Scaling API Reference. + properties: + dimensions: + additionalProperties: + type: string + description: Dimensions of the metric. + type: object + x-kubernetes-map-type: granular + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + statistic: + description: Statistic of the metric. Currently, the + value must always be Sum. + type: string + unit: + description: Unit of the metric. + type: string + type: object + disableDynamicScaling: + description: Boolean controlling whether dynamic scaling + by AWS Auto Scaling is disabled. Defaults to false. + type: boolean + maxCapacity: + description: Maximum capacity of the resource. 
The exception + to this upper limit is if you specify a non-default setting + for predictive_scaling_max_capacity_behavior. + type: number + minCapacity: + description: Minimum capacity of the resource. + type: number + predefinedLoadMetricSpecification: + description: |- + Predefined load metric to use for predictive scaling. You must specify either predefined_load_metric_specification or customized_load_metric_specification when configuring predictive scaling. + More details can be found in the AWS Auto Scaling API Reference. + properties: + predefinedLoadMetricType: + description: 'Metric type. Valid values: ALBTargetGroupRequestCount, + ASGTotalCPUUtilization, ASGTotalNetworkIn, ASGTotalNetworkOut.' + type: string + resourceLabel: + description: Identifies the resource associated with + the metric type. + type: string + type: object + predictiveScalingMaxCapacityBehavior: + description: |- + Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity specified for the resource. + Valid values: SetForecastCapacityToMaxCapacity, SetMaxCapacityAboveForecastCapacity, SetMaxCapacityToForecastCapacity. + type: string + predictiveScalingMaxCapacityBuffer: + description: Size of the capacity buffer to use when the + forecast capacity is close to or exceeds the maximum capacity. + type: number + predictiveScalingMode: + description: 'Predictive scaling mode. Valid values: ForecastAndScale, + ForecastOnly.' + type: string + resourceId: + description: ID of the resource. This string consists of + the resource type and unique identifier. + type: string + scalableDimension: + description: 'Scalable dimension associated with the resource. 
+ Valid values: autoscaling:autoScalingGroup:DesiredCapacity, + dynamodb:index:ReadCapacityUnits, dynamodb:index:WriteCapacityUnits, + dynamodb:table:ReadCapacityUnits, dynamodb:table:WriteCapacityUnits, + ecs:service:DesiredCount, ec2:spot-fleet-request:TargetCapacity, + rds:cluster:ReadReplicaCount.' + type: string + scalingPolicyUpdateBehavior: + description: 'Controls whether a resource''s externally + created scaling policies are kept or replaced. Valid values: + KeepExternalPolicies, ReplaceExternalPolicies. Defaults + to KeepExternalPolicies.' + type: string + scheduledActionBufferTime: + description: Amount of time, in seconds, to buffer the run + time of scheduled scaling actions when scaling out. + type: number + serviceNamespace: + description: 'Namespace of the AWS service. Valid values: + autoscaling, dynamodb, ecs, ec2, rds.' + type: string + targetTrackingConfiguration: + description: |- + Structure that defines new target tracking configurations. Each of these structures includes a specific scaling metric and a target value for the metric, along with various parameters to use with dynamic scaling. + More details can be found in the AWS Auto Scaling API Reference. + items: + properties: + customizedScalingMetricSpecification: + description: |- + Customized metric. You can specify either customized_scaling_metric_specification or predefined_scaling_metric_specification. + More details can be found in the AWS Auto Scaling API Reference. + properties: + dimensions: + additionalProperties: + type: string + description: Dimensions of the metric. + type: object + x-kubernetes-map-type: granular + metricName: + description: Name of the metric. + type: string + namespace: + description: Namespace of the metric. + type: string + statistic: + description: Statistic of the metric. Currently, + the value must always be Sum. + type: string + unit: + description: Unit of the metric. 
+ type: string + type: object + disableScaleIn: + description: Boolean indicating whether scale in by + the target tracking scaling policy is disabled. + Defaults to false. + type: boolean + estimatedInstanceWarmup: + description: |- + Estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. + This value is used only if the resource is an Auto Scaling group. + type: number + predefinedScalingMetricSpecification: + description: |- + Predefined metric. You can specify either predefined_scaling_metric_specification or customized_scaling_metric_specification. + More details can be found in the AWS Auto Scaling API Reference. + properties: + predefinedScalingMetricType: + description: 'Metric type. Valid values: ALBRequestCountPerTarget, + ASGAverageCPUUtilization, ASGAverageNetworkIn, + ASGAverageNetworkOut, DynamoDBReadCapacityUtilization, + DynamoDBWriteCapacityUtilization, ECSServiceAverageCPUUtilization, + ECSServiceAverageMemoryUtilization, EC2SpotFleetRequestAverageCPUUtilization, + EC2SpotFleetRequestAverageNetworkIn, EC2SpotFleetRequestAverageNetworkOut, + RDSReaderAverageCPUUtilization, RDSReaderAverageDatabaseConnections.' + type: string + resourceLabel: + description: Identifies the resource associated + with the metric type. + type: string + type: object + scaleInCooldown: + description: |- + Amount of time, in seconds, after a scale in activity completes before another scale in activity can start. + This value is not used if the scalable resource is an Auto Scaling group. + type: number + scaleOutCooldown: + description: |- + Amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start. + This value is not used if the scalable resource is an Auto Scaling group. + type: number + targetValue: + description: Target value for the metric. 
+ type: number + type: object + type: array + type: object + type: array + scalingPlanVersion: + description: The version number of the scaling plan. This value + is always 1. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/backup.aws.upbound.io_frameworks.yaml b/package/crds/backup.aws.upbound.io_frameworks.yaml index e22e542ce0..20c9812af3 100644 --- a/package/crds/backup.aws.upbound.io_frameworks.yaml +++ b/package/crds/backup.aws.upbound.io_frameworks.yaml @@ -604,3 +604,583 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Framework is the Schema for the Frameworks API. Provides an AWS + Backup Framework resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrameworkSpec defines the desired state of Framework + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + control: + description: One or more control blocks that make up the framework. + Each control in the list has a name, input parameters, and scope. + Detailed below. + items: + properties: + inputParameter: + description: 'One or more input parameter blocks. An example + of a control with two parameters is: "backup plan frequency + is at least daily and the retention period is at least + 1 year". The first parameter is daily. The second parameter + is 1 year. Detailed below.' + items: + properties: + name: + description: The unique name of the framework. The + name must be between 1 and 256 characters, starting + with a letter, and consisting of letters, numbers, + and underscores. + type: string + value: + description: The value of parameter, for example, + hourly. + type: string + type: object + type: array + name: + description: The unique name of the framework. The name + must be between 1 and 256 characters, starting with a + letter, and consisting of letters, numbers, and underscores. 
+ type: string + scope: + description: 'The scope of a control. The control scope + defines what the control will evaluate. Three examples + of control scopes are: a specific backup plan, all backup + plans with a specific tag, or all backup plans. Detailed + below.' + properties: + complianceResourceIds: + description: The ID of the only AWS resource that you + want your control scope to contain. Minimum number + of 1 item. Maximum number of 100 items. + items: + type: string + type: array + x-kubernetes-list-type: set + complianceResourceTypes: + description: Describes whether the control scope includes + one or more types of resources, such as EFS or RDS. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + type: object + type: array + description: + description: The description of the framework with a maximum of + 1,024 characters + type: string + name: + description: The unique name of the framework. The name must be + between 1 and 256 characters, starting with a letter, and consisting + of letters, numbers, and underscores. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + control: + description: One or more control blocks that make up the framework. + Each control in the list has a name, input parameters, and scope. + Detailed below. + items: + properties: + inputParameter: + description: 'One or more input parameter blocks. An example + of a control with two parameters is: "backup plan frequency + is at least daily and the retention period is at least + 1 year". The first parameter is daily. The second parameter + is 1 year. Detailed below.' + items: + properties: + name: + description: The unique name of the framework. The + name must be between 1 and 256 characters, starting + with a letter, and consisting of letters, numbers, + and underscores. + type: string + value: + description: The value of parameter, for example, + hourly. + type: string + type: object + type: array + name: + description: The unique name of the framework. The name + must be between 1 and 256 characters, starting with a + letter, and consisting of letters, numbers, and underscores. + type: string + scope: + description: 'The scope of a control. The control scope + defines what the control will evaluate. Three examples + of control scopes are: a specific backup plan, all backup + plans with a specific tag, or all backup plans. Detailed + below.' + properties: + complianceResourceIds: + description: The ID of the only AWS resource that you + want your control scope to contain. Minimum number + of 1 item. Maximum number of 100 items. + items: + type: string + type: array + x-kubernetes-list-type: set + complianceResourceTypes: + description: Describes whether the control scope includes + one or more types of resources, such as EFS or RDS. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + type: object + type: array + description: + description: The description of the framework with a maximum of + 1,024 characters + type: string + name: + description: The unique name of the framework. The name must be + between 1 and 256 characters, starting with a letter, and consisting + of letters, numbers, and underscores. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.control is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.control) + || (has(self.initProvider) && has(self.initProvider.control))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: FrameworkStatus defines the observed state of Framework. + properties: + atProvider: + properties: + arn: + description: The ARN of the backup framework. + type: string + control: + description: One or more control blocks that make up the framework. + Each control in the list has a name, input parameters, and scope. + Detailed below. + items: + properties: + inputParameter: + description: 'One or more input parameter blocks. An example + of a control with two parameters is: "backup plan frequency + is at least daily and the retention period is at least + 1 year". The first parameter is daily. The second parameter + is 1 year. Detailed below.' + items: + properties: + name: + description: The unique name of the framework. 
The + name must be between 1 and 256 characters, starting + with a letter, and consisting of letters, numbers, + and underscores. + type: string + value: + description: The value of parameter, for example, + hourly. + type: string + type: object + type: array + name: + description: The unique name of the framework. The name + must be between 1 and 256 characters, starting with a + letter, and consisting of letters, numbers, and underscores. + type: string + scope: + description: 'The scope of a control. The control scope + defines what the control will evaluate. Three examples + of control scopes are: a specific backup plan, all backup + plans with a specific tag, or all backup plans. Detailed + below.' + properties: + complianceResourceIds: + description: The ID of the only AWS resource that you + want your control scope to contain. Minimum number + of 1 item. Maximum number of 100 items. + items: + type: string + type: array + x-kubernetes-list-type: set + complianceResourceTypes: + description: Describes whether the control scope includes + one or more types of resources, such as EFS or RDS. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + type: object + type: array + creationTime: + description: The date and time that a framework is created, in + Unix format and Coordinated Universal Time (UTC). + type: string + deploymentStatus: + description: 'The deployment status of a framework. The statuses + are: CREATE_IN_PROGRESS | UPDATE_IN_PROGRESS | DELETE_IN_PROGRESS + | COMPLETED | FAILED.' + type: string + description: + description: The description of the framework with a maximum of + 1,024 characters + type: string + id: + description: The id of the backup framework. + type: string + name: + description: The unique name of the framework. 
The name must be + between 1 and 256 characters, starting with a letter, and consisting + of letters, numbers, and underscores. + type: string + status: + description: A framework consists of one or more controls. Each + control governs a resource, such as backup plans, backup selections, + backup vaults, or recovery points. You can also turn AWS Config + recording on or off for each resource. For more information + refer to the AWS documentation for Framework Status + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/backup.aws.upbound.io_plans.yaml b/package/crds/backup.aws.upbound.io_plans.yaml index 64db346338..6d1e9cd5a0 100644 --- a/package/crds/backup.aws.upbound.io_plans.yaml +++ b/package/crds/backup.aws.upbound.io_plans.yaml @@ -882,3 +882,855 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Plan is the Schema for the Plans API. Provides an AWS Backup + plan resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PlanSpec defines the desired state of Plan + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + advancedBackupSetting: + description: An object that specifies backup options for each + resource type. + items: + properties: + backupOptions: + additionalProperties: + type: string + description: Specifies the backup option for a selected + resource. This option is only available for Windows VSS + backup jobs. Set to { WindowsVSS = "enabled" } to enable + Windows VSS backup option and create a VSS Windows backup. + type: object + x-kubernetes-map-type: granular + resourceType: + description: 'The type of AWS resource to be backed up. + For VSS Windows backups, the only supported resource type + is Amazon EC2. Valid values: EC2.' + type: string + type: object + type: array + name: + description: The display name of a backup plan. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rule: + description: A rule object that specifies a scheduled task that + is used to back up a selection of resources. 
+ items: + properties: + completionWindow: + description: The amount of time in minutes AWS Backup attempts + a backup before canceling the job and returning an error. + type: number + copyAction: + description: Configuration block(s) with copy operation + settings. Detailed below. + items: + properties: + destinationVaultArn: + description: An Amazon Resource Name (ARN) that uniquely + identifies the destination backup vault for the + copied backup. + type: string + lifecycle: + description: The lifecycle defines when a protected + resource is transitioned to cold storage and when + it expires. Fields documented below. + properties: + coldStorageAfter: + description: Specifies the number of days after + creation that a recovery point is moved to cold + storage. + type: number + deleteAfter: + description: Specifies the number of days after + creation that a recovery point is deleted. Must + be 90 days greater than cold_storage_after. + type: number + optInToArchiveForSupportedResources: + description: This setting will instruct your backup + plan to transition supported resources to archive + (cold) storage tier in accordance with your + lifecycle settings. + type: boolean + type: object + type: object + type: array + enableContinuousBackup: + description: Enable continuous backups for supported resources. + type: boolean + lifecycle: + description: The lifecycle defines when a protected resource + is transitioned to cold storage and when it expires. Fields + documented below. + properties: + coldStorageAfter: + description: Specifies the number of days after creation + that a recovery point is moved to cold storage. + type: number + deleteAfter: + description: Specifies the number of days after creation + that a recovery point is deleted. Must be 90 days + greater than cold_storage_after. 
+ type: number + optInToArchiveForSupportedResources: + description: This setting will instruct your backup + plan to transition supported resources to archive + (cold) storage tier in accordance with your lifecycle + settings. + type: boolean + type: object + recoveryPointTags: + additionalProperties: + type: string + description: Metadata that you can assign to help organize + the resources that you create. + type: object + x-kubernetes-map-type: granular + ruleName: + description: An display name for a backup rule. + type: string + schedule: + description: A CRON expression specifying when AWS Backup + initiates a backup job. + type: string + startWindow: + description: The amount of time in minutes before beginning + a backup. + type: number + targetVaultName: + description: The name of a logical container where backups + are stored. + type: string + targetVaultNameRef: + description: Reference to a Vault in backup to populate + targetVaultName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetVaultNameSelector: + description: Selector for a Vault in backup to populate + targetVaultName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + advancedBackupSetting: + description: An object that specifies backup options for each + resource type. + items: + properties: + backupOptions: + additionalProperties: + type: string + description: Specifies the backup option for a selected + resource. This option is only available for Windows VSS + backup jobs. Set to { WindowsVSS = "enabled" } to enable + Windows VSS backup option and create a VSS Windows backup. + type: object + x-kubernetes-map-type: granular + resourceType: + description: 'The type of AWS resource to be backed up. + For VSS Windows backups, the only supported resource type + is Amazon EC2. Valid values: EC2.' + type: string + type: object + type: array + name: + description: The display name of a backup plan. + type: string + rule: + description: A rule object that specifies a scheduled task that + is used to back up a selection of resources. + items: + properties: + completionWindow: + description: The amount of time in minutes AWS Backup attempts + a backup before canceling the job and returning an error. + type: number + copyAction: + description: Configuration block(s) with copy operation + settings. Detailed below. + items: + properties: + destinationVaultArn: + description: An Amazon Resource Name (ARN) that uniquely + identifies the destination backup vault for the + copied backup. + type: string + lifecycle: + description: The lifecycle defines when a protected + resource is transitioned to cold storage and when + it expires. Fields documented below. + properties: + coldStorageAfter: + description: Specifies the number of days after + creation that a recovery point is moved to cold + storage. + type: number + deleteAfter: + description: Specifies the number of days after + creation that a recovery point is deleted. Must + be 90 days greater than cold_storage_after. 
+ type: number + optInToArchiveForSupportedResources: + description: This setting will instruct your backup + plan to transition supported resources to archive + (cold) storage tier in accordance with your + lifecycle settings. + type: boolean + type: object + type: object + type: array + enableContinuousBackup: + description: Enable continuous backups for supported resources. + type: boolean + lifecycle: + description: The lifecycle defines when a protected resource + is transitioned to cold storage and when it expires. Fields + documented below. + properties: + coldStorageAfter: + description: Specifies the number of days after creation + that a recovery point is moved to cold storage. + type: number + deleteAfter: + description: Specifies the number of days after creation + that a recovery point is deleted. Must be 90 days + greater than cold_storage_after. + type: number + optInToArchiveForSupportedResources: + description: This setting will instruct your backup + plan to transition supported resources to archive + (cold) storage tier in accordance with your lifecycle + settings. + type: boolean + type: object + recoveryPointTags: + additionalProperties: + type: string + description: Metadata that you can assign to help organize + the resources that you create. + type: object + x-kubernetes-map-type: granular + ruleName: + description: An display name for a backup rule. + type: string + schedule: + description: A CRON expression specifying when AWS Backup + initiates a backup job. + type: string + startWindow: + description: The amount of time in minutes before beginning + a backup. + type: number + targetVaultName: + description: The name of a logical container where backups + are stored. + type: string + targetVaultNameRef: + description: Reference to a Vault in backup to populate + targetVaultName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetVaultNameSelector: + description: Selector for a Vault in backup to populate + targetVaultName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.rule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.rule) + || (has(self.initProvider) && has(self.initProvider.rule))' + status: + description: PlanStatus defines the observed state of Plan. + properties: + atProvider: + properties: + advancedBackupSetting: + description: An object that specifies backup options for each + resource type. + items: + properties: + backupOptions: + additionalProperties: + type: string + description: Specifies the backup option for a selected + resource. This option is only available for Windows VSS + backup jobs. Set to { WindowsVSS = "enabled" } to enable + Windows VSS backup option and create a VSS Windows backup. + type: object + x-kubernetes-map-type: granular + resourceType: + description: 'The type of AWS resource to be backed up. + For VSS Windows backups, the only supported resource type + is Amazon EC2. Valid values: EC2.' + type: string + type: object + type: array + arn: + description: The ARN of the backup plan. + type: string + id: + description: The id of the backup plan. + type: string + name: + description: The display name of a backup plan. + type: string + rule: + description: A rule object that specifies a scheduled task that + is used to back up a selection of resources. + items: + properties: + completionWindow: + description: The amount of time in minutes AWS Backup attempts + a backup before canceling the job and returning an error. 
+ type: number + copyAction: + description: Configuration block(s) with copy operation + settings. Detailed below. + items: + properties: + destinationVaultArn: + description: An Amazon Resource Name (ARN) that uniquely + identifies the destination backup vault for the + copied backup. + type: string + lifecycle: + description: The lifecycle defines when a protected + resource is transitioned to cold storage and when + it expires. Fields documented below. + properties: + coldStorageAfter: + description: Specifies the number of days after + creation that a recovery point is moved to cold + storage. + type: number + deleteAfter: + description: Specifies the number of days after + creation that a recovery point is deleted. Must + be 90 days greater than cold_storage_after. + type: number + optInToArchiveForSupportedResources: + description: This setting will instruct your backup + plan to transition supported resources to archive + (cold) storage tier in accordance with your + lifecycle settings. + type: boolean + type: object + type: object + type: array + enableContinuousBackup: + description: Enable continuous backups for supported resources. + type: boolean + lifecycle: + description: The lifecycle defines when a protected resource + is transitioned to cold storage and when it expires. Fields + documented below. + properties: + coldStorageAfter: + description: Specifies the number of days after creation + that a recovery point is moved to cold storage. + type: number + deleteAfter: + description: Specifies the number of days after creation + that a recovery point is deleted. Must be 90 days + greater than cold_storage_after. + type: number + optInToArchiveForSupportedResources: + description: This setting will instruct your backup + plan to transition supported resources to archive + (cold) storage tier in accordance with your lifecycle + settings. 
+ type: boolean + type: object + recoveryPointTags: + additionalProperties: + type: string + description: Metadata that you can assign to help organize + the resources that you create. + type: object + x-kubernetes-map-type: granular + ruleName: + description: An display name for a backup rule. + type: string + schedule: + description: A CRON expression specifying when AWS Backup + initiates a backup job. + type: string + startWindow: + description: The amount of time in minutes before beginning + a backup. + type: number + targetVaultName: + description: The name of a logical container where backups + are stored. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + version: + description: Unique, randomly generated, Unicode, UTF-8 encoded + string that serves as the version ID of the backup plan. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/backup.aws.upbound.io_reportplans.yaml b/package/crds/backup.aws.upbound.io_reportplans.yaml index 94b922b46b..2e5db3563c 100644 --- a/package/crds/backup.aws.upbound.io_reportplans.yaml +++ b/package/crds/backup.aws.upbound.io_reportplans.yaml @@ -622,3 +622,589 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ReportPlan is the Schema for the ReportPlans API. Provides an + AWS Backup Report Plan resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ReportPlanSpec defines the desired state of ReportPlan + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the report plan with a maximum + of 1,024 characters + type: string + name: + description: The unique name of the report plan. The name must + be between 1 and 256 characters, starting with a letter, and + consisting of letters, numbers, and underscores. + type: string + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + reportDeliveryChannel: + description: An object that contains information about where and + how to deliver your reports, specifically your Amazon S3 bucket + name, S3 key prefix, and the formats of your reports. Detailed + below. + properties: + formats: + description: 'A list of the format of your reports: CSV, JSON, + or both. If not specified, the default format is CSV.' + items: + type: string + type: array + x-kubernetes-list-type: set + s3BucketName: + description: The unique name of the S3 bucket that receives + your reports. + type: string + s3KeyPrefix: + description: 'The prefix for where Backup Audit Manager delivers + your reports to Amazon S3. The prefix is this part of the + following path: s3://your-bucket-name/prefix/Backup/us-west-2/year/month/day/report-name. + If not specified, there is no prefix.' + type: string + type: object + reportSetting: + description: An object that identifies the report template for + the report. Reports are built using a report template. Detailed + below. + properties: + accounts: + description: Specifies the list of accounts a report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + frameworkArns: + description: Specifies the Amazon Resource Names (ARNs) of + the frameworks a report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + numberOfFrameworks: + description: Specifies the number of frameworks a report covers. + type: number + organizationUnits: + description: Specifies the list of Organizational Units a + report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + regions: + description: Specifies the list of regions a report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + reportTemplate: + description: 'Identifies the report template for the report. + Reports are built using a report template. 
The report templates + are: RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT + | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT.' + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the report plan with a maximum + of 1,024 characters + type: string + name: + description: The unique name of the report plan. The name must + be between 1 and 256 characters, starting with a letter, and + consisting of letters, numbers, and underscores. + type: string + reportDeliveryChannel: + description: An object that contains information about where and + how to deliver your reports, specifically your Amazon S3 bucket + name, S3 key prefix, and the formats of your reports. Detailed + below. + properties: + formats: + description: 'A list of the format of your reports: CSV, JSON, + or both. If not specified, the default format is CSV.' + items: + type: string + type: array + x-kubernetes-list-type: set + s3BucketName: + description: The unique name of the S3 bucket that receives + your reports. 
+ type: string + s3KeyPrefix: + description: 'The prefix for where Backup Audit Manager delivers + your reports to Amazon S3. The prefix is this part of the + following path: s3://your-bucket-name/prefix/Backup/us-west-2/year/month/day/report-name. + If not specified, there is no prefix.' + type: string + type: object + reportSetting: + description: An object that identifies the report template for + the report. Reports are built using a report template. Detailed + below. + properties: + accounts: + description: Specifies the list of accounts a report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + frameworkArns: + description: Specifies the Amazon Resource Names (ARNs) of + the frameworks a report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + numberOfFrameworks: + description: Specifies the number of frameworks a report covers. + type: number + organizationUnits: + description: Specifies the list of Organizational Units a + report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + regions: + description: Specifies the list of regions a report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + reportTemplate: + description: 'Identifies the report template for the report. + Reports are built using a report template. The report templates + are: RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT + | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT.' + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.reportDeliveryChannel is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.reportDeliveryChannel) + || (has(self.initProvider) && has(self.initProvider.reportDeliveryChannel))' + - message: spec.forProvider.reportSetting is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.reportSetting) + || (has(self.initProvider) && has(self.initProvider.reportSetting))' + status: + description: ReportPlanStatus defines the observed state of ReportPlan. + properties: + atProvider: + properties: + arn: + description: The ARN of the backup report plan. + type: string + creationTime: + description: The date and time that a report plan is created, + in Unix format and Coordinated Universal Time (UTC). + type: string + deploymentStatus: + description: 'The deployment status of a report plan. The statuses + are: CREATE_IN_PROGRESS | UPDATE_IN_PROGRESS | DELETE_IN_PROGRESS + | COMPLETED.' + type: string + description: + description: The description of the report plan with a maximum + of 1,024 characters + type: string + id: + description: The id of the backup report plan. + type: string + name: + description: The unique name of the report plan. The name must + be between 1 and 256 characters, starting with a letter, and + consisting of letters, numbers, and underscores. 
+ type: string + reportDeliveryChannel: + description: An object that contains information about where and + how to deliver your reports, specifically your Amazon S3 bucket + name, S3 key prefix, and the formats of your reports. Detailed + below. + properties: + formats: + description: 'A list of the format of your reports: CSV, JSON, + or both. If not specified, the default format is CSV.' + items: + type: string + type: array + x-kubernetes-list-type: set + s3BucketName: + description: The unique name of the S3 bucket that receives + your reports. + type: string + s3KeyPrefix: + description: 'The prefix for where Backup Audit Manager delivers + your reports to Amazon S3. The prefix is this part of the + following path: s3://your-bucket-name/prefix/Backup/us-west-2/year/month/day/report-name. + If not specified, there is no prefix.' + type: string + type: object + reportSetting: + description: An object that identifies the report template for + the report. Reports are built using a report template. Detailed + below. + properties: + accounts: + description: Specifies the list of accounts a report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + frameworkArns: + description: Specifies the Amazon Resource Names (ARNs) of + the frameworks a report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + numberOfFrameworks: + description: Specifies the number of frameworks a report covers. + type: number + organizationUnits: + description: Specifies the list of Organizational Units a + report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + regions: + description: Specifies the list of regions a report covers. + items: + type: string + type: array + x-kubernetes-list-type: set + reportTemplate: + description: 'Identifies the report template for the report. + Reports are built using a report template. 
The report templates + are: RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT + | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT.' + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/batch.aws.upbound.io_jobdefinitions.yaml b/package/crds/batch.aws.upbound.io_jobdefinitions.yaml index 5f259067bb..984608e438 100644 --- a/package/crds/batch.aws.upbound.io_jobdefinitions.yaml +++ b/package/crds/batch.aws.upbound.io_jobdefinitions.yaml @@ -1293,3 +1293,1185 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: JobDefinition is the Schema for the JobDefinitions API. Provides + a Batch Job Definition resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: JobDefinitionSpec defines the desired state of JobDefinition + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerProperties: + description: A valid container properties provided as a single + valid JSON document. This parameter is only valid if the type + parameter is container. + type: string + deregisterOnNewRevision: + description: When updating a job definition a new revision is + created. This parameter determines if the previous version is + deregistered (INACTIVE) or left ACTIVE. Defaults to true. + type: boolean + eksProperties: + description: A valid eks properties. This parameter is only valid + if the type parameter is container. + properties: + podProperties: + description: The properties for the Kubernetes pod resources + of a job. See pod_properties below. + properties: + containers: + description: The properties of the container that's used + on the Amazon EKS pod. See containers below. + properties: + args: + description: An array of arguments to the entrypoint. + If this isn't specified, the CMD of the container + image is used. 
This corresponds to the args member + in the Entrypoint portion of the Pod in Kubernetes. + Environment variable references are expanded using + the container's environment. + items: + type: string + type: array + command: + description: The entrypoint for the container. This + isn't run within a shell. If this isn't specified, + the ENTRYPOINT of the container image is used. Environment + variable references are expanded using the container's + environment. + items: + type: string + type: array + env: + description: The environment variables to pass to + a container. See EKS Environment below. + items: + properties: + name: + description: Specifies the name of the job definition. + type: string + value: + description: The value of the environment variable. + type: string + type: object + type: array + image: + description: The Docker image used to start the container. + type: string + imagePullPolicy: + description: The image pull policy for the container. + Supported values are Always, IfNotPresent, and Never. + type: string + name: + description: The name of the container. If the name + isn't specified, the default name "Default" is used. + Each container in a pod must have a unique name. + type: string + resources: + description: The type and amount of resources to assign + to a container. The supported resources include + memory, cpu, and nvidia.com/gpu. + properties: + limits: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + requests: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + securityContext: + description: The security context for a job. + properties: + privileged: + type: boolean + readOnlyRootFileSystem: + type: boolean + runAsGroup: + type: number + runAsNonRoot: + type: boolean + runAsUser: + type: number + type: object + volumeMounts: + description: The volume mounts for the container. 
+ items: + properties: + mountPath: + description: The path of the file or directory + on the host to mount into containers on the + pod. + type: string + name: + description: Specifies the name of the job definition. + type: string + readOnly: + type: boolean + type: object + type: array + type: object + dnsPolicy: + description: The DNS policy for the pod. The default value + is ClusterFirst. If the host_network argument is not + specified, the default is ClusterFirstWithHostNet. ClusterFirst + indicates that any DNS query that does not match the + configured cluster domain suffix is forwarded to the + upstream nameserver inherited from the node. For more + information, see Pod's DNS policy in the Kubernetes + documentation. + type: string + hostNetwork: + description: Indicates if the pod uses the hosts' network + IP address. The default value is true. Setting this + to false enables the Kubernetes pod networking model. + Most AWS Batch workloads are egress-only and don't require + the overhead of IP allocation for each pod for incoming + connections. + type: boolean + metadata: + description: Metadata about the Kubernetes pod. + properties: + labels: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + serviceAccountName: + description: The name of the service account that's used + to run the pod. + type: string + volumes: + description: Specifies the volumes for a job definition + that uses Amazon EKS resources. AWS Batch supports emptyDir, + hostPath, and secret volume types. + items: + properties: + emptyDir: + properties: + medium: + description: The medium to store the volume. + The default value is an empty string, which + uses the storage of the node. + type: string + sizeLimit: + description: The maximum size of the volume. + By default, there's no maximum size defined. 
+ type: string + type: object + hostPath: + description: The path of the file or directory on + the host to mount into containers on the pod. + properties: + path: + description: The path of the file or directory + on the host to mount into containers on the + pod. + type: string + type: object + name: + description: Specifies the name of the job definition. + type: string + secret: + properties: + optional: + description: Specifies whether the secret or + the secret's keys must be defined. + type: boolean + secretName: + description: The name of the secret. The name + must be allowed as a DNS subdomain name. + type: string + type: object + type: object + type: array + type: object + type: object + name: + description: Specifies the name of the job definition. + type: string + nodeProperties: + description: A valid node properties provided as a single valid + JSON document. This parameter is required if the type parameter + is multinode. + type: string + parameters: + additionalProperties: + type: string + description: Specifies the parameter substitution placeholders + to set in the job definition. + type: object + x-kubernetes-map-type: granular + platformCapabilities: + description: The platform capabilities required by the job definition. + If no value is specified, it defaults to EC2. To run the job + on Fargate resources, specify FARGATE. + items: + type: string + type: array + x-kubernetes-list-type: set + propagateTags: + description: Specifies whether to propagate the tags from the + job definition to the corresponding Amazon ECS task. Default + is false. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + retryStrategy: + description: Specifies the retry strategy to use for failed jobs + that are submitted with this job definition. Maximum number + of retry_strategy is 1. Defined below. 
+ properties: + attempts: + description: The number of times to move a job to the RUNNABLE + status. You may specify between 1 and 10 attempts. + type: number + evaluateOnExit: + description: The evaluate on exit conditions under which the + job should be retried or failed. If this parameter is specified, + then the attempts parameter must also be specified. You + may specify up to 5 configuration blocks. + items: + properties: + action: + description: 'Specifies the action to take if all of + the specified conditions are met. The values are not + case sensitive. Valid values: retry, exit.' + type: string + onExitCode: + description: A glob pattern to match against the decimal + representation of the exit code returned for a job. + type: string + onReason: + description: A glob pattern to match against the reason + returned for a job. + type: string + onStatusReason: + description: A glob pattern to match against the status + reason returned for a job. + type: string + type: object + type: array + type: object + schedulingPriority: + description: The scheduling priority of the job definition. This + only affects jobs in job queues with a fair share policy. Jobs + with a higher scheduling priority are scheduled before jobs + with a lower scheduling priority. Allowed values 0 through 9999. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timeout: + description: Specifies the timeout for jobs so that if a job runs + longer, AWS Batch terminates the job. Maximum number of timeout + is 1. Defined below. + properties: + attemptDurationSeconds: + description: The time duration in seconds after which AWS + Batch terminates your jobs if they have not finished. The + minimum value for the timeout is 60 seconds. + type: number + type: object + type: + description: The type of job definition. Must be container or + multinode. 
+ type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerProperties: + description: A valid container properties provided as a single + valid JSON document. This parameter is only valid if the type + parameter is container. + type: string + deregisterOnNewRevision: + description: When updating a job definition a new revision is + created. This parameter determines if the previous version is + deregistered (INACTIVE) or left ACTIVE. Defaults to true. + type: boolean + eksProperties: + description: A valid eks properties. This parameter is only valid + if the type parameter is container. + properties: + podProperties: + description: The properties for the Kubernetes pod resources + of a job. See pod_properties below. + properties: + containers: + description: The properties of the container that's used + on the Amazon EKS pod. See containers below. + properties: + args: + description: An array of arguments to the entrypoint. + If this isn't specified, the CMD of the container + image is used. This corresponds to the args member + in the Entrypoint portion of the Pod in Kubernetes. + Environment variable references are expanded using + the container's environment. + items: + type: string + type: array + command: + description: The entrypoint for the container. This + isn't run within a shell. 
If this isn't specified, + the ENTRYPOINT of the container image is used. Environment + variable references are expanded using the container's + environment. + items: + type: string + type: array + env: + description: The environment variables to pass to + a container. See EKS Environment below. + items: + properties: + name: + description: Specifies the name of the job definition. + type: string + value: + description: The value of the environment variable. + type: string + type: object + type: array + image: + description: The Docker image used to start the container. + type: string + imagePullPolicy: + description: The image pull policy for the container. + Supported values are Always, IfNotPresent, and Never. + type: string + name: + description: The name of the container. If the name + isn't specified, the default name "Default" is used. + Each container in a pod must have a unique name. + type: string + resources: + description: The type and amount of resources to assign + to a container. The supported resources include + memory, cpu, and nvidia.com/gpu. + properties: + limits: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + requests: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + securityContext: + description: The security context for a job. + properties: + privileged: + type: boolean + readOnlyRootFileSystem: + type: boolean + runAsGroup: + type: number + runAsNonRoot: + type: boolean + runAsUser: + type: number + type: object + volumeMounts: + description: The volume mounts for the container. + items: + properties: + mountPath: + description: The path of the file or directory + on the host to mount into containers on the + pod. + type: string + name: + description: Specifies the name of the job definition. + type: string + readOnly: + type: boolean + type: object + type: array + type: object + dnsPolicy: + description: The DNS policy for the pod. 
The default value + is ClusterFirst. If the host_network argument is not + specified, the default is ClusterFirstWithHostNet. ClusterFirst + indicates that any DNS query that does not match the + configured cluster domain suffix is forwarded to the + upstream nameserver inherited from the node. For more + information, see Pod's DNS policy in the Kubernetes + documentation. + type: string + hostNetwork: + description: Indicates if the pod uses the hosts' network + IP address. The default value is true. Setting this + to false enables the Kubernetes pod networking model. + Most AWS Batch workloads are egress-only and don't require + the overhead of IP allocation for each pod for incoming + connections. + type: boolean + metadata: + description: Metadata about the Kubernetes pod. + properties: + labels: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + serviceAccountName: + description: The name of the service account that's used + to run the pod. + type: string + volumes: + description: Specifies the volumes for a job definition + that uses Amazon EKS resources. AWS Batch supports emptyDir, + hostPath, and secret volume types. + items: + properties: + emptyDir: + properties: + medium: + description: The medium to store the volume. + The default value is an empty string, which + uses the storage of the node. + type: string + sizeLimit: + description: The maximum size of the volume. + By default, there's no maximum size defined. + type: string + type: object + hostPath: + description: The path of the file or directory on + the host to mount into containers on the pod. + properties: + path: + description: The path of the file or directory + on the host to mount into containers on the + pod. + type: string + type: object + name: + description: Specifies the name of the job definition. + type: string + secret: + properties: + optional: + description: Specifies whether the secret or + the secret's keys must be defined. 
+ type: boolean + secretName: + description: The name of the secret. The name + must be allowed as a DNS subdomain name. + type: string + type: object + type: object + type: array + type: object + type: object + name: + description: Specifies the name of the job definition. + type: string + nodeProperties: + description: A valid node properties provided as a single valid + JSON document. This parameter is required if the type parameter + is multinode. + type: string + parameters: + additionalProperties: + type: string + description: Specifies the parameter substitution placeholders + to set in the job definition. + type: object + x-kubernetes-map-type: granular + platformCapabilities: + description: The platform capabilities required by the job definition. + If no value is specified, it defaults to EC2. To run the job + on Fargate resources, specify FARGATE. + items: + type: string + type: array + x-kubernetes-list-type: set + propagateTags: + description: Specifies whether to propagate the tags from the + job definition to the corresponding Amazon ECS task. Default + is false. + type: boolean + retryStrategy: + description: Specifies the retry strategy to use for failed jobs + that are submitted with this job definition. Maximum number + of retry_strategy is 1. Defined below. + properties: + attempts: + description: The number of times to move a job to the RUNNABLE + status. You may specify between 1 and 10 attempts. + type: number + evaluateOnExit: + description: The evaluate on exit conditions under which the + job should be retried or failed. If this parameter is specified, + then the attempts parameter must also be specified. You + may specify up to 5 configuration blocks. + items: + properties: + action: + description: 'Specifies the action to take if all of + the specified conditions are met. The values are not + case sensitive. Valid values: retry, exit.' 
+ type: string + onExitCode: + description: A glob pattern to match against the decimal + representation of the exit code returned for a job. + type: string + onReason: + description: A glob pattern to match against the reason + returned for a job. + type: string + onStatusReason: + description: A glob pattern to match against the status + reason returned for a job. + type: string + type: object + type: array + type: object + schedulingPriority: + description: The scheduling priority of the job definition. This + only affects jobs in job queues with a fair share policy. Jobs + with a higher scheduling priority are scheduled before jobs + with a lower scheduling priority. Allowed values 0 through 9999. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timeout: + description: Specifies the timeout for jobs so that if a job runs + longer, AWS Batch terminates the job. Maximum number of timeout + is 1. Defined below. + properties: + attemptDurationSeconds: + description: The time duration in seconds after which AWS + Batch terminates your jobs if they have not finished. The + minimum value for the timeout is 60 seconds. + type: number + type: object + type: + description: The type of job definition. Must be container or + multinode. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: JobDefinitionStatus defines the observed state of JobDefinition. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name of the job definition, includes + revision (:#). + type: string + arnPrefix: + description: The ARN without the revision number. + type: string + containerProperties: + description: A valid container properties provided as a single + valid JSON document. 
This parameter is only valid if the type + parameter is container. + type: string + deregisterOnNewRevision: + description: When updating a job definition a new revision is + created. This parameter determines if the previous version is + deregistered (INACTIVE) or left ACTIVE. Defaults to true. + type: boolean + eksProperties: + description: A valid eks properties. This parameter is only valid + if the type parameter is container. + properties: + podProperties: + description: The properties for the Kubernetes pod resources + of a job. See pod_properties below. + properties: + containers: + description: The properties of the container that's used + on the Amazon EKS pod. See containers below. + properties: + args: + description: An array of arguments to the entrypoint. + If this isn't specified, the CMD of the container + image is used. This corresponds to the args member + in the Entrypoint portion of the Pod in Kubernetes. + Environment variable references are expanded using + the container's environment. + items: + type: string + type: array + command: + description: The entrypoint for the container. This + isn't run within a shell. If this isn't specified, + the ENTRYPOINT of the container image is used. Environment + variable references are expanded using the container's + environment. + items: + type: string + type: array + env: + description: The environment variables to pass to + a container. See EKS Environment below. + items: + properties: + name: + description: Specifies the name of the job definition. + type: string + value: + description: The value of the environment variable. + type: string + type: object + type: array + image: + description: The Docker image used to start the container. + type: string + imagePullPolicy: + description: The image pull policy for the container. + Supported values are Always, IfNotPresent, and Never. + type: string + name: + description: The name of the container. 
If the name + isn't specified, the default name "Default" is used. + Each container in a pod must have a unique name. + type: string + resources: + description: The type and amount of resources to assign + to a container. The supported resources include + memory, cpu, and nvidia.com/gpu. + properties: + limits: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + requests: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + securityContext: + description: The security context for a job. + properties: + privileged: + type: boolean + readOnlyRootFileSystem: + type: boolean + runAsGroup: + type: number + runAsNonRoot: + type: boolean + runAsUser: + type: number + type: object + volumeMounts: + description: The volume mounts for the container. + items: + properties: + mountPath: + description: The path of the file or directory + on the host to mount into containers on the + pod. + type: string + name: + description: Specifies the name of the job definition. + type: string + readOnly: + type: boolean + type: object + type: array + type: object + dnsPolicy: + description: The DNS policy for the pod. The default value + is ClusterFirst. If the host_network argument is not + specified, the default is ClusterFirstWithHostNet. ClusterFirst + indicates that any DNS query that does not match the + configured cluster domain suffix is forwarded to the + upstream nameserver inherited from the node. For more + information, see Pod's DNS policy in the Kubernetes + documentation. + type: string + hostNetwork: + description: Indicates if the pod uses the hosts' network + IP address. The default value is true. Setting this + to false enables the Kubernetes pod networking model. + Most AWS Batch workloads are egress-only and don't require + the overhead of IP allocation for each pod for incoming + connections. + type: boolean + metadata: + description: Metadata about the Kubernetes pod. 
+ properties: + labels: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + serviceAccountName: + description: The name of the service account that's used + to run the pod. + type: string + volumes: + description: Specifies the volumes for a job definition + that uses Amazon EKS resources. AWS Batch supports emptyDir, + hostPath, and secret volume types. + items: + properties: + emptyDir: + properties: + medium: + description: The medium to store the volume. + The default value is an empty string, which + uses the storage of the node. + type: string + sizeLimit: + description: The maximum size of the volume. + By default, there's no maximum size defined. + type: string + type: object + hostPath: + description: The path of the file or directory on + the host to mount into containers on the pod. + properties: + path: + description: The path of the file or directory + on the host to mount into containers on the + pod. + type: string + type: object + name: + description: Specifies the name of the job definition. + type: string + secret: + properties: + optional: + description: Specifies whether the secret or + the secret's keys must be defined. + type: boolean + secretName: + description: The name of the secret. The name + must be allowed as a DNS subdomain name. + type: string + type: object + type: object + type: array + type: object + type: object + id: + type: string + name: + description: Specifies the name of the job definition. + type: string + nodeProperties: + description: A valid node properties provided as a single valid + JSON document. This parameter is required if the type parameter + is multinode. + type: string + parameters: + additionalProperties: + type: string + description: Specifies the parameter substitution placeholders + to set in the job definition. + type: object + x-kubernetes-map-type: granular + platformCapabilities: + description: The platform capabilities required by the job definition. 
+ If no value is specified, it defaults to EC2. To run the job + on Fargate resources, specify FARGATE. + items: + type: string + type: array + x-kubernetes-list-type: set + propagateTags: + description: Specifies whether to propagate the tags from the + job definition to the corresponding Amazon ECS task. Default + is false. + type: boolean + retryStrategy: + description: Specifies the retry strategy to use for failed jobs + that are submitted with this job definition. Maximum number + of retry_strategy is 1. Defined below. + properties: + attempts: + description: The number of times to move a job to the RUNNABLE + status. You may specify between 1 and 10 attempts. + type: number + evaluateOnExit: + description: The evaluate on exit conditions under which the + job should be retried or failed. If this parameter is specified, + then the attempts parameter must also be specified. You + may specify up to 5 configuration blocks. + items: + properties: + action: + description: 'Specifies the action to take if all of + the specified conditions are met. The values are not + case sensitive. Valid values: retry, exit.' + type: string + onExitCode: + description: A glob pattern to match against the decimal + representation of the exit code returned for a job. + type: string + onReason: + description: A glob pattern to match against the reason + returned for a job. + type: string + onStatusReason: + description: A glob pattern to match against the status + reason returned for a job. + type: string + type: object + type: array + type: object + revision: + description: The revision of the job definition. + type: number + schedulingPriority: + description: The scheduling priority of the job definition. This + only affects jobs in job queues with a fair share policy. Jobs + with a higher scheduling priority are scheduled before jobs + with a lower scheduling priority. Allowed values 0 through 9999. 
+ type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + timeout: + description: Specifies the timeout for jobs so that if a job runs + longer, AWS Batch terminates the job. Maximum number of timeout + is 1. Defined below. + properties: + attemptDurationSeconds: + description: The time duration in seconds after which AWS + Batch terminates your jobs if they have not finished. The + minimum value for the timeout is 60 seconds. + type: number + type: object + type: + description: The type of job definition. Must be container or + multinode. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/batch.aws.upbound.io_schedulingpolicies.yaml b/package/crds/batch.aws.upbound.io_schedulingpolicies.yaml index 149a6a06a7..365e5e897d 100644 --- a/package/crds/batch.aws.upbound.io_schedulingpolicies.yaml +++ b/package/crds/batch.aws.upbound.io_schedulingpolicies.yaml @@ -446,3 +446,425 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SchedulingPolicy is the Schema for the SchedulingPolicys API. + Provides a Batch Scheduling Policy resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SchedulingPolicySpec defines the desired state of SchedulingPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + fairSharePolicy: + properties: + computeReservation: + description: A value used to reserve some of the available + maximum vCPU for fair share identifiers that have not yet + been used. For more information, see FairsharePolicy. + type: number + shareDecaySeconds: + type: number + shareDistribution: + description: One or more share distribution blocks which define + the weights for the fair share identifiers for the fair + share policy. For more information, see FairsharePolicy. + The share_distribution block is documented below. + items: + properties: + shareIdentifier: + description: A fair share identifier or fair share identifier + prefix. For more information, see ShareAttributes. 
+ type: string + weightFactor: + description: The weight factor for the fair share identifier. + For more information, see ShareAttributes. + type: number + type: object + type: array + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + fairSharePolicy: + properties: + computeReservation: + description: A value used to reserve some of the available + maximum vCPU for fair share identifiers that have not yet + been used. For more information, see FairsharePolicy. + type: number + shareDecaySeconds: + type: number + shareDistribution: + description: One or more share distribution blocks which define + the weights for the fair share identifiers for the fair + share policy. For more information, see FairsharePolicy. + The share_distribution block is documented below. + items: + properties: + shareIdentifier: + description: A fair share identifier or fair share identifier + prefix. For more information, see ShareAttributes. + type: string + weightFactor: + description: The weight factor for the fair share identifier. 
+ For more information, see ShareAttributes. + type: number + type: object + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SchedulingPolicyStatus defines the observed state of SchedulingPolicy. 
+ properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name of the scheduling policy. + type: string + fairSharePolicy: + properties: + computeReservation: + description: A value used to reserve some of the available + maximum vCPU for fair share identifiers that have not yet + been used. For more information, see FairsharePolicy. + type: number + shareDecaySeconds: + type: number + shareDistribution: + description: One or more share distribution blocks which define + the weights for the fair share identifiers for the fair + share policy. For more information, see FairsharePolicy. + The share_distribution block is documented below. + items: + properties: + shareIdentifier: + description: A fair share identifier or fair share identifier + prefix. For more information, see ShareAttributes. + type: string + weightFactor: + description: The weight factor for the fair share identifier. + For more information, see ShareAttributes. + type: number + type: object + type: array + type: object + id: + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/budgets.aws.upbound.io_budgetactions.yaml b/package/crds/budgets.aws.upbound.io_budgetactions.yaml index 3fa1120608..edbe030d41 100644 --- a/package/crds/budgets.aws.upbound.io_budgetactions.yaml +++ b/package/crds/budgets.aws.upbound.io_budgetactions.yaml @@ -1219,3 +1219,1174 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BudgetAction is the Schema for the BudgetActions API. Provides + a budget action resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BudgetActionSpec defines the desired state of BudgetAction + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountId: + description: The ID of the target account for budget. Will use + current user's account_id by default if omitted. + type: string + actionThreshold: + description: The trigger threshold of the action. See Action Threshold. + properties: + actionThresholdType: + description: The type of threshold for a notification. Valid + values are PERCENTAGE or ABSOLUTE_VALUE. + type: string + actionThresholdValue: + description: The threshold of a notification. + type: number + type: object + actionType: + description: The type of action. This defines the type of tasks + that can be carried out by this action. This field also determines + the format for definition. Valid values are APPLY_IAM_POLICY, + APPLY_SCP_POLICY, and RUN_SSM_DOCUMENTS. + type: string + approvalModel: + description: This specifies if the action needs manual or automatic + approval. Valid values are AUTOMATIC and MANUAL. + type: string + budgetName: + description: The name of a budget. 
+ type: string + budgetNameRef: + description: Reference to a Budget in budgets to populate budgetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + budgetNameSelector: + description: Selector for a Budget in budgets to populate budgetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + definition: + description: Specifies all of the type-specific parameters. See + Definition. + properties: + iamActionDefinition: + description: The AWS Identity and Access Management (IAM) + action definition details. See IAM Action Definition. + properties: + groups: + description: A list of groups to be attached. There must + be at least one group. + items: + type: string + type: array + x-kubernetes-list-type: set + policyArn: + description: The Amazon Resource Name (ARN) of the policy + to be attached. + type: string + policyArnRef: + description: Reference to a Policy in iam to populate + policyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyArnSelector: + description: Selector for a Policy in iam to populate + policyArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roles: + description: A list of roles to be attached. There must + be at least one role. + items: + type: string + type: array + x-kubernetes-list-type: set + users: + description: A list of users to be attached. There must + be at least one user. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + scpActionDefinition: + description: The service control policies (SCPs) action definition + details. See SCP Action Definition. + properties: + policyId: + description: The policy ID attached. + type: string + targetIds: + description: A list of target IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + ssmActionDefinition: + description: The AWS Systems Manager (SSM) action definition + details. See SSM Action Definition. + properties: + actionSubType: + description: The action subType. 
Valid values are STOP_EC2_INSTANCES + or STOP_RDS_INSTANCES. + type: string + instanceIds: + description: The EC2 and RDS instance IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: The Region to run the SSM document. + type: string + required: + - region + type: object + type: object + executionRoleArn: + description: The role passed for action execution and reversion. + Roles and actions must be in the same account. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + notificationType: + description: The type of a notification. Valid values are ACTUAL + or FORECASTED. + type: string + region: + description: |- + The Region to run the SSM document. + Region is the region you'd like your resource to be created in. + type: string + subscriber: + description: A list of subscribers. See Subscriber. + items: + properties: + address: + description: The address that AWS sends budget notifications + to, either an SNS topic or an email. + type: string + subscriptionType: + description: The type of notification that AWS sends to + a subscriber. Valid values are SNS or EMAIL. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountId: + description: The ID of the target account for budget. Will use + current user's account_id by default if omitted. + type: string + actionThreshold: + description: The trigger threshold of the action. See Action Threshold. + properties: + actionThresholdType: + description: The type of threshold for a notification. Valid + values are PERCENTAGE or ABSOLUTE_VALUE. + type: string + actionThresholdValue: + description: The threshold of a notification. + type: number + type: object + actionType: + description: The type of action. This defines the type of tasks + that can be carried out by this action. This field also determines + the format for definition. Valid values are APPLY_IAM_POLICY, + APPLY_SCP_POLICY, and RUN_SSM_DOCUMENTS. + type: string + approvalModel: + description: This specifies if the action needs manual or automatic + approval. Valid values are AUTOMATIC and MANUAL. + type: string + budgetName: + description: The name of a budget. + type: string + budgetNameRef: + description: Reference to a Budget in budgets to populate budgetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + budgetNameSelector: + description: Selector for a Budget in budgets to populate budgetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + definition: + description: Specifies all of the type-specific parameters. See + Definition. + properties: + iamActionDefinition: + description: The AWS Identity and Access Management (IAM) + action definition details. See IAM Action Definition. + properties: + groups: + description: A list of groups to be attached. There must + be at least one group. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + policyArn: + description: The Amazon Resource Name (ARN) of the policy + to be attached. + type: string + policyArnRef: + description: Reference to a Policy in iam to populate + policyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyArnSelector: + description: Selector for a Policy in iam to populate + policyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roles: + description: A list of roles to be attached. There must + be at least one role. + items: + type: string + type: array + x-kubernetes-list-type: set + users: + description: A list of users to be attached. There must + be at least one user. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + scpActionDefinition: + description: The service control policies (SCPs) action definition + details. See SCP Action Definition. + properties: + policyId: + description: The policy ID attached. + type: string + targetIds: + description: A list of target IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + ssmActionDefinition: + description: The AWS Systems Manager (SSM) action definition + details. See SSM Action Definition. + properties: + actionSubType: + description: The action subType. Valid values are STOP_EC2_INSTANCES + or STOP_RDS_INSTANCES. + type: string + instanceIds: + description: The EC2 and RDS instance IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + executionRoleArn: + description: The role passed for action execution and reversion. + Roles and actions must be in the same account. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + notificationType: + description: The type of a notification. Valid values are ACTUAL + or FORECASTED. + type: string + subscriber: + description: A list of subscribers. See Subscriber. + items: + properties: + address: + description: The address that AWS sends budget notifications + to, either an SNS topic or an email. + type: string + subscriptionType: + description: The type of notification that AWS sends to + a subscriber. Valid values are SNS or EMAIL. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.actionThreshold is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.actionThreshold) + || (has(self.initProvider) && has(self.initProvider.actionThreshold))' + - message: spec.forProvider.actionType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.actionType) + || (has(self.initProvider) && has(self.initProvider.actionType))' + - message: spec.forProvider.approvalModel is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.approvalModel) + || (has(self.initProvider) && has(self.initProvider.approvalModel))' + - message: spec.forProvider.definition is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.definition) + || (has(self.initProvider) && has(self.initProvider.definition))' + - message: spec.forProvider.notificationType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' 
in self.managementPolicies) || has(self.forProvider.notificationType) + || (has(self.initProvider) && has(self.initProvider.notificationType))' + - message: spec.forProvider.subscriber is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.subscriber) + || (has(self.initProvider) && has(self.initProvider.subscriber))' + status: + description: BudgetActionStatus defines the observed state of BudgetAction. + properties: + atProvider: + properties: + accountId: + description: The ID of the target account for budget. Will use + current user's account_id by default if omitted. + type: string + actionId: + description: The id of the budget action. + type: string + actionThreshold: + description: The trigger threshold of the action. See Action Threshold. + properties: + actionThresholdType: + description: The type of threshold for a notification. Valid + values are PERCENTAGE or ABSOLUTE_VALUE. + type: string + actionThresholdValue: + description: The threshold of a notification. + type: number + type: object + actionType: + description: The type of action. This defines the type of tasks + that can be carried out by this action. This field also determines + the format for definition. Valid values are APPLY_IAM_POLICY, + APPLY_SCP_POLICY, and RUN_SSM_DOCUMENTS. + type: string + approvalModel: + description: This specifies if the action needs manual or automatic + approval. Valid values are AUTOMATIC and MANUAL. + type: string + arn: + description: The ARN of the budget action. + type: string + budgetName: + description: The name of a budget. + type: string + definition: + description: Specifies all of the type-specific parameters. See + Definition. + properties: + iamActionDefinition: + description: The AWS Identity and Access Management (IAM) + action definition details. See IAM Action Definition. 
+ properties: + groups: + description: A list of groups to be attached. There must + be at least one group. + items: + type: string + type: array + x-kubernetes-list-type: set + policyArn: + description: The Amazon Resource Name (ARN) of the policy + to be attached. + type: string + roles: + description: A list of roles to be attached. There must + be at least one role. + items: + type: string + type: array + x-kubernetes-list-type: set + users: + description: A list of users to be attached. There must + be at least one user. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + scpActionDefinition: + description: The service control policies (SCPs) action definition + details. See SCP Action Definition. + properties: + policyId: + description: The policy ID attached. + type: string + targetIds: + description: A list of target IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + ssmActionDefinition: + description: The AWS Systems Manager (SSM) action definition + details. See SSM Action Definition. + properties: + actionSubType: + description: The action subType. Valid values are STOP_EC2_INSTANCES + or STOP_RDS_INSTANCES. + type: string + instanceIds: + description: The EC2 and RDS instance IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: The Region to run the SSM document. + type: string + type: object + type: object + executionRoleArn: + description: The role passed for action execution and reversion. + Roles and actions must be in the same account. + type: string + id: + description: ID of resource. + type: string + notificationType: + description: The type of a notification. Valid values are ACTUAL + or FORECASTED. + type: string + status: + description: The status of the budget action. + type: string + subscriber: + description: A list of subscribers. See Subscriber. 
+ items: + properties: + address: + description: The address that AWS sends budget notifications + to, either an SNS topic or an email. + type: string + subscriptionType: + description: The type of notification that AWS sends to + a subscriber. Valid values are SNS or EMAIL. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/budgets.aws.upbound.io_budgets.yaml b/package/crds/budgets.aws.upbound.io_budgets.yaml index d0b894e6e2..5b6c6463c2 100644 --- a/package/crds/budgets.aws.upbound.io_budgets.yaml +++ b/package/crds/budgets.aws.upbound.io_budgets.yaml @@ -919,3 +919,886 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Budget is the Schema for the Budgets API. Provides a budgets + budget resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BudgetSpec defines the desired state of Budget + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountId: + description: The ID of the target account for budget. Will use + current user's account_id by default if omitted. + type: string + autoAdjustData: + description: Object containing AutoAdjustData which determines + the budget amount for an auto-adjusting budget. + properties: + autoAdjustType: + description: 'The string that defines whether your budget + auto-adjusts based on historical or forecasted data. Valid + values: FORECAST,HISTORICAL' + type: string + historicalOptions: + description: Configuration block of Historical Options. Required + for auto_adjust_type of HISTORICAL Configuration block that + defines the historical data that your auto-adjusting budget + is based on. + properties: + budgetAdjustmentPeriod: + description: The number of budget periods included in + the moving-average calculation that determines your + auto-adjusted budget amount. + type: number + type: object + type: object + budgetType: + description: Whether this budget tracks monetary cost or usage. 
+ type: string + costFilter: + description: A list of CostFilter name/values pair to apply to + budget. + items: + properties: + name: + description: The name of a budget. Unique within accounts. + type: string + values: + items: + type: string + type: array + type: object + type: array + costTypes: + description: Object containing CostTypes The types of cost included + in a budget, such as tax and subscriptions. + properties: + includeCredit: + description: A boolean value whether to include credits in + the cost budget. Defaults to true + type: boolean + includeDiscount: + description: Whether a budget includes discounts. Defaults + to true + type: boolean + includeOtherSubscription: + description: A boolean value whether to include other subscription + costs in the cost budget. Defaults to true + type: boolean + includeRecurring: + description: A boolean value whether to include recurring + costs in the cost budget. Defaults to true + type: boolean + includeRefund: + description: A boolean value whether to include refunds in + the cost budget. Defaults to true + type: boolean + includeSubscription: + description: A boolean value whether to include subscriptions + in the cost budget. Defaults to true + type: boolean + includeSupport: + description: A boolean value whether to include support costs + in the cost budget. Defaults to true + type: boolean + includeTax: + description: A boolean value whether to include tax in the + cost budget. Defaults to true + type: boolean + includeUpfront: + description: A boolean value whether to include upfront costs + in the cost budget. Defaults to true + type: boolean + useAmortized: + description: Whether a budget uses the amortized rate. Defaults + to false + type: boolean + useBlended: + description: A boolean value whether to use blended costs + in the cost budget. Defaults to false + type: boolean + type: object + limitAmount: + description: The amount of cost or usage being measured for a + budget. 
+ type: string + limitUnit: + description: The unit of measurement used for the budget forecast, + actual spend, or budget threshold, such as dollars or GB. See + Spend documentation. + type: string + notification: + description: Object containing Budget Notifications. Can be used + multiple times to define more than one budget notification. + items: + properties: + comparisonOperator: + description: Comparison operator to use to evaluate the + condition. Can be LESS_THAN, EQUAL_TO or GREATER_THAN. + type: string + notificationType: + description: What kind of budget value to notify on. Can + be ACTUAL or FORECASTED + type: string + subscriberEmailAddresses: + description: E-Mail addresses to notify. Either this or + subscriber_sns_topic_arns is required. + items: + type: string + type: array + x-kubernetes-list-type: set + subscriberSnsTopicArns: + description: SNS topics to notify. Either this or subscriber_email_addresses + is required. + items: + type: string + type: array + x-kubernetes-list-type: set + threshold: + description: Threshold when the notification should be sent. + type: number + thresholdType: + description: What kind of threshold is defined. Can be PERCENTAGE + OR ABSOLUTE_VALUE. + type: string + type: object + type: array + plannedLimit: + description: Object containing Planned Budget Limits. Can be used + multiple times to plan more than one budget limit. See PlannedBudgetLimits + documentation. + items: + properties: + amount: + description: The amount of cost or usage being measured + for a budget. + type: string + startTime: + description: 'The start time of the budget limit. Format: + 2017-01-01_12:00. See PlannedBudgetLimits documentation.' + type: string + unit: + description: The unit of measurement used for the budget + forecast, actual spend, or budget threshold, such as dollars + or GB. See Spend documentation. 
+ type: string + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timePeriodEnd: + description: 'The end of the time period covered by the budget. + There are no restrictions on the end date. Format: 2017-01-01_12:00.' + type: string + timePeriodStart: + description: 'The start of the time period covered by the budget. + If you don''t specify a start date, AWS defaults to the start + of your chosen time period. The start date must come before + the end date. Format: 2017-01-01_12:00.' + type: string + timeUnit: + description: 'The length of time until a budget resets the actual + and forecasted spend. Valid values: MONTHLY, QUARTERLY, ANNUALLY, + and DAILY.' + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountId: + description: The ID of the target account for budget. Will use + current user's account_id by default if omitted. + type: string + autoAdjustData: + description: Object containing AutoAdjustData which determines + the budget amount for an auto-adjusting budget. 
+ properties: + autoAdjustType: + description: 'The string that defines whether your budget + auto-adjusts based on historical or forecasted data. Valid + values: FORECAST,HISTORICAL' + type: string + historicalOptions: + description: Configuration block of Historical Options. Required + for auto_adjust_type of HISTORICAL Configuration block that + defines the historical data that your auto-adjusting budget + is based on. + properties: + budgetAdjustmentPeriod: + description: The number of budget periods included in + the moving-average calculation that determines your + auto-adjusted budget amount. + type: number + type: object + type: object + budgetType: + description: Whether this budget tracks monetary cost or usage. + type: string + costFilter: + description: A list of CostFilter name/values pair to apply to + budget. + items: + properties: + name: + description: The name of a budget. Unique within accounts. + type: string + values: + items: + type: string + type: array + type: object + type: array + costTypes: + description: Object containing CostTypes The types of cost included + in a budget, such as tax and subscriptions. + properties: + includeCredit: + description: A boolean value whether to include credits in + the cost budget. Defaults to true + type: boolean + includeDiscount: + description: Whether a budget includes discounts. Defaults + to true + type: boolean + includeOtherSubscription: + description: A boolean value whether to include other subscription + costs in the cost budget. Defaults to true + type: boolean + includeRecurring: + description: A boolean value whether to include recurring + costs in the cost budget. Defaults to true + type: boolean + includeRefund: + description: A boolean value whether to include refunds in + the cost budget. Defaults to true + type: boolean + includeSubscription: + description: A boolean value whether to include subscriptions + in the cost budget. 
Defaults to true + type: boolean + includeSupport: + description: A boolean value whether to include support costs + in the cost budget. Defaults to true + type: boolean + includeTax: + description: A boolean value whether to include tax in the + cost budget. Defaults to true + type: boolean + includeUpfront: + description: A boolean value whether to include upfront costs + in the cost budget. Defaults to true + type: boolean + useAmortized: + description: Whether a budget uses the amortized rate. Defaults + to false + type: boolean + useBlended: + description: A boolean value whether to use blended costs + in the cost budget. Defaults to false + type: boolean + type: object + limitAmount: + description: The amount of cost or usage being measured for a + budget. + type: string + limitUnit: + description: The unit of measurement used for the budget forecast, + actual spend, or budget threshold, such as dollars or GB. See + Spend documentation. + type: string + notification: + description: Object containing Budget Notifications. Can be used + multiple times to define more than one budget notification. + items: + properties: + comparisonOperator: + description: Comparison operator to use to evaluate the + condition. Can be LESS_THAN, EQUAL_TO or GREATER_THAN. + type: string + notificationType: + description: What kind of budget value to notify on. Can + be ACTUAL or FORECASTED + type: string + subscriberEmailAddresses: + description: E-Mail addresses to notify. Either this or + subscriber_sns_topic_arns is required. + items: + type: string + type: array + x-kubernetes-list-type: set + subscriberSnsTopicArns: + description: SNS topics to notify. Either this or subscriber_email_addresses + is required. + items: + type: string + type: array + x-kubernetes-list-type: set + threshold: + description: Threshold when the notification should be sent. + type: number + thresholdType: + description: What kind of threshold is defined. Can be PERCENTAGE + OR ABSOLUTE_VALUE. 
+ type: string + type: object + type: array + plannedLimit: + description: Object containing Planned Budget Limits. Can be used + multiple times to plan more than one budget limit. See PlannedBudgetLimits + documentation. + items: + properties: + amount: + description: The amount of cost or usage being measured + for a budget. + type: string + startTime: + description: 'The start time of the budget limit. Format: + 2017-01-01_12:00. See PlannedBudgetLimits documentation.' + type: string + unit: + description: The unit of measurement used for the budget + forecast, actual spend, or budget threshold, such as dollars + or GB. See Spend documentation. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timePeriodEnd: + description: 'The end of the time period covered by the budget. + There are no restrictions on the end date. Format: 2017-01-01_12:00.' + type: string + timePeriodStart: + description: 'The start of the time period covered by the budget. + If you don''t specify a start date, AWS defaults to the start + of your chosen time period. The start date must come before + the end date. Format: 2017-01-01_12:00.' + type: string + timeUnit: + description: 'The length of time until a budget resets the actual + and forecasted spend. Valid values: MONTHLY, QUARTERLY, ANNUALLY, + and DAILY.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.budgetType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.budgetType) + || (has(self.initProvider) && has(self.initProvider.budgetType))' + - message: spec.forProvider.timeUnit is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timeUnit) + || (has(self.initProvider) && has(self.initProvider.timeUnit))' + status: + description: BudgetStatus defines the observed state of Budget. + properties: + atProvider: + properties: + accountId: + description: The ID of the target account for budget. Will use + current user's account_id by default if omitted. + type: string + arn: + description: The ARN of the budget. + type: string + autoAdjustData: + description: Object containing AutoAdjustData which determines + the budget amount for an auto-adjusting budget. + properties: + autoAdjustType: + description: 'The string that defines whether your budget + auto-adjusts based on historical or forecasted data. Valid + values: FORECAST,HISTORICAL' + type: string + historicalOptions: + description: Configuration block of Historical Options. Required + for auto_adjust_type of HISTORICAL Configuration block that + defines the historical data that your auto-adjusting budget + is based on. + properties: + budgetAdjustmentPeriod: + description: The number of budget periods included in + the moving-average calculation that determines your + auto-adjusted budget amount. + type: number + lookbackAvailablePeriods: + description: The integer that describes how many budget + periods in your BudgetAdjustmentPeriod are included + in the calculation of your current budget limit. 
If + the first budget period in your BudgetAdjustmentPeriod + has no cost data, then that budget period isn’t included + in the average that determines your budget limit. You + can’t set your own LookBackAvailablePeriods. The value + is automatically calculated from the budget_adjustment_period + and your historical cost data. + type: number + type: object + lastAutoAdjustTime: + description: The last time that your budget was auto-adjusted. + type: string + type: object + budgetType: + description: Whether this budget tracks monetary cost or usage. + type: string + costFilter: + description: A list of CostFilter name/values pair to apply to + budget. + items: + properties: + name: + description: The name of a budget. Unique within accounts. + type: string + values: + items: + type: string + type: array + type: object + type: array + costTypes: + description: Object containing CostTypes The types of cost included + in a budget, such as tax and subscriptions. + properties: + includeCredit: + description: A boolean value whether to include credits in + the cost budget. Defaults to true + type: boolean + includeDiscount: + description: Whether a budget includes discounts. Defaults + to true + type: boolean + includeOtherSubscription: + description: A boolean value whether to include other subscription + costs in the cost budget. Defaults to true + type: boolean + includeRecurring: + description: A boolean value whether to include recurring + costs in the cost budget. Defaults to true + type: boolean + includeRefund: + description: A boolean value whether to include refunds in + the cost budget. Defaults to true + type: boolean + includeSubscription: + description: A boolean value whether to include subscriptions + in the cost budget. Defaults to true + type: boolean + includeSupport: + description: A boolean value whether to include support costs + in the cost budget. 
Defaults to true + type: boolean + includeTax: + description: A boolean value whether to include tax in the + cost budget. Defaults to true + type: boolean + includeUpfront: + description: A boolean value whether to include upfront costs + in the cost budget. Defaults to true + type: boolean + useAmortized: + description: Whether a budget uses the amortized rate. Defaults + to false + type: boolean + useBlended: + description: A boolean value whether to use blended costs + in the cost budget. Defaults to false + type: boolean + type: object + id: + description: id of resource. + type: string + limitAmount: + description: The amount of cost or usage being measured for a + budget. + type: string + limitUnit: + description: The unit of measurement used for the budget forecast, + actual spend, or budget threshold, such as dollars or GB. See + Spend documentation. + type: string + notification: + description: Object containing Budget Notifications. Can be used + multiple times to define more than one budget notification. + items: + properties: + comparisonOperator: + description: Comparison operator to use to evaluate the + condition. Can be LESS_THAN, EQUAL_TO or GREATER_THAN. + type: string + notificationType: + description: What kind of budget value to notify on. Can + be ACTUAL or FORECASTED + type: string + subscriberEmailAddresses: + description: E-Mail addresses to notify. Either this or + subscriber_sns_topic_arns is required. + items: + type: string + type: array + x-kubernetes-list-type: set + subscriberSnsTopicArns: + description: SNS topics to notify. Either this or subscriber_email_addresses + is required. + items: + type: string + type: array + x-kubernetes-list-type: set + threshold: + description: Threshold when the notification should be sent. + type: number + thresholdType: + description: What kind of threshold is defined. Can be PERCENTAGE + OR ABSOLUTE_VALUE. 
+ type: string + type: object + type: array + plannedLimit: + description: Object containing Planned Budget Limits. Can be used + multiple times to plan more than one budget limit. See PlannedBudgetLimits + documentation. + items: + properties: + amount: + description: The amount of cost or usage being measured + for a budget. + type: string + startTime: + description: 'The start time of the budget limit. Format: + 2017-01-01_12:00. See PlannedBudgetLimits documentation.' + type: string + unit: + description: The unit of measurement used for the budget + forecast, actual spend, or budget threshold, such as dollars + or GB. See Spend documentation. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + timePeriodEnd: + description: 'The end of the time period covered by the budget. + There are no restrictions on the end date. Format: 2017-01-01_12:00.' + type: string + timePeriodStart: + description: 'The start of the time period covered by the budget. + If you don''t specify a start date, AWS defaults to the start + of your chosen time period. The start date must come before + the end date. Format: 2017-01-01_12:00.' + type: string + timeUnit: + description: 'The length of time until a budget resets the actual + and forecasted spend. Valid values: MONTHLY, QUARTERLY, ANNUALLY, + and DAILY.' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/chime.aws.upbound.io_voiceconnectorstreamings.yaml b/package/crds/chime.aws.upbound.io_voiceconnectorstreamings.yaml index 1419a14fca..41f8178922 100644 --- a/package/crds/chime.aws.upbound.io_voiceconnectorstreamings.yaml +++ b/package/crds/chime.aws.upbound.io_voiceconnectorstreamings.yaml @@ -588,3 +588,567 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VoiceConnectorStreaming is the Schema for the VoiceConnectorStreamings + API. The streaming configuration associated with an Amazon Chime Voice Connector. + Specifies whether media streaming is enabled for sending to Amazon Kinesis, + and shows the retention period for the Amazon Kinesis data, in hours. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VoiceConnectorStreamingSpec defines the desired state of + VoiceConnectorStreaming + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dataRetention: + description: The retention period, in hours, for the Amazon Kinesis + data. + type: number + disabled: + description: 'When true, media streaming to Amazon Kinesis is + turned off. Default: false' + type: boolean + mediaInsightsConfiguration: + description: The media insights configuration. See media_insights_configuration. + properties: + configurationArn: + description: The media insights configuration that will be + invoked by the Voice Connector. + type: string + disabled: + description: When true, the media insights configuration is + not enabled. Defaults to false. + type: boolean + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + streamingNotificationTargets: + description: 'The streaming notification targets. Valid Values: + EventBridge | SNS | SQS' + items: + type: string + type: array + x-kubernetes-list-type: set + voiceConnectorId: + description: The Amazon Chime Voice Connector ID. 
+ type: string + voiceConnectorIdRef: + description: Reference to a VoiceConnector in chime to populate + voiceConnectorId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + voiceConnectorIdSelector: + description: Selector for a VoiceConnector in chime to populate + voiceConnectorId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dataRetention: + description: The retention period, in hours, for the Amazon Kinesis + data. + type: number + disabled: + description: 'When true, media streaming to Amazon Kinesis is + turned off. Default: false' + type: boolean + mediaInsightsConfiguration: + description: The media insights configuration. See media_insights_configuration. + properties: + configurationArn: + description: The media insights configuration that will be + invoked by the Voice Connector. + type: string + disabled: + description: When true, the media insights configuration is + not enabled. Defaults to false. + type: boolean + type: object + streamingNotificationTargets: + description: 'The streaming notification targets. Valid Values: + EventBridge | SNS | SQS' + items: + type: string + type: array + x-kubernetes-list-type: set + voiceConnectorId: + description: The Amazon Chime Voice Connector ID. 
+ type: string + voiceConnectorIdRef: + description: Reference to a VoiceConnector in chime to populate + voiceConnectorId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + voiceConnectorIdSelector: + description: Selector for a VoiceConnector in chime to populate + voiceConnectorId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dataRetention is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dataRetention) + || (has(self.initProvider) && has(self.initProvider.dataRetention))' + status: + description: VoiceConnectorStreamingStatus defines the observed state + of VoiceConnectorStreaming. + properties: + atProvider: + properties: + dataRetention: + description: The retention period, in hours, for the Amazon Kinesis + data. + type: number + disabled: + description: 'When true, media streaming to Amazon Kinesis is + turned off. Default: false' + type: boolean + id: + description: The Amazon Chime Voice Connector ID. + type: string + mediaInsightsConfiguration: + description: The media insights configuration. See media_insights_configuration. + properties: + configurationArn: + description: The media insights configuration that will be + invoked by the Voice Connector. + type: string + disabled: + description: When true, the media insights configuration is + not enabled. Defaults to false. + type: boolean + type: object + streamingNotificationTargets: + description: 'The streaming notification targets. Valid Values: + EventBridge | SNS | SQS' + items: + type: string + type: array + x-kubernetes-list-type: set + voiceConnectorId: + description: The Amazon Chime Voice Connector ID. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudformation.aws.upbound.io_stacksetinstances.yaml b/package/crds/cloudformation.aws.upbound.io_stacksetinstances.yaml index 64e33567c5..d4c7563e73 100644 --- a/package/crds/cloudformation.aws.upbound.io_stacksetinstances.yaml +++ b/package/crds/cloudformation.aws.upbound.io_stacksetinstances.yaml @@ -749,3 +749,722 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StackSetInstance is the Schema for the StackSetInstances API. + Manages a CloudFormation StackSet Instance. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StackSetInstanceSpec defines the desired state of StackSetInstance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountId: + description: Target AWS Account ID to create a Stack based on + the StackSet. Defaults to current account. + type: string + callAs: + description: 'Specifies whether you are acting as an account administrator + in the organization''s management account or as a delegated + administrator in a member account. Valid values: SELF (default), + DELEGATED_ADMIN.' + type: string + deploymentTargets: + description: The AWS Organizations accounts to which StackSets + deploys. StackSets doesn't deploy stack instances to the organization + management account, even if the organization management account + is in your organization or in an OU in your organization. Drift + detection is not possible for this argument. See deployment_targets + below. + properties: + organizationalUnitIds: + description: The organization root ID or organizational unit + (OU) IDs to which StackSets deploys. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + operationPreferences: + description: Preferences for how AWS CloudFormation performs a + stack set operation. + properties: + failureToleranceCount: + description: The number of accounts, per Region, for which + this operation can fail before AWS CloudFormation stops + the operation in that Region. + type: number + failureTolerancePercentage: + description: The percentage of accounts, per Region, for which + this stack operation can fail before AWS CloudFormation + stops the operation in that Region. + type: number + maxConcurrentCount: + description: The maximum number of accounts in which to perform + this operation at one time. + type: number + maxConcurrentPercentage: + description: The maximum percentage of accounts in which to + perform this operation at one time. + type: number + regionConcurrencyType: + description: The concurrency type of deploying StackSets operations + in Regions, could be in parallel or one Region at a time. + Valid values are SEQUENTIAL and PARALLEL. + type: string + regionOrder: + description: The order of the Regions in where you want to + perform the stack operation. + items: + type: string + type: array + type: object + parameterOverrides: + additionalProperties: + type: string + description: Key-value map of input parameters to override from + the StackSet for this Instance. + type: object + x-kubernetes-map-type: granular + region: + description: |- + Target AWS Region to create a Stack based on the StackSet. Defaults to current region. + Region is the region you'd like your resource to be created in. + type: string + retainStack: + description: You cannot reassociate a retained Stack or add an + existing, saved Stack to a new StackSet. Defaults to false. + type: boolean + stackSetName: + description: Name of the StackSet. + type: string + stackSetNameRef: + description: Reference to a StackSet in cloudformation to populate + stackSetName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackSetNameSelector: + description: Selector for a StackSet in cloudformation to populate + stackSetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountId: + description: Target AWS Account ID to create a Stack based on + the StackSet. Defaults to current account. + type: string + callAs: + description: 'Specifies whether you are acting as an account administrator + in the organization''s management account or as a delegated + administrator in a member account. Valid values: SELF (default), + DELEGATED_ADMIN.' + type: string + deploymentTargets: + description: The AWS Organizations accounts to which StackSets + deploys. StackSets doesn't deploy stack instances to the organization + management account, even if the organization management account + is in your organization or in an OU in your organization. Drift + detection is not possible for this argument. See deployment_targets + below. + properties: + organizationalUnitIds: + description: The organization root ID or organizational unit + (OU) IDs to which StackSets deploys. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + operationPreferences: + description: Preferences for how AWS CloudFormation performs a + stack set operation. 
+ properties: + failureToleranceCount: + description: The number of accounts, per Region, for which + this operation can fail before AWS CloudFormation stops + the operation in that Region. + type: number + failureTolerancePercentage: + description: The percentage of accounts, per Region, for which + this stack operation can fail before AWS CloudFormation + stops the operation in that Region. + type: number + maxConcurrentCount: + description: The maximum number of accounts in which to perform + this operation at one time. + type: number + maxConcurrentPercentage: + description: The maximum percentage of accounts in which to + perform this operation at one time. + type: number + regionConcurrencyType: + description: The concurrency type of deploying StackSets operations + in Regions, could be in parallel or one Region at a time. + Valid values are SEQUENTIAL and PARALLEL. + type: string + regionOrder: + description: The order of the Regions in where you want to + perform the stack operation. + items: + type: string + type: array + type: object + parameterOverrides: + additionalProperties: + type: string + description: Key-value map of input parameters to override from + the StackSet for this Instance. + type: object + x-kubernetes-map-type: granular + retainStack: + description: You cannot reassociate a retained Stack or add an + existing, saved Stack to a new StackSet. Defaults to false. + type: boolean + stackSetName: + description: Name of the StackSet. + type: string + stackSetNameRef: + description: Reference to a StackSet in cloudformation to populate + stackSetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackSetNameSelector: + description: Selector for a StackSet in cloudformation to populate + stackSetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: StackSetInstanceStatus defines the observed state of StackSetInstance. + properties: + atProvider: + properties: + accountId: + description: Target AWS Account ID to create a Stack based on + the StackSet. Defaults to current account. + type: string + callAs: + description: 'Specifies whether you are acting as an account administrator + in the organization''s management account or as a delegated + administrator in a member account. Valid values: SELF (default), + DELEGATED_ADMIN.' + type: string + deploymentTargets: + description: The AWS Organizations accounts to which StackSets + deploys. 
StackSets doesn't deploy stack instances to the organization + management account, even if the organization management account + is in your organization or in an OU in your organization. Drift + detection is not possible for this argument. See deployment_targets + below. + properties: + organizationalUnitIds: + description: The organization root ID or organizational unit + (OU) IDs to which StackSets deploys. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: Unique identifier for the resource. If deployment_targets + is set, this is a comma-delimited string combining stack set + name, organizational unit IDs (/-delimited), and region (ie. + mystack,ou-123/ou-456,us-east-1). Otherwise, this is a comma-delimited + string combining stack set name, AWS account ID, and region + (ie. mystack,123456789012,us-east-1). + type: string + operationPreferences: + description: Preferences for how AWS CloudFormation performs a + stack set operation. + properties: + failureToleranceCount: + description: The number of accounts, per Region, for which + this operation can fail before AWS CloudFormation stops + the operation in that Region. + type: number + failureTolerancePercentage: + description: The percentage of accounts, per Region, for which + this stack operation can fail before AWS CloudFormation + stops the operation in that Region. + type: number + maxConcurrentCount: + description: The maximum number of accounts in which to perform + this operation at one time. + type: number + maxConcurrentPercentage: + description: The maximum percentage of accounts in which to + perform this operation at one time. + type: number + regionConcurrencyType: + description: The concurrency type of deploying StackSets operations + in Regions, could be in parallel or one Region at a time. + Valid values are SEQUENTIAL and PARALLEL. 
+ type: string + regionOrder: + description: The order of the Regions in where you want to + perform the stack operation. + items: + type: string + type: array + type: object + organizationalUnitId: + description: The organization root ID or organizational unit (OU) + ID in which the stack is deployed. + type: string + parameterOverrides: + additionalProperties: + type: string + description: Key-value map of input parameters to override from + the StackSet for this Instance. + type: object + x-kubernetes-map-type: granular + retainStack: + description: You cannot reassociate a retained Stack or add an + existing, saved Stack to a new StackSet. Defaults to false. + type: boolean + stackId: + description: Stack identifier. + type: string + stackInstanceSummaries: + description: |- + List of stack instances created from an organizational unit deployment target. This will only be populated when deployment_targets is set. See stack_instance_summaries. + List of stack instances created from an organizational unit deployment target. This will only be populated when `deployment_targets` is set. + items: + properties: + accountId: + description: Target AWS Account ID to create a Stack based + on the StackSet. Defaults to current account. + type: string + organizationalUnitId: + description: The organization root ID or organizational + unit (OU) ID in which the stack is deployed. + type: string + stackId: + description: Stack identifier. + type: string + type: object + type: array + stackSetName: + description: Name of the StackSet. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudformation.aws.upbound.io_stacksets.yaml b/package/crds/cloudformation.aws.upbound.io_stacksets.yaml index 94795661d6..a7fd6d7fde 100644 --- a/package/crds/cloudformation.aws.upbound.io_stacksets.yaml +++ b/package/crds/cloudformation.aws.upbound.io_stacksets.yaml @@ -862,3 +862,826 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StackSet is the Schema for the StackSets API. Manages a CloudFormation + StackSet. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StackSetSpec defines the desired state of StackSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + administrationRoleArn: + description: Amazon Resource Number (ARN) of the IAM Role in the + administrator account. This must be defined when using the SELF_MANAGED + permission model. + type: string + administrationRoleArnRef: + description: Reference to a Role in iam to populate administrationRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + administrationRoleArnSelector: + description: Selector for a Role in iam to populate administrationRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoDeployment: + description: Configuration block containing the auto-deployment + model for your StackSet. This can only be defined when using + the SERVICE_MANAGED permission model. + properties: + enabled: + description: Whether or not auto-deployment is enabled. + type: boolean + retainStacksOnAccountRemoval: + description: Whether or not to retain stacks when the account + is removed. + type: boolean + type: object + callAs: + description: 'Specifies whether you are acting as an account administrator + in the organization''s management account or as a delegated + administrator in a member account. 
Valid values: SELF (default), + DELEGATED_ADMIN.' + type: string + capabilities: + description: 'A list of capabilities. Valid values: CAPABILITY_IAM, + CAPABILITY_NAMED_IAM, CAPABILITY_AUTO_EXPAND.' + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the StackSet. + type: string + executionRoleName: + description: Name of the IAM Role in all target accounts for StackSet + operations. Defaults to AWSCloudFormationStackSetExecutionRole + when using the SELF_MANAGED permission model. This should not + be defined when using the SERVICE_MANAGED permission model. + type: string + managedExecution: + description: Configuration block to allow StackSets to perform + non-conflicting operations concurrently and queues conflicting + operations. + properties: + active: + description: When set to true, StackSets performs non-conflicting + operations concurrently and queues conflicting operations. + After conflicting operations finish, StackSets starts queued + operations in request order. Default is false. + type: boolean + type: object + operationPreferences: + description: Preferences for how AWS CloudFormation performs a + stack set update. + properties: + failureToleranceCount: + description: The number of accounts, per Region, for which + this operation can fail before AWS CloudFormation stops + the operation in that Region. + type: number + failureTolerancePercentage: + description: The percentage of accounts, per Region, for which + this stack operation can fail before AWS CloudFormation + stops the operation in that Region. + type: number + maxConcurrentCount: + description: The maximum number of accounts in which to perform + this operation at one time. + type: number + maxConcurrentPercentage: + description: The maximum percentage of accounts in which to + perform this operation at one time. 
+ type: number + regionConcurrencyType: + description: The concurrency type of deploying StackSets operations + in Regions, could be in parallel or one Region at a time. + type: string + regionOrder: + description: The order of the Regions in where you want to + perform the stack operation. + items: + type: string + type: array + type: object + parameters: + additionalProperties: + type: string + description: Key-value map of input parameters for the StackSet + template. All template parameters, including those with a Default, + must be configured or ignored with lifecycle configuration block + ignore_changes argument. All NoEcho template parameters must + be ignored with the lifecycle configuration block ignore_changes + argument. + type: object + x-kubernetes-map-type: granular + permissionModel: + description: 'Describes how the IAM roles required for your StackSet + are created. Valid values: SELF_MANAGED (default), SERVICE_MANAGED.' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + templateBody: + description: 'String containing the CloudFormation template body. + Maximum size: 51,200 bytes. Conflicts with template_url.' + type: string + templateUrl: + description: 'String containing the location of a file containing + the CloudFormation template body. The URL must point to a template + that is located in an Amazon S3 bucket. Maximum location file + size: 460,800 bytes. Conflicts with template_body.' + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + administrationRoleArn: + description: Amazon Resource Number (ARN) of the IAM Role in the + administrator account. This must be defined when using the SELF_MANAGED + permission model. + type: string + administrationRoleArnRef: + description: Reference to a Role in iam to populate administrationRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + administrationRoleArnSelector: + description: Selector for a Role in iam to populate administrationRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoDeployment: + description: Configuration block containing the auto-deployment + model for your StackSet. This can only be defined when using + the SERVICE_MANAGED permission model. + properties: + enabled: + description: Whether or not auto-deployment is enabled. + type: boolean + retainStacksOnAccountRemoval: + description: Whether or not to retain stacks when the account + is removed. + type: boolean + type: object + callAs: + description: 'Specifies whether you are acting as an account administrator + in the organization''s management account or as a delegated + administrator in a member account. Valid values: SELF (default), + DELEGATED_ADMIN.' + type: string + capabilities: + description: 'A list of capabilities. Valid values: CAPABILITY_IAM, + CAPABILITY_NAMED_IAM, CAPABILITY_AUTO_EXPAND.' + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the StackSet. 
+ type: string + executionRoleName: + description: Name of the IAM Role in all target accounts for StackSet + operations. Defaults to AWSCloudFormationStackSetExecutionRole + when using the SELF_MANAGED permission model. This should not + be defined when using the SERVICE_MANAGED permission model. + type: string + managedExecution: + description: Configuration block to allow StackSets to perform + non-conflicting operations concurrently and queues conflicting + operations. + properties: + active: + description: When set to true, StackSets performs non-conflicting + operations concurrently and queues conflicting operations. + After conflicting operations finish, StackSets starts queued + operations in request order. Default is false. + type: boolean + type: object + operationPreferences: + description: Preferences for how AWS CloudFormation performs a + stack set update. + properties: + failureToleranceCount: + description: The number of accounts, per Region, for which + this operation can fail before AWS CloudFormation stops + the operation in that Region. + type: number + failureTolerancePercentage: + description: The percentage of accounts, per Region, for which + this stack operation can fail before AWS CloudFormation + stops the operation in that Region. + type: number + maxConcurrentCount: + description: The maximum number of accounts in which to perform + this operation at one time. + type: number + maxConcurrentPercentage: + description: The maximum percentage of accounts in which to + perform this operation at one time. + type: number + regionConcurrencyType: + description: The concurrency type of deploying StackSets operations + in Regions, could be in parallel or one Region at a time. + type: string + regionOrder: + description: The order of the Regions in where you want to + perform the stack operation. 
+ items: + type: string + type: array + type: object + parameters: + additionalProperties: + type: string + description: Key-value map of input parameters for the StackSet + template. All template parameters, including those with a Default, + must be configured or ignored with lifecycle configuration block + ignore_changes argument. All NoEcho template parameters must + be ignored with the lifecycle configuration block ignore_changes + argument. + type: object + x-kubernetes-map-type: granular + permissionModel: + description: 'Describes how the IAM roles required for your StackSet + are created. Valid values: SELF_MANAGED (default), SERVICE_MANAGED.' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + templateBody: + description: 'String containing the CloudFormation template body. + Maximum size: 51,200 bytes. Conflicts with template_url.' + type: string + templateUrl: + description: 'String containing the location of a file containing + the CloudFormation template body. The URL must point to a template + that is located in an Amazon S3 bucket. Maximum location file + size: 460,800 bytes. Conflicts with template_body.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: StackSetStatus defines the observed state of StackSet. + properties: + atProvider: + properties: + administrationRoleArn: + description: Amazon Resource Number (ARN) of the IAM Role in the + administrator account. This must be defined when using the SELF_MANAGED + permission model. + type: string + arn: + description: Amazon Resource Name (ARN) of the StackSet. + type: string + autoDeployment: + description: Configuration block containing the auto-deployment + model for your StackSet. This can only be defined when using + the SERVICE_MANAGED permission model. + properties: + enabled: + description: Whether or not auto-deployment is enabled. + type: boolean + retainStacksOnAccountRemoval: + description: Whether or not to retain stacks when the account + is removed. + type: boolean + type: object + callAs: + description: 'Specifies whether you are acting as an account administrator + in the organization''s management account or as a delegated + administrator in a member account. 
Valid values: SELF (default), + DELEGATED_ADMIN.' + type: string + capabilities: + description: 'A list of capabilities. Valid values: CAPABILITY_IAM, + CAPABILITY_NAMED_IAM, CAPABILITY_AUTO_EXPAND.' + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the StackSet. + type: string + executionRoleName: + description: Name of the IAM Role in all target accounts for StackSet + operations. Defaults to AWSCloudFormationStackSetExecutionRole + when using the SELF_MANAGED permission model. This should not + be defined when using the SERVICE_MANAGED permission model. + type: string + id: + description: Name of the StackSet. + type: string + managedExecution: + description: Configuration block to allow StackSets to perform + non-conflicting operations concurrently and queues conflicting + operations. + properties: + active: + description: When set to true, StackSets performs non-conflicting + operations concurrently and queues conflicting operations. + After conflicting operations finish, StackSets starts queued + operations in request order. Default is false. + type: boolean + type: object + operationPreferences: + description: Preferences for how AWS CloudFormation performs a + stack set update. + properties: + failureToleranceCount: + description: The number of accounts, per Region, for which + this operation can fail before AWS CloudFormation stops + the operation in that Region. + type: number + failureTolerancePercentage: + description: The percentage of accounts, per Region, for which + this stack operation can fail before AWS CloudFormation + stops the operation in that Region. + type: number + maxConcurrentCount: + description: The maximum number of accounts in which to perform + this operation at one time. + type: number + maxConcurrentPercentage: + description: The maximum percentage of accounts in which to + perform this operation at one time. 
+ type: number + regionConcurrencyType: + description: The concurrency type of deploying StackSets operations + in Regions, could be in parallel or one Region at a time. + type: string + regionOrder: + description: The order of the Regions in where you want to + perform the stack operation. + items: + type: string + type: array + type: object + parameters: + additionalProperties: + type: string + description: Key-value map of input parameters for the StackSet + template. All template parameters, including those with a Default, + must be configured or ignored with lifecycle configuration block + ignore_changes argument. All NoEcho template parameters must + be ignored with the lifecycle configuration block ignore_changes + argument. + type: object + x-kubernetes-map-type: granular + permissionModel: + description: 'Describes how the IAM roles required for your StackSet + are created. Valid values: SELF_MANAGED (default), SERVICE_MANAGED.' + type: string + stackSetId: + description: Unique identifier of the StackSet. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + templateBody: + description: 'String containing the CloudFormation template body. + Maximum size: 51,200 bytes. Conflicts with template_url.' + type: string + templateUrl: + description: 'String containing the location of a file containing + the CloudFormation template body. The URL must point to a template + that is located in an Amazon S3 bucket. Maximum location file + size: 460,800 bytes. Conflicts with template_body.' + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudfront.aws.upbound.io_cachepolicies.yaml b/package/crds/cloudfront.aws.upbound.io_cachepolicies.yaml index f8ba5f2f9c..1addd35b07 100644 --- a/package/crds/cloudfront.aws.upbound.io_cachepolicies.yaml +++ b/package/crds/cloudfront.aws.upbound.io_cachepolicies.yaml @@ -739,3 +739,673 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CachePolicy is the Schema for the CachePolicys API. Use the + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CachePolicySpec defines the desired state of CachePolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + comment: + description: Description for the cache policy. + type: string + defaultTtl: + description: Amount of time, in seconds, that objects are allowed + to remain in the CloudFront cache before CloudFront sends a + new request to the origin server to check if the object has + been updated. + type: number + maxTtl: + description: Maximum amount of time, in seconds, that objects + stay in the CloudFront cache before CloudFront sends another + request to the origin to see if the object has been updated. + type: number + minTtl: + description: Minimum amount of time, in seconds, that objects + should remain in the CloudFront cache before a new request is + sent to the origin to check for updates. + type: number + name: + description: Unique name used to identify the cache policy. + type: string + parametersInCacheKeyAndForwardedToOrigin: + description: Configuration for including HTTP headers, cookies, + and URL query strings in the cache key. 
For more information, + refer to the Parameters In Cache Key And Forwarded To Origin + section. + properties: + cookiesConfig: + description: Whether any cookies in viewer requests are included + in the cache key and automatically included in requests + that CloudFront sends to the origin. See Cookies Config + for more information. + properties: + cookieBehavior: + description: Whether any cookies in viewer requests are + included in the cache key and automatically included + in requests that CloudFront sends to the origin. Valid + values for cookie_behavior are none, whitelist, allExcept, + and all. + type: string + cookies: + description: Object that contains a list of cookie names. + See Items for more information. + properties: + items: + description: List of item names, such as cookies, + headers, or query strings. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + enableAcceptEncodingBrotli: + description: Flag determines whether the Accept-Encoding HTTP + header is included in the cache key and in requests that + CloudFront sends to the origin. + type: boolean + enableAcceptEncodingGzip: + description: Whether the Accept-Encoding HTTP header is included + in the cache key and in requests sent to the origin by CloudFront. + type: boolean + headersConfig: + description: Whether any HTTP headers are included in the + cache key and automatically included in requests that CloudFront + sends to the origin. See Headers Config for more information. + properties: + headerBehavior: + description: Whether any HTTP headers are included in + the cache key and automatically included in requests + that CloudFront sends to the origin. Valid values for + header_behavior are none and whitelist. + type: string + headers: + description: Object contains a list of header names. See + Items for more information. + properties: + items: + description: List of item names, such as cookies, + headers, or query strings. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + queryStringsConfig: + description: Whether any URL query strings in viewer requests + are included in the cache key. It also automatically includes + these query strings in requests that CloudFront sends to + the origin. Please refer to the Query String Config for + more information. + properties: + queryStringBehavior: + description: Whether URL query strings in viewer requests + are included in the cache key and automatically included + in requests that CloudFront sends to the origin. Valid + values for query_string_behavior are none, whitelist, + allExcept, and all. + type: string + queryStrings: + description: Configuration parameter that contains a list + of query string names. See Items for more information. + properties: + items: + description: List of item names, such as cookies, + headers, or query strings. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + comment: + description: Description for the cache policy. 
+ type: string + defaultTtl: + description: Amount of time, in seconds, that objects are allowed + to remain in the CloudFront cache before CloudFront sends a + new request to the origin server to check if the object has + been updated. + type: number + maxTtl: + description: Maximum amount of time, in seconds, that objects + stay in the CloudFront cache before CloudFront sends another + request to the origin to see if the object has been updated. + type: number + minTtl: + description: Minimum amount of time, in seconds, that objects + should remain in the CloudFront cache before a new request is + sent to the origin to check for updates. + type: number + name: + description: Unique name used to identify the cache policy. + type: string + parametersInCacheKeyAndForwardedToOrigin: + description: Configuration for including HTTP headers, cookies, + and URL query strings in the cache key. For more information, + refer to the Parameters In Cache Key And Forwarded To Origin + section. + properties: + cookiesConfig: + description: Whether any cookies in viewer requests are included + in the cache key and automatically included in requests + that CloudFront sends to the origin. See Cookies Config + for more information. + properties: + cookieBehavior: + description: Whether any cookies in viewer requests are + included in the cache key and automatically included + in requests that CloudFront sends to the origin. Valid + values for cookie_behavior are none, whitelist, allExcept, + and all. + type: string + cookies: + description: Object that contains a list of cookie names. + See Items for more information. + properties: + items: + description: List of item names, such as cookies, + headers, or query strings. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + enableAcceptEncodingBrotli: + description: Flag determines whether the Accept-Encoding HTTP + header is included in the cache key and in requests that + CloudFront sends to the origin. + type: boolean + enableAcceptEncodingGzip: + description: Whether the Accept-Encoding HTTP header is included + in the cache key and in requests sent to the origin by CloudFront. + type: boolean + headersConfig: + description: Whether any HTTP headers are included in the + cache key and automatically included in requests that CloudFront + sends to the origin. See Headers Config for more information. + properties: + headerBehavior: + description: Whether any HTTP headers are included in + the cache key and automatically included in requests + that CloudFront sends to the origin. Valid values for + header_behavior are none and whitelist. + type: string + headers: + description: Object contains a list of header names. See + Items for more information. + properties: + items: + description: List of item names, such as cookies, + headers, or query strings. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + queryStringsConfig: + description: Whether any URL query strings in viewer requests + are included in the cache key. It also automatically includes + these query strings in requests that CloudFront sends to + the origin. Please refer to the Query String Config for + more information. + properties: + queryStringBehavior: + description: Whether URL query strings in viewer requests + are included in the cache key and automatically included + in requests that CloudFront sends to the origin. Valid + values for query_string_behavior are none, whitelist, + allExcept, and all. + type: string + queryStrings: + description: Configuration parameter that contains a list + of query string names. See Items for more information. 
+ properties: + items: + description: List of item names, such as cookies, + headers, or query strings. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.parametersInCacheKeyAndForwardedToOrigin is + a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.parametersInCacheKeyAndForwardedToOrigin) + || (has(self.initProvider) && has(self.initProvider.parametersInCacheKeyAndForwardedToOrigin))' + status: + description: CachePolicyStatus defines the observed state of CachePolicy. + properties: + atProvider: + properties: + comment: + description: Description for the cache policy. + type: string + defaultTtl: + description: Amount of time, in seconds, that objects are allowed + to remain in the CloudFront cache before CloudFront sends a + new request to the origin server to check if the object has + been updated. + type: number + etag: + description: Current version of the cache policy. + type: string + id: + description: Identifier for the cache policy. + type: string + maxTtl: + description: Maximum amount of time, in seconds, that objects + stay in the CloudFront cache before CloudFront sends another + request to the origin to see if the object has been updated. + type: number + minTtl: + description: Minimum amount of time, in seconds, that objects + should remain in the CloudFront cache before a new request is + sent to the origin to check for updates. + type: number + name: + description: Unique name used to identify the cache policy. 
+ type: string + parametersInCacheKeyAndForwardedToOrigin: + description: Configuration for including HTTP headers, cookies, + and URL query strings in the cache key. For more information, + refer to the Parameters In Cache Key And Forwarded To Origin + section. + properties: + cookiesConfig: + description: Whether any cookies in viewer requests are included + in the cache key and automatically included in requests + that CloudFront sends to the origin. See Cookies Config + for more information. + properties: + cookieBehavior: + description: Whether any cookies in viewer requests are + included in the cache key and automatically included + in requests that CloudFront sends to the origin. Valid + values for cookie_behavior are none, whitelist, allExcept, + and all. + type: string + cookies: + description: Object that contains a list of cookie names. + See Items for more information. + properties: + items: + description: List of item names, such as cookies, + headers, or query strings. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + enableAcceptEncodingBrotli: + description: Flag determines whether the Accept-Encoding HTTP + header is included in the cache key and in requests that + CloudFront sends to the origin. + type: boolean + enableAcceptEncodingGzip: + description: Whether the Accept-Encoding HTTP header is included + in the cache key and in requests sent to the origin by CloudFront. + type: boolean + headersConfig: + description: Whether any HTTP headers are included in the + cache key and automatically included in requests that CloudFront + sends to the origin. See Headers Config for more information. + properties: + headerBehavior: + description: Whether any HTTP headers are included in + the cache key and automatically included in requests + that CloudFront sends to the origin. Valid values for + header_behavior are none and whitelist. 
+ type: string + headers: + description: Object contains a list of header names. See + Items for more information. + properties: + items: + description: List of item names, such as cookies, + headers, or query strings. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + queryStringsConfig: + description: Whether any URL query strings in viewer requests + are included in the cache key. It also automatically includes + these query strings in requests that CloudFront sends to + the origin. Please refer to the Query String Config for + more information. + properties: + queryStringBehavior: + description: Whether URL query strings in viewer requests + are included in the cache key and automatically included + in requests that CloudFront sends to the origin. Valid + values for query_string_behavior are none, whitelist, + allExcept, and all. + type: string + queryStrings: + description: Configuration parameter that contains a list + of query string names. See Items for more information. + properties: + items: + description: List of item names, such as cookies, + headers, or query strings. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudfront.aws.upbound.io_distributions.yaml b/package/crds/cloudfront.aws.upbound.io_distributions.yaml index 1ce69ea1a4..e94da789a5 100644 --- a/package/crds/cloudfront.aws.upbound.io_distributions.yaml +++ b/package/crds/cloudfront.aws.upbound.io_distributions.yaml @@ -3175,3 +3175,3058 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Distribution is the Schema for the Distributions API. 
Provides + a CloudFront web distribution resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DistributionSpec defines the desired state of Distribution + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + aliases: + description: Extra CNAMEs (alternate domain names), if any, for + this distribution. + items: + type: string + type: array + x-kubernetes-list-type: set + comment: + description: Any comments you want to include about the distribution. + type: string + continuousDeploymentPolicyId: + description: Identifier of a continuous deployment policy. This + argument should only be set on a production distribution. 
See + the aws_cloudfront_continuous_deployment_policy resource for + additional details. + type: string + customErrorResponse: + description: One or more custom error response elements (multiples + allowed). + items: + properties: + errorCachingMinTtl: + description: Minimum amount of time you want HTTP error + codes to stay in CloudFront caches before CloudFront queries + your origin to see whether the object has been updated. + type: number + errorCode: + description: 4xx or 5xx HTTP status code that you want to + customize. + type: number + responseCode: + description: HTTP status code that you want CloudFront to + return with the custom error page to the viewer. + type: number + responsePagePath: + description: Path of the custom error page (for example, + /custom_404.html). + type: string + type: object + type: array + defaultCacheBehavior: + description: Default cache behavior for this distribution (maximum + one). Requires either cache_policy_id (preferred) or forwarded_values + (deprecated) be set. + properties: + allowedMethods: + description: Controls which HTTP methods CloudFront processes + and forwards to your Amazon S3 bucket or your custom origin. + items: + type: string + type: array + x-kubernetes-list-type: set + cachePolicyId: + description: Unique identifier of the cache policy that is + attached to the cache behavior. If configuring the default_cache_behavior + either cache_policy_id or forwarded_values must be set. + type: string + cachedMethods: + description: Controls whether CloudFront caches the response + to requests using the specified HTTP methods. + items: + type: string + type: array + x-kubernetes-list-type: set + compress: + description: 'Whether you want CloudFront to automatically + compress content for web requests that include Accept-Encoding: + gzip in the request header (default: false).' 
+ type: boolean + defaultTtl: + description: Default amount of time (in seconds) that an object + is in a CloudFront cache before CloudFront forwards another + request in the absence of an Cache-Control max-age or Expires + header. + type: number + fieldLevelEncryptionId: + description: Field level encryption configuration ID. + type: string + forwardedValues: + description: The forwarded values configuration that specifies + how CloudFront handles query strings, cookies and headers + (maximum one). + properties: + cookies: + description: The forwarded values cookies that specifies + how CloudFront handles cookies (maximum one). + properties: + forward: + description: Whether you want CloudFront to forward + cookies to the origin that is associated with this + cache behavior. You can specify all, none or whitelist. + If whitelist, you must include the subsequent whitelisted_names. + type: string + whitelistedNames: + description: If you have specified whitelist to forward, + the whitelisted cookies that you want CloudFront + to forward to your origin. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + headers: + description: Headers, if any, that you want CloudFront + to vary upon for this cache behavior. Specify * to include + all headers. + items: + type: string + type: array + x-kubernetes-list-type: set + queryString: + description: Indicates whether you want CloudFront to + forward query strings to the origin that is associated + with this cache behavior. + type: boolean + queryStringCacheKeys: + description: When specified, along with a value of true + for query_string, all query strings are forwarded, however + only the query string keys listed in this argument are + cached. When omitted with a value of true for query_string, + all query string keys are cached. 
+ items: + type: string + type: array + type: object + functionAssociation: + description: A config block that triggers a cloudfront function + with specific actions (maximum 2). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + functionArn: + description: ARN of the CloudFront function. + type: string + type: object + type: array + lambdaFunctionAssociation: + description: A config block that triggers a lambda function + with specific actions (maximum 4). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + includeBody: + description: 'When set to true it exposes the request + body to the lambda function. Defaults to false. Valid + values: true, false.' + type: boolean + lambdaArn: + description: ARN of the Lambda function. + type: string + type: object + type: array + maxTtl: + description: Maximum amount of time (in seconds) that an object + is in a CloudFront cache before CloudFront forwards another + request to your origin to determine whether the object has + been updated. Only effective in the presence of Cache-Control + max-age, Cache-Control s-maxage, and Expires headers. + type: number + minTtl: + description: Minimum amount of time that you want objects + to stay in CloudFront caches before CloudFront queries your + origin to see whether the object has been updated. Defaults + to 0 seconds. + type: number + originRequestPolicyId: + description: Unique identifier of the origin request policy + that is attached to the behavior. + type: string + realtimeLogConfigArn: + description: ARN of the real-time log configuration that is + attached to this cache behavior. + type: string + responseHeadersPolicyId: + description: Identifier for a response headers policy. 
+ type: string + smoothStreaming: + description: Indicates whether you want to distribute media + files in Microsoft Smooth Streaming format using the origin + that is associated with this cache behavior. + type: boolean + targetOriginId: + description: Value of ID for the origin that you want CloudFront + to route requests to when a request matches the path pattern + either for a cache behavior or for the default cache behavior. + type: string + trustedKeyGroups: + description: List of key group IDs that CloudFront can use + to validate signed URLs or signed cookies. See the CloudFront + User Guide for more information about this feature. + items: + type: string + type: array + trustedSigners: + description: List of AWS account IDs (or self) that you want + to allow to create signed URLs for private content. See + the CloudFront User Guide for more information about this + feature. + items: + type: string + type: array + viewerProtocolPolicy: + description: Use this element to specify the protocol that + users can use to access the files in the origin specified + by TargetOriginId when a request matches the path pattern + in PathPattern. One of allow-all, https-only, or redirect-to-https. + type: string + type: object + defaultRootObject: + description: Object that you want CloudFront to return (for example, + index.html) when an end user requests the root URL. + type: string + enabled: + description: Whether the distribution is enabled to accept end + user requests for content. + type: boolean + httpVersion: + description: Maximum HTTP version to support on the distribution. + Allowed values are http1.1, http2, http2and3 and http3. The + default is http2. + type: string + isIpv6Enabled: + description: Whether the IPv6 is enabled for the distribution. + type: boolean + loggingConfig: + description: The logging configuration that controls how logs + are written to your distribution (maximum one). 
+ properties: + bucket: + description: Amazon S3 bucket to store the access logs in, + for example, myawslogbucket.s3.amazonaws.com. + type: string + includeCookies: + description: 'Whether to include cookies in access logs (default: + false).' + type: boolean + prefix: + description: Prefix to the access log filenames for this distribution, + for example, myprefix/. + type: string + type: object + orderedCacheBehavior: + description: Ordered list of cache behaviors resource for this + distribution. List from top to bottom in order of precedence. + The topmost cache behavior will have precedence 0. + items: + properties: + allowedMethods: + description: Controls which HTTP methods CloudFront processes + and forwards to your Amazon S3 bucket or your custom origin. + items: + type: string + type: array + x-kubernetes-list-type: set + cachePolicyId: + description: Unique identifier of the cache policy that + is attached to the cache behavior. If configuring the + default_cache_behavior either cache_policy_id or forwarded_values + must be set. + type: string + cachedMethods: + description: Controls whether CloudFront caches the response + to requests using the specified HTTP methods. + items: + type: string + type: array + x-kubernetes-list-type: set + compress: + description: 'Whether you want CloudFront to automatically + compress content for web requests that include Accept-Encoding: + gzip in the request header (default: false).' + type: boolean + defaultTtl: + description: Default amount of time (in seconds) that an + object is in a CloudFront cache before CloudFront forwards + another request in the absence of an Cache-Control max-age + or Expires header. + type: number + fieldLevelEncryptionId: + description: Field level encryption configuration ID. + type: string + forwardedValues: + description: The forwarded values configuration that specifies + how CloudFront handles query strings, cookies and headers + (maximum one). 
+ properties: + cookies: + description: The forwarded values cookies that specifies + how CloudFront handles cookies (maximum one). + properties: + forward: + description: Whether you want CloudFront to forward + cookies to the origin that is associated with + this cache behavior. You can specify all, none + or whitelist. If whitelist, you must include the + subsequent whitelisted_names. + type: string + whitelistedNames: + description: If you have specified whitelist to + forward, the whitelisted cookies that you want + CloudFront to forward to your origin. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + headers: + description: Headers, if any, that you want CloudFront + to vary upon for this cache behavior. Specify * to + include all headers. + items: + type: string + type: array + x-kubernetes-list-type: set + queryString: + description: Indicates whether you want CloudFront to + forward query strings to the origin that is associated + with this cache behavior. + type: boolean + queryStringCacheKeys: + description: When specified, along with a value of true + for query_string, all query strings are forwarded, + however only the query string keys listed in this + argument are cached. When omitted with a value of + true for query_string, all query string keys are cached. + items: + type: string + type: array + type: object + functionAssociation: + description: A config block that triggers a cloudfront function + with specific actions (maximum 2). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + functionArn: + description: ARN of the CloudFront function. + type: string + functionArnRef: + description: Reference to a Function in cloudfront + to populate functionArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionArnSelector: + description: Selector for a Function in cloudfront + to populate functionArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + lambdaFunctionAssociation: + description: A config block that triggers a lambda function + with specific actions (maximum 4). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + includeBody: + description: 'When set to true it exposes the request + body to the lambda function. Defaults to false. + Valid values: true, false.' + type: boolean + lambdaArn: + description: ARN of the Lambda function. + type: string + lambdaArnRef: + description: Reference to a Function in lambda to + populate lambdaArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + lambdaArnSelector: + description: Selector for a Function in lambda to + populate lambdaArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + maxTtl: + description: Maximum amount of time (in seconds) that an + object is in a CloudFront cache before CloudFront forwards + another request to your origin to determine whether the + object has been updated. Only effective in the presence + of Cache-Control max-age, Cache-Control s-maxage, and + Expires headers. + type: number + minTtl: + description: Minimum amount of time that you want objects + to stay in CloudFront caches before CloudFront queries + your origin to see whether the object has been updated. + Defaults to 0 seconds. + type: number + originRequestPolicyId: + description: Unique identifier of the origin request policy + that is attached to the behavior. + type: string + pathPattern: + description: Pattern (for example, images/*.jpg) that specifies + which requests you want this cache behavior to apply to. + type: string + realtimeLogConfigArn: + description: ARN of the real-time log configuration that + is attached to this cache behavior. 
+ type: string + responseHeadersPolicyId: + description: Identifier for a response headers policy. + type: string + smoothStreaming: + description: Indicates whether you want to distribute media + files in Microsoft Smooth Streaming format using the origin + that is associated with this cache behavior. + type: boolean + targetOriginId: + description: Value of ID for the origin that you want CloudFront + to route requests to when a request matches the path pattern + either for a cache behavior or for the default cache behavior. + type: string + trustedKeyGroups: + description: List of key group IDs that CloudFront can use + to validate signed URLs or signed cookies. See the CloudFront + User Guide for more information about this feature. + items: + type: string + type: array + trustedSigners: + description: List of AWS account IDs (or self) that you + want to allow to create signed URLs for private content. + See the CloudFront User Guide for more information about + this feature. + items: + type: string + type: array + viewerProtocolPolicy: + description: Use this element to specify the protocol that + users can use to access the files in the origin specified + by TargetOriginId when a request matches the path pattern + in PathPattern. One of allow-all, https-only, or redirect-to-https. + type: string + type: object + type: array + origin: + description: One or more origins for this distribution (multiples + allowed). + items: + properties: + connectionAttempts: + description: Number of times that CloudFront attempts to + connect to the origin. Must be between 1-3. Defaults to + 3. + type: number + connectionTimeout: + description: Number of seconds that CloudFront waits when + trying to establish a connection to the origin. Must be + between 1-10. Defaults to 10. + type: number + customHeader: + description: One or more sub-resources with name and value + parameters that specify header data that will be sent + to the origin (multiples allowed). 
+ items: + properties: + name: + type: string + value: + type: string + type: object + type: array + customOriginConfig: + description: The CloudFront custom origin configuration + information. If an S3 origin is required, use origin_access_control_id + or s3_origin_config instead. + properties: + httpPort: + description: HTTP port the custom origin listens on. + type: number + httpsPort: + description: HTTPS port the custom origin listens on. + type: number + originKeepaliveTimeout: + description: The Custom KeepAlive timeout, in seconds. + By default, AWS enforces an upper limit of 60. But + you can request an increase. Defaults to 5. + type: number + originProtocolPolicy: + description: Origin protocol policy to apply to your + origin. One of http-only, https-only, or match-viewer. + type: string + originReadTimeout: + description: The Custom Read timeout, in seconds. By + default, AWS enforces an upper limit of 60. But you + can request an increase. Defaults to 30. + type: number + originSslProtocols: + description: 'List of SSL/TLS protocols that CloudFront + can use when connecting to your origin over HTTPS. + Valid values: SSLv3, TLSv1, TLSv1.1, TLSv1.2. For + more information, see Minimum Origin SSL Protocol + in the Amazon CloudFront Developer Guide.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + domainName: + description: DNS domain name of either the S3 bucket, or + web site of your custom origin. + type: string + originAccessControlId: + description: Unique identifier of a CloudFront origin access + control for this origin. + type: string + originAccessControlIdRef: + description: Reference to a OriginAccessControl in cloudfront + to populate originAccessControlId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originAccessControlIdSelector: + description: Selector for a OriginAccessControl in cloudfront + to populate originAccessControlId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + originId: + description: Unique identifier for the origin. + type: string + originPath: + description: Optional element that causes CloudFront to + request your content from a directory in your Amazon S3 + bucket or your custom origin. + type: string + originShield: + description: CloudFront Origin Shield configuration information. + Using Origin Shield can help reduce the load on your origin. + For more information, see Using Origin Shield in the Amazon + CloudFront Developer Guide. + properties: + enabled: + description: Whether the distribution is enabled to + accept end user requests for content. + type: boolean + originShieldRegion: + description: AWS Region for Origin Shield. To specify + a region, use the region code, not the region name. + For example, specify the US East (Ohio) region as + us-east-2. + type: string + type: object + s3OriginConfig: + description: CloudFront S3 origin configuration information. + If a custom origin is required, use custom_origin_config + instead. + properties: + originAccessIdentity: + description: The CloudFront origin access identity to + associate with the origin. + type: string + originAccessIdentityRef: + description: Reference to a OriginAccessIdentity in + cloudfront to populate originAccessIdentity. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originAccessIdentitySelector: + description: Selector for a OriginAccessIdentity in + cloudfront to populate originAccessIdentity. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + type: array + originGroup: + description: One or more origin_group for this distribution (multiples + allowed). + items: + properties: + failoverCriteria: + description: The failover criteria for when to failover + to the secondary origin. + properties: + statusCodes: + description: List of HTTP status codes for the origin + group. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + type: object + member: + description: Ordered member configuration blocks assigned + to the origin group, where the first member is the primary + origin. You must specify two members. + items: + properties: + originId: + description: Unique identifier for the origin. + type: string + type: object + type: array + originId: + description: Unique identifier for the origin. + type: string + type: object + type: array + priceClass: + description: Price class for this distribution. One of PriceClass_All, + PriceClass_200, PriceClass_100. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + restrictions: + description: The restriction configuration for this distribution + (maximum one). + properties: + geoRestriction: + properties: + locations: + description: ISO 3166-1-alpha-2 codes for which you want + CloudFront either to distribute your content (whitelist) + or not distribute your content (blacklist). If the type + is specified as none an empty array can be used. + items: + type: string + type: array + x-kubernetes-list-type: set + restrictionType: + description: 'Method that you want to use to restrict + distribution of your content by country: none, whitelist, + or blacklist.' + type: string + type: object + type: object + retainOnDelete: + description: 'If this is set, the distribution needs to be deleted + manually afterwards. Default: false.' + type: boolean + staging: + description: A Boolean that indicates whether this is a staging + distribution. Defaults to false. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + viewerCertificate: + description: The SSL configuration for this distribution (maximum + one). 
+ properties: + acmCertificateArn: + description: ARN of the AWS Certificate Manager certificate + that you wish to use with this distribution. Specify this, + cloudfront_default_certificate, or iam_certificate_id. The + ACM certificate must be in US-EAST-1. + type: string + cloudfrontDefaultCertificate: + description: true if you want viewers to use HTTPS to request + your objects and you're using the CloudFront domain name + for your distribution. Specify this, acm_certificate_arn, + or iam_certificate_id. + type: boolean + iamCertificateId: + description: IAM certificate identifier of the custom viewer + certificate for this distribution if you are using a custom + domain. Specify this, acm_certificate_arn, or cloudfront_default_certificate. + type: string + minimumProtocolVersion: + description: 'Minimum version of the SSL protocol that you + want CloudFront to use for HTTPS connections. Can only be + set if cloudfront_default_certificate = false. See all possible + values in this table under "Security policy." Some examples + include: TLSv1.2_2019 and TLSv1.2_2021. Default: TLSv1. + NOTE: If you are using a custom certificate (specified with + acm_certificate_arn or iam_certificate_id), and have specified + sni-only in ssl_support_method, TLSv1 or later must be specified. + If you have specified vip in ssl_support_method, only SSLv3 + or TLSv1 can be specified. If you have specified cloudfront_default_certificate, + TLSv1 must be specified.' + type: string + sslSupportMethod: + description: 'How you want CloudFront to serve HTTPS requests. + One of vip, sni-only, or static-ip. Required if you specify + acm_certificate_arn or iam_certificate_id. NOTE: vip causes + CloudFront to use a dedicated IP address and may incur extra + charges.' + type: string + type: object + waitForDeployment: + description: 'If enabled, the resource will wait for the distribution + status to change from InProgress to Deployed. Setting this tofalse + will skip the process. 
Default: true.' + type: boolean + webAclId: + description: Unique identifier that specifies the AWS WAF web + ACL, if any, to associate with this distribution. To specify + a web ACL created using the latest version of AWS WAF (WAFv2), + use the ACL ARN, for example aws_wafv2_web_acl.example.arn. + To specify a web ACL created using AWS WAF Classic, use the + ACL ID, for example aws_waf_web_acl.example.id. The WAF Web + ACL must exist in the WAF Global (CloudFront) region and the + credentials configuring this argument must have waf:GetWebACL + permissions assigned. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + aliases: + description: Extra CNAMEs (alternate domain names), if any, for + this distribution. + items: + type: string + type: array + x-kubernetes-list-type: set + comment: + description: Any comments you want to include about the distribution. + type: string + continuousDeploymentPolicyId: + description: Identifier of a continuous deployment policy. This + argument should only be set on a production distribution. See + the aws_cloudfront_continuous_deployment_policy resource for + additional details. + type: string + customErrorResponse: + description: One or more custom error response elements (multiples + allowed). 
+ items: + properties: + errorCachingMinTtl: + description: Minimum amount of time you want HTTP error + codes to stay in CloudFront caches before CloudFront queries + your origin to see whether the object has been updated. + type: number + errorCode: + description: 4xx or 5xx HTTP status code that you want to + customize. + type: number + responseCode: + description: HTTP status code that you want CloudFront to + return with the custom error page to the viewer. + type: number + responsePagePath: + description: Path of the custom error page (for example, + /custom_404.html). + type: string + type: object + type: array + defaultCacheBehavior: + description: Default cache behavior for this distribution (maximum + one). Requires either cache_policy_id (preferred) or forwarded_values + (deprecated) be set. + properties: + allowedMethods: + description: Controls which HTTP methods CloudFront processes + and forwards to your Amazon S3 bucket or your custom origin. + items: + type: string + type: array + x-kubernetes-list-type: set + cachePolicyId: + description: Unique identifier of the cache policy that is + attached to the cache behavior. If configuring the default_cache_behavior + either cache_policy_id or forwarded_values must be set. + type: string + cachedMethods: + description: Controls whether CloudFront caches the response + to requests using the specified HTTP methods. + items: + type: string + type: array + x-kubernetes-list-type: set + compress: + description: 'Whether you want CloudFront to automatically + compress content for web requests that include Accept-Encoding: + gzip in the request header (default: false).' + type: boolean + defaultTtl: + description: Default amount of time (in seconds) that an object + is in a CloudFront cache before CloudFront forwards another + request in the absence of an Cache-Control max-age or Expires + header. + type: number + fieldLevelEncryptionId: + description: Field level encryption configuration ID. 
+ type: string + forwardedValues: + description: The forwarded values configuration that specifies + how CloudFront handles query strings, cookies and headers + (maximum one). + properties: + cookies: + description: The forwarded values cookies that specifies + how CloudFront handles cookies (maximum one). + properties: + forward: + description: Whether you want CloudFront to forward + cookies to the origin that is associated with this + cache behavior. You can specify all, none or whitelist. + If whitelist, you must include the subsequent whitelisted_names. + type: string + whitelistedNames: + description: If you have specified whitelist to forward, + the whitelisted cookies that you want CloudFront + to forward to your origin. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + headers: + description: Headers, if any, that you want CloudFront + to vary upon for this cache behavior. Specify * to include + all headers. + items: + type: string + type: array + x-kubernetes-list-type: set + queryString: + description: Indicates whether you want CloudFront to + forward query strings to the origin that is associated + with this cache behavior. + type: boolean + queryStringCacheKeys: + description: When specified, along with a value of true + for query_string, all query strings are forwarded, however + only the query string keys listed in this argument are + cached. When omitted with a value of true for query_string, + all query string keys are cached. + items: + type: string + type: array + type: object + functionAssociation: + description: A config block that triggers a cloudfront function + with specific actions (maximum 2). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + functionArn: + description: ARN of the CloudFront function. 
+ type: string + type: object + type: array + lambdaFunctionAssociation: + description: A config block that triggers a lambda function + with specific actions (maximum 4). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + includeBody: + description: 'When set to true it exposes the request + body to the lambda function. Defaults to false. Valid + values: true, false.' + type: boolean + lambdaArn: + description: ARN of the Lambda function. + type: string + type: object + type: array + maxTtl: + description: Maximum amount of time (in seconds) that an object + is in a CloudFront cache before CloudFront forwards another + request to your origin to determine whether the object has + been updated. Only effective in the presence of Cache-Control + max-age, Cache-Control s-maxage, and Expires headers. + type: number + minTtl: + description: Minimum amount of time that you want objects + to stay in CloudFront caches before CloudFront queries your + origin to see whether the object has been updated. Defaults + to 0 seconds. + type: number + originRequestPolicyId: + description: Unique identifier of the origin request policy + that is attached to the behavior. + type: string + realtimeLogConfigArn: + description: ARN of the real-time log configuration that is + attached to this cache behavior. + type: string + responseHeadersPolicyId: + description: Identifier for a response headers policy. + type: string + smoothStreaming: + description: Indicates whether you want to distribute media + files in Microsoft Smooth Streaming format using the origin + that is associated with this cache behavior. + type: boolean + targetOriginId: + description: Value of ID for the origin that you want CloudFront + to route requests to when a request matches the path pattern + either for a cache behavior or for the default cache behavior. 
+ type: string + trustedKeyGroups: + description: List of key group IDs that CloudFront can use + to validate signed URLs or signed cookies. See the CloudFront + User Guide for more information about this feature. + items: + type: string + type: array + trustedSigners: + description: List of AWS account IDs (or self) that you want + to allow to create signed URLs for private content. See + the CloudFront User Guide for more information about this + feature. + items: + type: string + type: array + viewerProtocolPolicy: + description: Use this element to specify the protocol that + users can use to access the files in the origin specified + by TargetOriginId when a request matches the path pattern + in PathPattern. One of allow-all, https-only, or redirect-to-https. + type: string + type: object + defaultRootObject: + description: Object that you want CloudFront to return (for example, + index.html) when an end user requests the root URL. + type: string + enabled: + description: Whether the distribution is enabled to accept end + user requests for content. + type: boolean + httpVersion: + description: Maximum HTTP version to support on the distribution. + Allowed values are http1.1, http2, http2and3 and http3. The + default is http2. + type: string + isIpv6Enabled: + description: Whether the IPv6 is enabled for the distribution. + type: boolean + loggingConfig: + description: The logging configuration that controls how logs + are written to your distribution (maximum one). + properties: + bucket: + description: Amazon S3 bucket to store the access logs in, + for example, myawslogbucket.s3.amazonaws.com. + type: string + includeCookies: + description: 'Whether to include cookies in access logs (default: + false).' + type: boolean + prefix: + description: Prefix to the access log filenames for this distribution, + for example, myprefix/. 
+ type: string + type: object + orderedCacheBehavior: + description: Ordered list of cache behaviors resource for this + distribution. List from top to bottom in order of precedence. + The topmost cache behavior will have precedence 0. + items: + properties: + allowedMethods: + description: Controls which HTTP methods CloudFront processes + and forwards to your Amazon S3 bucket or your custom origin. + items: + type: string + type: array + x-kubernetes-list-type: set + cachePolicyId: + description: Unique identifier of the cache policy that + is attached to the cache behavior. If configuring the + default_cache_behavior either cache_policy_id or forwarded_values + must be set. + type: string + cachedMethods: + description: Controls whether CloudFront caches the response + to requests using the specified HTTP methods. + items: + type: string + type: array + x-kubernetes-list-type: set + compress: + description: 'Whether you want CloudFront to automatically + compress content for web requests that include Accept-Encoding: + gzip in the request header (default: false).' + type: boolean + defaultTtl: + description: Default amount of time (in seconds) that an + object is in a CloudFront cache before CloudFront forwards + another request in the absence of an Cache-Control max-age + or Expires header. + type: number + fieldLevelEncryptionId: + description: Field level encryption configuration ID. + type: string + forwardedValues: + description: The forwarded values configuration that specifies + how CloudFront handles query strings, cookies and headers + (maximum one). + properties: + cookies: + description: The forwarded values cookies that specifies + how CloudFront handles cookies (maximum one). + properties: + forward: + description: Whether you want CloudFront to forward + cookies to the origin that is associated with + this cache behavior. You can specify all, none + or whitelist. If whitelist, you must include the + subsequent whitelisted_names. 
+ type: string + whitelistedNames: + description: If you have specified whitelist to + forward, the whitelisted cookies that you want + CloudFront to forward to your origin. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + headers: + description: Headers, if any, that you want CloudFront + to vary upon for this cache behavior. Specify * to + include all headers. + items: + type: string + type: array + x-kubernetes-list-type: set + queryString: + description: Indicates whether you want CloudFront to + forward query strings to the origin that is associated + with this cache behavior. + type: boolean + queryStringCacheKeys: + description: When specified, along with a value of true + for query_string, all query strings are forwarded, + however only the query string keys listed in this + argument are cached. When omitted with a value of + true for query_string, all query string keys are cached. + items: + type: string + type: array + type: object + functionAssociation: + description: A config block that triggers a cloudfront function + with specific actions (maximum 2). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + functionArn: + description: ARN of the CloudFront function. + type: string + functionArnRef: + description: Reference to a Function in cloudfront + to populate functionArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionArnSelector: + description: Selector for a Function in cloudfront + to populate functionArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + lambdaFunctionAssociation: + description: A config block that triggers a lambda function + with specific actions (maximum 4). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' 
+ type: string + includeBody: + description: 'When set to true it exposes the request + body to the lambda function. Defaults to false. + Valid values: true, false.' + type: boolean + lambdaArn: + description: ARN of the Lambda function. + type: string + lambdaArnRef: + description: Reference to a Function in lambda to + populate lambdaArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + lambdaArnSelector: + description: Selector for a Function in lambda to + populate lambdaArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + maxTtl: + description: Maximum amount of time (in seconds) that an + object is in a CloudFront cache before CloudFront forwards + another request to your origin to determine whether the + object has been updated. Only effective in the presence + of Cache-Control max-age, Cache-Control s-maxage, and + Expires headers. + type: number + minTtl: + description: Minimum amount of time that you want objects + to stay in CloudFront caches before CloudFront queries + your origin to see whether the object has been updated. + Defaults to 0 seconds. + type: number + originRequestPolicyId: + description: Unique identifier of the origin request policy + that is attached to the behavior. + type: string + pathPattern: + description: Pattern (for example, images/*.jpg) that specifies + which requests you want this cache behavior to apply to. + type: string + realtimeLogConfigArn: + description: ARN of the real-time log configuration that + is attached to this cache behavior. + type: string + responseHeadersPolicyId: + description: Identifier for a response headers policy. + type: string + smoothStreaming: + description: Indicates whether you want to distribute media + files in Microsoft Smooth Streaming format using the origin + that is associated with this cache behavior. 
+ type: boolean + targetOriginId: + description: Value of ID for the origin that you want CloudFront + to route requests to when a request matches the path pattern + either for a cache behavior or for the default cache behavior. + type: string + trustedKeyGroups: + description: List of key group IDs that CloudFront can use + to validate signed URLs or signed cookies. See the CloudFront + User Guide for more information about this feature. + items: + type: string + type: array + trustedSigners: + description: List of AWS account IDs (or self) that you + want to allow to create signed URLs for private content. + See the CloudFront User Guide for more information about + this feature. + items: + type: string + type: array + viewerProtocolPolicy: + description: Use this element to specify the protocol that + users can use to access the files in the origin specified + by TargetOriginId when a request matches the path pattern + in PathPattern. One of allow-all, https-only, or redirect-to-https. + type: string + type: object + type: array + origin: + description: One or more origins for this distribution (multiples + allowed). + items: + properties: + connectionAttempts: + description: Number of times that CloudFront attempts to + connect to the origin. Must be between 1-3. Defaults to + 3. + type: number + connectionTimeout: + description: Number of seconds that CloudFront waits when + trying to establish a connection to the origin. Must be + between 1-10. Defaults to 10. + type: number + customHeader: + description: One or more sub-resources with name and value + parameters that specify header data that will be sent + to the origin (multiples allowed). + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + customOriginConfig: + description: The CloudFront custom origin configuration + information. If an S3 origin is required, use origin_access_control_id + or s3_origin_config instead. 
+ properties: + httpPort: + description: HTTP port the custom origin listens on. + type: number + httpsPort: + description: HTTPS port the custom origin listens on. + type: number + originKeepaliveTimeout: + description: The Custom KeepAlive timeout, in seconds. + By default, AWS enforces an upper limit of 60. But + you can request an increase. Defaults to 5. + type: number + originProtocolPolicy: + description: Origin protocol policy to apply to your + origin. One of http-only, https-only, or match-viewer. + type: string + originReadTimeout: + description: The Custom Read timeout, in seconds. By + default, AWS enforces an upper limit of 60. But you + can request an increase. Defaults to 30. + type: number + originSslProtocols: + description: 'List of SSL/TLS protocols that CloudFront + can use when connecting to your origin over HTTPS. + Valid values: SSLv3, TLSv1, TLSv1.1, TLSv1.2. For + more information, see Minimum Origin SSL Protocol + in the Amazon CloudFront Developer Guide.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + domainName: + description: DNS domain name of either the S3 bucket, or + web site of your custom origin. + type: string + originAccessControlId: + description: Unique identifier of a CloudFront origin access + control for this origin. + type: string + originAccessControlIdRef: + description: Reference to a OriginAccessControl in cloudfront + to populate originAccessControlId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originAccessControlIdSelector: + description: Selector for a OriginAccessControl in cloudfront + to populate originAccessControlId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + originId: + description: Unique identifier for the origin. + type: string + originPath: + description: Optional element that causes CloudFront to + request your content from a directory in your Amazon S3 + bucket or your custom origin. 
+ type: string + originShield: + description: CloudFront Origin Shield configuration information. + Using Origin Shield can help reduce the load on your origin. + For more information, see Using Origin Shield in the Amazon + CloudFront Developer Guide. + properties: + enabled: + description: Whether the distribution is enabled to + accept end user requests for content. + type: boolean + originShieldRegion: + description: AWS Region for Origin Shield. To specify + a region, use the region code, not the region name. + For example, specify the US East (Ohio) region as + us-east-2. + type: string + type: object + s3OriginConfig: + description: CloudFront S3 origin configuration information. + If a custom origin is required, use custom_origin_config + instead. + properties: + originAccessIdentity: + description: The CloudFront origin access identity to + associate with the origin. + type: string + originAccessIdentityRef: + description: Reference to a OriginAccessIdentity in + cloudfront to populate originAccessIdentity. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originAccessIdentitySelector: + description: Selector for a OriginAccessIdentity in + cloudfront to populate originAccessIdentity. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + type: array + originGroup: + description: One or more origin_group for this distribution (multiples + allowed). + items: + properties: + failoverCriteria: + description: The failover criteria for when to failover + to the secondary origin. + properties: + statusCodes: + description: List of HTTP status codes for the origin + group. + items: + type: number + type: array + x-kubernetes-list-type: set + type: object + member: + description: Ordered member configuration blocks assigned + to the origin group, where the first member is the primary + origin. You must specify two members. 
+ items: + properties: + originId: + description: Unique identifier for the origin. + type: string + type: object + type: array + originId: + description: Unique identifier for the origin. + type: string + type: object + type: array + priceClass: + description: Price class for this distribution. One of PriceClass_All, + PriceClass_200, PriceClass_100. + type: string + restrictions: + description: The restriction configuration for this distribution + (maximum one). + properties: + geoRestriction: + properties: + locations: + description: ISO 3166-1-alpha-2 codes for which you want + CloudFront either to distribute your content (whitelist) + or not distribute your content (blacklist). If the type + is specified as none an empty array can be used. + items: + type: string + type: array + x-kubernetes-list-type: set + restrictionType: + description: 'Method that you want to use to restrict + distribution of your content by country: none, whitelist, + or blacklist.' + type: string + type: object + type: object + retainOnDelete: + description: 'If this is set, the distribution needs to be deleted + manually afterwards. Default: false.' + type: boolean + staging: + description: A Boolean that indicates whether this is a staging + distribution. Defaults to false. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + viewerCertificate: + description: The SSL configuration for this distribution (maximum + one). + properties: + acmCertificateArn: + description: ARN of the AWS Certificate Manager certificate + that you wish to use with this distribution. Specify this, + cloudfront_default_certificate, or iam_certificate_id. The + ACM certificate must be in US-EAST-1. + type: string + cloudfrontDefaultCertificate: + description: true if you want viewers to use HTTPS to request + your objects and you're using the CloudFront domain name + for your distribution. 
Specify this, acm_certificate_arn, + or iam_certificate_id. + type: boolean + iamCertificateId: + description: IAM certificate identifier of the custom viewer + certificate for this distribution if you are using a custom + domain. Specify this, acm_certificate_arn, or cloudfront_default_certificate. + type: string + minimumProtocolVersion: + description: 'Minimum version of the SSL protocol that you + want CloudFront to use for HTTPS connections. Can only be + set if cloudfront_default_certificate = false. See all possible + values in this table under "Security policy." Some examples + include: TLSv1.2_2019 and TLSv1.2_2021. Default: TLSv1. + NOTE: If you are using a custom certificate (specified with + acm_certificate_arn or iam_certificate_id), and have specified + sni-only in ssl_support_method, TLSv1 or later must be specified. + If you have specified vip in ssl_support_method, only SSLv3 + or TLSv1 can be specified. If you have specified cloudfront_default_certificate, + TLSv1 must be specified.' + type: string + sslSupportMethod: + description: 'How you want CloudFront to serve HTTPS requests. + One of vip, sni-only, or static-ip. Required if you specify + acm_certificate_arn or iam_certificate_id. NOTE: vip causes + CloudFront to use a dedicated IP address and may incur extra + charges.' + type: string + type: object + waitForDeployment: + description: 'If enabled, the resource will wait for the distribution + status to change from InProgress to Deployed. Setting this tofalse + will skip the process. Default: true.' + type: boolean + webAclId: + description: Unique identifier that specifies the AWS WAF web + ACL, if any, to associate with this distribution. To specify + a web ACL created using the latest version of AWS WAF (WAFv2), + use the ACL ARN, for example aws_wafv2_web_acl.example.arn. + To specify a web ACL created using AWS WAF Classic, use the + ACL ID, for example aws_waf_web_acl.example.id. 
The WAF Web + ACL must exist in the WAF Global (CloudFront) region and the + credentials configuring this argument must have waf:GetWebACL + permissions assigned. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.defaultCacheBehavior is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultCacheBehavior) + || (has(self.initProvider) && has(self.initProvider.defaultCacheBehavior))' + - message: spec.forProvider.enabled is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.enabled) + || (has(self.initProvider) && has(self.initProvider.enabled))' + - message: spec.forProvider.origin is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.origin) + || (has(self.initProvider) && has(self.initProvider.origin))' + - message: spec.forProvider.restrictions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.restrictions) + || (has(self.initProvider) && has(self.initProvider.restrictions))' + - message: spec.forProvider.viewerCertificate is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.viewerCertificate) + || (has(self.initProvider) && has(self.initProvider.viewerCertificate))' + status: + description: DistributionStatus defines the observed state of Distribution. + properties: + atProvider: + properties: + aliases: + description: Extra CNAMEs (alternate domain names), if any, for + this distribution. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + arn: + description: 'ARN for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, + where 123456789012 is your AWS account ID.' + type: string + callerReference: + description: Internal value used by CloudFront to allow future + updates to the distribution configuration. + type: string + comment: + description: Any comments you want to include about the distribution. + type: string + continuousDeploymentPolicyId: + description: Identifier of a continuous deployment policy. This + argument should only be set on a production distribution. See + the aws_cloudfront_continuous_deployment_policy resource for + additional details. + type: string + customErrorResponse: + description: One or more custom error response elements (multiples + allowed). + items: + properties: + errorCachingMinTtl: + description: Minimum amount of time you want HTTP error + codes to stay in CloudFront caches before CloudFront queries + your origin to see whether the object has been updated. + type: number + errorCode: + description: 4xx or 5xx HTTP status code that you want to + customize. + type: number + responseCode: + description: HTTP status code that you want CloudFront to + return with the custom error page to the viewer. + type: number + responsePagePath: + description: Path of the custom error page (for example, + /custom_404.html). + type: string + type: object + type: array + defaultCacheBehavior: + description: Default cache behavior for this distribution (maximum + one). Requires either cache_policy_id (preferred) or forwarded_values + (deprecated) be set. + properties: + allowedMethods: + description: Controls which HTTP methods CloudFront processes + and forwards to your Amazon S3 bucket or your custom origin. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + cachePolicyId: + description: Unique identifier of the cache policy that is + attached to the cache behavior. If configuring the default_cache_behavior + either cache_policy_id or forwarded_values must be set. + type: string + cachedMethods: + description: Controls whether CloudFront caches the response + to requests using the specified HTTP methods. + items: + type: string + type: array + x-kubernetes-list-type: set + compress: + description: 'Whether you want CloudFront to automatically + compress content for web requests that include Accept-Encoding: + gzip in the request header (default: false).' + type: boolean + defaultTtl: + description: Default amount of time (in seconds) that an object + is in a CloudFront cache before CloudFront forwards another + request in the absence of an Cache-Control max-age or Expires + header. + type: number + fieldLevelEncryptionId: + description: Field level encryption configuration ID. + type: string + forwardedValues: + description: The forwarded values configuration that specifies + how CloudFront handles query strings, cookies and headers + (maximum one). + properties: + cookies: + description: The forwarded values cookies that specifies + how CloudFront handles cookies (maximum one). + properties: + forward: + description: Whether you want CloudFront to forward + cookies to the origin that is associated with this + cache behavior. You can specify all, none or whitelist. + If whitelist, you must include the subsequent whitelisted_names. + type: string + whitelistedNames: + description: If you have specified whitelist to forward, + the whitelisted cookies that you want CloudFront + to forward to your origin. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + headers: + description: Headers, if any, that you want CloudFront + to vary upon for this cache behavior. Specify * to include + all headers. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + queryString: + description: Indicates whether you want CloudFront to + forward query strings to the origin that is associated + with this cache behavior. + type: boolean + queryStringCacheKeys: + description: When specified, along with a value of true + for query_string, all query strings are forwarded, however + only the query string keys listed in this argument are + cached. When omitted with a value of true for query_string, + all query string keys are cached. + items: + type: string + type: array + type: object + functionAssociation: + description: A config block that triggers a cloudfront function + with specific actions (maximum 2). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + functionArn: + description: ARN of the CloudFront function. + type: string + type: object + type: array + lambdaFunctionAssociation: + description: A config block that triggers a lambda function + with specific actions (maximum 4). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + includeBody: + description: 'When set to true it exposes the request + body to the lambda function. Defaults to false. Valid + values: true, false.' + type: boolean + lambdaArn: + description: ARN of the Lambda function. + type: string + type: object + type: array + maxTtl: + description: Maximum amount of time (in seconds) that an object + is in a CloudFront cache before CloudFront forwards another + request to your origin to determine whether the object has + been updated. Only effective in the presence of Cache-Control + max-age, Cache-Control s-maxage, and Expires headers. 
+ type: number + minTtl: + description: Minimum amount of time that you want objects + to stay in CloudFront caches before CloudFront queries your + origin to see whether the object has been updated. Defaults + to 0 seconds. + type: number + originRequestPolicyId: + description: Unique identifier of the origin request policy + that is attached to the behavior. + type: string + realtimeLogConfigArn: + description: ARN of the real-time log configuration that is + attached to this cache behavior. + type: string + responseHeadersPolicyId: + description: Identifier for a response headers policy. + type: string + smoothStreaming: + description: Indicates whether you want to distribute media + files in Microsoft Smooth Streaming format using the origin + that is associated with this cache behavior. + type: boolean + targetOriginId: + description: Value of ID for the origin that you want CloudFront + to route requests to when a request matches the path pattern + either for a cache behavior or for the default cache behavior. + type: string + trustedKeyGroups: + description: List of key group IDs that CloudFront can use + to validate signed URLs or signed cookies. See the CloudFront + User Guide for more information about this feature. + items: + type: string + type: array + trustedSigners: + description: List of AWS account IDs (or self) that you want + to allow to create signed URLs for private content. See + the CloudFront User Guide for more information about this + feature. + items: + type: string + type: array + viewerProtocolPolicy: + description: Use this element to specify the protocol that + users can use to access the files in the origin specified + by TargetOriginId when a request matches the path pattern + in PathPattern. One of allow-all, https-only, or redirect-to-https. + type: string + type: object + defaultRootObject: + description: Object that you want CloudFront to return (for example, + index.html) when an end user requests the root URL. 
+ type: string + domainName: + description: DNS domain name of either the S3 bucket, or web site + of your custom origin. + type: string + enabled: + description: Whether the distribution is enabled to accept end + user requests for content. + type: boolean + etag: + description: 'Current version of the distribution''s information. + For example: E2QWRUHAPOMQZL.' + type: string + hostedZoneId: + description: CloudFront Route 53 zone ID that can be used to route + an Alias Resource Record Set to. This attribute is simply an + alias for the zone ID Z2FDTNDATAQYW2. + type: string + httpVersion: + description: Maximum HTTP version to support on the distribution. + Allowed values are http1.1, http2, http2and3 and http3. The + default is http2. + type: string + id: + description: 'Identifier for the distribution. For example: EDFDVBD632BHDS5.' + type: string + inProgressValidationBatches: + description: Number of invalidation batches currently in progress. + type: number + isIpv6Enabled: + description: Whether the IPv6 is enabled for the distribution. + type: boolean + lastModifiedTime: + description: Date and time the distribution was last modified. + type: string + loggingConfig: + description: The logging configuration that controls how logs + are written to your distribution (maximum one). + properties: + bucket: + description: Amazon S3 bucket to store the access logs in, + for example, myawslogbucket.s3.amazonaws.com. + type: string + includeCookies: + description: 'Whether to include cookies in access logs (default: + false).' + type: boolean + prefix: + description: Prefix to the access log filenames for this distribution, + for example, myprefix/. + type: string + type: object + orderedCacheBehavior: + description: Ordered list of cache behaviors resource for this + distribution. List from top to bottom in order of precedence. + The topmost cache behavior will have precedence 0. 
+ items: + properties: + allowedMethods: + description: Controls which HTTP methods CloudFront processes + and forwards to your Amazon S3 bucket or your custom origin. + items: + type: string + type: array + x-kubernetes-list-type: set + cachePolicyId: + description: Unique identifier of the cache policy that + is attached to the cache behavior. If configuring the + default_cache_behavior either cache_policy_id or forwarded_values + must be set. + type: string + cachedMethods: + description: Controls whether CloudFront caches the response + to requests using the specified HTTP methods. + items: + type: string + type: array + x-kubernetes-list-type: set + compress: + description: 'Whether you want CloudFront to automatically + compress content for web requests that include Accept-Encoding: + gzip in the request header (default: false).' + type: boolean + defaultTtl: + description: Default amount of time (in seconds) that an + object is in a CloudFront cache before CloudFront forwards + another request in the absence of an Cache-Control max-age + or Expires header. + type: number + fieldLevelEncryptionId: + description: Field level encryption configuration ID. + type: string + forwardedValues: + description: The forwarded values configuration that specifies + how CloudFront handles query strings, cookies and headers + (maximum one). + properties: + cookies: + description: The forwarded values cookies that specifies + how CloudFront handles cookies (maximum one). + properties: + forward: + description: Whether you want CloudFront to forward + cookies to the origin that is associated with + this cache behavior. You can specify all, none + or whitelist. If whitelist, you must include the + subsequent whitelisted_names. + type: string + whitelistedNames: + description: If you have specified whitelist to + forward, the whitelisted cookies that you want + CloudFront to forward to your origin. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + headers: + description: Headers, if any, that you want CloudFront + to vary upon for this cache behavior. Specify * to + include all headers. + items: + type: string + type: array + x-kubernetes-list-type: set + queryString: + description: Indicates whether you want CloudFront to + forward query strings to the origin that is associated + with this cache behavior. + type: boolean + queryStringCacheKeys: + description: When specified, along with a value of true + for query_string, all query strings are forwarded, + however only the query string keys listed in this + argument are cached. When omitted with a value of + true for query_string, all query string keys are cached. + items: + type: string + type: array + type: object + functionAssociation: + description: A config block that triggers a cloudfront function + with specific actions (maximum 2). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + functionArn: + description: ARN of the CloudFront function. + type: string + type: object + type: array + lambdaFunctionAssociation: + description: A config block that triggers a lambda function + with specific actions (maximum 4). + items: + properties: + eventType: + description: 'Specific event to trigger this function. + Valid values: viewer-request, origin-request, viewer-response, + origin-response.' + type: string + includeBody: + description: 'When set to true it exposes the request + body to the lambda function. Defaults to false. + Valid values: true, false.' + type: boolean + lambdaArn: + description: ARN of the Lambda function. 
+ type: string + type: object + type: array + maxTtl: + description: Maximum amount of time (in seconds) that an + object is in a CloudFront cache before CloudFront forwards + another request to your origin to determine whether the + object has been updated. Only effective in the presence + of Cache-Control max-age, Cache-Control s-maxage, and + Expires headers. + type: number + minTtl: + description: Minimum amount of time that you want objects + to stay in CloudFront caches before CloudFront queries + your origin to see whether the object has been updated. + Defaults to 0 seconds. + type: number + originRequestPolicyId: + description: Unique identifier of the origin request policy + that is attached to the behavior. + type: string + pathPattern: + description: Pattern (for example, images/*.jpg) that specifies + which requests you want this cache behavior to apply to. + type: string + realtimeLogConfigArn: + description: ARN of the real-time log configuration that + is attached to this cache behavior. + type: string + responseHeadersPolicyId: + description: Identifier for a response headers policy. + type: string + smoothStreaming: + description: Indicates whether you want to distribute media + files in Microsoft Smooth Streaming format using the origin + that is associated with this cache behavior. + type: boolean + targetOriginId: + description: Value of ID for the origin that you want CloudFront + to route requests to when a request matches the path pattern + either for a cache behavior or for the default cache behavior. + type: string + trustedKeyGroups: + description: List of key group IDs that CloudFront can use + to validate signed URLs or signed cookies. See the CloudFront + User Guide for more information about this feature. + items: + type: string + type: array + trustedSigners: + description: List of AWS account IDs (or self) that you + want to allow to create signed URLs for private content. 
+ See the CloudFront User Guide for more information about + this feature. + items: + type: string + type: array + viewerProtocolPolicy: + description: Use this element to specify the protocol that + users can use to access the files in the origin specified + by TargetOriginId when a request matches the path pattern + in PathPattern. One of allow-all, https-only, or redirect-to-https. + type: string + type: object + type: array + origin: + description: One or more origins for this distribution (multiples + allowed). + items: + properties: + connectionAttempts: + description: Number of times that CloudFront attempts to + connect to the origin. Must be between 1-3. Defaults to + 3. + type: number + connectionTimeout: + description: Number of seconds that CloudFront waits when + trying to establish a connection to the origin. Must be + between 1-10. Defaults to 10. + type: number + customHeader: + description: One or more sub-resources with name and value + parameters that specify header data that will be sent + to the origin (multiples allowed). + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + customOriginConfig: + description: The CloudFront custom origin configuration + information. If an S3 origin is required, use origin_access_control_id + or s3_origin_config instead. + properties: + httpPort: + description: HTTP port the custom origin listens on. + type: number + httpsPort: + description: HTTPS port the custom origin listens on. + type: number + originKeepaliveTimeout: + description: The Custom KeepAlive timeout, in seconds. + By default, AWS enforces an upper limit of 60. But + you can request an increase. Defaults to 5. + type: number + originProtocolPolicy: + description: Origin protocol policy to apply to your + origin. One of http-only, https-only, or match-viewer. + type: string + originReadTimeout: + description: The Custom Read timeout, in seconds. By + default, AWS enforces an upper limit of 60. 
But you + can request an increase. Defaults to 30. + type: number + originSslProtocols: + description: 'List of SSL/TLS protocols that CloudFront + can use when connecting to your origin over HTTPS. + Valid values: SSLv3, TLSv1, TLSv1.1, TLSv1.2. For + more information, see Minimum Origin SSL Protocol + in the Amazon CloudFront Developer Guide.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + domainName: + description: DNS domain name of either the S3 bucket, or + web site of your custom origin. + type: string + originAccessControlId: + description: Unique identifier of a CloudFront origin access + control for this origin. + type: string + originId: + description: Unique identifier for the origin. + type: string + originPath: + description: Optional element that causes CloudFront to + request your content from a directory in your Amazon S3 + bucket or your custom origin. + type: string + originShield: + description: CloudFront Origin Shield configuration information. + Using Origin Shield can help reduce the load on your origin. + For more information, see Using Origin Shield in the Amazon + CloudFront Developer Guide. + properties: + enabled: + description: Whether the distribution is enabled to + accept end user requests for content. + type: boolean + originShieldRegion: + description: AWS Region for Origin Shield. To specify + a region, use the region code, not the region name. + For example, specify the US East (Ohio) region as + us-east-2. + type: string + type: object + s3OriginConfig: + description: CloudFront S3 origin configuration information. + If a custom origin is required, use custom_origin_config + instead. + properties: + originAccessIdentity: + description: The CloudFront origin access identity to + associate with the origin. + type: string + type: object + type: object + type: array + originGroup: + description: One or more origin_group for this distribution (multiples + allowed). 
+ items: + properties: + failoverCriteria: + description: The failover criteria for when to failover + to the secondary origin. + properties: + statusCodes: + description: List of HTTP status codes for the origin + group. + items: + type: number + type: array + x-kubernetes-list-type: set + type: object + member: + description: Ordered member configuration blocks assigned + to the origin group, where the first member is the primary + origin. You must specify two members. + items: + properties: + originId: + description: Unique identifier for the origin. + type: string + type: object + type: array + originId: + description: Unique identifier for the origin. + type: string + type: object + type: array + priceClass: + description: Price class for this distribution. One of PriceClass_All, + PriceClass_200, PriceClass_100. + type: string + restrictions: + description: The restriction configuration for this distribution + (maximum one). + properties: + geoRestriction: + properties: + locations: + description: ISO 3166-1-alpha-2 codes for which you want + CloudFront either to distribute your content (whitelist) + or not distribute your content (blacklist). If the type + is specified as none an empty array can be used. + items: + type: string + type: array + x-kubernetes-list-type: set + restrictionType: + description: 'Method that you want to use to restrict + distribution of your content by country: none, whitelist, + or blacklist.' + type: string + type: object + type: object + retainOnDelete: + description: 'If this is set, the distribution needs to be deleted + manually afterwards. Default: false.' + type: boolean + staging: + description: A Boolean that indicates whether this is a staging + distribution. Defaults to false. + type: boolean + status: + description: Current status of the distribution. Deployed if the + distribution's information is fully propagated throughout the + Amazon CloudFront system. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + trustedKeyGroups: + description: List of key group IDs that CloudFront can use to + validate signed URLs or signed cookies. See the CloudFront User + Guide for more information about this feature. + items: + properties: + enabled: + description: Whether the distribution is enabled to accept + end user requests for content. + type: boolean + items: + description: List of nested attributes for each key group. + items: + properties: + keyGroupId: + description: ID of the key group that contains the + public keys. + type: string + keyPairIds: + description: Set of CloudFront key pair IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + trustedSigners: + description: List of AWS account IDs (or self) that you want to + allow to create signed URLs for private content. See the CloudFront + User Guide for more information about this feature. + items: + properties: + enabled: + description: Whether the distribution is enabled to accept + end user requests for content. + type: boolean + items: + description: List of nested attributes for each key group. + items: + properties: + awsAccountNumber: + description: AWS account ID or self + type: string + keyPairIds: + description: Set of CloudFront key pair IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + viewerCertificate: + description: The SSL configuration for this distribution (maximum + one). 
+ properties: + acmCertificateArn: + description: ARN of the AWS Certificate Manager certificate + that you wish to use with this distribution. Specify this, + cloudfront_default_certificate, or iam_certificate_id. The + ACM certificate must be in US-EAST-1. + type: string + cloudfrontDefaultCertificate: + description: true if you want viewers to use HTTPS to request + your objects and you're using the CloudFront domain name + for your distribution. Specify this, acm_certificate_arn, + or iam_certificate_id. + type: boolean + iamCertificateId: + description: IAM certificate identifier of the custom viewer + certificate for this distribution if you are using a custom + domain. Specify this, acm_certificate_arn, or cloudfront_default_certificate. + type: string + minimumProtocolVersion: + description: 'Minimum version of the SSL protocol that you + want CloudFront to use for HTTPS connections. Can only be + set if cloudfront_default_certificate = false. See all possible + values in this table under "Security policy." Some examples + include: TLSv1.2_2019 and TLSv1.2_2021. Default: TLSv1. + NOTE: If you are using a custom certificate (specified with + acm_certificate_arn or iam_certificate_id), and have specified + sni-only in ssl_support_method, TLSv1 or later must be specified. + If you have specified vip in ssl_support_method, only SSLv3 + or TLSv1 can be specified. If you have specified cloudfront_default_certificate, + TLSv1 must be specified.' + type: string + sslSupportMethod: + description: 'How you want CloudFront to serve HTTPS requests. + One of vip, sni-only, or static-ip. Required if you specify + acm_certificate_arn or iam_certificate_id. NOTE: vip causes + CloudFront to use a dedicated IP address and may incur extra + charges.' + type: string + type: object + waitForDeployment: + description: 'If enabled, the resource will wait for the distribution + status to change from InProgress to Deployed. Setting this tofalse + will skip the process. 
Default: true.' + type: boolean + webAclId: + description: Unique identifier that specifies the AWS WAF web + ACL, if any, to associate with this distribution. To specify + a web ACL created using the latest version of AWS WAF (WAFv2), + use the ACL ARN, for example aws_wafv2_web_acl.example.arn. + To specify a web ACL created using AWS WAF Classic, use the + ACL ID, for example aws_waf_web_acl.example.id. The WAF Web + ACL must exist in the WAF Global (CloudFront) region and the + credentials configuring this argument must have waf:GetWebACL + permissions assigned. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudfront.aws.upbound.io_fieldlevelencryptionconfigs.yaml b/package/crds/cloudfront.aws.upbound.io_fieldlevelencryptionconfigs.yaml index 7f538fd0e9..36cb3fb950 100644 --- a/package/crds/cloudfront.aws.upbound.io_fieldlevelencryptionconfigs.yaml +++ b/package/crds/cloudfront.aws.upbound.io_fieldlevelencryptionconfigs.yaml @@ -746,3 +746,701 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FieldLevelEncryptionConfig is the Schema for the FieldLevelEncryptionConfigs + API. Provides a CloudFront Field-level Encryption Config resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FieldLevelEncryptionConfigSpec defines the desired state + of FieldLevelEncryptionConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + comment: + description: An optional comment about the Field Level Encryption + Config. + type: string + contentTypeProfileConfig: + description: Content Type Profile Config specifies when to forward + content if a content type isn't recognized and profiles to use + as by default in a request if a query argument doesn't specify + a profile to use. + properties: + contentTypeProfiles: + description: Object that contains an attribute items that + contains the list of configurations for a field-level encryption + content type-profile. See Content Type Profile. 
+                  properties:
+                    items:
+                      items:
+                        properties:
+                          contentType:
+                            description: The content type for a field-level encryption
+                              content type-profile mapping. Valid value is application/x-www-form-urlencoded.
+                            type: string
+                          format:
+                            description: The format for a field-level encryption
+                              content type-profile mapping. Valid value is URLEncoded.
+                            type: string
+                          profileId:
+                            description: The profile ID for a field-level encryption
+                              content type-profile mapping.
+                            type: string
+                        type: object
+                      type: array
+                  type: object
+                forwardWhenContentTypeIsUnknown:
+                  description: specifies what to do when an unknown content
+                    type is provided for the profile. If true, content is forwarded
+                    without being encrypted when the content type is unknown.
+                    If false (the default), an error is returned when the content
+                    type is unknown.
+                  type: boolean
+              type: object
+            queryArgProfileConfig:
+              description: Query Arg Profile Config that specifies when to forward
+                content if a profile isn't found and the profile that can be
+                provided as a query argument in a request.
+              properties:
+                forwardWhenQueryArgProfileIsUnknown:
+                  description: Flag to set if you want a request to be forwarded
+                    to the origin even if the profile specified by the field-level
+                    encryption query argument, fle-profile, is unknown.
+                  type: boolean
+                queryArgProfiles:
+                  description: Object that contains an attribute items that
+                    contains the list of profiles specified for query argument-profile
+                    mapping for field-level encryption. see Query Arg Profile.
+                  properties:
+                    items:
+                      items:
+                        properties:
+                          profileId:
+                            description: The profile ID for a field-level encryption
+                              content type-profile mapping.
+                            type: string
+                          profileIdRef:
+                            description: Reference to a FieldLevelEncryptionProfile
+                              in cloudfront to populate profileId.
+                            properties:
+                              name:
+                                description: Name of the referenced object.
+                                type: string
+                              policy:
+                                description: Policies for referencing.
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + profileIdSelector: + description: Selector for a FieldLevelEncryptionProfile + in cloudfront to populate profileId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+                                          enum:
+                                          - Always
+                                          - IfNotPresent
+                                          type: string
+                                      type: object
+                                  type: object
+                              queryArg:
+                                description: Query argument for field-level encryption
+                                  query argument-profile mapping.
+                                type: string
+                              type: object
+                            type: array
+                        type: object
+                    type: object
+                  region:
+                    description: Region is the region you'd like your resource to
+                      be created in.
+                    type: string
+                required:
+                - region
+                type: object
+              initProvider:
+                description: |-
+                  THIS IS A BETA FIELD. It will be honored
+                  unless the Management Policies feature flag is disabled.
+                  InitProvider holds the same fields as ForProvider, with the exception
+                  of Identifier and other resource reference fields. The fields that are
+                  in InitProvider are merged into ForProvider when the resource is created.
+                  The same fields are also added to the terraform ignore_changes hook, to
+                  avoid updating them after creation. This is useful for fields that are
+                  required on creation, but we do not desire to update them after creation,
+                  for example because of an external controller is managing them, like an
+                  autoscaler.
+                properties:
+                  comment:
+                    description: An optional comment about the Field Level Encryption
+                      Config.
+                    type: string
+                  contentTypeProfileConfig:
+                    description: Content Type Profile Config specifies when to forward
+                      content if a content type isn't recognized and profiles to use
+                      as by default in a request if a query argument doesn't specify
+                      a profile to use.
+                    properties:
+                      contentTypeProfiles:
+                        description: Object that contains an attribute items that
+                          contains the list of configurations for a field-level encryption
+                          content type-profile. See Content Type Profile.
+                        properties:
+                          items:
+                            items:
+                              properties:
+                                contentType:
+                                  description: The content type for a field-level encryption
+                                    content type-profile mapping. Valid value is application/x-www-form-urlencoded.
+                                  type: string
+                                format:
+                                  description: The format for a field-level encryption
+                                    content type-profile mapping. Valid value is URLEncoded.
+                                  type: string
+                                profileId:
+                                  description: The profile ID for a field-level encryption
+                                    content type-profile mapping.
+                                  type: string
+                              type: object
+                            type: array
+                        type: object
+                      forwardWhenContentTypeIsUnknown:
+                        description: specifies what to do when an unknown content
+                          type is provided for the profile. If true, content is forwarded
+                          without being encrypted when the content type is unknown.
+                          If false (the default), an error is returned when the content
+                          type is unknown.
+                        type: boolean
+                    type: object
+                  queryArgProfileConfig:
+                    description: Query Arg Profile Config that specifies when to forward
+                      content if a profile isn't found and the profile that can be
+                      provided as a query argument in a request.
+                    properties:
+                      forwardWhenQueryArgProfileIsUnknown:
+                        description: Flag to set if you want a request to be forwarded
+                          to the origin even if the profile specified by the field-level
+                          encryption query argument, fle-profile, is unknown.
+                        type: boolean
+                      queryArgProfiles:
+                        description: Object that contains an attribute items that
+                          contains the list of profiles specified for query argument-profile
+                          mapping for field-level encryption. see Query Arg Profile.
+                        properties:
+                          items:
+                            items:
+                              properties:
+                                profileId:
+                                  description: The profile ID for a field-level encryption
+                                    content type-profile mapping.
+                                  type: string
+                                profileIdRef:
+                                  description: Reference to a FieldLevelEncryptionProfile
+                                    in cloudfront to populate profileId.
+                                  properties:
+                                    name:
+                                      description: Name of the referenced object.
+                                      type: string
+                                    policy:
+                                      description: Policies for referencing.
+                                      properties:
+                                        resolution:
+                                          default: Required
+                                          description: |-
+                                            Resolution specifies whether resolution of this reference is required.
+                                            The default is 'Required', which means the reconcile will fail if the
+                                            reference cannot be resolved. 'Optional' means this reference will be
+                                            a no-op if it cannot be resolved.
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + profileIdSelector: + description: Selector for a FieldLevelEncryptionProfile + in cloudfront to populate profileId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + queryArg: + description: Query argument for field-level encryption + query argument-profile mapping. + type: string + type: object + type: array + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.contentTypeProfileConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.contentTypeProfileConfig) + || (has(self.initProvider) && has(self.initProvider.contentTypeProfileConfig))' + - message: spec.forProvider.queryArgProfileConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.queryArgProfileConfig) + || (has(self.initProvider) && has(self.initProvider.queryArgProfileConfig))' + status: + description: FieldLevelEncryptionConfigStatus defines the observed state + of FieldLevelEncryptionConfig. + properties: + atProvider: + properties: + callerReference: + description: Internal value used by CloudFront to allow future + updates to the Field Level Encryption Config. + type: string + comment: + description: An optional comment about the Field Level Encryption + Config. + type: string + contentTypeProfileConfig: + description: Content Type Profile Config specifies when to forward + content if a content type isn't recognized and profiles to use + as by default in a request if a query argument doesn't specify + a profile to use. + properties: + contentTypeProfiles: + description: Object that contains an attribute items that + contains the list of configurations for a field-level encryption + content type-profile. See Content Type Profile. + properties: + items: + items: + properties: + contentType: + description: he content type for a field-level encryption + content type-profile mapping. Valid value is application/x-www-form-urlencoded. + type: string + format: + description: The format for a field-level encryption + content type-profile mapping. 
Valid value is URLEncoded. + type: string + profileId: + description: The profile ID for a field-level encryption + content type-profile mapping. + type: string + type: object + type: array + type: object + forwardWhenContentTypeIsUnknown: + description: specifies what to do when an unknown content + type is provided for the profile. If true, content is forwarded + without being encrypted when the content type is unknown. + If false (the default), an error is returned when the content + type is unknown. + type: boolean + type: object + etag: + description: 'The current version of the Field Level Encryption + Config. For example: E2QWRUHAPOMQZL.' + type: string + id: + description: 'The identifier for the Field Level Encryption Config. + For example: K3D5EWEUDCCXON.' + type: string + queryArgProfileConfig: + description: Query Arg Profile Config that specifies when to forward + content if a profile isn't found and the profile that can be + provided as a query argument in a request. + properties: + forwardWhenQueryArgProfileIsUnknown: + description: Flag to set if you want a request to be forwarded + to the origin even if the profile specified by the field-level + encryption query argument, fle-profile, is unknown. + type: boolean + queryArgProfiles: + description: Object that contains an attribute items that + contains the list ofrofiles specified for query argument-profile + mapping for field-level encryption. see Query Arg Profile. + properties: + items: + items: + properties: + profileId: + description: The profile ID for a field-level encryption + content type-profile mapping. + type: string + queryArg: + description: Query argument for field-level encryption + query argument-profile mapping. + type: string + type: object + type: array + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudfront.aws.upbound.io_fieldlevelencryptionprofiles.yaml b/package/crds/cloudfront.aws.upbound.io_fieldlevelencryptionprofiles.yaml index 72925363f5..3adb00a746 100644 --- a/package/crds/cloudfront.aws.upbound.io_fieldlevelencryptionprofiles.yaml +++ b/package/crds/cloudfront.aws.upbound.io_fieldlevelencryptionprofiles.yaml @@ -632,3 +632,605 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FieldLevelEncryptionProfile is the Schema for the FieldLevelEncryptionProfiles + API. Provides a CloudFront Field-level Encryption Profile resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FieldLevelEncryptionProfileSpec defines the desired state + of FieldLevelEncryptionProfile + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + comment: + description: An optional comment about the Field Level Encryption + Profile. + type: string + encryptionEntities: + description: The encryption entities config block for field-level + encryption profiles that contains an attribute items which includes + the encryption key and field pattern specifications. + properties: + items: + items: + properties: + fieldPatterns: + description: Object that contains an attribute items + that contains the list of field patterns in a field-level + encryption content type profile specify the fields + that you want to be encrypted. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + providerId: + description: The provider associated with the public + key being used for encryption. + type: string + publicKeyId: + description: The public key associated with a set of + field-level encryption patterns, to be used when encrypting + the fields that match the patterns. 
+ type: string + publicKeyIdRef: + description: Reference to a PublicKey in cloudfront + to populate publicKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicKeyIdSelector: + description: Selector for a PublicKey in cloudfront + to populate publicKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + name: + description: The name of the Field Level Encryption Profile. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + comment: + description: An optional comment about the Field Level Encryption + Profile. + type: string + encryptionEntities: + description: The encryption entities config block for field-level + encryption profiles that contains an attribute items which includes + the encryption key and field pattern specifications. + properties: + items: + items: + properties: + fieldPatterns: + description: Object that contains an attribute items + that contains the list of field patterns in a field-level + encryption content type profile specify the fields + that you want to be encrypted. 
+ properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + providerId: + description: The provider associated with the public + key being used for encryption. + type: string + publicKeyId: + description: The public key associated with a set of + field-level encryption patterns, to be used when encrypting + the fields that match the patterns. + type: string + publicKeyIdRef: + description: Reference to a PublicKey in cloudfront + to populate publicKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicKeyIdSelector: + description: Selector for a PublicKey in cloudfront + to populate publicKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + name: + description: The name of the Field Level Encryption Profile. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.encryptionEntities is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.encryptionEntities) + || (has(self.initProvider) && has(self.initProvider.encryptionEntities))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: FieldLevelEncryptionProfileStatus defines the observed state + of FieldLevelEncryptionProfile. + properties: + atProvider: + properties: + callerReference: + description: Internal value used by CloudFront to allow future + updates to the Field Level Encryption Profile. + type: string + comment: + description: An optional comment about the Field Level Encryption + Profile. + type: string + encryptionEntities: + description: The encryption entities config block for field-level + encryption profiles that contains an attribute items which includes + the encryption key and field pattern specifications. 
+ properties: + items: + items: + properties: + fieldPatterns: + description: Object that contains an attribute items + that contains the list of field patterns in a field-level + encryption content type profile specify the fields + that you want to be encrypted. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + providerId: + description: The provider associated with the public + key being used for encryption. + type: string + publicKeyId: + description: The public key associated with a set of + field-level encryption patterns, to be used when encrypting + the fields that match the patterns. + type: string + type: object + type: array + type: object + etag: + description: 'The current version of the Field Level Encryption + Profile. For example: E2QWRUHAPOMQZL.' + type: string + id: + description: 'The identifier for the Field Level Encryption Profile. + For example: K3D5EWEUDCCXON.' + type: string + name: + description: The name of the Field Level Encryption Profile. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudfront.aws.upbound.io_monitoringsubscriptions.yaml b/package/crds/cloudfront.aws.upbound.io_monitoringsubscriptions.yaml index ac19bde9b8..744506b02c 100644 --- a/package/crds/cloudfront.aws.upbound.io_monitoringsubscriptions.yaml +++ b/package/crds/cloudfront.aws.upbound.io_monitoringsubscriptions.yaml @@ -565,3 +565,538 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitoringSubscription is the Schema for the MonitoringSubscriptions + API. Provides a CloudFront monitoring subscription resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitoringSubscriptionSpec defines the desired state of MonitoringSubscription + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + distributionId: + description: The ID of the distribution that you are enabling + metrics for. + type: string + distributionIdRef: + description: Reference to a Distribution in cloudfront to populate + distributionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + distributionIdSelector: + description: Selector for a Distribution in cloudfront to populate + distributionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + monitoringSubscription: + description: A monitoring subscription. 
This structure contains + information about whether additional CloudWatch metrics are + enabled for a given CloudFront distribution. + properties: + realtimeMetricsSubscriptionConfig: + description: A subscription configuration for additional CloudWatch + metrics. See below. + properties: + realtimeMetricsSubscriptionStatus: + description: A flag that indicates whether additional + CloudWatch metrics are enabled for a given CloudFront + distribution. Valid values are Enabled and Disabled. + See below. + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + distributionId: + description: The ID of the distribution that you are enabling + metrics for. + type: string + distributionIdRef: + description: Reference to a Distribution in cloudfront to populate + distributionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + distributionIdSelector: + description: Selector for a Distribution in cloudfront to populate + distributionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + monitoringSubscription: + description: A monitoring subscription. This structure contains + information about whether additional CloudWatch metrics are + enabled for a given CloudFront distribution. 
+ properties: + realtimeMetricsSubscriptionConfig: + description: A subscription configuration for additional CloudWatch + metrics. See below. + properties: + realtimeMetricsSubscriptionStatus: + description: A flag that indicates whether additional + CloudWatch metrics are enabled for a given CloudFront + distribution. Valid values are Enabled and Disabled. + See below. + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.monitoringSubscription is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.monitoringSubscription) + || (has(self.initProvider) && has(self.initProvider.monitoringSubscription))' + status: + description: MonitoringSubscriptionStatus defines the observed state of + MonitoringSubscription. + properties: + atProvider: + properties: + distributionId: + description: The ID of the distribution that you are enabling + metrics for. + type: string + id: + description: The ID of the CloudFront monitoring subscription, + which corresponds to the distribution_id. + type: string + monitoringSubscription: + description: A monitoring subscription. This structure contains + information about whether additional CloudWatch metrics are + enabled for a given CloudFront distribution. + properties: + realtimeMetricsSubscriptionConfig: + description: A subscription configuration for additional CloudWatch + metrics. See below. + properties: + realtimeMetricsSubscriptionStatus: + description: A flag that indicates whether additional + CloudWatch metrics are enabled for a given CloudFront + distribution. Valid values are Enabled and Disabled. + See below. + type: string + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudfront.aws.upbound.io_originrequestpolicies.yaml b/package/crds/cloudfront.aws.upbound.io_originrequestpolicies.yaml index 60ef2f8f73..8287efb00f 100644 --- a/package/crds/cloudfront.aws.upbound.io_originrequestpolicies.yaml +++ b/package/crds/cloudfront.aws.upbound.io_originrequestpolicies.yaml @@ -549,3 +549,498 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: OriginRequestPolicy is the Schema for the OriginRequestPolicys + API. Determines the values that CloudFront includes in requests that it + sends to the origin. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OriginRequestPolicySpec defines the desired state of OriginRequestPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + comment: + description: Comment to describe the origin request policy. + type: string + cookiesConfig: + description: Object that determines whether any cookies in viewer + requests (and if so, which cookies) are included in the origin + request key and automatically included in requests that CloudFront + sends to the origin. See Cookies Config for more information. + properties: + cookieBehavior: + type: string + cookies: + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + headersConfig: + description: Object that determines whether any HTTP headers (and + if so, which headers) are included in the origin request key + and automatically included in requests that CloudFront sends + to the origin. See Headers Config for more information. 
+ properties: + headerBehavior: + type: string + headers: + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + queryStringsConfig: + description: Object that determines whether any URL query strings + in viewer requests (and if so, which query strings) are included + in the origin request key and automatically included in requests + that CloudFront sends to the origin. See Query String Config + for more information. + properties: + queryStringBehavior: + type: string + queryStrings: + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + comment: + description: Comment to describe the origin request policy. + type: string + cookiesConfig: + description: Object that determines whether any cookies in viewer + requests (and if so, which cookies) are included in the origin + request key and automatically included in requests that CloudFront + sends to the origin. See Cookies Config for more information. 
+ properties: + cookieBehavior: + type: string + cookies: + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + headersConfig: + description: Object that determines whether any HTTP headers (and + if so, which headers) are included in the origin request key + and automatically included in requests that CloudFront sends + to the origin. See Headers Config for more information. + properties: + headerBehavior: + type: string + headers: + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + queryStringsConfig: + description: Object that determines whether any URL query strings + in viewer requests (and if so, which query strings) are included + in the origin request key and automatically included in requests + that CloudFront sends to the origin. See Query String Config + for more information. + properties: + queryStringBehavior: + type: string + queryStrings: + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.cookiesConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.cookiesConfig) + || (has(self.initProvider) && has(self.initProvider.cookiesConfig))' + - message: spec.forProvider.headersConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.headersConfig) + || (has(self.initProvider) && has(self.initProvider.headersConfig))' + - message: spec.forProvider.queryStringsConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.queryStringsConfig) + || (has(self.initProvider) && has(self.initProvider.queryStringsConfig))' + status: + description: OriginRequestPolicyStatus defines the observed state of 
OriginRequestPolicy. + properties: + atProvider: + properties: + comment: + description: Comment to describe the origin request policy. + type: string + cookiesConfig: + description: Object that determines whether any cookies in viewer + requests (and if so, which cookies) are included in the origin + request key and automatically included in requests that CloudFront + sends to the origin. See Cookies Config for more information. + properties: + cookieBehavior: + type: string + cookies: + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + etag: + description: The current version of the origin request policy. + type: string + headersConfig: + description: Object that determines whether any HTTP headers (and + if so, which headers) are included in the origin request key + and automatically included in requests that CloudFront sends + to the origin. See Headers Config for more information. + properties: + headerBehavior: + type: string + headers: + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + id: + description: The identifier for the origin request policy. + type: string + queryStringsConfig: + description: Object that determines whether any URL query strings + in viewer requests (and if so, which query strings) are included + in the origin request key and automatically included in requests + that CloudFront sends to the origin. See Query String Config + for more information. + properties: + queryStringBehavior: + type: string + queryStrings: + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudfront.aws.upbound.io_realtimelogconfigs.yaml b/package/crds/cloudfront.aws.upbound.io_realtimelogconfigs.yaml index 65e9cd67ef..84d33b2fec 100644 --- a/package/crds/cloudfront.aws.upbound.io_realtimelogconfigs.yaml +++ b/package/crds/cloudfront.aws.upbound.io_realtimelogconfigs.yaml @@ -779,3 +779,748 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RealtimeLogConfig is the Schema for the RealtimeLogConfigs API. + Provides a CloudFront real-time log configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RealtimeLogConfigSpec defines the desired state of RealtimeLogConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + endpoint: + description: The Amazon Kinesis data streams where real-time log + data is sent. + properties: + kinesisStreamConfig: + description: The Amazon Kinesis data stream configuration. + properties: + roleArn: + description: |- + The ARN of an IAM role that CloudFront can use to send real-time log data to the Kinesis data stream. + See the AWS documentation for more information. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamArn: + description: The ARN of the Kinesis data stream. + type: string + streamArnRef: + description: Reference to a Stream in kinesis to populate + streamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamArnSelector: + description: Selector for a Stream in kinesis to populate + streamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + streamType: + description: The type of data stream where real-time log data + is sent. The only valid value is Kinesis. + type: string + type: object + fields: + description: The fields that are included in each real-time log + record. See the AWS documentation for supported values. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The unique name to identify this real-time log configuration. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + samplingRate: + description: The sampling rate for this real-time log configuration. + The sampling rate determines the percentage of viewer requests + that are represented in the real-time log data. An integer between + 1 and 100, inclusive. + type: number + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + endpoint: + description: The Amazon Kinesis data streams where real-time log + data is sent. + properties: + kinesisStreamConfig: + description: The Amazon Kinesis data stream configuration. + properties: + roleArn: + description: |- + The ARN of an IAM role that CloudFront can use to send real-time log data to the Kinesis data stream. 
+ See the AWS documentation for more information. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamArn: + description: The ARN of the Kinesis data stream. + type: string + streamArnRef: + description: Reference to a Stream in kinesis to populate + streamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamArnSelector: + description: Selector for a Stream in kinesis to populate + streamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + streamType: + description: The type of data stream where real-time log data + is sent. The only valid value is Kinesis. + type: string + type: object + fields: + description: The fields that are included in each real-time log + record. See the AWS documentation for supported values. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The unique name to identify this real-time log configuration. + type: string + samplingRate: + description: The sampling rate for this real-time log configuration. + The sampling rate determines the percentage of viewer requests + that are represented in the real-time log data. An integer between + 1 and 100, inclusive. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.endpoint is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.endpoint) + || (has(self.initProvider) && has(self.initProvider.endpoint))' + - message: spec.forProvider.fields is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.fields) + || (has(self.initProvider) && has(self.initProvider.fields))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.samplingRate is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in 
self.managementPolicies) || has(self.forProvider.samplingRate) + || (has(self.initProvider) && has(self.initProvider.samplingRate))' + status: + description: RealtimeLogConfigStatus defines the observed state of RealtimeLogConfig. + properties: + atProvider: + properties: + arn: + description: The ARN (Amazon Resource Name) of the CloudFront + real-time log configuration. + type: string + endpoint: + description: The Amazon Kinesis data streams where real-time log + data is sent. + properties: + kinesisStreamConfig: + description: The Amazon Kinesis data stream configuration. + properties: + roleArn: + description: |- + The ARN of an IAM role that CloudFront can use to send real-time log data to the Kinesis data stream. + See the AWS documentation for more information. + type: string + streamArn: + description: The ARN of the Kinesis data stream. + type: string + type: object + streamType: + description: The type of data stream where real-time log data + is sent. The only valid value is Kinesis. + type: string + type: object + fields: + description: The fields that are included in each real-time log + record. See the AWS documentation for supported values. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the CloudFront real-time log configuration. + type: string + name: + description: The unique name to identify this real-time log configuration. + type: string + samplingRate: + description: The sampling rate for this real-time log configuration. + The sampling rate determines the percentage of viewer requests + that are represented in the real-time log data. An integer between + 1 and 100, inclusive. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudfront.aws.upbound.io_responseheaderspolicies.yaml b/package/crds/cloudfront.aws.upbound.io_responseheaderspolicies.yaml index 07d03ae9b0..e427896c1d 100644 --- a/package/crds/cloudfront.aws.upbound.io_responseheaderspolicies.yaml +++ b/package/crds/cloudfront.aws.upbound.io_responseheaderspolicies.yaml @@ -1183,3 +1183,1063 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ResponseHeadersPolicy is the Schema for the ResponseHeadersPolicys + API. Provides a CloudFront response headers policy resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResponseHeadersPolicySpec defines the desired state of ResponseHeadersPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + comment: + description: A comment to describe the response headers policy. + The comment cannot be longer than 128 characters. + type: string + corsConfig: + description: A configuration for a set of HTTP response headers + that are used for Cross-Origin Resource Sharing (CORS). See + Cors Config for more information. + properties: + accessControlAllowCredentials: + description: A Boolean value that CloudFront uses as the value + for the Access-Control-Allow-Credentials HTTP response header. + type: boolean + accessControlAllowHeaders: + description: Object that contains an attribute items that + contains a list of HTTP header names that CloudFront includes + as values for the Access-Control-Allow-Headers HTTP response + header. 
+ properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlAllowMethods: + description: 'Object that contains an attribute items that + contains a list of HTTP methods that CloudFront includes + as values for the Access-Control-Allow-Methods HTTP response + header. Valid values: GET | POST | OPTIONS | PUT | DELETE + | HEAD | ALL' + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlAllowOrigins: + description: Object that contains an attribute items that + contains a list of origins that CloudFront can use as the + value for the Access-Control-Allow-Origin HTTP response + header. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlExposeHeaders: + description: Object that contains an attribute items that + contains a list of HTTP headers that CloudFront includes + as values for the Access-Control-Expose-Headers HTTP response + header. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlMaxAgeSec: + description: A number that CloudFront uses as the value for + the Access-Control-Max-Age HTTP response header. + type: number + originOverride: + description: A Boolean value that determines how CloudFront + behaves for the HTTP response header. + type: boolean + type: object + customHeadersConfig: + description: Object that contains an attribute items that contains + a list of custom headers. See Custom Header for more information. + properties: + items: + items: + properties: + header: + description: The HTTP response header name. + type: string + override: + description: Whether CloudFront overrides a response + header with the same name received from the origin + with the header specifies here. + type: boolean + value: + description: The value for the HTTP response header. 
+ type: string + type: object + type: array + type: object + etag: + description: The current version of the response headers policy. + type: string + name: + description: A unique name to identify the response headers policy. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + removeHeadersConfig: + description: A configuration for a set of HTTP headers to remove + from the HTTP response. Object that contains an attribute items + that contains a list of headers. See Remove Header for more + information. + properties: + items: + items: + properties: + header: + description: The HTTP response header name. + type: string + type: object + type: array + type: object + securityHeadersConfig: + description: A configuration for a set of security-related HTTP + response headers. See Security Headers Config for more information. + properties: + contentSecurityPolicy: + description: The policy directives and their values that CloudFront + includes as values for the Content-Security-Policy HTTP + response header. See Content Security Policy for more information. + properties: + contentSecurityPolicy: + description: The policy directives and their values that + CloudFront includes as values for the Content-Security-Policy + HTTP response header. See Content Security Policy for + more information. + type: string + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + type: object + contentTypeOptions: + description: Determines whether CloudFront includes the X-Content-Type-Options + HTTP response header with its value set to nosniff. See + Content Type Options for more information. + properties: + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. 
+ type: boolean + type: object + frameOptions: + description: Determines whether CloudFront includes the X-Frame-Options + HTTP response header and the header’s value. See Frame Options + for more information. + properties: + frameOption: + description: 'The value of the X-Frame-Options HTTP response + header. Valid values: DENY | SAMEORIGIN' + type: string + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + type: object + referrerPolicy: + description: Determines whether CloudFront includes the Referrer-Policy + HTTP response header and the header’s value. See Referrer + Policy for more information. + properties: + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + referrerPolicy: + description: Determines whether CloudFront includes the + Referrer-Policy HTTP response header and the header’s + value. See Referrer Policy for more information. + type: string + type: object + strictTransportSecurity: + description: Determines whether CloudFront includes the Strict-Transport-Security + HTTP response header and the header’s value. See Strict + Transport Security for more information. + properties: + accessControlMaxAgeSec: + description: A number that CloudFront uses as the value + for the Access-Control-Max-Age HTTP response header. + type: number + includeSubdomains: + description: Whether CloudFront includes the includeSubDomains + directive in the Strict-Transport-Security HTTP response + header. + type: boolean + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + preload: + description: Whether CloudFront includes the preload directive + in the Strict-Transport-Security HTTP response header. 
+ type: boolean + type: object + xssProtection: + description: Determine whether CloudFront includes the X-XSS-Protection + HTTP response header and the header’s value. See XSS Protection + for more information. + properties: + modeBlock: + description: Whether CloudFront includes the mode=block + directive in the X-XSS-Protection header. + type: boolean + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + protection: + description: A Boolean value that determines the value + of the X-XSS-Protection HTTP response header. When this + setting is true, the value of the X-XSS-Protection header + is 1. When this setting is false, the value of the X-XSS-Protection + header is 0. + type: boolean + reportUri: + description: A reporting URI, which CloudFront uses as + the value of the report directive in the X-XSS-Protection + header. You cannot specify a report_uri when mode_block + is true. + type: string + type: object + type: object + serverTimingHeadersConfig: + description: A configuration for enabling the Server-Timing header + in HTTP responses sent from CloudFront. See Server Timing Headers + Config for more information. + properties: + enabled: + description: A Whether CloudFront adds the Server-Timing header + to HTTP responses that it sends in response to requests + that match a cache behavior that's associated with this + response headers policy. + type: boolean + samplingRate: + description: 'A number 0–100 (inclusive) that specifies the + percentage of responses that you want CloudFront to add + the Server-Timing header to. Valid range: Minimum value + of 0.0. Maximum value of 100.0.' + type: number + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + comment: + description: A comment to describe the response headers policy. + The comment cannot be longer than 128 characters. + type: string + corsConfig: + description: A configuration for a set of HTTP response headers + that are used for Cross-Origin Resource Sharing (CORS). See + Cors Config for more information. + properties: + accessControlAllowCredentials: + description: A Boolean value that CloudFront uses as the value + for the Access-Control-Allow-Credentials HTTP response header. + type: boolean + accessControlAllowHeaders: + description: Object that contains an attribute items that + contains a list of HTTP header names that CloudFront includes + as values for the Access-Control-Allow-Headers HTTP response + header. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlAllowMethods: + description: 'Object that contains an attribute items that + contains a list of HTTP methods that CloudFront includes + as values for the Access-Control-Allow-Methods HTTP response + header. 
Valid values: GET | POST | OPTIONS | PUT | DELETE + | HEAD | ALL' + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlAllowOrigins: + description: Object that contains an attribute items that + contains a list of origins that CloudFront can use as the + value for the Access-Control-Allow-Origin HTTP response + header. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlExposeHeaders: + description: Object that contains an attribute items that + contains a list of HTTP headers that CloudFront includes + as values for the Access-Control-Expose-Headers HTTP response + header. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlMaxAgeSec: + description: A number that CloudFront uses as the value for + the Access-Control-Max-Age HTTP response header. + type: number + originOverride: + description: A Boolean value that determines how CloudFront + behaves for the HTTP response header. + type: boolean + type: object + customHeadersConfig: + description: Object that contains an attribute items that contains + a list of custom headers. See Custom Header for more information. + properties: + items: + items: + properties: + header: + description: The HTTP response header name. + type: string + override: + description: Whether CloudFront overrides a response + header with the same name received from the origin + with the header specifies here. + type: boolean + value: + description: The value for the HTTP response header. + type: string + type: object + type: array + type: object + etag: + description: The current version of the response headers policy. + type: string + name: + description: A unique name to identify the response headers policy. + type: string + removeHeadersConfig: + description: A configuration for a set of HTTP headers to remove + from the HTTP response. 
Object that contains an attribute items + that contains a list of headers. See Remove Header for more + information. + properties: + items: + items: + properties: + header: + description: The HTTP response header name. + type: string + type: object + type: array + type: object + securityHeadersConfig: + description: A configuration for a set of security-related HTTP + response headers. See Security Headers Config for more information. + properties: + contentSecurityPolicy: + description: The policy directives and their values that CloudFront + includes as values for the Content-Security-Policy HTTP + response header. See Content Security Policy for more information. + properties: + contentSecurityPolicy: + description: The policy directives and their values that + CloudFront includes as values for the Content-Security-Policy + HTTP response header. See Content Security Policy for + more information. + type: string + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + type: object + contentTypeOptions: + description: Determines whether CloudFront includes the X-Content-Type-Options + HTTP response header with its value set to nosniff. See + Content Type Options for more information. + properties: + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + type: object + frameOptions: + description: Determines whether CloudFront includes the X-Frame-Options + HTTP response header and the header’s value. See Frame Options + for more information. + properties: + frameOption: + description: 'The value of the X-Frame-Options HTTP response + header. 
Valid values: DENY | SAMEORIGIN' + type: string + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + type: object + referrerPolicy: + description: Determines whether CloudFront includes the Referrer-Policy + HTTP response header and the header’s value. See Referrer + Policy for more information. + properties: + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + referrerPolicy: + description: Determines whether CloudFront includes the + Referrer-Policy HTTP response header and the header’s + value. See Referrer Policy for more information. + type: string + type: object + strictTransportSecurity: + description: Determines whether CloudFront includes the Strict-Transport-Security + HTTP response header and the header’s value. See Strict + Transport Security for more information. + properties: + accessControlMaxAgeSec: + description: A number that CloudFront uses as the value + for the Access-Control-Max-Age HTTP response header. + type: number + includeSubdomains: + description: Whether CloudFront includes the includeSubDomains + directive in the Strict-Transport-Security HTTP response + header. + type: boolean + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + preload: + description: Whether CloudFront includes the preload directive + in the Strict-Transport-Security HTTP response header. + type: boolean + type: object + xssProtection: + description: Determine whether CloudFront includes the X-XSS-Protection + HTTP response header and the header’s value. See XSS Protection + for more information. 
+ properties: + modeBlock: + description: Whether CloudFront includes the mode=block + directive in the X-XSS-Protection header. + type: boolean + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + protection: + description: A Boolean value that determines the value + of the X-XSS-Protection HTTP response header. When this + setting is true, the value of the X-XSS-Protection header + is 1. When this setting is false, the value of the X-XSS-Protection + header is 0. + type: boolean + reportUri: + description: A reporting URI, which CloudFront uses as + the value of the report directive in the X-XSS-Protection + header. You cannot specify a report_uri when mode_block + is true. + type: string + type: object + type: object + serverTimingHeadersConfig: + description: A configuration for enabling the Server-Timing header + in HTTP responses sent from CloudFront. See Server Timing Headers + Config for more information. + properties: + enabled: + description: A Whether CloudFront adds the Server-Timing header + to HTTP responses that it sends in response to requests + that match a cache behavior that's associated with this + response headers policy. + type: boolean + samplingRate: + description: 'A number 0–100 (inclusive) that specifies the + percentage of responses that you want CloudFront to add + the Server-Timing header to. Valid range: Minimum value + of 0.0. Maximum value of 100.0.' + type: number + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ResponseHeadersPolicyStatus defines the observed state of + ResponseHeadersPolicy. + properties: + atProvider: + properties: + comment: + description: A comment to describe the response headers policy. + The comment cannot be longer than 128 characters. 
+ type: string + corsConfig: + description: A configuration for a set of HTTP response headers + that are used for Cross-Origin Resource Sharing (CORS). See + Cors Config for more information. + properties: + accessControlAllowCredentials: + description: A Boolean value that CloudFront uses as the value + for the Access-Control-Allow-Credentials HTTP response header. + type: boolean + accessControlAllowHeaders: + description: Object that contains an attribute items that + contains a list of HTTP header names that CloudFront includes + as values for the Access-Control-Allow-Headers HTTP response + header. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlAllowMethods: + description: 'Object that contains an attribute items that + contains a list of HTTP methods that CloudFront includes + as values for the Access-Control-Allow-Methods HTTP response + header. Valid values: GET | POST | OPTIONS | PUT | DELETE + | HEAD | ALL' + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlAllowOrigins: + description: Object that contains an attribute items that + contains a list of origins that CloudFront can use as the + value for the Access-Control-Allow-Origin HTTP response + header. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlExposeHeaders: + description: Object that contains an attribute items that + contains a list of HTTP headers that CloudFront includes + as values for the Access-Control-Expose-Headers HTTP response + header. + properties: + items: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + accessControlMaxAgeSec: + description: A number that CloudFront uses as the value for + the Access-Control-Max-Age HTTP response header. 
+ type: number + originOverride: + description: A Boolean value that determines how CloudFront + behaves for the HTTP response header. + type: boolean + type: object + customHeadersConfig: + description: Object that contains an attribute items that contains + a list of custom headers. See Custom Header for more information. + properties: + items: + items: + properties: + header: + description: The HTTP response header name. + type: string + override: + description: Whether CloudFront overrides a response + header with the same name received from the origin + with the header specifies here. + type: boolean + value: + description: The value for the HTTP response header. + type: string + type: object + type: array + type: object + etag: + description: The current version of the response headers policy. + type: string + id: + description: The identifier for the response headers policy. + type: string + name: + description: A unique name to identify the response headers policy. + type: string + removeHeadersConfig: + description: A configuration for a set of HTTP headers to remove + from the HTTP response. Object that contains an attribute items + that contains a list of headers. See Remove Header for more + information. + properties: + items: + items: + properties: + header: + description: The HTTP response header name. + type: string + type: object + type: array + type: object + securityHeadersConfig: + description: A configuration for a set of security-related HTTP + response headers. See Security Headers Config for more information. + properties: + contentSecurityPolicy: + description: The policy directives and their values that CloudFront + includes as values for the Content-Security-Policy HTTP + response header. See Content Security Policy for more information. + properties: + contentSecurityPolicy: + description: The policy directives and their values that + CloudFront includes as values for the Content-Security-Policy + HTTP response header. 
See Content Security Policy for + more information. + type: string + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + type: object + contentTypeOptions: + description: Determines whether CloudFront includes the X-Content-Type-Options + HTTP response header with its value set to nosniff. See + Content Type Options for more information. + properties: + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + type: object + frameOptions: + description: Determines whether CloudFront includes the X-Frame-Options + HTTP response header and the header’s value. See Frame Options + for more information. + properties: + frameOption: + description: 'The value of the X-Frame-Options HTTP response + header. Valid values: DENY | SAMEORIGIN' + type: string + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + type: object + referrerPolicy: + description: Determines whether CloudFront includes the Referrer-Policy + HTTP response header and the header’s value. See Referrer + Policy for more information. + properties: + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + referrerPolicy: + description: Determines whether CloudFront includes the + Referrer-Policy HTTP response header and the header’s + value. See Referrer Policy for more information. + type: string + type: object + strictTransportSecurity: + description: Determines whether CloudFront includes the Strict-Transport-Security + HTTP response header and the header’s value. See Strict + Transport Security for more information. 
+ properties: + accessControlMaxAgeSec: + description: A number that CloudFront uses as the value + for the Access-Control-Max-Age HTTP response header. + type: number + includeSubdomains: + description: Whether CloudFront includes the includeSubDomains + directive in the Strict-Transport-Security HTTP response + header. + type: boolean + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + preload: + description: Whether CloudFront includes the preload directive + in the Strict-Transport-Security HTTP response header. + type: boolean + type: object + xssProtection: + description: Determine whether CloudFront includes the X-XSS-Protection + HTTP response header and the header’s value. See XSS Protection + for more information. + properties: + modeBlock: + description: Whether CloudFront includes the mode=block + directive in the X-XSS-Protection header. + type: boolean + override: + description: Whether CloudFront overrides a response header + with the same name received from the origin with the + header specifies here. + type: boolean + protection: + description: A Boolean value that determines the value + of the X-XSS-Protection HTTP response header. When this + setting is true, the value of the X-XSS-Protection header + is 1. When this setting is false, the value of the X-XSS-Protection + header is 0. + type: boolean + reportUri: + description: A reporting URI, which CloudFront uses as + the value of the report directive in the X-XSS-Protection + header. You cannot specify a report_uri when mode_block + is true. + type: string + type: object + type: object + serverTimingHeadersConfig: + description: A configuration for enabling the Server-Timing header + in HTTP responses sent from CloudFront. See Server Timing Headers + Config for more information. 
+ properties: + enabled: + description: A Whether CloudFront adds the Server-Timing header + to HTTP responses that it sends in response to requests + that match a cache behavior that's associated with this + response headers policy. + type: boolean + samplingRate: + description: 'A number 0–100 (inclusive) that specifies the + percentage of responses that you want CloudFront to add + the Server-Timing header to. Valid range: Minimum value + of 0.0. Maximum value of 100.0.' + type: number + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudsearch.aws.upbound.io_domains.yaml b/package/crds/cloudsearch.aws.upbound.io_domains.yaml index 381916e30e..9ee7503262 100644 --- a/package/crds/cloudsearch.aws.upbound.io_domains.yaml +++ b/package/crds/cloudsearch.aws.upbound.io_domains.yaml @@ -603,3 +603,576 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Domain is the Schema for the Domains API. Provides an CloudSearch + domain resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DomainSpec defines the desired state of Domain + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + endpointOptions: + description: Domain endpoint options. Documented below. + properties: + enforceHttps: + description: Enables or disables the requirement that all + requests to the domain arrive over HTTPS. + type: boolean + tlsSecurityPolicy: + description: The minimum required TLS version. See the AWS + documentation for valid values. + type: string + type: object + indexField: + description: The index fields for documents added to the domain. + Documented below. + items: + properties: + analysisScheme: + description: The analysis scheme you want to use for a text + field. The analysis scheme specifies the language-specific + text processing options that are used during indexing. + type: string + defaultValue: + description: The default value for the field. This value + is used when no value is specified for the field in the + document data. + type: string + facet: + description: You can get facet information by enabling this. + type: boolean + highlight: + description: You can highlight information. 
+ type: boolean + name: + description: The name of the CloudSearch domain. + type: string + return: + description: You can enable returning the value of all searchable + fields. + type: boolean + search: + description: You can set whether this index should be searchable + or not. + type: boolean + sort: + description: You can enable the property to be sortable. + type: boolean + sourceFields: + description: A comma-separated list of source fields to + map to the field. Specifying a source field copies data + from one field to another, enabling you to use the same + source data in different ways by configuring different + options for the fields. + type: string + type: + description: 'The field type. Valid values: date, date-array, + double, double-array, int, int-array, literal, literal-array, + text, text-array.' + type: string + type: object + type: array + multiAz: + description: Whether or not to maintain extra instances for the + domain in a second Availability Zone to ensure high availability. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + scalingParameters: + description: Domain scaling parameters. Documented below. + properties: + desiredInstanceType: + description: The instance type that you want to preconfigure + for your domain. See the AWS documentation for valid values. + type: string + desiredPartitionCount: + description: The number of partitions you want to preconfigure + for your domain. Only valid when you select search.2xlarge + as the instance type. + type: number + desiredReplicationCount: + description: The number of replicas you want to preconfigure + for each index partition. + type: number + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + endpointOptions: + description: Domain endpoint options. Documented below. + properties: + enforceHttps: + description: Enables or disables the requirement that all + requests to the domain arrive over HTTPS. + type: boolean + tlsSecurityPolicy: + description: The minimum required TLS version. See the AWS + documentation for valid values. + type: string + type: object + indexField: + description: The index fields for documents added to the domain. + Documented below. + items: + properties: + analysisScheme: + description: The analysis scheme you want to use for a text + field. The analysis scheme specifies the language-specific + text processing options that are used during indexing. + type: string + defaultValue: + description: The default value for the field. This value + is used when no value is specified for the field in the + document data. + type: string + facet: + description: You can get facet information by enabling this. + type: boolean + highlight: + description: You can highlight information. + type: boolean + name: + description: The name of the CloudSearch domain. + type: string + return: + description: You can enable returning the value of all searchable + fields. + type: boolean + search: + description: You can set whether this index should be searchable + or not. + type: boolean + sort: + description: You can enable the property to be sortable. 
+ type: boolean + sourceFields: + description: A comma-separated list of source fields to + map to the field. Specifying a source field copies data + from one field to another, enabling you to use the same + source data in different ways by configuring different + options for the fields. + type: string + type: + description: 'The field type. Valid values: date, date-array, + double, double-array, int, int-array, literal, literal-array, + text, text-array.' + type: string + type: object + type: array + multiAz: + description: Whether or not to maintain extra instances for the + domain in a second Availability Zone to ensure high availability. + type: boolean + scalingParameters: + description: Domain scaling parameters. Documented below. + properties: + desiredInstanceType: + description: The instance type that you want to preconfigure + for your domain. See the AWS documentation for valid values. + type: string + desiredPartitionCount: + description: The number of partitions you want to preconfigure + for your domain. Only valid when you select search.2xlarge + as the instance type. + type: number + desiredReplicationCount: + description: The number of replicas you want to preconfigure + for each index partition. + type: number + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DomainStatus defines the observed state of Domain. + properties: + atProvider: + properties: + arn: + description: The domain's ARN. + type: string + documentServiceEndpoint: + description: The service endpoint for updating documents in a + search domain. + type: string + domainId: + description: An internally generated unique identifier for the + domain. + type: string + endpointOptions: + description: Domain endpoint options. Documented below. + properties: + enforceHttps: + description: Enables or disables the requirement that all + requests to the domain arrive over HTTPS. + type: boolean + tlsSecurityPolicy: + description: The minimum required TLS version. See the AWS + documentation for valid values. + type: string + type: object + id: + type: string + indexField: + description: The index fields for documents added to the domain. + Documented below. + items: + properties: + analysisScheme: + description: The analysis scheme you want to use for a text + field. 
The analysis scheme specifies the language-specific + text processing options that are used during indexing. + type: string + defaultValue: + description: The default value for the field. This value + is used when no value is specified for the field in the + document data. + type: string + facet: + description: You can get facet information by enabling this. + type: boolean + highlight: + description: You can highlight information. + type: boolean + name: + description: The name of the CloudSearch domain. + type: string + return: + description: You can enable returning the value of all searchable + fields. + type: boolean + search: + description: You can set whether this index should be searchable + or not. + type: boolean + sort: + description: You can enable the property to be sortable. + type: boolean + sourceFields: + description: A comma-separated list of source fields to + map to the field. Specifying a source field copies data + from one field to another, enabling you to use the same + source data in different ways by configuring different + options for the fields. + type: string + type: + description: 'The field type. Valid values: date, date-array, + double, double-array, int, int-array, literal, literal-array, + text, text-array.' + type: string + type: object + type: array + multiAz: + description: Whether or not to maintain extra instances for the + domain in a second Availability Zone to ensure high availability. + type: boolean + scalingParameters: + description: Domain scaling parameters. Documented below. + properties: + desiredInstanceType: + description: The instance type that you want to preconfigure + for your domain. See the AWS documentation for valid values. + type: string + desiredPartitionCount: + description: The number of partitions you want to preconfigure + for your domain. Only valid when you select search.2xlarge + as the instance type. 
+ type: number + desiredReplicationCount: + description: The number of replicas you want to preconfigure + for each index partition. + type: number + type: object + searchServiceEndpoint: + description: The service endpoint for requesting search results + from a search domain. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudwatch.aws.upbound.io_compositealarms.yaml b/package/crds/cloudwatch.aws.upbound.io_compositealarms.yaml index d47135e53c..91225af4fd 100644 --- a/package/crds/cloudwatch.aws.upbound.io_compositealarms.yaml +++ b/package/crds/cloudwatch.aws.upbound.io_compositealarms.yaml @@ -857,3 +857,836 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CompositeAlarm is the Schema for the CompositeAlarms API. Provides + a CloudWatch Composite Alarm resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CompositeAlarmSpec defines the desired state of CompositeAlarm + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + actionsEnabled: + description: Indicates whether actions should be executed during + any changes to the alarm state of the composite alarm. Defaults + to true. + type: boolean + actionsSuppressor: + description: Actions will be suppressed if the suppressor alarm + is in the ALARM state. + properties: + alarm: + description: Can be an AlarmName or an Amazon Resource Name + (ARN) from an existing alarm. + type: string + extensionPeriod: + description: The maximum time in seconds that the composite + alarm waits after suppressor alarm goes out of the ALARM + state. After this time, the composite alarm performs its + actions. + type: number + waitPeriod: + description: The maximum time in seconds that the composite + alarm waits for the suppressor alarm to go into the ALARM + state. After this time, the composite alarm performs its + actions. + type: number + type: object + alarmActions: + description: The set of actions to execute when this alarm transitions + to the ALARM state from any other state. 
Each action is specified + as an ARN. Up to 5 actions are allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + alarmActionsRefs: + description: References to Topic in sns to populate alarmActions. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + alarmActionsSelector: + description: Selector for a list of Topic in sns to populate alarmActions. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + alarmDescription: + description: The description for the composite alarm. + type: string + alarmRule: + description: An expression that specifies which other alarms are + to be evaluated to determine this composite alarm's state. For + syntax, see Creating a Composite Alarm. The maximum length is + 10240 characters. + type: string + insufficientDataActions: + description: The set of actions to execute when this alarm transitions + to the INSUFFICIENT_DATA state from any other state. Each action + is specified as an ARN. Up to 5 actions are allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + okActions: + description: The set of actions to execute when this alarm transitions + to an OK state from any other state. Each action is specified + as an ARN. Up to 5 actions are allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + okActionsRefs: + description: References to Topic in sns to populate okActions. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + okActionsSelector: + description: Selector for a list of Topic in sns to populate okActions. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + actionsEnabled: + description: Indicates whether actions should be executed during + any changes to the alarm state of the composite alarm. Defaults + to true. + type: boolean + actionsSuppressor: + description: Actions will be suppressed if the suppressor alarm + is in the ALARM state. + properties: + alarm: + description: Can be an AlarmName or an Amazon Resource Name + (ARN) from an existing alarm. + type: string + extensionPeriod: + description: The maximum time in seconds that the composite + alarm waits after suppressor alarm goes out of the ALARM + state. After this time, the composite alarm performs its + actions. + type: number + waitPeriod: + description: The maximum time in seconds that the composite + alarm waits for the suppressor alarm to go into the ALARM + state. After this time, the composite alarm performs its + actions. + type: number + type: object + alarmActions: + description: The set of actions to execute when this alarm transitions + to the ALARM state from any other state. Each action is specified + as an ARN. Up to 5 actions are allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + alarmActionsRefs: + description: References to Topic in sns to populate alarmActions. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + alarmActionsSelector: + description: Selector for a list of Topic in sns to populate alarmActions. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + alarmDescription: + description: The description for the composite alarm. + type: string + alarmRule: + description: An expression that specifies which other alarms are + to be evaluated to determine this composite alarm's state. For + syntax, see Creating a Composite Alarm. The maximum length is + 10240 characters. + type: string + insufficientDataActions: + description: The set of actions to execute when this alarm transitions + to the INSUFFICIENT_DATA state from any other state. Each action + is specified as an ARN. Up to 5 actions are allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + okActions: + description: The set of actions to execute when this alarm transitions + to an OK state from any other state. Each action is specified + as an ARN. Up to 5 actions are allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + okActionsRefs: + description: References to Topic in sns to populate okActions. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + okActionsSelector: + description: Selector for a list of Topic in sns to populate okActions. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.alarmRule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.alarmRule) + || (has(self.initProvider) && has(self.initProvider.alarmRule))' + status: + description: CompositeAlarmStatus defines the observed state of CompositeAlarm. + properties: + atProvider: + properties: + actionsEnabled: + description: Indicates whether actions should be executed during + any changes to the alarm state of the composite alarm. Defaults + to true. 
+ type: boolean + actionsSuppressor: + description: Actions will be suppressed if the suppressor alarm + is in the ALARM state. + properties: + alarm: + description: Can be an AlarmName or an Amazon Resource Name + (ARN) from an existing alarm. + type: string + extensionPeriod: + description: The maximum time in seconds that the composite + alarm waits after suppressor alarm goes out of the ALARM + state. After this time, the composite alarm performs its + actions. + type: number + waitPeriod: + description: The maximum time in seconds that the composite + alarm waits for the suppressor alarm to go into the ALARM + state. After this time, the composite alarm performs its + actions. + type: number + type: object + alarmActions: + description: The set of actions to execute when this alarm transitions + to the ALARM state from any other state. Each action is specified + as an ARN. Up to 5 actions are allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + alarmDescription: + description: The description for the composite alarm. + type: string + alarmRule: + description: An expression that specifies which other alarms are + to be evaluated to determine this composite alarm's state. For + syntax, see Creating a Composite Alarm. The maximum length is + 10240 characters. + type: string + arn: + description: The ARN of the composite alarm. + type: string + id: + description: The ID of the composite alarm resource, which is + equivalent to its alarm_name. + type: string + insufficientDataActions: + description: The set of actions to execute when this alarm transitions + to the INSUFFICIENT_DATA state from any other state. Each action + is specified as an ARN. Up to 5 actions are allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + okActions: + description: The set of actions to execute when this alarm transitions + to an OK state from any other state. Each action is specified + as an ARN. Up to 5 actions are allowed. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudwatch.aws.upbound.io_metricalarms.yaml b/package/crds/cloudwatch.aws.upbound.io_metricalarms.yaml index 91056161b5..7279f610af 100644 --- a/package/crds/cloudwatch.aws.upbound.io_metricalarms.yaml +++ b/package/crds/cloudwatch.aws.upbound.io_metricalarms.yaml @@ -939,3 +939,918 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MetricAlarm is the Schema for the MetricAlarms API. Provides + a CloudWatch Metric Alarm resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MetricAlarmSpec defines the desired state of MetricAlarm + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + actionsEnabled: + description: Indicates whether or not actions should be executed + during any changes to the alarm's state. Defaults to true. + type: boolean + alarmActions: + description: The list of actions to execute when this alarm transitions + into an ALARM state from any other state. Each action is specified + as an Amazon Resource Name (ARN). + items: + type: string + type: array + x-kubernetes-list-type: set + alarmDescription: + description: The description for the alarm. + type: string + comparisonOperator: + description: 'The arithmetic operation to use when comparing the + specified Statistic and Threshold. The specified Statistic value + is used as the first operand. Either of the following is supported: + GreaterThanOrEqualToThreshold, GreaterThanThreshold, LessThanThreshold, + LessThanOrEqualToThreshold. Additionally, the values LessThanLowerOrGreaterThanUpperThreshold, + LessThanLowerThreshold, and GreaterThanUpperThreshold are used + only for alarms based on anomaly detection models.' 
+ type: string + datapointsToAlarm: + description: The number of datapoints that must be breaching to + trigger the alarm. + type: number + dimensions: + additionalProperties: + type: string + description: The dimensions for the alarm's associated metric. For + the list of available dimensions see the AWS documentation here. + type: object + x-kubernetes-map-type: granular + evaluateLowSampleCountPercentiles: + description: |- + Used only for alarms based on percentiles. + If you specify ignore, the alarm state will not change during periods with too few data points to be statistically significant. + If you specify evaluate or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. + The following values are supported: ignore, and evaluate. + type: string + evaluationPeriods: + description: The number of periods over which data is compared + to the specified threshold. + type: number + extendedStatistic: + description: The percentile statistic for the metric associated + with the alarm. Specify a value between p0.0 and p100. + type: string + insufficientDataActions: + description: The list of actions to execute when this alarm transitions + into an INSUFFICIENT_DATA state from any other state. Each action + is specified as an Amazon Resource Name (ARN). + items: + type: string + type: array + x-kubernetes-list-type: set + metricName: + description: |- + The name for the alarm's associated metric. + See docs for supported metrics. + type: string + metricQuery: + description: Enables you to create an alarm based on a metric + math expression. You may specify at most 20. + items: + properties: + accountId: + description: The ID of the account where the metrics are + located, if this is a cross-account alarm. + type: string + expression: + description: The math expression to be performed on the + returned data, if this object is performing a math expression. 
+ This expression can use the id of the other metrics to + refer to those metrics, and can also use the id of other + expressions to use the result of those expressions. For + more information about metric math expressions, see Metric + Math Syntax and Functions in the Amazon CloudWatch User + Guide. + type: string + id: + description: A short name used to tie this object to the + results in the response. If you are performing math expressions + on this set of data, this name represents that data and + can serve as a variable in the mathematical expression. + The valid characters are letters, numbers, and underscore. + The first character must be a lowercase letter. + type: string + label: + description: A human-readable label for this metric or expression. + This is especially useful if this is an expression, so + that you know what the value represents. + type: string + metric: + description: The metric to be returned, along with statistics, + period, and units. Use this parameter only if this object + is retrieving a metric and not performing a math expression + on returned data. + properties: + dimensions: + additionalProperties: + type: string + description: The dimensions for this metric. For the + list of available dimensions see the AWS documentation + here. + type: object + x-kubernetes-map-type: granular + metricName: + description: |- + The name for this metric. + See docs for supported metrics. + type: string + namespace: + description: |- + The namespace for this metric. See docs for the list of namespaces. + See docs for supported metrics. + type: string + period: + description: |- + Granularity in seconds of returned data points. + For metrics with regular resolution, valid values are any multiple of 60. + For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + type: number + stat: + description: |- + The statistic to apply to this metric. + See docs for supported statistics. 
+ type: string + unit: + description: The unit for this metric. + type: string + type: object + period: + description: |- + Granularity in seconds of returned data points. + For metrics with regular resolution, valid values are any multiple of 60. + For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + type: number + returnData: + description: Specify exactly one metric_query to be true + to use that metric_query result as the alarm. + type: boolean + type: object + type: array + namespace: + description: |- + The namespace for the alarm's associated metric. See docs for the list of namespaces. + See docs for supported metrics. + type: string + okActions: + description: The list of actions to execute when this alarm transitions + into an OK state from any other state. Each action is specified + as an Amazon Resource Name (ARN). + items: + type: string + type: array + x-kubernetes-list-type: set + period: + description: |- + The period in seconds over which the specified statistic is applied. + Valid values are 10, 30, or any multiple of 60. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + statistic: + description: |- + The statistic to apply to the alarm's associated metric. + Either of the following is supported: SampleCount, Average, Sum, Minimum, Maximum + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + threshold: + description: The value against which the specified statistic is + compared. This parameter is required for alarms based on static + thresholds, but should not be used for alarms based on anomaly + detection models. + type: number + thresholdMetricId: + description: If this is an alarm based on an anomaly detection + model, make this value match the ID of the ANOMALY_DETECTION_BAND + function. 
+ type: string + treatMissingData: + description: 'Sets how this alarm is to handle missing data points. + The following values are supported: missing, ignore, breaching + and notBreaching. Defaults to missing.' + type: string + unit: + description: The unit for the alarm's associated metric. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + actionsEnabled: + description: Indicates whether or not actions should be executed + during any changes to the alarm's state. Defaults to true. + type: boolean + alarmActions: + description: The list of actions to execute when this alarm transitions + into an ALARM state from any other state. Each action is specified + as an Amazon Resource Name (ARN). + items: + type: string + type: array + x-kubernetes-list-type: set + alarmDescription: + description: The description for the alarm. + type: string + comparisonOperator: + description: 'The arithmetic operation to use when comparing the + specified Statistic and Threshold. The specified Statistic value + is used as the first operand. Either of the following is supported: + GreaterThanOrEqualToThreshold, GreaterThanThreshold, LessThanThreshold, + LessThanOrEqualToThreshold. 
Additionally, the values LessThanLowerOrGreaterThanUpperThreshold, + LessThanLowerThreshold, and GreaterThanUpperThreshold are used + only for alarms based on anomaly detection models.' + type: string + datapointsToAlarm: + description: The number of datapoints that must be breaching to + trigger the alarm. + type: number + dimensions: + additionalProperties: + type: string + description: The dimensions for the alarm's associated metric. For + the list of available dimensions see the AWS documentation here. + type: object + x-kubernetes-map-type: granular + evaluateLowSampleCountPercentiles: + description: |- + Used only for alarms based on percentiles. + If you specify ignore, the alarm state will not change during periods with too few data points to be statistically significant. + If you specify evaluate or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. + The following values are supported: ignore, and evaluate. + type: string + evaluationPeriods: + description: The number of periods over which data is compared + to the specified threshold. + type: number + extendedStatistic: + description: The percentile statistic for the metric associated + with the alarm. Specify a value between p0.0 and p100. + type: string + insufficientDataActions: + description: The list of actions to execute when this alarm transitions + into an INSUFFICIENT_DATA state from any other state. Each action + is specified as an Amazon Resource Name (ARN). + items: + type: string + type: array + x-kubernetes-list-type: set + metricName: + description: |- + The name for the alarm's associated metric. + See docs for supported metrics. + type: string + metricQuery: + description: Enables you to create an alarm based on a metric + math expression. You may specify at most 20. + items: + properties: + accountId: + description: The ID of the account where the metrics are + located, if this is a cross-account alarm. 
+ type: string + expression: + description: The math expression to be performed on the + returned data, if this object is performing a math expression. + This expression can use the id of the other metrics to + refer to those metrics, and can also use the id of other + expressions to use the result of those expressions. For + more information about metric math expressions, see Metric + Math Syntax and Functions in the Amazon CloudWatch User + Guide. + type: string + id: + description: A short name used to tie this object to the + results in the response. If you are performing math expressions + on this set of data, this name represents that data and + can serve as a variable in the mathematical expression. + The valid characters are letters, numbers, and underscore. + The first character must be a lowercase letter. + type: string + label: + description: A human-readable label for this metric or expression. + This is especially useful if this is an expression, so + that you know what the value represents. + type: string + metric: + description: The metric to be returned, along with statistics, + period, and units. Use this parameter only if this object + is retrieving a metric and not performing a math expression + on returned data. + properties: + dimensions: + additionalProperties: + type: string + description: The dimensions for this metric. For the + list of available dimensions see the AWS documentation + here. + type: object + x-kubernetes-map-type: granular + metricName: + description: |- + The name for this metric. + See docs for supported metrics. + type: string + namespace: + description: |- + The namespace for this metric. See docs for the list of namespaces. + See docs for supported metrics. + type: string + period: + description: |- + Granularity in seconds of returned data points. + For metrics with regular resolution, valid values are any multiple of 60. + For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. 
+ type: number + stat: + description: |- + The statistic to apply to this metric. + See docs for supported statistics. + type: string + unit: + description: The unit for this metric. + type: string + type: object + period: + description: |- + Granularity in seconds of returned data points. + For metrics with regular resolution, valid values are any multiple of 60. + For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + type: number + returnData: + description: Specify exactly one metric_query to be true + to use that metric_query result as the alarm. + type: boolean + type: object + type: array + namespace: + description: |- + The namespace for the alarm's associated metric. See docs for the list of namespaces. + See docs for supported metrics. + type: string + okActions: + description: The list of actions to execute when this alarm transitions + into an OK state from any other state. Each action is specified + as an Amazon Resource Name (ARN). + items: + type: string + type: array + x-kubernetes-list-type: set + period: + description: |- + The period in seconds over which the specified statistic is applied. + Valid values are 10, 30, or any multiple of 60. + type: number + statistic: + description: |- + The statistic to apply to the alarm's associated metric. + Either of the following is supported: SampleCount, Average, Sum, Minimum, Maximum + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + threshold: + description: The value against which the specified statistic is + compared. This parameter is required for alarms based on static + thresholds, but should not be used for alarms based on anomaly + detection models. + type: number + thresholdMetricId: + description: If this is an alarm based on an anomaly detection + model, make this value match the ID of the ANOMALY_DETECTION_BAND + function. 
+ type: string + treatMissingData: + description: 'Sets how this alarm is to handle missing data points. + The following values are supported: missing, ignore, breaching + and notBreaching. Defaults to missing.' + type: string + unit: + description: The unit for the alarm's associated metric. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.comparisonOperator is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.comparisonOperator) + || (has(self.initProvider) && has(self.initProvider.comparisonOperator))' + - message: spec.forProvider.evaluationPeriods is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.evaluationPeriods) + || (has(self.initProvider) && has(self.initProvider.evaluationPeriods))' + status: + description: MetricAlarmStatus defines the observed state of MetricAlarm. + properties: + atProvider: + properties: + actionsEnabled: + description: Indicates whether or not actions should be executed + during any changes to the alarm's state. Defaults to true. + type: boolean + alarmActions: + description: The list of actions to execute when this alarm transitions + into an ALARM state from any other state. Each action is specified + as an Amazon Resource Name (ARN). + items: + type: string + type: array + x-kubernetes-list-type: set + alarmDescription: + description: The description for the alarm. + type: string + arn: + description: The ARN of the CloudWatch Metric Alarm. + type: string + comparisonOperator: + description: 'The arithmetic operation to use when comparing the + specified Statistic and Threshold. The specified Statistic value + is used as the first operand. Either of the following is supported: + GreaterThanOrEqualToThreshold, GreaterThanThreshold, LessThanThreshold, + LessThanOrEqualToThreshold. 
Additionally, the values LessThanLowerOrGreaterThanUpperThreshold, + LessThanLowerThreshold, and GreaterThanUpperThreshold are used + only for alarms based on anomaly detection models.' + type: string + datapointsToAlarm: + description: The number of datapoints that must be breaching to + trigger the alarm. + type: number + dimensions: + additionalProperties: + type: string + description: The dimensions for the alarm's associated metric. For + the list of available dimensions see the AWS documentation here. + type: object + x-kubernetes-map-type: granular + evaluateLowSampleCountPercentiles: + description: |- + Used only for alarms based on percentiles. + If you specify ignore, the alarm state will not change during periods with too few data points to be statistically significant. + If you specify evaluate or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. + The following values are supported: ignore, and evaluate. + type: string + evaluationPeriods: + description: The number of periods over which data is compared + to the specified threshold. + type: number + extendedStatistic: + description: The percentile statistic for the metric associated + with the alarm. Specify a value between p0.0 and p100. + type: string + id: + description: The ID of the health check. + type: string + insufficientDataActions: + description: The list of actions to execute when this alarm transitions + into an INSUFFICIENT_DATA state from any other state. Each action + is specified as an Amazon Resource Name (ARN). + items: + type: string + type: array + x-kubernetes-list-type: set + metricName: + description: |- + The name for the alarm's associated metric. + See docs for supported metrics. + type: string + metricQuery: + description: Enables you to create an alarm based on a metric + math expression. You may specify at most 20. 
+ items: + properties: + accountId: + description: The ID of the account where the metrics are + located, if this is a cross-account alarm. + type: string + expression: + description: The math expression to be performed on the + returned data, if this object is performing a math expression. + This expression can use the id of the other metrics to + refer to those metrics, and can also use the id of other + expressions to use the result of those expressions. For + more information about metric math expressions, see Metric + Math Syntax and Functions in the Amazon CloudWatch User + Guide. + type: string + id: + description: A short name used to tie this object to the + results in the response. If you are performing math expressions + on this set of data, this name represents that data and + can serve as a variable in the mathematical expression. + The valid characters are letters, numbers, and underscore. + The first character must be a lowercase letter. + type: string + label: + description: A human-readable label for this metric or expression. + This is especially useful if this is an expression, so + that you know what the value represents. + type: string + metric: + description: The metric to be returned, along with statistics, + period, and units. Use this parameter only if this object + is retrieving a metric and not performing a math expression + on returned data. + properties: + dimensions: + additionalProperties: + type: string + description: The dimensions for this metric. For the + list of available dimensions see the AWS documentation + here. + type: object + x-kubernetes-map-type: granular + metricName: + description: |- + The name for this metric. + See docs for supported metrics. + type: string + namespace: + description: |- + The namespace for this metric. See docs for the list of namespaces. + See docs for supported metrics. + type: string + period: + description: |- + Granularity in seconds of returned data points. 
+ For metrics with regular resolution, valid values are any multiple of 60. + For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + type: number + stat: + description: |- + The statistic to apply to this metric. + See docs for supported statistics. + type: string + unit: + description: The unit for this metric. + type: string + type: object + period: + description: |- + Granularity in seconds of returned data points. + For metrics with regular resolution, valid values are any multiple of 60. + For high-resolution metrics, valid values are 1, 5, 10, 30, or any multiple of 60. + type: number + returnData: + description: Specify exactly one metric_query to be true + to use that metric_query result as the alarm. + type: boolean + type: object + type: array + namespace: + description: |- + The namespace for the alarm's associated metric. See docs for the list of namespaces. + See docs for supported metrics. + type: string + okActions: + description: The list of actions to execute when this alarm transitions + into an OK state from any other state. Each action is specified + as an Amazon Resource Name (ARN). + items: + type: string + type: array + x-kubernetes-list-type: set + period: + description: |- + The period in seconds over which the specified statistic is applied. + Valid values are 10, 30, or any multiple of 60. + type: number + statistic: + description: |- + The statistic to apply to the alarm's associated metric. + Either of the following is supported: SampleCount, Average, Sum, Minimum, Maximum + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + threshold: + description: The value against which the specified statistic is + compared. This parameter is required for alarms based on static + thresholds, but should not be used for alarms based on anomaly + detection models. + type: number + thresholdMetricId: + description: If this is an alarm based on an anomaly detection + model, make this value match the ID of the ANOMALY_DETECTION_BAND + function. + type: string + treatMissingData: + description: 'Sets how this alarm is to handle missing data points. + The following values are supported: missing, ignore, breaching + and notBreaching. Defaults to missing.' + type: string + unit: + description: The unit for the alarm's associated metric. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudwatchevents.aws.upbound.io_connections.yaml b/package/crds/cloudwatchevents.aws.upbound.io_connections.yaml index a8fb1a1f9f..c6d3a1af12 100644 --- a/package/crds/cloudwatchevents.aws.upbound.io_connections.yaml +++ b/package/crds/cloudwatchevents.aws.upbound.io_connections.yaml @@ -1278,3 +1278,1200 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Connection is the Schema for the Connections API. Provides an + EventBridge connection resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConnectionSpec defines the desired state of Connection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authParameters: + description: Parameters used for authorization. A maximum of 1 + are allowed. Documented below. + properties: + apiKey: + description: Parameters used for API_KEY authorization. An + API key to include in the header for each authentication + request. A maximum of 1 are allowed. Conflicts with basic + and oauth. Documented below. + properties: + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored in AWS Secrets + Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + basic: + description: Parameters used for BASIC authorization. A maximum + of 1 are allowed. Conflicts with api_key and oauth. Documented + below. + properties: + passwordSecretRef: + description: A password for the authorization. 
Created + and stored in AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: A username for the authorization. + type: string + type: object + invocationHttpParameters: + description: Invocation Http Parameters are additional credentials + used to sign each Invocation of the ApiDestination created + from this Connection. If the ApiDestination Rule Target + has additional HttpParameters, the values will be merged + together, with the Connection Invocation Http Parameters + taking precedence. Secret values are stored and managed + by AWS Secrets Manager. A maximum of 1 are allowed. Documented + below. + properties: + body: + description: 'Contains additional body string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, which + cannot exceed 64 KB. Each parameter can contain the + following:' + items: + properties: + isValueSecret: + description: Specified whether the value is secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored in + AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + header: + description: 'Contains additional header parameters for + the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, which + cannot exceed 64 KB. 
Each parameter can contain the + following:' + items: + properties: + isValueSecret: + description: Specified whether the value is secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored in + AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + queryString: + description: 'Contains additional query string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, which + cannot exceed 64 KB. Each parameter can contain the + following:' + items: + properties: + isValueSecret: + description: Specified whether the value is secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored in + AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + oauth: + description: Parameters used for OAUTH_CLIENT_CREDENTIALS + authorization. A maximum of 1 are allowed. Conflicts with + basic and api_key. Documented below. + properties: + authorizationEndpoint: + description: The URL to the authorization endpoint. + type: string + clientParameters: + description: Contains the client parameters for OAuth + authorization. Contains the following two parameters. + properties: + clientId: + description: The client ID for the credentials to + use for authorization. Created and stored in AWS + Secrets Manager. 
+ type: string + clientSecretSecretRef: + description: The client secret for the credentials + to use for authorization. Created and stored in + AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + httpMethod: + description: A password for the authorization. Created + and stored in AWS Secrets Manager. + type: string + oauthHttpParameters: + description: OAuth Http Parameters are additional credentials + used to sign the request to the authorization endpoint + to exchange the OAuth Client information for an access + token. Secret values are stored and managed by AWS Secrets + Manager. A maximum of 1 are allowed. Documented below. + properties: + body: + description: 'Contains additional body string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, + which cannot exceed 64 KB. Each parameter can contain + the following:' + items: + properties: + isValueSecret: + description: Specified whether the value is + secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored + in AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + header: + description: 'Contains additional header parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, + which cannot exceed 64 KB. 
Each parameter can contain + the following:' + items: + properties: + isValueSecret: + description: Specified whether the value is + secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored + in AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + queryString: + description: 'Contains additional query string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, + which cannot exceed 64 KB. Each parameter can contain + the following:' + items: + properties: + isValueSecret: + description: Specified whether the value is + secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored + in AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + type: object + type: object + authorizationType: + description: Choose the type of authorization to use for the connection. + One of API_KEY,BASIC,OAUTH_CLIENT_CREDENTIALS. + type: string + description: + description: Enter a description for the connection. Maximum of + 512 characters. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authParameters: + description: Parameters used for authorization. A maximum of 1 + are allowed. Documented below. + properties: + apiKey: + description: Parameters used for API_KEY authorization. An + API key to include in the header for each authentication + request. A maximum of 1 are allowed. Conflicts with basic + and oauth. Documented below. + properties: + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored in AWS Secrets + Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + basic: + description: Parameters used for BASIC authorization. A maximum + of 1 are allowed. Conflicts with api_key and oauth. Documented + below. + properties: + passwordSecretRef: + description: A password for the authorization. Created + and stored in AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + username: + description: A username for the authorization. + type: string + required: + - passwordSecretRef + type: object + invocationHttpParameters: + description: Invocation Http Parameters are additional credentials + used to sign each Invocation of the ApiDestination created + from this Connection. If the ApiDestination Rule Target + has additional HttpParameters, the values will be merged + together, with the Connection Invocation Http Parameters + taking precedence. Secret values are stored and managed + by AWS Secrets Manager. A maximum of 1 are allowed. Documented + below. + properties: + body: + description: 'Contains additional body string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, which + cannot exceed 64 KB. Each parameter can contain the + following:' + items: + properties: + isValueSecret: + description: Specified whether the value is secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored in + AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + header: + description: 'Contains additional header parameters for + the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, which + cannot exceed 64 KB. Each parameter can contain the + following:' + items: + properties: + isValueSecret: + description: Specified whether the value is secret. + type: boolean + key: + description: Header Name. 
+ type: string + valueSecretRef: + description: Header Value. Created and stored in + AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + queryString: + description: 'Contains additional query string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, which + cannot exceed 64 KB. Each parameter can contain the + following:' + items: + properties: + isValueSecret: + description: Specified whether the value is secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored in + AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + oauth: + description: Parameters used for OAUTH_CLIENT_CREDENTIALS + authorization. A maximum of 1 are allowed. Conflicts with + basic and api_key. Documented below. + properties: + authorizationEndpoint: + description: The URL to the authorization endpoint. + type: string + clientParameters: + description: Contains the client parameters for OAuth + authorization. Contains the following two parameters. + properties: + clientId: + description: The client ID for the credentials to + use for authorization. Created and stored in AWS + Secrets Manager. + type: string + clientSecretSecretRef: + description: The client secret for the credentials + to use for authorization. Created and stored in + AWS Secrets Manager. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - clientSecretSecretRef + type: object + httpMethod: + description: A password for the authorization. Created + and stored in AWS Secrets Manager. + type: string + oauthHttpParameters: + description: OAuth Http Parameters are additional credentials + used to sign the request to the authorization endpoint + to exchange the OAuth Client information for an access + token. Secret values are stored and managed by AWS Secrets + Manager. A maximum of 1 are allowed. Documented below. + properties: + body: + description: 'Contains additional body string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, + which cannot exceed 64 KB. Each parameter can contain + the following:' + items: + properties: + isValueSecret: + description: Specified whether the value is + secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored + in AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + header: + description: 'Contains additional header parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, + which cannot exceed 64 KB. 
Each parameter can contain + the following:' + items: + properties: + isValueSecret: + description: Specified whether the value is + secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored + in AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + queryString: + description: 'Contains additional query string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, + which cannot exceed 64 KB. Each parameter can contain + the following:' + items: + properties: + isValueSecret: + description: Specified whether the value is + secret. + type: boolean + key: + description: Header Name. + type: string + valueSecretRef: + description: Header Value. Created and stored + in AWS Secrets Manager. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + type: object + type: object + authorizationType: + description: Choose the type of authorization to use for the connection. + One of API_KEY,BASIC,OAUTH_CLIENT_CREDENTIALS. + type: string + description: + description: Enter a description for the connection. Maximum of + 512 characters. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.authParameters is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authParameters) + || (has(self.initProvider) && has(self.initProvider.authParameters))' + - message: spec.forProvider.authorizationType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authorizationType) + || (has(self.initProvider) && has(self.initProvider.authorizationType))' + status: + description: ConnectionStatus defines the observed state of Connection. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the connection. + type: string + authParameters: + description: Parameters used for authorization. A maximum of 1 + are allowed. Documented below. + properties: + apiKey: + description: Parameters used for API_KEY authorization. An + API key to include in the header for each authentication + request. A maximum of 1 are allowed. Conflicts with basic + and oauth. Documented below. + properties: + key: + description: Header Name. + type: string + type: object + basic: + description: Parameters used for BASIC authorization. A maximum + of 1 are allowed. Conflicts with api_key and oauth. Documented + below. + properties: + username: + description: A username for the authorization. + type: string + type: object + invocationHttpParameters: + description: Invocation Http Parameters are additional credentials + used to sign each Invocation of the ApiDestination created + from this Connection. 
If the ApiDestination Rule Target + has additional HttpParameters, the values will be merged + together, with the Connection Invocation Http Parameters + taking precedence. Secret values are stored and managed + by AWS Secrets Manager. A maximum of 1 are allowed. Documented + below. + properties: + body: + description: 'Contains additional body string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, which + cannot exceed 64 KB. Each parameter can contain the + following:' + items: + properties: + isValueSecret: + description: Specified whether the value is secret. + type: boolean + key: + description: Header Name. + type: string + type: object + type: array + header: + description: 'Contains additional header parameters for + the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, which + cannot exceed 64 KB. Each parameter can contain the + following:' + items: + properties: + isValueSecret: + description: Specified whether the value is secret. + type: boolean + key: + description: Header Name. + type: string + type: object + type: array + queryString: + description: 'Contains additional query string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, which + cannot exceed 64 KB. Each parameter can contain the + following:' + items: + properties: + isValueSecret: + description: Specified whether the value is secret. + type: boolean + key: + description: Header Name. + type: string + type: object + type: array + type: object + oauth: + description: Parameters used for OAUTH_CLIENT_CREDENTIALS + authorization. A maximum of 1 are allowed. Conflicts with + basic and api_key. Documented below. 
+ properties: + authorizationEndpoint: + description: The URL to the authorization endpoint. + type: string + clientParameters: + description: Contains the client parameters for OAuth + authorization. Contains the following two parameters. + properties: + clientId: + description: The client ID for the credentials to + use for authorization. Created and stored in AWS + Secrets Manager. + type: string + type: object + httpMethod: + description: A password for the authorization. Created + and stored in AWS Secrets Manager. + type: string + oauthHttpParameters: + description: OAuth Http Parameters are additional credentials + used to sign the request to the authorization endpoint + to exchange the OAuth Client information for an access + token. Secret values are stored and managed by AWS Secrets + Manager. A maximum of 1 are allowed. Documented below. + properties: + body: + description: 'Contains additional body string parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, + which cannot exceed 64 KB. Each parameter can contain + the following:' + items: + properties: + isValueSecret: + description: Specified whether the value is + secret. + type: boolean + key: + description: Header Name. + type: string + type: object + type: array + header: + description: 'Contains additional header parameters + for the connection. You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, + which cannot exceed 64 KB. Each parameter can contain + the following:' + items: + properties: + isValueSecret: + description: Specified whether the value is + secret. + type: boolean + key: + description: Header Name. + type: string + type: object + type: array + queryString: + description: 'Contains additional query string parameters + for the connection. 
You can include up to 100 additional + body string parameters per request. Each additional + parameter counts towards the event payload size, + which cannot exceed 64 KB. Each parameter can contain + the following:' + items: + properties: + isValueSecret: + description: Specified whether the value is + secret. + type: boolean + key: + description: Header Name. + type: string + type: object + type: array + type: object + type: object + type: object + authorizationType: + description: Choose the type of authorization to use for the connection. + One of API_KEY,BASIC,OAUTH_CLIENT_CREDENTIALS. + type: string + description: + description: Enter a description for the connection. Maximum of + 512 characters. + type: string + id: + type: string + secretArn: + description: The Amazon Resource Name (ARN) of the secret created + from the authorization parameters specified for the connection. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudwatchevents.aws.upbound.io_permissions.yaml b/package/crds/cloudwatchevents.aws.upbound.io_permissions.yaml index d254df60be..dec1c3cc48 100644 --- a/package/crds/cloudwatchevents.aws.upbound.io_permissions.yaml +++ b/package/crds/cloudwatchevents.aws.upbound.io_permissions.yaml @@ -753,3 +753,732 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Permission is the Schema for the Permissions API. Provides a + resource to create an EventBridge permission to support cross-account events + in the current account default event bus. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PermissionSpec defines the desired state of Permission + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: The action that you are enabling the other account + to perform. Defaults to events:PutEvents. + type: string + condition: + description: Configuration block to limit the event bus permissions + you are granting to only accounts that fulfill the condition. + Specified below. + properties: + key: + description: 'Key for the condition. Valid values: aws:PrincipalOrgID.' + type: string + type: + description: 'Type of condition. Value values: StringEquals.' + type: string + value: + description: Value for the key. + type: string + valueRef: + description: Reference to a Organization in organizations + to populate value. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + valueSelector: + description: Selector for a Organization in organizations + to populate value. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + eventBusName: + description: |- + The name of the event bus to set the permissions on. + If you omit this, the permissions are set on the default event bus. + type: string + eventBusNameRef: + description: Reference to a Bus in cloudwatchevents to populate + eventBusName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventBusNameSelector: + description: Selector for a Bus in cloudwatchevents to populate + eventBusName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + principal: + description: The 12-digit AWS account ID that you are permitting + to put events to your default event bus. Specify * to permit + any account to put events to your default event bus, optionally + limited by condition. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + statementId: + description: An identifier string for the external account that + you are granting permissions to. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: The action that you are enabling the other account + to perform. Defaults to events:PutEvents. 
+ type: string + condition: + description: Configuration block to limit the event bus permissions + you are granting to only accounts that fulfill the condition. + Specified below. + properties: + key: + description: 'Key for the condition. Valid values: aws:PrincipalOrgID.' + type: string + type: + description: 'Type of condition. Value values: StringEquals.' + type: string + value: + description: Value for the key. + type: string + valueRef: + description: Reference to a Organization in organizations + to populate value. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + valueSelector: + description: Selector for a Organization in organizations + to populate value. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + eventBusName: + description: |- + The name of the event bus to set the permissions on. + If you omit this, the permissions are set on the default event bus. + type: string + eventBusNameRef: + description: Reference to a Bus in cloudwatchevents to populate + eventBusName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventBusNameSelector: + description: Selector for a Bus in cloudwatchevents to populate + eventBusName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + principal: + description: The 12-digit AWS account ID that you are permitting + to put events to your default event bus. Specify * to permit + any account to put events to your default event bus, optionally + limited by condition. + type: string + statementId: + description: An identifier string for the external account that + you are granting permissions to. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.principal is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.principal) + || (has(self.initProvider) && has(self.initProvider.principal))' + - message: spec.forProvider.statementId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.statementId) + || (has(self.initProvider) && has(self.initProvider.statementId))' + status: + description: PermissionStatus defines the observed state of Permission. + properties: + atProvider: + properties: + action: + description: The action that you are enabling the other account + to perform. Defaults to events:PutEvents. + type: string + condition: + description: Configuration block to limit the event bus permissions + you are granting to only accounts that fulfill the condition. + Specified below. + properties: + key: + description: 'Key for the condition. Valid values: aws:PrincipalOrgID.' + type: string + type: + description: 'Type of condition. Value values: StringEquals.' + type: string + value: + description: Value for the key. + type: string + type: object + eventBusName: + description: |- + The name of the event bus to set the permissions on. + If you omit this, the permissions are set on the default event bus. + type: string + id: + description: The statement ID of the EventBridge permission. + type: string + principal: + description: The 12-digit AWS account ID that you are permitting + to put events to your default event bus. Specify * to permit + any account to put events to your default event bus, optionally + limited by condition. 
+ type: string + statementId: + description: An identifier string for the external account that + you are granting permissions to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudwatchevents.aws.upbound.io_targets.yaml b/package/crds/cloudwatchevents.aws.upbound.io_targets.yaml index 843fe2992b..6605e6647e 100644 --- a/package/crds/cloudwatchevents.aws.upbound.io_targets.yaml +++ b/package/crds/cloudwatchevents.aws.upbound.io_targets.yaml @@ -2088,3 +2088,1998 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Target is the Schema for the Targets API. Provides an EventBridge + Target resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TargetSpec defines the desired state of Target + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the target. + type: string + batchTarget: + description: Parameters used when you are using the rule to invoke + an Amazon Batch Job. Documented below. A maximum of 1 are allowed. + properties: + arraySize: + description: The size of the array, if this is an array batch + job. Valid values are integers between 2 and 10,000. + type: number + jobAttempts: + description: The number of times to attempt to retry, if the + job fails. Valid values are 1 to 10. + type: number + jobDefinition: + description: The ARN or name of the job definition to use + if the event target is an AWS Batch job. This job definition + must already exist. + type: string + jobName: + description: The name to use for this execution of the job, + if the target is an AWS Batch job. + type: string + type: object + deadLetterConfig: + description: Parameters used when you are providing a dead letter + config. Documented below. A maximum of 1 are allowed. 
+ properties: + arn: + description: '- ARN of the SQS queue specified as the target + for the dead-letter queue.' + type: string + type: object + ecsTarget: + description: Parameters used when you are using the rule to invoke + Amazon ECS Task. Documented below. A maximum of 1 are allowed. + properties: + capacityProviderStrategy: + description: The capacity provider strategy to use for the + task. If a capacity_provider_strategy specified, the launch_type + parameter must be omitted. If no capacity_provider_strategy + or launch_type is specified, the default capacity provider + strategy for the cluster is used. Can be one or more. See + below. + items: + properties: + base: + description: The base value designates how many tasks, + at a minimum, to run on the specified capacity provider. + Only one capacity provider in a capacity provider + strategy can have a base defined. Defaults to 0. + type: number + capacityProvider: + description: Short name of the capacity provider. + type: string + weight: + description: The weight value designates the relative + percentage of the total number of tasks launched that + should use the specified capacity provider. The weight + value is taken into consideration after the base value, + if defined, is satisfied. + type: number + type: object + type: array + enableEcsManagedTags: + description: Specifies whether to enable Amazon ECS managed + tags for the task. + type: boolean + enableExecuteCommand: + description: Whether or not to enable the execute command + functionality for the containers in this task. If true, + this enables execute command functionality on all containers + in the task. + type: boolean + group: + description: Specifies an ECS task group for the task. The + maximum length is 255 characters. + type: string + launchType: + description: 'Specifies the launch type on which your task + is running. The launch type that you specify here must match + one of the launch type (compatibilities) of the target task. 
+ Valid values include: EC2, EXTERNAL, or FARGATE.' + type: string + networkConfiguration: + description: Use this if the ECS task uses the awsvpc network + mode. This specifies the VPC subnets and security groups + associated with the task, and whether a public IP address + is to be used. Required if launch_type is FARGATE because + the awsvpc mode is required for Fargate tasks. + properties: + assignPublicIp: + description: Assign a public IP address to the ENI (Fargate + launch type only). Valid values are true or false. Defaults + to false. + type: boolean + securityGroups: + description: The security groups associated with the task + or service. If you do not specify a security group, + the default security group for the VPC is used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: The subnets associated with the task or service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + orderedPlacementStrategy: + description: An array of placement strategy objects to use + for the task. You can specify a maximum of five strategy + rules per task. + items: + properties: + field: + description: The field to apply the placement strategy + against. For the spread placement strategy, valid + values are instanceId (or host, which has the same + effect), or any platform or custom attribute that + is applied to a container instance, such as attribute:ecs.availability-zone. + For the binpack placement strategy, valid values are + cpu and memory. For the random placement strategy, + this field is not used. For more information, see + Amazon ECS task placement strategies. + type: string + type: + description: Type of placement strategy. The only valid + values at this time are binpack, random and spread. + type: string + type: object + type: array + placementConstraint: + description: An array of placement constraint objects to use + for the task. 
You can specify up to 10 constraints per task + (including constraints in the task definition and those + specified at runtime). See Below. + items: + properties: + expression: + description: Cluster Query Language expression to apply + to the constraint. Does not need to be specified for + the distinctInstance type. For more information, see + Cluster Query Language in the Amazon EC2 Container + Service Developer Guide. + type: string + type: + description: Type of constraint. The only valid values + at this time are memberOf and distinctInstance. + type: string + type: object + type: array + platformVersion: + description: Specifies the platform version for the task. + Specify only the numeric portion of the platform version, + such as 1.1.0. This is used only if LaunchType is FARGATE. + For more information about valid platform versions, see + AWS Fargate Platform Versions. + type: string + propagateTags: + description: 'Specifies whether to propagate the tags from + the task definition to the task. If no value is specified, + the tags are not propagated. Tags can only be propagated + to the task during task creation. The only valid value is: + TASK_DEFINITION.' + type: string + tags: + additionalProperties: + type: string + description: A map of tags to assign to ecs resources. + type: object + x-kubernetes-map-type: granular + taskCount: + description: The number of tasks to create based on the TaskDefinition. + Defaults to 1. + type: number + taskDefinitionArn: + description: The ARN of the task definition to use if the + event target is an Amazon ECS cluster. + type: string + taskDefinitionArnRef: + description: Reference to a TaskDefinition in ecs to populate + taskDefinitionArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + taskDefinitionArnSelector: + description: Selector for a TaskDefinition in ecs to populate + taskDefinitionArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + eventBusName: + description: |- + The name or ARN of the event bus to associate with the rule. + If you omit this, the default event bus is used. 
+ type: string + eventBusNameRef: + description: Reference to a Bus in cloudwatchevents to populate + eventBusName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventBusNameSelector: + description: Selector for a Bus in cloudwatchevents to populate + eventBusName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + forceDestroy: + description: Used to delete managed rules created by AWS. Defaults + to false. + type: boolean + httpTarget: + description: Parameters used when you are using the rule to invoke + an API Gateway REST endpoint. Documented below. A maximum of + 1 is allowed. + properties: + headerParameters: + additionalProperties: + type: string + description: Enables you to specify HTTP headers to add to + the request. + type: object + x-kubernetes-map-type: granular + pathParameterValues: + description: The list of values that correspond sequentially + to any path variables in your endpoint ARN (for example + arn:aws:execute-api:us-east-1:123456:myapi/*/POST/pets/*). + items: + type: string + type: array + queryStringParameters: + additionalProperties: + type: string + description: Represents keys/values of query string parameters + that are appended to the invoked endpoint. + type: object + x-kubernetes-map-type: granular + type: object + input: + description: Valid JSON text passed to the target. Conflicts with + input_path and input_transformer. + type: string + inputPath: + description: The value of the JSONPath that is used for extracting + part of the matched event when passing it to the target. Conflicts + with input and input_transformer. + type: string + inputTransformer: + description: Parameters used when you are providing a custom input + to a target based on certain event data. Conflicts with input + and input_path. 
+ properties: + inputPaths: + additionalProperties: + type: string + description: Key value pairs specified in the form of JSONPath + (for example, time = $.time) + type: object + x-kubernetes-map-type: granular + inputTemplate: + description: Template to customize data sent to the target. + Must be valid JSON. To send a string value, the string value + must include double quotes.g., "\"Your string goes here.\\nA + new line.\"" + type: string + type: object + kinesisTarget: + description: Parameters used when you are using the rule to invoke + an Amazon Kinesis Stream. Documented below. A maximum of 1 are + allowed. + properties: + partitionKeyPath: + description: The JSON path to be extracted from the event + and used as the partition key. + type: string + type: object + redshiftTarget: + description: Parameters used when you are using the rule to invoke + an Amazon Redshift Statement. Documented below. A maximum of + 1 are allowed. + properties: + database: + description: The name of the database. + type: string + dbUser: + description: The database user name. + type: string + secretsManagerArn: + description: The name or ARN of the secret that enables access + to the database. + type: string + sql: + description: The SQL statement text to run. + type: string + statementName: + description: The name of the SQL statement. + type: string + withEvent: + description: Indicates whether to send an event back to EventBridge + after the SQL statement runs. + type: boolean + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + retryPolicy: + description: Parameters used when you are providing retry policies. + Documented below. A maximum of 1 are allowed. + properties: + maximumEventAgeInSeconds: + description: The age in seconds to continue to make retry + attempts. 
+ type: number + maximumRetryAttempts: + description: maximum number of retry attempts to make before + the request fails + type: number + type: object + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role to + be used for this target when the rule is triggered. Required + if ecs_target is used or target in arn is EC2 instance, Kinesis + data stream, Step Functions state machine, or Event Bus in different + account or region. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rule: + description: The name of the rule you want to add targets to. + type: string + ruleRef: + description: Reference to a Rule in cloudwatchevents to populate + rule. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ruleSelector: + description: Selector for a Rule in cloudwatchevents to populate + rule. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + runCommandTargets: + description: Parameters used when you are using the rule to invoke + Amazon EC2 Run Command. Documented below. A maximum of 5 are + allowed. + items: + properties: + key: + description: Can be either tag:tag-key or InstanceIds. + type: string + values: + description: If Key is tag:tag-key, Values is a list of + tag values. If Key is InstanceIds, Values is a list of + Amazon EC2 instance IDs. + items: + type: string + type: array + type: object + type: array + sagemakerPipelineTarget: + description: Parameters used when you are using the rule to invoke + an Amazon SageMaker Pipeline. Documented below. A maximum of + 1 are allowed. + properties: + pipelineParameterList: + description: List of Parameter names and values for SageMaker + Model Building Pipeline execution. 
+ items: + properties: + name: + description: Name of parameter to start execution of + a SageMaker Model Building Pipeline. + type: string + value: + description: Value of parameter to start execution of + a SageMaker Model Building Pipeline. + type: string + type: object + type: array + type: object + sqsTarget: + description: Parameters used when you are using the rule to invoke + an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. + properties: + messageGroupId: + description: The FIFO message group ID to use as the target. + type: string + type: object + targetId: + description: The unique target assignment ID. If missing, will + generate a random, unique id. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + arn: + description: The Amazon Resource Name (ARN) of the target. + type: string + batchTarget: + description: Parameters used when you are using the rule to invoke + an Amazon Batch Job. Documented below. A maximum of 1 are allowed. + properties: + arraySize: + description: The size of the array, if this is an array batch + job. Valid values are integers between 2 and 10,000. + type: number + jobAttempts: + description: The number of times to attempt to retry, if the + job fails. Valid values are 1 to 10. 
+ type: number + jobDefinition: + description: The ARN or name of the job definition to use + if the event target is an AWS Batch job. This job definition + must already exist. + type: string + jobName: + description: The name to use for this execution of the job, + if the target is an AWS Batch job. + type: string + type: object + deadLetterConfig: + description: Parameters used when you are providing a dead letter + config. Documented below. A maximum of 1 are allowed. + properties: + arn: + description: '- ARN of the SQS queue specified as the target + for the dead-letter queue.' + type: string + type: object + ecsTarget: + description: Parameters used when you are using the rule to invoke + Amazon ECS Task. Documented below. A maximum of 1 are allowed. + properties: + capacityProviderStrategy: + description: The capacity provider strategy to use for the + task. If a capacity_provider_strategy specified, the launch_type + parameter must be omitted. If no capacity_provider_strategy + or launch_type is specified, the default capacity provider + strategy for the cluster is used. Can be one or more. See + below. + items: + properties: + base: + description: The base value designates how many tasks, + at a minimum, to run on the specified capacity provider. + Only one capacity provider in a capacity provider + strategy can have a base defined. Defaults to 0. + type: number + capacityProvider: + description: Short name of the capacity provider. + type: string + weight: + description: The weight value designates the relative + percentage of the total number of tasks launched that + should use the specified capacity provider. The weight + value is taken into consideration after the base value, + if defined, is satisfied. + type: number + type: object + type: array + enableEcsManagedTags: + description: Specifies whether to enable Amazon ECS managed + tags for the task. 
+ type: boolean + enableExecuteCommand: + description: Whether or not to enable the execute command + functionality for the containers in this task. If true, + this enables execute command functionality on all containers + in the task. + type: boolean + group: + description: Specifies an ECS task group for the task. The + maximum length is 255 characters. + type: string + launchType: + description: 'Specifies the launch type on which your task + is running. The launch type that you specify here must match + one of the launch type (compatibilities) of the target task. + Valid values include: EC2, EXTERNAL, or FARGATE.' + type: string + networkConfiguration: + description: Use this if the ECS task uses the awsvpc network + mode. This specifies the VPC subnets and security groups + associated with the task, and whether a public IP address + is to be used. Required if launch_type is FARGATE because + the awsvpc mode is required for Fargate tasks. + properties: + assignPublicIp: + description: Assign a public IP address to the ENI (Fargate + launch type only). Valid values are true or false. Defaults + to false. + type: boolean + securityGroups: + description: The security groups associated with the task + or service. If you do not specify a security group, + the default security group for the VPC is used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: The subnets associated with the task or service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + orderedPlacementStrategy: + description: An array of placement strategy objects to use + for the task. You can specify a maximum of five strategy + rules per task. + items: + properties: + field: + description: The field to apply the placement strategy + against. 
For the spread placement strategy, valid + values are instanceId (or host, which has the same + effect), or any platform or custom attribute that + is applied to a container instance, such as attribute:ecs.availability-zone. + For the binpack placement strategy, valid values are + cpu and memory. For the random placement strategy, + this field is not used. For more information, see + Amazon ECS task placement strategies. + type: string + type: + description: Type of placement strategy. The only valid + values at this time are binpack, random and spread. + type: string + type: object + type: array + placementConstraint: + description: An array of placement constraint objects to use + for the task. You can specify up to 10 constraints per task + (including constraints in the task definition and those + specified at runtime). See Below. + items: + properties: + expression: + description: Cluster Query Language expression to apply + to the constraint. Does not need to be specified for + the distinctInstance type. For more information, see + Cluster Query Language in the Amazon EC2 Container + Service Developer Guide. + type: string + type: + description: Type of constraint. The only valid values + at this time are memberOf and distinctInstance. + type: string + type: object + type: array + platformVersion: + description: Specifies the platform version for the task. + Specify only the numeric portion of the platform version, + such as 1.1.0. This is used only if LaunchType is FARGATE. + For more information about valid platform versions, see + AWS Fargate Platform Versions. + type: string + propagateTags: + description: 'Specifies whether to propagate the tags from + the task definition to the task. If no value is specified, + the tags are not propagated. Tags can only be propagated + to the task during task creation. The only valid value is: + TASK_DEFINITION.' 
+ type: string + tags: + additionalProperties: + type: string + description: A map of tags to assign to ecs resources. + type: object + x-kubernetes-map-type: granular + taskCount: + description: The number of tasks to create based on the TaskDefinition. + Defaults to 1. + type: number + taskDefinitionArn: + description: The ARN of the task definition to use if the + event target is an Amazon ECS cluster. + type: string + taskDefinitionArnRef: + description: Reference to a TaskDefinition in ecs to populate + taskDefinitionArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + taskDefinitionArnSelector: + description: Selector for a TaskDefinition in ecs to populate + taskDefinitionArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + eventBusName: + description: |- + The name or ARN of the event bus to associate with the rule. + If you omit this, the default event bus is used. + type: string + eventBusNameRef: + description: Reference to a Bus in cloudwatchevents to populate + eventBusName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventBusNameSelector: + description: Selector for a Bus in cloudwatchevents to populate + eventBusName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + forceDestroy: + description: Used to delete managed rules created by AWS. Defaults + to false. + type: boolean + httpTarget: + description: Parameters used when you are using the rule to invoke + an API Gateway REST endpoint. Documented below. A maximum of + 1 is allowed. + properties: + headerParameters: + additionalProperties: + type: string + description: Enables you to specify HTTP headers to add to + the request. + type: object + x-kubernetes-map-type: granular + pathParameterValues: + description: The list of values that correspond sequentially + to any path variables in your endpoint ARN (for example + arn:aws:execute-api:us-east-1:123456:myapi/*/POST/pets/*). + items: + type: string + type: array + queryStringParameters: + additionalProperties: + type: string + description: Represents keys/values of query string parameters + that are appended to the invoked endpoint. 
+ type: object + x-kubernetes-map-type: granular + type: object + input: + description: Valid JSON text passed to the target. Conflicts with + input_path and input_transformer. + type: string + inputPath: + description: The value of the JSONPath that is used for extracting + part of the matched event when passing it to the target. Conflicts + with input and input_transformer. + type: string + inputTransformer: + description: Parameters used when you are providing a custom input + to a target based on certain event data. Conflicts with input + and input_path. + properties: + inputPaths: + additionalProperties: + type: string + description: Key value pairs specified in the form of JSONPath + (for example, time = $.time) + type: object + x-kubernetes-map-type: granular + inputTemplate: + description: Template to customize data sent to the target. + Must be valid JSON. To send a string value, the string value + must include double quotes.g., "\"Your string goes here.\\nA + new line.\"" + type: string + type: object + kinesisTarget: + description: Parameters used when you are using the rule to invoke + an Amazon Kinesis Stream. Documented below. A maximum of 1 are + allowed. + properties: + partitionKeyPath: + description: The JSON path to be extracted from the event + and used as the partition key. + type: string + type: object + redshiftTarget: + description: Parameters used when you are using the rule to invoke + an Amazon Redshift Statement. Documented below. A maximum of + 1 are allowed. + properties: + database: + description: The name of the database. + type: string + dbUser: + description: The database user name. + type: string + secretsManagerArn: + description: The name or ARN of the secret that enables access + to the database. + type: string + sql: + description: The SQL statement text to run. + type: string + statementName: + description: The name of the SQL statement. 
+ type: string + withEvent: + description: Indicates whether to send an event back to EventBridge + after the SQL statement runs. + type: boolean + type: object + retryPolicy: + description: Parameters used when you are providing retry policies. + Documented below. A maximum of 1 are allowed. + properties: + maximumEventAgeInSeconds: + description: The age in seconds to continue to make retry + attempts. + type: number + maximumRetryAttempts: + description: maximum number of retry attempts to make before + the request fails + type: number + type: object + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role to + be used for this target when the rule is triggered. Required + if ecs_target is used or target in arn is EC2 instance, Kinesis + data stream, Step Functions state machine, or Event Bus in different + account or region. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rule: + description: The name of the rule you want to add targets to. + type: string + ruleRef: + description: Reference to a Rule in cloudwatchevents to populate + rule. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ruleSelector: + description: Selector for a Rule in cloudwatchevents to populate + rule. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + runCommandTargets: + description: Parameters used when you are using the rule to invoke + Amazon EC2 Run Command. Documented below. A maximum of 5 are + allowed. + items: + properties: + key: + description: Can be either tag:tag-key or InstanceIds. + type: string + values: + description: If Key is tag:tag-key, Values is a list of + tag values. If Key is InstanceIds, Values is a list of + Amazon EC2 instance IDs. 
+ items: + type: string + type: array + type: object + type: array + sagemakerPipelineTarget: + description: Parameters used when you are using the rule to invoke + an Amazon SageMaker Pipeline. Documented below. A maximum of + 1 are allowed. + properties: + pipelineParameterList: + description: List of Parameter names and values for SageMaker + Model Building Pipeline execution. + items: + properties: + name: + description: Name of parameter to start execution of + a SageMaker Model Building Pipeline. + type: string + value: + description: Value of parameter to start execution of + a SageMaker Model Building Pipeline. + type: string + type: object + type: array + type: object + sqsTarget: + description: Parameters used when you are using the rule to invoke + an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. + properties: + messageGroupId: + description: The FIFO message group ID to use as the target. + type: string + type: object + targetId: + description: The unique target assignment ID. If missing, will + generate a random, unique id. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.arn is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.arn) + || (has(self.initProvider) && has(self.initProvider.arn))' + status: + description: TargetStatus defines the observed state of Target. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the target. + type: string + batchTarget: + description: Parameters used when you are using the rule to invoke + an Amazon Batch Job. Documented below. A maximum of 1 are allowed. + properties: + arraySize: + description: The size of the array, if this is an array batch + job. Valid values are integers between 2 and 10,000. + type: number + jobAttempts: + description: The number of times to attempt to retry, if the + job fails. Valid values are 1 to 10. 
+ type: number + jobDefinition: + description: The ARN or name of the job definition to use + if the event target is an AWS Batch job. This job definition + must already exist. + type: string + jobName: + description: The name to use for this execution of the job, + if the target is an AWS Batch job. + type: string + type: object + deadLetterConfig: + description: Parameters used when you are providing a dead letter + config. Documented below. A maximum of 1 are allowed. + properties: + arn: + description: '- ARN of the SQS queue specified as the target + for the dead-letter queue.' + type: string + type: object + ecsTarget: + description: Parameters used when you are using the rule to invoke + Amazon ECS Task. Documented below. A maximum of 1 are allowed. + properties: + capacityProviderStrategy: + description: The capacity provider strategy to use for the + task. If a capacity_provider_strategy specified, the launch_type + parameter must be omitted. If no capacity_provider_strategy + or launch_type is specified, the default capacity provider + strategy for the cluster is used. Can be one or more. See + below. + items: + properties: + base: + description: The base value designates how many tasks, + at a minimum, to run on the specified capacity provider. + Only one capacity provider in a capacity provider + strategy can have a base defined. Defaults to 0. + type: number + capacityProvider: + description: Short name of the capacity provider. + type: string + weight: + description: The weight value designates the relative + percentage of the total number of tasks launched that + should use the specified capacity provider. The weight + value is taken into consideration after the base value, + if defined, is satisfied. + type: number + type: object + type: array + enableEcsManagedTags: + description: Specifies whether to enable Amazon ECS managed + tags for the task. 
+ type: boolean + enableExecuteCommand: + description: Whether or not to enable the execute command + functionality for the containers in this task. If true, + this enables execute command functionality on all containers + in the task. + type: boolean + group: + description: Specifies an ECS task group for the task. The + maximum length is 255 characters. + type: string + launchType: + description: 'Specifies the launch type on which your task + is running. The launch type that you specify here must match + one of the launch type (compatibilities) of the target task. + Valid values include: EC2, EXTERNAL, or FARGATE.' + type: string + networkConfiguration: + description: Use this if the ECS task uses the awsvpc network + mode. This specifies the VPC subnets and security groups + associated with the task, and whether a public IP address + is to be used. Required if launch_type is FARGATE because + the awsvpc mode is required for Fargate tasks. + properties: + assignPublicIp: + description: Assign a public IP address to the ENI (Fargate + launch type only). Valid values are true or false. Defaults + to false. + type: boolean + securityGroups: + description: The security groups associated with the task + or service. If you do not specify a security group, + the default security group for the VPC is used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: The subnets associated with the task or service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + orderedPlacementStrategy: + description: An array of placement strategy objects to use + for the task. You can specify a maximum of five strategy + rules per task. + items: + properties: + field: + description: The field to apply the placement strategy + against. 
For the spread placement strategy, valid + values are instanceId (or host, which has the same + effect), or any platform or custom attribute that + is applied to a container instance, such as attribute:ecs.availability-zone. + For the binpack placement strategy, valid values are + cpu and memory. For the random placement strategy, + this field is not used. For more information, see + Amazon ECS task placement strategies. + type: string + type: + description: Type of placement strategy. The only valid + values at this time are binpack, random and spread. + type: string + type: object + type: array + placementConstraint: + description: An array of placement constraint objects to use + for the task. You can specify up to 10 constraints per task + (including constraints in the task definition and those + specified at runtime). See Below. + items: + properties: + expression: + description: Cluster Query Language expression to apply + to the constraint. Does not need to be specified for + the distinctInstance type. For more information, see + Cluster Query Language in the Amazon EC2 Container + Service Developer Guide. + type: string + type: + description: Type of constraint. The only valid values + at this time are memberOf and distinctInstance. + type: string + type: object + type: array + platformVersion: + description: Specifies the platform version for the task. + Specify only the numeric portion of the platform version, + such as 1.1.0. This is used only if LaunchType is FARGATE. + For more information about valid platform versions, see + AWS Fargate Platform Versions. + type: string + propagateTags: + description: 'Specifies whether to propagate the tags from + the task definition to the task. If no value is specified, + the tags are not propagated. Tags can only be propagated + to the task during task creation. The only valid value is: + TASK_DEFINITION.' 
+ type: string + tags: + additionalProperties: + type: string + description: A map of tags to assign to ecs resources. + type: object + x-kubernetes-map-type: granular + taskCount: + description: The number of tasks to create based on the TaskDefinition. + Defaults to 1. + type: number + taskDefinitionArn: + description: The ARN of the task definition to use if the + event target is an Amazon ECS cluster. + type: string + type: object + eventBusName: + description: |- + The name or ARN of the event bus to associate with the rule. + If you omit this, the default event bus is used. + type: string + forceDestroy: + description: Used to delete managed rules created by AWS. Defaults + to false. + type: boolean + httpTarget: + description: Parameters used when you are using the rule to invoke + an API Gateway REST endpoint. Documented below. A maximum of + 1 is allowed. + properties: + headerParameters: + additionalProperties: + type: string + description: Enables you to specify HTTP headers to add to + the request. + type: object + x-kubernetes-map-type: granular + pathParameterValues: + description: The list of values that correspond sequentially + to any path variables in your endpoint ARN (for example + arn:aws:execute-api:us-east-1:123456:myapi/*/POST/pets/*). + items: + type: string + type: array + queryStringParameters: + additionalProperties: + type: string + description: Represents keys/values of query string parameters + that are appended to the invoked endpoint. + type: object + x-kubernetes-map-type: granular + type: object + id: + type: string + input: + description: Valid JSON text passed to the target. Conflicts with + input_path and input_transformer. + type: string + inputPath: + description: The value of the JSONPath that is used for extracting + part of the matched event when passing it to the target. Conflicts + with input and input_transformer. 
+ type: string + inputTransformer: + description: Parameters used when you are providing a custom input + to a target based on certain event data. Conflicts with input + and input_path. + properties: + inputPaths: + additionalProperties: + type: string + description: Key value pairs specified in the form of JSONPath + (for example, time = $.time) + type: object + x-kubernetes-map-type: granular + inputTemplate: + description: Template to customize data sent to the target. + Must be valid JSON. To send a string value, the string value + must include double quotes.g., "\"Your string goes here.\\nA + new line.\"" + type: string + type: object + kinesisTarget: + description: Parameters used when you are using the rule to invoke + an Amazon Kinesis Stream. Documented below. A maximum of 1 are + allowed. + properties: + partitionKeyPath: + description: The JSON path to be extracted from the event + and used as the partition key. + type: string + type: object + redshiftTarget: + description: Parameters used when you are using the rule to invoke + an Amazon Redshift Statement. Documented below. A maximum of + 1 are allowed. + properties: + database: + description: The name of the database. + type: string + dbUser: + description: The database user name. + type: string + secretsManagerArn: + description: The name or ARN of the secret that enables access + to the database. + type: string + sql: + description: The SQL statement text to run. + type: string + statementName: + description: The name of the SQL statement. + type: string + withEvent: + description: Indicates whether to send an event back to EventBridge + after the SQL statement runs. + type: boolean + type: object + retryPolicy: + description: Parameters used when you are providing retry policies. + Documented below. A maximum of 1 are allowed. + properties: + maximumEventAgeInSeconds: + description: The age in seconds to continue to make retry + attempts. 
+ type: number + maximumRetryAttempts: + description: maximum number of retry attempts to make before + the request fails + type: number + type: object + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role to + be used for this target when the rule is triggered. Required + if ecs_target is used or target in arn is EC2 instance, Kinesis + data stream, Step Functions state machine, or Event Bus in different + account or region. + type: string + rule: + description: The name of the rule you want to add targets to. + type: string + runCommandTargets: + description: Parameters used when you are using the rule to invoke + Amazon EC2 Run Command. Documented below. A maximum of 5 are + allowed. + items: + properties: + key: + description: Can be either tag:tag-key or InstanceIds. + type: string + values: + description: If Key is tag:tag-key, Values is a list of + tag values. If Key is InstanceIds, Values is a list of + Amazon EC2 instance IDs. + items: + type: string + type: array + type: object + type: array + sagemakerPipelineTarget: + description: Parameters used when you are using the rule to invoke + an Amazon SageMaker Pipeline. Documented below. A maximum of + 1 are allowed. + properties: + pipelineParameterList: + description: List of Parameter names and values for SageMaker + Model Building Pipeline execution. + items: + properties: + name: + description: Name of parameter to start execution of + a SageMaker Model Building Pipeline. + type: string + value: + description: Value of parameter to start execution of + a SageMaker Model Building Pipeline. + type: string + type: object + type: array + type: object + sqsTarget: + description: Parameters used when you are using the rule to invoke + an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. + properties: + messageGroupId: + description: The FIFO message group ID to use as the target. + type: string + type: object + targetId: + description: The unique target assignment ID. 
If missing, will + generate a random, unique id. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cloudwatchlogs.aws.upbound.io_metricfilters.yaml b/package/crds/cloudwatchlogs.aws.upbound.io_metricfilters.yaml index 5d11b29fa1..6058266b4f 100644 --- a/package/crds/cloudwatchlogs.aws.upbound.io_metricfilters.yaml +++ b/package/crds/cloudwatchlogs.aws.upbound.io_metricfilters.yaml @@ -636,3 +636,606 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MetricFilter is the Schema for the MetricFilters API. Provides + a CloudWatch Log Metric Filter resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MetricFilterSpec defines the desired state of MetricFilter + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + logGroupName: + description: The name of the log group to associate the metric + filter with. + type: string + logGroupNameRef: + description: Reference to a Group in cloudwatchlogs to populate + logGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logGroupNameSelector: + description: Selector for a Group in cloudwatchlogs to populate + logGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + metricTransformation: + description: A block defining collection of information needed + to define how metric data gets emitted. See below. + properties: + defaultValue: + description: The value to emit when a filter pattern does + not match a log event. Conflicts with dimensions. + type: string + dimensions: + additionalProperties: + type: string + description: Map of fields to use as dimensions for the metric. + Up to 3 dimensions are allowed. Conflicts with default_value. 
+ type: object + x-kubernetes-map-type: granular + name: + description: The name of the CloudWatch metric to which the + monitored log information should be published (e.g., ErrorCount) + type: string + namespace: + description: The destination namespace of the CloudWatch metric. + type: string + unit: + description: The unit to assign to the metric. If you omit + this, the unit is set as None. + type: string + value: + description: What to publish to the metric. For example, if + you're counting the occurrences of a particular term like + "Error", the value will be "1" for each occurrence. If you're + counting the bytes transferred the published value will + be the value in the log event. + type: string + type: object + pattern: + description: |- + A valid CloudWatch Logs filter pattern + for extracting metric data out of ingested log events. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + logGroupName: + description: The name of the log group to associate the metric + filter with. + type: string + logGroupNameRef: + description: Reference to a Group in cloudwatchlogs to populate + logGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logGroupNameSelector: + description: Selector for a Group in cloudwatchlogs to populate + logGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + metricTransformation: + description: A block defining collection of information needed + to define how metric data gets emitted. See below. + properties: + defaultValue: + description: The value to emit when a filter pattern does + not match a log event. Conflicts with dimensions. + type: string + dimensions: + additionalProperties: + type: string + description: Map of fields to use as dimensions for the metric. + Up to 3 dimensions are allowed. Conflicts with default_value. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the CloudWatch metric to which the + monitored log information should be published (e.g., ErrorCount) + type: string + namespace: + description: The destination namespace of the CloudWatch metric. + type: string + unit: + description: The unit to assign to the metric. If you omit + this, the unit is set as None. + type: string + value: + description: What to publish to the metric. For example, if + you're counting the occurrences of a particular term like + "Error", the value will be "1" for each occurrence. If you're + counting the bytes transferred the published value will + be the value in the log event. + type: string + type: object + pattern: + description: |- + A valid CloudWatch Logs filter pattern + for extracting metric data out of ingested log events. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.metricTransformation is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.metricTransformation) + || (has(self.initProvider) && has(self.initProvider.metricTransformation))' + - message: spec.forProvider.pattern is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.pattern) + || (has(self.initProvider) && has(self.initProvider.pattern))' + status: + description: MetricFilterStatus defines the observed state of MetricFilter. + properties: + atProvider: + properties: + id: + description: The name of the metric filter. + type: string + logGroupName: + description: The name of the log group to associate the metric + filter with. 
+ type: string + metricTransformation: + description: A block defining collection of information needed + to define how metric data gets emitted. See below. + properties: + defaultValue: + description: The value to emit when a filter pattern does + not match a log event. Conflicts with dimensions. + type: string + dimensions: + additionalProperties: + type: string + description: Map of fields to use as dimensions for the metric. + Up to 3 dimensions are allowed. Conflicts with default_value. + type: object + x-kubernetes-map-type: granular + name: + description: The name of the CloudWatch metric to which the + monitored log information should be published (e.g., ErrorCount) + type: string + namespace: + description: The destination namespace of the CloudWatch metric. + type: string + unit: + description: The unit to assign to the metric. If you omit + this, the unit is set as None. + type: string + value: + description: What to publish to the metric. For example, if + you're counting the occurrences of a particular term like + "Error", the value will be "1" for each occurrence. If you're + counting the bytes transferred the published value will + be the value in the log event. + type: string + type: object + pattern: + description: |- + A valid CloudWatch Logs filter pattern + for extracting metric data out of ingested log events. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/codepipeline.aws.upbound.io_codepipelines.yaml b/package/crds/codepipeline.aws.upbound.io_codepipelines.yaml index b5daa65c2f..360e4f1c28 100644 --- a/package/crds/codepipeline.aws.upbound.io_codepipelines.yaml +++ b/package/crds/codepipeline.aws.upbound.io_codepipelines.yaml @@ -1564,3 +1564,1501 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Codepipeline is the Schema for the Codepipelines API. 
Provides + a CodePipeline + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CodepipelineSpec defines the desired state of Codepipeline + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + artifactStore: + description: One or more artifact_store blocks. Artifact stores + are documented below. + items: + properties: + encryptionKey: + description: The encryption key block AWS CodePipeline uses + to encrypt the data in the artifact store, such as an + AWS Key Management Service (AWS KMS) key. If you don't + specify a key, AWS CodePipeline uses the default key for + Amazon Simple Storage Service (Amazon S3). 
An encryption_key + block is documented below. + properties: + id: + description: The KMS key ARN or ID + type: string + type: + description: The type of key; currently only KMS is + supported + type: string + type: object + location: + description: The location where AWS CodePipeline stores + artifacts for a pipeline; currently only S3 is supported. + type: string + locationRef: + description: Reference to a Bucket in s3 to populate location. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + locationSelector: + description: Selector for a Bucket in s3 to populate location. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: The region where the artifact store is located. + Required for a cross-region CodePipeline, do not provide + for a single-region CodePipeline. + type: string + type: + description: The type of the artifact store, such as Amazon + S3 + type: string + type: object + type: array + executionMode: + description: The method that the pipeline will use to handle multiple + executions. The default mode is SUPERSEDED. For value values, + refer to the AWS documentation. + type: string + pipelineType: + description: 'Type of the pipeline. Possible values are: V1 and + V2. Default value is V1.' + type: string + region: + description: |- + The region in which to run the action. + Region is the region you'd like your resource to be created in. + type: string + roleArn: + description: A service role Amazon Resource Name (ARN) that grants + AWS CodePipeline permission to make calls to AWS services on + your behalf. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stage: + description: (Minimum of at least two stage blocks is required) + A stage block. Stages are documented below. + items: + properties: + action: + description: The action(s) to include in the stage. 
Defined + as an action block below + items: + properties: + category: + description: A category defines what kind of action + can be taken in the stage, and constrains the provider + type for the action. Possible values are Approval, + Build, Deploy, Invoke, Source and Test. + type: string + configuration: + additionalProperties: + type: string + description: A map of the action declaration's configuration. + Configurations options for action types and providers + can be found in the Pipeline Structure Reference + and Action Structure Reference documentation. + type: object + x-kubernetes-map-type: granular + inputArtifacts: + description: A list of artifact names to be worked + on. + items: + type: string + type: array + name: + description: The action declaration's name. + type: string + namespace: + description: The namespace all output variables will + be accessed from. + type: string + outputArtifacts: + description: A list of artifact names to output. Output + artifact names must be unique within a pipeline. + items: + type: string + type: array + owner: + description: The creator of the action being called. + Possible values are AWS, Custom and ThirdParty. + type: string + provider: + description: The provider of the service being called + by the action. Valid providers are determined by + the action category. Provider names are listed in + the Action Structure Reference documentation. + type: string + region: + description: The region in which to run the action. + type: string + roleArn: + description: The ARN of the IAM service role that + will perform the declared action. This is assumed + through the roleArn for the pipeline. + type: string + runOrder: + description: The order in which actions are run. + type: number + version: + description: A string that identifies the action type. + type: string + type: object + type: array + name: + description: The name of the stage. 
+ type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + trigger: + description: A trigger block. Valid only when pipeline_type is + V2. Triggers are documented below. + items: + properties: + gitConfiguration: + description: Provides the filter criteria and the source + stage for the repository event that starts the pipeline. + For more information, refer to the AWS documentation. + A git_configuration block is documented below. + properties: + pullRequest: + description: The field where the repository event that + will start the pipeline is specified as pull requests. + A pull_request block is documented below. + items: + properties: + branches: + description: The field that specifies to filter + on branches for the pull request trigger configuration. + A branches block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + events: + description: A list that specifies which pull + request events to filter on (opened, updated, + closed) for the trigger configuration. Possible + values are OPEN, UPDATED and CLOSED. + items: + type: string + type: array + filePaths: + description: The field that specifies to filter + on file paths for the pull request trigger configuration. + A file_paths block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. 
+ items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + type: object + type: array + push: + description: The field where the repository event that + will start the pipeline, such as pushing Git tags, + is specified with details. A push block is documented + below. + items: + properties: + branches: + description: The field that specifies to filter + on branches for the pull request trigger configuration. + A branches block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + filePaths: + description: The field that specifies to filter + on file paths for the pull request trigger configuration. + A file_paths block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + tags: + description: Key-value map of resource tags. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. 
+ items: + type: string + type: array + type: object + type: object + type: array + sourceActionName: + description: The name of the pipeline source action + where the trigger configuration. + type: string + type: object + providerType: + description: The source provider for the event. Possible + value is CodeStarSourceConnection. + type: string + type: object + type: array + variable: + description: A pipeline-level variable block. Valid only when + pipeline_type is V2. Variable are documented below. + items: + properties: + defaultValue: + description: The default value of a pipeline-level variable. + type: string + description: + description: The description of a pipeline-level variable. + type: string + name: + description: The name of a pipeline-level variable. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + artifactStore: + description: One or more artifact_store blocks. Artifact stores + are documented below. + items: + properties: + encryptionKey: + description: The encryption key block AWS CodePipeline uses + to encrypt the data in the artifact store, such as an + AWS Key Management Service (AWS KMS) key. If you don't + specify a key, AWS CodePipeline uses the default key for + Amazon Simple Storage Service (Amazon S3). 
An encryption_key + block is documented below. + properties: + id: + description: The KMS key ARN or ID + type: string + type: + description: The type of key; currently only KMS is + supported + type: string + type: object + location: + description: The location where AWS CodePipeline stores + artifacts for a pipeline; currently only S3 is supported. + type: string + locationRef: + description: Reference to a Bucket in s3 to populate location. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + locationSelector: + description: Selector for a Bucket in s3 to populate location. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The type of the artifact store, such as Amazon + S3 + type: string + type: object + type: array + executionMode: + description: The method that the pipeline will use to handle multiple + executions. The default mode is SUPERSEDED. For value values, + refer to the AWS documentation. + type: string + pipelineType: + description: 'Type of the pipeline. Possible values are: V1 and + V2. Default value is V1.' + type: string + roleArn: + description: A service role Amazon Resource Name (ARN) that grants + AWS CodePipeline permission to make calls to AWS services on + your behalf. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stage: + description: (Minimum of at least two stage blocks is required) + A stage block. Stages are documented below. + items: + properties: + action: + description: The action(s) to include in the stage. Defined + as an action block below + items: + properties: + category: + description: A category defines what kind of action + can be taken in the stage, and constrains the provider + type for the action. Possible values are Approval, + Build, Deploy, Invoke, Source and Test. 
+ type: string + configuration: + additionalProperties: + type: string + description: A map of the action declaration's configuration. + Configurations options for action types and providers + can be found in the Pipeline Structure Reference + and Action Structure Reference documentation. + type: object + x-kubernetes-map-type: granular + inputArtifacts: + description: A list of artifact names to be worked + on. + items: + type: string + type: array + name: + description: The action declaration's name. + type: string + namespace: + description: The namespace all output variables will + be accessed from. + type: string + outputArtifacts: + description: A list of artifact names to output. Output + artifact names must be unique within a pipeline. + items: + type: string + type: array + owner: + description: The creator of the action being called. + Possible values are AWS, Custom and ThirdParty. + type: string + provider: + description: The provider of the service being called + by the action. Valid providers are determined by + the action category. Provider names are listed in + the Action Structure Reference documentation. + type: string + roleArn: + description: The ARN of the IAM service role that + will perform the declared action. This is assumed + through the roleArn for the pipeline. + type: string + runOrder: + description: The order in which actions are run. + type: number + version: + description: A string that identifies the action type. + type: string + type: object + type: array + name: + description: The name of the stage. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + trigger: + description: A trigger block. Valid only when pipeline_type is + V2. Triggers are documented below. 
+ items: + properties: + gitConfiguration: + description: Provides the filter criteria and the source + stage for the repository event that starts the pipeline. + For more information, refer to the AWS documentation. + A git_configuration block is documented below. + properties: + pullRequest: + description: The field where the repository event that + will start the pipeline is specified as pull requests. + A pull_request block is documented below. + items: + properties: + branches: + description: The field that specifies to filter + on branches for the pull request trigger configuration. + A branches block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + events: + description: A list that specifies which pull + request events to filter on (opened, updated, + closed) for the trigger configuration. Possible + values are OPEN, UPDATED and CLOSED. + items: + type: string + type: array + filePaths: + description: The field that specifies to filter + on file paths for the pull request trigger configuration. + A file_paths block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + type: object + type: array + push: + description: The field where the repository event that + will start the pipeline, such as pushing Git tags, + is specified with details. A push block is documented + below. 
+ items: + properties: + branches: + description: The field that specifies to filter + on branches for the pull request trigger configuration. + A branches block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + filePaths: + description: The field that specifies to filter + on file paths for the pull request trigger configuration. + A file_paths block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + tags: + description: Key-value map of resource tags. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + type: object + type: array + sourceActionName: + description: The name of the pipeline source action + where the trigger configuration. + type: string + type: object + providerType: + description: The source provider for the event. Possible + value is CodeStarSourceConnection. + type: string + type: object + type: array + variable: + description: A pipeline-level variable block. Valid only when + pipeline_type is V2. Variable are documented below. 
+ items: + properties: + defaultValue: + description: The default value of a pipeline-level variable. + type: string + description: + description: The description of a pipeline-level variable. + type: string + name: + description: The name of a pipeline-level variable. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.artifactStore is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.artifactStore) + || (has(self.initProvider) && has(self.initProvider.artifactStore))' + - message: spec.forProvider.stage is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.stage) + || (has(self.initProvider) && has(self.initProvider.stage))' + status: + description: CodepipelineStatus defines the observed state of Codepipeline. + properties: + atProvider: + properties: + arn: + description: The codepipeline ARN. + type: string + artifactStore: + description: One or more artifact_store blocks. Artifact stores + are documented below. + items: + properties: + encryptionKey: + description: The encryption key block AWS CodePipeline uses + to encrypt the data in the artifact store, such as an + AWS Key Management Service (AWS KMS) key. If you don't + specify a key, AWS CodePipeline uses the default key for + Amazon Simple Storage Service (Amazon S3). An encryption_key + block is documented below. + properties: + id: + description: The KMS key ARN or ID + type: string + type: + description: The type of key; currently only KMS is + supported + type: string + type: object + location: + description: The location where AWS CodePipeline stores + artifacts for a pipeline; currently only S3 is supported. + type: string + region: + description: The region where the artifact store is located. + Required for a cross-region CodePipeline, do not provide + for a single-region CodePipeline. 
+ type: string + type: + description: The type of the artifact store, such as Amazon + S3 + type: string + type: object + type: array + executionMode: + description: The method that the pipeline will use to handle multiple + executions. The default mode is SUPERSEDED. For value values, + refer to the AWS documentation. + type: string + id: + description: The codepipeline ID. + type: string + pipelineType: + description: 'Type of the pipeline. Possible values are: V1 and + V2. Default value is V1.' + type: string + roleArn: + description: A service role Amazon Resource Name (ARN) that grants + AWS CodePipeline permission to make calls to AWS services on + your behalf. + type: string + stage: + description: (Minimum of at least two stage blocks is required) + A stage block. Stages are documented below. + items: + properties: + action: + description: The action(s) to include in the stage. Defined + as an action block below + items: + properties: + category: + description: A category defines what kind of action + can be taken in the stage, and constrains the provider + type for the action. Possible values are Approval, + Build, Deploy, Invoke, Source and Test. + type: string + configuration: + additionalProperties: + type: string + description: A map of the action declaration's configuration. + Configurations options for action types and providers + can be found in the Pipeline Structure Reference + and Action Structure Reference documentation. + type: object + x-kubernetes-map-type: granular + inputArtifacts: + description: A list of artifact names to be worked + on. + items: + type: string + type: array + name: + description: The action declaration's name. + type: string + namespace: + description: The namespace all output variables will + be accessed from. + type: string + outputArtifacts: + description: A list of artifact names to output. Output + artifact names must be unique within a pipeline. 
+ items: + type: string + type: array + owner: + description: The creator of the action being called. + Possible values are AWS, Custom and ThirdParty. + type: string + provider: + description: The provider of the service being called + by the action. Valid providers are determined by + the action category. Provider names are listed in + the Action Structure Reference documentation. + type: string + region: + description: The region in which to run the action. + type: string + roleArn: + description: The ARN of the IAM service role that + will perform the declared action. This is assumed + through the roleArn for the pipeline. + type: string + runOrder: + description: The order in which actions are run. + type: number + version: + description: A string that identifies the action type. + type: string + type: object + type: array + name: + description: The name of the stage. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + trigger: + description: A trigger block. Valid only when pipeline_type is + V2. Triggers are documented below. + items: + properties: + gitConfiguration: + description: Provides the filter criteria and the source + stage for the repository event that starts the pipeline. + For more information, refer to the AWS documentation. + A git_configuration block is documented below. + properties: + pullRequest: + description: The field where the repository event that + will start the pipeline is specified as pull requests. + A pull_request block is documented below. 
+ items: + properties: + branches: + description: The field that specifies to filter + on branches for the pull request trigger configuration. + A branches block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + events: + description: A list that specifies which pull + request events to filter on (opened, updated, + closed) for the trigger configuration. Possible + values are OPEN, UPDATED and CLOSED. + items: + type: string + type: array + filePaths: + description: The field that specifies to filter + on file paths for the pull request trigger configuration. + A file_paths block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + type: object + type: array + push: + description: The field where the repository event that + will start the pipeline, such as pushing Git tags, + is specified with details. A push block is documented + below. + items: + properties: + branches: + description: The field that specifies to filter + on branches for the pull request trigger configuration. + A branches block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. 
+ items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + filePaths: + description: The field that specifies to filter + on file paths for the pull request trigger configuration. + A file_paths block is documented below. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + tags: + description: Key-value map of resource tags. + properties: + excludes: + description: A list of patterns of Git tags + that, when pushed, are to be excluded from + starting the pipeline. + items: + type: string + type: array + includes: + description: A list of patterns of Git tags + that, when pushed, are to be included as + criteria that starts the pipeline. + items: + type: string + type: array + type: object + type: object + type: array + sourceActionName: + description: The name of the pipeline source action + where the trigger configuration. + type: string + type: object + providerType: + description: The source provider for the event. Possible + value is CodeStarSourceConnection. + type: string + type: object + type: array + variable: + description: A pipeline-level variable block. Valid only when + pipeline_type is V2. Variable are documented below. + items: + properties: + defaultValue: + description: The default value of a pipeline-level variable. + type: string + description: + description: The description of a pipeline-level variable. + type: string + name: + description: The name of a pipeline-level variable. 
+ type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/codepipeline.aws.upbound.io_customactiontypes.yaml b/package/crds/codepipeline.aws.upbound.io_customactiontypes.yaml index 11d0afaa2e..0b137f65d2 100644 --- a/package/crds/codepipeline.aws.upbound.io_customactiontypes.yaml +++ b/package/crds/codepipeline.aws.upbound.io_customactiontypes.yaml @@ -672,3 +672,639 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CustomActionType is the Schema for the CustomActionTypes API. + Provides a CodePipeline CustomActionType. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CustomActionTypeSpec defines the desired state of CustomActionType + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + category: + description: 'The category of the custom action. Valid values: + Source, Build, Deploy, Test, Invoke, Approval' + type: string + configurationProperty: + description: The configuration properties for the custom action. + Max 10 items. + items: + properties: + description: + description: The description of the action configuration + property. + type: string + key: + description: Whether the configuration property is a key. + type: boolean + name: + description: The name of the action configuration property. + type: string + queryable: + description: Indicates that the property will be used in + conjunction with PollForJobs. + type: boolean + required: + description: Whether the configuration property is a required + value. + type: boolean + secret: + description: Whether the configuration property is secret. + type: boolean + type: + description: 'The type of the configuration property. 
Valid + values: String, Number, Boolean' + type: string + type: object + type: array + inputArtifactDetails: + description: The details of the input artifact for the action. + properties: + maximumCount: + description: 'The maximum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + minimumCount: + description: 'The minimum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + type: object + outputArtifactDetails: + description: The details of the output artifact of the action. + properties: + maximumCount: + description: 'The maximum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + minimumCount: + description: 'The minimum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + type: object + providerName: + description: The provider of the service used in the custom action + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + settings: + description: The settings for an action type. + properties: + entityUrlTemplate: + description: The URL returned to the AWS CodePipeline console + that provides a deep link to the resources of the external + system. + type: string + executionUrlTemplate: + description: The URL returned to the AWS CodePipeline console + that contains a link to the top-level landing page for the + external system. + type: string + revisionUrlTemplate: + description: The URL returned to the AWS CodePipeline console + that contains a link to the page where customers can update + or change the configuration of the external action. + type: string + thirdPartyConfigurationUrl: + description: The URL of a sign-up page where users can sign + up for an external service and perform initial configuration + of the action provided by that service. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + version: + description: The version identifier of the custom action. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + category: + description: 'The category of the custom action. Valid values: + Source, Build, Deploy, Test, Invoke, Approval' + type: string + configurationProperty: + description: The configuration properties for the custom action. + Max 10 items. + items: + properties: + description: + description: The description of the action configuration + property. + type: string + key: + description: Whether the configuration property is a key. + type: boolean + name: + description: The name of the action configuration property. + type: string + queryable: + description: Indicates that the property will be used in + conjunction with PollForJobs. + type: boolean + required: + description: Whether the configuration property is a required + value. + type: boolean + secret: + description: Whether the configuration property is secret. + type: boolean + type: + description: 'The type of the configuration property. Valid + values: String, Number, Boolean' + type: string + type: object + type: array + inputArtifactDetails: + description: The details of the input artifact for the action. 
+ properties: + maximumCount: + description: 'The maximum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + minimumCount: + description: 'The minimum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + type: object + outputArtifactDetails: + description: The details of the output artifact of the action. + properties: + maximumCount: + description: 'The maximum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + minimumCount: + description: 'The minimum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + type: object + providerName: + description: The provider of the service used in the custom action + type: string + settings: + description: The settings for an action type. + properties: + entityUrlTemplate: + description: The URL returned to the AWS CodePipeline console + that provides a deep link to the resources of the external + system. + type: string + executionUrlTemplate: + description: The URL returned to the AWS CodePipeline console + that contains a link to the top-level landing page for the + external system. + type: string + revisionUrlTemplate: + description: The URL returned to the AWS CodePipeline console + that contains a link to the page where customers can update + or change the configuration of the external action. + type: string + thirdPartyConfigurationUrl: + description: The URL of a sign-up page where users can sign + up for an external service and perform initial configuration + of the action provided by that service. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + version: + description: The version identifier of the custom action. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.category is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.category) + || (has(self.initProvider) && has(self.initProvider.category))' + - message: spec.forProvider.inputArtifactDetails is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.inputArtifactDetails) + || (has(self.initProvider) && has(self.initProvider.inputArtifactDetails))' + - message: spec.forProvider.outputArtifactDetails is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.outputArtifactDetails) + || (has(self.initProvider) && has(self.initProvider.outputArtifactDetails))' + - message: spec.forProvider.providerName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.providerName) + || (has(self.initProvider) && has(self.initProvider.providerName))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: CustomActionTypeStatus defines the observed state of CustomActionType. + properties: + atProvider: + properties: + arn: + description: The action ARN. + type: string + category: + description: 'The category of the custom action. 
Valid values: + Source, Build, Deploy, Test, Invoke, Approval' + type: string + configurationProperty: + description: The configuration properties for the custom action. + Max 10 items. + items: + properties: + description: + description: The description of the action configuration + property. + type: string + key: + description: Whether the configuration property is a key. + type: boolean + name: + description: The name of the action configuration property. + type: string + queryable: + description: Indicates that the property will be used in + conjunction with PollForJobs. + type: boolean + required: + description: Whether the configuration property is a required + value. + type: boolean + secret: + description: Whether the configuration property is secret. + type: boolean + type: + description: 'The type of the configuration property. Valid + values: String, Number, Boolean' + type: string + type: object + type: array + id: + description: Composed of category, provider and version + type: string + inputArtifactDetails: + description: The details of the input artifact for the action. + properties: + maximumCount: + description: 'The maximum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + minimumCount: + description: 'The minimum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + type: object + outputArtifactDetails: + description: The details of the output artifact of the action. + properties: + maximumCount: + description: 'The maximum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + minimumCount: + description: 'The minimum number of artifacts allowed for + the action type. Min: 0, Max: 5' + type: number + type: object + owner: + description: The creator of the action being called. + type: string + providerName: + description: The provider of the service used in the custom action + type: string + settings: + description: The settings for an action type. 
+ properties: + entityUrlTemplate: + description: The URL returned to the AWS CodePipeline console + that provides a deep link to the resources of the external + system. + type: string + executionUrlTemplate: + description: The URL returned to the AWS CodePipeline console + that contains a link to the top-level landing page for the + external system. + type: string + revisionUrlTemplate: + description: The URL returned to the AWS CodePipeline console + that contains a link to the page where customers can update + or change the configuration of the external action. + type: string + thirdPartyConfigurationUrl: + description: The URL of a sign-up page where users can sign + up for an external service and perform initial configuration + of the action provided by that service. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + version: + description: The version identifier of the custom action. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/codepipeline.aws.upbound.io_webhooks.yaml b/package/crds/codepipeline.aws.upbound.io_webhooks.yaml index 31faa47644..cc03b367bb 100644 --- a/package/crds/codepipeline.aws.upbound.io_webhooks.yaml +++ b/package/crds/codepipeline.aws.upbound.io_webhooks.yaml @@ -678,3 +678,657 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Webhook is the Schema for the Webhooks API. 
Provides a CodePipeline + Webhook + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WebhookSpec defines the desired state of Webhook + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authentication: + description: The type of authentication to use. One of IP, GITHUB_HMAC, + or UNAUTHENTICATED. + type: string + authenticationConfiguration: + description: An auth block. Required for IP and GITHUB_HMAC. Auth + blocks are documented below. + properties: + allowedIpRange: + description: A valid CIDR block for IP filtering. Required + for IP. 
+ type: string + secretTokenSecretRef: + description: The shared secret for the GitHub repository webhook. + Set this as secret in your github_repository_webhook's configuration + block. Required for GITHUB_HMAC. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + filter: + description: One or more filter blocks. Filter blocks are documented + below. + items: + properties: + jsonPath: + description: The JSON path to filter on. + type: string + matchEquals: + description: The value to match on (e.g., refs/heads/{Branch}). + See AWS docs for details. + type: string + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + targetAction: + description: The name of the action in a pipeline you want to + connect to the webhook. The action must be from the source (first) + stage of the pipeline. + type: string + targetPipeline: + description: The name of the pipeline. + type: string + targetPipelineRef: + description: Reference to a Codepipeline in codepipeline to populate + targetPipeline. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetPipelineSelector: + description: Selector for a Codepipeline in codepipeline to populate + targetPipeline. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authentication: + description: The type of authentication to use. One of IP, GITHUB_HMAC, + or UNAUTHENTICATED. + type: string + authenticationConfiguration: + description: An auth block. Required for IP and GITHUB_HMAC. Auth + blocks are documented below. + properties: + allowedIpRange: + description: A valid CIDR block for IP filtering. Required + for IP. + type: string + secretTokenSecretRef: + description: The shared secret for the GitHub repository webhook. + Set this as secret in your github_repository_webhook's configuration + block. Required for GITHUB_HMAC. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + filter: + description: One or more filter blocks. Filter blocks are documented + below. + items: + properties: + jsonPath: + description: The JSON path to filter on. + type: string + matchEquals: + description: The value to match on (e.g., refs/heads/{Branch}). + See AWS docs for details. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + targetAction: + description: The name of the action in a pipeline you want to + connect to the webhook. The action must be from the source (first) + stage of the pipeline. + type: string + targetPipeline: + description: The name of the pipeline. 
+ type: string + targetPipelineRef: + description: Reference to a Codepipeline in codepipeline to populate + targetPipeline. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetPipelineSelector: + description: Selector for a Codepipeline in codepipeline to populate + targetPipeline. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.authentication is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authentication) + || (has(self.initProvider) && has(self.initProvider.authentication))' + - message: spec.forProvider.filter is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.filter) + || (has(self.initProvider) && has(self.initProvider.filter))' + - message: spec.forProvider.targetAction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetAction) + || (has(self.initProvider) && has(self.initProvider.targetAction))' + status: + description: WebhookStatus defines the observed state of Webhook. + properties: + atProvider: + properties: + arn: + description: The CodePipeline webhook's ARN. + type: string + authentication: + description: The type of authentication to use. One of IP, GITHUB_HMAC, + or UNAUTHENTICATED. + type: string + authenticationConfiguration: + description: An auth block. Required for IP and GITHUB_HMAC. Auth + blocks are documented below. + properties: + allowedIpRange: + description: A valid CIDR block for IP filtering. Required + for IP. + type: string + type: object + filter: + description: One or more filter blocks. Filter blocks are documented + below. + items: + properties: + jsonPath: + description: The JSON path to filter on. + type: string + matchEquals: + description: The value to match on (e.g., refs/heads/{Branch}). + See AWS docs for details. 
+ type: string + type: object + type: array + id: + description: The CodePipeline webhook's ARN. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + targetAction: + description: The name of the action in a pipeline you want to + connect to the webhook. The action must be from the source (first) + stage of the pipeline. + type: string + targetPipeline: + description: The name of the pipeline. + type: string + url: + description: The CodePipeline webhook's URL. POST events to this + endpoint to trigger the target. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/codestarconnections.aws.upbound.io_hosts.yaml b/package/crds/codestarconnections.aws.upbound.io_hosts.yaml index 2d53682f9c..3555ef614b 100644 --- a/package/crds/codestarconnections.aws.upbound.io_hosts.yaml +++ b/package/crds/codestarconnections.aws.upbound.io_hosts.yaml @@ -486,3 +486,465 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Host is the Schema for the Hosts API. Provides a CodeStar Host + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HostSpec defines the desired state of Host + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: The name of the host to be created. The name must + be unique in the calling AWS account. + type: string + providerEndpoint: + description: The endpoint of the infrastructure to be represented + by the host after it is created. + type: string + providerType: + description: The name of the external provider where your third-party + code repository is configured. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + vpcConfiguration: + description: The VPC configuration to be provisioned for the host. + A VPC must be configured, and the infrastructure to be represented + by the host must already be connected to the VPC. + properties: + securityGroupIds: + description: ID of the security group or security groups associated + with the Amazon VPC connected to the infrastructure where + your provider type is installed. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The ID of the subnet or subnets associated with + the Amazon VPC connected to the infrastructure where your + provider type is installed. + items: + type: string + type: array + x-kubernetes-list-type: set + tlsCertificate: + description: The value of the Transport Layer Security (TLS) + certificate associated with the infrastructure where your + provider type is installed. + type: string + vpcId: + description: The ID of the Amazon VPC connected to the infrastructure + where your provider type is installed. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: The name of the host to be created. The name must + be unique in the calling AWS account. + type: string + providerEndpoint: + description: The endpoint of the infrastructure to be represented + by the host after it is created. + type: string + providerType: + description: The name of the external provider where your third-party + code repository is configured. + type: string + vpcConfiguration: + description: The VPC configuration to be provisioned for the host. + A VPC must be configured, and the infrastructure to be represented + by the host must already be connected to the VPC. 
+ properties: + securityGroupIds: + description: ID of the security group or security groups associated + with the Amazon VPC connected to the infrastructure where + your provider type is installed. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The ID of the subnet or subnets associated with + the Amazon VPC connected to the infrastructure where your + provider type is installed. + items: + type: string + type: array + x-kubernetes-list-type: set + tlsCertificate: + description: The value of the Transport Layer Security (TLS) + certificate associated with the infrastructure where your + provider type is installed. + type: string + vpcId: + description: The ID of the Amazon VPC connected to the infrastructure + where your provider type is installed. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.providerEndpoint is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.providerEndpoint) + || (has(self.initProvider) && has(self.initProvider.providerEndpoint))' + - message: spec.forProvider.providerType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.providerType) + || (has(self.initProvider) && has(self.initProvider.providerType))' + status: + description: HostStatus defines the observed state of Host. + properties: + atProvider: + properties: + arn: + description: The CodeStar Host ARN. + type: string + id: + description: The CodeStar Host ARN. + type: string + name: + description: The name of the host to be created. The name must + be unique in the calling AWS account. + type: string + providerEndpoint: + description: The endpoint of the infrastructure to be represented + by the host after it is created. 
+ type: string + providerType: + description: The name of the external provider where your third-party + code repository is configured. + type: string + status: + description: The CodeStar Host status. Possible values are PENDING, + AVAILABLE, VPC_CONFIG_DELETING, VPC_CONFIG_INITIALIZING, and + VPC_CONFIG_FAILED_INITIALIZATION. + type: string + vpcConfiguration: + description: The VPC configuration to be provisioned for the host. + A VPC must be configured, and the infrastructure to be represented + by the host must already be connected to the VPC. + properties: + securityGroupIds: + description: ID of the security group or security groups associated + with the Amazon VPC connected to the infrastructure where + your provider type is installed. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The ID of the subnet or subnets associated with + the Amazon VPC connected to the infrastructure where your + provider type is installed. + items: + type: string + type: array + x-kubernetes-list-type: set + tlsCertificate: + description: The value of the Transport Layer Security (TLS) + certificate associated with the infrastructure where your + provider type is installed. + type: string + vpcId: + description: The ID of the Amazon VPC connected to the infrastructure + where your provider type is installed. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cognitoidp.aws.upbound.io_riskconfigurations.yaml b/package/crds/cognitoidp.aws.upbound.io_riskconfigurations.yaml index 5b94bd108f..505d16a12f 100644 --- a/package/crds/cognitoidp.aws.upbound.io_riskconfigurations.yaml +++ b/package/crds/cognitoidp.aws.upbound.io_riskconfigurations.yaml @@ -1069,3 +1069,970 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RiskConfiguration is the Schema for the RiskConfigurations API. + Provides a Cognito Risk Configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RiskConfigurationSpec defines the desired state of RiskConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountTakeoverRiskConfiguration: + description: The account takeover risk configuration. See details + below. + properties: + actions: + description: Account takeover risk configuration actions. + See details below. + properties: + highAction: + description: Action to take for a high risk. See action + block below. + properties: + eventAction: + description: The action to take in response to the + account takeover action. Valid values are BLOCK, + MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + type: string + notify: + description: Whether to send a notification. + type: boolean + type: object + lowAction: + description: Action to take for a low risk. See action + block below. + properties: + eventAction: + description: The action to take in response to the + account takeover action. Valid values are BLOCK, + MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + type: string + notify: + description: Whether to send a notification. + type: boolean + type: object + mediumAction: + description: Action to take for a medium risk. 
See action + block below. + properties: + eventAction: + description: The action to take in response to the + account takeover action. Valid values are BLOCK, + MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + type: string + notify: + description: Whether to send a notification. + type: boolean + type: object + type: object + notifyConfiguration: + description: The notify configuration used to construct email + notifications. See details below. + properties: + blockEmail: + description: Email template used when a detected risk + event is blocked. See notify email type below. + properties: + htmlBody: + description: The email HTML body. + type: string + subject: + description: The email subject. + type: string + textBody: + description: The email text body. + type: string + type: object + from: + description: The email address that is sending the email. + The address must be either individually verified with + Amazon Simple Email Service, or from a domain that has + been verified with Amazon SES. + type: string + mfaEmail: + description: The multi-factor authentication (MFA) email + template used when MFA is challenged as part of a detected + risk. See notify email type below. + properties: + htmlBody: + description: The email HTML body. + type: string + subject: + description: The email subject. + type: string + textBody: + description: The email text body. + type: string + type: object + noActionEmail: + description: The email template used when a detected risk + event is allowed. See notify email type below. + properties: + htmlBody: + description: The email HTML body. + type: string + subject: + description: The email subject. + type: string + textBody: + description: The email text body. + type: string + type: object + replyTo: + description: The destination to which the receiver of + an email should reply to. + type: string + sourceArn: + description: The Amazon Resource Name (ARN) of the identity + that is associated with the sending authorization policy. 
+ This identity permits Amazon Cognito to send for the + email address specified in the From parameter. + type: string + type: object + type: object + clientId: + description: The app client ID. When the client ID is not provided, + the same risk configuration is applied to all the clients in + the User Pool. + type: string + compromisedCredentialsRiskConfiguration: + description: The compromised credentials risk configuration. See + details below. + properties: + actions: + description: The compromised credentials risk configuration + actions. See details below. + properties: + eventAction: + description: The action to take in response to the account + takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, + MFA_REQUIRED and NO_ACTION. + type: string + type: object + eventFilter: + description: Perform the action for these events. The default + is to perform all events if no event filter is specified. + Valid values are SIGN_IN, PASSWORD_CHANGE, and SIGN_UP. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + riskExceptionConfiguration: + description: The configuration to override the risk decision. + See details below. + properties: + blockedIpRangeList: + description: |- + Overrides the risk decision to always block the pre-authentication requests. + The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix. + Can contain a maximum of 200 items. + items: + type: string + type: array + x-kubernetes-list-type: set + skippedIpRangeList: + description: |- + Risk detection isn't performed on the IP addresses in this range list. + The IP range is in CIDR notation. + Can contain a maximum of 200 items. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + userPoolId: + description: The user pool ID. 
+ type: string + userPoolIdRef: + description: Reference to a UserPool in cognitoidp to populate + userPoolId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolIdSelector: + description: Selector for a UserPool in cognitoidp to populate + userPoolId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountTakeoverRiskConfiguration: + description: The account takeover risk configuration. See details + below. + properties: + actions: + description: Account takeover risk configuration actions. + See details below. + properties: + highAction: + description: Action to take for a high risk. See action + block below. + properties: + eventAction: + description: The action to take in response to the + account takeover action. Valid values are BLOCK, + MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + type: string + notify: + description: Whether to send a notification. + type: boolean + type: object + lowAction: + description: Action to take for a low risk. See action + block below. + properties: + eventAction: + description: The action to take in response to the + account takeover action. Valid values are BLOCK, + MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + type: string + notify: + description: Whether to send a notification. 
+ type: boolean + type: object + mediumAction: + description: Action to take for a medium risk. See action + block below. + properties: + eventAction: + description: The action to take in response to the + account takeover action. Valid values are BLOCK, + MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + type: string + notify: + description: Whether to send a notification. + type: boolean + type: object + type: object + notifyConfiguration: + description: The notify configuration used to construct email + notifications. See details below. + properties: + blockEmail: + description: Email template used when a detected risk + event is blocked. See notify email type below. + properties: + htmlBody: + description: The email HTML body. + type: string + subject: + description: The email subject. + type: string + textBody: + description: The email text body. + type: string + type: object + from: + description: The email address that is sending the email. + The address must be either individually verified with + Amazon Simple Email Service, or from a domain that has + been verified with Amazon SES. + type: string + mfaEmail: + description: The multi-factor authentication (MFA) email + template used when MFA is challenged as part of a detected + risk. See notify email type below. + properties: + htmlBody: + description: The email HTML body. + type: string + subject: + description: The email subject. + type: string + textBody: + description: The email text body. + type: string + type: object + noActionEmail: + description: The email template used when a detected risk + event is allowed. See notify email type below. + properties: + htmlBody: + description: The email HTML body. + type: string + subject: + description: The email subject. + type: string + textBody: + description: The email text body. + type: string + type: object + replyTo: + description: The destination to which the receiver of + an email should reply to. 
+ type: string + sourceArn: + description: The Amazon Resource Name (ARN) of the identity + that is associated with the sending authorization policy. + This identity permits Amazon Cognito to send for the + email address specified in the From parameter. + type: string + type: object + type: object + clientId: + description: The app client ID. When the client ID is not provided, + the same risk configuration is applied to all the clients in + the User Pool. + type: string + compromisedCredentialsRiskConfiguration: + description: The compromised credentials risk configuration. See + details below. + properties: + actions: + description: The compromised credentials risk configuration + actions. See details below. + properties: + eventAction: + description: The action to take in response to the account + takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, + MFA_REQUIRED and NO_ACTION. + type: string + type: object + eventFilter: + description: Perform the action for these events. The default + is to perform all events if no event filter is specified. + Valid values are SIGN_IN, PASSWORD_CHANGE, and SIGN_UP. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + riskExceptionConfiguration: + description: The configuration to override the risk decision. + See details below. + properties: + blockedIpRangeList: + description: |- + Overrides the risk decision to always block the pre-authentication requests. + The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix. + Can contain a maximum of 200 items. + items: + type: string + type: array + x-kubernetes-list-type: set + skippedIpRangeList: + description: |- + Risk detection isn't performed on the IP addresses in this range list. + The IP range is in CIDR notation. + Can contain a maximum of 200 items. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + userPoolId: + description: The user pool ID. 
+ type: string + userPoolIdRef: + description: Reference to a UserPool in cognitoidp to populate + userPoolId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolIdSelector: + description: Selector for a UserPool in cognitoidp to populate + userPoolId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: RiskConfigurationStatus defines the observed state of RiskConfiguration. 
+ properties: + atProvider: + properties: + accountTakeoverRiskConfiguration: + description: The account takeover risk configuration. See details + below. + properties: + actions: + description: Account takeover risk configuration actions. + See details below. + properties: + highAction: + description: Action to take for a high risk. See action + block below. + properties: + eventAction: + description: The action to take in response to the + account takeover action. Valid values are BLOCK, + MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + type: string + notify: + description: Whether to send a notification. + type: boolean + type: object + lowAction: + description: Action to take for a low risk. See action + block below. + properties: + eventAction: + description: The action to take in response to the + account takeover action. Valid values are BLOCK, + MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + type: string + notify: + description: Whether to send a notification. + type: boolean + type: object + mediumAction: + description: Action to take for a medium risk. See action + block below. + properties: + eventAction: + description: The action to take in response to the + account takeover action. Valid values are BLOCK, + MFA_IF_CONFIGURED, MFA_REQUIRED and NO_ACTION. + type: string + notify: + description: Whether to send a notification. + type: boolean + type: object + type: object + notifyConfiguration: + description: The notify configuration used to construct email + notifications. See details below. + properties: + blockEmail: + description: Email template used when a detected risk + event is blocked. See notify email type below. + properties: + htmlBody: + description: The email HTML body. + type: string + subject: + description: The email subject. + type: string + textBody: + description: The email text body. + type: string + type: object + from: + description: The email address that is sending the email. 
+ The address must be either individually verified with + Amazon Simple Email Service, or from a domain that has + been verified with Amazon SES. + type: string + mfaEmail: + description: The multi-factor authentication (MFA) email + template used when MFA is challenged as part of a detected + risk. See notify email type below. + properties: + htmlBody: + description: The email HTML body. + type: string + subject: + description: The email subject. + type: string + textBody: + description: The email text body. + type: string + type: object + noActionEmail: + description: The email template used when a detected risk + event is allowed. See notify email type below. + properties: + htmlBody: + description: The email HTML body. + type: string + subject: + description: The email subject. + type: string + textBody: + description: The email text body. + type: string + type: object + replyTo: + description: The destination to which the receiver of + an email should reply to. + type: string + sourceArn: + description: The Amazon Resource Name (ARN) of the identity + that is associated with the sending authorization policy. + This identity permits Amazon Cognito to send for the + email address specified in the From parameter. + type: string + type: object + type: object + clientId: + description: The app client ID. When the client ID is not provided, + the same risk configuration is applied to all the clients in + the User Pool. + type: string + compromisedCredentialsRiskConfiguration: + description: The compromised credentials risk configuration. See + details below. + properties: + actions: + description: The compromised credentials risk configuration + actions. See details below. + properties: + eventAction: + description: The action to take in response to the account + takeover action. Valid values are BLOCK, MFA_IF_CONFIGURED, + MFA_REQUIRED and NO_ACTION. + type: string + type: object + eventFilter: + description: Perform the action for these events. 
The default + is to perform all events if no event filter is specified. + Valid values are SIGN_IN, PASSWORD_CHANGE, and SIGN_UP. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: 'The user pool ID or the user pool ID and Client + Id separated by a : if the configuration is client specific.' + type: string + riskExceptionConfiguration: + description: The configuration to override the risk decision. + See details below. + properties: + blockedIpRangeList: + description: |- + Overrides the risk decision to always block the pre-authentication requests. + The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix. + Can contain a maximum of 200 items. + items: + type: string + type: array + x-kubernetes-list-type: set + skippedIpRangeList: + description: |- + Risk detection isn't performed on the IP addresses in this range list. + The IP range is in CIDR notation. + Can contain a maximum of 200 items. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + userPoolId: + description: The user pool ID. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cognitoidp.aws.upbound.io_userpools.yaml b/package/crds/cognitoidp.aws.upbound.io_userpools.yaml index 3e259f184d..41828f0b91 100644 --- a/package/crds/cognitoidp.aws.upbound.io_userpools.yaml +++ b/package/crds/cognitoidp.aws.upbound.io_userpools.yaml @@ -3933,3 +3933,3789 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: UserPool is the Schema for the UserPools API. Provides a Cognito + User Pool resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserPoolSpec defines the desired state of UserPool + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountRecoverySetting: + description: Configuration block to define which verified available + method a user can use to recover their forgotten password. Detailed + below. + properties: + recoveryMechanism: + description: 'List of Account Recovery Options of the following + structure:' + items: + properties: + name: + description: Name of the user pool. + type: string + priority: + description: Positive integer specifying priority of + a method with 1 being the highest priority. 
+ type: number + type: object + type: array + type: object + adminCreateUserConfig: + description: Configuration block for creating a new user profile. + Detailed below. + properties: + allowAdminCreateUserOnly: + description: Set to True if only the administrator is allowed + to create user profiles. Set to False if users can sign + themselves up via an app. + type: boolean + inviteMessageTemplate: + description: Invite message template structure. Detailed below. + properties: + emailMessage: + description: Message template for email messages. Must + contain {username} and {####} placeholders, for username + and temporary password, respectively. + type: string + emailSubject: + description: Subject line for email messages. + type: string + smsMessage: + description: Message template for SMS messages. Must contain + {username} and {####} placeholders, for username and + temporary password, respectively. + type: string + type: object + type: object + aliasAttributes: + description: 'Attributes supported as an alias for this user pool. + Valid values: phone_number, email, or preferred_username. Conflicts + with username_attributes.' + items: + type: string + type: array + x-kubernetes-list-type: set + autoVerifiedAttributes: + description: 'Attributes to be auto-verified. Valid values: email, + phone_number.' + items: + type: string + type: array + x-kubernetes-list-type: set + deletionProtection: + description: When active, DeletionProtection prevents accidental + deletion of your user pool. Before you can delete a user pool + that you have protected against deletion, you must deactivate + this feature. Valid values are ACTIVE and INACTIVE, Default + value is INACTIVE. + type: string + deviceConfiguration: + description: Configuration block for the user pool's device tracking. + Detailed below. + properties: + challengeRequiredOnNewDevice: + description: Whether a challenge is required on a new device. + Only applicable to a new device. 
+ type: boolean + deviceOnlyRememberedOnUserPrompt: + description: Whether a device is only remembered on user prompt. + false equates to "Always" remember, true is "User Opt In," + and not using a device_configuration block is "No." + type: boolean + type: object + emailConfiguration: + description: Configuration block for configuring email. Detailed + below. + properties: + configurationSet: + description: Email configuration set name from SES. + type: string + emailSendingAccount: + description: Email delivery method to use. COGNITO_DEFAULT + for the default email functionality built into Cognito or + DEVELOPER to use your Amazon SES configuration. Required + to be DEVELOPER if from_email_address is set. + type: string + fromEmailAddress: + description: Sender’s email address or sender’s display name + with their email address (e.g., john@example.com, John Smith + or \"John Smith Ph.D.\" ). + Escaped double quotes are required around display names + that contain certain characters as specified in RFC 5322. + type: string + replyToEmailAddress: + description: REPLY-TO email address. + type: string + sourceArn: + description: ARN of the SES verified email identity to use. + Required if email_sending_account is set to DEVELOPER. + type: string + type: object + emailVerificationMessage: + description: String representing the email verification message. + Conflicts with verification_message_template configuration block + email_message argument. + type: string + emailVerificationSubject: + description: String representing the email verification subject. + Conflicts with verification_message_template configuration block + email_subject argument. + type: string + lambdaConfig: + description: Configuration block for the AWS Lambda triggers associated + with the user pool. Detailed below. + properties: + createAuthChallenge: + description: ARN of the lambda creating an authentication + challenge. 
+ type: string + createAuthChallengeRef: + description: Reference to a Function in lambda to populate + createAuthChallenge. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + createAuthChallengeSelector: + description: Selector for a Function in lambda to populate + createAuthChallenge. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customEmailSender: + description: A custom email sender AWS Lambda trigger. See + custom_email_sender Below. + properties: + lambdaArn: + description: The Lambda Amazon Resource Name of the Lambda + function that Amazon Cognito triggers to send email + notifications to users. + type: string + lambdaArnRef: + description: Reference to a Function in lambda to populate + lambdaArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + lambdaArnSelector: + description: Selector for a Function in lambda to populate + lambdaArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + lambdaVersion: + description: The Lambda version represents the signature + of the "request" attribute in the "event" information + Amazon Cognito passes to your custom email Lambda function. + The only supported value is V1_0. + type: string + type: object + customMessage: + description: Custom Message AWS Lambda trigger. + type: string + customMessageRef: + description: Reference to a Function in lambda to populate + customMessage. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + customMessageSelector: + description: Selector for a Function in lambda to populate + customMessage. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSmsSender: + description: A custom SMS sender AWS Lambda trigger. See custom_sms_sender + Below. + properties: + lambdaArn: + description: The Lambda Amazon Resource Name of the Lambda + function that Amazon Cognito triggers to send SMS notifications + to users. + type: string + lambdaArnRef: + description: Reference to a Function in lambda to populate + lambdaArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + lambdaArnSelector: + description: Selector for a Function in lambda to populate + lambdaArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + lambdaVersion: + description: The Lambda version represents the signature + of the "request" attribute in the "event" information + Amazon Cognito passes to your custom SMS Lambda function. + The only supported value is V1_0. + type: string + type: object + defineAuthChallenge: + description: Defines the authentication challenge. + type: string + defineAuthChallengeRef: + description: Reference to a Function in lambda to populate + defineAuthChallenge. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defineAuthChallengeSelector: + description: Selector for a Function in lambda to populate + defineAuthChallenge. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsKeyId: + description: The Amazon Resource Name of Key Management Service + Customer master keys. Amazon Cognito uses the key to encrypt + codes and temporary passwords sent to CustomEmailSender + and CustomSMSSender. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + postAuthentication: + description: Post-authentication AWS Lambda trigger. + type: string + postAuthenticationRef: + description: Reference to a Function in lambda to populate + postAuthentication. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + postAuthenticationSelector: + description: Selector for a Function in lambda to populate + postAuthentication. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + postConfirmation: + description: Post-confirmation AWS Lambda trigger. + type: string + postConfirmationRef: + description: Reference to a Function in lambda to populate + postConfirmation. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + postConfirmationSelector: + description: Selector for a Function in lambda to populate + postConfirmation. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preAuthentication: + description: Pre-authentication AWS Lambda trigger. + type: string + preAuthenticationRef: + description: Reference to a Function in lambda to populate + preAuthentication. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + preAuthenticationSelector: + description: Selector for a Function in lambda to populate + preAuthentication. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preSignUp: + description: Pre-registration AWS Lambda trigger. + type: string + preSignUpRef: + description: Reference to a Function in lambda to populate + preSignUp. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + preSignUpSelector: + description: Selector for a Function in lambda to populate + preSignUp. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preTokenGeneration: + description: Allow to customize identity token claims before + token generation. Set this parameter for legacy purposes; + for new instances of pre token generation triggers, set + the lambda_arn of pre_token_generation_config. + type: string + preTokenGenerationConfig: + description: Allow to customize access tokens. See pre_token_configuration_type + properties: + lambdaArn: + description: The Lambda Amazon Resource Name of the Lambda + function that Amazon Cognito triggers to send SMS notifications + to users. + type: string + lambdaVersion: + description: The Lambda version represents the signature + of the "version" attribute in the "event" information + Amazon Cognito passes to your pre Token Generation Lambda + function. The supported values are V1_0, V2_0. + type: string + type: object + preTokenGenerationRef: + description: Reference to a Function in lambda to populate + preTokenGeneration. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + preTokenGenerationSelector: + description: Selector for a Function in lambda to populate + preTokenGeneration. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userMigration: + description: User migration Lambda config type. 
+ type: string + userMigrationRef: + description: Reference to a Function in lambda to populate + userMigration. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userMigrationSelector: + description: Selector for a Function in lambda to populate + userMigration. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + verifyAuthChallengeResponse: + description: Verifies the authentication challenge response. + type: string + verifyAuthChallengeResponseRef: + description: Reference to a Function in lambda to populate + verifyAuthChallengeResponse. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + verifyAuthChallengeResponseSelector: + description: Selector for a Function in lambda to populate + verifyAuthChallengeResponse. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + mfaConfiguration: + description: Multi-Factor Authentication (MFA) configuration for + the User Pool. Defaults of OFF. Valid values are OFF (MFA Tokens + are not required), ON (MFA is required for all users to sign + in; requires at least one of sms_configuration or software_token_mfa_configuration + to be configured), or OPTIONAL (MFA Will be required only for + individual users who have MFA Enabled; requires at least one + of sms_configuration or software_token_mfa_configuration to + be configured). + type: string + name: + description: Name of the user pool. + type: string + passwordPolicy: + description: Configuration block for information about the user + pool password policy. Detailed below. + properties: + minimumLength: + description: Minimum length of the password policy that you + have set. + type: number + requireLowercase: + description: Whether you have required users to use at least + one lowercase letter in their password. + type: boolean + requireNumbers: + description: Whether you have required users to use at least + one number in their password. + type: boolean + requireSymbols: + description: Whether you have required users to use at least + one symbol in their password. 
+ type: boolean + requireUppercase: + description: Whether you have required users to use at least + one uppercase letter in their password. + type: boolean + temporaryPasswordValidityDays: + description: In the password policy you have set, refers to + the number of days a temporary password is valid. If the + user does not sign-in during this time, their password will + need to be reset by an administrator. + type: number + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + schema: + description: Configuration block for the schema attributes of + a user pool. Detailed below. Schema attributes from the standard + attribute set only need to be specified if they are different + from the default configuration. Attributes can be added, but + not modified or removed. Maximum of 50 attributes. + items: + properties: + attributeDataType: + description: Attribute data type. Must be one of Boolean, + Number, String, DateTime. + type: string + developerOnlyAttribute: + description: Whether the attribute type is developer only. + type: boolean + mutable: + description: Whether the attribute can be changed once it + has been created. + type: boolean + name: + description: Name of the user pool. + type: string + numberAttributeConstraints: + description: Configuration block for the constraints for + an attribute of the number type. Detailed below. + properties: + maxValue: + description: Maximum value of an attribute that is of + the number data type. + type: string + minValue: + description: Minimum value of an attribute that is of + the number data type. + type: string + type: object + required: + description: Whether a user pool attribute is required. + If the attribute is required and the user does not provide + a value, registration or sign-in will fail. + type: boolean + stringAttributeConstraints: + description: Constraints for an attribute of the string + type. Detailed below. 
+ properties: + maxLength: + description: Maximum length of an attribute value of + the string type. + type: string + minLength: + description: Minimum length of an attribute value of + the string type. + type: string + type: object + type: object + type: array + smsAuthenticationMessage: + description: String representing the SMS authentication message. + The Message must contain the {####} placeholder, which will + be replaced with the code. + type: string + smsConfiguration: + description: Configuration block for Short Message Service (SMS) + settings. Detailed below. These settings apply to SMS user verification + and SMS Multi-Factor Authentication (MFA). Due to Cognito API + restrictions, the SMS configuration cannot be removed without + recreating the Cognito User Pool. For user data safety, this + resource will ignore the removal of this configuration by disabling + drift detection. To force resource recreation after this configuration + has been applied, see the taint command. + properties: + externalId: + description: External ID used in IAM role trust relationships. + For more information about using external IDs, see How to + Use an External ID When Granting Access to Your AWS Resources + to a Third Party. + type: string + snsCallerArn: + description: ARN of the Amazon SNS caller. This is usually + the IAM role that you've given Cognito permission to assume. + type: string + snsCallerArnRef: + description: Reference to a Role in iam to populate snsCallerArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snsCallerArnSelector: + description: Selector for a Role in iam to populate snsCallerArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + snsRegion: + description: The AWS Region to use with Amazon SNS integration. + You can choose the same Region as your user pool, or a supported + Legacy Amazon SNS alternate Region. Amazon Cognito resources + in the Asia Pacific (Seoul) AWS Region must use your Amazon + SNS configuration in the Asia Pacific (Tokyo) Region. 
For + more information, see SMS message settings for Amazon Cognito + user pools. + type: string + type: object + smsVerificationMessage: + description: String representing the SMS verification message. + Conflicts with verification_message_template configuration block + sms_message argument. + type: string + softwareTokenMfaConfiguration: + description: Configuration block for software token Mult-Factor + Authentication (MFA) settings. Detailed below. + properties: + enabled: + description: Boolean whether to enable software token Multi-Factor + (MFA) tokens, such as Time-based One-Time Password (TOTP). + To disable software token MFA When sms_configuration is + not present, the mfa_configuration argument must be set + to OFF and the software_token_mfa_configuration configuration + block must be fully removed. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userAttributeUpdateSettings: + description: Configuration block for user attribute update settings. + Detailed below. + properties: + attributesRequireVerificationBeforeUpdate: + description: 'A list of attributes requiring verification + before update. If set, the provided value(s) must also be + set in auto_verified_attributes. Valid values: email, phone_number.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + userPoolAddOns: + description: Configuration block for user pool add-ons to enable + user pool advanced security mode features. Detailed below. + properties: + advancedSecurityMode: + description: Mode for advanced security, must be one of OFF, + AUDIT or ENFORCED. + type: string + type: object + usernameAttributes: + description: Whether email addresses or phone numbers can be specified + as usernames when a user signs up. Conflicts with alias_attributes. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + usernameConfiguration: + description: Configuration block for username configuration. Detailed + below. + properties: + caseSensitive: + description: Whether username case sensitivity will be applied + for all users in the user pool through Cognito APIs. + type: boolean + type: object + verificationMessageTemplate: + description: Configuration block for verification message templates. + Detailed below. + properties: + defaultEmailOption: + description: Default email option. Must be either CONFIRM_WITH_CODE + or CONFIRM_WITH_LINK. Defaults to CONFIRM_WITH_CODE. + type: string + emailMessage: + description: Email message template. Must contain the {####} + placeholder. Conflicts with email_verification_message argument. + type: string + emailMessageByLink: + description: Email message template for sending a confirmation + link to the user, it must contain the {##Click Here##} placeholder. + type: string + emailSubject: + description: Subject line for the email message template. + Conflicts with email_verification_subject argument. + type: string + emailSubjectByLink: + description: Subject line for the email message template for + sending a confirmation link to the user. + type: string + smsMessage: + description: SMS message template. Must contain the {####} + placeholder. Conflicts with sms_verification_message argument. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountRecoverySetting: + description: Configuration block to define which verified available + method a user can use to recover their forgotten password. Detailed + below. + properties: + recoveryMechanism: + description: 'List of Account Recovery Options of the following + structure:' + items: + properties: + name: + description: Name of the user pool. + type: string + priority: + description: Positive integer specifying priority of + a method with 1 being the highest priority. + type: number + type: object + type: array + type: object + adminCreateUserConfig: + description: Configuration block for creating a new user profile. + Detailed below. + properties: + allowAdminCreateUserOnly: + description: Set to True if only the administrator is allowed + to create user profiles. Set to False if users can sign + themselves up via an app. + type: boolean + inviteMessageTemplate: + description: Invite message template structure. Detailed below. + properties: + emailMessage: + description: Message template for email messages. Must + contain {username} and {####} placeholders, for username + and temporary password, respectively. + type: string + emailSubject: + description: Subject line for email messages. + type: string + smsMessage: + description: Message template for SMS messages. Must contain + {username} and {####} placeholders, for username and + temporary password, respectively. + type: string + type: object + type: object + aliasAttributes: + description: 'Attributes supported as an alias for this user pool. + Valid values: phone_number, email, or preferred_username. Conflicts + with username_attributes.' + items: + type: string + type: array + x-kubernetes-list-type: set + autoVerifiedAttributes: + description: 'Attributes to be auto-verified. 
Valid values: email, + phone_number.' + items: + type: string + type: array + x-kubernetes-list-type: set + deletionProtection: + description: When active, DeletionProtection prevents accidental + deletion of your user pool. Before you can delete a user pool + that you have protected against deletion, you must deactivate + this feature. Valid values are ACTIVE and INACTIVE, Default + value is INACTIVE. + type: string + deviceConfiguration: + description: Configuration block for the user pool's device tracking. + Detailed below. + properties: + challengeRequiredOnNewDevice: + description: Whether a challenge is required on a new device. + Only applicable to a new device. + type: boolean + deviceOnlyRememberedOnUserPrompt: + description: Whether a device is only remembered on user prompt. + false equates to "Always" remember, true is "User Opt In," + and not using a device_configuration block is "No." + type: boolean + type: object + emailConfiguration: + description: Configuration block for configuring email. Detailed + below. + properties: + configurationSet: + description: Email configuration set name from SES. + type: string + emailSendingAccount: + description: Email delivery method to use. COGNITO_DEFAULT + for the default email functionality built into Cognito or + DEVELOPER to use your Amazon SES configuration. Required + to be DEVELOPER if from_email_address is set. + type: string + fromEmailAddress: + description: Sender’s email address or sender’s display name + with their email address (e.g., john@example.com, John Smith + or \"John Smith Ph.D.\" ). + Escaped double quotes are required around display names + that contain certain characters as specified in RFC 5322. + type: string + replyToEmailAddress: + description: REPLY-TO email address. + type: string + sourceArn: + description: ARN of the SES verified email identity to use. + Required if email_sending_account is set to DEVELOPER. 
+ type: string + type: object + emailVerificationMessage: + description: String representing the email verification message. + Conflicts with verification_message_template configuration block + email_message argument. + type: string + emailVerificationSubject: + description: String representing the email verification subject. + Conflicts with verification_message_template configuration block + email_subject argument. + type: string + lambdaConfig: + description: Configuration block for the AWS Lambda triggers associated + with the user pool. Detailed below. + properties: + createAuthChallenge: + description: ARN of the lambda creating an authentication + challenge. + type: string + createAuthChallengeRef: + description: Reference to a Function in lambda to populate + createAuthChallenge. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + createAuthChallengeSelector: + description: Selector for a Function in lambda to populate + createAuthChallenge. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customEmailSender: + description: A custom email sender AWS Lambda trigger. See + custom_email_sender Below. + properties: + lambdaArn: + description: The Lambda Amazon Resource Name of the Lambda + function that Amazon Cognito triggers to send email + notifications to users. + type: string + lambdaArnRef: + description: Reference to a Function in lambda to populate + lambdaArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + lambdaArnSelector: + description: Selector for a Function in lambda to populate + lambdaArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + lambdaVersion: + description: The Lambda version represents the signature + of the "request" attribute in the "event" information + Amazon Cognito passes to your custom email Lambda function. + The only supported value is V1_0. + type: string + type: object + customMessage: + description: Custom Message AWS Lambda trigger. + type: string + customMessageRef: + description: Reference to a Function in lambda to populate + customMessage. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + customMessageSelector: + description: Selector for a Function in lambda to populate + customMessage. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSmsSender: + description: A custom SMS sender AWS Lambda trigger. See custom_sms_sender + Below. + properties: + lambdaArn: + description: The Lambda Amazon Resource Name of the Lambda + function that Amazon Cognito triggers to send SMS notifications + to users. + type: string + lambdaArnRef: + description: Reference to a Function in lambda to populate + lambdaArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + lambdaArnSelector: + description: Selector for a Function in lambda to populate + lambdaArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + lambdaVersion: + description: The Lambda version represents the signature + of the "request" attribute in the "event" information + Amazon Cognito passes to your custom SMS Lambda function. + The only supported value is V1_0. + type: string + type: object + defineAuthChallenge: + description: Defines the authentication challenge. + type: string + defineAuthChallengeRef: + description: Reference to a Function in lambda to populate + defineAuthChallenge. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defineAuthChallengeSelector: + description: Selector for a Function in lambda to populate + defineAuthChallenge. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsKeyId: + description: The Amazon Resource Name of Key Management Service + Customer master keys. Amazon Cognito uses the key to encrypt + codes and temporary passwords sent to CustomEmailSender + and CustomSMSSender. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + postAuthentication: + description: Post-authentication AWS Lambda trigger. 
+ type: string + postAuthenticationRef: + description: Reference to a Function in lambda to populate + postAuthentication. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + postAuthenticationSelector: + description: Selector for a Function in lambda to populate + postAuthentication. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + postConfirmation: + description: Post-confirmation AWS Lambda trigger. + type: string + postConfirmationRef: + description: Reference to a Function in lambda to populate + postConfirmation. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + postConfirmationSelector: + description: Selector for a Function in lambda to populate + postConfirmation. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preAuthentication: + description: Pre-authentication AWS Lambda trigger. + type: string + preAuthenticationRef: + description: Reference to a Function in lambda to populate + preAuthentication. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + preAuthenticationSelector: + description: Selector for a Function in lambda to populate + preAuthentication. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preSignUp: + description: Pre-registration AWS Lambda trigger. + type: string + preSignUpRef: + description: Reference to a Function in lambda to populate + preSignUp. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + preSignUpSelector: + description: Selector for a Function in lambda to populate + preSignUp. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preTokenGeneration: + description: Allow to customize identity token claims before + token generation. Set this parameter for legacy purposes; + for new instances of pre token generation triggers, set + the lambda_arn of pre_token_generation_config. + type: string + preTokenGenerationConfig: + description: Allow to customize access tokens. See pre_token_configuration_type + properties: + lambdaArn: + description: The Lambda Amazon Resource Name of the Lambda + function that Amazon Cognito triggers to send SMS notifications + to users. 
+ type: string + lambdaVersion: + description: The Lambda version represents the signature + of the "version" attribute in the "event" information + Amazon Cognito passes to your pre Token Generation Lambda + function. The supported values are V1_0, V2_0. + type: string + type: object + preTokenGenerationRef: + description: Reference to a Function in lambda to populate + preTokenGeneration. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + preTokenGenerationSelector: + description: Selector for a Function in lambda to populate + preTokenGeneration. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userMigration: + description: User migration Lambda config type. + type: string + userMigrationRef: + description: Reference to a Function in lambda to populate + userMigration. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userMigrationSelector: + description: Selector for a Function in lambda to populate + userMigration. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + verifyAuthChallengeResponse: + description: Verifies the authentication challenge response. + type: string + verifyAuthChallengeResponseRef: + description: Reference to a Function in lambda to populate + verifyAuthChallengeResponse. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + verifyAuthChallengeResponseSelector: + description: Selector for a Function in lambda to populate + verifyAuthChallengeResponse. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + mfaConfiguration: + description: Multi-Factor Authentication (MFA) configuration for + the User Pool. Defaults of OFF. Valid values are OFF (MFA Tokens + are not required), ON (MFA is required for all users to sign + in; requires at least one of sms_configuration or software_token_mfa_configuration + to be configured), or OPTIONAL (MFA Will be required only for + individual users who have MFA Enabled; requires at least one + of sms_configuration or software_token_mfa_configuration to + be configured). + type: string + name: + description: Name of the user pool. 
+ type: string + passwordPolicy: + description: Configuration block for information about the user + pool password policy. Detailed below. + properties: + minimumLength: + description: Minimum length of the password policy that you + have set. + type: number + requireLowercase: + description: Whether you have required users to use at least + one lowercase letter in their password. + type: boolean + requireNumbers: + description: Whether you have required users to use at least + one number in their password. + type: boolean + requireSymbols: + description: Whether you have required users to use at least + one symbol in their password. + type: boolean + requireUppercase: + description: Whether you have required users to use at least + one uppercase letter in their password. + type: boolean + temporaryPasswordValidityDays: + description: In the password policy you have set, refers to + the number of days a temporary password is valid. If the + user does not sign-in during this time, their password will + need to be reset by an administrator. + type: number + type: object + schema: + description: Configuration block for the schema attributes of + a user pool. Detailed below. Schema attributes from the standard + attribute set only need to be specified if they are different + from the default configuration. Attributes can be added, but + not modified or removed. Maximum of 50 attributes. + items: + properties: + attributeDataType: + description: Attribute data type. Must be one of Boolean, + Number, String, DateTime. + type: string + developerOnlyAttribute: + description: Whether the attribute type is developer only. + type: boolean + mutable: + description: Whether the attribute can be changed once it + has been created. + type: boolean + name: + description: Name of the user pool. + type: string + numberAttributeConstraints: + description: Configuration block for the constraints for + an attribute of the number type. Detailed below. 
+ properties: + maxValue: + description: Maximum value of an attribute that is of + the number data type. + type: string + minValue: + description: Minimum value of an attribute that is of + the number data type. + type: string + type: object + required: + description: Whether a user pool attribute is required. + If the attribute is required and the user does not provide + a value, registration or sign-in will fail. + type: boolean + stringAttributeConstraints: + description: Constraints for an attribute of the string + type. Detailed below. + properties: + maxLength: + description: Maximum length of an attribute value of + the string type. + type: string + minLength: + description: Minimum length of an attribute value of + the string type. + type: string + type: object + type: object + type: array + smsAuthenticationMessage: + description: String representing the SMS authentication message. + The Message must contain the {####} placeholder, which will + be replaced with the code. + type: string + smsConfiguration: + description: Configuration block for Short Message Service (SMS) + settings. Detailed below. These settings apply to SMS user verification + and SMS Multi-Factor Authentication (MFA). Due to Cognito API + restrictions, the SMS configuration cannot be removed without + recreating the Cognito User Pool. For user data safety, this + resource will ignore the removal of this configuration by disabling + drift detection. To force resource recreation after this configuration + has been applied, see the taint command. + properties: + externalId: + description: External ID used in IAM role trust relationships. + For more information about using external IDs, see How to + Use an External ID When Granting Access to Your AWS Resources + to a Third Party. + type: string + snsCallerArn: + description: ARN of the Amazon SNS caller. This is usually + the IAM role that you've given Cognito permission to assume. 
+ type: string + snsCallerArnRef: + description: Reference to a Role in iam to populate snsCallerArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snsCallerArnSelector: + description: Selector for a Role in iam to populate snsCallerArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + snsRegion: + description: The AWS Region to use with Amazon SNS integration. + You can choose the same Region as your user pool, or a supported + Legacy Amazon SNS alternate Region. Amazon Cognito resources + in the Asia Pacific (Seoul) AWS Region must use your Amazon + SNS configuration in the Asia Pacific (Tokyo) Region. For + more information, see SMS message settings for Amazon Cognito + user pools. + type: string + type: object + smsVerificationMessage: + description: String representing the SMS verification message. + Conflicts with verification_message_template configuration block + sms_message argument. + type: string + softwareTokenMfaConfiguration: + description: Configuration block for software token Mult-Factor + Authentication (MFA) settings. Detailed below. + properties: + enabled: + description: Boolean whether to enable software token Multi-Factor + (MFA) tokens, such as Time-based One-Time Password (TOTP). + To disable software token MFA When sms_configuration is + not present, the mfa_configuration argument must be set + to OFF and the software_token_mfa_configuration configuration + block must be fully removed. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userAttributeUpdateSettings: + description: Configuration block for user attribute update settings. + Detailed below. + properties: + attributesRequireVerificationBeforeUpdate: + description: 'A list of attributes requiring verification + before update. If set, the provided value(s) must also be + set in auto_verified_attributes. Valid values: email, phone_number.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + userPoolAddOns: + description: Configuration block for user pool add-ons to enable + user pool advanced security mode features. Detailed below. + properties: + advancedSecurityMode: + description: Mode for advanced security, must be one of OFF, + AUDIT or ENFORCED. + type: string + type: object + usernameAttributes: + description: Whether email addresses or phone numbers can be specified + as usernames when a user signs up. Conflicts with alias_attributes. + items: + type: string + type: array + x-kubernetes-list-type: set + usernameConfiguration: + description: Configuration block for username configuration. Detailed + below. + properties: + caseSensitive: + description: Whether username case sensitivity will be applied + for all users in the user pool through Cognito APIs. + type: boolean + type: object + verificationMessageTemplate: + description: Configuration block for verification message templates. + Detailed below. + properties: + defaultEmailOption: + description: Default email option. Must be either CONFIRM_WITH_CODE + or CONFIRM_WITH_LINK. Defaults to CONFIRM_WITH_CODE. + type: string + emailMessage: + description: Email message template. Must contain the {####} + placeholder. Conflicts with email_verification_message argument. + type: string + emailMessageByLink: + description: Email message template for sending a confirmation + link to the user, it must contain the {##Click Here##} placeholder. + type: string + emailSubject: + description: Subject line for the email message template. + Conflicts with email_verification_subject argument. + type: string + emailSubjectByLink: + description: Subject line for the email message template for + sending a confirmation link to the user. + type: string + smsMessage: + description: SMS message template. Must contain the {####} + placeholder. Conflicts with sms_verification_message argument. 
+ type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: UserPoolStatus defines the observed state of UserPool. 
+ properties: + atProvider: + properties: + accountRecoverySetting: + description: Configuration block to define which verified available + method a user can use to recover their forgotten password. Detailed + below. + properties: + recoveryMechanism: + description: 'List of Account Recovery Options of the following + structure:' + items: + properties: + name: + description: Name of the user pool. + type: string + priority: + description: Positive integer specifying priority of + a method with 1 being the highest priority. + type: number + type: object + type: array + type: object + adminCreateUserConfig: + description: Configuration block for creating a new user profile. + Detailed below. + properties: + allowAdminCreateUserOnly: + description: Set to True if only the administrator is allowed + to create user profiles. Set to False if users can sign + themselves up via an app. + type: boolean + inviteMessageTemplate: + description: Invite message template structure. Detailed below. + properties: + emailMessage: + description: Message template for email messages. Must + contain {username} and {####} placeholders, for username + and temporary password, respectively. + type: string + emailSubject: + description: Subject line for email messages. + type: string + smsMessage: + description: Message template for SMS messages. Must contain + {username} and {####} placeholders, for username and + temporary password, respectively. + type: string + type: object + type: object + aliasAttributes: + description: 'Attributes supported as an alias for this user pool. + Valid values: phone_number, email, or preferred_username. Conflicts + with username_attributes.' + items: + type: string + type: array + x-kubernetes-list-type: set + arn: + description: ARN of the user pool. + type: string + autoVerifiedAttributes: + description: 'Attributes to be auto-verified. Valid values: email, + phone_number.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + creationDate: + description: Date the user pool was created. + type: string + customDomain: + description: 'A custom domain name that you provide to Amazon + Cognito. This parameter applies only if you use a custom domain + to host the sign-up and sign-in pages for your application. + For example: auth.example.com.' + type: string + deletionProtection: + description: When active, DeletionProtection prevents accidental + deletion of your user pool. Before you can delete a user pool + that you have protected against deletion, you must deactivate + this feature. Valid values are ACTIVE and INACTIVE, Default + value is INACTIVE. + type: string + deviceConfiguration: + description: Configuration block for the user pool's device tracking. + Detailed below. + properties: + challengeRequiredOnNewDevice: + description: Whether a challenge is required on a new device. + Only applicable to a new device. + type: boolean + deviceOnlyRememberedOnUserPrompt: + description: Whether a device is only remembered on user prompt. + false equates to "Always" remember, true is "User Opt In," + and not using a device_configuration block is "No." + type: boolean + type: object + domain: + description: Holds the domain prefix if the user pool has a domain + associated with it. + type: string + emailConfiguration: + description: Configuration block for configuring email. Detailed + below. + properties: + configurationSet: + description: Email configuration set name from SES. + type: string + emailSendingAccount: + description: Email delivery method to use. COGNITO_DEFAULT + for the default email functionality built into Cognito or + DEVELOPER to use your Amazon SES configuration. Required + to be DEVELOPER if from_email_address is set. + type: string + fromEmailAddress: + description: Sender’s email address or sender’s display name + with their email address (e.g., john@example.com, John Smith + or \"John Smith Ph.D.\" ). 
+ Escaped double quotes are required around display names + that contain certain characters as specified in RFC 5322. + type: string + replyToEmailAddress: + description: REPLY-TO email address. + type: string + sourceArn: + description: ARN of the SES verified email identity to use. + Required if email_sending_account is set to DEVELOPER. + type: string + type: object + emailVerificationMessage: + description: String representing the email verification message. + Conflicts with verification_message_template configuration block + email_message argument. + type: string + emailVerificationSubject: + description: String representing the email verification subject. + Conflicts with verification_message_template configuration block + email_subject argument. + type: string + endpoint: + description: 'Endpoint name of the user pool. Example format: + cognito-idp.REGION.amazonaws.com/xxxx_yyyyy' + type: string + estimatedNumberOfUsers: + description: A number estimating the size of the user pool. + type: number + id: + description: ID of the user pool. + type: string + lambdaConfig: + description: Configuration block for the AWS Lambda triggers associated + with the user pool. Detailed below. + properties: + createAuthChallenge: + description: ARN of the lambda creating an authentication + challenge. + type: string + customEmailSender: + description: A custom email sender AWS Lambda trigger. See + custom_email_sender Below. + properties: + lambdaArn: + description: The Lambda Amazon Resource Name of the Lambda + function that Amazon Cognito triggers to send email + notifications to users. + type: string + lambdaVersion: + description: The Lambda version represents the signature + of the "request" attribute in the "event" information + Amazon Cognito passes to your custom email Lambda function. + The only supported value is V1_0. + type: string + type: object + customMessage: + description: Custom Message AWS Lambda trigger. 
+ type: string + customSmsSender: + description: A custom SMS sender AWS Lambda trigger. See custom_sms_sender + Below. + properties: + lambdaArn: + description: The Lambda Amazon Resource Name of the Lambda + function that Amazon Cognito triggers to send SMS notifications + to users. + type: string + lambdaVersion: + description: The Lambda version represents the signature + of the "request" attribute in the "event" information + Amazon Cognito passes to your custom SMS Lambda function. + The only supported value is V1_0. + type: string + type: object + defineAuthChallenge: + description: Defines the authentication challenge. + type: string + kmsKeyId: + description: The Amazon Resource Name of Key Management Service + Customer master keys. Amazon Cognito uses the key to encrypt + codes and temporary passwords sent to CustomEmailSender + and CustomSMSSender. + type: string + postAuthentication: + description: Post-authentication AWS Lambda trigger. + type: string + postConfirmation: + description: Post-confirmation AWS Lambda trigger. + type: string + preAuthentication: + description: Pre-authentication AWS Lambda trigger. + type: string + preSignUp: + description: Pre-registration AWS Lambda trigger. + type: string + preTokenGeneration: + description: Allow to customize identity token claims before + token generation. Set this parameter for legacy purposes; + for new instances of pre token generation triggers, set + the lambda_arn of pre_token_generation_config. + type: string + preTokenGenerationConfig: + description: Allow to customize access tokens. See pre_token_configuration_type + properties: + lambdaArn: + description: The Lambda Amazon Resource Name of the Lambda + function that Amazon Cognito triggers to send SMS notifications + to users. + type: string + lambdaVersion: + description: The Lambda version represents the signature + of the "version" attribute in the "event" information + Amazon Cognito passes to your pre Token Generation Lambda + function. 
The supported values are V1_0, V2_0. + type: string + type: object + userMigration: + description: User migration Lambda config type. + type: string + verifyAuthChallengeResponse: + description: Verifies the authentication challenge response. + type: string + type: object + lastModifiedDate: + description: Date the user pool was last modified. + type: string + mfaConfiguration: + description: Multi-Factor Authentication (MFA) configuration for + the User Pool. Defaults of OFF. Valid values are OFF (MFA Tokens + are not required), ON (MFA is required for all users to sign + in; requires at least one of sms_configuration or software_token_mfa_configuration + to be configured), or OPTIONAL (MFA Will be required only for + individual users who have MFA Enabled; requires at least one + of sms_configuration or software_token_mfa_configuration to + be configured). + type: string + name: + description: Name of the user pool. + type: string + passwordPolicy: + description: Configuration block for information about the user + pool password policy. Detailed below. + properties: + minimumLength: + description: Minimum length of the password policy that you + have set. + type: number + requireLowercase: + description: Whether you have required users to use at least + one lowercase letter in their password. + type: boolean + requireNumbers: + description: Whether you have required users to use at least + one number in their password. + type: boolean + requireSymbols: + description: Whether you have required users to use at least + one symbol in their password. + type: boolean + requireUppercase: + description: Whether you have required users to use at least + one uppercase letter in their password. + type: boolean + temporaryPasswordValidityDays: + description: In the password policy you have set, refers to + the number of days a temporary password is valid. If the + user does not sign-in during this time, their password will + need to be reset by an administrator. 
+ type: number + type: object + schema: + description: Configuration block for the schema attributes of + a user pool. Detailed below. Schema attributes from the standard + attribute set only need to be specified if they are different + from the default configuration. Attributes can be added, but + not modified or removed. Maximum of 50 attributes. + items: + properties: + attributeDataType: + description: Attribute data type. Must be one of Boolean, + Number, String, DateTime. + type: string + developerOnlyAttribute: + description: Whether the attribute type is developer only. + type: boolean + mutable: + description: Whether the attribute can be changed once it + has been created. + type: boolean + name: + description: Name of the user pool. + type: string + numberAttributeConstraints: + description: Configuration block for the constraints for + an attribute of the number type. Detailed below. + properties: + maxValue: + description: Maximum value of an attribute that is of + the number data type. + type: string + minValue: + description: Minimum value of an attribute that is of + the number data type. + type: string + type: object + required: + description: Whether a user pool attribute is required. + If the attribute is required and the user does not provide + a value, registration or sign-in will fail. + type: boolean + stringAttributeConstraints: + description: Constraints for an attribute of the string + type. Detailed below. + properties: + maxLength: + description: Maximum length of an attribute value of + the string type. + type: string + minLength: + description: Minimum length of an attribute value of + the string type. + type: string + type: object + type: object + type: array + smsAuthenticationMessage: + description: String representing the SMS authentication message. + The Message must contain the {####} placeholder, which will + be replaced with the code. 
+ type: string + smsConfiguration: + description: Configuration block for Short Message Service (SMS) + settings. Detailed below. These settings apply to SMS user verification + and SMS Multi-Factor Authentication (MFA). Due to Cognito API + restrictions, the SMS configuration cannot be removed without + recreating the Cognito User Pool. For user data safety, this + resource will ignore the removal of this configuration by disabling + drift detection. To force resource recreation after this configuration + has been applied, see the taint command. + properties: + externalId: + description: External ID used in IAM role trust relationships. + For more information about using external IDs, see How to + Use an External ID When Granting Access to Your AWS Resources + to a Third Party. + type: string + snsCallerArn: + description: ARN of the Amazon SNS caller. This is usually + the IAM role that you've given Cognito permission to assume. + type: string + snsRegion: + description: The AWS Region to use with Amazon SNS integration. + You can choose the same Region as your user pool, or a supported + Legacy Amazon SNS alternate Region. Amazon Cognito resources + in the Asia Pacific (Seoul) AWS Region must use your Amazon + SNS configuration in the Asia Pacific (Tokyo) Region. For + more information, see SMS message settings for Amazon Cognito + user pools. + type: string + type: object + smsVerificationMessage: + description: String representing the SMS verification message. + Conflicts with verification_message_template configuration block + sms_message argument. + type: string + softwareTokenMfaConfiguration: + description: Configuration block for software token Mult-Factor + Authentication (MFA) settings. Detailed below. + properties: + enabled: + description: Boolean whether to enable software token Multi-Factor + (MFA) tokens, such as Time-based One-Time Password (TOTP). 
+ To disable software token MFA When sms_configuration is + not present, the mfa_configuration argument must be set + to OFF and the software_token_mfa_configuration configuration + block must be fully removed. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + userAttributeUpdateSettings: + description: Configuration block for user attribute update settings. + Detailed below. + properties: + attributesRequireVerificationBeforeUpdate: + description: 'A list of attributes requiring verification + before update. If set, the provided value(s) must also be + set in auto_verified_attributes. Valid values: email, phone_number.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + userPoolAddOns: + description: Configuration block for user pool add-ons to enable + user pool advanced security mode features. Detailed below. + properties: + advancedSecurityMode: + description: Mode for advanced security, must be one of OFF, + AUDIT or ENFORCED. + type: string + type: object + usernameAttributes: + description: Whether email addresses or phone numbers can be specified + as usernames when a user signs up. Conflicts with alias_attributes. + items: + type: string + type: array + x-kubernetes-list-type: set + usernameConfiguration: + description: Configuration block for username configuration. Detailed + below. + properties: + caseSensitive: + description: Whether username case sensitivity will be applied + for all users in the user pool through Cognito APIs. + type: boolean + type: object + verificationMessageTemplate: + description: Configuration block for verification message templates. 
+ Detailed below. + properties: + defaultEmailOption: + description: Default email option. Must be either CONFIRM_WITH_CODE + or CONFIRM_WITH_LINK. Defaults to CONFIRM_WITH_CODE. + type: string + emailMessage: + description: Email message template. Must contain the {####} + placeholder. Conflicts with email_verification_message argument. + type: string + emailMessageByLink: + description: Email message template for sending a confirmation + link to the user, it must contain the {##Click Here##} placeholder. + type: string + emailSubject: + description: Subject line for the email message template. + Conflicts with email_verification_subject argument. + type: string + emailSubjectByLink: + description: Subject line for the email message template for + sending a confirmation link to the user. + type: string + smsMessage: + description: SMS message template. Must contain the {####} + placeholder. Conflicts with sms_verification_message argument. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/configservice.aws.upbound.io_configrules.yaml b/package/crds/configservice.aws.upbound.io_configrules.yaml index 52f9ea9879..f66ddc9c78 100644 --- a/package/crds/configservice.aws.upbound.io_configrules.yaml +++ b/package/crds/configservice.aws.upbound.io_configrules.yaml @@ -897,3 +897,858 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ConfigRule is the Schema for the ConfigRules API. Provides an + AWS Config Rule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigRuleSpec defines the desired state of ConfigRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the rule + type: string + evaluationMode: + description: The modes the Config rule can be evaluated in. See + Evaluation Mode for more details. + items: + properties: + mode: + description: The mode of an evaluation. + type: string + type: object + type: array + inputParameters: + description: A string in JSON format that is passed to the AWS + Config rule Lambda function. + type: string + maximumExecutionFrequency: + description: The maximum frequency with which AWS Config runs + evaluations for a rule. + type: string + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + scope: + description: Scope defines which resources can trigger an evaluation + for the rule. See Scope Below. + properties: + complianceResourceId: + description: The IDs of the only AWS resource that you want + to trigger an evaluation for the rule. If you specify a + resource ID, you must specify one resource type for compliance_resource_types. + type: string + complianceResourceTypes: + description: A list of resource types of only those AWS resources + that you want to trigger an evaluation for the ruleE.g., + AWS::EC2::Instance. You can only specify one type if you + also specify a resource ID for compliance_resource_id. See + relevant part of AWS Docs for available types. + items: + type: string + type: array + x-kubernetes-list-type: set + tagKey: + description: The tag key that is applied to only those AWS + resources that you want you want to trigger an evaluation + for the rule. + type: string + tagValue: + description: The tag value applied to only those AWS resources + that you want to trigger an evaluation for the rule. + type: string + type: object + source: + description: Source specifies the rule owner, the rule identifier, + and the notifications that cause the function to evaluate your + AWS resources. See Source Below. + properties: + customPolicyDetails: + description: Provides the runtime system, policy definition, + and whether debug logging is enabled. Required when owner + is set to CUSTOM_POLICY. See Custom Policy Details Below. + properties: + enableDebugLogDelivery: + description: The boolean expression for enabling debug + logging for your Config Custom Policy rule. The default + value is false. + type: boolean + policyRuntime: + description: The runtime system for your Config Custom + Policy rule. Guard is a policy-as-code language that + allows you to write policies that are enforced by Config + Custom Policy rules. For more information about Guard, + see the Guard GitHub Repository. 
+ type: string + policyText: + description: The policy definition containing the logic + for your Config Custom Policy rule. + type: string + type: object + owner: + description: Indicates whether AWS or the customer owns and + manages the AWS Config rule. Valid values are AWS, CUSTOM_LAMBDA + or CUSTOM_POLICY. For more information about managed rules, + see the AWS Config Managed Rules documentation. For more + information about custom rules, see the AWS Config Custom + Rules documentation. Custom Lambda Functions require permissions + to allow the AWS Config service to invoke them, e.g., via + the aws_lambda_permission resource. + type: string + sourceDetail: + description: Provides the source and type of the event that + causes AWS Config to evaluate your AWS resources. Only valid + if owner is CUSTOM_LAMBDA or CUSTOM_POLICY. See Source Detail + Below. + items: + properties: + eventSource: + description: The source of the event, such as an AWS + service, that triggers AWS Config to evaluate your + AWSresources. This defaults to aws.config and is the + only valid value. + type: string + maximumExecutionFrequency: + description: The maximum frequency with which AWS Config + runs evaluations for a rule. + type: string + messageType: + description: 'The type of notification that triggers + AWS Config to run an evaluation for a rule. You canspecify + the following notification types:' + type: string + type: object + type: array + sourceIdentifier: + description: For AWS Config managed rules, a predefined identifier, + e.g IAM_PASSWORD_POLICY. For custom Lambda rules, the identifier + is the ARN of the Lambda Function, such as arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name + or the arn attribute of the aws_lambda_function resource. + type: string + sourceIdentifierRef: + description: Reference to a Function in lambda to populate + sourceIdentifier. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceIdentifierSelector: + description: Selector for a Function in lambda to populate + sourceIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the rule + type: string + evaluationMode: + description: The modes the Config rule can be evaluated in. See + Evaluation Mode for more details. + items: + properties: + mode: + description: The mode of an evaluation. + type: string + type: object + type: array + inputParameters: + description: A string in JSON format that is passed to the AWS + Config rule Lambda function. + type: string + maximumExecutionFrequency: + description: The maximum frequency with which AWS Config runs + evaluations for a rule. + type: string + scope: + description: Scope defines which resources can trigger an evaluation + for the rule. See Scope Below. + properties: + complianceResourceId: + description: The IDs of the only AWS resource that you want + to trigger an evaluation for the rule. If you specify a + resource ID, you must specify one resource type for compliance_resource_types. 
+ type: string + complianceResourceTypes: + description: A list of resource types of only those AWS resources + that you want to trigger an evaluation for the ruleE.g., + AWS::EC2::Instance. You can only specify one type if you + also specify a resource ID for compliance_resource_id. See + relevant part of AWS Docs for available types. + items: + type: string + type: array + x-kubernetes-list-type: set + tagKey: + description: The tag key that is applied to only those AWS + resources that you want you want to trigger an evaluation + for the rule. + type: string + tagValue: + description: The tag value applied to only those AWS resources + that you want to trigger an evaluation for the rule. + type: string + type: object + source: + description: Source specifies the rule owner, the rule identifier, + and the notifications that cause the function to evaluate your + AWS resources. See Source Below. + properties: + customPolicyDetails: + description: Provides the runtime system, policy definition, + and whether debug logging is enabled. Required when owner + is set to CUSTOM_POLICY. See Custom Policy Details Below. + properties: + enableDebugLogDelivery: + description: The boolean expression for enabling debug + logging for your Config Custom Policy rule. The default + value is false. + type: boolean + policyRuntime: + description: The runtime system for your Config Custom + Policy rule. Guard is a policy-as-code language that + allows you to write policies that are enforced by Config + Custom Policy rules. For more information about Guard, + see the Guard GitHub Repository. + type: string + policyText: + description: The policy definition containing the logic + for your Config Custom Policy rule. + type: string + type: object + owner: + description: Indicates whether AWS or the customer owns and + manages the AWS Config rule. Valid values are AWS, CUSTOM_LAMBDA + or CUSTOM_POLICY. 
For more information about managed rules, + see the AWS Config Managed Rules documentation. For more + information about custom rules, see the AWS Config Custom + Rules documentation. Custom Lambda Functions require permissions + to allow the AWS Config service to invoke them, e.g., via + the aws_lambda_permission resource. + type: string + sourceDetail: + description: Provides the source and type of the event that + causes AWS Config to evaluate your AWS resources. Only valid + if owner is CUSTOM_LAMBDA or CUSTOM_POLICY. See Source Detail + Below. + items: + properties: + eventSource: + description: The source of the event, such as an AWS + service, that triggers AWS Config to evaluate your + AWSresources. This defaults to aws.config and is the + only valid value. + type: string + maximumExecutionFrequency: + description: The maximum frequency with which AWS Config + runs evaluations for a rule. + type: string + messageType: + description: 'The type of notification that triggers + AWS Config to run an evaluation for a rule. You canspecify + the following notification types:' + type: string + type: object + type: array + sourceIdentifier: + description: For AWS Config managed rules, a predefined identifier, + e.g IAM_PASSWORD_POLICY. For custom Lambda rules, the identifier + is the ARN of the Lambda Function, such as arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name + or the arn attribute of the aws_lambda_function resource. + type: string + sourceIdentifierRef: + description: Reference to a Function in lambda to populate + sourceIdentifier. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceIdentifierSelector: + description: Selector for a Function in lambda to populate + sourceIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.source is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.source) + || (has(self.initProvider) && has(self.initProvider.source))' + status: + description: ConfigRuleStatus defines the observed state of ConfigRule. 
+ properties: + atProvider: + properties: + arn: + description: The ARN of the config rule + type: string + description: + description: Description of the rule + type: string + evaluationMode: + description: The modes the Config rule can be evaluated in. See + Evaluation Mode for more details. + items: + properties: + mode: + description: The mode of an evaluation. + type: string + type: object + type: array + id: + type: string + inputParameters: + description: A string in JSON format that is passed to the AWS + Config rule Lambda function. + type: string + maximumExecutionFrequency: + description: The maximum frequency with which AWS Config runs + evaluations for a rule. + type: string + ruleId: + description: The ID of the config rule + type: string + scope: + description: Scope defines which resources can trigger an evaluation + for the rule. See Scope Below. + properties: + complianceResourceId: + description: The IDs of the only AWS resource that you want + to trigger an evaluation for the rule. If you specify a + resource ID, you must specify one resource type for compliance_resource_types. + type: string + complianceResourceTypes: + description: A list of resource types of only those AWS resources + that you want to trigger an evaluation for the ruleE.g., + AWS::EC2::Instance. You can only specify one type if you + also specify a resource ID for compliance_resource_id. See + relevant part of AWS Docs for available types. + items: + type: string + type: array + x-kubernetes-list-type: set + tagKey: + description: The tag key that is applied to only those AWS + resources that you want you want to trigger an evaluation + for the rule. + type: string + tagValue: + description: The tag value applied to only those AWS resources + that you want to trigger an evaluation for the rule. 
+ type: string + type: object + source: + description: Source specifies the rule owner, the rule identifier, + and the notifications that cause the function to evaluate your + AWS resources. See Source Below. + properties: + customPolicyDetails: + description: Provides the runtime system, policy definition, + and whether debug logging is enabled. Required when owner + is set to CUSTOM_POLICY. See Custom Policy Details Below. + properties: + enableDebugLogDelivery: + description: The boolean expression for enabling debug + logging for your Config Custom Policy rule. The default + value is false. + type: boolean + policyRuntime: + description: The runtime system for your Config Custom + Policy rule. Guard is a policy-as-code language that + allows you to write policies that are enforced by Config + Custom Policy rules. For more information about Guard, + see the Guard GitHub Repository. + type: string + policyText: + description: The policy definition containing the logic + for your Config Custom Policy rule. + type: string + type: object + owner: + description: Indicates whether AWS or the customer owns and + manages the AWS Config rule. Valid values are AWS, CUSTOM_LAMBDA + or CUSTOM_POLICY. For more information about managed rules, + see the AWS Config Managed Rules documentation. For more + information about custom rules, see the AWS Config Custom + Rules documentation. Custom Lambda Functions require permissions + to allow the AWS Config service to invoke them, e.g., via + the aws_lambda_permission resource. + type: string + sourceDetail: + description: Provides the source and type of the event that + causes AWS Config to evaluate your AWS resources. Only valid + if owner is CUSTOM_LAMBDA or CUSTOM_POLICY. See Source Detail + Below. + items: + properties: + eventSource: + description: The source of the event, such as an AWS + service, that triggers AWS Config to evaluate your + AWSresources. This defaults to aws.config and is the + only valid value. 
+ type: string + maximumExecutionFrequency: + description: The maximum frequency with which AWS Config + runs evaluations for a rule. + type: string + messageType: + description: 'The type of notification that triggers + AWS Config to run an evaluation for a rule. You canspecify + the following notification types:' + type: string + type: object + type: array + sourceIdentifier: + description: For AWS Config managed rules, a predefined identifier, + e.g IAM_PASSWORD_POLICY. For custom Lambda rules, the identifier + is the ARN of the Lambda Function, such as arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name + or the arn attribute of the aws_lambda_function resource. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/configservice.aws.upbound.io_configurationaggregators.yaml b/package/crds/configservice.aws.upbound.io_configurationaggregators.yaml index eb6ce850f6..ba7085ee6f 100644 --- a/package/crds/configservice.aws.upbound.io_configurationaggregators.yaml +++ b/package/crds/configservice.aws.upbound.io_configurationaggregators.yaml @@ -635,3 +635,608 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ConfigurationAggregator is the Schema for the ConfigurationAggregators + API. Manages an AWS Config Configuration Aggregator. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigurationAggregatorSpec defines the desired state of + ConfigurationAggregator + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountAggregationSource: + description: The account(s) to aggregate config data from as documented + below. + properties: + accountIds: + description: List of 12-digit account IDs of the account(s) + being aggregated. + items: + type: string + type: array + allRegions: + description: If true, aggregate existing AWS Config regions + and future regions. + type: boolean + regions: + description: List of source regions being aggregated. 
+ items: + type: string + type: array + type: object + organizationAggregationSource: + description: The organization to aggregate config data from as + documented below. + properties: + allRegions: + description: If true, aggregate existing AWS Config regions + and future regions. + type: boolean + regions: + description: List of source regions being aggregated. + items: + type: string + type: array + roleArn: + description: ARN of the IAM role used to retrieve AWS Organization + details associated with the aggregator account. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountAggregationSource: + description: The account(s) to aggregate config data from as documented + below. + properties: + accountIds: + description: List of 12-digit account IDs of the account(s) + being aggregated. 
+ items: + type: string + type: array + allRegions: + description: If true, aggregate existing AWS Config regions + and future regions. + type: boolean + regions: + description: List of source regions being aggregated. + items: + type: string + type: array + type: object + organizationAggregationSource: + description: The organization to aggregate config data from as + documented below. + properties: + allRegions: + description: If true, aggregate existing AWS Config regions + and future regions. + type: boolean + regions: + description: List of source regions being aggregated. + items: + type: string + type: array + roleArn: + description: ARN of the IAM role used to retrieve AWS Organization + details associated with the aggregator account. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ConfigurationAggregatorStatus defines the observed state + of ConfigurationAggregator. + properties: + atProvider: + properties: + accountAggregationSource: + description: The account(s) to aggregate config data from as documented + below. + properties: + accountIds: + description: List of 12-digit account IDs of the account(s) + being aggregated. + items: + type: string + type: array + allRegions: + description: If true, aggregate existing AWS Config regions + and future regions. + type: boolean + regions: + description: List of source regions being aggregated. + items: + type: string + type: array + type: object + arn: + description: The ARN of the aggregator + type: string + id: + type: string + organizationAggregationSource: + description: The organization to aggregate config data from as + documented below. + properties: + allRegions: + description: If true, aggregate existing AWS Config regions + and future regions. + type: boolean + regions: + description: List of source regions being aggregated. 
+ items: + type: string + type: array + roleArn: + description: ARN of the IAM role used to retrieve AWS Organization + details associated with the aggregator account. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/configservice.aws.upbound.io_configurationrecorders.yaml b/package/crds/configservice.aws.upbound.io_configurationrecorders.yaml index f912858913..4a0a5a39cc 100644 --- a/package/crds/configservice.aws.upbound.io_configurationrecorders.yaml +++ b/package/crds/configservice.aws.upbound.io_configurationrecorders.yaml @@ -762,3 +762,720 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ConfigurationRecorder is the Schema for the ConfigurationRecorders + API. Provides an AWS Config Configuration Recorder. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigurationRecorderSpec defines the desired state of ConfigurationRecorder + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + recordingGroup: + description: Recording group - see below. + properties: + allSupported: + description: Specifies whether AWS Config records configuration + changes for every supported type of regional resource (which + includes any new type that will become supported in the + future). Conflicts with resource_types. Defaults to true. + type: boolean + exclusionByResourceTypes: + description: An object that specifies how AWS Config excludes + resource types from being recorded by the configuration + recorder.To use this option, you must set the useOnly field + of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES Requires + all_supported = false. Conflicts with resource_types. + items: + properties: + resourceTypes: + description: A list that specifies the types of AWS + resources for which AWS Config excludes records configuration + changes. See relevant part of AWS Docs for available + types. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + includeGlobalResourceTypes: + description: Specifies whether AWS Config includes all supported + types of global resources with the resources that it records. + Requires all_supported = true. Conflicts with resource_types. + type: boolean + recordingStrategy: + description: Recording Strategy. Detailed below. + items: + properties: + useOnly: + type: string + type: object + type: array + resourceTypes: + description: A list that specifies the types of AWS resources + for which AWS Config records configuration changes (for + example, AWS::EC2::Instance or AWS::CloudTrail::Trail). + See relevant part of AWS Docs for available types. In order + to use this attribute, all_supported must be set to false. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + recordingMode: + description: Recording mode - see below. + properties: + recordingFrequency: + description: Default reecording frequency. CONTINUOUS or DAILY. + type: string + recordingModeOverride: + description: Recording mode overrides. Detailed below. + properties: + description: + description: A description you provide of the override. + type: string + recordingFrequency: + description: Default reecording frequency. CONTINUOUS + or DAILY. + type: string + resourceTypes: + description: A list that specifies the types of AWS resources + for which AWS Config excludes records configuration + changes. See relevant part of AWS Docs for available + types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: Amazon Resource Name (ARN) of the IAM role. Used + to make read or write requests to the delivery channel and to + describe the AWS resources associated with the account. See + AWS Docs for more details. 
+ type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + recordingGroup: + description: Recording group - see below. + properties: + allSupported: + description: Specifies whether AWS Config records configuration + changes for every supported type of regional resource (which + includes any new type that will become supported in the + future). Conflicts with resource_types. Defaults to true. + type: boolean + exclusionByResourceTypes: + description: An object that specifies how AWS Config excludes + resource types from being recorded by the configuration + recorder.To use this option, you must set the useOnly field + of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES Requires + all_supported = false. Conflicts with resource_types. + items: + properties: + resourceTypes: + description: A list that specifies the types of AWS + resources for which AWS Config excludes records configuration + changes. See relevant part of AWS Docs for available + types. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + includeGlobalResourceTypes: + description: Specifies whether AWS Config includes all supported + types of global resources with the resources that it records. + Requires all_supported = true. Conflicts with resource_types. + type: boolean + recordingStrategy: + description: Recording Strategy. Detailed below. + items: + properties: + useOnly: + type: string + type: object + type: array + resourceTypes: + description: A list that specifies the types of AWS resources + for which AWS Config records configuration changes (for + example, AWS::EC2::Instance or AWS::CloudTrail::Trail). + See relevant part of AWS Docs for available types. In order + to use this attribute, all_supported must be set to false. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + recordingMode: + description: Recording mode - see below. + properties: + recordingFrequency: + description: Default reecording frequency. CONTINUOUS or DAILY. + type: string + recordingModeOverride: + description: Recording mode overrides. Detailed below. + properties: + description: + description: A description you provide of the override. + type: string + recordingFrequency: + description: Default reecording frequency. CONTINUOUS + or DAILY. + type: string + resourceTypes: + description: A list that specifies the types of AWS resources + for which AWS Config excludes records configuration + changes. See relevant part of AWS Docs for available + types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + roleArn: + description: Amazon Resource Name (ARN) of the IAM role. Used + to make read or write requests to the delivery channel and to + describe the AWS resources associated with the account. See + AWS Docs for more details. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ConfigurationRecorderStatus defines the observed state of + ConfigurationRecorder. + properties: + atProvider: + properties: + id: + description: Name of the recorder + type: string + recordingGroup: + description: Recording group - see below. 
+ properties: + allSupported: + description: Specifies whether AWS Config records configuration + changes for every supported type of regional resource (which + includes any new type that will become supported in the + future). Conflicts with resource_types. Defaults to true. + type: boolean + exclusionByResourceTypes: + description: An object that specifies how AWS Config excludes + resource types from being recorded by the configuration + recorder.To use this option, you must set the useOnly field + of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES Requires + all_supported = false. Conflicts with resource_types. + items: + properties: + resourceTypes: + description: A list that specifies the types of AWS + resources for which AWS Config excludes records configuration + changes. See relevant part of AWS Docs for available + types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + includeGlobalResourceTypes: + description: Specifies whether AWS Config includes all supported + types of global resources with the resources that it records. + Requires all_supported = true. Conflicts with resource_types. + type: boolean + recordingStrategy: + description: Recording Strategy. Detailed below. + items: + properties: + useOnly: + type: string + type: object + type: array + resourceTypes: + description: A list that specifies the types of AWS resources + for which AWS Config records configuration changes (for + example, AWS::EC2::Instance or AWS::CloudTrail::Trail). + See relevant part of AWS Docs for available types. In order + to use this attribute, all_supported must be set to false. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + recordingMode: + description: Recording mode - see below. + properties: + recordingFrequency: + description: Default reecording frequency. CONTINUOUS or DAILY. + type: string + recordingModeOverride: + description: Recording mode overrides. Detailed below. 
+ properties: + description: + description: A description you provide of the override. + type: string + recordingFrequency: + description: Default reecording frequency. CONTINUOUS + or DAILY. + type: string + resourceTypes: + description: A list that specifies the types of AWS resources + for which AWS Config excludes records configuration + changes. See relevant part of AWS Docs for available + types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + roleArn: + description: Amazon Resource Name (ARN) of the IAM role. Used + to make read or write requests to the delivery channel and to + describe the AWS resources associated with the account. See + AWS Docs for more details. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/configservice.aws.upbound.io_deliverychannels.yaml b/package/crds/configservice.aws.upbound.io_deliverychannels.yaml index 5b92934f53..54d970dce8 100644 --- a/package/crds/configservice.aws.upbound.io_deliverychannels.yaml +++ b/package/crds/configservice.aws.upbound.io_deliverychannels.yaml @@ -563,3 +563,542 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DeliveryChannel is the Schema for the DeliveryChannels API. Provides + an AWS Config Delivery Channel. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DeliveryChannelSpec defines the desired state of DeliveryChannel + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + region: + description: Region is the region you'd like your resource to + be created in. + type: string + s3BucketName: + description: The name of the S3 bucket used to store the configuration + history. + type: string + s3BucketNameRef: + description: Reference to a Bucket in s3 to populate s3BucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + s3BucketNameSelector: + description: Selector for a Bucket in s3 to populate s3BucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3KeyPrefix: + description: The prefix for the specified S3 bucket. + type: string + s3KmsKeyArn: + description: The ARN of the AWS KMS key used to encrypt objects + delivered by AWS Config. Must belong to the same Region as the + destination S3 bucket. + type: string + snapshotDeliveryProperties: + description: Options for how AWS Config delivers configuration + snapshots. 
See below + properties: + deliveryFrequency: + description: '- The frequency with which AWS Config recurringly + delivers configuration snapshotsE.g., One_Hour or Three_Hours. + Valid values are listed here.' + type: string + type: object + snsTopicArn: + description: The ARN of the SNS topic that AWS Config delivers + notifications to. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + s3BucketName: + description: The name of the S3 bucket used to store the configuration + history. + type: string + s3BucketNameRef: + description: Reference to a Bucket in s3 to populate s3BucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + s3BucketNameSelector: + description: Selector for a Bucket in s3 to populate s3BucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3KeyPrefix: + description: The prefix for the specified S3 bucket. + type: string + s3KmsKeyArn: + description: The ARN of the AWS KMS key used to encrypt objects + delivered by AWS Config. Must belong to the same Region as the + destination S3 bucket. + type: string + snapshotDeliveryProperties: + description: Options for how AWS Config delivers configuration + snapshots. See below + properties: + deliveryFrequency: + description: '- The frequency with which AWS Config recurringly + delivers configuration snapshotsE.g., One_Hour or Three_Hours. + Valid values are listed here.' 
+ type: string + type: object + snsTopicArn: + description: The ARN of the SNS topic that AWS Config delivers + notifications to. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DeliveryChannelStatus defines the observed state of DeliveryChannel. + properties: + atProvider: + properties: + id: + description: The name of the delivery channel. + type: string + s3BucketName: + description: The name of the S3 bucket used to store the configuration + history. 
+ type: string + s3KeyPrefix: + description: The prefix for the specified S3 bucket. + type: string + s3KmsKeyArn: + description: The ARN of the AWS KMS key used to encrypt objects + delivered by AWS Config. Must belong to the same Region as the + destination S3 bucket. + type: string + snapshotDeliveryProperties: + description: Options for how AWS Config delivers configuration + snapshots. See below + properties: + deliveryFrequency: + description: '- The frequency with which AWS Config recurringly + delivers configuration snapshotsE.g., One_Hour or Three_Hours. + Valid values are listed here.' + type: string + type: object + snsTopicArn: + description: The ARN of the SNS topic that AWS Config delivers + notifications to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/configservice.aws.upbound.io_remediationconfigurations.yaml b/package/crds/configservice.aws.upbound.io_remediationconfigurations.yaml index 478e22b80c..9d6e068583 100644 --- a/package/crds/configservice.aws.upbound.io_remediationconfigurations.yaml +++ b/package/crds/configservice.aws.upbound.io_remediationconfigurations.yaml @@ -554,3 +554,521 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RemediationConfiguration is the Schema for the RemediationConfigurations + API. Provides an AWS Config Remediation Configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RemediationConfigurationSpec defines the desired state of + RemediationConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + automatic: + description: Remediation is triggered automatically if true. + type: boolean + executionControls: + description: Configuration block for execution controls. See below. + properties: + ssmControls: + description: Configuration block for SSM controls. See below. + properties: + concurrentExecutionRatePercentage: + description: Maximum percentage of remediation actions + allowed to run in parallel on the non-compliant resources + for that specific rule. The default value is 10%. + type: number + errorPercentage: + description: Percentage of errors that are allowed before + SSM stops running automations on non-compliant resources + for that specific rule. The default is 50%. + type: number + type: object + type: object + maximumAutomaticAttempts: + description: Maximum number of failed attempts for auto-remediation. + If you do not select a number, the default is 5. 
+ type: number + parameter: + description: Can be specified multiple times for each parameter. + Each parameter block supports arguments below. + items: + properties: + name: + description: Name of the attribute. + type: string + resourceValue: + description: Value is dynamic and changes at run-time. + type: string + staticValue: + description: Value is static and does not change at run-time. + type: string + staticValues: + description: List of static values. + items: + type: string + type: array + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resourceType: + description: Type of resource. + type: string + retryAttemptSeconds: + description: Maximum time in seconds that AWS Config runs auto-remediation. + If you do not select a number, the default is 60 seconds. + type: number + targetId: + description: Target ID is the name of the public document. + type: string + targetType: + description: Type of the target. Target executes remediation. + For example, SSM document. + type: string + targetVersion: + description: Version of the target. For example, version of the + SSM document + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + automatic: + description: Remediation is triggered automatically if true. 
+ type: boolean + executionControls: + description: Configuration block for execution controls. See below. + properties: + ssmControls: + description: Configuration block for SSM controls. See below. + properties: + concurrentExecutionRatePercentage: + description: Maximum percentage of remediation actions + allowed to run in parallel on the non-compliant resources + for that specific rule. The default value is 10%. + type: number + errorPercentage: + description: Percentage of errors that are allowed before + SSM stops running automations on non-compliant resources + for that specific rule. The default is 50%. + type: number + type: object + type: object + maximumAutomaticAttempts: + description: Maximum number of failed attempts for auto-remediation. + If you do not select a number, the default is 5. + type: number + parameter: + description: Can be specified multiple times for each parameter. + Each parameter block supports arguments below. + items: + properties: + name: + description: Name of the attribute. + type: string + resourceValue: + description: Value is dynamic and changes at run-time. + type: string + staticValue: + description: Value is static and does not change at run-time. + type: string + staticValues: + description: List of static values. + items: + type: string + type: array + type: object + type: array + resourceType: + description: Type of resource. + type: string + retryAttemptSeconds: + description: Maximum time in seconds that AWS Config runs auto-remediation. + If you do not select a number, the default is 60 seconds. + type: number + targetId: + description: Target ID is the name of the public document. + type: string + targetType: + description: Type of the target. Target executes remediation. + For example, SSM document. + type: string + targetVersion: + description: Version of the target. 
For example, version of the + SSM document + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.targetId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetId) + || (has(self.initProvider) && has(self.initProvider.targetId))' + - message: spec.forProvider.targetType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetType) + || (has(self.initProvider) && has(self.initProvider.targetType))' + status: + description: RemediationConfigurationStatus defines the observed state + of RemediationConfiguration. + properties: + atProvider: + properties: + arn: + description: ARN of the Config Remediation Configuration. + type: string + automatic: + description: Remediation is triggered automatically if true. + type: boolean + executionControls: + description: Configuration block for execution controls. See below. + properties: + ssmControls: + description: Configuration block for SSM controls. See below. + properties: + concurrentExecutionRatePercentage: + description: Maximum percentage of remediation actions + allowed to run in parallel on the non-compliant resources + for that specific rule. The default value is 10%. + type: number + errorPercentage: + description: Percentage of errors that are allowed before + SSM stops running automations on non-compliant resources + for that specific rule. The default is 50%. + type: number + type: object + type: object + id: + type: string + maximumAutomaticAttempts: + description: Maximum number of failed attempts for auto-remediation. + If you do not select a number, the default is 5. + type: number + parameter: + description: Can be specified multiple times for each parameter. 
+ Each parameter block supports arguments below. + items: + properties: + name: + description: Name of the attribute. + type: string + resourceValue: + description: Value is dynamic and changes at run-time. + type: string + staticValue: + description: Value is static and does not change at run-time. + type: string + staticValues: + description: List of static values. + items: + type: string + type: array + type: object + type: array + resourceType: + description: Type of resource. + type: string + retryAttemptSeconds: + description: Maximum time in seconds that AWS Config runs auto-remediation. + If you do not select a number, the default is 60 seconds. + type: number + targetId: + description: Target ID is the name of the public document. + type: string + targetType: + description: Type of the target. Target executes remediation. + For example, SSM document. + type: string + targetVersion: + description: Version of the target. For example, version of the + SSM document + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/connect.aws.upbound.io_botassociations.yaml b/package/crds/connect.aws.upbound.io_botassociations.yaml index 311bbb75f4..37adf184cd 100644 --- a/package/crds/connect.aws.upbound.io_botassociations.yaml +++ b/package/crds/connect.aws.upbound.io_botassociations.yaml @@ -613,3 +613,588 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BotAssociation is the Schema for the BotAssociations API. Associates + an Amazon Connect instance to an Amazon Lex (V1) bot + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BotAssociationSpec defines the desired state of BotAssociation + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + instanceId: + description: The identifier of the Amazon Connect instance. You + can find the instanceId in the ARN of the instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + lexBot: + description: Configuration information of an Amazon Lex (V1) bot. + Detailed below. + properties: + lexRegion: + description: The Region that the Amazon Lex (V1) bot was created + in. Defaults to current region. + type: string + name: + description: The name of the Amazon Lex (V1) bot. 
+ type: string + nameRef: + description: Reference to a Bot in lexmodels to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Bot in lexmodels to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + lexBot: + description: Configuration information of an Amazon Lex (V1) bot. + Detailed below. + properties: + lexRegion: + description: The Region that the Amazon Lex (V1) bot was created + in. Defaults to current region. + type: string + name: + description: The name of the Amazon Lex (V1) bot. + type: string + nameRef: + description: Reference to a Bot in lexmodels to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Bot in lexmodels to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.lexBot is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.lexBot) + || (has(self.initProvider) && has(self.initProvider.lexBot))' + status: + description: BotAssociationStatus defines the observed state of BotAssociation. + properties: + atProvider: + properties: + id: + description: The Amazon Connect instance ID, Lex (V1) bot name, + and Lex (V1) bot region separated by colons (:). 
+ type: string + instanceId: + description: The identifier of the Amazon Connect instance. You + can find the instanceId in the ARN of the instance. + type: string + lexBot: + description: Configuration information of an Amazon Lex (V1) bot. + Detailed below. + properties: + lexRegion: + description: The Region that the Amazon Lex (V1) bot was created + in. Defaults to current region. + type: string + name: + description: The name of the Amazon Lex (V1) bot. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/connect.aws.upbound.io_hoursofoperations.yaml b/package/crds/connect.aws.upbound.io_hoursofoperations.yaml index 2622f35b84..69a6e9850b 100644 --- a/package/crds/connect.aws.upbound.io_hoursofoperations.yaml +++ b/package/crds/connect.aws.upbound.io_hoursofoperations.yaml @@ -1350,3 +1350,655 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta3 + schema: + openAPIV3Schema: + description: HoursOfOperation is the Schema for the HoursOfOperations API. + Provides details about a specific Amazon Connect Hours of Operation. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HoursOfOperationSpec defines the desired state of HoursOfOperation + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + config: + description: 'One or more config blocks which define the configuration + information for the hours of operation: day, start time, and + end time . Config blocks are documented below.' + items: + properties: + day: + description: Specifies the day that the hours of operation + applies to. + type: string + endTime: + description: A end time block specifies the time that your + contact center closes. The end_time is documented below. + properties: + hours: + description: Specifies the hour of closing. + type: number + minutes: + description: Specifies the minute of closing. + type: number + type: object + startTime: + description: A start time block specifies the time that + your contact center opens. The start_time is documented + below. + properties: + hours: + description: Specifies the hour of opening. + type: number + minutes: + description: Specifies the minute of opening. 
+ type: number + type: object + type: object + type: array + description: + description: Specifies the description of the Hours of Operation. + type: string + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Hours of Operation. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timeZone: + description: Specifies the time zone of the Hours of Operation. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + config: + description: 'One or more config blocks which define the configuration + information for the hours of operation: day, start time, and + end time . Config blocks are documented below.' + items: + properties: + day: + description: Specifies the day that the hours of operation + applies to. 
+ type: string + endTime: + description: A end time block specifies the time that your + contact center closes. The end_time is documented below. + properties: + hours: + description: Specifies the hour of closing. + type: number + minutes: + description: Specifies the minute of closing. + type: number + type: object + startTime: + description: A start time block specifies the time that + your contact center opens. The start_time is documented + below. + properties: + hours: + description: Specifies the hour of opening. + type: number + minutes: + description: Specifies the minute of opening. + type: number + type: object + type: object + type: array + description: + description: Specifies the description of the Hours of Operation. + type: string + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Hours of Operation. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timeZone: + description: Specifies the time zone of the Hours of Operation. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.config is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.config) + || (has(self.initProvider) && has(self.initProvider.config))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.timeZone is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timeZone) + || (has(self.initProvider) && has(self.initProvider.timeZone))' + status: + description: HoursOfOperationStatus defines the observed state of HoursOfOperation. 
+ properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the Hours of Operation. + type: string + config: + description: 'One or more config blocks which define the configuration + information for the hours of operation: day, start time, and + end time . Config blocks are documented below.' + items: + properties: + day: + description: Specifies the day that the hours of operation + applies to. + type: string + endTime: + description: A end time block specifies the time that your + contact center closes. The end_time is documented below. + properties: + hours: + description: Specifies the hour of closing. + type: number + minutes: + description: Specifies the minute of closing. + type: number + type: object + startTime: + description: A start time block specifies the time that + your contact center opens. The start_time is documented + below. + properties: + hours: + description: Specifies the hour of opening. + type: number + minutes: + description: Specifies the minute of opening. + type: number + type: object + type: object + type: array + description: + description: Specifies the description of the Hours of Operation. + type: string + hoursOfOperationId: + description: The identifier for the hours of operation. + type: string + id: + description: The identifier of the hosting Amazon Connect Instance + and identifier of the Hours of Operation separated by a colon + (:). + type: string + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + name: + description: Specifies the name of the Hours of Operation. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + timeZone: + description: Specifies the time zone of the Hours of Operation. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/connect.aws.upbound.io_instancestorageconfigs.yaml b/package/crds/connect.aws.upbound.io_instancestorageconfigs.yaml index 0a9e0314be..3e46349a3a 100644 --- a/package/crds/connect.aws.upbound.io_instancestorageconfigs.yaml +++ b/package/crds/connect.aws.upbound.io_instancestorageconfigs.yaml @@ -1585,3 +1585,1518 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: InstanceStorageConfig is the Schema for the InstanceStorageConfigs + API. Provides details about a specific Amazon Connect Instance Storage Config. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceStorageConfigSpec defines the desired state of InstanceStorageConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resourceType: + description: 'A valid resource type. Valid Values: AGENT_EVENTS + | ATTACHMENTS | CALL_RECORDINGS | CHAT_TRANSCRIPTS | CONTACT_EVALUATIONS + | CONTACT_TRACE_RECORDS | MEDIA_STREAMS | REAL_TIME_CONTACT_ANALYSIS_SEGMENTS + | SCHEDULED_REPORTS | SCREEN_RECORDINGS.' + type: string + storageConfig: + description: Specifies the storage configuration options for the + Connect Instance. Documented below. + properties: + kinesisFirehoseConfig: + description: A block that specifies the configuration of the + Kinesis Firehose delivery stream. Documented below. 
+ properties: + firehoseArn: + description: The Amazon Resource Name (ARN) of the delivery + stream. + type: string + firehoseArnRef: + description: Reference to a DeliveryStream in firehose + to populate firehoseArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + firehoseArnSelector: + description: Selector for a DeliveryStream in firehose + to populate firehoseArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + kinesisStreamConfig: + description: A block that specifies the configuration of the + Kinesis data stream. Documented below. + properties: + streamArn: + description: The Amazon Resource Name (ARN) of the data + stream. + type: string + streamArnRef: + description: Reference to a Stream in kinesis to populate + streamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamArnSelector: + description: Selector for a Stream in kinesis to populate + streamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + kinesisVideoStreamConfig: + description: A block that specifies the configuration of the + Kinesis video stream. Documented below. + properties: + encryptionConfig: + description: The encryption configuration. Documented + below. + properties: + encryptionType: + description: 'The type of encryption. Valid Values: + KMS.' + type: string + keyId: + description: The full ARN of the encryption key. Be + sure to provide the full ARN of the encryption key, + not just the ID. + type: string + keyIdRef: + description: Reference to a Key in kms to populate + keyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a Key in kms to populate + keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + prefix: + description: The prefix of the video stream. Minimum length + of 1. Maximum length of 128. When read from the state, + the value returned is -connect--contact- + since the API appends additional details to the prefix. + type: string + retentionPeriodHours: + description: The number of hours data is retained in the + stream. 
Kinesis Video Streams retains the data in a + data store that is associated with the stream. Minimum + value of 0. Maximum value of 87600. A value of 0, indicates + that the stream does not persist data. + type: number + type: object + s3Config: + description: A block that specifies the configuration of S3 + Bucket. Documented below. + properties: + bucketName: + description: The S3 bucket name. + type: string + bucketNameRef: + description: Reference to a Bucket in s3 to populate bucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a Bucket in s3 to populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bucketPrefix: + description: The S3 bucket prefix. + type: string + encryptionConfig: + description: The encryption configuration. Documented + below. + properties: + encryptionType: + description: 'The type of encryption. Valid Values: + KMS.' + type: string + keyId: + description: The full ARN of the encryption key. Be + sure to provide the full ARN of the encryption key, + not just the ID. + type: string + keyIdRef: + description: Reference to a Key in kms to populate + keyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a Key in kms to populate + keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + storageType: + description: 'A valid storage type. Valid Values: S3 | KINESIS_VIDEO_STREAM + | KINESIS_STREAM | KINESIS_FIREHOSE.' + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceType: + description: 'A valid resource type. Valid Values: AGENT_EVENTS + | ATTACHMENTS | CALL_RECORDINGS | CHAT_TRANSCRIPTS | CONTACT_EVALUATIONS + | CONTACT_TRACE_RECORDS | MEDIA_STREAMS | REAL_TIME_CONTACT_ANALYSIS_SEGMENTS + | SCHEDULED_REPORTS | SCREEN_RECORDINGS.' + type: string + storageConfig: + description: Specifies the storage configuration options for the + Connect Instance. Documented below. + properties: + kinesisFirehoseConfig: + description: A block that specifies the configuration of the + Kinesis Firehose delivery stream. Documented below. + properties: + firehoseArn: + description: The Amazon Resource Name (ARN) of the delivery + stream. + type: string + firehoseArnRef: + description: Reference to a DeliveryStream in firehose + to populate firehoseArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + firehoseArnSelector: + description: Selector for a DeliveryStream in firehose + to populate firehoseArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + kinesisStreamConfig: + description: A block that specifies the configuration of the + Kinesis data stream. Documented below. + properties: + streamArn: + description: The Amazon Resource Name (ARN) of the data + stream. + type: string + streamArnRef: + description: Reference to a Stream in kinesis to populate + streamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamArnSelector: + description: Selector for a Stream in kinesis to populate + streamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + kinesisVideoStreamConfig: + description: A block that specifies the configuration of the + Kinesis video stream. Documented below. + properties: + encryptionConfig: + description: The encryption configuration. Documented + below. + properties: + encryptionType: + description: 'The type of encryption. Valid Values: + KMS.' + type: string + keyId: + description: The full ARN of the encryption key. Be + sure to provide the full ARN of the encryption key, + not just the ID. + type: string + keyIdRef: + description: Reference to a Key in kms to populate + keyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a Key in kms to populate + keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + prefix: + description: The prefix of the video stream. Minimum length + of 1. Maximum length of 128. When read from the state, + the value returned is -connect--contact- + since the API appends additional details to the prefix. + type: string + retentionPeriodHours: + description: The number of hours data is retained in the + stream. Kinesis Video Streams retains the data in a + data store that is associated with the stream. Minimum + value of 0. Maximum value of 87600. A value of 0, indicates + that the stream does not persist data. + type: number + type: object + s3Config: + description: A block that specifies the configuration of S3 + Bucket. Documented below. + properties: + bucketName: + description: The S3 bucket name. + type: string + bucketNameRef: + description: Reference to a Bucket in s3 to populate bucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a Bucket in s3 to populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bucketPrefix: + description: The S3 bucket prefix. + type: string + encryptionConfig: + description: The encryption configuration. Documented + below. 
+ properties: + encryptionType: + description: 'The type of encryption. Valid Values: + KMS.' + type: string + keyId: + description: The full ARN of the encryption key. Be + sure to provide the full ARN of the encryption key, + not just the ID. + type: string + keyIdRef: + description: Reference to a Key in kms to populate + keyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a Key in kms to populate + keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + storageType: + description: 'A valid storage type. Valid Values: S3 | KINESIS_VIDEO_STREAM + | KINESIS_STREAM | KINESIS_FIREHOSE.' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.resourceType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resourceType) + || (has(self.initProvider) && has(self.initProvider.resourceType))' + - message: spec.forProvider.storageConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageConfig) + || (has(self.initProvider) && has(self.initProvider.storageConfig))' + status: + description: InstanceStorageConfigStatus defines the observed state of + InstanceStorageConfig. + properties: + atProvider: + properties: + associationId: + description: The existing association identifier that uniquely + identifies the resource type and storage config for the given + instance ID. + type: string + id: + description: The identifier of the hosting Amazon Connect Instance, + association_id, and resource_type separated by a colon (:). + type: string + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + resourceType: + description: 'A valid resource type. Valid Values: AGENT_EVENTS + | ATTACHMENTS | CALL_RECORDINGS | CHAT_TRANSCRIPTS | CONTACT_EVALUATIONS + | CONTACT_TRACE_RECORDS | MEDIA_STREAMS | REAL_TIME_CONTACT_ANALYSIS_SEGMENTS + | SCHEDULED_REPORTS | SCREEN_RECORDINGS.' + type: string + storageConfig: + description: Specifies the storage configuration options for the + Connect Instance. Documented below. + properties: + kinesisFirehoseConfig: + description: A block that specifies the configuration of the + Kinesis Firehose delivery stream. Documented below. 
+ properties: + firehoseArn: + description: The Amazon Resource Name (ARN) of the delivery + stream. + type: string + type: object + kinesisStreamConfig: + description: A block that specifies the configuration of the + Kinesis data stream. Documented below. + properties: + streamArn: + description: The Amazon Resource Name (ARN) of the data + stream. + type: string + type: object + kinesisVideoStreamConfig: + description: A block that specifies the configuration of the + Kinesis video stream. Documented below. + properties: + encryptionConfig: + description: The encryption configuration. Documented + below. + properties: + encryptionType: + description: 'The type of encryption. Valid Values: + KMS.' + type: string + keyId: + description: The full ARN of the encryption key. Be + sure to provide the full ARN of the encryption key, + not just the ID. + type: string + type: object + prefix: + description: The prefix of the video stream. Minimum length + of 1. Maximum length of 128. When read from the state, + the value returned is -connect--contact- + since the API appends additional details to the prefix. + type: string + retentionPeriodHours: + description: The number of hours data is retained in the + stream. Kinesis Video Streams retains the data in a + data store that is associated with the stream. Minimum + value of 0. Maximum value of 87600. A value of 0, indicates + that the stream does not persist data. + type: number + type: object + s3Config: + description: A block that specifies the configuration of S3 + Bucket. Documented below. + properties: + bucketName: + description: The S3 bucket name. + type: string + bucketPrefix: + description: The S3 bucket prefix. + type: string + encryptionConfig: + description: The encryption configuration. Documented + below. + properties: + encryptionType: + description: 'The type of encryption. Valid Values: + KMS.' + type: string + keyId: + description: The full ARN of the encryption key. 
Be + sure to provide the full ARN of the encryption key, + not just the ID. + type: string + type: object + type: object + storageType: + description: 'A valid storage type. Valid Values: S3 | KINESIS_VIDEO_STREAM + | KINESIS_STREAM | KINESIS_FIREHOSE.' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/connect.aws.upbound.io_queues.yaml b/package/crds/connect.aws.upbound.io_queues.yaml index da25f9ba28..c82592b6f5 100644 --- a/package/crds/connect.aws.upbound.io_queues.yaml +++ b/package/crds/connect.aws.upbound.io_queues.yaml @@ -1607,3 +1607,789 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta3 + schema: + openAPIV3Schema: + description: Queue is the Schema for the Queues API. Provides details about + a specific Amazon Connect Queue + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: QueueSpec defines the desired state of Queue + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Specifies the description of the Queue. + type: string + hoursOfOperationId: + description: Specifies the identifier of the Hours of Operation. + type: string + hoursOfOperationIdRef: + description: Reference to a HoursOfOperation in connect to populate + hoursOfOperationId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + hoursOfOperationIdSelector: + description: Selector for a HoursOfOperation in connect to populate + hoursOfOperationId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + maxContacts: + description: Specifies the maximum number of contacts that can + be in the queue before it is considered full. Minimum value + of 0. + type: number + name: + description: Specifies the name of the Queue. 
+ type: string + outboundCallerConfig: + description: A block that defines the outbound caller ID name, + number, and outbound whisper flow. The Outbound Caller Config + block is documented below. + properties: + outboundCallerIdName: + description: Specifies the caller ID name. + type: string + outboundCallerIdNumberId: + description: Specifies the caller ID number. + type: string + outboundFlowId: + description: Specifies outbound whisper flow to be used during + an outbound call. + type: string + type: object + quickConnectIds: + description: Specifies a list of quick connects ids that determine + the quick connects available to agents who are working the queue. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: Region is the region you'd like your resource to + be created in. + type: string + status: + description: Specifies the description of the Queue. Valid values + are ENABLED, DISABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Specifies the description of the Queue. 
+ type: string + hoursOfOperationId: + description: Specifies the identifier of the Hours of Operation. + type: string + hoursOfOperationIdRef: + description: Reference to a HoursOfOperation in connect to populate + hoursOfOperationId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + hoursOfOperationIdSelector: + description: Selector for a HoursOfOperation in connect to populate + hoursOfOperationId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + maxContacts: + description: Specifies the maximum number of contacts that can + be in the queue before it is considered full. Minimum value + of 0. + type: number + name: + description: Specifies the name of the Queue. + type: string + outboundCallerConfig: + description: A block that defines the outbound caller ID name, + number, and outbound whisper flow. The Outbound Caller Config + block is documented below. + properties: + outboundCallerIdName: + description: Specifies the caller ID name. + type: string + outboundCallerIdNumberId: + description: Specifies the caller ID number. + type: string + outboundFlowId: + description: Specifies outbound whisper flow to be used during + an outbound call. + type: string + type: object + quickConnectIds: + description: Specifies a list of quick connects ids that determine + the quick connects available to agents who are working the queue. + items: + type: string + type: array + x-kubernetes-list-type: set + status: + description: Specifies the description of the Queue. Valid values + are ENABLED, DISABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: QueueStatus defines the observed state of Queue. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the Queue. 
+ type: string + description: + description: Specifies the description of the Queue. + type: string + hoursOfOperationId: + description: Specifies the identifier of the Hours of Operation. + type: string + id: + description: The identifier of the hosting Amazon Connect Instance + and identifier of the Queue separated by a colon (:). + type: string + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + maxContacts: + description: Specifies the maximum number of contacts that can + be in the queue before it is considered full. Minimum value + of 0. + type: number + name: + description: Specifies the name of the Queue. + type: string + outboundCallerConfig: + description: A block that defines the outbound caller ID name, + number, and outbound whisper flow. The Outbound Caller Config + block is documented below. + properties: + outboundCallerIdName: + description: Specifies the caller ID name. + type: string + outboundCallerIdNumberId: + description: Specifies the caller ID number. + type: string + outboundFlowId: + description: Specifies outbound whisper flow to be used during + an outbound call. + type: string + type: object + queueId: + description: The identifier for the Queue. + type: string + quickConnectIds: + description: Specifies a list of quick connects ids that determine + the quick connects available to agents who are working the queue. + items: + type: string + type: array + x-kubernetes-list-type: set + status: + description: Specifies the description of the Queue. Valid values + are ENABLED, DISABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/connect.aws.upbound.io_quickconnects.yaml b/package/crds/connect.aws.upbound.io_quickconnects.yaml index c2ceb82b71..7121e8a234 100644 --- a/package/crds/connect.aws.upbound.io_quickconnects.yaml +++ b/package/crds/connect.aws.upbound.io_quickconnects.yaml @@ -716,3 +716,695 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: QuickConnect is the Schema for the QuickConnects API. Provides + details about a specific Amazon Quick Connect + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: QuickConnectSpec defines the desired state of QuickConnect + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Specifies the description of the Quick Connect. + type: string + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Quick Connect. + type: string + quickConnectConfig: + description: 'A block that defines the configuration information + for the Quick Connect: quick_connect_type and one of phone_config, + queue_config, user_config . The Quick Connect Config block is + documented below.' + properties: + phoneConfig: + description: Specifies the phone configuration of the Quick + Connect. This is required only if quick_connect_type is + PHONE_NUMBER. The phone_config block is documented below. + items: + properties: + phoneNumber: + description: Specifies the phone number in in E.164 + format. 
+ type: string + type: object + type: array + queueConfig: + description: Specifies the queue configuration of the Quick + Connect. This is required only if quick_connect_type is + QUEUE. The queue_config block is documented below. + items: + properties: + contactFlowId: + description: Specifies the identifier of the contact + flow. + type: string + queueId: + description: Specifies the identifier for the queue. + type: string + type: object + type: array + quickConnectType: + description: Specifies the configuration type of the quick + connect. valid values are PHONE_NUMBER, QUEUE, USER. + type: string + userConfig: + description: Specifies the user configuration of the Quick + Connect. This is required only if quick_connect_type is + USER. The user_config block is documented below. + items: + properties: + contactFlowId: + description: Specifies the identifier of the contact + flow. + type: string + userId: + description: Specifies the identifier for the user. + type: string + type: object + type: array + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + description: + description: Specifies the description of the Quick Connect. + type: string + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Quick Connect. + type: string + quickConnectConfig: + description: 'A block that defines the configuration information + for the Quick Connect: quick_connect_type and one of phone_config, + queue_config, user_config . The Quick Connect Config block is + documented below.' + properties: + phoneConfig: + description: Specifies the phone configuration of the Quick + Connect. This is required only if quick_connect_type is + PHONE_NUMBER. The phone_config block is documented below. + items: + properties: + phoneNumber: + description: Specifies the phone number in in E.164 + format. + type: string + type: object + type: array + queueConfig: + description: Specifies the queue configuration of the Quick + Connect. This is required only if quick_connect_type is + QUEUE. The queue_config block is documented below. + items: + properties: + contactFlowId: + description: Specifies the identifier of the contact + flow. + type: string + queueId: + description: Specifies the identifier for the queue. + type: string + type: object + type: array + quickConnectType: + description: Specifies the configuration type of the quick + connect. valid values are PHONE_NUMBER, QUEUE, USER. + type: string + userConfig: + description: Specifies the user configuration of the Quick + Connect. This is required only if quick_connect_type is + USER. The user_config block is documented below. + items: + properties: + contactFlowId: + description: Specifies the identifier of the contact + flow. 
+ type: string + userId: + description: Specifies the identifier for the user. + type: string + type: object + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.quickConnectConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.quickConnectConfig) + || (has(self.initProvider) && has(self.initProvider.quickConnectConfig))' + status: + description: QuickConnectStatus defines the observed state of QuickConnect. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the Quick Connect. + type: string + description: + description: Specifies the description of the Quick Connect. + type: string + id: + description: The identifier of the hosting Amazon Connect Instance + and identifier of the Quick Connect separated by a colon (:). + type: string + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + name: + description: Specifies the name of the Quick Connect. + type: string + quickConnectConfig: + description: 'A block that defines the configuration information + for the Quick Connect: quick_connect_type and one of phone_config, + queue_config, user_config . The Quick Connect Config block is + documented below.' + properties: + phoneConfig: + description: Specifies the phone configuration of the Quick + Connect. This is required only if quick_connect_type is + PHONE_NUMBER. The phone_config block is documented below. + items: + properties: + phoneNumber: + description: Specifies the phone number in in E.164 + format. 
+ type: string + type: object + type: array + queueConfig: + description: Specifies the queue configuration of the Quick + Connect. This is required only if quick_connect_type is + QUEUE. The queue_config block is documented below. + items: + properties: + contactFlowId: + description: Specifies the identifier of the contact + flow. + type: string + queueId: + description: Specifies the identifier for the queue. + type: string + type: object + type: array + quickConnectType: + description: Specifies the configuration type of the quick + connect. valid values are PHONE_NUMBER, QUEUE, USER. + type: string + userConfig: + description: Specifies the user configuration of the Quick + Connect. This is required only if quick_connect_type is + USER. The user_config block is documented below. + items: + properties: + contactFlowId: + description: Specifies the identifier of the contact + flow. + type: string + userId: + description: Specifies the identifier for the user. + type: string + type: object + type: array + type: object + quickConnectId: + description: The identifier for the Quick Connect. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/connect.aws.upbound.io_userhierarchystructures.yaml b/package/crds/connect.aws.upbound.io_userhierarchystructures.yaml index bb57b9ee2a..eb82b5f55d 100644 --- a/package/crds/connect.aws.upbound.io_userhierarchystructures.yaml +++ b/package/crds/connect.aws.upbound.io_userhierarchystructures.yaml @@ -723,3 +723,672 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: UserHierarchyStructure is the Schema for the UserHierarchyStructures + API. Provides details about a specific Amazon Connect User Hierarchy Structure + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserHierarchyStructureSpec defines the desired state of UserHierarchyStructure + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + hierarchyStructure: + description: A block that defines the hierarchy structure's levels. + The hierarchy_structure block is documented below. + properties: + levelFive: + description: A block that defines the details of level five. + The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelFour: + description: A block that defines the details of level four. + The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelOne: + description: A block that defines the details of level one. + The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelThree: + description: A block that defines the details of level three. 
+ The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelTwo: + description: A block that defines the details of level two. + The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + type: object + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + hierarchyStructure: + description: A block that defines the hierarchy structure's levels. + The hierarchy_structure block is documented below. + properties: + levelFive: + description: A block that defines the details of level five. + The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. 
Must + not be more than 50 characters. + type: string + type: object + levelFour: + description: A block that defines the details of level four. + The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelOne: + description: A block that defines the details of level one. + The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelThree: + description: A block that defines the details of level three. + The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelTwo: + description: A block that defines the details of level two. + The level block is documented below. + properties: + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + type: object + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.hierarchyStructure is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.hierarchyStructure) + || (has(self.initProvider) && has(self.initProvider.hierarchyStructure))' + status: + description: UserHierarchyStructureStatus defines the observed state of + UserHierarchyStructure. + properties: + atProvider: + properties: + hierarchyStructure: + description: A block that defines the hierarchy structure's levels. 
+ The hierarchy_structure block is documented below. + properties: + levelFive: + description: A block that defines the details of level five. + The level block is documented below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the hierarchy + level. + type: string + id: + description: The identifier of the hosting Amazon Connect + Instance. + type: string + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelFour: + description: A block that defines the details of level four. + The level block is documented below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the hierarchy + level. + type: string + id: + description: The identifier of the hosting Amazon Connect + Instance. + type: string + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelOne: + description: A block that defines the details of level one. + The level block is documented below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the hierarchy + level. + type: string + id: + description: The identifier of the hosting Amazon Connect + Instance. + type: string + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelThree: + description: A block that defines the details of level three. + The level block is documented below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the hierarchy + level. + type: string + id: + description: The identifier of the hosting Amazon Connect + Instance. + type: string + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + levelTwo: + description: A block that defines the details of level two. + The level block is documented below. 
+ properties: + arn: + description: The Amazon Resource Name (ARN) of the hierarchy + level. + type: string + id: + description: The identifier of the hosting Amazon Connect + Instance. + type: string + name: + description: The name of the user hierarchy level. Must + not be more than 50 characters. + type: string + type: object + type: object + id: + description: The identifier of the hosting Amazon Connect Instance. + type: string + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/connect.aws.upbound.io_users.yaml b/package/crds/connect.aws.upbound.io_users.yaml index 505a10bacf..d992c40b1e 100644 --- a/package/crds/connect.aws.upbound.io_users.yaml +++ b/package/crds/connect.aws.upbound.io_users.yaml @@ -995,3 +995,965 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: User is the Schema for the Users API. Provides details about + a specific Amazon Connect User + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserSpec defines the desired state of User + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + directoryUserId: + description: The identifier of the user account in the directory + used for identity management. If Amazon Connect cannot access + the directory, you can specify this identifier to authenticate + users. If you include the identifier, we assume that Amazon + Connect cannot access the directory. Otherwise, the identity + information is used to authenticate users from your directory. + This parameter is required if you are using an existing directory + for identity management in Amazon Connect when Amazon Connect + cannot access your directory to authenticate users. If you are + using SAML for identity management and include this parameter, + an error is returned. + type: string + hierarchyGroupId: + description: The identifier of the hierarchy group for the user. + type: string + identityInfo: + description: A block that contains information about the identity + of the user. Documented below. + properties: + email: + description: The email address. 
If you are using SAML for + identity management and include this parameter, an error + is returned. Note that updates to the email is supported. + From the UpdateUserIdentityInfo API documentation it is + strongly recommended to limit who has the ability to invoke + UpdateUserIdentityInfo. Someone with that ability can change + the login credentials of other users by changing their email + address. This poses a security risk to your organization. + They can change the email address of a user to the attacker's + email address, and then reset the password through email. + For more information, see Best Practices for Security Profiles + in the Amazon Connect Administrator Guide. + type: string + firstName: + description: The first name. This is required if you are using + Amazon Connect or SAML for identity management. Minimum + length of 1. Maximum length of 100. + type: string + lastName: + description: The last name. This is required if you are using + Amazon Connect or SAML for identity management. Minimum + length of 1. Maximum length of 100. + type: string + type: object + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The user name for the account. For instances not + using SAML for identity management, the user name can include + up to 20 characters. If you are using SAML for identity management, + the user name can include up to 64 characters from [a-zA-Z0-9_-.\@]+. + type: string + passwordSecretRef: + description: The password for the user account. A password is + required if you are using Amazon Connect for identity management. + Otherwise, it is an error to include a password. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + phoneConfig: + description: A block that contains information about the phone + settings for the user. Documented below. + properties: + afterContactWorkTimeLimit: + description: The After Call Work (ACW) timeout setting, in + seconds. Minimum value of 0. + type: number + autoAccept: + description: When Auto-Accept Call is enabled for an available + agent, the agent connects to contacts automatically. + type: boolean + deskPhoneNumber: + description: The phone number for the user's desk phone. Required + if phone_type is set as DESK_PHONE. + type: string + phoneType: + description: The phone type. Valid values are DESK_PHONE and + SOFT_PHONE. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + routingProfileId: + description: The identifier of the routing profile for the user. + type: string + routingProfileIdRef: + description: Reference to a RoutingProfile in connect to populate + routingProfileId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + routingProfileIdSelector: + description: Selector for a RoutingProfile in connect to populate + routingProfileId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityProfileIds: + description: A list of identifiers for the security profiles for + the user. Specify a minimum of 1 and maximum of 10 security + profile ids. For more information, see Best Practices for Security + Profiles in the Amazon Connect Administrator Guide. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + directoryUserId: + description: The identifier of the user account in the directory + used for identity management. If Amazon Connect cannot access + the directory, you can specify this identifier to authenticate + users. If you include the identifier, we assume that Amazon + Connect cannot access the directory. Otherwise, the identity + information is used to authenticate users from your directory. + This parameter is required if you are using an existing directory + for identity management in Amazon Connect when Amazon Connect + cannot access your directory to authenticate users. If you are + using SAML for identity management and include this parameter, + an error is returned. + type: string + hierarchyGroupId: + description: The identifier of the hierarchy group for the user. + type: string + identityInfo: + description: A block that contains information about the identity + of the user. Documented below. + properties: + email: + description: The email address. If you are using SAML for + identity management and include this parameter, an error + is returned. Note that updates to the email is supported. + From the UpdateUserIdentityInfo API documentation it is + strongly recommended to limit who has the ability to invoke + UpdateUserIdentityInfo. 
Someone with that ability can change + the login credentials of other users by changing their email + address. This poses a security risk to your organization. + They can change the email address of a user to the attacker's + email address, and then reset the password through email. + For more information, see Best Practices for Security Profiles + in the Amazon Connect Administrator Guide. + type: string + firstName: + description: The first name. This is required if you are using + Amazon Connect or SAML for identity management. Minimum + length of 1. Maximum length of 100. + type: string + lastName: + description: The last name. This is required if you are using + Amazon Connect or SAML for identity management. Minimum + length of 1. Maximum length of 100. + type: string + type: object + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + instanceIdRef: + description: Reference to a Instance in connect to populate instanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceIdSelector: + description: Selector for a Instance in connect to populate instanceId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The user name for the account. For instances not + using SAML for identity management, the user name can include + up to 20 characters. If you are using SAML for identity management, + the user name can include up to 64 characters from [a-zA-Z0-9_-.\@]+. + type: string + passwordSecretRef: + description: The password for the user account. A password is + required if you are using Amazon Connect for identity management. + Otherwise, it is an error to include a password. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + phoneConfig: + description: A block that contains information about the phone + settings for the user. Documented below. 
+ properties: + afterContactWorkTimeLimit: + description: The After Call Work (ACW) timeout setting, in + seconds. Minimum value of 0. + type: number + autoAccept: + description: When Auto-Accept Call is enabled for an available + agent, the agent connects to contacts automatically. + type: boolean + deskPhoneNumber: + description: The phone number for the user's desk phone. Required + if phone_type is set as DESK_PHONE. + type: string + phoneType: + description: The phone type. Valid values are DESK_PHONE and + SOFT_PHONE. + type: string + type: object + routingProfileId: + description: The identifier of the routing profile for the user. + type: string + routingProfileIdRef: + description: Reference to a RoutingProfile in connect to populate + routingProfileId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + routingProfileIdSelector: + description: Selector for a RoutingProfile in connect to populate + routingProfileId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityProfileIds: + description: A list of identifiers for the security profiles for + the user. Specify a minimum of 1 and maximum of 10 security + profile ids. For more information, see Best Practices for Security + Profiles in the Amazon Connect Administrator Guide. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.phoneConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.phoneConfig) + || (has(self.initProvider) && has(self.initProvider.phoneConfig))' + - message: spec.forProvider.securityProfileIds is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.securityProfileIds) + || (has(self.initProvider) && has(self.initProvider.securityProfileIds))' + status: + description: UserStatus defines the observed state of User. 
+ properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the user. + type: string + directoryUserId: + description: The identifier of the user account in the directory + used for identity management. If Amazon Connect cannot access + the directory, you can specify this identifier to authenticate + users. If you include the identifier, we assume that Amazon + Connect cannot access the directory. Otherwise, the identity + information is used to authenticate users from your directory. + This parameter is required if you are using an existing directory + for identity management in Amazon Connect when Amazon Connect + cannot access your directory to authenticate users. If you are + using SAML for identity management and include this parameter, + an error is returned. + type: string + hierarchyGroupId: + description: The identifier of the hierarchy group for the user. + type: string + id: + description: |- + The identifier of the hosting Amazon Connect Instance and identifier of the user + separated by a colon (:). + type: string + identityInfo: + description: A block that contains information about the identity + of the user. Documented below. + properties: + email: + description: The email address. If you are using SAML for + identity management and include this parameter, an error + is returned. Note that updates to the email is supported. + From the UpdateUserIdentityInfo API documentation it is + strongly recommended to limit who has the ability to invoke + UpdateUserIdentityInfo. Someone with that ability can change + the login credentials of other users by changing their email + address. This poses a security risk to your organization. + They can change the email address of a user to the attacker's + email address, and then reset the password through email. + For more information, see Best Practices for Security Profiles + in the Amazon Connect Administrator Guide. + type: string + firstName: + description: The first name. 
This is required if you are using + Amazon Connect or SAML for identity management. Minimum + length of 1. Maximum length of 100. + type: string + lastName: + description: The last name. This is required if you are using + Amazon Connect or SAML for identity management. Minimum + length of 1. Maximum length of 100. + type: string + type: object + instanceId: + description: Specifies the identifier of the hosting Amazon Connect + Instance. + type: string + name: + description: The user name for the account. For instances not + using SAML for identity management, the user name can include + up to 20 characters. If you are using SAML for identity management, + the user name can include up to 64 characters from [a-zA-Z0-9_-.\@]+. + type: string + phoneConfig: + description: A block that contains information about the phone + settings for the user. Documented below. + properties: + afterContactWorkTimeLimit: + description: The After Call Work (ACW) timeout setting, in + seconds. Minimum value of 0. + type: number + autoAccept: + description: When Auto-Accept Call is enabled for an available + agent, the agent connects to contacts automatically. + type: boolean + deskPhoneNumber: + description: The phone number for the user's desk phone. Required + if phone_type is set as DESK_PHONE. + type: string + phoneType: + description: The phone type. Valid values are DESK_PHONE and + SOFT_PHONE. + type: string + type: object + routingProfileId: + description: The identifier of the routing profile for the user. + type: string + securityProfileIds: + description: A list of identifiers for the security profiles for + the user. Specify a minimum of 1 and maximum of 10 security + profile ids. For more information, see Best Practices for Security + Profiles in the Amazon Connect Administrator Guide. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + userId: + description: The identifier for the user. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datasync.aws.upbound.io_locations3s.yaml b/package/crds/datasync.aws.upbound.io_locations3s.yaml index dfb837d345..165f3843de 100644 --- a/package/crds/datasync.aws.upbound.io_locations3s.yaml +++ b/package/crds/datasync.aws.upbound.io_locations3s.yaml @@ -754,3 +754,733 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LocationS3 is the Schema for the LocationS3s API. Manages an + AWS DataSync S3 Location + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LocationS3Spec defines the desired state of LocationS3 + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + agentArns: + description: A list of DataSync Agent ARNs with which this location + will be associated. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: Region is the region you'd like your resource to + be created in. + type: string + s3BucketArn: + description: Amazon Resource Name (ARN) of the S3 Bucket. + type: string + s3BucketArnRef: + description: Reference to a Bucket in s3 to populate s3BucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + s3BucketArnSelector: + description: Selector for a Bucket in s3 to populate s3BucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3Config: + description: Configuration block containing information for connecting + to S3. + properties: + bucketAccessRoleArn: + description: ARN of the IAM Role used to connect to the S3 + Bucket. + type: string + bucketAccessRoleArnRef: + description: Reference to a Role in iam to populate bucketAccessRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketAccessRoleArnSelector: + description: Selector for a Role in iam to populate bucketAccessRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + s3StorageClass: + description: The Amazon S3 storage class that you want to store + your files in when this location is used as a task destination. + Valid values + type: string + subdirectory: + description: Prefix to perform actions as source or destination. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + agentArns: + description: A list of DataSync Agent ARNs with which this location + will be associated. + items: + type: string + type: array + x-kubernetes-list-type: set + s3BucketArn: + description: Amazon Resource Name (ARN) of the S3 Bucket. + type: string + s3BucketArnRef: + description: Reference to a Bucket in s3 to populate s3BucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + s3BucketArnSelector: + description: Selector for a Bucket in s3 to populate s3BucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3Config: + description: Configuration block containing information for connecting + to S3. + properties: + bucketAccessRoleArn: + description: ARN of the IAM Role used to connect to the S3 + Bucket. 
+ type: string + bucketAccessRoleArnRef: + description: Reference to a Role in iam to populate bucketAccessRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketAccessRoleArnSelector: + description: Selector for a Role in iam to populate bucketAccessRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + s3StorageClass: + description: The Amazon S3 storage class that you want to store + your files in when this location is used as a task destination. + Valid values + type: string + subdirectory: + description: Prefix to perform actions as source or destination. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.s3Config is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.s3Config) + || (has(self.initProvider) && has(self.initProvider.s3Config))' + - message: spec.forProvider.subdirectory is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.subdirectory) + || (has(self.initProvider) && has(self.initProvider.subdirectory))' + status: + description: LocationS3Status defines the observed state of LocationS3. + properties: + atProvider: + properties: + agentArns: + description: A list of DataSync Agent ARNs with which this location + will be associated. + items: + type: string + type: array + x-kubernetes-list-type: set + arn: + description: Amazon Resource Name (ARN) of the DataSync Location. + type: string + id: + description: Amazon Resource Name (ARN) of the DataSync Location. + type: string + s3BucketArn: + description: Amazon Resource Name (ARN) of the S3 Bucket. + type: string + s3Config: + description: Configuration block containing information for connecting + to S3. + properties: + bucketAccessRoleArn: + description: ARN of the IAM Role used to connect to the S3 + Bucket. 
+ type: string + type: object + s3StorageClass: + description: The Amazon S3 storage class that you want to store + your files in when this location is used as a task destination. + Valid values + type: string + subdirectory: + description: Prefix to perform actions as source or destination. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + uri: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datasync.aws.upbound.io_tasks.yaml b/package/crds/datasync.aws.upbound.io_tasks.yaml index 7335968a23..dcbce1e537 100644 --- a/package/crds/datasync.aws.upbound.io_tasks.yaml +++ b/package/crds/datasync.aws.upbound.io_tasks.yaml @@ -1539,3 +1539,1476 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Task is the Schema for the Tasks API. Manages an AWS DataSync + Task + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TaskSpec defines the desired state of Task + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cloudwatchLogGroupArn: + description: Amazon Resource Name (ARN) of the CloudWatch Log + Group that is used to monitor and log events in the sync task. + type: string + cloudwatchLogGroupArnRef: + description: Reference to a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudwatchLogGroupArnSelector: + description: Selector for a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + destinationLocationArn: + description: Amazon Resource Name (ARN) of destination DataSync + Location. + type: string + destinationLocationArnRef: + description: Reference to a LocationS3 in datasync to populate + destinationLocationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + destinationLocationArnSelector: + description: Selector for a LocationS3 in datasync to populate + destinationLocationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + excludes: + description: Filter rules that determines which files to exclude + from a task. 
+ properties: + filterType: + description: 'The type of filter rule to apply. Valid values: + SIMPLE_PATTERN.' + type: string + value: + description: 'A single filter string that consists of the + patterns to exclude. The patterns are delimited by "|" (that + is, a pipe), for example: /folder1|/folder2' + type: string + type: object + includes: + description: Filter rules that determines which files to include + in a task. + properties: + filterType: + description: 'The type of filter rule to apply. Valid values: + SIMPLE_PATTERN.' + type: string + value: + description: 'A single filter string that consists of the + patterns to exclude. The patterns are delimited by "|" (that + is, a pipe), for example: /folder1|/folder2' + type: string + type: object + name: + description: Name of the DataSync Task. + type: string + options: + description: Configuration block containing option that controls + the default behavior when you start an execution of this DataSync + Task. For each individual task execution, you can override these + options by specifying an overriding configuration in those executions. + properties: + atime: + description: 'A file metadata that shows the last time a file + was accessed (that is when the file was read or written + to). If set to BEST_EFFORT, the DataSync Task attempts to + preserve the original (that is, the version before sync + PREPARING phase) atime attribute on all source files. Valid + values: BEST_EFFORT, NONE. Default: BEST_EFFORT.' + type: string + bytesPerSecond: + description: 'Limits the bandwidth utilized. For example, + to set a maximum of 1 MB, set this value to 1048576. Value + values: -1 or greater. Default: -1 (unlimited).' + type: number + gid: + description: 'Group identifier of the file''s owners. Valid + values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE + (preserve integer value of the ID).' 
+ type: string + logLevel: + description: 'Determines the type of logs that DataSync publishes + to a log stream in the Amazon CloudWatch log group that + you provide. Valid values: OFF, BASIC, TRANSFER. Default: + OFF.' + type: string + mtime: + description: 'A file metadata that indicates the last time + a file was modified (written to) before the sync PREPARING + phase. Value values: NONE, PRESERVE. Default: PRESERVE.' + type: string + objectTags: + description: 'Specifies whether object tags are maintained + when transferring between object storage systems. If you + want your DataSync task to ignore object tags, specify the + NONE value. Valid values: PRESERVE, NONE. Default value: + PRESERVE.' + type: string + overwriteMode: + description: 'Determines whether files at the destination + should be overwritten or preserved when copying files. Valid + values: ALWAYS, NEVER. Default: ALWAYS.' + type: string + posixPermissions: + description: 'Determines which users or groups can access + a file for a specific purpose such as reading, writing, + or execution of the file. Valid values: NONE, PRESERVE. + Default: PRESERVE.' + type: string + preserveDeletedFiles: + description: 'Whether files deleted in the source should be + removed or preserved in the destination file system. Valid + values: PRESERVE, REMOVE. Default: PRESERVE.' + type: string + preserveDevices: + description: 'Whether the DataSync Task should preserve the + metadata of block and character devices in the source files + system, and recreate the files with that device name and + metadata on the destination. The DataSync Task can’t sync + the actual contents of such devices, because many of the + devices are non-terminal and don’t return an end of file + (EOF) marker. Valid values: NONE, PRESERVE. Default: NONE + (ignore special devices).' 
+ type: string + securityDescriptorCopyFlags: + description: 'Determines which components of the SMB security + descriptor are copied from source to destination objects. + This value is only used for transfers between SMB and Amazon + FSx for Windows File Server locations, or between two Amazon + FSx for Windows File Server locations. Valid values: NONE, + OWNER_DACL, OWNER_DACL_SACL. Default: OWNER_DACL.' + type: string + taskQueueing: + description: 'Determines whether tasks should be queued before + executing the tasks. Valid values: ENABLED, DISABLED. Default + ENABLED.' + type: string + transferMode: + description: 'Determines whether DataSync transfers only the + data and metadata that differ between the source and the + destination location, or whether DataSync transfers all + the content from the source, without comparing to the destination + location. Valid values: CHANGED, ALL. Default: CHANGED' + type: string + uid: + description: 'User identifier of the file''s owners. Valid + values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE + (preserve integer value of the ID).' + type: string + verifyMode: + description: 'Whether a data integrity verification should + be performed at the end of a task execution after all data + and metadata have been transferred. Valid values: NONE, + POINT_IN_TIME_CONSISTENT, ONLY_FILES_TRANSFERRED. Default: + POINT_IN_TIME_CONSISTENT.' + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + schedule: + description: Specifies a schedule used to periodically transfer + files from a source to a destination location. + properties: + scheduleExpression: + description: Specifies the schedule you want your task to + use for repeated executions. For more information, see Schedule + Expressions for Rules. + type: string + type: object + sourceLocationArn: + description: Amazon Resource Name (ARN) of source DataSync Location. 
+ type: string + sourceLocationArnRef: + description: Reference to a LocationS3 in datasync to populate + sourceLocationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceLocationArnSelector: + description: Selector for a LocationS3 in datasync to populate + sourceLocationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taskReportConfig: + description: Configuration block containing the configuration + of a DataSync Task Report. See task_report_config below. + properties: + outputType: + description: 'Specifies the type of task report you''d like. + Valid values: SUMMARY_ONLY and STANDARD.' + type: string + reportLevel: + description: 'Specifies whether you want your task report + to include only what went wrong with your transfer or a + list of what succeeded and didn''t. Valid values: ERRORS_ONLY + and SUCCESSES_AND_ERRORS.' + type: string + reportOverrides: + description: Configuration block containing the configuration + of the reporting level for aspects of your task report. + See report_overrides below. + properties: + deletedOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to delete in your destination location. This only applies + if you configure your task to delete data in the destination + that isn''t in the source. Valid values: ERRORS_ONLY + and SUCCESSES_AND_ERRORS.' + type: string + skippedOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to skip during your transfer. Valid values: ERRORS_ONLY + and SUCCESSES_AND_ERRORS.' + type: string + transferredOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS.' 
+ type: string + verifiedOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to verify at the end of your transfer. Valid values: + ERRORS_ONLY and SUCCESSES_AND_ERRORS.' + type: string + type: object + s3Destination: + description: Configuration block containing the configuration + for the Amazon S3 bucket where DataSync uploads your task + report. See s3_destination below. + properties: + bucketAccessRoleArn: + description: Specifies the Amazon Resource Name (ARN) + of the IAM policy that allows DataSync to upload a task + report to your S3 bucket. + type: string + s3BucketArn: + description: Specifies the ARN of the S3 bucket where + DataSync uploads your report. + type: string + subdirectory: + description: Specifies a bucket prefix for your report. + type: string + type: object + s3ObjectVersioning: + description: 'Specifies whether your task report includes + the new version of each object transferred into an S3 bucket. + This only applies if you enable versioning on your bucket. + Keep in mind that setting this to INCLUDE can increase the + duration of your task execution. Valid values: INCLUDE and + NONE.' + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + cloudwatchLogGroupArn: + description: Amazon Resource Name (ARN) of the CloudWatch Log + Group that is used to monitor and log events in the sync task. + type: string + cloudwatchLogGroupArnRef: + description: Reference to a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudwatchLogGroupArnSelector: + description: Selector for a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + destinationLocationArn: + description: Amazon Resource Name (ARN) of destination DataSync + Location. + type: string + destinationLocationArnRef: + description: Reference to a LocationS3 in datasync to populate + destinationLocationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + destinationLocationArnSelector: + description: Selector for a LocationS3 in datasync to populate + destinationLocationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + excludes: + description: Filter rules that determines which files to exclude + from a task. + properties: + filterType: + description: 'The type of filter rule to apply. Valid values: + SIMPLE_PATTERN.' + type: string + value: + description: 'A single filter string that consists of the + patterns to exclude. The patterns are delimited by "|" (that + is, a pipe), for example: /folder1|/folder2' + type: string + type: object + includes: + description: Filter rules that determines which files to include + in a task. + properties: + filterType: + description: 'The type of filter rule to apply. Valid values: + SIMPLE_PATTERN.' + type: string + value: + description: 'A single filter string that consists of the + patterns to exclude. The patterns are delimited by "|" (that + is, a pipe), for example: /folder1|/folder2' + type: string + type: object + name: + description: Name of the DataSync Task. 
+ type: string + options: + description: Configuration block containing option that controls + the default behavior when you start an execution of this DataSync + Task. For each individual task execution, you can override these + options by specifying an overriding configuration in those executions. + properties: + atime: + description: 'A file metadata that shows the last time a file + was accessed (that is when the file was read or written + to). If set to BEST_EFFORT, the DataSync Task attempts to + preserve the original (that is, the version before sync + PREPARING phase) atime attribute on all source files. Valid + values: BEST_EFFORT, NONE. Default: BEST_EFFORT.' + type: string + bytesPerSecond: + description: 'Limits the bandwidth utilized. For example, + to set a maximum of 1 MB, set this value to 1048576. Value + values: -1 or greater. Default: -1 (unlimited).' + type: number + gid: + description: 'Group identifier of the file''s owners. Valid + values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE + (preserve integer value of the ID).' + type: string + logLevel: + description: 'Determines the type of logs that DataSync publishes + to a log stream in the Amazon CloudWatch log group that + you provide. Valid values: OFF, BASIC, TRANSFER. Default: + OFF.' + type: string + mtime: + description: 'A file metadata that indicates the last time + a file was modified (written to) before the sync PREPARING + phase. Value values: NONE, PRESERVE. Default: PRESERVE.' + type: string + objectTags: + description: 'Specifies whether object tags are maintained + when transferring between object storage systems. If you + want your DataSync task to ignore object tags, specify the + NONE value. Valid values: PRESERVE, NONE. Default value: + PRESERVE.' + type: string + overwriteMode: + description: 'Determines whether files at the destination + should be overwritten or preserved when copying files. Valid + values: ALWAYS, NEVER. Default: ALWAYS.' 
+ type: string + posixPermissions: + description: 'Determines which users or groups can access + a file for a specific purpose such as reading, writing, + or execution of the file. Valid values: NONE, PRESERVE. + Default: PRESERVE.' + type: string + preserveDeletedFiles: + description: 'Whether files deleted in the source should be + removed or preserved in the destination file system. Valid + values: PRESERVE, REMOVE. Default: PRESERVE.' + type: string + preserveDevices: + description: 'Whether the DataSync Task should preserve the + metadata of block and character devices in the source files + system, and recreate the files with that device name and + metadata on the destination. The DataSync Task can’t sync + the actual contents of such devices, because many of the + devices are non-terminal and don’t return an end of file + (EOF) marker. Valid values: NONE, PRESERVE. Default: NONE + (ignore special devices).' + type: string + securityDescriptorCopyFlags: + description: 'Determines which components of the SMB security + descriptor are copied from source to destination objects. + This value is only used for transfers between SMB and Amazon + FSx for Windows File Server locations, or between two Amazon + FSx for Windows File Server locations. Valid values: NONE, + OWNER_DACL, OWNER_DACL_SACL. Default: OWNER_DACL.' + type: string + taskQueueing: + description: 'Determines whether tasks should be queued before + executing the tasks. Valid values: ENABLED, DISABLED. Default + ENABLED.' + type: string + transferMode: + description: 'Determines whether DataSync transfers only the + data and metadata that differ between the source and the + destination location, or whether DataSync transfers all + the content from the source, without comparing to the destination + location. Valid values: CHANGED, ALL. Default: CHANGED' + type: string + uid: + description: 'User identifier of the file''s owners. Valid + values: BOTH, INT_VALUE, NAME, NONE. 
Default: INT_VALUE + (preserve integer value of the ID).' + type: string + verifyMode: + description: 'Whether a data integrity verification should + be performed at the end of a task execution after all data + and metadata have been transferred. Valid values: NONE, + POINT_IN_TIME_CONSISTENT, ONLY_FILES_TRANSFERRED. Default: + POINT_IN_TIME_CONSISTENT.' + type: string + type: object + schedule: + description: Specifies a schedule used to periodically transfer + files from a source to a destination location. + properties: + scheduleExpression: + description: Specifies the schedule you want your task to + use for repeated executions. For more information, see Schedule + Expressions for Rules. + type: string + type: object + sourceLocationArn: + description: Amazon Resource Name (ARN) of source DataSync Location. + type: string + sourceLocationArnRef: + description: Reference to a LocationS3 in datasync to populate + sourceLocationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceLocationArnSelector: + description: Selector for a LocationS3 in datasync to populate + sourceLocationArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taskReportConfig: + description: Configuration block containing the configuration + of a DataSync Task Report. See task_report_config below. + properties: + outputType: + description: 'Specifies the type of task report you''d like. + Valid values: SUMMARY_ONLY and STANDARD.' + type: string + reportLevel: + description: 'Specifies whether you want your task report + to include only what went wrong with your transfer or a + list of what succeeded and didn''t. Valid values: ERRORS_ONLY + and SUCCESSES_AND_ERRORS.' + type: string + reportOverrides: + description: Configuration block containing the configuration + of the reporting level for aspects of your task report. + See report_overrides below. 
+ properties: + deletedOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to delete in your destination location. This only applies + if you configure your task to delete data in the destination + that isn''t in the source. Valid values: ERRORS_ONLY + and SUCCESSES_AND_ERRORS.' + type: string + skippedOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to skip during your transfer. Valid values: ERRORS_ONLY + and SUCCESSES_AND_ERRORS.' + type: string + transferredOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to transfer. Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS.' + type: string + verifiedOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to verify at the end of your transfer. Valid values: + ERRORS_ONLY and SUCCESSES_AND_ERRORS.' + type: string + type: object + s3Destination: + description: Configuration block containing the configuration + for the Amazon S3 bucket where DataSync uploads your task + report. See s3_destination below. + properties: + bucketAccessRoleArn: + description: Specifies the Amazon Resource Name (ARN) + of the IAM policy that allows DataSync to upload a task + report to your S3 bucket. + type: string + s3BucketArn: + description: Specifies the ARN of the S3 bucket where + DataSync uploads your report. + type: string + subdirectory: + description: Specifies a bucket prefix for your report. + type: string + type: object + s3ObjectVersioning: + description: 'Specifies whether your task report includes + the new version of each object transferred into an S3 bucket. + This only applies if you enable versioning on your bucket. + Keep in mind that setting this to INCLUDE can increase the + duration of your task execution. 
Valid values: INCLUDE and + NONE.' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TaskStatus defines the observed state of Task. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the DataSync Task. 
+ type: string + cloudwatchLogGroupArn: + description: Amazon Resource Name (ARN) of the CloudWatch Log + Group that is used to monitor and log events in the sync task. + type: string + destinationLocationArn: + description: Amazon Resource Name (ARN) of destination DataSync + Location. + type: string + excludes: + description: Filter rules that determines which files to exclude + from a task. + properties: + filterType: + description: 'The type of filter rule to apply. Valid values: + SIMPLE_PATTERN.' + type: string + value: + description: 'A single filter string that consists of the + patterns to exclude. The patterns are delimited by "|" (that + is, a pipe), for example: /folder1|/folder2' + type: string + type: object + id: + description: Amazon Resource Name (ARN) of the DataSync Task. + type: string + includes: + description: Filter rules that determines which files to include + in a task. + properties: + filterType: + description: 'The type of filter rule to apply. Valid values: + SIMPLE_PATTERN.' + type: string + value: + description: 'A single filter string that consists of the + patterns to exclude. The patterns are delimited by "|" (that + is, a pipe), for example: /folder1|/folder2' + type: string + type: object + name: + description: Name of the DataSync Task. + type: string + options: + description: Configuration block containing option that controls + the default behavior when you start an execution of this DataSync + Task. For each individual task execution, you can override these + options by specifying an overriding configuration in those executions. + properties: + atime: + description: 'A file metadata that shows the last time a file + was accessed (that is when the file was read or written + to). If set to BEST_EFFORT, the DataSync Task attempts to + preserve the original (that is, the version before sync + PREPARING phase) atime attribute on all source files. Valid + values: BEST_EFFORT, NONE. Default: BEST_EFFORT.' 
+ type: string + bytesPerSecond: + description: 'Limits the bandwidth utilized. For example, + to set a maximum of 1 MB, set this value to 1048576. Value + values: -1 or greater. Default: -1 (unlimited).' + type: number + gid: + description: 'Group identifier of the file''s owners. Valid + values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE + (preserve integer value of the ID).' + type: string + logLevel: + description: 'Determines the type of logs that DataSync publishes + to a log stream in the Amazon CloudWatch log group that + you provide. Valid values: OFF, BASIC, TRANSFER. Default: + OFF.' + type: string + mtime: + description: 'A file metadata that indicates the last time + a file was modified (written to) before the sync PREPARING + phase. Value values: NONE, PRESERVE. Default: PRESERVE.' + type: string + objectTags: + description: 'Specifies whether object tags are maintained + when transferring between object storage systems. If you + want your DataSync task to ignore object tags, specify the + NONE value. Valid values: PRESERVE, NONE. Default value: + PRESERVE.' + type: string + overwriteMode: + description: 'Determines whether files at the destination + should be overwritten or preserved when copying files. Valid + values: ALWAYS, NEVER. Default: ALWAYS.' + type: string + posixPermissions: + description: 'Determines which users or groups can access + a file for a specific purpose such as reading, writing, + or execution of the file. Valid values: NONE, PRESERVE. + Default: PRESERVE.' + type: string + preserveDeletedFiles: + description: 'Whether files deleted in the source should be + removed or preserved in the destination file system. Valid + values: PRESERVE, REMOVE. Default: PRESERVE.' + type: string + preserveDevices: + description: 'Whether the DataSync Task should preserve the + metadata of block and character devices in the source files + system, and recreate the files with that device name and + metadata on the destination. 
The DataSync Task can’t sync + the actual contents of such devices, because many of the + devices are non-terminal and don’t return an end of file + (EOF) marker. Valid values: NONE, PRESERVE. Default: NONE + (ignore special devices).' + type: string + securityDescriptorCopyFlags: + description: 'Determines which components of the SMB security + descriptor are copied from source to destination objects. + This value is only used for transfers between SMB and Amazon + FSx for Windows File Server locations, or between two Amazon + FSx for Windows File Server locations. Valid values: NONE, + OWNER_DACL, OWNER_DACL_SACL. Default: OWNER_DACL.' + type: string + taskQueueing: + description: 'Determines whether tasks should be queued before + executing the tasks. Valid values: ENABLED, DISABLED. Default + ENABLED.' + type: string + transferMode: + description: 'Determines whether DataSync transfers only the + data and metadata that differ between the source and the + destination location, or whether DataSync transfers all + the content from the source, without comparing to the destination + location. Valid values: CHANGED, ALL. Default: CHANGED' + type: string + uid: + description: 'User identifier of the file''s owners. Valid + values: BOTH, INT_VALUE, NAME, NONE. Default: INT_VALUE + (preserve integer value of the ID).' + type: string + verifyMode: + description: 'Whether a data integrity verification should + be performed at the end of a task execution after all data + and metadata have been transferred. Valid values: NONE, + POINT_IN_TIME_CONSISTENT, ONLY_FILES_TRANSFERRED. Default: + POINT_IN_TIME_CONSISTENT.' + type: string + type: object + schedule: + description: Specifies a schedule used to periodically transfer + files from a source to a destination location. + properties: + scheduleExpression: + description: Specifies the schedule you want your task to + use for repeated executions. For more information, see Schedule + Expressions for Rules. 
+ type: string + type: object + sourceLocationArn: + description: Amazon Resource Name (ARN) of source DataSync Location. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + taskReportConfig: + description: Configuration block containing the configuration + of a DataSync Task Report. See task_report_config below. + properties: + outputType: + description: 'Specifies the type of task report you''d like. + Valid values: SUMMARY_ONLY and STANDARD.' + type: string + reportLevel: + description: 'Specifies whether you want your task report + to include only what went wrong with your transfer or a + list of what succeeded and didn''t. Valid values: ERRORS_ONLY + and SUCCESSES_AND_ERRORS.' + type: string + reportOverrides: + description: Configuration block containing the configuration + of the reporting level for aspects of your task report. + See report_overrides below. + properties: + deletedOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to delete in your destination location. This only applies + if you configure your task to delete data in the destination + that isn''t in the source. Valid values: ERRORS_ONLY + and SUCCESSES_AND_ERRORS.' + type: string + skippedOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to skip during your transfer. Valid values: ERRORS_ONLY + and SUCCESSES_AND_ERRORS.' + type: string + transferredOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to transfer. 
Valid values: ERRORS_ONLY and SUCCESSES_AND_ERRORS.' + type: string + verifiedOverride: + description: 'Specifies the level of reporting for the + files, objects, and directories that DataSync attempted + to verify at the end of your transfer. Valid values: + ERRORS_ONLY and SUCCESSES_AND_ERRORS.' + type: string + type: object + s3Destination: + description: Configuration block containing the configuration + for the Amazon S3 bucket where DataSync uploads your task + report. See s3_destination below. + properties: + bucketAccessRoleArn: + description: Specifies the Amazon Resource Name (ARN) + of the IAM policy that allows DataSync to upload a task + report to your S3 bucket. + type: string + s3BucketArn: + description: Specifies the ARN of the S3 bucket where + DataSync uploads your report. + type: string + subdirectory: + description: Specifies a bucket prefix for your report. + type: string + type: object + s3ObjectVersioning: + description: 'Specifies whether your task report includes + the new version of each object transferred into an S3 bucket. + This only applies if you enable versioning on your bucket. + Keep in mind that setting this to INCLUDE can increase the + duration of your task execution. Valid values: INCLUDE and + NONE.' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dax.aws.upbound.io_clusters.yaml b/package/crds/dax.aws.upbound.io_clusters.yaml index a2d9da9f21..e0156272ba 100644 --- a/package/crds/dax.aws.upbound.io_clusters.yaml +++ b/package/crds/dax.aws.upbound.io_clusters.yaml @@ -912,3 +912,891 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Provides an DAX Cluster + resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + availabilityZones: + description: |- + List of Availability Zones in which the + nodes will be created + items: + type: string + type: array + x-kubernetes-list-type: set + clusterEndpointEncryptionType: + description: |- + – The type of encryption the + cluster's endpoint should support. Valid values are: NONE and TLS. + Default value is NONE. 
+ type: string + description: + description: – Description for the cluster + type: string + iamRoleArn: + description: |- + A valid Amazon Resource Name (ARN) that identifies + an IAM role. At runtime, DAX will assume this role and use the role's + permissions to access DynamoDB on your behalf + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + maintenanceWindow: + description: |- + ddd:hh24:mi + (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: + sun:05:00-sun:09:00 + type: string + nodeType: + description: |- + – The compute and memory capacity of the nodes. See + Nodes for supported node types + type: string + notificationTopicArn: + description: east-1:012345678999:my_sns_topic + type: string + parameterGroupName: + description: |- + – Name of the parameter group to associate + with this DAX cluster + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + replicationFactor: + description: |- + node cluster, without any read + replicas + type: number + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: |- + – One or more VPC security groups associated + with the cluster + items: + type: string + type: array + x-kubernetes-list-type: set + serverSideEncryption: + description: Encrypt at rest options + properties: + enabled: + description: Whether to enable encryption at rest. Defaults + to false. 
+ type: boolean + type: object + subnetGroupName: + description: |- + – Name of the subnet group to be used for the + cluster + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + availabilityZones: + description: |- + List of Availability Zones in which the + nodes will be created + items: + type: string + type: array + x-kubernetes-list-type: set + clusterEndpointEncryptionType: + description: |- + – The type of encryption the + cluster's endpoint should support. Valid values are: NONE and TLS. + Default value is NONE. + type: string + description: + description: – Description for the cluster + type: string + iamRoleArn: + description: |- + A valid Amazon Resource Name (ARN) that identifies + an IAM role. At runtime, DAX will assume this role and use the role's + permissions to access DynamoDB on your behalf + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + maintenanceWindow: + description: |- + ddd:hh24:mi + (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: + sun:05:00-sun:09:00 + type: string + nodeType: + description: |- + – The compute and memory capacity of the nodes. See + Nodes for supported node types + type: string + notificationTopicArn: + description: east-1:012345678999:my_sns_topic + type: string + parameterGroupName: + description: |- + – Name of the parameter group to associate + with this DAX cluster + type: string + replicationFactor: + description: |- + node cluster, without any read + replicas + type: number + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: |- + – One or more VPC security groups associated + with the cluster + items: + type: string + type: array + x-kubernetes-list-type: set + serverSideEncryption: + description: Encrypt at rest options + properties: + enabled: + description: Whether to enable encryption at rest. Defaults + to false. + type: boolean + type: object + subnetGroupName: + description: |- + – Name of the subnet group to be used for the + cluster + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.nodeType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.nodeType) + || (has(self.initProvider) && has(self.initProvider.nodeType))' + - message: spec.forProvider.replicationFactor is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.replicationFactor) + || (has(self.initProvider) && has(self.initProvider.replicationFactor))' + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + arn: + description: The ARN of the DAX cluster + type: string + availabilityZones: + description: |- + List of Availability Zones in which the + nodes will be created + items: + type: string + type: array + x-kubernetes-list-type: set + clusterAddress: + description: The DNS name of the DAX cluster without the port + appended + type: string + clusterEndpointEncryptionType: + description: |- + – The type of encryption the + cluster's endpoint should support. Valid values are: NONE and TLS. + Default value is NONE. + type: string + configurationEndpoint: + description: |- + The configuration endpoint for this DAX cluster, + consisting of a DNS name and a port number + type: string + description: + description: – Description for the cluster + type: string + iamRoleArn: + description: |- + A valid Amazon Resource Name (ARN) that identifies + an IAM role. At runtime, DAX will assume this role and use the role's + permissions to access DynamoDB on your behalf + type: string + id: + type: string + maintenanceWindow: + description: |- + ddd:hh24:mi + (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
Example: + sun:05:00-sun:09:00 + type: string + nodeType: + description: |- + – The compute and memory capacity of the nodes. See + Nodes for supported node types + type: string + nodes: + description: |- + List of node objects including id, address, port and + availability_zone. Referenceable e.g., as + ${aws_dax_cluster.test.nodes.0.address} + items: + properties: + address: + type: string + availabilityZone: + type: string + id: + type: string + port: + description: The port used by the configuration endpoint + type: number + type: object + type: array + notificationTopicArn: + description: east-1:012345678999:my_sns_topic + type: string + parameterGroupName: + description: |- + – Name of the parameter group to associate + with this DAX cluster + type: string + port: + description: The port used by the configuration endpoint + type: number + replicationFactor: + description: |- + node cluster, without any read + replicas + type: number + securityGroupIds: + description: |- + – One or more VPC security groups associated + with the cluster + items: + type: string + type: array + x-kubernetes-list-type: set + serverSideEncryption: + description: Encrypt at rest options + properties: + enabled: + description: Whether to enable encryption at rest. Defaults + to false. + type: boolean + type: object + subnetGroupName: + description: |- + – Name of the subnet group to be used for the + cluster + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/deploy.aws.upbound.io_deploymentconfigs.yaml b/package/crds/deploy.aws.upbound.io_deploymentconfigs.yaml index ad8414d217..c79d25b425 100644 --- a/package/crds/deploy.aws.upbound.io_deploymentconfigs.yaml +++ b/package/crds/deploy.aws.upbound.io_deploymentconfigs.yaml @@ -523,3 +523,484 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DeploymentConfig is the Schema for the DeploymentConfigs API. + Provides a CodeDeploy deployment config. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DeploymentConfigSpec defines the desired state of DeploymentConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + computePlatform: + description: The compute platform can be Server, Lambda, or ECS. + Default is Server. + type: string + minimumHealthyHosts: + description: A minimum_healthy_hosts block. Required for Server + compute platform. Minimum Healthy Hosts are documented below. + properties: + type: + description: The type can either be FLEET_PERCENT or HOST_COUNT. + type: string + value: + description: |- + The value when the type is FLEET_PERCENT represents the minimum number of healthy instances as + a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the + deployment, AWS CodeDeploy converts the percentage to the equivalent number of instance and rounds up fractional instances. + When the type is HOST_COUNT, the value represents the minimum number of healthy instances as an absolute value. + type: number + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + trafficRoutingConfig: + description: A traffic_routing_config block. Traffic Routing Config + is documented below. + properties: + timeBasedCanary: + description: The time based canary configuration information. + If type is TimeBasedLinear, use time_based_linear instead. + properties: + interval: + description: The number of minutes between the first and + second traffic shifts of a TimeBasedCanary deployment. + type: number + percentage: + description: The percentage of traffic to shift in the + first increment of a TimeBasedCanary deployment. + type: number + type: object + timeBasedLinear: + description: The time based linear configuration information. + If type is TimeBasedCanary, use time_based_canary instead. + properties: + interval: + description: The number of minutes between the first and + second traffic shifts of a TimeBasedCanary deployment. + type: number + percentage: + description: The percentage of traffic to shift in the + first increment of a TimeBasedCanary deployment. + type: number + type: object + type: + description: Type of traffic routing config. One of TimeBasedCanary, + TimeBasedLinear, AllAtOnce. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + computePlatform: + description: The compute platform can be Server, Lambda, or ECS. 
+ Default is Server. + type: string + minimumHealthyHosts: + description: A minimum_healthy_hosts block. Required for Server + compute platform. Minimum Healthy Hosts are documented below. + properties: + type: + description: The type can either be FLEET_PERCENT or HOST_COUNT. + type: string + value: + description: |- + The value when the type is FLEET_PERCENT represents the minimum number of healthy instances as + a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the + deployment, AWS CodeDeploy converts the percentage to the equivalent number of instance and rounds up fractional instances. + When the type is HOST_COUNT, the value represents the minimum number of healthy instances as an absolute value. + type: number + type: object + trafficRoutingConfig: + description: A traffic_routing_config block. Traffic Routing Config + is documented below. + properties: + timeBasedCanary: + description: The time based canary configuration information. + If type is TimeBasedLinear, use time_based_linear instead. + properties: + interval: + description: The number of minutes between the first and + second traffic shifts of a TimeBasedCanary deployment. + type: number + percentage: + description: The percentage of traffic to shift in the + first increment of a TimeBasedCanary deployment. + type: number + type: object + timeBasedLinear: + description: The time based linear configuration information. + If type is TimeBasedCanary, use time_based_canary instead. + properties: + interval: + description: The number of minutes between the first and + second traffic shifts of a TimeBasedCanary deployment. + type: number + percentage: + description: The percentage of traffic to shift in the + first increment of a TimeBasedCanary deployment. + type: number + type: object + type: + description: Type of traffic routing config. One of TimeBasedCanary, + TimeBasedLinear, AllAtOnce. 
+ type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DeploymentConfigStatus defines the observed state of DeploymentConfig. + properties: + atProvider: + properties: + arn: + description: The ARN of the deployment config. + type: string + computePlatform: + description: The compute platform can be Server, Lambda, or ECS. + Default is Server. + type: string + deploymentConfigId: + description: The AWS Assigned deployment config id + type: string + id: + description: The deployment group's config name. 
+ type: string + minimumHealthyHosts: + description: A minimum_healthy_hosts block. Required for Server + compute platform. Minimum Healthy Hosts are documented below. + properties: + type: + description: The type can either be FLEET_PERCENT or HOST_COUNT. + type: string + value: + description: |- + The value when the type is FLEET_PERCENT represents the minimum number of healthy instances as + a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the + deployment, AWS CodeDeploy converts the percentage to the equivalent number of instance and rounds up fractional instances. + When the type is HOST_COUNT, the value represents the minimum number of healthy instances as an absolute value. + type: number + type: object + trafficRoutingConfig: + description: A traffic_routing_config block. Traffic Routing Config + is documented below. + properties: + timeBasedCanary: + description: The time based canary configuration information. + If type is TimeBasedLinear, use time_based_linear instead. + properties: + interval: + description: The number of minutes between the first and + second traffic shifts of a TimeBasedCanary deployment. + type: number + percentage: + description: The percentage of traffic to shift in the + first increment of a TimeBasedCanary deployment. + type: number + type: object + timeBasedLinear: + description: The time based linear configuration information. + If type is TimeBasedCanary, use time_based_canary instead. + properties: + interval: + description: The number of minutes between the first and + second traffic shifts of a TimeBasedCanary deployment. + type: number + percentage: + description: The percentage of traffic to shift in the + first increment of a TimeBasedCanary deployment. + type: number + type: object + type: + description: Type of traffic routing config. One of TimeBasedCanary, + TimeBasedLinear, AllAtOnce. 
+ type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/deploy.aws.upbound.io_deploymentgroups.yaml b/package/crds/deploy.aws.upbound.io_deploymentgroups.yaml index b4f1d581a7..0dd19f18be 100644 --- a/package/crds/deploy.aws.upbound.io_deploymentgroups.yaml +++ b/package/crds/deploy.aws.upbound.io_deploymentgroups.yaml @@ -2338,3 +2338,2238 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DeploymentGroup is the Schema for the DeploymentGroups API. Provides + a CodeDeploy deployment group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DeploymentGroupSpec defines the desired state of DeploymentGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + alarmConfiguration: + description: Configuration block of alarms associated with the + deployment group (documented below). + properties: + alarms: + description: A list of alarms configured for the deployment + group. A maximum of 10 alarms can be added to a deployment + group. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Indicates whether the alarm configuration is + enabled. This option is useful when you want to temporarily + deactivate alarm monitoring for a deployment group without + having to add the same alarms again later. + type: boolean + ignorePollAlarmFailure: + description: Indicates whether a deployment should continue + if information about the current state of alarms cannot + be retrieved from CloudWatch. The default value is false. + type: boolean + type: object + appName: + description: The name of the application. + type: string + appNameRef: + description: Reference to a App in deploy to populate appName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appNameSelector: + description: Selector for a App in deploy to populate appName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoRollbackConfiguration: + description: Configuration block of the automatic rollback configuration + associated with the deployment group (documented below). + properties: + enabled: + description: Indicates whether the alarm configuration is + enabled. This option is useful when you want to temporarily + deactivate alarm monitoring for a deployment group without + having to add the same alarms again later. + type: boolean + events: + description: The event type or types that trigger a rollback. + Supported types are DEPLOYMENT_FAILURE, DEPLOYMENT_STOP_ON_ALARM + and DEPLOYMENT_STOP_ON_REQUEST. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + autoscalingGroups: + description: Autoscaling groups associated with the deployment + group. + items: + type: string + type: array + x-kubernetes-list-type: set + blueGreenDeploymentConfig: + description: Configuration block of the blue/green deployment + options for a deployment group (documented below). + properties: + deploymentReadyOption: + description: Information about the action to take when newly + provisioned instances are ready to receive traffic in a + blue/green deployment (documented below). + properties: + actionOnTimeout: + description: When to reroute traffic from an original + environment to a replacement environment in a blue/green + deployment. + type: string + waitTimeInMinutes: + description: The number of minutes to wait before the + status of a blue/green deployment changed to Stopped + if rerouting is not started manually. Applies only to + the STOP_DEPLOYMENT option for action_on_timeout. + type: number + type: object + greenFleetProvisioningOption: + description: Information about how instances are provisioned + for a replacement environment in a blue/green deployment + (documented below). 
+ properties: + action: + description: The method used to add instances to a replacement + environment. + type: string + type: object + terminateBlueInstancesOnDeploymentSuccess: + description: Information about whether to terminate instances + in the original fleet during a blue/green deployment (documented + below). + properties: + action: + description: The method used to add instances to a replacement + environment. + type: string + terminationWaitTimeInMinutes: + description: The number of minutes to wait after a successful + blue/green deployment before terminating instances from + the original environment. + type: number + type: object + type: object + deploymentConfigName: + description: The name of the group's deployment config. The default + is "CodeDeployDefault.OneAtATime". + type: string + deploymentStyle: + description: Configuration block of the type of deployment, either + in-place or blue/green, you want to run and whether to route + deployment traffic behind a load balancer (documented below). + properties: + deploymentOption: + description: Indicates whether to route deployment traffic + behind a load balancer. Valid Values are WITH_TRAFFIC_CONTROL + or WITHOUT_TRAFFIC_CONTROL. Default is WITHOUT_TRAFFIC_CONTROL. + type: string + deploymentType: + description: Indicates whether to run an in-place deployment + or a blue/green deployment. Valid Values are IN_PLACE or + BLUE_GREEN. Default is IN_PLACE. + type: string + type: object + ec2TagFilter: + description: Tag filters associated with the deployment group. + See the AWS docs for details. + items: + properties: + key: + description: The key of the tag filter. + type: string + type: + description: The type of the tag filter, either KEY_ONLY, + VALUE_ONLY, or KEY_AND_VALUE. + type: string + value: + description: The value of the tag filter. 
+ type: string + type: object + type: array + ec2TagSet: + description: Configuration block(s) of Tag filters associated + with the deployment group, which are also referred to as tag + groups (documented below). See the AWS docs for details. + items: + properties: + ec2TagFilter: + description: Tag filters associated with the deployment + group. See the AWS docs for details. + items: + properties: + key: + description: The key of the tag filter. + type: string + type: + description: The type of the tag filter, either KEY_ONLY, + VALUE_ONLY, or KEY_AND_VALUE. + type: string + value: + description: The value of the tag filter. + type: string + type: object + type: array + type: object + type: array + ecsService: + description: Configuration block(s) of the ECS services for a + deployment group (documented below). + properties: + clusterName: + description: The name of the ECS cluster. + type: string + clusterNameRef: + description: Reference to a Cluster in ecs to populate clusterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterNameSelector: + description: Selector for a Cluster in ecs to populate clusterName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceName: + description: The name of the ECS service. + type: string + serviceNameRef: + description: Reference to a Service in ecs to populate serviceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceNameSelector: + description: Selector for a Service in ecs to populate serviceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + loadBalancerInfo: + description: Single configuration block of the load balancer to + use in a blue/green deployment (documented below). + properties: + elbInfo: + description: The Classic Elastic Load Balancer to use in a + deployment. Conflicts with target_group_info and target_group_pair_info. + items: + properties: + name: + description: The name of the target group that instances + in the original environment are deregistered from, + and instances in the replacement environment registered + with. 
For in-place deployments, the name of the target + group that instances are deregistered from, so they + are not serving traffic during a deployment, and then + re-registered with after the deployment completes. + type: string + nameRef: + description: Reference to a ELB in elb to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a ELB in elb to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + targetGroupInfo: + description: The (Application/Network Load Balancer) target + group to use in a deployment. Conflicts with elb_info and + target_group_pair_info. + items: + properties: + name: + description: The name of the target group that instances + in the original environment are deregistered from, + and instances in the replacement environment registered + with. For in-place deployments, the name of the target + group that instances are deregistered from, so they + are not serving traffic during a deployment, and then + re-registered with after the deployment completes. + type: string + type: object + type: array + targetGroupPairInfo: + description: The (Application/Network Load Balancer) target + group pair to use in a deployment. Conflicts with elb_info + and target_group_info. + properties: + prodTrafficRoute: + description: Configuration block for the production traffic + route (documented below). + properties: + listenerArns: + description: List of Amazon Resource Names (ARNs) + of the load balancer listeners. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + targetGroup: + description: Configuration blocks for a target group within + a target group pair (documented below). + items: + properties: + name: + description: The name of the target group that instances + in the original environment are deregistered from, + and instances in the replacement environment registered + with. 
For in-place deployments, the name of the + target group that instances are deregistered from, + so they are not serving traffic during a deployment, + and then re-registered with after the deployment + completes. + type: string + nameRef: + description: Reference to a LBTargetGroup in elbv2 + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a LBTargetGroup in elbv2 + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + testTrafficRoute: + description: Configuration block for the test traffic + route (documented below). + properties: + listenerArns: + description: List of Amazon Resource Names (ARNs) + of the load balancer listeners. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + onPremisesInstanceTagFilter: + description: On premise tag filters associated with the group. + See the AWS docs for details. + items: + properties: + key: + description: The key of the tag filter. + type: string + type: + description: The type of the tag filter, either KEY_ONLY, + VALUE_ONLY, or KEY_AND_VALUE. + type: string + value: + description: The value of the tag filter. + type: string + type: object + type: array + outdatedInstancesStrategy: + description: Configuration block of Indicates what happens when + new Amazon EC2 instances are launched mid-deployment and do + not receive the deployed application revision. Valid values + are UPDATE and IGNORE. Defaults to UPDATE. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + serviceRoleArn: + description: The service role ARN that allows deployments. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + triggerConfiguration: + description: Configuration block(s) of the triggers for the deployment + group (documented below). + items: + properties: + triggerEvents: + description: 'The event type or types for which notifications + are triggered. Some values that are supported: DeploymentStart, + DeploymentSuccess, DeploymentFailure, DeploymentStop, + DeploymentRollback, InstanceStart, InstanceSuccess, InstanceFailure. See + the CodeDeploy documentation for all possible values.' + items: + type: string + type: array + x-kubernetes-list-type: set + triggerName: + description: The name of the notification trigger. + type: string + triggerTargetArn: + description: The ARN of the SNS topic through which notifications + are sent. + type: string + triggerTargetArnRef: + description: Reference to a Topic in sns to populate triggerTargetArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + triggerTargetArnSelector: + description: Selector for a Topic in sns to populate triggerTargetArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + alarmConfiguration: + description: Configuration block of alarms associated with the + deployment group (documented below). + properties: + alarms: + description: A list of alarms configured for the deployment + group. A maximum of 10 alarms can be added to a deployment + group. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Indicates whether the alarm configuration is + enabled. This option is useful when you want to temporarily + deactivate alarm monitoring for a deployment group without + having to add the same alarms again later. + type: boolean + ignorePollAlarmFailure: + description: Indicates whether a deployment should continue + if information about the current state of alarms cannot + be retrieved from CloudWatch. The default value is false. + type: boolean + type: object + autoRollbackConfiguration: + description: Configuration block of the automatic rollback configuration + associated with the deployment group (documented below). + properties: + enabled: + description: Indicates whether the alarm configuration is + enabled. This option is useful when you want to temporarily + deactivate alarm monitoring for a deployment group without + having to add the same alarms again later. + type: boolean + events: + description: The event type or types that trigger a rollback. + Supported types are DEPLOYMENT_FAILURE, DEPLOYMENT_STOP_ON_ALARM + and DEPLOYMENT_STOP_ON_REQUEST. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + autoscalingGroups: + description: Autoscaling groups associated with the deployment + group. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + blueGreenDeploymentConfig: + description: Configuration block of the blue/green deployment + options for a deployment group (documented below). + properties: + deploymentReadyOption: + description: Information about the action to take when newly + provisioned instances are ready to receive traffic in a + blue/green deployment (documented below). + properties: + actionOnTimeout: + description: When to reroute traffic from an original + environment to a replacement environment in a blue/green + deployment. + type: string + waitTimeInMinutes: + description: The number of minutes to wait before the + status of a blue/green deployment changed to Stopped + if rerouting is not started manually. Applies only to + the STOP_DEPLOYMENT option for action_on_timeout. + type: number + type: object + greenFleetProvisioningOption: + description: Information about how instances are provisioned + for a replacement environment in a blue/green deployment + (documented below). + properties: + action: + description: The method used to add instances to a replacement + environment. + type: string + type: object + terminateBlueInstancesOnDeploymentSuccess: + description: Information about whether to terminate instances + in the original fleet during a blue/green deployment (documented + below). + properties: + action: + description: The method used to add instances to a replacement + environment. + type: string + terminationWaitTimeInMinutes: + description: The number of minutes to wait after a successful + blue/green deployment before terminating instances from + the original environment. + type: number + type: object + type: object + deploymentConfigName: + description: The name of the group's deployment config. The default + is "CodeDeployDefault.OneAtATime". 
+ type: string + deploymentStyle: + description: Configuration block of the type of deployment, either + in-place or blue/green, you want to run and whether to route + deployment traffic behind a load balancer (documented below). + properties: + deploymentOption: + description: Indicates whether to route deployment traffic + behind a load balancer. Valid Values are WITH_TRAFFIC_CONTROL + or WITHOUT_TRAFFIC_CONTROL. Default is WITHOUT_TRAFFIC_CONTROL. + type: string + deploymentType: + description: Indicates whether to run an in-place deployment + or a blue/green deployment. Valid Values are IN_PLACE or + BLUE_GREEN. Default is IN_PLACE. + type: string + type: object + ec2TagFilter: + description: Tag filters associated with the deployment group. + See the AWS docs for details. + items: + properties: + key: + description: The key of the tag filter. + type: string + type: + description: The type of the tag filter, either KEY_ONLY, + VALUE_ONLY, or KEY_AND_VALUE. + type: string + value: + description: The value of the tag filter. + type: string + type: object + type: array + ec2TagSet: + description: Configuration block(s) of Tag filters associated + with the deployment group, which are also referred to as tag + groups (documented below). See the AWS docs for details. + items: + properties: + ec2TagFilter: + description: Tag filters associated with the deployment + group. See the AWS docs for details. + items: + properties: + key: + description: The key of the tag filter. + type: string + type: + description: The type of the tag filter, either KEY_ONLY, + VALUE_ONLY, or KEY_AND_VALUE. + type: string + value: + description: The value of the tag filter. + type: string + type: object + type: array + type: object + type: array + ecsService: + description: Configuration block(s) of the ECS services for a + deployment group (documented below). + properties: + clusterName: + description: The name of the ECS cluster. 
+ type: string + clusterNameRef: + description: Reference to a Cluster in ecs to populate clusterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterNameSelector: + description: Selector for a Cluster in ecs to populate clusterName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceName: + description: The name of the ECS service. + type: string + serviceNameRef: + description: Reference to a Service in ecs to populate serviceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceNameSelector: + description: Selector for a Service in ecs to populate serviceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + loadBalancerInfo: + description: Single configuration block of the load balancer to + use in a blue/green deployment (documented below). + properties: + elbInfo: + description: The Classic Elastic Load Balancer to use in a + deployment. Conflicts with target_group_info and target_group_pair_info. + items: + properties: + name: + description: The name of the target group that instances + in the original environment are deregistered from, + and instances in the replacement environment registered + with. For in-place deployments, the name of the target + group that instances are deregistered from, so they + are not serving traffic during a deployment, and then + re-registered with after the deployment completes. + type: string + nameRef: + description: Reference to a ELB in elb to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a ELB in elb to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + targetGroupInfo: + description: The (Application/Network Load Balancer) target + group to use in a deployment. Conflicts with elb_info and + target_group_pair_info. + items: + properties: + name: + description: The name of the target group that instances + in the original environment are deregistered from, + and instances in the replacement environment registered + with. 
For in-place deployments, the name of the target + group that instances are deregistered from, so they + are not serving traffic during a deployment, and then + re-registered with after the deployment completes. + type: string + type: object + type: array + targetGroupPairInfo: + description: The (Application/Network Load Balancer) target + group pair to use in a deployment. Conflicts with elb_info + and target_group_info. + properties: + prodTrafficRoute: + description: Configuration block for the production traffic + route (documented below). + properties: + listenerArns: + description: List of Amazon Resource Names (ARNs) + of the load balancer listeners. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + targetGroup: + description: Configuration blocks for a target group within + a target group pair (documented below). + items: + properties: + name: + description: The name of the target group that instances + in the original environment are deregistered from, + and instances in the replacement environment registered + with. For in-place deployments, the name of the + target group that instances are deregistered from, + so they are not serving traffic during a deployment, + and then re-registered with after the deployment + completes. + type: string + nameRef: + description: Reference to a LBTargetGroup in elbv2 + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a LBTargetGroup in elbv2 + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + testTrafficRoute: + description: Configuration block for the test traffic + route (documented below). + properties: + listenerArns: + description: List of Amazon Resource Names (ARNs) + of the load balancer listeners. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + onPremisesInstanceTagFilter: + description: On premise tag filters associated with the group. + See the AWS docs for details. 
+ items: + properties: + key: + description: The key of the tag filter. + type: string + type: + description: The type of the tag filter, either KEY_ONLY, + VALUE_ONLY, or KEY_AND_VALUE. + type: string + value: + description: The value of the tag filter. + type: string + type: object + type: array + outdatedInstancesStrategy: + description: Configuration block of Indicates what happens when + new Amazon EC2 instances are launched mid-deployment and do + not receive the deployed application revision. Valid values + are UPDATE and IGNORE. Defaults to UPDATE. + type: string + serviceRoleArn: + description: The service role ARN that allows deployments. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + triggerConfiguration: + description: Configuration block(s) of the triggers for the deployment + group (documented below). + items: + properties: + triggerEvents: + description: 'The event type or types for which notifications + are triggered. Some values that are supported: DeploymentStart, + DeploymentSuccess, DeploymentFailure, DeploymentStop, + DeploymentRollback, InstanceStart, InstanceSuccess, InstanceFailure. See + the CodeDeploy documentation for all possible values.' + items: + type: string + type: array + x-kubernetes-list-type: set + triggerName: + description: The name of the notification trigger. + type: string + triggerTargetArn: + description: The ARN of the SNS topic through which notifications + are sent. + type: string + triggerTargetArnRef: + description: Reference to a Topic in sns to populate triggerTargetArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + triggerTargetArnSelector: + description: Selector for a Topic in sns to populate triggerTargetArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DeploymentGroupStatus defines the observed state of DeploymentGroup. + properties: + atProvider: + properties: + alarmConfiguration: + description: Configuration block of alarms associated with the + deployment group (documented below). 
+ properties: + alarms: + description: A list of alarms configured for the deployment + group. A maximum of 10 alarms can be added to a deployment + group. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Indicates whether the alarm configuration is + enabled. This option is useful when you want to temporarily + deactivate alarm monitoring for a deployment group without + having to add the same alarms again later. + type: boolean + ignorePollAlarmFailure: + description: Indicates whether a deployment should continue + if information about the current state of alarms cannot + be retrieved from CloudWatch. The default value is false. + type: boolean + type: object + appName: + description: The name of the application. + type: string + arn: + description: The ARN of the CodeDeploy deployment group. + type: string + autoRollbackConfiguration: + description: Configuration block of the automatic rollback configuration + associated with the deployment group (documented below). + properties: + enabled: + description: Indicates whether the alarm configuration is + enabled. This option is useful when you want to temporarily + deactivate alarm monitoring for a deployment group without + having to add the same alarms again later. + type: boolean + events: + description: The event type or types that trigger a rollback. + Supported types are DEPLOYMENT_FAILURE, DEPLOYMENT_STOP_ON_ALARM + and DEPLOYMENT_STOP_ON_REQUEST. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + autoscalingGroups: + description: Autoscaling groups associated with the deployment + group. + items: + type: string + type: array + x-kubernetes-list-type: set + blueGreenDeploymentConfig: + description: Configuration block of the blue/green deployment + options for a deployment group (documented below). 
+ properties: + deploymentReadyOption: + description: Information about the action to take when newly + provisioned instances are ready to receive traffic in a + blue/green deployment (documented below). + properties: + actionOnTimeout: + description: When to reroute traffic from an original + environment to a replacement environment in a blue/green + deployment. + type: string + waitTimeInMinutes: + description: The number of minutes to wait before the + status of a blue/green deployment changed to Stopped + if rerouting is not started manually. Applies only to + the STOP_DEPLOYMENT option for action_on_timeout. + type: number + type: object + greenFleetProvisioningOption: + description: Information about how instances are provisioned + for a replacement environment in a blue/green deployment + (documented below). + properties: + action: + description: The method used to add instances to a replacement + environment. + type: string + type: object + terminateBlueInstancesOnDeploymentSuccess: + description: Information about whether to terminate instances + in the original fleet during a blue/green deployment (documented + below). + properties: + action: + description: The method used to add instances to a replacement + environment. + type: string + terminationWaitTimeInMinutes: + description: The number of minutes to wait after a successful + blue/green deployment before terminating instances from + the original environment. + type: number + type: object + type: object + computePlatform: + description: The destination platform type for the deployment. + type: string + deploymentConfigName: + description: The name of the group's deployment config. The default + is "CodeDeployDefault.OneAtATime". + type: string + deploymentGroupId: + description: The ID of the CodeDeploy deployment group. 
+ type: string + deploymentStyle: + description: Configuration block of the type of deployment, either + in-place or blue/green, you want to run and whether to route + deployment traffic behind a load balancer (documented below). + properties: + deploymentOption: + description: Indicates whether to route deployment traffic + behind a load balancer. Valid Values are WITH_TRAFFIC_CONTROL + or WITHOUT_TRAFFIC_CONTROL. Default is WITHOUT_TRAFFIC_CONTROL. + type: string + deploymentType: + description: Indicates whether to run an in-place deployment + or a blue/green deployment. Valid Values are IN_PLACE or + BLUE_GREEN. Default is IN_PLACE. + type: string + type: object + ec2TagFilter: + description: Tag filters associated with the deployment group. + See the AWS docs for details. + items: + properties: + key: + description: The key of the tag filter. + type: string + type: + description: The type of the tag filter, either KEY_ONLY, + VALUE_ONLY, or KEY_AND_VALUE. + type: string + value: + description: The value of the tag filter. + type: string + type: object + type: array + ec2TagSet: + description: Configuration block(s) of Tag filters associated + with the deployment group, which are also referred to as tag + groups (documented below). See the AWS docs for details. + items: + properties: + ec2TagFilter: + description: Tag filters associated with the deployment + group. See the AWS docs for details. + items: + properties: + key: + description: The key of the tag filter. + type: string + type: + description: The type of the tag filter, either KEY_ONLY, + VALUE_ONLY, or KEY_AND_VALUE. + type: string + value: + description: The value of the tag filter. + type: string + type: object + type: array + type: object + type: array + ecsService: + description: Configuration block(s) of the ECS services for a + deployment group (documented below). + properties: + clusterName: + description: The name of the ECS cluster. 
+ type: string + serviceName: + description: The name of the ECS service. + type: string + type: object + id: + description: Application name and deployment group name. + type: string + loadBalancerInfo: + description: Single configuration block of the load balancer to + use in a blue/green deployment (documented below). + properties: + elbInfo: + description: The Classic Elastic Load Balancer to use in a + deployment. Conflicts with target_group_info and target_group_pair_info. + items: + properties: + name: + description: The name of the target group that instances + in the original environment are deregistered from, + and instances in the replacement environment registered + with. For in-place deployments, the name of the target + group that instances are deregistered from, so they + are not serving traffic during a deployment, and then + re-registered with after the deployment completes. + type: string + type: object + type: array + targetGroupInfo: + description: The (Application/Network Load Balancer) target + group to use in a deployment. Conflicts with elb_info and + target_group_pair_info. + items: + properties: + name: + description: The name of the target group that instances + in the original environment are deregistered from, + and instances in the replacement environment registered + with. For in-place deployments, the name of the target + group that instances are deregistered from, so they + are not serving traffic during a deployment, and then + re-registered with after the deployment completes. + type: string + type: object + type: array + targetGroupPairInfo: + description: The (Application/Network Load Balancer) target + group pair to use in a deployment. Conflicts with elb_info + and target_group_info. + properties: + prodTrafficRoute: + description: Configuration block for the production traffic + route (documented below). + properties: + listenerArns: + description: List of Amazon Resource Names (ARNs) + of the load balancer listeners. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + targetGroup: + description: Configuration blocks for a target group within + a target group pair (documented below). + items: + properties: + name: + description: The name of the target group that instances + in the original environment are deregistered from, + and instances in the replacement environment registered + with. For in-place deployments, the name of the + target group that instances are deregistered from, + so they are not serving traffic during a deployment, + and then re-registered with after the deployment + completes. + type: string + type: object + type: array + testTrafficRoute: + description: Configuration block for the test traffic + route (documented below). + properties: + listenerArns: + description: List of Amazon Resource Names (ARNs) + of the load balancer listeners. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + onPremisesInstanceTagFilter: + description: On premise tag filters associated with the group. + See the AWS docs for details. + items: + properties: + key: + description: The key of the tag filter. + type: string + type: + description: The type of the tag filter, either KEY_ONLY, + VALUE_ONLY, or KEY_AND_VALUE. + type: string + value: + description: The value of the tag filter. + type: string + type: object + type: array + outdatedInstancesStrategy: + description: Configuration block of Indicates what happens when + new Amazon EC2 instances are launched mid-deployment and do + not receive the deployed application revision. Valid values + are UPDATE and IGNORE. Defaults to UPDATE. + type: string + serviceRoleArn: + description: The service role ARN that allows deployments. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + triggerConfiguration: + description: Configuration block(s) of the triggers for the deployment + group (documented below). + items: + properties: + triggerEvents: + description: 'The event type or types for which notifications + are triggered. Some values that are supported: DeploymentStart, + DeploymentSuccess, DeploymentFailure, DeploymentStop, + DeploymentRollback, InstanceStart, InstanceSuccess, InstanceFailure. See + the CodeDeploy documentation for all possible values.' + items: + type: string + type: array + x-kubernetes-list-type: set + triggerName: + description: The name of the notification trigger. + type: string + triggerTargetArn: + description: The ARN of the SNS topic through which notifications + are sent. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/devicefarm.aws.upbound.io_testgridprojects.yaml b/package/crds/devicefarm.aws.upbound.io_testgridprojects.yaml index 2a69ff72bc..f1439d2b38 100644 --- a/package/crds/devicefarm.aws.upbound.io_testgridprojects.yaml +++ b/package/crds/devicefarm.aws.upbound.io_testgridprojects.yaml @@ -913,3 +913,892 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: TestGridProject is the Schema for the TestGridProjects API. Provides + a Devicefarm test_grid_project + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TestGridProjectSpec defines the desired state of TestGridProject + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Human-readable description of the project. + type: string + name: + description: The name of the Selenium testing project. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: The VPC security groups and subnets that are attached + to a project. See VPC Config below. 
+ properties: + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of VPC security group IDs in your Amazon + VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of VPC subnet IDs in your Amazon VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The ID of the Amazon VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Human-readable description of the project. + type: string + name: + description: The name of the Selenium testing project. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: The VPC security groups and subnets that are attached + to a project. See VPC Config below. + properties: + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of VPC security group IDs in your Amazon + VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of VPC subnet IDs in your Amazon VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The ID of the Amazon VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: TestGridProjectStatus defines the observed state of TestGridProject. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name of this Test Grid Project. + type: string + description: + description: Human-readable description of the project. 
+ type: string + id: + type: string + name: + description: The name of the Selenium testing project. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: The VPC security groups and subnets that are attached + to a project. See VPC Config below. + properties: + securityGroupIds: + description: A list of VPC security group IDs in your Amazon + VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of VPC subnet IDs in your Amazon VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The ID of the Amazon VPC. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dlm.aws.upbound.io_lifecyclepolicies.yaml b/package/crds/dlm.aws.upbound.io_lifecyclepolicies.yaml index 2467552503..43838412ab 100644 --- a/package/crds/dlm.aws.upbound.io_lifecyclepolicies.yaml +++ b/package/crds/dlm.aws.upbound.io_lifecyclepolicies.yaml @@ -2076,3 +2076,1926 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LifecyclePolicy is the Schema for the LifecyclePolicys API. Provides + a Data Lifecycle Manager (DLM) lifecycle policy for managing snapshots. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LifecyclePolicySpec defines the desired state of LifecyclePolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description for the DLM lifecycle policy. + type: string + executionRoleArn: + description: The ARN of an IAM role that is able to be assumed + by the DLM service. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + policyDetails: + description: See the policy_details configuration block. Max of + 1. + properties: + action: + description: The actions to be performed when the event-based + policy is triggered. You can specify only one action per + policy. 
This parameter is required for event-based policies + only. If you are creating a snapshot or AMI policy, omit + this parameter. See the action configuration block. + properties: + crossRegionCopy: + description: The rule for copying shared snapshots across + Regions. See the cross_region_copy configuration block. + items: + properties: + encryptionConfiguration: + description: The encryption settings for the copied + snapshot. See the encryption_configuration block. + Max of 1 per action. + properties: + cmkArn: + description: The Amazon Resource Name (ARN) + of the AWS KMS key to use for EBS encryption. + If this parameter is not specified, the default + KMS key for the account is used. + type: string + encrypted: + description: To encrypt a copy of an unencrypted + snapshot when encryption by default is not + enabled, enable encryption using this parameter. + Copies of encrypted snapshots are encrypted, + even if this parameter is false or when encryption + by default is not enabled. + type: boolean + type: object + retainRule: + description: Specifies the retention rule for cross-Region + snapshot copies. See the retain_rule block. Max + of 1 per action. + properties: + interval: + description: How often this lifecycle policy + should be evaluated. 1, 2,3,4,6,8,12 or 24 + are valid values. Conflicts with cron_expression. + If set, interval_unit and times must also + be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default + value. Conflicts with cron_expression. Must + be set if interval is set. + type: string + type: object + target: + description: The target Region or the Amazon Resource + Name (ARN) of the target Outpost for the snapshot + copies. + type: string + type: object + type: array + name: + description: A descriptive name for the action. 
+ type: string + type: object + eventSource: + description: The event that triggers the event-based policy. + This parameter is required for event-based policies only. + If you are creating a snapshot or AMI policy, omit this + parameter. See the event_source configuration block. + properties: + parameters: + description: A set of optional parameters for snapshot + and AMI lifecycle policies. See the parameters configuration + block. + properties: + descriptionRegex: + description: The snapshot description that can trigger + the policy. The description pattern is specified + using a regular expression. The policy runs only + if a snapshot with a description that matches the + specified pattern is shared with your account. + type: string + eventType: + description: The type of event. Currently, only shareSnapshot + events are supported. + type: string + snapshotOwner: + description: The IDs of the AWS accounts that can + trigger policy by sharing snapshots with your account. + The policy only runs if one of the specified AWS + accounts shares a snapshot with your account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: + description: The source of the event. Currently only managed + CloudWatch Events rules are supported. Valid values + are MANAGED_CWE. + type: string + type: object + parameters: + description: A set of optional parameters for snapshot and + AMI lifecycle policies. See the parameters configuration + block. + properties: + excludeBootVolume: + description: Indicates whether to exclude the root volume + from snapshots created using CreateSnapshots. The default + is false. + type: boolean + noReboot: + description: Applies to AMI lifecycle policies only. Indicates + whether targeted instances are rebooted when the lifecycle + policy runs. true indicates that targeted instances + are not rebooted when the policy runs. false indicates + that target instances are rebooted when the policy runs. 
+ The default is true (instances are not rebooted). + type: boolean + type: object + policyType: + description: The valid target resource types and actions a + policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create + a lifecycle policy that manages the lifecycle of Amazon + EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle + policy that manages the lifecycle of EBS-backed AMIs. Specify + EVENT_BASED_POLICY to create an event-based policy that + performs specific actions when a defined event occurs in + your AWS account. Default value is EBS_SNAPSHOT_MANAGEMENT. + type: string + resourceLocations: + description: The location of the resources to backup. If the + source resources are located in an AWS Region, specify CLOUD. + If the source resources are located on an Outpost in your + account, specify OUTPOST. If you specify OUTPOST, Amazon + Data Lifecycle Manager backs up all resources of the specified + type with matching target tags across all of the Outposts + in your account. Valid values are CLOUD and OUTPOST. + items: + type: string + type: array + resourceTypes: + description: A list of resource types that should be targeted + by the lifecycle policy. Valid values are VOLUME and INSTANCE. + items: + type: string + type: array + schedule: + description: See the schedule configuration block. + items: + properties: + copyTags: + description: Copy all user-defined tags on a source + volume to snapshots of the volume created by this + policy. + type: boolean + createRule: + description: See the create_rule block. Max of 1 per + schedule. + properties: + cronExpression: + description: The schedule, as a Cron expression. + The schedule interval must be between 1 hour and + 1 year. Conflicts with interval, interval_unit, + and times. + type: string + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. 
If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + location: + description: Specifies the destination for snapshots + created by the policy. To create snapshots in + the same Region as the source resource, specify + CLOUD. To create snapshots on the same Outpost + as the source resource, specify OUTPOST_LOCAL. + If you omit this parameter, CLOUD is used by default. + If the policy targets resources in an AWS Region, + then you must create snapshots in the same Region + as the source resource. If the policy targets + resources on an Outpost, then you can create snapshots + on the same Outpost as the source resource, or + in the Region of that Outpost. Valid values are + CLOUD and OUTPOST_LOCAL. + type: string + times: + description: A list of times in 24 hour clock format + that sets when the lifecycle policy should be + evaluated. Max of 1. Conflicts with cron_expression. + Must be set if interval is set. + items: + type: string + type: array + type: object + crossRegionCopyRule: + description: See the cross_region_copy_rule block. Max + of 3 per schedule. + items: + properties: + cmkArn: + description: The Amazon Resource Name (ARN) of + the AWS KMS key to use for EBS encryption. If + this parameter is not specified, the default + KMS key for the account is used. + type: string + cmkArnRef: + description: Reference to a Key in kms to populate + cmkArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cmkArnSelector: + description: Selector for a Key in kms to populate + cmkArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + copyTags: + description: Copy all user-defined tags on a source + volume to snapshots of the volume created by + this policy. 
+ type: boolean + deprecateRule: + description: See the deprecate_rule block. Max + of 1 per schedule. + properties: + interval: + description: How often this lifecycle policy + should be evaluated. 1, 2,3,4,6,8,12 or + 24 are valid values. Conflicts with cron_expression. + If set, interval_unit and times must also + be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default + value. Conflicts with cron_expression. Must + be set if interval is set. + type: string + type: object + encrypted: + description: To encrypt a copy of an unencrypted + snapshot when encryption by default is not enabled, + enable encryption using this parameter. Copies + of encrypted snapshots are encrypted, even if + this parameter is false or when encryption by + default is not enabled. + type: boolean + retainRule: + description: Specifies the retention rule for + cross-Region snapshot copies. See the retain_rule + block. Max of 1 per action. + properties: + interval: + description: How often this lifecycle policy + should be evaluated. 1, 2,3,4,6,8,12 or + 24 are valid values. Conflicts with cron_expression. + If set, interval_unit and times must also + be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default + value. Conflicts with cron_expression. Must + be set if interval is set. + type: string + type: object + target: + description: The target Region or the Amazon Resource + Name (ARN) of the target Outpost for the snapshot + copies. + type: string + type: object + type: array + deprecateRule: + description: See the deprecate_rule block. Max of 1 + per schedule. + properties: + count: + description: Specifies the number of oldest AMIs + to deprecate. Must be an integer between 1 and + 1000. Conflicts with interval and interval_unit. 
+ type: number + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + fastRestoreRule: + description: See the fast_restore_rule block. Max of + 1 per schedule. + properties: + availabilityZones: + description: The Availability Zones in which to + enable fast snapshot restore. + items: + type: string + type: array + x-kubernetes-list-type: set + count: + description: Specifies the number of oldest AMIs + to deprecate. Must be an integer between 1 and + 1000. Conflicts with interval and interval_unit. + type: number + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + name: + description: A descriptive name for the action. + type: string + retainRule: + description: Specifies the retention rule for cross-Region + snapshot copies. See the retain_rule block. Max of + 1 per action. + properties: + count: + description: Specifies the number of oldest AMIs + to deprecate. Must be an integer between 1 and + 1000. Conflicts with interval and interval_unit. + type: number + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. 
Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + shareRule: + description: See the share_rule block. Max of 1 per + schedule. + properties: + targetAccounts: + description: The IDs of the AWS accounts with which + to share the snapshots. + items: + type: string + type: array + x-kubernetes-list-type: set + unshareInterval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + unshareIntervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + tagsToAdd: + additionalProperties: + type: string + description: A map of tag keys and their values. DLM + lifecycle policies will already tag the snapshot with + the tags on the volume. This configuration adds extra + tags on top of these. + type: object + x-kubernetes-map-type: granular + variableTags: + additionalProperties: + type: string + description: A map of tag keys and variable values, + where the values are determined when the policy is + executed. Only $(instance-id) or $(timestamp) are + valid values. Can only be used when resource_types + is INSTANCE. + type: object + x-kubernetes-map-type: granular + type: object + type: array + targetTags: + additionalProperties: + type: string + description: A map of tag keys and their values. Any resources + that match the resource_types and are tagged with any of + these tags will be targeted. 
+ type: object + x-kubernetes-map-type: granular + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + state: + description: Whether the lifecycle policy should be enabled or + disabled. ENABLED or DISABLED are valid values. Defaults to + ENABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description for the DLM lifecycle policy. + type: string + executionRoleArn: + description: The ARN of an IAM role that is able to be assumed + by the DLM service. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + policyDetails: + description: See the policy_details configuration block. Max of + 1. + properties: + action: + description: The actions to be performed when the event-based + policy is triggered. You can specify only one action per + policy. This parameter is required for event-based policies + only. 
If you are creating a snapshot or AMI policy, omit + this parameter. See the action configuration block. + properties: + crossRegionCopy: + description: The rule for copying shared snapshots across + Regions. See the cross_region_copy configuration block. + items: + properties: + encryptionConfiguration: + description: The encryption settings for the copied + snapshot. See the encryption_configuration block. + Max of 1 per action. + properties: + cmkArn: + description: The Amazon Resource Name (ARN) + of the AWS KMS key to use for EBS encryption. + If this parameter is not specified, the default + KMS key for the account is used. + type: string + encrypted: + description: To encrypt a copy of an unencrypted + snapshot when encryption by default is not + enabled, enable encryption using this parameter. + Copies of encrypted snapshots are encrypted, + even if this parameter is false or when encryption + by default is not enabled. + type: boolean + type: object + retainRule: + description: Specifies the retention rule for cross-Region + snapshot copies. See the retain_rule block. Max + of 1 per action. + properties: + interval: + description: How often this lifecycle policy + should be evaluated. 1, 2,3,4,6,8,12 or 24 + are valid values. Conflicts with cron_expression. + If set, interval_unit and times must also + be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default + value. Conflicts with cron_expression. Must + be set if interval is set. + type: string + type: object + target: + description: The target Region or the Amazon Resource + Name (ARN) of the target Outpost for the snapshot + copies. + type: string + type: object + type: array + name: + description: A descriptive name for the action. + type: string + type: object + eventSource: + description: The event that triggers the event-based policy. 
+ This parameter is required for event-based policies only. + If you are creating a snapshot or AMI policy, omit this + parameter. See the event_source configuration block. + properties: + parameters: + description: A set of optional parameters for snapshot + and AMI lifecycle policies. See the parameters configuration + block. + properties: + descriptionRegex: + description: The snapshot description that can trigger + the policy. The description pattern is specified + using a regular expression. The policy runs only + if a snapshot with a description that matches the + specified pattern is shared with your account. + type: string + eventType: + description: The type of event. Currently, only shareSnapshot + events are supported. + type: string + snapshotOwner: + description: The IDs of the AWS accounts that can + trigger policy by sharing snapshots with your account. + The policy only runs if one of the specified AWS + accounts shares a snapshot with your account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: + description: The source of the event. Currently only managed + CloudWatch Events rules are supported. Valid values + are MANAGED_CWE. + type: string + type: object + parameters: + description: A set of optional parameters for snapshot and + AMI lifecycle policies. See the parameters configuration + block. + properties: + excludeBootVolume: + description: Indicates whether to exclude the root volume + from snapshots created using CreateSnapshots. The default + is false. + type: boolean + noReboot: + description: Applies to AMI lifecycle policies only. Indicates + whether targeted instances are rebooted when the lifecycle + policy runs. true indicates that targeted instances + are not rebooted when the policy runs. false indicates + that target instances are rebooted when the policy runs. + The default is true (instances are not rebooted). 
+ type: boolean + type: object + policyType: + description: The valid target resource types and actions a + policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create + a lifecycle policy that manages the lifecycle of Amazon + EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle + policy that manages the lifecycle of EBS-backed AMIs. Specify + EVENT_BASED_POLICY to create an event-based policy that + performs specific actions when a defined event occurs in + your AWS account. Default value is EBS_SNAPSHOT_MANAGEMENT. + type: string + resourceLocations: + description: The location of the resources to backup. If the + source resources are located in an AWS Region, specify CLOUD. + If the source resources are located on an Outpost in your + account, specify OUTPOST. If you specify OUTPOST, Amazon + Data Lifecycle Manager backs up all resources of the specified + type with matching target tags across all of the Outposts + in your account. Valid values are CLOUD and OUTPOST. + items: + type: string + type: array + resourceTypes: + description: A list of resource types that should be targeted + by the lifecycle policy. Valid values are VOLUME and INSTANCE. + items: + type: string + type: array + schedule: + description: See the schedule configuration block. + items: + properties: + copyTags: + description: Copy all user-defined tags on a source + volume to snapshots of the volume created by this + policy. + type: boolean + createRule: + description: See the create_rule block. Max of 1 per + schedule. + properties: + cronExpression: + description: The schedule, as a Cron expression. + The schedule interval must be between 1 hour and + 1 year. Conflicts with interval, interval_unit, + and times. + type: string + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. 
+ type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + location: + description: Specifies the destination for snapshots + created by the policy. To create snapshots in + the same Region as the source resource, specify + CLOUD. To create snapshots on the same Outpost + as the source resource, specify OUTPOST_LOCAL. + If you omit this parameter, CLOUD is used by default. + If the policy targets resources in an AWS Region, + then you must create snapshots in the same Region + as the source resource. If the policy targets + resources on an Outpost, then you can create snapshots + on the same Outpost as the source resource, or + in the Region of that Outpost. Valid values are + CLOUD and OUTPOST_LOCAL. + type: string + times: + description: A list of times in 24 hour clock format + that sets when the lifecycle policy should be + evaluated. Max of 1. Conflicts with cron_expression. + Must be set if interval is set. + items: + type: string + type: array + type: object + crossRegionCopyRule: + description: See the cross_region_copy_rule block. Max + of 3 per schedule. + items: + properties: + cmkArn: + description: The Amazon Resource Name (ARN) of + the AWS KMS key to use for EBS encryption. If + this parameter is not specified, the default + KMS key for the account is used. + type: string + cmkArnRef: + description: Reference to a Key in kms to populate + cmkArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cmkArnSelector: + description: Selector for a Key in kms to populate + cmkArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + copyTags: + description: Copy all user-defined tags on a source + volume to snapshots of the volume created by + this policy. + type: boolean + deprecateRule: + description: See the deprecate_rule block. Max + of 1 per schedule. 
+ properties: + interval: + description: How often this lifecycle policy + should be evaluated. 1, 2,3,4,6,8,12 or + 24 are valid values. Conflicts with cron_expression. + If set, interval_unit and times must also + be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default + value. Conflicts with cron_expression. Must + be set if interval is set. + type: string + type: object + encrypted: + description: To encrypt a copy of an unencrypted + snapshot when encryption by default is not enabled, + enable encryption using this parameter. Copies + of encrypted snapshots are encrypted, even if + this parameter is false or when encryption by + default is not enabled. + type: boolean + retainRule: + description: Specifies the retention rule for + cross-Region snapshot copies. See the retain_rule + block. Max of 1 per action. + properties: + interval: + description: How often this lifecycle policy + should be evaluated. 1, 2,3,4,6,8,12 or + 24 are valid values. Conflicts with cron_expression. + If set, interval_unit and times must also + be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default + value. Conflicts with cron_expression. Must + be set if interval is set. + type: string + type: object + target: + description: The target Region or the Amazon Resource + Name (ARN) of the target Outpost for the snapshot + copies. + type: string + type: object + type: array + deprecateRule: + description: See the deprecate_rule block. Max of 1 + per schedule. + properties: + count: + description: Specifies the number of oldest AMIs + to deprecate. Must be an integer between 1 and + 1000. Conflicts with interval and interval_unit. + type: number + interval: + description: How often this lifecycle policy should + be evaluated. 
1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + fastRestoreRule: + description: See the fast_restore_rule block. Max of + 1 per schedule. + properties: + availabilityZones: + description: The Availability Zones in which to + enable fast snapshot restore. + items: + type: string + type: array + x-kubernetes-list-type: set + count: + description: Specifies the number of oldest AMIs + to deprecate. Must be an integer between 1 and + 1000. Conflicts with interval and interval_unit. + type: number + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + name: + description: A descriptive name for the action. + type: string + retainRule: + description: Specifies the retention rule for cross-Region + snapshot copies. See the retain_rule block. Max of + 1 per action. + properties: + count: + description: Specifies the number of oldest AMIs + to deprecate. Must be an integer between 1 and + 1000. Conflicts with interval and interval_unit. + type: number + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. 
+ type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + shareRule: + description: See the share_rule block. Max of 1 per + schedule. + properties: + targetAccounts: + description: The IDs of the AWS accounts with which + to share the snapshots. + items: + type: string + type: array + x-kubernetes-list-type: set + unshareInterval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + unshareIntervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + tagsToAdd: + additionalProperties: + type: string + description: A map of tag keys and their values. DLM + lifecycle policies will already tag the snapshot with + the tags on the volume. This configuration adds extra + tags on top of these. + type: object + x-kubernetes-map-type: granular + variableTags: + additionalProperties: + type: string + description: A map of tag keys and variable values, + where the values are determined when the policy is + executed. Only $(instance-id) or $(timestamp) are + valid values. Can only be used when resource_types + is INSTANCE. + type: object + x-kubernetes-map-type: granular + type: object + type: array + targetTags: + additionalProperties: + type: string + description: A map of tag keys and their values. Any resources + that match the resource_types and are tagged with any of + these tags will be targeted. 
+ type: object + x-kubernetes-map-type: granular + type: object + state: + description: Whether the lifecycle policy should be enabled or + disabled. ENABLED or DISABLED are valid values. Defaults to + ENABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.description is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.description) + || (has(self.initProvider) && has(self.initProvider.description))' + - message: spec.forProvider.policyDetails is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.policyDetails) + || (has(self.initProvider) && has(self.initProvider.policyDetails))' + status: + description: LifecyclePolicyStatus defines the observed state of LifecyclePolicy. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the DLM Lifecycle Policy. + type: string + description: + description: A description for the DLM lifecycle policy. + type: string + executionRoleArn: + description: The ARN of an IAM role that is able to be assumed + by the DLM service. + type: string + id: + description: Identifier of the DLM Lifecycle Policy. + type: string + policyDetails: + description: See the policy_details configuration block. Max of + 1. + properties: + action: + description: The actions to be performed when the event-based + policy is triggered. You can specify only one action per + policy. This parameter is required for event-based policies + only. If you are creating a snapshot or AMI policy, omit + this parameter. See the action configuration block. + properties: + crossRegionCopy: + description: The rule for copying shared snapshots across + Regions. See the cross_region_copy configuration block. + items: + properties: + encryptionConfiguration: + description: The encryption settings for the copied + snapshot. See the encryption_configuration block. + Max of 1 per action. 
+ properties: + cmkArn: + description: The Amazon Resource Name (ARN) + of the AWS KMS key to use for EBS encryption. + If this parameter is not specified, the default + KMS key for the account is used. + type: string + encrypted: + description: To encrypt a copy of an unencrypted + snapshot when encryption by default is not + enabled, enable encryption using this parameter. + Copies of encrypted snapshots are encrypted, + even if this parameter is false or when encryption + by default is not enabled. + type: boolean + type: object + retainRule: + description: Specifies the retention rule for cross-Region + snapshot copies. See the retain_rule block. Max + of 1 per action. + properties: + interval: + description: How often this lifecycle policy + should be evaluated. 1, 2,3,4,6,8,12 or 24 + are valid values. Conflicts with cron_expression. + If set, interval_unit and times must also + be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default + value. Conflicts with cron_expression. Must + be set if interval is set. + type: string + type: object + target: + description: The target Region or the Amazon Resource + Name (ARN) of the target Outpost for the snapshot + copies. + type: string + type: object + type: array + name: + description: A descriptive name for the action. + type: string + type: object + eventSource: + description: The event that triggers the event-based policy. + This parameter is required for event-based policies only. + If you are creating a snapshot or AMI policy, omit this + parameter. See the event_source configuration block. + properties: + parameters: + description: A set of optional parameters for snapshot + and AMI lifecycle policies. See the parameters configuration + block. + properties: + descriptionRegex: + description: The snapshot description that can trigger + the policy. 
The description pattern is specified + using a regular expression. The policy runs only + if a snapshot with a description that matches the + specified pattern is shared with your account. + type: string + eventType: + description: The type of event. Currently, only shareSnapshot + events are supported. + type: string + snapshotOwner: + description: The IDs of the AWS accounts that can + trigger policy by sharing snapshots with your account. + The policy only runs if one of the specified AWS + accounts shares a snapshot with your account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: + description: The source of the event. Currently only managed + CloudWatch Events rules are supported. Valid values + are MANAGED_CWE. + type: string + type: object + parameters: + description: A set of optional parameters for snapshot and + AMI lifecycle policies. See the parameters configuration + block. + properties: + excludeBootVolume: + description: Indicates whether to exclude the root volume + from snapshots created using CreateSnapshots. The default + is false. + type: boolean + noReboot: + description: Applies to AMI lifecycle policies only. Indicates + whether targeted instances are rebooted when the lifecycle + policy runs. true indicates that targeted instances + are not rebooted when the policy runs. false indicates + that target instances are rebooted when the policy runs. + The default is true (instances are not rebooted). + type: boolean + type: object + policyType: + description: The valid target resource types and actions a + policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create + a lifecycle policy that manages the lifecycle of Amazon + EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle + policy that manages the lifecycle of EBS-backed AMIs. Specify + EVENT_BASED_POLICY to create an event-based policy that + performs specific actions when a defined event occurs in + your AWS account. 
Default value is EBS_SNAPSHOT_MANAGEMENT. + type: string + resourceLocations: + description: The location of the resources to backup. If the + source resources are located in an AWS Region, specify CLOUD. + If the source resources are located on an Outpost in your + account, specify OUTPOST. If you specify OUTPOST, Amazon + Data Lifecycle Manager backs up all resources of the specified + type with matching target tags across all of the Outposts + in your account. Valid values are CLOUD and OUTPOST. + items: + type: string + type: array + resourceTypes: + description: A list of resource types that should be targeted + by the lifecycle policy. Valid values are VOLUME and INSTANCE. + items: + type: string + type: array + schedule: + description: See the schedule configuration block. + items: + properties: + copyTags: + description: Copy all user-defined tags on a source + volume to snapshots of the volume created by this + policy. + type: boolean + createRule: + description: See the create_rule block. Max of 1 per + schedule. + properties: + cronExpression: + description: The schedule, as a Cron expression. + The schedule interval must be between 1 hour and + 1 year. Conflicts with interval, interval_unit, + and times. + type: string + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + location: + description: Specifies the destination for snapshots + created by the policy. To create snapshots in + the same Region as the source resource, specify + CLOUD. To create snapshots on the same Outpost + as the source resource, specify OUTPOST_LOCAL. 
+ If you omit this parameter, CLOUD is used by default. + If the policy targets resources in an AWS Region, + then you must create snapshots in the same Region + as the source resource. If the policy targets + resources on an Outpost, then you can create snapshots + on the same Outpost as the source resource, or + in the Region of that Outpost. Valid values are + CLOUD and OUTPOST_LOCAL. + type: string + times: + description: A list of times in 24 hour clock format + that sets when the lifecycle policy should be + evaluated. Max of 1. Conflicts with cron_expression. + Must be set if interval is set. + items: + type: string + type: array + type: object + crossRegionCopyRule: + description: See the cross_region_copy_rule block. Max + of 3 per schedule. + items: + properties: + cmkArn: + description: The Amazon Resource Name (ARN) of + the AWS KMS key to use for EBS encryption. If + this parameter is not specified, the default + KMS key for the account is used. + type: string + copyTags: + description: Copy all user-defined tags on a source + volume to snapshots of the volume created by + this policy. + type: boolean + deprecateRule: + description: See the deprecate_rule block. Max + of 1 per schedule. + properties: + interval: + description: How often this lifecycle policy + should be evaluated. 1, 2,3,4,6,8,12 or + 24 are valid values. Conflicts with cron_expression. + If set, interval_unit and times must also + be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default + value. Conflicts with cron_expression. Must + be set if interval is set. + type: string + type: object + encrypted: + description: To encrypt a copy of an unencrypted + snapshot when encryption by default is not enabled, + enable encryption using this parameter. 
Copies + of encrypted snapshots are encrypted, even if + this parameter is false or when encryption by + default is not enabled. + type: boolean + retainRule: + description: Specifies the retention rule for + cross-Region snapshot copies. See the retain_rule + block. Max of 1 per action. + properties: + interval: + description: How often this lifecycle policy + should be evaluated. 1, 2,3,4,6,8,12 or + 24 are valid values. Conflicts with cron_expression. + If set, interval_unit and times must also + be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default + value. Conflicts with cron_expression. Must + be set if interval is set. + type: string + type: object + target: + description: The target Region or the Amazon Resource + Name (ARN) of the target Outpost for the snapshot + copies. + type: string + type: object + type: array + deprecateRule: + description: See the deprecate_rule block. Max of 1 + per schedule. + properties: + count: + description: Specifies the number of oldest AMIs + to deprecate. Must be an integer between 1 and + 1000. Conflicts with interval and interval_unit. + type: number + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + fastRestoreRule: + description: See the fast_restore_rule block. Max of + 1 per schedule. + properties: + availabilityZones: + description: The Availability Zones in which to + enable fast snapshot restore. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + count: + description: Specifies the number of oldest AMIs + to deprecate. Must be an integer between 1 and + 1000. Conflicts with interval and interval_unit. + type: number + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + name: + description: A descriptive name for the action. + type: string + retainRule: + description: Specifies the retention rule for cross-Region + snapshot copies. See the retain_rule block. Max of + 1 per action. + properties: + count: + description: Specifies the number of oldest AMIs + to deprecate. Must be an integer between 1 and + 1000. Conflicts with interval and interval_unit. + type: number + interval: + description: How often this lifecycle policy should + be evaluated. 1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + intervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + shareRule: + description: See the share_rule block. Max of 1 per + schedule. + properties: + targetAccounts: + description: The IDs of the AWS accounts with which + to share the snapshots. + items: + type: string + type: array + x-kubernetes-list-type: set + unshareInterval: + description: How often this lifecycle policy should + be evaluated. 
1, 2,3,4,6,8,12 or 24 are valid + values. Conflicts with cron_expression. If set, + interval_unit and times must also be set. + type: number + unshareIntervalUnit: + description: The unit for how often the lifecycle + policy should be evaluated. HOURS is currently + the only allowed value and also the default value. + Conflicts with cron_expression. Must be set if + interval is set. + type: string + type: object + tagsToAdd: + additionalProperties: + type: string + description: A map of tag keys and their values. DLM + lifecycle policies will already tag the snapshot with + the tags on the volume. This configuration adds extra + tags on top of these. + type: object + x-kubernetes-map-type: granular + variableTags: + additionalProperties: + type: string + description: A map of tag keys and variable values, + where the values are determined when the policy is + executed. Only $(instance-id) or $(timestamp) are + valid values. Can only be used when resource_types + is INSTANCE. + type: object + x-kubernetes-map-type: granular + type: object + type: array + targetTags: + additionalProperties: + type: string + description: A map of tag keys and their values. Any resources + that match the resource_types and are tagged with any of + these tags will be targeted. + type: object + x-kubernetes-map-type: granular + type: object + state: + description: Whether the lifecycle policy should be enabled or + disabled. ENABLED or DISABLED are valid values. Defaults to + ENABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dms.aws.upbound.io_endpoints.yaml b/package/crds/dms.aws.upbound.io_endpoints.yaml index 4d98257354..86ad27a41f 100644 --- a/package/crds/dms.aws.upbound.io_endpoints.yaml +++ b/package/crds/dms.aws.upbound.io_endpoints.yaml @@ -2729,3 +2729,2642 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Endpoint is the Schema for the Endpoints API. Provides a DMS + (Data Migration Service) endpoint resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EndpointSpec defines the desired state of Endpoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificateArn: + description: ARN for the certificate. + type: string + databaseName: + description: Name of the endpoint database. + type: string + elasticsearchSettings: + description: Configuration block for OpenSearch settings. See + below. + properties: + endpointUri: + description: Endpoint for the OpenSearch cluster. + type: string + errorRetryDuration: + description: Maximum number of seconds for which DMS retries + failed API requests to the OpenSearch cluster. Default is + 300. + type: number + fullLoadErrorPercentage: + description: Maximum percentage of records that can fail to + be written before a full load operation stops. Default is + 10. + type: number + serviceAccessRoleArn: + description: ARN of the IAM Role with permissions to write + to the OpenSearch cluster. + type: string + useNewMappingType: + description: Enable to migrate documentation using the documentation + type _doc. OpenSearch and an Elasticsearch clusters only + support the _doc documentation type in versions 7.x and + later. The default value is false. 
+ type: boolean + type: object + endpointType: + description: Type of endpoint. Valid values are source, target. + type: string + engineName: + description: Type of engine for the endpoint. Valid values are + aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, + babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, + kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, + redshift, s3, sqlserver, sybase. Please note that some of engine + names are available only for target endpoint type (e.g. redshift). + type: string + extraConnectionAttributes: + description: Additional attributes associated with the connection. + For available attributes for a source Endpoint, see Sources + for data migration. For available attributes for a target Endpoint, + see Targets for data migration. + type: string + kafkaSettings: + description: Configuration block for Kafka settings. See below. + properties: + broker: + description: Kafka broker location. Specify in the form broker-hostname-or-ip:port. + type: string + includeControlDetails: + description: Shows detailed control information for table + definition, column definition, and table and column changes + in the Kafka message output. Default is false. + type: boolean + includeNullAndEmpty: + description: Include NULL and empty columns for records migrated + to the endpoint. Default is false. + type: boolean + includePartitionValue: + description: Shows the partition value within the Kafka message + output unless the partition type is schema-table-type. Default + is false. + type: boolean + includeTableAlterOperations: + description: Includes any data definition language (DDL) operations + that change the table in the control data, such as rename-table, + drop-table, add-column, drop-column, and rename-column. + Default is false. + type: boolean + includeTransactionDetails: + description: Provides detailed transaction information from + the source database. 
This information includes a commit + timestamp, a log position, and values for transaction_id, + previous transaction_id, and transaction_record_id (the + record offset within a transaction). Default is false. + type: boolean + messageFormat: + description: Output format for the records created on the + endpoint. Message format is JSON (default) or JSON_UNFORMATTED + (a single line with no tab). + type: string + messageMaxBytes: + description: Maximum size in bytes for records created on + the endpoint Default is 1,000,000. + type: number + noHexPrefix: + description: Set this optional parameter to true to avoid + adding a '0x' prefix to raw data in hexadecimal format. + For example, by default, AWS DMS adds a '0x' prefix to the + LOB column type in hexadecimal format moving from an Oracle + source to a Kafka target. Use the no_hex_prefix endpoint + setting to enable migration of RAW data type columns without + adding the '0x' prefix. + type: boolean + partitionIncludeSchemaTable: + description: Prefixes schema and table names to partition + values, when the partition type is primary-key-type. Doing + this increases data distribution among Kafka partitions. + For example, suppose that a SysBench schema has thousands + of tables and each table has only limited range for a primary + key. In this case, the same primary key is sent from thousands + of tables to the same partition, which causes throttling. + Default is false. + type: boolean + saslPasswordSecretRef: + description: Secure password you created when you first set + up your MSK cluster to validate a client identity and make + an encrypted connection between server and client using + SASL-SSL authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + saslUsername: + description: Secure user name you created when you first set + up your MSK cluster to validate a client identity and make + an encrypted connection between server and client using + SASL-SSL authentication. + type: string + securityProtocol: + description: Set secure connection to a Kafka target endpoint + using Transport Layer Security (TLS). Options include ssl-encryption, + ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username + and sasl_password. + type: string + sslCaCertificateArn: + description: ARN for the private certificate authority (CA) + cert that AWS DMS uses to securely connect to your Kafka + target endpoint. + type: string + sslClientCertificateArn: + description: ARN of the client certificate used to securely + connect to a Kafka target endpoint. + type: string + sslClientKeyArn: + description: ARN for the client private key used to securely + connect to a Kafka target endpoint. + type: string + sslClientKeyPasswordSecretRef: + description: Password for the client private key used to securely + connect to a Kafka target endpoint. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + topic: + description: Kafka topic for migration. Default is kafka-default-topic. + type: string + type: object + kinesisSettings: + description: Configuration block for Kinesis settings. See below. + properties: + includeControlDetails: + description: Shows detailed control information for table + definition, column definition, and table and column changes + in the Kinesis message output. Default is false. + type: boolean + includeNullAndEmpty: + description: Include NULL and empty columns in the target. + Default is false. 
+ type: boolean + includePartitionValue: + description: Shows the partition value within the Kinesis + message output, unless the partition type is schema-table-type. + Default is false. + type: boolean + includeTableAlterOperations: + description: Includes any data definition language (DDL) operations + that change the table in the control data. Default is false. + type: boolean + includeTransactionDetails: + description: Provides detailed transaction information from + the source database. Default is false. + type: boolean + messageFormat: + description: Output format for the records created. Default + is json. Valid values are json and json-unformatted (a single + line with no tab). + type: string + partitionIncludeSchemaTable: + description: Prefixes schema and table names to partition + values, when the partition type is primary-key-type. Default + is false. + type: boolean + serviceAccessRoleArn: + description: ARN of the IAM Role with permissions to write + to the Kinesis data stream. + type: string + streamArn: + description: ARN of the Kinesis data stream. + type: string + type: object + kmsKeyArn: + description: ARN for the KMS key that will be used to encrypt + the connection parameters. If you do not specify a value for + kms_key_arn, then AWS DMS will use your default encryption key. + AWS KMS creates the default encryption key for your AWS account. + Your AWS account has a different default encryption key for + each AWS region. To encrypt an S3 target with a KMS Key, use + the parameter s3_settings.server_side_encryption_kms_key_id. + When engine_name is redshift, kms_key_arn is the KMS Key for + the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id + encrypts the S3 intermediate storage. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + mongodbSettings: + description: Configuration block for MongoDB settings. See below. + properties: + authMechanism: + description: Authentication mechanism to access the MongoDB + source endpoint. Default is default. + type: string + authSource: + description: Authentication database name. Not used when auth_type + is no. Default is admin. + type: string + authType: + description: Authentication type to access the MongoDB source + endpoint. Default is password. + type: string + docsToInvestigate: + description: Number of documents to preview to determine the + document organization. Use this setting when nesting_level + is set to one. Default is 1000. + type: string + extractDocId: + description: Document ID. Use this setting when nesting_level + is set to none. Default is false. + type: string + nestingLevel: + description: Specifies either document or table mode. Default + is none. Valid values are one (table mode) and none (document + mode). + type: string + type: object + passwordSecretRef: + description: Password to be used to login to the endpoint database. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + pauseReplicationTasks: + description: Only tasks paused by the resource will be restarted + after the modification completes. Default is false. + type: boolean + port: + description: Port used by the endpoint database. + type: number + postgresSettings: + description: Configuration block for Postgres settings. See below. + properties: + afterConnectScript: + description: For use with change data capture (CDC) only, + this attribute has AWS DMS bypass foreign keys and user + triggers to reduce the time it takes to bulk load data. 
+ type: string + babelfishDatabaseName: + description: The Babelfish for Aurora PostgreSQL database + name for the endpoint. + type: string + captureDdls: + description: To capture DDL events, AWS DMS creates various + artifacts in the PostgreSQL database when the task starts. + type: boolean + databaseMode: + description: Specifies the default behavior of the replication's + handling of PostgreSQL- compatible endpoints that require + some additional configuration, such as Babelfish endpoints. + type: string + ddlArtifactsSchema: + description: Sets the schema in which the operational DDL + database artifacts are created. Default is public. + type: string + executeTimeout: + description: Sets the client statement timeout for the PostgreSQL + instance, in seconds. Default value is 60. + type: number + failTasksOnLobTruncation: + description: When set to true, this value causes a task to + fail if the actual size of a LOB column is greater than + the specified LobMaxSize. Default is false. + type: boolean + heartbeatEnable: + description: The write-ahead log (WAL) heartbeat feature mimics + a dummy transaction. By doing this, it prevents idle logical + replication slots from holding onto old WAL logs, which + can result in storage full situations on the source. + type: boolean + heartbeatFrequency: + description: Sets the WAL heartbeat frequency (in minutes). + Default value is 5. + type: number + heartbeatSchema: + description: Sets the schema in which the heartbeat artifacts + are created. Default value is public. + type: string + mapBooleanAsBoolean: + description: You can use PostgreSQL endpoint settings to map + a boolean as a boolean from your PostgreSQL source to a + Amazon Redshift target. Default value is false. + type: boolean + mapJsonbAsClob: + description: Optional When true, DMS migrates JSONB values + as CLOB. + type: boolean + mapLongVarcharAs: + description: Optional When true, DMS migrates LONG values + as VARCHAR. 
+ type: string + maxFileSize: + description: Specifies the maximum size (in KB) of any .csv + file used to transfer data to PostgreSQL. Default is 32,768 + KB. + type: number + pluginName: + description: 'Specifies the plugin to use to create a replication + slot. Valid values: pglogical, test_decoding.' + type: string + slotName: + description: Sets the name of a previously created logical + replication slot for a CDC load of the PostgreSQL source + instance. + type: string + type: object + redisSettings: + properties: + authPasswordSecretRef: + description: The password provided with the auth-role and + auth-token options of the AuthType setting for a Redis target + endpoint. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + authType: + description: Authentication type to access the MongoDB source + endpoint. Default is password. + type: string + authUserName: + description: The username provided with the auth-role option + of the AuthType setting for a Redis target endpoint. + type: string + port: + description: Port used by the endpoint database. + type: number + serverName: + description: Host name of the server. + type: string + sslCaCertificateArn: + description: The Amazon Resource Name (ARN) for the certificate + authority (CA) that DMS uses to connect to your Redis target + endpoint. + type: string + sslSecurityProtocol: + description: The plaintext option doesn't provide Transport + Layer Security (TLS) encryption for traffic between endpoint + and database. Options include plaintext, ssl-encryption. + The default is ssl-encryption. + type: string + type: object + redshiftSettings: + description: Configuration block for Redshift settings. See below. + properties: + bucketFolder: + description: Custom S3 Bucket Object prefix for intermediate + storage. 
+ type: string + bucketName: + description: Custom S3 Bucket name for intermediate storage. + type: string + encryptionMode: + description: The server-side encryption mode that you want + to encrypt your intermediate .csv object files copied to + S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS. + type: string + serverSideEncryptionKmsKeyId: + description: ARN or Id of KMS Key to use when encryption_mode + is SSE_KMS. + type: string + serviceAccessRoleArn: + description: Amazon Resource Name (ARN) of the IAM Role with + permissions to read from or write to the S3 Bucket for intermediate + storage. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + s3Settings: + description: |- + (Deprecated, use the aws_dms_s3_endpoint resource instead) Configuration block for S3 settings. See below. + This argument is deprecated and will be removed in a future version; use aws_dms_s3_endpoint instead + properties: + addColumnName: + description: Whether to add column name information to the + .csv output file. Default is false. + type: boolean + bucketFolder: + description: S3 object prefix. + type: string + bucketName: + description: S3 bucket name. + type: string + cannedAclForObjects: + description: Predefined (canned) access control list for objects + created in an S3 bucket. Valid values include none, private, + public-read, public-read-write, authenticated-read, aws-exec-read, + bucket-owner-read, and bucket-owner-full-control. Default + is none. + type: string + cdcInsertsAndUpdates: + description: Whether to write insert and update operations + to .csv or .parquet output files. Default is false. + type: boolean + cdcInsertsOnly: + description: Whether to write insert operations to .csv or + .parquet output files. Default is false. 
+ type: boolean + cdcMaxBatchInterval: + description: Maximum length of the interval, defined in seconds, + after which to output a file to Amazon S3. Default is 60. + type: number + cdcMinFileSize: + description: 'Minimum file size condition as defined in kilobytes + to output a file to Amazon S3. Default is 32000. NOTE: Previously, + this setting was measured in megabytes but now represents + kilobytes. Update configurations accordingly.' + type: number + cdcPath: + description: Folder path of CDC files. For an S3 source, this + setting is required if a task captures change data; otherwise, + it's optional. If cdc_path is set, AWS DMS reads CDC files + from this path and replicates the data changes to the target + endpoint. Supported in AWS DMS versions 3.4.2 and later. + type: string + compressionType: + description: Set to compress target files. Default is NONE. + Valid values are GZIP and NONE. + type: string + csvDelimiter: + description: Delimiter used to separate columns in the source + files. Default is ,. + type: string + csvNoSupValue: + description: String to use for all columns not included in + the supplemental log. + type: string + csvNullValue: + description: String to as null when writing to the target. + type: string + csvRowDelimiter: + description: Delimiter used to separate rows in the source + files. Default is \n. + type: string + dataFormat: + description: Output format for the files that AWS DMS uses + to create S3 objects. Valid values are csv and parquet. + Default is csv. + type: string + dataPageSize: + description: Size of one data page in bytes. Default is 1048576 + (1 MiB). + type: number + datePartitionDelimiter: + description: Date separating delimiter to use during folder + partitioning. Valid values are SLASH, UNDERSCORE, DASH, + and NONE. Default is SLASH. + type: string + datePartitionEnabled: + description: Partition S3 bucket folders based on transaction + commit dates. Default is false. 
+ type: boolean + datePartitionSequence: + description: Date format to use during folder partitioning. + Use this parameter when date_partition_enabled is set to + true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, + and DDMMYYYY. Default is YYYYMMDD. + type: string + dictPageSizeLimit: + description: Maximum size in bytes of an encoded dictionary + page of a column. Default is 1048576 (1 MiB). + type: number + enableStatistics: + description: Whether to enable statistics for Parquet pages + and row groups. Default is true. + type: boolean + encodingType: + description: Type of encoding to use. Value values are rle_dictionary, + plain, and plain_dictionary. Default is rle_dictionary. + type: string + encryptionMode: + description: Server-side encryption mode that you want to + encrypt your .csv or .parquet object files copied to S3. + Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3. + type: string + externalTableDefinition: + description: JSON document that describes how AWS DMS should + interpret the data. + type: string + glueCatalogGeneration: + description: Whether to integrate AWS Glue Data Catalog with + an Amazon S3 target. See Using AWS Glue Data Catalog with + an Amazon S3 target for AWS DMS for more information. Default + is false. + type: boolean + ignoreHeaderRows: + description: When this value is set to 1, DMS ignores the + first row header in a .csv file. Default is 0. + type: number + includeOpForFullLoad: + description: Whether to enable a full load to write INSERT + operations to the .csv output files only to indicate how + the rows were added to the source database. Default is false. + type: boolean + maxFileSize: + description: Maximum size (in KB) of any .csv file to be created + while migrating to an S3 target during full load. Valid + values are from 1 to 1048576. Default is 1048576 (1 GB). 
+ type: number + parquetTimestampInMillisecond: + description: '- Specifies the precision of any TIMESTAMP column + values written to an S3 object file in .parquet format. + Default is false.' + type: boolean + parquetVersion: + description: Version of the .parquet file format. Default + is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0. + type: string + preserveTransactions: + description: Whether DMS saves the transaction order for a + CDC load on the S3 target specified by cdc_path. Default + is false. + type: boolean + rfc4180: + description: For an S3 source, whether each leading double + quotation mark has to be followed by an ending double quotation + mark. Default is true. + type: boolean + rowGroupLength: + description: Number of rows in a row group. Default is 10000. + type: number + serverSideEncryptionKmsKeyId: + description: ARN or Id of KMS Key to use when encryption_mode + is SSE_KMS. + type: string + serviceAccessRoleArn: + description: ARN of the IAM Role with permissions to read + from or write to the S3 Bucket. + type: string + timestampColumnName: + description: Column to add with timestamp information to the + endpoint data for an Amazon S3 target. + type: string + useCsvNoSupValue: + description: Whether to use csv_no_sup_value for columns not + included in the supplemental log. + type: boolean + useTaskStartTimeForFullLoadTimestamp: + description: When set to true, uses the task start time as + the timestamp column value instead of the time data is written + to target. For full load, when set to true, each row of + the timestamp column contains the task start time. For CDC + loads, each row of the timestamp column contains the transaction + commit time. When set to false, the full load timestamp + in the timestamp column increments with the time data arrives + at the target. Default is false. 
+ type: boolean + type: object + secretsManagerAccessRoleArn: + description: ARN of the IAM role that specifies AWS DMS as the + trusted entity and has the required permissions to access the + value in the Secrets Manager secret referred to by secrets_manager_arn. + The role must allow the iam:PassRole action. + type: string + secretsManagerAccessRoleArnRef: + description: Reference to a Role in iam to populate secretsManagerAccessRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretsManagerAccessRoleArnSelector: + description: Selector for a Role in iam to populate secretsManagerAccessRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretsManagerArn: + description: text values for username, password , server_name, + and port. You can't specify both. + type: string + serverName: + description: Host name of the server. + type: string + serviceAccessRole: + description: ARN used by the service access IAM role for dynamodb + endpoints. + type: string + serviceAccessRoleRef: + description: Reference to a Role in iam to populate serviceAccessRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccessRoleSelector: + description: Selector for a Role in iam to populate serviceAccessRole. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sslMode: + description: SSL mode to use for the connection. Valid values + are none, require, verify-ca, verify-full + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + username: + description: User name to be used to login to the endpoint database. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + certificateArn: + description: ARN for the certificate. + type: string + databaseName: + description: Name of the endpoint database. + type: string + elasticsearchSettings: + description: Configuration block for OpenSearch settings. See + below. + properties: + endpointUri: + description: Endpoint for the OpenSearch cluster. + type: string + errorRetryDuration: + description: Maximum number of seconds for which DMS retries + failed API requests to the OpenSearch cluster. Default is + 300. + type: number + fullLoadErrorPercentage: + description: Maximum percentage of records that can fail to + be written before a full load operation stops. Default is + 10. + type: number + serviceAccessRoleArn: + description: ARN of the IAM Role with permissions to write + to the OpenSearch cluster. + type: string + useNewMappingType: + description: Enable to migrate documentation using the documentation + type _doc. OpenSearch and an Elasticsearch clusters only + support the _doc documentation type in versions 7.x and + later. The default value is false. + type: boolean + type: object + endpointType: + description: Type of endpoint. Valid values are source, target. + type: string + engineName: + description: Type of engine for the endpoint. Valid values are + aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, + babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, + kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, + redshift, s3, sqlserver, sybase. Please note that some of engine + names are available only for target endpoint type (e.g. redshift). + type: string + extraConnectionAttributes: + description: Additional attributes associated with the connection. 
+ For available attributes for a source Endpoint, see Sources + for data migration. For available attributes for a target Endpoint, + see Targets for data migration. + type: string + kafkaSettings: + description: Configuration block for Kafka settings. See below. + properties: + broker: + description: Kafka broker location. Specify in the form broker-hostname-or-ip:port. + type: string + includeControlDetails: + description: Shows detailed control information for table + definition, column definition, and table and column changes + in the Kafka message output. Default is false. + type: boolean + includeNullAndEmpty: + description: Include NULL and empty columns for records migrated + to the endpoint. Default is false. + type: boolean + includePartitionValue: + description: Shows the partition value within the Kafka message + output unless the partition type is schema-table-type. Default + is false. + type: boolean + includeTableAlterOperations: + description: Includes any data definition language (DDL) operations + that change the table in the control data, such as rename-table, + drop-table, add-column, drop-column, and rename-column. + Default is false. + type: boolean + includeTransactionDetails: + description: Provides detailed transaction information from + the source database. This information includes a commit + timestamp, a log position, and values for transaction_id, + previous transaction_id, and transaction_record_id (the + record offset within a transaction). Default is false. + type: boolean + messageFormat: + description: Output format for the records created on the + endpoint. Message format is JSON (default) or JSON_UNFORMATTED + (a single line with no tab). + type: string + messageMaxBytes: + description: Maximum size in bytes for records created on + the endpoint Default is 1,000,000. + type: number + noHexPrefix: + description: Set this optional parameter to true to avoid + adding a '0x' prefix to raw data in hexadecimal format. 
+ For example, by default, AWS DMS adds a '0x' prefix to the + LOB column type in hexadecimal format moving from an Oracle + source to a Kafka target. Use the no_hex_prefix endpoint + setting to enable migration of RAW data type columns without + adding the '0x' prefix. + type: boolean + partitionIncludeSchemaTable: + description: Prefixes schema and table names to partition + values, when the partition type is primary-key-type. Doing + this increases data distribution among Kafka partitions. + For example, suppose that a SysBench schema has thousands + of tables and each table has only limited range for a primary + key. In this case, the same primary key is sent from thousands + of tables to the same partition, which causes throttling. + Default is false. + type: boolean + saslPasswordSecretRef: + description: Secure password you created when you first set + up your MSK cluster to validate a client identity and make + an encrypted connection between server and client using + SASL-SSL authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + saslUsername: + description: Secure user name you created when you first set + up your MSK cluster to validate a client identity and make + an encrypted connection between server and client using + SASL-SSL authentication. + type: string + securityProtocol: + description: Set secure connection to a Kafka target endpoint + using Transport Layer Security (TLS). Options include ssl-encryption, + ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username + and sasl_password. + type: string + sslCaCertificateArn: + description: ARN for the private certificate authority (CA) + cert that AWS DMS uses to securely connect to your Kafka + target endpoint. 
+ type: string + sslClientCertificateArn: + description: ARN of the client certificate used to securely + connect to a Kafka target endpoint. + type: string + sslClientKeyArn: + description: ARN for the client private key used to securely + connect to a Kafka target endpoint. + type: string + sslClientKeyPasswordSecretRef: + description: Password for the client private key used to securely + connect to a Kafka target endpoint. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + topic: + description: Kafka topic for migration. Default is kafka-default-topic. + type: string + type: object + kinesisSettings: + description: Configuration block for Kinesis settings. See below. + properties: + includeControlDetails: + description: Shows detailed control information for table + definition, column definition, and table and column changes + in the Kinesis message output. Default is false. + type: boolean + includeNullAndEmpty: + description: Include NULL and empty columns in the target. + Default is false. + type: boolean + includePartitionValue: + description: Shows the partition value within the Kinesis + message output, unless the partition type is schema-table-type. + Default is false. + type: boolean + includeTableAlterOperations: + description: Includes any data definition language (DDL) operations + that change the table in the control data. Default is false. + type: boolean + includeTransactionDetails: + description: Provides detailed transaction information from + the source database. Default is false. + type: boolean + messageFormat: + description: Output format for the records created. Default + is json. Valid values are json and json-unformatted (a single + line with no tab). 
+ type: string + partitionIncludeSchemaTable: + description: Prefixes schema and table names to partition + values, when the partition type is primary-key-type. Default + is false. + type: boolean + serviceAccessRoleArn: + description: ARN of the IAM Role with permissions to write + to the Kinesis data stream. + type: string + streamArn: + description: ARN of the Kinesis data stream. + type: string + type: object + kmsKeyArn: + description: ARN for the KMS key that will be used to encrypt + the connection parameters. If you do not specify a value for + kms_key_arn, then AWS DMS will use your default encryption key. + AWS KMS creates the default encryption key for your AWS account. + Your AWS account has a different default encryption key for + each AWS region. To encrypt an S3 target with a KMS Key, use + the parameter s3_settings.server_side_encryption_kms_key_id. + When engine_name is redshift, kms_key_arn is the KMS Key for + the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id + encrypts the S3 intermediate storage. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + mongodbSettings: + description: Configuration block for MongoDB settings. See below. + properties: + authMechanism: + description: Authentication mechanism to access the MongoDB + source endpoint. Default is default. + type: string + authSource: + description: Authentication database name. Not used when auth_type + is no. Default is admin. + type: string + authType: + description: Authentication type to access the MongoDB source + endpoint. Default is password. + type: string + docsToInvestigate: + description: Number of documents to preview to determine the + document organization. Use this setting when nesting_level + is set to one. Default is 1000. 
+ type: string + extractDocId: + description: Document ID. Use this setting when nesting_level + is set to none. Default is false. + type: string + nestingLevel: + description: Specifies either document or table mode. Default + is none. Valid values are one (table mode) and none (document + mode). + type: string + type: object + passwordSecretRef: + description: Password to be used to login to the endpoint database. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + pauseReplicationTasks: + description: Only tasks paused by the resource will be restarted + after the modification completes. Default is false. + type: boolean + port: + description: Port used by the endpoint database. + type: number + postgresSettings: + description: Configuration block for Postgres settings. See below. + properties: + afterConnectScript: + description: For use with change data capture (CDC) only, + this attribute has AWS DMS bypass foreign keys and user + triggers to reduce the time it takes to bulk load data. + type: string + babelfishDatabaseName: + description: The Babelfish for Aurora PostgreSQL database + name for the endpoint. + type: string + captureDdls: + description: To capture DDL events, AWS DMS creates various + artifacts in the PostgreSQL database when the task starts. + type: boolean + databaseMode: + description: Specifies the default behavior of the replication's + handling of PostgreSQL- compatible endpoints that require + some additional configuration, such as Babelfish endpoints. + type: string + ddlArtifactsSchema: + description: Sets the schema in which the operational DDL + database artifacts are created. Default is public. + type: string + executeTimeout: + description: Sets the client statement timeout for the PostgreSQL + instance, in seconds. 
Default value is 60. + type: number + failTasksOnLobTruncation: + description: When set to true, this value causes a task to + fail if the actual size of a LOB column is greater than + the specified LobMaxSize. Default is false. + type: boolean + heartbeatEnable: + description: The write-ahead log (WAL) heartbeat feature mimics + a dummy transaction. By doing this, it prevents idle logical + replication slots from holding onto old WAL logs, which + can result in storage full situations on the source. + type: boolean + heartbeatFrequency: + description: Sets the WAL heartbeat frequency (in minutes). + Default value is 5. + type: number + heartbeatSchema: + description: Sets the schema in which the heartbeat artifacts + are created. Default value is public. + type: string + mapBooleanAsBoolean: + description: You can use PostgreSQL endpoint settings to map + a boolean as a boolean from your PostgreSQL source to a + Amazon Redshift target. Default value is false. + type: boolean + mapJsonbAsClob: + description: Optional When true, DMS migrates JSONB values + as CLOB. + type: boolean + mapLongVarcharAs: + description: Optional When true, DMS migrates LONG values + as VARCHAR. + type: string + maxFileSize: + description: Specifies the maximum size (in KB) of any .csv + file used to transfer data to PostgreSQL. Default is 32,768 + KB. + type: number + pluginName: + description: 'Specifies the plugin to use to create a replication + slot. Valid values: pglogical, test_decoding.' + type: string + slotName: + description: Sets the name of a previously created logical + replication slot for a CDC load of the PostgreSQL source + instance. + type: string + type: object + redisSettings: + properties: + authPasswordSecretRef: + description: The password provided with the auth-role and + auth-token options of the AuthType setting for a Redis target + endpoint. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + authType: + description: Authentication type to access the MongoDB source + endpoint. Default is password. + type: string + authUserName: + description: The username provided with the auth-role option + of the AuthType setting for a Redis target endpoint. + type: string + port: + description: Port used by the endpoint database. + type: number + serverName: + description: Host name of the server. + type: string + sslCaCertificateArn: + description: The Amazon Resource Name (ARN) for the certificate + authority (CA) that DMS uses to connect to your Redis target + endpoint. + type: string + sslSecurityProtocol: + description: The plaintext option doesn't provide Transport + Layer Security (TLS) encryption for traffic between endpoint + and database. Options include plaintext, ssl-encryption. + The default is ssl-encryption. + type: string + type: object + redshiftSettings: + description: Configuration block for Redshift settings. See below. + properties: + bucketFolder: + description: Custom S3 Bucket Object prefix for intermediate + storage. + type: string + bucketName: + description: Custom S3 Bucket name for intermediate storage. + type: string + encryptionMode: + description: The server-side encryption mode that you want + to encrypt your intermediate .csv object files copied to + S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS. + type: string + serverSideEncryptionKmsKeyId: + description: ARN or Id of KMS Key to use when encryption_mode + is SSE_KMS. + type: string + serviceAccessRoleArn: + description: Amazon Resource Name (ARN) of the IAM Role with + permissions to read from or write to the S3 Bucket for intermediate + storage. + type: string + type: object + s3Settings: + description: |- + (Deprecated, use the aws_dms_s3_endpoint resource instead) Configuration block for S3 settings. See below. 
+ This argument is deprecated and will be removed in a future version; use aws_dms_s3_endpoint instead + properties: + addColumnName: + description: Whether to add column name information to the + .csv output file. Default is false. + type: boolean + bucketFolder: + description: S3 object prefix. + type: string + bucketName: + description: S3 bucket name. + type: string + cannedAclForObjects: + description: Predefined (canned) access control list for objects + created in an S3 bucket. Valid values include none, private, + public-read, public-read-write, authenticated-read, aws-exec-read, + bucket-owner-read, and bucket-owner-full-control. Default + is none. + type: string + cdcInsertsAndUpdates: + description: Whether to write insert and update operations + to .csv or .parquet output files. Default is false. + type: boolean + cdcInsertsOnly: + description: Whether to write insert operations to .csv or + .parquet output files. Default is false. + type: boolean + cdcMaxBatchInterval: + description: Maximum length of the interval, defined in seconds, + after which to output a file to Amazon S3. Default is 60. + type: number + cdcMinFileSize: + description: 'Minimum file size condition as defined in kilobytes + to output a file to Amazon S3. Default is 32000. NOTE: Previously, + this setting was measured in megabytes but now represents + kilobytes. Update configurations accordingly.' + type: number + cdcPath: + description: Folder path of CDC files. For an S3 source, this + setting is required if a task captures change data; otherwise, + it's optional. If cdc_path is set, AWS DMS reads CDC files + from this path and replicates the data changes to the target + endpoint. Supported in AWS DMS versions 3.4.2 and later. + type: string + compressionType: + description: Set to compress target files. Default is NONE. + Valid values are GZIP and NONE. + type: string + csvDelimiter: + description: Delimiter used to separate columns in the source + files. Default is ,. 
+ type: string + csvNoSupValue: + description: String to use for all columns not included in + the supplemental log. + type: string + csvNullValue: + description: String to as null when writing to the target. + type: string + csvRowDelimiter: + description: Delimiter used to separate rows in the source + files. Default is \n. + type: string + dataFormat: + description: Output format for the files that AWS DMS uses + to create S3 objects. Valid values are csv and parquet. + Default is csv. + type: string + dataPageSize: + description: Size of one data page in bytes. Default is 1048576 + (1 MiB). + type: number + datePartitionDelimiter: + description: Date separating delimiter to use during folder + partitioning. Valid values are SLASH, UNDERSCORE, DASH, + and NONE. Default is SLASH. + type: string + datePartitionEnabled: + description: Partition S3 bucket folders based on transaction + commit dates. Default is false. + type: boolean + datePartitionSequence: + description: Date format to use during folder partitioning. + Use this parameter when date_partition_enabled is set to + true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, + and DDMMYYYY. Default is YYYYMMDD. + type: string + dictPageSizeLimit: + description: Maximum size in bytes of an encoded dictionary + page of a column. Default is 1048576 (1 MiB). + type: number + enableStatistics: + description: Whether to enable statistics for Parquet pages + and row groups. Default is true. + type: boolean + encodingType: + description: Type of encoding to use. Value values are rle_dictionary, + plain, and plain_dictionary. Default is rle_dictionary. + type: string + encryptionMode: + description: Server-side encryption mode that you want to + encrypt your .csv or .parquet object files copied to S3. + Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3. + type: string + externalTableDefinition: + description: JSON document that describes how AWS DMS should + interpret the data. 
+ type: string + glueCatalogGeneration: + description: Whether to integrate AWS Glue Data Catalog with + an Amazon S3 target. See Using AWS Glue Data Catalog with + an Amazon S3 target for AWS DMS for more information. Default + is false. + type: boolean + ignoreHeaderRows: + description: When this value is set to 1, DMS ignores the + first row header in a .csv file. Default is 0. + type: number + includeOpForFullLoad: + description: Whether to enable a full load to write INSERT + operations to the .csv output files only to indicate how + the rows were added to the source database. Default is false. + type: boolean + maxFileSize: + description: Maximum size (in KB) of any .csv file to be created + while migrating to an S3 target during full load. Valid + values are from 1 to 1048576. Default is 1048576 (1 GB). + type: number + parquetTimestampInMillisecond: + description: '- Specifies the precision of any TIMESTAMP column + values written to an S3 object file in .parquet format. + Default is false.' + type: boolean + parquetVersion: + description: Version of the .parquet file format. Default + is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0. + type: string + preserveTransactions: + description: Whether DMS saves the transaction order for a + CDC load on the S3 target specified by cdc_path. Default + is false. + type: boolean + rfc4180: + description: For an S3 source, whether each leading double + quotation mark has to be followed by an ending double quotation + mark. Default is true. + type: boolean + rowGroupLength: + description: Number of rows in a row group. Default is 10000. + type: number + serverSideEncryptionKmsKeyId: + description: ARN or Id of KMS Key to use when encryption_mode + is SSE_KMS. + type: string + serviceAccessRoleArn: + description: ARN of the IAM Role with permissions to read + from or write to the S3 Bucket. 
+ type: string + timestampColumnName: + description: Column to add with timestamp information to the + endpoint data for an Amazon S3 target. + type: string + useCsvNoSupValue: + description: Whether to use csv_no_sup_value for columns not + included in the supplemental log. + type: boolean + useTaskStartTimeForFullLoadTimestamp: + description: When set to true, uses the task start time as + the timestamp column value instead of the time data is written + to target. For full load, when set to true, each row of + the timestamp column contains the task start time. For CDC + loads, each row of the timestamp column contains the transaction + commit time. When set to false, the full load timestamp + in the timestamp column increments with the time data arrives + at the target. Default is false. + type: boolean + type: object + secretsManagerAccessRoleArn: + description: ARN of the IAM role that specifies AWS DMS as the + trusted entity and has the required permissions to access the + value in the Secrets Manager secret referred to by secrets_manager_arn. + The role must allow the iam:PassRole action. + type: string + secretsManagerAccessRoleArnRef: + description: Reference to a Role in iam to populate secretsManagerAccessRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretsManagerAccessRoleArnSelector: + description: Selector for a Role in iam to populate secretsManagerAccessRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretsManagerArn: + description: text values for username, password , server_name, + and port. You can't specify both. + type: string + serverName: + description: Host name of the server. + type: string + serviceAccessRole: + description: ARN used by the service access IAM role for dynamodb + endpoints. + type: string + serviceAccessRoleRef: + description: Reference to a Role in iam to populate serviceAccessRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccessRoleSelector: + description: Selector for a Role in iam to populate serviceAccessRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sslMode: + description: SSL mode to use for the connection. Valid values + are none, require, verify-ca, verify-full + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + username: + description: User name to be used to login to the endpoint database. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.endpointType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.endpointType) + || (has(self.initProvider) && has(self.initProvider.endpointType))' + - message: spec.forProvider.engineName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engineName) + || (has(self.initProvider) && has(self.initProvider.engineName))' + status: + description: EndpointStatus defines the observed state of Endpoint. + properties: + atProvider: + properties: + certificateArn: + description: ARN for the certificate. + type: string + databaseName: + description: Name of the endpoint database. + type: string + elasticsearchSettings: + description: Configuration block for OpenSearch settings. See + below. + properties: + endpointUri: + description: Endpoint for the OpenSearch cluster. + type: string + errorRetryDuration: + description: Maximum number of seconds for which DMS retries + failed API requests to the OpenSearch cluster. Default is + 300. + type: number + fullLoadErrorPercentage: + description: Maximum percentage of records that can fail to + be written before a full load operation stops. Default is + 10. + type: number + serviceAccessRoleArn: + description: ARN of the IAM Role with permissions to write + to the OpenSearch cluster. + type: string + useNewMappingType: + description: Enable to migrate documentation using the documentation + type _doc. OpenSearch and an Elasticsearch clusters only + support the _doc documentation type in versions 7.x and + later. The default value is false. + type: boolean + type: object + endpointArn: + description: ARN for the endpoint. 
+ type: string + endpointType: + description: Type of endpoint. Valid values are source, target. + type: string + engineName: + description: Type of engine for the endpoint. Valid values are + aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, + babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, + kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, + redshift, s3, sqlserver, sybase. Please note that some of engine + names are available only for target endpoint type (e.g. redshift). + type: string + extraConnectionAttributes: + description: Additional attributes associated with the connection. + For available attributes for a source Endpoint, see Sources + for data migration. For available attributes for a target Endpoint, + see Targets for data migration. + type: string + id: + type: string + kafkaSettings: + description: Configuration block for Kafka settings. See below. + properties: + broker: + description: Kafka broker location. Specify in the form broker-hostname-or-ip:port. + type: string + includeControlDetails: + description: Shows detailed control information for table + definition, column definition, and table and column changes + in the Kafka message output. Default is false. + type: boolean + includeNullAndEmpty: + description: Include NULL and empty columns for records migrated + to the endpoint. Default is false. + type: boolean + includePartitionValue: + description: Shows the partition value within the Kafka message + output unless the partition type is schema-table-type. Default + is false. + type: boolean + includeTableAlterOperations: + description: Includes any data definition language (DDL) operations + that change the table in the control data, such as rename-table, + drop-table, add-column, drop-column, and rename-column. + Default is false. + type: boolean + includeTransactionDetails: + description: Provides detailed transaction information from + the source database. 
This information includes a commit + timestamp, a log position, and values for transaction_id, + previous transaction_id, and transaction_record_id (the + record offset within a transaction). Default is false. + type: boolean + messageFormat: + description: Output format for the records created on the + endpoint. Message format is JSON (default) or JSON_UNFORMATTED + (a single line with no tab). + type: string + messageMaxBytes: + description: Maximum size in bytes for records created on + the endpoint Default is 1,000,000. + type: number + noHexPrefix: + description: Set this optional parameter to true to avoid + adding a '0x' prefix to raw data in hexadecimal format. + For example, by default, AWS DMS adds a '0x' prefix to the + LOB column type in hexadecimal format moving from an Oracle + source to a Kafka target. Use the no_hex_prefix endpoint + setting to enable migration of RAW data type columns without + adding the '0x' prefix. + type: boolean + partitionIncludeSchemaTable: + description: Prefixes schema and table names to partition + values, when the partition type is primary-key-type. Doing + this increases data distribution among Kafka partitions. + For example, suppose that a SysBench schema has thousands + of tables and each table has only limited range for a primary + key. In this case, the same primary key is sent from thousands + of tables to the same partition, which causes throttling. + Default is false. + type: boolean + saslUsername: + description: Secure user name you created when you first set + up your MSK cluster to validate a client identity and make + an encrypted connection between server and client using + SASL-SSL authentication. + type: string + securityProtocol: + description: Set secure connection to a Kafka target endpoint + using Transport Layer Security (TLS). Options include ssl-encryption, + ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username + and sasl_password. 
+ type: string + sslCaCertificateArn: + description: ARN for the private certificate authority (CA) + cert that AWS DMS uses to securely connect to your Kafka + target endpoint. + type: string + sslClientCertificateArn: + description: ARN of the client certificate used to securely + connect to a Kafka target endpoint. + type: string + sslClientKeyArn: + description: ARN for the client private key used to securely + connect to a Kafka target endpoint. + type: string + topic: + description: Kafka topic for migration. Default is kafka-default-topic. + type: string + type: object + kinesisSettings: + description: Configuration block for Kinesis settings. See below. + properties: + includeControlDetails: + description: Shows detailed control information for table + definition, column definition, and table and column changes + in the Kinesis message output. Default is false. + type: boolean + includeNullAndEmpty: + description: Include NULL and empty columns in the target. + Default is false. + type: boolean + includePartitionValue: + description: Shows the partition value within the Kinesis + message output, unless the partition type is schema-table-type. + Default is false. + type: boolean + includeTableAlterOperations: + description: Includes any data definition language (DDL) operations + that change the table in the control data. Default is false. + type: boolean + includeTransactionDetails: + description: Provides detailed transaction information from + the source database. Default is false. + type: boolean + messageFormat: + description: Output format for the records created. Default + is json. Valid values are json and json-unformatted (a single + line with no tab). + type: string + partitionIncludeSchemaTable: + description: Prefixes schema and table names to partition + values, when the partition type is primary-key-type. Default + is false. 
+ type: boolean + serviceAccessRoleArn: + description: ARN of the IAM Role with permissions to write + to the Kinesis data stream. + type: string + streamArn: + description: ARN of the Kinesis data stream. + type: string + type: object + kmsKeyArn: + description: ARN for the KMS key that will be used to encrypt + the connection parameters. If you do not specify a value for + kms_key_arn, then AWS DMS will use your default encryption key. + AWS KMS creates the default encryption key for your AWS account. + Your AWS account has a different default encryption key for + each AWS region. To encrypt an S3 target with a KMS Key, use + the parameter s3_settings.server_side_encryption_kms_key_id. + When engine_name is redshift, kms_key_arn is the KMS Key for + the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id + encrypts the S3 intermediate storage. + type: string + mongodbSettings: + description: Configuration block for MongoDB settings. See below. + properties: + authMechanism: + description: Authentication mechanism to access the MongoDB + source endpoint. Default is default. + type: string + authSource: + description: Authentication database name. Not used when auth_type + is no. Default is admin. + type: string + authType: + description: Authentication type to access the MongoDB source + endpoint. Default is password. + type: string + docsToInvestigate: + description: Number of documents to preview to determine the + document organization. Use this setting when nesting_level + is set to one. Default is 1000. + type: string + extractDocId: + description: Document ID. Use this setting when nesting_level + is set to none. Default is false. + type: string + nestingLevel: + description: Specifies either document or table mode. Default + is none. Valid values are one (table mode) and none (document + mode). 
+ type: string + type: object + pauseReplicationTasks: + description: Only tasks paused by the resource will be restarted + after the modification completes. Default is false. + type: boolean + port: + description: Port used by the endpoint database. + type: number + postgresSettings: + description: Configuration block for Postgres settings. See below. + properties: + afterConnectScript: + description: For use with change data capture (CDC) only, + this attribute has AWS DMS bypass foreign keys and user + triggers to reduce the time it takes to bulk load data. + type: string + babelfishDatabaseName: + description: The Babelfish for Aurora PostgreSQL database + name for the endpoint. + type: string + captureDdls: + description: To capture DDL events, AWS DMS creates various + artifacts in the PostgreSQL database when the task starts. + type: boolean + databaseMode: + description: Specifies the default behavior of the replication's + handling of PostgreSQL- compatible endpoints that require + some additional configuration, such as Babelfish endpoints. + type: string + ddlArtifactsSchema: + description: Sets the schema in which the operational DDL + database artifacts are created. Default is public. + type: string + executeTimeout: + description: Sets the client statement timeout for the PostgreSQL + instance, in seconds. Default value is 60. + type: number + failTasksOnLobTruncation: + description: When set to true, this value causes a task to + fail if the actual size of a LOB column is greater than + the specified LobMaxSize. Default is false. + type: boolean + heartbeatEnable: + description: The write-ahead log (WAL) heartbeat feature mimics + a dummy transaction. By doing this, it prevents idle logical + replication slots from holding onto old WAL logs, which + can result in storage full situations on the source. + type: boolean + heartbeatFrequency: + description: Sets the WAL heartbeat frequency (in minutes). + Default value is 5. 
+ type: number + heartbeatSchema: + description: Sets the schema in which the heartbeat artifacts + are created. Default value is public. + type: string + mapBooleanAsBoolean: + description: You can use PostgreSQL endpoint settings to map + a boolean as a boolean from your PostgreSQL source to a + Amazon Redshift target. Default value is false. + type: boolean + mapJsonbAsClob: + description: Optional When true, DMS migrates JSONB values + as CLOB. + type: boolean + mapLongVarcharAs: + description: Optional When true, DMS migrates LONG values + as VARCHAR. + type: string + maxFileSize: + description: Specifies the maximum size (in KB) of any .csv + file used to transfer data to PostgreSQL. Default is 32,768 + KB. + type: number + pluginName: + description: 'Specifies the plugin to use to create a replication + slot. Valid values: pglogical, test_decoding.' + type: string + slotName: + description: Sets the name of a previously created logical + replication slot for a CDC load of the PostgreSQL source + instance. + type: string + type: object + redisSettings: + properties: + authType: + description: Authentication type to access the MongoDB source + endpoint. Default is password. + type: string + authUserName: + description: The username provided with the auth-role option + of the AuthType setting for a Redis target endpoint. + type: string + port: + description: Port used by the endpoint database. + type: number + serverName: + description: Host name of the server. + type: string + sslCaCertificateArn: + description: The Amazon Resource Name (ARN) for the certificate + authority (CA) that DMS uses to connect to your Redis target + endpoint. + type: string + sslSecurityProtocol: + description: The plaintext option doesn't provide Transport + Layer Security (TLS) encryption for traffic between endpoint + and database. Options include plaintext, ssl-encryption. + The default is ssl-encryption. 
+ type: string + type: object + redshiftSettings: + description: Configuration block for Redshift settings. See below. + properties: + bucketFolder: + description: Custom S3 Bucket Object prefix for intermediate + storage. + type: string + bucketName: + description: Custom S3 Bucket name for intermediate storage. + type: string + encryptionMode: + description: The server-side encryption mode that you want + to encrypt your intermediate .csv object files copied to + S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS. + type: string + serverSideEncryptionKmsKeyId: + description: ARN or Id of KMS Key to use when encryption_mode + is SSE_KMS. + type: string + serviceAccessRoleArn: + description: Amazon Resource Name (ARN) of the IAM Role with + permissions to read from or write to the S3 Bucket for intermediate + storage. + type: string + type: object + s3Settings: + description: |- + (Deprecated, use the aws_dms_s3_endpoint resource instead) Configuration block for S3 settings. See below. + This argument is deprecated and will be removed in a future version; use aws_dms_s3_endpoint instead + properties: + addColumnName: + description: Whether to add column name information to the + .csv output file. Default is false. + type: boolean + bucketFolder: + description: S3 object prefix. + type: string + bucketName: + description: S3 bucket name. + type: string + cannedAclForObjects: + description: Predefined (canned) access control list for objects + created in an S3 bucket. Valid values include none, private, + public-read, public-read-write, authenticated-read, aws-exec-read, + bucket-owner-read, and bucket-owner-full-control. Default + is none. + type: string + cdcInsertsAndUpdates: + description: Whether to write insert and update operations + to .csv or .parquet output files. Default is false. + type: boolean + cdcInsertsOnly: + description: Whether to write insert operations to .csv or + .parquet output files. Default is false. 
+ type: boolean + cdcMaxBatchInterval: + description: Maximum length of the interval, defined in seconds, + after which to output a file to Amazon S3. Default is 60. + type: number + cdcMinFileSize: + description: 'Minimum file size condition as defined in kilobytes + to output a file to Amazon S3. Default is 32000. NOTE: Previously, + this setting was measured in megabytes but now represents + kilobytes. Update configurations accordingly.' + type: number + cdcPath: + description: Folder path of CDC files. For an S3 source, this + setting is required if a task captures change data; otherwise, + it's optional. If cdc_path is set, AWS DMS reads CDC files + from this path and replicates the data changes to the target + endpoint. Supported in AWS DMS versions 3.4.2 and later. + type: string + compressionType: + description: Set to compress target files. Default is NONE. + Valid values are GZIP and NONE. + type: string + csvDelimiter: + description: Delimiter used to separate columns in the source + files. Default is ,. + type: string + csvNoSupValue: + description: String to use for all columns not included in + the supplemental log. + type: string + csvNullValue: + description: String to as null when writing to the target. + type: string + csvRowDelimiter: + description: Delimiter used to separate rows in the source + files. Default is \n. + type: string + dataFormat: + description: Output format for the files that AWS DMS uses + to create S3 objects. Valid values are csv and parquet. + Default is csv. + type: string + dataPageSize: + description: Size of one data page in bytes. Default is 1048576 + (1 MiB). + type: number + datePartitionDelimiter: + description: Date separating delimiter to use during folder + partitioning. Valid values are SLASH, UNDERSCORE, DASH, + and NONE. Default is SLASH. + type: string + datePartitionEnabled: + description: Partition S3 bucket folders based on transaction + commit dates. Default is false. 
+ type: boolean + datePartitionSequence: + description: Date format to use during folder partitioning. + Use this parameter when date_partition_enabled is set to + true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, + and DDMMYYYY. Default is YYYYMMDD. + type: string + dictPageSizeLimit: + description: Maximum size in bytes of an encoded dictionary + page of a column. Default is 1048576 (1 MiB). + type: number + enableStatistics: + description: Whether to enable statistics for Parquet pages + and row groups. Default is true. + type: boolean + encodingType: + description: Type of encoding to use. Value values are rle_dictionary, + plain, and plain_dictionary. Default is rle_dictionary. + type: string + encryptionMode: + description: Server-side encryption mode that you want to + encrypt your .csv or .parquet object files copied to S3. + Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3. + type: string + externalTableDefinition: + description: JSON document that describes how AWS DMS should + interpret the data. + type: string + glueCatalogGeneration: + description: Whether to integrate AWS Glue Data Catalog with + an Amazon S3 target. See Using AWS Glue Data Catalog with + an Amazon S3 target for AWS DMS for more information. Default + is false. + type: boolean + ignoreHeaderRows: + description: When this value is set to 1, DMS ignores the + first row header in a .csv file. Default is 0. + type: number + includeOpForFullLoad: + description: Whether to enable a full load to write INSERT + operations to the .csv output files only to indicate how + the rows were added to the source database. Default is false. + type: boolean + maxFileSize: + description: Maximum size (in KB) of any .csv file to be created + while migrating to an S3 target during full load. Valid + values are from 1 to 1048576. Default is 1048576 (1 GB). 
+ type: number + parquetTimestampInMillisecond: + description: '- Specifies the precision of any TIMESTAMP column + values written to an S3 object file in .parquet format. + Default is false.' + type: boolean + parquetVersion: + description: Version of the .parquet file format. Default + is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0. + type: string + preserveTransactions: + description: Whether DMS saves the transaction order for a + CDC load on the S3 target specified by cdc_path. Default + is false. + type: boolean + rfc4180: + description: For an S3 source, whether each leading double + quotation mark has to be followed by an ending double quotation + mark. Default is true. + type: boolean + rowGroupLength: + description: Number of rows in a row group. Default is 10000. + type: number + serverSideEncryptionKmsKeyId: + description: ARN or Id of KMS Key to use when encryption_mode + is SSE_KMS. + type: string + serviceAccessRoleArn: + description: ARN of the IAM Role with permissions to read + from or write to the S3 Bucket. + type: string + timestampColumnName: + description: Column to add with timestamp information to the + endpoint data for an Amazon S3 target. + type: string + useCsvNoSupValue: + description: Whether to use csv_no_sup_value for columns not + included in the supplemental log. + type: boolean + useTaskStartTimeForFullLoadTimestamp: + description: When set to true, uses the task start time as + the timestamp column value instead of the time data is written + to target. For full load, when set to true, each row of + the timestamp column contains the task start time. For CDC + loads, each row of the timestamp column contains the transaction + commit time. When set to false, the full load timestamp + in the timestamp column increments with the time data arrives + at the target. Default is false. 
+ type: boolean + type: object + secretsManagerAccessRoleArn: + description: ARN of the IAM role that specifies AWS DMS as the + trusted entity and has the required permissions to access the + value in the Secrets Manager secret referred to by secrets_manager_arn. + The role must allow the iam:PassRole action. + type: string + secretsManagerArn: + description: text values for username, password , server_name, + and port. You can't specify both. + type: string + serverName: + description: Host name of the server. + type: string + serviceAccessRole: + description: ARN used by the service access IAM role for dynamodb + endpoints. + type: string + sslMode: + description: SSL mode to use for the connection. Valid values + are none, require, verify-ca, verify-full + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + username: + description: User name to be used to login to the endpoint database. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ds.aws.upbound.io_directories.yaml b/package/crds/ds.aws.upbound.io_directories.yaml index 95ebe7d66b..0aad8abe92 100644 --- a/package/crds/ds.aws.upbound.io_directories.yaml +++ b/package/crds/ds.aws.upbound.io_directories.yaml @@ -1289,3 +1289,1259 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Directory is the Schema for the Directorys API. Provides a directory + in AWS Directory Service. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DirectorySpec defines the desired state of Directory + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + alias: + description: The alias for the directory (must be unique amongst + all aliases in AWS). Required for enable_sso. + type: string + connectSettings: + description: Connector related information about the directory. + Fields documented below. + properties: + customerDnsIps: + description: The DNS IP addresses of the domain to connect + to. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + customerUsername: + description: The username corresponding to the password provided. + type: string + subnetIds: + description: The identifiers of the subnets for the directory + servers (2 subnets in 2 different AZs). + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcId: + description: The identifier of the VPC that the directory + is in. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + description: + description: A textual description for the directory. + type: string + desiredNumberOfDomainControllers: + description: The number of domain controllers desired in the directory. + Minimum value of 2. Scaling of domain controllers is only supported + for MicrosoftAD directories. + type: number + edition: + description: The MicrosoftAD edition (Standard or Enterprise). + Defaults to Enterprise. + type: string + enableSso: + description: Whether to enable single-sign on for the directory. + Requires alias. Defaults to false. + type: boolean + name: + description: The fully qualified name for the directory, such + as corp.example.com + type: string + passwordSecretRef: + description: The password for the directory administrator or connector + user. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + shortName: + description: The short name of the directory, such as CORP. + type: string + size: + description: (For SimpleAD and ADConnector types) The size of + the directory (Small or Large are accepted values). Large by + default. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The directory type (SimpleAD, ADConnector or MicrosoftAD + are accepted values). Defaults to SimpleAD. + type: string + vpcSettings: + description: VPC related information about the directory. Fields + documented below. + properties: + subnetIds: + description: The identifiers of the subnets for the directory + servers (2 subnets in 2 different AZs). + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcId: + description: The identifier of the VPC that the directory + is in. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + alias: + description: The alias for the directory (must be unique amongst + all aliases in AWS). Required for enable_sso. + type: string + connectSettings: + description: Connector related information about the directory. + Fields documented below. + properties: + customerDnsIps: + description: The DNS IP addresses of the domain to connect + to. + items: + type: string + type: array + x-kubernetes-list-type: set + customerUsername: + description: The username corresponding to the password provided. + type: string + subnetIds: + description: The identifiers of the subnets for the directory + servers (2 subnets in 2 different AZs). + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcId: + description: The identifier of the VPC that the directory + is in. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + description: + description: A textual description for the directory. 
+ type: string + desiredNumberOfDomainControllers: + description: The number of domain controllers desired in the directory. + Minimum value of 2. Scaling of domain controllers is only supported + for MicrosoftAD directories. + type: number + edition: + description: The MicrosoftAD edition (Standard or Enterprise). + Defaults to Enterprise. + type: string + enableSso: + description: Whether to enable single-sign on for the directory. + Requires alias. Defaults to false. + type: boolean + name: + description: The fully qualified name for the directory, such + as corp.example.com + type: string + passwordSecretRef: + description: The password for the directory administrator or connector + user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + shortName: + description: The short name of the directory, such as CORP. + type: string + size: + description: (For SimpleAD and ADConnector types) The size of + the directory (Small or Large are accepted values). Large by + default. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The directory type (SimpleAD, ADConnector or MicrosoftAD + are accepted values). Defaults to SimpleAD. + type: string + vpcSettings: + description: VPC related information about the directory. Fields + documented below. + properties: + subnetIds: + description: The identifiers of the subnets for the directory + servers (2 subnets in 2 different AZs). + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdsRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdsSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcId: + description: The identifier of the VPC that the directory + is in. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + required: + - passwordSecretRef + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.passwordSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)' + status: + description: DirectoryStatus defines the observed state of Directory. + properties: + atProvider: + properties: + accessUrl: + description: The access URL for the directory, such as http://alias.awsapps.com. + type: string + alias: + description: The alias for the directory (must be unique amongst + all aliases in AWS). Required for enable_sso. + type: string + connectSettings: + description: Connector related information about the directory. + Fields documented below. + properties: + availabilityZones: + items: + type: string + type: array + x-kubernetes-list-type: set + connectIps: + description: The IP addresses of the AD Connector servers. + items: + type: string + type: array + x-kubernetes-list-type: set + customerDnsIps: + description: The DNS IP addresses of the domain to connect + to. + items: + type: string + type: array + x-kubernetes-list-type: set + customerUsername: + description: The username corresponding to the password provided. + type: string + subnetIds: + description: The identifiers of the subnets for the directory + servers (2 subnets in 2 different AZs). + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The identifier of the VPC that the directory + is in. 
+ type: string + type: object + description: + description: A textual description for the directory. + type: string + desiredNumberOfDomainControllers: + description: The number of domain controllers desired in the directory. + Minimum value of 2. Scaling of domain controllers is only supported + for MicrosoftAD directories. + type: number + dnsIpAddresses: + description: A list of IP addresses of the DNS servers for the + directory or connector. + items: + type: string + type: array + x-kubernetes-list-type: set + edition: + description: The MicrosoftAD edition (Standard or Enterprise). + Defaults to Enterprise. + type: string + enableSso: + description: Whether to enable single-sign on for the directory. + Requires alias. Defaults to false. + type: boolean + id: + description: The directory identifier. + type: string + name: + description: The fully qualified name for the directory, such + as corp.example.com + type: string + securityGroupId: + description: The ID of the security group created by the directory. + type: string + shortName: + description: The short name of the directory, such as CORP. + type: string + size: + description: (For SimpleAD and ADConnector types) The size of + the directory (Small or Large are accepted values). Large by + default. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: + description: The directory type (SimpleAD, ADConnector or MicrosoftAD + are accepted values). Defaults to SimpleAD. + type: string + vpcSettings: + description: VPC related information about the directory. Fields + documented below. 
+ properties: + availabilityZones: + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The identifiers of the subnets for the directory + servers (2 subnets in 2 different AZs). + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The identifier of the VPC that the directory + is in. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ds.aws.upbound.io_shareddirectories.yaml b/package/crds/ds.aws.upbound.io_shareddirectories.yaml index dacaf7921e..40288c8e96 100644 --- a/package/crds/ds.aws.upbound.io_shareddirectories.yaml +++ b/package/crds/ds.aws.upbound.io_shareddirectories.yaml @@ -594,3 +594,573 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SharedDirectory is the Schema for the SharedDirectorys API. Manages + a directory in your account (directory owner) shared with another account + (directory consumer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SharedDirectorySpec defines the desired state of SharedDirectory + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + directoryId: + description: Identifier of the Managed Microsoft AD directory + that you want to share with other accounts. + type: string + directoryIdRef: + description: Reference to a Directory in ds to populate directoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + directoryIdSelector: + description: Selector for a Directory in ds to populate directoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + method: + description: Method used when sharing a directory. Valid values + are ORGANIZATIONS and HANDSHAKE. Default is HANDSHAKE. + type: string + notesSecretRef: + description: Message sent by the directory owner to the directory + consumer to help the directory consumer administrator determine + whether to approve or reject the share invitation. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + target: + description: Identifier for the directory consumer account with + whom the directory is to be shared. See below. + properties: + id: + description: Identifier of the directory consumer account. + type: string + type: + description: Type of identifier to be used in the id field. + Valid value is ACCOUNT. Default is ACCOUNT. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + directoryId: + description: Identifier of the Managed Microsoft AD directory + that you want to share with other accounts. + type: string + directoryIdRef: + description: Reference to a Directory in ds to populate directoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + directoryIdSelector: + description: Selector for a Directory in ds to populate directoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + method: + description: Method used when sharing a directory. Valid values + are ORGANIZATIONS and HANDSHAKE. Default is HANDSHAKE. + type: string + notesSecretRef: + description: Message sent by the directory owner to the directory + consumer to help the directory consumer administrator determine + whether to approve or reject the share invitation. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + target: + description: Identifier for the directory consumer account with + whom the directory is to be shared. See below. + properties: + id: + description: Identifier of the directory consumer account. + type: string + type: + description: Type of identifier to be used in the id field. + Valid value is ACCOUNT. Default is ACCOUNT. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.target is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.target) + || (has(self.initProvider) && has(self.initProvider.target))' + status: + description: SharedDirectoryStatus defines the observed state of SharedDirectory. + properties: + atProvider: + properties: + directoryId: + description: Identifier of the Managed Microsoft AD directory + that you want to share with other accounts. + type: string + id: + description: Identifier of the shared directory. + type: string + method: + description: Method used when sharing a directory. Valid values + are ORGANIZATIONS and HANDSHAKE. Default is HANDSHAKE. + type: string + sharedDirectoryId: + description: Identifier of the directory that is stored in the + directory consumer account that corresponds to the shared directory + in the owner account. + type: string + target: + description: Identifier for the directory consumer account with + whom the directory is to be shared. See below. + properties: + id: + description: Identifier of the directory consumer account. + type: string + type: + description: Type of identifier to be used in the id field. + Valid value is ACCOUNT. Default is ACCOUNT. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dynamodb.aws.upbound.io_tables.yaml b/package/crds/dynamodb.aws.upbound.io_tables.yaml index a9403cab45..6afbac138c 100644 --- a/package/crds/dynamodb.aws.upbound.io_tables.yaml +++ b/package/crds/dynamodb.aws.upbound.io_tables.yaml @@ -1229,3 +1229,1166 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Table is the Schema for the Tables API. Provides a DynamoDB table + resource + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TableSpec defines the desired state of Table + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + attribute: + description: Set of nested attribute definitions. Only required + for hash_key and range_key attributes. See below. + items: + properties: + name: + description: Name of the attribute + type: string + type: + description: Attribute type. Valid values are S (string), + N (number), B (binary). + type: string + type: object + type: array + billingMode: + description: Controls how you are charged for read and write throughput + and how you manage capacity. The valid values are PROVISIONED + and PAY_PER_REQUEST. Defaults to PROVISIONED. + type: string + deletionProtectionEnabled: + description: Enables deletion protection for table. Defaults to + false. + type: boolean + globalSecondaryIndex: + description: Describe a GSI for the table; subject to the normal + limits on the number of GSIs, projected attributes, etc. See + below. + items: + properties: + hashKey: + description: Name of the hash key in the index; must be + defined as an attribute in the resource. + type: string + name: + description: Name of the index. 
+ type: string
+ nonKeyAttributes:
+ description: Only required with INCLUDE as a projection
+ type; a list of attributes to project into the index.
+ These do not need to be defined as attributes on the table.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ projectionType:
+ description: One of ALL, INCLUDE or KEYS_ONLY where ALL
+ projects every attribute into the index, KEYS_ONLY projects into
+ the index only the table and index hash_key and sort_key
+ attributes, INCLUDE projects into the index all of the
+ attributes that are defined in non_key_attributes in addition
+ to the attributes that KEYS_ONLY projects.
+ type: string
+ rangeKey:
+ description: Name of the range key; must be defined
+ type: string
+ readCapacity:
+ description: Number of read units for this index. Must be
+ set if billing_mode is set to PROVISIONED.
+ type: number
+ writeCapacity:
+ description: Number of write units for this index. Must
+ be set if billing_mode is set to PROVISIONED.
+ type: number
+ type: object
+ type: array
+ hashKey:
+ description: Attribute to use as the hash (partition) key. Must
+ also be defined as an attribute. See below.
+ type: string
+ importTable:
+ description: Import Amazon S3 data into a new table. See below.
+ properties:
+ inputCompressionType:
+ description: |-
+ Type of compression to be used on the input coming from the imported table.
+ Valid values are GZIP, ZSTD and NONE.
+ type: string
+ inputFormat:
+ description: |-
+ The format of the source data.
+ Valid values are CSV, DYNAMODB_JSON, and ION.
+ type: string
+ inputFormatOptions:
+ description: |-
+ Describe the format options for the data that was imported into the target table.
+ There is one value, csv.
+ See below.
+ properties:
+ csv:
+ description: 'This block contains the processing options
+ for the CSV file being imported:'
+ properties:
+ delimiter:
+ description: The delimiter used for separating items
+ in the CSV file being imported. 
+ type: string
+ headerList:
+ description: List of the headers used to specify a
+ common header for all source CSV files being imported.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ type: object
+ type: object
+ s3BucketSource:
+ description: |-
+ Values for the S3 bucket the source file is imported from.
+ See below.
+ properties:
+ bucket:
+ description: The S3 bucket that is being imported from.
+ type: string
+ bucketOwner:
+ description: The account number of the S3 bucket that
+ is being imported from.
+ type: string
+ keyPrefix:
+ description: The key prefix shared by all S3 Objects that
+ are being imported.
+ type: string
+ type: object
+ type: object
+ localSecondaryIndex:
+ description: Describe an LSI on the table; these can only be allocated
+ at creation so you cannot change this definition after you have
+ created the resource. See below.
+ items:
+ properties:
+ name:
+ description: Name of the index
+ type: string
+ nonKeyAttributes:
+ description: Only required with INCLUDE as a projection
+ type; a list of attributes to project into the index.
+ These do not need to be defined as attributes on the table.
+ items:
+ type: string
+ type: array
+ projectionType:
+ description: One of ALL, INCLUDE or KEYS_ONLY where ALL
+ projects every attribute into the index, KEYS_ONLY projects into
+ the index only the table and index hash_key and sort_key
+ attributes, INCLUDE projects into the index all of the
+ attributes that are defined in non_key_attributes in addition
+ to the attributes that KEYS_ONLY projects.
+ type: string
+ rangeKey:
+ description: Name of the range key.
+ type: string
+ type: object
+ type: array
+ pointInTimeRecovery:
+ description: Enable point-in-time recovery options. See below.
+ properties:
+ enabled:
+ description: Whether to enable point-in-time recovery. It
+ can take 10 minutes to enable for new tables. If the point_in_time_recovery
+ block is not provided, this defaults to false. 
+ type: boolean + type: object + rangeKey: + description: Attribute to use as the range (sort) key. Must also + be defined as an attribute, see below. + type: string + readCapacity: + description: Number of read units for this table. If the billing_mode + is PROVISIONED, this field is required. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + replica: + description: Configuration block(s) with DynamoDB Global Tables + V2 (version 2019.11.21) replication configurations. See below. + items: + properties: + kmsKeyArn: + description: 'ARN of the CMK that should be used for the + AWS KMS encryption. This argument should only be used + if the key is different from the default KMS-managed DynamoDB + key, alias/aws/dynamodb. Note: This attribute will not + be populated with the ARN of default keys.' + type: string + pointInTimeRecovery: + description: Whether to enable Point In Time Recovery for + the replica. Default is false. + type: boolean + propagateTags: + description: 'Whether to propagate the global table''s tags + to a replica. Default is false. Changes to tags only move + in one direction: from global (source) to replica. In + other words, tag drift on a replica will not trigger an + update. Tag or replica changes on the global table, whether + from drift or configuration changes, are propagated to + replicas. Changing from true to false on a subsequent + apply means replica tags are left as they were, unmanaged, + not deleted.' + type: boolean + regionName: + description: Region name of the replica. + type: string + type: object + type: array + restoreDateTime: + description: Time of the point-in-time recovery point to restore. + type: string + restoreSourceName: + description: Name of the table to restore. Must match the name + of an existing table. + type: string + restoreToLatestTime: + description: If set, restores table to the most recent point-in-time + recovery point. 
+ type: boolean + serverSideEncryption: + description: Encryption at rest options. AWS DynamoDB tables are + automatically encrypted at rest with an AWS-owned Customer Master + Key if this argument isn't specified. See below. + properties: + enabled: + description: Whether or not to enable encryption at rest using + an AWS managed KMS customer master key (CMK). If enabled + is false then server-side encryption is set to AWS-owned + key (shown as DEFAULT in the AWS console). Potentially confusingly, + if enabled is true and no kms_key_arn is specified then + server-side encryption is set to the default KMS-managed + key (shown as KMS in the AWS console). The AWS KMS documentation + explains the difference between AWS-owned and KMS-managed + keys. + type: boolean + kmsKeyArn: + description: 'ARN of the CMK that should be used for the AWS + KMS encryption. This argument should only be used if the + key is different from the default KMS-managed DynamoDB key, + alias/aws/dynamodb. Note: This attribute will not be populated + with the ARN of default keys.' + type: string + type: object + streamEnabled: + description: Whether Streams are enabled. + type: boolean + streamViewType: + description: When an item in the table is modified, StreamViewType + determines what information is written to the table's stream. + Valid values are KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES. + type: string + tableClass: + description: |- + Storage class of the table. + Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. + Default value is STANDARD. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + ttl: + description: Configuration block for TTL. See below. + properties: + attributeName: + description: Name of the table attribute to store the TTL + timestamp in. + type: string + enabled: + description: Whether TTL is enabled. 
+ type: boolean + type: object + writeCapacity: + description: Number of write units for this table. If the billing_mode + is PROVISIONED, this field is required. + type: number + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + attribute: + description: Set of nested attribute definitions. Only required + for hash_key and range_key attributes. See below. + items: + properties: + name: + description: Name of the attribute + type: string + type: + description: Attribute type. Valid values are S (string), + N (number), B (binary). + type: string + type: object + type: array + billingMode: + description: Controls how you are charged for read and write throughput + and how you manage capacity. The valid values are PROVISIONED + and PAY_PER_REQUEST. Defaults to PROVISIONED. + type: string + deletionProtectionEnabled: + description: Enables deletion protection for table. Defaults to + false. + type: boolean + globalSecondaryIndex: + description: Describe a GSI for the table; subject to the normal + limits on the number of GSIs, projected attributes, etc. See + below. + items: + properties: + hashKey: + description: Name of the hash key in the index; must be + defined as an attribute in the resource. + type: string + name: + description: Name of the index. 
+ type: string
+ nonKeyAttributes:
+ description: Only required with INCLUDE as a projection
+ type; a list of attributes to project into the index.
+ These do not need to be defined as attributes on the table.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ projectionType:
+ description: One of ALL, INCLUDE or KEYS_ONLY where ALL
+ projects every attribute into the index, KEYS_ONLY projects into
+ the index only the table and index hash_key and sort_key
+ attributes, INCLUDE projects into the index all of the
+ attributes that are defined in non_key_attributes in addition
+ to the attributes that KEYS_ONLY projects.
+ type: string
+ rangeKey:
+ description: Name of the range key; must be defined
+ type: string
+ readCapacity:
+ description: Number of read units for this index. Must be
+ set if billing_mode is set to PROVISIONED.
+ type: number
+ writeCapacity:
+ description: Number of write units for this index. Must
+ be set if billing_mode is set to PROVISIONED.
+ type: number
+ type: object
+ type: array
+ hashKey:
+ description: Attribute to use as the hash (partition) key. Must
+ also be defined as an attribute. See below.
+ type: string
+ importTable:
+ description: Import Amazon S3 data into a new table. See below.
+ properties:
+ inputCompressionType:
+ description: |-
+ Type of compression to be used on the input coming from the imported table.
+ Valid values are GZIP, ZSTD and NONE.
+ type: string
+ inputFormat:
+ description: |-
+ The format of the source data.
+ Valid values are CSV, DYNAMODB_JSON, and ION.
+ type: string
+ inputFormatOptions:
+ description: |-
+ Describe the format options for the data that was imported into the target table.
+ There is one value, csv.
+ See below.
+ properties:
+ csv:
+ description: 'This block contains the processing options
+ for the CSV file being imported:'
+ properties:
+ delimiter:
+ description: The delimiter used for separating items
+ in the CSV file being imported. 
+ type: string
+ headerList:
+ description: List of the headers used to specify a
+ common header for all source CSV files being imported.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ type: object
+ type: object
+ s3BucketSource:
+ description: |-
+ Values for the S3 bucket the source file is imported from.
+ See below.
+ properties:
+ bucket:
+ description: The S3 bucket that is being imported from.
+ type: string
+ bucketOwner:
+ description: The account number of the S3 bucket that
+ is being imported from.
+ type: string
+ keyPrefix:
+ description: The key prefix shared by all S3 Objects that
+ are being imported.
+ type: string
+ type: object
+ type: object
+ localSecondaryIndex:
+ description: Describe an LSI on the table; these can only be allocated
+ at creation so you cannot change this definition after you have
+ created the resource. See below.
+ items:
+ properties:
+ name:
+ description: Name of the index
+ type: string
+ nonKeyAttributes:
+ description: Only required with INCLUDE as a projection
+ type; a list of attributes to project into the index.
+ These do not need to be defined as attributes on the table.
+ items:
+ type: string
+ type: array
+ projectionType:
+ description: One of ALL, INCLUDE or KEYS_ONLY where ALL
+ projects every attribute into the index, KEYS_ONLY projects into
+ the index only the table and index hash_key and sort_key
+ attributes, INCLUDE projects into the index all of the
+ attributes that are defined in non_key_attributes in addition
+ to the attributes that KEYS_ONLY projects.
+ type: string
+ rangeKey:
+ description: Name of the range key.
+ type: string
+ type: object
+ type: array
+ pointInTimeRecovery:
+ description: Enable point-in-time recovery options. See below.
+ properties:
+ enabled:
+ description: Whether to enable point-in-time recovery. It
+ can take 10 minutes to enable for new tables. If the point_in_time_recovery
+ block is not provided, this defaults to false. 
+ type: boolean + type: object + rangeKey: + description: Attribute to use as the range (sort) key. Must also + be defined as an attribute, see below. + type: string + readCapacity: + description: Number of read units for this table. If the billing_mode + is PROVISIONED, this field is required. + type: number + replica: + description: Configuration block(s) with DynamoDB Global Tables + V2 (version 2019.11.21) replication configurations. See below. + items: + properties: + kmsKeyArn: + description: 'ARN of the CMK that should be used for the + AWS KMS encryption. This argument should only be used + if the key is different from the default KMS-managed DynamoDB + key, alias/aws/dynamodb. Note: This attribute will not + be populated with the ARN of default keys.' + type: string + pointInTimeRecovery: + description: Whether to enable Point In Time Recovery for + the replica. Default is false. + type: boolean + propagateTags: + description: 'Whether to propagate the global table''s tags + to a replica. Default is false. Changes to tags only move + in one direction: from global (source) to replica. In + other words, tag drift on a replica will not trigger an + update. Tag or replica changes on the global table, whether + from drift or configuration changes, are propagated to + replicas. Changing from true to false on a subsequent + apply means replica tags are left as they were, unmanaged, + not deleted.' + type: boolean + regionName: + description: Region name of the replica. + type: string + type: object + type: array + restoreDateTime: + description: Time of the point-in-time recovery point to restore. + type: string + restoreSourceName: + description: Name of the table to restore. Must match the name + of an existing table. + type: string + restoreToLatestTime: + description: If set, restores table to the most recent point-in-time + recovery point. + type: boolean + serverSideEncryption: + description: Encryption at rest options. 
AWS DynamoDB tables are + automatically encrypted at rest with an AWS-owned Customer Master + Key if this argument isn't specified. See below. + properties: + enabled: + description: Whether or not to enable encryption at rest using + an AWS managed KMS customer master key (CMK). If enabled + is false then server-side encryption is set to AWS-owned + key (shown as DEFAULT in the AWS console). Potentially confusingly, + if enabled is true and no kms_key_arn is specified then + server-side encryption is set to the default KMS-managed + key (shown as KMS in the AWS console). The AWS KMS documentation + explains the difference between AWS-owned and KMS-managed + keys. + type: boolean + kmsKeyArn: + description: 'ARN of the CMK that should be used for the AWS + KMS encryption. This argument should only be used if the + key is different from the default KMS-managed DynamoDB key, + alias/aws/dynamodb. Note: This attribute will not be populated + with the ARN of default keys.' + type: string + type: object + streamEnabled: + description: Whether Streams are enabled. + type: boolean + streamViewType: + description: When an item in the table is modified, StreamViewType + determines what information is written to the table's stream. + Valid values are KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES. + type: string + tableClass: + description: |- + Storage class of the table. + Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. + Default value is STANDARD. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + ttl: + description: Configuration block for TTL. See below. + properties: + attributeName: + description: Name of the table attribute to store the TTL + timestamp in. + type: string + enabled: + description: Whether TTL is enabled. + type: boolean + type: object + writeCapacity: + description: Number of write units for this table. 
If the billing_mode + is PROVISIONED, this field is required. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TableStatus defines the observed state of Table. + properties: + atProvider: + properties: + arn: + description: ARN of the table + type: string + attribute: + description: Set of nested attribute definitions. Only required + for hash_key and range_key attributes. See below. 
+ items:
+ properties:
+ name:
+ description: Name of the attribute
+ type: string
+ type:
+ description: Attribute type. Valid values are S (string),
+ N (number), B (binary).
+ type: string
+ type: object
+ type: array
+ billingMode:
+ description: Controls how you are charged for read and write throughput
+ and how you manage capacity. The valid values are PROVISIONED
+ and PAY_PER_REQUEST. Defaults to PROVISIONED.
+ type: string
+ deletionProtectionEnabled:
+ description: Enables deletion protection for table. Defaults to
+ false.
+ type: boolean
+ globalSecondaryIndex:
+ description: Describe a GSI for the table; subject to the normal
+ limits on the number of GSIs, projected attributes, etc. See
+ below.
+ items:
+ properties:
+ hashKey:
+ description: Name of the hash key in the index; must be
+ defined as an attribute in the resource.
+ type: string
+ name:
+ description: Name of the index.
+ type: string
+ nonKeyAttributes:
+ description: Only required with INCLUDE as a projection
+ type; a list of attributes to project into the index.
+ These do not need to be defined as attributes on the table.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ projectionType:
+ description: One of ALL, INCLUDE or KEYS_ONLY where ALL
+ projects every attribute into the index, KEYS_ONLY projects into
+ the index only the table and index hash_key and sort_key
+ attributes, INCLUDE projects into the index all of the
+ attributes that are defined in non_key_attributes in addition
+ to the attributes that KEYS_ONLY projects.
+ type: string
+ rangeKey:
+ description: Name of the range key; must be defined
+ type: string
+ readCapacity:
+ description: Number of read units for this index. Must be
+ set if billing_mode is set to PROVISIONED.
+ type: number
+ writeCapacity:
+ description: Number of write units for this index. Must
+ be set if billing_mode is set to PROVISIONED. 
+ type: number + type: object + type: array + hashKey: + description: Attribute to use as the hash (partition) key. Must + also be defined as an attribute. See below. + type: string + id: + description: Name of the table + type: string + importTable: + description: Import Amazon S3 data into a new table. See below. + properties: + inputCompressionType: + description: |- + Type of compression to be used on the input coming from the imported table. + Valid values are GZIP, ZSTD and NONE. + type: string + inputFormat: + description: |- + The format of the source data. + Valid values are CSV, DYNAMODB_JSON, and ION. + type: string + inputFormatOptions: + description: |- + Describe the format options for the data that was imported into the target table. + There is one value, csv. + See below. + properties: + csv: + description: 'This block contains the processing options + for the CSV file being imported:' + properties: + delimiter: + description: The delimiter used for separating items + in the CSV file being imported. + type: string + headerList: + description: List of the headers used to specify a + common header for all source CSV files being imported. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + s3BucketSource: + description: |- + Values for the S3 bucket the source file is imported from. + See below. + properties: + bucket: + description: The S3 bucket that is being imported from. + type: string + bucketOwner: + description: The account number of the S3 bucket that + is being imported from. + type: string + keyPrefix: + description: The key prefix shared by all S3 Objects that + are being imported. + type: string + type: object + type: object + localSecondaryIndex: + description: Describe an LSI on the table; these can only be allocated + at creation so you cannot change this definition after you have + created the resource. See below. 
+ items: + properties: + name: + description: Name of the index + type: string + nonKeyAttributes: + description: Only required with INCLUDE as a projection + type; a list of attributes to project into the index. + These do not need to be defined as attributes on the table. + items: + type: string + type: array + projectionType: + description: One of ALL, INCLUDE or KEYS_ONLY where ALL + projects every attribute into the index, KEYS_ONLY projects into + the index only the table and index hash_key and sort_key + attributes , INCLUDE projects into the index all of the + attributes that are defined in non_key_attributes in addition + to the attributes that thatKEYS_ONLY project. + type: string + rangeKey: + description: Name of the range key. + type: string + type: object + type: array + pointInTimeRecovery: + description: Enable point-in-time recovery options. See below. + properties: + enabled: + description: Whether to enable point-in-time recovery. It + can take 10 minutes to enable for new tables. If the point_in_time_recovery + block is not provided, this defaults to false. + type: boolean + type: object + rangeKey: + description: Attribute to use as the range (sort) key. Must also + be defined as an attribute, see below. + type: string + readCapacity: + description: Number of read units for this table. If the billing_mode + is PROVISIONED, this field is required. + type: number + replica: + description: Configuration block(s) with DynamoDB Global Tables + V2 (version 2019.11.21) replication configurations. See below. + items: + properties: + arn: + description: ARN of the replica + type: string + kmsKeyArn: + description: 'ARN of the CMK that should be used for the + AWS KMS encryption. This argument should only be used + if the key is different from the default KMS-managed DynamoDB + key, alias/aws/dynamodb. Note: This attribute will not + be populated with the ARN of default keys.' 
+ type: string + pointInTimeRecovery: + description: Whether to enable Point In Time Recovery for + the replica. Default is false. + type: boolean + propagateTags: + description: 'Whether to propagate the global table''s tags + to a replica. Default is false. Changes to tags only move + in one direction: from global (source) to replica. In + other words, tag drift on a replica will not trigger an + update. Tag or replica changes on the global table, whether + from drift or configuration changes, are propagated to + replicas. Changing from true to false on a subsequent + apply means replica tags are left as they were, unmanaged, + not deleted.' + type: boolean + regionName: + description: Region name of the replica. + type: string + streamArn: + description: ARN of the Table Stream. Only available when + stream_enabled = true + type: string + streamLabel: + description: Timestamp, in ISO 8601 format, for this stream. + Note that this timestamp is not a unique identifier for + the stream on its own. However, the combination of AWS + customer ID, table name and this field is guaranteed to + be unique. It can be used for creating CloudWatch Alarms. + Only available when stream_enabled = true. + type: string + type: object + type: array + restoreDateTime: + description: Time of the point-in-time recovery point to restore. + type: string + restoreSourceName: + description: Name of the table to restore. Must match the name + of an existing table. + type: string + restoreToLatestTime: + description: If set, restores table to the most recent point-in-time + recovery point. + type: boolean + serverSideEncryption: + description: Encryption at rest options. AWS DynamoDB tables are + automatically encrypted at rest with an AWS-owned Customer Master + Key if this argument isn't specified. See below. + properties: + enabled: + description: Whether or not to enable encryption at rest using + an AWS managed KMS customer master key (CMK). 
If enabled + is false then server-side encryption is set to AWS-owned + key (shown as DEFAULT in the AWS console). Potentially confusingly, + if enabled is true and no kms_key_arn is specified then + server-side encryption is set to the default KMS-managed + key (shown as KMS in the AWS console). The AWS KMS documentation + explains the difference between AWS-owned and KMS-managed + keys. + type: boolean + kmsKeyArn: + description: 'ARN of the CMK that should be used for the AWS + KMS encryption. This argument should only be used if the + key is different from the default KMS-managed DynamoDB key, + alias/aws/dynamodb. Note: This attribute will not be populated + with the ARN of default keys.' + type: string + type: object + streamArn: + description: ARN of the Table Stream. Only available when stream_enabled + = true + type: string + streamEnabled: + description: Whether Streams are enabled. + type: boolean + streamLabel: + description: Timestamp, in ISO 8601 format, for this stream. Note + that this timestamp is not a unique identifier for the stream + on its own. However, the combination of AWS customer ID, table + name and this field is guaranteed to be unique. It can be used + for creating CloudWatch Alarms. Only available when stream_enabled + = true. + type: string + streamViewType: + description: When an item in the table is modified, StreamViewType + determines what information is written to the table's stream. + Valid values are KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES. + type: string + tableClass: + description: |- + Storage class of the table. + Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. + Default value is STANDARD. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + ttl: + description: Configuration block for TTL. See below. + properties: + attributeName: + description: Name of the table attribute to store the TTL + timestamp in. + type: string + enabled: + description: Whether TTL is enabled. + type: boolean + type: object + writeCapacity: + description: Number of write units for this table. If the billing_mode + is PROVISIONED, this field is required. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_ebssnapshotimports.yaml b/package/crds/ec2.aws.upbound.io_ebssnapshotimports.yaml index 6fdf1a4928..972a574877 100644 --- a/package/crds/ec2.aws.upbound.io_ebssnapshotimports.yaml +++ b/package/crds/ec2.aws.upbound.io_ebssnapshotimports.yaml @@ -787,3 +787,754 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EBSSnapshotImport is the Schema for the EBSSnapshotImports API. + Provides an elastic block storage snapshot import resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EBSSnapshotImportSpec defines the desired state of EBSSnapshotImport + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clientData: + description: The client-specific data. Detailed below. + properties: + comment: + description: A user-defined comment about the disk upload. + type: string + uploadEnd: + description: The time that the disk upload ends. + type: string + uploadSize: + description: The size of the uploaded disk image, in GiB. + type: number + uploadStart: + description: The time that the disk upload starts. + type: string + type: object + description: + description: The description string for the import snapshot task. + type: string + diskContainer: + description: Information about the disk container. Detailed below. + properties: + description: + description: The description of the disk image being imported. + type: string + format: + description: The format of the disk image being imported. + One of VHD or VMDK. + type: string + url: + description: The URL to the Amazon S3-based disk image being + imported. It can either be a https URL (https://..) or an + Amazon S3 URL (s3://..). 
One of url or user_bucket must + be set. + type: string + userBucket: + description: The Amazon S3 bucket for the disk image. One + of url or user_bucket must be set. Detailed below. + properties: + s3Bucket: + description: The name of the Amazon S3 bucket where the + disk image is located. + type: string + s3Key: + description: The file name of the disk image. + type: string + type: object + type: object + encrypted: + description: Specifies whether the destination snapshot of the + imported image should be encrypted. The default KMS key for + EBS is used unless you specify a non-default KMS key using KmsKeyId. + type: boolean + kmsKeyId: + description: An identifier for the symmetric KMS key to use when + creating the encrypted snapshot. This parameter is only required + if you want to use a non-default KMS key; if this parameter + is not specified, the default KMS key for EBS is used. If a + KmsKeyId is specified, the Encrypted flag must also be set. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + permanentRestore: + description: Indicates whether to permanently restore an archived + snapshot. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleName: + description: 'The name of the IAM Role the VM Import/Export service + will assume. This role needs certain permissions. See https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-role. + Default: vmimport' + type: string + storageTier: + description: The name of the storage tier. Valid values are archive + and standard. Default value is standard. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + temporaryRestoreDays: + description: Specifies the number of days for which to temporarily + restore an archived snapshot. Required for temporary restores + only. The snapshot will be automatically re-archived after this + period. + type: number + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clientData: + description: The client-specific data. Detailed below. + properties: + comment: + description: A user-defined comment about the disk upload. + type: string + uploadEnd: + description: The time that the disk upload ends. + type: string + uploadSize: + description: The size of the uploaded disk image, in GiB. + type: number + uploadStart: + description: The time that the disk upload starts. + type: string + type: object + description: + description: The description string for the import snapshot task. + type: string + diskContainer: + description: Information about the disk container. Detailed below. + properties: + description: + description: The description of the disk image being imported. + type: string + format: + description: The format of the disk image being imported. + One of VHD or VMDK. 
+ type: string + url: + description: The URL to the Amazon S3-based disk image being + imported. It can either be a https URL (https://..) or an + Amazon S3 URL (s3://..). One of url or user_bucket must + be set. + type: string + userBucket: + description: The Amazon S3 bucket for the disk image. One + of url or user_bucket must be set. Detailed below. + properties: + s3Bucket: + description: The name of the Amazon S3 bucket where the + disk image is located. + type: string + s3Key: + description: The file name of the disk image. + type: string + type: object + type: object + encrypted: + description: Specifies whether the destination snapshot of the + imported image should be encrypted. The default KMS key for + EBS is used unless you specify a non-default KMS key using KmsKeyId. + type: boolean + kmsKeyId: + description: An identifier for the symmetric KMS key to use when + creating the encrypted snapshot. This parameter is only required + if you want to use a non-default KMS key; if this parameter + is not specified, the default KMS key for EBS is used. If a + KmsKeyId is specified, the Encrypted flag must also be set. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + permanentRestore: + description: Indicates whether to permanently restore an archived + snapshot. + type: boolean + roleName: + description: 'The name of the IAM Role the VM Import/Export service + will assume. This role needs certain permissions. See https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-role. + Default: vmimport' + type: string + storageTier: + description: The name of the storage tier. Valid values are archive + and standard. Default value is standard. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + temporaryRestoreDays: + description: Specifies the number of days for which to temporarily + restore an archived snapshot. Required for temporary restores + only. The snapshot will be automatically re-archived after this + period. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.diskContainer is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.diskContainer) + || (has(self.initProvider) && has(self.initProvider.diskContainer))' + status: + description: EBSSnapshotImportStatus defines the observed state of EBSSnapshotImport. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the EBS Snapshot. + type: string + clientData: + description: The client-specific data. Detailed below. + properties: + comment: + description: A user-defined comment about the disk upload. + type: string + uploadEnd: + description: The time that the disk upload ends. + type: string + uploadSize: + description: The size of the uploaded disk image, in GiB. + type: number + uploadStart: + description: The time that the disk upload starts. + type: string + type: object + dataEncryptionKeyId: + description: The data encryption key identifier for the snapshot. + type: string + description: + description: The description string for the import snapshot task. + type: string + diskContainer: + description: Information about the disk container. Detailed below. + properties: + description: + description: The description of the disk image being imported. + type: string + format: + description: The format of the disk image being imported. + One of VHD or VMDK. + type: string + url: + description: The URL to the Amazon S3-based disk image being + imported. It can either be a https URL (https://..) or an + Amazon S3 URL (s3://..). One of url or user_bucket must + be set. + type: string + userBucket: + description: The Amazon S3 bucket for the disk image. One + of url or user_bucket must be set. Detailed below. 
+ properties: + s3Bucket: + description: The name of the Amazon S3 bucket where the + disk image is located. + type: string + s3Key: + description: The file name of the disk image. + type: string + type: object + type: object + encrypted: + description: Specifies whether the destination snapshot of the + imported image should be encrypted. The default KMS key for + EBS is used unless you specify a non-default KMS key using KmsKeyId. + type: boolean + id: + description: The snapshot ID (e.g., snap-59fcb34e). + type: string + kmsKeyId: + description: An identifier for the symmetric KMS key to use when + creating the encrypted snapshot. This parameter is only required + if you want to use a non-default KMS key; if this parameter + is not specified, the default KMS key for EBS is used. If a + KmsKeyId is specified, the Encrypted flag must also be set. + type: string + outpostArn: + description: Amazon Resource Name (ARN) of the EBS Snapshot. + type: string + ownerAlias: + description: Value from an Amazon-maintained list (amazon, aws-marketplace, + microsoft) of snapshot owners. + type: string + ownerId: + description: The AWS account ID of the EBS snapshot owner. + type: string + permanentRestore: + description: Indicates whether to permanently restore an archived + snapshot. + type: boolean + roleName: + description: 'The name of the IAM Role the VM Import/Export service + will assume. This role needs certain permissions. See https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-role. + Default: vmimport' + type: string + storageTier: + description: The name of the storage tier. Valid values are archive + and standard. Default value is standard. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + temporaryRestoreDays: + description: Specifies the number of days for which to temporarily + restore an archived snapshot. Required for temporary restores + only. The snapshot will be automatically re-archived after this + period. + type: number + volumeId: + description: The snapshot ID (e.g., snap-59fcb34e). + type: string + volumeSize: + description: The size of the drive in GiBs. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_flowlogs.yaml b/package/crds/ec2.aws.upbound.io_flowlogs.yaml index cb160d64b3..4162be8a1a 100644 --- a/package/crds/ec2.aws.upbound.io_flowlogs.yaml +++ b/package/crds/ec2.aws.upbound.io_flowlogs.yaml @@ -1172,3 +1172,1151 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FlowLog is the Schema for the FlowLogs API. Provides a VPC/Subnet/ENI + Flow Log + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FlowLogSpec defines the desired state of FlowLog + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deliverCrossAccountRole: + description: ARN of the IAM role that allows Amazon EC2 to publish + flow logs across accounts. + type: string + destinationOptions: + description: Describes the destination options for a flow log. + More details below. + properties: + fileFormat: + description: 'The format for the flow log. Default value: + plain-text. Valid values: plain-text, parquet.' + type: string + hiveCompatiblePartitions: + description: 'Indicates whether to use Hive-compatible prefixes + for flow logs stored in Amazon S3. Default value: false.' + type: boolean + perHourPartition: + description: 'Indicates whether to partition the flow log + per hour. This reduces the cost and response time for queries. + Default value: false.' 
+ type: boolean + type: object + eniId: + description: Elastic Network Interface ID to attach to + type: string + iamRoleArn: + description: The ARN for the IAM role that's used to post flow + logs to a CloudWatch Logs log group + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logDestination: + description: The ARN of the logging destination. Either log_destination + or log_group_name must be set. + type: string + logDestinationRef: + description: Reference to a Group in cloudwatchlogs to populate + logDestination. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logDestinationSelector: + description: Selector for a Group in cloudwatchlogs to populate + logDestination. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logDestinationType: + description: 'The type of the logging destination. Valid values: + cloud-watch-logs, s3, kinesis-data-firehose. Default: cloud-watch-logs.' + type: string + logFormat: + description: 'The fields to include in the flow log record. Accepted + format example: "$${interface-id} $${srcaddr} $${dstaddr} $${srcport} + $${dstport}".' + type: string + logGroupName: + description: 'Deprecated: Use log_destination instead. The name + of the CloudWatch log group. Either log_group_name or log_destination + must be set.' + type: string + maxAggregationInterval: + description: |- + The maximum interval of time + during which a flow of packets is captured and aggregated into a flow + log record. Valid Values: 60 seconds (1 minute) or 600 seconds (10 + minutes). Default: 600. When transit_gateway_id or transit_gateway_attachment_id is specified, max_aggregation_interval must be 60 seconds (1 minute). + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + subnetId: + description: Subnet ID to attach to + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + trafficType: + description: 'The type of traffic to capture. Valid values: ACCEPT,REJECT, + ALL.' + type: string + transitGatewayAttachmentId: + description: Transit Gateway Attachment ID to attach to + type: string + transitGatewayId: + description: Transit Gateway ID to attach to + type: string + vpcId: + description: VPC ID to attach to + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deliverCrossAccountRole: + description: ARN of the IAM role that allows Amazon EC2 to publish + flow logs across accounts. + type: string + destinationOptions: + description: Describes the destination options for a flow log. + More details below. + properties: + fileFormat: + description: 'The format for the flow log. Default value: + plain-text. Valid values: plain-text, parquet.' + type: string + hiveCompatiblePartitions: + description: 'Indicates whether to use Hive-compatible prefixes + for flow logs stored in Amazon S3. 
Default value: false.' + type: boolean + perHourPartition: + description: 'Indicates whether to partition the flow log + per hour. This reduces the cost and response time for queries. + Default value: false.' + type: boolean + type: object + eniId: + description: Elastic Network Interface ID to attach to + type: string + iamRoleArn: + description: The ARN for the IAM role that's used to post flow + logs to a CloudWatch Logs log group + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logDestination: + description: The ARN of the logging destination. Either log_destination + or log_group_name must be set. + type: string + logDestinationRef: + description: Reference to a Group in cloudwatchlogs to populate + logDestination. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logDestinationSelector: + description: Selector for a Group in cloudwatchlogs to populate + logDestination. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logDestinationType: + description: 'The type of the logging destination. Valid values: + cloud-watch-logs, s3, kinesis-data-firehose. Default: cloud-watch-logs.' + type: string + logFormat: + description: 'The fields to include in the flow log record. Accepted + format example: "$${interface-id} $${srcaddr} $${dstaddr} $${srcport} + $${dstport}".' + type: string + logGroupName: + description: 'Deprecated: Use log_destination instead. The name + of the CloudWatch log group. Either log_group_name or log_destination + must be set.' + type: string + maxAggregationInterval: + description: |- + The maximum interval of time + during which a flow of packets is captured and aggregated into a flow + log record. Valid Values: 60 seconds (1 minute) or 600 seconds (10 + minutes). Default: 600. 
When transit_gateway_id or transit_gateway_attachment_id is specified, max_aggregation_interval must be 60 seconds (1 minute). + type: number + subnetId: + description: Subnet ID to attach to + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + trafficType: + description: 'The type of traffic to capture. Valid values: ACCEPT,REJECT, + ALL.' + type: string + transitGatewayAttachmentId: + description: Transit Gateway Attachment ID to attach to + type: string + transitGatewayId: + description: Transit Gateway ID to attach to + type: string + vpcId: + description: VPC ID to attach to + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: FlowLogStatus defines the observed state of FlowLog. + properties: + atProvider: + properties: + arn: + description: The ARN of the Flow Log. + type: string + deliverCrossAccountRole: + description: ARN of the IAM role that allows Amazon EC2 to publish + flow logs across accounts. + type: string + destinationOptions: + description: Describes the destination options for a flow log. + More details below. + properties: + fileFormat: + description: 'The format for the flow log. Default value: + plain-text. Valid values: plain-text, parquet.' + type: string + hiveCompatiblePartitions: + description: 'Indicates whether to use Hive-compatible prefixes + for flow logs stored in Amazon S3. Default value: false.' + type: boolean + perHourPartition: + description: 'Indicates whether to partition the flow log + per hour. This reduces the cost and response time for queries. + Default value: false.' 
+ type: boolean + type: object + eniId: + description: Elastic Network Interface ID to attach to + type: string + iamRoleArn: + description: The ARN for the IAM role that's used to post flow + logs to a CloudWatch Logs log group + type: string + id: + description: The Flow Log ID + type: string + logDestination: + description: The ARN of the logging destination. Either log_destination + or log_group_name must be set. + type: string + logDestinationType: + description: 'The type of the logging destination. Valid values: + cloud-watch-logs, s3, kinesis-data-firehose. Default: cloud-watch-logs.' + type: string + logFormat: + description: 'The fields to include in the flow log record. Accepted + format example: "$${interface-id} $${srcaddr} $${dstaddr} $${srcport} + $${dstport}".' + type: string + logGroupName: + description: 'Deprecated: Use log_destination instead. The name + of the CloudWatch log group. Either log_group_name or log_destination + must be set.' + type: string + maxAggregationInterval: + description: |- + The maximum interval of time + during which a flow of packets is captured and aggregated into a flow + log record. Valid Values: 60 seconds (1 minute) or 600 seconds (10 + minutes). Default: 600. When transit_gateway_id or transit_gateway_attachment_id is specified, max_aggregation_interval must be 60 seconds (1 minute). + type: number + subnetId: + description: Subnet ID to attach to + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + trafficType: + description: 'The type of traffic to capture. Valid values: ACCEPT,REJECT, + ALL.' 
+ type: string + transitGatewayAttachmentId: + description: Transit Gateway Attachment ID to attach to + type: string + transitGatewayId: + description: Transit Gateway ID to attach to + type: string + vpcId: + description: VPC ID to attach to + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_instances.yaml b/package/crds/ec2.aws.upbound.io_instances.yaml index a081e995f1..874d6ffa20 100644 --- a/package/crds/ec2.aws.upbound.io_instances.yaml +++ b/package/crds/ec2.aws.upbound.io_instances.yaml @@ -2783,3 +2783,2681 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Instance is the Schema for the Instances API. Provides an EC2 + instance resource. This allows instances to be created, updated, and deleted. + Instances also support provisioning. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceSpec defines the desired state of Instance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + ami: + description: AMI to use for the instance. Required unless launch_template + is specified and the Launch Template specifes an AMI. If an + AMI is specified in the Launch Template, setting ami will override + the AMI specified in the Launch Template. + type: string + associatePublicIpAddress: + description: Whether to associate a public IP address with an + instance in a VPC. + type: boolean + availabilityZone: + description: AZ to start the instance in. + type: string + capacityReservationSpecification: + description: Describes an instance's Capacity Reservation targeting + option. See Capacity Reservation Specification below for more + details. + properties: + capacityReservationPreference: + description: 'Indicates the instance''s Capacity Reservation + preferences. Can be "open" or "none". (Default: "open").' + type: string + capacityReservationTarget: + description: Information about the target Capacity Reservation. + See Capacity Reservation Target below for more details. 
+ properties: + capacityReservationId: + description: ID of the Capacity Reservation in which to + run the instance. + type: string + capacityReservationResourceGroupArn: + description: ARN of the Capacity Reservation resource + group in which to run the instance. + type: string + type: object + type: object + cpuCoreCount: + description: Sets the number of CPU cores for an instance. This + option is only supported on creation of instance type that support + CPU Options CPU Cores and Threads Per CPU Core Per Instance + Type - specifying this option for unsupported instance types + will return an error from the EC2 API. + type: number + cpuOptions: + description: The CPU options for the instance. See CPU Options + below for more details. + properties: + amdSevSnp: + description: Indicates whether to enable the instance for + AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and + C6a instance types only. Valid values are enabled and disabled. + type: string + coreCount: + description: Sets the number of CPU cores for an instance. + This option is only supported on creation of instance type + that support CPU Options CPU Cores and Threads Per CPU Core + Per Instance Type - specifying this option for unsupported + instance types will return an error from the EC2 API. + type: number + threadsPerCore: + description: If set to 1, hyperthreading is disabled on the + launched instance. Defaults to 2 if not set. See Optimizing + CPU Options for more information. + type: number + type: object + cpuThreadsPerCore: + description: If set to 1, hyperthreading is disabled on the launched + instance. Defaults to 2 if not set. See Optimizing CPU Options + for more information. + type: number + creditSpecification: + description: Configuration block for customizing the credit specification + of the instance. See Credit Specification below for more details. + Removing this configuration on existing instances will only + stop managing it. 
It will not change the configuration back + to the default for the instance type. + properties: + cpuCredits: + description: Credit option for CPU usage. Valid values include + standard or unlimited. T3 instances are launched as unlimited + by default. T2 instances are launched as standard by default. + type: string + type: object + disableApiStop: + description: If true, enables EC2 Instance Stop Protection. + type: boolean + disableApiTermination: + description: If true, enables EC2 Instance Termination Protection. + type: boolean + ebsBlockDevice: + description: One or more configuration blocks with additional + EBS block devices to attach to the instance. Block device configurations + only apply on resource creation. See Block Devices below for + details on attributes and drift detection. When accessing this + as an attribute reference, it is a set of objects. + items: + properties: + deleteOnTermination: + description: Whether the volume should be destroyed on instance + termination. Defaults to true. + type: boolean + deviceName: + description: Name of the device to mount. + type: string + encrypted: + description: Enables EBS encryption on the volume. Defaults + to false. Cannot be used with snapshot_id. Must be configured + to perform drift detection. + type: boolean + iops: + description: Amount of provisioned IOPS. Only valid for + volume_type of io1, io2 or gp3. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the KMS Key to + use when encrypting the volume. Must be configured to + perform drift detection. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + snapshotId: + description: Snapshot ID to mount. + type: string + tags: + additionalProperties: + type: string + description: Map of tags to assign to the device. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + description: Throughput to provision for a volume in mebibytes + per second (MiB/s). This is only valid for volume_type + of gp3. + type: number + volumeSize: + description: Size of the volume in gibibytes (GiB). + type: number + volumeType: + description: Type of volume. Valid values include standard, + gp2, gp3, io1, io2, sc1, or st1. Defaults to gp2. + type: string + type: object + type: array + ebsOptimized: + description: If true, the launched EC2 instance will be EBS-optimized. + Note that if this is not set on an instance type that is optimized + by default then this will show as disabled but if the instance + type is optimized by default then there is no need to set this + and there is no effect to disabling it. See the EBS Optimized + section of the AWS User Guide for more information. + type: boolean + enclaveOptions: + description: Enable Nitro Enclaves on launched instances. See + Enclave Options below for more details. + properties: + enabled: + description: Whether Nitro Enclaves will be enabled on the + instance. Defaults to false. + type: boolean + type: object + ephemeralBlockDevice: + description: One or more configuration blocks to customize Ephemeral + (also known as "Instance Store") volumes on the instance. See + Block Devices below for details. When accessing this as an attribute + reference, it is a set of objects. + items: + properties: + deviceName: + description: Name of the block device to mount on the instance. + type: string + noDevice: + description: Suppresses the specified device included in + the AMI's block device mapping. + type: boolean + virtualName: + description: Instance Store Device Name (e.g., ephemeral0). 
+ type: string + type: object + type: array + getPasswordData: + description: If true, wait for password data to become available + and retrieve it. Useful for getting the administrator password + for instances running Microsoft Windows. The password data is + exported to the password_data attribute. See GetPasswordData + for more information. + type: boolean + hibernation: + description: If true, the launched EC2 instance will support hibernation. + type: boolean + hostId: + description: ID of a dedicated host that the instance will be + assigned to. Use when an instance is to be launched on a specific + dedicated host. + type: string + hostResourceGroupArn: + description: ARN of the host resource group in which to launch + the instances. If you specify an ARN, omit the tenancy parameter + or set it to host. + type: string + iamInstanceProfile: + description: IAM Instance Profile to launch the instance with. + Specified as the name of the Instance Profile. Ensure your credentials + have the correct permission to assign the instance profile according + to the EC2 documentation, notably iam:PassRole. + type: string + instanceInitiatedShutdownBehavior: + description: Shutdown behavior for the instance. Amazon defaults + this to stop for EBS-backed instances and terminate for instance-store + instances. Cannot be set on instance-store instances. See Shutdown + Behavior for more information. + type: string + instanceMarketOptions: + description: Describes the market (purchasing) option for the + instances. See Market Options below for details on attributes. + properties: + marketType: + description: Type of market for the instance. Valid value + is spot. Defaults to spot. Required if spot_options is specified. + type: string + spotOptions: + description: Block to configure the options for Spot Instances. + See Spot Options below for details on attributes. + properties: + instanceInterruptionBehavior: + description: The behavior when a Spot Instance is interrupted. 
+ Valid values include hibernate, stop, terminate . The + default is terminate. + type: string + maxPrice: + description: The maximum hourly price that you're willing + to pay for a Spot Instance. + type: string + spotInstanceType: + description: The Spot Instance request type. Valid values + include one-time, persistent. Persistent Spot Instance + requests are only supported when the instance interruption + behavior is either hibernate or stop. The default is + one-time. + type: string + validUntil: + description: The end date of the request, in UTC format + (YYYY-MM-DDTHH:MM:SSZ). Supported only for persistent + requests. + type: string + type: object + type: object + instanceType: + description: Instance type to use for the instance. Required unless + launch_template is specified and the Launch Template specifies + an instance type. If an instance type is specified in the Launch + Template, setting instance_type will override the instance type + specified in the Launch Template. Updates to this field will + trigger a stop/start of the EC2 instance. + type: string + ipv6AddressCount: + description: Number of IPv6 addresses to associate with the primary + network interface. Amazon EC2 chooses the IPv6 addresses from + the range of your subnet. + type: number + ipv6Addresses: + description: Specify one or more IPv6 addresses from the range + of the subnet to associate with the primary network interface + items: + type: string + type: array + keyName: + description: Key name of the Key Pair to use for the instance; + which can be managed using the . + type: string + launchTemplate: + description: Specifies a Launch Template to configure the instance. + Parameters configured on this resource will override the corresponding + parameters in the Launch Template. See Launch Template Specification + below for more details. + properties: + id: + description: ID of the launch template. Conflicts with name. + type: string + name: + description: Name of the launch template. 
Conflicts with id. + type: string + version: + description: Template version. Can be a specific version number, + $Latest or $Default. The default value is $Default. + type: string + type: object + maintenanceOptions: + description: Maintenance and recovery options for the instance. + See Maintenance Options below for more details. + properties: + autoRecovery: + description: Automatic recovery behavior of the Instance. + Can be "default" or "disabled". See Recover your instance + for more details. + type: string + type: object + metadataOptions: + description: Customize the metadata options of the instance. See + Metadata Options below for more details. + properties: + httpEndpoint: + description: Whether the metadata service is available. Valid + values include enabled or disabled. Defaults to enabled. + type: string + httpProtocolIpv6: + description: Whether the IPv6 endpoint for the instance metadata + service is enabled. Defaults to disabled. + type: string + httpPutResponseHopLimit: + description: Desired HTTP PUT response hop limit for instance + metadata requests. The larger the number, the further instance + metadata requests can travel. Valid values are integer from + 1 to 64. Defaults to 1. + type: number + httpTokens: + description: Whether or not the metadata service requires + session tokens, also referred to as Instance Metadata Service + Version 2 (IMDSv2). Valid values include optional or required. + Defaults to optional. + type: string + instanceMetadataTags: + description: Enables or disables access to instance tags from + the instance metadata service. Valid values include enabled + or disabled. Defaults to disabled. + type: string + type: object + monitoring: + description: If true, the launched EC2 instance will have detailed + monitoring enabled. (Available since v0.6.0) + type: boolean + networkInterface: + description: Customize network interfaces to be attached at instance + boot time. See Network Interfaces below for more details. 
+ items: + properties: + deleteOnTermination: + description: Whether or not to delete the network interface + on instance termination. Defaults to false. Currently, + the only valid value is false, as this is only supported + when creating new network interfaces when launching an + instance. + type: boolean + deviceIndex: + description: Integer index of the network interface attachment. + Limited by instance type. + type: number + networkCardIndex: + description: Integer index of the network card. Limited + by instance type. The default index is 0. + type: number + networkInterfaceId: + description: ID of the network interface to attach. + type: string + networkInterfaceIdRef: + description: Reference to a NetworkInterface in ec2 to populate + networkInterfaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkInterfaceIdSelector: + description: Selector for a NetworkInterface in ec2 to populate + networkInterfaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + placementGroup: + description: Placement Group to start the instance in. + type: string + placementPartitionNumber: + description: Number of the partition the instance is in. Valid + only if the strategy argument is set to "partition". + type: number + privateDnsNameOptions: + description: Options for the instance hostname. The default values + are inherited from the subnet. See Private DNS Name Options + below for more details. + properties: + enableResourceNameDnsARecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS A records. + type: boolean + enableResourceNameDnsAaaaRecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS AAAA records. + type: boolean + hostnameType: + description: 'Type of hostname for Amazon EC2 instances. For + IPv4 only subnets, an instance DNS name must be based on + the instance IPv4 address. For IPv6 native subnets, an instance + DNS name must be based on the instance ID. 
For dual-stack + subnets, you can specify whether DNS names use the instance + IPv4 address or the instance ID. Valid values: ip-name and + resource-name.' + type: string + type: object + privateIp: + description: Private IP address to associate with the instance + in a VPC. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rootBlockDevice: + description: Configuration block to customize details about the + root block device of the instance. See Block Devices below for + details. When accessing this as an attribute reference, it is + a list containing one object. + properties: + deleteOnTermination: + description: Whether the volume should be destroyed on instance + termination. Defaults to true. + type: boolean + encrypted: + description: Whether to enable volume encryption. Defaults + to false. Must be configured to perform drift detection. + type: boolean + iops: + description: Amount of provisioned IOPS. Only valid for volume_type + of io1, io2 or gp3. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the KMS Key to + use when encrypting the volume. Must be configured to perform + drift detection. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Map of tags to assign to the device. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + description: Throughput to provision for a volume in mebibytes + per second (MiB/s). 
This is only valid for volume_type of + gp3. + type: number + volumeSize: + description: Size of the volume in gibibytes (GiB). + type: number + volumeType: + description: Type of volume. Valid values include standard, + gp2, gp3, io1, io2, sc1, or st1. Defaults to the volume + type that the AMI uses. + type: string + type: object + secondaryPrivateIps: + description: List of secondary private IPv4 addresses to assign + to the instance's primary network interface (eth0) in a VPC. + Can only be assigned to the primary network interface (eth0) + attached at instance creation, not a pre-existing network interface + i.e., referenced in a network_interface block. Refer to the + Elastic network interfaces documentation to see the maximum + number of private IP addresses allowed per instance type. + items: + type: string + type: array + x-kubernetes-list-type: set + sourceDestCheck: + description: Controls if traffic is routed to the instance when + the destination address does not match the instance. Used for + NAT or VPNs. Defaults true. + type: boolean + subnetId: + description: VPC Subnet ID to launch in. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tenancy: + description: Tenancy of the instance (if the instance is running + in a VPC). An instance with a tenancy of dedicated runs on single-tenant + hardware. The host tenancy is not supported for the import-instance + command. Valid values are default, dedicated, and host. + type: string + userData: + description: User data to provide when launching the instance. + Do not pass gzip-compressed data via this argument; see user_data_base64 + instead. Updates to this field will trigger a stop/start of + the EC2 instance by default. 
If the user_data_replace_on_change + is set then updates to this field will trigger a destroy and + recreate. + type: string + userDataBase64: + description: Can be used instead of user_data to pass base64-encoded + binary data directly. Use this instead of user_data whenever + the value is not a valid UTF-8 string. For example, gzip-encoded + user data must be base64-encoded and passed via this argument + to avoid corruption. Updates to this field will trigger a stop/start + of the EC2 instance by default. If the user_data_replace_on_change + is set then updates to this field will trigger a destroy and + recreate. + type: string + userDataReplaceOnChange: + description: When used in combination with user_data or user_data_base64 + will trigger a destroy and recreate when set to true. Defaults + to false if not set. + type: boolean + volumeTags: + additionalProperties: + type: string + description: Map of tags to assign, at instance-creation time, + to root and EBS volumes. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: List of security group IDs to associate with. + items: + type: string + type: array + x-kubernetes-list-type: set + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + ami: + description: AMI to use for the instance. Required unless launch_template + is specified and the Launch Template specifes an AMI. If an + AMI is specified in the Launch Template, setting ami will override + the AMI specified in the Launch Template. + type: string + associatePublicIpAddress: + description: Whether to associate a public IP address with an + instance in a VPC. + type: boolean + availabilityZone: + description: AZ to start the instance in. + type: string + capacityReservationSpecification: + description: Describes an instance's Capacity Reservation targeting + option. See Capacity Reservation Specification below for more + details. + properties: + capacityReservationPreference: + description: 'Indicates the instance''s Capacity Reservation + preferences. Can be "open" or "none". (Default: "open").' + type: string + capacityReservationTarget: + description: Information about the target Capacity Reservation. + See Capacity Reservation Target below for more details. + properties: + capacityReservationId: + description: ID of the Capacity Reservation in which to + run the instance. + type: string + capacityReservationResourceGroupArn: + description: ARN of the Capacity Reservation resource + group in which to run the instance. + type: string + type: object + type: object + cpuCoreCount: + description: Sets the number of CPU cores for an instance. This + option is only supported on creation of instance type that support + CPU Options CPU Cores and Threads Per CPU Core Per Instance + Type - specifying this option for unsupported instance types + will return an error from the EC2 API. 
+ type: number + cpuOptions: + description: The CPU options for the instance. See CPU Options + below for more details. + properties: + amdSevSnp: + description: Indicates whether to enable the instance for + AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and + C6a instance types only. Valid values are enabled and disabled. + type: string + coreCount: + description: Sets the number of CPU cores for an instance. + This option is only supported on creation of instance type + that support CPU Options CPU Cores and Threads Per CPU Core + Per Instance Type - specifying this option for unsupported + instance types will return an error from the EC2 API. + type: number + threadsPerCore: + description: If set to 1, hyperthreading is disabled on the + launched instance. Defaults to 2 if not set. See Optimizing + CPU Options for more information. + type: number + type: object + cpuThreadsPerCore: + description: If set to 1, hyperthreading is disabled on the launched + instance. Defaults to 2 if not set. See Optimizing CPU Options + for more information. + type: number + creditSpecification: + description: Configuration block for customizing the credit specification + of the instance. See Credit Specification below for more details. + Removing this configuration on existing instances will only + stop managing it. It will not change the configuration back + to the default for the instance type. + properties: + cpuCredits: + description: Credit option for CPU usage. Valid values include + standard or unlimited. T3 instances are launched as unlimited + by default. T2 instances are launched as standard by default. + type: string + type: object + disableApiStop: + description: If true, enables EC2 Instance Stop Protection. + type: boolean + disableApiTermination: + description: If true, enables EC2 Instance Termination Protection. + type: boolean + ebsBlockDevice: + description: One or more configuration blocks with additional + EBS block devices to attach to the instance. 
Block device configurations + only apply on resource creation. See Block Devices below for + details on attributes and drift detection. When accessing this + as an attribute reference, it is a set of objects. + items: + properties: + deleteOnTermination: + description: Whether the volume should be destroyed on instance + termination. Defaults to true. + type: boolean + deviceName: + description: Name of the device to mount. + type: string + encrypted: + description: Enables EBS encryption on the volume. Defaults + to false. Cannot be used with snapshot_id. Must be configured + to perform drift detection. + type: boolean + iops: + description: Amount of provisioned IOPS. Only valid for + volume_type of io1, io2 or gp3. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the KMS Key to + use when encrypting the volume. Must be configured to + perform drift detection. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + snapshotId: + description: Snapshot ID to mount. + type: string + tags: + additionalProperties: + type: string + description: Map of tags to assign to the device. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + description: Throughput to provision for a volume in mebibytes + per second (MiB/s). This is only valid for volume_type + of gp3. + type: number + volumeSize: + description: Size of the volume in gibibytes (GiB). + type: number + volumeType: + description: Type of volume. Valid values include standard, + gp2, gp3, io1, io2, sc1, or st1. Defaults to gp2. 
+ type: string + type: object + type: array + ebsOptimized: + description: If true, the launched EC2 instance will be EBS-optimized. + Note that if this is not set on an instance type that is optimized + by default then this will show as disabled but if the instance + type is optimized by default then there is no need to set this + and there is no effect to disabling it. See the EBS Optimized + section of the AWS User Guide for more information. + type: boolean + enclaveOptions: + description: Enable Nitro Enclaves on launched instances. See + Enclave Options below for more details. + properties: + enabled: + description: Whether Nitro Enclaves will be enabled on the + instance. Defaults to false. + type: boolean + type: object + ephemeralBlockDevice: + description: One or more configuration blocks to customize Ephemeral + (also known as "Instance Store") volumes on the instance. See + Block Devices below for details. When accessing this as an attribute + reference, it is a set of objects. + items: + properties: + deviceName: + description: Name of the block device to mount on the instance. + type: string + noDevice: + description: Suppresses the specified device included in + the AMI's block device mapping. + type: boolean + virtualName: + description: Instance Store Device Name (e.g., ephemeral0). + type: string + type: object + type: array + getPasswordData: + description: If true, wait for password data to become available + and retrieve it. Useful for getting the administrator password + for instances running Microsoft Windows. The password data is + exported to the password_data attribute. See GetPasswordData + for more information. + type: boolean + hibernation: + description: If true, the launched EC2 instance will support hibernation. + type: boolean + hostId: + description: ID of a dedicated host that the instance will be + assigned to. Use when an instance is to be launched on a specific + dedicated host. 
+ type: string + hostResourceGroupArn: + description: ARN of the host resource group in which to launch + the instances. If you specify an ARN, omit the tenancy parameter + or set it to host. + type: string + iamInstanceProfile: + description: IAM Instance Profile to launch the instance with. + Specified as the name of the Instance Profile. Ensure your credentials + have the correct permission to assign the instance profile according + to the EC2 documentation, notably iam:PassRole. + type: string + instanceInitiatedShutdownBehavior: + description: Shutdown behavior for the instance. Amazon defaults + this to stop for EBS-backed instances and terminate for instance-store + instances. Cannot be set on instance-store instances. See Shutdown + Behavior for more information. + type: string + instanceMarketOptions: + description: Describes the market (purchasing) option for the + instances. See Market Options below for details on attributes. + properties: + marketType: + description: Type of market for the instance. Valid value + is spot. Defaults to spot. Required if spot_options is specified. + type: string + spotOptions: + description: Block to configure the options for Spot Instances. + See Spot Options below for details on attributes. + properties: + instanceInterruptionBehavior: + description: The behavior when a Spot Instance is interrupted. + Valid values include hibernate, stop, terminate . The + default is terminate. + type: string + maxPrice: + description: The maximum hourly price that you're willing + to pay for a Spot Instance. + type: string + spotInstanceType: + description: The Spot Instance request type. Valid values + include one-time, persistent. Persistent Spot Instance + requests are only supported when the instance interruption + behavior is either hibernate or stop. The default is + one-time. + type: string + validUntil: + description: The end date of the request, in UTC format + (YYYY-MM-DDTHH:MM:SSZ). Supported only for persistent + requests. 
+ type: string + type: object + type: object + instanceType: + description: Instance type to use for the instance. Required unless + launch_template is specified and the Launch Template specifies + an instance type. If an instance type is specified in the Launch + Template, setting instance_type will override the instance type + specified in the Launch Template. Updates to this field will + trigger a stop/start of the EC2 instance. + type: string + ipv6AddressCount: + description: Number of IPv6 addresses to associate with the primary + network interface. Amazon EC2 chooses the IPv6 addresses from + the range of your subnet. + type: number + ipv6Addresses: + description: Specify one or more IPv6 addresses from the range + of the subnet to associate with the primary network interface + items: + type: string + type: array + keyName: + description: Key name of the Key Pair to use for the instance; + which can be managed using the . + type: string + launchTemplate: + description: Specifies a Launch Template to configure the instance. + Parameters configured on this resource will override the corresponding + parameters in the Launch Template. See Launch Template Specification + below for more details. + properties: + id: + description: ID of the launch template. Conflicts with name. + type: string + name: + description: Name of the launch template. Conflicts with id. + type: string + version: + description: Template version. Can be a specific version number, + $Latest or $Default. The default value is $Default. + type: string + type: object + maintenanceOptions: + description: Maintenance and recovery options for the instance. + See Maintenance Options below for more details. + properties: + autoRecovery: + description: Automatic recovery behavior of the Instance. + Can be "default" or "disabled". See Recover your instance + for more details. + type: string + type: object + metadataOptions: + description: Customize the metadata options of the instance. 
See + Metadata Options below for more details. + properties: + httpEndpoint: + description: Whether the metadata service is available. Valid + values include enabled or disabled. Defaults to enabled. + type: string + httpProtocolIpv6: + description: Whether the IPv6 endpoint for the instance metadata + service is enabled. Defaults to disabled. + type: string + httpPutResponseHopLimit: + description: Desired HTTP PUT response hop limit for instance + metadata requests. The larger the number, the further instance + metadata requests can travel. Valid values are integer from + 1 to 64. Defaults to 1. + type: number + httpTokens: + description: Whether or not the metadata service requires + session tokens, also referred to as Instance Metadata Service + Version 2 (IMDSv2). Valid values include optional or required. + Defaults to optional. + type: string + instanceMetadataTags: + description: Enables or disables access to instance tags from + the instance metadata service. Valid values include enabled + or disabled. Defaults to disabled. + type: string + type: object + monitoring: + description: If true, the launched EC2 instance will have detailed + monitoring enabled. (Available since v0.6.0) + type: boolean + networkInterface: + description: Customize network interfaces to be attached at instance + boot time. See Network Interfaces below for more details. + items: + properties: + deleteOnTermination: + description: Whether or not to delete the network interface + on instance termination. Defaults to false. Currently, + the only valid value is false, as this is only supported + when creating new network interfaces when launching an + instance. + type: boolean + deviceIndex: + description: Integer index of the network interface attachment. + Limited by instance type. + type: number + networkCardIndex: + description: Integer index of the network card. Limited + by instance type. The default index is 0. 
+ type: number + networkInterfaceId: + description: ID of the network interface to attach. + type: string + networkInterfaceIdRef: + description: Reference to a NetworkInterface in ec2 to populate + networkInterfaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkInterfaceIdSelector: + description: Selector for a NetworkInterface in ec2 to populate + networkInterfaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + placementGroup: + description: Placement Group to start the instance in. + type: string + placementPartitionNumber: + description: Number of the partition the instance is in. Valid + only if the strategy argument is set to "partition". + type: number + privateDnsNameOptions: + description: Options for the instance hostname. The default values + are inherited from the subnet. See Private DNS Name Options + below for more details. + properties: + enableResourceNameDnsARecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS A records. + type: boolean + enableResourceNameDnsAaaaRecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS AAAA records. + type: boolean + hostnameType: + description: 'Type of hostname for Amazon EC2 instances. For + IPv4 only subnets, an instance DNS name must be based on + the instance IPv4 address. For IPv6 native subnets, an instance + DNS name must be based on the instance ID. For dual-stack + subnets, you can specify whether DNS names use the instance + IPv4 address or the instance ID. Valid values: ip-name and + resource-name.' + type: string + type: object + privateIp: + description: Private IP address to associate with the instance + in a VPC. + type: string + rootBlockDevice: + description: Configuration block to customize details about the + root block device of the instance. See Block Devices below for + details. When accessing this as an attribute reference, it is + a list containing one object. 
+ properties: + deleteOnTermination: + description: Whether the volume should be destroyed on instance + termination. Defaults to true. + type: boolean + encrypted: + description: Whether to enable volume encryption. Defaults + to false. Must be configured to perform drift detection. + type: boolean + iops: + description: Amount of provisioned IOPS. Only valid for volume_type + of io1, io2 or gp3. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the KMS Key to + use when encrypting the volume. Must be configured to perform + drift detection. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Map of tags to assign to the device. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + description: Throughput to provision for a volume in mebibytes + per second (MiB/s). This is only valid for volume_type of + gp3. + type: number + volumeSize: + description: Size of the volume in gibibytes (GiB). + type: number + volumeType: + description: Type of volume. Valid values include standard, + gp2, gp3, io1, io2, sc1, or st1. Defaults to the volume + type that the AMI uses. + type: string + type: object + secondaryPrivateIps: + description: List of secondary private IPv4 addresses to assign + to the instance's primary network interface (eth0) in a VPC. + Can only be assigned to the primary network interface (eth0) + attached at instance creation, not a pre-existing network interface + i.e., referenced in a network_interface block. 
Refer to the + Elastic network interfaces documentation to see the maximum + number of private IP addresses allowed per instance type. + items: + type: string + type: array + x-kubernetes-list-type: set + sourceDestCheck: + description: Controls if traffic is routed to the instance when + the destination address does not match the instance. Used for + NAT or VPNs. Defaults true. + type: boolean + subnetId: + description: VPC Subnet ID to launch in. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tenancy: + description: Tenancy of the instance (if the instance is running + in a VPC). An instance with a tenancy of dedicated runs on single-tenant + hardware. The host tenancy is not supported for the import-instance + command. Valid values are default, dedicated, and host. + type: string + userData: + description: User data to provide when launching the instance. + Do not pass gzip-compressed data via this argument; see user_data_base64 + instead. Updates to this field will trigger a stop/start of + the EC2 instance by default. If the user_data_replace_on_change + is set then updates to this field will trigger a destroy and + recreate. + type: string + userDataBase64: + description: Can be used instead of user_data to pass base64-encoded + binary data directly. Use this instead of user_data whenever + the value is not a valid UTF-8 string. For example, gzip-encoded + user data must be base64-encoded and passed via this argument + to avoid corruption. Updates to this field will trigger a stop/start + of the EC2 instance by default. 
If the user_data_replace_on_change + is set then updates to this field will trigger a destroy and + recreate. + type: string + userDataReplaceOnChange: + description: When used in combination with user_data or user_data_base64 + will trigger a destroy and recreate when set to true. Defaults + to false if not set. + type: boolean + volumeTags: + additionalProperties: + type: string + description: Map of tags to assign, at instance-creation time, + to root and EBS volumes. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: List of security group IDs to associate with. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: InstanceStatus defines the observed state of Instance. + properties: + atProvider: + properties: + ami: + description: AMI to use for the instance. Required unless launch_template + is specified and the Launch Template specifes an AMI. If an + AMI is specified in the Launch Template, setting ami will override + the AMI specified in the Launch Template. + type: string + arn: + description: ARN of the instance. + type: string + associatePublicIpAddress: + description: Whether to associate a public IP address with an + instance in a VPC. + type: boolean + availabilityZone: + description: AZ to start the instance in. + type: string + capacityReservationSpecification: + description: Describes an instance's Capacity Reservation targeting + option. See Capacity Reservation Specification below for more + details. + properties: + capacityReservationPreference: + description: 'Indicates the instance''s Capacity Reservation + preferences. Can be "open" or "none". (Default: "open").' 
+ type: string + capacityReservationTarget: + description: Information about the target Capacity Reservation. + See Capacity Reservation Target below for more details. + properties: + capacityReservationId: + description: ID of the Capacity Reservation in which to + run the instance. + type: string + capacityReservationResourceGroupArn: + description: ARN of the Capacity Reservation resource + group in which to run the instance. + type: string + type: object + type: object + cpuCoreCount: + description: Sets the number of CPU cores for an instance. This + option is only supported on creation of instance type that support + CPU Options CPU Cores and Threads Per CPU Core Per Instance + Type - specifying this option for unsupported instance types + will return an error from the EC2 API. + type: number + cpuOptions: + description: The CPU options for the instance. See CPU Options + below for more details. + properties: + amdSevSnp: + description: Indicates whether to enable the instance for + AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and + C6a instance types only. Valid values are enabled and disabled. + type: string + coreCount: + description: Sets the number of CPU cores for an instance. + This option is only supported on creation of instance type + that support CPU Options CPU Cores and Threads Per CPU Core + Per Instance Type - specifying this option for unsupported + instance types will return an error from the EC2 API. + type: number + threadsPerCore: + description: If set to 1, hyperthreading is disabled on the + launched instance. Defaults to 2 if not set. See Optimizing + CPU Options for more information. + type: number + type: object + cpuThreadsPerCore: + description: If set to 1, hyperthreading is disabled on the launched + instance. Defaults to 2 if not set. See Optimizing CPU Options + for more information. + type: number + creditSpecification: + description: Configuration block for customizing the credit specification + of the instance. 
See Credit Specification below for more details. + Removing this configuration on existing instances will only + stop managing it. It will not change the configuration back + to the default for the instance type. + properties: + cpuCredits: + description: Credit option for CPU usage. Valid values include + standard or unlimited. T3 instances are launched as unlimited + by default. T2 instances are launched as standard by default. + type: string + type: object + disableApiStop: + description: If true, enables EC2 Instance Stop Protection. + type: boolean + disableApiTermination: + description: If true, enables EC2 Instance Termination Protection. + type: boolean + ebsBlockDevice: + description: One or more configuration blocks with additional + EBS block devices to attach to the instance. Block device configurations + only apply on resource creation. See Block Devices below for + details on attributes and drift detection. When accessing this + as an attribute reference, it is a set of objects. + items: + properties: + deleteOnTermination: + description: Whether the volume should be destroyed on instance + termination. Defaults to true. + type: boolean + deviceName: + description: Name of the device to mount. + type: string + encrypted: + description: Enables EBS encryption on the volume. Defaults + to false. Cannot be used with snapshot_id. Must be configured + to perform drift detection. + type: boolean + iops: + description: Amount of provisioned IOPS. Only valid for + volume_type of io1, io2 or gp3. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the KMS Key to + use when encrypting the volume. Must be configured to + perform drift detection. + type: string + snapshotId: + description: Snapshot ID to mount. + type: string + tags: + additionalProperties: + type: string + description: Map of tags to assign to the device. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + description: Throughput to provision for a volume in mebibytes + per second (MiB/s). This is only valid for volume_type + of gp3. + type: number + volumeId: + description: ID of the volume. For example, the ID can be + accessed like this, aws_instance.web.ebs_block_device.2.volume_id. + type: string + volumeSize: + description: Size of the volume in gibibytes (GiB). + type: number + volumeType: + description: Type of volume. Valid values include standard, + gp2, gp3, io1, io2, sc1, or st1. Defaults to gp2. + type: string + type: object + type: array + ebsOptimized: + description: If true, the launched EC2 instance will be EBS-optimized. + Note that if this is not set on an instance type that is optimized + by default then this will show as disabled but if the instance + type is optimized by default then there is no need to set this + and there is no effect to disabling it. See the EBS Optimized + section of the AWS User Guide for more information. + type: boolean + enclaveOptions: + description: Enable Nitro Enclaves on launched instances. See + Enclave Options below for more details. + properties: + enabled: + description: Whether Nitro Enclaves will be enabled on the + instance. Defaults to false. + type: boolean + type: object + ephemeralBlockDevice: + description: One or more configuration blocks to customize Ephemeral + (also known as "Instance Store") volumes on the instance. See + Block Devices below for details. When accessing this as an attribute + reference, it is a set of objects. + items: + properties: + deviceName: + description: Name of the block device to mount on the instance. 
+ type: string + noDevice: + description: Suppresses the specified device included in + the AMI's block device mapping. + type: boolean + virtualName: + description: Instance Store Device Name (e.g., ephemeral0). + type: string + type: object + type: array + getPasswordData: + description: If true, wait for password data to become available + and retrieve it. Useful for getting the administrator password + for instances running Microsoft Windows. The password data is + exported to the password_data attribute. See GetPasswordData + for more information. + type: boolean + hibernation: + description: If true, the launched EC2 instance will support hibernation. + type: boolean + hostId: + description: ID of a dedicated host that the instance will be + assigned to. Use when an instance is to be launched on a specific + dedicated host. + type: string + hostResourceGroupArn: + description: ARN of the host resource group in which to launch + the instances. If you specify an ARN, omit the tenancy parameter + or set it to host. + type: string + iamInstanceProfile: + description: IAM Instance Profile to launch the instance with. + Specified as the name of the Instance Profile. Ensure your credentials + have the correct permission to assign the instance profile according + to the EC2 documentation, notably iam:PassRole. + type: string + id: + description: ID of the instance. + type: string + instanceInitiatedShutdownBehavior: + description: Shutdown behavior for the instance. Amazon defaults + this to stop for EBS-backed instances and terminate for instance-store + instances. Cannot be set on instance-store instances. See Shutdown + Behavior for more information. + type: string + instanceLifecycle: + description: Indicates whether this is a Spot Instance or a Scheduled + Instance. + type: string + instanceMarketOptions: + description: Describes the market (purchasing) option for the + instances. See Market Options below for details on attributes. 
+ properties: + marketType: + description: Type of market for the instance. Valid value + is spot. Defaults to spot. Required if spot_options is specified. + type: string + spotOptions: + description: Block to configure the options for Spot Instances. + See Spot Options below for details on attributes. + properties: + instanceInterruptionBehavior: + description: The behavior when a Spot Instance is interrupted. + Valid values include hibernate, stop, terminate . The + default is terminate. + type: string + maxPrice: + description: The maximum hourly price that you're willing + to pay for a Spot Instance. + type: string + spotInstanceType: + description: The Spot Instance request type. Valid values + include one-time, persistent. Persistent Spot Instance + requests are only supported when the instance interruption + behavior is either hibernate or stop. The default is + one-time. + type: string + validUntil: + description: The end date of the request, in UTC format + (YYYY-MM-DDTHH:MM:SSZ). Supported only for persistent + requests. + type: string + type: object + type: object + instanceState: + description: 'State of the instance. One of: pending, running, + shutting-down, terminated, stopping, stopped. See Instance Lifecycle + for more information.' + type: string + instanceType: + description: Instance type to use for the instance. Required unless + launch_template is specified and the Launch Template specifies + an instance type. If an instance type is specified in the Launch + Template, setting instance_type will override the instance type + specified in the Launch Template. Updates to this field will + trigger a stop/start of the EC2 instance. + type: string + ipv6AddressCount: + description: Number of IPv6 addresses to associate with the primary + network interface. Amazon EC2 chooses the IPv6 addresses from + the range of your subnet. 
+ type: number + ipv6Addresses: + description: Specify one or more IPv6 addresses from the range + of the subnet to associate with the primary network interface + items: + type: string + type: array + keyName: + description: Key name of the Key Pair to use for the instance; + which can be managed using the . + type: string + launchTemplate: + description: Specifies a Launch Template to configure the instance. + Parameters configured on this resource will override the corresponding + parameters in the Launch Template. See Launch Template Specification + below for more details. + properties: + id: + description: ID of the launch template. Conflicts with name. + type: string + name: + description: Name of the launch template. Conflicts with id. + type: string + version: + description: Template version. Can be a specific version number, + $Latest or $Default. The default value is $Default. + type: string + type: object + maintenanceOptions: + description: Maintenance and recovery options for the instance. + See Maintenance Options below for more details. + properties: + autoRecovery: + description: Automatic recovery behavior of the Instance. + Can be "default" or "disabled". See Recover your instance + for more details. + type: string + type: object + metadataOptions: + description: Customize the metadata options of the instance. See + Metadata Options below for more details. + properties: + httpEndpoint: + description: Whether the metadata service is available. Valid + values include enabled or disabled. Defaults to enabled. + type: string + httpProtocolIpv6: + description: Whether the IPv6 endpoint for the instance metadata + service is enabled. Defaults to disabled. + type: string + httpPutResponseHopLimit: + description: Desired HTTP PUT response hop limit for instance + metadata requests. The larger the number, the further instance + metadata requests can travel. Valid values are integer from + 1 to 64. Defaults to 1. 
+ type: number + httpTokens: + description: Whether or not the metadata service requires + session tokens, also referred to as Instance Metadata Service + Version 2 (IMDSv2). Valid values include optional or required. + Defaults to optional. + type: string + instanceMetadataTags: + description: Enables or disables access to instance tags from + the instance metadata service. Valid values include enabled + or disabled. Defaults to disabled. + type: string + type: object + monitoring: + description: If true, the launched EC2 instance will have detailed + monitoring enabled. (Available since v0.6.0) + type: boolean + networkInterface: + description: Customize network interfaces to be attached at instance + boot time. See Network Interfaces below for more details. + items: + properties: + deleteOnTermination: + description: Whether or not to delete the network interface + on instance termination. Defaults to false. Currently, + the only valid value is false, as this is only supported + when creating new network interfaces when launching an + instance. + type: boolean + deviceIndex: + description: Integer index of the network interface attachment. + Limited by instance type. + type: number + networkCardIndex: + description: Integer index of the network card. Limited + by instance type. The default index is 0. + type: number + networkInterfaceId: + description: ID of the network interface to attach. + type: string + type: object + type: array + outpostArn: + description: ARN of the Outpost the instance is assigned to. + type: string + passwordData: + description: Base-64 encoded encrypted password data for the instance. + Useful for getting the administrator password for instances + running Microsoft Windows. This attribute is only exported if + get_password_data is true. Note that this encrypted value will + be stored in the state file, as with all exported attributes. + See GetPasswordData for more information. 
+ type: string + placementGroup: + description: Placement Group to start the instance in. + type: string + placementPartitionNumber: + description: Number of the partition the instance is in. Valid + only if the strategy argument is set to "partition". + type: number + primaryNetworkInterfaceId: + description: ID of the instance's primary network interface. + type: string + privateDns: + description: Private DNS name assigned to the instance. Can only + be used inside the Amazon EC2, and only available if you've + enabled DNS hostnames for your VPC. + type: string + privateDnsNameOptions: + description: Options for the instance hostname. The default values + are inherited from the subnet. See Private DNS Name Options + below for more details. + properties: + enableResourceNameDnsARecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS A records. + type: boolean + enableResourceNameDnsAaaaRecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS AAAA records. + type: boolean + hostnameType: + description: 'Type of hostname for Amazon EC2 instances. For + IPv4 only subnets, an instance DNS name must be based on + the instance IPv4 address. For IPv6 native subnets, an instance + DNS name must be based on the instance ID. For dual-stack + subnets, you can specify whether DNS names use the instance + IPv4 address or the instance ID. Valid values: ip-name and + resource-name.' + type: string + type: object + privateIp: + description: Private IP address to associate with the instance + in a VPC. + type: string + publicDns: + description: Public DNS name assigned to the instance. For EC2-VPC, + this is only available if you've enabled DNS hostnames for your + VPC. + type: string + publicIp: + description: 'Public IP address assigned to the instance, if applicable. 
+ NOTE: If you are using an aws_eip with your instance, you should + refer to the EIP''s address directly and not use public_ip as + this field will change after the EIP is attached.' + type: string + rootBlockDevice: + description: Configuration block to customize details about the + root block device of the instance. See Block Devices below for + details. When accessing this as an attribute reference, it is + a list containing one object. + properties: + deleteOnTermination: + description: Whether the volume should be destroyed on instance + termination. Defaults to true. + type: boolean + deviceName: + description: Device name, e.g., /dev/sdh or xvdh. + type: string + encrypted: + description: Whether to enable volume encryption. Defaults + to false. Must be configured to perform drift detection. + type: boolean + iops: + description: Amount of provisioned IOPS. Only valid for volume_type + of io1, io2 or gp3. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the KMS Key to + use when encrypting the volume. Must be configured to perform + drift detection. + type: string + tags: + additionalProperties: + type: string + description: Map of tags to assign to the device. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + description: Throughput to provision for a volume in mebibytes + per second (MiB/s). This is only valid for volume_type of + gp3. + type: number + volumeId: + description: ID of the volume. For example, the ID can be + accessed like this, aws_instance.web.root_block_device.0.volume_id. + type: string + volumeSize: + description: Size of the volume in gibibytes (GiB). + type: number + volumeType: + description: Type of volume. 
Valid values include standard, + gp2, gp3, io1, io2, sc1, or st1. Defaults to the volume + type that the AMI uses. + type: string + type: object + secondaryPrivateIps: + description: List of secondary private IPv4 addresses to assign + to the instance's primary network interface (eth0) in a VPC. + Can only be assigned to the primary network interface (eth0) + attached at instance creation, not a pre-existing network interface + i.e., referenced in a network_interface block. Refer to the + Elastic network interfaces documentation to see the maximum + number of private IP addresses allowed per instance type. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroups: + description: List of security group names to associate with. + items: + type: string + type: array + x-kubernetes-list-type: set + sourceDestCheck: + description: Controls if traffic is routed to the instance when + the destination address does not match the instance. Used for + NAT or VPNs. Defaults true. + type: boolean + spotInstanceRequestId: + description: If the request is a Spot Instance request, the ID + of the request. + type: string + subnetId: + description: VPC Subnet ID to launch in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + tenancy: + description: Tenancy of the instance (if the instance is running + in a VPC). An instance with a tenancy of dedicated runs on single-tenant + hardware. The host tenancy is not supported for the import-instance + command. Valid values are default, dedicated, and host. + type: string + userData: + description: User data to provide when launching the instance. 
+ Do not pass gzip-compressed data via this argument; see user_data_base64 + instead. Updates to this field will trigger a stop/start of + the EC2 instance by default. If the user_data_replace_on_change + is set then updates to this field will trigger a destroy and + recreate. + type: string + userDataBase64: + description: Can be used instead of user_data to pass base64-encoded + binary data directly. Use this instead of user_data whenever + the value is not a valid UTF-8 string. For example, gzip-encoded + user data must be base64-encoded and passed via this argument + to avoid corruption. Updates to this field will trigger a stop/start + of the EC2 instance by default. If the user_data_replace_on_change + is set then updates to this field will trigger a destroy and + recreate. + type: string + userDataReplaceOnChange: + description: When used in combination with user_data or user_data_base64 + will trigger a destroy and recreate when set to true. Defaults + to false if not set. + type: boolean + volumeTags: + additionalProperties: + type: string + description: Map of tags to assign, at instance-creation time, + to root and EBS volumes. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIds: + description: List of security group IDs to associate with. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_launchtemplates.yaml b/package/crds/ec2.aws.upbound.io_launchtemplates.yaml index bd8f7ea44a..4de77e9793 100644 --- a/package/crds/ec2.aws.upbound.io_launchtemplates.yaml +++ b/package/crds/ec2.aws.upbound.io_launchtemplates.yaml @@ -3906,3 +3906,3722 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LaunchTemplate is the Schema for the LaunchTemplates API. 
Provides + an EC2 launch template resource. Can be used to create instances or auto + scaling groups. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LaunchTemplateSpec defines the desired state of LaunchTemplate + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + blockDeviceMappings: + description: |- + Specify volumes to attach to the instance besides the volumes specified by the AMI. + See Block Devices below for details. + items: + properties: + deviceName: + description: The name of the device to mount. + type: string + ebs: + description: Configure EBS volume properties. 
+ properties: + deleteOnTermination: + description: |- + Whether the volume should be destroyed on instance termination. + See Preserving Amazon EBS Volumes on Instance Termination for more information. + type: string + encrypted: + description: |- + Enables EBS encryption on the volume. + Cannot be used with snapshot_id. + type: string + iops: + description: |- + The amount of provisioned IOPS. + This must be set with a volume_type of "io1/io2/gp3". + type: number + kmsKeyId: + description: |- + The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. + encrypted must be set to true when this is set. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + snapshotId: + description: The Snapshot ID to mount. + type: string + throughput: + description: The throughput to provision for a gp3 volume + in MiB/s (specified as an integer, e.g., 500), with + a maximum of 1,000 MiB/s. + type: number + volumeSize: + description: The size of the volume in gigabytes. + type: number + volumeType: + description: |- + The volume type. + Can be one of standard, gp2, gp3, io1, io2, sc1 or st1. + type: string + type: object + noDevice: + description: Suppresses the specified device included in + the AMI's block device mapping. + type: string + virtualName: + description: |- + The Instance Store Device + Name + (e.g., "ephemeral0"). + type: string + type: object + type: array + capacityReservationSpecification: + description: Targeting for EC2 capacity reservations. See Capacity + Reservation Specification below for more details. + properties: + capacityReservationPreference: + description: Indicates the instance's Capacity Reservation + preferences. Can be open or none. (Default none). 
+ type: string + capacityReservationTarget: + description: 'Used to target a specific Capacity Reservation:' + properties: + capacityReservationId: + description: The ID of the Capacity Reservation in which + to run the instance. + type: string + capacityReservationResourceGroupArn: + description: The ARN of the Capacity Reservation resource + group in which to run the instance. + type: string + type: object + type: object + cpuOptions: + description: The CPU options for the instance. See CPU Options + below for more details. + properties: + amdSevSnp: + description: Indicates whether to enable the instance for + AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and + C6a instance types only. Valid values are enabled and disabled. + type: string + coreCount: + description: The number of CPU cores for the instance. + type: number + threadsPerCore: + description: |- + The number of threads per CPU core. + To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. + Otherwise, specify the default value of 2. + type: number + type: object + creditSpecification: + description: |- + Customize the credit specification of the instance. See Credit + Specification below for more details. + properties: + cpuCredits: + description: |- + The credit option for CPU usage. + Can be standard or unlimited. + T3 instances are launched as unlimited by default. + T2 instances are launched as standard by default. + type: string + type: object + defaultVersion: + description: Default Version of the launch template. + type: number + description: + description: Description of the launch template. + type: string + disableApiStop: + description: If true, enables EC2 Instance Stop Protection. + type: boolean + disableApiTermination: + description: |- + If true, enables EC2 Instance + Termination Protection + type: boolean + ebsOptimized: + description: If true, the launched EC2 instance will be EBS-optimized. 
+ type: string + elasticGpuSpecifications: + description: |- + The elastic GPU to attach to the instance. See Elastic GPU + below for more details. + items: + properties: + type: + description: The Elastic GPU Type + type: string + type: object + type: array + elasticInferenceAccelerator: + description: Configuration block containing an Elastic Inference + Accelerator to attach to the instance. See Elastic Inference + Accelerator below for more details. + properties: + type: + description: Accelerator type. + type: string + type: object + enclaveOptions: + description: Enable Nitro Enclaves on launched instances. See + Enclave Options below for more details. + properties: + enabled: + description: If set to true, Nitro Enclaves will be enabled + on the instance. + type: boolean + type: object + hibernationOptions: + description: The hibernation options for the instance. See Hibernation + Options below for more details. + properties: + configured: + description: If set to true, the launched EC2 instance will + hibernation enabled. + type: boolean + type: object + iamInstanceProfile: + description: |- + The IAM Instance Profile to launch the instance with. See Instance Profile + below for more details. + properties: + arn: + description: The Amazon Resource Name (ARN) of the instance + profile. Conflicts with name. + type: string + arnRef: + description: Reference to a InstanceProfile in iam to populate + arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a InstanceProfile in iam to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the instance profile. + type: string + nameRef: + description: Reference to a InstanceProfile in iam to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a InstanceProfile in iam to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + imageId: + description: The AMI from which to launch the instance. + type: string + instanceInitiatedShutdownBehavior: + description: |- + Shutdown behavior for the instance. Can be stop or terminate. + (Default: stop). + type: string + instanceMarketOptions: + description: |- + The market (purchasing) option for the instance. See Market Options + below for details. + properties: + marketType: + description: The market type. Can be spot. + type: string + spotOptions: + description: The options for Spot Instance + properties: + blockDurationMinutes: + description: The required duration in minutes. This value + must be a multiple of 60. + type: number + instanceInterruptionBehavior: + description: |- + The behavior when a Spot Instance is interrupted. Can be hibernate, + stop, or terminate. (Default: terminate). + type: string + maxPrice: + description: The maximum hourly price you're willing to + pay for the Spot Instances. + type: string + spotInstanceType: + description: The Spot Instance request type. Can be one-time, + or persistent. + type: string + validUntil: + description: The end date of the request. + type: string + type: object + type: object + instanceRequirements: + description: The attribute requirements for the type of instance. + If present then instance_type cannot be present. + properties: + acceleratorCount: + description: Block describing the minimum and maximum number + of accelerators (GPUs, FPGAs, or AWS Inferentia chips). + Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorManufacturers: + description: List of accelerator manufacturer names. Default + is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorNames: + description: List of accelerator names. Default is any acclerator. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorTotalMemoryMib: + description: Block describing the minimum and maximum total + memory of the accelerators. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorTypes: + description: List of accelerator types. Default is any accelerator + type. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedInstanceTypes: + description: 'List of instance types to apply your specified + attributes against. All other instance types are ignored, + even if they match your specified attributes. You can use + strings with one or more wild cards, represented by an asterisk + (*), to allow an instance type, size, or generation. The + following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. + For example, if you specify c5*, you are allowing the entire + C5 instance family, which includes all C5a and C5n instance + types. If you specify m5a.*, you are allowing all the M5a + instance types, but not the M5n instance types. Maximum + of 400 entries in the list; each entry is limited to 30 + characters. Default is all instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + bareMetal: + description: Indicate whether bare metal instace types should + be included, excluded, or required. Default is excluded. + type: string + baselineEbsBandwidthMbps: + description: Block describing the minimum and maximum baseline + EBS bandwidth, in Mbps. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + burstablePerformance: + description: Indicate whether burstable performance instance + types should be included, excluded, or required. Default + is excluded. + type: string + cpuManufacturers: + description: List of CPU manufacturer names. 
Default is any + manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + excludedInstanceTypes: + description: 'List of instance types to exclude. You can use + strings with one or more wild cards, represented by an asterisk + (*), to exclude an instance type, size, or generation. The + following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. + For example, if you specify c5*, you are excluding the entire + C5 instance family, which includes all C5a and C5n instance + types. If you specify m5a.*, you are excluding all the M5a + instance types, but not the M5n instance types. Maximum + of 400 entries in the list; each entry is limited to 30 + characters. Default is no excluded instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + instanceGenerations: + description: List of instance generation names. Default is + any generation. + items: + type: string + type: array + x-kubernetes-list-type: set + localStorage: + description: Indicate whether instance types with local storage + volumes are included, excluded, or required. Default is + included. + type: string + localStorageTypes: + description: List of local storage type names. Default any + storage type. + items: + type: string + type: array + x-kubernetes-list-type: set + memoryGibPerVcpu: + description: Block describing the minimum and maximum amount + of memory (GiB) per vCPU. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + memoryMib: + description: Block describing the minimum and maximum amount + of memory (MiB). Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkBandwidthGbps: + description: Block describing the minimum and maximum amount + of network bandwidth, in gigabits per second (Gbps). Default + is no minimum or maximum. 
+ properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkInterfaceCount: + description: Block describing the minimum and maximum number + of network interfaces. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + onDemandMaxPricePercentageOverLowestPrice: + description: The price protection threshold for On-Demand + Instances. This is the maximum you’ll pay for an On-Demand + Instance, expressed as a percentage higher than the cheapest + M, C, or R instance type with your specified attributes. + When Amazon EC2 Auto Scaling selects instance types with + your attributes, we will exclude instance types whose price + is higher than your threshold. The parameter accepts an + integer, which Amazon EC2 Auto Scaling interprets as a percentage. + To turn off price protection, specify a high value, such + as 999999. Default is 20. + type: number + requireHibernateSupport: + description: Indicate whether instance types must support + On-Demand Instance Hibernation, either true or false. Default + is false. + type: boolean + spotMaxPricePercentageOverLowestPrice: + description: The price protection threshold for Spot Instances. + This is the maximum you’ll pay for a Spot Instance, expressed + as a percentage higher than the cheapest M, C, or R instance + type with your specified attributes. When Amazon EC2 Auto + Scaling selects instance types with your attributes, we + will exclude instance types whose price is higher than your + threshold. The parameter accepts an integer, which Amazon + EC2 Auto Scaling interprets as a percentage. To turn off + price protection, specify a high value, such as 999999. + Default is 100. + type: number + totalLocalStorageGb: + description: Block describing the minimum and maximum total + local storage (GB). Default is no minimum or maximum. 
+ properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + vcpuCount: + description: Block describing the minimum and maximum number + of vCPUs. Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + type: object + instanceType: + description: The type of the instance. If present then instance_requirements + cannot be present. + type: string + kernelId: + description: The kernel ID. + type: string + keyName: + description: The key name to use for the instance. + type: string + licenseSpecification: + description: A list of license specifications to associate with. + See License Specification below for more details. + items: + properties: + licenseConfigurationArn: + description: ARN of the license configuration. + type: string + type: object + type: array + maintenanceOptions: + description: The maintenance options for the instance. See Maintenance + Options below for more details. + properties: + autoRecovery: + description: Disables the automatic recovery behavior of your + instance or sets it to default. Can be "default" or "disabled". + See Recover your instance for more details. + type: string + type: object + metadataOptions: + description: Customize the metadata options for the instance. + See Metadata Options below for more details. + properties: + httpEndpoint: + description: 'Whether the metadata service is available. Can + be "enabled" or "disabled". (Default: "enabled").' + type: string + httpProtocolIpv6: + description: Enables or disables the IPv6 endpoint for the + instance metadata service. Can be "enabled" or "disabled". + type: string + httpPutResponseHopLimit: + description: 'The desired HTTP PUT response hop limit for + instance metadata requests. The larger the number, the further + instance metadata requests can travel. Can be an integer + from 1 to 64. (Default: 1).' 
+ type: number + httpTokens: + description: 'Whether or not the metadata service requires + session tokens, also referred to as Instance Metadata Service + Version 2 (IMDSv2). Can be "optional" or "required". (Default: + "optional").' + type: string + instanceMetadataTags: + description: Enables or disables access to instance tags from + the instance metadata service. Can be "enabled" or "disabled". + type: string + type: object + monitoring: + description: The monitoring option for the instance. See Monitoring + below for more details. + properties: + enabled: + description: If true, the launched EC2 instance will have + detailed monitoring enabled. + type: boolean + type: object + name: + description: The name of the launch template. + type: string + networkInterfaces: + description: |- + Customize network interfaces to be attached at instance boot time. See Network + Interfaces below for more details. + items: + properties: + associateCarrierIpAddress: + description: |- + Associate a Carrier IP address with eth0 for a new network interface. + Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. + Boolean value, can be left unset. + type: string + associatePublicIpAddress: + description: |- + Associate a public ip address with the network interface. + Boolean value, can be left unset. + type: string + deleteOnTermination: + description: Whether the network interface should be destroyed + on instance termination. + type: string + description: + description: Description of the network interface. + type: string + deviceIndex: + description: The integer index of the network interface + attachment. + type: number + interfaceType: + description: The type of network interface. To create an + Elastic Fabric Adapter (EFA), specify efa. + type: string + ipv4AddressCount: + description: The number of secondary private IPv4 addresses + to assign to a network interface. 
Conflicts with ipv4_addresses + type: number + ipv4Addresses: + description: One or more private IPv4 addresses to associate. + Conflicts with ipv4_address_count + items: + type: string + type: array + x-kubernetes-list-type: set + ipv4PrefixCount: + description: The number of IPv4 prefixes to be automatically + assigned to the network interface. Conflicts with ipv4_prefixes + type: number + ipv4Prefixes: + description: One or more IPv4 prefixes to be assigned to + the network interface. Conflicts with ipv4_prefix_count + items: + type: string + type: array + x-kubernetes-list-type: set + ipv6AddressCount: + description: The number of IPv6 addresses to assign to a + network interface. Conflicts with ipv6_addresses + type: number + ipv6Addresses: + description: One or more specific IPv6 addresses from the + IPv6 CIDR block range of your subnet. Conflicts with ipv6_address_count + items: + type: string + type: array + x-kubernetes-list-type: set + ipv6PrefixCount: + description: The number of IPv6 prefixes to be automatically + assigned to the network interface. Conflicts with ipv6_prefixes + type: number + ipv6Prefixes: + description: One or more IPv6 prefixes to be assigned to + the network interface. Conflicts with ipv6_prefix_count + items: + type: string + type: array + x-kubernetes-list-type: set + networkCardIndex: + description: The index of the network card. Some instance + types support multiple network cards. The primary network + interface must be assigned to network card index 0. The + default is network card index 0. + type: number + networkInterfaceId: + description: The ID of the network interface to attach. + type: string + networkInterfaceIdRef: + description: Reference to a NetworkInterface in ec2 to populate + networkInterfaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkInterfaceIdSelector: + description: Selector for a NetworkInterface in ec2 to populate + networkInterfaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + privateIpAddress: + description: The primary private IPv4 address. + type: string + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate + securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 + to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: A list of security group IDs to associate. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The VPC Subnet ID to associate. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + placement: + description: The placement of the instance. See Placement below + for more details. + properties: + affinity: + description: The affinity setting for an instance on a Dedicated + Host. + type: string + availabilityZone: + description: The Availability Zone for the instance. + type: string + groupName: + description: The name of the placement group for the instance. + type: string + hostId: + description: The ID of the Dedicated Host for the instance. + type: string + hostResourceGroupArn: + description: The ARN of the Host Resource Group in which to + launch instances. + type: string + partitionNumber: + description: The number of the partition the instance should + launch in. Valid only if the placement group strategy is + set to partition. + type: number + spreadDomain: + description: Reserved for future use. + type: string + tenancy: + description: The tenancy of the instance (if the instance + is running in a VPC). Can be default, dedicated, or host. 
+ type: string + type: object + privateDnsNameOptions: + description: The options for the instance hostname. The default + values are inherited from the subnet. See Private DNS Name Options + below for more details. + properties: + enableResourceNameDnsARecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS A records. + type: boolean + enableResourceNameDnsAaaaRecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS AAAA records. + type: boolean + hostnameType: + description: 'The type of hostname for Amazon EC2 instances. + For IPv4 only subnets, an instance DNS name must be based + on the instance IPv4 address. For IPv6 native subnets, an + instance DNS name must be based on the instance ID. For + dual-stack subnets, you can specify whether DNS names use + the instance IPv4 address or the instance ID. Valid values: + ip-name and resource-name.' + type: string + type: object + ramDiskId: + description: The ID of the RAM disk. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + securityGroupNameRefs: + description: References to SecurityGroup in ec2 to populate securityGroupNames. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupNameSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupNames. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupNames: + description: |- + A list of security group names to associate with. If you are creating Instances in a VPC, use + vpc_security_group_ids instead. + items: + type: string + type: array + x-kubernetes-list-type: set + tagSpecifications: + description: The tags to apply to the resources during launch. + See Tag Specifications below for more details. 
Default tags + are currently not propagated to ASG created resources so you + may wish to inject your default tags into this variable against + the relevant child resource types created. + items: + properties: + resourceType: + description: The type of resource to tag. + type: string + tags: + additionalProperties: + type: string + description: A map of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + updateDefaultVersion: + description: Whether to update Default Version each update. Conflicts + with default_version. + type: boolean + userData: + description: The base64-encoded user data to provide when launching + the instance. + type: string + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: A list of security group IDs to associate with. Conflicts + with network_interfaces.security_groups + items: + type: string + type: array + x-kubernetes-list-type: set + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + blockDeviceMappings: + description: |- + Specify volumes to attach to the instance besides the volumes specified by the AMI. + See Block Devices below for details. + items: + properties: + deviceName: + description: The name of the device to mount. + type: string + ebs: + description: Configure EBS volume properties. + properties: + deleteOnTermination: + description: |- + Whether the volume should be destroyed on instance termination. + See Preserving Amazon EBS Volumes on Instance Termination for more information. + type: string + encrypted: + description: |- + Enables EBS encryption on the volume. + Cannot be used with snapshot_id. + type: string + iops: + description: |- + The amount of provisioned IOPS. + This must be set with a volume_type of "io1/io2/gp3". + type: number + kmsKeyId: + description: |- + The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. + encrypted must be set to true when this is set. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + snapshotId: + description: The Snapshot ID to mount. + type: string + throughput: + description: The throughput to provision for a gp3 volume + in MiB/s (specified as an integer, e.g., 500), with + a maximum of 1,000 MiB/s. + type: number + volumeSize: + description: The size of the volume in gigabytes. + type: number + volumeType: + description: |- + The volume type. 
+ Can be one of standard, gp2, gp3, io1, io2, sc1 or st1. + type: string + type: object + noDevice: + description: Suppresses the specified device included in + the AMI's block device mapping. + type: string + virtualName: + description: |- + The Instance Store Device + Name + (e.g., "ephemeral0"). + type: string + type: object + type: array + capacityReservationSpecification: + description: Targeting for EC2 capacity reservations. See Capacity + Reservation Specification below for more details. + properties: + capacityReservationPreference: + description: Indicates the instance's Capacity Reservation + preferences. Can be open or none. (Default none). + type: string + capacityReservationTarget: + description: 'Used to target a specific Capacity Reservation:' + properties: + capacityReservationId: + description: The ID of the Capacity Reservation in which + to run the instance. + type: string + capacityReservationResourceGroupArn: + description: The ARN of the Capacity Reservation resource + group in which to run the instance. + type: string + type: object + type: object + cpuOptions: + description: The CPU options for the instance. See CPU Options + below for more details. + properties: + amdSevSnp: + description: Indicates whether to enable the instance for + AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and + C6a instance types only. Valid values are enabled and disabled. + type: string + coreCount: + description: The number of CPU cores for the instance. + type: number + threadsPerCore: + description: |- + The number of threads per CPU core. + To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. + Otherwise, specify the default value of 2. + type: number + type: object + creditSpecification: + description: |- + Customize the credit specification of the instance. See Credit + Specification below for more details. + properties: + cpuCredits: + description: |- + The credit option for CPU usage. + Can be standard or unlimited. 
+ T3 instances are launched as unlimited by default. + T2 instances are launched as standard by default. + type: string + type: object + defaultVersion: + description: Default Version of the launch template. + type: number + description: + description: Description of the launch template. + type: string + disableApiStop: + description: If true, enables EC2 Instance Stop Protection. + type: boolean + disableApiTermination: + description: |- + If true, enables EC2 Instance + Termination Protection + type: boolean + ebsOptimized: + description: If true, the launched EC2 instance will be EBS-optimized. + type: string + elasticGpuSpecifications: + description: |- + The elastic GPU to attach to the instance. See Elastic GPU + below for more details. + items: + properties: + type: + description: The Elastic GPU Type + type: string + type: object + type: array + elasticInferenceAccelerator: + description: Configuration block containing an Elastic Inference + Accelerator to attach to the instance. See Elastic Inference + Accelerator below for more details. + properties: + type: + description: Accelerator type. + type: string + type: object + enclaveOptions: + description: Enable Nitro Enclaves on launched instances. See + Enclave Options below for more details. + properties: + enabled: + description: If set to true, Nitro Enclaves will be enabled + on the instance. + type: boolean + type: object + hibernationOptions: + description: The hibernation options for the instance. See Hibernation + Options below for more details. + properties: + configured: + description: If set to true, the launched EC2 instance will + hibernation enabled. + type: boolean + type: object + iamInstanceProfile: + description: |- + The IAM Instance Profile to launch the instance with. See Instance Profile + below for more details. + properties: + arn: + description: The Amazon Resource Name (ARN) of the instance + profile. Conflicts with name. 
+ type: string + arnRef: + description: Reference to a InstanceProfile in iam to populate + arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a InstanceProfile in iam to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the instance profile. + type: string + nameRef: + description: Reference to a InstanceProfile in iam to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a InstanceProfile in iam to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + imageId: + description: The AMI from which to launch the instance. + type: string + instanceInitiatedShutdownBehavior: + description: |- + Shutdown behavior for the instance. Can be stop or terminate. + (Default: stop). + type: string + instanceMarketOptions: + description: |- + The market (purchasing) option for the instance. See Market Options + below for details. + properties: + marketType: + description: The market type. Can be spot. + type: string + spotOptions: + description: The options for Spot Instance + properties: + blockDurationMinutes: + description: The required duration in minutes. This value + must be a multiple of 60. + type: number + instanceInterruptionBehavior: + description: |- + The behavior when a Spot Instance is interrupted. Can be hibernate, + stop, or terminate. (Default: terminate). + type: string + maxPrice: + description: The maximum hourly price you're willing to + pay for the Spot Instances. + type: string + spotInstanceType: + description: The Spot Instance request type. Can be one-time, + or persistent. + type: string + validUntil: + description: The end date of the request. + type: string + type: object + type: object + instanceRequirements: + description: The attribute requirements for the type of instance. + If present then instance_type cannot be present. 
+ properties: + acceleratorCount: + description: Block describing the minimum and maximum number + of accelerators (GPUs, FPGAs, or AWS Inferentia chips). + Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorManufacturers: + description: List of accelerator manufacturer names. Default + is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorNames: + description: List of accelerator names. Default is any acclerator. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorTotalMemoryMib: + description: Block describing the minimum and maximum total + memory of the accelerators. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorTypes: + description: List of accelerator types. Default is any accelerator + type. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedInstanceTypes: + description: 'List of instance types to apply your specified + attributes against. All other instance types are ignored, + even if they match your specified attributes. You can use + strings with one or more wild cards, represented by an asterisk + (*), to allow an instance type, size, or generation. The + following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. + For example, if you specify c5*, you are allowing the entire + C5 instance family, which includes all C5a and C5n instance + types. If you specify m5a.*, you are allowing all the M5a + instance types, but not the M5n instance types. Maximum + of 400 entries in the list; each entry is limited to 30 + characters. Default is all instance types.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + bareMetal: + description: Indicate whether bare metal instace types should + be included, excluded, or required. Default is excluded. + type: string + baselineEbsBandwidthMbps: + description: Block describing the minimum and maximum baseline + EBS bandwidth, in Mbps. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + burstablePerformance: + description: Indicate whether burstable performance instance + types should be included, excluded, or required. Default + is excluded. + type: string + cpuManufacturers: + description: List of CPU manufacturer names. Default is any + manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + excludedInstanceTypes: + description: 'List of instance types to exclude. You can use + strings with one or more wild cards, represented by an asterisk + (*), to exclude an instance type, size, or generation. The + following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. + For example, if you specify c5*, you are excluding the entire + C5 instance family, which includes all C5a and C5n instance + types. If you specify m5a.*, you are excluding all the M5a + instance types, but not the M5n instance types. Maximum + of 400 entries in the list; each entry is limited to 30 + characters. Default is no excluded instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + instanceGenerations: + description: List of instance generation names. Default is + any generation. + items: + type: string + type: array + x-kubernetes-list-type: set + localStorage: + description: Indicate whether instance types with local storage + volumes are included, excluded, or required. Default is + included. + type: string + localStorageTypes: + description: List of local storage type names. Default any + storage type. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + memoryGibPerVcpu: + description: Block describing the minimum and maximum amount + of memory (GiB) per vCPU. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + memoryMib: + description: Block describing the minimum and maximum amount + of memory (MiB). Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkBandwidthGbps: + description: Block describing the minimum and maximum amount + of network bandwidth, in gigabits per second (Gbps). Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkInterfaceCount: + description: Block describing the minimum and maximum number + of network interfaces. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + onDemandMaxPricePercentageOverLowestPrice: + description: The price protection threshold for On-Demand + Instances. This is the maximum you’ll pay for an On-Demand + Instance, expressed as a percentage higher than the cheapest + M, C, or R instance type with your specified attributes. + When Amazon EC2 Auto Scaling selects instance types with + your attributes, we will exclude instance types whose price + is higher than your threshold. The parameter accepts an + integer, which Amazon EC2 Auto Scaling interprets as a percentage. + To turn off price protection, specify a high value, such + as 999999. Default is 20. + type: number + requireHibernateSupport: + description: Indicate whether instance types must support + On-Demand Instance Hibernation, either true or false. Default + is false. 
+ type: boolean + spotMaxPricePercentageOverLowestPrice: + description: The price protection threshold for Spot Instances. + This is the maximum you’ll pay for a Spot Instance, expressed + as a percentage higher than the cheapest M, C, or R instance + type with your specified attributes. When Amazon EC2 Auto + Scaling selects instance types with your attributes, we + will exclude instance types whose price is higher than your + threshold. The parameter accepts an integer, which Amazon + EC2 Auto Scaling interprets as a percentage. To turn off + price protection, specify a high value, such as 999999. + Default is 100. + type: number + totalLocalStorageGb: + description: Block describing the minimum and maximum total + local storage (GB). Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + vcpuCount: + description: Block describing the minimum and maximum number + of vCPUs. Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + type: object + instanceType: + description: The type of the instance. If present then instance_requirements + cannot be present. + type: string + kernelId: + description: The kernel ID. + type: string + keyName: + description: The key name to use for the instance. + type: string + licenseSpecification: + description: A list of license specifications to associate with. + See License Specification below for more details. + items: + properties: + licenseConfigurationArn: + description: ARN of the license configuration. + type: string + type: object + type: array + maintenanceOptions: + description: The maintenance options for the instance. See Maintenance + Options below for more details. + properties: + autoRecovery: + description: Disables the automatic recovery behavior of your + instance or sets it to default. Can be "default" or "disabled". 
+ See Recover your instance for more details. + type: string + type: object + metadataOptions: + description: Customize the metadata options for the instance. + See Metadata Options below for more details. + properties: + httpEndpoint: + description: 'Whether the metadata service is available. Can + be "enabled" or "disabled". (Default: "enabled").' + type: string + httpProtocolIpv6: + description: Enables or disables the IPv6 endpoint for the + instance metadata service. Can be "enabled" or "disabled". + type: string + httpPutResponseHopLimit: + description: 'The desired HTTP PUT response hop limit for + instance metadata requests. The larger the number, the further + instance metadata requests can travel. Can be an integer + from 1 to 64. (Default: 1).' + type: number + httpTokens: + description: 'Whether or not the metadata service requires + session tokens, also referred to as Instance Metadata Service + Version 2 (IMDSv2). Can be "optional" or "required". (Default: + "optional").' + type: string + instanceMetadataTags: + description: Enables or disables access to instance tags from + the instance metadata service. Can be "enabled" or "disabled". + type: string + type: object + monitoring: + description: The monitoring option for the instance. See Monitoring + below for more details. + properties: + enabled: + description: If true, the launched EC2 instance will have + detailed monitoring enabled. + type: boolean + type: object + name: + description: The name of the launch template. + type: string + networkInterfaces: + description: |- + Customize network interfaces to be attached at instance boot time. See Network + Interfaces below for more details. + items: + properties: + associateCarrierIpAddress: + description: |- + Associate a Carrier IP address with eth0 for a new network interface. + Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. + Boolean value, can be left unset. 
+ type: string + associatePublicIpAddress: + description: |- + Associate a public ip address with the network interface. + Boolean value, can be left unset. + type: string + deleteOnTermination: + description: Whether the network interface should be destroyed + on instance termination. + type: string + description: + description: Description of the network interface. + type: string + deviceIndex: + description: The integer index of the network interface + attachment. + type: number + interfaceType: + description: The type of network interface. To create an + Elastic Fabric Adapter (EFA), specify efa. + type: string + ipv4AddressCount: + description: The number of secondary private IPv4 addresses + to assign to a network interface. Conflicts with ipv4_addresses + type: number + ipv4Addresses: + description: One or more private IPv4 addresses to associate. + Conflicts with ipv4_address_count + items: + type: string + type: array + x-kubernetes-list-type: set + ipv4PrefixCount: + description: The number of IPv4 prefixes to be automatically + assigned to the network interface. Conflicts with ipv4_prefixes + type: number + ipv4Prefixes: + description: One or more IPv4 prefixes to be assigned to + the network interface. Conflicts with ipv4_prefix_count + items: + type: string + type: array + x-kubernetes-list-type: set + ipv6AddressCount: + description: The number of IPv6 addresses to assign to a + network interface. Conflicts with ipv6_addresses + type: number + ipv6Addresses: + description: One or more specific IPv6 addresses from the + IPv6 CIDR block range of your subnet. Conflicts with ipv6_address_count + items: + type: string + type: array + x-kubernetes-list-type: set + ipv6PrefixCount: + description: The number of IPv6 prefixes to be automatically + assigned to the network interface. Conflicts with ipv6_prefixes + type: number + ipv6Prefixes: + description: One or more IPv6 prefixes to be assigned to + the network interface. 
Conflicts with ipv6_prefix_count + items: + type: string + type: array + x-kubernetes-list-type: set + networkCardIndex: + description: The index of the network card. Some instance + types support multiple network cards. The primary network + interface must be assigned to network card index 0. The + default is network card index 0. + type: number + networkInterfaceId: + description: The ID of the network interface to attach. + type: string + networkInterfaceIdRef: + description: Reference to a NetworkInterface in ec2 to populate + networkInterfaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkInterfaceIdSelector: + description: Selector for a NetworkInterface in ec2 to populate + networkInterfaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + privateIpAddress: + description: The primary private IPv4 address. + type: string + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate + securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 + to populate securityGroups. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: A list of security group IDs to associate. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The VPC Subnet ID to associate. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + placement: + description: The placement of the instance. See Placement below + for more details. + properties: + affinity: + description: The affinity setting for an instance on a Dedicated + Host. + type: string + availabilityZone: + description: The Availability Zone for the instance. + type: string + groupName: + description: The name of the placement group for the instance. + type: string + hostId: + description: The ID of the Dedicated Host for the instance. 
+ type: string + hostResourceGroupArn: + description: The ARN of the Host Resource Group in which to + launch instances. + type: string + partitionNumber: + description: The number of the partition the instance should + launch in. Valid only if the placement group strategy is + set to partition. + type: number + spreadDomain: + description: Reserved for future use. + type: string + tenancy: + description: The tenancy of the instance (if the instance + is running in a VPC). Can be default, dedicated, or host. + type: string + type: object + privateDnsNameOptions: + description: The options for the instance hostname. The default + values are inherited from the subnet. See Private DNS Name Options + below for more details. + properties: + enableResourceNameDnsARecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS A records. + type: boolean + enableResourceNameDnsAaaaRecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS AAAA records. + type: boolean + hostnameType: + description: 'The type of hostname for Amazon EC2 instances. + For IPv4 only subnets, an instance DNS name must be based + on the instance IPv4 address. For IPv6 native subnets, an + instance DNS name must be based on the instance ID. For + dual-stack subnets, you can specify whether DNS names use + the instance IPv4 address or the instance ID. Valid values: + ip-name and resource-name.' + type: string + type: object + ramDiskId: + description: The ID of the RAM disk. + type: string + securityGroupNameRefs: + description: References to SecurityGroup in ec2 to populate securityGroupNames. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupNameSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupNames. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupNames: + description: |- + A list of security group names to associate with. 
If you are creating Instances in a VPC, use + vpc_security_group_ids instead. + items: + type: string + type: array + x-kubernetes-list-type: set + tagSpecifications: + description: The tags to apply to the resources during launch. + See Tag Specifications below for more details. Default tags + are currently not propagated to ASG created resources so you + may wish to inject your default tags into this variable against + the relevant child resource types created. + items: + properties: + resourceType: + description: The type of resource to tag. + type: string + tags: + additionalProperties: + type: string + description: A map of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + updateDefaultVersion: + description: Whether to update Default Version each update. Conflicts + with default_version. + type: boolean + userData: + description: The base64-encoded user data to provide when launching + the instance. + type: string + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: A list of security group IDs to associate with. Conflicts + with network_interfaces.security_groups + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: LaunchTemplateStatus defines the observed state of LaunchTemplate. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the launch template. + type: string + blockDeviceMappings: + description: |- + Specify volumes to attach to the instance besides the volumes specified by the AMI. + See Block Devices below for details. + items: + properties: + deviceName: + description: The name of the device to mount. + type: string + ebs: + description: Configure EBS volume properties. 
+ properties: + deleteOnTermination: + description: |- + Whether the volume should be destroyed on instance termination. + See Preserving Amazon EBS Volumes on Instance Termination for more information. + type: string + encrypted: + description: |- + Enables EBS encryption on the volume. + Cannot be used with snapshot_id. + type: string + iops: + description: |- + The amount of provisioned IOPS. + This must be set with a volume_type of "io1/io2/gp3". + type: number + kmsKeyId: + description: |- + The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. + encrypted must be set to true when this is set. + type: string + snapshotId: + description: The Snapshot ID to mount. + type: string + throughput: + description: The throughput to provision for a gp3 volume + in MiB/s (specified as an integer, e.g., 500), with + a maximum of 1,000 MiB/s. + type: number + volumeSize: + description: The size of the volume in gigabytes. + type: number + volumeType: + description: |- + The volume type. + Can be one of standard, gp2, gp3, io1, io2, sc1 or st1. + type: string + type: object + noDevice: + description: Suppresses the specified device included in + the AMI's block device mapping. + type: string + virtualName: + description: |- + The Instance Store Device + Name + (e.g., "ephemeral0"). + type: string + type: object + type: array + capacityReservationSpecification: + description: Targeting for EC2 capacity reservations. See Capacity + Reservation Specification below for more details. + properties: + capacityReservationPreference: + description: Indicates the instance's Capacity Reservation + preferences. Can be open or none. (Default none). + type: string + capacityReservationTarget: + description: 'Used to target a specific Capacity Reservation:' + properties: + capacityReservationId: + description: The ID of the Capacity Reservation in which + to run the instance. 
+ type: string + capacityReservationResourceGroupArn: + description: The ARN of the Capacity Reservation resource + group in which to run the instance. + type: string + type: object + type: object + cpuOptions: + description: The CPU options for the instance. See CPU Options + below for more details. + properties: + amdSevSnp: + description: Indicates whether to enable the instance for + AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and + C6a instance types only. Valid values are enabled and disabled. + type: string + coreCount: + description: The number of CPU cores for the instance. + type: number + threadsPerCore: + description: |- + The number of threads per CPU core. + To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. + Otherwise, specify the default value of 2. + type: number + type: object + creditSpecification: + description: |- + Customize the credit specification of the instance. See Credit + Specification below for more details. + properties: + cpuCredits: + description: |- + The credit option for CPU usage. + Can be standard or unlimited. + T3 instances are launched as unlimited by default. + T2 instances are launched as standard by default. + type: string + type: object + defaultVersion: + description: Default Version of the launch template. + type: number + description: + description: Description of the launch template. + type: string + disableApiStop: + description: If true, enables EC2 Instance Stop Protection. + type: boolean + disableApiTermination: + description: |- + If true, enables EC2 Instance + Termination Protection + type: boolean + ebsOptimized: + description: If true, the launched EC2 instance will be EBS-optimized. + type: string + elasticGpuSpecifications: + description: |- + The elastic GPU to attach to the instance. See Elastic GPU + below for more details. 
+ items:
+ properties:
+ type:
+ description: The Elastic GPU Type
+ type: string
+ type: object
+ type: array
+ elasticInferenceAccelerator:
+ description: Configuration block containing an Elastic Inference
+ Accelerator to attach to the instance. See Elastic Inference
+ Accelerator below for more details.
+ properties:
+ type:
+ description: Accelerator type.
+ type: string
+ type: object
+ enclaveOptions:
+ description: Enable Nitro Enclaves on launched instances. See
+ Enclave Options below for more details.
+ properties:
+ enabled:
+ description: If set to true, Nitro Enclaves will be enabled
+ on the instance.
+ type: boolean
+ type: object
+ hibernationOptions:
+ description: The hibernation options for the instance. See Hibernation
+ Options below for more details.
+ properties:
+ configured:
+ description: If set to true, the launched EC2 instance will
+ have hibernation enabled.
+ type: boolean
+ type: object
+ iamInstanceProfile:
+ description: |-
+ The IAM Instance Profile to launch the instance with. See Instance Profile
+ below for more details.
+ properties:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance
+ profile. Conflicts with name.
+ type: string
+ name:
+ description: The name of the instance profile.
+ type: string
+ type: object
+ id:
+ description: The ID of the launch template.
+ type: string
+ imageId:
+ description: The AMI from which to launch the instance.
+ type: string
+ instanceInitiatedShutdownBehavior:
+ description: |-
+ Shutdown behavior for the instance. Can be stop or terminate.
+ (Default: stop).
+ type: string
+ instanceMarketOptions:
+ description: |-
+ The market (purchasing) option for the instance. See Market Options
+ below for details.
+ properties:
+ marketType:
+ description: The market type. Can be spot.
+ type: string
+ spotOptions:
+ description: The options for Spot Instance
+ properties:
+ blockDurationMinutes:
+ description: The required duration in minutes. 
This value
+ must be a multiple of 60.
+ type: number
+ instanceInterruptionBehavior:
+ description: |-
+ The behavior when a Spot Instance is interrupted. Can be hibernate,
+ stop, or terminate. (Default: terminate).
+ type: string
+ maxPrice:
+ description: The maximum hourly price you're willing to
+ pay for the Spot Instances.
+ type: string
+ spotInstanceType:
+ description: The Spot Instance request type. Can be one-time,
+ or persistent.
+ type: string
+ validUntil:
+ description: The end date of the request.
+ type: string
+ type: object
+ type: object
+ instanceRequirements:
+ description: The attribute requirements for the type of instance.
+ If present then instance_type cannot be present.
+ properties:
+ acceleratorCount:
+ description: Block describing the minimum and maximum number
+ of accelerators (GPUs, FPGAs, or AWS Inferentia chips).
+ Default is no minimum or maximum.
+ properties:
+ max:
+ description: Maximum.
+ type: number
+ min:
+ description: Minimum.
+ type: number
+ type: object
+ acceleratorManufacturers:
+ description: List of accelerator manufacturer names. Default
+ is any manufacturer.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ acceleratorNames:
+ description: List of accelerator names. Default is any accelerator.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ acceleratorTotalMemoryMib:
+ description: Block describing the minimum and maximum total
+ memory of the accelerators. Default is no minimum or maximum.
+ properties:
+ max:
+ description: Maximum.
+ type: number
+ min:
+ description: Minimum.
+ type: number
+ type: object
+ acceleratorTypes:
+ description: List of accelerator types. Default is any accelerator
+ type.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ allowedInstanceTypes:
+ description: 'List of instance types to apply your specified
+ attributes against. All other instance types are ignored,
+ even if they match your specified attributes. 
You can use
+ strings with one or more wild cards, represented by an asterisk
+ (*), to allow an instance type, size, or generation. The
+ following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*.
+ For example, if you specify c5*, you are allowing the entire
+ C5 instance family, which includes all C5a and C5n instance
+ types. If you specify m5a.*, you are allowing all the M5a
+ instance types, but not the M5n instance types. Maximum
+ of 400 entries in the list; each entry is limited to 30
+ characters. Default is all instance types.'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ bareMetal:
+ description: Indicate whether bare metal instance types should
+ be included, excluded, or required. Default is excluded.
+ type: string
+ baselineEbsBandwidthMbps:
+ description: Block describing the minimum and maximum baseline
+ EBS bandwidth, in Mbps. Default is no minimum or maximum.
+ properties:
+ max:
+ description: Maximum.
+ type: number
+ min:
+ description: Minimum.
+ type: number
+ type: object
+ burstablePerformance:
+ description: Indicate whether burstable performance instance
+ types should be included, excluded, or required. Default
+ is excluded.
+ type: string
+ cpuManufacturers:
+ description: List of CPU manufacturer names. Default is any
+ manufacturer.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: set
+ excludedInstanceTypes:
+ description: 'List of instance types to exclude. You can use
+ strings with one or more wild cards, represented by an asterisk
+ (*), to exclude an instance type, size, or generation. The
+ following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*.
+ For example, if you specify c5*, you are excluding the entire
+ C5 instance family, which includes all C5a and C5n instance
+ types. If you specify m5a.*, you are excluding all the M5a
+ instance types, but not the M5n instance types. Maximum
+ of 400 entries in the list; each entry is limited to 30
+ characters. 
Default is no excluded instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + instanceGenerations: + description: List of instance generation names. Default is + any generation. + items: + type: string + type: array + x-kubernetes-list-type: set + localStorage: + description: Indicate whether instance types with local storage + volumes are included, excluded, or required. Default is + included. + type: string + localStorageTypes: + description: List of local storage type names. Default any + storage type. + items: + type: string + type: array + x-kubernetes-list-type: set + memoryGibPerVcpu: + description: Block describing the minimum and maximum amount + of memory (GiB) per vCPU. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + memoryMib: + description: Block describing the minimum and maximum amount + of memory (MiB). Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkBandwidthGbps: + description: Block describing the minimum and maximum amount + of network bandwidth, in gigabits per second (Gbps). Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkInterfaceCount: + description: Block describing the minimum and maximum number + of network interfaces. Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + onDemandMaxPricePercentageOverLowestPrice: + description: The price protection threshold for On-Demand + Instances. This is the maximum you’ll pay for an On-Demand + Instance, expressed as a percentage higher than the cheapest + M, C, or R instance type with your specified attributes. 
+ When Amazon EC2 Auto Scaling selects instance types with + your attributes, we will exclude instance types whose price + is higher than your threshold. The parameter accepts an + integer, which Amazon EC2 Auto Scaling interprets as a percentage. + To turn off price protection, specify a high value, such + as 999999. Default is 20. + type: number + requireHibernateSupport: + description: Indicate whether instance types must support + On-Demand Instance Hibernation, either true or false. Default + is false. + type: boolean + spotMaxPricePercentageOverLowestPrice: + description: The price protection threshold for Spot Instances. + This is the maximum you’ll pay for a Spot Instance, expressed + as a percentage higher than the cheapest M, C, or R instance + type with your specified attributes. When Amazon EC2 Auto + Scaling selects instance types with your attributes, we + will exclude instance types whose price is higher than your + threshold. The parameter accepts an integer, which Amazon + EC2 Auto Scaling interprets as a percentage. To turn off + price protection, specify a high value, such as 999999. + Default is 100. + type: number + totalLocalStorageGb: + description: Block describing the minimum and maximum total + local storage (GB). Default is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + vcpuCount: + description: Block describing the minimum and maximum number + of vCPUs. Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + type: object + instanceType: + description: The type of the instance. If present then instance_requirements + cannot be present. + type: string + kernelId: + description: The kernel ID. + type: string + keyName: + description: The key name to use for the instance. 
+ type: string + latestVersion: + description: The latest version of the launch template. + type: number + licenseSpecification: + description: A list of license specifications to associate with. + See License Specification below for more details. + items: + properties: + licenseConfigurationArn: + description: ARN of the license configuration. + type: string + type: object + type: array + maintenanceOptions: + description: The maintenance options for the instance. See Maintenance + Options below for more details. + properties: + autoRecovery: + description: Disables the automatic recovery behavior of your + instance or sets it to default. Can be "default" or "disabled". + See Recover your instance for more details. + type: string + type: object + metadataOptions: + description: Customize the metadata options for the instance. + See Metadata Options below for more details. + properties: + httpEndpoint: + description: 'Whether the metadata service is available. Can + be "enabled" or "disabled". (Default: "enabled").' + type: string + httpProtocolIpv6: + description: Enables or disables the IPv6 endpoint for the + instance metadata service. Can be "enabled" or "disabled". + type: string + httpPutResponseHopLimit: + description: 'The desired HTTP PUT response hop limit for + instance metadata requests. The larger the number, the further + instance metadata requests can travel. Can be an integer + from 1 to 64. (Default: 1).' + type: number + httpTokens: + description: 'Whether or not the metadata service requires + session tokens, also referred to as Instance Metadata Service + Version 2 (IMDSv2). Can be "optional" or "required". (Default: + "optional").' + type: string + instanceMetadataTags: + description: Enables or disables access to instance tags from + the instance metadata service. Can be "enabled" or "disabled". + type: string + type: object + monitoring: + description: The monitoring option for the instance. See Monitoring + below for more details. 
+ properties: + enabled: + description: If true, the launched EC2 instance will have + detailed monitoring enabled. + type: boolean + type: object + name: + description: The name of the launch template. + type: string + networkInterfaces: + description: |- + Customize network interfaces to be attached at instance boot time. See Network + Interfaces below for more details. + items: + properties: + associateCarrierIpAddress: + description: |- + Associate a Carrier IP address with eth0 for a new network interface. + Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. + Boolean value, can be left unset. + type: string + associatePublicIpAddress: + description: |- + Associate a public ip address with the network interface. + Boolean value, can be left unset. + type: string + deleteOnTermination: + description: Whether the network interface should be destroyed + on instance termination. + type: string + description: + description: Description of the network interface. + type: string + deviceIndex: + description: The integer index of the network interface + attachment. + type: number + interfaceType: + description: The type of network interface. To create an + Elastic Fabric Adapter (EFA), specify efa. + type: string + ipv4AddressCount: + description: The number of secondary private IPv4 addresses + to assign to a network interface. Conflicts with ipv4_addresses + type: number + ipv4Addresses: + description: One or more private IPv4 addresses to associate. + Conflicts with ipv4_address_count + items: + type: string + type: array + x-kubernetes-list-type: set + ipv4PrefixCount: + description: The number of IPv4 prefixes to be automatically + assigned to the network interface. Conflicts with ipv4_prefixes + type: number + ipv4Prefixes: + description: One or more IPv4 prefixes to be assigned to + the network interface. 
Conflicts with ipv4_prefix_count + items: + type: string + type: array + x-kubernetes-list-type: set + ipv6AddressCount: + description: The number of IPv6 addresses to assign to a + network interface. Conflicts with ipv6_addresses + type: number + ipv6Addresses: + description: One or more specific IPv6 addresses from the + IPv6 CIDR block range of your subnet. Conflicts with ipv6_address_count + items: + type: string + type: array + x-kubernetes-list-type: set + ipv6PrefixCount: + description: The number of IPv6 prefixes to be automatically + assigned to the network interface. Conflicts with ipv6_prefixes + type: number + ipv6Prefixes: + description: One or more IPv6 prefixes to be assigned to + the network interface. Conflicts with ipv6_prefix_count + items: + type: string + type: array + x-kubernetes-list-type: set + networkCardIndex: + description: The index of the network card. Some instance + types support multiple network cards. The primary network + interface must be assigned to network card index 0. The + default is network card index 0. + type: number + networkInterfaceId: + description: The ID of the network interface to attach. + type: string + privateIpAddress: + description: The primary private IPv4 address. + type: string + securityGroups: + description: A list of security group IDs to associate. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The VPC Subnet ID to associate. + type: string + type: object + type: array + placement: + description: The placement of the instance. See Placement below + for more details. + properties: + affinity: + description: The affinity setting for an instance on a Dedicated + Host. + type: string + availabilityZone: + description: The Availability Zone for the instance. + type: string + groupName: + description: The name of the placement group for the instance. + type: string + hostId: + description: The ID of the Dedicated Host for the instance. 
+ type: string + hostResourceGroupArn: + description: The ARN of the Host Resource Group in which to + launch instances. + type: string + partitionNumber: + description: The number of the partition the instance should + launch in. Valid only if the placement group strategy is + set to partition. + type: number + spreadDomain: + description: Reserved for future use. + type: string + tenancy: + description: The tenancy of the instance (if the instance + is running in a VPC). Can be default, dedicated, or host. + type: string + type: object + privateDnsNameOptions: + description: The options for the instance hostname. The default + values are inherited from the subnet. See Private DNS Name Options + below for more details. + properties: + enableResourceNameDnsARecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS A records. + type: boolean + enableResourceNameDnsAaaaRecord: + description: Indicates whether to respond to DNS queries for + instance hostnames with DNS AAAA records. + type: boolean + hostnameType: + description: 'The type of hostname for Amazon EC2 instances. + For IPv4 only subnets, an instance DNS name must be based + on the instance IPv4 address. For IPv6 native subnets, an + instance DNS name must be based on the instance ID. For + dual-stack subnets, you can specify whether DNS names use + the instance IPv4 address or the instance ID. Valid values: + ip-name and resource-name.' + type: string + type: object + ramDiskId: + description: The ID of the RAM disk. + type: string + securityGroupNames: + description: |- + A list of security group names to associate with. If you are creating Instances in a VPC, use + vpc_security_group_ids instead. + items: + type: string + type: array + x-kubernetes-list-type: set + tagSpecifications: + description: The tags to apply to the resources during launch. + See Tag Specifications below for more details. 
Default tags + are currently not propagated to ASG created resources so you + may wish to inject your default tags into this variable against + the relevant child resource types created. + items: + properties: + resourceType: + description: The type of resource to tag. + type: string + tags: + additionalProperties: + type: string + description: A map of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + updateDefaultVersion: + description: Whether to update Default Version each update. Conflicts + with default_version. + type: boolean + userData: + description: The base64-encoded user data to provide when launching + the instance. + type: string + vpcSecurityGroupIds: + description: A list of security group IDs to associate with. Conflicts + with network_interfaces.security_groups + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_spotfleetrequests.yaml b/package/crds/ec2.aws.upbound.io_spotfleetrequests.yaml index 002c869b14..c988f0f941 100644 --- a/package/crds/ec2.aws.upbound.io_spotfleetrequests.yaml +++ b/package/crds/ec2.aws.upbound.io_spotfleetrequests.yaml @@ -2570,3 +2570,2474 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpotFleetRequest is the Schema for the SpotFleetRequests API. 
+ Provides a Spot Fleet Request resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpotFleetRequestSpec defines the desired state of SpotFleetRequest + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allocationStrategy: + description: |- + Indicates how to allocate the target capacity across + the Spot pools specified by the Spot fleet request. Valid values: lowestPrice, diversified, capacityOptimized, capacityOptimizedPrioritized, and priceCapacityOptimized. The default is + lowestPrice. + type: string + context: + description: Reserved. 
+ type: string + excessCapacityTerminationPolicy: + description: |- + Indicates whether running Spot + instances should be terminated if the target capacity of the Spot fleet + request is decreased below the current size of the Spot fleet. + type: string + fleetType: + description: |- + The type of fleet request. Indicates whether the Spot Fleet only requests the target + capacity or also attempts to maintain it. Default is maintain. + type: string + iamFleetRole: + description: |- + Grants the Spot fleet permission to terminate + Spot instances on your behalf when you cancel its Spot fleet request using + CancelSpotFleetRequests or when the Spot fleet request expires, if you set + terminateInstancesWithExpiration. + type: string + instanceInterruptionBehaviour: + description: |- + Indicates whether a Spot + instance stops or terminates when it is interrupted. Default is + terminate. + type: string + instancePoolsToUseCount: + description: |- + The number of Spot pools across which to allocate your target Spot capacity. + Valid only when allocation_strategy is set to lowestPrice. Spot Fleet selects + the cheapest Spot pools and evenly allocates your target Spot capacity across + the number of Spot pools that you specify. + type: number + launchSpecification: + description: |- + Used to define the launch configuration of the + spot-fleet request. Can be specified multiple times to define different bids + across different markets and instance types. Conflicts with launch_template_config. At least one of launch_specification or launch_template_config is required. + items: + properties: + ami: + type: string + associatePublicIpAddress: + type: boolean + availabilityZone: + description: The availability zone in which to place the + request. + type: string + ebsBlockDevice: + items: + properties: + deleteOnTermination: + type: boolean + deviceName: + description: The name of the launch template. Conflicts + with id. 
+ type: string + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The ID of the launch template. Conflicts + with name. + type: string + snapshotId: + description: The ID of the launch template. Conflicts + with name. + type: string + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + type: array + ebsOptimized: + type: boolean + ephemeralBlockDevice: + items: + properties: + deviceName: + description: The name of the launch template. Conflicts + with id. + type: string + virtualName: + description: The name of the launch template. Conflicts + with id. + type: string + type: object + type: array + iamInstanceProfile: + type: string + iamInstanceProfileArn: + description: takes aws_iam_instance_profile attribute arn + as input. + type: string + iamInstanceProfileArnRef: + description: Reference to a InstanceProfile in iam to populate + iamInstanceProfileArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamInstanceProfileArnSelector: + description: Selector for a InstanceProfile in iam to populate + iamInstanceProfileArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + instanceType: + description: The type of instance to request. + type: string + keyName: + description: The name of the launch template. Conflicts + with id. + type: string + monitoring: + type: boolean + placementGroup: + type: string + placementTenancy: + type: string + rootBlockDevice: + items: + properties: + deleteOnTermination: + type: boolean + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The ID of the launch template. Conflicts + with name. + type: string + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + type: array + spotPrice: + description: The maximum bid price per unit hour. + type: string + subnetId: + description: The subnet in which to launch the requested + instance. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + userData: + type: string + vpcSecurityGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + weightedCapacity: + description: The capacity added to the fleet by a fulfilled + request. + type: string + type: object + type: array + launchTemplateConfig: + description: Launch template configuration block. See Launch Template + Configs below for more details. Conflicts with launch_specification. + At least one of launch_specification or launch_template_config + is required. + items: + properties: + launchTemplateSpecification: + description: Launch template specification. See Launch Template + Specification below for more details. + properties: + id: + description: The ID of the launch template. Conflicts + with name. + type: string + idRef: + description: Reference to a LaunchTemplate in ec2 to + populate id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a LaunchTemplate in ec2 to + populate id. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the launch template. Conflicts + with id. + type: string + version: + description: Template version. Unlike the autoscaling + equivalent, does not support $Latest or $Default, + so use the launch_template resource's attribute, e.g., + "${aws_launch_template.foo.latest_version}". It will + use the default version if omitted. + type: string + versionRef: + description: Reference to a LaunchTemplate in ec2 to + populate version. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + versionSelector: + description: Selector for a LaunchTemplate in ec2 to + populate version. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + overrides: + description: One or more override configurations. See Overrides + below for more details. + items: + properties: + availabilityZone: + description: The availability zone in which to place + the request. 
+ type: string + instanceRequirements: + description: The instance requirements. See below. + properties: + acceleratorCount: + description: Block describing the minimum and + maximum number of accelerators (GPUs, FPGAs, + or AWS Inferentia chips). Default is no minimum + or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorManufacturers: + description: List of accelerator manufacturer + names. Default is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorNames: + description: List of accelerator names. Default + is any acclerator. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorTotalMemoryMib: + description: Block describing the minimum and + maximum total memory of the accelerators. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorTypes: + description: List of accelerator types. Default + is any accelerator type. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedInstanceTypes: + description: 'List of instance types to apply + your specified attributes against. All other + instance types are ignored, even if they match + your specified attributes. You can use strings + with one or more wild cards, represented by + an asterisk (*), to allow an instance type, + size, or generation. The following are examples: + m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, + if you specify c5*, you are allowing the entire + C5 instance family, which includes all C5a and + C5n instance types. If you specify m5a.*, you + are allowing all the M5a instance types, but + not the M5n instance types. Maximum of 400 entries + in the list; each entry is limited to 30 characters. + Default is all instance types.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + bareMetal: + description: Indicate whether bare metal instace + types should be included, excluded, or required. + Default is excluded. + type: string + baselineEbsBandwidthMbps: + description: Block describing the minimum and + maximum baseline EBS bandwidth, in Mbps. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + burstablePerformance: + description: Indicate whether burstable performance + instance types should be included, excluded, + or required. Default is excluded. + type: string + cpuManufacturers: + description: List of CPU manufacturer names. Default + is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + excludedInstanceTypes: + description: 'List of instance types to exclude. + You can use strings with one or more wild cards, + represented by an asterisk (*), to exclude an + instance type, size, or generation. The following + are examples: m5.8xlarge, c5*.*, m5a.*, r*, + *3*. For example, if you specify c5*, you are + excluding the entire C5 instance family, which + includes all C5a and C5n instance types. If + you specify m5a.*, you are excluding all the + M5a instance types, but not the M5n instance + types. Maximum of 400 entries in the list; each + entry is limited to 30 characters. Default is + no excluded instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + instanceGenerations: + description: List of instance generation names. + Default is any generation. + items: + type: string + type: array + x-kubernetes-list-type: set + localStorage: + description: Indicate whether instance types with + local storage volumes are included, excluded, + or required. Default is included. + type: string + localStorageTypes: + description: List of local storage type names. + Default any storage type. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + memoryGibPerVcpu: + description: Block describing the minimum and + maximum amount of memory (GiB) per vCPU. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + memoryMib: + description: Block describing the minimum and + maximum amount of memory (MiB). Default is no + maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkBandwidthGbps: + description: Block describing the minimum and + maximum amount of network bandwidth, in gigabits + per second (Gbps). Default is no minimum or + maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkInterfaceCount: + description: Block describing the minimum and + maximum number of network interfaces. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + onDemandMaxPricePercentageOverLowestPrice: + description: The price protection threshold for + On-Demand Instances. This is the maximum you’ll + pay for an On-Demand Instance, expressed as + a percentage higher than the cheapest M, C, + or R instance type with your specified attributes. + When Amazon EC2 Auto Scaling selects instance + types with your attributes, we will exclude + instance types whose price is higher than your + threshold. The parameter accepts an integer, + which Amazon EC2 Auto Scaling interprets as + a percentage. To turn off price protection, + specify a high value, such as 999999. Default + is 20. + type: number + requireHibernateSupport: + description: Indicate whether instance types must + support On-Demand Instance Hibernation, either + true or false. Default is false. 
+ type: boolean + spotMaxPricePercentageOverLowestPrice: + description: The price protection threshold for + Spot Instances. This is the maximum you’ll pay + for a Spot Instance, expressed as a percentage + higher than the cheapest M, C, or R instance + type with your specified attributes. When Amazon + EC2 Auto Scaling selects instance types with + your attributes, we will exclude instance types + whose price is higher than your threshold. The + parameter accepts an integer, which Amazon EC2 + Auto Scaling interprets as a percentage. To + turn off price protection, specify a high value, + such as 999999. Default is 100. + type: number + totalLocalStorageGb: + description: Block describing the minimum and + maximum total local storage (GB). Default is + no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + vcpuCount: + description: Block describing the minimum and + maximum number of vCPUs. Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + type: object + instanceType: + description: The type of instance to request. + type: string + priority: + description: The priority for the launch template + override. The lower the number, the higher the priority. + If no number is set, the launch template override + has the lowest priority. + type: number + spotPrice: + description: The maximum bid price per unit hour. + type: string + subnetId: + description: The subnet in which to launch the requested + instance. + type: string + weightedCapacity: + description: The capacity added to the fleet by a + fulfilled request. + type: number + type: object + type: array + type: object + type: array + loadBalancers: + description: A list of elastic load balancer names to add to the + Spot fleet. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + onDemandAllocationStrategy: + description: 'The order of the launch template overrides to use + in fulfilling On-Demand capacity. the possible values are: lowestPrice + and prioritized. the default is lowestPrice.' + type: string + onDemandMaxTotalPrice: + description: The maximum amount per hour for On-Demand Instances + that you're willing to pay. When the maximum amount you're willing + to pay is reached, the fleet stops launching instances even + if it hasn’t met the target capacity. + type: string + onDemandTargetCapacity: + description: The number of On-Demand units to request. If the + request type is maintain, you can specify a target capacity + of 0 and add capacity later. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + replaceUnhealthyInstances: + description: Indicates whether Spot fleet should replace unhealthy + instances. Default false. + type: boolean + spotMaintenanceStrategies: + description: Nested argument containing maintenance strategies + for managing your Spot Instances that are at an elevated risk + of being interrupted. Defined below. + properties: + capacityRebalance: + description: Nested argument containing the capacity rebalance + for your fleet request. Defined below. + properties: + replacementStrategy: + description: 'The replacement strategy to use. Only available + for spot fleets with fleet_type set to maintain. Valid + values: launch.' + type: string + type: object + type: object + spotPrice: + description: The maximum bid price per unit hour. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + targetCapacity: + description: |- + The number of units to request. 
You can choose to set the + target capacity in terms of instances or a performance characteristic that is + important to your application workload, such as vCPUs, memory, or I/O. + type: number + targetCapacityUnitType: + description: The unit for the target capacity. This can only be + done with instance_requirements defined + type: string + targetGroupArns: + description: A list of aws_alb_target_group ARNs, for use with + Application Load Balancing. + items: + type: string + type: array + x-kubernetes-list-type: set + terminateInstancesOnDelete: + description: |- + Indicates whether running Spot + instances should be terminated when the resource is deleted (and the Spot fleet request cancelled). + If no value is specified, the value of the terminate_instances_with_expiration argument is used. + type: string + terminateInstancesWithExpiration: + description: |- + Indicates whether running Spot + instances should be terminated when the Spot fleet request expires. + type: boolean + validFrom: + description: The start date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to + start fulfilling the request immediately. + type: string + validUntil: + description: The end date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no + new Spot instance requests are placed or enabled to fulfill + the request. + type: string + waitForFulfillment: + type: boolean + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allocationStrategy: + description: |- + Indicates how to allocate the target capacity across + the Spot pools specified by the Spot fleet request. Valid values: lowestPrice, diversified, capacityOptimized, capacityOptimizedPrioritized, and priceCapacityOptimized. The default is + lowestPrice. + type: string + context: + description: Reserved. + type: string + excessCapacityTerminationPolicy: + description: |- + Indicates whether running Spot + instances should be terminated if the target capacity of the Spot fleet + request is decreased below the current size of the Spot fleet. + type: string + fleetType: + description: |- + The type of fleet request. Indicates whether the Spot Fleet only requests the target + capacity or also attempts to maintain it. Default is maintain. + type: string + iamFleetRole: + description: |- + Grants the Spot fleet permission to terminate + Spot instances on your behalf when you cancel its Spot fleet request using + CancelSpotFleetRequests or when the Spot fleet request expires, if you set + terminateInstancesWithExpiration. + type: string + instanceInterruptionBehaviour: + description: |- + Indicates whether a Spot + instance stops or terminates when it is interrupted. Default is + terminate. + type: string + instancePoolsToUseCount: + description: |- + The number of Spot pools across which to allocate your target Spot capacity. + Valid only when allocation_strategy is set to lowestPrice. Spot Fleet selects + the cheapest Spot pools and evenly allocates your target Spot capacity across + the number of Spot pools that you specify. + type: number + launchSpecification: + description: |- + Used to define the launch configuration of the + spot-fleet request. 
Can be specified multiple times to define different bids + across different markets and instance types. Conflicts with launch_template_config. At least one of launch_specification or launch_template_config is required. + items: + properties: + ami: + type: string + associatePublicIpAddress: + type: boolean + availabilityZone: + description: The availability zone in which to place the + request. + type: string + ebsBlockDevice: + items: + properties: + deleteOnTermination: + type: boolean + deviceName: + description: The name of the launch template. Conflicts + with id. + type: string + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The ID of the launch template. Conflicts + with name. + type: string + snapshotId: + description: The ID of the launch template. Conflicts + with name. + type: string + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + type: array + ebsOptimized: + type: boolean + ephemeralBlockDevice: + items: + properties: + deviceName: + description: The name of the launch template. Conflicts + with id. + type: string + virtualName: + description: The name of the launch template. Conflicts + with id. + type: string + type: object + type: array + iamInstanceProfile: + type: string + iamInstanceProfileArn: + description: takes aws_iam_instance_profile attribute arn + as input. + type: string + iamInstanceProfileArnRef: + description: Reference to a InstanceProfile in iam to populate + iamInstanceProfileArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamInstanceProfileArnSelector: + description: Selector for a InstanceProfile in iam to populate + iamInstanceProfileArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + instanceType: + description: The type of instance to request. + type: string + keyName: + description: The name of the launch template. Conflicts + with id. 
+ type: string + monitoring: + type: boolean + placementGroup: + type: string + placementTenancy: + type: string + rootBlockDevice: + items: + properties: + deleteOnTermination: + type: boolean + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The ID of the launch template. Conflicts + with name. + type: string + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + type: array + spotPrice: + description: The maximum bid price per unit hour. + type: string + subnetId: + description: The subnet in which to launch the requested + instance. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userData: + type: string + vpcSecurityGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + weightedCapacity: + description: The capacity added to the fleet by a fulfilled + request. + type: string + type: object + type: array + launchTemplateConfig: + description: Launch template configuration block. See Launch Template + Configs below for more details. Conflicts with launch_specification. + At least one of launch_specification or launch_template_config + is required. + items: + properties: + launchTemplateSpecification: + description: Launch template specification. See Launch Template + Specification below for more details. + properties: + id: + description: The ID of the launch template. Conflicts + with name. + type: string + idRef: + description: Reference to a LaunchTemplate in ec2 to + populate id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a LaunchTemplate in ec2 to + populate id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the launch template. Conflicts + with id. + type: string + version: + description: Template version. 
Unlike the autoscaling + equivalent, does not support $Latest or $Default, + so use the launch_template resource's attribute, e.g., + "${aws_launch_template.foo.latest_version}". It will + use the default version if omitted. + type: string + versionRef: + description: Reference to a LaunchTemplate in ec2 to + populate version. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + versionSelector: + description: Selector for a LaunchTemplate in ec2 to + populate version. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + overrides: + description: One or more override configurations. See Overrides + below for more details. + items: + properties: + availabilityZone: + description: The availability zone in which to place + the request. + type: string + instanceRequirements: + description: The instance requirements. See below. + properties: + acceleratorCount: + description: Block describing the minimum and + maximum number of accelerators (GPUs, FPGAs, + or AWS Inferentia chips). Default is no minimum + or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorManufacturers: + description: List of accelerator manufacturer + names. Default is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorNames: + description: List of accelerator names. Default + is any acclerator. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorTotalMemoryMib: + description: Block describing the minimum and + maximum total memory of the accelerators. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorTypes: + description: List of accelerator types. Default + is any accelerator type. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + allowedInstanceTypes: + description: 'List of instance types to apply + your specified attributes against. All other + instance types are ignored, even if they match + your specified attributes. You can use strings + with one or more wild cards, represented by + an asterisk (*), to allow an instance type, + size, or generation. The following are examples: + m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, + if you specify c5*, you are allowing the entire + C5 instance family, which includes all C5a and + C5n instance types. If you specify m5a.*, you + are allowing all the M5a instance types, but + not the M5n instance types. Maximum of 400 entries + in the list; each entry is limited to 30 characters. + Default is all instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + bareMetal: + description: Indicate whether bare metal instace + types should be included, excluded, or required. + Default is excluded. + type: string + baselineEbsBandwidthMbps: + description: Block describing the minimum and + maximum baseline EBS bandwidth, in Mbps. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + burstablePerformance: + description: Indicate whether burstable performance + instance types should be included, excluded, + or required. Default is excluded. + type: string + cpuManufacturers: + description: List of CPU manufacturer names. Default + is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + excludedInstanceTypes: + description: 'List of instance types to exclude. + You can use strings with one or more wild cards, + represented by an asterisk (*), to exclude an + instance type, size, or generation. The following + are examples: m5.8xlarge, c5*.*, m5a.*, r*, + *3*. 
For example, if you specify c5*, you are + excluding the entire C5 instance family, which + includes all C5a and C5n instance types. If + you specify m5a.*, you are excluding all the + M5a instance types, but not the M5n instance + types. Maximum of 400 entries in the list; each + entry is limited to 30 characters. Default is + no excluded instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + instanceGenerations: + description: List of instance generation names. + Default is any generation. + items: + type: string + type: array + x-kubernetes-list-type: set + localStorage: + description: Indicate whether instance types with + local storage volumes are included, excluded, + or required. Default is included. + type: string + localStorageTypes: + description: List of local storage type names. + Default any storage type. + items: + type: string + type: array + x-kubernetes-list-type: set + memoryGibPerVcpu: + description: Block describing the minimum and + maximum amount of memory (GiB) per vCPU. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + memoryMib: + description: Block describing the minimum and + maximum amount of memory (MiB). Default is no + maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkBandwidthGbps: + description: Block describing the minimum and + maximum amount of network bandwidth, in gigabits + per second (Gbps). Default is no minimum or + maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkInterfaceCount: + description: Block describing the minimum and + maximum number of network interfaces. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. 
+ type: number + type: object + onDemandMaxPricePercentageOverLowestPrice: + description: The price protection threshold for + On-Demand Instances. This is the maximum you’ll + pay for an On-Demand Instance, expressed as + a percentage higher than the cheapest M, C, + or R instance type with your specified attributes. + When Amazon EC2 Auto Scaling selects instance + types with your attributes, we will exclude + instance types whose price is higher than your + threshold. The parameter accepts an integer, + which Amazon EC2 Auto Scaling interprets as + a percentage. To turn off price protection, + specify a high value, such as 999999. Default + is 20. + type: number + requireHibernateSupport: + description: Indicate whether instance types must + support On-Demand Instance Hibernation, either + true or false. Default is false. + type: boolean + spotMaxPricePercentageOverLowestPrice: + description: The price protection threshold for + Spot Instances. This is the maximum you’ll pay + for a Spot Instance, expressed as a percentage + higher than the cheapest M, C, or R instance + type with your specified attributes. When Amazon + EC2 Auto Scaling selects instance types with + your attributes, we will exclude instance types + whose price is higher than your threshold. The + parameter accepts an integer, which Amazon EC2 + Auto Scaling interprets as a percentage. To + turn off price protection, specify a high value, + such as 999999. Default is 100. + type: number + totalLocalStorageGb: + description: Block describing the minimum and + maximum total local storage (GB). Default is + no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + vcpuCount: + description: Block describing the minimum and + maximum number of vCPUs. Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. 
+ type: number + type: object + type: object + instanceType: + description: The type of instance to request. + type: string + priority: + description: The priority for the launch template + override. The lower the number, the higher the priority. + If no number is set, the launch template override + has the lowest priority. + type: number + spotPrice: + description: The maximum bid price per unit hour. + type: string + subnetId: + description: The subnet in which to launch the requested + instance. + type: string + weightedCapacity: + description: The capacity added to the fleet by a + fulfilled request. + type: number + type: object + type: array + type: object + type: array + loadBalancers: + description: A list of elastic load balancer names to add to the + Spot fleet. + items: + type: string + type: array + x-kubernetes-list-type: set + onDemandAllocationStrategy: + description: 'The order of the launch template overrides to use + in fulfilling On-Demand capacity. the possible values are: lowestPrice + and prioritized. the default is lowestPrice.' + type: string + onDemandMaxTotalPrice: + description: The maximum amount per hour for On-Demand Instances + that you're willing to pay. When the maximum amount you're willing + to pay is reached, the fleet stops launching instances even + if it hasn’t met the target capacity. + type: string + onDemandTargetCapacity: + description: The number of On-Demand units to request. If the + request type is maintain, you can specify a target capacity + of 0 and add capacity later. + type: number + replaceUnhealthyInstances: + description: Indicates whether Spot fleet should replace unhealthy + instances. Default false. + type: boolean + spotMaintenanceStrategies: + description: Nested argument containing maintenance strategies + for managing your Spot Instances that are at an elevated risk + of being interrupted. Defined below. 
+ properties: + capacityRebalance: + description: Nested argument containing the capacity rebalance + for your fleet request. Defined below. + properties: + replacementStrategy: + description: 'The replacement strategy to use. Only available + for spot fleets with fleet_type set to maintain. Valid + values: launch.' + type: string + type: object + type: object + spotPrice: + description: The maximum bid price per unit hour. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + targetCapacity: + description: |- + The number of units to request. You can choose to set the + target capacity in terms of instances or a performance characteristic that is + important to your application workload, such as vCPUs, memory, or I/O. + type: number + targetCapacityUnitType: + description: The unit for the target capacity. This can only be + done with instance_requirements defined + type: string + targetGroupArns: + description: A list of aws_alb_target_group ARNs, for use with + Application Load Balancing. + items: + type: string + type: array + x-kubernetes-list-type: set + terminateInstancesOnDelete: + description: |- + Indicates whether running Spot + instances should be terminated when the resource is deleted (and the Spot fleet request cancelled). + If no value is specified, the value of the terminate_instances_with_expiration argument is used. + type: string + terminateInstancesWithExpiration: + description: |- + Indicates whether running Spot + instances should be terminated when the Spot fleet request expires. + type: boolean + validFrom: + description: The start date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to + start fulfilling the request immediately. + type: string + validUntil: + description: The end date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). 
At this point, no + new Spot instance requests are placed or enabled to fulfill + the request. + type: string + waitForFulfillment: + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.iamFleetRole is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.iamFleetRole) + || (has(self.initProvider) && has(self.initProvider.iamFleetRole))' + - message: spec.forProvider.targetCapacity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetCapacity) + || (has(self.initProvider) && has(self.initProvider.targetCapacity))' + status: + description: SpotFleetRequestStatus defines the observed state of SpotFleetRequest. + properties: + atProvider: + properties: + allocationStrategy: + description: |- + Indicates how to allocate the target capacity across + the Spot pools specified by the Spot fleet request. Valid values: lowestPrice, diversified, capacityOptimized, capacityOptimizedPrioritized, and priceCapacityOptimized. The default is + lowestPrice. + type: string + clientToken: + type: string + context: + description: Reserved. + type: string + excessCapacityTerminationPolicy: + description: |- + Indicates whether running Spot + instances should be terminated if the target capacity of the Spot fleet + request is decreased below the current size of the Spot fleet. + type: string + fleetType: + description: |- + The type of fleet request. Indicates whether the Spot Fleet only requests the target + capacity or also attempts to maintain it. Default is maintain. + type: string + iamFleetRole: + description: |- + Grants the Spot fleet permission to terminate + Spot instances on your behalf when you cancel its Spot fleet request using + CancelSpotFleetRequests or when the Spot fleet request expires, if you set + terminateInstancesWithExpiration. 
+ type: string + id: + description: The ID of the launch template. Conflicts with name. + type: string + instanceInterruptionBehaviour: + description: |- + Indicates whether a Spot + instance stops or terminates when it is interrupted. Default is + terminate. + type: string + instancePoolsToUseCount: + description: |- + The number of Spot pools across which to allocate your target Spot capacity. + Valid only when allocation_strategy is set to lowestPrice. Spot Fleet selects + the cheapest Spot pools and evenly allocates your target Spot capacity across + the number of Spot pools that you specify. + type: number + launchSpecification: + description: |- + Used to define the launch configuration of the + spot-fleet request. Can be specified multiple times to define different bids + across different markets and instance types. Conflicts with launch_template_config. At least one of launch_specification or launch_template_config is required. + items: + properties: + ami: + type: string + associatePublicIpAddress: + type: boolean + availabilityZone: + description: The availability zone in which to place the + request. + type: string + ebsBlockDevice: + items: + properties: + deleteOnTermination: + type: boolean + deviceName: + description: The name of the launch template. Conflicts + with id. + type: string + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The ID of the launch template. Conflicts + with name. + type: string + snapshotId: + description: The ID of the launch template. Conflicts + with name. + type: string + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + type: array + ebsOptimized: + type: boolean + ephemeralBlockDevice: + items: + properties: + deviceName: + description: The name of the launch template. Conflicts + with id. + type: string + virtualName: + description: The name of the launch template. Conflicts + with id. 
+ type: string + type: object + type: array + iamInstanceProfile: + type: string + iamInstanceProfileArn: + description: takes aws_iam_instance_profile attribute arn + as input. + type: string + instanceType: + description: The type of instance to request. + type: string + keyName: + description: The name of the launch template. Conflicts + with id. + type: string + monitoring: + type: boolean + placementGroup: + type: string + placementTenancy: + type: string + rootBlockDevice: + items: + properties: + deleteOnTermination: + type: boolean + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The ID of the launch template. Conflicts + with name. + type: string + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + type: array + spotPrice: + description: The maximum bid price per unit hour. + type: string + subnetId: + description: The subnet in which to launch the requested + instance. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userData: + type: string + vpcSecurityGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + weightedCapacity: + description: The capacity added to the fleet by a fulfilled + request. + type: string + type: object + type: array + launchTemplateConfig: + description: Launch template configuration block. See Launch Template + Configs below for more details. Conflicts with launch_specification. + At least one of launch_specification or launch_template_config + is required. + items: + properties: + launchTemplateSpecification: + description: Launch template specification. See Launch Template + Specification below for more details. + properties: + id: + description: The ID of the launch template. Conflicts + with name. + type: string + name: + description: The name of the launch template. Conflicts + with id. 
+ type: string + version: + description: Template version. Unlike the autoscaling + equivalent, does not support $Latest or $Default, + so use the launch_template resource's attribute, e.g., + "${aws_launch_template.foo.latest_version}". It will + use the default version if omitted. + type: string + type: object + overrides: + description: One or more override configurations. See Overrides + below for more details. + items: + properties: + availabilityZone: + description: The availability zone in which to place + the request. + type: string + instanceRequirements: + description: The instance requirements. See below. + properties: + acceleratorCount: + description: Block describing the minimum and + maximum number of accelerators (GPUs, FPGAs, + or AWS Inferentia chips). Default is no minimum + or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorManufacturers: + description: List of accelerator manufacturer + names. Default is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorNames: + description: List of accelerator names. Default + is any acclerator. + items: + type: string + type: array + x-kubernetes-list-type: set + acceleratorTotalMemoryMib: + description: Block describing the minimum and + maximum total memory of the accelerators. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + acceleratorTypes: + description: List of accelerator types. Default + is any accelerator type. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedInstanceTypes: + description: 'List of instance types to apply + your specified attributes against. All other + instance types are ignored, even if they match + your specified attributes. 
You can use strings + with one or more wild cards, represented by + an asterisk (*), to allow an instance type, + size, or generation. The following are examples: + m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, + if you specify c5*, you are allowing the entire + C5 instance family, which includes all C5a and + C5n instance types. If you specify m5a.*, you + are allowing all the M5a instance types, but + not the M5n instance types. Maximum of 400 entries + in the list; each entry is limited to 30 characters. + Default is all instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + bareMetal: + description: Indicate whether bare metal instace + types should be included, excluded, or required. + Default is excluded. + type: string + baselineEbsBandwidthMbps: + description: Block describing the minimum and + maximum baseline EBS bandwidth, in Mbps. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + burstablePerformance: + description: Indicate whether burstable performance + instance types should be included, excluded, + or required. Default is excluded. + type: string + cpuManufacturers: + description: List of CPU manufacturer names. Default + is any manufacturer. + items: + type: string + type: array + x-kubernetes-list-type: set + excludedInstanceTypes: + description: 'List of instance types to exclude. + You can use strings with one or more wild cards, + represented by an asterisk (*), to exclude an + instance type, size, or generation. The following + are examples: m5.8xlarge, c5*.*, m5a.*, r*, + *3*. For example, if you specify c5*, you are + excluding the entire C5 instance family, which + includes all C5a and C5n instance types. If + you specify m5a.*, you are excluding all the + M5a instance types, but not the M5n instance + types. Maximum of 400 entries in the list; each + entry is limited to 30 characters. 
Default is + no excluded instance types.' + items: + type: string + type: array + x-kubernetes-list-type: set + instanceGenerations: + description: List of instance generation names. + Default is any generation. + items: + type: string + type: array + x-kubernetes-list-type: set + localStorage: + description: Indicate whether instance types with + local storage volumes are included, excluded, + or required. Default is included. + type: string + localStorageTypes: + description: List of local storage type names. + Default any storage type. + items: + type: string + type: array + x-kubernetes-list-type: set + memoryGibPerVcpu: + description: Block describing the minimum and + maximum amount of memory (GiB) per vCPU. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + memoryMib: + description: Block describing the minimum and + maximum amount of memory (MiB). Default is no + maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkBandwidthGbps: + description: Block describing the minimum and + maximum amount of network bandwidth, in gigabits + per second (Gbps). Default is no minimum or + maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + networkInterfaceCount: + description: Block describing the minimum and + maximum number of network interfaces. Default + is no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + onDemandMaxPricePercentageOverLowestPrice: + description: The price protection threshold for + On-Demand Instances. This is the maximum you’ll + pay for an On-Demand Instance, expressed as + a percentage higher than the cheapest M, C, + or R instance type with your specified attributes. 
+ When Amazon EC2 Auto Scaling selects instance + types with your attributes, we will exclude + instance types whose price is higher than your + threshold. The parameter accepts an integer, + which Amazon EC2 Auto Scaling interprets as + a percentage. To turn off price protection, + specify a high value, such as 999999. Default + is 20. + type: number + requireHibernateSupport: + description: Indicate whether instance types must + support On-Demand Instance Hibernation, either + true or false. Default is false. + type: boolean + spotMaxPricePercentageOverLowestPrice: + description: The price protection threshold for + Spot Instances. This is the maximum you’ll pay + for a Spot Instance, expressed as a percentage + higher than the cheapest M, C, or R instance + type with your specified attributes. When Amazon + EC2 Auto Scaling selects instance types with + your attributes, we will exclude instance types + whose price is higher than your threshold. The + parameter accepts an integer, which Amazon EC2 + Auto Scaling interprets as a percentage. To + turn off price protection, specify a high value, + such as 999999. Default is 100. + type: number + totalLocalStorageGb: + description: Block describing the minimum and + maximum total local storage (GB). Default is + no minimum or maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + vcpuCount: + description: Block describing the minimum and + maximum number of vCPUs. Default is no maximum. + properties: + max: + description: Maximum. + type: number + min: + description: Minimum. + type: number + type: object + type: object + instanceType: + description: The type of instance to request. + type: string + priority: + description: The priority for the launch template + override. The lower the number, the higher the priority. + If no number is set, the launch template override + has the lowest priority. 
+ type: number + spotPrice: + description: The maximum bid price per unit hour. + type: string + subnetId: + description: The subnet in which to launch the requested + instance. + type: string + weightedCapacity: + description: The capacity added to the fleet by a + fulfilled request. + type: number + type: object + type: array + type: object + type: array + loadBalancers: + description: A list of elastic load balancer names to add to the + Spot fleet. + items: + type: string + type: array + x-kubernetes-list-type: set + onDemandAllocationStrategy: + description: 'The order of the launch template overrides to use + in fulfilling On-Demand capacity. the possible values are: lowestPrice + and prioritized. the default is lowestPrice.' + type: string + onDemandMaxTotalPrice: + description: The maximum amount per hour for On-Demand Instances + that you're willing to pay. When the maximum amount you're willing + to pay is reached, the fleet stops launching instances even + if it hasn’t met the target capacity. + type: string + onDemandTargetCapacity: + description: The number of On-Demand units to request. If the + request type is maintain, you can specify a target capacity + of 0 and add capacity later. + type: number + replaceUnhealthyInstances: + description: Indicates whether Spot fleet should replace unhealthy + instances. Default false. + type: boolean + spotMaintenanceStrategies: + description: Nested argument containing maintenance strategies + for managing your Spot Instances that are at an elevated risk + of being interrupted. Defined below. + properties: + capacityRebalance: + description: Nested argument containing the capacity rebalance + for your fleet request. Defined below. + properties: + replacementStrategy: + description: 'The replacement strategy to use. Only available + for spot fleets with fleet_type set to maintain. Valid + values: launch.' + type: string + type: object + type: object + spotPrice: + description: The maximum bid price per unit hour. 
+ type: string + spotRequestState: + description: The state of the Spot fleet request. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + targetCapacity: + description: |- + The number of units to request. You can choose to set the + target capacity in terms of instances or a performance characteristic that is + important to your application workload, such as vCPUs, memory, or I/O. + type: number + targetCapacityUnitType: + description: The unit for the target capacity. This can only be + done with instance_requirements defined + type: string + targetGroupArns: + description: A list of aws_alb_target_group ARNs, for use with + Application Load Balancing. + items: + type: string + type: array + x-kubernetes-list-type: set + terminateInstancesOnDelete: + description: |- + Indicates whether running Spot + instances should be terminated when the resource is deleted (and the Spot fleet request cancelled). + If no value is specified, the value of the terminate_instances_with_expiration argument is used. + type: string + terminateInstancesWithExpiration: + description: |- + Indicates whether running Spot + instances should be terminated when the Spot fleet request expires. + type: boolean + validFrom: + description: The start date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to + start fulfilling the request immediately. + type: string + validUntil: + description: The end date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no + new Spot instance requests are placed or enabled to fulfill + the request. 
+ type: string + waitForFulfillment: + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_spotinstancerequests.yaml b/package/crds/ec2.aws.upbound.io_spotinstancerequests.yaml index 04cd82165f..01409206e0 100644 --- a/package/crds/ec2.aws.upbound.io_spotinstancerequests.yaml +++ b/package/crds/ec2.aws.upbound.io_spotinstancerequests.yaml @@ -1630,3 +1630,1555 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpotInstanceRequest is the Schema for the SpotInstanceRequests + API. Provides a Spot Instance Request resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpotInstanceRequestSpec defines the desired state of SpotInstanceRequest + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + ami: + type: string + associatePublicIpAddress: + type: boolean + availabilityZone: + type: string + blockDurationMinutes: + description: |- + The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). + The duration period starts as soon as your Spot instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates. + Note that you can't specify an Availability Zone group or a launch group if you specify a duration. + type: number + capacityReservationSpecification: + properties: + capacityReservationPreference: + type: string + capacityReservationTarget: + properties: + capacityReservationId: + description: The Spot Instance Request ID. 
+ type: string + capacityReservationResourceGroupArn: + type: string + type: object + type: object + cpuCoreCount: + type: number + cpuOptions: + properties: + amdSevSnp: + type: string + coreCount: + type: number + threadsPerCore: + type: number + type: object + cpuThreadsPerCore: + type: number + creditSpecification: + properties: + cpuCredits: + type: string + type: object + disableApiStop: + type: boolean + disableApiTermination: + type: boolean + ebsBlockDevice: + items: + properties: + deleteOnTermination: + type: boolean + deviceName: + type: string + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The Spot Instance Request ID. + type: string + snapshotId: + description: The Spot Instance Request ID. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + type: array + ebsOptimized: + type: boolean + enclaveOptions: + properties: + enabled: + type: boolean + type: object + ephemeralBlockDevice: + items: + properties: + deviceName: + type: string + noDevice: + type: boolean + virtualName: + type: string + type: object + type: array + getPasswordData: + type: boolean + hibernation: + type: boolean + hostId: + description: The Spot Instance Request ID. + type: string + hostResourceGroupArn: + type: string + iamInstanceProfile: + type: string + instanceInitiatedShutdownBehavior: + type: string + instanceInterruptionBehavior: + description: Indicates Spot instance behavior when it is interrupted. + Valid values are terminate, stop, or hibernate. Default value + is terminate. 
+ type: string + instanceType: + type: string + ipv6AddressCount: + type: number + ipv6Addresses: + items: + type: string + type: array + keyName: + type: string + launchGroup: + description: |- + A launch group is a group of spot instances that launch together and terminate together. + If left empty instances are launched and terminated individually. + type: string + launchTemplate: + properties: + id: + description: The Spot Instance Request ID. + type: string + name: + type: string + version: + type: string + type: object + maintenanceOptions: + properties: + autoRecovery: + type: string + type: object + metadataOptions: + properties: + httpEndpoint: + type: string + httpProtocolIpv6: + type: string + httpPutResponseHopLimit: + type: number + httpTokens: + type: string + instanceMetadataTags: + description: Key-value map of resource tags. + type: string + type: object + monitoring: + type: boolean + networkInterface: + items: + properties: + deleteOnTermination: + type: boolean + deviceIndex: + type: number + networkCardIndex: + type: number + networkInterfaceId: + description: The Spot Instance Request ID. + type: string + type: object + type: array + placementGroup: + type: string + placementPartitionNumber: + type: number + privateDnsNameOptions: + properties: + enableResourceNameDnsARecord: + type: boolean + enableResourceNameDnsAaaaRecord: + type: boolean + hostnameType: + type: string + type: object + privateIp: + description: The private IP address assigned to the instance + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rootBlockDevice: + properties: + deleteOnTermination: + type: boolean + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The Spot Instance Request ID. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + secondaryPrivateIps: + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroups: + items: + type: string + type: array + x-kubernetes-list-type: set + sourceDestCheck: + type: boolean + spotPrice: + description: The maximum price to request on the spot market. + type: string + spotType: + description: |- + If set to one-time, after + the instance is terminated, the spot request will be closed. + type: string + subnetId: + description: The Spot Instance Request ID. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tenancy: + type: string + userData: + type: string + userDataBase64: + type: string + userDataReplaceOnChange: + type: boolean + validFrom: + description: The start date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to + start fulfilling the request immediately. + type: string + validUntil: + description: The end date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no + new Spot instance requests are placed or enabled to fulfill + the request. The default end date is 7 days from the current + date. + type: string + volumeTags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + waitForFulfillment: + type: boolean + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + ami: + type: string + associatePublicIpAddress: + type: boolean + availabilityZone: + type: string + blockDurationMinutes: + description: |- + The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). + The duration period starts as soon as your Spot instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates. + Note that you can't specify an Availability Zone group or a launch group if you specify a duration. 
+ type: number + capacityReservationSpecification: + properties: + capacityReservationPreference: + type: string + capacityReservationTarget: + properties: + capacityReservationId: + description: The Spot Instance Request ID. + type: string + capacityReservationResourceGroupArn: + type: string + type: object + type: object + cpuCoreCount: + type: number + cpuOptions: + properties: + amdSevSnp: + type: string + coreCount: + type: number + threadsPerCore: + type: number + type: object + cpuThreadsPerCore: + type: number + creditSpecification: + properties: + cpuCredits: + type: string + type: object + disableApiStop: + type: boolean + disableApiTermination: + type: boolean + ebsBlockDevice: + items: + properties: + deleteOnTermination: + type: boolean + deviceName: + type: string + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The Spot Instance Request ID. + type: string + snapshotId: + description: The Spot Instance Request ID. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + type: array + ebsOptimized: + type: boolean + enclaveOptions: + properties: + enabled: + type: boolean + type: object + ephemeralBlockDevice: + items: + properties: + deviceName: + type: string + noDevice: + type: boolean + virtualName: + type: string + type: object + type: array + getPasswordData: + type: boolean + hibernation: + type: boolean + hostId: + description: The Spot Instance Request ID. 
+ type: string + hostResourceGroupArn: + type: string + iamInstanceProfile: + type: string + instanceInitiatedShutdownBehavior: + type: string + instanceInterruptionBehavior: + description: Indicates Spot instance behavior when it is interrupted. + Valid values are terminate, stop, or hibernate. Default value + is terminate. + type: string + instanceType: + type: string + ipv6AddressCount: + type: number + ipv6Addresses: + items: + type: string + type: array + keyName: + type: string + launchGroup: + description: |- + A launch group is a group of spot instances that launch together and terminate together. + If left empty instances are launched and terminated individually. + type: string + launchTemplate: + properties: + id: + description: The Spot Instance Request ID. + type: string + name: + type: string + version: + type: string + type: object + maintenanceOptions: + properties: + autoRecovery: + type: string + type: object + metadataOptions: + properties: + httpEndpoint: + type: string + httpProtocolIpv6: + type: string + httpPutResponseHopLimit: + type: number + httpTokens: + type: string + instanceMetadataTags: + description: Key-value map of resource tags. + type: string + type: object + monitoring: + type: boolean + networkInterface: + items: + properties: + deleteOnTermination: + type: boolean + deviceIndex: + type: number + networkCardIndex: + type: number + networkInterfaceId: + description: The Spot Instance Request ID. 
+ type: string + type: object + type: array + placementGroup: + type: string + placementPartitionNumber: + type: number + privateDnsNameOptions: + properties: + enableResourceNameDnsARecord: + type: boolean + enableResourceNameDnsAaaaRecord: + type: boolean + hostnameType: + type: string + type: object + privateIp: + description: The private IP address assigned to the instance + type: string + rootBlockDevice: + properties: + deleteOnTermination: + type: boolean + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The Spot Instance Request ID. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + type: number + volumeSize: + type: number + volumeType: + type: string + type: object + secondaryPrivateIps: + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroups: + items: + type: string + type: array + x-kubernetes-list-type: set + sourceDestCheck: + type: boolean + spotPrice: + description: The maximum price to request on the spot market. + type: string + spotType: + description: |- + If set to one-time, after + the instance is terminated, the spot request will be closed. + type: string + subnetId: + description: The Spot Instance Request ID. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tenancy: + type: string + userData: + type: string + userDataBase64: + type: string + userDataReplaceOnChange: + type: boolean + validFrom: + description: The start date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to + start fulfilling the request immediately. + type: string + validUntil: + description: The end date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no + new Spot instance requests are placed or enabled to fulfill + the request. The default end date is 7 days from the current + date. + type: string + volumeTags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + waitForFulfillment: + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SpotInstanceRequestStatus defines the observed state of SpotInstanceRequest. + properties: + atProvider: + properties: + ami: + type: string + arn: + type: string + associatePublicIpAddress: + type: boolean + availabilityZone: + type: string + blockDurationMinutes: + description: |- + The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). + The duration period starts as soon as your Spot instance receives its instance ID. 
At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates. + Note that you can't specify an Availability Zone group or a launch group if you specify a duration. + type: number + capacityReservationSpecification: + properties: + capacityReservationPreference: + type: string + capacityReservationTarget: + properties: + capacityReservationId: + description: The Spot Instance Request ID. + type: string + capacityReservationResourceGroupArn: + type: string + type: object + type: object + cpuCoreCount: + type: number + cpuOptions: + properties: + amdSevSnp: + type: string + coreCount: + type: number + threadsPerCore: + type: number + type: object + cpuThreadsPerCore: + type: number + creditSpecification: + properties: + cpuCredits: + type: string + type: object + disableApiStop: + type: boolean + disableApiTermination: + type: boolean + ebsBlockDevice: + items: + properties: + deleteOnTermination: + type: boolean + deviceName: + type: string + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The Spot Instance Request ID. + type: string + snapshotId: + description: The Spot Instance Request ID. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + type: number + volumeId: + description: The Spot Instance Request ID. 
+ type: string + volumeSize: + type: number + volumeType: + type: string + type: object + type: array + ebsOptimized: + type: boolean + enclaveOptions: + properties: + enabled: + type: boolean + type: object + ephemeralBlockDevice: + items: + properties: + deviceName: + type: string + noDevice: + type: boolean + virtualName: + type: string + type: object + type: array + getPasswordData: + type: boolean + hibernation: + type: boolean + hostId: + description: The Spot Instance Request ID. + type: string + hostResourceGroupArn: + type: string + iamInstanceProfile: + type: string + id: + description: The Spot Instance Request ID. + type: string + instanceInitiatedShutdownBehavior: + type: string + instanceInterruptionBehavior: + description: Indicates Spot instance behavior when it is interrupted. + Valid values are terminate, stop, or hibernate. Default value + is terminate. + type: string + instanceState: + type: string + instanceType: + type: string + ipv6AddressCount: + type: number + ipv6Addresses: + items: + type: string + type: array + keyName: + type: string + launchGroup: + description: |- + A launch group is a group of spot instances that launch together and terminate together. + If left empty instances are launched and terminated individually. + type: string + launchTemplate: + properties: + id: + description: The Spot Instance Request ID. + type: string + name: + type: string + version: + type: string + type: object + maintenanceOptions: + properties: + autoRecovery: + type: string + type: object + metadataOptions: + properties: + httpEndpoint: + type: string + httpProtocolIpv6: + type: string + httpPutResponseHopLimit: + type: number + httpTokens: + type: string + instanceMetadataTags: + description: Key-value map of resource tags. 
+ type: string + type: object + monitoring: + type: boolean + networkInterface: + items: + properties: + deleteOnTermination: + type: boolean + deviceIndex: + type: number + networkCardIndex: + type: number + networkInterfaceId: + description: The Spot Instance Request ID. + type: string + type: object + type: array + outpostArn: + type: string + passwordData: + type: string + placementGroup: + type: string + placementPartitionNumber: + type: number + primaryNetworkInterfaceId: + description: The Spot Instance Request ID. + type: string + privateDns: + description: |- + The private DNS name assigned to the instance. Can only be + used inside the Amazon EC2, and only available if you've enabled DNS hostnames + for your VPC + type: string + privateDnsNameOptions: + properties: + enableResourceNameDnsARecord: + type: boolean + enableResourceNameDnsAaaaRecord: + type: boolean + hostnameType: + type: string + type: object + privateIp: + description: The private IP address assigned to the instance + type: string + publicDns: + description: |- + The public DNS name assigned to the instance. For EC2-VPC, this + is only available if you've enabled DNS hostnames for your VPC + type: string + publicIp: + description: The public IP address assigned to the instance, if + applicable. + type: string + rootBlockDevice: + properties: + deleteOnTermination: + type: boolean + deviceName: + type: string + encrypted: + type: boolean + iops: + type: number + kmsKeyId: + description: The Spot Instance Request ID. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughput: + type: number + volumeId: + description: The Spot Instance Request ID. 
+ type: string + volumeSize: + type: number + volumeType: + type: string + type: object + secondaryPrivateIps: + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroups: + items: + type: string + type: array + x-kubernetes-list-type: set + sourceDestCheck: + type: boolean + spotBidStatus: + description: |- + The current bid + status + of the Spot Instance Request. + type: string + spotInstanceId: + description: |- + The Instance ID (if any) that is currently fulfilling + the Spot Instance request. + type: string + spotPrice: + description: The maximum price to request on the spot market. + type: string + spotRequestState: + description: |- + The current request + state + of the Spot Instance Request. + type: string + spotType: + description: |- + If set to one-time, after + the instance is terminated, the spot request will be closed. + type: string + subnetId: + description: The Spot Instance Request ID. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + tenancy: + type: string + userData: + type: string + userDataBase64: + type: string + userDataReplaceOnChange: + type: boolean + validFrom: + description: The start date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to + start fulfilling the request immediately. + type: string + validUntil: + description: The end date and time of the request, in UTC RFC3339 + format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no + new Spot instance requests are placed or enabled to fulfill + the request. The default end date is 7 days from the current + date. 
+ type: string + volumeTags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + waitForFulfillment: + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_trafficmirrorfilterrules.yaml b/package/crds/ec2.aws.upbound.io_trafficmirrorfilterrules.yaml index 4d2db05d16..6c4a5e6d7e 100644 --- a/package/crds/ec2.aws.upbound.io_trafficmirrorfilterrules.yaml +++ b/package/crds/ec2.aws.upbound.io_trafficmirrorfilterrules.yaml @@ -695,3 +695,668 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: TrafficMirrorFilterRule is the Schema for the TrafficMirrorFilterRules + API. Provides an Traffic mirror filter rule + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TrafficMirrorFilterRuleSpec defines the desired state of + TrafficMirrorFilterRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the traffic mirror filter rule. + type: string + destinationCidrBlock: + description: Destination CIDR block to assign to the Traffic Mirror + rule. + type: string + destinationPortRange: + description: Destination port range. Supported only when the protocol + is set to TCP(6) or UDP(17). See Traffic mirror port range documented + below + properties: + fromPort: + description: Starting port of the range + type: number + toPort: + description: Ending port of the range + type: number + type: object + protocol: + description: Protocol number, for example 17 (UDP), to assign + to the Traffic Mirror rule. For information about the protocol + value, see Protocol Numbers on the Internet Assigned Numbers + Authority (IANA) website. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + ruleAction: + description: Action to take (accept | reject) on the filtered + traffic. 
Valid values are accept and reject + type: string + ruleNumber: + description: Number of the Traffic Mirror rule. This number must + be unique for each Traffic Mirror rule in a given direction. + The rules are processed in ascending order by rule number. + type: number + sourceCidrBlock: + description: Source CIDR block to assign to the Traffic Mirror + rule. + type: string + sourcePortRange: + description: Source port range. Supported only when the protocol + is set to TCP(6) or UDP(17). See Traffic mirror port range documented + below + properties: + fromPort: + description: Starting port of the range + type: number + toPort: + description: Ending port of the range + type: number + type: object + trafficDirection: + description: Direction of traffic to be captured. Valid values + are ingress and egress + type: string + trafficMirrorFilterId: + description: ID of the traffic mirror filter to which this rule + should be added + type: string + trafficMirrorFilterIdRef: + description: Reference to a TrafficMirrorFilter in ec2 to populate + trafficMirrorFilterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + trafficMirrorFilterIdSelector: + description: Selector for a TrafficMirrorFilter in ec2 to populate + trafficMirrorFilterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the traffic mirror filter rule. + type: string + destinationCidrBlock: + description: Destination CIDR block to assign to the Traffic Mirror + rule. + type: string + destinationPortRange: + description: Destination port range. Supported only when the protocol + is set to TCP(6) or UDP(17). See Traffic mirror port range documented + below + properties: + fromPort: + description: Starting port of the range + type: number + toPort: + description: Ending port of the range + type: number + type: object + protocol: + description: Protocol number, for example 17 (UDP), to assign + to the Traffic Mirror rule. For information about the protocol + value, see Protocol Numbers on the Internet Assigned Numbers + Authority (IANA) website. + type: number + ruleAction: + description: Action to take (accept | reject) on the filtered + traffic. Valid values are accept and reject + type: string + ruleNumber: + description: Number of the Traffic Mirror rule. This number must + be unique for each Traffic Mirror rule in a given direction. + The rules are processed in ascending order by rule number. + type: number + sourceCidrBlock: + description: Source CIDR block to assign to the Traffic Mirror + rule. + type: string + sourcePortRange: + description: Source port range. Supported only when the protocol + is set to TCP(6) or UDP(17). See Traffic mirror port range documented + below + properties: + fromPort: + description: Starting port of the range + type: number + toPort: + description: Ending port of the range + type: number + type: object + trafficDirection: + description: Direction of traffic to be captured. 
Valid values + are ingress and egress + type: string + trafficMirrorFilterId: + description: ID of the traffic mirror filter to which this rule + should be added + type: string + trafficMirrorFilterIdRef: + description: Reference to a TrafficMirrorFilter in ec2 to populate + trafficMirrorFilterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + trafficMirrorFilterIdSelector: + description: Selector for a TrafficMirrorFilter in ec2 to populate + trafficMirrorFilterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.destinationCidrBlock is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destinationCidrBlock) + || (has(self.initProvider) && has(self.initProvider.destinationCidrBlock))' + - message: spec.forProvider.ruleAction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ruleAction) + || (has(self.initProvider) && has(self.initProvider.ruleAction))' + - message: spec.forProvider.ruleNumber is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ruleNumber) + || (has(self.initProvider) && has(self.initProvider.ruleNumber))' + - message: spec.forProvider.sourceCidrBlock is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sourceCidrBlock) + || (has(self.initProvider) && has(self.initProvider.sourceCidrBlock))' + - message: spec.forProvider.trafficDirection is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.trafficDirection) + || (has(self.initProvider) && has(self.initProvider.trafficDirection))' + status: + description: TrafficMirrorFilterRuleStatus defines the observed state + of TrafficMirrorFilterRule. + properties: + atProvider: + properties: + arn: + description: ARN of the traffic mirror filter rule. + type: string + description: + description: Description of the traffic mirror filter rule. 
+ type: string + destinationCidrBlock: + description: Destination CIDR block to assign to the Traffic Mirror + rule. + type: string + destinationPortRange: + description: Destination port range. Supported only when the protocol + is set to TCP(6) or UDP(17). See Traffic mirror port range documented + below + properties: + fromPort: + description: Starting port of the range + type: number + toPort: + description: Ending port of the range + type: number + type: object + id: + description: Name of the traffic mirror filter rule. + type: string + protocol: + description: Protocol number, for example 17 (UDP), to assign + to the Traffic Mirror rule. For information about the protocol + value, see Protocol Numbers on the Internet Assigned Numbers + Authority (IANA) website. + type: number + ruleAction: + description: Action to take (accept | reject) on the filtered + traffic. Valid values are accept and reject + type: string + ruleNumber: + description: Number of the Traffic Mirror rule. This number must + be unique for each Traffic Mirror rule in a given direction. + The rules are processed in ascending order by rule number. + type: number + sourceCidrBlock: + description: Source CIDR block to assign to the Traffic Mirror + rule. + type: string + sourcePortRange: + description: Source port range. Supported only when the protocol + is set to TCP(6) or UDP(17). See Traffic mirror port range documented + below + properties: + fromPort: + description: Starting port of the range + type: number + toPort: + description: Ending port of the range + type: number + type: object + trafficDirection: + description: Direction of traffic to be captured. Valid values + are ingress and egress + type: string + trafficMirrorFilterId: + description: ID of the traffic mirror filter to which this rule + should be added + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_vpcendpoints.yaml b/package/crds/ec2.aws.upbound.io_vpcendpoints.yaml index 3f379eae61..0bad619140 100644 --- a/package/crds/ec2.aws.upbound.io_vpcendpoints.yaml +++ b/package/crds/ec2.aws.upbound.io_vpcendpoints.yaml @@ -880,3 +880,859 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPCEndpoint is the Schema for the VPCEndpoints API. Provides + a VPC Endpoint resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPCEndpointSpec defines the desired state of VPCEndpoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAccept: + description: Accept the VPC endpoint (the VPC endpoint and service + need to be in the same AWS account). + type: boolean + dnsOptions: + description: The DNS options for the endpoint. See dns_options + below. + properties: + dnsRecordIpType: + description: The DNS records created for the endpoint. Valid + values are ipv4, dualstack, service-defined, and ipv6. + type: string + privateDnsOnlyForInboundResolverEndpoint: + description: Indicates whether to enable private DNS only + for inbound endpoints. This option is available only for + services that support both gateway and interface endpoints. + It routes traffic that originates from the VPC to the gateway + endpoint and traffic that originates from on-premises to + the interface endpoint. Default is false. Can only be specified + if private_dns_enabled is true. + type: boolean + type: object + ipAddressType: + description: The IP address type for the endpoint. Valid values + are ipv4, dualstack, and ipv6. 
+ type: string + policy: + description: A policy to attach to the endpoint that controls + access to the service. This is a JSON formatted string. Defaults + to full access. All Gateway and some Interface endpoints support + policies - see the relevant AWS documentation for more details. + type: string + privateDnsEnabled: + description: |- + Whether or not to associate a private hosted zone with the specified VPC. Applicable for endpoints of type Interface. Most users will want this enabled to allow services within the VPC to automatically use the endpoint. + Defaults to false. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + serviceName: + description: The service name. For AWS services the service name + is usually in the form com.amazonaws.. (the + SageMaker Notebook service is an exception to this rule, the + service name is in the form aws.sagemaker..notebook). + type: string + serviceNameRef: + description: Reference to a VPCEndpointService in ec2 to populate + serviceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceNameSelector: + description: Selector for a VPCEndpointService in ec2 to populate + serviceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcEndpointType: + description: The VPC endpoint type, Gateway, GatewayLoadBalancer, + or Interface. Defaults to Gateway. + type: string + vpcId: + description: The ID of the VPC in which the endpoint will be used. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAccept: + description: Accept the VPC endpoint (the VPC endpoint and service + need to be in the same AWS account). + type: boolean + dnsOptions: + description: The DNS options for the endpoint. See dns_options + below. + properties: + dnsRecordIpType: + description: The DNS records created for the endpoint. Valid + values are ipv4, dualstack, service-defined, and ipv6. + type: string + privateDnsOnlyForInboundResolverEndpoint: + description: Indicates whether to enable private DNS only + for inbound endpoints. This option is available only for + services that support both gateway and interface endpoints. + It routes traffic that originates from the VPC to the gateway + endpoint and traffic that originates from on-premises to + the interface endpoint. Default is false. Can only be specified + if private_dns_enabled is true. + type: boolean + type: object + ipAddressType: + description: The IP address type for the endpoint. Valid values + are ipv4, dualstack, and ipv6. + type: string + policy: + description: A policy to attach to the endpoint that controls + access to the service. This is a JSON formatted string. Defaults + to full access. 
All Gateway and some Interface endpoints support + policies - see the relevant AWS documentation for more details. + type: string + privateDnsEnabled: + description: |- + Whether or not to associate a private hosted zone with the specified VPC. Applicable for endpoints of type Interface. Most users will want this enabled to allow services within the VPC to automatically use the endpoint. + Defaults to false. + type: boolean + serviceName: + description: The service name. For AWS services the service name + is usually in the form com.amazonaws.. (the + SageMaker Notebook service is an exception to this rule, the + service name is in the form aws.sagemaker..notebook). + type: string + serviceNameRef: + description: Reference to a VPCEndpointService in ec2 to populate + serviceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceNameSelector: + description: Selector for a VPCEndpointService in ec2 to populate + serviceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcEndpointType: + description: The VPC endpoint type, Gateway, GatewayLoadBalancer, + or Interface. Defaults to Gateway. + type: string + vpcId: + description: The ID of the VPC in which the endpoint will be used. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: VPCEndpointStatus defines the observed state of VPCEndpoint. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the VPC endpoint. + type: string + autoAccept: + description: Accept the VPC endpoint (the VPC endpoint and service + need to be in the same AWS account). + type: boolean + cidrBlocks: + description: The list of CIDR blocks for the exposed AWS service. + Applicable for endpoints of type Gateway. + items: + type: string + type: array + dnsEntry: + description: The DNS entries for the VPC Endpoint. Applicable + for endpoints of type Interface. 
DNS blocks are documented below. + items: + properties: + dnsName: + description: The DNS name. + type: string + hostedZoneId: + description: The ID of the private hosted zone. + type: string + type: object + type: array + dnsOptions: + description: The DNS options for the endpoint. See dns_options + below. + properties: + dnsRecordIpType: + description: The DNS records created for the endpoint. Valid + values are ipv4, dualstack, service-defined, and ipv6. + type: string + privateDnsOnlyForInboundResolverEndpoint: + description: Indicates whether to enable private DNS only + for inbound endpoints. This option is available only for + services that support both gateway and interface endpoints. + It routes traffic that originates from the VPC to the gateway + endpoint and traffic that originates from on-premises to + the interface endpoint. Default is false. Can only be specified + if private_dns_enabled is true. + type: boolean + type: object + id: + description: The ID of the VPC endpoint. + type: string + ipAddressType: + description: The IP address type for the endpoint. Valid values + are ipv4, dualstack, and ipv6. + type: string + networkInterfaceIds: + description: One or more network interfaces for the VPC Endpoint. + Applicable for endpoints of type Interface. + items: + type: string + type: array + x-kubernetes-list-type: set + ownerId: + description: The ID of the AWS account that owns the VPC endpoint. + type: string + policy: + description: A policy to attach to the endpoint that controls + access to the service. This is a JSON formatted string. Defaults + to full access. All Gateway and some Interface endpoints support + policies - see the relevant AWS documentation for more details. + type: string + prefixListId: + description: The prefix list ID of the exposed AWS service. Applicable + for endpoints of type Gateway. + type: string + privateDnsEnabled: + description: |- + Whether or not to associate a private hosted zone with the specified VPC. 
Applicable for endpoints of type Interface. Most users will want this enabled to allow services within the VPC to automatically use the endpoint. + Defaults to false. + type: boolean + requesterManaged: + description: Whether or not the VPC Endpoint is being managed + by its service - true or false. + type: boolean + routeTableIds: + description: One or more route table IDs. Applicable for endpoints + of type Gateway. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIds: + description: |- + The ID of one or more security groups to associate with the network interface. Applicable for endpoints of type Interface. + If no security groups are specified, the VPC's default security group is associated with the endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceName: + description: The service name. For AWS services the service name + is usually in the form com.amazonaws.. (the + SageMaker Notebook service is an exception to this rule, the + service name is in the form aws.sagemaker..notebook). + type: string + state: + description: The state of the VPC endpoint. + type: string + subnetIds: + description: The ID of one or more subnets in which to create + a network interface for the endpoint. Applicable for endpoints + of type GatewayLoadBalancer and Interface. Interface type endpoints + cannot function without being assigned to a subnet. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcEndpointType: + description: The VPC endpoint type, Gateway, GatewayLoadBalancer, + or Interface. Defaults to Gateway. 
+ type: string + vpcId: + description: The ID of the VPC in which the endpoint will be used. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_vpcipampoolcidrs.yaml b/package/crds/ec2.aws.upbound.io_vpcipampoolcidrs.yaml index 3703c63238..d35532fc67 100644 --- a/package/crds/ec2.aws.upbound.io_vpcipampoolcidrs.yaml +++ b/package/crds/ec2.aws.upbound.io_vpcipampoolcidrs.yaml @@ -573,3 +573,552 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPCIpamPoolCidr is the Schema for the VPCIpamPoolCidrs API. Provisions + a CIDR from an IPAM address pool. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPCIpamPoolCidrSpec defines the desired state of VPCIpamPoolCidr + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cidr: + description: The CIDR you want to assign to the pool. Conflicts + with netmask_length. + type: string + cidrAuthorizationContext: + description: A signed document that proves that you are authorized + to bring the specified IP address range to Amazon using BYOIP. + This is not stored in the state file. See cidr_authorization_context + for more information. + properties: + message: + description: The plain-text authorization message for the + prefix and account. + type: string + signature: + description: The signed authorization message for the prefix + and account. + type: string + type: object + ipamPoolId: + description: The ID of the pool to which you want to assign a + CIDR. + type: string + ipamPoolIdRef: + description: Reference to a VPCIpamPool in ec2 to populate ipamPoolId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ipamPoolIdSelector: + description: Selector for a VPCIpamPool in ec2 to populate ipamPoolId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + netmaskLength: + description: If provided, the cidr provisioned into the specified + pool will be the next available cidr given this declared netmask + length. Conflicts with cidr. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cidr: + description: The CIDR you want to assign to the pool. Conflicts + with netmask_length. + type: string + cidrAuthorizationContext: + description: A signed document that proves that you are authorized + to bring the specified IP address range to Amazon using BYOIP. + This is not stored in the state file. See cidr_authorization_context + for more information. + properties: + message: + description: The plain-text authorization message for the + prefix and account. + type: string + signature: + description: The signed authorization message for the prefix + and account. + type: string + type: object + ipamPoolId: + description: The ID of the pool to which you want to assign a + CIDR. + type: string + ipamPoolIdRef: + description: Reference to a VPCIpamPool in ec2 to populate ipamPoolId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ipamPoolIdSelector: + description: Selector for a VPCIpamPool in ec2 to populate ipamPoolId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + netmaskLength: + description: If provided, the cidr provisioned into the specified + pool will be the next available cidr given this declared netmask + length. Conflicts with cidr. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: VPCIpamPoolCidrStatus defines the observed state of VPCIpamPoolCidr. 
+ properties: + atProvider: + properties: + cidr: + description: The CIDR you want to assign to the pool. Conflicts + with netmask_length. + type: string + cidrAuthorizationContext: + description: A signed document that proves that you are authorized + to bring the specified IP address range to Amazon using BYOIP. + This is not stored in the state file. See cidr_authorization_context + for more information. + properties: + message: + description: The plain-text authorization message for the + prefix and account. + type: string + signature: + description: The signed authorization message for the prefix + and account. + type: string + type: object + id: + description: The ID of the IPAM Pool Cidr concatenated with the + IPAM Pool ID. + type: string + ipamPoolCidrId: + description: The unique ID generated by AWS for the pool cidr. + type: string + ipamPoolId: + description: The ID of the pool to which you want to assign a + CIDR. + type: string + netmaskLength: + description: If provided, the cidr provisioned into the specified + pool will be the next available cidr given this declared netmask + length. Conflicts with cidr. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_vpcpeeringconnectionaccepters.yaml b/package/crds/ec2.aws.upbound.io_vpcpeeringconnectionaccepters.yaml index 81895f2487..b31c0572d2 100644 --- a/package/crds/ec2.aws.upbound.io_vpcpeeringconnectionaccepters.yaml +++ b/package/crds/ec2.aws.upbound.io_vpcpeeringconnectionaccepters.yaml @@ -626,3 +626,599 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPCPeeringConnectionAccepter is the Schema for the VPCPeeringConnectionAccepters + API. Manage the accepter's side of a VPC Peering Connection. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPCPeeringConnectionAccepterSpec defines the desired state + of VPCPeeringConnectionAccepter + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accepter: + description: |- + A configuration block that describes [VPC Peering Connection] + (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC. + properties: + allowRemoteVpcDnsResolution: + description: |- + Indicates whether a local VPC can resolve public DNS hostnames to + private IP addresses when queried from instances in a peer VPC. 
+ type: boolean + type: object + autoAccept: + description: Whether or not to accept the peering request. Defaults + to false. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + requester: + description: |- + A configuration block that describes [VPC Peering Connection] + (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. + properties: + allowRemoteVpcDnsResolution: + description: |- + Indicates whether a local VPC can resolve public DNS hostnames to + private IP addresses when queried from instances in a peer VPC. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcPeeringConnectionId: + description: The VPC Peering Connection ID to manage. + type: string + vpcPeeringConnectionIdRef: + description: Reference to a VPCPeeringConnection in ec2 to populate + vpcPeeringConnectionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcPeeringConnectionIdSelector: + description: Selector for a VPCPeeringConnection in ec2 to populate + vpcPeeringConnectionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accepter: + description: |- + A configuration block that describes [VPC Peering Connection] + (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC. + properties: + allowRemoteVpcDnsResolution: + description: |- + Indicates whether a local VPC can resolve public DNS hostnames to + private IP addresses when queried from instances in a peer VPC. + type: boolean + type: object + autoAccept: + description: Whether or not to accept the peering request. Defaults + to false. + type: boolean + requester: + description: |- + A configuration block that describes [VPC Peering Connection] + (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. + properties: + allowRemoteVpcDnsResolution: + description: |- + Indicates whether a local VPC can resolve public DNS hostnames to + private IP addresses when queried from instances in a peer VPC. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcPeeringConnectionId: + description: The VPC Peering Connection ID to manage. + type: string + vpcPeeringConnectionIdRef: + description: Reference to a VPCPeeringConnection in ec2 to populate + vpcPeeringConnectionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcPeeringConnectionIdSelector: + description: Selector for a VPCPeeringConnection in ec2 to populate + vpcPeeringConnectionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: VPCPeeringConnectionAccepterStatus defines the observed state + of VPCPeeringConnectionAccepter. + properties: + atProvider: + properties: + acceptStatus: + description: The status of the VPC Peering Connection request. + type: string + accepter: + description: |- + A configuration block that describes [VPC Peering Connection] + (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC. 
+ properties: + allowRemoteVpcDnsResolution: + description: |- + Indicates whether a local VPC can resolve public DNS hostnames to + private IP addresses when queried from instances in a peer VPC. + type: boolean + type: object + autoAccept: + description: Whether or not to accept the peering request. Defaults + to false. + type: boolean + id: + description: The ID of the VPC Peering Connection. + type: string + peerOwnerId: + description: The AWS account ID of the owner of the requester + VPC. + type: string + peerRegion: + description: The region of the accepter VPC. + type: string + peerVpcId: + description: The ID of the requester VPC. + type: string + requester: + description: |- + A configuration block that describes [VPC Peering Connection] + (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. + properties: + allowRemoteVpcDnsResolution: + description: |- + Indicates whether a local VPC can resolve public DNS hostnames to + private IP addresses when queried from instances in a peer VPC. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcId: + description: The ID of the accepter VPC. + type: string + vpcPeeringConnectionId: + description: The VPC Peering Connection ID to manage. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_vpcpeeringconnectionoptions.yaml b/package/crds/ec2.aws.upbound.io_vpcpeeringconnectionoptions.yaml index ef4f63406c..2379af8c23 100644 --- a/package/crds/ec2.aws.upbound.io_vpcpeeringconnectionoptions.yaml +++ b/package/crds/ec2.aws.upbound.io_vpcpeeringconnectionoptions.yaml @@ -572,3 +572,545 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPCPeeringConnectionOptions is the Schema for the VPCPeeringConnectionOptionss + API. Provides a resource to manage VPC peering connection options. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPCPeeringConnectionOptionsSpec defines the desired state + of VPCPeeringConnectionOptions + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accepter: + description: An optional configuration block that allows for VPC + Peering Connection options to be set for the VPC that acceptsthe + peering connection (a maximum of one). + properties: + allowRemoteVpcDnsResolution: + description: Allow a local VPC to resolve public DNS hostnames + to private IP addresses when queried from instances in the + peer VPC. + type: boolean + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + requester: + description: A optional configuration block that allows for VPC + Peering Connection options to be set for the VPC that requeststhe + peering connection (a maximum of one). + properties: + allowRemoteVpcDnsResolution: + description: Allow a local VPC to resolve public DNS hostnames + to private IP addresses when queried from instances in the + peer VPC. + type: boolean + type: object + vpcPeeringConnectionId: + description: The ID of the requester VPC peering connection. 
+ type: string + vpcPeeringConnectionIdRef: + description: Reference to a VPCPeeringConnection in ec2 to populate + vpcPeeringConnectionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcPeeringConnectionIdSelector: + description: Selector for a VPCPeeringConnection in ec2 to populate + vpcPeeringConnectionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accepter: + description: An optional configuration block that allows for VPC + Peering Connection options to be set for the VPC that acceptsthe + peering connection (a maximum of one). + properties: + allowRemoteVpcDnsResolution: + description: Allow a local VPC to resolve public DNS hostnames + to private IP addresses when queried from instances in the + peer VPC. + type: boolean + type: object + requester: + description: A optional configuration block that allows for VPC + Peering Connection options to be set for the VPC that requeststhe + peering connection (a maximum of one). + properties: + allowRemoteVpcDnsResolution: + description: Allow a local VPC to resolve public DNS hostnames + to private IP addresses when queried from instances in the + peer VPC. + type: boolean + type: object + vpcPeeringConnectionId: + description: The ID of the requester VPC peering connection. 
+ type: string + vpcPeeringConnectionIdRef: + description: Reference to a VPCPeeringConnection in ec2 to populate + vpcPeeringConnectionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcPeeringConnectionIdSelector: + description: Selector for a VPCPeeringConnection in ec2 to populate + vpcPeeringConnectionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: VPCPeeringConnectionOptionsStatus defines the observed state + of VPCPeeringConnectionOptions. 
+ properties: + atProvider: + properties: + accepter: + description: An optional configuration block that allows for VPC + Peering Connection options to be set for the VPC that acceptsthe + peering connection (a maximum of one). + properties: + allowRemoteVpcDnsResolution: + description: Allow a local VPC to resolve public DNS hostnames + to private IP addresses when queried from instances in the + peer VPC. + type: boolean + type: object + id: + description: The ID of the VPC Peering Connection Options. + type: string + requester: + description: A optional configuration block that allows for VPC + Peering Connection options to be set for the VPC that requeststhe + peering connection (a maximum of one). + properties: + allowRemoteVpcDnsResolution: + description: Allow a local VPC to resolve public DNS hostnames + to private IP addresses when queried from instances in the + peer VPC. + type: boolean + type: object + vpcPeeringConnectionId: + description: The ID of the requester VPC peering connection. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_vpcpeeringconnections.yaml b/package/crds/ec2.aws.upbound.io_vpcpeeringconnections.yaml index 06d4a8ccab..ba502b8d4e 100644 --- a/package/crds/ec2.aws.upbound.io_vpcpeeringconnections.yaml +++ b/package/crds/ec2.aws.upbound.io_vpcpeeringconnections.yaml @@ -746,3 +746,727 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPCPeeringConnection is the Schema for the VPCPeeringConnections + API. Provides a resource to manage a VPC peering connection. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPCPeeringConnectionSpec defines the desired state of VPCPeeringConnection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAccept: + description: Accept the peering (both VPCs need to be in the same + AWS account and region). + type: boolean + peerOwnerId: + description: |- + The AWS account ID of the target peer VPC. + Defaults to the account ID the AWS provider is currently connected to, so must be managed if connecting cross-account. + type: string + peerRegion: + description: |- + The region of the accepter VPC of the VPC Peering Connection. auto_accept must be false, + and use the aws_vpc_peering_connection_accepter to manage the accepter side. + type: string + peerVpcId: + description: The ID of the target VPC with which you are creating + the VPC Peering Connection. 
+ type: string + peerVpcIdRef: + description: Reference to a VPC in ec2 to populate peerVpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + peerVpcIdSelector: + description: Selector for a VPC in ec2 to populate peerVpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcId: + description: The ID of the requester VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAccept: + description: Accept the peering (both VPCs need to be in the same + AWS account and region). + type: boolean + peerOwnerId: + description: |- + The AWS account ID of the target peer VPC. + Defaults to the account ID the AWS provider is currently connected to, so must be managed if connecting cross-account. + type: string + peerRegion: + description: |- + The region of the accepter VPC of the VPC Peering Connection. auto_accept must be false, + and use the aws_vpc_peering_connection_accepter to manage the accepter side. 
+ type: string + peerVpcId: + description: The ID of the target VPC with which you are creating + the VPC Peering Connection. + type: string + peerVpcIdRef: + description: Reference to a VPC in ec2 to populate peerVpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + peerVpcIdSelector: + description: Selector for a VPC in ec2 to populate peerVpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcId: + description: The ID of the requester VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: VPCPeeringConnectionStatus defines the observed state of + VPCPeeringConnection. + properties: + atProvider: + properties: + acceptStatus: + description: The status of the VPC Peering Connection request. + type: string + accepter: + description: |- + An optional configuration block that allows for VPC Peering Connection options to be set for the VPC that accepts + the peering connection (a maximum of one). + properties: + allowRemoteVpcDnsResolution: + description: |- + Allow a local VPC to resolve public DNS hostnames to + private IP addresses when queried from instances in the peer VPC. + type: boolean + type: object + autoAccept: + description: Accept the peering (both VPCs need to be in the same + AWS account and region). + type: boolean + id: + description: The ID of the VPC Peering Connection. + type: string + peerOwnerId: + description: |- + The AWS account ID of the target peer VPC. + Defaults to the account ID the AWS provider is currently connected to, so must be managed if connecting cross-account. + type: string + peerRegion: + description: |- + The region of the accepter VPC of the VPC Peering Connection. auto_accept must be false, + and use the aws_vpc_peering_connection_accepter to manage the accepter side. + type: string + peerVpcId: + description: The ID of the target VPC with which you are creating + the VPC Peering Connection. 
+ type: string + requester: + description: |- + A optional configuration block that allows for VPC Peering Connection options to be set for the VPC that requests + the peering connection (a maximum of one). + properties: + allowRemoteVpcDnsResolution: + description: |- + Allow a local VPC to resolve public DNS hostnames to + private IP addresses when queried from instances in the peer VPC. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcId: + description: The ID of the requester VPC. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ec2.aws.upbound.io_vpnconnections.yaml b/package/crds/ec2.aws.upbound.io_vpnconnections.yaml index 0908bc66d2..bff463724a 100644 --- a/package/crds/ec2.aws.upbound.io_vpnconnections.yaml +++ b/package/crds/ec2.aws.upbound.io_vpnconnections.yaml @@ -2139,3 +2139,2094 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPNConnection is the Schema for the VPNConnections API. Manages + a Site-to-Site VPN connection. A Site-to-Site VPN connection is an Internet + Protocol security (IPsec) VPN connection between a VPC and an on-premises + network. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPNConnectionSpec defines the desired state of VPNConnection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customerGatewayId: + description: The ID of the customer gateway. + type: string + customerGatewayIdRef: + description: Reference to a CustomerGateway in ec2 to populate + customerGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + customerGatewayIdSelector: + description: Selector for a CustomerGateway in ec2 to populate + customerGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enableAcceleration: + description: Indicate whether to enable acceleration for the VPN + connection. Supports only EC2 Transit Gateway. + type: boolean + localIpv4NetworkCidr: + description: The IPv4 CIDR on the customer gateway (on-premises) + side of the VPN connection. 
+ type: string + localIpv6NetworkCidr: + description: The IPv6 CIDR on the customer gateway (on-premises) + side of the VPN connection. + type: string + outsideIpAddressType: + description: Indicates if a Public S2S VPN or Private S2S VPN + over AWS Direct Connect. Valid values are PublicIpv4 | PrivateIpv4 + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + remoteIpv4NetworkCidr: + description: The IPv4 CIDR on the AWS side of the VPN connection. + type: string + remoteIpv6NetworkCidr: + description: The IPv6 CIDR on the AWS side of the VPN connection. + type: string + staticRoutesOnly: + description: Whether the VPN connection uses static routes exclusively. + Static routes must be used for devices that don't support BGP. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + transitGatewayId: + description: The ID of the EC2 Transit Gateway. + type: string + transitGatewayIdRef: + description: Reference to a TransitGateway in ec2 to populate + transitGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + transitGatewayIdSelector: + description: Selector for a TransitGateway in ec2 to populate + transitGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + transportTransitGatewayAttachmentId: + description: . The attachment ID of the Transit Gateway attachment + to Direct Connect Gateway. The ID is obtained through a data + source only. + type: string + tunnel1DpdTimeoutAction: + description: The action to take after DPD timeout occurs for the + first VPN tunnel. Specify restart to restart the IKE initiation. + Specify clear to end the IKE session. Valid values are clear + | none | restart. + type: string + tunnel1DpdTimeoutSeconds: + description: The number of seconds after which a DPD timeout occurs + for the first VPN tunnel. Valid value is equal or higher than + 30. 
+ type: number + tunnel1EnableTunnelLifecycleControl: + description: Turn on or off tunnel endpoint lifecycle control + feature for the first VPN tunnel. Valid values are true | false. + type: boolean + tunnel1IkeVersions: + description: The IKE versions that are permitted for the first + VPN tunnel. Valid values are ikev1 | ikev2. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1InsideCidr: + description: The CIDR block of the inside IP addresses for the + first VPN tunnel. Valid value is a size /30 CIDR block from + the 169.254.0.0/16 range. + type: string + tunnel1InsideIpv6Cidr: + description: The range of inside IPv6 addresses for the first + VPN tunnel. Supports only EC2 Transit Gateway. Valid value is + a size /126 CIDR block from the local fd00::/8 range. + type: string + tunnel1LogOptions: + description: Options for logging VPN tunnel activity. See Log + Options below for more details. + properties: + cloudwatchLogOptions: + description: Options for sending VPN tunnel logs to CloudWatch. + See CloudWatch Log Options below for more details. + properties: + logEnabled: + description: Enable or disable VPN tunnel logging feature. + The default is false. + type: boolean + logGroupArn: + description: The Amazon Resource Name (ARN) of the CloudWatch + log group to send logs to. + type: string + logOutputFormat: + description: 'Set log format. Default format is json. + Possible values are: json and text. The default is json.' + type: string + type: object + type: object + tunnel1Phase1DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the first VPN tunnel for phase 1 IKE + negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | + 19 | 20 | 21 | 22 | 23 | 24. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + tunnel1Phase1EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the first VPN tunnel for phase 1 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase1IntegrityAlgorithms: + description: One or more integrity algorithms that are permitted + for the first VPN tunnel for phase 1 IKE negotiations. Valid + values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase1LifetimeSeconds: + description: The lifetime for phase 1 of the IKE negotiation for + the first VPN tunnel, in seconds. Valid value is between 900 + and 28800. + type: number + tunnel1Phase2DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the first VPN tunnel for phase 2 IKE + negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 + | 19 | 20 | 21 | 22 | 23 | 24. + items: + type: number + type: array + x-kubernetes-list-type: set + tunnel1Phase2EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the first VPN tunnel for phase 2 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase2IntegrityAlgorithms: + description: List of one or more integrity algorithms that are + permitted for the first VPN tunnel for phase 2 IKE negotiations. + Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase2LifetimeSeconds: + description: The lifetime for phase 2 of the IKE negotiation for + the first VPN tunnel, in seconds. Valid value is between 900 + and 3600. 
+ type: number + tunnel1PresharedKeySecretRef: + description: The preshared key of the first VPN tunnel. The preshared + key must be between 8 and 64 characters in length and cannot + start with zero(0). Allowed characters are alphanumeric characters, + periods(.) and underscores(_). + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + tunnel1RekeyFuzzPercentage: + description: The percentage of the rekey window for the first + VPN tunnel (determined by tunnel1_rekey_margin_time_seconds) + during which the rekey time is randomly selected. Valid value + is between 0 and 100. + type: number + tunnel1RekeyMarginTimeSeconds: + description: The margin time, in seconds, before the phase 2 lifetime + expires, during which the AWS side of the first VPN connection + performs an IKE rekey. The exact time of the rekey is randomly + selected based on the value for tunnel1_rekey_fuzz_percentage. + Valid value is between 60 and half of tunnel1_phase2_lifetime_seconds. + type: number + tunnel1ReplayWindowSize: + description: The number of packets in an IKE replay window for + the first VPN tunnel. Valid value is between 64 and 2048. + type: number + tunnel1StartupAction: + description: The action to take when the establishing the tunnel + for the first VPN connection. By default, your customer gateway + device must initiate the IKE negotiation and bring up the tunnel. + Specify start for AWS to initiate the IKE negotiation. Valid + values are add | start. + type: string + tunnel2DpdTimeoutAction: + description: The action to take after DPD timeout occurs for the + second VPN tunnel. Specify restart to restart the IKE initiation. + Specify clear to end the IKE session. Valid values are clear + | none | restart. 
+ type: string + tunnel2DpdTimeoutSeconds: + description: The number of seconds after which a DPD timeout occurs + for the second VPN tunnel. Valid value is equal or higher than + 30. + type: number + tunnel2EnableTunnelLifecycleControl: + description: Turn on or off tunnel endpoint lifecycle control + feature for the second VPN tunnel. Valid values are true | false. + type: boolean + tunnel2IkeVersions: + description: The IKE versions that are permitted for the second + VPN tunnel. Valid values are ikev1 | ikev2. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2InsideCidr: + description: The CIDR block of the inside IP addresses for the + second VPN tunnel. Valid value is a size /30 CIDR block from + the 169.254.0.0/16 range. + type: string + tunnel2InsideIpv6Cidr: + description: The range of inside IPv6 addresses for the second + VPN tunnel. Supports only EC2 Transit Gateway. Valid value is + a size /126 CIDR block from the local fd00::/8 range. + type: string + tunnel2LogOptions: + description: Options for logging VPN tunnel activity. See Log + Options below for more details. + properties: + cloudwatchLogOptions: + description: Options for sending VPN tunnel logs to CloudWatch. + See CloudWatch Log Options below for more details. + properties: + logEnabled: + description: Enable or disable VPN tunnel logging feature. + The default is false. + type: boolean + logGroupArn: + description: The Amazon Resource Name (ARN) of the CloudWatch + log group to send logs to. + type: string + logOutputFormat: + description: 'Set log format. Default format is json. + Possible values are: json and text. The default is json.' + type: string + type: object + type: object + tunnel2Phase1DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the second VPN tunnel for phase 1 IKE + negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | + 19 | 20 | 21 | 22 | 23 | 24. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + tunnel2Phase1EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the second VPN tunnel for phase 1 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase1IntegrityAlgorithms: + description: One or more integrity algorithms that are permitted + for the second VPN tunnel for phase 1 IKE negotiations. Valid + values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase1LifetimeSeconds: + description: The lifetime for phase 1 of the IKE negotiation for + the second VPN tunnel, in seconds. Valid value is between 900 + and 28800. + type: number + tunnel2Phase2DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the second VPN tunnel for phase 2 IKE + negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 + | 19 | 20 | 21 | 22 | 23 | 24. + items: + type: number + type: array + x-kubernetes-list-type: set + tunnel2Phase2EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the second VPN tunnel for phase 2 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase2IntegrityAlgorithms: + description: List of one or more integrity algorithms that are + permitted for the second VPN tunnel for phase 2 IKE negotiations. + Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase2LifetimeSeconds: + description: The lifetime for phase 2 of the IKE negotiation for + the second VPN tunnel, in seconds. Valid value is between 900 + and 3600. 
+ type: number + tunnel2PresharedKeySecretRef: + description: The preshared key of the second VPN tunnel. The preshared + key must be between 8 and 64 characters in length and cannot + start with zero(0). Allowed characters are alphanumeric characters, + periods(.) and underscores(_). + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + tunnel2RekeyFuzzPercentage: + description: The percentage of the rekey window for the second + VPN tunnel (determined by tunnel2_rekey_margin_time_seconds) + during which the rekey time is randomly selected. Valid value + is between 0 and 100. + type: number + tunnel2RekeyMarginTimeSeconds: + description: The margin time, in seconds, before the phase 2 lifetime + expires, during which the AWS side of the second VPN connection + performs an IKE rekey. The exact time of the rekey is randomly + selected based on the value for tunnel2_rekey_fuzz_percentage. + Valid value is between 60 and half of tunnel2_phase2_lifetime_seconds. + type: number + tunnel2ReplayWindowSize: + description: The number of packets in an IKE replay window for + the second VPN tunnel. Valid value is between 64 and 2048. + type: number + tunnel2StartupAction: + description: The action to take when the establishing the tunnel + for the second VPN connection. By default, your customer gateway + device must initiate the IKE negotiation and bring up the tunnel. + Specify start for AWS to initiate the IKE negotiation. Valid + values are add | start. + type: string + tunnelInsideIpVersion: + description: Indicate whether the VPN tunnels process IPv4 or + IPv6 traffic. Valid values are ipv4 | ipv6. ipv6 Supports only + EC2 Transit Gateway. + type: string + type: + description: The type of VPN connection. The only type AWS supports + at this time is "ipsec.1". 
+ type: string + typeRef: + description: Reference to a CustomerGateway in ec2 to populate + type. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + typeSelector: + description: Selector for a CustomerGateway in ec2 to populate + type. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpnGatewayId: + description: The ID of the Virtual Private Gateway. + type: string + vpnGatewayIdRef: + description: Reference to a VPNGateway in ec2 to populate vpnGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpnGatewayIdSelector: + description: Selector for a VPNGateway in ec2 to populate vpnGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + customerGatewayId: + description: The ID of the customer gateway. + type: string + customerGatewayIdRef: + description: Reference to a CustomerGateway in ec2 to populate + customerGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + customerGatewayIdSelector: + description: Selector for a CustomerGateway in ec2 to populate + customerGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enableAcceleration: + description: Indicate whether to enable acceleration for the VPN + connection. Supports only EC2 Transit Gateway. + type: boolean + localIpv4NetworkCidr: + description: The IPv4 CIDR on the customer gateway (on-premises) + side of the VPN connection. 
+ type: string + localIpv6NetworkCidr: + description: The IPv6 CIDR on the customer gateway (on-premises) + side of the VPN connection. + type: string + outsideIpAddressType: + description: Indicates if a Public S2S VPN or Private S2S VPN + over AWS Direct Connect. Valid values are PublicIpv4 | PrivateIpv4 + type: string + remoteIpv4NetworkCidr: + description: The IPv4 CIDR on the AWS side of the VPN connection. + type: string + remoteIpv6NetworkCidr: + description: The IPv6 CIDR on the AWS side of the VPN connection. + type: string + staticRoutesOnly: + description: Whether the VPN connection uses static routes exclusively. + Static routes must be used for devices that don't support BGP. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + transitGatewayId: + description: The ID of the EC2 Transit Gateway. + type: string + transitGatewayIdRef: + description: Reference to a TransitGateway in ec2 to populate + transitGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + transitGatewayIdSelector: + description: Selector for a TransitGateway in ec2 to populate + transitGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + transportTransitGatewayAttachmentId: + description: . The attachment ID of the Transit Gateway attachment + to Direct Connect Gateway. The ID is obtained through a data + source only. + type: string + tunnel1DpdTimeoutAction: + description: The action to take after DPD timeout occurs for the + first VPN tunnel. Specify restart to restart the IKE initiation. + Specify clear to end the IKE session. Valid values are clear + | none | restart. + type: string + tunnel1DpdTimeoutSeconds: + description: The number of seconds after which a DPD timeout occurs + for the first VPN tunnel. Valid value is equal or higher than + 30. 
+ type: number + tunnel1EnableTunnelLifecycleControl: + description: Turn on or off tunnel endpoint lifecycle control + feature for the first VPN tunnel. Valid values are true | false. + type: boolean + tunnel1IkeVersions: + description: The IKE versions that are permitted for the first + VPN tunnel. Valid values are ikev1 | ikev2. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1InsideCidr: + description: The CIDR block of the inside IP addresses for the + first VPN tunnel. Valid value is a size /30 CIDR block from + the 169.254.0.0/16 range. + type: string + tunnel1InsideIpv6Cidr: + description: The range of inside IPv6 addresses for the first + VPN tunnel. Supports only EC2 Transit Gateway. Valid value is + a size /126 CIDR block from the local fd00::/8 range. + type: string + tunnel1LogOptions: + description: Options for logging VPN tunnel activity. See Log + Options below for more details. + properties: + cloudwatchLogOptions: + description: Options for sending VPN tunnel logs to CloudWatch. + See CloudWatch Log Options below for more details. + properties: + logEnabled: + description: Enable or disable VPN tunnel logging feature. + The default is false. + type: boolean + logGroupArn: + description: The Amazon Resource Name (ARN) of the CloudWatch + log group to send logs to. + type: string + logOutputFormat: + description: 'Set log format. Default format is json. + Possible values are: json and text. The default is json.' + type: string + type: object + type: object + tunnel1Phase1DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the first VPN tunnel for phase 1 IKE + negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | + 19 | 20 | 21 | 22 | 23 | 24. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + tunnel1Phase1EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the first VPN tunnel for phase 1 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase1IntegrityAlgorithms: + description: One or more integrity algorithms that are permitted + for the first VPN tunnel for phase 1 IKE negotiations. Valid + values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase1LifetimeSeconds: + description: The lifetime for phase 1 of the IKE negotiation for + the first VPN tunnel, in seconds. Valid value is between 900 + and 28800. + type: number + tunnel1Phase2DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the first VPN tunnel for phase 2 IKE + negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 + | 19 | 20 | 21 | 22 | 23 | 24. + items: + type: number + type: array + x-kubernetes-list-type: set + tunnel1Phase2EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the first VPN tunnel for phase 2 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase2IntegrityAlgorithms: + description: List of one or more integrity algorithms that are + permitted for the first VPN tunnel for phase 2 IKE negotiations. + Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase2LifetimeSeconds: + description: The lifetime for phase 2 of the IKE negotiation for + the first VPN tunnel, in seconds. Valid value is between 900 + and 3600. 
+ type: number + tunnel1PresharedKeySecretRef: + description: The preshared key of the first VPN tunnel. The preshared + key must be between 8 and 64 characters in length and cannot + start with zero(0). Allowed characters are alphanumeric characters, + periods(.) and underscores(_). + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + tunnel1RekeyFuzzPercentage: + description: The percentage of the rekey window for the first + VPN tunnel (determined by tunnel1_rekey_margin_time_seconds) + during which the rekey time is randomly selected. Valid value + is between 0 and 100. + type: number + tunnel1RekeyMarginTimeSeconds: + description: The margin time, in seconds, before the phase 2 lifetime + expires, during which the AWS side of the first VPN connection + performs an IKE rekey. The exact time of the rekey is randomly + selected based on the value for tunnel1_rekey_fuzz_percentage. + Valid value is between 60 and half of tunnel1_phase2_lifetime_seconds. + type: number + tunnel1ReplayWindowSize: + description: The number of packets in an IKE replay window for + the first VPN tunnel. Valid value is between 64 and 2048. + type: number + tunnel1StartupAction: + description: The action to take when the establishing the tunnel + for the first VPN connection. By default, your customer gateway + device must initiate the IKE negotiation and bring up the tunnel. + Specify start for AWS to initiate the IKE negotiation. Valid + values are add | start. + type: string + tunnel2DpdTimeoutAction: + description: The action to take after DPD timeout occurs for the + second VPN tunnel. Specify restart to restart the IKE initiation. + Specify clear to end the IKE session. Valid values are clear + | none | restart. 
+ type: string + tunnel2DpdTimeoutSeconds: + description: The number of seconds after which a DPD timeout occurs + for the second VPN tunnel. Valid value is equal or higher than + 30. + type: number + tunnel2EnableTunnelLifecycleControl: + description: Turn on or off tunnel endpoint lifecycle control + feature for the second VPN tunnel. Valid values are true | false. + type: boolean + tunnel2IkeVersions: + description: The IKE versions that are permitted for the second + VPN tunnel. Valid values are ikev1 | ikev2. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2InsideCidr: + description: The CIDR block of the inside IP addresses for the + second VPN tunnel. Valid value is a size /30 CIDR block from + the 169.254.0.0/16 range. + type: string + tunnel2InsideIpv6Cidr: + description: The range of inside IPv6 addresses for the second + VPN tunnel. Supports only EC2 Transit Gateway. Valid value is + a size /126 CIDR block from the local fd00::/8 range. + type: string + tunnel2LogOptions: + description: Options for logging VPN tunnel activity. See Log + Options below for more details. + properties: + cloudwatchLogOptions: + description: Options for sending VPN tunnel logs to CloudWatch. + See CloudWatch Log Options below for more details. + properties: + logEnabled: + description: Enable or disable VPN tunnel logging feature. + The default is false. + type: boolean + logGroupArn: + description: The Amazon Resource Name (ARN) of the CloudWatch + log group to send logs to. + type: string + logOutputFormat: + description: 'Set log format. Default format is json. + Possible values are: json and text. The default is json.' + type: string + type: object + type: object + tunnel2Phase1DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the second VPN tunnel for phase 1 IKE + negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | + 19 | 20 | 21 | 22 | 23 | 24. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + tunnel2Phase1EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the second VPN tunnel for phase 1 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase1IntegrityAlgorithms: + description: One or more integrity algorithms that are permitted + for the second VPN tunnel for phase 1 IKE negotiations. Valid + values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase1LifetimeSeconds: + description: The lifetime for phase 1 of the IKE negotiation for + the second VPN tunnel, in seconds. Valid value is between 900 + and 28800. + type: number + tunnel2Phase2DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the second VPN tunnel for phase 2 IKE + negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 + | 19 | 20 | 21 | 22 | 23 | 24. + items: + type: number + type: array + x-kubernetes-list-type: set + tunnel2Phase2EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the second VPN tunnel for phase 2 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase2IntegrityAlgorithms: + description: List of one or more integrity algorithms that are + permitted for the second VPN tunnel for phase 2 IKE negotiations. + Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase2LifetimeSeconds: + description: The lifetime for phase 2 of the IKE negotiation for + the second VPN tunnel, in seconds. Valid value is between 900 + and 3600. 
+ type: number + tunnel2PresharedKeySecretRef: + description: The preshared key of the second VPN tunnel. The preshared + key must be between 8 and 64 characters in length and cannot + start with zero(0). Allowed characters are alphanumeric characters, + periods(.) and underscores(_). + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + tunnel2RekeyFuzzPercentage: + description: The percentage of the rekey window for the second + VPN tunnel (determined by tunnel2_rekey_margin_time_seconds) + during which the rekey time is randomly selected. Valid value + is between 0 and 100. + type: number + tunnel2RekeyMarginTimeSeconds: + description: The margin time, in seconds, before the phase 2 lifetime + expires, during which the AWS side of the second VPN connection + performs an IKE rekey. The exact time of the rekey is randomly + selected based on the value for tunnel2_rekey_fuzz_percentage. + Valid value is between 60 and half of tunnel2_phase2_lifetime_seconds. + type: number + tunnel2ReplayWindowSize: + description: The number of packets in an IKE replay window for + the second VPN tunnel. Valid value is between 64 and 2048. + type: number + tunnel2StartupAction: + description: The action to take when the establishing the tunnel + for the second VPN connection. By default, your customer gateway + device must initiate the IKE negotiation and bring up the tunnel. + Specify start for AWS to initiate the IKE negotiation. Valid + values are add | start. + type: string + tunnelInsideIpVersion: + description: Indicate whether the VPN tunnels process IPv4 or + IPv6 traffic. Valid values are ipv4 | ipv6. ipv6 Supports only + EC2 Transit Gateway. + type: string + type: + description: The type of VPN connection. The only type AWS supports + at this time is "ipsec.1". 
+ type: string + typeRef: + description: Reference to a CustomerGateway in ec2 to populate + type. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + typeSelector: + description: Selector for a CustomerGateway in ec2 to populate + type. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpnGatewayId: + description: The ID of the Virtual Private Gateway. + type: string + vpnGatewayIdRef: + description: Reference to a VPNGateway in ec2 to populate vpnGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpnGatewayIdSelector: + description: Selector for a VPNGateway in ec2 to populate vpnGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: VPNConnectionStatus defines the observed state of VPNConnection. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the VPN Connection. + type: string + coreNetworkArn: + description: The ARN of the core network. + type: string + coreNetworkAttachmentArn: + description: The ARN of the core network attachment. + type: string + customerGatewayId: + description: The ID of the customer gateway. + type: string + enableAcceleration: + description: Indicate whether to enable acceleration for the VPN + connection. Supports only EC2 Transit Gateway. + type: boolean + id: + description: The amazon-assigned ID of the VPN connection. + type: string + localIpv4NetworkCidr: + description: The IPv4 CIDR on the customer gateway (on-premises) + side of the VPN connection. + type: string + localIpv6NetworkCidr: + description: The IPv6 CIDR on the customer gateway (on-premises) + side of the VPN connection. + type: string + outsideIpAddressType: + description: Indicates if a Public S2S VPN or Private S2S VPN + over AWS Direct Connect. Valid values are PublicIpv4 | PrivateIpv4 + type: string + remoteIpv4NetworkCidr: + description: The IPv4 CIDR on the AWS side of the VPN connection. + type: string + remoteIpv6NetworkCidr: + description: The IPv6 CIDR on the AWS side of the VPN connection. + type: string + routes: + description: The static routes associated with the VPN connection. + Detailed below. + items: + properties: + destinationCidrBlock: + description: The CIDR block associated with the local subnet + of the customer data center. + type: string + source: + description: Indicates how the routes were provided. + type: string + state: + description: The current state of the static route. 
+ type: string + type: object + type: array + staticRoutesOnly: + description: Whether the VPN connection uses static routes exclusively. + Static routes must be used for devices that don't support BGP. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + transitGatewayAttachmentId: + description: When associated with an EC2 Transit Gateway (transit_gateway_id + argument), the attachment ID. See also the aws_ec2_tag resource + for tagging the EC2 Transit Gateway VPN Attachment. + type: string + transitGatewayId: + description: The ID of the EC2 Transit Gateway. + type: string + transportTransitGatewayAttachmentId: + description: . The attachment ID of the Transit Gateway attachment + to Direct Connect Gateway. The ID is obtained through a data + source only. + type: string + tunnel1Address: + description: The public IP address of the first VPN tunnel. + type: string + tunnel1BgpAsn: + description: The bgp asn number of the first VPN tunnel. + type: string + tunnel1BgpHoldtime: + description: The bgp holdtime of the first VPN tunnel. + type: number + tunnel1CgwInsideAddress: + description: The RFC 6890 link-local address of the first VPN + tunnel (Customer Gateway Side). + type: string + tunnel1DpdTimeoutAction: + description: The action to take after DPD timeout occurs for the + first VPN tunnel. Specify restart to restart the IKE initiation. + Specify clear to end the IKE session. Valid values are clear + | none | restart. + type: string + tunnel1DpdTimeoutSeconds: + description: The number of seconds after which a DPD timeout occurs + for the first VPN tunnel. Valid value is equal or higher than + 30. 
+ type: number + tunnel1EnableTunnelLifecycleControl: + description: Turn on or off tunnel endpoint lifecycle control + feature for the first VPN tunnel. Valid values are true | false. + type: boolean + tunnel1IkeVersions: + description: The IKE versions that are permitted for the first + VPN tunnel. Valid values are ikev1 | ikev2. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1InsideCidr: + description: The CIDR block of the inside IP addresses for the + first VPN tunnel. Valid value is a size /30 CIDR block from + the 169.254.0.0/16 range. + type: string + tunnel1InsideIpv6Cidr: + description: The range of inside IPv6 addresses for the first + VPN tunnel. Supports only EC2 Transit Gateway. Valid value is + a size /126 CIDR block from the local fd00::/8 range. + type: string + tunnel1LogOptions: + description: Options for logging VPN tunnel activity. See Log + Options below for more details. + properties: + cloudwatchLogOptions: + description: Options for sending VPN tunnel logs to CloudWatch. + See CloudWatch Log Options below for more details. + properties: + logEnabled: + description: Enable or disable VPN tunnel logging feature. + The default is false. + type: boolean + logGroupArn: + description: The Amazon Resource Name (ARN) of the CloudWatch + log group to send logs to. + type: string + logOutputFormat: + description: 'Set log format. Default format is json. + Possible values are: json and text. The default is json.' + type: string + type: object + type: object + tunnel1Phase1DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the first VPN tunnel for phase 1 IKE + negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | + 19 | 20 | 21 | 22 | 23 | 24. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + tunnel1Phase1EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the first VPN tunnel for phase 1 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase1IntegrityAlgorithms: + description: One or more integrity algorithms that are permitted + for the first VPN tunnel for phase 1 IKE negotiations. Valid + values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase1LifetimeSeconds: + description: The lifetime for phase 1 of the IKE negotiation for + the first VPN tunnel, in seconds. Valid value is between 900 + and 28800. + type: number + tunnel1Phase2DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the first VPN tunnel for phase 2 IKE + negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 + | 19 | 20 | 21 | 22 | 23 | 24. + items: + type: number + type: array + x-kubernetes-list-type: set + tunnel1Phase2EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the first VPN tunnel for phase 2 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase2IntegrityAlgorithms: + description: List of one or more integrity algorithms that are + permitted for the first VPN tunnel for phase 2 IKE negotiations. + Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel1Phase2LifetimeSeconds: + description: The lifetime for phase 2 of the IKE negotiation for + the first VPN tunnel, in seconds. Valid value is between 900 + and 3600. 
+ type: number + tunnel1RekeyFuzzPercentage: + description: The percentage of the rekey window for the first + VPN tunnel (determined by tunnel1_rekey_margin_time_seconds) + during which the rekey time is randomly selected. Valid value + is between 0 and 100. + type: number + tunnel1RekeyMarginTimeSeconds: + description: The margin time, in seconds, before the phase 2 lifetime + expires, during which the AWS side of the first VPN connection + performs an IKE rekey. The exact time of the rekey is randomly + selected based on the value for tunnel1_rekey_fuzz_percentage. + Valid value is between 60 and half of tunnel1_phase2_lifetime_seconds. + type: number + tunnel1ReplayWindowSize: + description: The number of packets in an IKE replay window for + the first VPN tunnel. Valid value is between 64 and 2048. + type: number + tunnel1StartupAction: + description: The action to take when the establishing the tunnel + for the first VPN connection. By default, your customer gateway + device must initiate the IKE negotiation and bring up the tunnel. + Specify start for AWS to initiate the IKE negotiation. Valid + values are add | start. + type: string + tunnel1VgwInsideAddress: + description: The RFC 6890 link-local address of the first VPN + tunnel (VPN Gateway Side). + type: string + tunnel2Address: + description: The public IP address of the second VPN tunnel. + type: string + tunnel2BgpAsn: + description: The bgp asn number of the second VPN tunnel. + type: string + tunnel2BgpHoldtime: + description: The bgp holdtime of the second VPN tunnel. + type: number + tunnel2CgwInsideAddress: + description: The RFC 6890 link-local address of the second VPN + tunnel (Customer Gateway Side). + type: string + tunnel2DpdTimeoutAction: + description: The action to take after DPD timeout occurs for the + second VPN tunnel. Specify restart to restart the IKE initiation. + Specify clear to end the IKE session. Valid values are clear + | none | restart. 
+ type: string + tunnel2DpdTimeoutSeconds: + description: The number of seconds after which a DPD timeout occurs + for the second VPN tunnel. Valid value is equal or higher than + 30. + type: number + tunnel2EnableTunnelLifecycleControl: + description: Turn on or off tunnel endpoint lifecycle control + feature for the second VPN tunnel. Valid values are true | false. + type: boolean + tunnel2IkeVersions: + description: The IKE versions that are permitted for the second + VPN tunnel. Valid values are ikev1 | ikev2. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2InsideCidr: + description: The CIDR block of the inside IP addresses for the + second VPN tunnel. Valid value is a size /30 CIDR block from + the 169.254.0.0/16 range. + type: string + tunnel2InsideIpv6Cidr: + description: The range of inside IPv6 addresses for the second + VPN tunnel. Supports only EC2 Transit Gateway. Valid value is + a size /126 CIDR block from the local fd00::/8 range. + type: string + tunnel2LogOptions: + description: Options for logging VPN tunnel activity. See Log + Options below for more details. + properties: + cloudwatchLogOptions: + description: Options for sending VPN tunnel logs to CloudWatch. + See CloudWatch Log Options below for more details. + properties: + logEnabled: + description: Enable or disable VPN tunnel logging feature. + The default is false. + type: boolean + logGroupArn: + description: The Amazon Resource Name (ARN) of the CloudWatch + log group to send logs to. + type: string + logOutputFormat: + description: 'Set log format. Default format is json. + Possible values are: json and text. The default is json.' + type: string + type: object + type: object + tunnel2Phase1DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the second VPN tunnel for phase 1 IKE + negotiations. Valid values are 2 | 14 | 15 | 16 | 17 | 18 | + 19 | 20 | 21 | 22 | 23 | 24. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + tunnel2Phase1EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the second VPN tunnel for phase 1 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase1IntegrityAlgorithms: + description: One or more integrity algorithms that are permitted + for the second VPN tunnel for phase 1 IKE negotiations. Valid + values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase1LifetimeSeconds: + description: The lifetime for phase 1 of the IKE negotiation for + the second VPN tunnel, in seconds. Valid value is between 900 + and 28800. + type: number + tunnel2Phase2DhGroupNumbers: + description: List of one or more Diffie-Hellman group numbers + that are permitted for the second VPN tunnel for phase 2 IKE + negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 + | 19 | 20 | 21 | 22 | 23 | 24. + items: + type: number + type: array + x-kubernetes-list-type: set + tunnel2Phase2EncryptionAlgorithms: + description: List of one or more encryption algorithms that are + permitted for the second VPN tunnel for phase 2 IKE negotiations. + Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase2IntegrityAlgorithms: + description: List of one or more integrity algorithms that are + permitted for the second VPN tunnel for phase 2 IKE negotiations. + Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512. + items: + type: string + type: array + x-kubernetes-list-type: set + tunnel2Phase2LifetimeSeconds: + description: The lifetime for phase 2 of the IKE negotiation for + the second VPN tunnel, in seconds. Valid value is between 900 + and 3600. 
+ type: number + tunnel2RekeyFuzzPercentage: + description: The percentage of the rekey window for the second + VPN tunnel (determined by tunnel2_rekey_margin_time_seconds) + during which the rekey time is randomly selected. Valid value + is between 0 and 100. + type: number + tunnel2RekeyMarginTimeSeconds: + description: The margin time, in seconds, before the phase 2 lifetime + expires, during which the AWS side of the second VPN connection + performs an IKE rekey. The exact time of the rekey is randomly + selected based on the value for tunnel2_rekey_fuzz_percentage. + Valid value is between 60 and half of tunnel2_phase2_lifetime_seconds. + type: number + tunnel2ReplayWindowSize: + description: The number of packets in an IKE replay window for + the second VPN tunnel. Valid value is between 64 and 2048. + type: number + tunnel2StartupAction: + description: The action to take when the establishing the tunnel + for the second VPN connection. By default, your customer gateway + device must initiate the IKE negotiation and bring up the tunnel. + Specify start for AWS to initiate the IKE negotiation. Valid + values are add | start. + type: string + tunnel2VgwInsideAddress: + description: The RFC 6890 link-local address of the second VPN + tunnel (VPN Gateway Side). + type: string + tunnelInsideIpVersion: + description: Indicate whether the VPN tunnels process IPv4 or + IPv6 traffic. Valid values are ipv4 | ipv6. ipv6 Supports only + EC2 Transit Gateway. + type: string + type: + description: The type of VPN connection. The only type AWS supports + at this time is "ipsec.1". + type: string + vgwTelemetry: + description: Telemetry for the VPN tunnels. Detailed below. + items: + properties: + acceptedRouteCount: + description: The number of accepted routes. + type: number + certificateArn: + description: The Amazon Resource Name (ARN) of the VPN tunnel + endpoint certificate. 
+ type: string + lastStatusChange: + description: The date and time of the last change in status. + type: string + outsideIpAddress: + description: The Internet-routable IP address of the virtual + private gateway's outside interface. + type: string + status: + description: The status of the VPN tunnel. + type: string + statusMessage: + description: If an error occurs, a description of the error. + type: string + type: object + type: array + vpnGatewayId: + description: The ID of the Virtual Private Gateway. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ecr.aws.upbound.io_replicationconfigurations.yaml b/package/crds/ecr.aws.upbound.io_replicationconfigurations.yaml index 614128525f..54f9f5ceba 100644 --- a/package/crds/ecr.aws.upbound.io_replicationconfigurations.yaml +++ b/package/crds/ecr.aws.upbound.io_replicationconfigurations.yaml @@ -471,3 +471,450 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ReplicationConfiguration is the Schema for the ReplicationConfigurations + API. Provides an Elastic Container Registry Replication Configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ReplicationConfigurationSpec defines the desired state of + ReplicationConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + region: + description: |- + A Region to replicate to. + Region is the region you'd like your resource to be created in. + type: string + replicationConfiguration: + description: Replication configuration for a registry. See Replication + Configuration. + properties: + rule: + description: The replication rules for a replication configuration. + A maximum of 10 are allowed per replication_configuration. + See Rule + items: + properties: + destination: + description: the details of a replication destination. + A maximum of 25 are allowed per rule. See Destination. + items: + properties: + region: + description: A Region to replicate to. + type: string + registryId: + description: The account ID of the destination + registry to replicate to. + type: string + required: + - region + type: object + type: array + repositoryFilter: + description: filters for a replication rule. See Repository + Filter. 
+ items: + properties: + filter: + description: The repository filter details. + type: string + filterType: + description: The repository filter type. The only + supported value is PREFIX_MATCH, which is a + repository name prefix specified with the filter + parameter. + type: string + type: object + type: array + type: object + type: array + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + replicationConfiguration: + description: Replication configuration for a registry. See Replication + Configuration. + properties: + rule: + description: The replication rules for a replication configuration. + A maximum of 10 are allowed per replication_configuration. + See Rule + items: + properties: + destination: + description: the details of a replication destination. + A maximum of 25 are allowed per rule. See Destination. + items: + properties: + registryId: + description: The account ID of the destination + registry to replicate to. + type: string + type: object + type: array + repositoryFilter: + description: filters for a replication rule. See Repository + Filter. + items: + properties: + filter: + description: The repository filter details. + type: string + filterType: + description: The repository filter type. 
The only + supported value is PREFIX_MATCH, which is a + repository name prefix specified with the filter + parameter. + type: string + type: object + type: array + type: object + type: array + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ReplicationConfigurationStatus defines the observed state + of ReplicationConfiguration. 
+ properties: + atProvider: + properties: + id: + type: string + registryId: + description: The account ID of the destination registry to replicate + to. + type: string + replicationConfiguration: + description: Replication configuration for a registry. See Replication + Configuration. + properties: + rule: + description: The replication rules for a replication configuration. + A maximum of 10 are allowed per replication_configuration. + See Rule + items: + properties: + destination: + description: the details of a replication destination. + A maximum of 25 are allowed per rule. See Destination. + items: + properties: + region: + description: A Region to replicate to. + type: string + registryId: + description: The account ID of the destination + registry to replicate to. + type: string + type: object + type: array + repositoryFilter: + description: filters for a replication rule. See Repository + Filter. + items: + properties: + filter: + description: The repository filter details. + type: string + filterType: + description: The repository filter type. The only + supported value is PREFIX_MATCH, which is a + repository name prefix specified with the filter + parameter. + type: string + type: object + type: array + type: object + type: array + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ecr.aws.upbound.io_repositories.yaml b/package/crds/ecr.aws.upbound.io_repositories.yaml index 0fbef2262c..ce9a7bb3ec 100644 --- a/package/crds/ecr.aws.upbound.io_repositories.yaml +++ b/package/crds/ecr.aws.upbound.io_repositories.yaml @@ -627,3 +627,606 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Repository is the Schema for the Repositorys API. Provides an + Elastic Container Registry Repository. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RepositorySpec defines the desired state of Repository + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + encryptionConfiguration: + description: Encryption configuration for the repository. See + below for schema. + items: + properties: + encryptionType: + description: The encryption type to use for the repository. + Valid values are AES256 or KMS. Defaults to AES256. + type: string + kmsKey: + description: The ARN of the KMS key to use when encryption_type + is KMS. If not specified, uses the default AWS managed + key for ECR. 
+ type: string + kmsKeyRef: + description: Reference to a Key in kms to populate kmsKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeySelector: + description: Selector for a Key in kms to populate kmsKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + forceDelete: + description: |- + If true, will delete the repository even if it contains images. + Defaults to false. + type: boolean + imageScanningConfiguration: + description: Configuration block that defines image scanning configuration + for the repository. By default, image scanning must be manually + triggered. See the ECR User Guide for more information about + image scanning. + properties: + scanOnPush: + description: Indicates whether images are scanned after being + pushed to the repository (true) or not scanned (false). + type: boolean + type: object + imageTagMutability: + description: 'The tag mutability setting for the repository. Must + be one of: MUTABLE or IMMUTABLE. Defaults to MUTABLE.' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + encryptionConfiguration: + description: Encryption configuration for the repository. See + below for schema. + items: + properties: + encryptionType: + description: The encryption type to use for the repository. + Valid values are AES256 or KMS. Defaults to AES256. + type: string + kmsKey: + description: The ARN of the KMS key to use when encryption_type + is KMS. If not specified, uses the default AWS managed + key for ECR. + type: string + kmsKeyRef: + description: Reference to a Key in kms to populate kmsKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeySelector: + description: Selector for a Key in kms to populate kmsKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + forceDelete: + description: |- + If true, will delete the repository even if it contains images. + Defaults to false. + type: boolean + imageScanningConfiguration: + description: Configuration block that defines image scanning configuration + for the repository. By default, image scanning must be manually + triggered. See the ECR User Guide for more information about + image scanning. + properties: + scanOnPush: + description: Indicates whether images are scanned after being + pushed to the repository (true) or not scanned (false). + type: boolean + type: object + imageTagMutability: + description: 'The tag mutability setting for the repository. Must + be one of: MUTABLE or IMMUTABLE. Defaults to MUTABLE.' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: RepositoryStatus defines the observed state of Repository. + properties: + atProvider: + properties: + arn: + description: Full ARN of the repository. + type: string + encryptionConfiguration: + description: Encryption configuration for the repository. See + below for schema. + items: + properties: + encryptionType: + description: The encryption type to use for the repository. + Valid values are AES256 or KMS. Defaults to AES256. + type: string + kmsKey: + description: The ARN of the KMS key to use when encryption_type + is KMS. If not specified, uses the default AWS managed + key for ECR. 
+ type: string + type: object + type: array + forceDelete: + description: |- + If true, will delete the repository even if it contains images. + Defaults to false. + type: boolean + id: + type: string + imageScanningConfiguration: + description: Configuration block that defines image scanning configuration + for the repository. By default, image scanning must be manually + triggered. See the ECR User Guide for more information about + image scanning. + properties: + scanOnPush: + description: Indicates whether images are scanned after being + pushed to the repository (true) or not scanned (false). + type: boolean + type: object + imageTagMutability: + description: 'The tag mutability setting for the repository. Must + be one of: MUTABLE or IMMUTABLE. Defaults to MUTABLE.' + type: string + registryId: + description: The registry ID where the repository was created. + type: string + repositoryUrl: + description: The URL of the repository (in the form aws_account_id.dkr.ecr.region.amazonaws.com/repositoryName). + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ecrpublic.aws.upbound.io_repositories.yaml b/package/crds/ecrpublic.aws.upbound.io_repositories.yaml index 932a3aa5bb..2ed041c299 100644 --- a/package/crds/ecrpublic.aws.upbound.io_repositories.yaml +++ b/package/crds/ecrpublic.aws.upbound.io_repositories.yaml @@ -521,3 +521,497 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Repository is the Schema for the Repositorys API. Provides a + Public Elastic Container Registry Repository. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RepositorySpec defines the desired state of Repository + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + catalogData: + description: Catalog data configuration for the repository. See + below for schema. + properties: + aboutText: + description: A detailed description of the contents of the + repository. It is publicly visible in the Amazon ECR Public + Gallery. The text must be in markdown format. + type: string + architectures: + description: 'The system architecture that the images in the + repository are compatible with. On the Amazon ECR Public + Gallery, the following supported architectures will appear + as badges on the repository and are used as search filters: + ARM, ARM 64, x86, x86-64' + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: A short description of the contents of the repository. + This text appears in both the image details and also when + searching for repositories on the Amazon ECR Public Gallery. + type: string + logoImageBlob: + description: The base64-encoded repository logo payload. 
(Only + visible for verified accounts) Note that drift detection + is disabled for this attribute. + type: string + operatingSystems: + description: 'The operating systems that the images in the + repository are compatible with. On the Amazon ECR Public + Gallery, the following supported operating systems will + appear as badges on the repository and are used as search + filters: Linux, Windows' + items: + type: string + type: array + x-kubernetes-list-type: set + usageText: + description: Detailed information on how to use the contents + of the repository. It is publicly visible in the Amazon + ECR Public Gallery. The usage text provides context, support + information, and additional usage details for users of the + repository. The text must be in markdown format. + type: string + type: object + forceDestroy: + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + catalogData: + description: Catalog data configuration for the repository. See + below for schema. 
+ properties: + aboutText: + description: A detailed description of the contents of the + repository. It is publicly visible in the Amazon ECR Public + Gallery. The text must be in markdown format. + type: string + architectures: + description: 'The system architecture that the images in the + repository are compatible with. On the Amazon ECR Public + Gallery, the following supported architectures will appear + as badges on the repository and are used as search filters: + ARM, ARM 64, x86, x86-64' + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: A short description of the contents of the repository. + This text appears in both the image details and also when + searching for repositories on the Amazon ECR Public Gallery. + type: string + logoImageBlob: + description: The base64-encoded repository logo payload. (Only + visible for verified accounts) Note that drift detection + is disabled for this attribute. + type: string + operatingSystems: + description: 'The operating systems that the images in the + repository are compatible with. On the Amazon ECR Public + Gallery, the following supported operating systems will + appear as badges on the repository and are used as search + filters: Linux, Windows' + items: + type: string + type: array + x-kubernetes-list-type: set + usageText: + description: Detailed information on how to use the contents + of the repository. It is publicly visible in the Amazon + ECR Public Gallery. The usage text provides context, support + information, and additional usage details for users of the + repository. The text must be in markdown format. + type: string + type: object + forceDestroy: + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: RepositoryStatus defines the observed state of Repository. + properties: + atProvider: + properties: + arn: + description: Full ARN of the repository. + type: string + catalogData: + description: Catalog data configuration for the repository. See + below for schema. + properties: + aboutText: + description: A detailed description of the contents of the + repository. It is publicly visible in the Amazon ECR Public + Gallery. The text must be in markdown format. + type: string + architectures: + description: 'The system architecture that the images in the + repository are compatible with. 
On the Amazon ECR Public + Gallery, the following supported architectures will appear + as badges on the repository and are used as search filters: + ARM, ARM 64, x86, x86-64' + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: A short description of the contents of the repository. + This text appears in both the image details and also when + searching for repositories on the Amazon ECR Public Gallery. + type: string + logoImageBlob: + description: The base64-encoded repository logo payload. (Only + visible for verified accounts) Note that drift detection + is disabled for this attribute. + type: string + operatingSystems: + description: 'The operating systems that the images in the + repository are compatible with. On the Amazon ECR Public + Gallery, the following supported operating systems will + appear as badges on the repository and are used as search + filters: Linux, Windows' + items: + type: string + type: array + x-kubernetes-list-type: set + usageText: + description: Detailed information on how to use the contents + of the repository. It is publicly visible in the Amazon + ECR Public Gallery. The usage text provides context, support + information, and additional usage details for users of the + repository. The text must be in markdown format. + type: string + type: object + forceDestroy: + type: boolean + id: + description: The repository name. + type: string + registryId: + description: The registry ID where the repository was created. + type: string + repositoryUri: + description: The URI of the repository. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. 
+ type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ecs.aws.upbound.io_capacityproviders.yaml b/package/crds/ecs.aws.upbound.io_capacityproviders.yaml index 8e4bbe79ba..50f23c1eb3 100644 --- a/package/crds/ecs.aws.upbound.io_capacityproviders.yaml +++ b/package/crds/ecs.aws.upbound.io_capacityproviders.yaml @@ -669,3 +669,639 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CapacityProvider is the Schema for the CapacityProviders API. + Provides an ECS cluster capacity provider. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CapacityProviderSpec defines the desired state of CapacityProvider + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoScalingGroupProvider: + description: Configuration block for the provider for the ECS + auto scaling group. Detailed below. + properties: + autoScalingGroupArn: + description: '- ARN of the associated auto scaling group.' + type: string + autoScalingGroupArnRef: + description: Reference to a AutoscalingGroup in autoscaling + to populate autoScalingGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + autoScalingGroupArnSelector: + description: Selector for a AutoscalingGroup in autoscaling + to populate autoScalingGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + managedDraining: + description: '- Enables or disables a graceful shutdown of + instances without disturbing workloads. Valid values are ENABLED and DISABLED. + The default value is ENABLED when a capacity provider is + created.' + type: string + managedScaling: + description: '- Configuration block defining the parameters + of the auto scaling. Detailed below.' 
+ properties: + instanceWarmupPeriod: + description: Period of time, in seconds, after a newly + launched Amazon EC2 instance can contribute to CloudWatch + metrics for Auto Scaling group. If this parameter is + omitted, the default value of 300 seconds is used. + type: number + maximumScalingStepSize: + description: Maximum step adjustment size. A number between + 1 and 10,000. + type: number + minimumScalingStepSize: + description: Minimum step adjustment size. A number between + 1 and 10,000. + type: number + status: + description: Whether auto scaling is managed by ECS. Valid + values are ENABLED and DISABLED. + type: string + targetCapacity: + description: Target utilization for the capacity provider. + A number between 1 and 100. + type: number + type: object + managedTerminationProtection: + description: '- Enables or disables container-aware termination + of instances in the auto scaling group when scale-in happens. + Valid values are ENABLED and DISABLED.' + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + autoScalingGroupProvider: + description: Configuration block for the provider for the ECS + auto scaling group. Detailed below. + properties: + autoScalingGroupArn: + description: '- ARN of the associated auto scaling group.' + type: string + autoScalingGroupArnRef: + description: Reference to a AutoscalingGroup in autoscaling + to populate autoScalingGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + autoScalingGroupArnSelector: + description: Selector for a AutoscalingGroup in autoscaling + to populate autoScalingGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + managedDraining: + description: '- Enables or disables a graceful shutdown of + instances without disturbing workloads. Valid values are ENABLED and DISABLED. + The default value is ENABLED when a capacity provider is + created.' + type: string + managedScaling: + description: '- Configuration block defining the parameters + of the auto scaling. Detailed below.' + properties: + instanceWarmupPeriod: + description: Period of time, in seconds, after a newly + launched Amazon EC2 instance can contribute to CloudWatch + metrics for Auto Scaling group. If this parameter is + omitted, the default value of 300 seconds is used. + type: number + maximumScalingStepSize: + description: Maximum step adjustment size. A number between + 1 and 10,000. + type: number + minimumScalingStepSize: + description: Minimum step adjustment size. A number between + 1 and 10,000. + type: number + status: + description: Whether auto scaling is managed by ECS. Valid + values are ENABLED and DISABLED. + type: string + targetCapacity: + description: Target utilization for the capacity provider. + A number between 1 and 100. + type: number + type: object + managedTerminationProtection: + description: '- Enables or disables container-aware termination + of instances in the auto scaling group when scale-in happens. + Valid values are ENABLED and DISABLED.' 
+ type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.autoScalingGroupProvider is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.autoScalingGroupProvider) + || (has(self.initProvider) && has(self.initProvider.autoScalingGroupProvider))' + status: + description: CapacityProviderStatus defines the observed state of CapacityProvider. + properties: + atProvider: + properties: + arn: + description: ARN that identifies the capacity provider. + type: string + autoScalingGroupProvider: + description: Configuration block for the provider for the ECS + auto scaling group. Detailed below. + properties: + autoScalingGroupArn: + description: '- ARN of the associated auto scaling group.' + type: string + managedDraining: + description: '- Enables or disables a graceful shutdown of + instances without disturbing workloads. Valid values are ENABLED and DISABLED. + The default value is ENABLED when a capacity provider is + created.' + type: string + managedScaling: + description: '- Configuration block defining the parameters + of the auto scaling. Detailed below.' + properties: + instanceWarmupPeriod: + description: Period of time, in seconds, after a newly + launched Amazon EC2 instance can contribute to CloudWatch + metrics for Auto Scaling group. If this parameter is + omitted, the default value of 300 seconds is used. + type: number + maximumScalingStepSize: + description: Maximum step adjustment size. A number between + 1 and 10,000. + type: number + minimumScalingStepSize: + description: Minimum step adjustment size. A number between + 1 and 10,000. + type: number + status: + description: Whether auto scaling is managed by ECS. Valid + values are ENABLED and DISABLED. 
+ type: string + targetCapacity: + description: Target utilization for the capacity provider. + A number between 1 and 100. + type: number + type: object + managedTerminationProtection: + description: '- Enables or disables container-aware termination + of instances in the auto scaling group when scale-in happens. + Valid values are ENABLED and DISABLED.' + type: string + type: object + id: + description: ARN that identifies the capacity provider. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ecs.aws.upbound.io_clusters.yaml b/package/crds/ecs.aws.upbound.io_clusters.yaml index e3b16a0b78..db25bcd9e7 100644 --- a/package/crds/ecs.aws.upbound.io_clusters.yaml +++ b/package/crds/ecs.aws.upbound.io_clusters.yaml @@ -604,3 +604,562 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Provides an ECS cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configuration: + description: The execute command configuration for the cluster. + Detailed below. + properties: + executeCommandConfiguration: + description: The details of the execute command configuration. + Detailed below. + properties: + kmsKeyId: + description: The AWS Key Management Service key ID to + encrypt the data between the local client and the container. + type: string + logConfiguration: + description: The log configuration for the results of + the execute command actions Required when logging is + OVERRIDE. Detailed below. + properties: + cloudWatchEncryptionEnabled: + description: Whether or not to enable encryption on + the CloudWatch logs. If not specified, encryption + will be disabled. + type: boolean + cloudWatchLogGroupName: + description: The name of the CloudWatch log group + to send logs to. + type: string + s3BucketEncryptionEnabled: + description: Whether or not to enable encryption on + the logs sent to S3. If not specified, encryption + will be disabled. 
+ type: boolean + s3BucketName: + description: The name of the S3 bucket to send logs + to. + type: string + s3KeyPrefix: + description: An optional folder in the S3 bucket to + place logs in. + type: string + type: object + logging: + description: The log setting to use for redirecting logs + for your execute command results. Valid values are NONE, + DEFAULT, and OVERRIDE. + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + serviceConnectDefaults: + description: Configures a default Service Connect namespace. Detailed + below. + properties: + namespace: + description: The ARN of the aws_service_discovery_http_namespace + that's used when you create a service and don't specify + a Service Connect configuration. + type: string + type: object + setting: + description: Configuration block(s) with cluster settings. For + example, this can be used to enable CloudWatch Container Insights + for a cluster. Detailed below. + items: + properties: + name: + description: 'Name of the setting to manage. Valid values: + containerInsights.' + type: string + value: + description: The value to assign to the setting. Valid values + are enabled and disabled. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configuration: + description: The execute command configuration for the cluster. + Detailed below. + properties: + executeCommandConfiguration: + description: The details of the execute command configuration. + Detailed below. + properties: + kmsKeyId: + description: The AWS Key Management Service key ID to + encrypt the data between the local client and the container. + type: string + logConfiguration: + description: The log configuration for the results of + the execute command actions Required when logging is + OVERRIDE. Detailed below. + properties: + cloudWatchEncryptionEnabled: + description: Whether or not to enable encryption on + the CloudWatch logs. If not specified, encryption + will be disabled. + type: boolean + cloudWatchLogGroupName: + description: The name of the CloudWatch log group + to send logs to. + type: string + s3BucketEncryptionEnabled: + description: Whether or not to enable encryption on + the logs sent to S3. If not specified, encryption + will be disabled. + type: boolean + s3BucketName: + description: The name of the S3 bucket to send logs + to. + type: string + s3KeyPrefix: + description: An optional folder in the S3 bucket to + place logs in. + type: string + type: object + logging: + description: The log setting to use for redirecting logs + for your execute command results. Valid values are NONE, + DEFAULT, and OVERRIDE. + type: string + type: object + type: object + serviceConnectDefaults: + description: Configures a default Service Connect namespace. Detailed + below. + properties: + namespace: + description: The ARN of the aws_service_discovery_http_namespace + that's used when you create a service and don't specify + a Service Connect configuration. 
+ type: string + type: object + setting: + description: Configuration block(s) with cluster settings. For + example, this can be used to enable CloudWatch Container Insights + for a cluster. Detailed below. + items: + properties: + name: + description: 'Name of the setting to manage. Valid values: + containerInsights.' + type: string + value: + description: The value to assign to the setting. Valid values + are enabled and disabled. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + arn: + description: ARN that identifies the cluster. + type: string + configuration: + description: The execute command configuration for the cluster. + Detailed below. + properties: + executeCommandConfiguration: + description: The details of the execute command configuration. + Detailed below. + properties: + kmsKeyId: + description: The AWS Key Management Service key ID to + encrypt the data between the local client and the container. + type: string + logConfiguration: + description: The log configuration for the results of + the execute command actions Required when logging is + OVERRIDE. Detailed below. + properties: + cloudWatchEncryptionEnabled: + description: Whether or not to enable encryption on + the CloudWatch logs. If not specified, encryption + will be disabled. + type: boolean + cloudWatchLogGroupName: + description: The name of the CloudWatch log group + to send logs to. + type: string + s3BucketEncryptionEnabled: + description: Whether or not to enable encryption on + the logs sent to S3. If not specified, encryption + will be disabled. + type: boolean + s3BucketName: + description: The name of the S3 bucket to send logs + to. + type: string + s3KeyPrefix: + description: An optional folder in the S3 bucket to + place logs in. + type: string + type: object + logging: + description: The log setting to use for redirecting logs + for your execute command results. Valid values are NONE, + DEFAULT, and OVERRIDE. + type: string + type: object + type: object + id: + description: ARN that identifies the cluster. + type: string + serviceConnectDefaults: + description: Configures a default Service Connect namespace. Detailed + below. 
+ properties: + namespace: + description: The ARN of the aws_service_discovery_http_namespace + that's used when you create a service and don't specify + a Service Connect configuration. + type: string + type: object + setting: + description: Configuration block(s) with cluster settings. For + example, this can be used to enable CloudWatch Container Insights + for a cluster. Detailed below. + items: + properties: + name: + description: 'Name of the setting to manage. Valid values: + containerInsights.' + type: string + value: + description: The value to assign to the setting. Valid values + are enabled and disabled. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ecs.aws.upbound.io_services.yaml b/package/crds/ecs.aws.upbound.io_services.yaml index 3e0628fe66..823c6f32ff 100644 --- a/package/crds/ecs.aws.upbound.io_services.yaml +++ b/package/crds/ecs.aws.upbound.io_services.yaml @@ -2515,3 +2515,2416 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Service is the Schema for the Services API. Provides an ECS service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceSpec defines the desired state of Service + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + alarms: + description: Information about the CloudWatch alarms. See below. + properties: + alarmNames: + description: One or more CloudWatch alarm names. + items: + type: string + type: array + x-kubernetes-list-type: set + enable: + description: Determines whether to use the CloudWatch alarm + option in the service deployment process. + type: boolean + rollback: + description: Determines whether to configure Amazon ECS to + roll back the service if a service deployment fails. If + rollback is used, when a service deployment fails, the service + is rolled back to the last deployment that completed successfully. 
+ type: boolean + type: object + capacityProviderStrategy: + description: Capacity provider strategies to use for the service. + Can be one or more. These can be updated without destroying + and recreating the service only if force_new_deployment = true + and not changing from 0 capacity_provider_strategy blocks to + greater than 0, or vice versa. See below. Conflicts with launch_type. + items: + properties: + base: + description: Number of tasks, at a minimum, to run on the + specified capacity provider. Only one capacity provider + in a capacity provider strategy can have a base defined. + type: number + capacityProvider: + description: Short name of the capacity provider. + type: string + weight: + description: Relative percentage of the total number of + launched tasks that should use the specified capacity + provider. + type: number + type: object + type: array + cluster: + description: Name of an ECS cluster. + type: string + clusterRef: + description: Reference to a Cluster in ecs to populate cluster. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterSelector: + description: Selector for a Cluster in ecs to populate cluster. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deploymentCircuitBreaker: + description: Configuration block for deployment circuit breaker. + See below. + properties: + enable: + description: Whether to enable the deployment circuit breaker + logic for the service. + type: boolean + rollback: + description: Whether to enable Amazon ECS to roll back the + service if a service deployment fails. If rollback is enabled, + when a service deployment fails, the service is rolled back + to the last deployment that completed successfully. + type: boolean + type: object + deploymentController: + description: Configuration block for deployment controller configuration. + See below. + properties: + type: + description: 'Type of deployment controller. Valid values: + CODE_DEPLOY, ECS, EXTERNAL. Default: ECS.' 
+ type: string + type: object + deploymentMaximumPercent: + description: Upper limit (as a percentage of the service's desiredCount) + of the number of running tasks that can be running in a service + during a deployment. Not valid when using the DAEMON scheduling + strategy. + type: number + deploymentMinimumHealthyPercent: + description: Lower limit (as a percentage of the service's desiredCount) + of the number of running tasks that must remain running and + healthy in a service during a deployment. + type: number + desiredCount: + description: Number of instances of the task definition to place + and keep running. Defaults to 0. Do not specify if using the + DAEMON scheduling strategy. + type: number + enableEcsManagedTags: + description: Specifies whether to enable Amazon ECS managed tags + for the tasks within the service. + type: boolean + enableExecuteCommand: + description: Specifies whether to enable Amazon ECS Exec for the + tasks within the service. + type: boolean + forceNewDeployment: + description: Enable to force a new task deployment of the service. + This can be used to update tasks to use a newer Docker image + with same image/tag combination (e.g., myimage:latest), roll + Fargate tasks onto a newer platform version, or immediately + deploy ordered_placement_strategy and placement_constraints + updates. + type: boolean + healthCheckGracePeriodSeconds: + description: Seconds to ignore failing load balancer health checks + on newly instantiated tasks to prevent premature shutdown, up + to 2147483647. Only valid for services configured to use load + balancers. + type: number + iamRole: + description: ARN of the IAM role that allows Amazon ECS to make + calls to your load balancer on your behalf. This parameter is + required if you are using a load balancer with your service, + but only if your task definition does not use the awsvpc network + mode. If using awsvpc network mode, do not specify this role. 
+ If your account has already created the Amazon ECS service-linked + role, that role is used by default for your service unless you + specify a role here. + type: string + iamRoleRef: + description: Reference to a Role in iam to populate iamRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleSelector: + description: Selector for a Role in iam to populate iamRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + launchType: + description: Launch type on which to run your service. The valid + values are EC2, FARGATE, and EXTERNAL. Defaults to EC2. Conflicts + with capacity_provider_strategy. + type: string + loadBalancer: + description: Configuration block for load balancers. See below. + items: + properties: + containerName: + description: Name of the container to associate with the + load balancer (as it appears in a container definition). + type: string + containerPort: + description: Port on the container to associate with the + load balancer. + type: number + elbName: + description: Name of the ELB (Classic) to associate with + the service. + type: string + targetGroupArn: + description: ARN of the Load Balancer target group to associate + with the service. + type: string + targetGroupArnRef: + description: Reference to a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetGroupArnSelector: + description: Selector for a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + networkConfiguration: + description: Network configuration for the service. This parameter + is required for task definitions that use the awsvpc network + mode to receive their own Elastic Network Interface, and it + is not supported for other network modes. See below. + properties: + assignPublicIp: + description: Assign a public IP address to the ENI (Fargate + launch type only). Valid values are true or false. Default + false. 
+ type: boolean + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate + securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: Security groups associated with the task or service. + If you do not specify a security group, the default security + group for the VPC is used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetRefs: + description: References to Subnet in ec2 to populate subnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetSelector: + description: Selector for a list of Subnet in ec2 to populate + subnets. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnets: + description: Subnets associated with the task or service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + orderedPlacementStrategy: + description: Service level strategy rules that are taken into + consideration during task placement. List from top to bottom + in order of precedence. Updates to this configuration will take + effect next task deployment unless force_new_deployment is enabled. + The maximum number of ordered_placement_strategy blocks is 5. + See below. + items: + properties: + field: + description: |- + For the spread placement strategy, valid values are instanceId (or host, + which has the same effect), or any platform or custom attribute that is applied to a container instance. + For the binpack type, valid values are memory and cpu. For the random type, this attribute is not + needed. For more information, see Placement Strategy. + type: string + type: + description: 'Type of placement strategy. 
Must be one of: + binpack, random, or spread' + type: string + type: object + type: array + placementConstraints: + description: Rules that are taken into consideration during task + placement. Updates to this configuration will take effect next + task deployment unless force_new_deployment is enabled. Maximum + number of placement_constraints is 10. See below. + items: + properties: + expression: + description: Cluster Query Language expression to apply + to the constraint. Does not need to be specified for the + distinctInstance type. For more information, see Cluster + Query Language in the Amazon EC2 Container Service Developer + Guide. + type: string + type: + description: Type of constraint. The only valid values at + this time are memberOf and distinctInstance. + type: string + type: object + type: array + platformVersion: + description: Platform version on which to run your service. Only + applicable for launch_type set to FARGATE. Defaults to LATEST. + More information about Fargate platform versions can be found + in the AWS ECS User Guide. + type: string + propagateTags: + description: Specifies whether to propagate the tags from the + task definition or the service to the tasks. The valid values + are SERVICE and TASK_DEFINITION. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + schedulingStrategy: + description: Scheduling strategy to use for the service. The valid + values are REPLICA and DAEMON. Defaults to REPLICA. Note that + Tasks using the Fargate launch type or the . + type: string + serviceConnectConfiguration: + description: The ECS Service Connect configuration for this service + to discover and connect to services, and be discovered by, and + connected from, other services within a namespace. See below. + properties: + enabled: + description: Specifies whether to use Service Connect with + this service. 
+ type: boolean + logConfiguration: + description: The log configuration for the container. See + below. + properties: + logDriver: + description: The log driver to use for the container. + type: string + options: + additionalProperties: + type: string + description: The configuration options to send to the + log driver. + type: object + x-kubernetes-map-type: granular + secretOption: + description: The secrets to pass to the log configuration. + See below. + items: + properties: + name: + description: The name of the secret. + type: string + valueFrom: + description: The secret to expose to the container. + The supported values are either the full ARN of + the AWS Secrets Manager secret or the full ARN + of the parameter in the SSM Parameter Store. + type: string + type: object + type: array + type: object + namespace: + description: The namespace name or ARN of the aws_service_discovery_http_namespace + for use with Service Connect. + type: string + service: + description: The list of Service Connect service objects. + See below. + items: + properties: + clientAlias: + description: The list of client aliases for this Service + Connect service. You use these to assign names that + can be used by client applications. The maximum number + of client aliases that you can have in this list is + 1. See below. + properties: + dnsName: + description: The name that you use in the applications + of client tasks to connect to this service. + type: string + port: + description: The listening port number for the Service + Connect proxy. This port is available inside of + all of the tasks within the same namespace. + type: number + type: object + discoveryName: + description: The name of the new AWS Cloud Map service + that Amazon ECS creates for this Amazon ECS service. + type: string + ingressPortOverride: + description: The port number for the Service Connect + proxy to listen on. 
+ type: number + portName: + description: The name of one of the portMappings from + all the containers in the task definition of this + Amazon ECS service. + type: string + timeout: + description: Configuration timeouts for Service Connect + properties: + idleTimeoutSeconds: + description: The amount of time in seconds a connection + will stay active while idle. A value of 0 can + be set to disable idleTimeout. + type: number + perRequestTimeoutSeconds: + description: The amount of time in seconds for the + upstream to respond with a complete response per + request. A value of 0 can be set to disable perRequestTimeout. + Can only be set when appProtocol isn't TCP. + type: number + type: object + tls: + description: The configuration for enabling Transport + Layer Security (TLS) + properties: + issuerCertAuthority: + description: The details of the certificate authority + which will issue the certificate. + properties: + awsPcaAuthorityArn: + description: The ARN of the aws_acmpca_certificate_authority + used to create the TLS Certificates. + type: string + type: object + kmsKey: + description: The KMS key used to encrypt the private + key in Secrets Manager. + type: string + roleArn: + description: The ARN of the IAM Role that's associated + with the Service Connect TLS. + type: string + type: object + type: object + type: array + type: object + serviceRegistries: + description: Service discovery registries for the service. The + maximum number of service_registries blocks is 1. See below. + properties: + containerName: + description: Container name value, already specified in the + task definition, to be used for your service discovery service. + type: string + containerPort: + description: Port value, already specified in the task definition, + to be used for your service discovery service. + type: number + port: + description: Port value used if your Service Discovery service + specified an SRV record. 
+ type: number + registryArn: + description: ARN of the Service Registry. The currently supported + service registry is Amazon Route 53 Auto Naming Service(aws_service_discovery_service). + For more information, see Service + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taskDefinition: + description: Family and revision (family:revision) or full ARN + of the task definition that you want to run in your service. + Required unless using the EXTERNAL deployment controller. If + a revision is not specified, the latest ACTIVE revision is used. + type: string + taskDefinitionRef: + description: Reference to a TaskDefinition in ecs to populate + taskDefinition. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + taskDefinitionSelector: + description: Selector for a TaskDefinition in ecs to populate + taskDefinition. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + triggers: + additionalProperties: + type: string + description: Map of arbitrary keys and values that, when changed, + will trigger an in-place update (redeployment). Useful with + plantimestamp(). See example above. + type: object + x-kubernetes-map-type: granular + waitForSteadyState: + description: Default false. + type: boolean + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + alarms: + description: Information about the CloudWatch alarms. See below. + properties: + alarmNames: + description: One or more CloudWatch alarm names. + items: + type: string + type: array + x-kubernetes-list-type: set + enable: + description: Determines whether to use the CloudWatch alarm + option in the service deployment process. + type: boolean + rollback: + description: Determines whether to configure Amazon ECS to + roll back the service if a service deployment fails. If + rollback is used, when a service deployment fails, the service + is rolled back to the last deployment that completed successfully. + type: boolean + type: object + capacityProviderStrategy: + description: Capacity provider strategies to use for the service. + Can be one or more. These can be updated without destroying + and recreating the service only if force_new_deployment = true + and not changing from 0 capacity_provider_strategy blocks to + greater than 0, or vice versa. See below. Conflicts with launch_type. + items: + properties: + base: + description: Number of tasks, at a minimum, to run on the + specified capacity provider. Only one capacity provider + in a capacity provider strategy can have a base defined. + type: number + capacityProvider: + description: Short name of the capacity provider. + type: string + weight: + description: Relative percentage of the total number of + launched tasks that should use the specified capacity + provider. + type: number + type: object + type: array + cluster: + description: Name of an ECS cluster. + type: string + clusterRef: + description: Reference to a Cluster in ecs to populate cluster. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterSelector: + description: Selector for a Cluster in ecs to populate cluster. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deploymentCircuitBreaker: + description: Configuration block for deployment circuit breaker. + See below. 
+ properties: + enable: + description: Whether to enable the deployment circuit breaker + logic for the service. + type: boolean + rollback: + description: Whether to enable Amazon ECS to roll back the + service if a service deployment fails. If rollback is enabled, + when a service deployment fails, the service is rolled back + to the last deployment that completed successfully. + type: boolean + type: object + deploymentController: + description: Configuration block for deployment controller configuration. + See below. + properties: + type: + description: 'Type of deployment controller. Valid values: + CODE_DEPLOY, ECS, EXTERNAL. Default: ECS.' + type: string + type: object + deploymentMaximumPercent: + description: Upper limit (as a percentage of the service's desiredCount) + of the number of running tasks that can be running in a service + during a deployment. Not valid when using the DAEMON scheduling + strategy. + type: number + deploymentMinimumHealthyPercent: + description: Lower limit (as a percentage of the service's desiredCount) + of the number of running tasks that must remain running and + healthy in a service during a deployment. + type: number + desiredCount: + description: Number of instances of the task definition to place + and keep running. Defaults to 0. Do not specify if using the + DAEMON scheduling strategy. + type: number + enableEcsManagedTags: + description: Specifies whether to enable Amazon ECS managed tags + for the tasks within the service. + type: boolean + enableExecuteCommand: + description: Specifies whether to enable Amazon ECS Exec for the + tasks within the service. + type: boolean + forceNewDeployment: + description: Enable to force a new task deployment of the service. + This can be used to update tasks to use a newer Docker image + with same image/tag combination (e.g., myimage:latest), roll + Fargate tasks onto a newer platform version, or immediately + deploy ordered_placement_strategy and placement_constraints + updates. 
+ type: boolean + healthCheckGracePeriodSeconds: + description: Seconds to ignore failing load balancer health checks + on newly instantiated tasks to prevent premature shutdown, up + to 2147483647. Only valid for services configured to use load + balancers. + type: number + iamRole: + description: ARN of the IAM role that allows Amazon ECS to make + calls to your load balancer on your behalf. This parameter is + required if you are using a load balancer with your service, + but only if your task definition does not use the awsvpc network + mode. If using awsvpc network mode, do not specify this role. + If your account has already created the Amazon ECS service-linked + role, that role is used by default for your service unless you + specify a role here. + type: string + iamRoleRef: + description: Reference to a Role in iam to populate iamRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleSelector: + description: Selector for a Role in iam to populate iamRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + launchType: + description: Launch type on which to run your service. The valid + values are EC2, FARGATE, and EXTERNAL. Defaults to EC2. Conflicts + with capacity_provider_strategy. + type: string + loadBalancer: + description: Configuration block for load balancers. See below. + items: + properties: + containerName: + description: Name of the container to associate with the + load balancer (as it appears in a container definition). + type: string + containerPort: + description: Port on the container to associate with the + load balancer. + type: number + elbName: + description: Name of the ELB (Classic) to associate with + the service. + type: string + targetGroupArn: + description: ARN of the Load Balancer target group to associate + with the service. + type: string + targetGroupArnRef: + description: Reference to a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetGroupArnSelector: + description: Selector for a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + networkConfiguration: + description: Network configuration for the service. This parameter + is required for task definitions that use the awsvpc network + mode to receive their own Elastic Network Interface, and it + is not supported for other network modes. See below. + properties: + assignPublicIp: + description: Assign a public IP address to the ENI (Fargate + launch type only). Valid values are true or false. Default + false. + type: boolean + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate + securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: Security groups associated with the task or service. + If you do not specify a security group, the default security + group for the VPC is used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetRefs: + description: References to Subnet in ec2 to populate subnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetSelector: + description: Selector for a list of Subnet in ec2 to populate + subnets. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnets: + description: Subnets associated with the task or service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + orderedPlacementStrategy: + description: Service level strategy rules that are taken into + consideration during task placement. List from top to bottom + in order of precedence. Updates to this configuration will take + effect next task deployment unless force_new_deployment is enabled. 
+ The maximum number of ordered_placement_strategy blocks is 5. + See below. + items: + properties: + field: + description: |- + For the spread placement strategy, valid values are instanceId (or host, + which has the same effect), or any platform or custom attribute that is applied to a container instance. + For the binpack type, valid values are memory and cpu. For the random type, this attribute is not + needed. For more information, see Placement Strategy. + type: string + type: + description: 'Type of placement strategy. Must be one of: + binpack, random, or spread' + type: string + type: object + type: array + placementConstraints: + description: Rules that are taken into consideration during task + placement. Updates to this configuration will take effect next + task deployment unless force_new_deployment is enabled. Maximum + number of placement_constraints is 10. See below. + items: + properties: + expression: + description: Cluster Query Language expression to apply + to the constraint. Does not need to be specified for the + distinctInstance type. For more information, see Cluster + Query Language in the Amazon EC2 Container Service Developer + Guide. + type: string + type: + description: Type of constraint. The only valid values at + this time are memberOf and distinctInstance. + type: string + type: object + type: array + platformVersion: + description: Platform version on which to run your service. Only + applicable for launch_type set to FARGATE. Defaults to LATEST. + More information about Fargate platform versions can be found + in the AWS ECS User Guide. + type: string + propagateTags: + description: Specifies whether to propagate the tags from the + task definition or the service to the tasks. The valid values + are SERVICE and TASK_DEFINITION. + type: string + schedulingStrategy: + description: Scheduling strategy to use for the service. The valid + values are REPLICA and DAEMON. Defaults to REPLICA. 
Note that + Tasks using the Fargate launch type or the . + type: string + serviceConnectConfiguration: + description: The ECS Service Connect configuration for this service + to discover and connect to services, and be discovered by, and + connected from, other services within a namespace. See below. + properties: + enabled: + description: Specifies whether to use Service Connect with + this service. + type: boolean + logConfiguration: + description: The log configuration for the container. See + below. + properties: + logDriver: + description: The log driver to use for the container. + type: string + options: + additionalProperties: + type: string + description: The configuration options to send to the + log driver. + type: object + x-kubernetes-map-type: granular + secretOption: + description: The secrets to pass to the log configuration. + See below. + items: + properties: + name: + description: The name of the secret. + type: string + valueFrom: + description: The secret to expose to the container. + The supported values are either the full ARN of + the AWS Secrets Manager secret or the full ARN + of the parameter in the SSM Parameter Store. + type: string + type: object + type: array + type: object + namespace: + description: The namespace name or ARN of the aws_service_discovery_http_namespace + for use with Service Connect. + type: string + service: + description: The list of Service Connect service objects. + See below. + items: + properties: + clientAlias: + description: The list of client aliases for this Service + Connect service. You use these to assign names that + can be used by client applications. The maximum number + of client aliases that you can have in this list is + 1. See below. + properties: + dnsName: + description: The name that you use in the applications + of client tasks to connect to this service. + type: string + port: + description: The listening port number for the Service + Connect proxy. 
This port is available inside of + all of the tasks within the same namespace. + type: number + type: object + discoveryName: + description: The name of the new AWS Cloud Map service + that Amazon ECS creates for this Amazon ECS service. + type: string + ingressPortOverride: + description: The port number for the Service Connect + proxy to listen on. + type: number + portName: + description: The name of one of the portMappings from + all the containers in the task definition of this + Amazon ECS service. + type: string + timeout: + description: Configuration timeouts for Service Connect + properties: + idleTimeoutSeconds: + description: The amount of time in seconds a connection + will stay active while idle. A value of 0 can + be set to disable idleTimeout. + type: number + perRequestTimeoutSeconds: + description: The amount of time in seconds for the + upstream to respond with a complete response per + request. A value of 0 can be set to disable perRequestTimeout. + Can only be set when appProtocol isn't TCP. + type: number + type: object + tls: + description: The configuration for enabling Transport + Layer Security (TLS) + properties: + issuerCertAuthority: + description: The details of the certificate authority + which will issue the certificate. + properties: + awsPcaAuthorityArn: + description: The ARN of the aws_acmpca_certificate_authority + used to create the TLS Certificates. + type: string + type: object + kmsKey: + description: The KMS key used to encrypt the private + key in Secrets Manager. + type: string + roleArn: + description: The ARN of the IAM Role that's associated + with the Service Connect TLS. + type: string + type: object + type: object + type: array + type: object + serviceRegistries: + description: Service discovery registries for the service. The + maximum number of service_registries blocks is 1. See below. 
+ properties: + containerName: + description: Container name value, already specified in the + task definition, to be used for your service discovery service. + type: string + containerPort: + description: Port value, already specified in the task definition, + to be used for your service discovery service. + type: number + port: + description: Port value used if your Service Discovery service + specified an SRV record. + type: number + registryArn: + description: ARN of the Service Registry. The currently supported + service registry is Amazon Route 53 Auto Naming Service(aws_service_discovery_service). + For more information, see Service + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taskDefinition: + description: Family and revision (family:revision) or full ARN + of the task definition that you want to run in your service. + Required unless using the EXTERNAL deployment controller. If + a revision is not specified, the latest ACTIVE revision is used. + type: string + taskDefinitionRef: + description: Reference to a TaskDefinition in ecs to populate + taskDefinition. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + taskDefinitionSelector: + description: Selector for a TaskDefinition in ecs to populate + taskDefinition. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + triggers: + additionalProperties: + type: string + description: Map of arbitrary keys and values that, when changed, + will trigger an in-place update (redeployment). Useful with + plantimestamp(). See example above. + type: object + x-kubernetes-map-type: granular + waitForSteadyState: + description: Default false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ServiceStatus defines the observed state of Service. + properties: + atProvider: + properties: + alarms: + description: Information about the CloudWatch alarms. See below. + properties: + alarmNames: + description: One or more CloudWatch alarm names. + items: + type: string + type: array + x-kubernetes-list-type: set + enable: + description: Determines whether to use the CloudWatch alarm + option in the service deployment process. + type: boolean + rollback: + description: Determines whether to configure Amazon ECS to + roll back the service if a service deployment fails. 
If + rollback is used, when a service deployment fails, the service + is rolled back to the last deployment that completed successfully. + type: boolean + type: object + capacityProviderStrategy: + description: Capacity provider strategies to use for the service. + Can be one or more. These can be updated without destroying + and recreating the service only if force_new_deployment = true + and not changing from 0 capacity_provider_strategy blocks to + greater than 0, or vice versa. See below. Conflicts with launch_type. + items: + properties: + base: + description: Number of tasks, at a minimum, to run on the + specified capacity provider. Only one capacity provider + in a capacity provider strategy can have a base defined. + type: number + capacityProvider: + description: Short name of the capacity provider. + type: string + weight: + description: Relative percentage of the total number of + launched tasks that should use the specified capacity + provider. + type: number + type: object + type: array + cluster: + description: Name of an ECS cluster. + type: string + deploymentCircuitBreaker: + description: Configuration block for deployment circuit breaker. + See below. + properties: + enable: + description: Whether to enable the deployment circuit breaker + logic for the service. + type: boolean + rollback: + description: Whether to enable Amazon ECS to roll back the + service if a service deployment fails. If rollback is enabled, + when a service deployment fails, the service is rolled back + to the last deployment that completed successfully. + type: boolean + type: object + deploymentController: + description: Configuration block for deployment controller configuration. + See below. + properties: + type: + description: 'Type of deployment controller. Valid values: + CODE_DEPLOY, ECS, EXTERNAL. Default: ECS.' 
+ type: string + type: object + deploymentMaximumPercent: + description: Upper limit (as a percentage of the service's desiredCount) + of the number of running tasks that can be running in a service + during a deployment. Not valid when using the DAEMON scheduling + strategy. + type: number + deploymentMinimumHealthyPercent: + description: Lower limit (as a percentage of the service's desiredCount) + of the number of running tasks that must remain running and + healthy in a service during a deployment. + type: number + desiredCount: + description: Number of instances of the task definition to place + and keep running. Defaults to 0. Do not specify if using the + DAEMON scheduling strategy. + type: number + enableEcsManagedTags: + description: Specifies whether to enable Amazon ECS managed tags + for the tasks within the service. + type: boolean + enableExecuteCommand: + description: Specifies whether to enable Amazon ECS Exec for the + tasks within the service. + type: boolean + forceNewDeployment: + description: Enable to force a new task deployment of the service. + This can be used to update tasks to use a newer Docker image + with same image/tag combination (e.g., myimage:latest), roll + Fargate tasks onto a newer platform version, or immediately + deploy ordered_placement_strategy and placement_constraints + updates. + type: boolean + healthCheckGracePeriodSeconds: + description: Seconds to ignore failing load balancer health checks + on newly instantiated tasks to prevent premature shutdown, up + to 2147483647. Only valid for services configured to use load + balancers. + type: number + iamRole: + description: ARN of the IAM role that allows Amazon ECS to make + calls to your load balancer on your behalf. This parameter is + required if you are using a load balancer with your service, + but only if your task definition does not use the awsvpc network + mode. If using awsvpc network mode, do not specify this role. 
+ If your account has already created the Amazon ECS service-linked + role, that role is used by default for your service unless you + specify a role here. + type: string + id: + description: ARN that identifies the service. + type: string + launchType: + description: Launch type on which to run your service. The valid + values are EC2, FARGATE, and EXTERNAL. Defaults to EC2. Conflicts + with capacity_provider_strategy. + type: string + loadBalancer: + description: Configuration block for load balancers. See below. + items: + properties: + containerName: + description: Name of the container to associate with the + load balancer (as it appears in a container definition). + type: string + containerPort: + description: Port on the container to associate with the + load balancer. + type: number + elbName: + description: Name of the ELB (Classic) to associate with + the service. + type: string + targetGroupArn: + description: ARN of the Load Balancer target group to associate + with the service. + type: string + type: object + type: array + networkConfiguration: + description: Network configuration for the service. This parameter + is required for task definitions that use the awsvpc network + mode to receive their own Elastic Network Interface, and it + is not supported for other network modes. See below. + properties: + assignPublicIp: + description: Assign a public IP address to the ENI (Fargate + launch type only). Valid values are true or false. Default + false. + type: boolean + securityGroups: + description: Security groups associated with the task or service. + If you do not specify a security group, the default security + group for the VPC is used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: Subnets associated with the task or service. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + orderedPlacementStrategy: + description: Service level strategy rules that are taken into + consideration during task placement. List from top to bottom + in order of precedence. Updates to this configuration will take + effect next task deployment unless force_new_deployment is enabled. + The maximum number of ordered_placement_strategy blocks is 5. + See below. + items: + properties: + field: + description: |- + For the spread placement strategy, valid values are instanceId (or host, + which has the same effect), or any platform or custom attribute that is applied to a container instance. + For the binpack type, valid values are memory and cpu. For the random type, this attribute is not + needed. For more information, see Placement Strategy. + type: string + type: + description: 'Type of placement strategy. Must be one of: + binpack, random, or spread' + type: string + type: object + type: array + placementConstraints: + description: Rules that are taken into consideration during task + placement. Updates to this configuration will take effect next + task deployment unless force_new_deployment is enabled. Maximum + number of placement_constraints is 10. See below. + items: + properties: + expression: + description: Cluster Query Language expression to apply + to the constraint. Does not need to be specified for the + distinctInstance type. For more information, see Cluster + Query Language in the Amazon EC2 Container Service Developer + Guide. + type: string + type: + description: Type of constraint. The only valid values at + this time are memberOf and distinctInstance. + type: string + type: object + type: array + platformVersion: + description: Platform version on which to run your service. Only + applicable for launch_type set to FARGATE. Defaults to LATEST. + More information about Fargate platform versions can be found + in the AWS ECS User Guide. 
+ type: string + propagateTags: + description: Specifies whether to propagate the tags from the + task definition or the service to the tasks. The valid values + are SERVICE and TASK_DEFINITION. + type: string + schedulingStrategy: + description: Scheduling strategy to use for the service. The valid + values are REPLICA and DAEMON. Defaults to REPLICA. Note that + Tasks using the Fargate launch type or the . + type: string + serviceConnectConfiguration: + description: The ECS Service Connect configuration for this service + to discover and connect to services, and be discovered by, and + connected from, other services within a namespace. See below. + properties: + enabled: + description: Specifies whether to use Service Connect with + this service. + type: boolean + logConfiguration: + description: The log configuration for the container. See + below. + properties: + logDriver: + description: The log driver to use for the container. + type: string + options: + additionalProperties: + type: string + description: The configuration options to send to the + log driver. + type: object + x-kubernetes-map-type: granular + secretOption: + description: The secrets to pass to the log configuration. + See below. + items: + properties: + name: + description: The name of the secret. + type: string + valueFrom: + description: The secret to expose to the container. + The supported values are either the full ARN of + the AWS Secrets Manager secret or the full ARN + of the parameter in the SSM Parameter Store. + type: string + type: object + type: array + type: object + namespace: + description: The namespace name or ARN of the aws_service_discovery_http_namespace + for use with Service Connect. + type: string + service: + description: The list of Service Connect service objects. + See below. + items: + properties: + clientAlias: + description: The list of client aliases for this Service + Connect service. You use these to assign names that + can be used by client applications. 
The maximum number + of client aliases that you can have in this list is + 1. See below. + properties: + dnsName: + description: The name that you use in the applications + of client tasks to connect to this service. + type: string + port: + description: The listening port number for the Service + Connect proxy. This port is available inside of + all of the tasks within the same namespace. + type: number + type: object + discoveryName: + description: The name of the new AWS Cloud Map service + that Amazon ECS creates for this Amazon ECS service. + type: string + ingressPortOverride: + description: The port number for the Service Connect + proxy to listen on. + type: number + portName: + description: The name of one of the portMappings from + all the containers in the task definition of this + Amazon ECS service. + type: string + timeout: + description: Configuration timeouts for Service Connect + properties: + idleTimeoutSeconds: + description: The amount of time in seconds a connection + will stay active while idle. A value of 0 can + be set to disable idleTimeout. + type: number + perRequestTimeoutSeconds: + description: The amount of time in seconds for the + upstream to respond with a complete response per + request. A value of 0 can be set to disable perRequestTimeout. + Can only be set when appProtocol isn't TCP. + type: number + type: object + tls: + description: The configuration for enabling Transport + Layer Security (TLS) + properties: + issuerCertAuthority: + description: The details of the certificate authority + which will issue the certificate. + properties: + awsPcaAuthorityArn: + description: The ARN of the aws_acmpca_certificate_authority + used to create the TLS Certificates. + type: string + type: object + kmsKey: + description: The KMS key used to encrypt the private + key in Secrets Manager. + type: string + roleArn: + description: The ARN of the IAM Role that's associated + with the Service Connect TLS. 
+ type: string + type: object + type: object + type: array + type: object + serviceRegistries: + description: Service discovery registries for the service. The + maximum number of service_registries blocks is 1. See below. + properties: + containerName: + description: Container name value, already specified in the + task definition, to be used for your service discovery service. + type: string + containerPort: + description: Port value, already specified in the task definition, + to be used for your service discovery service. + type: number + port: + description: Port value used if your Service Discovery service + specified an SRV record. + type: number + registryArn: + description: ARN of the Service Registry. The currently supported + service registry is Amazon Route 53 Auto Naming Service(aws_service_discovery_service). + For more information, see Service + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + taskDefinition: + description: Family and revision (family:revision) or full ARN + of the task definition that you want to run in your service. + Required unless using the EXTERNAL deployment controller. If + a revision is not specified, the latest ACTIVE revision is used. + type: string + triggers: + additionalProperties: + type: string + description: Map of arbitrary keys and values that, when changed, + will trigger an in-place update (redeployment). Useful with + plantimestamp(). See example above. + type: object + x-kubernetes-map-type: granular + waitForSteadyState: + description: Default false. + type: boolean + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ecs.aws.upbound.io_taskdefinitions.yaml b/package/crds/ecs.aws.upbound.io_taskdefinitions.yaml index 260b47a049..54ef2c47ba 100644 --- a/package/crds/ecs.aws.upbound.io_taskdefinitions.yaml +++ b/package/crds/ecs.aws.upbound.io_taskdefinitions.yaml @@ -1402,3 +1402,1330 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: TaskDefinition is the Schema for the TaskDefinitions API. Manages + a revision of an ECS task definition. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TaskDefinitionSpec defines the desired state of TaskDefinition + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerDefinitions: + description: A list of valid container definitions provided as + a single valid JSON document. Please note that you should only + provide values that are part of the container definition document. + For a detailed description of what parameters are available, + see the Task Definition Parameters section from the official + Developer Guide. + type: string + cpu: + description: Number of cpu units used by the task. If the requires_compatibilities + is FARGATE this field is required. + type: string + ephemeralStorage: + description: The amount of ephemeral storage to allocate for the + task. This parameter is used to expand the total amount of ephemeral + storage available, beyond the default amount, for tasks hosted + on AWS Fargate. See Ephemeral Storage. + properties: + sizeInGib: + description: The total amount, in GiB, of ephemeral storage + to set for the task. The minimum supported value is 21 GiB + and the maximum supported value is 200 GiB. 
+ type: number + type: object + executionRoleArn: + description: ARN of the task execution role that the Amazon ECS + container agent and the Docker daemon can assume. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + family: + description: A unique name for your task definition. + type: string + inferenceAccelerator: + description: Configuration block(s) with Inference Accelerators + settings. Detailed below. + items: + properties: + deviceName: + description: Elastic Inference accelerator device name. + The deviceName must also be referenced in a container + definition as a ResourceRequirement. + type: string + deviceType: + description: Elastic Inference accelerator type to use. + type: string + type: object + type: array + ipcMode: + description: IPC resource namespace to be used for the containers + in the task The valid values are host, task, and none. + type: string + memory: + description: Amount (in MiB) of memory used by the task. If the + requires_compatibilities is FARGATE this field is required. + type: string + networkMode: + description: Docker networking mode to use for the containers + in the task. Valid values are none, bridge, awsvpc, and host. + type: string + pidMode: + description: Process namespace to use for the containers in the + task. The valid values are host and task. + type: string + placementConstraints: + description: Configuration block for rules that are taken into + consideration during task placement. Maximum number of placement_constraints + is 10. Detailed below. + items: + properties: + expression: + description: Cluster Query Language expression to apply + to the constraint. For more information, see Cluster Query + Language in the Amazon EC2 Container Service Developer + Guide. + type: string + type: + description: Type of constraint. 
Use memberOf to restrict + selection to a group of valid candidates. Note that distinctInstance + is not supported in task definitions. + type: string + type: object + type: array + proxyConfiguration: + description: Configuration block for the App Mesh proxy. Detailed + below. + properties: + containerName: + description: Name of the container that will serve as the + App Mesh proxy. + type: string + properties: + additionalProperties: + type: string + description: Set of network configuration parameters to provide + the Container Network Interface (CNI) plugin, specified + a key-value mapping. + type: object + x-kubernetes-map-type: granular + type: + description: Proxy type. The default value is APPMESH. The + only supported value is APPMESH. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + requiresCompatibilities: + description: Set of launch types required by the task. The valid + values are EC2 and FARGATE. + items: + type: string + type: array + x-kubernetes-list-type: set + runtimePlatform: + description: Configuration block for runtime_platform that containers + in your task may use. + properties: + cpuArchitecture: + description: Must be set to either X86_64 or ARM64; see cpu + architecture + type: string + operatingSystemFamily: + description: If the requires_compatibilities is FARGATE this + field is required; must be set to a valid option from the + operating system family in the runtime platform setting + type: string + type: object + skipDestroy: + description: Whether to retain the old revision when the resource + is destroyed or replacement is necessary. Default is false. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taskRoleArn: + description: ARN of IAM role that allows your Amazon ECS container + task to make calls to other AWS services. 
+ type: string + trackLatest: + description: Whether should track latest task definition or the + one created with the resource. Default is false. + type: boolean + volume: + description: Configuration block for volumes that containers in + your task may use. Detailed below. + items: + properties: + dockerVolumeConfiguration: + description: Configuration block to configure a docker volume. + Detailed below. + properties: + autoprovision: + description: 'If this value is true, the Docker volume + is created if it does not already exist. Note: This + field is only used if the scope is shared.' + type: boolean + driver: + description: Docker volume driver to use. The driver + value must match the driver name provided by Docker + because it is used for task placement. + type: string + driverOpts: + additionalProperties: + type: string + description: Map of Docker driver specific options. + type: object + x-kubernetes-map-type: granular + labels: + additionalProperties: + type: string + description: Map of custom metadata to add to your Docker + volume. + type: object + x-kubernetes-map-type: granular + scope: + description: Scope for the Docker volume, which determines + its lifecycle, either task or shared. Docker volumes + that are scoped to a task are automatically provisioned + when the task starts and destroyed when the task stops. + Docker volumes that are scoped as shared persist after + the task stops. + type: string + type: object + efsVolumeConfiguration: + description: Configuration block for an EFS volume. Detailed + below. + properties: + authorizationConfig: + description: Configuration block for authorization for + the Amazon EFS file system. Detailed below. + properties: + accessPointId: + description: Access point ID to use. If an access + point is specified, the root directory value will + be relative to the directory set for the access + point. If specified, transit encryption must be + enabled in the EFSVolumeConfiguration. 
+ type: string + iam: + description: 'Whether or not to use the Amazon ECS + task IAM role defined in a task definition when + mounting the Amazon EFS file system. If enabled, + transit encryption must be enabled in the EFSVolumeConfiguration. + Valid values: ENABLED, DISABLED. If this parameter + is omitted, the default value of DISABLED is used.' + type: string + type: object + fileSystemId: + description: ID of the EFS File System. + type: string + rootDirectory: + description: Directory within the Amazon EFS file system + to mount as the root directory inside the host. If + this parameter is omitted, the root of the Amazon + EFS volume will be used. Specifying / will have the + same effect as omitting this parameter. This argument + is ignored when using authorization_config. + type: string + transitEncryption: + description: 'Whether or not to enable encryption for + Amazon EFS data in transit between the Amazon ECS + host and the Amazon EFS server. Transit encryption + must be enabled if Amazon EFS IAM authorization is + used. Valid values: ENABLED, DISABLED. If this parameter + is omitted, the default value of DISABLED is used.' + type: string + transitEncryptionPort: + description: Port to use for transit encryption. If + you do not specify a transit encryption port, it will + use the port selection strategy that the Amazon EFS + mount helper uses. + type: number + type: object + fsxWindowsFileServerVolumeConfiguration: + description: Configuration block for an FSX Windows File + Server volume. Detailed below. + properties: + authorizationConfig: + description: Configuration block for authorization for + the Amazon FSx for Windows File Server file system + detailed below. + properties: + credentialsParameter: + description: The authorization credential option + to use. The authorization credential options can + be provided using either the Amazon Resource Name + (ARN) of an AWS Secrets Manager secret or AWS + Systems Manager Parameter Store parameter. 
The + ARNs refer to the stored credentials. + type: string + domain: + description: A fully qualified domain name hosted + by an AWS Directory Service Managed Microsoft + AD (Active Directory) or self-hosted AD on Amazon + EC2. + type: string + type: object + fileSystemId: + description: The Amazon FSx for Windows File Server + file system ID to use. + type: string + rootDirectory: + description: The directory within the Amazon FSx for + Windows File Server file system to mount as the root + directory inside the host. + type: string + type: object + hostPath: + description: Path on the host container instance that is + presented to the container. If not set, ECS will create + a nonpersistent data volume that starts empty and is deleted + after the task has finished. + type: string + name: + description: |- + Name of the volume. This name is referenced in the sourceVolume + parameter of container definition in the mountPoints section. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerDefinitions: + description: A list of valid container definitions provided as + a single valid JSON document. Please note that you should only + provide values that are part of the container definition document. 
+ For a detailed description of what parameters are available, + see the Task Definition Parameters section from the official + Developer Guide. + type: string + cpu: + description: Number of cpu units used by the task. If the requires_compatibilities + is FARGATE this field is required. + type: string + ephemeralStorage: + description: The amount of ephemeral storage to allocate for the + task. This parameter is used to expand the total amount of ephemeral + storage available, beyond the default amount, for tasks hosted + on AWS Fargate. See Ephemeral Storage. + properties: + sizeInGib: + description: The total amount, in GiB, of ephemeral storage + to set for the task. The minimum supported value is 21 GiB + and the maximum supported value is 200 GiB. + type: number + type: object + executionRoleArn: + description: ARN of the task execution role that the Amazon ECS + container agent and the Docker daemon can assume. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + family: + description: A unique name for your task definition. + type: string + inferenceAccelerator: + description: Configuration block(s) with Inference Accelerators + settings. Detailed below. + items: + properties: + deviceName: + description: Elastic Inference accelerator device name. + The deviceName must also be referenced in a container + definition as a ResourceRequirement. + type: string + deviceType: + description: Elastic Inference accelerator type to use. + type: string + type: object + type: array + ipcMode: + description: IPC resource namespace to be used for the containers + in the task The valid values are host, task, and none. 
+ type: string + memory: + description: Amount (in MiB) of memory used by the task. If the + requires_compatibilities is FARGATE this field is required. + type: string + networkMode: + description: Docker networking mode to use for the containers + in the task. Valid values are none, bridge, awsvpc, and host. + type: string + pidMode: + description: Process namespace to use for the containers in the + task. The valid values are host and task. + type: string + placementConstraints: + description: Configuration block for rules that are taken into + consideration during task placement. Maximum number of placement_constraints + is 10. Detailed below. + items: + properties: + expression: + description: Cluster Query Language expression to apply + to the constraint. For more information, see Cluster Query + Language in the Amazon EC2 Container Service Developer + Guide. + type: string + type: + description: Type of constraint. Use memberOf to restrict + selection to a group of valid candidates. Note that distinctInstance + is not supported in task definitions. + type: string + type: object + type: array + proxyConfiguration: + description: Configuration block for the App Mesh proxy. Detailed + below. + properties: + containerName: + description: Name of the container that will serve as the + App Mesh proxy. + type: string + properties: + additionalProperties: + type: string + description: Set of network configuration parameters to provide + the Container Network Interface (CNI) plugin, specified + a key-value mapping. + type: object + x-kubernetes-map-type: granular + type: + description: Proxy type. The default value is APPMESH. The + only supported value is APPMESH. + type: string + type: object + requiresCompatibilities: + description: Set of launch types required by the task. The valid + values are EC2 and FARGATE. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + runtimePlatform: + description: Configuration block for runtime_platform that containers + in your task may use. + properties: + cpuArchitecture: + description: Must be set to either X86_64 or ARM64; see cpu + architecture + type: string + operatingSystemFamily: + description: If the requires_compatibilities is FARGATE this + field is required; must be set to a valid option from the + operating system family in the runtime platform setting + type: string + type: object + skipDestroy: + description: Whether to retain the old revision when the resource + is destroyed or replacement is necessary. Default is false. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taskRoleArn: + description: ARN of IAM role that allows your Amazon ECS container + task to make calls to other AWS services. + type: string + trackLatest: + description: Whether should track latest task definition or the + one created with the resource. Default is false. + type: boolean + volume: + description: Configuration block for volumes that containers in + your task may use. Detailed below. + items: + properties: + dockerVolumeConfiguration: + description: Configuration block to configure a docker volume. + Detailed below. + properties: + autoprovision: + description: 'If this value is true, the Docker volume + is created if it does not already exist. Note: This + field is only used if the scope is shared.' + type: boolean + driver: + description: Docker volume driver to use. The driver + value must match the driver name provided by Docker + because it is used for task placement. + type: string + driverOpts: + additionalProperties: + type: string + description: Map of Docker driver specific options. 
+ type: object + x-kubernetes-map-type: granular + labels: + additionalProperties: + type: string + description: Map of custom metadata to add to your Docker + volume. + type: object + x-kubernetes-map-type: granular + scope: + description: Scope for the Docker volume, which determines + its lifecycle, either task or shared. Docker volumes + that are scoped to a task are automatically provisioned + when the task starts and destroyed when the task stops. + Docker volumes that are scoped as shared persist after + the task stops. + type: string + type: object + efsVolumeConfiguration: + description: Configuration block for an EFS volume. Detailed + below. + properties: + authorizationConfig: + description: Configuration block for authorization for + the Amazon EFS file system. Detailed below. + properties: + accessPointId: + description: Access point ID to use. If an access + point is specified, the root directory value will + be relative to the directory set for the access + point. If specified, transit encryption must be + enabled in the EFSVolumeConfiguration. + type: string + iam: + description: 'Whether or not to use the Amazon ECS + task IAM role defined in a task definition when + mounting the Amazon EFS file system. If enabled, + transit encryption must be enabled in the EFSVolumeConfiguration. + Valid values: ENABLED, DISABLED. If this parameter + is omitted, the default value of DISABLED is used.' + type: string + type: object + fileSystemId: + description: ID of the EFS File System. + type: string + rootDirectory: + description: Directory within the Amazon EFS file system + to mount as the root directory inside the host. If + this parameter is omitted, the root of the Amazon + EFS volume will be used. Specifying / will have the + same effect as omitting this parameter. This argument + is ignored when using authorization_config. 
+ type: string + transitEncryption: + description: 'Whether or not to enable encryption for + Amazon EFS data in transit between the Amazon ECS + host and the Amazon EFS server. Transit encryption + must be enabled if Amazon EFS IAM authorization is + used. Valid values: ENABLED, DISABLED. If this parameter + is omitted, the default value of DISABLED is used.' + type: string + transitEncryptionPort: + description: Port to use for transit encryption. If + you do not specify a transit encryption port, it will + use the port selection strategy that the Amazon EFS + mount helper uses. + type: number + type: object + fsxWindowsFileServerVolumeConfiguration: + description: Configuration block for an FSX Windows File + Server volume. Detailed below. + properties: + authorizationConfig: + description: Configuration block for authorization for + the Amazon FSx for Windows File Server file system + detailed below. + properties: + credentialsParameter: + description: The authorization credential option + to use. The authorization credential options can + be provided using either the Amazon Resource Name + (ARN) of an AWS Secrets Manager secret or AWS + Systems Manager Parameter Store parameter. The + ARNs refer to the stored credentials. + type: string + domain: + description: A fully qualified domain name hosted + by an AWS Directory Service Managed Microsoft + AD (Active Directory) or self-hosted AD on Amazon + EC2. + type: string + type: object + fileSystemId: + description: The Amazon FSx for Windows File Server + file system ID to use. + type: string + rootDirectory: + description: The directory within the Amazon FSx for + Windows File Server file system to mount as the root + directory inside the host. + type: string + type: object + hostPath: + description: Path on the host container instance that is + presented to the container. If not set, ECS will create + a nonpersistent data volume that starts empty and is deleted + after the task has finished. 
+ type: string + name: + description: |- + Name of the volume. This name is referenced in the sourceVolume + parameter of container definition in the mountPoints section. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.containerDefinitions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.containerDefinitions) + || (has(self.initProvider) && has(self.initProvider.containerDefinitions))' + - message: spec.forProvider.family is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.family) + || (has(self.initProvider) && has(self.initProvider.family))' + status: + description: TaskDefinitionStatus defines the observed state of TaskDefinition. + properties: + atProvider: + properties: + arn: + description: Full ARN of the Task Definition (including both family + and revision). + type: string + arnWithoutRevision: + description: ARN of the Task Definition with the trailing revision + removed. This may be useful for situations where the latest + task definition is always desired. If a revision isn't specified, + the latest ACTIVE revision is used. See the AWS documentation + for details. + type: string + containerDefinitions: + description: A list of valid container definitions provided as + a single valid JSON document. Please note that you should only + provide values that are part of the container definition document. + For a detailed description of what parameters are available, + see the Task Definition Parameters section from the official + Developer Guide. + type: string + cpu: + description: Number of cpu units used by the task. If the requires_compatibilities + is FARGATE this field is required. + type: string + ephemeralStorage: + description: The amount of ephemeral storage to allocate for the + task. 
This parameter is used to expand the total amount of ephemeral + storage available, beyond the default amount, for tasks hosted + on AWS Fargate. See Ephemeral Storage. + properties: + sizeInGib: + description: The total amount, in GiB, of ephemeral storage + to set for the task. The minimum supported value is 21 GiB + and the maximum supported value is 200 GiB. + type: number + type: object + executionRoleArn: + description: ARN of the task execution role that the Amazon ECS + container agent and the Docker daemon can assume. + type: string + family: + description: A unique name for your task definition. + type: string + id: + type: string + inferenceAccelerator: + description: Configuration block(s) with Inference Accelerators + settings. Detailed below. + items: + properties: + deviceName: + description: Elastic Inference accelerator device name. + The deviceName must also be referenced in a container + definition as a ResourceRequirement. + type: string + deviceType: + description: Elastic Inference accelerator type to use. + type: string + type: object + type: array + ipcMode: + description: IPC resource namespace to be used for the containers + in the task The valid values are host, task, and none. + type: string + memory: + description: Amount (in MiB) of memory used by the task. If the + requires_compatibilities is FARGATE this field is required. + type: string + networkMode: + description: Docker networking mode to use for the containers + in the task. Valid values are none, bridge, awsvpc, and host. + type: string + pidMode: + description: Process namespace to use for the containers in the + task. The valid values are host and task. + type: string + placementConstraints: + description: Configuration block for rules that are taken into + consideration during task placement. Maximum number of placement_constraints + is 10. Detailed below. + items: + properties: + expression: + description: Cluster Query Language expression to apply + to the constraint. 
For more information, see Cluster Query + Language in the Amazon EC2 Container Service Developer + Guide. + type: string + type: + description: Type of constraint. Use memberOf to restrict + selection to a group of valid candidates. Note that distinctInstance + is not supported in task definitions. + type: string + type: object + type: array + proxyConfiguration: + description: Configuration block for the App Mesh proxy. Detailed + below. + properties: + containerName: + description: Name of the container that will serve as the + App Mesh proxy. + type: string + properties: + additionalProperties: + type: string + description: Set of network configuration parameters to provide + the Container Network Interface (CNI) plugin, specified + a key-value mapping. + type: object + x-kubernetes-map-type: granular + type: + description: Proxy type. The default value is APPMESH. The + only supported value is APPMESH. + type: string + type: object + requiresCompatibilities: + description: Set of launch types required by the task. The valid + values are EC2 and FARGATE. + items: + type: string + type: array + x-kubernetes-list-type: set + revision: + description: Revision of the task in a particular family. + type: number + runtimePlatform: + description: Configuration block for runtime_platform that containers + in your task may use. + properties: + cpuArchitecture: + description: Must be set to either X86_64 or ARM64; see cpu + architecture + type: string + operatingSystemFamily: + description: If the requires_compatibilities is FARGATE this + field is required; must be set to a valid option from the + operating system family in the runtime platform setting + type: string + type: object + skipDestroy: + description: Whether to retain the old revision when the resource + is destroyed or replacement is necessary. Default is false. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + taskRoleArn: + description: ARN of IAM role that allows your Amazon ECS container + task to make calls to other AWS services. + type: string + trackLatest: + description: Whether should track latest task definition or the + one created with the resource. Default is false. + type: boolean + volume: + description: Configuration block for volumes that containers in + your task may use. Detailed below. + items: + properties: + dockerVolumeConfiguration: + description: Configuration block to configure a docker volume. + Detailed below. + properties: + autoprovision: + description: 'If this value is true, the Docker volume + is created if it does not already exist. Note: This + field is only used if the scope is shared.' + type: boolean + driver: + description: Docker volume driver to use. The driver + value must match the driver name provided by Docker + because it is used for task placement. + type: string + driverOpts: + additionalProperties: + type: string + description: Map of Docker driver specific options. + type: object + x-kubernetes-map-type: granular + labels: + additionalProperties: + type: string + description: Map of custom metadata to add to your Docker + volume. + type: object + x-kubernetes-map-type: granular + scope: + description: Scope for the Docker volume, which determines + its lifecycle, either task or shared. Docker volumes + that are scoped to a task are automatically provisioned + when the task starts and destroyed when the task stops. + Docker volumes that are scoped as shared persist after + the task stops. + type: string + type: object + efsVolumeConfiguration: + description: Configuration block for an EFS volume. Detailed + below. 
+ properties: + authorizationConfig: + description: Configuration block for authorization for + the Amazon EFS file system. Detailed below. + properties: + accessPointId: + description: Access point ID to use. If an access + point is specified, the root directory value will + be relative to the directory set for the access + point. If specified, transit encryption must be + enabled in the EFSVolumeConfiguration. + type: string + iam: + description: 'Whether or not to use the Amazon ECS + task IAM role defined in a task definition when + mounting the Amazon EFS file system. If enabled, + transit encryption must be enabled in the EFSVolumeConfiguration. + Valid values: ENABLED, DISABLED. If this parameter + is omitted, the default value of DISABLED is used.' + type: string + type: object + fileSystemId: + description: ID of the EFS File System. + type: string + rootDirectory: + description: Directory within the Amazon EFS file system + to mount as the root directory inside the host. If + this parameter is omitted, the root of the Amazon + EFS volume will be used. Specifying / will have the + same effect as omitting this parameter. This argument + is ignored when using authorization_config. + type: string + transitEncryption: + description: 'Whether or not to enable encryption for + Amazon EFS data in transit between the Amazon ECS + host and the Amazon EFS server. Transit encryption + must be enabled if Amazon EFS IAM authorization is + used. Valid values: ENABLED, DISABLED. If this parameter + is omitted, the default value of DISABLED is used.' + type: string + transitEncryptionPort: + description: Port to use for transit encryption. If + you do not specify a transit encryption port, it will + use the port selection strategy that the Amazon EFS + mount helper uses. + type: number + type: object + fsxWindowsFileServerVolumeConfiguration: + description: Configuration block for an FSX Windows File + Server volume. Detailed below. 
+ properties: + authorizationConfig: + description: Configuration block for authorization for + the Amazon FSx for Windows File Server file system + detailed below. + properties: + credentialsParameter: + description: The authorization credential option + to use. The authorization credential options can + be provided using either the Amazon Resource Name + (ARN) of an AWS Secrets Manager secret or AWS + Systems Manager Parameter Store parameter. The + ARNs refer to the stored credentials. + type: string + domain: + description: A fully qualified domain name hosted + by an AWS Directory Service Managed Microsoft + AD (Active Directory) or self-hosted AD on Amazon + EC2. + type: string + type: object + fileSystemId: + description: The Amazon FSx for Windows File Server + file system ID to use. + type: string + rootDirectory: + description: The directory within the Amazon FSx for + Windows File Server file system to mount as the root + directory inside the host. + type: string + type: object + hostPath: + description: Path on the host container instance that is + presented to the container. If not set, ECS will create + a nonpersistent data volume that starts empty and is deleted + after the task has finished. + type: string + name: + description: |- + Name of the volume. This name is referenced in the sourceVolume + parameter of container definition in the mountPoints section. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/efs.aws.upbound.io_accesspoints.yaml b/package/crds/efs.aws.upbound.io_accesspoints.yaml index 867a318867..97f3b6a06c 100644 --- a/package/crds/efs.aws.upbound.io_accesspoints.yaml +++ b/package/crds/efs.aws.upbound.io_accesspoints.yaml @@ -687,3 +687,654 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AccessPoint is the Schema for the AccessPoints API. Provides + an Elastic File System (EFS) access point. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccessPointSpec defines the desired state of AccessPoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + fileSystemId: + description: ID of the file system for which the access point + is intended. + type: string + fileSystemIdRef: + description: Reference to a FileSystem in efs to populate fileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileSystemIdSelector: + description: Selector for a FileSystem in efs to populate fileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + posixUser: + description: Operating system user and group applied to all file + system requests made using the access point. Detailed below. + properties: + gid: + description: POSIX group ID used for all file system operations + using this access point. + type: number + secondaryGids: + description: Secondary POSIX group IDs used for all file system + operations using this access point. + items: + type: number + type: array + x-kubernetes-list-type: set + uid: + description: POSIX user ID used for all file system operations + using this access point. + type: number + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + rootDirectory: + description: Directory on the Amazon EFS file system that the + access point provides access to. Detailed below. + properties: + creationInfo: + description: POSIX IDs and permissions to apply to the access + point's Root Directory. See Creation Info below. + properties: + ownerGid: + description: POSIX group ID to apply to the root_directory. + type: number + ownerUid: + description: POSIX user ID to apply to the root_directory. + type: number + permissions: + description: POSIX permissions to apply to the RootDirectory, + in the format of an octal number representing the file's + mode bits. + type: string + type: object + path: + description: Path on the EFS file system to expose as the + root directory to NFS clients using the access point to + access the EFS file system. A path can have up to four subdirectories. + If the specified path does not exist, you are required to + provide creation_info. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + fileSystemId: + description: ID of the file system for which the access point + is intended. 
+ type: string + fileSystemIdRef: + description: Reference to a FileSystem in efs to populate fileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileSystemIdSelector: + description: Selector for a FileSystem in efs to populate fileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + posixUser: + description: Operating system user and group applied to all file + system requests made using the access point. Detailed below. + properties: + gid: + description: POSIX group ID used for all file system operations + using this access point. + type: number + secondaryGids: + description: Secondary POSIX group IDs used for all file system + operations using this access point. + items: + type: number + type: array + x-kubernetes-list-type: set + uid: + description: POSIX user ID used for all file system operations + using this access point. + type: number + type: object + rootDirectory: + description: Directory on the Amazon EFS file system that the + access point provides access to. Detailed below. + properties: + creationInfo: + description: POSIX IDs and permissions to apply to the access + point's Root Directory. See Creation Info below. + properties: + ownerGid: + description: POSIX group ID to apply to the root_directory. + type: number + ownerUid: + description: POSIX user ID to apply to the root_directory. + type: number + permissions: + description: POSIX permissions to apply to the RootDirectory, + in the format of an octal number representing the file's + mode bits. + type: string + type: object + path: + description: Path on the EFS file system to expose as the + root directory to NFS clients using the access point to + access the EFS file system. A path can have up to four subdirectories. + If the specified path does not exist, you are required to + provide creation_info. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AccessPointStatus defines the observed state of AccessPoint. + properties: + atProvider: + properties: + arn: + description: ARN of the access point. + type: string + fileSystemArn: + description: ARN of the file system. + type: string + fileSystemId: + description: ID of the file system for which the access point + is intended. + type: string + id: + description: ID of the access point. + type: string + ownerId: + description: ID of the access point. 
+ type: string + posixUser: + description: Operating system user and group applied to all file + system requests made using the access point. Detailed below. + properties: + gid: + description: POSIX group ID used for all file system operations + using this access point. + type: number + secondaryGids: + description: Secondary POSIX group IDs used for all file system + operations using this access point. + items: + type: number + type: array + x-kubernetes-list-type: set + uid: + description: POSIX user ID used for all file system operations + using this access point. + type: number + type: object + rootDirectory: + description: Directory on the Amazon EFS file system that the + access point provides access to. Detailed below. + properties: + creationInfo: + description: POSIX IDs and permissions to apply to the access + point's Root Directory. See Creation Info below. + properties: + ownerGid: + description: POSIX group ID to apply to the root_directory. + type: number + ownerUid: + description: POSIX user ID to apply to the root_directory. + type: number + permissions: + description: POSIX permissions to apply to the RootDirectory, + in the format of an octal number representing the file's + mode bits. + type: string + type: object + path: + description: Path on the EFS file system to expose as the + root directory to NFS clients using the access point to + access the EFS file system. A path can have up to four subdirectories. + If the specified path does not exist, you are required to + provide creation_info. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. 
+ type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/efs.aws.upbound.io_backuppolicies.yaml b/package/crds/efs.aws.upbound.io_backuppolicies.yaml index dafaa5db37..7b75ab0779 100644 --- a/package/crds/efs.aws.upbound.io_backuppolicies.yaml +++ b/package/crds/efs.aws.upbound.io_backuppolicies.yaml @@ -523,3 +523,502 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupPolicy is the Schema for the BackupPolicys API. Provides + an Elastic File System (EFS) Backup Policy resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupPolicySpec defines the desired state of BackupPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + backupPolicy: + description: A backup_policy object (documented below). + properties: + status: + description: 'A status of the backup policy. Valid values: + ENABLED, DISABLED.' + type: string + type: object + fileSystemId: + description: The ID of the EFS file system. + type: string + fileSystemIdRef: + description: Reference to a FileSystem in efs to populate fileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileSystemIdSelector: + description: Selector for a FileSystem in efs to populate fileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + backupPolicy: + description: A backup_policy object (documented below). + properties: + status: + description: 'A status of the backup policy. Valid values: + ENABLED, DISABLED.' + type: string + type: object + fileSystemId: + description: The ID of the EFS file system. + type: string + fileSystemIdRef: + description: Reference to a FileSystem in efs to populate fileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileSystemIdSelector: + description: Selector for a FileSystem in efs to populate fileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.backupPolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backupPolicy) + || (has(self.initProvider) && has(self.initProvider.backupPolicy))' + status: + description: BackupPolicyStatus defines the observed state of BackupPolicy. + properties: + atProvider: + properties: + backupPolicy: + description: A backup_policy object (documented below). + properties: + status: + description: 'A status of the backup policy. Valid values: + ENABLED, DISABLED.' + type: string + type: object + fileSystemId: + description: The ID of the EFS file system. + type: string + id: + description: The ID that identifies the file system (e.g., fs-ccfc0d65). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/efs.aws.upbound.io_filesystems.yaml b/package/crds/efs.aws.upbound.io_filesystems.yaml index b2d15bf1dc..377a82eaaf 100644 --- a/package/crds/efs.aws.upbound.io_filesystems.yaml +++ b/package/crds/efs.aws.upbound.io_filesystems.yaml @@ -757,3 +757,736 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FileSystem is the Schema for the FileSystems API. Provides an + Elastic File System (EFS) File System resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FileSystemSpec defines the desired state of FileSystem + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + availabilityZoneName: + description: the AWS Availability Zone in which to create the + file system. Used to create a file system that uses One Zone + storage classes. See user guide for more information. + type: string + creationToken: + description: |- + A unique name (a maximum of 64 characters are allowed) + used as reference when creating the Elastic File System to ensure idempotent file + system creation. See Elastic File System + user guide for more information. + type: string + encrypted: + description: If true, the disk will be encrypted. + type: boolean + kmsKeyId: + description: The ARN for the KMS encryption key. When specifying + kms_key_id, encrypted needs to be set to true. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + lifecyclePolicy: + description: A file system lifecycle policy object. See lifecycle_policy + block below for details. + items: + properties: + transitionToArchive: + description: 'Indicates how long it takes to transition + files to the archive storage class. Requires transition_to_ia, + Elastic Throughput and General Purpose performance mode. + Valid values: AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, + AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, + AFTER_270_DAYS, or AFTER_365_DAYS.' + type: string + transitionToIa: + description: 'Indicates how long it takes to transition + files to the IA storage class. Valid values: AFTER_1_DAY, + AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, + AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, or AFTER_365_DAYS.' + type: string + transitionToPrimaryStorageClass: + description: 'Describes the policy used to transition a + file from infequent access storage to primary storage. + Valid values: AFTER_1_ACCESS.' + type: string + type: object + type: array + performanceMode: + description: 'The file system performance mode. Can be either + "generalPurpose" or "maxIO" (Default: "generalPurpose").' + type: string + protection: + description: A file system protection object. See protection block + below for details. + properties: + replicationOverwrite: + description: 'Indicates whether replication overwrite protection + is enabled. Valid values: ENABLED or DISABLED.' + type: string + type: object + provisionedThroughputInMibps: + description: The throughput, measured in MiB/s, that you want + to provision for the file system. Only applicable with throughput_mode + set to provisioned. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + throughputMode: + description: 'Throughput mode for the file system. Defaults to + bursting. Valid values: bursting, provisioned, or elastic. When + using provisioned, also set provisioned_throughput_in_mibps.' + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + availabilityZoneName: + description: the AWS Availability Zone in which to create the + file system. Used to create a file system that uses One Zone + storage classes. See user guide for more information. + type: string + creationToken: + description: |- + A unique name (a maximum of 64 characters are allowed) + used as reference when creating the Elastic File System to ensure idempotent file + system creation. See Elastic File System + user guide for more information. + type: string + encrypted: + description: If true, the disk will be encrypted. + type: boolean + kmsKeyId: + description: The ARN for the KMS encryption key. When specifying + kms_key_id, encrypted needs to be set to true. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + lifecyclePolicy: + description: A file system lifecycle policy object. See lifecycle_policy + block below for details. + items: + properties: + transitionToArchive: + description: 'Indicates how long it takes to transition + files to the archive storage class. Requires transition_to_ia, + Elastic Throughput and General Purpose performance mode. + Valid values: AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, + AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, + AFTER_270_DAYS, or AFTER_365_DAYS.' + type: string + transitionToIa: + description: 'Indicates how long it takes to transition + files to the IA storage class. Valid values: AFTER_1_DAY, + AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, + AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, or AFTER_365_DAYS.' + type: string + transitionToPrimaryStorageClass: + description: 'Describes the policy used to transition a + file from infequent access storage to primary storage. + Valid values: AFTER_1_ACCESS.' + type: string + type: object + type: array + performanceMode: + description: 'The file system performance mode. Can be either + "generalPurpose" or "maxIO" (Default: "generalPurpose").' + type: string + protection: + description: A file system protection object. See protection block + below for details. + properties: + replicationOverwrite: + description: 'Indicates whether replication overwrite protection + is enabled. Valid values: ENABLED or DISABLED.' + type: string + type: object + provisionedThroughputInMibps: + description: The throughput, measured in MiB/s, that you want + to provision for the file system. Only applicable with throughput_mode + set to provisioned. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + throughputMode: + description: 'Throughput mode for the file system. 
Defaults to + bursting. Valid values: bursting, provisioned, or elastic. When + using provisioned, also set provisioned_throughput_in_mibps.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: FileSystemStatus defines the observed state of FileSystem. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name of the file system. + type: string + availabilityZoneId: + description: The identifier of the Availability Zone in which + the file system's One Zone storage classes exist. 
+ type: string + availabilityZoneName: + description: the AWS Availability Zone in which to create the + file system. Used to create a file system that uses One Zone + storage classes. See user guide for more information. + type: string + creationToken: + description: |- + A unique name (a maximum of 64 characters are allowed) + used as reference when creating the Elastic File System to ensure idempotent file + system creation. See Elastic File System + user guide for more information. + type: string + dnsName: + description: The DNS name for the filesystem per documented convention. + type: string + encrypted: + description: If true, the disk will be encrypted. + type: boolean + id: + description: The ID that identifies the file system (e.g., fs-ccfc0d65). + type: string + kmsKeyId: + description: The ARN for the KMS encryption key. When specifying + kms_key_id, encrypted needs to be set to true. + type: string + lifecyclePolicy: + description: A file system lifecycle policy object. See lifecycle_policy + block below for details. + items: + properties: + transitionToArchive: + description: 'Indicates how long it takes to transition + files to the archive storage class. Requires transition_to_ia, + Elastic Throughput and General Purpose performance mode. + Valid values: AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, + AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, + AFTER_270_DAYS, or AFTER_365_DAYS.' + type: string + transitionToIa: + description: 'Indicates how long it takes to transition + files to the IA storage class. Valid values: AFTER_1_DAY, + AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, + AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, or AFTER_365_DAYS.' + type: string + transitionToPrimaryStorageClass: + description: 'Describes the policy used to transition a + file from infequent access storage to primary storage. + Valid values: AFTER_1_ACCESS.' 
+ type: string + type: object + type: array + name: + description: The value of the file system's Name tag. + type: string + numberOfMountTargets: + description: The current number of mount targets that the file + system has. + type: number + ownerId: + description: The AWS account that created the file system. If + the file system was createdby an IAM user, the parent account + to which the user belongs is the owner. + type: string + performanceMode: + description: 'The file system performance mode. Can be either + "generalPurpose" or "maxIO" (Default: "generalPurpose").' + type: string + protection: + description: A file system protection object. See protection block + below for details. + properties: + replicationOverwrite: + description: 'Indicates whether replication overwrite protection + is enabled. Valid values: ENABLED or DISABLED.' + type: string + type: object + provisionedThroughputInMibps: + description: The throughput, measured in MiB/s, that you want + to provision for the file system. Only applicable with throughput_mode + set to provisioned. + type: number + sizeInBytes: + description: The latest known metered size (in bytes) of data + stored in the file system, the value is not the exact size that + the file system was at any point in time. See Size In Bytes. + items: + properties: + value: + description: The latest known metered size (in bytes) of + data stored in the file system. + type: number + valueInIa: + description: The latest known metered size (in bytes) of + data stored in the Infrequent Access storage class. + type: number + valueInStandard: + description: The latest known metered size (in bytes) of + data stored in the Standard storage class. + type: number + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughputMode: + description: 'Throughput mode for the file system. Defaults to + bursting. Valid values: bursting, provisioned, or elastic. When + using provisioned, also set provisioned_throughput_in_mibps.' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/efs.aws.upbound.io_replicationconfigurations.yaml b/package/crds/efs.aws.upbound.io_replicationconfigurations.yaml index 1b19e1b2e1..4a5575a893 100644 --- a/package/crds/efs.aws.upbound.io_replicationconfigurations.yaml +++ b/package/crds/efs.aws.upbound.io_replicationconfigurations.yaml @@ -588,3 +588,564 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ReplicationConfiguration is the Schema for the ReplicationConfigurations + API. Provides an Elastic File System (EFS) Replication Configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ReplicationConfigurationSpec defines the desired state of + ReplicationConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + destination: + description: A destination configuration block (documented below). + properties: + availabilityZoneName: + description: The availability zone in which the replica should + be created. If specified, the replica will be created with + One Zone storage. If omitted, regional storage will be used. + type: string + fileSystemId: + description: The ID of the destination file system for the + replication. If no ID is provided, then EFS creates a new + file system with the default settings. + type: string + kmsKeyId: + description: The Key ID, ARN, alias, or alias ARN of the KMS + key that should be used to encrypt the replica file system. + If omitted, the default KMS key for EFS /aws/elasticfilesystem + will be used. + type: string + region: + description: The region in which the replica should be created. 
+ type: string + type: object + region: + description: |- + The region in which the replica should be created. + Region is the region you'd like your resource to be created in. + type: string + sourceFileSystemId: + description: The ID of the file system that is to be replicated. + type: string + sourceFileSystemIdRef: + description: Reference to a FileSystem in efs to populate sourceFileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceFileSystemIdSelector: + description: Selector for a FileSystem in efs to populate sourceFileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + destination: + description: A destination configuration block (documented below). + properties: + availabilityZoneName: + description: The availability zone in which the replica should + be created. If specified, the replica will be created with + One Zone storage. If omitted, regional storage will be used. + type: string + fileSystemId: + description: The ID of the destination file system for the + replication. If no ID is provided, then EFS creates a new + file system with the default settings. + type: string + kmsKeyId: + description: The Key ID, ARN, alias, or alias ARN of the KMS + key that should be used to encrypt the replica file system. 
+ If omitted, the default KMS key for EFS /aws/elasticfilesystem + will be used. + type: string + type: object + sourceFileSystemId: + description: The ID of the file system that is to be replicated. + type: string + sourceFileSystemIdRef: + description: Reference to a FileSystem in efs to populate sourceFileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceFileSystemIdSelector: + description: Selector for a FileSystem in efs to populate sourceFileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.destination is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destination) + || (has(self.initProvider) && has(self.initProvider.destination))' + status: + description: ReplicationConfigurationStatus defines the observed state + of ReplicationConfiguration. + properties: + atProvider: + properties: + creationTime: + description: When the replication configuration was created. + type: string + destination: + description: A destination configuration block (documented below). + properties: + availabilityZoneName: + description: The availability zone in which the replica should + be created. If specified, the replica will be created with + One Zone storage. If omitted, regional storage will be used. + type: string + fileSystemId: + description: The ID of the destination file system for the + replication. If no ID is provided, then EFS creates a new + file system with the default settings. + type: string + kmsKeyId: + description: The Key ID, ARN, alias, or alias ARN of the KMS + key that should be used to encrypt the replica file system. + If omitted, the default KMS key for EFS /aws/elasticfilesystem + will be used. + type: string + region: + description: The region in which the replica should be created. + type: string + status: + description: The status of the replication. + type: string + type: object + id: + type: string + originalSourceFileSystemArn: + description: The Amazon Resource Name (ARN) of the original source + Amazon EFS file system in the replication configuration. + type: string + sourceFileSystemArn: + description: The Amazon Resource Name (ARN) of the current source + file system in the replication configuration. 
+ type: string + sourceFileSystemId: + description: The ID of the file system that is to be replicated. + type: string + sourceFileSystemRegion: + description: The AWS Region in which the source Amazon EFS file + system is located. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/eks.aws.upbound.io_clusters.yaml b/package/crds/eks.aws.upbound.io_clusters.yaml index 714ac3c670..26536aab38 100644 --- a/package/crds/eks.aws.upbound.io_clusters.yaml +++ b/package/crds/eks.aws.upbound.io_clusters.yaml @@ -1450,3 +1450,1366 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Manages an EKS Cluster + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessConfig: + description: Configuration block for the access config associated + with your cluster, see Amazon EKS Access Entries. + properties: + authenticationMode: + description: The authentication mode for the cluster. Valid + values are CONFIG_MAP, API or API_AND_CONFIG_MAP + type: string + bootstrapClusterCreatorAdminPermissions: + description: Whether or not to bootstrap the access config + values to the cluster. Default is true. + type: boolean + type: object + enabledClusterLogTypes: + description: List of the desired control plane logging to enable. + For more information, see Amazon EKS Control Plane Logging. + items: + type: string + type: array + x-kubernetes-list-type: set + encryptionConfig: + description: Configuration block with encryption configuration + for the cluster. Only available on Kubernetes 1.13 and above + clusters created after March 6, 2020. Detailed below. + properties: + provider: + description: Configuration block with provider for encryption. + Detailed below. 
+ properties: + keyArn: + description: ARN of the Key Management Service (KMS) customer + master key (CMK). The CMK must be symmetric, created + in the same region as the cluster, and if the CMK was + created in a different account, the user must have access + to the CMK. For more information, see Allowing Users + in Other Accounts to Use a CMK in the AWS Key Management + Service Developer Guide. + type: string + type: object + resources: + description: 'List of strings with resources to be encrypted. + Valid values: secrets.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kubernetesNetworkConfig: + description: Configuration block with kubernetes network configuration + for the cluster. Detailed below. + properties: + ipFamily: + description: The IP family used to assign Kubernetes pod and + service addresses. Valid values are ipv4 (default) and ipv6. + You can only specify an IP family when you create a cluster, + changing this value will force a new cluster to be created. + type: string + serviceIpv4Cidr: + description: 'The CIDR block to assign Kubernetes pod and + service IP addresses from. If you don''t specify a block, + Kubernetes assigns addresses from either the 10.100.0.0/16 + or 172.20.0.0/16 CIDR blocks. We recommend that you specify + a block that does not overlap with resources in other networks + that are peered or connected to your VPC. You can only specify + a custom CIDR block when you create a cluster, changing + this value will force a new cluster to be created. The block + must meet the following requirements:' + type: string + type: object + outpostConfig: + description: Configuration block representing the configuration + of your local Amazon EKS cluster on an AWS Outpost. This block + isn't available for creating Amazon EKS clusters on the AWS + cloud. 
+ properties: + controlPlaneInstanceType: + description: 'The Amazon EC2 instance type that you want to + use for your local Amazon EKS cluster on Outposts. The instance + type that you specify is used for all Kubernetes control + plane instances. The instance type can''t be changed after + cluster creation. Choose an instance type based on the number + of nodes that your cluster will have. If your cluster will + have:' + type: string + controlPlanePlacement: + description: |- + An object representing the placement configuration for all the control plane instances of your local Amazon EKS cluster on AWS Outpost. + The control_plane_placement configuration block supports the following arguments: + properties: + groupName: + description: The name of the placement group for the Kubernetes + control plane instances. This setting can't be changed + after cluster creation. + type: string + type: object + outpostArns: + description: The ARN of the Outpost that you want to use for + your local Amazon EKS cluster on Outposts. This argument + is a list of arns, but only a single Outpost ARN is supported + currently. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: ARN of the IAM role that provides permissions for + the Kubernetes control plane to make calls to AWS API operations + on your behalf. Ensure the resource configuration includes explicit + dependencies on the IAM Role permissions by adding depends_on + if using the aws_iam_role_policy resource or aws_iam_role_policy_attachment + resource, otherwise EKS cannot delete EKS managed EC2 infrastructure + such as Security Groups on EKS Cluster deletion. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + version: + description: – Desired Kubernetes master version. If you do not + specify a value, the latest available version at resource creation + is used and no upgrades will occur except those automatically + triggered by EKS. The value must be configured and increased + to upgrade the version when desired. Downgrades are not supported + by EKS. + type: string + vpcConfig: + description: Configuration block for the VPC associated with your + cluster. Amazon EKS VPC resources have specific requirements + to work properly with Kubernetes. For more information, see + Cluster VPC Considerations and Cluster Security Group Considerations + in the Amazon EKS User Guide. Detailed below. Also contains + attributes detailed in the Attributes section. + properties: + endpointPrivateAccess: + description: Whether the Amazon EKS private API server endpoint + is enabled. Default is false. + type: boolean + endpointPublicAccess: + description: Whether the Amazon EKS public API server endpoint + is enabled. Default is true. + type: boolean + publicAccessCidrs: + description: List of CIDR blocks. Indicates which CIDR blocks + can access the Amazon EKS public API server endpoint when + enabled. EKS defaults this to a list with 0.0.0.0/0. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: account elastic network interfaces that Amazon + EKS creates to use to allow communication between your worker + nodes and the Kubernetes control plane. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: account elastic network interfaces in these subnets + to allow communication between your worker nodes and the + Kubernetes control plane. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessConfig: + description: Configuration block for the access config associated + with your cluster, see Amazon EKS Access Entries. + properties: + authenticationMode: + description: The authentication mode for the cluster. Valid + values are CONFIG_MAP, API or API_AND_CONFIG_MAP + type: string + bootstrapClusterCreatorAdminPermissions: + description: Whether or not to bootstrap the access config + values to the cluster. 
Default is true. + type: boolean + type: object + enabledClusterLogTypes: + description: List of the desired control plane logging to enable. + For more information, see Amazon EKS Control Plane Logging. + items: + type: string + type: array + x-kubernetes-list-type: set + encryptionConfig: + description: Configuration block with encryption configuration + for the cluster. Only available on Kubernetes 1.13 and above + clusters created after March 6, 2020. Detailed below. + properties: + provider: + description: Configuration block with provider for encryption. + Detailed below. + properties: + keyArn: + description: ARN of the Key Management Service (KMS) customer + master key (CMK). The CMK must be symmetric, created + in the same region as the cluster, and if the CMK was + created in a different account, the user must have access + to the CMK. For more information, see Allowing Users + in Other Accounts to Use a CMK in the AWS Key Management + Service Developer Guide. + type: string + type: object + resources: + description: 'List of strings with resources to be encrypted. + Valid values: secrets.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kubernetesNetworkConfig: + description: Configuration block with kubernetes network configuration + for the cluster. Detailed below. + properties: + ipFamily: + description: The IP family used to assign Kubernetes pod and + service addresses. Valid values are ipv4 (default) and ipv6. + You can only specify an IP family when you create a cluster, + changing this value will force a new cluster to be created. + type: string + serviceIpv4Cidr: + description: 'The CIDR block to assign Kubernetes pod and + service IP addresses from. If you don''t specify a block, + Kubernetes assigns addresses from either the 10.100.0.0/16 + or 172.20.0.0/16 CIDR blocks. We recommend that you specify + a block that does not overlap with resources in other networks + that are peered or connected to your VPC. 
You can only specify + a custom CIDR block when you create a cluster, changing + this value will force a new cluster to be created. The block + must meet the following requirements:' + type: string + type: object + outpostConfig: + description: Configuration block representing the configuration + of your local Amazon EKS cluster on an AWS Outpost. This block + isn't available for creating Amazon EKS clusters on the AWS + cloud. + properties: + controlPlaneInstanceType: + description: 'The Amazon EC2 instance type that you want to + use for your local Amazon EKS cluster on Outposts. The instance + type that you specify is used for all Kubernetes control + plane instances. The instance type can''t be changed after + cluster creation. Choose an instance type based on the number + of nodes that your cluster will have. If your cluster will + have:' + type: string + controlPlanePlacement: + description: |- + An object representing the placement configuration for all the control plane instances of your local Amazon EKS cluster on AWS Outpost. + The control_plane_placement configuration block supports the following arguments: + properties: + groupName: + description: The name of the placement group for the Kubernetes + control plane instances. This setting can't be changed + after cluster creation. + type: string + type: object + outpostArns: + description: The ARN of the Outpost that you want to use for + your local Amazon EKS cluster on Outposts. This argument + is a list of arns, but only a single Outpost ARN is supported + currently. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + roleArn: + description: ARN of the IAM role that provides permissions for + the Kubernetes control plane to make calls to AWS API operations + on your behalf. 
Ensure the resource configuration includes explicit + dependencies on the IAM Role permissions by adding depends_on + if using the aws_iam_role_policy resource or aws_iam_role_policy_attachment + resource, otherwise EKS cannot delete EKS managed EC2 infrastructure + such as Security Groups on EKS Cluster deletion. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + version: + description: – Desired Kubernetes master version. If you do not + specify a value, the latest available version at resource creation + is used and no upgrades will occur except those automatically + triggered by EKS. The value must be configured and increased + to upgrade the version when desired. Downgrades are not supported + by EKS. + type: string + vpcConfig: + description: Configuration block for the VPC associated with your + cluster. Amazon EKS VPC resources have specific requirements + to work properly with Kubernetes. For more information, see + Cluster VPC Considerations and Cluster Security Group Considerations + in the Amazon EKS User Guide. Detailed below. Also contains + attributes detailed in the Attributes section. + properties: + endpointPrivateAccess: + description: Whether the Amazon EKS private API server endpoint + is enabled. Default is false. + type: boolean + endpointPublicAccess: + description: Whether the Amazon EKS public API server endpoint + is enabled. Default is true. + type: boolean + publicAccessCidrs: + description: List of CIDR blocks. Indicates which CIDR blocks + can access the Amazon EKS public API server endpoint when + enabled. EKS defaults this to a list with 0.0.0.0/0. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: account elastic network interfaces that Amazon + EKS creates to use to allow communication between your worker + nodes and the Kubernetes control plane. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: account elastic network interfaces in these subnets + to allow communication between your worker nodes and the + Kubernetes control plane. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.vpcConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vpcConfig) + || (has(self.initProvider) && has(self.initProvider.vpcConfig))' + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + accessConfig: + description: Configuration block for the access config associated + with your cluster, see Amazon EKS Access Entries. + properties: + authenticationMode: + description: The authentication mode for the cluster. Valid + values are CONFIG_MAP, API or API_AND_CONFIG_MAP + type: string + bootstrapClusterCreatorAdminPermissions: + description: Whether or not to bootstrap the access config + values to the cluster. Default is true. + type: boolean + type: object + arn: + description: ARN of the cluster. 
+ type: string + certificateAuthority: + description: Attribute block containing certificate-authority-data + for your cluster. Detailed below. + items: + properties: + data: + description: Base64 encoded certificate data required to + communicate with your cluster. Add this to the certificate-authority-data + section of the kubeconfig file for your cluster. + type: string + type: object + type: array + clusterId: + description: The ID of your local Amazon EKS cluster on the AWS + Outpost. This attribute isn't available for an AWS EKS cluster + on AWS cloud. + type: string + createdAt: + description: Unix epoch timestamp in seconds for when the cluster + was created. + type: string + enabledClusterLogTypes: + description: List of the desired control plane logging to enable. + For more information, see Amazon EKS Control Plane Logging. + items: + type: string + type: array + x-kubernetes-list-type: set + encryptionConfig: + description: Configuration block with encryption configuration + for the cluster. Only available on Kubernetes 1.13 and above + clusters created after March 6, 2020. Detailed below. + properties: + provider: + description: Configuration block with provider for encryption. + Detailed below. + properties: + keyArn: + description: ARN of the Key Management Service (KMS) customer + master key (CMK). The CMK must be symmetric, created + in the same region as the cluster, and if the CMK was + created in a different account, the user must have access + to the CMK. For more information, see Allowing Users + in Other Accounts to Use a CMK in the AWS Key Management + Service Developer Guide. + type: string + type: object + resources: + description: 'List of strings with resources to be encrypted. + Valid values: secrets.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + endpoint: + description: Endpoint for your Kubernetes API server. + type: string + id: + description: Name of the cluster. 
+ type: string + identity: + description: Attribute block containing identity provider information + for your cluster. Only available on Kubernetes version 1.13 + and 1.14 clusters created or upgraded on or after September + 3, 2019. Detailed below. + items: + properties: + oidc: + description: Nested block containing OpenID Connect identity + provider information for the cluster. Detailed below. + items: + properties: + issuer: + description: Issuer URL for the OpenID Connect identity + provider. + type: string + type: object + type: array + type: object + type: array + kubernetesNetworkConfig: + description: Configuration block with kubernetes network configuration + for the cluster. Detailed below. + properties: + ipFamily: + description: The IP family used to assign Kubernetes pod and + service addresses. Valid values are ipv4 (default) and ipv6. + You can only specify an IP family when you create a cluster, + changing this value will force a new cluster to be created. + type: string + serviceIpv4Cidr: + description: 'The CIDR block to assign Kubernetes pod and + service IP addresses from. If you don''t specify a block, + Kubernetes assigns addresses from either the 10.100.0.0/16 + or 172.20.0.0/16 CIDR blocks. We recommend that you specify + a block that does not overlap with resources in other networks + that are peered or connected to your VPC. You can only specify + a custom CIDR block when you create a cluster, changing + this value will force a new cluster to be created. The block + must meet the following requirements:' + type: string + serviceIpv6Cidr: + description: The CIDR block that Kubernetes pod and service + IP addresses are assigned from if you specified ipv6 for + ipFamily when you created the cluster. Kubernetes assigns + service addresses from the unique local address range (fc00::/7) + because you can't specify a custom IPv6 CIDR block when + you create the cluster. 
+ type: string + type: object + outpostConfig: + description: Configuration block representing the configuration + of your local Amazon EKS cluster on an AWS Outpost. This block + isn't available for creating Amazon EKS clusters on the AWS + cloud. + properties: + controlPlaneInstanceType: + description: 'The Amazon EC2 instance type that you want to + use for your local Amazon EKS cluster on Outposts. The instance + type that you specify is used for all Kubernetes control + plane instances. The instance type can''t be changed after + cluster creation. Choose an instance type based on the number + of nodes that your cluster will have. If your cluster will + have:' + type: string + controlPlanePlacement: + description: |- + An object representing the placement configuration for all the control plane instances of your local Amazon EKS cluster on AWS Outpost. + The control_plane_placement configuration block supports the following arguments: + properties: + groupName: + description: The name of the placement group for the Kubernetes + control plane instances. This setting can't be changed + after cluster creation. + type: string + type: object + outpostArns: + description: The ARN of the Outpost that you want to use for + your local Amazon EKS cluster on Outposts. This argument + is a list of arns, but only a single Outpost ARN is supported + currently. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + platformVersion: + description: Platform version for the cluster. + type: string + roleArn: + description: ARN of the IAM role that provides permissions for + the Kubernetes control plane to make calls to AWS API operations + on your behalf. 
Ensure the resource configuration includes explicit + dependencies on the IAM Role permissions by adding depends_on + if using the aws_iam_role_policy resource or aws_iam_role_policy_attachment + resource, otherwise EKS cannot delete EKS managed EC2 infrastructure + such as Security Groups on EKS Cluster deletion. + type: string + status: + description: Status of the EKS cluster. One of CREATING, ACTIVE, + DELETING, FAILED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + version: + description: – Desired Kubernetes master version. If you do not + specify a value, the latest available version at resource creation + is used and no upgrades will occur except those automatically + triggered by EKS. The value must be configured and increased + to upgrade the version when desired. Downgrades are not supported + by EKS. + type: string + vpcConfig: + description: Configuration block for the VPC associated with your + cluster. Amazon EKS VPC resources have specific requirements + to work properly with Kubernetes. For more information, see + Cluster VPC Considerations and Cluster Security Group Considerations + in the Amazon EKS User Guide. Detailed below. Also contains + attributes detailed in the Attributes section. + properties: + clusterSecurityGroupId: + description: Cluster security group that was created by Amazon + EKS for the cluster. Managed node groups use this security + group for control-plane-to-data-plane communication. + type: string + endpointPrivateAccess: + description: Whether the Amazon EKS private API server endpoint + is enabled. Default is false. 
+ type: boolean + endpointPublicAccess: + description: Whether the Amazon EKS public API server endpoint + is enabled. Default is true. + type: boolean + publicAccessCidrs: + description: List of CIDR blocks. Indicates which CIDR blocks + can access the Amazon EKS public API server endpoint when + enabled. EKS defaults this to a list with 0.0.0.0/0. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIds: + description: account elastic network interfaces that Amazon + EKS creates to use to allow communication between your worker + nodes and the Kubernetes control plane. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: account elastic network interfaces in these subnets + to allow communication between your worker nodes and the + Kubernetes control plane. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: ID of the VPC associated with your cluster. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/eks.aws.upbound.io_identityproviderconfigs.yaml b/package/crds/eks.aws.upbound.io_identityproviderconfigs.yaml index 700ac37663..cd04507a67 100644 --- a/package/crds/eks.aws.upbound.io_identityproviderconfigs.yaml +++ b/package/crds/eks.aws.upbound.io_identityproviderconfigs.yaml @@ -639,3 +639,615 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: IdentityProviderConfig is the Schema for the IdentityProviderConfigs + API. Manages an EKS Identity Provider Configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IdentityProviderConfigSpec defines the desired state of IdentityProviderConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterName: + description: – Name of the EKS Cluster. + type: string + clusterNameRef: + description: Reference to a Cluster in eks to populate clusterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterNameSelector: + description: Selector for a Cluster in eks to populate clusterName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + oidc: + description: Nested attribute containing OpenID Connect identity + provider information for the cluster. Detailed below. + properties: + clientId: + description: – Client ID for the OpenID Connect identity + provider. + type: string + groupsClaim: + description: The JWT claim that the provider will use to return + groups. 
+ type: string + groupsPrefix: + description: A prefix that is prepended to group claims e.g., + oidc:. + type: string + issuerUrl: + description: Issuer URL for the OpenID Connect identity provider. + type: string + requiredClaims: + additionalProperties: + type: string + description: The key value pairs that describe required claims + in the identity token. + type: object + x-kubernetes-map-type: granular + usernameClaim: + description: The JWT claim that the provider will use as the + username. + type: string + usernamePrefix: + description: A prefix that is prepended to username claims. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterName: + description: – Name of the EKS Cluster. + type: string + clusterNameRef: + description: Reference to a Cluster in eks to populate clusterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterNameSelector: + description: Selector for a Cluster in eks to populate clusterName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + oidc: + description: Nested attribute containing OpenID Connect identity + provider information for the cluster. Detailed below. + properties: + clientId: + description: – Client ID for the OpenID Connect identity + provider. + type: string + groupsClaim: + description: The JWT claim that the provider will use to return + groups. + type: string + groupsPrefix: + description: A prefix that is prepended to group claims e.g., + oidc:. + type: string + issuerUrl: + description: Issuer URL for the OpenID Connect identity provider. + type: string + requiredClaims: + additionalProperties: + type: string + description: The key value pairs that describe required claims + in the identity token. + type: object + x-kubernetes-map-type: granular + usernameClaim: + description: The JWT claim that the provider will use as the + username. + type: string + usernamePrefix: + description: A prefix that is prepended to username claims. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.oidc is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.oidc) + || (has(self.initProvider) && has(self.initProvider.oidc))' + status: + description: IdentityProviderConfigStatus defines the observed state of + IdentityProviderConfig. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the EKS Identity Provider + Configuration. + type: string + clusterName: + description: – Name of the EKS Cluster. + type: string + id: + description: EKS Cluster name and EKS Identity Provider Configuration + name separated by a colon (:). + type: string + oidc: + description: Nested attribute containing OpenID Connect identity + provider information for the cluster. Detailed below. + properties: + clientId: + description: – Client ID for the OpenID Connect identity + provider. 
+ type: string + groupsClaim: + description: The JWT claim that the provider will use to return + groups. + type: string + groupsPrefix: + description: A prefix that is prepended to group claims e.g., + oidc:. + type: string + issuerUrl: + description: Issuer URL for the OpenID Connect identity provider. + type: string + requiredClaims: + additionalProperties: + type: string + description: The key value pairs that describe required claims + in the identity token. + type: object + x-kubernetes-map-type: granular + usernameClaim: + description: The JWT claim that the provider will use as the + username. + type: string + usernamePrefix: + description: A prefix that is prepended to username claims. + type: string + type: object + status: + description: Status of the EKS Identity Provider Configuration. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/eks.aws.upbound.io_nodegroups.yaml b/package/crds/eks.aws.upbound.io_nodegroups.yaml index 900063a44a..7bc5919461 100644 --- a/package/crds/eks.aws.upbound.io_nodegroups.yaml +++ b/package/crds/eks.aws.upbound.io_nodegroups.yaml @@ -1539,3 +1539,1497 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: NodeGroup is the Schema for the NodeGroups API. 
Manages an EKS + Node Group + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NodeGroupSpec defines the desired state of NodeGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + amiType: + description: Type of Amazon Machine Image (AMI) associated with + the EKS Node Group. See the AWS documentation for valid values. + type: string + capacityType: + description: 'Type of capacity associated with the EKS Node Group. + Valid values: ON_DEMAND, SPOT.' + type: string + clusterName: + description: – Name of the EKS Cluster. 
+ type: string + clusterNameRef: + description: Reference to a Cluster in eks to populate clusterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterNameSelector: + description: Selector for a Cluster in eks to populate clusterName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + diskSize: + description: Disk size in GiB for worker nodes. Defaults to 50 + for Windows, 20 all other node groups. + type: number + forceUpdateVersion: + description: Force version update if existing pods are unable + to be drained due to a pod disruption budget issue. + type: boolean + instanceTypes: + description: List of instance types associated with the EKS Node + Group. Defaults to ["t3.medium"]. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Key-value map of Kubernetes labels. Only labels that + are applied with the EKS API are managed by this argument. Other + Kubernetes labels applied to the EKS Node Group will not be + managed. + type: object + x-kubernetes-map-type: granular + launchTemplate: + description: Configuration block with Launch Template settings. + See launch_template below for details. Conflicts with remote_access. + properties: + id: + description: Identifier of the EC2 Launch Template. Conflicts + with name. + type: string + name: + description: Name of the EC2 Launch Template. Conflicts with + id. + type: string + version: + description: EC2 Launch Template version number. While the + API accepts values like $Default and $Latest, the API will + convert the value to the associated version number (e.g., + 1). Using the default_version or latest_version attribute + of the aws_launch_template resource or data source is recommended + for this argument. + type: string + type: object + nodeRoleArn: + description: – Amazon Resource Name (ARN) of the IAM Role that + provides permissions for the EKS Node Group. + type: string + nodeRoleArnRef: + description: Reference to a Role in iam to populate nodeRoleArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nodeRoleArnSelector: + description: Selector for a Role in iam to populate nodeRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + releaseVersion: + description: – AMI version of the EKS Node Group. Defaults to + latest version for Kubernetes version. + type: string + remoteAccess: + description: Configuration block with remote access settings. + See remote_access below for details. Conflicts with launch_template. + properties: + ec2SshKey: + description: EC2 Key Pair name that provides access for remote + communication with the worker nodes in the EKS Node Group. + If you specify this configuration, but do not specify source_security_group_ids + when you create an EKS Node Group, either port 3389 for + Windows, or port 22 for all other operating systems is opened + on the worker nodes to the Internet (0.0.0.0/0). For Windows + nodes, this will allow you to use RDP, for all others this + allows you to SSH into the worker nodes. + type: string + sourceSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + sourceSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + sourceSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate sourceSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceSecurityGroupIds: + description: Set of EC2 Security Group IDs to allow SSH access + (port 22) from on the worker nodes. If you specify ec2_ssh_key, + but do not specify this configuration when you create an + EKS Node Group, port 22 on the worker nodes is opened to + the Internet (0.0.0.0/0). + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + scalingConfig: + description: Configuration block with scaling settings. See scaling_config + below for details. + properties: + desiredSize: + description: Desired number of worker nodes. 
+ type: number + maxSize: + description: Maximum number of worker nodes. + type: number + minSize: + description: Minimum number of worker nodes. + type: number + type: object + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: Identifiers of EC2 Subnets to associate with the + EKS Node Group. Amazon EKS managed node groups can be launched + in both public and private subnets. If you plan to deploy load + balancers to a subnet, the private subnet must have tag kubernetes.io/role/internal-elb, + the public subnet must have tag kubernetes.io/role/elb. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taint: + description: The Kubernetes taints to be applied to the nodes + in the node group. Maximum of 50 taints per node group. See + taint below for details. + items: + properties: + effect: + description: 'The effect of the taint. Valid values: NO_SCHEDULE, + NO_EXECUTE, PREFER_NO_SCHEDULE.' + type: string + key: + description: The key of the taint. Maximum length of 63. + type: string + value: + description: The value of the taint. Maximum length of 63. + type: string + type: object + type: array + updateConfig: + description: Configuration block with update settings. See update_config + below for details. + properties: + maxUnavailable: + description: Desired max number of unavailable worker nodes + during node group update. + type: number + maxUnavailablePercentage: + description: Desired max percentage of unavailable worker + nodes during node group update. 
+ type: number + type: object + version: + description: – Kubernetes version. Defaults to EKS Cluster Kubernetes + version. + type: string + versionRef: + description: Reference to a Cluster in eks to populate version. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + versionSelector: + description: Selector for a Cluster in eks to populate version. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + amiType: + description: Type of Amazon Machine Image (AMI) associated with + the EKS Node Group. See the AWS documentation for valid values. + type: string + capacityType: + description: 'Type of capacity associated with the EKS Node Group. + Valid values: ON_DEMAND, SPOT.' + type: string + diskSize: + description: Disk size in GiB for worker nodes. Defaults to 50 + for Windows, 20 all other node groups. + type: number + forceUpdateVersion: + description: Force version update if existing pods are unable + to be drained due to a pod disruption budget issue. + type: boolean + instanceTypes: + description: List of instance types associated with the EKS Node + Group. Defaults to ["t3.medium"]. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Key-value map of Kubernetes labels. 
Only labels that + are applied with the EKS API are managed by this argument. Other + Kubernetes labels applied to the EKS Node Group will not be + managed. + type: object + x-kubernetes-map-type: granular + launchTemplate: + description: Configuration block with Launch Template settings. + See launch_template below for details. Conflicts with remote_access. + properties: + id: + description: Identifier of the EC2 Launch Template. Conflicts + with name. + type: string + name: + description: Name of the EC2 Launch Template. Conflicts with + id. + type: string + version: + description: EC2 Launch Template version number. While the + API accepts values like $Default and $Latest, the API will + convert the value to the associated version number (e.g., + 1). Using the default_version or latest_version attribute + of the aws_launch_template resource or data source is recommended + for this argument. + type: string + type: object + nodeRoleArn: + description: – Amazon Resource Name (ARN) of the IAM Role that + provides permissions for the EKS Node Group. + type: string + nodeRoleArnRef: + description: Reference to a Role in iam to populate nodeRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nodeRoleArnSelector: + description: Selector for a Role in iam to populate nodeRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + releaseVersion: + description: – AMI version of the EKS Node Group. Defaults to + latest version for Kubernetes version. + type: string + remoteAccess: + description: Configuration block with remote access settings. + See remote_access below for details. Conflicts with launch_template. + properties: + ec2SshKey: + description: EC2 Key Pair name that provides access for remote + communication with the worker nodes in the EKS Node Group. + If you specify this configuration, but do not specify source_security_group_ids + when you create an EKS Node Group, either port 3389 for + Windows, or port 22 for all other operating systems is opened + on the worker nodes to the Internet (0.0.0.0/0). 
For Windows + nodes, this will allow you to use RDP, for all others this + allows you to SSH into the worker nodes. + type: string + sourceSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + sourceSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + sourceSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate sourceSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceSecurityGroupIds: + description: Set of EC2 Security Group IDs to allow SSH access + (port 22) from on the worker nodes. If you specify ec2_ssh_key, + but do not specify this configuration when you create an + EKS Node Group, port 22 on the worker nodes is opened to + the Internet (0.0.0.0/0). + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + scalingConfig: + description: Configuration block with scaling settings. See scaling_config + below for details. + properties: + desiredSize: + description: Desired number of worker nodes. + type: number + maxSize: + description: Maximum number of worker nodes. + type: number + minSize: + description: Minimum number of worker nodes. + type: number + type: object + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: Identifiers of EC2 Subnets to associate with the + EKS Node Group. Amazon EKS managed node groups can be launched + in both public and private subnets. If you plan to deploy load + balancers to a subnet, the private subnet must have tag kubernetes.io/role/internal-elb, + the public subnet must have tag kubernetes.io/role/elb. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taint: + description: The Kubernetes taints to be applied to the nodes + in the node group. Maximum of 50 taints per node group. See + taint below for details. + items: + properties: + effect: + description: 'The effect of the taint. Valid values: NO_SCHEDULE, + NO_EXECUTE, PREFER_NO_SCHEDULE.' + type: string + key: + description: The key of the taint. Maximum length of 63. + type: string + value: + description: The value of the taint. Maximum length of 63. + type: string + type: object + type: array + updateConfig: + description: Configuration block with update settings. See update_config + below for details. + properties: + maxUnavailable: + description: Desired max number of unavailable worker nodes + during node group update. + type: number + maxUnavailablePercentage: + description: Desired max percentage of unavailable worker + nodes during node group update. + type: number + type: object + version: + description: – Kubernetes version. Defaults to EKS Cluster Kubernetes + version. + type: string + versionRef: + description: Reference to a Cluster in eks to populate version. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + versionSelector: + description: Selector for a Cluster in eks to populate version. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.scalingConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scalingConfig) + || (has(self.initProvider) && has(self.initProvider.scalingConfig))' + status: + description: NodeGroupStatus defines the observed state of NodeGroup. + properties: + atProvider: + properties: + amiType: + description: Type of Amazon Machine Image (AMI) associated with + the EKS Node Group. See the AWS documentation for valid values. 
+ type: string + arn: + description: Amazon Resource Name (ARN) of the EKS Node Group. + type: string + capacityType: + description: 'Type of capacity associated with the EKS Node Group. + Valid values: ON_DEMAND, SPOT.' + type: string + clusterName: + description: – Name of the EKS Cluster. + type: string + diskSize: + description: Disk size in GiB for worker nodes. Defaults to 50 + for Windows, 20 all other node groups. + type: number + forceUpdateVersion: + description: Force version update if existing pods are unable + to be drained due to a pod disruption budget issue. + type: boolean + id: + description: EKS Cluster name and EKS Node Group name separated + by a colon (:). + type: string + instanceTypes: + description: List of instance types associated with the EKS Node + Group. Defaults to ["t3.medium"]. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: Key-value map of Kubernetes labels. Only labels that + are applied with the EKS API are managed by this argument. Other + Kubernetes labels applied to the EKS Node Group will not be + managed. + type: object + x-kubernetes-map-type: granular + launchTemplate: + description: Configuration block with Launch Template settings. + See launch_template below for details. Conflicts with remote_access. + properties: + id: + description: Identifier of the EC2 Launch Template. Conflicts + with name. + type: string + name: + description: Name of the EC2 Launch Template. Conflicts with + id. + type: string + version: + description: EC2 Launch Template version number. While the + API accepts values like $Default and $Latest, the API will + convert the value to the associated version number (e.g., + 1). Using the default_version or latest_version attribute + of the aws_launch_template resource or data source is recommended + for this argument. 
+ type: string + type: object + nodeRoleArn: + description: – Amazon Resource Name (ARN) of the IAM Role that + provides permissions for the EKS Node Group. + type: string + releaseVersion: + description: – AMI version of the EKS Node Group. Defaults to + latest version for Kubernetes version. + type: string + remoteAccess: + description: Configuration block with remote access settings. + See remote_access below for details. Conflicts with launch_template. + properties: + ec2SshKey: + description: EC2 Key Pair name that provides access for remote + communication with the worker nodes in the EKS Node Group. + If you specify this configuration, but do not specify source_security_group_ids + when you create an EKS Node Group, either port 3389 for + Windows, or port 22 for all other operating systems is opened + on the worker nodes to the Internet (0.0.0.0/0). For Windows + nodes, this will allow you to use RDP, for all others this + allows you to SSH into the worker nodes. + type: string + sourceSecurityGroupIds: + description: Set of EC2 Security Group IDs to allow SSH access + (port 22) from on the worker nodes. If you specify ec2_ssh_key, + but do not specify this configuration when you create an + EKS Node Group, port 22 on the worker nodes is opened to + the Internet (0.0.0.0/0). + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + resources: + description: List of objects containing information about underlying + resources. + items: + properties: + autoscalingGroups: + description: List of objects containing information about + AutoScaling Groups. + items: + properties: + name: + description: Name of the AutoScaling Group. + type: string + type: object + type: array + remoteAccessSecurityGroupId: + description: Identifier of the remote access EC2 Security + Group. + type: string + type: object + type: array + scalingConfig: + description: Configuration block with scaling settings. See scaling_config + below for details. 
+ properties: + desiredSize: + description: Desired number of worker nodes. + type: number + maxSize: + description: Maximum number of worker nodes. + type: number + minSize: + description: Minimum number of worker nodes. + type: number + type: object + status: + description: Status of the EKS Node Group. + type: string + subnetIds: + description: Identifiers of EC2 Subnets to associate with the + EKS Node Group. Amazon EKS managed node groups can be launched + in both public and private subnets. If you plan to deploy load + balancers to a subnet, the private subnet must have tag kubernetes.io/role/internal-elb, + the public subnet must have tag kubernetes.io/role/elb. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + taint: + description: The Kubernetes taints to be applied to the nodes + in the node group. Maximum of 50 taints per node group. See + taint below for details. + items: + properties: + effect: + description: 'The effect of the taint. Valid values: NO_SCHEDULE, + NO_EXECUTE, PREFER_NO_SCHEDULE.' + type: string + key: + description: The key of the taint. Maximum length of 63. + type: string + value: + description: The value of the taint. Maximum length of 63. + type: string + type: object + type: array + updateConfig: + description: Configuration block with update settings. See update_config + below for details. + properties: + maxUnavailable: + description: Desired max number of unavailable worker nodes + during node group update. 
+ type: number + maxUnavailablePercentage: + description: Desired max percentage of unavailable worker + nodes during node group update. + type: number + type: object + version: + description: – Kubernetes version. Defaults to EKS Cluster Kubernetes + version. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elasticache.aws.upbound.io_users.yaml b/package/crds/elasticache.aws.upbound.io_users.yaml index 25257e5450..63b28c4e67 100644 --- a/package/crds/elasticache.aws.upbound.io_users.yaml +++ b/package/crds/elasticache.aws.upbound.io_users.yaml @@ -495,3 +495,474 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: User is the Schema for the Users API. Provides an ElastiCache + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserSpec defines the desired state of User + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessString: + description: Access permissions string used for this user. See + Specifying Permissions Using an Access String for more details. + type: string + authenticationMode: + description: Denotes the user's authentication properties. Detailed + below. + properties: + passwordsSecretRef: + description: Specifies the passwords to use for authentication + if type is set to password. + items: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: array + type: + description: 'Specifies the authentication type. Possible + options are: password, no-password-required or iam.' + type: string + type: object + engine: + description: The current supported value is REDIS. 
+ type: string + noPasswordRequired: + description: Indicates a password is not required for this user. + type: boolean + passwordsSecretRef: + description: Passwords used for this user. You can create up to + two passwords for each user. + items: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userName: + description: The username of the user. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessString: + description: Access permissions string used for this user. See + Specifying Permissions Using an Access String for more details. + type: string + authenticationMode: + description: Denotes the user's authentication properties. Detailed + below. 
+ properties: + passwordsSecretRef: + items: + type: string + type: array + type: + description: 'Specifies the authentication type. Possible + options are: password, no-password-required or iam.' + type: string + type: object + engine: + description: The current supported value is REDIS. + type: string + noPasswordRequired: + description: Indicates a password is not required for this user. + type: boolean + passwordsSecretRef: + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userName: + description: The username of the user. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.accessString is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.accessString) + || (has(self.initProvider) && has(self.initProvider.accessString))' + - message: spec.forProvider.engine is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engine) + || (has(self.initProvider) && has(self.initProvider.engine))' + - message: spec.forProvider.userName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.userName) + || (has(self.initProvider) && has(self.initProvider.userName))' + status: + description: UserStatus defines the observed state of User. + properties: + atProvider: + properties: + accessString: + description: Access permissions string used for this user. See + Specifying Permissions Using an Access String for more details. + type: string + arn: + description: The ARN of the created ElastiCache User. + type: string + authenticationMode: + description: Denotes the user's authentication properties. Detailed + below. 
+ properties: + passwordCount: + type: number + type: + description: 'Specifies the authentication type. Possible + options are: password, no-password-required or iam.' + type: string + type: object + engine: + description: The current supported value is REDIS. + type: string + id: + type: string + noPasswordRequired: + description: Indicates a password is not required for this user. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + userName: + description: The username of the user. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elasticbeanstalk.aws.upbound.io_applications.yaml b/package/crds/elasticbeanstalk.aws.upbound.io_applications.yaml index f58f19ac43..67603a92f4 100644 --- a/package/crds/elasticbeanstalk.aws.upbound.io_applications.yaml +++ b/package/crds/elasticbeanstalk.aws.upbound.io_applications.yaml @@ -589,3 +589,565 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Application is the Schema for the Applications API. Provides + an Elastic Beanstalk Application Resource + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ApplicationSpec defines the desired state of Application + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appversionLifecycle: + properties: + deleteSourceFromS3: + description: Set to true to delete a version's source bundle + from S3 when the application version is deleted. + type: boolean + maxAgeInDays: + description: The number of days to retain an application version + ('max_age_in_days' and 'max_count' cannot be enabled simultaneously.). + type: number + maxCount: + description: The maximum number of application versions to + retain ('max_age_in_days' and 'max_count' cannot be enabled + simultaneously.). + type: number + serviceRole: + description: The ARN of an IAM service role under which the + application version is deleted. Elastic Beanstalk must + have permission to assume this role. + type: string + serviceRoleRef: + description: Reference to a Role in iam to populate serviceRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleSelector: + description: Selector for a Role in iam to populate serviceRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + description: + description: Short description of the application + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appversionLifecycle: + properties: + deleteSourceFromS3: + description: Set to true to delete a version's source bundle + from S3 when the application version is deleted. + type: boolean + maxAgeInDays: + description: The number of days to retain an application version + ('max_age_in_days' and 'max_count' cannot be enabled simultaneously.). + type: number + maxCount: + description: The maximum number of application versions to + retain ('max_age_in_days' and 'max_count' cannot be enabled + simultaneously.). + type: number + serviceRole: + description: The ARN of an IAM service role under which the + application version is deleted. Elastic Beanstalk must + have permission to assume this role. + type: string + serviceRoleRef: + description: Reference to a Role in iam to populate serviceRole. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleSelector: + description: Selector for a Role in iam to populate serviceRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + description: + description: Short description of the application + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ApplicationStatus defines the observed state of Application. 
+ properties: + atProvider: + properties: + appversionLifecycle: + properties: + deleteSourceFromS3: + description: Set to true to delete a version's source bundle + from S3 when the application version is deleted. + type: boolean + maxAgeInDays: + description: The number of days to retain an application version + ('max_age_in_days' and 'max_count' cannot be enabled simultaneously.). + type: number + maxCount: + description: The maximum number of application versions to + retain ('max_age_in_days' and 'max_count' cannot be enabled + simultaneously.). + type: number + serviceRole: + description: The ARN of an IAM service role under which the + application version is deleted. Elastic Beanstalk must + have permission to assume this role. + type: string + type: object + arn: + description: The ARN assigned by AWS for this Elastic Beanstalk + Application. + type: string + description: + description: Short description of the application + type: string + id: + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elasticsearch.aws.upbound.io_domains.yaml b/package/crds/elasticsearch.aws.upbound.io_domains.yaml index 7f02cb1266..0a37af3b37 100644 --- a/package/crds/elasticsearch.aws.upbound.io_domains.yaml +++ b/package/crds/elasticsearch.aws.upbound.io_domains.yaml @@ -1562,3 +1562,1454 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Domain is the Schema for the Domains API. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DomainSpec defines the desired state of Domain + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessPolicies: + description: IAM policy document specifying the access policies + for the domain. + type: string + advancedOptions: + additionalProperties: + type: string + description: Key-value string pairs to specify advanced configuration + options. + type: object + x-kubernetes-map-type: granular + advancedSecurityOptions: + description: Configuration block for fine-grained access control. + Detailed below. 
+ properties: + enabled: + description: Whether advanced security is enabled. + type: boolean + internalUserDatabaseEnabled: + description: Whether the internal user database is enabled. + If not set, defaults to false by the AWS API. + type: boolean + masterUserOptions: + description: Configuration block for the main user. Detailed + below. + properties: + masterUserArn: + description: ARN for the main user. Only specify if internal_user_database_enabled + is not set or set to false. + type: string + masterUserName: + description: Main user's username, which is stored in + the Amazon Elasticsearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + type: string + masterUserPasswordSecretRef: + description: Main user's password, which is stored in + the Amazon Elasticsearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: object + autoTuneOptions: + description: Configuration block for the Auto-Tune options of + the domain. Detailed below. + properties: + desiredState: + description: 'The Auto-Tune desired state for the domain. + Valid values: ENABLED or DISABLED.' + type: string + maintenanceSchedule: + description: Configuration block for Auto-Tune maintenance + windows. Can be specified multiple times for each maintenance + window. Detailed below. + items: + properties: + cronExpressionForRecurrence: + description: A cron expression specifying the recurrence + pattern for an Auto-Tune maintenance schedule. + type: string + duration: + description: Configuration block for the duration of + the Auto-Tune maintenance window. Detailed below. 
+ properties: + unit: + description: 'The unit of time specifying the duration + of an Auto-Tune maintenance window. Valid values: + HOURS.' + type: string + value: + description: An integer specifying the value of + the duration of an Auto-Tune maintenance window. + type: number + type: object + startAt: + description: Date and time at which to start the Auto-Tune + maintenance schedule in RFC3339 format. + type: string + type: object + type: array + rollbackOnDisable: + description: 'Whether to roll back to default Auto-Tune settings + when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK + or NO_ROLLBACK.' + type: string + type: object + clusterConfig: + description: Configuration block for the cluster of the domain. + Detailed below. + properties: + coldStorageOptions: + description: Configuration block containing cold storage configuration. + Detailed below. + properties: + enabled: + description: Boolean to enable cold storage for an Elasticsearch + domain. Defaults to false. Master and ultrawarm nodes + must be enabled for cold storage. + type: boolean + type: object + dedicatedMasterCount: + description: Number of dedicated main nodes in the cluster. + type: number + dedicatedMasterEnabled: + description: Whether dedicated main nodes are enabled for + the cluster. + type: boolean + dedicatedMasterType: + description: Instance type of the dedicated main nodes in + the cluster. + type: string + instanceCount: + description: Number of instances in the cluster. + type: number + instanceType: + description: Instance type of data nodes in the cluster. + type: string + warmCount: + description: Number of warm nodes in the cluster. Valid values + are between 2 and 150. warm_count can be only and must be + set when warm_enabled is set to true. + type: number + warmEnabled: + description: Whether to enable warm storage. + type: boolean + warmType: + description: Instance type for the Elasticsearch cluster's + warm nodes. 
Valid values are ultrawarm1.medium.elasticsearch, + ultrawarm1.large.elasticsearch and ultrawarm1.xlarge.elasticsearch. + warm_type can be only and must be set when warm_enabled + is set to true. + type: string + zoneAwarenessConfig: + description: Configuration block containing zone awareness + settings. Detailed below. + properties: + availabilityZoneCount: + description: 'Number of Availability Zones for the domain + to use with zone_awareness_enabled. Defaults to 2. Valid + values: 2 or 3.' + type: number + type: object + zoneAwarenessEnabled: + description: Whether zone awareness is enabled, set to true + for multi-az deployment. To enable awareness with three + Availability Zones, the availability_zone_count within the + zone_awareness_config must be set to 3. + type: boolean + type: object + cognitoOptions: + description: Configuration block for authenticating Kibana with + Cognito. Detailed below. + properties: + enabled: + description: Whether Amazon Cognito authentication with Kibana + is enabled or not. + type: boolean + identityPoolId: + description: ID of the Cognito Identity Pool to use. + type: string + roleArn: + description: ARN of the IAM role that has the AmazonESCognitoAccess + policy attached. + type: string + userPoolId: + description: ID of the Cognito User Pool to use. + type: string + type: object + domainEndpointOptions: + description: Configuration block for domain endpoint HTTP(S) related + options. Detailed below. + properties: + customEndpoint: + description: Fully qualified domain for your custom endpoint. + type: string + customEndpointCertificateArn: + description: ACM certificate ARN for your custom endpoint. + type: string + customEndpointEnabled: + description: Whether to enable custom endpoint for the Elasticsearch + domain. + type: boolean + enforceHttps: + description: Whether or not to require HTTPS. Defaults to + true. 
+ type: boolean + tlsSecurityPolicy: + description: 'Name of the TLS security policy that needs to + be applied to the HTTPS endpoint. Valid values: Policy-Min-TLS-1-0-2019-07 + and Policy-Min-TLS-1-2-2019-07.' + type: string + type: object + ebsOptions: + description: Configuration block for EBS related options, may + be required based on chosen instance size. Detailed below. + properties: + ebsEnabled: + description: Whether EBS volumes are attached to data nodes + in the domain. + type: boolean + iops: + description: Baseline input/output (I/O) performance of EBS + volumes attached to data nodes. Applicable only for the + GP3 and Provisioned IOPS EBS volume types. + type: number + throughput: + description: Specifies the throughput (in MiB/s) of the EBS + volumes attached to data nodes. Applicable only for the + gp3 volume type. + type: number + volumeSize: + description: Size of EBS volumes attached to data nodes (in + GiB). + type: number + volumeType: + description: Type of EBS volumes attached to data nodes. + type: string + type: object + elasticsearchVersion: + description: Version of Elasticsearch to deploy. Defaults to 1.5. + type: string + encryptAtRest: + description: Configuration block for encrypt at rest options. + Only available for certain instance types. Detailed below. + properties: + enabled: + description: Whether to enable encryption at rest. If the + encrypt_at_rest block is not provided then this defaults + to false. Enabling encryption on new domains requires elasticsearch_version + 5.1 or greater. + type: boolean + kmsKeyId: + description: KMS key ARN to encrypt the Elasticsearch domain + with. If not specified then it defaults to using the aws/es + service KMS key. Note that KMS will accept a KMS key ID + but will return the key ARN. + type: string + type: object + logPublishingOptions: + description: Configuration block for publishing slow and application + logs to CloudWatch Logs. 
This block can be declared multiple + times, for each log_type, within the same resource. Detailed + below. + items: + properties: + cloudwatchLogGroupArn: + description: ARN of the Cloudwatch log group to which log + needs to be published. + type: string + cloudwatchLogGroupArnRef: + description: Reference to a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudwatchLogGroupArnSelector: + description: Selector for a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Whether given log publishing option is enabled + or not. + type: boolean + logType: + description: 'Type of Elasticsearch log. Valid values: INDEX_SLOW_LOGS, + SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS.' + type: string + type: object + type: array + nodeToNodeEncryption: + description: Configuration block for node-to-node encryption options. + Detailed below. + properties: + enabled: + description: Whether to enable node-to-node encryption. If + the node_to_node_encryption block is not provided then this + defaults to false. Enabling node-to-node encryption of a + new domain requires an elasticsearch_version of 6.0 or greater. + type: boolean + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + snapshotOptions: + description: Configuration block for snapshot related options. + Detailed below. DEPRECATED. For domains running Elasticsearch + 5.3 and later, Amazon ES takes hourly automated snapshots, making + this setting irrelevant. For domains running earlier versions + of Elasticsearch, Amazon ES takes daily automated snapshots. + properties: + automatedSnapshotStartHour: + description: Hour during which the service takes an automated + daily snapshot of the indices in the domain. 
+ type: number + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcOptions: + description: Configuration block for VPC related options. Adding + or removing this configuration forces a new resource (documentation). + Detailed below. + properties: + securityGroupIds: + description: List of VPC Security Group IDs to be applied + to the Elasticsearch domain endpoints. If omitted, the default + Security Group for the VPC will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: List of VPC Subnet IDs for the Elasticsearch + domain endpoints to be created in. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessPolicies: + description: IAM policy document specifying the access policies + for the domain. + type: string + advancedOptions: + additionalProperties: + type: string + description: Key-value string pairs to specify advanced configuration + options. + type: object + x-kubernetes-map-type: granular + advancedSecurityOptions: + description: Configuration block for fine-grained access control. + Detailed below. 
+ properties: + enabled: + description: Whether advanced security is enabled. + type: boolean + internalUserDatabaseEnabled: + description: Whether the internal user database is enabled. + If not set, defaults to false by the AWS API. + type: boolean + masterUserOptions: + description: Configuration block for the main user. Detailed + below. + properties: + masterUserArn: + description: ARN for the main user. Only specify if internal_user_database_enabled + is not set or set to false. + type: string + masterUserName: + description: Main user's username, which is stored in + the Amazon Elasticsearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + type: string + masterUserPasswordSecretRef: + description: Main user's password, which is stored in + the Amazon Elasticsearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: object + autoTuneOptions: + description: Configuration block for the Auto-Tune options of + the domain. Detailed below. + properties: + desiredState: + description: 'The Auto-Tune desired state for the domain. + Valid values: ENABLED or DISABLED.' + type: string + maintenanceSchedule: + description: Configuration block for Auto-Tune maintenance + windows. Can be specified multiple times for each maintenance + window. Detailed below. + items: + properties: + cronExpressionForRecurrence: + description: A cron expression specifying the recurrence + pattern for an Auto-Tune maintenance schedule. + type: string + duration: + description: Configuration block for the duration of + the Auto-Tune maintenance window. Detailed below. 
+ properties: + unit: + description: 'The unit of time specifying the duration + of an Auto-Tune maintenance window. Valid values: + HOURS.' + type: string + value: + description: An integer specifying the value of + the duration of an Auto-Tune maintenance window. + type: number + type: object + startAt: + description: Date and time at which to start the Auto-Tune + maintenance schedule in RFC3339 format. + type: string + type: object + type: array + rollbackOnDisable: + description: 'Whether to roll back to default Auto-Tune settings + when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK + or NO_ROLLBACK.' + type: string + type: object + clusterConfig: + description: Configuration block for the cluster of the domain. + Detailed below. + properties: + coldStorageOptions: + description: Configuration block containing cold storage configuration. + Detailed below. + properties: + enabled: + description: Boolean to enable cold storage for an Elasticsearch + domain. Defaults to false. Master and ultrawarm nodes + must be enabled for cold storage. + type: boolean + type: object + dedicatedMasterCount: + description: Number of dedicated main nodes in the cluster. + type: number + dedicatedMasterEnabled: + description: Whether dedicated main nodes are enabled for + the cluster. + type: boolean + dedicatedMasterType: + description: Instance type of the dedicated main nodes in + the cluster. + type: string + instanceCount: + description: Number of instances in the cluster. + type: number + instanceType: + description: Instance type of data nodes in the cluster. + type: string + warmCount: + description: Number of warm nodes in the cluster. Valid values + are between 2 and 150. warm_count can be only and must be + set when warm_enabled is set to true. + type: number + warmEnabled: + description: Whether to enable warm storage. + type: boolean + warmType: + description: Instance type for the Elasticsearch cluster's + warm nodes. 
Valid values are ultrawarm1.medium.elasticsearch, + ultrawarm1.large.elasticsearch and ultrawarm1.xlarge.elasticsearch. + warm_type can be only and must be set when warm_enabled + is set to true. + type: string + zoneAwarenessConfig: + description: Configuration block containing zone awareness + settings. Detailed below. + properties: + availabilityZoneCount: + description: 'Number of Availability Zones for the domain + to use with zone_awareness_enabled. Defaults to 2. Valid + values: 2 or 3.' + type: number + type: object + zoneAwarenessEnabled: + description: Whether zone awareness is enabled, set to true + for multi-az deployment. To enable awareness with three + Availability Zones, the availability_zone_count within the + zone_awareness_config must be set to 3. + type: boolean + type: object + cognitoOptions: + description: Configuration block for authenticating Kibana with + Cognito. Detailed below. + properties: + enabled: + description: Whether Amazon Cognito authentication with Kibana + is enabled or not. + type: boolean + identityPoolId: + description: ID of the Cognito Identity Pool to use. + type: string + roleArn: + description: ARN of the IAM role that has the AmazonESCognitoAccess + policy attached. + type: string + userPoolId: + description: ID of the Cognito User Pool to use. + type: string + type: object + domainEndpointOptions: + description: Configuration block for domain endpoint HTTP(S) related + options. Detailed below. + properties: + customEndpoint: + description: Fully qualified domain for your custom endpoint. + type: string + customEndpointCertificateArn: + description: ACM certificate ARN for your custom endpoint. + type: string + customEndpointEnabled: + description: Whether to enable custom endpoint for the Elasticsearch + domain. + type: boolean + enforceHttps: + description: Whether or not to require HTTPS. Defaults to + true. 
+ type: boolean + tlsSecurityPolicy: + description: 'Name of the TLS security policy that needs to + be applied to the HTTPS endpoint. Valid values: Policy-Min-TLS-1-0-2019-07 + and Policy-Min-TLS-1-2-2019-07.' + type: string + type: object + ebsOptions: + description: Configuration block for EBS related options, may + be required based on chosen instance size. Detailed below. + properties: + ebsEnabled: + description: Whether EBS volumes are attached to data nodes + in the domain. + type: boolean + iops: + description: Baseline input/output (I/O) performance of EBS + volumes attached to data nodes. Applicable only for the + GP3 and Provisioned IOPS EBS volume types. + type: number + throughput: + description: Specifies the throughput (in MiB/s) of the EBS + volumes attached to data nodes. Applicable only for the + gp3 volume type. + type: number + volumeSize: + description: Size of EBS volumes attached to data nodes (in + GiB). + type: number + volumeType: + description: Type of EBS volumes attached to data nodes. + type: string + type: object + elasticsearchVersion: + description: Version of Elasticsearch to deploy. Defaults to 1.5. + type: string + encryptAtRest: + description: Configuration block for encrypt at rest options. + Only available for certain instance types. Detailed below. + properties: + enabled: + description: Whether to enable encryption at rest. If the + encrypt_at_rest block is not provided then this defaults + to false. Enabling encryption on new domains requires elasticsearch_version + 5.1 or greater. + type: boolean + kmsKeyId: + description: KMS key ARN to encrypt the Elasticsearch domain + with. If not specified then it defaults to using the aws/es + service KMS key. Note that KMS will accept a KMS key ID + but will return the key ARN. + type: string + type: object + logPublishingOptions: + description: Configuration block for publishing slow and application + logs to CloudWatch Logs. 
This block can be declared multiple + times, for each log_type, within the same resource. Detailed + below. + items: + properties: + cloudwatchLogGroupArn: + description: ARN of the Cloudwatch log group to which log + needs to be published. + type: string + cloudwatchLogGroupArnRef: + description: Reference to a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudwatchLogGroupArnSelector: + description: Selector for a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Whether given log publishing option is enabled + or not. + type: boolean + logType: + description: 'Type of Elasticsearch log. Valid values: INDEX_SLOW_LOGS, + SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS.' + type: string + type: object + type: array + nodeToNodeEncryption: + description: Configuration block for node-to-node encryption options. + Detailed below. + properties: + enabled: + description: Whether to enable node-to-node encryption. If + the node_to_node_encryption block is not provided then this + defaults to false. Enabling node-to-node encryption of a + new domain requires an elasticsearch_version of 6.0 or greater. + type: boolean + type: object + snapshotOptions: + description: Configuration block for snapshot related options. + Detailed below. DEPRECATED. For domains running Elasticsearch + 5.3 and later, Amazon ES takes hourly automated snapshots, making + this setting irrelevant. For domains running earlier versions + of Elasticsearch, Amazon ES takes daily automated snapshots. + properties: + automatedSnapshotStartHour: + description: Hour during which the service takes an automated + daily snapshot of the indices in the domain. + type: number + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + vpcOptions: + description: Configuration block for VPC related options. Adding + or removing this configuration forces a new resource (documentation). + Detailed below. + properties: + securityGroupIds: + description: List of VPC Security Group IDs to be applied + to the Elasticsearch domain endpoints. If omitted, the default + Security Group for the VPC will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: List of VPC Subnet IDs for the Elasticsearch + domain endpoints to be created in. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DomainStatus defines the observed state of Domain. + properties: + atProvider: + properties: + accessPolicies: + description: IAM policy document specifying the access policies + for the domain. + type: string + advancedOptions: + additionalProperties: + type: string + description: Key-value string pairs to specify advanced configuration + options. + type: object + x-kubernetes-map-type: granular + advancedSecurityOptions: + description: Configuration block for fine-grained access control. + Detailed below. + properties: + enabled: + description: Whether advanced security is enabled. + type: boolean + internalUserDatabaseEnabled: + description: Whether the internal user database is enabled. + If not set, defaults to false by the AWS API. + type: boolean + masterUserOptions: + description: Configuration block for the main user. Detailed + below. + properties: + masterUserArn: + description: ARN for the main user. Only specify if internal_user_database_enabled + is not set or set to false. + type: string + masterUserName: + description: Main user's username, which is stored in + the Amazon Elasticsearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + type: string + type: object + type: object + arn: + description: ARN of the domain. 
+ type: string + autoTuneOptions: + description: Configuration block for the Auto-Tune options of + the domain. Detailed below. + properties: + desiredState: + description: 'The Auto-Tune desired state for the domain. + Valid values: ENABLED or DISABLED.' + type: string + maintenanceSchedule: + description: Configuration block for Auto-Tune maintenance + windows. Can be specified multiple times for each maintenance + window. Detailed below. + items: + properties: + cronExpressionForRecurrence: + description: A cron expression specifying the recurrence + pattern for an Auto-Tune maintenance schedule. + type: string + duration: + description: Configuration block for the duration of + the Auto-Tune maintenance window. Detailed below. + properties: + unit: + description: 'The unit of time specifying the duration + of an Auto-Tune maintenance window. Valid values: + HOURS.' + type: string + value: + description: An integer specifying the value of + the duration of an Auto-Tune maintenance window. + type: number + type: object + startAt: + description: Date and time at which to start the Auto-Tune + maintenance schedule in RFC3339 format. + type: string + type: object + type: array + rollbackOnDisable: + description: 'Whether to roll back to default Auto-Tune settings + when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK + or NO_ROLLBACK.' + type: string + type: object + clusterConfig: + description: Configuration block for the cluster of the domain. + Detailed below. + properties: + coldStorageOptions: + description: Configuration block containing cold storage configuration. + Detailed below. + properties: + enabled: + description: Boolean to enable cold storage for an Elasticsearch + domain. Defaults to false. Master and ultrawarm nodes + must be enabled for cold storage. + type: boolean + type: object + dedicatedMasterCount: + description: Number of dedicated main nodes in the cluster. 
+ type: number + dedicatedMasterEnabled: + description: Whether dedicated main nodes are enabled for + the cluster. + type: boolean + dedicatedMasterType: + description: Instance type of the dedicated main nodes in + the cluster. + type: string + instanceCount: + description: Number of instances in the cluster. + type: number + instanceType: + description: Instance type of data nodes in the cluster. + type: string + warmCount: + description: Number of warm nodes in the cluster. Valid values + are between 2 and 150. warm_count can be only and must be + set when warm_enabled is set to true. + type: number + warmEnabled: + description: Whether to enable warm storage. + type: boolean + warmType: + description: Instance type for the Elasticsearch cluster's + warm nodes. Valid values are ultrawarm1.medium.elasticsearch, + ultrawarm1.large.elasticsearch and ultrawarm1.xlarge.elasticsearch. + warm_type can be only and must be set when warm_enabled + is set to true. + type: string + zoneAwarenessConfig: + description: Configuration block containing zone awareness + settings. Detailed below. + properties: + availabilityZoneCount: + description: 'Number of Availability Zones for the domain + to use with zone_awareness_enabled. Defaults to 2. Valid + values: 2 or 3.' + type: number + type: object + zoneAwarenessEnabled: + description: Whether zone awareness is enabled, set to true + for multi-az deployment. To enable awareness with three + Availability Zones, the availability_zone_count within the + zone_awareness_config must be set to 3. + type: boolean + type: object + cognitoOptions: + description: Configuration block for authenticating Kibana with + Cognito. Detailed below. + properties: + enabled: + description: Whether Amazon Cognito authentication with Kibana + is enabled or not. + type: boolean + identityPoolId: + description: ID of the Cognito Identity Pool to use. 
+ type: string + roleArn: + description: ARN of the IAM role that has the AmazonESCognitoAccess + policy attached. + type: string + userPoolId: + description: ID of the Cognito User Pool to use. + type: string + type: object + domainEndpointOptions: + description: Configuration block for domain endpoint HTTP(S) related + options. Detailed below. + properties: + customEndpoint: + description: Fully qualified domain for your custom endpoint. + type: string + customEndpointCertificateArn: + description: ACM certificate ARN for your custom endpoint. + type: string + customEndpointEnabled: + description: Whether to enable custom endpoint for the Elasticsearch + domain. + type: boolean + enforceHttps: + description: Whether or not to require HTTPS. Defaults to + true. + type: boolean + tlsSecurityPolicy: + description: 'Name of the TLS security policy that needs to + be applied to the HTTPS endpoint. Valid values: Policy-Min-TLS-1-0-2019-07 + and Policy-Min-TLS-1-2-2019-07.' + type: string + type: object + domainId: + description: Unique identifier for the domain. + type: string + ebsOptions: + description: Configuration block for EBS related options, may + be required based on chosen instance size. Detailed below. + properties: + ebsEnabled: + description: Whether EBS volumes are attached to data nodes + in the domain. + type: boolean + iops: + description: Baseline input/output (I/O) performance of EBS + volumes attached to data nodes. Applicable only for the + GP3 and Provisioned IOPS EBS volume types. + type: number + throughput: + description: Specifies the throughput (in MiB/s) of the EBS + volumes attached to data nodes. Applicable only for the + gp3 volume type. + type: number + volumeSize: + description: Size of EBS volumes attached to data nodes (in + GiB). + type: number + volumeType: + description: Type of EBS volumes attached to data nodes. + type: string + type: object + elasticsearchVersion: + description: Version of Elasticsearch to deploy. 
Defaults to 1.5. + type: string + encryptAtRest: + description: Configuration block for encrypt at rest options. + Only available for certain instance types. Detailed below. + properties: + enabled: + description: Whether to enable encryption at rest. If the + encrypt_at_rest block is not provided then this defaults + to false. Enabling encryption on new domains requires elasticsearch_version + 5.1 or greater. + type: boolean + kmsKeyId: + description: KMS key ARN to encrypt the Elasticsearch domain + with. If not specified then it defaults to using the aws/es + service KMS key. Note that KMS will accept a KMS key ID + but will return the key ARN. + type: string + type: object + endpoint: + description: Domain-specific endpoint used to submit index, search, + and data upload requests. + type: string + id: + type: string + kibanaEndpoint: + description: Domain-specific endpoint for kibana without https + scheme. + type: string + logPublishingOptions: + description: Configuration block for publishing slow and application + logs to CloudWatch Logs. This block can be declared multiple + times, for each log_type, within the same resource. Detailed + below. + items: + properties: + cloudwatchLogGroupArn: + description: ARN of the Cloudwatch log group to which log + needs to be published. + type: string + enabled: + description: Whether given log publishing option is enabled + or not. + type: boolean + logType: + description: 'Type of Elasticsearch log. Valid values: INDEX_SLOW_LOGS, + SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS.' + type: string + type: object + type: array + nodeToNodeEncryption: + description: Configuration block for node-to-node encryption options. + Detailed below. + properties: + enabled: + description: Whether to enable node-to-node encryption. If + the node_to_node_encryption block is not provided then this + defaults to false. Enabling node-to-node encryption of a + new domain requires an elasticsearch_version of 6.0 or greater. 
+ type: boolean + type: object + snapshotOptions: + description: Configuration block for snapshot related options. + Detailed below. DEPRECATED. For domains running Elasticsearch + 5.3 and later, Amazon ES takes hourly automated snapshots, making + this setting irrelevant. For domains running earlier versions + of Elasticsearch, Amazon ES takes daily automated snapshots. + properties: + automatedSnapshotStartHour: + description: Hour during which the service takes an automated + daily snapshot of the indices in the domain. + type: number + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + vpcOptions: + description: Configuration block for VPC related options. Adding + or removing this configuration forces a new resource (documentation). + Detailed below. + properties: + availabilityZones: + description: If the domain was created inside a VPC, the names + of the availability zones the configured subnet_ids were + created inside. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIds: + description: List of VPC Security Group IDs to be applied + to the Elasticsearch domain endpoints. If omitted, the default + Security Group for the VPC will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: List of VPC Subnet IDs for the Elasticsearch + domain endpoints to be created in. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: If the domain was created inside a VPC, the ID + of the VPC. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elasticsearch.aws.upbound.io_domainsamloptions.yaml b/package/crds/elasticsearch.aws.upbound.io_domainsamloptions.yaml index b37993eaed..00f40b6790 100644 --- a/package/crds/elasticsearch.aws.upbound.io_domainsamloptions.yaml +++ b/package/crds/elasticsearch.aws.upbound.io_domainsamloptions.yaml @@ -498,3 +498,469 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DomainSAMLOptions is the Schema for the DomainSAMLOptionss API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DomainSAMLOptionsSpec defines the desired state of DomainSAMLOptions + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + region: + description: Region is the region you'd like your resource to + be created in. + type: string + samlOptions: + description: The SAML authentication options for an AWS Elasticsearch + Domain. + properties: + enabled: + description: Whether SAML authentication is enabled. + type: boolean + idp: + description: Information from your identity provider. + properties: + entityId: + description: The unique Entity ID of the application in + SAML Identity Provider. + type: string + metadataContent: + description: The Metadata of the SAML application in xml + format. + type: string + type: object + masterBackendRole: + description: This backend role from the SAML IdP receives + full permissions to the cluster, equivalent to a new master + user. + type: string + masterUserNameSecretRef: + description: This username from the SAML IdP receives full + permissions to the cluster, equivalent to a new master user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + rolesKey: + description: Element of the SAML assertion to use for backend + roles. Default is roles. + type: string + sessionTimeoutMinutes: + description: Duration of a session in minutes after a user + logs in. Default is 60. Maximum value is 1,440. + type: number + subjectKey: + description: Custom SAML attribute to use for user names. + Default is an empty string - "". This will cause Elasticsearch + to use the NameID element of the Subject, which is the default + location for name identifiers in the SAML specification. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + samlOptions: + description: The SAML authentication options for an AWS Elasticsearch + Domain. + properties: + enabled: + description: Whether SAML authentication is enabled. + type: boolean + idp: + description: Information from your identity provider. + properties: + entityId: + description: The unique Entity ID of the application in + SAML Identity Provider. + type: string + metadataContent: + description: The Metadata of the SAML application in xml + format. 
+ type: string + type: object + masterBackendRole: + description: This backend role from the SAML IdP receives + full permissions to the cluster, equivalent to a new master + user. + type: string + masterUserNameSecretRef: + description: This username from the SAML IdP receives full + permissions to the cluster, equivalent to a new master user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + rolesKey: + description: Element of the SAML assertion to use for backend + roles. Default is roles. + type: string + sessionTimeoutMinutes: + description: Duration of a session in minutes after a user + logs in. Default is 60. Maximum value is 1,440. + type: number + subjectKey: + description: Custom SAML attribute to use for user names. + Default is an empty string - "". This will cause Elasticsearch + to use the NameID element of the Subject, which is the default + location for name identifiers in the SAML specification. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DomainSAMLOptionsStatus defines the observed state of DomainSAMLOptions. + properties: + atProvider: + properties: + id: + description: The name of the domain the SAML options are associated + with. + type: string + samlOptions: + description: The SAML authentication options for an AWS Elasticsearch + Domain. + properties: + enabled: + description: Whether SAML authentication is enabled. + type: boolean + idp: + description: Information from your identity provider. + properties: + entityId: + description: The unique Entity ID of the application in + SAML Identity Provider. + type: string + metadataContent: + description: The Metadata of the SAML application in xml + format. + type: string + type: object + masterBackendRole: + description: This backend role from the SAML IdP receives + full permissions to the cluster, equivalent to a new master + user. + type: string + rolesKey: + description: Element of the SAML assertion to use for backend + roles. Default is roles. 
+ type: string + sessionTimeoutMinutes: + description: Duration of a session in minutes after a user + logs in. Default is 60. Maximum value is 1,440. + type: number + subjectKey: + description: Custom SAML attribute to use for user names. + Default is an empty string - "". This will cause Elasticsearch + to use the NameID element of the Subject, which is the default + location for name identifiers in the SAML specification. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elastictranscoder.aws.upbound.io_pipelines.yaml b/package/crds/elastictranscoder.aws.upbound.io_pipelines.yaml index 8307f678a1..b39dcaa90c 100644 --- a/package/crds/elastictranscoder.aws.upbound.io_pipelines.yaml +++ b/package/crds/elastictranscoder.aws.upbound.io_pipelines.yaml @@ -1316,3 +1316,1283 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Pipeline is the Schema for the Pipelines API. Provides an Elastic + Transcoder pipeline resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PipelineSpec defines the desired state of Pipeline + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + awsKmsKeyArn: + description: The AWS Key Management Service (AWS KMS) key that + you want to use with this pipeline. + type: string + contentConfig: + description: The ContentConfig object specifies information about + the Amazon S3 bucket in which you want Elastic Transcoder to + save transcoded files and playlists. (documented below) + properties: + bucket: + description: The Amazon S3 bucket in which you want Elastic + Transcoder to save transcoded files and playlists. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageClass: + description: The Amazon S3 storage class, Standard or ReducedRedundancy, + that you want Elastic Transcoder to assign to the files + and playlists that it stores in your Amazon S3 bucket. 
+ type: string + type: object + contentConfigPermissions: + description: The permissions for the content_config object. (documented + below) + items: + properties: + access: + description: The permission that you want to give to the + AWS user that you specified in content_config_permissions.grantee. + Valid values are Read, ReadAcp, WriteAcp or FullControl. + items: + type: string + type: array + grantee: + description: The AWS user or group that you want to have + access to transcoded files and playlists. + type: string + granteeType: + description: Specify the type of value that appears in the + content_config_permissions.grantee object. Valid values + are Canonical, Email or Group. + type: string + type: object + type: array + inputBucket: + description: The Amazon S3 bucket in which you saved the media + files that you want to transcode and the graphics that you want + to use as watermarks. + type: string + inputBucketRef: + description: Reference to a Bucket in s3 to populate inputBucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + inputBucketSelector: + description: Selector for a Bucket in s3 to populate inputBucket. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the pipeline. Maximum 40 characters + type: string + notifications: + description: The Amazon Simple Notification Service (Amazon SNS) + topic that you want to notify to report job status. (documented + below) + properties: + completed: + description: The topic ARN for the Amazon SNS topic that you + want to notify when Elastic Transcoder has finished processing + a job in this pipeline. + type: string + error: + description: The topic ARN for the Amazon SNS topic that you + want to notify when Elastic Transcoder encounters an error + condition while processing a job in this pipeline. + type: string + progressing: + description: The topic ARN for the Amazon Simple Notification + Service (Amazon SNS) topic that you want to notify when + Elastic Transcoder has started to process a job in this + pipeline. 
+ type: string + warning: + description: The topic ARN for the Amazon SNS topic that you + want to notify when Elastic Transcoder encounters a warning + condition while processing a job in this pipeline. + type: string + type: object + outputBucket: + description: The Amazon S3 bucket in which you want Elastic Transcoder + to save the transcoded files. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + role: + description: The IAM Amazon Resource Name (ARN) for the role that + you want Elastic Transcoder to use to transcode jobs for this + pipeline. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + thumbnailConfig: + description: The ThumbnailConfig object specifies information + about the Amazon S3 bucket in which you want Elastic Transcoder + to save thumbnail files. (documented below) + properties: + bucket: + description: The Amazon S3 bucket in which you want Elastic + Transcoder to save transcoded files and playlists. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageClass: + description: The Amazon S3 storage class, Standard or ReducedRedundancy, + that you want Elastic Transcoder to assign to the files + and playlists that it stores in your Amazon S3 bucket. + type: string + type: object + thumbnailConfigPermissions: + description: The permissions for the thumbnail_config object. + (documented below) + items: + properties: + access: + description: The permission that you want to give to the + AWS user that you specified in content_config_permissions.grantee. + Valid values are Read, ReadAcp, WriteAcp or FullControl. 
+ items: + type: string + type: array + grantee: + description: The AWS user or group that you want to have + access to transcoded files and playlists. + type: string + granteeType: + description: Specify the type of value that appears in the + content_config_permissions.grantee object. Valid values + are Canonical, Email or Group. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + awsKmsKeyArn: + description: The AWS Key Management Service (AWS KMS) key that + you want to use with this pipeline. + type: string + contentConfig: + description: The ContentConfig object specifies information about + the Amazon S3 bucket in which you want Elastic Transcoder to + save transcoded files and playlists. (documented below) + properties: + bucket: + description: The Amazon S3 bucket in which you want Elastic + Transcoder to save transcoded files and playlists. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageClass: + description: The Amazon S3 storage class, Standard or ReducedRedundancy, + that you want Elastic Transcoder to assign to the files + and playlists that it stores in your Amazon S3 bucket. + type: string + type: object + contentConfigPermissions: + description: The permissions for the content_config object. (documented + below) + items: + properties: + access: + description: The permission that you want to give to the + AWS user that you specified in content_config_permissions.grantee. + Valid values are Read, ReadAcp, WriteAcp or FullControl. + items: + type: string + type: array + grantee: + description: The AWS user or group that you want to have + access to transcoded files and playlists. + type: string + granteeType: + description: Specify the type of value that appears in the + content_config_permissions.grantee object. Valid values + are Canonical, Email or Group. + type: string + type: object + type: array + inputBucket: + description: The Amazon S3 bucket in which you saved the media + files that you want to transcode and the graphics that you want + to use as watermarks. + type: string + inputBucketRef: + description: Reference to a Bucket in s3 to populate inputBucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + inputBucketSelector: + description: Selector for a Bucket in s3 to populate inputBucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the pipeline. Maximum 40 characters + type: string + notifications: + description: The Amazon Simple Notification Service (Amazon SNS) + topic that you want to notify to report job status. (documented + below) + properties: + completed: + description: The topic ARN for the Amazon SNS topic that you + want to notify when Elastic Transcoder has finished processing + a job in this pipeline. 
+ type: string + error: + description: The topic ARN for the Amazon SNS topic that you + want to notify when Elastic Transcoder encounters an error + condition while processing a job in this pipeline. + type: string + progressing: + description: The topic ARN for the Amazon Simple Notification + Service (Amazon SNS) topic that you want to notify when + Elastic Transcoder has started to process a job in this + pipeline. + type: string + warning: + description: The topic ARN for the Amazon SNS topic that you + want to notify when Elastic Transcoder encounters a warning + condition while processing a job in this pipeline. + type: string + type: object + outputBucket: + description: The Amazon S3 bucket in which you want Elastic Transcoder + to save the transcoded files. + type: string + role: + description: The IAM Amazon Resource Name (ARN) for the role that + you want Elastic Transcoder to use to transcode jobs for this + pipeline. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + thumbnailConfig: + description: The ThumbnailConfig object specifies information + about the Amazon S3 bucket in which you want Elastic Transcoder + to save thumbnail files. (documented below) + properties: + bucket: + description: The Amazon S3 bucket in which you want Elastic + Transcoder to save transcoded files and playlists. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageClass: + description: The Amazon S3 storage class, Standard or ReducedRedundancy, + that you want Elastic Transcoder to assign to the files + and playlists that it stores in your Amazon S3 bucket. + type: string + type: object + thumbnailConfigPermissions: + description: The permissions for the thumbnail_config object. 
+ (documented below) + items: + properties: + access: + description: The permission that you want to give to the + AWS user that you specified in content_config_permissions.grantee. + Valid values are Read, ReadAcp, WriteAcp or FullControl. + items: + type: string + type: array + grantee: + description: The AWS user or group that you want to have + access to transcoded files and playlists. + type: string + granteeType: + description: Specify the type of value that appears in the + content_config_permissions.grantee object. Valid values + are Canonical, Email or Group. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: PipelineStatus defines the observed state of Pipeline. + properties: + atProvider: + properties: + arn: + description: The ARN of the Elastictranscoder pipeline. + type: string + awsKmsKeyArn: + description: The AWS Key Management Service (AWS KMS) key that + you want to use with this pipeline. + type: string + contentConfig: + description: The ContentConfig object specifies information about + the Amazon S3 bucket in which you want Elastic Transcoder to + save transcoded files and playlists. (documented below) + properties: + bucket: + description: The Amazon S3 bucket in which you want Elastic + Transcoder to save transcoded files and playlists. + type: string + storageClass: + description: The Amazon S3 storage class, Standard or ReducedRedundancy, + that you want Elastic Transcoder to assign to the files + and playlists that it stores in your Amazon S3 bucket. + type: string + type: object + contentConfigPermissions: + description: The permissions for the content_config object. (documented + below) + items: + properties: + access: + description: The permission that you want to give to the + AWS user that you specified in content_config_permissions.grantee. + Valid values are Read, ReadAcp, WriteAcp or FullControl. + items: + type: string + type: array + grantee: + description: The AWS user or group that you want to have + access to transcoded files and playlists. + type: string + granteeType: + description: Specify the type of value that appears in the + content_config_permissions.grantee object. Valid values + are Canonical, Email or Group. + type: string + type: object + type: array + id: + description: The ID of the Elastictranscoder pipeline. 
+ type: string + inputBucket: + description: The Amazon S3 bucket in which you saved the media + files that you want to transcode and the graphics that you want + to use as watermarks. + type: string + name: + description: The name of the pipeline. Maximum 40 characters + type: string + notifications: + description: The Amazon Simple Notification Service (Amazon SNS) + topic that you want to notify to report job status. (documented + below) + properties: + completed: + description: The topic ARN for the Amazon SNS topic that you + want to notify when Elastic Transcoder has finished processing + a job in this pipeline. + type: string + error: + description: The topic ARN for the Amazon SNS topic that you + want to notify when Elastic Transcoder encounters an error + condition while processing a job in this pipeline. + type: string + progressing: + description: The topic ARN for the Amazon Simple Notification + Service (Amazon SNS) topic that you want to notify when + Elastic Transcoder has started to process a job in this + pipeline. + type: string + warning: + description: The topic ARN for the Amazon SNS topic that you + want to notify when Elastic Transcoder encounters a warning + condition while processing a job in this pipeline. + type: string + type: object + outputBucket: + description: The Amazon S3 bucket in which you want Elastic Transcoder + to save the transcoded files. + type: string + role: + description: The IAM Amazon Resource Name (ARN) for the role that + you want Elastic Transcoder to use to transcode jobs for this + pipeline. + type: string + thumbnailConfig: + description: The ThumbnailConfig object specifies information + about the Amazon S3 bucket in which you want Elastic Transcoder + to save thumbnail files. (documented below) + properties: + bucket: + description: The Amazon S3 bucket in which you want Elastic + Transcoder to save transcoded files and playlists. 
+ type: string + storageClass: + description: The Amazon S3 storage class, Standard or ReducedRedundancy, + that you want Elastic Transcoder to assign to the files + and playlists that it stores in your Amazon S3 bucket. + type: string + type: object + thumbnailConfigPermissions: + description: The permissions for the thumbnail_config object. + (documented below) + items: + properties: + access: + description: The permission that you want to give to the + AWS user that you specified in content_config_permissions.grantee. + Valid values are Read, ReadAcp, WriteAcp or FullControl. + items: + type: string + type: array + grantee: + description: The AWS user or group that you want to have + access to transcoded files and playlists. + type: string + granteeType: + description: Specify the type of value that appears in the + content_config_permissions.grantee object. Valid values + are Canonical, Email or Group. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elastictranscoder.aws.upbound.io_presets.yaml b/package/crds/elastictranscoder.aws.upbound.io_presets.yaml index 90dc421b72..868549b8a4 100644 --- a/package/crds/elastictranscoder.aws.upbound.io_presets.yaml +++ b/package/crds/elastictranscoder.aws.upbound.io_presets.yaml @@ -1164,3 +1164,1119 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Preset is the Schema for the Presets API. Provides an Elastic + Transcoder preset resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PresetSpec defines the desired state of Preset + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + audio: + description: Audio parameters object (documented below). + properties: + audioPackingMode: + description: The method of organizing audio channels and tracks. + Use Audio:Channels to specify the number of channels in + your output, and Audio:AudioPackingMode to specify the number + of tracks and their relation to the channels. If you do + not specify an Audio:AudioPackingMode, Elastic Transcoder + uses SingleTrack. + type: string + bitRate: + description: The bit rate of the audio stream in the output + file, in kilobits/second. Enter an integer between 64 and + 320, inclusive. 
+ type: string + channels: + description: The number of audio channels in the output file + type: string + codec: + description: The audio codec for the output file. Valid values + are AAC, flac, mp2, mp3, pcm, and vorbis. + type: string + sampleRate: + description: 'The sample rate of the audio stream in the output + file, in hertz. Valid values are: auto, 22050, 32000, 44100, + 48000, 96000' + type: string + type: object + audioCodecOptions: + description: Codec options for the audio parameters (documented + below) + properties: + bitDepth: + description: The bit depth of a sample is how many bits of + information are included in the audio samples. Valid values + are 16 and 24. (FLAC/PCM Only) + type: string + bitOrder: + description: The order the bits of a PCM sample are stored + in. The supported value is LittleEndian. (PCM Only) + type: string + profile: + description: If you specified AAC for Audio:Codec, choose + the AAC profile for the output file. + type: string + signed: + description: Whether audio samples are represented with negative + and positive numbers (signed) or only positive numbers (unsigned). + The supported value is Signed. (PCM Only) + type: string + type: object + container: + description: The container type for the output file. Valid values + are flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, + and webm. + type: string + description: + description: A description of the preset (maximum 255 characters) + type: string + name: + description: The name of the preset. (maximum 40 characters) + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + thumbnails: + description: Thumbnail parameters object (documented below) + properties: + aspectRatio: + description: 'The aspect ratio of thumbnails. The following + values are valid: auto, 1:1, 4:3, 3:2, 16:9' + type: string + format: + description: The format of thumbnails, if any. Valid formats + are jpg and png. 
+ type: string + interval: + description: The approximate number of seconds between thumbnails. + The value must be an integer. The actual interval can vary + by several seconds from one thumbnail to the next. + type: string + maxHeight: + description: The maximum height of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1080 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 3072, inclusive. + type: string + maxWidth: + description: The maximum width of thumbnails, in pixels. If + you specify auto, Elastic Transcoder uses 1920 (Full HD) + as the default value. If you specify a numeric value, enter + an even integer between 32 and 4096, inclusive. + type: string + paddingPolicy: + description: When you set PaddingPolicy to Pad, Elastic Transcoder + might add black bars to the top and bottom and/or left and + right sides of thumbnails to make the total size of the + thumbnails match the values that you specified for thumbnail + MaxWidth and MaxHeight settings. + type: string + resolution: + description: The width and height of thumbnail files in pixels, + in the format WidthxHeight, where both values are even integers. + The values cannot exceed the width and height that you specified + in the Video:Resolution object. (To better control resolution + and aspect ratio of thumbnails, we recommend that you use + the thumbnail values max_width, max_height, sizing_policy, + and padding_policy instead of resolution and aspect_ratio. + The two groups of settings are mutually exclusive. Do not + use them together) + type: string + sizingPolicy: + description: 'A value that controls scaling of thumbnails. + Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, + and ShrinkToFill.' + type: string + type: object + type: + type: string + video: + description: Video parameters object (documented below) + properties: + aspectRatio: + description: 'The aspect ratio of thumbnails. 
The following + values are valid: auto, 1:1, 4:3, 3:2, 16:9' + type: string + bitRate: + description: The bit rate of the audio stream in the output + file, in kilobits/second. Enter an integer between 64 and + 320, inclusive. + type: string + codec: + description: The audio codec for the output file. Valid values + are AAC, flac, mp2, mp3, pcm, and vorbis. + type: string + displayAspectRatio: + description: The value that Elastic Transcoder adds to the + metadata in the output file. If you set DisplayAspectRatio + to auto, Elastic Transcoder chooses an aspect ratio that + ensures square pixels. If you specify another option, Elastic + Transcoder sets that value in the output file. + type: string + fixedGop: + description: Whether to use a fixed value for Video:FixedGOP. + Not applicable for containers of type gif. Valid values + are true and false. Also known as, Fixed Number of Frames + Between Keyframes. + type: string + frameRate: + description: 'The frames per second for the video stream in + the output file. The following values are valid: auto, 10, + 15, 23.97, 24, 25, 29.97, 30, 50, 60.' + type: string + keyframesMaxDist: + description: The maximum number of frames between key frames. + Not applicable for containers of type gif. + type: string + maxFrameRate: + description: If you specify auto for FrameRate, Elastic Transcoder + uses the frame rate of the input video for the frame rate + of the output video, up to the maximum frame rate. If you + do not specify a MaxFrameRate, Elastic Transcoder will use + a default of 30. + type: string + maxHeight: + description: The maximum height of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1080 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 3072, inclusive. + type: string + maxWidth: + description: The maximum width of thumbnails, in pixels. If + you specify auto, Elastic Transcoder uses 1920 (Full HD) + as the default value. 
If you specify a numeric value, enter + an even integer between 32 and 4096, inclusive. + type: string + paddingPolicy: + description: When you set PaddingPolicy to Pad, Elastic Transcoder + might add black bars to the top and bottom and/or left and + right sides of thumbnails to make the total size of the + thumbnails match the values that you specified for thumbnail + MaxWidth and MaxHeight settings. + type: string + resolution: + description: The width and height of thumbnail files in pixels, + in the format WidthxHeight, where both values are even integers. + The values cannot exceed the width and height that you specified + in the Video:Resolution object. (To better control resolution + and aspect ratio of thumbnails, we recommend that you use + the thumbnail values max_width, max_height, sizing_policy, + and padding_policy instead of resolution and aspect_ratio. + The two groups of settings are mutually exclusive. Do not + use them together) + type: string + sizingPolicy: + description: 'A value that controls scaling of thumbnails. + Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, + and ShrinkToFill.' + type: string + type: object + videoCodecOptions: + additionalProperties: + type: string + description: Codec options for the video parameters + type: object + x-kubernetes-map-type: granular + videoWatermarks: + description: Watermark parameters for the video parameters (documented + below) + items: + properties: + horizontalAlign: + description: The horizontal position of the watermark unless + you specify a nonzero value for horzontal_offset. + type: string + horizontalOffset: + description: The amount by which you want the horizontal + position of the watermark to be offset from the position + specified by horizontal_align. + type: string + id: + description: A unique identifier for the settings for one + watermark. The value of Id can be up to 40 characters + long. You can specify settings for up to four watermarks. 
+ type: string + maxHeight: + description: The maximum height of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1080 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 3072, inclusive. + type: string + maxWidth: + description: The maximum width of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1920 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 4096, inclusive. + type: string + opacity: + description: A percentage that indicates how much you want + a watermark to obscure the video in the location where + it appears. + type: string + sizingPolicy: + description: 'A value that controls scaling of thumbnails. + Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, + and ShrinkToFill.' + type: string + target: + description: A value that determines how Elastic Transcoder + interprets values that you specified for video_watermarks.horizontal_offset, + video_watermarks.vertical_offset, video_watermarks.max_width, + and video_watermarks.max_height. Valid values are Content + and Frame. + type: string + verticalAlign: + description: The vertical position of the watermark unless + you specify a nonzero value for vertical_align. Valid + values are Top, Bottom, Center. + type: string + verticalOffset: + description: The amount by which you want the vertical position + of the watermark to be offset from the position specified + by vertical_align + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + audio: + description: Audio parameters object (documented below). + properties: + audioPackingMode: + description: The method of organizing audio channels and tracks. + Use Audio:Channels to specify the number of channels in + your output, and Audio:AudioPackingMode to specify the number + of tracks and their relation to the channels. If you do + not specify an Audio:AudioPackingMode, Elastic Transcoder + uses SingleTrack. + type: string + bitRate: + description: The bit rate of the audio stream in the output + file, in kilobits/second. Enter an integer between 64 and + 320, inclusive. + type: string + channels: + description: The number of audio channels in the output file + type: string + codec: + description: The audio codec for the output file. Valid values + are AAC, flac, mp2, mp3, pcm, and vorbis. + type: string + sampleRate: + description: 'The sample rate of the audio stream in the output + file, in hertz. Valid values are: auto, 22050, 32000, 44100, + 48000, 96000' + type: string + type: object + audioCodecOptions: + description: Codec options for the audio parameters (documented + below) + properties: + bitDepth: + description: The bit depth of a sample is how many bits of + information are included in the audio samples. Valid values + are 16 and 24. (FLAC/PCM Only) + type: string + bitOrder: + description: The order the bits of a PCM sample are stored + in. The supported value is LittleEndian. (PCM Only) + type: string + profile: + description: If you specified AAC for Audio:Codec, choose + the AAC profile for the output file. 
+ type: string + signed: + description: Whether audio samples are represented with negative + and positive numbers (signed) or only positive numbers (unsigned). + The supported value is Signed. (PCM Only) + type: string + type: object + container: + description: The container type for the output file. Valid values + are flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, + and webm. + type: string + description: + description: A description of the preset (maximum 255 characters) + type: string + name: + description: The name of the preset. (maximum 40 characters) + type: string + thumbnails: + description: Thumbnail parameters object (documented below) + properties: + aspectRatio: + description: 'The aspect ratio of thumbnails. The following + values are valid: auto, 1:1, 4:3, 3:2, 16:9' + type: string + format: + description: The format of thumbnails, if any. Valid formats + are jpg and png. + type: string + interval: + description: The approximate number of seconds between thumbnails. + The value must be an integer. The actual interval can vary + by several seconds from one thumbnail to the next. + type: string + maxHeight: + description: The maximum height of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1080 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 3072, inclusive. + type: string + maxWidth: + description: The maximum width of thumbnails, in pixels. If + you specify auto, Elastic Transcoder uses 1920 (Full HD) + as the default value. If you specify a numeric value, enter + an even integer between 32 and 4096, inclusive. + type: string + paddingPolicy: + description: When you set PaddingPolicy to Pad, Elastic Transcoder + might add black bars to the top and bottom and/or left and + right sides of thumbnails to make the total size of the + thumbnails match the values that you specified for thumbnail + MaxWidth and MaxHeight settings. 
+ type: string + resolution: + description: The width and height of thumbnail files in pixels, + in the format WidthxHeight, where both values are even integers. + The values cannot exceed the width and height that you specified + in the Video:Resolution object. (To better control resolution + and aspect ratio of thumbnails, we recommend that you use + the thumbnail values max_width, max_height, sizing_policy, + and padding_policy instead of resolution and aspect_ratio. + The two groups of settings are mutually exclusive. Do not + use them together) + type: string + sizingPolicy: + description: 'A value that controls scaling of thumbnails. + Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, + and ShrinkToFill.' + type: string + type: object + type: + type: string + video: + description: Video parameters object (documented below) + properties: + aspectRatio: + description: 'The aspect ratio of thumbnails. The following + values are valid: auto, 1:1, 4:3, 3:2, 16:9' + type: string + bitRate: + description: The bit rate of the audio stream in the output + file, in kilobits/second. Enter an integer between 64 and + 320, inclusive. + type: string + codec: + description: The audio codec for the output file. Valid values + are AAC, flac, mp2, mp3, pcm, and vorbis. + type: string + displayAspectRatio: + description: The value that Elastic Transcoder adds to the + metadata in the output file. If you set DisplayAspectRatio + to auto, Elastic Transcoder chooses an aspect ratio that + ensures square pixels. If you specify another option, Elastic + Transcoder sets that value in the output file. + type: string + fixedGop: + description: Whether to use a fixed value for Video:FixedGOP. + Not applicable for containers of type gif. Valid values + are true and false. Also known as, Fixed Number of Frames + Between Keyframes. + type: string + frameRate: + description: 'The frames per second for the video stream in + the output file. 
The following values are valid: auto, 10, + 15, 23.97, 24, 25, 29.97, 30, 50, 60.' + type: string + keyframesMaxDist: + description: The maximum number of frames between key frames. + Not applicable for containers of type gif. + type: string + maxFrameRate: + description: If you specify auto for FrameRate, Elastic Transcoder + uses the frame rate of the input video for the frame rate + of the output video, up to the maximum frame rate. If you + do not specify a MaxFrameRate, Elastic Transcoder will use + a default of 30. + type: string + maxHeight: + description: The maximum height of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1080 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 3072, inclusive. + type: string + maxWidth: + description: The maximum width of thumbnails, in pixels. If + you specify auto, Elastic Transcoder uses 1920 (Full HD) + as the default value. If you specify a numeric value, enter + an even integer between 32 and 4096, inclusive. + type: string + paddingPolicy: + description: When you set PaddingPolicy to Pad, Elastic Transcoder + might add black bars to the top and bottom and/or left and + right sides of thumbnails to make the total size of the + thumbnails match the values that you specified for thumbnail + MaxWidth and MaxHeight settings. + type: string + resolution: + description: The width and height of thumbnail files in pixels, + in the format WidthxHeight, where both values are even integers. + The values cannot exceed the width and height that you specified + in the Video:Resolution object. (To better control resolution + and aspect ratio of thumbnails, we recommend that you use + the thumbnail values max_width, max_height, sizing_policy, + and padding_policy instead of resolution and aspect_ratio. + The two groups of settings are mutually exclusive. 
Do not + use them together) + type: string + sizingPolicy: + description: 'A value that controls scaling of thumbnails. + Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, + and ShrinkToFill.' + type: string + type: object + videoCodecOptions: + additionalProperties: + type: string + description: Codec options for the video parameters + type: object + x-kubernetes-map-type: granular + videoWatermarks: + description: Watermark parameters for the video parameters (documented + below) + items: + properties: + horizontalAlign: + description: The horizontal position of the watermark unless + you specify a nonzero value for horzontal_offset. + type: string + horizontalOffset: + description: The amount by which you want the horizontal + position of the watermark to be offset from the position + specified by horizontal_align. + type: string + id: + description: A unique identifier for the settings for one + watermark. The value of Id can be up to 40 characters + long. You can specify settings for up to four watermarks. + type: string + maxHeight: + description: The maximum height of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1080 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 3072, inclusive. + type: string + maxWidth: + description: The maximum width of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1920 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 4096, inclusive. + type: string + opacity: + description: A percentage that indicates how much you want + a watermark to obscure the video in the location where + it appears. + type: string + sizingPolicy: + description: 'A value that controls scaling of thumbnails. + Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, + and ShrinkToFill.' 
+ type: string + target: + description: A value that determines how Elastic Transcoder + interprets values that you specified for video_watermarks.horizontal_offset, + video_watermarks.vertical_offset, video_watermarks.max_width, + and video_watermarks.max_height. Valid values are Content + and Frame. + type: string + verticalAlign: + description: The vertical position of the watermark unless + you specify a nonzero value for vertical_align. Valid + values are Top, Bottom, Center. + type: string + verticalOffset: + description: The amount by which you want the vertical position + of the watermark to be offset from the position specified + by vertical_align + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.container is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.container) + || (has(self.initProvider) && has(self.initProvider.container))' + status: + description: PresetStatus defines the observed state of Preset. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the Elastic Transcoder + Preset. + type: string + audio: + description: Audio parameters object (documented below). + properties: + audioPackingMode: + description: The method of organizing audio channels and tracks. + Use Audio:Channels to specify the number of channels in + your output, and Audio:AudioPackingMode to specify the number + of tracks and their relation to the channels. If you do + not specify an Audio:AudioPackingMode, Elastic Transcoder + uses SingleTrack. + type: string + bitRate: + description: The bit rate of the audio stream in the output + file, in kilobits/second. Enter an integer between 64 and + 320, inclusive. + type: string + channels: + description: The number of audio channels in the output file + type: string + codec: + description: The audio codec for the output file. Valid values + are AAC, flac, mp2, mp3, pcm, and vorbis. 
+ type: string + sampleRate: + description: 'The sample rate of the audio stream in the output + file, in hertz. Valid values are: auto, 22050, 32000, 44100, + 48000, 96000' + type: string + type: object + audioCodecOptions: + description: Codec options for the audio parameters (documented + below) + properties: + bitDepth: + description: The bit depth of a sample is how many bits of + information are included in the audio samples. Valid values + are 16 and 24. (FLAC/PCM Only) + type: string + bitOrder: + description: The order the bits of a PCM sample are stored + in. The supported value is LittleEndian. (PCM Only) + type: string + profile: + description: If you specified AAC for Audio:Codec, choose + the AAC profile for the output file. + type: string + signed: + description: Whether audio samples are represented with negative + and positive numbers (signed) or only positive numbers (unsigned). + The supported value is Signed. (PCM Only) + type: string + type: object + container: + description: The container type for the output file. Valid values + are flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, + and webm. + type: string + description: + description: A description of the preset (maximum 255 characters) + type: string + id: + description: A unique identifier for the settings for one watermark. + The value of Id can be up to 40 characters long. You can specify + settings for up to four watermarks. + type: string + name: + description: The name of the preset. (maximum 40 characters) + type: string + thumbnails: + description: Thumbnail parameters object (documented below) + properties: + aspectRatio: + description: 'The aspect ratio of thumbnails. The following + values are valid: auto, 1:1, 4:3, 3:2, 16:9' + type: string + format: + description: The format of thumbnails, if any. Valid formats + are jpg and png. + type: string + interval: + description: The approximate number of seconds between thumbnails. + The value must be an integer. 
The actual interval can vary + by several seconds from one thumbnail to the next. + type: string + maxHeight: + description: The maximum height of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1080 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 3072, inclusive. + type: string + maxWidth: + description: The maximum width of thumbnails, in pixels. If + you specify auto, Elastic Transcoder uses 1920 (Full HD) + as the default value. If you specify a numeric value, enter + an even integer between 32 and 4096, inclusive. + type: string + paddingPolicy: + description: When you set PaddingPolicy to Pad, Elastic Transcoder + might add black bars to the top and bottom and/or left and + right sides of thumbnails to make the total size of the + thumbnails match the values that you specified for thumbnail + MaxWidth and MaxHeight settings. + type: string + resolution: + description: The width and height of thumbnail files in pixels, + in the format WidthxHeight, where both values are even integers. + The values cannot exceed the width and height that you specified + in the Video:Resolution object. (To better control resolution + and aspect ratio of thumbnails, we recommend that you use + the thumbnail values max_width, max_height, sizing_policy, + and padding_policy instead of resolution and aspect_ratio. + The two groups of settings are mutually exclusive. Do not + use them together) + type: string + sizingPolicy: + description: 'A value that controls scaling of thumbnails. + Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, + and ShrinkToFill.' + type: string + type: object + type: + type: string + video: + description: Video parameters object (documented below) + properties: + aspectRatio: + description: 'The aspect ratio of thumbnails. 
The following + values are valid: auto, 1:1, 4:3, 3:2, 16:9' + type: string + bitRate: + description: The bit rate of the audio stream in the output + file, in kilobits/second. Enter an integer between 64 and + 320, inclusive. + type: string + codec: + description: The audio codec for the output file. Valid values + are AAC, flac, mp2, mp3, pcm, and vorbis. + type: string + displayAspectRatio: + description: The value that Elastic Transcoder adds to the + metadata in the output file. If you set DisplayAspectRatio + to auto, Elastic Transcoder chooses an aspect ratio that + ensures square pixels. If you specify another option, Elastic + Transcoder sets that value in the output file. + type: string + fixedGop: + description: Whether to use a fixed value for Video:FixedGOP. + Not applicable for containers of type gif. Valid values + are true and false. Also known as, Fixed Number of Frames + Between Keyframes. + type: string + frameRate: + description: 'The frames per second for the video stream in + the output file. The following values are valid: auto, 10, + 15, 23.97, 24, 25, 29.97, 30, 50, 60.' + type: string + keyframesMaxDist: + description: The maximum number of frames between key frames. + Not applicable for containers of type gif. + type: string + maxFrameRate: + description: If you specify auto for FrameRate, Elastic Transcoder + uses the frame rate of the input video for the frame rate + of the output video, up to the maximum frame rate. If you + do not specify a MaxFrameRate, Elastic Transcoder will use + a default of 30. + type: string + maxHeight: + description: The maximum height of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1080 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 3072, inclusive. + type: string + maxWidth: + description: The maximum width of thumbnails, in pixels. If + you specify auto, Elastic Transcoder uses 1920 (Full HD) + as the default value. 
If you specify a numeric value, enter + an even integer between 32 and 4096, inclusive. + type: string + paddingPolicy: + description: When you set PaddingPolicy to Pad, Elastic Transcoder + might add black bars to the top and bottom and/or left and + right sides of thumbnails to make the total size of the + thumbnails match the values that you specified for thumbnail + MaxWidth and MaxHeight settings. + type: string + resolution: + description: The width and height of thumbnail files in pixels, + in the format WidthxHeight, where both values are even integers. + The values cannot exceed the width and height that you specified + in the Video:Resolution object. (To better control resolution + and aspect ratio of thumbnails, we recommend that you use + the thumbnail values max_width, max_height, sizing_policy, + and padding_policy instead of resolution and aspect_ratio. + The two groups of settings are mutually exclusive. Do not + use them together) + type: string + sizingPolicy: + description: 'A value that controls scaling of thumbnails. + Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, + and ShrinkToFill.' + type: string + type: object + videoCodecOptions: + additionalProperties: + type: string + description: Codec options for the video parameters + type: object + x-kubernetes-map-type: granular + videoWatermarks: + description: Watermark parameters for the video parameters (documented + below) + items: + properties: + horizontalAlign: + description: The horizontal position of the watermark unless + you specify a nonzero value for horzontal_offset. + type: string + horizontalOffset: + description: The amount by which you want the horizontal + position of the watermark to be offset from the position + specified by horizontal_align. + type: string + id: + description: A unique identifier for the settings for one + watermark. The value of Id can be up to 40 characters + long. You can specify settings for up to four watermarks. 
+ type: string + maxHeight: + description: The maximum height of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1080 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 3072, inclusive. + type: string + maxWidth: + description: The maximum width of thumbnails, in pixels. + If you specify auto, Elastic Transcoder uses 1920 (Full + HD) as the default value. If you specify a numeric value, + enter an even integer between 32 and 4096, inclusive. + type: string + opacity: + description: A percentage that indicates how much you want + a watermark to obscure the video in the location where + it appears. + type: string + sizingPolicy: + description: 'A value that controls scaling of thumbnails. + Valid values are: Fit, Fill, Stretch, Keep, ShrinkToFit, + and ShrinkToFill.' + type: string + target: + description: A value that determines how Elastic Transcoder + interprets values that you specified for video_watermarks.horizontal_offset, + video_watermarks.vertical_offset, video_watermarks.max_width, + and video_watermarks.max_height. Valid values are Content + and Frame. + type: string + verticalAlign: + description: The vertical position of the watermark unless + you specify a nonzero value for vertical_align. Valid + values are Top, Bottom, Center. + type: string + verticalOffset: + description: The amount by which you want the vertical position + of the watermark to be offset from the position specified + by vertical_align + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elb.aws.upbound.io_elbs.yaml b/package/crds/elb.aws.upbound.io_elbs.yaml index 75a63d0bd0..d2d3fe946a 100644 --- a/package/crds/elb.aws.upbound.io_elbs.yaml +++ b/package/crds/elb.aws.upbound.io_elbs.yaml @@ -1083,3 +1083,1056 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ELB is the Schema for the ELBs API. Provides an Elastic Load + Balancer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ELBSpec defines the desired state of ELB + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessLogs: + description: An Access Logs block. Access Logs documented below. + properties: + bucket: + description: The S3 bucket name to store the logs in. + type: string + bucketPrefix: + description: The S3 bucket prefix. Logs are stored in the + root if not configured. + type: string + enabled: + description: Boolean to enable / disable access_logs. Default + is true + type: boolean + interval: + description: 'The publishing interval in minutes. Valid values: + 5 and 60. Default: 60' + type: number + type: object + availabilityZones: + description: The AZ's to serve traffic in. + items: + type: string + type: array + x-kubernetes-list-type: set + connectionDraining: + description: 'Boolean to enable connection draining. Default: + false' + type: boolean + connectionDrainingTimeout: + description: 'The time in seconds to allow for connections to + drain. Default: 300' + type: number + crossZoneLoadBalancing: + description: 'Enable cross-zone load balancing. Default: true' + type: boolean + desyncMitigationMode: + description: Determines how the load balancer handles requests + that might pose a security risk to an application due to HTTP + desync. Valid values are monitor, defensive (default), strictest. + type: string + healthCheck: + description: A health_check block. Health Check documented below. + properties: + healthyThreshold: + description: The number of checks before the instance is declared + healthy. 
+ type: number + interval: + description: 'The publishing interval in minutes. Valid values: + 5 and 60. Default: 60' + type: number + target: + description: |- + The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL + values are: + type: string + timeout: + description: The length of time before the check times out. + type: number + unhealthyThreshold: + description: The number of checks before the instance is declared + unhealthy. + type: number + type: object + idleTimeout: + description: 'The time in seconds that the connection is allowed + to be idle. Default: 60' + type: number + instances: + description: A list of instance ids to place in the ELB pool. + items: + type: string + type: array + x-kubernetes-list-type: set + instancesRefs: + description: References to Instance in ec2 to populate instances. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + instancesSelector: + description: Selector for a list of Instance in ec2 to populate + instances. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + internal: + description: If true, ELB will be an internal ELB. + type: boolean + listener: + description: A list of listener blocks. Listeners documented below. + items: + properties: + instancePort: + description: The port on the instance to route to + type: number + instanceProtocol: + description: |- + The protocol to use to the instance. Valid + values are HTTP, HTTPS, TCP, or SSL + type: string + lbPort: + description: The port to listen on for the load balancer + type: number + lbProtocol: + description: |- + The protocol to listen on. Valid values are HTTP, + HTTPS, TCP, or SSL + type: string + sslCertificateId: + description: |- + The ARN of an SSL certificate you have + uploaded to AWS IAM. Note ECDSA-specific restrictions below. 
Only valid when + type: string + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + securityGroups: + description: |- + A list of security group IDs to assign to the ELB. + Only valid if creating an ELB within a VPC + items: + type: string + type: array + x-kubernetes-list-type: set + sourceSecurityGroup: + description: |- + The name of the security group that you can use as + part of your inbound rules for your load balancer's back-end application + instances. Use this for Classic or Default VPC only. + type: string + subnets: + description: A list of subnet IDs to attach to the ELB. When an + update to subnets will remove all current subnets, this will + force a new resource. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetsRefs: + description: References to Subnet in ec2 to populate subnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetsSelector: + description: Selector for a list of Subnet in ec2 to populate + subnets. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + accessLogs: + description: An Access Logs block. Access Logs documented below. + properties: + bucket: + description: The S3 bucket name to store the logs in. + type: string + bucketPrefix: + description: The S3 bucket prefix. Logs are stored in the + root if not configured. + type: string + enabled: + description: Boolean to enable / disable access_logs. Default + is true + type: boolean + interval: + description: 'The publishing interval in minutes. Valid values: + 5 and 60. Default: 60' + type: number + type: object + availabilityZones: + description: The AZ's to serve traffic in. + items: + type: string + type: array + x-kubernetes-list-type: set + connectionDraining: + description: 'Boolean to enable connection draining. Default: + false' + type: boolean + connectionDrainingTimeout: + description: 'The time in seconds to allow for connections to + drain. Default: 300' + type: number + crossZoneLoadBalancing: + description: 'Enable cross-zone load balancing. Default: true' + type: boolean + desyncMitigationMode: + description: Determines how the load balancer handles requests + that might pose a security risk to an application due to HTTP + desync. Valid values are monitor, defensive (default), strictest. + type: string + healthCheck: + description: A health_check block. Health Check documented below. + properties: + healthyThreshold: + description: The number of checks before the instance is declared + healthy. + type: number + interval: + description: 'The publishing interval in minutes. Valid values: + 5 and 60. Default: 60' + type: number + target: + description: |- + The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL + values are: + type: string + timeout: + description: The length of time before the check times out. + type: number + unhealthyThreshold: + description: The number of checks before the instance is declared + unhealthy. 
+ type: number + type: object + idleTimeout: + description: 'The time in seconds that the connection is allowed + to be idle. Default: 60' + type: number + instances: + description: A list of instance ids to place in the ELB pool. + items: + type: string + type: array + x-kubernetes-list-type: set + instancesRefs: + description: References to Instance in ec2 to populate instances. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + instancesSelector: + description: Selector for a list of Instance in ec2 to populate + instances. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + internal: + description: If true, ELB will be an internal ELB. + type: boolean + listener: + description: A list of listener blocks. Listeners documented below. + items: + properties: + instancePort: + description: The port on the instance to route to + type: number + instanceProtocol: + description: |- + The protocol to use to the instance. Valid + values are HTTP, HTTPS, TCP, or SSL + type: string + lbPort: + description: The port to listen on for the load balancer + type: number + lbProtocol: + description: |- + The protocol to listen on. Valid values are HTTP, + HTTPS, TCP, or SSL + type: string + sslCertificateId: + description: |- + The ARN of an SSL certificate you have + uploaded to AWS IAM. Note ECDSA-specific restrictions below. Only valid when + type: string + type: object + type: array + securityGroups: + description: |- + A list of security group IDs to assign to the ELB. + Only valid if creating an ELB within a VPC + items: + type: string + type: array + x-kubernetes-list-type: set + sourceSecurityGroup: + description: |- + The name of the security group that you can use as + part of your inbound rules for your load balancer's back-end application + instances. Use this for Classic or Default VPC only. + type: string + subnets: + description: A list of subnet IDs to attach to the ELB. 
When an + update to subnets will remove all current subnets, this will + force a new resource. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetsRefs: + description: References to Subnet in ec2 to populate subnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetsSelector: + description: Selector for a list of Subnet in ec2 to populate + subnets. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.listener is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.listener) + || (has(self.initProvider) && has(self.initProvider.listener))' + status: + description: ELBStatus defines the observed state of ELB. + properties: + atProvider: + properties: + accessLogs: + description: An Access Logs block. Access Logs documented below. + properties: + bucket: + description: The S3 bucket name to store the logs in. + type: string + bucketPrefix: + description: The S3 bucket prefix. Logs are stored in the + root if not configured. + type: string + enabled: + description: Boolean to enable / disable access_logs. Default + is true + type: boolean + interval: + description: 'The publishing interval in minutes. Valid values: + 5 and 60. Default: 60' + type: number + type: object + arn: + description: The ARN of the ELB + type: string + availabilityZones: + description: The AZ's to serve traffic in. + items: + type: string + type: array + x-kubernetes-list-type: set + connectionDraining: + description: 'Boolean to enable connection draining. Default: + false' + type: boolean + connectionDrainingTimeout: + description: 'The time in seconds to allow for connections to + drain. Default: 300' + type: number + crossZoneLoadBalancing: + description: 'Enable cross-zone load balancing. Default: true' + type: boolean + desyncMitigationMode: + description: Determines how the load balancer handles requests + that might pose a security risk to an application due to HTTP + desync. Valid values are monitor, defensive (default), strictest. + type: string + dnsName: + description: The DNS name of the ELB + type: string + healthCheck: + description: A health_check block. Health Check documented below. 
+ properties: + healthyThreshold: + description: The number of checks before the instance is declared + healthy. + type: number + interval: + description: 'The publishing interval in minutes. Valid values: + 5 and 60. Default: 60' + type: number + target: + description: |- + The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL + values are: + type: string + timeout: + description: The length of time before the check times out. + type: number + unhealthyThreshold: + description: The number of checks before the instance is declared + unhealthy. + type: number + type: object + id: + description: The name of the ELB + type: string + idleTimeout: + description: 'The time in seconds that the connection is allowed + to be idle. Default: 60' + type: number + instances: + description: A list of instance ids to place in the ELB pool. + items: + type: string + type: array + x-kubernetes-list-type: set + internal: + description: If true, ELB will be an internal ELB. + type: boolean + listener: + description: A list of listener blocks. Listeners documented below. + items: + properties: + instancePort: + description: The port on the instance to route to + type: number + instanceProtocol: + description: |- + The protocol to use to the instance. Valid + values are HTTP, HTTPS, TCP, or SSL + type: string + lbPort: + description: The port to listen on for the load balancer + type: number + lbProtocol: + description: |- + The protocol to listen on. Valid values are HTTP, + HTTPS, TCP, or SSL + type: string + sslCertificateId: + description: |- + The ARN of an SSL certificate you have + uploaded to AWS IAM. Note ECDSA-specific restrictions below. Only valid when + type: string + type: object + type: array + securityGroups: + description: |- + A list of security group IDs to assign to the ELB. 
+ Only valid if creating an ELB within a VPC + items: + type: string + type: array + x-kubernetes-list-type: set + sourceSecurityGroup: + description: |- + The name of the security group that you can use as + part of your inbound rules for your load balancer's back-end application + instances. Use this for Classic or Default VPC only. + type: string + sourceSecurityGroupId: + description: |- + The ID of the security group that you can use as + part of your inbound rules for your load balancer's back-end application + instances. Only available on ELBs launched in a VPC. + type: string + subnets: + description: A list of subnet IDs to attach to the ELB. When an + update to subnets will remove all current subnets, this will + force a new resource. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + zoneId: + description: The canonical hosted zone ID of the ELB (to be used + in a Route 53 Alias record) + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elbv2.aws.upbound.io_lblistenerrules.yaml b/package/crds/elbv2.aws.upbound.io_lblistenerrules.yaml index e3452013ed..9b39ad9283 100644 --- a/package/crds/elbv2.aws.upbound.io_lblistenerrules.yaml +++ b/package/crds/elbv2.aws.upbound.io_lblistenerrules.yaml @@ -2435,3 +2435,2348 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LBListenerRule is the Schema for the LBListenerRules API. 
Provides + a Load Balancer Listener Rule resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LBListenerRuleSpec defines the desired state of LBListenerRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: An Action block. Action blocks are documented below. + items: + properties: + authenticateCognito: + description: Information for creating an authenticate action + using Cognito. Required if type is authenticate-cognito. 
+ properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'The query parameters to include in the + redirect request to the authorization endpoint. Max: + 10.' + type: object + x-kubernetes-map-type: granular + onUnauthenticatedRequest: + description: 'The behavior if the user is not authenticated. + Valid values: deny, allow and authenticate' + type: string + scope: + description: The set of user claims to be requested + from the IdP. + type: string + sessionCookieName: + description: The name of the cookie used to maintain + session information. + type: string + sessionTimeout: + description: The maximum duration of the authentication + session, in seconds. + type: number + userPoolArn: + description: The ARN of the Cognito user pool. + type: string + userPoolArnRef: + description: Reference to a UserPool in cognitoidp to + populate userPoolArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolArnSelector: + description: Selector for a UserPool in cognitoidp to + populate userPoolArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userPoolClientId: + description: The ID of the Cognito user pool client. + type: string + userPoolClientIdRef: + description: Reference to a UserPoolClient in cognitoidp + to populate userPoolClientId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolClientIdSelector: + description: Selector for a UserPoolClient in cognitoidp + to populate userPoolClientId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userPoolDomain: + description: The domain prefix or fully-qualified domain + name of the Cognito user pool. + type: string + userPoolDomainRef: + description: Reference to a UserPoolDomain in cognitoidp + to populate userPoolDomain. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolDomainSelector: + description: Selector for a UserPoolDomain in cognitoidp + to populate userPoolDomain. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + authenticateOidc: + description: Information for creating an authenticate action + using OIDC. Required if type is authenticate-oidc. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'The query parameters to include in the + redirect request to the authorization endpoint. Max: + 10.' + type: object + x-kubernetes-map-type: granular + authorizationEndpoint: + description: The authorization endpoint of the IdP. + type: string + clientId: + description: The OAuth 2.0 client identifier. + type: string + clientSecretSecretRef: + description: The OAuth 2.0 client secret. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + issuer: + description: The OIDC issuer identifier of the IdP. + type: string + onUnauthenticatedRequest: + description: 'The behavior if the user is not authenticated. + Valid values: deny, allow and authenticate' + type: string + scope: + description: The set of user claims to be requested + from the IdP. + type: string + sessionCookieName: + description: The name of the cookie used to maintain + session information. + type: string + sessionTimeout: + description: The maximum duration of the authentication + session, in seconds. + type: number + tokenEndpoint: + description: The token endpoint of the IdP. + type: string + userInfoEndpoint: + description: The user info endpoint of the IdP. + type: string + type: object + fixedResponse: + description: Information for creating an action that returns + a custom HTTP response. Required if type is fixed-response. + properties: + contentType: + description: The content type. 
Valid values are text/plain, + text/css, text/html, application/javascript and application/json. + type: string + messageBody: + description: The message body. + type: string + statusCode: + description: The HTTP redirect code. The redirect is + either permanent (HTTP_301) or temporary (HTTP_302). + type: string + type: object + forward: + description: |- + Configuration block for creating an action that distributes requests among one or more target groups. + Specify only if type is forward. + Cannot be specified with target_group_arn. + properties: + stickiness: + description: The target group stickiness for the rule. + properties: + duration: + description: The time period, in seconds, during + which requests from a client should be routed + to the same target group. The range is 1-604800 + seconds (7 days). + type: number + enabled: + description: Indicates whether target group stickiness + is enabled. + type: boolean + type: object + targetGroup: + description: One or more target groups block. + items: + properties: + arn: + description: The Amazon Resource Name (ARN) of + the target group. + type: string + arnRef: + description: Reference to a LBTargetGroup in elbv2 + to populate arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a LBTargetGroup in elbv2 + to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + weight: + description: The weight. The range is 0 to 999. + type: number + type: object + type: array + type: object + order: + description: |- + Order for the action. + The action with the lowest value for order is performed first. + Valid values are between 1 and 50000. + Defaults to the position in the list of actions. + type: number + redirect: + description: Information for creating a redirect action. + Required if type is redirect. + properties: + host: + description: 'The hostname. This component is not percent-encoded. + The hostname can contain #{host}. Defaults to #{host}.' + type: string + path: + description: 'The absolute path, starting with the leading + "/". 
This component is not percent-encoded. The path + can contain #{host}, #{path}, and #{port}. Defaults + to /#{path}.' + type: string + port: + description: 'The port. Specify a value from 1 to 65535 + or #{port}. Defaults to #{port}.' + type: string + protocol: + description: 'The protocol. Valid values are HTTP, HTTPS, + or #{protocol}. Defaults to #{protocol}.' + type: string + query: + description: 'The query parameters, URL-encoded when + necessary, but not percent-encoded. Do not include + the leading "?". Defaults to #{query}.' + type: string + statusCode: + description: The HTTP redirect code. The redirect is + either permanent (HTTP_301) or temporary (HTTP_302). + type: string + type: object + targetGroupArn: + description: |- + ARN of the Target Group to which to route traffic. + Specify only if type is forward and you want to route to a single target group. + To route to one or more target groups, use a forward block instead. + Cannot be specified with forward. + type: string + targetGroupArnRef: + description: Reference to a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetGroupArnSelector: + description: Selector for a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The type of routing action. Valid values are + forward, redirect, fixed-response, authenticate-cognito + and authenticate-oidc. + type: string + type: object + type: array + condition: + description: A Condition block. Multiple condition blocks of different + types can be set and all must be satisfied for the rule to match. + Condition blocks are documented below. + items: + properties: + hostHeader: + description: 'Contains a single values item which is a list + of host header patterns to match. The maximum size of + each pattern is 128 characters. Comparison is case insensitive. 
+ Wildcard characters supported: * (matches 0 or more characters) + and ? (matches exactly 1 character). Only one pattern + needs to match for the condition to be satisfied.' + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpHeader: + description: HTTP headers to match. HTTP Header block fields + documented below. + properties: + httpHeaderName: + description: Name of HTTP header to search. The maximum + size is 40 characters. Comparison is case insensitive. + Only RFC7240 characters are supported. Wildcards are + not supported. You cannot use HTTP header condition + to specify the host header, use a host-header condition + instead. + type: string + values: + description: 'List of header value patterns to match. + Maximum size of each pattern is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). If the same header appears multiple + times in the request they will be searched in order + until a match is found. Only one pattern needs to + match for the condition to be satisfied. To require + that all of the strings are a match, create one condition + block per string.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpRequestMethod: + description: Contains a single values item which is a list + of HTTP request methods or verbs to match. 
Maximum size + is 40 characters. Only allowed characters are A-Z, hyphen + (-) and underscore (_). Comparison is case sensitive. + Wildcards are not supported. Only one needs to match for + the condition to be satisfied. AWS recommends that GET + and HEAD requests are routed in the same way because the + response to a HEAD request may be cached. + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + pathPattern: + description: 'Contains a single values item which is a list + of path patterns to match against the request URL. Maximum + size of each pattern is 128 characters. Comparison is + case sensitive. Wildcard characters supported: * (matches + 0 or more characters) and ? (matches exactly 1 character). + Only one pattern needs to match for the condition to be + satisfied. Path pattern is compared only to the path of + the URL, not to its query string. To compare against the + query string, use a query_string condition.' + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). 
To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + queryString: + description: Query strings to match. Query String block + fields documented below. + items: + properties: + key: + description: Query string key pattern to match. + type: string + value: + description: Query string value pattern to match. + type: string + type: object + type: array + sourceIp: + description: Contains a single values item which is a list + of source IP CIDR notations to match. You can use both + IPv4 and IPv6 addresses. Wildcards are not supported. + Condition is satisfied if the source IP address of the + request matches one of the CIDR blocks. Condition is not + satisfied by the addresses in the X-Forwarded-For header, + use http_header condition instead. + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + listenerArn: + description: The ARN of the listener to which to attach the rule. + type: string + listenerArnRef: + description: Reference to a LBListener in elbv2 to populate listenerArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + listenerArnSelector: + description: Selector for a LBListener in elbv2 to populate listenerArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + priority: + description: The priority for the rule between 1 and 50000. Leaving + it unset will automatically set the rule with next available + priority after currently existing highest rule. A listener can't + have multiple rules with the same priority. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: An Action block. Action blocks are documented below. + items: + properties: + authenticateCognito: + description: Information for creating an authenticate action + using Cognito. Required if type is authenticate-cognito. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'The query parameters to include in the + redirect request to the authorization endpoint. Max: + 10.' + type: object + x-kubernetes-map-type: granular + onUnauthenticatedRequest: + description: 'The behavior if the user is not authenticated. 
+ Valid values: deny, allow and authenticate' + type: string + scope: + description: The set of user claims to be requested + from the IdP. + type: string + sessionCookieName: + description: The name of the cookie used to maintain + session information. + type: string + sessionTimeout: + description: The maximum duration of the authentication + session, in seconds. + type: number + userPoolArn: + description: The ARN of the Cognito user pool. + type: string + userPoolArnRef: + description: Reference to a UserPool in cognitoidp to + populate userPoolArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolArnSelector: + description: Selector for a UserPool in cognitoidp to + populate userPoolArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userPoolClientId: + description: The ID of the Cognito user pool client. + type: string + userPoolClientIdRef: + description: Reference to a UserPoolClient in cognitoidp + to populate userPoolClientId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolClientIdSelector: + description: Selector for a UserPoolClient in cognitoidp + to populate userPoolClientId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userPoolDomain: + description: The domain prefix or fully-qualified domain + name of the Cognito user pool. + type: string + userPoolDomainRef: + description: Reference to a UserPoolDomain in cognitoidp + to populate userPoolDomain. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolDomainSelector: + description: Selector for a UserPoolDomain in cognitoidp + to populate userPoolDomain. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + authenticateOidc: + description: Information for creating an authenticate action + using OIDC. Required if type is authenticate-oidc. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'The query parameters to include in the + redirect request to the authorization endpoint. Max: + 10.' + type: object + x-kubernetes-map-type: granular + authorizationEndpoint: + description: The authorization endpoint of the IdP. 
+ type: string + clientId: + description: The OAuth 2.0 client identifier. + type: string + clientSecretSecretRef: + description: The OAuth 2.0 client secret. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + issuer: + description: The OIDC issuer identifier of the IdP. + type: string + onUnauthenticatedRequest: + description: 'The behavior if the user is not authenticated. + Valid values: deny, allow and authenticate' + type: string + scope: + description: The set of user claims to be requested + from the IdP. + type: string + sessionCookieName: + description: The name of the cookie used to maintain + session information. + type: string + sessionTimeout: + description: The maximum duration of the authentication + session, in seconds. + type: number + tokenEndpoint: + description: The token endpoint of the IdP. + type: string + userInfoEndpoint: + description: The user info endpoint of the IdP. + type: string + required: + - clientSecretSecretRef + type: object + fixedResponse: + description: Information for creating an action that returns + a custom HTTP response. Required if type is fixed-response. + properties: + contentType: + description: The content type. Valid values are text/plain, + text/css, text/html, application/javascript and application/json. + type: string + messageBody: + description: The message body. + type: string + statusCode: + description: The HTTP redirect code. The redirect is + either permanent (HTTP_301) or temporary (HTTP_302). + type: string + type: object + forward: + description: |- + Configuration block for creating an action that distributes requests among one or more target groups. + Specify only if type is forward. + Cannot be specified with target_group_arn. 
+ properties: + stickiness: + description: The target group stickiness for the rule. + properties: + duration: + description: The time period, in seconds, during + which requests from a client should be routed + to the same target group. The range is 1-604800 + seconds (7 days). + type: number + enabled: + description: Indicates whether target group stickiness + is enabled. + type: boolean + type: object + targetGroup: + description: One or more target groups block. + items: + properties: + arn: + description: The Amazon Resource Name (ARN) of + the target group. + type: string + arnRef: + description: Reference to a LBTargetGroup in elbv2 + to populate arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a LBTargetGroup in elbv2 + to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + weight: + description: The weight. The range is 0 to 999. + type: number + type: object + type: array + type: object + order: + description: |- + Order for the action. + The action with the lowest value for order is performed first. + Valid values are between 1 and 50000. + Defaults to the position in the list of actions. + type: number + redirect: + description: Information for creating a redirect action. + Required if type is redirect. + properties: + host: + description: 'The hostname. This component is not percent-encoded. + The hostname can contain #{host}. Defaults to #{host}.' + type: string + path: + description: 'The absolute path, starting with the leading + "/". This component is not percent-encoded. The path + can contain #{host}, #{path}, and #{port}. Defaults + to /#{path}.' + type: string + port: + description: 'The port. Specify a value from 1 to 65535 + or #{port}. Defaults to #{port}.' + type: string + protocol: + description: 'The protocol. Valid values are HTTP, HTTPS, + or #{protocol}. Defaults to #{protocol}.' + type: string + query: + description: 'The query parameters, URL-encoded when + necessary, but not percent-encoded. Do not include + the leading "?". 
Defaults to #{query}.' + type: string + statusCode: + description: The HTTP redirect code. The redirect is + either permanent (HTTP_301) or temporary (HTTP_302). + type: string + type: object + targetGroupArn: + description: |- + ARN of the Target Group to which to route traffic. + Specify only if type is forward and you want to route to a single target group. + To route to one or more target groups, use a forward block instead. + Cannot be specified with forward. + type: string + targetGroupArnRef: + description: Reference to a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetGroupArnSelector: + description: Selector for a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The type of routing action. Valid values are + forward, redirect, fixed-response, authenticate-cognito + and authenticate-oidc. + type: string + type: object + type: array + condition: + description: A Condition block. Multiple condition blocks of different + types can be set and all must be satisfied for the rule to match. + Condition blocks are documented below. + items: + properties: + hostHeader: + description: 'Contains a single values item which is a list + of host header patterns to match. The maximum size of + each pattern is 128 characters. Comparison is case insensitive. + Wildcard characters supported: * (matches 0 or more characters) + and ? (matches exactly 1 character). Only one pattern + needs to match for the condition to be satisfied.' + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). 
To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpHeader: + description: HTTP headers to match. HTTP Header block fields + documented below. + properties: + httpHeaderName: + description: Name of HTTP header to search. The maximum + size is 40 characters. Comparison is case insensitive. + Only RFC7240 characters are supported. Wildcards are + not supported. You cannot use HTTP header condition + to specify the host header, use a host-header condition + instead. + type: string + values: + description: 'List of header value patterns to match. + Maximum size of each pattern is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). If the same header appears multiple + times in the request they will be searched in order + until a match is found. Only one pattern needs to + match for the condition to be satisfied. To require + that all of the strings are a match, create one condition + block per string.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpRequestMethod: + description: Contains a single values item which is a list + of HTTP request methods or verbs to match. Maximum size + is 40 characters. Only allowed characters are A-Z, hyphen + (-) and underscore (_). Comparison is case sensitive. + Wildcards are not supported. Only one needs to match for + the condition to be satisfied. AWS recommends that GET + and HEAD requests are routed in the same way because the + response to a HEAD request may be cached. + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. 
+ Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + pathPattern: + description: 'Contains a single values item which is a list + of path patterns to match against the request URL. Maximum + size of each pattern is 128 characters. Comparison is + case sensitive. Wildcard characters supported: * (matches + 0 or more characters) and ? (matches exactly 1 character). + Only one pattern needs to match for the condition to be + satisfied. Path pattern is compared only to the path of + the URL, not to its query string. To compare against the + query string, use a query_string condition.' + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + queryString: + description: Query strings to match. Query String block + fields documented below. + items: + properties: + key: + description: Query string key pattern to match. + type: string + value: + description: Query string value pattern to match. 
+ type: string + type: object + type: array + sourceIp: + description: Contains a single values item which is a list + of source IP CIDR notations to match. You can use both + IPv4 and IPv6 addresses. Wildcards are not supported. + Condition is satisfied if the source IP address of the + request matches one of the CIDR blocks. Condition is not + satisfied by the addresses in the X-Forwarded-For header, + use http_header condition instead. + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + listenerArn: + description: The ARN of the listener to which to attach the rule. + type: string + listenerArnRef: + description: Reference to a LBListener in elbv2 to populate listenerArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + listenerArnSelector: + description: Selector for a LBListener in elbv2 to populate listenerArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + priority: + description: The priority for the rule between 1 and 50000. Leaving + it unset will automatically set the rule with next available + priority after currently existing highest rule. A listener can't + have multiple rules with the same priority. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.action is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.action) + || (has(self.initProvider) && has(self.initProvider.action))' + - message: spec.forProvider.condition is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.condition) + || (has(self.initProvider) && has(self.initProvider.condition))' + status: + description: LBListenerRuleStatus defines the observed state of LBListenerRule. + properties: + atProvider: + properties: + action: + description: An Action block. Action blocks are documented below. + items: + properties: + authenticateCognito: + description: Information for creating an authenticate action + using Cognito. Required if type is authenticate-cognito. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'The query parameters to include in the + redirect request to the authorization endpoint. Max: + 10.' + type: object + x-kubernetes-map-type: granular + onUnauthenticatedRequest: + description: 'The behavior if the user is not authenticated. + Valid values: deny, allow and authenticate' + type: string + scope: + description: The set of user claims to be requested + from the IdP. + type: string + sessionCookieName: + description: The name of the cookie used to maintain + session information. + type: string + sessionTimeout: + description: The maximum duration of the authentication + session, in seconds. + type: number + userPoolArn: + description: The ARN of the Cognito user pool. + type: string + userPoolClientId: + description: The ID of the Cognito user pool client. 
+ type: string + userPoolDomain: + description: The domain prefix or fully-qualified domain + name of the Cognito user pool. + type: string + type: object + authenticateOidc: + description: Information for creating an authenticate action + using OIDC. Required if type is authenticate-oidc. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'The query parameters to include in the + redirect request to the authorization endpoint. Max: + 10.' + type: object + x-kubernetes-map-type: granular + authorizationEndpoint: + description: The authorization endpoint of the IdP. + type: string + clientId: + description: The OAuth 2.0 client identifier. + type: string + issuer: + description: The OIDC issuer identifier of the IdP. + type: string + onUnauthenticatedRequest: + description: 'The behavior if the user is not authenticated. + Valid values: deny, allow and authenticate' + type: string + scope: + description: The set of user claims to be requested + from the IdP. + type: string + sessionCookieName: + description: The name of the cookie used to maintain + session information. + type: string + sessionTimeout: + description: The maximum duration of the authentication + session, in seconds. + type: number + tokenEndpoint: + description: The token endpoint of the IdP. + type: string + userInfoEndpoint: + description: The user info endpoint of the IdP. + type: string + type: object + fixedResponse: + description: Information for creating an action that returns + a custom HTTP response. Required if type is fixed-response. + properties: + contentType: + description: The content type. Valid values are text/plain, + text/css, text/html, application/javascript and application/json. + type: string + messageBody: + description: The message body. + type: string + statusCode: + description: The HTTP redirect code. The redirect is + either permanent (HTTP_301) or temporary (HTTP_302). 
+ type: string + type: object + forward: + description: |- + Configuration block for creating an action that distributes requests among one or more target groups. + Specify only if type is forward. + Cannot be specified with target_group_arn. + properties: + stickiness: + description: The target group stickiness for the rule. + properties: + duration: + description: The time period, in seconds, during + which requests from a client should be routed + to the same target group. The range is 1-604800 + seconds (7 days). + type: number + enabled: + description: Indicates whether target group stickiness + is enabled. + type: boolean + type: object + targetGroup: + description: One or more target groups block. + items: + properties: + arn: + description: The Amazon Resource Name (ARN) of + the target group. + type: string + weight: + description: The weight. The range is 0 to 999. + type: number + type: object + type: array + type: object + order: + description: |- + Order for the action. + The action with the lowest value for order is performed first. + Valid values are between 1 and 50000. + Defaults to the position in the list of actions. + type: number + redirect: + description: Information for creating a redirect action. + Required if type is redirect. + properties: + host: + description: 'The hostname. This component is not percent-encoded. + The hostname can contain #{host}. Defaults to #{host}.' + type: string + path: + description: 'The absolute path, starting with the leading + "/". This component is not percent-encoded. The path + can contain #{host}, #{path}, and #{port}. Defaults + to /#{path}.' + type: string + port: + description: 'The port. Specify a value from 1 to 65535 + or #{port}. Defaults to #{port}.' + type: string + protocol: + description: 'The protocol. Valid values are HTTP, HTTPS, + or #{protocol}. Defaults to #{protocol}.' + type: string + query: + description: 'The query parameters, URL-encoded when + necessary, but not percent-encoded. 
Do not include + the leading "?". Defaults to #{query}.' + type: string + statusCode: + description: The HTTP redirect code. The redirect is + either permanent (HTTP_301) or temporary (HTTP_302). + type: string + type: object + targetGroupArn: + description: |- + ARN of the Target Group to which to route traffic. + Specify only if type is forward and you want to route to a single target group. + To route to one or more target groups, use a forward block instead. + Cannot be specified with forward. + type: string + type: + description: The type of routing action. Valid values are + forward, redirect, fixed-response, authenticate-cognito + and authenticate-oidc. + type: string + type: object + type: array + arn: + description: The ARN of the rule (matches id) + type: string + condition: + description: A Condition block. Multiple condition blocks of different + types can be set and all must be satisfied for the rule to match. + Condition blocks are documented below. + items: + properties: + hostHeader: + description: 'Contains a single values item which is a list + of host header patterns to match. The maximum size of + each pattern is 128 characters. Comparison is case insensitive. + Wildcard characters supported: * (matches 0 or more characters) + and ? (matches exactly 1 character). Only one pattern + needs to match for the condition to be satisfied.' + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpHeader: + description: HTTP headers to match. HTTP Header block fields + documented below. + properties: + httpHeaderName: + description: Name of HTTP header to search. The maximum + size is 40 characters. Comparison is case insensitive. + Only RFC7240 characters are supported. Wildcards are + not supported. You cannot use HTTP header condition + to specify the host header, use a host-header condition + instead. + type: string + values: + description: 'List of header value patterns to match. + Maximum size of each pattern is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). If the same header appears multiple + times in the request they will be searched in order + until a match is found. Only one pattern needs to + match for the condition to be satisfied. To require + that all of the strings are a match, create one condition + block per string.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpRequestMethod: + description: Contains a single values item which is a list + of HTTP request methods or verbs to match. Maximum size + is 40 characters. Only allowed characters are A-Z, hyphen + (-) and underscore (_). Comparison is case sensitive. + Wildcards are not supported. Only one needs to match for + the condition to be satisfied. AWS recommends that GET + and HEAD requests are routed in the same way because the + response to a HEAD request may be cached. + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). 
To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + pathPattern: + description: 'Contains a single values item which is a list + of path patterns to match against the request URL. Maximum + size of each pattern is 128 characters. Comparison is + case sensitive. Wildcard characters supported: * (matches + 0 or more characters) and ? (matches exactly 1 character). + Only one pattern needs to match for the condition to be + satisfied. Path pattern is compared only to the path of + the URL, not to its query string. To compare against the + query string, use a query_string condition.' + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + queryString: + description: Query strings to match. Query String block + fields documented below. + items: + properties: + key: + description: Query string key pattern to match. + type: string + value: + description: Query string value pattern to match. + type: string + type: object + type: array + sourceIp: + description: Contains a single values item which is a list + of source IP CIDR notations to match. You can use both + IPv4 and IPv6 addresses. Wildcards are not supported. 
+ Condition is satisfied if the source IP address of the + request matches one of the CIDR blocks. Condition is not + satisfied by the addresses in the X-Forwarded-For header, + use http_header condition instead. + properties: + values: + description: 'Query string pairs or values to match. + Query String Value blocks documented below. Multiple + values blocks can be specified, see example above. + Maximum size of each string is 128 characters. Comparison + is case insensitive. Wildcard characters supported: + * (matches 0 or more characters) and ? (matches exactly + 1 character). To search for a literal ''*'' or ''?'' + character in a query string, escape the character + with a backslash (\). Only one pair needs to match + for the condition to be satisfied.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + id: + description: The ARN of the rule (matches arn) + type: string + listenerArn: + description: The ARN of the listener to which to attach the rule. + type: string + priority: + description: The priority for the rule between 1 and 50000. Leaving + it unset will automatically set the rule with next available + priority after currently existing highest rule. A listener can't + have multiple rules with the same priority. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elbv2.aws.upbound.io_lblisteners.yaml b/package/crds/elbv2.aws.upbound.io_lblisteners.yaml index 115d4a8b38..6f26fef31b 100644 --- a/package/crds/elbv2.aws.upbound.io_lblisteners.yaml +++ b/package/crds/elbv2.aws.upbound.io_lblisteners.yaml @@ -1612,3 +1612,1552 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LBListener is the Schema for the LBListeners API. Provides a + Load Balancer Listener resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LBListenerSpec defines the desired state of LBListener + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + alpnPolicy: + description: Name of the Application-Layer Protocol Negotiation + (ALPN) policy. Can be set if protocol is TLS. Valid values are + HTTP1Only, HTTP2Only, HTTP2Optional, HTTP2Preferred, and None. + type: string + certificateArn: + description: ARN of the default SSL server certificate. Exactly + one certificate is required if the protocol is HTTPS. For adding + additional SSL certificates, see the aws_lb_listener_certificate + resource. + type: string + defaultAction: + description: Configuration block for default actions. Detailed + below. + items: + properties: + authenticateCognito: + description: Configuration block for using Amazon Cognito + to authenticate users. Specify only when type is authenticate-cognito. + Detailed below. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'Query parameters to include in the redirect + request to the authorization endpoint. Max: 10. Detailed + below.' 
+ type: object + x-kubernetes-map-type: granular + onUnauthenticatedRequest: + description: Behavior if the user is not authenticated. + Valid values are deny, allow and authenticate. + type: string + scope: + description: Set of user claims to be requested from + the IdP. + type: string + sessionCookieName: + description: Name of the cookie used to maintain session + information. + type: string + sessionTimeout: + description: Maximum duration of the authentication + session, in seconds. + type: number + userPoolArn: + description: ARN of the Cognito user pool. + type: string + userPoolClientId: + description: ID of the Cognito user pool client. + type: string + userPoolDomain: + description: Domain prefix or fully-qualified domain + name of the Cognito user pool. + type: string + type: object + authenticateOidc: + description: Configuration block for an identity provider + that is compliant with OpenID Connect (OIDC). Specify + only when type is authenticate-oidc. Detailed below. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'Query parameters to include in the redirect + request to the authorization endpoint. Max: 10.' + type: object + x-kubernetes-map-type: granular + authorizationEndpoint: + description: Authorization endpoint of the IdP. + type: string + clientId: + description: OAuth 2.0 client identifier. + type: string + clientSecretSecretRef: + description: OAuth 2.0 client secret. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + issuer: + description: OIDC issuer identifier of the IdP. + type: string + onUnauthenticatedRequest: + description: 'Behavior if the user is not authenticated. 
+ Valid values: deny, allow and authenticate' + type: string + scope: + description: Set of user claims to be requested from + the IdP. + type: string + sessionCookieName: + description: Name of the cookie used to maintain session + information. + type: string + sessionTimeout: + description: Maximum duration of the authentication + session, in seconds. + type: number + tokenEndpoint: + description: Token endpoint of the IdP. + type: string + userInfoEndpoint: + description: User info endpoint of the IdP. + type: string + type: object + fixedResponse: + description: Information for creating an action that returns + a custom HTTP response. Required if type is fixed-response. + properties: + contentType: + description: Content type. Valid values are text/plain, + text/css, text/html, application/javascript and application/json. + type: string + messageBody: + description: Message body. + type: string + statusCode: + description: HTTP response code. Valid values are 2XX, + 4XX, or 5XX. + type: string + type: object + forward: + description: |- + Configuration block for creating an action that distributes requests among one or more target groups. + Specify only if type is forward. + Cannot be specified with target_group_arn. + Detailed below. + properties: + stickiness: + description: Configuration block for target group stickiness + for the rule. Detailed below. + properties: + duration: + description: Time period, in seconds, during which + requests from a client should be routed to the + same target group. The range is 1-604800 seconds + (7 days). + type: number + enabled: + description: Whether target group stickiness is + enabled. Default is false. + type: boolean + type: object + targetGroup: + description: Set of 1-5 target group blocks. Detailed + below. + items: + properties: + arn: + description: ARN of the target group. + type: string + arnRef: + description: Reference to a LBTargetGroup in elbv2 + to populate arn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a LBTargetGroup in elbv2 + to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + weight: + description: Weight. The range is 0 to 999. + type: number + type: object + type: array + type: object + order: + description: |- + Order for the action. + The action with the lowest value for order is performed first. + Valid values are between 1 and 50000. + Defaults to the position in the list of actions. + type: number + redirect: + description: Configuration block for creating a redirect + action. Required if type is redirect. Detailed below. + properties: + host: + description: 'Hostname. This component is not percent-encoded. + The hostname can contain #{host}. Defaults to #{host}.' + type: string + path: + description: 'Absolute path, starting with the leading + "/". This component is not percent-encoded. The path + can contain #{host}, #{path}, and #{port}. Defaults + to /#{path}.' + type: string + port: + description: 'Port. Specify a value from 1 to 65535 + or #{port}. Defaults to #{port}.' + type: string + protocol: + description: 'Protocol. Valid values are HTTP, HTTPS, + or #{protocol}. Defaults to #{protocol}.' + type: string + query: + description: 'Query parameters, URL-encoded when necessary, + but not percent-encoded. Do not include the leading + "?". Defaults to #{query}.' + type: string + statusCode: + description: HTTP redirect code. The redirect is either + permanent (HTTP_301) or temporary (HTTP_302). + type: string + type: object + targetGroupArn: + description: |- + ARN of the Target Group to which to route traffic. + Specify only if type is forward and you want to route to a single target group. + To route to one or more target groups, use a forward block instead. + Cannot be specified with forward. + type: string + targetGroupArnRef: + description: Reference to a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetGroupArnSelector: + description: Selector for a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: Type of routing action. Valid values are forward, + redirect, fixed-response, authenticate-cognito and authenticate-oidc. + type: string + type: object + type: array + loadBalancerArn: + description: ARN of the load balancer. + type: string + loadBalancerArnRef: + description: Reference to a LB in elbv2 to populate loadBalancerArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + loadBalancerArnSelector: + description: Selector for a LB in elbv2 to populate loadBalancerArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + mutualAuthentication: + description: The mutual authentication configuration information. + Detailed below. + properties: + ignoreClientCertificateExpiry: + description: Whether client certificate expiry is ignored. + Default is false. + type: boolean + mode: + description: Valid values are off, verify and passthrough. + type: string + trustStoreArn: + description: ARN of the elbv2 Trust Store. + type: string + type: object + port: + description: Port on which the load balancer is listening. Not + valid for Gateway Load Balancers. + type: number + protocol: + description: Protocol for connections from clients to the load + balancer. For Application Load Balancers, valid values are HTTP + and HTTPS, with a default of HTTP. For Network Load Balancers, + valid values are TCP, TLS, UDP, and TCP_UDP. Not valid to use + UDP or TCP_UDP if dual-stack mode is enabled. Not valid for + Gateway Load Balancers. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + sslPolicy: + description: Name of the SSL Policy for the listener. Required + if protocol is HTTPS or TLS. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + alpnPolicy: + description: Name of the Application-Layer Protocol Negotiation + (ALPN) policy. Can be set if protocol is TLS. Valid values are + HTTP1Only, HTTP2Only, HTTP2Optional, HTTP2Preferred, and None. + type: string + certificateArn: + description: ARN of the default SSL server certificate. Exactly + one certificate is required if the protocol is HTTPS. For adding + additional SSL certificates, see the aws_lb_listener_certificate + resource. + type: string + defaultAction: + description: Configuration block for default actions. Detailed + below. + items: + properties: + authenticateCognito: + description: Configuration block for using Amazon Cognito + to authenticate users. Specify only when type is authenticate-cognito. + Detailed below. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'Query parameters to include in the redirect + request to the authorization endpoint. Max: 10. Detailed + below.' + type: object + x-kubernetes-map-type: granular + onUnauthenticatedRequest: + description: Behavior if the user is not authenticated. + Valid values are deny, allow and authenticate. + type: string + scope: + description: Set of user claims to be requested from + the IdP. + type: string + sessionCookieName: + description: Name of the cookie used to maintain session + information. 
+ type: string + sessionTimeout: + description: Maximum duration of the authentication + session, in seconds. + type: number + userPoolArn: + description: ARN of the Cognito user pool. + type: string + userPoolClientId: + description: ID of the Cognito user pool client. + type: string + userPoolDomain: + description: Domain prefix or fully-qualified domain + name of the Cognito user pool. + type: string + type: object + authenticateOidc: + description: Configuration block for an identity provider + that is compliant with OpenID Connect (OIDC). Specify + only when type is authenticate-oidc. Detailed below. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'Query parameters to include in the redirect + request to the authorization endpoint. Max: 10.' + type: object + x-kubernetes-map-type: granular + authorizationEndpoint: + description: Authorization endpoint of the IdP. + type: string + clientId: + description: OAuth 2.0 client identifier. + type: string + clientSecretSecretRef: + description: OAuth 2.0 client secret. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + issuer: + description: OIDC issuer identifier of the IdP. + type: string + onUnauthenticatedRequest: + description: 'Behavior if the user is not authenticated. + Valid values: deny, allow and authenticate' + type: string + scope: + description: Set of user claims to be requested from + the IdP. + type: string + sessionCookieName: + description: Name of the cookie used to maintain session + information. + type: string + sessionTimeout: + description: Maximum duration of the authentication + session, in seconds. + type: number + tokenEndpoint: + description: Token endpoint of the IdP. 
+ type: string + userInfoEndpoint: + description: User info endpoint of the IdP. + type: string + required: + - clientSecretSecretRef + type: object + fixedResponse: + description: Information for creating an action that returns + a custom HTTP response. Required if type is fixed-response. + properties: + contentType: + description: Content type. Valid values are text/plain, + text/css, text/html, application/javascript and application/json. + type: string + messageBody: + description: Message body. + type: string + statusCode: + description: HTTP response code. Valid values are 2XX, + 4XX, or 5XX. + type: string + type: object + forward: + description: |- + Configuration block for creating an action that distributes requests among one or more target groups. + Specify only if type is forward. + Cannot be specified with target_group_arn. + Detailed below. + properties: + stickiness: + description: Configuration block for target group stickiness + for the rule. Detailed below. + properties: + duration: + description: Time period, in seconds, during which + requests from a client should be routed to the + same target group. The range is 1-604800 seconds + (7 days). + type: number + enabled: + description: Whether target group stickiness is + enabled. Default is false. + type: boolean + type: object + targetGroup: + description: Set of 1-5 target group blocks. Detailed + below. + items: + properties: + arn: + description: ARN of the target group. + type: string + arnRef: + description: Reference to a LBTargetGroup in elbv2 + to populate arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a LBTargetGroup in elbv2 + to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + weight: + description: Weight. The range is 0 to 999. + type: number + type: object + type: array + type: object + order: + description: |- + Order for the action. + The action with the lowest value for order is performed first. + Valid values are between 1 and 50000. 
+ Defaults to the position in the list of actions. + type: number + redirect: + description: Configuration block for creating a redirect + action. Required if type is redirect. Detailed below. + properties: + host: + description: 'Hostname. This component is not percent-encoded. + The hostname can contain #{host}. Defaults to #{host}.' + type: string + path: + description: 'Absolute path, starting with the leading + "/". This component is not percent-encoded. The path + can contain #{host}, #{path}, and #{port}. Defaults + to /#{path}.' + type: string + port: + description: 'Port. Specify a value from 1 to 65535 + or #{port}. Defaults to #{port}.' + type: string + protocol: + description: 'Protocol. Valid values are HTTP, HTTPS, + or #{protocol}. Defaults to #{protocol}.' + type: string + query: + description: 'Query parameters, URL-encoded when necessary, + but not percent-encoded. Do not include the leading + "?". Defaults to #{query}.' + type: string + statusCode: + description: HTTP redirect code. The redirect is either + permanent (HTTP_301) or temporary (HTTP_302). + type: string + type: object + targetGroupArn: + description: |- + ARN of the Target Group to which to route traffic. + Specify only if type is forward and you want to route to a single target group. + To route to one or more target groups, use a forward block instead. + Cannot be specified with forward. + type: string + targetGroupArnRef: + description: Reference to a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetGroupArnSelector: + description: Selector for a LBTargetGroup in elbv2 to populate + targetGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: Type of routing action. Valid values are forward, + redirect, fixed-response, authenticate-cognito and authenticate-oidc. + type: string + type: object + type: array + loadBalancerArn: + description: ARN of the load balancer. 
+ type: string + loadBalancerArnRef: + description: Reference to a LB in elbv2 to populate loadBalancerArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + loadBalancerArnSelector: + description: Selector for a LB in elbv2 to populate loadBalancerArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + mutualAuthentication: + description: The mutual authentication configuration information. + Detailed below. + properties: + ignoreClientCertificateExpiry: + description: Whether client certificate expiry is ignored. + Default is false. + type: boolean + mode: + description: Valid values are off, verify and passthrough. + type: string + trustStoreArn: + description: ARN of the elbv2 Trust Store. + type: string + type: object + port: + description: Port on which the load balancer is listening. Not + valid for Gateway Load Balancers. + type: number + protocol: + description: Protocol for connections from clients to the load + balancer. For Application Load Balancers, valid values are HTTP + and HTTPS, with a default of HTTP. For Network Load Balancers, + valid values are TCP, TLS, UDP, and TCP_UDP. Not valid to use + UDP or TCP_UDP if dual-stack mode is enabled. Not valid for + Gateway Load Balancers. + type: string + sslPolicy: + description: Name of the SSL Policy for the listener. Required + if protocol is HTTPS or TLS. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.defaultAction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultAction) + || (has(self.initProvider) && has(self.initProvider.defaultAction))' + status: + description: LBListenerStatus defines the observed state of LBListener. + properties: + atProvider: + properties: + alpnPolicy: + description: Name of the Application-Layer Protocol Negotiation + (ALPN) policy. Can be set if protocol is TLS. 
Valid values are + HTTP1Only, HTTP2Only, HTTP2Optional, HTTP2Preferred, and None. + type: string + arn: + description: ARN of the listener (matches id). + type: string + certificateArn: + description: ARN of the default SSL server certificate. Exactly + one certificate is required if the protocol is HTTPS. For adding + additional SSL certificates, see the aws_lb_listener_certificate + resource. + type: string + defaultAction: + description: Configuration block for default actions. Detailed + below. + items: + properties: + authenticateCognito: + description: Configuration block for using Amazon Cognito + to authenticate users. Specify only when type is authenticate-cognito. + Detailed below. + properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'Query parameters to include in the redirect + request to the authorization endpoint. Max: 10. Detailed + below.' + type: object + x-kubernetes-map-type: granular + onUnauthenticatedRequest: + description: Behavior if the user is not authenticated. + Valid values are deny, allow and authenticate. + type: string + scope: + description: Set of user claims to be requested from + the IdP. + type: string + sessionCookieName: + description: Name of the cookie used to maintain session + information. + type: string + sessionTimeout: + description: Maximum duration of the authentication + session, in seconds. + type: number + userPoolArn: + description: ARN of the Cognito user pool. + type: string + userPoolClientId: + description: ID of the Cognito user pool client. + type: string + userPoolDomain: + description: Domain prefix or fully-qualified domain + name of the Cognito user pool. + type: string + type: object + authenticateOidc: + description: Configuration block for an identity provider + that is compliant with OpenID Connect (OIDC). Specify + only when type is authenticate-oidc. Detailed below. 
+ properties: + authenticationRequestExtraParams: + additionalProperties: + type: string + description: 'Query parameters to include in the redirect + request to the authorization endpoint. Max: 10.' + type: object + x-kubernetes-map-type: granular + authorizationEndpoint: + description: Authorization endpoint of the IdP. + type: string + clientId: + description: OAuth 2.0 client identifier. + type: string + issuer: + description: OIDC issuer identifier of the IdP. + type: string + onUnauthenticatedRequest: + description: 'Behavior if the user is not authenticated. + Valid values: deny, allow and authenticate' + type: string + scope: + description: Set of user claims to be requested from + the IdP. + type: string + sessionCookieName: + description: Name of the cookie used to maintain session + information. + type: string + sessionTimeout: + description: Maximum duration of the authentication + session, in seconds. + type: number + tokenEndpoint: + description: Token endpoint of the IdP. + type: string + userInfoEndpoint: + description: User info endpoint of the IdP. + type: string + type: object + fixedResponse: + description: Information for creating an action that returns + a custom HTTP response. Required if type is fixed-response. + properties: + contentType: + description: Content type. Valid values are text/plain, + text/css, text/html, application/javascript and application/json. + type: string + messageBody: + description: Message body. + type: string + statusCode: + description: HTTP response code. Valid values are 2XX, + 4XX, or 5XX. + type: string + type: object + forward: + description: |- + Configuration block for creating an action that distributes requests among one or more target groups. + Specify only if type is forward. + Cannot be specified with target_group_arn. + Detailed below. + properties: + stickiness: + description: Configuration block for target group stickiness + for the rule. Detailed below. 
+ properties: + duration: + description: Time period, in seconds, during which + requests from a client should be routed to the + same target group. The range is 1-604800 seconds + (7 days). + type: number + enabled: + description: Whether target group stickiness is + enabled. Default is false. + type: boolean + type: object + targetGroup: + description: Set of 1-5 target group blocks. Detailed + below. + items: + properties: + arn: + description: ARN of the target group. + type: string + weight: + description: Weight. The range is 0 to 999. + type: number + type: object + type: array + type: object + order: + description: |- + Order for the action. + The action with the lowest value for order is performed first. + Valid values are between 1 and 50000. + Defaults to the position in the list of actions. + type: number + redirect: + description: Configuration block for creating a redirect + action. Required if type is redirect. Detailed below. + properties: + host: + description: 'Hostname. This component is not percent-encoded. + The hostname can contain #{host}. Defaults to #{host}.' + type: string + path: + description: 'Absolute path, starting with the leading + "/". This component is not percent-encoded. The path + can contain #{host}, #{path}, and #{port}. Defaults + to /#{path}.' + type: string + port: + description: 'Port. Specify a value from 1 to 65535 + or #{port}. Defaults to #{port}.' + type: string + protocol: + description: 'Protocol. Valid values are HTTP, HTTPS, + or #{protocol}. Defaults to #{protocol}.' + type: string + query: + description: 'Query parameters, URL-encoded when necessary, + but not percent-encoded. Do not include the leading + "?". Defaults to #{query}.' + type: string + statusCode: + description: HTTP redirect code. The redirect is either + permanent (HTTP_301) or temporary (HTTP_302). + type: string + type: object + targetGroupArn: + description: |- + ARN of the Target Group to which to route traffic. 
+ Specify only if type is forward and you want to route to a single target group. + To route to one or more target groups, use a forward block instead. + Cannot be specified with forward. + type: string + type: + description: Type of routing action. Valid values are forward, + redirect, fixed-response, authenticate-cognito and authenticate-oidc. + type: string + type: object + type: array + id: + description: ARN of the listener (matches arn). + type: string + loadBalancerArn: + description: ARN of the load balancer. + type: string + mutualAuthentication: + description: The mutual authentication configuration information. + Detailed below. + properties: + ignoreClientCertificateExpiry: + description: Whether client certificate expiry is ignored. + Default is false. + type: boolean + mode: + description: Valid values are off, verify and passthrough. + type: string + trustStoreArn: + description: ARN of the elbv2 Trust Store. + type: string + type: object + port: + description: Port on which the load balancer is listening. Not + valid for Gateway Load Balancers. + type: number + protocol: + description: Protocol for connections from clients to the load + balancer. For Application Load Balancers, valid values are HTTP + and HTTPS, with a default of HTTP. For Network Load Balancers, + valid values are TCP, TLS, UDP, and TCP_UDP. Not valid to use + UDP or TCP_UDP if dual-stack mode is enabled. Not valid for + Gateway Load Balancers. + type: string + sslPolicy: + description: Name of the SSL Policy for the listener. Required + if protocol is HTTPS or TLS. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elbv2.aws.upbound.io_lbs.yaml b/package/crds/elbv2.aws.upbound.io_lbs.yaml index 26afa39af2..96a46017a8 100644 --- a/package/crds/elbv2.aws.upbound.io_lbs.yaml +++ b/package/crds/elbv2.aws.upbound.io_lbs.yaml @@ -1519,3 +1519,1492 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LB is the Schema for the LBs API. Provides a Load Balancer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LBSpec defines the desired state of LB + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessLogs: + description: Access Logs block. See below. + properties: + bucket: + description: S3 bucket name to store the logs in. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Boolean to enable / disable access_logs. Defaults + to false, even when bucket is specified. + type: boolean + prefix: + description: S3 bucket prefix. Logs are stored in the root + if not configured. + type: string + type: object + clientKeepAlive: + description: Client keep alive value in seconds. The valid range + is 60-604800 seconds. The default is 3600 seconds. + type: number + connectionLogs: + description: Connection Logs block. See below. Only valid for + Load Balancers of type application. + properties: + bucket: + description: S3 bucket name to store the logs in. + type: string + enabled: + description: Boolean to enable / disable connection_logs. + Defaults to false, even when bucket is specified. + type: boolean + prefix: + description: S3 bucket prefix. Logs are stored in the root + if not configured. + type: string + type: object + customerOwnedIpv4Pool: + description: ID of the customer owned ipv4 pool to use for this + load balancer. + type: string + desyncMitigationMode: + description: How the load balancer handles requests that might + pose a security risk to an application due to HTTP desync. 
Valid + values are monitor, defensive (default), strictest. + type: string + dnsRecordClientRoutingPolicy: + description: How traffic is distributed among the load balancer + Availability Zones. Possible values are any_availability_zone + (default), availability_zone_affinity, or partial_availability_zone_affinity. + See Availability Zone DNS affinity for additional details. + Only valid for network type load balancers. + type: string + dropInvalidHeaderFields: + description: Whether HTTP headers with header fields that are + not valid are removed by the load balancer (true) or routed + to targets (false). The default is false. Elastic Load Balancing + requires that message header names contain only alphanumeric + characters and hyphens. Only valid for Load Balancers of type + application. + type: boolean + enableCrossZoneLoadBalancing: + description: If true, cross-zone load balancing of the load balancer + will be enabled. For network and gateway type load balancers, + this feature is disabled by default (false). For application + load balancer this feature is always enabled (true) and cannot + be disabled. Defaults to false. + type: boolean + enableDeletionProtection: + description: If true, deletion of the load balancer will be disabled + via the AWS API. Defaults to false. + type: boolean + enableHttp2: + description: Whether HTTP/2 is enabled in application load balancers. + Defaults to true. + type: boolean + enableTlsVersionAndCipherSuiteHeaders: + description: Whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), + which contain information about the negotiated TLS version and + cipher suite, are added to the client request before sending + it to the target. Only valid for Load Balancers of type application. + Defaults to false + type: boolean + enableWafFailOpen: + description: Whether to allow a WAF-enabled load balancer to route + requests to targets if it is unable to forward the request to + AWS WAF. Defaults to false. 
+ type: boolean + enableXffClientPort: + description: Whether the X-Forwarded-For header should preserve + the source port that the client used to connect to the load + balancer in application load balancers. Defaults to false. + type: boolean + enforceSecurityGroupInboundRulesOnPrivateLinkTraffic: + description: Whether inbound security group rules are enforced + for traffic originating from a PrivateLink. Only valid for Load + Balancers of type network. The possible values are on and off. + type: string + idleTimeout: + description: 'Time in seconds that the connection is allowed to + be idle. Only valid for Load Balancers of type application. + Default: 60.' + type: number + internal: + description: If true, the LB will be internal. Defaults to false. + type: boolean + ipAddressType: + description: Type of IP addresses used by the subnets for your + load balancer. The possible values are ipv4 and dualstack. + type: string + loadBalancerType: + description: Type of load balancer to create. Possible values + are application, gateway, or network. The default value is application. + type: string + name: + description: Name of the LB. This name must be unique within your + AWS account, can have a maximum of 32 characters, must contain + only alphanumeric characters or hyphens, and must not begin + or end with a hyphen. + type: string + preserveHostHeader: + description: Whether the Application Load Balancer should preserve + the Host header in the HTTP request and send it to the target + without any change. Defaults to false. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: List of security group IDs to assign to the LB. Only + valid for Load Balancers of type application or network. For + load balancers of type network security groups cannot be added + if none are currently present, and cannot all be removed once + added. If either of these conditions are met, this will force + a recreation of the resource. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetMapping: + description: Subnet mapping block. See below. For Load Balancers + of type network subnet mappings can only be added. + items: + properties: + allocationId: + description: Allocation ID of the Elastic IP address for + an internet-facing load balancer. + type: string + ipv6Address: + description: IPv6 address. You associate IPv6 CIDR blocks + with your VPC and choose the subnets where you launch + both internet-facing and internal Application Load Balancers + or Network Load Balancers. + type: string + privateIpv4Address: + description: Private IPv4 address for an internal load balancer. + type: string + subnetId: + description: ID of the subnet of which to attach to the + load balancer. You can specify only one subnet per Availability + Zone. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + subnetRefs: + description: References to Subnet in ec2 to populate subnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetSelector: + description: Selector for a list of Subnet in ec2 to populate + subnets. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnets: + description: List of subnet IDs to attach to the LB. 
For Load + Balancers of type network subnets can only be added (see Availability + Zones), deleting a subnet for load balancers of type network + will force a recreation of the resource. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + xffHeaderProcessingMode: + description: Determines how the load balancer modifies the X-Forwarded-For + header in the HTTP request before sending the request to the + target. The possible values are append, preserve, and remove. + Only valid for Load Balancers of type application. The default + is append. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessLogs: + description: Access Logs block. See below. + properties: + bucket: + description: S3 bucket name to store the logs in. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Boolean to enable / disable access_logs. Defaults + to false, even when bucket is specified. + type: boolean + prefix: + description: S3 bucket prefix. 
Logs are stored in the root + if not configured. + type: string + type: object + clientKeepAlive: + description: Client keep alive value in seconds. The valid range + is 60-604800 seconds. The default is 3600 seconds. + type: number + connectionLogs: + description: Connection Logs block. See below. Only valid for + Load Balancers of type application. + properties: + bucket: + description: S3 bucket name to store the logs in. + type: string + enabled: + description: Boolean to enable / disable connection_logs. + Defaults to false, even when bucket is specified. + type: boolean + prefix: + description: S3 bucket prefix. Logs are stored in the root + if not configured. + type: string + type: object + customerOwnedIpv4Pool: + description: ID of the customer owned ipv4 pool to use for this + load balancer. + type: string + desyncMitigationMode: + description: How the load balancer handles requests that might + pose a security risk to an application due to HTTP desync. Valid + values are monitor, defensive (default), strictest. + type: string + dnsRecordClientRoutingPolicy: + description: How traffic is distributed among the load balancer + Availability Zones. Possible values are any_availability_zone + (default), availability_zone_affinity, or partial_availability_zone_affinity. + See Availability Zone DNS affinity for additional details. + Only valid for network type load balancers. + type: string + dropInvalidHeaderFields: + description: Whether HTTP headers with header fields that are + not valid are removed by the load balancer (true) or routed + to targets (false). The default is false. Elastic Load Balancing + requires that message header names contain only alphanumeric + characters and hyphens. Only valid for Load Balancers of type + application. + type: boolean + enableCrossZoneLoadBalancing: + description: If true, cross-zone load balancing of the load balancer + will be enabled. 
For network and gateway type load balancers, + this feature is disabled by default (false). For application + load balancer this feature is always enabled (true) and cannot + be disabled. Defaults to false. + type: boolean + enableDeletionProtection: + description: If true, deletion of the load balancer will be disabled + via the AWS API. Defaults to false. + type: boolean + enableHttp2: + description: Whether HTTP/2 is enabled in application load balancers. + Defaults to true. + type: boolean + enableTlsVersionAndCipherSuiteHeaders: + description: Whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), + which contain information about the negotiated TLS version and + cipher suite, are added to the client request before sending + it to the target. Only valid for Load Balancers of type application. + Defaults to false + type: boolean + enableWafFailOpen: + description: Whether to allow a WAF-enabled load balancer to route + requests to targets if it is unable to forward the request to + AWS WAF. Defaults to false. + type: boolean + enableXffClientPort: + description: Whether the X-Forwarded-For header should preserve + the source port that the client used to connect to the load + balancer in application load balancers. Defaults to false. + type: boolean + enforceSecurityGroupInboundRulesOnPrivateLinkTraffic: + description: Whether inbound security group rules are enforced + for traffic originating from a PrivateLink. Only valid for Load + Balancers of type network. The possible values are on and off. + type: string + idleTimeout: + description: 'Time in seconds that the connection is allowed to + be idle. Only valid for Load Balancers of type application. + Default: 60.' + type: number + internal: + description: If true, the LB will be internal. Defaults to false. + type: boolean + ipAddressType: + description: Type of IP addresses used by the subnets for your + load balancer. The possible values are ipv4 and dualstack. 
+ type: string + loadBalancerType: + description: Type of load balancer to create. Possible values + are application, gateway, or network. The default value is application. + type: string + name: + description: Name of the LB. This name must be unique within your + AWS account, can have a maximum of 32 characters, must contain + only alphanumeric characters or hyphens, and must not begin + or end with a hyphen. + type: string + preserveHostHeader: + description: Whether the Application Load Balancer should preserve + the Host header in the HTTP request and send it to the target + without any change. Defaults to false. + type: boolean + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: List of security group IDs to assign to the LB. Only + valid for Load Balancers of type application or network. For + load balancers of type network security groups cannot be added + if none are currently present, and cannot all be removed once + added. If either of these conditions are met, this will force + a recreation of the resource. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetMapping: + description: Subnet mapping block. See below. For Load Balancers + of type network subnet mappings can only be added. + items: + properties: + allocationId: + description: Allocation ID of the Elastic IP address for + an internet-facing load balancer. + type: string + ipv6Address: + description: IPv6 address. You associate IPv6 CIDR blocks + with your VPC and choose the subnets where you launch + both internet-facing and internal Application Load Balancers + or Network Load Balancers. + type: string + privateIpv4Address: + description: Private IPv4 address for an internal load balancer. 
+ type: string + subnetId: + description: ID of the subnet of which to attach to the + load balancer. You can specify only one subnet per Availability + Zone. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + subnetRefs: + description: References to Subnet in ec2 to populate subnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetSelector: + description: Selector for a list of Subnet in ec2 to populate + subnets. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnets: + description: List of subnet IDs to attach to the LB. For Load + Balancers of type network subnets can only be added (see Availability + Zones), deleting a subnet for load balancers of type network + will force a recreation of the resource. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + xffHeaderProcessingMode: + description: Determines how the load balancer modifies the X-Forwarded-For + header in the HTTP request before sending the request to the + target. The possible values are append, preserve, and remove. + Only valid for Load Balancers of type application. The default + is append. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: LBStatus defines the observed state of LB. + properties: + atProvider: + properties: + accessLogs: + description: Access Logs block. See below. + properties: + bucket: + description: S3 bucket name to store the logs in. + type: string + enabled: + description: Boolean to enable / disable access_logs. Defaults + to false, even when bucket is specified. + type: boolean + prefix: + description: S3 bucket prefix. Logs are stored in the root + if not configured. + type: string + type: object + arn: + description: ARN of the load balancer (matches id). 
+ type: string + arnSuffix: + description: ARN suffix for use with CloudWatch Metrics. + type: string + clientKeepAlive: + description: Client keep alive value in seconds. The valid range + is 60-604800 seconds. The default is 3600 seconds. + type: number + connectionLogs: + description: Connection Logs block. See below. Only valid for + Load Balancers of type application. + properties: + bucket: + description: S3 bucket name to store the logs in. + type: string + enabled: + description: Boolean to enable / disable connection_logs. + Defaults to false, even when bucket is specified. + type: boolean + prefix: + description: S3 bucket prefix. Logs are stored in the root + if not configured. + type: string + type: object + customerOwnedIpv4Pool: + description: ID of the customer owned ipv4 pool to use for this + load balancer. + type: string + desyncMitigationMode: + description: How the load balancer handles requests that might + pose a security risk to an application due to HTTP desync. Valid + values are monitor, defensive (default), strictest. + type: string + dnsName: + description: DNS name of the load balancer. + type: string + dnsRecordClientRoutingPolicy: + description: How traffic is distributed among the load balancer + Availability Zones. Possible values are any_availability_zone + (default), availability_zone_affinity, or partial_availability_zone_affinity. + See Availability Zone DNS affinity for additional details. + Only valid for network type load balancers. + type: string + dropInvalidHeaderFields: + description: Whether HTTP headers with header fields that are + not valid are removed by the load balancer (true) or routed + to targets (false). The default is false. Elastic Load Balancing + requires that message header names contain only alphanumeric + characters and hyphens. Only valid for Load Balancers of type + application. 
+ type: boolean + enableCrossZoneLoadBalancing: + description: If true, cross-zone load balancing of the load balancer + will be enabled. For network and gateway type load balancers, + this feature is disabled by default (false). For application + load balancer this feature is always enabled (true) and cannot + be disabled. Defaults to false. + type: boolean + enableDeletionProtection: + description: If true, deletion of the load balancer will be disabled + via the AWS API. Defaults to false. + type: boolean + enableHttp2: + description: Whether HTTP/2 is enabled in application load balancers. + Defaults to true. + type: boolean + enableTlsVersionAndCipherSuiteHeaders: + description: Whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), + which contain information about the negotiated TLS version and + cipher suite, are added to the client request before sending + it to the target. Only valid for Load Balancers of type application. + Defaults to false + type: boolean + enableWafFailOpen: + description: Whether to allow a WAF-enabled load balancer to route + requests to targets if it is unable to forward the request to + AWS WAF. Defaults to false. + type: boolean + enableXffClientPort: + description: Whether the X-Forwarded-For header should preserve + the source port that the client used to connect to the load + balancer in application load balancers. Defaults to false. + type: boolean + enforceSecurityGroupInboundRulesOnPrivateLinkTraffic: + description: Whether inbound security group rules are enforced + for traffic originating from a PrivateLink. Only valid for Load + Balancers of type network. The possible values are on and off. + type: string + id: + description: ARN of the load balancer (matches arn). + type: string + idleTimeout: + description: 'Time in seconds that the connection is allowed to + be idle. Only valid for Load Balancers of type application. + Default: 60.' 
+ type: number + internal: + description: If true, the LB will be internal. Defaults to false. + type: boolean + ipAddressType: + description: Type of IP addresses used by the subnets for your + load balancer. The possible values are ipv4 and dualstack. + type: string + loadBalancerType: + description: Type of load balancer to create. Possible values + are application, gateway, or network. The default value is application. + type: string + name: + description: Name of the LB. This name must be unique within your + AWS account, can have a maximum of 32 characters, must contain + only alphanumeric characters or hyphens, and must not begin + or end with a hyphen. + type: string + preserveHostHeader: + description: Whether the Application Load Balancer should preserve + the Host header in the HTTP request and send it to the target + without any change. Defaults to false. + type: boolean + securityGroups: + description: List of security group IDs to assign to the LB. Only + valid for Load Balancers of type application or network. For + load balancers of type network security groups cannot be added + if none are currently present, and cannot all be removed once + added. If either of these conditions are met, this will force + a recreation of the resource. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetMapping: + description: Subnet mapping block. See below. For Load Balancers + of type network subnet mappings can only be added. + items: + properties: + allocationId: + description: Allocation ID of the Elastic IP address for + an internet-facing load balancer. + type: string + ipv6Address: + description: IPv6 address. You associate IPv6 CIDR blocks + with your VPC and choose the subnets where you launch + both internet-facing and internal Application Load Balancers + or Network Load Balancers. + type: string + outpostId: + description: ID of the Outpost containing the load balancer. 
+ type: string + privateIpv4Address: + description: Private IPv4 address for an internal load balancer. + type: string + subnetId: + description: ID of the subnet of which to attach to the + load balancer. You can specify only one subnet per Availability + Zone. + type: string + type: object + type: array + subnets: + description: List of subnet IDs to attach to the LB. For Load + Balancers of type network subnets can only be added (see Availability + Zones), deleting a subnet for load balancers of type network + will force a recreation of the resource. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + vpcId: + description: ARN of the load balancer (matches arn). + type: string + xffHeaderProcessingMode: + description: Determines how the load balancer modifies the X-Forwarded-For + header in the HTTP request before sending the request to the + target. The possible values are append, preserve, and remove. + Only valid for Load Balancers of type application. The default + is append. + type: string + zoneId: + description: Canonical hosted zone ID of the load balancer (to + be used in a Route 53 Alias record). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elbv2.aws.upbound.io_lbtargetgroups.yaml b/package/crds/elbv2.aws.upbound.io_lbtargetgroups.yaml index 3ef48a34c5..804371cc5e 100644 --- a/package/crds/elbv2.aws.upbound.io_lbtargetgroups.yaml +++ b/package/crds/elbv2.aws.upbound.io_lbtargetgroups.yaml @@ -1153,3 +1153,1123 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LBTargetGroup is the Schema for the LBTargetGroups API. Provides + a Target Group resource for use with Load Balancers. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LBTargetGroupSpec defines the desired state of LBTargetGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + connectionTermination: + description: Whether to terminate connections at the end of the + deregistration timeout on Network Load Balancers. See doc for + more information. Default is false. + type: boolean + deregistrationDelay: + description: Amount time for Elastic Load Balancing to wait before + changing the state of a deregistering target from draining to + unused. The range is 0-3600 seconds. The default value is 300 + seconds. + type: string + healthCheck: + description: Health Check configuration block. Detailed below. + properties: + enabled: + description: Whether health checks are enabled. Defaults to + true. + type: boolean + healthyThreshold: + description: Number of consecutive health check successes + required before considering a target healthy. The range + is 2-10. Defaults to 3. + type: number + interval: + description: Approximate amount of time, in seconds, between + health checks of an individual target. The range is 5-300. 
+ For lambda target groups, it needs to be greater than the + timeout of the underlying lambda. Defaults to 30. + type: number + matcher: + description: separated individual values (e.g., "200,202") + or a range of values (e.g., "200-299"). + type: string + path: + description: (May be required) Destination for the health + check request. Required for HTTP/HTTPS ALB and HTTP NLB. + Only applies to HTTP/HTTPS. + type: string + port: + description: |- + The port the load balancer uses when performing health checks on targets. + Valid values are either traffic-port, to use the same port as the target group, or a valid port number between 1 and 65536. + Default is traffic-port. + type: string + protocol: + description: |- + Protocol the load balancer uses when performing health checks on targets. + Must be one of TCP, HTTP, or HTTPS. + The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. + Default is HTTP. + Cannot be specified when the target_type is lambda. + type: string + timeout: + description: Amount of time, in seconds, during which no response + from a target means a failed health check. The range is + 2–120 seconds. For target groups with a protocol of HTTP, + the default is 6 seconds. For target groups with a protocol + of TCP, TLS or HTTPS, the default is 10 seconds. For target + groups with a protocol of GENEVE, the default is 5 seconds. + If the target type is lambda, the default is 30 seconds. + type: number + unhealthyThreshold: + description: Number of consecutive health check failures required + before considering a target unhealthy. The range is 2-10. + Defaults to 3. + type: number + type: object + ipAddressType: + description: The type of IP addresses used by the target group, + only supported when target type is set to ip. Possible values + are ipv4 or ipv6. 
+ type: string + lambdaMultiValueHeadersEnabled: + description: Whether the request and response headers exchanged + between the load balancer and the Lambda function include arrays + of values or strings. Only applies when target_type is lambda. + Default is false. + type: boolean + loadBalancingAlgorithmType: + description: Determines how the load balancer selects targets + when routing requests. Only applicable for Application Load + Balancer Target Groups. The value is round_robin, least_outstanding_requests, + or weighted_random. The default is round_robin. + type: string + loadBalancingAnomalyMitigation: + description: Determines whether to enable target anomaly mitigation. Target + anomaly mitigation is only supported by the weighted_random + load balancing algorithm type. See doc for more information. The + value is "on" or "off". The default is "off". + type: string + loadBalancingCrossZoneEnabled: + description: Indicates whether cross zone load balancing is enabled. + The value is "true", "false" or "use_load_balancer_configuration". + The default is "use_load_balancer_configuration". + type: string + name: + description: Name of the target group. This name must be unique + per region per account, can have a maximum of 32 characters, + must contain only alphanumeric characters or hyphens, and must + not begin or end with a hyphen. + type: string + port: + description: (May be required, Forces new resource) Port on which + targets receive traffic, unless overridden when registering + a specific target. Required when target_type is instance, ip + or alb. Does not apply when target_type is lambda. + type: number + preserveClientIp: + description: Whether client IP preservation is enabled. See doc + for more information. + type: string + protocol: + description: |- + (May be required, Forces new resource) Protocol to use for routing traffic to the targets. + Should be one of GENEVE, HTTP, HTTPS, TCP, TCP_UDP, TLS, or UDP. 
+ Required when target_type is instance, ip, or alb. + Does not apply when target_type is lambda. + type: string + protocolVersion: + description: Only applicable when protocol is HTTP or HTTPS. The + protocol version. Specify GRPC to send requests to targets using + gRPC. Specify HTTP2 to send requests to targets using HTTP/2. + The default is HTTP1, which sends requests to targets using + HTTP/1.1 + type: string + proxyProtocolV2: + description: Whether to enable support for proxy protocol v2 on + Network Load Balancers. See doc for more information. Default + is false. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + slowStart: + description: Amount time for targets to warm up before the load + balancer sends them a full share of requests. The range is 30-900 + seconds or 0 to disable. The default value is 0 seconds. + type: number + stickiness: + description: Stickiness configuration block. Detailed below. + properties: + cookieDuration: + description: Only used when the type is lb_cookie. The time + period, in seconds, during which requests from a client + should be routed to the same target. After this time period + expires, the load balancer-generated cookie is considered + stale. The range is 1 second to 1 week (604800 seconds). + The default value is 1 day (86400 seconds). + type: number + cookieName: + description: Name of the application based cookie. AWSALB, + AWSALBAPP, and AWSALBTG prefixes are reserved and cannot + be used. Only needed when type is app_cookie. + type: string + enabled: + description: Whether health checks are enabled. Defaults to + true. + type: boolean + type: + description: The type of sticky sessions. The only current + possible values are lb_cookie, app_cookie for ALBs, source_ip + for NLBs, and source_ip_dest_ip, source_ip_dest_ip_proto + for GWLBs. 
+ type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + targetFailover: + description: Target failover block. Only applicable for Gateway + Load Balancer target groups. See target_failover for more information. + items: + properties: + onDeregistration: + description: 'Indicates how the GWLB handles existing flows + when a target is deregistered. Possible values are rebalance + and no_rebalance. Must match the attribute value set for + on_unhealthy. Default: no_rebalance.' + type: string + onUnhealthy: + description: 'Indicates how the GWLB handles existing flows + when a target is unhealthy. Possible values are rebalance + and no_rebalance. Must match the attribute value set for + on_deregistration. Default: no_rebalance.' + type: string + type: object + type: array + targetHealthState: + description: Target health state block. Only applicable for Network + Load Balancer target groups when protocol is TCP or TLS. See + target_health_state for more information. + items: + properties: + enableUnhealthyConnectionTermination: + description: 'Indicates whether the load balancer terminates + connections to unhealthy targets. Possible values are + true or false. Default: true.' + type: boolean + type: object + type: array + targetType: + description: |- + Type of target that you must specify when registering targets with this target group. + See doc for supported values. + The default is instance. + type: string + vpcId: + description: Identifier of the VPC in which to create the target + group. Required when target_type is instance, ip or alb. Does + not apply when target_type is lambda. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + connectionTermination: + description: Whether to terminate connections at the end of the + deregistration timeout on Network Load Balancers. See doc for + more information. Default is false. + type: boolean + deregistrationDelay: + description: Amount time for Elastic Load Balancing to wait before + changing the state of a deregistering target from draining to + unused. The range is 0-3600 seconds. The default value is 300 + seconds. + type: string + healthCheck: + description: Health Check configuration block. Detailed below. + properties: + enabled: + description: Whether health checks are enabled. Defaults to + true. + type: boolean + healthyThreshold: + description: Number of consecutive health check successes + required before considering a target healthy. The range + is 2-10. Defaults to 3. + type: number + interval: + description: Approximate amount of time, in seconds, between + health checks of an individual target. The range is 5-300. + For lambda target groups, it needs to be greater than the + timeout of the underlying lambda. Defaults to 30. 
+ type: number + matcher: + description: separated individual values (e.g., "200,202") + or a range of values (e.g., "200-299"). + type: string + path: + description: (May be required) Destination for the health + check request. Required for HTTP/HTTPS ALB and HTTP NLB. + Only applies to HTTP/HTTPS. + type: string + port: + description: |- + The port the load balancer uses when performing health checks on targets. + Valid values are either traffic-port, to use the same port as the target group, or a valid port number between 1 and 65536. + Default is traffic-port. + type: string + protocol: + description: |- + Protocol the load balancer uses when performing health checks on targets. + Must be one of TCP, HTTP, or HTTPS. + The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. + Default is HTTP. + Cannot be specified when the target_type is lambda. + type: string + timeout: + description: Amount of time, in seconds, during which no response + from a target means a failed health check. The range is + 2–120 seconds. For target groups with a protocol of HTTP, + the default is 6 seconds. For target groups with a protocol + of TCP, TLS or HTTPS, the default is 10 seconds. For target + groups with a protocol of GENEVE, the default is 5 seconds. + If the target type is lambda, the default is 30 seconds. + type: number + unhealthyThreshold: + description: Number of consecutive health check failures required + before considering a target unhealthy. The range is 2-10. + Defaults to 3. + type: number + type: object + ipAddressType: + description: The type of IP addresses used by the target group, + only supported when target type is set to ip. Possible values + are ipv4 or ipv6. + type: string + lambdaMultiValueHeadersEnabled: + description: Whether the request and response headers exchanged + between the load balancer and the Lambda function include arrays + of values or strings. Only applies when target_type is lambda. 
+ Default is false. + type: boolean + loadBalancingAlgorithmType: + description: Determines how the load balancer selects targets + when routing requests. Only applicable for Application Load + Balancer Target Groups. The value is round_robin, least_outstanding_requests, + or weighted_random. The default is round_robin. + type: string + loadBalancingAnomalyMitigation: + description: Determines whether to enable target anomaly mitigation. Target + anomaly mitigation is only supported by the weighted_random + load balancing algorithm type. See doc for more information. The + value is "on" or "off". The default is "off". + type: string + loadBalancingCrossZoneEnabled: + description: Indicates whether cross zone load balancing is enabled. + The value is "true", "false" or "use_load_balancer_configuration". + The default is "use_load_balancer_configuration". + type: string + name: + description: Name of the target group. This name must be unique + per region per account, can have a maximum of 32 characters, + must contain only alphanumeric characters or hyphens, and must + not begin or end with a hyphen. + type: string + port: + description: (May be required, Forces new resource) Port on which + targets receive traffic, unless overridden when registering + a specific target. Required when target_type is instance, ip + or alb. Does not apply when target_type is lambda. + type: number + preserveClientIp: + description: Whether client IP preservation is enabled. See doc + for more information. + type: string + protocol: + description: |- + (May be required, Forces new resource) Protocol to use for routing traffic to the targets. + Should be one of GENEVE, HTTP, HTTPS, TCP, TCP_UDP, TLS, or UDP. + Required when target_type is instance, ip, or alb. + Does not apply when target_type is lambda. + type: string + protocolVersion: + description: Only applicable when protocol is HTTP or HTTPS. The + protocol version. Specify GRPC to send requests to targets using + gRPC. 
Specify HTTP2 to send requests to targets using HTTP/2. + The default is HTTP1, which sends requests to targets using + HTTP/1.1 + type: string + proxyProtocolV2: + description: Whether to enable support for proxy protocol v2 on + Network Load Balancers. See doc for more information. Default + is false. + type: boolean + slowStart: + description: Amount time for targets to warm up before the load + balancer sends them a full share of requests. The range is 30-900 + seconds or 0 to disable. The default value is 0 seconds. + type: number + stickiness: + description: Stickiness configuration block. Detailed below. + properties: + cookieDuration: + description: Only used when the type is lb_cookie. The time + period, in seconds, during which requests from a client + should be routed to the same target. After this time period + expires, the load balancer-generated cookie is considered + stale. The range is 1 second to 1 week (604800 seconds). + The default value is 1 day (86400 seconds). + type: number + cookieName: + description: Name of the application based cookie. AWSALB, + AWSALBAPP, and AWSALBTG prefixes are reserved and cannot + be used. Only needed when type is app_cookie. + type: string + enabled: + description: Whether health checks are enabled. Defaults to + true. + type: boolean + type: + description: The type of sticky sessions. The only current + possible values are lb_cookie, app_cookie for ALBs, source_ip + for NLBs, and source_ip_dest_ip, source_ip_dest_ip_proto + for GWLBs. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + targetFailover: + description: Target failover block. Only applicable for Gateway + Load Balancer target groups. See target_failover for more information. + items: + properties: + onDeregistration: + description: 'Indicates how the GWLB handles existing flows + when a target is deregistered. 
Possible values are rebalance + and no_rebalance. Must match the attribute value set for + on_unhealthy. Default: no_rebalance.' + type: string + onUnhealthy: + description: 'Indicates how the GWLB handles existing flows + when a target is unhealthy. Possible values are rebalance + and no_rebalance. Must match the attribute value set for + on_deregistration. Default: no_rebalance.' + type: string + type: object + type: array + targetHealthState: + description: Target health state block. Only applicable for Network + Load Balancer target groups when protocol is TCP or TLS. See + target_health_state for more information. + items: + properties: + enableUnhealthyConnectionTermination: + description: 'Indicates whether the load balancer terminates + connections to unhealthy targets. Possible values are + true or false. Default: true.' + type: boolean + type: object + type: array + targetType: + description: |- + Type of target that you must specify when registering targets with this target group. + See doc for supported values. + The default is instance. + type: string + vpcId: + description: Identifier of the VPC in which to create the target + group. Required when target_type is instance, ip or alb. Does + not apply when target_type is lambda. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: LBTargetGroupStatus defines the observed state of LBTargetGroup. + properties: + atProvider: + properties: + arn: + description: ARN of the Target Group (matches id). + type: string + arnSuffix: + description: ARN suffix for use with CloudWatch Metrics. 
+ type: string + connectionTermination: + description: Whether to terminate connections at the end of the + deregistration timeout on Network Load Balancers. See doc for + more information. Default is false. + type: boolean + deregistrationDelay: + description: Amount time for Elastic Load Balancing to wait before + changing the state of a deregistering target from draining to + unused. The range is 0-3600 seconds. The default value is 300 + seconds. + type: string + healthCheck: + description: Health Check configuration block. Detailed below. + properties: + enabled: + description: Whether health checks are enabled. Defaults to + true. + type: boolean + healthyThreshold: + description: Number of consecutive health check successes + required before considering a target healthy. The range + is 2-10. Defaults to 3. + type: number + interval: + description: Approximate amount of time, in seconds, between + health checks of an individual target. The range is 5-300. + For lambda target groups, it needs to be greater than the + timeout of the underlying lambda. Defaults to 30. + type: number + matcher: + description: separated individual values (e.g., "200,202") + or a range of values (e.g., "200-299"). + type: string + path: + description: (May be required) Destination for the health + check request. Required for HTTP/HTTPS ALB and HTTP NLB. + Only applies to HTTP/HTTPS. + type: string + port: + description: |- + The port the load balancer uses when performing health checks on targets. + Valid values are either traffic-port, to use the same port as the target group, or a valid port number between 1 and 65536. + Default is traffic-port. + type: string + protocol: + description: |- + Protocol the load balancer uses when performing health checks on targets. + Must be one of TCP, HTTP, or HTTPS. + The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. + Default is HTTP. + Cannot be specified when the target_type is lambda. 
+ type: string + timeout: + description: Amount of time, in seconds, during which no response + from a target means a failed health check. The range is + 2–120 seconds. For target groups with a protocol of HTTP, + the default is 6 seconds. For target groups with a protocol + of TCP, TLS or HTTPS, the default is 10 seconds. For target + groups with a protocol of GENEVE, the default is 5 seconds. + If the target type is lambda, the default is 30 seconds. + type: number + unhealthyThreshold: + description: Number of consecutive health check failures required + before considering a target unhealthy. The range is 2-10. + Defaults to 3. + type: number + type: object + id: + description: ARN of the Target Group (matches arn). + type: string + ipAddressType: + description: The type of IP addresses used by the target group, + only supported when target type is set to ip. Possible values + are ipv4 or ipv6. + type: string + lambdaMultiValueHeadersEnabled: + description: Whether the request and response headers exchanged + between the load balancer and the Lambda function include arrays + of values or strings. Only applies when target_type is lambda. + Default is false. + type: boolean + loadBalancerArns: + description: ARNs of the Load Balancers associated with the Target + Group. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancingAlgorithmType: + description: Determines how the load balancer selects targets + when routing requests. Only applicable for Application Load + Balancer Target Groups. The value is round_robin, least_outstanding_requests, + or weighted_random. The default is round_robin. + type: string + loadBalancingAnomalyMitigation: + description: Determines whether to enable target anomaly mitigation. Target + anomaly mitigation is only supported by the weighted_random + load balancing algorithm type. See doc for more information. The + value is "on" or "off". The default is "off". 
+ type: string + loadBalancingCrossZoneEnabled: + description: Indicates whether cross zone load balancing is enabled. + The value is "true", "false" or "use_load_balancer_configuration". + The default is "use_load_balancer_configuration". + type: string + name: + description: Name of the target group. This name must be unique + per region per account, can have a maximum of 32 characters, + must contain only alphanumeric characters or hyphens, and must + not begin or end with a hyphen. + type: string + port: + description: (May be required, Forces new resource) Port on which + targets receive traffic, unless overridden when registering + a specific target. Required when target_type is instance, ip + or alb. Does not apply when target_type is lambda. + type: number + preserveClientIp: + description: Whether client IP preservation is enabled. See doc + for more information. + type: string + protocol: + description: |- + (May be required, Forces new resource) Protocol to use for routing traffic to the targets. + Should be one of GENEVE, HTTP, HTTPS, TCP, TCP_UDP, TLS, or UDP. + Required when target_type is instance, ip, or alb. + Does not apply when target_type is lambda. + type: string + protocolVersion: + description: Only applicable when protocol is HTTP or HTTPS. The + protocol version. Specify GRPC to send requests to targets using + gRPC. Specify HTTP2 to send requests to targets using HTTP/2. + The default is HTTP1, which sends requests to targets using + HTTP/1.1 + type: string + proxyProtocolV2: + description: Whether to enable support for proxy protocol v2 on + Network Load Balancers. See doc for more information. Default + is false. + type: boolean + slowStart: + description: Amount time for targets to warm up before the load + balancer sends them a full share of requests. The range is 30-900 + seconds or 0 to disable. The default value is 0 seconds. + type: number + stickiness: + description: Stickiness configuration block. Detailed below. 
+ properties: + cookieDuration: + description: Only used when the type is lb_cookie. The time + period, in seconds, during which requests from a client + should be routed to the same target. After this time period + expires, the load balancer-generated cookie is considered + stale. The range is 1 second to 1 week (604800 seconds). + The default value is 1 day (86400 seconds). + type: number + cookieName: + description: Name of the application based cookie. AWSALB, + AWSALBAPP, and AWSALBTG prefixes are reserved and cannot + be used. Only needed when type is app_cookie. + type: string + enabled: + description: Whether health checks are enabled. Defaults to + true. + type: boolean + type: + description: The type of sticky sessions. The only current + possible values are lb_cookie, app_cookie for ALBs, source_ip + for NLBs, and source_ip_dest_ip, source_ip_dest_ip_proto + for GWLBs. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + targetFailover: + description: Target failover block. Only applicable for Gateway + Load Balancer target groups. See target_failover for more information. + items: + properties: + onDeregistration: + description: 'Indicates how the GWLB handles existing flows + when a target is deregistered. Possible values are rebalance + and no_rebalance. Must match the attribute value set for + on_unhealthy. Default: no_rebalance.' + type: string + onUnhealthy: + description: 'Indicates how the GWLB handles existing flows + when a target is unhealthy. Possible values are rebalance + and no_rebalance. Must match the attribute value set for + on_deregistration. Default: no_rebalance.' 
+ type: string + type: object + type: array + targetHealthState: + description: Target health state block. Only applicable for Network + Load Balancer target groups when protocol is TCP or TLS. See + target_health_state for more information. + items: + properties: + enableUnhealthyConnectionTermination: + description: 'Indicates whether the load balancer terminates + connections to unhealthy targets. Possible values are + true or false. Default: true.' + type: boolean + type: object + type: array + targetType: + description: |- + Type of target that you must specify when registering targets with this target group. + See doc for supported values. + The default is instance. + type: string + vpcId: + description: Identifier of the VPC in which to create the target + group. Required when target_type is instance, ip or alb. Does + not apply when target_type is lambda. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/emrserverless.aws.upbound.io_applications.yaml b/package/crds/emrserverless.aws.upbound.io_applications.yaml index 50ff01b64c..cb7692c123 100644 --- a/package/crds/emrserverless.aws.upbound.io_applications.yaml +++ b/package/crds/emrserverless.aws.upbound.io_applications.yaml @@ -765,3 +765,702 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Application is the Schema for the Applications API. Manages an + EMR Serverless Application + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ApplicationSpec defines the desired state of Application + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + architecture: + description: – The CPU architecture of an application. Valid + values are ARM64 or X86_64. Default value is X86_64. + type: string + autoStartConfiguration: + description: – The configuration for an application to automatically + start on job submission. + properties: + enabled: + description: Enables the application to automatically start + on job submission. Defaults to true. + type: boolean + type: object + autoStopConfiguration: + description: – The configuration for an application to automatically + stop after a certain amount of time being idle. + properties: + enabled: + description: Enables the application to automatically start + on job submission. Defaults to true. 
+ type: boolean + idleTimeoutMinutes: + description: The amount of idle time in minutes after which + your application will automatically stop. Defaults to 15 + minutes. + type: number + type: object + imageConfiguration: + description: – The image configuration applied to all worker + types. + properties: + imageUri: + description: The image URI. + type: string + type: object + initialCapacity: + description: – The capacity to initialize when the application + is created. + items: + properties: + initialCapacityConfig: + description: The initial capacity configuration per worker. + properties: + workerConfiguration: + description: The resource configuration of the initial + capacity configuration. + properties: + cpu: + description: The maximum allowed CPU for an application. + type: string + disk: + description: The maximum allowed disk for an application. + type: string + memory: + description: The maximum allowed resources for an + application. + type: string + type: object + workerCount: + description: The number of workers in the initial capacity + configuration. + type: number + type: object + initialCapacityType: + description: The worker type for an analytics framework. + For Spark applications, the key can either be set to Driver + or Executor. For Hive applications, it can be set to HiveDriver + or TezTask. + type: string + type: object + type: array + maximumCapacity: + description: – The maximum capacity to allocate when the application + is created. This is cumulative across all workers at any given + point in time, not just when an application is created. No new + resources will be created once any one of the defined limits + is hit. + properties: + cpu: + description: The maximum allowed CPU for an application. + type: string + disk: + description: The maximum allowed disk for an application. + type: string + memory: + description: The maximum allowed resources for an application. 
+ type: string + type: object + name: + description: – The name of the application. + type: string + networkConfiguration: + description: – The network configuration for customer VPC connectivity. + properties: + securityGroupIds: + description: The array of security group Ids for customer + VPC connectivity. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The array of subnet Ids for customer VPC connectivity. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + releaseLabel: + description: – The EMR release version associated with the application. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: – The type of application you want to start, such + as spark or hive. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + architecture: + description: – The CPU architecture of an application. Valid + values are ARM64 or X86_64. Default value is X86_64. + type: string + autoStartConfiguration: + description: – The configuration for an application to automatically + start on job submission. 
+ properties: + enabled: + description: Enables the application to automatically start + on job submission. Defaults to true. + type: boolean + type: object + autoStopConfiguration: + description: – The configuration for an application to automatically + stop after a certain amount of time being idle. + properties: + enabled: + description: Enables the application to automatically start + on job submission. Defaults to true. + type: boolean + idleTimeoutMinutes: + description: The amount of idle time in minutes after which + your application will automatically stop. Defaults to 15 + minutes. + type: number + type: object + imageConfiguration: + description: – The image configuration applied to all worker + types. + properties: + imageUri: + description: The image URI. + type: string + type: object + initialCapacity: + description: – The capacity to initialize when the application + is created. + items: + properties: + initialCapacityConfig: + description: The initial capacity configuration per worker. + properties: + workerConfiguration: + description: The resource configuration of the initial + capacity configuration. + properties: + cpu: + description: The maximum allowed CPU for an application. + type: string + disk: + description: The maximum allowed disk for an application. + type: string + memory: + description: The maximum allowed resources for an + application. + type: string + type: object + workerCount: + description: The number of workers in the initial capacity + configuration. + type: number + type: object + initialCapacityType: + description: The worker type for an analytics framework. + For Spark applications, the key can either be set to Driver + or Executor. For Hive applications, it can be set to HiveDriver + or TezTask. + type: string + type: object + type: array + maximumCapacity: + description: – The maximum capacity to allocate when the application + is created. 
This is cumulative across all workers at any given + point in time, not just when an application is created. No new + resources will be created once any one of the defined limits + is hit. + properties: + cpu: + description: The maximum allowed CPU for an application. + type: string + disk: + description: The maximum allowed disk for an application. + type: string + memory: + description: The maximum allowed resources for an application. + type: string + type: object + name: + description: – The name of the application. + type: string + networkConfiguration: + description: – The network configuration for customer VPC connectivity. + properties: + securityGroupIds: + description: The array of security group Ids for customer + VPC connectivity. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The array of subnet Ids for customer VPC connectivity. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + releaseLabel: + description: – The EMR release version associated with the application. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: – The type of application you want to start, such + as spark or hive. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.releaseLabel is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.releaseLabel) + || (has(self.initProvider) && has(self.initProvider.releaseLabel))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: ApplicationStatus defines the observed state of Application. 
+ properties: + atProvider: + properties: + architecture: + description: – The CPU architecture of an application. Valid + values are ARM64 or X86_64. Default value is X86_64. + type: string + arn: + description: ARN of the cluster. + type: string + autoStartConfiguration: + description: – The configuration for an application to automatically + start on job submission. + properties: + enabled: + description: Enables the application to automatically start + on job submission. Defaults to true. + type: boolean + type: object + autoStopConfiguration: + description: – The configuration for an application to automatically + stop after a certain amount of time being idle. + properties: + enabled: + description: Enables the application to automatically start + on job submission. Defaults to true. + type: boolean + idleTimeoutMinutes: + description: The amount of idle time in minutes after which + your application will automatically stop. Defaults to 15 + minutes. + type: number + type: object + id: + description: The ID of the cluster. + type: string + imageConfiguration: + description: – The image configuration applied to all worker + types. + properties: + imageUri: + description: The image URI. + type: string + type: object + initialCapacity: + description: – The capacity to initialize when the application + is created. + items: + properties: + initialCapacityConfig: + description: The initial capacity configuration per worker. + properties: + workerConfiguration: + description: The resource configuration of the initial + capacity configuration. + properties: + cpu: + description: The maximum allowed CPU for an application. + type: string + disk: + description: The maximum allowed disk for an application. + type: string + memory: + description: The maximum allowed resources for an + application. + type: string + type: object + workerCount: + description: The number of workers in the initial capacity + configuration. 
+ type: number + type: object + initialCapacityType: + description: The worker type for an analytics framework. + For Spark applications, the key can either be set to Driver + or Executor. For Hive applications, it can be set to HiveDriver + or TezTask. + type: string + type: object + type: array + maximumCapacity: + description: – The maximum capacity to allocate when the application + is created. This is cumulative across all workers at any given + point in time, not just when an application is created. No new + resources will be created once any one of the defined limits + is hit. + properties: + cpu: + description: The maximum allowed CPU for an application. + type: string + disk: + description: The maximum allowed disk for an application. + type: string + memory: + description: The maximum allowed resources for an application. + type: string + type: object + name: + description: – The name of the application. + type: string + networkConfiguration: + description: – The network configuration for customer VPC connectivity. + properties: + securityGroupIds: + description: The array of security group Ids for customer + VPC connectivity. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The array of subnet Ids for customer VPC connectivity. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + releaseLabel: + description: – The EMR release version associated with the application. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: + description: – The type of application you want to start, such + as spark or hive. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/evidently.aws.upbound.io_features.yaml b/package/crds/evidently.aws.upbound.io_features.yaml index aa03f2dea9..fb1364b64d 100644 --- a/package/crds/evidently.aws.upbound.io_features.yaml +++ b/package/crds/evidently.aws.upbound.io_features.yaml @@ -676,3 +676,655 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Feature is the Schema for the Features API. Provides a CloudWatch + Evidently Feature resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FeatureSpec defines the desired state of Feature + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + defaultVariation: + description: The name of the variation to use as the default variation. + The default variation is served to users who are not allocated + to any ongoing launches or experiments of this feature. This + variation must also be listed in the variations structure. If + you omit default_variation, the first variation listed in the + variations structure is used as the default variation. + type: string + description: + description: Specifies the description of the feature. + type: string + entityOverrides: + additionalProperties: + type: string + description: Specify users that should always be served a specific + variation of a feature. Each user is specified by a key-value + pair . For each key, specify a user by entering their user ID, + account ID, or some other identifier. For the value, specify + the name of the variation that they are to be served. 
+ type: object + x-kubernetes-map-type: granular + evaluationStrategy: + description: Specify ALL_RULES to activate the traffic allocation + specified by any ongoing launches or experiments. Specify DEFAULT_VARIATION + to serve the default variation to all users instead. + type: string + project: + description: The name or ARN of the project that is to contain + the new feature. + type: string + projectRef: + description: Reference to a Project in evidently to populate project. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + projectSelector: + description: Selector for a Project in evidently to populate project. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + variations: + description: One or more blocks that contain the configuration + of the feature's different variations. Detailed below + items: + properties: + name: + description: The name of the variation. Minimum length of + 1. Maximum length of 127. + type: string + value: + description: A block that specifies the value assigned to + this variation. Detailed below + properties: + boolValue: + description: If this feature uses the Boolean variation + type, this field contains the Boolean value of this + variation. + type: string + doubleValue: + description: If this feature uses the double integer + variation type, this field contains the double integer + value of this variation. + type: string + longValue: + description: If this feature uses the long variation + type, this field contains the long value of this variation. + Minimum value of -9007199254740991. Maximum value + of 9007199254740991. + type: string + stringValue: + description: If this feature uses the string variation + type, this field contains the string value of this + variation. Minimum length of 0. Maximum length of + 512. 
+ type: string + type: object + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + defaultVariation: + description: The name of the variation to use as the default variation. + The default variation is served to users who are not allocated + to any ongoing launches or experiments of this feature. This + variation must also be listed in the variations structure. If + you omit default_variation, the first variation listed in the + variations structure is used as the default variation. + type: string + description: + description: Specifies the description of the feature. + type: string + entityOverrides: + additionalProperties: + type: string + description: Specify users that should always be served a specific + variation of a feature. Each user is specified by a key-value + pair . For each key, specify a user by entering their user ID, + account ID, or some other identifier. For the value, specify + the name of the variation that they are to be served. + type: object + x-kubernetes-map-type: granular + evaluationStrategy: + description: Specify ALL_RULES to activate the traffic allocation + specified by any ongoing launches or experiments. Specify DEFAULT_VARIATION + to serve the default variation to all users instead. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + variations: + description: One or more blocks that contain the configuration + of the feature's different variations. Detailed below + items: + properties: + name: + description: The name of the variation. Minimum length of + 1. Maximum length of 127. + type: string + value: + description: A block that specifies the value assigned to + this variation. Detailed below + properties: + boolValue: + description: If this feature uses the Boolean variation + type, this field contains the Boolean value of this + variation. + type: string + doubleValue: + description: If this feature uses the double integer + variation type, this field contains the double integer + value of this variation. + type: string + longValue: + description: If this feature uses the long variation + type, this field contains the long value of this variation. + Minimum value of -9007199254740991. Maximum value + of 9007199254740991. + type: string + stringValue: + description: If this feature uses the string variation + type, this field contains the string value of this + variation. Minimum length of 0. Maximum length of + 512. + type: string + type: object + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.variations is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.variations) + || (has(self.initProvider) && has(self.initProvider.variations))' + status: + description: FeatureStatus defines the observed state of Feature. + properties: + atProvider: + properties: + arn: + description: The ARN of the feature. + type: string + createdTime: + description: The date and time that the feature is created. + type: string + defaultVariation: + description: The name of the variation to use as the default variation. + The default variation is served to users who are not allocated + to any ongoing launches or experiments of this feature. This + variation must also be listed in the variations structure. If + you omit default_variation, the first variation listed in the + variations structure is used as the default variation. 
+ type: string + description: + description: Specifies the description of the feature. + type: string + entityOverrides: + additionalProperties: + type: string + description: Specify users that should always be served a specific + variation of a feature. Each user is specified by a key-value + pair . For each key, specify a user by entering their user ID, + account ID, or some other identifier. For the value, specify + the name of the variation that they are to be served. + type: object + x-kubernetes-map-type: granular + evaluationRules: + description: One or more blocks that define the evaluation rules + for the feature. Detailed below + items: + properties: + name: + description: The name of the experiment or launch. + type: string + type: + description: This value is aws.evidently.splits if this + is an evaluation rule for a launch, and it is aws.evidently.onlineab + if this is an evaluation rule for an experiment. + type: string + type: object + type: array + evaluationStrategy: + description: Specify ALL_RULES to activate the traffic allocation + specified by any ongoing launches or experiments. Specify DEFAULT_VARIATION + to serve the default variation to all users instead. + type: string + id: + description: The feature name and the project name or arn separated + by a colon (:). + type: string + lastUpdatedTime: + description: The date and time that the feature was most recently + updated. + type: string + project: + description: The name or ARN of the project that is to contain + the new feature. + type: string + status: + description: The current state of the feature. Valid values are + AVAILABLE and UPDATING. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + valueType: + description: 'Defines the type of value used to define the different + feature variations. Valid Values: STRING, LONG, DOUBLE, BOOLEAN.' + type: string + variations: + description: One or more blocks that contain the configuration + of the feature's different variations. Detailed below + items: + properties: + name: + description: The name of the variation. Minimum length of + 1. Maximum length of 127. + type: string + value: + description: A block that specifies the value assigned to + this variation. Detailed below + properties: + boolValue: + description: If this feature uses the Boolean variation + type, this field contains the Boolean value of this + variation. + type: string + doubleValue: + description: If this feature uses the double integer + variation type, this field contains the double integer + value of this variation. + type: string + longValue: + description: If this feature uses the long variation + type, this field contains the long value of this variation. + Minimum value of -9007199254740991. Maximum value + of 9007199254740991. + type: string + stringValue: + description: If this feature uses the string variation + type, this field contains the string value of this + variation. Minimum length of 0. Maximum length of + 512. + type: string + type: object + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/evidently.aws.upbound.io_projects.yaml b/package/crds/evidently.aws.upbound.io_projects.yaml index e67fe31caa..b8b0a3ca6d 100644 --- a/package/crds/evidently.aws.upbound.io_projects.yaml +++ b/package/crds/evidently.aws.upbound.io_projects.yaml @@ -522,3 +522,489 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Project is the Schema for the Projects API. Provides a CloudWatch + Evidently Project resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ProjectSpec defines the desired state of Project + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dataDelivery: + description: A block that contains information about where Evidently + is to store evaluation events for longer term storage, if you + choose to do so. If you choose not to store these events, Evidently + deletes them after using them to produce metrics and other experiment + results that you can view. See below. + properties: + cloudwatchLogs: + description: A block that defines the CloudWatch Log Group + that stores the evaluation events. See below. 
+ properties: + logGroup: + description: The name of the log group where the project + stores evaluation events. + type: string + type: object + s3Destination: + description: A block that defines the S3 bucket and prefix + that stores the evaluation events. See below. + properties: + bucket: + description: The name of the bucket in which Evidently + stores evaluation events. + type: string + prefix: + description: The bucket prefix in which Evidently stores + evaluation events. + type: string + type: object + type: object + description: + description: Specifies the description of the project. + type: string + name: + description: A name for the project. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dataDelivery: + description: A block that contains information about where Evidently + is to store evaluation events for longer term storage, if you + choose to do so. If you choose not to store these events, Evidently + deletes them after using them to produce metrics and other experiment + results that you can view. See below. 
+ properties: + cloudwatchLogs: + description: A block that defines the CloudWatch Log Group + that stores the evaluation events. See below. + properties: + logGroup: + description: The name of the log group where the project + stores evaluation events. + type: string + type: object + s3Destination: + description: A block that defines the S3 bucket and prefix + that stores the evaluation events. See below. + properties: + bucket: + description: The name of the bucket in which Evidently + stores evaluation events. + type: string + prefix: + description: The bucket prefix in which Evidently stores + evaluation events. + type: string + type: object + type: object + description: + description: Specifies the description of the project. + type: string + name: + description: A name for the project. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ProjectStatus defines the observed state of Project. + properties: + atProvider: + properties: + activeExperimentCount: + description: The number of ongoing experiments currently in the + project. + type: number + activeLaunchCount: + description: The number of ongoing launches currently in the project. + type: number + arn: + description: The ARN of the project. + type: string + createdTime: + description: The date and time that the project is created. + type: string + dataDelivery: + description: A block that contains information about where Evidently + is to store evaluation events for longer term storage, if you + choose to do so. If you choose not to store these events, Evidently + deletes them after using them to produce metrics and other experiment + results that you can view. See below. + properties: + cloudwatchLogs: + description: A block that defines the CloudWatch Log Group + that stores the evaluation events. See below. + properties: + logGroup: + description: The name of the log group where the project + stores evaluation events. 
+ type: string + type: object + s3Destination: + description: A block that defines the S3 bucket and prefix + that stores the evaluation events. See below. + properties: + bucket: + description: The name of the bucket in which Evidently + stores evaluation events. + type: string + prefix: + description: The bucket prefix in which Evidently stores + evaluation events. + type: string + type: object + type: object + description: + description: Specifies the description of the project. + type: string + experimentCount: + description: The number of experiments currently in the project. + This includes all experiments that have been created and not + deleted, whether they are ongoing or not. + type: number + featureCount: + description: The number of features currently in the project. + type: number + id: + description: The ID has the same value as the arn of the project. + type: string + lastUpdatedTime: + description: The date and time that the project was most recently + updated. + type: string + launchCount: + description: The number of launches currently in the project. + This includes all launches that have been created and not deleted, + whether they are ongoing or not. + type: number + name: + description: A name for the project. + type: string + status: + description: The current state of the project. Valid values are + AVAILABLE and UPDATING. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/firehose.aws.upbound.io_deliverystreams.yaml b/package/crds/firehose.aws.upbound.io_deliverystreams.yaml index 7f6ade09ca..89c1ea8559 100644 --- a/package/crds/firehose.aws.upbound.io_deliverystreams.yaml +++ b/package/crds/firehose.aws.upbound.io_deliverystreams.yaml @@ -11806,3 +11806,11066 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DeliveryStream is the Schema for the DeliveryStreams API. Provides + a AWS Kinesis Firehose Delivery Stream + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DeliveryStreamSpec defines the desired state of DeliveryStream + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + destination: + description: – This is the destination to where the data is delivered. + The only options are s3 (Deprecated, use extended_s3 instead), + extended_s3, redshift, elasticsearch, splunk, http_endpoint, + opensearch, opensearchserverless and snowflake. + type: string + destinationId: + type: string + elasticsearchConfiguration: + description: Configuration options when destination is elasticsearch. + See elasticsearch_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 900, before delivering + it to the destination. The default value is 300s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 100, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. 
+ properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + clusterEndpoint: + description: The endpoint to use when communicating with the + cluster. Conflicts with domain_arn. + type: string + domainArn: + description: The ARN of the Amazon ES domain. The pattern + needs to be arn:.*. Conflicts with cluster_endpoint. + type: string + domainArnRef: + description: Reference to a Domain in elasticsearch to populate + domainArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainArnSelector: + description: Selector for a Domain in elasticsearch to populate + domainArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + indexName: + description: The Elasticsearch index name. + type: string + indexRotationPeriod: + description: The Elasticsearch index rotation period. Index + rotation appends a timestamp to the IndexName to facilitate + expiration of old data. Valid values are NoRotation, OneHour, + OneDay, OneWeek, and OneMonth. The default value is OneDay. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. 
Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Amazon + Elasticsearch, the total amount of time, in seconds between + 0 to 7200, during which Firehose re-attempts delivery (including + the first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 300s. There will be no retry if the value is 0. + type: number + roleArn: + description: The ARN of the IAM role to be assumed by Firehose + for calling the Amazon ES Configuration API and for indexing + documents. The IAM role must have permission for DescribeElasticsearchDomain, + DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The + pattern needs to be arn:.*. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default + value is FailedDocumentsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. 
+ properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. 
You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + typeName: + description: The Elasticsearch type name with maximum length + of 100 characters. + type: string + vpcConfig: + description: The VPC configuration for the delivery stream + to connect to Elastic Search associated with the VPC. See + vpc_config block below for details. + properties: + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of security group IDs to associate + with Kinesis Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs to associate with Kinesis + Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + extendedS3Configuration: + description: Enhanced configuration options for the s3 destination. + See extended_s3_configuration block below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering it + to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values are + GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + customTimeZone: + description: The time zone you prefer. Valid values are UTC + or a non-3-letter IANA time zones (for example, America/Los_Angeles). + Default value is UTC. + type: string + dataFormatConversionConfiguration: + description: Nested argument for the serializer, deserializer, + and schema for converting data from the JSON format to the + Parquet or ORC format before writing it to Amazon S3. See + data_format_conversion_configuration block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + inputFormatConfiguration: + description: Specifies the deserializer that you want + Kinesis Data Firehose to use to convert the format of + your data from JSON. 
See input_format_configuration + block below for details. + properties: + deserializer: + description: Specifies which deserializer to use. + You can choose either the Apache Hive JSON SerDe + or the OpenX JSON SerDe. See deserializer block + below for details. + properties: + hiveJsonSerDe: + description: Specifies the native Hive / HCatalog + JsonSerDe. More details below. See hive_json_ser_de + block below for details. + properties: + timestampFormats: + description: A list of how you want Kinesis + Data Firehose to parse the date and time + stamps that may be present in your input + data JSON. To specify these format strings, + follow the pattern syntax of JodaTime's + DateTimeFormat format strings. For more + information, see Class DateTimeFormat. You + can also use the special value millis to + parse time stamps in epoch milliseconds. + If you don't specify a format, Kinesis Data + Firehose uses java.sql.Timestamp::valueOf + by default. + items: + type: string + type: array + type: object + openXJsonSerDe: + description: Specifies the OpenX SerDe. See open_x_json_ser_de + block below for details. + properties: + caseInsensitive: + description: When set to true, which is the + default, Kinesis Data Firehose converts + JSON keys to lowercase before deserializing + them. + type: boolean + columnToJsonKeyMappings: + additionalProperties: + type: string + description: A map of column names to JSON + keys that aren't identical to the column + names. This is useful when the JSON contains + keys that are Hive keywords. For example, + timestamp is a Hive keyword. If you have + a JSON key named timestamp, set this parameter + to { ts = "timestamp" } to map this key + to a column named ts. + type: object + x-kubernetes-map-type: granular + convertDotsInJsonKeysToUnderscores: + description: When set to true, specifies that + the names of the keys include dots and that + you want Kinesis Data Firehose to replace + them with underscores. 
This is useful because + Apache Hive does not allow dots in column + names. For example, if the JSON contains + a key whose name is "a.b", you can define + the column name to be "a_b" when using this + option. Defaults to false. + type: boolean + type: object + type: object + type: object + outputFormatConfiguration: + description: Specifies the serializer that you want Kinesis + Data Firehose to use to convert the format of your data + to the Parquet or ORC format. See output_format_configuration + block below for details. + properties: + serializer: + description: Specifies which serializer to use. You + can choose either the ORC SerDe or the Parquet SerDe. + See serializer block below for details. + properties: + orcSerDe: + description: Specifies converting data to the + ORC format before storing it in Amazon S3. For + more information, see Apache ORC. See orc_ser_de + block below for details. + properties: + blockSizeBytes: + description: The Hadoop Distributed File System + (HDFS) block size. This is useful if you + intend to copy the data from Amazon S3 to + HDFS before querying. The default is 256 + MiB and the minimum is 64 MiB. Kinesis Data + Firehose uses this value for padding calculations. + type: number + bloomFilterColumns: + description: A list of column names for which + you want Kinesis Data Firehose to create + bloom filters. + items: + type: string + type: array + bloomFilterFalsePositiveProbability: + description: The Bloom filter false positive + probability (FPP). The lower the FPP, the + bigger the Bloom filter. The default value + is 0.05, the minimum is 0, and the maximum + is 1. + type: number + compression: + description: The compression code to use over + data blocks. The possible values are UNCOMPRESSED, + SNAPPY, and GZIP, with the default being + SNAPPY. Use SNAPPY for higher decompression + speed. Use GZIP if the compression ratio + is more important than speed. 
+ type: string + dictionaryKeyThreshold: + description: A float that represents the fraction + of the total number of non-null rows. To + turn off dictionary encoding, set this fraction + to a number that is less than the number + of distinct keys in a dictionary. To always + use dictionary encoding, set this threshold + to 1. + type: number + enablePadding: + description: Set this to true to indicate + that you want stripes to be padded to the + HDFS block boundaries. This is useful if + you intend to copy the data from Amazon + S3 to HDFS before querying. The default + is false. + type: boolean + formatVersion: + description: The version of the file to write. + The possible values are V0_11 and V0_12. + The default is V0_12. + type: string + paddingTolerance: + description: A float between 0 and 1 that + defines the tolerance for block padding + as a decimal fraction of stripe size. The + default value is 0.05, which means 5 percent + of stripe size. For the default values of + 64 MiB ORC stripes and 256 MiB HDFS blocks, + the default block padding tolerance of 5 + percent reserves a maximum of 3.2 MiB for + padding within the 256 MiB block. In such + a case, if the available size within the + block is more than 3.2 MiB, a new, smaller + stripe is inserted to fit within that space. + This ensures that no stripe crosses block + boundaries and causes remote reads within + a node-local task. Kinesis Data Firehose + ignores this parameter when enable_padding + is false. + type: number + rowIndexStride: + description: The number of rows between index + entries. The default is 10000 and the minimum + is 1000. + type: number + stripeSizeBytes: + description: The number of bytes in each stripe. + The default is 64 MiB and the minimum is + 8 MiB. + type: number + type: object + parquetSerDe: + description: Specifies converting data to the + Parquet format before storing it in Amazon S3. + For more information, see Apache Parquet. More + details below. 
+ properties: + blockSizeBytes: + description: The Hadoop Distributed File System + (HDFS) block size. This is useful if you + intend to copy the data from Amazon S3 to + HDFS before querying. The default is 256 + MiB and the minimum is 64 MiB. Kinesis Data + Firehose uses this value for padding calculations. + type: number + compression: + description: The compression code to use over + data blocks. The possible values are UNCOMPRESSED, + SNAPPY, and GZIP, with the default being + SNAPPY. Use SNAPPY for higher decompression + speed. Use GZIP if the compression ratio + is more important than speed. + type: string + enableDictionaryCompression: + description: Indicates whether to enable dictionary + compression. + type: boolean + maxPaddingBytes: + description: The maximum amount of padding + to apply. This is useful if you intend to + copy the data from Amazon S3 to HDFS before + querying. The default is 0. + type: number + pageSizeBytes: + description: The Parquet page size. Column + chunks are divided into pages. A page is + conceptually an indivisible unit (in terms + of compression and encoding). The minimum + value is 64 KiB and the default is 1 MiB. + type: number + writerVersion: + description: Indicates the version of row + format to output. The possible values are + V1 and V2. The default is V1. + type: string + type: object + type: object + type: object + schemaConfiguration: + description: Specifies the AWS Glue Data Catalog table + that contains the column information. See schema_configuration + block below for details. + properties: + catalogId: + description: The ID of the AWS Glue Data Catalog. + If you don't supply this, the AWS account ID is + used by default. + type: string + databaseName: + description: Specifies the name of the AWS Glue database + that contains the schema for the output data. + type: string + region: + description: If you don't specify an AWS Region, the + default is the current region. 
+ type: string + roleArn: + description: The ARN of the IAM role to be assumed + by Firehose for calling the Amazon EC2 configuration + API and for creating network interfaces. Make sure + role has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate + roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate + roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tableName: + description: Specifies the AWS Glue table that contains + the column information that constitutes your data + schema. + type: string + tableNameRef: + description: Reference to a CatalogTable in glue to + populate tableName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + tableNameSelector: + description: Selector for a CatalogTable in glue to + populate tableName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + versionId: + description: Specifies the table version for the output + data schema. Defaults to LATEST. + type: string + type: object + type: object + dynamicPartitioningConfiguration: + description: The configuration for dynamic partitioning. Required + when using dynamic partitioning. See dynamic_partitioning_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + retryDuration: + description: After an initial failure to deliver to Splunk, + the total amount of time, in seconds between 0 to 7200, + during which Firehose re-attempts delivery (including + the first attempt). After this time has elapsed, the + failed documents are written to Amazon S3. The default + value is 300s. There will be no retry if the value + is 0. + type: number + type: object + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket name. + For information about how to specify this prefix, see Custom + Prefixes for Amazon S3 Objects. 
+ type: string + fileExtension: + description: The file extension to override the default file + extension (for example, .json). + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is automatically + used for delivered S3 files. You can specify an extra prefix + to be added in front of the time format prefix. Note that + if the prefix ends with a slash, it appears as a folder + in the S3 bucket + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' 
+ type: string + type: object + type: array + type: object + roleArn: + description: The ARN of the IAM role to be assumed by Firehose + for calling the Amazon EC2 configuration API and for creating + network interfaces. Make sure role has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupConfiguration: + description: The configuration for backup in Amazon S3. Required + if s3_backup_mode is Enabled. Supports the same fields as + s3_configuration object. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. 
Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + s3BackupMode: + description: The Amazon S3 backup mode. Valid values are + Disabled and Enabled. Default value is Disabled. + type: string + type: object + httpEndpointConfiguration: + description: Configuration options when destination is http_endpoint. + Requires the user to also specify an s3_configuration block. See + http_endpoint_configuration block below for details. + properties: + accessKeySecretRef: + description: The access key required for Kinesis Firehose + to authenticate with the HTTP endpoint selected as the destination. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds, before delivering it to the destination. + The default value is 300 (5 minutes). 
+ type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs, before delivering it to the destination. The default + value is 5. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + name: + description: The HTTP endpoint name. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. 
Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + requestConfiguration: + description: The request configuration. See request_configuration + block below for details. + properties: + commonAttributes: + description: Describes the metadata sent to the HTTP endpoint + destination. See common_attributes block below for details. + items: + properties: + name: + description: The name of the HTTP endpoint common + attribute. + type: string + value: + description: The value of the HTTP endpoint common + attribute. + type: string + type: object + type: array + contentEncoding: + description: Kinesis Data Firehose uses the content encoding + to compress the body of a request before sending the + request to the destination. Valid values are NONE and + GZIP. Default value is NONE. + type: string + type: object + retryDuration: + description: Total amount of seconds Firehose spends on retries. + This duration starts after the initial attempt fails, It + does not include the time periods during which Firehose + waits for acknowledgment from the specified destination + after each attempt. Valid values between 0 and 7200. Default + is 300. + type: number + roleArn: + description: Kinesis Data Firehose uses this IAM role for + all the permissions that the delivery stream needs. The + pattern needs to be arn:.*. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDataOnly and AllData. Default + value is FailedDataOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. 
+ properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. 
You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + url: + description: The HTTP endpoint URL to which Kinesis Firehose + sends your data. + type: string + type: object + kinesisSourceConfiguration: + description: The stream and role Amazon Resource Names (ARNs) + for a Kinesis data stream used as the source for a delivery + stream. See kinesis_source_configuration block below for details. + properties: + kinesisStreamArn: + description: The kinesis stream used as the source of the + firehose delivery stream. + type: string + roleArn: + description: The ARN of the role that provides access to the + source Kinesis stream. + type: string + type: object + mskSourceConfiguration: + description: The configuration for the Amazon MSK cluster to be + used as the source for a delivery stream. See msk_source_configuration + block below for details. + properties: + authenticationConfiguration: + description: The authentication configuration of the Amazon + MSK cluster. See authentication_configuration block below + for details. + properties: + connectivity: + description: 'The type of connectivity used to access + the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE.' + type: string + roleArn: + description: The ARN of the role used to access the Amazon + MSK cluster. + type: string + type: object + mskClusterArn: + description: The ARN of the Amazon MSK cluster. 
+ type: string + topicName: + description: The topic name within the Amazon MSK cluster. + type: string + type: object + name: + description: A name to identify the stream. This is unique to + the AWS account and region the Stream is created in. When using + for WAF logging, name must be prefixed with aws-waf-logs-. See + AWS Documentation for more details. + type: string + opensearchConfiguration: + description: Configuration options when destination is opensearch. + See opensearch_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 900, before delivering + it to the destination. The default value is 300s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 100, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + clusterEndpoint: + description: The endpoint to use when communicating with the + cluster. Conflicts with domain_arn. + type: string + documentIdOptions: + description: The method for setting up document ID. See [document_id_options + block] below for details. + properties: + defaultDocumentIdFormat: + description: 'The method for setting up document ID. Valid + values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID.' + type: string + type: object + domainArn: + description: The ARN of the Amazon ES domain. 
The pattern + needs to be arn:.*. Conflicts with cluster_endpoint. + type: string + domainArnRef: + description: Reference to a Domain in opensearch to populate + domainArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainArnSelector: + description: Selector for a Domain in opensearch to populate + domainArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + indexName: + description: The OpenSearch index name. + type: string + indexRotationPeriod: + description: The OpenSearch index rotation period. Index + rotation appends a timestamp to the IndexName to facilitate + expiration of old data. Valid values are NoRotation, OneHour, + OneDay, OneWeek, and OneMonth. The default value is OneDay. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' 
+ type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Amazon + OpenSearch, the total amount of time, in seconds between + 0 to 7200, during which Firehose re-attempts delivery (including + the first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 300s. There will be no retry if the value is 0. + type: number + roleArn: + description: The ARN of the IAM role to be assumed by Firehose + for calling the Amazon ES Configuration API and for indexing + documents. The IAM role must have permission for DescribeDomain, + DescribeDomains, and DescribeDomainConfig. The pattern + needs to be arn:.*. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default + value is FailedDocumentsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. 
See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + typeName: + description: The Elasticsearch type name with maximum length + of 100 characters. Types are deprecated in OpenSearch_1.1. + TypeName must be empty. 
+ type: string + vpcConfig: + description: The VPC configuration for the delivery stream + to connect to OpenSearch associated with the VPC. See vpc_config + block below for details. + properties: + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of security group IDs to associate + with Kinesis Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs to associate with Kinesis + Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + opensearchserverlessConfiguration: + description: Configuration options when destination is opensearchserverless. + See opensearchserverless_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 900, before delivering + it to the destination. The default value is 300s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 100, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. 
+ type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + collectionEndpoint: + description: The endpoint to use when communicating with the + collection in the Serverless offering for Amazon OpenSearch + Service. + type: string + collectionEndpointRef: + description: Reference to a Collection in opensearchserverless + to populate collectionEndpoint. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + collectionEndpointSelector: + description: Selector for a Collection in opensearchserverless + to populate collectionEndpoint. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + indexName: + description: The Serverless offering for Amazon OpenSearch + Service index name. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. 
Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to the Serverless + offering for Amazon OpenSearch Service, the total amount + of time, in seconds between 0 to 7200, during which Kinesis + Data Firehose retries delivery (including the first attempt). After + this time has elapsed, the failed documents are written + to Amazon S3. The default value is 300s. There will be + no retry if the value is 0. + type: number + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role + to be assumed by Kinesis Data Firehose for calling the Serverless + offering for Amazon OpenSearch Service Configuration API + and for indexing documents. The pattern needs to be arn:.*. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default + value is FailedDocumentsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. 
+ type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. 
Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + vpcConfig: + description: The VPC configuration for the delivery stream + to connect to OpenSearch Serverless associated with the + VPC. See vpc_config block below for details. + properties: + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + securityGroupIds: + description: A list of security group IDs to associate + with Kinesis Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs to associate with Kinesis + Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + redshiftConfiguration: + description: Configuration options when destination is redshift. + Requires the user to also specify an s3_configuration block. + See redshift_configuration block below for details. + properties: + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + clusterJdbcurl: + description: The jdbcurl of the redshift cluster. 
+ type: string + copyOptions: + description: Copy options for copying the data from the s3 + intermediate bucket into redshift, for example to change + the default delimiter. For valid values, see the AWS documentation + type: string + dataTableColumns: + description: The data table columns that will be targeted + by the copy command. + type: string + dataTableName: + description: The name of the table in the redshift cluster + that the s3 bucket will copy to. + type: string + passwordSecretRef: + description: The password for the username above. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. 
Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: The length of time during which Firehose retries + delivery after a failure, starting from the initial request + and including the first attempt. The default value is 3600 + seconds (60 minutes). Firehose does not retry if the value + of DurationInSeconds is 0 (zero) or if the first delivery + attempt takes longer than the current value. + type: number + roleArn: + description: The arn of the role the stream assumes. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupConfiguration: + description: The configuration for backup in Amazon S3. Required + if s3_backup_mode is Enabled. Supports the same fields as + s3_configuration object. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. 
See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + s3BackupMode: + description: The Amazon S3 backup mode. Valid values are + Disabled and Enabled. Default value is Disabled. 
+ type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration below + for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. 
If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + username: + description: The username that the firehose delivery stream + will assume. It is strongly recommended that the username + and password provided is used exclusively for Amazon Kinesis + Firehose purposes, and that the permissions for the account + are restricted for Amazon Redshift INSERT permissions. + type: string + type: object + region: + description: |- + If you don't specify an AWS Region, the default is the current region. + Region is the region you'd like your resource to be created in. + type: string + serverSideEncryption: + description: Encrypt at rest options. See server_side_encryption + block below for details. + properties: + enabled: + description: Whether to enable encryption at rest. Default + is false. + type: boolean + keyArn: + description: Amazon Resource Name (ARN) of the encryption + key. Required when key_type is CUSTOMER_MANAGED_CMK. + type: string + keyType: + description: Type of encryption key. Default is AWS_OWNED_CMK. + Valid values are AWS_OWNED_CMK and CUSTOMER_MANAGED_CMK + type: string + type: object + snowflakeConfiguration: + description: Configuration options when destination is snowflake. + See snowflake_configuration block below for details. 
+ properties: + accountUrl: + description: 'The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com.' + type: string + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + contentColumnName: + description: The name of the content column. + type: string + dataLoadingOption: + description: The data loading option. + type: string + database: + description: The Snowflake database name. + type: string + keyPassphraseSecretRef: + description: The passphrase for the private key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + metadataColumnName: + description: The name of the metadata column. + type: string + privateKeySecretRef: + description: The private key for authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + processingConfiguration: + description: The processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. 
See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Snowflake, + the total amount of time, in seconds between 0 to 7200, + during which Firehose re-attempts delivery (including the + first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 60s. There will be no retry if the value is 0. + type: number + roleArn: + description: The ARN of the IAM role. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: The S3 backup mode. + type: string + s3Configuration: + description: The S3 configuration. See s3_configuration block + below for details. 
+ properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. 
You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + schema: + description: The Snowflake schema name. + type: string + snowflakeRoleConfiguration: + description: The configuration for Snowflake role. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + snowflakeRole: + description: The Snowflake role. + type: string + type: object + snowflakeVpcConfiguration: + description: The VPC configuration for Snowflake. + properties: + privateLinkVpceId: + description: The VPCE ID for Firehose to privately connect + with Snowflake. + type: string + type: object + table: + description: The Snowflake table name. + type: string + user: + description: The user for authentication. + type: string + type: object + splunkConfiguration: + description: Configuration options when destination is splunk. + See splunk_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering it + to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. 
See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + hecAcknowledgmentTimeout: + description: The amount of time, in seconds between 180 and + 600, that Kinesis Firehose waits to receive an acknowledgment + from Splunk after it sends it data. + type: number + hecEndpoint: + description: The HTTP Event Collector (HEC) endpoint to which + Kinesis Firehose sends your data. + type: string + hecEndpointType: + description: The HEC endpoint type. Valid values are Raw or + Event. The default value is Raw. + type: string + hecTokenSecretRef: + description: The GUID that you obtain from your Splunk cluster + when you create a new HEC endpoint. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. 
Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Splunk, + the total amount of time, in seconds between 0 to 7200, + during which Firehose re-attempts delivery (including the + first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 300s. There will be no retry if the value is 0. + type: number + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default + value is FailedEventsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. 
+ type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. 
Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + versionId: + description: Specifies the table version for the output data schema. + Defaults to LATEST. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + destination: + description: – This is the destination to where the data is delivered. + The only options are s3 (Deprecated, use extended_s3 instead), + extended_s3, redshift, elasticsearch, splunk, http_endpoint, + opensearch, opensearchserverless and snowflake. + type: string + destinationId: + type: string + elasticsearchConfiguration: + description: Configuration options when destination is elasticsearch. + See elasticsearch_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 900, before delivering + it to the destination. The default value is 300s. 
+ type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 100, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + clusterEndpoint: + description: The endpoint to use when communicating with the + cluster. Conflicts with domain_arn. + type: string + domainArn: + description: The ARN of the Amazon ES domain. The pattern + needs to be arn:.*. Conflicts with cluster_endpoint. + type: string + domainArnRef: + description: Reference to a Domain in elasticsearch to populate + domainArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainArnSelector: + description: Selector for a Domain in elasticsearch to populate + domainArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + indexName: + description: The Elasticsearch index name. + type: string + indexRotationPeriod: + description: The Elasticsearch index rotation period. Index + rotation appends a timestamp to the IndexName to facilitate + expiration of old data. Valid values are NoRotation, OneHour, + OneDay, OneWeek, and OneMonth. The default value is OneDay. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. 
See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Amazon + Elasticsearch, the total amount of time, in seconds between + 0 to 7200, during which Firehose re-attempts delivery (including + the first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 300s. There will be no retry if the value is 0. + type: number + roleArn: + description: The ARN of the IAM role to be assumed by Firehose + for calling the Amazon ES Configuration API and for indexing + documents. The IAM role must have permission for DescribeElasticsearchDomain, + DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The + pattern needs to be arn:.*. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default + value is FailedDocumentsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. 
Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + typeName: + description: The Elasticsearch type name with maximum length + of 100 characters. + type: string + vpcConfig: + description: The VPC configuration for the delivery stream + to connect to Elastic Search associated with the VPC. See + vpc_config block below for details. + properties: + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of security group IDs to associate + with Kinesis Firehose. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs to associate with Kinesis + Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + extendedS3Configuration: + description: Enhanced configuration options for the s3 destination. + See extended_s3_configuration block below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering it + to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values are + GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + customTimeZone: + description: The time zone you prefer. 
Valid values are UTC + or a non-3-letter IANA time zones (for example, America/Los_Angeles). + Default value is UTC. + type: string + dataFormatConversionConfiguration: + description: Nested argument for the serializer, deserializer, + and schema for converting data from the JSON format to the + Parquet or ORC format before writing it to Amazon S3. See + data_format_conversion_configuration block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + inputFormatConfiguration: + description: Specifies the deserializer that you want + Kinesis Data Firehose to use to convert the format of + your data from JSON. See input_format_configuration + block below for details. + properties: + deserializer: + description: Specifies which deserializer to use. + You can choose either the Apache Hive JSON SerDe + or the OpenX JSON SerDe. See deserializer block + below for details. + properties: + hiveJsonSerDe: + description: Specifies the native Hive / HCatalog + JsonSerDe. More details below. See hive_json_ser_de + block below for details. + properties: + timestampFormats: + description: A list of how you want Kinesis + Data Firehose to parse the date and time + stamps that may be present in your input + data JSON. To specify these format strings, + follow the pattern syntax of JodaTime's + DateTimeFormat format strings. For more + information, see Class DateTimeFormat. You + can also use the special value millis to + parse time stamps in epoch milliseconds. + If you don't specify a format, Kinesis Data + Firehose uses java.sql.Timestamp::valueOf + by default. + items: + type: string + type: array + type: object + openXJsonSerDe: + description: Specifies the OpenX SerDe. See open_x_json_ser_de + block below for details. + properties: + caseInsensitive: + description: When set to true, which is the + default, Kinesis Data Firehose converts + JSON keys to lowercase before deserializing + them. 
+ type: boolean + columnToJsonKeyMappings: + additionalProperties: + type: string + description: A map of column names to JSON + keys that aren't identical to the column + names. This is useful when the JSON contains + keys that are Hive keywords. For example, + timestamp is a Hive keyword. If you have + a JSON key named timestamp, set this parameter + to { ts = "timestamp" } to map this key + to a column named ts. + type: object + x-kubernetes-map-type: granular + convertDotsInJsonKeysToUnderscores: + description: When set to true, specifies that + the names of the keys include dots and that + you want Kinesis Data Firehose to replace + them with underscores. This is useful because + Apache Hive does not allow dots in column + names. For example, if the JSON contains + a key whose name is "a.b", you can define + the column name to be "a_b" when using this + option. Defaults to false. + type: boolean + type: object + type: object + type: object + outputFormatConfiguration: + description: Specifies the serializer that you want Kinesis + Data Firehose to use to convert the format of your data + to the Parquet or ORC format. See output_format_configuration + block below for details. + properties: + serializer: + description: Specifies which serializer to use. You + can choose either the ORC SerDe or the Parquet SerDe. + See serializer block below for details. + properties: + orcSerDe: + description: Specifies converting data to the + ORC format before storing it in Amazon S3. For + more information, see Apache ORC. See orc_ser_de + block below for details. + properties: + blockSizeBytes: + description: The Hadoop Distributed File System + (HDFS) block size. This is useful if you + intend to copy the data from Amazon S3 to + HDFS before querying. The default is 256 + MiB and the minimum is 64 MiB. Kinesis Data + Firehose uses this value for padding calculations. 
+ type: number + bloomFilterColumns: + description: A list of column names for which + you want Kinesis Data Firehose to create + bloom filters. + items: + type: string + type: array + bloomFilterFalsePositiveProbability: + description: The Bloom filter false positive + probability (FPP). The lower the FPP, the + bigger the Bloom filter. The default value + is 0.05, the minimum is 0, and the maximum + is 1. + type: number + compression: + description: The compression code to use over + data blocks. The possible values are UNCOMPRESSED, + SNAPPY, and GZIP, with the default being + SNAPPY. Use SNAPPY for higher decompression + speed. Use GZIP if the compression ratio + is more important than speed. + type: string + dictionaryKeyThreshold: + description: A float that represents the fraction + of the total number of non-null rows. To + turn off dictionary encoding, set this fraction + to a number that is less than the number + of distinct keys in a dictionary. To always + use dictionary encoding, set this threshold + to 1. + type: number + enablePadding: + description: Set this to true to indicate + that you want stripes to be padded to the + HDFS block boundaries. This is useful if + you intend to copy the data from Amazon + S3 to HDFS before querying. The default + is false. + type: boolean + formatVersion: + description: The version of the file to write. + The possible values are V0_11 and V0_12. + The default is V0_12. + type: string + paddingTolerance: + description: A float between 0 and 1 that + defines the tolerance for block padding + as a decimal fraction of stripe size. The + default value is 0.05, which means 5 percent + of stripe size. For the default values of + 64 MiB ORC stripes and 256 MiB HDFS blocks, + the default block padding tolerance of 5 + percent reserves a maximum of 3.2 MiB for + padding within the 256 MiB block. 
In such + a case, if the available size within the + block is more than 3.2 MiB, a new, smaller + stripe is inserted to fit within that space. + This ensures that no stripe crosses block + boundaries and causes remote reads within + a node-local task. Kinesis Data Firehose + ignores this parameter when enable_padding + is false. + type: number + rowIndexStride: + description: The number of rows between index + entries. The default is 10000 and the minimum + is 1000. + type: number + stripeSizeBytes: + description: The number of bytes in each stripe. + The default is 64 MiB and the minimum is + 8 MiB. + type: number + type: object + parquetSerDe: + description: Specifies converting data to the + Parquet format before storing it in Amazon S3. + For more information, see Apache Parquet. More + details below. + properties: + blockSizeBytes: + description: The Hadoop Distributed File System + (HDFS) block size. This is useful if you + intend to copy the data from Amazon S3 to + HDFS before querying. The default is 256 + MiB and the minimum is 64 MiB. Kinesis Data + Firehose uses this value for padding calculations. + type: number + compression: + description: The compression code to use over + data blocks. The possible values are UNCOMPRESSED, + SNAPPY, and GZIP, with the default being + SNAPPY. Use SNAPPY for higher decompression + speed. Use GZIP if the compression ratio + is more important than speed. + type: string + enableDictionaryCompression: + description: Indicates whether to enable dictionary + compression. + type: boolean + maxPaddingBytes: + description: The maximum amount of padding + to apply. This is useful if you intend to + copy the data from Amazon S3 to HDFS before + querying. The default is 0. + type: number + pageSizeBytes: + description: The Parquet page size. Column + chunks are divided into pages. A page is + conceptually an indivisible unit (in terms + of compression and encoding). The minimum + value is 64 KiB and the default is 1 MiB. 
+ type: number + writerVersion: + description: Indicates the version of row + format to output. The possible values are + V1 and V2. The default is V1. + type: string + type: object + type: object + type: object + schemaConfiguration: + description: Specifies the AWS Glue Data Catalog table + that contains the column information. See schema_configuration + block below for details. + properties: + catalogId: + description: The ID of the AWS Glue Data Catalog. + If you don't supply this, the AWS account ID is + used by default. + type: string + databaseName: + description: Specifies the name of the AWS Glue database + that contains the schema for the output data. + type: string + roleArn: + description: The ARN of the IAM role to be assumed + by Firehose for calling the Amazon EC2 configuration + API and for creating network interfaces. Make sure + role has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate + roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate + roleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tableName: + description: Specifies the AWS Glue table that contains + the column information that constitutes your data + schema. + type: string + tableNameRef: + description: Reference to a CatalogTable in glue to + populate tableName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + tableNameSelector: + description: Selector for a CatalogTable in glue to + populate tableName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + versionId: + description: Specifies the table version for the output + data schema. Defaults to LATEST. + type: string + type: object + type: object + dynamicPartitioningConfiguration: + description: The configuration for dynamic partitioning. Required + when using dynamic partitioning. See dynamic_partitioning_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. 
+ type: boolean + retryDuration: + description: After an initial failure to deliver to Splunk, + the total amount of time, in seconds between 0 to 7200, + during which Firehose re-attempts delivery (including + the first attempt). After this time has elapsed, the + failed documents are written to Amazon S3. The default + value is 300s. There will be no retry if the value + is 0. + type: number + type: object + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket name. + For information about how to specify this prefix, see Custom + Prefixes for Amazon S3 Objects. + type: string + fileExtension: + description: The file extension to override the default file + extension (for example, .json). + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is automatically + used for delivered S3 files. You can specify an extra prefix + to be added in front of the time format prefix. Note that + if the prefix ends with a slash, it appears as a folder + in the S3 bucket + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. 
Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + roleArn: + description: The ARN of the IAM role to be assumed by Firehose + for calling the Amazon EC2 configuration API and for creating + network interfaces. Make sure role has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupConfiguration: + description: The configuration for backup in Amazon S3. Required + if s3_backup_mode is Enabled. Supports the same fields as + s3_configuration object. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. 
+ type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + s3BackupMode: + description: The Amazon S3 backup mode. Valid values are + Disabled and Enabled. Default value is Disabled. 
+ type: string + type: object + httpEndpointConfiguration: + description: Configuration options when destination is http_endpoint. + Requires the user to also specify an s3_configuration block. See + http_endpoint_configuration block below for details. + properties: + accessKeySecretRef: + description: The access key required for Kinesis Firehose + to authenticate with the HTTP endpoint selected as the destination. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds, before delivering it to the destination. + The default value is 300 (5 minutes). + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs, before delivering it to the destination. The default + value is 5. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + name: + description: The HTTP endpoint name. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. 
See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + requestConfiguration: + description: The request configuration. See request_configuration + block below for details. + properties: + commonAttributes: + description: Describes the metadata sent to the HTTP endpoint + destination. See common_attributes block below for details. + items: + properties: + name: + description: The name of the HTTP endpoint common + attribute. + type: string + value: + description: The value of the HTTP endpoint common + attribute. + type: string + type: object + type: array + contentEncoding: + description: Kinesis Data Firehose uses the content encoding + to compress the body of a request before sending the + request to the destination. Valid values are NONE and + GZIP. Default value is NONE. + type: string + type: object + retryDuration: + description: Total amount of seconds Firehose spends on retries. 
+ This duration starts after the initial attempt fails, It + does not include the time periods during which Firehose + waits for acknowledgment from the specified destination + after each attempt. Valid values between 0 and 7200. Default + is 300. + type: number + roleArn: + description: Kinesis Data Firehose uses this IAM role for + all the permissions that the delivery stream needs. The + pattern needs to be arn:.*. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDataOnly and AllData. Default + value is FailedDataOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. 
+ type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + url: + description: The HTTP endpoint URL to which Kinesis Firehose + sends your data. + type: string + type: object + kinesisSourceConfiguration: + description: The stream and role Amazon Resource Names (ARNs) + for a Kinesis data stream used as the source for a delivery + stream. 
See kinesis_source_configuration block below for details. + properties: + kinesisStreamArn: + description: The kinesis stream used as the source of the + firehose delivery stream. + type: string + roleArn: + description: The ARN of the role that provides access to the + source Kinesis stream. + type: string + type: object + mskSourceConfiguration: + description: The configuration for the Amazon MSK cluster to be + used as the source for a delivery stream. See msk_source_configuration + block below for details. + properties: + authenticationConfiguration: + description: The authentication configuration of the Amazon + MSK cluster. See authentication_configuration block below + for details. + properties: + connectivity: + description: 'The type of connectivity used to access + the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE.' + type: string + roleArn: + description: The ARN of the role used to access the Amazon + MSK cluster. + type: string + type: object + mskClusterArn: + description: The ARN of the Amazon MSK cluster. + type: string + topicName: + description: The topic name within the Amazon MSK cluster. + type: string + type: object + name: + description: A name to identify the stream. This is unique to + the AWS account and region the Stream is created in. When using + for WAF logging, name must be prefixed with aws-waf-logs-. See + AWS Documentation for more details. + type: string + opensearchConfiguration: + description: Configuration options when destination is opensearch. + See opensearch_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 900, before delivering + it to the destination. The default value is 300s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 100, before delivering it to the destination. The + default value is 5MB. 
+ type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + clusterEndpoint: + description: The endpoint to use when communicating with the + cluster. Conflicts with domain_arn. + type: string + documentIdOptions: + description: The method for setting up document ID. See [document_id_options + block] below for details. + properties: + defaultDocumentIdFormat: + description: 'The method for setting up document ID. Valid + values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID.' + type: string + type: object + domainArn: + description: The ARN of the Amazon ES domain. The pattern + needs to be arn:.*. Conflicts with cluster_endpoint. + type: string + domainArnRef: + description: Reference to a Domain in opensearch to populate + domainArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainArnSelector: + description: Selector for a Domain in opensearch to populate + domainArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + indexName: + description: The OpenSearch index name. + type: string + indexRotationPeriod: + description: The OpenSearch index rotation period. Index + rotation appends a timestamp to the IndexName to facilitate + expiration of old data. Valid values are NoRotation, OneHour, + OneDay, OneWeek, and OneMonth. The default value is OneDay. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. 
+ type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Amazon + OpenSearch, the total amount of time, in seconds between + 0 to 7200, during which Firehose re-attempts delivery (including + the first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 300s. There will be no retry if the value is 0. + type: number + roleArn: + description: The ARN of the IAM role to be assumed by Firehose + for calling the Amazon ES Configuration API and for indexing + documents. The IAM role must have permission for DescribeDomain, + DescribeDomains, and DescribeDomainConfig. The pattern + needs to be arn:.*. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default + value is FailedDocumentsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. 
Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + typeName: + description: The Elasticsearch type name with maximum length + of 100 characters. Types are deprecated in OpenSearch_1.1. + TypeName must be empty. + type: string + vpcConfig: + description: The VPC configuration for the delivery stream + to connect to OpenSearch associated with the VPC. See vpc_config + block below for details. + properties: + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of security group IDs to associate + with Kinesis Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs to associate with Kinesis + Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + opensearchserverlessConfiguration: + description: Configuration options when destination is opensearchserverless. + See opensearchserverless_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 900, before delivering + it to the destination. The default value is 300s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 100, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + collectionEndpoint: + description: The endpoint to use when communicating with the + collection in the Serverless offering for Amazon OpenSearch + Service. + type: string + collectionEndpointRef: + description: Reference to a Collection in opensearchserverless + to populate collectionEndpoint. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + collectionEndpointSelector: + description: Selector for a Collection in opensearchserverless + to populate collectionEndpoint. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + indexName: + description: The Serverless offering for Amazon OpenSearch + Service index name. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to the Serverless + offering for Amazon OpenSearch Service, the total amount + of time, in seconds between 0 to 7200, during which Kinesis + Data Firehose retries delivery (including the first attempt). After + this time has elapsed, the failed documents are written + to Amazon S3. The default value is 300s. 
There will be + no retry if the value is 0. + type: number + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role + to be assumed by Kinesis Data Firehose for calling the Serverless + offering for Amazon OpenSearch Service Configuration API + and for indexing documents. The pattern needs to be arn:.*. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default + value is FailedDocumentsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. 
+ type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + vpcConfig: + description: The VPC configuration for the delivery stream + to connect to OpenSearch Serverless associated with the + VPC. See vpc_config block below for details. 
+ properties: + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + securityGroupIds: + description: A list of security group IDs to associate + with Kinesis Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs to associate with Kinesis + Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + redshiftConfiguration: + description: Configuration options when destination is redshift. + Requires the user to also specify an s3_configuration block. + See redshift_configuration block below for details. + properties: + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + clusterJdbcurl: + description: The jdbcurl of the redshift cluster. + type: string + copyOptions: + description: Copy options for copying the data from the s3 + intermediate bucket into redshift, for example to change + the default delimiter. For valid values, see the AWS documentation + type: string + dataTableColumns: + description: The data table columns that will be targeted + by the copy command. + type: string + dataTableName: + description: The name of the table in the redshift cluster + that the s3 bucket will copy to. + type: string + passwordSecretRef: + description: The password for the username above. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: The length of time during which Firehose retries + delivery after a failure, starting from the initial request + and including the first attempt. The default value is 3600 + seconds (60 minutes). 
Firehose does not retry if the value + of DurationInSeconds is 0 (zero) or if the first delivery + attempt takes longer than the current value. + type: number + roleArn: + description: The arn of the role the stream assumes. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupConfiguration: + description: The configuration for backup in Amazon S3. Required + if s3_backup_mode is Enabled. Supports the same fields as + s3_configuration object. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. 
Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + s3BackupMode: + description: The Amazon S3 backup mode. Valid values are + Disabled and Enabled. Default value is Disabled. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration below + for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. 
+ type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. 
Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + username: + description: The username that the firehose delivery stream + will assume. It is strongly recommended that the username + and password provided is used exclusively for Amazon Kinesis + Firehose purposes, and that the permissions for the account + are restricted for Amazon Redshift INSERT permissions. + type: string + required: + - passwordSecretRef + type: object + serverSideEncryption: + description: Encrypt at rest options. See server_side_encryption + block below for details. + properties: + enabled: + description: Whether to enable encryption at rest. Default + is false. + type: boolean + keyArn: + description: Amazon Resource Name (ARN) of the encryption + key. Required when key_type is CUSTOMER_MANAGED_CMK. + type: string + keyType: + description: Type of encryption key. Default is AWS_OWNED_CMK. + Valid values are AWS_OWNED_CMK and CUSTOMER_MANAGED_CMK + type: string + type: object + snowflakeConfiguration: + description: Configuration options when destination is snowflake. + See snowflake_configuration block below for details. + properties: + accountUrl: + description: 'The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com.' + type: string + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. 
+ This value is required if enabled is true. + type: string + type: object + contentColumnName: + description: The name of the content column. + type: string + dataLoadingOption: + description: The data loading option. + type: string + database: + description: The Snowflake database name. + type: string + keyPassphraseSecretRef: + description: The passphrase for the private key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + metadataColumnName: + description: The name of the metadata column. + type: string + privateKeySecretRef: + description: The private key for authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + processingConfiguration: + description: The processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. 
Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Snowflake, + the total amount of time, in seconds between 0 to 7200, + during which Firehose re-attempts delivery (including the + first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 60s. There will be no retry if the value is 0. + type: number + roleArn: + description: The ARN of the IAM role. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3BackupMode: + description: The S3 backup mode. + type: string + s3Configuration: + description: The S3 configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. 
+ type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + schema: + description: The Snowflake schema name. + type: string + snowflakeRoleConfiguration: + description: The configuration for Snowflake role. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + snowflakeRole: + description: The Snowflake role. + type: string + type: object + snowflakeVpcConfiguration: + description: The VPC configuration for Snowflake. + properties: + privateLinkVpceId: + description: The VPCE ID for Firehose to privately connect + with Snowflake. + type: string + type: object + table: + description: The Snowflake table name. + type: string + user: + description: The user for authentication. + type: string + required: + - privateKeySecretRef + type: object + splunkConfiguration: + description: Configuration options when destination is splunk. + See splunk_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering it + to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. 
+ type: string + type: object + hecAcknowledgmentTimeout: + description: The amount of time, in seconds between 180 and + 600, that Kinesis Firehose waits to receive an acknowledgment + from Splunk after it sends it data. + type: number + hecEndpoint: + description: The HTTP Event Collector (HEC) endpoint to which + Kinesis Firehose sends your data. + type: string + hecEndpointType: + description: The HEC endpoint type. Valid values are Raw or + Event. The default value is Raw. + type: string + hecTokenSecretRef: + description: The GUID that you obtain from your Splunk cluster + when you create a new HEC endpoint. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. 
+ type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Splunk, + the total amount of time, in seconds between 0 to 7200, + during which Firehose re-attempts delivery (including the + first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 300s. There will be no retry if the value is 0. + type: number + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default + value is FailedEventsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. 
+ type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + required: + - hecTokenSecretRef + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + versionId: + description: Specifies the table version for the output data schema. 
+ Defaults to LATEST. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.destination is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destination) + || (has(self.initProvider) && has(self.initProvider.destination))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: DeliveryStreamStatus defines the observed state of DeliveryStream. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) specifying the Stream + type: string + destination: + description: – This is the destination to where the data is delivered. + The only options are s3 (Deprecated, use extended_s3 instead), + extended_s3, redshift, elasticsearch, splunk, http_endpoint, + opensearch, opensearchserverless and snowflake. + type: string + destinationId: + type: string + elasticsearchConfiguration: + description: Configuration options when destination is elasticsearch. + See elasticsearch_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 900, before delivering + it to the destination. The default value is 300s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 100, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. 
+ properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + clusterEndpoint: + description: The endpoint to use when communicating with the + cluster. Conflicts with domain_arn. + type: string + domainArn: + description: The ARN of the Amazon ES domain. The pattern + needs to be arn:.*. Conflicts with cluster_endpoint. + type: string + indexName: + description: The Elasticsearch index name. + type: string + indexRotationPeriod: + description: The Elasticsearch index rotation period. Index + rotation appends a timestamp to the IndexName to facilitate + expiration of old data. Valid values are NoRotation, OneHour, + OneDay, OneWeek, and OneMonth. The default value is OneDay. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. 
Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Amazon + Elasticsearch, the total amount of time, in seconds between + 0 to 7200, during which Firehose re-attempts delivery (including + the first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 300s. There will be no retry if the value is 0. + type: number + roleArn: + description: The ARN of the IAM role to be assumed by Firehose + for calling the Amazon ES Configuration API and for indexing + documents. The IAM role must have permission for DescribeElasticsearchDomain, + DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The + pattern needs to be arn:.*. + type: string + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default + value is FailedDocumentsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. 
+ type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + typeName: + description: The Elasticsearch type name with maximum length + of 100 characters. 
+ type: string + vpcConfig: + description: The VPC configuration for the delivery stream + to connect to Elastic Search associated with the VPC. See + vpc_config block below for details. + properties: + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + securityGroupIds: + description: A list of security group IDs to associate + with Kinesis Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs to associate with Kinesis + Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + type: string + type: object + type: object + extendedS3Configuration: + description: Enhanced configuration options for the s3 destination. + See extended_s3_configuration block below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering it + to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. 
+ type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values are + GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + customTimeZone: + description: The time zone you prefer. Valid values are UTC + or a non-3-letter IANA time zones (for example, America/Los_Angeles). + Default value is UTC. + type: string + dataFormatConversionConfiguration: + description: Nested argument for the serializer, deserializer, + and schema for converting data from the JSON format to the + Parquet or ORC format before writing it to Amazon S3. See + data_format_conversion_configuration block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + inputFormatConfiguration: + description: Specifies the deserializer that you want + Kinesis Data Firehose to use to convert the format of + your data from JSON. See input_format_configuration + block below for details. + properties: + deserializer: + description: Specifies which deserializer to use. + You can choose either the Apache Hive JSON SerDe + or the OpenX JSON SerDe. See deserializer block + below for details. + properties: + hiveJsonSerDe: + description: Specifies the native Hive / HCatalog + JsonSerDe. More details below. See hive_json_ser_de + block below for details. + properties: + timestampFormats: + description: A list of how you want Kinesis + Data Firehose to parse the date and time + stamps that may be present in your input + data JSON. To specify these format strings, + follow the pattern syntax of JodaTime's + DateTimeFormat format strings. For more + information, see Class DateTimeFormat. You + can also use the special value millis to + parse time stamps in epoch milliseconds. + If you don't specify a format, Kinesis Data + Firehose uses java.sql.Timestamp::valueOf + by default. 
+ items: + type: string + type: array + type: object + openXJsonSerDe: + description: Specifies the OpenX SerDe. See open_x_json_ser_de + block below for details. + properties: + caseInsensitive: + description: When set to true, which is the + default, Kinesis Data Firehose converts + JSON keys to lowercase before deserializing + them. + type: boolean + columnToJsonKeyMappings: + additionalProperties: + type: string + description: A map of column names to JSON + keys that aren't identical to the column + names. This is useful when the JSON contains + keys that are Hive keywords. For example, + timestamp is a Hive keyword. If you have + a JSON key named timestamp, set this parameter + to { ts = "timestamp" } to map this key + to a column named ts. + type: object + x-kubernetes-map-type: granular + convertDotsInJsonKeysToUnderscores: + description: When set to true, specifies that + the names of the keys include dots and that + you want Kinesis Data Firehose to replace + them with underscores. This is useful because + Apache Hive does not allow dots in column + names. For example, if the JSON contains + a key whose name is "a.b", you can define + the column name to be "a_b" when using this + option. Defaults to false. + type: boolean + type: object + type: object + type: object + outputFormatConfiguration: + description: Specifies the serializer that you want Kinesis + Data Firehose to use to convert the format of your data + to the Parquet or ORC format. See output_format_configuration + block below for details. + properties: + serializer: + description: Specifies which serializer to use. You + can choose either the ORC SerDe or the Parquet SerDe. + See serializer block below for details. + properties: + orcSerDe: + description: Specifies converting data to the + ORC format before storing it in Amazon S3. For + more information, see Apache ORC. See orc_ser_de + block below for details. 
+ properties: + blockSizeBytes: + description: The Hadoop Distributed File System + (HDFS) block size. This is useful if you + intend to copy the data from Amazon S3 to + HDFS before querying. The default is 256 + MiB and the minimum is 64 MiB. Kinesis Data + Firehose uses this value for padding calculations. + type: number + bloomFilterColumns: + description: A list of column names for which + you want Kinesis Data Firehose to create + bloom filters. + items: + type: string + type: array + bloomFilterFalsePositiveProbability: + description: The Bloom filter false positive + probability (FPP). The lower the FPP, the + bigger the Bloom filter. The default value + is 0.05, the minimum is 0, and the maximum + is 1. + type: number + compression: + description: The compression code to use over + data blocks. The possible values are UNCOMPRESSED, + SNAPPY, and GZIP, with the default being + SNAPPY. Use SNAPPY for higher decompression + speed. Use GZIP if the compression ratio + is more important than speed. + type: string + dictionaryKeyThreshold: + description: A float that represents the fraction + of the total number of non-null rows. To + turn off dictionary encoding, set this fraction + to a number that is less than the number + of distinct keys in a dictionary. To always + use dictionary encoding, set this threshold + to 1. + type: number + enablePadding: + description: Set this to true to indicate + that you want stripes to be padded to the + HDFS block boundaries. This is useful if + you intend to copy the data from Amazon + S3 to HDFS before querying. The default + is false. + type: boolean + formatVersion: + description: The version of the file to write. + The possible values are V0_11 and V0_12. + The default is V0_12. + type: string + paddingTolerance: + description: A float between 0 and 1 that + defines the tolerance for block padding + as a decimal fraction of stripe size. The + default value is 0.05, which means 5 percent + of stripe size. 
For the default values of + 64 MiB ORC stripes and 256 MiB HDFS blocks, + the default block padding tolerance of 5 + percent reserves a maximum of 3.2 MiB for + padding within the 256 MiB block. In such + a case, if the available size within the + block is more than 3.2 MiB, a new, smaller + stripe is inserted to fit within that space. + This ensures that no stripe crosses block + boundaries and causes remote reads within + a node-local task. Kinesis Data Firehose + ignores this parameter when enable_padding + is false. + type: number + rowIndexStride: + description: The number of rows between index + entries. The default is 10000 and the minimum + is 1000. + type: number + stripeSizeBytes: + description: The number of bytes in each stripe. + The default is 64 MiB and the minimum is + 8 MiB. + type: number + type: object + parquetSerDe: + description: Specifies converting data to the + Parquet format before storing it in Amazon S3. + For more information, see Apache Parquet. More + details below. + properties: + blockSizeBytes: + description: The Hadoop Distributed File System + (HDFS) block size. This is useful if you + intend to copy the data from Amazon S3 to + HDFS before querying. The default is 256 + MiB and the minimum is 64 MiB. Kinesis Data + Firehose uses this value for padding calculations. + type: number + compression: + description: The compression code to use over + data blocks. The possible values are UNCOMPRESSED, + SNAPPY, and GZIP, with the default being + SNAPPY. Use SNAPPY for higher decompression + speed. Use GZIP if the compression ratio + is more important than speed. + type: string + enableDictionaryCompression: + description: Indicates whether to enable dictionary + compression. + type: boolean + maxPaddingBytes: + description: The maximum amount of padding + to apply. This is useful if you intend to + copy the data from Amazon S3 to HDFS before + querying. The default is 0. 
+ type: number + pageSizeBytes: + description: The Parquet page size. Column + chunks are divided into pages. A page is + conceptually an indivisible unit (in terms + of compression and encoding). The minimum + value is 64 KiB and the default is 1 MiB. + type: number + writerVersion: + description: Indicates the version of row + format to output. The possible values are + V1 and V2. The default is V1. + type: string + type: object + type: object + type: object + schemaConfiguration: + description: Specifies the AWS Glue Data Catalog table + that contains the column information. See schema_configuration + block below for details. + properties: + catalogId: + description: The ID of the AWS Glue Data Catalog. + If you don't supply this, the AWS account ID is + used by default. + type: string + databaseName: + description: Specifies the name of the AWS Glue database + that contains the schema for the output data. + type: string + region: + description: If you don't specify an AWS Region, the + default is the current region. + type: string + roleArn: + description: The ARN of the IAM role to be assumed + by Firehose for calling the Amazon EC2 configuration + API and for creating network interfaces. Make sure + role has necessary IAM permissions + type: string + tableName: + description: Specifies the AWS Glue table that contains + the column information that constitutes your data + schema. + type: string + versionId: + description: Specifies the table version for the output + data schema. Defaults to LATEST. + type: string + type: object + type: object + dynamicPartitioningConfiguration: + description: The configuration for dynamic partitioning. Required + when using dynamic partitioning. See dynamic_partitioning_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. 
+ type: boolean + retryDuration: + description: After an initial failure to deliver to Splunk, + the total amount of time, in seconds between 0 to 7200, + during which Firehose re-attempts delivery (including + the first attempt). After this time has elapsed, the + failed documents are written to Amazon S3. The default + value is 300s. There will be no retry if the value + is 0. + type: number + type: object + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket name. + For information about how to specify this prefix, see Custom + Prefixes for Amazon S3 Objects. + type: string + fileExtension: + description: The file extension to override the default file + extension (for example, .json). + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is automatically + used for delivered S3 files. You can specify an extra prefix + to be added in front of the time format prefix. Note that + if the prefix ends with a slash, it appears as a folder + in the S3 bucket + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. 
Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + roleArn: + description: The ARN of the IAM role to be assumed by Firehose + for calling the Amazon EC2 configuration API and for creating + network interfaces. Make sure role has necessary IAM permissions + type: string + s3BackupConfiguration: + description: The configuration for backup in Amazon S3. Required + if s3_backup_mode is Enabled. Supports the same fields as + s3_configuration object. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. 
+ type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + s3BackupMode: + description: The Amazon S3 backup mode. Valid values are + Disabled and Enabled. Default value is Disabled. + type: string + type: object + httpEndpointConfiguration: + description: Configuration options when destination is http_endpoint. + Requires the user to also specify an s3_configuration block. See + http_endpoint_configuration block below for details. 
+ properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds, before delivering it to the destination. + The default value is 300 (5 minutes). + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs, before delivering it to the destination. The default + value is 5. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + name: + description: The HTTP endpoint name. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). 
When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + requestConfiguration: + description: The request configuration. See request_configuration + block below for details. + properties: + commonAttributes: + description: Describes the metadata sent to the HTTP endpoint + destination. See common_attributes block below for details. + items: + properties: + name: + description: The name of the HTTP endpoint common + attribute. + type: string + value: + description: The value of the HTTP endpoint common + attribute. + type: string + type: object + type: array + contentEncoding: + description: Kinesis Data Firehose uses the content encoding + to compress the body of a request before sending the + request to the destination. Valid values are NONE and + GZIP. Default value is NONE. + type: string + type: object + retryDuration: + description: Total amount of seconds Firehose spends on retries. + This duration starts after the initial attempt fails, It + does not include the time periods during which Firehose + waits for acknowledgment from the specified destination + after each attempt. Valid values between 0 and 7200. Default + is 300. + type: number + roleArn: + description: Kinesis Data Firehose uses this IAM role for + all the permissions that the delivery stream needs. The + pattern needs to be arn:.*. + type: string + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDataOnly and AllData. Default + value is FailedDataOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. 
+ properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. 
Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + url: + description: The HTTP endpoint URL to which Kinesis Firehose + sends your data. + type: string + type: object + id: + type: string + kinesisSourceConfiguration: + description: The stream and role Amazon Resource Names (ARNs) + for a Kinesis data stream used as the source for a delivery + stream. See kinesis_source_configuration block below for details. + properties: + kinesisStreamArn: + description: The kinesis stream used as the source of the + firehose delivery stream. + type: string + roleArn: + description: The ARN of the role that provides access to the + source Kinesis stream. + type: string + type: object + mskSourceConfiguration: + description: The configuration for the Amazon MSK cluster to be + used as the source for a delivery stream. See msk_source_configuration + block below for details. + properties: + authenticationConfiguration: + description: The authentication configuration of the Amazon + MSK cluster. See authentication_configuration block below + for details. + properties: + connectivity: + description: 'The type of connectivity used to access + the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE.' + type: string + roleArn: + description: The ARN of the role used to access the Amazon + MSK cluster. + type: string + type: object + mskClusterArn: + description: The ARN of the Amazon MSK cluster. + type: string + topicName: + description: The topic name within the Amazon MSK cluster. + type: string + type: object + name: + description: A name to identify the stream. This is unique to + the AWS account and region the Stream is created in. 
When using + for WAF logging, name must be prefixed with aws-waf-logs-. See + AWS Documentation for more details. + type: string + opensearchConfiguration: + description: Configuration options when destination is opensearch. + See opensearch_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 900, before delivering + it to the destination. The default value is 300s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 100, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + clusterEndpoint: + description: The endpoint to use when communicating with the + cluster. Conflicts with domain_arn. + type: string + documentIdOptions: + description: The method for setting up document ID. See [document_id_options + block] below for details. + properties: + defaultDocumentIdFormat: + description: 'The method for setting up document ID. Valid + values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID.' + type: string + type: object + domainArn: + description: The ARN of the Amazon ES domain. The pattern + needs to be arn:.*. Conflicts with cluster_endpoint. + type: string + indexName: + description: The OpenSearch index name. + type: string + indexRotationPeriod: + description: The OpenSearch index rotation period. 
Index + rotation appends a timestamp to the IndexName to facilitate + expiration of old data. Valid values are NoRotation, OneHour, + OneDay, OneWeek, and OneMonth. The default value is OneDay. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Amazon + OpenSearch, the total amount of time, in seconds between + 0 to 7200, during which Firehose re-attempts delivery (including + the first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 300s. There will be no retry if the value is 0. 
+ type: number + roleArn: + description: The ARN of the IAM role to be assumed by Firehose + for calling the Amazon ES Configuration API and for indexing + documents. The IAM role must have permission for DescribeDomain, + DescribeDomains, and DescribeDomainConfig. The pattern + needs to be arn:.*. + type: string + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default + value is FailedDocumentsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. 
+ This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + typeName: + description: The Elasticsearch type name with maximum length + of 100 characters. Types are deprecated in OpenSearch_1.1. + TypeName must be empty. + type: string + vpcConfig: + description: The VPC configuration for the delivery stream + to connect to OpenSearch associated with the VPC. See vpc_config + block below for details. + properties: + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + securityGroupIds: + description: A list of security group IDs to associate + with Kinesis Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs to associate with Kinesis + Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + type: string + type: object + type: object + opensearchserverlessConfiguration: + description: Configuration options when destination is opensearchserverless. 
+ See opensearchserverless_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 900, before delivering + it to the destination. The default value is 300s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 100, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + collectionEndpoint: + description: The endpoint to use when communicating with the + collection in the Serverless offering for Amazon OpenSearch + Service. + type: string + indexName: + description: The Serverless offering for Amazon OpenSearch + Service index name. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. 
Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to the Serverless + offering for Amazon OpenSearch Service, the total amount + of time, in seconds between 0 to 7200, during which Kinesis + Data Firehose retries delivery (including the first attempt). After + this time has elapsed, the failed documents are written + to Amazon S3. The default value is 300s. There will be + no retry if the value is 0. + type: number + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role + to be assumed by Kinesis Data Firehose for calling the Serverless + offering for Amazon OpenSearch Service Configuration API + and for indexing documents. The pattern needs to be arn:.*. + type: string + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default + value is FailedDocumentsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. 
+ properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. 
Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + vpcConfig: + description: The VPC configuration for the delivery stream + to connect to OpenSearch Serverless associated with the + VPC. See vpc_config block below for details. + properties: + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + securityGroupIds: + description: A list of security group IDs to associate + with Kinesis Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs to associate with Kinesis + Firehose. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + type: string + type: object + type: object + redshiftConfiguration: + description: Configuration options when destination is redshift. + Requires the user to also specify an s3_configuration block. + See redshift_configuration block below for details. + properties: + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + clusterJdbcurl: + description: The jdbcurl of the redshift cluster. 
+ type: string + copyOptions: + description: Copy options for copying the data from the s3 + intermediate bucket into redshift, for example to change + the default delimiter. For valid values, see the AWS documentation + type: string + dataTableColumns: + description: The data table columns that will be targeted + by the copy command. + type: string + dataTableName: + description: The name of the table in the redshift cluster + that the s3 bucket will copy to. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' 
+ type: string + type: object + type: array + type: object + retryDuration: + description: The length of time during which Firehose retries + delivery after a failure, starting from the initial request + and including the first attempt. The default value is 3600 + seconds (60 minutes). Firehose does not retry if the value + of DurationInSeconds is 0 (zero) or if the first delivery + attempt takes longer than the current value. + type: number + roleArn: + description: The arn of the role the stream assumes. + type: string + s3BackupConfiguration: + description: The configuration for backup in Amazon S3. Required + if s3_backup_mode is Enabled. Supports the same fields as + s3_configuration object. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. 
+ type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + s3BackupMode: + description: The Amazon S3 backup mode. Valid values are + Disabled and Enabled. Default value is Disabled. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration below + for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. 
+ type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + username: + description: The username that the firehose delivery stream + will assume. It is strongly recommended that the username + and password provided is used exclusively for Amazon Kinesis + Firehose purposes, and that the permissions for the account + are restricted for Amazon Redshift INSERT permissions. + type: string + type: object + serverSideEncryption: + description: Encrypt at rest options. See server_side_encryption + block below for details. 
+ properties: + enabled: + description: Whether to enable encryption at rest. Default + is false. + type: boolean + keyArn: + description: Amazon Resource Name (ARN) of the encryption + key. Required when key_type is CUSTOMER_MANAGED_CMK. + type: string + keyType: + description: Type of encryption key. Default is AWS_OWNED_CMK. + Valid values are AWS_OWNED_CMK and CUSTOMER_MANAGED_CMK + type: string + type: object + snowflakeConfiguration: + description: Configuration options when destination is snowflake. + See snowflake_configuration block below for details. + properties: + accountUrl: + description: 'The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com.' + type: string + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + contentColumnName: + description: The name of the content column. + type: string + dataLoadingOption: + description: The data loading option. + type: string + database: + description: The Snowflake database name. + type: string + metadataColumnName: + description: The name of the metadata column. + type: string + processingConfiguration: + description: The processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. 
+ items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' + type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Snowflake, + the total amount of time, in seconds between 0 to 7200, + during which Firehose re-attempts delivery (including the + first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 60s. There will be no retry if the value is 0. + type: number + roleArn: + description: The ARN of the IAM role. + type: string + s3BackupMode: + description: The S3 backup mode. + type: string + s3Configuration: + description: The S3 configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. 
+ type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. + type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. 
Make sure role + has necessary IAM permissions + type: string + type: object + schema: + description: The Snowflake schema name. + type: string + snowflakeRoleConfiguration: + description: The configuration for Snowflake role. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + snowflakeRole: + description: The Snowflake role. + type: string + type: object + snowflakeVpcConfiguration: + description: The VPC configuration for Snowflake. + properties: + privateLinkVpceId: + description: The VPCE ID for Firehose to privately connect + with Snowflake. + type: string + type: object + table: + description: The Snowflake table name. + type: string + user: + description: The user for authentication. + type: string + type: object + splunkConfiguration: + description: Configuration options when destination is splunk. + See splunk_configuration block below for details. + properties: + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering it + to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, in + MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. This + value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. 
+ type: string + type: object + hecAcknowledgmentTimeout: + description: The amount of time, in seconds between 180 and + 600, that Kinesis Firehose waits to receive an acknowledgment + from Splunk after it sends it data. + type: number + hecEndpoint: + description: The HTTP Event Collector (HEC) endpoint to which + Kinesis Firehose sends your data. + type: string + hecEndpointType: + description: The HEC endpoint type. Valid values are Raw or + Event. The default value is Raw. + type: string + processingConfiguration: + description: The data processing configuration. See processing_configuration + block below for details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + processors: + description: Specifies the data processors as multiple + blocks. See processors block below for details. + items: + properties: + parameters: + description: Specifies the processor parameters + as multiple blocks. See parameters block below + for details. + items: + properties: + parameterName: + description: 'Parameter name. Valid Values: + LambdaArn, NumberOfRetries, MetadataExtractionQuery, + JsonParsingEngine, RoleArn, BufferSizeInMBs, + BufferIntervalInSeconds, SubRecordType, + Delimiter. Validation is done against AWS + SDK constants; so that values not explicitly + listed may also work.' + type: string + parameterValue: + description: Parameter value. Must be between + 1 and 512 length (inclusive). When providing + a Lambda ARN, you should specify the resource + version as well. + type: string + type: object + type: array + type: + description: 'The type of processor. Valid Values: + RecordDeAggregation, Lambda, MetadataExtraction, + AppendDelimiterToRecord. Validation is done against + AWS SDK constants; so that values not explicitly + listed may also work.' 
+ type: string + type: object + type: array + type: object + retryDuration: + description: After an initial failure to deliver to Splunk, + the total amount of time, in seconds between 0 to 7200, + during which Firehose re-attempts delivery (including the + first attempt). After this time has elapsed, the failed + documents are written to Amazon S3. The default value is + 300s. There will be no retry if the value is 0. + type: number + s3BackupMode: + description: Defines how documents should be delivered to + Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default + value is FailedEventsOnly. + type: string + s3Configuration: + description: The S3 Configuration. See s3_configuration block + below for details. + properties: + bucketArn: + description: The ARN of the S3 bucket + type: string + bufferingInterval: + description: Buffer incoming data for the specified period + of time, in seconds between 0 to 60, before delivering + it to the destination. The default value is 60s. + type: number + bufferingSize: + description: Buffer incoming data to the specified size, + in MBs between 1 to 5, before delivering it to the destination. The + default value is 5MB. + type: number + cloudwatchLoggingOptions: + description: The CloudWatch Logging Options for the delivery + stream. See cloudwatch_logging_options block below for + details. + properties: + enabled: + description: Enables or disables the logging. Defaults + to false. + type: boolean + logGroupName: + description: The CloudWatch group name for logging. + This value is required if enabled is true. + type: string + logStreamName: + description: The CloudWatch log stream name for logging. + This value is required if enabled is true. + type: string + type: object + compressionFormat: + description: The compression format. If no value is specified, + the default is UNCOMPRESSED. Other supported values + are GZIP, ZIP, Snappy, & HADOOP_SNAPPY. 
+ type: string + errorOutputPrefix: + description: Prefix added to failed records before writing + them to S3. Not currently supported for redshift destination. + This prefix appears immediately following the bucket + name. For information about how to specify this prefix, + see Custom Prefixes for Amazon S3 Objects. + type: string + kmsKeyArn: + description: |- + Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will + be used. + type: string + prefix: + description: The "YYYY/MM/DD/HH" time format prefix is + automatically used for delivered S3 files. You can specify + an extra prefix to be added in front of the time format + prefix. Note that if the prefix ends with a slash, it + appears as a folder in the S3 bucket + type: string + roleArn: + description: The ARN of the IAM role to be assumed by + Firehose for calling the Amazon EC2 configuration API + and for creating network interfaces. Make sure role + has necessary IAM permissions + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + versionId: + description: Specifies the table version for the output data schema. + Defaults to LATEST. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/fis.aws.upbound.io_experimenttemplates.yaml b/package/crds/fis.aws.upbound.io_experimenttemplates.yaml index 3d04078ea6..74a1c5f83c 100644 --- a/package/crds/fis.aws.upbound.io_experimenttemplates.yaml +++ b/package/crds/fis.aws.upbound.io_experimenttemplates.yaml @@ -1025,3 +1025,986 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ExperimentTemplate is the Schema for the ExperimentTemplates + API. Provides an FIS Experiment Template. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExperimentTemplateSpec defines the desired state of ExperimentTemplate + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: Action to be performed during an experiment. See + below. + items: + properties: + actionId: + description: ID of the action. To find out what actions + are supported see AWS FIS actions reference. + type: string + description: + description: Description of the action. + type: string + name: + description: Friendly name of the action. + type: string + parameter: + description: Parameter(s) for the action, if applicable. + See below. + items: + properties: + key: + description: Parameter name. + type: string + value: + description: Parameter value. + type: string + type: object + type: array + startAfter: + description: Set of action names that must complete before + this action can be executed. + items: + type: string + type: array + x-kubernetes-list-type: set + target: + description: Action's target, if applicable. See below. + properties: + key: + description: Tag key. + type: string + value: + description: Target name, referencing a corresponding + target. 
+ type: string + type: object + type: object + type: array + description: + description: Description for the experiment template. + type: string + logConfiguration: + description: The configuration for experiment logging. See below. + properties: + cloudwatchLogsConfiguration: + description: The configuration for experiment logging to Amazon + CloudWatch Logs. See below. + properties: + logGroupArn: + description: The Amazon Resource Name (ARN) of the destination + Amazon CloudWatch Logs log group. + type: string + type: object + logSchemaVersion: + description: The schema version. See documentation for the + list of schema versions. + type: number + s3Configuration: + description: The configuration for experiment logging to Amazon + S3. See below. + properties: + bucketName: + description: The name of the destination bucket. + type: string + prefix: + description: The bucket prefix. + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: ARN of an IAM role that grants the AWS FIS service + permission to perform service actions on your behalf. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stopCondition: + description: When an ongoing experiment should be stopped. See + below. + items: + properties: + source: + description: Source of the condition. One of none, aws:cloudwatch:alarm. + type: string + value: + description: ARN of the CloudWatch alarm. Required if the + source is a CloudWatch alarm. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + target: + description: Target of an action. See below. + items: + properties: + filter: + description: Filter(s) for the target. 
Filters can be used + to select resources based on specific attributes returned + by the respective describe action of the resource type. + For more information, see Targets for AWS FIS. See below. + items: + properties: + path: + description: Attribute path for the filter. + type: string + values: + description: Set of attribute values for the filter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + name: + description: Friendly name given to the target. + type: string + parameters: + additionalProperties: + type: string + description: The resource type parameters. + type: object + x-kubernetes-map-type: granular + resourceArns: + description: Set of ARNs of the resources to target with + an action. Conflicts with resource_tag. + items: + type: string + type: array + x-kubernetes-list-type: set + resourceTag: + description: Tag(s) the resources need to have to be considered + a valid target for an action. Conflicts with resource_arns. + See below. + items: + properties: + key: + description: Tag key. + type: string + value: + description: Tag value. + type: string + type: object + type: array + resourceType: + description: AWS resource type. The resource type must be + supported for the specified action. To find out what resource + types are supported, see Targets for AWS FIS. + type: string + selectionMode: + description: Scopes the identified resources. Valid values + are ALL (all identified resources), COUNT(n) (randomly + select n of the identified resources), PERCENT(n) (randomly + select n percent of the identified resources). + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: Action to be performed during an experiment. See + below. + items: + properties: + actionId: + description: ID of the action. To find out what actions + are supported see AWS FIS actions reference. + type: string + description: + description: Description of the action. + type: string + name: + description: Friendly name of the action. + type: string + parameter: + description: Parameter(s) for the action, if applicable. + See below. + items: + properties: + key: + description: Parameter name. + type: string + value: + description: Parameter value. + type: string + type: object + type: array + startAfter: + description: Set of action names that must complete before + this action can be executed. + items: + type: string + type: array + x-kubernetes-list-type: set + target: + description: Action's target, if applicable. See below. + properties: + key: + description: Tag key. + type: string + value: + description: Target name, referencing a corresponding + target. + type: string + type: object + type: object + type: array + description: + description: Description for the experiment template. + type: string + logConfiguration: + description: The configuration for experiment logging. See below. + properties: + cloudwatchLogsConfiguration: + description: The configuration for experiment logging to Amazon + CloudWatch Logs. See below. + properties: + logGroupArn: + description: The Amazon Resource Name (ARN) of the destination + Amazon CloudWatch Logs log group. 
+ type: string + type: object + logSchemaVersion: + description: The schema version. See documentation for the + list of schema versions. + type: number + s3Configuration: + description: The configuration for experiment logging to Amazon + S3. See below. + properties: + bucketName: + description: The name of the destination bucket. + type: string + prefix: + description: The bucket prefix. + type: string + type: object + type: object + roleArn: + description: ARN of an IAM role that grants the AWS FIS service + permission to perform service actions on your behalf. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stopCondition: + description: When an ongoing experiment should be stopped. See + below. + items: + properties: + source: + description: Source of the condition. One of none, aws:cloudwatch:alarm. + type: string + value: + description: ARN of the CloudWatch alarm. Required if the + source is a CloudWatch alarm. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + target: + description: Target of an action. See below. + items: + properties: + filter: + description: Filter(s) for the target. Filters can be used + to select resources based on specific attributes returned + by the respective describe action of the resource type. + For more information, see Targets for AWS FIS. See below. + items: + properties: + path: + description: Attribute path for the filter. + type: string + values: + description: Set of attribute values for the filter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + name: + description: Friendly name given to the target. 
+ type: string + parameters: + additionalProperties: + type: string + description: The resource type parameters. + type: object + x-kubernetes-map-type: granular + resourceArns: + description: Set of ARNs of the resources to target with + an action. Conflicts with resource_tag. + items: + type: string + type: array + x-kubernetes-list-type: set + resourceTag: + description: Tag(s) the resources need to have to be considered + a valid target for an action. Conflicts with resource_arns. + See below. + items: + properties: + key: + description: Tag key. + type: string + value: + description: Tag value. + type: string + type: object + type: array + resourceType: + description: AWS resource type. The resource type must be + supported for the specified action. To find out what resource + types are supported, see Targets for AWS FIS. + type: string + selectionMode: + description: Scopes the identified resources. Valid values + are ALL (all identified resources), COUNT(n) (randomly + select n of the identified resources), PERCENT(n) (randomly + select n percent of the identified resources). + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.action is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.action) + || (has(self.initProvider) && has(self.initProvider.action))' + - message: spec.forProvider.description is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.description) + || (has(self.initProvider) && has(self.initProvider.description))' + - message: spec.forProvider.stopCondition is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.stopCondition) + || (has(self.initProvider) && has(self.initProvider.stopCondition))' + status: + description: ExperimentTemplateStatus defines the observed state of ExperimentTemplate. 
+ properties: + atProvider: + properties: + action: + description: Action to be performed during an experiment. See + below. + items: + properties: + actionId: + description: ID of the action. To find out what actions + are supported see AWS FIS actions reference. + type: string + description: + description: Description of the action. + type: string + name: + description: Friendly name of the action. + type: string + parameter: + description: Parameter(s) for the action, if applicable. + See below. + items: + properties: + key: + description: Parameter name. + type: string + value: + description: Parameter value. + type: string + type: object + type: array + startAfter: + description: Set of action names that must complete before + this action can be executed. + items: + type: string + type: array + x-kubernetes-list-type: set + target: + description: Action's target, if applicable. See below. + properties: + key: + description: Tag key. + type: string + value: + description: Target name, referencing a corresponding + target. + type: string + type: object + type: object + type: array + description: + description: Description for the experiment template. + type: string + id: + description: Experiment Template ID. + type: string + logConfiguration: + description: The configuration for experiment logging. See below. + properties: + cloudwatchLogsConfiguration: + description: The configuration for experiment logging to Amazon + CloudWatch Logs. See below. + properties: + logGroupArn: + description: The Amazon Resource Name (ARN) of the destination + Amazon CloudWatch Logs log group. + type: string + type: object + logSchemaVersion: + description: The schema version. See documentation for the + list of schema versions. + type: number + s3Configuration: + description: The configuration for experiment logging to Amazon + S3. See below. + properties: + bucketName: + description: The name of the destination bucket. + type: string + prefix: + description: The bucket prefix. 
+ type: string + type: object + type: object + roleArn: + description: ARN of an IAM role that grants the AWS FIS service + permission to perform service actions on your behalf. + type: string + stopCondition: + description: When an ongoing experiment should be stopped. See + below. + items: + properties: + source: + description: Source of the condition. One of none, aws:cloudwatch:alarm. + type: string + value: + description: ARN of the CloudWatch alarm. Required if the + source is a CloudWatch alarm. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + target: + description: Target of an action. See below. + items: + properties: + filter: + description: Filter(s) for the target. Filters can be used + to select resources based on specific attributes returned + by the respective describe action of the resource type. + For more information, see Targets for AWS FIS. See below. + items: + properties: + path: + description: Attribute path for the filter. + type: string + values: + description: Set of attribute values for the filter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + name: + description: Friendly name given to the target. + type: string + parameters: + additionalProperties: + type: string + description: The resource type parameters. + type: object + x-kubernetes-map-type: granular + resourceArns: + description: Set of ARNs of the resources to target with + an action. Conflicts with resource_tag. + items: + type: string + type: array + x-kubernetes-list-type: set + resourceTag: + description: Tag(s) the resources need to have to be considered + a valid target for an action. Conflicts with resource_arns. + See below. + items: + properties: + key: + description: Tag key. 
+ type: string + value: + description: Tag value. + type: string + type: object + type: array + resourceType: + description: AWS resource type. The resource type must be + supported for the specified action. To find out what resource + types are supported, see Targets for AWS FIS. + type: string + selectionMode: + description: Scopes the identified resources. Valid values + are ALL (all identified resources), COUNT(n) (randomly + select n of the identified resources), PERCENT(n) (randomly + select n percent of the identified resources). + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/fsx.aws.upbound.io_datarepositoryassociations.yaml b/package/crds/fsx.aws.upbound.io_datarepositoryassociations.yaml index cd0f98f6b3..1842b764f2 100644 --- a/package/crds/fsx.aws.upbound.io_datarepositoryassociations.yaml +++ b/package/crds/fsx.aws.upbound.io_datarepositoryassociations.yaml @@ -777,3 +777,744 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataRepositoryAssociation is the Schema for the DataRepositoryAssociations + API. Manages a FSx for Lustre Data Repository Association. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataRepositoryAssociationSpec defines the desired state of + DataRepositoryAssociation + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + batchImportMetaDataOnCreate: + description: Set to true to run an import data repository task + to import metadata from the data repository to the file system + after the data repository association is created. Defaults to + false. + type: boolean + dataRepositoryPath: + description: The path to the Amazon S3 data repository that will + be linked to the file system. The path must be an S3 bucket + s3://myBucket/myPrefix/. This path specifies where in the S3 + data repository files will be imported from or exported to. + The same S3 bucket cannot be linked more than once to the same + file system. + type: string + deleteDataInFilesystem: + description: Set to true to delete files from the file system + upon deleting this data repository association. Defaults to + false. 
+ type: boolean + fileSystemId: + description: The ID of the Amazon FSx file system to on which + to create a data repository association. + type: string + fileSystemIdRef: + description: Reference to a LustreFileSystem in fsx to populate + fileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileSystemIdSelector: + description: Selector for a LustreFileSystem in fsx to populate + fileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + fileSystemPath: + description: A path on the file system that points to a high-level + directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) + that will be mapped 1-1 with data_repository_path. The leading + forward slash in the name is required. Two data repository associations + cannot have overlapping file system paths. For example, if a + data repository is associated with file system path /ns1/, then + you cannot link another data repository with file system path + /ns1/ns2. This path specifies where in your file system files + will be exported from or imported to. This file system directory + can be linked to only one Amazon S3 bucket, and no other S3 + bucket can be linked to the directory. + type: string + importedFileChunkSize: + description: For files imported from a data repository, this value + determines the stripe count and maximum amount of data per file + (in MiB) stored on a single physical disk. The maximum number + of disks that a single file can be striped across is limited + by the total number of disks that make up the file system. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + s3: + description: |- + See the s3 configuration block. Max of 1. + The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. 
The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. + properties: + autoExportPolicy: + description: Specifies the type of updated objects that will + be automatically exported from your file system to the linked + S3 bucket. See the events configuration block. + properties: + events: + description: A list of file event types to automatically + export to your linked S3 bucket or import from the linked + S3 bucket. Valid values are NEW, CHANGED, DELETED. Max + of 3. + items: + type: string + type: array + type: object + autoImportPolicy: + description: Specifies the type of updated objects that will + be automatically imported from the linked S3 bucket to your + file system. See the events configuration block. + properties: + events: + description: A list of file event types to automatically + export to your linked S3 bucket or import from the linked + S3 bucket. Valid values are NEW, CHANGED, DELETED. Max + of 3. + items: + type: string + type: array + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + batchImportMetaDataOnCreate: + description: Set to true to run an import data repository task + to import metadata from the data repository to the file system + after the data repository association is created. Defaults to + false. + type: boolean + dataRepositoryPath: + description: The path to the Amazon S3 data repository that will + be linked to the file system. The path must be an S3 bucket + s3://myBucket/myPrefix/. This path specifies where in the S3 + data repository files will be imported from or exported to. + The same S3 bucket cannot be linked more than once to the same + file system. + type: string + deleteDataInFilesystem: + description: Set to true to delete files from the file system + upon deleting this data repository association. Defaults to + false. + type: boolean + fileSystemId: + description: The ID of the Amazon FSx file system to on which + to create a data repository association. + type: string + fileSystemIdRef: + description: Reference to a LustreFileSystem in fsx to populate + fileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileSystemIdSelector: + description: Selector for a LustreFileSystem in fsx to populate + fileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + fileSystemPath: + description: A path on the file system that points to a high-level + directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) + that will be mapped 1-1 with data_repository_path. The leading + forward slash in the name is required. Two data repository associations + cannot have overlapping file system paths. For example, if a + data repository is associated with file system path /ns1/, then + you cannot link another data repository with file system path + /ns1/ns2. This path specifies where in your file system files + will be exported from or imported to. 
This file system directory + can be linked to only one Amazon S3 bucket, and no other S3 + bucket can be linked to the directory. + type: string + importedFileChunkSize: + description: For files imported from a data repository, this value + determines the stripe count and maximum amount of data per file + (in MiB) stored on a single physical disk. The maximum number + of disks that a single file can be striped across is limited + by the total number of disks that make up the file system. + type: number + s3: + description: |- + See the s3 configuration block. Max of 1. + The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. + properties: + autoExportPolicy: + description: Specifies the type of updated objects that will + be automatically exported from your file system to the linked + S3 bucket. See the events configuration block. + properties: + events: + description: A list of file event types to automatically + export to your linked S3 bucket or import from the linked + S3 bucket. Valid values are NEW, CHANGED, DELETED. Max + of 3. + items: + type: string + type: array + type: object + autoImportPolicy: + description: Specifies the type of updated objects that will + be automatically imported from the linked S3 bucket to your + file system. See the events configuration block. + properties: + events: + description: A list of file event types to automatically + export to your linked S3 bucket or import from the linked + S3 bucket. Valid values are NEW, CHANGED, DELETED. Max + of 3. + items: + type: string + type: array + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dataRepositoryPath is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dataRepositoryPath) + || (has(self.initProvider) && has(self.initProvider.dataRepositoryPath))' + - message: spec.forProvider.fileSystemPath is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.fileSystemPath) + || (has(self.initProvider) && has(self.initProvider.fileSystemPath))' + status: + description: DataRepositoryAssociationStatus defines the observed state + of DataRepositoryAssociation. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name of the file system. + type: string + associationId: + description: Identifier of the data repository association, e.g., + dra-12345678 + type: string + batchImportMetaDataOnCreate: + description: Set to true to run an import data repository task + to import metadata from the data repository to the file system + after the data repository association is created. Defaults to + false. + type: boolean + dataRepositoryPath: + description: The path to the Amazon S3 data repository that will + be linked to the file system. The path must be an S3 bucket + s3://myBucket/myPrefix/. This path specifies where in the S3 + data repository files will be imported from or exported to. + The same S3 bucket cannot be linked more than once to the same + file system. + type: string + deleteDataInFilesystem: + description: Set to true to delete files from the file system + upon deleting this data repository association. Defaults to + false. 
+ type: boolean + fileSystemId: + description: The ID of the Amazon FSx file system to on which + to create a data repository association. + type: string + fileSystemPath: + description: A path on the file system that points to a high-level + directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) + that will be mapped 1-1 with data_repository_path. The leading + forward slash in the name is required. Two data repository associations + cannot have overlapping file system paths. For example, if a + data repository is associated with file system path /ns1/, then + you cannot link another data repository with file system path + /ns1/ns2. This path specifies where in your file system files + will be exported from or imported to. This file system directory + can be linked to only one Amazon S3 bucket, and no other S3 + bucket can be linked to the directory. + type: string + id: + description: Identifier of the data repository association, e.g., + dra-12345678 + type: string + importedFileChunkSize: + description: For files imported from a data repository, this value + determines the stripe count and maximum amount of data per file + (in MiB) stored on a single physical disk. The maximum number + of disks that a single file can be striped across is limited + by the total number of disks that make up the file system. + type: number + s3: + description: |- + See the s3 configuration block. Max of 1. + The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. + properties: + autoExportPolicy: + description: Specifies the type of updated objects that will + be automatically exported from your file system to the linked + S3 bucket. 
See the events configuration block. + properties: + events: + description: A list of file event types to automatically + export to your linked S3 bucket or import from the linked + S3 bucket. Valid values are NEW, CHANGED, DELETED. Max + of 3. + items: + type: string + type: array + type: object + autoImportPolicy: + description: Specifies the type of updated objects that will + be automatically imported from the linked S3 bucket to your + file system. See the events configuration block. + properties: + events: + description: A list of file event types to automatically + export to your linked S3 bucket or import from the linked + S3 bucket. Valid values are NEW, CHANGED, DELETED. Max + of 3. + items: + type: string + type: array + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/fsx.aws.upbound.io_lustrefilesystems.yaml b/package/crds/fsx.aws.upbound.io_lustrefilesystems.yaml index 04dbaa4b9e..c98f752a9b 100644 --- a/package/crds/fsx.aws.upbound.io_lustrefilesystems.yaml +++ b/package/crds/fsx.aws.upbound.io_lustrefilesystems.yaml @@ -1351,3 +1351,1321 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LustreFileSystem is the Schema for the LustreFileSystems API. + Manages a FSx Lustre File System. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LustreFileSystemSpec defines the desired state of LustreFileSystem + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoImportPolicy: + description: How Amazon FSx keeps your file and directory listings + up to date as you add or modify objects in your linked S3 bucket. + see Auto Import Data Repo for more details. Only supported on + PERSISTENT_1 deployment types. + type: string + automaticBackupRetentionDays: + description: The number of days to retain automatic backups. Setting + this to 0 disables automatic backups. You can retain automatic + backups for a maximum of 90 days. only valid for PERSISTENT_1 + and PERSISTENT_2 deployment_type. 
+ type: number + backupId: + description: The ID of the source backup to create the filesystem + from. + type: string + copyTagsToBackups: + description: A boolean flag indicating whether tags for the file + system should be copied to backups. Applicable for PERSISTENT_1 + and PERSISTENT_2 deployment_type. The default value is false. + type: boolean + dailyAutomaticBackupStartTime: + description: A recurring daily time, in the format HH:MM. HH is + the zero-padded hour of the day (0-23), and MM is the zero-padded + minute of the hour. For example, 05:00 specifies 5 AM daily. + only valid for PERSISTENT_1 and PERSISTENT_2 deployment_type. + Requires automatic_backup_retention_days to be set. + type: string + dataCompressionType: + description: Sets the data compression configuration for the file + system. Valid values are LZ4 and NONE. Default value is NONE. + Unsetting this value reverts the compression type back to NONE. + type: string + deploymentType: + description: '- The filesystem deployment type. One of: SCRATCH_1, + SCRATCH_2, PERSISTENT_1, PERSISTENT_2.' + type: string + driveCacheType: + description: '- The type of drive cache used by PERSISTENT_1 filesystems + that are provisioned with HDD storage_type. Required for HDD + storage_type, set to either READ or NONE.' + type: string + exportPath: + description: S3 URI (with optional prefix) where the root of your + Amazon FSx file system is exported. Can only be specified with + import_path argument and the path must use the same Amazon S3 + bucket as specified in import_path. Set equal to import_path + to overwrite files on export. Defaults to s3://{IMPORT BUCKET}/FSxLustre{CREATION + TIMESTAMP}. Only supported on PERSISTENT_1 deployment types. + type: string + fileSystemTypeVersion: + description: Sets the Lustre version for the file system that + you're creating. Valid values are 2.10 for SCRATCH_1, SCRATCH_2 + and PERSISTENT_1 deployment types. Valid values for 2.12 include + all deployment types. 
+ type: string + importPath: + description: S3 URI (with optional prefix) that you're using as + the data repository for your FSx for Lustre file system. For + example, s3://example-bucket/optional-prefix/. Only supported + on PERSISTENT_1 deployment types. + type: string + importedFileChunkSize: + description: For files imported from a data repository, this value + determines the stripe count and maximum amount of data per file + (in MiB) stored on a single physical disk. Can only be specified + with import_path argument. Defaults to 1024. Minimum of 1 and + maximum of 512000. Only supported on PERSISTENT_1 deployment + types. + type: number + kmsKeyId: + description: ARN for the KMS Key to encrypt the file system at + rest, applicable for PERSISTENT_1 and PERSISTENT_2 deployment_type. + Defaults to an AWS managed KMS Key. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logConfiguration: + description: The Lustre logging configuration used when creating + an Amazon FSx for Lustre file system. When logging is enabled, + Lustre logs error and warning events for data repositories associated + with your file system to Amazon CloudWatch Logs. + properties: + destination: + description: The Amazon Resource Name (ARN) that specifies + the destination of the logs. The name of the Amazon CloudWatch + Logs log group must begin with the /aws/fsx prefix. If you + do not provide a destination, Amazon FSx will create and + use a log stream in the CloudWatch Logs /aws/fsx/lustre + log group. + type: string + level: + description: Sets which data repository events are logged + by Amazon FSx. Valid values are WARN_ONLY, FAILURE_ONLY, + ERROR_ONLY, WARN_ERROR and DISABLED. Default value is DISABLED. 
+ type: string + type: object + perUnitStorageThroughput: + description: '- Describes the amount of read and write throughput + for each 1 tebibyte of storage, in MB/s/TiB, required for the + PERSISTENT_1 and PERSISTENT_2 deployment_type. Valid values + for PERSISTENT_1 deployment_type and SSD storage_type are 50, + 100, 200. Valid values for PERSISTENT_1 deployment_type and + HDD storage_type are 12, 40. Valid values for PERSISTENT_2 deployment_type + and SSD storage_type are 125, 250, 500, 1000.' + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rootSquashConfiguration: + description: The Lustre root squash configuration used when creating + an Amazon FSx for Lustre file system. When enabled, root squash + restricts root-level access from clients that try to access + your file system as a root user. + properties: + noSquashNids: + description: 'When root squash is enabled, you can optionally + specify an array of NIDs of clients for which root squash + does not apply. A client NID is a Lustre Network Identifier + used to uniquely identify a client. You can specify the + NID as either a single address or a range of addresses: + 1. A single address is described in standard Lustre NID + format by specifying the client’s IP address followed by + the Lustre network ID (for example, 10.0.1.6@tcp). 2. An + address range is described using a dash to separate the + range (for example, 10.0.[2-10].[1-255]@tcp).' + items: + type: string + type: array + x-kubernetes-list-type: set + rootSquash: + description: You enable root squash by setting a user ID (UID) + and group ID (GID) for the file system in the format UID:GID + (for example, 365534:65534). The UID and GID values can + range from 0 to 4294967294. + type: string + type: object + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of IDs for the security groups that apply + to the specified network interfaces created for file system + access. These security groups will apply to all network interfaces. + items: + type: string + type: array + x-kubernetes-list-type: set + storageCapacity: + description: The storage capacity (GiB) of the file system. Minimum + of 1200. See more details at Allowed values for Fsx storage + capacity. Update is allowed only for SCRATCH_2, PERSISTENT_1 + and PERSISTENT_2 deployment types, See more details at Fsx Storage + Capacity Update. Required when not creating filesystem for a + backup. + type: number + storageType: + description: '- The filesystem storage type. Either SSD or HDD, + defaults to SSD. HDD is only supported on PERSISTENT_1 deployment + types.' + type: string + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of IDs for the subnets that the file system + will be accessible from. File systems currently support only + one subnet. The file server is also launched in that subnet's + Availability Zone. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + weeklyMaintenanceStartTime: + description: The preferred start time (in d:HH:MM format) to perform + weekly maintenance, in the UTC time zone. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoImportPolicy: + description: How Amazon FSx keeps your file and directory listings + up to date as you add or modify objects in your linked S3 bucket. + see Auto Import Data Repo for more details. Only supported on + PERSISTENT_1 deployment types. + type: string + automaticBackupRetentionDays: + description: The number of days to retain automatic backups. Setting + this to 0 disables automatic backups. You can retain automatic + backups for a maximum of 90 days. only valid for PERSISTENT_1 + and PERSISTENT_2 deployment_type. + type: number + backupId: + description: The ID of the source backup to create the filesystem + from. + type: string + copyTagsToBackups: + description: A boolean flag indicating whether tags for the file + system should be copied to backups. Applicable for PERSISTENT_1 + and PERSISTENT_2 deployment_type. The default value is false. + type: boolean + dailyAutomaticBackupStartTime: + description: A recurring daily time, in the format HH:MM. HH is + the zero-padded hour of the day (0-23), and MM is the zero-padded + minute of the hour. For example, 05:00 specifies 5 AM daily. + only valid for PERSISTENT_1 and PERSISTENT_2 deployment_type. + Requires automatic_backup_retention_days to be set. + type: string + dataCompressionType: + description: Sets the data compression configuration for the file + system. 
Valid values are LZ4 and NONE. Default value is NONE. + Unsetting this value reverts the compression type back to NONE. + type: string + deploymentType: + description: '- The filesystem deployment type. One of: SCRATCH_1, + SCRATCH_2, PERSISTENT_1, PERSISTENT_2.' + type: string + driveCacheType: + description: '- The type of drive cache used by PERSISTENT_1 filesystems + that are provisioned with HDD storage_type. Required for HDD + storage_type, set to either READ or NONE.' + type: string + exportPath: + description: S3 URI (with optional prefix) where the root of your + Amazon FSx file system is exported. Can only be specified with + import_path argument and the path must use the same Amazon S3 + bucket as specified in import_path. Set equal to import_path + to overwrite files on export. Defaults to s3://{IMPORT BUCKET}/FSxLustre{CREATION + TIMESTAMP}. Only supported on PERSISTENT_1 deployment types. + type: string + fileSystemTypeVersion: + description: Sets the Lustre version for the file system that + you're creating. Valid values are 2.10 for SCRATCH_1, SCRATCH_2 + and PERSISTENT_1 deployment types. Valid values for 2.12 include + all deployment types. + type: string + importPath: + description: S3 URI (with optional prefix) that you're using as + the data repository for your FSx for Lustre file system. For + example, s3://example-bucket/optional-prefix/. Only supported + on PERSISTENT_1 deployment types. + type: string + importedFileChunkSize: + description: For files imported from a data repository, this value + determines the stripe count and maximum amount of data per file + (in MiB) stored on a single physical disk. Can only be specified + with import_path argument. Defaults to 1024. Minimum of 1 and + maximum of 512000. Only supported on PERSISTENT_1 deployment + types. + type: number + kmsKeyId: + description: ARN for the KMS Key to encrypt the file system at + rest, applicable for PERSISTENT_1 and PERSISTENT_2 deployment_type. 
+ Defaults to an AWS managed KMS Key. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logConfiguration: + description: The Lustre logging configuration used when creating + an Amazon FSx for Lustre file system. When logging is enabled, + Lustre logs error and warning events for data repositories associated + with your file system to Amazon CloudWatch Logs. + properties: + destination: + description: The Amazon Resource Name (ARN) that specifies + the destination of the logs. The name of the Amazon CloudWatch + Logs log group must begin with the /aws/fsx prefix. If you + do not provide a destination, Amazon FSx will create and + use a log stream in the CloudWatch Logs /aws/fsx/lustre + log group. + type: string + level: + description: Sets which data repository events are logged + by Amazon FSx. Valid values are WARN_ONLY, FAILURE_ONLY, + ERROR_ONLY, WARN_ERROR and DISABLED. Default value is DISABLED. + type: string + type: object + perUnitStorageThroughput: + description: '- Describes the amount of read and write throughput + for each 1 tebibyte of storage, in MB/s/TiB, required for the + PERSISTENT_1 and PERSISTENT_2 deployment_type. Valid values + for PERSISTENT_1 deployment_type and SSD storage_type are 50, + 100, 200. Valid values for PERSISTENT_1 deployment_type and + HDD storage_type are 12, 40. Valid values for PERSISTENT_2 deployment_type + and SSD storage_type are 125, 250, 500, 1000.' + type: number + rootSquashConfiguration: + description: The Lustre root squash configuration used when creating + an Amazon FSx for Lustre file system. When enabled, root squash + restricts root-level access from clients that try to access + your file system as a root user. 
+ properties: + noSquashNids: + description: 'When root squash is enabled, you can optionally + specify an array of NIDs of clients for which root squash + does not apply. A client NID is a Lustre Network Identifier + used to uniquely identify a client. You can specify the + NID as either a single address or a range of addresses: + 1. A single address is described in standard Lustre NID + format by specifying the client’s IP address followed by + the Lustre network ID (for example, 10.0.1.6@tcp). 2. An + address range is described using a dash to separate the + range (for example, 10.0.[2-10].[1-255]@tcp).' + items: + type: string + type: array + x-kubernetes-list-type: set + rootSquash: + description: You enable root squash by setting a user ID (UID) + and group ID (GID) for the file system in the format UID:GID + (for example, 365534:65534). The UID and GID values can + range from 0 to 4294967294. + type: string + type: object + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of IDs for the security groups that apply + to the specified network interfaces created for file system + access. These security groups will apply to all network interfaces. + items: + type: string + type: array + x-kubernetes-list-type: set + storageCapacity: + description: The storage capacity (GiB) of the file system. Minimum + of 1200. See more details at Allowed values for Fsx storage + capacity. Update is allowed only for SCRATCH_2, PERSISTENT_1 + and PERSISTENT_2 deployment types, See more details at Fsx Storage + Capacity Update. Required when not creating filesystem for a + backup. 
+ type: number + storageType: + description: '- The filesystem storage type. Either SSD or HDD, + defaults to SSD. HDD is only supported on PERSISTENT_1 deployment + types.' + type: string + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of IDs for the subnets that the file system + will be accessible from. File systems currently support only + one subnet. The file server is also launched in that subnet's + Availability Zone. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + weeklyMaintenanceStartTime: + description: The preferred start time (in d:HH:MM format) to perform + weekly maintenance, in the UTC time zone. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: LustreFileSystemStatus defines the observed state of LustreFileSystem. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name of the file system. + type: string + autoImportPolicy: + description: How Amazon FSx keeps your file and directory listings + up to date as you add or modify objects in your linked S3 bucket. + see Auto Import Data Repo for more details. Only supported on + PERSISTENT_1 deployment types. + type: string + automaticBackupRetentionDays: + description: The number of days to retain automatic backups. Setting + this to 0 disables automatic backups. You can retain automatic + backups for a maximum of 90 days. only valid for PERSISTENT_1 + and PERSISTENT_2 deployment_type. + type: number + backupId: + description: The ID of the source backup to create the filesystem + from. + type: string + copyTagsToBackups: + description: A boolean flag indicating whether tags for the file + system should be copied to backups. Applicable for PERSISTENT_1 + and PERSISTENT_2 deployment_type. 
The default value is false. + type: boolean + dailyAutomaticBackupStartTime: + description: A recurring daily time, in the format HH:MM. HH is + the zero-padded hour of the day (0-23), and MM is the zero-padded + minute of the hour. For example, 05:00 specifies 5 AM daily. + only valid for PERSISTENT_1 and PERSISTENT_2 deployment_type. + Requires automatic_backup_retention_days to be set. + type: string + dataCompressionType: + description: Sets the data compression configuration for the file + system. Valid values are LZ4 and NONE. Default value is NONE. + Unsetting this value reverts the compression type back to NONE. + type: string + deploymentType: + description: '- The filesystem deployment type. One of: SCRATCH_1, + SCRATCH_2, PERSISTENT_1, PERSISTENT_2.' + type: string + dnsName: + description: DNS name for the file system, e.g., fs-12345678.fsx.us-west-2.amazonaws.com + type: string + driveCacheType: + description: '- The type of drive cache used by PERSISTENT_1 filesystems + that are provisioned with HDD storage_type. Required for HDD + storage_type, set to either READ or NONE.' + type: string + exportPath: + description: S3 URI (with optional prefix) where the root of your + Amazon FSx file system is exported. Can only be specified with + import_path argument and the path must use the same Amazon S3 + bucket as specified in import_path. Set equal to import_path + to overwrite files on export. Defaults to s3://{IMPORT BUCKET}/FSxLustre{CREATION + TIMESTAMP}. Only supported on PERSISTENT_1 deployment types. + type: string + fileSystemTypeVersion: + description: Sets the Lustre version for the file system that + you're creating. Valid values are 2.10 for SCRATCH_1, SCRATCH_2 + and PERSISTENT_1 deployment types. Valid values for 2.12 include + all deployment types. 
+ type: string + id: + description: Identifier of the file system, e.g., fs-12345678 + type: string + importPath: + description: S3 URI (with optional prefix) that you're using as + the data repository for your FSx for Lustre file system. For + example, s3://example-bucket/optional-prefix/. Only supported + on PERSISTENT_1 deployment types. + type: string + importedFileChunkSize: + description: For files imported from a data repository, this value + determines the stripe count and maximum amount of data per file + (in MiB) stored on a single physical disk. Can only be specified + with import_path argument. Defaults to 1024. Minimum of 1 and + maximum of 512000. Only supported on PERSISTENT_1 deployment + types. + type: number + kmsKeyId: + description: ARN for the KMS Key to encrypt the file system at + rest, applicable for PERSISTENT_1 and PERSISTENT_2 deployment_type. + Defaults to an AWS managed KMS Key. + type: string + logConfiguration: + description: The Lustre logging configuration used when creating + an Amazon FSx for Lustre file system. When logging is enabled, + Lustre logs error and warning events for data repositories associated + with your file system to Amazon CloudWatch Logs. + properties: + destination: + description: The Amazon Resource Name (ARN) that specifies + the destination of the logs. The name of the Amazon CloudWatch + Logs log group must begin with the /aws/fsx prefix. If you + do not provide a destination, Amazon FSx will create and + use a log stream in the CloudWatch Logs /aws/fsx/lustre + log group. + type: string + level: + description: Sets which data repository events are logged + by Amazon FSx. Valid values are WARN_ONLY, FAILURE_ONLY, + ERROR_ONLY, WARN_ERROR and DISABLED. Default value is DISABLED. + type: string + type: object + mountName: + description: The value to be used when mounting the filesystem. 
+ type: string + networkInterfaceIds: + description: Set of Elastic Network Interface identifiers from + which the file system is accessible. As explained in the documentation, + the first network interface returned is the primary network + interface. + items: + type: string + type: array + ownerId: + description: AWS account identifier that created the file system. + type: string + perUnitStorageThroughput: + description: '- Describes the amount of read and write throughput + for each 1 tebibyte of storage, in MB/s/TiB, required for the + PERSISTENT_1 and PERSISTENT_2 deployment_type. Valid values + for PERSISTENT_1 deployment_type and SSD storage_type are 50, + 100, 200. Valid values for PERSISTENT_1 deployment_type and + HDD storage_type are 12, 40. Valid values for PERSISTENT_2 deployment_type + and SSD storage_type are 125, 250, 500, 1000.' + type: number + rootSquashConfiguration: + description: The Lustre root squash configuration used when creating + an Amazon FSx for Lustre file system. When enabled, root squash + restricts root-level access from clients that try to access + your file system as a root user. + properties: + noSquashNids: + description: 'When root squash is enabled, you can optionally + specify an array of NIDs of clients for which root squash + does not apply. A client NID is a Lustre Network Identifier + used to uniquely identify a client. You can specify the + NID as either a single address or a range of addresses: + 1. A single address is described in standard Lustre NID + format by specifying the client’s IP address followed by + the Lustre network ID (for example, 10.0.1.6@tcp). 2. An + address range is described using a dash to separate the + range (for example, 10.0.[2-10].[1-255]@tcp).' + items: + type: string + type: array + x-kubernetes-list-type: set + rootSquash: + description: You enable root squash by setting a user ID (UID) + and group ID (GID) for the file system in the format UID:GID + (for example, 365534:65534). 
The UID and GID values can + range from 0 to 4294967294. + type: string + type: object + securityGroupIds: + description: A list of IDs for the security groups that apply + to the specified network interfaces created for file system + access. These security groups will apply to all network interfaces. + items: + type: string + type: array + x-kubernetes-list-type: set + storageCapacity: + description: The storage capacity (GiB) of the file system. Minimum + of 1200. See more details at Allowed values for Fsx storage + capacity. Update is allowed only for SCRATCH_2, PERSISTENT_1 + and PERSISTENT_2 deployment types, See more details at Fsx Storage + Capacity Update. Required when not creating filesystem for a + backup. + type: number + storageType: + description: '- The filesystem storage type. Either SSD or HDD, + defaults to SSD. HDD is only supported on PERSISTENT_1 deployment + types.' + type: string + subnetIds: + description: A list of IDs for the subnets that the file system + will be accessible from. File systems currently support only + one subnet. The file server is also launched in that subnet's + Availability Zone. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcId: + description: Identifier of the Virtual Private Cloud for the file + system. + type: string + weeklyMaintenanceStartTime: + description: The preferred start time (in d:HH:MM format) to perform + weekly maintenance, in the UTC time zone. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/fsx.aws.upbound.io_ontapfilesystems.yaml b/package/crds/fsx.aws.upbound.io_ontapfilesystems.yaml index 8aa2a217b1..086d8f3446 100644 --- a/package/crds/fsx.aws.upbound.io_ontapfilesystems.yaml +++ b/package/crds/fsx.aws.upbound.io_ontapfilesystems.yaml @@ -1385,3 +1385,1364 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: OntapFileSystem is the Schema for the OntapFileSystems API. Manages + an Amazon FSx for NetApp ONTAP file system. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OntapFileSystemSpec defines the desired state of OntapFileSystem + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + automaticBackupRetentionDays: + description: The number of days to retain automatic backups. Setting + this to 0 disables automatic backups. You can retain automatic + backups for a maximum of 90 days. + type: number + dailyAutomaticBackupStartTime: + description: A recurring daily time, in the format HH:MM. HH is + the zero-padded hour of the day (0-23), and MM is the zero-padded + minute of the hour. For example, 05:00 specifies 5 AM daily. + Requires automatic_backup_retention_days to be set. + type: string + deploymentType: + description: '- The filesystem deployment type. Supports MULTI_AZ_1, + SINGLE_AZ_1, and SINGLE_AZ_2.' + type: string + diskIopsConfiguration: + description: The SSD IOPS configuration for the Amazon FSx for + NetApp ONTAP file system. See Disk Iops Configuration below. + properties: + iops: + description: '- The total number of SSD IOPS provisioned for + the file system.' + type: number + mode: + description: '- Specifies whether the number of IOPS for the + file system is using the system. 
Valid values are AUTOMATIC + and USER_PROVISIONED. Default value is AUTOMATIC.' + type: string + type: object + endpointIpAddressRange: + description: Specifies the IP address range in which the endpoints + to access your file system will be created. By default, Amazon + FSx selects an unused IP address range for you from the 198.19.* + range. + type: string + fsxAdminPasswordSecretRef: + description: The ONTAP administrative password for the fsxadmin + user that you can use to administer your file system using the + ONTAP CLI and REST API. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + haPairs: + description: '- The number of ha_pairs to deploy for the file + system. Valid values are 1 through 12. Value of 2 or greater + required for SINGLE_AZ_2. Only value of 1 is supported with + SINGLE_AZ_1 or MULTI_AZ_1 but not required.' + type: number + kmsKeyId: + description: ARN for the KMS Key to encrypt the file system at + rest, Defaults to an AWS managed KMS Key. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preferredSubnetId: + description: The ID for a subnet. A subnet is a range of IP addresses + in your virtual private cloud (VPC). + type: string + preferredSubnetIdRef: + description: Reference to a Subnet in ec2 to populate preferredSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + preferredSubnetIdSelector: + description: Selector for a Subnet in ec2 to populate preferredSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + routeTableIds: + description: Specifies the VPC route tables in which your file + system's endpoints will be created. 
You should specify all VPC + route tables associated with the subnets in which your clients + are located. By default, Amazon FSx selects your VPC's default + route table. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of IDs for the security groups that apply + to the specified network interfaces created for file system + access. These security groups will apply to all network interfaces. + items: + type: string + type: array + x-kubernetes-list-type: set + storageCapacity: + description: The storage capacity (GiB) of the file system. Valid + values between 1024 and 196608 for file systems with deployment_type + SINGLE_AZ_1 and MULTI_AZ_1. Valid values between 2048 (1024 + per ha pair) and 1048576 for file systems with deployment_type + SINGLE_AZ_2. + type: number + storageType: + description: '- The filesystem storage type. defaults to SSD.' + type: string + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of IDs for the subnets that the file system + will be accessible from. Up to 2 subnets can be provided. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + throughputCapacity: + description: Sets the throughput capacity (in MBps) for the file + system that you're creating. Valid values are 128, 256, 512, + 1024, 2048, and 4096. This parameter is only supported when + not using the ha_pairs parameter. Either throughput_capacity + or throughput_capacity_per_ha_pair must be specified. + type: number + throughputCapacityPerHaPair: + description: Sets the throughput capacity (in MBps) for the file + system that you're creating. Valid value when using 1 ha_pair + are 128, 256, 512, 1024, 2048, and 4096. Valid values when using + 2 or more ha_pairs are 3072,6144. This parameter is only supported + when specifying the ha_pairs parameter. Either throughput_capacity + or throughput_capacity_per_ha_pair must be specified. + type: number + weeklyMaintenanceStartTime: + description: The preferred start time (in d:HH:MM format) to perform + weekly maintenance, in the UTC time zone. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + automaticBackupRetentionDays: + description: The number of days to retain automatic backups. Setting + this to 0 disables automatic backups. You can retain automatic + backups for a maximum of 90 days. 
+ type: number + dailyAutomaticBackupStartTime: + description: A recurring daily time, in the format HH:MM. HH is + the zero-padded hour of the day (0-23), and MM is the zero-padded + minute of the hour. For example, 05:00 specifies 5 AM daily. + Requires automatic_backup_retention_days to be set. + type: string + deploymentType: + description: '- The filesystem deployment type. Supports MULTI_AZ_1, + SINGLE_AZ_1, and SINGLE_AZ_2.' + type: string + diskIopsConfiguration: + description: The SSD IOPS configuration for the Amazon FSx for + NetApp ONTAP file system. See Disk Iops Configuration below. + properties: + iops: + description: '- The total number of SSD IOPS provisioned for + the file system.' + type: number + mode: + description: '- Specifies whether the number of IOPS for the + file system is using the system. Valid values are AUTOMATIC + and USER_PROVISIONED. Default value is AUTOMATIC.' + type: string + type: object + endpointIpAddressRange: + description: Specifies the IP address range in which the endpoints + to access your file system will be created. By default, Amazon + FSx selects an unused IP address range for you from the 198.19.* + range. + type: string + fsxAdminPasswordSecretRef: + description: The ONTAP administrative password for the fsxadmin + user that you can use to administer your file system using the + ONTAP CLI and REST API. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + haPairs: + description: '- The number of ha_pairs to deploy for the file + system. Valid values are 1 through 12. Value of 2 or greater + required for SINGLE_AZ_2. Only value of 1 is supported with + SINGLE_AZ_1 or MULTI_AZ_1 but not required.' 
+ type: number + kmsKeyId: + description: ARN for the KMS Key to encrypt the file system at + rest, Defaults to an AWS managed KMS Key. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preferredSubnetId: + description: The ID for a subnet. A subnet is a range of IP addresses + in your virtual private cloud (VPC). + type: string + preferredSubnetIdRef: + description: Reference to a Subnet in ec2 to populate preferredSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + preferredSubnetIdSelector: + description: Selector for a Subnet in ec2 to populate preferredSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routeTableIds: + description: Specifies the VPC route tables in which your file + system's endpoints will be created. You should specify all VPC + route tables associated with the subnets in which your clients + are located. By default, Amazon FSx selects your VPC's default + route table. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of IDs for the security groups that apply + to the specified network interfaces created for file system + access. These security groups will apply to all network interfaces. + items: + type: string + type: array + x-kubernetes-list-type: set + storageCapacity: + description: The storage capacity (GiB) of the file system. 
Valid + values between 1024 and 196608 for file systems with deployment_type + SINGLE_AZ_1 and MULTI_AZ_1. Valid values between 2048 (1024 + per ha pair) and 1048576 for file systems with deployment_type + SINGLE_AZ_2. + type: number + storageType: + description: '- The filesystem storage type. defaults to SSD.' + type: string + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of IDs for the subnets that the file system + will be accessible from. Up to 2 subnets can be provided. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + throughputCapacity: + description: Sets the throughput capacity (in MBps) for the file + system that you're creating. Valid values are 128, 256, 512, + 1024, 2048, and 4096. This parameter is only supported when + not using the ha_pairs parameter. Either throughput_capacity + or throughput_capacity_per_ha_pair must be specified. + type: number + throughputCapacityPerHaPair: + description: Sets the throughput capacity (in MBps) for the file + system that you're creating. Valid value when using 1 ha_pair + are 128, 256, 512, 1024, 2048, and 4096. Valid values when using + 2 or more ha_pairs are 3072,6144. This parameter is only supported + when specifying the ha_pairs parameter. Either throughput_capacity + or throughput_capacity_per_ha_pair must be specified. + type: number + weeklyMaintenanceStartTime: + description: The preferred start time (in d:HH:MM format) to perform + weekly maintenance, in the UTC time zone. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.deploymentType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.deploymentType) + || (has(self.initProvider) && has(self.initProvider.deploymentType))' + - message: spec.forProvider.storageCapacity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageCapacity) + || (has(self.initProvider) && has(self.initProvider.storageCapacity))' + status: + description: OntapFileSystemStatus defines the observed state of OntapFileSystem. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name of the file system. + type: string + automaticBackupRetentionDays: + description: The number of days to retain automatic backups. Setting + this to 0 disables automatic backups. You can retain automatic + backups for a maximum of 90 days. + type: number + dailyAutomaticBackupStartTime: + description: A recurring daily time, in the format HH:MM. HH is + the zero-padded hour of the day (0-23), and MM is the zero-padded + minute of the hour. For example, 05:00 specifies 5 AM daily. + Requires automatic_backup_retention_days to be set. + type: string + deploymentType: + description: '- The filesystem deployment type. Supports MULTI_AZ_1, + SINGLE_AZ_1, and SINGLE_AZ_2.' + type: string + diskIopsConfiguration: + description: The SSD IOPS configuration for the Amazon FSx for + NetApp ONTAP file system. See Disk Iops Configuration below. + properties: + iops: + description: '- The total number of SSD IOPS provisioned for + the file system.' 
+ type: number + mode: + description: '- Specifies whether the number of IOPS for the + file system is using the system. Valid values are AUTOMATIC + and USER_PROVISIONED. Default value is AUTOMATIC.' + type: string + type: object + dnsName: + description: DNS name for the file system, e.g., fs-12345678.fsx.us-west-2.amazonaws.com + type: string + endpointIpAddressRange: + description: Specifies the IP address range in which the endpoints + to access your file system will be created. By default, Amazon + FSx selects an unused IP address range for you from the 198.19.* + range. + type: string + endpoints: + description: The endpoints that are used to access data or to + manage the file system using the NetApp ONTAP CLI, REST API, + or NetApp SnapMirror. See Endpoints below. + items: + properties: + intercluster: + description: An endpoint for managing your file system by + setting up NetApp SnapMirror with other ONTAP systems. + See Endpoint. + items: + properties: + dnsName: + description: DNS name for the file system, e.g., fs-12345678.fsx.us-west-2.amazonaws.com + type: string + ipAddresses: + description: IP addresses of the file system endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + management: + description: An endpoint for managing your file system using + the NetApp ONTAP CLI and NetApp ONTAP API. See Endpoint. + items: + properties: + dnsName: + description: DNS name for the file system, e.g., fs-12345678.fsx.us-west-2.amazonaws.com + type: string + ipAddresses: + description: IP addresses of the file system endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + haPairs: + description: '- The number of ha_pairs to deploy for the file + system. Valid values are 1 through 12. Value of 2 or greater + required for SINGLE_AZ_2. Only value of 1 is supported with + SINGLE_AZ_1 or MULTI_AZ_1 but not required.' 
+ type: number + id: + description: Identifier of the file system, e.g., fs-12345678 + type: string + kmsKeyId: + description: ARN for the KMS Key to encrypt the file system at + rest, Defaults to an AWS managed KMS Key. + type: string + networkInterfaceIds: + description: Set of Elastic Network Interface identifiers from + which the file system is accessible The first network interface + returned is the primary network interface. + items: + type: string + type: array + ownerId: + description: AWS account identifier that created the file system. + type: string + preferredSubnetId: + description: The ID for a subnet. A subnet is a range of IP addresses + in your virtual private cloud (VPC). + type: string + routeTableIds: + description: Specifies the VPC route tables in which your file + system's endpoints will be created. You should specify all VPC + route tables associated with the subnets in which your clients + are located. By default, Amazon FSx selects your VPC's default + route table. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIds: + description: A list of IDs for the security groups that apply + to the specified network interfaces created for file system + access. These security groups will apply to all network interfaces. + items: + type: string + type: array + x-kubernetes-list-type: set + storageCapacity: + description: The storage capacity (GiB) of the file system. Valid + values between 1024 and 196608 for file systems with deployment_type + SINGLE_AZ_1 and MULTI_AZ_1. Valid values between 2048 (1024 + per ha pair) and 1048576 for file systems with deployment_type + SINGLE_AZ_2. + type: number + storageType: + description: '- The filesystem storage type. defaults to SSD.' + type: string + subnetIds: + description: A list of IDs for the subnets that the file system + will be accessible from. Up to 2 subnets can be provided. 
+ items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughputCapacity: + description: Sets the throughput capacity (in MBps) for the file + system that you're creating. Valid values are 128, 256, 512, + 1024, 2048, and 4096. This parameter is only supported when + not using the ha_pairs parameter. Either throughput_capacity + or throughput_capacity_per_ha_pair must be specified. + type: number + throughputCapacityPerHaPair: + description: Sets the throughput capacity (in MBps) for the file + system that you're creating. Valid value when using 1 ha_pair + are 128, 256, 512, 1024, 2048, and 4096. Valid values when using + 2 or more ha_pairs are 3072,6144. This parameter is only supported + when specifying the ha_pairs parameter. Either throughput_capacity + or throughput_capacity_per_ha_pair must be specified. + type: number + vpcId: + description: Identifier of the Virtual Private Cloud for the file + system. + type: string + weeklyMaintenanceStartTime: + description: The preferred start time (in d:HH:MM format) to perform + weekly maintenance, in the UTC time zone. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/fsx.aws.upbound.io_ontapstoragevirtualmachines.yaml b/package/crds/fsx.aws.upbound.io_ontapstoragevirtualmachines.yaml index 7bf934a347..866059b282 100644 --- a/package/crds/fsx.aws.upbound.io_ontapstoragevirtualmachines.yaml +++ b/package/crds/fsx.aws.upbound.io_ontapstoragevirtualmachines.yaml @@ -894,3 +894,867 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: OntapStorageVirtualMachine is the Schema for the OntapStorageVirtualMachines + API. Manages a FSx Storage Virtual Machine. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OntapStorageVirtualMachineSpec defines the desired state + of OntapStorageVirtualMachine + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + activeDirectoryConfiguration: + description: Configuration block that Amazon FSx uses to join + the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft + Active Directory (AD) directory. Detailed below. + properties: + netbiosName: + description: The NetBIOS name of the Active Directory computer + object that will be created for your SVM. This is often + the same as the SVM name but can be different. AWS limits + to 15 characters because of standard NetBIOS naming limits. + type: string + selfManagedActiveDirectoryConfiguration: + description: Configuration block that Amazon FSx uses to join + the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft + Active Directory (AD) directory. Detailed below. + properties: + dnsIps: + description: A list of up to three IP addresses of DNS + servers or domain controllers in the self-managed AD + directory. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The fully qualified domain name of the self-managed + AD directory. For example, corp.example.com. + type: string + fileSystemAdministratorsGroup: + description: The name of the domain group whose members + are granted administrative privileges for the SVM. The + group that you specify must already exist in your domain. + Defaults to Domain Admins. + type: string + organizationalUnitDistinguishedName: + description: The fully qualified distinguished name of + the organizational unit within your self-managed AD + directory that the Windows File Server instance will + join. For example, OU=FSx,DC=yourdomain,DC=corp,DC=com. + Only accepts OU as the direct parent of the SVM. If + none is provided, the SVM is created in the default + location of your self-managed AD directory. To learn + more, see RFC 2253. + type: string + passwordSecretRef: + description: The password for the service account on your + self-managed AD domain that Amazon FSx will use to join + to your AD domain. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The user name for the service account on + your self-managed AD domain that Amazon FSx will use + to join to your AD domain. + type: string + type: object + type: object + fileSystemId: + description: The ID of the Amazon FSx ONTAP File System that this + SVM will be created on. + type: string + fileSystemIdRef: + description: Reference to a OntapFileSystem in fsx to populate + fileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileSystemIdSelector: + description: Selector for a OntapFileSystem in fsx to populate + fileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the SVM. You can use a maximum of 47 + alphanumeric characters, plus the underscore (_) special character. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rootVolumeSecurityStyle: + description: Specifies the root volume security style, Valid values + are UNIX, NTFS, and MIXED. All volumes created under this SVM + will inherit the root security style unless the security style + is specified on the volume. Default value is UNIX. + type: string + svmAdminPasswordSecretRef: + description: A SecretKeySelector is a reference to a secret key + in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + activeDirectoryConfiguration: + description: Configuration block that Amazon FSx uses to join + the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft + Active Directory (AD) directory. Detailed below. + properties: + netbiosName: + description: The NetBIOS name of the Active Directory computer + object that will be created for your SVM. This is often + the same as the SVM name but can be different. AWS limits + to 15 characters because of standard NetBIOS naming limits. + type: string + selfManagedActiveDirectoryConfiguration: + description: Configuration block that Amazon FSx uses to join + the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft + Active Directory (AD) directory. Detailed below. + properties: + dnsIps: + description: A list of up to three IP addresses of DNS + servers or domain controllers in the self-managed AD + directory. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The fully qualified domain name of the self-managed + AD directory. For example, corp.example.com. + type: string + fileSystemAdministratorsGroup: + description: The name of the domain group whose members + are granted administrative privileges for the SVM. The + group that you specify must already exist in your domain. + Defaults to Domain Admins. + type: string + organizationalUnitDistinguishedName: + description: The fully qualified distinguished name of + the organizational unit within your self-managed AD + directory that the Windows File Server instance will + join. For example, OU=FSx,DC=yourdomain,DC=corp,DC=com. + Only accepts OU as the direct parent of the SVM. If + none is provided, the SVM is created in the default + location of your self-managed AD directory. To learn + more, see RFC 2253. + type: string + passwordSecretRef: + description: The password for the service account on your + self-managed AD domain that Amazon FSx will use to join + to your AD domain. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The user name for the service account on + your self-managed AD domain that Amazon FSx will use + to join to your AD domain. + type: string + required: + - passwordSecretRef + type: object + type: object + fileSystemId: + description: The ID of the Amazon FSx ONTAP File System that this + SVM will be created on. + type: string + fileSystemIdRef: + description: Reference to a OntapFileSystem in fsx to populate + fileSystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileSystemIdSelector: + description: Selector for a OntapFileSystem in fsx to populate + fileSystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the SVM. You can use a maximum of 47 + alphanumeric characters, plus the underscore (_) special character. + type: string + rootVolumeSecurityStyle: + description: Specifies the root volume security style, Valid values + are UNIX, NTFS, and MIXED. All volumes created under this SVM + will inherit the root security style unless the security style + is specified on the volume. Default value is UNIX. + type: string + svmAdminPasswordSecretRef: + description: A SecretKeySelector is a reference to a secret key + in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: OntapStorageVirtualMachineStatus defines the observed state + of OntapStorageVirtualMachine. 
+ properties: + atProvider: + properties: + activeDirectoryConfiguration: + description: Configuration block that Amazon FSx uses to join + the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft + Active Directory (AD) directory. Detailed below. + properties: + netbiosName: + description: The NetBIOS name of the Active Directory computer + object that will be created for your SVM. This is often + the same as the SVM name but can be different. AWS limits + to 15 characters because of standard NetBIOS naming limits. + type: string + selfManagedActiveDirectoryConfiguration: + description: Configuration block that Amazon FSx uses to join + the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft + Active Directory (AD) directory. Detailed below. + properties: + dnsIps: + description: A list of up to three IP addresses of DNS + servers or domain controllers in the self-managed AD + directory. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The fully qualified domain name of the self-managed + AD directory. For example, corp.example.com. + type: string + fileSystemAdministratorsGroup: + description: The name of the domain group whose members + are granted administrative privileges for the SVM. The + group that you specify must already exist in your domain. + Defaults to Domain Admins. + type: string + organizationalUnitDistinguishedName: + description: The fully qualified distinguished name of + the organizational unit within your self-managed AD + directory that the Windows File Server instance will + join. For example, OU=FSx,DC=yourdomain,DC=corp,DC=com. + Only accepts OU as the direct parent of the SVM. If + none is provided, the SVM is created in the default + location of your self-managed AD directory. To learn + more, see RFC 2253. + type: string + username: + description: The user name for the service account on + your self-managed AD domain that Amazon FSx will use + to join to your AD domain. 
+ type: string + type: object + type: object + arn: + description: Amazon Resource Name of the storage virtual machine. + type: string + endpoints: + description: The endpoints that are used to access data or to + manage the storage virtual machine using the NetApp ONTAP CLI, + REST API, or NetApp SnapMirror. See Endpoints below. + items: + properties: + iscsi: + description: An endpoint for accessing data on your storage + virtual machine via iSCSI protocol. See Endpoint. + items: + properties: + dnsName: + description: The Domain Name Service (DNS) name for + the storage virtual machine. You can mount your + storage virtual machine using its DNS name. + type: string + ipAddresses: + description: IP addresses of the storage virtual machine + endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + management: + description: An endpoint for managing your file system using + the NetApp ONTAP CLI and NetApp ONTAP API. See Endpoint. + items: + properties: + dnsName: + description: The Domain Name Service (DNS) name for + the storage virtual machine. You can mount your + storage virtual machine using its DNS name. + type: string + ipAddresses: + description: IP addresses of the storage virtual machine + endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + nfs: + description: An endpoint for accessing data on your storage + virtual machine via NFS protocol. See Endpoint. + items: + properties: + dnsName: + description: The Domain Name Service (DNS) name for + the storage virtual machine. You can mount your + storage virtual machine using its DNS name. + type: string + ipAddresses: + description: IP addresses of the storage virtual machine + endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + smb: + description: An endpoint for accessing data on your storage + virtual machine via SMB protocol. 
This is only set if + an active_directory_configuration has been set. See Endpoint. + items: + properties: + dnsName: + description: The Domain Name Service (DNS) name for + the storage virtual machine. You can mount your + storage virtual machine using its DNS name. + type: string + ipAddresses: + description: IP addresses of the storage virtual machine + endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + fileSystemId: + description: The ID of the Amazon FSx ONTAP File System that this + SVM will be created on. + type: string + id: + description: Identifier of the storage virtual machine, e.g., + svm-12345678 + type: string + name: + description: The name of the SVM. You can use a maximum of 47 + alphanumeric characters, plus the underscore (_) special character. + type: string + rootVolumeSecurityStyle: + description: Specifies the root volume security style, Valid values + are UNIX, NTFS, and MIXED. All volumes created under this SVM + will inherit the root security style unless the security style + is specified on the volume. Default value is UNIX. + type: string + subtype: + description: Describes the SVM's subtype, e.g. DEFAULT + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + uuid: + description: The SVM's UUID (universally unique identifier). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/fsx.aws.upbound.io_windowsfilesystems.yaml b/package/crds/fsx.aws.upbound.io_windowsfilesystems.yaml index 168b6ce332..184c3d6468 100644 --- a/package/crds/fsx.aws.upbound.io_windowsfilesystems.yaml +++ b/package/crds/fsx.aws.upbound.io_windowsfilesystems.yaml @@ -1587,3 +1587,1548 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WindowsFileSystem is the Schema for the WindowsFileSystems API. + Manages a FSx Windows File System. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WindowsFileSystemSpec defines the desired state of WindowsFileSystem + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + activeDirectoryId: + description: The ID for an existing Microsoft Active Directory + instance that the file system should join when it's created. + Cannot be specified with self_managed_active_directory. + type: string + activeDirectoryIdRef: + description: Reference to a Directory in ds to populate activeDirectoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + activeDirectoryIdSelector: + description: Selector for a Directory in ds to populate activeDirectoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + aliases: + description: An array DNS alias names that you want to associate + with the Amazon FSx file system. For more information, see + Working with DNS Aliases + items: + type: string + type: array + x-kubernetes-list-type: set + auditLogConfiguration: + description: The configuration that Amazon FSx for Windows File + Server uses to audit and log user accesses of files, folders, + and file shares on the Amazon FSx for Windows File Server file + system. 
See Audit Log Configuration below. + properties: + auditLogDestination: + description: The Amazon Resource Name (ARN) for the destination + of the audit logs. The destination can be any Amazon CloudWatch + Logs log group ARN or Amazon Kinesis Data Firehose delivery + stream ARN. Can be specified when file_access_audit_log_level + and file_share_access_audit_log_level are not set to DISABLED. + The name of the Amazon CloudWatch Logs log group must begin + with the /aws/fsx prefix. The name of the Amazon Kinesis + Data Firehouse delivery stream must begin with the aws-fsx + prefix. If you do not provide a destination in audit_log_destionation, + Amazon FSx will create and use a log stream in the CloudWatch + Logs /aws/fsx/windows log group. + type: string + fileAccessAuditLogLevel: + description: Sets which attempt type is logged by Amazon FSx + for file and folder accesses. Valid values are SUCCESS_ONLY, + FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default + value is DISABLED. + type: string + fileShareAccessAuditLogLevel: + description: Sets which attempt type is logged by Amazon FSx + for file share accesses. Valid values are SUCCESS_ONLY, + FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default + value is DISABLED. + type: string + type: object + automaticBackupRetentionDays: + description: The number of days to retain automatic backups. Minimum + of 0 and maximum of 90. Defaults to 7. Set to 0 to disable. + type: number + backupId: + description: The ID of the source backup to create the filesystem + from. + type: string + copyTagsToBackups: + description: A boolean flag indicating whether tags on the file + system should be copied to backups. Defaults to false. + type: boolean + dailyAutomaticBackupStartTime: + description: The preferred time (in HH:MM format) to take daily + automatic backups, in the UTC time zone. 
+ type: string + deploymentType: + description: Specifies the file system deployment type, valid + values are MULTI_AZ_1, SINGLE_AZ_1 and SINGLE_AZ_2. Default + value is SINGLE_AZ_1. + type: string + diskIopsConfiguration: + description: The SSD IOPS configuration for the Amazon FSx for + Windows File Server file system. See Disk Iops Configuration + below. + properties: + iops: + description: '- The total number of SSD IOPS provisioned for + the file system.' + type: number + mode: + description: '- Specifies whether the number of IOPS for the + file system is using the system. Valid values are AUTOMATIC + and USER_PROVISIONED. Default value is AUTOMATIC.' + type: string + type: object + kmsKeyId: + description: ARN for the KMS Key to encrypt the file system at + rest. Defaults to an AWS managed KMS Key. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preferredSubnetId: + description: Specifies the subnet in which you want the preferred + file server to be located. Required for when deployment type + is MULTI_AZ_1. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of IDs for the security groups that apply + to the specified network interfaces created for file system + access. These security groups will apply to all network interfaces. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + selfManagedActiveDirectory: + description: Configuration block that Amazon FSx uses to join + the Windows File Server instance to your self-managed (including + on-premises) Microsoft Active Directory (AD) directory. Cannot + be specified with active_directory_id. See Self-Managed Active + Directory below. + properties: + dnsIps: + description: A list of up to two IP addresses of DNS servers + or domain controllers in the self-managed AD directory. + The IP addresses need to be either in the same VPC CIDR + range as the file system or in the private IP version 4 + (IPv4) address ranges as specified in RFC 1918. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The fully qualified domain name of the self-managed + AD directory. For example, corp.example.com. + type: string + fileSystemAdministratorsGroup: + description: The name of the domain group whose members are + granted administrative privileges for the file system. Administrative + privileges include taking ownership of files and folders, + and setting audit controls (audit ACLs) on files and folders. + The group that you specify must already exist in your domain. + Defaults to Domain Admins. + type: string + organizationalUnitDistinguishedName: + description: The fully qualified distinguished name of the + organizational unit within your self-managed AD directory + that the Windows File Server instance will join. For example, + OU=FSx,DC=yourdomain,DC=corp,DC=com. Only accepts OU as + the direct parent of the file system. If none is provided, + the FSx file system is created in the default location of + your self-managed AD directory. To learn more, see RFC 2253. + type: string + passwordSecretRef: + description: The password for the service account on your + self-managed AD domain that Amazon FSx will use to join + to your AD domain. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The user name for the service account on your + self-managed AD domain that Amazon FSx will use to join + to your AD domain. + type: string + type: object + skipFinalBackup: + description: When enabled, will skip the default final backup + taken when the file system is deleted. This configuration must + be applied separately before attempting to delete the resource + to have the desired behavior. Defaults to false. + type: boolean + storageCapacity: + description: Storage capacity (GiB) of the file system. Minimum + of 32 and maximum of 65536. If the storage type is set to HDD + the minimum value is 2000. Required when not creating filesystem + for a backup. + type: number + storageType: + description: Specifies the storage type, Valid values are SSD + and HDD. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows + file system deployment types. Default value is SSD. + type: string + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of IDs for the subnets that the file system + will be accessible from. To specify more than a single subnet + set deployment_type to MULTI_AZ_1. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + throughputCapacity: + description: Throughput (megabytes per second) of the file system + in power of 2 increments. Minimum of 8 and maximum of 2048. 
+ type: number + weeklyMaintenanceStartTime: + description: The preferred start time (in d:HH:MM format) to perform + weekly maintenance, in the UTC time zone. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + activeDirectoryId: + description: The ID for an existing Microsoft Active Directory + instance that the file system should join when it's created. + Cannot be specified with self_managed_active_directory. + type: string + activeDirectoryIdRef: + description: Reference to a Directory in ds to populate activeDirectoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + activeDirectoryIdSelector: + description: Selector for a Directory in ds to populate activeDirectoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + aliases: + description: An array DNS alias names that you want to associate + with the Amazon FSx file system. For more information, see + Working with DNS Aliases + items: + type: string + type: array + x-kubernetes-list-type: set + auditLogConfiguration: + description: The configuration that Amazon FSx for Windows File + Server uses to audit and log user accesses of files, folders, + and file shares on the Amazon FSx for Windows File Server file + system. See Audit Log Configuration below. + properties: + auditLogDestination: + description: The Amazon Resource Name (ARN) for the destination + of the audit logs. 
The destination can be any Amazon CloudWatch + Logs log group ARN or Amazon Kinesis Data Firehose delivery + stream ARN. Can be specified when file_access_audit_log_level + and file_share_access_audit_log_level are not set to DISABLED. + The name of the Amazon CloudWatch Logs log group must begin + with the /aws/fsx prefix. The name of the Amazon Kinesis + Data Firehouse delivery stream must begin with the aws-fsx + prefix. If you do not provide a destination in audit_log_destionation, + Amazon FSx will create and use a log stream in the CloudWatch + Logs /aws/fsx/windows log group. + type: string + fileAccessAuditLogLevel: + description: Sets which attempt type is logged by Amazon FSx + for file and folder accesses. Valid values are SUCCESS_ONLY, + FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default + value is DISABLED. + type: string + fileShareAccessAuditLogLevel: + description: Sets which attempt type is logged by Amazon FSx + for file share accesses. Valid values are SUCCESS_ONLY, + FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default + value is DISABLED. + type: string + type: object + automaticBackupRetentionDays: + description: The number of days to retain automatic backups. Minimum + of 0 and maximum of 90. Defaults to 7. Set to 0 to disable. + type: number + backupId: + description: The ID of the source backup to create the filesystem + from. + type: string + copyTagsToBackups: + description: A boolean flag indicating whether tags on the file + system should be copied to backups. Defaults to false. + type: boolean + dailyAutomaticBackupStartTime: + description: The preferred time (in HH:MM format) to take daily + automatic backups, in the UTC time zone. + type: string + deploymentType: + description: Specifies the file system deployment type, valid + values are MULTI_AZ_1, SINGLE_AZ_1 and SINGLE_AZ_2. Default + value is SINGLE_AZ_1. 
+ type: string + diskIopsConfiguration: + description: The SSD IOPS configuration for the Amazon FSx for + Windows File Server file system. See Disk Iops Configuration + below. + properties: + iops: + description: '- The total number of SSD IOPS provisioned for + the file system.' + type: number + mode: + description: '- Specifies whether the number of IOPS for the + file system is using the system. Valid values are AUTOMATIC + and USER_PROVISIONED. Default value is AUTOMATIC.' + type: string + type: object + kmsKeyId: + description: ARN for the KMS Key to encrypt the file system at + rest. Defaults to an AWS managed KMS Key. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preferredSubnetId: + description: Specifies the subnet in which you want the preferred + file server to be located. Required for when deployment type + is MULTI_AZ_1. + type: string + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: A list of IDs for the security groups that apply + to the specified network interfaces created for file system + access. These security groups will apply to all network interfaces. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + selfManagedActiveDirectory: + description: Configuration block that Amazon FSx uses to join + the Windows File Server instance to your self-managed (including + on-premises) Microsoft Active Directory (AD) directory. Cannot + be specified with active_directory_id. See Self-Managed Active + Directory below. + properties: + dnsIps: + description: A list of up to two IP addresses of DNS servers + or domain controllers in the self-managed AD directory. + The IP addresses need to be either in the same VPC CIDR + range as the file system or in the private IP version 4 + (IPv4) address ranges as specified in RFC 1918. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The fully qualified domain name of the self-managed + AD directory. For example, corp.example.com. + type: string + fileSystemAdministratorsGroup: + description: The name of the domain group whose members are + granted administrative privileges for the file system. Administrative + privileges include taking ownership of files and folders, + and setting audit controls (audit ACLs) on files and folders. + The group that you specify must already exist in your domain. + Defaults to Domain Admins. + type: string + organizationalUnitDistinguishedName: + description: The fully qualified distinguished name of the + organizational unit within your self-managed AD directory + that the Windows File Server instance will join. For example, + OU=FSx,DC=yourdomain,DC=corp,DC=com. Only accepts OU as + the direct parent of the file system. If none is provided, + the FSx file system is created in the default location of + your self-managed AD directory. To learn more, see RFC 2253. + type: string + passwordSecretRef: + description: The password for the service account on your + self-managed AD domain that Amazon FSx will use to join + to your AD domain. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The user name for the service account on your + self-managed AD domain that Amazon FSx will use to join + to your AD domain. + type: string + required: + - passwordSecretRef + type: object + skipFinalBackup: + description: When enabled, will skip the default final backup + taken when the file system is deleted. This configuration must + be applied separately before attempting to delete the resource + to have the desired behavior. Defaults to false. + type: boolean + storageCapacity: + description: Storage capacity (GiB) of the file system. Minimum + of 32 and maximum of 65536. If the storage type is set to HDD + the minimum value is 2000. Required when not creating filesystem + for a backup. + type: number + storageType: + description: Specifies the storage type, Valid values are SSD + and HDD. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows + file system deployment types. Default value is SSD. + type: string + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of IDs for the subnets that the file system + will be accessible from. To specify more than a single subnet + set deployment_type to MULTI_AZ_1. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + throughputCapacity: + description: Throughput (megabytes per second) of the file system + in power of 2 increments. 
Minimum of 8 and maximum of 2048. + type: number + weeklyMaintenanceStartTime: + description: The preferred start time (in d:HH:MM format) to perform + weekly maintenance, in the UTC time zone. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.throughputCapacity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.throughputCapacity) + || (has(self.initProvider) && has(self.initProvider.throughputCapacity))' + status: + description: WindowsFileSystemStatus defines the observed state of WindowsFileSystem. + properties: + atProvider: + properties: + activeDirectoryId: + description: The ID for an existing Microsoft Active Directory + instance that the file system should join when it's created. + Cannot be specified with self_managed_active_directory. + type: string + aliases: + description: An array DNS alias names that you want to associate + with the Amazon FSx file system. For more information, see + Working with DNS Aliases + items: + type: string + type: array + x-kubernetes-list-type: set + arn: + description: Amazon Resource Name of the file system. + type: string + auditLogConfiguration: + description: The configuration that Amazon FSx for Windows File + Server uses to audit and log user accesses of files, folders, + and file shares on the Amazon FSx for Windows File Server file + system. See Audit Log Configuration below. + properties: + auditLogDestination: + description: The Amazon Resource Name (ARN) for the destination + of the audit logs. The destination can be any Amazon CloudWatch + Logs log group ARN or Amazon Kinesis Data Firehose delivery + stream ARN. Can be specified when file_access_audit_log_level + and file_share_access_audit_log_level are not set to DISABLED. + The name of the Amazon CloudWatch Logs log group must begin + with the /aws/fsx prefix. The name of the Amazon Kinesis + Data Firehouse delivery stream must begin with the aws-fsx + prefix. 
If you do not provide a destination in audit_log_destionation, + Amazon FSx will create and use a log stream in the CloudWatch + Logs /aws/fsx/windows log group. + type: string + fileAccessAuditLogLevel: + description: Sets which attempt type is logged by Amazon FSx + for file and folder accesses. Valid values are SUCCESS_ONLY, + FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default + value is DISABLED. + type: string + fileShareAccessAuditLogLevel: + description: Sets which attempt type is logged by Amazon FSx + for file share accesses. Valid values are SUCCESS_ONLY, + FAILURE_ONLY, SUCCESS_AND_FAILURE, and DISABLED. Default + value is DISABLED. + type: string + type: object + automaticBackupRetentionDays: + description: The number of days to retain automatic backups. Minimum + of 0 and maximum of 90. Defaults to 7. Set to 0 to disable. + type: number + backupId: + description: The ID of the source backup to create the filesystem + from. + type: string + copyTagsToBackups: + description: A boolean flag indicating whether tags on the file + system should be copied to backups. Defaults to false. + type: boolean + dailyAutomaticBackupStartTime: + description: The preferred time (in HH:MM format) to take daily + automatic backups, in the UTC time zone. + type: string + deploymentType: + description: Specifies the file system deployment type, valid + values are MULTI_AZ_1, SINGLE_AZ_1 and SINGLE_AZ_2. Default + value is SINGLE_AZ_1. + type: string + diskIopsConfiguration: + description: The SSD IOPS configuration for the Amazon FSx for + Windows File Server file system. See Disk Iops Configuration + below. + properties: + iops: + description: '- The total number of SSD IOPS provisioned for + the file system.' + type: number + mode: + description: '- Specifies whether the number of IOPS for the + file system is using the system. Valid values are AUTOMATIC + and USER_PROVISIONED. Default value is AUTOMATIC.' 
+ type: string + type: object + dnsName: + description: DNS name for the file system, e.g., fs-12345678.corp.example.com + (domain name matching the Active Directory domain name) + type: string + id: + description: Identifier of the file system (e.g. fs-12345678). + type: string + kmsKeyId: + description: ARN for the KMS Key to encrypt the file system at + rest. Defaults to an AWS managed KMS Key. + type: string + networkInterfaceIds: + description: Set of Elastic Network Interface identifiers from + which the file system is accessible. + items: + type: string + type: array + x-kubernetes-list-type: set + ownerId: + description: AWS account identifier that created the file system. + type: string + preferredFileServerIp: + description: The IP address of the primary, or preferred, file + server. + type: string + preferredSubnetId: + description: Specifies the subnet in which you want the preferred + file server to be located. Required for when deployment type + is MULTI_AZ_1. + type: string + remoteAdministrationEndpoint: + description: For MULTI_AZ_1 deployment types, use this endpoint + when performing administrative tasks on the file system using + Amazon FSx Remote PowerShell. For SINGLE_AZ_1 deployment types, + this is the DNS name of the file system. + type: string + securityGroupIds: + description: A list of IDs for the security groups that apply + to the specified network interfaces created for file system + access. These security groups will apply to all network interfaces. + items: + type: string + type: array + x-kubernetes-list-type: set + selfManagedActiveDirectory: + description: Configuration block that Amazon FSx uses to join + the Windows File Server instance to your self-managed (including + on-premises) Microsoft Active Directory (AD) directory. Cannot + be specified with active_directory_id. See Self-Managed Active + Directory below. 
+ properties: + dnsIps: + description: A list of up to two IP addresses of DNS servers + or domain controllers in the self-managed AD directory. + The IP addresses need to be either in the same VPC CIDR + range as the file system or in the private IP version 4 + (IPv4) address ranges as specified in RFC 1918. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The fully qualified domain name of the self-managed + AD directory. For example, corp.example.com. + type: string + fileSystemAdministratorsGroup: + description: The name of the domain group whose members are + granted administrative privileges for the file system. Administrative + privileges include taking ownership of files and folders, + and setting audit controls (audit ACLs) on files and folders. + The group that you specify must already exist in your domain. + Defaults to Domain Admins. + type: string + organizationalUnitDistinguishedName: + description: The fully qualified distinguished name of the + organizational unit within your self-managed AD directory + that the Windows File Server instance will join. For example, + OU=FSx,DC=yourdomain,DC=corp,DC=com. Only accepts OU as + the direct parent of the file system. If none is provided, + the FSx file system is created in the default location of + your self-managed AD directory. To learn more, see RFC 2253. + type: string + username: + description: The user name for the service account on your + self-managed AD domain that Amazon FSx will use to join + to your AD domain. + type: string + type: object + skipFinalBackup: + description: When enabled, will skip the default final backup + taken when the file system is deleted. This configuration must + be applied separately before attempting to delete the resource + to have the desired behavior. Defaults to false. + type: boolean + storageCapacity: + description: Storage capacity (GiB) of the file system. Minimum + of 32 and maximum of 65536. 
If the storage type is set to HDD + the minimum value is 2000. Required when not creating filesystem + for a backup. + type: number + storageType: + description: Specifies the storage type, Valid values are SSD + and HDD. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows + file system deployment types. Default value is SSD. + type: string + subnetIds: + description: A list of IDs for the subnets that the file system + will be accessible from. To specify more than a single subnet + set deployment_type to MULTI_AZ_1. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + throughputCapacity: + description: Throughput (megabytes per second) of the file system + in power of 2 increments. Minimum of 8 and maximum of 2048. + type: number + vpcId: + description: Identifier of the Virtual Private Cloud for the file + system. + type: string + weeklyMaintenanceStartTime: + description: The preferred start time (in d:HH:MM format) to perform + weekly maintenance, in the UTC time zone. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/gamelift.aws.upbound.io_aliases.yaml b/package/crds/gamelift.aws.upbound.io_aliases.yaml index 73ce676978..6e73202ff2 100644 --- a/package/crds/gamelift.aws.upbound.io_aliases.yaml +++ b/package/crds/gamelift.aws.upbound.io_aliases.yaml @@ -441,3 +441,417 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Alias is the Schema for the Aliass API. Provides a GameLift Alias + resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AliasSpec defines the desired state of Alias + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the alias. + type: string + name: + description: Name of the alias. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + routingStrategy: + description: Specifies the fleet and/or routing type to use for + the alias. + properties: + fleetId: + description: ID of the GameLift Fleet to point the alias to. 
+ type: string + message: + description: Message text to be used with the TERMINAL routing + strategy. + type: string + type: + description: Type of routing strategyE.g., SIMPLE or TERMINAL + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the alias. + type: string + name: + description: Name of the alias. + type: string + routingStrategy: + description: Specifies the fleet and/or routing type to use for + the alias. + properties: + fleetId: + description: ID of the GameLift Fleet to point the alias to. + type: string + message: + description: Message text to be used with the TERMINAL routing + strategy. + type: string + type: + description: Type of routing strategyE.g., SIMPLE or TERMINAL + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.routingStrategy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.routingStrategy) + || (has(self.initProvider) && has(self.initProvider.routingStrategy))' + status: + description: AliasStatus defines the observed state of Alias. + properties: + atProvider: + properties: + arn: + description: Alias ARN. + type: string + description: + description: Description of the alias. + type: string + id: + description: Alias ID. + type: string + name: + description: Name of the alias. + type: string + routingStrategy: + description: Specifies the fleet and/or routing type to use for + the alias. + properties: + fleetId: + description: ID of the GameLift Fleet to point the alias to. + type: string + message: + description: Message text to be used with the TERMINAL routing + strategy. + type: string + type: + description: Type of routing strategyE.g., SIMPLE or TERMINAL + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/gamelift.aws.upbound.io_builds.yaml b/package/crds/gamelift.aws.upbound.io_builds.yaml index 61adeace35..1f3d77ea8e 100644 --- a/package/crds/gamelift.aws.upbound.io_builds.yaml +++ b/package/crds/gamelift.aws.upbound.io_builds.yaml @@ -916,3 +916,892 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Build is the Schema for the Builds API. Provides a GameLift Build + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BuildSpec defines the desired state of Build + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: Name of the build + type: string + operatingSystem: + description: 'Operating system that the game server binaries are + built to run on. Valid values: WINDOWS_2012, AMAZON_LINUX, AMAZON_LINUX_2, + WINDOWS_2016, AMAZON_LINUX_2023.' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + storageLocation: + description: Information indicating where your game build files + are stored. See below. + properties: + bucket: + description: Name of your S3 bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Name of the zip file containing your build files. + type: string + keyRef: + description: Reference to a Object in s3 to populate key. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keySelector: + description: Selector for a Object in s3 to populate key. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectVersion: + description: A specific version of the file. 
If not set, the + latest version of the file is retrieved. + type: string + roleArn: + description: ARN of the access role that allows Amazon GameLift + to access your S3 bucket. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + version: + description: Version that is associated with this build. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: Name of the build + type: string + operatingSystem: + description: 'Operating system that the game server binaries are + built to run on. Valid values: WINDOWS_2012, AMAZON_LINUX, AMAZON_LINUX_2, + WINDOWS_2016, AMAZON_LINUX_2023.' + type: string + storageLocation: + description: Information indicating where your game build files + are stored. See below. + properties: + bucket: + description: Name of your S3 bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Name of the zip file containing your build files. + type: string + keyRef: + description: Reference to a Object in s3 to populate key. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keySelector: + description: Selector for a Object in s3 to populate key. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectVersion: + description: A specific version of the file. If not set, the + latest version of the file is retrieved. + type: string + roleArn: + description: ARN of the access role that allows Amazon GameLift + to access your S3 bucket. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + version: + description: Version that is associated with this build. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.operatingSystem is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.operatingSystem) + || (has(self.initProvider) && has(self.initProvider.operatingSystem))' + - message: spec.forProvider.storageLocation is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageLocation) + || (has(self.initProvider) && has(self.initProvider.storageLocation))' + status: + description: BuildStatus defines the observed state of Build. 
+ properties: + atProvider: + properties: + arn: + description: GameLift Build ARN. + type: string + id: + description: GameLift Build ID. + type: string + name: + description: Name of the build + type: string + operatingSystem: + description: 'Operating system that the game server binaries are + built to run on. Valid values: WINDOWS_2012, AMAZON_LINUX, AMAZON_LINUX_2, + WINDOWS_2016, AMAZON_LINUX_2023.' + type: string + storageLocation: + description: Information indicating where your game build files + are stored. See below. + properties: + bucket: + description: Name of your S3 bucket. + type: string + key: + description: Name of the zip file containing your build files. + type: string + objectVersion: + description: A specific version of the file. If not set, the + latest version of the file is retrieved. + type: string + roleArn: + description: ARN of the access role that allows Amazon GameLift + to access your S3 bucket. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + version: + description: Version that is associated with this build. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/gamelift.aws.upbound.io_fleet.yaml b/package/crds/gamelift.aws.upbound.io_fleet.yaml index 05717825c4..ec0fb7f8e5 100644 --- a/package/crds/gamelift.aws.upbound.io_fleet.yaml +++ b/package/crds/gamelift.aws.upbound.io_fleet.yaml @@ -1053,3 +1053,1020 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Fleet is the Schema for the Fleets API. Provides a GameLift Fleet + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FleetSpec defines the desired state of Fleet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + buildId: + description: ID of the GameLift Build to be deployed on the fleet. + type: string + buildIdRef: + description: Reference to a Build in gamelift to populate buildId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + buildIdSelector: + description: Selector for a Build in gamelift to populate buildId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + certificateConfiguration: + description: Prompts GameLift to generate a TLS/SSL certificate + for the fleet. See certificate_configuration. + properties: + certificateType: + description: Indicates whether a TLS/SSL certificate is generated + for a fleet. Valid values are DISABLED and GENERATED. Default + value is DISABLED. + type: string + type: object + description: + description: Human-readable description of the fleet. + type: string + ec2InboundPermission: + description: Range of IP addresses and port settings that permit + inbound traffic to access server processes running on the fleet. + See below. 
+ items: + properties: + fromPort: + description: Starting value for a range of allowed port + numbers. + type: number + ipRange: + description: Range of allowed IP addresses expressed in + CIDR notationE.g., 000.000.000.000/[subnet mask] or 0.0.0.0/[subnet + mask]. + type: string + protocol: + description: Network communication protocol used by the + fleetE.g., TCP or UDP + type: string + toPort: + description: Ending value for a range of allowed port numbers. + Port numbers are end-inclusive. This value must be higher + than from_port. + type: number + type: object + type: array + ec2InstanceType: + description: Name of an EC2 instance typeE.g., t2.micro + type: string + fleetType: + description: Type of fleet. This value must be ON_DEMAND or SPOT. + Defaults to ON_DEMAND. + type: string + instanceRoleArn: + description: ARN of an IAM role that instances in the fleet can + assume. + type: string + instanceRoleArnRef: + description: Reference to a Role in iam to populate instanceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceRoleArnSelector: + description: Selector for a Role in iam to populate instanceRoleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + metricGroups: + description: List of names of metric groups to add this fleet + to. A metric group tracks metrics across all fleets in the group. + Defaults to default. + items: + type: string + type: array + name: + description: The name of the fleet. + type: string + newGameSessionProtectionPolicy: + description: Game session protection policy to apply to all instances + in this fleetE.g., FullProtection. Defaults to NoProtection. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resourceCreationLimitPolicy: + description: Policy that limits the number of game sessions an + individual player can create over a span of time for this fleet. + See below. + properties: + newGameSessionsPerCreator: + description: Maximum number of game sessions that an individual + can create during the policy period. 
+ type: number + policyPeriodInMinutes: + description: Time span used in evaluating the resource creation + limit policy. + type: number + type: object + runtimeConfiguration: + description: Instructions for launching server processes on each + instance in the fleet. See below. + properties: + gameSessionActivationTimeoutSeconds: + description: Maximum amount of time (in seconds) that a game + session can remain in status ACTIVATING. + type: number + maxConcurrentGameSessionActivations: + description: Maximum number of game sessions with status ACTIVATING + to allow on an instance simultaneously. + type: number + serverProcess: + description: Collection of server process configurations that + describe which server processes to run on each instance + in a fleet. See below. + items: + properties: + concurrentExecutions: + description: Number of server processes using this configuration + to run concurrently on an instance. + type: number + launchPath: + description: 'Location of the server executable in a + game build. All game builds are installed on instances + at the root : for Windows instances C:\game, and for + Linux instances /local/game.' + type: string + parameters: + description: Optional list of parameters to pass to + the server executable on launch. + type: string + type: object + type: array + type: object + scriptId: + description: ID of the GameLift Script to be deployed on the fleet. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + buildId: + description: ID of the GameLift Build to be deployed on the fleet. + type: string + buildIdRef: + description: Reference to a Build in gamelift to populate buildId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + buildIdSelector: + description: Selector for a Build in gamelift to populate buildId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + certificateConfiguration: + description: Prompts GameLift to generate a TLS/SSL certificate + for the fleet. See certificate_configuration. + properties: + certificateType: + description: Indicates whether a TLS/SSL certificate is generated + for a fleet. Valid values are DISABLED and GENERATED. Default + value is DISABLED. + type: string + type: object + description: + description: Human-readable description of the fleet. + type: string + ec2InboundPermission: + description: Range of IP addresses and port settings that permit + inbound traffic to access server processes running on the fleet. + See below. + items: + properties: + fromPort: + description: Starting value for a range of allowed port + numbers. + type: number + ipRange: + description: Range of allowed IP addresses expressed in + CIDR notationE.g., 000.000.000.000/[subnet mask] or 0.0.0.0/[subnet + mask]. + type: string + protocol: + description: Network communication protocol used by the + fleetE.g., TCP or UDP + type: string + toPort: + description: Ending value for a range of allowed port numbers. + Port numbers are end-inclusive. This value must be higher + than from_port. + type: number + type: object + type: array + ec2InstanceType: + description: Name of an EC2 instance typeE.g., t2.micro + type: string + fleetType: + description: Type of fleet. This value must be ON_DEMAND or SPOT. + Defaults to ON_DEMAND. 
+ type: string + instanceRoleArn: + description: ARN of an IAM role that instances in the fleet can + assume. + type: string + instanceRoleArnRef: + description: Reference to a Role in iam to populate instanceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceRoleArnSelector: + description: Selector for a Role in iam to populate instanceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + metricGroups: + description: List of names of metric groups to add this fleet + to. A metric group tracks metrics across all fleets in the group. + Defaults to default. + items: + type: string + type: array + name: + description: The name of the fleet. + type: string + newGameSessionProtectionPolicy: + description: Game session protection policy to apply to all instances + in this fleetE.g., FullProtection. Defaults to NoProtection. + type: string + resourceCreationLimitPolicy: + description: Policy that limits the number of game sessions an + individual player can create over a span of time for this fleet. + See below. + properties: + newGameSessionsPerCreator: + description: Maximum number of game sessions that an individual + can create during the policy period. + type: number + policyPeriodInMinutes: + description: Time span used in evaluating the resource creation + limit policy. + type: number + type: object + runtimeConfiguration: + description: Instructions for launching server processes on each + instance in the fleet. See below. + properties: + gameSessionActivationTimeoutSeconds: + description: Maximum amount of time (in seconds) that a game + session can remain in status ACTIVATING. + type: number + maxConcurrentGameSessionActivations: + description: Maximum number of game sessions with status ACTIVATING + to allow on an instance simultaneously. + type: number + serverProcess: + description: Collection of server process configurations that + describe which server processes to run on each instance + in a fleet. See below. 
+ items: + properties: + concurrentExecutions: + description: Number of server processes using this configuration + to run concurrently on an instance. + type: number + launchPath: + description: 'Location of the server executable in a + game build. All game builds are installed on instances + at the root : for Windows instances C:\game, and for + Linux instances /local/game.' + type: string + parameters: + description: Optional list of parameters to pass to + the server executable on launch. + type: string + type: object + type: array + type: object + scriptId: + description: ID of the GameLift Script to be deployed on the fleet. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.ec2InstanceType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ec2InstanceType) + || (has(self.initProvider) && has(self.initProvider.ec2InstanceType))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: FleetStatus defines the observed state of Fleet. + properties: + atProvider: + properties: + arn: + description: Fleet ARN. + type: string + buildArn: + description: Build ARN. + type: string + buildId: + description: ID of the GameLift Build to be deployed on the fleet. + type: string + certificateConfiguration: + description: Prompts GameLift to generate a TLS/SSL certificate + for the fleet. See certificate_configuration. + properties: + certificateType: + description: Indicates whether a TLS/SSL certificate is generated + for a fleet. Valid values are DISABLED and GENERATED. Default + value is DISABLED. + type: string + type: object + description: + description: Human-readable description of the fleet. 
+ type: string + ec2InboundPermission: + description: Range of IP addresses and port settings that permit + inbound traffic to access server processes running on the fleet. + See below. + items: + properties: + fromPort: + description: Starting value for a range of allowed port + numbers. + type: number + ipRange: + description: Range of allowed IP addresses expressed in + CIDR notationE.g., 000.000.000.000/[subnet mask] or 0.0.0.0/[subnet + mask]. + type: string + protocol: + description: Network communication protocol used by the + fleetE.g., TCP or UDP + type: string + toPort: + description: Ending value for a range of allowed port numbers. + Port numbers are end-inclusive. This value must be higher + than from_port. + type: number + type: object + type: array + ec2InstanceType: + description: Name of an EC2 instance typeE.g., t2.micro + type: string + fleetType: + description: Type of fleet. This value must be ON_DEMAND or SPOT. + Defaults to ON_DEMAND. + type: string + id: + description: Fleet ID. + type: string + instanceRoleArn: + description: ARN of an IAM role that instances in the fleet can + assume. + type: string + logPaths: + items: + type: string + type: array + metricGroups: + description: List of names of metric groups to add this fleet + to. A metric group tracks metrics across all fleets in the group. + Defaults to default. + items: + type: string + type: array + name: + description: The name of the fleet. + type: string + newGameSessionProtectionPolicy: + description: Game session protection policy to apply to all instances + in this fleetE.g., FullProtection. Defaults to NoProtection. + type: string + operatingSystem: + description: Operating system of the fleet's computing resources. + type: string + resourceCreationLimitPolicy: + description: Policy that limits the number of game sessions an + individual player can create over a span of time for this fleet. + See below. 
+ properties: + newGameSessionsPerCreator: + description: Maximum number of game sessions that an individual + can create during the policy period. + type: number + policyPeriodInMinutes: + description: Time span used in evaluating the resource creation + limit policy. + type: number + type: object + runtimeConfiguration: + description: Instructions for launching server processes on each + instance in the fleet. See below. + properties: + gameSessionActivationTimeoutSeconds: + description: Maximum amount of time (in seconds) that a game + session can remain in status ACTIVATING. + type: number + maxConcurrentGameSessionActivations: + description: Maximum number of game sessions with status ACTIVATING + to allow on an instance simultaneously. + type: number + serverProcess: + description: Collection of server process configurations that + describe which server processes to run on each instance + in a fleet. See below. + items: + properties: + concurrentExecutions: + description: Number of server processes using this configuration + to run concurrently on an instance. + type: number + launchPath: + description: 'Location of the server executable in a + game build. All game builds are installed on instances + at the root : for Windows instances C:\game, and for + Linux instances /local/game.' + type: string + parameters: + description: Optional list of parameters to pass to + the server executable on launch. + type: string + type: object + type: array + type: object + scriptArn: + description: Script ARN. + type: string + scriptId: + description: ID of the GameLift Script to be deployed on the fleet. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/gamelift.aws.upbound.io_scripts.yaml b/package/crds/gamelift.aws.upbound.io_scripts.yaml index 396c1d6151..a9f2a5f344 100644 --- a/package/crds/gamelift.aws.upbound.io_scripts.yaml +++ b/package/crds/gamelift.aws.upbound.io_scripts.yaml @@ -908,3 +908,884 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Script is the Schema for the Scripts API. Provides a GameLift + Script resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScriptSpec defines the desired state of Script + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: Name of the script + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + storageLocation: + description: Information indicating where your game script files + are stored. See below. + properties: + bucket: + description: Name of your S3 bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Name of the zip file containing your script files. + type: string + keyRef: + description: Reference to a Object in s3 to populate key. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keySelector: + description: Selector for a Object in s3 to populate key. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectVersion: + description: A specific version of the file. If not set, the + latest version of the file is retrieved. 
+ type: string + roleArn: + description: ARN of the access role that allows Amazon GameLift + to access your S3 bucket. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + version: + description: Version that is associated with this script. + type: string + zipFile: + description: A data object containing your Realtime scripts and + dependencies as a zip file. The zip file can have one or multiple + files. Maximum size of a zip file is 5 MB. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: Name of the script + type: string + storageLocation: + description: Information indicating where your game script files + are stored. See below. + properties: + bucket: + description: Name of your S3 bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Name of the zip file containing your script files. + type: string + keyRef: + description: Reference to a Object in s3 to populate key. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keySelector: + description: Selector for a Object in s3 to populate key. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectVersion: + description: A specific version of the file. If not set, the + latest version of the file is retrieved. + type: string + roleArn: + description: ARN of the access role that allows Amazon GameLift + to access your S3 bucket. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + version: + description: Version that is associated with this script. + type: string + zipFile: + description: A data object containing your Realtime scripts and + dependencies as a zip file. The zip file can have one or multiple + files. Maximum size of a zip file is 5 MB. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ScriptStatus defines the observed state of Script. + properties: + atProvider: + properties: + arn: + description: GameLift Script ARN. + type: string + id: + description: GameLift Script ID. + type: string + name: + description: Name of the script + type: string + storageLocation: + description: Information indicating where your game script files + are stored. See below. + properties: + bucket: + description: Name of your S3 bucket. + type: string + key: + description: Name of the zip file containing your script files. + type: string + objectVersion: + description: A specific version of the file. If not set, the + latest version of the file is retrieved. 
+ type: string + roleArn: + description: ARN of the access role that allows Amazon GameLift + to access your S3 bucket. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + version: + description: Version that is associated with this script. + type: string + zipFile: + description: A data object containing your Realtime scripts and + dependencies as a zip file. The zip file can have one or multiple + files. Maximum size of a zip file is 5 MB. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glacier.aws.upbound.io_vaults.yaml b/package/crds/glacier.aws.upbound.io_vaults.yaml index 888f6f5a50..e449e8c740 100644 --- a/package/crds/glacier.aws.upbound.io_vaults.yaml +++ b/package/crds/glacier.aws.upbound.io_vaults.yaml @@ -578,3 +578,557 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Vault is the Schema for the Vaults API. Provides a Glacier Vault. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VaultSpec defines the desired state of Vault + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessPolicy: + description: |- + The policy document. This is a JSON formatted string. + The heredoc syntax or file function is helpful here. Use the Glacier Developer Guide for more information on Glacier Vault Policy + type: string + notification: + description: The notifications for the Vault. Fields documented + below. + properties: + events: + description: You can configure a vault to publish a notification + for ArchiveRetrievalCompleted and InventoryRetrievalCompleted + events. + items: + type: string + type: array + x-kubernetes-list-type: set + snsTopic: + description: The SNS Topic ARN. + type: string + snsTopicRef: + description: Reference to a Topic in sns to populate snsTopic. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snsTopicSelector: + description: Selector for a Topic in sns to populate snsTopic. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessPolicy: + description: |- + The policy document. This is a JSON formatted string. + The heredoc syntax or file function is helpful here. Use the Glacier Developer Guide for more information on Glacier Vault Policy + type: string + notification: + description: The notifications for the Vault. Fields documented + below. + properties: + events: + description: You can configure a vault to publish a notification + for ArchiveRetrievalCompleted and InventoryRetrievalCompleted + events. + items: + type: string + type: array + x-kubernetes-list-type: set + snsTopic: + description: The SNS Topic ARN. + type: string + snsTopicRef: + description: Reference to a Topic in sns to populate snsTopic. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snsTopicSelector: + description: Selector for a Topic in sns to populate snsTopic. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: VaultStatus defines the observed state of Vault. + properties: + atProvider: + properties: + accessPolicy: + description: |- + The policy document. This is a JSON formatted string. + The heredoc syntax or file function is helpful here. Use the Glacier Developer Guide for more information on Glacier Vault Policy + type: string + arn: + description: The ARN of the vault. + type: string + id: + type: string + location: + description: The URI of the vault that was created. + type: string + notification: + description: The notifications for the Vault. Fields documented + below. 
+ properties: + events: + description: You can configure a vault to publish a notification + for ArchiveRetrievalCompleted and InventoryRetrievalCompleted + events. + items: + type: string + type: array + x-kubernetes-list-type: set + snsTopic: + description: The SNS Topic ARN. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/globalaccelerator.aws.upbound.io_accelerators.yaml b/package/crds/globalaccelerator.aws.upbound.io_accelerators.yaml index 96910b5ad8..2330e9102a 100644 --- a/package/crds/globalaccelerator.aws.upbound.io_accelerators.yaml +++ b/package/crds/globalaccelerator.aws.upbound.io_accelerators.yaml @@ -508,3 +508,487 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Accelerator is the Schema for the Accelerators API. Provides + a Global Accelerator accelerator. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AcceleratorSpec defines the desired state of Accelerator + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + attributes: + description: The attributes of the accelerator. Fields documented + below. + properties: + flowLogsEnabled: + description: 'Indicates whether flow logs are enabled. Defaults + to false. Valid values: true, false.' + type: boolean + flowLogsS3Bucket: + description: The name of the Amazon S3 bucket for the flow + logs. Required if flow_logs_enabled is true. + type: string + flowLogsS3Prefix: + description: The prefix for the location in the Amazon S3 + bucket for the flow logs. Required if flow_logs_enabled + is true. + type: string + type: object + enabled: + description: 'Indicates whether the accelerator is enabled. Defaults + to true. Valid values: true, false.' + type: boolean + ipAddressType: + description: 'The value for the address type. Defaults to IPV4. + Valid values: IPV4, DUAL_STACK.' + type: string + ipAddresses: + description: 'The IP addresses to use for BYOIP accelerators. + If not specified, the service assigns IP addresses. Valid values: + 1 or 2 IPv4 addresses.' 
+ items: + type: string + type: array + name: + description: The name of the accelerator. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + attributes: + description: The attributes of the accelerator. Fields documented + below. + properties: + flowLogsEnabled: + description: 'Indicates whether flow logs are enabled. Defaults + to false. Valid values: true, false.' + type: boolean + flowLogsS3Bucket: + description: The name of the Amazon S3 bucket for the flow + logs. Required if flow_logs_enabled is true. + type: string + flowLogsS3Prefix: + description: The prefix for the location in the Amazon S3 + bucket for the flow logs. Required if flow_logs_enabled + is true. + type: string + type: object + enabled: + description: 'Indicates whether the accelerator is enabled. Defaults + to true. Valid values: true, false.' + type: boolean + ipAddressType: + description: 'The value for the address type. Defaults to IPV4. + Valid values: IPV4, DUAL_STACK.' + type: string + ipAddresses: + description: 'The IP addresses to use for BYOIP accelerators. 
+ If not specified, the service assigns IP addresses. Valid values: + 1 or 2 IPv4 addresses.' + items: + type: string + type: array + name: + description: The name of the accelerator. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: AcceleratorStatus defines the observed state of Accelerator. + properties: + atProvider: + properties: + attributes: + description: The attributes of the accelerator. Fields documented + below. + properties: + flowLogsEnabled: + description: 'Indicates whether flow logs are enabled. Defaults + to false. Valid values: true, false.' + type: boolean + flowLogsS3Bucket: + description: The name of the Amazon S3 bucket for the flow + logs. Required if flow_logs_enabled is true. + type: string + flowLogsS3Prefix: + description: The prefix for the location in the Amazon S3 + bucket for the flow logs. Required if flow_logs_enabled + is true. + type: string + type: object + dnsName: + description: The DNS name of the accelerator. For example, a5d53ff5ee6bca4ce.awsglobalaccelerator.com. + type: string + dualStackDnsName: + description: 'The Domain Name System (DNS) name that Global Accelerator + creates that points to a dual-stack accelerator''s four static + IP addresses: two IPv4 addresses and two IPv6 addresses. For + example, a1234567890abcdef.dualstack.awsglobalaccelerator.com.' + type: string + enabled: + description: 'Indicates whether the accelerator is enabled. Defaults + to true. Valid values: true, false.' + type: boolean + hostedZoneId: + description: |- + - The Global Accelerator Route 53 zone ID that can be used to + route an Alias Resource Record Set to the Global Accelerator. This attribute + is simply an alias for the zone ID Z2BJ6XQ5FK7U4H. + type: string + id: + description: The Amazon Resource Name (ARN) of the accelerator. 
+ type: string + ipAddressType: + description: 'The value for the address type. Defaults to IPV4. + Valid values: IPV4, DUAL_STACK.' + type: string + ipAddresses: + description: 'The IP addresses to use for BYOIP accelerators. + If not specified, the service assigns IP addresses. Valid values: + 1 or 2 IPv4 addresses.' + items: + type: string + type: array + ipSets: + description: IP address set associated with the accelerator. + items: + properties: + ipAddresses: + description: 'The IP addresses to use for BYOIP accelerators. + If not specified, the service assigns IP addresses. Valid + values: 1 or 2 IPv4 addresses.' + items: + type: string + type: array + ipFamily: + description: The type of IP addresses included in this IP + set. + type: string + type: object + type: array + name: + description: The name of the accelerator. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glue.aws.upbound.io_catalogdatabases.yaml b/package/crds/glue.aws.upbound.io_catalogdatabases.yaml index 8871b5978b..dd300a0e0a 100644 --- a/package/crds/glue.aws.upbound.io_catalogdatabases.yaml +++ b/package/crds/glue.aws.upbound.io_catalogdatabases.yaml @@ -566,3 +566,530 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CatalogDatabase is the Schema for the CatalogDatabases API. 
Provides + a Glue Catalog Database. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CatalogDatabaseSpec defines the desired state of CatalogDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + catalogId: + description: ID of the Glue Catalog to create the database in. + If omitted, this defaults to the AWS Account ID. + type: string + createTableDefaultPermission: + description: Creates a set of default permissions on the table + for principals. See create_table_default_permission below. + items: + properties: + permissions: + description: The permissions that are granted to the principal. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + principal: + description: The principal who is granted permissions.. + See principal below. + properties: + dataLakePrincipalIdentifier: + description: An identifier for the Lake Formation principal. + type: string + type: object + type: object + type: array + description: + description: Description of the database. + type: string + federatedDatabase: + description: Configuration block that references an entity outside + the AWS Glue Data Catalog. See federated_database below. + properties: + connectionName: + description: Name of the connection to the external metastore. + type: string + identifier: + description: Unique identifier for the federated database. + type: string + type: object + locationUri: + description: Location of the database (for example, an HDFS path). + type: string + parameters: + additionalProperties: + type: string + description: List of key-value pairs that define parameters and + properties of the database. + type: object + x-kubernetes-map-type: granular + region: + description: |- + Region of the target database. + Region is the region you'd like your resource to be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + targetDatabase: + description: Configuration block for a target database for resource + linking. See target_database below. + properties: + catalogId: + description: ID of the Data Catalog in which the database + resides. + type: string + databaseName: + description: Name of the catalog database. + type: string + region: + description: Region of the target database. + type: string + required: + - catalogId + type: object + required: + - catalogId + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + createTableDefaultPermission: + description: Creates a set of default permissions on the table + for principals. See create_table_default_permission below. + items: + properties: + permissions: + description: The permissions that are granted to the principal. + items: + type: string + type: array + x-kubernetes-list-type: set + principal: + description: The principal who is granted permissions.. + See principal below. + properties: + dataLakePrincipalIdentifier: + description: An identifier for the Lake Formation principal. + type: string + type: object + type: object + type: array + description: + description: Description of the database. + type: string + federatedDatabase: + description: Configuration block that references an entity outside + the AWS Glue Data Catalog. See federated_database below. + properties: + connectionName: + description: Name of the connection to the external metastore. + type: string + identifier: + description: Unique identifier for the federated database. + type: string + type: object + locationUri: + description: Location of the database (for example, an HDFS path). + type: string + parameters: + additionalProperties: + type: string + description: List of key-value pairs that define parameters and + properties of the database. + type: object + x-kubernetes-map-type: granular + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + targetDatabase: + description: Configuration block for a target database for resource + linking. See target_database below. + properties: + databaseName: + description: Name of the catalog database. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: CatalogDatabaseStatus defines the observed state of CatalogDatabase. 
+ properties: + atProvider: + properties: + arn: + description: ARN of the Glue Catalog Database. + type: string + catalogId: + description: ID of the Glue Catalog to create the database in. + If omitted, this defaults to the AWS Account ID. + type: string + createTableDefaultPermission: + description: Creates a set of default permissions on the table + for principals. See create_table_default_permission below. + items: + properties: + permissions: + description: The permissions that are granted to the principal. + items: + type: string + type: array + x-kubernetes-list-type: set + principal: + description: The principal who is granted permissions.. + See principal below. + properties: + dataLakePrincipalIdentifier: + description: An identifier for the Lake Formation principal. + type: string + type: object + type: object + type: array + description: + description: Description of the database. + type: string + federatedDatabase: + description: Configuration block that references an entity outside + the AWS Glue Data Catalog. See federated_database below. + properties: + connectionName: + description: Name of the connection to the external metastore. + type: string + identifier: + description: Unique identifier for the federated database. + type: string + type: object + id: + description: Catalog ID and name of the database. + type: string + locationUri: + description: Location of the database (for example, an HDFS path). + type: string + parameters: + additionalProperties: + type: string + description: List of key-value pairs that define parameters and + properties of the database. + type: object + x-kubernetes-map-type: granular + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + targetDatabase: + description: Configuration block for a target database for resource + linking. See target_database below. + properties: + catalogId: + description: ID of the Data Catalog in which the database + resides. + type: string + databaseName: + description: Name of the catalog database. + type: string + region: + description: Region of the target database. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glue.aws.upbound.io_catalogtables.yaml b/package/crds/glue.aws.upbound.io_catalogtables.yaml index 9c88ce9e0a..fa6ac653f3 100644 --- a/package/crds/glue.aws.upbound.io_catalogtables.yaml +++ b/package/crds/glue.aws.upbound.io_catalogtables.yaml @@ -1235,3 +1235,1169 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CatalogTable is the Schema for the CatalogTables API. Provides + a Glue Catalog Table. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CatalogTableSpec defines the desired state of CatalogTable + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + catalogId: + description: ID of the Glue Catalog and database to create the + table in. If omitted, this defaults to the AWS Account ID plus + the database name. + type: string + databaseName: + description: Name of the metadata database where the table metadata + resides. For Hive compatibility, this must be all lowercase. + type: string + databaseNameRef: + description: Reference to a CatalogDatabase in glue to populate + databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a CatalogDatabase in glue to populate + databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Description of the table. + type: string + openTableFormatInput: + description: Configuration block for open table formats. See open_table_format_input + below. + properties: + icebergInput: + description: Configuration block for iceberg table config. + See iceberg_input below. + properties: + metadataOperation: + description: A required metadata operation. Can only be + set to CREATE. 
+ type: string + version: + description: The table version for the Iceberg table. + Defaults to 2. + type: string + type: object + type: object + owner: + description: Owner of the table. + type: string + parameters: + additionalProperties: + type: string + description: Properties associated with this table, as a list + of key-value pairs. + type: object + x-kubernetes-map-type: granular + partitionIndex: + description: Configuration block for a maximum of 3 partition + indexes. See partition_index below. + items: + properties: + indexName: + description: Name of the partition index. + type: string + keys: + description: Keys for the partition index. + items: + type: string + type: array + type: object + type: array + partitionKeys: + description: Configuration block of columns by which the table + is partitioned. Only primitive types are supported as partition + keys. See partition_keys below. + items: + properties: + comment: + description: Free-form text comment. + type: string + name: + description: Name of the Partition Key. + type: string + type: + description: Datatype of data in the Partition Key. + type: string + type: object + type: array + region: + description: |- + Region of the target table. + Region is the region you'd like your resource to be created in. + type: string + retention: + description: Retention time for this table. + type: number + storageDescriptor: + description: Configuration block for information about the physical + storage of this table. For more information, refer to the Glue + Developer Guide. See storage_descriptor below. + properties: + bucketColumns: + description: List of reducer grouping columns, clustering + columns, and bucketing columns in the table. + items: + type: string + type: array + columns: + description: Configuration block for columns in the table. + See columns below. + items: + properties: + comment: + description: Free-form text comment. + type: string + name: + description: Name of the Column. 
+ type: string + parameters: + additionalProperties: + type: string + description: Key-value pairs defining properties associated + with the column. + type: object + x-kubernetes-map-type: granular + type: + description: Datatype of data in the Column. + type: string + type: object + type: array + compressed: + description: Whether the data in the table is compressed. + type: boolean + inputFormat: + description: 'Input format: SequenceFileInputFormat (binary), + or TextInputFormat, or a custom format.' + type: string + location: + description: Physical location of the table. By default this + takes the form of the warehouse location, followed by the + database location in the warehouse, followed by the table + name. + type: string + numberOfBuckets: + description: Must be specified if the table contains any dimension + columns. + type: number + outputFormat: + description: 'Output format: SequenceFileOutputFormat (binary), + or IgnoreKeyTextOutputFormat, or a custom format.' + type: string + parameters: + additionalProperties: + type: string + description: User-supplied properties in key-value form. + type: object + x-kubernetes-map-type: granular + schemaReference: + description: Object that references a schema stored in the + AWS Glue Schema Registry. When creating a table, you can + pass an empty list of columns for the schema, and instead + use a schema reference. See Schema Reference below. + properties: + schemaId: + description: Configuration block that contains schema + identity fields. Either this or the schema_version_id + has to be provided. See schema_id below. + properties: + registryName: + description: Name of the schema registry that contains + the schema. Must be provided when schema_name is + specified and conflicts with schema_arn. + type: string + schemaArn: + description: ARN of the schema. One of schema_arn + or schema_name has to be provided. + type: string + schemaName: + description: Name of the schema. 
One of schema_arn + or schema_name has to be provided. + type: string + type: object + schemaVersionId: + description: Unique ID assigned to a version of the schema. + Either this or the schema_id has to be provided. + type: string + schemaVersionNumber: + description: Version number of the schema. + type: number + type: object + serDeInfo: + description: Configuration block for serialization and deserialization + ("SerDe") information. See ser_de_info below. + properties: + name: + description: Name of the SerDe. + type: string + parameters: + additionalProperties: + type: string + description: Map of initialization parameters for the + SerDe, in key-value form. + type: object + x-kubernetes-map-type: granular + serializationLibrary: + description: Usually the class that implements the SerDe. + An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. + type: string + type: object + skewedInfo: + description: Configuration block with information about values + that appear very frequently in a column (skewed values). + See skewed_info below. + properties: + skewedColumnNames: + description: List of names of columns that contain skewed + values. + items: + type: string + type: array + skewedColumnValueLocationMaps: + additionalProperties: + type: string + description: List of values that appear so frequently + as to be considered skewed. + type: object + x-kubernetes-map-type: granular + skewedColumnValues: + description: Map of skewed values to the columns that + contain them. + items: + type: string + type: array + type: object + sortColumns: + description: Configuration block for the sort order of each + bucket in the table. See sort_columns below. + items: + properties: + column: + description: Name of the column. + type: string + sortOrder: + description: Whether the column is sorted in ascending + (1) or descending order (0). 
+ type: number + type: object + type: array + storedAsSubDirectories: + description: Whether the table data is stored in subdirectories. + type: boolean + type: object + tableType: + description: Type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, + etc.). While optional, some Athena DDL queries such as ALTER + TABLE and SHOW CREATE TABLE will fail if this argument is empty. + type: string + targetTable: + description: Configuration block of a target table for resource + linking. See target_table below. + properties: + catalogId: + description: ID of the Data Catalog in which the table resides. + type: string + databaseName: + description: Name of the catalog database that contains the + target table. + type: string + name: + description: Name of the target table. + type: string + region: + description: Region of the target table. + type: string + required: + - catalogId + - databaseName + type: object + viewExpandedText: + description: If the table is a view, the expanded text of the + view; otherwise null. + type: string + viewOriginalText: + description: If the table is a view, the original text of the + view; otherwise null. + type: string + required: + - catalogId + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the table. 
+ type: string + openTableFormatInput: + description: Configuration block for open table formats. See open_table_format_input + below. + properties: + icebergInput: + description: Configuration block for iceberg table config. + See iceberg_input below. + properties: + metadataOperation: + description: A required metadata operation. Can only be + set to CREATE. + type: string + version: + description: The table version for the Iceberg table. + Defaults to 2. + type: string + type: object + type: object + owner: + description: Owner of the table. + type: string + parameters: + additionalProperties: + type: string + description: Properties associated with this table, as a list + of key-value pairs. + type: object + x-kubernetes-map-type: granular + partitionIndex: + description: Configuration block for a maximum of 3 partition + indexes. See partition_index below. + items: + properties: + indexName: + description: Name of the partition index. + type: string + keys: + description: Keys for the partition index. + items: + type: string + type: array + type: object + type: array + partitionKeys: + description: Configuration block of columns by which the table + is partitioned. Only primitive types are supported as partition + keys. See partition_keys below. + items: + properties: + comment: + description: Free-form text comment. + type: string + name: + description: Name of the Partition Key. + type: string + type: + description: Datatype of data in the Partition Key. + type: string + type: object + type: array + retention: + description: Retention time for this table. + type: number + storageDescriptor: + description: Configuration block for information about the physical + storage of this table. For more information, refer to the Glue + Developer Guide. See storage_descriptor below. + properties: + bucketColumns: + description: List of reducer grouping columns, clustering + columns, and bucketing columns in the table. 
+ items: + type: string + type: array + columns: + description: Configuration block for columns in the table. + See columns below. + items: + properties: + comment: + description: Free-form text comment. + type: string + name: + description: Name of the Column. + type: string + parameters: + additionalProperties: + type: string + description: Key-value pairs defining properties associated + with the column. + type: object + x-kubernetes-map-type: granular + type: + description: Datatype of data in the Column. + type: string + type: object + type: array + compressed: + description: Whether the data in the table is compressed. + type: boolean + inputFormat: + description: 'Input format: SequenceFileInputFormat (binary), + or TextInputFormat, or a custom format.' + type: string + location: + description: Physical location of the table. By default this + takes the form of the warehouse location, followed by the + database location in the warehouse, followed by the table + name. + type: string + numberOfBuckets: + description: Must be specified if the table contains any dimension + columns. + type: number + outputFormat: + description: 'Output format: SequenceFileOutputFormat (binary), + or IgnoreKeyTextOutputFormat, or a custom format.' + type: string + parameters: + additionalProperties: + type: string + description: User-supplied properties in key-value form. + type: object + x-kubernetes-map-type: granular + schemaReference: + description: Object that references a schema stored in the + AWS Glue Schema Registry. When creating a table, you can + pass an empty list of columns for the schema, and instead + use a schema reference. See Schema Reference below. + properties: + schemaId: + description: Configuration block that contains schema + identity fields. Either this or the schema_version_id + has to be provided. See schema_id below. + properties: + registryName: + description: Name of the schema registry that contains + the schema. 
Must be provided when schema_name is + specified and conflicts with schema_arn. + type: string + schemaArn: + description: ARN of the schema. One of schema_arn + or schema_name has to be provided. + type: string + schemaName: + description: Name of the schema. One of schema_arn + or schema_name has to be provided. + type: string + type: object + schemaVersionId: + description: Unique ID assigned to a version of the schema. + Either this or the schema_id has to be provided. + type: string + schemaVersionNumber: + description: Version number of the schema. + type: number + type: object + serDeInfo: + description: Configuration block for serialization and deserialization + ("SerDe") information. See ser_de_info below. + properties: + name: + description: Name of the SerDe. + type: string + parameters: + additionalProperties: + type: string + description: Map of initialization parameters for the + SerDe, in key-value form. + type: object + x-kubernetes-map-type: granular + serializationLibrary: + description: Usually the class that implements the SerDe. + An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. + type: string + type: object + skewedInfo: + description: Configuration block with information about values + that appear very frequently in a column (skewed values). + See skewed_info below. + properties: + skewedColumnNames: + description: List of names of columns that contain skewed + values. + items: + type: string + type: array + skewedColumnValueLocationMaps: + additionalProperties: + type: string + description: List of values that appear so frequently + as to be considered skewed. + type: object + x-kubernetes-map-type: granular + skewedColumnValues: + description: Map of skewed values to the columns that + contain them. + items: + type: string + type: array + type: object + sortColumns: + description: Configuration block for the sort order of each + bucket in the table. See sort_columns below. 
+ items: + properties: + column: + description: Name of the column. + type: string + sortOrder: + description: Whether the column is sorted in ascending + (1) or descending order (0). + type: number + type: object + type: array + storedAsSubDirectories: + description: Whether the table data is stored in subdirectories. + type: boolean + type: object + tableType: + description: Type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, + etc.). While optional, some Athena DDL queries such as ALTER + TABLE and SHOW CREATE TABLE will fail if this argument is empty. + type: string + targetTable: + description: Configuration block of a target table for resource + linking. See target_table below. + properties: + name: + description: Name of the target table. + type: string + type: object + viewExpandedText: + description: If the table is a view, the expanded text of the + view; otherwise null. + type: string + viewOriginalText: + description: If the table is a view, the original text of the + view; otherwise null. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: CatalogTableStatus defines the observed state of CatalogTable. + properties: + atProvider: + properties: + arn: + description: The ARN of the Glue Table. + type: string + catalogId: + description: ID of the Glue Catalog and database to create the + table in. If omitted, this defaults to the AWS Account ID plus + the database name. + type: string + databaseName: + description: Name of the metadata database where the table metadata + resides. For Hive compatibility, this must be all lowercase. + type: string + description: + description: Description of the table. + type: string + id: + description: Catalog ID, Database name and of the name table. + type: string + openTableFormatInput: + description: Configuration block for open table formats. See open_table_format_input + below. + properties: + icebergInput: + description: Configuration block for iceberg table config. + See iceberg_input below. + properties: + metadataOperation: + description: A required metadata operation. Can only be + set to CREATE. 
+ type: string + version: + description: The table version for the Iceberg table. + Defaults to 2. + type: string + type: object + type: object + owner: + description: Owner of the table. + type: string + parameters: + additionalProperties: + type: string + description: Properties associated with this table, as a list + of key-value pairs. + type: object + x-kubernetes-map-type: granular + partitionIndex: + description: Configuration block for a maximum of 3 partition + indexes. See partition_index below. + items: + properties: + indexName: + description: Name of the partition index. + type: string + indexStatus: + type: string + keys: + description: Keys for the partition index. + items: + type: string + type: array + type: object + type: array + partitionKeys: + description: Configuration block of columns by which the table + is partitioned. Only primitive types are supported as partition + keys. See partition_keys below. + items: + properties: + comment: + description: Free-form text comment. + type: string + name: + description: Name of the Partition Key. + type: string + type: + description: Datatype of data in the Partition Key. + type: string + type: object + type: array + retention: + description: Retention time for this table. + type: number + storageDescriptor: + description: Configuration block for information about the physical + storage of this table. For more information, refer to the Glue + Developer Guide. See storage_descriptor below. + properties: + bucketColumns: + description: List of reducer grouping columns, clustering + columns, and bucketing columns in the table. + items: + type: string + type: array + columns: + description: Configuration block for columns in the table. + See columns below. + items: + properties: + comment: + description: Free-form text comment. + type: string + name: + description: Name of the Column. 
+ type: string + parameters: + additionalProperties: + type: string + description: Key-value pairs defining properties associated + with the column. + type: object + x-kubernetes-map-type: granular + type: + description: Datatype of data in the Column. + type: string + type: object + type: array + compressed: + description: Whether the data in the table is compressed. + type: boolean + inputFormat: + description: 'Input format: SequenceFileInputFormat (binary), + or TextInputFormat, or a custom format.' + type: string + location: + description: Physical location of the table. By default this + takes the form of the warehouse location, followed by the + database location in the warehouse, followed by the table + name. + type: string + numberOfBuckets: + description: Must be specified if the table contains any dimension + columns. + type: number + outputFormat: + description: 'Output format: SequenceFileOutputFormat (binary), + or IgnoreKeyTextOutputFormat, or a custom format.' + type: string + parameters: + additionalProperties: + type: string + description: User-supplied properties in key-value form. + type: object + x-kubernetes-map-type: granular + schemaReference: + description: Object that references a schema stored in the + AWS Glue Schema Registry. When creating a table, you can + pass an empty list of columns for the schema, and instead + use a schema reference. See Schema Reference below. + properties: + schemaId: + description: Configuration block that contains schema + identity fields. Either this or the schema_version_id + has to be provided. See schema_id below. + properties: + registryName: + description: Name of the schema registry that contains + the schema. Must be provided when schema_name is + specified and conflicts with schema_arn. + type: string + schemaArn: + description: ARN of the schema. One of schema_arn + or schema_name has to be provided. + type: string + schemaName: + description: Name of the schema. 
One of schema_arn + or schema_name has to be provided. + type: string + type: object + schemaVersionId: + description: Unique ID assigned to a version of the schema. + Either this or the schema_id has to be provided. + type: string + schemaVersionNumber: + description: Version number of the schema. + type: number + type: object + serDeInfo: + description: Configuration block for serialization and deserialization + ("SerDe") information. See ser_de_info below. + properties: + name: + description: Name of the SerDe. + type: string + parameters: + additionalProperties: + type: string + description: Map of initialization parameters for the + SerDe, in key-value form. + type: object + x-kubernetes-map-type: granular + serializationLibrary: + description: Usually the class that implements the SerDe. + An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe. + type: string + type: object + skewedInfo: + description: Configuration block with information about values + that appear very frequently in a column (skewed values). + See skewed_info below. + properties: + skewedColumnNames: + description: List of names of columns that contain skewed + values. + items: + type: string + type: array + skewedColumnValueLocationMaps: + additionalProperties: + type: string + description: List of values that appear so frequently + as to be considered skewed. + type: object + x-kubernetes-map-type: granular + skewedColumnValues: + description: Map of skewed values to the columns that + contain them. + items: + type: string + type: array + type: object + sortColumns: + description: Configuration block for the sort order of each + bucket in the table. See sort_columns below. + items: + properties: + column: + description: Name of the column. + type: string + sortOrder: + description: Whether the column is sorted in ascending + (1) or descending order (0). 
+ type: number + type: object + type: array + storedAsSubDirectories: + description: Whether the table data is stored in subdirectories. + type: boolean + type: object + tableType: + description: Type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, + etc.). While optional, some Athena DDL queries such as ALTER + TABLE and SHOW CREATE TABLE will fail if this argument is empty. + type: string + targetTable: + description: Configuration block of a target table for resource + linking. See target_table below. + properties: + catalogId: + description: ID of the Data Catalog in which the table resides. + type: string + databaseName: + description: Name of the catalog database that contains the + target table. + type: string + name: + description: Name of the target table. + type: string + region: + description: Region of the target table. + type: string + type: object + viewExpandedText: + description: If the table is a view, the expanded text of the + view; otherwise null. + type: string + viewOriginalText: + description: If the table is a view, the original text of the + view; otherwise null. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glue.aws.upbound.io_classifiers.yaml b/package/crds/glue.aws.upbound.io_classifiers.yaml index f314fb0b63..a1dab33a5c 100644 --- a/package/crds/glue.aws.upbound.io_classifiers.yaml +++ b/package/crds/glue.aws.upbound.io_classifiers.yaml @@ -601,3 +601,562 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Classifier is the Schema for the Classifiers API. Provides an + Glue Classifier resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClassifierSpec defines the desired state of Classifier + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + csvClassifier: + description: A classifier for Csv content. Defined below. + properties: + allowSingleColumn: + description: Enables the processing of files that contain + only one column. + type: boolean + containsHeader: + description: Indicates whether the CSV file contains a header. + This can be one of "ABSENT", "PRESENT", or "UNKNOWN". + type: string + customDatatypeConfigured: + description: Enables the custom datatype to be configured. + type: boolean + customDatatypes: + description: A list of supported custom datatypes. 
Valid values + are BINARY, BOOLEAN, DATE, DECIMAL, DOUBLE, FLOAT, INT, + LONG, SHORT, STRING, TIMESTAMP. + items: + type: string + type: array + delimiter: + description: The delimiter used in the Csv to separate columns. + type: string + disableValueTrimming: + description: Specifies whether to trim column values. + type: boolean + header: + description: A list of strings representing column names. + items: + type: string + type: array + quoteSymbol: + description: A custom symbol to denote what combines content + into a single column value. It must be different from the + column delimiter. + type: string + serde: + type: string + type: object + grokClassifier: + description: – A classifier that uses grok patterns. Defined + below. + properties: + classification: + description: An identifier of the data format that the classifier + matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch + Logs, and so on. + type: string + customPatterns: + description: Custom grok patterns used by this classifier. + type: string + grokPattern: + description: The grok pattern used by this classifier. + type: string + type: object + jsonClassifier: + description: – A classifier for JSON content. Defined below. + properties: + jsonPath: + description: A JsonPath string defining the JSON data for + the classifier to classify. AWS Glue supports a subset of + JsonPath, as described in Writing JsonPath Custom Classifiers. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + xmlClassifier: + description: – A classifier for XML content. Defined below. + properties: + classification: + description: An identifier of the data format that the classifier + matches. + type: string + rowTag: + description: The XML tag designating the element that contains + each record in an XML document being parsed. Note that this + cannot identify a self-closing element (closed by />). 
An + empty row element that contains only attributes can be parsed + as long as it ends with a closing tag (for example, is okay, but is not). + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + csvClassifier: + description: A classifier for Csv content. Defined below. + properties: + allowSingleColumn: + description: Enables the processing of files that contain + only one column. + type: boolean + containsHeader: + description: Indicates whether the CSV file contains a header. + This can be one of "ABSENT", "PRESENT", or "UNKNOWN". + type: string + customDatatypeConfigured: + description: Enables the custom datatype to be configured. + type: boolean + customDatatypes: + description: A list of supported custom datatypes. Valid values + are BINARY, BOOLEAN, DATE, DECIMAL, DOUBLE, FLOAT, INT, + LONG, SHORT, STRING, TIMESTAMP. + items: + type: string + type: array + delimiter: + description: The delimiter used in the Csv to separate columns. + type: string + disableValueTrimming: + description: Specifies whether to trim column values. + type: boolean + header: + description: A list of strings representing column names. + items: + type: string + type: array + quoteSymbol: + description: A custom symbol to denote what combines content + into a single column value. 
It must be different from the + column delimiter. + type: string + serde: + type: string + type: object + grokClassifier: + description: – A classifier that uses grok patterns. Defined + below. + properties: + classification: + description: An identifier of the data format that the classifier + matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch + Logs, and so on. + type: string + customPatterns: + description: Custom grok patterns used by this classifier. + type: string + grokPattern: + description: The grok pattern used by this classifier. + type: string + type: object + jsonClassifier: + description: – A classifier for JSON content. Defined below. + properties: + jsonPath: + description: A JsonPath string defining the JSON data for + the classifier to classify. AWS Glue supports a subset of + JsonPath, as described in Writing JsonPath Custom Classifiers. + type: string + type: object + xmlClassifier: + description: – A classifier for XML content. Defined below. + properties: + classification: + description: An identifier of the data format that the classifier + matches. + type: string + rowTag: + description: The XML tag designating the element that contains + each record in an XML document being parsed. Note that this + cannot identify a self-closing element (closed by />). An + empty row element that contains only attributes can be parsed + as long as it ends with a closing tag (for example, is okay, but is not). + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ClassifierStatus defines the observed state of Classifier. + properties: + atProvider: + properties: + csvClassifier: + description: A classifier for Csv content. Defined below. + properties: + allowSingleColumn: + description: Enables the processing of files that contain + only one column. + type: boolean + containsHeader: + description: Indicates whether the CSV file contains a header. + This can be one of "ABSENT", "PRESENT", or "UNKNOWN". + type: string + customDatatypeConfigured: + description: Enables the custom datatype to be configured. 
+ type: boolean + customDatatypes: + description: A list of supported custom datatypes. Valid values + are BINARY, BOOLEAN, DATE, DECIMAL, DOUBLE, FLOAT, INT, + LONG, SHORT, STRING, TIMESTAMP. + items: + type: string + type: array + delimiter: + description: The delimiter used in the Csv to separate columns. + type: string + disableValueTrimming: + description: Specifies whether to trim column values. + type: boolean + header: + description: A list of strings representing column names. + items: + type: string + type: array + quoteSymbol: + description: A custom symbol to denote what combines content + into a single column value. It must be different from the + column delimiter. + type: string + serde: + type: string + type: object + grokClassifier: + description: – A classifier that uses grok patterns. Defined + below. + properties: + classification: + description: An identifier of the data format that the classifier + matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch + Logs, and so on. + type: string + customPatterns: + description: Custom grok patterns used by this classifier. + type: string + grokPattern: + description: The grok pattern used by this classifier. + type: string + type: object + id: + description: Name of the classifier + type: string + jsonClassifier: + description: – A classifier for JSON content. Defined below. + properties: + jsonPath: + description: A JsonPath string defining the JSON data for + the classifier to classify. AWS Glue supports a subset of + JsonPath, as described in Writing JsonPath Custom Classifiers. + type: string + type: object + xmlClassifier: + description: – A classifier for XML content. Defined below. + properties: + classification: + description: An identifier of the data format that the classifier + matches. + type: string + rowTag: + description: The XML tag designating the element that contains + each record in an XML document being parsed. 
Note that this + cannot identify a self-closing element (closed by />). An + empty row element that contains only attributes can be parsed + as long as it ends with a closing tag (for example, is okay, but is not). + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glue.aws.upbound.io_connections.yaml b/package/crds/glue.aws.upbound.io_connections.yaml index 368dbbf75a..34ac82a182 100644 --- a/package/crds/glue.aws.upbound.io_connections.yaml +++ b/package/crds/glue.aws.upbound.io_connections.yaml @@ -789,3 +789,768 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Connection is the Schema for the Connections API. Provides an + Glue Connection resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConnectionSpec defines the desired state of Connection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + catalogId: + description: – The ID of the Data Catalog in which to create + the connection. If none is supplied, the AWS account ID is used + by default. + type: string + connectionPropertiesSecretRef: + description: value pairs used as parameters for this connection. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + connectionType: + description: '– The type of the connection. Supported are: CUSTOM, + JDBC, KAFKA, MARKETPLACE, MONGODB, and NETWORK. Defaults to + JDBC.' + type: string + description: + description: – Description of the connection. + type: string + matchCriteria: + description: – A list of criteria that can be used in selecting + this connection. + items: + type: string + type: array + physicalConnectionRequirements: + description: A map of physical connection requirements, such as + VPC and SecurityGroup. Defined below. 
+ properties: + availabilityZone: + description: The availability zone of the connection. This + field is redundant and implied by subnet_id, but is currently + an api requirement. + type: string + availabilityZoneRef: + description: Reference to a Subnet in ec2 to populate availabilityZone. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + availabilityZoneSelector: + description: Selector for a Subnet in ec2 to populate availabilityZone. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIdList: + description: The security group ID list used by the connection. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The subnet ID used by the connection. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - catalogId + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + connectionPropertiesSecretRef: + additionalProperties: + type: string + type: object + connectionType: + description: '– The type of the connection. Supported are: CUSTOM, + JDBC, KAFKA, MARKETPLACE, MONGODB, and NETWORK. Defaults to + JDBC.' + type: string + description: + description: – Description of the connection. + type: string + matchCriteria: + description: – A list of criteria that can be used in selecting + this connection. + items: + type: string + type: array + physicalConnectionRequirements: + description: A map of physical connection requirements, such as + VPC and SecurityGroup. Defined below. + properties: + availabilityZone: + description: The availability zone of the connection. This + field is redundant and implied by subnet_id, but is currently + an api requirement. + type: string + availabilityZoneRef: + description: Reference to a Subnet in ec2 to populate availabilityZone. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + availabilityZoneSelector: + description: Selector for a Subnet in ec2 to populate availabilityZone. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIdList: + description: The security group ID list used by the connection. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The subnet ID used by the connection. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ConnectionStatus defines the observed state of Connection. + properties: + atProvider: + properties: + arn: + description: The ARN of the Glue Connection. + type: string + catalogId: + description: – The ID of the Data Catalog in which to create + the connection. If none is supplied, the AWS account ID is used + by default. + type: string + connectionType: + description: '– The type of the connection. Supported are: CUSTOM, + JDBC, KAFKA, MARKETPLACE, MONGODB, and NETWORK. Defaults to + JDBC.' + type: string + description: + description: – Description of the connection. 
+ type: string + id: + description: Catalog ID and name of the connection + type: string + matchCriteria: + description: – A list of criteria that can be used in selecting + this connection. + items: + type: string + type: array + physicalConnectionRequirements: + description: A map of physical connection requirements, such as + VPC and SecurityGroup. Defined below. + properties: + availabilityZone: + description: The availability zone of the connection. This + field is redundant and implied by subnet_id, but is currently + an api requirement. + type: string + securityGroupIdList: + description: The security group ID list used by the connection. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The subnet ID used by the connection. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glue.aws.upbound.io_crawlers.yaml b/package/crds/glue.aws.upbound.io_crawlers.yaml index 7f512a01f1..4b08865de2 100644 --- a/package/crds/glue.aws.upbound.io_crawlers.yaml +++ b/package/crds/glue.aws.upbound.io_crawlers.yaml @@ -2042,3 +2042,2003 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Crawler is the Schema for the Crawlers API. 
Manages a Glue Crawler + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CrawlerSpec defines the desired state of Crawler + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + catalogTarget: + description: List of nested AWS Glue Data Catalog target arguments. + See Catalog Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + databaseName: + description: Glue database where results are written. + type: string + databaseNameRef: + description: Reference to a CatalogDatabase in glue to populate + databaseName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a CatalogDatabase in glue to populate + databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dlqEventQueueArn: + description: The ARN of the dead-letter SQS queue. + type: string + eventQueueArn: + description: The ARN of the SQS queue to receive S3 notifications + from. + type: string + tables: + description: A list of catalog tables to be synchronized. + items: + type: string + type: array + type: object + type: array + classifiers: + description: List of custom classifiers. By default, all AWS classifiers + are included in a crawl, but these custom classifiers always + override the default classifiers for a given classification. + items: + type: string + type: array + configuration: + description: JSON string of configuration information. For more + details see Setting Crawler Configuration Options. + type: string + databaseName: + description: Glue database where results are written. + type: string + databaseNameRef: + description: Reference to a CatalogDatabase in glue to populate + databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a CatalogDatabase in glue to populate + databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deltaTarget: + description: List of nested Delta Lake target arguments. See Delta + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + createNativeDeltaTable: + description: Specifies whether the crawler will create native + tables, to allow integration with query engines that support + querying of the Delta transaction log directly. + type: boolean + deltaTables: + description: A list of the Amazon S3 paths to the Delta + tables. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + writeManifest: + description: Specifies whether to write the manifest files + to the Delta table path. + type: boolean + type: object + type: array + description: + description: Description of the crawler. + type: string + dynamodbTarget: + description: List of nested DynamoDB target arguments. See Dynamodb + Target below. + items: + properties: + path: + description: The name of the DynamoDB table to crawl. + type: string + scanAll: + description: Indicates whether to scan all the records, + or to sample rows from the table. Scanning all the records + can take a long time when the table is not a high throughput + table. defaults to true. + type: boolean + scanRate: + description: The percentage of the configured read capacity + units to use by the AWS Glue crawler. The valid values + are null or a value between 0.1 to 1.5. + type: number + type: object + type: array + hudiTarget: + description: List of nested Hudi target arguments. See Iceberg + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + maximumTraversalDepth: + description: The maximum depth of Amazon S3 paths that the + crawler can traverse to discover the Hudi metadata folder + in your Amazon S3 path. Used to limit the crawler run + time. Valid values are between 1 and 20. + type: number + paths: + description: One or more Amazon S3 paths that contains Hudi + metadata folders as s3://bucket/prefix. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + icebergTarget: + description: List of nested Iceberg target arguments. See Iceberg + Target below. 
+ items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + maximumTraversalDepth: + description: The maximum depth of Amazon S3 paths that the + crawler can traverse to discover the Hudi metadata folder + in your Amazon S3 path. Used to limit the crawler run + time. Valid values are between 1 and 20. + type: number + paths: + description: One or more Amazon S3 paths that contains Hudi + metadata folders as s3://bucket/prefix. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + jdbcTarget: + description: List of nested JDBC target arguments. See JDBC Target + below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + connectionNameRef: + description: Reference to a Connection in glue to populate + connectionName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionNameSelector: + description: Selector for a Connection in glue to populate + connectionName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enableAdditionalMetadata: + description: Specify a value of RAWTYPES or COMMENTS to + enable additional metadata intable responses. RAWTYPES + provides the native-level datatype. COMMENTS provides + comments associated with a column or table in the database. + items: + type: string + type: array + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + path: + description: The name of the DynamoDB table to crawl. + type: string + type: object + type: array + lakeFormationConfiguration: + description: Specifies Lake Formation configuration settings for + the crawler. See Lake Formation Configuration below. 
+ properties: + accountId: + description: Required for cross account crawls. For same account + crawls as the target data, this can omitted. + type: string + useLakeFormationCredentials: + description: Specifies whether to use Lake Formation credentials + for the crawler instead of the IAM role credentials. + type: boolean + type: object + lineageConfiguration: + description: Specifies data lineage configuration settings for + the crawler. See Lineage Configuration below. + properties: + crawlerLineageSettings: + description: 'Specifies whether data lineage is enabled for + the crawler. Valid values are: ENABLE and DISABLE. Default + value is DISABLE.' + type: string + type: object + mongodbTarget: + description: List of nested MongoDB target arguments. See MongoDB + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + connectionNameRef: + description: Reference to a Connection in glue to populate + connectionName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionNameSelector: + description: Selector for a Connection in glue to populate + connectionName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + path: + description: The name of the DynamoDB table to crawl. + type: string + scanAll: + description: Indicates whether to scan all the records, + or to sample rows from the table. Scanning all the records + can take a long time when the table is not a high throughput + table. defaults to true. + type: boolean + type: object + type: array + recrawlPolicy: + description: A policy that specifies whether to crawl the entire + dataset again, or to crawl only folders that were added since + the last crawler run.. See Recrawl Policy below. 
+ properties: + recrawlBehavior: + description: 'Specifies whether to crawl the entire dataset + again, crawl only folders that were added since the last + crawler run, or crawl what S3 notifies the crawler of via + SQS. Valid Values are: CRAWL_EVENT_MODE, CRAWL_EVERYTHING + and CRAWL_NEW_FOLDERS_ONLY. Default value is CRAWL_EVERYTHING.' + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + role: + description: The IAM role friendly name (including path without + leading slash), or ARN of an IAM role, used by the crawler to + access other resources. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3Target: + description: List of nested Amazon S3 target arguments. See S3 + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + dlqEventQueueArn: + description: The ARN of the dead-letter SQS queue. + type: string + eventQueueArn: + description: The ARN of the SQS queue to receive S3 notifications + from. + type: string + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + path: + description: The name of the DynamoDB table to crawl. + type: string + sampleSize: + description: Sets the number of files in each leaf folder + to be crawled when crawling sample files in a dataset. + If not set, all the files are crawled. A valid value is + an integer between 1 and 249. + type: number + type: object + type: array + schedule: + description: 'Based Schedules for Jobs and Crawlers. For example, + to run something every day at 12:15 UTC, you would specify: + cron(15 12 * * ? 
*).' + type: string + schemaChangePolicy: + description: Policy for the crawler's update and deletion behavior. + See Schema Change Policy below. + properties: + deleteBehavior: + description: 'The deletion behavior when the crawler finds + a deleted object. Valid values: LOG, DELETE_FROM_DATABASE, + or DEPRECATE_IN_DATABASE. Defaults to DEPRECATE_IN_DATABASE.' + type: string + updateBehavior: + description: 'The update behavior when the crawler finds a + changed schema. Valid values: LOG or UPDATE_IN_DATABASE. + Defaults to UPDATE_IN_DATABASE.' + type: string + type: object + securityConfiguration: + description: The name of Security Configuration to be used by + the crawler + type: string + tablePrefix: + description: The table prefix used for catalog tables that are + created. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + catalogTarget: + description: List of nested AWS Glue Data Catalog target arguments. + See Catalog Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + databaseName: + description: Glue database where results are written. 
+ type: string + databaseNameRef: + description: Reference to a CatalogDatabase in glue to populate + databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a CatalogDatabase in glue to populate + databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dlqEventQueueArn: + description: The ARN of the dead-letter SQS queue. + type: string + eventQueueArn: + description: The ARN of the SQS queue to receive S3 notifications + from. + type: string + tables: + description: A list of catalog tables to be synchronized. + items: + type: string + type: array + type: object + type: array + classifiers: + description: List of custom classifiers. By default, all AWS classifiers + are included in a crawl, but these custom classifiers always + override the default classifiers for a given classification. + items: + type: string + type: array + configuration: + description: JSON string of configuration information. For more + details see Setting Crawler Configuration Options. + type: string + databaseName: + description: Glue database where results are written. + type: string + databaseNameRef: + description: Reference to a CatalogDatabase in glue to populate + databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a CatalogDatabase in glue to populate + databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deltaTarget: + description: List of nested Delta Lake target arguments. See Delta + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + createNativeDeltaTable: + description: Specifies whether the crawler will create native + tables, to allow integration with query engines that support + querying of the Delta transaction log directly. + type: boolean + deltaTables: + description: A list of the Amazon S3 paths to the Delta + tables. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + writeManifest: + description: Specifies whether to write the manifest files + to the Delta table path. + type: boolean + type: object + type: array + description: + description: Description of the crawler. + type: string + dynamodbTarget: + description: List of nested DynamoDB target arguments. See Dynamodb + Target below. + items: + properties: + path: + description: The name of the DynamoDB table to crawl. + type: string + scanAll: + description: Indicates whether to scan all the records, + or to sample rows from the table. Scanning all the records + can take a long time when the table is not a high throughput + table. defaults to true. + type: boolean + scanRate: + description: The percentage of the configured read capacity + units to use by the AWS Glue crawler. The valid values + are null or a value between 0.1 to 1.5. + type: number + type: object + type: array + hudiTarget: + description: List of nested Hudi target arguments. See Iceberg + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + maximumTraversalDepth: + description: The maximum depth of Amazon S3 paths that the + crawler can traverse to discover the Hudi metadata folder + in your Amazon S3 path. Used to limit the crawler run + time. Valid values are between 1 and 20. + type: number + paths: + description: One or more Amazon S3 paths that contains Hudi + metadata folders as s3://bucket/prefix. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + icebergTarget: + description: List of nested Iceberg target arguments. See Iceberg + Target below. 
+ items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + maximumTraversalDepth: + description: The maximum depth of Amazon S3 paths that the + crawler can traverse to discover the Hudi metadata folder + in your Amazon S3 path. Used to limit the crawler run + time. Valid values are between 1 and 20. + type: number + paths: + description: One or more Amazon S3 paths that contains Hudi + metadata folders as s3://bucket/prefix. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + jdbcTarget: + description: List of nested JDBC target arguments. See JDBC Target + below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + connectionNameRef: + description: Reference to a Connection in glue to populate + connectionName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionNameSelector: + description: Selector for a Connection in glue to populate + connectionName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enableAdditionalMetadata: + description: Specify a value of RAWTYPES or COMMENTS to + enable additional metadata intable responses. RAWTYPES + provides the native-level datatype. COMMENTS provides + comments associated with a column or table in the database. + items: + type: string + type: array + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + path: + description: The name of the DynamoDB table to crawl. + type: string + type: object + type: array + lakeFormationConfiguration: + description: Specifies Lake Formation configuration settings for + the crawler. See Lake Formation Configuration below. 
+ properties: + accountId: + description: Required for cross account crawls. For same account + crawls as the target data, this can omitted. + type: string + useLakeFormationCredentials: + description: Specifies whether to use Lake Formation credentials + for the crawler instead of the IAM role credentials. + type: boolean + type: object + lineageConfiguration: + description: Specifies data lineage configuration settings for + the crawler. See Lineage Configuration below. + properties: + crawlerLineageSettings: + description: 'Specifies whether data lineage is enabled for + the crawler. Valid values are: ENABLE and DISABLE. Default + value is DISABLE.' + type: string + type: object + mongodbTarget: + description: List of nested MongoDB target arguments. See MongoDB + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + connectionNameRef: + description: Reference to a Connection in glue to populate + connectionName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionNameSelector: + description: Selector for a Connection in glue to populate + connectionName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + path: + description: The name of the DynamoDB table to crawl. + type: string + scanAll: + description: Indicates whether to scan all the records, + or to sample rows from the table. Scanning all the records + can take a long time when the table is not a high throughput + table. defaults to true. + type: boolean + type: object + type: array + recrawlPolicy: + description: A policy that specifies whether to crawl the entire + dataset again, or to crawl only folders that were added since + the last crawler run.. See Recrawl Policy below. 
+ properties: + recrawlBehavior: + description: 'Specifies whether to crawl the entire dataset + again, crawl only folders that were added since the last + crawler run, or crawl what S3 notifies the crawler of via + SQS. Valid Values are: CRAWL_EVENT_MODE, CRAWL_EVERYTHING + and CRAWL_NEW_FOLDERS_ONLY. Default value is CRAWL_EVERYTHING.' + type: string + type: object + role: + description: The IAM role friendly name (including path without + leading slash), or ARN of an IAM role, used by the crawler to + access other resources. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3Target: + description: List of nested Amazon S3 target arguments. See S3 + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + dlqEventQueueArn: + description: The ARN of the dead-letter SQS queue. + type: string + eventQueueArn: + description: The ARN of the SQS queue to receive S3 notifications + from. + type: string + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + path: + description: The name of the DynamoDB table to crawl. + type: string + sampleSize: + description: Sets the number of files in each leaf folder + to be crawled when crawling sample files in a dataset. + If not set, all the files are crawled. A valid value is + an integer between 1 and 249. + type: number + type: object + type: array + schedule: + description: 'Based Schedules for Jobs and Crawlers. For example, + to run something every day at 12:15 UTC, you would specify: + cron(15 12 * * ? *).' + type: string + schemaChangePolicy: + description: Policy for the crawler's update and deletion behavior. + See Schema Change Policy below. 
+ properties: + deleteBehavior: + description: 'The deletion behavior when the crawler finds + a deleted object. Valid values: LOG, DELETE_FROM_DATABASE, + or DEPRECATE_IN_DATABASE. Defaults to DEPRECATE_IN_DATABASE.' + type: string + updateBehavior: + description: 'The update behavior when the crawler finds a + changed schema. Valid values: LOG or UPDATE_IN_DATABASE. + Defaults to UPDATE_IN_DATABASE.' + type: string + type: object + securityConfiguration: + description: The name of Security Configuration to be used by + the crawler + type: string + tablePrefix: + description: The table prefix used for catalog tables that are + created. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: CrawlerStatus defines the observed state of Crawler. + properties: + atProvider: + properties: + arn: + description: The ARN of the crawler + type: string + catalogTarget: + description: List of nested AWS Glue Data Catalog target arguments. + See Catalog Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + databaseName: + description: Glue database where results are written. + type: string + dlqEventQueueArn: + description: The ARN of the dead-letter SQS queue. + type: string + eventQueueArn: + description: The ARN of the SQS queue to receive S3 notifications + from. + type: string + tables: + description: A list of catalog tables to be synchronized. + items: + type: string + type: array + type: object + type: array + classifiers: + description: List of custom classifiers. By default, all AWS classifiers + are included in a crawl, but these custom classifiers always + override the default classifiers for a given classification. + items: + type: string + type: array + configuration: + description: JSON string of configuration information. For more + details see Setting Crawler Configuration Options. + type: string + databaseName: + description: Glue database where results are written. 
+ type: string + deltaTarget: + description: List of nested Delta Lake target arguments. See Delta + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + createNativeDeltaTable: + description: Specifies whether the crawler will create native + tables, to allow integration with query engines that support + querying of the Delta transaction log directly. + type: boolean + deltaTables: + description: A list of the Amazon S3 paths to the Delta + tables. + items: + type: string + type: array + x-kubernetes-list-type: set + writeManifest: + description: Specifies whether to write the manifest files + to the Delta table path. + type: boolean + type: object + type: array + description: + description: Description of the crawler. + type: string + dynamodbTarget: + description: List of nested DynamoDB target arguments. See Dynamodb + Target below. + items: + properties: + path: + description: The name of the DynamoDB table to crawl. + type: string + scanAll: + description: Indicates whether to scan all the records, + or to sample rows from the table. Scanning all the records + can take a long time when the table is not a high throughput + table. defaults to true. + type: boolean + scanRate: + description: The percentage of the configured read capacity + units to use by the AWS Glue crawler. The valid values + are null or a value between 0.1 to 1.5. + type: number + type: object + type: array + hudiTarget: + description: List of nested Hudi target arguments. See Iceberg + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + exclusions: + description: A list of glob patterns used to exclude from + the crawl. 
+ items: + type: string + type: array + maximumTraversalDepth: + description: The maximum depth of Amazon S3 paths that the + crawler can traverse to discover the Hudi metadata folder + in your Amazon S3 path. Used to limit the crawler run + time. Valid values are between 1 and 20. + type: number + paths: + description: One or more Amazon S3 paths that contains Hudi + metadata folders as s3://bucket/prefix. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + icebergTarget: + description: List of nested Iceberg target arguments. See Iceberg + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + maximumTraversalDepth: + description: The maximum depth of Amazon S3 paths that the + crawler can traverse to discover the Hudi metadata folder + in your Amazon S3 path. Used to limit the crawler run + time. Valid values are between 1 and 20. + type: number + paths: + description: One or more Amazon S3 paths that contains Hudi + metadata folders as s3://bucket/prefix. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + id: + description: Crawler name + type: string + jdbcTarget: + description: List of nested JDBC target arguments. See JDBC Target + below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + enableAdditionalMetadata: + description: Specify a value of RAWTYPES or COMMENTS to + enable additional metadata intable responses. RAWTYPES + provides the native-level datatype. COMMENTS provides + comments associated with a column or table in the database. 
+ items: + type: string + type: array + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + path: + description: The name of the DynamoDB table to crawl. + type: string + type: object + type: array + lakeFormationConfiguration: + description: Specifies Lake Formation configuration settings for + the crawler. See Lake Formation Configuration below. + properties: + accountId: + description: Required for cross account crawls. For same account + crawls as the target data, this can omitted. + type: string + useLakeFormationCredentials: + description: Specifies whether to use Lake Formation credentials + for the crawler instead of the IAM role credentials. + type: boolean + type: object + lineageConfiguration: + description: Specifies data lineage configuration settings for + the crawler. See Lineage Configuration below. + properties: + crawlerLineageSettings: + description: 'Specifies whether data lineage is enabled for + the crawler. Valid values are: ENABLE and DISABLE. Default + value is DISABLE.' + type: string + type: object + mongodbTarget: + description: List of nested MongoDB target arguments. See MongoDB + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + path: + description: The name of the DynamoDB table to crawl. + type: string + scanAll: + description: Indicates whether to scan all the records, + or to sample rows from the table. Scanning all the records + can take a long time when the table is not a high throughput + table. defaults to true. + type: boolean + type: object + type: array + recrawlPolicy: + description: A policy that specifies whether to crawl the entire + dataset again, or to crawl only folders that were added since + the last crawler run.. See Recrawl Policy below. 
+ properties: + recrawlBehavior: + description: 'Specifies whether to crawl the entire dataset + again, crawl only folders that were added since the last + crawler run, or crawl what S3 notifies the crawler of via + SQS. Valid Values are: CRAWL_EVENT_MODE, CRAWL_EVERYTHING + and CRAWL_NEW_FOLDERS_ONLY. Default value is CRAWL_EVERYTHING.' + type: string + type: object + role: + description: The IAM role friendly name (including path without + leading slash), or ARN of an IAM role, used by the crawler to + access other resources. + type: string + s3Target: + description: List of nested Amazon S3 target arguments. See S3 + Target below. + items: + properties: + connectionName: + description: The name of the connection to use to connect + to the JDBC target. + type: string + dlqEventQueueArn: + description: The ARN of the dead-letter SQS queue. + type: string + eventQueueArn: + description: The ARN of the SQS queue to receive S3 notifications + from. + type: string + exclusions: + description: A list of glob patterns used to exclude from + the crawl. + items: + type: string + type: array + path: + description: The name of the DynamoDB table to crawl. + type: string + sampleSize: + description: Sets the number of files in each leaf folder + to be crawled when crawling sample files in a dataset. + If not set, all the files are crawled. A valid value is + an integer between 1 and 249. + type: number + type: object + type: array + schedule: + description: 'Based Schedules for Jobs and Crawlers. For example, + to run something every day at 12:15 UTC, you would specify: + cron(15 12 * * ? *).' + type: string + schemaChangePolicy: + description: Policy for the crawler's update and deletion behavior. + See Schema Change Policy below. + properties: + deleteBehavior: + description: 'The deletion behavior when the crawler finds + a deleted object. Valid values: LOG, DELETE_FROM_DATABASE, + or DEPRECATE_IN_DATABASE. Defaults to DEPRECATE_IN_DATABASE.' 
+ type: string + updateBehavior: + description: 'The update behavior when the crawler finds a + changed schema. Valid values: LOG or UPDATE_IN_DATABASE. + Defaults to UPDATE_IN_DATABASE.' + type: string + type: object + securityConfiguration: + description: The name of Security Configuration to be used by + the crawler + type: string + tablePrefix: + description: The table prefix used for catalog tables that are + created. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glue.aws.upbound.io_datacatalogencryptionsettings.yaml b/package/crds/glue.aws.upbound.io_datacatalogencryptionsettings.yaml index 1c900178b3..a28e4f212c 100644 --- a/package/crds/glue.aws.upbound.io_datacatalogencryptionsettings.yaml +++ b/package/crds/glue.aws.upbound.io_datacatalogencryptionsettings.yaml @@ -815,3 +815,774 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataCatalogEncryptionSettings is the Schema for the DataCatalogEncryptionSettingss + API. Provides a Glue Data Catalog Encryption Settings resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataCatalogEncryptionSettingsSpec defines the desired state + of DataCatalogEncryptionSettings + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + catalogId: + description: – The ID of the Data Catalog to set the security + configuration for. If none is provided, the AWS account ID is + used by default. + type: string + dataCatalogEncryptionSettings: + description: – The security configuration to set. see Data Catalog + Encryption Settings. + properties: + connectionPasswordEncryption: + description: When connection password protection is enabled, + the Data Catalog uses a customer-provided key to encrypt + the password as part of CreateConnection or UpdateConnection + and store it in the ENCRYPTED_PASSWORD field in the connection + properties. 
You can enable catalog encryption or only password + encryption. see Connection Password Encryption. + properties: + awsKmsKeyId: + description: A KMS key ARN that is used to encrypt the + connection password. If connection password protection + is enabled, the caller of CreateConnection and UpdateConnection + needs at least kms:Encrypt permission on the specified + AWS KMS key, to encrypt passwords before storing them + in the Data Catalog. + type: string + awsKmsKeyIdRef: + description: Reference to a Key in kms to populate awsKmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + awsKmsKeyIdSelector: + description: Selector for a Key in kms to populate awsKmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + returnConnectionPasswordEncrypted: + description: When set to true, passwords remain encrypted + in the responses of GetConnection and GetConnections. + This encryption takes effect independently of the catalog + encryption. + type: boolean + type: object + encryptionAtRest: + description: Specifies the encryption-at-rest configuration + for the Data Catalog. see Encryption At Rest. + properties: + catalogEncryptionMode: + description: 'The encryption-at-rest mode for encrypting + Data Catalog data. Valid values: DISABLED, SSE-KMS, + SSE-KMS-WITH-SERVICE-ROLE.' + type: string + catalogEncryptionServiceRole: + description: The ARN of the AWS IAM role used for accessing + encrypted Data Catalog data. + type: string + sseAwsKmsKeyId: + description: The ARN of the AWS KMS key to use for encryption + at rest. + type: string + sseAwsKmsKeyIdRef: + description: Reference to a Key in kms to populate sseAwsKmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sseAwsKmsKeyIdSelector: + description: Selector for a Key in kms to populate sseAwsKmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + catalogId: + description: – The ID of the Data Catalog to set the security + configuration for. If none is provided, the AWS account ID is + used by default. + type: string + dataCatalogEncryptionSettings: + description: – The security configuration to set. see Data Catalog + Encryption Settings. + properties: + connectionPasswordEncryption: + description: When connection password protection is enabled, + the Data Catalog uses a customer-provided key to encrypt + the password as part of CreateConnection or UpdateConnection + and store it in the ENCRYPTED_PASSWORD field in the connection + properties. You can enable catalog encryption or only password + encryption. see Connection Password Encryption. + properties: + awsKmsKeyId: + description: A KMS key ARN that is used to encrypt the + connection password. If connection password protection + is enabled, the caller of CreateConnection and UpdateConnection + needs at least kms:Encrypt permission on the specified + AWS KMS key, to encrypt passwords before storing them + in the Data Catalog. + type: string + awsKmsKeyIdRef: + description: Reference to a Key in kms to populate awsKmsKeyId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + awsKmsKeyIdSelector: + description: Selector for a Key in kms to populate awsKmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + returnConnectionPasswordEncrypted: + description: When set to true, passwords remain encrypted + in the responses of GetConnection and GetConnections. + This encryption takes effect independently of the catalog + encryption. + type: boolean + type: object + encryptionAtRest: + description: Specifies the encryption-at-rest configuration + for the Data Catalog. see Encryption At Rest. + properties: + catalogEncryptionMode: + description: 'The encryption-at-rest mode for encrypting + Data Catalog data. Valid values: DISABLED, SSE-KMS, + SSE-KMS-WITH-SERVICE-ROLE.' + type: string + catalogEncryptionServiceRole: + description: The ARN of the AWS IAM role used for accessing + encrypted Data Catalog data. + type: string + sseAwsKmsKeyId: + description: The ARN of the AWS KMS key to use for encryption + at rest. + type: string + sseAwsKmsKeyIdRef: + description: Reference to a Key in kms to populate sseAwsKmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sseAwsKmsKeyIdSelector: + description: Selector for a Key in kms to populate sseAwsKmsKeyId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dataCatalogEncryptionSettings is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dataCatalogEncryptionSettings) + || (has(self.initProvider) && has(self.initProvider.dataCatalogEncryptionSettings))' + status: + description: DataCatalogEncryptionSettingsStatus defines the observed + state of DataCatalogEncryptionSettings. + properties: + atProvider: + properties: + catalogId: + description: – The ID of the Data Catalog to set the security + configuration for. If none is provided, the AWS account ID is + used by default. + type: string + dataCatalogEncryptionSettings: + description: – The security configuration to set. see Data Catalog + Encryption Settings. 
+ properties: + connectionPasswordEncryption: + description: When connection password protection is enabled, + the Data Catalog uses a customer-provided key to encrypt + the password as part of CreateConnection or UpdateConnection + and store it in the ENCRYPTED_PASSWORD field in the connection + properties. You can enable catalog encryption or only password + encryption. see Connection Password Encryption. + properties: + awsKmsKeyId: + description: A KMS key ARN that is used to encrypt the + connection password. If connection password protection + is enabled, the caller of CreateConnection and UpdateConnection + needs at least kms:Encrypt permission on the specified + AWS KMS key, to encrypt passwords before storing them + in the Data Catalog. + type: string + returnConnectionPasswordEncrypted: + description: When set to true, passwords remain encrypted + in the responses of GetConnection and GetConnections. + This encryption takes effect independently of the catalog + encryption. + type: boolean + type: object + encryptionAtRest: + description: Specifies the encryption-at-rest configuration + for the Data Catalog. see Encryption At Rest. + properties: + catalogEncryptionMode: + description: 'The encryption-at-rest mode for encrypting + Data Catalog data. Valid values: DISABLED, SSE-KMS, + SSE-KMS-WITH-SERVICE-ROLE.' + type: string + catalogEncryptionServiceRole: + description: The ARN of the AWS IAM role used for accessing + encrypted Data Catalog data. + type: string + sseAwsKmsKeyId: + description: The ARN of the AWS KMS key to use for encryption + at rest. + type: string + type: object + type: object + id: + description: The ID of the Data Catalog to set the security configuration + for. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glue.aws.upbound.io_jobs.yaml b/package/crds/glue.aws.upbound.io_jobs.yaml index 05584a3db7..2b69450ddc 100644 --- a/package/crds/glue.aws.upbound.io_jobs.yaml +++ b/package/crds/glue.aws.upbound.io_jobs.yaml @@ -860,3 +860,827 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Job is the Schema for the Jobs API. Provides an Glue Job resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: JobSpec defines the desired state of Job + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + command: + description: – The command of the job. Defined below. + properties: + name: + description: – The name you assign to this job. It must be + unique in your account. + type: string + pythonVersion: + description: The Python version being used to execute a Python + shell job. Allowed values are 2, 3 or 3.9. Version 3 refers + to Python 3.6. + type: string + runtime: + description: In Ray jobs, runtime is used to specify the versions + of Ray, Python and additional libraries available in your + environment. This field is not used in other job types. + For supported runtime environment values, see Working with + Ray jobs in the Glue Developer Guide. + type: string + scriptLocation: + description: Specifies the S3 path to a script that executes + a job. + type: string + type: object + connections: + description: – The list of connections used for this job. + items: + type: string + type: array + defaultArguments: + additionalProperties: + type: string + description: execution script consumes, as well as arguments that + AWS Glue itself consumes. For information about how to specify + and consume your own Job arguments, see the Calling AWS Glue + APIs in Python topic in the developer guide. For information + about the key-value pairs that AWS Glue consumes to set up your + job, see the Special Parameters Used by AWS Glue topic in the + developer guide. + type: object + x-kubernetes-map-type: granular + description: + description: – Description of the job. 
+ type: string + executionClass: + description: 'Indicates whether the job is run with a standard + or flexible execution class. The standard execution class is + ideal for time-sensitive workloads that require fast job startup + and dedicated resources. Valid value: FLEX, STANDARD.' + type: string + executionProperty: + description: – Execution property of the job. Defined below. + properties: + maxConcurrentRuns: + description: The maximum number of concurrent runs allowed + for a job. The default is 1. + type: number + type: object + glueVersion: + description: The version of glue to use, for example "1.0". Ray + jobs should set this to 4.0 or greater. For information about + available versions, see the AWS Glue Release Notes. + type: string + maxCapacity: + description: – The maximum number of AWS Glue data processing + units (DPUs) that can be allocated when this job runs. Required + when pythonshell is set, accept either 0.0625 or 1.0. Use number_of_workers + and worker_type arguments instead with glue_version 2.0 and + above. + type: number + maxRetries: + description: – The maximum number of times to retry this job + if it fails. + type: number + nonOverridableArguments: + additionalProperties: + type: string + description: overridable arguments for this job, specified as + name-value pairs. + type: object + x-kubernetes-map-type: granular + notificationProperty: + description: Notification property of the job. Defined below. + properties: + notifyDelayAfter: + description: After a job run starts, the number of minutes + to wait before sending a job run delay notification. + type: number + type: object + numberOfWorkers: + description: The number of workers of a defined workerType that + are allocated when a job runs. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: – The ARN of the IAM role associated with this job. 
+ type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityConfiguration: + description: The name of the Security Configuration to be associated + with the job. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timeout: + description: – The job timeout in minutes. The default is 2880 + minutes (48 hours) for glueetl and pythonshell jobs, and null + (unlimited) for gluestreaming jobs. + type: number + workerType: + description: The type of predefined worker that is allocated when + a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X + for Spark jobs. Accepts the value Z.2X for Ray jobs. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + command: + description: – The command of the job. Defined below. + properties: + name: + description: – The name you assign to this job. It must be + unique in your account. + type: string + pythonVersion: + description: The Python version being used to execute a Python + shell job. Allowed values are 2, 3 or 3.9. 
Version 3 refers + to Python 3.6. + type: string + runtime: + description: In Ray jobs, runtime is used to specify the versions + of Ray, Python and additional libraries available in your + environment. This field is not used in other job types. + For supported runtime environment values, see Working with + Ray jobs in the Glue Developer Guide. + type: string + scriptLocation: + description: Specifies the S3 path to a script that executes + a job. + type: string + type: object + connections: + description: – The list of connections used for this job. + items: + type: string + type: array + defaultArguments: + additionalProperties: + type: string + description: execution script consumes, as well as arguments that + AWS Glue itself consumes. For information about how to specify + and consume your own Job arguments, see the Calling AWS Glue + APIs in Python topic in the developer guide. For information + about the key-value pairs that AWS Glue consumes to set up your + job, see the Special Parameters Used by AWS Glue topic in the + developer guide. + type: object + x-kubernetes-map-type: granular + description: + description: – Description of the job. + type: string + executionClass: + description: 'Indicates whether the job is run with a standard + or flexible execution class. The standard execution class is + ideal for time-sensitive workloads that require fast job startup + and dedicated resources. Valid value: FLEX, STANDARD.' + type: string + executionProperty: + description: – Execution property of the job. Defined below. + properties: + maxConcurrentRuns: + description: The maximum number of concurrent runs allowed + for a job. The default is 1. + type: number + type: object + glueVersion: + description: The version of glue to use, for example "1.0". Ray + jobs should set this to 4.0 or greater. For information about + available versions, see the AWS Glue Release Notes. 
+ type: string + maxCapacity: + description: – The maximum number of AWS Glue data processing + units (DPUs) that can be allocated when this job runs. Required + when pythonshell is set, accept either 0.0625 or 1.0. Use number_of_workers + and worker_type arguments instead with glue_version 2.0 and + above. + type: number + maxRetries: + description: – The maximum number of times to retry this job + if it fails. + type: number + nonOverridableArguments: + additionalProperties: + type: string + description: overridable arguments for this job, specified as + name-value pairs. + type: object + x-kubernetes-map-type: granular + notificationProperty: + description: Notification property of the job. Defined below. + properties: + notifyDelayAfter: + description: After a job run starts, the number of minutes + to wait before sending a job run delay notification. + type: number + type: object + numberOfWorkers: + description: The number of workers of a defined workerType that + are allocated when a job runs. + type: number + roleArn: + description: – The ARN of the IAM role associated with this job. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityConfiguration: + description: The name of the Security Configuration to be associated + with the job. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timeout: + description: – The job timeout in minutes. The default is 2880 + minutes (48 hours) for glueetl and pythonshell jobs, and null + (unlimited) for gluestreaming jobs. + type: number + workerType: + description: The type of predefined worker that is allocated when + a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X + for Spark jobs. Accepts the value Z.2X for Ray jobs. 
+ type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.command is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.command) + || (has(self.initProvider) && has(self.initProvider.command))' + status: + description: JobStatus defines the observed state of Job. 
+ properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of Glue Job + type: string + command: + description: – The command of the job. Defined below. + properties: + name: + description: – The name you assign to this job. It must be + unique in your account. + type: string + pythonVersion: + description: The Python version being used to execute a Python + shell job. Allowed values are 2, 3 or 3.9. Version 3 refers + to Python 3.6. + type: string + runtime: + description: In Ray jobs, runtime is used to specify the versions + of Ray, Python and additional libraries available in your + environment. This field is not used in other job types. + For supported runtime environment values, see Working with + Ray jobs in the Glue Developer Guide. + type: string + scriptLocation: + description: Specifies the S3 path to a script that executes + a job. + type: string + type: object + connections: + description: – The list of connections used for this job. + items: + type: string + type: array + defaultArguments: + additionalProperties: + type: string + description: execution script consumes, as well as arguments that + AWS Glue itself consumes. For information about how to specify + and consume your own Job arguments, see the Calling AWS Glue + APIs in Python topic in the developer guide. For information + about the key-value pairs that AWS Glue consumes to set up your + job, see the Special Parameters Used by AWS Glue topic in the + developer guide. + type: object + x-kubernetes-map-type: granular + description: + description: – Description of the job. + type: string + executionClass: + description: 'Indicates whether the job is run with a standard + or flexible execution class. The standard execution class is + ideal for time-sensitive workloads that require fast job startup + and dedicated resources. Valid value: FLEX, STANDARD.' + type: string + executionProperty: + description: – Execution property of the job. Defined below. 
+ properties: + maxConcurrentRuns: + description: The maximum number of concurrent runs allowed + for a job. The default is 1. + type: number + type: object + glueVersion: + description: The version of glue to use, for example "1.0". Ray + jobs should set this to 4.0 or greater. For information about + available versions, see the AWS Glue Release Notes. + type: string + id: + description: Job name + type: string + maxCapacity: + description: – The maximum number of AWS Glue data processing + units (DPUs) that can be allocated when this job runs. Required + when pythonshell is set, accept either 0.0625 or 1.0. Use number_of_workers + and worker_type arguments instead with glue_version 2.0 and + above. + type: number + maxRetries: + description: – The maximum number of times to retry this job + if it fails. + type: number + nonOverridableArguments: + additionalProperties: + type: string + description: overridable arguments for this job, specified as + name-value pairs. + type: object + x-kubernetes-map-type: granular + notificationProperty: + description: Notification property of the job. Defined below. + properties: + notifyDelayAfter: + description: After a job run starts, the number of minutes + to wait before sending a job run delay notification. + type: number + type: object + numberOfWorkers: + description: The number of workers of a defined workerType that + are allocated when a job runs. + type: number + roleArn: + description: – The ARN of the IAM role associated with this job. + type: string + securityConfiguration: + description: The name of the Security Configuration to be associated + with the job. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + timeout: + description: – The job timeout in minutes. The default is 2880 + minutes (48 hours) for glueetl and pythonshell jobs, and null + (unlimited) for gluestreaming jobs. + type: number + workerType: + description: The type of predefined worker that is allocated when + a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X + for Spark jobs. Accepts the value Z.2X for Ray jobs. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glue.aws.upbound.io_securityconfigurations.yaml b/package/crds/glue.aws.upbound.io_securityconfigurations.yaml index d2dffe5f74..f77ea86e5d 100644 --- a/package/crds/glue.aws.upbound.io_securityconfigurations.yaml +++ b/package/crds/glue.aws.upbound.io_securityconfigurations.yaml @@ -946,3 +946,889 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SecurityConfiguration is the Schema for the SecurityConfigurations + API. Manages a Glue Security Configuration + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecurityConfigurationSpec defines the desired state of SecurityConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + encryptionConfiguration: + description: – Configuration block containing encryption configuration. + Detailed below. + properties: + cloudwatchEncryption: + properties: + cloudwatchEncryptionMode: + description: 'Encryption mode to use for CloudWatch data. + Valid values: DISABLED, SSE-KMS. Default value: DISABLED.' + type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of the KMS key + to be used to encrypt the data. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + jobBookmarksEncryption: + properties: + jobBookmarksEncryptionMode: + description: 'Encryption mode to use for job bookmarks + data. Valid values: CSE-KMS, DISABLED. Default value: + DISABLED.' 
+ type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of the KMS key + to be used to encrypt the data. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + s3Encryption: + description: A s3_encryption block as described below, which + contains encryption configuration for S3 data. + properties: + kmsKeyArn: + description: Amazon Resource Name (ARN) of the KMS key + to be used to encrypt the data. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3EncryptionMode: + description: 'Encryption mode to use for S3 data. Valid + values: DISABLED, SSE-KMS, SSE-S3. Default value: DISABLED.' + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + encryptionConfiguration: + description: – Configuration block containing encryption configuration. + Detailed below. + properties: + cloudwatchEncryption: + properties: + cloudwatchEncryptionMode: + description: 'Encryption mode to use for CloudWatch data. + Valid values: DISABLED, SSE-KMS. Default value: DISABLED.' + type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of the KMS key + to be used to encrypt the data. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + jobBookmarksEncryption: + properties: + jobBookmarksEncryptionMode: + description: 'Encryption mode to use for job bookmarks + data. Valid values: CSE-KMS, DISABLED. Default value: + DISABLED.' + type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of the KMS key + to be used to encrypt the data. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + s3Encryption: + description: A s3_encryption block as described below, which + contains encryption configuration for S3 data. + properties: + kmsKeyArn: + description: Amazon Resource Name (ARN) of the KMS key + to be used to encrypt the data. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3EncryptionMode: + description: 'Encryption mode to use for S3 data. Valid + values: DISABLED, SSE-KMS, SSE-S3. Default value: DISABLED.' 
+ type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.encryptionConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.encryptionConfiguration) + || (has(self.initProvider) && has(self.initProvider.encryptionConfiguration))' + status: + description: SecurityConfigurationStatus defines the observed state of + SecurityConfiguration. 
+ properties: + atProvider: + properties: + encryptionConfiguration: + description: – Configuration block containing encryption configuration. + Detailed below. + properties: + cloudwatchEncryption: + properties: + cloudwatchEncryptionMode: + description: 'Encryption mode to use for CloudWatch data. + Valid values: DISABLED, SSE-KMS. Default value: DISABLED.' + type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of the KMS key + to be used to encrypt the data. + type: string + type: object + jobBookmarksEncryption: + properties: + jobBookmarksEncryptionMode: + description: 'Encryption mode to use for job bookmarks + data. Valid values: CSE-KMS, DISABLED. Default value: + DISABLED.' + type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of the KMS key + to be used to encrypt the data. + type: string + type: object + s3Encryption: + description: A s3_encryption block as described below, which + contains encryption configuration for S3 data. + properties: + kmsKeyArn: + description: Amazon Resource Name (ARN) of the KMS key + to be used to encrypt the data. + type: string + s3EncryptionMode: + description: 'Encryption mode to use for S3 data. Valid + values: DISABLED, SSE-KMS, SSE-S3. Default value: DISABLED.' + type: string + type: object + type: object + id: + description: Glue security configuration name + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/glue.aws.upbound.io_triggers.yaml b/package/crds/glue.aws.upbound.io_triggers.yaml index 7fdb65d215..4e6bf6b95b 100644 --- a/package/crds/glue.aws.upbound.io_triggers.yaml +++ b/package/crds/glue.aws.upbound.io_triggers.yaml @@ -1350,3 +1350,1323 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Trigger is the Schema for the Triggers API. Manages a Glue Trigger + resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TriggerSpec defines the desired state of Trigger + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + actions: + description: – List of actions initiated by this trigger when + it fires. See Actions Below. + items: + properties: + arguments: + additionalProperties: + type: string + description: Arguments to be passed to the job. You can + specify arguments here that your own job-execution script + consumes, as well as arguments that AWS Glue itself consumes. + type: object + x-kubernetes-map-type: granular + crawlerName: + description: The name of the crawler to be executed. 
Conflicts + with job_name. + type: string + crawlerNameRef: + description: Reference to a Crawler in glue to populate + crawlerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + crawlerNameSelector: + description: Selector for a Crawler in glue to populate + crawlerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + jobName: + description: The name of a job to be executed. Conflicts + with crawler_name. + type: string + jobNameRef: + description: Reference to a Job in glue to populate jobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + jobNameSelector: + description: Selector for a Job in glue to populate jobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + notificationProperty: + description: Specifies configuration properties of a job + run notification. See Notification Property details below. + properties: + notifyDelayAfter: + description: After a job run starts, the number of minutes + to wait before sending a job run delay notification. + type: number + type: object + securityConfiguration: + description: The name of the Security Configuration structure + to be used with this action. + type: string + timeout: + description: The job run timeout in minutes. It overrides + the timeout value of the job. + type: number + type: object + type: array + description: + description: – A description of the new trigger. + type: string + enabled: + description: – Start the trigger. Defaults to true. + type: boolean + eventBatchingCondition: + description: Batch condition that must be met (specified number + of events received or batch time window expired) before EventBridge + event trigger fires. See Event Batching Condition. + items: + properties: + batchSize: + description: Number of events that must be received from + Amazon EventBridge before EventBridge event trigger fires. + type: number + batchWindow: + description: Window of time in seconds after which EventBridge + event trigger fires. Window starts when first event is + received. Default value is 900. 
+ type: number + type: object + type: array + predicate: + description: – A predicate to specify when the new trigger should + fire. Required when trigger type is CONDITIONAL. See Predicate + Below. + properties: + conditions: + description: A list of the conditions that determine when + the trigger will fire. See Conditions. + items: + properties: + crawlState: + description: The condition crawl state. Currently, the + values supported are RUNNING, SUCCEEDED, CANCELLED, + and FAILED. If this is specified, crawler_name must + also be specified. Conflicts with state. + type: string + crawlerName: + description: The name of the crawler to be executed. + Conflicts with job_name. + type: string + crawlerNameRef: + description: Reference to a Crawler in glue to populate + crawlerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + crawlerNameSelector: + description: Selector for a Crawler in glue to populate + crawlerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + jobName: + description: The name of a job to be executed. Conflicts + with crawler_name. + type: string + jobNameRef: + description: Reference to a Job in glue to populate + jobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + jobNameSelector: + description: Selector for a Job in glue to populate + jobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logicalOperator: + description: A logical operator. Defaults to EQUALS. + type: string + state: + description: The condition job state. Currently, the + values supported are SUCCEEDED, STOPPED, TIMEOUT and + FAILED. If this is specified, job_name must also be + specified. Conflicts with crawler_state. + type: string + type: object + type: array + logical: + description: How to handle multiple conditions. Defaults to + AND. Valid values are AND or ANY. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + schedule: + description: Based Schedules for Jobs and Crawlers + type: string + startOnCreation: + description: – Set to true to start SCHEDULED and CONDITIONAL + triggers when created. True is not supported for ON_DEMAND triggers. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: – The type of trigger. Valid values are CONDITIONAL, + EVENT, ON_DEMAND, and SCHEDULED. + type: string + workflowName: + description: A workflow to which the trigger should be associated + to. Every workflow graph (DAG) needs a starting trigger (ON_DEMAND + or SCHEDULED type) and can contain multiple additional CONDITIONAL + triggers. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + actions: + description: – List of actions initiated by this trigger when + it fires. See Actions Below. + items: + properties: + arguments: + additionalProperties: + type: string + description: Arguments to be passed to the job. You can + specify arguments here that your own job-execution script + consumes, as well as arguments that AWS Glue itself consumes. + type: object + x-kubernetes-map-type: granular + crawlerName: + description: The name of the crawler to be executed. 
Conflicts + with job_name. + type: string + crawlerNameRef: + description: Reference to a Crawler in glue to populate + crawlerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + crawlerNameSelector: + description: Selector for a Crawler in glue to populate + crawlerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + jobName: + description: The name of a job to be executed. Conflicts + with crawler_name. + type: string + jobNameRef: + description: Reference to a Job in glue to populate jobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + jobNameSelector: + description: Selector for a Job in glue to populate jobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + notificationProperty: + description: Specifies configuration properties of a job + run notification. See Notification Property details below. + properties: + notifyDelayAfter: + description: After a job run starts, the number of minutes + to wait before sending a job run delay notification. + type: number + type: object + securityConfiguration: + description: The name of the Security Configuration structure + to be used with this action. + type: string + timeout: + description: The job run timeout in minutes. It overrides + the timeout value of the job. + type: number + type: object + type: array + description: + description: – A description of the new trigger. + type: string + enabled: + description: – Start the trigger. Defaults to true. + type: boolean + eventBatchingCondition: + description: Batch condition that must be met (specified number + of events received or batch time window expired) before EventBridge + event trigger fires. See Event Batching Condition. + items: + properties: + batchSize: + description: Number of events that must be received from + Amazon EventBridge before EventBridge event trigger fires. + type: number + batchWindow: + description: Window of time in seconds after which EventBridge + event trigger fires. Window starts when first event is + received. Default value is 900. 
+ type: number + type: object + type: array + predicate: + description: – A predicate to specify when the new trigger should + fire. Required when trigger type is CONDITIONAL. See Predicate + Below. + properties: + conditions: + description: A list of the conditions that determine when + the trigger will fire. See Conditions. + items: + properties: + crawlState: + description: The condition crawl state. Currently, the + values supported are RUNNING, SUCCEEDED, CANCELLED, + and FAILED. If this is specified, crawler_name must + also be specified. Conflicts with state. + type: string + crawlerName: + description: The name of the crawler to be executed. + Conflicts with job_name. + type: string + crawlerNameRef: + description: Reference to a Crawler in glue to populate + crawlerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + crawlerNameSelector: + description: Selector for a Crawler in glue to populate + crawlerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + jobName: + description: The name of a job to be executed. Conflicts + with crawler_name. + type: string + jobNameRef: + description: Reference to a Job in glue to populate + jobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + jobNameSelector: + description: Selector for a Job in glue to populate + jobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logicalOperator: + description: A logical operator. Defaults to EQUALS. + type: string + state: + description: The condition job state. Currently, the + values supported are SUCCEEDED, STOPPED, TIMEOUT and + FAILED. If this is specified, job_name must also be + specified. Conflicts with crawler_state. + type: string + type: object + type: array + logical: + description: How to handle multiple conditions. Defaults to + AND. Valid values are AND or ANY. + type: string + type: object + schedule: + description: Based Schedules for Jobs and Crawlers + type: string + startOnCreation: + description: – Set to true to start SCHEDULED and CONDITIONAL + triggers when created. 
True is not supported for ON_DEMAND triggers. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: – The type of trigger. Valid values are CONDITIONAL, + EVENT, ON_DEMAND, and SCHEDULED. + type: string + workflowName: + description: A workflow to which the trigger should be associated + to. Every workflow graph (DAG) needs a starting trigger (ON_DEMAND + or SCHEDULED type) and can contain multiple additional CONDITIONAL + triggers. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.actions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.actions) + || (has(self.initProvider) && has(self.initProvider.actions))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: TriggerStatus defines the observed state of Trigger. + properties: + atProvider: + properties: + actions: + description: – List of actions initiated by this trigger when + it fires. See Actions Below. + items: + properties: + arguments: + additionalProperties: + type: string + description: Arguments to be passed to the job. You can + specify arguments here that your own job-execution script + consumes, as well as arguments that AWS Glue itself consumes. + type: object + x-kubernetes-map-type: granular + crawlerName: + description: The name of the crawler to be executed. Conflicts + with job_name. + type: string + jobName: + description: The name of a job to be executed. Conflicts + with crawler_name. + type: string + notificationProperty: + description: Specifies configuration properties of a job + run notification. See Notification Property details below. + properties: + notifyDelayAfter: + description: After a job run starts, the number of minutes + to wait before sending a job run delay notification. + type: number + type: object + securityConfiguration: + description: The name of the Security Configuration structure + to be used with this action. 
+ type: string + timeout: + description: The job run timeout in minutes. It overrides + the timeout value of the job. + type: number + type: object + type: array + arn: + description: Amazon Resource Name (ARN) of Glue Trigger + type: string + description: + description: – A description of the new trigger. + type: string + enabled: + description: – Start the trigger. Defaults to true. + type: boolean + eventBatchingCondition: + description: Batch condition that must be met (specified number + of events received or batch time window expired) before EventBridge + event trigger fires. See Event Batching Condition. + items: + properties: + batchSize: + description: Number of events that must be received from + Amazon EventBridge before EventBridge event trigger fires. + type: number + batchWindow: + description: Window of time in seconds after which EventBridge + event trigger fires. Window starts when first event is + received. Default value is 900. + type: number + type: object + type: array + id: + description: Trigger name + type: string + predicate: + description: – A predicate to specify when the new trigger should + fire. Required when trigger type is CONDITIONAL. See Predicate + Below. + properties: + conditions: + description: A list of the conditions that determine when + the trigger will fire. See Conditions. + items: + properties: + crawlState: + description: The condition crawl state. Currently, the + values supported are RUNNING, SUCCEEDED, CANCELLED, + and FAILED. If this is specified, crawler_name must + also be specified. Conflicts with state. + type: string + crawlerName: + description: The name of the crawler to be executed. + Conflicts with job_name. + type: string + jobName: + description: The name of a job to be executed. Conflicts + with crawler_name. + type: string + logicalOperator: + description: A logical operator. Defaults to EQUALS. + type: string + state: + description: The condition job state. 
Currently, the + values supported are SUCCEEDED, STOPPED, TIMEOUT and + FAILED. If this is specified, job_name must also be + specified. Conflicts with crawler_state. + type: string + type: object + type: array + logical: + description: How to handle multiple conditions. Defaults to + AND. Valid values are AND or ANY. + type: string + type: object + schedule: + description: Based Schedules for Jobs and Crawlers + type: string + startOnCreation: + description: – Set to true to start SCHEDULED and CONDITIONAL + triggers when created. True is not supported for ON_DEMAND triggers. + type: boolean + state: + description: The condition job state. Currently, the values supported + are SUCCEEDED, STOPPED, TIMEOUT and FAILED. If this is specified, + job_name must also be specified. Conflicts with crawler_state. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: + description: – The type of trigger. Valid values are CONDITIONAL, + EVENT, ON_DEMAND, and SCHEDULED. + type: string + workflowName: + description: A workflow to which the trigger should be associated + to. Every workflow graph (DAG) needs a starting trigger (ON_DEMAND + or SCHEDULED type) and can contain multiple additional CONDITIONAL + triggers. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/grafana.aws.upbound.io_workspaces.yaml b/package/crds/grafana.aws.upbound.io_workspaces.yaml index cc30764a83..76586c5e6d 100644 --- a/package/crds/grafana.aws.upbound.io_workspaces.yaml +++ b/package/crds/grafana.aws.upbound.io_workspaces.yaml @@ -857,3 +857,830 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workspace is the Schema for the Workspaces API. Provides an Amazon + Managed Grafana workspace resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkspaceSpec defines the desired state of Workspace + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountAccessType: + description: The type of account access for the workspace. Valid + values are CURRENT_ACCOUNT and ORGANIZATION. If ORGANIZATION + is specified, then organizational_units must also be present. + type: string + authenticationProviders: + description: The authentication providers for the workspace. Valid + values are AWS_SSO, SAML, or both. + items: + type: string + type: array + configuration: + description: The configuration string for the workspace that you + create. For more information about the format and configuration + options available, see Working in your Grafana workspace. + type: string + dataSources: + description: The data sources for the workspace. Valid values + are AMAZON_OPENSEARCH_SERVICE, ATHENA, CLOUDWATCH, PROMETHEUS, + REDSHIFT, SITEWISE, TIMESTREAM, XRAY + items: + type: string + type: array + description: + description: The workspace description. + type: string + grafanaVersion: + description: Specifies the version of Grafana to support in the + new workspace. 
Supported values are 8.4, 9.4 and 10.4. If not + specified, defaults to 9.4. + type: string + name: + description: The Grafana workspace name. + type: string + networkAccessControl: + description: Configuration for network access to your workspace.See + Network Access Control below. + properties: + prefixListIds: + description: '- An array of prefix list IDs.' + items: + type: string + type: array + x-kubernetes-list-type: set + vpceIds: + description: '- An array of Amazon VPC endpoint IDs for the + workspace. The only VPC endpoints that can be specified + here are interface VPC endpoints for Grafana workspaces + (using the com.amazonaws.[region].grafana-workspace service + endpoint). Other VPC endpoints will be ignored.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + notificationDestinations: + description: The notification destinations. If a data source is + specified here, Amazon Managed Grafana will create IAM roles + and permissions needed to use these destinations. Must be set + to SNS. + items: + type: string + type: array + organizationRoleName: + description: The role name that the workspace uses to access resources + through Amazon Organizations. + type: string + organizationalUnits: + description: The Amazon Organizations organizational units that + the workspace is authorized to use data sources from. + items: + type: string + type: array + permissionType: + description: The permission type of the workspace. If SERVICE_MANAGED + is specified, the IAM roles and IAM policy attachments are generated + automatically. If CUSTOMER_MANAGED is specified, the IAM roles + and IAM policy attachments will not be created. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The IAM role ARN that the workspace assumes. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stackSetName: + description: The AWS CloudFormation stack set name that provisions + IAM roles to be used by the workspace. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfiguration: + description: The configuration settings for an Amazon VPC that + contains data sources for your Grafana workspace to connect + to. See VPC Configuration below. + properties: + securityGroupIds: + description: '- The list of Amazon EC2 security group IDs + attached to the Amazon VPC for your Grafana workspace to + connect.' + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: '- The list of Amazon EC2 subnet IDs created + in the Amazon VPC for your Grafana workspace to connect.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountAccessType: + description: The type of account access for the workspace. Valid + values are CURRENT_ACCOUNT and ORGANIZATION. If ORGANIZATION + is specified, then organizational_units must also be present. 
+ type: string + authenticationProviders: + description: The authentication providers for the workspace. Valid + values are AWS_SSO, SAML, or both. + items: + type: string + type: array + configuration: + description: The configuration string for the workspace that you + create. For more information about the format and configuration + options available, see Working in your Grafana workspace. + type: string + dataSources: + description: The data sources for the workspace. Valid values + are AMAZON_OPENSEARCH_SERVICE, ATHENA, CLOUDWATCH, PROMETHEUS, + REDSHIFT, SITEWISE, TIMESTREAM, XRAY + items: + type: string + type: array + description: + description: The workspace description. + type: string + grafanaVersion: + description: Specifies the version of Grafana to support in the + new workspace. Supported values are 8.4, 9.4 and 10.4. If not + specified, defaults to 9.4. + type: string + name: + description: The Grafana workspace name. + type: string + networkAccessControl: + description: Configuration for network access to your workspace.See + Network Access Control below. + properties: + prefixListIds: + description: '- An array of prefix list IDs.' + items: + type: string + type: array + x-kubernetes-list-type: set + vpceIds: + description: '- An array of Amazon VPC endpoint IDs for the + workspace. The only VPC endpoints that can be specified + here are interface VPC endpoints for Grafana workspaces + (using the com.amazonaws.[region].grafana-workspace service + endpoint). Other VPC endpoints will be ignored.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + notificationDestinations: + description: The notification destinations. If a data source is + specified here, Amazon Managed Grafana will create IAM roles + and permissions needed to use these destinations. Must be set + to SNS. 
+ items: + type: string + type: array + organizationRoleName: + description: The role name that the workspace uses to access resources + through Amazon Organizations. + type: string + organizationalUnits: + description: The Amazon Organizations organizational units that + the workspace is authorized to use data sources from. + items: + type: string + type: array + permissionType: + description: The permission type of the workspace. If SERVICE_MANAGED + is specified, the IAM roles and IAM policy attachments are generated + automatically. If CUSTOMER_MANAGED is specified, the IAM roles + and IAM policy attachments will not be created. + type: string + roleArn: + description: The IAM role ARN that the workspace assumes. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stackSetName: + description: The AWS CloudFormation stack set name that provisions + IAM roles to be used by the workspace. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfiguration: + description: The configuration settings for an Amazon VPC that + contains data sources for your Grafana workspace to connect + to. See VPC Configuration below. + properties: + securityGroupIds: + description: '- The list of Amazon EC2 security group IDs + attached to the Amazon VPC for your Grafana workspace to + connect.' + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: '- The list of Amazon EC2 subnet IDs created + in the Amazon VPC for your Grafana workspace to connect.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.accountAccessType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.accountAccessType) + || (has(self.initProvider) && has(self.initProvider.accountAccessType))' + - message: spec.forProvider.authenticationProviders is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authenticationProviders) + || (has(self.initProvider) && has(self.initProvider.authenticationProviders))' + - message: spec.forProvider.permissionType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.permissionType) + || (has(self.initProvider) && has(self.initProvider.permissionType))' + status: + description: WorkspaceStatus defines the observed state of Workspace. + properties: + atProvider: + properties: + accountAccessType: + description: The type of account access for the workspace. Valid + values are CURRENT_ACCOUNT and ORGANIZATION. If ORGANIZATION + is specified, then organizational_units must also be present. + type: string + arn: + description: The Amazon Resource Name (ARN) of the Grafana workspace. + type: string + authenticationProviders: + description: The authentication providers for the workspace. Valid + values are AWS_SSO, SAML, or both. + items: + type: string + type: array + configuration: + description: The configuration string for the workspace that you + create. For more information about the format and configuration + options available, see Working in your Grafana workspace. 
+ type: string + dataSources: + description: The data sources for the workspace. Valid values + are AMAZON_OPENSEARCH_SERVICE, ATHENA, CLOUDWATCH, PROMETHEUS, + REDSHIFT, SITEWISE, TIMESTREAM, XRAY + items: + type: string + type: array + description: + description: The workspace description. + type: string + endpoint: + description: The endpoint of the Grafana workspace. + type: string + grafanaVersion: + description: Specifies the version of Grafana to support in the + new workspace. Supported values are 8.4, 9.4 and 10.4. If not + specified, defaults to 9.4. + type: string + id: + type: string + name: + description: The Grafana workspace name. + type: string + networkAccessControl: + description: Configuration for network access to your workspace.See + Network Access Control below. + properties: + prefixListIds: + description: '- An array of prefix list IDs.' + items: + type: string + type: array + x-kubernetes-list-type: set + vpceIds: + description: '- An array of Amazon VPC endpoint IDs for the + workspace. The only VPC endpoints that can be specified + here are interface VPC endpoints for Grafana workspaces + (using the com.amazonaws.[region].grafana-workspace service + endpoint). Other VPC endpoints will be ignored.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + notificationDestinations: + description: The notification destinations. If a data source is + specified here, Amazon Managed Grafana will create IAM roles + and permissions needed to use these destinations. Must be set + to SNS. + items: + type: string + type: array + organizationRoleName: + description: The role name that the workspace uses to access resources + through Amazon Organizations. + type: string + organizationalUnits: + description: The Amazon Organizations organizational units that + the workspace is authorized to use data sources from. + items: + type: string + type: array + permissionType: + description: The permission type of the workspace. 
If SERVICE_MANAGED + is specified, the IAM roles and IAM policy attachments are generated + automatically. If CUSTOMER_MANAGED is specified, the IAM roles + and IAM policy attachments will not be created. + type: string + roleArn: + description: The IAM role ARN that the workspace assumes. + type: string + samlConfigurationStatus: + type: string + stackSetName: + description: The AWS CloudFormation stack set name that provisions + IAM roles to be used by the workspace. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + vpcConfiguration: + description: The configuration settings for an Amazon VPC that + contains data sources for your Grafana workspace to connect + to. See VPC Configuration below. + properties: + securityGroupIds: + description: '- The list of Amazon EC2 security group IDs + attached to the Amazon VPC for your Grafana workspace to + connect.' + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: '- The list of Amazon EC2 subnet IDs created + in the Amazon VPC for your Grafana workspace to connect.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/guardduty.aws.upbound.io_detectors.yaml b/package/crds/guardduty.aws.upbound.io_detectors.yaml index 531f7b773d..cf79ce60fb 100644 --- a/package/crds/guardduty.aws.upbound.io_detectors.yaml +++ b/package/crds/guardduty.aws.upbound.io_detectors.yaml @@ -615,3 +615,558 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Detector is the Schema for the Detectors API. Provides a resource + to manage an Amazon GuardDuty detector + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DetectorSpec defines the desired state of Detector + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + datasources: + description: Describes which data sources will be enabled for + the detector. See Data Sources below for more details. Deprecated + in favor of aws_guardduty_detector_feature resources. + properties: + kubernetes: + description: |- + Configures Kubernetes protection. + See Kubernetes and Kubernetes Audit Logs below for more details. + properties: + auditLogs: + description: |- + Configures Kubernetes audit logs as a data source for Kubernetes protection. + See Kubernetes Audit Logs below for more details. + properties: + enable: + description: |- + If true, enables Malware Protection as data source for the detector. + Defaults to true. + type: boolean + type: object + type: object + malwareProtection: + description: |- + Configures Malware Protection. + See Malware Protection, Scan EC2 instance with findings and EBS volumes below for more details. + properties: + scanEc2InstanceWithFindings: + description: |- + Configure whether Malware Protection is enabled as data source for EC2 instances with findings for the detector. 
+ See Scan EC2 instance with findings below for more details. + properties: + ebsVolumes: + description: |- + Configure whether scanning EBS volumes is enabled as data source for the detector for instances with findings. + See EBS volumes below for more details. + properties: + enable: + description: |- + If true, enables Malware Protection as data source for the detector. + Defaults to true. + type: boolean + type: object + type: object + type: object + s3Logs: + description: |- + Configures S3 protection. + See S3 Logs below for more details. + properties: + enable: + description: |- + If true, enables S3 protection. + Defaults to true. + type: boolean + type: object + type: object + enable: + description: Enable monitoring and feedback reporting. Setting + to false is equivalent to "suspending" GuardDuty. Defaults to + true. + type: boolean + findingPublishingFrequency: + description: 'Specifies the frequency of notifications sent for + subsequent finding occurrences. If the detector is a GuardDuty + member account, the value is determined by the GuardDuty primary + account and cannot be modified, otherwise defaults to SIX_HOURS. + Valid values for standalone and primary accounts: FIFTEEN_MINUTES, + ONE_HOUR, SIX_HOURS. See AWS Documentation for more information.' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + datasources: + description: Describes which data sources will be enabled for + the detector. See Data Sources below for more details. Deprecated + in favor of aws_guardduty_detector_feature resources. + properties: + kubernetes: + description: |- + Configures Kubernetes protection. + See Kubernetes and Kubernetes Audit Logs below for more details. + properties: + auditLogs: + description: |- + Configures Kubernetes audit logs as a data source for Kubernetes protection. + See Kubernetes Audit Logs below for more details. + properties: + enable: + description: |- + If true, enables Malware Protection as data source for the detector. + Defaults to true. + type: boolean + type: object + type: object + malwareProtection: + description: |- + Configures Malware Protection. + See Malware Protection, Scan EC2 instance with findings and EBS volumes below for more details. + properties: + scanEc2InstanceWithFindings: + description: |- + Configure whether Malware Protection is enabled as data source for EC2 instances with findings for the detector. + See Scan EC2 instance with findings below for more details. + properties: + ebsVolumes: + description: |- + Configure whether scanning EBS volumes is enabled as data source for the detector for instances with findings. + See EBS volumes below for more details. + properties: + enable: + description: |- + If true, enables Malware Protection as data source for the detector. + Defaults to true. + type: boolean + type: object + type: object + type: object + s3Logs: + description: |- + Configures S3 protection. + See S3 Logs below for more details. 
+ properties: + enable: + description: |- + If true, enables S3 protection. + Defaults to true. + type: boolean + type: object + type: object + enable: + description: Enable monitoring and feedback reporting. Setting + to false is equivalent to "suspending" GuardDuty. Defaults to + true. + type: boolean + findingPublishingFrequency: + description: 'Specifies the frequency of notifications sent for + subsequent finding occurrences. If the detector is a GuardDuty + member account, the value is determined by the GuardDuty primary + account and cannot be modified, otherwise defaults to SIX_HOURS. + Valid values for standalone and primary accounts: FIFTEEN_MINUTES, + ONE_HOUR, SIX_HOURS. See AWS Documentation for more information.' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DetectorStatus defines the observed state of Detector. + properties: + atProvider: + properties: + accountId: + description: The AWS account ID of the GuardDuty detector + type: string + arn: + description: Amazon Resource Name (ARN) of the GuardDuty detector + type: string + datasources: + description: Describes which data sources will be enabled for + the detector. See Data Sources below for more details. Deprecated + in favor of aws_guardduty_detector_feature resources. + properties: + kubernetes: + description: |- + Configures Kubernetes protection. + See Kubernetes and Kubernetes Audit Logs below for more details. + properties: + auditLogs: + description: |- + Configures Kubernetes audit logs as a data source for Kubernetes protection. + See Kubernetes Audit Logs below for more details. + properties: + enable: + description: |- + If true, enables Malware Protection as data source for the detector. + Defaults to true. + type: boolean + type: object + type: object + malwareProtection: + description: |- + Configures Malware Protection. + See Malware Protection, Scan EC2 instance with findings and EBS volumes below for more details. + properties: + scanEc2InstanceWithFindings: + description: |- + Configure whether Malware Protection is enabled as data source for EC2 instances with findings for the detector. + See Scan EC2 instance with findings below for more details. 
+ properties: + ebsVolumes: + description: |- + Configure whether scanning EBS volumes is enabled as data source for the detector for instances with findings. + See EBS volumes below for more details. + properties: + enable: + description: |- + If true, enables Malware Protection as data source for the detector. + Defaults to true. + type: boolean + type: object + type: object + type: object + s3Logs: + description: |- + Configures S3 protection. + See S3 Logs below for more details. + properties: + enable: + description: |- + If true, enables S3 protection. + Defaults to true. + type: boolean + type: object + type: object + enable: + description: Enable monitoring and feedback reporting. Setting + to false is equivalent to "suspending" GuardDuty. Defaults to + true. + type: boolean + findingPublishingFrequency: + description: 'Specifies the frequency of notifications sent for + subsequent finding occurrences. If the detector is a GuardDuty + member account, the value is determined by the GuardDuty primary + account and cannot be modified, otherwise defaults to SIX_HOURS. + Valid values for standalone and primary accounts: FIFTEEN_MINUTES, + ONE_HOUR, SIX_HOURS. See AWS Documentation for more information.' + type: string + id: + description: The ID of the GuardDuty detector + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/guardduty.aws.upbound.io_filters.yaml b/package/crds/guardduty.aws.upbound.io_filters.yaml index fa28dd262e..df774007b1 100644 --- a/package/crds/guardduty.aws.upbound.io_filters.yaml +++ b/package/crds/guardduty.aws.upbound.io_filters.yaml @@ -624,3 +624,600 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Filter is the Schema for the Filters API. Provides a resource + to manage a GuardDuty filter + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FilterSpec defines the desired state of Filter + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: Specifies the action that is to be applied to the + findings that match the filter. Can be one of ARCHIVE or NOOP. + type: string + description: + description: Description of the filter. + type: string + detectorId: + description: ID of a GuardDuty detector, attached to your account. + type: string + detectorIdRef: + description: Reference to a Detector in guardduty to populate + detectorId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + detectorIdSelector: + description: Selector for a Detector in guardduty to populate + detectorId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + findingCriteria: + description: Represents the criteria to be used in the filter + for querying findings. Contains one or more criterion blocks, + documented below. + properties: + criterion: + items: + properties: + equals: + description: List of string values to be evaluated. + items: + type: string + type: array + field: + description: The name of the field to be evaluated. + The full list of field names can be found in AWS documentation. 
+ type: string + greaterThan: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + greaterThanOrEqual: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + lessThan: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + lessThanOrEqual: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + notEquals: + description: List of string values to be evaluated. + items: + type: string + type: array + type: object + type: array + type: object + rank: + description: Specifies the position of the filter in the list + of current filters. Also specifies the order in which this filter + is applied to the findings. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: Specifies the action that is to be applied to the + findings that match the filter. Can be one of ARCHIVE or NOOP. 
+ type: string + description: + description: Description of the filter. + type: string + findingCriteria: + description: Represents the criteria to be used in the filter + for querying findings. Contains one or more criterion blocks, + documented below. + properties: + criterion: + items: + properties: + equals: + description: List of string values to be evaluated. + items: + type: string + type: array + field: + description: The name of the field to be evaluated. + The full list of field names can be found in AWS documentation. + type: string + greaterThan: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + greaterThanOrEqual: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + lessThan: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + lessThanOrEqual: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + notEquals: + description: List of string values to be evaluated. + items: + type: string + type: array + type: object + type: array + type: object + rank: + description: Specifies the position of the filter in the list + of current filters. Also specifies the order in which this filter + is applied to the findings. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.action is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.action) + || (has(self.initProvider) && has(self.initProvider.action))' + - message: spec.forProvider.findingCriteria is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.findingCriteria) + || (has(self.initProvider) && has(self.initProvider.findingCriteria))' + - message: spec.forProvider.rank is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.rank) + || (has(self.initProvider) && has(self.initProvider.rank))' + status: + description: FilterStatus defines the observed state of Filter. + properties: + atProvider: + properties: + action: + description: Specifies the action that is to be applied to the + findings that match the filter. Can be one of ARCHIVE or NOOP. + type: string + arn: + description: The ARN of the GuardDuty filter. + type: string + description: + description: Description of the filter. + type: string + detectorId: + description: ID of a GuardDuty detector, attached to your account. + type: string + findingCriteria: + description: Represents the criteria to be used in the filter + for querying findings. Contains one or more criterion blocks, + documented below. + properties: + criterion: + items: + properties: + equals: + description: List of string values to be evaluated. + items: + type: string + type: array + field: + description: The name of the field to be evaluated. + The full list of field names can be found in AWS documentation. 
+ type: string + greaterThan: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + greaterThanOrEqual: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + lessThan: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + lessThanOrEqual: + description: A value to be evaluated. Accepts either + an integer or a date in RFC 3339 format. + type: string + notEquals: + description: List of string values to be evaluated. + items: + type: string + type: array + type: object + type: array + type: object + id: + description: A compound field, consisting of the ID of the GuardDuty + detector and the name of the filter. + type: string + rank: + description: Specifies the position of the filter in the list + of current filters. Also specifies the order in which this filter + is applied to the findings. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/identitystore.aws.upbound.io_users.yaml b/package/crds/identitystore.aws.upbound.io_users.yaml index 40a2688ec5..9f7c2d9e51 100644 --- a/package/crds/identitystore.aws.upbound.io_users.yaml +++ b/package/crds/identitystore.aws.upbound.io_users.yaml @@ -743,3 +743,704 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: User is the Schema for the Users API. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserSpec defines the desired state of User + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addresses: + description: Details about the user's address. At most 1 address + is allowed. Detailed below. + properties: + country: + description: The country that this address is in. + type: string + formatted: + description: The name that is typically displayed when the + address is shown for display. + type: string + locality: + description: The address locality. + type: string + postalCode: + description: The postal code of the address. 
+ type: string + primary: + description: When true, this is the primary address associated + with the user. + type: boolean + region: + description: The region of the address. + type: string + streetAddress: + description: The street of the address. + type: string + type: + description: The type of address. + type: string + type: object + displayName: + description: The name that is typically displayed when the user + is referenced. + type: string + emails: + description: Details about the user's email. At most 1 email is + allowed. Detailed below. + properties: + primary: + description: When true, this is the primary email associated + with the user. + type: boolean + type: + description: The type of email. + type: string + value: + description: The email address. This value must be unique + across the identity store. + type: string + type: object + identityStoreId: + description: The globally unique identifier for the identity store + that this user is in. + type: string + locale: + description: The user's geographical region or location. + type: string + name: + description: Details about the user's full name. Detailed below. + properties: + familyName: + description: The family name of the user. + type: string + formatted: + description: The name that is typically displayed when the + name is shown for display. + type: string + givenName: + description: The given name of the user. + type: string + honorificPrefix: + description: The honorific prefix of the user. + type: string + honorificSuffix: + description: The honorific suffix of the user. + type: string + middleName: + description: The middle name of the user. + type: string + type: object + nickname: + description: An alternate name for the user. + type: string + phoneNumbers: + description: Details about the user's phone number. At most 1 + phone number is allowed. Detailed below. + properties: + primary: + description: When true, this is the primary phone number associated + with the user. 
+ type: boolean + type: + description: The type of phone number. + type: string + value: + description: The user's phone number. + type: string + type: object + preferredLanguage: + description: The preferred language of the user. + type: string + profileUrl: + description: An URL that may be associated with the user. + type: string + region: + description: |- + The region of the address. + Region is the region you'd like your resource to be created in. + type: string + timezone: + description: The user's time zone. + type: string + title: + description: The user's title. + type: string + userName: + description: A unique string used to identify the user. This value + can consist of letters, accented characters, symbols, numbers, + and punctuation. This value is specified at the time the user + is created and stored as an attribute of the user object in + the identity store. The limit is 128 characters. + type: string + userType: + description: The user type. + type: string + required: + - identityStoreId + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addresses: + description: Details about the user's address. At most 1 address + is allowed. Detailed below. + properties: + country: + description: The country that this address is in. 
+ type: string + formatted: + description: The name that is typically displayed when the + address is shown for display. + type: string + locality: + description: The address locality. + type: string + postalCode: + description: The postal code of the address. + type: string + primary: + description: When true, this is the primary address associated + with the user. + type: boolean + streetAddress: + description: The street of the address. + type: string + type: + description: The type of address. + type: string + type: object + displayName: + description: The name that is typically displayed when the user + is referenced. + type: string + emails: + description: Details about the user's email. At most 1 email is + allowed. Detailed below. + properties: + primary: + description: When true, this is the primary email associated + with the user. + type: boolean + type: + description: The type of email. + type: string + value: + description: The email address. This value must be unique + across the identity store. + type: string + type: object + locale: + description: The user's geographical region or location. + type: string + name: + description: Details about the user's full name. Detailed below. + properties: + familyName: + description: The family name of the user. + type: string + formatted: + description: The name that is typically displayed when the + name is shown for display. + type: string + givenName: + description: The given name of the user. + type: string + honorificPrefix: + description: The honorific prefix of the user. + type: string + honorificSuffix: + description: The honorific suffix of the user. + type: string + middleName: + description: The middle name of the user. + type: string + type: object + nickname: + description: An alternate name for the user. + type: string + phoneNumbers: + description: Details about the user's phone number. At most 1 + phone number is allowed. Detailed below. 
+ properties: + primary: + description: When true, this is the primary phone number associated + with the user. + type: boolean + type: + description: The type of phone number. + type: string + value: + description: The user's phone number. + type: string + type: object + preferredLanguage: + description: The preferred language of the user. + type: string + profileUrl: + description: An URL that may be associated with the user. + type: string + timezone: + description: The user's time zone. + type: string + title: + description: The user's title. + type: string + userName: + description: A unique string used to identify the user. This value + can consist of letters, accented characters, symbols, numbers, + and punctuation. This value is specified at the time the user + is created and stored as an attribute of the user object in + the identity store. The limit is 128 characters. + type: string + userType: + description: The user type. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.displayName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.displayName) + || (has(self.initProvider) && has(self.initProvider.displayName))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.userName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.userName) + || (has(self.initProvider) && has(self.initProvider.userName))' + status: + description: UserStatus defines the observed state of User. + properties: + atProvider: + properties: + addresses: + description: Details about the user's address. At most 1 address + is allowed. Detailed below. + properties: + country: + description: The country that this address is in. + type: string + formatted: + description: The name that is typically displayed when the + address is shown for display. + type: string + locality: + description: The address locality. 
+ type: string + postalCode: + description: The postal code of the address. + type: string + primary: + description: When true, this is the primary address associated + with the user. + type: boolean + region: + description: The region of the address. + type: string + streetAddress: + description: The street of the address. + type: string + type: + description: The type of address. + type: string + type: object + displayName: + description: The name that is typically displayed when the user + is referenced. + type: string + emails: + description: Details about the user's email. At most 1 email is + allowed. Detailed below. + properties: + primary: + description: When true, this is the primary email associated + with the user. + type: boolean + type: + description: The type of email. + type: string + value: + description: The email address. This value must be unique + across the identity store. + type: string + type: object + externalIds: + description: A list of identifiers issued to this resource by + an external identity provider. + items: + properties: + id: + description: The identifier issued to this resource by an + external identity provider. + type: string + issuer: + description: The issuer for an external identifier. + type: string + type: object + type: array + id: + description: The identifier issued to this resource by an external + identity provider. + type: string + identityStoreId: + description: The globally unique identifier for the identity store + that this user is in. + type: string + locale: + description: The user's geographical region or location. + type: string + name: + description: Details about the user's full name. Detailed below. + properties: + familyName: + description: The family name of the user. + type: string + formatted: + description: The name that is typically displayed when the + name is shown for display. + type: string + givenName: + description: The given name of the user. 
+ type: string + honorificPrefix: + description: The honorific prefix of the user. + type: string + honorificSuffix: + description: The honorific suffix of the user. + type: string + middleName: + description: The middle name of the user. + type: string + type: object + nickname: + description: An alternate name for the user. + type: string + phoneNumbers: + description: Details about the user's phone number. At most 1 + phone number is allowed. Detailed below. + properties: + primary: + description: When true, this is the primary phone number associated + with the user. + type: boolean + type: + description: The type of phone number. + type: string + value: + description: The user's phone number. + type: string + type: object + preferredLanguage: + description: The preferred language of the user. + type: string + profileUrl: + description: An URL that may be associated with the user. + type: string + timezone: + description: The user's time zone. + type: string + title: + description: The user's title. + type: string + userId: + description: The identifier for this user in the identity store. + type: string + userName: + description: A unique string used to identify the user. This value + can consist of letters, accented characters, symbols, numbers, + and punctuation. This value is specified at the time the user + is created and stored as an attribute of the user object in + the identity store. The limit is 128 characters. + type: string + userType: + description: The user type. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/imagebuilder.aws.upbound.io_containerrecipes.yaml b/package/crds/imagebuilder.aws.upbound.io_containerrecipes.yaml index b8817eac59..9b5c1d48ec 100644 --- a/package/crds/imagebuilder.aws.upbound.io_containerrecipes.yaml +++ b/package/crds/imagebuilder.aws.upbound.io_containerrecipes.yaml @@ -1293,3 +1293,1257 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ContainerRecipe is the Schema for the ContainerRecipes API. Manage + an Image Builder Container Recipe + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ContainerRecipeSpec defines the desired state of ContainerRecipe + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + component: + description: Ordered configuration block(s) with components for + the container recipe. Detailed below. + items: + properties: + componentArn: + description: Amazon Resource Name (ARN) of the Image Builder + Component to associate. + type: string + componentArnRef: + description: Reference to a Component in imagebuilder to + populate componentArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + componentArnSelector: + description: Selector for a Component in imagebuilder to + populate componentArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameter: + description: Configuration block(s) for parameters to configure + the component. Detailed below. + items: + properties: + name: + description: The name of the component parameter. + type: string + value: + description: The value for the named component parameter. + type: string + type: object + type: array + type: object + type: array + containerType: + description: 'The type of the container to create. Valid values: + DOCKER.' 
+ type: string + description: + description: The description of the container recipe. + type: string + dockerfileTemplateData: + description: The Dockerfile template used to build the image as + an inline data blob. + type: string + dockerfileTemplateUri: + description: The Amazon S3 URI for the Dockerfile that will be + used to build the container image. + type: string + instanceConfiguration: + description: Configuration block used to configure an instance + for building and testing container images. Detailed below. + properties: + blockDeviceMapping: + description: Configuration block(s) with block device mappings + for the container recipe. Detailed below. + items: + properties: + deviceName: + description: Name of the device. For example, /dev/sda + or /dev/xvdb. + type: string + ebs: + description: Configuration block with Elastic Block + Storage (EBS) block device mapping settings. Detailed + below. + properties: + deleteOnTermination: + description: Whether to delete the volume on termination. + Defaults to unset, which is the value inherited + from the parent image. + type: string + encrypted: + description: Whether to encrypt the volume. Defaults + to unset, which is the value inherited from the + parent image. + type: string + iops: + description: Number of Input/Output (I/O) operations + per second to provision for an io1 or io2 volume. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the Key + Management Service (KMS) Key for encryption. + type: string + snapshotId: + description: Identifier of the EC2 Volume Snapshot. + type: string + throughput: + description: For GP3 volumes only. The throughput + in MiB/s that the volume supports. + type: number + volumeSize: + description: Size of the volume, in GiB. + type: number + volumeType: + description: Type of the volume. For example, gp2 + or io2. + type: string + type: object + noDevice: + description: Set to true to remove a mapping from the + parent image. 
+ type: boolean + virtualName: + description: Virtual device name. For example, ephemeral0. + Instance store volumes are numbered starting from + 0. + type: string + type: object + type: array + image: + description: The AMI ID to use as the base image for a container + build and test instance. If not specified, Image Builder + will use the appropriate ECS-optimized AMI as a base image. + type: string + type: object + kmsKeyId: + description: The KMS key used to encrypt the container image. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the container recipe. + type: string + parentImage: + description: The base image for the container recipe. + type: string + platformOverride: + description: Specifies the operating system platform when you + use a custom base image. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + targetRepository: + description: The destination repository for the container image. + Detailed below. + properties: + repositoryName: + description: The name of the container repository where the + output container image is stored. This name is prefixed + by the repository location. + type: string + repositoryNameRef: + description: Reference to a Repository in ecr to populate + repositoryName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + repositoryNameSelector: + description: Selector for a Repository in ecr to populate + repositoryName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + service: + description: 'The service in which this image is registered. + Valid values: ECR.' 
+ type: string + type: object + version: + description: Version of the container recipe. + type: string + workingDirectory: + description: The working directory to be used during build and + test workflows. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + component: + description: Ordered configuration block(s) with components for + the container recipe. Detailed below. + items: + properties: + componentArn: + description: Amazon Resource Name (ARN) of the Image Builder + Component to associate. + type: string + componentArnRef: + description: Reference to a Component in imagebuilder to + populate componentArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + componentArnSelector: + description: Selector for a Component in imagebuilder to + populate componentArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameter: + description: Configuration block(s) for parameters to configure + the component. Detailed below. + items: + properties: + name: + description: The name of the component parameter. + type: string + value: + description: The value for the named component parameter. + type: string + type: object + type: array + type: object + type: array + containerType: + description: 'The type of the container to create. Valid values: + DOCKER.' 
+ type: string + description: + description: The description of the container recipe. + type: string + dockerfileTemplateData: + description: The Dockerfile template used to build the image as + an inline data blob. + type: string + dockerfileTemplateUri: + description: The Amazon S3 URI for the Dockerfile that will be + used to build the container image. + type: string + instanceConfiguration: + description: Configuration block used to configure an instance + for building and testing container images. Detailed below. + properties: + blockDeviceMapping: + description: Configuration block(s) with block device mappings + for the container recipe. Detailed below. + items: + properties: + deviceName: + description: Name of the device. For example, /dev/sda + or /dev/xvdb. + type: string + ebs: + description: Configuration block with Elastic Block + Storage (EBS) block device mapping settings. Detailed + below. + properties: + deleteOnTermination: + description: Whether to delete the volume on termination. + Defaults to unset, which is the value inherited + from the parent image. + type: string + encrypted: + description: Whether to encrypt the volume. Defaults + to unset, which is the value inherited from the + parent image. + type: string + iops: + description: Number of Input/Output (I/O) operations + per second to provision for an io1 or io2 volume. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the Key + Management Service (KMS) Key for encryption. + type: string + snapshotId: + description: Identifier of the EC2 Volume Snapshot. + type: string + throughput: + description: For GP3 volumes only. The throughput + in MiB/s that the volume supports. + type: number + volumeSize: + description: Size of the volume, in GiB. + type: number + volumeType: + description: Type of the volume. For example, gp2 + or io2. + type: string + type: object + noDevice: + description: Set to true to remove a mapping from the + parent image. 
+ type: boolean + virtualName: + description: Virtual device name. For example, ephemeral0. + Instance store volumes are numbered starting from + 0. + type: string + type: object + type: array + image: + description: The AMI ID to use as the base image for a container + build and test instance. If not specified, Image Builder + will use the appropriate ECS-optimized AMI as a base image. + type: string + type: object + kmsKeyId: + description: The KMS key used to encrypt the container image. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the container recipe. + type: string + parentImage: + description: The base image for the container recipe. + type: string + platformOverride: + description: Specifies the operating system platform when you + use a custom base image. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + targetRepository: + description: The destination repository for the container image. + Detailed below. + properties: + repositoryName: + description: The name of the container repository where the + output container image is stored. This name is prefixed + by the repository location. + type: string + repositoryNameRef: + description: Reference to a Repository in ecr to populate + repositoryName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + repositoryNameSelector: + description: Selector for a Repository in ecr to populate + repositoryName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + service: + description: 'The service in which this image is registered. + Valid values: ECR.' + type: string + type: object + version: + description: Version of the container recipe. 
+ type: string + workingDirectory: + description: The working directory to be used during build and + test workflows. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.component is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.component) + || (has(self.initProvider) && has(self.initProvider.component))' + - message: spec.forProvider.containerType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.containerType) + || (has(self.initProvider) && has(self.initProvider.containerType))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.parentImage is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.parentImage) + || (has(self.initProvider) && has(self.initProvider.parentImage))' + - message: spec.forProvider.targetRepository is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetRepository) + || (has(self.initProvider) && has(self.initProvider.targetRepository))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: ContainerRecipeStatus defines the observed state 
of ContainerRecipe. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the container recipe. + type: string + component: + description: Ordered configuration block(s) with components for + the container recipe. Detailed below. + items: + properties: + componentArn: + description: Amazon Resource Name (ARN) of the Image Builder + Component to associate. + type: string + parameter: + description: Configuration block(s) for parameters to configure + the component. Detailed below. + items: + properties: + name: + description: The name of the component parameter. + type: string + value: + description: The value for the named component parameter. + type: string + type: object + type: array + type: object + type: array + containerType: + description: 'The type of the container to create. Valid values: + DOCKER.' + type: string + dateCreated: + description: Date the container recipe was created. + type: string + description: + description: The description of the container recipe. + type: string + dockerfileTemplateData: + description: The Dockerfile template used to build the image as + an inline data blob. + type: string + dockerfileTemplateUri: + description: The Amazon S3 URI for the Dockerfile that will be + used to build the container image. + type: string + encrypted: + description: A flag that indicates if the target container is + encrypted. + type: boolean + id: + type: string + instanceConfiguration: + description: Configuration block used to configure an instance + for building and testing container images. Detailed below. + properties: + blockDeviceMapping: + description: Configuration block(s) with block device mappings + for the container recipe. Detailed below. + items: + properties: + deviceName: + description: Name of the device. For example, /dev/sda + or /dev/xvdb. + type: string + ebs: + description: Configuration block with Elastic Block + Storage (EBS) block device mapping settings. Detailed + below. 
+ properties: + deleteOnTermination: + description: Whether to delete the volume on termination. + Defaults to unset, which is the value inherited + from the parent image. + type: string + encrypted: + description: Whether to encrypt the volume. Defaults + to unset, which is the value inherited from the + parent image. + type: string + iops: + description: Number of Input/Output (I/O) operations + per second to provision for an io1 or io2 volume. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the Key + Management Service (KMS) Key for encryption. + type: string + snapshotId: + description: Identifier of the EC2 Volume Snapshot. + type: string + throughput: + description: For GP3 volumes only. The throughput + in MiB/s that the volume supports. + type: number + volumeSize: + description: Size of the volume, in GiB. + type: number + volumeType: + description: Type of the volume. For example, gp2 + or io2. + type: string + type: object + noDevice: + description: Set to true to remove a mapping from the + parent image. + type: boolean + virtualName: + description: Virtual device name. For example, ephemeral0. + Instance store volumes are numbered starting from + 0. + type: string + type: object + type: array + image: + description: The AMI ID to use as the base image for a container + build and test instance. If not specified, Image Builder + will use the appropriate ECS-optimized AMI as a base image. + type: string + type: object + kmsKeyId: + description: The KMS key used to encrypt the container image. + type: string + name: + description: The name of the container recipe. + type: string + owner: + description: Owner of the container recipe. + type: string + parentImage: + description: The base image for the container recipe. + type: string + platform: + description: Platform of the container recipe. + type: string + platformOverride: + description: Specifies the operating system platform when you + use a custom base image. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + targetRepository: + description: The destination repository for the container image. + Detailed below. + properties: + repositoryName: + description: The name of the container repository where the + output container image is stored. This name is prefixed + by the repository location. + type: string + service: + description: 'The service in which this image is registered. + Valid values: ECR.' + type: string + type: object + version: + description: Version of the container recipe. + type: string + workingDirectory: + description: The working directory to be used during build and + test workflows. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/imagebuilder.aws.upbound.io_distributionconfigurations.yaml b/package/crds/imagebuilder.aws.upbound.io_distributionconfigurations.yaml index 43735c9f2e..ba15574934 100644 --- a/package/crds/imagebuilder.aws.upbound.io_distributionconfigurations.yaml +++ b/package/crds/imagebuilder.aws.upbound.io_distributionconfigurations.yaml @@ -988,3 +988,919 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DistributionConfiguration is the Schema for the DistributionConfigurations + API. Manage an Image Builder Distribution Configuration + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DistributionConfigurationSpec defines the desired state of + DistributionConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the distribution configuration. + type: string + distribution: + description: One or more configuration blocks with distribution + settings. Detailed below. + items: + properties: + amiDistributionConfiguration: + description: Configuration block with Amazon Machine Image + (AMI) distribution settings. Detailed below. + properties: + amiTags: + additionalProperties: + type: string + description: Key-value map of tags to apply to the distributed + AMI. 
+ type: object + x-kubernetes-map-type: granular + description: + description: Description to apply to the distributed + AMI. + type: string + kmsKeyId: + description: Amazon Resource Name (ARN) of the Key Management + Service (KMS) Key to encrypt the distributed AMI. + type: string + launchPermission: + description: Configuration block of EC2 launch permissions + to apply to the distributed AMI. Detailed below. + properties: + organizationArns: + description: Set of AWS Organization ARNs to assign. + items: + type: string + type: array + x-kubernetes-list-type: set + organizationalUnitArns: + description: Set of AWS Organizational Unit ARNs + to assign. + items: + type: string + type: array + x-kubernetes-list-type: set + userGroups: + description: Set of EC2 launch permission user groups + to assign. Use all to distribute a public AMI. + items: + type: string + type: array + x-kubernetes-list-type: set + userIds: + description: Set of AWS Account identifiers to assign. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + name: + description: Name to apply to the distributed AMI. + type: string + targetAccountIds: + description: Set of AWS Account identifiers to distribute + the AMI. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + containerDistributionConfiguration: + description: Configuration block with container distribution + settings. Detailed below. + properties: + containerTags: + description: Set of tags that are attached to the container + distribution configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the container distribution + configuration. + type: string + targetRepository: + description: Configuration block with the destination + repository for the container distribution configuration. 
+ properties: + repositoryName: + description: The name of the container repository + where the output container image is stored. This + name is prefixed by the repository location. + type: string + service: + description: 'The service in which this image is + registered. Valid values: ECR.' + type: string + type: object + type: object + fastLaunchConfiguration: + description: Set of Windows faster-launching configurations + to use for AMI distribution. Detailed below. + items: + properties: + accountId: + description: The owner account ID for the fast-launch + enabled Windows AMI. + type: string + enabled: + description: A Boolean that represents the current + state of faster launching for the Windows AMI. Set + to true to start using Windows faster launching, + or false to stop using it. + type: boolean + launchTemplate: + description: Configuration block for the launch template + that the fast-launch enabled Windows AMI uses when + it launches Windows instances to create pre-provisioned + snapshots. Detailed below. + properties: + launchTemplateId: + description: The ID of the launch template to + use for faster launching for a Windows AMI. + type: string + launchTemplateName: + description: The name of the launch template to + use for faster launching for a Windows AMI. + type: string + launchTemplateVersion: + description: The version of the launch template + to use for faster launching for a Windows AMI. + type: string + type: object + maxParallelLaunches: + description: The maximum number of parallel instances + that are launched for creating resources. + type: number + snapshotConfiguration: + description: Configuration block for managing the + number of snapshots that are created from pre-provisioned + instances for the Windows AMI when faster launching + is enabled. Detailed below. + properties: + targetResourceCount: + description: The number of pre-provisioned snapshots + to keep on hand for a fast-launch enabled Windows + AMI. 
+ type: number + type: object + type: object + type: array + launchTemplateConfiguration: + description: Set of launch template configuration settings + that apply to image distribution. Detailed below. + items: + properties: + accountId: + description: The account ID that this configuration + applies to. + type: string + default: + description: Indicates whether to set the specified + Amazon EC2 launch template as the default launch + template. Defaults to true. + type: boolean + launchTemplateId: + description: The ID of the Amazon EC2 launch template + to use. + type: string + type: object + type: array + licenseConfigurationArns: + description: Set of Amazon Resource Names (ARNs) of License + Manager License Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: AWS Region for the distribution. + type: string + required: + - region + type: object + type: array + name: + description: Name of the distribution configuration. + type: string + region: + description: |- + AWS Region for the distribution. + Region is the region you'd like your resource to be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the distribution configuration. + type: string + distribution: + description: One or more configuration blocks with distribution + settings. Detailed below. + items: + properties: + amiDistributionConfiguration: + description: Configuration block with Amazon Machine Image + (AMI) distribution settings. Detailed below. + properties: + amiTags: + additionalProperties: + type: string + description: Key-value map of tags to apply to the distributed + AMI. + type: object + x-kubernetes-map-type: granular + description: + description: Description to apply to the distributed + AMI. + type: string + kmsKeyId: + description: Amazon Resource Name (ARN) of the Key Management + Service (KMS) Key to encrypt the distributed AMI. + type: string + launchPermission: + description: Configuration block of EC2 launch permissions + to apply to the distributed AMI. Detailed below. + properties: + organizationArns: + description: Set of AWS Organization ARNs to assign. + items: + type: string + type: array + x-kubernetes-list-type: set + organizationalUnitArns: + description: Set of AWS Organizational Unit ARNs + to assign. + items: + type: string + type: array + x-kubernetes-list-type: set + userGroups: + description: Set of EC2 launch permission user groups + to assign. Use all to distribute a public AMI. + items: + type: string + type: array + x-kubernetes-list-type: set + userIds: + description: Set of AWS Account identifiers to assign. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + name: + description: Name to apply to the distributed AMI. + type: string + targetAccountIds: + description: Set of AWS Account identifiers to distribute + the AMI. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + containerDistributionConfiguration: + description: Configuration block with container distribution + settings. Detailed below. + properties: + containerTags: + description: Set of tags that are attached to the container + distribution configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the container distribution + configuration. + type: string + targetRepository: + description: Configuration block with the destination + repository for the container distribution configuration. + properties: + repositoryName: + description: The name of the container repository + where the output container image is stored. This + name is prefixed by the repository location. + type: string + service: + description: 'The service in which this image is + registered. Valid values: ECR.' + type: string + type: object + type: object + fastLaunchConfiguration: + description: Set of Windows faster-launching configurations + to use for AMI distribution. Detailed below. + items: + properties: + accountId: + description: The owner account ID for the fast-launch + enabled Windows AMI. + type: string + enabled: + description: A Boolean that represents the current + state of faster launching for the Windows AMI. Set + to true to start using Windows faster launching, + or false to stop using it. + type: boolean + launchTemplate: + description: Configuration block for the launch template + that the fast-launch enabled Windows AMI uses when + it launches Windows instances to create pre-provisioned + snapshots. Detailed below. + properties: + launchTemplateId: + description: The ID of the launch template to + use for faster launching for a Windows AMI. + type: string + launchTemplateName: + description: The name of the launch template to + use for faster launching for a Windows AMI. 
+ type: string + launchTemplateVersion: + description: The version of the launch template + to use for faster launching for a Windows AMI. + type: string + type: object + maxParallelLaunches: + description: The maximum number of parallel instances + that are launched for creating resources. + type: number + snapshotConfiguration: + description: Configuration block for managing the + number of snapshots that are created from pre-provisioned + instances for the Windows AMI when faster launching + is enabled. Detailed below. + properties: + targetResourceCount: + description: The number of pre-provisioned snapshots + to keep on hand for a fast-launch enabled Windows + AMI. + type: number + type: object + type: object + type: array + launchTemplateConfiguration: + description: Set of launch template configuration settings + that apply to image distribution. Detailed below. + items: + properties: + accountId: + description: The account ID that this configuration + applies to. + type: string + default: + description: Indicates whether to set the specified + Amazon EC2 launch template as the default launch + template. Defaults to true. + type: boolean + launchTemplateId: + description: The ID of the Amazon EC2 launch template + to use. + type: string + type: object + type: array + licenseConfigurationArns: + description: Set of Amazon Resource Names (ARNs) of License + Manager License Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + name: + description: Name of the distribution configuration. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.distribution is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.distribution) + || (has(self.initProvider) && has(self.initProvider.distribution))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: DistributionConfigurationStatus defines the observed state + of DistributionConfiguration. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the distribution configuration. + type: string + dateCreated: + description: Date the distribution configuration was created. + type: string + dateUpdated: + description: Date the distribution configuration was updated. + type: string + description: + description: Description of the distribution configuration. + type: string + distribution: + description: One or more configuration blocks with distribution + settings. Detailed below. + items: + properties: + amiDistributionConfiguration: + description: Configuration block with Amazon Machine Image + (AMI) distribution settings. Detailed below. + properties: + amiTags: + additionalProperties: + type: string + description: Key-value map of tags to apply to the distributed + AMI. + type: object + x-kubernetes-map-type: granular + description: + description: Description to apply to the distributed + AMI. + type: string + kmsKeyId: + description: Amazon Resource Name (ARN) of the Key Management + Service (KMS) Key to encrypt the distributed AMI. 
+ type: string + launchPermission: + description: Configuration block of EC2 launch permissions + to apply to the distributed AMI. Detailed below. + properties: + organizationArns: + description: Set of AWS Organization ARNs to assign. + items: + type: string + type: array + x-kubernetes-list-type: set + organizationalUnitArns: + description: Set of AWS Organizational Unit ARNs + to assign. + items: + type: string + type: array + x-kubernetes-list-type: set + userGroups: + description: Set of EC2 launch permission user groups + to assign. Use all to distribute a public AMI. + items: + type: string + type: array + x-kubernetes-list-type: set + userIds: + description: Set of AWS Account identifiers to assign. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + name: + description: Name to apply to the distributed AMI. + type: string + targetAccountIds: + description: Set of AWS Account identifiers to distribute + the AMI. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + containerDistributionConfiguration: + description: Configuration block with container distribution + settings. Detailed below. + properties: + containerTags: + description: Set of tags that are attached to the container + distribution configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the container distribution + configuration. + type: string + targetRepository: + description: Configuration block with the destination + repository for the container distribution configuration. + properties: + repositoryName: + description: The name of the container repository + where the output container image is stored. This + name is prefixed by the repository location. + type: string + service: + description: 'The service in which this image is + registered. Valid values: ECR.' 
+ type: string + type: object + type: object + fastLaunchConfiguration: + description: Set of Windows faster-launching configurations + to use for AMI distribution. Detailed below. + items: + properties: + accountId: + description: The owner account ID for the fast-launch + enabled Windows AMI. + type: string + enabled: + description: A Boolean that represents the current + state of faster launching for the Windows AMI. Set + to true to start using Windows faster launching, + or false to stop using it. + type: boolean + launchTemplate: + description: Configuration block for the launch template + that the fast-launch enabled Windows AMI uses when + it launches Windows instances to create pre-provisioned + snapshots. Detailed below. + properties: + launchTemplateId: + description: The ID of the launch template to + use for faster launching for a Windows AMI. + type: string + launchTemplateName: + description: The name of the launch template to + use for faster launching for a Windows AMI. + type: string + launchTemplateVersion: + description: The version of the launch template + to use for faster launching for a Windows AMI. + type: string + type: object + maxParallelLaunches: + description: The maximum number of parallel instances + that are launched for creating resources. + type: number + snapshotConfiguration: + description: Configuration block for managing the + number of snapshots that are created from pre-provisioned + instances for the Windows AMI when faster launching + is enabled. Detailed below. + properties: + targetResourceCount: + description: The number of pre-provisioned snapshots + to keep on hand for a fast-launch enabled Windows + AMI. + type: number + type: object + type: object + type: array + launchTemplateConfiguration: + description: Set of launch template configuration settings + that apply to image distribution. Detailed below. + items: + properties: + accountId: + description: The account ID that this configuration + applies to. 
+ type: string + default: + description: Indicates whether to set the specified + Amazon EC2 launch template as the default launch + template. Defaults to true. + type: boolean + launchTemplateId: + description: The ID of the Amazon EC2 launch template + to use. + type: string + type: object + type: array + licenseConfigurationArns: + description: Set of Amazon Resource Names (ARNs) of License + Manager License Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: AWS Region for the distribution. + type: string + type: object + type: array + id: + type: string + name: + description: Name of the distribution configuration. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/imagebuilder.aws.upbound.io_imagepipelines.yaml b/package/crds/imagebuilder.aws.upbound.io_imagepipelines.yaml index a6e59c5e52..03d4ae4ca9 100644 --- a/package/crds/imagebuilder.aws.upbound.io_imagepipelines.yaml +++ b/package/crds/imagebuilder.aws.upbound.io_imagepipelines.yaml @@ -974,3 +974,932 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ImagePipeline is the Schema for the ImagePipelines API. Manages + an Image Builder Image Pipeline + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ImagePipelineSpec defines the desired state of ImagePipeline + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerRecipeArn: + description: Amazon Resource Name (ARN) of the container recipe. + type: string + description: + description: Description of the image pipeline. + type: string + distributionConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Distribution + Configuration. + type: string + enhancedImageMetadataEnabled: + description: Whether additional information about the image being + created is collected. Defaults to true. + type: boolean + imageRecipeArn: + description: Amazon Resource Name (ARN) of the image recipe. 
+ type: string + imageRecipeArnRef: + description: Reference to a ImageRecipe in imagebuilder to populate + imageRecipeArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageRecipeArnSelector: + description: Selector for a ImageRecipe in imagebuilder to populate + imageRecipeArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageScanningConfiguration: + description: Configuration block with image scanning configuration. + Detailed below. + properties: + ecrConfiguration: + description: Configuration block with ECR configuration for + image scanning. Detailed below. + properties: + containerTags: + description: Key-value map of resource tags. + items: + type: string + type: array + x-kubernetes-list-type: set + repositoryName: + description: The name of the repository to scan + type: string + type: object + imageScanningEnabled: + description: Whether image scans are enabled. Defaults to + false. + type: boolean + type: object + imageTestsConfiguration: + description: Configuration block with image tests configuration. + Detailed below. + properties: + imageTestsEnabled: + description: Whether image tests are enabled. Defaults to + true. + type: boolean + timeoutMinutes: + description: Number of minutes before image tests time out. + Valid values are between 60 and 1440. Defaults to 720. + type: number + type: object + infrastructureConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Infrastructure + Configuration. + type: string + infrastructureConfigurationArnRef: + description: Reference to a InfrastructureConfiguration in imagebuilder + to populate infrastructureConfigurationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + infrastructureConfigurationArnSelector: + description: Selector for a InfrastructureConfiguration in imagebuilder + to populate infrastructureConfigurationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name of the image pipeline. + type: string + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + schedule: + description: Configuration block with schedule settings. Detailed + below. + properties: + pipelineExecutionStartCondition: + description: Condition when the pipeline should trigger a + new image build. Valid values are EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE + and EXPRESSION_MATCH_ONLY. Defaults to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE. + type: string + scheduleExpression: + description: Cron expression of how often the pipeline start + condition is evaluated. For example, cron(0 0 * * ? *) is + evaluated every day at midnight UTC. Configurations using + the five field syntax that was previously accepted by the + API, such as cron(0 0 * * *), must be updated to the six + field syntax. For more information, see the Image Builder + User Guide. + type: string + timezone: + description: The timezone that applies to the scheduling expression. + For example, "Etc/UTC", "America/Los_Angeles" in the IANA + timezone format. If not specified this defaults to UTC. + type: string + type: object + status: + description: Status of the image pipeline. Valid values are DISABLED + and ENABLED. Defaults to ENABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerRecipeArn: + description: Amazon Resource Name (ARN) of the container recipe. + type: string + description: + description: Description of the image pipeline. + type: string + distributionConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Distribution + Configuration. + type: string + enhancedImageMetadataEnabled: + description: Whether additional information about the image being + created is collected. Defaults to true. + type: boolean + imageRecipeArn: + description: Amazon Resource Name (ARN) of the image recipe. + type: string + imageRecipeArnRef: + description: Reference to a ImageRecipe in imagebuilder to populate + imageRecipeArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageRecipeArnSelector: + description: Selector for a ImageRecipe in imagebuilder to populate + imageRecipeArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageScanningConfiguration: + description: Configuration block with image scanning configuration. + Detailed below. + properties: + ecrConfiguration: + description: Configuration block with ECR configuration for + image scanning. Detailed below. + properties: + containerTags: + description: Key-value map of resource tags. + items: + type: string + type: array + x-kubernetes-list-type: set + repositoryName: + description: The name of the repository to scan + type: string + type: object + imageScanningEnabled: + description: Whether image scans are enabled. Defaults to + false. + type: boolean + type: object + imageTestsConfiguration: + description: Configuration block with image tests configuration. + Detailed below. + properties: + imageTestsEnabled: + description: Whether image tests are enabled. Defaults to + true. 
+ type: boolean + timeoutMinutes: + description: Number of minutes before image tests time out. + Valid values are between 60 and 1440. Defaults to 720. + type: number + type: object + infrastructureConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Infrastructure + Configuration. + type: string + infrastructureConfigurationArnRef: + description: Reference to a InfrastructureConfiguration in imagebuilder + to populate infrastructureConfigurationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + infrastructureConfigurationArnSelector: + description: Selector for a InfrastructureConfiguration in imagebuilder + to populate infrastructureConfigurationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name of the image pipeline. + type: string + schedule: + description: Configuration block with schedule settings. Detailed + below. + properties: + pipelineExecutionStartCondition: + description: Condition when the pipeline should trigger a + new image build. Valid values are EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE + and EXPRESSION_MATCH_ONLY. Defaults to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE. + type: string + scheduleExpression: + description: Cron expression of how often the pipeline start + condition is evaluated. For example, cron(0 0 * * ? *) is + evaluated every day at midnight UTC. Configurations using + the five field syntax that was previously accepted by the + API, such as cron(0 0 * * *), must be updated to the six + field syntax. For more information, see the Image Builder + User Guide. + type: string + timezone: + description: The timezone that applies to the scheduling expression. + For example, "Etc/UTC", "America/Los_Angeles" in the IANA + timezone format. If not specified this defaults to UTC. + type: string + type: object + status: + description: Status of the image pipeline. Valid values are DISABLED + and ENABLED. Defaults to ENABLED. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ImagePipelineStatus defines the observed state of ImagePipeline. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the image pipeline. + type: string + containerRecipeArn: + description: Amazon Resource Name (ARN) of the container recipe. + type: string + dateCreated: + description: Date the image pipeline was created. + type: string + dateLastRun: + description: Date the image pipeline was last run. + type: string + dateNextRun: + description: Date the image pipeline will run next. + type: string + dateUpdated: + description: Date the image pipeline was updated. + type: string + description: + description: Description of the image pipeline. + type: string + distributionConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Distribution + Configuration. + type: string + enhancedImageMetadataEnabled: + description: Whether additional information about the image being + created is collected. Defaults to true. + type: boolean + id: + type: string + imageRecipeArn: + description: Amazon Resource Name (ARN) of the image recipe. + type: string + imageScanningConfiguration: + description: Configuration block with image scanning configuration. + Detailed below. + properties: + ecrConfiguration: + description: Configuration block with ECR configuration for + image scanning. Detailed below. + properties: + containerTags: + description: Key-value map of resource tags. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + repositoryName: + description: The name of the repository to scan + type: string + type: object + imageScanningEnabled: + description: Whether image scans are enabled. Defaults to + false. + type: boolean + type: object + imageTestsConfiguration: + description: Configuration block with image tests configuration. + Detailed below. + properties: + imageTestsEnabled: + description: Whether image tests are enabled. Defaults to + true. + type: boolean + timeoutMinutes: + description: Number of minutes before image tests time out. + Valid values are between 60 and 1440. Defaults to 720. + type: number + type: object + infrastructureConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Infrastructure + Configuration. + type: string + name: + description: Name of the image pipeline. + type: string + platform: + description: Platform of the image pipeline. + type: string + schedule: + description: Configuration block with schedule settings. Detailed + below. + properties: + pipelineExecutionStartCondition: + description: Condition when the pipeline should trigger a + new image build. Valid values are EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE + and EXPRESSION_MATCH_ONLY. Defaults to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE. + type: string + scheduleExpression: + description: Cron expression of how often the pipeline start + condition is evaluated. For example, cron(0 0 * * ? *) is + evaluated every day at midnight UTC. Configurations using + the five field syntax that was previously accepted by the + API, such as cron(0 0 * * *), must be updated to the six + field syntax. For more information, see the Image Builder + User Guide. + type: string + timezone: + description: The timezone that applies to the scheduling expression. + For example, "Etc/UTC", "America/Los_Angeles" in the IANA + timezone format. If not specified this defaults to UTC. 
+ type: string + type: object + status: + description: Status of the image pipeline. Valid values are DISABLED + and ENABLED. Defaults to ENABLED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/imagebuilder.aws.upbound.io_imagerecipes.yaml b/package/crds/imagebuilder.aws.upbound.io_imagerecipes.yaml index e0c3327a02..e3dec0cae0 100644 --- a/package/crds/imagebuilder.aws.upbound.io_imagerecipes.yaml +++ b/package/crds/imagebuilder.aws.upbound.io_imagerecipes.yaml @@ -891,3 +891,864 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ImageRecipe is the Schema for the ImageRecipes API. Manage an + Image Builder Image Recipe + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ImageRecipeSpec defines the desired state of ImageRecipe + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + blockDeviceMapping: + description: Configuration block(s) with block device mappings + for the image recipe. Detailed below. + items: + properties: + deviceName: + description: Name of the device. For example, /dev/sda or + /dev/xvdb. + type: string + ebs: + description: Configuration block with Elastic Block Storage + (EBS) block device mapping settings. Detailed below. + properties: + deleteOnTermination: + description: Whether to delete the volume on termination. + Defaults to unset, which is the value inherited from + the parent image. + type: string + encrypted: + description: Whether to encrypt the volume. Defaults + to unset, which is the value inherited from the parent + image. + type: string + iops: + description: Number of Input/Output (I/O) operations + per second to provision for an io1 or io2 volume. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the Key Management + Service (KMS) Key for encryption. 
+ type: string + snapshotId: + description: Identifier of the EC2 Volume Snapshot. + type: string + throughput: + description: For GP3 volumes only. The throughput in + MiB/s that the volume supports. + type: number + volumeSize: + description: Size of the volume, in GiB. + type: number + volumeType: + description: Type of the volume. For example, gp2 or + io2. + type: string + type: object + noDevice: + description: Set to true to remove a mapping from the parent + image. + type: boolean + virtualName: + description: Virtual device name. For example, ephemeral0. + Instance store volumes are numbered starting from 0. + type: string + type: object + type: array + component: + description: Ordered configuration block(s) with components for + the image recipe. Detailed below. + items: + properties: + componentArn: + description: Amazon Resource Name (ARN) of the Image Builder + Component to associate. + type: string + componentArnRef: + description: Reference to a Component in imagebuilder to + populate componentArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + componentArnSelector: + description: Selector for a Component in imagebuilder to + populate componentArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameter: + description: Configuration block(s) for parameters to configure + the component. Detailed below. + items: + properties: + name: + description: The name of the component parameter. + type: string + value: + description: The value for the named component parameter. + type: string + type: object + type: array + type: object + type: array + description: + description: Description of the image recipe. + type: string + name: + description: Name of the image recipe. + type: string + parentImage: + description: The image recipe uses this image as a base from which + to build your customized image. The value can be the base image + ARN or an AMI ID. 
+ type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + systemsManagerAgent: + description: Configuration block for the Systems Manager Agent + installed by default by Image Builder. Detailed below. + properties: + uninstallAfterBuild: + description: Whether to remove the Systems Manager Agent after + the image has been built. Defaults to false. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userDataBase64: + description: Base64 encoded user data. Use this to provide commands + or a command script to run when you launch your build instance. + type: string + version: + description: 'The semantic version of the image recipe, which + specifies the version in the following format, with numeric + values in each position to indicate a specific version: major.minor.patch. + For example: 1.0.0.' + type: string + workingDirectory: + description: The working directory to be used during build and + test workflows. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + blockDeviceMapping: + description: Configuration block(s) with block device mappings + for the image recipe. Detailed below. 
+ items: + properties: + deviceName: + description: Name of the device. For example, /dev/sda or + /dev/xvdb. + type: string + ebs: + description: Configuration block with Elastic Block Storage + (EBS) block device mapping settings. Detailed below. + properties: + deleteOnTermination: + description: Whether to delete the volume on termination. + Defaults to unset, which is the value inherited from + the parent image. + type: string + encrypted: + description: Whether to encrypt the volume. Defaults + to unset, which is the value inherited from the parent + image. + type: string + iops: + description: Number of Input/Output (I/O) operations + per second to provision for an io1 or io2 volume. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the Key Management + Service (KMS) Key for encryption. + type: string + snapshotId: + description: Identifier of the EC2 Volume Snapshot. + type: string + throughput: + description: For GP3 volumes only. The throughput in + MiB/s that the volume supports. + type: number + volumeSize: + description: Size of the volume, in GiB. + type: number + volumeType: + description: Type of the volume. For example, gp2 or + io2. + type: string + type: object + noDevice: + description: Set to true to remove a mapping from the parent + image. + type: boolean + virtualName: + description: Virtual device name. For example, ephemeral0. + Instance store volumes are numbered starting from 0. + type: string + type: object + type: array + component: + description: Ordered configuration block(s) with components for + the image recipe. Detailed below. + items: + properties: + componentArn: + description: Amazon Resource Name (ARN) of the Image Builder + Component to associate. + type: string + componentArnRef: + description: Reference to a Component in imagebuilder to + populate componentArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + componentArnSelector: + description: Selector for a Component in imagebuilder to + populate componentArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameter: + description: Configuration block(s) for parameters to configure + the component. Detailed below. + items: + properties: + name: + description: The name of the component parameter. + type: string + value: + description: The value for the named component parameter. + type: string + type: object + type: array + type: object + type: array + description: + description: Description of the image recipe. + type: string + name: + description: Name of the image recipe. + type: string + parentImage: + description: The image recipe uses this image as a base from which + to build your customized image. The value can be the base image + ARN or an AMI ID. + type: string + systemsManagerAgent: + description: Configuration block for the Systems Manager Agent + installed by default by Image Builder. Detailed below. + properties: + uninstallAfterBuild: + description: Whether to remove the Systems Manager Agent after + the image has been built. Defaults to false. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userDataBase64: + description: Base64 encoded user data. Use this to provide commands + or a command script to run when you launch your build instance. + type: string + version: + description: 'The semantic version of the image recipe, which + specifies the version in the following format, with numeric + values in each position to indicate a specific version: major.minor.patch. + For example: 1.0.0.' + type: string + workingDirectory: + description: The working directory to be used during build and + test workflows. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.component is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.component) + || (has(self.initProvider) && has(self.initProvider.component))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.parentImage is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.parentImage) + || (has(self.initProvider) && has(self.initProvider.parentImage))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: ImageRecipeStatus defines the observed state of ImageRecipe. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the image recipe. + type: string + blockDeviceMapping: + description: Configuration block(s) with block device mappings + for the image recipe. Detailed below. + items: + properties: + deviceName: + description: Name of the device. For example, /dev/sda or + /dev/xvdb. + type: string + ebs: + description: Configuration block with Elastic Block Storage + (EBS) block device mapping settings. Detailed below. + properties: + deleteOnTermination: + description: Whether to delete the volume on termination. 
+ Defaults to unset, which is the value inherited from + the parent image. + type: string + encrypted: + description: Whether to encrypt the volume. Defaults + to unset, which is the value inherited from the parent + image. + type: string + iops: + description: Number of Input/Output (I/O) operations + per second to provision for an io1 or io2 volume. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of the Key Management + Service (KMS) Key for encryption. + type: string + snapshotId: + description: Identifier of the EC2 Volume Snapshot. + type: string + throughput: + description: For GP3 volumes only. The throughput in + MiB/s that the volume supports. + type: number + volumeSize: + description: Size of the volume, in GiB. + type: number + volumeType: + description: Type of the volume. For example, gp2 or + io2. + type: string + type: object + noDevice: + description: Set to true to remove a mapping from the parent + image. + type: boolean + virtualName: + description: Virtual device name. For example, ephemeral0. + Instance store volumes are numbered starting from 0. + type: string + type: object + type: array + component: + description: Ordered configuration block(s) with components for + the image recipe. Detailed below. + items: + properties: + componentArn: + description: Amazon Resource Name (ARN) of the Image Builder + Component to associate. + type: string + parameter: + description: Configuration block(s) for parameters to configure + the component. Detailed below. + items: + properties: + name: + description: The name of the component parameter. + type: string + value: + description: The value for the named component parameter. + type: string + type: object + type: array + type: object + type: array + dateCreated: + description: Date the image recipe was created. + type: string + description: + description: Description of the image recipe. + type: string + id: + type: string + name: + description: Name of the image recipe. 
+ type: string + owner: + description: Owner of the image recipe. + type: string + parentImage: + description: The image recipe uses this image as a base from which + to build your customized image. The value can be the base image + ARN or an AMI ID. + type: string + platform: + description: Platform of the image recipe. + type: string + systemsManagerAgent: + description: Configuration block for the Systems Manager Agent + installed by default by Image Builder. Detailed below. + properties: + uninstallAfterBuild: + description: Whether to remove the Systems Manager Agent after + the image has been built. Defaults to false. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + userDataBase64: + description: Base64 encoded user data. Use this to provide commands + or a command script to run when you launch your build instance. + type: string + version: + description: 'The semantic version of the image recipe, which + specifies the version in the following format, with numeric + values in each position to indicate a specific version: major.minor.patch. + For example: 1.0.0.' + type: string + workingDirectory: + description: The working directory to be used during build and + test workflows. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/imagebuilder.aws.upbound.io_images.yaml b/package/crds/imagebuilder.aws.upbound.io_images.yaml index 872593903f..314173b039 100644 --- a/package/crds/imagebuilder.aws.upbound.io_images.yaml +++ b/package/crds/imagebuilder.aws.upbound.io_images.yaml @@ -1178,3 +1178,1145 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Image is the Schema for the Images API. Manages an Image Builder + Image + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ImageSpec defines the desired state of Image + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerRecipeArn: + description: '- Amazon Resource Name (ARN) of the container recipe.' + type: string + distributionConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Distribution + Configuration. + type: string + distributionConfigurationArnRef: + description: Reference to a DistributionConfiguration in imagebuilder + to populate distributionConfigurationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + distributionConfigurationArnSelector: + description: Selector for a DistributionConfiguration in imagebuilder + to populate distributionConfigurationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enhancedImageMetadataEnabled: + description: Whether additional information about the image being + created is collected. Defaults to true. + type: boolean + executionRole: + description: Amazon Resource Name (ARN) of the service-linked + role to be used by Image Builder to execute workflows. + type: string + imageRecipeArn: + description: Amazon Resource Name (ARN) of the image recipe. 
+ type: string + imageRecipeArnRef: + description: Reference to a ImageRecipe in imagebuilder to populate + imageRecipeArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageRecipeArnSelector: + description: Selector for a ImageRecipe in imagebuilder to populate + imageRecipeArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageScanningConfiguration: + description: Configuration block with image scanning configuration. + Detailed below. + properties: + ecrConfiguration: + description: Configuration block with ECR configuration. Detailed + below. + properties: + containerTags: + description: Set of tags for Image Builder to apply to + the output container image that that Amazon Inspector + scans. + items: + type: string + type: array + x-kubernetes-list-type: set + repositoryName: + description: The name of the container repository that + Amazon Inspector scans to identify findings for your + container images. + type: string + type: object + imageScanningEnabled: + description: Indicates whether Image Builder keeps a snapshot + of the vulnerability scans that Amazon Inspector runs against + the build instance when you create a new image. Defaults + to false. + type: boolean + type: object + imageTestsConfiguration: + description: Configuration block with image tests configuration. + Detailed below. + properties: + imageTestsEnabled: + description: Whether image tests are enabled. Defaults to + true. + type: boolean + timeoutMinutes: + description: Number of minutes before image tests time out. + Valid values are between 60 and 1440. Defaults to 720. + type: number + type: object + infrastructureConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Infrastructure + Configuration. + type: string + infrastructureConfigurationArnRef: + description: Reference to a InfrastructureConfiguration in imagebuilder + to populate infrastructureConfigurationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + infrastructureConfigurationArnSelector: + description: Selector for a InfrastructureConfiguration in imagebuilder + to populate infrastructureConfigurationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: |- + Region of the AMI. + Region is the region you'd like your resource to be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + workflow: + description: Configuration block with the workflow configuration. + Detailed below. + items: + properties: + onFailure: + description: The action to take if the workflow fails. Must + be one of CONTINUE or ABORT. + type: string + parallelGroup: + description: The parallel group in which to run a test Workflow. + type: string + parameter: + description: Configuration block for the workflow parameters. + Detailed below. + items: + properties: + name: + description: The name of the Workflow parameter. + type: string + value: + description: The value of the Workflow parameter. + type: string + type: object + type: array + workflowArn: + description: Amazon Resource Name (ARN) of the Image Builder + Workflow. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerRecipeArn: + description: '- Amazon Resource Name (ARN) of the container recipe.' 
+ type: string + distributionConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Distribution + Configuration. + type: string + distributionConfigurationArnRef: + description: Reference to a DistributionConfiguration in imagebuilder + to populate distributionConfigurationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + distributionConfigurationArnSelector: + description: Selector for a DistributionConfiguration in imagebuilder + to populate distributionConfigurationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enhancedImageMetadataEnabled: + description: Whether additional information about the image being + created is collected. Defaults to true. + type: boolean + executionRole: + description: Amazon Resource Name (ARN) of the service-linked + role to be used by Image Builder to execute workflows. + type: string + imageRecipeArn: + description: Amazon Resource Name (ARN) of the image recipe. + type: string + imageRecipeArnRef: + description: Reference to a ImageRecipe in imagebuilder to populate + imageRecipeArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageRecipeArnSelector: + description: Selector for a ImageRecipe in imagebuilder to populate + imageRecipeArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageScanningConfiguration: + description: Configuration block with image scanning configuration. + Detailed below. + properties: + ecrConfiguration: + description: Configuration block with ECR configuration. Detailed + below. + properties: + containerTags: + description: Set of tags for Image Builder to apply to + the output container image that that Amazon Inspector + scans. + items: + type: string + type: array + x-kubernetes-list-type: set + repositoryName: + description: The name of the container repository that + Amazon Inspector scans to identify findings for your + container images. + type: string + type: object + imageScanningEnabled: + description: Indicates whether Image Builder keeps a snapshot + of the vulnerability scans that Amazon Inspector runs against + the build instance when you create a new image. Defaults + to false. 
+ type: boolean + type: object + imageTestsConfiguration: + description: Configuration block with image tests configuration. + Detailed below. + properties: + imageTestsEnabled: + description: Whether image tests are enabled. Defaults to + true. + type: boolean + timeoutMinutes: + description: Number of minutes before image tests time out. + Valid values are between 60 and 1440. Defaults to 720. + type: number + type: object + infrastructureConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Infrastructure + Configuration. + type: string + infrastructureConfigurationArnRef: + description: Reference to a InfrastructureConfiguration in imagebuilder + to populate infrastructureConfigurationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + infrastructureConfigurationArnSelector: + description: Selector for a InfrastructureConfiguration in imagebuilder + to populate infrastructureConfigurationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + workflow: + description: Configuration block with the workflow configuration. + Detailed below. + items: + properties: + onFailure: + description: The action to take if the workflow fails. Must + be one of CONTINUE or ABORT. + type: string + parallelGroup: + description: The parallel group in which to run a test Workflow. + type: string + parameter: + description: Configuration block for the workflow parameters. + Detailed below. + items: + properties: + name: + description: The name of the Workflow parameter. + type: string + value: + description: The value of the Workflow parameter. + type: string + type: object + type: array + workflowArn: + description: Amazon Resource Name (ARN) of the Image Builder + Workflow. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ImageStatus defines the observed state of Image. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the image. + type: string + containerRecipeArn: + description: '- Amazon Resource Name (ARN) of the container recipe.' + type: string + dateCreated: + description: Date the image was created. + type: string + distributionConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Distribution + Configuration. + type: string + enhancedImageMetadataEnabled: + description: Whether additional information about the image being + created is collected. 
Defaults to true. + type: boolean + executionRole: + description: Amazon Resource Name (ARN) of the service-linked + role to be used by Image Builder to execute workflows. + type: string + id: + type: string + imageRecipeArn: + description: Amazon Resource Name (ARN) of the image recipe. + type: string + imageScanningConfiguration: + description: Configuration block with image scanning configuration. + Detailed below. + properties: + ecrConfiguration: + description: Configuration block with ECR configuration. Detailed + below. + properties: + containerTags: + description: Set of tags for Image Builder to apply to + the output container image that that Amazon Inspector + scans. + items: + type: string + type: array + x-kubernetes-list-type: set + repositoryName: + description: The name of the container repository that + Amazon Inspector scans to identify findings for your + container images. + type: string + type: object + imageScanningEnabled: + description: Indicates whether Image Builder keeps a snapshot + of the vulnerability scans that Amazon Inspector runs against + the build instance when you create a new image. Defaults + to false. + type: boolean + type: object + imageTestsConfiguration: + description: Configuration block with image tests configuration. + Detailed below. + properties: + imageTestsEnabled: + description: Whether image tests are enabled. Defaults to + true. + type: boolean + timeoutMinutes: + description: Number of minutes before image tests time out. + Valid values are between 60 and 1440. Defaults to 720. + type: number + type: object + infrastructureConfigurationArn: + description: Amazon Resource Name (ARN) of the Image Builder Infrastructure + Configuration. + type: string + name: + description: Name of the AMI. + type: string + osVersion: + description: Operating System version of the image. + type: string + outputResources: + description: List of objects with resources created by the image. 
+ items: + properties: + amis: + description: Set of objects with each Amazon Machine Image + (AMI) created. + items: + properties: + accountId: + description: Account identifier of the AMI. + type: string + description: + description: Description of the AMI. + type: string + image: + description: Identifier of the AMI. + type: string + name: + description: The name of the Workflow parameter. + type: string + region: + description: Region of the AMI. + type: string + type: object + type: array + containers: + description: Set of objects with each container image created + and stored in the output repository. + items: + properties: + imageUris: + description: Set of URIs for created containers. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: Region of the AMI. + type: string + type: object + type: array + type: object + type: array + platform: + description: Platform of the image. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + version: + description: Version of the image. + type: string + workflow: + description: Configuration block with the workflow configuration. + Detailed below. + items: + properties: + onFailure: + description: The action to take if the workflow fails. Must + be one of CONTINUE or ABORT. + type: string + parallelGroup: + description: The parallel group in which to run a test Workflow. + type: string + parameter: + description: Configuration block for the workflow parameters. + Detailed below. + items: + properties: + name: + description: The name of the Workflow parameter. + type: string + value: + description: The value of the Workflow parameter. 
+ type: string + type: object + type: array + workflowArn: + description: Amazon Resource Name (ARN) of the Image Builder + Workflow. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/imagebuilder.aws.upbound.io_infrastructureconfigurations.yaml b/package/crds/imagebuilder.aws.upbound.io_infrastructureconfigurations.yaml index cbb4b49ae9..0831498b14 100644 --- a/package/crds/imagebuilder.aws.upbound.io_infrastructureconfigurations.yaml +++ b/package/crds/imagebuilder.aws.upbound.io_infrastructureconfigurations.yaml @@ -1514,3 +1514,1474 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: InfrastructureConfiguration is the Schema for the InfrastructureConfigurations + API. Manages an Image Builder Infrastructure Configuration + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InfrastructureConfigurationSpec defines the desired state + of InfrastructureConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description for the configuration. + type: string + instanceMetadataOptions: + description: Configuration block with instance metadata options + for the HTTP requests that pipeline builds use to launch EC2 + build and test instances. Detailed below. + properties: + httpPutResponseHopLimit: + description: The number of hops that an instance can traverse + to reach its destonation. + type: number + httpTokens: + description: 'Whether a signed token is required for instance + metadata retrieval requests. Valid values: required, optional.' + type: string + type: object + instanceProfileName: + description: Name of IAM Instance Profile. + type: string + instanceProfileNameRef: + description: Reference to a InstanceProfile in iam to populate + instanceProfileName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceProfileNameSelector: + description: Selector for a InstanceProfile in iam to populate + instanceProfileName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + instanceTypes: + description: Set of EC2 Instance Types. + items: + type: string + type: array + x-kubernetes-list-type: set + keyPair: + description: Name of EC2 Key Pair. + type: string + keyPairRef: + description: Reference to a KeyPair in ec2 to populate keyPair. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyPairSelector: + description: Selector for a KeyPair in ec2 to populate keyPair. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logging: + description: Configuration block with logging settings. Detailed + below. + properties: + s3Logs: + description: Configuration block with S3 logging settings. + Detailed below. + properties: + s3BucketName: + description: Name of the S3 Bucket. + type: string + s3BucketNameRef: + description: Reference to a Bucket in s3 to populate s3BucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + s3BucketNameSelector: + description: Selector for a Bucket in s3 to populate s3BucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3KeyPrefix: + description: Prefix to use for S3 logs. Defaults to /. + type: string + type: object + type: object + name: + description: Name for the configuration. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resourceTags: + additionalProperties: + type: string + description: Key-value map of resource tags to assign to infrastructure + created by the configuration. + type: object + x-kubernetes-map-type: granular + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: Set of EC2 Security Group identifiers. + items: + type: string + type: array + x-kubernetes-list-type: set + snsTopicArn: + description: Amazon Resource Name (ARN) of SNS Topic. 
+ type: string + snsTopicArnRef: + description: Reference to a Topic in sns to populate snsTopicArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snsTopicArnSelector: + description: Selector for a Topic in sns to populate snsTopicArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: EC2 Subnet identifier. Also requires security_group_ids + argument. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + terminateInstanceOnFailure: + description: Enable if the instance should be terminated when + the pipeline fails. Defaults to false. + type: boolean + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description for the configuration. + type: string + instanceMetadataOptions: + description: Configuration block with instance metadata options + for the HTTP requests that pipeline builds use to launch EC2 + build and test instances. Detailed below. 
+ properties: + httpPutResponseHopLimit: + description: The number of hops that an instance can traverse + to reach its destination. + type: number + httpTokens: + description: 'Whether a signed token is required for instance + metadata retrieval requests. Valid values: required, optional.' + type: string + type: object + instanceProfileName: + description: Name of IAM Instance Profile. + type: string + instanceProfileNameRef: + description: Reference to a InstanceProfile in iam to populate + instanceProfileName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + instanceProfileNameSelector: + description: Selector for a InstanceProfile in iam to populate + instanceProfileName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + instanceTypes: + description: Set of EC2 Instance Types. + items: + type: string + type: array + x-kubernetes-list-type: set + keyPair: + description: Name of EC2 Key Pair. + type: string + keyPairRef: + description: Reference to a KeyPair in ec2 to populate keyPair. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyPairSelector: + description: Selector for a KeyPair in ec2 to populate keyPair. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logging: + description: Configuration block with logging settings. Detailed + below. + properties: + s3Logs: + description: Configuration block with S3 logging settings. + Detailed below. + properties: + s3BucketName: + description: Name of the S3 Bucket. + type: string + s3BucketNameRef: + description: Reference to a Bucket in s3 to populate s3BucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + s3BucketNameSelector: + description: Selector for a Bucket in s3 to populate s3BucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3KeyPrefix: + description: Prefix to use for S3 logs. Defaults to /. + type: string + type: object + type: object + name: + description: Name for the configuration. + type: string + resourceTags: + additionalProperties: + type: string + description: Key-value map of resource tags to assign to infrastructure + created by the configuration. 
+ type: object + x-kubernetes-map-type: granular + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: Set of EC2 Security Group identifiers. + items: + type: string + type: array + x-kubernetes-list-type: set + snsTopicArn: + description: Amazon Resource Name (ARN) of SNS Topic. + type: string + snsTopicArnRef: + description: Reference to a Topic in sns to populate snsTopicArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snsTopicArnSelector: + description: Selector for a Topic in sns to populate snsTopicArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: EC2 Subnet identifier. Also requires security_group_ids + argument. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + terminateInstanceOnFailure: + description: Enable if the instance should be terminated when + the pipeline fails. Defaults to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: InfrastructureConfigurationStatus defines the observed state + of InfrastructureConfiguration. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the configuration. + type: string + dateCreated: + description: Date when the configuration was created. 
+ type: string + dateUpdated: + description: Date when the configuration was updated. + type: string + description: + description: Description for the configuration. + type: string + id: + description: Amazon Resource Name (ARN) of the configuration. + type: string + instanceMetadataOptions: + description: Configuration block with instance metadata options + for the HTTP requests that pipeline builds use to launch EC2 + build and test instances. Detailed below. + properties: + httpPutResponseHopLimit: + description: The number of hops that an instance can traverse + to reach its destination. + type: number + httpTokens: + description: 'Whether a signed token is required for instance + metadata retrieval requests. Valid values: required, optional.' + type: string + type: object + instanceProfileName: + description: Name of IAM Instance Profile. + type: string + instanceTypes: + description: Set of EC2 Instance Types. + items: + type: string + type: array + x-kubernetes-list-type: set + keyPair: + description: Name of EC2 Key Pair. + type: string + logging: + description: Configuration block with logging settings. Detailed + below. + properties: + s3Logs: + description: Configuration block with S3 logging settings. + Detailed below. + properties: + s3BucketName: + description: Name of the S3 Bucket. + type: string + s3KeyPrefix: + description: Prefix to use for S3 logs. Defaults to /. + type: string + type: object + type: object + name: + description: Name for the configuration. + type: string + resourceTags: + additionalProperties: + type: string + description: Key-value map of resource tags to assign to infrastructure + created by the configuration. + type: object + x-kubernetes-map-type: granular + securityGroupIds: + description: Set of EC2 Security Group identifiers. + items: + type: string + type: array + x-kubernetes-list-type: set + snsTopicArn: + description: Amazon Resource Name (ARN) of SNS Topic. 
+ type: string + subnetId: + description: EC2 Subnet identifier. Also requires security_group_ids + argument. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + terminateInstanceOnFailure: + description: Enable if the instance should be terminated when + the pipeline fails. Defaults to false. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/iot.aws.upbound.io_indexingconfigurations.yaml b/package/crds/iot.aws.upbound.io_indexingconfigurations.yaml index 29787e4b46..a5cc85323f 100644 --- a/package/crds/iot.aws.upbound.io_indexingconfigurations.yaml +++ b/package/crds/iot.aws.upbound.io_indexingconfigurations.yaml @@ -646,3 +646,604 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: IndexingConfiguration is the Schema for the IndexingConfigurations + API. Managing IoT Thing indexing. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IndexingConfigurationSpec defines the desired state of IndexingConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + region: + description: Region is the region you'd like your resource to + be created in. + type: string + thingGroupIndexingConfiguration: + description: Thing group indexing configuration. See below. + properties: + customField: + description: A list of thing group fields to index. This list + cannot contain any managed fields. See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' + type: string + type: object + type: array + managedField: + description: Contains fields that are indexed and whose types + are already known by the Fleet Indexing service. See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' 
+ type: string + type: object + type: array + thingGroupIndexingMode: + description: 'Thing group indexing mode. Valid values: OFF, + ON.' + type: string + type: object + thingIndexingConfiguration: + description: Thing indexing configuration. See below. + properties: + customField: + description: Contains custom field names and their data type. + See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' + type: string + type: object + type: array + deviceDefenderIndexingMode: + description: 'Device Defender indexing mode. Valid values: + VIOLATIONS, OFF. Default: OFF.' + type: string + filter: + description: Required if named_shadow_indexing_mode is ON. + Enables to add named shadows filtered by filter to fleet + indexing configuration. + properties: + namedShadowNames: + description: List of shadow names that you select to index. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managedField: + description: Contains fields that are indexed and whose types + are already known by the Fleet Indexing service. See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' + type: string + type: object + type: array + namedShadowIndexingMode: + description: 'Named shadow indexing mode. Valid values: ON, + OFF. Default: OFF.' + type: string + thingConnectivityIndexingMode: + description: 'Thing connectivity indexing mode. Valid values: + STATUS, OFF. Default: OFF.' + type: string + thingIndexingMode: + description: 'Thing indexing mode. Valid values: REGISTRY, + REGISTRY_AND_SHADOW, OFF.' + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + thingGroupIndexingConfiguration: + description: Thing group indexing configuration. See below. + properties: + customField: + description: A list of thing group fields to index. This list + cannot contain any managed fields. See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' + type: string + type: object + type: array + managedField: + description: Contains fields that are indexed and whose types + are already known by the Fleet Indexing service. See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' + type: string + type: object + type: array + thingGroupIndexingMode: + description: 'Thing group indexing mode. Valid values: OFF, + ON.' + type: string + type: object + thingIndexingConfiguration: + description: Thing indexing configuration. See below. + properties: + customField: + description: Contains custom field names and their data type. + See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' 
+ type: string + type: object + type: array + deviceDefenderIndexingMode: + description: 'Device Defender indexing mode. Valid values: + VIOLATIONS, OFF. Default: OFF.' + type: string + filter: + description: Required if named_shadow_indexing_mode is ON. + Enables to add named shadows filtered by filter to fleet + indexing configuration. + properties: + namedShadowNames: + description: List of shadow names that you select to index. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managedField: + description: Contains fields that are indexed and whose types + are already known by the Fleet Indexing service. See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' + type: string + type: object + type: array + namedShadowIndexingMode: + description: 'Named shadow indexing mode. Valid values: ON, + OFF. Default: OFF.' + type: string + thingConnectivityIndexingMode: + description: 'Thing connectivity indexing mode. Valid values: + STATUS, OFF. Default: OFF.' + type: string + thingIndexingMode: + description: 'Thing indexing mode. Valid values: REGISTRY, + REGISTRY_AND_SHADOW, OFF.' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: IndexingConfigurationStatus defines the observed state of + IndexingConfiguration. + properties: + atProvider: + properties: + id: + type: string + thingGroupIndexingConfiguration: + description: Thing group indexing configuration. See below. + properties: + customField: + description: A list of thing group fields to index. This list + cannot contain any managed fields. See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' + type: string + type: object + type: array + managedField: + description: Contains fields that are indexed and whose types + are already known by the Fleet Indexing service. See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' 
+ type: string + type: object + type: array + thingGroupIndexingMode: + description: 'Thing group indexing mode. Valid values: OFF, + ON.' + type: string + type: object + thingIndexingConfiguration: + description: Thing indexing configuration. See below. + properties: + customField: + description: Contains custom field names and their data type. + See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' + type: string + type: object + type: array + deviceDefenderIndexingMode: + description: 'Device Defender indexing mode. Valid values: + VIOLATIONS, OFF. Default: OFF.' + type: string + filter: + description: Required if named_shadow_indexing_mode is ON. + Enables to add named shadows filtered by filter to fleet + indexing configuration. + properties: + namedShadowNames: + description: List of shadow names that you select to index. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managedField: + description: Contains fields that are indexed and whose types + are already known by the Fleet Indexing service. See below. + items: + properties: + name: + description: The name of the field. + type: string + type: + description: 'The data type of the field. Valid values: + Number, String, Boolean.' + type: string + type: object + type: array + namedShadowIndexingMode: + description: 'Named shadow indexing mode. Valid values: ON, + OFF. Default: OFF.' + type: string + thingConnectivityIndexingMode: + description: 'Thing connectivity indexing mode. Valid values: + STATUS, OFF. Default: OFF.' + type: string + thingIndexingMode: + description: 'Thing indexing mode. Valid values: REGISTRY, + REGISTRY_AND_SHADOW, OFF.' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/iot.aws.upbound.io_provisioningtemplates.yaml b/package/crds/iot.aws.upbound.io_provisioningtemplates.yaml index a2638a1117..0bc2bf038f 100644 --- a/package/crds/iot.aws.upbound.io_provisioningtemplates.yaml +++ b/package/crds/iot.aws.upbound.io_provisioningtemplates.yaml @@ -618,3 +618,597 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ProvisioningTemplate is the Schema for the ProvisioningTemplates + API. Manages an IoT fleet provisioning template. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ProvisioningTemplateSpec defines the desired state of ProvisioningTemplate + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the fleet provisioning template. + type: string + enabled: + description: True to enable the fleet provisioning template, otherwise + false. + type: boolean + preProvisioningHook: + description: Creates a pre-provisioning hook template. Details + below. + properties: + payloadVersion: + description: The version of the payload that was sent to the + target function. The only valid (and the default) payload + version is "2020-04-01". + type: string + targetArn: + description: The ARN of the target function. + type: string + type: object + provisioningRoleArn: + description: The role ARN for the role associated with the fleet + provisioning template. This IoT role grants permission to provision + a device. + type: string + provisioningRoleArnRef: + description: Reference to a Role in iam to populate provisioningRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + provisioningRoleArnSelector: + description: Selector for a Role in iam to populate provisioningRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + templateBody: + description: The JSON formatted contents of the fleet provisioning + template. + type: string + type: + description: The type you define in a provisioning template. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the fleet provisioning template. + type: string + enabled: + description: True to enable the fleet provisioning template, otherwise + false. + type: boolean + preProvisioningHook: + description: Creates a pre-provisioning hook template. Details + below. + properties: + payloadVersion: + description: The version of the payload that was sent to the + target function. The only valid (and the default) payload + version is "2020-04-01". + type: string + targetArn: + description: The ARN of the target function. + type: string + type: object + provisioningRoleArn: + description: The role ARN for the role associated with the fleet + provisioning template. 
This IoT role grants permission to provision + a device. + type: string + provisioningRoleArnRef: + description: Reference to a Role in iam to populate provisioningRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + provisioningRoleArnSelector: + description: Selector for a Role in iam to populate provisioningRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + templateBody: + description: The JSON formatted contents of the fleet provisioning + template. + type: string + type: + description: The type you define in a provisioning template. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.templateBody is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.templateBody) + || (has(self.initProvider) && has(self.initProvider.templateBody))' + status: + description: ProvisioningTemplateStatus defines the observed state of + ProvisioningTemplate. + properties: + atProvider: + properties: + arn: + description: The ARN that identifies the provisioning template. + type: string + defaultVersionId: + description: The default version of the fleet provisioning template. + type: number + description: + description: The description of the fleet provisioning template. + type: string + enabled: + description: True to enable the fleet provisioning template, otherwise + false. + type: boolean + id: + type: string + preProvisioningHook: + description: Creates a pre-provisioning hook template. Details + below. + properties: + payloadVersion: + description: The version of the payload that was sent to the + target function. The only valid (and the default) payload + version is "2020-04-01". + type: string + targetArn: + description: The ARN of the target function. + type: string + type: object + provisioningRoleArn: + description: The role ARN for the role associated with the fleet + provisioning template. This IoT role grants permission to provision + a device. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + templateBody: + description: The JSON formatted contents of the fleet provisioning + template. + type: string + type: + description: The type you define in a provisioning template. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/iot.aws.upbound.io_thinggroups.yaml b/package/crds/iot.aws.upbound.io_thinggroups.yaml index 6ba5d2222f..3d86ed613a 100644 --- a/package/crds/iot.aws.upbound.io_thinggroups.yaml +++ b/package/crds/iot.aws.upbound.io_thinggroups.yaml @@ -602,3 +602,575 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ThingGroup is the Schema for the ThingGroups API. Manages an + AWS IoT Thing Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ThingGroupSpec defines the desired state of ThingGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + parentGroupName: + description: The name of the parent Thing Group. + type: string + parentGroupNameRef: + description: Reference to a ThingGroup in iot to populate parentGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + parentGroupNameSelector: + description: Selector for a ThingGroup in iot to populate parentGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + properties: + description: The Thing Group properties. Defined below. + properties: + attributePayload: + description: The Thing Group attributes. Defined below. + properties: + attributes: + additionalProperties: + type: string + description: Key-value map. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: A description of the Thing Group. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + parentGroupName: + description: The name of the parent Thing Group. + type: string + parentGroupNameRef: + description: Reference to a ThingGroup in iot to populate parentGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + parentGroupNameSelector: + description: Selector for a ThingGroup in iot to populate parentGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + properties: + description: The Thing Group properties. Defined below. + properties: + attributePayload: + description: The Thing Group attributes. Defined below. + properties: + attributes: + additionalProperties: + type: string + description: Key-value map. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: A description of the Thing Group. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ThingGroupStatus defines the observed state of ThingGroup. + properties: + atProvider: + properties: + arn: + description: The ARN of the Thing Group. + type: string + id: + description: The Thing Group ID. + type: string + metadata: + items: + properties: + creationDate: + type: string + parentGroupName: + description: The name of the parent Thing Group. + type: string + rootToParentGroups: + items: + properties: + groupArn: + description: The ARN of the Thing Group. + type: string + groupName: + description: The name of the Thing Group. 
+ type: string + type: object + type: array + type: object + type: array + parentGroupName: + description: The name of the parent Thing Group. + type: string + properties: + description: The Thing Group properties. Defined below. + properties: + attributePayload: + description: The Thing Group attributes. Defined below. + properties: + attributes: + additionalProperties: + type: string + description: Key-value map. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: A description of the Thing Group. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + version: + description: The current version of the Thing Group record in + the registry. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/iot.aws.upbound.io_thingtypes.yaml b/package/crds/iot.aws.upbound.io_thingtypes.yaml index 5d1cd35f5a..59507514d7 100644 --- a/package/crds/iot.aws.upbound.io_thingtypes.yaml +++ b/package/crds/iot.aws.upbound.io_thingtypes.yaml @@ -432,3 +432,411 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ThingType is the Schema for the ThingTypes API. Creates and manages + an AWS IoT Thing Type. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ThingTypeSpec defines the desired state of ThingType + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deprecated: + description: Whether the thing type is deprecated. If true, no + new things could be associated with this type. + type: boolean + name: + description: The name of the thing type. + type: string + properties: + description: ', Configuration block that can contain the following + properties of the thing type:' + properties: + description: + description: The description of the thing type. + type: string + searchableAttributes: + description: A list of searchable thing attribute names. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deprecated: + description: Whether the thing type is deprecated. If true, no + new things could be associated with this type. + type: boolean + name: + description: The name of the thing type. + type: string + properties: + description: ', Configuration block that can contain the following + properties of the thing type:' + properties: + description: + description: The description of the thing type. + type: string + searchableAttributes: + description: A list of searchable thing attribute names. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ThingTypeStatus defines the observed state of ThingType. + properties: + atProvider: + properties: + arn: + description: The ARN of the created AWS IoT Thing Type. + type: string + deprecated: + description: Whether the thing type is deprecated. If true, no + new things could be associated with this type. 
+ type: boolean + id: + type: string + name: + description: The name of the thing type. + type: string + properties: + description: ', Configuration block that can contain the following + properties of the thing type:' + properties: + description: + description: The description of the thing type. + type: string + searchableAttributes: + description: A list of searchable thing attribute names. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/iot.aws.upbound.io_topicruledestinations.yaml b/package/crds/iot.aws.upbound.io_topicruledestinations.yaml index 10f746cd37..c5acdf1c65 100644 --- a/package/crds/iot.aws.upbound.io_topicruledestinations.yaml +++ b/package/crds/iot.aws.upbound.io_topicruledestinations.yaml @@ -1039,3 +1039,1018 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: TopicRuleDestination is the Schema for the TopicRuleDestinations + API. Creates and manages an AWS IoT topic rule destination + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TopicRuleDestinationSpec defines the desired state of TopicRuleDestination + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + enabled: + description: 'Whether or not to enable the destination. Default: + true.' + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + vpcConfiguration: + description: Configuration of the virtual private cloud (VPC) + connection. For more info, see the AWS documentation. + properties: + roleArn: + description: The ARN of a role that has permission to create + and attach to elastic network interfaces (ENIs). + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate + securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: The security groups of the VPC destination. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The subnet IDs of the VPC destination. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The ID of the VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + enabled: + description: 'Whether or not to enable the destination. Default: + true.' + type: boolean + vpcConfiguration: + description: Configuration of the virtual private cloud (VPC) + connection. For more info, see the AWS documentation. + properties: + roleArn: + description: The ARN of a role that has permission to create + and attach to elastic network interfaces (ENIs). + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate + securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroups. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: The security groups of the VPC destination. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The subnet IDs of the VPC destination. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The ID of the VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.vpcConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vpcConfiguration) + || (has(self.initProvider) && has(self.initProvider.vpcConfiguration))' + status: + description: TopicRuleDestinationStatus defines the observed state of + TopicRuleDestination. + properties: + atProvider: + properties: + arn: + description: The ARN of the topic rule destination + type: string + enabled: + description: 'Whether or not to enable the destination. Default: + true.' + type: boolean + id: + type: string + vpcConfiguration: + description: Configuration of the virtual private cloud (VPC) + connection. For more info, see the AWS documentation. + properties: + roleArn: + description: The ARN of a role that has permission to create + and attach to elastic network interfaces (ENIs). + type: string + securityGroups: + description: The security groups of the VPC destination. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The subnet IDs of the VPC destination. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The ID of the VPC. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/iot.aws.upbound.io_topicrules.yaml b/package/crds/iot.aws.upbound.io_topicrules.yaml index f9dcdc8e34..0d52cd42b3 100644 --- a/package/crds/iot.aws.upbound.io_topicrules.yaml +++ b/package/crds/iot.aws.upbound.io_topicrules.yaml @@ -3547,3 +3547,3374 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: TopicRule is the Schema for the TopicRules API. Creates and manages + an AWS IoT topic rule + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TopicRuleSpec defines the desired state of TopicRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cloudwatchAlarm: + items: + properties: + alarmName: + description: The CloudWatch alarm name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateReason: + description: The reason for the alarm change. + type: string + stateValue: + description: 'The value of the alarm state. Acceptable values + are: OK, ALARM, INSUFFICIENT_DATA.' + type: string + type: object + type: array + cloudwatchLogs: + items: + properties: + logGroupName: + description: The CloudWatch log group name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + cloudwatchMetric: + items: + properties: + metricName: + description: The CloudWatch metric name. + type: string + metricNamespace: + description: The CloudWatch metric namespace name. 
+ type: string + metricTimestamp: + description: An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + type: string + metricUnit: + description: 'The metric unit (supported units can be found + here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit)' + type: string + metricValue: + description: The CloudWatch metric value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + description: + description: The description of the rule. + type: string + dynamodb: + items: + properties: + hashKeyField: + description: The hash key name. + type: string + hashKeyType: + description: The hash key type. Valid values are "STRING" + or "NUMBER". + type: string + hashKeyValue: + description: The hash key value. + type: string + operation: + description: The operation. Valid values are "INSERT", "UPDATE", + or "DELETE". + type: string + payloadField: + description: The action payload. + type: string + rangeKeyField: + description: The range key name. + type: string + rangeKeyType: + description: The range key type. Valid values are "STRING" + or "NUMBER". + type: string + rangeKeyValue: + description: The range key value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + type: object + type: array + dynamodbv2: + items: + properties: + putItem: + description: Configuration block with DynamoDB Table to + which the message will be written. Nested arguments below. + properties: + tableName: + description: The name of the DynamoDB table. + type: string + type: object + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. 
+ type: string + type: object + type: array + elasticsearch: + items: + properties: + endpoint: + description: The endpoint of your Elasticsearch domain. + type: string + id: + description: The unique identifier for the document you + are storing. + type: string + index: + description: The Elasticsearch index where you want to store + your data. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: + description: The type of document you are storing. + type: string + type: object + type: array + enabled: + description: Specifies whether the rule is enabled. + type: boolean + errorAction: + description: Configuration block with error action to be associated + with the rule. See the documentation for cloudwatch_alarm, cloudwatch_logs, + cloudwatch_metric, dynamodb, dynamodbv2, elasticsearch, firehose, + http, iot_analytics, iot_events, kafka, kinesis, lambda, republish, + s3, sns, sqs, step_functions, timestream configuration blocks + for further configuration details. + properties: + cloudwatchAlarm: + properties: + alarmName: + description: The CloudWatch alarm name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateReason: + description: The reason for the alarm change. + type: string + stateValue: + description: 'The value of the alarm state. Acceptable + values are: OK, ALARM, INSUFFICIENT_DATA.' + type: string + type: object + cloudwatchLogs: + properties: + logGroupName: + description: The CloudWatch log group name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + cloudwatchMetric: + properties: + metricName: + description: The CloudWatch metric name. + type: string + metricNamespace: + description: The CloudWatch metric namespace name. 
+ type: string + metricTimestamp: + description: An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + type: string + metricUnit: + description: 'The metric unit (supported units can be + found here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit)' + type: string + metricValue: + description: The CloudWatch metric value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + dynamodb: + properties: + hashKeyField: + description: The hash key name. + type: string + hashKeyType: + description: The hash key type. Valid values are "STRING" + or "NUMBER". + type: string + hashKeyValue: + description: The hash key value. + type: string + operation: + description: The operation. Valid values are "INSERT", + "UPDATE", or "DELETE". + type: string + payloadField: + description: The action payload. + type: string + rangeKeyField: + description: The range key name. + type: string + rangeKeyType: + description: The range key type. Valid values are "STRING" + or "NUMBER". + type: string + rangeKeyValue: + description: The range key value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + type: object + dynamodbv2: + properties: + putItem: + description: Configuration block with DynamoDB Table to + which the message will be written. Nested arguments + below. + properties: + tableName: + description: The name of the DynamoDB table. + type: string + type: object + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + elasticsearch: + properties: + endpoint: + description: The endpoint of your Elasticsearch domain. 
+ type: string + id: + description: The unique identifier for the document you + are storing. + type: string + index: + description: The Elasticsearch index where you want to + store your data. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: + description: The type of document you are storing. + type: string + type: object + firehose: + properties: + batchMode: + description: The payload that contains a JSON array of + records will be sent to Kinesis Firehose via a batch + call. + type: boolean + deliveryStreamName: + description: The delivery stream name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + separator: + description: 'A character separator that is used to separate + records written to the Firehose stream. Valid values + are: ''\n'' (newline), ''\t'' (tab), ''\r\n'' (Windows + newline), '','' (comma).' + type: string + type: object + http: + properties: + confirmationUrl: + description: The HTTPS URL used to verify ownership of + url. + type: string + httpHeader: + description: Custom HTTP header IoT Core should send. + It is possible to define more than one custom header. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + url: + description: The HTTPS URL. + type: string + type: object + iotAnalytics: + properties: + batchMode: + description: The payload that contains a JSON array of + records will be sent to Kinesis Firehose via a batch + call. + type: boolean + channelName: + description: Name of AWS IOT Analytics channel. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. 
+ type: string + type: object + iotEvents: + properties: + batchMode: + description: The payload that contains a JSON array of + records will be sent to Kinesis Firehose via a batch + call. + type: boolean + inputName: + description: The name of the AWS IoT Events input. + type: string + messageId: + description: Use this to ensure that only one input (message) + with a given messageId is processed by an AWS IoT Events + detector. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + kafka: + properties: + clientProperties: + additionalProperties: + type: string + description: Properties of the Apache Kafka producer client. + For more info, see the AWS documentation. + type: object + x-kubernetes-map-type: granular + destinationArn: + description: The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + type: string + header: + description: The list of Kafka headers that you specify. + Nested arguments below. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + key: + description: The name of the HTTP header. + type: string + partition: + description: The Kafka message partition. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + kinesis: + properties: + partitionKey: + description: The partition key. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + streamName: + description: The name of the Amazon Kinesis stream. + type: string + type: object + lambda: + properties: + functionArn: + description: The ARN of the Lambda function. + type: string + type: object + republish: + properties: + qos: + description: The Quality of Service (QoS) level to use + when republishing messages. 
Valid values are 0 or 1. + The default value is 0. + type: number + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + s3: + properties: + bucketName: + description: The Amazon S3 bucket name. + type: string + cannedAcl: + description: The Amazon S3 canned ACL that controls access + to the object identified by the object key. Valid values. + type: string + key: + description: The name of the HTTP header. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + sns: + properties: + messageFormat: + description: The message format of the message to publish. + Accepted values are "JSON" and "RAW". + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetArn: + description: The ARN of the SNS topic. + type: string + targetArnRef: + description: Reference to a Topic in sns to populate targetArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetArnSelector: + description: Selector for a Topic in sns to populate targetArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + sqs: + properties: + queueUrl: + description: The URL of the Amazon SQS queue. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + useBase64: + description: Specifies whether to use Base64 encoding. 
+ type: boolean + type: object + stepFunctions: + properties: + executionNamePrefix: + description: The prefix used to generate, along with a + UUID, the unique state machine execution name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateMachineName: + description: The name of the Step Functions state machine + whose execution will be started. + type: string + type: object + timestream: + properties: + databaseName: + description: The name of an Amazon Timestream database. + type: string + dimension: + description: Configuration blocks with metadata attributes + of the time series that are written in each measure + record. Nested arguments below. + items: + properties: + name: + description: The name of the rule. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + timestamp: + description: Configuration block specifying an application-defined + value to replace the default value assigned to the Timestream + record's timestamp in the time column. Nested arguments + below. + properties: + unit: + description: 'The precision of the timestamp value + that results from the expression described in value. + Valid values: SECONDS, MILLISECONDS, MICROSECONDS, + NANOSECONDS.' + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: object + type: object + firehose: + items: + properties: + batchMode: + description: The payload that contains a JSON array of records + will be sent to Kinesis Firehose via a batch call. + type: boolean + deliveryStreamName: + description: The delivery stream name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. 
+ type: string + separator: + description: 'A character separator that is used to separate + records written to the Firehose stream. Valid values are: + ''\n'' (newline), ''\t'' (tab), ''\r\n'' (Windows newline), + '','' (comma).' + type: string + type: object + type: array + http: + items: + properties: + confirmationUrl: + description: The HTTPS URL used to verify ownership of url. + type: string + httpHeader: + description: Custom HTTP header IoT Core should send. It + is possible to define more than one custom header. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + url: + description: The HTTPS URL. + type: string + type: object + type: array + iotAnalytics: + items: + properties: + batchMode: + description: The payload that contains a JSON array of records + will be sent to Kinesis Firehose via a batch call. + type: boolean + channelName: + description: Name of AWS IOT Analytics channel. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + iotEvents: + items: + properties: + batchMode: + description: The payload that contains a JSON array of records + will be sent to Kinesis Firehose via a batch call. + type: boolean + inputName: + description: The name of the AWS IoT Events input. + type: string + messageId: + description: Use this to ensure that only one input (message) + with a given messageId is processed by an AWS IoT Events + detector. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + kafka: + items: + properties: + clientProperties: + additionalProperties: + type: string + description: Properties of the Apache Kafka producer client. + For more info, see the AWS documentation. 
+ type: object + x-kubernetes-map-type: granular + destinationArn: + description: The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + type: string + header: + description: The list of Kafka headers that you specify. + Nested arguments below. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + key: + description: The name of the HTTP header. + type: string + partition: + description: The Kafka message partition. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + type: array + kinesis: + items: + properties: + partitionKey: + description: The partition key. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + streamName: + description: The name of the Amazon Kinesis stream. + type: string + type: object + type: array + lambda: + items: + properties: + functionArn: + description: The ARN of the Lambda function. + type: string + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + republish: + items: + properties: + qos: + description: The Quality of Service (QoS) level to use when + republishing messages. Valid values are 0 or 1. The default + value is 0. + type: number + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + type: array + s3: + items: + properties: + bucketName: + description: The Amazon S3 bucket name. + type: string + cannedAcl: + description: The Amazon S3 canned ACL that controls access + to the object identified by the object key. Valid values. 
+ type: string + key: + description: The name of the HTTP header. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + sns: + items: + properties: + messageFormat: + description: The message format of the message to publish. + Accepted values are "JSON" and "RAW". + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetArn: + description: The ARN of the SNS topic. + type: string + targetArnRef: + description: Reference to a Topic in sns to populate targetArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetArnSelector: + description: Selector for a Topic in sns to populate targetArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + sql: + description: The SQL statement used to query the topic. For more + information, see AWS IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) + in the AWS IoT Developer Guide. + type: string + sqlVersion: + description: The version of the SQL rules engine to use when evaluating + the rule. + type: string + sqs: + items: + properties: + queueUrl: + description: The URL of the Amazon SQS queue. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + useBase64: + description: Specifies whether to use Base64 encoding. 
+ type: boolean + type: object + type: array + stepFunctions: + items: + properties: + executionNamePrefix: + description: The prefix used to generate, along with a UUID, + the unique state machine execution name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateMachineName: + description: The name of the Step Functions state machine + whose execution will be started. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timestream: + items: + properties: + databaseName: + description: The name of an Amazon Timestream database. + type: string + dimension: + description: Configuration blocks with metadata attributes + of the time series that are written in each measure record. + Nested arguments below. + items: + properties: + name: + description: The name of the rule. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + timestamp: + description: Configuration block specifying an application-defined + value to replace the default value assigned to the Timestream + record's timestamp in the time column. Nested arguments + below. + properties: + unit: + description: 'The precision of the timestamp value that + results from the expression described in value. Valid + values: SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS.' + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cloudwatchAlarm: + items: + properties: + alarmName: + description: The CloudWatch alarm name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateReason: + description: The reason for the alarm change. + type: string + stateValue: + description: 'The value of the alarm state. Acceptable values + are: OK, ALARM, INSUFFICIENT_DATA.' + type: string + type: object + type: array + cloudwatchLogs: + items: + properties: + logGroupName: + description: The CloudWatch log group name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + cloudwatchMetric: + items: + properties: + metricName: + description: The CloudWatch metric name. + type: string + metricNamespace: + description: The CloudWatch metric namespace name. + type: string + metricTimestamp: + description: An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + type: string + metricUnit: + description: 'The metric unit (supported units can be found + here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit)' + type: string + metricValue: + description: The CloudWatch metric value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. 
+ type: string + type: object + type: array + description: + description: The description of the rule. + type: string + dynamodb: + items: + properties: + hashKeyField: + description: The hash key name. + type: string + hashKeyType: + description: The hash key type. Valid values are "STRING" + or "NUMBER". + type: string + hashKeyValue: + description: The hash key value. + type: string + operation: + description: The operation. Valid values are "INSERT", "UPDATE", + or "DELETE". + type: string + payloadField: + description: The action payload. + type: string + rangeKeyField: + description: The range key name. + type: string + rangeKeyType: + description: The range key type. Valid values are "STRING" + or "NUMBER". + type: string + rangeKeyValue: + description: The range key value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + type: object + type: array + dynamodbv2: + items: + properties: + putItem: + description: Configuration block with DynamoDB Table to + which the message will be written. Nested arguments below. + properties: + tableName: + description: The name of the DynamoDB table. + type: string + type: object + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + elasticsearch: + items: + properties: + endpoint: + description: The endpoint of your Elasticsearch domain. + type: string + id: + description: The unique identifier for the document you + are storing. + type: string + index: + description: The Elasticsearch index where you want to store + your data. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: + description: The type of document you are storing. 
+ type: string + type: object + type: array + enabled: + description: Specifies whether the rule is enabled. + type: boolean + errorAction: + description: Configuration block with error action to be associated + with the rule. See the documentation for cloudwatch_alarm, cloudwatch_logs, + cloudwatch_metric, dynamodb, dynamodbv2, elasticsearch, firehose, + http, iot_analytics, iot_events, kafka, kinesis, lambda, republish, + s3, sns, sqs, step_functions, timestream configuration blocks + for further configuration details. + properties: + cloudwatchAlarm: + properties: + alarmName: + description: The CloudWatch alarm name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateReason: + description: The reason for the alarm change. + type: string + stateValue: + description: 'The value of the alarm state. Acceptable + values are: OK, ALARM, INSUFFICIENT_DATA.' + type: string + type: object + cloudwatchLogs: + properties: + logGroupName: + description: The CloudWatch log group name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + cloudwatchMetric: + properties: + metricName: + description: The CloudWatch metric name. + type: string + metricNamespace: + description: The CloudWatch metric namespace name. + type: string + metricTimestamp: + description: An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + type: string + metricUnit: + description: 'The metric unit (supported units can be + found here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit)' + type: string + metricValue: + description: The CloudWatch metric value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. 
+ type: string + type: object + dynamodb: + properties: + hashKeyField: + description: The hash key name. + type: string + hashKeyType: + description: The hash key type. Valid values are "STRING" + or "NUMBER". + type: string + hashKeyValue: + description: The hash key value. + type: string + operation: + description: The operation. Valid values are "INSERT", + "UPDATE", or "DELETE". + type: string + payloadField: + description: The action payload. + type: string + rangeKeyField: + description: The range key name. + type: string + rangeKeyType: + description: The range key type. Valid values are "STRING" + or "NUMBER". + type: string + rangeKeyValue: + description: The range key value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + type: object + dynamodbv2: + properties: + putItem: + description: Configuration block with DynamoDB Table to + which the message will be written. Nested arguments + below. + properties: + tableName: + description: The name of the DynamoDB table. + type: string + type: object + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + elasticsearch: + properties: + endpoint: + description: The endpoint of your Elasticsearch domain. + type: string + id: + description: The unique identifier for the document you + are storing. + type: string + index: + description: The Elasticsearch index where you want to + store your data. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: + description: The type of document you are storing. + type: string + type: object + firehose: + properties: + batchMode: + description: The payload that contains a JSON array of + records will be sent to Kinesis Firehose via a batch + call. 
+ type: boolean + deliveryStreamName: + description: The delivery stream name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + separator: + description: 'A character separator that is used to separate + records written to the Firehose stream. Valid values + are: ''\n'' (newline), ''\t'' (tab), ''\r\n'' (Windows + newline), '','' (comma).' + type: string + type: object + http: + properties: + confirmationUrl: + description: The HTTPS URL used to verify ownership of + url. + type: string + httpHeader: + description: Custom HTTP header IoT Core should send. + It is possible to define more than one custom header. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + url: + description: The HTTPS URL. + type: string + type: object + iotAnalytics: + properties: + batchMode: + description: The payload that contains a JSON array of + records will be sent to Kinesis Firehose via a batch + call. + type: boolean + channelName: + description: Name of AWS IOT Analytics channel. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + iotEvents: + properties: + batchMode: + description: The payload that contains a JSON array of + records will be sent to Kinesis Firehose via a batch + call. + type: boolean + inputName: + description: The name of the AWS IoT Events input. + type: string + messageId: + description: Use this to ensure that only one input (message) + with a given messageId is processed by an AWS IoT Events + detector. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. 
+ type: string + type: object + kafka: + properties: + clientProperties: + additionalProperties: + type: string + description: Properties of the Apache Kafka producer client. + For more info, see the AWS documentation. + type: object + x-kubernetes-map-type: granular + destinationArn: + description: The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + type: string + header: + description: The list of Kafka headers that you specify. + Nested arguments below. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + key: + description: The name of the HTTP header. + type: string + partition: + description: The Kafka message partition. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + kinesis: + properties: + partitionKey: + description: The partition key. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + streamName: + description: The name of the Amazon Kinesis stream. + type: string + type: object + lambda: + properties: + functionArn: + description: The ARN of the Lambda function. + type: string + type: object + republish: + properties: + qos: + description: The Quality of Service (QoS) level to use + when republishing messages. Valid values are 0 or 1. + The default value is 0. + type: number + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + s3: + properties: + bucketName: + description: The Amazon S3 bucket name. + type: string + cannedAcl: + description: The Amazon S3 canned ACL that controls access + to the object identified by the object key. Valid values. 
+ type: string + key: + description: The name of the HTTP header. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + sns: + properties: + messageFormat: + description: The message format of the message to publish. + Accepted values are "JSON" and "RAW". + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetArn: + description: The ARN of the SNS topic. + type: string + targetArnRef: + description: Reference to a Topic in sns to populate targetArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetArnSelector: + description: Selector for a Topic in sns to populate targetArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + sqs: + properties: + queueUrl: + description: The URL of the Amazon SQS queue. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + useBase64: + description: Specifies whether to use Base64 encoding. + type: boolean + type: object + stepFunctions: + properties: + executionNamePrefix: + description: The prefix used to generate, along with a + UUID, the unique state machine execution name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateMachineName: + description: The name of the Step Functions state machine + whose execution will be started. + type: string + type: object + timestream: + properties: + databaseName: + description: The name of an Amazon Timestream database. 
+ type: string + dimension: + description: Configuration blocks with metadata attributes + of the time series that are written in each measure + record. Nested arguments below. + items: + properties: + name: + description: The name of the rule. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + timestamp: + description: Configuration block specifying an application-defined + value to replace the default value assigned to the Timestream + record's timestamp in the time column. Nested arguments + below. + properties: + unit: + description: 'The precision of the timestamp value + that results from the expression described in value. + Valid values: SECONDS, MILLISECONDS, MICROSECONDS, + NANOSECONDS.' + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: object + type: object + firehose: + items: + properties: + batchMode: + description: The payload that contains a JSON array of records + will be sent to Kinesis Firehose via a batch call. + type: boolean + deliveryStreamName: + description: The delivery stream name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + separator: + description: 'A character separator that is used to separate + records written to the Firehose stream. Valid values are: + ''\n'' (newline), ''\t'' (tab), ''\r\n'' (Windows newline), + '','' (comma).' + type: string + type: object + type: array + http: + items: + properties: + confirmationUrl: + description: The HTTPS URL used to verify ownership of url. + type: string + httpHeader: + description: Custom HTTP header IoT Core should send. It + is possible to define more than one custom header. 
+ items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + url: + description: The HTTPS URL. + type: string + type: object + type: array + iotAnalytics: + items: + properties: + batchMode: + description: The payload that contains a JSON array of records + will be sent to Kinesis Firehose via a batch call. + type: boolean + channelName: + description: Name of AWS IOT Analytics channel. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + iotEvents: + items: + properties: + batchMode: + description: The payload that contains a JSON array of records + will be sent to Kinesis Firehose via a batch call. + type: boolean + inputName: + description: The name of the AWS IoT Events input. + type: string + messageId: + description: Use this to ensure that only one input (message) + with a given messageId is processed by an AWS IoT Events + detector. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + kafka: + items: + properties: + clientProperties: + additionalProperties: + type: string + description: Properties of the Apache Kafka producer client. + For more info, see the AWS documentation. + type: object + x-kubernetes-map-type: granular + destinationArn: + description: The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + type: string + header: + description: The list of Kafka headers that you specify. + Nested arguments below. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + key: + description: The name of the HTTP header. + type: string + partition: + description: The Kafka message partition. 
+ type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + type: array + kinesis: + items: + properties: + partitionKey: + description: The partition key. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + streamName: + description: The name of the Amazon Kinesis stream. + type: string + type: object + type: array + lambda: + items: + properties: + functionArn: + description: The ARN of the Lambda function. + type: string + type: object + type: array + republish: + items: + properties: + qos: + description: The Quality of Service (QoS) level to use when + republishing messages. Valid values are 0 or 1. The default + value is 0. + type: number + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + type: array + s3: + items: + properties: + bucketName: + description: The Amazon S3 bucket name. + type: string + cannedAcl: + description: The Amazon S3 canned ACL that controls access + to the object identified by the object key. Valid values. + type: string + key: + description: The name of the HTTP header. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + sns: + items: + properties: + messageFormat: + description: The message format of the message to publish. + Accepted values are "JSON" and "RAW". + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetArn: + description: The ARN of the SNS topic. 
+ type: string + targetArnRef: + description: Reference to a Topic in sns to populate targetArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetArnSelector: + description: Selector for a Topic in sns to populate targetArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + sql: + description: The SQL statement used to query the topic. For more + information, see AWS IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) + in the AWS IoT Developer Guide. + type: string + sqlVersion: + description: The version of the SQL rules engine to use when evaluating + the rule. + type: string + sqs: + items: + properties: + queueUrl: + description: The URL of the Amazon SQS queue. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + useBase64: + description: Specifies whether to use Base64 encoding. + type: boolean + type: object + type: array + stepFunctions: + items: + properties: + executionNamePrefix: + description: The prefix used to generate, along with a UUID, + the unique state machine execution name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateMachineName: + description: The name of the Step Functions state machine + whose execution will be started. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timestream: + items: + properties: + databaseName: + description: The name of an Amazon Timestream database. + type: string + dimension: + description: Configuration blocks with metadata attributes + of the time series that are written in each measure record. + Nested arguments below. + items: + properties: + name: + description: The name of the rule. 
+ type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + timestamp: + description: Configuration block specifying an application-defined + value to replace the default value assigned to the Timestream + record's timestamp in the time column. Nested arguments + below. + properties: + unit: + description: 'The precision of the timestamp value that + results from the expression described in value. Valid + values: SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS.' + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.enabled is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.enabled) + || (has(self.initProvider) && has(self.initProvider.enabled))' + - message: spec.forProvider.sql is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sql) + || (has(self.initProvider) && has(self.initProvider.sql))' + - message: spec.forProvider.sqlVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sqlVersion) + || (has(self.initProvider) && has(self.initProvider.sqlVersion))' + status: + description: TopicRuleStatus defines the observed state of TopicRule. + properties: + atProvider: + properties: + arn: + description: The ARN of the topic rule + type: string + cloudwatchAlarm: + items: + properties: + alarmName: + description: The CloudWatch alarm name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateReason: + description: The reason for the alarm change. 
+ type: string + stateValue: + description: 'The value of the alarm state. Acceptable values + are: OK, ALARM, INSUFFICIENT_DATA.' + type: string + type: object + type: array + cloudwatchLogs: + items: + properties: + logGroupName: + description: The CloudWatch log group name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + cloudwatchMetric: + items: + properties: + metricName: + description: The CloudWatch metric name. + type: string + metricNamespace: + description: The CloudWatch metric namespace name. + type: string + metricTimestamp: + description: An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + type: string + metricUnit: + description: 'The metric unit (supported units can be found + here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit)' + type: string + metricValue: + description: The CloudWatch metric value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + description: + description: The description of the rule. + type: string + dynamodb: + items: + properties: + hashKeyField: + description: The hash key name. + type: string + hashKeyType: + description: The hash key type. Valid values are "STRING" + or "NUMBER". + type: string + hashKeyValue: + description: The hash key value. + type: string + operation: + description: The operation. Valid values are "INSERT", "UPDATE", + or "DELETE". + type: string + payloadField: + description: The action payload. + type: string + rangeKeyField: + description: The range key name. + type: string + rangeKeyType: + description: The range key type. Valid values are "STRING" + or "NUMBER". + type: string + rangeKeyValue: + description: The range key value. 
+ type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + type: object + type: array + dynamodbv2: + items: + properties: + putItem: + description: Configuration block with DynamoDB Table to + which the message will be written. Nested arguments below. + properties: + tableName: + description: The name of the DynamoDB table. + type: string + type: object + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + elasticsearch: + items: + properties: + endpoint: + description: The endpoint of your Elasticsearch domain. + type: string + id: + description: The unique identifier for the document you + are storing. + type: string + index: + description: The Elasticsearch index where you want to store + your data. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: + description: The type of document you are storing. + type: string + type: object + type: array + enabled: + description: Specifies whether the rule is enabled. + type: boolean + errorAction: + description: Configuration block with error action to be associated + with the rule. See the documentation for cloudwatch_alarm, cloudwatch_logs, + cloudwatch_metric, dynamodb, dynamodbv2, elasticsearch, firehose, + http, iot_analytics, iot_events, kafka, kinesis, lambda, republish, + s3, sns, sqs, step_functions, timestream configuration blocks + for further configuration details. + properties: + cloudwatchAlarm: + properties: + alarmName: + description: The CloudWatch alarm name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateReason: + description: The reason for the alarm change. + type: string + stateValue: + description: 'The value of the alarm state. 
Acceptable + values are: OK, ALARM, INSUFFICIENT_DATA.' + type: string + type: object + cloudwatchLogs: + properties: + logGroupName: + description: The CloudWatch log group name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + cloudwatchMetric: + properties: + metricName: + description: The CloudWatch metric name. + type: string + metricNamespace: + description: The CloudWatch metric namespace name. + type: string + metricTimestamp: + description: An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + type: string + metricUnit: + description: 'The metric unit (supported units can be + found here: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit)' + type: string + metricValue: + description: The CloudWatch metric value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + dynamodb: + properties: + hashKeyField: + description: The hash key name. + type: string + hashKeyType: + description: The hash key type. Valid values are "STRING" + or "NUMBER". + type: string + hashKeyValue: + description: The hash key value. + type: string + operation: + description: The operation. Valid values are "INSERT", + "UPDATE", or "DELETE". + type: string + payloadField: + description: The action payload. + type: string + rangeKeyField: + description: The range key name. + type: string + rangeKeyType: + description: The range key type. Valid values are "STRING" + or "NUMBER". + type: string + rangeKeyValue: + description: The range key value. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. 
+ type: string + type: object + dynamodbv2: + properties: + putItem: + description: Configuration block with DynamoDB Table to + which the message will be written. Nested arguments + below. + properties: + tableName: + description: The name of the DynamoDB table. + type: string + type: object + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + elasticsearch: + properties: + endpoint: + description: The endpoint of your Elasticsearch domain. + type: string + id: + description: The unique identifier for the document you + are storing. + type: string + index: + description: The Elasticsearch index where you want to + store your data. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: + description: The type of document you are storing. + type: string + type: object + firehose: + properties: + batchMode: + description: The payload that contains a JSON array of + records will be sent to Kinesis Firehose via a batch + call. + type: boolean + deliveryStreamName: + description: The delivery stream name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + separator: + description: 'A character separator that is used to separate + records written to the Firehose stream. Valid values + are: ''\n'' (newline), ''\t'' (tab), ''\r\n'' (Windows + newline), '','' (comma).' + type: string + type: object + http: + properties: + confirmationUrl: + description: The HTTPS URL used to verify ownership of + url. + type: string + httpHeader: + description: Custom HTTP header IoT Core should send. + It is possible to define more than one custom header. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + url: + description: The HTTPS URL. 
+ type: string + type: object + iotAnalytics: + properties: + batchMode: + description: The payload that contains a JSON array of + records will be sent to Kinesis Firehose via a batch + call. + type: boolean + channelName: + description: Name of AWS IOT Analytics channel. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + iotEvents: + properties: + batchMode: + description: The payload that contains a JSON array of + records will be sent to Kinesis Firehose via a batch + call. + type: boolean + inputName: + description: The name of the AWS IoT Events input. + type: string + messageId: + description: Use this to ensure that only one input (message) + with a given messageId is processed by an AWS IoT Events + detector. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + kafka: + properties: + clientProperties: + additionalProperties: + type: string + description: Properties of the Apache Kafka producer client. + For more info, see the AWS documentation. + type: object + x-kubernetes-map-type: granular + destinationArn: + description: The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + type: string + header: + description: The list of Kafka headers that you specify. + Nested arguments below. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + key: + description: The name of the HTTP header. + type: string + partition: + description: The Kafka message partition. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + kinesis: + properties: + partitionKey: + description: The partition key. 
+ type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + streamName: + description: The name of the Amazon Kinesis stream. + type: string + type: object + lambda: + properties: + functionArn: + description: The ARN of the Lambda function. + type: string + type: object + republish: + properties: + qos: + description: The Quality of Service (QoS) level to use + when republishing messages. Valid values are 0 or 1. + The default value is 0. + type: number + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + s3: + properties: + bucketName: + description: The Amazon S3 bucket name. + type: string + cannedAcl: + description: The Amazon S3 canned ACL that controls access + to the object identified by the object key. Valid values. + type: string + key: + description: The name of the HTTP header. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + sns: + properties: + messageFormat: + description: The message format of the message to publish. + Accepted values are "JSON" and "RAW". + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + targetArn: + description: The ARN of the SNS topic. + type: string + type: object + sqs: + properties: + queueUrl: + description: The URL of the Amazon SQS queue. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + useBase64: + description: Specifies whether to use Base64 encoding. + type: boolean + type: object + stepFunctions: + properties: + executionNamePrefix: + description: The prefix used to generate, along with a + UUID, the unique state machine execution name. 
+ type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateMachineName: + description: The name of the Step Functions state machine + whose execution will be started. + type: string + type: object + timestream: + properties: + databaseName: + description: The name of an Amazon Timestream database. + type: string + dimension: + description: Configuration blocks with metadata attributes + of the time series that are written in each measure + record. Nested arguments below. + items: + properties: + name: + description: The name of the rule. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + timestamp: + description: Configuration block specifying an application-defined + value to replace the default value assigned to the Timestream + record's timestamp in the time column. Nested arguments + below. + properties: + unit: + description: 'The precision of the timestamp value + that results from the expression described in value. + Valid values: SECONDS, MILLISECONDS, MICROSECONDS, + NANOSECONDS.' + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: object + type: object + firehose: + items: + properties: + batchMode: + description: The payload that contains a JSON array of records + will be sent to Kinesis Firehose via a batch call. + type: boolean + deliveryStreamName: + description: The delivery stream name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + separator: + description: 'A character separator that is used to separate + records written to the Firehose stream. 
Valid values are: + ''\n'' (newline), ''\t'' (tab), ''\r\n'' (Windows newline), + '','' (comma).' + type: string + type: object + type: array + http: + items: + properties: + confirmationUrl: + description: The HTTPS URL used to verify ownership of url. + type: string + httpHeader: + description: Custom HTTP header IoT Core should send. It + is possible to define more than one custom header. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + url: + description: The HTTPS URL. + type: string + type: object + type: array + id: + description: The unique identifier for the document you are storing. + type: string + iotAnalytics: + items: + properties: + batchMode: + description: The payload that contains a JSON array of records + will be sent to Kinesis Firehose via a batch call. + type: boolean + channelName: + description: Name of AWS IOT Analytics channel. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + iotEvents: + items: + properties: + batchMode: + description: The payload that contains a JSON array of records + will be sent to Kinesis Firehose via a batch call. + type: boolean + inputName: + description: The name of the AWS IoT Events input. + type: string + messageId: + description: Use this to ensure that only one input (message) + with a given messageId is processed by an AWS IoT Events + detector. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + type: object + type: array + kafka: + items: + properties: + clientProperties: + additionalProperties: + type: string + description: Properties of the Apache Kafka producer client. + For more info, see the AWS documentation. 
+ type: object + x-kubernetes-map-type: granular + destinationArn: + description: The ARN of Kafka action's VPC aws_iot_topic_rule_destination. + type: string + header: + description: The list of Kafka headers that you specify. + Nested arguments below. + items: + properties: + key: + description: The name of the HTTP header. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + key: + description: The name of the HTTP header. + type: string + partition: + description: The Kafka message partition. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + type: array + kinesis: + items: + properties: + partitionKey: + description: The partition key. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + streamName: + description: The name of the Amazon Kinesis stream. + type: string + type: object + type: array + lambda: + items: + properties: + functionArn: + description: The ARN of the Lambda function. + type: string + type: object + type: array + republish: + items: + properties: + qos: + description: The Quality of Service (QoS) level to use when + republishing messages. Valid values are 0 or 1. The default + value is 0. + type: number + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + topic: + description: The Kafka topic for messages to be sent to + the Kafka broker. + type: string + type: object + type: array + s3: + items: + properties: + bucketName: + description: The Amazon S3 bucket name. + type: string + cannedAcl: + description: The Amazon S3 canned ACL that controls access + to the object identified by the object key. Valid values. + type: string + key: + description: The name of the HTTP header. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. 
+ type: string + type: object + type: array + sns: + items: + properties: + messageFormat: + description: The message format of the message to publish. + Accepted values are "JSON" and "RAW". + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + targetArn: + description: The ARN of the SNS topic. + type: string + type: object + type: array + sql: + description: The SQL statement used to query the topic. For more + information, see AWS IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) + in the AWS IoT Developer Guide. + type: string + sqlVersion: + description: The version of the SQL rules engine to use when evaluating + the rule. + type: string + sqs: + items: + properties: + queueUrl: + description: The URL of the Amazon SQS queue. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + useBase64: + description: Specifies whether to use Base64 encoding. + type: boolean + type: object + type: array + stepFunctions: + items: + properties: + executionNamePrefix: + description: The prefix used to generate, along with a UUID, + the unique state machine execution name. + type: string + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + stateMachineName: + description: The name of the Step Functions state machine + whose execution will be started. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + timestream: + items: + properties: + databaseName: + description: The name of an Amazon Timestream database. + type: string + dimension: + description: Configuration blocks with metadata attributes + of the time series that are written in each measure record. + Nested arguments below. + items: + properties: + name: + description: The name of the rule. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + roleArn: + description: The IAM role ARN that allows access to the + CloudWatch alarm. + type: string + tableName: + description: The name of the DynamoDB table. + type: string + timestamp: + description: Configuration block specifying an application-defined + value to replace the default value assigned to the Timestream + record's timestamp in the time column. Nested arguments + below. + properties: + unit: + description: 'The precision of the timestamp value that + results from the expression described in value. Valid + values: SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS.' + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ivs.aws.upbound.io_recordingconfigurations.yaml b/package/crds/ivs.aws.upbound.io_recordingconfigurations.yaml index 8647d2a313..f7d6ac970e 100644 --- a/package/crds/ivs.aws.upbound.io_recordingconfigurations.yaml +++ b/package/crds/ivs.aws.upbound.io_recordingconfigurations.yaml @@ -493,3 +493,460 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RecordingConfiguration is the Schema for the 
RecordingConfigurations + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RecordingConfigurationSpec defines the desired state of RecordingConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + destinationConfiguration: + description: Object containing destination configuration for where + recorded video will be stored. + properties: + s3: + description: S3 destination configuration where recorded videos + will be stored. + properties: + bucketName: + description: S3 bucket name where recorded videos will + be stored. 
+ type: string + type: object + type: object + name: + description: Recording Configuration name. + type: string + recordingReconnectWindowSeconds: + description: If a broadcast disconnects and then reconnects within + the specified interval, the multiple streams will be considered + a single broadcast and merged together. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + thumbnailConfiguration: + description: Object containing information to enable/disable the + recording of thumbnails for a live session and modify the interval + at which thumbnails are generated for the live session. + properties: + recordingMode: + description: 'Thumbnail recording mode. Valid values: DISABLED, + INTERVAL.' + type: string + targetIntervalSeconds: + description: The targeted thumbnail-generation interval in + seconds. + type: number + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + destinationConfiguration: + description: Object containing destination configuration for where + recorded video will be stored. 
+ properties: + s3: + description: S3 destination configuration where recorded videos + will be stored. + properties: + bucketName: + description: S3 bucket name where recorded videos will + be stored. + type: string + type: object + type: object + name: + description: Recording Configuration name. + type: string + recordingReconnectWindowSeconds: + description: If a broadcast disconnects and then reconnects within + the specified interval, the multiple streams will be considered + a single broadcast and merged together. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + thumbnailConfiguration: + description: Object containing information to enable/disable the + recording of thumbnails for a live session and modify the interval + at which thumbnails are generated for the live session. + properties: + recordingMode: + description: 'Thumbnail recording mode. Valid values: DISABLED, + INTERVAL.' + type: string + targetIntervalSeconds: + description: The targeted thumbnail-generation interval in + seconds. + type: number + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.destinationConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destinationConfiguration) + || (has(self.initProvider) && has(self.initProvider.destinationConfiguration))' + status: + description: RecordingConfigurationStatus defines the observed state of + RecordingConfiguration. + properties: + atProvider: + properties: + arn: + description: ARN of the Recording Configuration. + type: string + destinationConfiguration: + description: Object containing destination configuration for where + recorded video will be stored. + properties: + s3: + description: S3 destination configuration where recorded videos + will be stored. + properties: + bucketName: + description: S3 bucket name where recorded videos will + be stored. + type: string + type: object + type: object + id: + type: string + name: + description: Recording Configuration name. 
+ type: string + recordingReconnectWindowSeconds: + description: If a broadcast disconnects and then reconnects within + the specified interval, the multiple streams will be considered + a single broadcast and merged together. + type: number + state: + description: The current state of the Recording Configuration. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + thumbnailConfiguration: + description: Object containing information to enable/disable the + recording of thumbnails for a live session and modify the interval + at which thumbnails are generated for the live session. + properties: + recordingMode: + description: 'Thumbnail recording mode. Valid values: DISABLED, + INTERVAL.' + type: string + targetIntervalSeconds: + description: The targeted thumbnail-generation interval in + seconds. + type: number + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kafka.aws.upbound.io_clusters.yaml b/package/crds/kafka.aws.upbound.io_clusters.yaml index 1091a8dc36..6653bbff42 100644 --- a/package/crds/kafka.aws.upbound.io_clusters.yaml +++ b/package/crds/kafka.aws.upbound.io_clusters.yaml @@ -4901,3 +4901,2343 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta3 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + brokerNodeGroupInfo: + description: Configuration block for the broker nodes of the Kafka + cluster. + properties: + azDistribution: + description: The distribution of broker nodes across availability + zones (documentation). Currently the only valid value is + DEFAULT. + type: string + clientSubnets: + description: A list of subnets to connect to in client VPC + (documentation). + items: + type: string + type: array + x-kubernetes-list-type: set + clientSubnetsRefs: + description: References to Subnet in ec2 to populate clientSubnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + clientSubnetsSelector: + description: Selector for a list of Subnet in ec2 to populate + clientSubnets. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectivityInfo: + description: Information about the cluster access configuration. + See below. For security reasons, you can't turn on public + access while creating an MSK cluster. However, you can update + an existing cluster to make it publicly accessible. You + can also create a new cluster and then update it to make + it publicly accessible (documentation). + properties: + publicAccess: + description: Access control settings for brokers. See + below. + properties: + type: + description: 'Public access type. Valid values: DISABLED, + SERVICE_PROVIDED_EIPS.' + type: string + type: object + vpcConnectivity: + description: VPC connectivity access control for brokers. + See below. + properties: + clientAuthentication: + description: Configuration block for specifying a + client authentication. See below. + properties: + sasl: + description: SASL authentication type details + for VPC connectivity. See below. + properties: + iam: + description: Enables SASL/IAM authentication + for VPC connectivity. + type: boolean + scram: + description: Enables SASL/SCRAM authentication + for VPC connectivity. + type: boolean + type: object + tls: + description: Enables TLS authentication for VPC + connectivity. + type: boolean + type: object + type: object + type: object + instanceType: + description: Specify the instance type to use for the kafka + brokersE.g., kafka.m5.large. (Pricing info) + type: string + securityGroups: + description: A list of the security groups to associate with + the elastic network interfaces to control who can communicate + with the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupsRefs: + description: References to SecurityGroup in ec2 to populate + securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageInfo: + description: A block that contains information about storage + volumes attached to MSK broker nodes. See below. + properties: + ebsStorageInfo: + description: A block that contains EBS volume information. + See below. + properties: + provisionedThroughput: + description: A block that contains EBS volume provisioned + throughput information. To provision storage throughput, + you must choose broker type kafka.m5.4xlarge or + larger. See below. + properties: + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + volumeThroughput: + description: Throughput value of the EBS volumes + for the data drive on each kafka broker node + in MiB per second. The minimum value is 250. + The maximum value varies between broker type. + You can refer to the valid values for the maximum + volume throughput at the following documentation + on throughput bottlenecks + type: number + type: object + volumeSize: + description: The size in GiB of the EBS volume for + the data drive on each broker node. Minimum value + of 1 and maximum value of 16384. + type: number + type: object + type: object + type: object + clientAuthentication: + description: Configuration block for specifying a client authentication. + See below. + properties: + sasl: + description: SASL authentication type details for VPC connectivity. + See below. + properties: + iam: + description: Enables SASL/IAM authentication for VPC connectivity. + type: boolean + scram: + description: Enables SASL/SCRAM authentication for VPC + connectivity. + type: boolean + type: object + tls: + description: Enables TLS authentication for VPC connectivity. + properties: + certificateAuthorityArns: + description: List of ACM Certificate Authority Amazon + Resource Names (ARNs). 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + unauthenticated: + description: Enables unauthenticated access. + type: boolean + type: object + clusterName: + description: Name of the MSK cluster. + type: string + configurationInfo: + description: Configuration block for specifying a MSK Configuration + to attach to Kafka brokers. See below. + properties: + arn: + description: Amazon Resource Name (ARN) of the MSK Configuration + to use in the cluster. + type: string + arnRef: + description: Reference to a Configuration in kafka to populate + arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a Configuration in kafka to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revision: + description: Revision of the MSK Configuration to use in the + cluster. + type: number + type: object + encryptionInfo: + description: Configuration block for specifying encryption. See + below. + properties: + encryptionAtRestKmsKeyArn: + description: The ARN of the KMS key used for encryption at + rest of the broker data volumes. + type: string + encryptionAtRestKmsKeyArnRef: + description: Reference to a Key in kms to populate encryptionAtRestKmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + encryptionAtRestKmsKeyArnSelector: + description: Selector for a Key in kms to populate encryptionAtRestKmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + encryptionInTransit: + description: Configuration block to specify encryption in + transit. See below. + properties: + clientBroker: + description: 'Encryption setting for data in transit between + clients and brokers. Valid values: TLS, TLS_PLAINTEXT, + and PLAINTEXT. Default value is TLS.' + type: string + inCluster: + description: 'Whether data communication among broker + nodes is encrypted. Default value: true.' + type: boolean + type: object + type: object + enhancedMonitoring: + description: Specify the desired enhanced MSK CloudWatch monitoring + level. 
See Monitoring Amazon MSK with Amazon CloudWatch + type: string + kafkaVersion: + description: Specify the desired Kafka software version. + type: string + loggingInfo: + description: Configuration block for streaming broker logs to + Cloudwatch/S3/Kinesis Firehose. See below. + properties: + brokerLogs: + description: Configuration block for Broker Logs settings + for logging info. See below. + properties: + cloudwatchLogs: + properties: + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + logGroup: + description: Name of the Cloudwatch Log Group to deliver + logs to. + type: string + logGroupRef: + description: Reference to a Group in cloudwatchlogs + to populate logGroup. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logGroupSelector: + description: Selector for a Group in cloudwatchlogs + to populate logGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + firehose: + properties: + deliveryStream: + description: Name of the Kinesis Data Firehose delivery + stream to deliver logs to. + type: string + deliveryStreamRef: + description: Reference to a DeliveryStream in firehose + to populate deliveryStream. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deliveryStreamSelector: + description: Selector for a DeliveryStream in firehose + to populate deliveryStream. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + type: object + s3: + properties: + bucket: + description: Name of the S3 bucket to deliver logs + to. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + prefix: + description: Prefix to append to the folder name. 
+ type: string + type: object + type: object + type: object + numberOfBrokerNodes: + description: The desired total number of broker nodes in the kafka + cluster. It must be a multiple of the number of specified client + subnets. + type: number + openMonitoring: + description: Configuration block for JMX and Node monitoring for + the MSK cluster. See below. + properties: + prometheus: + description: Configuration block for Prometheus settings for + open monitoring. See below. + properties: + jmxExporter: + description: Configuration block for JMX Exporter. See + below. + properties: + enabledInBroker: + description: Indicates whether you want to enable + or disable the JMX Exporter. + type: boolean + type: object + nodeExporter: + description: Configuration block for Node Exporter. See + below. + properties: + enabledInBroker: + description: Indicates whether you want to enable + or disable the JMX Exporter. + type: boolean + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + storageMode: + description: 'Controls storage mode for supported storage tiers. + Valid values are: LOCAL or TIERED.' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + brokerNodeGroupInfo: + description: Configuration block for the broker nodes of the Kafka + cluster. + properties: + azDistribution: + description: The distribution of broker nodes across availability + zones (documentation). Currently the only valid value is + DEFAULT. + type: string + clientSubnets: + description: A list of subnets to connect to in client VPC + (documentation). + items: + type: string + type: array + x-kubernetes-list-type: set + clientSubnetsRefs: + description: References to Subnet in ec2 to populate clientSubnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + clientSubnetsSelector: + description: Selector for a list of Subnet in ec2 to populate + clientSubnets. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + connectivityInfo: + description: Information about the cluster access configuration. + See below. For security reasons, you can't turn on public + access while creating an MSK cluster. However, you can update + an existing cluster to make it publicly accessible. You + can also create a new cluster and then update it to make + it publicly accessible (documentation). + properties: + publicAccess: + description: Access control settings for brokers. See + below. + properties: + type: + description: 'Public access type. Valid values: DISABLED, + SERVICE_PROVIDED_EIPS.' + type: string + type: object + vpcConnectivity: + description: VPC connectivity access control for brokers. + See below. + properties: + clientAuthentication: + description: Configuration block for specifying a + client authentication. See below. + properties: + sasl: + description: SASL authentication type details + for VPC connectivity. See below. + properties: + iam: + description: Enables SASL/IAM authentication + for VPC connectivity. 
+ type: boolean + scram: + description: Enables SASL/SCRAM authentication + for VPC connectivity. + type: boolean + type: object + tls: + description: Enables TLS authentication for VPC + connectivity. + type: boolean + type: object + type: object + type: object + instanceType: + description: Specify the instance type to use for the kafka + brokersE.g., kafka.m5.large. (Pricing info) + type: string + securityGroups: + description: A list of the security groups to associate with + the elastic network interfaces to control who can communicate + with the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupsRefs: + description: References to SecurityGroup in ec2 to populate + securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupsSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageInfo: + description: A block that contains information about storage + volumes attached to MSK broker nodes. See below. + properties: + ebsStorageInfo: + description: A block that contains EBS volume information. + See below. + properties: + provisionedThroughput: + description: A block that contains EBS volume provisioned + throughput information. To provision storage throughput, + you must choose broker type kafka.m5.4xlarge or + larger. See below. + properties: + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + volumeThroughput: + description: Throughput value of the EBS volumes + for the data drive on each kafka broker node + in MiB per second. The minimum value is 250. + The maximum value varies between broker type. 
+ You can refer to the valid values for the maximum + volume throughput at the following documentation + on throughput bottlenecks + type: number + type: object + volumeSize: + description: The size in GiB of the EBS volume for + the data drive on each broker node. Minimum value + of 1 and maximum value of 16384. + type: number + type: object + type: object + type: object + clientAuthentication: + description: Configuration block for specifying a client authentication. + See below. + properties: + sasl: + description: SASL authentication type details for VPC connectivity. + See below. + properties: + iam: + description: Enables SASL/IAM authentication for VPC connectivity. + type: boolean + scram: + description: Enables SASL/SCRAM authentication for VPC + connectivity. + type: boolean + type: object + tls: + description: Enables TLS authentication for VPC connectivity. + properties: + certificateAuthorityArns: + description: List of ACM Certificate Authority Amazon + Resource Names (ARNs). + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + unauthenticated: + description: Enables unauthenticated access. + type: boolean + type: object + clusterName: + description: Name of the MSK cluster. + type: string + configurationInfo: + description: Configuration block for specifying a MSK Configuration + to attach to Kafka brokers. See below. + properties: + arn: + description: Amazon Resource Name (ARN) of the MSK Configuration + to use in the cluster. + type: string + arnRef: + description: Reference to a Configuration in kafka to populate + arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a Configuration in kafka to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revision: + description: Revision of the MSK Configuration to use in the + cluster. + type: number + type: object + encryptionInfo: + description: Configuration block for specifying encryption. See + below. 
+ properties: + encryptionAtRestKmsKeyArn: + description: The ARN of the KMS key used for encryption at + rest of the broker data volumes. + type: string + encryptionAtRestKmsKeyArnRef: + description: Reference to a Key in kms to populate encryptionAtRestKmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + encryptionAtRestKmsKeyArnSelector: + description: Selector for a Key in kms to populate encryptionAtRestKmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + encryptionInTransit: + description: Configuration block to specify encryption in + transit. See below. + properties: + clientBroker: + description: 'Encryption setting for data in transit between + clients and brokers. Valid values: TLS, TLS_PLAINTEXT, + and PLAINTEXT. Default value is TLS.' + type: string + inCluster: + description: 'Whether data communication among broker + nodes is encrypted. Default value: true.' + type: boolean + type: object + type: object + enhancedMonitoring: + description: Specify the desired enhanced MSK CloudWatch monitoring + level. See Monitoring Amazon MSK with Amazon CloudWatch + type: string + kafkaVersion: + description: Specify the desired Kafka software version. + type: string + loggingInfo: + description: Configuration block for streaming broker logs to + Cloudwatch/S3/Kinesis Firehose. See below. + properties: + brokerLogs: + description: Configuration block for Broker Logs settings + for logging info. See below. + properties: + cloudwatchLogs: + properties: + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + logGroup: + description: Name of the Cloudwatch Log Group to deliver + logs to. + type: string + logGroupRef: + description: Reference to a Group in cloudwatchlogs + to populate logGroup. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logGroupSelector: + description: Selector for a Group in cloudwatchlogs + to populate logGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + firehose: + properties: + deliveryStream: + description: Name of the Kinesis Data Firehose delivery + stream to deliver logs to. + type: string + deliveryStreamRef: + description: Reference to a DeliveryStream in firehose + to populate deliveryStream. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deliveryStreamSelector: + description: Selector for a DeliveryStream in firehose + to populate deliveryStream. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + type: object + s3: + properties: + bucket: + description: Name of the S3 bucket to deliver logs + to. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + prefix: + description: Prefix to append to the folder name. + type: string + type: object + type: object + type: object + numberOfBrokerNodes: + description: The desired total number of broker nodes in the kafka + cluster. It must be a multiple of the number of specified client + subnets. + type: number + openMonitoring: + description: Configuration block for JMX and Node monitoring for + the MSK cluster. See below. + properties: + prometheus: + description: Configuration block for Prometheus settings for + open monitoring. See below. + properties: + jmxExporter: + description: Configuration block for JMX Exporter. See + below. + properties: + enabledInBroker: + description: Indicates whether you want to enable + or disable the JMX Exporter. + type: boolean + type: object + nodeExporter: + description: Configuration block for Node Exporter. See + below. 
+ properties: + enabledInBroker: + description: Indicates whether you want to enable + or disable the JMX Exporter. + type: boolean + type: object + type: object + type: object + storageMode: + description: 'Controls storage mode for supported storage tiers. + Valid values are: LOCAL or TIERED.' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.brokerNodeGroupInfo is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.brokerNodeGroupInfo) + || (has(self.initProvider) && has(self.initProvider.brokerNodeGroupInfo))' + - message: spec.forProvider.clusterName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterName) + || (has(self.initProvider) && has(self.initProvider.clusterName))' + - message: spec.forProvider.kafkaVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kafkaVersion) + || (has(self.initProvider) && has(self.initProvider.kafkaVersion))' + - message: spec.forProvider.numberOfBrokerNodes is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.numberOfBrokerNodes) + || (has(self.initProvider) && has(self.initProvider.numberOfBrokerNodes))' + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the MSK Configuration + to use in the cluster. + type: string + bootstrapBrokers: + description: Comma separated list of one or more hostname:port + pairs of kafka brokers suitable to bootstrap connectivity to + the kafka cluster. Contains a value if encryption_info.0.encryption_in_transit.0.client_broker + is set to PLAINTEXT or TLS_PLAINTEXT. The resource sorts values + alphabetically. 
AWS may not always return all endpoints so this + value is not guaranteed to be stable across applies. + type: string + bootstrapBrokersPublicSaslIam: + description: One or more DNS names (or IP addresses) and SASL + IAM port pairs. For example, b-1-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9198,b-2-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9198,b-3-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9198. + This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker + is set to TLS_PLAINTEXT or TLS and client_authentication.0.sasl.0.iam + is set to true and broker_node_group_info.0.connectivity_info.0.public_access.0.type + is set to SERVICE_PROVIDED_EIPS and the cluster fulfill all + other requirements for public access. The resource sorts the + list alphabetically. AWS may not always return all endpoints + so the values may not be stable across applies. + type: string + bootstrapBrokersPublicSaslScram: + description: One or more DNS names (or IP addresses) and SASL + SCRAM port pairs. For example, b-1-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9196,b-2-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9196,b-3-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9196. + This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker + is set to TLS_PLAINTEXT or TLS and client_authentication.0.sasl.0.scram + is set to true and broker_node_group_info.0.connectivity_info.0.public_access.0.type + is set to SERVICE_PROVIDED_EIPS and the cluster fulfill all + other requirements for public access. The resource sorts the + list alphabetically. AWS may not always return all endpoints + so the values may not be stable across applies. + type: string + bootstrapBrokersPublicTls: + description: One or more DNS names (or IP addresses) and TLS port + pairs. 
For example, b-1-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9194,b-2-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9194,b-3-public.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9194. + This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker + is set to TLS_PLAINTEXT or TLS and broker_node_group_info.0.connectivity_info.0.public_access.0.type + is set to SERVICE_PROVIDED_EIPS and the cluster fulfill all + other requirements for public access. The resource sorts the + list alphabetically. AWS may not always return all endpoints + so the values may not be stable across applies. + type: string + bootstrapBrokersSaslIam: + description: One or more DNS names (or IP addresses) and SASL + IAM port pairs. For example, b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9098,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9098,b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9098. + This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker + is set to TLS_PLAINTEXT or TLS and client_authentication.0.sasl.0.iam + is set to true. The resource sorts the list alphabetically. + AWS may not always return all endpoints so the values may not + be stable across applies. + type: string + bootstrapBrokersSaslScram: + description: One or more DNS names (or IP addresses) and SASL + SCRAM port pairs. For example, b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096,b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096. + This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker + is set to TLS_PLAINTEXT or TLS and client_authentication.0.sasl.0.scram + is set to true. The resource sorts the list alphabetically. + AWS may not always return all endpoints so the values may not + be stable across applies. 
+ type: string + bootstrapBrokersTls: + description: One or more DNS names (or IP addresses) and TLS port + pairs. For example, b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094. + This attribute will have a value if encryption_info.0.encryption_in_transit.0.client_broker + is set to TLS_PLAINTEXT or TLS. The resource sorts the list + alphabetically. AWS may not always return all endpoints so the + values may not be stable across applies. + type: string + bootstrapBrokersVpcConnectivitySaslIam: + description: A string containing one or more DNS names (or IP + addresses) and SASL IAM port pairs for VPC connectivity. AWS + may not always return all endpoints so the values may not be + stable across applies. + type: string + bootstrapBrokersVpcConnectivitySaslScram: + description: A string containing one or more DNS names (or IP + addresses) and SASL SCRAM port pairs for VPC connectivity. AWS + may not always return all endpoints so the values may not be + stable across applies. + type: string + bootstrapBrokersVpcConnectivityTls: + description: A string containing one or more DNS names (or IP + addresses) and TLS port pairs for VPC connectivity. AWS may + not always return all endpoints so the values may not be stable + across applies. + type: string + brokerNodeGroupInfo: + description: Configuration block for the broker nodes of the Kafka + cluster. + properties: + azDistribution: + description: The distribution of broker nodes across availability + zones (documentation). Currently the only valid value is + DEFAULT. + type: string + clientSubnets: + description: A list of subnets to connect to in client VPC + (documentation). + items: + type: string + type: array + x-kubernetes-list-type: set + connectivityInfo: + description: Information about the cluster access configuration. + See below. 
For security reasons, you can't turn on public + access while creating an MSK cluster. However, you can update + an existing cluster to make it publicly accessible. You + can also create a new cluster and then update it to make + it publicly accessible (documentation). + properties: + publicAccess: + description: Access control settings for brokers. See + below. + properties: + type: + description: 'Public access type. Valid values: DISABLED, + SERVICE_PROVIDED_EIPS.' + type: string + type: object + vpcConnectivity: + description: VPC connectivity access control for brokers. + See below. + properties: + clientAuthentication: + description: Configuration block for specifying a + client authentication. See below. + properties: + sasl: + description: SASL authentication type details + for VPC connectivity. See below. + properties: + iam: + description: Enables SASL/IAM authentication + for VPC connectivity. + type: boolean + scram: + description: Enables SASL/SCRAM authentication + for VPC connectivity. + type: boolean + type: object + tls: + description: Enables TLS authentication for VPC + connectivity. + type: boolean + type: object + type: object + type: object + instanceType: + description: Specify the instance type to use for the kafka + brokersE.g., kafka.m5.large. (Pricing info) + type: string + securityGroups: + description: A list of the security groups to associate with + the elastic network interfaces to control who can communicate + with the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + storageInfo: + description: A block that contains information about storage + volumes attached to MSK broker nodes. See below. + properties: + ebsStorageInfo: + description: A block that contains EBS volume information. + See below. + properties: + provisionedThroughput: + description: A block that contains EBS volume provisioned + throughput information. 
To provision storage throughput, + you must choose broker type kafka.m5.4xlarge or + larger. See below. + properties: + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + volumeThroughput: + description: Throughput value of the EBS volumes + for the data drive on each kafka broker node + in MiB per second. The minimum value is 250. + The maximum value varies between broker type. + You can refer to the valid values for the maximum + volume throughput at the following documentation + on throughput bottlenecks + type: number + type: object + volumeSize: + description: The size in GiB of the EBS volume for + the data drive on each broker node. Minimum value + of 1 and maximum value of 16384. + type: number + type: object + type: object + type: object + clientAuthentication: + description: Configuration block for specifying a client authentication. + See below. + properties: + sasl: + description: SASL authentication type details for VPC connectivity. + See below. + properties: + iam: + description: Enables SASL/IAM authentication for VPC connectivity. + type: boolean + scram: + description: Enables SASL/SCRAM authentication for VPC + connectivity. + type: boolean + type: object + tls: + description: Enables TLS authentication for VPC connectivity. + properties: + certificateAuthorityArns: + description: List of ACM Certificate Authority Amazon + Resource Names (ARNs). + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + unauthenticated: + description: Enables unauthenticated access. + type: boolean + type: object + clusterName: + description: Name of the MSK cluster. + type: string + clusterUuid: + description: UUID of the MSK cluster, for use in IAM policies. + type: string + configurationInfo: + description: Configuration block for specifying a MSK Configuration + to attach to Kafka brokers. See below. 
+ properties: + arn: + description: Amazon Resource Name (ARN) of the MSK Configuration + to use in the cluster. + type: string + revision: + description: Revision of the MSK Configuration to use in the + cluster. + type: number + type: object + currentVersion: + description: Current version of the MSK Cluster used for updates, + e.g., K13V1IB3VIYZZH + type: string + encryptionInfo: + description: Configuration block for specifying encryption. See + below. + properties: + encryptionAtRestKmsKeyArn: + description: The ARN of the KMS key used for encryption at + rest of the broker data volumes. + type: string + encryptionInTransit: + description: Configuration block to specify encryption in + transit. See below. + properties: + clientBroker: + description: 'Encryption setting for data in transit between + clients and brokers. Valid values: TLS, TLS_PLAINTEXT, + and PLAINTEXT. Default value is TLS.' + type: string + inCluster: + description: 'Whether data communication among broker + nodes is encrypted. Default value: true.' + type: boolean + type: object + type: object + enhancedMonitoring: + description: Specify the desired enhanced MSK CloudWatch monitoring + level. See Monitoring Amazon MSK with Amazon CloudWatch + type: string + id: + type: string + kafkaVersion: + description: Specify the desired Kafka software version. + type: string + loggingInfo: + description: Configuration block for streaming broker logs to + Cloudwatch/S3/Kinesis Firehose. See below. + properties: + brokerLogs: + description: Configuration block for Broker Logs settings + for logging info. See below. + properties: + cloudwatchLogs: + properties: + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + logGroup: + description: Name of the Cloudwatch Log Group to deliver + logs to. 
+ type: string + type: object + firehose: + properties: + deliveryStream: + description: Name of the Kinesis Data Firehose delivery + stream to deliver logs to. + type: string + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + type: object + s3: + properties: + bucket: + description: Name of the S3 bucket to deliver logs + to. + type: string + enabled: + description: 'Controls whether provisioned throughput + is enabled or not. Default value: false.' + type: boolean + prefix: + description: Prefix to append to the folder name. + type: string + type: object + type: object + type: object + numberOfBrokerNodes: + description: The desired total number of broker nodes in the kafka + cluster. It must be a multiple of the number of specified client + subnets. + type: number + openMonitoring: + description: Configuration block for JMX and Node monitoring for + the MSK cluster. See below. + properties: + prometheus: + description: Configuration block for Prometheus settings for + open monitoring. See below. + properties: + jmxExporter: + description: Configuration block for JMX Exporter. See + below. + properties: + enabledInBroker: + description: Indicates whether you want to enable + or disable the JMX Exporter. + type: boolean + type: object + nodeExporter: + description: Configuration block for Node Exporter. See + below. + properties: + enabledInBroker: + description: Indicates whether you want to enable + or disable the JMX Exporter. + type: boolean + type: object + type: object + type: object + storageMode: + description: 'Controls storage mode for supported storage tiers. + Valid values are: LOCAL or TIERED.' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + zookeeperConnectString: + description: A comma separated list of one or more hostname:port + pairs to use to connect to the Apache Zookeeper cluster. The + returned values are sorted alphabetically. The AWS API may not + return all endpoints, so this value is not guaranteed to be + stable across applies. + type: string + zookeeperConnectStringTls: + description: A comma separated list of one or more hostname:port + pairs to use to connect to the Apache Zookeeper cluster via + TLS. The returned values are sorted alphabetically. The AWS + API may not return all endpoints, so this value is not guaranteed + to be stable across applies. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kafka.aws.upbound.io_serverlessclusters.yaml b/package/crds/kafka.aws.upbound.io_serverlessclusters.yaml index 6ef0dd6ab7..663bd28163 100644 --- a/package/crds/kafka.aws.upbound.io_serverlessclusters.yaml +++ b/package/crds/kafka.aws.upbound.io_serverlessclusters.yaml @@ -832,3 +832,799 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ServerlessCluster is the Schema for the ServerlessClusters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServerlessClusterSpec defines the desired state of ServerlessCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clientAuthentication: + description: Specifies client authentication information for the + serverless cluster. See below. + properties: + sasl: + description: Details for client authentication using SASL. + See below. + properties: + iam: + description: Details for client authentication using IAM. + See below. + properties: + enabled: + description: Whether SASL/IAM authentication is enabled + or not. + type: boolean + type: object + type: object + type: object + clusterName: + description: The name of the serverless cluster. + type: string + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: VPC configuration information. See below. + items: + properties: + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 + to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: Specifies up to five security groups that control + inbound and outbound traffic for the serverless cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of subnets in at least two different + Availability Zones that host your client applications. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clientAuthentication: + description: Specifies client authentication information for the + serverless cluster. See below. + properties: + sasl: + description: Details for client authentication using SASL. + See below. + properties: + iam: + description: Details for client authentication using IAM. + See below. + properties: + enabled: + description: Whether SASL/IAM authentication is enabled + or not. + type: boolean + type: object + type: object + type: object + clusterName: + description: The name of the serverless cluster. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: VPC configuration information. See below. + items: + properties: + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 + to populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: Specifies up to five security groups that control + inbound and outbound traffic for the serverless cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: A list of subnets in at least two different + Availability Zones that host your client applications. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clientAuthentication is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clientAuthentication) + || (has(self.initProvider) && has(self.initProvider.clientAuthentication))' + - message: spec.forProvider.clusterName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterName) + || (has(self.initProvider) && has(self.initProvider.clusterName))' + - message: spec.forProvider.vpcConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vpcConfig) + || (has(self.initProvider) && has(self.initProvider.vpcConfig))' + status: + description: ServerlessClusterStatus defines the observed state of ServerlessCluster. + properties: + atProvider: + properties: + arn: + description: The ARN of the serverless cluster. + type: string + clientAuthentication: + description: Specifies client authentication information for the + serverless cluster. See below. + properties: + sasl: + description: Details for client authentication using SASL. + See below. + properties: + iam: + description: Details for client authentication using IAM. + See below. + properties: + enabled: + description: Whether SASL/IAM authentication is enabled + or not. + type: boolean + type: object + type: object + type: object + clusterName: + description: The name of the serverless cluster. + type: string + clusterUuid: + description: UUID of the serverless cluster, for use in IAM policies. 
+ type: string + id: + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: VPC configuration information. See below. + items: + properties: + securityGroupIds: + description: Specifies up to five security groups that control + inbound and outbound traffic for the serverless cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnets in at least two different + Availability Zones that host your client applications. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kafkaconnect.aws.upbound.io_connectors.yaml b/package/crds/kafkaconnect.aws.upbound.io_connectors.yaml index ee3b5562ec..109c3eadd2 100644 --- a/package/crds/kafkaconnect.aws.upbound.io_connectors.yaml +++ b/package/crds/kafkaconnect.aws.upbound.io_connectors.yaml @@ -2343,3 +2343,2220 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Connector is the Schema for the Connectors API. Provides an Amazon + MSK Connect Connector resource. Changes to any parameter besides "scaling" + will be rejected. Instead you must create a new resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConnectorSpec defines the desired state of Connector + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + capacity: + description: Information about the capacity allocated to the connector. + See below. + properties: + autoscaling: + description: Information about the auto scaling parameters + for the connector. See below. + properties: + maxWorkerCount: + description: The maximum number of workers allocated to + the connector. + type: number + mcuCount: + description: 'The number of microcontroller units (MCUs) + allocated to each connector worker. Valid values: 1, + 2, 4, 8. The default value is 1.' + type: number + minWorkerCount: + description: The minimum number of workers allocated to + the connector. + type: number + scaleInPolicy: + description: The scale-in policy for the connector. See + below. 
+ properties: + cpuUtilizationPercentage: + description: The CPU utilization percentage threshold + at which you want connector scale out to be triggered. + type: number + type: object + scaleOutPolicy: + description: The scale-out policy for the connector. See + below. + properties: + cpuUtilizationPercentage: + description: The CPU utilization percentage threshold + at which you want connector scale out to be triggered. + type: number + type: object + type: object + provisionedCapacity: + description: Details about a fixed capacity allocated to a + connector. See below. + properties: + mcuCount: + description: 'The number of microcontroller units (MCUs) + allocated to each connector worker. Valid values: 1, + 2, 4, 8. The default value is 1.' + type: number + workerCount: + description: The number of workers that are allocated + to the connector. + type: number + type: object + type: object + connectorConfiguration: + additionalProperties: + type: string + description: A map of keys to values that represent the configuration + for the connector. + type: object + x-kubernetes-map-type: granular + description: + description: A summary description of the connector. + type: string + kafkaCluster: + description: Specifies which Apache Kafka cluster to connect to. + See below. + properties: + apacheKafkaCluster: + description: The Apache Kafka cluster to which the connector + is connected. + properties: + bootstrapServers: + description: The bootstrap servers of the cluster. + type: string + vpc: + description: Details of an Amazon VPC which has network + connectivity to the Apache Kafka cluster. + properties: + securityGroupRefs: + description: References to SecurityGroup in ec2 to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup + in ec2 to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: The security groups for the connector. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetRefs: + description: References to Subnet in ec2 to populate + subnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetSelector: + description: Selector for a list of Subnet in ec2 + to populate subnets. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnets: + description: The subnets for the connector. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + kafkaClusterClientAuthentication: + description: Details of the client authentication used by the + Apache Kafka cluster. See below. + properties: + authenticationType: + description: 'The type of client authentication used to connect + to the Apache Kafka cluster. Valid values: IAM, NONE. A + value of NONE means that no client authentication is used. + The default value is NONE.' + type: string + type: object + kafkaClusterEncryptionInTransit: + description: Details of encryption in transit to the Apache Kafka + cluster. See below. + properties: + encryptionType: + description: 'The type of encryption in transit to the Apache + Kafka cluster. Valid values: PLAINTEXT, TLS. The default + values is PLAINTEXT.' + type: string + type: object + kafkaconnectVersion: + description: The version of Kafka Connect. It has to be compatible + with both the Apache Kafka cluster's version and the plugins. + type: string + logDelivery: + description: Details about log delivery. See below. + properties: + workerLogDelivery: + description: The workers can send worker logs to different + destination types. This configuration specifies the details + of these destinations. See below. + properties: + cloudwatchLogs: + description: Details about delivering logs to Amazon CloudWatch + Logs. See below. 
+ properties: + enabled: + description: Specifies whether connector logs get + sent to the specified Amazon S3 destination. + type: boolean + logGroup: + description: The name of the CloudWatch log group + that is the destination for log delivery. + type: string + logGroupRef: + description: Reference to a Group in cloudwatchlogs + to populate logGroup. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logGroupSelector: + description: Selector for a Group in cloudwatchlogs + to populate logGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + firehose: + description: Details about delivering logs to Amazon Kinesis + Data Firehose. See below. + properties: + deliveryStream: + description: The name of the Kinesis Data Firehose + delivery stream that is the destination for log + delivery. + type: string + deliveryStreamRef: + description: Reference to a DeliveryStream in firehose + to populate deliveryStream. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deliveryStreamSelector: + description: Selector for a DeliveryStream in firehose + to populate deliveryStream. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Specifies whether connector logs get + sent to the specified Amazon S3 destination. + type: boolean + type: object + s3: + description: Details about delivering logs to Amazon S3. + See below. + properties: + bucket: + description: The name of the S3 bucket that is the + destination for log delivery. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Specifies whether connector logs get + sent to the specified Amazon S3 destination. + type: boolean + prefix: + description: The S3 prefix that is the destination + for log delivery. + type: string + type: object + type: object + type: object + name: + description: The name of the connector. 
+ type: string + plugin: + description: Specifies which plugins to use for the connector. + See below. + items: + properties: + customPlugin: + description: Details about a custom plugin. See below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the worker + configuration. + type: string + arnRef: + description: Reference to a CustomPlugin in kafkaconnect + to populate arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a CustomPlugin in kafkaconnect + to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revision: + description: The revision of the worker configuration. + type: number + type: object + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + serviceExecutionRoleArn: + description: The Amazon Resource Name (ARN) of the IAM role used + by the connector to access the Amazon Web Services resources + that it needs. The types of resources depends on the logic of + the connector. For example, a connector that has Amazon S3 as + a destination must have permissions that allow it to write to + the S3 destination bucket. + type: string + serviceExecutionRoleArnRef: + description: Reference to a Role in iam to populate serviceExecutionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceExecutionRoleArnSelector: + description: Selector for a Role in iam to populate serviceExecutionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + workerConfiguration: + description: Specifies which worker configuration to use with + the connector. See below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the worker + configuration. + type: string + arnRef: + description: Reference to a WorkerConfiguration in kafkaconnect + to populate arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a WorkerConfiguration in kafkaconnect + to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revision: + description: The revision of the worker configuration. 
+ type: number + type: object + required: + - name + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + capacity: + description: Information about the capacity allocated to the connector. + See below. + properties: + autoscaling: + description: Information about the auto scaling parameters + for the connector. See below. + properties: + maxWorkerCount: + description: The maximum number of workers allocated to + the connector. + type: number + mcuCount: + description: 'The number of microcontroller units (MCUs) + allocated to each connector worker. Valid values: 1, + 2, 4, 8. The default value is 1.' + type: number + minWorkerCount: + description: The minimum number of workers allocated to + the connector. + type: number + scaleInPolicy: + description: The scale-in policy for the connector. See + below. + properties: + cpuUtilizationPercentage: + description: The CPU utilization percentage threshold + at which you want connector scale out to be triggered. + type: number + type: object + scaleOutPolicy: + description: The scale-out policy for the connector. See + below. + properties: + cpuUtilizationPercentage: + description: The CPU utilization percentage threshold + at which you want connector scale out to be triggered. 
+ type: number + type: object + type: object + provisionedCapacity: + description: Details about a fixed capacity allocated to a + connector. See below. + properties: + mcuCount: + description: 'The number of microcontroller units (MCUs) + allocated to each connector worker. Valid values: 1, + 2, 4, 8. The default value is 1.' + type: number + workerCount: + description: The number of workers that are allocated + to the connector. + type: number + type: object + type: object + connectorConfiguration: + additionalProperties: + type: string + description: A map of keys to values that represent the configuration + for the connector. + type: object + x-kubernetes-map-type: granular + description: + description: A summary description of the connector. + type: string + kafkaCluster: + description: Specifies which Apache Kafka cluster to connect to. + See below. + properties: + apacheKafkaCluster: + description: The Apache Kafka cluster to which the connector + is connected. + properties: + bootstrapServers: + description: The bootstrap servers of the cluster. + type: string + vpc: + description: Details of an Amazon VPC which has network + connectivity to the Apache Kafka cluster. + properties: + securityGroupRefs: + description: References to SecurityGroup in ec2 to + populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup + in ec2 to populate securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: The security groups for the connector. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetRefs: + description: References to Subnet in ec2 to populate + subnets. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetSelector: + description: Selector for a list of Subnet in ec2 + to populate subnets. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnets: + description: The subnets for the connector. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + kafkaClusterClientAuthentication: + description: Details of the client authentication used by the + Apache Kafka cluster. See below. + properties: + authenticationType: + description: 'The type of client authentication used to connect + to the Apache Kafka cluster. Valid values: IAM, NONE. A + value of NONE means that no client authentication is used. + The default value is NONE.' + type: string + type: object + kafkaClusterEncryptionInTransit: + description: Details of encryption in transit to the Apache Kafka + cluster. See below. + properties: + encryptionType: + description: 'The type of encryption in transit to the Apache + Kafka cluster. Valid values: PLAINTEXT, TLS. The default + values is PLAINTEXT.' + type: string + type: object + kafkaconnectVersion: + description: The version of Kafka Connect. It has to be compatible + with both the Apache Kafka cluster's version and the plugins. + type: string + logDelivery: + description: Details about log delivery. See below. + properties: + workerLogDelivery: + description: The workers can send worker logs to different + destination types. This configuration specifies the details + of these destinations. See below. + properties: + cloudwatchLogs: + description: Details about delivering logs to Amazon CloudWatch + Logs. See below. + properties: + enabled: + description: Specifies whether connector logs get + sent to the specified Amazon S3 destination. + type: boolean + logGroup: + description: The name of the CloudWatch log group + that is the destination for log delivery. + type: string + logGroupRef: + description: Reference to a Group in cloudwatchlogs + to populate logGroup. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logGroupSelector: + description: Selector for a Group in cloudwatchlogs + to populate logGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + firehose: + description: Details about delivering logs to Amazon Kinesis + Data Firehose. See below. + properties: + deliveryStream: + description: The name of the Kinesis Data Firehose + delivery stream that is the destination for log + delivery. + type: string + deliveryStreamRef: + description: Reference to a DeliveryStream in firehose + to populate deliveryStream. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deliveryStreamSelector: + description: Selector for a DeliveryStream in firehose + to populate deliveryStream. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Specifies whether connector logs get + sent to the specified Amazon S3 destination. + type: boolean + type: object + s3: + description: Details about delivering logs to Amazon S3. + See below. + properties: + bucket: + description: The name of the S3 bucket that is the + destination for log delivery. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate + bucket. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Specifies whether connector logs get + sent to the specified Amazon S3 destination. + type: boolean + prefix: + description: The S3 prefix that is the destination + for log delivery. + type: string + type: object + type: object + type: object + plugin: + description: Specifies which plugins to use for the connector. + See below. + items: + properties: + customPlugin: + description: Details about a custom plugin. See below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the worker + configuration. + type: string + arnRef: + description: Reference to a CustomPlugin in kafkaconnect + to populate arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a CustomPlugin in kafkaconnect + to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revision: + description: The revision of the worker configuration. + type: number + type: object + type: object + type: array + serviceExecutionRoleArn: + description: The Amazon Resource Name (ARN) of the IAM role used + by the connector to access the Amazon Web Services resources + that it needs. The types of resources depends on the logic of + the connector. For example, a connector that has Amazon S3 as + a destination must have permissions that allow it to write to + the S3 destination bucket. + type: string + serviceExecutionRoleArnRef: + description: Reference to a Role in iam to populate serviceExecutionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceExecutionRoleArnSelector: + description: Selector for a Role in iam to populate serviceExecutionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + workerConfiguration: + description: Specifies which worker configuration to use with + the connector. See below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the worker + configuration. + type: string + arnRef: + description: Reference to a WorkerConfiguration in kafkaconnect + to populate arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a WorkerConfiguration in kafkaconnect + to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revision: + description: The revision of the worker configuration. + type: number + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.capacity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.capacity) + || (has(self.initProvider) && has(self.initProvider.capacity))' + - message: spec.forProvider.connectorConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.connectorConfiguration) + || (has(self.initProvider) && has(self.initProvider.connectorConfiguration))' + - message: spec.forProvider.kafkaCluster is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kafkaCluster) + || (has(self.initProvider) && has(self.initProvider.kafkaCluster))' + - message: spec.forProvider.kafkaClusterClientAuthentication is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kafkaClusterClientAuthentication) + || (has(self.initProvider) && has(self.initProvider.kafkaClusterClientAuthentication))' + - message: spec.forProvider.kafkaClusterEncryptionInTransit is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kafkaClusterEncryptionInTransit) + || (has(self.initProvider) && has(self.initProvider.kafkaClusterEncryptionInTransit))' + - message: spec.forProvider.kafkaconnectVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) 
|| has(self.forProvider.kafkaconnectVersion) + || (has(self.initProvider) && has(self.initProvider.kafkaconnectVersion))' + - message: spec.forProvider.plugin is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.plugin) + || (has(self.initProvider) && has(self.initProvider.plugin))' + status: + description: ConnectorStatus defines the observed state of Connector. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the custom plugin. + type: string + capacity: + description: Information about the capacity allocated to the connector. + See below. + properties: + autoscaling: + description: Information about the auto scaling parameters + for the connector. See below. + properties: + maxWorkerCount: + description: The maximum number of workers allocated to + the connector. + type: number + mcuCount: + description: 'The number of microcontroller units (MCUs) + allocated to each connector worker. Valid values: 1, + 2, 4, 8. The default value is 1.' + type: number + minWorkerCount: + description: The minimum number of workers allocated to + the connector. + type: number + scaleInPolicy: + description: The scale-in policy for the connector. See + below. + properties: + cpuUtilizationPercentage: + description: The CPU utilization percentage threshold + at which you want connector scale out to be triggered. + type: number + type: object + scaleOutPolicy: + description: The scale-out policy for the connector. See + below. + properties: + cpuUtilizationPercentage: + description: The CPU utilization percentage threshold + at which you want connector scale out to be triggered. + type: number + type: object + type: object + provisionedCapacity: + description: Details about a fixed capacity allocated to a + connector. See below. 
+ properties: + mcuCount: + description: 'The number of microcontroller units (MCUs) + allocated to each connector worker. Valid values: 1, + 2, 4, 8. The default value is 1.' + type: number + workerCount: + description: The number of workers that are allocated + to the connector. + type: number + type: object + type: object + connectorConfiguration: + additionalProperties: + type: string + description: A map of keys to values that represent the configuration + for the connector. + type: object + x-kubernetes-map-type: granular + description: + description: A summary description of the connector. + type: string + id: + type: string + kafkaCluster: + description: Specifies which Apache Kafka cluster to connect to. + See below. + properties: + apacheKafkaCluster: + description: The Apache Kafka cluster to which the connector + is connected. + properties: + bootstrapServers: + description: The bootstrap servers of the cluster. + type: string + vpc: + description: Details of an Amazon VPC which has network + connectivity to the Apache Kafka cluster. + properties: + securityGroups: + description: The security groups for the connector. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: The subnets for the connector. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + kafkaClusterClientAuthentication: + description: Details of the client authentication used by the + Apache Kafka cluster. See below. + properties: + authenticationType: + description: 'The type of client authentication used to connect + to the Apache Kafka cluster. Valid values: IAM, NONE. A + value of NONE means that no client authentication is used. + The default value is NONE.' + type: string + type: object + kafkaClusterEncryptionInTransit: + description: Details of encryption in transit to the Apache Kafka + cluster. See below. 
+ properties: + encryptionType: + description: 'The type of encryption in transit to the Apache + Kafka cluster. Valid values: PLAINTEXT, TLS. The default + values is PLAINTEXT.' + type: string + type: object + kafkaconnectVersion: + description: The version of Kafka Connect. It has to be compatible + with both the Apache Kafka cluster's version and the plugins. + type: string + logDelivery: + description: Details about log delivery. See below. + properties: + workerLogDelivery: + description: The workers can send worker logs to different + destination types. This configuration specifies the details + of these destinations. See below. + properties: + cloudwatchLogs: + description: Details about delivering logs to Amazon CloudWatch + Logs. See below. + properties: + enabled: + description: Specifies whether connector logs get + sent to the specified Amazon S3 destination. + type: boolean + logGroup: + description: The name of the CloudWatch log group + that is the destination for log delivery. + type: string + type: object + firehose: + description: Details about delivering logs to Amazon Kinesis + Data Firehose. See below. + properties: + deliveryStream: + description: The name of the Kinesis Data Firehose + delivery stream that is the destination for log + delivery. + type: string + enabled: + description: Specifies whether connector logs get + sent to the specified Amazon S3 destination. + type: boolean + type: object + s3: + description: Details about delivering logs to Amazon S3. + See below. + properties: + bucket: + description: The name of the S3 bucket that is the + destination for log delivery. + type: string + enabled: + description: Specifies whether connector logs get + sent to the specified Amazon S3 destination. + type: boolean + prefix: + description: The S3 prefix that is the destination + for log delivery. + type: string + type: object + type: object + type: object + name: + description: The name of the connector. 
+ type: string + plugin: + description: Specifies which plugins to use for the connector. + See below. + items: + properties: + customPlugin: + description: Details about a custom plugin. See below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the worker + configuration. + type: string + revision: + description: The revision of the worker configuration. + type: number + type: object + type: object + type: array + serviceExecutionRoleArn: + description: The Amazon Resource Name (ARN) of the IAM role used + by the connector to access the Amazon Web Services resources + that it needs. The types of resources depends on the logic of + the connector. For example, a connector that has Amazon S3 as + a destination must have permissions that allow it to write to + the S3 destination bucket. + type: string + version: + description: The current version of the connector. + type: string + workerConfiguration: + description: Specifies which worker configuration to use with + the connector. See below. + properties: + arn: + description: The Amazon Resource Name (ARN) of the worker + configuration. + type: string + revision: + description: The revision of the worker configuration. + type: number + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kafkaconnect.aws.upbound.io_customplugins.yaml b/package/crds/kafkaconnect.aws.upbound.io_customplugins.yaml index e28e366e29..d60216edcd 100644 --- a/package/crds/kafkaconnect.aws.upbound.io_customplugins.yaml +++ b/package/crds/kafkaconnect.aws.upbound.io_customplugins.yaml @@ -755,3 +755,717 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CustomPlugin is the Schema for the CustomPlugins API. 
Provides + an Amazon MSK Connect custom plugin resource. This resource can be Created, + Observed and Deleted, but not Updated. AWS does not currently provide update + APIs. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CustomPluginSpec defines the desired state of CustomPlugin + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + contentType: + description: The type of the plugin file. Allowed values are ZIP + and JAR. + type: string + description: + description: A summary description of the custom plugin. + type: string + location: + description: Information about the location of a custom plugin. + See below. 
+ properties: + s3: + description: Information of the plugin file stored in Amazon + S3. See below. + properties: + bucketArn: + description: The Amazon Resource Name (ARN) of an S3 bucket. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + fileKey: + description: The file key for an object in an S3 bucket. + type: string + fileKeyRef: + description: Reference to a Object in s3 to populate fileKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileKeySelector: + description: Selector for a Object in s3 to populate fileKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectVersion: + description: The version of an object in an S3 bucket. + type: string + type: object + type: object + name: + description: The name of the custom plugin.. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - name + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + contentType: + description: The type of the plugin file. Allowed values are ZIP + and JAR. + type: string + description: + description: A summary description of the custom plugin. 
+ type: string + location: + description: Information about the location of a custom plugin. + See below. + properties: + s3: + description: Information of the plugin file stored in Amazon + S3. See below. + properties: + bucketArn: + description: The Amazon Resource Name (ARN) of an S3 bucket. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + fileKey: + description: The file key for an object in an S3 bucket. + type: string + fileKeyRef: + description: Reference to a Object in s3 to populate fileKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileKeySelector: + description: Selector for a Object in s3 to populate fileKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectVersion: + description: The version of an object in an S3 bucket. + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.contentType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.contentType) + || (has(self.initProvider) && has(self.initProvider.contentType))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: CustomPluginStatus defines the observed state of CustomPlugin. + properties: + atProvider: + properties: + arn: + description: the Amazon Resource Name (ARN) of the custom plugin. + type: string + contentType: + description: The type of the plugin file. Allowed values are ZIP + and JAR. + type: string + description: + description: A summary description of the custom plugin. + type: string + id: + type: string + latestRevision: + description: an ID of the latest successfully created revision + of the custom plugin. + type: number + location: + description: Information about the location of a custom plugin. + See below. + properties: + s3: + description: Information of the plugin file stored in Amazon + S3. See below. 
+ properties: + bucketArn: + description: The Amazon Resource Name (ARN) of an S3 bucket. + type: string + fileKey: + description: The file key for an object in an S3 bucket. + type: string + objectVersion: + description: The version of an object in an S3 bucket. + type: string + type: object + type: object + name: + description: The name of the custom plugin.. + type: string + state: + description: the state of the custom plugin. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kendra.aws.upbound.io_datasources.yaml b/package/crds/kendra.aws.upbound.io_datasources.yaml index ee012bd6f2..7f36c0739f 100644 --- a/package/crds/kendra.aws.upbound.io_datasources.yaml +++ b/package/crds/kendra.aws.upbound.io_datasources.yaml @@ -3037,3 +3037,2809 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataSource is the Schema for the DataSources API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataSourceSpec defines the desired state of DataSource + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configuration: + description: A block with the configuration information to connect + to your Data Source repository. You can't specify the configuration + block when the type parameter is set to CUSTOM. Detailed below. + properties: + s3Configuration: + description: A block that provides the configuration information + to connect to an Amazon S3 bucket as your data source. Detailed + below. + properties: + accessControlListConfiguration: + description: A block that provides the path to the S3 + bucket that contains the user context filtering files + for the data source. For the format of the file, see + Access control for S3 data sources. Detailed below. + properties: + keyPath: + description: Path to the AWS S3 bucket that contains + the ACL files. + type: string + type: object + bucketName: + description: The name of the bucket that contains the + documents. + type: string + bucketNameRef: + description: Reference to a Bucket in s3 to populate bucketName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a Bucket in s3 to populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + documentsMetadataConfiguration: + description: A block that defines the Document metadata + files that contain information such as the document + access control information, source URI, document author, + and custom attributes. Each metadata file contains metadata + about a single document. Detailed below. + properties: + s3Prefix: + description: A prefix used to filter metadata configuration + files in the AWS S3 bucket. The S3 bucket might + contain multiple metadata files. Use s3_prefix to + include only the desired metadata files. + type: string + type: object + exclusionPatterns: + description: A list of glob patterns for documents that + should not be indexed. If a document that matches an + inclusion prefix or inclusion pattern also matches an + exclusion pattern, the document is not indexed. Refer + to Exclusion Patterns for more examples. + items: + type: string + type: array + x-kubernetes-list-type: set + inclusionPatterns: + description: A list of glob patterns for documents that + should be indexed. If a document that matches an inclusion + pattern also matches an exclusion pattern, the document + is not indexed. Refer to Inclusion Patterns for more + examples. + items: + type: string + type: array + x-kubernetes-list-type: set + inclusionPrefixes: + description: A list of S3 prefixes for the documents that + should be included in the index. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + webCrawlerConfiguration: + description: A block that provides the configuration information + required for Amazon Kendra Web Crawler. Detailed below. + properties: + authenticationConfiguration: + description: A block with the configuration information + required to connect to websites using authentication. + You can connect to websites using basic authentication + of user name and password. 
You use a secret in AWS Secrets + Manager to store your authentication credentials. You + must provide the website host name and port number. + For example, the host name of https://a.example.com/page1.html + is "a.example.com" and the port is 443, the standard + port for HTTPS. Detailed below. + properties: + basicAuthentication: + description: The list of configuration information + that's required to connect to and crawl a website + host using basic authentication credentials. The + list includes the name and port number of the website + host. Detailed below. + items: + properties: + credentials: + description: Your secret ARN, which you can + create in AWS Secrets Manager. You use a secret + if basic authentication credentials are required + to connect to a website. The secret stores + your credentials of user name and password. + type: string + credentialsRef: + description: Reference to a Secret in secretsmanager + to populate credentials. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + credentialsSelector: + description: Selector for a Secret in secretsmanager + to populate credentials. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: The name of the website host you + want to connect to using authentication credentials. + For example, the host name of https://a.example.com/page1.html + is "a.example.com". + type: string + port: + description: The port number of the website + host you want to connect to using authentication + credentials. For example, the port for https://a.example.com/page1.html + is 443, the standard port for HTTPS. + type: number + type: object + type: array + type: object + crawlDepth: + description: Specifies the number of levels in a website + that you want to crawl. The first level begins from + the website seed or starting point URL. For example, + if a website has 3 levels – index level (i.e. seed in + this example), sections level, and subsections level + – and you are only interested in crawling information + up to the sections level (i.e. 
levels 0-1), you can + set your depth to 1. The default crawl depth is set + to 2. Minimum value of 0. Maximum value of 10. + type: number + maxContentSizePerPageInMegaBytes: + description: The maximum size (in MB) of a webpage or + attachment to crawl. Files larger than this size (in + MB) are skipped/not crawled. The default maximum size + of a webpage or attachment is set to 50 MB. Minimum + value of 1.0e-06. Maximum value of 50. + type: number + maxLinksPerPage: + description: The maximum number of URLs on a webpage to + include when crawling a website. This number is per + webpage. As a website’s webpages are crawled, any URLs + the webpages link to are also crawled. URLs on a webpage + are crawled in order of appearance. The default maximum + links per page is 100. Minimum value of 1. Maximum value + of 1000. + type: number + maxUrlsPerMinuteCrawlRate: + description: The maximum number of URLs crawled per website + host per minute. The default maximum number of URLs + crawled per website host per minute is 300. Minimum + value of 1. Maximum value of 300. + type: number + proxyConfiguration: + description: Configuration information required to connect + to your internal websites via a web proxy. You must + provide the website host name and port number. For example, + the host name of https://a.example.com/page1.html is + "a.example.com" and the port is 443, the standard port + for HTTPS. Web proxy credentials are optional and you + can use them to connect to a web proxy server that requires + basic authentication. To store web proxy credentials, + you use a secret in AWS Secrets Manager. Detailed below. + properties: + credentials: + description: Your secret ARN, which you can create + in AWS Secrets Manager. You use a secret if basic + authentication credentials are required to connect + to a website. The secret stores your credentials + of user name and password. 
+ type: string + credentialsRef: + description: Reference to a Secret in secretsmanager + to populate credentials. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + credentialsSelector: + description: Selector for a Secret in secretsmanager + to populate credentials. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: The name of the website host you want + to connect to using authentication credentials. + For example, the host name of https://a.example.com/page1.html + is "a.example.com". + type: string + port: + description: The port number of the website host you + want to connect to using authentication credentials. + For example, the port for https://a.example.com/page1.html + is 443, the standard port for HTTPS. + type: number + type: object + urlExclusionPatterns: + description: 'A list of regular expression patterns to + exclude certain URLs to crawl. URLs that match the patterns + are excluded from the index. URLs that don''t match + the patterns are included in the index. If a URL matches + both an inclusion and exclusion pattern, the exclusion + pattern takes precedence and the URL file isn''t included + in the index. Array Members: Minimum number of 0 items. + Maximum number of 100 items. Length Constraints: Minimum + length of 1. Maximum length of 150.' + items: + type: string + type: array + x-kubernetes-list-type: set + urlInclusionPatterns: + description: 'A list of regular expression patterns to + include certain URLs to crawl. URLs that match the patterns + are included in the index. URLs that don''t match the + patterns are excluded from the index. If a URL matches + both an inclusion and exclusion pattern, the exclusion + pattern takes precedence and the URL file isn''t included + in the index. Array Members: Minimum number of 0 items. + Maximum number of 100 items. Length Constraints: Minimum + length of 1. Maximum length of 150.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + urls: + description: A block that specifies the seed or starting + point URLs of the websites or the sitemap URLs of the + websites you want to crawl. You can include website + subdomains. You can list up to 100 seed URLs and up + to 3 sitemap URLs. You can only crawl websites that + use the secure communication protocol, Hypertext Transfer + Protocol Secure (HTTPS). If you receive an error when + crawling a website, it could be that the website is + blocked from crawling. When selecting websites to index, + you must adhere to the Amazon Acceptable Use Policy + and all other Amazon terms. Remember that you must only + use Amazon Kendra Web Crawler to index your own webpages, + or webpages that you have authorization to index. Detailed + below. + properties: + seedUrlConfiguration: + description: A block that specifies the configuration + of the seed or starting point URLs of the websites + you want to crawl. You can choose to crawl only + the website host names, or the website host names + with subdomains, or the website host names with + subdomains and other domains that the webpages link + to. You can list up to 100 seed URLs. Detailed below. + properties: + seedUrls: + description: 'The list of seed or starting point + URLs of the websites you want to crawl. The + list can include a maximum of 100 seed URLs. + Array Members: Minimum number of 0 items. Maximum + number of 100 items. Length Constraints: Minimum + length of 1. Maximum length of 2048.' + items: + type: string + type: array + x-kubernetes-list-type: set + webCrawlerMode: + description: 'The default mode is set to HOST_ONLY. + You can choose one of the following modes:' + type: string + type: object + siteMapsConfiguration: + description: A block that specifies the configuration + of the sitemap URLs of the websites you want to + crawl. Only URLs belonging to the same website host + names are crawled. 
You can list up to 3 sitemap + URLs. Detailed below. + properties: + siteMaps: + description: The list of sitemap URLs of the websites + you want to crawl. The list can include a maximum + of 3 sitemap URLs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + type: object + customDocumentEnrichmentConfiguration: + description: A block with the configuration information for altering + document metadata and content during the document ingestion + process. For more information on how to create, modify and delete + document metadata, or make other content alterations when you + ingest documents into Amazon Kendra, see Customizing document + metadata during the ingestion process. Detailed below. + properties: + inlineConfigurations: + description: Configuration information to alter document attributes + or metadata fields and content when ingesting documents + into Amazon Kendra. Minimum number of 0 items. Maximum number + of 100 items. Detailed below. + items: + properties: + condition: + description: Configuration of the condition used for + the target document attribute or metadata field when + ingesting documents into Amazon Kendra. See condition. + properties: + conditionDocumentAttributeKey: + description: The identifier of the document attribute + used for the condition. For example, _source_uri + could be an identifier for the attribute or metadata + field that contains source URIs associated with + the documents. Amazon Kendra currently does not + support _document_body as an attribute key used + for the condition. + type: string + conditionOnValue: + description: The value used by the operator. For + example, you can specify the value 'financial' + for strings in the _source_uri field that partially + match or contain this value. See condition_on_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 + string. 
It is important for the time zone + to be included in the ISO 8601 date-time format. + As of this writing only UTC is supported. + For example, 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + operator: + description: 'The condition operator. For example, + you can use Contains to partially match a string. + Valid Values: GreaterThan | GreaterThanOrEquals + | LessThan | LessThanOrEquals | Equals | NotEquals + | Contains | NotContains | Exists | NotExists + | BeginsWith.' + type: string + type: object + documentContentDeletion: + description: TRUE to delete content if the condition + used for the target attribute is met. + type: boolean + target: + description: Configuration of the target document attribute + or metadata field when ingesting documents into Amazon + Kendra. You can also include a value. Detailed below. + properties: + targetDocumentAttributeKey: + description: The identifier of the target document + attribute or metadata field. For example, 'Department' + could be an identifier for the target attribute + or metadata field that includes the department + names associated with the documents. + type: string + targetDocumentAttributeValue: + description: The target value you want to create + for the target attribute. For example, 'Finance' + could be the target value for the target attribute + key 'Department'. See target_document_attribute_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 + string. It is important for the time zone + to be included in the ISO 8601 date-time format. + As of this writing only UTC is supported. + For example, 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + targetDocumentAttributeValueDeletion: + description: TRUE to delete the existing target + value for your specified target attribute key. + You cannot create a target value and set this + to TRUE. To create a target value (TargetDocumentAttributeValue), + set this to FALSE. + type: boolean + type: object + type: object + type: array + postExtractionHookConfiguration: + description: A block that specifies the configuration information + for invoking a Lambda function in AWS Lambda on the structured + documents with their metadata and text extracted. You can + use a Lambda function to apply advanced logic for creating, + modifying, or deleting document metadata and content. For + more information, see Advanced data manipulation. Detailed + below. + properties: + invocationCondition: + description: A block that specifies the condition used + for when a Lambda function should be invoked. For example, + you can specify a condition that if there are empty + date-time values, then Amazon Kendra should invoke a + function that inserts the current date-time. See invocation_condition. + properties: + conditionDocumentAttributeKey: + description: The identifier of the document attribute + used for the condition. For example, _source_uri + could be an identifier for the attribute or metadata + field that contains source URIs associated with + the documents. Amazon Kendra currently does not + support _document_body as an attribute key used + for the condition. + type: string + conditionOnValue: + description: The value used by the operator. For example, + you can specify the value 'financial' for strings + in the _source_uri field that partially match or + contain this value. See condition_on_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 string. + It is important for the time zone to be included + in the ISO 8601 date-time format. 
As of this + writing only UTC is supported. For example, + 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + operator: + description: 'The condition operator. For example, + you can use Contains to partially match a string. + Valid Values: GreaterThan | GreaterThanOrEquals + | LessThan | LessThanOrEquals | Equals | NotEquals + | Contains | NotContains | Exists | NotExists | + BeginsWith.' + type: string + type: object + lambdaArn: + description: The Amazon Resource Name (ARN) of a Lambda + Function that can manipulate your document metadata + fields or attributes and content. + type: string + s3Bucket: + description: Stores the original, raw documents or the + structured, parsed documents before and after altering + them. For more information, see Data contracts for Lambda + functions. + type: string + type: object + preExtractionHookConfiguration: + description: Configuration information for invoking a Lambda + function in AWS Lambda on the original or raw documents + before extracting their metadata and text. You can use a + Lambda function to apply advanced logic for creating, modifying, + or deleting document metadata and content. For more information, + see Advanced data manipulation. Detailed below. + properties: + invocationCondition: + description: A block that specifies the condition used + for when a Lambda function should be invoked. For example, + you can specify a condition that if there are empty + date-time values, then Amazon Kendra should invoke a + function that inserts the current date-time. See invocation_condition. + properties: + conditionDocumentAttributeKey: + description: The identifier of the document attribute + used for the condition. 
For example, _source_uri + could be an identifier for the attribute or metadata + field that contains source URIs associated with + the documents. Amazon Kendra currently does not + support _document_body as an attribute key used + for the condition. + type: string + conditionOnValue: + description: The value used by the operator. For example, + you can specify the value 'financial' for strings + in the _source_uri field that partially match or + contain this value. See condition_on_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 string. + It is important for the time zone to be included + in the ISO 8601 date-time format. As of this + writing only UTC is supported. For example, + 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + operator: + description: 'The condition operator. For example, + you can use Contains to partially match a string. + Valid Values: GreaterThan | GreaterThanOrEquals + | LessThan | LessThanOrEquals | Equals | NotEquals + | Contains | NotContains | Exists | NotExists | + BeginsWith.' + type: string + type: object + lambdaArn: + description: The Amazon Resource Name (ARN) of a Lambda + Function that can manipulate your document metadata + fields or attributes and content. + type: string + s3Bucket: + description: Stores the original, raw documents or the + structured, parsed documents before and after altering + them. For more information, see Data contracts for Lambda + functions. + type: string + type: object + roleArn: + description: The Amazon Resource Name (ARN) of a role with + permission to run pre_extraction_hook_configuration and + post_extraction_hook_configuration for altering document + metadata and content during the document ingestion process. 
+ For more information, see IAM roles for Amazon Kendra. + type: string + type: object + description: + description: A description for the Data Source connector. + type: string + indexId: + description: The identifier of the index for your Amazon Kendra + data source. + type: string + indexIdRef: + description: Reference to a Index in kendra to populate indexId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + indexIdSelector: + description: Selector for a Index in kendra to populate indexId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + languageCode: + description: The code for a language. This allows you to support + a language for all documents when creating the Data Source connector. + English is supported by default. For more information on supported + languages, including their codes, see Adding documents in languages + other than English. + type: string + name: + description: A name for your data source connector. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of a role with permission + to access the data source connector. For more information, see + IAM roles for Amazon Kendra. You can't specify the role_arn + parameter when the type parameter is set to CUSTOM. The role_arn + parameter is required for all other data sources. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + schedule: + description: Sets the frequency for Amazon Kendra to check the + documents in your Data Source repository and update the index. + If you don't set a schedule Amazon Kendra will not periodically + update the index. You can call the StartDataSourceSyncJob API + to update the index. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The type of data source repository. For an updated + list of values, refer to Valid Values for Type. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configuration: + description: A block with the configuration information to connect + to your Data Source repository. You can't specify the configuration + block when the type parameter is set to CUSTOM. Detailed below. + properties: + s3Configuration: + description: A block that provides the configuration information + to connect to an Amazon S3 bucket as your data source. Detailed + below. + properties: + accessControlListConfiguration: + description: A block that provides the path to the S3 + bucket that contains the user context filtering files + for the data source. For the format of the file, see + Access control for S3 data sources. Detailed below. + properties: + keyPath: + description: Path to the AWS S3 bucket that contains + the ACL files. + type: string + type: object + bucketName: + description: The name of the bucket that contains the + documents. + type: string + bucketNameRef: + description: Reference to a Bucket in s3 to populate bucketName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a Bucket in s3 to populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + documentsMetadataConfiguration: + description: A block that defines the Document metadata + files that contain information such as the document + access control information, source URI, document author, + and custom attributes. Each metadata file contains metadata + about a single document. Detailed below. + properties: + s3Prefix: + description: A prefix used to filter metadata configuration + files in the AWS S3 bucket. The S3 bucket might + contain multiple metadata files. Use s3_prefix to + include only the desired metadata files. + type: string + type: object + exclusionPatterns: + description: A list of glob patterns for documents that + should not be indexed. If a document that matches an + inclusion prefix or inclusion pattern also matches an + exclusion pattern, the document is not indexed. Refer + to Exclusion Patterns for more examples. + items: + type: string + type: array + x-kubernetes-list-type: set + inclusionPatterns: + description: A list of glob patterns for documents that + should be indexed. If a document that matches an inclusion + pattern also matches an exclusion pattern, the document + is not indexed. Refer to Inclusion Patterns for more + examples. + items: + type: string + type: array + x-kubernetes-list-type: set + inclusionPrefixes: + description: A list of S3 prefixes for the documents that + should be included in the index. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + webCrawlerConfiguration: + description: A block that provides the configuration information + required for Amazon Kendra Web Crawler. Detailed below. + properties: + authenticationConfiguration: + description: A block with the configuration information + required to connect to websites using authentication. + You can connect to websites using basic authentication + of user name and password. 
You use a secret in AWS Secrets + Manager to store your authentication credentials. You + must provide the website host name and port number. + For example, the host name of https://a.example.com/page1.html + is "a.example.com" and the port is 443, the standard + port for HTTPS. Detailed below. + properties: + basicAuthentication: + description: The list of configuration information + that's required to connect to and crawl a website + host using basic authentication credentials. The + list includes the name and port number of the website + host. Detailed below. + items: + properties: + credentials: + description: Your secret ARN, which you can + create in AWS Secrets Manager. You use a secret + if basic authentication credentials are required + to connect to a website. The secret stores + your credentials of user name and password. + type: string + credentialsRef: + description: Reference to a Secret in secretsmanager + to populate credentials. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + credentialsSelector: + description: Selector for a Secret in secretsmanager + to populate credentials. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: The name of the website host you + want to connect to using authentication credentials. + For example, the host name of https://a.example.com/page1.html + is "a.example.com". + type: string + port: + description: The port number of the website + host you want to connect to using authentication + credentials. For example, the port for https://a.example.com/page1.html + is 443, the standard port for HTTPS. + type: number + type: object + type: array + type: object + crawlDepth: + description: Specifies the number of levels in a website + that you want to crawl. The first level begins from + the website seed or starting point URL. For example, + if a website has 3 levels – index level (i.e. seed in + this example), sections level, and subsections level + – and you are only interested in crawling information + up to the sections level (i.e. 
levels 0-1), you can + set your depth to 1. The default crawl depth is set + to 2. Minimum value of 0. Maximum value of 10. + type: number + maxContentSizePerPageInMegaBytes: + description: The maximum size (in MB) of a webpage or + attachment to crawl. Files larger than this size (in + MB) are skipped/not crawled. The default maximum size + of a webpage or attachment is set to 50 MB. Minimum + value of 1.0e-06. Maximum value of 50. + type: number + maxLinksPerPage: + description: The maximum number of URLs on a webpage to + include when crawling a website. This number is per + webpage. As a website’s webpages are crawled, any URLs + the webpages link to are also crawled. URLs on a webpage + are crawled in order of appearance. The default maximum + links per page is 100. Minimum value of 1. Maximum value + of 1000. + type: number + maxUrlsPerMinuteCrawlRate: + description: The maximum number of URLs crawled per website + host per minute. The default maximum number of URLs + crawled per website host per minute is 300. Minimum + value of 1. Maximum value of 300. + type: number + proxyConfiguration: + description: Configuration information required to connect + to your internal websites via a web proxy. You must + provide the website host name and port number. For example, + the host name of https://a.example.com/page1.html is + "a.example.com" and the port is 443, the standard port + for HTTPS. Web proxy credentials are optional and you + can use them to connect to a web proxy server that requires + basic authentication. To store web proxy credentials, + you use a secret in AWS Secrets Manager. Detailed below. + properties: + credentials: + description: Your secret ARN, which you can create + in AWS Secrets Manager. You use a secret if basic + authentication credentials are required to connect + to a website. The secret stores your credentials + of user name and password. 
+ type: string + credentialsRef: + description: Reference to a Secret in secretsmanager + to populate credentials. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + credentialsSelector: + description: Selector for a Secret in secretsmanager + to populate credentials. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + host: + description: The name of the website host you want + to connect to using authentication credentials. + For example, the host name of https://a.example.com/page1.html + is "a.example.com". + type: string + port: + description: The port number of the website host you + want to connect to using authentication credentials. + For example, the port for https://a.example.com/page1.html + is 443, the standard port for HTTPS. + type: number + type: object + urlExclusionPatterns: + description: 'A list of regular expression patterns to + exclude certain URLs to crawl. URLs that match the patterns + are excluded from the index. URLs that don''t match + the patterns are included in the index. If a URL matches + both an inclusion and exclusion pattern, the exclusion + pattern takes precedence and the URL file isn''t included + in the index. Array Members: Minimum number of 0 items. + Maximum number of 100 items. Length Constraints: Minimum + length of 1. Maximum length of 150.' + items: + type: string + type: array + x-kubernetes-list-type: set + urlInclusionPatterns: + description: 'A list of regular expression patterns to + include certain URLs to crawl. URLs that match the patterns + are included in the index. URLs that don''t match the + patterns are excluded from the index. If a URL matches + both an inclusion and exclusion pattern, the exclusion + pattern takes precedence and the URL file isn''t included + in the index. Array Members: Minimum number of 0 items. + Maximum number of 100 items. Length Constraints: Minimum + length of 1. Maximum length of 150.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + urls: + description: A block that specifies the seed or starting + point URLs of the websites or the sitemap URLs of the + websites you want to crawl. You can include website + subdomains. You can list up to 100 seed URLs and up + to 3 sitemap URLs. You can only crawl websites that + use the secure communication protocol, Hypertext Transfer + Protocol Secure (HTTPS). If you receive an error when + crawling a website, it could be that the website is + blocked from crawling. When selecting websites to index, + you must adhere to the Amazon Acceptable Use Policy + and all other Amazon terms. Remember that you must only + use Amazon Kendra Web Crawler to index your own webpages, + or webpages that you have authorization to index. Detailed + below. + properties: + seedUrlConfiguration: + description: A block that specifies the configuration + of the seed or starting point URLs of the websites + you want to crawl. You can choose to crawl only + the website host names, or the website host names + with subdomains, or the website host names with + subdomains and other domains that the webpages link + to. You can list up to 100 seed URLs. Detailed below. + properties: + seedUrls: + description: 'The list of seed or starting point + URLs of the websites you want to crawl. The + list can include a maximum of 100 seed URLs. + Array Members: Minimum number of 0 items. Maximum + number of 100 items. Length Constraints: Minimum + length of 1. Maximum length of 2048.' + items: + type: string + type: array + x-kubernetes-list-type: set + webCrawlerMode: + description: 'The default mode is set to HOST_ONLY. + You can choose one of the following modes:' + type: string + type: object + siteMapsConfiguration: + description: A block that specifies the configuration + of the sitemap URLs of the websites you want to + crawl. Only URLs belonging to the same website host + names are crawled. 
You can list up to 3 sitemap + URLs. Detailed below. + properties: + siteMaps: + description: The list of sitemap URLs of the websites + you want to crawl. The list can include a maximum + of 3 sitemap URLs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + type: object + customDocumentEnrichmentConfiguration: + description: A block with the configuration information for altering + document metadata and content during the document ingestion + process. For more information on how to create, modify and delete + document metadata, or make other content alterations when you + ingest documents into Amazon Kendra, see Customizing document + metadata during the ingestion process. Detailed below. + properties: + inlineConfigurations: + description: Configuration information to alter document attributes + or metadata fields and content when ingesting documents + into Amazon Kendra. Minimum number of 0 items. Maximum number + of 100 items. Detailed below. + items: + properties: + condition: + description: Configuration of the condition used for + the target document attribute or metadata field when + ingesting documents into Amazon Kendra. See condition. + properties: + conditionDocumentAttributeKey: + description: The identifier of the document attribute + used for the condition. For example, _source_uri + could be an identifier for the attribute or metadata + field that contains source URIs associated with + the documents. Amazon Kendra currently does not + support _document_body as an attribute key used + for the condition. + type: string + conditionOnValue: + description: The value used by the operator. For + example, you can specify the value 'financial' + for strings in the _source_uri field that partially + match or contain this value. See condition_on_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 + string. 
It is important for the time zone + to be included in the ISO 8601 date-time format. + As of this writing only UTC is supported. + For example, 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + operator: + description: 'The condition operator. For example, + you can use Contains to partially match a string. + Valid Values: GreaterThan | GreaterThanOrEquals + | LessThan | LessThanOrEquals | Equals | NotEquals + | Contains | NotContains | Exists | NotExists + | BeginsWith.' + type: string + type: object + documentContentDeletion: + description: TRUE to delete content if the condition + used for the target attribute is met. + type: boolean + target: + description: Configuration of the target document attribute + or metadata field when ingesting documents into Amazon + Kendra. You can also include a value. Detailed below. + properties: + targetDocumentAttributeKey: + description: The identifier of the target document + attribute or metadata field. For example, 'Department' + could be an identifier for the target attribute + or metadata field that includes the department + names associated with the documents. + type: string + targetDocumentAttributeValue: + description: The target value you want to create + for the target attribute. For example, 'Finance' + could be the target value for the target attribute + key 'Department'. See target_document_attribute_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 + string. It is important for the time zone + to be included in the ISO 8601 date-time format. + As of this writing only UTC is supported. + For example, 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + targetDocumentAttributeValueDeletion: + description: TRUE to delete the existing target + value for your specified target attribute key. + You cannot create a target value and set this + to TRUE. To create a target value (TargetDocumentAttributeValue), + set this to FALSE. + type: boolean + type: object + type: object + type: array + postExtractionHookConfiguration: + description: A block that specifies the configuration information + for invoking a Lambda function in AWS Lambda on the structured + documents with their metadata and text extracted. You can + use a Lambda function to apply advanced logic for creating, + modifying, or deleting document metadata and content. For + more information, see Advanced data manipulation. Detailed + below. + properties: + invocationCondition: + description: A block that specifies the condition used + for when a Lambda function should be invoked. For example, + you can specify a condition that if there are empty + date-time values, then Amazon Kendra should invoke a + function that inserts the current date-time. See invocation_condition. + properties: + conditionDocumentAttributeKey: + description: The identifier of the document attribute + used for the condition. For example, _source_uri + could be an identifier for the attribute or metadata + field that contains source URIs associated with + the documents. Amazon Kendra currently does not + support _document_body as an attribute key used + for the condition. + type: string + conditionOnValue: + description: The value used by the operator. For example, + you can specify the value 'financial' for strings + in the _source_uri field that partially match or + contain this value. See condition_on_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 string. + It is important for the time zone to be included + in the ISO 8601 date-time format. 
As of this + writing only UTC is supported. For example, + 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + operator: + description: 'The condition operator. For example, + you can use Contains to partially match a string. + Valid Values: GreaterThan | GreaterThanOrEquals + | LessThan | LessThanOrEquals | Equals | NotEquals + | Contains | NotContains | Exists | NotExists | + BeginsWith.' + type: string + type: object + lambdaArn: + description: The Amazon Resource Name (ARN) of a Lambda + Function that can manipulate your document metadata + fields or attributes and content. + type: string + s3Bucket: + description: Stores the original, raw documents or the + structured, parsed documents before and after altering + them. For more information, see Data contracts for Lambda + functions. + type: string + type: object + preExtractionHookConfiguration: + description: Configuration information for invoking a Lambda + function in AWS Lambda on the original or raw documents + before extracting their metadata and text. You can use a + Lambda function to apply advanced logic for creating, modifying, + or deleting document metadata and content. For more information, + see Advanced data manipulation. Detailed below. + properties: + invocationCondition: + description: A block that specifies the condition used + for when a Lambda function should be invoked. For example, + you can specify a condition that if there are empty + date-time values, then Amazon Kendra should invoke a + function that inserts the current date-time. See invocation_condition. + properties: + conditionDocumentAttributeKey: + description: The identifier of the document attribute + used for the condition. 
For example, _source_uri + could be an identifier for the attribute or metadata + field that contains source URIs associated with + the documents. Amazon Kendra currently does not + support _document_body as an attribute key used + for the condition. + type: string + conditionOnValue: + description: The value used by the operator. For example, + you can specify the value 'financial' for strings + in the _source_uri field that partially match or + contain this value. See condition_on_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 string. + It is important for the time zone to be included + in the ISO 8601 date-time format. As of this + writing only UTC is supported. For example, + 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + operator: + description: 'The condition operator. For example, + you can use Contains to partially match a string. + Valid Values: GreaterThan | GreaterThanOrEquals + | LessThan | LessThanOrEquals | Equals | NotEquals + | Contains | NotContains | Exists | NotExists | + BeginsWith.' + type: string + type: object + lambdaArn: + description: The Amazon Resource Name (ARN) of a Lambda + Function that can manipulate your document metadata + fields or attributes and content. + type: string + s3Bucket: + description: Stores the original, raw documents or the + structured, parsed documents before and after altering + them. For more information, see Data contracts for Lambda + functions. + type: string + type: object + roleArn: + description: The Amazon Resource Name (ARN) of a role with + permission to run pre_extraction_hook_configuration and + post_extraction_hook_configuration for altering document + metadata and content during the document ingestion process. 
+ For more information, see IAM roles for Amazon Kendra. + type: string + type: object + description: + description: A description for the Data Source connector. + type: string + indexId: + description: The identifier of the index for your Amazon Kendra + data source. + type: string + indexIdRef: + description: Reference to a Index in kendra to populate indexId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + indexIdSelector: + description: Selector for a Index in kendra to populate indexId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + languageCode: + description: The code for a language. This allows you to support + a language for all documents when creating the Data Source connector. + English is supported by default. For more information on supported + languages, including their codes, see Adding documents in languages + other than English. + type: string + name: + description: A name for your data source connector. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of a role with permission + to access the data source connector. For more information, see + IAM roles for Amazon Kendra. You can't specify the role_arn + parameter when the type parameter is set to CUSTOM. The role_arn + parameter is required for all other data sources. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + schedule: + description: Sets the frequency for Amazon Kendra to check the + documents in your Data Source repository and update the index. + If you don't set a schedule Amazon Kendra will not periodically + update the index. You can call the StartDataSourceSyncJob API + to update the index. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The type of data source repository. 
For an updated + list of values, refer to Valid Values for Type. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: DataSourceStatus defines the observed state of DataSource. + properties: + atProvider: + properties: + arn: + description: ARN of the Data Source. + type: string + configuration: + description: A block with the configuration information to connect + to your Data Source repository. You can't specify the configuration + block when the type parameter is set to CUSTOM. Detailed below. + properties: + s3Configuration: + description: A block that provides the configuration information + to connect to an Amazon S3 bucket as your data source. Detailed + below. + properties: + accessControlListConfiguration: + description: A block that provides the path to the S3 + bucket that contains the user context filtering files + for the data source. For the format of the file, see + Access control for S3 data sources. Detailed below. + properties: + keyPath: + description: Path to the AWS S3 bucket that contains + the ACL files. + type: string + type: object + bucketName: + description: The name of the bucket that contains the + documents. 
+ type: string + documentsMetadataConfiguration: + description: A block that defines the Document metadata + files that contain information such as the document + access control information, source URI, document author, + and custom attributes. Each metadata file contains metadata + about a single document. Detailed below. + properties: + s3Prefix: + description: A prefix used to filter metadata configuration + files in the AWS S3 bucket. The S3 bucket might + contain multiple metadata files. Use s3_prefix to + include only the desired metadata files. + type: string + type: object + exclusionPatterns: + description: A list of glob patterns for documents that + should not be indexed. If a document that matches an + inclusion prefix or inclusion pattern also matches an + exclusion pattern, the document is not indexed. Refer + to Exclusion Patterns for more examples. + items: + type: string + type: array + x-kubernetes-list-type: set + inclusionPatterns: + description: A list of glob patterns for documents that + should be indexed. If a document that matches an inclusion + pattern also matches an exclusion pattern, the document + is not indexed. Refer to Inclusion Patterns for more + examples. + items: + type: string + type: array + x-kubernetes-list-type: set + inclusionPrefixes: + description: A list of S3 prefixes for the documents that + should be included in the index. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + webCrawlerConfiguration: + description: A block that provides the configuration information + required for Amazon Kendra Web Crawler. Detailed below. + properties: + authenticationConfiguration: + description: A block with the configuration information + required to connect to websites using authentication. + You can connect to websites using basic authentication + of user name and password. You use a secret in AWS Secrets + Manager to store your authentication credentials. 
You + must provide the website host name and port number. + For example, the host name of https://a.example.com/page1.html + is "a.example.com" and the port is 443, the standard + port for HTTPS. Detailed below. + properties: + basicAuthentication: + description: The list of configuration information + that's required to connect to and crawl a website + host using basic authentication credentials. The + list includes the name and port number of the website + host. Detailed below. + items: + properties: + credentials: + description: Your secret ARN, which you can + create in AWS Secrets Manager. You use a secret + if basic authentication credentials are required + to connect to a website. The secret stores + your credentials of user name and password. + type: string + host: + description: The name of the website host you + want to connect to using authentication credentials. + For example, the host name of https://a.example.com/page1.html + is "a.example.com". + type: string + port: + description: The port number of the website + host you want to connect to using authentication + credentials. For example, the port for https://a.example.com/page1.html + is 443, the standard port for HTTPS. + type: number + type: object + type: array + type: object + crawlDepth: + description: Specifies the number of levels in a website + that you want to crawl. The first level begins from + the website seed or starting point URL. For example, + if a website has 3 levels – index level (i.e. seed in + this example), sections level, and subsections level + – and you are only interested in crawling information + up to the sections level (i.e. levels 0-1), you can + set your depth to 1. The default crawl depth is set + to 2. Minimum value of 0. Maximum value of 10. + type: number + maxContentSizePerPageInMegaBytes: + description: The maximum size (in MB) of a webpage or + attachment to crawl. Files larger than this size (in + MB) are skipped/not crawled. 
The default maximum size + of a webpage or attachment is set to 50 MB. Minimum + value of 1.0e-06. Maximum value of 50. + type: number + maxLinksPerPage: + description: The maximum number of URLs on a webpage to + include when crawling a website. This number is per + webpage. As a website’s webpages are crawled, any URLs + the webpages link to are also crawled. URLs on a webpage + are crawled in order of appearance. The default maximum + links per page is 100. Minimum value of 1. Maximum value + of 1000. + type: number + maxUrlsPerMinuteCrawlRate: + description: The maximum number of URLs crawled per website + host per minute. The default maximum number of URLs + crawled per website host per minute is 300. Minimum + value of 1. Maximum value of 300. + type: number + proxyConfiguration: + description: Configuration information required to connect + to your internal websites via a web proxy. You must + provide the website host name and port number. For example, + the host name of https://a.example.com/page1.html is + "a.example.com" and the port is 443, the standard port + for HTTPS. Web proxy credentials are optional and you + can use them to connect to a web proxy server that requires + basic authentication. To store web proxy credentials, + you use a secret in AWS Secrets Manager. Detailed below. + properties: + credentials: + description: Your secret ARN, which you can create + in AWS Secrets Manager. You use a secret if basic + authentication credentials are required to connect + to a website. The secret stores your credentials + of user name and password. + type: string + host: + description: The name of the website host you want + to connect to using authentication credentials. + For example, the host name of https://a.example.com/page1.html + is "a.example.com". + type: string + port: + description: The port number of the website host you + want to connect to using authentication credentials. 
+ For example, the port for https://a.example.com/page1.html + is 443, the standard port for HTTPS. + type: number + type: object + urlExclusionPatterns: + description: 'A list of regular expression patterns to + exclude certain URLs to crawl. URLs that match the patterns + are excluded from the index. URLs that don''t match + the patterns are included in the index. If a URL matches + both an inclusion and exclusion pattern, the exclusion + pattern takes precedence and the URL file isn''t included + in the index. Array Members: Minimum number of 0 items. + Maximum number of 100 items. Length Constraints: Minimum + length of 1. Maximum length of 150.' + items: + type: string + type: array + x-kubernetes-list-type: set + urlInclusionPatterns: + description: 'A list of regular expression patterns to + include certain URLs to crawl. URLs that match the patterns + are included in the index. URLs that don''t match the + patterns are excluded from the index. If a URL matches + both an inclusion and exclusion pattern, the exclusion + pattern takes precedence and the URL file isn''t included + in the index. Array Members: Minimum number of 0 items. + Maximum number of 100 items. Length Constraints: Minimum + length of 1. Maximum length of 150.' + items: + type: string + type: array + x-kubernetes-list-type: set + urls: + description: A block that specifies the seed or starting + point URLs of the websites or the sitemap URLs of the + websites you want to crawl. You can include website + subdomains. You can list up to 100 seed URLs and up + to 3 sitemap URLs. You can only crawl websites that + use the secure communication protocol, Hypertext Transfer + Protocol Secure (HTTPS). If you receive an error when + crawling a website, it could be that the website is + blocked from crawling. When selecting websites to index, + you must adhere to the Amazon Acceptable Use Policy + and all other Amazon terms. 
Remember that you must only + use Amazon Kendra Web Crawler to index your own webpages, + or webpages that you have authorization to index. Detailed + below. + properties: + seedUrlConfiguration: + description: A block that specifies the configuration + of the seed or starting point URLs of the websites + you want to crawl. You can choose to crawl only + the website host names, or the website host names + with subdomains, or the website host names with + subdomains and other domains that the webpages link + to. You can list up to 100 seed URLs. Detailed below. + properties: + seedUrls: + description: 'The list of seed or starting point + URLs of the websites you want to crawl. The + list can include a maximum of 100 seed URLs. + Array Members: Minimum number of 0 items. Maximum + number of 100 items. Length Constraints: Minimum + length of 1. Maximum length of 2048.' + items: + type: string + type: array + x-kubernetes-list-type: set + webCrawlerMode: + description: 'The default mode is set to HOST_ONLY. + You can choose one of the following modes:' + type: string + type: object + siteMapsConfiguration: + description: A block that specifies the configuration + of the sitemap URLs of the websites you want to + crawl. Only URLs belonging to the same website host + names are crawled. You can list up to 3 sitemap + URLs. Detailed below. + properties: + siteMaps: + description: The list of sitemap URLs of the websites + you want to crawl. The list can include a maximum + of 3 sitemap URLs. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: object + type: object + createdAt: + description: The Unix timestamp of when the Data Source was created. + type: string + customDocumentEnrichmentConfiguration: + description: A block with the configuration information for altering + document metadata and content during the document ingestion + process. 
For more information on how to create, modify and delete + document metadata, or make other content alterations when you + ingest documents into Amazon Kendra, see Customizing document + metadata during the ingestion process. Detailed below. + properties: + inlineConfigurations: + description: Configuration information to alter document attributes + or metadata fields and content when ingesting documents + into Amazon Kendra. Minimum number of 0 items. Maximum number + of 100 items. Detailed below. + items: + properties: + condition: + description: Configuration of the condition used for + the target document attribute or metadata field when + ingesting documents into Amazon Kendra. See condition. + properties: + conditionDocumentAttributeKey: + description: The identifier of the document attribute + used for the condition. For example, _source_uri + could be an identifier for the attribute or metadata + field that contains source URIs associated with + the documents. Amazon Kendra currently does not + support _document_body as an attribute key used + for the condition. + type: string + conditionOnValue: + description: The value used by the operator. For + example, you can specify the value 'financial' + for strings in the _source_uri field that partially + match or contain this value. See condition_on_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 + string. It is important for the time zone + to be included in the ISO 8601 date-time format. + As of this writing only UTC is supported. + For example, 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + operator: + description: 'The condition operator. For example, + you can use Contains to partially match a string. 
+ Valid Values: GreaterThan | GreaterThanOrEquals + | LessThan | LessThanOrEquals | Equals | NotEquals + | Contains | NotContains | Exists | NotExists + | BeginsWith.' + type: string + type: object + documentContentDeletion: + description: TRUE to delete content if the condition + used for the target attribute is met. + type: boolean + target: + description: Configuration of the target document attribute + or metadata field when ingesting documents into Amazon + Kendra. You can also include a value. Detailed below. + properties: + targetDocumentAttributeKey: + description: The identifier of the target document + attribute or metadata field. For example, 'Department' + could be an identifier for the target attribute + or metadata field that includes the department + names associated with the documents. + type: string + targetDocumentAttributeValue: + description: The target value you want to create + for the target attribute. For example, 'Finance' + could be the target value for the target attribute + key 'Department'. See target_document_attribute_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 + string. It is important for the time zone + to be included in the ISO 8601 date-time format. + As of this writing only UTC is supported. + For example, 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + targetDocumentAttributeValueDeletion: + description: TRUE to delete the existing target + value for your specified target attribute key. + You cannot create a target value and set this + to TRUE. To create a target value (TargetDocumentAttributeValue), + set this to FALSE. 
+ type: boolean + type: object + type: object + type: array + postExtractionHookConfiguration: + description: A block that specifies the configuration information + for invoking a Lambda function in AWS Lambda on the structured + documents with their metadata and text extracted. You can + use a Lambda function to apply advanced logic for creating, + modifying, or deleting document metadata and content. For + more information, see Advanced data manipulation. Detailed + below. + properties: + invocationCondition: + description: A block that specifies the condition used + for when a Lambda function should be invoked. For example, + you can specify a condition that if there are empty + date-time values, then Amazon Kendra should invoke a + function that inserts the current date-time. See invocation_condition. + properties: + conditionDocumentAttributeKey: + description: The identifier of the document attribute + used for the condition. For example, _source_uri + could be an identifier for the attribute or metadata + field that contains source URIs associated with + the documents. Amazon Kendra currently does not + support _document_body as an attribute key used + for the condition. + type: string + conditionOnValue: + description: The value used by the operator. For example, + you can specify the value 'financial' for strings + in the _source_uri field that partially match or + contain this value. See condition_on_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 string. + It is important for the time zone to be included + in the ISO 8601 date-time format. As of this + writing only UTC is supported. For example, + 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + operator: + description: 'The condition operator. 
For example, + you can use Contains to partially match a string. + Valid Values: GreaterThan | GreaterThanOrEquals + | LessThan | LessThanOrEquals | Equals | NotEquals + | Contains | NotContains | Exists | NotExists | + BeginsWith.' + type: string + type: object + lambdaArn: + description: The Amazon Resource Name (ARN) of a Lambda + Function that can manipulate your document metadata + fields or attributes and content. + type: string + s3Bucket: + description: Stores the original, raw documents or the + structured, parsed documents before and after altering + them. For more information, see Data contracts for Lambda + functions. + type: string + type: object + preExtractionHookConfiguration: + description: Configuration information for invoking a Lambda + function in AWS Lambda on the original or raw documents + before extracting their metadata and text. You can use a + Lambda function to apply advanced logic for creating, modifying, + or deleting document metadata and content. For more information, + see Advanced data manipulation. Detailed below. + properties: + invocationCondition: + description: A block that specifies the condition used + for when a Lambda function should be invoked. For example, + you can specify a condition that if there are empty + date-time values, then Amazon Kendra should invoke a + function that inserts the current date-time. See invocation_condition. + properties: + conditionDocumentAttributeKey: + description: The identifier of the document attribute + used for the condition. For example, _source_uri + could be an identifier for the attribute or metadata + field that contains source URIs associated with + the documents. Amazon Kendra currently does not + support _document_body as an attribute key used + for the condition. + type: string + conditionOnValue: + description: The value used by the operator. 
For example, + you can specify the value 'financial' for strings + in the _source_uri field that partially match or + contain this value. See condition_on_value. + properties: + dateValue: + description: A date expressed as an ISO 8601 string. + It is important for the time zone to be included + in the ISO 8601 date-time format. As of this + writing only UTC is supported. For example, + 2012-03-25T12:30:10+00:00. + type: string + longValue: + description: A long integer value. + type: number + stringListValue: + description: A list of strings. + items: + type: string + type: array + x-kubernetes-list-type: set + stringValue: + type: string + type: object + operator: + description: 'The condition operator. For example, + you can use Contains to partially match a string. + Valid Values: GreaterThan | GreaterThanOrEquals + | LessThan | LessThanOrEquals | Equals | NotEquals + | Contains | NotContains | Exists | NotExists | + BeginsWith.' + type: string + type: object + lambdaArn: + description: The Amazon Resource Name (ARN) of a Lambda + Function that can manipulate your document metadata + fields or attributes and content. + type: string + s3Bucket: + description: Stores the original, raw documents or the + structured, parsed documents before and after altering + them. For more information, see Data contracts for Lambda + functions. + type: string + type: object + roleArn: + description: The Amazon Resource Name (ARN) of a role with + permission to run pre_extraction_hook_configuration and + post_extraction_hook_configuration for altering document + metadata and content during the document ingestion process. + For more information, see IAM roles for Amazon Kendra. + type: string + type: object + dataSourceId: + description: The unique identifiers of the Data Source. + type: string + description: + description: A description for the Data Source connector. 
+ type: string + errorMessage: + description: When the Status field value is FAILED, the ErrorMessage + field contains a description of the error that caused the Data + Source to fail. + type: string + id: + description: The unique identifiers of the Data Source and index + separated by a slash (/). + type: string + indexId: + description: The identifier of the index for your Amazon Kendra + data source. + type: string + languageCode: + description: The code for a language. This allows you to support + a language for all documents when creating the Data Source connector. + English is supported by default. For more information on supported + languages, including their codes, see Adding documents in languages + other than English. + type: string + name: + description: A name for your data source connector. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of a role with permission + to access the data source connector. For more information, see + IAM roles for Amazon Kendra. You can't specify the role_arn + parameter when the type parameter is set to CUSTOM. The role_arn + parameter is required for all other data sources. + type: string + schedule: + description: Sets the frequency for Amazon Kendra to check the + documents in your Data Source repository and update the index. + If you don't set a schedule Amazon Kendra will not periodically + update the index. You can call the StartDataSourceSyncJob API + to update the index. + type: string + status: + description: The current status of the Data Source. When the status + is ACTIVE the Data Source is ready to use. When the status is + FAILED, the error_message field contains the reason that the + Data Source failed. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: + description: The type of data source repository. For an updated + list of values, refer to Valid Values for Type. + type: string + updatedAt: + description: The Unix timestamp of when the Data Source was last + updated. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kendra.aws.upbound.io_experiences.yaml b/package/crds/kendra.aws.upbound.io_experiences.yaml index 4b50139222..b305ce0d62 100644 --- a/package/crds/kendra.aws.upbound.io_experiences.yaml +++ b/package/crds/kendra.aws.upbound.io_experiences.yaml @@ -845,3 +845,809 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Experience is the Schema for the Experiences API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExperienceSpec defines the desired state of Experience + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configuration: + description: Configuration information for your Amazon Kendra + experience. Detailed below. + properties: + contentSourceConfiguration: + description: The identifiers of your data sources and FAQs. + Or, you can specify that you want to use documents indexed + via the BatchPutDocument API. Detailed below. + properties: + dataSourceIds: + description: The identifiers of the data sources you want + to use for your Amazon Kendra experience. Maximum number + of 100 items. + items: + type: string + type: array + x-kubernetes-list-type: set + directPutContent: + description: Whether to use documents you indexed directly + using the BatchPutDocument API. Defaults to false. + type: boolean + faqIds: + description: The identifier of the FAQs that you want + to use for your Amazon Kendra experience. Maximum number + of 100 items. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + userIdentityConfiguration: + description: The AWS SSO field name that contains the identifiers + of your users, such as their emails. Detailed below. + properties: + identityAttributeName: + description: The AWS SSO field name that contains the + identifiers of your users, such as their emails. + type: string + type: object + type: object + description: + description: A description for your Amazon Kendra experience. + type: string + indexId: + description: The identifier of the index for your Amazon Kendra + experience. + type: string + indexIdRef: + description: Reference to a Index in kendra to populate indexId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + indexIdSelector: + description: Selector for a Index in kendra to populate indexId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: A name for your Amazon Kendra experience. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of a role with permission + to access Query API, QuerySuggestions API, SubmitFeedback API, + and AWS SSO that stores your user and group information. For + more information, see IAM roles for Amazon Kendra. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configuration: + description: Configuration information for your Amazon Kendra + experience. Detailed below. + properties: + contentSourceConfiguration: + description: The identifiers of your data sources and FAQs. + Or, you can specify that you want to use documents indexed + via the BatchPutDocument API. Detailed below. + properties: + dataSourceIds: + description: The identifiers of the data sources you want + to use for your Amazon Kendra experience. Maximum number + of 100 items. + items: + type: string + type: array + x-kubernetes-list-type: set + directPutContent: + description: Whether to use documents you indexed directly + using the BatchPutDocument API. Defaults to false. + type: boolean + faqIds: + description: The identifier of the FAQs that you want + to use for your Amazon Kendra experience. Maximum number + of 100 items. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + userIdentityConfiguration: + description: The AWS SSO field name that contains the identifiers + of your users, such as their emails. Detailed below. + properties: + identityAttributeName: + description: The AWS SSO field name that contains the + identifiers of your users, such as their emails. + type: string + type: object + type: object + description: + description: A description for your Amazon Kendra experience. + type: string + indexId: + description: The identifier of the index for your Amazon Kendra + experience. + type: string + indexIdRef: + description: Reference to a Index in kendra to populate indexId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + indexIdSelector: + description: Selector for a Index in kendra to populate indexId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: A name for your Amazon Kendra experience. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of a role with permission + to access Query API, QuerySuggestions API, SubmitFeedback API, + and AWS SSO that stores your user and group information. For + more information, see IAM roles for Amazon Kendra. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ExperienceStatus defines the observed state of Experience. + properties: + atProvider: + properties: + arn: + description: ARN of the Experience. + type: string + configuration: + description: Configuration information for your Amazon Kendra + experience. Detailed below. + properties: + contentSourceConfiguration: + description: The identifiers of your data sources and FAQs. + Or, you can specify that you want to use documents indexed + via the BatchPutDocument API. Detailed below. + properties: + dataSourceIds: + description: The identifiers of the data sources you want + to use for your Amazon Kendra experience. Maximum number + of 100 items. + items: + type: string + type: array + x-kubernetes-list-type: set + directPutContent: + description: Whether to use documents you indexed directly + using the BatchPutDocument API. Defaults to false. + type: boolean + faqIds: + description: The identifier of the FAQs that you want + to use for your Amazon Kendra experience. Maximum number + of 100 items. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + userIdentityConfiguration: + description: The AWS SSO field name that contains the identifiers + of your users, such as their emails. Detailed below. + properties: + identityAttributeName: + description: The AWS SSO field name that contains the + identifiers of your users, such as their emails. + type: string + type: object + type: object + description: + description: A description for your Amazon Kendra experience. + type: string + endpoints: + description: Shows the endpoint URLs for your Amazon Kendra experiences. + The URLs are unique and fully hosted by AWS. + items: + properties: + endpoint: + description: The endpoint of your Amazon Kendra experience. + type: string + endpointType: + description: The type of endpoint for your Amazon Kendra + experience. + type: string + type: object + type: array + experienceId: + description: The unique identifier of the experience. + type: string + id: + description: The unique identifiers of the experience and index + separated by a slash (/). + type: string + indexId: + description: The identifier of the index for your Amazon Kendra + experience. + type: string + name: + description: A name for your Amazon Kendra experience. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of a role with permission + to access Query API, QuerySuggestions API, SubmitFeedback API, + and AWS SSO that stores your user and group information. For + more information, see IAM roles for Amazon Kendra. + type: string + status: + description: The current processing status of your Amazon Kendra + experience. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kendra.aws.upbound.io_indices.yaml b/package/crds/kendra.aws.upbound.io_indices.yaml index 6197fb2b26..5200c450a2 100644 --- a/package/crds/kendra.aws.upbound.io_indices.yaml +++ b/package/crds/kendra.aws.upbound.io_indices.yaml @@ -1228,3 +1228,1150 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Index is the Schema for the Indexs API. Provides an Amazon Kendra + Index resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IndexSpec defines the desired state of Index + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + capacityUnits: + description: A block that sets the number of additional document + storage and query capacity units that should be used by the + index. Detailed below. + properties: + queryCapacityUnits: + description: The amount of extra query capacity for an index + and GetQuerySuggestions capacity. For more information, + refer to QueryCapacityUnits. + type: number + storageCapacityUnits: + description: The amount of extra storage capacity for an index. + A single capacity unit provides 30 GB of storage space or + 100,000 documents, whichever is reached first. Minimum value + of 0. + type: number + type: object + description: + description: The description of the Index. + type: string + documentMetadataConfigurationUpdates: + description: One or more blocks that specify the configuration + settings for any metadata applied to the documents in the index. + Minimum number of 0 items. Maximum number of 500 items. If specified, + you must define all elements, including those that are provided + by default. 
These index fields are documented at Amazon Kendra + Index documentation. For an example resource that defines these + default index fields, refer to the default example above. For + an example resource that appends additional index fields, refer + to the append example above. All arguments for each block must + be specified. Note that blocks cannot be removed since index + fields cannot be deleted. This argument is detailed below. + items: + properties: + name: + description: The name of the index field. Minimum length + of 1. Maximum length of 30. + type: string + relevance: + description: A block that provides manual tuning parameters + to determine how the field affects the search results. + Detailed below + properties: + duration: + description: Specifies the time period that the boost + applies to. For more information, refer to Duration. + type: string + freshness: + description: Indicates that this field determines how + "fresh" a document is. For more information, refer + to Freshness. + type: boolean + importance: + description: The relative importance of the field in + the search. Larger numbers provide more of a boost + than smaller numbers. Minimum value of 1. Maximum + value of 10. + type: number + rankOrder: + description: Determines how values should be interpreted. + For more information, refer to RankOrder. + type: string + valuesImportanceMap: + additionalProperties: + type: number + description: A list of values that should be given a + different boost when they appear in the result list. + For more information, refer to ValueImportanceMap. + type: object + x-kubernetes-map-type: granular + type: object + search: + description: A block that provides information about how + the field is used during a search. Documented below. Detailed + below + properties: + displayable: + description: Determines whether the field is returned + in the query response. The default is true. 
+ type: boolean + facetable: + description: Indicates that the field can be used to + create search facets, a count of results for each + value in the field. The default is false. + type: boolean + searchable: + description: Determines whether the field is used in + the search. If the Searchable field is true, you can + use relevance tuning to manually tune how Amazon Kendra + weights the field in the search. The default is true + for string fields and false for number and date fields. + type: boolean + sortable: + description: Determines whether the field can be used + to sort the results of a query. If you specify sorting + on a field that does not have Sortable set to true, + Amazon Kendra returns an exception. The default is + false. + type: boolean + type: object + type: + description: The data type of the index field. Valid values + are STRING_VALUE, STRING_LIST_VALUE, LONG_VALUE, DATE_VALUE. + type: string + type: object + type: array + edition: + description: The Amazon Kendra edition to use for the index. Choose + DEVELOPER_EDITION for indexes intended for development, testing, + or proof of concept. Use ENTERPRISE_EDITION for your production + databases. Once you set the edition for an index, it can't be + changed. Defaults to ENTERPRISE_EDITION + type: string + name: + description: Specifies the name of the Index. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: An AWS Identity and Access Management (IAM) role + that gives Amazon Kendra permissions to access your Amazon CloudWatch + logs and metrics. This is also the role you use when you call + the BatchPutDocument API to index documents from an Amazon S3 + bucket. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serverSideEncryptionConfiguration: + description: A block that specifies the identifier of the AWS + KMS customer managed key (CMK) that's used to encrypt data indexed + by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs. + Detailed below. + properties: + kmsKeyId: + description: The identifier of the AWS KMScustomer master + key (CMK). Amazon Kendra doesn't support asymmetric CMKs. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userContextPolicy: + description: The user context policy. Valid values are ATTRIBUTE_FILTER + or USER_TOKEN. For more information, refer to UserContextPolicy. + Defaults to ATTRIBUTE_FILTER. + type: string + userGroupResolutionConfiguration: + description: A block that enables fetching access levels of groups + and users from an AWS Single Sign-On identity source. To configure + this, see UserGroupResolutionConfiguration. Detailed below. + properties: + userGroupResolutionMode: + description: The identity store provider (mode) you want to + use to fetch access levels of groups and users. AWS Single + Sign-On is currently the only available mode. Your users + and groups must exist in an AWS SSO identity source in order + to use this mode. Valid Values are AWS_SSO or NONE. + type: string + type: object + userTokenConfigurations: + description: A block that specifies the user token configuration. + Detailed below. + properties: + jsonTokenTypeConfiguration: + description: A block that specifies the information about + the JSON token type configuration. Detailed below. + properties: + groupAttributeField: + description: The group attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + userNameAttributeField: + description: The user name attribute field. Minimum length + of 1. Maximum length of 2048. 
+ type: string + type: object + jwtTokenTypeConfiguration: + description: A block that specifies the information about + the JWT token type configuration. Detailed below. + properties: + claimRegex: + description: The regular expression that identifies the + claim. Minimum length of 1. Maximum length of 100. + type: string + groupAttributeField: + description: The group attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + issuer: + description: The issuer of the token. Minimum length of + 1. Maximum length of 65. + type: string + keyLocation: + description: The location of the key. Valid values are + URL or SECRET_MANAGER + type: string + secretsManagerArn: + description: The Amazon Resource Name (ARN) of the secret. + type: string + url: + description: The signing key URL. Valid pattern is ^(https?|ftp|file):\/\/([^\s]*) + type: string + userNameAttributeField: + description: The user name attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + capacityUnits: + description: A block that sets the number of additional document + storage and query capacity units that should be used by the + index. Detailed below. 
+ properties: + queryCapacityUnits: + description: The amount of extra query capacity for an index + and GetQuerySuggestions capacity. For more information, + refer to QueryCapacityUnits. + type: number + storageCapacityUnits: + description: The amount of extra storage capacity for an index. + A single capacity unit provides 30 GB of storage space or + 100,000 documents, whichever is reached first. Minimum value + of 0. + type: number + type: object + description: + description: The description of the Index. + type: string + documentMetadataConfigurationUpdates: + description: One or more blocks that specify the configuration + settings for any metadata applied to the documents in the index. + Minimum number of 0 items. Maximum number of 500 items. If specified, + you must define all elements, including those that are provided + by default. These index fields are documented at Amazon Kendra + Index documentation. For an example resource that defines these + default index fields, refer to the default example above. For + an example resource that appends additional index fields, refer + to the append example above. All arguments for each block must + be specified. Note that blocks cannot be removed since index + fields cannot be deleted. This argument is detailed below. + items: + properties: + name: + description: The name of the index field. Minimum length + of 1. Maximum length of 30. + type: string + relevance: + description: A block that provides manual tuning parameters + to determine how the field affects the search results. + Detailed below + properties: + duration: + description: Specifies the time period that the boost + applies to. For more information, refer to Duration. + type: string + freshness: + description: Indicates that this field determines how + "fresh" a document is. For more information, refer + to Freshness. + type: boolean + importance: + description: The relative importance of the field in + the search. 
Larger numbers provide more of a boost + than smaller numbers. Minimum value of 1. Maximum + value of 10. + type: number + rankOrder: + description: Determines how values should be interpreted. + For more information, refer to RankOrder. + type: string + valuesImportanceMap: + additionalProperties: + type: number + description: A list of values that should be given a + different boost when they appear in the result list. + For more information, refer to ValueImportanceMap. + type: object + x-kubernetes-map-type: granular + type: object + search: + description: A block that provides information about how + the field is used during a search. Documented below. Detailed + below + properties: + displayable: + description: Determines whether the field is returned + in the query response. The default is true. + type: boolean + facetable: + description: Indicates that the field can be used to + create search facets, a count of results for each + value in the field. The default is false. + type: boolean + searchable: + description: Determines whether the field is used in + the search. If the Searchable field is true, you can + use relevance tuning to manually tune how Amazon Kendra + weights the field in the search. The default is true + for string fields and false for number and date fields. + type: boolean + sortable: + description: Determines whether the field can be used + to sort the results of a query. If you specify sorting + on a field that does not have Sortable set to true, + Amazon Kendra returns an exception. The default is + false. + type: boolean + type: object + type: + description: The data type of the index field. Valid values + are STRING_VALUE, STRING_LIST_VALUE, LONG_VALUE, DATE_VALUE. + type: string + type: object + type: array + edition: + description: The Amazon Kendra edition to use for the index. Choose + DEVELOPER_EDITION for indexes intended for development, testing, + or proof of concept. Use ENTERPRISE_EDITION for your production + databases. 
Once you set the edition for an index, it can't be + changed. Defaults to ENTERPRISE_EDITION + type: string + name: + description: Specifies the name of the Index. + type: string + roleArn: + description: An AWS Identity and Access Management (IAM) role + that gives Amazon Kendra permissions to access your Amazon CloudWatch + logs and metrics. This is also the role you use when you call + the BatchPutDocument API to index documents from an Amazon S3 + bucket. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serverSideEncryptionConfiguration: + description: A block that specifies the identifier of the AWS + KMS customer managed key (CMK) that's used to encrypt data indexed + by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs. + Detailed below. + properties: + kmsKeyId: + description: The identifier of the AWS KMScustomer master + key (CMK). Amazon Kendra doesn't support asymmetric CMKs. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userContextPolicy: + description: The user context policy. Valid values are ATTRIBUTE_FILTER + or USER_TOKEN. For more information, refer to UserContextPolicy. + Defaults to ATTRIBUTE_FILTER. + type: string + userGroupResolutionConfiguration: + description: A block that enables fetching access levels of groups + and users from an AWS Single Sign-On identity source. To configure + this, see UserGroupResolutionConfiguration. Detailed below. + properties: + userGroupResolutionMode: + description: The identity store provider (mode) you want to + use to fetch access levels of groups and users. AWS Single + Sign-On is currently the only available mode. 
Your users + and groups must exist in an AWS SSO identity source in order + to use this mode. Valid Values are AWS_SSO or NONE. + type: string + type: object + userTokenConfigurations: + description: A block that specifies the user token configuration. + Detailed below. + properties: + jsonTokenTypeConfiguration: + description: A block that specifies the information about + the JSON token type configuration. Detailed below. + properties: + groupAttributeField: + description: The group attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + userNameAttributeField: + description: The user name attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + type: object + jwtTokenTypeConfiguration: + description: A block that specifies the information about + the JWT token type configuration. Detailed below. + properties: + claimRegex: + description: The regular expression that identifies the + claim. Minimum length of 1. Maximum length of 100. + type: string + groupAttributeField: + description: The group attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + issuer: + description: The issuer of the token. Minimum length of + 1. Maximum length of 65. + type: string + keyLocation: + description: The location of the key. Valid values are + URL or SECRET_MANAGER + type: string + secretsManagerArn: + description: The Amazon Resource Name (ARN) of the secret. + type: string + url: + description: The signing key URL. Valid pattern is ^(https?|ftp|file):\/\/([^\s]*) + type: string + userNameAttributeField: + description: The user name attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: IndexStatus defines the observed state of Index. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the Index. 
+ type: string + capacityUnits: + description: A block that sets the number of additional document + storage and query capacity units that should be used by the + index. Detailed below. + properties: + queryCapacityUnits: + description: The amount of extra query capacity for an index + and GetQuerySuggestions capacity. For more information, + refer to QueryCapacityUnits. + type: number + storageCapacityUnits: + description: The amount of extra storage capacity for an index. + A single capacity unit provides 30 GB of storage space or + 100,000 documents, whichever is reached first. Minimum value + of 0. + type: number + type: object + createdAt: + description: The Unix datetime that the index was created. + type: string + description: + description: The description of the Index. + type: string + documentMetadataConfigurationUpdates: + description: One or more blocks that specify the configuration + settings for any metadata applied to the documents in the index. + Minimum number of 0 items. Maximum number of 500 items. If specified, + you must define all elements, including those that are provided + by default. These index fields are documented at Amazon Kendra + Index documentation. For an example resource that defines these + default index fields, refer to the default example above. For + an example resource that appends additional index fields, refer + to the append example above. All arguments for each block must + be specified. Note that blocks cannot be removed since index + fields cannot be deleted. This argument is detailed below. + items: + properties: + name: + description: The name of the index field. Minimum length + of 1. Maximum length of 30. + type: string + relevance: + description: A block that provides manual tuning parameters + to determine how the field affects the search results. + Detailed below + properties: + duration: + description: Specifies the time period that the boost + applies to. For more information, refer to Duration. 
+ type: string + freshness: + description: Indicates that this field determines how + "fresh" a document is. For more information, refer + to Freshness. + type: boolean + importance: + description: The relative importance of the field in + the search. Larger numbers provide more of a boost + than smaller numbers. Minimum value of 1. Maximum + value of 10. + type: number + rankOrder: + description: Determines how values should be interpreted. + For more information, refer to RankOrder. + type: string + valuesImportanceMap: + additionalProperties: + type: number + description: A list of values that should be given a + different boost when they appear in the result list. + For more information, refer to ValueImportanceMap. + type: object + x-kubernetes-map-type: granular + type: object + search: + description: A block that provides information about how + the field is used during a search. Documented below. Detailed + below + properties: + displayable: + description: Determines whether the field is returned + in the query response. The default is true. + type: boolean + facetable: + description: Indicates that the field can be used to + create search facets, a count of results for each + value in the field. The default is false. + type: boolean + searchable: + description: Determines whether the field is used in + the search. If the Searchable field is true, you can + use relevance tuning to manually tune how Amazon Kendra + weights the field in the search. The default is true + for string fields and false for number and date fields. + type: boolean + sortable: + description: Determines whether the field can be used + to sort the results of a query. If you specify sorting + on a field that does not have Sortable set to true, + Amazon Kendra returns an exception. The default is + false. + type: boolean + type: object + type: + description: The data type of the index field. Valid values + are STRING_VALUE, STRING_LIST_VALUE, LONG_VALUE, DATE_VALUE. 
+ type: string + type: object + type: array + edition: + description: The Amazon Kendra edition to use for the index. Choose + DEVELOPER_EDITION for indexes intended for development, testing, + or proof of concept. Use ENTERPRISE_EDITION for your production + databases. Once you set the edition for an index, it can't be + changed. Defaults to ENTERPRISE_EDITION + type: string + errorMessage: + description: When the Status field value is FAILED, this contains + a message that explains why. + type: string + id: + description: The identifier of the Index. + type: string + indexStatistics: + description: A block that provides information about the number + of FAQ questions and answers and the number of text documents + indexed. Detailed below. + items: + properties: + faqStatistics: + description: A block that specifies the number of question + and answer topics in the index. Detailed below. + items: + properties: + indexedQuestionAnswersCount: + description: The total number of FAQ questions and + answers contained in the index. + type: number + type: object + type: array + textDocumentStatistics: + description: A block that specifies the number of text documents + indexed. Detailed below. + items: + properties: + indexedTextBytes: + description: The total size, in bytes, of the indexed + documents. + type: number + indexedTextDocumentsCount: + description: The number of text documents indexed. + type: number + type: object + type: array + type: object + type: array + name: + description: Specifies the name of the Index. + type: string + roleArn: + description: An AWS Identity and Access Management (IAM) role + that gives Amazon Kendra permissions to access your Amazon CloudWatch + logs and metrics. This is also the role you use when you call + the BatchPutDocument API to index documents from an Amazon S3 + bucket. 
+ type: string + serverSideEncryptionConfiguration: + description: A block that specifies the identifier of the AWS + KMS customer managed key (CMK) that's used to encrypt data indexed + by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs. + Detailed below. + properties: + kmsKeyId: + description: The identifier of the AWS KMScustomer master + key (CMK). Amazon Kendra doesn't support asymmetric CMKs. + type: string + type: object + status: + description: The current status of the index. When the value is + ACTIVE, the index is ready for use. If the Status field value + is FAILED, the error_message field contains a message that explains + why. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + updatedAt: + description: The Unix datetime that the index was last updated. + type: string + userContextPolicy: + description: The user context policy. Valid values are ATTRIBUTE_FILTER + or USER_TOKEN. For more information, refer to UserContextPolicy. + Defaults to ATTRIBUTE_FILTER. + type: string + userGroupResolutionConfiguration: + description: A block that enables fetching access levels of groups + and users from an AWS Single Sign-On identity source. To configure + this, see UserGroupResolutionConfiguration. Detailed below. + properties: + userGroupResolutionMode: + description: The identity store provider (mode) you want to + use to fetch access levels of groups and users. AWS Single + Sign-On is currently the only available mode. Your users + and groups must exist in an AWS SSO identity source in order + to use this mode. Valid Values are AWS_SSO or NONE. 
+ type: string + type: object + userTokenConfigurations: + description: A block that specifies the user token configuration. + Detailed below. + properties: + jsonTokenTypeConfiguration: + description: A block that specifies the information about + the JSON token type configuration. Detailed below. + properties: + groupAttributeField: + description: The group attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + userNameAttributeField: + description: The user name attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + type: object + jwtTokenTypeConfiguration: + description: A block that specifies the information about + the JWT token type configuration. Detailed below. + properties: + claimRegex: + description: The regular expression that identifies the + claim. Minimum length of 1. Maximum length of 100. + type: string + groupAttributeField: + description: The group attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + issuer: + description: The issuer of the token. Minimum length of + 1. Maximum length of 65. + type: string + keyLocation: + description: The location of the key. Valid values are + URL or SECRET_MANAGER + type: string + secretsManagerArn: + description: The Amazon Resource Name (ARN) of the secret. + type: string + url: + description: The signing key URL. Valid pattern is ^(https?|ftp|file):\/\/([^\s]*) + type: string + userNameAttributeField: + description: The user name attribute field. Minimum length + of 1. Maximum length of 2048. + type: string + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kendra.aws.upbound.io_querysuggestionsblocklists.yaml b/package/crds/kendra.aws.upbound.io_querysuggestionsblocklists.yaml index 0f5e164480..20791f947e 100644 --- a/package/crds/kendra.aws.upbound.io_querysuggestionsblocklists.yaml +++ b/package/crds/kendra.aws.upbound.io_querysuggestionsblocklists.yaml @@ -903,3 +903,882 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: QuerySuggestionsBlockList is the Schema for the QuerySuggestionsBlockLists + API. provider resource for managing an aws kendra block list used for query + suggestions for an index + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: QuerySuggestionsBlockListSpec defines the desired state of + QuerySuggestionsBlockList + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description for a block list. + type: string + indexId: + description: Identifier of the index for a block list. + type: string + indexIdRef: + description: Reference to a Index in kendra to populate indexId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + indexIdSelector: + description: Selector for a Index in kendra to populate indexId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name for the block list. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: IAM (Identity and Access Management) role used to + access the block list text file in S3. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceS3Path: + description: S3 path where your block list text file is located. + See details below. + properties: + bucket: + description: Name of the S3 bucket that contains the file. 
+ type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Name of the file. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. If configured with + a provider default_tags configuration block, tags with matching + keys will overwrite those defined at the provider-level. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description for a block list. + type: string + indexId: + description: Identifier of the index for a block list. + type: string + indexIdRef: + description: Reference to a Index in kendra to populate indexId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + indexIdSelector: + description: Selector for a Index in kendra to populate indexId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name for the block list. + type: string + roleArn: + description: IAM (Identity and Access Management) role used to + access the block list text file in S3. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceS3Path: + description: S3 path where your block list text file is located. + See details below. + properties: + bucket: + description: Name of the S3 bucket that contains the file. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: Name of the file. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. If configured with + a provider default_tags configuration block, tags with matching + keys will overwrite those defined at the provider-level. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.sourceS3Path is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sourceS3Path) + || (has(self.initProvider) && has(self.initProvider.sourceS3Path))' + status: + description: QuerySuggestionsBlockListStatus defines the observed state + of QuerySuggestionsBlockList. + properties: + atProvider: + properties: + arn: + description: ARN of the block list. + type: string + description: + description: Description for a block list. + type: string + id: + type: string + indexId: + description: Identifier of the index for a block list. + type: string + name: + description: Name for the block list. + type: string + querySuggestionsBlockListId: + description: Unique identifier of the block list. + type: string + roleArn: + description: IAM (Identity and Access Management) role used to + access the block list text file in S3. + type: string + sourceS3Path: + description: S3 path where your block list text file is located. + See details below. 
+ properties: + bucket: + description: Name of the S3 bucket that contains the file. + type: string + key: + description: Name of the file. + type: string + type: object + status: + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. If configured with + a provider default_tags configuration block, tags with matching + keys will overwrite those defined at the provider-level. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider's default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kendra.aws.upbound.io_thesaurus.yaml b/package/crds/kendra.aws.upbound.io_thesaurus.yaml index 31bafe874f..8d023af906 100644 --- a/package/crds/kendra.aws.upbound.io_thesaurus.yaml +++ b/package/crds/kendra.aws.upbound.io_thesaurus.yaml @@ -1055,3 +1055,1031 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Thesaurus is the Schema for the Thesauruss API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ThesaurusSpec defines the desired state of Thesaurus + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description for a thesaurus. + type: string + indexId: + description: The identifier of the index for a thesaurus. + type: string + indexIdRef: + description: Reference to a Index in kendra to populate indexId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + indexIdSelector: + description: Selector for a Index in kendra to populate indexId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name for the thesaurus. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The IAM (Identity and Access Management) role used + to access the thesaurus file in S3. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceS3Path: + description: The S3 path where your thesaurus file sits in S3. + Detailed below. + properties: + bucket: + description: The name of the S3 bucket that contains the file. 
+ type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: The name of the file. + type: string + keyRef: + description: Reference to a Object in s3 to populate key. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keySelector: + description: Selector for a Object in s3 to populate key. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. If configured with + a provider default_tags configuration block present, tags with + matching keys will overwrite those defined at the provider-level. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description for a thesaurus. + type: string + indexId: + description: The identifier of the index for a thesaurus. + type: string + indexIdRef: + description: Reference to a Index in kendra to populate indexId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + indexIdSelector: + description: Selector for a Index in kendra to populate indexId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name for the thesaurus. 
+ type: string + roleArn: + description: The IAM (Identity and Access Management) role used + to access the thesaurus file in S3. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceS3Path: + description: The S3 path where your thesaurus file sits in S3. + Detailed below. + properties: + bucket: + description: The name of the S3 bucket that contains the file. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + key: + description: The name of the file. + type: string + keyRef: + description: Reference to a Object in s3 to populate key. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keySelector: + description: Selector for a Object in s3 to populate key. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. If configured with + a provider default_tags configuration block present, tags with + matching keys will overwrite those defined at the provider-level. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.sourceS3Path is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sourceS3Path) + || (has(self.initProvider) && has(self.initProvider.sourceS3Path))' + status: + description: ThesaurusStatus defines the observed state of Thesaurus. + properties: + atProvider: + properties: + arn: + description: ARN of the thesaurus. + type: string + description: + description: The description for a thesaurus. + type: string + id: + description: The unique identifiers of the thesaurus and index + separated by a slash (/). + type: string + indexId: + description: The identifier of the index for a thesaurus. 
+ type: string + name: + description: The name for the thesaurus. + type: string + roleArn: + description: The IAM (Identity and Access Management) role used + to access the thesaurus file in S3. + type: string + sourceS3Path: + description: The S3 path where your thesaurus file sits in S3. + Detailed below. + properties: + bucket: + description: The name of the S3 bucket that contains the file. + type: string + key: + description: The name of the file. + type: string + type: object + status: + description: The current status of the thesaurus. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. If configured with + a provider default_tags configuration block present, tags with + matching keys will overwrite those defined at the provider-level. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + thesaurusId: + description: The unique identifiers of the thesaurus and index + separated by a slash (/). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/keyspaces.aws.upbound.io_tables.yaml b/package/crds/keyspaces.aws.upbound.io_tables.yaml index 45c83f8fd1..e3736ec4d3 100644 --- a/package/crds/keyspaces.aws.upbound.io_tables.yaml +++ b/package/crds/keyspaces.aws.upbound.io_tables.yaml @@ -958,3 +958,901 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Table is the Schema for the Tables API. Provides a Keyspaces + Table. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TableSpec defines the desired state of Table + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + capacitySpecification: + description: Specifies the read/write throughput capacity mode + for the table. + properties: + readCapacityUnits: + description: The throughput capacity specified for read operations + defined in read capacity units (RCUs). + type: number + throughputMode: + description: 'The read/write throughput capacity mode for + a table. Valid values: PAY_PER_REQUEST, PROVISIONED. The + default value is PAY_PER_REQUEST.' 
+ type: string + writeCapacityUnits: + description: The throughput capacity specified for write operations + defined in write capacity units (WCUs). + type: number + type: object + clientSideTimestamps: + description: Enables client-side timestamps for the table. By + default, the setting is disabled. + properties: + status: + description: 'Shows how to enable client-side timestamps settings + for the specified table. Valid values: ENABLED.' + type: string + type: object + comment: + description: A description of the table. + properties: + message: + description: A description of the table. + type: string + type: object + defaultTimeToLive: + description: The default Time to Live setting in seconds for the + table. More information can be found in the Developer Guide. + type: number + encryptionSpecification: + description: Specifies how the encryption key for encryption at + rest is managed for the table. More information can be found + in the Developer Guide. + properties: + kmsKeyIdentifier: + description: The Amazon Resource Name (ARN) of the customer + managed KMS key. + type: string + type: + description: 'The encryption option specified for the table. + Valid values: AWS_OWNED_KMS_KEY, CUSTOMER_MANAGED_KMS_KEY. + The default value is AWS_OWNED_KMS_KEY.' + type: string + type: object + keyspaceName: + description: The name of the keyspace that the table is going + to be created in. + type: string + keyspaceNameRef: + description: Reference to a Keyspace in keyspaces to populate + keyspaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyspaceNameSelector: + description: Selector for a Keyspace in keyspaces to populate + keyspaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + pointInTimeRecovery: + description: Specifies if point-in-time recovery is enabled or + disabled for the table. More information can be found in the + Developer Guide. + properties: + status: + description: 'Shows how to enable client-side timestamps settings + for the specified table. Valid values: ENABLED.' 
+ type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + schemaDefinition: + description: Describes the schema of the table. + properties: + clusteringKey: + description: The columns that are part of the clustering key + of the table. + items: + properties: + name: + description: The name of the column. + type: string + orderBy: + description: 'The order modifier. Valid values: ASC, + DESC.' + type: string + type: object + type: array + column: + description: The regular columns of the table. + items: + properties: + name: + description: The name of the column. + type: string + type: + description: 'The encryption option specified for the + table. Valid values: AWS_OWNED_KMS_KEY, CUSTOMER_MANAGED_KMS_KEY. + The default value is AWS_OWNED_KMS_KEY.' + type: string + type: object + type: array + partitionKey: + description: The columns that are part of the partition key + of the table . + items: + properties: + name: + description: The name of the column. + type: string + type: object + type: array + staticColumn: + description: The columns that have been defined as STATIC. + Static columns store values that are shared by all rows + in the same partition. + items: + properties: + name: + description: The name of the column. + type: string + type: object + type: array + type: object + tableName: + description: The name of the table. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + ttl: + description: Enables Time to Live custom settings for the table. + More information can be found in the Developer Guide. + properties: + status: + description: 'Shows how to enable client-side timestamps settings + for the specified table. Valid values: ENABLED.' + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + capacitySpecification: + description: Specifies the read/write throughput capacity mode + for the table. + properties: + readCapacityUnits: + description: The throughput capacity specified for read operations + defined in read capacity units (RCUs). + type: number + throughputMode: + description: 'The read/write throughput capacity mode for + a table. Valid values: PAY_PER_REQUEST, PROVISIONED. The + default value is PAY_PER_REQUEST.' + type: string + writeCapacityUnits: + description: The throughput capacity specified for write operations + defined in write capacity units (WCUs). + type: number + type: object + clientSideTimestamps: + description: Enables client-side timestamps for the table. By + default, the setting is disabled. + properties: + status: + description: 'Shows how to enable client-side timestamps settings + for the specified table. Valid values: ENABLED.' + type: string + type: object + comment: + description: A description of the table. + properties: + message: + description: A description of the table. + type: string + type: object + defaultTimeToLive: + description: The default Time to Live setting in seconds for the + table. More information can be found in the Developer Guide. + type: number + encryptionSpecification: + description: Specifies how the encryption key for encryption at + rest is managed for the table. 
More information can be found + in the Developer Guide. + properties: + kmsKeyIdentifier: + description: The Amazon Resource Name (ARN) of the customer + managed KMS key. + type: string + type: + description: 'The encryption option specified for the table. + Valid values: AWS_OWNED_KMS_KEY, CUSTOMER_MANAGED_KMS_KEY. + The default value is AWS_OWNED_KMS_KEY.' + type: string + type: object + keyspaceName: + description: The name of the keyspace that the table is going + to be created in. + type: string + keyspaceNameRef: + description: Reference to a Keyspace in keyspaces to populate + keyspaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyspaceNameSelector: + description: Selector for a Keyspace in keyspaces to populate + keyspaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + pointInTimeRecovery: + description: Specifies if point-in-time recovery is enabled or + disabled for the table. More information can be found in the + Developer Guide. + properties: + status: + description: 'Shows how to enable client-side timestamps settings + for the specified table. Valid values: ENABLED.' + type: string + type: object + schemaDefinition: + description: Describes the schema of the table. + properties: + clusteringKey: + description: The columns that are part of the clustering key + of the table. + items: + properties: + name: + description: The name of the column. + type: string + orderBy: + description: 'The order modifier. Valid values: ASC, + DESC.' + type: string + type: object + type: array + column: + description: The regular columns of the table. + items: + properties: + name: + description: The name of the column. + type: string + type: + description: 'The encryption option specified for the + table. Valid values: AWS_OWNED_KMS_KEY, CUSTOMER_MANAGED_KMS_KEY. + The default value is AWS_OWNED_KMS_KEY.' + type: string + type: object + type: array + partitionKey: + description: The columns that are part of the partition key + of the table . + items: + properties: + name: + description: The name of the column. 
+ type: string + type: object + type: array + staticColumn: + description: The columns that have been defined as STATIC. + Static columns store values that are shared by all rows + in the same partition. + items: + properties: + name: + description: The name of the column. + type: string + type: object + type: array + type: object + tableName: + description: The name of the table. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + ttl: + description: Enables Time to Live custom settings for the table. + More information can be found in the Developer Guide. + properties: + status: + description: 'Shows how to enable client-side timestamps settings + for the specified table. Valid values: ENABLED.' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.schemaDefinition is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.schemaDefinition) + || (has(self.initProvider) && has(self.initProvider.schemaDefinition))' + - message: spec.forProvider.tableName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tableName) + || (has(self.initProvider) && has(self.initProvider.tableName))' + status: + description: TableStatus defines the observed state of Table. + properties: + atProvider: + properties: + arn: + description: The ARN of the table. + type: string + capacitySpecification: + description: Specifies the read/write throughput capacity mode + for the table. + properties: + readCapacityUnits: + description: The throughput capacity specified for read operations + defined in read capacity units (RCUs). + type: number + throughputMode: + description: 'The read/write throughput capacity mode for + a table. Valid values: PAY_PER_REQUEST, PROVISIONED. The + default value is PAY_PER_REQUEST.' + type: string + writeCapacityUnits: + description: The throughput capacity specified for write operations + defined in write capacity units (WCUs). 
+ type: number + type: object + clientSideTimestamps: + description: Enables client-side timestamps for the table. By + default, the setting is disabled. + properties: + status: + description: 'Shows how to enable client-side timestamps settings + for the specified table. Valid values: ENABLED.' + type: string + type: object + comment: + description: A description of the table. + properties: + message: + description: A description of the table. + type: string + type: object + defaultTimeToLive: + description: The default Time to Live setting in seconds for the + table. More information can be found in the Developer Guide. + type: number + encryptionSpecification: + description: Specifies how the encryption key for encryption at + rest is managed for the table. More information can be found + in the Developer Guide. + properties: + kmsKeyIdentifier: + description: The Amazon Resource Name (ARN) of the customer + managed KMS key. + type: string + type: + description: 'The encryption option specified for the table. + Valid values: AWS_OWNED_KMS_KEY, CUSTOMER_MANAGED_KMS_KEY. + The default value is AWS_OWNED_KMS_KEY.' + type: string + type: object + id: + type: string + keyspaceName: + description: The name of the keyspace that the table is going + to be created in. + type: string + pointInTimeRecovery: + description: Specifies if point-in-time recovery is enabled or + disabled for the table. More information can be found in the + Developer Guide. + properties: + status: + description: 'Shows how to enable client-side timestamps settings + for the specified table. Valid values: ENABLED.' + type: string + type: object + schemaDefinition: + description: Describes the schema of the table. + properties: + clusteringKey: + description: The columns that are part of the clustering key + of the table. + items: + properties: + name: + description: The name of the column. + type: string + orderBy: + description: 'The order modifier. Valid values: ASC, + DESC.' 
+ type: string + type: object + type: array + column: + description: The regular columns of the table. + items: + properties: + name: + description: The name of the column. + type: string + type: + description: 'The encryption option specified for the + table. Valid values: AWS_OWNED_KMS_KEY, CUSTOMER_MANAGED_KMS_KEY. + The default value is AWS_OWNED_KMS_KEY.' + type: string + type: object + type: array + partitionKey: + description: The columns that are part of the partition key + of the table . + items: + properties: + name: + description: The name of the column. + type: string + type: object + type: array + staticColumn: + description: The columns that have been defined as STATIC. + Static columns store values that are shared by all rows + in the same partition. + items: + properties: + name: + description: The name of the column. + type: string + type: object + type: array + type: object + tableName: + description: The name of the table. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + ttl: + description: Enables Time to Live custom settings for the table. + More information can be found in the Developer Guide. + properties: + status: + description: 'Shows how to enable client-side timestamps settings + for the specified table. Valid values: ENABLED.' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kinesis.aws.upbound.io_streams.yaml b/package/crds/kinesis.aws.upbound.io_streams.yaml index 5e36fc6086..a3aa4e4637 100644 --- a/package/crds/kinesis.aws.upbound.io_streams.yaml +++ b/package/crds/kinesis.aws.upbound.io_streams.yaml @@ -644,3 +644,623 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Stream is the Schema for the Streams API. Provides a AWS Kinesis + Stream + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StreamSpec defines the desired state of Stream + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + encryptionType: + description: The encryption type to use. The only acceptable values + are NONE or KMS. The default value is NONE. + type: string + enforceConsumerDeletion: + description: A boolean that indicates all registered consumers + should be deregistered from the stream so that the stream can + be destroyed without error. The default value is false. + type: boolean + kmsKeyId: + description: The GUID for the customer-managed KMS key to use + for encryption. You can also use a Kinesis-owned master key + by specifying the alias alias/aws/kinesis. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + retentionPeriod: + description: Length of time data records are accessible after + they are added to the stream. The maximum value of a stream's + retention period is 8760 hours. 
Minimum value is 24. Default + is 24. + type: number + shardCount: + description: |- + – The number of shards that the stream will use. If the stream_mode is PROVISIONED, this field is required. + Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See Amazon Kinesis Streams for more. + type: number + shardLevelMetrics: + description: A list of shard-level CloudWatch metrics which can + be enabled for the stream. See Monitoring with CloudWatch for + more. Note that the value ALL should not be used; instead you + should provide an explicit list of metrics you wish to enable. + items: + type: string + type: array + x-kubernetes-list-type: set + streamModeDetails: + description: Indicates the capacity mode of the data stream. Detailed + below. + properties: + streamMode: + description: Specifies the capacity mode of the stream. Must + be either PROVISIONED or ON_DEMAND. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + encryptionType: + description: The encryption type to use. The only acceptable values + are NONE or KMS. The default value is NONE. 
+ type: string + enforceConsumerDeletion: + description: A boolean that indicates all registered consumers + should be deregistered from the stream so that the stream can + be destroyed without error. The default value is false. + type: boolean + kmsKeyId: + description: The GUID for the customer-managed KMS key to use + for encryption. You can also use a Kinesis-owned master key + by specifying the alias alias/aws/kinesis. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionPeriod: + description: Length of time data records are accessible after + they are added to the stream. The maximum value of a stream's + retention period is 8760 hours. Minimum value is 24. Default + is 24. + type: number + shardCount: + description: |- + – The number of shards that the stream will use. If the stream_mode is PROVISIONED, this field is required. + Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See Amazon Kinesis Streams for more. + type: number + shardLevelMetrics: + description: A list of shard-level CloudWatch metrics which can + be enabled for the stream. See Monitoring with CloudWatch for + more. Note that the value ALL should not be used; instead you + should provide an explicit list of metrics you wish to enable. + items: + type: string + type: array + x-kubernetes-list-type: set + streamModeDetails: + description: Indicates the capacity mode of the data stream. Detailed + below. + properties: + streamMode: + description: Specifies the capacity mode of the stream. Must + be either PROVISIONED or ON_DEMAND. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: StreamStatus defines the observed state of Stream. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) specifying the Stream + (same as id) + type: string + encryptionType: + description: The encryption type to use. The only acceptable values + are NONE or KMS. The default value is NONE. 
+ type: string + enforceConsumerDeletion: + description: A boolean that indicates all registered consumers + should be deregistered from the stream so that the stream can + be destroyed without error. The default value is false. + type: boolean + id: + description: The unique Stream id + type: string + kmsKeyId: + description: The GUID for the customer-managed KMS key to use + for encryption. You can also use a Kinesis-owned master key + by specifying the alias alias/aws/kinesis. + type: string + retentionPeriod: + description: Length of time data records are accessible after + they are added to the stream. The maximum value of a stream's + retention period is 8760 hours. Minimum value is 24. Default + is 24. + type: number + shardCount: + description: |- + – The number of shards that the stream will use. If the stream_mode is PROVISIONED, this field is required. + Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See Amazon Kinesis Streams for more. + type: number + shardLevelMetrics: + description: A list of shard-level CloudWatch metrics which can + be enabled for the stream. See Monitoring with CloudWatch for + more. Note that the value ALL should not be used; instead you + should provide an explicit list of metrics you wish to enable. + items: + type: string + type: array + x-kubernetes-list-type: set + streamModeDetails: + description: Indicates the capacity mode of the data stream. Detailed + below. + properties: + streamMode: + description: Specifies the capacity mode of the stream. Must + be either PROVISIONED or ON_DEMAND. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kinesisanalytics.aws.upbound.io_applications.yaml b/package/crds/kinesisanalytics.aws.upbound.io_applications.yaml index 4ae96b27d4..aa1d5edcc0 100644 --- a/package/crds/kinesisanalytics.aws.upbound.io_applications.yaml +++ b/package/crds/kinesisanalytics.aws.upbound.io_applications.yaml @@ -2327,3 +2327,2161 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Application is the Schema for the Applications API. Provides + a AWS Kinesis Analytics Application + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ApplicationSpec defines the desired state of Application + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cloudwatchLoggingOptions: + description: |- + The CloudWatch log stream options to monitor application errors. + See CloudWatch Logging Options below for more details. + properties: + logStreamArn: + description: The ARN of the CloudWatch Log Stream. + type: string + logStreamArnRef: + description: Reference to a Stream in cloudwatchlogs to populate + logStreamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logStreamArnSelector: + description: Selector for a Stream in cloudwatchlogs to populate + logStreamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roleArn: + description: The ARN of the IAM Role used to send application + messages. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + code: + description: SQL Code to transform input data, and generate output. + type: string + description: + description: Description of the application. 
+ type: string + inputs: + description: Input configuration of the application. See Inputs + below for more details. + properties: + kinesisFirehose: + description: |- + The Kinesis Firehose configuration for the streaming source. Conflicts with kinesis_stream. + See Kinesis Firehose below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + kinesisStream: + description: |- + The Kinesis Stream configuration for the streaming source. Conflicts with kinesis_firehose. + See Kinesis Stream below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + resourceArnRef: + description: Reference to a Stream in kinesis to populate + resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a Stream in kinesis to populate + resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roleArn: + description: The IAM Role ARN to read the data. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + namePrefix: + description: The Name Prefix to use when creating an in-application + stream. + type: string + parallelism: + description: |- + The number of Parallel in-application streams to create. + See Parallelism below for more details. + properties: + count: + description: The Count of streams. + type: number + type: object + processingConfiguration: + description: |- + The Processing Configuration to transform records as they are received from the stream. + See Processing Configuration below for more details. + properties: + lambda: + description: The Lambda function configuration. See Lambda + below for more details. 
+ properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + type: object + schema: + description: The Schema format of the data in the streaming + source. See Source Schema below for more details. + properties: + recordColumns: + description: |- + The Record Column mapping for the streaming source data element. + See Record Columns below for more details. + items: + properties: + mapping: + description: The Mapping reference to the data element. + type: string + name: + description: Name of the column. + type: string + sqlType: + description: The SQL Type of the column. + type: string + type: object + type: array + recordEncoding: + description: The Encoding of the record in the streaming + source. + type: string + recordFormat: + description: |- + The Record Format and mapping information to schematize a record. + See Record Format below for more details. + properties: + mappingParameters: + description: |- + The Mapping Information for the record format. + See Mapping Parameters below for more details. + properties: + csv: + description: |- + Mapping information when the record format uses delimiters. + See CSV Mapping Parameters below for more details. + properties: + recordColumnDelimiter: + description: The Column Delimiter. + type: string + recordRowDelimiter: + description: The Row Delimiter. + type: string + type: object + json: + description: |- + Mapping information when JSON is the record format on the streaming source. + See JSON Mapping Parameters below for more details. + properties: + recordRowPath: + description: Path to the top-level parent + that contains the records. + type: string + type: object + type: object + type: object + type: object + startingPositionConfiguration: + description: |- + The point at which the application starts processing records from the streaming source. 
+ See Starting Position Configuration below for more details. + items: + properties: + startingPosition: + description: 'The starting position on the stream. Valid + values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON.' + type: string + type: object + type: array + type: object + outputs: + description: Output destination configuration of the application. + See Outputs below for more details. + items: + properties: + kinesisFirehose: + description: |- + The Kinesis Firehose configuration for the destination stream. Conflicts with kinesis_stream. + See Kinesis Firehose below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + resourceArnRef: + description: Reference to a DeliveryStream in firehose + to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a DeliveryStream in firehose + to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roleArn: + description: The IAM Role ARN to read the data. + type: string + roleArnRef: + description: Reference to a Role in iam to populate + roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate + roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + kinesisStream: + description: |- + The Kinesis Stream configuration for the destination stream. Conflicts with kinesis_firehose. + See Kinesis Stream below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + lambda: + description: The Lambda function destination. See Lambda + below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. 
+ type: string + type: object + name: + description: The Name of the in-application stream. + type: string + schema: + description: The Schema format of the data written to the + destination. See Destination Schema below for more details. + properties: + recordFormatType: + description: The Format Type of the records on the output + stream. Can be CSV or JSON. + type: string + type: object + type: object + type: array + referenceDataSources: + description: |- + An S3 Reference Data Source for the application. + See Reference Data Sources below for more details. + properties: + s3: + description: The S3 configuration for the reference data source. + See S3 Reference below for more details. + properties: + bucketArn: + description: The S3 Bucket ARN. + type: string + fileKey: + description: The File Key name containing reference data. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + schema: + description: The Schema format of the data in the streaming + source. See Source Schema below for more details. + properties: + recordColumns: + description: |- + The Record Column mapping for the streaming source data element. + See Record Columns below for more details. + items: + properties: + mapping: + description: The Mapping reference to the data element. + type: string + name: + description: Name of the column. + type: string + sqlType: + description: The SQL Type of the column. + type: string + type: object + type: array + recordEncoding: + description: The Encoding of the record in the streaming + source. + type: string + recordFormat: + description: |- + The Record Format and mapping information to schematize a record. + See Record Format below for more details. + properties: + mappingParameters: + description: |- + The Mapping Information for the record format. + See Mapping Parameters below for more details. + properties: + csv: + description: |- + Mapping information when the record format uses delimiters. 
+ See CSV Mapping Parameters below for more details. + properties: + recordColumnDelimiter: + description: The Column Delimiter. + type: string + recordRowDelimiter: + description: The Row Delimiter. + type: string + type: object + json: + description: |- + Mapping information when JSON is the record format on the streaming source. + See JSON Mapping Parameters below for more details. + properties: + recordRowPath: + description: Path to the top-level parent + that contains the records. + type: string + type: object + type: object + type: object + type: object + tableName: + description: The in-application Table Name. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + startApplication: + description: |- + Whether to start or stop the Kinesis Analytics Application. To start an application, an input with a defined starting_position must be configured. + To modify an application's starting position, first stop the application by setting start_application = false, then update starting_position and set start_application = true. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + cloudwatchLoggingOptions: + description: |- + The CloudWatch log stream options to monitor application errors. + See CloudWatch Logging Options below for more details. + properties: + logStreamArn: + description: The ARN of the CloudWatch Log Stream. + type: string + logStreamArnRef: + description: Reference to a Stream in cloudwatchlogs to populate + logStreamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logStreamArnSelector: + description: Selector for a Stream in cloudwatchlogs to populate + logStreamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roleArn: + description: The ARN of the IAM Role used to send application + messages. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + code: + description: SQL Code to transform input data, and generate output. + type: string + description: + description: Description of the application. + type: string + inputs: + description: Input configuration of the application. See Inputs + below for more details. + properties: + kinesisFirehose: + description: |- + The Kinesis Firehose configuration for the streaming source. Conflicts with kinesis_stream. + See Kinesis Firehose below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + kinesisStream: + description: |- + The Kinesis Stream configuration for the streaming source. Conflicts with kinesis_firehose. + See Kinesis Stream below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + resourceArnRef: + description: Reference to a Stream in kinesis to populate + resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a Stream in kinesis to populate + resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roleArn: + description: The IAM Role ARN to read the data. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + namePrefix: + description: The Name Prefix to use when creating an in-application + stream. + type: string + parallelism: + description: |- + The number of Parallel in-application streams to create. + See Parallelism below for more details. + properties: + count: + description: The Count of streams. + type: number + type: object + processingConfiguration: + description: |- + The Processing Configuration to transform records as they are received from the stream. + See Processing Configuration below for more details. + properties: + lambda: + description: The Lambda function configuration. See Lambda + below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + type: object + schema: + description: The Schema format of the data in the streaming + source. See Source Schema below for more details. + properties: + recordColumns: + description: |- + The Record Column mapping for the streaming source data element. + See Record Columns below for more details. + items: + properties: + mapping: + description: The Mapping reference to the data element. + type: string + name: + description: Name of the column. + type: string + sqlType: + description: The SQL Type of the column. + type: string + type: object + type: array + recordEncoding: + description: The Encoding of the record in the streaming + source. 
+ type: string + recordFormat: + description: |- + The Record Format and mapping information to schematize a record. + See Record Format below for more details. + properties: + mappingParameters: + description: |- + The Mapping Information for the record format. + See Mapping Parameters below for more details. + properties: + csv: + description: |- + Mapping information when the record format uses delimiters. + See CSV Mapping Parameters below for more details. + properties: + recordColumnDelimiter: + description: The Column Delimiter. + type: string + recordRowDelimiter: + description: The Row Delimiter. + type: string + type: object + json: + description: |- + Mapping information when JSON is the record format on the streaming source. + See JSON Mapping Parameters below for more details. + properties: + recordRowPath: + description: Path to the top-level parent + that contains the records. + type: string + type: object + type: object + type: object + type: object + startingPositionConfiguration: + description: |- + The point at which the application starts processing records from the streaming source. + See Starting Position Configuration below for more details. + items: + properties: + startingPosition: + description: 'The starting position on the stream. Valid + values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON.' + type: string + type: object + type: array + type: object + outputs: + description: Output destination configuration of the application. + See Outputs below for more details. + items: + properties: + kinesisFirehose: + description: |- + The Kinesis Firehose configuration for the destination stream. Conflicts with kinesis_stream. + See Kinesis Firehose below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + resourceArnRef: + description: Reference to a DeliveryStream in firehose + to populate resourceArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a DeliveryStream in firehose + to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roleArn: + description: The IAM Role ARN to read the data. + type: string + roleArnRef: + description: Reference to a Role in iam to populate + roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate + roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + kinesisStream: + description: |- + The Kinesis Stream configuration for the destination stream. Conflicts with kinesis_firehose. + See Kinesis Stream below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + lambda: + description: The Lambda function destination. See Lambda + below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + name: + description: The Name of the in-application stream. + type: string + schema: + description: The Schema format of the data written to the + destination. See Destination Schema below for more details. + properties: + recordFormatType: + description: The Format Type of the records on the output + stream. Can be CSV or JSON. + type: string + type: object + type: object + type: array + referenceDataSources: + description: |- + An S3 Reference Data Source for the application. + See Reference Data Sources below for more details. + properties: + s3: + description: The S3 configuration for the reference data source. + See S3 Reference below for more details. + properties: + bucketArn: + description: The S3 Bucket ARN. + type: string + fileKey: + description: The File Key name containing reference data. + type: string + roleArn: + description: The IAM Role ARN to read the data. 
+ type: string + type: object + schema: + description: The Schema format of the data in the streaming + source. See Source Schema below for more details. + properties: + recordColumns: + description: |- + The Record Column mapping for the streaming source data element. + See Record Columns below for more details. + items: + properties: + mapping: + description: The Mapping reference to the data element. + type: string + name: + description: Name of the column. + type: string + sqlType: + description: The SQL Type of the column. + type: string + type: object + type: array + recordEncoding: + description: The Encoding of the record in the streaming + source. + type: string + recordFormat: + description: |- + The Record Format and mapping information to schematize a record. + See Record Format below for more details. + properties: + mappingParameters: + description: |- + The Mapping Information for the record format. + See Mapping Parameters below for more details. + properties: + csv: + description: |- + Mapping information when the record format uses delimiters. + See CSV Mapping Parameters below for more details. + properties: + recordColumnDelimiter: + description: The Column Delimiter. + type: string + recordRowDelimiter: + description: The Row Delimiter. + type: string + type: object + json: + description: |- + Mapping information when JSON is the record format on the streaming source. + See JSON Mapping Parameters below for more details. + properties: + recordRowPath: + description: Path to the top-level parent + that contains the records. + type: string + type: object + type: object + type: object + type: object + tableName: + description: The in-application Table Name. + type: string + type: object + startApplication: + description: |- + Whether to start or stop the Kinesis Analytics Application. To start an application, an input with a defined starting_position must be configured. 
+ To modify an application's starting position, first stop the application by setting start_application = false, then update starting_position and set start_application = true. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ApplicationStatus defines the observed state of Application. 
+ properties: + atProvider: + properties: + arn: + description: The ARN of the Kinesis Analytics Appliation. + type: string + cloudwatchLoggingOptions: + description: |- + The CloudWatch log stream options to monitor application errors. + See CloudWatch Logging Options below for more details. + properties: + id: + description: The ARN of the Kinesis Analytics Application. + type: string + logStreamArn: + description: The ARN of the CloudWatch Log Stream. + type: string + roleArn: + description: The ARN of the IAM Role used to send application + messages. + type: string + type: object + code: + description: SQL Code to transform input data, and generate output. + type: string + createTimestamp: + description: The Timestamp when the application version was created. + type: string + description: + description: Description of the application. + type: string + id: + description: The ARN of the Kinesis Analytics Application. + type: string + inputs: + description: Input configuration of the application. See Inputs + below for more details. + properties: + id: + description: The ARN of the Kinesis Analytics Application. + type: string + kinesisFirehose: + description: |- + The Kinesis Firehose configuration for the streaming source. Conflicts with kinesis_stream. + See Kinesis Firehose below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + kinesisStream: + description: |- + The Kinesis Stream configuration for the streaming source. Conflicts with kinesis_firehose. + See Kinesis Stream below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + namePrefix: + description: The Name Prefix to use when creating an in-application + stream. 
+ type: string + parallelism: + description: |- + The number of Parallel in-application streams to create. + See Parallelism below for more details. + properties: + count: + description: The Count of streams. + type: number + type: object + processingConfiguration: + description: |- + The Processing Configuration to transform records as they are received from the stream. + See Processing Configuration below for more details. + properties: + lambda: + description: The Lambda function configuration. See Lambda + below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + type: object + schema: + description: The Schema format of the data in the streaming + source. See Source Schema below for more details. + properties: + recordColumns: + description: |- + The Record Column mapping for the streaming source data element. + See Record Columns below for more details. + items: + properties: + mapping: + description: The Mapping reference to the data element. + type: string + name: + description: Name of the column. + type: string + sqlType: + description: The SQL Type of the column. + type: string + type: object + type: array + recordEncoding: + description: The Encoding of the record in the streaming + source. + type: string + recordFormat: + description: |- + The Record Format and mapping information to schematize a record. + See Record Format below for more details. + properties: + mappingParameters: + description: |- + The Mapping Information for the record format. + See Mapping Parameters below for more details. + properties: + csv: + description: |- + Mapping information when the record format uses delimiters. + See CSV Mapping Parameters below for more details. + properties: + recordColumnDelimiter: + description: The Column Delimiter. + type: string + recordRowDelimiter: + description: The Row Delimiter. 
+ type: string + type: object + json: + description: |- + Mapping information when JSON is the record format on the streaming source. + See JSON Mapping Parameters below for more details. + properties: + recordRowPath: + description: Path to the top-level parent + that contains the records. + type: string + type: object + type: object + recordFormatType: + description: The Format Type of the records on the + output stream. Can be CSV or JSON. + type: string + type: object + type: object + startingPositionConfiguration: + description: |- + The point at which the application starts processing records from the streaming source. + See Starting Position Configuration below for more details. + items: + properties: + startingPosition: + description: 'The starting position on the stream. Valid + values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON.' + type: string + type: object + type: array + streamNames: + items: + type: string + type: array + type: object + lastUpdateTimestamp: + description: The Timestamp when the application was last updated. + type: string + outputs: + description: Output destination configuration of the application. + See Outputs below for more details. + items: + properties: + id: + description: The ARN of the Kinesis Analytics Application. + type: string + kinesisFirehose: + description: |- + The Kinesis Firehose configuration for the destination stream. Conflicts with kinesis_stream. + See Kinesis Firehose below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + kinesisStream: + description: |- + The Kinesis Stream configuration for the destination stream. Conflicts with kinesis_firehose. + See Kinesis Stream below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. 
+ type: string + type: object + lambda: + description: The Lambda function destination. See Lambda + below for more details. + properties: + resourceArn: + description: The ARN of the Lambda function. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + name: + description: The Name of the in-application stream. + type: string + schema: + description: The Schema format of the data written to the + destination. See Destination Schema below for more details. + properties: + recordFormatType: + description: The Format Type of the records on the output + stream. Can be CSV or JSON. + type: string + type: object + type: object + type: array + referenceDataSources: + description: |- + An S3 Reference Data Source for the application. + See Reference Data Sources below for more details. + properties: + id: + description: The ARN of the Kinesis Analytics Application. + type: string + s3: + description: The S3 configuration for the reference data source. + See S3 Reference below for more details. + properties: + bucketArn: + description: The S3 Bucket ARN. + type: string + fileKey: + description: The File Key name containing reference data. + type: string + roleArn: + description: The IAM Role ARN to read the data. + type: string + type: object + schema: + description: The Schema format of the data in the streaming + source. See Source Schema below for more details. + properties: + recordColumns: + description: |- + The Record Column mapping for the streaming source data element. + See Record Columns below for more details. + items: + properties: + mapping: + description: The Mapping reference to the data element. + type: string + name: + description: Name of the column. + type: string + sqlType: + description: The SQL Type of the column. + type: string + type: object + type: array + recordEncoding: + description: The Encoding of the record in the streaming + source. 
+ type: string + recordFormat: + description: |- + The Record Format and mapping information to schematize a record. + See Record Format below for more details. + properties: + mappingParameters: + description: |- + The Mapping Information for the record format. + See Mapping Parameters below for more details. + properties: + csv: + description: |- + Mapping information when the record format uses delimiters. + See CSV Mapping Parameters below for more details. + properties: + recordColumnDelimiter: + description: The Column Delimiter. + type: string + recordRowDelimiter: + description: The Row Delimiter. + type: string + type: object + json: + description: |- + Mapping information when JSON is the record format on the streaming source. + See JSON Mapping Parameters below for more details. + properties: + recordRowPath: + description: Path to the top-level parent + that contains the records. + type: string + type: object + type: object + recordFormatType: + description: The Format Type of the records on the + output stream. Can be CSV or JSON. + type: string + type: object + type: object + tableName: + description: The in-application Table Name. + type: string + type: object + startApplication: + description: |- + Whether to start or stop the Kinesis Analytics Application. To start an application, an input with a defined starting_position must be configured. + To modify an application's starting position, first stop the application by setting start_application = false, then update starting_position and set start_application = true. + type: boolean + status: + description: The Status of the application. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + version: + description: The Version of the application. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kinesisanalyticsv2.aws.upbound.io_applications.yaml b/package/crds/kinesisanalyticsv2.aws.upbound.io_applications.yaml index d465ae1e74..60adca49db 100644 --- a/package/crds/kinesisanalyticsv2.aws.upbound.io_applications.yaml +++ b/package/crds/kinesisanalyticsv2.aws.upbound.io_applications.yaml @@ -3515,3 +3515,3149 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Application is the Schema for the Applications API. Manages a + Kinesis Analytics v2 Application. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ApplicationSpec defines the desired state of Application + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + applicationConfiguration: + description: The application's configuration + properties: + applicationCodeConfiguration: + description: The code location and type parameters for the + application. + properties: + codeContent: + description: The location and type of the application + code. + properties: + s3ContentLocation: + description: Information about the Amazon S3 bucket + containing the application code. + properties: + bucketArn: + description: The ARN for the S3 bucket containing + the application code. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate + bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate + bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + fileKey: + description: The file key for the object containing + the application code. + type: string + fileKeyRef: + description: Reference to a Object in s3 to populate + fileKey. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileKeySelector: + description: Selector for a Object in s3 to populate + fileKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectVersion: + description: The version of the object containing + the application code. + type: string + type: object + textContent: + description: The text-format code for the application. + type: string + type: object + codeContentType: + description: 'Specifies whether the code content is in + text or zip format. Valid values: PLAINTEXT, ZIPFILE.' + type: string + type: object + applicationSnapshotConfiguration: + description: Describes whether snapshots are enabled for a + Flink-based application. + properties: + snapshotsEnabled: + description: Describes whether snapshots are enabled for + a Flink-based Kinesis Data Analytics application. + type: boolean + type: object + environmentProperties: + description: Describes execution properties for a Flink-based + application. + properties: + propertyGroup: + description: Describes the execution property groups. + items: + properties: + propertyGroupId: + description: The key of the application execution + property key-value map. + type: string + propertyMap: + additionalProperties: + type: string + description: Application execution property key-value + map. + type: object + x-kubernetes-map-type: granular + type: object + type: array + type: object + flinkApplicationConfiguration: + description: The configuration of a Flink-based application. + properties: + checkpointConfiguration: + description: Describes an application's checkpointing + configuration. + properties: + checkpointInterval: + description: Describes the interval in milliseconds + between checkpoint operations. + type: number + checkpointingEnabled: + description: Describes whether checkpointing is enabled + for a Flink-based Kinesis Data Analytics application. + type: boolean + configurationType: + description: 'Describes whether the application uses + Kinesis Data Analytics'' default checkpointing behavior. + Valid values: CUSTOM, DEFAULT. 
Set this attribute + to CUSTOM in order for any specified checkpointing_enabled, + checkpoint_interval, or min_pause_between_checkpoints + attribute values to be effective. If this attribute + is set to DEFAULT, the application will always use + the following values:' + type: string + minPauseBetweenCheckpoints: + description: Describes the minimum time in milliseconds + after a checkpoint operation completes that a new + checkpoint operation can start. + type: number + type: object + monitoringConfiguration: + description: Describes configuration parameters for CloudWatch + logging for an application. + properties: + configurationType: + description: 'Describes whether the application uses + Kinesis Data Analytics'' default checkpointing behavior. + Valid values: CUSTOM, DEFAULT. Set this attribute + to CUSTOM in order for any specified checkpointing_enabled, + checkpoint_interval, or min_pause_between_checkpoints + attribute values to be effective. If this attribute + is set to DEFAULT, the application will always use + the following values:' + type: string + logLevel: + description: 'Describes the verbosity of the CloudWatch + Logs for an application. Valid values: DEBUG, ERROR, + INFO, WARN.' + type: string + metricsLevel: + description: 'Describes the granularity of the CloudWatch + Logs for an application. Valid values: APPLICATION, + OPERATOR, PARALLELISM, TASK.' + type: string + type: object + parallelismConfiguration: + description: Describes parameters for how an application + executes multiple tasks simultaneously. + properties: + autoScalingEnabled: + description: Describes whether the Kinesis Data Analytics + service can increase the parallelism of the application + in response to increased throughput. + type: boolean + configurationType: + description: 'Describes whether the application uses + Kinesis Data Analytics'' default checkpointing behavior. + Valid values: CUSTOM, DEFAULT. 
Set this attribute + to CUSTOM in order for any specified checkpointing_enabled, + checkpoint_interval, or min_pause_between_checkpoints + attribute values to be effective. If this attribute + is set to DEFAULT, the application will always use + the following values:' + type: string + parallelism: + description: Describes the initial number of parallel + tasks that a Flink-based Kinesis Data Analytics + application can perform. + type: number + parallelismPerKpu: + description: Describes the number of parallel tasks + that a Flink-based Kinesis Data Analytics application + can perform per Kinesis Processing Unit (KPU) used + by the application. + type: number + type: object + type: object + runConfiguration: + description: Describes the starting properties for a Flink-based + application. + properties: + applicationRestoreConfiguration: + description: The restore behavior of a restarting application. + properties: + applicationRestoreType: + description: 'Specifies how the application should + be restored. Valid values: RESTORE_FROM_CUSTOM_SNAPSHOT, + RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT.' + type: string + snapshotName: + description: The identifier of an existing snapshot + of application state to use to restart an application. + The application uses this value if RESTORE_FROM_CUSTOM_SNAPSHOT + is specified for application_restore_type. + type: string + type: object + flinkRunConfiguration: + description: The starting parameters for a Flink-based + Kinesis Data Analytics application. + properties: + allowNonRestoredState: + description: When restoring from a snapshot, specifies + whether the runtime is allowed to skip a state that + cannot be mapped to the new program. Default is + false. + type: boolean + type: object + type: object + sqlApplicationConfiguration: + description: The configuration of a SQL-based application. + properties: + input: + description: The input stream used by the application. 
+ properties: + inputParallelism: + description: Describes the number of in-application + streams to create. + properties: + count: + description: The number of in-application streams + to create. + type: number + type: object + inputProcessingConfiguration: + description: |- + The input processing configuration for the input. + An input processor transforms records as they are received from the stream, before the application's SQL code executes. + properties: + inputLambdaProcessor: + description: Describes the Lambda function that + is used to preprocess the records in the stream + before being processed by your application code. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + type: object + type: object + inputSchema: + description: Describes the format of the data in the + streaming source, and how each data element maps + to corresponding columns in the in-application stream + that is being created. + properties: + recordColumn: + description: Describes the mapping of each data + element in the streaming source to the corresponding + column in the in-application stream. + items: + properties: + mapping: + description: A reference to the data element + in the streaming input or the reference + data source. + type: string + name: + description: The name of the application. + type: string + sqlType: + description: The type of column created + in the in-application input stream or + reference table. + type: string + type: object + type: array + recordEncoding: + description: Specifies the encoding of the records + in the streaming source. For example, UTF-8. + type: string + recordFormat: + description: Specifies the format of the records + on the streaming source. + properties: + mappingParameters: + description: Provides additional mapping information + specific to the record format (such as JSON, + CSV, or record fields delimited by some + delimiter) on the streaming source. 
+ properties: + csvMappingParameters: + description: Provides additional mapping + information when the record format uses + delimiters (for example, CSV). + properties: + recordColumnDelimiter: + description: The column delimiter. + For example, in a CSV format, a + comma (,) is the typical column + delimiter. + type: string + recordRowDelimiter: + description: The row delimiter. For + example, in a CSV format, \n is + the typical row delimiter. + type: string + type: object + jsonMappingParameters: + description: Provides additional mapping + information when JSON is the record + format on the streaming source. + properties: + recordRowPath: + description: The path to the top-level + parent that contains the records. + type: string + type: object + type: object + recordFormatType: + description: 'The type of record format. Valid + values: CSV, JSON.' + type: string + type: object + type: object + inputStartingPositionConfiguration: + description: The point at which the application starts + processing records from the streaming source. + items: + properties: + inputStartingPosition: + description: 'The starting position on the stream. + Valid values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON.' + type: string + type: object + type: array + kinesisFirehoseInput: + description: If the streaming source is a Kinesis + Data Firehose delivery stream, identifies the delivery + stream's ARN. + properties: + resourceArn: + description: The ARN of the Lambda function that + operates on records in the stream. + type: string + type: object + kinesisStreamsInput: + description: If the streaming source is a Kinesis + data stream, identifies the stream's Amazon Resource + Name (ARN). + properties: + resourceArn: + description: The ARN of the Lambda function that + operates on records in the stream. + type: string + resourceArnRef: + description: Reference to a Stream in kinesis + to populate resourceArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a Stream in kinesis + to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + namePrefix: + description: The name prefix to use when creating + an in-application stream. + type: string + type: object + output: + description: The destination streams used by the application. + items: + properties: + destinationSchema: + description: Describes the data format when records + are written to the destination. + properties: + recordFormatType: + description: 'The type of record format. Valid + values: CSV, JSON.' + type: string + type: object + kinesisFirehoseOutput: + description: Identifies a Kinesis Data Firehose + delivery stream as the destination. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + resourceArnRef: + description: Reference to a DeliveryStream in + firehose to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a DeliveryStream in + firehose to populate resourceArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + kinesisStreamsOutput: + description: Identifies a Kinesis data stream as + the destination. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + type: object + lambdaOutput: + description: Identifies a Lambda function as the + destination. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + resourceArnRef: + description: Reference to a Function in lambda + to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a Function in lambda + to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + name: + description: The name of the application. 
+ type: string + type: object + type: array + referenceDataSource: + description: The reference data source used by the application. + properties: + referenceSchema: + description: Describes the format of the data in the + streaming source, and how each data element maps + to corresponding columns created in the in-application + stream. + properties: + recordColumn: + description: Describes the mapping of each data + element in the streaming source to the corresponding + column in the in-application stream. + items: + properties: + mapping: + description: A reference to the data element + in the streaming input or the reference + data source. + type: string + name: + description: The name of the application. + type: string + sqlType: + description: The type of column created + in the in-application input stream or + reference table. + type: string + type: object + type: array + recordEncoding: + description: Specifies the encoding of the records + in the streaming source. For example, UTF-8. + type: string + recordFormat: + description: Specifies the format of the records + on the streaming source. + properties: + mappingParameters: + description: Provides additional mapping information + specific to the record format (such as JSON, + CSV, or record fields delimited by some + delimiter) on the streaming source. + properties: + csvMappingParameters: + description: Provides additional mapping + information when the record format uses + delimiters (for example, CSV). + properties: + recordColumnDelimiter: + description: The column delimiter. + For example, in a CSV format, a + comma (,) is the typical column + delimiter. + type: string + recordRowDelimiter: + description: The row delimiter. For + example, in a CSV format, \n is + the typical row delimiter. + type: string + type: object + jsonMappingParameters: + description: Provides additional mapping + information when JSON is the record + format on the streaming source. 
+ properties: + recordRowPath: + description: The path to the top-level + parent that contains the records. + type: string + type: object + type: object + recordFormatType: + description: 'The type of record format. Valid + values: CSV, JSON.' + type: string + type: object + type: object + s3ReferenceDataSource: + description: Identifies the S3 bucket and object that + contains the reference data. + properties: + bucketArn: + description: The ARN for the S3 bucket containing + the application code. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate + bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate + bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + fileKey: + description: The file key for the object containing + the application code. + type: string + type: object + tableName: + description: The name of the in-application table + to create. + type: string + type: object + type: object + vpcConfiguration: + description: The VPC configuration of a Flink-based application. + properties: + securityGroupIds: + description: The Security Group IDs used by the VPC configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The Subnet IDs used by the VPC configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + cloudwatchLoggingOptions: + description: A CloudWatch log stream to monitor application configuration + errors. + properties: + logStreamArn: + description: The ARN of the CloudWatch log stream to receive + application messages. + type: string + logStreamArnRef: + description: Reference to a Stream in cloudwatchlogs to populate + logStreamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logStreamArnSelector: + description: Selector for a Stream in cloudwatchlogs to populate + logStreamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + description: + description: A summary description of the application. + type: string + forceStop: + description: Whether to force stop an unresponsive Flink-based + application. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + runtimeEnvironment: + description: 'The runtime environment for the application. Valid + values: SQL-1_0, FLINK-1_6, FLINK-1_8, FLINK-1_11, FLINK-1_13, + FLINK-1_15, FLINK-1_18.' + type: string + serviceExecutionRole: + description: The ARN of the IAM role used by the application to + access Kinesis data streams, Kinesis Data Firehose delivery + streams, Amazon S3 objects, and other external resources. + type: string + serviceExecutionRoleRef: + description: Reference to a Role in iam to populate serviceExecutionRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceExecutionRoleSelector: + description: Selector for a Role in iam to populate serviceExecutionRole. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + startApplication: + description: Whether to start or stop the application. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + applicationConfiguration: + description: The application's configuration + properties: + applicationCodeConfiguration: + description: The code location and type parameters for the + application. + properties: + codeContent: + description: The location and type of the application + code. + properties: + s3ContentLocation: + description: Information about the Amazon S3 bucket + containing the application code. + properties: + bucketArn: + description: The ARN for the S3 bucket containing + the application code. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate + bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate + bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + fileKey: + description: The file key for the object containing + the application code. + type: string + fileKeyRef: + description: Reference to a Object in s3 to populate + fileKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileKeySelector: + description: Selector for a Object in s3 to populate + fileKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectVersion: + description: The version of the object containing + the application code. + type: string + type: object + textContent: + description: The text-format code for the application. + type: string + type: object + codeContentType: + description: 'Specifies whether the code content is in + text or zip format. Valid values: PLAINTEXT, ZIPFILE.' + type: string + type: object + applicationSnapshotConfiguration: + description: Describes whether snapshots are enabled for a + Flink-based application. + properties: + snapshotsEnabled: + description: Describes whether snapshots are enabled for + a Flink-based Kinesis Data Analytics application. 
+ type: boolean + type: object + environmentProperties: + description: Describes execution properties for a Flink-based + application. + properties: + propertyGroup: + description: Describes the execution property groups. + items: + properties: + propertyGroupId: + description: The key of the application execution + property key-value map. + type: string + propertyMap: + additionalProperties: + type: string + description: Application execution property key-value + map. + type: object + x-kubernetes-map-type: granular + type: object + type: array + type: object + flinkApplicationConfiguration: + description: The configuration of a Flink-based application. + properties: + checkpointConfiguration: + description: Describes an application's checkpointing + configuration. + properties: + checkpointInterval: + description: Describes the interval in milliseconds + between checkpoint operations. + type: number + checkpointingEnabled: + description: Describes whether checkpointing is enabled + for a Flink-based Kinesis Data Analytics application. + type: boolean + configurationType: + description: 'Describes whether the application uses + Kinesis Data Analytics'' default checkpointing behavior. + Valid values: CUSTOM, DEFAULT. Set this attribute + to CUSTOM in order for any specified checkpointing_enabled, + checkpoint_interval, or min_pause_between_checkpoints + attribute values to be effective. If this attribute + is set to DEFAULT, the application will always use + the following values:' + type: string + minPauseBetweenCheckpoints: + description: Describes the minimum time in milliseconds + after a checkpoint operation completes that a new + checkpoint operation can start. + type: number + type: object + monitoringConfiguration: + description: Describes configuration parameters for CloudWatch + logging for an application. + properties: + configurationType: + description: 'Describes whether the application uses + Kinesis Data Analytics'' default checkpointing behavior. 
+ Valid values: CUSTOM, DEFAULT. Set this attribute + to CUSTOM in order for any specified checkpointing_enabled, + checkpoint_interval, or min_pause_between_checkpoints + attribute values to be effective. If this attribute + is set to DEFAULT, the application will always use + the following values:' + type: string + logLevel: + description: 'Describes the verbosity of the CloudWatch + Logs for an application. Valid values: DEBUG, ERROR, + INFO, WARN.' + type: string + metricsLevel: + description: 'Describes the granularity of the CloudWatch + Logs for an application. Valid values: APPLICATION, + OPERATOR, PARALLELISM, TASK.' + type: string + type: object + parallelismConfiguration: + description: Describes parameters for how an application + executes multiple tasks simultaneously. + properties: + autoScalingEnabled: + description: Describes whether the Kinesis Data Analytics + service can increase the parallelism of the application + in response to increased throughput. + type: boolean + configurationType: + description: 'Describes whether the application uses + Kinesis Data Analytics'' default checkpointing behavior. + Valid values: CUSTOM, DEFAULT. Set this attribute + to CUSTOM in order for any specified checkpointing_enabled, + checkpoint_interval, or min_pause_between_checkpoints + attribute values to be effective. If this attribute + is set to DEFAULT, the application will always use + the following values:' + type: string + parallelism: + description: Describes the initial number of parallel + tasks that a Flink-based Kinesis Data Analytics + application can perform. + type: number + parallelismPerKpu: + description: Describes the number of parallel tasks + that a Flink-based Kinesis Data Analytics application + can perform per Kinesis Processing Unit (KPU) used + by the application. + type: number + type: object + type: object + runConfiguration: + description: Describes the starting properties for a Flink-based + application. 
+ properties: + applicationRestoreConfiguration: + description: The restore behavior of a restarting application. + properties: + applicationRestoreType: + description: 'Specifies how the application should + be restored. Valid values: RESTORE_FROM_CUSTOM_SNAPSHOT, + RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT.' + type: string + snapshotName: + description: The identifier of an existing snapshot + of application state to use to restart an application. + The application uses this value if RESTORE_FROM_CUSTOM_SNAPSHOT + is specified for application_restore_type. + type: string + type: object + flinkRunConfiguration: + description: The starting parameters for a Flink-based + Kinesis Data Analytics application. + properties: + allowNonRestoredState: + description: When restoring from a snapshot, specifies + whether the runtime is allowed to skip a state that + cannot be mapped to the new program. Default is + false. + type: boolean + type: object + type: object + sqlApplicationConfiguration: + description: The configuration of a SQL-based application. + properties: + input: + description: The input stream used by the application. + properties: + inputParallelism: + description: Describes the number of in-application + streams to create. + properties: + count: + description: The number of in-application streams + to create. + type: number + type: object + inputProcessingConfiguration: + description: |- + The input processing configuration for the input. + An input processor transforms records as they are received from the stream, before the application's SQL code executes. + properties: + inputLambdaProcessor: + description: Describes the Lambda function that + is used to preprocess the records in the stream + before being processed by your application code. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. 
+ type: string + type: object + type: object + inputSchema: + description: Describes the format of the data in the + streaming source, and how each data element maps + to corresponding columns in the in-application stream + that is being created. + properties: + recordColumn: + description: Describes the mapping of each data + element in the streaming source to the corresponding + column in the in-application stream. + items: + properties: + mapping: + description: A reference to the data element + in the streaming input or the reference + data source. + type: string + name: + description: The name of the application. + type: string + sqlType: + description: The type of column created + in the in-application input stream or + reference table. + type: string + type: object + type: array + recordEncoding: + description: Specifies the encoding of the records + in the streaming source. For example, UTF-8. + type: string + recordFormat: + description: Specifies the format of the records + on the streaming source. + properties: + mappingParameters: + description: Provides additional mapping information + specific to the record format (such as JSON, + CSV, or record fields delimited by some + delimiter) on the streaming source. + properties: + csvMappingParameters: + description: Provides additional mapping + information when the record format uses + delimiters (for example, CSV). + properties: + recordColumnDelimiter: + description: The column delimiter. + For example, in a CSV format, a + comma (,) is the typical column + delimiter. + type: string + recordRowDelimiter: + description: The row delimiter. For + example, in a CSV format, \n is + the typical row delimiter. + type: string + type: object + jsonMappingParameters: + description: Provides additional mapping + information when JSON is the record + format on the streaming source. + properties: + recordRowPath: + description: The path to the top-level + parent that contains the records. 
+ type: string + type: object + type: object + recordFormatType: + description: 'The type of record format. Valid + values: CSV, JSON.' + type: string + type: object + type: object + inputStartingPositionConfiguration: + description: The point at which the application starts + processing records from the streaming source. + items: + properties: + inputStartingPosition: + description: 'The starting position on the stream. + Valid values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON.' + type: string + type: object + type: array + kinesisFirehoseInput: + description: If the streaming source is a Kinesis + Data Firehose delivery stream, identifies the delivery + stream's ARN. + properties: + resourceArn: + description: The ARN of the Lambda function that + operates on records in the stream. + type: string + type: object + kinesisStreamsInput: + description: If the streaming source is a Kinesis + data stream, identifies the stream's Amazon Resource + Name (ARN). + properties: + resourceArn: + description: The ARN of the Lambda function that + operates on records in the stream. + type: string + resourceArnRef: + description: Reference to a Stream in kinesis + to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a Stream in kinesis + to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + namePrefix: + description: The name prefix to use when creating + an in-application stream. + type: string + type: object + output: + description: The destination streams used by the application. + items: + properties: + destinationSchema: + description: Describes the data format when records + are written to the destination. + properties: + recordFormatType: + description: 'The type of record format. Valid + values: CSV, JSON.' + type: string + type: object + kinesisFirehoseOutput: + description: Identifies a Kinesis Data Firehose + delivery stream as the destination. 
+ properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + resourceArnRef: + description: Reference to a DeliveryStream in + firehose to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a DeliveryStream in + firehose to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + kinesisStreamsOutput: + description: Identifies a Kinesis data stream as + the destination. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + type: object + lambdaOutput: + description: Identifies a Lambda function as the + destination. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + resourceArnRef: + description: Reference to a Function in lambda + to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a Function in lambda + to populate resourceArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + name: + description: The name of the application. + type: string + type: object + type: array + referenceDataSource: + description: The reference data source used by the application. + properties: + referenceSchema: + description: Describes the format of the data in the + streaming source, and how each data element maps + to corresponding columns created in the in-application + stream. + properties: + recordColumn: + description: Describes the mapping of each data + element in the streaming source to the corresponding + column in the in-application stream. + items: + properties: + mapping: + description: A reference to the data element + in the streaming input or the reference + data source. + type: string + name: + description: The name of the application. 
+ type: string + sqlType: + description: The type of column created + in the in-application input stream or + reference table. + type: string + type: object + type: array + recordEncoding: + description: Specifies the encoding of the records + in the streaming source. For example, UTF-8. + type: string + recordFormat: + description: Specifies the format of the records + on the streaming source. + properties: + mappingParameters: + description: Provides additional mapping information + specific to the record format (such as JSON, + CSV, or record fields delimited by some + delimiter) on the streaming source. + properties: + csvMappingParameters: + description: Provides additional mapping + information when the record format uses + delimiters (for example, CSV). + properties: + recordColumnDelimiter: + description: The column delimiter. + For example, in a CSV format, a + comma (,) is the typical column + delimiter. + type: string + recordRowDelimiter: + description: The row delimiter. For + example, in a CSV format, \n is + the typical row delimiter. + type: string + type: object + jsonMappingParameters: + description: Provides additional mapping + information when JSON is the record + format on the streaming source. + properties: + recordRowPath: + description: The path to the top-level + parent that contains the records. + type: string + type: object + type: object + recordFormatType: + description: 'The type of record format. Valid + values: CSV, JSON.' + type: string + type: object + type: object + s3ReferenceDataSource: + description: Identifies the S3 bucket and object that + contains the reference data. + properties: + bucketArn: + description: The ARN for the S3 bucket containing + the application code. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate + bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate + bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + fileKey: + description: The file key for the object containing + the application code. + type: string + type: object + tableName: + description: The name of the in-application table + to create. + type: string + type: object + type: object + vpcConfiguration: + description: The VPC configuration of a Flink-based application. + properties: + securityGroupIds: + description: The Security Group IDs used by the VPC configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The Subnet IDs used by the VPC configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + cloudwatchLoggingOptions: + description: A CloudWatch log stream to monitor application configuration + errors. + properties: + logStreamArn: + description: The ARN of the CloudWatch log stream to receive + application messages. + type: string + logStreamArnRef: + description: Reference to a Stream in cloudwatchlogs to populate + logStreamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logStreamArnSelector: + description: Selector for a Stream in cloudwatchlogs to populate + logStreamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + description: + description: A summary description of the application. + type: string + forceStop: + description: Whether to force stop an unresponsive Flink-based + application. + type: boolean + runtimeEnvironment: + description: 'The runtime environment for the application. Valid + values: SQL-1_0, FLINK-1_6, FLINK-1_8, FLINK-1_11, FLINK-1_13, + FLINK-1_15, FLINK-1_18.' + type: string + serviceExecutionRole: + description: The ARN of the IAM role used by the application to + access Kinesis data streams, Kinesis Data Firehose delivery + streams, Amazon S3 objects, and other external resources. 
+ type: string + serviceExecutionRoleRef: + description: Reference to a Role in iam to populate serviceExecutionRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceExecutionRoleSelector: + description: Selector for a Role in iam to populate serviceExecutionRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + startApplication: + description: Whether to start or stop the application. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.runtimeEnvironment is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.runtimeEnvironment) + || (has(self.initProvider) && has(self.initProvider.runtimeEnvironment))' + status: + description: ApplicationStatus defines the observed state of Application. + properties: + atProvider: + properties: + applicationConfiguration: + description: The application's configuration + properties: + applicationCodeConfiguration: + description: The code location and type parameters for the + application. + properties: + codeContent: + description: The location and type of the application + code. + properties: + s3ContentLocation: + description: Information about the Amazon S3 bucket + containing the application code. + properties: + bucketArn: + description: The ARN for the S3 bucket containing + the application code. + type: string + fileKey: + description: The file key for the object containing + the application code. + type: string + objectVersion: + description: The version of the object containing + the application code. + type: string + type: object + textContent: + description: The text-format code for the application. + type: string + type: object + codeContentType: + description: 'Specifies whether the code content is in + text or zip format. Valid values: PLAINTEXT, ZIPFILE.' + type: string + type: object + applicationSnapshotConfiguration: + description: Describes whether snapshots are enabled for a + Flink-based application. + properties: + snapshotsEnabled: + description: Describes whether snapshots are enabled for + a Flink-based Kinesis Data Analytics application. 
+ type: boolean + type: object + environmentProperties: + description: Describes execution properties for a Flink-based + application. + properties: + propertyGroup: + description: Describes the execution property groups. + items: + properties: + propertyGroupId: + description: The key of the application execution + property key-value map. + type: string + propertyMap: + additionalProperties: + type: string + description: Application execution property key-value + map. + type: object + x-kubernetes-map-type: granular + type: object + type: array + type: object + flinkApplicationConfiguration: + description: The configuration of a Flink-based application. + properties: + checkpointConfiguration: + description: Describes an application's checkpointing + configuration. + properties: + checkpointInterval: + description: Describes the interval in milliseconds + between checkpoint operations. + type: number + checkpointingEnabled: + description: Describes whether checkpointing is enabled + for a Flink-based Kinesis Data Analytics application. + type: boolean + configurationType: + description: 'Describes whether the application uses + Kinesis Data Analytics'' default checkpointing behavior. + Valid values: CUSTOM, DEFAULT. Set this attribute + to CUSTOM in order for any specified checkpointing_enabled, + checkpoint_interval, or min_pause_between_checkpoints + attribute values to be effective. If this attribute + is set to DEFAULT, the application will always use + the following values:' + type: string + minPauseBetweenCheckpoints: + description: Describes the minimum time in milliseconds + after a checkpoint operation completes that a new + checkpoint operation can start. + type: number + type: object + monitoringConfiguration: + description: Describes configuration parameters for CloudWatch + logging for an application. + properties: + configurationType: + description: 'Describes whether the application uses + Kinesis Data Analytics'' default checkpointing behavior. 
+ Valid values: CUSTOM, DEFAULT. Set this attribute + to CUSTOM in order for any specified checkpointing_enabled, + checkpoint_interval, or min_pause_between_checkpoints + attribute values to be effective. If this attribute + is set to DEFAULT, the application will always use + the following values:' + type: string + logLevel: + description: 'Describes the verbosity of the CloudWatch + Logs for an application. Valid values: DEBUG, ERROR, + INFO, WARN.' + type: string + metricsLevel: + description: 'Describes the granularity of the CloudWatch + Logs for an application. Valid values: APPLICATION, + OPERATOR, PARALLELISM, TASK.' + type: string + type: object + parallelismConfiguration: + description: Describes parameters for how an application + executes multiple tasks simultaneously. + properties: + autoScalingEnabled: + description: Describes whether the Kinesis Data Analytics + service can increase the parallelism of the application + in response to increased throughput. + type: boolean + configurationType: + description: 'Describes whether the application uses + Kinesis Data Analytics'' default checkpointing behavior. + Valid values: CUSTOM, DEFAULT. Set this attribute + to CUSTOM in order for any specified checkpointing_enabled, + checkpoint_interval, or min_pause_between_checkpoints + attribute values to be effective. If this attribute + is set to DEFAULT, the application will always use + the following values:' + type: string + parallelism: + description: Describes the initial number of parallel + tasks that a Flink-based Kinesis Data Analytics + application can perform. + type: number + parallelismPerKpu: + description: Describes the number of parallel tasks + that a Flink-based Kinesis Data Analytics application + can perform per Kinesis Processing Unit (KPU) used + by the application. + type: number + type: object + type: object + runConfiguration: + description: Describes the starting properties for a Flink-based + application. 
+ properties: + applicationRestoreConfiguration: + description: The restore behavior of a restarting application. + properties: + applicationRestoreType: + description: 'Specifies how the application should + be restored. Valid values: RESTORE_FROM_CUSTOM_SNAPSHOT, + RESTORE_FROM_LATEST_SNAPSHOT, SKIP_RESTORE_FROM_SNAPSHOT.' + type: string + snapshotName: + description: The identifier of an existing snapshot + of application state to use to restart an application. + The application uses this value if RESTORE_FROM_CUSTOM_SNAPSHOT + is specified for application_restore_type. + type: string + type: object + flinkRunConfiguration: + description: The starting parameters for a Flink-based + Kinesis Data Analytics application. + properties: + allowNonRestoredState: + description: When restoring from a snapshot, specifies + whether the runtime is allowed to skip a state that + cannot be mapped to the new program. Default is + false. + type: boolean + type: object + type: object + sqlApplicationConfiguration: + description: The configuration of a SQL-based application. + properties: + input: + description: The input stream used by the application. + properties: + inAppStreamNames: + items: + type: string + type: array + inputId: + description: The application identifier. + type: string + inputParallelism: + description: Describes the number of in-application + streams to create. + properties: + count: + description: The number of in-application streams + to create. + type: number + type: object + inputProcessingConfiguration: + description: |- + The input processing configuration for the input. + An input processor transforms records as they are received from the stream, before the application's SQL code executes. + properties: + inputLambdaProcessor: + description: Describes the Lambda function that + is used to preprocess the records in the stream + before being processed by your application code. 
+ properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + type: object + type: object + inputSchema: + description: Describes the format of the data in the + streaming source, and how each data element maps + to corresponding columns in the in-application stream + that is being created. + properties: + recordColumn: + description: Describes the mapping of each data + element in the streaming source to the corresponding + column in the in-application stream. + items: + properties: + mapping: + description: A reference to the data element + in the streaming input or the reference + data source. + type: string + name: + description: The name of the application. + type: string + sqlType: + description: The type of column created + in the in-application input stream or + reference table. + type: string + type: object + type: array + recordEncoding: + description: Specifies the encoding of the records + in the streaming source. For example, UTF-8. + type: string + recordFormat: + description: Specifies the format of the records + on the streaming source. + properties: + mappingParameters: + description: Provides additional mapping information + specific to the record format (such as JSON, + CSV, or record fields delimited by some + delimiter) on the streaming source. + properties: + csvMappingParameters: + description: Provides additional mapping + information when the record format uses + delimiters (for example, CSV). + properties: + recordColumnDelimiter: + description: The column delimiter. + For example, in a CSV format, a + comma (,) is the typical column + delimiter. + type: string + recordRowDelimiter: + description: The row delimiter. For + example, in a CSV format, \n is + the typical row delimiter. + type: string + type: object + jsonMappingParameters: + description: Provides additional mapping + information when JSON is the record + format on the streaming source. 
+ properties: + recordRowPath: + description: The path to the top-level + parent that contains the records. + type: string + type: object + type: object + recordFormatType: + description: 'The type of record format. Valid + values: CSV, JSON.' + type: string + type: object + type: object + inputStartingPositionConfiguration: + description: The point at which the application starts + processing records from the streaming source. + items: + properties: + inputStartingPosition: + description: 'The starting position on the stream. + Valid values: LAST_STOPPED_POINT, NOW, TRIM_HORIZON.' + type: string + type: object + type: array + kinesisFirehoseInput: + description: If the streaming source is a Kinesis + Data Firehose delivery stream, identifies the delivery + stream's ARN. + properties: + resourceArn: + description: The ARN of the Lambda function that + operates on records in the stream. + type: string + type: object + kinesisStreamsInput: + description: If the streaming source is a Kinesis + data stream, identifies the stream's Amazon Resource + Name (ARN). + properties: + resourceArn: + description: The ARN of the Lambda function that + operates on records in the stream. + type: string + type: object + namePrefix: + description: The name prefix to use when creating + an in-application stream. + type: string + type: object + output: + description: The destination streams used by the application. + items: + properties: + destinationSchema: + description: Describes the data format when records + are written to the destination. + properties: + recordFormatType: + description: 'The type of record format. Valid + values: CSV, JSON.' + type: string + type: object + kinesisFirehoseOutput: + description: Identifies a Kinesis Data Firehose + delivery stream as the destination. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. 
+ type: string + type: object + kinesisStreamsOutput: + description: Identifies a Kinesis data stream as + the destination. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + type: object + lambdaOutput: + description: Identifies a Lambda function as the + destination. + properties: + resourceArn: + description: The ARN of the Lambda function + that operates on records in the stream. + type: string + type: object + name: + description: The name of the application. + type: string + outputId: + description: The application identifier. + type: string + type: object + type: array + referenceDataSource: + description: The reference data source used by the application. + properties: + referenceId: + description: The application identifier. + type: string + referenceSchema: + description: Describes the format of the data in the + streaming source, and how each data element maps + to corresponding columns created in the in-application + stream. + properties: + recordColumn: + description: Describes the mapping of each data + element in the streaming source to the corresponding + column in the in-application stream. + items: + properties: + mapping: + description: A reference to the data element + in the streaming input or the reference + data source. + type: string + name: + description: The name of the application. + type: string + sqlType: + description: The type of column created + in the in-application input stream or + reference table. + type: string + type: object + type: array + recordEncoding: + description: Specifies the encoding of the records + in the streaming source. For example, UTF-8. + type: string + recordFormat: + description: Specifies the format of the records + on the streaming source. 
+ properties: + mappingParameters: + description: Provides additional mapping information + specific to the record format (such as JSON, + CSV, or record fields delimited by some + delimiter) on the streaming source. + properties: + csvMappingParameters: + description: Provides additional mapping + information when the record format uses + delimiters (for example, CSV). + properties: + recordColumnDelimiter: + description: The column delimiter. + For example, in a CSV format, a + comma (,) is the typical column + delimiter. + type: string + recordRowDelimiter: + description: The row delimiter. For + example, in a CSV format, \n is + the typical row delimiter. + type: string + type: object + jsonMappingParameters: + description: Provides additional mapping + information when JSON is the record + format on the streaming source. + properties: + recordRowPath: + description: The path to the top-level + parent that contains the records. + type: string + type: object + type: object + recordFormatType: + description: 'The type of record format. Valid + values: CSV, JSON.' + type: string + type: object + type: object + s3ReferenceDataSource: + description: Identifies the S3 bucket and object that + contains the reference data. + properties: + bucketArn: + description: The ARN for the S3 bucket containing + the application code. + type: string + fileKey: + description: The file key for the object containing + the application code. + type: string + type: object + tableName: + description: The name of the in-application table + to create. + type: string + type: object + type: object + vpcConfiguration: + description: The VPC configuration of a Flink-based application. + properties: + securityGroupIds: + description: The Security Group IDs used by the VPC configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The Subnet IDs used by the VPC configuration. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + vpcConfigurationId: + description: The application identifier. + type: string + vpcId: + description: The application identifier. + type: string + type: object + type: object + arn: + description: The ARN of the application. + type: string + cloudwatchLoggingOptions: + description: A CloudWatch log stream to monitor application configuration + errors. + properties: + cloudwatchLoggingOptionId: + description: The application identifier. + type: string + logStreamArn: + description: The ARN of the CloudWatch log stream to receive + application messages. + type: string + type: object + createTimestamp: + description: The current timestamp when the application was created. + type: string + description: + description: A summary description of the application. + type: string + forceStop: + description: Whether to force stop an unresponsive Flink-based + application. + type: boolean + id: + description: The application identifier. + type: string + lastUpdateTimestamp: + description: The current timestamp when the application was last + updated. + type: string + runtimeEnvironment: + description: 'The runtime environment for the application. Valid + values: SQL-1_0, FLINK-1_6, FLINK-1_8, FLINK-1_11, FLINK-1_13, + FLINK-1_15, FLINK-1_18.' + type: string + serviceExecutionRole: + description: The ARN of the IAM role used by the application to + access Kinesis data streams, Kinesis Data Firehose delivery + streams, Amazon S3 objects, and other external resources. + type: string + startApplication: + description: Whether to start or stop the application. + type: boolean + status: + description: The status of the application. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + versionId: + description: The current application version. Kinesis Data Analytics + updates the version_id each time the application is updated. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lakeformation.aws.upbound.io_permissions.yaml b/package/crds/lakeformation.aws.upbound.io_permissions.yaml index c7b55d46d1..6fc3295ddb 100644 --- a/package/crds/lakeformation.aws.upbound.io_permissions.yaml +++ b/package/crds/lakeformation.aws.upbound.io_permissions.yaml @@ -1354,3 +1354,1294 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Permissions is the Schema for the Permissionss API. Grants permissions + to the principal to access metadata in the Data Catalog and data organized + in underlying data storage such as Amazon S3. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PermissionsSpec defines the desired state of Permissions + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + catalogId: + description: – Identifier for the Data Catalog. By default, the + account ID. The Data Catalog is the persistent metadata store. + It contains database definitions, table definitions, and other + control information to manage your Lake Formation environment. + type: string + catalogResource: + description: Whether the permissions are to be granted for the + Data Catalog. Defaults to false. + type: boolean + dataCellsFilter: + description: Configuration block for a data cells filter resource. + Detailed below. + properties: + databaseName: + description: The name of the database. + type: string + name: + description: The name of the data cells filter. 
+ type: string + tableCatalogId: + description: The ID of the Data Catalog. + type: string + tableName: + description: The name of the table. + type: string + type: object + dataLocation: + description: Configuration block for a data location resource. + Detailed below. + properties: + arn: + description: – Amazon Resource Name (ARN) that uniquely identifies + the data location resource. + type: string + arnRef: + description: Reference to a Resource in lakeformation to populate + arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a Resource in lakeformation to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + catalogId: + description: Identifier for the Data Catalog where the location + is registered with Lake Formation. By default, it is the + account ID of the caller. + type: string + type: object + database: + description: Configuration block for a database resource. Detailed + below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + name: + description: – Name of the database resource. Unique to the + Data Catalog. + type: string + nameRef: + description: Reference to a CatalogDatabase in glue to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a CatalogDatabase in glue to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + lfTag: + description: Configuration block for an LF-tag resource. Detailed + below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + key: + description: name for the tag. + type: string + values: + description: A list of possible values an attribute can take. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + lfTagPolicy: + description: Configuration block for an LF-tag policy resource. + Detailed below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. 
+ type: string + expression: + description: A list of tag conditions that apply to the resource's + tag policy. Configuration block for tag conditions that + apply to the policy. See expression below. + items: + properties: + key: + description: name of an LF-Tag. + type: string + values: + description: A list of possible values of an LF-Tag. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + resourceType: + description: – The resource type for which the tag policy + applies. Valid values are DATABASE and TABLE. + type: string + type: object + permissions: + description: – List of permissions granted to the principal. + Valid values may include ALL, ALTER, ASSOCIATE, CREATE_DATABASE, + CREATE_TABLE, DATA_LOCATION_ACCESS, DELETE, DESCRIBE, DROP, + INSERT, and SELECT. For details on each permission, see Lake + Formation Permissions Reference. + items: + type: string + type: array + permissionsWithGrantOption: + description: Subset of permissions which the principal can pass. + items: + type: string + type: array + principal: + description: account permissions. For more information, see Lake + Formation Permissions Reference. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + table: + description: Configuration block for a table resource. Detailed + below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + databaseName: + description: – Name of the database for the table. Unique + to a Data Catalog. + type: string + name: + description: Name of the table. + type: string + wildcard: + description: Whether to use a wildcard representing every + table under a database. Defaults to false. + type: boolean + type: object + tableWithColumns: + description: Configuration block for a table with columns resource. + Detailed below. 
+ properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + columnNames: + description: Set of column names for the table. + items: + type: string + type: array + x-kubernetes-list-type: set + databaseName: + description: – Name of the database for the table with columns + resource. Unique to the Data Catalog. + type: string + excludedColumnNames: + description: Set of column names for the table to exclude. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: – Name of the table resource. + type: string + nameRef: + description: Reference to a CatalogTable in glue to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a CatalogTable in glue to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + wildcard: + description: Whether to use a column wildcard. + type: boolean + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + catalogId: + description: – Identifier for the Data Catalog. By default, the + account ID. The Data Catalog is the persistent metadata store. + It contains database definitions, table definitions, and other + control information to manage your Lake Formation environment. + type: string + catalogResource: + description: Whether the permissions are to be granted for the + Data Catalog. 
Defaults to false. + type: boolean + dataCellsFilter: + description: Configuration block for a data cells filter resource. + Detailed below. + properties: + databaseName: + description: The name of the database. + type: string + name: + description: The name of the data cells filter. + type: string + tableCatalogId: + description: The ID of the Data Catalog. + type: string + tableName: + description: The name of the table. + type: string + type: object + dataLocation: + description: Configuration block for a data location resource. + Detailed below. + properties: + arn: + description: – Amazon Resource Name (ARN) that uniquely identifies + the data location resource. + type: string + arnRef: + description: Reference to a Resource in lakeformation to populate + arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a Resource in lakeformation to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + catalogId: + description: Identifier for the Data Catalog where the location + is registered with Lake Formation. By default, it is the + account ID of the caller. + type: string + type: object + database: + description: Configuration block for a database resource. Detailed + below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + name: + description: – Name of the database resource. Unique to the + Data Catalog. + type: string + nameRef: + description: Reference to a CatalogDatabase in glue to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a CatalogDatabase in glue to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + lfTag: + description: Configuration block for an LF-tag resource. Detailed + below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + key: + description: name for the tag. 
+ type: string + values: + description: A list of possible values an attribute can take. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + lfTagPolicy: + description: Configuration block for an LF-tag policy resource. + Detailed below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + expression: + description: A list of tag conditions that apply to the resource's + tag policy. Configuration block for tag conditions that + apply to the policy. See expression below. + items: + properties: + key: + description: name of an LF-Tag. + type: string + values: + description: A list of possible values of an LF-Tag. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + resourceType: + description: – The resource type for which the tag policy + applies. Valid values are DATABASE and TABLE. + type: string + type: object + permissions: + description: – List of permissions granted to the principal. + Valid values may include ALL, ALTER, ASSOCIATE, CREATE_DATABASE, + CREATE_TABLE, DATA_LOCATION_ACCESS, DELETE, DESCRIBE, DROP, + INSERT, and SELECT. For details on each permission, see Lake + Formation Permissions Reference. + items: + type: string + type: array + permissionsWithGrantOption: + description: Subset of permissions which the principal can pass. + items: + type: string + type: array + principal: + description: account permissions. For more information, see Lake + Formation Permissions Reference. + type: string + table: + description: Configuration block for a table resource. Detailed + below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + databaseName: + description: – Name of the database for the table. Unique + to a Data Catalog. + type: string + name: + description: Name of the table. 
+ type: string + wildcard: + description: Whether to use a wildcard representing every + table under a database. Defaults to false. + type: boolean + type: object + tableWithColumns: + description: Configuration block for a table with columns resource. + Detailed below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + columnNames: + description: Set of column names for the table. + items: + type: string + type: array + x-kubernetes-list-type: set + databaseName: + description: – Name of the database for the table with columns + resource. Unique to the Data Catalog. + type: string + excludedColumnNames: + description: Set of column names for the table to exclude. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: – Name of the table resource. + type: string + nameRef: + description: Reference to a CatalogTable in glue to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a CatalogTable in glue to populate + name. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + wildcard: + description: Whether to use a column wildcard. + type: boolean + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.permissions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.permissions) + || (has(self.initProvider) && has(self.initProvider.permissions))' + - message: spec.forProvider.principal is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.principal) + || (has(self.initProvider) && has(self.initProvider.principal))' + status: + description: PermissionsStatus defines the observed state of Permissions. + properties: + atProvider: + properties: + catalogId: + description: – Identifier for the Data Catalog. By default, the + account ID. The Data Catalog is the persistent metadata store. + It contains database definitions, table definitions, and other + control information to manage your Lake Formation environment. 
+ type: string + catalogResource: + description: Whether the permissions are to be granted for the + Data Catalog. Defaults to false. + type: boolean + dataCellsFilter: + description: Configuration block for a data cells filter resource. + Detailed below. + properties: + databaseName: + description: The name of the database. + type: string + name: + description: The name of the data cells filter. + type: string + tableCatalogId: + description: The ID of the Data Catalog. + type: string + tableName: + description: The name of the table. + type: string + type: object + dataLocation: + description: Configuration block for a data location resource. + Detailed below. + properties: + arn: + description: – Amazon Resource Name (ARN) that uniquely identifies + the data location resource. + type: string + catalogId: + description: Identifier for the Data Catalog where the location + is registered with Lake Formation. By default, it is the + account ID of the caller. + type: string + type: object + database: + description: Configuration block for a database resource. Detailed + below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + name: + description: – Name of the database resource. Unique to the + Data Catalog. + type: string + type: object + id: + type: string + lfTag: + description: Configuration block for an LF-tag resource. Detailed + below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + key: + description: name for the tag. + type: string + values: + description: A list of possible values an attribute can take. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + lfTagPolicy: + description: Configuration block for an LF-tag policy resource. + Detailed below. + properties: + catalogId: + description: Identifier for the Data Catalog. 
By default, + it is the account ID of the caller. + type: string + expression: + description: A list of tag conditions that apply to the resource's + tag policy. Configuration block for tag conditions that + apply to the policy. See expression below. + items: + properties: + key: + description: name of an LF-Tag. + type: string + values: + description: A list of possible values of an LF-Tag. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + resourceType: + description: – The resource type for which the tag policy + applies. Valid values are DATABASE and TABLE. + type: string + type: object + permissions: + description: – List of permissions granted to the principal. + Valid values may include ALL, ALTER, ASSOCIATE, CREATE_DATABASE, + CREATE_TABLE, DATA_LOCATION_ACCESS, DELETE, DESCRIBE, DROP, + INSERT, and SELECT. For details on each permission, see Lake + Formation Permissions Reference. + items: + type: string + type: array + permissionsWithGrantOption: + description: Subset of permissions which the principal can pass. + items: + type: string + type: array + principal: + description: account permissions. For more information, see Lake + Formation Permissions Reference. + type: string + table: + description: Configuration block for a table resource. Detailed + below. + properties: + catalogId: + description: Identifier for the Data Catalog. By default, + it is the account ID of the caller. + type: string + databaseName: + description: – Name of the database for the table. Unique + to a Data Catalog. + type: string + name: + description: Name of the table. + type: string + wildcard: + description: Whether to use a wildcard representing every + table under a database. Defaults to false. + type: boolean + type: object + tableWithColumns: + description: Configuration block for a table with columns resource. + Detailed below. + properties: + catalogId: + description: Identifier for the Data Catalog. 
By default, + it is the account ID of the caller. + type: string + columnNames: + description: Set of column names for the table. + items: + type: string + type: array + x-kubernetes-list-type: set + databaseName: + description: – Name of the database for the table with columns + resource. Unique to the Data Catalog. + type: string + excludedColumnNames: + description: Set of column names for the table to exclude. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: – Name of the table resource. + type: string + wildcard: + description: Whether to use a column wildcard. + type: boolean + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lambda.aws.upbound.io_aliases.yaml b/package/crds/lambda.aws.upbound.io_aliases.yaml index 6eb8eba47c..52110898f9 100644 --- a/package/crds/lambda.aws.upbound.io_aliases.yaml +++ b/package/crds/lambda.aws.upbound.io_aliases.yaml @@ -489,3 +489,465 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Alias is the Schema for the Aliass API. Creates a Lambda function + alias. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AliasSpec defines the desired state of Alias + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the alias. + type: string + functionName: + description: Lambda Function name or ARN. + type: string + functionNameRef: + description: Reference to a Function in lambda to populate functionName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionNameSelector: + description: Selector for a Function in lambda to populate functionName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + functionVersion: + description: 'Lambda function version for which you are creating + the alias. Pattern: (\$LATEST|[0-9]+).' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + routingConfig: + description: The Lambda alias' route configuration settings. Fields + documented below + properties: + additionalVersionWeights: + additionalProperties: + type: number + description: A map that defines the proportion of events that + should be sent to different versions of a lambda function. 
+ type: object + x-kubernetes-map-type: granular + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the alias. + type: string + functionVersion: + description: 'Lambda function version for which you are creating + the alias. Pattern: (\$LATEST|[0-9]+).' + type: string + routingConfig: + description: The Lambda alias' route configuration settings. Fields + documented below + properties: + additionalVersionWeights: + additionalProperties: + type: number + description: A map that defines the proportion of events that + should be sent to different versions of a lambda function. + type: object + x-kubernetes-map-type: granular + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.functionVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.functionVersion) + || (has(self.initProvider) && has(self.initProvider.functionVersion))' + status: + description: AliasStatus defines the observed state of Alias. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) identifying your Lambda + function alias. + type: string + description: + description: Description of the alias. + type: string + functionName: + description: Lambda Function name or ARN. + type: string + functionVersion: + description: 'Lambda function version for which you are creating + the alias. Pattern: (\$LATEST|[0-9]+).' 
+ type: string + id: + type: string + invokeArn: + description: The ARN to be used for invoking Lambda Function from + API Gateway - to be used in aws_api_gateway_integration's uri + type: string + routingConfig: + description: The Lambda alias' route configuration settings. Fields + documented below + properties: + additionalVersionWeights: + additionalProperties: + type: number + description: A map that defines the proportion of events that + should be sent to different versions of a lambda function. + type: object + x-kubernetes-map-type: granular + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lambda.aws.upbound.io_codesigningconfigs.yaml b/package/crds/lambda.aws.upbound.io_codesigningconfigs.yaml index 32c2f5b4ff..b2d0f18a92 100644 --- a/package/crds/lambda.aws.upbound.io_codesigningconfigs.yaml +++ b/package/crds/lambda.aws.upbound.io_codesigningconfigs.yaml @@ -606,3 +606,579 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CodeSigningConfig is the Schema for the CodeSigningConfigs API. + Provides a Lambda Code Signing Config resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CodeSigningConfigSpec defines the desired state of CodeSigningConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowedPublishers: + description: A configuration block of allowed publishers as signing + profiles for this code signing configuration. Detailed below. + properties: + signingProfileVersionArns: + description: The Amazon Resource Name (ARN) for each of the + signing profiles. A signing profile defines a trusted user + who can sign a code package. + items: + type: string + type: array + x-kubernetes-list-type: set + signingProfileVersionArnsRefs: + description: References to SigningProfile in signer to populate + signingProfileVersionArns. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + signingProfileVersionArnsSelector: + description: Selector for a list of SigningProfile in signer + to populate signingProfileVersionArns. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + description: + description: Descriptive name for this code signing configuration. 
+ type: string + policies: + description: A configuration block of code signing policies that + define the actions to take if the validation checks fail. Detailed + below. + properties: + untrustedArtifactOnDeployment: + description: 'Code signing configuration policy for deployment + validation failure. If you set the policy to Enforce, Lambda + blocks the deployment request if code-signing validation + checks fail. If you set the policy to Warn, Lambda allows + the deployment and creates a CloudWatch log. Valid values: + Warn, Enforce. Default value: Warn.' + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowedPublishers: + description: A configuration block of allowed publishers as signing + profiles for this code signing configuration. Detailed below. + properties: + signingProfileVersionArns: + description: The Amazon Resource Name (ARN) for each of the + signing profiles. A signing profile defines a trusted user + who can sign a code package. + items: + type: string + type: array + x-kubernetes-list-type: set + signingProfileVersionArnsRefs: + description: References to SigningProfile in signer to populate + signingProfileVersionArns. 
+ items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + signingProfileVersionArnsSelector: + description: Selector for a list of SigningProfile in signer + to populate signingProfileVersionArns. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + description: + description: Descriptive name for this code signing configuration. + type: string + policies: + description: A configuration block of code signing policies that + define the actions to take if the validation checks fail. Detailed + below. + properties: + untrustedArtifactOnDeployment: + description: 'Code signing configuration policy for deployment + validation failure. If you set the policy to Enforce, Lambda + blocks the deployment request if code-signing validation + checks fail. If you set the policy to Warn, Lambda allows + the deployment and creates a CloudWatch log. Valid values: + Warn, Enforce. Default value: Warn.' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.allowedPublishers is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.allowedPublishers) + || (has(self.initProvider) && has(self.initProvider.allowedPublishers))' + status: + description: CodeSigningConfigStatus defines the observed state of CodeSigningConfig. + properties: + atProvider: + properties: + allowedPublishers: + description: A configuration block of allowed publishers as signing + profiles for this code signing configuration. Detailed below. + properties: + signingProfileVersionArns: + description: The Amazon Resource Name (ARN) for each of the + signing profiles. A signing profile defines a trusted user + who can sign a code package. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + arn: + description: The Amazon Resource Name (ARN) of the code signing + configuration. + type: string + configId: + description: Unique identifier for the code signing configuration. + type: string + description: + description: Descriptive name for this code signing configuration. + type: string + id: + type: string + lastModified: + description: The date and time that the code signing configuration + was last modified. 
+ type: string + policies: + description: A configuration block of code signing policies that + define the actions to take if the validation checks fail. Detailed + below. + properties: + untrustedArtifactOnDeployment: + description: 'Code signing configuration policy for deployment + validation failure. If you set the policy to Enforce, Lambda + blocks the deployment request if code-signing validation + checks fail. If you set the policy to Warn, Lambda allows + the deployment and creates a CloudWatch log. Valid values: + Warn, Enforce. Default value: Warn.' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lambda.aws.upbound.io_eventsourcemappings.yaml b/package/crds/lambda.aws.upbound.io_eventsourcemappings.yaml index 513aedc659..518da76fb0 100644 --- a/package/crds/lambda.aws.upbound.io_eventsourcemappings.yaml +++ b/package/crds/lambda.aws.upbound.io_eventsourcemappings.yaml @@ -1225,3 +1225,1162 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EventSourceMapping is the Schema for the EventSourceMappings + API. Provides a Lambda event source mapping. This allows Lambda functions + to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming + for Apache Kafka (MSK). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EventSourceMappingSpec defines the desired state of EventSourceMapping + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + amazonManagedKafkaEventSourceConfig: + description: Additional configuration block for Amazon Managed + Kafka sources. Incompatible with "self_managed_event_source" + and "self_managed_kafka_event_source_config". Detailed below. + properties: + consumerGroupId: + description: A Kafka consumer group ID between 1 and 200 characters + for use when creating this event source mapping. If one + is not specified, this value will be automatically generated. + See AmazonManagedKafkaEventSourceConfig Syntax. + type: string + type: object + batchSize: + description: The largest number of records that Lambda will retrieve + from your event source at the time of invocation. 
Defaults to + 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS. + type: number + bisectBatchOnFunctionError: + description: If the function returns an error, split the batch + in two and retry. Only available for stream sources (DynamoDB + and Kinesis). Defaults to false. + type: boolean + destinationConfig: + description: An Amazon SQS queue, Amazon SNS topic or Amazon S3 + bucket (only available for Kafka sources) destination for failed + records. Only available for stream sources (DynamoDB and Kinesis) + and Kafka sources (Amazon MSK and Self-managed Apache Kafka). + Detailed below. + properties: + onFailure: + description: The destination configuration for failed invocations. + Detailed below. + properties: + destinationArn: + description: The Amazon Resource Name (ARN) of the destination + resource. + type: string + type: object + type: object + documentDbEventSourceConfig: + description: Configuration settings for a DocumentDB event source. + Detailed below. + properties: + collectionName: + description: The name of the collection to consume within + the database. If you do not specify a collection, Lambda + consumes all collections. + type: string + databaseName: + description: The name of the database to consume within the + DocumentDB cluster. + type: string + fullDocument: + description: 'Determines what DocumentDB sends to your event + stream during document update operations. If set to UpdateLookup, + DocumentDB sends a delta describing the changes, along with + a copy of the entire document. Otherwise, DocumentDB sends + only a partial document that contains the changes. Valid + values: UpdateLookup, Default.' + type: string + type: object + enabled: + description: Determines if the mapping will be enabled on creation. + Defaults to true. + type: boolean + eventSourceArn: + description: The event source ARN - this is required for Kinesis + stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or + DocumentDB change stream. 
It is incompatible with a Self Managed + Kafka source. + type: string + filterCriteria: + description: The criteria to use for event filtering Kinesis stream, + DynamoDB stream, SQS queue event sources. Detailed below. + properties: + filter: + description: A set of up to 5 filter. If an event satisfies + at least one, Lambda sends the event to the function or + adds it to the next batch. Detailed below. + items: + properties: + pattern: + description: A filter pattern up to 4096 characters. + See Filter Rule Syntax. + type: string + type: object + type: array + type: object + functionName: + description: The name or the ARN of the Lambda function that will + be subscribing to events. + type: string + functionNameRef: + description: Reference to a Function in lambda to populate functionName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionNameSelector: + description: Selector for a Function in lambda to populate functionName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + functionResponseTypes: + description: 'A list of current response type enums applied to + the event source mapping for AWS Lambda checkpointing. Only + available for SQS and stream sources (DynamoDB and Kinesis). + Valid values: ReportBatchItemFailures.' + items: + type: string + type: array + x-kubernetes-list-type: set + maximumBatchingWindowInSeconds: + description: The maximum amount of time to gather records before + invoking the function, in seconds (between 0 and 300). Records + will continue to buffer (or accumulate in the case of an SQS + queue event source) until either maximum_batching_window_in_seconds + expires or batch_size has been met. For streaming event sources, + defaults to as soon as records are available in the stream. + If the batch it reads from the stream/queue only has one record + in it, Lambda only sends one record to the function. Only available + for stream sources (DynamoDB and Kinesis) and SQS standard queues. 
+ type: number + maximumRecordAgeInSeconds: + description: The maximum age of a record that Lambda sends to + a function for processing. Only available for stream sources + (DynamoDB and Kinesis). Must be either -1 (forever, and the + default value) or between 60 and 604800 (inclusive). + type: number + maximumRetryAttempts: + description: The maximum number of times to retry when the function + returns an error. Only available for stream sources (DynamoDB + and Kinesis). Minimum and default of -1 (forever), maximum of + 10000. + type: number + parallelizationFactor: + description: The number of batches to process from each shard + concurrently. Only available for stream sources (DynamoDB and + Kinesis). Minimum and default of 1, maximum of 10. + type: number + queues: + description: The name of the Amazon MQ broker destination queue + to consume. Only available for MQ sources. The list must contain + exactly one queue name. + items: + type: string + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + scalingConfig: + description: Scaling configuration of the event source. Only available + for SQS queues. Detailed below. + properties: + maximumConcurrency: + description: Limits the number of concurrent instances that + the Amazon SQS event source can invoke. Must be between + 2 and 1000. See Configuring maximum concurrency for Amazon + SQS event sources. + type: number + type: object + selfManagedEventSource: + description: For Self Managed Kafka sources, the location of the + self managed cluster. If set, configuration must also include + source_access_configuration. Detailed below. + properties: + endpoints: + additionalProperties: + type: string + description: A map of endpoints for the self managed source. For + Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS + and the value should be a string with a comma separated + list of broker endpoints. 
+ type: object + x-kubernetes-map-type: granular + type: object + selfManagedKafkaEventSourceConfig: + description: Additional configuration block for Self Managed Kafka + sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". + Detailed below. + properties: + consumerGroupId: + description: A Kafka consumer group ID between 1 and 200 characters + for use when creating this event source mapping. If one + is not specified, this value will be automatically generated. + See SelfManagedKafkaEventSourceConfig Syntax. + type: string + type: object + sourceAccessConfiguration: + description: ': For Self Managed Kafka sources, the access configuration + for the source. If set, configuration must also include self_managed_event_source. + Detailed below.' + items: + properties: + type: + description: The type of authentication protocol, VPC components, + or virtual host for your event source. For valid values, + refer to the AWS documentation. + type: string + uri: + description: The URI for this configuration. For type VPC_SUBNET + the value should be subnet:subnet_id where subnet_id is + the value you would find in an aws_subnet resource's id + attribute. For type VPC_SECURITY_GROUP the value should + be security_group:security_group_id where security_group_id + is the value you would find in an aws_security_group resource's + id attribute. + type: string + type: object + type: array + startingPosition: + description: The position in the stream where AWS Lambda should + start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST + or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK + or Self Managed Apache Kafka. Must not be provided if getting + events from SQS. More information about these positions can + be found in the AWS DynamoDB Streams API Reference and AWS Kinesis + API Reference. 
+ type: string + startingPositionTimestamp: + description: A timestamp in RFC3339 format of the data record + which to start reading when using starting_position set to AT_TIMESTAMP. + If a record with this exact timestamp does not exist, the next + later record is chosen. If the timestamp is older than the current + trim horizon, the oldest available record is chosen. + type: string + topics: + description: The name of the Kafka topics. Only available for + MSK sources. A single topic name must be specified. + items: + type: string + type: array + x-kubernetes-list-type: set + tumblingWindowInSeconds: + description: The duration in seconds of a processing window for + AWS Lambda streaming analytics. The range is between 1 second + up to 900 seconds. Only available for stream sources (DynamoDB + and Kinesis). + type: number + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + amazonManagedKafkaEventSourceConfig: + description: Additional configuration block for Amazon Managed + Kafka sources. Incompatible with "self_managed_event_source" + and "self_managed_kafka_event_source_config". Detailed below. + properties: + consumerGroupId: + description: A Kafka consumer group ID between 1 and 200 characters + for use when creating this event source mapping. 
If one + is not specified, this value will be automatically generated. + See AmazonManagedKafkaEventSourceConfig Syntax. + type: string + type: object + batchSize: + description: The largest number of records that Lambda will retrieve + from your event source at the time of invocation. Defaults to + 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS. + type: number + bisectBatchOnFunctionError: + description: If the function returns an error, split the batch + in two and retry. Only available for stream sources (DynamoDB + and Kinesis). Defaults to false. + type: boolean + destinationConfig: + description: An Amazon SQS queue, Amazon SNS topic or Amazon S3 + bucket (only available for Kafka sources) destination for failed + records. Only available for stream sources (DynamoDB and Kinesis) + and Kafka sources (Amazon MSK and Self-managed Apache Kafka). + Detailed below. + properties: + onFailure: + description: The destination configuration for failed invocations. + Detailed below. + properties: + destinationArn: + description: The Amazon Resource Name (ARN) of the destination + resource. + type: string + type: object + type: object + documentDbEventSourceConfig: + description: Configuration settings for a DocumentDB event source. + Detailed below. + properties: + collectionName: + description: The name of the collection to consume within + the database. If you do not specify a collection, Lambda + consumes all collections. + type: string + databaseName: + description: The name of the database to consume within the + DocumentDB cluster. + type: string + fullDocument: + description: 'Determines what DocumentDB sends to your event + stream during document update operations. If set to UpdateLookup, + DocumentDB sends a delta describing the changes, along with + a copy of the entire document. Otherwise, DocumentDB sends + only a partial document that contains the changes. Valid + values: UpdateLookup, Default.' 
+ type: string + type: object + enabled: + description: Determines if the mapping will be enabled on creation. + Defaults to true. + type: boolean + eventSourceArn: + description: The event source ARN - this is required for Kinesis + stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or + DocumentDB change stream. It is incompatible with a Self Managed + Kafka source. + type: string + filterCriteria: + description: The criteria to use for event filtering Kinesis stream, + DynamoDB stream, SQS queue event sources. Detailed below. + properties: + filter: + description: A set of up to 5 filter. If an event satisfies + at least one, Lambda sends the event to the function or + adds it to the next batch. Detailed below. + items: + properties: + pattern: + description: A filter pattern up to 4096 characters. + See Filter Rule Syntax. + type: string + type: object + type: array + type: object + functionName: + description: The name or the ARN of the Lambda function that will + be subscribing to events. + type: string + functionNameRef: + description: Reference to a Function in lambda to populate functionName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionNameSelector: + description: Selector for a Function in lambda to populate functionName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + functionResponseTypes: + description: 'A list of current response type enums applied to + the event source mapping for AWS Lambda checkpointing. Only + available for SQS and stream sources (DynamoDB and Kinesis). + Valid values: ReportBatchItemFailures.' + items: + type: string + type: array + x-kubernetes-list-type: set + maximumBatchingWindowInSeconds: + description: The maximum amount of time to gather records before + invoking the function, in seconds (between 0 and 300). Records + will continue to buffer (or accumulate in the case of an SQS + queue event source) until either maximum_batching_window_in_seconds + expires or batch_size has been met. 
For streaming event sources, + defaults to as soon as records are available in the stream. + If the batch it reads from the stream/queue only has one record + in it, Lambda only sends one record to the function. Only available + for stream sources (DynamoDB and Kinesis) and SQS standard queues. + type: number + maximumRecordAgeInSeconds: + description: The maximum age of a record that Lambda sends to + a function for processing. Only available for stream sources + (DynamoDB and Kinesis). Must be either -1 (forever, and the + default value) or between 60 and 604800 (inclusive). + type: number + maximumRetryAttempts: + description: The maximum number of times to retry when the function + returns an error. Only available for stream sources (DynamoDB + and Kinesis). Minimum and default of -1 (forever), maximum of + 10000. + type: number + parallelizationFactor: + description: The number of batches to process from each shard + concurrently. Only available for stream sources (DynamoDB and + Kinesis). Minimum and default of 1, maximum of 10. + type: number + queues: + description: The name of the Amazon MQ broker destination queue + to consume. Only available for MQ sources. The list must contain + exactly one queue name. + items: + type: string + type: array + scalingConfig: + description: Scaling configuration of the event source. Only available + for SQS queues. Detailed below. + properties: + maximumConcurrency: + description: Limits the number of concurrent instances that + the Amazon SQS event source can invoke. Must be between + 2 and 1000. See Configuring maximum concurrency for Amazon + SQS event sources. + type: number + type: object + selfManagedEventSource: + description: For Self Managed Kafka sources, the location of the + self managed cluster. If set, configuration must also include + source_access_configuration. Detailed below. + properties: + endpoints: + additionalProperties: + type: string + description: A map of endpoints for the self managed source. 
For + Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS + and the value should be a string with a comma separated + list of broker endpoints. + type: object + x-kubernetes-map-type: granular + type: object + selfManagedKafkaEventSourceConfig: + description: Additional configuration block for Self Managed Kafka + sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". + Detailed below. + properties: + consumerGroupId: + description: A Kafka consumer group ID between 1 and 200 characters + for use when creating this event source mapping. If one + is not specified, this value will be automatically generated. + See SelfManagedKafkaEventSourceConfig Syntax. + type: string + type: object + sourceAccessConfiguration: + description: ': For Self Managed Kafka sources, the access configuration + for the source. If set, configuration must also include self_managed_event_source. + Detailed below.' + items: + properties: + type: + description: The type of authentication protocol, VPC components, + or virtual host for your event source. For valid values, + refer to the AWS documentation. + type: string + uri: + description: The URI for this configuration. For type VPC_SUBNET + the value should be subnet:subnet_id where subnet_id is + the value you would find in an aws_subnet resource's id + attribute. For type VPC_SECURITY_GROUP the value should + be security_group:security_group_id where security_group_id + is the value you would find in an aws_security_group resource's + id attribute. + type: string + type: object + type: array + startingPosition: + description: The position in the stream where AWS Lambda should + start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST + or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK + or Self Managed Apache Kafka. Must not be provided if getting + events from SQS. 
More information about these positions can + be found in the AWS DynamoDB Streams API Reference and AWS Kinesis + API Reference. + type: string + startingPositionTimestamp: + description: A timestamp in RFC3339 format of the data record + which to start reading when using starting_position set to AT_TIMESTAMP. + If a record with this exact timestamp does not exist, the next + later record is chosen. If the timestamp is older than the current + trim horizon, the oldest available record is chosen. + type: string + topics: + description: The name of the Kafka topics. Only available for + MSK sources. A single topic name must be specified. + items: + type: string + type: array + x-kubernetes-list-type: set + tumblingWindowInSeconds: + description: The duration in seconds of a processing window for + AWS Lambda streaming analytics. The range is between 1 second + up to 900 seconds. Only available for stream sources (DynamoDB + and Kinesis). + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: EventSourceMappingStatus defines the observed state of EventSourceMapping. + properties: + atProvider: + properties: + amazonManagedKafkaEventSourceConfig: + description: Additional configuration block for Amazon Managed + Kafka sources. Incompatible with "self_managed_event_source" + and "self_managed_kafka_event_source_config". Detailed below. + properties: + consumerGroupId: + description: A Kafka consumer group ID between 1 and 200 characters + for use when creating this event source mapping. If one + is not specified, this value will be automatically generated. + See AmazonManagedKafkaEventSourceConfig Syntax. + type: string + type: object + batchSize: + description: The largest number of records that Lambda will retrieve + from your event source at the time of invocation. Defaults to + 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS. + type: number + bisectBatchOnFunctionError: + description: If the function returns an error, split the batch + in two and retry. Only available for stream sources (DynamoDB + and Kinesis). Defaults to false. + type: boolean + destinationConfig: + description: An Amazon SQS queue, Amazon SNS topic or Amazon S3 + bucket (only available for Kafka sources) destination for failed + records. Only available for stream sources (DynamoDB and Kinesis) + and Kafka sources (Amazon MSK and Self-managed Apache Kafka). + Detailed below. 
+ properties: + onFailure: + description: The destination configuration for failed invocations. + Detailed below. + properties: + destinationArn: + description: The Amazon Resource Name (ARN) of the destination + resource. + type: string + type: object + type: object + documentDbEventSourceConfig: + description: Configuration settings for a DocumentDB event source. + Detailed below. + properties: + collectionName: + description: The name of the collection to consume within + the database. If you do not specify a collection, Lambda + consumes all collections. + type: string + databaseName: + description: The name of the database to consume within the + DocumentDB cluster. + type: string + fullDocument: + description: 'Determines what DocumentDB sends to your event + stream during document update operations. If set to UpdateLookup, + DocumentDB sends a delta describing the changes, along with + a copy of the entire document. Otherwise, DocumentDB sends + only a partial document that contains the changes. Valid + values: UpdateLookup, Default.' + type: string + type: object + enabled: + description: Determines if the mapping will be enabled on creation. + Defaults to true. + type: boolean + eventSourceArn: + description: The event source ARN - this is required for Kinesis + stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or + DocumentDB change stream. It is incompatible with a Self Managed + Kafka source. + type: string + filterCriteria: + description: The criteria to use for event filtering Kinesis stream, + DynamoDB stream, SQS queue event sources. Detailed below. + properties: + filter: + description: A set of up to 5 filter. If an event satisfies + at least one, Lambda sends the event to the function or + adds it to the next batch. Detailed below. + items: + properties: + pattern: + description: A filter pattern up to 4096 characters. + See Filter Rule Syntax. 
+ type: string + type: object + type: array + type: object + functionArn: + description: 'The the ARN of the Lambda function the event source + mapping is sending events to. (Note: this is a computed value + that differs from function_name above.)' + type: string + functionName: + description: The name or the ARN of the Lambda function that will + be subscribing to events. + type: string + functionResponseTypes: + description: 'A list of current response type enums applied to + the event source mapping for AWS Lambda checkpointing. Only + available for SQS and stream sources (DynamoDB and Kinesis). + Valid values: ReportBatchItemFailures.' + items: + type: string + type: array + x-kubernetes-list-type: set + id: + type: string + lastModified: + description: The date this resource was last modified. + type: string + lastProcessingResult: + description: The result of the last AWS Lambda invocation of your + Lambda function. + type: string + maximumBatchingWindowInSeconds: + description: The maximum amount of time to gather records before + invoking the function, in seconds (between 0 and 300). Records + will continue to buffer (or accumulate in the case of an SQS + queue event source) until either maximum_batching_window_in_seconds + expires or batch_size has been met. For streaming event sources, + defaults to as soon as records are available in the stream. + If the batch it reads from the stream/queue only has one record + in it, Lambda only sends one record to the function. Only available + for stream sources (DynamoDB and Kinesis) and SQS standard queues. + type: number + maximumRecordAgeInSeconds: + description: The maximum age of a record that Lambda sends to + a function for processing. Only available for stream sources + (DynamoDB and Kinesis). Must be either -1 (forever, and the + default value) or between 60 and 604800 (inclusive). 
+ type: number + maximumRetryAttempts: + description: The maximum number of times to retry when the function + returns an error. Only available for stream sources (DynamoDB + and Kinesis). Minimum and default of -1 (forever), maximum of + 10000. + type: number + parallelizationFactor: + description: The number of batches to process from each shard + concurrently. Only available for stream sources (DynamoDB and + Kinesis). Minimum and default of 1, maximum of 10. + type: number + queues: + description: The name of the Amazon MQ broker destination queue + to consume. Only available for MQ sources. The list must contain + exactly one queue name. + items: + type: string + type: array + scalingConfig: + description: Scaling configuration of the event source. Only available + for SQS queues. Detailed below. + properties: + maximumConcurrency: + description: Limits the number of concurrent instances that + the Amazon SQS event source can invoke. Must be between + 2 and 1000. See Configuring maximum concurrency for Amazon + SQS event sources. + type: number + type: object + selfManagedEventSource: + description: For Self Managed Kafka sources, the location of the + self managed cluster. If set, configuration must also include + source_access_configuration. Detailed below. + properties: + endpoints: + additionalProperties: + type: string + description: A map of endpoints for the self managed source. For + Kafka self-managed sources, the key should be KAFKA_BOOTSTRAP_SERVERS + and the value should be a string with a comma separated + list of broker endpoints. + type: object + x-kubernetes-map-type: granular + type: object + selfManagedKafkaEventSourceConfig: + description: Additional configuration block for Self Managed Kafka + sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". + Detailed below. 
+ properties: + consumerGroupId: + description: A Kafka consumer group ID between 1 and 200 characters + for use when creating this event source mapping. If one + is not specified, this value will be automatically generated. + See SelfManagedKafkaEventSourceConfig Syntax. + type: string + type: object + sourceAccessConfiguration: + description: ': For Self Managed Kafka sources, the access configuration + for the source. If set, configuration must also include self_managed_event_source. + Detailed below.' + items: + properties: + type: + description: The type of authentication protocol, VPC components, + or virtual host for your event source. For valid values, + refer to the AWS documentation. + type: string + uri: + description: The URI for this configuration. For type VPC_SUBNET + the value should be subnet:subnet_id where subnet_id is + the value you would find in an aws_subnet resource's id + attribute. For type VPC_SECURITY_GROUP the value should + be security_group:security_group_id where security_group_id + is the value you would find in an aws_security_group resource's + id attribute. + type: string + type: object + type: array + startingPosition: + description: The position in the stream where AWS Lambda should + start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST + or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK + or Self Managed Apache Kafka. Must not be provided if getting + events from SQS. More information about these positions can + be found in the AWS DynamoDB Streams API Reference and AWS Kinesis + API Reference. + type: string + startingPositionTimestamp: + description: A timestamp in RFC3339 format of the data record + which to start reading when using starting_position set to AT_TIMESTAMP. + If a record with this exact timestamp does not exist, the next + later record is chosen. If the timestamp is older than the current + trim horizon, the oldest available record is chosen. 
+ type: string + state: + description: The state of the event source mapping. + type: string + stateTransitionReason: + description: The reason the event source mapping is in its current + state. + type: string + topics: + description: The name of the Kafka topics. Only available for + MSK sources. A single topic name must be specified. + items: + type: string + type: array + x-kubernetes-list-type: set + tumblingWindowInSeconds: + description: The duration in seconds of a processing window for + AWS Lambda streaming analytics. The range is between 1 second + up to 900 seconds. Only available for stream sources (DynamoDB + and Kinesis). + type: number + uuid: + description: The UUID of the created event source mapping. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lambda.aws.upbound.io_functioneventinvokeconfigs.yaml b/package/crds/lambda.aws.upbound.io_functioneventinvokeconfigs.yaml index 77266c621c..d88ffeebfe 100644 --- a/package/crds/lambda.aws.upbound.io_functioneventinvokeconfigs.yaml +++ b/package/crds/lambda.aws.upbound.io_functioneventinvokeconfigs.yaml @@ -791,3 +791,747 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FunctionEventInvokeConfig is the Schema for the FunctionEventInvokeConfigs + API. Manages an asynchronous invocation configuration for a Lambda Function + or Alias. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionEventInvokeConfigSpec defines the desired state of + FunctionEventInvokeConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + destinationConfig: + description: Configuration block with destination configuration. + See below for details. + properties: + onFailure: + description: Configuration block with destination configuration + for failed asynchronous invocations. See below for details. + properties: + destination: + description: Amazon Resource Name (ARN) of the destination + resource. See the Lambda Developer Guide for acceptable + resource types and associated IAM permissions. + type: string + destinationRef: + description: Reference to a Queue in sqs to populate destination. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + destinationSelector: + description: Selector for a Queue in sqs to populate destination. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + onSuccess: + description: Configuration block with destination configuration + for successful asynchronous invocations. See below for details. + properties: + destination: + description: Amazon Resource Name (ARN) of the destination + resource. See the Lambda Developer Guide for acceptable + resource types and associated IAM permissions. + type: string + destinationRef: + description: Reference to a Topic in sns to populate destination. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + destinationSelector: + description: Selector for a Topic in sns to populate destination. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + functionName: + description: Name or Amazon Resource Name (ARN) of the Lambda + Function, omitting any version or alias qualifier. + type: string + maximumEventAgeInSeconds: + description: Maximum age of a request that Lambda sends to a function + for processing in seconds. Valid values between 60 and 21600. + type: number + maximumRetryAttempts: + description: Maximum number of times to retry when the function + returns an error. Valid values between 0 and 2. Defaults to + 2. + type: number + qualifier: + description: Lambda Function published version, $LATEST, or Lambda + Alias name. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + destinationConfig: + description: Configuration block with destination configuration. + See below for details. + properties: + onFailure: + description: Configuration block with destination configuration + for failed asynchronous invocations. See below for details. + properties: + destination: + description: Amazon Resource Name (ARN) of the destination + resource. See the Lambda Developer Guide for acceptable + resource types and associated IAM permissions. + type: string + destinationRef: + description: Reference to a Queue in sqs to populate destination. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + destinationSelector: + description: Selector for a Queue in sqs to populate destination. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + onSuccess: + description: Configuration block with destination configuration + for successful asynchronous invocations. See below for details. + properties: + destination: + description: Amazon Resource Name (ARN) of the destination + resource. See the Lambda Developer Guide for acceptable + resource types and associated IAM permissions. + type: string + destinationRef: + description: Reference to a Topic in sns to populate destination. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + destinationSelector: + description: Selector for a Topic in sns to populate destination. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + functionName: + description: Name or Amazon Resource Name (ARN) of the Lambda + Function, omitting any version or alias qualifier. + type: string + maximumEventAgeInSeconds: + description: Maximum age of a request that Lambda sends to a function + for processing in seconds. Valid values between 60 and 21600. + type: number + maximumRetryAttempts: + description: Maximum number of times to retry when the function + returns an error. Valid values between 0 and 2. 
Defaults to + 2. + type: number + qualifier: + description: Lambda Function published version, $LATEST, or Lambda + Alias name. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.functionName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.functionName) + || (has(self.initProvider) && has(self.initProvider.functionName))' + status: + description: FunctionEventInvokeConfigStatus defines the observed state + of FunctionEventInvokeConfig. + properties: + atProvider: + properties: + destinationConfig: + description: Configuration block with destination configuration. + See below for details. + properties: + onFailure: + description: Configuration block with destination configuration + for failed asynchronous invocations. See below for details. + properties: + destination: + description: Amazon Resource Name (ARN) of the destination + resource. See the Lambda Developer Guide for acceptable + resource types and associated IAM permissions. + type: string + type: object + onSuccess: + description: Configuration block with destination configuration + for successful asynchronous invocations. See below for details. + properties: + destination: + description: Amazon Resource Name (ARN) of the destination + resource. See the Lambda Developer Guide for acceptable + resource types and associated IAM permissions. + type: string + type: object + type: object + functionName: + description: Name or Amazon Resource Name (ARN) of the Lambda + Function, omitting any version or alias qualifier. + type: string + id: + description: Fully qualified Lambda Function name or Amazon Resource + Name (ARN) + type: string + maximumEventAgeInSeconds: + description: Maximum age of a request that Lambda sends to a function + for processing in seconds. Valid values between 60 and 21600. 
+ type: number + maximumRetryAttempts: + description: Maximum number of times to retry when the function + returns an error. Valid values between 0 and 2. Defaults to + 2. + type: number + qualifier: + description: Lambda Function published version, $LATEST, or Lambda + Alias name. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lambda.aws.upbound.io_functions.yaml b/package/crds/lambda.aws.upbound.io_functions.yaml index b1e5d78103..aa60d9e81c 100644 --- a/package/crds/lambda.aws.upbound.io_functions.yaml +++ b/package/crds/lambda.aws.upbound.io_functions.yaml @@ -2255,3 +2255,2186 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Function is the Schema for the Functions API. Provides a Lambda + Function resource. Lambda allows you to trigger execution of code in response + to events in AWS, enabling serverless backend solutions. The Lambda Function + itself includes source code and runtime configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionSpec defines the desired state of Function + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + architectures: + description: Instruction set architecture for your Lambda function. + Valid values are ["x86_64"] and ["arm64"]. Default is ["x86_64"]. + Removing this attribute, function's architecture stay the same. + items: + type: string + type: array + codeSigningConfigArn: + description: To enable code signing for this function, specify + the ARN of a code-signing configuration. A code-signing configuration + includes a set of signing profiles, which define the trusted + publishers for this function. + type: string + deadLetterConfig: + description: Configuration block. Detailed below. + properties: + targetArn: + description: ARN of an SNS topic or SQS queue to notify when + an invocation fails. If this option is used, the function's + IAM role must be granted suitable access to write to the + target object, which means allowing either the sns:Publish + or sqs:SendMessage action on this ARN, depending on which + service is targeted. 
+ type: string + type: object + description: + description: Description of what your Lambda Function does. + type: string + environment: + description: Configuration block. Detailed below. + properties: + variables: + additionalProperties: + type: string + description: Map of environment variables that are accessible + from the function code during execution. If provided at + least one key must be present. + type: object + x-kubernetes-map-type: granular + type: object + ephemeralStorage: + description: The amount of Ephemeral storage(/tmp) to allocate + for the Lambda Function in MB. This parameter is used to expand + the total amount of Ephemeral storage available, beyond the + default amount of 512MB. Detailed below. + properties: + size: + description: The size of the Lambda function Ephemeral storage(/tmp) + represented in MB. The minimum supported ephemeral_storage + value defaults to 512MB and the maximum supported value + is 10240MB. + type: number + type: object + fileSystemConfig: + description: Configuration block. Detailed below. + properties: + arn: + description: Amazon Resource Name (ARN) of the Amazon EFS + Access Point that provides access to the file system. + type: string + arnRef: + description: Reference to a AccessPoint in efs to populate + arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a AccessPoint in efs to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + localMountPath: + description: Path where the function can access the file system, + starting with /mnt/. + type: string + type: object + handler: + description: Function entrypoint in your code. + type: string + imageConfig: + description: Configuration block. Detailed below. + properties: + command: + description: Parameters that you want to pass in with entry_point. 
+ items: + type: string + type: array + entryPoint: + description: Entry point to your application, which is typically + the location of the runtime executable. + items: + type: string + type: array + workingDirectory: + description: Working directory. + type: string + type: object + imageUri: + description: ECR image URI containing the function's deployment + package. Exactly one of filename, image_uri, or s3_bucket must + be specified. + type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of the AWS Key Management + Service (KMS) key that is used to encrypt environment variables. + If this configuration is not provided when environment variables + are in use, AWS Lambda uses a default service key. To fix the + perpetual difference, remove this configuration. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + layers: + description: List of Lambda Layer Version ARNs (maximum of 5) + to attach to your Lambda Function. See Lambda Layers + items: + type: string + type: array + loggingConfig: + description: Configuration block used to specify advanced logging + settings. Detailed below. + properties: + applicationLogLevel: + description: for JSON structured logs, choose the detail level + of the logs your application sends to CloudWatch when using + supported logging libraries. + type: string + logFormat: + description: select between Text and structured JSON format + for your function's logs. + type: string + logGroup: + description: the CloudWatch log group your function sends + logs to. + type: string + systemLogLevel: + description: for JSON structured logs, choose the detail level + of the Lambda platform event logs sent to CloudWatch, such + as ERROR, DEBUG, or INFO. 
+ type: string + type: object + memorySize: + description: Amount of memory in MB your Lambda Function can use + at runtime. Defaults to 128. See Limits + type: number + packageType: + description: Lambda deployment package type. Valid values are + Zip and Image. Defaults to Zip. + type: string + publish: + description: Whether to publish creation/change as new Lambda + Function Version. Defaults to false. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + replaceSecurityGroupsOnDestroy: + description: AWS no longer supports this operation. This attribute + now has no effect and will be removed in a future major version. + Whether to replace the security groups on associated lambda + network interfaces upon destruction. Removing these security + groups from orphaned network interfaces can speed up security + group deletion times by avoiding a dependency on AWS's internal + cleanup operations. By default, the ENI security groups will + be replaced with the default security group in the function's + VPC. Set the replacement_security_group_ids attribute to use + a custom list of security groups for replacement. + type: boolean + replacementSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate replacementSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + replacementSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + replacementSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + replacementSecurityGroupIds: + description: List of security group IDs to assign to orphaned + Lambda function network interfaces upon destruction. replace_security_groups_on_destroy + must be set to true to use this attribute. + items: + type: string + type: array + x-kubernetes-list-type: set + reservedConcurrentExecutions: + description: Amount of reserved concurrent executions for this + lambda function. 
A value of 0 disables lambda from being triggered + and -1 removes any concurrency limitations. Defaults to Unreserved + Concurrency Limits -1. See Managing Concurrency + type: number + role: + description: Amazon Resource Name (ARN) of the function's execution + role. The role provides the function's identity and access to + AWS services and resources. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + runtime: + description: Identifier of the function's runtime. See Runtimes + for valid values. + type: string + s3Bucket: + description: S3 bucket location containing the function's deployment + package. This bucket must reside in the same AWS region where + you are creating the Lambda function. Exactly one of filename, + image_uri, or s3_bucket must be specified. When s3_bucket is + set, s3_key is required. + type: string + s3BucketRef: + description: Reference to a Bucket in s3 to populate s3Bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + s3BucketSelector: + description: Selector for a Bucket in s3 to populate s3Bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3Key: + description: S3 key of an object containing the function's deployment + package. When s3_bucket is set, s3_key is required. + type: string + s3ObjectVersion: + description: Object version containing the function's deployment + package. Conflicts with filename and image_uri. + type: string + skipDestroy: + type: boolean + snapStart: + description: Snap start settings block. Detailed below. + properties: + applyOn: + description: Conditions where snap start is enabled. Valid + values are PublishedVersions. + type: string + type: object + sourceCodeHash: + description: Used to trigger updates. 
Must be set to a base64 + encoded SHA256 hash of the package file specified with either + filename or s3_key. If you have specified this field manually, + it should be the actual (computed) hash of the underlying lambda + function specified in the filename, image_uri, s3_bucket fields. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timeout: + description: Amount of time your Lambda Function has to run in + seconds. Defaults to 3. See Limits. + type: number + tracingConfig: + description: Configuration block. Detailed below. + properties: + mode: + description: Whether to sample and trace a subset of incoming + requests with AWS X-Ray. Valid values are PassThrough and + Active. If PassThrough, Lambda will only trace the request + from an upstream service if it contains a tracing header + with "sampled=1". If Active, Lambda will respect any tracing + header it receives from an upstream service. If no tracing + header is received, Lambda will call X-Ray for a tracing + decision. + type: string + type: object + vpcConfig: + description: Configuration block. Detailed below. + properties: + ipv6AllowedForDualStack: + description: Allows outbound IPv6 traffic on VPC functions + that are connected to dual-stack subnets. Default is false. + type: boolean + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: List of security group IDs associated with the + Lambda function. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: List of subnet IDs associated with the Lambda + function. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + architectures: + description: Instruction set architecture for your Lambda function. + Valid values are ["x86_64"] and ["arm64"]. Default is ["x86_64"]. + Removing this attribute, function's architecture stay the same. + items: + type: string + type: array + codeSigningConfigArn: + description: To enable code signing for this function, specify + the ARN of a code-signing configuration. A code-signing configuration + includes a set of signing profiles, which define the trusted + publishers for this function. + type: string + deadLetterConfig: + description: Configuration block. Detailed below. + properties: + targetArn: + description: ARN of an SNS topic or SQS queue to notify when + an invocation fails. If this option is used, the function's + IAM role must be granted suitable access to write to the + target object, which means allowing either the sns:Publish + or sqs:SendMessage action on this ARN, depending on which + service is targeted. 
+ type: string + type: object + description: + description: Description of what your Lambda Function does. + type: string + environment: + description: Configuration block. Detailed below. + properties: + variables: + additionalProperties: + type: string + description: Map of environment variables that are accessible + from the function code during execution. If provided at + least one key must be present. + type: object + x-kubernetes-map-type: granular + type: object + ephemeralStorage: + description: The amount of Ephemeral storage(/tmp) to allocate + for the Lambda Function in MB. This parameter is used to expand + the total amount of Ephemeral storage available, beyond the + default amount of 512MB. Detailed below. + properties: + size: + description: The size of the Lambda function Ephemeral storage(/tmp) + represented in MB. The minimum supported ephemeral_storage + value defaults to 512MB and the maximum supported value + is 10240MB. + type: number + type: object + fileSystemConfig: + description: Configuration block. Detailed below. + properties: + arn: + description: Amazon Resource Name (ARN) of the Amazon EFS + Access Point that provides access to the file system. + type: string + arnRef: + description: Reference to a AccessPoint in efs to populate + arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a AccessPoint in efs to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + localMountPath: + description: Path where the function can access the file system, + starting with /mnt/. + type: string + type: object + handler: + description: Function entrypoint in your code. + type: string + imageConfig: + description: Configuration block. Detailed below. + properties: + command: + description: Parameters that you want to pass in with entry_point. 
+ items: + type: string + type: array + entryPoint: + description: Entry point to your application, which is typically + the location of the runtime executable. + items: + type: string + type: array + workingDirectory: + description: Working directory. + type: string + type: object + imageUri: + description: ECR image URI containing the function's deployment + package. Exactly one of filename, image_uri, or s3_bucket must + be specified. + type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of the AWS Key Management + Service (KMS) key that is used to encrypt environment variables. + If this configuration is not provided when environment variables + are in use, AWS Lambda uses a default service key. To fix the + perpetual difference, remove this configuration. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + layers: + description: List of Lambda Layer Version ARNs (maximum of 5) + to attach to your Lambda Function. See Lambda Layers + items: + type: string + type: array + loggingConfig: + description: Configuration block used to specify advanced logging + settings. Detailed below. + properties: + applicationLogLevel: + description: for JSON structured logs, choose the detail level + of the logs your application sends to CloudWatch when using + supported logging libraries. + type: string + logFormat: + description: select between Text and structured JSON format + for your function's logs. + type: string + logGroup: + description: the CloudWatch log group your function sends + logs to. + type: string + systemLogLevel: + description: for JSON structured logs, choose the detail level + of the Lambda platform event logs sent to CloudWatch, such + as ERROR, DEBUG, or INFO. 
+ type: string + type: object + memorySize: + description: Amount of memory in MB your Lambda Function can use + at runtime. Defaults to 128. See Limits + type: number + packageType: + description: Lambda deployment package type. Valid values are + Zip and Image. Defaults to Zip. + type: string + publish: + description: Whether to publish creation/change as new Lambda + Function Version. Defaults to false. + type: boolean + replaceSecurityGroupsOnDestroy: + description: AWS no longer supports this operation. This attribute + now has no effect and will be removed in a future major version. + Whether to replace the security groups on associated lambda + network interfaces upon destruction. Removing these security + groups from orphaned network interfaces can speed up security + group deletion times by avoiding a dependency on AWS's internal + cleanup operations. By default, the ENI security groups will + be replaced with the default security group in the function's + VPC. Set the replacement_security_group_ids attribute to use + a custom list of security groups for replacement. + type: boolean + replacementSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate replacementSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + replacementSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + replacementSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + replacementSecurityGroupIds: + description: List of security group IDs to assign to orphaned + Lambda function network interfaces upon destruction. replace_security_groups_on_destroy + must be set to true to use this attribute. + items: + type: string + type: array + x-kubernetes-list-type: set + reservedConcurrentExecutions: + description: Amount of reserved concurrent executions for this + lambda function. 
A value of 0 disables lambda from being triggered + and -1 removes any concurrency limitations. Defaults to Unreserved + Concurrency Limits -1. See Managing Concurrency + type: number + role: + description: Amazon Resource Name (ARN) of the function's execution + role. The role provides the function's identity and access to + AWS services and resources. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + runtime: + description: Identifier of the function's runtime. See Runtimes + for valid values. + type: string + s3Bucket: + description: S3 bucket location containing the function's deployment + package. This bucket must reside in the same AWS region where + you are creating the Lambda function. Exactly one of filename, + image_uri, or s3_bucket must be specified. When s3_bucket is + set, s3_key is required. + type: string + s3BucketRef: + description: Reference to a Bucket in s3 to populate s3Bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + s3BucketSelector: + description: Selector for a Bucket in s3 to populate s3Bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3Key: + description: S3 key of an object containing the function's deployment + package. When s3_bucket is set, s3_key is required. + type: string + s3ObjectVersion: + description: Object version containing the function's deployment + package. Conflicts with filename and image_uri. + type: string + skipDestroy: + type: boolean + snapStart: + description: Snap start settings block. Detailed below. + properties: + applyOn: + description: Conditions where snap start is enabled. Valid + values are PublishedVersions. + type: string + type: object + sourceCodeHash: + description: Used to trigger updates. 
Must be set to a base64 + encoded SHA256 hash of the package file specified with either + filename or s3_key. If you have specified this field manually, + it should be the actual (computed) hash of the underlying lambda + function specified in the filename, image_uri, s3_bucket fields. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timeout: + description: Amount of time your Lambda Function has to run in + seconds. Defaults to 3. See Limits. + type: number + tracingConfig: + description: Configuration block. Detailed below. + properties: + mode: + description: Whether to sample and trace a subset of incoming + requests with AWS X-Ray. Valid values are PassThrough and + Active. If PassThrough, Lambda will only trace the request + from an upstream service if it contains a tracing header + with "sampled=1". If Active, Lambda will respect any tracing + header it receives from an upstream service. If no tracing + header is received, Lambda will call X-Ray for a tracing + decision. + type: string + type: object + vpcConfig: + description: Configuration block. Detailed below. + properties: + ipv6AllowedForDualStack: + description: Allows outbound IPv6 traffic on VPC functions + that are connected to dual-stack subnets. Default is false. + type: boolean + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroupIds: + description: List of security group IDs associated with the + Lambda function. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: List of subnet IDs associated with the Lambda + function. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: FunctionStatus defines the observed state of Function. 
+ properties: + atProvider: + properties: + architectures: + description: Instruction set architecture for your Lambda function. + Valid values are ["x86_64"] and ["arm64"]. Default is ["x86_64"]. + Removing this attribute, function's architecture stay the same. + items: + type: string + type: array + arn: + description: Amazon Resource Name (ARN) identifying your Lambda + Function. + type: string + codeSigningConfigArn: + description: To enable code signing for this function, specify + the ARN of a code-signing configuration. A code-signing configuration + includes a set of signing profiles, which define the trusted + publishers for this function. + type: string + deadLetterConfig: + description: Configuration block. Detailed below. + properties: + targetArn: + description: ARN of an SNS topic or SQS queue to notify when + an invocation fails. If this option is used, the function's + IAM role must be granted suitable access to write to the + target object, which means allowing either the sns:Publish + or sqs:SendMessage action on this ARN, depending on which + service is targeted. + type: string + type: object + description: + description: Description of what your Lambda Function does. + type: string + environment: + description: Configuration block. Detailed below. + properties: + variables: + additionalProperties: + type: string + description: Map of environment variables that are accessible + from the function code during execution. If provided at + least one key must be present. + type: object + x-kubernetes-map-type: granular + type: object + ephemeralStorage: + description: The amount of Ephemeral storage(/tmp) to allocate + for the Lambda Function in MB. This parameter is used to expand + the total amount of Ephemeral storage available, beyond the + default amount of 512MB. Detailed below. + properties: + size: + description: The size of the Lambda function Ephemeral storage(/tmp) + represented in MB. 
The minimum supported ephemeral_storage + value defaults to 512MB and the maximum supported value + is 10240MB. + type: number + type: object + fileSystemConfig: + description: Configuration block. Detailed below. + properties: + arn: + description: Amazon Resource Name (ARN) of the Amazon EFS + Access Point that provides access to the file system. + type: string + localMountPath: + description: Path where the function can access the file system, + starting with /mnt/. + type: string + type: object + handler: + description: Function entrypoint in your code. + type: string + id: + type: string + imageConfig: + description: Configuration block. Detailed below. + properties: + command: + description: Parameters that you want to pass in with entry_point. + items: + type: string + type: array + entryPoint: + description: Entry point to your application, which is typically + the location of the runtime executable. + items: + type: string + type: array + workingDirectory: + description: Working directory. + type: string + type: object + imageUri: + description: ECR image URI containing the function's deployment + package. Exactly one of filename, image_uri, or s3_bucket must + be specified. + type: string + invokeArn: + description: ARN to be used for invoking Lambda Function from + API Gateway - to be used in aws_api_gateway_integration's uri. + type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of the AWS Key Management + Service (KMS) key that is used to encrypt environment variables. + If this configuration is not provided when environment variables + are in use, AWS Lambda uses a default service key. To fix the + perpetual difference, remove this configuration. + type: string + lastModified: + description: Date this resource was last modified. + type: string + layers: + description: List of Lambda Layer Version ARNs (maximum of 5) + to attach to your Lambda Function. 
See Lambda Layers + items: + type: string + type: array + loggingConfig: + description: Configuration block used to specify advanced logging + settings. Detailed below. + properties: + applicationLogLevel: + description: for JSON structured logs, choose the detail level + of the logs your application sends to CloudWatch when using + supported logging libraries. + type: string + logFormat: + description: select between Text and structured JSON format + for your function's logs. + type: string + logGroup: + description: the CloudWatch log group your function sends + logs to. + type: string + systemLogLevel: + description: for JSON structured logs, choose the detail level + of the Lambda platform event logs sent to CloudWatch, such + as ERROR, DEBUG, or INFO. + type: string + type: object + memorySize: + description: Amount of memory in MB your Lambda Function can use + at runtime. Defaults to 128. See Limits + type: number + packageType: + description: Lambda deployment package type. Valid values are + Zip and Image. Defaults to Zip. + type: string + publish: + description: Whether to publish creation/change as new Lambda + Function Version. Defaults to false. + type: boolean + qualifiedArn: + description: ARN identifying your Lambda Function Version (if + versioning is enabled via publish = true). + type: string + qualifiedInvokeArn: + description: Qualified ARN (ARN with lambda version number) to + be used for invoking Lambda Function from API Gateway - to be + used in aws_api_gateway_integration's uri. + type: string + replaceSecurityGroupsOnDestroy: + description: AWS no longer supports this operation. This attribute + now has no effect and will be removed in a future major version. + Whether to replace the security groups on associated lambda + network interfaces upon destruction. Removing these security + groups from orphaned network interfaces can speed up security + group deletion times by avoiding a dependency on AWS's internal + cleanup operations. 
By default, the ENI security groups will + be replaced with the default security group in the function's + VPC. Set the replacement_security_group_ids attribute to use + a custom list of security groups for replacement. + type: boolean + replacementSecurityGroupIds: + description: List of security group IDs to assign to orphaned + Lambda function network interfaces upon destruction. replace_security_groups_on_destroy + must be set to true to use this attribute. + items: + type: string + type: array + x-kubernetes-list-type: set + reservedConcurrentExecutions: + description: Amount of reserved concurrent executions for this + lambda function. A value of 0 disables lambda from being triggered + and -1 removes any concurrency limitations. Defaults to Unreserved + Concurrency Limits -1. See Managing Concurrency + type: number + role: + description: Amazon Resource Name (ARN) of the function's execution + role. The role provides the function's identity and access to + AWS services and resources. + type: string + runtime: + description: Identifier of the function's runtime. See Runtimes + for valid values. + type: string + s3Bucket: + description: S3 bucket location containing the function's deployment + package. This bucket must reside in the same AWS region where + you are creating the Lambda function. Exactly one of filename, + image_uri, or s3_bucket must be specified. When s3_bucket is + set, s3_key is required. + type: string + s3Key: + description: S3 key of an object containing the function's deployment + package. When s3_bucket is set, s3_key is required. + type: string + s3ObjectVersion: + description: Object version containing the function's deployment + package. Conflicts with filename and image_uri. + type: string + signingJobArn: + description: ARN of the signing job. + type: string + signingProfileVersionArn: + description: ARN of the signing profile version. 
+ type: string + skipDestroy: + type: boolean + snapStart: + description: Snap start settings block. Detailed below. + properties: + applyOn: + description: Conditions where snap start is enabled. Valid + values are PublishedVersions. + type: string + optimizationStatus: + description: Optimization status of the snap start configuration. + Valid values are On and Off. + type: string + type: object + sourceCodeHash: + description: Used to trigger updates. Must be set to a base64 + encoded SHA256 hash of the package file specified with either + filename or s3_key. If you have specified this field manually, + it should be the actual (computed) hash of the underlying lambda + function specified in the filename, image_uri, s3_bucket fields. + type: string + sourceCodeSize: + description: Size in bytes of the function .zip file. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + timeout: + description: Amount of time your Lambda Function has to run in + seconds. Defaults to 3. See Limits. + type: number + tracingConfig: + description: Configuration block. Detailed below. + properties: + mode: + description: Whether to sample and trace a subset of incoming + requests with AWS X-Ray. Valid values are PassThrough and + Active. If PassThrough, Lambda will only trace the request + from an upstream service if it contains a tracing header + with "sampled=1". If Active, Lambda will respect any tracing + header it receives from an upstream service. If no tracing + header is received, Lambda will call X-Ray for a tracing + decision. + type: string + type: object + version: + description: Latest published version of your Lambda Function. 
+ type: string + vpcConfig: + description: Configuration block. Detailed below. + properties: + ipv6AllowedForDualStack: + description: Allows outbound IPv6 traffic on VPC functions + that are connected to dual-stack subnets. Default is false. + type: boolean + securityGroupIds: + description: List of security group IDs associated with the + Lambda function. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: List of subnet IDs associated with the Lambda + function. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: ID of the VPC. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lambda.aws.upbound.io_functionurls.yaml b/package/crds/lambda.aws.upbound.io_functionurls.yaml index 66951349ce..acaca05384 100644 --- a/package/crds/lambda.aws.upbound.io_functionurls.yaml +++ b/package/crds/lambda.aws.upbound.io_functionurls.yaml @@ -694,3 +694,670 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FunctionURL is the Schema for the FunctionURLs API. Provides + a Lambda function URL resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionURLSpec defines the desired state of FunctionURL + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authorizationType: + description: The type of authentication that the function URL + uses. Set to "AWS_IAM" to restrict access to authenticated IAM + users only. Set to "NONE" to bypass IAM authentication and create + a public endpoint. See the AWS documentation for more details. + type: string + cors: + description: The cross-origin resource sharing (CORS) settings + for the function URL. Documented below. + properties: + allowCredentials: + description: Whether to allow cookies or other credentials + in requests to the function URL. The default is false. + type: boolean + allowHeaders: + description: 'The HTTP headers that origins can include in + requests to the function URL. For example: ["date", "keep-alive", + "x-custom-header"].' + items: + type: string + type: array + x-kubernetes-list-type: set + allowMethods: + description: 'The HTTP methods that are allowed when calling + the function URL. For example: ["GET", "POST", "DELETE"], + or the wildcard character (["*"]).' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + allowOrigins: + description: 'The origins that can access the function URL. + You can list any number of specific origins (or the wildcard + character ("*")), separated by a comma. For example: ["https://www.example.com", + "http://localhost:60905"].' + items: + type: string + type: array + x-kubernetes-list-type: set + exposeHeaders: + description: The HTTP headers in your function response that + you want to expose to origins that call the function URL. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAge: + description: The maximum amount of time, in seconds, that + web browsers can cache results of a preflight request. By + default, this is set to 0, which means that the browser + doesn't cache results. The maximum value is 86400. + type: number + type: object + functionName: + description: The name (or ARN) of the Lambda function. + type: string + functionNameRef: + description: Reference to a Function in lambda to populate functionName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionNameSelector: + description: Selector for a Function in lambda to populate functionName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + invokeMode: + description: Determines how the Lambda function responds to an + invocation. Valid values are BUFFERED (default) and RESPONSE_STREAM. + See more in Configuring a Lambda function to stream responses. + type: string + qualifier: + description: The alias name or "$LATEST". + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authorizationType: + description: The type of authentication that the function URL + uses. Set to "AWS_IAM" to restrict access to authenticated IAM + users only. Set to "NONE" to bypass IAM authentication and create + a public endpoint. See the AWS documentation for more details. + type: string + cors: + description: The cross-origin resource sharing (CORS) settings + for the function URL. Documented below. + properties: + allowCredentials: + description: Whether to allow cookies or other credentials + in requests to the function URL. The default is false. + type: boolean + allowHeaders: + description: 'The HTTP headers that origins can include in + requests to the function URL. For example: ["date", "keep-alive", + "x-custom-header"].' + items: + type: string + type: array + x-kubernetes-list-type: set + allowMethods: + description: 'The HTTP methods that are allowed when calling + the function URL. For example: ["GET", "POST", "DELETE"], + or the wildcard character (["*"]).' + items: + type: string + type: array + x-kubernetes-list-type: set + allowOrigins: + description: 'The origins that can access the function URL. + You can list any number of specific origins (or the wildcard + character ("*")), separated by a comma. For example: ["https://www.example.com", + "http://localhost:60905"].' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + exposeHeaders: + description: The HTTP headers in your function response that + you want to expose to origins that call the function URL. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAge: + description: The maximum amount of time, in seconds, that + web browsers can cache results of a preflight request. By + default, this is set to 0, which means that the browser + doesn't cache results. The maximum value is 86400. + type: number + type: object + functionName: + description: The name (or ARN) of the Lambda function. + type: string + functionNameRef: + description: Reference to a Function in lambda to populate functionName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionNameSelector: + description: Selector for a Function in lambda to populate functionName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + invokeMode: + description: Determines how the Lambda function responds to an + invocation. Valid values are BUFFERED (default) and RESPONSE_STREAM. + See more in Configuring a Lambda function to stream responses. + type: string + qualifier: + description: The alias name or "$LATEST". + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.authorizationType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authorizationType) + || (has(self.initProvider) && has(self.initProvider.authorizationType))' + status: + description: FunctionURLStatus defines the observed state of FunctionURL. + properties: + atProvider: + properties: + authorizationType: + description: The type of authentication that the function URL + uses. Set to "AWS_IAM" to restrict access to authenticated IAM + users only. Set to "NONE" to bypass IAM authentication and create + a public endpoint. See the AWS documentation for more details. + type: string + cors: + description: The cross-origin resource sharing (CORS) settings + for the function URL. Documented below. + properties: + allowCredentials: + description: Whether to allow cookies or other credentials + in requests to the function URL. The default is false. 
+ type: boolean + allowHeaders: + description: 'The HTTP headers that origins can include in + requests to the function URL. For example: ["date", "keep-alive", + "x-custom-header"].' + items: + type: string + type: array + x-kubernetes-list-type: set + allowMethods: + description: 'The HTTP methods that are allowed when calling + the function URL. For example: ["GET", "POST", "DELETE"], + or the wildcard character (["*"]).' + items: + type: string + type: array + x-kubernetes-list-type: set + allowOrigins: + description: 'The origins that can access the function URL. + You can list any number of specific origins (or the wildcard + character ("*")), separated by a comma. For example: ["https://www.example.com", + "http://localhost:60905"].' + items: + type: string + type: array + x-kubernetes-list-type: set + exposeHeaders: + description: The HTTP headers in your function response that + you want to expose to origins that call the function URL. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAge: + description: The maximum amount of time, in seconds, that + web browsers can cache results of a preflight request. By + default, this is set to 0, which means that the browser + doesn't cache results. The maximum value is 86400. + type: number + type: object + functionArn: + description: The Amazon Resource Name (ARN) of the function. + type: string + functionName: + description: The name (or ARN) of the Lambda function. + type: string + functionUrl: + description: The HTTP URL endpoint for the function in the format + https://.lambda-url..on.aws/. + type: string + id: + type: string + invokeMode: + description: Determines how the Lambda function responds to an + invocation. Valid values are BUFFERED (default) and RESPONSE_STREAM. + See more in Configuring a Lambda function to stream responses. + type: string + qualifier: + description: The alias name or "$LATEST". + type: string + urlId: + description: A generated ID for the endpoint. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lexmodels.aws.upbound.io_botaliases.yaml b/package/crds/lexmodels.aws.upbound.io_botaliases.yaml index 878db29466..02981fd2c8 100644 --- a/package/crds/lexmodels.aws.upbound.io_botaliases.yaml +++ b/package/crds/lexmodels.aws.upbound.io_botaliases.yaml @@ -511,3 +511,484 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BotAlias is the Schema for the BotAliass API. Provides an Amazon + Lex Bot Alias resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BotAliasSpec defines the desired state of BotAlias + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + botName: + description: The name of the bot. + type: string + botVersion: + description: The version of the bot. + type: string + conversationLogs: + description: The settings that determine how Amazon Lex uses conversation + logs for the alias. Attributes are documented under conversation_logs. + properties: + iamRoleArn: + description: The Amazon Resource Name (ARN) of the IAM role + used to write your logs to CloudWatch Logs or an S3 bucket. + Must be between 20 and 2048 characters in length. + type: string + logSettings: + description: The settings for your conversation logs. You + can log text, audio, or both. Attributes are documented + under log_settings. + items: + properties: + destination: + description: The destination where logs are delivered. + Options are CLOUDWATCH_LOGS or S3. + type: string + kmsKeyArn: + description: The Amazon Resource Name (ARN) of the key + used to encrypt audio logs in an S3 bucket. This can + only be specified when destination is set to S3. Must + be between 20 and 2048 characters in length. 
+ type: string + logType: + description: The type of logging that is enabled. Options + are AUDIO or TEXT. + type: string + resourceArn: + description: The Amazon Resource Name (ARN) of the CloudWatch + Logs log group or S3 bucket where the logs are delivered. + Must be less than or equal to 2048 characters in length. + type: string + type: object + type: array + type: object + description: + description: A description of the alias. Must be less than or + equal to 200 characters in length. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + botName: + description: The name of the bot. + type: string + botVersion: + description: The version of the bot. + type: string + conversationLogs: + description: The settings that determine how Amazon Lex uses conversation + logs for the alias. Attributes are documented under conversation_logs. + properties: + iamRoleArn: + description: The Amazon Resource Name (ARN) of the IAM role + used to write your logs to CloudWatch Logs or an S3 bucket. + Must be between 20 and 2048 characters in length. + type: string + logSettings: + description: The settings for your conversation logs. You + can log text, audio, or both. 
Attributes are documented + under log_settings. + items: + properties: + destination: + description: The destination where logs are delivered. + Options are CLOUDWATCH_LOGS or S3. + type: string + kmsKeyArn: + description: The Amazon Resource Name (ARN) of the key + used to encrypt audio logs in an S3 bucket. This can + only be specified when destination is set to S3. Must + be between 20 and 2048 characters in length. + type: string + logType: + description: The type of logging that is enabled. Options + are AUDIO or TEXT. + type: string + resourceArn: + description: The Amazon Resource Name (ARN) of the CloudWatch + Logs log group or S3 bucket where the logs are delivered. + Must be less than or equal to 2048 characters in length. + type: string + type: object + type: array + type: object + description: + description: A description of the alias. Must be less than or + equal to 200 characters in length. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.botName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.botName) + || (has(self.initProvider) && has(self.initProvider.botName))' + - message: spec.forProvider.botVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.botVersion) + || (has(self.initProvider) && has(self.initProvider.botVersion))' + status: + description: BotAliasStatus defines the observed state of BotAlias. + properties: + atProvider: + properties: + arn: + description: The ARN of the bot alias. + type: string + botName: + description: The name of the bot. + type: string + botVersion: + description: The version of the bot. + type: string + checksum: + description: Checksum of the bot alias. + type: string + conversationLogs: + description: The settings that determine how Amazon Lex uses conversation + logs for the alias. Attributes are documented under conversation_logs. + properties: + iamRoleArn: + description: The Amazon Resource Name (ARN) of the IAM role + used to write your logs to CloudWatch Logs or an S3 bucket. + Must be between 20 and 2048 characters in length. 
+ type: string + logSettings: + description: The settings for your conversation logs. You + can log text, audio, or both. Attributes are documented + under log_settings. + items: + properties: + destination: + description: The destination where logs are delivered. + Options are CLOUDWATCH_LOGS or S3. + type: string + kmsKeyArn: + description: The Amazon Resource Name (ARN) of the key + used to encrypt audio logs in an S3 bucket. This can + only be specified when destination is set to S3. Must + be between 20 and 2048 characters in length. + type: string + logType: + description: The type of logging that is enabled. Options + are AUDIO or TEXT. + type: string + resourceArn: + description: The Amazon Resource Name (ARN) of the CloudWatch + Logs log group or S3 bucket where the logs are delivered. + Must be less than or equal to 2048 characters in length. + type: string + resourcePrefix: + description: (Computed) The prefix of the S3 object + key for AUDIO logs or the log stream name for TEXT + logs. + type: string + type: object + type: array + type: object + createdDate: + description: The date that the bot alias was created. + type: string + description: + description: A description of the alias. Must be less than or + equal to 200 characters in length. + type: string + id: + type: string + lastUpdatedDate: + description: The date that the bot alias was updated. When you + create a resource, the creation date and the last updated date + are the same. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lexmodels.aws.upbound.io_bots.yaml b/package/crds/lexmodels.aws.upbound.io_bots.yaml index 69a83098ba..821a54a72d 100644 --- a/package/crds/lexmodels.aws.upbound.io_bots.yaml +++ b/package/crds/lexmodels.aws.upbound.io_bots.yaml @@ -824,3 +824,794 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Bot is the Schema for the Bots API. Provides an Amazon Lex bot + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BotSpec defines the desired state of Bot + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + abortStatement: + description: The message that Amazon Lex uses to abort a conversation. + Attributes are documented under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. + items: + properties: + content: + description: The text of the message. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. 
+ type: string + type: object + childDirected: + description: By specifying true, you confirm that your use of + Amazon Lex is related to a website, program, or other application + that is directed or targeted, in whole or in part, to children + under age 13 and subject to COPPA. For more information see + the Amazon Lex FAQ and the Amazon Lex PutBot API Docs. + type: boolean + clarificationPrompt: + description: The message that Amazon Lex uses when it doesn't + understand the user's request. Attributes are documented under + prompt. + properties: + maxAttempts: + description: The number of times to prompt the user for information. + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. + items: + properties: + content: + description: The text of the message. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. + type: string + type: object + createVersion: + description: Determines if a new bot version is created when the + initial resource is created and on each update. Defaults to + false. + type: boolean + description: + description: A description of the bot. Must be less than or equal + to 200 characters in length. + type: string + detectSentiment: + description: When set to true user utterances are sent to Amazon + Comprehend for sentiment analysis. 
If you don't specify detectSentiment, + the default is false. + type: boolean + enableModelImprovements: + description: Set to true to enable access to natural language + understanding improvements. When you set the enable_model_improvements + parameter to true you can use the nlu_intent_confidence_threshold + parameter to configure confidence scores. For more information, + see Confidence Scores. You can only set the enable_model_improvements + parameter in certain Regions. If you set the parameter to true, + your bot has access to accuracy improvements. For more information + see the Amazon Lex Bot PutBot API Docs. + type: boolean + idleSessionTtlInSeconds: + description: The maximum time in seconds that Amazon Lex retains + the data gathered in a conversation. Default is 300. Must be + a number between 60 and 86400 (inclusive). + type: number + intent: + description: A set of Intent objects. Each intent represents a + command that a user can express. Attributes are documented under + intent. Can have up to 250 Intent objects. + items: + properties: + intentName: + description: The name of the intent. Must be less than or + equal to 100 characters in length. + type: string + intentVersion: + description: The version of the intent. Must be less than + or equal to 64 characters in length. + type: string + type: object + type: array + locale: + description: Specifies the target locale for the bot. Any intent + used in the bot must be compatible with the locale of the bot. + For available locales, see Amazon Lex Bot PutBot API Docs. Default + is en-US. + type: string + nluIntentConfidenceThreshold: + description: Determines the threshold where Amazon Lex will insert + the AMAZON.FallbackIntent, AMAZON.KendraSearchIntent, or both + when returning alternative intents in a PostContent or PostText + response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent + are only inserted if they are configured for the bot. 
For more + information see Amazon Lex Bot PutBot API Docs This value requires + enable_model_improvements to be set to true and the default + is 0. Must be a float between 0 and 1. + type: number + processBehavior: + description: If you set the process_behavior element to BUILD, + Amazon Lex builds the bot so that it can be run. If you set + the element to SAVE Amazon Lex saves the bot, but doesn't build + it. Default is SAVE. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + voiceId: + description: The Amazon Polly voice ID that you want Amazon Lex + to use for voice interactions with the user. The locale configured + for the voice must match the locale of the bot. For more information, + see Available Voices in the Amazon Polly Developer Guide. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + abortStatement: + description: The message that Amazon Lex uses to abort a conversation. + Attributes are documented under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. 
+ items: + properties: + content: + description: The text of the message. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. + type: string + type: object + childDirected: + description: By specifying true, you confirm that your use of + Amazon Lex is related to a website, program, or other application + that is directed or targeted, in whole or in part, to children + under age 13 and subject to COPPA. For more information see + the Amazon Lex FAQ and the Amazon Lex PutBot API Docs. + type: boolean + clarificationPrompt: + description: The message that Amazon Lex uses when it doesn't + understand the user's request. Attributes are documented under + prompt. + properties: + maxAttempts: + description: The number of times to prompt the user for information. + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. + items: + properties: + content: + description: The text of the message. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. + type: number + type: object + type: array + responseCard: + description: |- + The response card. 
Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. + type: string + type: object + createVersion: + description: Determines if a new bot version is created when the + initial resource is created and on each update. Defaults to + false. + type: boolean + description: + description: A description of the bot. Must be less than or equal + to 200 characters in length. + type: string + detectSentiment: + description: When set to true user utterances are sent to Amazon + Comprehend for sentiment analysis. If you don't specify detectSentiment, + the default is false. + type: boolean + enableModelImprovements: + description: Set to true to enable access to natural language + understanding improvements. When you set the enable_model_improvements + parameter to true you can use the nlu_intent_confidence_threshold + parameter to configure confidence scores. For more information, + see Confidence Scores. You can only set the enable_model_improvements + parameter in certain Regions. If you set the parameter to true, + your bot has access to accuracy improvements. For more information + see the Amazon Lex Bot PutBot API Docs. + type: boolean + idleSessionTtlInSeconds: + description: The maximum time in seconds that Amazon Lex retains + the data gathered in a conversation. Default is 300. Must be + a number between 60 and 86400 (inclusive). + type: number + intent: + description: A set of Intent objects. Each intent represents a + command that a user can express. Attributes are documented under + intent. Can have up to 250 Intent objects. + items: + properties: + intentName: + description: The name of the intent. Must be less than or + equal to 100 characters in length. + type: string + intentVersion: + description: The version of the intent. Must be less than + or equal to 64 characters in length. 
+ type: string + type: object + type: array + locale: + description: Specifies the target locale for the bot. Any intent + used in the bot must be compatible with the locale of the bot. + For available locales, see Amazon Lex Bot PutBot API Docs. Default + is en-US. + type: string + nluIntentConfidenceThreshold: + description: Determines the threshold where Amazon Lex will insert + the AMAZON.FallbackIntent, AMAZON.KendraSearchIntent, or both + when returning alternative intents in a PostContent or PostText + response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent + are only inserted if they are configured for the bot. For more + information see Amazon Lex Bot PutBot API Docs This value requires + enable_model_improvements to be set to true and the default + is 0. Must be a float between 0 and 1. + type: number + processBehavior: + description: If you set the process_behavior element to BUILD, + Amazon Lex builds the bot so that it can be run. If you set + the element to SAVE Amazon Lex saves the bot, but doesn't build + it. Default is SAVE. + type: string + voiceId: + description: The Amazon Polly voice ID that you want Amazon Lex + to use for voice interactions with the user. The locale configured + for the voice must match the locale of the bot. For more information, + see Available Voices in the Amazon Polly Developer Guide. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.abortStatement is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.abortStatement) + || (has(self.initProvider) && has(self.initProvider.abortStatement))' + - message: spec.forProvider.childDirected is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.childDirected) + || (has(self.initProvider) && has(self.initProvider.childDirected))' + - message: spec.forProvider.intent is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.intent) + || (has(self.initProvider) && has(self.initProvider.intent))' + status: + description: BotStatus defines the observed state of Bot. 
+ properties: + atProvider: + properties: + abortStatement: + description: The message that Amazon Lex uses to abort a conversation. + Attributes are documented under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. + items: + properties: + content: + description: The text of the message. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. + type: string + type: object + arn: + type: string + checksum: + description: |- + Checksum identifying the version of the bot that was created. The checksum is not + included as an argument because the resource will add it automatically when updating the bot. + type: string + childDirected: + description: By specifying true, you confirm that your use of + Amazon Lex is related to a website, program, or other application + that is directed or targeted, in whole or in part, to children + under age 13 and subject to COPPA. For more information see + the Amazon Lex FAQ and the Amazon Lex PutBot API Docs. + type: boolean + clarificationPrompt: + description: The message that Amazon Lex uses when it doesn't + understand the user's request. Attributes are documented under + prompt. + properties: + maxAttempts: + description: The number of times to prompt the user for information. 
+ type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. + items: + properties: + content: + description: The text of the message. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. + type: string + type: object + createVersion: + description: Determines if a new bot version is created when the + initial resource is created and on each update. Defaults to + false. + type: boolean + createdDate: + description: The date when the bot version was created. + type: string + description: + description: A description of the bot. Must be less than or equal + to 200 characters in length. + type: string + detectSentiment: + description: When set to true user utterances are sent to Amazon + Comprehend for sentiment analysis. If you don't specify detectSentiment, + the default is false. + type: boolean + enableModelImprovements: + description: Set to true to enable access to natural language + understanding improvements. When you set the enable_model_improvements + parameter to true you can use the nlu_intent_confidence_threshold + parameter to configure confidence scores. For more information, + see Confidence Scores. You can only set the enable_model_improvements + parameter in certain Regions. If you set the parameter to true, + your bot has access to accuracy improvements. 
For more information + see the Amazon Lex Bot PutBot API Docs. + type: boolean + failureReason: + description: If status is FAILED, Amazon Lex provides the reason + that it failed to build the bot. + type: string + id: + type: string + idleSessionTtlInSeconds: + description: The maximum time in seconds that Amazon Lex retains + the data gathered in a conversation. Default is 300. Must be + a number between 60 and 86400 (inclusive). + type: number + intent: + description: A set of Intent objects. Each intent represents a + command that a user can express. Attributes are documented under + intent. Can have up to 250 Intent objects. + items: + properties: + intentName: + description: The name of the intent. Must be less than or + equal to 100 characters in length. + type: string + intentVersion: + description: The version of the intent. Must be less than + or equal to 64 characters in length. + type: string + type: object + type: array + lastUpdatedDate: + description: The date when the $LATEST version of this bot was + updated. + type: string + locale: + description: Specifies the target locale for the bot. Any intent + used in the bot must be compatible with the locale of the bot. + For available locales, see Amazon Lex Bot PutBot API Docs. Default + is en-US. + type: string + nluIntentConfidenceThreshold: + description: Determines the threshold where Amazon Lex will insert + the AMAZON.FallbackIntent, AMAZON.KendraSearchIntent, or both + when returning alternative intents in a PostContent or PostText + response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent + are only inserted if they are configured for the bot. For more + information see Amazon Lex Bot PutBot API Docs This value requires + enable_model_improvements to be set to true and the default + is 0. Must be a float between 0 and 1. + type: number + processBehavior: + description: If you set the process_behavior element to BUILD, + Amazon Lex builds the bot so that it can be run. 
If you set + the element to SAVE Amazon Lex saves the bot, but doesn't build + it. Default is SAVE. + type: string + status: + description: |- + When you send a request to create or update a bot, Amazon Lex sets the status response + element to BUILDING. After Amazon Lex builds the bot, it sets status to READY. If Amazon Lex can't + build the bot, it sets status to FAILED. Amazon Lex returns the reason for the failure in the + failure_reason response element. + type: string + version: + description: The version of the bot. + type: string + voiceId: + description: The Amazon Polly voice ID that you want Amazon Lex + to use for voice interactions with the user. The locale configured + for the voice must match the locale of the bot. For more information, + see Available Voices in the Amazon Polly Developer Guide. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lexmodels.aws.upbound.io_intents.yaml b/package/crds/lexmodels.aws.upbound.io_intents.yaml index 408a64a0ed..6ff07d470e 100644 --- a/package/crds/lexmodels.aws.upbound.io_intents.yaml +++ b/package/crds/lexmodels.aws.upbound.io_intents.yaml @@ -1457,3 +1457,1370 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Intent is the Schema for the Intents API. Provides an Amazon + Lex intent resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IntentSpec defines the desired state of Intent + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + conclusionStatement: + description: |- + The statement that you want Amazon Lex to convey to the user + after the intent is successfully fulfilled by the Lambda function. This element is relevant only if + you provide a Lambda function in the fulfillment_activity. If you return the intent to the client + application, you can't specify this element. The follow_up_prompt and conclusion_statement are + mutually exclusive. You can specify only one. Attributes are documented under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less than + or equal to 1000 characters in length. 
+ type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + confirmationPrompt: + description: |- + Prompts the user to confirm the intent. This question should + have a yes or no answer. You you must provide both the rejection_statement and confirmation_prompt, + or neither. Attributes are documented under prompt. + properties: + maxAttempts: + description: The number of times to prompt the user for information. + Must be a number between 1 and 5 (inclusive). + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less than + or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. 
Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + createVersion: + description: |- + Determines if a new slot type version is created when the initial + resource is created and on each update. Defaults to false. + type: boolean + description: + description: A description of the intent. Must be less than or + equal to 200 characters in length. + type: string + dialogCodeHook: + description: |- + Specifies a Lambda function to invoke for each user input. You can + invoke this Lambda function to personalize user interaction. Attributes are documented under code_hook. + properties: + messageVersion: + description: |- + The version of the request-response that you want Amazon Lex to use + to invoke your Lambda function. For more information, see + Using Lambda Functions. Must be less than or equal to 5 characters in length. + type: string + uri: + description: The Amazon Resource Name (ARN) of the Lambda + function. + type: string + type: object + followUpPrompt: + description: |- + Amazon Lex uses this prompt to solicit additional activity after + fulfilling an intent. For example, after the OrderPizza intent is fulfilled, you might prompt the + user to order a drink. The follow_up_prompt field and the conclusion_statement field are mutually + exclusive. You can specify only one. Attributes are documented under follow_up_prompt. + properties: + prompt: + description: Prompts for information from the user. Attributes + are documented under prompt. + properties: + maxAttempts: + description: The number of times to prompt the user for + information. Must be a number between 1 and 5 (inclusive). + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. 
+ You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less + than or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + rejectionStatement: + description: |- + If the user answers "no" to the question defined in the prompt field, + Amazon Lex responds with this statement to acknowledge that the intent was canceled. Attributes are + documented below under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less + than or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. 
Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + type: object + fulfillmentActivity: + description: |- + Describes how the intent is fulfilled. For example, after a + user provides all of the information for a pizza order, fulfillment_activity defines how the bot + places an order with a local pizza store. Attributes are documented under fulfillment_activity. + properties: + codeHook: + description: |- + A description of the Lambda function that is run to fulfill the intent. + Required if type is CodeHook. Attributes are documented under code_hook. + properties: + messageVersion: + description: |- + The version of the request-response that you want Amazon Lex to use + to invoke your Lambda function. For more information, see + Using Lambda Functions. Must be less than or equal to 5 characters in length. + type: string + uri: + description: The Amazon Resource Name (ARN) of the Lambda + function. + type: string + type: object + type: + description: |- + How the intent should be fulfilled, either by running a Lambda function or by + returning the slot data to the client application. Type can be either ReturnIntent or CodeHook, as documented here. + type: string + type: object + parentIntentSignature: + description: |- + A unique identifier for the built-in intent to base this + intent on. To find the signature for an intent, see + Standard Built-in Intents + in the Alexa Skills Kit. + type: string + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + rejectionStatement: + description: |- + When the user answers "no" to the question defined in + confirmation_prompt, Amazon Lex responds with this statement to acknowledge that the intent was + canceled. You must provide both the rejection_statement and the confirmation_prompt, or neither. + Attributes are documented under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less than + or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + sampleUtterances: + description: |- + An array of utterances (strings) that a user might say to signal + the intent. For example, "I want {PizzaSize} pizza", "Order {Quantity} {PizzaSize} pizzas". + In each utterance, a slot name is enclosed in curly braces. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + items: + type: string + type: array + x-kubernetes-list-type: set + slot: + description: |- + A list of intent slots.
At runtime, Amazon Lex elicits required slot values + from the user using prompts defined in the slots. Attributes are documented under slot. + items: + properties: + description: + description: A description of the bot. Must be less than + or equal to 200 characters in length. + type: string + name: + description: The name of the intent slot that you want to + create. The name is case sensitive. Must be less than + or equal to 100 characters in length. + type: string + priority: + description: |- + Directs Lex the order in which to elicit this slot value from the user. + For example, if the intent has two slots with priorities 1 and 2, AWS Lex first elicits a value for + the slot with priority 1. If multiple slots share the same priority, the order in which Lex elicits + values is arbitrary. Must be between 1 and 100. + type: number + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + sampleUtterances: + description: |- + If you know a specific pattern with which users might respond to + an Amazon Lex request for a slot value, you can provide those utterances to improve accuracy. This + is optional. In most cases, Amazon Lex is capable of understanding user utterances. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + items: + type: string + type: array + slotConstraint: + description: Specifies whether the slot is required or optional. + type: string + slotType: + description: |- + The type of the slot, either a custom slot type that you defined or one of + the built-in slot types. Must be less than or equal to 100 characters in length. + type: string + slotTypeVersion: + description: The version of the slot type. Must be less + than or equal to 64 characters in length. 
+ type: string + valueElicitationPrompt: + description: |- + The prompt that Amazon Lex uses to elicit the slot value + from the user. Attributes are documented under prompt. + properties: + maxAttempts: + description: The number of times to prompt the user + for information. Must be a number between 1 and 5 + (inclusive). + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be + less than or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + conclusionStatement: + description: |- + The statement that you want Amazon Lex to convey to the user + after the intent is successfully fulfilled by the Lambda function. This element is relevant only if + you provide a Lambda function in the fulfillment_activity. If you return the intent to the client + application, you can't specify this element. The follow_up_prompt and conclusion_statement are + mutually exclusive. You can specify only one. Attributes are documented under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less than + or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + confirmationPrompt: + description: |- + Prompts the user to confirm the intent. This question should + have a yes or no answer. 
You must provide both the rejection_statement and confirmation_prompt, + or neither. Attributes are documented under prompt. + properties: + maxAttempts: + description: The number of times to prompt the user for information. + Must be a number between 1 and 5 (inclusive). + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less than + or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + createVersion: + description: |- + Determines if a new slot type version is created when the initial + resource is created and on each update. Defaults to false. + type: boolean + description: + description: A description of the intent. Must be less than or + equal to 200 characters in length. + type: string + dialogCodeHook: + description: |- + Specifies a Lambda function to invoke for each user input. You can + invoke this Lambda function to personalize user interaction. Attributes are documented under code_hook.
+ properties: + messageVersion: + description: |- + The version of the request-response that you want Amazon Lex to use + to invoke your Lambda function. For more information, see + Using Lambda Functions. Must be less than or equal to 5 characters in length. + type: string + uri: + description: The Amazon Resource Name (ARN) of the Lambda + function. + type: string + type: object + followUpPrompt: + description: |- + Amazon Lex uses this prompt to solicit additional activity after + fulfilling an intent. For example, after the OrderPizza intent is fulfilled, you might prompt the + user to order a drink. The follow_up_prompt field and the conclusion_statement field are mutually + exclusive. You can specify only one. Attributes are documented under follow_up_prompt. + properties: + prompt: + description: Prompts for information from the user. Attributes + are documented under prompt. + properties: + maxAttempts: + description: The number of times to prompt the user for + information. Must be a number between 1 and 5 (inclusive). + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less + than or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. 
Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + rejectionStatement: + description: |- + If the user answers "no" to the question defined in the prompt field, + Amazon Lex responds with this statement to acknowledge that the intent was canceled. Attributes are + documented below under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less + than or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + type: object + fulfillmentActivity: + description: |- + Describes how the intent is fulfilled. For example, after a + user provides all of the information for a pizza order, fulfillment_activity defines how the bot + places an order with a local pizza store. Attributes are documented under fulfillment_activity. 
+ properties: + codeHook: + description: |- + A description of the Lambda function that is run to fulfill the intent. + Required if type is CodeHook. Attributes are documented under code_hook. + properties: + messageVersion: + description: |- + The version of the request-response that you want Amazon Lex to use + to invoke your Lambda function. For more information, see + Using Lambda Functions. Must be less than or equal to 5 characters in length. + type: string + uri: + description: The Amazon Resource Name (ARN) of the Lambda + function. + type: string + type: object + type: + description: |- + How the intent should be fulfilled, either by running a Lambda function or by + returning the slot data to the client application. Type can be either ReturnIntent or CodeHook, as documented here. + type: string + type: object + parentIntentSignature: + description: |- + A unique identifier for the built-in intent to base this + intent on. To find the signature for an intent, see + Standard Built-in Intents + in the Alexa Skills Kit. + type: string + rejectionStatement: + description: |- + When the user answers "no" to the question defined in + confirmation_prompt, Amazon Lex responds with this statement to acknowledge that the intent was + canceled. You must provide both the rejection_statement and the confirmation_prompt, or neither. + Attributes are documented under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less than + or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. 
+ type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + sampleUtterances: + description: |- + An array of utterances (strings) that a user might say to signal + the intent. For example, "I want {PizzaSize} pizza", "Order {Quantity} {PizzaSize} pizzas". + In each utterance, a slot name is enclosed in curly braces. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + items: + type: string + type: array + x-kubernetes-list-type: set + slot: + description: |- + A list of intent slots. At runtime, Amazon Lex elicits required slot values + from the user using prompts defined in the slots. Attributes are documented under slot. + items: + properties: + description: + description: A description of the bot. Must be less than + or equal to 200 characters in length. + type: string + name: + description: The name of the intent slot that you want to + create. The name is case sensitive. Must be less than + or equal to 100 characters in length. + type: string + priority: + description: |- + Directs Lex the order in which to elicit this slot value from the user. + For example, if the intent has two slots with priorities 1 and 2, AWS Lex first elicits a value for + the slot with priority 1. If multiple slots share the same priority, the order in which Lex elicits + values is arbitrary. Must be between 1 and 100. + type: number + responseCard: + description: |- + The response card.
Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + sampleUtterances: + description: |- + If you know a specific pattern with which users might respond to + an Amazon Lex request for a slot value, you can provide those utterances to improve accuracy. This + is optional. In most cases, Amazon Lex is capable of understanding user utterances. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + items: + type: string + type: array + slotConstraint: + description: Specifies whether the slot is required or optional. + type: string + slotType: + description: |- + The type of the slot, either a custom slot type that you defined or one of + the built-in slot types. Must be less than or equal to 100 characters in length. + type: string + slotTypeVersion: + description: The version of the slot type. Must be less + than or equal to 64 characters in length. + type: string + valueElicitationPrompt: + description: |- + The prompt that Amazon Lex uses to elicit the slot value + from the user. Attributes are documented under prompt. + properties: + maxAttempts: + description: The number of times to prompt the user + for information. Must be a number between 1 and 5 + (inclusive). + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be + less than or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. 
+ type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.fulfillmentActivity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.fulfillmentActivity) + || (has(self.initProvider) && has(self.initProvider.fulfillmentActivity))' + status: + description: IntentStatus defines the observed state of Intent. + properties: + atProvider: + properties: + arn: + description: The ARN of the Lex intent. + type: string + checksum: + description: |- + Checksum identifying the version of the intent that was created. The checksum is not + included as an argument because the resource will add it automatically when updating the intent. + type: string + conclusionStatement: + description: |- + The statement that you want Amazon Lex to convey to the user + after the intent is successfully fulfilled by the Lambda function. This element is relevant only if + you provide a Lambda function in the fulfillment_activity. If you return the intent to the client + application, you can't specify this element. The follow_up_prompt and conclusion_statement are + mutually exclusive. You can specify only one. Attributes are documented under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less than + or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. 
+ type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + confirmationPrompt: + description: |- + Prompts the user to confirm the intent. This question should + have a yes or no answer. You must provide both the rejection_statement and confirmation_prompt, + or neither. Attributes are documented under prompt. + properties: + maxAttempts: + description: The number of times to prompt the user for information. + Must be a number between 1 and 5 (inclusive). + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less than + or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card.
For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + createVersion: + description: |- + Determines if a new slot type version is created when the initial + resource is created and on each update. Defaults to false. + type: boolean + createdDate: + description: The date when the intent version was created. + type: string + description: + description: A description of the intent. Must be less than or + equal to 200 characters in length. + type: string + dialogCodeHook: + description: |- + Specifies a Lambda function to invoke for each user input. You can + invoke this Lambda function to personalize user interaction. Attributes are documented under code_hook. + properties: + messageVersion: + description: |- + The version of the request-response that you want Amazon Lex to use + to invoke your Lambda function. For more information, see + Using Lambda Functions. Must be less than or equal to 5 characters in length. + type: string + uri: + description: The Amazon Resource Name (ARN) of the Lambda + function. + type: string + type: object + followUpPrompt: + description: |- + Amazon Lex uses this prompt to solicit additional activity after + fulfilling an intent. For example, after the OrderPizza intent is fulfilled, you might prompt the + user to order a drink. The follow_up_prompt field and the conclusion_statement field are mutually + exclusive. You can specify only one. Attributes are documented under follow_up_prompt. + properties: + prompt: + description: Prompts for information from the user. Attributes + are documented under prompt. + properties: + maxAttempts: + description: The number of times to prompt the user for + information. Must be a number between 1 and 5 (inclusive). + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. 
+ You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less + than or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + rejectionStatement: + description: |- + If the user answers "no" to the question defined in the prompt field, + Amazon Lex responds with this statement to acknowledge that the intent was canceled. Attributes are + documented below under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less + than or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. 
Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + type: object + fulfillmentActivity: + description: |- + Describes how the intent is fulfilled. For example, after a + user provides all of the information for a pizza order, fulfillment_activity defines how the bot + places an order with a local pizza store. Attributes are documented under fulfillment_activity. + properties: + codeHook: + description: |- + A description of the Lambda function that is run to fulfill the intent. + Required if type is CodeHook. Attributes are documented under code_hook. + properties: + messageVersion: + description: |- + The version of the request-response that you want Amazon Lex to use + to invoke your Lambda function. For more information, see + Using Lambda Functions. Must be less than or equal to 5 characters in length. + type: string + uri: + description: The Amazon Resource Name (ARN) of the Lambda + function. + type: string + type: object + type: + description: |- + How the intent should be fulfilled, either by running a Lambda function or by + returning the slot data to the client application. Type can be either ReturnIntent or CodeHook, as documented here. + type: string + type: object + id: + type: string + lastUpdatedDate: + description: The date when the $LATEST version of this intent + was updated. + type: string + parentIntentSignature: + description: |- + A unique identifier for the built-in intent to base this + intent on. To find the signature for an intent, see + Standard Built-in Intents + in the Alexa Skills Kit. 
+ type: string + rejectionStatement: + description: |- + When the user answers "no" to the question defined in + confirmation_prompt, Amazon Lex responds with this statement to acknowledge that the intent was + canceled. You must provide both the rejection_statement and the confirmation_prompt, or neither. + Attributes are documented under statement. + properties: + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be less than + or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + sampleUtterances: + description: |- + An array of utterances (strings) that a user might say to signal + the intent. For example, "I want {PizzaSize} pizza", "Order {Quantity} {PizzaSize} pizzas". + In each utterance, a slot name is enclosed in curly braces. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + items: + type: string + type: array + x-kubernetes-list-type: set + slot: + description: |- + An list of intent slots. 
At runtime, Amazon Lex elicits required slot values + from the user using prompts defined in the slots. Attributes are documented under slot. + items: + properties: + description: + description: A description of the bot. Must be less than + or equal to 200 characters in length. + type: string + name: + description: The name of the intent slot that you want to + create. The name is case sensitive. Must be less than + or equal to 100 characters in length. + type: string + priority: + description: |- + Directs Lex the order in which to elicit this slot value from the user. + For example, if the intent has two slots with priorities 1 and 2, AWS Lex first elicits a value for + the slot with priority 1. If multiple slots share the same priority, the order in which Lex elicits + values is arbitrary. Must be between 1 and 100. + type: number + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + sampleUtterances: + description: |- + If you know a specific pattern with which users might respond to + an Amazon Lex request for a slot value, you can provide those utterances to improve accuracy. This + is optional. In most cases, Amazon Lex is capable of understanding user utterances. Must have between 1 and 10 items in the list, and each item must be less than or equal to 200 characters in length. + items: + type: string + type: array + slotConstraint: + description: Specifies whether the slot is required or optional. + type: string + slotType: + description: |- + The type of the slot, either a custom slot type that you defined or one of + the built-in slot types. Must be less than or equal to 100 characters in length. + type: string + slotTypeVersion: + description: The version of the slot type. Must be less + than or equal to 64 characters in length. 
+ type: string + valueElicitationPrompt: + description: |- + The prompt that Amazon Lex uses to elicit the slot value + from the user. Attributes are documented under prompt. + properties: + maxAttempts: + description: The number of times to prompt the user + for information. Must be a number between 1 and 5 + (inclusive). + type: number + message: + description: |- + A set of messages, each of which provides a message string and its type. + You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). + Attributes are documented under message. Must contain between 1 and 15 messages. + items: + properties: + content: + description: The text of the message. Must be + less than or equal to 1000 characters in length. + type: string + contentType: + description: The content type of the message string. + type: string + groupNumber: + description: |- + Identifies the message group that the message belongs to. When a group + is assigned to a message, Amazon Lex returns one message from each group in the response. Must be a number between 1 and 5 (inclusive). + type: number + type: object + type: array + responseCard: + description: |- + The response card. Amazon Lex will substitute session attributes and + slot values into the response card. For more information, see + Example: Using a Response Card. Must be less than or equal to 50000 characters in length. + type: string + type: object + type: object + type: array + version: + description: The version of the bot. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lightsail.aws.upbound.io_containerservices.yaml b/package/crds/lightsail.aws.upbound.io_containerservices.yaml index a3903a67ff..90a62730ff 100644 --- a/package/crds/lightsail.aws.upbound.io_containerservices.yaml +++ b/package/crds/lightsail.aws.upbound.io_containerservices.yaml @@ -596,3 +596,560 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ContainerService is the Schema for the ContainerServices API. + Provides a resource to manage Lightsail container service + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ContainerServiceSpec defines the desired state of ContainerService + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + isDisabled: + description: A Boolean value indicating whether the container + service is disabled. Defaults to false. + type: boolean + power: + description: |- + The power specification for the container service. The power specifies the amount of memory, + the number of vCPUs, and the monthly price of each node of the container service. + Possible values: nano, micro, small, medium, large, xlarge. + type: string + privateRegistryAccess: + description: An object to describe the configuration for the container + service to access private container image repositories, such + as Amazon Elastic Container Registry (Amazon ECR) private repositories. + See Private Registry Access below for more details. + properties: + ecrImagePullerRole: + description: Describes a request to configure an Amazon Lightsail + container service to access private container image repositories, + such as Amazon Elastic Container Registry (Amazon ECR) private + repositories. See ECR Image Puller Role below for more details. 
+ properties: + isActive: + description: A Boolean value that indicates whether to + activate the role. The default is false. + type: boolean + type: object + type: object + publicDomainNames: + description: |- + The public domain names to use with the container service, such as example.com + and www.example.com. You can specify up to four public domain names for a container service. The domain names that you + specify are used when you create a deployment with a container configured as the public endpoint of your container + service. If you don't specify public domain names, then you can use the default domain of the container service. + Defined below. + properties: + certificate: + items: + properties: + certificateName: + description: |- + The name for the container service. Names must be of length 1 to 63, and be + unique within each AWS Region in your Lightsail account. + type: string + domainNames: + items: + type: string + type: array + type: object + type: array + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + scale: + description: |- + The scale specification for the container service. The scale specifies the allocated compute + nodes of the container service. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + isDisabled: + description: A Boolean value indicating whether the container + service is disabled. Defaults to false. + type: boolean + power: + description: |- + The power specification for the container service. The power specifies the amount of memory, + the number of vCPUs, and the monthly price of each node of the container service. + Possible values: nano, micro, small, medium, large, xlarge. + type: string + privateRegistryAccess: + description: An object to describe the configuration for the container + service to access private container image repositories, such + as Amazon Elastic Container Registry (Amazon ECR) private repositories. + See Private Registry Access below for more details. + properties: + ecrImagePullerRole: + description: Describes a request to configure an Amazon Lightsail + container service to access private container image repositories, + such as Amazon Elastic Container Registry (Amazon ECR) private + repositories. See ECR Image Puller Role below for more details. + properties: + isActive: + description: A Boolean value that indicates whether to + activate the role. The default is false. + type: boolean + type: object + type: object + publicDomainNames: + description: |- + The public domain names to use with the container service, such as example.com + and www.example.com. You can specify up to four public domain names for a container service. The domain names that you + specify are used when you create a deployment with a container configured as the public endpoint of your container + service. If you don't specify public domain names, then you can use the default domain of the container service. + Defined below. 
+ properties: + certificate: + items: + properties: + certificateName: + description: |- + The name for the container service. Names must be of length 1 to 63, and be + unique within each AWS Region in your Lightsail account. + type: string + domainNames: + items: + type: string + type: array + type: object + type: array + type: object + scale: + description: |- + The scale specification for the container service. The scale specifies the allocated compute + nodes of the container service. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.power is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.power) + || (has(self.initProvider) && has(self.initProvider.power))' + - message: spec.forProvider.scale is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scale) + || (has(self.initProvider) && has(self.initProvider.scale))' + status: + description: ContainerServiceStatus defines the observed state of ContainerService. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the container service. + type: string + availabilityZone: + description: The Availability Zone. Follows the format us-east-2a + (case-sensitive). + type: string + createdAt: + type: string + id: + description: Same as name. + type: string + isDisabled: + description: A Boolean value indicating whether the container + service is disabled. Defaults to false. + type: boolean + power: + description: |- + The power specification for the container service. The power specifies the amount of memory, + the number of vCPUs, and the monthly price of each node of the container service. 
+ Possible values: nano, micro, small, medium, large, xlarge. + type: string + powerId: + description: The ID of the power of the container service. + type: string + principalArn: + description: |- + The principal ARN of the container service. The principal ARN can be used to create a trust + relationship between your standard AWS account and your Lightsail container service. This allows you to give your + service permission to access resources in your standard AWS account. + type: string + privateDomainName: + description: |- + The private domain name of the container service. The private domain name is accessible only + by other resources within the default virtual private cloud (VPC) of your Lightsail account. + type: string + privateRegistryAccess: + description: An object to describe the configuration for the container + service to access private container image repositories, such + as Amazon Elastic Container Registry (Amazon ECR) private repositories. + See Private Registry Access below for more details. + properties: + ecrImagePullerRole: + description: Describes a request to configure an Amazon Lightsail + container service to access private container image repositories, + such as Amazon Elastic Container Registry (Amazon ECR) private + repositories. See ECR Image Puller Role below for more details. + properties: + isActive: + description: A Boolean value that indicates whether to + activate the role. The default is false. + type: boolean + principalArn: + description: |- + The principal ARN of the container service. The principal ARN can be used to create a trust + relationship between your standard AWS account and your Lightsail container service. This allows you to give your + service permission to access resources in your standard AWS account. + type: string + type: object + type: object + publicDomainNames: + description: |- + The public domain names to use with the container service, such as example.com + and www.example.com. 
You can specify up to four public domain names for a container service. The domain names that you + specify are used when you create a deployment with a container configured as the public endpoint of your container + service. If you don't specify public domain names, then you can use the default domain of the container service. + Defined below. + properties: + certificate: + items: + properties: + certificateName: + description: |- + The name for the container service. Names must be of length 1 to 63, and be + unique within each AWS Region in your Lightsail account. + type: string + domainNames: + items: + type: string + type: array + type: object + type: array + type: object + resourceType: + description: The Lightsail resource type of the container service + (i.e., ContainerService). + type: string + scale: + description: |- + The scale specification for the container service. The scale specifies the allocated compute + nodes of the container service. + type: number + state: + description: The current state of the container service. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: |- + A map of tags assigned to the resource, including those inherited from the provider + default_tags configuration block. + type: object + x-kubernetes-map-type: granular + url: + description: |- + The publicly accessible URL of the container service. If no public endpoint is specified in the + currentDeployment, this URL returns a 404 response. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/lightsail.aws.upbound.io_instances.yaml b/package/crds/lightsail.aws.upbound.io_instances.yaml index b3644bd26c..8ada91a132 100644 --- a/package/crds/lightsail.aws.upbound.io_instances.yaml +++ b/package/crds/lightsail.aws.upbound.io_instances.yaml @@ -560,3 +560,539 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Instance is the Schema for the Instances API. Provides an Lightsail + Instance + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceSpec defines the desired state of Instance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addOn: + description: The add on configuration for the instance. Detailed + below. + properties: + snapshotTime: + description: The daily time when an automatic snapshot will + be created. Must be in HH:00 format, and in an hourly increment + and specified in Coordinated Universal Time (UTC). The snapshot + will be automatically created between the time specified + and up to 45 minutes after. + type: string + status: + description: 'The status of the add on. Valid Values: Enabled, + Disabled.' + type: string + type: + description: The add-on type. There is currently only one + valid type AutoSnapshot. + type: string + type: object + availabilityZone: + description: |- + The Availability Zone in which to create your instance. A + list of available zones can be obtained using the AWS CLI command: + aws lightsail get-regions --include-availability-zones. + type: string + blueprintId: + description: |- + The ID for a virtual private server image. 
A list of available + blueprint IDs can be obtained using the AWS CLI command: + aws lightsail get-blueprints. + type: string + bundleId: + description: |- + The bundle of specification information. A list of available + bundle IDs can be obtained using the AWS CLI command: + aws lightsail get-bundles. + type: string + ipAddressType: + description: 'The IP address type of the Lightsail Instance. Valid + Values: dualstack | ipv4.' + type: string + keyPairName: + description: |- + The name of your key pair. Created in the + Lightsail console (cannot use aws_key_pair at this time) + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userData: + description: Single lined launch script as a string to configure + server with additional user data + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addOn: + description: The add on configuration for the instance. Detailed + below. + properties: + snapshotTime: + description: The daily time when an automatic snapshot will + be created. Must be in HH:00 format, and in an hourly increment + and specified in Coordinated Universal Time (UTC). 
The snapshot + will be automatically created between the time specified + and up to 45 minutes after. + type: string + status: + description: 'The status of the add on. Valid Values: Enabled, + Disabled.' + type: string + type: + description: The add-on type. There is currently only one + valid type AutoSnapshot. + type: string + type: object + availabilityZone: + description: |- + The Availability Zone in which to create your instance. A + list of available zones can be obtained using the AWS CLI command: + aws lightsail get-regions --include-availability-zones. + type: string + blueprintId: + description: |- + The ID for a virtual private server image. A list of available + blueprint IDs can be obtained using the AWS CLI command: + aws lightsail get-blueprints. + type: string + bundleId: + description: |- + The bundle of specification information. A list of available + bundle IDs can be obtained using the AWS CLI command: + aws lightsail get-bundles. + type: string + ipAddressType: + description: 'The IP address type of the Lightsail Instance. Valid + Values: dualstack | ipv4.' + type: string + keyPairName: + description: |- + The name of your key pair. Created in the + Lightsail console (cannot use aws_key_pair at this time) + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userData: + description: Single lined launch script as a string to configure + server with additional user data + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.availabilityZone is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.availabilityZone) + || (has(self.initProvider) && has(self.initProvider.availabilityZone))' + - message: spec.forProvider.blueprintId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.blueprintId) + || (has(self.initProvider) && has(self.initProvider.blueprintId))' + - message: spec.forProvider.bundleId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.bundleId) + || (has(self.initProvider) && has(self.initProvider.bundleId))' + status: + description: InstanceStatus defines the observed state of Instance. + properties: + atProvider: + properties: + addOn: + description: The add on configuration for the instance. Detailed + below. + properties: + snapshotTime: + description: The daily time when an automatic snapshot will + be created. Must be in HH:00 format, and in an hourly increment + and specified in Coordinated Universal Time (UTC). The snapshot + will be automatically created between the time specified + and up to 45 minutes after. + type: string + status: + description: 'The status of the add on. Valid Values: Enabled, + Disabled.' + type: string + type: + description: The add-on type. There is currently only one + valid type AutoSnapshot. + type: string + type: object + arn: + description: The ARN of the Lightsail instance (matches id). + type: string + availabilityZone: + description: |- + The Availability Zone in which to create your instance. 
A + list of available zones can be obtained using the AWS CLI command: + aws lightsail get-regions --include-availability-zones. + type: string + blueprintId: + description: |- + The ID for a virtual private server image. A list of available + blueprint IDs can be obtained using the AWS CLI command: + aws lightsail get-blueprints. + type: string + bundleId: + description: |- + The bundle of specification information. A list of available + bundle IDs can be obtained using the AWS CLI command: + aws lightsail get-bundles. + type: string + cpuCount: + description: The number of vCPUs the instance has. + type: number + createdAt: + description: The timestamp when the instance was created. + type: string + id: + description: The ARN of the Lightsail instance (matches arn). + type: string + ipAddressType: + description: 'The IP address type of the Lightsail Instance. Valid + Values: dualstack | ipv4.' + type: string + ipv6Addresses: + description: List of IPv6 addresses for the Lightsail instance. + items: + type: string + type: array + isStaticIp: + description: A Boolean value indicating whether this instance + has a static IP assigned to it. + type: boolean + keyPairName: + description: |- + The name of your key pair. Created in the + Lightsail console (cannot use aws_key_pair at this time) + type: string + privateIpAddress: + description: The private IP address of the instance. + type: string + publicIpAddress: + description: The public IP address of the instance. + type: string + ramSize: + description: The amount of RAM in GB on the instance (e.g., 1.0). + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + userData: + description: Single lined launch script as a string to configure + server with additional user data + type: string + username: + description: The user name for connecting to the instance (e.g., + ec2-user). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/location.aws.upbound.io_placeindices.yaml b/package/crds/location.aws.upbound.io_placeindices.yaml index d2f683d539..db43d8a7fb 100644 --- a/package/crds/location.aws.upbound.io_placeindices.yaml +++ b/package/crds/location.aws.upbound.io_placeindices.yaml @@ -430,3 +430,409 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: PlaceIndex is the Schema for the PlaceIndexs API. Provides a + Location Service Place Index. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PlaceIndexSpec defines the desired state of PlaceIndex + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dataSource: + description: Specifies the geospatial data provider for the new + place index. + type: string + dataSourceConfiguration: + description: Configuration block with the data storage option + chosen for requesting Places. Detailed below. + properties: + intendedUse: + description: 'Specifies how the results of an operation will + be stored by the caller. Valid values: SingleUse, Storage. + Default: SingleUse.' + type: string + type: object + description: + description: The optional description for the place index resource. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dataSource: + description: Specifies the geospatial data provider for the new + place index. + type: string + dataSourceConfiguration: + description: Configuration block with the data storage option + chosen for requesting Places. Detailed below. + properties: + intendedUse: + description: 'Specifies how the results of an operation will + be stored by the caller. Valid values: SingleUse, Storage. + Default: SingleUse.' + type: string + type: object + description: + description: The optional description for the place index resource. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dataSource is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dataSource) + || (has(self.initProvider) && has(self.initProvider.dataSource))' + status: + description: PlaceIndexStatus defines the observed state of PlaceIndex. + properties: + atProvider: + properties: + createTime: + description: The timestamp for when the place index resource was + created in ISO 8601 format. + type: string + dataSource: + description: Specifies the geospatial data provider for the new + place index. + type: string + dataSourceConfiguration: + description: Configuration block with the data storage option + chosen for requesting Places. Detailed below. + properties: + intendedUse: + description: 'Specifies how the results of an operation will + be stored by the caller. Valid values: SingleUse, Storage. + Default: SingleUse.' 
+ type: string + type: object + description: + description: The optional description for the place index resource. + type: string + id: + type: string + indexArn: + description: The Amazon Resource Name (ARN) for the place index + resource. Used to specify a resource across AWS. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + updateTime: + description: The timestamp for when the place index resource was + last update in ISO 8601. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/macie2.aws.upbound.io_classificationjobs.yaml b/package/crds/macie2.aws.upbound.io_classificationjobs.yaml index b320081439..f0ac303fc1 100644 --- a/package/crds/macie2.aws.upbound.io_classificationjobs.yaml +++ b/package/crds/macie2.aws.upbound.io_classificationjobs.yaml @@ -1640,3 +1640,1451 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ClassificationJob is the Schema for the ClassificationJobs API. + Provides a resource to manage an AWS Macie Classification Job. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClassificationJobSpec defines the desired state of ClassificationJob + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customDataIdentifierIds: + description: The custom data identifiers to use for data analysis + and classification. + items: + type: string + type: array + description: + description: A custom description of the job. The description + can contain as many as 200 characters. + type: string + initialRun: + description: Specifies whether to analyze all existing, eligible + objects immediately after the job is created. + type: boolean + jobStatus: + description: 'The status for the job. Valid values are: CANCELLED, + RUNNING and USER_PAUSED' + type: string + jobType: + description: 'The schedule for running the job. Valid values are: + ONE_TIME - Run the job only once. If you specify this value, + don''t specify a value for the schedule_frequency property. + SCHEDULED - Run the job on a daily, weekly, or monthly basis. 
+ If you specify this value, use the schedule_frequency property + to define the recurrence pattern for the job.' + type: string + name: + description: A custom name for the job. The name can contain as + many as 500 characters. Conflicts with name_prefix. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + s3JobDefinition: + description: The S3 buckets that contain the objects to analyze, + and the scope of that analysis. (documented below) + properties: + bucketCriteria: + description: The property- and tag-based conditions that determine + which S3 buckets to include or exclude from the analysis. + Conflicts with bucket_definitions. (documented below) + properties: + excludes: + description: The property- or tag-based conditions that + determine which objects to exclude from the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleCriterion: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagCriterion: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. 
+ Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + type: object + type: object + type: array + type: object + includes: + description: The property- or tag-based conditions that + determine which objects to include in the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleCriterion: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagCriterion: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. 
+ type: string + type: object + type: array + type: object + type: object + type: array + type: object + type: object + bucketDefinitions: + description: An array of objects, one for each AWS account + that owns buckets to analyze. Each object specifies the + account ID for an account and one or more buckets to analyze + for the account. Conflicts with bucket_criteria. (documented + below) + items: + properties: + accountId: + description: The unique identifier for the AWS account + that owns the buckets. + type: string + buckets: + description: An array that lists the names of the buckets. + items: + type: string + type: array + type: object + type: array + scoping: + description: The property- and tag-based conditions that determine + which objects to include or exclude from the analysis. (documented + below) + properties: + excludes: + description: The property- or tag-based conditions that + determine which objects to exclude from the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleScopeTerm: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagScopeTerm: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an object + from the job. 
(documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + target: + description: The type of object to apply + the condition to. The only valid value + is S3_OBJECT. + type: string + type: object + type: object + type: array + type: object + includes: + description: The property- or tag-based conditions that + determine which objects to include in the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleScopeTerm: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagScopeTerm: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. 
+ Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + target: + description: The type of object to apply + the condition to. The only valid value + is S3_OBJECT. + type: string + type: object + type: object + type: array + type: object + type: object + type: object + samplingPercentage: + description: The sampling depth, as a percentage, to apply when + processing objects. This value determines the percentage of + eligible objects that the job analyzes. If this value is less + than 100, Amazon Macie selects the objects to analyze at random, + up to the specified percentage, and analyzes all the data in + those objects. + type: number + scheduleFrequency: + description: The recurrence pattern for running the job. To run + the job only once, don't specify a value for this property and + set the value for the job_type property to ONE_TIME. (documented + below) + properties: + dailySchedule: + description: Specifies a daily recurrence pattern for running + the job. + type: boolean + monthlySchedule: + description: Specifies a monthly recurrence pattern for running + the job. + type: number + weeklySchedule: + description: Specifies a weekly recurrence pattern for running + the job. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + customDataIdentifierIds: + description: The custom data identifiers to use for data analysis + and classification. + items: + type: string + type: array + description: + description: A custom description of the job. The description + can contain as many as 200 characters. + type: string + initialRun: + description: Specifies whether to analyze all existing, eligible + objects immediately after the job is created. + type: boolean + jobStatus: + description: 'The status for the job. Valid values are: CANCELLED, + RUNNING and USER_PAUSED' + type: string + jobType: + description: 'The schedule for running the job. Valid values are: + ONE_TIME - Run the job only once. If you specify this value, + don''t specify a value for the schedule_frequency property. + SCHEDULED - Run the job on a daily, weekly, or monthly basis. + If you specify this value, use the schedule_frequency property + to define the recurrence pattern for the job.' + type: string + name: + description: A custom name for the job. The name can contain as + many as 500 characters. Conflicts with name_prefix. + type: string + s3JobDefinition: + description: The S3 buckets that contain the objects to analyze, + and the scope of that analysis. (documented below) + properties: + bucketCriteria: + description: The property- and tag-based conditions that determine + which S3 buckets to include or exclude from the analysis. + Conflicts with bucket_definitions. 
(documented below) + properties: + excludes: + description: The property- or tag-based conditions that + determine which objects to exclude from the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleCriterion: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagCriterion: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + type: object + type: object + type: array + type: object + includes: + description: The property- or tag-based conditions that + determine which objects to include in the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. 
(documented below) + items: + properties: + simpleCriterion: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagCriterion: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + type: object + type: object + type: array + type: object + type: object + bucketDefinitions: + description: An array of objects, one for each AWS account + that owns buckets to analyze. Each object specifies the + account ID for an account and one or more buckets to analyze + for the account. Conflicts with bucket_criteria. (documented + below) + items: + properties: + accountId: + description: The unique identifier for the AWS account + that owns the buckets. + type: string + buckets: + description: An array that lists the names of the buckets. 
+ items: + type: string + type: array + type: object + type: array + scoping: + description: The property- and tag-based conditions that determine + which objects to include or exclude from the analysis. (documented + below) + properties: + excludes: + description: The property- or tag-based conditions that + determine which objects to exclude from the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleScopeTerm: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagScopeTerm: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + target: + description: The type of object to apply + the condition to. 
The only valid value + is S3_OBJECT. + type: string + type: object + type: object + type: array + type: object + includes: + description: The property- or tag-based conditions that + determine which objects to include in the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleScopeTerm: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagScopeTerm: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + target: + description: The type of object to apply + the condition to. The only valid value + is S3_OBJECT. 
+ type: string + type: object + type: object + type: array + type: object + type: object + type: object + samplingPercentage: + description: The sampling depth, as a percentage, to apply when + processing objects. This value determines the percentage of + eligible objects that the job analyzes. If this value is less + than 100, Amazon Macie selects the objects to analyze at random, + up to the specified percentage, and analyzes all the data in + those objects. + type: number + scheduleFrequency: + description: The recurrence pattern for running the job. To run + the job only once, don't specify a value for this property and + set the value for the job_type property to ONE_TIME. (documented + below) + properties: + dailySchedule: + description: Specifies a daily recurrence pattern for running + the job. + type: boolean + monthlySchedule: + description: Specifies a monthly recurrence pattern for running + the job. + type: number + weeklySchedule: + description: Specifies a weekly recurrence pattern for running + the job. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.jobType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.jobType) + || (has(self.initProvider) && has(self.initProvider.jobType))' + - message: spec.forProvider.s3JobDefinition is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.s3JobDefinition) + || (has(self.initProvider) && has(self.initProvider.s3JobDefinition))' + status: + description: ClassificationJobStatus defines the observed state of ClassificationJob. + properties: + atProvider: + properties: + createdAt: + description: The date and time, in UTC and extended RFC 3339 format, + when the job was created. + type: string + customDataIdentifierIds: + description: The custom data identifiers to use for data analysis + and classification. 
+ items: + type: string + type: array + description: + description: A custom description of the job. The description + can contain as many as 200 characters. + type: string + id: + description: The unique identifier (ID) of the macie classification + job. + type: string + initialRun: + description: Specifies whether to analyze all existing, eligible + objects immediately after the job is created. + type: boolean + jobArn: + type: string + jobId: + description: The unique identifier (ID) of the macie classification + job. + type: string + jobStatus: + description: 'The status for the job. Valid values are: CANCELLED, + RUNNING and USER_PAUSED' + type: string + jobType: + description: 'The schedule for running the job. Valid values are: + ONE_TIME - Run the job only once. If you specify this value, + don''t specify a value for the schedule_frequency property. + SCHEDULED - Run the job on a daily, weekly, or monthly basis. + If you specify this value, use the schedule_frequency property + to define the recurrence pattern for the job.' + type: string + name: + description: A custom name for the job. The name can contain as + many as 500 characters. Conflicts with name_prefix. + type: string + s3JobDefinition: + description: The S3 buckets that contain the objects to analyze, + and the scope of that analysis. (documented below) + properties: + bucketCriteria: + description: The property- and tag-based conditions that determine + which S3 buckets to include or exclude from the analysis. + Conflicts with bucket_definitions. (documented below) + properties: + excludes: + description: The property- or tag-based conditions that + determine which objects to exclude from the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. 
(documented below) + items: + properties: + simpleCriterion: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagCriterion: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + type: object + type: object + type: array + type: object + includes: + description: The property- or tag-based conditions that + determine which objects to include in the analysis. + (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleCriterion: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. 
+ Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagCriterion: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an S3 buckets + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + type: object + type: object + type: array + type: object + type: object + bucketDefinitions: + description: An array of objects, one for each AWS account + that owns buckets to analyze. Each object specifies the + account ID for an account and one or more buckets to analyze + for the account. Conflicts with bucket_criteria. (documented + below) + items: + properties: + accountId: + description: The unique identifier for the AWS account + that owns the buckets. + type: string + buckets: + description: An array that lists the names of the buckets. + items: + type: string + type: array + type: object + type: array + scoping: + description: The property- and tag-based conditions that determine + which objects to include or exclude from the analysis. (documented + below) + properties: + excludes: + description: The property- or tag-based conditions that + determine which objects to exclude from the analysis. 
+ (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleScopeTerm: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagScopeTerm: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + target: + description: The type of object to apply + the condition to. The only valid value + is S3_OBJECT. + type: string + type: object + type: object + type: array + type: object + includes: + description: The property- or tag-based conditions that + determine which objects to include in the analysis. 
+ (documented below) + properties: + and: + description: An array of conditions, one for each + condition that determines which objects to include + or exclude from the job. (documented below) + items: + properties: + simpleScopeTerm: + description: A property-based condition that + defines a property, operator, and one or more + values for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + values: + description: An array that lists the values + to use in the condition. + items: + type: string + type: array + type: object + tagScopeTerm: + description: A tag-based condition that defines + the operator and tag keys or tag key and value + pairs for including or excluding an object + from the job. (documented below) + properties: + comparator: + description: 'The operator to use in a condition. + Valid values are: EQ, GT, GTE, LT, LTE, + NE, CONTAINS, STARTS_WITH' + type: string + key: + description: The object property to use + in the condition. + type: string + tagValues: + description: The tag keys or tag key and + value pairs to use in the condition. + items: + properties: + key: + description: The object property to + use in the condition. + type: string + value: + description: The tag value. + type: string + type: object + type: array + target: + description: The type of object to apply + the condition to. The only valid value + is S3_OBJECT. + type: string + type: object + type: object + type: array + type: object + type: object + type: object + samplingPercentage: + description: The sampling depth, as a percentage, to apply when + processing objects. This value determines the percentage of + eligible objects that the job analyzes. 
If this value is less + than 100, Amazon Macie selects the objects to analyze at random, + up to the specified percentage, and analyzes all the data in + those objects. + type: number + scheduleFrequency: + description: The recurrence pattern for running the job. To run + the job only once, don't specify a value for this property and + set the value for the job_type property to ONE_TIME. (documented + below) + properties: + dailySchedule: + description: Specifies a daily recurrence pattern for running + the job. + type: boolean + monthlySchedule: + description: Specifies a monthly recurrence pattern for running + the job. + type: number + weeklySchedule: + description: Specifies a weekly recurrence pattern for running + the job. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + userPausedDetails: + description: If the current status of the job is USER_PAUSED, + specifies when the job was paused and when the job or job run + will expire and be cancelled if it isn't resumed. This value + is present only if the value for job-status is USER_PAUSED. + items: + properties: + jobExpiresAt: + type: string + jobImminentExpirationHealthEventArn: + type: string + jobPausedAt: + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/macie2.aws.upbound.io_findingsfilters.yaml b/package/crds/macie2.aws.upbound.io_findingsfilters.yaml index a027863861..1a8efdec1c 100644 --- a/package/crds/macie2.aws.upbound.io_findingsfilters.yaml +++ b/package/crds/macie2.aws.upbound.io_findingsfilters.yaml @@ -607,3 +607,586 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FindingsFilter is the Schema for the FindingsFilters API. Provides + a resource to manage an Amazon Macie Findings Filter. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FindingsFilterSpec defines the desired state of FindingsFilter + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: 'The action to perform on findings that meet the + filter criteria (finding_criteria). Valid values are: ARCHIVE, + suppress (automatically archive) the findings; and, NOOP, don''t + perform any action on the findings.' + type: string + description: + description: A custom description of the filter. The description + can contain as many as 512 characters. + type: string + findingCriteria: + description: The criteria to use to filter findings. + properties: + criterion: + description: A condition that specifies the property, operator, + and one or more values to use to filter the results. (documented + below) + items: + properties: + eq: + description: The value for the property matches (equals) + the specified value. If you specify multiple values, + Amazon Macie uses OR logic to join the values. + items: + type: string + type: array + x-kubernetes-list-type: set + eqExactMatch: + description: The value for the property exclusively + matches (equals an exact match for) all the specified + values. 
If you specify multiple values, Amazon Macie + uses AND logic to join the values. + items: + type: string + type: array + x-kubernetes-list-type: set + field: + description: The name of the field to be evaluated. + type: string + gt: + description: The value for the property is greater than + the specified value. + type: string + gte: + description: The value for the property is greater than + or equal to the specified value. + type: string + lt: + description: The value for the property is less than + the specified value. + type: string + lte: + description: The value for the property is less than + or equal to the specified value. + type: string + neq: + description: The value for the property doesn't match + (doesn't equal) the specified value. If you specify + multiple values, Amazon Macie uses OR logic to join + the values. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + name: + description: A custom name for the filter. The name must contain + at least 3 characters and can contain as many as 64 characters. + Conflicts with name_prefix. + type: string + position: + description: The position of the filter in the list of saved filters + on the Amazon Macie console. This value also determines the + order in which the filter is applied to findings, relative to + other filters that are also applied to the findings. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: 'The action to perform on findings that meet the + filter criteria (finding_criteria). Valid values are: ARCHIVE, + suppress (automatically archive) the findings; and, NOOP, don''t + perform any action on the findings.' + type: string + description: + description: A custom description of the filter. The description + can contain as many as 512 characters. + type: string + findingCriteria: + description: The criteria to use to filter findings. + properties: + criterion: + description: A condition that specifies the property, operator, + and one or more values to use to filter the results. (documented + below) + items: + properties: + eq: + description: The value for the property matches (equals) + the specified value. If you specify multiple values, + Amazon Macie uses OR logic to join the values. + items: + type: string + type: array + x-kubernetes-list-type: set + eqExactMatch: + description: The value for the property exclusively + matches (equals an exact match for) all the specified + values. If you specify multiple values, Amazon Macie + uses AND logic to join the values. + items: + type: string + type: array + x-kubernetes-list-type: set + field: + description: The name of the field to be evaluated. + type: string + gt: + description: The value for the property is greater than + the specified value. + type: string + gte: + description: The value for the property is greater than + or equal to the specified value. + type: string + lt: + description: The value for the property is less than + the specified value. 
+ type: string + lte: + description: The value for the property is less than + or equal to the specified value. + type: string + neq: + description: The value for the property doesn't match + (doesn't equal) the specified value. If you specify + multiple values, Amazon Macie uses OR logic to join + the values. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + name: + description: A custom name for the filter. The name must contain + at least 3 characters and can contain as many as 64 characters. + Conflicts with name_prefix. + type: string + position: + description: The position of the filter in the list of saved filters + on the Amazon Macie console. This value also determines the + order in which the filter is applied to findings, relative to + other filters that are also applied to the findings. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.action is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.action) + || (has(self.initProvider) && has(self.initProvider.action))' + - message: spec.forProvider.findingCriteria is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.findingCriteria) + || (has(self.initProvider) && has(self.initProvider.findingCriteria))' + status: + description: FindingsFilterStatus defines the observed state of FindingsFilter. + properties: + atProvider: + properties: + action: + description: 'The action to perform on findings that meet the + filter criteria (finding_criteria). Valid values are: ARCHIVE, + suppress (automatically archive) the findings; and, NOOP, don''t + perform any action on the findings.' 
+ type: string + arn: + description: The Amazon Resource Name (ARN) of the Findings Filter. + type: string + description: + description: A custom description of the filter. The description + can contain as many as 512 characters. + type: string + findingCriteria: + description: The criteria to use to filter findings. + properties: + criterion: + description: A condition that specifies the property, operator, + and one or more values to use to filter the results. (documented + below) + items: + properties: + eq: + description: The value for the property matches (equals) + the specified value. If you specify multiple values, + Amazon Macie uses OR logic to join the values. + items: + type: string + type: array + x-kubernetes-list-type: set + eqExactMatch: + description: The value for the property exclusively + matches (equals an exact match for) all the specified + values. If you specify multiple values, Amazon Macie + uses AND logic to join the values. + items: + type: string + type: array + x-kubernetes-list-type: set + field: + description: The name of the field to be evaluated. + type: string + gt: + description: The value for the property is greater than + the specified value. + type: string + gte: + description: The value for the property is greater than + or equal to the specified value. + type: string + lt: + description: The value for the property is less than + the specified value. + type: string + lte: + description: The value for the property is less than + or equal to the specified value. + type: string + neq: + description: The value for the property doesn't match + (doesn't equal) the specified value. If you specify + multiple values, Amazon Macie uses OR logic to join + the values. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + id: + description: The unique identifier (ID) of the macie Findings + Filter. + type: string + name: + description: A custom name for the filter. 
The name must contain + at least 3 characters and can contain as many as 64 characters. + Conflicts with name_prefix. + type: string + position: + description: The position of the filter in the list of saved filters + on the Amazon Macie console. This value also determines the + order in which the filter is applied to findings, relative to + other filters that are also applied to the findings. + type: number + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/mediaconvert.aws.upbound.io_queues.yaml b/package/crds/mediaconvert.aws.upbound.io_queues.yaml index 883a4358c4..d661c86b6c 100644 --- a/package/crds/mediaconvert.aws.upbound.io_queues.yaml +++ b/package/crds/mediaconvert.aws.upbound.io_queues.yaml @@ -453,3 +453,432 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Queue is the Schema for the Queues API. Provides an AWS Elemental + MediaConvert Queue. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: QueueSpec defines the desired state of Queue + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description of the queue + type: string + pricingPlan: + description: Specifies whether the pricing plan for the queue + is on-demand or reserved. Valid values are ON_DEMAND or RESERVED. + Default to ON_DEMAND. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + reservationPlanSettings: + description: A detail pricing plan of the reserved queue. See + below. + properties: + commitment: + description: The length of the term of your reserved queue + pricing plan commitment. Valid value is ONE_YEAR. + type: string + renewalType: + description: Specifies whether the term of your reserved queue + pricing plan. Valid values are AUTO_RENEW or EXPIRE. + type: string + reservedSlots: + description: Specifies the number of reserved transcode slots + (RTS) for queue. + type: number + type: object + status: + description: A status of the queue. Valid values are ACTIVE or + RESERVED. Default to PAUSED. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description of the queue + type: string + pricingPlan: + description: Specifies whether the pricing plan for the queue + is on-demand or reserved. Valid values are ON_DEMAND or RESERVED. + Default to ON_DEMAND. + type: string + reservationPlanSettings: + description: A detail pricing plan of the reserved queue. See + below. + properties: + commitment: + description: The length of the term of your reserved queue + pricing plan commitment. Valid value is ONE_YEAR. + type: string + renewalType: + description: Specifies whether the term of your reserved queue + pricing plan. Valid values are AUTO_RENEW or EXPIRE. + type: string + reservedSlots: + description: Specifies the number of reserved transcode slots + (RTS) for queue. + type: number + type: object + status: + description: A status of the queue. Valid values are ACTIVE or + RESERVED. Default to PAUSED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: QueueStatus defines the observed state of Queue. + properties: + atProvider: + properties: + arn: + description: The Arn of the queue + type: string + description: + description: A description of the queue + type: string + id: + description: The same as name + type: string + pricingPlan: + description: Specifies whether the pricing plan for the queue + is on-demand or reserved. Valid values are ON_DEMAND or RESERVED. + Default to ON_DEMAND. 
+ type: string + reservationPlanSettings: + description: A detail pricing plan of the reserved queue. See + below. + properties: + commitment: + description: The length of the term of your reserved queue + pricing plan commitment. Valid value is ONE_YEAR. + type: string + renewalType: + description: Specifies whether the term of your reserved queue + pricing plan. Valid values are AUTO_RENEW or EXPIRE. + type: string + reservedSlots: + description: Specifies the number of reserved transcode slots + (RTS) for queue. + type: number + type: object + status: + description: A status of the queue. Valid values are ACTIVE or + RESERVED. Default to PAUSED. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/medialive.aws.upbound.io_channels.yaml b/package/crds/medialive.aws.upbound.io_channels.yaml index e1d9030e7d..de6e9e417d 100644 --- a/package/crds/medialive.aws.upbound.io_channels.yaml +++ b/package/crds/medialive.aws.upbound.io_channels.yaml @@ -10935,3 +10935,9420 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Channel is the Schema for the Channels API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ChannelSpec defines the desired state of Channel + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cdiInputSpecification: + description: Specification of CDI inputs for this channel. See + CDI Input Specification for more details. + properties: + resolution: + description: '- Maximum CDI input resolution.' + type: string + type: object + channelClass: + description: Concise argument description. + type: string + destinations: + description: Destinations for channel. See Destinations for more + details. + items: + properties: + id: + description: User-specified id. Ths is used in an output + group or an output. + type: string + mediaPackageSettings: + description: Destination settings for a MediaPackage output; + one destination for both encoders. See Media Package Settings + for more details. 
+ items: + properties: + channelId: + description: ID of the channel in MediaPackage that + is the destination for this output group. + type: string + type: object + type: array + multiplexSettings: + description: Destination settings for a Multiplex output; + one destination for both encoders. See Multiplex Settings + for more details. + properties: + multiplexId: + description: The ID of the Multiplex that the encoder + is providing output to. + type: string + programName: + description: The program name of the Multiplex program + that the encoder is providing output to. + type: string + type: object + settings: + description: Destination settings for a standard output; + one destination for each redundant encoder. See Settings + for more details. + items: + properties: + passwordParam: + description: Key used to extract the password from + EC2 Parameter store. + type: string + streamName: + description: Stream name RTMP destinations (URLs of + type rtmp://) + type: string + url: + description: A URL specifying a destination. + type: string + username: + description: Username for destination. + type: string + type: object + type: array + type: object + type: array + encoderSettings: + description: Encoder settings. See Encoder Settings for more details. + properties: + audioDescriptions: + description: Audio descriptions for the channel. See Audio + Descriptions for more details. + items: + properties: + audioNormalizationSettings: + description: Advanced audio normalization settings. + See Audio Normalization Settings for more details. + properties: + algorithm: + description: Audio normalization algorithm to use. + itu17701 conforms to the CALM Act specification, + itu17702 to the EBU R-128 specification. + type: string + algorithmControl: + description: Algorithm control for the audio description. + type: string + targetLkfs: + description: Target LKFS (loudness) to adjust volume + to. 
+ type: number + type: object + audioSelectorName: + description: The name of the audio selector in the input + that MediaLive should monitor to detect silence. Select + your most important rendition. If you didn't create + an audio selector in this input, leave blank. + type: string + audioType: + description: Applies only if audioTypeControl is useConfigured. + The values for audioType are defined in ISO-IEC 13818-1. + type: string + audioTypeControl: + description: Determined how audio type is determined. + type: string + audioWatermarkSettings: + description: Settings to configure one or more solutions + that insert audio watermarks in the audio encode. + See Audio Watermark Settings for more details. + properties: + nielsenWatermarksSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + nielsenCbetSettings: + description: Used to insert watermarks of type + Nielsen CBET. See Nielsen CBET Settings for + more details. + properties: + cbetCheckDigitString: + type: string + cbetStepaside: + description: Determines the method of CBET + insertion mode when prior encoding is + detected on the same layer. + type: string + csid: + description: CBET source ID to use in the + watermark. + type: string + type: object + nielsenDistributionType: + description: Distribution types to assign to + the watermarks. Options are PROGRAM_CONTENT + and FINAL_DISTRIBUTOR. + type: string + nielsenNaesIiNwSettings: + description: Used to insert watermarks of type + Nielsen NAES, II (N2) and Nielsen NAES VI + (NW). See Nielsen NAES II NW Settings for + more details. + items: + properties: + checkDigitString: + type: string + sid: + description: The Nielsen Source ID to + include in the watermark. + type: number + type: object + type: array + type: object + type: object + codecSettings: + description: Audio codec settings. See Audio Codec Settings + for more details. 
+ properties: + aacSettings: + description: Aac Settings. See AAC Settings for + more details. + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + inputType: + description: Set to "broadcasterMixedAd" when + input contains pre-mixed main audio + AD (narration) + as a stereo pair. + type: string + profile: + description: AAC profile. + type: string + rateControlMode: + description: The rate control mode. + type: string + rawFormat: + description: Sets LATM/LOAS AAC output for raw + containers. + type: string + sampleRate: + description: Sample rate in Hz. + type: number + spec: + description: Use MPEG-2 AAC audio instead of + MPEG-4 AAC audio for raw or MPEG-2 Transport + Stream containers. + type: string + vbrQuality: + description: VBR Quality Level - Only used if + rateControlMode is VBR. + type: string + type: object + ac3Settings: + description: Ac3 Settings. See AC3 Settings for + more details. + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + bitstreamMode: + description: Specifies the bitstream mode (bsmod) + for the emitted AC-3 stream. + type: string + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + dialnorm: + description: Sets the dialnorm of the output. + type: number + drcProfile: + description: If set to filmStandard, adds dynamic + range compression signaling to the output + bitstream as defined in the Dolby Digital + specification. + type: string + lfeFilter: + description: When set to enabled, applies a + 120Hz lowpass filter to the LFE channel prior + to encoding. + type: string + metadataControl: + description: Metadata control. + type: string + type: object + eac3AtmosSettings: + description: '- Eac3 Atmos Settings. See EAC3 Atmos + Settings' + properties: + bitrate: + description: Average bitrate in bits/second. 
+ type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + dialnorm: + description: Sets the dialnorm of the output. + type: number + drcLine: + description: Sets the Dolby dynamic range compression + profile. + type: string + drcRf: + description: Sets the profile for heavy Dolby + dynamic range compression. + type: string + heightTrim: + description: Height dimensional trim. + type: number + surroundTrim: + description: Surround dimensional trim. + type: number + type: object + eac3Settings: + description: '- Eac3 Settings. See EAC3 Settings' + properties: + attenuationControl: + description: Sets the attenuation control. + type: string + bitrate: + description: Average bitrate in bits/second. + type: number + bitstreamMode: + description: Specifies the bitstream mode (bsmod) + for the emitted AC-3 stream. + type: string + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + dcFilter: + type: string + dialnorm: + description: Sets the dialnorm of the output. + type: number + drcLine: + description: Sets the Dolby dynamic range compression + profile. + type: string + drcRf: + description: Sets the profile for heavy Dolby + dynamic range compression. + type: string + lfeControl: + type: string + lfeFilter: + description: When set to enabled, applies a + 120Hz lowpass filter to the LFE channel prior + to encoding. + type: string + loRoCenterMixLevel: + description: H264 level. + type: number + loRoSurroundMixLevel: + description: H264 level. + type: number + ltRtCenterMixLevel: + description: H264 level. + type: number + ltRtSurroundMixLevel: + description: H264 level. + type: number + metadataControl: + description: Metadata control. 
+ type: string + passthroughControl: + type: string + phaseControl: + type: string + stereoDownmix: + type: string + surroundExMode: + type: string + surroundMode: + type: string + type: object + mp2Settings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + sampleRate: + description: Sample rate in Hz. + type: number + type: object + passThroughSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + type: object + wavSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + bitDepth: + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + sampleRate: + description: Sample rate in Hz. + type: number + type: object + type: object + languageCode: + description: Selects a specific three-letter language + code from within an audio source. + type: string + languageCodeControl: + type: string + name: + description: Name of the Channel. + type: string + remixSettings: + description: Destination settings for a standard output; + one destination for each redundant encoder. See Settings + for more details. 
+ properties: + channelMappings: + items: + properties: + inputChannelLevels: + items: + properties: + gain: + type: number + inputChannel: + type: number + type: object + type: array + outputChannel: + type: number + type: object + type: array + channelsIn: + type: number + channelsOut: + type: number + type: object + streamName: + description: Stream name RTMP destinations (URLs of + type rtmp://) + type: string + type: object + type: array + availBlanking: + description: Settings for ad avail blanking. See Avail Blanking + for more details. + properties: + availBlankingImage: + description: Blanking image to be used. See Avail Blanking + Image for more details. + properties: + passwordParam: + description: Key used to extract the password from + EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible to the live + stream. + type: string + username: + description: Username for destination. + type: string + type: object + state: + description: When set to enabled, causes video, audio + and captions to be blanked when insertion metadata is + added. + type: string + type: object + captionDescriptions: + description: Caption Descriptions. See Caption Descriptions + for more details. + items: + properties: + accessibility: + description: Indicates whether the caption track implements + accessibility features such as written descriptions + of spoken dialog, music, and sounds. + type: string + captionSelectorName: + description: Specifies which input caption selector + to use as a caption source when generating output + captions. This field should match a captionSelector + name. + type: string + destinationSettings: + description: Additional settings for captions destination + that depend on the destination type. See Destination + Settings for more details. + properties: + aribDestinationSettings: + description: ARIB Destination Settings. + type: object + burnInDestinationSettings: + description: Burn In Destination Settings. 
See Burn + In Destination Settings for more details. + properties: + alignment: + description: justify live subtitles and center-justify + pre-recorded subtitles. All burn-in and DVB-Sub + font settings must match. + type: string + backgroundColor: + description: in and DVB-Sub font settings must + match. + type: string + backgroundOpacity: + description: in and DVB-Sub font settings must + match. + type: number + font: + description: in. File extension must be ‘ttf’ + or ‘tte’. Although the user can select output + fonts for many different types of input captions, + embedded, STL and teletext sources use a strict + grid system. Using external fonts with these + caption sources could cause unexpected display + of proportional fonts. All burn-in and DVB-Sub + font settings must match. See Font for more + details. + properties: + passwordParam: + description: Key used to extract the password + from EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible + to the live stream. + type: string + username: + description: Username for destination. + type: string + type: object + fontColor: + description: in captions. This option is not + valid for source captions that are STL, 608/embedded + or teletext. These source settings are already + pre-defined by the caption stream. All burn-in + and DVB-Sub font settings must match. + type: string + fontOpacity: + description: in captions. 255 is opaque; 0 is + transparent. All burn-in and DVB-Sub font + settings must match. + type: number + fontResolution: + description: in and DVB-Sub font settings must + match. + type: number + fontSize: + description: in and DVB-Sub font settings must + match. + type: string + outlineColor: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: string + outlineSize: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. 
+ type: number + shadowColor: + description: in and DVB-Sub font settings must + match. + type: string + shadowOpacity: + description: in and DVB-Sub font settings must + match. + type: number + shadowXOffset: + description: 2 would result in a shadow offset + 2 pixels to the left. All burn-in and DVB-Sub + font settings must match. + type: number + shadowYOffset: + description: 2 would result in a shadow offset + 2 pixels above the text. All burn-in and DVB-Sub + font settings must match. + type: number + teletextGridControl: + description: Sub/Burn-in outputs. + type: string + xPosition: + description: in and DVB-Sub font settings must + match. + type: number + yPosition: + description: in and DVB-Sub font settings must + match. + type: number + type: object + dvbSubDestinationSettings: + description: DVB Sub Destination Settings. See DVB + Sub Destination Settings for more details. + properties: + alignment: + description: justify live subtitles and center-justify + pre-recorded subtitles. All burn-in and DVB-Sub + font settings must match. + type: string + backgroundColor: + description: in and DVB-Sub font settings must + match. + type: string + backgroundOpacity: + description: in and DVB-Sub font settings must + match. + type: number + font: + description: in. File extension must be ‘ttf’ + or ‘tte’. Although the user can select output + fonts for many different types of input captions, + embedded, STL and teletext sources use a strict + grid system. Using external fonts with these + caption sources could cause unexpected display + of proportional fonts. All burn-in and DVB-Sub + font settings must match. See Font for more + details. + properties: + passwordParam: + description: Key used to extract the password + from EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible + to the live stream. + type: string + username: + description: Username for destination. 
+ type: string + type: object + fontColor: + description: in captions. This option is not + valid for source captions that are STL, 608/embedded + or teletext. These source settings are already + pre-defined by the caption stream. All burn-in + and DVB-Sub font settings must match. + type: string + fontOpacity: + description: in captions. 255 is opaque; 0 is + transparent. All burn-in and DVB-Sub font + settings must match. + type: number + fontResolution: + description: in and DVB-Sub font settings must + match. + type: number + fontSize: + description: in and DVB-Sub font settings must + match. + type: string + outlineColor: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: string + outlineSize: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: number + shadowColor: + description: in and DVB-Sub font settings must + match. + type: string + shadowOpacity: + description: in and DVB-Sub font settings must + match. + type: number + shadowXOffset: + description: 2 would result in a shadow offset + 2 pixels to the left. All burn-in and DVB-Sub + font settings must match. + type: number + shadowYOffset: + description: 2 would result in a shadow offset + 2 pixels above the text. All burn-in and DVB-Sub + font settings must match. + type: number + teletextGridControl: + description: Sub/Burn-in outputs. + type: string + xPosition: + description: in and DVB-Sub font settings must + match. + type: number + yPosition: + description: in and DVB-Sub font settings must + match. + type: number + type: object + ebuTtDDestinationSettings: + description: EBU TT D Destination Settings. See + EBU TT D Destination Settings for more details. + properties: + copyrightHolder: + description: – Complete this field if you want + to include the name of the copyright holder + in the copyright tag in the captions metadata. 
+ type: string + fillLineGap: + description: 'line captions). - enabled: Fill + with the captions background color (as specified + in the input captions). - disabled: Leave + the gap unfilled.' + type: string + fontFamily: + description: TT captions. Valid only if styleControl + is set to include. If you leave this field + empty, the font family is set to “monospaced”. + (If styleControl is set to exclude, the font + family is always set to “monospaced”.) You + specify only the font family. All other style + information (color, bold, position and so + on) is copied from the input captions. The + size is always set to 100% to allow the downstream + player to choose the size. - Enter a list + of font families, as a comma-separated list + of font names, in order of preference. The + name can be a font family (such as “Arial”), + or a generic font family (such as “serif”), + or “default” (to let the downstream player + choose the font). - Leave blank to set the + family to “monospace”. + type: string + styleControl: + description: 'TT captions. - include: Take the + style information (font color, font position, + and so on) from the source captions and include + that information in the font data attached + to the EBU-TT captions. This option is valid + only if the source captions are Embedded or + Teletext. - exclude: In the font data attached + to the EBU-TT captions, set the font family + to “monospaced”. Do not include any other + style information.' + type: string + type: object + embeddedDestinationSettings: + description: Embedded Destination Settings. + type: object + embeddedPlusScte20DestinationSettings: + description: Embedded Plus SCTE20 Destination Settings. + type: object + rtmpCaptionInfoDestinationSettings: + description: RTMP Caption Info Destination Settings. + type: object + scte20PlusEmbeddedDestinationSettings: + description: SCTE20 Plus Embedded Destination Settings. 
+ type: object + scte27DestinationSettings: + description: – SCTE27 Destination Settings. + type: object + smpteTtDestinationSettings: + description: – SMPTE TT Destination Settings. + type: object + teletextDestinationSettings: + description: – Teletext Destination Settings. + type: object + ttmlDestinationSettings: + description: – TTML Destination Settings. See TTML + Destination Settings for more details. + properties: + styleControl: + description: 'TT captions. - include: Take the + style information (font color, font position, + and so on) from the source captions and include + that information in the font data attached + to the EBU-TT captions. This option is valid + only if the source captions are Embedded or + Teletext. - exclude: In the font data attached + to the EBU-TT captions, set the font family + to “monospaced”. Do not include any other + style information.' + type: string + type: object + webvttDestinationSettings: + description: WebVTT Destination Settings. See WebVTT + Destination Settings for more details. + properties: + styleControl: + description: 'TT captions. - include: Take the + style information (font color, font position, + and so on) from the source captions and include + that information in the font data attached + to the EBU-TT captions. This option is valid + only if the source captions are Embedded or + Teletext. - exclude: In the font data attached + to the EBU-TT captions, set the font family + to “monospaced”. Do not include any other + style information.' + type: string + type: object + type: object + languageCode: + description: Selects a specific three-letter language + code from within an audio source. + type: string + languageDescription: + description: Human readable information to indicate + captions available for players (eg. English, or Spanish). + type: string + name: + description: Name of the Channel. 
+ type: string + type: object + type: array + globalConfiguration: + description: Configuration settings that apply to the event + as a whole. See Global Configuration for more details. + properties: + initialAudioGain: + description: – Value to set the initial audio gain for + the Live Event. + type: number + inputEndAction: + description: of-file). When switchAndLoopInputs is configured + the encoder will restart at the beginning of the first + input. When “none” is configured the encoder will transcode + either black, a solid color, or a user specified slate + images per the “Input Loss Behavior” configuration until + the next input switch occurs (which is controlled through + the Channel Schedule API). + type: string + inputLossBehavior: + description: Settings for system actions when input is + lost. See Input Loss Behavior for more details. + properties: + blackFrameMsec: + type: number + inputLossImageColor: + type: string + inputLossImageSlate: + properties: + passwordParam: + description: Key used to extract the password + from EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible to the + live stream. + type: string + username: + description: Username for destination. + type: string + type: object + inputLossImageType: + type: string + repeatFrameMsec: + type: number + type: object + outputLockingMode: + description: MediaLive will attempt to synchronize the + output of each pipeline to the other. EPOCH_LOCKING + - MediaLive will attempt to synchronize the output of + each pipeline to the Unix epoch. + type: string + outputTimingSource: + description: – Indicates whether the rate of frames emitted + by the Live encoder should be paced by its system clock + (which optionally may be locked to another source via + NTP) or should be locked to the clock of the source + that is providing the input stream. 
+ type: string + supportLowFramerateInputs: + description: – Adjusts video input buffer for streams + with very low video framerates. This is commonly set + to enabled for music channels with less than one video + frame per second. + type: string + type: object + motionGraphicsConfiguration: + description: Settings for motion graphics. See Motion Graphics + Configuration for more details. + properties: + motionGraphicsInsertion: + description: – Motion Graphics Insertion. + type: string + motionGraphicsSettings: + description: – Motion Graphics Settings. See Motion Graphics + Settings for more details. + properties: + htmlMotionGraphicsSettings: + description: – Html Motion Graphics Settings. + type: object + type: object + type: object + nielsenConfiguration: + description: Nielsen configuration settings. See Nielsen Configuration + for more details. + properties: + distributorId: + description: – Enter the Distributor ID assigned to your + organization by Nielsen. + type: string + nielsenPcmToId3Tagging: + description: – Enables Nielsen PCM to ID3 tagging. + type: string + type: object + outputGroups: + description: Output groups for the channel. See Output Groups + for more details. + items: + properties: + name: + description: Name of the Channel. + type: string + outputGroupSettings: + description: Settings associated with the output group. + See Output Group Settings for more details. + properties: + archiveGroupSettings: + description: Archive group settings. See Archive + Group Settings for more details. + items: + properties: + archiveCdnSettings: + description: Parameters that control the interactions + with the CDN. See Archive CDN Settings for + more details. + properties: + archiveS3Settings: + description: Archive S3 Settings. See + Archive S3 Settings for more details. + properties: + cannedAcl: + description: Specify the canned ACL + to apply to each S3 request. 
+ type: string + type: object + type: object + destination: + description: A director and base filename + where archive files should be written. See + Destination for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + rolloverInterval: + description: Number of seconds to write to + archive file before closing and starting + a new one. + type: number + type: object + type: array + frameCaptureGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + frameCaptureCdnSettings: + description: Destination settings for a standard + output; one destination for each redundant + encoder. See Settings for more details. + properties: + frameCaptureS3Settings: + description: Destination settings for a + standard output; one destination for each + redundant encoder. See Settings for more + details. + properties: + cannedAcl: + description: Specify the canned ACL + to apply to each S3 request. + type: string + type: object + type: object + type: object + hlsGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + adMarkers: + description: The ad marker type for this output + group. + items: + type: string + type: array + baseUrlContent: + type: string + baseUrlContent1: + type: string + baseUrlManifest: + type: string + baseUrlManifest1: + type: string + captionLanguageMappings: + items: + properties: + captionChannel: + type: number + languageCode: + description: Selects a specific three-letter + language code from within an audio source. 
+ type: string + languageDescription: + description: Human readable information + to indicate captions available for players + (eg. English, or Spanish). + type: string + type: object + type: array + captionLanguageSetting: + type: string + clientCache: + type: string + codecSpecification: + type: string + constantIv: + type: string + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + directoryStructure: + type: string + discontinuityTags: + description: Key-value map of resource tags. + type: string + encryptionType: + type: string + hlsCdnSettings: + description: Destination settings for a standard + output; one destination for each redundant + encoder. See Settings for more details. + items: + properties: + hlsAkamaiSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. + type: number + filecacheDuration: + type: number + httpTransferMode: + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. + type: number + salt: + type: string + token: + type: string + type: object + hlsBasicPutSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. 
+ type: number + filecacheDuration: + type: number + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. + type: number + type: object + hlsMediaStoreSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. + type: number + filecacheDuration: + type: number + mediaStoreStorageClass: + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. + type: number + type: object + hlsS3Settings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + cannedAcl: + description: Specify the canned ACL + to apply to each S3 request. + type: string + type: object + hlsWebdavSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. + type: number + filecacheDuration: + type: number + httpTransferMode: + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. 
+ type: number + type: object + type: object + type: array + hlsId3SegmentTagging: + type: string + iframeOnlyPlaylists: + type: string + incompleteSegmentBehavior: + type: string + indexNSegments: + type: number + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + ivInManifest: + type: string + ivSource: + description: The source for the timecode that + will be associated with the events outputs. + type: string + keepSegments: + type: number + keyFormat: + type: string + keyFormatVersions: + type: string + keyProviderSettings: + description: Destination settings for a standard + output; one destination for each redundant + encoder. See Settings for more details. + properties: + staticKeySettings: + description: Destination settings for a + standard output; one destination for each + redundant encoder. See Settings for more + details. + items: + properties: + keyProviderServer: + properties: + passwordParam: + description: Key used to extract + the password from EC2 Parameter + store. + type: string + uri: + description: – Path to a file + accessible to the live stream. + type: string + username: + description: Username for destination. + type: string + type: object + staticKeyValue: + type: string + type: object + type: array + type: object + manifestCompression: + type: string + manifestDurationFormat: + type: string + minSegmentLength: + type: number + mode: + type: string + outputSelection: + type: string + programDateTime: + type: string + programDateTimeClock: + type: string + programDateTimePeriod: + type: number + redundantManifest: + type: string + segmentLength: + type: number + segmentsPerSubdirectory: + type: number + streamInfResolution: + description: '- Maximum CDI input resolution.' + type: string + timedMetadataId3Frame: + description: Indicates ID3 frame that has the + timecode. 
+ type: string + timedMetadataId3Period: + type: number + timestampDeltaMilliseconds: + type: number + tsFileMode: + type: string + type: object + mediaPackageGroupSettings: + description: Media package group settings. See Media + Package Group Settings for more details. + properties: + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + type: object + msSmoothGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + acquisitionPointId: + description: User-specified id. Ths is used + in an output group or an output. + type: string + audioOnlyTimecodeControl: + type: string + certificateMode: + description: Setting to allow self signed or + verified RTMP certificates. + type: string + connectionRetryInterval: + description: Number of seconds to wait before + retrying connection to the flash media server + if the connection is lost. + type: number + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + eventId: + description: User-specified id. Ths is used + in an output group or an output. + type: string + eventIdMode: + type: string + eventStopBehavior: + type: string + filecacheDuration: + type: number + fragmentLength: + type: number + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to wait until + a restart is initiated. 
+ type: number + segmentationMode: + type: string + sendDelayMs: + type: number + sparseTrackType: + type: string + streamManifestBehavior: + type: string + timestampOffset: + type: string + timestampOffsetMode: + type: string + type: object + multiplexGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + type: object + rtmpGroupSettings: + description: RTMP group settings. See RTMP Group + Settings for more details. + properties: + adMarkers: + description: The ad marker type for this output + group. + items: + type: string + type: array + authenticationScheme: + description: Authentication scheme to use when + connecting with CDN. + type: string + cacheFullBehavior: + description: Controls behavior when content + cache fills up. + type: string + cacheLength: + description: Cache length in seconds, is used + to calculate buffer size. + type: number + captionData: + description: Controls the types of data that + passes to onCaptionInfo outputs. + type: string + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + restartDelay: + description: Number of seconds to wait until + a restart is initiated. + type: number + type: object + udpGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + timedMetadataId3Frame: + description: Indicates ID3 frame that has the + timecode. + type: string + timedMetadataId3Period: + type: number + type: object + type: object + outputs: + description: List of outputs. See Outputs for more details. + items: + properties: + audioDescriptionNames: + description: The names of the audio descriptions + used as audio sources for the output. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + captionDescriptionNames: + description: The names of the caption descriptions + used as caption sources for the output. + items: + type: string + type: array + x-kubernetes-list-type: set + outputName: + description: The name used to identify an output. + type: string + outputSettings: + description: Settings for output. See Output Settings + for more details. + properties: + archiveOutputSettings: + description: Archive output settings. See + Archive Output Settings for more details. + properties: + containerSettings: + description: Settings specific to the + container type of the file. See Container + Settings for more details. + properties: + m2tsSettings: + description: M2TS Settings. See M2TS + Settings for more details. + properties: + absentInputAudioBehavior: + type: string + arib: + type: string + aribCaptionsPid: + description: Selects a specific + PID from within a source. + type: string + aribCaptionsPidControl: + type: string + audioBufferModel: + type: string + audioFramesPerPes: + type: number + audioPids: + type: string + audioStreamType: + type: string + bitrate: + description: Average bitrate in + bits/second. + type: number + bufferModel: + type: string + ccDescriptor: + type: string + dvbNitSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + networkId: + description: User-specified + id. Ths is used in an output + group or an output. + type: number + networkName: + description: Name of the Channel. + type: string + repInterval: + type: number + type: object + dvbSdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + outputSdt: + type: string + repInterval: + type: number + serviceName: + description: Name of the Channel. 
+ type: string + serviceProviderName: + description: Name of the Channel. + type: string + type: object + dvbSubPids: + type: string + dvbTdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + repInterval: + type: number + type: object + dvbTeletextPid: + description: Selects a specific + PID from within a source. + type: string + ebif: + type: string + ebpAudioInterval: + type: string + ebpLookaheadMs: + type: number + ebpPlacement: + type: string + ecmPid: + description: Selects a specific + PID from within a source. + type: string + esRateInPes: + type: string + etvPlatformPid: + description: Selects a specific + PID from within a source. + type: string + etvSignalPid: + description: Selects a specific + PID from within a source. + type: string + fragmentTime: + type: number + klv: + type: string + klvDataPids: + type: string + nielsenId3Behavior: + type: string + nullPacketBitrate: + description: Average bitrate in + bits/second. + type: number + patInterval: + type: number + pcrControl: + type: string + pcrPeriod: + type: number + pcrPid: + description: Selects a specific + PID from within a source. + type: string + pmtInterval: + type: number + pmtPid: + description: Selects a specific + PID from within a source. + type: string + programNum: + type: number + rateMode: + type: string + scte27Pids: + type: string + scte35Control: + type: string + scte35Pid: + description: PID from which to + read SCTE-35 messages. + type: string + segmentationMarkers: + type: string + segmentationStyle: + type: string + segmentationTime: + type: number + timedMetadataBehavior: + type: string + timedMetadataPid: + description: Selects a specific + PID from within a source. + type: string + transportStreamId: + description: User-specified id. + Ths is used in an output group + or an output. 
+ type: number + videoPid: + description: Selects a specific + PID from within a source. + type: string + type: object + rawSettings: + description: Raw Settings. This can + be set as an empty block. + type: object + type: object + extension: + description: Output file extension. + type: string + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + type: object + frameCaptureOutputSettings: + description: Settings for output. See Output + Settings for more details. + properties: + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + type: object + hlsOutputSettings: + description: Settings for output. See Output + Settings for more details. + properties: + h265PackagingType: + type: string + hlsSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + audioOnlyHlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. + properties: + audioGroupId: + description: 'Specifies the GROUP-ID + in the #EXT-X-MEDIA tag of the + target HLS audio rendition.' + type: string + audioOnlyImage: + properties: + passwordParam: + description: Key used to extract + the password from EC2 Parameter + store. + type: string + uri: + description: – Path to a + file accessible to the live + stream. + type: string + username: + description: Username for + destination. + type: string + type: object + audioTrackType: + type: string + segmentType: + type: string + type: object + fmp4HlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. 
+ properties: + audioRenditionSets: + type: string + nielsenId3Behavior: + type: string + timedMetadataBehavior: + type: string + type: object + frameCaptureHlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. + type: object + standardHlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. + properties: + audioRenditionSets: + type: string + m3u8Settings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + audioFramesPerPes: + type: number + audioPids: + type: string + ecmPid: + description: Selects a specific + PID from within a source. + type: string + nielsenId3Behavior: + type: string + patInterval: + type: number + pcrControl: + type: string + pcrPeriod: + type: number + pcrPid: + description: Selects a specific + PID from within a source. + type: string + pmtInterval: + type: number + pmtPid: + description: Selects a specific + PID from within a source. + type: string + programNum: + type: number + scte35Behavior: + type: string + scte35Pid: + description: PID from which + to read SCTE-35 messages. + type: string + timedMetadataBehavior: + type: string + timedMetadataPid: + description: Selects a specific + PID from within a source. + type: string + transportStreamId: + description: User-specified + id. Ths is used in an output + group or an output. + type: number + videoPid: + description: Selects a specific + PID from within a source. + type: string + type: object + type: object + type: object + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. 
+ type: string + segmentModifier: + type: string + type: object + mediaPackageOutputSettings: + description: Media package output settings. + This can be set as an empty block. + type: object + msSmoothOutputSettings: + description: Settings for output. See Output + Settings for more details. + properties: + h265PackagingType: + type: string + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + type: object + multiplexOutputSettings: + description: Multiplex output settings. See + Multiplex Output Settings for more details. + properties: + destination: + description: A director and base filename + where archive files should be written. + See Destination for more details. + properties: + destinationRefId: + description: Reference ID for the + destination. + type: string + type: object + type: object + rtmpOutputSettings: + description: RTMP output settings. See RTMP + Output Settings for more details. + properties: + certificateMode: + description: Setting to allow self signed + or verified RTMP certificates. + type: string + connectionRetryInterval: + description: Number of seconds to wait + before retrying connection to the flash + media server if the connection is lost. + type: number + destination: + description: A director and base filename + where archive files should be written. + See Destination for more details. + properties: + destinationRefId: + description: Reference ID for the + destination. + type: string + type: object + numRetries: + description: Number of retry attempts. + type: number + type: object + udpOutputSettings: + description: UDP output settings. See UDP + Output Settings for more details. + properties: + bufferMsec: + description: UDP output buffering in milliseconds. + type: number + containerSettings: + description: Settings specific to the + container type of the file. See Container + Settings for more details. 
+ properties: + m2tsSettings: + description: M2TS Settings. See M2TS + Settings for more details. + properties: + absentInputAudioBehavior: + type: string + arib: + type: string + aribCaptionsPid: + description: Selects a specific + PID from within a source. + type: string + aribCaptionsPidControl: + type: string + audioBufferModel: + type: string + audioFramesPerPes: + type: number + audioPids: + type: string + audioStreamType: + type: string + bitrate: + description: Average bitrate in + bits/second. + type: number + bufferModel: + type: string + ccDescriptor: + type: string + dvbNitSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + networkId: + description: User-specified + id. Ths is used in an output + group or an output. + type: number + networkName: + description: Name of the Channel. + type: string + repInterval: + type: number + type: object + dvbSdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + outputSdt: + type: string + repInterval: + type: number + serviceName: + description: Name of the Channel. + type: string + serviceProviderName: + description: Name of the Channel. + type: string + type: object + dvbSubPids: + type: string + dvbTdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + repInterval: + type: number + type: object + dvbTeletextPid: + description: Selects a specific + PID from within a source. + type: string + ebif: + type: string + ebpAudioInterval: + type: string + ebpLookaheadMs: + type: number + ebpPlacement: + type: string + ecmPid: + description: Selects a specific + PID from within a source. 
+ type: string + esRateInPes: + type: string + etvPlatformPid: + description: Selects a specific + PID from within a source. + type: string + etvSignalPid: + description: Selects a specific + PID from within a source. + type: string + fragmentTime: + type: number + klv: + type: string + klvDataPids: + type: string + nielsenId3Behavior: + type: string + nullPacketBitrate: + description: Average bitrate in + bits/second. + type: number + patInterval: + type: number + pcrControl: + type: string + pcrPeriod: + type: number + pcrPid: + description: Selects a specific + PID from within a source. + type: string + pmtInterval: + type: number + pmtPid: + description: Selects a specific + PID from within a source. + type: string + programNum: + type: number + rateMode: + type: string + scte27Pids: + type: string + scte35Control: + type: string + scte35Pid: + description: PID from which to + read SCTE-35 messages. + type: string + segmentationMarkers: + type: string + segmentationStyle: + type: string + segmentationTime: + type: number + timedMetadataBehavior: + type: string + timedMetadataPid: + description: Selects a specific + PID from within a source. + type: string + transportStreamId: + description: User-specified id. + Ths is used in an output group + or an output. + type: number + videoPid: + description: Selects a specific + PID from within a source. + type: string + type: object + type: object + destination: + description: A director and base filename + where archive files should be written. + See Destination for more details. + properties: + destinationRefId: + description: Reference ID for the + destination. + type: string + type: object + fecOutputSettings: + description: Settings for output. See + Output Settings for more details. + properties: + columnDepth: + description: The height of the FEC + protection matrix. + type: number + includeFec: + description: Enables column only or + column and row based FEC. 
+ type: string + rowLength: + description: The width of the FEC + protection matrix. + type: number + type: object + type: object + type: object + videoDescriptionName: + description: The name of the video description + used as video source for the output. + type: string + type: object + type: array + type: object + type: array + timecodeConfig: + description: Contains settings used to acquire and adjust + timecode information from inputs. See Timecode Config for + more details. + properties: + source: + description: The source for the timecode that will be + associated with the events outputs. + type: string + syncThreshold: + description: Threshold in frames beyond which output timecode + is resynchronized to the input timecode. + type: number + type: object + videoDescriptions: + description: Video Descriptions. See Video Descriptions for + more details. + items: + properties: + codecSettings: + description: Audio codec settings. See Audio Codec Settings + for more details. + properties: + frameCaptureSettings: + description: Frame capture settings. See Frame Capture + Settings for more details. + properties: + captureInterval: + description: The frequency at which to capture + frames for inclusion in the output. + type: number + captureIntervalUnits: + description: Unit for the frame capture interval. + type: string + type: object + h264Settings: + description: H264 settings. See H264 Settings for + more details. + properties: + adaptiveQuantization: + description: Enables or disables adaptive quantization. + type: string + afdSignaling: + description: Indicates that AFD values will + be written into the output stream. + type: string + bitrate: + description: Average bitrate in bits/second. + type: number + bufFillPct: + type: number + bufSize: + description: Size of buffer in bits. + type: number + colorMetadata: + description: Includes color space metadata in + the output. + type: string + entropyEncoding: + description: Entropy encoding mode. 
+ type: string + filterSettings: + description: Filters to apply to an encode. + See H264 Filter Settings for more details. + properties: + temporalFilterSettings: + description: Temporal filter settings. See + Temporal Filter Settings + properties: + postFilterSharpening: + description: Post filter sharpening. + type: string + strength: + description: Filter strength. + type: string + type: object + type: object + fixedAfd: + description: Four bit AFD value to write on + all frames of video in the output stream. + type: string + flickerAq: + type: string + forceFieldPictures: + description: Controls whether coding is performed + on a field basis or on a frame basis. + type: string + framerateControl: + description: Indicates how the output video + frame rate is specified. + type: string + framerateDenominator: + description: Framerate denominator. + type: number + framerateNumerator: + description: Framerate numerator. + type: number + gopBReference: + description: GOP-B reference. + type: string + gopClosedCadence: + description: Frequency of closed GOPs. + type: number + gopNumBFrames: + description: Number of B-frames between reference + frames. + type: number + gopSize: + description: GOP size in units of either frames + of seconds per gop_size_units. + type: number + gopSizeUnits: + description: Indicates if the gop_size is specified + in frames or seconds. + type: string + level: + description: H264 level. + type: string + lookAheadRateControl: + description: Amount of lookahead. + type: string + maxBitrate: + description: Set the maximum bitrate in order + to accommodate expected spikes in the complexity + of the video. + type: number + minIInterval: + type: number + numRefFrames: + description: Number of reference frames to use. + type: number + parControl: + description: Indicates how the output pixel + aspect ratio is specified. + type: string + parDenominator: + description: Pixel Aspect Ratio denominator. 
+ type: number + parNumerator: + description: Pixel Aspect Ratio numerator. + type: number + profile: + description: AAC profile. + type: string + qualityLevel: + description: Quality level. + type: string + qvbrQualityLevel: + description: Controls the target quality for + the video encode. + type: number + rateControlMode: + description: The rate control mode. + type: string + scanType: + description: Sets the scan type of the output. + type: string + sceneChangeDetect: + description: Scene change detection. + type: string + slices: + description: Number of slices per picture. + type: number + softness: + description: Softness. + type: number + spatialAq: + description: Makes adjustments within each frame + based on spatial variation of content complexity. + type: string + subgopLength: + description: Subgop length. + type: string + syntax: + description: Produces a bitstream compliant + with SMPTE RP-2027. + type: string + temporalAq: + description: Makes adjustments within each frame + based on temporal variation of content complexity. + type: string + timecodeInsertion: + description: Determines how timecodes should + be inserted into the video elementary stream. + type: string + type: object + h265Settings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + adaptiveQuantization: + description: Enables or disables adaptive quantization. + type: string + afdSignaling: + description: Indicates that AFD values will + be written into the output stream. + type: string + alternativeTransferFunction: + description: Whether or not EML should insert + an Alternative Transfer Function SEI message. + type: string + bitrate: + description: Average bitrate in bits/second. + type: number + bufSize: + description: Size of buffer in bits. + type: number + colorMetadata: + description: Includes color space metadata in + the output. 
+ type: string + colorSpaceSettings: + description: Define the color metadata for the + output. H265 Color Space Settings for more + details. + properties: + colorSpacePassthroughSettings: + description: Sets the colorspace metadata + to be passed through. + type: object + dolbyVision81Settings: + description: Set the colorspace to Dolby + Vision81. + type: object + hdr10Settings: + description: Set the colorspace to be HDR10. + See H265 HDR10 Settings for more details. + properties: + maxCll: + description: Sets the MaxCLL value for + HDR10. + type: number + maxFall: + description: Sets the MaxFALL value + for HDR10. + type: number + type: object + rec601Settings: + description: Set the colorspace to Rec. + 601. + type: object + rec709Settings: + description: Set the colorspace to Rec. + 709. + type: object + type: object + filterSettings: + description: Filters to apply to an encode. + See H264 Filter Settings for more details. + properties: + temporalFilterSettings: + description: Temporal filter settings. See + Temporal Filter Settings + properties: + postFilterSharpening: + description: Post filter sharpening. + type: string + strength: + description: Filter strength. + type: string + type: object + type: object + fixedAfd: + description: Four bit AFD value to write on + all frames of video in the output stream. + type: string + flickerAq: + type: string + framerateDenominator: + description: Framerate denominator. + type: number + framerateNumerator: + description: Framerate numerator. + type: number + gopClosedCadence: + description: Frequency of closed GOPs. + type: number + gopSize: + description: GOP size in units of either frames + of seconds per gop_size_units. + type: number + gopSizeUnits: + description: Indicates if the gop_size is specified + in frames or seconds. + type: string + level: + description: H264 level. + type: string + lookAheadRateControl: + description: Amount of lookahead. 
+ type: string + maxBitrate: + description: Set the maximum bitrate in order + to accommodate expected spikes in the complexity + of the video. + type: number + minIInterval: + type: number + parDenominator: + description: Pixel Aspect Ratio denominator. + type: number + parNumerator: + description: Pixel Aspect Ratio numerator. + type: number + profile: + description: AAC profile. + type: string + qvbrQualityLevel: + description: Controls the target quality for + the video encode. + type: number + rateControlMode: + description: The rate control mode. + type: string + scanType: + description: Sets the scan type of the output. + type: string + sceneChangeDetect: + description: Scene change detection. + type: string + slices: + description: Number of slices per picture. + type: number + tier: + description: Set the H265 tier in the output. + type: string + timecodeBurninSettings: + description: Apply a burned in timecode. See + H265 Timecode Burnin Settings for more details. + properties: + prefix: + description: Set a prefix on the burned + in timecode. + type: string + timecodeBurninFontSize: + description: Sets the size of the burned + in timecode. + type: string + timecodeBurninPosition: + description: Sets the position of the burned + in timecode. + type: string + type: object + timecodeInsertion: + description: Determines how timecodes should + be inserted into the video elementary stream. + type: string + type: object + type: object + height: + description: See the description in left_offset. For + height, specify the entire height of the rectangle + as a percentage of the underlying frame height. For + example, "80" means the rectangle height is 80% of + the underlying frame height. The top_offset and rectangle_height + must add up to 100% or less. This field corresponds + to tts:extent - Y in the TTML standard. + type: number + name: + description: Name of the Channel. 
+ type: string + respondToAfd: + description: Indicate how to respond to the AFD values + that might be in the input video. + type: string + scalingBehavior: + description: Behavior on how to scale. + type: string + sharpness: + description: Changes the strength of the anti-alias + filter used for scaling. + type: number + width: + description: See the description in left_offset. For + width, specify the entire width of the rectangle as + a percentage of the underlying frame width. For example, + "80" means the rectangle width is 80% of the underlying + frame width. The left_offset and rectangle_width must + add up to 100% or less. This field corresponds to + tts:extent - X in the TTML standard. + type: number + type: object + type: array + type: object + inputAttachments: + description: Input attachments for the channel. See Input Attachments + for more details. + items: + properties: + automaticInputFailoverSettings: + description: User-specified settings for defining what the + conditions are for declaring the input unhealthy and failing + over to a different input. See Automatic Input Failover + Settings for more details. + properties: + errorClearTimeMsec: + description: This clear time defines the requirement + a recovered input must meet to be considered healthy. + The input must have no failover conditions for this + length of time. Enter a time in milliseconds. This + value is particularly important if the input_preference + for the failover pair is set to PRIMARY_INPUT_PREFERRED, + because after this time, MediaLive will switch back + to the primary input. + type: number + failoverCondition: + description: A list of failover conditions. If any of + these conditions occur, MediaLive will perform a failover + to the other input. See Failover Condition Block for + more details. + items: + properties: + failoverConditionSettings: + description: Failover condition type-specific + settings. See Failover Condition Settings for + more details. 
+ properties: + audioSilenceSettings: + description: MediaLive will perform a failover + if the specified audio selector is silent + for the specified period. See Audio Silence + Failover Settings for more details. + properties: + audioSelectorName: + description: The name of the audio selector + in the input that MediaLive should monitor + to detect silence. Select your most + important rendition. If you didn't create + an audio selector in this input, leave + blank. + type: string + audioSilenceThresholdMsec: + description: The amount of time (in milliseconds) + that the active input must be silent + before automatic input failover occurs. + Silence is defined as audio loss or + audio quieter than -50 dBFS. + type: number + type: object + inputLossSettings: + description: MediaLive will perform a failover + if content is not detected in this input + for the specified period. See Input Loss + Failover Settings for more details. + properties: + inputLossThresholdMsec: + description: The amount of time (in milliseconds) + that no input is detected. After that + time, an input failover will occur. + type: number + type: object + videoBlackSettings: + description: MediaLive will perform a failover + if content is considered black for the specified + period. See Video Black Failover Settings + for more details. + properties: + blackDetectThreshold: + description: 'A value used in calculating + the threshold below which MediaLive + considers a pixel to be ''black''. For + the input to be considered black, every + pixel in a frame must be below this + threshold. The threshold is calculated + as a percentage (expressed as a decimal) + of white. Therefore .1 means 10% white + (or 90% black). Note how the formula + works for any color depth. For example, + if you set this field to 0.1 in 10-bit + color depth: (10230.1=102.3), which + means a pixel value of 102 or less is + ''black''. 
If you set this field to + .1 in an 8-bit color depth: (2550.1=25.5), + which means a pixel value of 25 or less + is ''black''. The range is 0.0 to 1.0, + with any number of decimal places.' + type: number + videoBlackThresholdMsec: + description: The amount of time (in milliseconds) + that the active input must be black + before automatic input failover occurs. + type: number + type: object + type: object + type: object + type: array + inputPreference: + description: Input preference when deciding which input + to make active when a previously failed input has + recovered. + type: string + secondaryInputId: + description: The input ID of the secondary input in + the automatic input failover pair. + type: string + type: object + inputAttachmentName: + description: User-specified name for the attachment. + type: string + inputId: + description: The ID of the input. + type: string + inputIdRef: + description: Reference to a Input in medialive to populate + inputId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + inputIdSelector: + description: Selector for a Input in medialive to populate + inputId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + inputSettings: + description: Settings of an input. See Input Settings for + more details. + properties: + audioSelector: + items: + properties: + name: + description: Name of the Channel. + type: string + selectorSettings: + description: The audio selector settings. See + Audio Selector Settings for more details. + properties: + audioHlsRenditionSelection: + description: Audio HLS Rendition Selection. + See Audio HLS Rendition Selection for more + details. + properties: + groupId: + description: 'Specifies the GROUP-ID in + the #EXT-X-MEDIA tag of the target HLS + audio rendition.' + type: string + name: + description: Name of the Channel. + type: string + type: object + audioLanguageSelection: + description: Audio Language Selection. See + Audio Language Selection for more details. 
+ properties: + languageCode: + description: Selects a specific three-letter + language code from within an audio source. + type: string + languageSelectionPolicy: + description: When set to “strict”, the + transport stream demux strictly identifies + audio streams by their language descriptor. + If a PMT update occurs such that an + audio stream matching the initially + selected language is no longer present + then mute will be encoded until the + language returns. If “loose”, then on + a PMT update the demux will choose another + audio stream in the program with the + same stream type if it can’t find one + with the same language. + type: string + type: object + audioPidSelection: + description: Audio Pid Selection. See Audio + PID Selection for more details. + properties: + pid: + description: Selects a specific PID from + within a source. + type: number + type: object + audioTrackSelection: + description: Audio Track Selection. See Audio + Track Selection for more details. + properties: + dolbyEDecode: + description: Configure decoding options + for Dolby E streams - these should be + Dolby E frames carried in PCM streams + tagged with SMPTE-337. See Dolby E Decode + for more details. + properties: + programSelection: + description: Applies only to Dolby + E. Enter the program ID (according + to the metadata in the audio) of + the Dolby E program to extract from + the specified track. One program + extracted per audio selector. To + select multiple programs, create + multiple selectors with the same + Track and different Program numbers. + “All channels” means to ignore the + program IDs and include all the + channels in this selector; useful + if metadata is known to be incorrect. + type: string + type: object + tracks: + description: Selects one or more unique + audio tracks from within a source. See + Audio Tracks for more details. + items: + properties: + track: + description: 1-based integer value + that maps to a specific audio + track. 
+ type: number + type: object + type: array + type: object + type: object + type: object + type: array + captionSelector: + items: + properties: + languageCode: + description: Selects a specific three-letter language + code from within an audio source. + type: string + name: + description: Name of the Channel. + type: string + selectorSettings: + description: The audio selector settings. See + Audio Selector Settings for more details. + properties: + ancillarySourceSettings: + description: Ancillary Source Settings. See + Ancillary Source Settings for more details. + properties: + sourceAncillaryChannelNumber: + description: Specifies the number (1 to + 4) of the captions channel you want + to extract from the ancillary captions. + If you plan to convert the ancillary + captions to another format, complete + this field. If you plan to choose Embedded + as the captions destination in the output + (to pass through all the channels in + the ancillary captions), leave this + field blank because MediaLive ignores + the field. + type: number + type: object + aribSourceSettings: + description: ARIB Source Settings. + type: object + dvbSubSourceSettings: + description: DVB Sub Source Settings. See + DVB Sub Source Settings for more details. + properties: + ocrLanguage: + description: If you will configure a WebVTT + caption description that references + this caption selector, use this field + to provide the language to consider + when translating the image-based source + to text. + type: string + pid: + description: Selects a specific PID from + within a source. + type: number + type: object + embeddedSourceSettings: + description: Embedded Source Settings. See + Embedded Source Settings for more details. + properties: + convert608To708: + description: If upconvert, 608 data is + both passed through via the “608 compatibility + bytes” fields of the 708 wrapper as + well as translated into 708. 708 data + present in the source content will be + discarded. 
+ type: string + scte20Detection: + description: Set to “auto” to handle streams + with intermittent and/or non-aligned + SCTE-20 and Embedded captions. + type: string + source608ChannelNumber: + description: Specifies the 608/708 channel + number within the video track from which + to extract captions. Unused for passthrough. + type: number + type: object + scte20SourceSettings: + description: SCTE20 Source Settings. See SCTE + 20 Source Settings for more details. + properties: + convert608To708: + description: If upconvert, 608 data is + both passed through via the “608 compatibility + bytes” fields of the 708 wrapper as + well as translated into 708. 708 data + present in the source content will be + discarded. + type: string + source608ChannelNumber: + description: Specifies the 608/708 channel + number within the video track from which + to extract captions. Unused for passthrough. + type: number + type: object + scte27SourceSettings: + description: SCTE27 Source Settings. See SCTE + 27 Source Settings for more details. + properties: + ocrLanguage: + description: If you will configure a WebVTT + caption description that references + this caption selector, use this field + to provide the language to consider + when translating the image-based source + to text. + type: string + pid: + description: Selects a specific PID from + within a source. + type: number + type: object + teletextSourceSettings: + description: Teletext Source Settings. See + Teletext Source Settings for more details. + properties: + outputRectangle: + description: Optionally defines a region + where TTML style captions will be displayed. + See Caption Rectangle for more details. + properties: + height: + description: See the description in + left_offset. For height, specify + the entire height of the rectangle + as a percentage of the underlying + frame height. For example, "80" + means the rectangle height is 80% + of the underlying frame height. 
+ The top_offset and rectangle_height + must add up to 100% or less. This + field corresponds to tts:extent + - Y in the TTML standard. + type: number + leftOffset: + description: Applies only if you plan + to convert these source captions + to EBU-TT-D or TTML in an output. + (Make sure to leave the default + if you don’t have either of these + formats in the output.) You can + define a display rectangle for the + captions that is smaller than the + underlying video frame. You define + the rectangle by specifying the + position of the left edge, top edge, + bottom edge, and right edge of the + rectangle, all within the underlying + video frame. The units for the measurements + are percentages. If you specify + a value for one of these fields, + you must specify a value for all + of them. For leftOffset, specify + the position of the left edge of + the rectangle, as a percentage of + the underlying frame width, and + relative to the left edge of the + frame. For example, "10" means the + measurement is 10% of the underlying + frame width. The rectangle left + edge starts at that position from + the left edge of the frame. This + field corresponds to tts:origin + - X in the TTML standard. + type: number + topOffset: + description: See the description in + left_offset. For top_offset, specify + the position of the top edge of + the rectangle, as a percentage of + the underlying frame height, and + relative to the top edge of the + frame. For example, "10" means the + measurement is 10% of the underlying + frame height. The rectangle top + edge starts at that position from + the top edge of the frame. This + field corresponds to tts:origin + - Y in the TTML standard. + type: number + width: + description: See the description in + left_offset. For width, specify + the entire width of the rectangle + as a percentage of the underlying + frame width. For example, "80" means + the rectangle width is 80% of the + underlying frame width. 
The left_offset + and rectangle_width must add up + to 100% or less. This field corresponds + to tts:extent - X in the TTML standard. + type: number + type: object + pageNumber: + description: Specifies the teletext page + number within the data stream from which + to extract captions. Range of 0x100 + (256) to 0x8FF (2303). Unused for passthrough. + Should be specified as a hexadecimal + string with no “0x” prefix. + type: string + type: object + type: object + type: object + type: array + deblockFilter: + description: Enable or disable the deblock filter when + filtering. + type: string + denoiseFilter: + description: Enable or disable the denoise filter when + filtering. + type: string + filterStrength: + description: Adjusts the magnitude of filtering from + 1 (minimal) to 5 (strongest). + type: number + inputFilter: + description: Turns on the filter for the input. + type: string + networkInputSettings: + description: Input settings. See Network Input Settings + for more details. + properties: + hlsInputSettings: + description: Specifies HLS input settings when the + uri is for a HLS manifest. See HLS Input Settings + for more details. + properties: + bandwidth: + description: The bitrate is specified in bits + per second, as in an HLS manifest. + type: number + bufferSegments: + description: Buffer segments. + type: number + retries: + description: The number of consecutive times + that attempts to read a manifest or segment + must fail before the input is considered unavailable. + type: number + retryInterval: + description: The number of seconds between retries + when an attempt to read a manifest or segment + fails. + type: number + scte35Source: + description: The source for the timecode that + will be associated with the events outputs. + type: string + type: object + serverValidation: + description: Check HTTPS server certificates. + type: string + type: object + scte35Pid: + description: PID from which to read SCTE-35 messages. 
+ type: number + smpte2038DataPreference: + description: Specifies whether to extract applicable + ancillary data from a SMPTE-2038 source in the input. + type: string + sourceEndBehavior: + description: Loop input if it is a file. + type: string + videoSelector: + properties: + colorSpace: + type: string + colorSpaceUsage: + type: string + type: object + type: object + type: object + type: array + inputSpecification: + description: Specification of network and file inputs for the + channel. + properties: + codec: + type: string + inputResolution: + description: '- Maximum CDI input resolution.' + type: string + maximumBitrate: + description: Average bitrate in bits/second. + type: string + type: object + logLevel: + description: The log level to write to Cloudwatch logs. + type: string + maintenance: + description: Maintenance settings for this channel. See Maintenance + for more details. + properties: + maintenanceDay: + description: The day of the week to use for maintenance. + type: string + maintenanceStartTime: + description: The hour maintenance will start. + type: string + type: object + name: + description: Name of the Channel. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: Concise argument description. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + startChannel: + description: 'Whether to start/stop channel. Default: false' + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpc: + description: Settings for the VPC outputs. See VPC for more details. 
+ properties: + publicAddressAllocationIds: + description: List of public address allocation ids to associate + with ENIs that will be created in Output VPC. Must specify + one for SINGLE_PIPELINE, two for STANDARD channels. + items: + type: string + type: array + securityGroupIds: + description: A list of up to 5 EC2 VPC security group IDs + to attach to the Output VPC network interfaces. If none + are specified then the VPC default security group will be + used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of VPC subnet IDs from the same VPC. If + STANDARD channel, subnet IDs must be mapped to two unique + availability zones (AZ). + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cdiInputSpecification: + description: Specification of CDI inputs for this channel. See + CDI Input Specification for more details. + properties: + resolution: + description: '- Maximum CDI input resolution.' + type: string + type: object + channelClass: + description: Concise argument description. + type: string + destinations: + description: Destinations for channel. See Destinations for more + details. + items: + properties: + id: + description: User-specified id. 
Ths is used in an output + group or an output. + type: string + mediaPackageSettings: + description: Destination settings for a MediaPackage output; + one destination for both encoders. See Media Package Settings + for more details. + items: + properties: + channelId: + description: ID of the channel in MediaPackage that + is the destination for this output group. + type: string + type: object + type: array + multiplexSettings: + description: Destination settings for a Multiplex output; + one destination for both encoders. See Multiplex Settings + for more details. + properties: + multiplexId: + description: The ID of the Multiplex that the encoder + is providing output to. + type: string + programName: + description: The program name of the Multiplex program + that the encoder is providing output to. + type: string + type: object + settings: + description: Destination settings for a standard output; + one destination for each redundant encoder. See Settings + for more details. + items: + properties: + passwordParam: + description: Key used to extract the password from + EC2 Parameter store. + type: string + streamName: + description: Stream name RTMP destinations (URLs of + type rtmp://) + type: string + url: + description: A URL specifying a destination. + type: string + username: + description: Username for destination. + type: string + type: object + type: array + type: object + type: array + encoderSettings: + description: Encoder settings. See Encoder Settings for more details. + properties: + audioDescriptions: + description: Audio descriptions for the channel. See Audio + Descriptions for more details. + items: + properties: + audioNormalizationSettings: + description: Advanced audio normalization settings. + See Audio Normalization Settings for more details. + properties: + algorithm: + description: Audio normalization algorithm to use. + itu17701 conforms to the CALM Act specification, + itu17702 to the EBU R-128 specification. 
+ type: string + algorithmControl: + description: Algorithm control for the audio description. + type: string + targetLkfs: + description: Target LKFS (loudness) to adjust volume + to. + type: number + type: object + audioSelectorName: + description: The name of the audio selector in the input + that MediaLive should monitor to detect silence. Select + your most important rendition. If you didn't create + an audio selector in this input, leave blank. + type: string + audioType: + description: Applies only if audioTypeControl is useConfigured. + The values for audioType are defined in ISO-IEC 13818-1. + type: string + audioTypeControl: + description: Determined how audio type is determined. + type: string + audioWatermarkSettings: + description: Settings to configure one or more solutions + that insert audio watermarks in the audio encode. + See Audio Watermark Settings for more details. + properties: + nielsenWatermarksSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + nielsenCbetSettings: + description: Used to insert watermarks of type + Nielsen CBET. See Nielsen CBET Settings for + more details. + properties: + cbetCheckDigitString: + type: string + cbetStepaside: + description: Determines the method of CBET + insertion mode when prior encoding is + detected on the same layer. + type: string + csid: + description: CBET source ID to use in the + watermark. + type: string + type: object + nielsenDistributionType: + description: Distribution types to assign to + the watermarks. Options are PROGRAM_CONTENT + and FINAL_DISTRIBUTOR. + type: string + nielsenNaesIiNwSettings: + description: Used to insert watermarks of type + Nielsen NAES, II (N2) and Nielsen NAES VI + (NW). See Nielsen NAES II NW Settings for + more details. + items: + properties: + checkDigitString: + type: string + sid: + description: The Nielsen Source ID to + include in the watermark. 
+ type: number + type: object + type: array + type: object + type: object + codecSettings: + description: Audio codec settings. See Audio Codec Settings + for more details. + properties: + aacSettings: + description: Aac Settings. See AAC Settings for + more details. + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + inputType: + description: Set to "broadcasterMixedAd" when + input contains pre-mixed main audio + AD (narration) + as a stereo pair. + type: string + profile: + description: AAC profile. + type: string + rateControlMode: + description: The rate control mode. + type: string + rawFormat: + description: Sets LATM/LOAS AAC output for raw + containers. + type: string + sampleRate: + description: Sample rate in Hz. + type: number + spec: + description: Use MPEG-2 AAC audio instead of + MPEG-4 AAC audio for raw or MPEG-2 Transport + Stream containers. + type: string + vbrQuality: + description: VBR Quality Level - Only used if + rateControlMode is VBR. + type: string + type: object + ac3Settings: + description: Ac3 Settings. See AC3 Settings for + more details. + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + bitstreamMode: + description: Specifies the bitstream mode (bsmod) + for the emitted AC-3 stream. + type: string + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + dialnorm: + description: Sets the dialnorm of the output. + type: number + drcProfile: + description: If set to filmStandard, adds dynamic + range compression signaling to the output + bitstream as defined in the Dolby Digital + specification. + type: string + lfeFilter: + description: When set to enabled, applies a + 120Hz lowpass filter to the LFE channel prior + to encoding. + type: string + metadataControl: + description: Metadata control. 
+ type: string + type: object + eac3AtmosSettings: + description: '- Eac3 Atmos Settings. See EAC3 Atmos + Settings' + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + dialnorm: + description: Sets the dialnorm of the output. + type: number + drcLine: + description: Sets the Dolby dynamic range compression + profile. + type: string + drcRf: + description: Sets the profile for heavy Dolby + dynamic range compression. + type: string + heightTrim: + description: Height dimensional trim. + type: number + surroundTrim: + description: Surround dimensional trim. + type: number + type: object + eac3Settings: + description: '- Eac3 Settings. See EAC3 Settings' + properties: + attenuationControl: + description: Sets the attenuation control. + type: string + bitrate: + description: Average bitrate in bits/second. + type: number + bitstreamMode: + description: Specifies the bitstream mode (bsmod) + for the emitted AC-3 stream. + type: string + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + dcFilter: + type: string + dialnorm: + description: Sets the dialnorm of the output. + type: number + drcLine: + description: Sets the Dolby dynamic range compression + profile. + type: string + drcRf: + description: Sets the profile for heavy Dolby + dynamic range compression. + type: string + lfeControl: + type: string + lfeFilter: + description: When set to enabled, applies a + 120Hz lowpass filter to the LFE channel prior + to encoding. + type: string + loRoCenterMixLevel: + description: H264 level. + type: number + loRoSurroundMixLevel: + description: H264 level. + type: number + ltRtCenterMixLevel: + description: H264 level. + type: number + ltRtSurroundMixLevel: + description: H264 level. + type: number + metadataControl: + description: Metadata control. 
+ type: string + passthroughControl: + type: string + phaseControl: + type: string + stereoDownmix: + type: string + surroundExMode: + type: string + surroundMode: + type: string + type: object + mp2Settings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + sampleRate: + description: Sample rate in Hz. + type: number + type: object + passThroughSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + type: object + wavSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + bitDepth: + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + sampleRate: + description: Sample rate in Hz. + type: number + type: object + type: object + languageCode: + description: Selects a specific three-letter language + code from within an audio source. + type: string + languageCodeControl: + type: string + name: + description: Name of the Channel. + type: string + remixSettings: + description: Destination settings for a standard output; + one destination for each redundant encoder. See Settings + for more details. 
+ properties: + channelMappings: + items: + properties: + inputChannelLevels: + items: + properties: + gain: + type: number + inputChannel: + type: number + type: object + type: array + outputChannel: + type: number + type: object + type: array + channelsIn: + type: number + channelsOut: + type: number + type: object + streamName: + description: Stream name RTMP destinations (URLs of + type rtmp://) + type: string + type: object + type: array + availBlanking: + description: Settings for ad avail blanking. See Avail Blanking + for more details. + properties: + availBlankingImage: + description: Blanking image to be used. See Avail Blanking + Image for more details. + properties: + passwordParam: + description: Key used to extract the password from + EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible to the live + stream. + type: string + username: + description: Username for destination. + type: string + type: object + state: + description: When set to enabled, causes video, audio + and captions to be blanked when insertion metadata is + added. + type: string + type: object + captionDescriptions: + description: Caption Descriptions. See Caption Descriptions + for more details. + items: + properties: + accessibility: + description: Indicates whether the caption track implements + accessibility features such as written descriptions + of spoken dialog, music, and sounds. + type: string + captionSelectorName: + description: Specifies which input caption selector + to use as a caption source when generating output + captions. This field should match a captionSelector + name. + type: string + destinationSettings: + description: Additional settings for captions destination + that depend on the destination type. See Destination + Settings for more details. + properties: + aribDestinationSettings: + description: ARIB Destination Settings. + type: object + burnInDestinationSettings: + description: Burn In Destination Settings. 
See Burn + In Destination Settings for more details. + properties: + alignment: + description: justify live subtitles and center-justify + pre-recorded subtitles. All burn-in and DVB-Sub + font settings must match. + type: string + backgroundColor: + description: in and DVB-Sub font settings must + match. + type: string + backgroundOpacity: + description: in and DVB-Sub font settings must + match. + type: number + font: + description: in. File extension must be ‘ttf’ + or ‘tte’. Although the user can select output + fonts for many different types of input captions, + embedded, STL and teletext sources use a strict + grid system. Using external fonts with these + caption sources could cause unexpected display + of proportional fonts. All burn-in and DVB-Sub + font settings must match. See Font for more + details. + properties: + passwordParam: + description: Key used to extract the password + from EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible + to the live stream. + type: string + username: + description: Username for destination. + type: string + type: object + fontColor: + description: in captions. This option is not + valid for source captions that are STL, 608/embedded + or teletext. These source settings are already + pre-defined by the caption stream. All burn-in + and DVB-Sub font settings must match. + type: string + fontOpacity: + description: in captions. 255 is opaque; 0 is + transparent. All burn-in and DVB-Sub font + settings must match. + type: number + fontResolution: + description: in and DVB-Sub font settings must + match. + type: number + fontSize: + description: in and DVB-Sub font settings must + match. + type: string + outlineColor: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: string + outlineSize: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. 
+ type: number + shadowColor: + description: in and DVB-Sub font settings must + match. + type: string + shadowOpacity: + description: in and DVB-Sub font settings must + match. + type: number + shadowXOffset: + description: 2 would result in a shadow offset + 2 pixels to the left. All burn-in and DVB-Sub + font settings must match. + type: number + shadowYOffset: + description: 2 would result in a shadow offset + 2 pixels above the text. All burn-in and DVB-Sub + font settings must match. + type: number + teletextGridControl: + description: Sub/Burn-in outputs. + type: string + xPosition: + description: in and DVB-Sub font settings must + match. + type: number + yPosition: + description: in and DVB-Sub font settings must + match. + type: number + type: object + dvbSubDestinationSettings: + description: DVB Sub Destination Settings. See DVB + Sub Destination Settings for more details. + properties: + alignment: + description: justify live subtitles and center-justify + pre-recorded subtitles. All burn-in and DVB-Sub + font settings must match. + type: string + backgroundColor: + description: in and DVB-Sub font settings must + match. + type: string + backgroundOpacity: + description: in and DVB-Sub font settings must + match. + type: number + font: + description: in. File extension must be ‘ttf’ + or ‘tte’. Although the user can select output + fonts for many different types of input captions, + embedded, STL and teletext sources use a strict + grid system. Using external fonts with these + caption sources could cause unexpected display + of proportional fonts. All burn-in and DVB-Sub + font settings must match. See Font for more + details. + properties: + passwordParam: + description: Key used to extract the password + from EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible + to the live stream. + type: string + username: + description: Username for destination. 
+ type: string + type: object + fontColor: + description: in captions. This option is not + valid for source captions that are STL, 608/embedded + or teletext. These source settings are already + pre-defined by the caption stream. All burn-in + and DVB-Sub font settings must match. + type: string + fontOpacity: + description: in captions. 255 is opaque; 0 is + transparent. All burn-in and DVB-Sub font + settings must match. + type: number + fontResolution: + description: in and DVB-Sub font settings must + match. + type: number + fontSize: + description: in and DVB-Sub font settings must + match. + type: string + outlineColor: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: string + outlineSize: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: number + shadowColor: + description: in and DVB-Sub font settings must + match. + type: string + shadowOpacity: + description: in and DVB-Sub font settings must + match. + type: number + shadowXOffset: + description: 2 would result in a shadow offset + 2 pixels to the left. All burn-in and DVB-Sub + font settings must match. + type: number + shadowYOffset: + description: 2 would result in a shadow offset + 2 pixels above the text. All burn-in and DVB-Sub + font settings must match. + type: number + teletextGridControl: + description: Sub/Burn-in outputs. + type: string + xPosition: + description: in and DVB-Sub font settings must + match. + type: number + yPosition: + description: in and DVB-Sub font settings must + match. + type: number + type: object + ebuTtDDestinationSettings: + description: EBU TT D Destination Settings. See + EBU TT D Destination Settings for more details. + properties: + copyrightHolder: + description: – Complete this field if you want + to include the name of the copyright holder + in the copyright tag in the captions metadata. 
+ type: string + fillLineGap: + description: 'line captions). - enabled: Fill + with the captions background color (as specified + in the input captions). - disabled: Leave + the gap unfilled.' + type: string + fontFamily: + description: TT captions. Valid only if styleControl + is set to include. If you leave this field + empty, the font family is set to “monospaced”. + (If styleControl is set to exclude, the font + family is always set to “monospaced”.) You + specify only the font family. All other style + information (color, bold, position and so + on) is copied from the input captions. The + size is always set to 100% to allow the downstream + player to choose the size. - Enter a list + of font families, as a comma-separated list + of font names, in order of preference. The + name can be a font family (such as “Arial”), + or a generic font family (such as “serif”), + or “default” (to let the downstream player + choose the font). - Leave blank to set the + family to “monospace”. + type: string + styleControl: + description: 'TT captions. - include: Take the + style information (font color, font position, + and so on) from the source captions and include + that information in the font data attached + to the EBU-TT captions. This option is valid + only if the source captions are Embedded or + Teletext. - exclude: In the font data attached + to the EBU-TT captions, set the font family + to “monospaced”. Do not include any other + style information.' + type: string + type: object + embeddedDestinationSettings: + description: Embedded Destination Settings. + type: object + embeddedPlusScte20DestinationSettings: + description: Embedded Plus SCTE20 Destination Settings. + type: object + rtmpCaptionInfoDestinationSettings: + description: RTMP Caption Info Destination Settings. + type: object + scte20PlusEmbeddedDestinationSettings: + description: SCTE20 Plus Embedded Destination Settings. 
+ type: object + scte27DestinationSettings: + description: – SCTE27 Destination Settings. + type: object + smpteTtDestinationSettings: + description: – SMPTE TT Destination Settings. + type: object + teletextDestinationSettings: + description: – Teletext Destination Settings. + type: object + ttmlDestinationSettings: + description: – TTML Destination Settings. See TTML + Destination Settings for more details. + properties: + styleControl: + description: 'TT captions. - include: Take the + style information (font color, font position, + and so on) from the source captions and include + that information in the font data attached + to the EBU-TT captions. This option is valid + only if the source captions are Embedded or + Teletext. - exclude: In the font data attached + to the EBU-TT captions, set the font family + to “monospaced”. Do not include any other + style information.' + type: string + type: object + webvttDestinationSettings: + description: WebVTT Destination Settings. See WebVTT + Destination Settings for more details. + properties: + styleControl: + description: 'TT captions. - include: Take the + style information (font color, font position, + and so on) from the source captions and include + that information in the font data attached + to the EBU-TT captions. This option is valid + only if the source captions are Embedded or + Teletext. - exclude: In the font data attached + to the EBU-TT captions, set the font family + to “monospaced”. Do not include any other + style information.' + type: string + type: object + type: object + languageCode: + description: Selects a specific three-letter language + code from within an audio source. + type: string + languageDescription: + description: Human readable information to indicate + captions available for players (eg. English, or Spanish). + type: string + name: + description: Name of the Channel. 
+ type: string + type: object + type: array + globalConfiguration: + description: Configuration settings that apply to the event + as a whole. See Global Configuration for more details. + properties: + initialAudioGain: + description: – Value to set the initial audio gain for + the Live Event. + type: number + inputEndAction: + description: of-file). When switchAndLoopInputs is configured + the encoder will restart at the beginning of the first + input. When “none” is configured the encoder will transcode + either black, a solid color, or a user specified slate + images per the “Input Loss Behavior” configuration until + the next input switch occurs (which is controlled through + the Channel Schedule API). + type: string + inputLossBehavior: + description: Settings for system actions when input is + lost. See Input Loss Behavior for more details. + properties: + blackFrameMsec: + type: number + inputLossImageColor: + type: string + inputLossImageSlate: + properties: + passwordParam: + description: Key used to extract the password + from EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible to the + live stream. + type: string + username: + description: Username for destination. + type: string + type: object + inputLossImageType: + type: string + repeatFrameMsec: + type: number + type: object + outputLockingMode: + description: MediaLive will attempt to synchronize the + output of each pipeline to the other. EPOCH_LOCKING + - MediaLive will attempt to synchronize the output of + each pipeline to the Unix epoch. + type: string + outputTimingSource: + description: – Indicates whether the rate of frames emitted + by the Live encoder should be paced by its system clock + (which optionally may be locked to another source via + NTP) or should be locked to the clock of the source + that is providing the input stream. 
+ type: string + supportLowFramerateInputs: + description: – Adjusts video input buffer for streams + with very low video framerates. This is commonly set + to enabled for music channels with less than one video + frame per second. + type: string + type: object + motionGraphicsConfiguration: + description: Settings for motion graphics. See Motion Graphics + Configuration for more details. + properties: + motionGraphicsInsertion: + description: – Motion Graphics Insertion. + type: string + motionGraphicsSettings: + description: – Motion Graphics Settings. See Motion Graphics + Settings for more details. + properties: + htmlMotionGraphicsSettings: + description: – Html Motion Graphics Settings. + type: object + type: object + type: object + nielsenConfiguration: + description: Nielsen configuration settings. See Nielsen Configuration + for more details. + properties: + distributorId: + description: – Enter the Distributor ID assigned to your + organization by Nielsen. + type: string + nielsenPcmToId3Tagging: + description: – Enables Nielsen PCM to ID3 tagging. + type: string + type: object + outputGroups: + description: Output groups for the channel. See Output Groups + for more details. + items: + properties: + name: + description: Name of the Channel. + type: string + outputGroupSettings: + description: Settings associated with the output group. + See Output Group Settings for more details. + properties: + archiveGroupSettings: + description: Archive group settings. See Archive + Group Settings for more details. + items: + properties: + archiveCdnSettings: + description: Parameters that control the interactions + with the CDN. See Archive CDN Settings for + more details. + properties: + archiveS3Settings: + description: Archive S3 Settings. See + Archive S3 Settings for more details. + properties: + cannedAcl: + description: Specify the canned ACL + to apply to each S3 request. 
+ type: string + type: object + type: object + destination: + description: A director and base filename + where archive files should be written. See + Destination for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + rolloverInterval: + description: Number of seconds to write to + archive file before closing and starting + a new one. + type: number + type: object + type: array + frameCaptureGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + frameCaptureCdnSettings: + description: Destination settings for a standard + output; one destination for each redundant + encoder. See Settings for more details. + properties: + frameCaptureS3Settings: + description: Destination settings for a + standard output; one destination for each + redundant encoder. See Settings for more + details. + properties: + cannedAcl: + description: Specify the canned ACL + to apply to each S3 request. + type: string + type: object + type: object + type: object + hlsGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + adMarkers: + description: The ad marker type for this output + group. + items: + type: string + type: array + baseUrlContent: + type: string + baseUrlContent1: + type: string + baseUrlManifest: + type: string + baseUrlManifest1: + type: string + captionLanguageMappings: + items: + properties: + captionChannel: + type: number + languageCode: + description: Selects a specific three-letter + language code from within an audio source. 
+ type: string + languageDescription: + description: Human readable information + to indicate captions available for players + (eg. English, or Spanish). + type: string + type: object + type: array + captionLanguageSetting: + type: string + clientCache: + type: string + codecSpecification: + type: string + constantIv: + type: string + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + directoryStructure: + type: string + discontinuityTags: + description: Key-value map of resource tags. + type: string + encryptionType: + type: string + hlsCdnSettings: + description: Destination settings for a standard + output; one destination for each redundant + encoder. See Settings for more details. + items: + properties: + hlsAkamaiSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. + type: number + filecacheDuration: + type: number + httpTransferMode: + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. + type: number + salt: + type: string + token: + type: string + type: object + hlsBasicPutSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. 
+ type: number + filecacheDuration: + type: number + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. + type: number + type: object + hlsMediaStoreSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. + type: number + filecacheDuration: + type: number + mediaStoreStorageClass: + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. + type: number + type: object + hlsS3Settings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + cannedAcl: + description: Specify the canned ACL + to apply to each S3 request. + type: string + type: object + hlsWebdavSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. + type: number + filecacheDuration: + type: number + httpTransferMode: + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. 
+ type: number + type: object + type: object + type: array + hlsId3SegmentTagging: + type: string + iframeOnlyPlaylists: + type: string + incompleteSegmentBehavior: + type: string + indexNSegments: + type: number + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + ivInManifest: + type: string + ivSource: + description: The source for the timecode that + will be associated with the events outputs. + type: string + keepSegments: + type: number + keyFormat: + type: string + keyFormatVersions: + type: string + keyProviderSettings: + description: Destination settings for a standard + output; one destination for each redundant + encoder. See Settings for more details. + properties: + staticKeySettings: + description: Destination settings for a + standard output; one destination for each + redundant encoder. See Settings for more + details. + items: + properties: + keyProviderServer: + properties: + passwordParam: + description: Key used to extract + the password from EC2 Parameter + store. + type: string + uri: + description: – Path to a file + accessible to the live stream. + type: string + username: + description: Username for destination. + type: string + type: object + staticKeyValue: + type: string + type: object + type: array + type: object + manifestCompression: + type: string + manifestDurationFormat: + type: string + minSegmentLength: + type: number + mode: + type: string + outputSelection: + type: string + programDateTime: + type: string + programDateTimeClock: + type: string + programDateTimePeriod: + type: number + redundantManifest: + type: string + segmentLength: + type: number + segmentsPerSubdirectory: + type: number + streamInfResolution: + description: '- Maximum CDI input resolution.' + type: string + timedMetadataId3Frame: + description: Indicates ID3 frame that has the + timecode. 
+ type: string + timedMetadataId3Period: + type: number + timestampDeltaMilliseconds: + type: number + tsFileMode: + type: string + type: object + mediaPackageGroupSettings: + description: Media package group settings. See Media + Package Group Settings for more details. + properties: + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + type: object + msSmoothGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + acquisitionPointId: + description: User-specified id. Ths is used + in an output group or an output. + type: string + audioOnlyTimecodeControl: + type: string + certificateMode: + description: Setting to allow self signed or + verified RTMP certificates. + type: string + connectionRetryInterval: + description: Number of seconds to wait before + retrying connection to the flash media server + if the connection is lost. + type: number + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + eventId: + description: User-specified id. Ths is used + in an output group or an output. + type: string + eventIdMode: + type: string + eventStopBehavior: + type: string + filecacheDuration: + type: number + fragmentLength: + type: number + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to wait until + a restart is initiated. 
+ type: number + segmentationMode: + type: string + sendDelayMs: + type: number + sparseTrackType: + type: string + streamManifestBehavior: + type: string + timestampOffset: + type: string + timestampOffsetMode: + type: string + type: object + multiplexGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + type: object + rtmpGroupSettings: + description: RTMP group settings. See RTMP Group + Settings for more details. + properties: + adMarkers: + description: The ad marker type for this output + group. + items: + type: string + type: array + authenticationScheme: + description: Authentication scheme to use when + connecting with CDN. + type: string + cacheFullBehavior: + description: Controls behavior when content + cache fills up. + type: string + cacheLength: + description: Cache length in seconds, is used + to calculate buffer size. + type: number + captionData: + description: Controls the types of data that + passes to onCaptionInfo outputs. + type: string + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + restartDelay: + description: Number of seconds to wait until + a restart is initiated. + type: number + type: object + udpGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + timedMetadataId3Frame: + description: Indicates ID3 frame that has the + timecode. + type: string + timedMetadataId3Period: + type: number + type: object + type: object + outputs: + description: List of outputs. See Outputs for more details. + items: + properties: + audioDescriptionNames: + description: The names of the audio descriptions + used as audio sources for the output. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + captionDescriptionNames: + description: The names of the caption descriptions + used as caption sources for the output. + items: + type: string + type: array + x-kubernetes-list-type: set + outputName: + description: The name used to identify an output. + type: string + outputSettings: + description: Settings for output. See Output Settings + for more details. + properties: + archiveOutputSettings: + description: Archive output settings. See + Archive Output Settings for more details. + properties: + containerSettings: + description: Settings specific to the + container type of the file. See Container + Settings for more details. + properties: + m2tsSettings: + description: M2TS Settings. See M2TS + Settings for more details. + properties: + absentInputAudioBehavior: + type: string + arib: + type: string + aribCaptionsPid: + description: Selects a specific + PID from within a source. + type: string + aribCaptionsPidControl: + type: string + audioBufferModel: + type: string + audioFramesPerPes: + type: number + audioPids: + type: string + audioStreamType: + type: string + bitrate: + description: Average bitrate in + bits/second. + type: number + bufferModel: + type: string + ccDescriptor: + type: string + dvbNitSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + networkId: + description: User-specified + id. Ths is used in an output + group or an output. + type: number + networkName: + description: Name of the Channel. + type: string + repInterval: + type: number + type: object + dvbSdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + outputSdt: + type: string + repInterval: + type: number + serviceName: + description: Name of the Channel. 
+ type: string + serviceProviderName: + description: Name of the Channel. + type: string + type: object + dvbSubPids: + type: string + dvbTdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + repInterval: + type: number + type: object + dvbTeletextPid: + description: Selects a specific + PID from within a source. + type: string + ebif: + type: string + ebpAudioInterval: + type: string + ebpLookaheadMs: + type: number + ebpPlacement: + type: string + ecmPid: + description: Selects a specific + PID from within a source. + type: string + esRateInPes: + type: string + etvPlatformPid: + description: Selects a specific + PID from within a source. + type: string + etvSignalPid: + description: Selects a specific + PID from within a source. + type: string + fragmentTime: + type: number + klv: + type: string + klvDataPids: + type: string + nielsenId3Behavior: + type: string + nullPacketBitrate: + description: Average bitrate in + bits/second. + type: number + patInterval: + type: number + pcrControl: + type: string + pcrPeriod: + type: number + pcrPid: + description: Selects a specific + PID from within a source. + type: string + pmtInterval: + type: number + pmtPid: + description: Selects a specific + PID from within a source. + type: string + programNum: + type: number + rateMode: + type: string + scte27Pids: + type: string + scte35Control: + type: string + scte35Pid: + description: PID from which to + read SCTE-35 messages. + type: string + segmentationMarkers: + type: string + segmentationStyle: + type: string + segmentationTime: + type: number + timedMetadataBehavior: + type: string + timedMetadataPid: + description: Selects a specific + PID from within a source. + type: string + transportStreamId: + description: User-specified id. + Ths is used in an output group + or an output. 
+ type: number + videoPid: + description: Selects a specific + PID from within a source. + type: string + type: object + rawSettings: + description: Raw Settings. This can + be set as an empty block. + type: object + type: object + extension: + description: Output file extension. + type: string + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + type: object + frameCaptureOutputSettings: + description: Settings for output. See Output + Settings for more details. + properties: + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + type: object + hlsOutputSettings: + description: Settings for output. See Output + Settings for more details. + properties: + h265PackagingType: + type: string + hlsSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + audioOnlyHlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. + properties: + audioGroupId: + description: 'Specifies the GROUP-ID + in the #EXT-X-MEDIA tag of the + target HLS audio rendition.' + type: string + audioOnlyImage: + properties: + passwordParam: + description: Key used to extract + the password from EC2 Parameter + store. + type: string + uri: + description: – Path to a + file accessible to the live + stream. + type: string + username: + description: Username for + destination. + type: string + type: object + audioTrackType: + type: string + segmentType: + type: string + type: object + fmp4HlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. 
+ properties: + audioRenditionSets: + type: string + nielsenId3Behavior: + type: string + timedMetadataBehavior: + type: string + type: object + frameCaptureHlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. + type: object + standardHlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. + properties: + audioRenditionSets: + type: string + m3u8Settings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + audioFramesPerPes: + type: number + audioPids: + type: string + ecmPid: + description: Selects a specific + PID from within a source. + type: string + nielsenId3Behavior: + type: string + patInterval: + type: number + pcrControl: + type: string + pcrPeriod: + type: number + pcrPid: + description: Selects a specific + PID from within a source. + type: string + pmtInterval: + type: number + pmtPid: + description: Selects a specific + PID from within a source. + type: string + programNum: + type: number + scte35Behavior: + type: string + scte35Pid: + description: PID from which + to read SCTE-35 messages. + type: string + timedMetadataBehavior: + type: string + timedMetadataPid: + description: Selects a specific + PID from within a source. + type: string + transportStreamId: + description: User-specified + id. Ths is used in an output + group or an output. + type: number + videoPid: + description: Selects a specific + PID from within a source. + type: string + type: object + type: object + type: object + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. 
+ type: string + segmentModifier: + type: string + type: object + mediaPackageOutputSettings: + description: Media package output settings. + This can be set as an empty block. + type: object + msSmoothOutputSettings: + description: Settings for output. See Output + Settings for more details. + properties: + h265PackagingType: + type: string + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + type: object + multiplexOutputSettings: + description: Multiplex output settings. See + Multiplex Output Settings for more details. + properties: + destination: + description: A director and base filename + where archive files should be written. + See Destination for more details. + properties: + destinationRefId: + description: Reference ID for the + destination. + type: string + type: object + type: object + rtmpOutputSettings: + description: RTMP output settings. See RTMP + Output Settings for more details. + properties: + certificateMode: + description: Setting to allow self signed + or verified RTMP certificates. + type: string + connectionRetryInterval: + description: Number of seconds to wait + before retrying connection to the flash + media server if the connection is lost. + type: number + destination: + description: A director and base filename + where archive files should be written. + See Destination for more details. + properties: + destinationRefId: + description: Reference ID for the + destination. + type: string + type: object + numRetries: + description: Number of retry attempts. + type: number + type: object + udpOutputSettings: + description: UDP output settings. See UDP + Output Settings for more details. + properties: + bufferMsec: + description: UDP output buffering in milliseconds. + type: number + containerSettings: + description: Settings specific to the + container type of the file. See Container + Settings for more details. 
+ properties: + m2tsSettings: + description: M2TS Settings. See M2TS + Settings for more details. + properties: + absentInputAudioBehavior: + type: string + arib: + type: string + aribCaptionsPid: + description: Selects a specific + PID from within a source. + type: string + aribCaptionsPidControl: + type: string + audioBufferModel: + type: string + audioFramesPerPes: + type: number + audioPids: + type: string + audioStreamType: + type: string + bitrate: + description: Average bitrate in + bits/second. + type: number + bufferModel: + type: string + ccDescriptor: + type: string + dvbNitSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + networkId: + description: User-specified + id. Ths is used in an output + group or an output. + type: number + networkName: + description: Name of the Channel. + type: string + repInterval: + type: number + type: object + dvbSdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + outputSdt: + type: string + repInterval: + type: number + serviceName: + description: Name of the Channel. + type: string + serviceProviderName: + description: Name of the Channel. + type: string + type: object + dvbSubPids: + type: string + dvbTdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + repInterval: + type: number + type: object + dvbTeletextPid: + description: Selects a specific + PID from within a source. + type: string + ebif: + type: string + ebpAudioInterval: + type: string + ebpLookaheadMs: + type: number + ebpPlacement: + type: string + ecmPid: + description: Selects a specific + PID from within a source. 
+ type: string + esRateInPes: + type: string + etvPlatformPid: + description: Selects a specific + PID from within a source. + type: string + etvSignalPid: + description: Selects a specific + PID from within a source. + type: string + fragmentTime: + type: number + klv: + type: string + klvDataPids: + type: string + nielsenId3Behavior: + type: string + nullPacketBitrate: + description: Average bitrate in + bits/second. + type: number + patInterval: + type: number + pcrControl: + type: string + pcrPeriod: + type: number + pcrPid: + description: Selects a specific + PID from within a source. + type: string + pmtInterval: + type: number + pmtPid: + description: Selects a specific + PID from within a source. + type: string + programNum: + type: number + rateMode: + type: string + scte27Pids: + type: string + scte35Control: + type: string + scte35Pid: + description: PID from which to + read SCTE-35 messages. + type: string + segmentationMarkers: + type: string + segmentationStyle: + type: string + segmentationTime: + type: number + timedMetadataBehavior: + type: string + timedMetadataPid: + description: Selects a specific + PID from within a source. + type: string + transportStreamId: + description: User-specified id. + Ths is used in an output group + or an output. + type: number + videoPid: + description: Selects a specific + PID from within a source. + type: string + type: object + type: object + destination: + description: A director and base filename + where archive files should be written. + See Destination for more details. + properties: + destinationRefId: + description: Reference ID for the + destination. + type: string + type: object + fecOutputSettings: + description: Settings for output. See + Output Settings for more details. + properties: + columnDepth: + description: The height of the FEC + protection matrix. + type: number + includeFec: + description: Enables column only or + column and row based FEC. 
+ type: string + rowLength: + description: The width of the FEC + protection matrix. + type: number + type: object + type: object + type: object + videoDescriptionName: + description: The name of the video description + used as video source for the output. + type: string + type: object + type: array + type: object + type: array + timecodeConfig: + description: Contains settings used to acquire and adjust + timecode information from inputs. See Timecode Config for + more details. + properties: + source: + description: The source for the timecode that will be + associated with the events outputs. + type: string + syncThreshold: + description: Threshold in frames beyond which output timecode + is resynchronized to the input timecode. + type: number + type: object + videoDescriptions: + description: Video Descriptions. See Video Descriptions for + more details. + items: + properties: + codecSettings: + description: Audio codec settings. See Audio Codec Settings + for more details. + properties: + frameCaptureSettings: + description: Frame capture settings. See Frame Capture + Settings for more details. + properties: + captureInterval: + description: The frequency at which to capture + frames for inclusion in the output. + type: number + captureIntervalUnits: + description: Unit for the frame capture interval. + type: string + type: object + h264Settings: + description: H264 settings. See H264 Settings for + more details. + properties: + adaptiveQuantization: + description: Enables or disables adaptive quantization. + type: string + afdSignaling: + description: Indicates that AFD values will + be written into the output stream. + type: string + bitrate: + description: Average bitrate in bits/second. + type: number + bufFillPct: + type: number + bufSize: + description: Size of buffer in bits. + type: number + colorMetadata: + description: Includes color space metadata in + the output. + type: string + entropyEncoding: + description: Entropy encoding mode. 
+ type: string + filterSettings: + description: Filters to apply to an encode. + See H264 Filter Settings for more details. + properties: + temporalFilterSettings: + description: Temporal filter settings. See + Temporal Filter Settings + properties: + postFilterSharpening: + description: Post filter sharpening. + type: string + strength: + description: Filter strength. + type: string + type: object + type: object + fixedAfd: + description: Four bit AFD value to write on + all frames of video in the output stream. + type: string + flickerAq: + type: string + forceFieldPictures: + description: Controls whether coding is performed + on a field basis or on a frame basis. + type: string + framerateControl: + description: Indicates how the output video + frame rate is specified. + type: string + framerateDenominator: + description: Framerate denominator. + type: number + framerateNumerator: + description: Framerate numerator. + type: number + gopBReference: + description: GOP-B reference. + type: string + gopClosedCadence: + description: Frequency of closed GOPs. + type: number + gopNumBFrames: + description: Number of B-frames between reference + frames. + type: number + gopSize: + description: GOP size in units of either frames + of seconds per gop_size_units. + type: number + gopSizeUnits: + description: Indicates if the gop_size is specified + in frames or seconds. + type: string + level: + description: H264 level. + type: string + lookAheadRateControl: + description: Amount of lookahead. + type: string + maxBitrate: + description: Set the maximum bitrate in order + to accommodate expected spikes in the complexity + of the video. + type: number + minIInterval: + type: number + numRefFrames: + description: Number of reference frames to use. + type: number + parControl: + description: Indicates how the output pixel + aspect ratio is specified. + type: string + parDenominator: + description: Pixel Aspect Ratio denominator. 
+ type: number + parNumerator: + description: Pixel Aspect Ratio numerator. + type: number + profile: + description: AAC profile. + type: string + qualityLevel: + description: Quality level. + type: string + qvbrQualityLevel: + description: Controls the target quality for + the video encode. + type: number + rateControlMode: + description: The rate control mode. + type: string + scanType: + description: Sets the scan type of the output. + type: string + sceneChangeDetect: + description: Scene change detection. + type: string + slices: + description: Number of slices per picture. + type: number + softness: + description: Softness. + type: number + spatialAq: + description: Makes adjustments within each frame + based on spatial variation of content complexity. + type: string + subgopLength: + description: Subgop length. + type: string + syntax: + description: Produces a bitstream compliant + with SMPTE RP-2027. + type: string + temporalAq: + description: Makes adjustments within each frame + based on temporal variation of content complexity. + type: string + timecodeInsertion: + description: Determines how timecodes should + be inserted into the video elementary stream. + type: string + type: object + h265Settings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + adaptiveQuantization: + description: Enables or disables adaptive quantization. + type: string + afdSignaling: + description: Indicates that AFD values will + be written into the output stream. + type: string + alternativeTransferFunction: + description: Whether or not EML should insert + an Alternative Transfer Function SEI message. + type: string + bitrate: + description: Average bitrate in bits/second. + type: number + bufSize: + description: Size of buffer in bits. + type: number + colorMetadata: + description: Includes color space metadata in + the output. 
+ type: string + colorSpaceSettings: + description: Define the color metadata for the + output. H265 Color Space Settings for more + details. + properties: + colorSpacePassthroughSettings: + description: Sets the colorspace metadata + to be passed through. + type: object + dolbyVision81Settings: + description: Set the colorspace to Dolby + Vision81. + type: object + hdr10Settings: + description: Set the colorspace to be HDR10. + See H265 HDR10 Settings for more details. + properties: + maxCll: + description: Sets the MaxCLL value for + HDR10. + type: number + maxFall: + description: Sets the MaxFALL value + for HDR10. + type: number + type: object + rec601Settings: + description: Set the colorspace to Rec. + 601. + type: object + rec709Settings: + description: Set the colorspace to Rec. + 709. + type: object + type: object + filterSettings: + description: Filters to apply to an encode. + See H264 Filter Settings for more details. + properties: + temporalFilterSettings: + description: Temporal filter settings. See + Temporal Filter Settings + properties: + postFilterSharpening: + description: Post filter sharpening. + type: string + strength: + description: Filter strength. + type: string + type: object + type: object + fixedAfd: + description: Four bit AFD value to write on + all frames of video in the output stream. + type: string + flickerAq: + type: string + framerateDenominator: + description: Framerate denominator. + type: number + framerateNumerator: + description: Framerate numerator. + type: number + gopClosedCadence: + description: Frequency of closed GOPs. + type: number + gopSize: + description: GOP size in units of either frames + of seconds per gop_size_units. + type: number + gopSizeUnits: + description: Indicates if the gop_size is specified + in frames or seconds. + type: string + level: + description: H264 level. + type: string + lookAheadRateControl: + description: Amount of lookahead. 
+ type: string + maxBitrate: + description: Set the maximum bitrate in order + to accommodate expected spikes in the complexity + of the video. + type: number + minIInterval: + type: number + parDenominator: + description: Pixel Aspect Ratio denominator. + type: number + parNumerator: + description: Pixel Aspect Ratio numerator. + type: number + profile: + description: AAC profile. + type: string + qvbrQualityLevel: + description: Controls the target quality for + the video encode. + type: number + rateControlMode: + description: The rate control mode. + type: string + scanType: + description: Sets the scan type of the output. + type: string + sceneChangeDetect: + description: Scene change detection. + type: string + slices: + description: Number of slices per picture. + type: number + tier: + description: Set the H265 tier in the output. + type: string + timecodeBurninSettings: + description: Apply a burned in timecode. See + H265 Timecode Burnin Settings for more details. + properties: + prefix: + description: Set a prefix on the burned + in timecode. + type: string + timecodeBurninFontSize: + description: Sets the size of the burned + in timecode. + type: string + timecodeBurninPosition: + description: Sets the position of the burned + in timecode. + type: string + type: object + timecodeInsertion: + description: Determines how timecodes should + be inserted into the video elementary stream. + type: string + type: object + type: object + height: + description: See the description in left_offset. For + height, specify the entire height of the rectangle + as a percentage of the underlying frame height. For + example, "80" means the rectangle height is 80% of + the underlying frame height. The top_offset and rectangle_height + must add up to 100% or less. This field corresponds + to tts:extent - Y in the TTML standard. + type: number + name: + description: Name of the Channel. 
+ type: string + respondToAfd: + description: Indicate how to respond to the AFD values + that might be in the input video. + type: string + scalingBehavior: + description: Behavior on how to scale. + type: string + sharpness: + description: Changes the strength of the anti-alias + filter used for scaling. + type: number + width: + description: See the description in left_offset. For + width, specify the entire width of the rectangle as + a percentage of the underlying frame width. For example, + "80" means the rectangle width is 80% of the underlying + frame width. The left_offset and rectangle_width must + add up to 100% or less. This field corresponds to + tts:extent - X in the TTML standard. + type: number + type: object + type: array + type: object + inputAttachments: + description: Input attachments for the channel. See Input Attachments + for more details. + items: + properties: + automaticInputFailoverSettings: + description: User-specified settings for defining what the + conditions are for declaring the input unhealthy and failing + over to a different input. See Automatic Input Failover + Settings for more details. + properties: + errorClearTimeMsec: + description: This clear time defines the requirement + a recovered input must meet to be considered healthy. + The input must have no failover conditions for this + length of time. Enter a time in milliseconds. This + value is particularly important if the input_preference + for the failover pair is set to PRIMARY_INPUT_PREFERRED, + because after this time, MediaLive will switch back + to the primary input. + type: number + failoverCondition: + description: A list of failover conditions. If any of + these conditions occur, MediaLive will perform a failover + to the other input. See Failover Condition Block for + more details. + items: + properties: + failoverConditionSettings: + description: Failover condition type-specific + settings. See Failover Condition Settings for + more details. 
+                                    properties:
+                                      audioSilenceSettings:
+                                        description: MediaLive will perform a failover
+                                          if the specified audio selector is silent
+                                          for the specified period. See Audio Silence
+                                          Failover Settings for more details.
+                                        properties:
+                                          audioSelectorName:
+                                            description: The name of the audio selector
+                                              in the input that MediaLive should monitor
+                                              to detect silence. Select your most
+                                              important rendition. If you didn't create
+                                              an audio selector in this input, leave
+                                              blank.
+                                            type: string
+                                          audioSilenceThresholdMsec:
+                                            description: The amount of time (in milliseconds)
+                                              that the active input must be silent
+                                              before automatic input failover occurs.
+                                              Silence is defined as audio loss or
+                                              audio quieter than -50 dBFS.
+                                            type: number
+                                        type: object
+                                      inputLossSettings:
+                                        description: MediaLive will perform a failover
+                                          if content is not detected in this input
+                                          for the specified period. See Input Loss
+                                          Failover Settings for more details.
+                                        properties:
+                                          inputLossThresholdMsec:
+                                            description: The amount of time (in milliseconds)
+                                              that no input is detected. After that
+                                              time, an input failover will occur.
+                                            type: number
+                                        type: object
+                                      videoBlackSettings:
+                                        description: MediaLive will perform a failover
+                                          if content is considered black for the specified
+                                          period. See Video Black Failover Settings
+                                          for more details.
+                                        properties:
+                                          blackDetectThreshold:
+                                            description: 'A value used in calculating
+                                              the threshold below which MediaLive
+                                              considers a pixel to be ''black''. For
+                                              the input to be considered black, every
+                                              pixel in a frame must be below this
+                                              threshold. The threshold is calculated
+                                              as a percentage (expressed as a decimal)
+                                              of white. Therefore .1 means 10% white
+                                              (or 90% black). Note how the formula
+                                              works for any color depth. For example,
+                                              if you set this field to 0.1 in 10-bit
+                                              color depth: (1023*0.1=102.3), which
+                                              means a pixel value of 102 or less is
+                                              ''black''. If you set this field to
+                                              .1 in an 8-bit color depth: (255*0.1=25.5),
+                                              which means a pixel value of 25 or less
+                                              is ''black''. The range is 0.0 to 1.0,
+                                              with any number of decimal places.'
+                                            type: number
+                                          videoBlackThresholdMsec:
+                                            description: The amount of time (in milliseconds)
+                                              that the active input must be black
+                                              before automatic input failover occurs.
+                                            type: number
+                                        type: object
+                                    type: object
+                                type: object
+                              type: array
+                            inputPreference:
+                              description: Input preference when deciding which input
+                                to make active when a previously failed input has
+                                recovered.
+                              type: string
+                            secondaryInputId:
+                              description: The input ID of the secondary input in
+                                the automatic input failover pair.
+                              type: string
+                          type: object
+                        inputAttachmentName:
+                          description: User-specified name for the attachment.
+                          type: string
+                        inputId:
+                          description: The ID of the input.
+                          type: string
+                        inputIdRef:
+                          description: Reference to a Input in medialive to populate
+                            inputId.
+                          properties:
+                            name:
+                              description: Name of the referenced object.
+                              type: string
+                            policy:
+                              description: Policies for referencing.
+                              properties:
+                                resolution:
+                                  default: Required
+                                  description: |-
+                                    Resolution specifies whether resolution of this reference is required.
+                                    The default is 'Required', which means the reconcile will fail if the
+                                    reference cannot be resolved. 'Optional' means this reference will be
+                                    a no-op if it cannot be resolved.
+                                  enum:
+                                  - Required
+                                  - Optional
+                                  type: string
+                                resolve:
+                                  description: |-
+                                    Resolve specifies when this reference should be resolved. The default
+                                    is 'IfNotPresent', which will attempt to resolve the reference only when
+                                    the corresponding field is not present. Use 'Always' to resolve the
+                                    reference on every reconcile.
+                                  enum:
+                                  - Always
+                                  - IfNotPresent
+                                  type: string
+                              type: object
+                          required:
+                          - name
+                          type: object
+                        inputIdSelector:
+                          description: Selector for a Input in medialive to populate
+                            inputId.
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + inputSettings: + description: Settings of an input. See Input Settings for + more details. + properties: + audioSelector: + items: + properties: + name: + description: Name of the Channel. + type: string + selectorSettings: + description: The audio selector settings. See + Audio Selector Settings for more details. + properties: + audioHlsRenditionSelection: + description: Audio HLS Rendition Selection. + See Audio HLS Rendition Selection for more + details. + properties: + groupId: + description: 'Specifies the GROUP-ID in + the #EXT-X-MEDIA tag of the target HLS + audio rendition.' + type: string + name: + description: Name of the Channel. + type: string + type: object + audioLanguageSelection: + description: Audio Language Selection. See + Audio Language Selection for more details. 
+ properties: + languageCode: + description: Selects a specific three-letter + language code from within an audio source. + type: string + languageSelectionPolicy: + description: When set to “strict”, the + transport stream demux strictly identifies + audio streams by their language descriptor. + If a PMT update occurs such that an + audio stream matching the initially + selected language is no longer present + then mute will be encoded until the + language returns. If “loose”, then on + a PMT update the demux will choose another + audio stream in the program with the + same stream type if it can’t find one + with the same language. + type: string + type: object + audioPidSelection: + description: Audio Pid Selection. See Audio + PID Selection for more details. + properties: + pid: + description: Selects a specific PID from + within a source. + type: number + type: object + audioTrackSelection: + description: Audio Track Selection. See Audio + Track Selection for more details. + properties: + dolbyEDecode: + description: Configure decoding options + for Dolby E streams - these should be + Dolby E frames carried in PCM streams + tagged with SMPTE-337. See Dolby E Decode + for more details. + properties: + programSelection: + description: Applies only to Dolby + E. Enter the program ID (according + to the metadata in the audio) of + the Dolby E program to extract from + the specified track. One program + extracted per audio selector. To + select multiple programs, create + multiple selectors with the same + Track and different Program numbers. + “All channels” means to ignore the + program IDs and include all the + channels in this selector; useful + if metadata is known to be incorrect. + type: string + type: object + tracks: + description: Selects one or more unique + audio tracks from within a source. See + Audio Tracks for more details. + items: + properties: + track: + description: 1-based integer value + that maps to a specific audio + track. 
+ type: number + type: object + type: array + type: object + type: object + type: object + type: array + captionSelector: + items: + properties: + languageCode: + description: Selects a specific three-letter language + code from within an audio source. + type: string + name: + description: Name of the Channel. + type: string + selectorSettings: + description: The audio selector settings. See + Audio Selector Settings for more details. + properties: + ancillarySourceSettings: + description: Ancillary Source Settings. See + Ancillary Source Settings for more details. + properties: + sourceAncillaryChannelNumber: + description: Specifies the number (1 to + 4) of the captions channel you want + to extract from the ancillary captions. + If you plan to convert the ancillary + captions to another format, complete + this field. If you plan to choose Embedded + as the captions destination in the output + (to pass through all the channels in + the ancillary captions), leave this + field blank because MediaLive ignores + the field. + type: number + type: object + aribSourceSettings: + description: ARIB Source Settings. + type: object + dvbSubSourceSettings: + description: DVB Sub Source Settings. See + DVB Sub Source Settings for more details. + properties: + ocrLanguage: + description: If you will configure a WebVTT + caption description that references + this caption selector, use this field + to provide the language to consider + when translating the image-based source + to text. + type: string + pid: + description: Selects a specific PID from + within a source. + type: number + type: object + embeddedSourceSettings: + description: Embedded Source Settings. See + Embedded Source Settings for more details. + properties: + convert608To708: + description: If upconvert, 608 data is + both passed through via the “608 compatibility + bytes” fields of the 708 wrapper as + well as translated into 708. 708 data + present in the source content will be + discarded. 
+ type: string + scte20Detection: + description: Set to “auto” to handle streams + with intermittent and/or non-aligned + SCTE-20 and Embedded captions. + type: string + source608ChannelNumber: + description: Specifies the 608/708 channel + number within the video track from which + to extract captions. Unused for passthrough. + type: number + type: object + scte20SourceSettings: + description: SCTE20 Source Settings. See SCTE + 20 Source Settings for more details. + properties: + convert608To708: + description: If upconvert, 608 data is + both passed through via the “608 compatibility + bytes” fields of the 708 wrapper as + well as translated into 708. 708 data + present in the source content will be + discarded. + type: string + source608ChannelNumber: + description: Specifies the 608/708 channel + number within the video track from which + to extract captions. Unused for passthrough. + type: number + type: object + scte27SourceSettings: + description: SCTE27 Source Settings. See SCTE + 27 Source Settings for more details. + properties: + ocrLanguage: + description: If you will configure a WebVTT + caption description that references + this caption selector, use this field + to provide the language to consider + when translating the image-based source + to text. + type: string + pid: + description: Selects a specific PID from + within a source. + type: number + type: object + teletextSourceSettings: + description: Teletext Source Settings. See + Teletext Source Settings for more details. + properties: + outputRectangle: + description: Optionally defines a region + where TTML style captions will be displayed. + See Caption Rectangle for more details. + properties: + height: + description: See the description in + left_offset. For height, specify + the entire height of the rectangle + as a percentage of the underlying + frame height. For example, "80" + means the rectangle height is 80% + of the underlying frame height. 
+ The top_offset and rectangle_height + must add up to 100% or less. This + field corresponds to tts:extent + - Y in the TTML standard. + type: number + leftOffset: + description: Applies only if you plan + to convert these source captions + to EBU-TT-D or TTML in an output. + (Make sure to leave the default + if you don’t have either of these + formats in the output.) You can + define a display rectangle for the + captions that is smaller than the + underlying video frame. You define + the rectangle by specifying the + position of the left edge, top edge, + bottom edge, and right edge of the + rectangle, all within the underlying + video frame. The units for the measurements + are percentages. If you specify + a value for one of these fields, + you must specify a value for all + of them. For leftOffset, specify + the position of the left edge of + the rectangle, as a percentage of + the underlying frame width, and + relative to the left edge of the + frame. For example, "10" means the + measurement is 10% of the underlying + frame width. The rectangle left + edge starts at that position from + the left edge of the frame. This + field corresponds to tts:origin + - X in the TTML standard. + type: number + topOffset: + description: See the description in + left_offset. For top_offset, specify + the position of the top edge of + the rectangle, as a percentage of + the underlying frame height, and + relative to the top edge of the + frame. For example, "10" means the + measurement is 10% of the underlying + frame height. The rectangle top + edge starts at that position from + the top edge of the frame. This + field corresponds to tts:origin + - Y in the TTML standard. + type: number + width: + description: See the description in + left_offset. For width, specify + the entire width of the rectangle + as a percentage of the underlying + frame width. For example, "80" means + the rectangle width is 80% of the + underlying frame width. 
The left_offset + and rectangle_width must add up + to 100% or less. This field corresponds + to tts:extent - X in the TTML standard. + type: number + type: object + pageNumber: + description: Specifies the teletext page + number within the data stream from which + to extract captions. Range of 0x100 + (256) to 0x8FF (2303). Unused for passthrough. + Should be specified as a hexadecimal + string with no “0x” prefix. + type: string + type: object + type: object + type: object + type: array + deblockFilter: + description: Enable or disable the deblock filter when + filtering. + type: string + denoiseFilter: + description: Enable or disable the denoise filter when + filtering. + type: string + filterStrength: + description: Adjusts the magnitude of filtering from + 1 (minimal) to 5 (strongest). + type: number + inputFilter: + description: Turns on the filter for the input. + type: string + networkInputSettings: + description: Input settings. See Network Input Settings + for more details. + properties: + hlsInputSettings: + description: Specifies HLS input settings when the + uri is for a HLS manifest. See HLS Input Settings + for more details. + properties: + bandwidth: + description: The bitrate is specified in bits + per second, as in an HLS manifest. + type: number + bufferSegments: + description: Buffer segments. + type: number + retries: + description: The number of consecutive times + that attempts to read a manifest or segment + must fail before the input is considered unavailable. + type: number + retryInterval: + description: The number of seconds between retries + when an attempt to read a manifest or segment + fails. + type: number + scte35Source: + description: The source for the timecode that + will be associated with the events outputs. + type: string + type: object + serverValidation: + description: Check HTTPS server certificates. + type: string + type: object + scte35Pid: + description: PID from which to read SCTE-35 messages. 
+ type: number + smpte2038DataPreference: + description: Specifies whether to extract applicable + ancillary data from a SMPTE-2038 source in the input. + type: string + sourceEndBehavior: + description: Loop input if it is a file. + type: string + videoSelector: + properties: + colorSpace: + type: string + colorSpaceUsage: + type: string + type: object + type: object + type: object + type: array + inputSpecification: + description: Specification of network and file inputs for the + channel. + properties: + codec: + type: string + inputResolution: + description: '- Maximum CDI input resolution.' + type: string + maximumBitrate: + description: Average bitrate in bits/second. + type: string + type: object + logLevel: + description: The log level to write to Cloudwatch logs. + type: string + maintenance: + description: Maintenance settings for this channel. See Maintenance + for more details. + properties: + maintenanceDay: + description: The day of the week to use for maintenance. + type: string + maintenanceStartTime: + description: The hour maintenance will start. + type: string + type: object + name: + description: Name of the Channel. + type: string + roleArn: + description: Concise argument description. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + startChannel: + description: 'Whether to start/stop channel. Default: false' + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpc: + description: Settings for the VPC outputs. See VPC for more details. + properties: + publicAddressAllocationIds: + description: List of public address allocation ids to associate + with ENIs that will be created in Output VPC. 
Must specify + one for SINGLE_PIPELINE, two for STANDARD channels. + items: + type: string + type: array + securityGroupIds: + description: A list of up to 5 EC2 VPC security group IDs + to attach to the Output VPC network interfaces. If none + are specified then the VPC default security group will be + used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of VPC subnet IDs from the same VPC. If + STANDARD channel, subnet IDs must be mapped to two unique + availability zones (AZ). + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.channelClass is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.channelClass) + || (has(self.initProvider) && has(self.initProvider.channelClass))' + - message: spec.forProvider.destinations is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destinations) + || (has(self.initProvider) && has(self.initProvider.destinations))' + - message: spec.forProvider.encoderSettings is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.encoderSettings) + || (has(self.initProvider) && has(self.initProvider.encoderSettings))' + - message: spec.forProvider.inputAttachments is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.inputAttachments) + || (has(self.initProvider) && has(self.initProvider.inputAttachments))' + - message: spec.forProvider.inputSpecification is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.inputSpecification) + || (has(self.initProvider) && has(self.initProvider.inputSpecification))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || 
(has(self.initProvider) && has(self.initProvider.name))' + status: + description: ChannelStatus defines the observed state of Channel. + properties: + atProvider: + properties: + arn: + description: ARN of the Channel. + type: string + cdiInputSpecification: + description: Specification of CDI inputs for this channel. See + CDI Input Specification for more details. + properties: + resolution: + description: '- Maximum CDI input resolution.' + type: string + type: object + channelClass: + description: Concise argument description. + type: string + channelId: + description: ID of the channel in MediaPackage that is the destination + for this output group. + type: string + destinations: + description: Destinations for channel. See Destinations for more + details. + items: + properties: + id: + description: User-specified id. Ths is used in an output + group or an output. + type: string + mediaPackageSettings: + description: Destination settings for a MediaPackage output; + one destination for both encoders. See Media Package Settings + for more details. + items: + properties: + channelId: + description: ID of the channel in MediaPackage that + is the destination for this output group. + type: string + type: object + type: array + multiplexSettings: + description: Destination settings for a Multiplex output; + one destination for both encoders. See Multiplex Settings + for more details. + properties: + multiplexId: + description: The ID of the Multiplex that the encoder + is providing output to. + type: string + programName: + description: The program name of the Multiplex program + that the encoder is providing output to. + type: string + type: object + settings: + description: Destination settings for a standard output; + one destination for each redundant encoder. See Settings + for more details. + items: + properties: + passwordParam: + description: Key used to extract the password from + EC2 Parameter store. 
+ type: string + streamName: + description: Stream name RTMP destinations (URLs of + type rtmp://) + type: string + url: + description: A URL specifying a destination. + type: string + username: + description: Username for destination. + type: string + type: object + type: array + type: object + type: array + encoderSettings: + description: Encoder settings. See Encoder Settings for more details. + properties: + audioDescriptions: + description: Audio descriptions for the channel. See Audio + Descriptions for more details. + items: + properties: + audioNormalizationSettings: + description: Advanced audio normalization settings. + See Audio Normalization Settings for more details. + properties: + algorithm: + description: Audio normalization algorithm to use. + itu17701 conforms to the CALM Act specification, + itu17702 to the EBU R-128 specification. + type: string + algorithmControl: + description: Algorithm control for the audio description. + type: string + targetLkfs: + description: Target LKFS (loudness) to adjust volume + to. + type: number + type: object + audioSelectorName: + description: The name of the audio selector in the input + that MediaLive should monitor to detect silence. Select + your most important rendition. If you didn't create + an audio selector in this input, leave blank. + type: string + audioType: + description: Applies only if audioTypeControl is useConfigured. + The values for audioType are defined in ISO-IEC 13818-1. + type: string + audioTypeControl: + description: Determined how audio type is determined. + type: string + audioWatermarkSettings: + description: Settings to configure one or more solutions + that insert audio watermarks in the audio encode. + See Audio Watermark Settings for more details. + properties: + nielsenWatermarksSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. 
+ properties: + nielsenCbetSettings: + description: Used to insert watermarks of type + Nielsen CBET. See Nielsen CBET Settings for + more details. + properties: + cbetCheckDigitString: + type: string + cbetStepaside: + description: Determines the method of CBET + insertion mode when prior encoding is + detected on the same layer. + type: string + csid: + description: CBET source ID to use in the + watermark. + type: string + type: object + nielsenDistributionType: + description: Distribution types to assign to + the watermarks. Options are PROGRAM_CONTENT + and FINAL_DISTRIBUTOR. + type: string + nielsenNaesIiNwSettings: + description: Used to insert watermarks of type + Nielsen NAES, II (N2) and Nielsen NAES VI + (NW). See Nielsen NAES II NW Settings for + more details. + items: + properties: + checkDigitString: + type: string + sid: + description: The Nielsen Source ID to + include in the watermark. + type: number + type: object + type: array + type: object + type: object + codecSettings: + description: Audio codec settings. See Audio Codec Settings + for more details. + properties: + aacSettings: + description: Aac Settings. See AAC Settings for + more details. + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + inputType: + description: Set to "broadcasterMixedAd" when + input contains pre-mixed main audio + AD (narration) + as a stereo pair. + type: string + profile: + description: AAC profile. + type: string + rateControlMode: + description: The rate control mode. + type: string + rawFormat: + description: Sets LATM/LOAS AAC output for raw + containers. + type: string + sampleRate: + description: Sample rate in Hz. + type: number + spec: + description: Use MPEG-2 AAC audio instead of + MPEG-4 AAC audio for raw or MPEG-2 Transport + Stream containers. 
+ type: string + vbrQuality: + description: VBR Quality Level - Only used if + rateControlMode is VBR. + type: string + type: object + ac3Settings: + description: Ac3 Settings. See AC3 Settings for + more details. + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + bitstreamMode: + description: Specifies the bitstream mode (bsmod) + for the emitted AC-3 stream. + type: string + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + dialnorm: + description: Sets the dialnorm of the output. + type: number + drcProfile: + description: If set to filmStandard, adds dynamic + range compression signaling to the output + bitstream as defined in the Dolby Digital + specification. + type: string + lfeFilter: + description: When set to enabled, applies a + 120Hz lowpass filter to the LFE channel prior + to encoding. + type: string + metadataControl: + description: Metadata control. + type: string + type: object + eac3AtmosSettings: + description: '- Eac3 Atmos Settings. See EAC3 Atmos + Settings' + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + dialnorm: + description: Sets the dialnorm of the output. + type: number + drcLine: + description: Sets the Dolby dynamic range compression + profile. + type: string + drcRf: + description: Sets the profile for heavy Dolby + dynamic range compression. + type: string + heightTrim: + description: Height dimensional trim. + type: number + surroundTrim: + description: Surround dimensional trim. + type: number + type: object + eac3Settings: + description: '- Eac3 Settings. See EAC3 Settings' + properties: + attenuationControl: + description: Sets the attenuation control. + type: string + bitrate: + description: Average bitrate in bits/second. + type: number + bitstreamMode: + description: Specifies the bitstream mode (bsmod) + for the emitted AC-3 stream. 
+ type: string + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + dcFilter: + type: string + dialnorm: + description: Sets the dialnorm of the output. + type: number + drcLine: + description: Sets the Dolby dynamic range compression + profile. + type: string + drcRf: + description: Sets the profile for heavy Dolby + dynamic range compression. + type: string + lfeControl: + type: string + lfeFilter: + description: When set to enabled, applies a + 120Hz lowpass filter to the LFE channel prior + to encoding. + type: string + loRoCenterMixLevel: + description: H264 level. + type: number + loRoSurroundMixLevel: + description: H264 level. + type: number + ltRtCenterMixLevel: + description: H264 level. + type: number + ltRtSurroundMixLevel: + description: H264 level. + type: number + metadataControl: + description: Metadata control. + type: string + passthroughControl: + type: string + phaseControl: + type: string + stereoDownmix: + type: string + surroundExMode: + type: string + surroundMode: + type: string + type: object + mp2Settings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + bitrate: + description: Average bitrate in bits/second. + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + sampleRate: + description: Sample rate in Hz. + type: number + type: object + passThroughSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + type: object + wavSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + bitDepth: + type: number + codingMode: + description: Mono, Stereo, or 5.1 channel layout. + type: string + sampleRate: + description: Sample rate in Hz. 
+ type: number + type: object + type: object + languageCode: + description: Selects a specific three-letter language + code from within an audio source. + type: string + languageCodeControl: + type: string + name: + description: Name of the Channel. + type: string + remixSettings: + description: Destination settings for a standard output; + one destination for each redundant encoder. See Settings + for more details. + properties: + channelMappings: + items: + properties: + inputChannelLevels: + items: + properties: + gain: + type: number + inputChannel: + type: number + type: object + type: array + outputChannel: + type: number + type: object + type: array + channelsIn: + type: number + channelsOut: + type: number + type: object + streamName: + description: Stream name RTMP destinations (URLs of + type rtmp://) + type: string + type: object + type: array + availBlanking: + description: Settings for ad avail blanking. See Avail Blanking + for more details. + properties: + availBlankingImage: + description: Blanking image to be used. See Avail Blanking + Image for more details. + properties: + passwordParam: + description: Key used to extract the password from + EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible to the live + stream. + type: string + username: + description: Username for destination. + type: string + type: object + state: + description: When set to enabled, causes video, audio + and captions to be blanked when insertion metadata is + added. + type: string + type: object + captionDescriptions: + description: Caption Descriptions. See Caption Descriptions + for more details. + items: + properties: + accessibility: + description: Indicates whether the caption track implements + accessibility features such as written descriptions + of spoken dialog, music, and sounds. 
+ type: string + captionSelectorName: + description: Specifies which input caption selector + to use as a caption source when generating output + captions. This field should match a captionSelector + name. + type: string + destinationSettings: + description: Additional settings for captions destination + that depend on the destination type. See Destination + Settings for more details. + properties: + aribDestinationSettings: + description: ARIB Destination Settings. + type: object + burnInDestinationSettings: + description: Burn In Destination Settings. See Burn + In Destination Settings for more details. + properties: + alignment: + description: justify live subtitles and center-justify + pre-recorded subtitles. All burn-in and DVB-Sub + font settings must match. + type: string + backgroundColor: + description: in and DVB-Sub font settings must + match. + type: string + backgroundOpacity: + description: in and DVB-Sub font settings must + match. + type: number + font: + description: in. File extension must be ‘ttf’ + or ‘tte’. Although the user can select output + fonts for many different types of input captions, + embedded, STL and teletext sources use a strict + grid system. Using external fonts with these + caption sources could cause unexpected display + of proportional fonts. All burn-in and DVB-Sub + font settings must match. See Font for more + details. + properties: + passwordParam: + description: Key used to extract the password + from EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible + to the live stream. + type: string + username: + description: Username for destination. + type: string + type: object + fontColor: + description: in captions. This option is not + valid for source captions that are STL, 608/embedded + or teletext. These source settings are already + pre-defined by the caption stream. All burn-in + and DVB-Sub font settings must match. + type: string + fontOpacity: + description: in captions. 
255 is opaque; 0 is + transparent. All burn-in and DVB-Sub font + settings must match. + type: number + fontResolution: + description: in and DVB-Sub font settings must + match. + type: number + fontSize: + description: in and DVB-Sub font settings must + match. + type: string + outlineColor: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: string + outlineSize: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: number + shadowColor: + description: in and DVB-Sub font settings must + match. + type: string + shadowOpacity: + description: in and DVB-Sub font settings must + match. + type: number + shadowXOffset: + description: 2 would result in a shadow offset + 2 pixels to the left. All burn-in and DVB-Sub + font settings must match. + type: number + shadowYOffset: + description: 2 would result in a shadow offset + 2 pixels above the text. All burn-in and DVB-Sub + font settings must match. + type: number + teletextGridControl: + description: Sub/Burn-in outputs. + type: string + xPosition: + description: in and DVB-Sub font settings must + match. + type: number + yPosition: + description: in and DVB-Sub font settings must + match. + type: number + type: object + dvbSubDestinationSettings: + description: DVB Sub Destination Settings. See DVB + Sub Destination Settings for more details. + properties: + alignment: + description: justify live subtitles and center-justify + pre-recorded subtitles. All burn-in and DVB-Sub + font settings must match. + type: string + backgroundColor: + description: in and DVB-Sub font settings must + match. + type: string + backgroundOpacity: + description: in and DVB-Sub font settings must + match. + type: number + font: + description: in. File extension must be ‘ttf’ + or ‘tte’. 
Although the user can select output + fonts for many different types of input captions, + embedded, STL and teletext sources use a strict + grid system. Using external fonts with these + caption sources could cause unexpected display + of proportional fonts. All burn-in and DVB-Sub + font settings must match. See Font for more + details. + properties: + passwordParam: + description: Key used to extract the password + from EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible + to the live stream. + type: string + username: + description: Username for destination. + type: string + type: object + fontColor: + description: in captions. This option is not + valid for source captions that are STL, 608/embedded + or teletext. These source settings are already + pre-defined by the caption stream. All burn-in + and DVB-Sub font settings must match. + type: string + fontOpacity: + description: in captions. 255 is opaque; 0 is + transparent. All burn-in and DVB-Sub font + settings must match. + type: number + fontResolution: + description: in and DVB-Sub font settings must + match. + type: number + fontSize: + description: in and DVB-Sub font settings must + match. + type: string + outlineColor: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: string + outlineSize: + description: defined by the caption stream. + All burn-in and DVB-Sub font settings must + match. + type: number + shadowColor: + description: in and DVB-Sub font settings must + match. + type: string + shadowOpacity: + description: in and DVB-Sub font settings must + match. + type: number + shadowXOffset: + description: 2 would result in a shadow offset + 2 pixels to the left. All burn-in and DVB-Sub + font settings must match. + type: number + shadowYOffset: + description: 2 would result in a shadow offset + 2 pixels above the text. All burn-in and DVB-Sub + font settings must match. 
+ type: number + teletextGridControl: + description: Sub/Burn-in outputs. + type: string + xPosition: + description: in and DVB-Sub font settings must + match. + type: number + yPosition: + description: in and DVB-Sub font settings must + match. + type: number + type: object + ebuTtDDestinationSettings: + description: EBU TT D Destination Settings. See + EBU TT D Destination Settings for more details. + properties: + copyrightHolder: + description: – Complete this field if you want + to include the name of the copyright holder + in the copyright tag in the captions metadata. + type: string + fillLineGap: + description: 'line captions). - enabled: Fill + with the captions background color (as specified + in the input captions). - disabled: Leave + the gap unfilled.' + type: string + fontFamily: + description: TT captions. Valid only if styleControl + is set to include. If you leave this field + empty, the font family is set to “monospaced”. + (If styleControl is set to exclude, the font + family is always set to “monospaced”.) You + specify only the font family. All other style + information (color, bold, position and so + on) is copied from the input captions. The + size is always set to 100% to allow the downstream + player to choose the size. - Enter a list + of font families, as a comma-separated list + of font names, in order of preference. The + name can be a font family (such as “Arial”), + or a generic font family (such as “serif”), + or “default” (to let the downstream player + choose the font). - Leave blank to set the + family to “monospace”. + type: string + styleControl: + description: 'TT captions. - include: Take the + style information (font color, font position, + and so on) from the source captions and include + that information in the font data attached + to the EBU-TT captions. This option is valid + only if the source captions are Embedded or + Teletext. 
- exclude: In the font data attached + to the EBU-TT captions, set the font family + to “monospaced”. Do not include any other + style information.' + type: string + type: object + embeddedDestinationSettings: + description: Embedded Destination Settings. + type: object + embeddedPlusScte20DestinationSettings: + description: Embedded Plus SCTE20 Destination Settings. + type: object + rtmpCaptionInfoDestinationSettings: + description: RTMP Caption Info Destination Settings. + type: object + scte20PlusEmbeddedDestinationSettings: + description: SCTE20 Plus Embedded Destination Settings. + type: object + scte27DestinationSettings: + description: – SCTE27 Destination Settings. + type: object + smpteTtDestinationSettings: + description: – SMPTE TT Destination Settings. + type: object + teletextDestinationSettings: + description: – Teletext Destination Settings. + type: object + ttmlDestinationSettings: + description: – TTML Destination Settings. See TTML + Destination Settings for more details. + properties: + styleControl: + description: 'TT captions. - include: Take the + style information (font color, font position, + and so on) from the source captions and include + that information in the font data attached + to the EBU-TT captions. This option is valid + only if the source captions are Embedded or + Teletext. - exclude: In the font data attached + to the EBU-TT captions, set the font family + to “monospaced”. Do not include any other + style information.' + type: string + type: object + webvttDestinationSettings: + description: WebVTT Destination Settings. See WebVTT + Destination Settings for more details. + properties: + styleControl: + description: 'TT captions. - include: Take the + style information (font color, font position, + and so on) from the source captions and include + that information in the font data attached + to the EBU-TT captions. This option is valid + only if the source captions are Embedded or + Teletext. 
- exclude: In the font data attached + to the EBU-TT captions, set the font family + to “monospaced”. Do not include any other + style information.' + type: string + type: object + type: object + languageCode: + description: Selects a specific three-letter language + code from within an audio source. + type: string + languageDescription: + description: Human readable information to indicate + captions available for players (eg. English, or Spanish). + type: string + name: + description: Name of the Channel. + type: string + type: object + type: array + globalConfiguration: + description: Configuration settings that apply to the event + as a whole. See Global Configuration for more details. + properties: + initialAudioGain: + description: – Value to set the initial audio gain for + the Live Event. + type: number + inputEndAction: + description: of-file). When switchAndLoopInputs is configured + the encoder will restart at the beginning of the first + input. When “none” is configured the encoder will transcode + either black, a solid color, or a user specified slate + images per the “Input Loss Behavior” configuration until + the next input switch occurs (which is controlled through + the Channel Schedule API). + type: string + inputLossBehavior: + description: Settings for system actions when input is + lost. See Input Loss Behavior for more details. + properties: + blackFrameMsec: + type: number + inputLossImageColor: + type: string + inputLossImageSlate: + properties: + passwordParam: + description: Key used to extract the password + from EC2 Parameter store. + type: string + uri: + description: – Path to a file accessible to the + live stream. + type: string + username: + description: Username for destination. + type: string + type: object + inputLossImageType: + type: string + repeatFrameMsec: + type: number + type: object + outputLockingMode: + description: MediaLive will attempt to synchronize the + output of each pipeline to the other. 
EPOCH_LOCKING + - MediaLive will attempt to synchronize the output of + each pipeline to the Unix epoch. + type: string + outputTimingSource: + description: – Indicates whether the rate of frames emitted + by the Live encoder should be paced by its system clock + (which optionally may be locked to another source via + NTP) or should be locked to the clock of the source + that is providing the input stream. + type: string + supportLowFramerateInputs: + description: – Adjusts video input buffer for streams + with very low video framerates. This is commonly set + to enabled for music channels with less than one video + frame per second. + type: string + type: object + motionGraphicsConfiguration: + description: Settings for motion graphics. See Motion Graphics + Configuration for more details. + properties: + motionGraphicsInsertion: + description: – Motion Graphics Insertion. + type: string + motionGraphicsSettings: + description: – Motion Graphics Settings. See Motion Graphics + Settings for more details. + properties: + htmlMotionGraphicsSettings: + description: – Html Motion Graphics Settings. + type: object + type: object + type: object + nielsenConfiguration: + description: Nielsen configuration settings. See Nielsen Configuration + for more details. + properties: + distributorId: + description: – Enter the Distributor ID assigned to your + organization by Nielsen. + type: string + nielsenPcmToId3Tagging: + description: – Enables Nielsen PCM to ID3 tagging. + type: string + type: object + outputGroups: + description: Output groups for the channel. See Output Groups + for more details. + items: + properties: + name: + description: Name of the Channel. + type: string + outputGroupSettings: + description: Settings associated with the output group. + See Output Group Settings for more details. + properties: + archiveGroupSettings: + description: Archive group settings. See Archive + Group Settings for more details. 
+ items: + properties: + archiveCdnSettings: + description: Parameters that control the interactions + with the CDN. See Archive CDN Settings for + more details. + properties: + archiveS3Settings: + description: Archive S3 Settings. See + Archive S3 Settings for more details. + properties: + cannedAcl: + description: Specify the canned ACL + to apply to each S3 request. + type: string + type: object + type: object + destination: + description: A director and base filename + where archive files should be written. See + Destination for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + rolloverInterval: + description: Number of seconds to write to + archive file before closing and starting + a new one. + type: number + type: object + type: array + frameCaptureGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + frameCaptureCdnSettings: + description: Destination settings for a standard + output; one destination for each redundant + encoder. See Settings for more details. + properties: + frameCaptureS3Settings: + description: Destination settings for a + standard output; one destination for each + redundant encoder. See Settings for more + details. + properties: + cannedAcl: + description: Specify the canned ACL + to apply to each S3 request. + type: string + type: object + type: object + type: object + hlsGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. 
+ properties: + adMarkers: + description: The ad marker type for this output + group. + items: + type: string + type: array + baseUrlContent: + type: string + baseUrlContent1: + type: string + baseUrlManifest: + type: string + baseUrlManifest1: + type: string + captionLanguageMappings: + items: + properties: + captionChannel: + type: number + languageCode: + description: Selects a specific three-letter + language code from within an audio source. + type: string + languageDescription: + description: Human readable information + to indicate captions available for players + (eg. English, or Spanish). + type: string + type: object + type: array + captionLanguageSetting: + type: string + clientCache: + type: string + codecSpecification: + type: string + constantIv: + type: string + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + directoryStructure: + type: string + discontinuityTags: + description: Key-value map of resource tags. + type: string + encryptionType: + type: string + hlsCdnSettings: + description: Destination settings for a standard + output; one destination for each redundant + encoder. See Settings for more details. + items: + properties: + hlsAkamaiSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. + type: number + filecacheDuration: + type: number + httpTransferMode: + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. 
+ type: number + salt: + type: string + token: + type: string + type: object + hlsBasicPutSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. + type: number + filecacheDuration: + type: number + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. + type: number + type: object + hlsMediaStoreSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. + type: number + filecacheDuration: + type: number + mediaStoreStorageClass: + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. + type: number + type: object + hlsS3Settings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + cannedAcl: + description: Specify the canned ACL + to apply to each S3 request. + type: string + type: object + hlsWebdavSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + connectionRetryInterval: + description: Number of seconds to + wait before retrying connection + to the flash media server if the + connection is lost. 
+ type: number + filecacheDuration: + type: number + httpTransferMode: + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to + wait until a restart is initiated. + type: number + type: object + type: object + type: array + hlsId3SegmentTagging: + type: string + iframeOnlyPlaylists: + type: string + incompleteSegmentBehavior: + type: string + indexNSegments: + type: number + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + ivInManifest: + type: string + ivSource: + description: The source for the timecode that + will be associated with the events outputs. + type: string + keepSegments: + type: number + keyFormat: + type: string + keyFormatVersions: + type: string + keyProviderSettings: + description: Destination settings for a standard + output; one destination for each redundant + encoder. See Settings for more details. + properties: + staticKeySettings: + description: Destination settings for a + standard output; one destination for each + redundant encoder. See Settings for more + details. + items: + properties: + keyProviderServer: + properties: + passwordParam: + description: Key used to extract + the password from EC2 Parameter + store. + type: string + uri: + description: – Path to a file + accessible to the live stream. + type: string + username: + description: Username for destination. 
+ type: string + type: object + staticKeyValue: + type: string + type: object + type: array + type: object + manifestCompression: + type: string + manifestDurationFormat: + type: string + minSegmentLength: + type: number + mode: + type: string + outputSelection: + type: string + programDateTime: + type: string + programDateTimeClock: + type: string + programDateTimePeriod: + type: number + redundantManifest: + type: string + segmentLength: + type: number + segmentsPerSubdirectory: + type: number + streamInfResolution: + description: '- Maximum CDI input resolution.' + type: string + timedMetadataId3Frame: + description: Indicates ID3 frame that has the + timecode. + type: string + timedMetadataId3Period: + type: number + timestampDeltaMilliseconds: + type: number + tsFileMode: + type: string + type: object + mediaPackageGroupSettings: + description: Media package group settings. See Media + Package Group Settings for more details. + properties: + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. + properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + type: object + msSmoothGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + acquisitionPointId: + description: User-specified id. Ths is used + in an output group or an output. + type: string + audioOnlyTimecodeControl: + type: string + certificateMode: + description: Setting to allow self signed or + verified RTMP certificates. + type: string + connectionRetryInterval: + description: Number of seconds to wait before + retrying connection to the flash media server + if the connection is lost. + type: number + destination: + description: A director and base filename where + archive files should be written. See Destination + for more details. 
+ properties: + destinationRefId: + description: Reference ID for the destination. + type: string + type: object + eventId: + description: User-specified id. Ths is used + in an output group or an output. + type: string + eventIdMode: + type: string + eventStopBehavior: + type: string + filecacheDuration: + type: number + fragmentLength: + type: number + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + numRetries: + description: Number of retry attempts. + type: number + restartDelay: + description: Number of seconds to wait until + a restart is initiated. + type: number + segmentationMode: + type: string + sendDelayMs: + type: number + sparseTrackType: + type: string + streamManifestBehavior: + type: string + timestampOffset: + type: string + timestampOffsetMode: + type: string + type: object + multiplexGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + type: object + rtmpGroupSettings: + description: RTMP group settings. See RTMP Group + Settings for more details. + properties: + adMarkers: + description: The ad marker type for this output + group. + items: + type: string + type: array + authenticationScheme: + description: Authentication scheme to use when + connecting with CDN. + type: string + cacheFullBehavior: + description: Controls behavior when content + cache fills up. + type: string + cacheLength: + description: Cache length in seconds, is used + to calculate buffer size. + type: number + captionData: + description: Controls the types of data that + passes to onCaptionInfo outputs. + type: string + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + restartDelay: + description: Number of seconds to wait until + a restart is initiated. 
+ type: number + type: object + udpGroupSettings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + inputLossAction: + description: Controls the behavior of the RTMP + group if input becomes unavailable. + type: string + timedMetadataId3Frame: + description: Indicates ID3 frame that has the + timecode. + type: string + timedMetadataId3Period: + type: number + type: object + type: object + outputs: + description: List of outputs. See Outputs for more details. + items: + properties: + audioDescriptionNames: + description: The names of the audio descriptions + used as audio sources for the output. + items: + type: string + type: array + x-kubernetes-list-type: set + captionDescriptionNames: + description: The names of the caption descriptions + used as caption sources for the output. + items: + type: string + type: array + x-kubernetes-list-type: set + outputName: + description: The name used to identify an output. + type: string + outputSettings: + description: Settings for output. See Output Settings + for more details. + properties: + archiveOutputSettings: + description: Archive output settings. See + Archive Output Settings for more details. + properties: + containerSettings: + description: Settings specific to the + container type of the file. See Container + Settings for more details. + properties: + m2tsSettings: + description: M2TS Settings. See M2TS + Settings for more details. + properties: + absentInputAudioBehavior: + type: string + arib: + type: string + aribCaptionsPid: + description: Selects a specific + PID from within a source. + type: string + aribCaptionsPidControl: + type: string + audioBufferModel: + type: string + audioFramesPerPes: + type: number + audioPids: + type: string + audioStreamType: + type: string + bitrate: + description: Average bitrate in + bits/second. 
+ type: number + bufferModel: + type: string + ccDescriptor: + type: string + dvbNitSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + networkId: + description: User-specified + id. Ths is used in an output + group or an output. + type: number + networkName: + description: Name of the Channel. + type: string + repInterval: + type: number + type: object + dvbSdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + outputSdt: + type: string + repInterval: + type: number + serviceName: + description: Name of the Channel. + type: string + serviceProviderName: + description: Name of the Channel. + type: string + type: object + dvbSubPids: + type: string + dvbTdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + repInterval: + type: number + type: object + dvbTeletextPid: + description: Selects a specific + PID from within a source. + type: string + ebif: + type: string + ebpAudioInterval: + type: string + ebpLookaheadMs: + type: number + ebpPlacement: + type: string + ecmPid: + description: Selects a specific + PID from within a source. + type: string + esRateInPes: + type: string + etvPlatformPid: + description: Selects a specific + PID from within a source. + type: string + etvSignalPid: + description: Selects a specific + PID from within a source. + type: string + fragmentTime: + type: number + klv: + type: string + klvDataPids: + type: string + nielsenId3Behavior: + type: string + nullPacketBitrate: + description: Average bitrate in + bits/second. + type: number + patInterval: + type: number + pcrControl: + type: string + pcrPeriod: + type: number + pcrPid: + description: Selects a specific + PID from within a source. 
+ type: string + pmtInterval: + type: number + pmtPid: + description: Selects a specific + PID from within a source. + type: string + programNum: + type: number + rateMode: + type: string + scte27Pids: + type: string + scte35Control: + type: string + scte35Pid: + description: PID from which to + read SCTE-35 messages. + type: string + segmentationMarkers: + type: string + segmentationStyle: + type: string + segmentationTime: + type: number + timedMetadataBehavior: + type: string + timedMetadataPid: + description: Selects a specific + PID from within a source. + type: string + transportStreamId: + description: User-specified id. + Ths is used in an output group + or an output. + type: number + videoPid: + description: Selects a specific + PID from within a source. + type: string + type: object + rawSettings: + description: Raw Settings. This can + be set as an empty block. + type: object + type: object + extension: + description: Output file extension. + type: string + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + type: object + frameCaptureOutputSettings: + description: Settings for output. See Output + Settings for more details. + properties: + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + type: object + hlsOutputSettings: + description: Settings for output. See Output + Settings for more details. + properties: + h265PackagingType: + type: string + hlsSettings: + description: Destination settings for + a standard output; one destination for + each redundant encoder. See Settings + for more details. + properties: + audioOnlyHlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. 
+ properties: + audioGroupId: + description: 'Specifies the GROUP-ID + in the #EXT-X-MEDIA tag of the + target HLS audio rendition.' + type: string + audioOnlyImage: + properties: + passwordParam: + description: Key used to extract + the password from EC2 Parameter + store. + type: string + uri: + description: – Path to a + file accessible to the live + stream. + type: string + username: + description: Username for + destination. + type: string + type: object + audioTrackType: + type: string + segmentType: + type: string + type: object + fmp4HlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. + properties: + audioRenditionSets: + type: string + nielsenId3Behavior: + type: string + timedMetadataBehavior: + type: string + type: object + frameCaptureHlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. + type: object + standardHlsSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. See + Settings for more details. + properties: + audioRenditionSets: + type: string + m3u8Settings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + audioFramesPerPes: + type: number + audioPids: + type: string + ecmPid: + description: Selects a specific + PID from within a source. + type: string + nielsenId3Behavior: + type: string + patInterval: + type: number + pcrControl: + type: string + pcrPeriod: + type: number + pcrPid: + description: Selects a specific + PID from within a source. + type: string + pmtInterval: + type: number + pmtPid: + description: Selects a specific + PID from within a source. 
+ type: string + programNum: + type: number + scte35Behavior: + type: string + scte35Pid: + description: PID from which + to read SCTE-35 messages. + type: string + timedMetadataBehavior: + type: string + timedMetadataPid: + description: Selects a specific + PID from within a source. + type: string + transportStreamId: + description: User-specified + id. Ths is used in an output + group or an output. + type: number + videoPid: + description: Selects a specific + PID from within a source. + type: string + type: object + type: object + type: object + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + segmentModifier: + type: string + type: object + mediaPackageOutputSettings: + description: Media package output settings. + This can be set as an empty block. + type: object + msSmoothOutputSettings: + description: Settings for output. See Output + Settings for more details. + properties: + h265PackagingType: + type: string + nameModifier: + description: String concatenated to the + end of the destination filename. Required + for multiple outputs of the same type. + type: string + type: object + multiplexOutputSettings: + description: Multiplex output settings. See + Multiplex Output Settings for more details. + properties: + destination: + description: A director and base filename + where archive files should be written. + See Destination for more details. + properties: + destinationRefId: + description: Reference ID for the + destination. + type: string + type: object + type: object + rtmpOutputSettings: + description: RTMP output settings. See RTMP + Output Settings for more details. + properties: + certificateMode: + description: Setting to allow self signed + or verified RTMP certificates. + type: string + connectionRetryInterval: + description: Number of seconds to wait + before retrying connection to the flash + media server if the connection is lost. 
+ type: number + destination: + description: A director and base filename + where archive files should be written. + See Destination for more details. + properties: + destinationRefId: + description: Reference ID for the + destination. + type: string + type: object + numRetries: + description: Number of retry attempts. + type: number + type: object + udpOutputSettings: + description: UDP output settings. See UDP + Output Settings for more details. + properties: + bufferMsec: + description: UDP output buffering in milliseconds. + type: number + containerSettings: + description: Settings specific to the + container type of the file. See Container + Settings for more details. + properties: + m2tsSettings: + description: M2TS Settings. See M2TS + Settings for more details. + properties: + absentInputAudioBehavior: + type: string + arib: + type: string + aribCaptionsPid: + description: Selects a specific + PID from within a source. + type: string + aribCaptionsPidControl: + type: string + audioBufferModel: + type: string + audioFramesPerPes: + type: number + audioPids: + type: string + audioStreamType: + type: string + bitrate: + description: Average bitrate in + bits/second. + type: number + bufferModel: + type: string + ccDescriptor: + type: string + dvbNitSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + networkId: + description: User-specified + id. Ths is used in an output + group or an output. + type: number + networkName: + description: Name of the Channel. + type: string + repInterval: + type: number + type: object + dvbSdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + outputSdt: + type: string + repInterval: + type: number + serviceName: + description: Name of the Channel. 
+ type: string + serviceProviderName: + description: Name of the Channel. + type: string + type: object + dvbSubPids: + type: string + dvbTdtSettings: + description: Destination settings + for a standard output; one destination + for each redundant encoder. + See Settings for more details. + properties: + repInterval: + type: number + type: object + dvbTeletextPid: + description: Selects a specific + PID from within a source. + type: string + ebif: + type: string + ebpAudioInterval: + type: string + ebpLookaheadMs: + type: number + ebpPlacement: + type: string + ecmPid: + description: Selects a specific + PID from within a source. + type: string + esRateInPes: + type: string + etvPlatformPid: + description: Selects a specific + PID from within a source. + type: string + etvSignalPid: + description: Selects a specific + PID from within a source. + type: string + fragmentTime: + type: number + klv: + type: string + klvDataPids: + type: string + nielsenId3Behavior: + type: string + nullPacketBitrate: + description: Average bitrate in + bits/second. + type: number + patInterval: + type: number + pcrControl: + type: string + pcrPeriod: + type: number + pcrPid: + description: Selects a specific + PID from within a source. + type: string + pmtInterval: + type: number + pmtPid: + description: Selects a specific + PID from within a source. + type: string + programNum: + type: number + rateMode: + type: string + scte27Pids: + type: string + scte35Control: + type: string + scte35Pid: + description: PID from which to + read SCTE-35 messages. + type: string + segmentationMarkers: + type: string + segmentationStyle: + type: string + segmentationTime: + type: number + timedMetadataBehavior: + type: string + timedMetadataPid: + description: Selects a specific + PID from within a source. + type: string + transportStreamId: + description: User-specified id. + Ths is used in an output group + or an output. 
+ type: number + videoPid: + description: Selects a specific + PID from within a source. + type: string + type: object + type: object + destination: + description: A director and base filename + where archive files should be written. + See Destination for more details. + properties: + destinationRefId: + description: Reference ID for the + destination. + type: string + type: object + fecOutputSettings: + description: Settings for output. See + Output Settings for more details. + properties: + columnDepth: + description: The height of the FEC + protection matrix. + type: number + includeFec: + description: Enables column only or + column and row based FEC. + type: string + rowLength: + description: The width of the FEC + protection matrix. + type: number + type: object + type: object + type: object + videoDescriptionName: + description: The name of the video description + used as video source for the output. + type: string + type: object + type: array + type: object + type: array + timecodeConfig: + description: Contains settings used to acquire and adjust + timecode information from inputs. See Timecode Config for + more details. + properties: + source: + description: The source for the timecode that will be + associated with the events outputs. + type: string + syncThreshold: + description: Threshold in frames beyond which output timecode + is resynchronized to the input timecode. + type: number + type: object + videoDescriptions: + description: Video Descriptions. See Video Descriptions for + more details. + items: + properties: + codecSettings: + description: Audio codec settings. See Audio Codec Settings + for more details. + properties: + frameCaptureSettings: + description: Frame capture settings. See Frame Capture + Settings for more details. + properties: + captureInterval: + description: The frequency at which to capture + frames for inclusion in the output. + type: number + captureIntervalUnits: + description: Unit for the frame capture interval. 
+ type: string + type: object + h264Settings: + description: H264 settings. See H264 Settings for + more details. + properties: + adaptiveQuantization: + description: Enables or disables adaptive quantization. + type: string + afdSignaling: + description: Indicates that AFD values will + be written into the output stream. + type: string + bitrate: + description: Average bitrate in bits/second. + type: number + bufFillPct: + type: number + bufSize: + description: Size of buffer in bits. + type: number + colorMetadata: + description: Includes color space metadata in + the output. + type: string + entropyEncoding: + description: Entropy encoding mode. + type: string + filterSettings: + description: Filters to apply to an encode. + See H264 Filter Settings for more details. + properties: + temporalFilterSettings: + description: Temporal filter settings. See + Temporal Filter Settings + properties: + postFilterSharpening: + description: Post filter sharpening. + type: string + strength: + description: Filter strength. + type: string + type: object + type: object + fixedAfd: + description: Four bit AFD value to write on + all frames of video in the output stream. + type: string + flickerAq: + type: string + forceFieldPictures: + description: Controls whether coding is performed + on a field basis or on a frame basis. + type: string + framerateControl: + description: Indicates how the output video + frame rate is specified. + type: string + framerateDenominator: + description: Framerate denominator. + type: number + framerateNumerator: + description: Framerate numerator. + type: number + gopBReference: + description: GOP-B reference. + type: string + gopClosedCadence: + description: Frequency of closed GOPs. + type: number + gopNumBFrames: + description: Number of B-frames between reference + frames. + type: number + gopSize: + description: GOP size in units of either frames + of seconds per gop_size_units. 
+ type: number + gopSizeUnits: + description: Indicates if the gop_size is specified + in frames or seconds. + type: string + level: + description: H264 level. + type: string + lookAheadRateControl: + description: Amount of lookahead. + type: string + maxBitrate: + description: Set the maximum bitrate in order + to accommodate expected spikes in the complexity + of the video. + type: number + minIInterval: + type: number + numRefFrames: + description: Number of reference frames to use. + type: number + parControl: + description: Indicates how the output pixel + aspect ratio is specified. + type: string + parDenominator: + description: Pixel Aspect Ratio denominator. + type: number + parNumerator: + description: Pixel Aspect Ratio numerator. + type: number + profile: + description: AAC profile. + type: string + qualityLevel: + description: Quality level. + type: string + qvbrQualityLevel: + description: Controls the target quality for + the video encode. + type: number + rateControlMode: + description: The rate control mode. + type: string + scanType: + description: Sets the scan type of the output. + type: string + sceneChangeDetect: + description: Scene change detection. + type: string + slices: + description: Number of slices per picture. + type: number + softness: + description: Softness. + type: number + spatialAq: + description: Makes adjustments within each frame + based on spatial variation of content complexity. + type: string + subgopLength: + description: Subgop length. + type: string + syntax: + description: Produces a bitstream compliant + with SMPTE RP-2027. + type: string + temporalAq: + description: Makes adjustments within each frame + based on temporal variation of content complexity. + type: string + timecodeInsertion: + description: Determines how timecodes should + be inserted into the video elementary stream. 
+ type: string + type: object + h265Settings: + description: Destination settings for a standard + output; one destination for each redundant encoder. + See Settings for more details. + properties: + adaptiveQuantization: + description: Enables or disables adaptive quantization. + type: string + afdSignaling: + description: Indicates that AFD values will + be written into the output stream. + type: string + alternativeTransferFunction: + description: Whether or not EML should insert + an Alternative Transfer Function SEI message. + type: string + bitrate: + description: Average bitrate in bits/second. + type: number + bufSize: + description: Size of buffer in bits. + type: number + colorMetadata: + description: Includes color space metadata in + the output. + type: string + colorSpaceSettings: + description: Define the color metadata for the + output. H265 Color Space Settings for more + details. + properties: + colorSpacePassthroughSettings: + description: Sets the colorspace metadata + to be passed through. + type: object + dolbyVision81Settings: + description: Set the colorspace to Dolby + Vision81. + type: object + hdr10Settings: + description: Set the colorspace to be HDR10. + See H265 HDR10 Settings for more details. + properties: + maxCll: + description: Sets the MaxCLL value for + HDR10. + type: number + maxFall: + description: Sets the MaxFALL value + for HDR10. + type: number + type: object + rec601Settings: + description: Set the colorspace to Rec. + 601. + type: object + rec709Settings: + description: Set the colorspace to Rec. + 709. + type: object + type: object + filterSettings: + description: Filters to apply to an encode. + See H264 Filter Settings for more details. + properties: + temporalFilterSettings: + description: Temporal filter settings. See + Temporal Filter Settings + properties: + postFilterSharpening: + description: Post filter sharpening. + type: string + strength: + description: Filter strength. 
+ type: string + type: object + type: object + fixedAfd: + description: Four bit AFD value to write on + all frames of video in the output stream. + type: string + flickerAq: + type: string + framerateDenominator: + description: Framerate denominator. + type: number + framerateNumerator: + description: Framerate numerator. + type: number + gopClosedCadence: + description: Frequency of closed GOPs. + type: number + gopSize: + description: GOP size in units of either frames + of seconds per gop_size_units. + type: number + gopSizeUnits: + description: Indicates if the gop_size is specified + in frames or seconds. + type: string + level: + description: H264 level. + type: string + lookAheadRateControl: + description: Amount of lookahead. + type: string + maxBitrate: + description: Set the maximum bitrate in order + to accommodate expected spikes in the complexity + of the video. + type: number + minIInterval: + type: number + parDenominator: + description: Pixel Aspect Ratio denominator. + type: number + parNumerator: + description: Pixel Aspect Ratio numerator. + type: number + profile: + description: AAC profile. + type: string + qvbrQualityLevel: + description: Controls the target quality for + the video encode. + type: number + rateControlMode: + description: The rate control mode. + type: string + scanType: + description: Sets the scan type of the output. + type: string + sceneChangeDetect: + description: Scene change detection. + type: string + slices: + description: Number of slices per picture. + type: number + tier: + description: Set the H265 tier in the output. + type: string + timecodeBurninSettings: + description: Apply a burned in timecode. See + H265 Timecode Burnin Settings for more details. + properties: + prefix: + description: Set a prefix on the burned + in timecode. + type: string + timecodeBurninFontSize: + description: Sets the size of the burned + in timecode. 
+ type: string + timecodeBurninPosition: + description: Sets the position of the burned + in timecode. + type: string + type: object + timecodeInsertion: + description: Determines how timecodes should + be inserted into the video elementary stream. + type: string + type: object + type: object + height: + description: See the description in left_offset. For + height, specify the entire height of the rectangle + as a percentage of the underlying frame height. For + example, "80" means the rectangle height is 80% of + the underlying frame height. The top_offset and rectangle_height + must add up to 100% or less. This field corresponds + to tts:extent - Y in the TTML standard. + type: number + name: + description: Name of the Channel. + type: string + respondToAfd: + description: Indicate how to respond to the AFD values + that might be in the input video. + type: string + scalingBehavior: + description: Behavior on how to scale. + type: string + sharpness: + description: Changes the strength of the anti-alias + filter used for scaling. + type: number + width: + description: See the description in left_offset. For + width, specify the entire width of the rectangle as + a percentage of the underlying frame width. For example, + "80" means the rectangle width is 80% of the underlying + frame width. The left_offset and rectangle_width must + add up to 100% or less. This field corresponds to + tts:extent - X in the TTML standard. + type: number + type: object + type: array + type: object + id: + description: User-specified id. Ths is used in an output group + or an output. + type: string + inputAttachments: + description: Input attachments for the channel. See Input Attachments + for more details. + items: + properties: + automaticInputFailoverSettings: + description: User-specified settings for defining what the + conditions are for declaring the input unhealthy and failing + over to a different input. See Automatic Input Failover + Settings for more details. 
+ properties: + errorClearTimeMsec: + description: This clear time defines the requirement + a recovered input must meet to be considered healthy. + The input must have no failover conditions for this + length of time. Enter a time in milliseconds. This + value is particularly important if the input_preference + for the failover pair is set to PRIMARY_INPUT_PREFERRED, + because after this time, MediaLive will switch back + to the primary input. + type: number + failoverCondition: + description: A list of failover conditions. If any of + these conditions occur, MediaLive will perform a failover + to the other input. See Failover Condition Block for + more details. + items: + properties: + failoverConditionSettings: + description: Failover condition type-specific + settings. See Failover Condition Settings for + more details. + properties: + audioSilenceSettings: + description: MediaLive will perform a failover + if the specified audio selector is silent + for the specified period. See Audio Silence + Failover Settings for more details. + properties: + audioSelectorName: + description: The name of the audio selector + in the input that MediaLive should monitor + to detect silence. Select your most + important rendition. If you didn't create + an audio selector in this input, leave + blank. + type: string + audioSilenceThresholdMsec: + description: The amount of time (in milliseconds) + that the active input must be silent + before automatic input failover occurs. + Silence is defined as audio loss or + audio quieter than -50 dBFS. + type: number + type: object + inputLossSettings: + description: MediaLive will perform a failover + if content is not detected in this input + for the specified period. See Input Loss + Failover Settings for more details. + properties: + inputLossThresholdMsec: + description: The amount of time (in milliseconds) + that no input is detected. After that + time, an input failover will occur. 
+ type: number + type: object + videoBlackSettings: + description: MediaLive will perform a failover + if content is considered black for the specified + period. See Video Black Failover Settings + for more details. + properties: + blackDetectThreshold: + description: 'A value used in calculating + the threshold below which MediaLive + considers a pixel to be ''black''. For + the input to be considered black, every + pixel in a frame must be below this + threshold. The threshold is calculated + as a percentage (expressed as a decimal) + of white. Therefore .1 means 10% white + (or 90% black). Note how the formula + works for any color depth. For example, + if you set this field to 0.1 in 10-bit + color depth: (10230.1=102.3), which + means a pixel value of 102 or less is + ''black''. If you set this field to + .1 in an 8-bit color depth: (2550.1=25.5), + which means a pixel value of 25 or less + is ''black''. The range is 0.0 to 1.0, + with any number of decimal places.' + type: number + videoBlackThresholdMsec: + description: The amount of time (in milliseconds) + that the active input must be black + before automatic input failover occurs. + type: number + type: object + type: object + type: object + type: array + inputPreference: + description: Input preference when deciding which input + to make active when a previously failed input has + recovered. + type: string + secondaryInputId: + description: The input ID of the secondary input in + the automatic input failover pair. + type: string + type: object + inputAttachmentName: + description: User-specified name for the attachment. + type: string + inputId: + description: The ID of the input. + type: string + inputSettings: + description: Settings of an input. See Input Settings for + more details. + properties: + audioSelector: + items: + properties: + name: + description: Name of the Channel. + type: string + selectorSettings: + description: The audio selector settings. 
See + Audio Selector Settings for more details. + properties: + audioHlsRenditionSelection: + description: Audio HLS Rendition Selection. + See Audio HLS Rendition Selection for more + details. + properties: + groupId: + description: 'Specifies the GROUP-ID in + the #EXT-X-MEDIA tag of the target HLS + audio rendition.' + type: string + name: + description: Name of the Channel. + type: string + type: object + audioLanguageSelection: + description: Audio Language Selection. See + Audio Language Selection for more details. + properties: + languageCode: + description: Selects a specific three-letter + language code from within an audio source. + type: string + languageSelectionPolicy: + description: When set to “strict”, the + transport stream demux strictly identifies + audio streams by their language descriptor. + If a PMT update occurs such that an + audio stream matching the initially + selected language is no longer present + then mute will be encoded until the + language returns. If “loose”, then on + a PMT update the demux will choose another + audio stream in the program with the + same stream type if it can’t find one + with the same language. + type: string + type: object + audioPidSelection: + description: Audio Pid Selection. See Audio + PID Selection for more details. + properties: + pid: + description: Selects a specific PID from + within a source. + type: number + type: object + audioTrackSelection: + description: Audio Track Selection. See Audio + Track Selection for more details. + properties: + dolbyEDecode: + description: Configure decoding options + for Dolby E streams - these should be + Dolby E frames carried in PCM streams + tagged with SMPTE-337. See Dolby E Decode + for more details. + properties: + programSelection: + description: Applies only to Dolby + E. Enter the program ID (according + to the metadata in the audio) of + the Dolby E program to extract from + the specified track. One program + extracted per audio selector. 
To + select multiple programs, create + multiple selectors with the same + Track and different Program numbers. + “All channels” means to ignore the + program IDs and include all the + channels in this selector; useful + if metadata is known to be incorrect. + type: string + type: object + tracks: + description: Selects one or more unique + audio tracks from within a source. See + Audio Tracks for more details. + items: + properties: + track: + description: 1-based integer value + that maps to a specific audio + track. + type: number + type: object + type: array + type: object + type: object + type: object + type: array + captionSelector: + items: + properties: + languageCode: + description: Selects a specific three-letter language + code from within an audio source. + type: string + name: + description: Name of the Channel. + type: string + selectorSettings: + description: The audio selector settings. See + Audio Selector Settings for more details. + properties: + ancillarySourceSettings: + description: Ancillary Source Settings. See + Ancillary Source Settings for more details. + properties: + sourceAncillaryChannelNumber: + description: Specifies the number (1 to + 4) of the captions channel you want + to extract from the ancillary captions. + If you plan to convert the ancillary + captions to another format, complete + this field. If you plan to choose Embedded + as the captions destination in the output + (to pass through all the channels in + the ancillary captions), leave this + field blank because MediaLive ignores + the field. + type: number + type: object + aribSourceSettings: + description: ARIB Source Settings. + type: object + dvbSubSourceSettings: + description: DVB Sub Source Settings. See + DVB Sub Source Settings for more details. 
+ properties: + ocrLanguage: + description: If you will configure a WebVTT + caption description that references + this caption selector, use this field + to provide the language to consider + when translating the image-based source + to text. + type: string + pid: + description: Selects a specific PID from + within a source. + type: number + type: object + embeddedSourceSettings: + description: Embedded Source Settings. See + Embedded Source Settings for more details. + properties: + convert608To708: + description: If upconvert, 608 data is + both passed through via the “608 compatibility + bytes” fields of the 708 wrapper as + well as translated into 708. 708 data + present in the source content will be + discarded. + type: string + scte20Detection: + description: Set to “auto” to handle streams + with intermittent and/or non-aligned + SCTE-20 and Embedded captions. + type: string + source608ChannelNumber: + description: Specifies the 608/708 channel + number within the video track from which + to extract captions. Unused for passthrough. + type: number + type: object + scte20SourceSettings: + description: SCTE20 Source Settings. See SCTE + 20 Source Settings for more details. + properties: + convert608To708: + description: If upconvert, 608 data is + both passed through via the “608 compatibility + bytes” fields of the 708 wrapper as + well as translated into 708. 708 data + present in the source content will be + discarded. + type: string + source608ChannelNumber: + description: Specifies the 608/708 channel + number within the video track from which + to extract captions. Unused for passthrough. + type: number + type: object + scte27SourceSettings: + description: SCTE27 Source Settings. See SCTE + 27 Source Settings for more details. 
+ properties: + ocrLanguage: + description: If you will configure a WebVTT + caption description that references + this caption selector, use this field + to provide the language to consider + when translating the image-based source + to text. + type: string + pid: + description: Selects a specific PID from + within a source. + type: number + type: object + teletextSourceSettings: + description: Teletext Source Settings. See + Teletext Source Settings for more details. + properties: + outputRectangle: + description: Optionally defines a region + where TTML style captions will be displayed. + See Caption Rectangle for more details. + properties: + height: + description: See the description in + left_offset. For height, specify + the entire height of the rectangle + as a percentage of the underlying + frame height. For example, "80" + means the rectangle height is 80% + of the underlying frame height. + The top_offset and rectangle_height + must add up to 100% or less. This + field corresponds to tts:extent + - Y in the TTML standard. + type: number + leftOffset: + description: Applies only if you plan + to convert these source captions + to EBU-TT-D or TTML in an output. + (Make sure to leave the default + if you don’t have either of these + formats in the output.) You can + define a display rectangle for the + captions that is smaller than the + underlying video frame. You define + the rectangle by specifying the + position of the left edge, top edge, + bottom edge, and right edge of the + rectangle, all within the underlying + video frame. The units for the measurements + are percentages. If you specify + a value for one of these fields, + you must specify a value for all + of them. For leftOffset, specify + the position of the left edge of + the rectangle, as a percentage of + the underlying frame width, and + relative to the left edge of the + frame. For example, "10" means the + measurement is 10% of the underlying + frame width. 
The rectangle left + edge starts at that position from + the left edge of the frame. This + field corresponds to tts:origin + - X in the TTML standard. + type: number + topOffset: + description: See the description in + left_offset. For top_offset, specify + the position of the top edge of + the rectangle, as a percentage of + the underlying frame height, and + relative to the top edge of the + frame. For example, "10" means the + measurement is 10% of the underlying + frame height. The rectangle top + edge starts at that position from + the top edge of the frame. This + field corresponds to tts:origin + - Y in the TTML standard. + type: number + width: + description: See the description in + left_offset. For width, specify + the entire width of the rectangle + as a percentage of the underlying + frame width. For example, "80" means + the rectangle width is 80% of the + underlying frame width. The left_offset + and rectangle_width must add up + to 100% or less. This field corresponds + to tts:extent - X in the TTML standard. + type: number + type: object + pageNumber: + description: Specifies the teletext page + number within the data stream from which + to extract captions. Range of 0x100 + (256) to 0x8FF (2303). Unused for passthrough. + Should be specified as a hexadecimal + string with no “0x” prefix. + type: string + type: object + type: object + type: object + type: array + deblockFilter: + description: Enable or disable the deblock filter when + filtering. + type: string + denoiseFilter: + description: Enable or disable the denoise filter when + filtering. + type: string + filterStrength: + description: Adjusts the magnitude of filtering from + 1 (minimal) to 5 (strongest). + type: number + inputFilter: + description: Turns on the filter for the input. + type: string + networkInputSettings: + description: Input settings. See Network Input Settings + for more details. 
+ properties: + hlsInputSettings: + description: Specifies HLS input settings when the + uri is for a HLS manifest. See HLS Input Settings + for more details. + properties: + bandwidth: + description: The bitrate is specified in bits + per second, as in an HLS manifest. + type: number + bufferSegments: + description: Buffer segments. + type: number + retries: + description: The number of consecutive times + that attempts to read a manifest or segment + must fail before the input is considered unavailable. + type: number + retryInterval: + description: The number of seconds between retries + when an attempt to read a manifest or segment + fails. + type: number + scte35Source: + description: The source for the timecode that + will be associated with the events outputs. + type: string + type: object + serverValidation: + description: Check HTTPS server certificates. + type: string + type: object + scte35Pid: + description: PID from which to read SCTE-35 messages. + type: number + smpte2038DataPreference: + description: Specifies whether to extract applicable + ancillary data from a SMPTE-2038 source in the input. + type: string + sourceEndBehavior: + description: Loop input if it is a file. + type: string + videoSelector: + properties: + colorSpace: + type: string + colorSpaceUsage: + type: string + type: object + type: object + type: object + type: array + inputSpecification: + description: Specification of network and file inputs for the + channel. + properties: + codec: + type: string + inputResolution: + description: '- Maximum CDI input resolution.' + type: string + maximumBitrate: + description: Average bitrate in bits/second. + type: string + type: object + logLevel: + description: The log level to write to Cloudwatch logs. + type: string + maintenance: + description: Maintenance settings for this channel. See Maintenance + for more details. + properties: + maintenanceDay: + description: The day of the week to use for maintenance. 
+ type: string + maintenanceStartTime: + description: The hour maintenance will start. + type: string + type: object + name: + description: Name of the Channel. + type: string + roleArn: + description: Concise argument description. + type: string + startChannel: + description: 'Whether to start/stop channel. Default: false' + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + vpc: + description: Settings for the VPC outputs. See VPC for more details. + properties: + availabilityZones: + items: + type: string + type: array + x-kubernetes-list-type: set + networkInterfaceIds: + items: + type: string + type: array + x-kubernetes-list-type: set + publicAddressAllocationIds: + description: List of public address allocation ids to associate + with ENIs that will be created in Output VPC. Must specify + one for SINGLE_PIPELINE, two for STANDARD channels. + items: + type: string + type: array + securityGroupIds: + description: A list of up to 5 EC2 VPC security group IDs + to attach to the Output VPC network interfaces. If none + are specified then the VPC default security group will be + used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of VPC subnet IDs from the same VPC. If + STANDARD channel, subnet IDs must be mapped to two unique + availability zones (AZ). + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/medialive.aws.upbound.io_inputs.yaml b/package/crds/medialive.aws.upbound.io_inputs.yaml index 09342f96dd..d48fcd5b75 100644 --- a/package/crds/medialive.aws.upbound.io_inputs.yaml +++ b/package/crds/medialive.aws.upbound.io_inputs.yaml @@ -776,3 +776,755 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Input is the Schema for the Inputs API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InputSpec defines the desired state of Input + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + destinations: + description: Destination settings for PUSH type inputs. See Destinations + for more details. + items: + properties: + streamName: + description: A unique name for the location the RTMP stream + is being pushed to. + type: string + type: object + type: array + inputDevices: + description: Settings for the devices. See Input Devices for more + details. + items: + properties: + id: + description: The unique ID for the device. + type: string + type: object + type: array + inputSecurityGroups: + description: List of input security groups. + items: + type: string + type: array + mediaConnectFlows: + description: A list of the MediaConnect Flows. See Media Connect + Flows for more details. + items: + properties: + flowArn: + description: The ARN of the MediaConnect Flow + type: string + type: object + type: array + name: + description: Name of the input. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The ARN of the role this input assumes during and + after creation. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sources: + description: The source URLs for a PULL-type input. See Sources + for more details. 
+ items: + properties: + passwordParam: + description: The key used to extract the password from EC2 + Parameter store. + type: string + url: + description: The URL where the stream is pulled from. + type: string + username: + description: The username for the input source. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The different types of inputs that AWS Elemental + MediaLive supports. + type: string + vpc: + description: Settings for a private VPC Input. See VPC for more + details. + properties: + securityGroupIds: + description: A list of up to 5 EC2 VPC security group IDs + to attach to the Input. + items: + type: string + type: array + subnetIds: + description: A list of 2 VPC subnet IDs from the same VPC. + items: + type: string + type: array + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + destinations: + description: Destination settings for PUSH type inputs. See Destinations + for more details. + items: + properties: + streamName: + description: A unique name for the location the RTMP stream + is being pushed to. + type: string + type: object + type: array + inputDevices: + description: Settings for the devices. 
See Input Devices for more + details. + items: + properties: + id: + description: The unique ID for the device. + type: string + type: object + type: array + inputSecurityGroups: + description: List of input security groups. + items: + type: string + type: array + mediaConnectFlows: + description: A list of the MediaConnect Flows. See Media Connect + Flows for more details. + items: + properties: + flowArn: + description: The ARN of the MediaConnect Flow + type: string + type: object + type: array + name: + description: Name of the input. + type: string + roleArn: + description: The ARN of the role this input assumes during and + after creation. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sources: + description: The source URLs for a PULL-type input. See Sources + for more details. + items: + properties: + passwordParam: + description: The key used to extract the password from EC2 + Parameter store. + type: string + url: + description: The URL where the stream is pulled from. + type: string + username: + description: The username for the input source. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The different types of inputs that AWS Elemental + MediaLive supports. + type: string + vpc: + description: Settings for a private VPC Input. See VPC for more + details. + properties: + securityGroupIds: + description: A list of up to 5 EC2 VPC security group IDs + to attach to the Input. + items: + type: string + type: array + subnetIds: + description: A list of 2 VPC subnet IDs from the same VPC. 
+ items: + type: string + type: array + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: InputStatus defines the observed state of Input. + properties: + atProvider: + properties: + arn: + description: ARN of the Input. + type: string + attachedChannels: + description: Channels attached to Input. + items: + type: string + type: array + destinations: + description: Destination settings for PUSH type inputs. See Destinations + for more details. + items: + properties: + streamName: + description: A unique name for the location the RTMP stream + is being pushed to. + type: string + type: object + type: array + id: + description: The unique ID for the device. + type: string + inputClass: + description: The input class. + type: string + inputDevices: + description: Settings for the devices. See Input Devices for more + details. + items: + properties: + id: + description: The unique ID for the device. + type: string + type: object + type: array + inputPartnerIds: + description: A list of IDs for all Inputs which are partners of + this one. + items: + type: string + type: array + inputSecurityGroups: + description: List of input security groups. + items: + type: string + type: array + inputSourceType: + description: Source type of the input. + type: string + mediaConnectFlows: + description: A list of the MediaConnect Flows. 
See Media Connect + Flows for more details. + items: + properties: + flowArn: + description: The ARN of the MediaConnect Flow + type: string + type: object + type: array + name: + description: Name of the input. + type: string + roleArn: + description: The ARN of the role this input assumes during and + after creation. + type: string + sources: + description: The source URLs for a PULL-type input. See Sources + for more details. + items: + properties: + passwordParam: + description: The key used to extract the password from EC2 + Parameter store. + type: string + url: + description: The URL where the stream is pulled from. + type: string + username: + description: The username for the input source. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: + description: The different types of inputs that AWS Elemental + MediaLive supports. + type: string + vpc: + description: Settings for a private VPC Input. See VPC for more + details. + properties: + securityGroupIds: + description: A list of up to 5 EC2 VPC security group IDs + to attach to the Input. + items: + type: string + type: array + subnetIds: + description: A list of 2 VPC subnet IDs from the same VPC. + items: + type: string + type: array + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/medialive.aws.upbound.io_multiplices.yaml b/package/crds/medialive.aws.upbound.io_multiplices.yaml index acaf6bedaf..60cf7b5ed2 100644 --- a/package/crds/medialive.aws.upbound.io_multiplices.yaml +++ b/package/crds/medialive.aws.upbound.io_multiplices.yaml @@ -457,3 +457,436 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Multiplex is the Schema for the Multiplexs API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MultiplexSpec defines the desired state of Multiplex + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + availabilityZones: + description: A list of availability zones. You must specify exactly + two. + items: + type: string + type: array + multiplexSettings: + description: Multiplex settings. See Multiplex Settings for more + details. + properties: + maximumVideoBufferDelayMilliseconds: + description: Maximum video buffer delay. + type: number + transportStreamBitrate: + description: Transport stream bit rate. + type: number + transportStreamId: + description: Unique ID for each multiplex. + type: number + transportStreamReservedBitrate: + description: Transport stream reserved bit rate. + type: number + type: object + name: + description: name of Multiplex. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + startMultiplex: + description: Whether to start the Multiplex. Defaults to false. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + availabilityZones: + description: A list of availability zones. You must specify exactly + two. + items: + type: string + type: array + multiplexSettings: + description: Multiplex settings. See Multiplex Settings for more + details. + properties: + maximumVideoBufferDelayMilliseconds: + description: Maximum video buffer delay. + type: number + transportStreamBitrate: + description: Transport stream bit rate. + type: number + transportStreamId: + description: Unique ID for each multiplex. + type: number + transportStreamReservedBitrate: + description: Transport stream reserved bit rate. + type: number + type: object + name: + description: name of Multiplex. + type: string + startMultiplex: + description: Whether to start the Multiplex. Defaults to false. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.availabilityZones is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.availabilityZones) + || (has(self.initProvider) && has(self.initProvider.availabilityZones))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: MultiplexStatus defines the observed state of Multiplex. + properties: + atProvider: + properties: + arn: + description: ARN of the Multiplex. + type: string + availabilityZones: + description: A list of availability zones. You must specify exactly + two. + items: + type: string + type: array + id: + type: string + multiplexSettings: + description: Multiplex settings. See Multiplex Settings for more + details. + properties: + maximumVideoBufferDelayMilliseconds: + description: Maximum video buffer delay. + type: number + transportStreamBitrate: + description: Transport stream bit rate. + type: number + transportStreamId: + description: Unique ID for each multiplex. + type: number + transportStreamReservedBitrate: + description: Transport stream reserved bit rate. + type: number + type: object + name: + description: name of Multiplex. + type: string + startMultiplex: + description: Whether to start the Multiplex. Defaults to false. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/memorydb.aws.upbound.io_users.yaml b/package/crds/memorydb.aws.upbound.io_users.yaml index 82b4ca4c1f..3b3980f947 100644 --- a/package/crds/memorydb.aws.upbound.io_users.yaml +++ b/package/crds/memorydb.aws.upbound.io_users.yaml @@ -444,3 +444,423 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: User is the Schema for the Users API. Provides a MemoryDB User. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserSpec defines the desired state of User + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessString: + description: Access permissions string used for this user. + type: string + authenticationMode: + description: Denotes the user's authentication properties. Detailed + below. + properties: + passwordsSecretRef: + description: Set of passwords used for authentication if type + is set to password. You can create up to two passwords for + each user. + items: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: array + type: + description: 'Specifies the authentication type. Valid values + are: password or iam.' + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessString: + description: Access permissions string used for this user. + type: string + authenticationMode: + description: Denotes the user's authentication properties. Detailed + below. + properties: + passwordsSecretRef: + items: + type: string + type: array + type: + description: 'Specifies the authentication type. Valid values + are: password or iam.' + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.accessString is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.accessString) + || (has(self.initProvider) && has(self.initProvider.accessString))' + - message: spec.forProvider.authenticationMode is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authenticationMode) + || (has(self.initProvider) && has(self.initProvider.authenticationMode))' + status: + description: UserStatus defines the observed state of User. + properties: + atProvider: + properties: + accessString: + description: Access permissions string used for this user. + type: string + arn: + description: ARN of the user. + type: string + authenticationMode: + description: Denotes the user's authentication properties. Detailed + below. + properties: + passwordCount: + description: Number of passwords belonging to the user if + type is set to password. + type: number + type: + description: 'Specifies the authentication type. Valid values + are: password or iam.' + type: string + type: object + id: + description: Same as user_name. + type: string + minimumEngineVersion: + description: Minimum engine version supported for the user. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/mq.aws.upbound.io_brokers.yaml b/package/crds/mq.aws.upbound.io_brokers.yaml index 04ee8734c3..cb92eca96f 100644 --- a/package/crds/mq.aws.upbound.io_brokers.yaml +++ b/package/crds/mq.aws.upbound.io_brokers.yaml @@ -1728,3 +1728,1680 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Broker is the Schema for the Brokers API. Provides an MQ Broker + Resource + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BrokerSpec defines the desired state of Broker + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + applyImmediately: + description: Specifies whether any broker modifications are applied + immediately, or during the next maintenance window. Default + is false. + type: boolean + authenticationStrategy: + description: Authentication strategy used to secure the broker. + Valid values are simple and ldap. ldap is not supported for + engine_type RabbitMQ. + type: string + autoMinorVersionUpgrade: + description: Whether to automatically upgrade to new minor versions + of brokers as Amazon MQ makes releases available. + type: boolean + brokerName: + description: Name of the broker. + type: string + configuration: + description: Configuration block for broker configuration. Applies + to engine_type of ActiveMQ and RabbitMQ only. Detailed below. + properties: + id: + description: The Configuration ID. + type: string + idRef: + description: Reference to a Configuration in mq to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Configuration in mq to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revision: + description: Revision of the Configuration. + type: number + type: object + dataReplicationMode: + description: Defines whether this broker is a part of a data replication + pair. Valid values are CRDR and NONE. + type: string + dataReplicationPrimaryBrokerArn: + description: The Amazon Resource Name (ARN) of the primary broker + that is used to replicate data from in a data replication pair, + and is applied to the replica broker. Must be set when data_replication_mode + is CRDR. 
+ type: string + dataReplicationPrimaryBrokerArnRef: + description: Reference to a Broker in mq to populate dataReplicationPrimaryBrokerArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataReplicationPrimaryBrokerArnSelector: + description: Selector for a Broker in mq to populate dataReplicationPrimaryBrokerArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deploymentMode: + description: Deployment mode of the broker. Valid values are SINGLE_INSTANCE, + ACTIVE_STANDBY_MULTI_AZ, and CLUSTER_MULTI_AZ. Default is SINGLE_INSTANCE. + type: string + encryptionOptions: + description: Configuration block containing encryption options. + Detailed below. + properties: + kmsKeyId: + description: Amazon Resource Name (ARN) of Key Management + Service (KMS) Customer Master Key (CMK) to use for encryption + at rest. Requires setting use_aws_owned_key to false. To + perform drift detection when AWS-managed CMKs or customer-managed + CMKs are in use, this value must be configured. + type: string + useAwsOwnedKey: + description: Whether to enable an AWS-owned KMS CMK that is + not in your account. Defaults to true. Setting to false + without configuring kms_key_id will create an AWS-managed + CMK aliased to aws/mq in your account. + type: boolean + type: object + engineType: + description: Type of broker engine. Valid values are ActiveMQ + and RabbitMQ. + type: string + engineVersion: + description: Version of the broker engine. See the AmazonMQ Broker + Engine docs for supported versions. For example, 5.17.6. + type: string + hostInstanceType: + description: Broker's instance type. For example, mq.t3.micro, + mq.m5.large. + type: string + ldapServerMetadata: + description: Configuration block for the LDAP server used to authenticate + and authorize connections to the broker. Not supported for engine_type + RabbitMQ. Detailed below. (Currently, AWS may not process changes + to LDAP server metadata.) + properties: + hosts: + description: List of a fully qualified domain name of the + LDAP server and an optional failover server. 
+ items: + type: string + type: array + roleBase: + description: Fully qualified name of the directory to search + for a user’s groups. + type: string + roleName: + description: Specifies the LDAP attribute that identifies + the group name attribute in the object returned from the + group membership query. + type: string + roleSearchMatching: + description: Search criteria for groups. + type: string + roleSearchSubtree: + description: Whether the directory search scope is the entire + sub-tree. + type: boolean + serviceAccountPasswordSecretRef: + description: Service account password. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + serviceAccountUsername: + description: Service account username. + type: string + userBase: + description: Fully qualified name of the directory where you + want to search for users. + type: string + userRoleName: + description: Specifies the name of the LDAP attribute for + the user group membership. + type: string + userSearchMatching: + description: Search criteria for users. + type: string + userSearchSubtree: + description: Whether the directory search scope is the entire + sub-tree. + type: boolean + type: object + logs: + description: Configuration block for the logging configuration + of the broker. Detailed below. + properties: + audit: + description: Enables audit logging. Auditing is only possible + for engine_type of ActiveMQ. User management action made + using JMX or the ActiveMQ Web Console is logged. Defaults + to false. + type: string + general: + description: Enables general logging via CloudWatch. Defaults + to false. + type: boolean + type: object + maintenanceWindowStartTime: + description: Configuration block for the maintenance window start + time. Detailed below. 
+ properties: + dayOfWeek: + description: Day of the week, e.g., MONDAY, TUESDAY, or WEDNESDAY. + type: string + timeOfDay: + description: Time, in 24-hour format, e.g., 02:00. + type: string + timeZone: + description: Time zone in either the Country/City format or + the UTC offset format, e.g., CET. + type: string + type: object + publiclyAccessible: + description: Whether to enable connections from applications outside + of the VPC that hosts the broker's subnets. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: List of security group IDs assigned to the broker. + items: + type: string + type: array + x-kubernetes-list-type: set + storageType: + description: Storage type of the broker. For engine_type ActiveMQ, + the valid values are efs and ebs, and the AWS-default is efs. + For engine_type RabbitMQ, only ebs is supported. When using + ebs, only the mq.m5 broker instance type family is supported. + type: string + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: List of subnet IDs in which to launch the broker. + A SINGLE_INSTANCE deployment requires one subnet. An ACTIVE_STANDBY_MULTI_AZ + deployment requires multiple subnets. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + user: + description: Configuration block for broker users. For engine_type + of RabbitMQ, Amazon MQ does not return broker users preventing + this resource from making user updates and drift detection. + Detailed below. + items: + properties: + consoleAccess: + description: Whether to enable access to the ActiveMQ Web + Console for the user. Applies to engine_type of ActiveMQ + only. + type: boolean + groups: + description: List of groups (20 maximum) to which the ActiveMQ + user belongs. Applies to engine_type of ActiveMQ only. + items: + type: string + type: array + x-kubernetes-list-type: set + passwordSecretRef: + description: Password of the user. It must be 12 to 250 + characters long, at least 4 unique characters, and must + not contain commas. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + replicationUser: + description: Whether to set set replication user. Defaults + to false. + type: boolean + username: + description: Username of the user. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + applyImmediately: + description: Specifies whether any broker modifications are applied + immediately, or during the next maintenance window. Default + is false. + type: boolean + authenticationStrategy: + description: Authentication strategy used to secure the broker. + Valid values are simple and ldap. ldap is not supported for + engine_type RabbitMQ. + type: string + autoMinorVersionUpgrade: + description: Whether to automatically upgrade to new minor versions + of brokers as Amazon MQ makes releases available. + type: boolean + brokerName: + description: Name of the broker. + type: string + configuration: + description: Configuration block for broker configuration. Applies + to engine_type of ActiveMQ and RabbitMQ only. Detailed below. + properties: + id: + description: The Configuration ID. + type: string + idRef: + description: Reference to a Configuration in mq to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Configuration in mq to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revision: + description: Revision of the Configuration. + type: number + type: object + dataReplicationMode: + description: Defines whether this broker is a part of a data replication + pair. Valid values are CRDR and NONE. + type: string + dataReplicationPrimaryBrokerArn: + description: The Amazon Resource Name (ARN) of the primary broker + that is used to replicate data from in a data replication pair, + and is applied to the replica broker. Must be set when data_replication_mode + is CRDR. + type: string + dataReplicationPrimaryBrokerArnRef: + description: Reference to a Broker in mq to populate dataReplicationPrimaryBrokerArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataReplicationPrimaryBrokerArnSelector: + description: Selector for a Broker in mq to populate dataReplicationPrimaryBrokerArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deploymentMode: + description: Deployment mode of the broker. Valid values are SINGLE_INSTANCE, + ACTIVE_STANDBY_MULTI_AZ, and CLUSTER_MULTI_AZ. Default is SINGLE_INSTANCE. + type: string + encryptionOptions: + description: Configuration block containing encryption options. + Detailed below. + properties: + kmsKeyId: + description: Amazon Resource Name (ARN) of Key Management + Service (KMS) Customer Master Key (CMK) to use for encryption + at rest. Requires setting use_aws_owned_key to false. To + perform drift detection when AWS-managed CMKs or customer-managed + CMKs are in use, this value must be configured. + type: string + useAwsOwnedKey: + description: Whether to enable an AWS-owned KMS CMK that is + not in your account. Defaults to true. Setting to false + without configuring kms_key_id will create an AWS-managed + CMK aliased to aws/mq in your account. + type: boolean + type: object + engineType: + description: Type of broker engine. Valid values are ActiveMQ + and RabbitMQ. + type: string + engineVersion: + description: Version of the broker engine. See the AmazonMQ Broker + Engine docs for supported versions. For example, 5.17.6. + type: string + hostInstanceType: + description: Broker's instance type. For example, mq.t3.micro, + mq.m5.large. + type: string + ldapServerMetadata: + description: Configuration block for the LDAP server used to authenticate + and authorize connections to the broker. Not supported for engine_type + RabbitMQ. Detailed below. (Currently, AWS may not process changes + to LDAP server metadata.) + properties: + hosts: + description: List of a fully qualified domain name of the + LDAP server and an optional failover server. + items: + type: string + type: array + roleBase: + description: Fully qualified name of the directory to search + for a user’s groups. 
+ type: string + roleName: + description: Specifies the LDAP attribute that identifies + the group name attribute in the object returned from the + group membership query. + type: string + roleSearchMatching: + description: Search criteria for groups. + type: string + roleSearchSubtree: + description: Whether the directory search scope is the entire + sub-tree. + type: boolean + serviceAccountPasswordSecretRef: + description: Service account password. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + serviceAccountUsername: + description: Service account username. + type: string + userBase: + description: Fully qualified name of the directory where you + want to search for users. + type: string + userRoleName: + description: Specifies the name of the LDAP attribute for + the user group membership. + type: string + userSearchMatching: + description: Search criteria for users. + type: string + userSearchSubtree: + description: Whether the directory search scope is the entire + sub-tree. + type: boolean + type: object + logs: + description: Configuration block for the logging configuration + of the broker. Detailed below. + properties: + audit: + description: Enables audit logging. Auditing is only possible + for engine_type of ActiveMQ. User management action made + using JMX or the ActiveMQ Web Console is logged. Defaults + to false. + type: string + general: + description: Enables general logging via CloudWatch. Defaults + to false. + type: boolean + type: object + maintenanceWindowStartTime: + description: Configuration block for the maintenance window start + time. Detailed below. + properties: + dayOfWeek: + description: Day of the week, e.g., MONDAY, TUESDAY, or WEDNESDAY. + type: string + timeOfDay: + description: Time, in 24-hour format, e.g., 02:00. 
+ type: string + timeZone: + description: Time zone in either the Country/City format or + the UTC offset format, e.g., CET. + type: string + type: object + publiclyAccessible: + description: Whether to enable connections from applications outside + of the VPC that hosts the broker's subnets. + type: boolean + securityGroupRefs: + description: References to SecurityGroup in ec2 to populate securityGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + securityGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityGroups: + description: List of security group IDs assigned to the broker. + items: + type: string + type: array + x-kubernetes-list-type: set + storageType: + description: Storage type of the broker. For engine_type ActiveMQ, + the valid values are efs and ebs, and the AWS-default is efs. + For engine_type RabbitMQ, only ebs is supported. When using + ebs, only the mq.m5 broker instance type family is supported. + type: string + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: List of subnet IDs in which to launch the broker. + A SINGLE_INSTANCE deployment requires one subnet. An ACTIVE_STANDBY_MULTI_AZ + deployment requires multiple subnets. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + user: + description: Configuration block for broker users. For engine_type + of RabbitMQ, Amazon MQ does not return broker users preventing + this resource from making user updates and drift detection. + Detailed below. 
+ items: + properties: + consoleAccess: + description: Whether to enable access to the ActiveMQ Web + Console for the user. Applies to engine_type of ActiveMQ + only. + type: boolean + groups: + description: List of groups (20 maximum) to which the ActiveMQ + user belongs. Applies to engine_type of ActiveMQ only. + items: + type: string + type: array + x-kubernetes-list-type: set + passwordSecretRef: + description: Password of the user. It must be 12 to 250 + characters long, at least 4 unique characters, and must + not contain commas. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + replicationUser: + description: Whether to set set replication user. Defaults + to false. + type: boolean + username: + description: Username of the user. + type: string + required: + - passwordSecretRef + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.brokerName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.brokerName) + || (has(self.initProvider) && has(self.initProvider.brokerName))' + - message: spec.forProvider.engineType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engineType) + || (has(self.initProvider) && has(self.initProvider.engineType))' + - message: spec.forProvider.engineVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engineVersion) + || (has(self.initProvider) && has(self.initProvider.engineVersion))' + - message: spec.forProvider.hostInstanceType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' 
in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.hostInstanceType) + || (has(self.initProvider) && has(self.initProvider.hostInstanceType))' + - message: spec.forProvider.user is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.user) + || (has(self.initProvider) && has(self.initProvider.user))' + status: + description: BrokerStatus defines the observed state of Broker. + properties: + atProvider: + properties: + applyImmediately: + description: Specifies whether any broker modifications are applied + immediately, or during the next maintenance window. Default + is false. + type: boolean + arn: + description: ARN of the broker. + type: string + authenticationStrategy: + description: Authentication strategy used to secure the broker. + Valid values are simple and ldap. ldap is not supported for + engine_type RabbitMQ. + type: string + autoMinorVersionUpgrade: + description: Whether to automatically upgrade to new minor versions + of brokers as Amazon MQ makes releases available. + type: boolean + brokerName: + description: Name of the broker. + type: string + configuration: + description: Configuration block for broker configuration. Applies + to engine_type of ActiveMQ and RabbitMQ only. Detailed below. + properties: + id: + description: The Configuration ID. + type: string + revision: + description: Revision of the Configuration. + type: number + type: object + dataReplicationMode: + description: Defines whether this broker is a part of a data replication + pair. Valid values are CRDR and NONE. + type: string + dataReplicationPrimaryBrokerArn: + description: The Amazon Resource Name (ARN) of the primary broker + that is used to replicate data from in a data replication pair, + and is applied to the replica broker. Must be set when data_replication_mode + is CRDR. 
+ type: string + deploymentMode: + description: Deployment mode of the broker. Valid values are SINGLE_INSTANCE, + ACTIVE_STANDBY_MULTI_AZ, and CLUSTER_MULTI_AZ. Default is SINGLE_INSTANCE. + type: string + encryptionOptions: + description: Configuration block containing encryption options. + Detailed below. + properties: + kmsKeyId: + description: Amazon Resource Name (ARN) of Key Management + Service (KMS) Customer Master Key (CMK) to use for encryption + at rest. Requires setting use_aws_owned_key to false. To + perform drift detection when AWS-managed CMKs or customer-managed + CMKs are in use, this value must be configured. + type: string + useAwsOwnedKey: + description: Whether to enable an AWS-owned KMS CMK that is + not in your account. Defaults to true. Setting to false + without configuring kms_key_id will create an AWS-managed + CMK aliased to aws/mq in your account. + type: boolean + type: object + engineType: + description: Type of broker engine. Valid values are ActiveMQ + and RabbitMQ. + type: string + engineVersion: + description: Version of the broker engine. See the AmazonMQ Broker + Engine docs for supported versions. For example, 5.17.6. + type: string + hostInstanceType: + description: Broker's instance type. For example, mq.t3.micro, + mq.m5.large. + type: string + id: + description: Unique ID that Amazon MQ generates for the broker. + type: string + instances: + description: List of information about allocated brokers (both + active & standby). + items: + properties: + consoleUrl: + description: The URL of the ActiveMQ Web Console or the + RabbitMQ Management UI depending on engine_type. + type: string + endpoints: + description: 'Broker''s wire-level protocol endpoints in + the following order & format referenceable e.g., as instances.0.endpoints.0 + (SSL):' + items: + type: string + type: array + ipAddress: + description: IP Address of the broker. 
+ type: string + type: object + type: array + ldapServerMetadata: + description: Configuration block for the LDAP server used to authenticate + and authorize connections to the broker. Not supported for engine_type + RabbitMQ. Detailed below. (Currently, AWS may not process changes + to LDAP server metadata.) + properties: + hosts: + description: List of a fully qualified domain name of the + LDAP server and an optional failover server. + items: + type: string + type: array + roleBase: + description: Fully qualified name of the directory to search + for a user’s groups. + type: string + roleName: + description: Specifies the LDAP attribute that identifies + the group name attribute in the object returned from the + group membership query. + type: string + roleSearchMatching: + description: Search criteria for groups. + type: string + roleSearchSubtree: + description: Whether the directory search scope is the entire + sub-tree. + type: boolean + serviceAccountUsername: + description: Service account username. + type: string + userBase: + description: Fully qualified name of the directory where you + want to search for users. + type: string + userRoleName: + description: Specifies the name of the LDAP attribute for + the user group membership. + type: string + userSearchMatching: + description: Search criteria for users. + type: string + userSearchSubtree: + description: Whether the directory search scope is the entire + sub-tree. + type: boolean + type: object + logs: + description: Configuration block for the logging configuration + of the broker. Detailed below. + properties: + audit: + description: Enables audit logging. Auditing is only possible + for engine_type of ActiveMQ. User management action made + using JMX or the ActiveMQ Web Console is logged. Defaults + to false. + type: string + general: + description: Enables general logging via CloudWatch. Defaults + to false. 
+ type: boolean + type: object + maintenanceWindowStartTime: + description: Configuration block for the maintenance window start + time. Detailed below. + properties: + dayOfWeek: + description: Day of the week, e.g., MONDAY, TUESDAY, or WEDNESDAY. + type: string + timeOfDay: + description: Time, in 24-hour format, e.g., 02:00. + type: string + timeZone: + description: Time zone in either the Country/City format or + the UTC offset format, e.g., CET. + type: string + type: object + pendingDataReplicationMode: + description: The data replication mode that will be applied after + reboot. + type: string + publiclyAccessible: + description: Whether to enable connections from applications outside + of the VPC that hosts the broker's subnets. + type: boolean + securityGroups: + description: List of security group IDs assigned to the broker. + items: + type: string + type: array + x-kubernetes-list-type: set + storageType: + description: Storage type of the broker. For engine_type ActiveMQ, + the valid values are efs and ebs, and the AWS-default is efs. + For engine_type RabbitMQ, only ebs is supported. When using + ebs, only the mq.m5 broker instance type family is supported. + type: string + subnetIds: + description: List of subnet IDs in which to launch the broker. + A SINGLE_INSTANCE deployment requires one subnet. An ACTIVE_STANDBY_MULTI_AZ + deployment requires multiple subnets. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + user: + description: Configuration block for broker users. 
For engine_type + of RabbitMQ, Amazon MQ does not return broker users preventing + this resource from making user updates and drift detection. + Detailed below. + items: + properties: + consoleAccess: + description: Whether to enable access to the ActiveMQ Web + Console for the user. Applies to engine_type of ActiveMQ + only. + type: boolean + groups: + description: List of groups (20 maximum) to which the ActiveMQ + user belongs. Applies to engine_type of ActiveMQ only. + items: + type: string + type: array + x-kubernetes-list-type: set + replicationUser: + description: Whether to set set replication user. Defaults + to false. + type: boolean + username: + description: Username of the user. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/neptune.aws.upbound.io_clusters.yaml b/package/crds/neptune.aws.upbound.io_clusters.yaml index 2c03292113..7f679f2ac7 100644 --- a/package/crds/neptune.aws.upbound.io_clusters.yaml +++ b/package/crds/neptune.aws.upbound.io_clusters.yaml @@ -1880,3 +1880,1859 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Provides an Neptune + Cluster Resource + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowMajorVersionUpgrade: + description: Specifies whether upgrades between different major + versions are allowed. You must set it to true when providing + an engine_version parameter that uses a different major version + than the DB cluster's current version. Default is false. + type: boolean + applyImmediately: + description: Specifies whether any cluster modifications are applied + immediately, or during the next maintenance window. Default + is false. + type: boolean + availabilityZones: + description: A list of EC2 Availability Zones that instances in + the Neptune cluster can be created in. + items: + type: string + type: array + x-kubernetes-list-type: set + backupRetentionPeriod: + description: The days to retain backups for. Default 1 + type: number + copyTagsToSnapshot: + description: If set to true, tags are copied to any snapshot of + the DB cluster that is created. 
+ type: boolean + deletionProtection: + description: A value that indicates whether the DB cluster has + deletion protection enabled.The database can't be deleted when + deletion protection is enabled. By default, deletion protection + is disabled. + type: boolean + enableCloudwatchLogsExports: + description: A list of the log types this DB cluster is configured + to export to Cloudwatch Logs. Currently only supports audit + and slowquery. + items: + type: string + type: array + x-kubernetes-list-type: set + engine: + description: The name of the database engine to be used for this + Neptune cluster. Defaults to neptune. + type: string + engineVersion: + description: The database engine version. + type: string + finalSnapshotIdentifier: + description: The name of your final Neptune snapshot when this + Neptune cluster is deleted. If omitted, no final snapshot will + be made. + type: string + globalClusterIdentifier: + description: The global cluster identifier specified on aws_neptune_global_cluster. + type: string + iamDatabaseAuthenticationEnabled: + description: Specifies whether or not mappings of AWS Identity + and Access Management (IAM) accounts to database accounts is + enabled. + type: boolean + iamRoleRefs: + description: References to Role in iam to populate iamRoles. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + iamRoleSelector: + description: Selector for a list of Role in iam to populate iamRoles. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + iamRoles: + description: A List of ARNs for the IAM roles to associate to + the Neptune Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + kmsKeyArn: + description: The ARN for the KMS encryption key. When specifying + kms_key_arn, storage_encrypted needs to be set to true. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + neptuneClusterParameterGroupName: + description: A cluster parameter group to associate with the cluster. + type: string + neptuneClusterParameterGroupNameRef: + description: Reference to a ClusterParameterGroup in neptune to + populate neptuneClusterParameterGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + neptuneClusterParameterGroupNameSelector: + description: Selector for a ClusterParameterGroup in neptune to + populate neptuneClusterParameterGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + neptuneInstanceParameterGroupName: + description: The name of the DB parameter group to apply to all + instances of the DB cluster. + type: string + neptuneSubnetGroupName: + description: A Neptune subnet group to associate with this Neptune + instance. + type: string + neptuneSubnetGroupNameRef: + description: Reference to a SubnetGroup in neptune to populate + neptuneSubnetGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + neptuneSubnetGroupNameSelector: + description: Selector for a SubnetGroup in neptune to populate + neptuneSubnetGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + port: + description: The port on which the Neptune accepts connections. + Default is 8182. + type: number + preferredBackupWindow: + description: 'The daily time range during which automated backups + are created if automated backups are enabled using the BackupRetentionPeriod + parameter. Time in UTC. Default: A 30-minute window selected + at random from an 8-hour block of time per regionE.g., 04:00-09:00' + type: string + preferredMaintenanceWindow: + description: The weekly time range during which system maintenance + can occur, in (UTC) e.g., wed:04:00-wed:04:30 + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + replicationSourceIdentifier: + description: ARN of a source Neptune cluster or Neptune instance + if this Neptune cluster is to be created as a Read Replica. 
+ type: string + replicationSourceIdentifierRef: + description: Reference to a Cluster in neptune to populate replicationSourceIdentifier. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + replicationSourceIdentifierSelector: + description: Selector for a Cluster in neptune to populate replicationSourceIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serverlessV2ScalingConfiguration: + description: If set, create the Neptune cluster as a serverless + one. See Serverless for example block attributes. + properties: + maxCapacity: + description: ': (default: 128) The maximum Neptune Capacity + Units (NCUs) for this cluster. Must be lower or equal than + 128. See AWS Documentation for more details.' + type: number + minCapacity: + description: ': (default: 2.5) The minimum Neptune Capacity + Units (NCUs) for this cluster. Must be greater or equal + than 1. See AWS Documentation for more details.' + type: number + type: object + skipFinalSnapshot: + description: Determines whether a final Neptune snapshot is created + before the Neptune cluster is deleted. If true is specified, + no Neptune snapshot is created. If false is specified, a Neptune + snapshot is created before the Neptune cluster is deleted, using + the value from final_snapshot_identifier. Default is false. + type: boolean + snapshotIdentifier: + description: Specifies whether or not to create this cluster from + a snapshot. You can use either the name or ARN when specifying + a Neptune cluster snapshot, or the ARN when specifying a Neptune + snapshot. Automated snapshots should not be used for this attribute, + unless from a different cluster. Automated snapshots are deleted + as part of cluster destruction when the resource is replaced. + type: string + snapshotIdentifierRef: + description: Reference to a ClusterSnapshot in neptune to populate + snapshotIdentifier. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snapshotIdentifierSelector: + description: Selector for a ClusterSnapshot in neptune to populate + snapshotIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageEncrypted: + description: Specifies whether the Neptune cluster is encrypted. + The default is false if not specified. + type: boolean + storageType: + description: 'Storage type associated with the cluster standard/iopt1. + Default: standard' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: List of VPC security groups to associate with the + Cluster + items: + type: string + type: array + x-kubernetes-list-type: set + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowMajorVersionUpgrade: + description: Specifies whether upgrades between different major + versions are allowed. You must set it to true when providing + an engine_version parameter that uses a different major version + than the DB cluster's current version. Default is false. 
+ type: boolean + applyImmediately: + description: Specifies whether any cluster modifications are applied + immediately, or during the next maintenance window. Default + is false. + type: boolean + availabilityZones: + description: A list of EC2 Availability Zones that instances in + the Neptune cluster can be created in. + items: + type: string + type: array + x-kubernetes-list-type: set + backupRetentionPeriod: + description: The days to retain backups for. Default 1 + type: number + copyTagsToSnapshot: + description: If set to true, tags are copied to any snapshot of + the DB cluster that is created. + type: boolean + deletionProtection: + description: A value that indicates whether the DB cluster has + deletion protection enabled.The database can't be deleted when + deletion protection is enabled. By default, deletion protection + is disabled. + type: boolean + enableCloudwatchLogsExports: + description: A list of the log types this DB cluster is configured + to export to Cloudwatch Logs. Currently only supports audit + and slowquery. + items: + type: string + type: array + x-kubernetes-list-type: set + engine: + description: The name of the database engine to be used for this + Neptune cluster. Defaults to neptune. + type: string + engineVersion: + description: The database engine version. + type: string + finalSnapshotIdentifier: + description: The name of your final Neptune snapshot when this + Neptune cluster is deleted. If omitted, no final snapshot will + be made. + type: string + globalClusterIdentifier: + description: The global cluster identifier specified on aws_neptune_global_cluster. + type: string + iamDatabaseAuthenticationEnabled: + description: Specifies whether or not mappings of AWS Identity + and Access Management (IAM) accounts to database accounts is + enabled. + type: boolean + iamRoleRefs: + description: References to Role in iam to populate iamRoles. + items: + description: A Reference to a named object. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + iamRoleSelector: + description: Selector for a list of Role in iam to populate iamRoles. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + iamRoles: + description: A List of ARNs for the IAM roles to associate to + the Neptune Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + kmsKeyArn: + description: The ARN for the KMS encryption key. When specifying + kms_key_arn, storage_encrypted needs to be set to true. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + neptuneClusterParameterGroupName: + description: A cluster parameter group to associate with the cluster. + type: string + neptuneClusterParameterGroupNameRef: + description: Reference to a ClusterParameterGroup in neptune to + populate neptuneClusterParameterGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + neptuneClusterParameterGroupNameSelector: + description: Selector for a ClusterParameterGroup in neptune to + populate neptuneClusterParameterGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + neptuneInstanceParameterGroupName: + description: The name of the DB parameter group to apply to all + instances of the DB cluster. + type: string + neptuneSubnetGroupName: + description: A Neptune subnet group to associate with this Neptune + instance. + type: string + neptuneSubnetGroupNameRef: + description: Reference to a SubnetGroup in neptune to populate + neptuneSubnetGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + neptuneSubnetGroupNameSelector: + description: Selector for a SubnetGroup in neptune to populate + neptuneSubnetGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + port: + description: The port on which the Neptune accepts connections. + Default is 8182. + type: number + preferredBackupWindow: + description: 'The daily time range during which automated backups + are created if automated backups are enabled using the BackupRetentionPeriod + parameter. Time in UTC. Default: A 30-minute window selected + at random from an 8-hour block of time per regionE.g., 04:00-09:00' + type: string + preferredMaintenanceWindow: + description: The weekly time range during which system maintenance + can occur, in (UTC) e.g., wed:04:00-wed:04:30 + type: string + replicationSourceIdentifier: + description: ARN of a source Neptune cluster or Neptune instance + if this Neptune cluster is to be created as a Read Replica. + type: string + replicationSourceIdentifierRef: + description: Reference to a Cluster in neptune to populate replicationSourceIdentifier. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + replicationSourceIdentifierSelector: + description: Selector for a Cluster in neptune to populate replicationSourceIdentifier. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serverlessV2ScalingConfiguration: + description: If set, create the Neptune cluster as a serverless + one. See Serverless for example block attributes. + properties: + maxCapacity: + description: ': (default: 128) The maximum Neptune Capacity + Units (NCUs) for this cluster. Must be lower or equal than + 128. See AWS Documentation for more details.' + type: number + minCapacity: + description: ': (default: 2.5) The minimum Neptune Capacity + Units (NCUs) for this cluster. Must be greater or equal + than 1. See AWS Documentation for more details.' + type: number + type: object + skipFinalSnapshot: + description: Determines whether a final Neptune snapshot is created + before the Neptune cluster is deleted. If true is specified, + no Neptune snapshot is created. 
If false is specified, a Neptune + snapshot is created before the Neptune cluster is deleted, using + the value from final_snapshot_identifier. Default is false. + type: boolean + snapshotIdentifier: + description: Specifies whether or not to create this cluster from + a snapshot. You can use either the name or ARN when specifying + a Neptune cluster snapshot, or the ARN when specifying a Neptune + snapshot. Automated snapshots should not be used for this attribute, + unless from a different cluster. Automated snapshots are deleted + as part of cluster destruction when the resource is replaced. + type: string + snapshotIdentifierRef: + description: Reference to a ClusterSnapshot in neptune to populate + snapshotIdentifier. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snapshotIdentifierSelector: + description: Selector for a ClusterSnapshot in neptune to populate + snapshotIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageEncrypted: + description: Specifies whether the Neptune cluster is encrypted. + The default is false if not specified. + type: boolean + storageType: + description: 'Storage type associated with the cluster standard/iopt1. + Default: standard' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: List of VPC security groups to associate with the + Cluster + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + allowMajorVersionUpgrade: + description: Specifies whether upgrades between different major + versions are allowed. You must set it to true when providing + an engine_version parameter that uses a different major version + than the DB cluster's current version. Default is false. + type: boolean + applyImmediately: + description: Specifies whether any cluster modifications are applied + immediately, or during the next maintenance window. Default + is false. 
+ type: boolean + arn: + description: The Neptune Cluster Amazon Resource Name (ARN) + type: string + availabilityZones: + description: A list of EC2 Availability Zones that instances in + the Neptune cluster can be created in. + items: + type: string + type: array + x-kubernetes-list-type: set + backupRetentionPeriod: + description: The days to retain backups for. Default 1 + type: number + clusterMembers: + description: – List of Neptune Instances that are a part of this + cluster + items: + type: string + type: array + x-kubernetes-list-type: set + clusterResourceId: + description: The Neptune Cluster Resource ID + type: string + copyTagsToSnapshot: + description: If set to true, tags are copied to any snapshot of + the DB cluster that is created. + type: boolean + deletionProtection: + description: A value that indicates whether the DB cluster has + deletion protection enabled.The database can't be deleted when + deletion protection is enabled. By default, deletion protection + is disabled. + type: boolean + enableCloudwatchLogsExports: + description: A list of the log types this DB cluster is configured + to export to Cloudwatch Logs. Currently only supports audit + and slowquery. + items: + type: string + type: array + x-kubernetes-list-type: set + endpoint: + description: The DNS address of the Neptune instance + type: string + engine: + description: The name of the database engine to be used for this + Neptune cluster. Defaults to neptune. + type: string + engineVersion: + description: The database engine version. + type: string + finalSnapshotIdentifier: + description: The name of your final Neptune snapshot when this + Neptune cluster is deleted. If omitted, no final snapshot will + be made. + type: string + globalClusterIdentifier: + description: The global cluster identifier specified on aws_neptune_global_cluster. 
+ type: string + hostedZoneId: + description: The Route53 Hosted Zone ID of the endpoint + type: string + iamDatabaseAuthenticationEnabled: + description: Specifies whether or not mappings of AWS Identity + and Access Management (IAM) accounts to database accounts is + enabled. + type: boolean + iamRoles: + description: A List of ARNs for the IAM roles to associate to + the Neptune Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The Neptune Cluster Identifier + type: string + kmsKeyArn: + description: The ARN for the KMS encryption key. When specifying + kms_key_arn, storage_encrypted needs to be set to true. + type: string + neptuneClusterParameterGroupName: + description: A cluster parameter group to associate with the cluster. + type: string + neptuneInstanceParameterGroupName: + description: The name of the DB parameter group to apply to all + instances of the DB cluster. + type: string + neptuneSubnetGroupName: + description: A Neptune subnet group to associate with this Neptune + instance. + type: string + port: + description: The port on which the Neptune accepts connections. + Default is 8182. + type: number + preferredBackupWindow: + description: 'The daily time range during which automated backups + are created if automated backups are enabled using the BackupRetentionPeriod + parameter. Time in UTC. Default: A 30-minute window selected + at random from an 8-hour block of time per regionE.g., 04:00-09:00' + type: string + preferredMaintenanceWindow: + description: The weekly time range during which system maintenance + can occur, in (UTC) e.g., wed:04:00-wed:04:30 + type: string + readerEndpoint: + description: A read-only endpoint for the Neptune cluster, automatically + load-balanced across replicas + type: string + replicationSourceIdentifier: + description: ARN of a source Neptune cluster or Neptune instance + if this Neptune cluster is to be created as a Read Replica. 
+ type: string + serverlessV2ScalingConfiguration: + description: If set, create the Neptune cluster as a serverless + one. See Serverless for example block attributes. + properties: + maxCapacity: + description: ': (default: 128) The maximum Neptune Capacity + Units (NCUs) for this cluster. Must be lower or equal than + 128. See AWS Documentation for more details.' + type: number + minCapacity: + description: ': (default: 2.5) The minimum Neptune Capacity + Units (NCUs) for this cluster. Must be greater or equal + than 1. See AWS Documentation for more details.' + type: number + type: object + skipFinalSnapshot: + description: Determines whether a final Neptune snapshot is created + before the Neptune cluster is deleted. If true is specified, + no Neptune snapshot is created. If false is specified, a Neptune + snapshot is created before the Neptune cluster is deleted, using + the value from final_snapshot_identifier. Default is false. + type: boolean + snapshotIdentifier: + description: Specifies whether or not to create this cluster from + a snapshot. You can use either the name or ARN when specifying + a Neptune cluster snapshot, or the ARN when specifying a Neptune + snapshot. Automated snapshots should not be used for this attribute, + unless from a different cluster. Automated snapshots are deleted + as part of cluster destruction when the resource is replaced. + type: string + storageEncrypted: + description: Specifies whether the Neptune cluster is encrypted. + The default is false if not specified. + type: boolean + storageType: + description: 'Storage type associated with the cluster standard/iopt1. + Default: standard' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIds: + description: List of VPC security groups to associate with the + Cluster + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/networkfirewall.aws.upbound.io_firewallpolicies.yaml b/package/crds/networkfirewall.aws.upbound.io_firewallpolicies.yaml index 3e19e41364..5ca52502a4 100644 --- a/package/crds/networkfirewall.aws.upbound.io_firewallpolicies.yaml +++ b/package/crds/networkfirewall.aws.upbound.io_firewallpolicies.yaml @@ -1307,3 +1307,1232 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FirewallPolicy is the Schema for the FirewallPolicys API. Provides + an AWS Network Firewall Policy resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FirewallPolicySpec defines the desired state of FirewallPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A friendly description of the firewall policy. + type: string + encryptionConfiguration: + description: KMS encryption configuration settings. See Encryption + Configuration below for details. + properties: + keyId: + description: The ID of the customer managed key. You can use + any of the key identifiers that KMS supports, unless you're + using a key that's managed by another account. If you're + using a key managed by another account, then specify the + key ARN. + type: string + type: + description: The type of AWS KMS key to use for encryption + of your Network Firewall resources. Valid values are CUSTOMER_KMS + and AWS_OWNED_KMS_KEY. + type: string + type: object + firewallPolicy: + description: A configuration block describing the rule groups + and policy actions to use in the firewall policy. See Firewall + Policy below for details. + properties: + policyVariables: + description: . 
Contains variables that you can use to override + default Suricata settings in your firewall policy. See Rule + Variables for details. + properties: + ruleVariables: + items: + properties: + ipSet: + description: A configuration block that defines + a set of IP addresses. See IP Set below for details. + properties: + definition: + description: Set of IPv4 or IPv6 addresses in + CIDR notation to use for the Suricata HOME_NET + variable. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + key: + description: 'An alphanumeric string to identify + the ip_set. Valid values: HOME_NET' + type: string + type: object + type: array + type: object + statefulDefaultActions: + description: Set of actions to take on a packet if it does + not match any stateful rules in the policy. This can only + be specified if the policy has a stateful_engine_options + block with a rule_order value of STRICT_ORDER. You can specify + one of either or neither values of aws:drop_strict or aws:drop_established, + as well as any combination of aws:alert_strict and aws:alert_established. + items: + type: string + type: array + x-kubernetes-list-type: set + statefulEngineOptions: + description: A configuration block that defines options on + how the policy handles stateful rules. See Stateful Engine + Options below for details. + properties: + ruleOrder: + description: 'Indicates how to manage the order of stateful + rule evaluation for the policy. Default value: DEFAULT_ACTION_ORDER. + Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER.' + type: string + streamExceptionPolicy: + description: 'Describes how to treat traffic which has + broken midstream. Default value: DROP. Valid values: + DROP, CONTINUE, REJECT.' + type: string + type: object + statefulRuleGroupReference: + description: Set of configuration blocks containing references + to the stateful rule groups that are used in the policy. + See Stateful Rule Group Reference below for details. 
+ items: + properties: + override: + description: Configuration block for override values + properties: + action: + description: The action that changes the rule group + from DROP to ALERT . This only applies to managed + rule groups. + type: string + type: object + priority: + description: An integer setting that indicates the order + in which to run the stateless rule groups in a single + policy. AWS Network Firewall applies each stateless + rule group to a packet starting with the group that + has the lowest priority setting. + type: number + resourceArn: + description: The Amazon Resource Name (ARN) of the stateless + rule group. + type: string + resourceArnRef: + description: Reference to a RuleGroup in networkfirewall + to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a RuleGroup in networkfirewall + to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + statelessCustomAction: + description: Set of configuration blocks describing the custom + action definitions that are available for use in the firewall + policy's stateless_default_actions. See Stateless Custom + Action below for details. + items: + properties: + actionDefinition: + description: A configuration block describing the custom + action associated with the action_name. See Action + Definition below for details. + properties: + publishMetricAction: + description: A configuration block describing the + stateless inspection criteria that publishes the + specified metrics to Amazon CloudWatch for the + matching packet. You can pair this custom action + with any of the standard stateless rule actions. + See Publish Metric Action below for details. + properties: + dimension: + description: Set of configuration blocks describing + dimension settings to use for Amazon CloudWatch + custom metrics. See Dimension below for more + details. 
+ items: + properties: + value: + description: The string value to use in + the custom metric dimension. + type: string + type: object + type: array + type: object + type: object + actionName: + description: A friendly name of the custom action. + type: string + type: object + type: array + statelessDefaultActions: + description: |- + Set of actions to take on a packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. + items: + type: string + type: array + x-kubernetes-list-type: set + statelessFragmentDefaultActions: + description: |- + Set of actions to take on a fragmented packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. + items: + type: string + type: array + x-kubernetes-list-type: set + statelessRuleGroupReference: + description: Set of configuration blocks containing references + to the stateless rule groups that are used in the policy. + See Stateless Rule Group Reference below for details. + items: + properties: + priority: + description: An integer setting that indicates the order + in which to run the stateless rule groups in a single + policy. AWS Network Firewall applies each stateless + rule group to a packet starting with the group that + has the lowest priority setting. + type: number + resourceArn: + description: The Amazon Resource Name (ARN) of the stateless + rule group. 
+ type: string + resourceArnRef: + description: Reference to a RuleGroup in networkfirewall + to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a RuleGroup in networkfirewall + to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + tlsInspectionConfigurationArn: + description: The (ARN) of the TLS Inspection policy to attach + to the FW Policy. This must be added at creation of the + resource per AWS documentation. "You can only add a TLS + inspection configuration to a new policy, not to an existing + policy." This cannot be removed from a FW Policy. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A friendly description of the firewall policy. + type: string + encryptionConfiguration: + description: KMS encryption configuration settings. See Encryption + Configuration below for details. + properties: + keyId: + description: The ID of the customer managed key. 
You can use + any of the key identifiers that KMS supports, unless you're + using a key that's managed by another account. If you're + using a key managed by another account, then specify the + key ARN. + type: string + type: + description: The type of AWS KMS key to use for encryption + of your Network Firewall resources. Valid values are CUSTOMER_KMS + and AWS_OWNED_KMS_KEY. + type: string + type: object + firewallPolicy: + description: A configuration block describing the rule groups + and policy actions to use in the firewall policy. See Firewall + Policy below for details. + properties: + policyVariables: + description: . Contains variables that you can use to override + default Suricata settings in your firewall policy. See Rule + Variables for details. + properties: + ruleVariables: + items: + properties: + ipSet: + description: A configuration block that defines + a set of IP addresses. See IP Set below for details. + properties: + definition: + description: Set of IPv4 or IPv6 addresses in + CIDR notation to use for the Suricata HOME_NET + variable. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + key: + description: 'An alphanumeric string to identify + the ip_set. Valid values: HOME_NET' + type: string + type: object + type: array + type: object + statefulDefaultActions: + description: Set of actions to take on a packet if it does + not match any stateful rules in the policy. This can only + be specified if the policy has a stateful_engine_options + block with a rule_order value of STRICT_ORDER. You can specify + one of either or neither values of aws:drop_strict or aws:drop_established, + as well as any combination of aws:alert_strict and aws:alert_established. + items: + type: string + type: array + x-kubernetes-list-type: set + statefulEngineOptions: + description: A configuration block that defines options on + how the policy handles stateful rules. See Stateful Engine + Options below for details. 
+ properties: + ruleOrder: + description: 'Indicates how to manage the order of stateful + rule evaluation for the policy. Default value: DEFAULT_ACTION_ORDER. + Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER.' + type: string + streamExceptionPolicy: + description: 'Describes how to treat traffic which has + broken midstream. Default value: DROP. Valid values: + DROP, CONTINUE, REJECT.' + type: string + type: object + statefulRuleGroupReference: + description: Set of configuration blocks containing references + to the stateful rule groups that are used in the policy. + See Stateful Rule Group Reference below for details. + items: + properties: + override: + description: Configuration block for override values + properties: + action: + description: The action that changes the rule group + from DROP to ALERT . This only applies to managed + rule groups. + type: string + type: object + priority: + description: An integer setting that indicates the order + in which to run the stateless rule groups in a single + policy. AWS Network Firewall applies each stateless + rule group to a packet starting with the group that + has the lowest priority setting. + type: number + resourceArn: + description: The Amazon Resource Name (ARN) of the stateless + rule group. + type: string + resourceArnRef: + description: Reference to a RuleGroup in networkfirewall + to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a RuleGroup in networkfirewall + to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + statelessCustomAction: + description: Set of configuration blocks describing the custom + action definitions that are available for use in the firewall + policy's stateless_default_actions. See Stateless Custom + Action below for details. + items: + properties: + actionDefinition: + description: A configuration block describing the custom + action associated with the action_name. See Action + Definition below for details. 
+ properties: + publishMetricAction: + description: A configuration block describing the + stateless inspection criteria that publishes the + specified metrics to Amazon CloudWatch for the + matching packet. You can pair this custom action + with any of the standard stateless rule actions. + See Publish Metric Action below for details. + properties: + dimension: + description: Set of configuration blocks describing + dimension settings to use for Amazon CloudWatch + custom metrics. See Dimension below for more + details. + items: + properties: + value: + description: The string value to use in + the custom metric dimension. + type: string + type: object + type: array + type: object + type: object + actionName: + description: A friendly name of the custom action. + type: string + type: object + type: array + statelessDefaultActions: + description: |- + Set of actions to take on a packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. + items: + type: string + type: array + x-kubernetes-list-type: set + statelessFragmentDefaultActions: + description: |- + Set of actions to take on a fragmented packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + statelessRuleGroupReference: + description: Set of configuration blocks containing references + to the stateless rule groups that are used in the policy. + See Stateless Rule Group Reference below for details. + items: + properties: + priority: + description: An integer setting that indicates the order + in which to run the stateless rule groups in a single + policy. AWS Network Firewall applies each stateless + rule group to a packet starting with the group that + has the lowest priority setting. + type: number + resourceArn: + description: The Amazon Resource Name (ARN) of the stateless + rule group. + type: string + resourceArnRef: + description: Reference to a RuleGroup in networkfirewall + to populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a RuleGroup in networkfirewall + to populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + tlsInspectionConfigurationArn: + description: The (ARN) of the TLS Inspection policy to attach + to the FW Policy. This must be added at creation of the + resource per AWS documentation. "You can only add a TLS + inspection configuration to a new policy, not to an existing + policy." This cannot be removed from a FW Policy. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.firewallPolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.firewallPolicy) + || (has(self.initProvider) && has(self.initProvider.firewallPolicy))' + status: + description: FirewallPolicyStatus defines the observed state of FirewallPolicy. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) that identifies the + firewall policy. 
+ type: string + description: + description: A friendly description of the firewall policy. + type: string + encryptionConfiguration: + description: KMS encryption configuration settings. See Encryption + Configuration below for details. + properties: + keyId: + description: The ID of the customer managed key. You can use + any of the key identifiers that KMS supports, unless you're + using a key that's managed by another account. If you're + using a key managed by another account, then specify the + key ARN. + type: string + type: + description: The type of AWS KMS key to use for encryption + of your Network Firewall resources. Valid values are CUSTOMER_KMS + and AWS_OWNED_KMS_KEY. + type: string + type: object + firewallPolicy: + description: A configuration block describing the rule groups + and policy actions to use in the firewall policy. See Firewall + Policy below for details. + properties: + policyVariables: + description: . Contains variables that you can use to override + default Suricata settings in your firewall policy. See Rule + Variables for details. + properties: + ruleVariables: + items: + properties: + ipSet: + description: A configuration block that defines + a set of IP addresses. See IP Set below for details. + properties: + definition: + description: Set of IPv4 or IPv6 addresses in + CIDR notation to use for the Suricata HOME_NET + variable. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + key: + description: 'An alphanumeric string to identify + the ip_set. Valid values: HOME_NET' + type: string + type: object + type: array + type: object + statefulDefaultActions: + description: Set of actions to take on a packet if it does + not match any stateful rules in the policy. This can only + be specified if the policy has a stateful_engine_options + block with a rule_order value of STRICT_ORDER. 
You can specify + one of either or neither values of aws:drop_strict or aws:drop_established, + as well as any combination of aws:alert_strict and aws:alert_established. + items: + type: string + type: array + x-kubernetes-list-type: set + statefulEngineOptions: + description: A configuration block that defines options on + how the policy handles stateful rules. See Stateful Engine + Options below for details. + properties: + ruleOrder: + description: 'Indicates how to manage the order of stateful + rule evaluation for the policy. Default value: DEFAULT_ACTION_ORDER. + Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER.' + type: string + streamExceptionPolicy: + description: 'Describes how to treat traffic which has + broken midstream. Default value: DROP. Valid values: + DROP, CONTINUE, REJECT.' + type: string + type: object + statefulRuleGroupReference: + description: Set of configuration blocks containing references + to the stateful rule groups that are used in the policy. + See Stateful Rule Group Reference below for details. + items: + properties: + override: + description: Configuration block for override values + properties: + action: + description: The action that changes the rule group + from DROP to ALERT . This only applies to managed + rule groups. + type: string + type: object + priority: + description: An integer setting that indicates the order + in which to run the stateless rule groups in a single + policy. AWS Network Firewall applies each stateless + rule group to a packet starting with the group that + has the lowest priority setting. + type: number + resourceArn: + description: The Amazon Resource Name (ARN) of the stateless + rule group. + type: string + type: object + type: array + statelessCustomAction: + description: Set of configuration blocks describing the custom + action definitions that are available for use in the firewall + policy's stateless_default_actions. See Stateless Custom + Action below for details. 
+ items: + properties: + actionDefinition: + description: A configuration block describing the custom + action associated with the action_name. See Action + Definition below for details. + properties: + publishMetricAction: + description: A configuration block describing the + stateless inspection criteria that publishes the + specified metrics to Amazon CloudWatch for the + matching packet. You can pair this custom action + with any of the standard stateless rule actions. + See Publish Metric Action below for details. + properties: + dimension: + description: Set of configuration blocks describing + dimension settings to use for Amazon CloudWatch + custom metrics. See Dimension below for more + details. + items: + properties: + value: + description: The string value to use in + the custom metric dimension. + type: string + type: object + type: array + type: object + type: object + actionName: + description: A friendly name of the custom action. + type: string + type: object + type: array + statelessDefaultActions: + description: |- + Set of actions to take on a packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. + items: + type: string + type: array + x-kubernetes-list-type: set + statelessFragmentDefaultActions: + description: |- + Set of actions to take on a fragmented packet if it does not match any of the stateless rules in the policy. You must specify one of the standard actions including: aws:drop, aws:pass, or aws:forward_to_sfe. + In addition, you can specify custom actions that are compatible with your standard action choice. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + statelessRuleGroupReference: + description: Set of configuration blocks containing references + to the stateless rule groups that are used in the policy. + See Stateless Rule Group Reference below for details. + items: + properties: + priority: + description: An integer setting that indicates the order + in which to run the stateless rule groups in a single + policy. AWS Network Firewall applies each stateless + rule group to a packet starting with the group that + has the lowest priority setting. + type: number + resourceArn: + description: The Amazon Resource Name (ARN) of the stateless + rule group. + type: string + type: object + type: array + tlsInspectionConfigurationArn: + description: The (ARN) of the TLS Inspection policy to attach + to the FW Policy. This must be added at creation of the + resource per AWS documentation. "You can only add a TLS + inspection configuration to a new policy, not to an existing + policy." This cannot be removed from a FW Policy. + type: string + type: object + id: + description: The Amazon Resource Name (ARN) that identifies the + firewall policy. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + updateToken: + description: A string token used when updating a firewall policy. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/networkfirewall.aws.upbound.io_firewalls.yaml b/package/crds/networkfirewall.aws.upbound.io_firewalls.yaml index ae48b3d80c..2851e792de 100644 --- a/package/crds/networkfirewall.aws.upbound.io_firewalls.yaml +++ b/package/crds/networkfirewall.aws.upbound.io_firewalls.yaml @@ -1056,3 +1056,1035 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Firewall is the Schema for the Firewalls API. Provides an AWS + Network Firewall Firewall resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FirewallSpec defines the desired state of Firewall + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deleteProtection: + description: A flag indicating whether the firewall is protected + against deletion. Use this setting to protect against accidentally + deleting a firewall that is in use. Defaults to false. + type: boolean + description: + description: A friendly description of the firewall. + type: string + encryptionConfiguration: + description: KMS encryption configuration settings. See Encryption + Configuration below for details. + properties: + keyId: + description: The ID of the customer managed key. You can use + any of the key identifiers that KMS supports, unless you're + using a key that's managed by another account. If you're + using a key managed by another account, then specify the + key ARN. + type: string + type: + description: The type of AWS KMS key to use for encryption + of your Network Firewall resources. Valid values are CUSTOMER_KMS + and AWS_OWNED_KMS_KEY. + type: string + type: object + firewallPolicyArn: + description: The Amazon Resource Name (ARN) of the VPC Firewall + policy. 
+ type: string + firewallPolicyArnRef: + description: Reference to a FirewallPolicy in networkfirewall + to populate firewallPolicyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + firewallPolicyArnSelector: + description: Selector for a FirewallPolicy in networkfirewall + to populate firewallPolicyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + firewallPolicyChangeProtection: + description: A flag indicating whether the firewall is protected + against a change to the firewall policy association. Use this + setting to protect against accidentally modifying the firewall + policy for a firewall that is in use. Defaults to false. + type: boolean + name: + description: A friendly name of the firewall. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + subnetChangeProtection: + description: A flag indicating whether the firewall is protected + against changes to the subnet associations. Use this setting + to protect against accidentally modifying the subnet associations + for a firewall that is in use. Defaults to false. + type: boolean + subnetMapping: + description: Set of configuration blocks describing the public + subnets. Each subnet must belong to a different Availability + Zone in the VPC. AWS Network Firewall creates a firewall endpoint + in each subnet. See Subnet Mapping below for details. + items: + properties: + ipAddressType: + description: 'The subnet''s IP address type. Valida values: + "DUALSTACK", "IPV4".' + type: string + subnetId: + description: The unique identifier for the subnet. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcId: + description: The unique identifier of the VPC where AWS Network + Firewall should create the firewall. 
+ type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deleteProtection: + description: A flag indicating whether the firewall is protected + against deletion. Use this setting to protect against accidentally + deleting a firewall that is in use. Defaults to false. + type: boolean + description: + description: A friendly description of the firewall. + type: string + encryptionConfiguration: + description: KMS encryption configuration settings. See Encryption + Configuration below for details. + properties: + keyId: + description: The ID of the customer managed key. You can use + any of the key identifiers that KMS supports, unless you're + using a key that's managed by another account. If you're + using a key managed by another account, then specify the + key ARN. + type: string + type: + description: The type of AWS KMS key to use for encryption + of your Network Firewall resources. Valid values are CUSTOMER_KMS + and AWS_OWNED_KMS_KEY. 
+ type: string + type: object + firewallPolicyArn: + description: The Amazon Resource Name (ARN) of the VPC Firewall + policy. + type: string + firewallPolicyArnRef: + description: Reference to a FirewallPolicy in networkfirewall + to populate firewallPolicyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + firewallPolicyArnSelector: + description: Selector for a FirewallPolicy in networkfirewall + to populate firewallPolicyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + firewallPolicyChangeProtection: + description: A flag indicating whether the firewall is protected + against a change to the firewall policy association. Use this + setting to protect against accidentally modifying the firewall + policy for a firewall that is in use. Defaults to false. + type: boolean + name: + description: A friendly name of the firewall. + type: string + subnetChangeProtection: + description: A flag indicating whether the firewall is protected + against changes to the subnet associations. Use this setting + to protect against accidentally modifying the subnet associations + for a firewall that is in use. Defaults to false. + type: boolean + subnetMapping: + description: Set of configuration blocks describing the public + subnets. Each subnet must belong to a different Availability + Zone in the VPC. AWS Network Firewall creates a firewall endpoint + in each subnet. See Subnet Mapping below for details. + items: + properties: + ipAddressType: + description: 'The subnet''s IP address type. Valida values: + "DUALSTACK", "IPV4".' + type: string + subnetId: + description: The unique identifier for the subnet. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + vpcId: + description: The unique identifier of the VPC where AWS Network + Firewall should create the firewall. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.subnetMapping is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.subnetMapping) + || (has(self.initProvider) && has(self.initProvider.subnetMapping))' + status: + description: FirewallStatus defines the observed state of Firewall. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) that identifies the + firewall. + type: string + deleteProtection: + description: A flag indicating whether the firewall is protected + against deletion. Use this setting to protect against accidentally + deleting a firewall that is in use. Defaults to false. + type: boolean + description: + description: A friendly description of the firewall. + type: string + encryptionConfiguration: + description: KMS encryption configuration settings. See Encryption + Configuration below for details. + properties: + keyId: + description: The ID of the customer managed key. You can use + any of the key identifiers that KMS supports, unless you're + using a key that's managed by another account. If you're + using a key managed by another account, then specify the + key ARN. + type: string + type: + description: The type of AWS KMS key to use for encryption + of your Network Firewall resources. Valid values are CUSTOMER_KMS + and AWS_OWNED_KMS_KEY. + type: string + type: object + firewallPolicyArn: + description: The Amazon Resource Name (ARN) of the VPC Firewall + policy. 
+ type: string + firewallPolicyChangeProtection: + description: A flag indicating whether the firewall is protected + against a change to the firewall policy association. Use this + setting to protect against accidentally modifying the firewall + policy for a firewall that is in use. Defaults to false. + type: boolean + firewallStatus: + description: Nested list of information about the current status + of the firewall. + items: + properties: + syncStates: + description: Set of subnets configured for use by the firewall. + items: + properties: + attachment: + description: Nested list describing the attachment + status of the firewall's association with a single + VPC subnet. + items: + properties: + endpointId: + description: The identifier of the firewall + endpoint that AWS Network Firewall has instantiated + in the subnet. You use this to identify the + firewall endpoint in the VPC route tables, + when you redirect the VPC traffic through + the endpoint. + type: string + subnetId: + description: The unique identifier for the subnet. + type: string + type: object + type: array + availabilityZone: + description: The Availability Zone where the subnet + is configured. + type: string + type: object + type: array + type: object + type: array + id: + description: The Amazon Resource Name (ARN) that identifies the + firewall. + type: string + name: + description: A friendly name of the firewall. + type: string + subnetChangeProtection: + description: A flag indicating whether the firewall is protected + against changes to the subnet associations. Use this setting + to protect against accidentally modifying the subnet associations + for a firewall that is in use. Defaults to false. + type: boolean + subnetMapping: + description: Set of configuration blocks describing the public + subnets. Each subnet must belong to a different Availability + Zone in the VPC. AWS Network Firewall creates a firewall endpoint + in each subnet. See Subnet Mapping below for details. 
+ items: + properties: + ipAddressType: + description: 'The subnet''s IP address type. Valida values: + "DUALSTACK", "IPV4".' + type: string + subnetId: + description: The unique identifier for the subnet. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + updateToken: + description: A string token used when updating a firewall. + type: string + vpcId: + description: The unique identifier of the VPC where AWS Network + Firewall should create the firewall. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/networkfirewall.aws.upbound.io_loggingconfigurations.yaml b/package/crds/networkfirewall.aws.upbound.io_loggingconfigurations.yaml index 742318b5fe..afb5d1429f 100644 --- a/package/crds/networkfirewall.aws.upbound.io_loggingconfigurations.yaml +++ b/package/crds/networkfirewall.aws.upbound.io_loggingconfigurations.yaml @@ -607,3 +607,586 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LoggingConfiguration is the Schema for the LoggingConfigurations + API. Provides an AWS Network Firewall Logging Configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LoggingConfigurationSpec defines the desired state of LoggingConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + firewallArn: + description: The Amazon Resource Name (ARN) of the Network Firewall + firewall. + type: string + firewallArnRef: + description: Reference to a Firewall in networkfirewall to populate + firewallArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + firewallArnSelector: + description: Selector for a Firewall in networkfirewall to populate + firewallArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + loggingConfiguration: + description: A configuration block describing how AWS Network + Firewall performs logging for a firewall. See Logging Configuration + below for details. + properties: + logDestinationConfig: + description: Set of configuration blocks describing the logging + details for a firewall. 
See Log Destination Config below + for details. At most, only two blocks can be specified; + one for FLOW logs and one for ALERT logs. + items: + properties: + logDestination: + additionalProperties: + type: string + description: A map describing the logging destination + for the chosen log_destination_type. + type: object + x-kubernetes-map-type: granular + logDestinationType: + description: 'The location to send logs to. Valid values: + S3, CloudWatchLogs, KinesisDataFirehose.' + type: string + logType: + description: 'The type of log to send. Valid values: + ALERT or FLOW. Alert logs report traffic that matches + a StatefulRule with an action setting that sends a + log message. Flow logs are standard network traffic + flow logs.' + type: string + type: object + type: array + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + firewallArn: + description: The Amazon Resource Name (ARN) of the Network Firewall + firewall. + type: string + firewallArnRef: + description: Reference to a Firewall in networkfirewall to populate + firewallArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + firewallArnSelector: + description: Selector for a Firewall in networkfirewall to populate + firewallArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + loggingConfiguration: + description: A configuration block describing how AWS Network + Firewall performs logging for a firewall. See Logging Configuration + below for details. + properties: + logDestinationConfig: + description: Set of configuration blocks describing the logging + details for a firewall. See Log Destination Config below + for details. At most, only two blocks can be specified; + one for FLOW logs and one for ALERT logs. + items: + properties: + logDestination: + additionalProperties: + type: string + description: A map describing the logging destination + for the chosen log_destination_type. + type: object + x-kubernetes-map-type: granular + logDestinationType: + description: 'The location to send logs to. Valid values: + S3, CloudWatchLogs, KinesisDataFirehose.' + type: string + logType: + description: 'The type of log to send. Valid values: + ALERT or FLOW. Alert logs report traffic that matches + a StatefulRule with an action setting that sends a + log message. Flow logs are standard network traffic + flow logs.' + type: string + type: object + type: array + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.loggingConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.loggingConfiguration) + || (has(self.initProvider) && has(self.initProvider.loggingConfiguration))' + status: + description: LoggingConfigurationStatus defines the observed state of + LoggingConfiguration. + properties: + atProvider: + properties: + firewallArn: + description: The Amazon Resource Name (ARN) of the Network Firewall + firewall. + type: string + id: + description: The Amazon Resource Name (ARN) of the associated + firewall. + type: string + loggingConfiguration: + description: A configuration block describing how AWS Network + Firewall performs logging for a firewall. See Logging Configuration + below for details. + properties: + logDestinationConfig: + description: Set of configuration blocks describing the logging + details for a firewall. 
See Log Destination Config below + for details. At most, only two blocks can be specified; + one for FLOW logs and one for ALERT logs. + items: + properties: + logDestination: + additionalProperties: + type: string + description: A map describing the logging destination + for the chosen log_destination_type. + type: object + x-kubernetes-map-type: granular + logDestinationType: + description: 'The location to send logs to. Valid values: + S3, CloudWatchLogs, KinesisDataFirehose.' + type: string + logType: + description: 'The type of log to send. Valid values: + ALERT or FLOW. Alert logs report traffic that matches + a StatefulRule with an action setting that sends a + log message. Flow logs are standard network traffic + flow logs.' + type: string + type: object + type: array + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/networkfirewall.aws.upbound.io_rulegroups.yaml b/package/crds/networkfirewall.aws.upbound.io_rulegroups.yaml index 2668bb1b69..d03562d6bc 100644 --- a/package/crds/networkfirewall.aws.upbound.io_rulegroups.yaml +++ b/package/crds/networkfirewall.aws.upbound.io_rulegroups.yaml @@ -2122,3 +2122,1898 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RuleGroup is the Schema for the RuleGroups API. Provides an AWS + Network Firewall Rule Group resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RuleGroupSpec defines the desired state of RuleGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + capacity: + description: The maximum number of operating resources that this + rule group can use. For a stateless rule group, the capacity + required is the sum of the capacity requirements of the individual + rules. For a stateful rule group, the minimum capacity required + is the number of individual rules. + type: number + description: + description: A friendly description of the rule group. + type: string + encryptionConfiguration: + description: KMS encryption configuration settings. See Encryption + Configuration below for details. + properties: + keyId: + description: The ID of the customer managed key. You can use + any of the key identifiers that KMS supports, unless you're + using a key that's managed by another account. If you're + using a key managed by another account, then specify the + key ARN. + type: string + type: + description: The type of AWS KMS key to use for encryption + of your Network Firewall resources. Valid values are CUSTOMER_KMS + and AWS_OWNED_KMS_KEY. 
+ type: string + type: object + name: + description: A friendly name of the rule group. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + ruleGroup: + description: A configuration block that defines the rule group + rules. Required unless rules is specified. See Rule Group below + for details. + properties: + referenceSets: + description: A configuration block that defines the IP Set + References for the rule group. See Reference Sets below + for details. Please notes that there can only be a maximum + of 5 reference_sets in a rule_group. See the AWS documentation + for details. + properties: + ipSetReferences: + items: + properties: + ipSetReference: + description: Set of configuration blocks that define + the IP Reference information. See IP Set Reference + below for details. + items: + properties: + referenceArn: + description: Set of Managed Prefix IP ARN(s) + type: string + referenceArnRef: + description: Reference to a ManagedPrefixList + in ec2 to populate referenceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + referenceArnSelector: + description: Selector for a ManagedPrefixList + in ec2 to populate referenceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + key: + description: An unique alphanumeric string to identify + the port_set. + type: string + type: object + type: array + type: object + ruleVariables: + description: A configuration block that defines additional + settings available to use in the rules defined in the rule + group. Can only be specified for stateful rule groups. See + Rule Variables below for details. + properties: + ipSets: + description: Set of configuration blocks that define IP + address information. See IP Sets below for details. + items: + properties: + ipSet: + description: A configuration block that defines + a set of IP addresses. See IP Set below for details. 
+ properties: + definition: + description: Set of port ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + key: + description: An unique alphanumeric string to identify + the port_set. + type: string + type: object + type: array + portSets: + description: Set of configuration blocks that define port + range information. See Port Sets below for details. + items: + properties: + key: + description: An unique alphanumeric string to identify + the port_set. + type: string + portSet: + description: A configuration block that defines + a set of port ranges. See Port Set below for details. + properties: + definition: + description: Set of port ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + type: object + rulesSource: + description: A configuration block that defines the stateful + or stateless rules for the rule group. See Rules Source + below for details. + properties: + rulesSourceList: + description: A configuration block containing stateful + inspection criteria for a domain list rule group. See + Rules Source List below for details. + properties: + generatedRulesType: + description: 'String value to specify whether domains + in the target list are allowed or denied access. + Valid values: ALLOWLIST, DENYLIST.' + type: string + targetTypes: + description: 'Set of types of domain specifications + that are provided in the targets argument. Valid + values: HTTP_HOST, TLS_SNI.' + items: + type: string + type: array + x-kubernetes-list-type: set + targets: + description: Set of domains that you want to inspect + for in your traffic flows. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + rulesString: + description: The fully qualified name of a file in an + S3 bucket that contains Suricata compatible intrusion + preventions system (IPS) rules or the Suricata rules + as a string. 
These rules contain stateful inspection + criteria and the action to take for traffic that matches + the criteria. + type: string + statefulRule: + description: Set of configuration blocks containing stateful + inspection criteria for 5-tuple rules to be used together + in a rule group. See Stateful Rule below for details. + items: + properties: + action: + description: 'Action to take with packets in a traffic + flow when the flow matches the stateful rule criteria. + For all actions, AWS Network Firewall performs + the specified action and discontinues stateful + inspection of the traffic flow. Valid values: + ALERT, DROP, PASS, or REJECT.' + type: string + header: + description: A configuration block containing the + stateful 5-tuple inspection criteria for the rule, + used to inspect traffic flows. See Header below + for details. + properties: + destination: + description: Set of configuration blocks describing + the destination IP address and address ranges + to inspect for, in CIDR notation. If not specified, + this matches with any destination address. + See Destination below for details. + type: string + destinationPort: + description: Set of configuration blocks describing + the destination ports to inspect for. If not + specified, this matches with any destination + port. See Destination Port below for details. + type: string + direction: + description: 'The direction of traffic flow + to inspect. Valid values: ANY or FORWARD.' + type: string + protocol: + description: 'The protocol to inspect. Valid + values: IP, TCP, UDP, ICMP, HTTP, FTP, TLS, + SMB, DNS, DCERPC, SSH, SMTP, IMAP, MSN, KRB5, + IKEV2, TFTP, NTP, DHCP.' + type: string + source: + description: Set of configuration blocks describing + the source IP address and address ranges to + inspect for, in CIDR notation. If not specified, + this matches with any source address. See + Source below for details. 
+ type: string + sourcePort: + description: Set of configuration blocks describing + the source ports to inspect for. If not specified, + this matches with any source port. See Source + Port below for details. + type: string + type: object + ruleOption: + description: Set of configuration blocks containing + additional settings for a stateful rule. See Rule + Option below for details. + items: + properties: + keyword: + description: |- + Keyword defined by open source detection systems like Snort or Suricata for stateful rule inspection. + See Snort General Rule Options or Suricata Rule Options for more details. + type: string + settings: + description: Set of strings for additional + settings to use in stateful rule inspection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + statelessRulesAndCustomActions: + description: A configuration block containing stateless + inspection criteria for a stateless rule group. See + Stateless Rules and Custom Actions below for details. + properties: + customAction: + description: Set of configuration blocks containing + custom action definitions that are available for + use by the set of stateless rule. See Custom Action + below for details. + items: + properties: + actionDefinition: + description: A configuration block describing + the custom action associated with the action_name. + See Action Definition below for details. + properties: + publishMetricAction: + description: A configuration block describing + the stateless inspection criteria that + publishes the specified metrics to Amazon + CloudWatch for the matching packet. You + can pair this custom action with any of + the standard stateless rule actions. See + Publish Metric Action below for details. + properties: + dimension: + description: Set of configuration blocks + containing the dimension settings + to use for Amazon CloudWatch custom + metrics. See Dimension below for details. 
+ items: + properties: + value: + description: The value to use + in the custom metric dimension. + type: string + type: object + type: array + type: object + type: object + actionName: + description: A friendly name of the custom action. + type: string + type: object + type: array + statelessRule: + description: Set of configuration blocks containing + the stateless rules for use in the stateless rule + group. See Stateless Rule below for details. + items: + properties: + priority: + description: A setting that indicates the order + in which to run this rule relative to all + of the rules that are defined for a stateless + rule group. AWS Network Firewall evaluates + the rules in a rule group starting with the + lowest priority setting. + type: number + ruleDefinition: + description: A configuration block defining + the stateless 5-tuple packet inspection criteria + and the action to take on a packet that matches + the criteria. See Rule Definition below for + details. + properties: + actions: + description: 'Set of actions to take on + a packet that matches one of the stateless + rule definition''s match_attributes. For + every rule you must specify 1 standard + action, and you can add custom actions. + Standard actions include: aws:pass, aws:drop, + aws:forward_to_sfe.' + items: + type: string + type: array + x-kubernetes-list-type: set + matchAttributes: + description: A configuration block containing + criteria for AWS Network Firewall to use + to inspect an individual packet in stateless + rule inspection. See Match Attributes + below for details. + properties: + destination: + description: Set of configuration blocks + describing the destination IP address + and address ranges to inspect for, + in CIDR notation. If not specified, + this matches with any destination + address. See Destination below for + details. + items: + properties: + addressDefinition: + description: An IP address or + a block of IP addresses in CIDR + notation. 
AWS Network Firewall + supports all address ranges + for IPv4. + type: string + type: object + type: array + destinationPort: + description: Set of configuration blocks + describing the destination ports to + inspect for. If not specified, this + matches with any destination port. + See Destination Port below for details. + items: + properties: + fromPort: + description: The lower limit of + the port range. This must be + less than or equal to the to_port. + type: number + toPort: + description: The upper limit of + the port range. This must be + greater than or equal to the + from_port. + type: number + type: object + type: array + protocols: + description: Set of protocols to inspect + for, specified using the protocol's + assigned internet protocol number + (IANA). If not specified, this matches + with any protocol. + items: + type: number + type: array + x-kubernetes-list-type: set + source: + description: Set of configuration blocks + describing the source IP address and + address ranges to inspect for, in + CIDR notation. If not specified, this + matches with any source address. See + Source below for details. + items: + properties: + addressDefinition: + description: An IP address or + a block of IP addresses in CIDR + notation. AWS Network Firewall + supports all address ranges + for IPv4. + type: string + type: object + type: array + sourcePort: + description: Set of configuration blocks + describing the source ports to inspect + for. If not specified, this matches + with any source port. See Source Port + below for details. + items: + properties: + fromPort: + description: The lower limit of + the port range. This must be + less than or equal to the to_port. + type: number + toPort: + description: The upper limit of + the port range. This must be + greater than or equal to the + from_port. + type: number + type: object + type: array + tcpFlag: + description: Set of configuration blocks + containing the TCP flags and masks + to inspect for. 
If not specified, + this matches with any settings. + items: + properties: + flags: + description: |- + Set of flags to look for in a packet. This setting can only specify values that are also specified in masks. + Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + items: + type: string + type: array + x-kubernetes-list-type: set + masks: + description: |- + Set of flags to consider in the inspection. To inspect all flags, leave this empty. + Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: object + type: object + type: array + type: object + type: object + statefulRuleOptions: + description: A configuration block that defines stateful rule + options for the rule group. See Stateful Rule Options below + for details. + properties: + ruleOrder: + description: 'Indicates how to manage the order of the + rule evaluation for the rule group. Default value: DEFAULT_ACTION_ORDER. + Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER.' + type: string + type: object + type: object + rules: + description: The stateful rule group rules specifications in Suricata + file format, with one rule per line. Use this to import your + existing Suricata compatible rule groups. Required unless rule_group + is specified. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: 'Whether the rule group is stateless (containing + stateless rules) or stateful (containing stateful rules). Valid + values include: STATEFUL or STATELESS.' + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + capacity: + description: The maximum number of operating resources that this + rule group can use. For a stateless rule group, the capacity + required is the sum of the capacity requirements of the individual + rules. For a stateful rule group, the minimum capacity required + is the number of individual rules. + type: number + description: + description: A friendly description of the rule group. + type: string + encryptionConfiguration: + description: KMS encryption configuration settings. See Encryption + Configuration below for details. + properties: + keyId: + description: The ID of the customer managed key. You can use + any of the key identifiers that KMS supports, unless you're + using a key that's managed by another account. If you're + using a key managed by another account, then specify the + key ARN. + type: string + type: + description: The type of AWS KMS key to use for encryption + of your Network Firewall resources. Valid values are CUSTOMER_KMS + and AWS_OWNED_KMS_KEY. + type: string + type: object + name: + description: A friendly name of the rule group. + type: string + ruleGroup: + description: A configuration block that defines the rule group + rules. Required unless rules is specified. See Rule Group below + for details. + properties: + referenceSets: + description: A configuration block that defines the IP Set + References for the rule group. See Reference Sets below + for details. Please notes that there can only be a maximum + of 5 reference_sets in a rule_group. See the AWS documentation + for details. 
+ properties: + ipSetReferences: + items: + properties: + ipSetReference: + description: Set of configuration blocks that define + the IP Reference information. See IP Set Reference + below for details. + items: + properties: + referenceArn: + description: Set of Managed Prefix IP ARN(s) + type: string + referenceArnRef: + description: Reference to a ManagedPrefixList + in ec2 to populate referenceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + referenceArnSelector: + description: Selector for a ManagedPrefixList + in ec2 to populate referenceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + key: + description: An unique alphanumeric string to identify + the port_set. + type: string + type: object + type: array + type: object + ruleVariables: + description: A configuration block that defines additional + settings available to use in the rules defined in the rule + group. Can only be specified for stateful rule groups. See + Rule Variables below for details. + properties: + ipSets: + description: Set of configuration blocks that define IP + address information. See IP Sets below for details. + items: + properties: + ipSet: + description: A configuration block that defines + a set of IP addresses. See IP Set below for details. + properties: + definition: + description: Set of port ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + key: + description: An unique alphanumeric string to identify + the port_set. + type: string + type: object + type: array + portSets: + description: Set of configuration blocks that define port + range information. See Port Sets below for details. + items: + properties: + key: + description: An unique alphanumeric string to identify + the port_set. + type: string + portSet: + description: A configuration block that defines + a set of port ranges. See Port Set below for details. + properties: + definition: + description: Set of port ranges. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + type: object + rulesSource: + description: A configuration block that defines the stateful + or stateless rules for the rule group. See Rules Source + below for details. + properties: + rulesSourceList: + description: A configuration block containing stateful + inspection criteria for a domain list rule group. See + Rules Source List below for details. + properties: + generatedRulesType: + description: 'String value to specify whether domains + in the target list are allowed or denied access. + Valid values: ALLOWLIST, DENYLIST.' + type: string + targetTypes: + description: 'Set of types of domain specifications + that are provided in the targets argument. Valid + values: HTTP_HOST, TLS_SNI.' + items: + type: string + type: array + x-kubernetes-list-type: set + targets: + description: Set of domains that you want to inspect + for in your traffic flows. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + rulesString: + description: The fully qualified name of a file in an + S3 bucket that contains Suricata compatible intrusion + preventions system (IPS) rules or the Suricata rules + as a string. These rules contain stateful inspection + criteria and the action to take for traffic that matches + the criteria. + type: string + statefulRule: + description: Set of configuration blocks containing stateful + inspection criteria for 5-tuple rules to be used together + in a rule group. See Stateful Rule below for details. + items: + properties: + action: + description: 'Action to take with packets in a traffic + flow when the flow matches the stateful rule criteria. + For all actions, AWS Network Firewall performs + the specified action and discontinues stateful + inspection of the traffic flow. Valid values: + ALERT, DROP, PASS, or REJECT.' 
+ type: string + header: + description: A configuration block containing the + stateful 5-tuple inspection criteria for the rule, + used to inspect traffic flows. See Header below + for details. + properties: + destination: + description: Set of configuration blocks describing + the destination IP address and address ranges + to inspect for, in CIDR notation. If not specified, + this matches with any destination address. + See Destination below for details. + type: string + destinationPort: + description: Set of configuration blocks describing + the destination ports to inspect for. If not + specified, this matches with any destination + port. See Destination Port below for details. + type: string + direction: + description: 'The direction of traffic flow + to inspect. Valid values: ANY or FORWARD.' + type: string + protocol: + description: 'The protocol to inspect. Valid + values: IP, TCP, UDP, ICMP, HTTP, FTP, TLS, + SMB, DNS, DCERPC, SSH, SMTP, IMAP, MSN, KRB5, + IKEV2, TFTP, NTP, DHCP.' + type: string + source: + description: Set of configuration blocks describing + the source IP address and address ranges to + inspect for, in CIDR notation. If not specified, + this matches with any source address. See + Source below for details. + type: string + sourcePort: + description: Set of configuration blocks describing + the source ports to inspect for. If not specified, + this matches with any source port. See Source + Port below for details. + type: string + type: object + ruleOption: + description: Set of configuration blocks containing + additional settings for a stateful rule. See Rule + Option below for details. + items: + properties: + keyword: + description: |- + Keyword defined by open source detection systems like Snort or Suricata for stateful rule inspection. + See Snort General Rule Options or Suricata Rule Options for more details. + type: string + settings: + description: Set of strings for additional + settings to use in stateful rule inspection. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + statelessRulesAndCustomActions: + description: A configuration block containing stateless + inspection criteria for a stateless rule group. See + Stateless Rules and Custom Actions below for details. + properties: + customAction: + description: Set of configuration blocks containing + custom action definitions that are available for + use by the set of stateless rule. See Custom Action + below for details. + items: + properties: + actionDefinition: + description: A configuration block describing + the custom action associated with the action_name. + See Action Definition below for details. + properties: + publishMetricAction: + description: A configuration block describing + the stateless inspection criteria that + publishes the specified metrics to Amazon + CloudWatch for the matching packet. You + can pair this custom action with any of + the standard stateless rule actions. See + Publish Metric Action below for details. + properties: + dimension: + description: Set of configuration blocks + containing the dimension settings + to use for Amazon CloudWatch custom + metrics. See Dimension below for details. + items: + properties: + value: + description: The value to use + in the custom metric dimension. + type: string + type: object + type: array + type: object + type: object + actionName: + description: A friendly name of the custom action. + type: string + type: object + type: array + statelessRule: + description: Set of configuration blocks containing + the stateless rules for use in the stateless rule + group. See Stateless Rule below for details. + items: + properties: + priority: + description: A setting that indicates the order + in which to run this rule relative to all + of the rules that are defined for a stateless + rule group. 
AWS Network Firewall evaluates + the rules in a rule group starting with the + lowest priority setting. + type: number + ruleDefinition: + description: A configuration block defining + the stateless 5-tuple packet inspection criteria + and the action to take on a packet that matches + the criteria. See Rule Definition below for + details. + properties: + actions: + description: 'Set of actions to take on + a packet that matches one of the stateless + rule definition''s match_attributes. For + every rule you must specify 1 standard + action, and you can add custom actions. + Standard actions include: aws:pass, aws:drop, + aws:forward_to_sfe.' + items: + type: string + type: array + x-kubernetes-list-type: set + matchAttributes: + description: A configuration block containing + criteria for AWS Network Firewall to use + to inspect an individual packet in stateless + rule inspection. See Match Attributes + below for details. + properties: + destination: + description: Set of configuration blocks + describing the destination IP address + and address ranges to inspect for, + in CIDR notation. If not specified, + this matches with any destination + address. See Destination below for + details. + items: + properties: + addressDefinition: + description: An IP address or + a block of IP addresses in CIDR + notation. AWS Network Firewall + supports all address ranges + for IPv4. + type: string + type: object + type: array + destinationPort: + description: Set of configuration blocks + describing the destination ports to + inspect for. If not specified, this + matches with any destination port. + See Destination Port below for details. + items: + properties: + fromPort: + description: The lower limit of + the port range. This must be + less than or equal to the to_port. + type: number + toPort: + description: The upper limit of + the port range. This must be + greater than or equal to the + from_port. 
+ type: number + type: object + type: array + protocols: + description: Set of protocols to inspect + for, specified using the protocol's + assigned internet protocol number + (IANA). If not specified, this matches + with any protocol. + items: + type: number + type: array + x-kubernetes-list-type: set + source: + description: Set of configuration blocks + describing the source IP address and + address ranges to inspect for, in + CIDR notation. If not specified, this + matches with any source address. See + Source below for details. + items: + properties: + addressDefinition: + description: An IP address or + a block of IP addresses in CIDR + notation. AWS Network Firewall + supports all address ranges + for IPv4. + type: string + type: object + type: array + sourcePort: + description: Set of configuration blocks + describing the source ports to inspect + for. If not specified, this matches + with any source port. See Source Port + below for details. + items: + properties: + fromPort: + description: The lower limit of + the port range. This must be + less than or equal to the to_port. + type: number + toPort: + description: The upper limit of + the port range. This must be + greater than or equal to the + from_port. + type: number + type: object + type: array + tcpFlag: + description: Set of configuration blocks + containing the TCP flags and masks + to inspect for. If not specified, + this matches with any settings. + items: + properties: + flags: + description: |- + Set of flags to look for in a packet. This setting can only specify values that are also specified in masks. + Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + items: + type: string + type: array + x-kubernetes-list-type: set + masks: + description: |- + Set of flags to consider in the inspection. To inspect all flags, leave this empty. + Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: object + type: object + type: array + type: object + type: object + statefulRuleOptions: + description: A configuration block that defines stateful rule + options for the rule group. See Stateful Rule Options below + for details. + properties: + ruleOrder: + description: 'Indicates how to manage the order of the + rule evaluation for the rule group. Default value: DEFAULT_ACTION_ORDER. + Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER.' + type: string + type: object + type: object + rules: + description: The stateful rule group rules specifications in Suricata + file format, with one rule per line. Use this to import your + existing Suricata compatible rule groups. Required unless rule_group + is specified. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: 'Whether the rule group is stateless (containing + stateless rules) or stateful (containing stateful rules). Valid + values include: STATEFUL or STATELESS.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.capacity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.capacity) + || (has(self.initProvider) && has(self.initProvider.capacity))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: RuleGroupStatus defines the observed state of RuleGroup. 
+ properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) that identifies the + rule group. + type: string + capacity: + description: The maximum number of operating resources that this + rule group can use. For a stateless rule group, the capacity + required is the sum of the capacity requirements of the individual + rules. For a stateful rule group, the minimum capacity required + is the number of individual rules. + type: number + description: + description: A friendly description of the rule group. + type: string + encryptionConfiguration: + description: KMS encryption configuration settings. See Encryption + Configuration below for details. + properties: + keyId: + description: The ID of the customer managed key. You can use + any of the key identifiers that KMS supports, unless you're + using a key that's managed by another account. If you're + using a key managed by another account, then specify the + key ARN. + type: string + type: + description: The type of AWS KMS key to use for encryption + of your Network Firewall resources. Valid values are CUSTOMER_KMS + and AWS_OWNED_KMS_KEY. + type: string + type: object + id: + description: The Amazon Resource Name (ARN) that identifies the + rule group. + type: string + name: + description: A friendly name of the rule group. + type: string + ruleGroup: + description: A configuration block that defines the rule group + rules. Required unless rules is specified. See Rule Group below + for details. + properties: + referenceSets: + description: A configuration block that defines the IP Set + References for the rule group. See Reference Sets below + for details. Please notes that there can only be a maximum + of 5 reference_sets in a rule_group. See the AWS documentation + for details. + properties: + ipSetReferences: + items: + properties: + ipSetReference: + description: Set of configuration blocks that define + the IP Reference information. 
See IP Set Reference + below for details. + items: + properties: + referenceArn: + description: Set of Managed Prefix IP ARN(s) + type: string + type: object + type: array + key: + description: An unique alphanumeric string to identify + the port_set. + type: string + type: object + type: array + type: object + ruleVariables: + description: A configuration block that defines additional + settings available to use in the rules defined in the rule + group. Can only be specified for stateful rule groups. See + Rule Variables below for details. + properties: + ipSets: + description: Set of configuration blocks that define IP + address information. See IP Sets below for details. + items: + properties: + ipSet: + description: A configuration block that defines + a set of IP addresses. See IP Set below for details. + properties: + definition: + description: Set of port ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + key: + description: An unique alphanumeric string to identify + the port_set. + type: string + type: object + type: array + portSets: + description: Set of configuration blocks that define port + range information. See Port Sets below for details. + items: + properties: + key: + description: An unique alphanumeric string to identify + the port_set. + type: string + portSet: + description: A configuration block that defines + a set of port ranges. See Port Set below for details. + properties: + definition: + description: Set of port ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + type: object + rulesSource: + description: A configuration block that defines the stateful + or stateless rules for the rule group. See Rules Source + below for details. + properties: + rulesSourceList: + description: A configuration block containing stateful + inspection criteria for a domain list rule group. See + Rules Source List below for details. 
+ properties: + generatedRulesType: + description: 'String value to specify whether domains + in the target list are allowed or denied access. + Valid values: ALLOWLIST, DENYLIST.' + type: string + targetTypes: + description: 'Set of types of domain specifications + that are provided in the targets argument. Valid + values: HTTP_HOST, TLS_SNI.' + items: + type: string + type: array + x-kubernetes-list-type: set + targets: + description: Set of domains that you want to inspect + for in your traffic flows. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + rulesString: + description: The fully qualified name of a file in an + S3 bucket that contains Suricata compatible intrusion + preventions system (IPS) rules or the Suricata rules + as a string. These rules contain stateful inspection + criteria and the action to take for traffic that matches + the criteria. + type: string + statefulRule: + description: Set of configuration blocks containing stateful + inspection criteria for 5-tuple rules to be used together + in a rule group. See Stateful Rule below for details. + items: + properties: + action: + description: 'Action to take with packets in a traffic + flow when the flow matches the stateful rule criteria. + For all actions, AWS Network Firewall performs + the specified action and discontinues stateful + inspection of the traffic flow. Valid values: + ALERT, DROP, PASS, or REJECT.' + type: string + header: + description: A configuration block containing the + stateful 5-tuple inspection criteria for the rule, + used to inspect traffic flows. See Header below + for details. + properties: + destination: + description: Set of configuration blocks describing + the destination IP address and address ranges + to inspect for, in CIDR notation. If not specified, + this matches with any destination address. + See Destination below for details. 
+ type: string + destinationPort: + description: Set of configuration blocks describing + the destination ports to inspect for. If not + specified, this matches with any destination + port. See Destination Port below for details. + type: string + direction: + description: 'The direction of traffic flow + to inspect. Valid values: ANY or FORWARD.' + type: string + protocol: + description: 'The protocol to inspect. Valid + values: IP, TCP, UDP, ICMP, HTTP, FTP, TLS, + SMB, DNS, DCERPC, SSH, SMTP, IMAP, MSN, KRB5, + IKEV2, TFTP, NTP, DHCP.' + type: string + source: + description: Set of configuration blocks describing + the source IP address and address ranges to + inspect for, in CIDR notation. If not specified, + this matches with any source address. See + Source below for details. + type: string + sourcePort: + description: Set of configuration blocks describing + the source ports to inspect for. If not specified, + this matches with any source port. See Source + Port below for details. + type: string + type: object + ruleOption: + description: Set of configuration blocks containing + additional settings for a stateful rule. See Rule + Option below for details. + items: + properties: + keyword: + description: |- + Keyword defined by open source detection systems like Snort or Suricata for stateful rule inspection. + See Snort General Rule Options or Suricata Rule Options for more details. + type: string + settings: + description: Set of strings for additional + settings to use in stateful rule inspection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: array + statelessRulesAndCustomActions: + description: A configuration block containing stateless + inspection criteria for a stateless rule group. See + Stateless Rules and Custom Actions below for details. 
+ properties: + customAction: + description: Set of configuration blocks containing + custom action definitions that are available for + use by the set of stateless rule. See Custom Action + below for details. + items: + properties: + actionDefinition: + description: A configuration block describing + the custom action associated with the action_name. + See Action Definition below for details. + properties: + publishMetricAction: + description: A configuration block describing + the stateless inspection criteria that + publishes the specified metrics to Amazon + CloudWatch for the matching packet. You + can pair this custom action with any of + the standard stateless rule actions. See + Publish Metric Action below for details. + properties: + dimension: + description: Set of configuration blocks + containing the dimension settings + to use for Amazon CloudWatch custom + metrics. See Dimension below for details. + items: + properties: + value: + description: The value to use + in the custom metric dimension. + type: string + type: object + type: array + type: object + type: object + actionName: + description: A friendly name of the custom action. + type: string + type: object + type: array + statelessRule: + description: Set of configuration blocks containing + the stateless rules for use in the stateless rule + group. See Stateless Rule below for details. + items: + properties: + priority: + description: A setting that indicates the order + in which to run this rule relative to all + of the rules that are defined for a stateless + rule group. AWS Network Firewall evaluates + the rules in a rule group starting with the + lowest priority setting. + type: number + ruleDefinition: + description: A configuration block defining + the stateless 5-tuple packet inspection criteria + and the action to take on a packet that matches + the criteria. See Rule Definition below for + details. 
+ properties: + actions: + description: 'Set of actions to take on + a packet that matches one of the stateless + rule definition''s match_attributes. For + every rule you must specify 1 standard + action, and you can add custom actions. + Standard actions include: aws:pass, aws:drop, + aws:forward_to_sfe.' + items: + type: string + type: array + x-kubernetes-list-type: set + matchAttributes: + description: A configuration block containing + criteria for AWS Network Firewall to use + to inspect an individual packet in stateless + rule inspection. See Match Attributes + below for details. + properties: + destination: + description: Set of configuration blocks + describing the destination IP address + and address ranges to inspect for, + in CIDR notation. If not specified, + this matches with any destination + address. See Destination below for + details. + items: + properties: + addressDefinition: + description: An IP address or + a block of IP addresses in CIDR + notation. AWS Network Firewall + supports all address ranges + for IPv4. + type: string + type: object + type: array + destinationPort: + description: Set of configuration blocks + describing the destination ports to + inspect for. If not specified, this + matches with any destination port. + See Destination Port below for details. + items: + properties: + fromPort: + description: The lower limit of + the port range. This must be + less than or equal to the to_port. + type: number + toPort: + description: The upper limit of + the port range. This must be + greater than or equal to the + from_port. + type: number + type: object + type: array + protocols: + description: Set of protocols to inspect + for, specified using the protocol's + assigned internet protocol number + (IANA). If not specified, this matches + with any protocol. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + source: + description: Set of configuration blocks + describing the source IP address and + address ranges to inspect for, in + CIDR notation. If not specified, this + matches with any source address. See + Source below for details. + items: + properties: + addressDefinition: + description: An IP address or + a block of IP addresses in CIDR + notation. AWS Network Firewall + supports all address ranges + for IPv4. + type: string + type: object + type: array + sourcePort: + description: Set of configuration blocks + describing the source ports to inspect + for. If not specified, this matches + with any source port. See Source Port + below for details. + items: + properties: + fromPort: + description: The lower limit of + the port range. This must be + less than or equal to the to_port. + type: number + toPort: + description: The upper limit of + the port range. This must be + greater than or equal to the + from_port. + type: number + type: object + type: array + tcpFlag: + description: Set of configuration blocks + containing the TCP flags and masks + to inspect for. If not specified, + this matches with any settings. + items: + properties: + flags: + description: |- + Set of flags to look for in a packet. This setting can only specify values that are also specified in masks. + Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + items: + type: string + type: array + x-kubernetes-list-type: set + masks: + description: |- + Set of flags to consider in the inspection. To inspect all flags, leave this empty. + Valid values: FIN, SYN, RST, PSH, ACK, URG, ECE, CWR. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + type: object + type: object + type: array + type: object + type: object + statefulRuleOptions: + description: A configuration block that defines stateful rule + options for the rule group. 
See Stateful Rule Options below + for details. + properties: + ruleOrder: + description: 'Indicates how to manage the order of the + rule evaluation for the rule group. Default value: DEFAULT_ACTION_ORDER. + Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER.' + type: string + type: object + type: object + rules: + description: The stateful rule group rules specifications in Suricata + file format, with one rule per line. Use this to import your + existing Suricata compatible rule groups. Required unless rule_group + is specified. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: + description: 'Whether the rule group is stateless (containing + stateless rules) or stateful (containing stateful rules). Valid + values include: STATEFUL or STATELESS.' + type: string + updateToken: + description: A string token used when updating the rule group. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/networkmanager.aws.upbound.io_connectattachments.yaml b/package/crds/networkmanager.aws.upbound.io_connectattachments.yaml index c816e91177..d93990468e 100644 --- a/package/crds/networkmanager.aws.upbound.io_connectattachments.yaml +++ b/package/crds/networkmanager.aws.upbound.io_connectattachments.yaml @@ -904,3 +904,883 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ConnectAttachment is the Schema for 
the ConnectAttachments API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConnectAttachmentSpec defines the desired state of ConnectAttachment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + coreNetworkId: + description: The ID of a core network where you want to create + the attachment. + type: string + coreNetworkIdRef: + description: Reference to a CoreNetwork in networkmanager to populate + coreNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + coreNetworkIdSelector: + description: Selector for a CoreNetwork in networkmanager to populate + coreNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + edgeLocation: + description: The Region where the edge is located. + type: string + edgeLocationRef: + description: Reference to a VPCAttachment in networkmanager to + populate edgeLocation. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + edgeLocationSelector: + description: Selector for a VPCAttachment in networkmanager to + populate edgeLocation. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + options: + description: Options block. See options for more information. + properties: + protocol: + description: The protocol used for the attachment connection. + Possible values are GRE and NO_ENCAP. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + transportAttachmentId: + description: The ID of the attachment between the two connections. + type: string + transportAttachmentIdRef: + description: Reference to a VPCAttachment in networkmanager to + populate transportAttachmentId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + transportAttachmentIdSelector: + description: Selector for a VPCAttachment in networkmanager to + populate transportAttachmentId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + coreNetworkId: + description: The ID of a core network where you want to create + the attachment. + type: string + coreNetworkIdRef: + description: Reference to a CoreNetwork in networkmanager to populate + coreNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + coreNetworkIdSelector: + description: Selector for a CoreNetwork in networkmanager to populate + coreNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + edgeLocation: + description: The Region where the edge is located. + type: string + edgeLocationRef: + description: Reference to a VPCAttachment in networkmanager to + populate edgeLocation. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + edgeLocationSelector: + description: Selector for a VPCAttachment in networkmanager to + populate edgeLocation. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + options: + description: Options block. See options for more information. + properties: + protocol: + description: The protocol used for the attachment connection. + Possible values are GRE and NO_ENCAP. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + transportAttachmentId: + description: The ID of the attachment between the two connections. + type: string + transportAttachmentIdRef: + description: Reference to a VPCAttachment in networkmanager to + populate transportAttachmentId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + transportAttachmentIdSelector: + description: Selector for a VPCAttachment in networkmanager to + populate transportAttachmentId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.options is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.options) + || (has(self.initProvider) && has(self.initProvider.options))' + status: + description: ConnectAttachmentStatus defines the observed state of ConnectAttachment. + properties: + atProvider: + properties: + arn: + description: The ARN of the attachment. + type: string + attachmentId: + description: The ID of the attachment. 
+ type: string + attachmentPolicyRuleNumber: + description: The policy rule number associated with the attachment. + type: number + attachmentType: + description: The type of attachment. + type: string + coreNetworkArn: + description: The ARN of a core network. + type: string + coreNetworkId: + description: The ID of a core network where you want to create + the attachment. + type: string + edgeLocation: + description: The Region where the edge is located. + type: string + id: + description: The ID of the attachment. + type: string + options: + description: Options block. See options for more information. + properties: + protocol: + description: The protocol used for the attachment connection. + Possible values are GRE and NO_ENCAP. + type: string + type: object + ownerAccountId: + description: The ID of the attachment account owner. + type: string + resourceArn: + description: The attachment resource ARN. + type: string + segmentName: + description: The name of the segment attachment. + type: string + state: + description: The state of the attachment. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + transportAttachmentId: + description: The ID of the attachment between the two connections. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/networkmanager.aws.upbound.io_devices.yaml b/package/crds/networkmanager.aws.upbound.io_devices.yaml index d1bbeb1d61..74df3d74f3 100644 --- a/package/crds/networkmanager.aws.upbound.io_devices.yaml +++ b/package/crds/networkmanager.aws.upbound.io_devices.yaml @@ -816,3 +816,789 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Device is the Schema for the Devices API. Creates a device in + a global network. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DeviceSpec defines the desired state of Device + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + awsLocation: + description: The AWS location of the device. Documented below. + properties: + subnetArn: + description: The Amazon Resource Name (ARN) of the subnet + that the device is located in. + type: string + zone: + description: The Zone that the device is located in. Specify + the ID of an Availability Zone, Local Zone, Wavelength Zone, + or an Outpost. + type: string + type: object + description: + description: A description of the device. + type: string + globalNetworkId: + description: The ID of the global network. + type: string + globalNetworkIdRef: + description: Reference to a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + globalNetworkIdSelector: + description: Selector for a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The location of the device. Documented below. + properties: + address: + description: The physical address. + type: string + latitude: + description: The latitude. + type: string + longitude: + description: The longitude. 
+ type: string + type: object + model: + description: The model of device. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + serialNumber: + description: The serial number of the device. + type: string + siteId: + description: The ID of the site. + type: string + siteIdRef: + description: Reference to a Site in networkmanager to populate + siteId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + siteIdSelector: + description: Selector for a Site in networkmanager to populate + siteId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The type of device. + type: string + vendor: + description: The vendor of the device. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + awsLocation: + description: The AWS location of the device. Documented below. + properties: + subnetArn: + description: The Amazon Resource Name (ARN) of the subnet + that the device is located in. + type: string + zone: + description: The Zone that the device is located in. Specify + the ID of an Availability Zone, Local Zone, Wavelength Zone, + or an Outpost. 
+ type: string + type: object + description: + description: A description of the device. + type: string + globalNetworkId: + description: The ID of the global network. + type: string + globalNetworkIdRef: + description: Reference to a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + globalNetworkIdSelector: + description: Selector for a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The location of the device. Documented below. + properties: + address: + description: The physical address. + type: string + latitude: + description: The latitude. + type: string + longitude: + description: The longitude. + type: string + type: object + model: + description: The model of device. + type: string + serialNumber: + description: The serial number of the device. + type: string + siteId: + description: The ID of the site. + type: string + siteIdRef: + description: Reference to a Site in networkmanager to populate + siteId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + siteIdSelector: + description: Selector for a Site in networkmanager to populate + siteId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The type of device. + type: string + vendor: + description: The vendor of the device. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DeviceStatus defines the observed state of Device. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) of the device. + type: string + awsLocation: + description: The AWS location of the device. Documented below. + properties: + subnetArn: + description: The Amazon Resource Name (ARN) of the subnet + that the device is located in. + type: string + zone: + description: The Zone that the device is located in. Specify + the ID of an Availability Zone, Local Zone, Wavelength Zone, + or an Outpost. 
+ type: string + type: object + description: + description: A description of the device. + type: string + globalNetworkId: + description: The ID of the global network. + type: string + id: + type: string + location: + description: The location of the device. Documented below. + properties: + address: + description: The physical address. + type: string + latitude: + description: The latitude. + type: string + longitude: + description: The longitude. + type: string + type: object + model: + description: The model of device. + type: string + serialNumber: + description: The serial number of the device. + type: string + siteId: + description: The ID of the site. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: + description: The type of device. + type: string + vendor: + description: The vendor of the device. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/networkmanager.aws.upbound.io_links.yaml b/package/crds/networkmanager.aws.upbound.io_links.yaml index 666e6fa548..1e52dc4eb6 100644 --- a/package/crds/networkmanager.aws.upbound.io_links.yaml +++ b/package/crds/networkmanager.aws.upbound.io_links.yaml @@ -751,3 +751,730 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Link is the Schema for the Links API. Creates a link for a site. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkSpec defines the desired state of Link + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bandwidth: + description: The upload speed and download speed in Mbps. Documented + below. + properties: + downloadSpeed: + description: Download speed in Mbps. + type: number + uploadSpeed: + description: Upload speed in Mbps. + type: number + type: object + description: + description: A description of the link. + type: string + globalNetworkId: + description: The ID of the global network. + type: string + globalNetworkIdRef: + description: Reference to a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + globalNetworkIdSelector: + description: Selector for a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + providerName: + description: The provider of the link. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + siteId: + description: The ID of the site. + type: string + siteIdRef: + description: Reference to a Site in networkmanager to populate + siteId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + siteIdSelector: + description: Selector for a Site in networkmanager to populate + siteId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The type of the link. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bandwidth: + description: The upload speed and download speed in Mbps. Documented + below. + properties: + downloadSpeed: + description: Download speed in Mbps. + type: number + uploadSpeed: + description: Upload speed in Mbps. + type: number + type: object + description: + description: A description of the link. + type: string + globalNetworkId: + description: The ID of the global network. 
+ type: string + globalNetworkIdRef: + description: Reference to a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + globalNetworkIdSelector: + description: Selector for a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + providerName: + description: The provider of the link. + type: string + siteId: + description: The ID of the site. + type: string + siteIdRef: + description: Reference to a Site in networkmanager to populate + siteId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + siteIdSelector: + description: Selector for a Site in networkmanager to populate + siteId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: The type of the link. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.bandwidth is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.bandwidth) + || (has(self.initProvider) && has(self.initProvider.bandwidth))' + status: + description: LinkStatus defines the observed state of Link. + properties: + atProvider: + properties: + arn: + description: Link Amazon Resource Name (ARN). + type: string + bandwidth: + description: The upload speed and download speed in Mbps. Documented + below. + properties: + downloadSpeed: + description: Download speed in Mbps. + type: number + uploadSpeed: + description: Upload speed in Mbps. + type: number + type: object + description: + description: A description of the link. + type: string + globalNetworkId: + description: The ID of the global network. + type: string + id: + type: string + providerName: + description: The provider of the link. + type: string + siteId: + description: The ID of the site. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + type: + description: The type of the link. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/networkmanager.aws.upbound.io_sites.yaml b/package/crds/networkmanager.aws.upbound.io_sites.yaml index fb9c3c51ad..ca6a47df8d 100644 --- a/package/crds/networkmanager.aws.upbound.io_sites.yaml +++ b/package/crds/networkmanager.aws.upbound.io_sites.yaml @@ -574,3 +574,553 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Site is the Schema for the Sites API. Creates a site in a global + network. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SiteSpec defines the desired state of Site + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the Site. + type: string + globalNetworkId: + description: The ID of the Global Network to create the site in. + type: string + globalNetworkIdRef: + description: Reference to a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + globalNetworkIdSelector: + description: Selector for a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The site location as documented below. + properties: + address: + description: Address of the location. + type: string + latitude: + description: Latitude of the location. + type: string + longitude: + description: Longitude of the location. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the Site. + type: string + globalNetworkId: + description: The ID of the Global Network to create the site in. + type: string + globalNetworkIdRef: + description: Reference to a GlobalNetwork in networkmanager to + populate globalNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + globalNetworkIdSelector: + description: Selector for a GlobalNetwork in networkmanager to + populate globalNetworkId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The site location as documented below. + properties: + address: + description: Address of the location. + type: string + latitude: + description: Latitude of the location. + type: string + longitude: + description: Longitude of the location. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SiteStatus defines the observed state of Site. + properties: + atProvider: + properties: + arn: + description: Site Amazon Resource Name (ARN) + type: string + description: + description: Description of the Site. + type: string + globalNetworkId: + description: The ID of the Global Network to create the site in. + type: string + id: + type: string + location: + description: The site location as documented below. + properties: + address: + description: Address of the location. + type: string + latitude: + description: Latitude of the location. 
+ type: string + longitude: + description: Longitude of the location. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/networkmanager.aws.upbound.io_vpcattachments.yaml b/package/crds/networkmanager.aws.upbound.io_vpcattachments.yaml index f8eeff71a3..4bc1752917 100644 --- a/package/crds/networkmanager.aws.upbound.io_vpcattachments.yaml +++ b/package/crds/networkmanager.aws.upbound.io_vpcattachments.yaml @@ -926,3 +926,905 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPCAttachment is the Schema for the VPCAttachments API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPCAttachmentSpec defines the desired state of VPCAttachment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + coreNetworkId: + description: The ID of a core network for the VPC attachment. + type: string + coreNetworkIdRef: + description: Reference to a CoreNetwork in networkmanager to populate + coreNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + coreNetworkIdSelector: + description: Selector for a CoreNetwork in networkmanager to populate + coreNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + options: + description: Options for the VPC attachment. + properties: + applianceModeSupport: + description: |- + Indicates whether appliance mode is supported. + If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. + If the VPC attachment is pending acceptance, changing this value will recreate the resource. + type: boolean + ipv6Support: + description: |- + Indicates whether IPv6 is supported. + If the VPC attachment is pending acceptance, changing this value will recreate the resource. 
+ type: boolean + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + subnetArns: + description: The subnet ARN of the VPC attachment. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetArnsRefs: + description: References to Subnet in ec2 to populate subnetArns. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetArnsSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetArns. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcArn: + description: The ARN of the VPC. + type: string + vpcArnRef: + description: Reference to a VPC in ec2 to populate vpcArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcArnSelector: + description: Selector for a VPC in ec2 to populate vpcArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + coreNetworkId: + description: The ID of a core network for the VPC attachment. + type: string + coreNetworkIdRef: + description: Reference to a CoreNetwork in networkmanager to populate + coreNetworkId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + coreNetworkIdSelector: + description: Selector for a CoreNetwork in networkmanager to populate + coreNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + options: + description: Options for the VPC attachment. + properties: + applianceModeSupport: + description: |- + Indicates whether appliance mode is supported. + If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. + If the VPC attachment is pending acceptance, changing this value will recreate the resource. + type: boolean + ipv6Support: + description: |- + Indicates whether IPv6 is supported. + If the VPC attachment is pending acceptance, changing this value will recreate the resource. + type: boolean + type: object + subnetArns: + description: The subnet ARN of the VPC attachment. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetArnsRefs: + description: References to Subnet in ec2 to populate subnetArns. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetArnsSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetArns. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcArn: + description: The ARN of the VPC. + type: string + vpcArnRef: + description: Reference to a VPC in ec2 to populate vpcArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcArnSelector: + description: Selector for a VPC in ec2 to populate vpcArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: VPCAttachmentStatus defines the observed state of VPCAttachment. + properties: + atProvider: + properties: + arn: + description: The ARN of the attachment. + type: string + attachmentPolicyRuleNumber: + description: The policy rule number associated with the attachment. + type: number + attachmentType: + description: The type of attachment. + type: string + coreNetworkArn: + description: The ARN of a core network. + type: string + coreNetworkId: + description: The ID of a core network for the VPC attachment. + type: string + edgeLocation: + description: The Region where the edge is located. 
+ type: string + id: + description: The ID of the attachment. + type: string + options: + description: Options for the VPC attachment. + properties: + applianceModeSupport: + description: |- + Indicates whether appliance mode is supported. + If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. + If the VPC attachment is pending acceptance, changing this value will recreate the resource. + type: boolean + ipv6Support: + description: |- + Indicates whether IPv6 is supported. + If the VPC attachment is pending acceptance, changing this value will recreate the resource. + type: boolean + type: object + ownerAccountId: + description: The ID of the attachment account owner. + type: string + resourceArn: + description: The attachment resource ARN. + type: string + segmentName: + description: The name of the segment attachment. + type: string + state: + description: The state of the attachment. + type: string + subnetArns: + description: The subnet ARN of the VPC attachment. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcArn: + description: The ARN of the VPC. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opensearch.aws.upbound.io_domains.yaml b/package/crds/opensearch.aws.upbound.io_domains.yaml index 7d05983cbb..f68bdc2d32 100644 --- a/package/crds/opensearch.aws.upbound.io_domains.yaml +++ b/package/crds/opensearch.aws.upbound.io_domains.yaml @@ -2207,3 +2207,1607 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Domain is the Schema for the Domains API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DomainSpec defines the desired state of Domain + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + advancedOptions: + additionalProperties: + type: string + description: Key-value string pairs to specify advanced configuration + options. + type: object + x-kubernetes-map-type: granular + advancedSecurityOptions: + description: Configuration block for fine-grained access control. + Detailed below. + properties: + anonymousAuthEnabled: + description: Whether Anonymous auth is enabled. Enables fine-grained + access control on an existing domain. Ignored unless advanced_security_options + are enabled. Can only be enabled on an existing domain. + type: boolean + enabled: + description: Whether advanced security is enabled. + type: boolean + internalUserDatabaseEnabled: + description: Whether the internal user database is enabled. + Default is false. + type: boolean + masterUserOptions: + description: Configuration block for the main user. Detailed + below. + properties: + masterUserArn: + description: ARN for the main user. Only specify if internal_user_database_enabled + is not set or set to false. + type: string + masterUserName: + description: Main user's username, which is stored in + the Amazon OpenSearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + type: string + masterUserPasswordSecretRef: + description: Main user's password, which is stored in + the Amazon OpenSearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: object + autoTuneOptions: + description: Configuration block for the Auto-Tune options of + the domain. Detailed below. + properties: + desiredState: + description: 'Auto-Tune desired state for the domain. Valid + values: ENABLED or DISABLED.' + type: string + maintenanceSchedule: + description: Configuration block for Auto-Tune maintenance + windows. Can be specified multiple times for each maintenance + window. Detailed below. + items: + properties: + cronExpressionForRecurrence: + description: A cron expression specifying the recurrence + pattern for an Auto-Tune maintenance schedule. + type: string + duration: + description: Configuration block for the duration of + the Auto-Tune maintenance window. Detailed below. + properties: + unit: + description: 'Unit of time specifying the duration + of an Auto-Tune maintenance window. Valid values: + HOURS.' + type: string + value: + description: An integer specifying the value of + the duration of an Auto-Tune maintenance window. + type: number + type: object + startAt: + description: Date and time at which to start the Auto-Tune + maintenance schedule in RFC3339 format. + type: string + type: object + type: array + rollbackOnDisable: + description: 'Whether to roll back to default Auto-Tune settings + when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK + or NO_ROLLBACK.' + type: string + useOffPeakWindow: + description: Whether to schedule Auto-Tune optimizations that + require blue/green deployments during the domain's configured + daily off-peak window. Defaults to false. + type: boolean + type: object + clusterConfig: + description: Configuration block for the cluster of the domain. + Detailed below. 
+ properties: + coldStorageOptions: + description: Configuration block containing cold storage configuration. + Detailed below. + properties: + enabled: + description: Boolean to enable cold storage for an OpenSearch + domain. Defaults to false. Master and ultrawarm nodes + must be enabled for cold storage. + type: boolean + type: object + dedicatedMasterCount: + description: Number of dedicated main nodes in the cluster. + type: number + dedicatedMasterEnabled: + description: Whether dedicated main nodes are enabled for + the cluster. + type: boolean + dedicatedMasterType: + description: Instance type of the dedicated main nodes in + the cluster. + type: string + instanceCount: + description: Number of instances in the cluster. + type: number + instanceType: + description: Instance type of data nodes in the cluster. + type: string + multiAzWithStandbyEnabled: + description: Whether a multi-AZ domain is turned on with a + standby AZ. For more information, see Configuring a multi-AZ + domain in Amazon OpenSearch Service. + type: boolean + warmCount: + description: Number of warm nodes in the cluster. Valid values + are between 2 and 150. warm_count can be only and must be + set when warm_enabled is set to true. + type: number + warmEnabled: + description: Whether to enable warm storage. + type: boolean + warmType: + description: Instance type for the OpenSearch cluster's warm + nodes. Valid values are ultrawarm1.medium.search, ultrawarm1.large.search + and ultrawarm1.xlarge.search. warm_type can be only and + must be set when warm_enabled is set to true. + type: string + zoneAwarenessConfig: + description: Configuration block containing zone awareness + settings. Detailed below. + properties: + availabilityZoneCount: + description: 'Number of Availability Zones for the domain + to use with zone_awareness_enabled. Defaults to 2. Valid + values: 2 or 3.' 
+ type: number + type: object + zoneAwarenessEnabled: + description: Whether zone awareness is enabled, set to true + for multi-az deployment. To enable awareness with three + Availability Zones, the availability_zone_count within the + zone_awareness_config must be set to 3. + type: boolean + type: object + cognitoOptions: + description: Configuration block for authenticating dashboard + with Cognito. Detailed below. + properties: + enabled: + description: Whether Amazon Cognito authentication with Dashboard + is enabled or not. Default is false. + type: boolean + identityPoolId: + description: ID of the Cognito Identity Pool to use. + type: string + roleArn: + description: ARN of the IAM role that has the AmazonOpenSearchServiceCognitoAccess + policy attached. + type: string + userPoolId: + description: ID of the Cognito User Pool to use. + type: string + type: object + domainEndpointOptions: + description: Configuration block for domain endpoint HTTP(S) related + options. Detailed below. + properties: + customEndpoint: + description: Fully qualified domain for your custom endpoint. + type: string + customEndpointCertificateArn: + description: ACM certificate ARN for your custom endpoint. + type: string + customEndpointEnabled: + description: Whether to enable custom endpoint for the OpenSearch + domain. + type: boolean + enforceHttps: + description: Whether or not to require HTTPS. Defaults to + true. + type: boolean + tlsSecurityPolicy: + description: Name of the TLS security policy that needs to + be applied to the HTTPS endpoint. For valid values, refer + to the AWS documentation. + type: string + type: object + domainName: + description: Name of the domain. + type: string + ebsOptions: + description: Configuration block for EBS related options, may + be required based on chosen instance size. Detailed below. + properties: + ebsEnabled: + description: Whether EBS volumes are attached to data nodes + in the domain. 
+ type: boolean + iops: + description: Baseline input/output (I/O) performance of EBS + volumes attached to data nodes. Applicable only for the + GP3 and Provisioned IOPS EBS volume types. + type: number + throughput: + description: Specifies the throughput (in MiB/s) of the EBS + volumes attached to data nodes. Applicable only for the + gp3 volume type. + type: number + volumeSize: + description: Size of EBS volumes attached to data nodes (in + GiB). + type: number + volumeType: + description: Type of EBS volumes attached to data nodes. + type: string + type: object + encryptAtRest: + description: Configuration block for encrypt at rest options. + Only available for certain instance types. Detailed below. + properties: + enabled: + description: Whether to enable encryption at rest. If the + encrypt_at_rest block is not provided then this defaults + to false. Enabling encryption on new domains requires an + engine_version of OpenSearch_X.Y or Elasticsearch_5.1 or + greater. + type: boolean + kmsKeyId: + description: KMS key ARN to encrypt the Elasticsearch domain + with. If not specified then it defaults to using the aws/es + service KMS key. Note that KMS will accept a KMS key ID + but will return the key ARN. + type: string + type: object + engineVersion: + description: while Elasticsearch has elasticsearch_version + type: string + logPublishingOptions: + description: Configuration block for publishing slow and application + logs to CloudWatch Logs. This block can be declared multiple + times, for each log_type, within the same resource. Detailed + below. + items: + properties: + cloudwatchLogGroupArn: + description: ARN of the Cloudwatch log group to which log + needs to be published. + type: string + cloudwatchLogGroupArnRef: + description: Reference to a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudwatchLogGroupArnSelector: + description: Selector for a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Whether given log publishing option is enabled + or not. + type: boolean + logType: + description: 'Type of OpenSearch log. Valid values: INDEX_SLOW_LOGS, + SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS.' + type: string + type: object + type: array + nodeToNodeEncryption: + description: Configuration block for node-to-node encryption options. + Detailed below. + properties: + enabled: + description: Whether to enable node-to-node encryption. If + the node_to_node_encryption block is not provided then this + defaults to false. Enabling node-to-node encryption of a + new domain requires an engine_version of OpenSearch_X.Y + or Elasticsearch_6.0 or greater. + type: boolean + type: object + offPeakWindowOptions: + description: Configuration to add Off Peak update options. (documentation). + Detailed below. + properties: + enabled: + description: Enabled disabled toggle for off-peak update window. + type: boolean + offPeakWindow: + properties: + windowStartTime: + description: 10h window for updates + properties: + hours: + description: Starting hour of the 10-hour window for + updates + type: number + minutes: + description: Starting minute of the 10-hour window + for updates + type: number + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + snapshotOptions: + description: Configuration block for snapshot related options. + Detailed below. DEPRECATED. For domains running OpenSearch 5.3 + and later, Amazon OpenSearch takes hourly automated snapshots, + making this setting irrelevant. For domains running earlier + versions, OpenSearch takes daily automated snapshots. + properties: + automatedSnapshotStartHour: + description: Hour during which the service takes an automated + daily snapshot of the indices in the domain. 
+ type: number + type: object + softwareUpdateOptions: + description: Software update options for the domain. Detailed + below. + properties: + autoSoftwareUpdateEnabled: + description: Whether automatic service software updates are + enabled for the domain. Defaults to false. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcOptions: + description: Configuration block for VPC related options. Adding + or removing this configuration forces a new resource (documentation). + Detailed below. + properties: + securityGroupIds: + description: List of VPC Security Group IDs to be applied + to the OpenSearch domain endpoints. If omitted, the default + Security Group for the VPC will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: List of VPC Subnet IDs for the OpenSearch domain + endpoints to be created in. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + advancedOptions: + additionalProperties: + type: string + description: Key-value string pairs to specify advanced configuration + options. 
+ type: object + x-kubernetes-map-type: granular + advancedSecurityOptions: + description: Configuration block for fine-grained access control. + Detailed below. + properties: + anonymousAuthEnabled: + description: Whether Anonymous auth is enabled. Enables fine-grained + access control on an existing domain. Ignored unless advanced_security_options + are enabled. Can only be enabled on an existing domain. + type: boolean + enabled: + description: Whether advanced security is enabled. + type: boolean + internalUserDatabaseEnabled: + description: Whether the internal user database is enabled. + Default is false. + type: boolean + masterUserOptions: + description: Configuration block for the main user. Detailed + below. + properties: + masterUserArn: + description: ARN for the main user. Only specify if internal_user_database_enabled + is not set or set to false. + type: string + masterUserName: + description: Main user's username, which is stored in + the Amazon OpenSearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + type: string + masterUserPasswordSecretRef: + description: Main user's password, which is stored in + the Amazon OpenSearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: object + autoTuneOptions: + description: Configuration block for the Auto-Tune options of + the domain. Detailed below. + properties: + desiredState: + description: 'Auto-Tune desired state for the domain. Valid + values: ENABLED or DISABLED.' + type: string + maintenanceSchedule: + description: Configuration block for Auto-Tune maintenance + windows. 
Can be specified multiple times for each maintenance + window. Detailed below. + items: + properties: + cronExpressionForRecurrence: + description: A cron expression specifying the recurrence + pattern for an Auto-Tune maintenance schedule. + type: string + duration: + description: Configuration block for the duration of + the Auto-Tune maintenance window. Detailed below. + properties: + unit: + description: 'Unit of time specifying the duration + of an Auto-Tune maintenance window. Valid values: + HOURS.' + type: string + value: + description: An integer specifying the value of + the duration of an Auto-Tune maintenance window. + type: number + type: object + startAt: + description: Date and time at which to start the Auto-Tune + maintenance schedule in RFC3339 format. + type: string + type: object + type: array + rollbackOnDisable: + description: 'Whether to roll back to default Auto-Tune settings + when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK + or NO_ROLLBACK.' + type: string + useOffPeakWindow: + description: Whether to schedule Auto-Tune optimizations that + require blue/green deployments during the domain's configured + daily off-peak window. Defaults to false. + type: boolean + type: object + clusterConfig: + description: Configuration block for the cluster of the domain. + Detailed below. + properties: + coldStorageOptions: + description: Configuration block containing cold storage configuration. + Detailed below. + properties: + enabled: + description: Boolean to enable cold storage for an OpenSearch + domain. Defaults to false. Master and ultrawarm nodes + must be enabled for cold storage. + type: boolean + type: object + dedicatedMasterCount: + description: Number of dedicated main nodes in the cluster. + type: number + dedicatedMasterEnabled: + description: Whether dedicated main nodes are enabled for + the cluster. + type: boolean + dedicatedMasterType: + description: Instance type of the dedicated main nodes in + the cluster. 
+ type: string + instanceCount: + description: Number of instances in the cluster. + type: number + instanceType: + description: Instance type of data nodes in the cluster. + type: string + multiAzWithStandbyEnabled: + description: Whether a multi-AZ domain is turned on with a + standby AZ. For more information, see Configuring a multi-AZ + domain in Amazon OpenSearch Service. + type: boolean + warmCount: + description: Number of warm nodes in the cluster. Valid values + are between 2 and 150. warm_count can be only and must be + set when warm_enabled is set to true. + type: number + warmEnabled: + description: Whether to enable warm storage. + type: boolean + warmType: + description: Instance type for the OpenSearch cluster's warm + nodes. Valid values are ultrawarm1.medium.search, ultrawarm1.large.search + and ultrawarm1.xlarge.search. warm_type can be only and + must be set when warm_enabled is set to true. + type: string + zoneAwarenessConfig: + description: Configuration block containing zone awareness + settings. Detailed below. + properties: + availabilityZoneCount: + description: 'Number of Availability Zones for the domain + to use with zone_awareness_enabled. Defaults to 2. Valid + values: 2 or 3.' + type: number + type: object + zoneAwarenessEnabled: + description: Whether zone awareness is enabled, set to true + for multi-az deployment. To enable awareness with three + Availability Zones, the availability_zone_count within the + zone_awareness_config must be set to 3. + type: boolean + type: object + cognitoOptions: + description: Configuration block for authenticating dashboard + with Cognito. Detailed below. + properties: + enabled: + description: Whether Amazon Cognito authentication with Dashboard + is enabled or not. Default is false. + type: boolean + identityPoolId: + description: ID of the Cognito Identity Pool to use. 
+ type: string + roleArn: + description: ARN of the IAM role that has the AmazonOpenSearchServiceCognitoAccess + policy attached. + type: string + userPoolId: + description: ID of the Cognito User Pool to use. + type: string + type: object + domainEndpointOptions: + description: Configuration block for domain endpoint HTTP(S) related + options. Detailed below. + properties: + customEndpoint: + description: Fully qualified domain for your custom endpoint. + type: string + customEndpointCertificateArn: + description: ACM certificate ARN for your custom endpoint. + type: string + customEndpointEnabled: + description: Whether to enable custom endpoint for the OpenSearch + domain. + type: boolean + enforceHttps: + description: Whether or not to require HTTPS. Defaults to + true. + type: boolean + tlsSecurityPolicy: + description: Name of the TLS security policy that needs to + be applied to the HTTPS endpoint. For valid values, refer + to the AWS documentation. + type: string + type: object + domainName: + description: Name of the domain. + type: string + ebsOptions: + description: Configuration block for EBS related options, may + be required based on chosen instance size. Detailed below. + properties: + ebsEnabled: + description: Whether EBS volumes are attached to data nodes + in the domain. + type: boolean + iops: + description: Baseline input/output (I/O) performance of EBS + volumes attached to data nodes. Applicable only for the + GP3 and Provisioned IOPS EBS volume types. + type: number + throughput: + description: Specifies the throughput (in MiB/s) of the EBS + volumes attached to data nodes. Applicable only for the + gp3 volume type. + type: number + volumeSize: + description: Size of EBS volumes attached to data nodes (in + GiB). + type: number + volumeType: + description: Type of EBS volumes attached to data nodes. + type: string + type: object + encryptAtRest: + description: Configuration block for encrypt at rest options. 
+ Only available for certain instance types. Detailed below. + properties: + enabled: + description: Whether to enable encryption at rest. If the + encrypt_at_rest block is not provided then this defaults + to false. Enabling encryption on new domains requires an + engine_version of OpenSearch_X.Y or Elasticsearch_5.1 or + greater. + type: boolean + kmsKeyId: + description: KMS key ARN to encrypt the Elasticsearch domain + with. If not specified then it defaults to using the aws/es + service KMS key. Note that KMS will accept a KMS key ID + but will return the key ARN. + type: string + type: object + engineVersion: + description: while Elasticsearch has elasticsearch_version + type: string + logPublishingOptions: + description: Configuration block for publishing slow and application + logs to CloudWatch Logs. This block can be declared multiple + times, for each log_type, within the same resource. Detailed + below. + items: + properties: + cloudwatchLogGroupArn: + description: ARN of the Cloudwatch log group to which log + needs to be published. + type: string + cloudwatchLogGroupArnRef: + description: Reference to a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cloudwatchLogGroupArnSelector: + description: Selector for a Group in cloudwatchlogs to populate + cloudwatchLogGroupArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Whether given log publishing option is enabled + or not. + type: boolean + logType: + description: 'Type of OpenSearch log. Valid values: INDEX_SLOW_LOGS, + SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS.' + type: string + type: object + type: array + nodeToNodeEncryption: + description: Configuration block for node-to-node encryption options. + Detailed below. + properties: + enabled: + description: Whether to enable node-to-node encryption. If + the node_to_node_encryption block is not provided then this + defaults to false. 
Enabling node-to-node encryption of a + new domain requires an engine_version of OpenSearch_X.Y + or Elasticsearch_6.0 or greater. + type: boolean + type: object + offPeakWindowOptions: + description: Configuration to add Off Peak update options. (documentation). + Detailed below. + properties: + enabled: + description: Enabled disabled toggle for off-peak update window. + type: boolean + offPeakWindow: + properties: + windowStartTime: + description: 10h window for updates + properties: + hours: + description: Starting hour of the 10-hour window for + updates + type: number + minutes: + description: Starting minute of the 10-hour window + for updates + type: number + type: object + type: object + type: object + snapshotOptions: + description: Configuration block for snapshot related options. + Detailed below. DEPRECATED. For domains running OpenSearch 5.3 + and later, Amazon OpenSearch takes hourly automated snapshots, + making this setting irrelevant. For domains running earlier + versions, OpenSearch takes daily automated snapshots. + properties: + automatedSnapshotStartHour: + description: Hour during which the service takes an automated + daily snapshot of the indices in the domain. + type: number + type: object + softwareUpdateOptions: + description: Software update options for the domain. Detailed + below. + properties: + autoSoftwareUpdateEnabled: + description: Whether automatic service software updates are + enabled for the domain. Defaults to false. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcOptions: + description: Configuration block for VPC related options. Adding + or removing this configuration forces a new resource (documentation). + Detailed below. + properties: + securityGroupIds: + description: List of VPC Security Group IDs to be applied + to the OpenSearch domain endpoints. 
If omitted, the default + Security Group for the VPC will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: List of VPC Subnet IDs for the OpenSearch domain + endpoints to be created in. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.domainName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.domainName) + || (has(self.initProvider) && has(self.initProvider.domainName))' + status: + description: DomainStatus defines the observed state of Domain. + properties: + atProvider: + properties: + accessPolicies: + description: ', are prefaced with es: for both.' + type: string + advancedOptions: + additionalProperties: + type: string + description: Key-value string pairs to specify advanced configuration + options. + type: object + x-kubernetes-map-type: granular + advancedSecurityOptions: + description: Configuration block for fine-grained access control. + Detailed below. + properties: + anonymousAuthEnabled: + description: Whether Anonymous auth is enabled. Enables fine-grained + access control on an existing domain. Ignored unless advanced_security_options + are enabled. Can only be enabled on an existing domain. + type: boolean + enabled: + description: Whether advanced security is enabled. + type: boolean + internalUserDatabaseEnabled: + description: Whether the internal user database is enabled. + Default is false. + type: boolean + masterUserOptions: + description: Configuration block for the main user. Detailed + below. + properties: + masterUserArn: + description: ARN for the main user. Only specify if internal_user_database_enabled + is not set or set to false. + type: string + masterUserName: + description: Main user's username, which is stored in + the Amazon OpenSearch Service domain's internal database. + Only specify if internal_user_database_enabled is set + to true. + type: string + type: object + type: object + arn: + description: ARN of the domain. 
+ type: string + autoTuneOptions: + description: Configuration block for the Auto-Tune options of + the domain. Detailed below. + properties: + desiredState: + description: 'Auto-Tune desired state for the domain. Valid + values: ENABLED or DISABLED.' + type: string + maintenanceSchedule: + description: Configuration block for Auto-Tune maintenance + windows. Can be specified multiple times for each maintenance + window. Detailed below. + items: + properties: + cronExpressionForRecurrence: + description: A cron expression specifying the recurrence + pattern for an Auto-Tune maintenance schedule. + type: string + duration: + description: Configuration block for the duration of + the Auto-Tune maintenance window. Detailed below. + properties: + unit: + description: 'Unit of time specifying the duration + of an Auto-Tune maintenance window. Valid values: + HOURS.' + type: string + value: + description: An integer specifying the value of + the duration of an Auto-Tune maintenance window. + type: number + type: object + startAt: + description: Date and time at which to start the Auto-Tune + maintenance schedule in RFC3339 format. + type: string + type: object + type: array + rollbackOnDisable: + description: 'Whether to roll back to default Auto-Tune settings + when disabling Auto-Tune. Valid values: DEFAULT_ROLLBACK + or NO_ROLLBACK.' + type: string + useOffPeakWindow: + description: Whether to schedule Auto-Tune optimizations that + require blue/green deployments during the domain's configured + daily off-peak window. Defaults to false. + type: boolean + type: object + clusterConfig: + description: Configuration block for the cluster of the domain. + Detailed below. + properties: + coldStorageOptions: + description: Configuration block containing cold storage configuration. + Detailed below. + properties: + enabled: + description: Boolean to enable cold storage for an OpenSearch + domain. Defaults to false. 
Master and ultrawarm nodes + must be enabled for cold storage. + type: boolean + type: object + dedicatedMasterCount: + description: Number of dedicated main nodes in the cluster. + type: number + dedicatedMasterEnabled: + description: Whether dedicated main nodes are enabled for + the cluster. + type: boolean + dedicatedMasterType: + description: Instance type of the dedicated main nodes in + the cluster. + type: string + instanceCount: + description: Number of instances in the cluster. + type: number + instanceType: + description: Instance type of data nodes in the cluster. + type: string + multiAzWithStandbyEnabled: + description: Whether a multi-AZ domain is turned on with a + standby AZ. For more information, see Configuring a multi-AZ + domain in Amazon OpenSearch Service. + type: boolean + warmCount: + description: Number of warm nodes in the cluster. Valid values + are between 2 and 150. warm_count can be only and must be + set when warm_enabled is set to true. + type: number + warmEnabled: + description: Whether to enable warm storage. + type: boolean + warmType: + description: Instance type for the OpenSearch cluster's warm + nodes. Valid values are ultrawarm1.medium.search, ultrawarm1.large.search + and ultrawarm1.xlarge.search. warm_type can be only and + must be set when warm_enabled is set to true. + type: string + zoneAwarenessConfig: + description: Configuration block containing zone awareness + settings. Detailed below. + properties: + availabilityZoneCount: + description: 'Number of Availability Zones for the domain + to use with zone_awareness_enabled. Defaults to 2. Valid + values: 2 or 3.' + type: number + type: object + zoneAwarenessEnabled: + description: Whether zone awareness is enabled, set to true + for multi-az deployment. To enable awareness with three + Availability Zones, the availability_zone_count within the + zone_awareness_config must be set to 3. 
+ type: boolean + type: object + cognitoOptions: + description: Configuration block for authenticating dashboard + with Cognito. Detailed below. + properties: + enabled: + description: Whether Amazon Cognito authentication with Dashboard + is enabled or not. Default is false. + type: boolean + identityPoolId: + description: ID of the Cognito Identity Pool to use. + type: string + roleArn: + description: ARN of the IAM role that has the AmazonOpenSearchServiceCognitoAccess + policy attached. + type: string + userPoolId: + description: ID of the Cognito User Pool to use. + type: string + type: object + dashboardEndpoint: + description: Domain-specific endpoint for Dashboard without https + scheme. + type: string + domainEndpointOptions: + description: Configuration block for domain endpoint HTTP(S) related + options. Detailed below. + properties: + customEndpoint: + description: Fully qualified domain for your custom endpoint. + type: string + customEndpointCertificateArn: + description: ACM certificate ARN for your custom endpoint. + type: string + customEndpointEnabled: + description: Whether to enable custom endpoint for the OpenSearch + domain. + type: boolean + enforceHttps: + description: Whether or not to require HTTPS. Defaults to + true. + type: boolean + tlsSecurityPolicy: + description: Name of the TLS security policy that needs to + be applied to the HTTPS endpoint. For valid values, refer + to the AWS documentation. + type: string + type: object + domainId: + description: Unique identifier for the domain. + type: string + domainName: + description: Name of the domain. + type: string + ebsOptions: + description: Configuration block for EBS related options, may + be required based on chosen instance size. Detailed below. + properties: + ebsEnabled: + description: Whether EBS volumes are attached to data nodes + in the domain. + type: boolean + iops: + description: Baseline input/output (I/O) performance of EBS + volumes attached to data nodes. 
Applicable only for the + GP3 and Provisioned IOPS EBS volume types. + type: number + throughput: + description: Specifies the throughput (in MiB/s) of the EBS + volumes attached to data nodes. Applicable only for the + gp3 volume type. + type: number + volumeSize: + description: Size of EBS volumes attached to data nodes (in + GiB). + type: number + volumeType: + description: Type of EBS volumes attached to data nodes. + type: string + type: object + encryptAtRest: + description: Configuration block for encrypt at rest options. + Only available for certain instance types. Detailed below. + properties: + enabled: + description: Whether to enable encryption at rest. If the + encrypt_at_rest block is not provided then this defaults + to false. Enabling encryption on new domains requires an + engine_version of OpenSearch_X.Y or Elasticsearch_5.1 or + greater. + type: boolean + kmsKeyId: + description: KMS key ARN to encrypt the Elasticsearch domain + with. If not specified then it defaults to using the aws/es + service KMS key. Note that KMS will accept a KMS key ID + but will return the key ARN. + type: string + type: object + endpoint: + description: Domain-specific endpoint used to submit index, search, + and data upload requests. + type: string + engineVersion: + description: while Elasticsearch has elasticsearch_version + type: string + id: + type: string + kibanaEndpoint: + description: (Deprecated) Domain-specific endpoint for kibana + without https scheme. Use the dashboard_endpoint attribute instead. + type: string + logPublishingOptions: + description: Configuration block for publishing slow and application + logs to CloudWatch Logs. This block can be declared multiple + times, for each log_type, within the same resource. Detailed + below. + items: + properties: + cloudwatchLogGroupArn: + description: ARN of the Cloudwatch log group to which log + needs to be published. 
+ type: string + enabled: + description: Whether given log publishing option is enabled + or not. + type: boolean + logType: + description: 'Type of OpenSearch log. Valid values: INDEX_SLOW_LOGS, + SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS, AUDIT_LOGS.' + type: string + type: object + type: array + nodeToNodeEncryption: + description: Configuration block for node-to-node encryption options. + Detailed below. + properties: + enabled: + description: Whether to enable node-to-node encryption. If + the node_to_node_encryption block is not provided then this + defaults to false. Enabling node-to-node encryption of a + new domain requires an engine_version of OpenSearch_X.Y + or Elasticsearch_6.0 or greater. + type: boolean + type: object + offPeakWindowOptions: + description: Configuration to add Off Peak update options. (documentation). + Detailed below. + properties: + enabled: + description: Enabled disabled toggle for off-peak update window. + type: boolean + offPeakWindow: + properties: + windowStartTime: + description: 10h window for updates + properties: + hours: + description: Starting hour of the 10-hour window for + updates + type: number + minutes: + description: Starting minute of the 10-hour window + for updates + type: number + type: object + type: object + type: object + snapshotOptions: + description: Configuration block for snapshot related options. + Detailed below. DEPRECATED. For domains running OpenSearch 5.3 + and later, Amazon OpenSearch takes hourly automated snapshots, + making this setting irrelevant. For domains running earlier + versions, OpenSearch takes daily automated snapshots. + properties: + automatedSnapshotStartHour: + description: Hour during which the service takes an automated + daily snapshot of the indices in the domain. + type: number + type: object + softwareUpdateOptions: + description: Software update options for the domain. Detailed + below. 
+ properties: + autoSoftwareUpdateEnabled: + description: Whether automatic service software updates are + enabled for the domain. Defaults to false. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + vpcOptions: + description: Configuration block for VPC related options. Adding + or removing this configuration forces a new resource (documentation). + Detailed below. + properties: + availabilityZones: + description: If the domain was created inside a VPC, the names + of the availability zones the configured subnet_ids were + created inside. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIds: + description: List of VPC Security Group IDs to be applied + to the OpenSearch domain endpoints. If omitted, the default + Security Group for the VPC will be used. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: List of VPC Subnet IDs for the OpenSearch domain + endpoints to be created in. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: If the domain was created inside a VPC, the ID + of the VPC. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opensearch.aws.upbound.io_domainsamloptions.yaml b/package/crds/opensearch.aws.upbound.io_domainsamloptions.yaml index 53667ddc4f..b3f5eb9129 100644 --- a/package/crds/opensearch.aws.upbound.io_domainsamloptions.yaml +++ b/package/crds/opensearch.aws.upbound.io_domainsamloptions.yaml @@ -649,3 +649,617 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DomainSAMLOptions is the Schema for the DomainSAMLOptionss API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DomainSAMLOptionsSpec defines the desired state of DomainSAMLOptions + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + domainName: + description: Name of the domain. + type: string + domainNameRef: + description: Reference to a Domain in opensearch to populate domainName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainNameSelector: + description: Selector for a Domain in opensearch to populate domainName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + samlOptions: + description: SAML authentication options for an AWS OpenSearch + Domain. + properties: + enabled: + description: Whether SAML authentication is enabled. + type: boolean + idp: + description: Information from your identity provider. + properties: + entityId: + description: Unique Entity ID of the application in SAML + Identity Provider. + type: string + metadataContent: + description: Metadata of the SAML application in xml format. 
+ type: string + type: object + masterBackendRole: + description: This backend role from the SAML IdP receives + full permissions to the cluster, equivalent to a new master + user. + type: string + masterUserNameSecretRef: + description: This username from the SAML IdP receives full + permissions to the cluster, equivalent to a new master user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + rolesKey: + description: Element of the SAML assertion to use for backend + roles. Default is roles. + type: string + sessionTimeoutMinutes: + description: Duration of a session in minutes after a user + logs in. Default is 60. Maximum value is 1,440. + type: number + subjectKey: + description: Element of the SAML assertion to use for username. + Default is NameID. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + domainName: + description: Name of the domain. + type: string + domainNameRef: + description: Reference to a Domain in opensearch to populate domainName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainNameSelector: + description: Selector for a Domain in opensearch to populate domainName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + samlOptions: + description: SAML authentication options for an AWS OpenSearch + Domain. + properties: + enabled: + description: Whether SAML authentication is enabled. + type: boolean + idp: + description: Information from your identity provider. + properties: + entityId: + description: Unique Entity ID of the application in SAML + Identity Provider. + type: string + metadataContent: + description: Metadata of the SAML application in xml format. + type: string + type: object + masterBackendRole: + description: This backend role from the SAML IdP receives + full permissions to the cluster, equivalent to a new master + user. + type: string + masterUserNameSecretRef: + description: This username from the SAML IdP receives full + permissions to the cluster, equivalent to a new master user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + rolesKey: + description: Element of the SAML assertion to use for backend + roles. Default is roles. + type: string + sessionTimeoutMinutes: + description: Duration of a session in minutes after a user + logs in. Default is 60. Maximum value is 1,440. + type: number + subjectKey: + description: Element of the SAML assertion to use for username. + Default is NameID. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DomainSAMLOptionsStatus defines the observed state of DomainSAMLOptions. + properties: + atProvider: + properties: + domainName: + description: Name of the domain. + type: string + id: + description: Name of the domain the SAML options are associated + with. + type: string + samlOptions: + description: SAML authentication options for an AWS OpenSearch + Domain. + properties: + enabled: + description: Whether SAML authentication is enabled. + type: boolean + idp: + description: Information from your identity provider. 
+ properties: + entityId: + description: Unique Entity ID of the application in SAML + Identity Provider. + type: string + metadataContent: + description: Metadata of the SAML application in xml format. + type: string + type: object + masterBackendRole: + description: This backend role from the SAML IdP receives + full permissions to the cluster, equivalent to a new master + user. + type: string + rolesKey: + description: Element of the SAML assertion to use for backend + roles. Default is roles. + type: string + sessionTimeoutMinutes: + description: Duration of a session in minutes after a user + logs in. Default is 60. Maximum value is 1,440. + type: number + subjectKey: + description: Element of the SAML assertion to use for username. + Default is NameID. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opensearchserverless.aws.upbound.io_securityconfigs.yaml b/package/crds/opensearchserverless.aws.upbound.io_securityconfigs.yaml index f9b8e404a6..eea308057d 100644 --- a/package/crds/opensearchserverless.aws.upbound.io_securityconfigs.yaml +++ b/package/crds/opensearchserverless.aws.upbound.io_securityconfigs.yaml @@ -407,3 +407,392 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SecurityConfig is the Schema for the SecurityConfigs API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecurityConfigSpec defines the desired state of SecurityConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the security configuration. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + samlOptions: + description: Configuration block for SAML options. + properties: + groupAttribute: + description: Group attribute for this SAML integration. + type: string + metadata: + description: The XML IdP metadata file generated from your + identity provider. + type: string + sessionTimeout: + description: Session timeout, in minutes. Minimum is 5 minutes + and maximum is 720 minutes (12 hours). Default is 60 minutes. + type: number + userAttribute: + description: User attribute for this SAML integration. + type: string + type: object + type: + description: Type of configuration. Must be saml. + type: string + required: + - region + - type + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the security configuration. + type: string + samlOptions: + description: Configuration block for SAML options. + properties: + groupAttribute: + description: Group attribute for this SAML integration. + type: string + metadata: + description: The XML IdP metadata file generated from your + identity provider. + type: string + sessionTimeout: + description: Session timeout, in minutes. Minimum is 5 minutes + and maximum is 720 minutes (12 hours). Default is 60 minutes. + type: number + userAttribute: + description: User attribute for this SAML integration. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.samlOptions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.samlOptions) + || (has(self.initProvider) && has(self.initProvider.samlOptions))' + status: + description: SecurityConfigStatus defines the observed state of SecurityConfig. + properties: + atProvider: + properties: + configVersion: + description: Version of the configuration. + type: string + description: + description: Description of the security configuration. + type: string + id: + type: string + samlOptions: + description: Configuration block for SAML options. + properties: + groupAttribute: + description: Group attribute for this SAML integration. + type: string + metadata: + description: The XML IdP metadata file generated from your + identity provider. + type: string + sessionTimeout: + description: Session timeout, in minutes. Minimum is 5 minutes + and maximum is 720 minutes (12 hours). 
Default is 60 minutes. + type: number + userAttribute: + description: User attribute for this SAML integration. + type: string + type: object + type: + description: Type of configuration. Must be saml. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_customlayers.yaml b/package/crds/opsworks.aws.upbound.io_customlayers.yaml index adb5e87152..b75b1456b6 100644 --- a/package/crds/opsworks.aws.upbound.io_customlayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_customlayers.yaml @@ -1504,3 +1504,1450 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CustomLayer is the Schema for the CustomLayers API. Provides + an OpsWorks custom layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CustomLayerSpec defines the desired state of CustomLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + description: Will create an EBS volume and connect it to the layer's + instances. See Cloudwatch Configuration. + properties: + enabled: + type: boolean + logStreams: + description: A block the specifies how an opsworks logs look + like. See Log Streams. + items: + properties: + batchCount: + description: Specifies the max number of log events + in a batch, up to 10000. The default value is 1000. + type: number + batchSize: + description: Specifies the maximum size of log events + in a batch, in bytes, up to 1048576 bytes. The default + value is 32768 bytes. 
+ type: number + bufferDuration: + description: Specifies the time duration for the batching + of log events. The minimum value is 5000 and default + value is 5000. + type: number + datetimeFormat: + description: Specifies how the timestamp is extracted + from logs. For more information, see the CloudWatch + Logs Agent Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). + type: string + encoding: + description: Specifies the encoding of the log file + so that the file can be read correctly. The default + is utf_8. + type: string + file: + description: Specifies log files that you want to push + to CloudWatch Logs. File can point to a specific file + or multiple files (by using wild card characters such + as /var/log/system.log*). + type: string + fileFingerprintLines: + description: Specifies the range of lines for identifying + a file. The valid values are one number, or two dash-delimited + numbers, such as 1, 2-5. The default value is 1. + type: string + initialPosition: + description: Specifies where to start to read data (start_of_file + or end_of_file). The default is start_of_file. + type: string + logGroupName: + description: Specifies the destination log group. A + log group is created automatically if it doesn't already + exist. + type: string + multilineStartPattern: + description: Specifies the pattern for identifying the + start of a log message. + type: string + timeZone: + description: Specifies the time zone of log event time + stamps. + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. 
+ type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: Will create an EBS volume and connect it to the layer's + instances. See EBS Volume. + items: + properties: + encrypted: + description: Encrypt the volume. + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. 
+ type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + description: Load-based auto scaling configuration. See Load Based + AutoScaling + properties: + downscaling: + description: The downscaling settings, as defined below, used + for load-based autoscaling + properties: + alarms: + description: Custom Cloudwatch auto scaling alarms, to + be used as thresholds. This parameter takes a list of + up to five alarm names, which are case sensitive and + must be in the same region as the stack. + items: + type: string + type: array + cpuThreshold: + description: The CPU utilization threshold, as a percent + of the available CPU. A value of -1 disables the threshold. + type: number + ignoreMetricsTime: + description: The amount of time (in minutes) after a scaling + event occurs that AWS OpsWorks Stacks should ignore + metrics and suppress additional scaling events. + type: number + instanceCount: + description: The number of instances to add or remove + when the load exceeds a threshold. + type: number + loadThreshold: + description: The load threshold. A value of -1 disables + the threshold. + type: number + memoryThreshold: + description: The memory utilization threshold, as a percent + of the available memory. A value of -1 disables the + threshold. + type: number + thresholdsWaitTime: + description: The amount of time, in minutes, that the + load must exceed a threshold before more instances are + added or removed. + type: number + type: object + enable: + description: Whether load-based auto scaling is enabled for + the layer. + type: boolean + upscaling: + description: The upscaling settings, as defined below, used + for load-based autoscaling + properties: + alarms: + description: Custom Cloudwatch auto scaling alarms, to + be used as thresholds. 
This parameter takes a list of + up to five alarm names, which are case sensitive and + must be in the same region as the stack. + items: + type: string + type: array + cpuThreshold: + description: The CPU utilization threshold, as a percent + of the available CPU. A value of -1 disables the threshold. + type: number + ignoreMetricsTime: + description: The amount of time (in minutes) after a scaling + event occurs that AWS OpsWorks Stacks should ignore + metrics and suppress additional scaling events. + type: number + instanceCount: + description: The number of instances to add or remove + when the load exceeds a threshold. + type: number + loadThreshold: + description: The load threshold. A value of -1 disables + the threshold. + type: number + memoryThreshold: + description: The memory utilization threshold, as a percent + of the available memory. A value of -1 disables the + threshold. + type: number + thresholdsWaitTime: + description: The amount of time, in minutes, that the + load must exceed a threshold before more instances are + added or removed. + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + shortName: + description: A short, machine-readable name for the layer, which + will be used to identify it in the Chef node JSON. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + description: Will create an EBS volume and connect it to the layer's + instances. See Cloudwatch Configuration. + properties: + enabled: + type: boolean + logStreams: + description: A block the specifies how an opsworks logs look + like. See Log Streams. + items: + properties: + batchCount: + description: Specifies the max number of log events + in a batch, up to 10000. The default value is 1000. + type: number + batchSize: + description: Specifies the maximum size of log events + in a batch, in bytes, up to 1048576 bytes. The default + value is 32768 bytes. + type: number + bufferDuration: + description: Specifies the time duration for the batching + of log events. 
The minimum value is 5000 and default + value is 5000. + type: number + datetimeFormat: + description: Specifies how the timestamp is extracted + from logs. For more information, see the CloudWatch + Logs Agent Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). + type: string + encoding: + description: Specifies the encoding of the log file + so that the file can be read correctly. The default + is utf_8. + type: string + file: + description: Specifies log files that you want to push + to CloudWatch Logs. File can point to a specific file + or multiple files (by using wild card characters such + as /var/log/system.log*). + type: string + fileFingerprintLines: + description: Specifies the range of lines for identifying + a file. The valid values are one number, or two dash-delimited + numbers, such as 1, 2-5. The default value is 1. + type: string + initialPosition: + description: Specifies where to start to read data (start_of_file + or end_of_file). The default is start_of_file. + type: string + logGroupName: + description: Specifies the destination log group. A + log group is created automatically if it doesn't already + exist. + type: string + multilineStartPattern: + description: Specifies the pattern for identifying the + start of a log message. + type: string + timeZone: + description: Specifies the time zone of log event time + stamps. + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: Will create an EBS volume and connect it to the layer's + instances. See EBS Volume. + items: + properties: + encrypted: + description: Encrypt the volume. + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + description: Load-based auto scaling configuration. 
See Load Based + AutoScaling + properties: + downscaling: + description: The downscaling settings, as defined below, used + for load-based autoscaling + properties: + alarms: + description: Custom Cloudwatch auto scaling alarms, to + be used as thresholds. This parameter takes a list of + up to five alarm names, which are case sensitive and + must be in the same region as the stack. + items: + type: string + type: array + cpuThreshold: + description: The CPU utilization threshold, as a percent + of the available CPU. A value of -1 disables the threshold. + type: number + ignoreMetricsTime: + description: The amount of time (in minutes) after a scaling + event occurs that AWS OpsWorks Stacks should ignore + metrics and suppress additional scaling events. + type: number + instanceCount: + description: The number of instances to add or remove + when the load exceeds a threshold. + type: number + loadThreshold: + description: The load threshold. A value of -1 disables + the threshold. + type: number + memoryThreshold: + description: The memory utilization threshold, as a percent + of the available memory. A value of -1 disables the + threshold. + type: number + thresholdsWaitTime: + description: The amount of time, in minutes, that the + load must exceed a threshold before more instances are + added or removed. + type: number + type: object + enable: + description: Whether load-based auto scaling is enabled for + the layer. + type: boolean + upscaling: + description: The upscaling settings, as defined below, used + for load-based autoscaling + properties: + alarms: + description: Custom Cloudwatch auto scaling alarms, to + be used as thresholds. This parameter takes a list of + up to five alarm names, which are case sensitive and + must be in the same region as the stack. + items: + type: string + type: array + cpuThreshold: + description: The CPU utilization threshold, as a percent + of the available CPU. A value of -1 disables the threshold. 
+ type: number + ignoreMetricsTime: + description: The amount of time (in minutes) after a scaling + event occurs that AWS OpsWorks Stacks should ignore + metrics and suppress additional scaling events. + type: number + instanceCount: + description: The number of instances to add or remove + when the load exceeds a threshold. + type: number + loadThreshold: + description: The load threshold. A value of -1 disables + the threshold. + type: number + memoryThreshold: + description: The memory utilization threshold, as a percent + of the available memory. A value of -1 disables the + threshold. + type: number + thresholdsWaitTime: + description: The amount of time, in minutes, that the + load must exceed a threshold before more instances are + added or removed. + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + shortName: + description: A short, machine-readable name for the layer, which + will be used to identify it in the Chef node JSON. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.shortName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.shortName) + || (has(self.initProvider) && has(self.initProvider.shortName))' + status: + description: CustomLayerStatus defines the observed state of CustomLayer. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + description: Will create an EBS volume and connect it to the layer's + instances. See Cloudwatch Configuration. + properties: + enabled: + type: boolean + logStreams: + description: A block the specifies how an opsworks logs look + like. See Log Streams. + items: + properties: + batchCount: + description: Specifies the max number of log events + in a batch, up to 10000. The default value is 1000. + type: number + batchSize: + description: Specifies the maximum size of log events + in a batch, in bytes, up to 1048576 bytes. The default + value is 32768 bytes. 
+ type: number + bufferDuration: + description: Specifies the time duration for the batching + of log events. The minimum value is 5000 and default + value is 5000. + type: number + datetimeFormat: + description: Specifies how the timestamp is extracted + from logs. For more information, see the CloudWatch + Logs Agent Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). + type: string + encoding: + description: Specifies the encoding of the log file + so that the file can be read correctly. The default + is utf_8. + type: string + file: + description: Specifies log files that you want to push + to CloudWatch Logs. File can point to a specific file + or multiple files (by using wild card characters such + as /var/log/system.log*). + type: string + fileFingerprintLines: + description: Specifies the range of lines for identifying + a file. The valid values are one number, or two dash-delimited + numbers, such as 1, 2-5. The default value is 1. + type: string + initialPosition: + description: Specifies where to start to read data (start_of_file + or end_of_file). The default is start_of_file. + type: string + logGroupName: + description: Specifies the destination log group. A + log group is created automatically if it doesn't already + exist. + type: string + multilineStartPattern: + description: Specifies the pattern for identifying the + start of a log message. + type: string + timeZone: + description: Specifies the time zone of log event time + stamps. + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. 
+ type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: Will create an EBS volume and connect it to the layer's + instances. See EBS Volume. + items: + properties: + encrypted: + description: Encrypt the volume. + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + description: Load-based auto scaling configuration. 
See Load Based + AutoScaling + properties: + downscaling: + description: The downscaling settings, as defined below, used + for load-based autoscaling + properties: + alarms: + description: Custom Cloudwatch auto scaling alarms, to + be used as thresholds. This parameter takes a list of + up to five alarm names, which are case sensitive and + must be in the same region as the stack. + items: + type: string + type: array + cpuThreshold: + description: The CPU utilization threshold, as a percent + of the available CPU. A value of -1 disables the threshold. + type: number + ignoreMetricsTime: + description: The amount of time (in minutes) after a scaling + event occurs that AWS OpsWorks Stacks should ignore + metrics and suppress additional scaling events. + type: number + instanceCount: + description: The number of instances to add or remove + when the load exceeds a threshold. + type: number + loadThreshold: + description: The load threshold. A value of -1 disables + the threshold. + type: number + memoryThreshold: + description: The memory utilization threshold, as a percent + of the available memory. A value of -1 disables the + threshold. + type: number + thresholdsWaitTime: + description: The amount of time, in minutes, that the + load must exceed a threshold before more instances are + added or removed. + type: number + type: object + enable: + description: Whether load-based auto scaling is enabled for + the layer. + type: boolean + upscaling: + description: The upscaling settings, as defined below, used + for load-based autoscaling + properties: + alarms: + description: Custom Cloudwatch auto scaling alarms, to + be used as thresholds. This parameter takes a list of + up to five alarm names, which are case sensitive and + must be in the same region as the stack. + items: + type: string + type: array + cpuThreshold: + description: The CPU utilization threshold, as a percent + of the available CPU. A value of -1 disables the threshold. 
+ type: number + ignoreMetricsTime: + description: The amount of time (in minutes) after a scaling + event occurs that AWS OpsWorks Stacks should ignore + metrics and suppress additional scaling events. + type: number + instanceCount: + description: The number of instances to add or remove + when the load exceeds a threshold. + type: number + loadThreshold: + description: The load threshold. A value of -1 disables + the threshold. + type: number + memoryThreshold: + description: The memory utilization threshold, as a percent + of the available memory. A value of -1 disables the + threshold. + type: number + thresholdsWaitTime: + description: The amount of time, in minutes, that the + load must exceed a threshold before more instances are + added or removed. + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + shortName: + description: A short, machine-readable name for the layer, which + will be used to identify it in the Chef node JSON. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_ecsclusterlayers.yaml b/package/crds/opsworks.aws.upbound.io_ecsclusterlayers.yaml index 6d2df9eb8c..9149a8967b 100644 --- a/package/crds/opsworks.aws.upbound.io_ecsclusterlayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_ecsclusterlayers.yaml @@ -1382,3 +1382,1343 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EcsClusterLayer is the Schema for the EcsClusterLayers API. Provides + an OpsWorks HAProxy layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EcsClusterLayerSpec defines the desired state of EcsClusterLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + ecsClusterArn: + description: The ECS Cluster ARN of the layer. + type: string + ecsClusterArnRef: + description: Reference to a Cluster in ecs to populate ecsClusterArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ecsClusterArnSelector: + description: Selector for a Cluster in ecs to populate ecsClusterArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + ecsClusterArn: + description: The ECS Cluster ARN of the layer. + type: string + ecsClusterArnRef: + description: Reference to a Cluster in ecs to populate ecsClusterArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ecsClusterArnSelector: + description: Selector for a Cluster in ecs to populate ecsClusterArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: EcsClusterLayerStatus defines the observed state of EcsClusterLayer. 
+ properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. 
+ type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + ecsClusterArn: + description: The ECS Cluster ARN of the layer. + type: string + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. 
+ type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_ganglialayers.yaml b/package/crds/opsworks.aws.upbound.io_ganglialayers.yaml index 90145596b7..027a708537 100644 --- a/package/crds/opsworks.aws.upbound.io_ganglialayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_ganglialayers.yaml @@ -1263,3 +1263,1224 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GangliaLayer is the Schema for the GangliaLayers API. 
Provides + an OpsWorks Ganglia layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GangliaLayerSpec defines the desired state of GangliaLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. 
+ type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. 
+ items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + password: + description: The password to use for Ganglia. + type: string + stackId: + description: ID of the stack the layer will belong to. 
+ type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + url: + description: The URL path to use for Ganglia. Defaults to "/ganglia". + type: string + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + username: + description: (Optiona) The username to use for Ganglia. Defaults + to "opsworks". + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. 
+ type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. 
+ items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + password: + description: The password to use for Ganglia. + type: string + stackId: + description: ID of the stack the layer will belong to. 
+ type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + url: + description: The URL path to use for Ganglia. Defaults to "/ganglia". + type: string + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + username: + description: (Optiona) The username to use for Ganglia. Defaults + to "opsworks". + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.password is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.password) + || (has(self.initProvider) && has(self.initProvider.password))' + status: + description: GangliaLayerStatus defines the observed state of GangliaLayer. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. 
+ type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + password: + description: The password to use for Ganglia. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + url: + description: The URL path to use for Ganglia. Defaults to "/ganglia". + type: string + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. 
+ type: boolean + username: + description: (Optiona) The username to use for Ganglia. Defaults + to "opsworks". + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_haproxylayers.yaml b/package/crds/opsworks.aws.upbound.io_haproxylayers.yaml index 54ca3fdb0b..3835f11ba0 100644 --- a/package/crds/opsworks.aws.upbound.io_haproxylayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_haproxylayers.yaml @@ -1293,3 +1293,1254 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HAProxyLayer is the Schema for the HAProxyLayers API. Provides + an OpsWorks HAProxy layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HAProxyLayerSpec defines the desired state of HAProxyLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + healthcheckMethod: + description: HTTP method to use for instance healthchecks. Defaults + to "OPTIONS". + type: string + healthcheckUrl: + description: URL path to use for instance healthchecks. Defaults + to "/". + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + statsEnabled: + description: Whether to enable HAProxy stats. + type: boolean + statsPassword: + description: The password to use for HAProxy stats. + type: string + statsUrl: + description: The HAProxy stats URL. Defaults to "/haproxy?stats". 
+ type: string + statsUser: + description: The username for HAProxy stats. Defaults to "opsworks". + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. 
+ type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. 
+ items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + healthcheckMethod: + description: HTTP method to use for instance healthchecks. Defaults + to "OPTIONS". + type: string + healthcheckUrl: + description: URL path to use for instance healthchecks. Defaults + to "/". + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. 
+ type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + statsEnabled: + description: Whether to enable HAProxy stats. + type: boolean + statsPassword: + description: The password to use for HAProxy stats. + type: string + statsUrl: + description: The HAProxy stats URL. Defaults to "/haproxy?stats". + type: string + statsUser: + description: The username for HAProxy stats. Defaults to "opsworks". + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.statsPassword is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.statsPassword) + || (has(self.initProvider) && has(self.initProvider.statsPassword))' + status: + description: HAProxyLayerStatus defines the observed state of HAProxyLayer. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. 
+ type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. 
+ type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + healthcheckMethod: + description: HTTP method to use for instance healthchecks. Defaults + to "OPTIONS". + type: string + healthcheckUrl: + description: URL path to use for instance healthchecks. Defaults + to "/". + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + statsEnabled: + description: Whether to enable HAProxy stats. + type: boolean + statsPassword: + description: The password to use for HAProxy stats. + type: string + statsUrl: + description: The HAProxy stats URL. Defaults to "/haproxy?stats". 
+ type: string + statsUser: + description: The username for HAProxy stats. Defaults to "opsworks". + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_javaapplayers.yaml b/package/crds/opsworks.aws.upbound.io_javaapplayers.yaml index adead52460..a49e52d6b0 100644 --- a/package/crds/opsworks.aws.upbound.io_javaapplayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_javaapplayers.yaml @@ -1279,3 +1279,1240 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: JavaAppLayer is the Schema for the JavaAppLayers API. Provides + an OpsWorks Java application layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: JavaAppLayerSpec defines the desired state of JavaAppLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appServer: + description: Keyword for the application container to use. Defaults + to "tomcat". + type: string + appServerVersion: + description: Version of the selected application container to + use. Defaults to "7". + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. 
+ type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. 
+ items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + jvmOptions: + description: Options to set for the JVM. + type: string + jvmType: + description: Keyword for the type of JVM to use. Defaults to openjdk. + type: string + jvmVersion: + description: Version of JVM to use. Defaults to "7". 
+ type: string + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appServer: + description: Keyword for the application container to use. Defaults + to "tomcat". + type: string + appServerVersion: + description: Version of the selected application container to + use. Defaults to "7". + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. 
+ items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + jvmOptions: + description: Options to set for the JVM. 
+ type: string + jvmType: + description: Keyword for the type of JVM to use. Defaults to openjdk. + type: string + jvmVersion: + description: Version of JVM to use. Defaults to "7". + type: string + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: JavaAppLayerStatus defines the observed state of JavaAppLayer. + properties: + atProvider: + properties: + appServer: + description: Keyword for the application container to use. Defaults + to "tomcat". + type: string + appServerVersion: + description: Version of the selected application container to + use. Defaults to "7". + type: string + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. 
+ type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. 
+ type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + jvmOptions: + description: Options to set for the JVM. + type: string + jvmType: + description: Keyword for the type of JVM to use. Defaults to openjdk. + type: string + jvmVersion: + description: Version of JVM to use. Defaults to "7". + type: string + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. 
+ type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_memcachedlayers.yaml b/package/crds/opsworks.aws.upbound.io_memcachedlayers.yaml index a4430b6d98..7b757960a7 100644 --- a/package/crds/opsworks.aws.upbound.io_memcachedlayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_memcachedlayers.yaml @@ -1240,3 +1240,1201 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MemcachedLayer is the Schema for the MemcachedLayers API. Provides + an OpsWorks memcached layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MemcachedLayerSpec defines the desired state of MemcachedLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allocatedMemory: + description: Amount of memory to allocate for the cache on each + instance, in megabytes. Defaults to 512MB. + type: number + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allocatedMemory: + description: Amount of memory to allocate for the cache on each + instance, in megabytes. Defaults to 512MB. + type: number + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. 
+ type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. 
+ type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MemcachedLayerStatus defines the observed state of MemcachedLayer. + properties: + atProvider: + properties: + allocatedMemory: + description: Amount of memory to allocate for the cache on each + instance, in megabytes. Defaults to 512MB. + type: number + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. 
+ type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. 
+ type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_mysqllayers.yaml b/package/crds/opsworks.aws.upbound.io_mysqllayers.yaml index 1443e151c2..f203e60ba0 100644 --- a/package/crds/opsworks.aws.upbound.io_mysqllayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_mysqllayers.yaml @@ -1249,3 +1249,1210 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MySQLLayer is the Schema for the MySQLLayers API. Provides an + OpsWorks MySQL layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MySQLLayerSpec defines the desired state of MySQLLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + rootPassword: + description: Root password to use for MySQL. + type: string + rootPasswordOnAllInstances: + description: Whether to set the root user password to all instances + in the stack so they can access the instances in this layer. + type: boolean + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + rootPassword: + description: Root password to use for MySQL. + type: string + rootPasswordOnAllInstances: + description: Whether to set the root user password to all instances + in the stack so they can access the instances in this layer. + type: boolean + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MySQLLayerStatus defines the observed state of MySQLLayer. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. 
+ type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. 
+ type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + rootPassword: + description: Root password to use for MySQL. + type: string + rootPasswordOnAllInstances: + description: Whether to set the root user password to all instances + in the stack so they can access the instances in this layer. 
+ type: boolean + stackId: + description: ID of the stack the layer will belong to. + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_nodejsapplayers.yaml b/package/crds/opsworks.aws.upbound.io_nodejsapplayers.yaml index 1c6d4755af..ae13ca8f25 100644 --- a/package/crds/opsworks.aws.upbound.io_nodejsapplayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_nodejsapplayers.yaml @@ -1237,3 +1237,1198 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: NodeJSAppLayer is the Schema for the NodeJSAppLayers API. Provides + an OpsWorks NodeJS application layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NodeJSAppLayerSpec defines the desired state of NodeJSAppLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + nodejsVersion: + description: The version of NodeJS to use. Defaults to "0.10.38". + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + nodejsVersion: + description: The version of NodeJS to use. Defaults to "0.10.38". + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: NodeJSAppLayerStatus defines the observed state of NodeJSAppLayer. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. 
+ type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. 
+ type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + nodejsVersion: + description: The version of NodeJS to use. Defaults to "0.10.38". + type: string + stackId: + description: ID of the stack the layer will belong to. 
+ type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_phpapplayers.yaml b/package/crds/opsworks.aws.upbound.io_phpapplayers.yaml index 26ac9b59fc..d9ebf394f0 100644 --- a/package/crds/opsworks.aws.upbound.io_phpapplayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_phpapplayers.yaml @@ -1228,3 +1228,1189 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: PHPAppLayer is the Schema for the PHPAppLayers API. Provides + an OpsWorks PHP application layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PHPAppLayerSpec defines the desired state of PHPAppLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. 
+ type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. 
+ type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: PHPAppLayerStatus defines the observed state of PHPAppLayer. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. 
+ type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. 
+ type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_railsapplayers.yaml b/package/crds/opsworks.aws.upbound.io_railsapplayers.yaml index 95a543df0b..2ff400b1e1 100644 --- a/package/crds/opsworks.aws.upbound.io_railsapplayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_railsapplayers.yaml @@ -1285,3 +1285,1246 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RailsAppLayer is the Schema for the RailsAppLayers API. Provides + an OpsWorks Ruby on Rails application layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RailsAppLayerSpec defines the desired state of RailsAppLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appServer: + description: Keyword for the app server to use. Defaults to "apache_passenger". + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + bundlerVersion: + description: When OpsWorks is managing Bundler, which version + to use. Defaults to "1.5.3". 
+ type: string + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. 
+ items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + manageBundler: + description: Whether OpsWorks should manage bundler. On by default. + type: boolean + name: + description: A human-readable name for the layer. + type: string + passengerVersion: + description: The version of Passenger to use. Defaults to "4.0.46". 
+ type: string + rubyVersion: + description: The version of Ruby to use. Defaults to "2.0.0". + type: string + rubygemsVersion: + description: The version of RubyGems to use. Defaults to "2.2.2". + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appServer: + description: Keyword for the app server to use. Defaults to "apache_passenger". + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. 
+ type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + bundlerVersion: + description: When OpsWorks is managing Bundler, which version + to use. Defaults to "1.5.3". + type: string + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. 
+ type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + manageBundler: + description: Whether OpsWorks should manage bundler. On by default. + type: boolean + name: + description: A human-readable name for the layer. + type: string + passengerVersion: + description: The version of Passenger to use. Defaults to "4.0.46". + type: string + rubyVersion: + description: The version of Ruby to use. Defaults to "2.0.0". + type: string + rubygemsVersion: + description: The version of RubyGems to use. Defaults to "2.2.2". + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. 
+ type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: RailsAppLayerStatus defines the observed state of RailsAppLayer. + properties: + atProvider: + properties: + appServer: + description: Keyword for the app server to use. Defaults to "apache_passenger". + type: string + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. 
+ type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + bundlerVersion: + description: When OpsWorks is managing Bundler, which version + to use. Defaults to "1.5.3". + type: string + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + description: Custom JSON attributes to apply to the layer. + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. 
+ items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + manageBundler: + description: Whether OpsWorks should manage bundler. On by default. + type: boolean + name: + description: A human-readable name for the layer. 
+ type: string + passengerVersion: + description: The version of Passenger to use. Defaults to "4.0.46". + type: string + rubyVersion: + description: The version of Ruby to use. Defaults to "2.0.0". + type: string + rubygemsVersion: + description: The version of RubyGems to use. Defaults to "2.2.2". + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_stacks.yaml b/package/crds/opsworks.aws.upbound.io_stacks.yaml index 71b2aabafe..aa25a99fd2 100644 --- a/package/crds/opsworks.aws.upbound.io_stacks.yaml +++ b/package/crds/opsworks.aws.upbound.io_stacks.yaml @@ -1334,3 +1334,1305 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Stack is the Schema for the Stacks API. Provides an OpsWorks + stack resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StackSpec defines the desired state of Stack + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + agentVersion: + description: If set to "LATEST", OpsWorks will automatically install + the latest version. + type: string + berkshelfVersion: + description: If manage_berkshelf is enabled, the version of Berkshelf + to use. + type: string + color: + description: Color to paint next to the stack's resources in the + OpsWorks console. + type: string + configurationManagerName: + description: Name of the configuration manager to use. Defaults + to "Chef". + type: string + configurationManagerVersion: + description: Version of the configuration manager to use. Defaults + to "11.4". + type: string + customCookbooksSource: + description: When use_custom_cookbooks is set, provide this sub-object + as described below. 
+ properties: + passwordSecretRef: + description: Password to use when authenticating to the source. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + revision: + description: For sources that are version-aware, the revision + to use. + type: string + sshKeySecretRef: + description: SSH key to use when authenticating to the source. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: + description: The type of source to use. For example, "archive". + type: string + url: + description: The URL where the cookbooks resource can be found. + type: string + username: + description: Username to use when authenticating to the source. + type: string + type: object + customJson: + description: User defined JSON passed to "Chef". Use a "here doc" + for multiline JSON. + type: string + defaultAvailabilityZone: + description: |- + Name of the availability zone where instances will be created by default. + Cannot be set when vpc_id is set. + type: string + defaultInstanceProfileArn: + description: The ARN of an IAM Instance Profile that created instances + will have by default. + type: string + defaultInstanceProfileArnRef: + description: Reference to a InstanceProfile in iam to populate + defaultInstanceProfileArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultInstanceProfileArnSelector: + description: Selector for a InstanceProfile in iam to populate + defaultInstanceProfileArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultOs: + description: Name of OS that will be installed on instances by + default. 
+ type: string + defaultRootDeviceType: + description: Name of the type of root device instances will have + by default. + type: string + defaultSshKeyName: + description: Name of the SSH keypair that instances will have + by default. + type: string + defaultSubnetId: + description: |- + ID of the subnet in which instances will be created by default. + Required if vpc_id is set to a VPC other than the default VPC, and forbidden if it isn't. + type: string + defaultSubnetIdRef: + description: Reference to a Subnet in ec2 to populate defaultSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultSubnetIdSelector: + description: Selector for a Subnet in ec2 to populate defaultSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostnameTheme: + description: Keyword representing the naming scheme that will + be used for instance hostnames within this stack. + type: string + manageBerkshelf: + description: Boolean value controlling whether Opsworks will run + Berkshelf for this stack. + type: boolean + name: + description: The name of the stack. + type: string + region: + description: The name of the region where the stack will exist. + type: string + serviceRoleArn: + description: The ARN of an IAM role that the OpsWorks service + will act as. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useCustomCookbooks: + description: Boolean value controlling whether the custom cookbook + settings are enabled. + type: boolean + useOpsworksSecurityGroups: + description: Boolean value controlling whether the standard OpsWorks + security groups apply to created instances. 
+ type: boolean + vpcId: + description: |- + ID of the VPC that this stack belongs to. + Defaults to the region's default VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + agentVersion: + description: If set to "LATEST", OpsWorks will automatically install + the latest version. + type: string + berkshelfVersion: + description: If manage_berkshelf is enabled, the version of Berkshelf + to use. + type: string + color: + description: Color to paint next to the stack's resources in the + OpsWorks console. + type: string + configurationManagerName: + description: Name of the configuration manager to use. Defaults + to "Chef". + type: string + configurationManagerVersion: + description: Version of the configuration manager to use. Defaults + to "11.4". + type: string + customCookbooksSource: + description: When use_custom_cookbooks is set, provide this sub-object + as described below. + properties: + passwordSecretRef: + description: Password to use when authenticating to the source. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + revision: + description: For sources that are version-aware, the revision + to use. + type: string + sshKeySecretRef: + description: SSH key to use when authenticating to the source. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: + description: The type of source to use. For example, "archive". + type: string + url: + description: The URL where the cookbooks resource can be found. + type: string + username: + description: Username to use when authenticating to the source. + type: string + type: object + customJson: + description: User defined JSON passed to "Chef". Use a "here doc" + for multiline JSON. + type: string + defaultAvailabilityZone: + description: |- + Name of the availability zone where instances will be created by default. + Cannot be set when vpc_id is set. + type: string + defaultInstanceProfileArn: + description: The ARN of an IAM Instance Profile that created instances + will have by default. + type: string + defaultInstanceProfileArnRef: + description: Reference to a InstanceProfile in iam to populate + defaultInstanceProfileArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultInstanceProfileArnSelector: + description: Selector for a InstanceProfile in iam to populate + defaultInstanceProfileArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultOs: + description: Name of OS that will be installed on instances by + default. + type: string + defaultRootDeviceType: + description: Name of the type of root device instances will have + by default. + type: string + defaultSshKeyName: + description: Name of the SSH keypair that instances will have + by default. 
+ type: string + defaultSubnetId: + description: |- + ID of the subnet in which instances will be created by default. + Required if vpc_id is set to a VPC other than the default VPC, and forbidden if it isn't. + type: string + defaultSubnetIdRef: + description: Reference to a Subnet in ec2 to populate defaultSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultSubnetIdSelector: + description: Selector for a Subnet in ec2 to populate defaultSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostnameTheme: + description: Keyword representing the naming scheme that will + be used for instance hostnames within this stack. + type: string + manageBerkshelf: + description: Boolean value controlling whether Opsworks will run + Berkshelf for this stack. + type: boolean + name: + description: The name of the stack. + type: string + serviceRoleArn: + description: The ARN of an IAM role that the OpsWorks service + will act as. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useCustomCookbooks: + description: Boolean value controlling whether the custom cookbook + settings are enabled. + type: boolean + useOpsworksSecurityGroups: + description: Boolean value controlling whether the standard OpsWorks + security groups apply to created instances. + type: boolean + vpcId: + description: |- + ID of the VPC that this stack belongs to. + Defaults to the region's default VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: StackStatus defines the observed state of Stack. + properties: + atProvider: + properties: + agentVersion: + description: If set to "LATEST", OpsWorks will automatically install + the latest version. + type: string + arn: + type: string + berkshelfVersion: + description: If manage_berkshelf is enabled, the version of Berkshelf + to use. + type: string + color: + description: Color to paint next to the stack's resources in the + OpsWorks console. + type: string + configurationManagerName: + description: Name of the configuration manager to use. Defaults + to "Chef". + type: string + configurationManagerVersion: + description: Version of the configuration manager to use. Defaults + to "11.4". + type: string + customCookbooksSource: + description: When use_custom_cookbooks is set, provide this sub-object + as described below. + properties: + revision: + description: For sources that are version-aware, the revision + to use. + type: string + type: + description: The type of source to use. For example, "archive". + type: string + url: + description: The URL where the cookbooks resource can be found. + type: string + username: + description: Username to use when authenticating to the source. + type: string + type: object + customJson: + description: User defined JSON passed to "Chef". Use a "here doc" + for multiline JSON. + type: string + defaultAvailabilityZone: + description: |- + Name of the availability zone where instances will be created by default. + Cannot be set when vpc_id is set. 
+ type: string + defaultInstanceProfileArn: + description: The ARN of an IAM Instance Profile that created instances + will have by default. + type: string + defaultOs: + description: Name of OS that will be installed on instances by + default. + type: string + defaultRootDeviceType: + description: Name of the type of root device instances will have + by default. + type: string + defaultSshKeyName: + description: Name of the SSH keypair that instances will have + by default. + type: string + defaultSubnetId: + description: |- + ID of the subnet in which instances will be created by default. + Required if vpc_id is set to a VPC other than the default VPC, and forbidden if it isn't. + type: string + hostnameTheme: + description: Keyword representing the naming scheme that will + be used for instance hostnames within this stack. + type: string + id: + description: The id of the stack. + type: string + manageBerkshelf: + description: Boolean value controlling whether Opsworks will run + Berkshelf for this stack. + type: boolean + name: + description: The name of the stack. + type: string + region: + description: The name of the region where the stack will exist. + type: string + serviceRoleArn: + description: The ARN of an IAM role that the OpsWorks service + will act as. + type: string + stackEndpoint: + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + useCustomCookbooks: + description: Boolean value controlling whether the custom cookbook + settings are enabled. + type: boolean + useOpsworksSecurityGroups: + description: Boolean value controlling whether the standard OpsWorks + security groups apply to created instances. 
+ type: boolean + vpcId: + description: |- + ID of the VPC that this stack belongs to. + Defaults to the region's default VPC. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/opsworks.aws.upbound.io_staticweblayers.yaml b/package/crds/opsworks.aws.upbound.io_staticweblayers.yaml index e51ba06eb3..d4829a68d0 100644 --- a/package/crds/opsworks.aws.upbound.io_staticweblayers.yaml +++ b/package/crds/opsworks.aws.upbound.io_staticweblayers.yaml @@ -1225,3 +1225,1186 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StaticWebLayer is the Schema for the StaticWebLayers API. Provides + an OpsWorks static web server layer resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StaticWebLayerSpec defines the desired state of StaticWebLayer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. 
+ type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. 
+ type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. + type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + type: string + customSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate customSecurityGroupIds. 
+ items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + customSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + customSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. + type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. 
+ type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + stackIdRef: + description: Reference to a Stack in opsworks to populate stackId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + stackIdSelector: + description: Selector for a Stack in opsworks to populate stackId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: StaticWebLayerStatus defines the observed state of StaticWebLayer. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name(ARN) of the layer. + type: string + autoAssignElasticIps: + description: Whether to automatically assign an elastic IP address + to the layer's instances. + type: boolean + autoAssignPublicIps: + description: For stacks belonging to a VPC, whether to automatically + assign a public IP address to each of the layer's instances. + type: boolean + autoHealing: + description: Whether to enable auto-healing for the layer. 
+ type: boolean + cloudwatchConfiguration: + properties: + enabled: + type: boolean + logStreams: + items: + properties: + batchCount: + type: number + batchSize: + type: number + bufferDuration: + type: number + datetimeFormat: + type: string + encoding: + type: string + file: + type: string + fileFingerprintLines: + type: string + initialPosition: + type: string + logGroupName: + description: A human-readable name for the layer. + type: string + multilineStartPattern: + type: string + timeZone: + type: string + type: object + type: array + type: object + customConfigureRecipes: + items: + type: string + type: array + customDeployRecipes: + items: + type: string + type: array + customInstanceProfileArn: + description: The ARN of an IAM profile that will be used for the + layer's instances. + type: string + customJson: + type: string + customSecurityGroupIds: + description: Ids for a set of security groups to apply to the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + customSetupRecipes: + items: + type: string + type: array + customShutdownRecipes: + items: + type: string + type: array + customUndeployRecipes: + items: + type: string + type: array + drainElbOnShutdown: + description: Whether to enable Elastic Load Balancing connection + draining. + type: boolean + ebsVolume: + description: ebs_volume blocks, as described below, will each + create an EBS volume and connect it to the layer's instances. + items: + properties: + encrypted: + type: boolean + iops: + description: For PIOPS volumes, the IOPS per disk. + type: number + mountPoint: + description: The path to mount the EBS volume on the layer's + instances. + type: string + numberOfDisks: + description: The number of disks to use for the EBS volume. + type: number + raidLevel: + description: The RAID level to use for the volume. + type: string + size: + description: The size of the volume in gigabytes. 
+ type: number + type: + description: The type of volume to create. This may be standard + (the default), io1 or gp2. + type: string + type: object + type: array + elasticLoadBalancer: + description: Name of an Elastic Load Balancer to attach to this + layer + type: string + id: + description: The id of the layer. + type: string + installUpdatesOnBoot: + description: Whether to install OS and package updates on each + instance when it boots. + type: boolean + instanceShutdownTimeout: + description: The time, in seconds, that OpsWorks will wait for + Chef to complete after triggering the Shutdown event. + type: number + loadBasedAutoScaling: + properties: + downscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + enable: + type: boolean + upscaling: + properties: + alarms: + items: + type: string + type: array + cpuThreshold: + type: number + ignoreMetricsTime: + type: number + instanceCount: + type: number + loadThreshold: + type: number + memoryThreshold: + type: number + thresholdsWaitTime: + type: number + type: object + type: object + name: + description: A human-readable name for the layer. + type: string + stackId: + description: ID of the stack the layer will belong to. + type: string + systemPackages: + description: Names of a set of system packages to install on the + layer's instances. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + useEbsOptimizedInstances: + description: Whether to use EBS-optimized instances. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/pinpoint.aws.upbound.io_apps.yaml b/package/crds/pinpoint.aws.upbound.io_apps.yaml index 682621e0d1..bf5dad98b8 100644 --- a/package/crds/pinpoint.aws.upbound.io_apps.yaml +++ b/package/crds/pinpoint.aws.upbound.io_apps.yaml @@ -559,3 +559,526 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: App is the Schema for the Apps API. Provides a Pinpoint App resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AppSpec defines the desired state of App + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + campaignHook: + description: Specifies settings for invoking an AWS Lambda function + that customizes a segment for a campaign + properties: + lambdaFunctionName: + description: Lambda function name or ARN to be called for + delivery. Conflicts with web_url + type: string + mode: + description: What mode Lambda should be invoked in. Valid + values for this parameter are DELIVERY, FILTER. + type: string + webUrl: + description: Web URL to call for hook. If the URL has authentication + specified it will be added as authentication to the request. + Conflicts with lambda_function_name + type: string + type: object + limits: + description: The default campaign limits for the app. These limits + apply to each campaign for the app, unless the campaign overrides + the default with limits of its own + properties: + daily: + description: The maximum number of messages that the campaign + can send daily. + type: number + maximumDuration: + description: The length of time (in seconds) that the campaign + can run before it ends and message deliveries stop. This + duration begins at the scheduled start time for the campaign. + The minimum value is 60. + type: number + messagesPerSecond: + description: The number of messages that the campaign can + send per second. The minimum value is 50, and the maximum + is 20000. + type: number + total: + description: The maximum total number of messages that the + campaign can send. 
+ type: number + type: object + name: + description: The application name + type: string + quietTime: + description: The default quiet time for the app. Each campaign + for this app sends no messages during this time unless the campaign + overrides the default with a quiet time of its own + properties: + end: + description: The default end time for quiet time in ISO 8601 + format. Required if start is set + type: string + start: + description: The default start time for quiet time in ISO + 8601 format. Required if end is set + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + campaignHook: + description: Specifies settings for invoking an AWS Lambda function + that customizes a segment for a campaign + properties: + lambdaFunctionName: + description: Lambda function name or ARN to be called for + delivery. Conflicts with web_url + type: string + mode: + description: What mode Lambda should be invoked in. Valid + values for this parameter are DELIVERY, FILTER. + type: string + webUrl: + description: Web URL to call for hook. 
If the URL has authentication + specified it will be added as authentication to the request. + Conflicts with lambda_function_name + type: string + type: object + limits: + description: The default campaign limits for the app. These limits + apply to each campaign for the app, unless the campaign overrides + the default with limits of its own + properties: + daily: + description: The maximum number of messages that the campaign + can send daily. + type: number + maximumDuration: + description: The length of time (in seconds) that the campaign + can run before it ends and message deliveries stop. This + duration begins at the scheduled start time for the campaign. + The minimum value is 60. + type: number + messagesPerSecond: + description: The number of messages that the campaign can + send per second. The minimum value is 50, and the maximum + is 20000. + type: number + total: + description: The maximum total number of messages that the + campaign can send. + type: number + type: object + name: + description: The application name + type: string + quietTime: + description: The default quiet time for the app. Each campaign + for this app sends no messages during this time unless the campaign + overrides the default with a quiet time of its own + properties: + end: + description: The default end time for quiet time in ISO 8601 + format. Required if start is set + type: string + start: + description: The default start time for quiet time in ISO + 8601 format. Required if end is set + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AppStatus defines the observed state of App. + properties: + atProvider: + properties: + applicationId: + description: The Application ID of the Pinpoint App. + type: string + arn: + description: Amazon Resource Name (ARN) of the PinPoint Application + type: string + campaignHook: + description: Specifies settings for invoking an AWS Lambda function + that customizes a segment for a campaign + properties: + lambdaFunctionName: + description: Lambda function name or ARN to be called for + delivery. Conflicts with web_url + type: string + mode: + description: What mode Lambda should be invoked in. 
Valid + values for this parameter are DELIVERY, FILTER. + type: string + webUrl: + description: Web URL to call for hook. If the URL has authentication + specified it will be added as authentication to the request. + Conflicts with lambda_function_name + type: string + type: object + id: + type: string + limits: + description: The default campaign limits for the app. These limits + apply to each campaign for the app, unless the campaign overrides + the default with limits of its own + properties: + daily: + description: The maximum number of messages that the campaign + can send daily. + type: number + maximumDuration: + description: The length of time (in seconds) that the campaign + can run before it ends and message deliveries stop. This + duration begins at the scheduled start time for the campaign. + The minimum value is 60. + type: number + messagesPerSecond: + description: The number of messages that the campaign can + send per second. The minimum value is 50, and the maximum + is 20000. + type: number + total: + description: The maximum total number of messages that the + campaign can send. + type: number + type: object + name: + description: The application name + type: string + quietTime: + description: The default quiet time for the app. Each campaign + for this app sends no messages during this time unless the campaign + overrides the default with a quiet time of its own + properties: + end: + description: The default end time for quiet time in ISO 8601 + format. Required if start is set + type: string + start: + description: The default start time for quiet time in ISO + 8601 format. Required if end is set + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/qldb.aws.upbound.io_streams.yaml b/package/crds/qldb.aws.upbound.io_streams.yaml index 933ceb9706..d67ebbedee 100644 --- a/package/crds/qldb.aws.upbound.io_streams.yaml +++ b/package/crds/qldb.aws.upbound.io_streams.yaml @@ -965,3 +965,944 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Stream is the Schema for the Streams API. Provides a QLDB Stream + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StreamSpec defines the desired state of Stream + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + exclusiveEndTime: + description: 'The exclusive date and time that specifies when + the stream ends. If you don''t define this parameter, the stream + runs indefinitely until you cancel it. It must be in ISO 8601 + date and time format and in Universal Coordinated Time (UTC). + For example: "2019-06-13T21:36:34Z".' + type: string + inclusiveStartTime: + description: 'The inclusive start date and time from which to + start streaming journal data. This parameter must be in ISO + 8601 date and time format and in Universal Coordinated Time + (UTC). For example: "2019-06-13T21:36:34Z". This cannot be + in the future and must be before exclusive_end_time. If you + provide a value that is before the ledger''s CreationDateTime, + QLDB effectively defaults it to the ledger''s CreationDateTime.' + type: string + kinesisConfiguration: + description: The configuration settings of the Kinesis Data Streams + destination for your stream request. Documented below. 
+ properties: + aggregationEnabled: + description: 'Enables QLDB to publish multiple data records + in a single Kinesis Data Streams record, increasing the + number of records sent per API call. Default: true.' + type: boolean + streamArn: + description: The Amazon Resource Name (ARN) of the Kinesis + Data Streams resource. + type: string + streamArnRef: + description: Reference to a Stream in kinesis to populate + streamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamArnSelector: + description: Selector for a Stream in kinesis to populate + streamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + ledgerName: + description: The name of the QLDB ledger. + type: string + ledgerNameRef: + description: Reference to a Ledger in qldb to populate ledgerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ledgerNameSelector: + description: Selector for a Ledger in qldb to populate ledgerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role that + grants QLDB permissions for a journal stream to write data records + to a Kinesis Data Streams resource. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamName: + description: The name that you want to assign to the QLDB journal + stream. User-defined names can help identify and indicate the + purpose of a stream. Your stream name must be unique among + other active streams for a given ledger. Stream names have the + same naming constraints as ledger names, as defined in the Amazon + QLDB Developer Guide. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + exclusiveEndTime: + description: 'The exclusive date and time that specifies when + the stream ends. If you don''t define this parameter, the stream + runs indefinitely until you cancel it. It must be in ISO 8601 + date and time format and in Universal Coordinated Time (UTC). + For example: "2019-06-13T21:36:34Z".' + type: string + inclusiveStartTime: + description: 'The inclusive start date and time from which to + start streaming journal data. This parameter must be in ISO + 8601 date and time format and in Universal Coordinated Time + (UTC). For example: "2019-06-13T21:36:34Z". This cannot be + in the future and must be before exclusive_end_time. If you + provide a value that is before the ledger''s CreationDateTime, + QLDB effectively defaults it to the ledger''s CreationDateTime.' + type: string + kinesisConfiguration: + description: The configuration settings of the Kinesis Data Streams + destination for your stream request. Documented below. + properties: + aggregationEnabled: + description: 'Enables QLDB to publish multiple data records + in a single Kinesis Data Streams record, increasing the + number of records sent per API call. Default: true.' 
+ type: boolean + streamArn: + description: The Amazon Resource Name (ARN) of the Kinesis + Data Streams resource. + type: string + streamArnRef: + description: Reference to a Stream in kinesis to populate + streamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamArnSelector: + description: Selector for a Stream in kinesis to populate + streamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + ledgerName: + description: The name of the QLDB ledger. + type: string + ledgerNameRef: + description: Reference to a Ledger in qldb to populate ledgerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ledgerNameSelector: + description: Selector for a Ledger in qldb to populate ledgerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role that + grants QLDB permissions for a journal stream to write data records + to a Kinesis Data Streams resource. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamName: + description: The name that you want to assign to the QLDB journal + stream. User-defined names can help identify and indicate the + purpose of a stream. Your stream name must be unique among + other active streams for a given ledger. Stream names have the + same naming constraints as ledger names, as defined in the Amazon + QLDB Developer Guide. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.inclusiveStartTime is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.inclusiveStartTime) + || (has(self.initProvider) && has(self.initProvider.inclusiveStartTime))' + - message: spec.forProvider.kinesisConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kinesisConfiguration) + || (has(self.initProvider) && has(self.initProvider.kinesisConfiguration))' + - message: spec.forProvider.streamName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.streamName) + || (has(self.initProvider) && has(self.initProvider.streamName))' + status: + description: StreamStatus defines the observed state of Stream. + properties: + atProvider: + properties: + arn: + description: The ARN of the QLDB Stream. + type: string + exclusiveEndTime: + description: 'The exclusive date and time that specifies when + the stream ends. If you don''t define this parameter, the stream + runs indefinitely until you cancel it. It must be in ISO 8601 + date and time format and in Universal Coordinated Time (UTC). + For example: "2019-06-13T21:36:34Z".' + type: string + id: + description: The ID of the QLDB Stream. + type: string + inclusiveStartTime: + description: 'The inclusive start date and time from which to + start streaming journal data. This parameter must be in ISO + 8601 date and time format and in Universal Coordinated Time + (UTC). For example: "2019-06-13T21:36:34Z". This cannot be + in the future and must be before exclusive_end_time. 
If you + provide a value that is before the ledger''s CreationDateTime, + QLDB effectively defaults it to the ledger''s CreationDateTime.' + type: string + kinesisConfiguration: + description: The configuration settings of the Kinesis Data Streams + destination for your stream request. Documented below. + properties: + aggregationEnabled: + description: 'Enables QLDB to publish multiple data records + in a single Kinesis Data Streams record, increasing the + number of records sent per API call. Default: true.' + type: boolean + streamArn: + description: The Amazon Resource Name (ARN) of the Kinesis + Data Streams resource. + type: string + type: object + ledgerName: + description: The name of the QLDB ledger. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role that + grants QLDB permissions for a journal stream to write data records + to a Kinesis Data Streams resource. + type: string + streamName: + description: The name that you want to assign to the QLDB journal + stream. User-defined names can help identify and indicate the + purpose of a stream. Your stream name must be unique among + other active streams for a given ledger. Stream names have the + same naming constraints as ledger names, as defined in the Amazon + QLDB Developer Guide. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/rds.aws.upbound.io_clusters.yaml b/package/crds/rds.aws.upbound.io_clusters.yaml index 12c07b3a81..c493873489 100644 --- a/package/crds/rds.aws.upbound.io_clusters.yaml +++ b/package/crds/rds.aws.upbound.io_clusters.yaml @@ -2724,3 +2724,2685 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Manages an RDS Aurora + Cluster or a RDS Multi-AZ DB Cluster + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allocatedStorage: + description: The amount of storage in gibibytes (GiB) to allocate + to each DB instance in the Multi-AZ DB cluster. + type: number + allowMajorVersionUpgrade: + description: Enable to allow major engine version upgrades when + changing engine versions. Defaults to false. + type: boolean + applyImmediately: + description: Specifies whether any cluster modifications are applied + immediately, or during the next maintenance window. Default + is false. See Amazon RDS Documentation for more information. + type: boolean + autoGeneratePassword: + description: If true, the password will be auto-generated and + stored in the Secret referenced by the masterPasswordSecretRef + field. + type: boolean + availabilityZones: + description: |- + List of EC2 Availability Zones for the DB cluster storage where DB cluster instances can be created. + We recommend specifying 3 AZs or using the if necessary. + A maximum of 3 AZs can be configured. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + backtrackWindow: + description: Target backtrack window, in seconds. Only available + for aurora and aurora-mysql engines currently. To disable backtracking, + set this value to 0. Defaults to 0. Must be between 0 and 259200 + (72 hours) + type: number + backupRetentionPeriod: + description: Days to retain backups for. Default 1 + type: number + clusterMembers: + description: – List of RDS Instances that are a part of this cluster + items: + type: string + type: array + x-kubernetes-list-type: set + copyTagsToSnapshot: + description: – Copy all Cluster tags to snapshots. Default is + false. + type: boolean + databaseName: + description: 'Name for an automatically created database on cluster + creation. There are different naming restrictions per database + engine: RDS Naming Constraints' + type: string + dbClusterInstanceClass: + description: The compute and memory capacity of each DB instance + in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all + DB instance classes are available in all AWS Regions, or for + all database engines. For the full list of DB instance classes + and availability for your engine, see DB instance class in the + Amazon RDS User Guide. + type: string + dbClusterParameterGroupName: + description: A cluster parameter group to associate with the cluster. + type: string + dbClusterParameterGroupNameRef: + description: Reference to a ClusterParameterGroup in rds to populate + dbClusterParameterGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbClusterParameterGroupNameSelector: + description: Selector for a ClusterParameterGroup in rds to populate + dbClusterParameterGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dbInstanceParameterGroupName: + description: Instance parameter group to associate with all instances + of the DB cluster. The db_instance_parameter_group_name parameter + is only valid in combination with the allow_major_version_upgrade + parameter. 
+ type: string + dbInstanceParameterGroupNameRef: + description: Reference to a ParameterGroup in rds to populate + dbInstanceParameterGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbInstanceParameterGroupNameSelector: + description: Selector for a ParameterGroup in rds to populate + dbInstanceParameterGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dbSubnetGroupName: + description: |- + DB subnet group to associate with this DB cluster. + NOTE: This must match the db_subnet_group_name specified on every aws_rds_cluster_instance in the cluster. + type: string + dbSubnetGroupNameRef: + description: Reference to a SubnetGroup in rds to populate dbSubnetGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbSubnetGroupNameSelector: + description: Selector for a SubnetGroup in rds to populate dbSubnetGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dbSystemId: + description: For use with RDS Custom. + type: string + deleteAutomatedBackups: + description: Specifies whether to remove automated backups immediately + after the DB cluster is deleted. Default is true. + type: boolean + deletionProtection: + description: |- + If the DB cluster should have deletion protection enabled. + The database can't be deleted when this value is set to true. + The default is false. + type: boolean + domain: + description: The ID of the Directory Service Active Directory + domain to create the cluster in. + type: string + domainIamRoleName: + description: The name of the IAM role to be used when making API + calls to the Directory Service. + type: string + enableGlobalWriteForwarding: + description: Whether cluster should forward writes to an associated + global cluster. Applied to secondary clusters to enable them + to forward writes to an aws_rds_global_cluster's primary cluster. + See the User Guide for Aurora for more information. 
+ type: boolean + enableHttpEndpoint: + description: 'Enable HTTP endpoint (data API). Only valid for + some combinations of engine_mode, engine and engine_version + and only available in some regions. See the Region and version + availability section of the documentation. This option also + does not work with any of these options specified: snapshot_identifier, + replication_source_identifier, s3_import.' + type: boolean + enableLocalWriteForwarding: + description: 'Whether read replicas can forward write operations + to the writer DB instance in the DB cluster. By default, write + operations aren''t allowed on reader DB instances.. See the + User Guide for Aurora for more information. NOTE: Local write + forwarding requires Aurora MySQL version 3.04 or higher.' + type: boolean + enabledCloudwatchLogsExports: + description: 'Set of log types to export to cloudwatch. If omitted, + no logs will be exported. The following log types are supported: + audit, error, general, slowquery, postgresql (PostgreSQL).' + items: + type: string + type: array + x-kubernetes-list-type: set + engine: + description: 'Name of the database engine to be used for this + DB cluster. Valid Values: aurora-mysql, aurora-postgresql, mysql, + postgres. (Note that mysql and postgres are Multi-AZ RDS clusters).' + type: string + engineMode: + description: 'Database engine mode. Valid values: global (only + valid for Aurora MySQL 1.21 and earlier), parallelquery, provisioned, + serverless. Defaults to: provisioned. See the RDS User Guide + for limitations when using serverless.' + type: string + engineVersion: + description: Database engine version. Updating this argument results + in an outage. See the Aurora MySQL and Aurora Postgres documentation + for your configured engine to determine this value, or by running + aws rds describe-db-engine-versions. For example with Aurora + MySQL 2, a potential value for this argument is 5.7.mysql_aurora.2.03.2. 
+ The value can contain a partial version where supported by the + API. The actual engine version used is returned in the attribute + engine_version_actual, , see Attribute Reference below. + type: string + finalSnapshotIdentifier: + description: Name of your final DB snapshot when this DB cluster + is deleted. If omitted, no final snapshot will be made. + type: string + globalClusterIdentifier: + description: Global cluster identifier specified on aws_rds_global_cluster. + type: string + iamDatabaseAuthenticationEnabled: + description: Specifies whether or not mappings of AWS Identity + and Access Management (IAM) accounts to database accounts is + enabled. Please see AWS Documentation for availability and limitations. + type: boolean + iops: + description: Amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for each DB instance in + the Multi-AZ DB cluster. For information about valid Iops values, + see Amazon RDS Provisioned IOPS storage to improve performance + in the Amazon RDS User Guide. (This setting is required to create + a Multi-AZ DB cluster). Must be a multiple between .5 and 50 + of the storage amount for the DB cluster. + type: number + kmsKeyId: + description: ARN for the KMS encryption key. When specifying kms_key_id, + storage_encrypted needs to be set to true. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + manageMasterUserPassword: + description: Set to true to allow RDS to manage the master user + password in Secrets Manager. Cannot be set if master_password + is provided. + type: boolean + masterPasswordSecretRef: + description: |- + Password for the master DB user. Note that this may show up in logs, and it will be stored in the state file. 
Please refer to the RDS Naming Constraints. Cannot be set if manage_master_user_password is set to true. + Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + masterUserSecretKmsKeyId: + description: Amazon Web Services KMS key identifier is the key + ARN, key ID, alias ARN, or alias name for the KMS key. To use + a KMS key in a different Amazon Web Services account, specify + the key ARN or alias ARN. If not specified, the default KMS + key for your Amazon Web Services account is used. + type: string + masterUserSecretKmsKeyIdRef: + description: Reference to a Key in kms to populate masterUserSecretKmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + masterUserSecretKmsKeyIdSelector: + description: Selector for a Key in kms to populate masterUserSecretKmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + masterUsername: + description: Username for the master DB user. Please refer to + the RDS Naming Constraints. This argument does not support in-place + updates and cannot be changed during a restore from snapshot. + type: string + networkType: + description: 'Network type of the cluster. Valid values: IPV4, + DUAL.' + type: string + port: + description: Port on which the DB accepts connections + type: number + preferredBackupWindow: + description: 'Daily time range during which automated backups + are created if automated backups are enabled using the BackupRetentionPeriod + parameter.Time in UTC. 
Default: A 30-minute window selected + at random from an 8-hour block of time per regionE.g., 04:00-09:00' + type: string + preferredMaintenanceWindow: + description: Weekly time range during which system maintenance + can occur, in (UTC) e.g., wed:04:00-wed:04:30 + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + replicationSourceIdentifier: + description: ARN of a source DB cluster or DB instance if this + DB cluster is to be created as a Read Replica. + type: string + restoreToPointInTime: + description: Nested attribute for point in time restore. More + details below. + properties: + restoreToTime: + description: Date and time in UTC format to restore the database + cluster to. Conflicts with use_latest_restorable_time. + type: string + restoreType: + description: |- + Type of restore to be performed. + Valid options are full-copy (default) and copy-on-write. + type: string + sourceClusterIdentifier: + description: Identifier of the source database cluster from + which to restore. When restoring from a cluster in another + AWS account, the identifier is the ARN of that cluster. + type: string + sourceClusterIdentifierRef: + description: Reference to a Cluster in rds to populate sourceClusterIdentifier. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceClusterIdentifierSelector: + description: Selector for a Cluster in rds to populate sourceClusterIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + useLatestRestorableTime: + description: Set to true to restore the database cluster to + the latest restorable backup time. Defaults to false. Conflicts + with restore_to_time. + type: boolean + type: object + s3Import: + description: Port on which the DB accepts connections + properties: + bucketName: + description: Bucket name where your backup is stored + type: string + bucketNameRef: + description: Reference to a Bucket in s3 to populate bucketName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a Bucket in s3 to populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bucketPrefix: + description: Can be blank, but is the path to your backup + type: string + ingestionRole: + description: Role applied to load the data. + type: string + sourceEngine: + description: Source engine for the backup + type: string + sourceEngineVersion: + description: Version of the source engine used to make the + backup + type: string + type: object + scalingConfiguration: + description: Nested attribute with scaling properties. Only valid + when engine_mode is set to serverless. More details below. + properties: + autoPause: + description: Whether to enable automatic pause. A DB cluster + can be paused only when it's idle (it has no connections). + If a DB cluster is paused for more than seven days, the + DB cluster might be backed up with a snapshot. In this case, + the DB cluster is restored when there is a request to connect + to it. Defaults to true. + type: boolean + maxCapacity: + description: Maximum capacity for an Aurora DB cluster in + serverless DB engine mode. The maximum capacity must be + greater than or equal to the minimum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 16. + type: number + minCapacity: + description: Minimum capacity for an Aurora DB cluster in + serverless DB engine mode. The minimum capacity must be + lesser than or equal to the maximum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 1. + type: number + secondsUntilAutoPause: + description: Time, in seconds, before an Aurora DB cluster + in serverless mode is paused. Valid values are 300 through + 86400. Defaults to 300. + type: number + timeoutAction: + description: 'Action to take when the timeout is reached. 
+ Valid values: ForceApplyCapacityChange, RollbackCapacityChange. + Defaults to RollbackCapacityChange. See documentation.' + type: string + type: object + serverlessv2ScalingConfiguration: + description: Nested attribute with scaling properties for ServerlessV2. + Only valid when engine_mode is set to provisioned. More details + below. + properties: + maxCapacity: + description: Maximum capacity for an Aurora DB cluster in + serverless DB engine mode. The maximum capacity must be + greater than or equal to the minimum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 16. + type: number + minCapacity: + description: Minimum capacity for an Aurora DB cluster in + serverless DB engine mode. The minimum capacity must be + lesser than or equal to the maximum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 1. + type: number + type: object + skipFinalSnapshot: + description: Determines whether a final DB snapshot is created + before the DB cluster is deleted. If true is specified, no DB + snapshot is created. If false is specified, a DB snapshot is + created before the DB cluster is deleted, using the value from + final_snapshot_identifier. Default is false. + type: boolean + snapshotIdentifier: + description: Specifies whether or not to create this cluster from + a snapshot. You can use either the name or ARN when specifying + a DB cluster snapshot, or the ARN when specifying a DB snapshot. + Conflicts with global_cluster_identifier. Clusters cannot be + restored from snapshot and joined to an existing global cluster + in a single operation. See the AWS documentation or the Global + Cluster Restored From Snapshot example for instructions on building + a global cluster starting with a snapshot. 
+ type: string + sourceRegion: + description: The source region for an encrypted replica DB cluster. + type: string + storageEncrypted: + description: Specifies whether the DB cluster is encrypted. The + default is false for provisioned engine_mode and true for serverless + engine_mode. When restoring an unencrypted snapshot_identifier, + the kms_key_id argument must be provided to encrypt the restored + cluster. + type: boolean + storageType: + description: '(Forces new for Multi-AZ DB clusters) Specifies + the storage type to be associated with the DB cluster. For Aurora + DB clusters, storage_type modifications can be done in-place. + For Multi-AZ DB Clusters, the iops argument must also be set. + Valid values are: "", aurora-iopt1 (Aurora DB Clusters); io1, + io2 (Multi-AZ DB Clusters). Default: "" (Aurora DB Clusters); + io1 (Multi-AZ DB Clusters).' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: List of VPC security groups to associate with the + Cluster + items: + type: string + type: array + x-kubernetes-list-type: set + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allocatedStorage: + description: The amount of storage in gibibytes (GiB) to allocate + to each DB instance in the Multi-AZ DB cluster. + type: number + allowMajorVersionUpgrade: + description: Enable to allow major engine version upgrades when + changing engine versions. Defaults to false. + type: boolean + applyImmediately: + description: Specifies whether any cluster modifications are applied + immediately, or during the next maintenance window. Default + is false. See Amazon RDS Documentation for more information. + type: boolean + availabilityZones: + description: |- + List of EC2 Availability Zones for the DB cluster storage where DB cluster instances can be created. + We recommend specifying 3 AZs or using the if necessary. + A maximum of 3 AZs can be configured. + items: + type: string + type: array + x-kubernetes-list-type: set + backtrackWindow: + description: Target backtrack window, in seconds. Only available + for aurora and aurora-mysql engines currently. To disable backtracking, + set this value to 0. Defaults to 0. Must be between 0 and 259200 + (72 hours) + type: number + backupRetentionPeriod: + description: Days to retain backups for. Default 1 + type: number + clusterMembers: + description: – List of RDS Instances that are a part of this cluster + items: + type: string + type: array + x-kubernetes-list-type: set + copyTagsToSnapshot: + description: – Copy all Cluster tags to snapshots. Default is + false. + type: boolean + databaseName: + description: 'Name for an automatically created database on cluster + creation. 
There are different naming restrictions per database + engine: RDS Naming Constraints' + type: string + dbClusterInstanceClass: + description: The compute and memory capacity of each DB instance + in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all + DB instance classes are available in all AWS Regions, or for + all database engines. For the full list of DB instance classes + and availability for your engine, see DB instance class in the + Amazon RDS User Guide. + type: string + dbClusterParameterGroupName: + description: A cluster parameter group to associate with the cluster. + type: string + dbClusterParameterGroupNameRef: + description: Reference to a ClusterParameterGroup in rds to populate + dbClusterParameterGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbClusterParameterGroupNameSelector: + description: Selector for a ClusterParameterGroup in rds to populate + dbClusterParameterGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dbInstanceParameterGroupName: + description: Instance parameter group to associate with all instances + of the DB cluster. The db_instance_parameter_group_name parameter + is only valid in combination with the allow_major_version_upgrade + parameter. + type: string + dbInstanceParameterGroupNameRef: + description: Reference to a ParameterGroup in rds to populate + dbInstanceParameterGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbInstanceParameterGroupNameSelector: + description: Selector for a ParameterGroup in rds to populate + dbInstanceParameterGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dbSubnetGroupName: + description: |- + DB subnet group to associate with this DB cluster. + NOTE: This must match the db_subnet_group_name specified on every aws_rds_cluster_instance in the cluster. + type: string + dbSubnetGroupNameRef: + description: Reference to a SubnetGroup in rds to populate dbSubnetGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbSubnetGroupNameSelector: + description: Selector for a SubnetGroup in rds to populate dbSubnetGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dbSystemId: + description: For use with RDS Custom. + type: string + deleteAutomatedBackups: + description: Specifies whether to remove automated backups immediately + after the DB cluster is deleted. Default is true. + type: boolean + deletionProtection: + description: |- + If the DB cluster should have deletion protection enabled. + The database can't be deleted when this value is set to true. + The default is false. + type: boolean + domain: + description: The ID of the Directory Service Active Directory + domain to create the cluster in. + type: string + domainIamRoleName: + description: The name of the IAM role to be used when making API + calls to the Directory Service. + type: string + enableGlobalWriteForwarding: + description: Whether cluster should forward writes to an associated + global cluster. Applied to secondary clusters to enable them + to forward writes to an aws_rds_global_cluster's primary cluster. + See the User Guide for Aurora for more information. + type: boolean + enableHttpEndpoint: + description: 'Enable HTTP endpoint (data API). Only valid for + some combinations of engine_mode, engine and engine_version + and only available in some regions. See the Region and version + availability section of the documentation. This option also + does not work with any of these options specified: snapshot_identifier, + replication_source_identifier, s3_import.' + type: boolean + enableLocalWriteForwarding: + description: 'Whether read replicas can forward write operations + to the writer DB instance in the DB cluster. By default, write + operations aren''t allowed on reader DB instances.. See the + User Guide for Aurora for more information. NOTE: Local write + forwarding requires Aurora MySQL version 3.04 or higher.' + type: boolean + enabledCloudwatchLogsExports: + description: 'Set of log types to export to cloudwatch. If omitted, + no logs will be exported. 
The following log types are supported: + audit, error, general, slowquery, postgresql (PostgreSQL).' + items: + type: string + type: array + x-kubernetes-list-type: set + engine: + description: 'Name of the database engine to be used for this + DB cluster. Valid Values: aurora-mysql, aurora-postgresql, mysql, + postgres. (Note that mysql and postgres are Multi-AZ RDS clusters).' + type: string + engineMode: + description: 'Database engine mode. Valid values: global (only + valid for Aurora MySQL 1.21 and earlier), parallelquery, provisioned, + serverless. Defaults to: provisioned. See the RDS User Guide + for limitations when using serverless.' + type: string + engineVersion: + description: Database engine version. Updating this argument results + in an outage. See the Aurora MySQL and Aurora Postgres documentation + for your configured engine to determine this value, or by running + aws rds describe-db-engine-versions. For example with Aurora + MySQL 2, a potential value for this argument is 5.7.mysql_aurora.2.03.2. + The value can contain a partial version where supported by the + API. The actual engine version used is returned in the attribute + engine_version_actual, , see Attribute Reference below. + type: string + finalSnapshotIdentifier: + description: Name of your final DB snapshot when this DB cluster + is deleted. If omitted, no final snapshot will be made. + type: string + globalClusterIdentifier: + description: Global cluster identifier specified on aws_rds_global_cluster. + type: string + iamDatabaseAuthenticationEnabled: + description: Specifies whether or not mappings of AWS Identity + and Access Management (IAM) accounts to database accounts is + enabled. Please see AWS Documentation for availability and limitations. + type: boolean + iops: + description: Amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for each DB instance in + the Multi-AZ DB cluster. 
For information about valid Iops values, + see Amazon RDS Provisioned IOPS storage to improve performance + in the Amazon RDS User Guide. (This setting is required to create + a Multi-AZ DB cluster). Must be a multiple between .5 and 50 + of the storage amount for the DB cluster. + type: number + kmsKeyId: + description: ARN for the KMS encryption key. When specifying kms_key_id, + storage_encrypted needs to be set to true. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + manageMasterUserPassword: + description: Set to true to allow RDS to manage the master user + password in Secrets Manager. Cannot be set if master_password + is provided. + type: boolean + masterPasswordSecretRef: + description: |- + Password for the master DB user. Note that this may show up in logs, and it will be stored in the state file. Please refer to the RDS Naming Constraints. Cannot be set if manage_master_user_password is set to true. + Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + masterUserSecretKmsKeyId: + description: Amazon Web Services KMS key identifier is the key + ARN, key ID, alias ARN, or alias name for the KMS key. To use + a KMS key in a different Amazon Web Services account, specify + the key ARN or alias ARN. If not specified, the default KMS + key for your Amazon Web Services account is used. 
+ type: string + masterUserSecretKmsKeyIdRef: + description: Reference to a Key in kms to populate masterUserSecretKmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + masterUserSecretKmsKeyIdSelector: + description: Selector for a Key in kms to populate masterUserSecretKmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + masterUsername: + description: Username for the master DB user. Please refer to + the RDS Naming Constraints. This argument does not support in-place + updates and cannot be changed during a restore from snapshot. + type: string + networkType: + description: 'Network type of the cluster. Valid values: IPV4, + DUAL.' + type: string + port: + description: Port on which the DB accepts connections + type: number + preferredBackupWindow: + description: 'Daily time range during which automated backups + are created if automated backups are enabled using the BackupRetentionPeriod + parameter.Time in UTC. Default: A 30-minute window selected + at random from an 8-hour block of time per regionE.g., 04:00-09:00' + type: string + preferredMaintenanceWindow: + description: Weekly time range during which system maintenance + can occur, in (UTC) e.g., wed:04:00-wed:04:30 + type: string + replicationSourceIdentifier: + description: ARN of a source DB cluster or DB instance if this + DB cluster is to be created as a Read Replica. + type: string + restoreToPointInTime: + description: Nested attribute for point in time restore. More + details below. + properties: + restoreToTime: + description: Date and time in UTC format to restore the database + cluster to. Conflicts with use_latest_restorable_time. + type: string + restoreType: + description: |- + Type of restore to be performed. + Valid options are full-copy (default) and copy-on-write. + type: string + sourceClusterIdentifier: + description: Identifier of the source database cluster from + which to restore. When restoring from a cluster in another + AWS account, the identifier is the ARN of that cluster. 
+ type: string + sourceClusterIdentifierRef: + description: Reference to a Cluster in rds to populate sourceClusterIdentifier. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceClusterIdentifierSelector: + description: Selector for a Cluster in rds to populate sourceClusterIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + useLatestRestorableTime: + description: Set to true to restore the database cluster to + the latest restorable backup time. Defaults to false. Conflicts + with restore_to_time. + type: boolean + type: object + s3Import: + description: Port on which the DB accepts connections + properties: + bucketName: + description: Bucket name where your backup is stored + type: string + bucketNameRef: + description: Reference to a Bucket in s3 to populate bucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a Bucket in s3 to populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + bucketPrefix: + description: Can be blank, but is the path to your backup + type: string + ingestionRole: + description: Role applied to load the data. + type: string + sourceEngine: + description: Source engine for the backup + type: string + sourceEngineVersion: + description: Version of the source engine used to make the + backup + type: string + type: object + scalingConfiguration: + description: Nested attribute with scaling properties. Only valid + when engine_mode is set to serverless. More details below. + properties: + autoPause: + description: Whether to enable automatic pause. A DB cluster + can be paused only when it's idle (it has no connections). + If a DB cluster is paused for more than seven days, the + DB cluster might be backed up with a snapshot. In this case, + the DB cluster is restored when there is a request to connect + to it. Defaults to true. + type: boolean + maxCapacity: + description: Maximum capacity for an Aurora DB cluster in + serverless DB engine mode. 
The maximum capacity must be + greater than or equal to the minimum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 16. + type: number + minCapacity: + description: Minimum capacity for an Aurora DB cluster in + serverless DB engine mode. The minimum capacity must be + lesser than or equal to the maximum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 1. + type: number + secondsUntilAutoPause: + description: Time, in seconds, before an Aurora DB cluster + in serverless mode is paused. Valid values are 300 through + 86400. Defaults to 300. + type: number + timeoutAction: + description: 'Action to take when the timeout is reached. + Valid values: ForceApplyCapacityChange, RollbackCapacityChange. + Defaults to RollbackCapacityChange. See documentation.' + type: string + type: object + serverlessv2ScalingConfiguration: + description: Nested attribute with scaling properties for ServerlessV2. + Only valid when engine_mode is set to provisioned. More details + below. + properties: + maxCapacity: + description: Maximum capacity for an Aurora DB cluster in + serverless DB engine mode. The maximum capacity must be + greater than or equal to the minimum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 16. + type: number + minCapacity: + description: Minimum capacity for an Aurora DB cluster in + serverless DB engine mode. The minimum capacity must be + lesser than or equal to the maximum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 1. 
+ type: number + type: object + skipFinalSnapshot: + description: Determines whether a final DB snapshot is created + before the DB cluster is deleted. If true is specified, no DB + snapshot is created. If false is specified, a DB snapshot is + created before the DB cluster is deleted, using the value from + final_snapshot_identifier. Default is false. + type: boolean + snapshotIdentifier: + description: Specifies whether or not to create this cluster from + a snapshot. You can use either the name or ARN when specifying + a DB cluster snapshot, or the ARN when specifying a DB snapshot. + Conflicts with global_cluster_identifier. Clusters cannot be + restored from snapshot and joined to an existing global cluster + in a single operation. See the AWS documentation or the Global + Cluster Restored From Snapshot example for instructions on building + a global cluster starting with a snapshot. + type: string + sourceRegion: + description: The source region for an encrypted replica DB cluster. + type: string + storageEncrypted: + description: Specifies whether the DB cluster is encrypted. The + default is false for provisioned engine_mode and true for serverless + engine_mode. When restoring an unencrypted snapshot_identifier, + the kms_key_id argument must be provided to encrypt the restored + cluster. + type: boolean + storageType: + description: '(Forces new for Multi-AZ DB clusters) Specifies + the storage type to be associated with the DB cluster. For Aurora + DB clusters, storage_type modifications can be done in-place. + For Multi-AZ DB Clusters, the iops argument must also be set. + Valid values are: "", aurora-iopt1 (Aurora DB Clusters); io1, + io2 (Multi-AZ DB Clusters). Default: "" (Aurora DB Clusters); + io1 (Multi-AZ DB Clusters).' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: List of VPC security groups to associate with the + Cluster + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.engine is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.engine) + || (has(self.initProvider) && has(self.initProvider.engine))' + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + allocatedStorage: + description: The amount of storage in gibibytes (GiB) to allocate + to each DB instance in the Multi-AZ DB cluster. + type: number + allowMajorVersionUpgrade: + description: Enable to allow major engine version upgrades when + changing engine versions. Defaults to false. + type: boolean + applyImmediately: + description: Specifies whether any cluster modifications are applied + immediately, or during the next maintenance window. Default + is false. See Amazon RDS Documentation for more information. + type: boolean + arn: + description: Amazon Resource Name (ARN) of cluster + type: string + availabilityZones: + description: |- + List of EC2 Availability Zones for the DB cluster storage where DB cluster instances can be created. + We recommend specifying 3 AZs or using the if necessary. + A maximum of 3 AZs can be configured. + items: + type: string + type: array + x-kubernetes-list-type: set + backtrackWindow: + description: Target backtrack window, in seconds. Only available + for aurora and aurora-mysql engines currently. To disable backtracking, + set this value to 0. Defaults to 0. Must be between 0 and 259200 + (72 hours) + type: number + backupRetentionPeriod: + description: Days to retain backups for. 
Default 1 + type: number + clusterMembers: + description: – List of RDS Instances that are a part of this cluster + items: + type: string + type: array + x-kubernetes-list-type: set + clusterResourceId: + description: RDS Cluster Resource ID + type: string + copyTagsToSnapshot: + description: – Copy all Cluster tags to snapshots. Default is + false. + type: boolean + databaseName: + description: 'Name for an automatically created database on cluster + creation. There are different naming restrictions per database + engine: RDS Naming Constraints' + type: string + dbClusterInstanceClass: + description: The compute and memory capacity of each DB instance + in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all + DB instance classes are available in all AWS Regions, or for + all database engines. For the full list of DB instance classes + and availability for your engine, see DB instance class in the + Amazon RDS User Guide. + type: string + dbClusterParameterGroupName: + description: A cluster parameter group to associate with the cluster. + type: string + dbInstanceParameterGroupName: + description: Instance parameter group to associate with all instances + of the DB cluster. The db_instance_parameter_group_name parameter + is only valid in combination with the allow_major_version_upgrade + parameter. + type: string + dbSubnetGroupName: + description: |- + DB subnet group to associate with this DB cluster. + NOTE: This must match the db_subnet_group_name specified on every aws_rds_cluster_instance in the cluster. + type: string + dbSystemId: + description: For use with RDS Custom. + type: string + deleteAutomatedBackups: + description: Specifies whether to remove automated backups immediately + after the DB cluster is deleted. Default is true. + type: boolean + deletionProtection: + description: |- + If the DB cluster should have deletion protection enabled. + The database can't be deleted when this value is set to true. + The default is false. 
+ type: boolean + domain: + description: The ID of the Directory Service Active Directory + domain to create the cluster in. + type: string + domainIamRoleName: + description: The name of the IAM role to be used when making API + calls to the Directory Service. + type: string + enableGlobalWriteForwarding: + description: Whether cluster should forward writes to an associated + global cluster. Applied to secondary clusters to enable them + to forward writes to an aws_rds_global_cluster's primary cluster. + See the User Guide for Aurora for more information. + type: boolean + enableHttpEndpoint: + description: 'Enable HTTP endpoint (data API). Only valid for + some combinations of engine_mode, engine and engine_version + and only available in some regions. See the Region and version + availability section of the documentation. This option also + does not work with any of these options specified: snapshot_identifier, + replication_source_identifier, s3_import.' + type: boolean + enableLocalWriteForwarding: + description: 'Whether read replicas can forward write operations + to the writer DB instance in the DB cluster. By default, write + operations aren''t allowed on reader DB instances.. See the + User Guide for Aurora for more information. NOTE: Local write + forwarding requires Aurora MySQL version 3.04 or higher.' + type: boolean + enabledCloudwatchLogsExports: + description: 'Set of log types to export to cloudwatch. If omitted, + no logs will be exported. The following log types are supported: + audit, error, general, slowquery, postgresql (PostgreSQL).' + items: + type: string + type: array + x-kubernetes-list-type: set + endpoint: + description: DNS address of the RDS instance + type: string + engine: + description: 'Name of the database engine to be used for this + DB cluster. Valid Values: aurora-mysql, aurora-postgresql, mysql, + postgres. (Note that mysql and postgres are Multi-AZ RDS clusters).' 
+ type: string + engineMode: + description: 'Database engine mode. Valid values: global (only + valid for Aurora MySQL 1.21 and earlier), parallelquery, provisioned, + serverless. Defaults to: provisioned. See the RDS User Guide + for limitations when using serverless.' + type: string + engineVersion: + description: Database engine version. Updating this argument results + in an outage. See the Aurora MySQL and Aurora Postgres documentation + for your configured engine to determine this value, or by running + aws rds describe-db-engine-versions. For example with Aurora + MySQL 2, a potential value for this argument is 5.7.mysql_aurora.2.03.2. + The value can contain a partial version where supported by the + API. The actual engine version used is returned in the attribute + engine_version_actual, , see Attribute Reference below. + type: string + engineVersionActual: + description: Running version of the database. + type: string + finalSnapshotIdentifier: + description: Name of your final DB snapshot when this DB cluster + is deleted. If omitted, no final snapshot will be made. + type: string + globalClusterIdentifier: + description: Global cluster identifier specified on aws_rds_global_cluster. + type: string + hostedZoneId: + description: Route53 Hosted Zone ID of the endpoint + type: string + iamDatabaseAuthenticationEnabled: + description: Specifies whether or not mappings of AWS Identity + and Access Management (IAM) accounts to database accounts is + enabled. Please see AWS Documentation for availability and limitations. + type: boolean + iamRoles: + description: List of ARNs for the IAM roles to associate to the + RDS Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: RDS Cluster Identifier + type: string + iops: + description: Amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for each DB instance in + the Multi-AZ DB cluster. 
For information about valid Iops values, + see Amazon RDS Provisioned IOPS storage to improve performance + in the Amazon RDS User Guide. (This setting is required to create + a Multi-AZ DB cluster). Must be a multiple between .5 and 50 + of the storage amount for the DB cluster. + type: number + kmsKeyId: + description: ARN for the KMS encryption key. When specifying kms_key_id, + storage_encrypted needs to be set to true. + type: string + manageMasterUserPassword: + description: Set to true to allow RDS to manage the master user + password in Secrets Manager. Cannot be set if master_password + is provided. + type: boolean + masterUserSecret: + description: Block that specifies the master user secret. Only + available when manage_master_user_password is set to true. Documented + below. + items: + properties: + kmsKeyId: + description: Amazon Web Services KMS key identifier that + is used to encrypt the secret. + type: string + secretArn: + description: Amazon Resource Name (ARN) of the secret. + type: string + secretStatus: + description: 'Status of the secret. Valid Values: creating + | active | rotating | impaired.' + type: string + type: object + type: array + masterUserSecretKmsKeyId: + description: Amazon Web Services KMS key identifier is the key + ARN, key ID, alias ARN, or alias name for the KMS key. To use + a KMS key in a different Amazon Web Services account, specify + the key ARN or alias ARN. If not specified, the default KMS + key for your Amazon Web Services account is used. + type: string + masterUsername: + description: Username for the master DB user. Please refer to + the RDS Naming Constraints. This argument does not support in-place + updates and cannot be changed during a restore from snapshot. + type: string + networkType: + description: 'Network type of the cluster. Valid values: IPV4, + DUAL.' 
+ type: string + port: + description: Port on which the DB accepts connections + type: number + preferredBackupWindow: + description: 'Daily time range during which automated backups + are created if automated backups are enabled using the BackupRetentionPeriod + parameter.Time in UTC. Default: A 30-minute window selected + at random from an 8-hour block of time per regionE.g., 04:00-09:00' + type: string + preferredMaintenanceWindow: + description: Weekly time range during which system maintenance + can occur, in (UTC) e.g., wed:04:00-wed:04:30 + type: string + readerEndpoint: + description: |- + Read-only endpoint for the Aurora cluster, automatically + load-balanced across replicas + type: string + replicationSourceIdentifier: + description: ARN of a source DB cluster or DB instance if this + DB cluster is to be created as a Read Replica. + type: string + restoreToPointInTime: + description: Nested attribute for point in time restore. More + details below. + properties: + restoreToTime: + description: Date and time in UTC format to restore the database + cluster to. Conflicts with use_latest_restorable_time. + type: string + restoreType: + description: |- + Type of restore to be performed. + Valid options are full-copy (default) and copy-on-write. + type: string + sourceClusterIdentifier: + description: Identifier of the source database cluster from + which to restore. When restoring from a cluster in another + AWS account, the identifier is the ARN of that cluster. + type: string + useLatestRestorableTime: + description: Set to true to restore the database cluster to + the latest restorable backup time. Defaults to false. Conflicts + with restore_to_time. 
+ type: boolean + type: object + s3Import: + description: Port on which the DB accepts connections + properties: + bucketName: + description: Bucket name where your backup is stored + type: string + bucketPrefix: + description: Can be blank, but is the path to your backup + type: string + ingestionRole: + description: Role applied to load the data. + type: string + sourceEngine: + description: Source engine for the backup + type: string + sourceEngineVersion: + description: Version of the source engine used to make the + backup + type: string + type: object + scalingConfiguration: + description: Nested attribute with scaling properties. Only valid + when engine_mode is set to serverless. More details below. + properties: + autoPause: + description: Whether to enable automatic pause. A DB cluster + can be paused only when it's idle (it has no connections). + If a DB cluster is paused for more than seven days, the + DB cluster might be backed up with a snapshot. In this case, + the DB cluster is restored when there is a request to connect + to it. Defaults to true. + type: boolean + maxCapacity: + description: Maximum capacity for an Aurora DB cluster in + serverless DB engine mode. The maximum capacity must be + greater than or equal to the minimum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 16. + type: number + minCapacity: + description: Minimum capacity for an Aurora DB cluster in + serverless DB engine mode. The minimum capacity must be + lesser than or equal to the maximum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 1. + type: number + secondsUntilAutoPause: + description: Time, in seconds, before an Aurora DB cluster + in serverless mode is paused. Valid values are 300 through + 86400. 
Defaults to 300. + type: number + timeoutAction: + description: 'Action to take when the timeout is reached. + Valid values: ForceApplyCapacityChange, RollbackCapacityChange. + Defaults to RollbackCapacityChange. See documentation.' + type: string + type: object + serverlessv2ScalingConfiguration: + description: Nested attribute with scaling properties for ServerlessV2. + Only valid when engine_mode is set to provisioned. More details + below. + properties: + maxCapacity: + description: Maximum capacity for an Aurora DB cluster in + serverless DB engine mode. The maximum capacity must be + greater than or equal to the minimum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 16. + type: number + minCapacity: + description: Minimum capacity for an Aurora DB cluster in + serverless DB engine mode. The minimum capacity must be + lesser than or equal to the maximum capacity. Valid Aurora + MySQL capacity values are 1, 2, 4, 8, 16, 32, 64, 128, 256. + Valid Aurora PostgreSQL capacity values are (2, 4, 8, 16, + 32, 64, 192, and 384). Defaults to 1. + type: number + type: object + skipFinalSnapshot: + description: Determines whether a final DB snapshot is created + before the DB cluster is deleted. If true is specified, no DB + snapshot is created. If false is specified, a DB snapshot is + created before the DB cluster is deleted, using the value from + final_snapshot_identifier. Default is false. + type: boolean + snapshotIdentifier: + description: Specifies whether or not to create this cluster from + a snapshot. You can use either the name or ARN when specifying + a DB cluster snapshot, or the ARN when specifying a DB snapshot. + Conflicts with global_cluster_identifier. Clusters cannot be + restored from snapshot and joined to an existing global cluster + in a single operation. 
See the AWS documentation or the Global + Cluster Restored From Snapshot example for instructions on building + a global cluster starting with a snapshot. + type: string + sourceRegion: + description: The source region for an encrypted replica DB cluster. + type: string + storageEncrypted: + description: Specifies whether the DB cluster is encrypted. The + default is false for provisioned engine_mode and true for serverless + engine_mode. When restoring an unencrypted snapshot_identifier, + the kms_key_id argument must be provided to encrypt the restored + cluster. + type: boolean + storageType: + description: '(Forces new for Multi-AZ DB clusters) Specifies + the storage type to be associated with the DB cluster. For Aurora + DB clusters, storage_type modifications can be done in-place. + For Multi-AZ DB Clusters, the iops argument must also be set. + Valid values are: "", aurora-iopt1 (Aurora DB Clusters); io1, + io2 (Multi-AZ DB Clusters). Default: "" (Aurora DB Clusters); + io1 (Multi-AZ DB Clusters).' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIds: + description: List of VPC security groups to associate with the + Cluster + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/rds.aws.upbound.io_instances.yaml b/package/crds/rds.aws.upbound.io_instances.yaml index 4b2c948844..9a2c6507e6 100644 --- a/package/crds/rds.aws.upbound.io_instances.yaml +++ b/package/crds/rds.aws.upbound.io_instances.yaml @@ -5373,3 +5373,2786 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta3 + schema: + openAPIV3Schema: + description: Instance is the Schema for the Instances API. Provides an RDS + instance resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceSpec defines the desired state of Instance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allocatedStorage: + description: The allocated storage in gibibytes. If max_allocated_storage + is configured, this argument represents the initial storage + allocation and differences from the configuration will be ignored + automatically when Storage Autoscaling occurs. If replicate_source_db + is set, the value is ignored during the creation of the instance. + type: number + allowMajorVersionUpgrade: + description: |- + Indicates that major version + upgrades are allowed. Changing this parameter does not result in an outage and + the change is asynchronously applied as soon as possible. + type: boolean + applyImmediately: + description: |- + Specifies whether any database modifications + are applied immediately, or during the next maintenance window. Default is + false. See Amazon RDS Documentation for more + information. + type: boolean + autoGeneratePassword: + description: |- + Password for the master DB user. Note that this may show up in + logs, and it will be stored in the state file. 
Cannot be set if manage_master_user_password is set to true. + If true, the password will be auto-generated and stored in the Secret referenced by the passwordSecretRef field. + type: boolean + autoMinorVersionUpgrade: + description: |- + Indicates that minor engine upgrades + will be applied automatically to the DB instance during the maintenance window. + Defaults to true. + type: boolean + availabilityZone: + description: The AZ for the RDS instance. + type: string + backupRetentionPeriod: + description: |- + The days to retain backups for. + Must be between 0 and 35. + Default is 0. + Must be greater than 0 if the database is used as a source for a Read Replica, + uses low-downtime updates, + or will use RDS Blue/Green deployments. + type: number + backupTarget: + description: Specifies where automated backups and manual snapshots + are stored. Possible values are region (default) and outposts. + See Working with Amazon RDS on AWS Outposts for more information. + type: string + backupWindow: + description: |- + The daily time range (in UTC) during which automated backups are created if they are enabled. + Example: "09:46-10:16". Must not overlap with maintenance_window. + type: string + blueGreenUpdate: + description: |- + Enables low-downtime updates using RDS Blue/Green deployments. + See blue_green_update below. + properties: + enabled: + description: |- + Enables low-downtime updates when true. + Default is false. + type: boolean + type: object + caCertIdentifier: + description: The identifier of the CA certificate for the DB instance. + type: string + characterSetName: + description: |- + The character set name to use for DB encoding in Oracle and Microsoft SQL instances (collation). + This can't be changed. + See Oracle Character Sets Supported in Amazon RDS or + Server-Level Collation for Microsoft SQL Server for more information. + Cannot be set with replicate_source_db, restore_to_point_in_time, s3_import, or snapshot_identifier. 
+ type: string + copyTagsToSnapshot: + description: – Copy all Instance tags to snapshots. Default is + false. + type: boolean + customIamInstanceProfile: + description: The instance profile associated with the underlying + Amazon EC2 instance of an RDS Custom DB instance. + type: string + customerOwnedIpEnabled: + description: Indicates whether to enable a customer-owned IP address + (CoIP) for an RDS on Outposts DB instance. See CoIP for RDS + on Outposts for more information. + type: boolean + dbName: + description: The name of the database to create when the DB instance + is created. If this parameter is not specified, no database + is created in the DB instance. Note that this does not apply + for Oracle or SQL Server engines. See the AWS documentation + for more details on what applies for those engines. If you are + providing an Oracle db name, it needs to be in all upper case. + Cannot be specified for a replica. + type: string + dbSubnetGroupName: + description: |- + Name of DB subnet group. DB instance will + be created in the VPC associated with the DB subnet group. If unspecified, will + be created in the default VPC, or in EC2 Classic, if available. When working + with read replicas, it should be specified only if the source database + specifies an instance in another AWS Region. See DBSubnetGroupName in API + action CreateDBInstanceReadReplica + for additional read replica constraints. + type: string + dbSubnetGroupNameRef: + description: Reference to a SubnetGroup in rds to populate dbSubnetGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbSubnetGroupNameSelector: + description: Selector for a SubnetGroup in rds to populate dbSubnetGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dedicatedLogVolume: + description: Use a dedicated log volume (DLV) for the DB instance. + Requires Provisioned IOPS. See the AWS documentation for more + details. + type: boolean + deleteAutomatedBackups: + description: Specifies whether to remove automated backups immediately + after the DB instance is deleted. Default is true. 
+ type: boolean + deletionProtection: + description: If the DB instance should have deletion protection + enabled. The database can't be deleted when this value is set + to true. The default is false. + type: boolean + domain: + description: The ID of the Directory Service Active Directory + domain to create the instance in. Conflicts with domain_fqdn, + domain_ou, domain_auth_secret_arn and a domain_dns_ips. + type: string + domainAuthSecretArn: + description: The ARN for the Secrets Manager secret with the self + managed Active Directory credentials for the user joining the + domain. Conflicts with domain and domain_iam_role_name. + type: string + domainDnsIps: + description: The IPv4 DNS IP addresses of your primary and secondary + self managed Active Directory domain controllers. Two IP addresses + must be provided. If there isn't a secondary domain controller, + use the IP address of the primary domain controller for both + entries in the list. Conflicts with domain and domain_iam_role_name. + items: + type: string + type: array + x-kubernetes-list-type: set + domainFqdn: + description: The fully qualified domain name (FQDN) of the self + managed Active Directory domain. Conflicts with domain and domain_iam_role_name. + type: string + domainIamRoleName: + description: The name of the IAM role to be used when making API + calls to the Directory Service. Conflicts with domain_fqdn, + domain_ou, domain_auth_secret_arn and a domain_dns_ips. + type: string + domainOu: + description: The self managed Active Directory organizational + unit for your DB instance to join. Conflicts with domain and + domain_iam_role_name. + type: string + enabledCloudwatchLogsExports: + description: Set of log types to enable for exporting to CloudWatch + logs. If omitted, no logs will be exported. For supported values, + see the EnableCloudwatchLogsExports.member.N parameter in API + action CreateDBInstance. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + engine: + description: The database engine to use. For supported values, + see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). + Note that for Amazon Aurora instances the engine must match + the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s + engine'. For information on the difference between the available + Aurora MySQL engines see Comparison in the [Amazon RDS Release + Notes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraMySQLReleaseNotes/Welcome.html). + type: string + engineVersion: + description: The engine version to use. If `autoMinorVersionUpgrade` + is enabled, you can provide a prefix of the version such as + 5.7 (for 5.7.10). The actual engine version used is returned + in the attribute `status.atProvider.engineVersionActual`. For + supported values, see the EngineVersion parameter in [API action + CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). + Note that for Amazon Aurora instances the engine version must + match the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s + engine version'. + type: string + finalSnapshotIdentifier: + description: |- + The name of your final DB snapshot + when this DB instance is deleted. Must be provided if skip_final_snapshot is + set to false. The value must begin with a letter, only contain alphanumeric characters and hyphens, and not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. + type: string + iamDatabaseAuthenticationEnabled: + description: |- + Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database + accounts is enabled. 
+ type: boolean + identifier: + description: Required if restore_to_point_in_time is specified. + type: string + identifierPrefix: + description: Creates a unique identifier beginning with the specified + prefix. Conflicts with identifier. + type: string + instanceClass: + description: The instance type of the RDS instance. + type: string + iops: + description: |- + The amount of provisioned IOPS. Setting this implies a + storage_type of "io1". Can only be set when storage_type is "io1" or "gp3". + Cannot be specified for gp3 storage if the allocated_storage value is below a per-engine threshold. + See the RDS User Guide for details. + type: number + kmsKeyId: + description: |- + The ARN for the KMS encryption key. If creating an + encrypted replica, set this to the destination KMS ARN. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + licenseModel: + description: 'License model information for this DB instance. + Valid values for this field are as follows:' + type: string + maintenanceWindow: + description: |- + The window to perform maintenance in. + Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". See RDS + Maintenance Window + docs + for more information. + type: string + manageMasterUserPassword: + description: Set to true to allow RDS to manage the master user + password in Secrets Manager. Cannot be set if password is provided. + type: boolean + masterUserSecretKmsKeyId: + description: The Amazon Web Services KMS key identifier is the + key ARN, key ID, alias ARN, or alias name for the KMS key. To + use a KMS key in a different Amazon Web Services account, specify + the key ARN or alias ARN. If not specified, the default KMS + key for your Amazon Web Services account is used. 
+ type: string + masterUserSecretKmsKeyIdRef: + description: Reference to a Key in kms to populate masterUserSecretKmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + masterUserSecretKmsKeyIdSelector: + description: Selector for a Key in kms to populate masterUserSecretKmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + maxAllocatedStorage: + description: When configured, the upper limit to which Amazon + RDS can automatically scale the storage of the DB instance. + Configuring this will automatically ignore differences to allocated_storage. + Must be greater than or equal to allocated_storage or 0 to disable + Storage Autoscaling. + type: number + monitoringInterval: + description: |- + The interval, in seconds, between points + when Enhanced Monitoring metrics are collected for the DB instance. To disable + collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid + Values: 0, 1, 5, 10, 15, 30, 60. + type: number + monitoringRoleArn: + description: |- + The ARN for the IAM role that permits RDS + to send enhanced monitoring metrics to CloudWatch Logs. You can find more + information on the AWS + Documentation + what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances. + type: string + monitoringRoleArnRef: + description: Reference to a Role in iam to populate monitoringRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + monitoringRoleArnSelector: + description: Selector for a Role in iam to populate monitoringRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + multiAz: + description: Specifies if the RDS instance is multi-AZ + type: boolean + ncharCharacterSetName: + description: |- + The national character set is used in the NCHAR, NVARCHAR2, and NCLOB data types for Oracle instances. This can't be changed. See Oracle Character Sets + Supported in Amazon RDS. + type: string + networkType: + description: 'The network type of the DB instance. Valid values: + IPV4, DUAL.' 
+ type: string + optionGroupName: + description: Name of the DB option group to associate. + type: string + parameterGroupName: + description: Name of the DB parameter group to associate. + type: string + parameterGroupNameRef: + description: Reference to a ParameterGroup in rds to populate + parameterGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + parameterGroupNameSelector: + description: Selector for a ParameterGroup in rds to populate + parameterGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + passwordSecretRef: + description: |- + Password for the master DB user. Note that this may show up in + logs, and it will be stored in the state file. Cannot be set if manage_master_user_password is set to true. + Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + performanceInsightsEnabled: + description: Specifies whether Performance Insights are enabled. + Defaults to false. + type: boolean + performanceInsightsKmsKeyId: + description: The ARN for the KMS key to encrypt Performance Insights + data. When specifying performance_insights_kms_key_id, performance_insights_enabled + needs to be set to true. Once KMS key is set, it can never be + changed. + type: string + performanceInsightsRetentionPeriod: + description: Amount of time in days to retain Performance Insights + data. Valid values are 7, 731 (2 years) or a multiple of 31. + When specifying performance_insights_retention_period, performance_insights_enabled + needs to be set to true. Defaults to '7'. + type: number + port: + description: The port on which the DB accepts connections. 
+ type: number + publiclyAccessible: + description: |- + Bool to control if instance is publicly + accessible. Default is false. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + replicaMode: + description: |- + Specifies whether the replica is in either mounted or open-read-only mode. This attribute + is only supported by Oracle instances. Oracle replicas operate in open-read-only mode unless otherwise specified. See Working with Oracle Read Replicas for more information. + type: string + replicateSourceDb: + description: |- + Specifies that this resource is a Replicate + database, and to use this value as the source database. This correlates to the + identifier of another Amazon RDS Database to replicate (if replicating within + a single region) or ARN of the Amazon RDS Database to replicate (if replicating + cross-region). Note that if you are + creating a cross-region replica of an encrypted database you will also need to + specify a kms_key_id. See DB Instance Replication and Working with + PostgreSQL and MySQL Read Replicas + for more information on using Replication. + type: string + replicateSourceDbRef: + description: Reference to a Instance in rds to populate replicateSourceDb. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + replicateSourceDbSelector: + description: Selector for a Instance in rds to populate replicateSourceDb. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restoreToPointInTime: + description: A configuration block for restoring a DB instance + to an arbitrary point in time. Requires the identifier argument + to be set with the name of the new DB instance to be created. + See Restore To Point In Time below for details. + properties: + restoreTime: + description: The date and time to restore from. Value must + be a time in Universal Coordinated Time (UTC) format and + must be before the latest restorable time for the DB instance. 
+ Cannot be specified with use_latest_restorable_time. + type: string + sourceDbInstanceAutomatedBackupsArn: + description: The ARN of the automated backup from which to + restore. Required if source_db_instance_identifier or source_dbi_resource_id + is not specified. + type: string + sourceDbInstanceIdentifier: + description: The identifier of the source DB instance from + which to restore. Must match the identifier of an existing + DB instance. Required if source_db_instance_automated_backups_arn + or source_dbi_resource_id is not specified. + type: string + sourceDbiResourceId: + description: The resource ID of the source DB instance from + which to restore. Required if source_db_instance_identifier + or source_db_instance_automated_backups_arn is not specified. + type: string + useLatestRestorableTime: + description: A boolean value that indicates whether the DB + instance is restored from the latest backup time. Defaults + to false. Cannot be specified with restore_time. + type: boolean + type: object + s3Import: + description: Restore from a Percona Xtrabackup in S3. See Importing + Data into an Amazon RDS MySQL DB Instance + properties: + bucketName: + description: The bucket name where your backup is stored + type: string + bucketPrefix: + description: Can be blank, but is the path to your backup + type: string + ingestionRole: + description: Role applied to load the data. + type: string + sourceEngine: + description: Source engine for the backup + type: string + sourceEngineVersion: + description: Version of the source engine used to make the + backup + type: string + type: object + skipFinalSnapshot: + description: |- + Determines whether a final DB snapshot is + created before the DB instance is deleted. If true is specified, no DBSnapshot + is created. If false is specified, a DB snapshot is created before the DB + instance is deleted, using the value from final_snapshot_identifier. Default + is false. 
+ type: boolean + snapshotIdentifier: + description: |- + Specifies whether or not to create this + database from a snapshot. This correlates to the snapshot ID you'd find in the + RDS console, e.g: rds:production-2015-06-26-06-05. + type: string + storageEncrypted: + description: |- + Specifies whether the DB instance is + encrypted. Note that if you are creating a cross-region read replica this field + is ignored and you should instead declare kms_key_id with a valid ARN. The + default is false if not specified. + type: boolean + storageThroughput: + description: The storage throughput value for the DB instance. + Can only be set when storage_type is "gp3". Cannot be specified + if the allocated_storage value is below a per-engine threshold. + See the RDS User Guide for details. + type: number + storageType: + description: |- + One of "standard" (magnetic), "gp2" (general + purpose SSD), "gp3" (general purpose SSD that needs iops independently) + or "io1" (provisioned IOPS SSD). The default is "io1" if iops is specified, + "gp2" if not. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timezone: + description: |- + Time zone of the DB instance. timezone is currently + only supported by Microsoft SQL Server. The timezone can only be set on + creation. See MSSQL User + Guide + for more information. + type: string + username: + description: Username for the master DB user. Cannot be specified + for a replica. + type: string + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: |- + List of VPC security groups to + associate. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allocatedStorage: + description: The allocated storage in gibibytes. If max_allocated_storage + is configured, this argument represents the initial storage + allocation and differences from the configuration will be ignored + automatically when Storage Autoscaling occurs. If replicate_source_db + is set, the value is ignored during the creation of the instance. + type: number + allowMajorVersionUpgrade: + description: |- + Indicates that major version + upgrades are allowed. Changing this parameter does not result in an outage and + the change is asynchronously applied as soon as possible. + type: boolean + applyImmediately: + description: |- + Specifies whether any database modifications + are applied immediately, or during the next maintenance window. Default is + false. See Amazon RDS Documentation for more + information. + type: boolean + autoMinorVersionUpgrade: + description: |- + Indicates that minor engine upgrades + will be applied automatically to the DB instance during the maintenance window. + Defaults to true. + type: boolean + availabilityZone: + description: The AZ for the RDS instance. 
+ type: string + backupRetentionPeriod: + description: |- + The days to retain backups for. + Must be between 0 and 35. + Default is 0. + Must be greater than 0 if the database is used as a source for a Read Replica, + uses low-downtime updates, + or will use RDS Blue/Green deployments. + type: number + backupTarget: + description: Specifies where automated backups and manual snapshots + are stored. Possible values are region (default) and outposts. + See Working with Amazon RDS on AWS Outposts for more information. + type: string + backupWindow: + description: |- + The daily time range (in UTC) during which automated backups are created if they are enabled. + Example: "09:46-10:16". Must not overlap with maintenance_window. + type: string + blueGreenUpdate: + description: |- + Enables low-downtime updates using RDS Blue/Green deployments. + See blue_green_update below. + properties: + enabled: + description: |- + Enables low-downtime updates when true. + Default is false. + type: boolean + type: object + caCertIdentifier: + description: The identifier of the CA certificate for the DB instance. + type: string + characterSetName: + description: |- + The character set name to use for DB encoding in Oracle and Microsoft SQL instances (collation). + This can't be changed. + See Oracle Character Sets Supported in Amazon RDS or + Server-Level Collation for Microsoft SQL Server for more information. + Cannot be set with replicate_source_db, restore_to_point_in_time, s3_import, or snapshot_identifier. + type: string + copyTagsToSnapshot: + description: – Copy all Instance tags to snapshots. Default is + false. + type: boolean + customIamInstanceProfile: + description: The instance profile associated with the underlying + Amazon EC2 instance of an RDS Custom DB instance. + type: string + customerOwnedIpEnabled: + description: Indicates whether to enable a customer-owned IP address + (CoIP) for an RDS on Outposts DB instance. 
See CoIP for RDS + on Outposts for more information. + type: boolean + dbName: + description: The name of the database to create when the DB instance + is created. If this parameter is not specified, no database + is created in the DB instance. Note that this does not apply + for Oracle or SQL Server engines. See the AWS documentation + for more details on what applies for those engines. If you are + providing an Oracle db name, it needs to be in all upper case. + Cannot be specified for a replica. + type: string + dbSubnetGroupName: + description: |- + Name of DB subnet group. DB instance will + be created in the VPC associated with the DB subnet group. If unspecified, will + be created in the default VPC, or in EC2 Classic, if available. When working + with read replicas, it should be specified only if the source database + specifies an instance in another AWS Region. See DBSubnetGroupName in API + action CreateDBInstanceReadReplica + for additional read replica constraints. + type: string + dbSubnetGroupNameRef: + description: Reference to a SubnetGroup in rds to populate dbSubnetGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbSubnetGroupNameSelector: + description: Selector for a SubnetGroup in rds to populate dbSubnetGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dedicatedLogVolume: + description: Use a dedicated log volume (DLV) for the DB instance. + Requires Provisioned IOPS. See the AWS documentation for more + details. + type: boolean + deleteAutomatedBackups: + description: Specifies whether to remove automated backups immediately + after the DB instance is deleted. Default is true. + type: boolean + deletionProtection: + description: If the DB instance should have deletion protection + enabled. The database can't be deleted when this value is set + to true. The default is false. + type: boolean + domain: + description: The ID of the Directory Service Active Directory + domain to create the instance in. 
Conflicts with domain_fqdn, + domain_ou, domain_auth_secret_arn and a domain_dns_ips. + type: string + domainAuthSecretArn: + description: The ARN for the Secrets Manager secret with the self + managed Active Directory credentials for the user joining the + domain. Conflicts with domain and domain_iam_role_name. + type: string + domainDnsIps: + description: The IPv4 DNS IP addresses of your primary and secondary + self managed Active Directory domain controllers. Two IP addresses + must be provided. If there isn't a secondary domain controller, + use the IP address of the primary domain controller for both + entries in the list. Conflicts with domain and domain_iam_role_name. + items: + type: string + type: array + x-kubernetes-list-type: set + domainFqdn: + description: The fully qualified domain name (FQDN) of the self + managed Active Directory domain. Conflicts with domain and domain_iam_role_name. + type: string + domainIamRoleName: + description: The name of the IAM role to be used when making API + calls to the Directory Service. Conflicts with domain_fqdn, + domain_ou, domain_auth_secret_arn and a domain_dns_ips. + type: string + domainOu: + description: The self managed Active Directory organizational + unit for your DB instance to join. Conflicts with domain and + domain_iam_role_name. + type: string + enabledCloudwatchLogsExports: + description: Set of log types to enable for exporting to CloudWatch + logs. If omitted, no logs will be exported. For supported values, + see the EnableCloudwatchLogsExports.member.N parameter in API + action CreateDBInstance. + items: + type: string + type: array + x-kubernetes-list-type: set + engine: + description: The database engine to use. For supported values, + see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). 
+ Note that for Amazon Aurora instances the engine must match + the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s + engine'. For information on the difference between the available + Aurora MySQL engines see Comparison in the [Amazon RDS Release + Notes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraMySQLReleaseNotes/Welcome.html). + type: string + engineVersion: + description: The engine version to use. If `autoMinorVersionUpgrade` + is enabled, you can provide a prefix of the version such as + 5.7 (for 5.7.10). The actual engine version used is returned + in the attribute `status.atProvider.engineVersionActual`. For + supported values, see the EngineVersion parameter in [API action + CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). + Note that for Amazon Aurora instances the engine version must + match the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s + engine version'. + type: string + finalSnapshotIdentifier: + description: |- + The name of your final DB snapshot + when this DB instance is deleted. Must be provided if skip_final_snapshot is + set to false. The value must begin with a letter, only contain alphanumeric characters and hyphens, and not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. + type: string + iamDatabaseAuthenticationEnabled: + description: |- + Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database + accounts is enabled. + type: boolean + identifier: + description: Required if restore_to_point_in_time is specified. + type: string + identifierPrefix: + description: Creates a unique identifier beginning with the specified + prefix. Conflicts with identifier. 
+ type: string + instanceClass: + description: The instance type of the RDS instance. + type: string + iops: + description: |- + The amount of provisioned IOPS. Setting this implies a + storage_type of "io1". Can only be set when storage_type is "io1" or "gp3". + Cannot be specified for gp3 storage if the allocated_storage value is below a per-engine threshold. + See the RDS User Guide for details. + type: number + kmsKeyId: + description: |- + The ARN for the KMS encryption key. If creating an + encrypted replica, set this to the destination KMS ARN. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + licenseModel: + description: 'License model information for this DB instance. + Valid values for this field are as follows:' + type: string + maintenanceWindow: + description: |- + The window to perform maintenance in. + Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". See RDS + Maintenance Window + docs + for more information. + type: string + manageMasterUserPassword: + description: Set to true to allow RDS to manage the master user + password in Secrets Manager. Cannot be set if password is provided. + type: boolean + masterUserSecretKmsKeyId: + description: The Amazon Web Services KMS key identifier is the + key ARN, key ID, alias ARN, or alias name for the KMS key. To + use a KMS key in a different Amazon Web Services account, specify + the key ARN or alias ARN. If not specified, the default KMS + key for your Amazon Web Services account is used. + type: string + masterUserSecretKmsKeyIdRef: + description: Reference to a Key in kms to populate masterUserSecretKmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + masterUserSecretKmsKeyIdSelector: + description: Selector for a Key in kms to populate masterUserSecretKmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + maxAllocatedStorage: + description: When configured, the upper limit to which Amazon + RDS can automatically scale the storage of the DB instance. + Configuring this will automatically ignore differences to allocated_storage. + Must be greater than or equal to allocated_storage or 0 to disable + Storage Autoscaling. + type: number + monitoringInterval: + description: |- + The interval, in seconds, between points + when Enhanced Monitoring metrics are collected for the DB instance. To disable + collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid + Values: 0, 1, 5, 10, 15, 30, 60. + type: number + monitoringRoleArn: + description: |- + The ARN for the IAM role that permits RDS + to send enhanced monitoring metrics to CloudWatch Logs. You can find more + information on the AWS + Documentation + what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances. + type: string + monitoringRoleArnRef: + description: Reference to a Role in iam to populate monitoringRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + monitoringRoleArnSelector: + description: Selector for a Role in iam to populate monitoringRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + multiAz: + description: Specifies if the RDS instance is multi-AZ + type: boolean + ncharCharacterSetName: + description: |- + The national character set is used in the NCHAR, NVARCHAR2, and NCLOB data types for Oracle instances. This can't be changed. See Oracle Character Sets + Supported in Amazon RDS. + type: string + networkType: + description: 'The network type of the DB instance. Valid values: + IPV4, DUAL.' + type: string + optionGroupName: + description: Name of the DB option group to associate. + type: string + parameterGroupName: + description: Name of the DB parameter group to associate. 
+ type: string + parameterGroupNameRef: + description: Reference to a ParameterGroup in rds to populate + parameterGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + parameterGroupNameSelector: + description: Selector for a ParameterGroup in rds to populate + parameterGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + passwordSecretRef: + description: |- + Password for the master DB user. Note that this may show up in + logs, and it will be stored in the state file. Cannot be set if manage_master_user_password is set to true. + Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + performanceInsightsEnabled: + description: Specifies whether Performance Insights are enabled. + Defaults to false. + type: boolean + performanceInsightsKmsKeyId: + description: The ARN for the KMS key to encrypt Performance Insights + data. When specifying performance_insights_kms_key_id, performance_insights_enabled + needs to be set to true. Once KMS key is set, it can never be + changed. + type: string + performanceInsightsRetentionPeriod: + description: Amount of time in days to retain Performance Insights + data. Valid values are 7, 731 (2 years) or a multiple of 31. + When specifying performance_insights_retention_period, performance_insights_enabled + needs to be set to true. Defaults to '7'. + type: number + port: + description: The port on which the DB accepts connections. + type: number + publiclyAccessible: + description: |- + Bool to control if instance is publicly + accessible. Default is false. + type: boolean + replicaMode: + description: |- + Specifies whether the replica is in either mounted or open-read-only mode. 
This attribute + is only supported by Oracle instances. Oracle replicas operate in open-read-only mode unless otherwise specified. See Working with Oracle Read Replicas for more information. + type: string + replicateSourceDb: + description: |- + Specifies that this resource is a Replicate + database, and to use this value as the source database. This correlates to the + identifier of another Amazon RDS Database to replicate (if replicating within + a single region) or ARN of the Amazon RDS Database to replicate (if replicating + cross-region). Note that if you are + creating a cross-region replica of an encrypted database you will also need to + specify a kms_key_id. See DB Instance Replication and Working with + PostgreSQL and MySQL Read Replicas + for more information on using Replication. + type: string + replicateSourceDbRef: + description: Reference to a Instance in rds to populate replicateSourceDb. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + replicateSourceDbSelector: + description: Selector for a Instance in rds to populate replicateSourceDb. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restoreToPointInTime: + description: A configuration block for restoring a DB instance + to an arbitrary point in time. Requires the identifier argument + to be set with the name of the new DB instance to be created. + See Restore To Point In Time below for details. + properties: + restoreTime: + description: The date and time to restore from. Value must + be a time in Universal Coordinated Time (UTC) format and + must be before the latest restorable time for the DB instance. + Cannot be specified with use_latest_restorable_time. + type: string + sourceDbInstanceAutomatedBackupsArn: + description: The ARN of the automated backup from which to + restore. Required if source_db_instance_identifier or source_dbi_resource_id + is not specified. + type: string + sourceDbInstanceIdentifier: + description: The identifier of the source DB instance from + which to restore. 
Must match the identifier of an existing + DB instance. Required if source_db_instance_automated_backups_arn + or source_dbi_resource_id is not specified. + type: string + sourceDbiResourceId: + description: The resource ID of the source DB instance from + which to restore. Required if source_db_instance_identifier + or source_db_instance_automated_backups_arn is not specified. + type: string + useLatestRestorableTime: + description: A boolean value that indicates whether the DB + instance is restored from the latest backup time. Defaults + to false. Cannot be specified with restore_time. + type: boolean + type: object + s3Import: + description: Restore from a Percona Xtrabackup in S3. See Importing + Data into an Amazon RDS MySQL DB Instance + properties: + bucketName: + description: The bucket name where your backup is stored + type: string + bucketPrefix: + description: Can be blank, but is the path to your backup + type: string + ingestionRole: + description: Role applied to load the data. + type: string + sourceEngine: + description: Source engine for the backup + type: string + sourceEngineVersion: + description: Version of the source engine used to make the + backup + type: string + type: object + skipFinalSnapshot: + description: |- + Determines whether a final DB snapshot is + created before the DB instance is deleted. If true is specified, no DBSnapshot + is created. If false is specified, a DB snapshot is created before the DB + instance is deleted, using the value from final_snapshot_identifier. Default + is false. + type: boolean + snapshotIdentifier: + description: |- + Specifies whether or not to create this + database from a snapshot. This correlates to the snapshot ID you'd find in the + RDS console, e.g: rds:production-2015-06-26-06-05. + type: string + storageEncrypted: + description: |- + Specifies whether the DB instance is + encrypted. 
Note that if you are creating a cross-region read replica this field + is ignored and you should instead declare kms_key_id with a valid ARN. The + default is false if not specified. + type: boolean + storageThroughput: + description: The storage throughput value for the DB instance. + Can only be set when storage_type is "gp3". Cannot be specified + if the allocated_storage value is below a per-engine threshold. + See the RDS User Guide for details. + type: number + storageType: + description: |- + One of "standard" (magnetic), "gp2" (general + purpose SSD), "gp3" (general purpose SSD that needs iops independently) + or "io1" (provisioned IOPS SSD). The default is "io1" if iops is specified, + "gp2" if not. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + timezone: + description: |- + Time zone of the DB instance. timezone is currently + only supported by Microsoft SQL Server. The timezone can only be set on + creation. See MSSQL User + Guide + for more information. + type: string + username: + description: Username for the master DB user. Cannot be specified + for a replica. + type: string + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: |- + List of VPC security groups to + associate. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.instanceClass is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instanceClass) + || (has(self.initProvider) && has(self.initProvider.instanceClass))' + status: + description: InstanceStatus defines the observed state of Instance. + properties: + atProvider: + properties: + address: + description: The hostname of the RDS instance. See also endpoint + and port. + type: string + allocatedStorage: + description: The allocated storage in gibibytes. 
If max_allocated_storage + is configured, this argument represents the initial storage + allocation and differences from the configuration will be ignored + automatically when Storage Autoscaling occurs. If replicate_source_db + is set, the value is ignored during the creation of the instance. + type: number + allowMajorVersionUpgrade: + description: |- + Indicates that major version + upgrades are allowed. Changing this parameter does not result in an outage and + the change is asynchronously applied as soon as possible. + type: boolean + applyImmediately: + description: |- + Specifies whether any database modifications + are applied immediately, or during the next maintenance window. Default is + false. See Amazon RDS Documentation for more + information. + type: boolean + arn: + description: The ARN of the RDS instance. + type: string + autoMinorVersionUpgrade: + description: |- + Indicates that minor engine upgrades + will be applied automatically to the DB instance during the maintenance window. + Defaults to true. + type: boolean + availabilityZone: + description: The AZ for the RDS instance. + type: string + backupRetentionPeriod: + description: |- + The days to retain backups for. + Must be between 0 and 35. + Default is 0. + Must be greater than 0 if the database is used as a source for a Read Replica, + uses low-downtime updates, + or will use RDS Blue/Green deployments. + type: number + backupTarget: + description: Specifies where automated backups and manual snapshots + are stored. Possible values are region (default) and outposts. + See Working with Amazon RDS on AWS Outposts for more information. + type: string + backupWindow: + description: |- + The daily time range (in UTC) during which automated backups are created if they are enabled. + Example: "09:46-10:16". Must not overlap with maintenance_window. + type: string + blueGreenUpdate: + description: |- + Enables low-downtime updates using RDS Blue/Green deployments. + See blue_green_update below. 
+ properties: + enabled: + description: |- + Enables low-downtime updates when true. + Default is false. + type: boolean + type: object + caCertIdentifier: + description: The identifier of the CA certificate for the DB instance. + type: string + characterSetName: + description: |- + The character set name to use for DB encoding in Oracle and Microsoft SQL instances (collation). + This can't be changed. + See Oracle Character Sets Supported in Amazon RDS or + Server-Level Collation for Microsoft SQL Server for more information. + Cannot be set with replicate_source_db, restore_to_point_in_time, s3_import, or snapshot_identifier. + type: string + copyTagsToSnapshot: + description: – Copy all Instance tags to snapshots. Default is + false. + type: boolean + customIamInstanceProfile: + description: The instance profile associated with the underlying + Amazon EC2 instance of an RDS Custom DB instance. + type: string + customerOwnedIpEnabled: + description: Indicates whether to enable a customer-owned IP address + (CoIP) for an RDS on Outposts DB instance. See CoIP for RDS + on Outposts for more information. + type: boolean + dbName: + description: The name of the database to create when the DB instance + is created. If this parameter is not specified, no database + is created in the DB instance. Note that this does not apply + for Oracle or SQL Server engines. See the AWS documentation + for more details on what applies for those engines. If you are + providing an Oracle db name, it needs to be in all upper case. + Cannot be specified for a replica. + type: string + dbSubnetGroupName: + description: |- + Name of DB subnet group. DB instance will + be created in the VPC associated with the DB subnet group. If unspecified, will + be created in the default VPC, or in EC2 Classic, if available. When working + with read replicas, it should be specified only if the source database + specifies an instance in another AWS Region. 
See DBSubnetGroupName in API + action CreateDBInstanceReadReplica + for additional read replica constraints. + type: string + dedicatedLogVolume: + description: Use a dedicated log volume (DLV) for the DB instance. + Requires Provisioned IOPS. See the AWS documentation for more + details. + type: boolean + deleteAutomatedBackups: + description: Specifies whether to remove automated backups immediately + after the DB instance is deleted. Default is true. + type: boolean + deletionProtection: + description: If the DB instance should have deletion protection + enabled. The database can't be deleted when this value is set + to true. The default is false. + type: boolean + domain: + description: The ID of the Directory Service Active Directory + domain to create the instance in. Conflicts with domain_fqdn, + domain_ou, domain_auth_secret_arn and a domain_dns_ips. + type: string + domainAuthSecretArn: + description: The ARN for the Secrets Manager secret with the self + managed Active Directory credentials for the user joining the + domain. Conflicts with domain and domain_iam_role_name. + type: string + domainDnsIps: + description: The IPv4 DNS IP addresses of your primary and secondary + self managed Active Directory domain controllers. Two IP addresses + must be provided. If there isn't a secondary domain controller, + use the IP address of the primary domain controller for both + entries in the list. Conflicts with domain and domain_iam_role_name. + items: + type: string + type: array + x-kubernetes-list-type: set + domainFqdn: + description: The fully qualified domain name (FQDN) of the self + managed Active Directory domain. Conflicts with domain and domain_iam_role_name. + type: string + domainIamRoleName: + description: The name of the IAM role to be used when making API + calls to the Directory Service. Conflicts with domain_fqdn, + domain_ou, domain_auth_secret_arn and a domain_dns_ips. 
+ type: string + domainOu: + description: The self managed Active Directory organizational + unit for your DB instance to join. Conflicts with domain and + domain_iam_role_name. + type: string + enabledCloudwatchLogsExports: + description: Set of log types to enable for exporting to CloudWatch + logs. If omitted, no logs will be exported. For supported values, + see the EnableCloudwatchLogsExports.member.N parameter in API + action CreateDBInstance. + items: + type: string + type: array + x-kubernetes-list-type: set + endpoint: + description: The connection endpoint in address:port format. + type: string + engine: + description: The database engine to use. For supported values, + see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). + Note that for Amazon Aurora instances the engine must match + the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s + engine'. For information on the difference between the available + Aurora MySQL engines see Comparison in the [Amazon RDS Release + Notes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraMySQLReleaseNotes/Welcome.html). + type: string + engineVersion: + description: The engine version to use. If `autoMinorVersionUpgrade` + is enabled, you can provide a prefix of the version such as + 5.7 (for 5.7.10). The actual engine version used is returned + in the attribute `status.atProvider.engineVersionActual`. For + supported values, see the EngineVersion parameter in [API action + CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). + Note that for Amazon Aurora instances the engine version must + match the [DB Cluster](https://marketplace.upbound.io/providers/upbound/provider-aws/latest/resources/rds.aws.upbound.io/Cluster/v1beta1)'s + engine version'. 
+ type: string + engineVersionActual: + description: The running version of the database. + type: string + finalSnapshotIdentifier: + description: |- + The name of your final DB snapshot + when this DB instance is deleted. Must be provided if skip_final_snapshot is + set to false. The value must begin with a letter, only contain alphanumeric characters and hyphens, and not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. + type: string + hostedZoneId: + description: |- + The canonical hosted zone ID of the DB instance (to be used + in a Route 53 Alias record). + type: string + iamDatabaseAuthenticationEnabled: + description: |- + Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database + accounts is enabled. + type: boolean + id: + description: RDS DBI resource ID. + type: string + identifier: + description: Required if restore_to_point_in_time is specified. + type: string + identifierPrefix: + description: Creates a unique identifier beginning with the specified + prefix. Conflicts with identifier. + type: string + instanceClass: + description: The instance type of the RDS instance. + type: string + iops: + description: |- + The amount of provisioned IOPS. Setting this implies a + storage_type of "io1". Can only be set when storage_type is "io1" or "gp3". + Cannot be specified for gp3 storage if the allocated_storage value is below a per-engine threshold. + See the RDS User Guide for details. + type: number + kmsKeyId: + description: |- + The ARN for the KMS encryption key. If creating an + encrypted replica, set this to the destination KMS ARN. + type: string + latestRestorableTime: + description: The latest time, in UTC RFC3339 format, to which + a database can be restored with point-in-time restore. + type: string + licenseModel: + description: 'License model information for this DB instance. 
+ Valid values for this field are as follows:' + type: string + listenerEndpoint: + description: Specifies the listener connection endpoint for SQL + Server Always On. See endpoint below. + items: + properties: + address: + description: The hostname of the RDS instance. See also + endpoint and port. + type: string + hostedZoneId: + description: |- + The canonical hosted zone ID of the DB instance (to be used + in a Route 53 Alias record). + type: string + port: + description: The port on which the DB accepts connections. + type: number + type: object + type: array + maintenanceWindow: + description: |- + The window to perform maintenance in. + Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". See RDS + Maintenance Window + docs + for more information. + type: string + manageMasterUserPassword: + description: Set to true to allow RDS to manage the master user + password in Secrets Manager. Cannot be set if password is provided. + type: boolean + masterUserSecret: + description: A block that specifies the master user secret. Only + available when manage_master_user_password is set to true. Documented + below. + items: + properties: + kmsKeyId: + description: The Amazon Web Services KMS key identifier + that is used to encrypt the secret. + type: string + secretArn: + description: The Amazon Resource Name (ARN) of the secret. + type: string + secretStatus: + description: 'The status of the secret. Valid Values: creating + | active | rotating | impaired.' + type: string + type: object + type: array + masterUserSecretKmsKeyId: + description: The Amazon Web Services KMS key identifier is the + key ARN, key ID, alias ARN, or alias name for the KMS key. To + use a KMS key in a different Amazon Web Services account, specify + the key ARN or alias ARN. If not specified, the default KMS + key for your Amazon Web Services account is used. 
+ type: string + maxAllocatedStorage: + description: When configured, the upper limit to which Amazon + RDS can automatically scale the storage of the DB instance. + Configuring this will automatically ignore differences to allocated_storage. + Must be greater than or equal to allocated_storage or 0 to disable + Storage Autoscaling. + type: number + monitoringInterval: + description: |- + The interval, in seconds, between points + when Enhanced Monitoring metrics are collected for the DB instance. To disable + collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid + Values: 0, 1, 5, 10, 15, 30, 60. + type: number + monitoringRoleArn: + description: |- + The ARN for the IAM role that permits RDS + to send enhanced monitoring metrics to CloudWatch Logs. You can find more + information on the AWS + Documentation + what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances. + type: string + multiAz: + description: Specifies if the RDS instance is multi-AZ + type: boolean + ncharCharacterSetName: + description: |- + The national character set is used in the NCHAR, NVARCHAR2, and NCLOB data types for Oracle instances. This can't be changed. See Oracle Character Sets + Supported in Amazon RDS. + type: string + networkType: + description: 'The network type of the DB instance. Valid values: + IPV4, DUAL.' + type: string + optionGroupName: + description: Name of the DB option group to associate. + type: string + parameterGroupName: + description: Name of the DB parameter group to associate. + type: string + performanceInsightsEnabled: + description: Specifies whether Performance Insights are enabled. + Defaults to false. + type: boolean + performanceInsightsKmsKeyId: + description: The ARN for the KMS key to encrypt Performance Insights + data. When specifying performance_insights_kms_key_id, performance_insights_enabled + needs to be set to true. Once KMS key is set, it can never be + changed. 
+ type: string + performanceInsightsRetentionPeriod: + description: Amount of time in days to retain Performance Insights + data. Valid values are 7, 731 (2 years) or a multiple of 31. + When specifying performance_insights_retention_period, performance_insights_enabled + needs to be set to true. Defaults to '7'. + type: number + port: + description: The port on which the DB accepts connections. + type: number + publiclyAccessible: + description: |- + Bool to control if instance is publicly + accessible. Default is false. + type: boolean + replicaMode: + description: |- + Specifies whether the replica is in either mounted or open-read-only mode. This attribute + is only supported by Oracle instances. Oracle replicas operate in open-read-only mode unless otherwise specified. See Working with Oracle Read Replicas for more information. + type: string + replicas: + items: + type: string + type: array + replicateSourceDb: + description: |- + Specifies that this resource is a Replicate + database, and to use this value as the source database. This correlates to the + identifier of another Amazon RDS Database to replicate (if replicating within + a single region) or ARN of the Amazon RDS Database to replicate (if replicating + cross-region). Note that if you are + creating a cross-region replica of an encrypted database you will also need to + specify a kms_key_id. See DB Instance Replication and Working with + PostgreSQL and MySQL Read Replicas + for more information on using Replication. + type: string + resourceId: + description: The RDS Resource ID of this instance. + type: string + restoreToPointInTime: + description: A configuration block for restoring a DB instance + to an arbitrary point in time. Requires the identifier argument + to be set with the name of the new DB instance to be created. + See Restore To Point In Time below for details. + properties: + restoreTime: + description: The date and time to restore from. 
Value must + be a time in Universal Coordinated Time (UTC) format and + must be before the latest restorable time for the DB instance. + Cannot be specified with use_latest_restorable_time. + type: string + sourceDbInstanceAutomatedBackupsArn: + description: The ARN of the automated backup from which to + restore. Required if source_db_instance_identifier or source_dbi_resource_id + is not specified. + type: string + sourceDbInstanceIdentifier: + description: The identifier of the source DB instance from + which to restore. Must match the identifier of an existing + DB instance. Required if source_db_instance_automated_backups_arn + or source_dbi_resource_id is not specified. + type: string + sourceDbiResourceId: + description: The resource ID of the source DB instance from + which to restore. Required if source_db_instance_identifier + or source_db_instance_automated_backups_arn is not specified. + type: string + useLatestRestorableTime: + description: A boolean value that indicates whether the DB + instance is restored from the latest backup time. Defaults + to false. Cannot be specified with restore_time. + type: boolean + type: object + s3Import: + description: Restore from a Percona Xtrabackup in S3. See Importing + Data into an Amazon RDS MySQL DB Instance + properties: + bucketName: + description: The bucket name where your backup is stored + type: string + bucketPrefix: + description: Can be blank, but is the path to your backup + type: string + ingestionRole: + description: Role applied to load the data. + type: string + sourceEngine: + description: Source engine for the backup + type: string + sourceEngineVersion: + description: Version of the source engine used to make the + backup + type: string + type: object + skipFinalSnapshot: + description: |- + Determines whether a final DB snapshot is + created before the DB instance is deleted. If true is specified, no DBSnapshot + is created. 
If false is specified, a DB snapshot is created before the DB + instance is deleted, using the value from final_snapshot_identifier. Default + is false. + type: boolean + snapshotIdentifier: + description: |- + Specifies whether or not to create this + database from a snapshot. This correlates to the snapshot ID you'd find in the + RDS console, e.g: rds:production-2015-06-26-06-05. + type: string + status: + description: The RDS instance status. + type: string + storageEncrypted: + description: |- + Specifies whether the DB instance is + encrypted. Note that if you are creating a cross-region read replica this field + is ignored and you should instead declare kms_key_id with a valid ARN. The + default is false if not specified. + type: boolean + storageThroughput: + description: The storage throughput value for the DB instance. + Can only be set when storage_type is "gp3". Cannot be specified + if the allocated_storage value is below a per-engine threshold. + See the RDS User Guide for details. + type: number + storageType: + description: |- + One of "standard" (magnetic), "gp2" (general + purpose SSD), "gp3" (general purpose SSD that needs iops independently) + or "io1" (provisioned IOPS SSD). The default is "io1" if iops is specified, + "gp2" if not. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + timezone: + description: |- + Time zone of the DB instance. timezone is currently + only supported by Microsoft SQL Server. The timezone can only be set on + creation. See MSSQL User + Guide + for more information. + type: string + username: + description: Username for the master DB user. Cannot be specified + for a replica. 
+ type: string + vpcSecurityGroupIds: + description: |- + List of VPC security groups to + associate. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/rds.aws.upbound.io_proxydefaulttargetgroups.yaml b/package/crds/rds.aws.upbound.io_proxydefaulttargetgroups.yaml index b80282ee17..67e9a4a42f 100644 --- a/package/crds/rds.aws.upbound.io_proxydefaulttargetgroups.yaml +++ b/package/crds/rds.aws.upbound.io_proxydefaulttargetgroups.yaml @@ -656,3 +656,626 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ProxyDefaultTargetGroup is the Schema for the ProxyDefaultTargetGroups + API. Manage an RDS DB proxy default target group resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ProxyDefaultTargetGroupSpec defines the desired state of + ProxyDefaultTargetGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + connectionPoolConfig: + description: The settings that determine the size and behavior + of the connection pool for the target group. + properties: + connectionBorrowTimeout: + description: The number of seconds for a proxy to wait for + a connection to become available in the connection pool. + Only applies when the proxy has opened its maximum number + of connections and all connections are busy with client + sessions. + type: number + initQuery: + description: One or more SQL statements for the proxy to run + when opening each new database connection. Typically used + with SET statements to make sure that each connection has + identical settings such as time zone and character set. + This setting is empty by default. For multiple statements, + use semicolons as the separator. You can also include multiple + variables in a single SET statement, such as SET x=1, y=2. + type: string + maxConnectionsPercent: + description: The maximum size of the connection pool for each + target in a target group. 
For Aurora MySQL, it is expressed + as a percentage of the max_connections setting for the RDS + DB instance or Aurora DB cluster used by the target group. + type: number + maxIdleConnectionsPercent: + description: Controls how actively the proxy closes idle database + connections in the connection pool. A high value enables + the proxy to leave a high percentage of idle connections + open. A low value causes the proxy to close idle client + connections and return the underlying database connections + to the connection pool. For Aurora MySQL, it is expressed + as a percentage of the max_connections setting for the RDS + DB instance or Aurora DB cluster used by the target group. + type: number + sessionPinningFilters: + description: Each item in the list represents a class of SQL + operations that normally cause all later statements in a + session using a proxy to be pinned to the same underlying + database connection. Including an item in the list exempts + that class of SQL operations from the pinning behavior. + Currently, the only allowed value is EXCLUDE_VARIABLE_SETS. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + dbProxyName: + description: Name of the RDS DB Proxy. + type: string + dbProxyNameRef: + description: Reference to a Proxy in rds to populate dbProxyName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbProxyNameSelector: + description: Selector for a Proxy in rds to populate dbProxyName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + connectionPoolConfig: + description: The settings that determine the size and behavior + of the connection pool for the target group. + properties: + connectionBorrowTimeout: + description: The number of seconds for a proxy to wait for + a connection to become available in the connection pool. + Only applies when the proxy has opened its maximum number + of connections and all connections are busy with client + sessions. + type: number + initQuery: + description: One or more SQL statements for the proxy to run + when opening each new database connection. Typically used + with SET statements to make sure that each connection has + identical settings such as time zone and character set. + This setting is empty by default. For multiple statements, + use semicolons as the separator. You can also include multiple + variables in a single SET statement, such as SET x=1, y=2. + type: string + maxConnectionsPercent: + description: The maximum size of the connection pool for each + target in a target group. For Aurora MySQL, it is expressed + as a percentage of the max_connections setting for the RDS + DB instance or Aurora DB cluster used by the target group. + type: number + maxIdleConnectionsPercent: + description: Controls how actively the proxy closes idle database + connections in the connection pool. A high value enables + the proxy to leave a high percentage of idle connections + open. A low value causes the proxy to close idle client + connections and return the underlying database connections + to the connection pool. 
For Aurora MySQL, it is expressed + as a percentage of the max_connections setting for the RDS + DB instance or Aurora DB cluster used by the target group. + type: number + sessionPinningFilters: + description: Each item in the list represents a class of SQL + operations that normally cause all later statements in a + session using a proxy to be pinned to the same underlying + database connection. Including an item in the list exempts + that class of SQL operations from the pinning behavior. + Currently, the only allowed value is EXCLUDE_VARIABLE_SETS. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + dbProxyName: + description: Name of the RDS DB Proxy. + type: string + dbProxyNameRef: + description: Reference to a Proxy in rds to populate dbProxyName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dbProxyNameSelector: + description: Selector for a Proxy in rds to populate dbProxyName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ProxyDefaultTargetGroupStatus defines the observed state + of ProxyDefaultTargetGroup. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) representing the target + group. + type: string + connectionPoolConfig: + description: The settings that determine the size and behavior + of the connection pool for the target group. + properties: + connectionBorrowTimeout: + description: The number of seconds for a proxy to wait for + a connection to become available in the connection pool. + Only applies when the proxy has opened its maximum number + of connections and all connections are busy with client + sessions. + type: number + initQuery: + description: One or more SQL statements for the proxy to run + when opening each new database connection. Typically used + with SET statements to make sure that each connection has + identical settings such as time zone and character set. + This setting is empty by default. For multiple statements, + use semicolons as the separator. You can also include multiple + variables in a single SET statement, such as SET x=1, y=2. + type: string + maxConnectionsPercent: + description: The maximum size of the connection pool for each + target in a target group. 
For Aurora MySQL, it is expressed + as a percentage of the max_connections setting for the RDS + DB instance or Aurora DB cluster used by the target group. + type: number + maxIdleConnectionsPercent: + description: Controls how actively the proxy closes idle database + connections in the connection pool. A high value enables + the proxy to leave a high percentage of idle connections + open. A low value causes the proxy to close idle client + connections and return the underlying database connections + to the connection pool. For Aurora MySQL, it is expressed + as a percentage of the max_connections setting for the RDS + DB instance or Aurora DB cluster used by the target group. + type: number + sessionPinningFilters: + description: Each item in the list represents a class of SQL + operations that normally cause all later statements in a + session using a proxy to be pinned to the same underlying + database connection. Including an item in the list exempts + that class of SQL operations from the pinning behavior. + Currently, the only allowed value is EXCLUDE_VARIABLE_SETS. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + dbProxyName: + description: Name of the RDS DB Proxy. + type: string + id: + description: Name of the RDS DB Proxy. + type: string + name: + description: The name of the default target group. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/redshift.aws.upbound.io_clusters.yaml b/package/crds/redshift.aws.upbound.io_clusters.yaml index ac2317a22d..34ca09efa7 100644 --- a/package/crds/redshift.aws.upbound.io_clusters.yaml +++ b/package/crds/redshift.aws.upbound.io_clusters.yaml @@ -1761,3 +1761,1731 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Provides a Redshift + Cluster resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowVersionUpgrade: + description: If true , major version upgrades can be applied during + the maintenance window to the Amazon Redshift engine that is + running on the cluster. Default is true. + type: boolean + applyImmediately: + description: Specifies whether any cluster modifications are applied + immediately, or during the next maintenance window. Default + is false. 
+ type: boolean + aquaConfigurationStatus: + description: |- + The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored. + No longer supported by the AWS API. + Always returns auto. + type: string + automatedSnapshotRetentionPeriod: + description: The number of days that automated snapshots are retained. + If the value is 0, automated snapshots are disabled. Even if + automated snapshots are disabled, you can still create manual + snapshots when you want with create-cluster-snapshot. Default + is 1. + type: number + availabilityZone: + description: The EC2 Availability Zone (AZ) in which you want + Amazon Redshift to provision the cluster. For example, if you + have several EC2 instances running in a specific Availability + Zone, then you might want the cluster to be provisioned in the + same zone in order to decrease network latency. Can only be + changed if availability_zone_relocation_enabled is true. + type: string + availabilityZoneRelocationEnabled: + description: If true, the cluster can be relocated to another + availabity zone, either automatically by AWS or when requested. + Default is false. Available for use on clusters from the RA3 + instance family. + type: boolean + clusterParameterGroupName: + description: The name of the parameter group to be associated + with this cluster. + type: string + clusterPublicKey: + description: The public key for the cluster + type: string + clusterRevisionNumber: + description: The specific revision number of the database in the + cluster + type: string + clusterSubnetGroupName: + description: The name of a cluster subnet group to be associated + with this cluster. If this parameter is not provided the resulting + cluster will be deployed outside virtual private cloud (VPC). + type: string + clusterType: + description: The cluster type to use. Either single-node or multi-node. 
+ type: string + clusterVersion: + description: |- + The version of the Amazon Redshift engine software that you want to deploy on the cluster. + The version selected runs on all the nodes in the cluster. + type: string + databaseName: + description: |- + The name of the first database to be created when the cluster is created. + If you do not provide a name, Amazon Redshift will create a default database called dev. + type: string + defaultIamRoleArn: + description: The Amazon Resource Name (ARN) for the IAM role that + was set as default for the cluster when the cluster was created. + type: string + defaultIamRoleArnRef: + description: Reference to a Role in iam to populate defaultIamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultIamRoleArnSelector: + description: Selector for a Role in iam to populate defaultIamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + elasticIp: + description: The Elastic IP (EIP) address for the cluster. + type: string + encrypted: + description: If true , the data in the cluster is encrypted at + rest. + type: boolean + endpoint: + description: The connection endpoint + type: string + enhancedVpcRouting: + description: If true , enhanced VPC routing is enabled. + type: boolean + finalSnapshotIdentifier: + description: The identifier of the final snapshot that is to be + created immediately before deleting the cluster. If this parameter + is provided, skip_final_snapshot must be false. + type: string + iamRoleRefs: + description: References to Role in iam to populate iamRoles. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + iamRoleSelector: + description: Selector for a list of Role in iam to populate iamRoles. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + iamRoles: + description: A list of IAM Role ARNs to associate with the cluster. + A Maximum of 10 can be associated to the cluster at any time. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + kmsKeyId: + description: The ARN for the KMS encryption key. When specifying + kms_key_id, encrypted needs to be set to true. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logging: + description: Logging, documented below. + properties: + bucketName: + description: |- + The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. + For more information on the permissions required for the bucket, please read the AWS documentation + type: string + enable: + description: Enables logging information such as queries and + connection attempts, for the specified Amazon Redshift cluster. + type: boolean + logDestinationType: + description: The log destination type. An enum with possible + values of s3 and cloudwatch. + type: string + logExports: + description: The collection of exported log types. Log types + include the connection log, user log and user activity log. + Required when log_destination_type is cloudwatch. Valid + log types are connectionlog, userlog, and useractivitylog. + items: + type: string + type: array + x-kubernetes-list-type: set + s3KeyPrefix: + description: The prefix applied to the log file names. + type: string + type: object + maintenanceTrackName: + description: The name of the maintenance track for the restored + cluster. When you take a snapshot, the snapshot inherits the + MaintenanceTrack value from the cluster. The snapshot might + be on a different track than the cluster that was the source + for the snapshot. For example, suppose that you take a snapshot + of a cluster that is on the current track and then change the + cluster to be on the trailing track. 
In this case, the snapshot + and the source cluster are on different tracks. Default value + is current. + type: string + manageMasterPassword: + description: |- + Whether to use AWS SecretsManager to manage the cluster admin credentials. + Conflicts with master_password. + One of master_password or manage_master_password is required unless snapshot_identifier is provided. + type: boolean + manualSnapshotRetentionPeriod: + description: The default number of days to retain a manual snapshot. + If the value is -1, the snapshot is retained indefinitely. This + setting doesn't change the retention period of existing snapshots. + Valid values are between -1 and 3653. Default value is -1. + type: number + masterPasswordSecretKmsKeyId: + description: ID of the KMS key used to encrypt the cluster admin + credentials secret. + type: string + masterPasswordSecretRef: + description: |- + Password for the master DB user. + Conflicts with manage_master_password. + One of master_password or manage_master_password is required unless snapshot_identifier is provided. + Note that this may show up in logs, and it will be stored in the state file. + Password must contain at least 8 characters and contain at least one uppercase letter, one lowercase letter, and one number. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + masterUsername: + description: Username for the master DB user. + type: string + multiAz: + description: Specifies if the Redshift cluster is multi-AZ. + type: boolean + nodeType: + description: The node type to be provisioned for the cluster. + type: string + numberOfNodes: + description: The number of compute nodes in the cluster. This + parameter is required when the ClusterType parameter is specified + as multi-node. Default is 1. 
+ type: number + ownerAccount: + description: The AWS customer account used to create or copy the + snapshot. Required if you are restoring a snapshot you do not + own, optional if you own the snapshot. + type: string + port: + description: |- + The port number on which the cluster accepts incoming connections. Valid values are between 1115 and 65535. + The cluster is accessible only via the JDBC and ODBC connection strings. + Part of the connection string requires the port on which the cluster will listen for incoming connections. + Default port is 5439. + type: number + preferredMaintenanceWindow: + description: |- + The weekly time range (in UTC) during which automated cluster maintenance can occur. + Format: ddd:hh24:mi-ddd:hh24:mi + type: string + publiclyAccessible: + description: If true, the cluster can be accessed from a public + network. Default is true. + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + skipFinalSnapshot: + description: Determines whether a final snapshot of the cluster + is created before Amazon Redshift deletes the cluster. If true + , a final cluster snapshot is not created. If false , a final + cluster snapshot is created before the cluster is deleted. Default + is false. + type: boolean + snapshotArn: + description: The ARN of the snapshot from which to create the + new cluster. Conflicts with snapshot_identifier. + type: string + snapshotClusterIdentifier: + description: The name of the cluster the source snapshot was created + from. + type: string + snapshotCopy: + description: Configuration of automatic copy of snapshots from + one region to another. Documented below. + properties: + destinationRegion: + description: The destination region that you want to copy + snapshots to. + type: string + grantName: + description: The name of the snapshot copy grant to use when + snapshots of an AWS KMS-encrypted cluster are copied to + the destination region. 
+ type: string + retentionPeriod: + description: The number of days to retain automated snapshots + in the destination region after they are copied from the + source region. Defaults to 7. + type: number + type: object + snapshotIdentifier: + description: The name of the snapshot from which to create the + new cluster. Conflicts with snapshot_arn. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: A list of Virtual Private Cloud (VPC) security groups + to be associated with the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + allowVersionUpgrade: + description: If true , major version upgrades can be applied during + the maintenance window to the Amazon Redshift engine that is + running on the cluster. Default is true. + type: boolean + applyImmediately: + description: Specifies whether any cluster modifications are applied + immediately, or during the next maintenance window. Default + is false. + type: boolean + aquaConfigurationStatus: + description: |- + The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored. + No longer supported by the AWS API. + Always returns auto. + type: string + automatedSnapshotRetentionPeriod: + description: The number of days that automated snapshots are retained. + If the value is 0, automated snapshots are disabled. Even if + automated snapshots are disabled, you can still create manual + snapshots when you want with create-cluster-snapshot. Default + is 1. + type: number + availabilityZone: + description: The EC2 Availability Zone (AZ) in which you want + Amazon Redshift to provision the cluster. For example, if you + have several EC2 instances running in a specific Availability + Zone, then you might want the cluster to be provisioned in the + same zone in order to decrease network latency. Can only be + changed if availability_zone_relocation_enabled is true. + type: string + availabilityZoneRelocationEnabled: + description: If true, the cluster can be relocated to another + availabity zone, either automatically by AWS or when requested. + Default is false. Available for use on clusters from the RA3 + instance family. + type: boolean + clusterParameterGroupName: + description: The name of the parameter group to be associated + with this cluster. 
+ type: string + clusterPublicKey: + description: The public key for the cluster + type: string + clusterRevisionNumber: + description: The specific revision number of the database in the + cluster + type: string + clusterSubnetGroupName: + description: The name of a cluster subnet group to be associated + with this cluster. If this parameter is not provided the resulting + cluster will be deployed outside virtual private cloud (VPC). + type: string + clusterType: + description: The cluster type to use. Either single-node or multi-node. + type: string + clusterVersion: + description: |- + The version of the Amazon Redshift engine software that you want to deploy on the cluster. + The version selected runs on all the nodes in the cluster. + type: string + databaseName: + description: |- + The name of the first database to be created when the cluster is created. + If you do not provide a name, Amazon Redshift will create a default database called dev. + type: string + defaultIamRoleArn: + description: The Amazon Resource Name (ARN) for the IAM role that + was set as default for the cluster when the cluster was created. + type: string + defaultIamRoleArnRef: + description: Reference to a Role in iam to populate defaultIamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultIamRoleArnSelector: + description: Selector for a Role in iam to populate defaultIamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + elasticIp: + description: The Elastic IP (EIP) address for the cluster. + type: string + encrypted: + description: If true , the data in the cluster is encrypted at + rest. + type: boolean + endpoint: + description: The connection endpoint + type: string + enhancedVpcRouting: + description: If true , enhanced VPC routing is enabled. + type: boolean + finalSnapshotIdentifier: + description: The identifier of the final snapshot that is to be + created immediately before deleting the cluster. If this parameter + is provided, skip_final_snapshot must be false. 
+ type: string + iamRoleRefs: + description: References to Role in iam to populate iamRoles. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + iamRoleSelector: + description: Selector for a list of Role in iam to populate iamRoles. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + iamRoles: + description: A list of IAM Role ARNs to associate with the cluster. + A Maximum of 10 can be associated to the cluster at any time. + items: + type: string + type: array + x-kubernetes-list-type: set + kmsKeyId: + description: The ARN for the KMS encryption key. When specifying + kms_key_id, encrypted needs to be set to true. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + logging: + description: Logging, documented below. + properties: + bucketName: + description: |- + The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. + For more information on the permissions required for the bucket, please read the AWS documentation + type: string + enable: + description: Enables logging information such as queries and + connection attempts, for the specified Amazon Redshift cluster. + type: boolean + logDestinationType: + description: The log destination type. An enum with possible + values of s3 and cloudwatch. + type: string + logExports: + description: The collection of exported log types. Log types + include the connection log, user log and user activity log. + Required when log_destination_type is cloudwatch. Valid + log types are connectionlog, userlog, and useractivitylog. + items: + type: string + type: array + x-kubernetes-list-type: set + s3KeyPrefix: + description: The prefix applied to the log file names. 
+ type: string + type: object + maintenanceTrackName: + description: The name of the maintenance track for the restored + cluster. When you take a snapshot, the snapshot inherits the + MaintenanceTrack value from the cluster. The snapshot might + be on a different track than the cluster that was the source + for the snapshot. For example, suppose that you take a snapshot + of a cluster that is on the current track and then change the + cluster to be on the trailing track. In this case, the snapshot + and the source cluster are on different tracks. Default value + is current. + type: string + manageMasterPassword: + description: |- + Whether to use AWS SecretsManager to manage the cluster admin credentials. + Conflicts with master_password. + One of master_password or manage_master_password is required unless snapshot_identifier is provided. + type: boolean + manualSnapshotRetentionPeriod: + description: The default number of days to retain a manual snapshot. + If the value is -1, the snapshot is retained indefinitely. This + setting doesn't change the retention period of existing snapshots. + Valid values are between -1 and 3653. Default value is -1. + type: number + masterPasswordSecretKmsKeyId: + description: ID of the KMS key used to encrypt the cluster admin + credentials secret. + type: string + masterPasswordSecretRef: + description: |- + Password for the master DB user. + Conflicts with manage_master_password. + One of master_password or manage_master_password is required unless snapshot_identifier is provided. + Note that this may show up in logs, and it will be stored in the state file. + Password must contain at least 8 characters and contain at least one uppercase letter, one lowercase letter, and one number. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + masterUsername: + description: Username for the master DB user. + type: string + multiAz: + description: Specifies if the Redshift cluster is multi-AZ. + type: boolean + nodeType: + description: The node type to be provisioned for the cluster. + type: string + numberOfNodes: + description: The number of compute nodes in the cluster. This + parameter is required when the ClusterType parameter is specified + as multi-node. Default is 1. + type: number + ownerAccount: + description: The AWS customer account used to create or copy the + snapshot. Required if you are restoring a snapshot you do not + own, optional if you own the snapshot. + type: string + port: + description: |- + The port number on which the cluster accepts incoming connections. Valid values are between 1115 and 65535. + The cluster is accessible only via the JDBC and ODBC connection strings. + Part of the connection string requires the port on which the cluster will listen for incoming connections. + Default port is 5439. + type: number + preferredMaintenanceWindow: + description: |- + The weekly time range (in UTC) during which automated cluster maintenance can occur. + Format: ddd:hh24:mi-ddd:hh24:mi + type: string + publiclyAccessible: + description: If true, the cluster can be accessed from a public + network. Default is true. + type: boolean + skipFinalSnapshot: + description: Determines whether a final snapshot of the cluster + is created before Amazon Redshift deletes the cluster. If true + , a final cluster snapshot is not created. If false , a final + cluster snapshot is created before the cluster is deleted. Default + is false. + type: boolean + snapshotArn: + description: The ARN of the snapshot from which to create the + new cluster. Conflicts with snapshot_identifier. + type: string + snapshotClusterIdentifier: + description: The name of the cluster the source snapshot was created + from. 
+ type: string + snapshotCopy: + description: Configuration of automatic copy of snapshots from + one region to another. Documented below. + properties: + destinationRegion: + description: The destination region that you want to copy + snapshots to. + type: string + grantName: + description: The name of the snapshot copy grant to use when + snapshots of an AWS KMS-encrypted cluster are copied to + the destination region. + type: string + retentionPeriod: + description: The number of days to retain automated snapshots + in the destination region after they are copied from the + source region. Defaults to 7. + type: number + type: object + snapshotIdentifier: + description: The name of the snapshot from which to create the + new cluster. Conflicts with snapshot_arn. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate vpcSecurityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + vpcSecurityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to populate + vpcSecurityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpcSecurityGroupIds: + description: A list of Virtual Private Cloud (VPC) security groups + to be associated with the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.nodeType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.nodeType) + || (has(self.initProvider) && has(self.initProvider.nodeType))' + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + allowVersionUpgrade: + description: If true , major version upgrades can be applied during + the maintenance window to the Amazon Redshift engine that is + running on the cluster. Default is true. 
+ type: boolean + applyImmediately: + description: Specifies whether any cluster modifications are applied + immediately, or during the next maintenance window. Default + is false. + type: boolean + aquaConfigurationStatus: + description: |- + The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored. + No longer supported by the AWS API. + Always returns auto. + type: string + arn: + description: Amazon Resource Name (ARN) of cluster + type: string + automatedSnapshotRetentionPeriod: + description: The number of days that automated snapshots are retained. + If the value is 0, automated snapshots are disabled. Even if + automated snapshots are disabled, you can still create manual + snapshots when you want with create-cluster-snapshot. Default + is 1. + type: number + availabilityZone: + description: The EC2 Availability Zone (AZ) in which you want + Amazon Redshift to provision the cluster. For example, if you + have several EC2 instances running in a specific Availability + Zone, then you might want the cluster to be provisioned in the + same zone in order to decrease network latency. Can only be + changed if availability_zone_relocation_enabled is true. + type: string + availabilityZoneRelocationEnabled: + description: If true, the cluster can be relocated to another + availabity zone, either automatically by AWS or when requested. + Default is false. Available for use on clusters from the RA3 + instance family. + type: boolean + clusterNamespaceArn: + description: The namespace Amazon Resource Name (ARN) of the cluster + type: string + clusterNodes: + description: The nodes in the cluster. 
Cluster node blocks are + documented below + items: + properties: + nodeRole: + description: Whether the node is a leader node or a compute + node + type: string + privateIpAddress: + description: The private IP address of a node within a cluster + type: string + publicIpAddress: + description: The public IP address of a node within a cluster + type: string + type: object + type: array + clusterParameterGroupName: + description: The name of the parameter group to be associated + with this cluster. + type: string + clusterPublicKey: + description: The public key for the cluster + type: string + clusterRevisionNumber: + description: The specific revision number of the database in the + cluster + type: string + clusterSubnetGroupName: + description: The name of a cluster subnet group to be associated + with this cluster. If this parameter is not provided the resulting + cluster will be deployed outside virtual private cloud (VPC). + type: string + clusterType: + description: The cluster type to use. Either single-node or multi-node. + type: string + clusterVersion: + description: |- + The version of the Amazon Redshift engine software that you want to deploy on the cluster. + The version selected runs on all the nodes in the cluster. + type: string + databaseName: + description: |- + The name of the first database to be created when the cluster is created. + If you do not provide a name, Amazon Redshift will create a default database called dev. + type: string + defaultIamRoleArn: + description: The Amazon Resource Name (ARN) for the IAM role that + was set as default for the cluster when the cluster was created. + type: string + dnsName: + description: The DNS name of the cluster + type: string + elasticIp: + description: The Elastic IP (EIP) address for the cluster. + type: string + encrypted: + description: If true , the data in the cluster is encrypted at + rest. 
+ type: boolean + endpoint: + description: The connection endpoint + type: string + enhancedVpcRouting: + description: If true , enhanced VPC routing is enabled. + type: boolean + finalSnapshotIdentifier: + description: The identifier of the final snapshot that is to be + created immediately before deleting the cluster. If this parameter + is provided, skip_final_snapshot must be false. + type: string + iamRoles: + description: A list of IAM Role ARNs to associate with the cluster. + A Maximum of 10 can be associated to the cluster at any time. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The Redshift Cluster ID. + type: string + kmsKeyId: + description: The ARN for the KMS encryption key. When specifying + kms_key_id, encrypted needs to be set to true. + type: string + logging: + description: Logging, documented below. + properties: + bucketName: + description: |- + The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. + For more information on the permissions required for the bucket, please read the AWS documentation + type: string + enable: + description: Enables logging information such as queries and + connection attempts, for the specified Amazon Redshift cluster. + type: boolean + logDestinationType: + description: The log destination type. An enum with possible + values of s3 and cloudwatch. + type: string + logExports: + description: The collection of exported log types. Log types + include the connection log, user log and user activity log. + Required when log_destination_type is cloudwatch. Valid + log types are connectionlog, userlog, and useractivitylog. + items: + type: string + type: array + x-kubernetes-list-type: set + s3KeyPrefix: + description: The prefix applied to the log file names. 
+ type: string + type: object + maintenanceTrackName: + description: The name of the maintenance track for the restored + cluster. When you take a snapshot, the snapshot inherits the + MaintenanceTrack value from the cluster. The snapshot might + be on a different track than the cluster that was the source + for the snapshot. For example, suppose that you take a snapshot + of a cluster that is on the current track and then change the + cluster to be on the trailing track. In this case, the snapshot + and the source cluster are on different tracks. Default value + is current. + type: string + manageMasterPassword: + description: |- + Whether to use AWS SecretsManager to manage the cluster admin credentials. + Conflicts with master_password. + One of master_password or manage_master_password is required unless snapshot_identifier is provided. + type: boolean + manualSnapshotRetentionPeriod: + description: The default number of days to retain a manual snapshot. + If the value is -1, the snapshot is retained indefinitely. This + setting doesn't change the retention period of existing snapshots. + Valid values are between -1 and 3653. Default value is -1. + type: number + masterPasswordSecretArn: + description: ARN of the cluster admin credentials secret + type: string + masterPasswordSecretKmsKeyId: + description: ID of the KMS key used to encrypt the cluster admin + credentials secret. + type: string + masterUsername: + description: Username for the master DB user. + type: string + multiAz: + description: Specifies if the Redshift cluster is multi-AZ. + type: boolean + nodeType: + description: The node type to be provisioned for the cluster. + type: string + numberOfNodes: + description: The number of compute nodes in the cluster. This + parameter is required when the ClusterType parameter is specified + as multi-node. Default is 1. + type: number + ownerAccount: + description: The AWS customer account used to create or copy the + snapshot. 
Required if you are restoring a snapshot you do not + own, optional if you own the snapshot. + type: string + port: + description: |- + The port number on which the cluster accepts incoming connections. Valid values are between 1115 and 65535. + The cluster is accessible only via the JDBC and ODBC connection strings. + Part of the connection string requires the port on which the cluster will listen for incoming connections. + Default port is 5439. + type: number + preferredMaintenanceWindow: + description: |- + The weekly time range (in UTC) during which automated cluster maintenance can occur. + Format: ddd:hh24:mi-ddd:hh24:mi + type: string + publiclyAccessible: + description: If true, the cluster can be accessed from a public + network. Default is true. + type: boolean + skipFinalSnapshot: + description: Determines whether a final snapshot of the cluster + is created before Amazon Redshift deletes the cluster. If true + , a final cluster snapshot is not created. If false , a final + cluster snapshot is created before the cluster is deleted. Default + is false. + type: boolean + snapshotArn: + description: The ARN of the snapshot from which to create the + new cluster. Conflicts with snapshot_identifier. + type: string + snapshotClusterIdentifier: + description: The name of the cluster the source snapshot was created + from. + type: string + snapshotCopy: + description: Configuration of automatic copy of snapshots from + one region to another. Documented below. + properties: + destinationRegion: + description: The destination region that you want to copy + snapshots to. + type: string + grantName: + description: The name of the snapshot copy grant to use when + snapshots of an AWS KMS-encrypted cluster are copied to + the destination region. + type: string + retentionPeriod: + description: The number of days to retain automated snapshots + in the destination region after they are copied from the + source region. Defaults to 7. 
+ type: number + type: object + snapshotIdentifier: + description: The name of the snapshot from which to create the + new cluster. Conflicts with snapshot_arn. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcSecurityGroupIds: + description: A list of Virtual Private Cloud (VPC) security groups + to be associated with the cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/redshift.aws.upbound.io_scheduledactions.yaml b/package/crds/redshift.aws.upbound.io_scheduledactions.yaml index 6654636f5e..3d3300df96 100644 --- a/package/crds/redshift.aws.upbound.io_scheduledactions.yaml +++ b/package/crds/redshift.aws.upbound.io_scheduledactions.yaml @@ -716,3 +716,671 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ScheduledAction is the Schema for the ScheduledActions API. Provides + a Redshift Scheduled Action resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScheduledActionSpec defines the desired state of ScheduledAction + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the scheduled action. + type: string + enable: + description: Whether to enable the scheduled action. Default is + true . + type: boolean + endTime: + description: The end time in UTC when the schedule is active, + in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + type: string + iamRole: + description: The IAM role to assume to run the scheduled action. + type: string + iamRoleRef: + description: Reference to a Role in iam to populate iamRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleSelector: + description: Selector for a Role in iam to populate iamRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + schedule: + description: The schedule of action. The schedule is defined format + of "at expression" or "cron expression", for example at(2016-03-04T17:27:00) + or cron(0 10 ? * MON *). See Scheduled Action for more information. 
+ type: string + startTime: + description: The start time in UTC when the schedule is active, + in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + type: string + targetAction: + description: Target action. Documented below. + properties: + pauseCluster: + description: An action that runs a PauseCluster API operation. + Documented below. + properties: + clusterIdentifier: + description: The identifier of the cluster to be paused. + type: string + type: object + resizeCluster: + description: An action that runs a ResizeCluster API operation. + Documented below. + properties: + classic: + description: 'A boolean value indicating whether the resize + operation is using the classic resize process. Default: + false.' + type: boolean + clusterIdentifier: + description: The unique identifier for the cluster to + resize. + type: string + clusterType: + description: The new cluster type for the specified cluster. + type: string + nodeType: + description: The new node type for the nodes you are adding. + type: string + numberOfNodes: + description: The new number of nodes for the cluster. + type: number + type: object + resumeCluster: + description: An action that runs a ResumeCluster API operation. + Documented below. + properties: + clusterIdentifier: + description: The identifier of the cluster to be resumed. + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the scheduled action. + type: string + enable: + description: Whether to enable the scheduled action. Default is + true . + type: boolean + endTime: + description: The end time in UTC when the schedule is active, + in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + type: string + iamRole: + description: The IAM role to assume to run the scheduled action. + type: string + iamRoleRef: + description: Reference to a Role in iam to populate iamRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleSelector: + description: Selector for a Role in iam to populate iamRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + schedule: + description: The schedule of action. The schedule is defined format + of "at expression" or "cron expression", for example at(2016-03-04T17:27:00) + or cron(0 10 ? * MON *). See Scheduled Action for more information. + type: string + startTime: + description: The start time in UTC when the schedule is active, + in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + type: string + targetAction: + description: Target action. Documented below. + properties: + pauseCluster: + description: An action that runs a PauseCluster API operation. + Documented below. + properties: + clusterIdentifier: + description: The identifier of the cluster to be paused. + type: string + type: object + resizeCluster: + description: An action that runs a ResizeCluster API operation. + Documented below. + properties: + classic: + description: 'A boolean value indicating whether the resize + operation is using the classic resize process. Default: + false.' 
+ type: boolean + clusterIdentifier: + description: The unique identifier for the cluster to + resize. + type: string + clusterType: + description: The new cluster type for the specified cluster. + type: string + nodeType: + description: The new node type for the nodes you are adding. + type: string + numberOfNodes: + description: The new number of nodes for the cluster. + type: number + type: object + resumeCluster: + description: An action that runs a ResumeCluster API operation. + Documented below. + properties: + clusterIdentifier: + description: The identifier of the cluster to be resumed. + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.schedule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.schedule) + || (has(self.initProvider) && has(self.initProvider.schedule))' + - message: spec.forProvider.targetAction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetAction) + || (has(self.initProvider) && has(self.initProvider.targetAction))' + status: + description: ScheduledActionStatus defines the observed state of ScheduledAction. + properties: + atProvider: + properties: + description: + description: The description of the scheduled action. + type: string + enable: + description: Whether to enable the scheduled action. Default is + true . + type: boolean + endTime: + description: The end time in UTC when the schedule is active, + in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + type: string + iamRole: + description: The IAM role to assume to run the scheduled action. + type: string + id: + description: The Redshift Scheduled Action name. + type: string + schedule: + description: The schedule of action. 
The schedule is defined format + of "at expression" or "cron expression", for example at(2016-03-04T17:27:00) + or cron(0 10 ? * MON *). See Scheduled Action for more information. + type: string + startTime: + description: The start time in UTC when the schedule is active, + in UTC RFC3339 format(for example, YYYY-MM-DDTHH:MM:SSZ). + type: string + targetAction: + description: Target action. Documented below. + properties: + pauseCluster: + description: An action that runs a PauseCluster API operation. + Documented below. + properties: + clusterIdentifier: + description: The identifier of the cluster to be paused. + type: string + type: object + resizeCluster: + description: An action that runs a ResizeCluster API operation. + Documented below. + properties: + classic: + description: 'A boolean value indicating whether the resize + operation is using the classic resize process. Default: + false.' + type: boolean + clusterIdentifier: + description: The unique identifier for the cluster to + resize. + type: string + clusterType: + description: The new cluster type for the specified cluster. + type: string + nodeType: + description: The new node type for the nodes you are adding. + type: string + numberOfNodes: + description: The new number of nodes for the cluster. + type: number + type: object + resumeCluster: + description: An action that runs a ResumeCluster API operation. + Documented below. + properties: + clusterIdentifier: + description: The identifier of the cluster to be resumed. + type: string + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/resourcegroups.aws.upbound.io_groups.yaml b/package/crds/resourcegroups.aws.upbound.io_groups.yaml index be07e7dac0..32e93eedbf 100644 --- a/package/crds/resourcegroups.aws.upbound.io_groups.yaml +++ b/package/crds/resourcegroups.aws.upbound.io_groups.yaml @@ -490,3 +490,466 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Group is the Schema for the Groups API. Provides a Resource Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GroupSpec defines the desired state of Group + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configuration: + description: A configuration associates the resource group with + an AWS service and specifies how the service can interact with + the resources in the group. See below for details. + items: + properties: + parameters: + description: A collection of parameters for this group configuration + item. See below for details. + items: + properties: + name: + description: The name of the group configuration parameter. + type: string + values: + description: The value or values to be used for the + specified parameter. + items: + type: string + type: array + type: object + type: array + type: + description: Specifies the type of group configuration item. + type: string + type: object + type: array + description: + description: A description of the resource group. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resourceQuery: + description: A resource_query block. Resource queries are documented + below. 
+ properties: + query: + description: The resource query as a JSON string. + type: string + type: + description: The type of the resource query. Defaults to TAG_FILTERS_1_0. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configuration: + description: A configuration associates the resource group with + an AWS service and specifies how the service can interact with + the resources in the group. See below for details. + items: + properties: + parameters: + description: A collection of parameters for this group configuration + item. See below for details. + items: + properties: + name: + description: The name of the group configuration parameter. + type: string + values: + description: The value or values to be used for the + specified parameter. + items: + type: string + type: array + type: object + type: array + type: + description: Specifies the type of group configuration item. + type: string + type: object + type: array + description: + description: A description of the resource group. + type: string + resourceQuery: + description: A resource_query block. Resource queries are documented + below. 
+ properties: + query: + description: The resource query as a JSON string. + type: string + type: + description: The type of the resource query. Defaults to TAG_FILTERS_1_0. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: GroupStatus defines the observed state of Group. 
+ properties: + atProvider: + properties: + arn: + description: The ARN assigned by AWS for this resource group. + type: string + configuration: + description: A configuration associates the resource group with + an AWS service and specifies how the service can interact with + the resources in the group. See below for details. + items: + properties: + parameters: + description: A collection of parameters for this group configuration + item. See below for details. + items: + properties: + name: + description: The name of the group configuration parameter. + type: string + values: + description: The value or values to be used for the + specified parameter. + items: + type: string + type: array + type: object + type: array + type: + description: Specifies the type of group configuration item. + type: string + type: object + type: array + description: + description: A description of the resource group. + type: string + id: + type: string + resourceQuery: + description: A resource_query block. Resource queries are documented + below. + properties: + query: + description: The resource query as a JSON string. + type: string + type: + description: The type of the resource query. Defaults to TAG_FILTERS_1_0. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/route53.aws.upbound.io_records.yaml b/package/crds/route53.aws.upbound.io_records.yaml index bef08b73d1..54767219fe 100644 --- a/package/crds/route53.aws.upbound.io_records.yaml +++ b/package/crds/route53.aws.upbound.io_records.yaml @@ -1138,3 +1138,1072 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Record is the Schema for the Records API. Provides a Route53 + record resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RecordSpec defines the desired state of Record + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + alias: + description: |- + An alias block. Conflicts with ttl & records. + Documented below. + properties: + evaluateTargetHealth: + description: Set to true if you want Route 53 to determine + whether to respond to DNS queries using this resource record + set by checking the health of the resource record set. Some + resources have special requirements, see related part of + documentation. + type: boolean + name: + description: The name of the record. + type: string + zoneId: + description: The ID of the hosted zone to contain this record. + type: string + type: object + allowOverwrite: + description: false by default. This configuration is not recommended + for most environments. + type: boolean + cidrRoutingPolicy: + description: A block indicating a routing policy based on the + IP network ranges of requestors. Conflicts with any other routing + policy. Documented below. + properties: + collectionId: + description: The CIDR collection ID. See the aws_route53_cidr_collection + resource for more details. 
+ type: string + locationName: + description: The CIDR collection location name. See the aws_route53_cidr_location + resource for more details. A location_name with an asterisk + "*" can be used to create a default CIDR record. collection_id + is still required for default record. + type: string + type: object + failoverRoutingPolicy: + description: A block indicating the routing behavior when associated + health check fails. Conflicts with any other routing policy. + Documented below. + properties: + type: + description: The record type. Valid values are A, AAAA, CAA, + CNAME, DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + type: string + type: object + geolocationRoutingPolicy: + description: A block indicating a routing policy based on the + geolocation of the requestor. Conflicts with any other routing + policy. Documented below. + properties: + continent: + description: A two-letter continent code. See http://docs.aws.amazon.com/Route53/latest/APIReference/API_GetGeoLocation.html + for code details. Either continent or country must be specified. + type: string + country: + description: A two-character country code or * to indicate + a default resource record set. + type: string + subdivision: + description: A subdivision code for a country. + type: string + type: object + geoproximityRoutingPolicy: + description: A block indicating a routing policy based on the + geoproximity of the requestor. Conflicts with any other routing + policy. Documented below. + properties: + awsRegion: + description: A AWS region where the resource is present. + type: string + bias: + description: Route more traffic or less traffic to the resource + by specifying a value ranges between -90 to 90. See https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy-geoproximity.html + for bias details. + type: number + coordinates: + description: Specify latitude and longitude for routing traffic + to non-AWS resources. 
+ items: + properties: + latitude: + type: string + longitude: + type: string + type: object + type: array + localZoneGroup: + description: A AWS local zone group where the resource is + present. See https://docs.aws.amazon.com/local-zones/latest/ug/available-local-zones.html + for local zone group list. + type: string + type: object + healthCheckId: + description: The health check the record should be associated + with. + type: string + healthCheckIdRef: + description: Reference to a HealthCheck in route53 to populate + healthCheckId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + healthCheckIdSelector: + description: Selector for a HealthCheck in route53 to populate + healthCheckId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + latencyRoutingPolicy: + description: A block indicating a routing policy based on the + latency between the requestor and an AWS region. Conflicts with + any other routing policy. Documented below. + properties: + region: + description: An AWS region from which to measure latency. + See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-latency + type: string + required: + - region + type: object + multivalueAnswerRoutingPolicy: + description: Set to true to indicate a multivalue answer routing + policy. Conflicts with any other routing policy. + type: boolean + name: + description: The name of the record. + type: string + records: + description: A string list of records.g., "first255characters\"\"morecharacters"). + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: |- + An AWS region from which to measure latency. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-latency + Region is the region you'd like your resource to be created in. + type: string + setIdentifier: + description: Unique identifier to differentiate records with routing + policies from one another. 
Required if using cidr_routing_policy, + failover_routing_policy, geolocation_routing_policy,geoproximity_routing_policy, + latency_routing_policy, multivalue_answer_routing_policy, or + weighted_routing_policy. + type: string + ttl: + description: The TTL of the record. + type: number + type: + description: The record type. Valid values are A, AAAA, CAA, CNAME, + DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + type: string + weightedRoutingPolicy: + description: A block indicating a weighted routing policy. Conflicts + with any other routing policy. Documented below. + properties: + weight: + description: A numeric value indicating the relative weight + of the record. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted. + type: number + type: object + zoneId: + description: The ID of the hosted zone to contain this record. + type: string + zoneIdRef: + description: Reference to a Zone in route53 to populate zoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + zoneIdSelector: + description: Selector for a Zone in route53 to populate zoneId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + alias: + description: |- + An alias block. Conflicts with ttl & records. + Documented below. 
+ properties: + evaluateTargetHealth: + description: Set to true if you want Route 53 to determine + whether to respond to DNS queries using this resource record + set by checking the health of the resource record set. Some + resources have special requirements, see related part of + documentation. + type: boolean + name: + description: The name of the record. + type: string + zoneId: + description: The ID of the hosted zone to contain this record. + type: string + type: object + allowOverwrite: + description: false by default. This configuration is not recommended + for most environments. + type: boolean + cidrRoutingPolicy: + description: A block indicating a routing policy based on the + IP network ranges of requestors. Conflicts with any other routing + policy. Documented below. + properties: + collectionId: + description: The CIDR collection ID. See the aws_route53_cidr_collection + resource for more details. + type: string + locationName: + description: The CIDR collection location name. See the aws_route53_cidr_location + resource for more details. A location_name with an asterisk + "*" can be used to create a default CIDR record. collection_id + is still required for default record. + type: string + type: object + failoverRoutingPolicy: + description: A block indicating the routing behavior when associated + health check fails. Conflicts with any other routing policy. + Documented below. + properties: + type: + description: The record type. Valid values are A, AAAA, CAA, + CNAME, DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + type: string + type: object + geolocationRoutingPolicy: + description: A block indicating a routing policy based on the + geolocation of the requestor. Conflicts with any other routing + policy. Documented below. + properties: + continent: + description: A two-letter continent code. See http://docs.aws.amazon.com/Route53/latest/APIReference/API_GetGeoLocation.html + for code details. Either continent or country must be specified. 
+ type: string + country: + description: A two-character country code or * to indicate + a default resource record set. + type: string + subdivision: + description: A subdivision code for a country. + type: string + type: object + geoproximityRoutingPolicy: + description: A block indicating a routing policy based on the + geoproximity of the requestor. Conflicts with any other routing + policy. Documented below. + properties: + awsRegion: + description: A AWS region where the resource is present. + type: string + bias: + description: Route more traffic or less traffic to the resource + by specifying a value ranges between -90 to 90. See https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy-geoproximity.html + for bias details. + type: number + coordinates: + description: Specify latitude and longitude for routing traffic + to non-AWS resources. + items: + properties: + latitude: + type: string + longitude: + type: string + type: object + type: array + localZoneGroup: + description: A AWS local zone group where the resource is + present. See https://docs.aws.amazon.com/local-zones/latest/ug/available-local-zones.html + for local zone group list. + type: string + type: object + healthCheckId: + description: The health check the record should be associated + with. + type: string + healthCheckIdRef: + description: Reference to a HealthCheck in route53 to populate + healthCheckId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + healthCheckIdSelector: + description: Selector for a HealthCheck in route53 to populate + healthCheckId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + latencyRoutingPolicy: + description: A block indicating a routing policy based on the + latency between the requestor and an AWS region. Conflicts with + any other routing policy. Documented below. + type: object + multivalueAnswerRoutingPolicy: + description: Set to true to indicate a multivalue answer routing + policy. 
Conflicts with any other routing policy. + type: boolean + name: + description: The name of the record. + type: string + records: + description: A string list of records.g., "first255characters\"\"morecharacters"). + items: + type: string + type: array + x-kubernetes-list-type: set + setIdentifier: + description: Unique identifier to differentiate records with routing + policies from one another. Required if using cidr_routing_policy, + failover_routing_policy, geolocation_routing_policy,geoproximity_routing_policy, + latency_routing_policy, multivalue_answer_routing_policy, or + weighted_routing_policy. + type: string + ttl: + description: The TTL of the record. + type: number + type: + description: The record type. Valid values are A, AAAA, CAA, CNAME, + DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + type: string + weightedRoutingPolicy: + description: A block indicating a weighted routing policy. Conflicts + with any other routing policy. Documented below. + properties: + weight: + description: A numeric value indicating the relative weight + of the record. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted. + type: number + type: object + zoneId: + description: The ID of the hosted zone to contain this record. + type: string + zoneIdRef: + description: Reference to a Zone in route53 to populate zoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + zoneIdSelector: + description: Selector for a Zone in route53 to populate zoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: RecordStatus defines the observed state of Record. + properties: + atProvider: + properties: + alias: + description: |- + An alias block. Conflicts with ttl & records. + Documented below. + properties: + evaluateTargetHealth: + description: Set to true if you want Route 53 to determine + whether to respond to DNS queries using this resource record + set by checking the health of the resource record set. Some + resources have special requirements, see related part of + documentation. + type: boolean + name: + description: The name of the record. + type: string + zoneId: + description: The ID of the hosted zone to contain this record. + type: string + type: object + allowOverwrite: + description: false by default. This configuration is not recommended + for most environments. + type: boolean + cidrRoutingPolicy: + description: A block indicating a routing policy based on the + IP network ranges of requestors. Conflicts with any other routing + policy. Documented below. + properties: + collectionId: + description: The CIDR collection ID. See the aws_route53_cidr_collection + resource for more details. + type: string + locationName: + description: The CIDR collection location name. See the aws_route53_cidr_location + resource for more details. 
A location_name with an asterisk + "*" can be used to create a default CIDR record. collection_id + is still required for default record. + type: string + type: object + failoverRoutingPolicy: + description: A block indicating the routing behavior when associated + health check fails. Conflicts with any other routing policy. + Documented below. + properties: + type: + description: The record type. Valid values are A, AAAA, CAA, + CNAME, DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + type: string + type: object + fqdn: + description: FQDN built using the zone domain and name. + type: string + geolocationRoutingPolicy: + description: A block indicating a routing policy based on the + geolocation of the requestor. Conflicts with any other routing + policy. Documented below. + properties: + continent: + description: A two-letter continent code. See http://docs.aws.amazon.com/Route53/latest/APIReference/API_GetGeoLocation.html + for code details. Either continent or country must be specified. + type: string + country: + description: A two-character country code or * to indicate + a default resource record set. + type: string + subdivision: + description: A subdivision code for a country. + type: string + type: object + geoproximityRoutingPolicy: + description: A block indicating a routing policy based on the + geoproximity of the requestor. Conflicts with any other routing + policy. Documented below. + properties: + awsRegion: + description: A AWS region where the resource is present. + type: string + bias: + description: Route more traffic or less traffic to the resource + by specifying a value ranges between -90 to 90. See https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy-geoproximity.html + for bias details. + type: number + coordinates: + description: Specify latitude and longitude for routing traffic + to non-AWS resources. 
+ items: + properties: + latitude: + type: string + longitude: + type: string + type: object + type: array + localZoneGroup: + description: A AWS local zone group where the resource is + present. See https://docs.aws.amazon.com/local-zones/latest/ug/available-local-zones.html + for local zone group list. + type: string + type: object + healthCheckId: + description: The health check the record should be associated + with. + type: string + id: + type: string + latencyRoutingPolicy: + description: A block indicating a routing policy based on the + latency between the requestor and an AWS region. Conflicts with + any other routing policy. Documented below. + properties: + region: + description: An AWS region from which to measure latency. + See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-latency + type: string + type: object + multivalueAnswerRoutingPolicy: + description: Set to true to indicate a multivalue answer routing + policy. Conflicts with any other routing policy. + type: boolean + name: + description: The name of the record. + type: string + records: + description: A string list of records.g., "first255characters\"\"morecharacters"). + items: + type: string + type: array + x-kubernetes-list-type: set + setIdentifier: + description: Unique identifier to differentiate records with routing + policies from one another. Required if using cidr_routing_policy, + failover_routing_policy, geolocation_routing_policy,geoproximity_routing_policy, + latency_routing_policy, multivalue_answer_routing_policy, or + weighted_routing_policy. + type: string + ttl: + description: The TTL of the record. + type: number + type: + description: The record type. Valid values are A, AAAA, CAA, CNAME, + DS, MX, NAPTR, NS, PTR, SOA, SPF, SRV and TXT. + type: string + weightedRoutingPolicy: + description: A block indicating a weighted routing policy. Conflicts + with any other routing policy. Documented below. 
+ properties: + weight: + description: A numeric value indicating the relative weight + of the record. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted. + type: number + type: object + zoneId: + description: The ID of the hosted zone to contain this record. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/route53recoverycontrolconfig.aws.upbound.io_safetyrules.yaml b/package/crds/route53recoverycontrolconfig.aws.upbound.io_safetyrules.yaml index 63b899796c..29b90ad8de 100644 --- a/package/crds/route53recoverycontrolconfig.aws.upbound.io_safetyrules.yaml +++ b/package/crds/route53recoverycontrolconfig.aws.upbound.io_safetyrules.yaml @@ -811,3 +811,790 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SafetyRule is the Schema for the SafetyRules API. Provides an + AWS Route 53 Recovery Control Config Safety Rule + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SafetyRuleSpec defines the desired state of SafetyRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + assertedControls: + description: Routing controls that are part of transactions that + are evaluated to determine if a request to change a routing + control state is allowed. + items: + type: string + type: array + assertedControlsRefs: + description: References to RoutingControl in route53recoverycontrolconfig + to populate assertedControls. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + assertedControlsSelector: + description: Selector for a list of RoutingControl in route53recoverycontrolconfig + to populate assertedControls. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + controlPanelArn: + description: ARN of the control panel in which this safety rule + will reside. + type: string + controlPanelArnRef: + description: Reference to a ControlPanel in route53recoverycontrolconfig + to populate controlPanelArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + controlPanelArnSelector: + description: Selector for a ControlPanel in route53recoverycontrolconfig + to populate controlPanelArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + gatingControls: + description: Gating controls for the new gating rule. That is, + routing controls that are evaluated by the rule configuration + that you specify. + items: + type: string + type: array + name: + description: Name describing the safety rule. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + ruleConfig: + description: Configuration block for safety rule criteria. See + below. + properties: + inverted: + description: Logical negation of the rule. + type: boolean + threshold: + description: Number of controls that must be set when you + specify an ATLEAST type rule. + type: number + type: + description: Rule type. Valid values are ATLEAST, AND, and + OR. + type: string + type: object + targetControls: + description: Routing controls that can only be set or unset if + the specified rule_config evaluates to true for the specified + gating_controls. + items: + type: string + type: array + waitPeriodMs: + description: Evaluation period, in milliseconds (ms), during which + any request against the target routing controls will fail. + type: number + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + assertedControls: + description: Routing controls that are part of transactions that + are evaluated to determine if a request to change a routing + control state is allowed. + items: + type: string + type: array + assertedControlsRefs: + description: References to RoutingControl in route53recoverycontrolconfig + to populate assertedControls. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + assertedControlsSelector: + description: Selector for a list of RoutingControl in route53recoverycontrolconfig + to populate assertedControls. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + controlPanelArn: + description: ARN of the control panel in which this safety rule + will reside. + type: string + controlPanelArnRef: + description: Reference to a ControlPanel in route53recoverycontrolconfig + to populate controlPanelArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + controlPanelArnSelector: + description: Selector for a ControlPanel in route53recoverycontrolconfig + to populate controlPanelArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + gatingControls: + description: Gating controls for the new gating rule. That is, + routing controls that are evaluated by the rule configuration + that you specify. + items: + type: string + type: array + name: + description: Name describing the safety rule. + type: string + ruleConfig: + description: Configuration block for safety rule criteria. See + below. + properties: + inverted: + description: Logical negation of the rule. + type: boolean + threshold: + description: Number of controls that must be set when you + specify an ATLEAST type rule. + type: number + type: + description: Rule type. Valid values are ATLEAST, AND, and + OR. + type: string + type: object + targetControls: + description: Routing controls that can only be set or unset if + the specified rule_config evaluates to true for the specified + gating_controls. 
+ items: + type: string + type: array + waitPeriodMs: + description: Evaluation period, in milliseconds (ms), during which + any request against the target routing controls will fail. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.ruleConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ruleConfig) + || (has(self.initProvider) && has(self.initProvider.ruleConfig))' + - message: spec.forProvider.waitPeriodMs is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.waitPeriodMs) + || (has(self.initProvider) && has(self.initProvider.waitPeriodMs))' + status: + description: SafetyRuleStatus defines the observed state of SafetyRule. + properties: + atProvider: + properties: + arn: + description: ARN of the safety rule. + type: string + assertedControls: + description: Routing controls that are part of transactions that + are evaluated to determine if a request to change a routing + control state is allowed. + items: + type: string + type: array + controlPanelArn: + description: ARN of the control panel in which this safety rule + will reside. + type: string + gatingControls: + description: Gating controls for the new gating rule. That is, + routing controls that are evaluated by the rule configuration + that you specify. + items: + type: string + type: array + id: + type: string + name: + description: Name describing the safety rule. + type: string + ruleConfig: + description: Configuration block for safety rule criteria. See + below. + properties: + inverted: + description: Logical negation of the rule. 
+ type: boolean + threshold: + description: Number of controls that must be set when you + specify an ATLEAST type rule. + type: number + type: + description: Rule type. Valid values are ATLEAST, AND, and + OR. + type: string + type: object + status: + description: Status of the safety rule. PENDING when it is being + created/updated, PENDING_DELETION when it is being deleted, + and DEPLOYED otherwise. + type: string + targetControls: + description: Routing controls that can only be set or unset if + the specified rule_config evaluates to true for the specified + gating_controls. + items: + type: string + type: array + waitPeriodMs: + description: Evaluation period, in milliseconds (ms), during which + any request against the target routing controls will fail. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/route53recoveryreadiness.aws.upbound.io_resourcesets.yaml b/package/crds/route53recoveryreadiness.aws.upbound.io_resourcesets.yaml index 6aded51cac..867c344450 100644 --- a/package/crds/route53recoveryreadiness.aws.upbound.io_resourcesets.yaml +++ b/package/crds/route53recoveryreadiness.aws.upbound.io_resourcesets.yaml @@ -742,3 +742,700 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ResourceSet is the Schema for the ResourceSets API. Provides + an AWS Route 53 Recovery Readiness Resource Set + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourceSetSpec defines the desired state of ResourceSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resourceSetType: + description: Type of the resources in the resource set. + type: string + resources: + description: List of resources to add to this resource set. See + below. + items: + properties: + dnsTargetResource: + description: Component for DNS/Routing Control Readiness + Checks. + properties: + domainName: + description: DNS Name that acts as the ingress point + to a portion of application. + type: string + hostedZoneArn: + description: Hosted Zone ARN that contains the DNS record + with the provided name of target resource. + type: string + recordSetId: + description: Route53 record set id to uniquely identify + a record given a domain_name and a record_type. + type: string + recordType: + description: Type of DNS Record of target resource. 
+ type: string + targetResource: + description: Target resource the R53 record specified + with the above params points to. + properties: + nlbResource: + description: NLB resource a DNS Target Resource + points to. Required if r53_resource is not set. + properties: + arn: + description: NLB resource ARN. + type: string + type: object + r53Resource: + description: Route53 resource a DNS Target Resource + record points to. + properties: + domainName: + description: Domain name that is targeted. + type: string + recordSetId: + description: Resource record set ID that is + targeted. + type: string + type: object + type: object + type: object + readinessScopes: + description: Recovery group ARN or cell ARN that contains + this resource set. + items: + type: string + type: array + resourceArn: + description: ARN of the resource. + type: string + resourceArnRef: + description: Reference to a MetricAlarm in cloudwatch to + populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a MetricAlarm in cloudwatch to + populate resourceArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + resourceSetType: + description: Type of the resources in the resource set. + type: string + resources: + description: List of resources to add to this resource set. See + below. + items: + properties: + dnsTargetResource: + description: Component for DNS/Routing Control Readiness + Checks. + properties: + domainName: + description: DNS Name that acts as the ingress point + to a portion of application. + type: string + hostedZoneArn: + description: Hosted Zone ARN that contains the DNS record + with the provided name of target resource. + type: string + recordSetId: + description: Route53 record set id to uniquely identify + a record given a domain_name and a record_type. + type: string + recordType: + description: Type of DNS Record of target resource. + type: string + targetResource: + description: Target resource the R53 record specified + with the above params points to. + properties: + nlbResource: + description: NLB resource a DNS Target Resource + points to. Required if r53_resource is not set. + properties: + arn: + description: NLB resource ARN. + type: string + type: object + r53Resource: + description: Route53 resource a DNS Target Resource + record points to. + properties: + domainName: + description: Domain name that is targeted. + type: string + recordSetId: + description: Resource record set ID that is + targeted. + type: string + type: object + type: object + type: object + readinessScopes: + description: Recovery group ARN or cell ARN that contains + this resource set. + items: + type: string + type: array + resourceArn: + description: ARN of the resource. + type: string + resourceArnRef: + description: Reference to a MetricAlarm in cloudwatch to + populate resourceArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceArnSelector: + description: Selector for a MetricAlarm in cloudwatch to + populate resourceArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.resourceSetType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resourceSetType) + || (has(self.initProvider) && has(self.initProvider.resourceSetType))' + - message: spec.forProvider.resources is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resources) + || (has(self.initProvider) && has(self.initProvider.resources))' + status: + description: ResourceSetStatus defines the observed state of ResourceSet. + properties: + atProvider: + properties: + arn: + description: ARN of the resource set + type: string + id: + type: string + resourceSetType: + description: Type of the resources in the resource set. + type: string + resources: + description: List of resources to add to this resource set. See + below. + items: + properties: + componentId: + description: Unique identified for DNS Target Resources, + use for readiness checks. + type: string + dnsTargetResource: + description: Component for DNS/Routing Control Readiness + Checks. + properties: + domainName: + description: DNS Name that acts as the ingress point + to a portion of application. + type: string + hostedZoneArn: + description: Hosted Zone ARN that contains the DNS record + with the provided name of target resource. + type: string + recordSetId: + description: Route53 record set id to uniquely identify + a record given a domain_name and a record_type. + type: string + recordType: + description: Type of DNS Record of target resource. + type: string + targetResource: + description: Target resource the R53 record specified + with the above params points to. 
+ properties: + nlbResource: + description: NLB resource a DNS Target Resource + points to. Required if r53_resource is not set. + properties: + arn: + description: NLB resource ARN. + type: string + type: object + r53Resource: + description: Route53 resource a DNS Target Resource + record points to. + properties: + domainName: + description: Domain name that is targeted. + type: string + recordSetId: + description: Resource record set ID that is + targeted. + type: string + type: object + type: object + type: object + readinessScopes: + description: Recovery group ARN or cell ARN that contains + this resource set. + items: + type: string + type: array + resourceArn: + description: ARN of the resource. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/rum.aws.upbound.io_appmonitors.yaml b/package/crds/rum.aws.upbound.io_appmonitors.yaml index e5bff41053..20e82ce4a2 100644 --- a/package/crds/rum.aws.upbound.io_appmonitors.yaml +++ b/package/crds/rum.aws.upbound.io_appmonitors.yaml @@ -652,3 +652,625 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AppMonitor is the Schema for the AppMonitors API. Provides a + CloudWatch RUM App Monitor resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AppMonitorSpec defines the desired state of AppMonitor + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appMonitorConfiguration: + description: configuration data for the app monitor. See app_monitor_configuration + below. + properties: + allowCookies: + description: If you set this to true, RUM web client sets + two cookies, a session cookie and a user cookie. The cookies + allow the RUM web client to collect data relating to the + number of users an application has and the behavior of the + application across a sequence of events. Cookies are stored + in the top-level domain of the current page. 
+ type: boolean + enableXray: + description: If you set this to true, RUM enables X-Ray tracing + for the user sessions that RUM samples. RUM adds an X-Ray + trace header to allowed HTTP requests. It also records an + X-Ray segment for allowed HTTP requests. + type: boolean + excludedPages: + description: A list of URLs in your website or application + to exclude from RUM data collection. + items: + type: string + type: array + x-kubernetes-list-type: set + favoritePages: + description: A list of pages in the CloudWatch RUM console + that are to be displayed with a "favorite" icon. + items: + type: string + type: array + x-kubernetes-list-type: set + guestRoleArn: + description: The ARN of the guest IAM role that is attached + to the Amazon Cognito identity pool that is used to authorize + the sending of data to RUM. + type: string + identityPoolId: + description: The ID of the Amazon Cognito identity pool that + is used to authorize the sending of data to RUM. + type: string + includedPages: + description: If this app monitor is to collect data from only + certain pages in your application, this structure lists + those pages. + items: + type: string + type: array + x-kubernetes-list-type: set + sessionSampleRate: + description: Specifies the percentage of user sessions to + use for RUM data collection. Choosing a higher percentage + gives you more data but also incurs more costs. The number + you specify is the percentage of user sessions that will + be used. Default value is 0.1. + type: number + telemetries: + description: An array that lists the types of telemetry data + that this app monitor is to collect. Valid values are errors, + performance, and http. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + customEvents: + description: Specifies whether this app monitor allows the web + client to define and send custom events. If you omit this parameter, + custom events are DISABLED. See custom_events below. 
+ properties: + status: + description: Specifies whether this app monitor allows the + web client to define and send custom events. The default + is for custom events to be DISABLED. Valid values are DISABLED + and ENABLED. + type: string + type: object + cwLogEnabled: + description: Data collected by RUM is kept by RUM for 30 days + and then deleted. This parameter specifies whether RUM sends + a copy of this telemetry data to Amazon CloudWatch Logs in your + account. This enables you to keep the telemetry data for more + than 30 days, but it does incur Amazon CloudWatch Logs charges. + Default value is false. + type: boolean + domain: + description: The top-level internet domain name for which your + application has administrative authority. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appMonitorConfiguration: + description: configuration data for the app monitor. See app_monitor_configuration + below. + properties: + allowCookies: + description: If you set this to true, RUM web client sets + two cookies, a session cookie and a user cookie. 
The cookies + allow the RUM web client to collect data relating to the + number of users an application has and the behavior of the + application across a sequence of events. Cookies are stored + in the top-level domain of the current page. + type: boolean + enableXray: + description: If you set this to true, RUM enables X-Ray tracing + for the user sessions that RUM samples. RUM adds an X-Ray + trace header to allowed HTTP requests. It also records an + X-Ray segment for allowed HTTP requests. + type: boolean + excludedPages: + description: A list of URLs in your website or application + to exclude from RUM data collection. + items: + type: string + type: array + x-kubernetes-list-type: set + favoritePages: + description: A list of pages in the CloudWatch RUM console + that are to be displayed with a "favorite" icon. + items: + type: string + type: array + x-kubernetes-list-type: set + guestRoleArn: + description: The ARN of the guest IAM role that is attached + to the Amazon Cognito identity pool that is used to authorize + the sending of data to RUM. + type: string + identityPoolId: + description: The ID of the Amazon Cognito identity pool that + is used to authorize the sending of data to RUM. + type: string + includedPages: + description: If this app monitor is to collect data from only + certain pages in your application, this structure lists + those pages. + items: + type: string + type: array + x-kubernetes-list-type: set + sessionSampleRate: + description: Specifies the percentage of user sessions to + use for RUM data collection. Choosing a higher percentage + gives you more data but also incurs more costs. The number + you specify is the percentage of user sessions that will + be used. Default value is 0.1. + type: number + telemetries: + description: An array that lists the types of telemetry data + that this app monitor is to collect. Valid values are errors, + performance, and http. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + customEvents: + description: Specifies whether this app monitor allows the web + client to define and send custom events. If you omit this parameter, + custom events are DISABLED. See custom_events below. + properties: + status: + description: Specifies whether this app monitor allows the + web client to define and send custom events. The default + is for custom events to be DISABLED. Valid values are DISABLED + and ENABLED. + type: string + type: object + cwLogEnabled: + description: Data collected by RUM is kept by RUM for 30 days + and then deleted. This parameter specifies whether RUM sends + a copy of this telemetry data to Amazon CloudWatch Logs in your + account. This enables you to keep the telemetry data for more + than 30 days, but it does incur Amazon CloudWatch Logs charges. + Default value is false. + type: boolean + domain: + description: The top-level internet domain name for which your + application has administrative authority. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.domain is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.domain) + || (has(self.initProvider) && has(self.initProvider.domain))' + status: + description: AppMonitorStatus defines the observed state of AppMonitor. + properties: + atProvider: + properties: + appMonitorConfiguration: + description: configuration data for the app monitor. See app_monitor_configuration + below. + properties: + allowCookies: + description: If you set this to true, RUM web client sets + two cookies, a session cookie and a user cookie. The cookies + allow the RUM web client to collect data relating to the + number of users an application has and the behavior of the + application across a sequence of events. Cookies are stored + in the top-level domain of the current page. 
+ type: boolean + enableXray: + description: If you set this to true, RUM enables X-Ray tracing + for the user sessions that RUM samples. RUM adds an X-Ray + trace header to allowed HTTP requests. It also records an + X-Ray segment for allowed HTTP requests. + type: boolean + excludedPages: + description: A list of URLs in your website or application + to exclude from RUM data collection. + items: + type: string + type: array + x-kubernetes-list-type: set + favoritePages: + description: A list of pages in the CloudWatch RUM console + that are to be displayed with a "favorite" icon. + items: + type: string + type: array + x-kubernetes-list-type: set + guestRoleArn: + description: The ARN of the guest IAM role that is attached + to the Amazon Cognito identity pool that is used to authorize + the sending of data to RUM. + type: string + identityPoolId: + description: The ID of the Amazon Cognito identity pool that + is used to authorize the sending of data to RUM. + type: string + includedPages: + description: If this app monitor is to collect data from only + certain pages in your application, this structure lists + those pages. + items: + type: string + type: array + x-kubernetes-list-type: set + sessionSampleRate: + description: Specifies the percentage of user sessions to + use for RUM data collection. Choosing a higher percentage + gives you more data but also incurs more costs. The number + you specify is the percentage of user sessions that will + be used. Default value is 0.1. + type: number + telemetries: + description: An array that lists the types of telemetry data + that this app monitor is to collect. Valid values are errors, + performance, and http. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + appMonitorId: + description: The unique ID of the app monitor. Useful for JS templates. + type: string + arn: + description: The Amazon Resource Name (ARN) specifying the app + monitor. 
+ type: string + customEvents: + description: Specifies whether this app monitor allows the web + client to define and send custom events. If you omit this parameter, + custom events are DISABLED. See custom_events below. + properties: + status: + description: Specifies whether this app monitor allows the + web client to define and send custom events. The default + is for custom events to be DISABLED. Valid values are DISABLED + and ENABLED. + type: string + type: object + cwLogEnabled: + description: Data collected by RUM is kept by RUM for 30 days + and then deleted. This parameter specifies whether RUM sends + a copy of this telemetry data to Amazon CloudWatch Logs in your + account. This enables you to keep the telemetry data for more + than 30 days, but it does incur Amazon CloudWatch Logs charges. + Default value is false. + type: boolean + cwLogGroup: + description: The name of the log group where the copies are stored. + type: string + domain: + description: The top-level internet domain name for which your + application has administrative authority. + type: string + id: + description: The CloudWatch RUM name as it is the identifier of + a RUM. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketacls.yaml b/package/crds/s3.aws.upbound.io_bucketacls.yaml index 5d662a5d1f..12dd537a8f 100644 --- a/package/crds/s3.aws.upbound.io_bucketacls.yaml +++ b/package/crds/s3.aws.upbound.io_bucketacls.yaml @@ -675,3 +675,642 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketACL is the Schema for the BucketACLs API. Provides an S3 + bucket ACL resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketACLSpec defines the desired state of BucketACL + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessControlPolicy: + description: Configuration block that sets the ACL permissions + for an object per grantee. See below. + properties: + grant: + description: Set of grant configuration blocks. See below. + items: + properties: + grantee: + description: Configuration block for the person being + granted permissions. See below. + properties: + emailAddress: + description: Email address of the grantee. See Regions + and Endpoints for supported AWS regions where + this argument can be specified. + type: string + id: + description: ID of the owner. + type: string + type: + description: 'Type of grantee. Valid values: CanonicalUser, + AmazonCustomerByEmail, Group.' + type: string + uri: + description: URI of the grantee group. + type: string + type: object + permission: + description: 'Logging permissions assigned to the grantee + for the bucket. Valid values: FULL_CONTROL, WRITE, + WRITE_ACP, READ, READ_ACP. See What permissions can + I grant? for more details about what each permission + means in the context of buckets.' 
+ type: string + type: object + type: array + owner: + description: Configuration block for the bucket owner's display + name and ID. See below. + properties: + displayName: + description: Display name of the owner. + type: string + id: + description: ID of the owner. + type: string + type: object + type: object + acl: + description: Canned ACL to apply to the bucket. + type: string + bucket: + description: Bucket to which to apply the ACL. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessControlPolicy: + description: Configuration block that sets the ACL permissions + for an object per grantee. See below. + properties: + grant: + description: Set of grant configuration blocks. See below. + items: + properties: + grantee: + description: Configuration block for the person being + granted permissions. See below. 
+ properties: + emailAddress: + description: Email address of the grantee. See Regions + and Endpoints for supported AWS regions where + this argument can be specified. + type: string + id: + description: ID of the owner. + type: string + type: + description: 'Type of grantee. Valid values: CanonicalUser, + AmazonCustomerByEmail, Group.' + type: string + uri: + description: URI of the grantee group. + type: string + type: object + permission: + description: 'Logging permissions assigned to the grantee + for the bucket. Valid values: FULL_CONTROL, WRITE, + WRITE_ACP, READ, READ_ACP. See What permissions can + I grant? for more details about what each permission + means in the context of buckets.' + type: string + type: object + type: array + owner: + description: Configuration block for the bucket owner's display + name and ID. See below. + properties: + displayName: + description: Display name of the owner. + type: string + id: + description: ID of the owner. + type: string + type: object + type: object + acl: + description: Canned ACL to apply to the bucket. + type: string + bucket: + description: Bucket to which to apply the ACL. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: BucketACLStatus defines the observed state of BucketACL. + properties: + atProvider: + properties: + accessControlPolicy: + description: Configuration block that sets the ACL permissions + for an object per grantee. See below. + properties: + grant: + description: Set of grant configuration blocks. See below. + items: + properties: + grantee: + description: Configuration block for the person being + granted permissions. See below. + properties: + displayName: + description: Display name of the owner. + type: string + emailAddress: + description: Email address of the grantee. 
See Regions + and Endpoints for supported AWS regions where + this argument can be specified. + type: string + id: + description: ID of the owner. + type: string + type: + description: 'Type of grantee. Valid values: CanonicalUser, + AmazonCustomerByEmail, Group.' + type: string + uri: + description: URI of the grantee group. + type: string + type: object + permission: + description: 'Logging permissions assigned to the grantee + for the bucket. Valid values: FULL_CONTROL, WRITE, + WRITE_ACP, READ, READ_ACP. See What permissions can + I grant? for more details about what each permission + means in the context of buckets.' + type: string + type: object + type: array + owner: + description: Configuration block for the bucket owner's display + name and ID. See below. + properties: + displayName: + description: Display name of the owner. + type: string + id: + description: ID of the owner. + type: string + type: object + type: object + acl: + description: Canned ACL to apply to the bucket. + type: string + bucket: + description: Bucket to which to apply the ACL. + type: string + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + id: + description: The bucket, expected_bucket_owner (if configured), + and acl (if configured) separated by commas (,). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketanalyticsconfigurations.yaml b/package/crds/s3.aws.upbound.io_bucketanalyticsconfigurations.yaml index 2c4b0a7564..a0e3eb3f65 100644 --- a/package/crds/s3.aws.upbound.io_bucketanalyticsconfigurations.yaml +++ b/package/crds/s3.aws.upbound.io_bucketanalyticsconfigurations.yaml @@ -852,3 +852,799 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketAnalyticsConfiguration is the Schema 
for the BucketAnalyticsConfigurations + API. Provides a S3 bucket analytics configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketAnalyticsConfigurationSpec defines the desired state + of BucketAnalyticsConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the bucket this analytics configuration is + associated with. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + filter: + description: Object filtering that accepts a prefix, tags, or + a logical AND of prefix and tags (documented below). + properties: + prefix: + description: Object prefix for filtering. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: Unique identifier of the analytics configuration + for the bucket. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + storageClassAnalysis: + description: Configuration for the analytics data export (documented + below). + properties: + dataExport: + description: Data export configuration (documented below). + properties: + destination: + description: Specifies the destination for the exported + analytics data (documented below). + properties: + s3BucketDestination: + description: Analytics data export currently only + supports an S3 bucket destination (documented below). + properties: + bucketAccountId: + description: Account ID that owns the destination + bucket. + type: string + bucketArn: + description: ARN of the destination bucket. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate + bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate + bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + format: + description: 'Output format of exported analytics + data. Allowed values: CSV. Default value: CSV.' + type: string + prefix: + description: Object prefix for filtering. + type: string + type: object + type: object + outputSchemaVersion: + description: 'Schema version of exported analytics data. + Allowed values: V_1. Default value: V_1.' + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the bucket this analytics configuration is + associated with. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + filter: + description: Object filtering that accepts a prefix, tags, or + a logical AND of prefix and tags (documented below). + properties: + prefix: + description: Object prefix for filtering. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: Unique identifier of the analytics configuration + for the bucket. + type: string + storageClassAnalysis: + description: Configuration for the analytics data export (documented + below). + properties: + dataExport: + description: Data export configuration (documented below). + properties: + destination: + description: Specifies the destination for the exported + analytics data (documented below). + properties: + s3BucketDestination: + description: Analytics data export currently only + supports an S3 bucket destination (documented below). + properties: + bucketAccountId: + description: Account ID that owns the destination + bucket. 
+ type: string + bucketArn: + description: ARN of the destination bucket. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate + bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate + bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + format: + description: 'Output format of exported analytics + data. Allowed values: CSV. Default value: CSV.' + type: string + prefix: + description: Object prefix for filtering. + type: string + type: object + type: object + outputSchemaVersion: + description: 'Schema version of exported analytics data. + Allowed values: V_1. Default value: V_1.' + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: BucketAnalyticsConfigurationStatus defines the observed state + of BucketAnalyticsConfiguration. + properties: + atProvider: + properties: + bucket: + description: Name of the bucket this analytics configuration is + associated with. + type: string + filter: + description: Object filtering that accepts a prefix, tags, or + a logical AND of prefix and tags (documented below). + properties: + prefix: + description: Object prefix for filtering. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + id: + type: string + name: + description: Unique identifier of the analytics configuration + for the bucket. + type: string + storageClassAnalysis: + description: Configuration for the analytics data export (documented + below). + properties: + dataExport: + description: Data export configuration (documented below). + properties: + destination: + description: Specifies the destination for the exported + analytics data (documented below). 
+ properties: + s3BucketDestination: + description: Analytics data export currently only + supports an S3 bucket destination (documented below). + properties: + bucketAccountId: + description: Account ID that owns the destination + bucket. + type: string + bucketArn: + description: ARN of the destination bucket. + type: string + format: + description: 'Output format of exported analytics + data. Allowed values: CSV. Default value: CSV.' + type: string + prefix: + description: Object prefix for filtering. + type: string + type: object + type: object + outputSchemaVersion: + description: 'Schema version of exported analytics data. + Allowed values: V_1. Default value: V_1.' + type: string + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketintelligenttieringconfigurations.yaml b/package/crds/s3.aws.upbound.io_bucketintelligenttieringconfigurations.yaml index e347e97a1d..3bec8dff7a 100644 --- a/package/crds/s3.aws.upbound.io_bucketintelligenttieringconfigurations.yaml +++ b/package/crds/s3.aws.upbound.io_bucketintelligenttieringconfigurations.yaml @@ -624,3 +624,603 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketIntelligentTieringConfiguration is the Schema for the BucketIntelligentTieringConfigurations + API. Provides an S3 Intelligent-Tiering configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketIntelligentTieringConfigurationSpec defines the desired + state of BucketIntelligentTieringConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the bucket this intelligent tiering configuration + is associated with. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + filter: + description: Bucket filter. The configuration only includes objects + that meet the filter's criteria (documented below). + properties: + prefix: + description: Object key name prefix that identifies the subset + of objects to which the configuration applies. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + name: + description: Unique name used to identify the S3 Intelligent-Tiering + configuration for the bucket. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + status: + description: 'Specifies the status of the configuration. Valid + values: Enabled, Disabled.' + type: string + tiering: + description: S3 Intelligent-Tiering storage class tiers of the + configuration (documented below). + items: + properties: + accessTier: + description: 'S3 Intelligent-Tiering access tier. Valid + values: ARCHIVE_ACCESS, DEEP_ARCHIVE_ACCESS.' + type: string + days: + description: Number of consecutive days of no access after + which an object will be eligible to be transitioned to + the corresponding tier. + type: number + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the bucket this intelligent tiering configuration + is associated with. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + filter: + description: Bucket filter. 
The configuration only includes objects + that meet the filter's criteria (documented below). + properties: + prefix: + description: Object key name prefix that identifies the subset + of objects to which the configuration applies. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: Unique name used to identify the S3 Intelligent-Tiering + configuration for the bucket. + type: string + status: + description: 'Specifies the status of the configuration. Valid + values: Enabled, Disabled.' + type: string + tiering: + description: S3 Intelligent-Tiering storage class tiers of the + configuration (documented below). + items: + properties: + accessTier: + description: 'S3 Intelligent-Tiering access tier. Valid + values: ARCHIVE_ACCESS, DEEP_ARCHIVE_ACCESS.' + type: string + days: + description: Number of consecutive days of no access after + which an object will be eligible to be transitioned to + the corresponding tier. + type: number + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.tiering is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tiering) + || (has(self.initProvider) && has(self.initProvider.tiering))' + status: + description: BucketIntelligentTieringConfigurationStatus defines the observed + state of BucketIntelligentTieringConfiguration. + properties: + atProvider: + properties: + bucket: + description: Name of the bucket this intelligent tiering configuration + is associated with. + type: string + filter: + description: Bucket filter. The configuration only includes objects + that meet the filter's criteria (documented below). 
+ properties: + prefix: + description: Object key name prefix that identifies the subset + of objects to which the configuration applies. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + id: + type: string + name: + description: Unique name used to identify the S3 Intelligent-Tiering + configuration for the bucket. + type: string + status: + description: 'Specifies the status of the configuration. Valid + values: Enabled, Disabled.' + type: string + tiering: + description: S3 Intelligent-Tiering storage class tiers of the + configuration (documented below). + items: + properties: + accessTier: + description: 'S3 Intelligent-Tiering access tier. Valid + values: ARCHIVE_ACCESS, DEEP_ARCHIVE_ACCESS.' + type: string + days: + description: Number of consecutive days of no access after + which an object will be eligible to be transitioned to + the corresponding tier. + type: number + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketinventories.yaml b/package/crds/s3.aws.upbound.io_bucketinventories.yaml index e406153c36..7c9697862f 100644 --- a/package/crds/s3.aws.upbound.io_bucketinventories.yaml +++ b/package/crds/s3.aws.upbound.io_bucketinventories.yaml @@ -956,3 +956,892 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketInventory is the Schema for the BucketInventorys API. Provides + a S3 bucket inventory configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketInventorySpec defines the desired state of BucketInventory + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the source bucket that inventory lists the + objects for. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + destination: + description: Contains information about where to publish the inventory + results (documented below). + properties: + bucket: + description: Name of the source bucket that inventory lists + the objects for. + properties: + accountId: + description: ID of the account that owns the destination + bucket. 
Recommended to be set to prevent problems if + the destination bucket ownership changes. + type: string + bucketArn: + description: Amazon S3 bucket ARN of the destination. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + encryption: + description: Contains the type of server-side encryption + to use to encrypt the inventory (documented below). + properties: + sseKms: + description: Specifies to use server-side encryption + with AWS KMS-managed keys to encrypt the inventory + file (documented below). + properties: + keyId: + description: ARN of the KMS customer master key + (CMK) used to encrypt the inventory file. + type: string + type: object + sseS3: + description: Specifies to use server-side encryption + with Amazon S3-managed keys (SSE-S3) to encrypt + the inventory file. + type: object + type: object + format: + description: Specifies the output format of the inventory + results. Can be CSV, ORC or Parquet. + type: string + prefix: + description: Prefix that an object must have to be included + in the inventory results. + type: string + type: object + type: object + enabled: + description: Specifies whether the inventory is enabled or disabled. + type: boolean + filter: + description: Specifies an inventory filter. The inventory only + includes objects that meet the filter's criteria (documented + below). + properties: + prefix: + description: Prefix that an object must have to be included + in the inventory results. + type: string + type: object + includedObjectVersions: + description: 'Object versions to include in the inventory list. + Valid values: All, Current.' + type: string + name: + description: Unique identifier of the inventory configuration + for the bucket. 
+ type: string + optionalFields: + description: List of optional fields that are included in the + inventory results. Please refer to the S3 documentation for + more details. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: Region is the region you'd like your resource to + be created in. + type: string + schedule: + description: Specifies the schedule for generating inventory results + (documented below). + properties: + frequency: + description: 'Specifies how frequently inventory results are + produced. Valid values: Daily, Weekly.' + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the source bucket that inventory lists the + objects for. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + destination: + description: Contains information about where to publish the inventory + results (documented below). + properties: + bucket: + description: Name of the source bucket that inventory lists + the objects for. + properties: + accountId: + description: ID of the account that owns the destination + bucket. 
Recommended to be set to prevent problems if + the destination bucket ownership changes. + type: string + bucketArn: + description: Amazon S3 bucket ARN of the destination. + type: string + bucketArnRef: + description: Reference to a Bucket in s3 to populate bucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketArnSelector: + description: Selector for a Bucket in s3 to populate bucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + encryption: + description: Contains the type of server-side encryption + to use to encrypt the inventory (documented below). + properties: + sseKms: + description: Specifies to use server-side encryption + with AWS KMS-managed keys to encrypt the inventory + file (documented below). + properties: + keyId: + description: ARN of the KMS customer master key + (CMK) used to encrypt the inventory file. + type: string + type: object + sseS3: + description: Specifies to use server-side encryption + with Amazon S3-managed keys (SSE-S3) to encrypt + the inventory file. + type: object + type: object + format: + description: Specifies the output format of the inventory + results. Can be CSV, ORC or Parquet. + type: string + prefix: + description: Prefix that an object must have to be included + in the inventory results. + type: string + type: object + type: object + enabled: + description: Specifies whether the inventory is enabled or disabled. + type: boolean + filter: + description: Specifies an inventory filter. The inventory only + includes objects that meet the filter's criteria (documented + below). + properties: + prefix: + description: Prefix that an object must have to be included + in the inventory results. + type: string + type: object + includedObjectVersions: + description: 'Object versions to include in the inventory list. + Valid values: All, Current.' + type: string + name: + description: Unique identifier of the inventory configuration + for the bucket. 
+ type: string + optionalFields: + description: List of optional fields that are included in the + inventory results. Please refer to the S3 documentation for + more details. + items: + type: string + type: array + x-kubernetes-list-type: set + schedule: + description: Specifies the schedule for generating inventory results + (documented below). + properties: + frequency: + description: 'Specifies how frequently inventory results are + produced. Valid values: Daily, Weekly.' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.destination is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destination) + || (has(self.initProvider) && has(self.initProvider.destination))' + - message: spec.forProvider.includedObjectVersions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.includedObjectVersions) + || (has(self.initProvider) && has(self.initProvider.includedObjectVersions))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.schedule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.schedule) + || (has(self.initProvider) && has(self.initProvider.schedule))' + status: + description: BucketInventoryStatus defines the observed state of BucketInventory. + properties: + atProvider: + properties: + bucket: + description: Name of the source bucket that inventory lists the + objects for. + type: string + destination: + description: Contains information about where to publish the inventory + results (documented below). + properties: + bucket: + description: Name of the source bucket that inventory lists + the objects for. + properties: + accountId: + description: ID of the account that owns the destination + bucket. Recommended to be set to prevent problems if + the destination bucket ownership changes. 
+ type: string + bucketArn: + description: Amazon S3 bucket ARN of the destination. + type: string + encryption: + description: Contains the type of server-side encryption + to use to encrypt the inventory (documented below). + properties: + sseKms: + description: Specifies to use server-side encryption + with AWS KMS-managed keys to encrypt the inventory + file (documented below). + properties: + keyId: + description: ARN of the KMS customer master key + (CMK) used to encrypt the inventory file. + type: string + type: object + sseS3: + description: Specifies to use server-side encryption + with Amazon S3-managed keys (SSE-S3) to encrypt + the inventory file. + type: object + type: object + format: + description: Specifies the output format of the inventory + results. Can be CSV, ORC or Parquet. + type: string + prefix: + description: Prefix that an object must have to be included + in the inventory results. + type: string + type: object + type: object + enabled: + description: Specifies whether the inventory is enabled or disabled. + type: boolean + filter: + description: Specifies an inventory filter. The inventory only + includes objects that meet the filter's criteria (documented + below). + properties: + prefix: + description: Prefix that an object must have to be included + in the inventory results. + type: string + type: object + id: + type: string + includedObjectVersions: + description: 'Object versions to include in the inventory list. + Valid values: All, Current.' + type: string + name: + description: Unique identifier of the inventory configuration + for the bucket. + type: string + optionalFields: + description: List of optional fields that are included in the + inventory results. Please refer to the S3 documentation for + more details. + items: + type: string + type: array + x-kubernetes-list-type: set + schedule: + description: Specifies the schedule for generating inventory results + (documented below). 
+ properties: + frequency: + description: 'Specifies how frequently inventory results are + produced. Valid values: Daily, Weekly.' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketlifecycleconfigurations.yaml b/package/crds/s3.aws.upbound.io_bucketlifecycleconfigurations.yaml index 93d04ebeae..2b4a47365d 100644 --- a/package/crds/s3.aws.upbound.io_bucketlifecycleconfigurations.yaml +++ b/package/crds/s3.aws.upbound.io_bucketlifecycleconfigurations.yaml @@ -1074,3 +1074,1035 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketLifecycleConfiguration is the Schema for the BucketLifecycleConfigurations + API. Provides a S3 bucket lifecycle configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketLifecycleConfigurationSpec defines the desired state + of BucketLifecycleConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the source S3 bucket you want Amazon S3 to + monitor. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. If the bucket + is owned by a different account, the request will fail with + an HTTP 403 (Access Denied) error. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rule: + description: List of configuration blocks describing the rules + managing the replication. See below. + items: + properties: + abortIncompleteMultipartUpload: + description: Configuration block that specifies the days + since the initiation of an incomplete multipart upload + that Amazon S3 will wait before permanently removing all + parts of the upload. See below. 
+ properties: + daysAfterInitiation: + description: Number of days after which Amazon S3 aborts + an incomplete multipart upload. + type: number + type: object + expiration: + description: Configuration block that specifies the expiration + for the lifecycle of the object in the form of date, days + and, whether the object has a delete marker. See below. + properties: + date: + description: Date objects are transitioned to the specified + storage class. The date value must be in RFC3339 full-date + format e.g. 2023-08-22. + type: string + days: + description: Number of days after creation when objects + are transitioned to the specified storage class. The + value must be a positive integer. If both days and + date are not specified, defaults to 0. Valid values + depend on storage_class, see Transition objects using + Amazon S3 Lifecycle for more details. + type: number + expiredObjectDeleteMarker: + description: Indicates whether Amazon S3 will remove + a delete marker with no noncurrent versions. If set + to true, the delete marker will be expired; if set + to false the policy takes no action. + type: boolean + type: object + filter: + description: Configuration block used to identify objects + that a Lifecycle Rule applies to. See below. If not specified, + the rule will default to using prefix. + properties: + and: + description: Configuration block used to apply a logical + AND to two or more predicates. See below. The Lifecycle + Rule will apply to any object matching all the predicates + configured inside the and block. + properties: + objectSizeGreaterThan: + description: Minimum object size (in bytes) to which + the rule applies. + type: number + objectSizeLessThan: + description: Maximum object size (in bytes) to which + the rule applies. + type: number + prefix: + description: DEPRECATED Use filter instead. This + has been deprecated by Amazon S3. Prefix identifying + one or more objects to which the rule applies. 
+ Defaults to an empty string ("") if filter is + not specified. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. All + of these tags must exist in the object's tag set + in order for the rule to apply. + type: object + x-kubernetes-map-type: granular + type: object + objectSizeGreaterThan: + description: Minimum object size (in bytes) to which + the rule applies. + type: string + objectSizeLessThan: + description: Maximum object size (in bytes) to which + the rule applies. + type: string + prefix: + description: DEPRECATED Use filter instead. This has + been deprecated by Amazon S3. Prefix identifying one + or more objects to which the rule applies. Defaults + to an empty string ("") if filter is not specified. + type: string + tag: + description: Configuration block for specifying a tag + key and value. See below. + properties: + key: + description: Name of the object key. + type: string + value: + description: Value of the tag. + type: string + type: object + type: object + id: + description: Unique identifier for the rule. The value cannot + be longer than 255 characters. + type: string + noncurrentVersionExpiration: + description: Configuration block that specifies when noncurrent + object versions expire. See below. + properties: + newerNoncurrentVersions: + description: Number of noncurrent versions Amazon S3 + will retain. Must be a non-zero positive integer. + type: string + noncurrentDays: + description: Number of days an object is noncurrent + before Amazon S3 can perform the associated action. + type: number + type: object + noncurrentVersionTransition: + description: Set of configuration blocks that specify the + transition rule for the lifecycle rule that describes + when noncurrent objects transition to a specific storage + class. See below. + items: + properties: + newerNoncurrentVersions: + description: Number of noncurrent versions Amazon + S3 will retain. 
Must be a non-zero positive integer. + type: string + noncurrentDays: + description: Number of days an object is noncurrent + before Amazon S3 can perform the associated action. + type: number + storageClass: + description: 'Class of storage used to store the object. + Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, + INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR.' + type: string + type: object + type: array + prefix: + description: DEPRECATED Use filter instead. This has been + deprecated by Amazon S3. Prefix identifying one or more + objects to which the rule applies. Defaults to an empty + string ("") if filter is not specified. + type: string + status: + description: 'Whether the rule is currently being applied. + Valid values: Enabled or Disabled.' + type: string + transition: + description: Set of configuration blocks that specify when + an Amazon S3 object transitions to a specified storage + class. See below. + items: + properties: + date: + description: Date objects are transitioned to the + specified storage class. The date value must be + in RFC3339 full-date format e.g. 2023-08-22. + type: string + days: + description: Number of days after creation when objects + are transitioned to the specified storage class. + The value must be a positive integer. If both days + and date are not specified, defaults to 0. Valid + values depend on storage_class, see Transition objects + using Amazon S3 Lifecycle for more details. + type: number + storageClass: + description: 'Class of storage used to store the object. + Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, + INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR.' + type: string + type: object + type: array + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the source S3 bucket you want Amazon S3 to + monitor. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. If the bucket + is owned by a different account, the request will fail with + an HTTP 403 (Access Denied) error. + type: string + rule: + description: List of configuration blocks describing the rules + managing the replication. See below. + items: + properties: + abortIncompleteMultipartUpload: + description: Configuration block that specifies the days + since the initiation of an incomplete multipart upload + that Amazon S3 will wait before permanently removing all + parts of the upload. See below. + properties: + daysAfterInitiation: + description: Number of days after which Amazon S3 aborts + an incomplete multipart upload. + type: number + type: object + expiration: + description: Configuration block that specifies the expiration + for the lifecycle of the object in the form of date, days + and, whether the object has a delete marker. See below. + properties: + date: + description: Date objects are transitioned to the specified + storage class. 
The date value must be in RFC3339 full-date + format e.g. 2023-08-22. + type: string + days: + description: Number of days after creation when objects + are transitioned to the specified storage class. The + value must be a positive integer. If both days and + date are not specified, defaults to 0. Valid values + depend on storage_class, see Transition objects using + Amazon S3 Lifecycle for more details. + type: number + expiredObjectDeleteMarker: + description: Indicates whether Amazon S3 will remove + a delete marker with no noncurrent versions. If set + to true, the delete marker will be expired; if set + to false the policy takes no action. + type: boolean + type: object + filter: + description: Configuration block used to identify objects + that a Lifecycle Rule applies to. See below. If not specified, + the rule will default to using prefix. + properties: + and: + description: Configuration block used to apply a logical + AND to two or more predicates. See below. The Lifecycle + Rule will apply to any object matching all the predicates + configured inside the and block. + properties: + objectSizeGreaterThan: + description: Minimum object size (in bytes) to which + the rule applies. + type: number + objectSizeLessThan: + description: Maximum object size (in bytes) to which + the rule applies. + type: number + prefix: + description: DEPRECATED Use filter instead. This + has been deprecated by Amazon S3. Prefix identifying + one or more objects to which the rule applies. + Defaults to an empty string ("") if filter is + not specified. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. All + of these tags must exist in the object's tag set + in order for the rule to apply. + type: object + x-kubernetes-map-type: granular + type: object + objectSizeGreaterThan: + description: Minimum object size (in bytes) to which + the rule applies. 
+ type: string + objectSizeLessThan: + description: Maximum object size (in bytes) to which + the rule applies. + type: string + prefix: + description: DEPRECATED Use filter instead. This has + been deprecated by Amazon S3. Prefix identifying one + or more objects to which the rule applies. Defaults + to an empty string ("") if filter is not specified. + type: string + tag: + description: Configuration block for specifying a tag + key and value. See below. + properties: + key: + description: Name of the object key. + type: string + value: + description: Value of the tag. + type: string + type: object + type: object + id: + description: Unique identifier for the rule. The value cannot + be longer than 255 characters. + type: string + noncurrentVersionExpiration: + description: Configuration block that specifies when noncurrent + object versions expire. See below. + properties: + newerNoncurrentVersions: + description: Number of noncurrent versions Amazon S3 + will retain. Must be a non-zero positive integer. + type: string + noncurrentDays: + description: Number of days an object is noncurrent + before Amazon S3 can perform the associated action. + type: number + type: object + noncurrentVersionTransition: + description: Set of configuration blocks that specify the + transition rule for the lifecycle rule that describes + when noncurrent objects transition to a specific storage + class. See below. + items: + properties: + newerNoncurrentVersions: + description: Number of noncurrent versions Amazon + S3 will retain. Must be a non-zero positive integer. + type: string + noncurrentDays: + description: Number of days an object is noncurrent + before Amazon S3 can perform the associated action. + type: number + storageClass: + description: 'Class of storage used to store the object. + Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, + INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR.' 
+ type: string + type: object + type: array + prefix: + description: DEPRECATED Use filter instead. This has been + deprecated by Amazon S3. Prefix identifying one or more + objects to which the rule applies. Defaults to an empty + string ("") if filter is not specified. + type: string + status: + description: 'Whether the rule is currently being applied. + Valid values: Enabled or Disabled.' + type: string + transition: + description: Set of configuration blocks that specify when + an Amazon S3 object transitions to a specified storage + class. See below. + items: + properties: + date: + description: Date objects are transitioned to the + specified storage class. The date value must be + in RFC3339 full-date format e.g. 2023-08-22. + type: string + days: + description: Number of days after creation when objects + are transitioned to the specified storage class. + The value must be a positive integer. If both days + and date are not specified, defaults to 0. Valid + values depend on storage_class, see Transition objects + using Amazon S3 Lifecycle for more details. + type: number + storageClass: + description: 'Class of storage used to store the object. + Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, + INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR.' + type: string + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.rule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.rule) + || (has(self.initProvider) && has(self.initProvider.rule))' + status: + description: BucketLifecycleConfigurationStatus defines the observed state + of BucketLifecycleConfiguration. + properties: + atProvider: + properties: + bucket: + description: Name of the source S3 bucket you want Amazon S3 to + monitor. + type: string + expectedBucketOwner: + description: Account ID of the expected bucket owner. If the bucket + is owned by a different account, the request will fail with + an HTTP 403 (Access Denied) error. + type: string + id: + description: and status) + type: string + rule: + description: List of configuration blocks describing the rules + managing the replication. See below. 
+ items: + properties: + abortIncompleteMultipartUpload: + description: Configuration block that specifies the days + since the initiation of an incomplete multipart upload + that Amazon S3 will wait before permanently removing all + parts of the upload. See below. + properties: + daysAfterInitiation: + description: Number of days after which Amazon S3 aborts + an incomplete multipart upload. + type: number + type: object + expiration: + description: Configuration block that specifies the expiration + for the lifecycle of the object in the form of date, days + and, whether the object has a delete marker. See below. + properties: + date: + description: Date objects are transitioned to the specified + storage class. The date value must be in RFC3339 full-date + format e.g. 2023-08-22. + type: string + days: + description: Number of days after creation when objects + are transitioned to the specified storage class. The + value must be a positive integer. If both days and + date are not specified, defaults to 0. Valid values + depend on storage_class, see Transition objects using + Amazon S3 Lifecycle for more details. + type: number + expiredObjectDeleteMarker: + description: Indicates whether Amazon S3 will remove + a delete marker with no noncurrent versions. If set + to true, the delete marker will be expired; if set + to false the policy takes no action. + type: boolean + type: object + filter: + description: Configuration block used to identify objects + that a Lifecycle Rule applies to. See below. If not specified, + the rule will default to using prefix. + properties: + and: + description: Configuration block used to apply a logical + AND to two or more predicates. See below. The Lifecycle + Rule will apply to any object matching all the predicates + configured inside the and block. + properties: + objectSizeGreaterThan: + description: Minimum object size (in bytes) to which + the rule applies. 
+ type: number + objectSizeLessThan: + description: Maximum object size (in bytes) to which + the rule applies. + type: number + prefix: + description: DEPRECATED Use filter instead. This + has been deprecated by Amazon S3. Prefix identifying + one or more objects to which the rule applies. + Defaults to an empty string ("") if filter is + not specified. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. All + of these tags must exist in the object's tag set + in order for the rule to apply. + type: object + x-kubernetes-map-type: granular + type: object + objectSizeGreaterThan: + description: Minimum object size (in bytes) to which + the rule applies. + type: string + objectSizeLessThan: + description: Maximum object size (in bytes) to which + the rule applies. + type: string + prefix: + description: DEPRECATED Use filter instead. This has + been deprecated by Amazon S3. Prefix identifying one + or more objects to which the rule applies. Defaults + to an empty string ("") if filter is not specified. + type: string + tag: + description: Configuration block for specifying a tag + key and value. See below. + properties: + key: + description: Name of the object key. + type: string + value: + description: Value of the tag. + type: string + type: object + type: object + id: + description: Unique identifier for the rule. The value cannot + be longer than 255 characters. + type: string + noncurrentVersionExpiration: + description: Configuration block that specifies when noncurrent + object versions expire. See below. + properties: + newerNoncurrentVersions: + description: Number of noncurrent versions Amazon S3 + will retain. Must be a non-zero positive integer. + type: string + noncurrentDays: + description: Number of days an object is noncurrent + before Amazon S3 can perform the associated action. 
+ type: number + type: object + noncurrentVersionTransition: + description: Set of configuration blocks that specify the + transition rule for the lifecycle rule that describes + when noncurrent objects transition to a specific storage + class. See below. + items: + properties: + newerNoncurrentVersions: + description: Number of noncurrent versions Amazon + S3 will retain. Must be a non-zero positive integer. + type: string + noncurrentDays: + description: Number of days an object is noncurrent + before Amazon S3 can perform the associated action. + type: number + storageClass: + description: 'Class of storage used to store the object. + Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, + INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR.' + type: string + type: object + type: array + prefix: + description: DEPRECATED Use filter instead. This has been + deprecated by Amazon S3. Prefix identifying one or more + objects to which the rule applies. Defaults to an empty + string ("") if filter is not specified. + type: string + status: + description: 'Whether the rule is currently being applied. + Valid values: Enabled or Disabled.' + type: string + transition: + description: Set of configuration blocks that specify when + an Amazon S3 object transitions to a specified storage + class. See below. + items: + properties: + date: + description: Date objects are transitioned to the + specified storage class. The date value must be + in RFC3339 full-date format e.g. 2023-08-22. + type: string + days: + description: Number of days after creation when objects + are transitioned to the specified storage class. + The value must be a positive integer. If both days + and date are not specified, defaults to 0. Valid + values depend on storage_class, see Transition objects + using Amazon S3 Lifecycle for more details. + type: number + storageClass: + description: 'Class of storage used to store the object. 
+ Valid Values: GLACIER, STANDARD_IA, ONEZONE_IA, + INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR.' + type: string + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketloggings.yaml b/package/crds/s3.aws.upbound.io_bucketloggings.yaml index 79602e04e6..b077cf01dc 100644 --- a/package/crds/s3.aws.upbound.io_bucketloggings.yaml +++ b/package/crds/s3.aws.upbound.io_bucketloggings.yaml @@ -842,3 +842,800 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketLogging is the Schema for the BucketLoggings API. Provides + an S3 bucket (server access) logging resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketLoggingSpec defines the desired state of BucketLogging + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + targetBucket: + description: Name of the bucket where you want Amazon S3 to store + server access logs. + type: string + targetBucketRef: + description: Reference to a Bucket in s3 to populate targetBucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetBucketSelector: + description: Selector for a Bucket in s3 to populate targetBucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetGrant: + description: Set of configuration blocks with information for + granting permissions. See below. 
+ items: + properties: + grantee: + description: Configuration block for the person being granted + permissions. See below. + properties: + emailAddress: + description: Email address of the grantee. See Regions + and Endpoints for supported AWS regions where this + argument can be specified. + type: string + id: + description: Canonical user ID of the grantee. + type: string + type: + description: 'Type of grantee. Valid values: CanonicalUser, + AmazonCustomerByEmail, Group.' + type: string + uri: + description: URI of the grantee group. + type: string + type: object + permission: + description: 'Logging permissions assigned to the grantee + for the bucket. Valid values: FULL_CONTROL, READ, WRITE.' + type: string + type: object + type: array + targetObjectKeyFormat: + description: Amazon S3 key format for log objects. See below. + properties: + partitionedPrefix: + description: Partitioned S3 key for log objects. See below. + properties: + partitionDateSource: + description: 'Specifies the partition date source for + the partitioned prefix. Valid values: EventTime, DeliveryTime.' + type: string + type: object + simplePrefix: + description: Use the simple format for S3 keys for log objects. + To use, set simple_prefix {}. + type: object + type: object + targetPrefix: + description: Prefix for all log object keys. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + targetBucket: + description: Name of the bucket where you want Amazon S3 to store + server access logs. + type: string + targetBucketRef: + description: Reference to a Bucket in s3 to populate targetBucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetBucketSelector: + description: Selector for a Bucket in s3 to populate targetBucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetGrant: + description: Set of configuration blocks with information for + granting permissions. See below. + items: + properties: + grantee: + description: Configuration block for the person being granted + permissions. See below. + properties: + emailAddress: + description: Email address of the grantee. See Regions + and Endpoints for supported AWS regions where this + argument can be specified. + type: string + id: + description: Canonical user ID of the grantee. + type: string + type: + description: 'Type of grantee. Valid values: CanonicalUser, + AmazonCustomerByEmail, Group.' + type: string + uri: + description: URI of the grantee group. + type: string + type: object + permission: + description: 'Logging permissions assigned to the grantee + for the bucket. Valid values: FULL_CONTROL, READ, WRITE.' + type: string + type: object + type: array + targetObjectKeyFormat: + description: Amazon S3 key format for log objects. See below. + properties: + partitionedPrefix: + description: Partitioned S3 key for log objects. See below. 
+ properties: + partitionDateSource: + description: 'Specifies the partition date source for + the partitioned prefix. Valid values: EventTime, DeliveryTime.' + type: string + type: object + simplePrefix: + description: Use the simple format for S3 keys for log objects. + To use, set simple_prefix {}. + type: object + type: object + targetPrefix: + description: Prefix for all log object keys. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.targetPrefix is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetPrefix) + || (has(self.initProvider) && has(self.initProvider.targetPrefix))' + status: + description: BucketLoggingStatus defines the observed state of BucketLogging. + properties: + atProvider: + properties: + bucket: + description: Name of the bucket. + type: string + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + id: + description: The bucket or bucket and expected_bucket_owner separated + by a comma (,) if the latter is provided. + type: string + targetBucket: + description: Name of the bucket where you want Amazon S3 to store + server access logs. + type: string + targetGrant: + description: Set of configuration blocks with information for + granting permissions. See below. + items: + properties: + grantee: + description: Configuration block for the person being granted + permissions. See below. + properties: + displayName: + type: string + emailAddress: + description: Email address of the grantee. See Regions + and Endpoints for supported AWS regions where this + argument can be specified. + type: string + id: + description: Canonical user ID of the grantee. + type: string + type: + description: 'Type of grantee. Valid values: CanonicalUser, + AmazonCustomerByEmail, Group.' + type: string + uri: + description: URI of the grantee group. + type: string + type: object + permission: + description: 'Logging permissions assigned to the grantee + for the bucket. Valid values: FULL_CONTROL, READ, WRITE.' + type: string + type: object + type: array + targetObjectKeyFormat: + description: Amazon S3 key format for log objects. See below. 
+ properties: + partitionedPrefix: + description: Partitioned S3 key for log objects. See below. + properties: + partitionDateSource: + description: 'Specifies the partition date source for + the partitioned prefix. Valid values: EventTime, DeliveryTime.' + type: string + type: object + simplePrefix: + description: Use the simple format for S3 keys for log objects. + To use, set simple_prefix {}. + type: object + type: object + targetPrefix: + description: Prefix for all log object keys. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketmetrics.yaml b/package/crds/s3.aws.upbound.io_bucketmetrics.yaml index d18f8ca600..29adb7842d 100644 --- a/package/crds/s3.aws.upbound.io_bucketmetrics.yaml +++ b/package/crds/s3.aws.upbound.io_bucketmetrics.yaml @@ -713,3 +713,692 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketMetric is the Schema for the BucketMetrics API. Provides + a S3 bucket metrics configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketMetricSpec defines the desired state of BucketMetric + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the bucket to put metric configuration. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + filter: + description: Object filtering that accepts a prefix, tags, or + a logical AND of prefix and tags (documented below). + properties: + accessPoint: + description: S3 Access Point ARN for filtering (singular). + type: string + accessPointRef: + description: Reference to a AccessPoint in s3control to populate + accessPoint. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accessPointSelector: + description: Selector for a AccessPoint in s3control to populate + accessPoint. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + prefix: + description: Object prefix for filtering (singular). + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + name: + description: Unique identifier of the metrics configuration for + the bucket. Must be less than or equal to 64 characters in length. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the bucket to put metric configuration. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + filter: + description: Object filtering that accepts a prefix, tags, or + a logical AND of prefix and tags (documented below). + properties: + accessPoint: + description: S3 Access Point ARN for filtering (singular). + type: string + accessPointRef: + description: Reference to a AccessPoint in s3control to populate + accessPoint. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accessPointSelector: + description: Selector for a AccessPoint in s3control to populate + accessPoint. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + prefix: + description: Object prefix for filtering (singular). + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + name: + description: Unique identifier of the metrics configuration for + the bucket. Must be less than or equal to 64 characters in length. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: BucketMetricStatus defines the observed state of BucketMetric. + properties: + atProvider: + properties: + bucket: + description: Name of the bucket to put metric configuration. + type: string + filter: + description: Object filtering that accepts a prefix, tags, or + a logical AND of prefix and tags (documented below). + properties: + accessPoint: + description: S3 Access Point ARN for filtering (singular). + type: string + prefix: + description: Object prefix for filtering (singular). + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + id: + type: string + name: + description: Unique identifier of the metrics configuration for + the bucket. Must be less than or equal to 64 characters in length. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketobjectlockconfigurations.yaml b/package/crds/s3.aws.upbound.io_bucketobjectlockconfigurations.yaml index 0d127e1300..a8c85ad179 100644 --- a/package/crds/s3.aws.upbound.io_bucketobjectlockconfigurations.yaml +++ b/package/crds/s3.aws.upbound.io_bucketobjectlockconfigurations.yaml @@ -634,3 +634,607 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketObjectLockConfiguration is the 
Schema for the BucketObjectLockConfigurations + API. Provides an S3 bucket Object Lock configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketObjectLockConfigurationSpec defines the desired state + of BucketObjectLockConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + objectLockEnabled: + description: 'Indicates whether this bucket has an Object Lock + configuration enabled. Defaults to Enabled. Valid values: Enabled.' + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rule: + description: Configuration block for specifying the Object Lock + rule for the specified object. See below. + properties: + defaultRetention: + description: Configuration block for specifying the default + Object Lock retention settings for new objects placed in + the specified bucket. See below. + properties: + days: + description: Number of days that you want to specify for + the default retention period. + type: number + mode: + description: 'Default Object Lock retention mode you want + to apply to new objects placed in the specified bucket. + Valid values: COMPLIANCE, GOVERNANCE.' + type: string + years: + description: Number of years that you want to specify + for the default retention period. + type: number + type: object + type: object + tokenSecretRef: + description: |- + Token to allow Object Lock to be enabled for an existing bucket. You must contact AWS support for the bucket's "Object Lock token". + The token is generated in the back-end when versioning is enabled on a bucket. For more details on versioning, see the aws_s3_bucket_versioning resource. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + objectLockEnabled: + description: 'Indicates whether this bucket has an Object Lock + configuration enabled. Defaults to Enabled. Valid values: Enabled.' + type: string + rule: + description: Configuration block for specifying the Object Lock + rule for the specified object. See below. + properties: + defaultRetention: + description: Configuration block for specifying the default + Object Lock retention settings for new objects placed in + the specified bucket. See below. + properties: + days: + description: Number of days that you want to specify for + the default retention period. + type: number + mode: + description: 'Default Object Lock retention mode you want + to apply to new objects placed in the specified bucket. + Valid values: COMPLIANCE, GOVERNANCE.' + type: string + years: + description: Number of years that you want to specify + for the default retention period. + type: number + type: object + type: object + tokenSecretRef: + description: |- + Token to allow Object Lock to be enabled for an existing bucket. You must contact AWS support for the bucket's "Object Lock token". 
+ The token is generated in the back-end when versioning is enabled on a bucket. For more details on versioning, see the aws_s3_bucket_versioning resource. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: BucketObjectLockConfigurationStatus defines the observed + state of BucketObjectLockConfiguration. + properties: + atProvider: + properties: + bucket: + description: Name of the bucket. + type: string + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + id: + description: The bucket or bucket and expected_bucket_owner separated + by a comma (,) if the latter is provided. + type: string + objectLockEnabled: + description: 'Indicates whether this bucket has an Object Lock + configuration enabled. Defaults to Enabled. Valid values: Enabled.' + type: string + rule: + description: Configuration block for specifying the Object Lock + rule for the specified object. See below. + properties: + defaultRetention: + description: Configuration block for specifying the default + Object Lock retention settings for new objects placed in + the specified bucket. See below. + properties: + days: + description: Number of days that you want to specify for + the default retention period. + type: number + mode: + description: 'Default Object Lock retention mode you want + to apply to new objects placed in the specified bucket. + Valid values: COMPLIANCE, GOVERNANCE.' + type: string + years: + description: Number of years that you want to specify + for the default retention period. + type: number + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketownershipcontrols.yaml b/package/crds/s3.aws.upbound.io_bucketownershipcontrols.yaml index 5d43f9e91b..8736747601 100644 --- a/package/crds/s3.aws.upbound.io_bucketownershipcontrols.yaml +++ b/package/crds/s3.aws.upbound.io_bucketownershipcontrols.yaml @@ -531,3 +531,510 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketOwnershipControls is the Schema for the BucketOwnershipControlss + API. Manages S3 Bucket Ownership Controls. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketOwnershipControlsSpec defines the desired state of + BucketOwnershipControls + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the bucket that you want to associate this + access point with. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rule: + description: Configuration block(s) with Ownership Controls rules. + Detailed below. + properties: + objectOwnership: + description: 'Object ownership. Valid values: BucketOwnerPreferred, + ObjectWriter or BucketOwnerEnforced' + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the bucket that you want to associate this + access point with. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rule: + description: Configuration block(s) with Ownership Controls rules. + Detailed below. + properties: + objectOwnership: + description: 'Object ownership. Valid values: BucketOwnerPreferred, + ObjectWriter or BucketOwnerEnforced' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.rule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.rule) + || (has(self.initProvider) && has(self.initProvider.rule))' + status: + description: BucketOwnershipControlsStatus defines the observed state + of BucketOwnershipControls. + properties: + atProvider: + properties: + bucket: + description: Name of the bucket that you want to associate this + access point with. + type: string + id: + description: S3 Bucket name. + type: string + rule: + description: Configuration block(s) with Ownership Controls rules. + Detailed below. + properties: + objectOwnership: + description: 'Object ownership. Valid values: BucketOwnerPreferred, + ObjectWriter or BucketOwnerEnforced' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketreplicationconfigurations.yaml b/package/crds/s3.aws.upbound.io_bucketreplicationconfigurations.yaml index 404c3abb06..1c8bfdc070 100644 --- a/package/crds/s3.aws.upbound.io_bucketreplicationconfigurations.yaml +++ b/package/crds/s3.aws.upbound.io_bucketreplicationconfigurations.yaml @@ -1741,3 +1741,1621 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketReplicationConfiguration is the Schema for the BucketReplicationConfigurations + API. Provides a S3 bucket replication configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketReplicationConfigurationSpec defines the desired state + of BucketReplicationConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the source S3 bucket you want Amazon S3 to + monitor. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + role: + description: ARN of the IAM role for Amazon S3 to assume when + replicating the objects. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rule: + description: List of configuration blocks describing the rules + managing the replication. See below. + items: + properties: + deleteMarkerReplication: + description: Whether delete markers are replicated. 
This + argument is only valid with V2 replication configurations + (i.e., when filter is used)documented below. + properties: + status: + description: Whether delete markers should be replicated. + Either "Enabled" or "Disabled". + type: string + type: object + destination: + description: Specifies the destination for the rule. See + below. + properties: + accessControlTranslation: + description: Configuration block that specifies the + overrides to use for object owners on replication. + See below. Specify this only in a cross-account scenario + (where source and destination bucket owners are not + the same), and you want to change replica ownership + to the AWS account that owns the destination bucket. + If this is not specified in the replication configuration, + the replicas are owned by same AWS account that owns + the source object. Must be used in conjunction with + account owner override configuration. + properties: + owner: + description: 'Specifies the replica ownership. For + default and valid values, see PUT bucket replication + in the Amazon S3 API Reference. Valid values: + Destination.' + type: string + type: object + account: + description: Account ID to specify the replica ownership. + Must be used in conjunction with access_control_translation + override configuration. + type: string + bucket: + description: ARN of the bucket where you want Amazon + S3 to store the results. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + encryptionConfiguration: + description: Configuration block that provides information + about encryption. See below. If source_selection_criteria + is specified, you must specify this element. 
+ properties: + replicaKmsKeyId: + description: ID (Key ARN or Alias ARN) of the customer + managed AWS KMS key stored in AWS Key Management + Service (KMS) for the destination bucket. + type: string + replicaKmsKeyIdRef: + description: Reference to a Key in kms to populate + replicaKmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + replicaKmsKeyIdSelector: + description: Selector for a Key in kms to populate + replicaKmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + metrics: + description: Configuration block that specifies replication + metrics-related settings enabling replication metrics + and events. See below. + properties: + eventThreshold: + description: Configuration block that specifies + the time threshold for emitting the s3:Replication:OperationMissedThreshold + event. See below. + properties: + minutes: + description: 'Time in minutes. Valid values: + 15.' + type: number + type: object + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + type: object + replicationTime: + description: Configuration block that specifies S3 Replication + Time Control (S3 RTC), including whether S3 RTC is + enabled and the time when all objects and operations + on objects must be replicated. See below. Replication + Time Control must be used in conjunction with metrics. + properties: + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + time: + description: Configuration block specifying the + time by which replication should be complete for + all objects and operations on objects. See below. + properties: + minutes: + description: 'Time in minutes. Valid values: + 15.' + type: number + type: object + type: object + storageClass: + description: The storage class used to store the object. + By default, Amazon S3 uses the storage class of the + source object to create the object replica. 
+ type: string + type: object + existingObjectReplication: + description: Replicate existing objects in the source bucket + according to the rule configurations. See below. + properties: + status: + description: Whether the existing objects should be + replicated. Either "Enabled" or "Disabled". + type: string + type: object + filter: + description: Filter that identifies subset of objects to + which the replication rule applies. See below. If not + specified, the rule will default to using prefix. + properties: + and: + description: Configuration block for specifying rule + filters. This element is required only if you specify + more than one filter. See and below for more details. + properties: + prefix: + description: Object key name prefix identifying + one or more objects to which the rule applies. + Must be less than or equal to 1024 characters + in length. Defaults to an empty string ("") if + filter is not specified. + type: string + tags: + additionalProperties: + type: string + description: Map of tags (key and value pairs) that + identifies a subset of objects to which the rule + applies. The rule applies only to objects having + all the tags in its tagset. + type: object + x-kubernetes-map-type: granular + type: object + prefix: + description: Object key name prefix identifying one + or more objects to which the rule applies. Must be + less than or equal to 1024 characters in length. Defaults + to an empty string ("") if filter is not specified. + type: string + tag: + description: Configuration block for specifying a tag + key and value. See below. + properties: + key: + description: Name of the object key. + type: string + value: + description: Value of the tag. + type: string + type: object + type: object + id: + description: Unique identifier for the rule. Must be less + than or equal to 255 characters in length. + type: string + prefix: + description: Object key name prefix identifying one or more + objects to which the rule applies. 
Must be less than or + equal to 1024 characters in length. Defaults to an empty + string ("") if filter is not specified. + type: string + priority: + description: Priority associated with the rule. Priority + should only be set if filter is configured. If not provided, + defaults to 0. Priority must be unique between multiple + rules. + type: number + sourceSelectionCriteria: + description: Specifies special object selection criteria. + See below. + properties: + replicaModifications: + description: Configuration block that you can specify + for selections for modifications on replicas. Amazon + S3 doesn't replicate replica modifications by default. + In the latest version of replication configuration + (when filter is specified), you can specify this element + and set the status to Enabled to replicate modifications + on replicas. + properties: + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + type: object + sseKmsEncryptedObjects: + description: Configuration block for filter information + for the selection of Amazon S3 objects encrypted with + AWS KMS. If specified, replica_kms_key_id in destination + encryption_configuration must be specified as well. + properties: + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + type: object + type: object + status: + description: Status of the rule. Either "Enabled" or "Disabled". + The rule is ignored if status is not "Enabled". + type: string + type: object + type: array + tokenSecretRef: + description: |- + Token to allow replication to be enabled on an Object Lock-enabled bucket. You must contact AWS support for the bucket's "Object Lock token". + For more details, see Using S3 Object Lock with replication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the source S3 bucket you want Amazon S3 to + monitor. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + role: + description: ARN of the IAM role for Amazon S3 to assume when + replicating the objects. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rule: + description: List of configuration blocks describing the rules + managing the replication. See below. + items: + properties: + deleteMarkerReplication: + description: Whether delete markers are replicated. This + argument is only valid with V2 replication configurations + (i.e., when filter is used)documented below. 
+ properties: + status: + description: Whether delete markers should be replicated. + Either "Enabled" or "Disabled". + type: string + type: object + destination: + description: Specifies the destination for the rule. See + below. + properties: + accessControlTranslation: + description: Configuration block that specifies the + overrides to use for object owners on replication. + See below. Specify this only in a cross-account scenario + (where source and destination bucket owners are not + the same), and you want to change replica ownership + to the AWS account that owns the destination bucket. + If this is not specified in the replication configuration, + the replicas are owned by same AWS account that owns + the source object. Must be used in conjunction with + account owner override configuration. + properties: + owner: + description: 'Specifies the replica ownership. For + default and valid values, see PUT bucket replication + in the Amazon S3 API Reference. Valid values: + Destination.' + type: string + type: object + account: + description: Account ID to specify the replica ownership. + Must be used in conjunction with access_control_translation + override configuration. + type: string + bucket: + description: ARN of the bucket where you want Amazon + S3 to store the results. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate + bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + encryptionConfiguration: + description: Configuration block that provides information + about encryption. See below. If source_selection_criteria + is specified, you must specify this element. + properties: + replicaKmsKeyId: + description: ID (Key ARN or Alias ARN) of the customer + managed AWS KMS key stored in AWS Key Management + Service (KMS) for the destination bucket. + type: string + replicaKmsKeyIdRef: + description: Reference to a Key in kms to populate + replicaKmsKeyId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + replicaKmsKeyIdSelector: + description: Selector for a Key in kms to populate + replicaKmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + metrics: + description: Configuration block that specifies replication + metrics-related settings enabling replication metrics + and events. See below. + properties: + eventThreshold: + description: Configuration block that specifies + the time threshold for emitting the s3:Replication:OperationMissedThreshold + event. See below. + properties: + minutes: + description: 'Time in minutes. Valid values: + 15.' + type: number + type: object + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + type: object + replicationTime: + description: Configuration block that specifies S3 Replication + Time Control (S3 RTC), including whether S3 RTC is + enabled and the time when all objects and operations + on objects must be replicated. See below. Replication + Time Control must be used in conjunction with metrics. + properties: + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + time: + description: Configuration block specifying the + time by which replication should be complete for + all objects and operations on objects. See below. + properties: + minutes: + description: 'Time in minutes. Valid values: + 15.' + type: number + type: object + type: object + storageClass: + description: The storage class used to store the object. + By default, Amazon S3 uses the storage class of the + source object to create the object replica. + type: string + type: object + existingObjectReplication: + description: Replicate existing objects in the source bucket + according to the rule configurations. See below. + properties: + status: + description: Whether the existing objects should be + replicated. Either "Enabled" or "Disabled". 
+ type: string + type: object + filter: + description: Filter that identifies subset of objects to + which the replication rule applies. See below. If not + specified, the rule will default to using prefix. + properties: + and: + description: Configuration block for specifying rule + filters. This element is required only if you specify + more than one filter. See and below for more details. + properties: + prefix: + description: Object key name prefix identifying + one or more objects to which the rule applies. + Must be less than or equal to 1024 characters + in length. Defaults to an empty string ("") if + filter is not specified. + type: string + tags: + additionalProperties: + type: string + description: Map of tags (key and value pairs) that + identifies a subset of objects to which the rule + applies. The rule applies only to objects having + all the tags in its tagset. + type: object + x-kubernetes-map-type: granular + type: object + prefix: + description: Object key name prefix identifying one + or more objects to which the rule applies. Must be + less than or equal to 1024 characters in length. Defaults + to an empty string ("") if filter is not specified. + type: string + tag: + description: Configuration block for specifying a tag + key and value. See below. + properties: + key: + description: Name of the object key. + type: string + value: + description: Value of the tag. + type: string + type: object + type: object + id: + description: Unique identifier for the rule. Must be less + than or equal to 255 characters in length. + type: string + prefix: + description: Object key name prefix identifying one or more + objects to which the rule applies. Must be less than or + equal to 1024 characters in length. Defaults to an empty + string ("") if filter is not specified. + type: string + priority: + description: Priority associated with the rule. Priority + should only be set if filter is configured. If not provided, + defaults to 0. 
Priority must be unique between multiple + rules. + type: number + sourceSelectionCriteria: + description: Specifies special object selection criteria. + See below. + properties: + replicaModifications: + description: Configuration block that you can specify + for selections for modifications on replicas. Amazon + S3 doesn't replicate replica modifications by default. + In the latest version of replication configuration + (when filter is specified), you can specify this element + and set the status to Enabled to replicate modifications + on replicas. + properties: + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + type: object + sseKmsEncryptedObjects: + description: Configuration block for filter information + for the selection of Amazon S3 objects encrypted with + AWS KMS. If specified, replica_kms_key_id in destination + encryption_configuration must be specified as well. + properties: + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + type: object + type: object + status: + description: Status of the rule. Either "Enabled" or "Disabled". + The rule is ignored if status is not "Enabled". + type: string + type: object + type: array + tokenSecretRef: + description: |- + Token to allow replication to be enabled on an Object Lock-enabled bucket. You must contact AWS support for the bucket's "Object Lock token". + For more details, see Using S3 Object Lock with replication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.rule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.rule) + || (has(self.initProvider) && has(self.initProvider.rule))' + status: + description: BucketReplicationConfigurationStatus defines the observed + state of BucketReplicationConfiguration. + properties: + atProvider: + properties: + bucket: + description: Name of the source S3 bucket you want Amazon S3 to + monitor. + type: string + id: + description: S3 source bucket name. 
+ type: string + role: + description: ARN of the IAM role for Amazon S3 to assume when + replicating the objects. + type: string + rule: + description: List of configuration blocks describing the rules + managing the replication. See below. + items: + properties: + deleteMarkerReplication: + description: Whether delete markers are replicated. This + argument is only valid with V2 replication configurations + (i.e., when filter is used)documented below. + properties: + status: + description: Whether delete markers should be replicated. + Either "Enabled" or "Disabled". + type: string + type: object + destination: + description: Specifies the destination for the rule. See + below. + properties: + accessControlTranslation: + description: Configuration block that specifies the + overrides to use for object owners on replication. + See below. Specify this only in a cross-account scenario + (where source and destination bucket owners are not + the same), and you want to change replica ownership + to the AWS account that owns the destination bucket. + If this is not specified in the replication configuration, + the replicas are owned by same AWS account that owns + the source object. Must be used in conjunction with + account owner override configuration. + properties: + owner: + description: 'Specifies the replica ownership. For + default and valid values, see PUT bucket replication + in the Amazon S3 API Reference. Valid values: + Destination.' + type: string + type: object + account: + description: Account ID to specify the replica ownership. + Must be used in conjunction with access_control_translation + override configuration. + type: string + bucket: + description: ARN of the bucket where you want Amazon + S3 to store the results. + type: string + encryptionConfiguration: + description: Configuration block that provides information + about encryption. See below. If source_selection_criteria + is specified, you must specify this element. 
+ properties: + replicaKmsKeyId: + description: ID (Key ARN or Alias ARN) of the customer + managed AWS KMS key stored in AWS Key Management + Service (KMS) for the destination bucket. + type: string + type: object + metrics: + description: Configuration block that specifies replication + metrics-related settings enabling replication metrics + and events. See below. + properties: + eventThreshold: + description: Configuration block that specifies + the time threshold for emitting the s3:Replication:OperationMissedThreshold + event. See below. + properties: + minutes: + description: 'Time in minutes. Valid values: + 15.' + type: number + type: object + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + type: object + replicationTime: + description: Configuration block that specifies S3 Replication + Time Control (S3 RTC), including whether S3 RTC is + enabled and the time when all objects and operations + on objects must be replicated. See below. Replication + Time Control must be used in conjunction with metrics. + properties: + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + time: + description: Configuration block specifying the + time by which replication should be complete for + all objects and operations on objects. See below. + properties: + minutes: + description: 'Time in minutes. Valid values: + 15.' + type: number + type: object + type: object + storageClass: + description: The storage class used to store the object. + By default, Amazon S3 uses the storage class of the + source object to create the object replica. + type: string + type: object + existingObjectReplication: + description: Replicate existing objects in the source bucket + according to the rule configurations. See below. + properties: + status: + description: Whether the existing objects should be + replicated. Either "Enabled" or "Disabled". 
+ type: string + type: object + filter: + description: Filter that identifies subset of objects to + which the replication rule applies. See below. If not + specified, the rule will default to using prefix. + properties: + and: + description: Configuration block for specifying rule + filters. This element is required only if you specify + more than one filter. See and below for more details. + properties: + prefix: + description: Object key name prefix identifying + one or more objects to which the rule applies. + Must be less than or equal to 1024 characters + in length. Defaults to an empty string ("") if + filter is not specified. + type: string + tags: + additionalProperties: + type: string + description: Map of tags (key and value pairs) that + identifies a subset of objects to which the rule + applies. The rule applies only to objects having + all the tags in its tagset. + type: object + x-kubernetes-map-type: granular + type: object + prefix: + description: Object key name prefix identifying one + or more objects to which the rule applies. Must be + less than or equal to 1024 characters in length. Defaults + to an empty string ("") if filter is not specified. + type: string + tag: + description: Configuration block for specifying a tag + key and value. See below. + properties: + key: + description: Name of the object key. + type: string + value: + description: Value of the tag. + type: string + type: object + type: object + id: + description: Unique identifier for the rule. Must be less + than or equal to 255 characters in length. + type: string + prefix: + description: Object key name prefix identifying one or more + objects to which the rule applies. Must be less than or + equal to 1024 characters in length. Defaults to an empty + string ("") if filter is not specified. + type: string + priority: + description: Priority associated with the rule. Priority + should only be set if filter is configured. If not provided, + defaults to 0. 
Priority must be unique between multiple + rules. + type: number + sourceSelectionCriteria: + description: Specifies special object selection criteria. + See below. + properties: + replicaModifications: + description: Configuration block that you can specify + for selections for modifications on replicas. Amazon + S3 doesn't replicate replica modifications by default. + In the latest version of replication configuration + (when filter is specified), you can specify this element + and set the status to Enabled to replicate modifications + on replicas. + properties: + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + type: object + sseKmsEncryptedObjects: + description: Configuration block for filter information + for the selection of Amazon S3 objects encrypted with + AWS KMS. If specified, replica_kms_key_id in destination + encryption_configuration must be specified as well. + properties: + status: + description: Whether the existing objects should + be replicated. Either "Enabled" or "Disabled". + type: string + type: object + type: object + status: + description: Status of the rule. Either "Enabled" or "Disabled". + The rule is ignored if status is not "Enabled". + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_buckets.yaml b/package/crds/s3.aws.upbound.io_buckets.yaml index 5cb6d95aef..3e44659610 100644 --- a/package/crds/s3.aws.upbound.io_buckets.yaml +++ b/package/crds/s3.aws.upbound.io_buckets.yaml @@ -926,3 +926,866 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Bucket is the Schema for the Buckets API. Provides a S3 bucket + resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketSpec defines the desired state of Bucket + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + forceDestroy: + description: Boolean that indicates all objects (including any + locked objects) should be deleted from the bucket when the bucket + is destroyed so that the bucket can be destroyed without error. + These objects are not recoverable. This only deletes objects + when the bucket is destroyed, not when setting this parameter + to true. 
If setting this field in the same operation that would + require replacing the bucket or destroying the bucket, this + flag will not work. + type: boolean + objectLockEnabled: + description: Indicates whether this bucket has an Object Lock + configuration enabled. Valid values are true or false. This + argument is not supported in all regions or partitions. + type: boolean + region: + description: |- + AWS region this bucket resides in. + Region is the region you'd like your resource to be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + forceDestroy: + description: Boolean that indicates all objects (including any + locked objects) should be deleted from the bucket when the bucket + is destroyed so that the bucket can be destroyed without error. + These objects are not recoverable. This only deletes objects + when the bucket is destroyed, not when setting this parameter + to true. If setting this field in the same operation that would + require replacing the bucket or destroying the bucket, this + flag will not work. 
+ type: boolean + objectLockEnabled: + description: Indicates whether this bucket has an Object Lock + configuration enabled. Valid values are true or false. This + argument is not supported in all regions or partitions. + type: boolean + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: BucketStatus defines the observed state of Bucket. + properties: + atProvider: + properties: + accelerationStatus: + description: |- + Sets the accelerate configuration of an existing bucket. Can be Enabled or Suspended. Cannot be used in cn-north-1 or us-gov-west-1. + Use the resource aws_s3_bucket_accelerate_configuration instead. + type: string + acl: + description: The canned ACL to apply. Valid values are private, + public-read, public-read-write, aws-exec-read, authenticated-read, + and log-delivery-write. Defaults to private. Conflicts with + grant. Use the resource aws_s3_bucket_acl instead. + type: string + arn: + description: ARN of the bucket. Will be of format arn:aws:s3:::bucketname. + type: string + bucketDomainName: + description: Bucket domain name. Will be of format bucketname.s3.amazonaws.com. + type: string + bucketRegionalDomainName: + description: 'The bucket region-specific domain name. The bucket + domain name including the region name. Please refer to the S3 + endpoints reference for format. Note: AWS CloudFront allows + specifying an S3 region-specific endpoint when creating an S3 + origin. This will prevent redirect issues from CloudFront to + the S3 Origin URL. For more information, see the Virtual Hosted-Style + Requests for Other Regions section in the AWS S3 User Guide.' + type: string + corsRule: + description: Rule of Cross-Origin Resource Sharing. See CORS rule + below for details. Use the resource aws_s3_bucket_cors_configuration + instead. + items: + properties: + allowedHeaders: + description: List of headers allowed. + items: + type: string + type: array + allowedMethods: + description: One or more HTTP methods that you allow the + origin to execute. Can be GET, PUT, POST, DELETE or HEAD. 
+ items: + type: string + type: array + allowedOrigins: + description: One or more origins you want customers to be + able to access the bucket from. + items: + type: string + type: array + exposeHeaders: + description: One or more headers in the response that you + want customers to be able to access from their applications + (for example, from a JavaScript XMLHttpRequest object). + items: + type: string + type: array + maxAgeSeconds: + description: Specifies time in seconds that browser can + cache the response for a preflight request. + type: number + type: object + type: array + forceDestroy: + description: Boolean that indicates all objects (including any + locked objects) should be deleted from the bucket when the bucket + is destroyed so that the bucket can be destroyed without error. + These objects are not recoverable. This only deletes objects + when the bucket is destroyed, not when setting this parameter + to true. If setting this field in the same operation that would + require replacing the bucket or destroying the bucket, this + flag will not work. + type: boolean + grant: + description: An ACL policy grant. See Grant below for details. + Conflicts with acl. Use the resource aws_s3_bucket_acl instead. + items: + properties: + id: + description: Canonical user id to grant for. Used only when + type is CanonicalUser. + type: string + permissions: + description: List of permissions to apply for grantee. Valid + values are READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Type of grantee to apply for. Valid values + are CanonicalUser and Group. AmazonCustomerByEmail is + not supported. + type: string + uri: + description: Uri address to grant for. Used only when type + is Group. + type: string + type: object + type: array + hostedZoneId: + description: Route 53 Hosted Zone ID for this bucket's region. + type: string + id: + description: Name of the bucket. 
+ type: string + lifecycleRule: + description: |- + Configuration of object lifecycle management. See Lifecycle Rule below for details. + Use the resource aws_s3_bucket_lifecycle_configuration instead. + items: + properties: + abortIncompleteMultipartUploadDays: + description: Specifies the number of days after initiating + a multipart upload when the multipart upload must be completed. + type: number + enabled: + description: Specifies lifecycle rule status. + type: boolean + expiration: + description: Specifies a period in the object's expire. + See Expiration below for details. + properties: + date: + description: Specifies the date after which you want + the corresponding action to take effect. + type: string + days: + description: Specifies the number of days after object + creation when the specific rule action takes effect. + type: number + expiredObjectDeleteMarker: + description: On a versioned bucket (versioning-enabled + or versioning-suspended bucket), you can add this + element in the lifecycle configuration to direct Amazon + S3 to delete expired object delete markers. This cannot + be specified with Days or Date in a Lifecycle Expiration + Policy. + type: boolean + type: object + id: + description: Unique identifier for the rule. Must be less + than or equal to 255 characters in length. + type: string + noncurrentVersionExpiration: + description: Specifies when noncurrent object versions expire. + See Noncurrent Version Expiration below for details. + properties: + days: + description: Specifies the number of days after object + creation when the specific rule action takes effect. + type: number + type: object + noncurrentVersionTransition: + description: Specifies when noncurrent object versions transitions. + See Noncurrent Version Transition below for details. + items: + properties: + days: + description: Specifies the number of days after object + creation when the specific rule action takes effect. 
+ type: number + storageClass: + description: Specifies the Amazon S3 storage class + to which you want the object to transition. + type: string + type: object + type: array + prefix: + description: Object key prefix identifying one or more objects + to which the rule applies. + type: string + tags: + additionalProperties: + type: string + description: Specifies object tags key and value. + type: object + x-kubernetes-map-type: granular + transition: + description: Specifies a period in the object's transitions. + See Transition below for details. + items: + properties: + date: + description: Specifies the date after which you want + the corresponding action to take effect. + type: string + days: + description: Specifies the number of days after object + creation when the specific rule action takes effect. + type: number + storageClass: + description: Specifies the Amazon S3 storage class + to which you want the object to transition. + type: string + type: object + type: array + type: object + type: array + logging: + description: |- + Configuration of S3 bucket logging parameters. See Logging below for details. + Use the resource aws_s3_bucket_logging instead. + properties: + targetBucket: + description: Name of the bucket that will receive the log + objects. + type: string + targetPrefix: + description: To specify a key prefix for log objects. + type: string + type: object + objectLockConfiguration: + description: |- + Configuration of S3 object locking. See Object Lock Configuration below for details. + Use the object_lock_enabled parameter and the resource aws_s3_bucket_object_lock_configuration instead. + properties: + objectLockEnabled: + description: Indicates whether this bucket has an Object Lock + configuration enabled. Valid value is Enabled. Use the top-level + argument object_lock_enabled instead. + type: string + rule: + description: Object Lock rule in place for this bucket (documented + below). 
+ properties: + defaultRetention: + description: Default retention period that you want to + apply to new objects placed in this bucket (documented + below). + properties: + days: + description: Number of days that you want to specify + for the default retention period. + type: number + mode: + description: Default Object Lock retention mode you + want to apply to new objects placed in this bucket. + Valid values are GOVERNANCE and COMPLIANCE. + type: string + years: + description: Number of years that you want to specify + for the default retention period. + type: number + type: object + type: object + type: object + objectLockEnabled: + description: Indicates whether this bucket has an Object Lock + configuration enabled. Valid values are true or false. This + argument is not supported in all regions or partitions. + type: boolean + policy: + description: |- + Valid bucket policy JSON document. In this case, please make sure you use the verbose/specific version of the policy. + Use the resource aws_s3_bucket_policy instead. + type: string + region: + description: |- + AWS region this bucket resides in. + Region is the region you'd like your resource to be created in. + type: string + replicationConfiguration: + description: |- + Configuration of replication configuration. See Replication Configuration below for details. + Use the resource aws_s3_bucket_replication_configuration instead. + properties: + role: + description: ARN of the IAM role for Amazon S3 to assume when + replicating the objects. + type: string + rules: + description: Specifies the rules managing the replication + (documented below). + items: + properties: + deleteMarkerReplicationStatus: + description: Whether delete markers are replicated. + The only valid value is Enabled. To disable, omit + this argument. This argument is only valid with V2 + replication configurations (i.e., when filter is used). 
+ type: string + destination: + description: Specifies the destination for the rule + (documented below). + properties: + accessControlTranslation: + description: Specifies the overrides to use for + object owners on replication (documented below). + Must be used in conjunction with account_id owner + override configuration. + properties: + owner: + description: Specifies the replica ownership. + For default and valid values, see PUT bucket + replication in the Amazon S3 API Reference. + The only valid value is Destination. + type: string + type: object + accountId: + description: Account ID to use for overriding the + object owner on replication. Must be used in conjunction + with access_control_translation override configuration. + type: string + bucket: + description: ARN of the S3 bucket where you want + Amazon S3 to store replicas of the object identified + by the rule. + type: string + metrics: + description: Enables replication metrics (documented + below). + properties: + minutes: + description: Threshold within which objects + are to be replicated. The only valid value + is 15. + type: number + status: + description: Status of RTC. Either Enabled or + Disabled. + type: string + type: object + replicaKmsKeyId: + description: |- + Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with + sse_kms_encrypted_objects source selection criteria. + type: string + replicationTime: + description: Enables S3 Replication Time Control + (S3 RTC) (documented below). + properties: + minutes: + description: Threshold within which objects + are to be replicated. The only valid value + is 15. + type: number + status: + description: Status of RTC. Either Enabled or + Disabled. + type: string + type: object + storageClass: + description: Specifies the Amazon S3 storage class + to which you want the object to transition. 
+ type: string + type: object + filter: + description: Filter that identifies subset of objects + to which the replication rule applies (documented + below). + properties: + prefix: + description: Object keyname prefix that identifies + subset of objects to which the rule applies. Must + be less than or equal to 1024 characters in length. + type: string + tags: + additionalProperties: + type: string + description: |- + A map of tags that identifies subset of objects to which the rule applies. + The rule applies only to objects having all the tags in its tagset. + type: object + x-kubernetes-map-type: granular + type: object + id: + description: Unique identifier for the rule. Must be + less than or equal to 255 characters in length. + type: string + prefix: + description: Object keyname prefix identifying one or + more objects to which the rule applies. Must be less + than or equal to 1024 characters in length. + type: string + priority: + description: Priority associated with the rule. Priority + should only be set if filter is configured. If not + provided, defaults to 0. Priority must be unique between + multiple rules. + type: number + sourceSelectionCriteria: + description: Specifies special object selection criteria + (documented below). + properties: + sseKmsEncryptedObjects: + description: |- + Match SSE-KMS encrypted objects (documented below). If specified, replica_kms_key_id + in destination must be specified as well. + properties: + enabled: + description: Enable versioning. Once you version-enable + a bucket, it can never return to an unversioned + state. You can, however, suspend versioning + on that bucket. + type: boolean + type: object + type: object + status: + description: Status of the rule. Either Enabled or Disabled. + The rule is ignored if status is not Enabled. + type: string + type: object + type: array + type: object + requestPayer: + description: |- + Specifies who should bear the cost of Amazon S3 data transfer. 
+ Can be either BucketOwner or Requester. By default, the owner of the S3 bucket would incur the costs of any data transfer. + See Requester Pays Buckets developer guide for more information. + Use the resource aws_s3_bucket_request_payment_configuration instead. + type: string + serverSideEncryptionConfiguration: + description: |- + Configuration of server-side encryption configuration. See Server Side Encryption Configuration below for details. + Use the resource aws_s3_bucket_server_side_encryption_configuration instead. + properties: + rule: + description: Single object for server-side encryption by default + configuration. (documented below) + properties: + applyServerSideEncryptionByDefault: + description: Single object for setting server-side encryption + by default. (documented below) + properties: + kmsMasterKeyId: + description: AWS KMS master key ID used for the SSE-KMS + encryption. This can only be used when you set the + value of sse_algorithm as aws:kms. The default aws/s3 + AWS KMS master key is used if this element is absent + while the sse_algorithm is aws:kms. + type: string + sseAlgorithm: + description: Server-side encryption algorithm to use. + Valid values are AES256 and aws:kms + type: string + type: object + bucketKeyEnabled: + description: Whether or not to use Amazon S3 Bucket Keys + for SSE-KMS. + type: boolean + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + versioning: + description: Configuration of the S3 bucket versioning state. + See Versioning below for details. Use the resource aws_s3_bucket_versioning + instead. + properties: + enabled: + description: Enable versioning. 
Once you version-enable a + bucket, it can never return to an unversioned state. You + can, however, suspend versioning on that bucket. + type: boolean + mfaDelete: + description: Enable MFA delete for either Change the versioning + state of your bucket or Permanently delete an object version. + Default is false. This cannot be used to toggle this setting + but is available to allow managed buckets to reflect the + state in AWS + type: boolean + type: object + website: + description: |- + Configuration of the S3 bucket website. See Website below for details. + Use the resource aws_s3_bucket_website_configuration instead. + properties: + errorDocument: + description: Absolute path to the document to return in case + of a 4XX error. + type: string + indexDocument: + description: Amazon S3 returns this index document when requests + are made to the root domain or any of the subfolders. + type: string + redirectAllRequestsTo: + description: Hostname to redirect all website requests for + this bucket to. Hostname can optionally be prefixed with + a protocol (http:// or https://) to use when redirecting + requests. The default is the protocol that is used in the + original request. + type: string + routingRules: + description: |- + JSON array containing routing rules + describing redirect behavior and when redirects are applied. + type: string + type: object + websiteDomain: + description: (Deprecated) Domain of the website endpoint, if the + bucket is configured with a website. If not, this will be an + empty string. This is used to create Route 53 alias records. + Use the resource aws_s3_bucket_website_configuration instead. + type: string + websiteEndpoint: + description: (Deprecated) Website endpoint, if the bucket is configured + with a website. If not, this will be an empty string. Use the + resource aws_s3_bucket_website_configuration instead. + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketserversideencryptionconfigurations.yaml b/package/crds/s3.aws.upbound.io_bucketserversideencryptionconfigurations.yaml index 427a7bdb3a..ac6881d895 100644 --- a/package/crds/s3.aws.upbound.io_bucketserversideencryptionconfigurations.yaml +++ b/package/crds/s3.aws.upbound.io_bucketserversideencryptionconfigurations.yaml @@ -745,3 +745,720 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketServerSideEncryptionConfiguration is the Schema for the + BucketServerSideEncryptionConfigurations API. Provides a S3 bucket server-side + encryption configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketServerSideEncryptionConfigurationSpec defines the desired + state of BucketServerSideEncryptionConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: ID (name) of the bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rule: + description: Set of server-side encryption configuration rules. + See below. Currently, only a single rule is supported. + items: + properties: + applyServerSideEncryptionByDefault: + description: Single object for setting server-side encryption + by default. See below. + properties: + kmsMasterKeyId: + description: AWS KMS master key ID used for the SSE-KMS + encryption. This can only be used when you set the + value of sse_algorithm as aws:kms. 
The default aws/s3 + AWS KMS master key is used if this element is absent + while the sse_algorithm is aws:kms. + type: string + kmsMasterKeyIdRef: + description: Reference to a Key in kms to populate kmsMasterKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsMasterKeyIdSelector: + description: Selector for a Key in kms to populate kmsMasterKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sseAlgorithm: + description: Server-side encryption algorithm to use. + Valid values are AES256, aws:kms, and aws:kms:dsse + type: string + type: object + bucketKeyEnabled: + description: Whether or not to use Amazon S3 Bucket Keys + for SSE-KMS. + type: boolean + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: ID (name) of the bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + rule: + description: Set of server-side encryption configuration rules. + See below. Currently, only a single rule is supported. 
+ items: + properties: + applyServerSideEncryptionByDefault: + description: Single object for setting server-side encryption + by default. See below. + properties: + kmsMasterKeyId: + description: AWS KMS master key ID used for the SSE-KMS + encryption. This can only be used when you set the + value of sse_algorithm as aws:kms. The default aws/s3 + AWS KMS master key is used if this element is absent + while the sse_algorithm is aws:kms. + type: string + kmsMasterKeyIdRef: + description: Reference to a Key in kms to populate kmsMasterKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsMasterKeyIdSelector: + description: Selector for a Key in kms to populate kmsMasterKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sseAlgorithm: + description: Server-side encryption algorithm to use. + Valid values are AES256, aws:kms, and aws:kms:dsse + type: string + type: object + bucketKeyEnabled: + description: Whether or not to use Amazon S3 Bucket Keys + for SSE-KMS. + type: boolean + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.rule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.rule) + || (has(self.initProvider) && has(self.initProvider.rule))' + status: + description: BucketServerSideEncryptionConfigurationStatus defines the + observed state of BucketServerSideEncryptionConfiguration. + properties: + atProvider: + properties: + bucket: + description: ID (name) of the bucket. + type: string + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + id: + description: The bucket or bucket and expected_bucket_owner separated + by a comma (,) if the latter is provided. + type: string + rule: + description: Set of server-side encryption configuration rules. + See below. Currently, only a single rule is supported. + items: + properties: + applyServerSideEncryptionByDefault: + description: Single object for setting server-side encryption + by default. 
See below. + properties: + kmsMasterKeyId: + description: AWS KMS master key ID used for the SSE-KMS + encryption. This can only be used when you set the + value of sse_algorithm as aws:kms. The default aws/s3 + AWS KMS master key is used if this element is absent + while the sse_algorithm is aws:kms. + type: string + sseAlgorithm: + description: Server-side encryption algorithm to use. + Valid values are AES256, aws:kms, and aws:kms:dsse + type: string + type: object + bucketKeyEnabled: + description: Whether or not to use Amazon S3 Bucket Keys + for SSE-KMS. + type: boolean + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketversionings.yaml b/package/crds/s3.aws.upbound.io_bucketversionings.yaml index cf8de7b80f..658964c797 100644 --- a/package/crds/s3.aws.upbound.io_bucketversionings.yaml +++ b/package/crds/s3.aws.upbound.io_bucketversionings.yaml @@ -572,3 +572,551 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketVersioning is the Schema for the BucketVersionings API. + Provides an S3 bucket versioning resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketVersioningSpec defines the desired state of BucketVersioning + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the S3 bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + mfa: + description: Concatenation of the authentication device's serial + number, a space, and the value that is displayed on your authentication + device. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + versioningConfiguration: + description: Configuration block for the versioning parameters. + See below. + properties: + mfaDelete: + description: 'Specifies whether MFA delete is enabled in the + bucket versioning configuration. Valid values: Enabled or + Disabled.' 
+ type: string + status: + description: 'Versioning state of the bucket. Valid values: + Enabled, Suspended, or Disabled. Disabled should only be + used when creating or importing resources that correspond + to unversioned S3 buckets.' + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the S3 bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + mfa: + description: Concatenation of the authentication device's serial + number, a space, and the value that is displayed on your authentication + device. + type: string + versioningConfiguration: + description: Configuration block for the versioning parameters. + See below. + properties: + mfaDelete: + description: 'Specifies whether MFA delete is enabled in the + bucket versioning configuration. Valid values: Enabled or + Disabled.' + type: string + status: + description: 'Versioning state of the bucket. Valid values: + Enabled, Suspended, or Disabled. 
Disabled should only be + used when creating or importing resources that correspond + to unversioned S3 buckets.' + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.versioningConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.versioningConfiguration) + || (has(self.initProvider) && has(self.initProvider.versioningConfiguration))' + status: + description: BucketVersioningStatus defines the observed state of BucketVersioning. + properties: + atProvider: + properties: + bucket: + description: Name of the S3 bucket. + type: string + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + id: + description: The bucket or bucket and expected_bucket_owner separated + by a comma (,) if the latter is provided. + type: string + mfa: + description: Concatenation of the authentication device's serial + number, a space, and the value that is displayed on your authentication + device. + type: string + versioningConfiguration: + description: Configuration block for the versioning parameters. + See below. + properties: + mfaDelete: + description: 'Specifies whether MFA delete is enabled in the + bucket versioning configuration. Valid values: Enabled or + Disabled.' + type: string + status: + description: 'Versioning state of the bucket. Valid values: + Enabled, Suspended, or Disabled. Disabled should only be + used when creating or importing resources that correspond + to unversioned S3 buckets.' + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_bucketwebsiteconfigurations.yaml b/package/crds/s3.aws.upbound.io_bucketwebsiteconfigurations.yaml index c7979679b3..1c1a83b22b 100644 --- a/package/crds/s3.aws.upbound.io_bucketwebsiteconfigurations.yaml +++ b/package/crds/s3.aws.upbound.io_bucketwebsiteconfigurations.yaml @@ -801,3 +801,756 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BucketWebsiteConfiguration is the Schema for the BucketWebsiteConfigurations + API. Provides an S3 bucket website configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BucketWebsiteConfigurationSpec defines the desired state + of BucketWebsiteConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + errorDocument: + description: Name of the error document for the website. See below. + properties: + key: + description: Object key name to use when a 4XX class error + occurs. + type: string + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + indexDocument: + description: Name of the index document for the website. See below. + properties: + suffix: + description: |- + Suffix that is appended to a request that is for a directory on the website endpoint. + For example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html. 
+ The suffix must not be empty and must not include a slash character. + type: string + type: object + redirectAllRequestsTo: + description: Redirect behavior for every request to this bucket's + website endpoint. See below. Conflicts with error_document, + index_document, and routing_rule. + properties: + hostName: + description: Name of the host where requests are redirected. + type: string + protocol: + description: 'Protocol to use when redirecting requests. The + default is the protocol that is used in the original request. + Valid values: http, https.' + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + routingRule: + description: List of rules that define when a redirect is applied + and the redirect behavior. See below. + items: + properties: + condition: + description: Configuration block for describing a condition + that must be met for the specified redirect to apply. + See below. + properties: + httpErrorCodeReturnedEquals: + description: HTTP error code when the redirect is applied. + If specified with key_prefix_equals, then both must + be true for the redirect to be applied. + type: string + keyPrefixEquals: + description: Object key name prefix when the redirect + is applied. If specified with http_error_code_returned_equals, + then both must be true for the redirect to be applied. + type: string + type: object + redirect: + description: Configuration block for redirect information. + See below. + properties: + hostName: + description: Name of the host where requests are redirected. + type: string + httpRedirectCode: + description: HTTP redirect code to use on the response. + type: string + protocol: + description: 'Protocol to use when redirecting requests. + The default is the protocol that is used in the original + request. Valid values: http, https.' + type: string + replaceKeyPrefixWith: + description: Object key prefix to use in the redirect + request. 
For example, to redirect requests for all + pages with prefix docs/ (objects in the docs/ folder) + to documents/, you can set a condition block with + key_prefix_equals set to docs/ and in the redirect + set replace_key_prefix_with to /documents. + type: string + replaceKeyWith: + description: Specific object key to use in the redirect + request. For example, redirect request to error.html. + type: string + type: object + type: object + type: array + routingRules: + description: |- + JSON array containing routing rules + describing redirect behavior and when redirects are applied. Use this parameter when your routing rules contain empty String values ("") as seen in the example above. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the bucket. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + errorDocument: + description: Name of the error document for the website. See below. + properties: + key: + description: Object key name to use when a 4XX class error + occurs. + type: string + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. 
+ type: string + indexDocument: + description: Name of the index document for the website. See below. + properties: + suffix: + description: |- + Suffix that is appended to a request that is for a directory on the website endpoint. + For example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html. + The suffix must not be empty and must not include a slash character. + type: string + type: object + redirectAllRequestsTo: + description: Redirect behavior for every request to this bucket's + website endpoint. See below. Conflicts with error_document, + index_document, and routing_rule. + properties: + hostName: + description: Name of the host where requests are redirected. + type: string + protocol: + description: 'Protocol to use when redirecting requests. The + default is the protocol that is used in the original request. + Valid values: http, https.' + type: string + type: object + routingRule: + description: List of rules that define when a redirect is applied + and the redirect behavior. See below. + items: + properties: + condition: + description: Configuration block for describing a condition + that must be met for the specified redirect to apply. + See below. + properties: + httpErrorCodeReturnedEquals: + description: HTTP error code when the redirect is applied. + If specified with key_prefix_equals, then both must + be true for the redirect to be applied. + type: string + keyPrefixEquals: + description: Object key name prefix when the redirect + is applied. If specified with http_error_code_returned_equals, + then both must be true for the redirect to be applied. + type: string + type: object + redirect: + description: Configuration block for redirect information. + See below. + properties: + hostName: + description: Name of the host where requests are redirected. 
+ type: string + httpRedirectCode: + description: HTTP redirect code to use on the response. + type: string + protocol: + description: 'Protocol to use when redirecting requests. + The default is the protocol that is used in the original + request. Valid values: http, https.' + type: string + replaceKeyPrefixWith: + description: Object key prefix to use in the redirect + request. For example, to redirect requests for all + pages with prefix docs/ (objects in the docs/ folder) + to documents/, you can set a condition block with + key_prefix_equals set to docs/ and in the redirect + set replace_key_prefix_with to /documents. + type: string + replaceKeyWith: + description: Specific object key to use in the redirect + request. For example, redirect request to error.html. + type: string + type: object + type: object + type: array + routingRules: + description: |- + JSON array containing routing rules + describing redirect behavior and when redirects are applied. Use this parameter when your routing rules contain empty String values ("") as seen in the example above. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: BucketWebsiteConfigurationStatus defines the observed state + of BucketWebsiteConfiguration. + properties: + atProvider: + properties: + bucket: + description: Name of the bucket. + type: string + errorDocument: + description: Name of the error document for the website. See below. + properties: + key: + description: Object key name to use when a 4XX class error + occurs. + type: string + type: object + expectedBucketOwner: + description: Account ID of the expected bucket owner. + type: string + id: + description: The bucket or bucket and expected_bucket_owner separated + by a comma (,) if the latter is provided. + type: string + indexDocument: + description: Name of the index document for the website. See below. + properties: + suffix: + description: |- + Suffix that is appended to a request that is for a directory on the website endpoint. + For example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html. 
+ The suffix must not be empty and must not include a slash character. + type: string + type: object + redirectAllRequestsTo: + description: Redirect behavior for every request to this bucket's + website endpoint. See below. Conflicts with error_document, + index_document, and routing_rule. + properties: + hostName: + description: Name of the host where requests are redirected. + type: string + protocol: + description: 'Protocol to use when redirecting requests. The + default is the protocol that is used in the original request. + Valid values: http, https.' + type: string + type: object + routingRule: + description: List of rules that define when a redirect is applied + and the redirect behavior. See below. + items: + properties: + condition: + description: Configuration block for describing a condition + that must be met for the specified redirect to apply. + See below. + properties: + httpErrorCodeReturnedEquals: + description: HTTP error code when the redirect is applied. + If specified with key_prefix_equals, then both must + be true for the redirect to be applied. + type: string + keyPrefixEquals: + description: Object key name prefix when the redirect + is applied. If specified with http_error_code_returned_equals, + then both must be true for the redirect to be applied. + type: string + type: object + redirect: + description: Configuration block for redirect information. + See below. + properties: + hostName: + description: Name of the host where requests are redirected. + type: string + httpRedirectCode: + description: HTTP redirect code to use on the response. + type: string + protocol: + description: 'Protocol to use when redirecting requests. + The default is the protocol that is used in the original + request. Valid values: http, https.' + type: string + replaceKeyPrefixWith: + description: Object key prefix to use in the redirect + request. 
For example, to redirect requests for all + pages with prefix docs/ (objects in the docs/ folder) + to documents/, you can set a condition block with + key_prefix_equals set to docs/ and in the redirect + set replace_key_prefix_with to /documents. + type: string + replaceKeyWith: + description: Specific object key to use in the redirect + request. For example, redirect request to error.html. + type: string + type: object + type: object + type: array + routingRules: + description: |- + JSON array containing routing rules + describing redirect behavior and when redirects are applied. Use this parameter when your routing rules contain empty String values ("") as seen in the example above. + type: string + websiteDomain: + description: Domain of the website endpoint. This is used to create + Route 53 alias records. + type: string + websiteEndpoint: + description: Website endpoint. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3.aws.upbound.io_objects.yaml b/package/crds/s3.aws.upbound.io_objects.yaml index 8650e40a37..43a6f25410 100644 --- a/package/crds/s3.aws.upbound.io_objects.yaml +++ b/package/crds/s3.aws.upbound.io_objects.yaml @@ -1083,3 +1083,1056 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Object is the Schema for the Objects API. Provides an S3 object + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObjectSpec defines the desired state of Object + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + acl: + description: Canned ACL to apply. Valid values are private, public-read, + public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, + and bucket-owner-full-control. + type: string + bucket: + description: Name of the bucket to put the file in. Alternatively, + an S3 access point ARN can be specified. + type: string + bucketKeyEnabled: + description: Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. + type: boolean + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cacheControl: + description: Caching behavior along the request/reply chain Read + w3c cache_control for further details. + type: string + checksumAlgorithm: + description: 'Indicates the algorithm used to create the checksum + for the object. 
If a value is specified and the object is encrypted + with KMS, you must have permission to use the kms:Decrypt action. + Valid values: CRC32, CRC32C, SHA1, SHA256.' + type: string + content: + description: Literal string value to use as the object content, + which will be uploaded as UTF-8-encoded text. + type: string + contentBase64: + description: Base64-encoded data that will be decoded and uploaded + as raw bytes for the object content. This allows safely uploading + non-UTF8 binary data, but is recommended only for small content + such as the result of the gzipbase64 function with small text + strings. For larger objects, use source to stream the content + from a disk file. + type: string + contentDisposition: + description: Presentational information for the object. Read w3c + content_disposition for further information. + type: string + contentEncoding: + description: Content encodings that have been applied to the object + and thus what decoding mechanisms must be applied to obtain + the media-type referenced by the Content-Type header field. + Read w3c content encoding for further information. + type: string + contentLanguage: + description: Language the content is in e.g., en-US or en-GB. + type: string + contentType: + description: Standard MIME type describing the format of the object + data, e.g., application/octet-stream. All Valid MIME Types are + valid for this input. + type: string + etag: + description: Triggers updates when the value changes.11.11.11 + or earlier). This attribute is not compatible with KMS encryption, + kms_key_id or server_side_encryption = "aws:kms", also if an + object is larger than 16 MB, the AWS Management Console will + upload or copy that object as a Multipart Upload, and therefore + the ETag will not be an MD5 digest (see source_hash instead). + type: string + forceDestroy: + description: Whether to allow the object to be deleted by removing + any legal hold on any object version. Default is false. 
This + value should be set to true only if the bucket has S3 object + lock enabled. + type: boolean + key: + description: Name of the object once it is in the bucket. + type: string + kmsKeyId: + description: ARN of the KMS Key to use for object encryption. + If the S3 Bucket has server-side encryption enabled, that value + will automatically be used. If referencing the aws_kms_key resource, + use the arn attribute. If referencing the aws_kms_alias data + source or resource, use the target_key_arn attribute. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + metadata: + additionalProperties: + type: string + description: Map of keys/values to provision metadata (will be + automatically prefixed by x-amz-meta-, note that only lowercase + label are currently supported by the AWS Go API). + type: object + x-kubernetes-map-type: granular + objectLockLegalHoldStatus: + description: Legal hold status that you want to apply to the specified + object. Valid values are ON and OFF. + type: string + objectLockMode: + description: Object lock retention mode that you want to apply + to this object. Valid values are GOVERNANCE and COMPLIANCE. + type: string + objectLockRetainUntilDate: + description: Date and time, in RFC3339 format, when this object's + object lock will expire. + type: string + overrideProvider: + description: Override provider-level configuration options. See + Override Provider below for more details. + properties: + defaultTags: + description: Override the provider default_tags configuration + block. + properties: + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + serverSideEncryption: + description: Server-side encryption of the object in S3. Valid + values are "AES256" and "aws:kms". + type: string + source: + description: Path to a file that will be read and uploaded as + raw bytes for the object content. + type: string + sourceHash: + description: Triggers updates like etag but useful to address + etag encryption limitations.11.12 or later). (The value is only + stored in state and not saved by AWS.) + type: string + storageClass: + description: Storage Class for the object. Defaults to "STANDARD". + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + websiteRedirect: + description: Target URL for website redirect. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + acl: + description: Canned ACL to apply. Valid values are private, public-read, + public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, + and bucket-owner-full-control. + type: string + bucket: + description: Name of the bucket to put the file in. Alternatively, + an S3 access point ARN can be specified. + type: string + bucketKeyEnabled: + description: Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. 
+ type: boolean + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cacheControl: + description: Caching behavior along the request/reply chain Read + w3c cache_control for further details. + type: string + checksumAlgorithm: + description: 'Indicates the algorithm used to create the checksum + for the object. If a value is specified and the object is encrypted + with KMS, you must have permission to use the kms:Decrypt action. + Valid values: CRC32, CRC32C, SHA1, SHA256.' + type: string + content: + description: Literal string value to use as the object content, + which will be uploaded as UTF-8-encoded text. + type: string + contentBase64: + description: Base64-encoded data that will be decoded and uploaded + as raw bytes for the object content. This allows safely uploading + non-UTF8 binary data, but is recommended only for small content + such as the result of the gzipbase64 function with small text + strings. For larger objects, use source to stream the content + from a disk file. + type: string + contentDisposition: + description: Presentational information for the object. Read w3c + content_disposition for further information. + type: string + contentEncoding: + description: Content encodings that have been applied to the object + and thus what decoding mechanisms must be applied to obtain + the media-type referenced by the Content-Type header field. + Read w3c content encoding for further information. + type: string + contentLanguage: + description: Language the content is in e.g., en-US or en-GB. + type: string + contentType: + description: Standard MIME type describing the format of the object + data, e.g., application/octet-stream. All Valid MIME Types are + valid for this input. 
+ type: string + etag: + description: Triggers updates when the value changes.11.11.11 + or earlier). This attribute is not compatible with KMS encryption, + kms_key_id or server_side_encryption = "aws:kms", also if an + object is larger than 16 MB, the AWS Management Console will + upload or copy that object as a Multipart Upload, and therefore + the ETag will not be an MD5 digest (see source_hash instead). + type: string + forceDestroy: + description: Whether to allow the object to be deleted by removing + any legal hold on any object version. Default is false. This + value should be set to true only if the bucket has S3 object + lock enabled. + type: boolean + key: + description: Name of the object once it is in the bucket. + type: string + kmsKeyId: + description: ARN of the KMS Key to use for object encryption. + If the S3 Bucket has server-side encryption enabled, that value + will automatically be used. If referencing the aws_kms_key resource, + use the arn attribute. If referencing the aws_kms_alias data + source or resource, use the target_key_arn attribute. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + metadata: + additionalProperties: + type: string + description: Map of keys/values to provision metadata (will be + automatically prefixed by x-amz-meta-, note that only lowercase + label are currently supported by the AWS Go API). + type: object + x-kubernetes-map-type: granular + objectLockLegalHoldStatus: + description: Legal hold status that you want to apply to the specified + object. Valid values are ON and OFF. + type: string + objectLockMode: + description: Object lock retention mode that you want to apply + to this object. Valid values are GOVERNANCE and COMPLIANCE. 
+ type: string
+ objectLockRetainUntilDate:
+ description: Date and time, in RFC3339 format, when this object's
+ object lock will expire.
+ type: string
+ overrideProvider:
+ description: Override provider-level configuration options. See
+ Override Provider below for more details.
+ properties:
+ defaultTags:
+ description: Override the provider default_tags configuration
+ block.
+ properties:
+ tags:
+ additionalProperties:
+ type: string
+ description: Key-value map of resource tags.
+ type: object
+ x-kubernetes-map-type: granular
+ type: object
+ type: object
+ serverSideEncryption:
+ description: Server-side encryption of the object in S3. Valid
+ values are "AES256" and "aws:kms".
+ type: string
+ source:
+ description: Path to a file that will be read and uploaded as
+ raw bytes for the object content.
+ type: string
+ sourceHash:
+ description: Triggers updates like etag but useful to address
+ etag encryption limitations. (The value is only
+ stored in state and not saved by AWS.)
+ type: string
+ storageClass:
+ description: Storage Class for the object. Defaults to "STANDARD".
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Key-value map of resource tags.
+ type: object
+ x-kubernetes-map-type: granular
+ websiteRedirect:
+ description: Target URL for website redirect.
+ type: string
+ type: object
+ managementPolicies:
+ default:
+ - '*'
+ description: |-
+ THIS IS A BETA FIELD. It is on by default but can be opted out
+ through a Crossplane feature flag.
+ ManagementPolicies specify the array of actions Crossplane is allowed to
+ take on the managed and external resources.
+ This field is planned to replace the DeletionPolicy field in a future
+ release. Currently, both could be set independently and non-default
+ values would be honored if the feature flag is enabled. If both are
+ custom, the DeletionPolicy field will be ignored.
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.key is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.key) + || (has(self.initProvider) && has(self.initProvider.key))' + status: + description: ObjectStatus defines the observed state of Object. + properties: + atProvider: + properties: + acl: + description: Canned ACL to apply. Valid values are private, public-read, + public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, + and bucket-owner-full-control. + type: string + arn: + description: ARN of the object. + type: string + bucket: + description: Name of the bucket to put the file in. Alternatively, + an S3 access point ARN can be specified. + type: string + bucketKeyEnabled: + description: Whether or not to use Amazon S3 Bucket Keys for SSE-KMS. + type: boolean + cacheControl: + description: Caching behavior along the request/reply chain Read + w3c cache_control for further details. 
+ type: string + checksumAlgorithm: + description: 'Indicates the algorithm used to create the checksum + for the object. If a value is specified and the object is encrypted + with KMS, you must have permission to use the kms:Decrypt action. + Valid values: CRC32, CRC32C, SHA1, SHA256.' + type: string + checksumCrc32: + description: The base64-encoded, 32-bit CRC32 checksum of the + object. + type: string + checksumCrc32C: + description: The base64-encoded, 32-bit CRC32C checksum of the + object. + type: string + checksumSha1: + description: The base64-encoded, 160-bit SHA-1 digest of the object. + type: string + checksumSha256: + description: The base64-encoded, 256-bit SHA-256 digest of the + object. + type: string + content: + description: Literal string value to use as the object content, + which will be uploaded as UTF-8-encoded text. + type: string + contentBase64: + description: Base64-encoded data that will be decoded and uploaded + as raw bytes for the object content. This allows safely uploading + non-UTF8 binary data, but is recommended only for small content + such as the result of the gzipbase64 function with small text + strings. For larger objects, use source to stream the content + from a disk file. + type: string + contentDisposition: + description: Presentational information for the object. Read w3c + content_disposition for further information. + type: string + contentEncoding: + description: Content encodings that have been applied to the object + and thus what decoding mechanisms must be applied to obtain + the media-type referenced by the Content-Type header field. + Read w3c content encoding for further information. + type: string + contentLanguage: + description: Language the content is in e.g., en-US or en-GB. + type: string + contentType: + description: Standard MIME type describing the format of the object + data, e.g., application/octet-stream. All Valid MIME Types are + valid for this input. 
+ type: string
+ etag:
+ description: Triggers updates when the value changes.
+ This attribute is not compatible with KMS encryption,
+ kms_key_id or server_side_encryption = "aws:kms", also if an
+ object is larger than 16 MB, the AWS Management Console will
+ upload or copy that object as a Multipart Upload, and therefore
+ the ETag will not be an MD5 digest (see source_hash instead).
+ type: string
+ forceDestroy:
+ description: Whether to allow the object to be deleted by removing
+ any legal hold on any object version. Default is false. This
+ value should be set to true only if the bucket has S3 object
+ lock enabled.
+ type: boolean
+ id:
+ type: string
+ key:
+ description: Name of the object once it is in the bucket.
+ type: string
+ kmsKeyId:
+ description: ARN of the KMS Key to use for object encryption.
+ If the S3 Bucket has server-side encryption enabled, that value
+ will automatically be used. If referencing the aws_kms_key resource,
+ use the arn attribute. If referencing the aws_kms_alias data
+ source or resource, use the target_key_arn attribute.
+ type: string
+ metadata:
+ additionalProperties:
+ type: string
+ description: Map of keys/values to provision metadata (will be
+ automatically prefixed by x-amz-meta-, note that only lowercase
+ label are currently supported by the AWS Go API).
+ type: object
+ x-kubernetes-map-type: granular
+ objectLockLegalHoldStatus:
+ description: Legal hold status that you want to apply to the specified
+ object. Valid values are ON and OFF.
+ type: string
+ objectLockMode:
+ description: Object lock retention mode that you want to apply
+ to this object. Valid values are GOVERNANCE and COMPLIANCE.
+ type: string
+ objectLockRetainUntilDate:
+ description: Date and time, in RFC3339 format, when this object's
+ object lock will expire.
+ type: string
+ overrideProvider:
+ description: Override provider-level configuration options. See
+ Override Provider below for more details.
+ properties:
+ defaultTags:
+ description: Override the provider default_tags configuration
+ block.
+ properties:
+ tags:
+ additionalProperties:
+ type: string
+ description: Key-value map of resource tags.
+ type: object
+ x-kubernetes-map-type: granular
+ type: object
+ type: object
+ serverSideEncryption:
+ description: Server-side encryption of the object in S3. Valid
+ values are "AES256" and "aws:kms".
+ type: string
+ source:
+ description: Path to a file that will be read and uploaded as
+ raw bytes for the object content.
+ type: string
+ sourceHash:
+ description: Triggers updates like etag but useful to address
+ etag encryption limitations. (The value is only
+ stored in state and not saved by AWS.)
+ type: string
+ storageClass:
+ description: Storage Class for the object. Defaults to "STANDARD".
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: Key-value map of resource tags.
+ type: object
+ x-kubernetes-map-type: granular
+ tagsAll:
+ additionalProperties:
+ type: string
+ description: Map of tags assigned to the resource, including those
+ inherited from the provider default_tags configuration block.
+ type: object
+ x-kubernetes-map-type: granular
+ versionId:
+ description: Unique version ID value for the object, if bucket
+ versioning is enabled.
+ type: string
+ websiteRedirect:
+ description: Target URL for website redirect.
+ type: string
+ type: object
+ conditions:
+ description: Conditions of the resource.
+ items:
+ description: A Condition that may apply to a resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ LastTransitionTime is the last time this condition transitioned from one
+ status to another.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ A Message containing details about this condition's last transition from
+ one status to another, if any.
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3control.aws.upbound.io_accesspoints.yaml b/package/crds/s3control.aws.upbound.io_accesspoints.yaml index 9c272cc1d3..863df8a025 100644 --- a/package/crds/s3control.aws.upbound.io_accesspoints.yaml +++ b/package/crds/s3control.aws.upbound.io_accesspoints.yaml @@ -881,3 +881,851 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AccessPoint is the Schema for the AccessPoints API. Manages an + S3 Access Point. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccessPointSpec defines the desired state of AccessPoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountId: + description: AWS account ID for the owner of the bucket for which + you want to create an access point. + type: string + bucket: + description: Name of an AWS Partition S3 General Purpose Bucket + or the ARN of S3 on Outposts Bucket that you want to associate + this access point with. + type: string + bucketAccountId: + description: AWS account ID associated with the S3 bucket associated + with this access point. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name you want to assign to this access point. + type: string + policy: + description: Valid JSON document that specifies the policy that + you want to apply to this access point. 
Removing policy from + your configuration or setting policy to null or an empty string + (i.e., policy = "") will not delete the policy since it could + have been set by aws_s3control_access_point_policy. To remove + the policy, set it to "{}" (an empty JSON document). + type: string + publicAccessBlockConfiguration: + description: Configuration block to manage the PublicAccessBlock + configuration that you want to apply to this Amazon S3 bucket. + You can enable the configuration options in any combination. + Detailed below. + properties: + blockPublicAcls: + description: 'Whether Amazon S3 should block public ACLs for + buckets in this account. Defaults to true. Enabling this + setting does not affect existing policies or ACLs. When + set to true causes the following behavior:' + type: boolean + blockPublicPolicy: + description: 'Whether Amazon S3 should block public bucket + policies for buckets in this account. Defaults to true. + Enabling this setting does not affect existing bucket policies. + When set to true causes Amazon S3 to:' + type: boolean + ignorePublicAcls: + description: 'Whether Amazon S3 should ignore public ACLs + for buckets in this account. Defaults to true. Enabling + this setting does not affect the persistence of any existing + ACLs and doesn''t prevent new public ACLs from being set. + When set to true causes Amazon S3 to:' + type: boolean + restrictPublicBuckets: + description: 'Whether Amazon S3 should restrict public bucket + policies for buckets in this account. Defaults to true. + Enabling this setting does not affect previously stored + bucket policies, except that public and cross-account access + within any public bucket policy, including non-public delegation + to specific accounts, is blocked. When set to true:' + type: boolean + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + vpcConfiguration: + description: Configuration block to restrict access to this access + point to requests from the specified Virtual Private Cloud (VPC). + Required for S3 on Outposts. Detailed below. + properties: + vpcId: + description: This access point will only allow connections + from the specified VPC ID. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountId: + description: AWS account ID for the owner of the bucket for which + you want to create an access point. + type: string + bucket: + description: Name of an AWS Partition S3 General Purpose Bucket + or the ARN of S3 on Outposts Bucket that you want to associate + this access point with. + type: string + bucketAccountId: + description: AWS account ID associated with the S3 bucket associated + with this access point. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name you want to assign to this access point. + type: string + policy: + description: Valid JSON document that specifies the policy that + you want to apply to this access point. Removing policy from + your configuration or setting policy to null or an empty string + (i.e., policy = "") will not delete the policy since it could + have been set by aws_s3control_access_point_policy. To remove + the policy, set it to "{}" (an empty JSON document). + type: string + publicAccessBlockConfiguration: + description: Configuration block to manage the PublicAccessBlock + configuration that you want to apply to this Amazon S3 bucket. + You can enable the configuration options in any combination. + Detailed below. + properties: + blockPublicAcls: + description: 'Whether Amazon S3 should block public ACLs for + buckets in this account. Defaults to true. Enabling this + setting does not affect existing policies or ACLs. When + set to true causes the following behavior:' + type: boolean + blockPublicPolicy: + description: 'Whether Amazon S3 should block public bucket + policies for buckets in this account. Defaults to true. + Enabling this setting does not affect existing bucket policies. + When set to true causes Amazon S3 to:' + type: boolean + ignorePublicAcls: + description: 'Whether Amazon S3 should ignore public ACLs + for buckets in this account. Defaults to true. Enabling + this setting does not affect the persistence of any existing + ACLs and doesn''t prevent new public ACLs from being set. + When set to true causes Amazon S3 to:' + type: boolean + restrictPublicBuckets: + description: 'Whether Amazon S3 should restrict public bucket + policies for buckets in this account. Defaults to true. 
+ Enabling this setting does not affect previously stored + bucket policies, except that public and cross-account access + within any public bucket policy, including non-public delegation + to specific accounts, is blocked. When set to true:' + type: boolean + type: object + vpcConfiguration: + description: Configuration block to restrict access to this access + point to requests from the specified Virtual Private Cloud (VPC). + Required for S3 on Outposts. Detailed below. + properties: + vpcId: + description: This access point will only allow connections + from the specified VPC ID. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: AccessPointStatus defines the observed state of AccessPoint. + properties: + atProvider: + properties: + accountId: + description: AWS account ID for the owner of the bucket for which + you want to create an access point. + type: string + alias: + description: Alias of the S3 Access Point. + type: string + arn: + description: ARN of the S3 Access Point. + type: string + bucket: + description: Name of an AWS Partition S3 General Purpose Bucket + or the ARN of S3 on Outposts Bucket that you want to associate + this access point with. + type: string + bucketAccountId: + description: AWS account ID associated with the S3 bucket associated + with this access point. + type: string + domainName: + description: |- + DNS domain name of the S3 Access Point in the format name-account_id.s3-accesspoint.region.amazonaws.com. + Note: S3 access points only support secure access by HTTPS. HTTP isn't supported. + type: string + endpoints: + additionalProperties: + type: string + description: VPC endpoints for the S3 Access Point. 
+ type: object + x-kubernetes-map-type: granular + hasPublicAccessPolicy: + description: Indicates whether this access point currently has + a policy that allows public access. + type: boolean + id: + description: For Access Point of an AWS Partition S3 Bucket, the + AWS account ID and access point name separated by a colon (:). + For S3 on Outposts Bucket, the ARN of the Access Point. + type: string + name: + description: Name you want to assign to this access point. + type: string + networkOrigin: + description: Indicates whether this access point allows access + from the public Internet. Values are VPC (the access point doesn't + allow access from the public Internet) and Internet (the access + point allows access from the public Internet, subject to the + access point and bucket access policies). + type: string + policy: + description: Valid JSON document that specifies the policy that + you want to apply to this access point. Removing policy from + your configuration or setting policy to null or an empty string + (i.e., policy = "") will not delete the policy since it could + have been set by aws_s3control_access_point_policy. To remove + the policy, set it to "{}" (an empty JSON document). + type: string + publicAccessBlockConfiguration: + description: Configuration block to manage the PublicAccessBlock + configuration that you want to apply to this Amazon S3 bucket. + You can enable the configuration options in any combination. + Detailed below. + properties: + blockPublicAcls: + description: 'Whether Amazon S3 should block public ACLs for + buckets in this account. Defaults to true. Enabling this + setting does not affect existing policies or ACLs. When + set to true causes the following behavior:' + type: boolean + blockPublicPolicy: + description: 'Whether Amazon S3 should block public bucket + policies for buckets in this account. Defaults to true. + Enabling this setting does not affect existing bucket policies. 
+ When set to true causes Amazon S3 to:' + type: boolean + ignorePublicAcls: + description: 'Whether Amazon S3 should ignore public ACLs + for buckets in this account. Defaults to true. Enabling + this setting does not affect the persistence of any existing + ACLs and doesn''t prevent new public ACLs from being set. + When set to true causes Amazon S3 to:' + type: boolean + restrictPublicBuckets: + description: 'Whether Amazon S3 should restrict public bucket + policies for buckets in this account. Defaults to true. + Enabling this setting does not affect previously stored + bucket policies, except that public and cross-account access + within any public bucket policy, including non-public delegation + to specific accounts, is blocked. When set to true:' + type: boolean + type: object + vpcConfiguration: + description: Configuration block to restrict access to this access + point to requests from the specified Virtual Private Cloud (VPC). + Required for S3 on Outposts. Detailed below. + properties: + vpcId: + description: This access point will only allow connections + from the specified VPC ID. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3control.aws.upbound.io_multiregionaccesspointpolicies.yaml b/package/crds/s3control.aws.upbound.io_multiregionaccesspointpolicies.yaml index 400b9dad1a..6e98c64597 100644 --- a/package/crds/s3control.aws.upbound.io_multiregionaccesspointpolicies.yaml +++ b/package/crds/s3control.aws.upbound.io_multiregionaccesspointpolicies.yaml @@ -413,3 +413,392 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MultiRegionAccessPointPolicy is the Schema for the MultiRegionAccessPointPolicys + API. Provides a resource to manage an S3 Multi-Region Access Point access + control policy. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MultiRegionAccessPointPolicySpec defines the desired state + of MultiRegionAccessPointPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountId: + description: The AWS account ID for the owner of the Multi-Region + Access Point. + type: string + details: + description: A configuration block containing details about the + policy for the Multi-Region Access Point. See Details Configuration + Block below for more details + properties: + name: + description: The name of the Multi-Region Access Point. 
+ type: string + policy: + description: A valid JSON document that specifies the policy + that you want to associate with this Multi-Region Access + Point. Once applied, the policy can be edited, but not deleted. + For more information, see the documentation on Multi-Region + Access Point Permissions. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountId: + description: The AWS account ID for the owner of the Multi-Region + Access Point. + type: string + details: + description: A configuration block containing details about the + policy for the Multi-Region Access Point. See Details Configuration + Block below for more details + properties: + name: + description: The name of the Multi-Region Access Point. + type: string + policy: + description: A valid JSON document that specifies the policy + that you want to associate with this Multi-Region Access + Point. Once applied, the policy can be edited, but not deleted. + For more information, see the documentation on Multi-Region + Access Point Permissions. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.details is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.details) + || (has(self.initProvider) && has(self.initProvider.details))' + status: + description: MultiRegionAccessPointPolicyStatus defines the observed state + of MultiRegionAccessPointPolicy. + properties: + atProvider: + properties: + accountId: + description: The AWS account ID for the owner of the Multi-Region + Access Point. 
+ type: string + details: + description: A configuration block containing details about the + policy for the Multi-Region Access Point. See Details Configuration + Block below for more details + properties: + name: + description: The name of the Multi-Region Access Point. + type: string + policy: + description: A valid JSON document that specifies the policy + that you want to associate with this Multi-Region Access + Point. Once applied, the policy can be edited, but not deleted. + For more information, see the documentation on Multi-Region + Access Point Permissions. + type: string + type: object + established: + description: The last established policy for the Multi-Region + Access Point. + type: string + id: + description: The AWS account ID and access point name separated + by a colon (:). + type: string + proposed: + description: The proposed policy for the Multi-Region Access Point. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3control.aws.upbound.io_multiregionaccesspoints.yaml b/package/crds/s3control.aws.upbound.io_multiregionaccesspoints.yaml index f2e849c744..43f7caad5c 100644 --- a/package/crds/s3control.aws.upbound.io_multiregionaccesspoints.yaml +++ b/package/crds/s3control.aws.upbound.io_multiregionaccesspoints.yaml @@ -640,3 +640,607 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MultiRegionAccessPoint is the Schema for the MultiRegionAccessPoints + API. Provides a resource to manage an S3 Multi-Region Access Point associated + with specified buckets. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MultiRegionAccessPointSpec defines the desired state of MultiRegionAccessPoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountId: + description: The AWS account ID for the owner of the buckets for + which you want to create a Multi-Region Access Point. + type: string + details: + description: A configuration block containing details about the + Multi-Region Access Point. See Details Configuration Block below + for more details + properties: + name: + description: The name of the Multi-Region Access Point. + type: string + publicAccessBlock: + description: Configuration block to manage the PublicAccessBlock + configuration that you want to apply to this Multi-Region + Access Point. You can enable the configuration options in + any combination. 
See Public Access Block Configuration below + for more details. + properties: + blockPublicAcls: + description: 'Whether Amazon S3 should block public ACLs + for buckets in this account. Defaults to true. Enabling + this setting does not affect existing policies or ACLs. + When set to true causes the following behavior:' + type: boolean + blockPublicPolicy: + description: 'Whether Amazon S3 should block public bucket + policies for buckets in this account. Defaults to true. + Enabling this setting does not affect existing bucket + policies. When set to true causes Amazon S3 to:' + type: boolean + ignorePublicAcls: + description: 'Whether Amazon S3 should ignore public ACLs + for buckets in this account. Defaults to true. Enabling + this setting does not affect the persistence of any + existing ACLs and doesn''t prevent new public ACLs from + being set. When set to true causes Amazon S3 to:' + type: boolean + restrictPublicBuckets: + description: 'Whether Amazon S3 should restrict public + bucket policies for buckets in this account. Defaults + to true. Enabling this setting does not affect previously + stored bucket policies, except that public and cross-account + access within any public bucket policy, including non-public + delegation to specific accounts, is blocked. When set + to true:' + type: boolean + type: object + region: + description: The Region configuration block to specify the + bucket associated with the Multi-Region Access Point. See + Region Configuration below for more details. + items: + properties: + bucket: + description: The name of the associated bucket for the + Region. + type: string + bucketAccountId: + description: The AWS account ID that owns the Amazon + S3 bucket that's associated with this Multi-Region + Access Point. + type: string + bucketRef: + description: Reference to a Bucket in s3 to populate + bucket. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in s3 to populate + bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + required: + - region + type: object + region: + description: |- + The Region configuration block to specify the bucket associated with the Multi-Region Access Point. See Region Configuration below for more details. + Region is the region you'd like your resource to be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountId: + description: The AWS account ID for the owner of the buckets for + which you want to create a Multi-Region Access Point. + type: string + details: + description: A configuration block containing details about the + Multi-Region Access Point. See Details Configuration Block below + for more details + properties: + name: + description: The name of the Multi-Region Access Point. + type: string + publicAccessBlock: + description: Configuration block to manage the PublicAccessBlock + configuration that you want to apply to this Multi-Region + Access Point. You can enable the configuration options in + any combination. See Public Access Block Configuration below + for more details. + properties: + blockPublicAcls: + description: 'Whether Amazon S3 should block public ACLs + for buckets in this account. Defaults to true. 
Enabling + this setting does not affect existing policies or ACLs. + When set to true causes the following behavior:' + type: boolean + blockPublicPolicy: + description: 'Whether Amazon S3 should block public bucket + policies for buckets in this account. Defaults to true. + Enabling this setting does not affect existing bucket + policies. When set to true causes Amazon S3 to:' + type: boolean + ignorePublicAcls: + description: 'Whether Amazon S3 should ignore public ACLs + for buckets in this account. Defaults to true. Enabling + this setting does not affect the persistence of any + existing ACLs and doesn''t prevent new public ACLs from + being set. When set to true causes Amazon S3 to:' + type: boolean + restrictPublicBuckets: + description: 'Whether Amazon S3 should restrict public + bucket policies for buckets in this account. Defaults + to true. Enabling this setting does not affect previously + stored bucket policies, except that public and cross-account + access within any public bucket policy, including non-public + delegation to specific accounts, is blocked. When set + to true:' + type: boolean + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.details is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.details) + || (has(self.initProvider) && has(self.initProvider.details))' + status: + description: MultiRegionAccessPointStatus defines the observed state of + MultiRegionAccessPoint. + properties: + atProvider: + properties: + accountId: + description: The AWS account ID for the owner of the buckets for + which you want to create a Multi-Region Access Point. + type: string + alias: + description: The alias for the Multi-Region Access Point. + type: string + arn: + description: Amazon Resource Name (ARN) of the Multi-Region Access + Point. + type: string + details: + description: A configuration block containing details about the + Multi-Region Access Point. See Details Configuration Block below + for more details + properties: + name: + description: The name of the Multi-Region Access Point. 
+ type: string + publicAccessBlock: + description: Configuration block to manage the PublicAccessBlock + configuration that you want to apply to this Multi-Region + Access Point. You can enable the configuration options in + any combination. See Public Access Block Configuration below + for more details. + properties: + blockPublicAcls: + description: 'Whether Amazon S3 should block public ACLs + for buckets in this account. Defaults to true. Enabling + this setting does not affect existing policies or ACLs. + When set to true causes the following behavior:' + type: boolean + blockPublicPolicy: + description: 'Whether Amazon S3 should block public bucket + policies for buckets in this account. Defaults to true. + Enabling this setting does not affect existing bucket + policies. When set to true causes Amazon S3 to:' + type: boolean + ignorePublicAcls: + description: 'Whether Amazon S3 should ignore public ACLs + for buckets in this account. Defaults to true. Enabling + this setting does not affect the persistence of any + existing ACLs and doesn''t prevent new public ACLs from + being set. When set to true causes Amazon S3 to:' + type: boolean + restrictPublicBuckets: + description: 'Whether Amazon S3 should restrict public + bucket policies for buckets in this account. Defaults + to true. Enabling this setting does not affect previously + stored bucket policies, except that public and cross-account + access within any public bucket policy, including non-public + delegation to specific accounts, is blocked. When set + to true:' + type: boolean + type: object + region: + description: The Region configuration block to specify the + bucket associated with the Multi-Region Access Point. See + Region Configuration below for more details. + items: + properties: + bucket: + description: The name of the associated bucket for the + Region. 
+ type: string + bucketAccountId: + description: The AWS account ID that owns the Amazon + S3 bucket that's associated with this Multi-Region + Access Point. + type: string + region: + description: The Region configuration block to specify + the bucket associated with the Multi-Region Access + Point. See Region Configuration below for more details. + type: string + type: object + type: array + type: object + domainName: + description: The DNS domain name of the S3 Multi-Region Access + Point in the format alias.accesspoint.s3-global.amazonaws.com. + For more information, see the documentation on Multi-Region + Access Point Requests. + type: string + id: + description: The AWS account ID and access point name separated + by a colon (:). + type: string + status: + description: 'The current status of the Multi-Region Access Point. + One of: READY, INCONSISTENT_ACROSS_REGIONS, CREATING, PARTIALLY_CREATED, + PARTIALLY_DELETED, DELETING.' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3control.aws.upbound.io_objectlambdaaccesspoints.yaml b/package/crds/s3control.aws.upbound.io_objectlambdaaccesspoints.yaml index 7956370734..59e82977d4 100644 --- a/package/crds/s3control.aws.upbound.io_objectlambdaaccesspoints.yaml +++ b/package/crds/s3control.aws.upbound.io_objectlambdaaccesspoints.yaml @@ -863,3 +863,828 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ObjectLambdaAccessPoint is the Schema for the ObjectLambdaAccessPoints + API. Provides a resource to manage an S3 Object Lambda Access Point. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ObjectLambdaAccessPointSpec defines the desired state of + ObjectLambdaAccessPoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountId: + description: The AWS account ID for the owner of the bucket for + which you want to create an Object Lambda Access Point. + type: string + configuration: + description: A configuration block containing details about the + Object Lambda Access Point. See Configuration below for more + details. + properties: + allowedFeatures: + description: 'Allowed features. Valid values: GetObject-Range, + GetObject-PartNumber.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + cloudWatchMetricsEnabled: + description: Whether or not the CloudWatch metrics configuration + is enabled. + type: boolean + supportingAccessPoint: + description: Standard access point associated with the Object + Lambda Access Point. + type: string + supportingAccessPointRef: + description: Reference to a AccessPoint in s3control to populate + supportingAccessPoint. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + supportingAccessPointSelector: + description: Selector for a AccessPoint in s3control to populate + supportingAccessPoint. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + transformationConfiguration: + description: List of transformation configurations for the + Object Lambda Access Point. See Transformation Configuration + below for more details. + items: + properties: + actions: + description: 'The actions of an Object Lambda Access + Point configuration. Valid values: GetObject.' + items: + type: string + type: array + x-kubernetes-list-type: set + contentTransformation: + description: The content transformation of an Object + Lambda Access Point configuration. See Content Transformation + below for more details. + properties: + awsLambda: + description: Configuration for an AWS Lambda function. + See AWS Lambda below for more details. + properties: + functionArn: + description: The Amazon Resource Name (ARN) + of the AWS Lambda function. + type: string + functionArnRef: + description: Reference to a Function in lambda + to populate functionArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionArnSelector: + description: Selector for a Function in lambda + to populate functionArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + functionPayload: + description: Additional JSON that provides supplemental + data to the Lambda function used to transform + objects. + type: string + type: object + type: object + type: object + type: array + type: object + name: + description: The name for this Object Lambda Access Point. 
+ type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountId: + description: The AWS account ID for the owner of the bucket for + which you want to create an Object Lambda Access Point. + type: string + configuration: + description: A configuration block containing details about the + Object Lambda Access Point. See Configuration below for more + details. + properties: + allowedFeatures: + description: 'Allowed features. Valid values: GetObject-Range, + GetObject-PartNumber.' + items: + type: string + type: array + x-kubernetes-list-type: set + cloudWatchMetricsEnabled: + description: Whether or not the CloudWatch metrics configuration + is enabled. + type: boolean + supportingAccessPoint: + description: Standard access point associated with the Object + Lambda Access Point. + type: string + supportingAccessPointRef: + description: Reference to a AccessPoint in s3control to populate + supportingAccessPoint. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + supportingAccessPointSelector: + description: Selector for a AccessPoint in s3control to populate + supportingAccessPoint. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + transformationConfiguration: + description: List of transformation configurations for the + Object Lambda Access Point. 
See Transformation Configuration + below for more details. + items: + properties: + actions: + description: 'The actions of an Object Lambda Access + Point configuration. Valid values: GetObject.' + items: + type: string + type: array + x-kubernetes-list-type: set + contentTransformation: + description: The content transformation of an Object + Lambda Access Point configuration. See Content Transformation + below for more details. + properties: + awsLambda: + description: Configuration for an AWS Lambda function. + See AWS Lambda below for more details. + properties: + functionArn: + description: The Amazon Resource Name (ARN) + of the AWS Lambda function. + type: string + functionArnRef: + description: Reference to a Function in lambda + to populate functionArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionArnSelector: + description: Selector for a Function in lambda + to populate functionArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + functionPayload: + description: Additional JSON that provides supplemental + data to the Lambda function used to transform + objects. + type: string + type: object + type: object + type: object + type: array + type: object + name: + description: The name for this Object Lambda Access Point. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.configuration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.configuration) + || (has(self.initProvider) && has(self.initProvider.configuration))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ObjectLambdaAccessPointStatus defines the observed state + of ObjectLambdaAccessPoint. + properties: + atProvider: + properties: + accountId: + description: The AWS account ID for the owner of the bucket for + which you want to create an Object Lambda Access Point. + type: string + alias: + description: Alias for the S3 Object Lambda Access Point. 
+ type: string + arn: + description: Amazon Resource Name (ARN) of the Object Lambda Access + Point. + type: string + configuration: + description: A configuration block containing details about the + Object Lambda Access Point. See Configuration below for more + details. + properties: + allowedFeatures: + description: 'Allowed features. Valid values: GetObject-Range, + GetObject-PartNumber.' + items: + type: string + type: array + x-kubernetes-list-type: set + cloudWatchMetricsEnabled: + description: Whether or not the CloudWatch metrics configuration + is enabled. + type: boolean + supportingAccessPoint: + description: Standard access point associated with the Object + Lambda Access Point. + type: string + transformationConfiguration: + description: List of transformation configurations for the + Object Lambda Access Point. See Transformation Configuration + below for more details. + items: + properties: + actions: + description: 'The actions of an Object Lambda Access + Point configuration. Valid values: GetObject.' + items: + type: string + type: array + x-kubernetes-list-type: set + contentTransformation: + description: The content transformation of an Object + Lambda Access Point configuration. See Content Transformation + below for more details. + properties: + awsLambda: + description: Configuration for an AWS Lambda function. + See AWS Lambda below for more details. + properties: + functionArn: + description: The Amazon Resource Name (ARN) + of the AWS Lambda function. + type: string + functionPayload: + description: Additional JSON that provides supplemental + data to the Lambda function used to transform + objects. + type: string + type: object + type: object + type: object + type: array + type: object + id: + description: The AWS account ID and access point name separated + by a colon (:). + type: string + name: + description: The name for this Object Lambda Access Point. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/s3control.aws.upbound.io_storagelensconfigurations.yaml b/package/crds/s3control.aws.upbound.io_storagelensconfigurations.yaml index 2d6ddbea96..ceca227e80 100644 --- a/package/crds/s3control.aws.upbound.io_storagelensconfigurations.yaml +++ b/package/crds/s3control.aws.upbound.io_storagelensconfigurations.yaml @@ -1387,3 +1387,1222 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StorageLensConfiguration is the Schema for the StorageLensConfigurations + API. Provides a resource to manage an S3 Storage Lens configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StorageLensConfigurationSpec defines the desired state of + StorageLensConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountId: + description: The AWS account ID for the S3 Storage Lens configuration. + type: string + configId: + description: The ID of the S3 Storage Lens configuration. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + storageLensConfiguration: + description: The S3 Storage Lens configuration. See Storage Lens + Configuration below for more details. + properties: + accountLevel: + description: level configurations of the S3 Storage Lens configuration. + See Account Level below for more details. + properties: + activityMetrics: + description: S3 Storage Lens activity metrics. See Activity + Metrics below for more details. + properties: + enabled: + description: Whether the activity metrics are enabled. + type: boolean + type: object + advancedCostOptimizationMetrics: + description: optimization metrics for S3 Storage Lens. + See Advanced Cost-Optimization Metrics below for more + details. 
+ properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + advancedDataProtectionMetrics: + description: protection metrics for S3 Storage Lens. See + Advanced Data-Protection Metrics below for more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + bucketLevel: + description: level configuration. See Bucket Level below + for more details. + properties: + activityMetrics: + description: S3 Storage Lens activity metrics. See + Activity Metrics below for more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + advancedCostOptimizationMetrics: + description: optimization metrics for S3 Storage Lens. + See Advanced Cost-Optimization Metrics below for + more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + advancedDataProtectionMetrics: + description: protection metrics for S3 Storage Lens. + See Advanced Data-Protection Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + detailedStatusCodeMetrics: + description: Detailed status code metrics for S3 Storage + Lens. See Detailed Status Code Metrics below for + more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + prefixLevel: + description: level metrics for S3 Storage Lens. See + Prefix Level below for more details. + properties: + storageMetrics: + description: level storage metrics for S3 Storage + Lens. See Prefix Level Storage Metrics below + for more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. 
+ type: boolean + selectionCriteria: + description: Selection criteria. See Selection + Criteria below for more details. + properties: + delimiter: + description: The delimiter of the selection + criteria being used. + type: string + maxDepth: + description: The max depth of the selection + criteria. + type: number + minStorageBytesPercentage: + description: The minimum number of storage + bytes percentage whose metrics will + be selected. + type: number + type: object + type: object + type: object + type: object + detailedStatusCodeMetrics: + description: Detailed status code metrics for S3 Storage + Lens. See Detailed Status Code Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + type: object + awsOrg: + description: The Amazon Web Services organization for the + S3 Storage Lens configuration. See AWS Org below for more + details. + properties: + arn: + description: The Amazon Resource Name (ARN) of the bucket. + type: string + type: object + dataExport: + description: Properties of S3 Storage Lens metrics export + including the destination, schema and format. See Data Export + below for more details. + properties: + cloudWatchMetrics: + description: Amazon CloudWatch publishing for S3 Storage + Lens metrics. See Cloud Watch Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + s3BucketDestination: + description: The bucket where the S3 Storage Lens metrics + export will be located. See S3 Bucket Destination below + for more details. + properties: + accountId: + description: The account ID of the owner of the S3 + Storage Lens metrics export bucket. + type: string + arn: + description: The Amazon Resource Name (ARN) of the + bucket. + type: string + arnRef: + description: Reference to a Bucket in s3 to populate + arn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a Bucket in s3 to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + encryption: + description: Encryption of the metrics exports in + this bucket. See Encryption below for more details. + properties: + sseKms: + description: KMS encryption. See SSE KMS below + for more details. + properties: + keyId: + description: KMS key ARN. + type: string + type: object + sseS3: + description: S3 encryption. An empty configuration + block {} should be used. + items: + type: object + type: array + type: object + format: + description: 'The export format. Valid values: CSV, + Parquet.' + type: string + outputSchemaVersion: + description: 'The schema version of the export file. + Valid values: V_1.' + type: string + prefix: + description: The prefix of the destination bucket + where the metrics export will be delivered. + type: string + type: object + type: object + enabled: + description: Whether the S3 Storage Lens configuration is + enabled. + type: boolean + exclude: + description: What is excluded in this configuration. Conflicts + with include. See Exclude below for more details. + properties: + buckets: + description: List of S3 bucket ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + regions: + description: List of AWS Regions. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + include: + description: What is included in this configuration. Conflicts + with exclude. See Include below for more details. + properties: + buckets: + description: List of S3 bucket ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + regions: + description: List of AWS Regions. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accountId: + description: The AWS account ID for the S3 Storage Lens configuration. + type: string + configId: + description: The ID of the S3 Storage Lens configuration. + type: string + storageLensConfiguration: + description: The S3 Storage Lens configuration. See Storage Lens + Configuration below for more details. + properties: + accountLevel: + description: level configurations of the S3 Storage Lens configuration. + See Account Level below for more details. + properties: + activityMetrics: + description: S3 Storage Lens activity metrics. See Activity + Metrics below for more details. + properties: + enabled: + description: Whether the activity metrics are enabled. + type: boolean + type: object + advancedCostOptimizationMetrics: + description: optimization metrics for S3 Storage Lens. + See Advanced Cost-Optimization Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + advancedDataProtectionMetrics: + description: protection metrics for S3 Storage Lens. See + Advanced Data-Protection Metrics below for more details. 
+ properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + bucketLevel: + description: level configuration. See Bucket Level below + for more details. + properties: + activityMetrics: + description: S3 Storage Lens activity metrics. See + Activity Metrics below for more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + advancedCostOptimizationMetrics: + description: optimization metrics for S3 Storage Lens. + See Advanced Cost-Optimization Metrics below for + more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + advancedDataProtectionMetrics: + description: protection metrics for S3 Storage Lens. + See Advanced Data-Protection Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + detailedStatusCodeMetrics: + description: Detailed status code metrics for S3 Storage + Lens. See Detailed Status Code Metrics below for + more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + prefixLevel: + description: level metrics for S3 Storage Lens. See + Prefix Level below for more details. + properties: + storageMetrics: + description: level storage metrics for S3 Storage + Lens. See Prefix Level Storage Metrics below + for more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + selectionCriteria: + description: Selection criteria. See Selection + Criteria below for more details. + properties: + delimiter: + description: The delimiter of the selection + criteria being used. + type: string + maxDepth: + description: The max depth of the selection + criteria. 
+ type: number + minStorageBytesPercentage: + description: The minimum number of storage + bytes percentage whose metrics will + be selected. + type: number + type: object + type: object + type: object + type: object + detailedStatusCodeMetrics: + description: Detailed status code metrics for S3 Storage + Lens. See Detailed Status Code Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + type: object + awsOrg: + description: The Amazon Web Services organization for the + S3 Storage Lens configuration. See AWS Org below for more + details. + properties: + arn: + description: The Amazon Resource Name (ARN) of the bucket. + type: string + type: object + dataExport: + description: Properties of S3 Storage Lens metrics export + including the destination, schema and format. See Data Export + below for more details. + properties: + cloudWatchMetrics: + description: Amazon CloudWatch publishing for S3 Storage + Lens metrics. See Cloud Watch Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + s3BucketDestination: + description: The bucket where the S3 Storage Lens metrics + export will be located. See S3 Bucket Destination below + for more details. + properties: + accountId: + description: The account ID of the owner of the S3 + Storage Lens metrics export bucket. + type: string + arn: + description: The Amazon Resource Name (ARN) of the + bucket. + type: string + arnRef: + description: Reference to a Bucket in s3 to populate + arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a Bucket in s3 to populate + arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + encryption: + description: Encryption of the metrics exports in + this bucket. See Encryption below for more details. + properties: + sseKms: + description: KMS encryption. 
See SSE KMS below + for more details. + properties: + keyId: + description: KMS key ARN. + type: string + type: object + sseS3: + description: S3 encryption. An empty configuration + block {} should be used. + items: + type: object + type: array + type: object + format: + description: 'The export format. Valid values: CSV, + Parquet.' + type: string + outputSchemaVersion: + description: 'The schema version of the export file. + Valid values: V_1.' + type: string + prefix: + description: The prefix of the destination bucket + where the metrics export will be delivered. + type: string + type: object + type: object + enabled: + description: Whether the S3 Storage Lens configuration is + enabled. + type: boolean + exclude: + description: What is excluded in this configuration. Conflicts + with include. See Exclude below for more details. + properties: + buckets: + description: List of S3 bucket ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + regions: + description: List of AWS Regions. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + include: + description: What is included in this configuration. Conflicts + with exclude. See Include below for more details. + properties: + buckets: + description: List of S3 bucket ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + regions: + description: List of AWS Regions. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.configId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.configId) + || (has(self.initProvider) && has(self.initProvider.configId))' + - message: spec.forProvider.storageLensConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageLensConfiguration) + || (has(self.initProvider) && has(self.initProvider.storageLensConfiguration))' + status: + description: StorageLensConfigurationStatus defines the observed state + of StorageLensConfiguration. + properties: + atProvider: + properties: + accountId: + description: The AWS account ID for the S3 Storage Lens configuration. + type: string + arn: + description: Amazon Resource Name (ARN) of the S3 Storage Lens + configuration. + type: string + configId: + description: The ID of the S3 Storage Lens configuration. + type: string + id: + type: string + storageLensConfiguration: + description: The S3 Storage Lens configuration. See Storage Lens + Configuration below for more details. + properties: + accountLevel: + description: level configurations of the S3 Storage Lens configuration. + See Account Level below for more details. + properties: + activityMetrics: + description: S3 Storage Lens activity metrics. See Activity + Metrics below for more details. + properties: + enabled: + description: Whether the activity metrics are enabled. + type: boolean + type: object + advancedCostOptimizationMetrics: + description: optimization metrics for S3 Storage Lens. + See Advanced Cost-Optimization Metrics below for more + details. 
+ properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + advancedDataProtectionMetrics: + description: protection metrics for S3 Storage Lens. See + Advanced Data-Protection Metrics below for more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + bucketLevel: + description: level configuration. See Bucket Level below + for more details. + properties: + activityMetrics: + description: S3 Storage Lens activity metrics. See + Activity Metrics below for more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + advancedCostOptimizationMetrics: + description: optimization metrics for S3 Storage Lens. + See Advanced Cost-Optimization Metrics below for + more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + advancedDataProtectionMetrics: + description: protection metrics for S3 Storage Lens. + See Advanced Data-Protection Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + detailedStatusCodeMetrics: + description: Detailed status code metrics for S3 Storage + Lens. See Detailed Status Code Metrics below for + more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + prefixLevel: + description: level metrics for S3 Storage Lens. See + Prefix Level below for more details. + properties: + storageMetrics: + description: level storage metrics for S3 Storage + Lens. See Prefix Level Storage Metrics below + for more details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. 
+ type: boolean + selectionCriteria: + description: Selection criteria. See Selection + Criteria below for more details. + properties: + delimiter: + description: The delimiter of the selection + criteria being used. + type: string + maxDepth: + description: The max depth of the selection + criteria. + type: number + minStorageBytesPercentage: + description: The minimum number of storage + bytes percentage whose metrics will + be selected. + type: number + type: object + type: object + type: object + type: object + detailedStatusCodeMetrics: + description: Detailed status code metrics for S3 Storage + Lens. See Detailed Status Code Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + type: object + awsOrg: + description: The Amazon Web Services organization for the + S3 Storage Lens configuration. See AWS Org below for more + details. + properties: + arn: + description: The Amazon Resource Name (ARN) of the bucket. + type: string + type: object + dataExport: + description: Properties of S3 Storage Lens metrics export + including the destination, schema and format. See Data Export + below for more details. + properties: + cloudWatchMetrics: + description: Amazon CloudWatch publishing for S3 Storage + Lens metrics. See Cloud Watch Metrics below for more + details. + properties: + enabled: + description: Whether the S3 Storage Lens configuration + is enabled. + type: boolean + type: object + s3BucketDestination: + description: The bucket where the S3 Storage Lens metrics + export will be located. See S3 Bucket Destination below + for more details. + properties: + accountId: + description: The account ID of the owner of the S3 + Storage Lens metrics export bucket. + type: string + arn: + description: The Amazon Resource Name (ARN) of the + bucket. + type: string + encryption: + description: Encryption of the metrics exports in + this bucket. 
See Encryption below for more details. + properties: + sseKms: + description: KMS encryption. See SSE KMS below + for more details. + properties: + keyId: + description: KMS key ARN. + type: string + type: object + sseS3: + description: S3 encryption. An empty configuration + block {} should be used. + items: + type: object + type: array + type: object + format: + description: 'The export format. Valid values: CSV, + Parquet.' + type: string + outputSchemaVersion: + description: 'The schema version of the export file. + Valid values: V_1.' + type: string + prefix: + description: The prefix of the destination bucket + where the metrics export will be delivered. + type: string + type: object + type: object + enabled: + description: Whether the S3 Storage Lens configuration is + enabled. + type: boolean + exclude: + description: What is excluded in this configuration. Conflicts + with include. See Exclude below for more details. + properties: + buckets: + description: List of S3 bucket ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + regions: + description: List of AWS Regions. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + include: + description: What is included in this configuration. Conflicts + with exclude. See Include below for more details. + properties: + buckets: + description: List of S3 bucket ARNs. + items: + type: string + type: array + x-kubernetes-list-type: set + regions: + description: List of AWS Regions. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. 
+ type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_appimageconfigs.yaml b/package/crds/sagemaker.aws.upbound.io_appimageconfigs.yaml index 32314e7bee..f56a90d6c9 100644 --- a/package/crds/sagemaker.aws.upbound.io_appimageconfigs.yaml +++ b/package/crds/sagemaker.aws.upbound.io_appimageconfigs.yaml @@ -838,3 +838,751 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AppImageConfig is the Schema for the AppImageConfigs API. Provides + a SageMaker App Image Config resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AppImageConfigSpec defines the desired state of AppImageConfig + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + codeEditorAppImageConfig: + description: The CodeEditorAppImageConfig. You can only specify + one image kernel in the AppImageConfig API. This kernel is shown + to users before the image starts. After the image runs, all + kernels are visible in Code Editor. See Code Editor App Image + Config details below. + properties: + containerConfig: + description: The configuration used to run the application + image container. See Container Config details below. + properties: + containerArguments: + description: The arguments for the container when you're + running the application. + items: + type: string + type: array + containerEntrypoint: + description: The entrypoint used to run the application + in the container. + items: + type: string + type: array + containerEnvironmentVariables: + additionalProperties: + type: string + description: The environment variables to set in the container. + type: object + x-kubernetes-map-type: granular + type: object + fileSystemConfig: + description: The URL where the Git repository is located. 
+ See File System Config details below. + properties: + defaultGid: + description: The default POSIX group ID (GID). If not + specified, defaults to 100. Valid values are 0 and 100. + type: number + defaultUid: + description: The default POSIX user ID (UID). If not specified, + defaults to 1000. Valid values are 0 and 1000. + type: number + mountPath: + description: The path within the image to mount the user's + EFS home directory. The directory should be empty. If + not specified, defaults to /home/sagemaker-user. + type: string + type: object + type: object + jupyterLabImageConfig: + description: The JupyterLabAppImageConfig. You can only specify + one image kernel in the AppImageConfig API. This kernel is shown + to users before the image starts. After the image runs, all + kernels are visible in JupyterLab. See Jupyter Lab Image Config + details below. + properties: + containerConfig: + description: The configuration used to run the application + image container. See Container Config details below. + properties: + containerArguments: + description: The arguments for the container when you're + running the application. + items: + type: string + type: array + containerEntrypoint: + description: The entrypoint used to run the application + in the container. + items: + type: string + type: array + containerEnvironmentVariables: + additionalProperties: + type: string + description: The environment variables to set in the container. + type: object + x-kubernetes-map-type: granular + type: object + fileSystemConfig: + description: The URL where the Git repository is located. + See File System Config details below. + properties: + defaultGid: + description: The default POSIX group ID (GID). If not + specified, defaults to 100. Valid values are 0 and 100. + type: number + defaultUid: + description: The default POSIX user ID (UID). If not specified, + defaults to 1000. Valid values are 0 and 1000. 
+ type: number + mountPath: + description: The path within the image to mount the user's + EFS home directory. The directory should be empty. If + not specified, defaults to /home/sagemaker-user. + type: string + type: object + type: object + kernelGatewayImageConfig: + description: The configuration for the file system and kernels + in a SageMaker image running as a KernelGateway app. See Kernel + Gateway Image Config details below. + properties: + fileSystemConfig: + description: The URL where the Git repository is located. + See File System Config details below. + properties: + defaultGid: + description: The default POSIX group ID (GID). If not + specified, defaults to 100. Valid values are 0 and 100. + type: number + defaultUid: + description: The default POSIX user ID (UID). If not specified, + defaults to 1000. Valid values are 0 and 1000. + type: number + mountPath: + description: The path within the image to mount the user's + EFS home directory. The directory should be empty. If + not specified, defaults to /home/sagemaker-user. + type: string + type: object + kernelSpec: + description: The default branch for the Git repository. See + Kernel Spec details below. + items: + properties: + displayName: + description: The display name of the kernel. + type: string + name: + description: The name of the kernel. + type: string + type: object + type: array + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + codeEditorAppImageConfig: + description: The CodeEditorAppImageConfig. You can only specify + one image kernel in the AppImageConfig API. This kernel is shown + to users before the image starts. After the image runs, all + kernels are visible in Code Editor. See Code Editor App Image + Config details below. + properties: + containerConfig: + description: The configuration used to run the application + image container. See Container Config details below. + properties: + containerArguments: + description: The arguments for the container when you're + running the application. + items: + type: string + type: array + containerEntrypoint: + description: The entrypoint used to run the application + in the container. + items: + type: string + type: array + containerEnvironmentVariables: + additionalProperties: + type: string + description: The environment variables to set in the container. + type: object + x-kubernetes-map-type: granular + type: object + fileSystemConfig: + description: The URL where the Git repository is located. + See File System Config details below. + properties: + defaultGid: + description: The default POSIX group ID (GID). If not + specified, defaults to 100. Valid values are 0 and 100. + type: number + defaultUid: + description: The default POSIX user ID (UID). If not specified, + defaults to 1000. Valid values are 0 and 1000. + type: number + mountPath: + description: The path within the image to mount the user's + EFS home directory. The directory should be empty. If + not specified, defaults to /home/sagemaker-user. 
+ type: string + type: object + type: object + jupyterLabImageConfig: + description: The JupyterLabAppImageConfig. You can only specify + one image kernel in the AppImageConfig API. This kernel is shown + to users before the image starts. After the image runs, all + kernels are visible in JupyterLab. See Jupyter Lab Image Config + details below. + properties: + containerConfig: + description: The configuration used to run the application + image container. See Container Config details below. + properties: + containerArguments: + description: The arguments for the container when you're + running the application. + items: + type: string + type: array + containerEntrypoint: + description: The entrypoint used to run the application + in the container. + items: + type: string + type: array + containerEnvironmentVariables: + additionalProperties: + type: string + description: The environment variables to set in the container. + type: object + x-kubernetes-map-type: granular + type: object + fileSystemConfig: + description: The URL where the Git repository is located. + See File System Config details below. + properties: + defaultGid: + description: The default POSIX group ID (GID). If not + specified, defaults to 100. Valid values are 0 and 100. + type: number + defaultUid: + description: The default POSIX user ID (UID). If not specified, + defaults to 1000. Valid values are 0 and 1000. + type: number + mountPath: + description: The path within the image to mount the user's + EFS home directory. The directory should be empty. If + not specified, defaults to /home/sagemaker-user. + type: string + type: object + type: object + kernelGatewayImageConfig: + description: The configuration for the file system and kernels + in a SageMaker image running as a KernelGateway app. See Kernel + Gateway Image Config details below. + properties: + fileSystemConfig: + description: The URL where the Git repository is located. + See File System Config details below. 
+ properties: + defaultGid: + description: The default POSIX group ID (GID). If not + specified, defaults to 100. Valid values are 0 and 100. + type: number + defaultUid: + description: The default POSIX user ID (UID). If not specified, + defaults to 1000. Valid values are 0 and 1000. + type: number + mountPath: + description: The path within the image to mount the user's + EFS home directory. The directory should be empty. If + not specified, defaults to /home/sagemaker-user. + type: string + type: object + kernelSpec: + description: The default branch for the Git repository. See + Kernel Spec details below. + items: + properties: + displayName: + description: The display name of the kernel. + type: string + name: + description: The name of the kernel. + type: string + type: object + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AppImageConfigStatus defines the observed state of AppImageConfig. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this App Image Config. + type: string + codeEditorAppImageConfig: + description: The CodeEditorAppImageConfig. You can only specify + one image kernel in the AppImageConfig API. This kernel is shown + to users before the image starts. After the image runs, all + kernels are visible in Code Editor. See Code Editor App Image + Config details below. + properties: + containerConfig: + description: The configuration used to run the application + image container. See Container Config details below. + properties: + containerArguments: + description: The arguments for the container when you're + running the application. + items: + type: string + type: array + containerEntrypoint: + description: The entrypoint used to run the application + in the container. + items: + type: string + type: array + containerEnvironmentVariables: + additionalProperties: + type: string + description: The environment variables to set in the container. + type: object + x-kubernetes-map-type: granular + type: object + fileSystemConfig: + description: The URL where the Git repository is located. + See File System Config details below. + properties: + defaultGid: + description: The default POSIX group ID (GID). 
If not + specified, defaults to 100. Valid values are 0 and 100. + type: number + defaultUid: + description: The default POSIX user ID (UID). If not specified, + defaults to 1000. Valid values are 0 and 1000. + type: number + mountPath: + description: The path within the image to mount the user's + EFS home directory. The directory should be empty. If + not specified, defaults to /home/sagemaker-user. + type: string + type: object + type: object + id: + description: The name of the App Image Config. + type: string + jupyterLabImageConfig: + description: The JupyterLabAppImageConfig. You can only specify + one image kernel in the AppImageConfig API. This kernel is shown + to users before the image starts. After the image runs, all + kernels are visible in JupyterLab. See Jupyter Lab Image Config + details below. + properties: + containerConfig: + description: The configuration used to run the application + image container. See Container Config details below. + properties: + containerArguments: + description: The arguments for the container when you're + running the application. + items: + type: string + type: array + containerEntrypoint: + description: The entrypoint used to run the application + in the container. + items: + type: string + type: array + containerEnvironmentVariables: + additionalProperties: + type: string + description: The environment variables to set in the container. + type: object + x-kubernetes-map-type: granular + type: object + fileSystemConfig: + description: The URL where the Git repository is located. + See File System Config details below. + properties: + defaultGid: + description: The default POSIX group ID (GID). If not + specified, defaults to 100. Valid values are 0 and 100. + type: number + defaultUid: + description: The default POSIX user ID (UID). If not specified, + defaults to 1000. Valid values are 0 and 1000. + type: number + mountPath: + description: The path within the image to mount the user's + EFS home directory. 
The directory should be empty. If + not specified, defaults to /home/sagemaker-user. + type: string + type: object + type: object + kernelGatewayImageConfig: + description: The configuration for the file system and kernels + in a SageMaker image running as a KernelGateway app. See Kernel + Gateway Image Config details below. + properties: + fileSystemConfig: + description: The URL where the Git repository is located. + See File System Config details below. + properties: + defaultGid: + description: The default POSIX group ID (GID). If not + specified, defaults to 100. Valid values are 0 and 100. + type: number + defaultUid: + description: The default POSIX user ID (UID). If not specified, + defaults to 1000. Valid values are 0 and 1000. + type: number + mountPath: + description: The path within the image to mount the user's + EFS home directory. The directory should be empty. If + not specified, defaults to /home/sagemaker-user. + type: string + type: object + kernelSpec: + description: The default branch for the Git repository. See + Kernel Spec details below. + items: + properties: + displayName: + description: The display name of the kernel. + type: string + name: + description: The name of the kernel. + type: string + type: object + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_apps.yaml b/package/crds/sagemaker.aws.upbound.io_apps.yaml index a3f282c032..da889772f6 100644 --- a/package/crds/sagemaker.aws.upbound.io_apps.yaml +++ b/package/crds/sagemaker.aws.upbound.io_apps.yaml @@ -804,3 +804,780 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: App is the Schema for the Apps API. Provides a SageMaker App + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AppSpec defines the desired state of App + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appName: + description: The name of the app. + type: string + appType: + description: The type of app. Valid values are JupyterServer, + KernelGateway, RStudioServerPro, RSessionGateway and TensorBoard. + type: string + domainId: + description: The domain ID. + type: string + domainIdRef: + description: Reference to a Domain in sagemaker to populate domainId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainIdSelector: + description: Selector for a Domain in sagemaker to populate domainId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + resourceSpec: + description: The instance type and the Amazon Resource Name (ARN) + of the SageMaker image created on the instance.See Resource + Spec below. + properties: + instanceType: + description: The instance type that the image version runs + on. For valid values see SageMaker Instance Types. 
+ type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the image + version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created on the instance. + type: string + type: object + spaceName: + description: The name of the space. At least one of user_profile_name + or space_name required. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userProfileName: + description: The user profile name. At least one of user_profile_name + or space_name required. + type: string + userProfileNameRef: + description: Reference to a UserProfile in sagemaker to populate + userProfileName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userProfileNameSelector: + description: Selector for a UserProfile in sagemaker to populate + userProfileName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appName: + description: The name of the app. + type: string + appType: + description: The type of app. Valid values are JupyterServer, + KernelGateway, RStudioServerPro, RSessionGateway and TensorBoard. + type: string + domainId: + description: The domain ID. + type: string + domainIdRef: + description: Reference to a Domain in sagemaker to populate domainId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainIdSelector: + description: Selector for a Domain in sagemaker to populate domainId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceSpec: + description: The instance type and the Amazon Resource Name (ARN) + of the SageMaker image created on the instance.See Resource + Spec below. + properties: + instanceType: + description: The instance type that the image version runs + on. For valid values see SageMaker Instance Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the image + version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created on the instance. + type: string + type: object + spaceName: + description: The name of the space. At least one of user_profile_name + or space_name required. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userProfileName: + description: The user profile name. At least one of user_profile_name + or space_name required. 
+ type: string + userProfileNameRef: + description: Reference to a UserProfile in sagemaker to populate + userProfileName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userProfileNameSelector: + description: Selector for a UserProfile in sagemaker to populate + userProfileName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.appName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.appName) + || (has(self.initProvider) && has(self.initProvider.appName))' + - message: spec.forProvider.appType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.appType) + || (has(self.initProvider) && has(self.initProvider.appType))' + status: + description: AppStatus defines the observed state of App. + properties: + atProvider: + properties: + appName: + description: The name of the app. + type: string + appType: + description: The type of app. Valid values are JupyterServer, + KernelGateway, RStudioServerPro, RSessionGateway and TensorBoard. + type: string + arn: + description: The Amazon Resource Name (ARN) of the app. + type: string + domainId: + description: The domain ID. + type: string + id: + description: The Amazon Resource Name (ARN) of the app. + type: string + resourceSpec: + description: The instance type and the Amazon Resource Name (ARN) + of the SageMaker image created on the instance.See Resource + Spec below. + properties: + instanceType: + description: The instance type that the image version runs + on. For valid values see SageMaker Instance Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the image + version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. 
+ type: string + sagemakerImageVersionArn: + description: The ARN of the image version created on the instance. + type: string + type: object + spaceName: + description: The name of the space. At least one of user_profile_name + or space_name required. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + userProfileName: + description: The user profile name. At least one of user_profile_name + or space_name required. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_coderepositories.yaml b/package/crds/sagemaker.aws.upbound.io_coderepositories.yaml index 08ec3b2492..d0572fc7eb 100644 --- a/package/crds/sagemaker.aws.upbound.io_coderepositories.yaml +++ b/package/crds/sagemaker.aws.upbound.io_coderepositories.yaml @@ -578,3 +578,557 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CodeRepository is the Schema for the CodeRepositorys API. Provides + a SageMaker Code Repository resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CodeRepositorySpec defines the desired state of CodeRepository + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + gitConfig: + description: Specifies details about the repository. see Git Config + details below. + properties: + branch: + description: The default branch for the Git repository. + type: string + repositoryUrl: + description: The URL where the Git repository is located. + type: string + secretArn: + description: 'The Amazon Resource Name (ARN) of the AWS Secrets + Manager secret that contains the credentials used to access + the git repository. The secret must have a staging label + of AWSCURRENT and must be in the following format: {"username": + UserName, "password": Password}' + type: string + secretArnRef: + description: Reference to a Secret in secretsmanager to populate + secretArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretArnSelector: + description: Selector for a Secret in secretsmanager to populate + secretArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + gitConfig: + description: Specifies details about the repository. see Git Config + details below. + properties: + branch: + description: The default branch for the Git repository. + type: string + repositoryUrl: + description: The URL where the Git repository is located. + type: string + secretArn: + description: 'The Amazon Resource Name (ARN) of the AWS Secrets + Manager secret that contains the credentials used to access + the git repository. The secret must have a staging label + of AWSCURRENT and must be in the following format: {"username": + UserName, "password": Password}' + type: string + secretArnRef: + description: Reference to a Secret in secretsmanager to populate + secretArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretArnSelector: + description: Selector for a Secret in secretsmanager to populate + secretArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.gitConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gitConfig) + || (has(self.initProvider) && has(self.initProvider.gitConfig))' + status: + description: CodeRepositoryStatus defines the observed state of CodeRepository. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this Code Repository. + type: string + gitConfig: + description: Specifies details about the repository. 
see Git Config + details below. + properties: + branch: + description: The default branch for the Git repository. + type: string + repositoryUrl: + description: The URL where the Git repository is located. + type: string + secretArn: + description: 'The Amazon Resource Name (ARN) of the AWS Secrets + Manager secret that contains the credentials used to access + the git repository. The secret must have a staging label + of AWSCURRENT and must be in the following format: {"username": + UserName, "password": Password}' + type: string + type: object + id: + description: The name of the Code Repository. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_devicefleet.yaml b/package/crds/sagemaker.aws.upbound.io_devicefleet.yaml index 6686c810f8..09356a42b7 100644 --- a/package/crds/sagemaker.aws.upbound.io_devicefleet.yaml +++ b/package/crds/sagemaker.aws.upbound.io_devicefleet.yaml @@ -603,3 +603,582 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DeviceFleet is the Schema for the DeviceFleets API. Provides + a SageMaker Device Fleet resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DeviceFleetSpec defines the desired state of DeviceFleet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description of the fleet. + type: string + enableIotRoleAlias: + description: 'Whether to create an AWS IoT Role Alias during device + fleet creation. The name of the role alias generated will match + this pattern: "SageMakerEdge-{DeviceFleetName}".' + type: boolean + outputConfig: + description: Specifies details about the repository. see Output + Config details below. + properties: + kmsKeyId: + description: The AWS Key Management Service (AWS KMS) key + that Amazon SageMaker uses to encrypt data on the storage + volume after compilation job. If you don't provide a KMS + key ID, Amazon SageMaker uses the default KMS key for Amazon + S3 for your role's account. 
+ type: string + s3OutputLocation: + description: The Amazon Simple Storage (S3) bucker URI. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The Amazon Resource Name (ARN) that has access to + AWS Internet of Things (IoT). + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description of the fleet. + type: string + enableIotRoleAlias: + description: 'Whether to create an AWS IoT Role Alias during device + fleet creation. The name of the role alias generated will match + this pattern: "SageMakerEdge-{DeviceFleetName}".' + type: boolean + outputConfig: + description: Specifies details about the repository. see Output + Config details below. 
+ properties: + kmsKeyId: + description: The AWS Key Management Service (AWS KMS) key + that Amazon SageMaker uses to encrypt data on the storage + volume after compilation job. If you don't provide a KMS + key ID, Amazon SageMaker uses the default KMS key for Amazon + S3 for your role's account. + type: string + s3OutputLocation: + description: The Amazon Simple Storage (S3) bucker URI. + type: string + type: object + roleArn: + description: The Amazon Resource Name (ARN) that has access to + AWS Internet of Things (IoT). + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.outputConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.outputConfig) + || (has(self.initProvider) && has(self.initProvider.outputConfig))' + status: + description: DeviceFleetStatus defines the observed state of DeviceFleet. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this Device Fleet. + type: string + description: + description: A description of the fleet. + type: string + enableIotRoleAlias: + description: 'Whether to create an AWS IoT Role Alias during device + fleet creation. The name of the role alias generated will match + this pattern: "SageMakerEdge-{DeviceFleetName}".' + type: boolean + id: + description: The name of the Device Fleet. + type: string + iotRoleAlias: + type: string + outputConfig: + description: Specifies details about the repository. see Output + Config details below. + properties: + kmsKeyId: + description: The AWS Key Management Service (AWS KMS) key + that Amazon SageMaker uses to encrypt data on the storage + volume after compilation job. If you don't provide a KMS + key ID, Amazon SageMaker uses the default KMS key for Amazon + S3 for your role's account. 
+ type: string + s3OutputLocation: + description: The Amazon Simple Storage (S3) bucker URI. + type: string + type: object + roleArn: + description: The Amazon Resource Name (ARN) that has access to + AWS Internet of Things (IoT). + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_devices.yaml b/package/crds/sagemaker.aws.upbound.io_devices.yaml index dff02a2a89..7c79bd5590 100644 --- a/package/crds/sagemaker.aws.upbound.io_devices.yaml +++ b/package/crds/sagemaker.aws.upbound.io_devices.yaml @@ -554,3 +554,533 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Device is the Schema for the Devices API. Provides a SageMaker + Device resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DeviceSpec defines the desired state of Device + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + device: + description: The device to register with SageMaker Edge Manager. + See Device details below. + properties: + description: + description: A description for the device. + type: string + deviceName: + description: The name of the device. + type: string + iotThingName: + description: Amazon Web Services Internet of Things (IoT) + object name. + type: string + type: object + deviceFleetName: + description: The name of the Device Fleet. + type: string + deviceFleetNameRef: + description: Reference to a DeviceFleet in sagemaker to populate + deviceFleetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deviceFleetNameSelector: + description: Selector for a DeviceFleet in sagemaker to populate + deviceFleetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + device: + description: The device to register with SageMaker Edge Manager. + See Device details below. + properties: + description: + description: A description for the device. + type: string + deviceName: + description: The name of the device. + type: string + iotThingName: + description: Amazon Web Services Internet of Things (IoT) + object name. + type: string + type: object + deviceFleetName: + description: The name of the Device Fleet. + type: string + deviceFleetNameRef: + description: Reference to a DeviceFleet in sagemaker to populate + deviceFleetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deviceFleetNameSelector: + description: Selector for a DeviceFleet in sagemaker to populate + deviceFleetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.device is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.device) + || (has(self.initProvider) && has(self.initProvider.device))' + status: + description: DeviceStatus defines the observed state of Device. + properties: + atProvider: + properties: + agentVersion: + type: string + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this Device. + type: string + device: + description: The device to register with SageMaker Edge Manager. + See Device details below. + properties: + description: + description: A description for the device. + type: string + deviceName: + description: The name of the device. + type: string + iotThingName: + description: Amazon Web Services Internet of Things (IoT) + object name. + type: string + type: object + deviceFleetName: + description: The name of the Device Fleet. 
+ type: string + id: + description: The id is constructed from device-fleet-name/device-name. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_domains.yaml b/package/crds/sagemaker.aws.upbound.io_domains.yaml index 3c9328ce4a..8d9af26ea1 100644 --- a/package/crds/sagemaker.aws.upbound.io_domains.yaml +++ b/package/crds/sagemaker.aws.upbound.io_domains.yaml @@ -3826,3 +3826,3499 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Domain is the Schema for the Domains API. Provides a SageMaker + Domain resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DomainSpec defines the desired state of Domain + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appNetworkAccessType: + description: Specifies the VPC used for non-EFS traffic. The default + value is PublicInternetOnly. Valid values are PublicInternetOnly + and VpcOnly. + type: string + appSecurityGroupManagement: + description: The entity that creates and manages the required + security groups for inter-app communication in VPCOnly mode. + Valid values are Service and Customer. + type: string + authMode: + description: The mode of authentication that members use to access + the domain. Valid values are IAM and SSO. + type: string + defaultSpaceSettings: + description: The default space settings. See default_space_settings + Block below. + properties: + executionRole: + description: The execution role for the space. + type: string + jupyterServerAppSettings: + description: The Jupyter server's app settings. See jupyter_server_app_settings + Block below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. 
see code_repository Block below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See kernel_gateway_app_settings + Block below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. 
+ properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + securityGroups: + description: The security groups for the Amazon Virtual Private + Cloud that the space uses for communication. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + defaultUserSettings: + description: The default user settings. See default_user_settings + Block below. + properties: + canvasAppSettings: + description: The Canvas app settings. See canvas_app_settings + Block below. + properties: + directDeploySettings: + description: The model deployment settings for the SageMaker + Canvas application. See direct_deploy_settings Block + below. + properties: + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + identityProviderOauthSettings: + description: The settings for connecting to an external + data source with OAuth. See identity_provider_oauth_settings + Block below. + items: + properties: + dataSourceName: + description: The name of the data source that you're + connecting to. Canvas currently supports OAuth + for Snowflake and Salesforce Data Cloud. 
Valid + values are SalesforceGenie and Snowflake. + type: string + secretArn: + description: The ARN of an Amazon Web Services Secrets + Manager secret that stores the credentials from + your identity provider, such as the client ID + and secret, authorization URL, and token URL. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + type: array + kendraSettings: + description: The settings for document querying. See kendra_settings + Block below. + properties: + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + modelRegisterSettings: + description: The model registry settings for the SageMaker + Canvas application. See model_register_settings Block + below. + properties: + crossAccountModelRegisterRoleArn: + description: The Amazon Resource Name (ARN) of the + SageMaker model registry account. Required only + to register model versions created by a different + SageMaker Canvas AWS account than the AWS account + in which SageMaker model registry is set up. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + timeSeriesForecastingSettings: + description: Time series forecast settings for the Canvas + app. See time_series_forecasting_settings Block below. + properties: + amazonForecastRoleArn: + description: The IAM role that Canvas passes to Amazon + Forecast for time series forecasting. By default, + Canvas uses the execution role specified in the + UserProfile that launches the Canvas app. If an + execution role is not specified in the UserProfile, + Canvas uses the execution role specified in the + Domain that owns the UserProfile. 
To allow time + series forecasting, this IAM role should have the + AmazonSageMakerCanvasForecastAccess policy attached + and forecast.amazonaws.com added in the trust relationship + as a service principal. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + workspaceSettings: + description: The workspace settings for the SageMaker + Canvas application. See workspace_settings Block below. + properties: + s3ArtifactPath: + description: The Amazon S3 bucket used to store artifacts + generated by Canvas. Updating the Amazon S3 location + impacts existing configuration settings, and Canvas + users no longer have access to their artifacts. + Canvas users must log out and log back in to apply + the new location. + type: string + s3KmsKeyId: + description: The Amazon Web Services Key Management + Service (KMS) encryption key ID that is used to + encrypt artifacts generated by Canvas in the Amazon + S3 bucket. + type: string + type: object + type: object + codeEditorAppSettings: + description: The Code Editor application settings. See code_editor_app_settings + Block below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. 
+ type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + customFileSystemConfig: + description: The settings for assigning a custom file system + to a user profile. Permitted users can access this file + system in Amazon SageMaker Studio. See custom_file_system_config + Block below. + items: + properties: + efsFileSystemConfig: + description: The default EBS storage settings for a + private space. See efs_file_system_config Block below. + properties: + fileSystemId: + description: The ID of your Amazon EFS file system. + type: string + fileSystemPath: + description: The path to the file system directory + that is accessible in Amazon SageMaker Studio. + Permitted users can access only this directory + and below. + type: string + type: object + type: object + type: array + customPosixUserConfig: + description: Details about the POSIX identity that is used + for file system operations. See custom_posix_user_config + Block below. + properties: + gid: + description: The POSIX group ID. + type: number + uid: + description: The POSIX user ID. + type: number + type: object + defaultLandingUri: + description: 'The default experience that the user is directed + to when accessing the domain. The supported values are: + studio::: Indicates that Studio is the default experience. + This value can only be passed if StudioWebPortal is set + to ENABLED. app:JupyterServer:: Indicates that Studio Classic + is the default experience.' + type: string + executionRole: + description: The execution role ARN for the user. + type: string + executionRoleRef: + description: Reference to a Role in iam to populate executionRole. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleSelector: + description: Selector for a Role in iam to populate executionRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + jupyterLabAppSettings: + description: The settings for the JupyterLab application. + See jupyter_lab_app_settings Block below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see code_repository Block below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + jupyterServerAppSettings: + description: The Jupyter server's app settings. See jupyter_server_app_settings + Block below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see code_repository Block below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See kernel_gateway_app_settings + Block below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. 
+ type: string + appImageConfigNameRef: + description: Reference to a AppImageConfig in sagemaker + to populate appImageConfigName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appImageConfigNameSelector: + description: Selector for a AppImageConfig in sagemaker + to populate appImageConfigName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageName: + description: The name of the Custom Image. + type: string + imageNameRef: + description: Reference to a ImageVersion in sagemaker + to populate imageName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageNameSelector: + description: Selector for a ImageVersion in sagemaker + to populate imageName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + rSessionAppSettings: + description: The RSession app settings. See r_session_app_settings + Block below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. 
see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + rStudioServerProAppSettings: + description: A collection of settings that configure user + interaction with the RStudioServerPro app. See r_studio_server_pro_app_settings + Block below. + properties: + accessStatus: + description: Indicates whether the current user has access + to the RStudioServerPro app. Valid values are ENABLED + and DISABLED. + type: string + userGroup: + description: The level of permissions that the user has + within the RStudioServerPro app. This value defaults + to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the + user access to the RStudio Administrative Dashboard. + Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + type: string + type: object + securityGroups: + description: A list of security group IDs that will be attached + to the user. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + sharingSettings: + description: The sharing settings. See sharing_settings Block + below. + properties: + notebookOutputOption: + description: Whether to include the notebook cell output + when sharing the notebook. The default is Disabled. + Valid values are Allowed and Disabled. + type: string + s3KmsKeyId: + description: The Amazon Web Services Key Management Service + (KMS) encryption key ID that is used to encrypt artifacts + generated by Canvas in the Amazon S3 bucket. + type: string + s3OutputPath: + description: When notebook_output_option is Allowed, the + Amazon S3 bucket used to save the notebook cell output. + type: string + type: object + spaceStorageSettings: + description: The storage settings for a private space. See + space_storage_settings Block below. + properties: + defaultEbsStorageSettings: + description: The default EBS storage settings for a private + space. See default_ebs_storage_settings Block below. + properties: + defaultEbsVolumeSizeInGb: + description: The default size of the EBS storage volume + for a private space. + type: number + maximumEbsVolumeSizeInGb: + description: The maximum size of the EBS storage volume + for a private space. + type: number + type: object + type: object + studioWebPortal: + description: Whether the user can access Studio. If this value + is set to DISABLED, the user cannot access Studio, even + if that is the default experience for the domain. Valid + values are ENABLED and DISABLED. + type: string + tensorBoardAppSettings: + description: The TensorBoard app settings. See tensor_board_app_settings + Block below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. 
For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + type: object + domainName: + description: The domain name. + type: string + domainSettings: + description: The domain settings. See domain_settings Block below. + properties: + executionRoleIdentityConfig: + description: The configuration for attaching a SageMaker user + profile name to the execution role as a sts:SourceIdentity + key AWS Docs. Valid values are USER_PROFILE_NAME and DISABLED. + type: string + rStudioServerProDomainSettings: + description: A collection of settings that configure the RStudioServerPro + Domain-level app. see r_studio_server_pro_domain_settings + Block below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. 
+ type: string + type: object + domainExecutionRoleArn: + description: The ARN of the execution role for the RStudioServerPro + Domain-level app. + type: string + rStudioConnectUrl: + description: A URL pointing to an RStudio Connect server. + type: string + rStudioPackageManagerUrl: + description: A URL pointing to an RStudio Package Manager + server. + type: string + type: object + securityGroupIds: + description: The security groups for the Amazon Virtual Private + Cloud that the Domain uses for communication between Domain-level + apps and user apps. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kmsKeyId: + description: The AWS KMS customer managed CMK used to encrypt + the EFS volume attached to the domain. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + retentionPolicy: + description: The retention policy for this domain, which specifies + whether resources will be retained after the Domain is deleted. + By default, all resources are retained. See retention_policy + Block below. + properties: + homeEfsFileSystem: + description: The retention policy for data stored on an Amazon + Elastic File System (EFS) volume. Valid values are Retain + or Delete. Default value is Retain. + type: string + type: object + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The VPC subnets that Studio uses for communication. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcId: + description: The ID of the Amazon Virtual Private Cloud (VPC) + that Studio uses for communication. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appNetworkAccessType: + description: Specifies the VPC used for non-EFS traffic. The default + value is PublicInternetOnly. Valid values are PublicInternetOnly + and VpcOnly. + type: string + appSecurityGroupManagement: + description: The entity that creates and manages the required + security groups for inter-app communication in VPCOnly mode. + Valid values are Service and Customer. + type: string + authMode: + description: The mode of authentication that members use to access + the domain. Valid values are IAM and SSO. + type: string + defaultSpaceSettings: + description: The default space settings. See default_space_settings + Block below. + properties: + executionRole: + description: The execution role for the space. 
+ type: string + jupyterServerAppSettings: + description: The Jupyter server's app settings. See jupyter_server_app_settings + Block below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see code_repository Block below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See kernel_gateway_app_settings + Block below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. 
+ type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + securityGroups: + description: The security groups for the Amazon Virtual Private + Cloud that the space uses for communication. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + defaultUserSettings: + description: The default user settings. See default_user_settings + Block below. + properties: + canvasAppSettings: + description: The Canvas app settings. See canvas_app_settings + Block below. + properties: + directDeploySettings: + description: The model deployment settings for the SageMaker + Canvas application. See direct_deploy_settings Block + below. + properties: + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. 
+ type: string + type: object + identityProviderOauthSettings: + description: The settings for connecting to an external + data source with OAuth. See identity_provider_oauth_settings + Block below. + items: + properties: + dataSourceName: + description: The name of the data source that you're + connecting to. Canvas currently supports OAuth + for Snowflake and Salesforce Data Cloud. Valid + values are SalesforceGenie and Snowflake. + type: string + secretArn: + description: The ARN of an Amazon Web Services Secrets + Manager secret that stores the credentials from + your identity provider, such as the client ID + and secret, authorization URL, and token URL. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + type: array + kendraSettings: + description: The settings for document querying. See kendra_settings + Block below. + properties: + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + modelRegisterSettings: + description: The model registry settings for the SageMaker + Canvas application. See model_register_settings Block + below. + properties: + crossAccountModelRegisterRoleArn: + description: The Amazon Resource Name (ARN) of the + SageMaker model registry account. Required only + to register model versions created by a different + SageMaker Canvas AWS account than the AWS account + in which SageMaker model registry is set up. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + timeSeriesForecastingSettings: + description: Time series forecast settings for the Canvas + app. See time_series_forecasting_settings Block below. 
+ properties: + amazonForecastRoleArn: + description: The IAM role that Canvas passes to Amazon + Forecast for time series forecasting. By default, + Canvas uses the execution role specified in the + UserProfile that launches the Canvas app. If an + execution role is not specified in the UserProfile, + Canvas uses the execution role specified in the + Domain that owns the UserProfile. To allow time + series forecasting, this IAM role should have the + AmazonSageMakerCanvasForecastAccess policy attached + and forecast.amazonaws.com added in the trust relationship + as a service principal. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + workspaceSettings: + description: The workspace settings for the SageMaker + Canvas application. See workspace_settings Block below. + properties: + s3ArtifactPath: + description: The Amazon S3 bucket used to store artifacts + generated by Canvas. Updating the Amazon S3 location + impacts existing configuration settings, and Canvas + users no longer have access to their artifacts. + Canvas users must log out and log back in to apply + the new location. + type: string + s3KmsKeyId: + description: The Amazon Web Services Key Management + Service (KMS) encryption key ID that is used to + encrypt artifacts generated by Canvas in the Amazon + S3 bucket. + type: string + type: object + type: object + codeEditorAppSettings: + description: The Code Editor application settings. See code_editor_app_settings + Block below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. 
+ type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + customFileSystemConfig: + description: The settings for assigning a custom file system + to a user profile. Permitted users can access this file + system in Amazon SageMaker Studio. See custom_file_system_config + Block below. + items: + properties: + efsFileSystemConfig: + description: The default EBS storage settings for a + private space. See efs_file_system_config Block below. + properties: + fileSystemId: + description: The ID of your Amazon EFS file system. + type: string + fileSystemPath: + description: The path to the file system directory + that is accessible in Amazon SageMaker Studio. + Permitted users can access only this directory + and below. + type: string + type: object + type: object + type: array + customPosixUserConfig: + description: Details about the POSIX identity that is used + for file system operations. See custom_posix_user_config + Block below. + properties: + gid: + description: The POSIX group ID. + type: number + uid: + description: The POSIX user ID. + type: number + type: object + defaultLandingUri: + description: 'The default experience that the user is directed + to when accessing the domain. The supported values are: + studio::: Indicates that Studio is the default experience. + This value can only be passed if StudioWebPortal is set + to ENABLED. 
app:JupyterServer:: Indicates that Studio Classic + is the default experience.' + type: string + executionRole: + description: The execution role ARN for the user. + type: string + executionRoleRef: + description: Reference to a Role in iam to populate executionRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleSelector: + description: Selector for a Role in iam to populate executionRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + jupyterLabAppSettings: + description: The settings for the JupyterLab application. + See jupyter_lab_app_settings Block below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see code_repository Block below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. 
+ type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + jupyterServerAppSettings: + description: The Jupyter server's app settings. See jupyter_server_app_settings + Block below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see code_repository Block below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. 
See kernel_gateway_app_settings + Block below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + appImageConfigNameRef: + description: Reference to a AppImageConfig in sagemaker + to populate appImageConfigName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appImageConfigNameSelector: + description: Selector for a AppImageConfig in sagemaker + to populate appImageConfigName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageName: + description: The name of the Custom Image. + type: string + imageNameRef: + description: Reference to a ImageVersion in sagemaker + to populate imageName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + imageNameSelector: + description: Selector for a ImageVersion in sagemaker + to populate imageName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. 
+ type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + rSessionAppSettings: + description: The RSession app settings. See r_session_app_settings + Block below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + rStudioServerProAppSettings: + description: A collection of settings that configure user + interaction with the RStudioServerPro app. See r_studio_server_pro_app_settings + Block below. + properties: + accessStatus: + description: Indicates whether the current user has access + to the RStudioServerPro app. Valid values are ENABLED + and DISABLED. 
+ type: string + userGroup: + description: The level of permissions that the user has + within the RStudioServerPro app. This value defaults + to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the + user access to the RStudio Administrative Dashboard. + Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + type: string + type: object + securityGroups: + description: A list of security group IDs that will be attached + to the user. + items: + type: string + type: array + x-kubernetes-list-type: set + sharingSettings: + description: The sharing settings. See sharing_settings Block + below. + properties: + notebookOutputOption: + description: Whether to include the notebook cell output + when sharing the notebook. The default is Disabled. + Valid values are Allowed and Disabled. + type: string + s3KmsKeyId: + description: The Amazon Web Services Key Management Service + (KMS) encryption key ID that is used to encrypt artifacts + generated by Canvas in the Amazon S3 bucket. + type: string + s3OutputPath: + description: When notebook_output_option is Allowed, the + Amazon S3 bucket used to save the notebook cell output. + type: string + type: object + spaceStorageSettings: + description: The storage settings for a private space. See + space_storage_settings Block below. + properties: + defaultEbsStorageSettings: + description: The default EBS storage settings for a private + space. See default_ebs_storage_settings Block below. + properties: + defaultEbsVolumeSizeInGb: + description: The default size of the EBS storage volume + for a private space. + type: number + maximumEbsVolumeSizeInGb: + description: The maximum size of the EBS storage volume + for a private space. + type: number + type: object + type: object + studioWebPortal: + description: Whether the user can access Studio. If this value + is set to DISABLED, the user cannot access Studio, even + if that is the default experience for the domain. Valid + values are ENABLED and DISABLED. 
+ type: string + tensorBoardAppSettings: + description: The TensorBoard app settings. See tensor_board_app_settings + Block below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + type: object + domainName: + description: The domain name. + type: string + domainSettings: + description: The domain settings. See domain_settings Block below. + properties: + executionRoleIdentityConfig: + description: The configuration for attaching a SageMaker user + profile name to the execution role as a sts:SourceIdentity + key AWS Docs. Valid values are USER_PROFILE_NAME and DISABLED. + type: string + rStudioServerProDomainSettings: + description: A collection of settings that configure the RStudioServerPro + Domain-level app. see r_studio_server_pro_domain_settings + Block below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. 
+ type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + domainExecutionRoleArn: + description: The ARN of the execution role for the RStudioServerPro + Domain-level app. + type: string + rStudioConnectUrl: + description: A URL pointing to an RStudio Connect server. + type: string + rStudioPackageManagerUrl: + description: A URL pointing to an RStudio Package Manager + server. + type: string + type: object + securityGroupIds: + description: The security groups for the Amazon Virtual Private + Cloud that the Domain uses for communication between Domain-level + apps and user apps. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kmsKeyId: + description: The AWS KMS customer managed CMK used to encrypt + the EFS volume attached to the domain. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionPolicy: + description: The retention policy for this domain, which specifies + whether resources will be retained after the Domain is deleted. + By default, all resources are retained. See retention_policy + Block below. + properties: + homeEfsFileSystem: + description: The retention policy for data stored on an Amazon + Elastic File System (EFS) volume. Valid values are Retain + or Delete. Default value is Retain. 
+ type: string + type: object + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The VPC subnets that Studio uses for communication. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcId: + description: The ID of the Amazon Virtual Private Cloud (VPC) + that Studio uses for communication. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.authMode is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authMode) + || (has(self.initProvider) && has(self.initProvider.authMode))' + - message: spec.forProvider.defaultUserSettings is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultUserSettings) + || (has(self.initProvider) && has(self.initProvider.defaultUserSettings))' + - message: spec.forProvider.domainName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.domainName) + || (has(self.initProvider) && has(self.initProvider.domainName))' + status: + description: DomainStatus defines the observed state of Domain. + properties: + atProvider: + properties: + appNetworkAccessType: + description: Specifies the VPC used for non-EFS traffic. The default + value is PublicInternetOnly. Valid values are PublicInternetOnly + and VpcOnly. + type: string + appSecurityGroupManagement: + description: The entity that creates and manages the required + security groups for inter-app communication in VPCOnly mode. 
+ Valid values are Service and Customer. + type: string + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this Domain. + type: string + authMode: + description: The mode of authentication that members use to access + the domain. Valid values are IAM and SSO. + type: string + defaultSpaceSettings: + description: The default space settings. See default_space_settings + Block below. + properties: + executionRole: + description: The execution role for the space. + type: string + jupyterServerAppSettings: + description: The Jupyter server's app settings. See jupyter_server_app_settings + Block below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see code_repository Block below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See kernel_gateway_app_settings + Block below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + securityGroups: + description: The security groups for the Amazon Virtual Private + Cloud that the space uses for communication. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + defaultUserSettings: + description: The default user settings. 
See default_user_settings + Block below. + properties: + canvasAppSettings: + description: The Canvas app settings. See canvas_app_settings + Block below. + properties: + directDeploySettings: + description: The model deployment settings for the SageMaker + Canvas application. See direct_deploy_settings Block + below. + properties: + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + identityProviderOauthSettings: + description: The settings for connecting to an external + data source with OAuth. See identity_provider_oauth_settings + Block below. + items: + properties: + dataSourceName: + description: The name of the data source that you're + connecting to. Canvas currently supports OAuth + for Snowflake and Salesforce Data Cloud. Valid + values are SalesforceGenie and Snowflake. + type: string + secretArn: + description: The ARN of an Amazon Web Services Secrets + Manager secret that stores the credentials from + your identity provider, such as the client ID + and secret, authorization URL, and token URL. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + type: array + kendraSettings: + description: The settings for document querying. See kendra_settings + Block below. + properties: + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + modelRegisterSettings: + description: The model registry settings for the SageMaker + Canvas application. See model_register_settings Block + below. + properties: + crossAccountModelRegisterRoleArn: + description: The Amazon Resource Name (ARN) of the + SageMaker model registry account. 
Required only + to register model versions created by a different + SageMaker Canvas AWS account than the AWS account + in which SageMaker model registry is set up. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + timeSeriesForecastingSettings: + description: Time series forecast settings for the Canvas + app. See time_series_forecasting_settings Block below. + properties: + amazonForecastRoleArn: + description: The IAM role that Canvas passes to Amazon + Forecast for time series forecasting. By default, + Canvas uses the execution role specified in the + UserProfile that launches the Canvas app. If an + execution role is not specified in the UserProfile, + Canvas uses the execution role specified in the + Domain that owns the UserProfile. To allow time + series forecasting, this IAM role should have the + AmazonSageMakerCanvasForecastAccess policy attached + and forecast.amazonaws.com added in the trust relationship + as a service principal. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + workspaceSettings: + description: The workspace settings for the SageMaker + Canvas application. See workspace_settings Block below. + properties: + s3ArtifactPath: + description: The Amazon S3 bucket used to store artifacts + generated by Canvas. Updating the Amazon S3 location + impacts existing configuration settings, and Canvas + users no longer have access to their artifacts. + Canvas users must log out and log back in to apply + the new location. + type: string + s3KmsKeyId: + description: The Amazon Web Services Key Management + Service (KMS) encryption key ID that is used to + encrypt artifacts generated by Canvas in the Amazon + S3 bucket. 
+ type: string + type: object + type: object + codeEditorAppSettings: + description: The Code Editor application settings. See code_editor_app_settings + Block below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + customFileSystemConfig: + description: The settings for assigning a custom file system + to a user profile. Permitted users can access this file + system in Amazon SageMaker Studio. See custom_file_system_config + Block below. + items: + properties: + efsFileSystemConfig: + description: The default EBS storage settings for a + private space. See efs_file_system_config Block below. + properties: + fileSystemId: + description: The ID of your Amazon EFS file system. + type: string + fileSystemPath: + description: The path to the file system directory + that is accessible in Amazon SageMaker Studio. + Permitted users can access only this directory + and below. 
+ type: string + type: object + type: object + type: array + customPosixUserConfig: + description: Details about the POSIX identity that is used + for file system operations. See custom_posix_user_config + Block below. + properties: + gid: + description: The POSIX group ID. + type: number + uid: + description: The POSIX user ID. + type: number + type: object + defaultLandingUri: + description: 'The default experience that the user is directed + to when accessing the domain. The supported values are: + studio::: Indicates that Studio is the default experience. + This value can only be passed if StudioWebPortal is set + to ENABLED. app:JupyterServer:: Indicates that Studio Classic + is the default experience.' + type: string + executionRole: + description: The execution role ARN for the user. + type: string + jupyterLabAppSettings: + description: The settings for the JupyterLab application. + See jupyter_lab_app_settings Block below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see code_repository Block below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. 
+ properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + jupyterServerAppSettings: + description: The Jupyter server's app settings. See jupyter_server_app_settings + Block below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see code_repository Block below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. 
+ type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See kernel_gateway_app_settings + Block below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + rSessionAppSettings: + description: The RSession app settings. See r_session_app_settings + Block below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see custom_image + Block below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + rStudioServerProAppSettings: + description: A collection of settings that configure user + interaction with the RStudioServerPro app. See r_studio_server_pro_app_settings + Block below. + properties: + accessStatus: + description: Indicates whether the current user has access + to the RStudioServerPro app. Valid values are ENABLED + and DISABLED. + type: string + userGroup: + description: The level of permissions that the user has + within the RStudioServerPro app. 
This value defaults + to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the + user access to the RStudio Administrative Dashboard. + Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + type: string + type: object + securityGroups: + description: A list of security group IDs that will be attached + to the user. + items: + type: string + type: array + x-kubernetes-list-type: set + sharingSettings: + description: The sharing settings. See sharing_settings Block + below. + properties: + notebookOutputOption: + description: Whether to include the notebook cell output + when sharing the notebook. The default is Disabled. + Valid values are Allowed and Disabled. + type: string + s3KmsKeyId: + description: The Amazon Web Services Key Management Service + (KMS) encryption key ID that is used to encrypt artifacts + generated by Canvas in the Amazon S3 bucket. + type: string + s3OutputPath: + description: When notebook_output_option is Allowed, the + Amazon S3 bucket used to save the notebook cell output. + type: string + type: object + spaceStorageSettings: + description: The storage settings for a private space. See + space_storage_settings Block below. + properties: + defaultEbsStorageSettings: + description: The default EBS storage settings for a private + space. See default_ebs_storage_settings Block below. + properties: + defaultEbsVolumeSizeInGb: + description: The default size of the EBS storage volume + for a private space. + type: number + maximumEbsVolumeSizeInGb: + description: The maximum size of the EBS storage volume + for a private space. + type: number + type: object + type: object + studioWebPortal: + description: Whether the user can access Studio. If this value + is set to DISABLED, the user cannot access Studio, even + if that is the default experience for the domain. Valid + values are ENABLED and DISABLED. + type: string + tensorBoardAppSettings: + description: The TensorBoard app settings. See tensor_board_app_settings + Block below. 
+ properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + type: object + domainName: + description: The domain name. + type: string + domainSettings: + description: The domain settings. See domain_settings Block below. + properties: + executionRoleIdentityConfig: + description: The configuration for attaching a SageMaker user + profile name to the execution role as a sts:SourceIdentity + key AWS Docs. Valid values are USER_PROFILE_NAME and DISABLED. + type: string + rStudioServerProDomainSettings: + description: A collection of settings that configure the RStudioServerPro + Domain-level app. see r_studio_server_pro_domain_settings + Block below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see default_resource_spec Block below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. 
+ type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + domainExecutionRoleArn: + description: The ARN of the execution role for the RStudioServerPro + Domain-level app. + type: string + rStudioConnectUrl: + description: A URL pointing to an RStudio Connect server. + type: string + rStudioPackageManagerUrl: + description: A URL pointing to an RStudio Package Manager + server. + type: string + type: object + securityGroupIds: + description: The security groups for the Amazon Virtual Private + Cloud that the Domain uses for communication between Domain-level + apps and user apps. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + homeEfsFileSystemId: + description: The ID of the Amazon Elastic File System (EFS) managed + by this Domain. + type: string + id: + description: The ID of the Domain. + type: string + kmsKeyId: + description: The AWS KMS customer managed CMK used to encrypt + the EFS volume attached to the domain. + type: string + retentionPolicy: + description: The retention policy for this domain, which specifies + whether resources will be retained after the Domain is deleted. + By default, all resources are retained. See retention_policy + Block below. + properties: + homeEfsFileSystem: + description: The retention policy for data stored on an Amazon + Elastic File System (EFS) volume. Valid values are Retain + or Delete. Default value is Retain. + type: string + type: object + securityGroupIdForDomainBoundary: + description: The ID of the security group that authorizes traffic + between the RSessionGateway apps and the RStudioServerPro app. 
+ type: string + singleSignOnApplicationArn: + description: The ARN of the application managed by SageMaker in + IAM Identity Center. This value is only returned for domains + created after September 19, 2023. + type: string + singleSignOnManagedApplicationInstanceId: + description: The SSO managed application instance ID. + type: string + subnetIds: + description: The VPC subnets that Studio uses for communication. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + url: + description: The domain's URL. + type: string + vpcId: + description: The ID of the Amazon Virtual Private Cloud (VPC) + that Studio uses for communication. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_endpointconfigurations.yaml b/package/crds/sagemaker.aws.upbound.io_endpointconfigurations.yaml index 8f9b873040..0c4d867f58 100644 --- a/package/crds/sagemaker.aws.upbound.io_endpointconfigurations.yaml +++ b/package/crds/sagemaker.aws.upbound.io_endpointconfigurations.yaml @@ -1719,3 +1719,1638 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EndpointConfiguration is the Schema for the EndpointConfigurations + API. Provides a SageMaker Endpoint Configuration resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EndpointConfigurationSpec defines the desired state of EndpointConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + asyncInferenceConfig: + description: Specifies configuration for how an endpoint performs + asynchronous inference. + properties: + clientConfig: + description: Configures the behavior of the client used by + Amazon SageMaker to interact with the model container during + asynchronous inference. + properties: + maxConcurrentInvocationsPerInstance: + description: The maximum number of concurrent requests + sent by the SageMaker client to the model container. + If no value is provided, Amazon SageMaker will choose + an optimal value for you. + type: number + type: object + outputConfig: + description: Specifies the configuration for asynchronous + inference invocation outputs. 
+ properties: + kmsKeyId: + description: The Amazon Web Services Key Management Service + (Amazon Web Services KMS) key that Amazon SageMaker + uses to encrypt the asynchronous inference output in + Amazon S3. + type: string + notificationConfig: + description: Specifies the configuration for notifications + of inference results for asynchronous inference. + properties: + errorTopic: + description: Amazon SNS topic to post a notification + to when inference fails. If no topic is provided, + no notification is sent on failure. + type: string + includeInferenceResponseIn: + description: The Amazon SNS topics where you want + the inference response to be included. Valid values + are SUCCESS_NOTIFICATION_TOPIC and ERROR_NOTIFICATION_TOPIC. + items: + type: string + type: array + x-kubernetes-list-type: set + successTopic: + description: Amazon SNS topic to post a notification + to when inference completes successfully. If no + topic is provided, no notification is sent on success. + type: string + type: object + s3FailurePath: + description: The Amazon S3 location to upload failure + inference responses to. + type: string + s3OutputPath: + description: The Amazon S3 location to upload inference + responses to. + type: string + type: object + type: object + dataCaptureConfig: + description: Specifies the parameters to capture input/output + of SageMaker models endpoints. Fields are documented below. + properties: + captureContentTypeHeader: + description: The content type headers to capture. Fields are + documented below. + properties: + csvContentTypes: + description: The CSV content type headers to capture. + items: + type: string + type: array + x-kubernetes-list-type: set + jsonContentTypes: + description: The JSON content type headers to capture. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + captureOptions: + description: Specifies what data to capture. Fields are documented + below. 
+ items: + properties: + captureMode: + description: Specifies the data to be captured. Should + be one of Input or Output. + type: string + type: object + type: array + destinationS3Uri: + description: The URL for S3 location where the captured data + is stored. + type: string + enableCapture: + description: Flag to enable data capture. Defaults to false. + type: boolean + initialSamplingPercentage: + description: Portion of data to capture. Should be between + 0 and 100. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of a AWS Key Management + Service key that Amazon SageMaker uses to encrypt the captured + data on Amazon S3. + type: string + type: object + kmsKeyArn: + description: Amazon Resource Name (ARN) of a AWS Key Management + Service key that Amazon SageMaker uses to encrypt data on the + storage volume attached to the ML compute instance that hosts + the endpoint. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + productionVariants: + description: An list of ProductionVariant objects, one for each + model that you want to host at this endpoint. Fields are documented + below. + items: + properties: + acceleratorType: + description: The size of the Elastic Inference (EI) instance + to use for the production variant. + type: string + containerStartupHealthCheckTimeoutInSeconds: + description: The timeout value, in seconds, for your inference + container to pass health check by SageMaker Hosting. For + more information about health check, see How Your Container + Should Respond to Health Check (Ping) Requests. Valid + values between 60 and 3600. + type: number + coreDumpConfig: + description: Specifies configuration for a core dump from + the model container when the process crashes. Fields are + documented below. 
+ properties: + destinationS3Uri: + description: The Amazon S3 bucket to send the core dump + to. + type: string + kmsKeyId: + description: The Amazon Web Services Key Management + Service (Amazon Web Services KMS) key that SageMaker + uses to encrypt the core dump data at rest using Amazon + S3 server-side encryption. + type: string + type: object + enableSsmAccess: + description: You can use this parameter to turn on native + Amazon Web Services Systems Manager (SSM) access for a + production variant behind an endpoint. By default, SSM + access is disabled for all production variants behind + an endpoints. + type: boolean + initialInstanceCount: + description: Initial number of instances used for auto-scaling. + type: number + initialVariantWeight: + description: Determines initial traffic distribution among + all of the models that you specify in the endpoint configuration. + If unspecified, it defaults to 1.0. + type: number + instanceType: + description: The type of instance to start. + type: string + modelDataDownloadTimeoutInSeconds: + description: The timeout value, in seconds, to download + and extract the model that you want to host from Amazon + S3 to the individual inference instance associated with + this production variant. Valid values between 60 and 3600. + type: number + modelName: + description: The name of the model to use. + type: string + modelNameRef: + description: Reference to a Model in sagemaker to populate + modelName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + modelNameSelector: + description: Selector for a Model in sagemaker to populate + modelName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routingConfig: + description: Sets how the endpoint routes incoming traffic. + See routing_config below. + items: + properties: + routingStrategy: + description: Sets how the endpoint routes incoming + traffic. Valid values are LEAST_OUTSTANDING_REQUESTS + and RANDOM. 
LEAST_OUTSTANDING_REQUESTS routes requests + to the specific instances that have more capacity + to process them. RANDOM routes each request to a + randomly chosen instance. + type: string + type: object + type: array + serverlessConfig: + description: Specifies configuration for how an endpoint + performs asynchronous inference. + properties: + maxConcurrency: + description: The maximum number of concurrent invocations + your serverless endpoint can process. Valid values + are between 1 and 200. + type: number + memorySizeInMb: + description: 'The memory size of your serverless endpoint. + Valid values are in 1 GB increments: 1024 MB, 2048 + MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB.' + type: number + provisionedConcurrency: + description: The amount of provisioned concurrency to + allocate for the serverless endpoint. Should be less + than or equal to max_concurrency. Valid values are + between 1 and 200. + type: number + type: object + variantName: + description: The name of the variant. + type: string + volumeSizeInGb: + description: The size, in GB, of the ML storage volume attached + to individual inference instance associated with the production + variant. Valid values between 1 and 512. + type: number + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + shadowProductionVariants: + description: Array of ProductionVariant objects. There is one + for each model that you want to host at this endpoint in shadow + mode with production traffic replicated from the model specified + on ProductionVariants. If you use this field, you can only specify + one variant for ProductionVariants and one variant for ShadowProductionVariants. + Fields are documented below. + items: + properties: + acceleratorType: + description: The size of the Elastic Inference (EI) instance + to use for the production variant. 
+ type: string + containerStartupHealthCheckTimeoutInSeconds: + description: The timeout value, in seconds, for your inference + container to pass health check by SageMaker Hosting. For + more information about health check, see How Your Container + Should Respond to Health Check (Ping) Requests. Valid + values between 60 and 3600. + type: number + coreDumpConfig: + description: Specifies configuration for a core dump from + the model container when the process crashes. Fields are + documented below. + properties: + destinationS3Uri: + description: The Amazon S3 bucket to send the core dump + to. + type: string + kmsKeyId: + description: The Amazon Web Services Key Management + Service (Amazon Web Services KMS) key that SageMaker + uses to encrypt the core dump data at rest using Amazon + S3 server-side encryption. + type: string + type: object + enableSsmAccess: + description: You can use this parameter to turn on native + Amazon Web Services Systems Manager (SSM) access for a + production variant behind an endpoint. By default, SSM + access is disabled for all production variants behind + an endpoints. + type: boolean + initialInstanceCount: + description: Initial number of instances used for auto-scaling. + type: number + initialVariantWeight: + description: Determines initial traffic distribution among + all of the models that you specify in the endpoint configuration. + If unspecified, it defaults to 1.0. + type: number + instanceType: + description: The type of instance to start. + type: string + modelDataDownloadTimeoutInSeconds: + description: The timeout value, in seconds, to download + and extract the model that you want to host from Amazon + S3 to the individual inference instance associated with + this production variant. Valid values between 60 and 3600. + type: number + modelName: + description: The name of the model to use. + type: string + routingConfig: + description: Sets how the endpoint routes incoming traffic. + See routing_config below. 
+ items: + properties: + routingStrategy: + description: Sets how the endpoint routes incoming + traffic. Valid values are LEAST_OUTSTANDING_REQUESTS + and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests + to the specific instances that have more capacity + to process them. RANDOM routes each request to a + randomly chosen instance. + type: string + type: object + type: array + serverlessConfig: + description: Specifies configuration for how an endpoint + performs asynchronous inference. + properties: + maxConcurrency: + description: The maximum number of concurrent invocations + your serverless endpoint can process. Valid values + are between 1 and 200. + type: number + memorySizeInMb: + description: 'The memory size of your serverless endpoint. + Valid values are in 1 GB increments: 1024 MB, 2048 + MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB.' + type: number + provisionedConcurrency: + description: The amount of provisioned concurrency to + allocate for the serverless endpoint. Should be less + than or equal to max_concurrency. Valid values are + between 1 and 200. + type: number + type: object + variantName: + description: The name of the variant. + type: string + volumeSizeInGb: + description: The size, in GB, of the ML storage volume attached + to individual inference instance associated with the production + variant. Valid values between 1 and 512. + type: number + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + asyncInferenceConfig: + description: Specifies configuration for how an endpoint performs + asynchronous inference. + properties: + clientConfig: + description: Configures the behavior of the client used by + Amazon SageMaker to interact with the model container during + asynchronous inference. + properties: + maxConcurrentInvocationsPerInstance: + description: The maximum number of concurrent requests + sent by the SageMaker client to the model container. + If no value is provided, Amazon SageMaker will choose + an optimal value for you. + type: number + type: object + outputConfig: + description: Specifies the configuration for asynchronous + inference invocation outputs. + properties: + kmsKeyId: + description: The Amazon Web Services Key Management Service + (Amazon Web Services KMS) key that Amazon SageMaker + uses to encrypt the asynchronous inference output in + Amazon S3. + type: string + notificationConfig: + description: Specifies the configuration for notifications + of inference results for asynchronous inference. + properties: + errorTopic: + description: Amazon SNS topic to post a notification + to when inference fails. If no topic is provided, + no notification is sent on failure. + type: string + includeInferenceResponseIn: + description: The Amazon SNS topics where you want + the inference response to be included. Valid values + are SUCCESS_NOTIFICATION_TOPIC and ERROR_NOTIFICATION_TOPIC. + items: + type: string + type: array + x-kubernetes-list-type: set + successTopic: + description: Amazon SNS topic to post a notification + to when inference completes successfully. 
If no + topic is provided, no notification is sent on success. + type: string + type: object + s3FailurePath: + description: The Amazon S3 location to upload failure + inference responses to. + type: string + s3OutputPath: + description: The Amazon S3 location to upload inference + responses to. + type: string + type: object + type: object + dataCaptureConfig: + description: Specifies the parameters to capture input/output + of SageMaker models endpoints. Fields are documented below. + properties: + captureContentTypeHeader: + description: The content type headers to capture. Fields are + documented below. + properties: + csvContentTypes: + description: The CSV content type headers to capture. + items: + type: string + type: array + x-kubernetes-list-type: set + jsonContentTypes: + description: The JSON content type headers to capture. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + captureOptions: + description: Specifies what data to capture. Fields are documented + below. + items: + properties: + captureMode: + description: Specifies the data to be captured. Should + be one of Input or Output. + type: string + type: object + type: array + destinationS3Uri: + description: The URL for S3 location where the captured data + is stored. + type: string + enableCapture: + description: Flag to enable data capture. Defaults to false. + type: boolean + initialSamplingPercentage: + description: Portion of data to capture. Should be between + 0 and 100. + type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of a AWS Key Management + Service key that Amazon SageMaker uses to encrypt the captured + data on Amazon S3. + type: string + type: object + kmsKeyArn: + description: Amazon Resource Name (ARN) of a AWS Key Management + Service key that Amazon SageMaker uses to encrypt data on the + storage volume attached to the ML compute instance that hosts + the endpoint. 
+ type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + productionVariants: + description: An list of ProductionVariant objects, one for each + model that you want to host at this endpoint. Fields are documented + below. + items: + properties: + acceleratorType: + description: The size of the Elastic Inference (EI) instance + to use for the production variant. + type: string + containerStartupHealthCheckTimeoutInSeconds: + description: The timeout value, in seconds, for your inference + container to pass health check by SageMaker Hosting. For + more information about health check, see How Your Container + Should Respond to Health Check (Ping) Requests. Valid + values between 60 and 3600. + type: number + coreDumpConfig: + description: Specifies configuration for a core dump from + the model container when the process crashes. Fields are + documented below. + properties: + destinationS3Uri: + description: The Amazon S3 bucket to send the core dump + to. + type: string + kmsKeyId: + description: The Amazon Web Services Key Management + Service (Amazon Web Services KMS) key that SageMaker + uses to encrypt the core dump data at rest using Amazon + S3 server-side encryption. + type: string + type: object + enableSsmAccess: + description: You can use this parameter to turn on native + Amazon Web Services Systems Manager (SSM) access for a + production variant behind an endpoint. By default, SSM + access is disabled for all production variants behind + an endpoints. + type: boolean + initialInstanceCount: + description: Initial number of instances used for auto-scaling. + type: number + initialVariantWeight: + description: Determines initial traffic distribution among + all of the models that you specify in the endpoint configuration. 
+ If unspecified, it defaults to 1.0. + type: number + instanceType: + description: The type of instance to start. + type: string + modelDataDownloadTimeoutInSeconds: + description: The timeout value, in seconds, to download + and extract the model that you want to host from Amazon + S3 to the individual inference instance associated with + this production variant. Valid values between 60 and 3600. + type: number + modelName: + description: The name of the model to use. + type: string + modelNameRef: + description: Reference to a Model in sagemaker to populate + modelName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + modelNameSelector: + description: Selector for a Model in sagemaker to populate + modelName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routingConfig: + description: Sets how the endpoint routes incoming traffic. + See routing_config below. + items: + properties: + routingStrategy: + description: Sets how the endpoint routes incoming + traffic. Valid values are LEAST_OUTSTANDING_REQUESTS + and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests + to the specific instances that have more capacity + to process them. RANDOM routes each request to a + randomly chosen instance. + type: string + type: object + type: array + serverlessConfig: + description: Specifies configuration for how an endpoint + performs asynchronous inference. + properties: + maxConcurrency: + description: The maximum number of concurrent invocations + your serverless endpoint can process. Valid values + are between 1 and 200. + type: number + memorySizeInMb: + description: 'The memory size of your serverless endpoint. + Valid values are in 1 GB increments: 1024 MB, 2048 + MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB.' + type: number + provisionedConcurrency: + description: The amount of provisioned concurrency to + allocate for the serverless endpoint. Should be less + than or equal to max_concurrency. Valid values are + between 1 and 200. 
+ type: number + type: object + variantName: + description: The name of the variant. + type: string + volumeSizeInGb: + description: The size, in GB, of the ML storage volume attached + to individual inference instance associated with the production + variant. Valid values between 1 and 512. + type: number + type: object + type: array + shadowProductionVariants: + description: Array of ProductionVariant objects. There is one + for each model that you want to host at this endpoint in shadow + mode with production traffic replicated from the model specified + on ProductionVariants. If you use this field, you can only specify + one variant for ProductionVariants and one variant for ShadowProductionVariants. + Fields are documented below. + items: + properties: + acceleratorType: + description: The size of the Elastic Inference (EI) instance + to use for the production variant. + type: string + containerStartupHealthCheckTimeoutInSeconds: + description: The timeout value, in seconds, for your inference + container to pass health check by SageMaker Hosting. For + more information about health check, see How Your Container + Should Respond to Health Check (Ping) Requests. Valid + values between 60 and 3600. + type: number + coreDumpConfig: + description: Specifies configuration for a core dump from + the model container when the process crashes. Fields are + documented below. + properties: + destinationS3Uri: + description: The Amazon S3 bucket to send the core dump + to. + type: string + kmsKeyId: + description: The Amazon Web Services Key Management + Service (Amazon Web Services KMS) key that SageMaker + uses to encrypt the core dump data at rest using Amazon + S3 server-side encryption. + type: string + type: object + enableSsmAccess: + description: You can use this parameter to turn on native + Amazon Web Services Systems Manager (SSM) access for a + production variant behind an endpoint. 
By default, SSM + access is disabled for all production variants behind + an endpoints. + type: boolean + initialInstanceCount: + description: Initial number of instances used for auto-scaling. + type: number + initialVariantWeight: + description: Determines initial traffic distribution among + all of the models that you specify in the endpoint configuration. + If unspecified, it defaults to 1.0. + type: number + instanceType: + description: The type of instance to start. + type: string + modelDataDownloadTimeoutInSeconds: + description: The timeout value, in seconds, to download + and extract the model that you want to host from Amazon + S3 to the individual inference instance associated with + this production variant. Valid values between 60 and 3600. + type: number + modelName: + description: The name of the model to use. + type: string + routingConfig: + description: Sets how the endpoint routes incoming traffic. + See routing_config below. + items: + properties: + routingStrategy: + description: Sets how the endpoint routes incoming + traffic. Valid values are LEAST_OUTSTANDING_REQUESTS + and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests + to the specific instances that have more capacity + to process them. RANDOM routes each request to a + randomly chosen instance. + type: string + type: object + type: array + serverlessConfig: + description: Specifies configuration for how an endpoint + performs asynchronous inference. + properties: + maxConcurrency: + description: The maximum number of concurrent invocations + your serverless endpoint can process. Valid values + are between 1 and 200. + type: number + memorySizeInMb: + description: 'The memory size of your serverless endpoint. + Valid values are in 1 GB increments: 1024 MB, 2048 + MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB.' + type: number + provisionedConcurrency: + description: The amount of provisioned concurrency to + allocate for the serverless endpoint. 
Should be less + than or equal to max_concurrency. Valid values are + between 1 and 200. + type: number + type: object + variantName: + description: The name of the variant. + type: string + volumeSizeInGb: + description: The size, in GB, of the ML storage volume attached + to individual inference instance associated with the production + variant. Valid values between 1 and 512. + type: number + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.productionVariants is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.productionVariants) + || (has(self.initProvider) && has(self.initProvider.productionVariants))' + status: + description: EndpointConfigurationStatus defines the observed state of + EndpointConfiguration. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this endpoint configuration. + type: string + asyncInferenceConfig: + description: Specifies configuration for how an endpoint performs + asynchronous inference. + properties: + clientConfig: + description: Configures the behavior of the client used by + Amazon SageMaker to interact with the model container during + asynchronous inference. + properties: + maxConcurrentInvocationsPerInstance: + description: The maximum number of concurrent requests + sent by the SageMaker client to the model container. + If no value is provided, Amazon SageMaker will choose + an optimal value for you. + type: number + type: object + outputConfig: + description: Specifies the configuration for asynchronous + inference invocation outputs. + properties: + kmsKeyId: + description: The Amazon Web Services Key Management Service + (Amazon Web Services KMS) key that Amazon SageMaker + uses to encrypt the asynchronous inference output in + Amazon S3. + type: string + notificationConfig: + description: Specifies the configuration for notifications + of inference results for asynchronous inference. + properties: + errorTopic: + description: Amazon SNS topic to post a notification + to when inference fails. If no topic is provided, + no notification is sent on failure. 
+ type: string + includeInferenceResponseIn: + description: The Amazon SNS topics where you want + the inference response to be included. Valid values + are SUCCESS_NOTIFICATION_TOPIC and ERROR_NOTIFICATION_TOPIC. + items: + type: string + type: array + x-kubernetes-list-type: set + successTopic: + description: Amazon SNS topic to post a notification + to when inference completes successfully. If no + topic is provided, no notification is sent on success. + type: string + type: object + s3FailurePath: + description: The Amazon S3 location to upload failure + inference responses to. + type: string + s3OutputPath: + description: The Amazon S3 location to upload inference + responses to. + type: string + type: object + type: object + dataCaptureConfig: + description: Specifies the parameters to capture input/output + of SageMaker models endpoints. Fields are documented below. + properties: + captureContentTypeHeader: + description: The content type headers to capture. Fields are + documented below. + properties: + csvContentTypes: + description: The CSV content type headers to capture. + items: + type: string + type: array + x-kubernetes-list-type: set + jsonContentTypes: + description: The JSON content type headers to capture. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + captureOptions: + description: Specifies what data to capture. Fields are documented + below. + items: + properties: + captureMode: + description: Specifies the data to be captured. Should + be one of Input or Output. + type: string + type: object + type: array + destinationS3Uri: + description: The URL for S3 location where the captured data + is stored. + type: string + enableCapture: + description: Flag to enable data capture. Defaults to false. + type: boolean + initialSamplingPercentage: + description: Portion of data to capture. Should be between + 0 and 100. 
+ type: number + kmsKeyId: + description: Amazon Resource Name (ARN) of a AWS Key Management + Service key that Amazon SageMaker uses to encrypt the captured + data on Amazon S3. + type: string + type: object + id: + type: string + kmsKeyArn: + description: Amazon Resource Name (ARN) of a AWS Key Management + Service key that Amazon SageMaker uses to encrypt data on the + storage volume attached to the ML compute instance that hosts + the endpoint. + type: string + productionVariants: + description: An list of ProductionVariant objects, one for each + model that you want to host at this endpoint. Fields are documented + below. + items: + properties: + acceleratorType: + description: The size of the Elastic Inference (EI) instance + to use for the production variant. + type: string + containerStartupHealthCheckTimeoutInSeconds: + description: The timeout value, in seconds, for your inference + container to pass health check by SageMaker Hosting. For + more information about health check, see How Your Container + Should Respond to Health Check (Ping) Requests. Valid + values between 60 and 3600. + type: number + coreDumpConfig: + description: Specifies configuration for a core dump from + the model container when the process crashes. Fields are + documented below. + properties: + destinationS3Uri: + description: The Amazon S3 bucket to send the core dump + to. + type: string + kmsKeyId: + description: The Amazon Web Services Key Management + Service (Amazon Web Services KMS) key that SageMaker + uses to encrypt the core dump data at rest using Amazon + S3 server-side encryption. + type: string + type: object + enableSsmAccess: + description: You can use this parameter to turn on native + Amazon Web Services Systems Manager (SSM) access for a + production variant behind an endpoint. By default, SSM + access is disabled for all production variants behind + an endpoints. 
+ type: boolean + initialInstanceCount: + description: Initial number of instances used for auto-scaling. + type: number + initialVariantWeight: + description: Determines initial traffic distribution among + all of the models that you specify in the endpoint configuration. + If unspecified, it defaults to 1.0. + type: number + instanceType: + description: The type of instance to start. + type: string + modelDataDownloadTimeoutInSeconds: + description: The timeout value, in seconds, to download + and extract the model that you want to host from Amazon + S3 to the individual inference instance associated with + this production variant. Valid values between 60 and 3600. + type: number + modelName: + description: The name of the model to use. + type: string + routingConfig: + description: Sets how the endpoint routes incoming traffic. + See routing_config below. + items: + properties: + routingStrategy: + description: Sets how the endpoint routes incoming + traffic. Valid values are LEAST_OUTSTANDING_REQUESTS + and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests + to the specific instances that have more capacity + to process them. RANDOM routes each request to a + randomly chosen instance. + type: string + type: object + type: array + serverlessConfig: + description: Specifies configuration for how an endpoint + performs asynchronous inference. + properties: + maxConcurrency: + description: The maximum number of concurrent invocations + your serverless endpoint can process. Valid values + are between 1 and 200. + type: number + memorySizeInMb: + description: 'The memory size of your serverless endpoint. + Valid values are in 1 GB increments: 1024 MB, 2048 + MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB.' + type: number + provisionedConcurrency: + description: The amount of provisioned concurrency to + allocate for the serverless endpoint. Should be less + than or equal to max_concurrency. Valid values are + between 1 and 200. 
+ type: number + type: object + variantName: + description: The name of the variant. + type: string + volumeSizeInGb: + description: The size, in GB, of the ML storage volume attached + to individual inference instance associated with the production + variant. Valid values between 1 and 512. + type: number + type: object + type: array + shadowProductionVariants: + description: Array of ProductionVariant objects. There is one + for each model that you want to host at this endpoint in shadow + mode with production traffic replicated from the model specified + on ProductionVariants. If you use this field, you can only specify + one variant for ProductionVariants and one variant for ShadowProductionVariants. + Fields are documented below. + items: + properties: + acceleratorType: + description: The size of the Elastic Inference (EI) instance + to use for the production variant. + type: string + containerStartupHealthCheckTimeoutInSeconds: + description: The timeout value, in seconds, for your inference + container to pass health check by SageMaker Hosting. For + more information about health check, see How Your Container + Should Respond to Health Check (Ping) Requests. Valid + values between 60 and 3600. + type: number + coreDumpConfig: + description: Specifies configuration for a core dump from + the model container when the process crashes. Fields are + documented below. + properties: + destinationS3Uri: + description: The Amazon S3 bucket to send the core dump + to. + type: string + kmsKeyId: + description: The Amazon Web Services Key Management + Service (Amazon Web Services KMS) key that SageMaker + uses to encrypt the core dump data at rest using Amazon + S3 server-side encryption. + type: string + type: object + enableSsmAccess: + description: You can use this parameter to turn on native + Amazon Web Services Systems Manager (SSM) access for a + production variant behind an endpoint. 
By default, SSM + access is disabled for all production variants behind + an endpoints. + type: boolean + initialInstanceCount: + description: Initial number of instances used for auto-scaling. + type: number + initialVariantWeight: + description: Determines initial traffic distribution among + all of the models that you specify in the endpoint configuration. + If unspecified, it defaults to 1.0. + type: number + instanceType: + description: The type of instance to start. + type: string + modelDataDownloadTimeoutInSeconds: + description: The timeout value, in seconds, to download + and extract the model that you want to host from Amazon + S3 to the individual inference instance associated with + this production variant. Valid values between 60 and 3600. + type: number + modelName: + description: The name of the model to use. + type: string + routingConfig: + description: Sets how the endpoint routes incoming traffic. + See routing_config below. + items: + properties: + routingStrategy: + description: Sets how the endpoint routes incoming + traffic. Valid values are LEAST_OUTSTANDING_REQUESTS + and RANDOM. LEAST_OUTSTANDING_REQUESTS routes requests + to the specific instances that have more capacity + to process them. RANDOM routes each request to a + randomly chosen instance. + type: string + type: object + type: array + serverlessConfig: + description: Specifies configuration for how an endpoint + performs asynchronous inference. + properties: + maxConcurrency: + description: The maximum number of concurrent invocations + your serverless endpoint can process. Valid values + are between 1 and 200. + type: number + memorySizeInMb: + description: 'The memory size of your serverless endpoint. + Valid values are in 1 GB increments: 1024 MB, 2048 + MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB.' + type: number + provisionedConcurrency: + description: The amount of provisioned concurrency to + allocate for the serverless endpoint. 
Should be less + than or equal to max_concurrency. Valid values are + between 1 and 200. + type: number + type: object + variantName: + description: The name of the variant. + type: string + volumeSizeInGb: + description: The size, in GB, of the ML storage volume attached + to individual inference instance associated with the production + variant. Valid values between 1 and 512. + type: number + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_endpoints.yaml b/package/crds/sagemaker.aws.upbound.io_endpoints.yaml index 21d217556c..ba701d27c2 100644 --- a/package/crds/sagemaker.aws.upbound.io_endpoints.yaml +++ b/package/crds/sagemaker.aws.upbound.io_endpoints.yaml @@ -1046,3 +1046,947 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Endpoint is the Schema for the Endpoints API. Provides a SageMaker + Endpoint resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EndpointSpec defines the desired state of Endpoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deploymentConfig: + description: The deployment configuration for an endpoint, which + contains the desired deployment strategy and rollback configurations. + See Deployment Config. + properties: + autoRollbackConfiguration: + description: Automatic rollback configuration for handling + endpoint deployment failures and recovery. See Auto Rollback + Configuration. + properties: + alarms: + description: List of CloudWatch alarms in your account + that are configured to monitor metrics on an endpoint. + If any alarms are tripped during a deployment, SageMaker + rolls back the deployment. See Alarms. + items: + properties: + alarmName: + description: The name of a CloudWatch alarm in your + account. + type: string + type: object + type: array + type: object + blueGreenUpdatePolicy: + description: Update policy for a blue/green deployment. 
If + this update policy is specified, SageMaker creates a new + fleet during the deployment while maintaining the old fleet. + SageMaker flips traffic to the new fleet according to the + specified traffic routing configuration. Only one update + policy should be used in the deployment configuration. If + no update policy is specified, SageMaker uses a blue/green + deployment strategy with all at once traffic shifting by + default. See Blue Green Update Config. + properties: + maximumExecutionTimeoutInSeconds: + description: Maximum execution timeout for the deployment. + Note that the timeout value should be larger than the + total waiting time specified in termination_wait_in_seconds + and wait_interval_in_seconds. Valid values are between + 600 and 14400. + type: number + terminationWaitInSeconds: + description: Additional waiting time in seconds after + the completion of an endpoint deployment before terminating + the old endpoint fleet. Default is 0. Valid values are + between 0 and 3600. + type: number + trafficRoutingConfiguration: + description: Defines the traffic routing strategy to shift + traffic from the old fleet to the new fleet during an + endpoint deployment. See Traffic Routing Configuration. + properties: + canarySize: + description: Batch size for the first step to turn + on traffic on the new endpoint fleet. Value must + be less than or equal to 50% of the variant's total + instance count. See Canary Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either + as a number of instances or a capacity percentage. + type: number + type: object + linearStepSize: + description: Batch size for each step to turn on traffic + on the new endpoint fleet. Value must be 10-50% + of the variant's total instance count. See Linear + Step Size. + properties: + type: + description: 'Traffic routing strategy type. 
Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either + as a number of instances or a capacity percentage. + type: number + type: object + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + waitIntervalInSeconds: + description: The length of the baking period, during + which SageMaker monitors alarms for each batch on + the new fleet. Valid values are between 0 and 3600. + type: number + type: object + type: object + rollingUpdatePolicy: + description: Specifies a rolling deployment strategy for updating + a SageMaker endpoint. See Rolling Update Policy. + properties: + maximumBatchSize: + description: Batch size for each rolling step to provision + capacity and turn on traffic on the new endpoint fleet, + and terminate capacity on the old endpoint fleet. Value + must be between 5% to 50% of the variant's total instance + count. See Maximum Batch Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either as + a number of instances or a capacity percentage. + type: number + type: object + maximumExecutionTimeoutInSeconds: + description: Maximum execution timeout for the deployment. + Note that the timeout value should be larger than the + total waiting time specified in termination_wait_in_seconds + and wait_interval_in_seconds. Valid values are between + 600 and 14400. + type: number + rollbackMaximumBatchSize: + description: Batch size for rollback to the old endpoint + fleet. Each rolling step to provision capacity and turn + on traffic on the old endpoint fleet, and terminate + capacity on the new endpoint fleet. 
If this field is + absent, the default value will be set to 100% of total + capacity which means to bring up the whole capacity + of the old fleet at once during rollback. See Rollback + Maximum Batch Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either as + a number of instances or a capacity percentage. + type: number + type: object + waitIntervalInSeconds: + description: The length of the baking period, during which + SageMaker monitors alarms for each batch on the new + fleet. Valid values are between 0 and 3600. + type: number + type: object + type: object + endpointConfigName: + description: The name of the endpoint configuration to use. + type: string + endpointConfigNameRef: + description: Reference to a EndpointConfiguration in sagemaker + to populate endpointConfigName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + endpointConfigNameSelector: + description: Selector for a EndpointConfiguration in sagemaker + to populate endpointConfigName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deploymentConfig: + description: The deployment configuration for an endpoint, which + contains the desired deployment strategy and rollback configurations. + See Deployment Config. + properties: + autoRollbackConfiguration: + description: Automatic rollback configuration for handling + endpoint deployment failures and recovery. See Auto Rollback + Configuration. + properties: + alarms: + description: List of CloudWatch alarms in your account + that are configured to monitor metrics on an endpoint. + If any alarms are tripped during a deployment, SageMaker + rolls back the deployment. See Alarms. + items: + properties: + alarmName: + description: The name of a CloudWatch alarm in your + account. + type: string + type: object + type: array + type: object + blueGreenUpdatePolicy: + description: Update policy for a blue/green deployment. If + this update policy is specified, SageMaker creates a new + fleet during the deployment while maintaining the old fleet. + SageMaker flips traffic to the new fleet according to the + specified traffic routing configuration. Only one update + policy should be used in the deployment configuration. If + no update policy is specified, SageMaker uses a blue/green + deployment strategy with all at once traffic shifting by + default. See Blue Green Update Config. + properties: + maximumExecutionTimeoutInSeconds: + description: Maximum execution timeout for the deployment. + Note that the timeout value should be larger than the + total waiting time specified in termination_wait_in_seconds + and wait_interval_in_seconds. Valid values are between + 600 and 14400. 
+ type: number + terminationWaitInSeconds: + description: Additional waiting time in seconds after + the completion of an endpoint deployment before terminating + the old endpoint fleet. Default is 0. Valid values are + between 0 and 3600. + type: number + trafficRoutingConfiguration: + description: Defines the traffic routing strategy to shift + traffic from the old fleet to the new fleet during an + endpoint deployment. See Traffic Routing Configuration. + properties: + canarySize: + description: Batch size for the first step to turn + on traffic on the new endpoint fleet. Value must + be less than or equal to 50% of the variant's total + instance count. See Canary Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either + as a number of instances or a capacity percentage. + type: number + type: object + linearStepSize: + description: Batch size for each step to turn on traffic + on the new endpoint fleet. Value must be 10-50% + of the variant's total instance count. See Linear + Step Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either + as a number of instances or a capacity percentage. + type: number + type: object + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + waitIntervalInSeconds: + description: The length of the baking period, during + which SageMaker monitors alarms for each batch on + the new fleet. Valid values are between 0 and 3600. + type: number + type: object + type: object + rollingUpdatePolicy: + description: Specifies a rolling deployment strategy for updating + a SageMaker endpoint. See Rolling Update Policy. 
+ properties: + maximumBatchSize: + description: Batch size for each rolling step to provision + capacity and turn on traffic on the new endpoint fleet, + and terminate capacity on the old endpoint fleet. Value + must be between 5% to 50% of the variant's total instance + count. See Maximum Batch Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either as + a number of instances or a capacity percentage. + type: number + type: object + maximumExecutionTimeoutInSeconds: + description: Maximum execution timeout for the deployment. + Note that the timeout value should be larger than the + total waiting time specified in termination_wait_in_seconds + and wait_interval_in_seconds. Valid values are between + 600 and 14400. + type: number + rollbackMaximumBatchSize: + description: Batch size for rollback to the old endpoint + fleet. Each rolling step to provision capacity and turn + on traffic on the old endpoint fleet, and terminate + capacity on the new endpoint fleet. If this field is + absent, the default value will be set to 100% of total + capacity which means to bring up the whole capacity + of the old fleet at once during rollback. See Rollback + Maximum Batch Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either as + a number of instances or a capacity percentage. + type: number + type: object + waitIntervalInSeconds: + description: The length of the baking period, during which + SageMaker monitors alarms for each batch on the new + fleet. Valid values are between 0 and 3600. + type: number + type: object + type: object + endpointConfigName: + description: The name of the endpoint configuration to use. 
+ type: string + endpointConfigNameRef: + description: Reference to a EndpointConfiguration in sagemaker + to populate endpointConfigName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + endpointConfigNameSelector: + description: Selector for a EndpointConfiguration in sagemaker + to populate endpointConfigName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: EndpointStatus defines the observed state of Endpoint. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this endpoint. + type: string + deploymentConfig: + description: The deployment configuration for an endpoint, which + contains the desired deployment strategy and rollback configurations. + See Deployment Config. + properties: + autoRollbackConfiguration: + description: Automatic rollback configuration for handling + endpoint deployment failures and recovery. See Auto Rollback + Configuration. + properties: + alarms: + description: List of CloudWatch alarms in your account + that are configured to monitor metrics on an endpoint. + If any alarms are tripped during a deployment, SageMaker + rolls back the deployment. See Alarms. + items: + properties: + alarmName: + description: The name of a CloudWatch alarm in your + account. + type: string + type: object + type: array + type: object + blueGreenUpdatePolicy: + description: Update policy for a blue/green deployment. If + this update policy is specified, SageMaker creates a new + fleet during the deployment while maintaining the old fleet. + SageMaker flips traffic to the new fleet according to the + specified traffic routing configuration. Only one update + policy should be used in the deployment configuration. If + no update policy is specified, SageMaker uses a blue/green + deployment strategy with all at once traffic shifting by + default. See Blue Green Update Config. + properties: + maximumExecutionTimeoutInSeconds: + description: Maximum execution timeout for the deployment. + Note that the timeout value should be larger than the + total waiting time specified in termination_wait_in_seconds + and wait_interval_in_seconds. Valid values are between + 600 and 14400. 
+ type: number + terminationWaitInSeconds: + description: Additional waiting time in seconds after + the completion of an endpoint deployment before terminating + the old endpoint fleet. Default is 0. Valid values are + between 0 and 3600. + type: number + trafficRoutingConfiguration: + description: Defines the traffic routing strategy to shift + traffic from the old fleet to the new fleet during an + endpoint deployment. See Traffic Routing Configuration. + properties: + canarySize: + description: Batch size for the first step to turn + on traffic on the new endpoint fleet. Value must + be less than or equal to 50% of the variant's total + instance count. See Canary Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either + as a number of instances or a capacity percentage. + type: number + type: object + linearStepSize: + description: Batch size for each step to turn on traffic + on the new endpoint fleet. Value must be 10-50% + of the variant's total instance count. See Linear + Step Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either + as a number of instances or a capacity percentage. + type: number + type: object + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + waitIntervalInSeconds: + description: The length of the baking period, during + which SageMaker monitors alarms for each batch on + the new fleet. Valid values are between 0 and 3600. + type: number + type: object + type: object + rollingUpdatePolicy: + description: Specifies a rolling deployment strategy for updating + a SageMaker endpoint. See Rolling Update Policy. 
+ properties: + maximumBatchSize: + description: Batch size for each rolling step to provision + capacity and turn on traffic on the new endpoint fleet, + and terminate capacity on the old endpoint fleet. Value + must be between 5% to 50% of the variant's total instance + count. See Maximum Batch Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either as + a number of instances or a capacity percentage. + type: number + type: object + maximumExecutionTimeoutInSeconds: + description: Maximum execution timeout for the deployment. + Note that the timeout value should be larger than the + total waiting time specified in termination_wait_in_seconds + and wait_interval_in_seconds. Valid values are between + 600 and 14400. + type: number + rollbackMaximumBatchSize: + description: Batch size for rollback to the old endpoint + fleet. Each rolling step to provision capacity and turn + on traffic on the old endpoint fleet, and terminate + capacity on the new endpoint fleet. If this field is + absent, the default value will be set to 100% of total + capacity which means to bring up the whole capacity + of the old fleet at once during rollback. See Rollback + Maximum Batch Size. + properties: + type: + description: 'Traffic routing strategy type. Valid + values are: ALL_AT_ONCE, CANARY, and LINEAR.' + type: string + value: + description: Defines the capacity size, either as + a number of instances or a capacity percentage. + type: number + type: object + waitIntervalInSeconds: + description: The length of the baking period, during which + SageMaker monitors alarms for each batch on the new + fleet. Valid values are between 0 and 3600. + type: number + type: object + type: object + endpointConfigName: + description: The name of the endpoint configuration to use. 
+ type: string + id: + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_featuregroups.yaml b/package/crds/sagemaker.aws.upbound.io_featuregroups.yaml index 84e3337a2b..da763fd615 100644 --- a/package/crds/sagemaker.aws.upbound.io_featuregroups.yaml +++ b/package/crds/sagemaker.aws.upbound.io_featuregroups.yaml @@ -900,3 +900,846 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FeatureGroup is the Schema for the FeatureGroups API. Provides + a SageMaker Feature Group resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FeatureGroupSpec defines the desired state of FeatureGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A free-form description of a Feature Group. + type: string + eventTimeFeatureName: + description: The name of the feature that stores the EventTime + of a Record in a Feature Group. + type: string + featureDefinition: + description: A list of Feature names and types. See Feature Definition + Below. + items: + properties: + featureName: + description: 'The name of a feature. feature_name cannot + be any of the following: is_deleted, write_time, api_invocation_time.' + type: string + featureType: + description: The value type of a feature. Valid values are + Integral, Fractional, or String. + type: string + type: object + type: array + offlineStoreConfig: + description: The Offline Feature Store Configuration. See Offline + Store Config Below. + properties: + dataCatalogConfig: + description: The meta data of the Glue table that is autogenerated + when an OfflineStore is created. See Data Catalog Config + Below. + properties: + catalog: + description: The name of the Glue table catalog. 
+ type: string + database: + description: The name of the Glue table database. + type: string + tableName: + description: The name of the Glue table. + type: string + type: object + disableGlueTableCreation: + description: Set to true to turn Online Store On. + type: boolean + s3StorageConfig: + description: The Amazon Simple Storage (Amazon S3) location + of OfflineStore. See S3 Storage Config Below. + properties: + kmsKeyId: + description: The AWS Key Management Service (KMS) key + ID of the key used to encrypt any objects written into + the OfflineStore S3 location. + type: string + resolvedOutputS3Uri: + description: The S3 path where offline records are written. + type: string + s3Uri: + description: The S3 URI, or location in Amazon S3, of + OfflineStore. + type: string + type: object + tableFormat: + description: Format for the offline store table. Supported + formats are Glue (Default) and Apache Iceberg (https://iceberg.apache.org/). + type: string + type: object + onlineStoreConfig: + description: The Online Feature Store Configuration. See Online + Store Config Below. + properties: + enableOnlineStore: + description: Set to true to disable the automatic creation + of an AWS Glue table when configuring an OfflineStore. + type: boolean + securityConfig: + description: Security config for at-rest encryption of your + OnlineStore. See Security Config Below. + properties: + kmsKeyId: + description: The AWS Key Management Service (KMS) key + ID of the key used to encrypt any objects written into + the OfflineStore S3 location. + type: string + type: object + storageType: + description: Option for different tiers of low latency storage + for real-time data retrieval. Valid values are Standard, + or InMemory. + type: string + ttlDuration: + description: Time to live duration, where the record is hard + deleted after the expiration time is reached; ExpiresAt + = EventTime + TtlDuration.. See TTl Duration Below. 
+ properties: + unit: + description: TtlDuration time unit. Valid values are Seconds, + Minutes, Hours, Days, or Weeks. + type: string + value: + description: TtlDuration time value. + type: number + type: object + type: object + recordIdentifierFeatureName: + description: The name of the Feature whose value uniquely identifies + a Record defined in the Feature Store. Only the latest record + per identifier value will be stored in the Online Store. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of the IAM execution + role used to persist data into the Offline Store if an offline_store_config + is provided. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A free-form description of a Feature Group. 
+ type: string + eventTimeFeatureName: + description: The name of the feature that stores the EventTime + of a Record in a Feature Group. + type: string + featureDefinition: + description: A list of Feature names and types. See Feature Definition + Below. + items: + properties: + featureName: + description: 'The name of a feature. feature_name cannot + be any of the following: is_deleted, write_time, api_invocation_time.' + type: string + featureType: + description: The value type of a feature. Valid values are + Integral, Fractional, or String. + type: string + type: object + type: array + offlineStoreConfig: + description: The Offline Feature Store Configuration. See Offline + Store Config Below. + properties: + dataCatalogConfig: + description: The meta data of the Glue table that is autogenerated + when an OfflineStore is created. See Data Catalog Config + Below. + properties: + catalog: + description: The name of the Glue table catalog. + type: string + database: + description: The name of the Glue table database. + type: string + tableName: + description: The name of the Glue table. + type: string + type: object + disableGlueTableCreation: + description: Set to true to turn Online Store On. + type: boolean + s3StorageConfig: + description: The Amazon Simple Storage (Amazon S3) location + of OfflineStore. See S3 Storage Config Below. + properties: + kmsKeyId: + description: The AWS Key Management Service (KMS) key + ID of the key used to encrypt any objects written into + the OfflineStore S3 location. + type: string + resolvedOutputS3Uri: + description: The S3 path where offline records are written. + type: string + s3Uri: + description: The S3 URI, or location in Amazon S3, of + OfflineStore. + type: string + type: object + tableFormat: + description: Format for the offline store table. Supported + formats are Glue (Default) and Apache Iceberg (https://iceberg.apache.org/). 
+ type: string + type: object + onlineStoreConfig: + description: The Online Feature Store Configuration. See Online + Store Config Below. + properties: + enableOnlineStore: + description: Set to true to disable the automatic creation + of an AWS Glue table when configuring an OfflineStore. + type: boolean + securityConfig: + description: Security config for at-rest encryption of your + OnlineStore. See Security Config Below. + properties: + kmsKeyId: + description: The AWS Key Management Service (KMS) key + ID of the key used to encrypt any objects written into + the OfflineStore S3 location. + type: string + type: object + storageType: + description: Option for different tiers of low latency storage + for real-time data retrieval. Valid values are Standard, + or InMemory. + type: string + ttlDuration: + description: Time to live duration, where the record is hard + deleted after the expiration time is reached; ExpiresAt + = EventTime + TtlDuration.. See TTl Duration Below. + properties: + unit: + description: TtlDuration time unit. Valid values are Seconds, + Minutes, Hours, Days, or Weeks. + type: string + value: + description: TtlDuration time value. + type: number + type: object + type: object + recordIdentifierFeatureName: + description: The name of the Feature whose value uniquely identifies + a Record defined in the Feature Store. Only the latest record + per identifier value will be stored in the Online Store. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of the IAM execution + role used to persist data into the Offline Store if an offline_store_config + is provided. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.eventTimeFeatureName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.eventTimeFeatureName) + || (has(self.initProvider) && has(self.initProvider.eventTimeFeatureName))' + - message: spec.forProvider.featureDefinition is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.featureDefinition) + || (has(self.initProvider) && has(self.initProvider.featureDefinition))' + - message: spec.forProvider.recordIdentifierFeatureName is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.recordIdentifierFeatureName) + || (has(self.initProvider) && has(self.initProvider.recordIdentifierFeatureName))' + status: + description: FeatureGroupStatus defines the observed state of FeatureGroup. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this feature_group. + type: string + description: + description: A free-form description of a Feature Group. + type: string + eventTimeFeatureName: + description: The name of the feature that stores the EventTime + of a Record in a Feature Group. + type: string + featureDefinition: + description: A list of Feature names and types. See Feature Definition + Below. + items: + properties: + featureName: + description: 'The name of a feature. feature_name cannot + be any of the following: is_deleted, write_time, api_invocation_time.' + type: string + featureType: + description: The value type of a feature. Valid values are + Integral, Fractional, or String. 
+ type: string + type: object + type: array + id: + type: string + offlineStoreConfig: + description: The Offline Feature Store Configuration. See Offline + Store Config Below. + properties: + dataCatalogConfig: + description: The meta data of the Glue table that is autogenerated + when an OfflineStore is created. See Data Catalog Config + Below. + properties: + catalog: + description: The name of the Glue table catalog. + type: string + database: + description: The name of the Glue table database. + type: string + tableName: + description: The name of the Glue table. + type: string + type: object + disableGlueTableCreation: + description: Set to true to turn Online Store On. + type: boolean + s3StorageConfig: + description: The Amazon Simple Storage (Amazon S3) location + of OfflineStore. See S3 Storage Config Below. + properties: + kmsKeyId: + description: The AWS Key Management Service (KMS) key + ID of the key used to encrypt any objects written into + the OfflineStore S3 location. + type: string + resolvedOutputS3Uri: + description: The S3 path where offline records are written. + type: string + s3Uri: + description: The S3 URI, or location in Amazon S3, of + OfflineStore. + type: string + type: object + tableFormat: + description: Format for the offline store table. Supported + formats are Glue (Default) and Apache Iceberg (https://iceberg.apache.org/). + type: string + type: object + onlineStoreConfig: + description: The Online Feature Store Configuration. See Online + Store Config Below. + properties: + enableOnlineStore: + description: Set to true to disable the automatic creation + of an AWS Glue table when configuring an OfflineStore. + type: boolean + securityConfig: + description: Security config for at-rest encryption of your + OnlineStore. See Security Config Below. + properties: + kmsKeyId: + description: The AWS Key Management Service (KMS) key + ID of the key used to encrypt any objects written into + the OfflineStore S3 location. 
+ type: string + type: object + storageType: + description: Option for different tiers of low latency storage + for real-time data retrieval. Valid values are Standard, + or InMemory. + type: string + ttlDuration: + description: Time to live duration, where the record is hard + deleted after the expiration time is reached; ExpiresAt + = EventTime + TtlDuration.. See TTl Duration Below. + properties: + unit: + description: TtlDuration time unit. Valid values are Seconds, + Minutes, Hours, Days, or Weeks. + type: string + value: + description: TtlDuration time value. + type: number + type: object + type: object + recordIdentifierFeatureName: + description: The name of the Feature whose value uniquely identifies + a Record defined in the Feature Store. Only the latest record + per identifier value will be stored in the Online Store. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of the IAM execution + role used to persist data into the Offline Store if an offline_store_config + is provided. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_models.yaml b/package/crds/sagemaker.aws.upbound.io_models.yaml index 6389139c7b..b6bd8a7a3d 100644 --- a/package/crds/sagemaker.aws.upbound.io_models.yaml +++ b/package/crds/sagemaker.aws.upbound.io_models.yaml @@ -1207,3 +1207,1138 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Model is the Schema for the Models API. Provides a SageMaker + model resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ModelSpec defines the desired state of Model + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + container: + description: Specifies containers in the inference pipeline. If + not specified, the primary_container argument is required. Fields + are documented below. + items: + properties: + containerHostname: + description: The DNS host name for the container. + type: string + environment: + additionalProperties: + type: string + description: |- + Environment variables for the Docker container. + A list of key value pairs. + type: object + x-kubernetes-map-type: granular + image: + description: The registry path where the inference code + image is stored in Amazon ECR. + type: string + imageConfig: + description: Specifies whether the model container is in + Amazon ECR or a private Docker registry accessible from + your Amazon Virtual Private Cloud (VPC). For more information + see Using a Private Docker Registry for Real-Time Inference + Containers. see Image Config. 
+ properties: + repositoryAccessMode: + description: 'Specifies whether the model container + is in Amazon ECR or a private Docker registry accessible + from your Amazon Virtual Private Cloud (VPC). Allowed + values are: Platform and Vpc.' + type: string + repositoryAuthConfig: + description: Specifies an authentication configuration + for the private docker registry where your model image + is hosted. Specify a value for this property only + if you specified Vpc as the value for the RepositoryAccessMode + field, and the private Docker registry where the model + image is hosted requires authentication. see Repository + Auth Config. + properties: + repositoryCredentialsProviderArn: + description: The Amazon Resource Name (ARN) of an + AWS Lambda function that provides credentials + to authenticate to the private Docker registry + where your model image is hosted. For information + about how to create an AWS Lambda function, see + Create a Lambda function with the console in the + AWS Lambda Developer Guide. + type: string + type: object + type: object + mode: + description: The container hosts value SingleModel/MultiModel. + The default value is SingleModel. + type: string + modelDataSource: + description: The location of model data to deploy. Use this + for uncompressed model deployment. For information about + how to deploy an uncompressed model, see Deploying uncompressed + models in the AWS SageMaker Developer Guide. + properties: + s3DataSource: + description: The S3 location of model data to deploy. + items: + properties: + compressionType: + description: 'How the model data is prepared. + Allowed values are: None and Gzip.' + type: string + s3DataType: + description: 'The type of model data to deploy. + Allowed values are: S3Object and S3Prefix.' + type: string + s3Uri: + description: The S3 path of model data to deploy. 
+ type: string + type: object + type: array + type: object + modelDataUrl: + description: The URL for the S3 location where model artifacts + are stored. + type: string + modelPackageName: + description: The Amazon Resource Name (ARN) of the model + package to use to create the model. + type: string + type: object + type: array + enableNetworkIsolation: + description: Isolates the model container. No inbound or outbound + network calls can be made to or from the model container. + type: boolean + executionRoleArn: + description: A role that SageMaker can assume to access model + artifacts and docker images for deployment. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + inferenceExecutionConfig: + description: Specifies details of how containers in a multi-container + endpoint are called. see Inference Execution Config. + properties: + mode: + description: The container hosts value SingleModel/MultiModel. + The default value is SingleModel. + type: string + type: object + primaryContainer: + description: The primary docker image containing inference code + that is used when the model is deployed for predictions. If + not specified, the container argument is required. Fields are + documented below. + properties: + containerHostname: + description: The DNS host name for the container. + type: string + environment: + additionalProperties: + type: string + description: |- + Environment variables for the Docker container. + A list of key value pairs. + type: object + x-kubernetes-map-type: granular + image: + description: The registry path where the inference code image + is stored in Amazon ECR. 
+ type: string + imageConfig: + description: Specifies whether the model container is in Amazon + ECR or a private Docker registry accessible from your Amazon + Virtual Private Cloud (VPC). For more information see Using + a Private Docker Registry for Real-Time Inference Containers. + see Image Config. + properties: + repositoryAccessMode: + description: 'Specifies whether the model container is + in Amazon ECR or a private Docker registry accessible + from your Amazon Virtual Private Cloud (VPC). Allowed + values are: Platform and Vpc.' + type: string + repositoryAuthConfig: + description: Specifies an authentication configuration + for the private docker registry where your model image + is hosted. Specify a value for this property only if + you specified Vpc as the value for the RepositoryAccessMode + field, and the private Docker registry where the model + image is hosted requires authentication. see Repository + Auth Config. + properties: + repositoryCredentialsProviderArn: + description: The Amazon Resource Name (ARN) of an + AWS Lambda function that provides credentials to + authenticate to the private Docker registry where + your model image is hosted. For information about + how to create an AWS Lambda function, see Create + a Lambda function with the console in the AWS Lambda + Developer Guide. + type: string + type: object + type: object + mode: + description: The container hosts value SingleModel/MultiModel. + The default value is SingleModel. + type: string + modelDataSource: + description: The location of model data to deploy. Use this + for uncompressed model deployment. For information about + how to deploy an uncompressed model, see Deploying uncompressed + models in the AWS SageMaker Developer Guide. + properties: + s3DataSource: + description: The S3 location of model data to deploy. + items: + properties: + compressionType: + description: 'How the model data is prepared. Allowed + values are: None and Gzip.' 
+ type: string + s3DataType: + description: 'The type of model data to deploy. + Allowed values are: S3Object and S3Prefix.' + type: string + s3Uri: + description: The S3 path of model data to deploy. + type: string + type: object + type: array + type: object + modelDataUrl: + description: The URL for the S3 location where model artifacts + are stored. + type: string + modelPackageName: + description: The Amazon Resource Name (ARN) of the model package + to use to create the model. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: Specifies the VPC that you want your model to connect + to. VpcConfig is used in hosting services and in batch transform. + properties: + securityGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + container: + description: Specifies containers in the inference pipeline. If + not specified, the primary_container argument is required. 
Fields + are documented below. + items: + properties: + containerHostname: + description: The DNS host name for the container. + type: string + environment: + additionalProperties: + type: string + description: |- + Environment variables for the Docker container. + A list of key value pairs. + type: object + x-kubernetes-map-type: granular + image: + description: The registry path where the inference code + image is stored in Amazon ECR. + type: string + imageConfig: + description: Specifies whether the model container is in + Amazon ECR or a private Docker registry accessible from + your Amazon Virtual Private Cloud (VPC). For more information + see Using a Private Docker Registry for Real-Time Inference + Containers. see Image Config. + properties: + repositoryAccessMode: + description: 'Specifies whether the model container + is in Amazon ECR or a private Docker registry accessible + from your Amazon Virtual Private Cloud (VPC). Allowed + values are: Platform and Vpc.' + type: string + repositoryAuthConfig: + description: Specifies an authentication configuration + for the private docker registry where your model image + is hosted. Specify a value for this property only + if you specified Vpc as the value for the RepositoryAccessMode + field, and the private Docker registry where the model + image is hosted requires authentication. see Repository + Auth Config. + properties: + repositoryCredentialsProviderArn: + description: The Amazon Resource Name (ARN) of an + AWS Lambda function that provides credentials + to authenticate to the private Docker registry + where your model image is hosted. For information + about how to create an AWS Lambda function, see + Create a Lambda function with the console in the + AWS Lambda Developer Guide. + type: string + type: object + type: object + mode: + description: The container hosts value SingleModel/MultiModel. + The default value is SingleModel. 
+ type: string + modelDataSource: + description: The location of model data to deploy. Use this + for uncompressed model deployment. For information about + how to deploy an uncompressed model, see Deploying uncompressed + models in the AWS SageMaker Developer Guide. + properties: + s3DataSource: + description: The S3 location of model data to deploy. + items: + properties: + compressionType: + description: 'How the model data is prepared. + Allowed values are: None and Gzip.' + type: string + s3DataType: + description: 'The type of model data to deploy. + Allowed values are: S3Object and S3Prefix.' + type: string + s3Uri: + description: The S3 path of model data to deploy. + type: string + type: object + type: array + type: object + modelDataUrl: + description: The URL for the S3 location where model artifacts + are stored. + type: string + modelPackageName: + description: The Amazon Resource Name (ARN) of the model + package to use to create the model. + type: string + type: object + type: array + enableNetworkIsolation: + description: Isolates the model container. No inbound or outbound + network calls can be made to or from the model container. + type: boolean + executionRoleArn: + description: A role that SageMaker can assume to access model + artifacts and docker images for deployment. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + inferenceExecutionConfig: + description: Specifies details of how containers in a multi-container + endpoint are called. see Inference Execution Config. + properties: + mode: + description: The container hosts value SingleModel/MultiModel. + The default value is SingleModel. 
+ type: string + type: object + primaryContainer: + description: The primary docker image containing inference code + that is used when the model is deployed for predictions. If + not specified, the container argument is required. Fields are + documented below. + properties: + containerHostname: + description: The DNS host name for the container. + type: string + environment: + additionalProperties: + type: string + description: |- + Environment variables for the Docker container. + A list of key value pairs. + type: object + x-kubernetes-map-type: granular + image: + description: The registry path where the inference code image + is stored in Amazon ECR. + type: string + imageConfig: + description: Specifies whether the model container is in Amazon + ECR or a private Docker registry accessible from your Amazon + Virtual Private Cloud (VPC). For more information see Using + a Private Docker Registry for Real-Time Inference Containers. + see Image Config. + properties: + repositoryAccessMode: + description: 'Specifies whether the model container is + in Amazon ECR or a private Docker registry accessible + from your Amazon Virtual Private Cloud (VPC). Allowed + values are: Platform and Vpc.' + type: string + repositoryAuthConfig: + description: Specifies an authentication configuration + for the private docker registry where your model image + is hosted. Specify a value for this property only if + you specified Vpc as the value for the RepositoryAccessMode + field, and the private Docker registry where the model + image is hosted requires authentication. see Repository + Auth Config. + properties: + repositoryCredentialsProviderArn: + description: The Amazon Resource Name (ARN) of an + AWS Lambda function that provides credentials to + authenticate to the private Docker registry where + your model image is hosted. For information about + how to create an AWS Lambda function, see Create + a Lambda function with the console in the AWS Lambda + Developer Guide. 
+ type: string + type: object + type: object + mode: + description: The container hosts value SingleModel/MultiModel. + The default value is SingleModel. + type: string + modelDataSource: + description: The location of model data to deploy. Use this + for uncompressed model deployment. For information about + how to deploy an uncompressed model, see Deploying uncompressed + models in the AWS SageMaker Developer Guide. + properties: + s3DataSource: + description: The S3 location of model data to deploy. + items: + properties: + compressionType: + description: 'How the model data is prepared. Allowed + values are: None and Gzip.' + type: string + s3DataType: + description: 'The type of model data to deploy. + Allowed values are: S3Object and S3Prefix.' + type: string + s3Uri: + description: The S3 path of model data to deploy. + type: string + type: object + type: array + type: object + modelDataUrl: + description: The URL for the S3 location where model artifacts + are stored. + type: string + modelPackageName: + description: The Amazon Resource Name (ARN) of the model package + to use to create the model. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: Specifies the VPC that you want your model to connect + to. VpcConfig is used in hosting services and in batch transform. + properties: + securityGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ModelStatus defines the observed state of Model. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this model. + type: string + container: + description: Specifies containers in the inference pipeline. If + not specified, the primary_container argument is required. Fields + are documented below. + items: + properties: + containerHostname: + description: The DNS host name for the container. + type: string + environment: + additionalProperties: + type: string + description: |- + Environment variables for the Docker container. 
+ A list of key value pairs. + type: object + x-kubernetes-map-type: granular + image: + description: The registry path where the inference code + image is stored in Amazon ECR. + type: string + imageConfig: + description: Specifies whether the model container is in + Amazon ECR or a private Docker registry accessible from + your Amazon Virtual Private Cloud (VPC). For more information + see Using a Private Docker Registry for Real-Time Inference + Containers. see Image Config. + properties: + repositoryAccessMode: + description: 'Specifies whether the model container + is in Amazon ECR or a private Docker registry accessible + from your Amazon Virtual Private Cloud (VPC). Allowed + values are: Platform and Vpc.' + type: string + repositoryAuthConfig: + description: Specifies an authentication configuration + for the private docker registry where your model image + is hosted. Specify a value for this property only + if you specified Vpc as the value for the RepositoryAccessMode + field, and the private Docker registry where the model + image is hosted requires authentication. see Repository + Auth Config. + properties: + repositoryCredentialsProviderArn: + description: The Amazon Resource Name (ARN) of an + AWS Lambda function that provides credentials + to authenticate to the private Docker registry + where your model image is hosted. For information + about how to create an AWS Lambda function, see + Create a Lambda function with the console in the + AWS Lambda Developer Guide. + type: string + type: object + type: object + mode: + description: The container hosts value SingleModel/MultiModel. + The default value is SingleModel. + type: string + modelDataSource: + description: The location of model data to deploy. Use this + for uncompressed model deployment. For information about + how to deploy an uncompressed model, see Deploying uncompressed + models in the AWS SageMaker Developer Guide. 
+ properties: + s3DataSource: + description: The S3 location of model data to deploy. + items: + properties: + compressionType: + description: 'How the model data is prepared. + Allowed values are: None and Gzip.' + type: string + s3DataType: + description: 'The type of model data to deploy. + Allowed values are: S3Object and S3Prefix.' + type: string + s3Uri: + description: The S3 path of model data to deploy. + type: string + type: object + type: array + type: object + modelDataUrl: + description: The URL for the S3 location where model artifacts + are stored. + type: string + modelPackageName: + description: The Amazon Resource Name (ARN) of the model + package to use to create the model. + type: string + type: object + type: array + enableNetworkIsolation: + description: Isolates the model container. No inbound or outbound + network calls can be made to or from the model container. + type: boolean + executionRoleArn: + description: A role that SageMaker can assume to access model + artifacts and docker images for deployment. + type: string + id: + type: string + inferenceExecutionConfig: + description: Specifies details of how containers in a multi-container + endpoint are called. see Inference Execution Config. + properties: + mode: + description: The container hosts value SingleModel/MultiModel. + The default value is SingleModel. + type: string + type: object + primaryContainer: + description: The primary docker image containing inference code + that is used when the model is deployed for predictions. If + not specified, the container argument is required. Fields are + documented below. + properties: + containerHostname: + description: The DNS host name for the container. + type: string + environment: + additionalProperties: + type: string + description: |- + Environment variables for the Docker container. + A list of key value pairs. 
+ type: object + x-kubernetes-map-type: granular + image: + description: The registry path where the inference code image + is stored in Amazon ECR. + type: string + imageConfig: + description: Specifies whether the model container is in Amazon + ECR or a private Docker registry accessible from your Amazon + Virtual Private Cloud (VPC). For more information see Using + a Private Docker Registry for Real-Time Inference Containers. + see Image Config. + properties: + repositoryAccessMode: + description: 'Specifies whether the model container is + in Amazon ECR or a private Docker registry accessible + from your Amazon Virtual Private Cloud (VPC). Allowed + values are: Platform and Vpc.' + type: string + repositoryAuthConfig: + description: Specifies an authentication configuration + for the private docker registry where your model image + is hosted. Specify a value for this property only if + you specified Vpc as the value for the RepositoryAccessMode + field, and the private Docker registry where the model + image is hosted requires authentication. see Repository + Auth Config. + properties: + repositoryCredentialsProviderArn: + description: The Amazon Resource Name (ARN) of an + AWS Lambda function that provides credentials to + authenticate to the private Docker registry where + your model image is hosted. For information about + how to create an AWS Lambda function, see Create + a Lambda function with the console in the AWS Lambda + Developer Guide. + type: string + type: object + type: object + mode: + description: The container hosts value SingleModel/MultiModel. + The default value is SingleModel. + type: string + modelDataSource: + description: The location of model data to deploy. Use this + for uncompressed model deployment. For information about + how to deploy an uncompressed model, see Deploying uncompressed + models in the AWS SageMaker Developer Guide. + properties: + s3DataSource: + description: The S3 location of model data to deploy. 
+ items: + properties: + compressionType: + description: 'How the model data is prepared. Allowed + values are: None and Gzip.' + type: string + s3DataType: + description: 'The type of model data to deploy. + Allowed values are: S3Object and S3Prefix.' + type: string + s3Uri: + description: The S3 path of model data to deploy. + type: string + type: object + type: array + type: object + modelDataUrl: + description: The URL for the S3 location where model artifacts + are stored. + type: string + modelPackageName: + description: The Amazon Resource Name (ARN) of the model package + to use to create the model. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + vpcConfig: + description: Specifies the VPC that you want your model to connect + to. VpcConfig is used in hosting services and in batch transform. + properties: + securityGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_notebookinstances.yaml b/package/crds/sagemaker.aws.upbound.io_notebookinstances.yaml index eb2c8548da..6c18f84366 100644 --- a/package/crds/sagemaker.aws.upbound.io_notebookinstances.yaml +++ b/package/crds/sagemaker.aws.upbound.io_notebookinstances.yaml @@ -1235,3 +1235,1214 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: NotebookInstance is the Schema for the NotebookInstances 
API. + Provides a SageMaker Notebook Instance resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NotebookInstanceSpec defines the desired state of NotebookInstance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + acceleratorTypes: + description: 'A list of Elastic Inference (EI) instance types + to associate with this notebook instance. See Elastic Inference + Accelerator for more details. Valid values: ml.eia1.medium, + ml.eia1.large, ml.eia1.xlarge, ml.eia2.medium, ml.eia2.large, + ml.eia2.xlarge.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + additionalCodeRepositories: + description: |- + An array of up to three Git repositories to associate with the notebook instance. + These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. + items: + type: string + type: array + x-kubernetes-list-type: set + defaultCodeRepository: + description: The Git repository associated with the notebook instance + as its default code repository. This can be either the name + of a Git repository stored as a resource in your account, or + the URL of a Git repository in AWS CodeCommit or in any other + Git repository. + type: string + defaultCodeRepositoryRef: + description: Reference to a CodeRepository in sagemaker to populate + defaultCodeRepository. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultCodeRepositorySelector: + description: Selector for a CodeRepository in sagemaker to populate + defaultCodeRepository. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + directInternetAccess: + description: 'Set to Disabled to disable internet access to notebook. + Requires security_groups and subnet_id to be set. Supported + values: Enabled (Default) or Disabled. If set to Disabled, the + notebook instance will be able to access resources only in your + VPC, and will not be able to connect to Amazon SageMaker training + and endpoint services unless your configure a NAT Gateway in + your VPC.' + type: string + instanceMetadataServiceConfiguration: + description: Information on the IMDS configuration of the notebook + instance. Conflicts with instance_metadata_service_configuration. + see details below. + properties: + minimumInstanceMetadataServiceVersion: + description: Indicates the minimum IMDS version that the notebook + instance supports. When passed "1" is passed. This means + that both IMDSv1 and IMDSv2 are supported. 
Valid values + are 1 and 2. + type: string + type: object + instanceType: + description: The name of ML compute instance type. + type: string + kmsKeyId: + description: The AWS Key Management Service (AWS KMS) key that + Amazon SageMaker uses to encrypt the model artifacts at rest + using Amazon S3 server-side encryption. + type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + lifecycleConfigName: + description: The name of a lifecycle configuration to associate + with the notebook instance. + type: string + platformIdentifier: + description: The platform identifier of the notebook instance + runtime environment. This value can be either notebook-al1-v1, + notebook-al2-v1, or notebook-al2-v2, depending on which version + of Amazon Linux you require. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The ARN of the IAM role to be used by the notebook + instance which allows SageMaker to call other services on your + behalf. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rootAccess: + description: Whether root access is Enabled or Disabled for users + of the notebook instance. The default value is Enabled. + type: string + securityGroups: + description: The associated security groups. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The VPC subnet ID. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + volumeSize: + description: The size, in GB, of the ML storage volume to attach + to the notebook instance. The default value is 5 GB. + type: number + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + acceleratorTypes: + description: 'A list of Elastic Inference (EI) instance types + to associate with this notebook instance. See Elastic Inference + Accelerator for more details. Valid values: ml.eia1.medium, + ml.eia1.large, ml.eia1.xlarge, ml.eia2.medium, ml.eia2.large, + ml.eia2.xlarge.' + items: + type: string + type: array + x-kubernetes-list-type: set + additionalCodeRepositories: + description: |- + An array of up to three Git repositories to associate with the notebook instance. + These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + defaultCodeRepository: + description: The Git repository associated with the notebook instance + as its default code repository. This can be either the name + of a Git repository stored as a resource in your account, or + the URL of a Git repository in AWS CodeCommit or in any other + Git repository. + type: string + defaultCodeRepositoryRef: + description: Reference to a CodeRepository in sagemaker to populate + defaultCodeRepository. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultCodeRepositorySelector: + description: Selector for a CodeRepository in sagemaker to populate + defaultCodeRepository. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + directInternetAccess: + description: 'Set to Disabled to disable internet access to notebook. + Requires security_groups and subnet_id to be set. Supported + values: Enabled (Default) or Disabled. If set to Disabled, the + notebook instance will be able to access resources only in your + VPC, and will not be able to connect to Amazon SageMaker training + and endpoint services unless your configure a NAT Gateway in + your VPC.' + type: string + instanceMetadataServiceConfiguration: + description: Information on the IMDS configuration of the notebook + instance. Conflicts with instance_metadata_service_configuration. + see details below. + properties: + minimumInstanceMetadataServiceVersion: + description: Indicates the minimum IMDS version that the notebook + instance supports. When passed "1" is passed. This means + that both IMDSv1 and IMDSv2 are supported. Valid values + are 1 and 2. + type: string + type: object + instanceType: + description: The name of ML compute instance type. + type: string + kmsKeyId: + description: The AWS Key Management Service (AWS KMS) key that + Amazon SageMaker uses to encrypt the model artifacts at rest + using Amazon S3 server-side encryption. 
+ type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + lifecycleConfigName: + description: The name of a lifecycle configuration to associate + with the notebook instance. + type: string + platformIdentifier: + description: The platform identifier of the notebook instance + runtime environment. This value can be either notebook-al1-v1, + notebook-al2-v1, or notebook-al2-v2, depending on which version + of Amazon Linux you require. + type: string + roleArn: + description: The ARN of the IAM role to be used by the notebook + instance which allows SageMaker to call other services on your + behalf. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rootAccess: + description: Whether root access is Enabled or Disabled for users + of the notebook instance. The default value is Enabled. + type: string + securityGroups: + description: The associated security groups. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The VPC subnet ID. + type: string + subnetIdRef: + description: Reference to a Subnet in ec2 to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in ec2 to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + volumeSize: + description: The size, in GB, of the ML storage volume to attach + to the notebook instance. The default value is 5 GB. 
+ type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.instanceType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instanceType) + || (has(self.initProvider) && has(self.initProvider.instanceType))' + status: + description: NotebookInstanceStatus defines the observed state of NotebookInstance. 
+ properties: + atProvider: + properties: + acceleratorTypes: + description: 'A list of Elastic Inference (EI) instance types + to associate with this notebook instance. See Elastic Inference + Accelerator for more details. Valid values: ml.eia1.medium, + ml.eia1.large, ml.eia1.xlarge, ml.eia2.medium, ml.eia2.large, + ml.eia2.xlarge.' + items: + type: string + type: array + x-kubernetes-list-type: set + additionalCodeRepositories: + description: |- + An array of up to three Git repositories to associate with the notebook instance. + These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. + items: + type: string + type: array + x-kubernetes-list-type: set + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this notebook instance. + type: string + defaultCodeRepository: + description: The Git repository associated with the notebook instance + as its default code repository. This can be either the name + of a Git repository stored as a resource in your account, or + the URL of a Git repository in AWS CodeCommit or in any other + Git repository. + type: string + directInternetAccess: + description: 'Set to Disabled to disable internet access to notebook. + Requires security_groups and subnet_id to be set. Supported + values: Enabled (Default) or Disabled. If set to Disabled, the + notebook instance will be able to access resources only in your + VPC, and will not be able to connect to Amazon SageMaker training + and endpoint services unless your configure a NAT Gateway in + your VPC.' + type: string + id: + description: The name of the notebook instance. + type: string + instanceMetadataServiceConfiguration: + description: Information on the IMDS configuration of the notebook + instance. 
Conflicts with instance_metadata_service_configuration. + see details below. + properties: + minimumInstanceMetadataServiceVersion: + description: Indicates the minimum IMDS version that the notebook + instance supports. When passed "1" is passed. This means + that both IMDSv1 and IMDSv2 are supported. Valid values + are 1 and 2. + type: string + type: object + instanceType: + description: The name of ML compute instance type. + type: string + kmsKeyId: + description: The AWS Key Management Service (AWS KMS) key that + Amazon SageMaker uses to encrypt the model artifacts at rest + using Amazon S3 server-side encryption. + type: string + lifecycleConfigName: + description: The name of a lifecycle configuration to associate + with the notebook instance. + type: string + networkInterfaceId: + description: The network interface ID that Amazon SageMaker created + at the time of creating the instance. Only available when setting + subnet_id. + type: string + platformIdentifier: + description: The platform identifier of the notebook instance + runtime environment. This value can be either notebook-al1-v1, + notebook-al2-v1, or notebook-al2-v2, depending on which version + of Amazon Linux you require. + type: string + roleArn: + description: The ARN of the IAM role to be used by the notebook + instance which allows SageMaker to call other services on your + behalf. + type: string + rootAccess: + description: Whether root access is Enabled or Disabled for users + of the notebook instance. The default value is Enabled. + type: string + securityGroups: + description: The associated security groups. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The VPC subnet ID. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + url: + description: The URL that you use to connect to the Jupyter notebook + that is running in your notebook instance. + type: string + volumeSize: + description: The size, in GB, of the ML storage volume to attach + to the notebook instance. The default value is 5 GB. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_spaces.yaml b/package/crds/sagemaker.aws.upbound.io_spaces.yaml index 1986e44fa3..aaf1e04d71 100644 --- a/package/crds/sagemaker.aws.upbound.io_spaces.yaml +++ b/package/crds/sagemaker.aws.upbound.io_spaces.yaml @@ -1314,3 +1314,1197 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Space is the Schema for the Spaces API. Provides a SageMaker + Space resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpaceSpec defines the desired state of Space + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + domainId: + description: The ID of the associated Domain. + type: string + domainIdRef: + description: Reference to a Domain in sagemaker to populate domainId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainIdSelector: + description: Selector for a Domain in sagemaker to populate domainId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + ownershipSettings: + description: A collection of ownership settings. See Ownership + Settings below. + properties: + ownerUserProfileName: + description: The user profile who is the owner of the private + space. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + spaceDisplayName: + description: The name of the space that appears in the SageMaker + Studio UI. + type: string + spaceName: + description: The name of the space. + type: string + spaceSettings: + description: A collection of space settings. See Space Settings + below. 
+ properties: + appType: + description: The type of app created within the space. + type: string + codeEditorAppSettings: + description: The Code Editor application settings. See Code + Editor App Settings below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + customFileSystem: + description: A file system, created by you, that you assign + to a space for an Amazon SageMaker Domain. See Custom File + System below. + items: + properties: + efsFileSystem: + description: A custom file system in Amazon EFS. see + EFS File System below. + properties: + fileSystemId: + description: The ID of your Amazon EFS file system. + type: string + type: object + type: object + type: array + jupyterLabAppSettings: + description: The settings for the JupyterLab application. + See Jupyter Lab App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. 
+ type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + jupyterServerAppSettings: + description: The Jupyter server's app settings. See Jupyter + Server App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. 
+ type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See Kernel Gateway + App Settings below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + spaceStorageSettings: + properties: + ebsStorageSettings: + properties: + ebsVolumeSizeInGb: + type: number + type: object + type: object + type: object + spaceSharingSettings: + description: A collection of space sharing settings. 
See Space + Sharing Settings below. + properties: + sharingType: + description: Specifies the sharing type of the space. Valid + values are Private and Shared. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + domainId: + description: The ID of the associated Domain. + type: string + domainIdRef: + description: Reference to a Domain in sagemaker to populate domainId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainIdSelector: + description: Selector for a Domain in sagemaker to populate domainId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + ownershipSettings: + description: A collection of ownership settings. See Ownership + Settings below. + properties: + ownerUserProfileName: + description: The user profile who is the owner of the private + space. + type: string + type: object + spaceDisplayName: + description: The name of the space that appears in the SageMaker + Studio UI. + type: string + spaceName: + description: The name of the space. + type: string + spaceSettings: + description: A collection of space settings. See Space Settings + below. + properties: + appType: + description: The type of app created within the space. + type: string + codeEditorAppSettings: + description: The Code Editor application settings. 
See Code + Editor App Settings below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + customFileSystem: + description: A file system, created by you, that you assign + to a space for an Amazon SageMaker Domain. See Custom File + System below. + items: + properties: + efsFileSystem: + description: A custom file system in Amazon EFS. see + EFS File System below. + properties: + fileSystemId: + description: The ID of your Amazon EFS file system. + type: string + type: object + type: object + type: array + jupyterLabAppSettings: + description: The settings for the JupyterLab application. + See Jupyter Lab App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. 
+ type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + jupyterServerAppSettings: + description: The Jupyter server's app settings. See Jupyter + Server App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. 
See Kernel Gateway + App Settings below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + spaceStorageSettings: + properties: + ebsStorageSettings: + properties: + ebsVolumeSizeInGb: + type: number + type: object + type: object + type: object + spaceSharingSettings: + description: A collection of space sharing settings. See Space + Sharing Settings below. + properties: + sharingType: + description: Specifies the sharing type of the space. Valid + values are Private and Shared. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.spaceName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.spaceName) + || (has(self.initProvider) && has(self.initProvider.spaceName))' + status: + description: SpaceStatus defines the observed state of Space. + properties: + atProvider: + properties: + arn: + description: The space's Amazon Resource Name (ARN). 
+ type: string + domainId: + description: The ID of the associated Domain. + type: string + homeEfsFileSystemUid: + description: The ID of the space's profile in the Amazon Elastic + File System volume. + type: string + id: + description: The space's Amazon Resource Name (ARN). + type: string + ownershipSettings: + description: A collection of ownership settings. See Ownership + Settings below. + properties: + ownerUserProfileName: + description: The user profile who is the owner of the private + space. + type: string + type: object + spaceDisplayName: + description: The name of the space that appears in the SageMaker + Studio UI. + type: string + spaceName: + description: The name of the space. + type: string + spaceSettings: + description: A collection of space settings. See Space Settings + below. + properties: + appType: + description: The type of app created within the space. + type: string + codeEditorAppSettings: + description: The Code Editor application settings. See Code + Editor App Settings below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + customFileSystem: + description: A file system, created by you, that you assign + to a space for an Amazon SageMaker Domain. See Custom File + System below. 
+ items: + properties: + efsFileSystem: + description: A custom file system in Amazon EFS. see + EFS File System below. + properties: + fileSystemId: + description: The ID of your Amazon EFS file system. + type: string + type: object + type: object + type: array + jupyterLabAppSettings: + description: The settings for the JupyterLab application. + See Jupyter Lab App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + jupyterServerAppSettings: + description: The Jupyter server's app settings. See Jupyter + Server App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. 
+ type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See Kernel Gateway + App Settings below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. 
+ type: string + sagemakerImageArn: + description: The Amazon Resource Name (ARN) of the + SageMaker image created on the instance. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + spaceStorageSettings: + properties: + ebsStorageSettings: + properties: + ebsVolumeSizeInGb: + type: number + type: object + type: object + type: object + spaceSharingSettings: + description: A collection of space sharing settings. See Space + Sharing Settings below. + properties: + sharingType: + description: Specifies the sharing type of the space. Valid + values are Private and Shared. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + url: + description: Returns the URL of the space. If the space is created + with Amazon Web Services IAM Identity Center (Successor to Amazon + Web Services Single Sign-On) authentication, users can navigate + to the URL after appending the respective redirect parameter + for the application type to be federated through Amazon Web + Services IAM Identity Center. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_userprofiles.yaml b/package/crds/sagemaker.aws.upbound.io_userprofiles.yaml index 0fa9763a7f..286c0bc5de 100644 --- a/package/crds/sagemaker.aws.upbound.io_userprofiles.yaml +++ b/package/crds/sagemaker.aws.upbound.io_userprofiles.yaml @@ -2335,3 +2335,2110 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: UserProfile is the Schema for the UserProfiles API. Provides + a SageMaker User Profile resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserProfileSpec defines the desired state of UserProfile + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + domainId: + description: The ID of the associated Domain. + type: string + domainIdRef: + description: Reference to a Domain in sagemaker to populate domainId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainIdSelector: + description: Selector for a Domain in sagemaker to populate domainId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + singleSignOnUserIdentifier: + description: A specifier for the type of value specified in single_sign_on_user_value. + Currently, the only supported value is UserName. If the Domain's + AuthMode is SSO, this field is required. If the Domain's AuthMode + is not SSO, this field cannot be specified. + type: string + singleSignOnUserValue: + description: The username of the associated AWS Single Sign-On + User for this User Profile. If the Domain's AuthMode is SSO, + this field is required, and must match a valid username of a + user in your directory. 
If the Domain's AuthMode is not SSO, + this field cannot be specified. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userProfileName: + description: The name for the User Profile. + type: string + userSettings: + description: The user settings. See User Settings below. + properties: + canvasAppSettings: + description: The Canvas app settings. See Canvas App Settings + below. + properties: + directDeploySettings: + description: The model deployment settings for the SageMaker + Canvas application. See Direct Deploy Settings below. + properties: + status: + description: Describes whether model deployment permissions + are enabled or disabled in the Canvas application. + Valid values are ENABLED and DISABLED. + type: string + type: object + identityProviderOauthSettings: + description: The settings for connecting to an external + data source with OAuth. See Identity Provider OAuth + Settings below. + items: + properties: + dataSourceName: + description: The name of the data source that you're + connecting to. Canvas currently supports OAuth + for Snowflake and Salesforce Data Cloud. Valid + values are SalesforceGenie and Snowflake. + type: string + secretArn: + description: The ARN of an Amazon Web Services Secrets + Manager secret that stores the credentials from + your identity provider, such as the client ID + and secret, authorization URL, and token URL. + type: string + status: + description: Describes whether OAuth for a data + source is enabled or disabled in the Canvas application. + Valid values are ENABLED and DISABLED. + type: string + type: object + type: array + kendraSettings: + description: The settings for document querying. See Kendra + Settings below. + properties: + status: + description: Describes whether the document querying + feature is enabled or disabled in the Canvas application. + Valid values are ENABLED and DISABLED. 
+ type: string + type: object + modelRegisterSettings: + description: The model registry settings for the SageMaker + Canvas application. See Model Register Settings below. + properties: + crossAccountModelRegisterRoleArn: + description: The Amazon Resource Name (ARN) of the + SageMaker model registry account. Required only + to register model versions created by a different + SageMaker Canvas AWS account than the AWS account + in which SageMaker model registry is set up. + type: string + status: + description: Describes whether the integration to + the model registry is enabled or disabled in the + Canvas application. Valid values are ENABLED and + DISABLED. + type: string + type: object + timeSeriesForecastingSettings: + description: Time series forecast settings for the Canvas + app. See Time Series Forecasting Settings below. + properties: + amazonForecastRoleArn: + description: The IAM role that Canvas passes to Amazon + Forecast for time series forecasting. By default, + Canvas uses the execution role specified in the + UserProfile that launches the Canvas app. If an + execution role is not specified in the UserProfile, + Canvas uses the execution role specified in the + Domain that owns the UserProfile. To allow time + series forecasting, this IAM role should have the + AmazonSageMakerCanvasForecastAccess policy attached + and forecast.amazonaws.com added in the trust relationship + as a service principal. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + workspaceSettings: + description: The workspace settings for the SageMaker + Canvas application. See Workspace Settings below. + properties: + s3ArtifactPath: + description: The Amazon S3 bucket used to store artifacts + generated by Canvas. 
Updating the Amazon S3 location + impacts existing configuration settings, and Canvas + users no longer have access to their artifacts. + Canvas users must log out and log back in to apply + the new location. + type: string + s3KmsKeyId: + description: The Amazon Web Services Key Management + Service (KMS) encryption key ID that is used to + encrypt artifacts generated by Canvas in the Amazon + S3 bucket. + type: string + type: object + type: object + codeEditorAppSettings: + description: The Code Editor application settings. See Code + Editor App Settings below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + customFileSystemConfig: + description: The settings for assigning a custom file system + to a user profile. Permitted users can access this file + system in Amazon SageMaker Studio. See Custom File System + Config below. + items: + properties: + efsFileSystemConfig: + description: The default EBS storage settings for a + private space. See EFS File System Config below. 
+ items: + properties: + fileSystemId: + description: The ID of your Amazon EFS file system. + type: string + fileSystemPath: + description: The path to the file system directory + that is accessible in Amazon SageMaker Studio. + Permitted users can access only this directory + and below. + type: string + type: object + type: array + type: object + type: array + customPosixUserConfig: + description: Details about the POSIX identity that is used + for file system operations. See Custom Posix User Config + below. + properties: + gid: + description: The POSIX group ID. + type: number + uid: + description: The POSIX user ID. + type: number + type: object + defaultLandingUri: + description: 'The default experience that the user is directed + to when accessing the domain. The supported values are: + studio::: Indicates that Studio is the default experience. + This value can only be passed if StudioWebPortal is set + to ENABLED. app:JupyterServer:: Indicates that Studio Classic + is the default experience.' + type: string + executionRole: + description: The execution role ARN for the user. + type: string + jupyterLabAppSettings: + description: The settings for the JupyterLab application. + See Jupyter Lab App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. 
+ type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + jupyterServerAppSettings: + description: The Jupyter server's app settings. See Jupyter + Server App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. 
+ type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See Kernel Gateway + App Settings below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. 
+ type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + rSessionAppSettings: + description: The RSession app settings. See RSession App Settings + below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + rStudioServerProAppSettings: + description: A collection of settings that configure user + interaction with the RStudioServerPro app. See RStudioServerProAppSettings + below. + properties: + accessStatus: + description: Indicates whether the current user has access + to the RStudioServerPro app. Valid values are ENABLED + and DISABLED. 
+ type: string + userGroup: + description: The level of permissions that the user has + within the RStudioServerPro app. This value defaults + to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the + user access to the RStudio Administrative Dashboard. + Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + type: string + type: object + securityGroups: + description: A list of security group IDs that will be attached + to the user. + items: + type: string + type: array + x-kubernetes-list-type: set + sharingSettings: + description: The sharing settings. See Sharing Settings below. + properties: + notebookOutputOption: + description: Whether to include the notebook cell output + when sharing the notebook. The default is Disabled. + Valid values are Allowed and Disabled. + type: string + s3KmsKeyId: + description: When notebook_output_option is Allowed, the + AWS Key Management Service (KMS) encryption key ID used + to encrypt the notebook cell output in the Amazon S3 + bucket. + type: string + s3OutputPath: + description: When notebook_output_option is Allowed, the + Amazon S3 bucket used to save the notebook cell output. + type: string + type: object + spaceStorageSettings: + description: The storage settings for a private space. See + Space Storage Settings below. + properties: + defaultEbsStorageSettings: + description: The default EBS storage settings for a private + space. See Default EBS Storage Settings below. + properties: + defaultEbsVolumeSizeInGb: + description: The default size of the EBS storage volume + for a private space. + type: number + maximumEbsVolumeSizeInGb: + description: The maximum size of the EBS storage volume + for a private space. + type: number + type: object + type: object + studioWebPortal: + description: Whether the user can access Studio. If this value + is set to DISABLED, the user cannot access Studio, even + if that is the default experience for the domain. Valid + values are ENABLED and DISABLED. 
+ type: string + tensorBoardAppSettings: + description: The TensorBoard app settings. See TensorBoard + App Settings below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + domainId: + description: The ID of the associated Domain. + type: string + domainIdRef: + description: Reference to a Domain in sagemaker to populate domainId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + domainIdSelector: + description: Selector for a Domain in sagemaker to populate domainId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + singleSignOnUserIdentifier: + description: A specifier for the type of value specified in single_sign_on_user_value. + Currently, the only supported value is UserName. If the Domain's + AuthMode is SSO, this field is required. If the Domain's AuthMode + is not SSO, this field cannot be specified. + type: string + singleSignOnUserValue: + description: The username of the associated AWS Single Sign-On + User for this User Profile. If the Domain's AuthMode is SSO, + this field is required, and must match a valid username of a + user in your directory. If the Domain's AuthMode is not SSO, + this field cannot be specified. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + userProfileName: + description: The name for the User Profile. + type: string + userSettings: + description: The user settings. See User Settings below. + properties: + canvasAppSettings: + description: The Canvas app settings. See Canvas App Settings + below. + properties: + directDeploySettings: + description: The model deployment settings for the SageMaker + Canvas application. See Direct Deploy Settings below. + properties: + status: + description: Describes whether model deployment permissions + are enabled or disabled in the Canvas application. + Valid values are ENABLED and DISABLED. + type: string + type: object + identityProviderOauthSettings: + description: The settings for connecting to an external + data source with OAuth. See Identity Provider OAuth + Settings below. + items: + properties: + dataSourceName: + description: The name of the data source that you're + connecting to. Canvas currently supports OAuth + for Snowflake and Salesforce Data Cloud. Valid + values are SalesforceGenie and Snowflake. 
+ type: string + secretArn: + description: The ARN of an Amazon Web Services Secrets + Manager secret that stores the credentials from + your identity provider, such as the client ID + and secret, authorization URL, and token URL. + type: string + status: + description: Describes whether OAuth for a data + source is enabled or disabled in the Canvas application. + Valid values are ENABLED and DISABLED. + type: string + type: object + type: array + kendraSettings: + description: The settings for document querying. See Kendra + Settings below. + properties: + status: + description: Describes whether the document querying + feature is enabled or disabled in the Canvas application. + Valid values are ENABLED and DISABLED. + type: string + type: object + modelRegisterSettings: + description: The model registry settings for the SageMaker + Canvas application. See Model Register Settings below. + properties: + crossAccountModelRegisterRoleArn: + description: The Amazon Resource Name (ARN) of the + SageMaker model registry account. Required only + to register model versions created by a different + SageMaker Canvas AWS account than the AWS account + in which SageMaker model registry is set up. + type: string + status: + description: Describes whether the integration to + the model registry is enabled or disabled in the + Canvas application. Valid values are ENABLED and + DISABLED. + type: string + type: object + timeSeriesForecastingSettings: + description: Time series forecast settings for the Canvas + app. See Time Series Forecasting Settings below. + properties: + amazonForecastRoleArn: + description: The IAM role that Canvas passes to Amazon + Forecast for time series forecasting. By default, + Canvas uses the execution role specified in the + UserProfile that launches the Canvas app. If an + execution role is not specified in the UserProfile, + Canvas uses the execution role specified in the + Domain that owns the UserProfile. 
To allow time + series forecasting, this IAM role should have the + AmazonSageMakerCanvasForecastAccess policy attached + and forecast.amazonaws.com added in the trust relationship + as a service principal. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + workspaceSettings: + description: The workspace settings for the SageMaker + Canvas application. See Workspace Settings below. + properties: + s3ArtifactPath: + description: The Amazon S3 bucket used to store artifacts + generated by Canvas. Updating the Amazon S3 location + impacts existing configuration settings, and Canvas + users no longer have access to their artifacts. + Canvas users must log out and log back in to apply + the new location. + type: string + s3KmsKeyId: + description: The Amazon Web Services Key Management + Service (KMS) encryption key ID that is used to + encrypt artifacts generated by Canvas in the Amazon + S3 bucket. + type: string + type: object + type: object + codeEditorAppSettings: + description: The Code Editor application settings. See Code + Editor App Settings below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. 
+ type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + customFileSystemConfig: + description: The settings for assigning a custom file system + to a user profile. Permitted users can access this file + system in Amazon SageMaker Studio. See Custom File System + Config below. + items: + properties: + efsFileSystemConfig: + description: The default EBS storage settings for a + private space. See EFS File System Config below. + items: + properties: + fileSystemId: + description: The ID of your Amazon EFS file system. + type: string + fileSystemPath: + description: The path to the file system directory + that is accessible in Amazon SageMaker Studio. + Permitted users can access only this directory + and below. + type: string + type: object + type: array + type: object + type: array + customPosixUserConfig: + description: Details about the POSIX identity that is used + for file system operations. See Custom Posix User Config + below. + properties: + gid: + description: The POSIX group ID. + type: number + uid: + description: The POSIX user ID. + type: number + type: object + defaultLandingUri: + description: 'The default experience that the user is directed + to when accessing the domain. The supported values are: + studio::: Indicates that Studio is the default experience. + This value can only be passed if StudioWebPortal is set + to ENABLED. app:JupyterServer:: Indicates that Studio Classic + is the default experience.' + type: string + executionRole: + description: The execution role ARN for the user. + type: string + jupyterLabAppSettings: + description: The settings for the JupyterLab application. + See Jupyter Lab App Settings below. 
+ properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + jupyterServerAppSettings: + description: The Jupyter server's app settings. See Jupyter + Server App Settings below. 
+ properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See Kernel Gateway + App Settings below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. 
+ type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + rSessionAppSettings: + description: The RSession app settings. See RSession App Settings + below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. 
+ type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + rStudioServerProAppSettings: + description: A collection of settings that configure user + interaction with the RStudioServerPro app. See RStudioServerProAppSettings + below. + properties: + accessStatus: + description: Indicates whether the current user has access + to the RStudioServerPro app. Valid values are ENABLED + and DISABLED. + type: string + userGroup: + description: The level of permissions that the user has + within the RStudioServerPro app. This value defaults + to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the + user access to the RStudio Administrative Dashboard. + Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + type: string + type: object + securityGroups: + description: A list of security group IDs that will be attached + to the user. + items: + type: string + type: array + x-kubernetes-list-type: set + sharingSettings: + description: The sharing settings. See Sharing Settings below. + properties: + notebookOutputOption: + description: Whether to include the notebook cell output + when sharing the notebook. The default is Disabled. + Valid values are Allowed and Disabled. + type: string + s3KmsKeyId: + description: When notebook_output_option is Allowed, the + AWS Key Management Service (KMS) encryption key ID used + to encrypt the notebook cell output in the Amazon S3 + bucket. + type: string + s3OutputPath: + description: When notebook_output_option is Allowed, the + Amazon S3 bucket used to save the notebook cell output. 
+ type: string + type: object + spaceStorageSettings: + description: The storage settings for a private space. See + Space Storage Settings below. + properties: + defaultEbsStorageSettings: + description: The default EBS storage settings for a private + space. See Default EBS Storage Settings below. + properties: + defaultEbsVolumeSizeInGb: + description: The default size of the EBS storage volume + for a private space. + type: number + maximumEbsVolumeSizeInGb: + description: The maximum size of the EBS storage volume + for a private space. + type: number + type: object + type: object + studioWebPortal: + description: Whether the user can access Studio. If this value + is set to DISABLED, the user cannot access Studio, even + if that is the default experience for the domain. Valid + values are ENABLED and DISABLED. + type: string + tensorBoardAppSettings: + description: The TensorBoard app settings. See TensorBoard + App Settings below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.userProfileName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.userProfileName) + || (has(self.initProvider) && has(self.initProvider.userProfileName))' + status: + description: UserProfileStatus defines the observed state of UserProfile. + properties: + atProvider: + properties: + arn: + description: The user profile Amazon Resource Name (ARN). + type: string + domainId: + description: The ID of the associated Domain. 
+ type: string + homeEfsFileSystemUid: + description: The ID of the user's profile in the Amazon Elastic + File System (EFS) volume. + type: string + id: + description: The user profile Amazon Resource Name (ARN). + type: string + singleSignOnUserIdentifier: + description: A specifier for the type of value specified in single_sign_on_user_value. + Currently, the only supported value is UserName. If the Domain's + AuthMode is SSO, this field is required. If the Domain's AuthMode + is not SSO, this field cannot be specified. + type: string + singleSignOnUserValue: + description: The username of the associated AWS Single Sign-On + User for this User Profile. If the Domain's AuthMode is SSO, + this field is required, and must match a valid username of a + user in your directory. If the Domain's AuthMode is not SSO, + this field cannot be specified. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + userProfileName: + description: The name for the User Profile. + type: string + userSettings: + description: The user settings. See User Settings below. + properties: + canvasAppSettings: + description: The Canvas app settings. See Canvas App Settings + below. + properties: + directDeploySettings: + description: The model deployment settings for the SageMaker + Canvas application. See Direct Deploy Settings below. + properties: + status: + description: Describes whether model deployment permissions + are enabled or disabled in the Canvas application. + Valid values are ENABLED and DISABLED. + type: string + type: object + identityProviderOauthSettings: + description: The settings for connecting to an external + data source with OAuth. 
See Identity Provider OAuth + Settings below. + items: + properties: + dataSourceName: + description: The name of the data source that you're + connecting to. Canvas currently supports OAuth + for Snowflake and Salesforce Data Cloud. Valid + values are SalesforceGenie and Snowflake. + type: string + secretArn: + description: The ARN of an Amazon Web Services Secrets + Manager secret that stores the credentials from + your identity provider, such as the client ID + and secret, authorization URL, and token URL. + type: string + status: + description: Describes whether OAuth for a data + source is enabled or disabled in the Canvas application. + Valid values are ENABLED and DISABLED. + type: string + type: object + type: array + kendraSettings: + description: The settings for document querying. See Kendra + Settings below. + properties: + status: + description: Describes whether the document querying + feature is enabled or disabled in the Canvas application. + Valid values are ENABLED and DISABLED. + type: string + type: object + modelRegisterSettings: + description: The model registry settings for the SageMaker + Canvas application. See Model Register Settings below. + properties: + crossAccountModelRegisterRoleArn: + description: The Amazon Resource Name (ARN) of the + SageMaker model registry account. Required only + to register model versions created by a different + SageMaker Canvas AWS account than the AWS account + in which SageMaker model registry is set up. + type: string + status: + description: Describes whether the integration to + the model registry is enabled or disabled in the + Canvas application. Valid values are ENABLED and + DISABLED. + type: string + type: object + timeSeriesForecastingSettings: + description: Time series forecast settings for the Canvas + app. See Time Series Forecasting Settings below. + properties: + amazonForecastRoleArn: + description: The IAM role that Canvas passes to Amazon + Forecast for time series forecasting. 
By default, + Canvas uses the execution role specified in the + UserProfile that launches the Canvas app. If an + execution role is not specified in the UserProfile, + Canvas uses the execution role specified in the + Domain that owns the UserProfile. To allow time + series forecasting, this IAM role should have the + AmazonSageMakerCanvasForecastAccess policy attached + and forecast.amazonaws.com added in the trust relationship + as a service principal. + type: string + status: + description: Describes whether time series forecasting + is enabled or disabled in the Canvas app. Valid + values are ENABLED and DISABLED. + type: string + type: object + workspaceSettings: + description: The workspace settings for the SageMaker + Canvas application. See Workspace Settings below. + properties: + s3ArtifactPath: + description: The Amazon S3 bucket used to store artifacts + generated by Canvas. Updating the Amazon S3 location + impacts existing configuration settings, and Canvas + users no longer have access to their artifacts. + Canvas users must log out and log back in to apply + the new location. + type: string + s3KmsKeyId: + description: The Amazon Web Services Key Management + Service (KMS) encryption key ID that is used to + encrypt artifacts generated by Canvas in the Amazon + S3 bucket. + type: string + type: object + type: object + codeEditorAppSettings: + description: The Code Editor application settings. See Code + Editor App Settings below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. 
+ type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + customFileSystemConfig: + description: The settings for assigning a custom file system + to a user profile. Permitted users can access this file + system in Amazon SageMaker Studio. See Custom File System + Config below. + items: + properties: + efsFileSystemConfig: + description: The default EBS storage settings for a + private space. See EFS File System Config below. + items: + properties: + fileSystemId: + description: The ID of your Amazon EFS file system. + type: string + fileSystemPath: + description: The path to the file system directory + that is accessible in Amazon SageMaker Studio. + Permitted users can access only this directory + and below. + type: string + type: object + type: array + type: object + type: array + customPosixUserConfig: + description: Details about the POSIX identity that is used + for file system operations. See Custom Posix User Config + below. + properties: + gid: + description: The POSIX group ID. + type: number + uid: + description: The POSIX user ID. + type: number + type: object + defaultLandingUri: + description: 'The default experience that the user is directed + to when accessing the domain. The supported values are: + studio::: Indicates that Studio is the default experience. + This value can only be passed if StudioWebPortal is set + to ENABLED. app:JupyterServer:: Indicates that Studio Classic + is the default experience.' 
+ type: string + executionRole: + description: The execution role ARN for the user. + type: string + jupyterLabAppSettings: + description: The settings for the JupyterLab application. + See Jupyter Lab App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + jupyterServerAppSettings: + description: The Jupyter server's app settings. See Jupyter + Server App Settings below. + properties: + codeRepository: + description: A list of Git repositories that SageMaker + automatically displays to users for cloning in the JupyterServer + application. see Code Repository below. + items: + properties: + repositoryUrl: + description: The URL of the Git repository. + type: string + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + kernelGatewayAppSettings: + description: The kernel gateway app settings. See Kernel Gateway + App Settings below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. 
+ type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + lifecycleConfigArns: + description: The Amazon Resource Name (ARN) of the Lifecycle + Configurations. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + rSessionAppSettings: + description: The RSession app settings. See RSession App Settings + below. + properties: + customImage: + description: A list of custom SageMaker images that are + configured to run as a KernelGateway app. see Custom + Image below. + items: + properties: + appImageConfigName: + description: The name of the App Image Config. + type: string + imageName: + description: The name of the Custom Image. + type: string + imageVersionNumber: + description: The version number of the Custom Image. + type: number + type: object + type: array + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. 
For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. + type: string + type: object + type: object + rStudioServerProAppSettings: + description: A collection of settings that configure user + interaction with the RStudioServerPro app. See RStudioServerProAppSettings + below. + properties: + accessStatus: + description: Indicates whether the current user has access + to the RStudioServerPro app. Valid values are ENABLED + and DISABLED. + type: string + userGroup: + description: The level of permissions that the user has + within the RStudioServerPro app. This value defaults + to R_STUDIO_USER. The R_STUDIO_ADMIN value allows the + user access to the RStudio Administrative Dashboard. + Valid values are R_STUDIO_USER and R_STUDIO_ADMIN. + type: string + type: object + securityGroups: + description: A list of security group IDs that will be attached + to the user. + items: + type: string + type: array + x-kubernetes-list-type: set + sharingSettings: + description: The sharing settings. See Sharing Settings below. + properties: + notebookOutputOption: + description: Whether to include the notebook cell output + when sharing the notebook. The default is Disabled. + Valid values are Allowed and Disabled. + type: string + s3KmsKeyId: + description: When notebook_output_option is Allowed, the + AWS Key Management Service (KMS) encryption key ID used + to encrypt the notebook cell output in the Amazon S3 + bucket. 
+ type: string + s3OutputPath: + description: When notebook_output_option is Allowed, the + Amazon S3 bucket used to save the notebook cell output. + type: string + type: object + spaceStorageSettings: + description: The storage settings for a private space. See + Space Storage Settings below. + properties: + defaultEbsStorageSettings: + description: The default EBS storage settings for a private + space. See Default EBS Storage Settings below. + properties: + defaultEbsVolumeSizeInGb: + description: The default size of the EBS storage volume + for a private space. + type: number + maximumEbsVolumeSizeInGb: + description: The maximum size of the EBS storage volume + for a private space. + type: number + type: object + type: object + studioWebPortal: + description: Whether the user can access Studio. If this value + is set to DISABLED, the user cannot access Studio, even + if that is the default experience for the domain. Valid + values are ENABLED and DISABLED. + type: string + tensorBoardAppSettings: + description: The TensorBoard app settings. See TensorBoard + App Settings below. + properties: + defaultResourceSpec: + description: The default instance type and the Amazon + Resource Name (ARN) of the SageMaker image created on + the instance. see Default Resource Spec below. + properties: + instanceType: + description: The instance type that the image version + runs on.. For valid values see SageMaker Instance + Types. + type: string + lifecycleConfigArn: + description: The Amazon Resource Name (ARN) of the + Lifecycle Configuration attached to the Resource. + type: string + sagemakerImageArn: + description: The ARN of the SageMaker image that the + image version belongs to. + type: string + sagemakerImageVersionAlias: + description: The SageMaker Image Version Alias. + type: string + sagemakerImageVersionArn: + description: The ARN of the image version created + on the instance. 
+ type: string + type: object + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_workforces.yaml b/package/crds/sagemaker.aws.upbound.io_workforces.yaml index 311f6aa529..e884b90f92 100644 --- a/package/crds/sagemaker.aws.upbound.io_workforces.yaml +++ b/package/crds/sagemaker.aws.upbound.io_workforces.yaml @@ -960,3 +960,915 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workforce is the Schema for the Workforces API. Provides a SageMaker + Workforce resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkforceSpec defines the desired state of Workforce + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cognitoConfig: + description: Use this parameter to configure an Amazon Cognito + private workforce. A single Cognito workforce is created using + and corresponds to a single Amazon Cognito user pool. Conflicts + with oidc_config. see Cognito Config details below. + properties: + clientId: + description: The client ID for your Amazon Cognito user pool. + type: string + clientIdRef: + description: Reference to a UserPoolClient in cognitoidp to + populate clientId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clientIdSelector: + description: Selector for a UserPoolClient in cognitoidp to + populate clientId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userPool: + description: ID for your Amazon Cognito user pool. + type: string + userPoolRef: + description: Reference to a UserPoolDomain in cognitoidp to + populate userPool. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolSelector: + description: Selector for a UserPoolDomain in cognitoidp to + populate userPool. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + oidcConfig: + description: Use this parameter to configure a private workforce + using your own OIDC Identity Provider. Conflicts with cognito_config. + see OIDC Config details below. + properties: + authorizationEndpoint: + description: The OIDC IdP authorization endpoint used to configure + your private workforce. + type: string + clientId: + description: The client ID for your Amazon Cognito user pool. + type: string + clientSecretSecretRef: + description: The OIDC IdP client secret used to configure + your private workforce. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + issuer: + description: The OIDC IdP issuer used to configure your private + workforce. + type: string + jwksUri: + description: The OIDC IdP JSON Web Key Set (Jwks) URI used + to configure your private workforce. + type: string + logoutEndpoint: + description: The OIDC IdP logout endpoint used to configure + your private workforce. + type: string + tokenEndpoint: + description: The OIDC IdP token endpoint used to configure + your private workforce. + type: string + userInfoEndpoint: + description: The OIDC IdP user information endpoint used to + configure your private workforce. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + sourceIpConfig: + description: A list of IP address ranges Used to create an allow + list of IP addresses for a private workforce. By default, a + workforce isn't restricted to specific IP addresses. see Source + Ip Config details below. + properties: + cidrs: + description: A list of up to 10 CIDR values. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + workforceVpcConfig: + description: configure a workforce using VPC. see Workforce VPC + Config details below. + properties: + securityGroupIds: + description: The VPC security group IDs. The security groups + must be for the same VPC as specified in the subnet. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: The ID of the subnets in the VPC that you want + to connect. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The ID of the VPC that the workforce uses for + communication. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cognitoConfig: + description: Use this parameter to configure an Amazon Cognito + private workforce. A single Cognito workforce is created using + and corresponds to a single Amazon Cognito user pool. Conflicts + with oidc_config. see Cognito Config details below. + properties: + clientId: + description: The client ID for your Amazon Cognito user pool. + type: string + clientIdRef: + description: Reference to a UserPoolClient in cognitoidp to + populate clientId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clientIdSelector: + description: Selector for a UserPoolClient in cognitoidp to + populate clientId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userPool: + description: ID for your Amazon Cognito user pool. + type: string + userPoolRef: + description: Reference to a UserPoolDomain in cognitoidp to + populate userPool. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolSelector: + description: Selector for a UserPoolDomain in cognitoidp to + populate userPool. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + oidcConfig: + description: Use this parameter to configure a private workforce + using your own OIDC Identity Provider. Conflicts with cognito_config. + see OIDC Config details below. + properties: + authorizationEndpoint: + description: The OIDC IdP authorization endpoint used to configure + your private workforce. + type: string + clientId: + description: The client ID for your Amazon Cognito user pool. + type: string + clientSecretSecretRef: + description: The OIDC IdP client secret used to configure + your private workforce. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + issuer: + description: The OIDC IdP issuer used to configure your private + workforce. + type: string + jwksUri: + description: The OIDC IdP JSON Web Key Set (Jwks) URI used + to configure your private workforce. + type: string + logoutEndpoint: + description: The OIDC IdP logout endpoint used to configure + your private workforce. + type: string + tokenEndpoint: + description: The OIDC IdP token endpoint used to configure + your private workforce. + type: string + userInfoEndpoint: + description: The OIDC IdP user information endpoint used to + configure your private workforce. 
+ type: string + required: + - clientSecretSecretRef + type: object + sourceIpConfig: + description: A list of IP address ranges Used to create an allow + list of IP addresses for a private workforce. By default, a + workforce isn't restricted to specific IP addresses. see Source + Ip Config details below. + properties: + cidrs: + description: A list of up to 10 CIDR values. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + workforceVpcConfig: + description: configure a workforce using VPC. see Workforce VPC + Config details below. + properties: + securityGroupIds: + description: The VPC security group IDs. The security groups + must be for the same VPC as specified in the subnet. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: The ID of the subnets in the VPC that you want + to connect. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcId: + description: The ID of the VPC that the workforce uses for + communication. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: WorkforceStatus defines the observed state of Workforce. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this Workforce. + type: string + cognitoConfig: + description: Use this parameter to configure an Amazon Cognito + private workforce. A single Cognito workforce is created using + and corresponds to a single Amazon Cognito user pool. Conflicts + with oidc_config. see Cognito Config details below. + properties: + clientId: + description: The client ID for your Amazon Cognito user pool. + type: string + userPool: + description: ID for your Amazon Cognito user pool. + type: string + type: object + id: + description: The name of the Workforce. + type: string + oidcConfig: + description: Use this parameter to configure a private workforce + using your own OIDC Identity Provider. Conflicts with cognito_config. + see OIDC Config details below. 
+ properties: + authorizationEndpoint: + description: The OIDC IdP authorization endpoint used to configure + your private workforce. + type: string + clientId: + description: The client ID for your Amazon Cognito user pool. + type: string + issuer: + description: The OIDC IdP issuer used to configure your private + workforce. + type: string + jwksUri: + description: The OIDC IdP JSON Web Key Set (Jwks) URI used + to configure your private workforce. + type: string + logoutEndpoint: + description: The OIDC IdP logout endpoint used to configure + your private workforce. + type: string + tokenEndpoint: + description: The OIDC IdP token endpoint used to configure + your private workforce. + type: string + userInfoEndpoint: + description: The OIDC IdP user information endpoint used to + configure your private workforce. + type: string + type: object + sourceIpConfig: + description: A list of IP address ranges Used to create an allow + list of IP addresses for a private workforce. By default, a + workforce isn't restricted to specific IP addresses. see Source + Ip Config details below. + properties: + cidrs: + description: A list of up to 10 CIDR values. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + subdomain: + description: The subdomain for your OIDC Identity Provider. + type: string + workforceVpcConfig: + description: configure a workforce using VPC. see Workforce VPC + Config details below. + properties: + securityGroupIds: + description: The VPC security group IDs. The security groups + must be for the same VPC as specified in the subnet. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: The ID of the subnets in the VPC that you want + to connect. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcEndpointId: + description: The IDs for the VPC service endpoints of your + VPC workforce. 
+ type: string + vpcId: + description: The ID of the VPC that the workforce uses for + communication. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sagemaker.aws.upbound.io_workteams.yaml b/package/crds/sagemaker.aws.upbound.io_workteams.yaml index 13625107b7..3ace93b868 100644 --- a/package/crds/sagemaker.aws.upbound.io_workteams.yaml +++ b/package/crds/sagemaker.aws.upbound.io_workteams.yaml @@ -1185,3 +1185,1152 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workteam is the Schema for the Workteams API. Provides a SageMaker + Workteam resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkteamSpec defines the desired state of Workteam + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description of the work team. + type: string + memberDefinition: + description: A list of Member Definitions that contains objects + that identify the workers that make up the work team. Workforces + can be created using Amazon Cognito or your own OIDC Identity + Provider (IdP). For private workforces created using Amazon + Cognito use cognito_member_definition. For workforces created + using your own OIDC identity provider (IdP) use oidc_member_definition. + Do not provide input for both of these parameters in a single + request. see Member Definition details below. + items: + properties: + cognitoMemberDefinition: + description: The Amazon Cognito user group that is part + of the work team. See Cognito Member Definition details + below. + properties: + clientId: + description: An identifier for an application client. + You must create the app client ID using Amazon Cognito. + type: string + clientIdRef: + description: Reference to a UserPoolClient in cognitoidp + to populate clientId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clientIdSelector: + description: Selector for a UserPoolClient in cognitoidp + to populate clientId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userGroup: + description: An identifier for a user group. + type: string + userGroupRef: + description: Reference to a UserGroup in cognitoidp + to populate userGroup. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userGroupSelector: + description: Selector for a UserGroup in cognitoidp + to populate userGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userPool: + description: An identifier for a user pool. The user + pool must be in the same region as the service that + you are calling. + type: string + userPoolRef: + description: Reference to a UserPoolDomain in cognitoidp + to populate userPool. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolSelector: + description: Selector for a UserPoolDomain in cognitoidp + to populate userPool. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + oidcMemberDefinition: + description: A list user groups that exist in your OIDC + Identity Provider (IdP). One to ten groups can be used + to create a single private work team. See Cognito Member + Definition details below. + properties: + groups: + description: A list of comma separated strings that + identifies user groups in your OIDC IdP. Each user + group is made up of a group of private workers. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + notificationConfiguration: + description: Configures notification of workers regarding available + or expiring work items. see Notification Configuration details + below. + properties: + notificationTopicArn: + description: The ARN for the SNS topic to which notifications + should be published. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + workforceName: + description: The name of the Workteam (must be unique). 
+ type: string + workforceNameRef: + description: Reference to a Workforce in sagemaker to populate + workforceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workforceNameSelector: + description: Selector for a Workforce in sagemaker to populate + workforceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description of the work team. + type: string + memberDefinition: + description: A list of Member Definitions that contains objects + that identify the workers that make up the work team. Workforces + can be created using Amazon Cognito or your own OIDC Identity + Provider (IdP). For private workforces created using Amazon + Cognito use cognito_member_definition. For workforces created + using your own OIDC identity provider (IdP) use oidc_member_definition. + Do not provide input for both of these parameters in a single + request. see Member Definition details below. + items: + properties: + cognitoMemberDefinition: + description: The Amazon Cognito user group that is part + of the work team. See Cognito Member Definition details + below. + properties: + clientId: + description: An identifier for an application client. + You must create the app client ID using Amazon Cognito. 
+ type: string + clientIdRef: + description: Reference to a UserPoolClient in cognitoidp + to populate clientId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clientIdSelector: + description: Selector for a UserPoolClient in cognitoidp + to populate clientId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userGroup: + description: An identifier for a user group. + type: string + userGroupRef: + description: Reference to a UserGroup in cognitoidp + to populate userGroup. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userGroupSelector: + description: Selector for a UserGroup in cognitoidp + to populate userGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userPool: + description: An identifier for a user pool. The user + pool must be in the same region as the service that + you are calling. + type: string + userPoolRef: + description: Reference to a UserPoolDomain in cognitoidp + to populate userPool. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userPoolSelector: + description: Selector for a UserPoolDomain in cognitoidp + to populate userPool. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + oidcMemberDefinition: + description: A list user groups that exist in your OIDC + Identity Provider (IdP). One to ten groups can be used + to create a single private work team. See Cognito Member + Definition details below. + properties: + groups: + description: A list of comma separated strings that + identifies user groups in your OIDC IdP. Each user + group is made up of a group of private workers. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + notificationConfiguration: + description: Configures notification of workers regarding available + or expiring work items. see Notification Configuration details + below. + properties: + notificationTopicArn: + description: The ARN for the SNS topic to which notifications + should be published. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + workforceName: + description: The name of the Workteam (must be unique). + type: string + workforceNameRef: + description: Reference to a Workforce in sagemaker to populate + workforceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workforceNameSelector: + description: Selector for a Workforce in sagemaker to populate + workforceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.description is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.description) + || (has(self.initProvider) && has(self.initProvider.description))' + - message: spec.forProvider.memberDefinition is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.memberDefinition) + || (has(self.initProvider) && has(self.initProvider.memberDefinition))' + status: + description: WorkteamStatus defines the observed state of Workteam. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) assigned by AWS to + this Workteam. + type: string + description: + description: A description of the work team. + type: string + id: + description: The name of the Workteam. + type: string + memberDefinition: + description: A list of Member Definitions that contains objects + that identify the workers that make up the work team. Workforces + can be created using Amazon Cognito or your own OIDC Identity + Provider (IdP). For private workforces created using Amazon + Cognito use cognito_member_definition. For workforces created + using your own OIDC identity provider (IdP) use oidc_member_definition. + Do not provide input for both of these parameters in a single + request. see Member Definition details below. + items: + properties: + cognitoMemberDefinition: + description: The Amazon Cognito user group that is part + of the work team. See Cognito Member Definition details + below. + properties: + clientId: + description: An identifier for an application client. + You must create the app client ID using Amazon Cognito. 
+ type: string + userGroup: + description: An identifier for a user group. + type: string + userPool: + description: An identifier for a user pool. The user + pool must be in the same region as the service that + you are calling. + type: string + type: object + oidcMemberDefinition: + description: A list user groups that exist in your OIDC + Identity Provider (IdP). One to ten groups can be used + to create a single private work team. See Cognito Member + Definition details below. + properties: + groups: + description: A list of comma separated strings that + identifies user groups in your OIDC IdP. Each user + group is made up of a group of private workers. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + notificationConfiguration: + description: Configures notification of workers regarding available + or expiring work items. see Notification Configuration details + below. + properties: + notificationTopicArn: + description: The ARN for the SNS topic to which notifications + should be published. + type: string + type: object + subdomain: + description: The subdomain for your OIDC Identity Provider. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + workforceName: + description: The name of the Workteam (must be unique). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/scheduler.aws.upbound.io_schedules.yaml b/package/crds/scheduler.aws.upbound.io_schedules.yaml index ae03a6b6f9..b6ee10ce24 100644 --- a/package/crds/scheduler.aws.upbound.io_schedules.yaml +++ b/package/crds/scheduler.aws.upbound.io_schedules.yaml @@ -1713,3 +1713,1623 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Schedule is the Schema for the Schedules API. Provides an EventBridge + Scheduler Schedule resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScheduleSpec defines the desired state of Schedule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Brief description of the schedule. + type: string + endDate: + description: 'The date, in UTC, before which the schedule can + invoke its target. Depending on the schedule''s recurrence expression, + invocations might stop on, or before, the end date you specify. + EventBridge Scheduler ignores the end date for one-time schedules. + Example: 2030-01-01T01:00:00Z.' + type: string + flexibleTimeWindow: + description: Configures a time window during which EventBridge + Scheduler invokes the schedule. Detailed below. + properties: + maximumWindowInMinutes: + description: Maximum time window during which a schedule can + be invoked. Ranges from 1 to 1440 minutes. + type: number + mode: + description: 'Determines whether the schedule is invoked within + a flexible time window. One of: OFF, FLEXIBLE.' + type: string + type: object + groupName: + description: Name of the schedule group to associate with this + schedule. When omitted, the default schedule group is used. 
+ type: string + kmsKeyArn: + description: ARN for the customer managed KMS key that EventBridge + Scheduler will use to encrypt and decrypt your data. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name of the schedule. Conflicts with name_prefix. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + scheduleExpression: + description: Defines when the schedule runs. Read more in Schedule + types on EventBridge Scheduler. + type: string + scheduleExpressionTimezone: + description: 'Timezone in which the scheduling expression is evaluated. + Defaults to UTC. Example: Australia/Sydney.' + type: string + startDate: + description: 'The date, in UTC, after which the schedule can begin + invoking its target. Depending on the schedule''s recurrence + expression, invocations might occur on, or after, the start + date you specify. EventBridge Scheduler ignores the start date + for one-time schedules. Example: 2030-01-01T01:00:00Z.' + type: string + state: + description: 'Specifies whether the schedule is enabled or disabled. + One of: ENABLED (default), DISABLED.' + type: string + target: + description: Configures the target of the schedule. Detailed below. + properties: + arn: + description: ARN of the target of this schedule, such as a + SQS queue or ECS cluster. For universal targets, this is + a Service ARN specific to the target service. + type: string + arnRef: + description: Reference to a Queue in sqs to populate arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a Queue in sqs to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deadLetterConfig: + description: Information about an Amazon SQS queue that EventBridge + Scheduler uses as a dead-letter queue for your schedule. + If specified, EventBridge Scheduler delivers failed events + that could not be successfully delivered to a target to + the queue. Detailed below. + properties: + arn: + description: ARN of the target of this schedule, such + as a SQS queue or ECS cluster. For universal targets, + this is a Service ARN specific to the target service. + type: string + type: object + ecsParameters: + description: Templated target type for the Amazon ECS RunTask + API operation. Detailed below. + properties: + capacityProviderStrategy: + description: Up to 6 capacity provider strategies to use + for the task. Detailed below. + items: + properties: + base: + description: How many tasks, at a minimum, to run + on the specified capacity provider. Only one capacity + provider in a capacity provider strategy can have + a base defined. Ranges from 0 (default) to 100000. + type: number + capacityProvider: + description: Short name of the capacity provider. + type: string + weight: + description: Designates the relative percentage + of the total number of tasks launched that should + use the specified capacity provider. The weight + value is taken into consideration after the base + value, if defined, is satisfied. Ranges from from + 0 to 1000. + type: number + type: object + type: array + enableEcsManagedTags: + description: Specifies whether to enable Amazon ECS managed + tags for the task. For more information, see Tagging + Your Amazon ECS Resources in the Amazon ECS Developer + Guide. + type: boolean + enableExecuteCommand: + description: Specifies whether to enable the execute command + functionality for the containers in this task. + type: boolean + group: + description: Specifies an ECS task group for the task. + At most 255 characters. 
+ type: string + launchType: + description: 'Specifies the launch type on which your + task is running. The launch type that you specify here + must match one of the launch type (compatibilities) + of the target task. One of: EC2, FARGATE, EXTERNAL.' + type: string + networkConfiguration: + description: Configures the networking associated with + the task. Detailed below. + properties: + assignPublicIp: + description: Specifies whether the task's elastic + network interface receives a public IP address. + This attribute is a boolean type, where true maps + to ENABLED and false to DISABLED. You can specify + true only when the launch_type is set to FARGATE. + type: boolean + securityGroups: + description: Set of 1 to 5 Security Group ID-s to + be associated with the task. These security groups + must all be in the same VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: Set of 1 to 16 subnets to be associated + with the task. These subnets must all be in the + same VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + placementConstraints: + description: A set of up to 10 placement constraints to + use for the task. Detailed below. + items: + properties: + expression: + description: A cluster query language expression + to apply to the constraint. You cannot specify + an expression if the constraint type is distinctInstance. + For more information, see Cluster query language + in the Amazon ECS Developer Guide. + type: string + type: + description: 'The type of placement strategy. One + of: random, spread, binpack.' + type: string + type: object + type: array + placementStrategy: + description: A set of up to 5 placement strategies. Detailed + below. + items: + properties: + field: + description: The field to apply the placement strategy + against. + type: string + type: + description: 'The type of placement strategy. One + of: random, spread, binpack.' 
+ type: string + type: object + type: array + platformVersion: + description: Specifies the platform version for the task. + Specify only the numeric portion of the platform version, + such as 1.1.0. + type: string + propagateTags: + description: 'Specifies whether to propagate the tags + from the task definition to the task. One of: TASK_DEFINITION.' + type: string + referenceId: + description: Reference ID to use for the task. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taskCount: + description: The number of tasks to create. Ranges from + 1 (default) to 10. + type: number + taskDefinitionArn: + description: ARN of the task definition to use. + type: string + type: object + eventbridgeParameters: + description: Templated target type for the EventBridge PutEvents + API operation. Detailed below. + properties: + detailType: + description: Free-form string used to decide what fields + to expect in the event detail. Up to 128 characters. + type: string + source: + description: Source of the event. + type: string + type: object + input: + description: Text, or well-formed JSON, passed to the target. + Read more in Universal target. + type: string + kinesisParameters: + description: Templated target type for the Amazon Kinesis + PutRecord API operation. Detailed below. + properties: + partitionKey: + description: Specifies the shard to which EventBridge + Scheduler sends the event. Up to 256 characters. + type: string + type: object + retryPolicy: + description: Information about the retry policy settings. + Detailed below. + properties: + maximumEventAgeInSeconds: + description: Maximum amount of time, in seconds, to continue + to make retry attempts. Ranges from 60 to 86400 (default). + type: number + maximumRetryAttempts: + description: Maximum number of retry attempts to make + before the request fails. Ranges from 0 to 185 (default). 
+ type: number + type: object + roleArn: + description: ARN of the IAM role that EventBridge Scheduler + will use for this target when the schedule is invoked. Read + more in Set up the execution role. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sagemakerPipelineParameters: + description: Templated target type for the Amazon SageMaker + StartPipelineExecution API operation. Detailed below. + properties: + pipelineParameter: + description: Set of up to 200 parameter names and values + to use when executing the SageMaker Model Building Pipeline. + Detailed below. + items: + properties: + name: + description: Name of parameter to start execution + of a SageMaker Model Building Pipeline. + type: string + value: + description: Value of parameter to start execution + of a SageMaker Model Building Pipeline. + type: string + type: object + type: array + type: object + sqsParameters: + description: The templated target type for the Amazon SQS + SendMessage API operation. Detailed below. + properties: + messageGroupId: + description: FIFO message group ID to use as the target. + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + description: + description: Brief description of the schedule. + type: string + endDate: + description: 'The date, in UTC, before which the schedule can + invoke its target. Depending on the schedule''s recurrence expression, + invocations might stop on, or before, the end date you specify. + EventBridge Scheduler ignores the end date for one-time schedules. + Example: 2030-01-01T01:00:00Z.' + type: string + flexibleTimeWindow: + description: Configures a time window during which EventBridge + Scheduler invokes the schedule. Detailed below. + properties: + maximumWindowInMinutes: + description: Maximum time window during which a schedule can + be invoked. Ranges from 1 to 1440 minutes. + type: number + mode: + description: 'Determines whether the schedule is invoked within + a flexible time window. One of: OFF, FLEXIBLE.' + type: string + type: object + groupName: + description: Name of the schedule group to associate with this + schedule. When omitted, the default schedule group is used. + type: string + kmsKeyArn: + description: ARN for the customer managed KMS key that EventBridge + Scheduler will use to encrypt and decrypt your data. + type: string + kmsKeyArnRef: + description: Reference to a Key in kms to populate kmsKeyArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyArnSelector: + description: Selector for a Key in kms to populate kmsKeyArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Name of the schedule. Conflicts with name_prefix. + type: string + scheduleExpression: + description: Defines when the schedule runs. Read more in Schedule + types on EventBridge Scheduler. + type: string + scheduleExpressionTimezone: + description: 'Timezone in which the scheduling expression is evaluated. + Defaults to UTC. Example: Australia/Sydney.' + type: string + startDate: + description: 'The date, in UTC, after which the schedule can begin + invoking its target. Depending on the schedule''s recurrence + expression, invocations might occur on, or after, the start + date you specify. 
EventBridge Scheduler ignores the start date + for one-time schedules. Example: 2030-01-01T01:00:00Z.' + type: string + state: + description: 'Specifies whether the schedule is enabled or disabled. + One of: ENABLED (default), DISABLED.' + type: string + target: + description: Configures the target of the schedule. Detailed below. + properties: + arn: + description: ARN of the target of this schedule, such as a + SQS queue or ECS cluster. For universal targets, this is + a Service ARN specific to the target service. + type: string + arnRef: + description: Reference to a Queue in sqs to populate arn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + arnSelector: + description: Selector for a Queue in sqs to populate arn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + deadLetterConfig: + description: Information about an Amazon SQS queue that EventBridge + Scheduler uses as a dead-letter queue for your schedule. + If specified, EventBridge Scheduler delivers failed events + that could not be successfully delivered to a target to + the queue. Detailed below. + properties: + arn: + description: ARN of the target of this schedule, such + as a SQS queue or ECS cluster. For universal targets, + this is a Service ARN specific to the target service. + type: string + type: object + ecsParameters: + description: Templated target type for the Amazon ECS RunTask + API operation. Detailed below. + properties: + capacityProviderStrategy: + description: Up to 6 capacity provider strategies to use + for the task. Detailed below. + items: + properties: + base: + description: How many tasks, at a minimum, to run + on the specified capacity provider. Only one capacity + provider in a capacity provider strategy can have + a base defined. Ranges from 0 (default) to 100000. + type: number + capacityProvider: + description: Short name of the capacity provider. + type: string + weight: + description: Designates the relative percentage + of the total number of tasks launched that should + use the specified capacity provider. 
The weight + value is taken into consideration after the base + value, if defined, is satisfied. Ranges from + 0 to 1000. + type: number + type: object + type: array + enableEcsManagedTags: + description: Specifies whether to enable Amazon ECS managed + tags for the task. For more information, see Tagging + Your Amazon ECS Resources in the Amazon ECS Developer + Guide. + type: boolean + enableExecuteCommand: + description: Specifies whether to enable the execute command + functionality for the containers in this task. + type: boolean + group: + description: Specifies an ECS task group for the task. + At most 255 characters. + type: string + launchType: + description: 'Specifies the launch type on which your + task is running. The launch type that you specify here + must match one of the launch type (compatibilities) + of the target task. One of: EC2, FARGATE, EXTERNAL.' + type: string + networkConfiguration: + description: Configures the networking associated with + the task. Detailed below. + properties: + assignPublicIp: + description: Specifies whether the task's elastic + network interface receives a public IP address. + This attribute is a boolean type, where true maps + to ENABLED and false to DISABLED. You can specify + true only when the launch_type is set to FARGATE. + type: boolean + securityGroups: + description: Set of 1 to 5 Security Group ID-s to + be associated with the task. These security groups + must all be in the same VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: Set of 1 to 16 subnets to be associated + with the task. These subnets must all be in the + same VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + placementConstraints: + description: A set of up to 10 placement constraints to + use for the task. Detailed below. + items: + properties: + expression: + description: A cluster query language expression + to apply to the constraint. 
You cannot specify + an expression if the constraint type is distinctInstance. + For more information, see Cluster query language + in the Amazon ECS Developer Guide. + type: string + type: + description: 'The type of placement strategy. One + of: random, spread, binpack.' + type: string + type: object + type: array + placementStrategy: + description: A set of up to 5 placement strategies. Detailed + below. + items: + properties: + field: + description: The field to apply the placement strategy + against. + type: string + type: + description: 'The type of placement strategy. One + of: random, spread, binpack.' + type: string + type: object + type: array + platformVersion: + description: Specifies the platform version for the task. + Specify only the numeric portion of the platform version, + such as 1.1.0. + type: string + propagateTags: + description: 'Specifies whether to propagate the tags + from the task definition to the task. One of: TASK_DEFINITION.' + type: string + referenceId: + description: Reference ID to use for the task. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taskCount: + description: The number of tasks to create. Ranges from + 1 (default) to 10. + type: number + taskDefinitionArn: + description: ARN of the task definition to use. + type: string + type: object + eventbridgeParameters: + description: Templated target type for the EventBridge PutEvents + API operation. Detailed below. + properties: + detailType: + description: Free-form string used to decide what fields + to expect in the event detail. Up to 128 characters. + type: string + source: + description: Source of the event. + type: string + type: object + input: + description: Text, or well-formed JSON, passed to the target. + Read more in Universal target. 
+ type: string + kinesisParameters: + description: Templated target type for the Amazon Kinesis + PutRecord API operation. Detailed below. + properties: + partitionKey: + description: Specifies the shard to which EventBridge + Scheduler sends the event. Up to 256 characters. + type: string + type: object + retryPolicy: + description: Information about the retry policy settings. + Detailed below. + properties: + maximumEventAgeInSeconds: + description: Maximum amount of time, in seconds, to continue + to make retry attempts. Ranges from 60 to 86400 (default). + type: number + maximumRetryAttempts: + description: Maximum number of retry attempts to make + before the request fails. Ranges from 0 to 185 (default). + type: number + type: object + roleArn: + description: ARN of the IAM role that EventBridge Scheduler + will use for this target when the schedule is invoked. Read + more in Set up the execution role. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sagemakerPipelineParameters: + description: Templated target type for the Amazon SageMaker + StartPipelineExecution API operation. Detailed below. + properties: + pipelineParameter: + description: Set of up to 200 parameter names and values + to use when executing the SageMaker Model Building Pipeline. + Detailed below. + items: + properties: + name: + description: Name of parameter to start execution + of a SageMaker Model Building Pipeline. + type: string + value: + description: Value of parameter to start execution + of a SageMaker Model Building Pipeline. + type: string + type: object + type: array + type: object + sqsParameters: + description: The templated target type for the Amazon SQS + SendMessage API operation. Detailed below. + properties: + messageGroupId: + description: FIFO message group ID to use as the target. 
+ type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.flexibleTimeWindow is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.flexibleTimeWindow) + || (has(self.initProvider) && has(self.initProvider.flexibleTimeWindow))' + - message: spec.forProvider.scheduleExpression is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scheduleExpression) + || (has(self.initProvider) && has(self.initProvider.scheduleExpression))' + - message: spec.forProvider.target is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.target) + || (has(self.initProvider) && has(self.initProvider.target))' + status: + description: ScheduleStatus defines the observed state of Schedule. + properties: + atProvider: + properties: + arn: + description: ARN of the SQS queue specified as the destination + for the dead-letter queue. + type: string + description: + description: Brief description of the schedule. + type: string + endDate: + description: 'The date, in UTC, before which the schedule can + invoke its target. Depending on the schedule''s recurrence expression, + invocations might stop on, or before, the end date you specify. + EventBridge Scheduler ignores the end date for one-time schedules. + Example: 2030-01-01T01:00:00Z.' + type: string + flexibleTimeWindow: + description: Configures a time window during which EventBridge + Scheduler invokes the schedule. Detailed below. + properties: + maximumWindowInMinutes: + description: Maximum time window during which a schedule can + be invoked. 
Ranges from 1 to 1440 minutes. + type: number + mode: + description: 'Determines whether the schedule is invoked within + a flexible time window. One of: OFF, FLEXIBLE.' + type: string + type: object + groupName: + description: Name of the schedule group to associate with this + schedule. When omitted, the default schedule group is used. + type: string + id: + description: Name of the schedule. + type: string + kmsKeyArn: + description: ARN for the customer managed KMS key that EventBridge + Scheduler will use to encrypt and decrypt your data. + type: string + name: + description: Name of the schedule. Conflicts with name_prefix. + type: string + scheduleExpression: + description: Defines when the schedule runs. Read more in Schedule + types on EventBridge Scheduler. + type: string + scheduleExpressionTimezone: + description: 'Timezone in which the scheduling expression is evaluated. + Defaults to UTC. Example: Australia/Sydney.' + type: string + startDate: + description: 'The date, in UTC, after which the schedule can begin + invoking its target. Depending on the schedule''s recurrence + expression, invocations might occur on, or after, the start + date you specify. EventBridge Scheduler ignores the start date + for one-time schedules. Example: 2030-01-01T01:00:00Z.' + type: string + state: + description: 'Specifies whether the schedule is enabled or disabled. + One of: ENABLED (default), DISABLED.' + type: string + target: + description: Configures the target of the schedule. Detailed below. + properties: + arn: + description: ARN of the target of this schedule, such as a + SQS queue or ECS cluster. For universal targets, this is + a Service ARN specific to the target service. + type: string + deadLetterConfig: + description: Information about an Amazon SQS queue that EventBridge + Scheduler uses as a dead-letter queue for your schedule. 
+ If specified, EventBridge Scheduler delivers failed events + that could not be successfully delivered to a target to + the queue. Detailed below. + properties: + arn: + description: ARN of the target of this schedule, such + as a SQS queue or ECS cluster. For universal targets, + this is a Service ARN specific to the target service. + type: string + type: object + ecsParameters: + description: Templated target type for the Amazon ECS RunTask + API operation. Detailed below. + properties: + capacityProviderStrategy: + description: Up to 6 capacity provider strategies to use + for the task. Detailed below. + items: + properties: + base: + description: How many tasks, at a minimum, to run + on the specified capacity provider. Only one capacity + provider in a capacity provider strategy can have + a base defined. Ranges from 0 (default) to 100000. + type: number + capacityProvider: + description: Short name of the capacity provider. + type: string + weight: + description: Designates the relative percentage + of the total number of tasks launched that should + use the specified capacity provider. The weight + value is taken into consideration after the base + value, if defined, is satisfied. Ranges from from + 0 to 1000. + type: number + type: object + type: array + enableEcsManagedTags: + description: Specifies whether to enable Amazon ECS managed + tags for the task. For more information, see Tagging + Your Amazon ECS Resources in the Amazon ECS Developer + Guide. + type: boolean + enableExecuteCommand: + description: Specifies whether to enable the execute command + functionality for the containers in this task. + type: boolean + group: + description: Specifies an ECS task group for the task. + At most 255 characters. + type: string + launchType: + description: 'Specifies the launch type on which your + task is running. The launch type that you specify here + must match one of the launch type (compatibilities) + of the target task. One of: EC2, FARGATE, EXTERNAL.' 
+ type: string + networkConfiguration: + description: Configures the networking associated with + the task. Detailed below. + properties: + assignPublicIp: + description: Specifies whether the task's elastic + network interface receives a public IP address. + This attribute is a boolean type, where true maps + to ENABLED and false to DISABLED. You can specify + true only when the launch_type is set to FARGATE. + type: boolean + securityGroups: + description: Set of 1 to 5 Security Group ID-s to + be associated with the task. These security groups + must all be in the same VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + subnets: + description: Set of 1 to 16 subnets to be associated + with the task. These subnets must all be in the + same VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + placementConstraints: + description: A set of up to 10 placement constraints to + use for the task. Detailed below. + items: + properties: + expression: + description: A cluster query language expression + to apply to the constraint. You cannot specify + an expression if the constraint type is distinctInstance. + For more information, see Cluster query language + in the Amazon ECS Developer Guide. + type: string + type: + description: 'The type of placement strategy. One + of: random, spread, binpack.' + type: string + type: object + type: array + placementStrategy: + description: A set of up to 5 placement strategies. Detailed + below. + items: + properties: + field: + description: The field to apply the placement strategy + against. + type: string + type: + description: 'The type of placement strategy. One + of: random, spread, binpack.' + type: string + type: object + type: array + platformVersion: + description: Specifies the platform version for the task. + Specify only the numeric portion of the platform version, + such as 1.1.0. 
+ type: string + propagateTags: + description: 'Specifies whether to propagate the tags + from the task definition to the task. One of: TASK_DEFINITION.' + type: string + referenceId: + description: Reference ID to use for the task. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + taskCount: + description: The number of tasks to create. Ranges from + 1 (default) to 10. + type: number + taskDefinitionArn: + description: ARN of the task definition to use. + type: string + type: object + eventbridgeParameters: + description: Templated target type for the EventBridge PutEvents + API operation. Detailed below. + properties: + detailType: + description: Free-form string used to decide what fields + to expect in the event detail. Up to 128 characters. + type: string + source: + description: Source of the event. + type: string + type: object + input: + description: Text, or well-formed JSON, passed to the target. + Read more in Universal target. + type: string + kinesisParameters: + description: Templated target type for the Amazon Kinesis + PutRecord API operation. Detailed below. + properties: + partitionKey: + description: Specifies the shard to which EventBridge + Scheduler sends the event. Up to 256 characters. + type: string + type: object + retryPolicy: + description: Information about the retry policy settings. + Detailed below. + properties: + maximumEventAgeInSeconds: + description: Maximum amount of time, in seconds, to continue + to make retry attempts. Ranges from 60 to 86400 (default). + type: number + maximumRetryAttempts: + description: Maximum number of retry attempts to make + before the request fails. Ranges from 0 to 185 (default). + type: number + type: object + roleArn: + description: ARN of the IAM role that EventBridge Scheduler + will use for this target when the schedule is invoked. Read + more in Set up the execution role. 
+ type: string + sagemakerPipelineParameters: + description: Templated target type for the Amazon SageMaker + StartPipelineExecution API operation. Detailed below. + properties: + pipelineParameter: + description: Set of up to 200 parameter names and values + to use when executing the SageMaker Model Building Pipeline. + Detailed below. + items: + properties: + name: + description: Name of parameter to start execution + of a SageMaker Model Building Pipeline. + type: string + value: + description: Value of parameter to start execution + of a SageMaker Model Building Pipeline. + type: string + type: object + type: array + type: object + sqsParameters: + description: The templated target type for the Amazon SQS + SendMessage API operation. Detailed below. + properties: + messageGroupId: + description: FIFO message group ID to use as the target. + type: string + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/secretsmanager.aws.upbound.io_secretrotations.yaml b/package/crds/secretsmanager.aws.upbound.io_secretrotations.yaml index 0bf701bb6e..9a3afeeec1 100644 --- a/package/crds/secretsmanager.aws.upbound.io_secretrotations.yaml +++ b/package/crds/secretsmanager.aws.upbound.io_secretrotations.yaml @@ -766,3 +766,745 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SecretRotation is the Schema for the SecretRotations API. Provides + a resource to manage AWS Secrets Manager secret rotation + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecretRotationSpec defines the desired state of SecretRotation + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rotateImmediately: + description: Specifies whether to rotate the secret immediately + or wait until the next scheduled rotation window. The rotation + schedule is defined in rotation_rules. For secrets that use + a Lambda rotation function to rotate, if you don't immediately + rotate the secret, Secrets Manager tests the rotation configuration + by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) + of the Lambda rotation function. The test creates an AWSPENDING + version of the secret and then removes it. Defaults to true. 
+ type: boolean + rotationLambdaArn: + description: Specifies the ARN of the Lambda function that can + rotate the secret. Must be supplied if the secret is not managed + by AWS. + type: string + rotationLambdaArnRef: + description: Reference to a Function in lambda to populate rotationLambdaArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + rotationLambdaArnSelector: + description: Selector for a Function in lambda to populate rotationLambdaArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rotationRules: + description: A structure that defines the rotation configuration + for this secret. Defined below. + properties: + automaticallyAfterDays: + description: Specifies the number of days between automatic + scheduled rotations of the secret. Either automatically_after_days + or schedule_expression must be specified. + type: number + duration: + description: '- The length of the rotation window in hours. + For example, 3h for a three hour window.' + type: string + scheduleExpression: + description: A cron() or rate() expression that defines the + schedule for rotating your secret. Either automatically_after_days + or schedule_expression must be specified. + type: string + type: object + secretId: + description: Specifies the secret to which you want to add a new + version. You can specify either the Amazon Resource Name (ARN) + or the friendly name of the secret. The secret must already + exist. + type: string + secretIdRef: + description: Reference to a Secret in secretsmanager to populate + secretId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretIdSelector: + description: Selector for a Secret in secretsmanager to populate + secretId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + rotateImmediately: + description: Specifies whether to rotate the secret immediately + or wait until the next scheduled rotation window. The rotation + schedule is defined in rotation_rules. For secrets that use + a Lambda rotation function to rotate, if you don't immediately + rotate the secret, Secrets Manager tests the rotation configuration + by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) + of the Lambda rotation function. The test creates an AWSPENDING + version of the secret and then removes it. Defaults to true. + type: boolean + rotationLambdaArn: + description: Specifies the ARN of the Lambda function that can + rotate the secret. Must be supplied if the secret is not managed + by AWS. + type: string + rotationLambdaArnRef: + description: Reference to a Function in lambda to populate rotationLambdaArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + rotationLambdaArnSelector: + description: Selector for a Function in lambda to populate rotationLambdaArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rotationRules: + description: A structure that defines the rotation configuration + for this secret. Defined below. + properties: + automaticallyAfterDays: + description: Specifies the number of days between automatic + scheduled rotations of the secret. Either automatically_after_days + or schedule_expression must be specified. + type: number + duration: + description: '- The length of the rotation window in hours. + For example, 3h for a three hour window.' 
+ type: string + scheduleExpression: + description: A cron() or rate() expression that defines the + schedule for rotating your secret. Either automatically_after_days + or schedule_expression must be specified. + type: string + type: object + secretId: + description: Specifies the secret to which you want to add a new + version. You can specify either the Amazon Resource Name (ARN) + or the friendly name of the secret. The secret must already + exist. + type: string + secretIdRef: + description: Reference to a Secret in secretsmanager to populate + secretId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + secretIdSelector: + description: Selector for a Secret in secretsmanager to populate + secretId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.rotationRules is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.rotationRules) + || (has(self.initProvider) && has(self.initProvider.rotationRules))' + status: + description: SecretRotationStatus defines the observed state of SecretRotation. + properties: + atProvider: + properties: + id: + description: Amazon Resource Name (ARN) of the secret. + type: string + rotateImmediately: + description: Specifies whether to rotate the secret immediately + or wait until the next scheduled rotation window. The rotation + schedule is defined in rotation_rules. For secrets that use + a Lambda rotation function to rotate, if you don't immediately + rotate the secret, Secrets Manager tests the rotation configuration + by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) + of the Lambda rotation function. The test creates an AWSPENDING + version of the secret and then removes it. Defaults to true. + type: boolean + rotationEnabled: + description: Specifies whether automatic rotation is enabled for + this secret. + type: boolean + rotationLambdaArn: + description: Specifies the ARN of the Lambda function that can + rotate the secret. 
Must be supplied if the secret is not managed + by AWS. + type: string + rotationRules: + description: A structure that defines the rotation configuration + for this secret. Defined below. + properties: + automaticallyAfterDays: + description: Specifies the number of days between automatic + scheduled rotations of the secret. Either automatically_after_days + or schedule_expression must be specified. + type: number + duration: + description: '- The length of the rotation window in hours. + For example, 3h for a three hour window.' + type: string + scheduleExpression: + description: A cron() or rate() expression that defines the + schedule for rotating your secret. Either automatically_after_days + or schedule_expression must be specified. + type: string + type: object + secretId: + description: Specifies the secret to which you want to add a new + version. You can specify either the Amazon Resource Name (ARN) + or the friendly name of the secret. The secret must already + exist. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/securityhub.aws.upbound.io_insights.yaml b/package/crds/securityhub.aws.upbound.io_insights.yaml index c377a1369e..742d7a0059 100644 --- a/package/crds/securityhub.aws.upbound.io_insights.yaml +++ b/package/crds/securityhub.aws.upbound.io_insights.yaml @@ -5156,3 +5156,5036 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Insight is the Schema for the Insights API. Provides a Security + Hub custom insight resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InsightSpec defines the desired state of Insight + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + filters: + description: A configuration block including one or more (up to + 10 distinct) attributes used to filter the findings included + in the insight. The insight only includes findings that match + criteria defined in the filters. See filters below for more + details. + properties: + awsAccountId: + description: AWS account ID that a finding is generated in. + See String_Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + companyName: + description: The name of the findings provider (company) that + owns the solution (product) that generates findings. See + String_Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + complianceStatus: + description: Exclusive to findings that are generated as the + result of a check run against a specific rule in a supported + standard, such as CIS AWS Foundations. Contains security + standard-related finding details. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + confidence: + description: A finding's confidence. Confidence is defined + as the likelihood that a finding accurately identifies the + behavior or issue that it was intended to identify. Confidence + is scored on a 0-100 basis using a ratio scale, where 0 + means zero percent confidence and 100 means 100 percent + confidence. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. 
+ type: string + type: object + type: array + createdAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider captured the potential + security issue that a finding captured. See Date Filter + below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + criticality: + description: The level of importance assigned to the resources + associated with the finding. A score of 0 means that the + underlying resources have no criticality, and a score of + 100 is reserved for the most critical resources. See Number + Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + description: + description: A finding's description. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsConfidence: + description: The finding provider value for the finding confidence. + Confidence is defined as the likelihood that a finding accurately + identifies the behavior or issue that it was intended to + identify. Confidence is scored on a 0-100 basis using a + ratio scale, where 0 means zero percent confidence and 100 + means 100 percent confidence. See Number Filter below for + more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + findingProviderFieldsCriticality: + description: The finding provider value for the level of importance + assigned to the resources associated with the findings. + A score of 0 means that the underlying resources have no + criticality, and a score of 100 is reserved for the most + critical resources. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. 
+ type: string + type: object + type: array + findingProviderFieldsRelatedFindingsId: + description: The finding identifier of a related finding that + is identified by the finding provider. See String Filter + below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsRelatedFindingsProductArn: + description: The ARN of the solution that generated a related + finding that is identified by the finding provider. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsSeverityLabel: + description: The finding provider value for the severity label. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsSeverityOriginal: + description: The finding provider's original value for the + severity. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsTypes: + description: 'One or more finding types that the finding provider + assigned to the finding. 
Uses the format of namespace/category/classifier + that classify a finding. Valid namespace values include: + Software and Configuration Checks, TTPs, Effects, Unusual + Behaviors, and Sensitive Data Identifications. See String + Filter below for more details.' + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + firstObservedAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider first observed the potential + security issue that a finding captured. See Date Filter + below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + generatorId: + description: The identifier for the solution-specific component + (a discrete unit of logic) that generated a finding. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + id: + description: The security findings provider-specific identifier + for a finding. See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + keyword: + description: A keyword for a finding. See Keyword Filter below + for more details. + items: + properties: + value: + description: A value for the keyword. + type: string + type: object + type: array + lastObservedAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider most recently observed + the potential security issue that a finding captured. See + Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + malwareName: + description: The name of the malware that was observed. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + malwarePath: + description: The filesystem path of the malware that was observed. + See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + malwareState: + description: The state of the malware that was observed. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + malwareType: + description: The type of the malware that was observed. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkDestinationDomain: + description: The destination domain of network-related information + about a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkDestinationIpv4: + description: The destination IPv4 address of network-related + information about a finding. See Ip Filter below for more + details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkDestinationIpv6: + description: The destination IPv6 address of network-related + information about a finding. See Ip Filter below for more + details. 
+ items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkDestinationPort: + description: The destination port of network-related information + about a finding. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + networkDirection: + description: Indicates the direction of network traffic associated + with a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkProtocol: + description: The protocol of network-related information about + a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkSourceDomain: + description: The source domain of network-related information + about a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkSourceIpv4: + description: The source IPv4 address of network-related information + about a finding. See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkSourceIpv6: + description: The source IPv6 address of network-related information + about a finding. See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkSourceMac: + description: The source media access control (MAC) address + of network-related information about a finding. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkSourcePort: + description: The source port of network-related information + about a finding. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + noteText: + description: The text of a note. See String Filter below for + more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + noteUpdatedAt: + description: The timestamp of when the note was updated. See + Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + noteUpdatedBy: + description: The principal that created a note. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + processLaunchedAt: + description: The date/time that the process was launched. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. 
+ type: string + type: object + type: array + processName: + description: The name of the process. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + processParentPid: + description: The parent process ID. See Number Filter below + for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + processPath: + description: The path to the process executable. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + processPid: + description: The process ID. See Number Filter below for more + details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. 
+ type: string + type: object + type: array + processTerminatedAt: + description: The date/time that the process was terminated. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + productArn: + description: The ARN generated by Security Hub that uniquely + identifies a third-party company (security findings provider) + after this provider's product (solution that generates findings) + is registered with Security Hub. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + productFields: + description: A data type where security-findings providers + can include additional solution-specific details that aren't + part of the defined AwsSecurityFinding format. See Map Filter + below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + productName: + description: The name of the solution (product) that generates + findings. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + recommendationText: + description: The recommendation of what to do about the issue + described in a finding. See String Filter below for more + details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + recordState: + description: The updated record state for the finding. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + relatedFindingsId: + description: The solution-generated identifier for a related + finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + relatedFindingsProductArn: + description: The ARN of the solution that generated a related + finding. See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceIamInstanceProfileArn: + description: The IAM profile ARN of the instance. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceImageId: + description: The Amazon Machine Image (AMI) ID of the instance. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceIpv4Addresses: + description: The IPv4 addresses associated with the instance. + See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + resourceAwsEc2InstanceIpv6Addresses: + description: The IPv6 addresses associated with the instance. + See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + resourceAwsEc2InstanceKeyName: + description: The key name associated with the instance. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceLaunchedAt: + description: The date and time the instance was launched. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + resourceAwsEc2InstanceSubnetId: + description: The identifier of the subnet that the instance + was launched in. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceType: + description: The instance type of the instance. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceVpcId: + description: The identifier of the VPC that the instance was + launched in. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. 
Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsIamAccessKeyCreatedAt: + description: The creation date/time of the IAM access key + related to a finding. See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + resourceAwsIamAccessKeyStatus: + description: The status of the IAM access key related to a + finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsIamAccessKeyUserName: + description: The user associated with the IAM access key related + to a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsS3BucketOwnerId: + description: The canonical user ID of the owner of the S3 + bucket. See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsS3BucketOwnerName: + description: The display name of the owner of the S3 bucket. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceContainerImageId: + description: The identifier of the image related to a finding. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceContainerImageName: + description: The name of the image related to a finding. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceContainerLaunchedAt: + description: The date/time that the container was started. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. 
+ type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + resourceContainerName: + description: The name of the container related to a finding. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceDetailsOther: + description: The details of a resource that doesn't have a + specific subfield for the resource type defined. See Map + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceId: + description: The canonical identifier for the given resource + type. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourcePartition: + description: The canonical AWS partition name that the Region + is assigned to. See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceRegion: + description: The canonical AWS external Region name where + this resource is located. See String Filter below for more + details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceTags: + description: A list of AWS tags associated with a resource + at the time the finding was processed. See Map Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceType: + description: Specifies the type of the resource that details + are provided for. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + severityLabel: + description: The label of a finding's severity. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. 
Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + sourceUrl: + description: A URL that links to a page about the current + finding in the security-findings provider's solution. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorCategory: + description: The category of a threat intelligence indicator. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorLastObservedAt: + description: The date/time of the last observation of a threat + intelligence indicator. See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + threatIntelIndicatorSource: + description: The source of the threat intelligence. See String + Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorSourceUrl: + description: The URL for more details from the source of the + threat intelligence. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorType: + description: The type of a threat intelligence indicator. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorValue: + description: The value of a threat intelligence indicator. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + title: + description: A finding's title. See String Filter below for + more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + type: + description: A finding type in the format of namespace/category/classifier + that classifies a finding. See String Filter below for more + details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + updatedAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider last updated the finding + record. See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + userDefinedValues: + description: A list of name/value string pairs associated + with the finding. These are custom, user-defined fields + added to a finding. See Map Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + verificationState: + description: The veracity of a finding. See String Filter + below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + workflowStatus: + description: The status of the investigation into a finding. + See Workflow Status Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + type: object + groupByAttribute: + description: The attribute used to group the findings for the + insight e.g., if an insight is grouped by ResourceId, then the + insight produces a list of resource identifiers. + type: string + name: + description: The name of the custom insight. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + filters: + description: A configuration block including one or more (up to + 10 distinct) attributes used to filter the findings included + in the insight. The insight only includes findings that match + criteria defined in the filters. See filters below for more + details. + properties: + awsAccountId: + description: AWS account ID that a finding is generated in. + See String_Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + companyName: + description: The name of the findings provider (company) that + owns the solution (product) that generates findings. See + String_Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + complianceStatus: + description: Exclusive to findings that are generated as the + result of a check run against a specific rule in a supported + standard, such as CIS AWS Foundations. Contains security + standard-related finding details. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + confidence: + description: A finding's confidence. Confidence is defined + as the likelihood that a finding accurately identifies the + behavior or issue that it was intended to identify. 
Confidence + is scored on a 0-100 basis using a ratio scale, where 0 + means zero percent confidence and 100 means 100 percent + confidence. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + createdAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider captured the potential + security issue that a finding captured. See Date Filter + below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + criticality: + description: The level of importance assigned to the resources + associated with the finding. A score of 0 means that the + underlying resources have no criticality, and a score of + 100 is reserved for the most critical resources. See Number + Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. 
+ type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + description: + description: A finding's description. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsConfidence: + description: The finding provider value for the finding confidence. + Confidence is defined as the likelihood that a finding accurately + identifies the behavior or issue that it was intended to + identify. Confidence is scored on a 0-100 basis using a + ratio scale, where 0 means zero percent confidence and 100 + means 100 percent confidence. See Number Filter below for + more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + findingProviderFieldsCriticality: + description: The finding provider value for the level of importance + assigned to the resources associated with the findings. + A score of 0 means that the underlying resources have no + criticality, and a score of 100 is reserved for the most + critical resources. 
See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + findingProviderFieldsRelatedFindingsId: + description: The finding identifier of a related finding that + is identified by the finding provider. See String Filter + below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsRelatedFindingsProductArn: + description: The ARN of the solution that generated a related + finding that is identified by the finding provider. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsSeverityLabel: + description: The finding provider value for the severity label. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + findingProviderFieldsSeverityOriginal: + description: The finding provider's original value for the + severity. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsTypes: + description: 'One or more finding types that the finding provider + assigned to the finding. Uses the format of namespace/category/classifier + that classify a finding. Valid namespace values include: + Software and Configuration Checks, TTPs, Effects, Unusual + Behaviors, and Sensitive Data Identifications. See String + Filter below for more details.' + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + firstObservedAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider first observed the potential + security issue that a finding captured. See Date Filter + below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. 
+ type: string + type: object + type: array + generatorId: + description: The identifier for the solution-specific component + (a discrete unit of logic) that generated a finding. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + id: + description: The security findings provider-specific identifier + for a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + keyword: + description: A keyword for a finding. See Keyword Filter below + for more details. + items: + properties: + value: + description: A value for the keyword. + type: string + type: object + type: array + lastObservedAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider most recently observed + the potential security issue that a finding captured. See + Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. 
+ type: string + type: object + type: array + malwareName: + description: The name of the malware that was observed. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + malwarePath: + description: The filesystem path of the malware that was observed. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + malwareState: + description: The state of the malware that was observed. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + malwareType: + description: The type of the malware that was observed. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkDestinationDomain: + description: The destination domain of network-related information + about a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkDestinationIpv4: + description: The destination IPv4 address of network-related + information about a finding. See Ip Filter below for more + details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkDestinationIpv6: + description: The destination IPv6 address of network-related + information about a finding. See Ip Filter below for more + details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkDestinationPort: + description: The destination port of network-related information + about a finding. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + networkDirection: + description: Indicates the direction of network traffic associated + with a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkProtocol: + description: The protocol of network-related information about + a finding. See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkSourceDomain: + description: The source domain of network-related information + about a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkSourceIpv4: + description: The source IPv4 address of network-related information + about a finding. See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkSourceIpv6: + description: The source IPv6 address of network-related information + about a finding. See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkSourceMac: + description: The source media access control (MAC) address + of network-related information about a finding. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkSourcePort: + description: The source port of network-related information + about a finding. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. 
+ type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + noteText: + description: The text of a note. See String Filter below for + more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + noteUpdatedAt: + description: The timestamp of when the note was updated. See + Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + noteUpdatedBy: + description: The principal that created a note. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + processLaunchedAt: + description: The date/time that the process was launched. + See Date Filter below for more details. 
+ items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + processName: + description: The name of the process. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + processParentPid: + description: The parent process ID. See Number Filter below + for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + processPath: + description: The path to the process executable. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + processPid: + description: The process ID. See Number Filter below for more + details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + processTerminatedAt: + description: The date/time that the process was terminated. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + productArn: + description: The ARN generated by Security Hub that uniquely + identifies a third-party company (security findings provider) + after this provider's product (solution that generates findings) + is registered with Security Hub. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + productFields: + description: A data type where security-findings providers + can include additional solution-specific details that aren't + part of the defined AwsSecurityFinding format. See Map Filter + below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + productName: + description: The name of the solution (product) that generates + findings. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + recommendationText: + description: The recommendation of what to do about the issue + described in a finding. See String Filter below for more + details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + recordState: + description: The updated record state for the finding. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + relatedFindingsId: + description: The solution-generated identifier for a related + finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + relatedFindingsProductArn: + description: The ARN of the solution that generated a related + finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceIamInstanceProfileArn: + description: The IAM profile ARN of the instance. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceImageId: + description: The Amazon Machine Image (AMI) ID of the instance. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceIpv4Addresses: + description: The IPv4 addresses associated with the instance. + See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. 
+ type: string + type: object + type: array + resourceAwsEc2InstanceIpv6Addresses: + description: The IPv6 addresses associated with the instance. + See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + resourceAwsEc2InstanceKeyName: + description: The key name associated with the instance. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceLaunchedAt: + description: The date and time the instance was launched. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + resourceAwsEc2InstanceSubnetId: + description: The identifier of the subnet that the instance + was launched in. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceType: + description: The instance type of the instance. 
See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceVpcId: + description: The identifier of the VPC that the instance was + launched in. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsIamAccessKeyCreatedAt: + description: The creation date/time of the IAM access key + related to a finding. See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + resourceAwsIamAccessKeyStatus: + description: The status of the IAM access key related to a + finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + resourceAwsIamAccessKeyUserName: + description: The user associated with the IAM access key related + to a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsS3BucketOwnerId: + description: The canonical user ID of the owner of the S3 + bucket. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsS3BucketOwnerName: + description: The display name of the owner of the S3 bucket. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceContainerImageId: + description: The identifier of the image related to a finding. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceContainerImageName: + description: The name of the image related to a finding. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. 
Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceContainerLaunchedAt: + description: The date/time that the container was started. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + resourceContainerName: + description: The name of the container related to a finding. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceDetailsOther: + description: The details of a resource that doesn't have a + specific subfield for the resource type defined. See Map + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + resourceId: + description: The canonical identifier for the given resource + type. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourcePartition: + description: The canonical AWS partition name that the Region + is assigned to. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceRegion: + description: The canonical AWS external Region name where + this resource is located. See String Filter below for more + details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceTags: + description: A list of AWS tags associated with a resource + at the time the finding was processed. See Map Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + resourceType: + description: Specifies the type of the resource that details + are provided for. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + severityLabel: + description: The label of a finding's severity. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + sourceUrl: + description: A URL that links to a page about the current + finding in the security-findings provider's solution. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorCategory: + description: The category of a threat intelligence indicator. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorLastObservedAt: + description: The date/time of the last observation of a threat + intelligence indicator. See Date Filter below for more details. 
+ items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + threatIntelIndicatorSource: + description: The source of the threat intelligence. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorSourceUrl: + description: The URL for more details from the source of the + threat intelligence. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorType: + description: The type of a threat intelligence indicator. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorValue: + description: The value of a threat intelligence indicator. 
+ See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + title: + description: A finding's title. See String Filter below for + more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + type: + description: A finding type in the format of namespace/category/classifier + that classifies a finding. See String Filter below for more + details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + updatedAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider last updated the finding + record. See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. 
+ type: string + type: object + type: array + userDefinedValues: + description: A list of name/value string pairs associated + with the finding. These are custom, user-defined fields + added to a finding. See Map Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + verificationState: + description: The veracity of a finding. See String Filter + below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + workflowStatus: + description: The status of the investigation into a finding. + See Workflow Status Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + type: object + groupByAttribute: + description: The attribute used to group the findings for the + insight e.g., if an insight is grouped by ResourceId, then the + insight produces a list of resource identifiers. + type: string + name: + description: The name of the custom insight. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.filters is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.filters) + || (has(self.initProvider) && has(self.initProvider.filters))' + - message: spec.forProvider.groupByAttribute is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.groupByAttribute) + || (has(self.initProvider) && has(self.initProvider.groupByAttribute))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: InsightStatus defines the observed state of Insight. + properties: + atProvider: + properties: + arn: + description: ARN of the insight. + type: string + filters: + description: A configuration block including one or more (up to + 10 distinct) attributes used to filter the findings included + in the insight. The insight only includes findings that match + criteria defined in the filters. See filters below for more + details. + properties: + awsAccountId: + description: AWS account ID that a finding is generated in. + See String_Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + companyName: + description: The name of the findings provider (company) that + owns the solution (product) that generates findings. See + String_Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + complianceStatus: + description: Exclusive to findings that are generated as the + result of a check run against a specific rule in a supported + standard, such as CIS AWS Foundations. Contains security + standard-related finding details. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + confidence: + description: A finding's confidence. Confidence is defined + as the likelihood that a finding accurately identifies the + behavior or issue that it was intended to identify. Confidence + is scored on a 0-100 basis using a ratio scale, where 0 + means zero percent confidence and 100 means 100 percent + confidence. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. 
+ type: string + type: object + type: array + createdAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider captured the potential + security issue that a finding captured. See Date Filter + below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + criticality: + description: The level of importance assigned to the resources + associated with the finding. A score of 0 means that the + underlying resources have no criticality, and a score of + 100 is reserved for the most critical resources. See Number + Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + description: + description: A finding's description. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsConfidence: + description: The finding provider value for the finding confidence. + Confidence is defined as the likelihood that a finding accurately + identifies the behavior or issue that it was intended to + identify. Confidence is scored on a 0-100 basis using a + ratio scale, where 0 means zero percent confidence and 100 + means 100 percent confidence. See Number Filter below for + more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + findingProviderFieldsCriticality: + description: The finding provider value for the level of importance + assigned to the resources associated with the findings. + A score of 0 means that the underlying resources have no + criticality, and a score of 100 is reserved for the most + critical resources. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. 
+ type: string + type: object + type: array + findingProviderFieldsRelatedFindingsId: + description: The finding identifier of a related finding that + is identified by the finding provider. See String Filter + below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsRelatedFindingsProductArn: + description: The ARN of the solution that generated a related + finding that is identified by the finding provider. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsSeverityLabel: + description: The finding provider value for the severity label. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsSeverityOriginal: + description: The finding provider's original value for the + severity. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + findingProviderFieldsTypes: + description: 'One or more finding types that the finding provider + assigned to the finding. 
Uses the format of namespace/category/classifier + that classify a finding. Valid namespace values include: + Software and Configuration Checks, TTPs, Effects, Unusual + Behaviors, and Sensitive Data Identifications. See String + Filter below for more details.' + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + firstObservedAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider first observed the potential + security issue that a finding captured. See Date Filter + below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + generatorId: + description: The identifier for the solution-specific component + (a discrete unit of logic) that generated a finding. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + id: + description: The security findings provider-specific identifier + for a finding. See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + keyword: + description: A keyword for a finding. See Keyword Filter below + for more details. + items: + properties: + value: + description: A value for the keyword. + type: string + type: object + type: array + lastObservedAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider most recently observed + the potential security issue that a finding captured. See + Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + malwareName: + description: The name of the malware that was observed. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + malwarePath: + description: The filesystem path of the malware that was observed. + See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + malwareState: + description: The state of the malware that was observed. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + malwareType: + description: The type of the malware that was observed. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkDestinationDomain: + description: The destination domain of network-related information + about a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkDestinationIpv4: + description: The destination IPv4 address of network-related + information about a finding. See Ip Filter below for more + details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkDestinationIpv6: + description: The destination IPv6 address of network-related + information about a finding. See Ip Filter below for more + details. 
+ items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkDestinationPort: + description: The destination port of network-related information + about a finding. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + networkDirection: + description: Indicates the direction of network traffic associated + with a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkProtocol: + description: The protocol of network-related information about + a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkSourceDomain: + description: The source domain of network-related information + about a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkSourceIpv4: + description: The source IPv4 address of network-related information + about a finding. See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkSourceIpv6: + description: The source IPv6 address of network-related information + about a finding. See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + networkSourceMac: + description: The source media access control (MAC) address + of network-related information about a finding. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + networkSourcePort: + description: The source port of network-related information + about a finding. See Number Filter below for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + noteText: + description: The text of a note. See String Filter below for + more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + noteUpdatedAt: + description: The timestamp of when the note was updated. See + Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + noteUpdatedBy: + description: The principal that created a note. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + processLaunchedAt: + description: The date/time that the process was launched. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. 
+ type: string + type: object + type: array + processName: + description: The name of the process. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + processParentPid: + description: The parent process ID. See Number Filter below + for more details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. + type: string + type: object + type: array + processPath: + description: The path to the process executable. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + processPid: + description: The process ID. See Number Filter below for more + details. + items: + properties: + eq: + description: The equal-to condition to be applied to + a single field when querying for findings, provided + as a String. + type: string + gte: + description: The greater-than-equal condition to be + applied to a single field when querying for findings, + provided as a String. + type: string + lte: + description: The less-than-equal condition to be applied + to a single field when querying for findings, provided + as a String. 
+ type: string + type: object + type: array + processTerminatedAt: + description: The date/time that the process was terminated. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + productArn: + description: The ARN generated by Security Hub that uniquely + identifies a third-party company (security findings provider) + after this provider's product (solution that generates findings) + is registered with Security Hub. See String Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + productFields: + description: A data type where security-findings providers + can include additional solution-specific details that aren't + part of the defined AwsSecurityFinding format. See Map Filter + below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + productName: + description: The name of the solution (product) that generates + findings. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + recommendationText: + description: The recommendation of what to do about the issue + described in a finding. See String Filter below for more + details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + recordState: + description: The updated record state for the finding. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + relatedFindingsId: + description: The solution-generated identifier for a related + finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + relatedFindingsProductArn: + description: The ARN of the solution that generated a related + finding. See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceIamInstanceProfileArn: + description: The IAM profile ARN of the instance. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceImageId: + description: The Amazon Machine Image (AMI) ID of the instance. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceIpv4Addresses: + description: The IPv4 addresses associated with the instance. + See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + resourceAwsEc2InstanceIpv6Addresses: + description: The IPv6 addresses associated with the instance. + See Ip Filter below for more details. + items: + properties: + cidr: + description: A finding's CIDR value. + type: string + type: object + type: array + resourceAwsEc2InstanceKeyName: + description: The key name associated with the instance. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' 
+ type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceLaunchedAt: + description: The date and time the instance was launched. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + resourceAwsEc2InstanceSubnetId: + description: The identifier of the subnet that the instance + was launched in. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceType: + description: The instance type of the instance. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsEc2InstanceVpcId: + description: The identifier of the VPC that the instance was + launched in. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. 
Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsIamAccessKeyCreatedAt: + description: The creation date/time of the IAM access key + related to a finding. See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + resourceAwsIamAccessKeyStatus: + description: The status of the IAM access key related to a + finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsIamAccessKeyUserName: + description: The user associated with the IAM access key related + to a finding. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsS3BucketOwnerId: + description: The canonical user ID of the owner of the S3 + bucket. See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceAwsS3BucketOwnerName: + description: The display name of the owner of the S3 bucket. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceContainerImageId: + description: The identifier of the image related to a finding. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceContainerImageName: + description: The name of the image related to a finding. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceContainerLaunchedAt: + description: The date/time that the container was started. + See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. 
+ type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + resourceContainerName: + description: The name of the container related to a finding. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceDetailsOther: + description: The details of a resource that doesn't have a + specific subfield for the resource type defined. See Map + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceId: + description: The canonical identifier for the given resource + type. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourcePartition: + description: The canonical AWS partition name that the Region + is assigned to. See String Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceRegion: + description: The canonical AWS external Region name where + this resource is located. See String Filter below for more + details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceTags: + description: A list of AWS tags associated with a resource + at the time the finding was processed. See Map Filter below + for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + resourceType: + description: Specifies the type of the resource that details + are provided for. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + severityLabel: + description: The label of a finding's severity. See String + Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. 
Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + sourceUrl: + description: A URL that links to a page about the current + finding in the security-findings provider's solution. See + String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorCategory: + description: The category of a threat intelligence indicator. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorLastObservedAt: + description: The date/time of the last observation of a threat + intelligence indicator. See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + threatIntelIndicatorSource: + description: The source of the threat intelligence. See String + Filter below for more details. 
+ items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorSourceUrl: + description: The URL for more details from the source of the + threat intelligence. See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorType: + description: The type of a threat intelligence indicator. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + threatIntelIndicatorValue: + description: The value of a threat intelligence indicator. + See String Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + title: + description: A finding's title. See String Filter below for + more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + type: + description: A finding type in the format of namespace/category/classifier + that classifies a finding. See String Filter below for more + details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + updatedAt: + description: An ISO8601-formatted timestamp that indicates + when the security-findings provider last updated the finding + record. See Date Filter below for more details. + items: + properties: + dateRange: + description: A configuration block of the date range + for the date filter. See date_range below for more + details. + properties: + unit: + description: 'A date range unit for the date filter. + Valid values: DAYS.' + type: string + value: + description: A value for the keyword. + type: number + type: object + end: + description: An end date for the date filter. Required + with start if date_range is not specified. + type: string + start: + description: A start date for the date filter. Required + with end if date_range is not specified. + type: string + type: object + type: array + userDefinedValues: + description: A list of name/value string pairs associated + with the finding. These are custom, user-defined fields + added to a finding. See Map Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + key: + description: The key of the map filter. For example, + for ResourceTags, Key identifies the name of the tag. + For UserDefinedFields, Key is the name of the field. + type: string + value: + description: A value for the keyword. 
+ type: string + type: object + type: array + verificationState: + description: The veracity of a finding. See String Filter + below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + workflowStatus: + description: The status of the investigation into a finding. + See Workflow Status Filter below for more details. + items: + properties: + comparison: + description: 'The condition to apply to a string value + when querying for findings. Valid values include: + EQUALS and NOT_EQUALS.' + type: string + value: + description: A value for the keyword. + type: string + type: object + type: array + type: object + groupByAttribute: + description: The attribute used to group the findings for the + insight e.g., if an insight is grouped by ResourceId, then the + insight produces a list of resource identifiers. + type: string + id: + description: ARN of the insight. + type: string + name: + description: The name of the custom insight. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/servicecatalog.aws.upbound.io_products.yaml b/package/crds/servicecatalog.aws.upbound.io_products.yaml index 4a0daf4a2e..471f0834b3 100644 --- a/package/crds/servicecatalog.aws.upbound.io_products.yaml +++ b/package/crds/servicecatalog.aws.upbound.io_products.yaml @@ -580,3 +580,559 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Product is the Schema for the Products API. 
Manages a Service + Catalog Product + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ProductSpec defines the desired state of Product + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + acceptLanguage: + description: 'Language code. Valid values: en (English), jp (Japanese), + zh (Chinese). Default value is en.' + type: string + description: + description: Description of the product. + type: string + distributor: + description: Distributor (i.e., vendor) of the product. + type: string + name: + description: Name of the product. + type: string + owner: + description: Owner of the product. 
+ type: string + provisioningArtifactParameters: + description: Configuration block for provisioning artifact (i.e., + version) parameters. Detailed below. + properties: + description: + description: Description of the provisioning artifact (i.e., + version), including how it differs from the previous provisioning + artifact. + type: string + disableTemplateValidation: + description: Whether AWS Service Catalog stops validating + the specified provisioning artifact template even if it + is invalid. + type: boolean + name: + description: Name of the provisioning artifact (for example, + v1, v2beta). No spaces are allowed. + type: string + templatePhysicalId: + description: Template source as the physical ID of the resource + that contains the template. Currently only supports CloudFormation + stack ARN. Specify the physical ID as arn:[partition]:cloudformation:[region]:[account + ID]:stack/[stack name]/[resource ID]. + type: string + templateUrl: + description: Template source as URL of the CloudFormation + template in Amazon S3. + type: string + type: + description: Type of provisioning artifact. See AWS Docs for + valid list of values. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + supportDescription: + description: Support information about the product. + type: string + supportEmail: + description: Contact email for product support. + type: string + supportUrl: + description: Contact URL for product support. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: Type of product. See AWS Docs for valid list of values. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + acceptLanguage: + description: 'Language code. Valid values: en (English), jp (Japanese), + zh (Chinese). Default value is en.' + type: string + description: + description: Description of the product. + type: string + distributor: + description: Distributor (i.e., vendor) of the product. + type: string + name: + description: Name of the product. + type: string + owner: + description: Owner of the product. + type: string + provisioningArtifactParameters: + description: Configuration block for provisioning artifact (i.e., + version) parameters. Detailed below. + properties: + description: + description: Description of the provisioning artifact (i.e., + version), including how it differs from the previous provisioning + artifact. + type: string + disableTemplateValidation: + description: Whether AWS Service Catalog stops validating + the specified provisioning artifact template even if it + is invalid. + type: boolean + name: + description: Name of the provisioning artifact (for example, + v1, v2beta). No spaces are allowed. + type: string + templatePhysicalId: + description: Template source as the physical ID of the resource + that contains the template. Currently only supports CloudFormation + stack ARN. Specify the physical ID as arn:[partition]:cloudformation:[region]:[account + ID]:stack/[stack name]/[resource ID]. 
+ type: string + templateUrl: + description: Template source as URL of the CloudFormation + template in Amazon S3. + type: string + type: + description: Type of provisioning artifact. See AWS Docs for + valid list of values. + type: string + type: object + supportDescription: + description: Support information about the product. + type: string + supportEmail: + description: Contact email for product support. + type: string + supportUrl: + description: Contact URL for product support. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: Type of product. See AWS Docs for valid list of values. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.owner is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.owner) + || (has(self.initProvider) && has(self.initProvider.owner))' + - message: spec.forProvider.provisioningArtifactParameters is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.provisioningArtifactParameters) + || (has(self.initProvider) && has(self.initProvider.provisioningArtifactParameters))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: ProductStatus defines the observed state of Product. + properties: + atProvider: + properties: + acceptLanguage: + description: 'Language code. 
Valid values: en (English), jp (Japanese), + zh (Chinese). Default value is en.' + type: string + arn: + description: ARN of the product. + type: string + createdTime: + description: Time when the product was created. + type: string + description: + description: Description of the product. + type: string + distributor: + description: Distributor (i.e., vendor) of the product. + type: string + hasDefaultPath: + description: Whether the product has a default path. If the product + does not have a default path, call ListLaunchPaths to disambiguate + between paths. Otherwise, ListLaunchPaths is not required, + and the output of ProductViewSummary can be used directly with + DescribeProvisioningParameters. + type: boolean + id: + description: Product ID. For example, prod-dnigbtea24ste. + type: string + name: + description: Name of the product. + type: string + owner: + description: Owner of the product. + type: string + provisioningArtifactParameters: + description: Configuration block for provisioning artifact (i.e., + version) parameters. Detailed below. + properties: + description: + description: Description of the provisioning artifact (i.e., + version), including how it differs from the previous provisioning + artifact. + type: string + disableTemplateValidation: + description: Whether AWS Service Catalog stops validating + the specified provisioning artifact template even if it + is invalid. + type: boolean + name: + description: Name of the provisioning artifact (for example, + v1, v2beta). No spaces are allowed. + type: string + templatePhysicalId: + description: Template source as the physical ID of the resource + that contains the template. Currently only supports CloudFormation + stack ARN. Specify the physical ID as arn:[partition]:cloudformation:[region]:[account + ID]:stack/[stack name]/[resource ID]. + type: string + templateUrl: + description: Template source as URL of the CloudFormation + template in Amazon S3. 
+ type: string + type: + description: Type of provisioning artifact. See AWS Docs for + valid list of values. + type: string + type: object + status: + description: Status of the product. + type: string + supportDescription: + description: Support information about the product. + type: string + supportEmail: + description: Contact email for product support. + type: string + supportUrl: + description: Contact URL for product support. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: + description: Type of product. See AWS Docs for valid list of values. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/servicecatalog.aws.upbound.io_serviceactions.yaml b/package/crds/servicecatalog.aws.upbound.io_serviceactions.yaml index 0b849a4c8f..521a348168 100644 --- a/package/crds/servicecatalog.aws.upbound.io_serviceactions.yaml +++ b/package/crds/servicecatalog.aws.upbound.io_serviceactions.yaml @@ -454,3 +454,433 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ServiceAction is the Schema for the ServiceActions API. Manages + a Service Catalog Service Action + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceActionSpec defines the desired state of ServiceAction + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + acceptLanguage: + description: Language code. Valid values are en (English), jp + (Japanese), and zh (Chinese). Default is en. + type: string + definition: + description: Self-service action definition configuration block. + Detailed below. + properties: + assumeRole: + description: ARN of the role that performs the self-service + actions on your behalf. For example, arn:aws:iam::12345678910:role/ActionRole. + To reuse the provisioned product launch role, set to LAUNCH_ROLE. + type: string + name: + description: Name of the SSM document. For example, AWS-RestartEC2Instance. + If you are using a shared SSM document, you must provide + the ARN instead of the name. 
+ type: string + parameters: + description: 'List of parameters in JSON format. For example: + [{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}] or [{\"Name\":\"InstanceId\",\"Type\":\"TEXT_VALUE\"}].' + type: string + type: + description: Service action definition type. Valid value is + SSM_AUTOMATION. Default is SSM_AUTOMATION. + type: string + version: + description: SSM document version. For example, 1. + type: string + type: object + description: + description: Self-service action description. + type: string + name: + description: Self-service action name. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + acceptLanguage: + description: Language code. Valid values are en (English), jp + (Japanese), and zh (Chinese). Default is en. + type: string + definition: + description: Self-service action definition configuration block. + Detailed below. + properties: + assumeRole: + description: ARN of the role that performs the self-service + actions on your behalf. For example, arn:aws:iam::12345678910:role/ActionRole. + To reuse the provisioned product launch role, set to LAUNCH_ROLE. + type: string + name: + description: Name of the SSM document. For example, AWS-RestartEC2Instance. 
+ If you are using a shared SSM document, you must provide + the ARN instead of the name. + type: string + parameters: + description: 'List of parameters in JSON format. For example: + [{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}] or [{\"Name\":\"InstanceId\",\"Type\":\"TEXT_VALUE\"}].' + type: string + type: + description: Service action definition type. Valid value is + SSM_AUTOMATION. Default is SSM_AUTOMATION. + type: string + version: + description: SSM document version. For example, 1. + type: string + type: object + description: + description: Self-service action description. + type: string + name: + description: Self-service action name. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.definition is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.definition) + || (has(self.initProvider) && has(self.initProvider.definition))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ServiceActionStatus defines the observed state of ServiceAction. + properties: + atProvider: + properties: + acceptLanguage: + description: Language code. Valid values are en (English), jp + (Japanese), and zh (Chinese). Default is en. + type: string + definition: + description: Self-service action definition configuration block. + Detailed below. + properties: + assumeRole: + description: ARN of the role that performs the self-service + actions on your behalf. For example, arn:aws:iam::12345678910:role/ActionRole. + To reuse the provisioned product launch role, set to LAUNCH_ROLE. + type: string + name: + description: Name of the SSM document. For example, AWS-RestartEC2Instance. + If you are using a shared SSM document, you must provide + the ARN instead of the name. 
+ type: string + parameters: + description: 'List of parameters in JSON format. For example: + [{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}] or [{\"Name\":\"InstanceId\",\"Type\":\"TEXT_VALUE\"}].' + type: string + type: + description: Service action definition type. Valid value is + SSM_AUTOMATION. Default is SSM_AUTOMATION. + type: string + version: + description: SSM document version. For example, 1. + type: string + type: object + description: + description: Self-service action description. + type: string + id: + description: Identifier of the service action. + type: string + name: + description: Self-service action name. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/servicediscovery.aws.upbound.io_services.yaml b/package/crds/servicediscovery.aws.upbound.io_services.yaml index 0faa6c0aaf..ae5e952502 100644 --- a/package/crds/servicediscovery.aws.upbound.io_services.yaml +++ b/package/crds/servicediscovery.aws.upbound.io_services.yaml @@ -784,3 +784,748 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Service is the Schema for the Services API. Provides a Service + Discovery Service resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceSpec defines the desired state of Service + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the service. + type: string + dnsConfig: + description: A complex type that contains information about the + resource record sets that you want Amazon Route 53 to create + when you register an instance. + properties: + dnsRecords: + description: An array that contains one DnsRecord object for + each resource record set. + items: + properties: + ttl: + description: The amount of time, in seconds, that you + want DNS resolvers to cache the settings for this + resource record set. + type: number + type: + description: 'The type of the resource, which indicates + the value that Amazon Route 53 returns in response + to DNS queries. Valid Values: A, AAAA, SRV, CNAME' + type: string + type: object + type: array + namespaceId: + description: The ID of the namespace to use for DNS configuration. + type: string + namespaceIdRef: + description: Reference to a PrivateDNSNamespace in servicediscovery + to populate namespaceId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + namespaceIdSelector: + description: Selector for a PrivateDNSNamespace in servicediscovery + to populate namespaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routingPolicy: + description: 'The routing policy that you want to apply to + all records that Route 53 creates when you register an instance + and specify the service. Valid Values: MULTIVALUE, WEIGHTED' + type: string + type: object + forceDestroy: + description: A boolean that indicates all instances should be + deleted from the service so that the service can be destroyed + without error. These instances are not recoverable. + type: boolean + healthCheckConfig: + description: A complex type that contains settings for an optional + health check. Only for Public DNS namespaces. + properties: + failureThreshold: + description: The number of consecutive health checks. Maximum + value of 10. + type: number + resourcePath: + description: The path that you want Route 53 to request when + performing health checks. Route 53 automatically adds the + DNS name for the service. If you don't specify a value, + the default value is /. + type: string + type: + description: 'The type of health check that you want to create, + which indicates how Route 53 determines whether an endpoint + is healthy. Valid Values: HTTP, HTTPS, TCP' + type: string + type: object + healthCheckCustomConfig: + description: A complex type that contains settings for ECS managed + health checks. + properties: + failureThreshold: + description: The number of 30-second intervals that you want + service discovery to wait before it changes the health status + of a service instance. Maximum value of 10. + type: number + type: object + name: + description: The name of the service. + type: string + namespaceId: + description: The ID of the namespace that you want to use to create + the service. + type: string + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: If present, specifies that the service instances + are only discoverable using the DiscoverInstances API operation. + No DNS records is registered for the service instances. The + only valid value is HTTP. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the service. + type: string + dnsConfig: + description: A complex type that contains information about the + resource record sets that you want Amazon Route 53 to create + when you register an instance. + properties: + dnsRecords: + description: An array that contains one DnsRecord object for + each resource record set. + items: + properties: + ttl: + description: The amount of time, in seconds, that you + want DNS resolvers to cache the settings for this + resource record set. + type: number + type: + description: 'The type of the resource, which indicates + the value that Amazon Route 53 returns in response + to DNS queries. Valid Values: A, AAAA, SRV, CNAME' + type: string + type: object + type: array + namespaceId: + description: The ID of the namespace to use for DNS configuration. 
+ type: string + namespaceIdRef: + description: Reference to a PrivateDNSNamespace in servicediscovery + to populate namespaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + namespaceIdSelector: + description: Selector for a PrivateDNSNamespace in servicediscovery + to populate namespaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routingPolicy: + description: 'The routing policy that you want to apply to + all records that Route 53 creates when you register an instance + and specify the service. Valid Values: MULTIVALUE, WEIGHTED' + type: string + type: object + forceDestroy: + description: A boolean that indicates all instances should be + deleted from the service so that the service can be destroyed + without error. These instances are not recoverable. + type: boolean + healthCheckConfig: + description: A complex type that contains settings for an optional + health check. Only for Public DNS namespaces. + properties: + failureThreshold: + description: The number of consecutive health checks. Maximum + value of 10. + type: number + resourcePath: + description: The path that you want Route 53 to request when + performing health checks. Route 53 automatically adds the + DNS name for the service. If you don't specify a value, + the default value is /. + type: string + type: + description: 'The type of health check that you want to create, + which indicates how Route 53 determines whether an endpoint + is healthy. Valid Values: HTTP, HTTPS, TCP' + type: string + type: object + healthCheckCustomConfig: + description: A complex type that contains settings for ECS managed + health checks. + properties: + failureThreshold: + description: The number of 30-second intervals that you want + service discovery to wait before it changes the health status + of a service instance. Maximum value of 10. + type: number + type: object + name: + description: The name of the service. + type: string + namespaceId: + description: The ID of the namespace that you want to use to create + the service. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: + description: If present, specifies that the service instances + are only discoverable using the DiscoverInstances API operation. + No DNS records is registered for the service instances. The + only valid value is HTTP. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ServiceStatus defines the observed state of Service. + properties: + atProvider: + properties: + arn: + description: The ARN of the service. + type: string + description: + description: The description of the service. + type: string + dnsConfig: + description: A complex type that contains information about the + resource record sets that you want Amazon Route 53 to create + when you register an instance. + properties: + dnsRecords: + description: An array that contains one DnsRecord object for + each resource record set. + items: + properties: + ttl: + description: The amount of time, in seconds, that you + want DNS resolvers to cache the settings for this + resource record set. + type: number + type: + description: 'The type of the resource, which indicates + the value that Amazon Route 53 returns in response + to DNS queries. Valid Values: A, AAAA, SRV, CNAME' + type: string + type: object + type: array + namespaceId: + description: The ID of the namespace to use for DNS configuration. + type: string + routingPolicy: + description: 'The routing policy that you want to apply to + all records that Route 53 creates when you register an instance + and specify the service. Valid Values: MULTIVALUE, WEIGHTED' + type: string + type: object + forceDestroy: + description: A boolean that indicates all instances should be + deleted from the service so that the service can be destroyed + without error. These instances are not recoverable. 
+ type: boolean + healthCheckConfig: + description: A complex type that contains settings for an optional + health check. Only for Public DNS namespaces. + properties: + failureThreshold: + description: The number of consecutive health checks. Maximum + value of 10. + type: number + resourcePath: + description: The path that you want Route 53 to request when + performing health checks. Route 53 automatically adds the + DNS name for the service. If you don't specify a value, + the default value is /. + type: string + type: + description: 'The type of health check that you want to create, + which indicates how Route 53 determines whether an endpoint + is healthy. Valid Values: HTTP, HTTPS, TCP' + type: string + type: object + healthCheckCustomConfig: + description: A complex type that contains settings for ECS managed + health checks. + properties: + failureThreshold: + description: The number of 30-second intervals that you want + service discovery to wait before it changes the health status + of a service instance. Maximum value of 10. + type: number + type: object + id: + description: The ID of the service. + type: string + name: + description: The name of the service. + type: string + namespaceId: + description: The ID of the namespace that you want to use to create + the service. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: + description: If present, specifies that the service instances + are only discoverable using the DiscoverInstances API operation. + No DNS records is registered for the service instances. The + only valid value is HTTP. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ses.aws.upbound.io_configurationsets.yaml b/package/crds/ses.aws.upbound.io_configurationsets.yaml index 6fab3a8510..78f92538f7 100644 --- a/package/crds/ses.aws.upbound.io_configurationsets.yaml +++ b/package/crds/ses.aws.upbound.io_configurationsets.yaml @@ -450,3 +450,423 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ConfigurationSet is the Schema for the ConfigurationSets API. + Provides an SES configuration set + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigurationSetSpec defines the desired state of ConfigurationSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deliveryOptions: + description: Whether messages that use the configuration set are + required to use TLS. See below. + properties: + tlsPolicy: + description: 'Whether messages that use the configuration + set are required to use Transport Layer Security (TLS). + If the value is Require, messages are only delivered if + a TLS connection can be established. If the value is Optional, + messages can be delivered in plain text if a TLS connection + can''t be established. Valid values: Require or Optional. + Defaults to Optional.' + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + reputationMetricsEnabled: + description: Whether or not Amazon SES publishes reputation metrics + for the configuration set, such as bounce and complaint rates, + to Amazon CloudWatch. The default value is false. + type: boolean + sendingEnabled: + description: Whether email sending is enabled or disabled for + the configuration set. The default value is true. 
+ type: boolean + trackingOptions: + description: 'Domain that is used to redirect email recipients + to an Amazon SES-operated domain. See below. NOTE: This functionality + is best effort.' + properties: + customRedirectDomain: + description: Custom subdomain that is used to redirect email + recipients to the Amazon SES event tracking domain. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deliveryOptions: + description: Whether messages that use the configuration set are + required to use TLS. See below. + properties: + tlsPolicy: + description: 'Whether messages that use the configuration + set are required to use Transport Layer Security (TLS). + If the value is Require, messages are only delivered if + a TLS connection can be established. If the value is Optional, + messages can be delivered in plain text if a TLS connection + can''t be established. Valid values: Require or Optional. + Defaults to Optional.' + type: string + type: object + reputationMetricsEnabled: + description: Whether or not Amazon SES publishes reputation metrics + for the configuration set, such as bounce and complaint rates, + to Amazon CloudWatch. The default value is false. + type: boolean + sendingEnabled: + description: Whether email sending is enabled or disabled for + the configuration set. 
The default value is true. + type: boolean + trackingOptions: + description: 'Domain that is used to redirect email recipients + to an Amazon SES-operated domain. See below. NOTE: This functionality + is best effort.' + properties: + customRedirectDomain: + description: Custom subdomain that is used to redirect email + recipients to the Amazon SES event tracking domain. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ConfigurationSetStatus defines the observed state of ConfigurationSet. + properties: + atProvider: + properties: + arn: + description: SES configuration set ARN. + type: string + deliveryOptions: + description: Whether messages that use the configuration set are + required to use TLS. See below. + properties: + tlsPolicy: + description: 'Whether messages that use the configuration + set are required to use Transport Layer Security (TLS). + If the value is Require, messages are only delivered if + a TLS connection can be established. If the value is Optional, + messages can be delivered in plain text if a TLS connection + can''t be established. Valid values: Require or Optional. + Defaults to Optional.' + type: string + type: object + id: + description: SES configuration set name. + type: string + lastFreshStart: + description: Date and time at which the reputation metrics for + the configuration set were last reset. Resetting these metrics + is known as a fresh start. + type: string + reputationMetricsEnabled: + description: Whether or not Amazon SES publishes reputation metrics + for the configuration set, such as bounce and complaint rates, + to Amazon CloudWatch. The default value is false. + type: boolean + sendingEnabled: + description: Whether email sending is enabled or disabled for + the configuration set. The default value is true. + type: boolean + trackingOptions: + description: 'Domain that is used to redirect email recipients + to an Amazon SES-operated domain. See below. NOTE: This functionality + is best effort.' + properties: + customRedirectDomain: + description: Custom subdomain that is used to redirect email + recipients to the Amazon SES event tracking domain. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ses.aws.upbound.io_eventdestinations.yaml b/package/crds/ses.aws.upbound.io_eventdestinations.yaml index f3b8d72e4b..a3f3bf0c92 100644 --- a/package/crds/ses.aws.upbound.io_eventdestinations.yaml +++ b/package/crds/ses.aws.upbound.io_eventdestinations.yaml @@ -1092,3 +1092,1065 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EventDestination is the Schema for the EventDestinations API. + Provides an SES event destination + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EventDestinationSpec defines the desired state of EventDestination + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cloudwatchDestination: + description: CloudWatch destination for the events + items: + properties: + defaultValue: + description: The default value for the event + type: string + dimensionName: + description: The name for the dimension + type: string + valueSource: + description: The source for the value. May be any of "messageTag", + "emailHeader" or "linkTag". + type: string + type: object + type: array + configurationSetName: + description: The name of the configuration set + type: string + configurationSetNameRef: + description: Reference to a ConfigurationSet in ses to populate + configurationSetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + configurationSetNameSelector: + description: Selector for a ConfigurationSet in ses to populate + configurationSetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: If true, the event destination will be enabled + type: boolean + kinesisDestination: + description: Send the events to a kinesis firehose destination + properties: + roleArn: + description: The ARN of the role that has permissions to access + the Kinesis Stream + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamArn: + description: The ARN of the Kinesis Stream + type: string + streamArnRef: + description: Reference to a DeliveryStream in firehose to + populate streamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamArnSelector: + description: Selector for a DeliveryStream in firehose to + populate streamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + matchingTypes: + description: A list of matching types. May be any of "send", "reject", + "bounce", "complaint", "delivery", "open", "click", or "renderingFailure". + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: Region is the region you'd like your resource to + be created in. + type: string + snsDestination: + description: Send the events to an SNS Topic destination + properties: + topicArn: + description: The ARN of the SNS topic + type: string + topicArnRef: + description: Reference to a Topic in sns to populate topicArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + topicArnSelector: + description: Selector for a Topic in sns to populate topicArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cloudwatchDestination: + description: CloudWatch destination for the events + items: + properties: + defaultValue: + description: The default value for the event + type: string + dimensionName: + description: The name for the dimension + type: string + valueSource: + description: The source for the value. May be any of "messageTag", + "emailHeader" or "linkTag". + type: string + type: object + type: array + configurationSetName: + description: The name of the configuration set + type: string + configurationSetNameRef: + description: Reference to a ConfigurationSet in ses to populate + configurationSetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + configurationSetNameSelector: + description: Selector for a ConfigurationSet in ses to populate + configurationSetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: If true, the event destination will be enabled + type: boolean + kinesisDestination: + description: Send the events to a kinesis firehose destination + properties: + roleArn: + description: The ARN of the role that has permissions to access + the Kinesis Stream + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamArn: + description: The ARN of the Kinesis Stream + type: string + streamArnRef: + description: Reference to a DeliveryStream in firehose to + populate streamArn. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamArnSelector: + description: Selector for a DeliveryStream in firehose to + populate streamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + matchingTypes: + description: A list of matching types. May be any of "send", "reject", + "bounce", "complaint", "delivery", "open", "click", or "renderingFailure". + items: + type: string + type: array + x-kubernetes-list-type: set + snsDestination: + description: Send the events to an SNS Topic destination + properties: + topicArn: + description: The ARN of the SNS topic + type: string + topicArnRef: + description: Reference to a Topic in sns to populate topicArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + topicArnSelector: + description: Selector for a Topic in sns to populate topicArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.matchingTypes is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.matchingTypes) + || (has(self.initProvider) && has(self.initProvider.matchingTypes))' + status: + description: EventDestinationStatus defines the observed state of EventDestination. + properties: + atProvider: + properties: + arn: + description: The SES event destination ARN. + type: string + cloudwatchDestination: + description: CloudWatch destination for the events + items: + properties: + defaultValue: + description: The default value for the event + type: string + dimensionName: + description: The name for the dimension + type: string + valueSource: + description: The source for the value. May be any of "messageTag", + "emailHeader" or "linkTag". + type: string + type: object + type: array + configurationSetName: + description: The name of the configuration set + type: string + enabled: + description: If true, the event destination will be enabled + type: boolean + id: + description: The SES event destination name. 
+ type: string + kinesisDestination: + description: Send the events to a kinesis firehose destination + properties: + roleArn: + description: The ARN of the role that has permissions to access + the Kinesis Stream + type: string + streamArn: + description: The ARN of the Kinesis Stream + type: string + type: object + matchingTypes: + description: A list of matching types. May be any of "send", "reject", + "bounce", "complaint", "delivery", "open", "click", or "renderingFailure". + items: + type: string + type: array + x-kubernetes-list-type: set + snsDestination: + description: Send the events to an SNS Topic destination + properties: + topicArn: + description: The ARN of the SNS topic + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sesv2.aws.upbound.io_configurationseteventdestinations.yaml b/package/crds/sesv2.aws.upbound.io_configurationseteventdestinations.yaml index d5cff7bd38..8404b2399c 100644 --- a/package/crds/sesv2.aws.upbound.io_configurationseteventdestinations.yaml +++ b/package/crds/sesv2.aws.upbound.io_configurationseteventdestinations.yaml @@ -1403,3 +1403,1335 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ConfigurationSetEventDestination is the Schema for the ConfigurationSetEventDestinations + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigurationSetEventDestinationSpec defines the desired + state of ConfigurationSetEventDestination + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configurationSetName: + description: The name of the configuration set. + type: string + configurationSetNameRef: + description: Reference to a ConfigurationSet in sesv2 to populate + configurationSetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + configurationSetNameSelector: + description: Selector for a ConfigurationSet in sesv2 to populate + configurationSetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventDestination: + description: A name that identifies the event destination within + the configuration set. + properties: + cloudWatchDestination: + description: An object that defines an Amazon CloudWatch destination + for email events. See cloud_watch_destination below + properties: + dimensionConfiguration: + description: An array of objects that define the dimensions + to use when you send email events to Amazon CloudWatch. + See dimension_configuration below. 
+ items: + properties: + defaultDimensionValue: + description: The default value of the dimension + that is published to Amazon CloudWatch if you + don't provide the value of the dimension when + you send an email. + type: string + dimensionName: + description: The name of an Amazon CloudWatch dimension + associated with an email sending metric. + type: string + dimensionValueSource: + description: 'The location where the Amazon SES + API v2 finds the value of a dimension to publish + to Amazon CloudWatch. Valid values: MESSAGE_TAG, + EMAIL_HEADER, LINK_TAG.' + type: string + type: object + type: array + type: object + enabled: + description: 'When the event destination is enabled, the specified + event types are sent to the destinations. Default: false.' + type: boolean + kinesisFirehoseDestination: + description: An object that defines an Amazon Kinesis Data + Firehose destination for email events. See kinesis_firehose_destination + below. + properties: + deliveryStreamArn: + description: The Amazon Resource Name (ARN) of the Amazon + Kinesis Data Firehose stream that the Amazon SES API + v2 sends email events to. + type: string + deliveryStreamArnRef: + description: Reference to a DeliveryStream in firehose + to populate deliveryStreamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deliveryStreamArnSelector: + description: Selector for a DeliveryStream in firehose + to populate deliveryStreamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + iamRoleArn: + description: The Amazon Resource Name (ARN) of the IAM + role that the Amazon SES API v2 uses to send email events + to the Amazon Kinesis Data Firehose stream. + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + matchingEventTypes: + description: '- An array that specifies which events the Amazon + SES API v2 should send to the destinations. 
Valid values: + SEND, REJECT, BOUNCE, COMPLAINT, DELIVERY, OPEN, CLICK, + RENDERING_FAILURE, DELIVERY_DELAY, SUBSCRIPTION.' + items: + type: string + type: array + pinpointDestination: + description: An object that defines an Amazon Pinpoint project + destination for email events. See pinpoint_destination below. + properties: + applicationArn: + type: string + applicationArnRef: + description: Reference to a App in pinpoint to populate + applicationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + applicationArnSelector: + description: Selector for a App in pinpoint to populate + applicationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + snsDestination: + description: An object that defines an Amazon SNS destination + for email events. See sns_destination below. + properties: + topicArn: + description: The Amazon Resource Name (ARN) of the Amazon + SNS topic to publish email events to. + type: string + topicArnRef: + description: Reference to a Topic in sns to populate topicArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + topicArnSelector: + description: Selector for a Topic in sns to populate topicArn. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + eventDestinationName: + description: An object that defines the event destination. See + event_destination below. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configurationSetName: + description: The name of the configuration set. + type: string + configurationSetNameRef: + description: Reference to a ConfigurationSet in sesv2 to populate + configurationSetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + configurationSetNameSelector: + description: Selector for a ConfigurationSet in sesv2 to populate + configurationSetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventDestination: + description: A name that identifies the event destination within + the configuration set. + properties: + cloudWatchDestination: + description: An object that defines an Amazon CloudWatch destination + for email events. See cloud_watch_destination below + properties: + dimensionConfiguration: + description: An array of objects that define the dimensions + to use when you send email events to Amazon CloudWatch. + See dimension_configuration below. + items: + properties: + defaultDimensionValue: + description: The default value of the dimension + that is published to Amazon CloudWatch if you + don't provide the value of the dimension when + you send an email. + type: string + dimensionName: + description: The name of an Amazon CloudWatch dimension + associated with an email sending metric. + type: string + dimensionValueSource: + description: 'The location where the Amazon SES + API v2 finds the value of a dimension to publish + to Amazon CloudWatch. Valid values: MESSAGE_TAG, + EMAIL_HEADER, LINK_TAG.' + type: string + type: object + type: array + type: object + enabled: + description: 'When the event destination is enabled, the specified + event types are sent to the destinations. Default: false.' + type: boolean + kinesisFirehoseDestination: + description: An object that defines an Amazon Kinesis Data + Firehose destination for email events. 
See kinesis_firehose_destination + below. + properties: + deliveryStreamArn: + description: The Amazon Resource Name (ARN) of the Amazon + Kinesis Data Firehose stream that the Amazon SES API + v2 sends email events to. + type: string + deliveryStreamArnRef: + description: Reference to a DeliveryStream in firehose + to populate deliveryStreamArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deliveryStreamArnSelector: + description: Selector for a DeliveryStream in firehose + to populate deliveryStreamArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + iamRoleArn: + description: The Amazon Resource Name (ARN) of the IAM + role that the Amazon SES API v2 uses to send email events + to the Amazon Kinesis Data Firehose stream. + type: string + iamRoleArnRef: + description: Reference to a Role in iam to populate iamRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iamRoleArnSelector: + description: Selector for a Role in iam to populate iamRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + matchingEventTypes: + description: '- An array that specifies which events the Amazon + SES API v2 should send to the destinations. Valid values: + SEND, REJECT, BOUNCE, COMPLAINT, DELIVERY, OPEN, CLICK, + RENDERING_FAILURE, DELIVERY_DELAY, SUBSCRIPTION.' + items: + type: string + type: array + pinpointDestination: + description: An object that defines an Amazon Pinpoint project + destination for email events. See pinpoint_destination below. + properties: + applicationArn: + type: string + applicationArnRef: + description: Reference to a App in pinpoint to populate + applicationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + applicationArnSelector: + description: Selector for a App in pinpoint to populate + applicationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + snsDestination: + description: An object that defines an Amazon SNS destination + for email events. See sns_destination below. + properties: + topicArn: + description: The Amazon Resource Name (ARN) of the Amazon + SNS topic to publish email events to. 
+ type: string + topicArnRef: + description: Reference to a Topic in sns to populate topicArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + topicArnSelector: + description: Selector for a Topic in sns to populate topicArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + eventDestinationName: + description: An object that defines the event destination. See + event_destination below. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.eventDestination is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.eventDestination) + || (has(self.initProvider) && has(self.initProvider.eventDestination))' + - message: spec.forProvider.eventDestinationName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.eventDestinationName) + || (has(self.initProvider) && has(self.initProvider.eventDestinationName))' + status: + description: ConfigurationSetEventDestinationStatus defines the observed + state of ConfigurationSetEventDestination. + properties: + atProvider: + properties: + configurationSetName: + description: The name of the configuration set. + type: string + eventDestination: + description: A name that identifies the event destination within + the configuration set. + properties: + cloudWatchDestination: + description: An object that defines an Amazon CloudWatch destination + for email events. See cloud_watch_destination below + properties: + dimensionConfiguration: + description: An array of objects that define the dimensions + to use when you send email events to Amazon CloudWatch. + See dimension_configuration below. + items: + properties: + defaultDimensionValue: + description: The default value of the dimension + that is published to Amazon CloudWatch if you + don't provide the value of the dimension when + you send an email. + type: string + dimensionName: + description: The name of an Amazon CloudWatch dimension + associated with an email sending metric. 
+ type: string + dimensionValueSource: + description: 'The location where the Amazon SES + API v2 finds the value of a dimension to publish + to Amazon CloudWatch. Valid values: MESSAGE_TAG, + EMAIL_HEADER, LINK_TAG.' + type: string + type: object + type: array + type: object + enabled: + description: 'When the event destination is enabled, the specified + event types are sent to the destinations. Default: false.' + type: boolean + kinesisFirehoseDestination: + description: An object that defines an Amazon Kinesis Data + Firehose destination for email events. See kinesis_firehose_destination + below. + properties: + deliveryStreamArn: + description: The Amazon Resource Name (ARN) of the Amazon + Kinesis Data Firehose stream that the Amazon SES API + v2 sends email events to. + type: string + iamRoleArn: + description: The Amazon Resource Name (ARN) of the IAM + role that the Amazon SES API v2 uses to send email events + to the Amazon Kinesis Data Firehose stream. + type: string + type: object + matchingEventTypes: + description: '- An array that specifies which events the Amazon + SES API v2 should send to the destinations. Valid values: + SEND, REJECT, BOUNCE, COMPLAINT, DELIVERY, OPEN, CLICK, + RENDERING_FAILURE, DELIVERY_DELAY, SUBSCRIPTION.' + items: + type: string + type: array + pinpointDestination: + description: An object that defines an Amazon Pinpoint project + destination for email events. See pinpoint_destination below. + properties: + applicationArn: + type: string + type: object + snsDestination: + description: An object that defines an Amazon SNS destination + for email events. See sns_destination below. + properties: + topicArn: + description: The Amazon Resource Name (ARN) of the Amazon + SNS topic to publish email events to. + type: string + type: object + type: object + eventDestinationName: + description: An object that defines the event destination. See + event_destination below. 
+ type: string + id: + description: A pipe-delimited string combining configuration_set_name + and event_destination_name. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sesv2.aws.upbound.io_configurationsets.yaml b/package/crds/sesv2.aws.upbound.io_configurationsets.yaml index b08b5ad77d..635af3025c 100644 --- a/package/crds/sesv2.aws.upbound.io_configurationsets.yaml +++ b/package/crds/sesv2.aws.upbound.io_configurationsets.yaml @@ -652,3 +652,586 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ConfigurationSet is the Schema for the ConfigurationSets API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigurationSetSpec defines the desired state of ConfigurationSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deliveryOptions: + description: An object that defines the dedicated IP pool that + is used to send emails that you send using the configuration + set. + properties: + sendingPoolName: + description: The name of the dedicated IP pool to associate + with the configuration set. + type: string + tlsPolicy: + description: 'Specifies whether messages that use the configuration + set are required to use Transport Layer Security (TLS). + Valid values: REQUIRE, OPTIONAL.' + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + reputationOptions: + description: An object that defines whether or not Amazon SES + collects reputation metrics for the emails that you send that + use the configuration set. + properties: + reputationMetricsEnabled: + description: If true, tracking of reputation metrics is enabled + for the configuration set. If false, tracking of reputation + metrics is disabled for the configuration set. 
+ type: boolean + type: object + sendingOptions: + description: An object that defines whether or not Amazon SES + can send email that you send using the configuration set. + properties: + sendingEnabled: + description: If true, email sending is enabled for the configuration + set. If false, email sending is disabled for the configuration + set. + type: boolean + type: object + suppressionOptions: + description: An object that contains information about the suppression + list preferences for your account. + properties: + suppressedReasons: + description: 'A list that contains the reasons that email + addresses are automatically added to the suppression list + for your account. Valid values: BOUNCE, COMPLAINT.' + items: + type: string + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + trackingOptions: + description: An object that defines the open and click tracking + options for emails that you send using the configuration set. + properties: + customRedirectDomain: + description: The domain to use for tracking open and click + events. + type: string + type: object + vdmOptions: + description: An object that defines the VDM settings that apply + to emails that you send using the configuration set. + properties: + dashboardOptions: + description: Specifies additional settings for your VDM configuration + as applicable to the Dashboard. + properties: + engagementMetrics: + description: 'Specifies the status of your VDM engagement + metrics collection. Valid values: ENABLED, DISABLED.' + type: string + type: object + guardianOptions: + description: Specifies additional settings for your VDM configuration + as applicable to the Guardian. + properties: + optimizedSharedDelivery: + description: 'Specifies the status of your VDM optimized + shared delivery. Valid values: ENABLED, DISABLED.' 
+ type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deliveryOptions: + description: An object that defines the dedicated IP pool that + is used to send emails that you send using the configuration + set. + properties: + sendingPoolName: + description: The name of the dedicated IP pool to associate + with the configuration set. + type: string + tlsPolicy: + description: 'Specifies whether messages that use the configuration + set are required to use Transport Layer Security (TLS). + Valid values: REQUIRE, OPTIONAL.' + type: string + type: object + reputationOptions: + description: An object that defines whether or not Amazon SES + collects reputation metrics for the emails that you send that + use the configuration set. + properties: + reputationMetricsEnabled: + description: If true, tracking of reputation metrics is enabled + for the configuration set. If false, tracking of reputation + metrics is disabled for the configuration set. + type: boolean + type: object + sendingOptions: + description: An object that defines whether or not Amazon SES + can send email that you send using the configuration set. + properties: + sendingEnabled: + description: If true, email sending is enabled for the configuration + set. 
If false, email sending is disabled for the configuration + set. + type: boolean + type: object + suppressionOptions: + description: An object that contains information about the suppression + list preferences for your account. + properties: + suppressedReasons: + description: 'A list that contains the reasons that email + addresses are automatically added to the suppression list + for your account. Valid values: BOUNCE, COMPLAINT.' + items: + type: string + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + trackingOptions: + description: An object that defines the open and click tracking + options for emails that you send using the configuration set. + properties: + customRedirectDomain: + description: The domain to use for tracking open and click + events. + type: string + type: object + vdmOptions: + description: An object that defines the VDM settings that apply + to emails that you send using the configuration set. + properties: + dashboardOptions: + description: Specifies additional settings for your VDM configuration + as applicable to the Dashboard. + properties: + engagementMetrics: + description: 'Specifies the status of your VDM engagement + metrics collection. Valid values: ENABLED, DISABLED.' + type: string + type: object + guardianOptions: + description: Specifies additional settings for your VDM configuration + as applicable to the Guardian. + properties: + optimizedSharedDelivery: + description: 'Specifies the status of your VDM optimized + shared delivery. Valid values: ENABLED, DISABLED.' + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ConfigurationSetStatus defines the observed state of ConfigurationSet. + properties: + atProvider: + properties: + arn: + description: ARN of the Configuration Set. + type: string + deliveryOptions: + description: An object that defines the dedicated IP pool that + is used to send emails that you send using the configuration + set. + properties: + sendingPoolName: + description: The name of the dedicated IP pool to associate + with the configuration set. + type: string + tlsPolicy: + description: 'Specifies whether messages that use the configuration + set are required to use Transport Layer Security (TLS). 
+ Valid values: REQUIRE, OPTIONAL.' + type: string + type: object + id: + type: string + reputationOptions: + description: An object that defines whether or not Amazon SES + collects reputation metrics for the emails that you send that + use the configuration set. + properties: + lastFreshStart: + description: The date and time (in Unix time) when the reputation + metrics were last given a fresh start. When your account + is given a fresh start, your reputation metrics are calculated + starting from the date of the fresh start. + type: string + reputationMetricsEnabled: + description: If true, tracking of reputation metrics is enabled + for the configuration set. If false, tracking of reputation + metrics is disabled for the configuration set. + type: boolean + type: object + sendingOptions: + description: An object that defines whether or not Amazon SES + can send email that you send using the configuration set. + properties: + sendingEnabled: + description: If true, email sending is enabled for the configuration + set. If false, email sending is disabled for the configuration + set. + type: boolean + type: object + suppressionOptions: + description: An object that contains information about the suppression + list preferences for your account. + properties: + suppressedReasons: + description: 'A list that contains the reasons that email + addresses are automatically added to the suppression list + for your account. Valid values: BOUNCE, COMPLAINT.' + items: + type: string + type: array + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + trackingOptions: + description: An object that defines the open and click tracking + options for emails that you send using the configuration set. 
+ properties: + customRedirectDomain: + description: The domain to use for tracking open and click + events. + type: string + type: object + vdmOptions: + description: An object that defines the VDM settings that apply + to emails that you send using the configuration set. + properties: + dashboardOptions: + description: Specifies additional settings for your VDM configuration + as applicable to the Dashboard. + properties: + engagementMetrics: + description: 'Specifies the status of your VDM engagement + metrics collection. Valid values: ENABLED, DISABLED.' + type: string + type: object + guardianOptions: + description: Specifies additional settings for your VDM configuration + as applicable to the Guardian. + properties: + optimizedSharedDelivery: + description: 'Specifies the status of your VDM optimized + shared delivery. Valid values: ENABLED, DISABLED.' + type: string + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sesv2.aws.upbound.io_emailidentities.yaml b/package/crds/sesv2.aws.upbound.io_emailidentities.yaml index 21f2ef7f2d..f9fc11aa8d 100644 --- a/package/crds/sesv2.aws.upbound.io_emailidentities.yaml +++ b/package/crds/sesv2.aws.upbound.io_emailidentities.yaml @@ -655,3 +655,634 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EmailIdentity is the Schema for the EmailIdentitys API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EmailIdentitySpec defines the desired state of EmailIdentity + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configurationSetName: + description: The configuration set to use by default when sending + from this identity. Note that any configuration set defined + in the email sending request takes precedence. + type: string + configurationSetNameRef: + description: Reference to a ConfigurationSet in sesv2 to populate + configurationSetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + configurationSetNameSelector: + description: Selector for a ConfigurationSet in sesv2 to populate + configurationSetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dkimSigningAttributes: + description: The configuration of the DKIM authentication settings + for an email domain identity. + properties: + domainSigningPrivateKeySecretRef: + description: '[Bring Your Own DKIM] A private key that''s + used to generate a DKIM signature. The private key must + use 1024 or 2048-bit RSA encryption, and must be encoded + using base64 encoding.' + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + domainSigningSelector: + description: '[Bring Your Own DKIM] A string that''s used + to identify a public key in the DNS configuration for a + domain.' + type: string + nextSigningKeyLength: + description: '[Easy DKIM] The key length of the future DKIM + key pair to be generated. This can be changed at most once + per day. Valid values: RSA_1024_BIT, RSA_2048_BIT.' + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configurationSetName: + description: The configuration set to use by default when sending + from this identity. Note that any configuration set defined + in the email sending request takes precedence. + type: string + configurationSetNameRef: + description: Reference to a ConfigurationSet in sesv2 to populate + configurationSetName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + configurationSetNameSelector: + description: Selector for a ConfigurationSet in sesv2 to populate + configurationSetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dkimSigningAttributes: + description: The configuration of the DKIM authentication settings + for an email domain identity. + properties: + domainSigningPrivateKeySecretRef: + description: '[Bring Your Own DKIM] A private key that''s + used to generate a DKIM signature. The private key must + use 1024 or 2048-bit RSA encryption, and must be encoded + using base64 encoding.' + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + domainSigningSelector: + description: '[Bring Your Own DKIM] A string that''s used + to identify a public key in the DNS configuration for a + domain.' + type: string + nextSigningKeyLength: + description: '[Easy DKIM] The key length of the future DKIM + key pair to be generated. This can be changed at most once + per day. Valid values: RSA_1024_BIT, RSA_2048_BIT.' + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: EmailIdentityStatus defines the observed state of EmailIdentity. + properties: + atProvider: + properties: + arn: + description: ARN of the Email Identity. + type: string + configurationSetName: + description: The configuration set to use by default when sending + from this identity. Note that any configuration set defined + in the email sending request takes precedence. + type: string + dkimSigningAttributes: + description: The configuration of the DKIM authentication settings + for an email domain identity. + properties: + currentSigningKeyLength: + description: '[Easy DKIM] The key length of the DKIM key pair + in use.' + type: string + domainSigningSelector: + description: '[Bring Your Own DKIM] A string that''s used + to identify a public key in the DNS configuration for a + domain.' + type: string + lastKeyGenerationTimestamp: + description: '[Easy DKIM] The last time a key pair was generated + for this identity.' 
+ type: string + nextSigningKeyLength: + description: '[Easy DKIM] The key length of the future DKIM + key pair to be generated. This can be changed at most once + per day. Valid values: RSA_1024_BIT, RSA_2048_BIT.' + type: string + signingAttributesOrigin: + description: A string that indicates how DKIM was configured + for the identity. AWS_SES indicates that DKIM was configured + for the identity by using Easy DKIM. EXTERNAL indicates + that DKIM was configured for the identity by using Bring + Your Own DKIM (BYODKIM). + type: string + status: + description: Describes whether or not Amazon SES has successfully + located the DKIM records in the DNS records for the domain. + See the AWS SES API v2 Reference for supported statuses. + type: string + tokens: + description: If you used Easy DKIM to configure DKIM authentication + for the domain, then this object contains a set of unique + strings that you use to create a set of CNAME records that + you add to the DNS configuration for your domain. When Amazon + SES detects these records in the DNS configuration for your + domain, the DKIM authentication process is complete. If + you configured DKIM authentication for the domain by providing + your own public-private key pair, then this object contains + the selector for the public key. + items: + type: string + type: array + type: object + id: + type: string + identityType: + description: 'The email identity type. Valid values: EMAIL_ADDRESS, + DOMAIN.' + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: Map of tags assigned to the resource, including those + inherited from the provider default_tags configuration block. + type: object + x-kubernetes-map-type: granular + verifiedForSendingStatus: + description: Specifies whether or not the identity is verified. 
+ type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sfn.aws.upbound.io_statemachines.yaml b/package/crds/sfn.aws.upbound.io_statemachines.yaml index b964d06e14..eb688756a5 100644 --- a/package/crds/sfn.aws.upbound.io_statemachines.yaml +++ b/package/crds/sfn.aws.upbound.io_statemachines.yaml @@ -689,3 +689,662 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StateMachine is the Schema for the StateMachines API. Provides + a Step Function State Machine resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StateMachineSpec defines the desired state of StateMachine + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + definition: + description: The Amazon States Language definition of the state + machine. + type: string + loggingConfiguration: + description: Defines what execution history events are logged + and where they are logged. The logging_configuration parameter + is only valid when type is set to EXPRESS. Defaults to OFF. + For more information see Logging Express Workflows and Log Levels + in the AWS Step Functions User Guide. + properties: + includeExecutionData: + description: Determines whether execution data is included + in your log. When set to false, data is excluded. + type: boolean + level: + description: 'Defines which category of execution history + events are logged. Valid values: ALL, ERROR, FATAL, OFF' + type: string + logDestination: + description: Amazon Resource Name (ARN) of a CloudWatch log + group. Make sure the State Machine has the correct IAM policies + for logging. 
The ARN must end with :* + type: string + type: object + publish: + description: 'Set to true to publish a version of the state machine + during creation. Default: false.' + type: boolean + region: + description: Region is the region you'd like your resource to + be created in. + type: string + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role to + use for this state machine. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tracingConfiguration: + description: Selects whether AWS X-Ray tracing is enabled. + properties: + enabled: + description: When set to true, AWS X-Ray tracing is enabled. + Make sure the State Machine has the correct IAM policies + for logging. See the AWS Step Functions Developer Guide + for details. + type: boolean + type: object + type: + description: 'Determines whether a Standard or Express state machine + is created. The default is STANDARD. You cannot update the type + of a state machine once it has been created. Valid values: STANDARD, + EXPRESS.' + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + definition: + description: The Amazon States Language definition of the state + machine. + type: string + loggingConfiguration: + description: Defines what execution history events are logged + and where they are logged. The logging_configuration parameter + is only valid when type is set to EXPRESS. Defaults to OFF. + For more information see Logging Express Workflows and Log Levels + in the AWS Step Functions User Guide. + properties: + includeExecutionData: + description: Determines whether execution data is included + in your log. When set to false, data is excluded. + type: boolean + level: + description: 'Defines which category of execution history + events are logged. Valid values: ALL, ERROR, FATAL, OFF' + type: string + logDestination: + description: Amazon Resource Name (ARN) of a CloudWatch log + group. Make sure the State Machine has the correct IAM policies + for logging. The ARN must end with :* + type: string + type: object + publish: + description: 'Set to true to publish a version of the state machine + during creation. Default: false.' + type: boolean + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role to + use for this state machine. + type: string + roleArnRef: + description: Reference to a Role in iam to populate roleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleArnSelector: + description: Selector for a Role in iam to populate roleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tracingConfiguration: + description: Selects whether AWS X-Ray tracing is enabled. + properties: + enabled: + description: When set to true, AWS X-Ray tracing is enabled. 
+ Make sure the State Machine has the correct IAM policies + for logging. See the AWS Step Functions Developer Guide + for details. + type: boolean + type: object + type: + description: 'Determines whether a Standard or Express state machine + is created. The default is STANDARD. You cannot update the type + of a state machine once it has been created. Valid values: STANDARD, + EXPRESS.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.definition is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.definition) + || (has(self.initProvider) && has(self.initProvider.definition))' + status: + description: StateMachineStatus defines the observed state of StateMachine. + properties: + atProvider: + properties: + arn: + description: The ARN of the state machine. + type: string + creationDate: + description: The date the state machine was created. + type: string + definition: + description: The Amazon States Language definition of the state + machine. + type: string + description: + type: string + id: + description: The ARN of the state machine. + type: string + loggingConfiguration: + description: Defines what execution history events are logged + and where they are logged. The logging_configuration parameter + is only valid when type is set to EXPRESS. Defaults to OFF. + For more information see Logging Express Workflows and Log Levels + in the AWS Step Functions User Guide. + properties: + includeExecutionData: + description: Determines whether execution data is included + in your log. When set to false, data is excluded. + type: boolean + level: + description: 'Defines which category of execution history + events are logged. Valid values: ALL, ERROR, FATAL, OFF' + type: string + logDestination: + description: Amazon Resource Name (ARN) of a CloudWatch log + group. Make sure the State Machine has the correct IAM policies + for logging. The ARN must end with :* + type: string + type: object + publish: + description: 'Set to true to publish a version of the state machine + during creation. Default: false.' + type: boolean + revisionId: + description: The ARN of the state machine. 
+ type: string + roleArn: + description: The Amazon Resource Name (ARN) of the IAM role to + use for this state machine. + type: string + stateMachineVersionArn: + description: The ARN of the state machine version. + type: string + status: + description: The current status of the state machine. Either ACTIVE + or DELETING. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + tracingConfiguration: + description: Selects whether AWS X-Ray tracing is enabled. + properties: + enabled: + description: When set to true, AWS X-Ray tracing is enabled. + Make sure the State Machine has the correct IAM policies + for logging. See the AWS Step Functions Developer Guide + for details. + type: boolean + type: object + type: + description: 'Determines whether a Standard or Express state machine + is created. The default is STANDARD. You cannot update the type + of a state machine once it has been created. Valid values: STANDARD, + EXPRESS.' + type: string + versionDescription: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/signer.aws.upbound.io_signingjobs.yaml b/package/crds/signer.aws.upbound.io_signingjobs.yaml index ee56068467..008dd2db9c 100644 --- a/package/crds/signer.aws.upbound.io_signingjobs.yaml +++ b/package/crds/signer.aws.upbound.io_signingjobs.yaml @@ -729,3 +729,690 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SigningJob is the Schema for the SigningJobs API. Creates a Signer + Signing Job. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SigningJobSpec defines the desired state of SigningJob + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + destination: + description: The S3 bucket in which to save your signed object. + See Destination below for details. + properties: + s3: + description: 'A configuration block describing the S3 Source + object: See S3 Source below for details.' + properties: + bucket: + description: Name of the S3 bucket. + type: string + prefix: + description: An Amazon S3 object key prefix that you can + use to limit signed objects keys to begin with the specified + prefix. 
+ type: string + type: object + type: object + ignoreSigningJobFailure: + description: Set this argument to true to ignore signing job failures + and retrieve failed status and reason. Default false. + type: boolean + profileName: + description: The name of the profile to initiate the signing operation. + type: string + profileNameRef: + description: Reference to a SigningProfile in signer to populate + profileName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + profileNameSelector: + description: Selector for a SigningProfile in signer to populate + profileName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + source: + description: The S3 bucket that contains the object to sign. See + Source below for details. + properties: + s3: + description: 'A configuration block describing the S3 Source + object: See S3 Source below for details.' + properties: + bucket: + description: Name of the S3 bucket. + type: string + key: + description: Key name of the object that contains your + unsigned code. + type: string + version: + description: Version of your source image in your version + enabled S3 bucket. + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + destination: + description: The S3 bucket in which to save your signed object. + See Destination below for details. + properties: + s3: + description: 'A configuration block describing the S3 Source + object: See S3 Source below for details.' + properties: + bucket: + description: Name of the S3 bucket. + type: string + prefix: + description: An Amazon S3 object key prefix that you can + use to limit signed objects keys to begin with the specified + prefix. + type: string + type: object + type: object + ignoreSigningJobFailure: + description: Set this argument to true to ignore signing job failures + and retrieve failed status and reason. Default false. + type: boolean + profileName: + description: The name of the profile to initiate the signing operation. + type: string + profileNameRef: + description: Reference to a SigningProfile in signer to populate + profileName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + profileNameSelector: + description: Selector for a SigningProfile in signer to populate + profileName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + source: + description: The S3 bucket that contains the object to sign. See + Source below for details. + properties: + s3: + description: 'A configuration block describing the S3 Source + object: See S3 Source below for details.' + properties: + bucket: + description: Name of the S3 bucket. + type: string + key: + description: Key name of the object that contains your + unsigned code. + type: string + version: + description: Version of your source image in your version + enabled S3 bucket. + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.destination is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destination) + || (has(self.initProvider) && has(self.initProvider.destination))' + - message: spec.forProvider.source is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.source) + || (has(self.initProvider) && has(self.initProvider.source))' + status: + description: SigningJobStatus defines the observed state of SigningJob. + properties: + atProvider: + properties: + completedAt: + description: Date and time in RFC3339 format that the signing + job was completed. + type: string + createdAt: + description: Date and time in RFC3339 format that the signing + job was created. + type: string + destination: + description: The S3 bucket in which to save your signed object. + See Destination below for details. + properties: + s3: + description: 'A configuration block describing the S3 Source + object: See S3 Source below for details.' + properties: + bucket: + description: Name of the S3 bucket. + type: string + prefix: + description: An Amazon S3 object key prefix that you can + use to limit signed objects keys to begin with the specified + prefix. + type: string + type: object + type: object + id: + type: string + ignoreSigningJobFailure: + description: Set this argument to true to ignore signing job failures + and retrieve failed status and reason. Default false. + type: boolean + jobId: + description: The ID of the signing job on output. + type: string + jobInvoker: + description: The IAM entity that initiated the signing job. 
+ type: string + jobOwner: + description: The AWS account ID of the job owner. + type: string + platformDisplayName: + description: A human-readable name for the signing platform associated + with the signing job. + type: string + platformId: + description: The platform to which your signed code image will + be distributed. + type: string + profileName: + description: The name of the profile to initiate the signing operation. + type: string + profileVersion: + description: The version of the signing profile used to initiate + the signing job. + type: string + requestedBy: + description: The IAM principal that requested the signing job. + type: string + revocationRecord: + description: A revocation record if the signature generated by + the signing job has been revoked. Contains a timestamp and the + ID of the IAM entity that revoked the signature. + items: + properties: + reason: + type: string + revokedAt: + type: string + revokedBy: + type: string + type: object + type: array + signatureExpiresAt: + description: The time when the signature of a signing job expires. + type: string + signedObject: + description: Name of the S3 bucket where the signed code image + is saved by code signing. + items: + properties: + s3: + description: 'A configuration block describing the S3 Source + object: See S3 Source below for details.' + items: + properties: + bucket: + description: Name of the S3 bucket. + type: string + key: + description: Key name of the object that contains + your unsigned code. + type: string + type: object + type: array + type: object + type: array + source: + description: The S3 bucket that contains the object to sign. See + Source below for details. + properties: + s3: + description: 'A configuration block describing the S3 Source + object: See S3 Source below for details.' + properties: + bucket: + description: Name of the S3 bucket. + type: string + key: + description: Key name of the object that contains your + unsigned code. 
+ type: string + version: + description: Version of your source image in your version + enabled S3 bucket. + type: string + type: object + type: object + status: + description: Status of the signing job. + type: string + statusReason: + description: String value that contains the status reason. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/signer.aws.upbound.io_signingprofiles.yaml b/package/crds/signer.aws.upbound.io_signingprofiles.yaml index 51f945c828..a311877746 100644 --- a/package/crds/signer.aws.upbound.io_signingprofiles.yaml +++ b/package/crds/signer.aws.upbound.io_signingprofiles.yaml @@ -486,3 +486,459 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SigningProfile is the Schema for the SigningProfiles API. Creates + a Signer Signing Profile. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SigningProfileSpec defines the desired state of SigningProfile + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + platformId: + description: The ID of the platform that is used by the target + signing profile. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + signatureValidityPeriod: + description: The validity period for a signing job. See signature_validity_period + Block below for details. + properties: + type: + description: 'The time unit for signature validity. Valid + values: DAYS, MONTHS, YEARS.' + type: string + value: + description: The numerical value of the time unit for signature + validity. + type: number + type: object + signingMaterial: + description: The AWS Certificate Manager certificate that will + be used to sign code with the new signing profile. See signing_material + Block below for details. + properties: + certificateArn: + description: The Amazon Resource Name (ARN) of the certificates + that is used to sign your code. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + platformId: + description: The ID of the platform that is used by the target + signing profile. + type: string + signatureValidityPeriod: + description: The validity period for a signing job. See signature_validity_period + Block below for details. + properties: + type: + description: 'The time unit for signature validity. Valid + values: DAYS, MONTHS, YEARS.' + type: string + value: + description: The numerical value of the time unit for signature + validity. + type: number + type: object + signingMaterial: + description: The AWS Certificate Manager certificate that will + be used to sign code with the new signing profile. See signing_material + Block below for details. + properties: + certificateArn: + description: The Amazon Resource Name (ARN) of the certificates + that is used to sign your code. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.platformId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.platformId) + || (has(self.initProvider) && has(self.initProvider.platformId))' + status: + description: SigningProfileStatus defines the observed state of SigningProfile. + properties: + atProvider: + properties: + arn: + description: The Amazon Resource Name (ARN) for the signing profile. 
+ type: string + id: + type: string + platformDisplayName: + description: A human-readable name for the signing platform associated + with the signing profile. + type: string + platformId: + description: The ID of the platform that is used by the target + signing profile. + type: string + revocationRecord: + description: Revocation information for a signing profile. See + revocation_record Block below for details. + items: + properties: + revocationEffectiveFrom: + description: The time when revocation becomes effective. + type: string + revokedAt: + description: The time when the signing profile was revoked. + type: string + revokedBy: + description: The identity of the revoker. + type: string + type: object + type: array + signatureValidityPeriod: + description: The validity period for a signing job. See signature_validity_period + Block below for details. + properties: + type: + description: 'The time unit for signature validity. Valid + values: DAYS, MONTHS, YEARS.' + type: string + value: + description: The numerical value of the time unit for signature + validity. + type: number + type: object + signingMaterial: + description: The AWS Certificate Manager certificate that will + be used to sign code with the new signing profile. See signing_material + Block below for details. + properties: + certificateArn: + description: The Amazon Resource Name (ARN) of the certificates + that is used to sign your code. + type: string + type: object + status: + description: The status of the target signing profile. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + version: + description: The current version of the signing profile. 
+ type: string + versionArn: + description: The signing profile ARN, including the profile version. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ssm.aws.upbound.io_associations.yaml b/package/crds/ssm.aws.upbound.io_associations.yaml index b537ceb787..ab0fc1e123 100644 --- a/package/crds/ssm.aws.upbound.io_associations.yaml +++ b/package/crds/ssm.aws.upbound.io_associations.yaml @@ -790,3 +790,769 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Association is the Schema for the Associations API. Associates + an SSM Document to an instance or EC2 tag. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AssociationSpec defines the desired state of Association + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + applyOnlyAtCronInterval: + description: 'By default, when you create a new or update associations, + the system runs it immediately and then according to the schedule + you specified. Enable this option if you do not want an association + to run immediately after you create or update it. This parameter + is not supported for rate expressions. Default: false.' + type: boolean + associationName: + description: The descriptive name for the association. + type: string + automationTargetParameterName: + description: Specify the target for the association. This target + is required for associations that use an Automation document + and target resources by using rate controls. This should be + set to the SSM document parameter that will define how your + automation will branch out. + type: string + complianceSeverity: + description: 'The compliance severity for the association. 
Can + be one of the following: UNSPECIFIED, LOW, MEDIUM, HIGH or CRITICAL' + type: string + documentVersion: + description: The document version you want to associate with the + target(s). Can be a specific version or the default version. + type: string + instanceId: + description: The instance ID to apply an SSM document to. Use + targets with key InstanceIds for document schema versions 2.0 + and above. Use the targets attribute instead. + type: string + maxConcurrency: + description: The maximum number of targets allowed to run the + association at the same time. You can specify a number, for + example 10, or a percentage of the target set, for example 10%. + type: string + maxErrors: + description: The number of errors that are allowed before the + system stops sending requests to run the association on additional + targets. You can specify a number, for example 10, or a percentage + of the target set, for example 10%. If you specify a threshold + of 3, the stop command is sent when the fourth error is returned. + If you specify a threshold of 10% for 50 associations, the stop + command is sent when the sixth error is returned. + type: string + name: + description: The name of the SSM document to apply. + type: string + nameRef: + description: Reference to a Document in ssm to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Document in ssm to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + outputLocation: + description: An output location block. Output Location is documented + below. + properties: + s3BucketName: + description: The S3 bucket name. + type: string + s3KeyPrefix: + description: The S3 bucket prefix. Results stored in the root + if not configured. + type: string + s3Region: + description: The S3 bucket region. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A block of arbitrary string parameters to pass to + the SSM document. 
+ type: object + x-kubernetes-map-type: granular + region: + description: Region is the region you'd like your resource to + be created in. + type: string + scheduleExpression: + description: A cron or rate expression that specifies when the + association runs. + type: string + syncCompliance: + description: The mode for generating association compliance. You + can specify AUTO or MANUAL. + type: string + targets: + description: A block containing the targets of the SSM association. + Targets are documented below. AWS currently supports a maximum + of 5 targets. + items: + properties: + key: + description: Either InstanceIds or tag:Tag Name to specify + an EC2 tag. + type: string + values: + description: A list of instance IDs or tag values. AWS currently + limits this list size to one value. + items: + type: string + type: array + type: object + type: array + waitForSuccessTimeoutSeconds: + description: The number of seconds to wait for the association + status to be Success. If Success status is not reached within + the given time, create opration will fail. + type: number + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + applyOnlyAtCronInterval: + description: 'By default, when you create a new or update associations, + the system runs it immediately and then according to the schedule + you specified. 
Enable this option if you do not want an association + to run immediately after you create or update it. This parameter + is not supported for rate expressions. Default: false.' + type: boolean + associationName: + description: The descriptive name for the association. + type: string + automationTargetParameterName: + description: Specify the target for the association. This target + is required for associations that use an Automation document + and target resources by using rate controls. This should be + set to the SSM document parameter that will define how your + automation will branch out. + type: string + complianceSeverity: + description: 'The compliance severity for the association. Can + be one of the following: UNSPECIFIED, LOW, MEDIUM, HIGH or CRITICAL' + type: string + documentVersion: + description: The document version you want to associate with the + target(s). Can be a specific version or the default version. + type: string + instanceId: + description: The instance ID to apply an SSM document to. Use + targets with key InstanceIds for document schema versions 2.0 + and above. Use the targets attribute instead. + type: string + maxConcurrency: + description: The maximum number of targets allowed to run the + association at the same time. You can specify a number, for + example 10, or a percentage of the target set, for example 10%. + type: string + maxErrors: + description: The number of errors that are allowed before the + system stops sending requests to run the association on additional + targets. You can specify a number, for example 10, or a percentage + of the target set, for example 10%. If you specify a threshold + of 3, the stop command is sent when the fourth error is returned. + If you specify a threshold of 10% for 50 associations, the stop + command is sent when the sixth error is returned. + type: string + name: + description: The name of the SSM document to apply. 
+ type: string + nameRef: + description: Reference to a Document in ssm to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Document in ssm to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + outputLocation: + description: An output location block. Output Location is documented + below. + properties: + s3BucketName: + description: The S3 bucket name. + type: string + s3KeyPrefix: + description: The S3 bucket prefix. Results stored in the root + if not configured. + type: string + s3Region: + description: The S3 bucket region. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A block of arbitrary string parameters to pass to + the SSM document. + type: object + x-kubernetes-map-type: granular + scheduleExpression: + description: A cron or rate expression that specifies when the + association runs. + type: string + syncCompliance: + description: The mode for generating association compliance. You + can specify AUTO or MANUAL. + type: string + targets: + description: A block containing the targets of the SSM association. + Targets are documented below. AWS currently supports a maximum + of 5 targets. + items: + properties: + key: + description: Either InstanceIds or tag:Tag Name to specify + an EC2 tag. + type: string + values: + description: A list of instance IDs or tag values. AWS currently + limits this list size to one value. + items: + type: string + type: array + type: object + type: array + waitForSuccessTimeoutSeconds: + description: The number of seconds to wait for the association + status to be Success. If Success status is not reached within + the given time, create opration will fail. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AssociationStatus defines the observed state of Association. + properties: + atProvider: + properties: + applyOnlyAtCronInterval: + description: 'By default, when you create a new or update associations, + the system runs it immediately and then according to the schedule + you specified. Enable this option if you do not want an association + to run immediately after you create or update it. This parameter + is not supported for rate expressions. Default: false.' + type: boolean + arn: + description: The ARN of the SSM association + type: string + associationId: + description: The ID of the SSM association. 
+ type: string + associationName: + description: The descriptive name for the association. + type: string + automationTargetParameterName: + description: Specify the target for the association. This target + is required for associations that use an Automation document + and target resources by using rate controls. This should be + set to the SSM document parameter that will define how your + automation will branch out. + type: string + complianceSeverity: + description: 'The compliance severity for the association. Can + be one of the following: UNSPECIFIED, LOW, MEDIUM, HIGH or CRITICAL' + type: string + documentVersion: + description: The document version you want to associate with the + target(s). Can be a specific version or the default version. + type: string + id: + type: string + instanceId: + description: The instance ID to apply an SSM document to. Use + targets with key InstanceIds for document schema versions 2.0 + and above. Use the targets attribute instead. + type: string + maxConcurrency: + description: The maximum number of targets allowed to run the + association at the same time. You can specify a number, for + example 10, or a percentage of the target set, for example 10%. + type: string + maxErrors: + description: The number of errors that are allowed before the + system stops sending requests to run the association on additional + targets. You can specify a number, for example 10, or a percentage + of the target set, for example 10%. If you specify a threshold + of 3, the stop command is sent when the fourth error is returned. + If you specify a threshold of 10% for 50 associations, the stop + command is sent when the sixth error is returned. + type: string + name: + description: The name of the SSM document to apply. + type: string + outputLocation: + description: An output location block. Output Location is documented + below. + properties: + s3BucketName: + description: The S3 bucket name. 
+ type: string + s3KeyPrefix: + description: The S3 bucket prefix. Results stored in the root + if not configured. + type: string + s3Region: + description: The S3 bucket region. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A block of arbitrary string parameters to pass to + the SSM document. + type: object + x-kubernetes-map-type: granular + scheduleExpression: + description: A cron or rate expression that specifies when the + association runs. + type: string + syncCompliance: + description: The mode for generating association compliance. You + can specify AUTO or MANUAL. + type: string + targets: + description: A block containing the targets of the SSM association. + Targets are documented below. AWS currently supports a maximum + of 5 targets. + items: + properties: + key: + description: Either InstanceIds or tag:Tag Name to specify + an EC2 tag. + type: string + values: + description: A list of instance IDs or tag values. AWS currently + limits this list size to one value. + items: + type: string + type: array + type: object + type: array + waitForSuccessTimeoutSeconds: + description: The number of seconds to wait for the association + status to be Success. If Success status is not reached within + the given time, create opration will fail. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ssm.aws.upbound.io_maintenancewindowtasks.yaml b/package/crds/ssm.aws.upbound.io_maintenancewindowtasks.yaml index b59afd1b10..694ed140f4 100644 --- a/package/crds/ssm.aws.upbound.io_maintenancewindowtasks.yaml +++ b/package/crds/ssm.aws.upbound.io_maintenancewindowtasks.yaml @@ -1983,3 +1983,1909 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MaintenanceWindowTask is the Schema for the 
MaintenanceWindowTasks + API. Provides an SSM Maintenance Window Task resource + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MaintenanceWindowTaskSpec defines the desired state of MaintenanceWindowTask + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cutoffBehavior: + description: Indicates whether tasks should continue to run after + the cutoff time specified in the maintenance windows is reached. + Valid values are CONTINUE_TASK and CANCEL_TASK. + type: string + description: + description: The description of the maintenance window task. 
+ type: string + maxConcurrency: + description: The maximum number of targets this task can be run + for in parallel. + type: string + maxErrors: + description: The maximum number of errors allowed before this + task stops being scheduled. + type: string + name: + description: The name of the maintenance window task. + type: string + priority: + description: The priority of the task in the Maintenance Window, + the lower the number the higher the priority. Tasks in a Maintenance + Window are scheduled in priority order with tasks that have + the same priority scheduled in parallel. + type: number + region: + description: Region is the region you'd like your resource to + be created in. + type: string + serviceRoleArn: + description: The role that should be assumed when executing the + task. If a role is not provided, Systems Manager uses your account's + service-linked role. If no service-linked role for Systems Manager + exists in your account, it is created for you. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targets: + description: The targets (either instances or window target ids). + Instances are specified using Key=InstanceIds,Values=instanceid1,instanceid2. + Window target ids are specified using Key=WindowTargetIds,Values=window + target id1, window target id2. + items: + properties: + key: + type: string + values: + description: The array of strings. + items: + type: string + type: array + type: object + type: array + taskArn: + description: The ARN of the task to execute. + type: string + taskArnRef: + description: Reference to a Function in lambda to populate taskArn. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + taskArnSelector: + description: Selector for a Function in lambda to populate taskArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + taskInvocationParameters: + description: Configuration block with parameters for task execution. + properties: + automationParameters: + description: The parameters for an AUTOMATION task type. Documented + below. + properties: + documentVersion: + description: The version of an Automation document to + use during task execution. + type: string + parameter: + description: The parameters for the RUN_COMMAND task execution. + Documented below. + items: + properties: + name: + description: The name of the maintenance window + task. + type: string + values: + description: The array of strings. + items: + type: string + type: array + type: object + type: array + type: object + lambdaParameters: + description: The parameters for a LAMBDA task type. Documented + below. + properties: + clientContext: + description: Pass client-specific information to the Lambda + function that you are invoking. + type: string + payloadSecretRef: + description: JSON to provide to your Lambda function as + input. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + qualifier: + description: Specify a Lambda function version or alias + name. + type: string + type: object + runCommandParameters: + description: The parameters for a RUN_COMMAND task type. Documented + below. + properties: + cloudwatchConfig: + description: Configuration options for sending command + output to CloudWatch Logs. Documented below. + properties: + cloudwatchLogGroupName: + description: 'The name of the CloudWatch log group + where you want to send command output. If you don''t + specify a group name, Systems Manager automatically + creates a log group for you. 
The log group uses + the following naming format: aws/ssm/SystemsManagerDocumentName.' + type: string + cloudwatchOutputEnabled: + description: Enables Systems Manager to send command + output to CloudWatch Logs. + type: boolean + type: object + comment: + description: Information about the command(s) to execute. + type: string + documentHash: + description: The SHA-256 or SHA-1 hash created by the + system when the document was created. SHA-1 hashes have + been deprecated. + type: string + documentHashType: + description: 'SHA-256 or SHA-1. SHA-1 hashes have been + deprecated. Valid values: Sha256 and Sha1' + type: string + documentVersion: + description: The version of an Automation document to + use during task execution. + type: string + notificationConfig: + description: Configurations for sending notifications + about command status changes on a per-instance basis. + Documented below. + properties: + notificationArn: + description: An Amazon Resource Name (ARN) for a Simple + Notification Service (SNS) topic. Run Command pushes + notifications about command status changes to this + topic. + type: string + notificationArnRef: + description: Reference to a Topic in sns to populate + notificationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + notificationArnSelector: + description: Selector for a Topic in sns to populate + notificationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + notificationEvents: + description: 'The different events for which you can + receive notifications. Valid values: All, InProgress, + Success, TimedOut, Cancelled, and Failed' + items: + type: string + type: array + notificationType: + description: 'When specified with Command, receive + notification when the status of a command changes. + When specified with Invocation, for commands sent + to multiple instances, receive notification on a + per-instance basis when the status of a command + changes. 
Valid values: Command and Invocation' + type: string + type: object + outputS3Bucket: + description: The name of the Amazon S3 bucket. + type: string + outputS3BucketRef: + description: Reference to a Bucket in s3 to populate outputS3Bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + outputS3BucketSelector: + description: Selector for a Bucket in s3 to populate outputS3Bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + outputS3KeyPrefix: + description: The Amazon S3 bucket subfolder. + type: string + parameter: + description: The parameters for the RUN_COMMAND task execution. + Documented below. + items: + properties: + name: + description: The name of the maintenance window + task. + type: string + values: + description: The array of strings. + items: + type: string + type: array + type: object + type: array + serviceRoleArn: + description: The role that should be assumed when executing + the task. If a role is not provided, Systems Manager + uses your account's service-linked role. If no service-linked + role for Systems Manager exists in your account, it + is created for you. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeoutSeconds: + description: If this time is reached and the command has + not already started executing, it doesn't run. + type: number + type: object + stepFunctionsParameters: + description: The parameters for a STEP_FUNCTIONS task type. + Documented below. + properties: + inputSecretRef: + description: The inputs for the STEP_FUNCTION task. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + name: + description: The name of the maintenance window task. 
+ type: string + type: object + type: object + taskType: + description: 'The type of task being registered. Valid values: + AUTOMATION, LAMBDA, RUN_COMMAND or STEP_FUNCTIONS.' + type: string + windowId: + description: The Id of the maintenance window to register the + task with. + type: string + windowIdRef: + description: Reference to a MaintenanceWindow in ssm to populate + windowId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + windowIdSelector: + description: Selector for a MaintenanceWindow in ssm to populate + windowId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cutoffBehavior: + description: Indicates whether tasks should continue to run after + the cutoff time specified in the maintenance windows is reached. + Valid values are CONTINUE_TASK and CANCEL_TASK. + type: string + description: + description: The description of the maintenance window task. + type: string + maxConcurrency: + description: The maximum number of targets this task can be run + for in parallel. + type: string + maxErrors: + description: The maximum number of errors allowed before this + task stops being scheduled. + type: string + name: + description: The name of the maintenance window task. + type: string + priority: + description: The priority of the task in the Maintenance Window, + the lower the number the higher the priority. 
Tasks in a Maintenance + Window are scheduled in priority order with tasks that have + the same priority scheduled in parallel. + type: number + serviceRoleArn: + description: The role that should be assumed when executing the + task. If a role is not provided, Systems Manager uses your account's + service-linked role. If no service-linked role for Systems Manager + exists in your account, it is created for you. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targets: + description: The targets (either instances or window target ids). + Instances are specified using Key=InstanceIds,Values=instanceid1,instanceid2. + Window target ids are specified using Key=WindowTargetIds,Values=window + target id1, window target id2. + items: + properties: + key: + type: string + values: + description: The array of strings. + items: + type: string + type: array + type: object + type: array + taskArn: + description: The ARN of the task to execute. + type: string + taskArnRef: + description: Reference to a Function in lambda to populate taskArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + taskArnSelector: + description: Selector for a Function in lambda to populate taskArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + taskInvocationParameters: + description: Configuration block with parameters for task execution. + properties: + automationParameters: + description: The parameters for an AUTOMATION task type. Documented + below. + properties: + documentVersion: + description: The version of an Automation document to + use during task execution. + type: string + parameter: + description: The parameters for the RUN_COMMAND task execution. + Documented below. 
+ items: + properties: + name: + description: The name of the maintenance window + task. + type: string + values: + description: The array of strings. + items: + type: string + type: array + type: object + type: array + type: object + lambdaParameters: + description: The parameters for a LAMBDA task type. Documented + below. + properties: + clientContext: + description: Pass client-specific information to the Lambda + function that you are invoking. + type: string + payloadSecretRef: + description: JSON to provide to your Lambda function as + input. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + qualifier: + description: Specify a Lambda function version or alias + name. + type: string + type: object + runCommandParameters: + description: The parameters for a RUN_COMMAND task type. Documented + below. + properties: + cloudwatchConfig: + description: Configuration options for sending command + output to CloudWatch Logs. Documented below. + properties: + cloudwatchLogGroupName: + description: 'The name of the CloudWatch log group + where you want to send command output. If you don''t + specify a group name, Systems Manager automatically + creates a log group for you. The log group uses + the following naming format: aws/ssm/SystemsManagerDocumentName.' + type: string + cloudwatchOutputEnabled: + description: Enables Systems Manager to send command + output to CloudWatch Logs. + type: boolean + type: object + comment: + description: Information about the command(s) to execute. + type: string + documentHash: + description: The SHA-256 or SHA-1 hash created by the + system when the document was created. SHA-1 hashes have + been deprecated. + type: string + documentHashType: + description: 'SHA-256 or SHA-1. SHA-1 hashes have been + deprecated. 
Valid values: Sha256 and Sha1' + type: string + documentVersion: + description: The version of an Automation document to + use during task execution. + type: string + notificationConfig: + description: Configurations for sending notifications + about command status changes on a per-instance basis. + Documented below. + properties: + notificationArn: + description: An Amazon Resource Name (ARN) for a Simple + Notification Service (SNS) topic. Run Command pushes + notifications about command status changes to this + topic. + type: string + notificationArnRef: + description: Reference to a Topic in sns to populate + notificationArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + notificationArnSelector: + description: Selector for a Topic in sns to populate + notificationArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + notificationEvents: + description: 'The different events for which you can + receive notifications. Valid values: All, InProgress, + Success, TimedOut, Cancelled, and Failed' + items: + type: string + type: array + notificationType: + description: 'When specified with Command, receive + notification when the status of a command changes. + When specified with Invocation, for commands sent + to multiple instances, receive notification on a + per-instance basis when the status of a command + changes. Valid values: Command and Invocation' + type: string + type: object + outputS3Bucket: + description: The name of the Amazon S3 bucket. + type: string + outputS3BucketRef: + description: Reference to a Bucket in s3 to populate outputS3Bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + outputS3BucketSelector: + description: Selector for a Bucket in s3 to populate outputS3Bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + outputS3KeyPrefix: + description: The Amazon S3 bucket subfolder. + type: string + parameter: + description: The parameters for the RUN_COMMAND task execution. + Documented below. + items: + properties: + name: + description: The name of the maintenance window + task. + type: string + values: + description: The array of strings. 
+ items: + type: string + type: array + type: object + type: array + serviceRoleArn: + description: The role that should be assumed when executing + the task. If a role is not provided, Systems Manager + uses your account's service-linked role. If no service-linked + role for Systems Manager exists in your account, it + is created for you. + type: string + serviceRoleArnRef: + description: Reference to a Role in iam to populate serviceRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRoleArnSelector: + description: Selector for a Role in iam to populate serviceRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeoutSeconds: + description: If this time is reached and the command has + not already started executing, it doesn't run. + type: number + type: object + stepFunctionsParameters: + description: The parameters for a STEP_FUNCTIONS task type. + Documented below. + properties: + inputSecretRef: + description: The inputs for the STEP_FUNCTION task. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + name: + description: The name of the maintenance window task. + type: string + type: object + type: object + taskType: + description: 'The type of task being registered. Valid values: + AUTOMATION, LAMBDA, RUN_COMMAND or STEP_FUNCTIONS.' + type: string + windowId: + description: The Id of the maintenance window to register the + task with. + type: string + windowIdRef: + description: Reference to a MaintenanceWindow in ssm to populate + windowId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + windowIdSelector: + description: Selector for a MaintenanceWindow in ssm to populate + windowId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.taskType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.taskType) + || (has(self.initProvider) && has(self.initProvider.taskType))' + status: + description: MaintenanceWindowTaskStatus defines the observed state of + MaintenanceWindowTask. + properties: + atProvider: + properties: + arn: + description: The ARN of the maintenance window task. 
+ type: string + cutoffBehavior: + description: Indicates whether tasks should continue to run after + the cutoff time specified in the maintenance windows is reached. + Valid values are CONTINUE_TASK and CANCEL_TASK. + type: string + description: + description: The description of the maintenance window task. + type: string + id: + description: The ID of the maintenance window task. + type: string + maxConcurrency: + description: The maximum number of targets this task can be run + for in parallel. + type: string + maxErrors: + description: The maximum number of errors allowed before this + task stops being scheduled. + type: string + name: + description: The name of the maintenance window task. + type: string + priority: + description: The priority of the task in the Maintenance Window, + the lower the number the higher the priority. Tasks in a Maintenance + Window are scheduled in priority order with tasks that have + the same priority scheduled in parallel. + type: number + serviceRoleArn: + description: The role that should be assumed when executing the + task. If a role is not provided, Systems Manager uses your account's + service-linked role. If no service-linked role for Systems Manager + exists in your account, it is created for you. + type: string + targets: + description: The targets (either instances or window target ids). + Instances are specified using Key=InstanceIds,Values=instanceid1,instanceid2. + Window target ids are specified using Key=WindowTargetIds,Values=window + target id1, window target id2. + items: + properties: + key: + type: string + values: + description: The array of strings. + items: + type: string + type: array + type: object + type: array + taskArn: + description: The ARN of the task to execute. + type: string + taskInvocationParameters: + description: Configuration block with parameters for task execution. + properties: + automationParameters: + description: The parameters for an AUTOMATION task type. Documented + below. 
+ properties: + documentVersion: + description: The version of an Automation document to + use during task execution. + type: string + parameter: + description: The parameters for the RUN_COMMAND task execution. + Documented below. + items: + properties: + name: + description: The name of the maintenance window + task. + type: string + values: + description: The array of strings. + items: + type: string + type: array + type: object + type: array + type: object + lambdaParameters: + description: The parameters for a LAMBDA task type. Documented + below. + properties: + clientContext: + description: Pass client-specific information to the Lambda + function that you are invoking. + type: string + qualifier: + description: Specify a Lambda function version or alias + name. + type: string + type: object + runCommandParameters: + description: The parameters for a RUN_COMMAND task type. Documented + below. + properties: + cloudwatchConfig: + description: Configuration options for sending command + output to CloudWatch Logs. Documented below. + properties: + cloudwatchLogGroupName: + description: 'The name of the CloudWatch log group + where you want to send command output. If you don''t + specify a group name, Systems Manager automatically + creates a log group for you. The log group uses + the following naming format: aws/ssm/SystemsManagerDocumentName.' + type: string + cloudwatchOutputEnabled: + description: Enables Systems Manager to send command + output to CloudWatch Logs. + type: boolean + type: object + comment: + description: Information about the command(s) to execute. + type: string + documentHash: + description: The SHA-256 or SHA-1 hash created by the + system when the document was created. SHA-1 hashes have + been deprecated. + type: string + documentHashType: + description: 'SHA-256 or SHA-1. SHA-1 hashes have been + deprecated. 
Valid values: Sha256 and Sha1' + type: string + documentVersion: + description: The version of an Automation document to + use during task execution. + type: string + notificationConfig: + description: Configurations for sending notifications + about command status changes on a per-instance basis. + Documented below. + properties: + notificationArn: + description: An Amazon Resource Name (ARN) for a Simple + Notification Service (SNS) topic. Run Command pushes + notifications about command status changes to this + topic. + type: string + notificationEvents: + description: 'The different events for which you can + receive notifications. Valid values: All, InProgress, + Success, TimedOut, Cancelled, and Failed' + items: + type: string + type: array + notificationType: + description: 'When specified with Command, receive + notification when the status of a command changes. + When specified with Invocation, for commands sent + to multiple instances, receive notification on a + per-instance basis when the status of a command + changes. Valid values: Command and Invocation' + type: string + type: object + outputS3Bucket: + description: The name of the Amazon S3 bucket. + type: string + outputS3KeyPrefix: + description: The Amazon S3 bucket subfolder. + type: string + parameter: + description: The parameters for the RUN_COMMAND task execution. + Documented below. + items: + properties: + name: + description: The name of the maintenance window + task. + type: string + values: + description: The array of strings. + items: + type: string + type: array + type: object + type: array + serviceRoleArn: + description: The role that should be assumed when executing + the task. If a role is not provided, Systems Manager + uses your account's service-linked role. If no service-linked + role for Systems Manager exists in your account, it + is created for you. 
+ type: string + timeoutSeconds: + description: If this time is reached and the command has + not already started executing, it doesn't run. + type: number + type: object + stepFunctionsParameters: + description: The parameters for a STEP_FUNCTIONS task type. + Documented below. + properties: + name: + description: The name of the maintenance window task. + type: string + type: object + type: object + taskType: + description: 'The type of task being registered. Valid values: + AUTOMATION, LAMBDA, RUN_COMMAND or STEP_FUNCTIONS.' + type: string + windowId: + description: The Id of the maintenance window to register the + task with. + type: string + windowTaskId: + description: The ID of the maintenance window task. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ssm.aws.upbound.io_resourcedatasyncs.yaml b/package/crds/ssm.aws.upbound.io_resourcedatasyncs.yaml index f6367f713e..75a24e4b4b 100644 --- a/package/crds/ssm.aws.upbound.io_resourcedatasyncs.yaml +++ b/package/crds/ssm.aws.upbound.io_resourcedatasyncs.yaml @@ -629,3 +629,608 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ResourceDataSync is the Schema for the ResourceDataSyncs API. + Provides a SSM resource data sync. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourceDataSyncSpec defines the desired state of ResourceDataSync + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + region: + description: |- + Region with the bucket targeted by the Resource Data Sync. + Region is the region you'd like your resource to be created in. + type: string + s3Destination: + description: Amazon S3 configuration details for the sync. + properties: + bucketName: + description: Name of S3 bucket where the aggregated data is + stored. + type: string + bucketNameRef: + description: Reference to a Bucket in s3 to populate bucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a Bucket in s3 to populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsKeyArn: + description: ARN of an encryption key for a destination in + Amazon S3. + type: string + prefix: + description: Prefix for the bucket. + type: string + region: + description: Region with the bucket targeted by the Resource + Data Sync. + type: string + regionRef: + description: Reference to a Bucket in s3 to populate region. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + regionSelector: + description: Selector for a Bucket in s3 to populate region. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + syncFormat: + description: A supported sync format. Only JsonSerDe is currently + supported. Defaults to JsonSerDe. + type: string + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + s3Destination: + description: Amazon S3 configuration details for the sync. + properties: + bucketName: + description: Name of S3 bucket where the aggregated data is + stored. + type: string + bucketNameRef: + description: Reference to a Bucket in s3 to populate bucketName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketNameSelector: + description: Selector for a Bucket in s3 to populate bucketName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsKeyArn: + description: ARN of an encryption key for a destination in + Amazon S3. + type: string + prefix: + description: Prefix for the bucket. + type: string + syncFormat: + description: A supported sync format. Only JsonSerDe is currently + supported. Defaults to JsonSerDe. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.s3Destination is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.s3Destination) + || (has(self.initProvider) && has(self.initProvider.s3Destination))' + status: + description: ResourceDataSyncStatus defines the observed state of ResourceDataSync. + properties: + atProvider: + properties: + id: + type: string + s3Destination: + description: Amazon S3 configuration details for the sync. 
+ properties: + bucketName: + description: Name of S3 bucket where the aggregated data is + stored. + type: string + kmsKeyArn: + description: ARN of an encryption key for a destination in + Amazon S3. + type: string + prefix: + description: Prefix for the bucket. + type: string + region: + description: Region with the bucket targeted by the Resource + Data Sync. + type: string + syncFormat: + description: A supported sync format. Only JsonSerDe is currently + supported. Defaults to JsonSerDe. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ssoadmin.aws.upbound.io_customermanagedpolicyattachments.yaml b/package/crds/ssoadmin.aws.upbound.io_customermanagedpolicyattachments.yaml index c3790a43d7..fdf2dbf980 100644 --- a/package/crds/ssoadmin.aws.upbound.io_customermanagedpolicyattachments.yaml +++ b/package/crds/ssoadmin.aws.upbound.io_customermanagedpolicyattachments.yaml @@ -628,3 +628,607 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CustomerManagedPolicyAttachment is the Schema for the CustomerManagedPolicyAttachments + API. Manages a customer managed policy for a Single Sign-On (SSO) Permission + Set + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CustomerManagedPolicyAttachmentSpec defines the desired state + of CustomerManagedPolicyAttachment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customerManagedPolicyReference: + description: Specifies the name and path of a customer managed + policy. See below. + properties: + name: + description: Name of the customer managed IAM Policy to be + attached. + type: string + path: + description: The path to the IAM policy to be attached. The + default is /. See IAM Identifiers for more information. + type: string + policyNameRef: + description: Reference to a Policy in iam to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyNameSelector: + description: Selector for a Policy in iam to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + instanceArn: + description: The Amazon Resource Name (ARN) of the SSO Instance + under which the operation will be executed. + type: string + permissionSetArn: + description: The Amazon Resource Name (ARN) of the Permission + Set. + type: string + permissionSetArnRef: + description: Reference to a PermissionSet in ssoadmin to populate + permissionSetArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + permissionSetArnSelector: + description: Selector for a PermissionSet in ssoadmin to populate + permissionSetArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - instanceArn + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + customerManagedPolicyReference: + description: Specifies the name and path of a customer managed + policy. See below. + properties: + name: + description: Name of the customer managed IAM Policy to be + attached. + type: string + path: + description: The path to the IAM policy to be attached. The + default is /. See IAM Identifiers for more information. + type: string + policyNameRef: + description: Reference to a Policy in iam to populate name. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyNameSelector: + description: Selector for a Policy in iam to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.customerManagedPolicyReference is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.customerManagedPolicyReference) + || (has(self.initProvider) && has(self.initProvider.customerManagedPolicyReference))' + status: + description: CustomerManagedPolicyAttachmentStatus defines the observed + state of CustomerManagedPolicyAttachment. + properties: + atProvider: + properties: + customerManagedPolicyReference: + description: Specifies the name and path of a customer managed + policy. See below. + properties: + name: + description: Name of the customer managed IAM Policy to be + attached. + type: string + path: + description: The path to the IAM policy to be attached. The + default is /. See IAM Identifiers for more information. + type: string + type: object + id: + description: Policy Name, Policy Path, Permission Set Amazon Resource + Name (ARN), and SSO Instance ARN, each separated by a comma + (,). + type: string + instanceArn: + description: The Amazon Resource Name (ARN) of the SSO Instance + under which the operation will be executed. + type: string + permissionSetArn: + description: The Amazon Resource Name (ARN) of the Permission + Set. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/ssoadmin.aws.upbound.io_permissionsboundaryattachments.yaml b/package/crds/ssoadmin.aws.upbound.io_permissionsboundaryattachments.yaml index eb18d8d124..132a0863fd 100644 --- a/package/crds/ssoadmin.aws.upbound.io_permissionsboundaryattachments.yaml +++ b/package/crds/ssoadmin.aws.upbound.io_permissionsboundaryattachments.yaml @@ -660,3 +660,633 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: PermissionsBoundaryAttachment is the Schema for the PermissionsBoundaryAttachments + API. Attaches a permissions boundary policy to a Single Sign-On (SSO) Permission + Set resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PermissionsBoundaryAttachmentSpec defines the desired state + of PermissionsBoundaryAttachment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + instanceArn: + description: The Amazon Resource Name (ARN) of the SSO Instance + under which the operation will be executed. + type: string + permissionSetArn: + description: The Amazon Resource Name (ARN) of the Permission + Set. + type: string + permissionSetArnRef: + description: Reference to a PermissionSet in ssoadmin to populate + permissionSetArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + permissionSetArnSelector: + description: Selector for a PermissionSet in ssoadmin to populate + permissionSetArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + permissionsBoundary: + description: The permissions boundary policy. See below. + properties: + customerManagedPolicyReference: + description: Specifies the name and path of a customer managed + policy. See below. + properties: + name: + description: Name of the customer managed IAM Policy to + be attached. + type: string + nameRef: + description: Reference to a Policy in iam to populate + name. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Policy in iam to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + path: + description: The path to the IAM policy to be attached. + The default is /. See IAM Identifiers for more information. + type: string + type: object + managedPolicyArn: + description: AWS-managed IAM policy ARN to use as the permissions + boundary. + type: string + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - instanceArn + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + permissionsBoundary: + description: The permissions boundary policy. See below. + properties: + customerManagedPolicyReference: + description: Specifies the name and path of a customer managed + policy. See below. + properties: + name: + description: Name of the customer managed IAM Policy to + be attached. + type: string + nameRef: + description: Reference to a Policy in iam to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Policy in iam to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + path: + description: The path to the IAM policy to be attached. + The default is /. See IAM Identifiers for more information. + type: string + type: object + managedPolicyArn: + description: AWS-managed IAM policy ARN to use as the permissions + boundary. 
+ type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.permissionsBoundary is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.permissionsBoundary) + || (has(self.initProvider) && has(self.initProvider.permissionsBoundary))' + status: + description: PermissionsBoundaryAttachmentStatus defines the observed + state of PermissionsBoundaryAttachment. 
+ properties: + atProvider: + properties: + id: + description: Permission Set Amazon Resource Name (ARN) and SSO + Instance ARN, separated by a comma (,). + type: string + instanceArn: + description: The Amazon Resource Name (ARN) of the SSO Instance + under which the operation will be executed. + type: string + permissionSetArn: + description: The Amazon Resource Name (ARN) of the Permission + Set. + type: string + permissionsBoundary: + description: The permissions boundary policy. See below. + properties: + customerManagedPolicyReference: + description: Specifies the name and path of a customer managed + policy. See below. + properties: + name: + description: Name of the customer managed IAM Policy to + be attached. + type: string + path: + description: The path to the IAM policy to be attached. + The default is /. See IAM Identifiers for more information. + type: string + type: object + managedPolicyArn: + description: AWS-managed IAM policy ARN to use as the permissions + boundary. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/timestreamwrite.aws.upbound.io_tables.yaml b/package/crds/timestreamwrite.aws.upbound.io_tables.yaml index a17e0aa21f..0f0c5d5780 100644 --- a/package/crds/timestreamwrite.aws.upbound.io_tables.yaml +++ b/package/crds/timestreamwrite.aws.upbound.io_tables.yaml @@ -753,3 +753,687 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Table is the Schema for the Tables API. Provides a Timestream + table resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TableSpec defines the desired state of Table + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + databaseName: + description: – The name of the Timestream database. + type: string + databaseNameRef: + description: Reference to a Database in timestreamwrite to populate + databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a Database in timestreamwrite to populate + databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + magneticStoreWriteProperties: + description: Contains properties to set on the table when enabling + magnetic store writes. See Magnetic Store Write Properties below + for more details. + properties: + enableMagneticStoreWrites: + description: A flag to enable magnetic store writes. 
+ type: boolean + magneticStoreRejectedDataLocation: + description: The location to write error reports for records + rejected asynchronously during magnetic store writes. See + Magnetic Store Rejected Data Location below for more details. + properties: + s3Configuration: + description: Configuration of an S3 location to write + error reports for records rejected, asynchronously, + during magnetic store writes. See S3 Configuration below + for more details. + properties: + bucketName: + description: Bucket name of the customer S3 bucket. + type: string + encryptionOption: + description: Encryption option for the customer s3 + location. Options are S3 server side encryption + with an S3-managed key or KMS managed key. Valid + values are SSE_KMS and SSE_S3. + type: string + kmsKeyId: + description: KMS key arn for the customer s3 location + when encrypting with a KMS managed key. + type: string + objectKeyPrefix: + description: Object key prefix for the customer S3 + location. + type: string + type: object + type: object + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + retentionProperties: + description: The retention duration for the memory store and magnetic + store. See Retention Properties below for more details. If not + provided, magnetic_store_retention_period_in_days default to + 73000 and memory_store_retention_period_in_hours defaults to + 6. + properties: + magneticStoreRetentionPeriodInDays: + description: The duration for which data must be stored in + the magnetic store. Minimum value of 1. Maximum value of + 73000. + type: number + memoryStoreRetentionPeriodInHours: + description: The duration for which data must be stored in + the memory store. Minimum value of 1. Maximum value of 8766. + type: number + type: object + schema: + description: The schema of the table. See Schema below for more + details. 
+ properties: + compositePartitionKey: + description: A non-empty list of partition keys defining the + attributes used to partition the table data. The order of + the list determines the partition hierarchy. The name and + type of each partition key as well as the partition key + order cannot be changed after the table is created. However, + the enforcement level of each partition key can be changed. + See Composite Partition Key below for more details. + properties: + enforcementInRecord: + description: 'The level of enforcement for the specification + of a dimension key in ingested records. Valid values: + REQUIRED, OPTIONAL.' + type: string + name: + description: The name of the attribute used for a dimension + key. + type: string + type: + description: 'The type of the partition key. Valid values: + DIMENSION, MEASURE.' + type: string + type: object + type: object + tableName: + description: The name of the Timestream table. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + - tableName + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + magneticStoreWriteProperties: + description: Contains properties to set on the table when enabling + magnetic store writes. 
See Magnetic Store Write Properties below + for more details. + properties: + enableMagneticStoreWrites: + description: A flag to enable magnetic store writes. + type: boolean + magneticStoreRejectedDataLocation: + description: The location to write error reports for records + rejected asynchronously during magnetic store writes. See + Magnetic Store Rejected Data Location below for more details. + properties: + s3Configuration: + description: Configuration of an S3 location to write + error reports for records rejected, asynchronously, + during magnetic store writes. See S3 Configuration below + for more details. + properties: + bucketName: + description: Bucket name of the customer S3 bucket. + type: string + encryptionOption: + description: Encryption option for the customer s3 + location. Options are S3 server side encryption + with an S3-managed key or KMS managed key. Valid + values are SSE_KMS and SSE_S3. + type: string + kmsKeyId: + description: KMS key arn for the customer s3 location + when encrypting with a KMS managed key. + type: string + objectKeyPrefix: + description: Object key prefix for the customer S3 + location. + type: string + type: object + type: object + type: object + retentionProperties: + description: The retention duration for the memory store and magnetic + store. See Retention Properties below for more details. If not + provided, magnetic_store_retention_period_in_days default to + 73000 and memory_store_retention_period_in_hours defaults to + 6. + properties: + magneticStoreRetentionPeriodInDays: + description: The duration for which data must be stored in + the magnetic store. Minimum value of 1. Maximum value of + 73000. + type: number + memoryStoreRetentionPeriodInHours: + description: The duration for which data must be stored in + the memory store. Minimum value of 1. Maximum value of 8766. + type: number + type: object + schema: + description: The schema of the table. See Schema below for more + details. 
+ properties: + compositePartitionKey: + description: A non-empty list of partition keys defining the + attributes used to partition the table data. The order of + the list determines the partition hierarchy. The name and + type of each partition key as well as the partition key + order cannot be changed after the table is created. However, + the enforcement level of each partition key can be changed. + See Composite Partition Key below for more details. + properties: + enforcementInRecord: + description: 'The level of enforcement for the specification + of a dimension key in ingested records. Valid values: + REQUIRED, OPTIONAL.' + type: string + name: + description: The name of the attribute used for a dimension + key. + type: string + type: + description: 'The type of the partition key. Valid values: + DIMENSION, MEASURE.' + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TableStatus defines the observed state of Table. + properties: + atProvider: + properties: + arn: + description: The ARN that uniquely identifies this table. + type: string + databaseName: + description: – The name of the Timestream database. + type: string + id: + description: The table_name and database_name separated by a colon + (:). + type: string + magneticStoreWriteProperties: + description: Contains properties to set on the table when enabling + magnetic store writes. See Magnetic Store Write Properties below + for more details. + properties: + enableMagneticStoreWrites: + description: A flag to enable magnetic store writes. + type: boolean + magneticStoreRejectedDataLocation: + description: The location to write error reports for records + rejected asynchronously during magnetic store writes. See + Magnetic Store Rejected Data Location below for more details. 
+ properties: + s3Configuration: + description: Configuration of an S3 location to write + error reports for records rejected, asynchronously, + during magnetic store writes. See S3 Configuration below + for more details. + properties: + bucketName: + description: Bucket name of the customer S3 bucket. + type: string + encryptionOption: + description: Encryption option for the customer s3 + location. Options are S3 server side encryption + with an S3-managed key or KMS managed key. Valid + values are SSE_KMS and SSE_S3. + type: string + kmsKeyId: + description: KMS key arn for the customer s3 location + when encrypting with a KMS managed key. + type: string + objectKeyPrefix: + description: Object key prefix for the customer S3 + location. + type: string + type: object + type: object + type: object + retentionProperties: + description: The retention duration for the memory store and magnetic + store. See Retention Properties below for more details. If not + provided, magnetic_store_retention_period_in_days default to + 73000 and memory_store_retention_period_in_hours defaults to + 6. + properties: + magneticStoreRetentionPeriodInDays: + description: The duration for which data must be stored in + the magnetic store. Minimum value of 1. Maximum value of + 73000. + type: number + memoryStoreRetentionPeriodInHours: + description: The duration for which data must be stored in + the memory store. Minimum value of 1. Maximum value of 8766. + type: number + type: object + schema: + description: The schema of the table. See Schema below for more + details. + properties: + compositePartitionKey: + description: A non-empty list of partition keys defining the + attributes used to partition the table data. The order of + the list determines the partition hierarchy. The name and + type of each partition key as well as the partition key + order cannot be changed after the table is created. However, + the enforcement level of each partition key can be changed. 
+ See Composite Partition Key below for more details. + properties: + enforcementInRecord: + description: 'The level of enforcement for the specification + of a dimension key in ingested records. Valid values: + REQUIRED, OPTIONAL.' + type: string + name: + description: The name of the attribute used for a dimension + key. + type: string + type: + description: 'The type of the partition key. Valid values: + DIMENSION, MEASURE.' + type: string + type: object + type: object + tableName: + description: The name of the Timestream table. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/transcribe.aws.upbound.io_languagemodels.yaml b/package/crds/transcribe.aws.upbound.io_languagemodels.yaml index 9f803f1be8..5b201639d1 100644 --- a/package/crds/transcribe.aws.upbound.io_languagemodels.yaml +++ b/package/crds/transcribe.aws.upbound.io_languagemodels.yaml @@ -586,3 +586,565 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LanguageModel is the Schema for the LanguageModels API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LanguageModelSpec defines the desired state of LanguageModel + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + baseModelName: + description: Name of reference base model. + type: string + inputDataConfig: + description: The input data config for the LanguageModel. See + Input Data Config for more details. + properties: + dataAccessRoleArn: + description: IAM role with access to S3 bucket. + type: string + dataAccessRoleArnRef: + description: Reference to a Role in iam to populate dataAccessRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataAccessRoleArnSelector: + description: Selector for a Role in iam to populate dataAccessRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3Uri: + description: S3 URI where training data is located. + type: string + tuningDataS3Uri: + description: S3 URI where tuning data is located. 
+ type: string + type: object + languageCode: + description: The language code you selected for your language + model. Refer to the supported languages page for accepted codes. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + baseModelName: + description: Name of reference base model. + type: string + inputDataConfig: + description: The input data config for the LanguageModel. See + Input Data Config for more details. + properties: + dataAccessRoleArn: + description: IAM role with access to S3 bucket. + type: string + dataAccessRoleArnRef: + description: Reference to a Role in iam to populate dataAccessRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataAccessRoleArnSelector: + description: Selector for a Role in iam to populate dataAccessRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + s3Uri: + description: S3 URI where training data is located. + type: string + tuningDataS3Uri: + description: S3 URI where tuning data is located. + type: string + type: object + languageCode: + description: The language code you selected for your language + model. 
Refer to the supported languages page for accepted codes. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.baseModelName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.baseModelName) + || (has(self.initProvider) && has(self.initProvider.baseModelName))' + - message: spec.forProvider.inputDataConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.inputDataConfig) + || (has(self.initProvider) && has(self.initProvider.inputDataConfig))' + - message: spec.forProvider.languageCode is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.languageCode) + || (has(self.initProvider) && has(self.initProvider.languageCode))' + status: + description: LanguageModelStatus defines the observed state of LanguageModel. + properties: + atProvider: + properties: + arn: + description: ARN of the LanguageModel. + type: string + baseModelName: + description: Name of reference base model. + type: string + id: + description: LanguageModel name. + type: string + inputDataConfig: + description: The input data config for the LanguageModel. See + Input Data Config for more details. + properties: + dataAccessRoleArn: + description: IAM role with access to S3 bucket. + type: string + s3Uri: + description: S3 URI where training data is located. + type: string + tuningDataS3Uri: + description: S3 URI where tuning data is located. + type: string + type: object + languageCode: + description: The language code you selected for your language + model. Refer to the supported languages page for accepted codes. 
+ type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/transfer.aws.upbound.io_servers.yaml b/package/crds/transfer.aws.upbound.io_servers.yaml index 931268d84e..6a36883b8e 100644 --- a/package/crds/transfer.aws.upbound.io_servers.yaml +++ b/package/crds/transfer.aws.upbound.io_servers.yaml @@ -1728,3 +1728,1671 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Server is the Schema for the Servers API. Provides a AWS Transfer + Server resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServerSpec defines the desired state of Server + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificate: + description: The Amazon Resource Name (ARN) of the AWS Certificate + Manager (ACM) certificate. This is required when protocols is + set to FTPS + type: string + certificateRef: + description: Reference to a Certificate in acm to populate certificate. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateSelector: + description: Selector for a Certificate in acm to populate certificate. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + directoryId: + description: The directory service ID of the directory service + you want to connect to with an identity_provider_type of AWS_DIRECTORY_SERVICE. + type: string + directoryIdRef: + description: Reference to a Directory in ds to populate directoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + directoryIdSelector: + description: Selector for a Directory in ds to populate directoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + domain: + description: 'The domain of the storage system that is used for + file transfers. Valid values are: S3 and EFS. The default value + is S3.' 
+ type: string + endpointDetails: + description: The virtual private cloud (VPC) endpoint settings + that you want to configure for your SFTP server. See endpoint_details + block below for details. + properties: + addressAllocationIds: + description: A list of address allocation IDs that are required + to attach an Elastic IP address to your SFTP server's endpoint. + This property can only be used when endpoint_type is set + to VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIds: + description: A list of security groups IDs that are available + to attach to your server's endpoint. If no security groups + are specified, the VPC's default security groups are automatically + assigned to your endpoint. This property can only be used + when endpoint_type is set to VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs that are required to host + your SFTP server endpoint in your VPC. This property can + only be used when endpoint_type is set to VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcEndpointId: + description: The ID of the VPC endpoint. This property can + only be used when endpoint_type is set to VPC_ENDPOINT + type: string + vpcId: + description: The VPC ID of the virtual private cloud in which + the SFTP server's endpoint will be hosted. This property + can only be used when endpoint_type is set to VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + endpointType: + description: The type of endpoint that you want your SFTP server + connect to. If you connect to a VPC (or VPC_ENDPOINT), your + SFTP server isn't accessible over the public internet. If you + want to connect your SFTP server via public internet, set PUBLIC. 
Defaults + to PUBLIC. + type: string + forceDestroy: + description: A boolean that indicates all users associated with + the server should be deleted so that the Server can be destroyed + without error. The default value is false. This option only + applies to servers configured with a SERVICE_MANAGED identity_provider_type. + type: boolean + function: + description: The ARN for a lambda function to use for the Identity + provider. + type: string + hostKeySecretRef: + description: RSA, ECDSA, or ED25519 private key (e.g., as generated + by the ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key, + ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key + or ssh-keygen -t ed25519 -N "" -f my-new-server-key commands). + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + identityProviderType: + description: The mode of authentication enabled for this service. + The default value is SERVICE_MANAGED, which allows you to store + and access SFTP user credentials within the service. API_GATEWAY + indicates that user authentication requires a call to an API + Gateway endpoint URL provided by you to integrate an identity + provider of your choice. Using AWS_DIRECTORY_SERVICE will allow + for authentication against AWS Managed Active Directory or Microsoft + Active Directory in your on-premises environment, or in AWS + using AD Connectors. Use the AWS_LAMBDA value to directly use + a Lambda function as your identity provider. If you choose this + value, you must specify the ARN for the lambda function in the + function argument. + type: string + invocationRole: + description: Amazon Resource Name (ARN) of the IAM role used to + authenticate the user account with an identity_provider_type + of API_GATEWAY. 
+ type: string + loggingRole: + description: Amazon Resource Name (ARN) of an IAM role that allows + the service to write your SFTP users’ activity to your Amazon + CloudWatch logs for monitoring and auditing purposes. + type: string + loggingRoleRef: + description: Reference to a Role in iam to populate loggingRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + loggingRoleSelector: + description: Selector for a Role in iam to populate loggingRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + postAuthenticationLoginBannerSecretRef: + description: Specify a string to display when users connect to + a server. This string is displayed after the user authenticates. + The SFTP protocol does not support post-authentication display + banners. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + preAuthenticationLoginBannerSecretRef: + description: Specify a string to display when users connect to + a server. This string is displayed before the user authenticates. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + protocolDetails: + description: The protocol settings that are configured for your + server. See protocol_details block below for details. + properties: + as2Transports: + description: Indicates the transport method for the AS2 messages. + Currently, only HTTP is supported. + items: + type: string + type: array + x-kubernetes-list-type: set + passiveIp: + description: Indicates passive mode, for FTP and FTPS protocols. + Enter a single IPv4 address, such as the public IP address + of a firewall, router, or load balancer. 
+ type: string + setStatOption: + description: 'Use to ignore the error that is generated when + the client attempts to use SETSTAT on a file you are uploading + to an S3 bucket. Valid values: DEFAULT, ENABLE_NO_OP.' + type: string + tlsSessionResumptionMode: + description: 'A property used with Transfer Family servers + that use the FTPS protocol. Provides a mechanism to resume + or share a negotiated secret key between the control and + data connection for an FTPS session. Valid values: DISABLED, + ENABLED, ENFORCED.' + type: string + type: object + protocols: + description: 'Specifies the file transfer protocol or protocols + over which your file transfer protocol client can connect to + your server''s endpoint. This defaults to SFTP . The available + protocols are:' + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: Region is the region you'd like your resource to + be created in. + type: string + s3StorageOptions: + description: Specifies whether or not performance for your Amazon + S3 directories is optimized. This is disabled by default. See + s3_storage_options block below for details. + properties: + directoryListingOptimization: + description: Specifies whether or not performance for your + Amazon S3 directories is optimized. Valid values are DISABLED, + ENABLED. + type: string + type: object + securityPolicyName: + description: 'Specifies the name of the security policy that is + attached to the server. Default value is: TransferSecurityPolicy-2018-11. + The available values are:' + type: string + sftpAuthenticationMethods: + description: 'For SFTP-enabled servers, and for custom identity + providers only. Valid values are PASSWORD, PUBLIC_KEY, PUBLIC_KEY_OR_PASSWORD + and PUBLIC_KEY_AND_PASSWORD. Default value is: PUBLIC_KEY_OR_PASSWORD.' 
+ type: string + structuredLogDestinations: + description: |- + A set of ARNs of destinations that will receive structured logs from the transfer server such as CloudWatch Log Group ARNs. If provided this enables the transfer server to emit structured logs to the specified locations. + This is a set of arns of destinations that will receive structured logs from the transfer server + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + url: + description: '- URL of the service endpoint used to authenticate + users with an identity_provider_type of API_GATEWAY.' + type: string + workflowDetails: + description: Specifies the workflow details. See workflow_details + block below for details. + properties: + onPartialUpload: + description: A trigger that starts a workflow if a file is + only partially uploaded. See Workflow Detail below. See + on_partial_upload block below for details. + properties: + executionRole: + description: Includes the necessary permissions for S3, + EFS, and Lambda operations that Transfer can assume, + so that all workflow steps can operate on the required + resources. + type: string + workflowId: + description: A unique identifier for the workflow. + type: string + type: object + onUpload: + description: 'A trigger that starts a workflow: the workflow + begins to execute after a file is uploaded. See on_upload + block below for details.' + properties: + executionRole: + description: Includes the necessary permissions for S3, + EFS, and Lambda operations that Transfer can assume, + so that all workflow steps can operate on the required + resources. + type: string + workflowId: + description: A unique identifier for the workflow. + type: string + type: object + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + certificate: + description: The Amazon Resource Name (ARN) of the AWS Certificate + Manager (ACM) certificate. This is required when protocols is + set to FTPS + type: string + certificateRef: + description: Reference to a Certificate in acm to populate certificate. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateSelector: + description: Selector for a Certificate in acm to populate certificate. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + directoryId: + description: The directory service ID of the directory service + you want to connect to with an identity_provider_type of AWS_DIRECTORY_SERVICE. + type: string + directoryIdRef: + description: Reference to a Directory in ds to populate directoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + directoryIdSelector: + description: Selector for a Directory in ds to populate directoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + domain: + description: 'The domain of the storage system that is used for + file transfers. Valid values are: S3 and EFS. The default value + is S3.' + type: string + endpointDetails: + description: The virtual private cloud (VPC) endpoint settings + that you want to configure for your SFTP server. See endpoint_details + block below for details. 
+ properties: + addressAllocationIds: + description: A list of address allocation IDs that are required + to attach an Elastic IP address to your SFTP server's endpoint. + This property can only be used when endpoint_type is set + to VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIds: + description: A list of security groups IDs that are available + to attach to your server's endpoint. If no security groups + are specified, the VPC's default security groups are automatically + assigned to your endpoint. This property can only be used + when endpoint_type is set to VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs that are required to host + your SFTP server endpoint in your VPC. This property can + only be used when endpoint_type is set to VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcEndpointId: + description: The ID of the VPC endpoint. This property can + only be used when endpoint_type is set to VPC_ENDPOINT + type: string + vpcId: + description: The VPC ID of the virtual private cloud in which + the SFTP server's endpoint will be hosted. This property + can only be used when endpoint_type is set to VPC. + type: string + vpcIdRef: + description: Reference to a VPC in ec2 to populate vpcId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpcIdSelector: + description: Selector for a VPC in ec2 to populate vpcId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + endpointType: + description: The type of endpoint that you want your SFTP server + connect to. If you connect to a VPC (or VPC_ENDPOINT), your + SFTP server isn't accessible over the public internet. If you + want to connect your SFTP server via public internet, set PUBLIC. Defaults + to PUBLIC. + type: string + forceDestroy: + description: A boolean that indicates all users associated with + the server should be deleted so that the Server can be destroyed + without error. The default value is false. 
This option only + applies to servers configured with a SERVICE_MANAGED identity_provider_type. + type: boolean + function: + description: The ARN for a lambda function to use for the Identity + provider. + type: string + hostKeySecretRef: + description: RSA, ECDSA, or ED25519 private key (e.g., as generated + by the ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key, + ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key + or ssh-keygen -t ed25519 -N "" -f my-new-server-key commands). + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + identityProviderType: + description: The mode of authentication enabled for this service. + The default value is SERVICE_MANAGED, which allows you to store + and access SFTP user credentials within the service. API_GATEWAY + indicates that user authentication requires a call to an API + Gateway endpoint URL provided by you to integrate an identity + provider of your choice. Using AWS_DIRECTORY_SERVICE will allow + for authentication against AWS Managed Active Directory or Microsoft + Active Directory in your on-premises environment, or in AWS + using AD Connectors. Use the AWS_LAMBDA value to directly use + a Lambda function as your identity provider. If you choose this + value, you must specify the ARN for the lambda function in the + function argument. + type: string + invocationRole: + description: Amazon Resource Name (ARN) of the IAM role used to + authenticate the user account with an identity_provider_type + of API_GATEWAY. + type: string + loggingRole: + description: Amazon Resource Name (ARN) of an IAM role that allows + the service to write your SFTP users’ activity to your Amazon + CloudWatch logs for monitoring and auditing purposes. 
+ type: string + loggingRoleRef: + description: Reference to a Role in iam to populate loggingRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + loggingRoleSelector: + description: Selector for a Role in iam to populate loggingRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + postAuthenticationLoginBannerSecretRef: + description: Specify a string to display when users connect to + a server. This string is displayed after the user authenticates. + The SFTP protocol does not support post-authentication display + banners. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + preAuthenticationLoginBannerSecretRef: + description: Specify a string to display when users connect to + a server. This string is displayed before the user authenticates. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + protocolDetails: + description: The protocol settings that are configured for your + server. See protocol_details block below for details. + properties: + as2Transports: + description: Indicates the transport method for the AS2 messages. + Currently, only HTTP is supported. + items: + type: string + type: array + x-kubernetes-list-type: set + passiveIp: + description: Indicates passive mode, for FTP and FTPS protocols. + Enter a single IPv4 address, such as the public IP address + of a firewall, router, or load balancer. + type: string + setStatOption: + description: 'Use to ignore the error that is generated when + the client attempts to use SETSTAT on a file you are uploading + to an S3 bucket. Valid values: DEFAULT, ENABLE_NO_OP.' 
+ type: string + tlsSessionResumptionMode: + description: 'A property used with Transfer Family servers + that use the FTPS protocol. Provides a mechanism to resume + or share a negotiated secret key between the control and + data connection for an FTPS session. Valid values: DISABLED, + ENABLED, ENFORCED.' + type: string + type: object + protocols: + description: 'Specifies the file transfer protocol or protocols + over which your file transfer protocol client can connect to + your server''s endpoint. This defaults to SFTP . The available + protocols are:' + items: + type: string + type: array + x-kubernetes-list-type: set + s3StorageOptions: + description: Specifies whether or not performance for your Amazon + S3 directories is optimized. This is disabled by default. See + s3_storage_options block below for details. + properties: + directoryListingOptimization: + description: Specifies whether or not performance for your + Amazon S3 directories is optimized. Valid values are DISABLED, + ENABLED. + type: string + type: object + securityPolicyName: + description: 'Specifies the name of the security policy that is + attached to the server. Default value is: TransferSecurityPolicy-2018-11. + The available values are:' + type: string + sftpAuthenticationMethods: + description: 'For SFTP-enabled servers, and for custom identity + providers only. Valid values are PASSWORD, PUBLIC_KEY, PUBLIC_KEY_OR_PASSWORD + and PUBLIC_KEY_AND_PASSWORD. Default value is: PUBLIC_KEY_OR_PASSWORD.' + type: string + structuredLogDestinations: + description: |- + A set of ARNs of destinations that will receive structured logs from the transfer server such as CloudWatch Log Group ARNs. If provided this enables the transfer server to emit structured logs to the specified locations. 
+ This is a set of arns of destinations that will receive structured logs from the transfer server + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + url: + description: '- URL of the service endpoint used to authenticate + users with an identity_provider_type of API_GATEWAY.' + type: string + workflowDetails: + description: Specifies the workflow details. See workflow_details + block below for details. + properties: + onPartialUpload: + description: A trigger that starts a workflow if a file is + only partially uploaded. See Workflow Detail below. See + on_partial_upload block below for details. + properties: + executionRole: + description: Includes the necessary permissions for S3, + EFS, and Lambda operations that Transfer can assume, + so that all workflow steps can operate on the required + resources. + type: string + workflowId: + description: A unique identifier for the workflow. + type: string + type: object + onUpload: + description: 'A trigger that starts a workflow: the workflow + begins to execute after a file is uploaded. See on_upload + block below for details.' + properties: + executionRole: + description: Includes the necessary permissions for S3, + EFS, and Lambda operations that Transfer can assume, + so that all workflow steps can operate on the required + resources. + type: string + workflowId: + description: A unique identifier for the workflow. + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ServerStatus defines the observed state of Server. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of Transfer Server + type: string + certificate: + description: The Amazon Resource Name (ARN) of the AWS Certificate + Manager (ACM) certificate. This is required when protocols is + set to FTPS + type: string + directoryId: + description: The directory service ID of the directory service + you want to connect to with an identity_provider_type of AWS_DIRECTORY_SERVICE. 
+ type: string + domain: + description: 'The domain of the storage system that is used for + file transfers. Valid values are: S3 and EFS. The default value + is S3.' + type: string + endpoint: + description: The endpoint of the Transfer Server (e.g., s-12345678.server.transfer.REGION.amazonaws.com) + type: string + endpointDetails: + description: The virtual private cloud (VPC) endpoint settings + that you want to configure for your SFTP server. See endpoint_details + block below for details. + properties: + addressAllocationIds: + description: A list of address allocation IDs that are required + to attach an Elastic IP address to your SFTP server's endpoint. + This property can only be used when endpoint_type is set + to VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + securityGroupIds: + description: A list of security groups IDs that are available + to attach to your server's endpoint. If no security groups + are specified, the VPC's default security groups are automatically + assigned to your endpoint. This property can only be used + when endpoint_type is set to VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: A list of subnet IDs that are required to host + your SFTP server endpoint in your VPC. This property can + only be used when endpoint_type is set to VPC. + items: + type: string + type: array + x-kubernetes-list-type: set + vpcEndpointId: + description: The ID of the VPC endpoint. This property can + only be used when endpoint_type is set to VPC_ENDPOINT + type: string + vpcId: + description: The VPC ID of the virtual private cloud in which + the SFTP server's endpoint will be hosted. This property + can only be used when endpoint_type is set to VPC. + type: string + type: object + endpointType: + description: The type of endpoint that you want your SFTP server + connect to. 
If you connect to a VPC (or VPC_ENDPOINT), your + SFTP server isn't accessible over the public internet. If you + want to connect your SFTP server via public internet, set PUBLIC. Defaults + to PUBLIC. + type: string + forceDestroy: + description: A boolean that indicates all users associated with + the server should be deleted so that the Server can be destroyed + without error. The default value is false. This option only + applies to servers configured with a SERVICE_MANAGED identity_provider_type. + type: boolean + function: + description: The ARN for a lambda function to use for the Identity + provider. + type: string + hostKeyFingerprint: + description: This value contains the message-digest algorithm + (MD5) hash of the server's host key. This value is equivalent + to the output of the ssh-keygen -l -E md5 -f my-new-server-key + command. + type: string + id: + description: The Server ID of the Transfer Server (e.g., s-12345678) + type: string + identityProviderType: + description: The mode of authentication enabled for this service. + The default value is SERVICE_MANAGED, which allows you to store + and access SFTP user credentials within the service. API_GATEWAY + indicates that user authentication requires a call to an API + Gateway endpoint URL provided by you to integrate an identity + provider of your choice. Using AWS_DIRECTORY_SERVICE will allow + for authentication against AWS Managed Active Directory or Microsoft + Active Directory in your on-premises environment, or in AWS + using AD Connectors. Use the AWS_LAMBDA value to directly use + a Lambda function as your identity provider. If you choose this + value, you must specify the ARN for the lambda function in the + function argument. + type: string + invocationRole: + description: Amazon Resource Name (ARN) of the IAM role used to + authenticate the user account with an identity_provider_type + of API_GATEWAY. 
+ type: string + loggingRole: + description: Amazon Resource Name (ARN) of an IAM role that allows + the service to write your SFTP users’ activity to your Amazon + CloudWatch logs for monitoring and auditing purposes. + type: string + protocolDetails: + description: The protocol settings that are configured for your + server. See protocol_details block below for details. + properties: + as2Transports: + description: Indicates the transport method for the AS2 messages. + Currently, only HTTP is supported. + items: + type: string + type: array + x-kubernetes-list-type: set + passiveIp: + description: Indicates passive mode, for FTP and FTPS protocols. + Enter a single IPv4 address, such as the public IP address + of a firewall, router, or load balancer. + type: string + setStatOption: + description: 'Use to ignore the error that is generated when + the client attempts to use SETSTAT on a file you are uploading + to an S3 bucket. Valid values: DEFAULT, ENABLE_NO_OP.' + type: string + tlsSessionResumptionMode: + description: 'A property used with Transfer Family servers + that use the FTPS protocol. Provides a mechanism to resume + or share a negotiated secret key between the control and + data connection for an FTPS session. Valid values: DISABLED, + ENABLED, ENFORCED.' + type: string + type: object + protocols: + description: 'Specifies the file transfer protocol or protocols + over which your file transfer protocol client can connect to + your server''s endpoint. This defaults to SFTP . The available + protocols are:' + items: + type: string + type: array + x-kubernetes-list-type: set + s3StorageOptions: + description: Specifies whether or not performance for your Amazon + S3 directories is optimized. This is disabled by default. See + s3_storage_options block below for details. + properties: + directoryListingOptimization: + description: Specifies whether or not performance for your + Amazon S3 directories is optimized. Valid values are DISABLED, + ENABLED. 
+ type: string + type: object + securityPolicyName: + description: 'Specifies the name of the security policy that is + attached to the server. Default value is: TransferSecurityPolicy-2018-11. + The available values are:' + type: string + sftpAuthenticationMethods: + description: 'For SFTP-enabled servers, and for custom identity + providers only. Valid values are PASSWORD, PUBLIC_KEY, PUBLIC_KEY_OR_PASSWORD + and PUBLIC_KEY_AND_PASSWORD. Default value is: PUBLIC_KEY_OR_PASSWORD.' + type: string + structuredLogDestinations: + description: |- + A set of ARNs of destinations that will receive structured logs from the transfer server such as CloudWatch Log Group ARNs. If provided this enables the transfer server to emit structured logs to the specified locations. + This is a set of arns of destinations that will receive structured logs from the transfer server + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + url: + description: '- URL of the service endpoint used to authenticate + users with an identity_provider_type of API_GATEWAY.' + type: string + workflowDetails: + description: Specifies the workflow details. See workflow_details + block below for details. + properties: + onPartialUpload: + description: A trigger that starts a workflow if a file is + only partially uploaded. See Workflow Detail below. See + on_partial_upload block below for details. + properties: + executionRole: + description: Includes the necessary permissions for S3, + EFS, and Lambda operations that Transfer can assume, + so that all workflow steps can operate on the required + resources. 
+ type: string + workflowId: + description: A unique identifier for the workflow. + type: string + type: object + onUpload: + description: 'A trigger that starts a workflow: the workflow + begins to execute after a file is uploaded. See on_upload + block below for details.' + properties: + executionRole: + description: Includes the necessary permissions for S3, + EFS, and Lambda operations that Transfer can assume, + so that all workflow steps can operate on the required + resources. + type: string + workflowId: + description: A unique identifier for the workflow. + type: string + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/transfer.aws.upbound.io_users.yaml b/package/crds/transfer.aws.upbound.io_users.yaml index 14a210cca8..6f1bf53025 100644 --- a/package/crds/transfer.aws.upbound.io_users.yaml +++ b/package/crds/transfer.aws.upbound.io_users.yaml @@ -856,3 +856,835 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: User is the Schema for the Users API. Provides a AWS Transfer + User resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UserSpec defines the desired state of User + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + homeDirectory: + description: The landing directory (folder) for a user when they + log in to the server using their SFTP client. It should begin + with a /. The first item in the path is the name of the home + bucket (accessible as ${Transfer:HomeBucket} in the policy) + and the rest is the home directory (accessible as ${Transfer:HomeDirectory} + in the policy). For example, /example-bucket-1234/username would + set the home bucket to example-bucket-1234 and the home directory + to username. + type: string + homeDirectoryMappings: + description: Logical directory mappings that specify what S3 paths + and keys should be visible to your user and how you want to + make them visible. See Home Directory Mappings below. + items: + properties: + entry: + description: Represents an entry and a target. + type: string + target: + description: Represents the map target. + type: string + type: object + type: array + homeDirectoryType: + description: The type of landing directory (folder) you mapped + for your users' home directory. 
Valid values are PATH and LOGICAL. + type: string + policy: + description: An IAM JSON policy document that scopes down user + access to portions of their Amazon S3 bucket. IAM variables + you can use inside this policy include ${Transfer:UserName}, + ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. These + are evaluated on-the-fly when navigating the bucket. + type: string + posixProfile: + description: Specifies the full POSIX identity, including user + ID (Uid), group ID (Gid), and any secondary groups IDs (SecondaryGids), + that controls your users' access to your Amazon EFS file systems. + See Posix Profile below. + properties: + gid: + description: The POSIX group ID used for all EFS operations + by this user. + type: number + secondaryGids: + description: The secondary POSIX group IDs used for all EFS + operations by this user. + items: + type: number + type: array + x-kubernetes-list-type: set + uid: + description: The POSIX user ID used for all EFS operations + by this user. + type: number + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + role: + description: Amazon Resource Name (ARN) of an IAM role that allows + the service to control your user’s access to your Amazon S3 + bucket. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serverId: + description: The Server ID of the Transfer Server (e.g., s-12345678) + type: string + serverIdRef: + description: Reference to a Server in transfer to populate serverId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serverIdSelector: + description: Selector for a Server in transfer to populate serverId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + homeDirectory: + description: The landing directory (folder) for a user when they + log in to the server using their SFTP client. It should begin + with a /. The first item in the path is the name of the home + bucket (accessible as ${Transfer:HomeBucket} in the policy) + and the rest is the home directory (accessible as ${Transfer:HomeDirectory} + in the policy). For example, /example-bucket-1234/username would + set the home bucket to example-bucket-1234 and the home directory + to username. + type: string + homeDirectoryMappings: + description: Logical directory mappings that specify what S3 paths + and keys should be visible to your user and how you want to + make them visible. See Home Directory Mappings below. + items: + properties: + entry: + description: Represents an entry and a target. + type: string + target: + description: Represents the map target. + type: string + type: object + type: array + homeDirectoryType: + description: The type of landing directory (folder) you mapped + for your users' home directory. Valid values are PATH and LOGICAL. + type: string + policy: + description: An IAM JSON policy document that scopes down user + access to portions of their Amazon S3 bucket. 
IAM variables + you can use inside this policy include ${Transfer:UserName}, + ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. These + are evaluated on-the-fly when navigating the bucket. + type: string + posixProfile: + description: Specifies the full POSIX identity, including user + ID (Uid), group ID (Gid), and any secondary groups IDs (SecondaryGids), + that controls your users' access to your Amazon EFS file systems. + See Posix Profile below. + properties: + gid: + description: The POSIX group ID used for all EFS operations + by this user. + type: number + secondaryGids: + description: The secondary POSIX group IDs used for all EFS + operations by this user. + items: + type: number + type: array + x-kubernetes-list-type: set + uid: + description: The POSIX user ID used for all EFS operations + by this user. + type: number + type: object + role: + description: Amazon Resource Name (ARN) of an IAM role that allows + the service to control your user’s access to your Amazon S3 + bucket. + type: string + roleRef: + description: Reference to a Role in iam to populate role. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + roleSelector: + description: Selector for a Role in iam to populate role. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serverId: + description: The Server ID of the Transfer Server (e.g., s-12345678) + type: string + serverIdRef: + description: Reference to a Server in transfer to populate serverId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serverIdSelector: + description: Selector for a Server in transfer to populate serverId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: UserStatus defines the observed state of User. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of Transfer User + type: string + homeDirectory: + description: The landing directory (folder) for a user when they + log in to the server using their SFTP client. It should begin + with a /. The first item in the path is the name of the home + bucket (accessible as ${Transfer:HomeBucket} in the policy) + and the rest is the home directory (accessible as ${Transfer:HomeDirectory} + in the policy). 
For example, /example-bucket-1234/username would + set the home bucket to example-bucket-1234 and the home directory + to username. + type: string + homeDirectoryMappings: + description: Logical directory mappings that specify what S3 paths + and keys should be visible to your user and how you want to + make them visible. See Home Directory Mappings below. + items: + properties: + entry: + description: Represents an entry and a target. + type: string + target: + description: Represents the map target. + type: string + type: object + type: array + homeDirectoryType: + description: The type of landing directory (folder) you mapped + for your users' home directory. Valid values are PATH and LOGICAL. + type: string + id: + type: string + policy: + description: An IAM JSON policy document that scopes down user + access to portions of their Amazon S3 bucket. IAM variables + you can use inside this policy include ${Transfer:UserName}, + ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. These + are evaluated on-the-fly when navigating the bucket. + type: string + posixProfile: + description: Specifies the full POSIX identity, including user + ID (Uid), group ID (Gid), and any secondary groups IDs (SecondaryGids), + that controls your users' access to your Amazon EFS file systems. + See Posix Profile below. + properties: + gid: + description: The POSIX group ID used for all EFS operations + by this user. + type: number + secondaryGids: + description: The secondary POSIX group IDs used for all EFS + operations by this user. + items: + type: number + type: array + x-kubernetes-list-type: set + uid: + description: The POSIX user ID used for all EFS operations + by this user. + type: number + type: object + role: + description: Amazon Resource Name (ARN) of an IAM role that allows + the service to control your user’s access to your Amazon S3 + bucket. 
+ type: string + serverId: + description: The Server ID of the Transfer Server (e.g., s-12345678) + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/transfer.aws.upbound.io_workflows.yaml b/package/crds/transfer.aws.upbound.io_workflows.yaml index f9e6604fe6..fdaca612ba 100644 --- a/package/crds/transfer.aws.upbound.io_workflows.yaml +++ b/package/crds/transfer.aws.upbound.io_workflows.yaml @@ -1903,3 +1903,1744 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workflow is the Schema for the Workflows API. Provides a AWS + Transfer Workflow resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkflowSpec defines the desired state of Workflow + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A textual description for the workflow. + type: string + onExceptionSteps: + description: Specifies the steps (actions) to take if errors are + encountered during execution of the workflow. See Workflow Steps + below. + items: + properties: + copyStepDetails: + description: Details for a step that performs a file copy. + See Copy Step Details below. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. 
+ properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. + type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: object + customStepDetails: + description: Details for a step that invokes a lambda function. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + target: + description: The ARN for the lambda function that is + being called. + type: string + timeoutSeconds: + description: Timeout, in seconds, for the step. 
+ type: number + type: object + decryptStepDetails: + description: Details for a step that decrypts the file. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. + type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. 
+ type: string + type: object + deleteStepDetails: + description: Details for a step that deletes the file. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: object + tagStepDetails: + description: Details for a step that creates one or more + tags. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + tags: + description: Key-value map of resource tags. + items: + properties: + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + value: + description: The value that corresponds to the + key. + type: string + type: object + type: array + type: object + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. 
+ type: string + steps: + description: Specifies the details for the steps that are in the + specified workflow. See Workflow Steps below. + items: + properties: + copyStepDetails: + description: Details for a step that performs a file copy. + See Copy Step Details below. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. + type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' 
+ type: string + type: object + customStepDetails: + description: Details for a step that invokes a lambda function. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + target: + description: The ARN for the lambda function that is + being called. + type: string + targetRef: + description: Reference to a Function in lambda to populate + target. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetSelector: + description: Selector for a Function in lambda to populate + target. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeoutSeconds: + description: Timeout, in seconds, for the step. + type: number + type: object + decryptStepDetails: + description: Details for a step that decrypts the file. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. 
+ type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. + type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + deleteStepDetails: + description: Details for a step that deletes the file. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: object + tagStepDetails: + description: Details for a step that creates one or more + tags. + properties: + name: + description: The name of the step, used as an identifier. 
+ type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + tags: + description: Key-value map of resource tags. + items: + properties: + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + value: + description: The value that corresponds to the + key. + type: string + type: object + type: array + type: object + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A textual description for the workflow. 
+ type: string + onExceptionSteps: + description: Specifies the steps (actions) to take if errors are + encountered during execution of the workflow. See Workflow Steps + below. + items: + properties: + copyStepDetails: + description: Details for a step that performs a file copy. + See Copy Step Details below. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. + type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' 
+ type: string + type: object + customStepDetails: + description: Details for a step that invokes a lambda function. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + target: + description: The ARN for the lambda function that is + being called. + type: string + timeoutSeconds: + description: Timeout, in seconds, for the step. + type: number + type: object + decryptStepDetails: + description: Details for a step that decrypts the file. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. 
+ type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + deleteStepDetails: + description: Details for a step that deletes the file. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: object + tagStepDetails: + description: Details for a step that creates one or more + tags. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. 
In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + tags: + description: Key-value map of resource tags. + items: + properties: + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + value: + description: The value that corresponds to the + key. + type: string + type: object + type: array + type: object + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + type: array + steps: + description: Specifies the details for the steps that are in the + specified workflow. See Workflow Steps below. + items: + properties: + copyStepDetails: + description: Details for a step that performs a file copy. + See Copy Step Details below. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. 
+ type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: object + customStepDetails: + description: Details for a step that invokes a lambda function. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + target: + description: The ARN for the lambda function that is + being called. + type: string + targetRef: + description: Reference to a Function in lambda to populate + target. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetSelector: + description: Selector for a Function in lambda to populate + target. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeoutSeconds: + description: Timeout, in seconds, for the step. + type: number + type: object + decryptStepDetails: + description: Details for a step that decrypts the file. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. 
Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. + type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + deleteStepDetails: + description: Details for a step that deletes the file. + properties: + name: + description: The name of the step, used as an identifier. 
+ type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: object + tagStepDetails: + description: Details for a step that creates one or more + tags. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + tags: + description: Key-value map of resource tags. + items: + properties: + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + value: + description: The value that corresponds to the + key. + type: string + type: object + type: array + type: object + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.steps is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.steps) + || (has(self.initProvider) && has(self.initProvider.steps))' + status: + description: WorkflowStatus defines the observed state of Workflow. + properties: + atProvider: + properties: + arn: + description: The Workflow ARN. + type: string + description: + description: A textual description for the workflow. + type: string + id: + description: The Workflow id. 
+ type: string + onExceptionSteps: + description: Specifies the steps (actions) to take if errors are + encountered during execution of the workflow. See Workflow Steps + below. + items: + properties: + copyStepDetails: + description: Details for a step that performs a file copy. + See Copy Step Details below. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. + type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' 
+ type: string + type: object + customStepDetails: + description: Details for a step that invokes a lambda function. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + target: + description: The ARN for the lambda function that is + being called. + type: string + timeoutSeconds: + description: Timeout, in seconds, for the step. + type: number + type: object + decryptStepDetails: + description: Details for a step that decrypts the file. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. 
+ type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + deleteStepDetails: + description: Details for a step that deletes the file. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: object + tagStepDetails: + description: Details for a step that creates one or more + tags. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. 
In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + tags: + description: Key-value map of resource tags. + items: + properties: + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + value: + description: The value that corresponds to the + key. + type: string + type: object + type: array + type: object + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + type: array + steps: + description: Specifies the details for the steps that are in the + specified workflow. See Workflow Steps below. + items: + properties: + copyStepDetails: + description: Details for a step that performs a file copy. + See Copy Step Details below. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. + properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. 
+ type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: object + customStepDetails: + description: Details for a step that invokes a lambda function. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + target: + description: The ARN for the lambda function that is + being called. + type: string + timeoutSeconds: + description: Timeout, in seconds, for the step. + type: number + type: object + decryptStepDetails: + description: Details for a step that decrypts the file. + properties: + destinationFileLocation: + description: Specifies the location for the file being + copied. Use ${Transfer:username} in this field to + parametrize the destination prefix by username. + properties: + efsFileLocation: + description: Specifies the details for the EFS file + being copied. 
+ properties: + fileSystemId: + description: The ID of the file system, assigned + by Amazon EFS. + type: string + path: + description: The pathname for the folder being + used by a workflow. + type: string + type: object + s3FileLocation: + description: Specifies the details for the S3 file + being copied. + properties: + bucket: + description: Specifies the S3 bucket for the + customer input file. + type: string + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + type: object + type: object + name: + description: The name of the step, used as an identifier. + type: string + overwriteExisting: + description: A flag that indicates whether or not to + overwrite an existing file of the same name. The default + is FALSE. Valid values are TRUE and FALSE. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + deleteStepDetails: + description: Details for a step that deletes the file. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. 
In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + type: object + tagStepDetails: + description: Details for a step that creates one or more + tags. + properties: + name: + description: The name of the step, used as an identifier. + type: string + sourceFileLocation: + description: 'Specifies which file to use as input to + the workflow step: either the output from the previous + step, or the originally uploaded file for the workflow. + Enter ${previous.file} to use the previous file as + the input. In this case, this workflow step uses the + output file from the previous workflow step as input. + This is the default value. Enter ${original.file} + to use the originally-uploaded file location as input + for this step.' + type: string + tags: + description: Key-value map of resource tags. + items: + properties: + key: + description: The name assigned to the file when + it was created in S3. You use the object key + to retrieve the object. + type: string + value: + description: The value that corresponds to the + key. + type: string + type: object + type: array + type: object + type: + description: One of the following step types are supported. + COPY, CUSTOM, DECRYPT, DELETE, and TAG. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/waf.aws.upbound.io_bytematchsets.yaml b/package/crds/waf.aws.upbound.io_bytematchsets.yaml index d9b0eee426..fb442fdde0 100644 --- a/package/crds/waf.aws.upbound.io_bytematchsets.yaml +++ b/package/crds/waf.aws.upbound.io_bytematchsets.yaml @@ -495,3 +495,474 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ByteMatchSet is the Schema for the ByteMatchSets API. Provides + a AWS WAF Byte Match Set resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ByteMatchSetSpec defines the desired state of ByteMatchSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + byteMatchTuples: + description: |- + Specifies the bytes (typically a string that corresponds + with ASCII characters) that you want to search for in web requests, + the location in requests that you want to search, and other settings. + items: + properties: + fieldToMatch: + description: The part of a web request that you want to + search, such as a specified header or a query string. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + positionalConstraint: + description: |- + Within the portion of a web request that you want to search + (for example, in the query string, if any), specify where you want to search. + e.g., CONTAINS, CONTAINS_WORD or EXACTLY. 
+ See docs + for all supported values. + type: string + targetString: + description: |- + The value that you want to search for within the field specified by field_to_match, e.g., badrefer1. + See docs + for all supported values. + type: string + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + name: + description: The name or description of the Byte Match Set. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + byteMatchTuples: + description: |- + Specifies the bytes (typically a string that corresponds + with ASCII characters) that you want to search for in web requests, + the location in requests that you want to search, and other settings. + items: + properties: + fieldToMatch: + description: The part of a web request that you want to + search, such as a specified header or a query string. 
+ properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + positionalConstraint: + description: |- + Within the portion of a web request that you want to search + (for example, in the query string, if any), specify where you want to search. + e.g., CONTAINS, CONTAINS_WORD or EXACTLY. + See docs + for all supported values. + type: string + targetString: + description: |- + The value that you want to search for within the field specified by field_to_match, e.g., badrefer1. + See docs + for all supported values. + type: string + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + name: + description: The name or description of the Byte Match Set. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ByteMatchSetStatus defines the observed state of ByteMatchSet. + properties: + atProvider: + properties: + byteMatchTuples: + description: |- + Specifies the bytes (typically a string that corresponds + with ASCII characters) that you want to search for in web requests, + the location in requests that you want to search, and other settings. + items: + properties: + fieldToMatch: + description: The part of a web request that you want to + search, such as a specified header or a query string. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. 
+ type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + positionalConstraint: + description: |- + Within the portion of a web request that you want to search + (for example, in the query string, if any), specify where you want to search. + e.g., CONTAINS, CONTAINS_WORD or EXACTLY. + See docs + for all supported values. + type: string + targetString: + description: |- + The value that you want to search for within the field specified by field_to_match, e.g., badrefer1. + See docs + for all supported values. + type: string + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + id: + description: The ID of the WAF Byte Match Set. + type: string + name: + description: The name or description of the Byte Match Set. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/waf.aws.upbound.io_regexmatchsets.yaml b/package/crds/waf.aws.upbound.io_regexmatchsets.yaml index 8c70eb535d..3485670d3c 100644 --- a/package/crds/waf.aws.upbound.io_regexmatchsets.yaml +++ b/package/crds/waf.aws.upbound.io_regexmatchsets.yaml @@ -611,3 +611,590 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RegexMatchSet is the Schema for the RegexMatchSets API. 
Provides + a AWS WAF Regex Match Set resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RegexMatchSetSpec defines the desired state of RegexMatchSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: The name or description of the Regex Match Set. + type: string + regexMatchTuple: + description: The regular expression pattern that you want AWS + WAF to search for in web requests, the location in requests + that you want AWS WAF to search, and other settings. See below. 
+ items: + properties: + fieldToMatch: + description: The part of a web request that you want to + search, such as a specified header or a query string. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + regexPatternSetId: + description: The ID of a Regex Pattern Set. + type: string + regexPatternSetIdRef: + description: Reference to a RegexPatternSet in waf to populate + regexPatternSetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + regexPatternSetIdSelector: + description: Selector for a RegexPatternSet in waf to populate + regexPatternSetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: The name or description of the Regex Match Set. + type: string + regexMatchTuple: + description: The regular expression pattern that you want AWS + WAF to search for in web requests, the location in requests + that you want AWS WAF to search, and other settings. See below. + items: + properties: + fieldToMatch: + description: The part of a web request that you want to + search, such as a specified header or a query string. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + regexPatternSetId: + description: The ID of a Regex Pattern Set. + type: string + regexPatternSetIdRef: + description: Reference to a RegexPatternSet in waf to populate + regexPatternSetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + regexPatternSetIdSelector: + description: Selector for a RegexPatternSet in waf to populate + regexPatternSetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: RegexMatchSetStatus defines the observed state of RegexMatchSet. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) + type: string + id: + description: The ID of the WAF Regex Match Set. 
+ type: string + name: + description: The name or description of the Regex Match Set. + type: string + regexMatchTuple: + description: The regular expression pattern that you want AWS + WAF to search for in web requests, the location in requests + that you want AWS WAF to search, and other settings. See below. + items: + properties: + fieldToMatch: + description: The part of a web request that you want to + search, such as a specified header or a query string. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + regexPatternSetId: + description: The ID of a Regex Pattern Set. + type: string + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/waf.aws.upbound.io_sizeconstraintsets.yaml b/package/crds/waf.aws.upbound.io_sizeconstraintsets.yaml index 3f95f3032d..c38786880a 100644 --- a/package/crds/waf.aws.upbound.io_sizeconstraintsets.yaml +++ b/package/crds/waf.aws.upbound.io_sizeconstraintsets.yaml @@ -477,3 +477,453 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SizeConstraintSet is the Schema for the SizeConstraintSets API. 
+ The + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SizeConstraintSetSpec defines the desired state of SizeConstraintSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: Name or description of the Size Constraint Set. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + sizeConstraints: + description: Parts of web requests that you want to inspect the + size of. + items: + properties: + comparisonOperator: + description: Type of comparison you want to perform, such + as EQ, NE, LT, or GT. 
Please refer to the documentation + for a complete list of supported values. + type: string + fieldToMatch: + description: Parameter that specifies where in a web request + to look for the size constraint. + properties: + data: + description: When the type is HEADER, specify the name + of the header that you want to search using the data + field, for example, User-Agent or Referer. If the + type is any other value, you can omit this field. + type: string + type: + description: Part of the web request that you want AWS + WAF to search for a specified string. For example, + HEADER, METHOD, or BODY. See the docs for all supported + values. + type: string + type: object + size: + description: Size in bytes that you want to compare against + the size of the specified field_to_match. Valid values + for size are between 0 and 21474836480 bytes (0 and 20 + GB). + type: number + textTransformation: + description: |- + Parameter is used to eliminate unusual formatting that attackers may use in web requests to bypass AWS WAF. When a transformation is specified, AWS WAF performs the transformation on the field_to_match before inspecting the request for a match. Some examples of supported transformations are CMD_LINE, HTML_ENTITY_DECODE, and NONE. You can find a complete list of supported values in the AWS WAF API Reference. + Note: If you choose BODY as the type, you must also choose NONE because CloudFront only forwards the first 8192 bytes for inspection. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: Name or description of the Size Constraint Set. + type: string + sizeConstraints: + description: Parts of web requests that you want to inspect the + size of. + items: + properties: + comparisonOperator: + description: Type of comparison you want to perform, such + as EQ, NE, LT, or GT. Please refer to the documentation + for a complete list of supported values. + type: string + fieldToMatch: + description: Parameter that specifies where in a web request + to look for the size constraint. + properties: + data: + description: When the type is HEADER, specify the name + of the header that you want to search using the data + field, for example, User-Agent or Referer. If the + type is any other value, you can omit this field. + type: string + type: + description: Part of the web request that you want AWS + WAF to search for a specified string. For example, + HEADER, METHOD, or BODY. See the docs for all supported + values. + type: string + type: object + size: + description: Size in bytes that you want to compare against + the size of the specified field_to_match. Valid values + for size are between 0 and 21474836480 bytes (0 and 20 + GB). + type: number + textTransformation: + description: |- + Parameter is used to eliminate unusual formatting that attackers may use in web requests to bypass AWS WAF. When a transformation is specified, AWS WAF performs the transformation on the field_to_match before inspecting the request for a match. Some examples of supported transformations are CMD_LINE, HTML_ENTITY_DECODE, and NONE. You can find a complete list of supported values in the AWS WAF API Reference. 
+ Note: If you choose BODY as the type, you must also choose NONE because CloudFront only forwards the first 8192 bytes for inspection. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: SizeConstraintSetStatus defines the observed state of SizeConstraintSet. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN). + type: string + id: + description: ID of the WAF Size Constraint Set. + type: string + name: + description: Name or description of the Size Constraint Set. + type: string + sizeConstraints: + description: Parts of web requests that you want to inspect the + size of. + items: + properties: + comparisonOperator: + description: Type of comparison you want to perform, such + as EQ, NE, LT, or GT. Please refer to the documentation + for a complete list of supported values. + type: string + fieldToMatch: + description: Parameter that specifies where in a web request + to look for the size constraint. + properties: + data: + description: When the type is HEADER, specify the name + of the header that you want to search using the data + field, for example, User-Agent or Referer. If the + type is any other value, you can omit this field. + type: string + type: + description: Part of the web request that you want AWS + WAF to search for a specified string. For example, + HEADER, METHOD, or BODY. See the docs for all supported + values. + type: string + type: object + size: + description: Size in bytes that you want to compare against + the size of the specified field_to_match. Valid values + for size are between 0 and 21474836480 bytes (0 and 20 + GB). 
+ type: number + textTransformation: + description: |- + Parameter is used to eliminate unusual formatting that attackers may use in web requests to bypass AWS WAF. When a transformation is specified, AWS WAF performs the transformation on the field_to_match before inspecting the request for a match. Some examples of supported transformations are CMD_LINE, HTML_ENTITY_DECODE, and NONE. You can find a complete list of supported values in the AWS WAF API Reference. + Note: If you choose BODY as the type, you must also choose NONE because CloudFront only forwards the first 8192 bytes for inspection. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/waf.aws.upbound.io_sqlinjectionmatchsets.yaml b/package/crds/waf.aws.upbound.io_sqlinjectionmatchsets.yaml index 6c66ffdc32..b40e5a1eb4 100644 --- a/package/crds/waf.aws.upbound.io_sqlinjectionmatchsets.yaml +++ b/package/crds/waf.aws.upbound.io_sqlinjectionmatchsets.yaml @@ -454,3 +454,433 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SQLInjectionMatchSet is the Schema for the SQLInjectionMatchSets + API. Provides a AWS WAF SQL Injection Match Set resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SQLInjectionMatchSetSpec defines the desired state of SQLInjectionMatchSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: The name or description of the SQL Injection Match + Set. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + sqlInjectionMatchTuples: + description: The parts of web requests that you want AWS WAF to + inspect for malicious SQL code and, if you want AWS WAF to inspect + a header, the name of the header. + items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + snippets of malicious SQL code. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. 
+ See docs + for all supported values. + type: string + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: The name or description of the SQL Injection Match + Set. + type: string + sqlInjectionMatchTuples: + description: The parts of web requests that you want AWS WAF to + inspect for malicious SQL code and, if you want AWS WAF to inspect + a header, the name of the header. + items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + snippets of malicious SQL code. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. 
+ See docs + for all supported values. + type: string + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: SQLInjectionMatchSetStatus defines the observed state of + SQLInjectionMatchSet. + properties: + atProvider: + properties: + id: + description: The ID of the WAF SQL Injection Match Set. + type: string + name: + description: The name or description of the SQL Injection Match + Set. + type: string + sqlInjectionMatchTuples: + description: The parts of web requests that you want AWS WAF to + inspect for malicious SQL code and, if you want AWS WAF to inspect + a header, the name of the header. + items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + snippets of malicious SQL code. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/waf.aws.upbound.io_webacls.yaml b/package/crds/waf.aws.upbound.io_webacls.yaml index c657d5466f..0005856b57 100644 --- a/package/crds/waf.aws.upbound.io_webacls.yaml +++ b/package/crds/waf.aws.upbound.io_webacls.yaml @@ -982,3 +982,931 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WebACL is the Schema for the WebACLs API. Provides a AWS WAF + web access control group (ACL) resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WebACLSpec defines the desired state of WebACL + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + defaultAction: + description: Configuration block with action that you want AWS + WAF to take when a request doesn't match the criteria in any + of the rules that are associated with the web ACL. Detailed + below. + properties: + type: + description: |- + Specifies how you want AWS WAF to respond to requests that don't match the criteria in any of the rules. + e.g., ALLOW or BLOCK + type: string + type: object + loggingConfiguration: + description: Configuration block to enable WAF logging. Detailed + below. + properties: + logDestination: + description: Amazon Resource Name (ARN) of Kinesis Firehose + Delivery Stream + type: string + logDestinationRef: + description: Reference to a DeliveryStream in firehose to + populate logDestination. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logDestinationSelector: + description: Selector for a DeliveryStream in firehose to + populate logDestination. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + redactedFields: + description: Configuration block containing parts of the request + that you want redacted from the logs. Detailed below. 
+ properties: + fieldToMatch: + description: Set of configuration blocks for fields to + redact. Detailed below. + items: + properties: + data: + description: When the value of type is HEADER, enter + the name of the header that you want the WAF to + search, for example, User-Agent or Referer. If + the value of type is any other value, omit data. + type: string + type: + description: 'valid values are: BLOCK, ALLOW, or + COUNT' + type: string + type: object + type: array + type: object + type: object + metricName: + description: The name or description for the Amazon CloudWatch + metric of this web ACL. + type: string + name: + description: The name or description of the web ACL. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rules: + description: Configuration blocks containing rules to associate + with the web ACL and the settings for each rule. Detailed below. + items: + properties: + action: + description: The action that CloudFront or AWS WAF takes + when a web request matches the conditions in the rule. + Not used if type is GROUP. + properties: + type: + description: 'valid values are: BLOCK, ALLOW, or COUNT' + type: string + type: object + overrideAction: + description: Override the action that a group requests CloudFront + or AWS WAF takes when a web request matches the conditions + in the rule. Only used if type is GROUP. + properties: + type: + description: 'valid values are: BLOCK, ALLOW, or COUNT' + type: string + type: object + priority: + description: |- + Specifies the order in which the rules in a WebACL are evaluated. + Rules with a lower value are evaluated before rules with a higher value. + type: number + ruleId: + description: ID of the associated WAF (Global) rule (e.g., + aws_waf_rule). WAF (Regional) rules cannot be used. + type: string + ruleIdRef: + description: Reference to a Rule in waf to populate ruleId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ruleIdSelector: + description: Selector for a Rule in waf to populate ruleId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The rule type, either REGULAR, as defined by + Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, + as defined by RuleGroup. The default is REGULAR. If you + add a RATE_BASED rule, you need to set type as RATE_BASED. + If you add a GROUP rule, you need to set type as GROUP. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + defaultAction: + description: Configuration block with action that you want AWS + WAF to take when a request doesn't match the criteria in any + of the rules that are associated with the web ACL. Detailed + below. + properties: + type: + description: |- + Specifies how you want AWS WAF to respond to requests that don't match the criteria in any of the rules. + e.g., ALLOW or BLOCK + type: string + type: object + loggingConfiguration: + description: Configuration block to enable WAF logging. Detailed + below. 
+ properties: + logDestination: + description: Amazon Resource Name (ARN) of Kinesis Firehose + Delivery Stream + type: string + logDestinationRef: + description: Reference to a DeliveryStream in firehose to + populate logDestination. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logDestinationSelector: + description: Selector for a DeliveryStream in firehose to + populate logDestination. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + redactedFields: + description: Configuration block containing parts of the request + that you want redacted from the logs. Detailed below. + properties: + fieldToMatch: + description: Set of configuration blocks for fields to + redact. Detailed below. + items: + properties: + data: + description: When the value of type is HEADER, enter + the name of the header that you want the WAF to + search, for example, User-Agent or Referer. If + the value of type is any other value, omit data. + type: string + type: + description: 'valid values are: BLOCK, ALLOW, or + COUNT' + type: string + type: object + type: array + type: object + type: object + metricName: + description: The name or description for the Amazon CloudWatch + metric of this web ACL. + type: string + name: + description: The name or description of the web ACL. + type: string + rules: + description: Configuration blocks containing rules to associate + with the web ACL and the settings for each rule. Detailed below. + items: + properties: + action: + description: The action that CloudFront or AWS WAF takes + when a web request matches the conditions in the rule. + Not used if type is GROUP. + properties: + type: + description: 'valid values are: BLOCK, ALLOW, or COUNT' + type: string + type: object + overrideAction: + description: Override the action that a group requests CloudFront + or AWS WAF takes when a web request matches the conditions + in the rule. Only used if type is GROUP. 
+ properties: + type: + description: 'valid values are: BLOCK, ALLOW, or COUNT' + type: string + type: object + priority: + description: |- + Specifies the order in which the rules in a WebACL are evaluated. + Rules with a lower value are evaluated before rules with a higher value. + type: number + ruleId: + description: ID of the associated WAF (Global) rule (e.g., + aws_waf_rule). WAF (Regional) rules cannot be used. + type: string + ruleIdRef: + description: Reference to a Rule in waf to populate ruleId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ruleIdSelector: + description: Selector for a Rule in waf to populate ruleId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The rule type, either REGULAR, as defined by + Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, + as defined by RuleGroup. The default is REGULAR. If you + add a RATE_BASED rule, you need to set type as RATE_BASED. + If you add a GROUP rule, you need to set type as GROUP. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.defaultAction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultAction) + || (has(self.initProvider) && has(self.initProvider.defaultAction))' + - message: spec.forProvider.metricName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.metricName) + || (has(self.initProvider) && has(self.initProvider.metricName))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: WebACLStatus defines the observed state of WebACL. 
+ properties: + atProvider: + properties: + arn: + description: The ARN of the WAF WebACL. + type: string + defaultAction: + description: Configuration block with action that you want AWS + WAF to take when a request doesn't match the criteria in any + of the rules that are associated with the web ACL. Detailed + below. + properties: + type: + description: |- + Specifies how you want AWS WAF to respond to requests that don't match the criteria in any of the rules. + e.g., ALLOW or BLOCK + type: string + type: object + id: + description: The ID of the WAF WebACL. + type: string + loggingConfiguration: + description: Configuration block to enable WAF logging. Detailed + below. + properties: + logDestination: + description: Amazon Resource Name (ARN) of Kinesis Firehose + Delivery Stream + type: string + redactedFields: + description: Configuration block containing parts of the request + that you want redacted from the logs. Detailed below. + properties: + fieldToMatch: + description: Set of configuration blocks for fields to + redact. Detailed below. + items: + properties: + data: + description: When the value of type is HEADER, enter + the name of the header that you want the WAF to + search, for example, User-Agent or Referer. If + the value of type is any other value, omit data. + type: string + type: + description: 'valid values are: BLOCK, ALLOW, or + COUNT' + type: string + type: object + type: array + type: object + type: object + metricName: + description: The name or description for the Amazon CloudWatch + metric of this web ACL. + type: string + name: + description: The name or description of the web ACL. + type: string + rules: + description: Configuration blocks containing rules to associate + with the web ACL and the settings for each rule. Detailed below. + items: + properties: + action: + description: The action that CloudFront or AWS WAF takes + when a web request matches the conditions in the rule. + Not used if type is GROUP. 
+ properties: + type: + description: 'valid values are: BLOCK, ALLOW, or COUNT' + type: string + type: object + overrideAction: + description: Override the action that a group requests CloudFront + or AWS WAF takes when a web request matches the conditions + in the rule. Only used if type is GROUP. + properties: + type: + description: 'valid values are: BLOCK, ALLOW, or COUNT' + type: string + type: object + priority: + description: |- + Specifies the order in which the rules in a WebACL are evaluated. + Rules with a lower value are evaluated before rules with a higher value. + type: number + ruleId: + description: ID of the associated WAF (Global) rule (e.g., + aws_waf_rule). WAF (Regional) rules cannot be used. + type: string + type: + description: The rule type, either REGULAR, as defined by + Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, + as defined by RuleGroup. The default is REGULAR. If you + add a RATE_BASED rule, you need to set type as RATE_BASED. + If you add a GROUP rule, you need to set type as GROUP. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/waf.aws.upbound.io_xssmatchsets.yaml b/package/crds/waf.aws.upbound.io_xssmatchsets.yaml index 5ee979879a..7f0a059108 100644 --- a/package/crds/waf.aws.upbound.io_xssmatchsets.yaml +++ b/package/crds/waf.aws.upbound.io_xssmatchsets.yaml @@ -450,3 +450,429 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: XSSMatchSet is the Schema for the XSSMatchSets API. Provides + a AWS WAF XssMatchSet resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: XSSMatchSetSpec defines the desired state of XSSMatchSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: The name or description of the SizeConstraintSet. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + xssMatchTuples: + description: The parts of web requests that you want to inspect + for cross-site scripting attacks. + items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + cross-site scripting attacks. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. 
+ If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: The name or description of the SizeConstraintSet. + type: string + xssMatchTuples: + description: The parts of web requests that you want to inspect + for cross-site scripting attacks. + items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + cross-site scripting attacks. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. 
+ If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: XSSMatchSetStatus defines the observed state of XSSMatchSet. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) + type: string + id: + description: The ID of the WAF XssMatchSet. + type: string + name: + description: The name or description of the SizeConstraintSet. + type: string + xssMatchTuples: + description: The parts of web requests that you want to inspect + for cross-site scripting attacks. + items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + cross-site scripting attacks. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on target_string before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/wafregional.aws.upbound.io_bytematchsets.yaml b/package/crds/wafregional.aws.upbound.io_bytematchsets.yaml index 913d4c3970..af97e86986 100644 --- a/package/crds/wafregional.aws.upbound.io_bytematchsets.yaml +++ b/package/crds/wafregional.aws.upbound.io_bytematchsets.yaml @@ -456,3 +456,435 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ByteMatchSet is the Schema for the ByteMatchSets API. Provides + a AWS WAF Regional ByteMatchSet resource for use with ALB. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ByteMatchSetSpec defines the desired state of ByteMatchSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + byteMatchTuples: + description: Settings for the ByteMatchSet, such as the bytes + (typically a string that corresponds with ASCII characters) + that you want AWS WAF to search for in web requests. ByteMatchTuple + documented below. + items: + properties: + fieldToMatch: + description: Settings for the ByteMatchTuple. FieldToMatch + documented below. + properties: + data: + description: When the value of Type is HEADER, enter + the name of the header that you want AWS WAF to search, + for example, User-Agent or Referer. If the value of + Type is any other value, omit Data. + type: string + type: + description: The part of the web request that you want + AWS WAF to search for a specified string. + type: string + type: object + positionalConstraint: + description: Within the portion of a web request that you + want to search. + type: string + targetString: + description: The value that you want AWS WAF to search for. + The maximum length of the value is 50 bytes. 
+ type: string + textTransformation: + description: The formatting way for web request. + type: string + type: object + type: array + name: + description: The name or description of the ByteMatchSet. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + byteMatchTuples: + description: Settings for the ByteMatchSet, such as the bytes + (typically a string that corresponds with ASCII characters) + that you want AWS WAF to search for in web requests. ByteMatchTuple + documented below. + items: + properties: + fieldToMatch: + description: Settings for the ByteMatchTuple. FieldToMatch + documented below. + properties: + data: + description: When the value of Type is HEADER, enter + the name of the header that you want AWS WAF to search, + for example, User-Agent or Referer. If the value of + Type is any other value, omit Data. + type: string + type: + description: The part of the web request that you want + AWS WAF to search for a specified string. + type: string + type: object + positionalConstraint: + description: Within the portion of a web request that you + want to search. + type: string + targetString: + description: The value that you want AWS WAF to search for. 
+ The maximum length of the value is 50 bytes. + type: string + textTransformation: + description: The formatting way for web request. + type: string + type: object + type: array + name: + description: The name or description of the ByteMatchSet. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ByteMatchSetStatus defines the observed state of ByteMatchSet. + properties: + atProvider: + properties: + byteMatchTuples: + description: Settings for the ByteMatchSet, such as the bytes + (typically a string that corresponds with ASCII characters) + that you want AWS WAF to search for in web requests. ByteMatchTuple + documented below. + items: + properties: + fieldToMatch: + description: Settings for the ByteMatchTuple. FieldToMatch + documented below. + properties: + data: + description: When the value of Type is HEADER, enter + the name of the header that you want AWS WAF to search, + for example, User-Agent or Referer. If the value of + Type is any other value, omit Data. + type: string + type: + description: The part of the web request that you want + AWS WAF to search for a specified string. + type: string + type: object + positionalConstraint: + description: Within the portion of a web request that you + want to search. + type: string + targetString: + description: The value that you want AWS WAF to search for. + The maximum length of the value is 50 bytes. + type: string + textTransformation: + description: The formatting way for web request. + type: string + type: object + type: array + id: + description: The ID of the WAF ByteMatchSet. + type: string + name: + description: The name or description of the ByteMatchSet. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/wafregional.aws.upbound.io_regexmatchsets.yaml b/package/crds/wafregional.aws.upbound.io_regexmatchsets.yaml index 625026c926..3c24ed017a 100644 --- a/package/crds/wafregional.aws.upbound.io_regexmatchsets.yaml +++ b/package/crds/wafregional.aws.upbound.io_regexmatchsets.yaml @@ -608,3 +608,587 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RegexMatchSet is the Schema for the RegexMatchSets API. Provides + a AWS WAF Regional Regex Match Set resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RegexMatchSetSpec defines the desired state of RegexMatchSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: The name or description of the Regex Match Set. + type: string + regexMatchTuple: + description: The regular expression pattern that you want AWS + WAF to search for in web requests, the location in requests + that you want AWS WAF to search, and other settings. See below. + items: + properties: + fieldToMatch: + description: The part of a web request that you want to + search, such as a specified header or a query string. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + regexPatternSetId: + description: The ID of a Regex Pattern Set. 
+ type: string + regexPatternSetIdRef: + description: Reference to a RegexPatternSet in wafregional + to populate regexPatternSetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + regexPatternSetIdSelector: + description: Selector for a RegexPatternSet in wafregional + to populate regexPatternSetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + region: + description: Region is the region you'd like your resource to + be created in. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: The name or description of the Regex Match Set. + type: string + regexMatchTuple: + description: The regular expression pattern that you want AWS + WAF to search for in web requests, the location in requests + that you want AWS WAF to search, and other settings. See below. + items: + properties: + fieldToMatch: + description: The part of a web request that you want to + search, such as a specified header or a query string. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. 
+ If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + regexPatternSetId: + description: The ID of a Regex Pattern Set. + type: string + regexPatternSetIdRef: + description: Reference to a RegexPatternSet in wafregional + to populate regexPatternSetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + regexPatternSetIdSelector: + description: Selector for a RegexPatternSet in wafregional + to populate regexPatternSetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: RegexMatchSetStatus defines the observed state of RegexMatchSet. + properties: + atProvider: + properties: + id: + description: The ID of the WAF Regional Regex Match Set. + type: string + name: + description: The name or description of the Regex Match Set. + type: string + regexMatchTuple: + description: The regular expression pattern that you want AWS + WAF to search for in web requests, the location in requests + that you want AWS WAF to search, and other settings. See below. + items: + properties: + fieldToMatch: + description: The part of a web request that you want to + search, such as a specified header or a query string. 
+ properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + regexPatternSetId: + description: The ID of a Regex Pattern Set. + type: string + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/wafregional.aws.upbound.io_sizeconstraintsets.yaml b/package/crds/wafregional.aws.upbound.io_sizeconstraintsets.yaml index aab63fc829..efaad10c7c 100644 --- a/package/crds/wafregional.aws.upbound.io_sizeconstraintsets.yaml +++ b/package/crds/wafregional.aws.upbound.io_sizeconstraintsets.yaml @@ -485,3 +485,464 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SizeConstraintSet is the Schema for the SizeConstraintSets API. + Provides an AWS WAF Regional Size Constraint Set resource for use with ALB. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SizeConstraintSetSpec defines the desired state of SizeConstraintSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: The name or description of the Size Constraint Set. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + sizeConstraints: + description: Specifies the parts of web requests that you want + to inspect the size of. + items: + properties: + comparisonOperator: + description: |- + The type of comparison you want to perform. + e.g., EQ, NE, LT, GT. + See docs for all supported values. + type: string + fieldToMatch: + description: Specifies where in a web request to look for + the size constraint. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. 
+ e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + size: + description: |- + The size in bytes that you want to compare against the size of the specified field_to_match. + Valid values are between 0 - 21474836480 bytes (0 - 20 GB). + type: number + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + Note: if you choose BODY as type, you must choose NONE because CloudFront forwards only the first 8192 bytes for inspection. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: The name or description of the Size Constraint Set. + type: string + sizeConstraints: + description: Specifies the parts of web requests that you want + to inspect the size of. + items: + properties: + comparisonOperator: + description: |- + The type of comparison you want to perform. + e.g., EQ, NE, LT, GT. + See docs for all supported values. 
+ type: string + fieldToMatch: + description: Specifies where in a web request to look for + the size constraint. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + size: + description: |- + The size in bytes that you want to compare against the size of the specified field_to_match. + Valid values are between 0 - 21474836480 bytes (0 - 20 GB). + type: number + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + Note: if you choose BODY as type, you must choose NONE because CloudFront forwards only the first 8192 bytes for inspection. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: SizeConstraintSetStatus defines the observed state of SizeConstraintSet. + properties: + atProvider: + properties: + arn: + type: string + id: + description: The ID of the WAF Size Constraint Set. + type: string + name: + description: The name or description of the Size Constraint Set. + type: string + sizeConstraints: + description: Specifies the parts of web requests that you want + to inspect the size of. + items: + properties: + comparisonOperator: + description: |- + The type of comparison you want to perform. + e.g., EQ, NE, LT, GT. + See docs for all supported values. + type: string + fieldToMatch: + description: Specifies where in a web request to look for + the size constraint. 
+ properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + size: + description: |- + The size in bytes that you want to compare against the size of the specified field_to_match. + Valid values are between 0 - 21474836480 bytes (0 - 20 GB). + type: number + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + Note: if you choose BODY as type, you must choose NONE because CloudFront forwards only the first 8192 bytes for inspection. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/wafregional.aws.upbound.io_sqlinjectionmatchsets.yaml b/package/crds/wafregional.aws.upbound.io_sqlinjectionmatchsets.yaml index 20592a13ff..3680bcec60 100644 --- a/package/crds/wafregional.aws.upbound.io_sqlinjectionmatchsets.yaml +++ b/package/crds/wafregional.aws.upbound.io_sqlinjectionmatchsets.yaml @@ -452,3 +452,431 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SQLInjectionMatchSet is the Schema for the SQLInjectionMatchSets + API. Provides a AWS WAF Regional SqlInjectionMatchSet resource for use with + ALB. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SQLInjectionMatchSetSpec defines the desired state of SQLInjectionMatchSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: The name or description of the SizeConstraintSet. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + sqlInjectionMatchTuple: + description: The parts of web requests that you want AWS WAF to + inspect for malicious SQL code and, if you want AWS WAF to inspect + a header, the name of the header. 
+ items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + snippets of malicious SQL code. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: The name or description of the SizeConstraintSet. + type: string + sqlInjectionMatchTuple: + description: The parts of web requests that you want AWS WAF to + inspect for malicious SQL code and, if you want AWS WAF to inspect + a header, the name of the header. 
+ items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + snippets of malicious SQL code. + properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: SQLInjectionMatchSetStatus defines the observed state of + SQLInjectionMatchSet. + properties: + atProvider: + properties: + id: + description: The ID of the WAF SqlInjectionMatchSet. + type: string + name: + description: The name or description of the SizeConstraintSet. + type: string + sqlInjectionMatchTuple: + description: The parts of web requests that you want AWS WAF to + inspect for malicious SQL code and, if you want AWS WAF to inspect + a header, the name of the header. + items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + snippets of malicious SQL code. 
+ properties: + data: + description: |- + When type is HEADER, enter the name of the header that you want to search, e.g., User-Agent or Referer. + If type is any other value, omit this field. + type: string + type: + description: |- + The part of the web request that you want AWS WAF to search for a specified string. + e.g., HEADER, METHOD or BODY. + See docs + for all supported values. + type: string + type: object + textTransformation: + description: |- + Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. + If you specify a transformation, AWS WAF performs the transformation on field_to_match before inspecting a request for a match. + e.g., CMD_LINE, HTML_ENTITY_DECODE or NONE. + See docs + for all supported values. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/wafregional.aws.upbound.io_webacls.yaml b/package/crds/wafregional.aws.upbound.io_webacls.yaml index 4e392f106a..03e2590a71 100644 --- a/package/crds/wafregional.aws.upbound.io_webacls.yaml +++ b/package/crds/wafregional.aws.upbound.io_webacls.yaml @@ -1019,3 +1019,968 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WebACL is the Schema for the WebACLs API. Provides a AWS WAF + Regional web access control group (ACL) resource for use with ALB. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WebACLSpec defines the desired state of WebACL + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + defaultAction: + description: The action that you want AWS WAF Regional to take + when a request doesn't match the criteria in any of the rules + that are associated with the web ACL. + properties: + type: + description: Specifies how you want AWS WAF Regional to respond + to requests that match the settings in a ruleE.g., ALLOW, + BLOCK or COUNT + type: string + type: object + loggingConfiguration: + description: Configuration block to enable WAF logging. Detailed + below. + properties: + logDestination: + description: Amazon Resource Name (ARN) of Kinesis Firehose + Delivery Stream + type: string + logDestinationRef: + description: Reference to a DeliveryStream in firehose to + populate logDestination. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logDestinationSelector: + description: Selector for a DeliveryStream in firehose to + populate logDestination. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + redactedFields: + description: Configuration block containing parts of the request + that you want redacted from the logs. Detailed below. + properties: + fieldToMatch: + description: Set of configuration blocks for fields to + redact. Detailed below. + items: + properties: + data: + description: When the value of type is HEADER, enter + the name of the header that you want the WAF to + search, for example, User-Agent or Referer. If + the value of type is any other value, omit data. + type: string + type: + description: Specifies how you want AWS WAF Regional + to respond to requests that match the settings + in a rule. Valid values for action are ALLOW, + BLOCK or COUNT. Valid values for override_action + are COUNT and NONE. + type: string + type: object + type: array + type: object + type: object + metricName: + description: The name or description for the Amazon CloudWatch + metric of this web ACL. + type: string + name: + description: The name or description of the web ACL. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + rule: + description: Set of configuration blocks containing rules for + the web ACL. Detailed below. + items: + properties: + action: + description: Configuration block of the action that CloudFront + or AWS WAF takes when a web request matches the conditions + in the rule. Not used if type is GROUP. Detailed below. + properties: + type: + description: Specifies how you want AWS WAF Regional + to respond to requests that match the settings in + a rule. Valid values for action are ALLOW, BLOCK or + COUNT. Valid values for override_action are COUNT + and NONE. + type: string + type: object + overrideAction: + description: Configuration block of the override the action + that a group requests CloudFront or AWS WAF takes when + a web request matches the conditions in the rule. 
Only + used if type is GROUP. Detailed below. + properties: + type: + description: Specifies how you want AWS WAF Regional + to respond to requests that match the settings in + a rule. Valid values for action are ALLOW, BLOCK or + COUNT. Valid values for override_action are COUNT + and NONE. + type: string + type: object + priority: + description: |- + Specifies the order in which the rules in a WebACL are evaluated. + Rules with a lower value are evaluated before rules with a higher value. + type: number + ruleId: + description: ID of the associated WAF (Regional) rule (e.g., + aws_wafregional_rule). WAF (Global) rules cannot be used. + type: string + ruleIdRef: + description: Reference to a Rule in wafregional to populate + ruleId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ruleIdSelector: + description: Selector for a Rule in wafregional to populate + ruleId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The rule type, either REGULAR, as defined by + Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, + as defined by RuleGroup. The default is REGULAR. If you + add a RATE_BASED rule, you need to set type as RATE_BASED. + If you add a GROUP rule, you need to set type as GROUP. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + defaultAction: + description: The action that you want AWS WAF Regional to take + when a request doesn't match the criteria in any of the rules + that are associated with the web ACL. + properties: + type: + description: Specifies how you want AWS WAF Regional to respond + to requests that match the settings in a ruleE.g., ALLOW, + BLOCK or COUNT + type: string + type: object + loggingConfiguration: + description: Configuration block to enable WAF logging. Detailed + below. + properties: + logDestination: + description: Amazon Resource Name (ARN) of Kinesis Firehose + Delivery Stream + type: string + logDestinationRef: + description: Reference to a DeliveryStream in firehose to + populate logDestination. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logDestinationSelector: + description: Selector for a DeliveryStream in firehose to + populate logDestination. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + redactedFields: + description: Configuration block containing parts of the request + that you want redacted from the logs. Detailed below. + properties: + fieldToMatch: + description: Set of configuration blocks for fields to + redact. Detailed below. + items: + properties: + data: + description: When the value of type is HEADER, enter + the name of the header that you want the WAF to + search, for example, User-Agent or Referer. If + the value of type is any other value, omit data. + type: string + type: + description: Specifies how you want AWS WAF Regional + to respond to requests that match the settings + in a rule. Valid values for action are ALLOW, + BLOCK or COUNT. Valid values for override_action + are COUNT and NONE. 
+ type: string + type: object + type: array + type: object + type: object + metricName: + description: The name or description for the Amazon CloudWatch + metric of this web ACL. + type: string + name: + description: The name or description of the web ACL. + type: string + rule: + description: Set of configuration blocks containing rules for + the web ACL. Detailed below. + items: + properties: + action: + description: Configuration block of the action that CloudFront + or AWS WAF takes when a web request matches the conditions + in the rule. Not used if type is GROUP. Detailed below. + properties: + type: + description: Specifies how you want AWS WAF Regional + to respond to requests that match the settings in + a rule. Valid values for action are ALLOW, BLOCK or + COUNT. Valid values for override_action are COUNT + and NONE. + type: string + type: object + overrideAction: + description: Configuration block of the override the action + that a group requests CloudFront or AWS WAF takes when + a web request matches the conditions in the rule. Only + used if type is GROUP. Detailed below. + properties: + type: + description: Specifies how you want AWS WAF Regional + to respond to requests that match the settings in + a rule. Valid values for action are ALLOW, BLOCK or + COUNT. Valid values for override_action are COUNT + and NONE. + type: string + type: object + priority: + description: |- + Specifies the order in which the rules in a WebACL are evaluated. + Rules with a lower value are evaluated before rules with a higher value. + type: number + ruleId: + description: ID of the associated WAF (Regional) rule (e.g., + aws_wafregional_rule). WAF (Global) rules cannot be used. + type: string + ruleIdRef: + description: Reference to a Rule in wafregional to populate + ruleId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + ruleIdSelector: + description: Selector for a Rule in wafregional to populate + ruleId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: The rule type, either REGULAR, as defined by + Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, + as defined by RuleGroup. The default is REGULAR. If you + add a RATE_BASED rule, you need to set type as RATE_BASED. + If you add a GROUP rule, you need to set type as GROUP. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.defaultAction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultAction) + || (has(self.initProvider) && has(self.initProvider.defaultAction))' + - message: spec.forProvider.metricName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.metricName) + || (has(self.initProvider) && has(self.initProvider.metricName))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: WebACLStatus defines the observed state of WebACL. + properties: + atProvider: + properties: + arn: + description: Amazon Resource Name (ARN) of the WAF Regional WebACL. + type: string + defaultAction: + description: The action that you want AWS WAF Regional to take + when a request doesn't match the criteria in any of the rules + that are associated with the web ACL. + properties: + type: + description: Specifies how you want AWS WAF Regional to respond + to requests that match the settings in a ruleE.g., ALLOW, + BLOCK or COUNT + type: string + type: object + id: + description: The ID of the WAF Regional WebACL. + type: string + loggingConfiguration: + description: Configuration block to enable WAF logging. Detailed + below. 
+ properties: + logDestination: + description: Amazon Resource Name (ARN) of Kinesis Firehose + Delivery Stream + type: string + redactedFields: + description: Configuration block containing parts of the request + that you want redacted from the logs. Detailed below. + properties: + fieldToMatch: + description: Set of configuration blocks for fields to + redact. Detailed below. + items: + properties: + data: + description: When the value of type is HEADER, enter + the name of the header that you want the WAF to + search, for example, User-Agent or Referer. If + the value of type is any other value, omit data. + type: string + type: + description: Specifies how you want AWS WAF Regional + to respond to requests that match the settings + in a rule. Valid values for action are ALLOW, + BLOCK or COUNT. Valid values for override_action + are COUNT and NONE. + type: string + type: object + type: array + type: object + type: object + metricName: + description: The name or description for the Amazon CloudWatch + metric of this web ACL. + type: string + name: + description: The name or description of the web ACL. + type: string + rule: + description: Set of configuration blocks containing rules for + the web ACL. Detailed below. + items: + properties: + action: + description: Configuration block of the action that CloudFront + or AWS WAF takes when a web request matches the conditions + in the rule. Not used if type is GROUP. Detailed below. + properties: + type: + description: Specifies how you want AWS WAF Regional + to respond to requests that match the settings in + a rule. Valid values for action are ALLOW, BLOCK or + COUNT. Valid values for override_action are COUNT + and NONE. + type: string + type: object + overrideAction: + description: Configuration block of the override the action + that a group requests CloudFront or AWS WAF takes when + a web request matches the conditions in the rule. Only + used if type is GROUP. Detailed below. 
+ properties: + type: + description: Specifies how you want AWS WAF Regional + to respond to requests that match the settings in + a rule. Valid values for action are ALLOW, BLOCK or + COUNT. Valid values for override_action are COUNT + and NONE. + type: string + type: object + priority: + description: |- + Specifies the order in which the rules in a WebACL are evaluated. + Rules with a lower value are evaluated before rules with a higher value. + type: number + ruleId: + description: ID of the associated WAF (Regional) rule (e.g., + aws_wafregional_rule). WAF (Global) rules cannot be used. + type: string + type: + description: The rule type, either REGULAR, as defined by + Rule, RATE_BASED, as defined by RateBasedRule, or GROUP, + as defined by RuleGroup. The default is REGULAR. If you + add a RATE_BASED rule, you need to set type as RATE_BASED. + If you add a GROUP rule, you need to set type as GROUP. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/wafregional.aws.upbound.io_xssmatchsets.yaml b/package/crds/wafregional.aws.upbound.io_xssmatchsets.yaml index b556513e87..7772d23fb8 100644 --- a/package/crds/wafregional.aws.upbound.io_xssmatchsets.yaml +++ b/package/crds/wafregional.aws.upbound.io_xssmatchsets.yaml @@ -435,3 +435,414 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: XSSMatchSet is the Schema for the XSSMatchSets API. 
Provides + an AWS WAF Regional XSS Match Set resource for use with ALB. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: XSSMatchSetSpec defines the desired state of XSSMatchSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + name: + description: The name of the set + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + xssMatchTuple: + description: The parts of web requests that you want to inspect + for cross-site scripting attacks. 
+ items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + cross-site scripting attacks. + properties: + data: + description: When the value of type is HEADER, enter + the name of the header that you want the WAF to search, + for example, User-Agent or Referer. If the value of + type is any other value, omit data. + type: string + type: + description: The part of the web request that you want + AWS WAF to search for a specified stringE.g., HEADER + or METHOD + type: string + type: object + textTransformation: + description: Which text transformation, if any, to perform + on the web request before inspecting the request for cross-site + scripting attacks. + type: string + type: object + type: array + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + name: + description: The name of the set + type: string + xssMatchTuple: + description: The parts of web requests that you want to inspect + for cross-site scripting attacks. + items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + cross-site scripting attacks. + properties: + data: + description: When the value of type is HEADER, enter + the name of the header that you want the WAF to search, + for example, User-Agent or Referer. 
If the value of + type is any other value, omit data. + type: string + type: + description: The part of the web request that you want + AWS WAF to search for a specified stringE.g., HEADER + or METHOD + type: string + type: object + textTransformation: + description: Which text transformation, if any, to perform + on the web request before inspecting the request for cross-site + scripting attacks. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: XSSMatchSetStatus defines the observed state of XSSMatchSet. + properties: + atProvider: + properties: + id: + description: The ID of the Regional WAF XSS Match Set. + type: string + name: + description: The name of the set + type: string + xssMatchTuple: + description: The parts of web requests that you want to inspect + for cross-site scripting attacks. + items: + properties: + fieldToMatch: + description: Specifies where in a web request to look for + cross-site scripting attacks. + properties: + data: + description: When the value of type is HEADER, enter + the name of the header that you want the WAF to search, + for example, User-Agent or Referer. If the value of + type is any other value, omit data. + type: string + type: + description: The part of the web request that you want + AWS WAF to search for a specified stringE.g., HEADER + or METHOD + type: string + type: object + textTransformation: + description: Which text transformation, if any, to perform + on the web request before inspecting the request for cross-site + scripting attacks. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/workspaces.aws.upbound.io_directories.yaml b/package/crds/workspaces.aws.upbound.io_directories.yaml index fae4bab5ee..b3ea913cca 100644 --- a/package/crds/workspaces.aws.upbound.io_directories.yaml +++ b/package/crds/workspaces.aws.upbound.io_directories.yaml @@ -1193,3 +1193,1154 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Directory is the Schema for the Directorys API. Provides a WorkSpaces + directory in AWS WorkSpaces Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DirectorySpec defines the desired state of Directory + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + directoryId: + description: The directory identifier for registration in WorkSpaces + service. + type: string + directoryIdRef: + description: Reference to a Directory in ds to populate directoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + directoryIdSelector: + description: Selector for a Directory in ds to populate directoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + ipGroupIds: + description: The identifiers of the IP access control groups associated + with the directory. + items: + type: string + type: array + x-kubernetes-list-type: set + region: + description: Region is the region you'd like your resource to + be created in. + type: string + selfServicePermissions: + description: service capabilities. Defined below. + properties: + changeComputeType: + description: – Whether WorkSpaces directory users can change + the compute type (bundle) for their workspace. Default false. + type: boolean + increaseVolumeSize: + description: – Whether WorkSpaces directory users can increase + the volume size of the drives on their workspace. 
Default + false. + type: boolean + rebuildWorkspace: + description: – Whether WorkSpaces directory users can rebuild + the operating system of a workspace to its original state. + Default false. + type: boolean + restartWorkspace: + description: – Whether WorkSpaces directory users can restart + their workspace. Default true. + type: boolean + switchRunningMode: + description: – Whether WorkSpaces directory users can switch + the running mode of their workspace. Default false. + type: boolean + type: object + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The identifiers of the subnets where the directory + resides. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + workspaceAccessProperties: + description: – Specifies which devices and operating systems + users can use to access their WorkSpaces. Defined below. + properties: + deviceTypeAndroid: + description: – Indicates whether users can use Android devices + to access their WorkSpaces. + type: string + deviceTypeChromeos: + description: – Indicates whether users can use Chromebooks + to access their WorkSpaces. + type: string + deviceTypeIos: + description: – Indicates whether users can use iOS devices + to access their WorkSpaces. + type: string + deviceTypeLinux: + description: – Indicates whether users can use Linux clients + to access their WorkSpaces. + type: string + deviceTypeOsx: + description: – Indicates whether users can use macOS clients + to access their WorkSpaces. 
+ type: string + deviceTypeWeb: + description: – Indicates whether users can access their WorkSpaces + through a web browser. + type: string + deviceTypeWindows: + description: – Indicates whether users can use Windows clients + to access their WorkSpaces. + type: string + deviceTypeZeroclient: + description: – Indicates whether users can use zero client + devices to access their WorkSpaces. + type: string + type: object + workspaceCreationProperties: + description: – Default properties that are used for creating + WorkSpaces. Defined below. + properties: + customSecurityGroupId: + description: – The identifier of your custom security group. + Should relate to the same VPC, where workspaces reside in. + type: string + customSecurityGroupIdRef: + description: Reference to a SecurityGroup in ec2 to populate + customSecurityGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + customSecurityGroupIdSelector: + description: Selector for a SecurityGroup in ec2 to populate + customSecurityGroupId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultOu: + description: – The default organizational unit (OU) for your + WorkSpace directories. Should conform "OU=,DC=,...,DC=" + pattern. + type: string + enableInternetAccess: + description: – Indicates whether internet access is enabled + for your WorkSpaces. + type: boolean + enableMaintenanceMode: + description: – Indicates whether maintenance mode is enabled + for your WorkSpaces. For more information, see WorkSpace + Maintenance.. + type: boolean + userEnabledAsLocalAdministrator: + description: – Indicates whether users are local administrators + of their WorkSpaces. + type: boolean + type: object + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + directoryId: + description: The directory identifier for registration in WorkSpaces + service. + type: string + directoryIdRef: + description: Reference to a Directory in ds to populate directoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + directoryIdSelector: + description: Selector for a Directory in ds to populate directoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + ipGroupIds: + description: The identifiers of the IP access control groups associated + with the directory. + items: + type: string + type: array + x-kubernetes-list-type: set + selfServicePermissions: + description: service capabilities. Defined below. + properties: + changeComputeType: + description: – Whether WorkSpaces directory users can change + the compute type (bundle) for their workspace. Default false. + type: boolean + increaseVolumeSize: + description: – Whether WorkSpaces directory users can increase + the volume size of the drives on their workspace. Default + false. + type: boolean + rebuildWorkspace: + description: – Whether WorkSpaces directory users can rebuild + the operating system of a workspace to its original state. + Default false. + type: boolean + restartWorkspace: + description: – Whether WorkSpaces directory users can restart + their workspace. Default true. + type: boolean + switchRunningMode: + description: – Whether WorkSpaces directory users can switch + the running mode of their workspace. Default false. 
+ type: boolean + type: object + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetIds: + description: The identifiers of the subnets where the directory + resides. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + workspaceAccessProperties: + description: – Specifies which devices and operating systems + users can use to access their WorkSpaces. Defined below. + properties: + deviceTypeAndroid: + description: – Indicates whether users can use Android devices + to access their WorkSpaces. + type: string + deviceTypeChromeos: + description: – Indicates whether users can use Chromebooks + to access their WorkSpaces. + type: string + deviceTypeIos: + description: – Indicates whether users can use iOS devices + to access their WorkSpaces. + type: string + deviceTypeLinux: + description: – Indicates whether users can use Linux clients + to access their WorkSpaces. + type: string + deviceTypeOsx: + description: – Indicates whether users can use macOS clients + to access their WorkSpaces. + type: string + deviceTypeWeb: + description: – Indicates whether users can access their WorkSpaces + through a web browser. + type: string + deviceTypeWindows: + description: – Indicates whether users can use Windows clients + to access their WorkSpaces. + type: string + deviceTypeZeroclient: + description: – Indicates whether users can use zero client + devices to access their WorkSpaces. + type: string + type: object + workspaceCreationProperties: + description: – Default properties that are used for creating + WorkSpaces. Defined below. 
+ properties: + customSecurityGroupId: + description: – The identifier of your custom security group. + Should relate to the same VPC, where workspaces reside in. + type: string + customSecurityGroupIdRef: + description: Reference to a SecurityGroup in ec2 to populate + customSecurityGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + customSecurityGroupIdSelector: + description: Selector for a SecurityGroup in ec2 to populate + customSecurityGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultOu: + description: – The default organizational unit (OU) for your + WorkSpace directories. Should conform "OU=,DC=,...,DC=" + pattern. + type: string + enableInternetAccess: + description: – Indicates whether internet access is enabled + for your WorkSpaces. + type: boolean + enableMaintenanceMode: + description: – Indicates whether maintenance mode is enabled + for your WorkSpaces. For more information, see WorkSpace + Maintenance.. + type: boolean + userEnabledAsLocalAdministrator: + description: – Indicates whether users are local administrators + of their WorkSpaces. + type: boolean + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DirectoryStatus defines the observed state of Directory. + properties: + atProvider: + properties: + alias: + description: The directory alias. + type: string + customerUserName: + description: The user name for the service account. + type: string + directoryId: + description: The directory identifier for registration in WorkSpaces + service. + type: string + directoryName: + description: The name of the directory. + type: string + directoryType: + description: The directory type. + type: string + dnsIpAddresses: + description: The IP addresses of the DNS servers for the directory. + items: + type: string + type: array + x-kubernetes-list-type: set + iamRoleId: + description: The identifier of the IAM role. This is the role + that allows Amazon WorkSpaces to make calls to other services, + such as Amazon EC2, on your behalf. + type: string + id: + description: The WorkSpaces directory identifier. + type: string + ipGroupIds: + description: The identifiers of the IP access control groups associated + with the directory. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + registrationCode: + description: The registration code for the directory. This is + the code that users enter in their Amazon WorkSpaces client + application to connect to the directory. + type: string + selfServicePermissions: + description: service capabilities. Defined below. + properties: + changeComputeType: + description: – Whether WorkSpaces directory users can change + the compute type (bundle) for their workspace. Default false. + type: boolean + increaseVolumeSize: + description: – Whether WorkSpaces directory users can increase + the volume size of the drives on their workspace. Default + false. + type: boolean + rebuildWorkspace: + description: – Whether WorkSpaces directory users can rebuild + the operating system of a workspace to its original state. + Default false. + type: boolean + restartWorkspace: + description: – Whether WorkSpaces directory users can restart + their workspace. Default true. + type: boolean + switchRunningMode: + description: – Whether WorkSpaces directory users can switch + the running mode of their workspace. Default false. + type: boolean + type: object + subnetIds: + description: The identifiers of the subnets where the directory + resides. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + workspaceAccessProperties: + description: – Specifies which devices and operating systems + users can use to access their WorkSpaces. Defined below. + properties: + deviceTypeAndroid: + description: – Indicates whether users can use Android devices + to access their WorkSpaces. 
+ type: string + deviceTypeChromeos: + description: – Indicates whether users can use Chromebooks + to access their WorkSpaces. + type: string + deviceTypeIos: + description: – Indicates whether users can use iOS devices + to access their WorkSpaces. + type: string + deviceTypeLinux: + description: – Indicates whether users can use Linux clients + to access their WorkSpaces. + type: string + deviceTypeOsx: + description: – Indicates whether users can use macOS clients + to access their WorkSpaces. + type: string + deviceTypeWeb: + description: – Indicates whether users can access their WorkSpaces + through a web browser. + type: string + deviceTypeWindows: + description: – Indicates whether users can use Windows clients + to access their WorkSpaces. + type: string + deviceTypeZeroclient: + description: – Indicates whether users can use zero client + devices to access their WorkSpaces. + type: string + type: object + workspaceCreationProperties: + description: – Default properties that are used for creating + WorkSpaces. Defined below. + properties: + customSecurityGroupId: + description: – The identifier of your custom security group. + Should relate to the same VPC, where workspaces reside in. + type: string + defaultOu: + description: – The default organizational unit (OU) for your + WorkSpace directories. Should conform "OU=,DC=,...,DC=" + pattern. + type: string + enableInternetAccess: + description: – Indicates whether internet access is enabled + for your WorkSpaces. + type: boolean + enableMaintenanceMode: + description: – Indicates whether maintenance mode is enabled + for your WorkSpaces. For more information, see WorkSpace + Maintenance.. + type: boolean + userEnabledAsLocalAdministrator: + description: – Indicates whether users are local administrators + of their WorkSpaces. + type: boolean + type: object + workspaceSecurityGroupId: + description: The identifier of the security group that is assigned + to new WorkSpaces. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/xray.aws.upbound.io_groups.yaml b/package/crds/xray.aws.upbound.io_groups.yaml index 4dd1d586f9..b21306928c 100644 --- a/package/crds/xray.aws.upbound.io_groups.yaml +++ b/package/crds/xray.aws.upbound.io_groups.yaml @@ -429,3 +429,405 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Group is the Schema for the Groups API. Creates and manages an + AWS XRay Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GroupSpec defines the desired state of Group + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + filterExpression: + description: The filter expression defining criteria by which + to group traces. more info can be found in official docs. + type: string + groupName: + description: The name of the group. + type: string + insightsConfiguration: + description: Configuration options for enabling insights. + properties: + insightsEnabled: + description: Specifies whether insights are enabled. + type: boolean + notificationsEnabled: + description: Specifies whether insight notifications are enabled. + type: boolean + type: object + region: + description: Region is the region you'd like your resource to + be created in. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + filterExpression: + description: The filter expression defining criteria by which + to group traces. more info can be found in official docs. + type: string + groupName: + description: The name of the group. + type: string + insightsConfiguration: + description: Configuration options for enabling insights. + properties: + insightsEnabled: + description: Specifies whether insights are enabled. + type: boolean + notificationsEnabled: + description: Specifies whether insight notifications are enabled. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.filterExpression is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.filterExpression) + || (has(self.initProvider) && has(self.initProvider.filterExpression))' + - message: spec.forProvider.groupName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.groupName) + || (has(self.initProvider) && has(self.initProvider.groupName))' + status: + description: GroupStatus defines the observed state of Group. + properties: + atProvider: + properties: + arn: + description: The ARN of the Group. + type: string + filterExpression: + description: The filter expression defining criteria by which + to group traces. more info can be found in official docs. + type: string + groupName: + description: The name of the group. 
+ type: string + id: + description: The ARN of the Group. + type: string + insightsConfiguration: + description: Configuration options for enabling insights. + properties: + insightsEnabled: + description: Specifies whether insights are enabled. + type: boolean + notificationsEnabled: + description: Specifies whether insight notifications are enabled. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} From 2a77b49a8927c6991ec359deb0a83dafc2d8b7a2 Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Wed, 29 May 2024 18:47:02 +0300 Subject: [PATCH 05/10] Add eks/v1beta2.ExternalNameIfClusterActive value extractor Signed-off-by: Alper Rifat Ulucinar --- apis/eks/v1beta1/reference.go | 4 ++++ apis/eks/v1beta2/reference.go | 26 ++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 apis/eks/v1beta2/reference.go diff --git a/apis/eks/v1beta1/reference.go b/apis/eks/v1beta1/reference.go index 97105fe388..31f8e5591c 100644 --- a/apis/eks/v1beta1/reference.go +++ b/apis/eks/v1beta1/reference.go @@ -1,3 +1,7 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: CC0-1.0 + package v1beta1 import ( diff --git a/apis/eks/v1beta2/reference.go b/apis/eks/v1beta2/reference.go new file mode 100644 index 0000000000..f0e388cb29 --- /dev/null +++ b/apis/eks/v1beta2/reference.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: CC0-1.0 + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/pkg/reference" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "k8s.io/utils/ptr" +) + +// ExternalNameIfClusterActive returns the external name only if the EKS cluster +// is in ACTIVE state. 
+func ExternalNameIfClusterActive() reference.ExtractValueFn { + return func(mr xpresource.Managed) string { + cl, ok := mr.(*Cluster) + if !ok { + return "" + } + if ptr.Deref(cl.Status.AtProvider.Status, "") != "ACTIVE" { + return "" + } + return reference.ExternalName()(mr) + } +} From 76312d36b6637b02534fc1ca45786ff6ef806c25 Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Wed, 29 May 2024 22:53:25 +0300 Subject: [PATCH 06/10] Remove SecurityConfig.opensearchserverless resource's singleton list conversion for the samlOptions field as its schema is already a single nested block. - saml_options is thus already generated as an embedded object and not a singleton list. Signed-off-by: Alper Rifat Ulucinar --- .../v1beta1/zz_generated.conversion_hubs.go | 3 + .../v1beta2/zz_generated.conversion_hubs.go | 10 - .../v1beta2/zz_generated.deepcopy.go | 312 -------------- .../v1beta2/zz_generated.managed.go | 68 --- .../v1beta2/zz_generated.managedlist.go | 17 - .../v1beta2/zz_groupversion_info.go | 32 -- .../v1beta2/zz_securityconfig_terraformed.go | 129 ------ .../v1beta2/zz_securityconfig_types.go | 170 -------- apis/zz_register.go | 2 - config/opensearchserverless/config.go | 7 + go.mod | 2 + go.sum | 4 +- ...erless.aws.upbound.io_securityconfigs.yaml | 389 ------------------ 13 files changed, 14 insertions(+), 1131 deletions(-) delete mode 100755 apis/opensearchserverless/v1beta2/zz_generated.conversion_hubs.go delete mode 100644 apis/opensearchserverless/v1beta2/zz_generated.deepcopy.go delete mode 100644 apis/opensearchserverless/v1beta2/zz_generated.managed.go delete mode 100644 apis/opensearchserverless/v1beta2/zz_generated.managedlist.go delete mode 100755 apis/opensearchserverless/v1beta2/zz_groupversion_info.go delete mode 100755 apis/opensearchserverless/v1beta2/zz_securityconfig_terraformed.go delete mode 100755 apis/opensearchserverless/v1beta2/zz_securityconfig_types.go diff --git a/apis/opensearchserverless/v1beta1/zz_generated.conversion_hubs.go 
b/apis/opensearchserverless/v1beta1/zz_generated.conversion_hubs.go index c32f5352fd..89e575faa8 100755 --- a/apis/opensearchserverless/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/opensearchserverless/v1beta1/zz_generated.conversion_hubs.go @@ -15,6 +15,9 @@ func (tr *Collection) Hub() {} // Hub marks this type as a conversion hub. func (tr *LifecyclePolicy) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *SecurityConfig) Hub() {} + // Hub marks this type as a conversion hub. func (tr *SecurityPolicy) Hub() {} diff --git a/apis/opensearchserverless/v1beta2/zz_generated.conversion_hubs.go b/apis/opensearchserverless/v1beta2/zz_generated.conversion_hubs.go deleted file mode 100755 index 607288f4ed..0000000000 --- a/apis/opensearchserverless/v1beta2/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta2 - -// Hub marks this type as a conversion hub. -func (tr *SecurityConfig) Hub() {} diff --git a/apis/opensearchserverless/v1beta2/zz_generated.deepcopy.go b/apis/opensearchserverless/v1beta2/zz_generated.deepcopy.go deleted file mode 100644 index 89a77e6677..0000000000 --- a/apis/opensearchserverless/v1beta2/zz_generated.deepcopy.go +++ /dev/null @@ -1,312 +0,0 @@ -//go:build !ignore_autogenerated - -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by controller-gen. DO NOT EDIT. - -package v1beta2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SAMLOptionsInitParameters) DeepCopyInto(out *SAMLOptionsInitParameters) { - *out = *in - if in.GroupAttribute != nil { - in, out := &in.GroupAttribute, &out.GroupAttribute - *out = new(string) - **out = **in - } - if in.Metadata != nil { - in, out := &in.Metadata, &out.Metadata - *out = new(string) - **out = **in - } - if in.SessionTimeout != nil { - in, out := &in.SessionTimeout, &out.SessionTimeout - *out = new(float64) - **out = **in - } - if in.UserAttribute != nil { - in, out := &in.UserAttribute, &out.UserAttribute - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsInitParameters. -func (in *SAMLOptionsInitParameters) DeepCopy() *SAMLOptionsInitParameters { - if in == nil { - return nil - } - out := new(SAMLOptionsInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SAMLOptionsObservation) DeepCopyInto(out *SAMLOptionsObservation) { - *out = *in - if in.GroupAttribute != nil { - in, out := &in.GroupAttribute, &out.GroupAttribute - *out = new(string) - **out = **in - } - if in.Metadata != nil { - in, out := &in.Metadata, &out.Metadata - *out = new(string) - **out = **in - } - if in.SessionTimeout != nil { - in, out := &in.SessionTimeout, &out.SessionTimeout - *out = new(float64) - **out = **in - } - if in.UserAttribute != nil { - in, out := &in.UserAttribute, &out.UserAttribute - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsObservation. -func (in *SAMLOptionsObservation) DeepCopy() *SAMLOptionsObservation { - if in == nil { - return nil - } - out := new(SAMLOptionsObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SAMLOptionsParameters) DeepCopyInto(out *SAMLOptionsParameters) { - *out = *in - if in.GroupAttribute != nil { - in, out := &in.GroupAttribute, &out.GroupAttribute - *out = new(string) - **out = **in - } - if in.Metadata != nil { - in, out := &in.Metadata, &out.Metadata - *out = new(string) - **out = **in - } - if in.SessionTimeout != nil { - in, out := &in.SessionTimeout, &out.SessionTimeout - *out = new(float64) - **out = **in - } - if in.UserAttribute != nil { - in, out := &in.UserAttribute, &out.UserAttribute - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAMLOptionsParameters. -func (in *SAMLOptionsParameters) DeepCopy() *SAMLOptionsParameters { - if in == nil { - return nil - } - out := new(SAMLOptionsParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityConfig) DeepCopyInto(out *SecurityConfig) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfig. -func (in *SecurityConfig) DeepCopy() *SecurityConfig { - if in == nil { - return nil - } - out := new(SecurityConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SecurityConfig) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecurityConfigInitParameters) DeepCopyInto(out *SecurityConfigInitParameters) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.SAMLOptions != nil { - in, out := &in.SAMLOptions, &out.SAMLOptions - *out = new(SAMLOptionsInitParameters) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigInitParameters. -func (in *SecurityConfigInitParameters) DeepCopy() *SecurityConfigInitParameters { - if in == nil { - return nil - } - out := new(SecurityConfigInitParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityConfigList) DeepCopyInto(out *SecurityConfigList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]SecurityConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigList. -func (in *SecurityConfigList) DeepCopy() *SecurityConfigList { - if in == nil { - return nil - } - out := new(SecurityConfigList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SecurityConfigList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecurityConfigObservation) DeepCopyInto(out *SecurityConfigObservation) { - *out = *in - if in.ConfigVersion != nil { - in, out := &in.ConfigVersion, &out.ConfigVersion - *out = new(string) - **out = **in - } - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.SAMLOptions != nil { - in, out := &in.SAMLOptions, &out.SAMLOptions - *out = new(SAMLOptionsObservation) - (*in).DeepCopyInto(*out) - } - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigObservation. -func (in *SecurityConfigObservation) DeepCopy() *SecurityConfigObservation { - if in == nil { - return nil - } - out := new(SecurityConfigObservation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityConfigParameters) DeepCopyInto(out *SecurityConfigParameters) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.Region != nil { - in, out := &in.Region, &out.Region - *out = new(string) - **out = **in - } - if in.SAMLOptions != nil { - in, out := &in.SAMLOptions, &out.SAMLOptions - *out = new(SAMLOptionsParameters) - (*in).DeepCopyInto(*out) - } - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigParameters. 
-func (in *SecurityConfigParameters) DeepCopy() *SecurityConfigParameters { - if in == nil { - return nil - } - out := new(SecurityConfigParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityConfigSpec) DeepCopyInto(out *SecurityConfigSpec) { - *out = *in - in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) - in.ForProvider.DeepCopyInto(&out.ForProvider) - in.InitProvider.DeepCopyInto(&out.InitProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigSpec. -func (in *SecurityConfigSpec) DeepCopy() *SecurityConfigSpec { - if in == nil { - return nil - } - out := new(SecurityConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityConfigStatus) DeepCopyInto(out *SecurityConfigStatus) { - *out = *in - in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) - in.AtProvider.DeepCopyInto(&out.AtProvider) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigStatus. -func (in *SecurityConfigStatus) DeepCopy() *SecurityConfigStatus { - if in == nil { - return nil - } - out := new(SecurityConfigStatus) - in.DeepCopyInto(out) - return out -} diff --git a/apis/opensearchserverless/v1beta2/zz_generated.managed.go b/apis/opensearchserverless/v1beta2/zz_generated.managed.go deleted file mode 100644 index 8fb23e1f30..0000000000 --- a/apis/opensearchserverless/v1beta2/zz_generated.managed.go +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 -// Code generated by angryjet. DO NOT EDIT. - -package v1beta2 - -import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - -// GetCondition of this SecurityConfig. 
-func (mg *SecurityConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return mg.Status.GetCondition(ct) -} - -// GetDeletionPolicy of this SecurityConfig. -func (mg *SecurityConfig) GetDeletionPolicy() xpv1.DeletionPolicy { - return mg.Spec.DeletionPolicy -} - -// GetManagementPolicies of this SecurityConfig. -func (mg *SecurityConfig) GetManagementPolicies() xpv1.ManagementPolicies { - return mg.Spec.ManagementPolicies -} - -// GetProviderConfigReference of this SecurityConfig. -func (mg *SecurityConfig) GetProviderConfigReference() *xpv1.Reference { - return mg.Spec.ProviderConfigReference -} - -// GetPublishConnectionDetailsTo of this SecurityConfig. -func (mg *SecurityConfig) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { - return mg.Spec.PublishConnectionDetailsTo -} - -// GetWriteConnectionSecretToReference of this SecurityConfig. -func (mg *SecurityConfig) GetWriteConnectionSecretToReference() *xpv1.SecretReference { - return mg.Spec.WriteConnectionSecretToReference -} - -// SetConditions of this SecurityConfig. -func (mg *SecurityConfig) SetConditions(c ...xpv1.Condition) { - mg.Status.SetConditions(c...) -} - -// SetDeletionPolicy of this SecurityConfig. -func (mg *SecurityConfig) SetDeletionPolicy(r xpv1.DeletionPolicy) { - mg.Spec.DeletionPolicy = r -} - -// SetManagementPolicies of this SecurityConfig. -func (mg *SecurityConfig) SetManagementPolicies(r xpv1.ManagementPolicies) { - mg.Spec.ManagementPolicies = r -} - -// SetProviderConfigReference of this SecurityConfig. -func (mg *SecurityConfig) SetProviderConfigReference(r *xpv1.Reference) { - mg.Spec.ProviderConfigReference = r -} - -// SetPublishConnectionDetailsTo of this SecurityConfig. -func (mg *SecurityConfig) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { - mg.Spec.PublishConnectionDetailsTo = r -} - -// SetWriteConnectionSecretToReference of this SecurityConfig. 
-func (mg *SecurityConfig) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { - mg.Spec.WriteConnectionSecretToReference = r -} diff --git a/apis/opensearchserverless/v1beta2/zz_generated.managedlist.go b/apis/opensearchserverless/v1beta2/zz_generated.managedlist.go deleted file mode 100644 index 086a8fdeab..0000000000 --- a/apis/opensearchserverless/v1beta2/zz_generated.managedlist.go +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 -// Code generated by angryjet. DO NOT EDIT. - -package v1beta2 - -import resource "github.com/crossplane/crossplane-runtime/pkg/resource" - -// GetItems of this SecurityConfigList. -func (l *SecurityConfigList) GetItems() []resource.Managed { - items := make([]resource.Managed, len(l.Items)) - for i := range l.Items { - items[i] = &l.Items[i] - } - return items -} diff --git a/apis/opensearchserverless/v1beta2/zz_groupversion_info.go b/apis/opensearchserverless/v1beta2/zz_groupversion_info.go deleted file mode 100755 index 240888af55..0000000000 --- a/apis/opensearchserverless/v1beta2/zz_groupversion_info.go +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -// +kubebuilder:object:generate=true -// +groupName=opensearchserverless.aws.upbound.io -// +versionName=v1beta2 -package v1beta2 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -// Package type metadata. 
-const ( - CRDGroup = "opensearchserverless.aws.upbound.io" - CRDVersion = "v1beta2" -) - -var ( - // CRDGroupVersion is the API Group Version used to register the objects - CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/apis/opensearchserverless/v1beta2/zz_securityconfig_terraformed.go b/apis/opensearchserverless/v1beta2/zz_securityconfig_terraformed.go deleted file mode 100755 index 2f8bd64d95..0000000000 --- a/apis/opensearchserverless/v1beta2/zz_securityconfig_terraformed.go +++ /dev/null @@ -1,129 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta2 - -import ( - "dario.cat/mergo" - "github.com/pkg/errors" - - "github.com/crossplane/upjet/pkg/resource" - "github.com/crossplane/upjet/pkg/resource/json" -) - -// GetTerraformResourceType returns Terraform resource type for this SecurityConfig -func (mg *SecurityConfig) GetTerraformResourceType() string { - return "aws_opensearchserverless_security_config" -} - -// GetConnectionDetailsMapping for this SecurityConfig -func (tr *SecurityConfig) GetConnectionDetailsMapping() map[string]string { - return nil -} - -// GetObservation of this SecurityConfig -func (tr *SecurityConfig) GetObservation() (map[string]any, error) { - o, err := json.TFParser.Marshal(tr.Status.AtProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(o, &base) -} - -// SetObservation for this SecurityConfig -func (tr *SecurityConfig) SetObservation(obs map[string]any) error { - p, err := json.TFParser.Marshal(obs) - if err != nil { - return err - } - return 
json.TFParser.Unmarshal(p, &tr.Status.AtProvider) -} - -// GetID returns ID of underlying Terraform resource of this SecurityConfig -func (tr *SecurityConfig) GetID() string { - if tr.Status.AtProvider.ID == nil { - return "" - } - return *tr.Status.AtProvider.ID -} - -// GetParameters of this SecurityConfig -func (tr *SecurityConfig) GetParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.ForProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) -} - -// SetParameters for this SecurityConfig -func (tr *SecurityConfig) SetParameters(params map[string]any) error { - p, err := json.TFParser.Marshal(params) - if err != nil { - return err - } - return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) -} - -// GetInitParameters of this SecurityConfig -func (tr *SecurityConfig) GetInitParameters() (map[string]any, error) { - p, err := json.TFParser.Marshal(tr.Spec.InitProvider) - if err != nil { - return nil, err - } - base := map[string]any{} - return base, json.TFParser.Unmarshal(p, &base) -} - -// GetInitParameters of this SecurityConfig -func (tr *SecurityConfig) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { - params, err := tr.GetParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) - } - if !shouldMergeInitProvider { - return params, nil - } - - initParams, err := tr.GetInitParameters() - if err != nil { - return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) - } - - // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the - // slices from the initProvider to forProvider. As it also sets - // overwrite to true, we need to set it back to false, we don't - // want to overwrite the forProvider fields with the initProvider - // fields. 
- err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { - c.Overwrite = false - }) - if err != nil { - return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) - } - - return params, nil -} - -// LateInitialize this SecurityConfig using its observed tfState. -// returns True if there are any spec changes for the resource. -func (tr *SecurityConfig) LateInitialize(attrs []byte) (bool, error) { - params := &SecurityConfigParameters{} - if err := json.TFParser.Unmarshal(attrs, params); err != nil { - return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") - } - opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} - - li := resource.NewGenericLateInitializer(opts...) - return li.LateInitialize(&tr.Spec.ForProvider, params) -} - -// GetTerraformSchemaVersion returns the associated Terraform schema version -func (tr *SecurityConfig) GetTerraformSchemaVersion() int { - return 0 -} diff --git a/apis/opensearchserverless/v1beta2/zz_securityconfig_types.go b/apis/opensearchserverless/v1beta2/zz_securityconfig_types.go deleted file mode 100755 index 30b4d306b9..0000000000 --- a/apis/opensearchserverless/v1beta2/zz_securityconfig_types.go +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" -) - -type SAMLOptionsInitParameters struct { - - // Group attribute for this SAML integration. - GroupAttribute *string `json:"groupAttribute,omitempty" tf:"group_attribute,omitempty"` - - // The XML IdP metadata file generated from your identity provider. 
- Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` - - // Session timeout, in minutes. Minimum is 5 minutes and maximum is 720 minutes (12 hours). Default is 60 minutes. - SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` - - // User attribute for this SAML integration. - UserAttribute *string `json:"userAttribute,omitempty" tf:"user_attribute,omitempty"` -} - -type SAMLOptionsObservation struct { - - // Group attribute for this SAML integration. - GroupAttribute *string `json:"groupAttribute,omitempty" tf:"group_attribute,omitempty"` - - // The XML IdP metadata file generated from your identity provider. - Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` - - // Session timeout, in minutes. Minimum is 5 minutes and maximum is 720 minutes (12 hours). Default is 60 minutes. - SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` - - // User attribute for this SAML integration. - UserAttribute *string `json:"userAttribute,omitempty" tf:"user_attribute,omitempty"` -} - -type SAMLOptionsParameters struct { - - // Group attribute for this SAML integration. - // +kubebuilder:validation:Optional - GroupAttribute *string `json:"groupAttribute,omitempty" tf:"group_attribute,omitempty"` - - // The XML IdP metadata file generated from your identity provider. - // +kubebuilder:validation:Optional - Metadata *string `json:"metadata" tf:"metadata,omitempty"` - - // Session timeout, in minutes. Minimum is 5 minutes and maximum is 720 minutes (12 hours). Default is 60 minutes. - // +kubebuilder:validation:Optional - SessionTimeout *float64 `json:"sessionTimeout,omitempty" tf:"session_timeout,omitempty"` - - // User attribute for this SAML integration. 
- // +kubebuilder:validation:Optional - UserAttribute *string `json:"userAttribute,omitempty" tf:"user_attribute,omitempty"` -} - -type SecurityConfigInitParameters struct { - - // Description of the security configuration. - Description *string `json:"description,omitempty" tf:"description,omitempty"` - - // Configuration block for SAML options. - SAMLOptions *SAMLOptionsInitParameters `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` -} - -type SecurityConfigObservation struct { - - // Version of the configuration. - ConfigVersion *string `json:"configVersion,omitempty" tf:"config_version,omitempty"` - - // Description of the security configuration. - Description *string `json:"description,omitempty" tf:"description,omitempty"` - - ID *string `json:"id,omitempty" tf:"id,omitempty"` - - // Configuration block for SAML options. - SAMLOptions *SAMLOptionsObservation `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` - - // Type of configuration. Must be saml. - Type *string `json:"type,omitempty" tf:"type,omitempty"` -} - -type SecurityConfigParameters struct { - - // Description of the security configuration. - // +kubebuilder:validation:Optional - Description *string `json:"description,omitempty" tf:"description,omitempty"` - - // Region is the region you'd like your resource to be created in. - // +upjet:crd:field:TFTag=- - // +kubebuilder:validation:Required - Region *string `json:"region" tf:"-"` - - // Configuration block for SAML options. - // +kubebuilder:validation:Optional - SAMLOptions *SAMLOptionsParameters `json:"samlOptions,omitempty" tf:"saml_options,omitempty"` - - // Type of configuration. Must be saml. - // +kubebuilder:validation:Required - Type *string `json:"type" tf:"type,omitempty"` -} - -// SecurityConfigSpec defines the desired state of SecurityConfig -type SecurityConfigSpec struct { - v1.ResourceSpec `json:",inline"` - ForProvider SecurityConfigParameters `json:"forProvider"` - // THIS IS A BETA FIELD. 
It will be honored - // unless the Management Policies feature flag is disabled. - // InitProvider holds the same fields as ForProvider, with the exception - // of Identifier and other resource reference fields. The fields that are - // in InitProvider are merged into ForProvider when the resource is created. - // The same fields are also added to the terraform ignore_changes hook, to - // avoid updating them after creation. This is useful for fields that are - // required on creation, but we do not desire to update them after creation, - // for example because of an external controller is managing them, like an - // autoscaler. - InitProvider SecurityConfigInitParameters `json:"initProvider,omitempty"` -} - -// SecurityConfigStatus defines the observed state of SecurityConfig. -type SecurityConfigStatus struct { - v1.ResourceStatus `json:",inline"` - AtProvider SecurityConfigObservation `json:"atProvider,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status - -// SecurityConfig is the Schema for the SecurityConfigs API. 
-// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" -// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} -type SecurityConfig struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.samlOptions) || (has(self.initProvider) && has(self.initProvider.samlOptions))",message="spec.forProvider.samlOptions is a required parameter" - Spec SecurityConfigSpec `json:"spec"` - Status SecurityConfigStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// SecurityConfigList contains a list of SecurityConfigs -type SecurityConfigList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []SecurityConfig `json:"items"` -} - -// Repository type metadata. -var ( - SecurityConfig_Kind = "SecurityConfig" - SecurityConfig_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SecurityConfig_Kind}.String() - SecurityConfig_KindAPIVersion = SecurityConfig_Kind + "." 
+ CRDGroupVersion.String() - SecurityConfig_GroupVersionKind = CRDGroupVersion.WithKind(SecurityConfig_Kind) -) - -func init() { - SchemeBuilder.Register(&SecurityConfig{}, &SecurityConfigList{}) -} diff --git a/apis/zz_register.go b/apis/zz_register.go index 1740f7a0e4..ab91fcd102 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -217,7 +217,6 @@ import ( v1beta1opensearch "github.com/upbound/provider-aws/apis/opensearch/v1beta1" v1beta2opensearch "github.com/upbound/provider-aws/apis/opensearch/v1beta2" v1beta1opensearchserverless "github.com/upbound/provider-aws/apis/opensearchserverless/v1beta1" - v1beta2opensearchserverless "github.com/upbound/provider-aws/apis/opensearchserverless/v1beta2" v1beta1opsworks "github.com/upbound/provider-aws/apis/opsworks/v1beta1" v1beta2opsworks "github.com/upbound/provider-aws/apis/opsworks/v1beta2" v1beta1organizations "github.com/upbound/provider-aws/apis/organizations/v1beta1" @@ -510,7 +509,6 @@ func init() { v1beta1opensearch.SchemeBuilder.AddToScheme, v1beta2opensearch.SchemeBuilder.AddToScheme, v1beta1opensearchserverless.SchemeBuilder.AddToScheme, - v1beta2opensearchserverless.SchemeBuilder.AddToScheme, v1beta1opsworks.SchemeBuilder.AddToScheme, v1beta2opsworks.SchemeBuilder.AddToScheme, v1beta1organizations.SchemeBuilder.AddToScheme, diff --git a/config/opensearchserverless/config.go b/config/opensearchserverless/config.go index b13386db66..4b9f8674e0 100644 --- a/config/opensearchserverless/config.go +++ b/config/opensearchserverless/config.go @@ -11,6 +11,13 @@ import ( // Configure adds configurations for the opensearchserverless group. func Configure(p *config.Provider) { p.AddResourceConfigurator("aws_opensearchserverless_security_config", func(r *config.Resource) { + r.RemoveSingletonListConversion("saml_options") + // set the path saml_options as an embedded object to honor + // its single nested block schema. 
We need to have it converted + // into an embedded object but there's no need for + // the Terraform conversion (it already needs to be treated + // as an object at the Terraform layer and in the current MR API, + // it's already an embedded object). r.SchemaElementOptions.SetEmbeddedObject("saml_options") }) p.AddResourceConfigurator("aws_opensearchserverless_security_policy", func(r *config.Resource) { diff --git a/go.mod b/go.mod index 80d6258f20..82916f681e 100644 --- a/go.mod +++ b/go.mod @@ -363,3 +363,5 @@ require ( replace github.com/hashicorp/terraform-plugin-log => github.com/gdavison/terraform-plugin-log v0.0.0-20230928191232-6c653d8ef8fb replace github.com/hashicorp/terraform-provider-aws => github.com/upbound/terraform-provider-aws v0.0.0-20240523140457-101595b8576e + +replace github.com/crossplane/upjet => github.com/ulucinar/upbound-upjet v0.0.0-20240529194811-fc6948e3c67c diff --git a/go.sum b/go.sum index ce2c0011b2..2d54b03327 100644 --- a/go.sum +++ b/go.sum @@ -422,8 +422,6 @@ github.com/crossplane/crossplane-runtime v1.16.0-rc.2.0.20240510094504-3f697876f github.com/crossplane/crossplane-runtime v1.16.0-rc.2.0.20240510094504-3f697876fa57/go.mod h1:Pz2tdGVMF6KDGzHZOkvKro0nKc8EzK0sb/nSA7pH4Dc= github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79 h1:HigXs5tEQxWz0fcj8hzbU2UAZgEM7wPe0XRFOsrtF8Y= github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79/go.mod h1:+e4OaFlOcmr0JvINHl/yvEYBrZawzTgj6pQumOH1SS0= -github.com/crossplane/upjet v1.4.1 h1:f2HawXz8OGte1jhe3Fa+IIOca+eCsNFE3Fc5rnkF24Y= -github.com/crossplane/upjet v1.4.1/go.mod h1:3pDVtCgyBc5f2Zx4K5HEPxxhjndmOc5CHCJNpIivK/g= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/dave/jennifer v1.4.1 h1:XyqG6cn5RQsTj3qlWQTKlRGAyrTcsk1kUmWdZBzRjDw= @@ -712,6 +710,8 @@ github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmccombs/hcl2json v0.3.3 h1:+DLNYqpWE0CsOQiEZu+OZm5ZBImake3wtITYxQ8uLFQ= github.com/tmccombs/hcl2json v0.3.3/go.mod h1:Y2chtz2x9bAeRTvSibVRVgbLJhLJXKlUeIvjeVdnm4w= +github.com/ulucinar/upbound-upjet v0.0.0-20240529194811-fc6948e3c67c h1:IVb1OMy4jR4WrxZ+mrGtqiKAPpgQvqfykL6VsTXrMl8= +github.com/ulucinar/upbound-upjet v0.0.0-20240529194811-fc6948e3c67c/go.mod h1:3pDVtCgyBc5f2Zx4K5HEPxxhjndmOc5CHCJNpIivK/g= github.com/upbound/terraform-provider-aws v0.0.0-20240523140457-101595b8576e h1:Oq2PA7E9F52eMoc8vAqKMffek7gqymBSiXd5kiitHbE= github.com/upbound/terraform-provider-aws v0.0.0-20240523140457-101595b8576e/go.mod h1:yWeBcsPCtmEKOurig3GUxpnbQnw8AbEsHFVnakEsHUU= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= diff --git a/package/crds/opensearchserverless.aws.upbound.io_securityconfigs.yaml b/package/crds/opensearchserverless.aws.upbound.io_securityconfigs.yaml index eea308057d..f9b8e404a6 100644 --- a/package/crds/opensearchserverless.aws.upbound.io_securityconfigs.yaml +++ b/package/crds/opensearchserverless.aws.upbound.io_securityconfigs.yaml @@ -407,392 +407,3 @@ spec: storage: true subresources: status: {} - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Synced')].status - name: SYNCED - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: READY - type: string - - jsonPath: .metadata.annotations.crossplane\.io/external-name - name: EXTERNAL-NAME - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1beta2 - schema: - openAPIV3Schema: - description: SecurityConfig is the Schema for the SecurityConfigs API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. 
- Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SecurityConfigSpec defines the desired state of SecurityConfig - properties: - deletionPolicy: - default: Delete - description: |- - DeletionPolicy specifies what will happen to the underlying external - when this managed resource is deleted - either "Delete" or "Orphan" the - external resource. - This field is planned to be deprecated in favor of the ManagementPolicies - field in a future release. Currently, both could be set independently and - non-default values would be honored if the feature flag is enabled. - See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - enum: - - Orphan - - Delete - type: string - forProvider: - properties: - description: - description: Description of the security configuration. - type: string - region: - description: Region is the region you'd like your resource to - be created in. - type: string - samlOptions: - description: Configuration block for SAML options. - properties: - groupAttribute: - description: Group attribute for this SAML integration. - type: string - metadata: - description: The XML IdP metadata file generated from your - identity provider. - type: string - sessionTimeout: - description: Session timeout, in minutes. 
Minimum is 5 minutes - and maximum is 720 minutes (12 hours). Default is 60 minutes. - type: number - userAttribute: - description: User attribute for this SAML integration. - type: string - type: object - type: - description: Type of configuration. Must be saml. - type: string - required: - - region - - type - type: object - initProvider: - description: |- - THIS IS A BETA FIELD. It will be honored - unless the Management Policies feature flag is disabled. - InitProvider holds the same fields as ForProvider, with the exception - of Identifier and other resource reference fields. The fields that are - in InitProvider are merged into ForProvider when the resource is created. - The same fields are also added to the terraform ignore_changes hook, to - avoid updating them after creation. This is useful for fields that are - required on creation, but we do not desire to update them after creation, - for example because of an external controller is managing them, like an - autoscaler. - properties: - description: - description: Description of the security configuration. - type: string - samlOptions: - description: Configuration block for SAML options. - properties: - groupAttribute: - description: Group attribute for this SAML integration. - type: string - metadata: - description: The XML IdP metadata file generated from your - identity provider. - type: string - sessionTimeout: - description: Session timeout, in minutes. Minimum is 5 minutes - and maximum is 720 minutes (12 hours). Default is 60 minutes. - type: number - userAttribute: - description: User attribute for this SAML integration. - type: string - type: object - type: object - managementPolicies: - default: - - '*' - description: |- - THIS IS A BETA FIELD. It is on by default but can be opted out - through a Crossplane feature flag. - ManagementPolicies specify the array of actions Crossplane is allowed to - take on the managed and external resources. 
- This field is planned to replace the DeletionPolicy field in a future - release. Currently, both could be set independently and non-default - values would be honored if the feature flag is enabled. If both are - custom, the DeletionPolicy field will be ignored. - See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md - items: - description: |- - A ManagementAction represents an action that the Crossplane controllers - can take on an external resource. - enum: - - Observe - - Create - - Update - - Delete - - LateInitialize - - '*' - type: string - type: array - providerConfigRef: - default: - name: default - description: |- - ProviderConfigReference specifies how the provider that will be used to - create, observe, update, and delete this managed resource should be - configured. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. 
- enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - publishConnectionDetailsTo: - description: |- - PublishConnectionDetailsTo specifies the connection secret config which - contains a name, metadata and a reference to secret store config to - which any connection details for this managed resource should be written. - Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - properties: - configRef: - default: - name: default - description: |- - SecretStoreConfigRef specifies which secret store config should be used - for this ConnectionSecret. - properties: - name: - description: Name of the referenced object. - type: string - policy: - description: Policies for referencing. - properties: - resolution: - default: Required - description: |- - Resolution specifies whether resolution of this reference is required. - The default is 'Required', which means the reconcile will fail if the - reference cannot be resolved. 'Optional' means this reference will be - a no-op if it cannot be resolved. - enum: - - Required - - Optional - type: string - resolve: - description: |- - Resolve specifies when this reference should be resolved. The default - is 'IfNotPresent', which will attempt to resolve the reference only when - the corresponding field is not present. Use 'Always' to resolve the - reference on every reconcile. - enum: - - Always - - IfNotPresent - type: string - type: object - required: - - name - type: object - metadata: - description: Metadata is the metadata for connection secret. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are the annotations to be added to connection secret. - - For Kubernetes secrets, this will be used as "metadata.annotations". - - It is up to Secret Store implementation for others store types. 
- type: object - labels: - additionalProperties: - type: string - description: |- - Labels are the labels/tags to be added to connection secret. - - For Kubernetes secrets, this will be used as "metadata.labels". - - It is up to Secret Store implementation for others store types. - type: object - type: - description: |- - Type is the SecretType for the connection secret. - - Only valid for Kubernetes Secret Stores. - type: string - type: object - name: - description: Name is the name of the connection secret. - type: string - required: - - name - type: object - writeConnectionSecretToRef: - description: |- - WriteConnectionSecretToReference specifies the namespace and name of a - Secret to which any connection details for this managed resource should - be written. Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - This field is planned to be replaced in a future release in favor of - PublishConnectionDetailsTo. Currently, both could be set independently - and connection details would be published to both without affecting - each other. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - name - - namespace - type: object - required: - - forProvider - type: object - x-kubernetes-validations: - - message: spec.forProvider.samlOptions is a required parameter - rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies - || ''Update'' in self.managementPolicies) || has(self.forProvider.samlOptions) - || (has(self.initProvider) && has(self.initProvider.samlOptions))' - status: - description: SecurityConfigStatus defines the observed state of SecurityConfig. - properties: - atProvider: - properties: - configVersion: - description: Version of the configuration. - type: string - description: - description: Description of the security configuration. 
- type: string - id: - type: string - samlOptions: - description: Configuration block for SAML options. - properties: - groupAttribute: - description: Group attribute for this SAML integration. - type: string - metadata: - description: The XML IdP metadata file generated from your - identity provider. - type: string - sessionTimeout: - description: Session timeout, in minutes. Minimum is 5 minutes - and maximum is 720 minutes (12 hours). Default is 60 minutes. - type: number - userAttribute: - description: User attribute for this SAML integration. - type: string - type: object - type: - description: Type of configuration. Must be saml. - type: string - type: object - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. 
- type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: false - subresources: - status: {} From 266f5bebdcc42aaeed211e22afc26f48997261a0 Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Thu, 30 May 2024 14:04:48 +0300 Subject: [PATCH 07/10] Run "make generate" Signed-off-by: Alper Rifat Ulucinar --- .../zz_bucketlifecycleconfiguration_types.go | 12 +++--- ...ound.io_bucketlifecycleconfigurations.yaml | 42 +++++++------------ 2 files changed, 21 insertions(+), 33 deletions(-) diff --git a/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_types.go b/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_types.go index 4fdc3e6c13..7b063da66f 100755 --- a/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_types.go +++ b/apis/s3/v1beta2/zz_bucketlifecycleconfiguration_types.go @@ -40,7 +40,7 @@ type AndInitParameters struct { // Maximum object size (in bytes) to which the rule applies. ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` - // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + // Prefix identifying one or more objects to which the rule applies. Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` // Key-value map of resource tags. All of these tags must exist in the object's tag set in order for the rule to apply. 
@@ -56,7 +56,7 @@ type AndObservation struct { // Maximum object size (in bytes) to which the rule applies. ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` - // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + // Prefix identifying one or more objects to which the rule applies. Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` // Key-value map of resource tags. All of these tags must exist in the object's tag set in order for the rule to apply. @@ -74,7 +74,7 @@ type AndParameters struct { // +kubebuilder:validation:Optional ObjectSizeLessThan *float64 `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` - // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + // Prefix identifying one or more objects to which the rule applies. // +kubebuilder:validation:Optional Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` @@ -300,7 +300,7 @@ type RuleFilterInitParameters struct { // Maximum object size (in bytes) to which the rule applies. ObjectSizeLessThan *string `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` - // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + // Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if not specified. Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` // Configuration block for specifying a tag key and value. See below. 
@@ -318,7 +318,7 @@ type RuleFilterObservation struct { // Maximum object size (in bytes) to which the rule applies. ObjectSizeLessThan *string `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` - // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + // Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if not specified. Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` // Configuration block for specifying a tag key and value. See below. @@ -339,7 +339,7 @@ type RuleFilterParameters struct { // +kubebuilder:validation:Optional ObjectSizeLessThan *string `json:"objectSizeLessThan,omitempty" tf:"object_size_less_than,omitempty"` - // DEPRECATED Use filter instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if filter is not specified. + // Prefix identifying one or more objects to which the rule applies. Defaults to an empty string ("") if not specified. // +kubebuilder:validation:Optional Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` diff --git a/package/crds/s3.aws.upbound.io_bucketlifecycleconfigurations.yaml b/package/crds/s3.aws.upbound.io_bucketlifecycleconfigurations.yaml index 2b4a47365d..f5cea54c4c 100644 --- a/package/crds/s3.aws.upbound.io_bucketlifecycleconfigurations.yaml +++ b/package/crds/s3.aws.upbound.io_bucketlifecycleconfigurations.yaml @@ -1278,11 +1278,8 @@ spec: the rule applies. type: number prefix: - description: DEPRECATED Use filter instead. This - has been deprecated by Amazon S3. Prefix identifying - one or more objects to which the rule applies. - Defaults to an empty string ("") if filter is - not specified. + description: Prefix identifying one or more objects + to which the rule applies. 
type: string tags: additionalProperties: @@ -1302,10 +1299,9 @@ spec: the rule applies. type: string prefix: - description: DEPRECATED Use filter instead. This has - been deprecated by Amazon S3. Prefix identifying one - or more objects to which the rule applies. Defaults - to an empty string ("") if filter is not specified. + description: Prefix identifying one or more objects + to which the rule applies. Defaults to an empty string + ("") if not specified. type: string tag: description: Configuration block for specifying a tag @@ -1556,11 +1552,8 @@ spec: the rule applies. type: number prefix: - description: DEPRECATED Use filter instead. This - has been deprecated by Amazon S3. Prefix identifying - one or more objects to which the rule applies. - Defaults to an empty string ("") if filter is - not specified. + description: Prefix identifying one or more objects + to which the rule applies. type: string tags: additionalProperties: @@ -1580,10 +1573,9 @@ spec: the rule applies. type: string prefix: - description: DEPRECATED Use filter instead. This has - been deprecated by Amazon S3. Prefix identifying one - or more objects to which the rule applies. Defaults - to an empty string ("") if filter is not specified. + description: Prefix identifying one or more objects + to which the rule applies. Defaults to an empty string + ("") if not specified. type: string tag: description: Configuration block for specifying a tag @@ -1926,11 +1918,8 @@ spec: the rule applies. type: number prefix: - description: DEPRECATED Use filter instead. This - has been deprecated by Amazon S3. Prefix identifying - one or more objects to which the rule applies. - Defaults to an empty string ("") if filter is - not specified. + description: Prefix identifying one or more objects + to which the rule applies. type: string tags: additionalProperties: @@ -1950,10 +1939,9 @@ spec: the rule applies. type: string prefix: - description: DEPRECATED Use filter instead. 
This has - been deprecated by Amazon S3. Prefix identifying one - or more objects to which the rule applies. Defaults - to an empty string ("") if filter is not specified. + description: Prefix identifying one or more objects + to which the rule applies. Defaults to an empty string + ("") if not specified. type: string tag: description: Configuration block for specifying a tag From b4886b8a37008e16a9583d8f834dea105280e607 Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Thu, 30 May 2024 21:51:21 +0300 Subject: [PATCH 08/10] Sync the MaxItems constraints for singleton lists from the Go schema to the JSON schema. Signed-off-by: Alper Rifat Ulucinar --- config/registry.go | 36 +++++++++++++++++++++--------------- go.mod | 4 ++-- go.sum | 2 -- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/config/registry.go b/config/registry.go index 8be9150ad7..656e130547 100644 --- a/config/registry.go +++ b/config/registry.go @@ -6,19 +6,18 @@ package config import ( "context" + // Note(ezgidemirel): we are importing this to embed provider schema document + _ "embed" "fmt" "regexp" "strconv" - // Note(ezgidemirel): we are importing this to embed provider schema document - _ "embed" - "github.com/crossplane/upjet/pkg/config" "github.com/crossplane/upjet/pkg/config/conversion" "github.com/crossplane/upjet/pkg/registry/reference" + "github.com/crossplane/upjet/pkg/schema/traverser" conversiontfjson "github.com/crossplane/upjet/pkg/types/conversion/tfjson" tfjson "github.com/hashicorp/terraform-json" - fwprovider "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/xpprovider" "github.com/pkg/errors" @@ -94,17 +93,24 @@ func getProviderSchema(s string) (*schema.Provider, error) { // In that case, we will only use the JSON schema for generating // the CRDs. 
func GetProvider(ctx context.Context, generationProvider bool) (*config.Provider, error) { - var p *schema.Provider - var fwProvider fwprovider.Provider - var err error - if generationProvider { - p, err = getProviderSchema(providerSchema) - fwProvider, _, _ = xpprovider.GetProvider(ctx) - } else { - fwProvider, p, err = xpprovider.GetProvider(ctx) - } + fwProvider, sdkProvider, err := xpprovider.GetProvider(ctx) if err != nil { - return nil, errors.Wrapf(err, "cannot get the Terraform provider schema with generation mode set to %t", generationProvider) + return nil, errors.Wrap(err, "cannot get the Terraform framework and SDK providers") + } + + if generationProvider { + p, err := getProviderSchema(providerSchema) + if err != nil { + return nil, errors.Wrap(err, "cannot read the Terraform SDK provider from the JSON schema for code generation") + } + if err := traverser.TFResourceSchema(sdkProvider.ResourcesMap).TraverseTFSchemas(traverser.NewMaxItemsSync(p.ResourcesMap)); err != nil { + return nil, errors.Wrap(err, "cannot sync the MaxItems constraints between the Go schema and the JSON schema") + } + // use the JSON schema to temporarily prevent float64->int64 + // conversions in the CRD APIs. + // We would like to convert to int64s with the next major release of + // the provider. 
+ sdkProvider = p } modulePath := "github.com/upbound/provider-aws" @@ -119,7 +125,7 @@ func GetProvider(ctx context.Context, generationProvider bool) (*config.Provider config.WithSkipList(skipList), config.WithFeaturesPackage("internal/features"), config.WithMainTemplate(hack.MainTemplate), - config.WithTerraformProvider(p), + config.WithTerraformProvider(sdkProvider), config.WithTerraformPluginFrameworkProvider(fwProvider), config.WithSchemaTraversers(&config.SingletonListEmbedder{}), config.WithDefaultResourceOptions( diff --git a/go.mod b/go.mod index 82916f681e..db031b33cd 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,6 @@ require ( github.com/google/go-cmp v0.6.0 github.com/hashicorp/awspolicyequivalence v1.6.0 github.com/hashicorp/terraform-json v0.21.0 - github.com/hashicorp/terraform-plugin-framework v1.8.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 github.com/hashicorp/terraform-provider-aws v0.0.0-00010101000000-000000000000 github.com/json-iterator/go v1.1.12 @@ -274,6 +273,7 @@ require ( github.com/hashicorp/hcl/v2 v2.20.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.20.0 // indirect + github.com/hashicorp/terraform-plugin-framework v1.8.0 // indirect github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0 // indirect github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 // indirect github.com/hashicorp/terraform-plugin-framework-timetypes v0.3.0 // indirect @@ -364,4 +364,4 @@ replace github.com/hashicorp/terraform-plugin-log => github.com/gdavison/terrafo replace github.com/hashicorp/terraform-provider-aws => github.com/upbound/terraform-provider-aws v0.0.0-20240523140457-101595b8576e -replace github.com/crossplane/upjet => github.com/ulucinar/upbound-upjet v0.0.0-20240529194811-fc6948e3c67c +replace github.com/crossplane/upjet => ../upjet diff --git a/go.sum b/go.sum index 2d54b03327..455bd07f22 100644 --- a/go.sum +++ b/go.sum @@ -710,8 +710,6 @@ 
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmccombs/hcl2json v0.3.3 h1:+DLNYqpWE0CsOQiEZu+OZm5ZBImake3wtITYxQ8uLFQ= github.com/tmccombs/hcl2json v0.3.3/go.mod h1:Y2chtz2x9bAeRTvSibVRVgbLJhLJXKlUeIvjeVdnm4w= -github.com/ulucinar/upbound-upjet v0.0.0-20240529194811-fc6948e3c67c h1:IVb1OMy4jR4WrxZ+mrGtqiKAPpgQvqfykL6VsTXrMl8= -github.com/ulucinar/upbound-upjet v0.0.0-20240529194811-fc6948e3c67c/go.mod h1:3pDVtCgyBc5f2Zx4K5HEPxxhjndmOc5CHCJNpIivK/g= github.com/upbound/terraform-provider-aws v0.0.0-20240523140457-101595b8576e h1:Oq2PA7E9F52eMoc8vAqKMffek7gqymBSiXd5kiitHbE= github.com/upbound/terraform-provider-aws v0.0.0-20240523140457-101595b8576e/go.mod h1:yWeBcsPCtmEKOurig3GUxpnbQnw8AbEsHFVnakEsHUU= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= From e703dc19d69f13ecc440b5956c24245d11ac8a83 Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Fri, 31 May 2024 04:08:59 +0300 Subject: [PATCH 09/10] Delete the generated CRD API hub & spoke files during generation Signed-off-by: Alper Rifat Ulucinar --- .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ .../v1beta1/zz_generated.conversion_hubs.go | 28 --------------- .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ .../v1beta1/zz_generated.conversion_hubs.go | 13 ------- .../v1beta1/zz_generated.conversion_hubs.go | 13 ------- .../v1beta1/zz_generated.conversion_hubs.go | 16 --------- .../v1beta1/zz_generated.conversion_hubs.go | 13 ------- .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ .../v1beta1/zz_generated.conversion_hubs.go | 13 ------- .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ apis/generate.go | 1 + 
.../v1beta1/zz_generated.conversion_hubs.go | 22 ------------ .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ .../v1beta1/zz_generated.conversion_hubs.go | 19 ----------- .../v1beta1/zz_generated.conversion_spokes.go | 34 ------------------- .../v1beta1/zz_generated.conversion_hubs.go | 10 ------ 20 files changed, 1 insertion(+), 271 deletions(-) delete mode 100755 apis/appflow/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/appintegrations/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/appmesh/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/autoscalingplans/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/batch/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/budgets/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/codepipeline/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/datasync/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/dlm/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/elastictranscoder/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/emrserverless/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/firehose/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/fis/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/kendra/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/kinesisanalytics/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/mediaconvert/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/networkfirewall/v1beta1/zz_generated.conversion_hubs.go delete mode 100755 apis/opensearchserverless/v1beta1/zz_generated.conversion_spokes.go delete mode 100755 apis/resourcegroups/v1beta1/zz_generated.conversion_hubs.go diff --git a/apis/appflow/v1beta1/zz_generated.conversion_hubs.go b/apis/appflow/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 
index 65811d4bf3..0000000000 --- a/apis/appflow/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Flow) Hub() {} diff --git a/apis/appintegrations/v1beta1/zz_generated.conversion_hubs.go b/apis/appintegrations/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 0f79769b79..0000000000 --- a/apis/appintegrations/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *EventIntegration) Hub() {} diff --git a/apis/appmesh/v1beta1/zz_generated.conversion_hubs.go b/apis/appmesh/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 165ba4d2bb..0000000000 --- a/apis/appmesh/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,28 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *GatewayRoute) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Mesh) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Route) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VirtualGateway) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VirtualNode) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VirtualRouter) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *VirtualService) Hub() {} diff --git a/apis/autoscalingplans/v1beta1/zz_generated.conversion_hubs.go b/apis/autoscalingplans/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 145765452a..0000000000 --- a/apis/autoscalingplans/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *ScalingPlan) Hub() {} diff --git a/apis/batch/v1beta1/zz_generated.conversion_hubs.go b/apis/batch/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index a1bf6e20ea..0000000000 --- a/apis/batch/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *JobDefinition) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SchedulingPolicy) Hub() {} diff --git a/apis/budgets/v1beta1/zz_generated.conversion_hubs.go b/apis/budgets/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index c7c1d28b6e..0000000000 --- a/apis/budgets/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Budget) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *BudgetAction) Hub() {} diff --git a/apis/codepipeline/v1beta1/zz_generated.conversion_hubs.go b/apis/codepipeline/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 3c6e64a922..0000000000 --- a/apis/codepipeline/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,16 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Codepipeline) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *CustomActionType) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Webhook) Hub() {} diff --git a/apis/datasync/v1beta1/zz_generated.conversion_hubs.go b/apis/datasync/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 6f29d8dfee..0000000000 --- a/apis/datasync/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *LocationS3) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Task) Hub() {} diff --git a/apis/dlm/v1beta1/zz_generated.conversion_hubs.go b/apis/dlm/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index ae7d0b1146..0000000000 --- a/apis/dlm/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. 
-func (tr *LifecyclePolicy) Hub() {} diff --git a/apis/elastictranscoder/v1beta1/zz_generated.conversion_hubs.go b/apis/elastictranscoder/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 527818071f..0000000000 --- a/apis/elastictranscoder/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Pipeline) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Preset) Hub() {} diff --git a/apis/emrserverless/v1beta1/zz_generated.conversion_hubs.go b/apis/emrserverless/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 4ceeb8659c..0000000000 --- a/apis/emrserverless/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Application) Hub() {} diff --git a/apis/firehose/v1beta1/zz_generated.conversion_hubs.go b/apis/firehose/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index bc29aa2197..0000000000 --- a/apis/firehose/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. 
-func (tr *DeliveryStream) Hub() {} diff --git a/apis/fis/v1beta1/zz_generated.conversion_hubs.go b/apis/fis/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index f07f19f2b4..0000000000 --- a/apis/fis/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *ExperimentTemplate) Hub() {} diff --git a/apis/generate.go b/apis/generate.go index c18ca606ea..ba649db96a 100644 --- a/apis/generate.go +++ b/apis/generate.go @@ -12,6 +12,7 @@ //go:generate rm -rf ../package/crds // Remove generated files +//go:generate bash -c "find . \\( -iname 'zz_generated.conversion_hubs.go' -o -iname 'zz_generated.conversion_spokes.go' \\) -delete" //go:generate bash -c "find . -type d -empty -delete" //go:generate bash -c "find ../internal/controller -iname 'zz_*' -delete" //go:generate bash -c "find ../internal/controller -type d -empty -delete" diff --git a/apis/kendra/v1beta1/zz_generated.conversion_hubs.go b/apis/kendra/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index b33ff602ff..0000000000 --- a/apis/kendra/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *DataSource) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Experience) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Index) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *QuerySuggestionsBlockList) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *Thesaurus) Hub() {} diff --git a/apis/kinesisanalytics/v1beta1/zz_generated.conversion_hubs.go b/apis/kinesisanalytics/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 4ceeb8659c..0000000000 --- a/apis/kinesisanalytics/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Application) Hub() {} diff --git a/apis/mediaconvert/v1beta1/zz_generated.conversion_hubs.go b/apis/mediaconvert/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 3ffcd96c98..0000000000 --- a/apis/mediaconvert/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Queue) Hub() {} diff --git a/apis/networkfirewall/v1beta1/zz_generated.conversion_hubs.go b/apis/networkfirewall/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 1dbc71f194..0000000000 --- a/apis/networkfirewall/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Firewall) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FirewallPolicy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LoggingConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *RuleGroup) Hub() {} diff --git a/apis/opensearchserverless/v1beta1/zz_generated.conversion_spokes.go b/apis/opensearchserverless/v1beta1/zz_generated.conversion_spokes.go deleted file mode 100755 index 02d2057ba6..0000000000 --- a/apis/opensearchserverless/v1beta1/zz_generated.conversion_spokes.go +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -import ( - ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" - "github.com/crossplane/upjet/pkg/resource" - "github.com/pkg/errors" - "sigs.k8s.io/controller-runtime/pkg/conversion" -) - -// ConvertTo converts this SecurityConfig to the hub type. -func (tr *SecurityConfig) ConvertTo(dstRaw conversion.Hub) error { - spokeVersion := tr.GetObjectKind().GroupVersionKind().Version - hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version - if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { - return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) - } - return nil -} - -// ConvertFrom converts from the hub type to the SecurityConfig type. 
-func (tr *SecurityConfig) ConvertFrom(srcRaw conversion.Hub) error { - spokeVersion := tr.GetObjectKind().GroupVersionKind().Version - hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version - if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { - return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) - } - return nil -} diff --git a/apis/resourcegroups/v1beta1/zz_generated.conversion_hubs.go b/apis/resourcegroups/v1beta1/zz_generated.conversion_hubs.go deleted file mode 100755 index 21a8da1f4a..0000000000 --- a/apis/resourcegroups/v1beta1/zz_generated.conversion_hubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-FileCopyrightText: 2024 The Crossplane Authors -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by upjet. DO NOT EDIT. - -package v1beta1 - -// Hub marks this type as a conversion hub. -func (tr *Group) Hub() {} From d349a4d089f713d1cefadcc01d7660d817c10afe Mon Sep 17 00:00:00 2001 From: Alper Rifat Ulucinar Date: Fri, 31 May 2024 10:30:34 +0300 Subject: [PATCH 10/10] Bump upjet to commit 37c7f4e91d57 Signed-off-by: Alper Rifat Ulucinar --- apis/acm/v1beta1/zz_generated.resolvers.go | 52 +- apis/acm/v1beta2/zz_certificate_types.go | 18 + apis/acm/v1beta2/zz_generated.deepcopy.go | 20 + apis/acm/v1beta2/zz_generated.resolvers.go | 67 ++ apis/opensearch/v1beta2/zz_domain_types.go | 64 ++ .../v1beta2/zz_generated.deepcopy.go | 68 ++ .../v1beta2/zz_generated.resolvers.go | 128 +++ .../v1beta1/zz_endpointaccess_types.go | 4 +- .../v1beta1/zz_generated.conversion_hubs.go | 3 + .../v1beta1/zz_generated.resolvers.go | 4 +- .../v1beta1/zz_generated.conversion_spokes.go | 20 + .../v1beta2/zz_connector_terraformed.go | 129 +++ apis/transfer/v1beta2/zz_connector_types.go | 334 +++++++ .../v1beta2/zz_generated.conversion_hubs.go | 3 + .../transfer/v1beta2/zz_generated.deepcopy.go | 611 ++++++++++++ apis/transfer/v1beta2/zz_generated.managed.go | 60 ++ 
.../v1beta2/zz_generated.managedlist.go | 9 + .../v1beta2/zz_generated.resolvers.go | 98 +- config/registry.go | 2 +- .../transfer/v1beta2/connector.yaml | 24 + go.mod | 6 +- go.sum | 6 + .../crds/acm.aws.upbound.io_certificates.yaml | 152 +++ .../opensearch.aws.upbound.io_domains.yaml | 462 ++++++++++ .../transfer.aws.upbound.io_connectors.yaml | 866 ++++++++++++++++++ 25 files changed, 3198 insertions(+), 12 deletions(-) create mode 100644 apis/acm/v1beta2/zz_generated.resolvers.go create mode 100755 apis/transfer/v1beta2/zz_connector_terraformed.go create mode 100755 apis/transfer/v1beta2/zz_connector_types.go create mode 100644 examples-generated/transfer/v1beta2/connector.yaml diff --git a/apis/acm/v1beta1/zz_generated.resolvers.go b/apis/acm/v1beta1/zz_generated.resolvers.go index 37fe17bd6b..227cd509f0 100644 --- a/apis/acm/v1beta1/zz_generated.resolvers.go +++ b/apis/acm/v1beta1/zz_generated.resolvers.go @@ -16,12 +16,62 @@ import ( client "sigs.k8s.io/controller-runtime/pkg/client" ) -func (mg *CertificateValidation) ResolveReferences( // ResolveReferences of this CertificateValidation. +func (mg *Certificate) ResolveReferences( // ResolveReferences of this Certificate. 
ctx context.Context, c client.Reader) error { var m xpresource.Managed var l xpresource.ManagedList r := reference.NewAPIResolver(c, mg) + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CertificateAuthorityArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CertificateAuthorityArnRef, + Selector: mg.Spec.ForProvider.CertificateAuthorityArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CertificateAuthorityArn") + } + mg.Spec.ForProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CertificateAuthorityArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta1", "CertificateAuthority", "CertificateAuthorityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CertificateAuthorityArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CertificateAuthorityArnRef, + Selector: mg.Spec.InitProvider.CertificateAuthorityArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CertificateAuthorityArn") + } + mg.Spec.InitProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CertificateAuthorityArnRef = rsp.ResolvedReference + + return nil +} 
+ +// ResolveReferences of this CertificateValidation. +func (mg *CertificateValidation) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + var rsp reference.ResolutionResponse var err error { diff --git a/apis/acm/v1beta2/zz_certificate_types.go b/apis/acm/v1beta2/zz_certificate_types.go index 6c2773aa05..0566571849 100755 --- a/apis/acm/v1beta2/zz_certificate_types.go +++ b/apis/acm/v1beta2/zz_certificate_types.go @@ -16,8 +16,17 @@ import ( type CertificateInitParameters struct { // ARN of an ACM PCA + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` + // Reference to a CertificateAuthority in acmpca to populate certificateAuthorityArn. + // +kubebuilder:validation:Optional + CertificateAuthorityArnRef *v1.Reference `json:"certificateAuthorityArnRef,omitempty" tf:"-"` + + // Selector for a CertificateAuthority in acmpca to populate certificateAuthorityArn. + // +kubebuilder:validation:Optional + CertificateAuthorityArnSelector *v1.Selector `json:"certificateAuthorityArnSelector,omitempty" tf:"-"` + // Certificate's PEM-formatted public key CertificateBody *string `json:"certificateBody,omitempty" tf:"certificate_body,omitempty"` @@ -142,9 +151,18 @@ type CertificateObservation struct { type CertificateParameters struct { // ARN of an ACM PCA + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/acmpca/v1beta2.CertificateAuthority // +kubebuilder:validation:Optional CertificateAuthorityArn *string `json:"certificateAuthorityArn,omitempty" tf:"certificate_authority_arn,omitempty"` + // Reference to a CertificateAuthority in acmpca to populate certificateAuthorityArn. 
+ // +kubebuilder:validation:Optional + CertificateAuthorityArnRef *v1.Reference `json:"certificateAuthorityArnRef,omitempty" tf:"-"` + + // Selector for a CertificateAuthority in acmpca to populate certificateAuthorityArn. + // +kubebuilder:validation:Optional + CertificateAuthorityArnSelector *v1.Selector `json:"certificateAuthorityArnSelector,omitempty" tf:"-"` + // Certificate's PEM-formatted public key // +kubebuilder:validation:Optional CertificateBody *string `json:"certificateBody,omitempty" tf:"certificate_body,omitempty"` diff --git a/apis/acm/v1beta2/zz_generated.deepcopy.go b/apis/acm/v1beta2/zz_generated.deepcopy.go index 06ee9f8d60..18b1e14c91 100644 --- a/apis/acm/v1beta2/zz_generated.deepcopy.go +++ b/apis/acm/v1beta2/zz_generated.deepcopy.go @@ -48,6 +48,16 @@ func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters *out = new(string) **out = **in } + if in.CertificateAuthorityArnRef != nil { + in, out := &in.CertificateAuthorityArnRef, &out.CertificateAuthorityArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateAuthorityArnSelector != nil { + in, out := &in.CertificateAuthorityArnSelector, &out.CertificateAuthorityArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.CertificateBody != nil { in, out := &in.CertificateBody, &out.CertificateBody *out = new(string) @@ -344,6 +354,16 @@ func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { *out = new(string) **out = **in } + if in.CertificateAuthorityArnRef != nil { + in, out := &in.CertificateAuthorityArnRef, &out.CertificateAuthorityArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CertificateAuthorityArnSelector != nil { + in, out := &in.CertificateAuthorityArnSelector, &out.CertificateAuthorityArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.CertificateBody != nil { in, out := &in.CertificateBody, &out.CertificateBody *out = new(string) diff --git 
a/apis/acm/v1beta2/zz_generated.resolvers.go b/apis/acm/v1beta2/zz_generated.resolvers.go new file mode 100644 index 0000000000..51a5d29dd2 --- /dev/null +++ b/apis/acm/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Certificate) ResolveReferences( // ResolveReferences of this Certificate. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", "v1beta2", "CertificateAuthority", "CertificateAuthorityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CertificateAuthorityArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CertificateAuthorityArnRef, + Selector: mg.Spec.ForProvider.CertificateAuthorityArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CertificateAuthorityArn") + } + mg.Spec.ForProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CertificateAuthorityArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("acmpca.aws.upbound.io", 
"v1beta2", "CertificateAuthority", "CertificateAuthorityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CertificateAuthorityArn), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CertificateAuthorityArnRef, + Selector: mg.Spec.InitProvider.CertificateAuthorityArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CertificateAuthorityArn") + } + mg.Spec.InitProvider.CertificateAuthorityArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CertificateAuthorityArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/opensearch/v1beta2/zz_domain_types.go b/apis/opensearch/v1beta2/zz_domain_types.go index 79b65343d3..515c7b56fa 100755 --- a/apis/opensearch/v1beta2/zz_domain_types.go +++ b/apis/opensearch/v1beta2/zz_domain_types.go @@ -675,7 +675,17 @@ type EncryptAtRestInitParameters struct { Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` // KMS key ARN to encrypt the Elasticsearch domain with. If not specified then it defaults to using the aws/es service KMS key. Note that KMS will accept a KMS key ID but will return the key ARN. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. 
+ // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` } type EncryptAtRestObservation struct { @@ -694,8 +704,18 @@ type EncryptAtRestParameters struct { Enabled *bool `json:"enabled" tf:"enabled,omitempty"` // KMS key ARN to encrypt the Elasticsearch domain with. If not specified then it defaults to using the aws/es service KMS key. Note that KMS will accept a KMS key ID but will return the key ARN. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() // +kubebuilder:validation:Optional KMSKeyID *string `json:"kmsKeyId,omitempty" tf:"kms_key_id,omitempty"` + + // Reference to a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDRef *v1.Reference `json:"kmsKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKeyId. + // +kubebuilder:validation:Optional + KMSKeyIDSelector *v1.Selector `json:"kmsKeyIdSelector,omitempty" tf:"-"` } type LogPublishingOptionsInitParameters struct { @@ -936,11 +956,33 @@ type SoftwareUpdateOptionsParameters struct { type VPCOptionsInitParameters struct { + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + // List of VPC Security Group IDs to be applied to the OpenSearch domain endpoints. If omitted, the default Security Group for the VPC will be used. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector // +listType=set SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // List of VPC Subnet IDs for the OpenSearch domain endpoints to be created in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector // +listType=set SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` } @@ -965,12 +1007,34 @@ type VPCOptionsObservation struct { type VPCOptionsParameters struct { + // References to SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDRefs []v1.Reference `json:"securityGroupIdRefs,omitempty" tf:"-"` + + // Selector for a list of SecurityGroup in ec2 to populate securityGroupIds. + // +kubebuilder:validation:Optional + SecurityGroupIDSelector *v1.Selector `json:"securityGroupIdSelector,omitempty" tf:"-"` + // List of VPC Security Group IDs to be applied to the OpenSearch domain endpoints. If omitted, the default Security Group for the VPC will be used. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.SecurityGroup + // +crossplane:generate:reference:refFieldName=SecurityGroupIDRefs + // +crossplane:generate:reference:selectorFieldName=SecurityGroupIDSelector // +kubebuilder:validation:Optional // +listType=set SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + // References to Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDRefs []v1.Reference `json:"subnetIdRefs,omitempty" tf:"-"` + + // Selector for a list of Subnet in ec2 to populate subnetIds. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + // List of VPC Subnet IDs for the OpenSearch domain endpoints to be created in. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/ec2/v1beta1.Subnet + // +crossplane:generate:reference:refFieldName=SubnetIDRefs + // +crossplane:generate:reference:selectorFieldName=SubnetIDSelector // +kubebuilder:validation:Optional // +listType=set SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"` diff --git a/apis/opensearch/v1beta2/zz_generated.deepcopy.go b/apis/opensearch/v1beta2/zz_generated.deepcopy.go index 17f099a36d..0adffc87e1 100644 --- a/apis/opensearch/v1beta2/zz_generated.deepcopy.go +++ b/apis/opensearch/v1beta2/zz_generated.deepcopy.go @@ -1668,6 +1668,16 @@ func (in *EncryptAtRestInitParameters) DeepCopyInto(out *EncryptAtRestInitParame *out = new(string) **out = **in } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptAtRestInitParameters. 
@@ -1718,6 +1728,16 @@ func (in *EncryptAtRestParameters) DeepCopyInto(out *EncryptAtRestParameters) { *out = new(string) **out = **in } + if in.KMSKeyIDRef != nil { + in, out := &in.KMSKeyIDRef, &out.KMSKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeyIDSelector != nil { + in, out := &in.KMSKeyIDSelector, &out.KMSKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptAtRestParameters. @@ -2553,6 +2573,18 @@ func (in *SoftwareUpdateOptionsParameters) DeepCopy() *SoftwareUpdateOptionsPara // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VPCOptionsInitParameters) DeepCopyInto(out *VPCOptionsInitParameters) { *out = *in + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.SecurityGroupIds != nil { in, out := &in.SecurityGroupIds, &out.SecurityGroupIds *out = make([]*string, len(*in)) @@ -2564,6 +2596,18 @@ func (in *VPCOptionsInitParameters) DeepCopyInto(out *VPCOptionsInitParameters) } } } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.SubnetIds != nil { in, out := &in.SubnetIds, &out.SubnetIds *out = make([]*string, len(*in)) @@ -2643,6 +2687,18 @@ func (in *VPCOptionsObservation) DeepCopy() *VPCOptionsObservation { // DeepCopyInto is an 
autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VPCOptionsParameters) DeepCopyInto(out *VPCOptionsParameters) { *out = *in + if in.SecurityGroupIDRefs != nil { + in, out := &in.SecurityGroupIDRefs, &out.SecurityGroupIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroupIDSelector != nil { + in, out := &in.SecurityGroupIDSelector, &out.SecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.SecurityGroupIds != nil { in, out := &in.SecurityGroupIds, &out.SecurityGroupIds *out = make([]*string, len(*in)) @@ -2654,6 +2710,18 @@ func (in *VPCOptionsParameters) DeepCopyInto(out *VPCOptionsParameters) { } } } + if in.SubnetIDRefs != nil { + in, out := &in.SubnetIDRefs, &out.SubnetIDRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.SubnetIds != nil { in, out := &in.SubnetIds, &out.SubnetIds *out = make([]*string, len(*in)) diff --git a/apis/opensearch/v1beta2/zz_generated.resolvers.go b/apis/opensearch/v1beta2/zz_generated.resolvers.go index 6ada605bd1..92adfd5aed 100644 --- a/apis/opensearch/v1beta2/zz_generated.resolvers.go +++ b/apis/opensearch/v1beta2/zz_generated.resolvers.go @@ -13,6 +13,7 @@ import ( errors "github.com/pkg/errors" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" client "sigs.k8s.io/controller-runtime/pkg/client" // ResolveReferences of this Domain. 
@@ -25,8 +26,30 @@ func (mg *Domain) ResolveReferences(ctx context.Context, c client.Reader) error r := reference.NewAPIResolver(c, mg) var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse var err error + if mg.Spec.ForProvider.EncryptAtRest != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EncryptAtRest.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.EncryptAtRest.KMSKeyIDRef, + Selector: mg.Spec.ForProvider.EncryptAtRest.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EncryptAtRest.KMSKeyID") + } + mg.Spec.ForProvider.EncryptAtRest.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EncryptAtRest.KMSKeyIDRef = rsp.ResolvedReference + + } for i3 := 0; i3 < len(mg.Spec.ForProvider.LogPublishingOptions); i3++ { { m, l, err = apisresolver.GetManagedResource("cloudwatchlogs.aws.upbound.io", "v1beta1", "Group", "GroupList") @@ -47,6 +70,69 @@ func (mg *Domain) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef = rsp.ResolvedReference + } + if mg.Spec.ForProvider.VPCOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + 
CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCOptions.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCOptions.SecurityGroupIDRefs, + Selector: mg.Spec.ForProvider.VPCOptions.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCOptions.SecurityGroupIds") + } + mg.Spec.ForProvider.VPCOptions.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCOptions.SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.ForProvider.VPCOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.VPCOptions.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.ForProvider.VPCOptions.SubnetIDRefs, + Selector: mg.Spec.ForProvider.VPCOptions.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPCOptions.SubnetIds") + } + mg.Spec.ForProvider.VPCOptions.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.VPCOptions.SubnetIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.EncryptAtRest != nil { + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EncryptAtRest.KMSKeyID), + Extract: common.ARNExtractor(), + Reference: 
mg.Spec.InitProvider.EncryptAtRest.KMSKeyIDRef, + Selector: mg.Spec.InitProvider.EncryptAtRest.KMSKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EncryptAtRest.KMSKeyID") + } + mg.Spec.InitProvider.EncryptAtRest.KMSKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EncryptAtRest.KMSKeyIDRef = rsp.ResolvedReference + } for i3 := 0; i3 < len(mg.Spec.InitProvider.LogPublishingOptions); i3++ { { @@ -68,6 +154,48 @@ func (mg *Domain) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArn = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.LogPublishingOptions[i3].CloudwatchLogGroupArnRef = rsp.ResolvedReference + } + if mg.Spec.InitProvider.VPCOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCOptions.SecurityGroupIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCOptions.SecurityGroupIDRefs, + Selector: mg.Spec.InitProvider.VPCOptions.SecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCOptions.SecurityGroupIds") + } + mg.Spec.InitProvider.VPCOptions.SecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCOptions.SecurityGroupIDRefs = mrsp.ResolvedReferences + + } + if mg.Spec.InitProvider.VPCOptions != nil { + { + m, l, err = apisresolver.GetManagedResource("ec2.aws.upbound.io", "v1beta1", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the 
reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.VPCOptions.SubnetIds), + Extract: reference.ExternalName(), + References: mg.Spec.InitProvider.VPCOptions.SubnetIDRefs, + Selector: mg.Spec.InitProvider.VPCOptions.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VPCOptions.SubnetIds") + } + mg.Spec.InitProvider.VPCOptions.SubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.VPCOptions.SubnetIDRefs = mrsp.ResolvedReferences + } return nil diff --git a/apis/redshift/v1beta1/zz_endpointaccess_types.go b/apis/redshift/v1beta1/zz_endpointaccess_types.go index bad9925f9a..a5cf22e2a0 100755 --- a/apis/redshift/v1beta1/zz_endpointaccess_types.go +++ b/apis/redshift/v1beta1/zz_endpointaccess_types.go @@ -16,7 +16,7 @@ import ( type EndpointAccessInitParameters struct { // The cluster identifier of the cluster to access. - // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta2.Cluster ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` // Reference to a Cluster in redshift to populate clusterIdentifier. @@ -90,7 +90,7 @@ type EndpointAccessObservation struct { type EndpointAccessParameters struct { // The cluster identifier of the cluster to access. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/redshift/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterIdentifier *string `json:"clusterIdentifier,omitempty" tf:"cluster_identifier,omitempty"` diff --git a/apis/redshift/v1beta1/zz_generated.conversion_hubs.go b/apis/redshift/v1beta1/zz_generated.conversion_hubs.go index bb716ef044..d283b3ec0e 100755 --- a/apis/redshift/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/redshift/v1beta1/zz_generated.conversion_hubs.go @@ -9,6 +9,9 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *AuthenticationProfile) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *EndpointAccess) Hub() {} + // Hub marks this type as a conversion hub. func (tr *EventSubscription) Hub() {} diff --git a/apis/redshift/v1beta1/zz_generated.resolvers.go b/apis/redshift/v1beta1/zz_generated.resolvers.go index 1b2766f6b7..1b7ea4c0f4 100644 --- a/apis/redshift/v1beta1/zz_generated.resolvers.go +++ b/apis/redshift/v1beta1/zz_generated.resolvers.go @@ -194,7 +194,7 @@ func (mg *EndpointAccess) ResolveReferences(ctx context.Context, c client.Reader var mrsp reference.MultiResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -251,7 +251,7 @@ func (mg *EndpointAccess) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.VPCSecurityGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.VPCSecurityGroupIDRefs = mrsp.ResolvedReferences { - m, l, err = 
apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("redshift.aws.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/transfer/v1beta1/zz_generated.conversion_spokes.go b/apis/transfer/v1beta1/zz_generated.conversion_spokes.go index 833fae5ff5..7e0dd468ea 100755 --- a/apis/transfer/v1beta1/zz_generated.conversion_spokes.go +++ b/apis/transfer/v1beta1/zz_generated.conversion_spokes.go @@ -13,6 +13,26 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" ) +// ConvertTo converts this Connector to the hub type. +func (tr *Connector) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Connector type. +func (tr *Connector) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + // ConvertTo converts this Server to the hub type. 
func (tr *Server) ConvertTo(dstRaw conversion.Hub) error { spokeVersion := tr.GetObjectKind().GroupVersionKind().Version diff --git a/apis/transfer/v1beta2/zz_connector_terraformed.go b/apis/transfer/v1beta2/zz_connector_terraformed.go new file mode 100755 index 0000000000..8c79b73a2c --- /dev/null +++ b/apis/transfer/v1beta2/zz_connector_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Connector +func (mg *Connector) GetTerraformResourceType() string { + return "aws_transfer_connector" +} + +// GetConnectionDetailsMapping for this Connector +func (tr *Connector) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Connector +func (tr *Connector) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Connector +func (tr *Connector) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Connector +func (tr *Connector) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Connector +func (tr *Connector) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Connector +func (tr *Connector) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Connector +func (tr *Connector) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Connector +func (tr *Connector) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Connector using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Connector) LateInitialize(attrs []byte) (bool, error) { + params := &ConnectorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Connector) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/transfer/v1beta2/zz_connector_types.go b/apis/transfer/v1beta2/zz_connector_types.go new file mode 100755 index 0000000000..1f1047e7d7 --- /dev/null +++ b/apis/transfer/v1beta2/zz_connector_types.go @@ -0,0 +1,334 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type As2ConfigInitParameters struct { + + // Specifies weather AS2 file is compressed. The valud values are ZLIB and DISABLED. + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // The algorithm that is used to encrypt the file. The valid values are AES128_CBC | AES192_CBC | AES256_CBC | NONE. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // The unique identifier for the AS2 local profile. + LocalProfileID *string `json:"localProfileId,omitempty" tf:"local_profile_id,omitempty"` + + // Used for outbound requests to determine if a partner response for transfers is synchronous or asynchronous. The valid values are SYNC and NONE. 
+ MdnResponse *string `json:"mdnResponse,omitempty" tf:"mdn_response,omitempty"` + + // The signing algorithm for the Mdn response. The valid values are SHA256 | SHA384 | SHA512 | SHA1 | NONE | DEFAULT. + MdnSigningAlgorithm *string `json:"mdnSigningAlgorithm,omitempty" tf:"mdn_signing_algorithm,omitempty"` + + // Used as the subject HTTP header attribute in AS2 messages that are being sent with the connector. + MessageSubject *string `json:"messageSubject,omitempty" tf:"message_subject,omitempty"` + + // The unique identifier for the AS2 partner profile. + PartnerProfileID *string `json:"partnerProfileId,omitempty" tf:"partner_profile_id,omitempty"` + + // The algorithm that is used to sign AS2 messages sent with the connector. The valid values are SHA256 | SHA384 | SHA512 | SHA1 | NONE . + SigningAlgorithm *string `json:"signingAlgorithm,omitempty" tf:"signing_algorithm,omitempty"` +} + +type As2ConfigObservation struct { + + // Specifies weather AS2 file is compressed. The valud values are ZLIB and DISABLED. + Compression *string `json:"compression,omitempty" tf:"compression,omitempty"` + + // The algorithm that is used to encrypt the file. The valid values are AES128_CBC | AES192_CBC | AES256_CBC | NONE. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // The unique identifier for the AS2 local profile. + LocalProfileID *string `json:"localProfileId,omitempty" tf:"local_profile_id,omitempty"` + + // Used for outbound requests to determine if a partner response for transfers is synchronous or asynchronous. The valid values are SYNC and NONE. + MdnResponse *string `json:"mdnResponse,omitempty" tf:"mdn_response,omitempty"` + + // The signing algorithm for the Mdn response. The valid values are SHA256 | SHA384 | SHA512 | SHA1 | NONE | DEFAULT. 
+ MdnSigningAlgorithm *string `json:"mdnSigningAlgorithm,omitempty" tf:"mdn_signing_algorithm,omitempty"` + + // Used as the subject HTTP header attribute in AS2 messages that are being sent with the connector. + MessageSubject *string `json:"messageSubject,omitempty" tf:"message_subject,omitempty"` + + // The unique identifier for the AS2 partner profile. + PartnerProfileID *string `json:"partnerProfileId,omitempty" tf:"partner_profile_id,omitempty"` + + // The algorithm that is used to sign AS2 messages sent with the connector. The valid values are SHA256 | SHA384 | SHA512 | SHA1 | NONE . + SigningAlgorithm *string `json:"signingAlgorithm,omitempty" tf:"signing_algorithm,omitempty"` +} + +type As2ConfigParameters struct { + + // Specifies weather AS2 file is compressed. The valud values are ZLIB and DISABLED. + // +kubebuilder:validation:Optional + Compression *string `json:"compression" tf:"compression,omitempty"` + + // The algorithm that is used to encrypt the file. The valid values are AES128_CBC | AES192_CBC | AES256_CBC | NONE. + // +kubebuilder:validation:Optional + EncryptionAlgorithm *string `json:"encryptionAlgorithm" tf:"encryption_algorithm,omitempty"` + + // The unique identifier for the AS2 local profile. + // +kubebuilder:validation:Optional + LocalProfileID *string `json:"localProfileId" tf:"local_profile_id,omitempty"` + + // Used for outbound requests to determine if a partner response for transfers is synchronous or asynchronous. The valid values are SYNC and NONE. + // +kubebuilder:validation:Optional + MdnResponse *string `json:"mdnResponse" tf:"mdn_response,omitempty"` + + // The signing algorithm for the Mdn response. The valid values are SHA256 | SHA384 | SHA512 | SHA1 | NONE | DEFAULT. + // +kubebuilder:validation:Optional + MdnSigningAlgorithm *string `json:"mdnSigningAlgorithm,omitempty" tf:"mdn_signing_algorithm,omitempty"` + + // Used as the subject HTTP header attribute in AS2 messages that are being sent with the connector. 
+ // +kubebuilder:validation:Optional + MessageSubject *string `json:"messageSubject,omitempty" tf:"message_subject,omitempty"` + + // The unique identifier for the AS2 partner profile. + // +kubebuilder:validation:Optional + PartnerProfileID *string `json:"partnerProfileId" tf:"partner_profile_id,omitempty"` + + // The algorithm that is used to sign AS2 messages sent with the connector. The valid values are SHA256 | SHA384 | SHA512 | SHA1 | NONE . + // +kubebuilder:validation:Optional + SigningAlgorithm *string `json:"signingAlgorithm" tf:"signing_algorithm,omitempty"` +} + +type ConnectorInitParameters struct { + + // The IAM Role which provides read and write access to the parent directory of the file location mentioned in the StartFileTransfer request. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + AccessRole *string `json:"accessRole,omitempty" tf:"access_role,omitempty"` + + // Reference to a Role in iam to populate accessRole. + // +kubebuilder:validation:Optional + AccessRoleRef *v1.Reference `json:"accessRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate accessRole. + // +kubebuilder:validation:Optional + AccessRoleSelector *v1.Selector `json:"accessRoleSelector,omitempty" tf:"-"` + + // Either SFTP or AS2 is configured.The parameters to configure for the connector object. Fields documented below. + As2Config *As2ConfigInitParameters `json:"as2Config,omitempty" tf:"as2_config,omitempty"` + + // The IAM Role which is required for allowing the connector to turn on CloudWatch logging for Amazon S3 events. + LoggingRole *string `json:"loggingRole,omitempty" tf:"logging_role,omitempty"` + + // Name of the security policy for the connector. 
+ SecurityPolicyName *string `json:"securityPolicyName,omitempty" tf:"security_policy_name,omitempty"` + + // Either SFTP or AS2 is configured.The parameters to configure for the connector object. Fields documented below. + SftpConfig *SftpConfigInitParameters `json:"sftpConfig,omitempty" tf:"sftp_config,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The URL of the partners AS2 endpoint or SFTP endpoint. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type ConnectorObservation struct { + + // The IAM Role which provides read and write access to the parent directory of the file location mentioned in the StartFileTransfer request. + AccessRole *string `json:"accessRole,omitempty" tf:"access_role,omitempty"` + + // The ARN of the connector. + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Either SFTP or AS2 is configured.The parameters to configure for the connector object. Fields documented below. + As2Config *As2ConfigObservation `json:"as2Config,omitempty" tf:"as2_config,omitempty"` + + // The unique identifier for the AS2 profile or SFTP Profile. + ConnectorID *string `json:"connectorId,omitempty" tf:"connector_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The IAM Role which is required for allowing the connector to turn on CloudWatch logging for Amazon S3 events. + LoggingRole *string `json:"loggingRole,omitempty" tf:"logging_role,omitempty"` + + // Name of the security policy for the connector. + SecurityPolicyName *string `json:"securityPolicyName,omitempty" tf:"security_policy_name,omitempty"` + + // Either SFTP or AS2 is configured.The parameters to configure for the connector object. Fields documented below. + SftpConfig *SftpConfigObservation `json:"sftpConfig,omitempty" tf:"sftp_config,omitempty"` + + // Key-value map of resource tags. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // The URL of the partners AS2 endpoint or SFTP endpoint. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type ConnectorParameters struct { + + // The IAM Role which provides read and write access to the parent directory of the file location mentioned in the StartFileTransfer request. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + AccessRole *string `json:"accessRole,omitempty" tf:"access_role,omitempty"` + + // Reference to a Role in iam to populate accessRole. + // +kubebuilder:validation:Optional + AccessRoleRef *v1.Reference `json:"accessRoleRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate accessRole. + // +kubebuilder:validation:Optional + AccessRoleSelector *v1.Selector `json:"accessRoleSelector,omitempty" tf:"-"` + + // Either SFTP or AS2 is configured.The parameters to configure for the connector object. Fields documented below. + // +kubebuilder:validation:Optional + As2Config *As2ConfigParameters `json:"as2Config,omitempty" tf:"as2_config,omitempty"` + + // The IAM Role which is required for allowing the connector to turn on CloudWatch logging for Amazon S3 events. + // +kubebuilder:validation:Optional + LoggingRole *string `json:"loggingRole,omitempty" tf:"logging_role,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // Name of the security policy for the connector. 
+ // +kubebuilder:validation:Optional + SecurityPolicyName *string `json:"securityPolicyName,omitempty" tf:"security_policy_name,omitempty"` + + // Either SFTP or AS2 is configured.The parameters to configure for the connector object. Fields documented below. + // +kubebuilder:validation:Optional + SftpConfig *SftpConfigParameters `json:"sftpConfig,omitempty" tf:"sftp_config,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The URL of the partners AS2 endpoint or SFTP endpoint. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type SftpConfigInitParameters struct { + + // A list of public portion of the host key, or keys, that are used to authenticate the user to the external server to which you are connecting.(https://docs.aws.amazon.com/transfer/latest/userguide/API_SftpConnectorConfig.html) + // +listType=set + TrustedHostKeys []*string `json:"trustedHostKeys,omitempty" tf:"trusted_host_keys,omitempty"` + + // The identifier for the secret (in AWS Secrets Manager) that contains the SFTP user's private key, password, or both. The identifier can be either the Amazon Resource Name (ARN) or the name of the secret. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + UserSecretID *string `json:"userSecretId,omitempty" tf:"user_secret_id,omitempty"` + + // Reference to a Secret in secretsmanager to populate userSecretId. + // +kubebuilder:validation:Optional + UserSecretIDRef *v1.Reference `json:"userSecretIdRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate userSecretId. 
+ // +kubebuilder:validation:Optional + UserSecretIDSelector *v1.Selector `json:"userSecretIdSelector,omitempty" tf:"-"` +} + +type SftpConfigObservation struct { + + // A list of public portion of the host key, or keys, that are used to authenticate the user to the external server to which you are connecting.(https://docs.aws.amazon.com/transfer/latest/userguide/API_SftpConnectorConfig.html) + // +listType=set + TrustedHostKeys []*string `json:"trustedHostKeys,omitempty" tf:"trusted_host_keys,omitempty"` + + // The identifier for the secret (in AWS Secrets Manager) that contains the SFTP user's private key, password, or both. The identifier can be either the Amazon Resource Name (ARN) or the name of the secret. + UserSecretID *string `json:"userSecretId,omitempty" tf:"user_secret_id,omitempty"` +} + +type SftpConfigParameters struct { + + // A list of public portion of the host key, or keys, that are used to authenticate the user to the external server to which you are connecting.(https://docs.aws.amazon.com/transfer/latest/userguide/API_SftpConnectorConfig.html) + // +kubebuilder:validation:Optional + // +listType=set + TrustedHostKeys []*string `json:"trustedHostKeys,omitempty" tf:"trusted_host_keys,omitempty"` + + // The identifier for the secret (in AWS Secrets Manager) that contains the SFTP user's private key, password, or both. The identifier can be either the Amazon Resource Name (ARN) or the name of the secret. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/secretsmanager/v1beta1.Secret + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + UserSecretID *string `json:"userSecretId,omitempty" tf:"user_secret_id,omitempty"` + + // Reference to a Secret in secretsmanager to populate userSecretId. 
+ // +kubebuilder:validation:Optional + UserSecretIDRef *v1.Reference `json:"userSecretIdRef,omitempty" tf:"-"` + + // Selector for a Secret in secretsmanager to populate userSecretId. + // +kubebuilder:validation:Optional + UserSecretIDSelector *v1.Selector `json:"userSecretIdSelector,omitempty" tf:"-"` +} + +// ConnectorSpec defines the desired state of Connector +type ConnectorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConnectorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConnectorInitParameters `json:"initProvider,omitempty"` +} + +// ConnectorStatus defines the observed state of Connector. +type ConnectorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConnectorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Connector is the Schema for the Connectors API. 
Provides a AWS Transfer AS2 Connector Resource +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Connector struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.url) || (has(self.initProvider) && has(self.initProvider.url))",message="spec.forProvider.url is a required parameter" + Spec ConnectorSpec `json:"spec"` + Status ConnectorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConnectorList contains a list of Connectors +type ConnectorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Connector `json:"items"` +} + +// Repository type metadata. +var ( + Connector_Kind = "Connector" + Connector_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Connector_Kind}.String() + Connector_KindAPIVersion = Connector_Kind + "." 
+ CRDGroupVersion.String() + Connector_GroupVersionKind = CRDGroupVersion.WithKind(Connector_Kind) +) + +func init() { + SchemeBuilder.Register(&Connector{}, &ConnectorList{}) +} diff --git a/apis/transfer/v1beta2/zz_generated.conversion_hubs.go b/apis/transfer/v1beta2/zz_generated.conversion_hubs.go index af012d5586..b3e6b7109c 100755 --- a/apis/transfer/v1beta2/zz_generated.conversion_hubs.go +++ b/apis/transfer/v1beta2/zz_generated.conversion_hubs.go @@ -6,6 +6,9 @@ package v1beta2 +// Hub marks this type as a conversion hub. +func (tr *Connector) Hub() {} + // Hub marks this type as a conversion hub. func (tr *Server) Hub() {} diff --git a/apis/transfer/v1beta2/zz_generated.deepcopy.go b/apis/transfer/v1beta2/zz_generated.deepcopy.go index eb4a746558..2d73238011 100644 --- a/apis/transfer/v1beta2/zz_generated.deepcopy.go +++ b/apis/transfer/v1beta2/zz_generated.deepcopy.go @@ -13,6 +13,504 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *As2ConfigInitParameters) DeepCopyInto(out *As2ConfigInitParameters) { + *out = *in + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.LocalProfileID != nil { + in, out := &in.LocalProfileID, &out.LocalProfileID + *out = new(string) + **out = **in + } + if in.MdnResponse != nil { + in, out := &in.MdnResponse, &out.MdnResponse + *out = new(string) + **out = **in + } + if in.MdnSigningAlgorithm != nil { + in, out := &in.MdnSigningAlgorithm, &out.MdnSigningAlgorithm + *out = new(string) + **out = **in + } + if in.MessageSubject != nil { + in, out := &in.MessageSubject, &out.MessageSubject + *out = new(string) + **out = **in + } + if in.PartnerProfileID != nil { + in, out := &in.PartnerProfileID, &out.PartnerProfileID + *out = new(string) + **out = **in + } + if in.SigningAlgorithm != nil { + in, out := &in.SigningAlgorithm, &out.SigningAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new As2ConfigInitParameters. +func (in *As2ConfigInitParameters) DeepCopy() *As2ConfigInitParameters { + if in == nil { + return nil + } + out := new(As2ConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *As2ConfigObservation) DeepCopyInto(out *As2ConfigObservation) { + *out = *in + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.LocalProfileID != nil { + in, out := &in.LocalProfileID, &out.LocalProfileID + *out = new(string) + **out = **in + } + if in.MdnResponse != nil { + in, out := &in.MdnResponse, &out.MdnResponse + *out = new(string) + **out = **in + } + if in.MdnSigningAlgorithm != nil { + in, out := &in.MdnSigningAlgorithm, &out.MdnSigningAlgorithm + *out = new(string) + **out = **in + } + if in.MessageSubject != nil { + in, out := &in.MessageSubject, &out.MessageSubject + *out = new(string) + **out = **in + } + if in.PartnerProfileID != nil { + in, out := &in.PartnerProfileID, &out.PartnerProfileID + *out = new(string) + **out = **in + } + if in.SigningAlgorithm != nil { + in, out := &in.SigningAlgorithm, &out.SigningAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new As2ConfigObservation. +func (in *As2ConfigObservation) DeepCopy() *As2ConfigObservation { + if in == nil { + return nil + } + out := new(As2ConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *As2ConfigParameters) DeepCopyInto(out *As2ConfigParameters) { + *out = *in + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.LocalProfileID != nil { + in, out := &in.LocalProfileID, &out.LocalProfileID + *out = new(string) + **out = **in + } + if in.MdnResponse != nil { + in, out := &in.MdnResponse, &out.MdnResponse + *out = new(string) + **out = **in + } + if in.MdnSigningAlgorithm != nil { + in, out := &in.MdnSigningAlgorithm, &out.MdnSigningAlgorithm + *out = new(string) + **out = **in + } + if in.MessageSubject != nil { + in, out := &in.MessageSubject, &out.MessageSubject + *out = new(string) + **out = **in + } + if in.PartnerProfileID != nil { + in, out := &in.PartnerProfileID, &out.PartnerProfileID + *out = new(string) + **out = **in + } + if in.SigningAlgorithm != nil { + in, out := &in.SigningAlgorithm, &out.SigningAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new As2ConfigParameters. +func (in *As2ConfigParameters) DeepCopy() *As2ConfigParameters { + if in == nil { + return nil + } + out := new(As2ConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Connector) DeepCopyInto(out *Connector) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connector. 
+func (in *Connector) DeepCopy() *Connector { + if in == nil { + return nil + } + out := new(Connector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Connector) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorInitParameters) DeepCopyInto(out *ConnectorInitParameters) { + *out = *in + if in.AccessRole != nil { + in, out := &in.AccessRole, &out.AccessRole + *out = new(string) + **out = **in + } + if in.AccessRoleRef != nil { + in, out := &in.AccessRoleRef, &out.AccessRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccessRoleSelector != nil { + in, out := &in.AccessRoleSelector, &out.AccessRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.As2Config != nil { + in, out := &in.As2Config, &out.As2Config + *out = new(As2ConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LoggingRole != nil { + in, out := &in.LoggingRole, &out.LoggingRole + *out = new(string) + **out = **in + } + if in.SecurityPolicyName != nil { + in, out := &in.SecurityPolicyName, &out.SecurityPolicyName + *out = new(string) + **out = **in + } + if in.SftpConfig != nil { + in, out := &in.SftpConfig, &out.SftpConfig + *out = new(SftpConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new ConnectorInitParameters. +func (in *ConnectorInitParameters) DeepCopy() *ConnectorInitParameters { + if in == nil { + return nil + } + out := new(ConnectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorList) DeepCopyInto(out *ConnectorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Connector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorList. +func (in *ConnectorList) DeepCopy() *ConnectorList { + if in == nil { + return nil + } + out := new(ConnectorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConnectorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectorObservation) DeepCopyInto(out *ConnectorObservation) { + *out = *in + if in.AccessRole != nil { + in, out := &in.AccessRole, &out.AccessRole + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.As2Config != nil { + in, out := &in.As2Config, &out.As2Config + *out = new(As2ConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ConnectorID != nil { + in, out := &in.ConnectorID, &out.ConnectorID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoggingRole != nil { + in, out := &in.LoggingRole, &out.LoggingRole + *out = new(string) + **out = **in + } + if in.SecurityPolicyName != nil { + in, out := &in.SecurityPolicyName, &out.SecurityPolicyName + *out = new(string) + **out = **in + } + if in.SftpConfig != nil { + in, out := &in.SftpConfig, &out.SftpConfig + *out = new(SftpConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorObservation. 
+func (in *ConnectorObservation) DeepCopy() *ConnectorObservation { + if in == nil { + return nil + } + out := new(ConnectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorParameters) DeepCopyInto(out *ConnectorParameters) { + *out = *in + if in.AccessRole != nil { + in, out := &in.AccessRole, &out.AccessRole + *out = new(string) + **out = **in + } + if in.AccessRoleRef != nil { + in, out := &in.AccessRoleRef, &out.AccessRoleRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccessRoleSelector != nil { + in, out := &in.AccessRoleSelector, &out.AccessRoleSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.As2Config != nil { + in, out := &in.As2Config, &out.As2Config + *out = new(As2ConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.LoggingRole != nil { + in, out := &in.LoggingRole, &out.LoggingRole + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.SecurityPolicyName != nil { + in, out := &in.SecurityPolicyName, &out.SecurityPolicyName + *out = new(string) + **out = **in + } + if in.SftpConfig != nil { + in, out := &in.SftpConfig, &out.SftpConfig + *out = new(SftpConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorParameters. 
+func (in *ConnectorParameters) DeepCopy() *ConnectorParameters { + if in == nil { + return nil + } + out := new(ConnectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorSpec) DeepCopyInto(out *ConnectorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorSpec. +func (in *ConnectorSpec) DeepCopy() *ConnectorSpec { + if in == nil { + return nil + } + out := new(ConnectorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorStatus) DeepCopyInto(out *ConnectorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorStatus. +func (in *ConnectorStatus) DeepCopy() *ConnectorStatus { + if in == nil { + return nil + } + out := new(ConnectorStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters) DeepCopyInto(out *CopyStepDetailsDestinationFileLocationEFSFileLocationInitParameters) { *out = *in @@ -2712,6 +3210,119 @@ func (in *ServerStatus) DeepCopy() *ServerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SftpConfigInitParameters) DeepCopyInto(out *SftpConfigInitParameters) { + *out = *in + if in.TrustedHostKeys != nil { + in, out := &in.TrustedHostKeys, &out.TrustedHostKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserSecretID != nil { + in, out := &in.UserSecretID, &out.UserSecretID + *out = new(string) + **out = **in + } + if in.UserSecretIDRef != nil { + in, out := &in.UserSecretIDRef, &out.UserSecretIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserSecretIDSelector != nil { + in, out := &in.UserSecretIDSelector, &out.UserSecretIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SftpConfigInitParameters. +func (in *SftpConfigInitParameters) DeepCopy() *SftpConfigInitParameters { + if in == nil { + return nil + } + out := new(SftpConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SftpConfigObservation) DeepCopyInto(out *SftpConfigObservation) { + *out = *in + if in.TrustedHostKeys != nil { + in, out := &in.TrustedHostKeys, &out.TrustedHostKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserSecretID != nil { + in, out := &in.UserSecretID, &out.UserSecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SftpConfigObservation. 
+func (in *SftpConfigObservation) DeepCopy() *SftpConfigObservation { + if in == nil { + return nil + } + out := new(SftpConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SftpConfigParameters) DeepCopyInto(out *SftpConfigParameters) { + *out = *in + if in.TrustedHostKeys != nil { + in, out := &in.TrustedHostKeys, &out.TrustedHostKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserSecretID != nil { + in, out := &in.UserSecretID, &out.UserSecretID + *out = new(string) + **out = **in + } + if in.UserSecretIDRef != nil { + in, out := &in.UserSecretIDRef, &out.UserSecretIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserSecretIDSelector != nil { + in, out := &in.UserSecretIDSelector, &out.UserSecretIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SftpConfigParameters. +func (in *SftpConfigParameters) DeepCopy() *SftpConfigParameters { + if in == nil { + return nil + } + out := new(SftpConfigParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StepsCopyStepDetailsInitParameters) DeepCopyInto(out *StepsCopyStepDetailsInitParameters) { *out = *in diff --git a/apis/transfer/v1beta2/zz_generated.managed.go b/apis/transfer/v1beta2/zz_generated.managed.go index 5573ba3c50..8de7593e1d 100644 --- a/apis/transfer/v1beta2/zz_generated.managed.go +++ b/apis/transfer/v1beta2/zz_generated.managed.go @@ -7,6 +7,66 @@ package v1beta2 import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +// GetCondition of this Connector. 
+func (mg *Connector) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Connector. +func (mg *Connector) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Connector. +func (mg *Connector) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Connector. +func (mg *Connector) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Connector. +func (mg *Connector) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Connector. +func (mg *Connector) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Connector. +func (mg *Connector) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Connector. +func (mg *Connector) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Connector. +func (mg *Connector) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Connector. +func (mg *Connector) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Connector. +func (mg *Connector) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Connector. +func (mg *Connector) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this Server. 
func (mg *Server) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) diff --git a/apis/transfer/v1beta2/zz_generated.managedlist.go b/apis/transfer/v1beta2/zz_generated.managedlist.go index 363443357a..65f783522c 100644 --- a/apis/transfer/v1beta2/zz_generated.managedlist.go +++ b/apis/transfer/v1beta2/zz_generated.managedlist.go @@ -7,6 +7,15 @@ package v1beta2 import resource "github.com/crossplane/crossplane-runtime/pkg/resource" +// GetItems of this ConnectorList. +func (l *ConnectorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this ServerList. func (l *ServerList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) diff --git a/apis/transfer/v1beta2/zz_generated.resolvers.go b/apis/transfer/v1beta2/zz_generated.resolvers.go index 875c2839e7..117d703907 100644 --- a/apis/transfer/v1beta2/zz_generated.resolvers.go +++ b/apis/transfer/v1beta2/zz_generated.resolvers.go @@ -14,12 +14,104 @@ import ( xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" common "github.com/upbound/provider-aws/config/common" - client "sigs.k8s.io/controller-runtime/pkg/client" - - // ResolveReferences of this Server. apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" ) +func (mg *Connector) ResolveReferences( // ResolveReferences of this Connector. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccessRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.AccessRoleRef, + Selector: mg.Spec.ForProvider.AccessRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccessRole") + } + mg.Spec.ForProvider.AccessRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccessRoleRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SftpConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SftpConfig.UserSecretID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SftpConfig.UserSecretIDRef, + Selector: mg.Spec.ForProvider.SftpConfig.UserSecretIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SftpConfig.UserSecretID") + } + mg.Spec.ForProvider.SftpConfig.UserSecretID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SftpConfig.UserSecretIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", 
"v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AccessRole), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.AccessRoleRef, + Selector: mg.Spec.InitProvider.AccessRoleSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AccessRole") + } + mg.Spec.InitProvider.AccessRole = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AccessRoleRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.SftpConfig != nil { + { + m, l, err = apisresolver.GetManagedResource("secretsmanager.aws.upbound.io", "v1beta1", "Secret", "SecretList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SftpConfig.UserSecretID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SftpConfig.UserSecretIDRef, + Selector: mg.Spec.InitProvider.SftpConfig.UserSecretIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SftpConfig.UserSecretID") + } + mg.Spec.InitProvider.SftpConfig.UserSecretID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SftpConfig.UserSecretIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Server. 
func (mg *Server) ResolveReferences(ctx context.Context, c client.Reader) error { var m xpresource.Managed var l xpresource.ManagedList diff --git a/config/registry.go b/config/registry.go index 656e130547..ec41e339a8 100644 --- a/config/registry.go +++ b/config/registry.go @@ -103,7 +103,7 @@ func GetProvider(ctx context.Context, generationProvider bool) (*config.Provider if err != nil { return nil, errors.Wrap(err, "cannot read the Terraform SDK provider from the JSON schema for code generation") } - if err := traverser.TFResourceSchema(sdkProvider.ResourcesMap).TraverseTFSchemas(traverser.NewMaxItemsSync(p.ResourcesMap)); err != nil { + if err := traverser.TFResourceSchema(sdkProvider.ResourcesMap).Traverse(traverser.NewMaxItemsSync(p.ResourcesMap)); err != nil { return nil, errors.Wrap(err, "cannot sync the MaxItems constraints between the Go schema and the JSON schema") } // use the JSON schema to temporarily prevent float64->int64 diff --git a/examples-generated/transfer/v1beta2/connector.yaml b/examples-generated/transfer/v1beta2/connector.yaml new file mode 100644 index 0000000000..b5971d95d0 --- /dev/null +++ b/examples-generated/transfer/v1beta2/connector.yaml @@ -0,0 +1,24 @@ +apiVersion: transfer.aws.upbound.io/v1beta2 +kind: Connector +metadata: + annotations: + meta.upbound.io/example-id: transfer/v1beta2/connector + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accessRoleSelector: + matchLabels: + testing.upbound.io/example-name: test + as2Config: + - compression: DISABLED + encryptionAlgorithm: AWS128_CBC + localProfileId: ${aws_transfer_profile.local.profile_id} + mdnResponse: NONE + mdnSigningAlgorithm: NONE + messageSubject: For Connector + partnerProfileId: ${aws_transfer_profile.partner.profile_id} + signingAlgorithm: NONE + region: us-west-1 + url: http://www.test.com diff --git a/go.mod b/go.mod index db031b33cd..5132820a71 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( 
github.com/aws/smithy-go v1.20.2 github.com/crossplane/crossplane-runtime v1.16.0-rc.2.0.20240510094504-3f697876fa57 github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79 - github.com/crossplane/upjet v1.4.1 + github.com/crossplane/upjet v1.4.1-0.20240612123927-37c7f4e91d57 github.com/go-ini/ini v1.46.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/awspolicyequivalence v1.6.0 @@ -44,6 +44,7 @@ require ( github.com/YakDriver/go-version v0.1.0 // indirect github.com/YakDriver/regexache v0.23.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect + github.com/alecthomas/kingpin/v2 v2.4.0 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/antchfx/htmlquery v1.2.4 // indirect @@ -323,6 +324,7 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/yuin/goldmark v1.4.13 // indirect github.com/zclconf/go-cty v1.14.3 // indirect github.com/zclconf/go-cty-yaml v1.0.3 // indirect @@ -363,5 +365,3 @@ require ( replace github.com/hashicorp/terraform-plugin-log => github.com/gdavison/terraform-plugin-log v0.0.0-20230928191232-6c653d8ef8fb replace github.com/hashicorp/terraform-provider-aws => github.com/upbound/terraform-provider-aws v0.0.0-20240523140457-101595b8576e - -replace github.com/crossplane/upjet => ../upjet diff --git a/go.sum b/go.sum index 455bd07f22..03562f0557 100644 --- a/go.sum +++ b/go.sum @@ -11,6 +11,8 @@ github.com/YakDriver/regexache v0.23.0/go.mod h1:K4BZ3MYKAqSFbYWqmbsG+OzYUDyJjnM github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= 
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/kong v0.2.16/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -422,6 +424,8 @@ github.com/crossplane/crossplane-runtime v1.16.0-rc.2.0.20240510094504-3f697876f github.com/crossplane/crossplane-runtime v1.16.0-rc.2.0.20240510094504-3f697876fa57/go.mod h1:Pz2tdGVMF6KDGzHZOkvKro0nKc8EzK0sb/nSA7pH4Dc= github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79 h1:HigXs5tEQxWz0fcj8hzbU2UAZgEM7wPe0XRFOsrtF8Y= github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79/go.mod h1:+e4OaFlOcmr0JvINHl/yvEYBrZawzTgj6pQumOH1SS0= +github.com/crossplane/upjet v1.4.1-0.20240612123927-37c7f4e91d57 h1:hyKXccOb8BepVJj79KsvwYsid5Lhlr2DojO29Sqi+rw= +github.com/crossplane/upjet v1.4.1-0.20240612123927-37c7f4e91d57/go.mod h1:wkdZf/Cvhr6PI30VdHIOjg4dX39Z5uijqnLWFk5PbGM= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/dave/jennifer v1.4.1 h1:XyqG6cn5RQsTj3qlWQTKlRGAyrTcsk1kUmWdZBzRjDw= @@ -730,6 +734,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod 
h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= diff --git a/package/crds/acm.aws.upbound.io_certificates.yaml b/package/crds/acm.aws.upbound.io_certificates.yaml index ba31ca751b..ac2b10ae25 100644 --- a/package/crds/acm.aws.upbound.io_certificates.yaml +++ b/package/crds/acm.aws.upbound.io_certificates.yaml @@ -871,6 +871,82 @@ spec: certificateAuthorityArn: description: ARN of an ACM PCA type: string + certificateAuthorityArnRef: + description: Reference to a CertificateAuthority in acmpca to + populate certificateAuthorityArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateAuthorityArnSelector: + description: Selector for a CertificateAuthority in acmpca to + populate certificateAuthorityArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object certificateBody: description: Certificate's PEM-formatted public key type: string @@ -980,6 +1056,82 @@ spec: certificateAuthorityArn: description: ARN of an ACM PCA type: string + certificateAuthorityArnRef: + description: Reference to a CertificateAuthority in acmpca to + populate certificateAuthorityArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + certificateAuthorityArnSelector: + description: Selector for a CertificateAuthority in acmpca to + populate certificateAuthorityArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object certificateBody: description: Certificate's PEM-formatted public key type: string diff --git a/package/crds/opensearch.aws.upbound.io_domains.yaml b/package/crds/opensearch.aws.upbound.io_domains.yaml index f68bdc2d32..3cf944a938 100644 --- a/package/crds/opensearch.aws.upbound.io_domains.yaml +++ b/package/crds/opensearch.aws.upbound.io_domains.yaml @@ -2525,6 +2525,80 @@ spec: service KMS key. Note that KMS will accept a KMS key ID but will return the key ARN. type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object type: object engineVersion: description: while Elasticsearch has elasticsearch_version @@ -2697,6 +2771,85 @@ spec: or removing this configuration forces a new resource (documentation). Detailed below. properties: + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object securityGroupIds: description: List of VPC Security Group IDs to be applied to the OpenSearch domain endpoints. If omitted, the default @@ -2705,6 +2858,84 @@ spec: type: string type: array x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object subnetIds: description: List of VPC Subnet IDs for the OpenSearch domain endpoints to be created in. @@ -2993,6 +3224,80 @@ spec: service KMS key. 
Note that KMS will accept a KMS key ID but will return the key ARN. type: string + kmsKeyIdRef: + description: Reference to a Key in kms to populate kmsKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeyIdSelector: + description: Selector for a Key in kms to populate kmsKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object type: object engineVersion: description: while Elasticsearch has elasticsearch_version @@ -3161,6 +3466,85 @@ spec: or removing this configuration forces a new resource (documentation). Detailed below. properties: + securityGroupIdRefs: + description: References to SecurityGroup in ec2 to populate + securityGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + securityGroupIdSelector: + description: Selector for a list of SecurityGroup in ec2 to + populate securityGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object securityGroupIds: description: List of VPC Security Group IDs to be applied to the OpenSearch domain endpoints. If omitted, the default @@ -3169,6 +3553,84 @@ spec: type: string type: array x-kubernetes-list-type: set + subnetIdRefs: + description: References to Subnet in ec2 to populate subnetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + subnetIdSelector: + description: Selector for a list of Subnet in ec2 to populate + subnetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object subnetIds: description: List of VPC Subnet IDs for the OpenSearch domain endpoints to be created in. 
diff --git a/package/crds/transfer.aws.upbound.io_connectors.yaml b/package/crds/transfer.aws.upbound.io_connectors.yaml index 9126a3b921..054e18ebe7 100644 --- a/package/crds/transfer.aws.upbound.io_connectors.yaml +++ b/package/crds/transfer.aws.upbound.io_connectors.yaml @@ -896,3 +896,869 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Connector is the Schema for the Connectors API. Provides a AWS + Transfer AS2 Connector Resource + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConnectorSpec defines the desired state of Connector + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessRole: + description: The IAM Role which provides read and write access + to the parent directory of the file location mentioned in the + StartFileTransfer request. + type: string + accessRoleRef: + description: Reference to a Role in iam to populate accessRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accessRoleSelector: + description: Selector for a Role in iam to populate accessRole. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + as2Config: + description: Either SFTP or AS2 is configured.The parameters to + configure for the connector object. Fields documented below. + properties: + compression: + description: Specifies weather AS2 file is compressed. The + valud values are ZLIB and DISABLED. + type: string + encryptionAlgorithm: + description: The algorithm that is used to encrypt the file. + The valid values are AES128_CBC | AES192_CBC | AES256_CBC + | NONE. + type: string + localProfileId: + description: The unique identifier for the AS2 local profile. + type: string + mdnResponse: + description: Used for outbound requests to determine if a + partner response for transfers is synchronous or asynchronous. + The valid values are SYNC and NONE. + type: string + mdnSigningAlgorithm: + description: The signing algorithm for the Mdn response. The + valid values are SHA256 | SHA384 | SHA512 | SHA1 | NONE + | DEFAULT. + type: string + messageSubject: + description: Used as the subject HTTP header attribute in + AS2 messages that are being sent with the connector. 
+ type: string + partnerProfileId: + description: The unique identifier for the AS2 partner profile. + type: string + signingAlgorithm: + description: The algorithm that is used to sign AS2 messages + sent with the connector. The valid values are SHA256 | SHA384 + | SHA512 | SHA1 | NONE . + type: string + type: object + loggingRole: + description: The IAM Role which is required for allowing the connector + to turn on CloudWatch logging for Amazon S3 events. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + securityPolicyName: + description: Name of the security policy for the connector. + type: string + sftpConfig: + description: Either SFTP or AS2 is configured.The parameters to + configure for the connector object. Fields documented below. + properties: + trustedHostKeys: + description: A list of public portion of the host key, or + keys, that are used to authenticate the user to the external + server to which you are connecting.(https://docs.aws.amazon.com/transfer/latest/userguide/API_SftpConnectorConfig.html) + items: + type: string + type: array + x-kubernetes-list-type: set + userSecretId: + description: The identifier for the secret (in AWS Secrets + Manager) that contains the SFTP user's private key, password, + or both. The identifier can be either the Amazon Resource + Name (ARN) or the name of the secret. + type: string + userSecretIdRef: + description: Reference to a Secret in secretsmanager to populate + userSecretId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSecretIdSelector: + description: Selector for a Secret in secretsmanager to populate + userSecretId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + url: + description: The URL of the partners AS2 endpoint or SFTP endpoint. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessRole: + description: The IAM Role which provides read and write access + to the parent directory of the file location mentioned in the + StartFileTransfer request. + type: string + accessRoleRef: + description: Reference to a Role in iam to populate accessRole. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accessRoleSelector: + description: Selector for a Role in iam to populate accessRole. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + as2Config: + description: Either SFTP or AS2 is configured.The parameters to + configure for the connector object. Fields documented below. + properties: + compression: + description: Specifies weather AS2 file is compressed. The + valud values are ZLIB and DISABLED. + type: string + encryptionAlgorithm: + description: The algorithm that is used to encrypt the file. + The valid values are AES128_CBC | AES192_CBC | AES256_CBC + | NONE. + type: string + localProfileId: + description: The unique identifier for the AS2 local profile. + type: string + mdnResponse: + description: Used for outbound requests to determine if a + partner response for transfers is synchronous or asynchronous. + The valid values are SYNC and NONE. + type: string + mdnSigningAlgorithm: + description: The signing algorithm for the Mdn response. 
The + valid values are SHA256 | SHA384 | SHA512 | SHA1 | NONE + | DEFAULT. + type: string + messageSubject: + description: Used as the subject HTTP header attribute in + AS2 messages that are being sent with the connector. + type: string + partnerProfileId: + description: The unique identifier for the AS2 partner profile. + type: string + signingAlgorithm: + description: The algorithm that is used to sign AS2 messages + sent with the connector. The valid values are SHA256 | SHA384 + | SHA512 | SHA1 | NONE . + type: string + type: object + loggingRole: + description: The IAM Role which is required for allowing the connector + to turn on CloudWatch logging for Amazon S3 events. + type: string + securityPolicyName: + description: Name of the security policy for the connector. + type: string + sftpConfig: + description: Either SFTP or AS2 is configured.The parameters to + configure for the connector object. Fields documented below. + properties: + trustedHostKeys: + description: A list of public portion of the host key, or + keys, that are used to authenticate the user to the external + server to which you are connecting.(https://docs.aws.amazon.com/transfer/latest/userguide/API_SftpConnectorConfig.html) + items: + type: string + type: array + x-kubernetes-list-type: set + userSecretId: + description: The identifier for the secret (in AWS Secrets + Manager) that contains the SFTP user's private key, password, + or both. The identifier can be either the Amazon Resource + Name (ARN) or the name of the secret. + type: string + userSecretIdRef: + description: Reference to a Secret in secretsmanager to populate + userSecretId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userSecretIdSelector: + description: Selector for a Secret in secretsmanager to populate + userSecretId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. 
+ type: object + x-kubernetes-map-type: granular + url: + description: The URL of the partners AS2 endpoint or SFTP endpoint. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.url is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.url) + || (has(self.initProvider) && has(self.initProvider.url))' + status: + description: ConnectorStatus defines the observed state of Connector. + properties: + atProvider: + properties: + accessRole: + description: The IAM Role which provides read and write access + to the parent directory of the file location mentioned in the + StartFileTransfer request. + type: string + arn: + description: The ARN of the connector. + type: string + as2Config: + description: Either SFTP or AS2 is configured.The parameters to + configure for the connector object. Fields documented below. + properties: + compression: + description: Specifies weather AS2 file is compressed. The + valud values are ZLIB and DISABLED. + type: string + encryptionAlgorithm: + description: The algorithm that is used to encrypt the file. + The valid values are AES128_CBC | AES192_CBC | AES256_CBC + | NONE. + type: string + localProfileId: + description: The unique identifier for the AS2 local profile. + type: string + mdnResponse: + description: Used for outbound requests to determine if a + partner response for transfers is synchronous or asynchronous. + The valid values are SYNC and NONE. + type: string + mdnSigningAlgorithm: + description: The signing algorithm for the Mdn response. The + valid values are SHA256 | SHA384 | SHA512 | SHA1 | NONE + | DEFAULT. + type: string + messageSubject: + description: Used as the subject HTTP header attribute in + AS2 messages that are being sent with the connector. + type: string + partnerProfileId: + description: The unique identifier for the AS2 partner profile. 
+ type: string + signingAlgorithm: + description: The algorithm that is used to sign AS2 messages + sent with the connector. The valid values are SHA256 | SHA384 + | SHA512 | SHA1 | NONE . + type: string + type: object + connectorId: + description: The unique identifier for the AS2 profile or SFTP + Profile. + type: string + id: + type: string + loggingRole: + description: The IAM Role which is required for allowing the connector + to turn on CloudWatch logging for Amazon S3 events. + type: string + securityPolicyName: + description: Name of the security policy for the connector. + type: string + sftpConfig: + description: Either SFTP or AS2 is configured.The parameters to + configure for the connector object. Fields documented below. + properties: + trustedHostKeys: + description: A list of public portion of the host key, or + keys, that are used to authenticate the user to the external + server to which you are connecting.(https://docs.aws.amazon.com/transfer/latest/userguide/API_SftpConnectorConfig.html) + items: + type: string + type: array + x-kubernetes-list-type: set + userSecretId: + description: The identifier for the secret (in AWS Secrets + Manager) that contains the SFTP user's private key, password, + or both. The identifier can be either the Amazon Resource + Name (ARN) or the name of the secret. + type: string + type: object + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + url: + description: The URL of the partners AS2 endpoint or SFTP endpoint. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {}